Diffstat (limited to 'common/recipes-kernel/linux')
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4133-drm-amdgpu-powerplay-Added-missing-endian-fixes-for-.patch45
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4134-drm-amd-powerplay-implement-smu7_smumgr-for-asics-wi.patch35
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4135-drm-amd-powerplay-fix-bug-get-wrong-evv-voltage-of-P.patch39
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4136-drm-amdgpu-Use-the-drm_driver.dumb_destroy-default.patch35
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4137-drm-amd-dc-Add-dc-display-driver-v3.patch159
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4138-drm-amd-display-No-need-to-keep-track-of-unreffed-cl.patch36
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4139-dma-buf-keep-only-not-signaled-fence-in-reservation_.patch217
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4140-drm-amdgpu-Restore-scalable-VM-size-calculation.patch68
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4141-drm-amdgpu-fix-and-cleanup-UVD-IB-generation-v2.patch43
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4142-drm-amdgpu-cleanup-VCN-IB-generation-v2.patch34
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4143-drm-amdkfd-Disable-the-perf-counters-for-old-kernels.patch32
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4144-drm-amd-display-fix-Polaris-12-bw-bounding-box-v2.patch36
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4145-drm-amdkfd-Fix-and-simplify-sync-object-handling-for.patch100
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4146-drm-amdkfd-use-px-to-print-user-space-address-instea.patch55
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4147-drm-amdgpu-Fix-unbalanced-memory-accounting-in-error.patch31
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4148-drm-amdkfd-Take-reference-to-gtt-usertask.patch47
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4149-drm-amdgpu-Avoid-GFP_NOIO.patch59
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4150-drm-amdgpu-Fix-acquiring-VM-on-large-BAR-systems.patch32
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4151-drm-amdkfd-Simplify-dGPU-event-page-allocation.patch284
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4152-drm-amdkfd-Backwards-compatibility-with-old-Thunk.patch148
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4153-drm-amdkfd-Remove-pm_map_process_scratch_cik.patch115
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4154-drm-amdgpu-Remove-pm_map_process_cik.patch347
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4155-drm-amdkfd-Put-packet-sizes-directly-into-packet_man.patch290
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4156-drm-amdkfd-GPU-recovery-support-from-KFD-step-1.patch150
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4157-drm-amdkfd-signal-hw_exception-event-on-GPU-reset.patch82
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4158-drm-amdkfd-remove-check-for-PCIe-upstream-bridge.patch192
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4159-drm-amdgpu-Enable-the-gpu-reset-from-amdkfd.patch59
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4160-drm-amdkfd-CMA-Refactor-CMA-code.patch428
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4161-drm-amdkfd-CMA-Store-cpuva-in-KFD-BO.patch120
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4162-drm-amdkfd-CMA-Handle-userptr-to-userptr-BO-copy.patch284
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4163-drm-amdgpu-kfd2kgd-Support-BO-create-from-sg.patch141
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4164-drm-amdgpu-CMA-Validate-BOs-before-use.patch53
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4165-drm-amdkfd-CMA-Use-shadow-system-BO-for-userptr.patch311
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4166-Fix-SVM-missing-on-Raven.patch85
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4167-drm-amdkfd-Implement-SPI-debug-and-exception-support.patch587
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4168-drm-amd-powerplay-initialzie-the-dpm-intial-enabled-.patch102
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4169-drm-amd-powerplay-Get-more-than-8-level-gfxclk-state.patch31
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4170-amd-powerplay-implement-the-vega12_force_clock_level.patch80
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4171-drm-amd-display-Update-MST-edid-property-every-time.patch44
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4172-drm-amd-display-Check-dc_sink-every-time-in-MST-hotp.patch68
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4173-drm-amd-powerplay-header-file-interface-to-SMU-updat.patch32
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4174-drm-amd-powerplay-add-registry-key-to-disable-ACG.patch34
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4175-drm-amdgpu-fix-null-pointer-panic-with-direct-fw-loa.patch55
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4176-drm-amdgpu-use-ctx-bytes_moved.patch54
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4177-drm-amdgpu-fix-and-cleanup-cpu-visible-VRAM-handling.patch123
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4178-drm-amd-display-Fix-64-bit-division-in-hwss_edp_powe.patch28
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4179-drm-amd-display-Remove-PRE_VEGA-flag.patch51
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4180-drm-amd-display-remove-dummy-is_blanked-to-optimise-.patch75
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4181-drm-ttm-keep-a-reference-to-transfer-pipelined-BOs.patch115
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4182-drm-amdgpu-gfx9-cache-DB_DEBUG2-and-make-it-availabl.patch69
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4183-Revert-drm-amd-display-fix-dereferencing-possible-ER.patch37
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4184-Revert-drm-amd-display-disable-CRTCs-with-NULL-FB-on.patch77
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4185-drm-amdgpu-add-emit_reg_write_reg_wait-ring-callback.patch96
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4186-drm-amdgpu-gfx9-add-emit_reg_write_reg_wait-ring-cal.patch70
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4187-drm-amdgpu-sdma4-add-emit_reg_write_reg_wait-ring-ca.patch38
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4188-drm-amdgpu-uvd7-add-emit_reg_write_reg_wait-ring-cal.patch34
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4189-drm-amdgpu-vce4-add-emit_reg_write_reg_wait-ring-cal.patch34
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4190-drm-amdgpu-vcn1-add-emit_reg_write_reg_wait-ring-cal.patch34
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4191-drm-amdgpu-gmc9-use-amdgpu_ring_emit_reg_write_reg_w.patch43
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4192-drm-amdgpu-gmc-steal-the-appropriate-amount-of-vram-.patch280
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4193-drm-amdgpu-always-allocate-a-PASIDs-for-each-VM-v2.patch119
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4194-drm-amdgpu-Free-VGA-stolen-memory-as-soon-as-possibl.patch200
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4195-drm-gpu-sched-fix-force-APP-kill-hang-v4.patch398
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4196-drm-amdgpu-revert-add-new-bo-flag-that-indicates-BOs.patch65
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4197-drm-amdgpu-revert-Don-t-change-preferred-domian-when.patch139
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4198-drm-amdgpu-re-validate-per-VM-BOs-if-required-v2.patch49
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4199-drm-amdgpu-Code-Indentation-change-in-the-function.patch34
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4200-drm-amd-display-dal-3.1.42.patch28
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4201-drm-amd-display-fix-brightness-level-after-resume-fr.patch74
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4202-drm-amd-display-Move-dp_pixel_encoding_type-to-strea.patch87
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4203-drm-amd-display-Fix-regamma-not-affecting-full-inten.patch66
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4204-drm-amd-display-add-method-to-check-for-supported-ra.patch119
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4205-drm-amd-display-Fix-bug-where-refresh-rate-becomes-f.patch117
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4206-drm-amd-display-fix-segfault-on-insufficient-TG-duri.patch29
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4207-drm-amd-display-Fix-bug-that-causes-black-screen.patch70
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4208-drm-amd-display-change-dml-init-to-use-default-struc.patch172
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4209-drm-amd-display-Add-back-code-to-allow-for-rounding-.patch47
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4210-drm-amd-display-Check-lid-state-to-determine-fast-bo.patch82
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4211-drm-amd-display-Do-not-create-memory-allocation-if-s.patch54
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4212-drm-amd-display-Move-DCC-support-functions-into-dchu.patch606
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4213-drm-amd-display-fix-LFC-tearing-at-top-of-screen.patch41
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4214-drm-amd-display-HDMI-has-no-sound-after-Panel-power-.patch30
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4215-drm-amd-display-refactor-vupdate-interrupt-registrat.patch67
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4216-drm-amd-display-Check-SCRATCH-reg-to-determine-S3-re.patch74
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4217-drm-amd-display-add-rq-dlg-ttu-to-dtn-log.patch542
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4218-drm-amd-display-add-calculated-clock-logging-to-DTN.patch38
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4219-drm-amd-display-add-missing-colorspace-for-set-black.patch55
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4220-drm-amd-display-Use-dig-enable-to-determine-fast-boo.patch166
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4221-drm-amdgpu-ifdef-unused-var.patch30
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4222-drm-amdgpu-add-amdgpu_bo_param.patch207
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4223-drm-amdgpu-use-amdgpu_bo_param-for-amdgpu_bo_create-.patch487
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4224-drm-amdgpu-fix-amdgpu_bo_create-param-changed-for-tt.patch52
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4225-drm-amdkcl-fix-amdgpu_bo_param-changed-compile-error.patch45
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4226-drm-amdgpu-print-the-vbios-version-in-the-debugfs-fi.patch48
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4227-drm-scheduler-always-put-last_sched-fence-in-entity_.patch39
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4228-drm-scheduler-move-last_sched-fence-updating-prior-t.patch51
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4229-drm-amdgpu-limit-reg_write_reg_wait-workaround-to-SR.patch40
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4230-drm-amdgpu-set-preferred_domain-independent-of-fallb.patch76
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4231-drm-amdgpu-handle-domain-mask-checking-v2.patch107
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4232-drm-scheduler-fix-build-broken-by-move-last_sched-fe.patch35
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4233-drm-amdgpu-optionally-do-a-writeback-but-don-t-inval.patch176
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4234-drm-amdgpu-fix-list-not-initialized.patch36
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4235-drm-amdgpu-init-gfx9-aperture-settings.patch46
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4236-drm-amdgpu-simplify-bo_va-list-when-vm-bo-update-v2.patch65
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4237-drm-amdgpu-bo-could-be-null-when-access-in-vm-bo-upd.patch43
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4238-drm-amdgpu-print-DMA-buf-status-in-debugfs.patch48
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4239-drm-amdgpu-Rename-amdgpu_display_framebuffer_domains.patch105
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4240-drm-amdgpu-Remove-VRAM-from-shared-bo-domains.patch42
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4241-drm-amdgpu-pm-document-power_dpm_force_performance_l.patch83
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4242-drm-amdgpu-pm-document-power_dpm_state.patch60
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4243-drm-amdgpu-pm-document-pp_table.patch39
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4244-drm-amdgpu-pm-document-pp_dpm_sclk-pp_dpm_mclk-pp_dp.patch48
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4245-drm-amdgpu-pm-document-pp_power_profile_mode.patch48
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4246-drm-amdgpu-pm-document-pp_od_clk_voltage.patch51
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4247-drm-amd-pp-Change-voltage-clk-range-for-OD-feature-o.patch43
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4248-drm-amdgpu-Enable-scatter-gather-display-support.patch59
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4249-Revert-drm-amdgpu-defer-test-IBs-on-the-rings-at-boo.patch78
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4250-drm-amdkfd-Use-shared-IH-client-ID.patch65
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4251-drm-amdkfd-Implement-hw_exception-work-thread-to-han.patch134
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4252-drm-amdkfd-Remove-queue-node-when-destroy-queue-fail.patch54
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4253-drm-amdgpu-Always-call-kfd-post-reset-after-reset.patch38
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4254-drm-amdkfd-CMA-Remove-diff.-device-restriction.patch102
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4255-drm-amdkfd-CMA-Store-mem_type-in-KFD-BO.patch127
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4256-drm-amdkfd-CMA-Support-for-diff.-devices.patch40
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4257-drm-amdkfd-Remove-unused-variable.patch26
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4258-drm-amdgpu-uvd7-add-emit_reg_write_reg_wait-ring-cal.patch68
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4259-Hybrid-Version-18.30.0.15.patch27
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4260-Hybrid-Version-18.30.1.15.patch27
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4261-Revert-drm-amdgpu-set-COMPUTE_PGM_RSRC1-for-SGPR-VGP.patch54
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4262-drm-amdgpu-change-pp_dpm-clk-mclk-pcie-input-format.patch170
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4263-drm-amdgpu-fix-amdgpu_atpx_get_client_id-s-return-ty.patch35
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4264-drm-amdgpu-Set-graphics-noretry-to-1.patch38
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4265-drm-amdfd-Don-t-hard-code-wait-time.patch56
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4266-drm-amdkfd-CMA-Add-intermediate-wait-if-mGPU.patch77
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4267-drm-amdkfd-CMA-Support-multi-device-VRAM-copy.patch223
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4268-drm-amdkfd-Reduce-priority-of-context-saving-waves-b.patch107
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4269-drm-amdkfd-Introduce-kfd-kernel-module-parameter-hal.patch75
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4270-drm-amdkfd-Use-module-parameters-noretry-as-the-inte.patch93
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4271-drm-amdkfd-Separate-trap-handler-assembly-code-and-i.patch1220
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4272-drm-amdkfd-Mellanox-Support-PeerSync-interface.patch57
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4273-drm-amdkfd-Fix-CP-soft-hang-on-APUs.patch103
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4274-drm-amdgpu-Don-t-use-kiq-to-send-invalid_tlbs-packag.patch29
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4275-drm-amdgpu-Don-t-use-shadow-BO-for-compute-context.patch75
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4276-drm-amdkfd-Fix-typos-in-trap-handler-comments.patch85
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4277-drm-amdkfd-Align-Makefile-with-upstream.patch79
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4278-drm-amdkfd-Align-CIK-interrupt-processing-with-upstr.patch194
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4279-drm-amdkfd-Remove-IH-patching-workaround-for-Vega10.patch92
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4280-drm-amdkfd-Clean-up-mmap-handling.patch142
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4281-drm-amdkfd-fix-uninitialized-variable-use.patch42
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4282-drm-amdkfd-remove-unused-parameter-from-quiesce_mm-r.patch157
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4283-drm-amdkfd-Fix-kernel-queue-rollback-for-64-bit-wptr.patch34
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4284-drm-amdkfd-Match-release_mem-interface-with-other-PM.patch107
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4285-drm-amdkfd-Simplify-packet-manager-initialization.patch117
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4286-drm-amdkfd-Fix-error-handling-in-pm_init.patch61
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4287-drm-amdkfd-Fix-pm_debugfs_runlist.patch49
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4288-drm-amdkfd-Check-ctx_save_restore_area_address.patch63
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4289-drm-amdkfd-Fix-error-handling-around-kfd_process_cre.patch64
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4290-drm-amdkfd-Fix-error-handling-in-APU-CWSR-mapping.patch34
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4291-drm-amdkfd-Simplify-error-handling-in-kfd_create_pro.patch54
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4292-drm-amdkfd-Simplify-obj-handle-allocation.patch51
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4293-drm-amdkfd-Error-if-trying-to-acquire-VM-for-a-PDD-t.patch37
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4294-drm-amdkfd-Cosmetic-changes-to-match-upstream.patch1825
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4295-drm-amdkfd-Add-sanity-checks-in-IRQ-handlers.patch137
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4296-drm-amdgpu-Check-NULL-pointer-for-job-before-reset-j.patch32
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4297-drm-amd-amdgpu-vcn10-Add-callback-for-emit_reg_write.patch31
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4298-drm-amd-amdgpu-Add-some-documentation-to-the-debugfs.patch317
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4299-drm-amdgpu-abstract-bo_base-init-function.patch148
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4300-drm-amdgpu-Fix-KFD-doorbell-SG-BO-mapping.patch34
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4301-drm-amdkfd-Don-t-use-kmap_atomic.patch52
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4302-drm-amdkcl-fixed-can-t-find-kgd_kfd_interface.h-head.patch32
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4303-drm-amdgpu-set-COMPUTE_PGM_RSRC1-for-SGPR-VGPR-clear.patch61
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4304-drm-admgpu-fix-mode_valid-s-return-type.patch103
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4305-drm-amdgpu-add-VEGAM-ASIC-type.patch27
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4306-drm-amdgpu-bypass-GPU-info-firmware-load-for-VEGAM.patch45
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4307-drm-amdgpu-set-VEGAM-to-ASIC-family-and-ip-blocks.patch31
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4308-drm-amdgpu-specify-VEGAM-ucode-SMU-load-method.patch27
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4309-drm-amdgpu-add-VEGAM-SMU-firmware-support.patch42
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4310-drm-amdgpu-virtual_dce-add-VEGAM-support.patch30
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4311-drm-amdgpu-add-VEGAM-dc-support-check.patch31
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4312-drm-amdgpu-skip-VEGAM-MC-firmware-load.patch29
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4313-drm-amdgpu-add-VEGAM-GMC-golden-settings.patch27
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4314-drm-amdgpu-initialize-VEGAM-GMC-v2.patch43
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4315-drm-amdgpu-add-VEGAM-SDMA-firmware-support.patch50
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4316-drm-amdgpu-add-VEGAM-SDMA-golden-settings.patch27
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4317-drm-amdgpu-add-VEGAM-GFX-firmware-support.patch96
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4318-drm-amdgpu-add-VEGAM-GFX-golden-settings.patch72
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4319-drm-amdgpu-initialize-VEGAM-GFX.patch106
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4320-drm-amdgpu-add-VEGAM-UVD-firmware-support.patch45
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4321-drm-amdgpu-add-VEGAM-UVD-encode-support.patch28
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4322-drm-amdgpu-add-VEGAM-VCE-firmware-support.patch49
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4323-drm-amdgpu-add-VEGAM-to-VCE-harvest-config.patch29
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4324-drm-amdgpu-add-VEGAM-support-to-vi.patch82
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4325-drm-amdgpu-add-VEGAM-pci-ids.patch29
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4326-drm-amd-display-Implement-VEGAM-device-IDs-in-DC.patch240
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4327-drm-amd-display-Implement-VEGAM-device-IDs-in-DM.patch44
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4328-drm-amdgpu-Add-VEGAM-support-to-the-legacy-DCE-11-mo.patch82
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4329-drm-amd-display-Use-HBR2-if-eDP-monitor-it-doesn-t-a.patch39
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4330-drm-amd-powerplay-add-smu75-header-files.patch1676
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4331-drm-amd-add-a-new-struct-in-atombios.h.patch33
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4332-drm-amd-powerplay-update-ppatomctrl.c-v2.patch97
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4333-drm-amd-powerplay-update-process-pptables.patch91
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4334-drm-amd-powerplay-add-smumgr-support-for-VEGAM-v2.patch2532
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4335-drm-amd-powerplay-add-specific-changes-for-VEGAM-in-.patch167
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4336-drm-powerplay-Add-powertune-table-for-VEGAM.patch224
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4337-drm-scheduler-don-t-update-last-scheduled-fence-in-T.patch40
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4338-drm-amdgpu-For-sriov-reset-move-IB-test-into-exclusi.patch50
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4339-drm-amdgpu-sriov-Need-to-set-in_gpu_reset-flag-to-ba.patch34
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4340-drm-amd-display-Fix-deadlock-when-flushing-irq.patch40
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4341-drm-amd-display-Unify-dm-resume-sequence-into-a-sing.patch113
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4342-drm-amd-display-Disallow-enabling-CRTC-without-prima.patch77
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4343-drm-amd-display-fix-issue-related-to-infopacket-was-.patch161
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4344-drm-amd-display-Make-program_output_csc-HWSS-interfa.patch72
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4345-drm-amd-display-Refactor-otg_blank-sequence.patch172
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4346-drm-amd-display-DP-link-validation-bug-for-YUV422.patch78
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4347-drm-amd-display-dal-3.1.43.patch28
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4348-drm-amd-display-Add-user_regamma-to-color-module.patch469
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4349-drm-amd-display-add-cursor-TTU-CRQ-related.patch69
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4350-drm-amd-display-add-some-DTN-logs-for-input-and-outp.patch208
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4351-drm-amd-display-update-dtn-logging-and-goldens.patch28
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4352-drm-amd-display-Correct-rounding-calcs-in-mod_freesy.patch67
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4353-drm-amd-display-compact-the-rq-dlg-ttu-log.patch390
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4354-drm-amd-display-Add-assert-that-chroma-pitch-is-non-.patch31
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4355-drm-amd-display-Update-MST-edid-property-every-time.patch45
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4356-drm-amd-display-reprogram-infoframe-during-apply_ctx.patch64
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4357-drm-amd-display-Check-dc_sink-every-time-in-MST-hotp.patch44
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4358-drm-amd-display-to-synchronize-the-hubp-and-dpp-prog.patch72
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4359-drm-amd-display-dal-3.1.44.patch28
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4360-drm-amd-display-Use-int-for-calculating-vline-start.patch38
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4361-drm-amd-display-Couple-formatting-fixes.patch61
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4362-drm-amd-display-Add-VG12-ASIC-IDs.patch35
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4363-drm-amd-Add-BIOS-smu_info-v3_3-required-struct-def.patch225
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4364-drm-amd-display-Add-get_firmware_info_v3_2-for-VG12.patch128
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4365-drm-amd-display-Don-t-return-ddc-result-and-read_byt.patch141
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4366-drm-amd-display-Use-kvzalloc-for-potentially-large-a.patch250
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4367-drm-amd-display-disable-FBC-on-underlay-pipe.patch45
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4368-drm-amdgpu-Switch-to-interruptable-wait-to-recover-f.patch47
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4369-drm-amd-amdgpu-Add-some-documentation-to-the-debugfs.patch32
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4370-drm-amdgpu-invalidate-parent-bo-when-shadow-bo-was-i.patch42
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4371-drm-amd-powerplay-fix-spelling-mistake-contruct-cons.patch44
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4372-drm-amd-display-clean-up-assignment-of-amdgpu_crtc.patch33
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4373-drm-ttm-remove-priority-hard-code-when-initializing-.patch35
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4374-drm-amdgpu-set-ttm-bo-priority-before-initialization.patch46
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4375-drm-amdgpu-gmc9-remove-unused-register-defs.patch45
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4376-drm-amdgpu-fix-null-pointer-for-bo-unmap-trace-funct.patch32
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4377-drm-amd-display-remove-need-of-modeset-flag-for-over.patch63
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4378-drm-amdgpu-Add-support-to-change-mtype-for-2nd-part-.patch160
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4379-drm-amdgpu-drop-printing-the-BO-offset-in-the-gem-de.patch35
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4380-drm-amdgpu-print-the-BO-flags-in-the-gem-debugfs-ent.patch53
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4381-drm-amdgpu-gfx9-Update-golden-setting-for-gfx9_0.patch56
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4382-drm-amd-powerplay-new-framework-to-honour-DAL-clock-.patch87
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4383-drm-amd-powerplay-add-a-framework-for-perfroming-pre.patch77
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4384-drm-amdgpu-Drop-the-unused-header-files-in-soc15.c.patch29
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4385-drm-amdgpu-Fix-hardcoded-base-offset-of-vram-pages.patch35
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4386-drm-amd-Add-vega20_ip_offset.h-headerfile-for-vega20.patch1076
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4387-drm-amdgpu-Add-vega20-to-asic_type-enum.patch30
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4388-drm-amdgpu-Add-gpu_info-firmware-for-vega20.patch40
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4389-drm-amdgpu-set-asic-family-for-vega20.patch28
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4390-drm-amdgpu-Add-smu-firmware-support-for-vega20.patch43
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4391-drm-amdgpu-powerplay-Add-initial-vega20-support-v2.patch30
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4392-drm-amdgpu-psp-Add-initial-psp-support-for-vega20.patch45
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4393-drm-amdgpu-Add-vega20-ucode-loading-method.patch30
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4394-drm-amdgpu-Specify-vega20-uvd-firmware.patch46
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4395-drm-amdgpu-Specify-vega20-vce-firmware.patch46
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4396-drm-amdgpu-virtual_dce-Add-vega20-support.patch28
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4397-drm-amdgpu-gmc9-Add-vega20-support.patch44
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4398-drm-amdgpu-mmhub-Add-clockgating-support-for-vega20.patch29
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4399-drm-amdgpu-sdma4-Specify-vega20-firmware.patch39
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4400-drm-amdgpu-sdma4-Add-vega20-golden-settings-v3.patch64
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4401-drm-amdgpu-sdma4-Add-clockgating-support-for-vega20.patch29
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4402-drm-amdgpu-gfx9-Add-support-for-vega20-firmware.patch44
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4403-drm-amdgpu-gfx9-Add-vega20-golden-settings-v3.patch59
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4404-drm-amdgpu-gfx9-Add-gfx-config-for-vega20.-v3.patch41
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4405-drm-amdgpu-gfx9-Add-support-for-vega20.patch36
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4406-drm-amdgpu-gfx9-Add-clockgatting-support-for-vega20.patch29
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4407-drm-amdgpu-soc15-Add-vega20-soc15_common_early_init-.patch35
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4408-drm-amdgpu-soc15-Set-common-clockgating-for-vega20.patch30
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4409-drm-amdgpu-soc15-dynamic-initialize-ip-offset-for-ve.patch122
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4410-drm-amdgpu-soc15-Add-ip-blocks-for-vega20-v2.patch32
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4411-drm-amdgpu-Add-nbio-support-for-vega20-v2.patch86
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4412-drm-amdgpu-Add-vega20-soc-init-sequence-on-emulator-.patch10123
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4413-drm-amd-display-dm-Add-vega20-support.patch53
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4414-drm-amdgpu-Add-vega20-to-dc-support-check.patch28
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4415-drm-amd-Add-dce-12.1-gpio-aux-registers.patch211
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4416-drm-amd-display-Add-Vega20-config.-support.patch34
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4417-drm-amd-display-Remove-COMBO_DISPLAY_PLL0-from-Vega2.patch68
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4418-drm-amd-display-Add-BIOS-smu_info-v3_3-support-for-V.patch34
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4419-drm-amd-display-Add-harvest-IP-support-for-Vega20.patch247
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4420-drm-amdgpu-atomfirmware-add-new-gfx_info-data-table-.patch64
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4421-drm-amdgpu-atomfirmware-add-parser-for-gfx_info-tabl.patch83
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4422-drm-amdgpu-vg20-fallback-to-vbios-table-if-gpu-info-.patch92
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4423-drm-amdgpu-drop-gpu_info-firmware-for-vega20.patch76
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4424-drm-amdgpu-Set-vega20-load_type-to-AMDGPU_FW_LOAD_DI.patch36
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4425-drm-amd-powerplay-update-vega20-cg-flags.patch45
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4426-drm-include-Fix-MP1_BASE-address-for-vega20.patch37
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4427-drm-amd-include-vg20-adjust-VCE_BASE-to-reuse-vce-4..patch35
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4428-drm-amdgpu-Disable-ip-modules-that-are-not-ready-yet.patch50
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4429-drm-amdgpu-vg20-Restruct-uvd-to-support-multiple-uvd.patch1358
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4430-drm-amdgpu-vg20-Restruct-uvd.inst-to-support-multipl.patch1874
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4431-drm-amdgpu-vg20-Restruct-uvd.idle_work-to-support-mu.patch124
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4432-drm-amdgpu-vg20-increase-3-rings-for-AMDGPU_MAX_RING.patch33
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4433-drm-amdgpu-vg20-Enable-the-2nd-instance-for-uvd.patch104
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4434-drm-amdgpu-vg20-Add-IH-client-ID-for-the-2nd-UVD.patch30
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4435-drm-amdgpu-vg20-Enable-the-2nd-instance-IRQ-for-uvd-.patch61
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4436-drm-amdgpu-vg20-Enable-2nd-instance-queue-maping-for.patch69
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4437-drm-amdgpu-vg20-Enable-UVD-VCE-for-Vega20.patch36
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4438-drm-amdgpu-add-df-3.6-headers.patch145
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4439-drm-amdgpu-df-implement-df-v3_6-callback-functions-v.patch207
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4440-drm-amdgpu-Switch-to-use-df_v3_6_funcs-for-vega20-v2.patch43
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4441-drm-amdgpu-Add-vega20-pci-ids.patch34
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4442-drm-amdgpu-flag-Vega20-as-experimental.patch39
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4443-drm-amdgpu-gem-remove-unused-variable.patch27
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4444-drm-amdgpu-Skip-drm_sched_entity-related-ops-for-KIQ.patch76
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4445-drm-scheduler-remove-unused-parameter.patch183
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4446-drm-amdgpu-remove-unused-member.patch33
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4447-drm-scheduler-Remove-obsolete-spinlock.patch76
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4448-drm-amd-amdgpu-Code-comments-for-the-amdgpu_ttm.c-dr.patch934
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4449-drm-amdgpu-display-remove-VEGAM-config-option.patch277
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4450-drm-amdgpu-display-remove-VEGA20-config-option.patch286
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4451-drm-amdgpu-display-fix-vega12-20-handling-in-dal_asi.patch45
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4452-drm-amd-pp-missing-curly-braces-in-smu7_enable_sclk_.patch40
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4453-drm-scheduler-fix-function-name-prefix-in-comments.patch51
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4454-drm-amd-display-Cleanup-unused-SetPlaneConfig.patch148
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4455-drm-amd-display-get-rid-of-32.32-unsigned-fixed-poin.patch3536
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4456-drm-amd-display-inline-more-of-fixed-point-code.patch721
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4457-drm-amd-display-Make-DisplayStats-work-with-just-DC-.patch140
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4458-drm-amd-display-add-fixed-point-fractional-bit-trunc.patch42
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4459-drm-amd-display-truncate-scaling-ratios-and-inits-to.patch62
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4460-drm-amd-display-underflow-blankscreen-recovery.patch307
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4461-drm-amd-display-Update-HW-sequencer-initialization.patch125
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4462-drm-amd-display-fix-31_32_fixpt-shift-functions.patch76
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4463-drm-amd-display-fix-a-32-bit-shift-meant-to-be-64-wa.patch29
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4464-drm-amd-display-Add-dc-cap-to-restrict-VSR-downscali.patch73
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4465-drm-amd-display-disable-mpo-if-brightness-adjusted.patch40
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4466-drm-amd-display-Log-DTN-only-after-the-atomic-commit.patch170
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4467-drm-amd-display-update-dml-to-allow-sync-with-DV.patch1064
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4468-drm-amd-display-Fix-up-dm-logging-functionality.patch135
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4469-drm-amd-display-use-macro-for-logs.patch81
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4470-drm-amd-display-don-t-create-new-dc_sink-if-nothing-.patch198
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4471-drm-amd-display-Only-limit-VSR-downscaling-when-actu.patch49
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4472-drm-amd-display-constify-a-few-dc_surface_update-fie.patch46
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4473-drm-amd-display-Add-fullscreen-transitions-to-log.patch250
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4474-drm-amd-display-fix-bug-with-index-check.patch28
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4475-drm-amd-display-Clear-underflow-status-for-debug-pur.patch39
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4476-drm-amd-display-DCN1-link-encoder.patch1852
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4477-drm-amd-display-fix-memory-leaks.patch87
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4478-drm-amd-display-Clear-connector-s-edid-pointer.patch32
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4479-drm-amd-pp-Fix-build-warning-in-vegam.patch48
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4480-drm-amdgpu-fix-insert-nop-for-VCN-decode-ring.patch63
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4481-drm-amdgpu-fix-insert-nop-for-UVD7-ring.patch51
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4482-drm-amdgpu-fix-insert-nop-for-UVD6-ring.patch60
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4483-drm-amdgpu-fix-insert-nop-for-UVD5-ring.patch60
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4484-drm-amdgpu-fix-insert-nop-for-UVD4.2-ring.patch60
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4485-Remove-calls-to-suspend-resume-atomic-helpers-from-a.patch55
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4486-Revert-drm-amdgpu-vg20-Restruct-uvd.idle_work-to-sup.patch127
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4487-drm-amdgpu-count-fences-from-all-uvd-instances-in-id.patch39
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4488-drm-amdgpu-Take-uvd-encode-rings-into-account-in-idl.patch42
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4489-drm-amdgpu-Take-vcn-encode-rings-into-account-in-idl.patch36
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4490-drm-amdkfd-Fix-kernel-queue-64-bit-doorbell-offset-c.patch45
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4491-drm-amdgpu-Avoid-invalidate-tlbs-when-gpu-is-on-rese.patch61
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4492-drm-amdkfd-Fix-race-between-scheduler-and-context-re.patch665
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4493-drm-amdkfd-Change-the-control-stack-mtype-from-UC-to.patch106
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4494-drm-amdgpu-Avoid-destroy-hqd-when-GPU-is-on-reset.patch57
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4495-drm-amd-pp-fix-a-couple-locking-issues.patch34
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4496-drm-amdgpu-skip-CG-for-VCN-when-late_init-fini.patch37
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4497-drm-amd-pp-Add-smu-support-for-VCN-powergating-on-RV.patch53
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4498-drm-amdgpu-Add-CG-PG-flags-for-VCN.patch36
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4499-drm-amdgpu-Add-SOC15_WAIT_ON_RREG-macro-define.patch41
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4500-drm-amdgpu-Add-static-CG-control-for-VCN-on-RV.patch136
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4501-drm-amdgpu-Enable-VCN-CG-by-default-on-RV.patch29
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4502-drm-amdgpu-Add-VCN-static-PG-support-on-RV.patch156
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4503-drm-amdgpu-Enable-VCN-static-PG-by-default-on-RV.patch29
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4504-drm-amdgpu-Add-runtime-VCN-PG-support.patch132
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4505-drm-amdgpu-rework-VM-state-machine-lock-handling-v2.patch299
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4506-drm-amdgpu-cleanup-amdgpu_vm_validate_pt_bos-v2.patch72
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4507-drm-amdgpu-further-optimize-amdgpu_vm_handle_moved.patch80
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4508-drm-amdgpu-kmap-PDs-PTs-in-amdgpu_vm_update_director.patch55
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4509-drm-amdgpu-consistenly-use-VM-moved-flag.patch91
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4510-drm-amdgpu-move-VM-BOs-on-LRU-again.patch96
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4511-drm-amdgpu-add-rcu_barrier-after-entity-fini.patch60
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4512-drm-amdgpu-Remove-unused-variable-in-amdgpu_device_g.patch31
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4513-drm-amdkfd-sriov-Put-the-pre-and-post-reset-in-exclu.patch84
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4514-drm-amdgpu-pp-remove-duplicate-assignment.patch31
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4515-drm-amdgpu-Update-GFX-info-structure-to-match-what-v.patch59
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4516-drm-amd-display-Remove-use-of-division-operator-for-.patch35
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4517-drm-amd-display-Implement-dm_pp_get_clock_levels_by_.patch82
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4518-drm-amdgpu-vcn_v1_0_is_idle-can-be-static.patch36
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4519-drm-amdkfd-Fix-a-copy-error-when-exit-compute-profil.patch33
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4520-drm-amdkfd-Add-debugfs-interface-to-trigger-HWS-hang.patch201
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4521-drm-amdkcl-4.17-fix-prime-bo-for-raven-A-A-issue.patch46
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4522-drm-amdgpu-defer-test-IBs-on-the-rings-at-boot-V3.patch92
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4523-drm-amd-display-Release-fake-sink.patch107
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4524-drm-amd-display-pass-pipe_ctx-straight-to-blank_pixe.patch70
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4525-drm-amd-display-add-register-offset-0-check.patch45
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4526-drm-amd-display-Do-not-program-interrupt-status-on-d.patch32
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4527-drm-amd-display-Clean-up-submit_channel_request.patch64
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4528-drm-amd-display-upgrade-scaler-math.patch525
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4529-drm-amd-display-dal-3.1.45.patch28
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4530-drm-amd-display-Prefix-event-prints-with-Event.patch28
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4531-drm-amd-display-Read-DPCD-link-caps-up-to-and-includ.patch33
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4532-drm-amd-display-AUX-will-exit-when-HPD-LOW-detected.patch177
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4533-drm-amd-display-Add-function-to-get-optc-active-size.patch95
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4534-drm-amd-display-replace-msleep-with-udelay-in-fbc-pa.patch55
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4535-drm-amd-display-add-DPCD-read-for-Sink-ieee-OUI.patch45
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4536-drm-amd-display-add-config-for-sending-VSIF.patch71
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4537-drm-amd-display-Fix-indentation-in-dcn10-resource-co.patch30
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4538-drm-amd-display-Read-DP_SINK_COUNT_ESI-range-on-HPD-.patch90
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4539-drm-amd-display-Default-log-masks-should-include-all.patch192
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4540-drm-amd-display-Optimize-DP_SINK_STATUS_ESI-range-re.patch61
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4541-drm-amd-display-Dynamic-HDR-metadata-mem-buffer.patch46
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4542-drm-amd-display-Refactor-audio-programming.patch440
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4543-drm-amd-display-HLG-support.patch200
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4544-drm-amd-display-DP-component-depth-16-bpc.patch33
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4545-drm-amd-display-Added-documentation-for-some-DC-inte.patch94
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4546-drm-amd-display-dal-3.1.46.patch28
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4547-drm-amd-display-Set-TMZ-and-DCC-for-secondary-surfac.patch82
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4548-drm-amd-display-Destroy-connector-state-on-reset.patch43
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4549-drm-amd-display-Prefix-TIMING_STANDARD-entries-with-.patch154
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4550-drm-amd-display-DP-YCbCr-4-2-0-support.patch52
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4551-drm-amd-display-decouple-front-and-backend-pgm-using.patch351
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4552-drm-amd-display-add-dentist-frequency-to-resource-po.patch28
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4553-drm-amd-display-fix-dscl_manual_ratio_init.patch108
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4554-drm-amd-display-check-if-audio-clk-enable-is-applica.patch32
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4555-drm-amd-display-Do-not-limit-color-depth-to-8bpc.patch34
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4556-drm-amd-display-dal-3.1.47.patch28
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4557-drm-amd-display-Fix-wrong-latency-assignment-for-VEG.patch33
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4558-drm-amdgpu-display-check-if-ppfuncs-exists-before-us.patch30
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4559-drm-amdgpu-display-drop-DRM_AMD_DC_FBC-kconfig-optio.patch292
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4560-drm-amdgpu-display-enable-CONFIG_DRM_AMD_DC_DCN1_0-b.patch31
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4561-drm-amd-display-avoid-sleeping-in-atomic-context-whi.patch58
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4562-drm-amdkcl-4.7-fix-__drm_atomic_helper_connector_des.patch434
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4563-drm-scheduler-fix-a-corner-case-in-dependency-optimi.patch42
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4564-drm-amdgpu-remove-unnecessary-scheduler-entity-for-V.patch150
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4565-drm-amd-pp-Add-cases-for-getting-phys-and-disp-clks-.patch35
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4566-drm-amdgpu-Use-GTT-for-dumb-buffer-if-sg-display-ena.patch60
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4567-drm-amdgpu-Add-helper-function-to-get-buffer-domain.patch97
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4568-drm-amdgpu-To-get-gds-gws-and-oa-from-adev-gds.patch86
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4569-drm-amdgpu-correct-SMU11-SYSPLL0-clock-id-values.patch44
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4570-drm-amd-powerplay-bug-fixs-for-getsmuclockinfo.patch41
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4571-drm-amdgpu-typo-fix-for-vega20-cg-flags.patch31
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4572-drm-amdgpu-fix-ISO-C90-forbids-mixed-declarations.patch44
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4573-drm-amdgpu-gds-bo-must-not-be-per-vm-bo.patch40
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4574-drm-amd-pp-Connect-display_clock_voltage_request-to-.patch115
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4575-drm-amd-pp-Allow-underclocking-when-od-table-is-empt.patch121
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4576-drm-gfx9-Update-gc-goldensetting-for-vega20.patch30
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4577-drm-amdgpu-Fix-NULL-pointer-when-load-kfd-driver-wit.patch62
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4578-drm-amdgpu-add-kernel-doc-for-amdgpu_object.c.patch480
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4579-drm-amdgpu-add-checking-for-sos-version.patch77
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4580-drm-amdgpu-fix-the-missed-vcn-fw-version-report.patch32
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4581-drm-amdgpu-df-fix-potential-array-out-of-bounds-read.patch36
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4582-Revert-drm-amdgpu-Add-an-ATPX-quirk-for-hybrid-lapto.patch35
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4583-Revert-drm-amdgpu-add-new-device-to-use-atpx-quirk.patch35
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4584-Partially-revert-drm-amdgpu-add-atpx-quirk-handling-.patch37
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4585-drm-amdgpu-pp-switch-the-default-dpm-implementation-.patch49
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4586-drm-amdgpu-Add-documentation-for-PRIME-related-code.patch231
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4587-drm-amdgpu-replace-mutex-with-spin_lock-V2.patch107
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4588-drm-amdgpu-pp-replace-mutex-with-spin_lock-V2.patch573
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4589-drm-amdgpu-avoid-sleep-while-executing-atombios-tabl.patch71
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4590-drm-amdgpu-pp-Revert-replace-mutex-with-spin_lock-V2.patch545
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4591-drm-amdgpu-Fix-ups-for-amdgpu_object.c-documentation.patch42
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4592-drm-scheduler-Avoid-using-wait_event_killable-for-dy.patch178
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4593-drm-amdgpu-move-amdgpu_ctx_mgr_entity_fini-to-f_ops-.patch145
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4594-drm-amdgpu-fix-clear_all-and-replace-handling-in-the.patch48
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4595-Revert-drm-amdgpu-fix-clear_all-and-replace-handling.patch44
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4596-drm-amdgpu-fix-clear_all-and-replace-handling-in-the.patch49
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4597-drm-amd-powerplay-fix-missed-hwmgr-check-warning-bef.patch55
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4598-drm-amdgpu-define-vcn-jpeg-ring.patch33
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4599-drm-amdgpu-add-vcn-jpeg-ring.patch33
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4600-drm-amdgpu-add-jpeg-packet-defines-to-soc15d.h.patch53
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4601-drm-amdgpu-add-more-jpeg-register-offset-headers.patch71
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4602-drm-amdgpu-implement-jpeg-ring-functions.patch318
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4603-drm-amdgpu-set-jpeg-ring-functions.patch91
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4604-drm-amdgpu-add-vcn-jpeg-irq-support.patch45
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4605-drm-amdgpu-initialize-vcn-jpeg-ring.patch67
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4606-drm-amdgpu-implement-patch-for-fixing-a-known-bug.patch134
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4607-drm-amdgpu-define-and-add-extra-dword-for-jpeg-ring.patch62
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4608-drm-amdgpu-add-patch-to-jpeg-ring.patch40
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4609-drm-amdgpu-add-vcn-jpeg-sw-finish.patch34
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4610-drm-amdgpu-add-vcn-jpeg-ring-test.patch93
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4611-drm-amdgpu-add-vcn-jpeg-ib-test.patch139
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4612-drm-amdgpu-enable-vcn-jpeg-ib-test.patch33
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4613-drm-amdgpu-add-AMDGPU_HW_IP_VCN_JPEG-to-info-query.patch60
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4614-drm-amdgpu-add-AMDGPU_HW_IP_VCN_JPEG-to-queue-mgr.patch51
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4615-drm-amdgpu-Grab-put-runtime-PM-references-in-atomic_.patch95
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4616-drm-amd-powerplay-fix-wrong-clock-adjust-sequence.patch49
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4617-drm-amdgpu-rename-rmn-to-amn-in-the-MMU-notifier-cod.patch340
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4618-drm-amdgpu-fix-documentation-of-amdgpu_mn.c-v2.patch203
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4619-drm-amdgpu-Correct-the-ndw-of-bo-update-mapping.patch35
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4620-drm-amdgpu-change-gfx8-ib-test-to-use-WB.patch107
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4621-drm-amd-Update-KFD-Thunk-ioctl-ABI-to-match-upstream.patch578
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4622-drm-amdgpu-Doorbell-assignment-for-8-sdma-user-queue.patch129
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4623-drm-amdkfd-Make-the-number-of-SDMA-queues-variable.patch180
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4624-drm-amdgpu-Fix-NULL-pointer-when-PP-block-is-disable.patch44
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4625-drm-amd-Interface-change-to-support-64-bit-page_tabl.patch263
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4626-drm-amdgpu-Add-vega20-support-on-kfd-probe.patch36
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4627-drm-amdkfd-Vega20-bring-up-on-amdkfd-side.patch148
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4628-drm-amdkfd-reflect-atomic-support-in-IO-link-propert.patch118
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4629-drm-amdgpu-Changed-CU-reservation-golden-settings.patch39
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4630-drm-amdkfd-Add-check-user-queue-busy-interface.patch246
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4631-drm-amdkfd-Replace-mqd-with-mqd_mgr-as-the-variable-.patch531
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4632-Hybrid-Version-18.30.2.15.patch27
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4633-Revert-drm-amdgpu-replace-mutex-with-spin_lock-V2.patch75
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4634-Revert-drm-amd-display-avoid-sleeping-in-atomic-cont.patch30
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4635-drm-amdgpu-Added-ISR-for-CP-ECC-EDC-interrupt-v2.patch165
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4636-drm-amdgpu-Add-interrupt-SQ-source-struct-to-amdgpu_.patch37
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4637-drm-amdgpu-Add-plumbing-for-handling-SQ-EDC-ECC-inte.patch190
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4638-drm-amdgpu-remove-unused-parameter-for-va-update.patch49
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4639-drm-amd-pp-initialize-result-to-before-or-ing-in-dat.patch40
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4640-drm-amd-display-Fix-stale-buffer-object-bo-use.patch48
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4641-drm-amd-pp-Fix-OD-feature-enable-failed-on-Vega10-wo.patch53
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4642-drm-amdgpu-Update-function-level-documentation-for-G.patch633
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4643-drm-amd-include-Update-df-3.6-mask-and-shift-definit.patch40
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4644-drm-amdgpu-fix-parsing-indirect-register-list-v2.patch71
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4645-drm-amd-powerplay-remove-uncessary-extra-gfxoff-cont.patch47
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4646-drm-amd-powerplay-Set-higher-SCLK-MCLK-frequency-tha.patch35
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4647-drm-amdgpu-Add-BRACKET_LAYOUT_ENUMs-to-ObjectID.h.patch42
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4648-drm-amdgpu-update-documentation-for-amdgpu_irq.c-v3.patch465
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4649-drm-amdgpu-fix-typo-in-amdgpu_mn.c-comments.patch32
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4650-drm-amdgpu-Consolidate-visible-vs.-real-vram-check-v.patch188
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4651-drm-doc-Add-amdgpu-hwmon-power-documentation-v2.patch136
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4652-drm-amdgpu-vg20-support-new-UVD-FW-version-naming-co.patch60
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4653-drm-amd-pp-Add-S3-support-for-OD-feature.patch296
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4654-drm-amdkfd-Fix-the-case-when-a-process-is-NULL.patch46
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4655-drm-amdgpu-band-aid-validating-VM-PTs.patch35
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4656-drm-amd-pp-Fix-wrong-clock-unit-exported-to-Display.patch141
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4657-drm-amd-display-use-the-get_crtc-instead-of-get-exis.patch52
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4658-drm-amdgpu-add-new-DF-1.7-register-defs.patch42
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4659-drm-amdgpu-add-new-DF-callback-for-ECC-setup.patch31
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4660-drm-amdgpu-add-a-df-1.7-implementation-of-enable_ecc.patch43
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4661-drm-amdgpu-gmc9-disable-partial-wr-rmw-if-ECC-is-not.patch32
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4662-Revert-drm-amd-display-Implement-dm_pp_get_clock_lev.patch88
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4663-Revert-drm-amdgpu-band-aid-validating-VM-PTs.patch31
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4664-Revert-drm-amdgpu-move-VM-BOs-on-LRU-again.patch94
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4665-drm-amdgpu-Make-sure-IB-tests-flushed-after-IP-resum.patch34
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4666-drm-amdgpu-gfx9-Update-golden-settings-for-vg10.patch45
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4667-drm-amd-display-Fix-Vega10-black-screen-after-mode-c.patch77
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4668-drm-amd-pp-Read-vbios-vddc-limit-before-use-them.patch46
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4669-drm-amd-pp-Update-clk-with-od-setting-when-set-power.patch48
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4670-drm-amdgpu-Make-struct-amdgpu_atif-private-to-amdgpu.patch194
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4671-drm-amdgpu-s-disp_detetion_ports-disp_detection_port.patch40
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4672-drm-amdgpu-Add-amdgpu_atpx_get_dhandle.patch54
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4673-drm-amdgpu-Dynamically-probe-for-ATIF-handle-v2.patch234
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4674-drm-amdgpu-Grab-put-runtime-PM-references-in-atomic_.patch70
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4675-drm-amdgpu-Count-disabled-CRTCs-in-commit-tail-earli.patch196
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4676-drm-amdgpu-delete-duplicated-code-about-runtime-PM-r.patch37
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4677-drm-amd-display-Fix-warning-observed-in-mode-change-.patch38
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4678-drm-amd-display-Fix-Edid-emulation-for-linux.patch274
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4679-drm-amd-display-fix-invalid-function-table-override.patch73
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4680-drm-amd-display-make-function-tables-const.patch80
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4681-drm-amd-amdgpu-Removing-unwanted-code-from-the-below.patch191
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4682-drm-amdgpu-Fix-vce-work-queue-was-not-cancelled-when.patch69
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4683-drm-amd-display-skip-multisync-for-slave-displays-ha.patch39
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4684-drm-amd-display-multisync-should-be-enabled-only-for.patch29
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4685-drm-amd-display-skip-multisync-redo-for-already-enab.patch105
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4686-drm-amd-display-initialize-new_stream-status.primary.patch39
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4687-x86-MCE-AMD-mce-code-changes-to-fix-the-crash.patch137
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4688-drm-amdgpu-No-action-when-VCN-PG-state-is-unchanged.patch61
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4689-tpm-tpm_crb-Use-start-method-value-from-ACPI-table-d.patch175
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4690-drm-amd-display-Fix-BUG_ON-during-CRTC-atomic-check-.patch59
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4691-drm-amd-display-Make-atomic-check-validate-underscan.patch90
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4692-drm-amd-display-Update-color-props-when-modeset-is-r.patch39
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4693-drm-amd-powerplay-add-control-gfxoff-enabling-in-lat.patch50
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4694-drm-amd-powerplay-fix-missed-hwmgr-check-warning-bef.patch42
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4695-drm-amd-powerplay-Set-higher-SCLK-MCLK-frequency-tha.patch47
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4696-drm-amd-pp-Fix-uninitialized-variable.patch30
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4697-drm-amdgpu-Use-kvmalloc_array-for-allocating-VRAM-ma.patch57
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4698-drm-amdgpu-Don-t-default-to-DC-support-for-Kaveri-an.patch60
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4699-drm-amdgpu-All-UVD-instances-share-one-idle_work-han.patch109
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4700-drm-amdgpu-Update-pin_size-values-before-unpinning-B.patch59
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4701-drm-amdgpu-Refactor-amdgpu_vram_mgr_bo_invisible_siz.patch88
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4702-drm-amdgpu-Make-amdgpu_vram_mgr_bo_invisible_size-al.patch60
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4703-drm-amdgpu-GPU-vs-CPU-page-size-fixes-in-amdgpu_vm_b.patch63
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4704-drm-amdgpu-fix-UBSAN-Undefined-behaviour-for-amdgpu_.patch52
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4705-drm-amdgpu-Support-new-VCN-FW-version-naming-convent.patch81
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4706-drm-amd-display-release-spinlock-before-committing-u.patch60
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4707-drm-amd-powerplay-correct-vega12-thermal-support-as-.patch30
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4708-drm-amd-powerplay-correct-vega12-bootup-values-setti.patch183
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4709-drm-amd-powerplay-smc_dpm_info-structure-change.patch91
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4710-drm-amdgpu-fix-swapped-emit_ib_size-in-vce3.patch39
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4711-drm-amdgpu-pm-fix-display-count-in-non-DC-path.patch30
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4712-drm-amdgpu-fix-user-fence-write-race-condition.patch58
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4713-drm-amd-display-adding-ycbcr420-pixel-encoding-for-h.patch52
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4714-drm-amd-display-add-a-check-for-display-depth-validi.patch90
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4715-Revert-drm-amd-display-Don-t-return-ddc-result-and-r.patch151
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4716-drm-amdgpu-Reserve-VM-root-shared-fence-slot-for-com.patch46
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4717-drm-amdgpu-Verify-root-PD-is-mapped-into-kernel-addr.patch60
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4718-amd-dc-dce100-On-dce100-set-clocks-to-0-on-suspend.patch55
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4719-drm-amdgpu-pp-smu7-use-a-local-variable-for-toc-inde.patch92
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4720-drm-amd-display-Fix-DP-HBR2-Eye-Diagram-Pattern-on-C.patch85
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4721-drm-amdgpu-allocate-shared-fence-slot-in-VA-IOCTL.patch35
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4722-drm-amd-pp-Make-sure-clock_voltage_limit_table-on-dc.patch35
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4723-drm-amdgpu-Fix-uvd-firmware-version-information-for-.patch96
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4724-drm-amd-display-fix-type-of-variable.patch38
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4725-drm-amdgpu-Fix-ups-for-amdgpu_object.c-documentation.patch261
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4726-drm-amd-pp-Remove-SAMU-support-in-powerplay.patch755
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4727-drm-amdgpu-Use-real-power-source-in-powerplay-instan.patch318
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4728-drm-amd-pp-Implement-update_smc_table-for-CI.patch118
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4729-drm-amdgpu-Get-real-power-source-to-initizlize-ac_po.patch40
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4730-drm-amdgpu-Update-function-level-documentation-for-G.patch101
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4731-drm-amd-display-Drop-to-fail-safe-mode-if-edid-is-ba.patch35
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4732-drm-amd-display-Write-TEST_EDID_CHECKSUM_WRITE-for-E.patch86
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4733-drm-amd-display-Stream-encoder-update.patch68
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4734-drm-amd-display-Move-i2c-and-aux-structs-into-dc_ddc.patch180
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4735-drm-amd-display-Add-use_dynamic_meta-flag-to-stream_.patch44
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4736-drm-amd-display-Drop-duplicate-dc_stream_set_static_.patch31
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4737-drm-amd-display-Make-it-more-clear-when-info-frames-.patch49
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4738-drm-amd-display-Convert-quotes-to-Ascii-quotes.patch29
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4739-drm-amd-display-Disable-stats-by-default.patch29
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4740-drm-amd-display-Add-new-transfer-type-HWPWL.patch48
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4741-drm-amd-display-create-sink_id-in-dc_sink-structure-.patch78
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4742-drm-amd-display-Allow-DP-register-double-buffer.patch138
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4743-drm-amd-display-Add-num_opp-to-resource_caps.patch44
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4744-drm-amd-display-Do-not-skip-FBC-init-in-failsafe-mod.patch44
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4745-amdgpu-display-use-modern-ktime-accessors.patch55
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4746-drm-amdgpu-update-ib_start-size_alignment-same-as-wi.patch105
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4747-drm-amdgpu-correct-GART-location-info.patch70
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4748-drm-amdgpu-Use-correct-enum-to-set-powergating-state.patch38
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4749-drm-amd-amdgpu-Add-a-GPU_LOAD-entry-to-sysfs-v3.patch94
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4750-drm-amdgpu-Polish-SQ-IH.patch85
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4751-drm-amdgpu-Add-parsing-SQ_EDC_INFO-to-SQ-IH-v3.patch199
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4752-drm-amd-display-replace-clocks_value-struct-with-dc_.patch378
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4753-drm-amd-display-redesign-dce-dcn-clock-voltage-updat.patch918
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4754-drm-amd-display-rename-display-clock-block-to-dccg.patch755
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4755-drm-amd-display-move-clock-programming-from-set_band.patch284
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4756-drm-amd-display-Adding-dm-pp-clocks-getting-by-volta.patch80
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4757-drm-amd-display-Apply-clock-for-voltage-request.patch61
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4758-drm-amd-display-Adding-Get-static-clocks-for-dm_pp-i.patch48
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4759-drm-amd-display-dal-3.1.48.patch28
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4760-drm-amd-display-Introduce-pp-smu-raven-functions.patch170
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4761-drm-amd-display-remove-invalid-assert-when-no-max_pi.patch30
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4762-drm-amd-display-Use-tg-count-for-opp-init.patch32
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4763-drm-amd-display-Use-local-structs-instead-of-struct-.patch87
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4764-drm-amd-display-Add-clock-types-to-applying-clk-for-.patch44
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4765-drm-amd-display-get-rid-of-cur_clks-from-dcn_bw_outp.patch348
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4766-drm-amd-display-move-dcn1-dispclk-programming-to-dcc.patch315
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4767-drm-amd-display-clean-up-dccg-divider-calc-and-dcn-c.patch334
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4768-drm-amd-display-rename-dce_disp_clk-to-dccg.patch432
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4769-drm-amd-display-clean-up-set_bandwidth-usage.patch100
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4770-drm-amd-display-remove-unnecessary-pplib-volage-requ.patch52
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4771-drm-amd-display-Temporarily-remove-Chroma-logs.patch48
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4772-drm-amd-display-Define-dp_alt_mode.patch112
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4773-drm-amd-display-fix-dccg-dcn1-ifdef.patch112
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4774-drm-amd-display-fix-pplib-voltage-request.patch124
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4775-drm-amd-display-add-CHG_DONE-mash-sh-defines-for-den.patch41
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4776-drm-amd-display-change-dentist-DID-enum-values-to-up.patch94
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4777-drm-amd-display-add-safe_to_lower-support-to-dcn-wm-.patch463
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4778-drm-amd-display-support-ACrYCb2101010.patch42
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4779-drm-amd-display-fix-use-of-uninitialized-memory.patch36
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4780-drm-amd-display-dal-3.1.49.patch28
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4781-drm-amd-display-Add-front-end-for-dp-debugfs-files.patch284
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4782-drm-amd-display-dal-3.1.50.patch28
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4783-drm-amd-display-clean-rq-dlg-ttu-reg-structs-before-.patch77
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4784-drm-amd-display-dal-3.1.51.patch28
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4785-drm-amd-display-fix-potential-infinite-loop-in-fbc-p.patch42
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4786-drm-amd-display-Enable-PPLib-calls-from-DC-on-linux.patch34
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4787-drm-amd-display-Add-dmpp-clks-types-for-conversion.patch82
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4788-drm-amd-display-Convert-10kHz-clks-from-PPLib-into-k.patch48
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4789-drm-amd-display-move-dml-defaults-to-respective-dcn-.patch165
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4790-drm-amd-display-Moving-powerplay-functions-to-a-sepa.patch1035
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4791-drm-amd-display-fix-dcn1-watermark-range-reporting.patch165
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4792-drm-amd-display-remove-dcn1-watermark-sets-b-c-and-d.patch66
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4793-drm-amd-display-separate-out-wm-change-request-dcn-w.patch108
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4794-drm-amd-display-move-dcn-watermark-programming-to-se.patch170
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4795-drm-amd-display-remove-soc_bounding_box.c.patch193
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4796-drm-amd-display-Check-scaling-ration-not-viewports-p.patch40
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4797-drm-amd-display-dal-3.1.52.patch28
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4798-drm-amd-display-add-valid-regoffset-and-NULL-pointer.patch191
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4799-drm-amd-display-get-board-layout-for-edid-emulation.patch583
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4800-drm-amd-display-Allow-option-to-use-worst-case-water.patch74
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4801-drm-amdgpu-Rename-entity-cleanup-finctions.patch83
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4802-drm-amd-display-don-t-initialize-result.patch40
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4803-drm-amdgpu-remove-duplicated-codes.patch35
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4804-drm-amd-display-Drop-unnecessary-header-file.patch32
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4805-drm-amd-display-Fix-dm-pp-clks-type-convert-error.patch32
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4806-drm-amdgpu-Rename-set_mmhub_powergating_by_smu-to-po.patch136
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4807-drm-amd-pp-Rename-enable_per_cu_power_gating-to-powe.patch94
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4808-drm-amd-pp-Unify-powergate_uvd-vce-mmhub-to-set_powe.patch272
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4809-drm-amd-pp-Add-gfx-pg-support-in-smu-through-set_pow.patch93
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4810-drm-amd-pp-Add-powergate_gfx-backend-function-on-Rav.patch31
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4811-drm-amdgpu-Add-gfx_off-support-in-smu-through-pp_set.patch119
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4812-drm-amdgpu-Split-set_pg_state-into-separate-function.patch88
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4813-drm-amdgpu-Move-CG-PG-setting-out-of-delay-worker-th.patch69
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4814-drm-amdgpu-Add-stutter-mode-ctrl-in-module-parameter.patch53
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4815-drm-amd-display-Ctrl-stutter-mode-through-module-par.patch33
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4816-drm-amd-display-Fix-a-typo-in-wm_min_memg_clk_in_khz.patch131
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4817-drm-amd-powerplay-drop-the-acg-fix.patch34
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4818-drm-amd-powerplay-revise-default-dpm-tables-setup.patch424
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4819-drm-amd-powerplay-retrieve-all-clock-ranges-on-start.patch142
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4820-drm-amd-powerplay-revise-clock-level-setup.patch470
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4821-drm-amd-powerplay-initialize-uvd-vce-powergate-statu.patch79
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4822-drm-amd-powerplay-correct-smc-display-config-for-mul.patch32
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4823-drm-amd-powerplay-drop-unnecessary-uclk-hard-min-set.patch47
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4824-drm-amd-powerplay-correct-vega12-max-num-of-dpm-leve.patch72
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4825-drm-amd-powerplay-apply-clocks-adjust-rules-on-power.patch213
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4826-drm-amd-powerplay-set-vega12-pre-display-configurati.patch78
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4827-drm-amd-powerplay-cosmetic-fix.patch104
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4828-drm-amdgpu-Use-gmc_vram_full_visible-in-vram_mgr_bo_.patch32
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4829-drm-amdgpu-Remove-amdgpu_gem_map_attach-target_dev-d.patch35
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4830-drm-amdgpu-pp-add-missing-byte-swapping-in-process_p.patch36
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4831-drm-amdgpu-pp-fix-endian-swapping-in-atomctrl_get_vo.patch41
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4832-drm-amdgpu-pp-fix-copy-paste-typo-in-smu7_init_dpm_d.patch30
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4833-drm-amdgpu-pp-fix-copy-paste-typo-in-smu7_get_pp_tab.patch30
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4834-drm-amdgpu-sdma-simplify-sdma-instance-setup.patch229
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4835-drm-amdgpu-vce-simplify-vce-instance-setup.patch179
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4836-drm-amd-Replace-drm_dev_unref-with-drm_dev_put.patch80
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4837-drm-amd-add-SPDX-identifier-and-clarify-license.patch26
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4838-drm-amdgpu-fix-the-wrong-type-of-gem-object-creation.patch67
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4839-drm-amdgpu-update-uvd_v6_0_ring_vm_funcs-to-use-new-.patch41
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4840-drm-amd-pp-Convert-clock-unit-to-KHz-as-defined.patch165
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4841-drm-amd-pp-Memory-Latency-is-always-25us-on-Vega10.patch66
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4842-drm-amd-pp-Switch-the-tolerable-latency-for-display.patch33
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4843-drm-amd-display-Notify-powerplay-the-min_dcef-clock.patch35
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4844-drm-amd-display-Notify-powerplay-the-display-control.patch43
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4845-drm-amd-pp-Refine-the-interface-exported-to-display.patch169
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4846-drm-amd-pp-Remove-duplicate-code-in-vega12_hwmgr.c.patch76
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4847-drm-amdgpu-switch-firmware-path-for-CIK-parts-v2.patch320
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4848-drm-amdgpu-switch-firmware-path-for-SI-parts.patch191
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4849-drm-amdgpu-update-amd_pcie.h-to-include-gen4-speeds.patch41
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4850-drm-amdgpu-use-pcie-functions-for-link-width-and-spe.patch341
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4851-drm-amd-pp-Export-notify_smu_enable_pwe-to-display.patch82
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4852-drm-amd-display-Refine-the-implementation-of-dm_pp_g.patch150
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4853-drm-amd-display-Fix-copy-error-when-set-memory-clock.patch34
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4854-drm-amd-pp-Remove-the-same-struct-define-in-powerpla.patch212
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4855-drm-amd-display-off-by-one-in-find_irq_source_info.patch34
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4856-Revert-drm-amd-display-Fix-indentation-in-dcn10-reso.patch33
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4857-drm-amd-display-dc-dce-Fix-multiple-potential-intege.patch70
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4858-drm-amd-Remove-errors-from-sphinx-documentation.patch125
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4859-drm-amdgpu-update-documentation-for-amdgpu_drv.c.patch437
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4860-drm-amd-Add-sphinx-documentation-for-amd_ip_funcs.patch95
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4861-drm-amdgpu-separate-gpu-address-from-bo-pin.patch550
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4862-drm-amdgpu-allocate-gart-memory-when-it-s-required-v.patch180
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4863-drm-amdgpu-fix-kmap-error-handling-for-bo-creations.patch34
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4864-drm-amdgpu-Add-CLK-IP-base-offset.patch43
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4865-drm-amd-pp-Convert-10KHz-to-KHz-as-variable-name.patch52
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4866-drm-amd-display-Make-function-pointer-structs-const.patch96
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4867-drm-amdgpu-Add-support-for-logging-process-info-in-a.patch110
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4868-drm-amdgpu-Present-amdgpu_task_info-in-VM_FAULTS.patch88
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4869-drm-amd-pp-Send-khz-clock-values-to-DC-for-smu7-8.patch86
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4870-drm-amdgpu-Take-VCN-jpeg-ring-into-account-in-idle-w.patch35
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4871-drm-amdgpu-move-cache-window-setup-after-power-and-c.patch41
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4872-drm-amdgpu-get-VCN-start-to-process-in-the-dpm-disab.patch35
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4873-drm-amd-pp-fix-semicolon.cocci-warnings.patch35
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4874-drm-amdgpu-pin-the-csb-buffer-on-hw-init-v2.patch93
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4875-drm-amdgpu-init-CSIB-regardless-of-rlc-version-and-p.patch40
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4876-drm-amdgpu-correct-rlc-save-restore-list-initializat.patch62
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4877-drm-amdgpu-drop-mmRLC_PG_CNTL-clear-v2.patch35
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4878-drm-amdgpu-no-touch-for-the-reserved-bit-of-RLC_CGTT.patch54
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4879-drm-amdgpu-reduce-the-idle-period-that-RLC-has-to-wa.patch50
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4880-drm-amd-powerplay-add-vega12-SMU-gfxoff-support-v3.patch115
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4881-drm-amd-powerplay-no-need-to-mask-workable-gfxoff-fe.patch35
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4882-drm-amd-powerplay-convert-the-sclk-mclk-into-Mhz-for.patch42
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4883-drm-amd-Add-interrupt-source-definitions-for-VI-v3.patch136
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4884-drm-amd-Use-newly-added-interrupt-source-defs-for-VI.patch391
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4885-drm-amd-Add-interrupt-source-definitions-for-SOC15-v.patch457
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4886-drm-amd-Use-newly-added-interrupt-source-defs-for-SO.patch226
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4887-drm-amdgpu-fix-TTM-move-entity-init-order.patch101
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4888-drm-amdgpu-Keep-track-of-amount-of-pinned-CPU-visibl.patch150
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4889-drm-amdgpu-Make-pin_size-values-atomic.patch181
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4890-drm-amdgpu-Warn-and-update-pin_size-values-when-dest.patch84
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4891-Revert-drm-amd-display-make-dm_dp_aux_transfer-retur.patch156
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4892-drm-amd-display-Separate-HUBP-surface-size-and-rotat.patch122
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4893-drm-amd-display-Add-avoid_vbios_exec_table-debug-bit.patch29
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4894-drm-amd-display-support-access-ddc-for-mst-branch.patch40
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4895-drm-amd-display-Implement-cursor-multiplier.patch117
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4896-drm-amd-display-Linux-Set-Read-link-rate-and-lane-co.patch124
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4897-drm-amd-display-Move-common-GPIO-registers-into-a-co.patch40
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4898-drm-amd-display-fix-bug-where-we-are-creating-bogus-.patch126
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4899-drm-amd-display-generic-indirect-register-access.patch136
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4900-drm-amd-display-fix-incorrect-check-for-atom-table-s.patch32
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4901-drm-amd-display-set-read-link-rate-and-lane-count-th.patch323
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4902-drm-amd-display-dal-3.1.53.patch29
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4903-drm-amd-display-Correct-calculation-of-duration-time.patch34
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4904-drm-amd-display-Add-Azalia-registers-to-HW-sequencer.patch40
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4905-drm-amd-display-Define-couple-extra-DCN-registers.patch79
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4906-drm-amd-display-Expose-configure_encoder-for-link_en.patch51
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4907-drm-amd-display-Serialize-is_dp_sink_present.patch103
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4908-drm-amd-display-Break-out-function-to-simply-read-au.patch200
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4909-drm-amd-display-Return-aux-replies-directly-to-DRM.patch324
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4910-drm-amd-display-Convert-remaining-loggers-off-dc_log.patch1148
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4911-drm-amd-display-read-DP-sink-and-DP-branch-hardware-.patch104
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4912-drm-amd-display-dcc-always-on-for-bw-calculations-on.patch47
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4913-drm-amd-display-hook-dp-test-pattern-through-debugfs.patch325
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4914-drm-amd-display-remove-dentist_vco_freq-from-resourc.patch29
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4915-drm-amd-display-drop-unused-register-defines.patch40
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4916-drm-amd-display-add-additional-info-for-cursor-posit.patch113
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4917-drm-amd-display-Patch-for-extend-time-to-panel-power.patch57
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4918-drm-amd-display-Linux-set-read-lane-settings-through.patch471
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4919-drm-amd-display-Fix-compile-error-on-older-GCC-versi.patch39
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4920-drm-amd-display-add-missing-mask-for-dcn.patch31
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4921-drm-amd-display-set-default-GPIO_ID_HPD.patch28
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4922-drm-amd-display-add-dcn-cursor-hotsport-rotation-and.patch106
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4923-drm-amd-display-expose-dcn10_aux_initialize-in-heade.patch64
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4924-drm-amd-display-Linux-hook-test-pattern-through-debu.patch134
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4925-drm-amd-display-dal-3.1.54.patch29
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4926-drm-amd-display-Add-YCbCr420-only-support-for-HDMI-4.patch53
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4927-drm-amd-display-Expose-bunch-of-functions-from-dcn10.patch173
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4928-drm-amd-display-Right-shift-AUX-reply-value-sooner-t.patch48
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4929-drm-amd-display-Read-AUX-channel-even-if-only-status.patch47
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4930-drm-amd-display-introduce-concept-of-send_reset_leng.patch157
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4931-drm-amd-display-add-DalEnableHDMI20-key-support.patch82
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4932-drm-amd-display-add-pp-to-dc-powerlevel-enum-transla.patch70
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4933-drm-amd-display-Add-NULL-check-for-local-sink-in-edp.patch41
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4934-drm-amd-display-Return-out_link_loss-from-interrupt-.patch77
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4935-drm-amd-display-Add-CRC-support-for-DCN.patch193
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4936-drm-amd-display-Expose-couple-OPTC-functions-through.patch37
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4937-drm-amd-display-dp-debugfs-allow-link-rate-lane-coun.patch41
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4938-drm-amd-display-Fix-new-stream-count-check-in-dc_add.patch42
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4939-drm-amd-display-add-max-scl-ratio-to-soc-bounding-bo.patch30
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4940-drm-amd-display-update-dml-to-match-DV-dml.patch31
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4941-drm-amd-display-dal-3.1.55.patch29
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4942-drm-amd-display-Initialize-data-structure-for-DalMpV.patch33
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4943-drm-amd-display-properly-turn-autocal-off.patch54
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4944-drm-amdgpu-vi-fix-mixed-up-state-in-smu-clockgating-.patch64
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4945-drm-amdgpu-pp-smu7-drop-unused-values-in-smu-data-st.patch55
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4946-drm-amdgpu-pp-smu7-remove-local-mc_addr-variable.patch64
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4947-drm-amdgpu-pp-smu7-cache-smu-firmware-toc.patch158
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4948-drm-amdgpu-pp-remove-dead-vega12-code.patch67
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4949-drm-amdgpu-pp-split-out-common-smumgr-smu9-code.patch920
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4950-drm-amdgpu-pp-switch-smu-callback-type-for-get_argum.patch118
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4951-Revert-drm-amd-powerplay-fix-performance-drop-on-Veg.patch36
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4952-drm-amdgpu-Allow-to-create-BO-lists-in-CS-ioctl-v3.patch302
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4953-drm-amd-display-Add-headers-for-hardcoded-1d-luts.patch77
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4954-drm-amd-display-Refactor-SDR-cursor-boosting-in-HDR-.patch320
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4955-drm-amd-display-add-HDR-visual-confirm.patch205
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4956-drm-amd-display-Add-hook-for-MST-root-branch-info.patch70
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4957-drm-amd-display-Move-address-tracking-out-of-HUBP.patch107
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4958-drm-amd-display-add-new-dc-debug-structure-to-track-.patch54
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4959-drm-amd-display-dal-3.1.56.patch29
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4960-drm-amd-display-Null-ptr-check-for-set_sdr_white_lev.patch35
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4961-drm-amd-display-Fix-some-checkpatch.pl-errors-and-wa.patch150
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4962-drm-amdgpu-cleanup-job-header.patch175
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4963-drm-amdgpu-remove-fence-context-from-the-job.patch73
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4964-drm-amdgpu-remove-ring-parameter-from-amdgpu_job_sub.patch206
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4965-drm-amdgpu-remove-job-ring.patch278
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4966-drm-amdgpu-add-amdgpu_job_submit_direct-helper.patch318
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4967-drm-amdgpu-remove-job-adev-v2.patch113
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4968-drm-amdgpu-minor-cleanup-in-amdgpu_job.c.patch66
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4969-drm-amdgpu-allow-for-more-flexible-priority-handling.patch36
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4970-drm-amdgpu-change-ring-priority-after-pushing-the-jo.patch81
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4971-drm-amdgpu-simplify-the-bo-reference-on-amdgpu_bo_up.patch51
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4972-drm-amdgpu-pm-Remove-VLA-usage.patch174
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4973-drm-amdgpu-powerplay-use-irq-source-defines-for-smu7.patch52
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4974-drm-amd-powerplay-fixed-uninitialized-value.patch32
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4975-drm-amdgpu-display-Replace-CONFIG_DRM_AMD_DC_DCN1_0-.patch673
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4976-drm-amdgpu-remove-superflous-UVD-encode-entity.patch105
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4977-drm-amdgpu-clean-up-UVD-instance-handling-v2.patch304
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4978-drm-amdgpu-fix-spelling-mistake-successed-succeeded.patch58
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4979-drm-amd-display-Drop-unused-backlight-functions-in-D.patch54
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4980-drm-amd-display-Honor-pplib-stutter-mask-for-all-ASI.patch58
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4981-drm-amdgpu-lock-and-unlock-console-only-for-amdgpu_f.patch104
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4982-drm-amd-pp-Set-Max-clock-level-to-display-by-default.patch55
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4983-drm-amd-display-Convert-10kHz-clks-from-PPLib-into-k.patch36
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4984-300-compilaiton.patch38
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4985-patch-correction-amdgpu-clean-up-UVD-instance-handli.patch26
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4986-drm-amdgpu-use-drm_fb-helper-for-console_-un-lock.patch67
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4987-drm-amdgpu-Fix-warning-in-dma_fence_is_later-on-resu.patch43
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4988-drm-amdgpu-apci-don-t-call-sbios-request-function-if.patch101
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4989-drm-amdgpu-acpi-skip-backlight-events-for-DC.patch31
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4990-drm-amdgpu-split-ip-suspend-into-2-phases.patch129
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4991-drm-amdgpu-rework-suspend-and-resume-to-deal-with-at.patch140
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4992-drm-amdgpu-Fix-RLC-safe-mode-test-in-gfx_v9_0_enter_.patch42
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4993-drm-amd-powerplay-slow-UCLK-switch-when-multiple-dis.patch33
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4994-drm-amd-powerplay-correct-the-argument-for-PPSMC_MSG.patch46
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4995-drm-amd-powerplay-allow-slow-switch-only-if-NBPState.patch49
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4996-drm-amdgpu-Don-t-warn-on-destroying-a-pinned-BO.patch39
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4997-drm-amdgpu-move-the-amdgpu_fbdev_set_suspend-further.patch49
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4998-drm-amd-display-Remove-unnecessary-warning.patch38
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4999-drm-amd-display-allow-diags-to-skip-initial-link-tra.patch94
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5000-drm-amd-display-DPP-CM-ICSC-AYCRCB8888-format-suppor.patch55
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5001-drm-amd-display-Decouple-aux-from-i2c.patch1949
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5002-drm-amd-display-separate-dc_debug-into-dc_debug_opti.patch161
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5003-drm-amd-display-DC-3.1.58.patch29
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5004-drm-amdgpu-clean-up-coding-style-a-bit.patch98
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5005-drm-amdgpu-expose-only-the-first-UVD-instance-for-no.patch94
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5006-drm-amdgpu-consistenly-name-amdgpu_bo_-functions.patch428
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5007-drm-amdgpu-reduce-the-number-of-placements-for-a-BO.patch57
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5008-drm-amdgpu-gmc9-clarify-GPUVM-fault-error-message.patch33
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5009-gpu-drm-amdgpu-Replace-mdelay-with-msleep-in-cik_pci.patch36
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5010-drm-amdgpu-add-support-for-inplace-IB-patching-for-M.patch89
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5011-drm-amdgpu-patch-the-IBs-for-the-second-UVD-instance.patch73
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5012-drm-amd-display-Retry-link-training-again.patch102
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5013-drm-amd-display-flatten-aux_engine-and-engine.patch665
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5014-drm-amd-display-Prevent-PSR-from-being-enabled-if-in.patch124
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5015-drm-amd-display-DC-3.1.59.patch29
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5016-drm-amd-Add-missing-fields-in-atom_integrated_system.patch62
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5017-drm-amdgpu-implement-harvesting-support-for-UVD-7.2-.patch353
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5018-drm-amdgpu-correct-evict-flag-for-bo-move.patch43
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5019-drm-amdgpu-clean-up-the-superfluous-space-and-align-.patch281
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5020-drm-amd-pp-Polaris12-Fix-a-chunk-of-registers-missed.patch75
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5021-drm-amd-pp-Delete-unused-temp-variables.patch72
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5022-drm-amd-pp-Convert-voltage-unit-in-mV-4-to-mV-on-CZ-.patch48
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5023-drm-amdgpu-fix-a-reversed-condition.patch33
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5024-drm-amdgpu-add-proper-error-handling-to-amdgpu_bo_li.patch116
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5025-drm-amdgpu-fix-total-size-calculation.patch37
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5026-drm-amdgpu-return-error-if-both-BOs-and-bo_list-hand.patch48
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5027-drm-amdgpu-add-new-amdgpu_vm_bo_trace_cs-function-v2.patch116
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5028-drm-amdgpu-move-bo_list-defines-to-amdgpu_bo_list.h.patch158
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5029-drm-amdgpu-always-recreate-bo_list.patch132
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5030-drm-amdgpu-nuke-amdgpu_bo_list_free.patch80
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5031-drm-amdgpu-add-bo_list-iterators.patch251
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5032-drm-amdgpu-allocate-the-bo_list-array-after-the-list.patch237
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5033-drm-amdgpu-create-an-empty-bo_list-if-no-handle-is-p.patch221
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5034-drm-amdgpu-Replace-ttm_bo_reference-with-ttm_bo_get.patch62
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5035-drm-amdgpu-Replace-ttm_bo_unref-with-ttm_bo_put.patch89
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5036-drm-amd-display-add-missing-void-parameter-to-dc_cre.patch34
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5037-drm-amdgpu-pm-Fix-potential-Spectre-v1.patch52
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5038-drm-amd-display-Report-non-DP-display-as-disconnecte.patch54
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5039-drm-amd-display-Only-require-EDID-read-for-HDMI-and-.patch43
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5040-drm-amd-display-Use-requested-HDMI-aspect-ratio.patch47
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5041-drm-amd-display-DP-Compliance-400.1.1-failure.patch97
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5042-drm-amd-display-Implement-backlight_ops.get_brightne.patch79
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5043-drm-amd-display-Read-back-max-backlight-value-at-boo.patch39
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5044-drm-amd-display-Destroy-aux_engines-only-once.patch37
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5045-drm-amd-display-Implement-custom-degamma-lut-on-dcn.patch170
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5046-drm-amd-display-Use-calculated-disp_clk_khz-value-fo.patch83
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5047-drm-amd-display-Don-t-share-clk-source-between-DP-an.patch130
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5048-drm-amd-display-add-vbios-table-check-for-enabling-d.patch73
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5049-drm-amd-display-Add-NULL-check-for-enabling-dp-ss.patch41
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5050-drm-amd-display-program-display-clock-on-cache-match.patch56
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5051-drm-amd-display-update-clk-for-various-HDMI-color-de.patch57
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5052-drm-amd-display-display-connected-to-dp-1-does-not-l.patch67
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5053-drm-amdgpu-sriov-give-8s-for-recover-vram-under-RUNT.patch43
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5054-drm-amd-display-fix-single-link-DVI-has-no-display.patch29
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5055-drm-amd-display-Allow-clock-sharing-b-w-HDMI-and-DVI.patch45
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5056-drm-amd-display-Pass-connector-id-when-executing-VBI.patch48
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5057-drm-amd-display-Guard-against-null-crtc-in-CRC-IRQ.patch50
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5058-drm-amd-pp-Add-ACP-PG-support-in-SMU.patch115
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5059-drm-amdgpu-Power-down-acp-if-board-uses-AZ-v2.patch73
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5060-drm-amd-amdgpu-Enabling-Power-Gating-for-Stoney-plat.patch112
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5061-drm-amdgpu-acp-Powrgate-acp-via-smu.patch195
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5062-drm-amgpu-acp-Implement-set_powergating_state-for-ac.patch36
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5063-drm-amdgpu-Add-job-pipe-sync-dependecy-trace.patch80
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5064-drm-amd-pp-Implement-get_performance_level-for-legac.patch121
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5065-drm-amd-display-pass-compat_level-to-hubp.patch83
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5066-drm-amd-display-Move-PME-to-function-pointer-call-se.patch227
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5067-drm-amd-display-dal-3.1.60.patch29
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5068-drm-amd-display-Set-DFS-bypass-flags-for-dce110.patch71
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5069-drm-amd-display-Enable-DFS-bypass-support-in-DC-conf.patch44
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5070-drm-amd-display-Add-support-for-toggling-DFS-bypass.patch190
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5071-drm-amdgpu-Add-amdgpu_gfx_off_ctrl-function.patch114
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5072-drm-amdgpu-Put-enable-gfx-off-feature-to-a-delay-thr.patch104
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5073-drm-amdgpu-Ctrl-gfx-off-via-amdgpu_gfx_off_ctrl.patch63
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5074-drm-amdgpu-Disable-gfx-off-if-VCN-is-busy.patch38
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5075-drm-amdgpu-move-gfx-definitions-into-amdgpu_gfx-head.patch762
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5076-drm-amdgpu-move-ih-definitions-into-amdgpu_ih-header.patch80
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5077-drm-amdgpu-move-sdma-definitions-into-amdgpu_sdma-he.patch351
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5078-drm-amdgpu-move-firmware-definitions-into-amdgpu_uco.patch102
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5079-drm-amdgpu-move-psp-macro-into-amdgpu_psp-header.patch110
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5080-drm-amdgpu-move-gem-definitions-into-amdgpu_gem-head.patch379
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5081-drm-amd-display-pass-the-right-num-of-modes-added.patch39
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5082-drm-amd-display-correct-image-viewport-calculation.patch97
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5083-drm-amd-display-Print-DPP-DTN-log-info-only-for-enab.patch84
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5084-drm-amd-display-Use-DGAM-ROM-or-RAM.patch126
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5085-drm-amd-display-Add-check-for-num-of-entries-in-gamm.patch33
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5086-drm-amdgpu-Delay-100ms-to-enable-gfx-off-feature.patch32
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5087-drm-amdgpu-move-ring-macros-into-amdgpu_ring-header.patch107
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5088-drm-amdgpu-remove-useless-gds-switch-macro.patch32
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5089-drm-amdgpu-move-display-definitions-into-amdgpu_disp.patch221
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5090-drm-amdgpu-move-gmc-macros-into-amdgpu_gmc-header.patch56
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5091-drm-amdgpu-move-vm-definitions-into-amdgpu_vm-header.patch108
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5092-drm-amdgpu-move-missed-gfxoff-entry-into-amdgpu_gfx-.patch45
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5093-drm-amdgpu-pp-endian-fixes-for-process_pptables_v1_0.patch386
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5094-drm-amdgpu-pp-endian-fixes-for-processpptables.c.patch126
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5095-drm-amdgpu-add-emit-reg-write-reg-wait-for-vcn-jpeg.patch31
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5096-drm-amdgpu-add-system-interrupt-register-offset-head.patch31
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5097-drm-amdgpu-add-system-interrupt-mask-for-jrbc.patch30
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5098-drm-amdgpu-enable-system-interrupt-for-jrbc.patch42
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5099-drm-amdgpu-add-emit-trap-for-vcn-jpeg.patch32
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5100-drm-amdgpu-fix-emit-frame-size-and-comments-for-jpeg.patch36
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5101-drm-amdgpu-powerplay-check-vrefresh-when-when-changi.patch129
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5102-drm-amdgpu-Cancel-gfx-off-delay-work-when-driver-fin.patch39
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5103-drm-amd-display-dc-3.1.61.patch29
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5104-drm-amd-display-fix-PIP-bugs-on-Dal3.patch266
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5105-drm-amd-display-Add-dprefclk-value-to-dce_dccg.patch60
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5106-drm-amd-display-fix-dml-handling-of-mono8-16-pixel-f.patch39
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5107-drm-amd-display-add-retimer-log-for-HWQ-tuning-use.patch249
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5108-drm-amd-display-Remove-redundant-non-zero-and-overfl.patch46
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5109-drm-amd-display-dc-3.1.62.patch29
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5110-drm-amdgpu-add-AVFS-control-to-PP_FEATURE_MASK.patch29
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5111-drm-amdgpu-powerplay-smu7-enable-AVFS-control-via-pp.patch32
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5112-drm-amdgpu-powerplay-vega10-enable-AVFS-control-via-.patch32
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5113-drm-amd-display-enable-ABGR-and-XBGR-formats-v4.patch44
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5114-drm-amdgpu-enable-ABGR-and-XBGR-formats-v2.patch174
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5115-drm-amdgpu-include-Add-nbio-7.4-header-files-v4.patch53097
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5116-drm-amdgpu-include-Add-sdma0-1-4.2-register-headerfi.patch8078
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5117-drm-amdgpu-include-add-thm-11.0.2-headers.patch155
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5118-drm-amdgpu-include-Add-mp-11.0-header-files.-v2.patch926
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5119-Revert-drm-amdgpu-Add-nbio-support-for-vega20-v2.patch82
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5120-drm-amdgpu-Add-nbio-7.4-support-for-vega20-v3.patch346
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5121-drm-amdgpu-update-atomfirmware.h.patch114
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5122-drm-amd-powerplay-add-vega20_inc.h-v2.patch59
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5123-drm-amd-powerplay-add-smu11_driver_if.h-v4.patch857
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5124-drm-amd-powerplay-add-vega20_ppsmc.h-v2.patch151
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5125-drm-amd-powerplay-add-vega20_pptable.h-v2.patch164
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5126-drm-amd-powerplay-add-the-smu-manager-for-vega20-v2.patch644
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5127-drm-amd-powerplay-new-interfaces-for-ActivityMonitor.patch103
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5128-drm-amd-powerplay-add-the-hw-manager-for-vega20-v3.patch4098
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5129-drm-amd-powerplay-support-workload-profile-query-and.patch239
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5130-drm-amd-powerplay-init-vega20-uvd-vce-powergate-stat.patch54
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5131-drm-amd-powerplay-correct-force-clock-level-related-.patch234
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5132-drm-amd-powerplay-export-vega20-stable-pstate-clocks.patch62
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5133-drm-amd-powerplay-add-vega20-pre_display_config_chan.patch83
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5134-drm-amd-powerplay-conv-the-vega20-pstate-sclk-mclk-i.patch32
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5135-drm-amd-powerplay-initialize-vega20-overdrive-settin.patch635
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5136-drm-amd-powerplay-new-interfaces-for-overdrive-vega2.patch207
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5137-drm-amd-powerplay-revise-vega20-PPSMC_MSG_SetSoftMin.patch175
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5138-drm-amd-powerplay-update-vega20-clocks-threshold-set.patch159
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5139-drm-amdgpu-enable-vega20-powerplay-support.patch31
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5140-drm-amdgpu-Add-psp-11.0-support-for-vega20.-v2.patch733
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5141-drm-amdgpu-vg20-Change-the-load-type-of-vega20-to-ps.patch42
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5142-drm-amd-powerplay-enable-fclk-ss-by-default.patch30
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5143-drm-amd-powerplay-remove-setting-soc-floor-voltage-b.patch52
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5144-drm-amd-powerplay-avoid-enabling-disabling-uvd-vce-d.patch44
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5145-drm-amd-powerplay-correct-the-argument-for-PPSMC_MSG.patch32
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5146-drm-amd-powerplay-allow-slow-switch-only-if-NBPState.patch32
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5147-drm-amd-powerplay-remove-max-DCEFCLK-limitation.patch32
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5148-drm-amd-powerplay-added-voltage-boot-time-calibratio.patch47
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5149-drm-amdgpu-gfx9-Update-gfx9-golden-settings.patch32
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5150-drm-amdgpu-update-vega20-sdma-golden-settings.patch136
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5151-drm-amdgpu-psp-Enlarge-PSP-TMR-SIZE-from-3M-to-4M.patch54
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5152-drm-amdgpu-remove-experimental-flag-for-vega20.patch38
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5153-drm-amdgpu-Cancel-the-delay-work-when-suspend.patch34
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5154-drm-amd-pp-OverDrive-gfx-domain-voltage-on-Tonga.patch36
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5155-drm-amdgpu-fix-integer-overflow-test-in-amdgpu_bo_li.patch38
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5156-drm-amdgpu-Change-VCE-booting-with-firmware-loaded-b.patch62
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5157-drm-amdgpu-Use-kvmalloc-for-allocating-UVD-VCE-VCN-B.patch113
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5158-drm-amdgpu-added-support-2nd-UVD-instance.patch80
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5159-drm-amd-display-Program-vline-interrupt-on-FAST-upda.patch49
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5160-drm-amd-display-Enable-Stereo-in-Dal3.patch387
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5161-drm-amd-display-Program-vsc_infopacket-in-commit_pla.patch45
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5162-drm-amd-display-Handle-HDR-meta-update-as-fast-updat.patch61
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5163-drm-amd-display-HDR-dynamic-meta-should-be-treated-a.patch46
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5164-drm-amd-display-Program-gamut-remap-as-part-of-strea.patch85
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5165-drm-amdgpu-Improve-a-error-message-and-fix-a-typo.patch41
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5166-drm-amdgpu-Remove-VM-based-compute-profile-switching.patch196
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5167-drm-amdgpu-hybrid-add-AMDGPU-VERSION.patch38
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5168-drm-amdgpu-cleanup-HW_IP-query.patch250
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5169-Revert-drm-amdgpu-switch-firmware-path-for-SI-parts.patch189
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5170-Revert-drm-amdgpu-switch-firmware-path-for-CIK-parts.patch316
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5171-drm-amdgpu-Refine-function-name-and-function-args.patch126
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5172-drm-amdgpu-Set-power-ungate-state-when-suspend-fini.patch126
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5173-drm-amdgpu-Set-clock-ungate-state-when-suspend-fini.patch131
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5174-drm-amdgpu-fix-VM-size-reporting-on-Raven.patch36
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5175-drm-amdgpu-Do-not-evict-VRAM-on-APUs-with-disabled-H.patch45
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5176-drm-amd-display-Do-not-retain-link-settings.patch53
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5177-drm-amd-display-Create-new-i2c-resource.patch2888
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5178-drm-amd-display-Program-csc-matrix-as-part-of-stream.patch85
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5179-drm-amdgpu-display-disable-eDP-fast-boot-optimizatio.patch37
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5180-drm-amd-display-Define-registers-for-dcn10.patch38
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5181-drm-amd-display-Combine-dce80-and-dce100-i2c-hw-func.patch349
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5182-drm-amd-display-move-edp-fast-boot-optimization-flag.patch128
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5183-drm-amd-display-implement-DPMS-DTN-test-v2.patch607
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5184-drm-amdgpu-Remove-the-sriov-checking-and-add-firmwar.patch122
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5185-drm-amdgpu-use-kiq-to-do-invalidate-tlb.patch171
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5186-drm-amdgpu-remove-fulll-access-for-suspend-phase1.patch43
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5187-drm-amdgpu-Fix-compile-warning.patch36
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5188-drm-amdgpu-fix-sdma-doorbell-range-setting.patch31
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5189-drm-amdgpu-sriov-Only-sriov-runtime-support-use-kiq.patch55
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5190-drm-amd-display-fix-a-compile-warning.patch44
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5191-drm-amd-display-indent-an-if-statement.patch30
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5192-drm-amdgpu-Don-t-use-kiq-in-gpu-reset.patch53
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5193-drm-amdgpu-display-add-support-for-LVDS-v5.patch346
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5194-drm-amdgpu-amdgpu_kiq_reg_write_reg_wait-can-be-stat.patch30
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5195-drm-amdgpu-cleanup-GPU-recovery-check-a-bit-v2.patch193
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5196-drm-amdgpu-validate-the-VM-root-PD-from-the-VM-code.patch42
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5197-drm-amdgpu-move-setting-the-GART-addr-into-TTM.patch52
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5198-drm-amdgpu-rename-gart.robj-into-gart.bo.patch212
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5199-drm-amdgpu-remove-gart.table_addr.patch255
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5200-drm-amdgpu-set-correct-base-for-THM-NBIF-MP1-IP.patch39
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5201-drm-amdgpu-Only-retrieve-GPU-address-of-GART-table-a.patch94
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5202-drm-amdgpu-switch-firmware-path-for-SI-parts.patch191
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5203-drm-amdgpu-switch-firmware-path-for-CIK-parts-v2.patch320
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5204-Hybrid-Version-18.45.0.418.patch27
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5205-drm-amdgpu-add-amdgpu_gmc_pd_addr-helper.patch196
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5206-drm-amdgpu-add-ring-soft-recovery-v4.patch100
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5207-drm-amdgpu-implement-soft_recovery-for-GFX7.patch51
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5208-drm-amdgpu-implement-soft_recovery-for-GFX8-v2.patch53
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5209-drm-amdgpu-implement-soft_recovery-for-GFX9.patch51
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5210-drm-amdgpu-Adjust-the-VM-size-based-on-system-memory.patch110
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5211-drm-amdgpu-Enable-disable-gfx-PG-feature-in-rlc-safe.patch49
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5212-drm-amdgpu-Remove-duplicated-power-source-update.patch38
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5213-drm-amdgpu-Fix-vce-initialize-failed-on-Kaveri-Mulli.patch128
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5214-drm-amdgpu-Update-power-state-at-the-end-of-smu-hw_i.patch72
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5215-drm-amdgpu-Power-on-uvd-block-when-hw_fini.patch46
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5216-drm-amdgpu-Remove-dead-code-in-amdgpu_pm.c.patch72
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5217-drm-amdgpu-Remove-duplicate-code-in-gfx_v8_0.c.patch161
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5218-drm-amdgpu-Refine-gfx_v8_0_kcq_disable-function.patch85
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5219-drm-amdgpu-Remove-duplicate-code-in-gfx_v9_0.c.patch152
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5220-drm-amdgpu-Refine-gfx_v9_0_kcq_disable-function.patch86
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5221-drm-amdgpu-Change-kiq-initialize-reset-sequence-on-g.patch126
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5222-drm-amdgpu-Change-kiq-ring-initialize-sequence-on-gf.patch98
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5223-drm-amdgpu-amdgpu_ctx_add_fence-can-t-fail.patch80
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5224-drm-amdgpu-fix-holding-mn_lock-while-allocating-memo.patch79
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5225-drm-amdgpu-add-amdgpu_gmc_get_pde_for_bo-helper-v2.patch175
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5226-drm-amdgpu-enable-GTT-PD-PT-for-raven-v3.patch90
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5227-drm-amdgpu-Refine-gmc9-VM-fault-print.patch36
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5228-drm-amdgpu-remove-extra-newline-when-printing-VM-fau.patch36
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5229-drm-amdgpu-move-full-access-into-amdgpu_device_ip_su.patch63
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5230-drm-amdgpu-Need-to-set-moved-to-true-when-evict-bo.patch46
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5231-drm-amdgpu-remove-amdgpu_bo_gpu_accessible.patch52
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5232-drm-amdgpu-move-amdgpu_device_-vram-gtt-_location.patch268
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5233-drm-amdgpu-fix-amdgpu_gmc_gart_location-a-little-bit.patch65
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5234-drm-amdgpu-stop-using-gart_start-as-offset-for-the-G.patch70
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5235-drm-amdgpu-distinct-between-allocated-GART-space-and.patch84
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5236-drm-amdgpu-use-the-smaller-hole-for-GART.patch37
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5237-drm-amdgpu-remove-redundant-memset.patch37
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5238-drm-amdgpu-add-missing-CHIP_HAINAN-in-amdgpu_ucode_g.patch35
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5239-drm-amdgpu-put-GART-away-from-VRAM-v2.patch56
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5240-drm-amdgpu-Revert-kmap-PDs-PTs-in-amdgpu_vm_update_d.patch56
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5241-drm-amdgpu-gmc9-rework-stolen-vga-memory-handling.patch127
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5242-drm-amdgpu-gmc9-don-t-keep-stolen-memory-on-Raven.patch38
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5243-drm-amdgpu-gmc9-don-t-keep-stolen-memory-on-vega12.patch39
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5244-drm-amdgpu-gmc9-don-t-keep-stolen-memory-on-vega20.patch46
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5245-drm-amd-powerplay-added-vega20-overdrive-support-V3.patch452
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5246-drm-amd-powerplay-correct-data-type-to-support-under.patch38
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5247-drm-amdgpu-Set-pasid-for-compute-vm-v2.patch215
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5248-drm-amd-display-Eliminate-i2c-hw-function-pointers.patch806
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5249-drm-amd-display-dc-3.1.63.patch29
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5250-drm-amd-display-Use-non-deprecated-vblank-handler.patch44
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5251-drm-amd-display-Add-support-for-hw_state-logging-via.patch156
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5252-drm-amd-display-eliminate-long-wait-between-register.patch59
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5253-drm-amd-display-Fix-memory-leak-caused-by-missed-dc_.patch43
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5254-drm-amd-display-Remove-redundant-i2c-structs.patch363
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5255-drm-amd-display-support-48-MHZ-refclk-off.patch101
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5256-drm-amd-display-Flatten-unnecessary-i2c-functions.patch212
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5257-drm-amdgpu-fix-mask-in-GART-location-calculation.patch36
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5258-drm-amdgpu-revert-stop-using-gart_start-as-offset-fo.patch72
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5259-drm-amdgpu-Fix-SDMA-hang-in-prt-mode-v2.patch58
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5260-drm-amdgpu-add-new-polaris-pci-id.patch55
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5261-drm-amdgpu-sriov-Correct-the-setting-about-sdma-door.patch103
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5262-drm-amdgpu-add-picasso-to-asic_type-enum.patch30
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5263-drm-amdgpu-add-soc15-support-for-picasso.patch97
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5264-drm-amdgpu-add-picasso-ucode-loading-method.patch30
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5265-drm-amdgpu-add-picasso-support-for-vcn.patch43
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5266-drm-amdgpu-add-clockgating-support-for-picasso.patch73
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5267-drm-amdgpu-add-picasso-support-for-gmc.patch54
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5268-drm-amdgpu-add-picasso-support-for-gfx_v9_0.patch136
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5269-drm-amdgpu-add-picasso-support-for-sdma_v4.patch81
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5270-drm-amdgpu-add-picasso-for-amdgpu-kms.patch31
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5271-drm-amdgpu-Add-pg-support-for-gfxoff-for-PCO.patch33
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5272-drm-amdgpu-Enable-SDMA-power-gating-for-PCO.patch30
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5273-drm-amdgpu-enable-mmhub-power-gating.patch303
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5274-drm-amdgpu-enable-vcn-powergating-for-PCO.patch31
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5275-drm-amdgpu-add-ip-blocks-for-picasso-v2.patch49
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5276-drm-amdgpu-add-new-raven-series-device.patch31
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5277-drm-amdgpu-enable-gfxoff-in-non-sriov-and-stutter-mo.patch47
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5278-drm-amdgpu-use-IP-presence-to-free-uvd-and-vce-handl.patch35
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5279-drm-amdgpu-move-get_rev_id-at-first-before-load-gpu_.patch69
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5280-drm-amdgpu-set-external-rev-id-for-raven2.patch33
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5281-drm-amdgpu-add-raven2-to-gpu_info-firmware.patch41
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5282-drm-amdgpu-add-raven2-vcn-firmware-support.patch45
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5283-drm-amdgpu-add-psp-support-for-raven2.patch43
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5284-drm-amdgpu-sdma4-specify-raven2-firmware.patch41
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5285-drm-amdgpu-sdma4-Add-raven2-golden-setting.patch56
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5286-drm-amdgpu-gfx9-add-support-for-raven2-gfx-firmware.patch48
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5287-drm-amdgpu-gfx9-add-raven2-golden-setting.patch89
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5288-drm-amd-display-Add-Raven2-definitions-in-dc.patch249
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5289-drm-amd-display-Add-DC-config-flag-for-Raven2-v2.patch51
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5290-drm-amd-powerplay-update-smu10_verify_smc-to-raven2-.patch34
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5291-drm-amd-powerplay-round-up-the-Mhz-convertion-v2.patch45
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5292-drm-amd-powerplay-disable-raven2-force-dpm-level-sup.patch43
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5293-drm-amdgpu-set-CG-flags-for-raven2-v2.patch87
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5294-drm-amdgpu-Initialize-fences-array-entries-in-amdgpu.patch50
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5295-drm-amdgpu-soc15-clean-up-picasso-support.patch44
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5296-drm-amdgpu-simplify-Raven-Raven2-and-Picasso-handlin.patch511
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5297-drm-amd-display-Fix-3D-stereo-issues.patch112
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5298-drm-amd-display-stop-using-switch-for-different-CS-r.patch521
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5299-drm-amd-display-dc-3.1.66.patch29
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5300-drm-amd-display-add-query-HPD-interface.patch63
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5301-drm-amd-display-Drop-amdgpu_display_manager.dal-memb.patch49
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5302-drm-amd-display-Drop-amdgpu_dm_prev_state-struct.patch48
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5303-drm-amdgpu-add-GDS-GWS-and-OA-debugfs-files.patch48
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5304-drm-amdgpu-stop-crashing-on-GDS-GWS-OA-eviction.patch63
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5305-drm-amdgpu-don-t-allocate-zero-sized-kernel-BOs.patch35
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5306-drm-amdgpu-drop-size-check.patch46
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5307-drm-amd-amdgpu-Avoid-fault-when-allocating-an-empty-.patch32
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5308-drm-amdgpu-use-processed-values-for-counting.patch62
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5309-drm-amdgpu-update-vram_info-structure-in-atomfirmwar.patch88
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5310-drm-amdgpu-fix-unknown-vram-mem-type-for-vega20.patch69
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5311-drm-amd-powerplay-update-OD-feature-judgement.patch148
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5312-drm-amd-powerplay-update-OD-to-take-voltage-value-in.patch327
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5313-drm-amd-powerplay-retrieve-the-updated-clock-table-a.patch246
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5314-drm-amdgpu-stop-pipelining-VM-PDs-PTs-moves.patch51
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5315-drm-amdgpu-always-enable-shadow-BOs-v2.patch55
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5316-drm-amdgpu-shadow-BOs-don-t-need-any-alignment.patch50
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5317-drm-amdgpu-always-recover-VRAM-during-GPU-recovery.patch63
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5318-drm-amdgpu-fix-shadow-BO-restoring.patch266
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5319-drm-amdgpu-fix-up-GDS-GWS-OA-shifting.patch252
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5320-drm-amdgpu-initialize-GDS-GWS-OA-domains-even-when-t.patch91
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5321-drm-amdgpu-move-reserving-GDS-GWS-OA-into-common-cod.patch156
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5322-drm-amd-Add-ucode-DMCU-support.patch115
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5323-drm-amd-Add-PSP-DMCU-support.patch39
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5324-drm-amd-Add-DM-DMCU-support.patch172
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5325-drm-amdgpu-Add-DMCU-to-firmware-query-interface.patch62
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5326-drm-amd-display-Add-DMCU-firmware-version.patch43
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5327-drm-amdgpu-display-return-proper-error-codes-in-dm.patch57
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5328-drm-amdgpu-try-allocating-VRAM-as-power-of-two.patch99
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5329-drm-amdgpu-enable-AGP-aperture-for-GMC9-v2.patch88
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5330-drm-amdgpu-fix-the-page-fault-of-raven2.patch83
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5331-drm-amdgpu-add-amdgpu_gmc_agp_location-v3.patch96
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5332-drm-amdgpu-Temporary-fix-amdgpu_vm_release_compute-b.patch116
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5333-drm-amdgpu-fix-VM-clearing-for-the-root-PD.patch42
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5334-drm-amdgpu-fix-preamble-handling.patch55
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5335-amdgpu-fix-multi-process-hang-issue.patch56
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5336-drm-amdgpu-Fix-page-fault-and-kasan-warning-on-pci-d.patch201
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5337-drm-amd-display-Fix-bug-use-wrong-pp-interface.patch51
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5338-drm-amdgpu-remove-extra-root-PD-alignment.patch60
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5339-drm-amdgpu-add-helper-for-VM-PD-PT-allocation-parame.patch105
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5340-drm-amdgpu-add-GMC9-support-for-PDs-PTs-in-system-me.patch36
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5341-drm-amdgpu-add-amdgpu_gmc_get_pde_for_bo-helper-v2.patch39
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5342-drm-amd-display-Improve-spelling-grammar-and-formatt.patch638
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5343-drm-amd-display-Support-reading-hw-state-from-debugf.patch386
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5344-Revert-drm-amdgpu-Temporary-fix-amdgpu_vm_release_co.patch118
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5345-drm-amdgpu-Use-drm_dev_unplug-in-PCI-.remove.patch39
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5346-drm-amdgpu-move-size-calculations-to-the-front-of-th.patch123
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5347-drm-amdgpu-fix-amdgpu_mn_unlock-in-the-CS-error-path.patch36
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5348-drm-amdgpu-correctly-sign-extend-48bit-addresses-v3.patch240
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5349-drm-amdgpu-use-the-AGP-aperture-for-system-memory-ac.patch143
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5350-drm-amd-display-Build-stream-update-and-plane-update.patch176
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5351-drm-amd-display-Add-DP-YCbCr-4-2-0-support.patch243
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5352-drm-amd-display-Fix-DAL217-tests-modify-DTN-logs-for.patch47
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5353-drm-amd-display-Add-driver-side-parsing-for-CM.patch68
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5354-drm-amd-display-remove-dead-dc-vbios-code.patch1906
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5355-drm-amd-display-remove-unused-clk_src-code.patch137
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5356-drm-amd-display-add-disconnect_delay-to-dc_panel_pat.patch31
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5357-drm-amd-display-add-aux-transition-event-log.patch177
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5358-drm-amd-display-num-of-sw-i2c-aux-engines-less-than-.patch269
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5359-drm-amd-display-Use-DRM-helper-for-best_encoder.patch71
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5360-drm-amd-display-Reorder-resource_pool-to-put-i2c-wit.patch41
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5361-drm-amd-display-use-link-type-to-decide-stream-enc-a.patch40
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5362-drm-amd-display-Remove-call-to-amdgpu_pm_compute_clo.patch47
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5363-drm-amd-display-clean-code-for-transition-event-log.patch94
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5364-drm-amd-display-Add-invariant-support-instrumentatio.patch193
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5365-drm-amd-display-Fix-warning-storm-on-Raven2.patch90
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5366-drm-amd-display-RV2-DP-MST-2nd-display-within-daisy-.patch57
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5367-drm-amdgpu-interim-disable-RV2-GFX-CG-flag-for-urgen.patch36
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5368-drm-drivers-drop-redundant-drm_edid_to_eld-calls.patch53
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5369-drm-amdgpu-add-license-to-Makefiles.patch803
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5370-drm-amdgpu-Fix-header-file-dependencies.patch44
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5371-drm-amdgpu-re-enable-CGCG-on-CZ-and-disable-on-ST.patch32
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5372-drm-amdgpu-Handle-64-bit-return-from-drm_crtc_vblank.patch66
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5373-drm-amdgpu-fix-module-parameter-descriptions.patch33
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5374-drm-amd-amdgpu-re-add-missing-GC-9.1-and-SDMA0-4.1-s.patch32841
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5375-vga_switcheroo-Use-device-link-for-HDA-controller.patch141
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5376-drm-amd-pp-fix-missing-CONFIG_ACPI.patch34
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5377-drm-amdgpu-sdma4-use-a-helper-for-SDMA_OP_POLL_REGME.patch116
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5378-drm-amdgpu-include-pagemap.h-for-release_pages.patch29
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5379-drm-amdgpu-fix-32-bit-build-warning.patch46
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5380-drm-amdgpu-Add-AMDGPU_GPU_PAGES_IN_CPU_PAGE-define.patch116
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5381-drm-amd-display-Use-2-factor-allocator-calls.patch54
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5382-drm-amdgpu-move-context-related-stuff-to-amdgpu_ctx..patch251
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5383-drm-amdgpu-add-status-checking-after-fw-is-loaded.patch35
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5384-drm-amdgpu-revert-psp-firmware-load-status-check.patch32
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5385-Hybrid-Version-18.50.0.418.patch27
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5386-drm-amdgpu-improve-VM-state-machine-documentation-v2.patch192
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5387-drm-amdgpu-Fix-compute-VM-BO-params-after-rebase-v2.patch56
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5388-drm-amdgpu-Fix-warnings-while-make-xmldocs.patch44
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5389-drm-amd-powerplay-fix-compile-warning-for-wrong-data.patch43
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5390-drm-amdgpu-move-PSP-init-prior-to-IH-in-gpu-reset.patch41
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5391-drm-amd-include-update-the-bitfield-define-for-PF_MA.patch38
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5392-drm-amdgpu-gmc-add-initial-xgmi-structure-to-amdgpu_.patch54
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5393-drm-amdgpu-gmc9-add-a-new-gfxhub-1.1-helper-for-xgmi.patch162
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5394-drm-amdgpu-gmc9-Adjust-GART-and-AGP-location-with-xg.patch158
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5395-drm-amdgpu-Add-psp-function-interfaces-for-XGMI-supp.patch88
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5396-drm-amdgpu-Add-place-holder-functions-for-xgmi-topol.patch67
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5397-drm-amdgpu-Generate-XGMI-topology-info-from-driver-l.patch205
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5398-drm-amdgpu-Init-correct-fb-region-for-none-XGMI-conf.patch37
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5399-drm-amdgpu-fix-error-handling-in-amdgpu_cs_user_fenc.patch69
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5400-drm-amdgpu-add-amdgpu_vm_pt_parent-helper.patch81
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5401-drm-amdgpu-add-amdgpu_vm_update_func.patch84
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5402-drm-amdgpu-Fix-SDMA-TO-after-GPU-reset-v3.patch55
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5403-drm-amdgpu-move-cs-dependencies-front-a-bit.patch49
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5404-drm-amdgpu-Move-fault-hash-table-to-amdgpu-vm.patch411
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5405-drm-amd-display-fix-ptr_ret.cocci-warnings.patch36
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5406-drm-amdgpu-Add-error-message-when-register-failed-to.patch31
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5407-drm-amdgpu-add-some-VM-PD-PT-iterators-v2.patch258
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5408-drm-amdgpu-use-leaf-iterator-for-allocating-PD-PT.patch212
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5409-drm-amdgpu-use-dfs-iterator-to-free-PDs-PTs.patch110
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5410-drm-amdgpu-use-the-DFS-iterator-in-amdgpu_vm_invalid.patch84
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5411-drm-amdgpu-use-leaf-iterator-for-filling-PTs.patch115
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5412-drm-amd-display-Fix-pflip-IRQ-status-after-gpu-reset.patch39
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5413-drm-amdgpu-remove-amdgpu_bo_list_entry.robj.patch328
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5414-drm-amdgpu-remove-amdgpu_bo_list_entry.robj-for-rele.patch36
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5415-drm-amdgpu-fix-compilation-of-amdgpu_amdkfd_gpuvm.c.patch38
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5416-drm-amdgpu-use-a-single-linked-list-for-amdgpu_vm_bo.patch201
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5417-drm-amdgpu-Style-fixes-to-PRIME-code-documentation.patch189
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5418-drm-amd-display-add-aux-i2c-event-log.patch91
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5419-drm-amdgpu-fix-parameter-documentation-for-amdgpu_vm.patch34
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5420-drm-amdgpu-add-vega20-sriov-capability-detection.patch44
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5421-drm-amdgpu-Exclude-MM-engines-for-vega20-virtual-dev.patch35
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5422-drm-amd-dc-Trigger-set-power-state-task-when-display.patch37
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5423-drm-amd-pp-Honour-DC-s-clock-limits-on-Rv.patch94
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5424-drm-amd-pp-Return-error-immediately-if-load-firmware.patch39
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5425-drm-amd-display-Refactor-FPGA-specific-link-setup.patch147
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5426-drm-amd-display-use-proper-pipe_ctx-index.patch51
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5427-drm-amd-display-add-pp_smu-NULL-pointer-check.patch31
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5428-drm-amd-display-Add-color-bit-info-to-freesync-infof.patch331
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5429-drm-amd-display-program-v_update-and-v_ready-with-pr.patch97
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5430-drm-amd-display-dc-3.1.67.patch29
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5431-drm-amd-display-Stereo-3D-support-in-VSC.patch103
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5432-drm-amd-display-Guard-against-null-stream-dereferenc.patch61
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5433-drm-amd-display-Remove-mst_hotplug_work.patch65
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5434-drm-amd-display-fix-gamma-not-being-applied.patch81
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5435-drm-amd-display-Raise-dispclk-value-for-dce120-by-15.patch48
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5436-drm-amdgpu-powerplay-add-get_argument-callback-for-v.patch185
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5437-drm-amdgpu-powerplay-Move-vega10_enable_smc_features.patch121
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5438-drm-amdgpu-powerplay-add-smu-smc_table_manager-callb.patch112
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5439-drm-amdgpu-powerplay-add-smu-smc_table_manager-callb.patch191
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5440-drm-amdgpu-add-new-AMDGPU_PP_SENSOR_ENABLED_SMC_FEAT.patch29
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5441-drm-amdgpu-implement-ENABLED_SMC_FEATURES_MASK-senso.patch75
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5442-drm-amdgpu-implement-ENABLED_SMC_FEATURES_MASK-senso.patch34
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5443-drm-amdgpu-implement-ENABLED_SMC_FEATURES_MASK-senso.patch33
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5444-drm-amdgpu-print-smc-feature-mask-in-debugfs-amdgpu_.patch40
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5445-drm-amd-display-remove-redundant-null-pointer-check-.patch37
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5446-drm-amdgpu-Add-warning-message-for-INT-SW-fallback.patch28
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5447-drm-amdgpu-sriov-Correct-the-setting-about-sdma-door.patch38
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5448-drm-amdgpu-Deactivate-SW-interrupt-fallback-in-amdgp.patch35
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5449-drm-amdgpu-Refine-function-name.patch120
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5450-drm-amdgpu-Halt-rlc-cp-in-rlc_safe_mode.patch130
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5451-drm-amdgpu-Remove-redundant-code-in-gfx_v8_0.c.patch54
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5452-drm-amd-pp-Disable-dpm-features-on-smu7-8-when-suspe.patch121
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5453-drm-amdgpu-drop-extra-newline-in-amdgpu_iv-trace.patch32
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5454-drm-amdgpu-make-function-pointers-mandatory.patch434
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5455-drm-amdgpu-cleanup-amdgpu_ih.c.patch435
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5456-drm-amdgpu-Move-fence-SW-fallback-warning-v3.patch86
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5457-drm-amdgpu-move-more-interrupt-processing-into-amdgp.patch144
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5458-drm-amdgpu-move-more-defines-into-amdgpu_irq.h.patch833
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5459-drm-amdgpu-Use-register-UVD_SCRATCH9-for-VCN-ring-ib.patch92
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5460-drm-amdgpu-Add-new-register-offset-mask-to-support-V.patch93
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5461-drm-amdgpu-Add-DPG-support-flag.patch33
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5462-drm-amdgpu-Add-DPG-mode-read-write-macro.patch53
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5463-drm-amdgpu-Add-DPG-mode-support-for-vcn-1.0.patch413
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5464-drm-amdgpu-Add-DPG-pause-state.patch50
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5465-drm-amdgpu-Add-DPG-pause-mode-support.patch217
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5466-drm-amdgpu-soc15-fix-warnings-in-register-macro.patch34
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5467-drm-amdgpu-vcn-whitespace-cleanup.patch129
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5468-drm-amd-powerplay-correct-the-hwmon-interface-ppt-li.patch57
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5469-drm-amd-powerplay-tell-the-correct-gfx-voltage-V2.patch82
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5470-drm-amd-powerplay-enable-fan-RPM-and-pwm-settings-V2.patch331
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5471-drm-amdgpu-added-vega20-LBPW-support.patch154
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5472-drm-amdgpu-change-Raven-always-on-CUs-to-4.patch44
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5473-drm-amdgpu-vega20-make-power-profile-output-more-con.patch35
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5474-drm-amdgpu-add-default-case-to-switch-statement.patch29
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5475-drm-amdgpu-added-AMD-GPU-instance-counting-V2.patch158
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5476-drm-amd-powerplay-helper-interfaces-for-MGPU-fan-boo.patch157
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5477-drm-amd-powerplay-enable-MGPU-fan-boost-feature-on-V.patch52
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5478-drm-amdgpu-Fix-comments-error-in-sdma_v4_1_update_po.patch28
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5479-drm-amd-pp-Fix-fan-s-RPM-setting-not-work-on-VI-Vega.patch48
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5480-drm-amd-pp-Avoid-divide-by-zero-in-fan_ctrl_set_fan_.patch43
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5481-drm-amd-pp-Expose-the-smu-support-for-SDMA-PG-cntl.patch85
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5482-drm-amdgpu-Move-out-power-up-down-sdma-out-of-smu.patch96
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5483-drm-amd-pp-Remove-uncessary-extra-vcn-pg-cntl-in-smu.patch57
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5484-drm-amd-pp-Remove-wrong-code-in-fiji_start_smu.patch37
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5485-drm-amd-powerplay-Enable-Disable-NBPSTATE-on-On-OFF-.patch44
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5486-drm-amd-display-Add-DC-build_id-to-determine-build-t.patch61
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5487-drm-amd-display-fix-4K-stereo-screen-flash-issue.patch34
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5488-drm-amd-display-Add-a-check-function-for-virtual-sig.patch35
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5489-drm-amd-display-Calculate-swizzle-mode-using-bpp-dur.patch213
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5490-drm-amd-display-Add-function-to-fetch-clock-requirem.patch92
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5491-drm-amd-display-block-DP-YCbCr420-modes.patch50
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5492-drm-amd-display-clean-up-encoding-checks.patch178
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5493-drm-amd-display-WA-for-DF-keeps-awake-after-S0i3.patch95
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5494-drm-amd-display-dc-3.1.68.patch28
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5495-drm-amd-display-fix-memory-leak-in-resource-pools.patch126
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5496-drm-amd-display-Flatten-irq-handler-data-struct.patch130
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5497-drm-amd-display-fix-Interlace-video-timing.patch132
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5498-drm-amd-display-HLK-Periodic-Frame-Notification-test.patch36
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5499-drm-amd-display-Fix-Vega10-lightup-on-S3-resume.patch88
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5500-drm-amd-display-Raise-dispclk-value-for-dce_update_c.patch50
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5501-drm-amd-display-Signal-hw_done-after-waiting-for-fli.patch113
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5502-drm-amdgpu-Refine-uvd_v6-7_0_enc_get_destroy_msg.patch90
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5503-drm-amdgpu-Add-new-AMDGPU_PP_SENSOR_MIN-MAX_FAN_RPM-.patch30
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5504-drm-amd-pp-Implement-AMDGPU_PP_SENSOR_MIN-MAX_FAN_RP.patch48
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5505-drm-amdgpu-Add-fan-RPM-setting-via-sysfs.patch349
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5506-drm-amdgpu-Disable-sysfs-pwm1-if-not-in-manual-fan-c.patch43
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5507-drm-amdgpu-Drop-dead-define-in-amdgpu.h.patch63
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5508-drm-amd-pp-Fix-memory-leak-on-CI-AI.patch38
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5509-drm-amdgpu-Move-gfx-flag-in_suspend-to-adev.patch152
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5510-drm-amd-pp-Refine-function-iceland_start_smu.patch46
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5511-drm-amd-pp-Setup-SoftRegsStart-before-request-smu-lo.patch82
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5512-drm-amd-pp-Refine-smu7-8-request_smu_load_fw-callbac.patch228
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5513-drm-amdgpu-Remove-FW_LOAD_DIRECT-type-support-on-VI.patch422
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5514-drm-amdgpu-Don-t-reallocate-ucode-bo-when-suspend.patch30
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5515-drm-amd-pp-Allocate-ucode-bo-in-request_smu_load_fw.patch63
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5516-drm-amd-pp-Implement-load_firmware-interface.patch45
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5517-drm-amdgpu-Add-fw-load-in-gfx_v8-and-sdma_v3.patch61
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5518-drm-amdgpu-Change-VI-gfx-sdma-smu-init-sequence.patch122
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5519-drm-amdgpu-skip-IB-tests-for-KIQ-in-general.patch43
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5520-drm-amdgpu-Always-enable-fan-sensors-for-read.patch44
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5521-drm-amdgpu-remove-the-intterupt-handling-for-the-KIQ.patch225
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5522-drm-amdgpu-fix-AGP-location-with-VRAM-at-0x0.patch43
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5523-drm-amdgpu-fix-incorrect-use-of-amdgpu_irq_add_id-in.patch100
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5524-drm-amdgpu-vcn-Remove-unused-code.patch30
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5525-drm-amdgpu-vcn-fix-dpg-pause-mode-hang-issue.patch73
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5526-drm-amdgpu-vcn-Replace-value-with-defined-macro.patch110
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5527-drm-amdgpu-vcn-Correct-VCN-cache-window-definition.patch132
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5528-drm-amdgpu-add-CP_DEBUG-register-definition-for-GC9..patch34
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5529-drm-amdgpu-fix-CPDMA-hang-in-PRT-mode.patch64
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5530-drm-amdgpu-Limit-the-max-mc-address-to-hole-start.patch53
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5531-drm-amdgpu-Change-SI-CI-gfx-sdma-smu-init-sequence.patch145
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5532-drm-amdgpu-Change-AI-gfx-sdma-smu-init-sequence.patch56
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5533-drm-amdgpu-Refine-function-amdgpu_device_ip_late_ini.patch42
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5534-drm-amdgpu-Check-late_init-status-before-set-cg-pg-s.patch39
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5535-drm-amdgpu-Split-amdgpu_ucode_init-fini_bo-into-two-.patch155
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5536-drm-amdgpu-Remove-amdgpu_ucode_fini_bo.patch88
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5537-drm-amdgpu-split-ip-hw_init-into-2-phases.patch103
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5538-drm-amdgpu-Load-fw-between-hw_init-resume_phase1-and.patch290
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5539-drm-amdgpu-Remove-wrong-fw-loading-type-warning.patch31
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5540-drm-amdgpu-Remove-the-direct-fw-loading-support-for-.patch78
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5541-drm-amdgpu-powerplay-endian-fixes-for-vega10_process.patch60
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5542-drm-amdgpu-powerplay-endian-fixes-for-vega12_process.patch79
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5543-drm-amdgpu-powerplay-endian-fixes-for-vega20_process.patch106
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5544-drm-amdgpu-powerplay-factor-out-some-pptable-helpers.patch265
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5545-drm-amdgpu-Suppress-keypresses-from-ACPI_VIDEO-event.patch71
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5546-drm-amdgpu-powerplay-fix-missing-break-in-switch-sta.patch122
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5547-drm-amdgpu-remove-set-but-not-used-variable-ring-in-.patch37
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5548-drm-amdgpu-remove-set-but-not-used-variable-header.patch41
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5549-drm-amd-powerplay-translate-power_profile-mode-to-pp.patch97
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5550-drm-amd-powerplay-hint-when-power-profile-setting-is.patch39
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5551-drm-amdgpu-Set-the-default-value-about-gds-vmid0-siz.patch51
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5552-drm-amdgpu-vcn-Add-new-register-offset-mask-for-VCN.patch101
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5553-drm-amdgpu-vcn-Update-latest-UVD_MPC-register-for-VC.patch89
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5554-drm-amdgpu-vcn-Update-latest-spg-mode-stop-for-VCN.patch76
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5555-drm-amdgpu-vcn-Add-ring-W-R-PTR-check-for-VCN-DPG-mo.patch40
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5556-drm-amdgpu-vcn-Reduce-unnecessary-local-variable.patch58
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5557-drm-amdgpu-vcn-Update-DPG-mode-VCN-memory-control.patch58
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5558-drm-amdgpu-vcn-Update-DPG-mode-VCN-global-tiling-reg.patch54
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5559-drm-amdgpu-vcn-Add-DPG-mode-Register-XX-check.patch43
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5560-drm-amdgpu-vcn-Remove-DPG-mode-unused-steps-during-v.patch78
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5561-drm-amdgpu-vcn-Apply-new-UMC-enable-for-VNC-DPG-mode.patch41
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5562-drm-amdgpu-vcn-Update-SPG-mode-VCN-memory-control.patch41
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5563-drm-amdgpu-vcn-Update-SPG-mode-VCN-global-tiling.patch46
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5564-drm-amdgpu-vcn-Move-SPG-mode-mc-resume-after-MPC-con.patch40
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5565-drm-amdgpu-vcn-Add-SPG-mode-Register-XX-check.patch32
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5566-drm-amdgpu-vcn-Remove-SPG-mode-unused-steps-during-v.patch81
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5567-drm-amdgpu-vcn-Apply-new-UMC-enable-for-VNC-DPG-mode.patch44
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5568-drm-amdgpu-vcn-Set-VCPU-busy-after-gate-power-during.patch33
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5569-drm-amdgpu-vcn-Update-SPG-mode-UVD-status-clear.patch34
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5570-drm-amdgpu-display-dm-amdgpu-make-dp-phy-debugfs-for.patch35
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5571-drm-amdgpu-update-Vega20-SDMA-golden-setting.patch37
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5572-drm-amd-powerplay-added-I2C-controller-configuration.patch440
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5573-drm-amd-powerplay-update-PPtable-with-DC-BTC-and-Tvr.patch92
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5574-drm-amdgpu-Update-gc_9_0-golden-settings.patch29
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5575-drm-amdgpu-fix-sdma-doorbell-comments-typo.patch28
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5576-drm-amd-display-fix-bug-of-accessing-invalid-memory.patch55
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5577-drm-amd-display-dc-3.2.01.patch28
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5578-drm-amd-display-handle-max_vstartup-larger-than-vbla.patch49
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5579-drm-amd-display-move-pplib-smu-notification-to-dccg-.patch1973
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5580-drm-amd-display-remove-safe_to_lower-flag-from-dc-us.patch297
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5581-drm-amd-display-Freesync-does-not-engage-on-some-dis.patch94
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5582-drm-amd-display-clean-up-base-dccg-struct.patch363
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5583-drm-amd-display-split-dccg-clock-manager-into-asic-f.patch2952
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5584-drm-amd-display-Add-support-for-Freesync-2-HDR-and-C.patch290
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5585-drm-amd-display-initialize-dc_transfer_func-ctx.patch47
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5586-drm-amd-display-expose-hwseq-functions-and-add-regis.patch220
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5587-drm-amd-display-fix-report-display-count-logic.patch148
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5588-drm-amd-display-Add-link-encoder-dp_ycbcr420_support.patch145
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5589-drm-amd-display-Retiring-set_display_requirements-in.patch74
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5590-drm-amd-display-Retiring-set_display_requirements-in.patch125
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5591-drm-amd-display-rename-dccg-to-clk_mgr.patch3638
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5592-drm-amd-display-add-dccg-block.patch142
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5593-drm-amd-display-dc-3.2.02.patch28
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5594-drm-amd-display-explicit-uint64_t-casting.patch31
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5595-drm-amd-display-rename-cstate_pstate_watermarks_st1.patch41
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5596-drm-amd-display-Fix-incorrect-end-slope-of-EETF.patch32
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5597-drm-amdgpu-correct-SPDX-identifier-in-amdgpu_trace_p.patch30
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5598-drm-amd-powerplay-bump-the-PPtable-version-supported.patch104
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5599-drm-amd-powerplay-correct-the-clocks-for-DAL-to-be-K.patch118
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5600-drm-amd-powerplay-revise-Vega20-pptable-version-chec.patch39
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5601-drm-amdgpu-support-Vega20-A1-ASICs.patch60
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5602-Revert-drm-amdgpu-add-amdgpu_gmc_get_pde_for_bo-help.patch33
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5603-drm-amdgpu-update-smu-firmware-images-for-VI-variant.patch114
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5604-drm-amd-display-Raise-dispclk-value-for-Polaris.patch47
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5605-drm-amdgpu-update-mc-firmware-image-for-polaris12-va.patch45
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5606-drm-amd-display-Fix-6x4K-displays-light-up-on-Vega20.patch39
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5607-drm-amdgpu-gmc8-update-MC-firmware-for-polaris.patch74
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5608-drm-amdgpu-gmc8-always-load-MC-firmware-in-the-drive.patch46
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5609-drm-amdgpu-both-support-PCO-FP5-AM4-rlc-fw.patch58
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5610-drm-amdgpu-update-SMC-firmware-image-for-polaris10-v.patch31
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5611-drm-amdgpu-powerplay-fix-mclk-switch-limit-on-polari.patch34
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5612-drm-amdgpu-powerplay-fix-clock-stretcher-limits-on-p.patch47
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5613-drm-amdgpu-powerplay-Apply-avfs-cks-off-voltages-on-.patch49
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5614-drm-amdgpu-revert-the-commit-interim-disable-RV2-GFX.patch32
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5615-drm-amdgpu-separate-amdgpu_rlc-into-a-single-file.patch463
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5616-drm-amdgpu-abstract-the-function-of-enter-exit-safe-.patch1456
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5617-drm-amdgpu-make-gfx9-enter-into-rlc-safe-mode-when-s.patch39
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5618-drm-amdkfd-Roll-back-all-q4-amdkfd-patches-added-by-.patch8070
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5619-drm-amdkfd-Change-the-control-stack-mtype-from-UC-to.patch51
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5620-drm-amdkfd-remove-check-for-PCIe-upstream-bridge.patch69
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5621-drm-amdgpu-kfd2kgd-Support-BO-create-from-sg.patch42
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5622-drm-amd-Update-KFD-Thunk-ioctl-ABI-to-match-upstream.patch374
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5623-drm-amdkfd-Fixing-compilation-issues.patch57
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5624-drm-amdkfd-Disable-the-perf-counters-for-old-kernels.patch32
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5625-drm-amdkfd-use-px-to-print-user-space-address-instea.patch55
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5626-drm-amdkfd-Simplify-dGPU-event-page-allocation.patch284
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5627-drm-amdkfd-Backwards-compatibility-with-old-Thunk.patch148
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5628-drm-amdkfd-Remove-pm_map_process_scratch_cik.patch115
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5629-drm-amdgpu-Remove-pm_map_process_cik.patch347
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5630-drm-amdkfd-Put-packet-sizes-directly-into-packet_man.patch290
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5631-drm-amdkfd-GPU-recovery-support-from-KFD-step-1.patch150
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5632-drm-amd-Add-kfd-ioctl-defines-for-hw_exception-event.patch49
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5633-drm-amdkfd-signal-hw_exception-event-on-GPU-reset.patch62
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5634-drm-amdkfd-remove-check-for-PCIe-upstream-bridge.patch37
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5635-drm-amdkfd-CMA-Refactor-CMA-code.patch430
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5636-drm-amdkfd-CMA-Store-cpuva-in-KFD-BO.patch120
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5637-drm-amdkfd-CMA-Handle-userptr-to-userptr-BO-copy.patch399
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5638-drm-amdkfd-CMA-Use-shadow-system-BO-for-userptr.patch359
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5639-Fix-SVM-missing-on-Raven.patch85
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5640-drm-amdkfd-Implement-SPI-debug-and-exception-support.patch587
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5641-drm-amdkfd-Implement-hw_exception-work-thread-to-han.patch134
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5642-drm-amdkfd-CMA-Remove-diff.-device-restriction.patch102
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5643-drm-amdkfd-CMA-Store-mem_type-in-KFD-BO.patch127
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5644-drm-amdkfd-CMA-Support-for-diff.-devices.patch40
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5645-drm-amdkfd-Remove-unused-variable.patch26
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5646-drm-amdfd-Don-t-hard-code-wait-time.patch56
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5647-drm-amdkfd-CMA-Add-intermediate-wait-if-mGPU.patch74
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5648-drm-amdkfd-CMA-Support-multi-device-VRAM-copy.patch223
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5649-drm-amdkfd-Reduce-priority-of-context-saving-waves-b.patch107
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5650-drm-amdkfd-Introduce-kfd-kernel-module-parameter-hal.patch75
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5651-drm-amdkfd-Use-module-parameters-noretry-as-the-inte.patch93
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5652-drm-amdkfd-Separate-trap-handler-assembly-code-and-i.patch1220
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5653-drm-amdkfd-Mellanox-Support-PeerSync-interface.patch57
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5654-drm-amdkfd-Fix-CP-soft-hang-on-APUs.patch103
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5655-drm-amdkfd-Fix-typos-in-trap-handler-comments.patch85
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5656-drm-amdkfd-Align-Makefile-with-upstream.patch79
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5657-drm-amdkfd-Align-CIK-interrupt-processing-with-upstr.patch194
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5658-drm-amdkfd-Remove-IH-patching-workaround-for-Vega10.patch92
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5659-drm-amdkfd-Clean-up-mmap-handling.patch142
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5660-drm-amdkfd-fix-uninitialized-variable-use.patch42
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5661-drm-amdkfd-Fix-kernel-queue-rollback-for-64-bit-wptr.patch34
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5662-drm-amdkfd-Match-release_mem-interface-with-other-PM.patch107
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5663-drm-amdkfd-Simplify-packet-manager-initialization.patch117
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5664-drm-amdkfd-Fix-error-handling-in-pm_init.patch61
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5665-drm-amdkfd-Fix-pm_debugfs_runlist.patch49
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5666-drm-amdkfd-Check-ctx_save_restore_area_address.patch63
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5667-drm-amdkfd-Fix-error-handling-around-kfd_process_cre.patch64
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5668-drm-amdkfd-Fix-error-handling-in-APU-CWSR-mapping.patch34
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5669-drm-amdkfd-Simplify-error-handling-in-kfd_create_pro.patch54
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5670-drm-amdkfd-Simplify-obj-handle-allocation.patch51
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5671-drm-amdkfd-Error-if-trying-to-acquire-VM-for-a-PDD-t.patch37
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5672-drm-amdkfd-Cosmetic-changes-to-match-upstream.patch1825
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5673-drm-amdkfd-Add-sanity-checks-in-IRQ-handlers.patch137
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5674-drm-amdkfd-Don-t-use-kmap_atomic.patch52
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5675-drm-amdkcl-fixed-can-t-find-kgd_kfd_interface.h-head.patch32
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5676-drm-amdkfd-Fix-kernel-queue-64-bit-doorbell-offset-c.patch45
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5677-drm-amdkfd-Fix-race-between-scheduler-and-context-re.patch665
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5678-drm-amdkfd-Add-debugfs-interface-to-trigger-HWS-hang.patch201
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5679-drm-amdkfd-Make-the-number-of-SDMA-queues-variable.patch180
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5680-drm-amdkfd-Vega20-bring-up-on-amdkfd-side.patch148
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5681-drm-amdkfd-reflect-atomic-support-in-IO-link-propert.patch118
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5682-drm-amdkfd-Add-check-user-queue-busy-interface.patch246
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5683-drm-amdkfd-Replace-mqd-with-mqd_mgr-as-the-variable-.patch531
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5684-drm-amd-amdgpu-Removing-unwanted-code-from-the-below.patch32
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5685-drm-amdkfd-Conditionally-enable-PCIe-atomics.patch43
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5686-drm-amdkfd-Fix-return-value-0-when-execute_queues_cp.patch29
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5687-drm-amdkfd-don-t-always-call-execute_queues_cpsch.patch39
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5688-drm-amdkfd-kfd_dev_is_large_bar-can-be-static.patch28
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5689-drm-amdkfd-fix-build-select-MMU_NOTIFIER.patch41
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5690-drm-amdkfd-Try-to-enable-atomics-for-all-GPUs.patch60
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5691-drm-amdkfd-Remove-queue-node-when-destroy-queue-fail.patch48
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5692-drm-amdkfd-Remove-vla.patch58
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5693-drm-admkfd-use-modern-ktime-accessors.patch47
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5694-drm-amdkfd-Stop-using-GFP_NOIO-explicitly.patch85
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5695-drm-amdkfd-fix-zero-reading-of-VMID-and-PASID-for-Ha.patch49
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5696-drm-amdkfd-Clean-up-reference-of-radeon.patch124
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5697-drm-amdkfd-Optimize-out-some-duplicated-code-in-kfd_.patch59
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5698-drm-amdkfd-Add-CU-masking-ioctl-to-KFD.patch162
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5699-drm-amdkfd-Call-kfd2kgd.set_compute_idle.patch153
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5700-Removed-DKMS-installed-KFD-check-for-kernel-version.patch34
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5701-drm-amdgpu-Merge-amdkfd-into-amdgpu.patch291
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5702-drm-amdgpu-Move-KFD-parameters-to-amdgpu-v3.patch348
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5703-kbuild-create-built-in.o-automatically-if-parent-dir.patch91
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5704-kbuild-remove-incremental-linking-option.patch233
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5705-kbuild-rename-built-in.o-to-built-in.a.patch387
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5706-drm-amdgpu-Need-to-set-moved-to-true-when-evict-bo.patch46
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5707-Fix-compilation-error.patch54
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5708-drm-amdkfd-Release-an-acquired-process-vm.patch177
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5709-drm-amdgpu-Relocate-some-definitions-v2.patch232
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5710-drm-amdkfd-Copy-in-KFD-related-files.patch28459
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5711-drm-amdkfd-kfd-expose-the-hive_id-of-the-device-thro.patch102
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5712-drm-amdkfd-Add-new-iolink-type-defines.patch39
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5713-drm-amdkfd-Generate-xGMI-direct-iolink.patch173
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5714-drm-amdkfd-Only-add-bi-directional-iolink-on-GPU-wit.patch94
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5715-drm-amdkfd-change-system-memory-overcommit-limit.patch208
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5716-drm-amdkfd-Reliably-prevent-reclaim-FS-while-holding.patch613
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5717-Reverted-Update-KFD-Thunk-ioctl-ABI-to-match-upstrea.patch579
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5718-SWDEV-168581-dc-fix-sporadic-multiple-aux-transactio.patch183
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5719-drm-amdkfd-Rebsed-some-changes-in-kfd.patch333
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5720-drm-amdgpu-Clean-up-KFD-init-and-fini.patch76
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5721-net-ethernet-xgbe-expand-PHY_GBIT_FEAUTRES.patch104
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5722-Code-cleanup.patch28
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5723-drm-amdgpu-vcn-Fixed-S3-hung-issue.patch35
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5724-drm-amdgpu-change-VEGA-booting-with-firmware-loaded-.patch54
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5725-Fix-compilation-error-for-kfd.patch45
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5726-amd-i2s-fix-to-the-fage-fault-when-iommu-is-enabled.patch156
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5727-amd-i2s-dma-pointer-uses-Link-position-counter.This-.patch117
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5728-mmc-core-Move-calls-to-prepare_hs400_tuning-closer-t.patch50
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5729-mmc-core-more-fine-grained-hooks-for-HS400-tuning.patch89
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5730-mmc-sdhci-Export-sdhci-tuning-function-symbol.patch91
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5731-mmc-sdhci-Export-sdhci_request.patch53
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5732-mmc-sdhci-add-adma_table_cnt-member-to-struct-sdhci_.patch77
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5733-mmc-sdhci-introduce-adma_write_desc-hook-to-struct-s.patch128
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5734-mmc-sdhci-Add-version-V4-definition.patch46
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5735-mmc-sdhci-Add-sd-host-v4-mode.patch105
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5736-mmc-sdhci-Add-ADMA2-64-bit-addressing-support-for-V4.patch211
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5737-mmc-sdhci-Add-32-bit-block-count-support-for-v4-mode.patch80
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5738-mmc-sdhci-Add-Auto-CMD-Auto-Select-support.patch117
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5739-amd-xgbe-use-dma_mapping_error-to-check-map-errors.patch45
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5740-lib-crc-Move-polynomial-definition-to-separate-heade.patch96
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5741-lib-crc-Use-consistent-naming-for-CRC-32-polynomials.patch105
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5742-net-ethernet-Use-existing-define-with-polynomial.patch46
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5743-net-amd-fix-return-type-of-ndo_start_xmit-function.patch45
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5744-net-phy-Add-helper-for-advertise-to-lcl-value.patch71
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5745-drivers-net-remove-net-busy_poll.h-inclusion-when-no.patch35
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5746-amd-eMMC-sdhci-HS400-workaround-for-ZP.patch103
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5747-drm-amd-display-Raise-dispclk-value-for-CZ.patch48
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5748-drm-amdgpu-gfx8-disable-EDC.patch38
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5749-net-phy-Also-request-modules-for-C45-IDs.patch56
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5750-amd-xgbe-Fix-mdio-access-for-non-zero-ports-and-clau.patch94
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5751-Revert-drm-amdgpu-make-gfx9-enter-into-rlc-safe-mode.patch36
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5752-Revert-drm-amdgpu-abstract-the-function-of-enter-exi.patch1446
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5753-Revert-drm-amdgpu-separate-amdgpu_rlc-into-a-single-.patch454
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5754-drm-amdgpu-make-gfx9-enter-into-rlc-safe-mode-when-s.patch39
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5755-Revert-drm-amdgpu-revert-the-commit-interim-disable-.patch32
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5756-Revert-drm-amdgpu-revert-psp-firmware-load-status-ch.patch32
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5757-drm-amdgpu-psp-ignore-psp-response-status.patch54
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/5758-RTQA4-Fix-build-error-for-hs400-and-hs200.patch81
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/amd-emmc-patches.scc13
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/amd-xgbe-patches.scc7
-rwxr-xr-xcommon/recipes-kernel/linux/linux-yocto-4.14.71/amdgpu-patches.scc1603
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/misc-patches.scc5
-rw-r--r--common/recipes-kernel/linux/linux-yocto_4.14.bbappend4
1631 files changed, 361060 insertions, 1 deletions
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4133-drm-amdgpu-powerplay-Added-missing-endian-fixes-for-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4133-drm-amdgpu-powerplay-Added-missing-endian-fixes-for-.patch
new file mode 100644
index 00000000..02742a7f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4133-drm-amdgpu-powerplay-Added-missing-endian-fixes-for-.patch
@@ -0,0 +1,45 @@
+From 781c7828a3b57d912ba68593e2e9a16350d5edde Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Fri, 8 Jul 2016 10:12:10 -0400
+Subject: [PATCH 4133/5725] drm/amdgpu/powerplay: Added missing endian fixes
+ for ppatomctrl.c
+
+V2
+Atom tables are in LE format.
+
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+index f591b68..63d1060 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+@@ -1073,8 +1073,10 @@ int atomctrl_get_voltage_evv_on_sclk(
+ GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
+ (uint32_t *)&get_voltage_info_param_space);
+
+- *voltage = result ? 0 :
+- le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *)
++ if (0 != result)
++ return result;
++
++ *voltage = le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *)
+ (&get_voltage_info_param_space))->usVoltageLevel);
+
+ return result;
+@@ -1279,7 +1281,8 @@ int atomctrl_read_efuse(struct pp_hwmgr *hwmgr, uint16_t start_index,
+ result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
+ GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
+ (uint32_t *)&efuse_param);
+- *efuse = result ? 0 : le32_to_cpu(efuse_param.ulEfuseValue) & mask;
++ if (!result)
++ *efuse = le32_to_cpu(efuse_param.ulEfuseValue) & mask;
+
+ return result;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4134-drm-amd-powerplay-implement-smu7_smumgr-for-asics-wi.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4134-drm-amd-powerplay-implement-smu7_smumgr-for-asics-wi.patch
new file mode 100644
index 00000000..53bfc116
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4134-drm-amd-powerplay-implement-smu7_smumgr-for-asics-wi.patch
@@ -0,0 +1,35 @@
+From 74ae856aa0a67927e74879917e8b65c0dbc13a23 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Fri, 19 Aug 2016 20:35:48 +0800
+Subject: [PATCH 4134/5725] drm/amd/powerplay: implement smu7_smumgr for asics
+ with smu ip version 7 (V2)
+
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+index d644a9b..64d33b7 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+@@ -283,9 +283,11 @@ int smu7_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t
+
+ result = smu7_set_smc_sram_address(hwmgr, smc_addr, limit);
+
+- *value = result ? 0 : cgs_read_register(hwmgr->device, mmSMC_IND_DATA_11);
++ if (result)
++ return result;
+
+- return result;
++ *value = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_11);
++ return 0;
+ }
+
+ int smu7_write_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t value, uint32_t limit)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4135-drm-amd-powerplay-fix-bug-get-wrong-evv-voltage-of-P.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4135-drm-amd-powerplay-fix-bug-get-wrong-evv-voltage-of-P.patch
new file mode 100644
index 00000000..c13b7556
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4135-drm-amd-powerplay-fix-bug-get-wrong-evv-voltage-of-P.patch
@@ -0,0 +1,39 @@
+From bd8d55b5ce66715bb8193431e6863008c7de7083 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Wed, 26 Oct 2016 12:56:07 +0800
+Subject: [PATCH 4135/5725] drm/amd/powerplay: fix bug get wrong evv voltage of
+ Polaris V2
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+index 63d1060..d3eeafb 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+@@ -1323,9 +1323,11 @@ int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_
+ GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
+ (uint32_t *)&get_voltage_info_param_space);
+
+- *voltage = result ? 0 :
+- le32_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel);
++ if (0 != result)
++ return result;
+
++ *voltage = le32_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)
++ (&get_voltage_info_param_space))->ulVoltageLevel);
+ return result;
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4136-drm-amdgpu-Use-the-drm_driver.dumb_destroy-default.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4136-drm-amdgpu-Use-the-drm_driver.dumb_destroy-default.patch
new file mode 100644
index 00000000..05d8a1a4
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4136-drm-amdgpu-Use-the-drm_driver.dumb_destroy-default.patch
@@ -0,0 +1,35 @@
+From 3b333942ab68ee667cfe8c5df7ba2765010e6a44 Mon Sep 17 00:00:00 2001
+From: Noralf Trønnes <noralf@tronnes.org>
+Date: Sun, 23 Jul 2017 21:16:45 +0200
+Subject: [PATCH 4136/5725] drm/amdgpu: Use the drm_driver.dumb_destroy default
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+drm_gem_dumb_destroy() is the drm_driver.dumb_destroy default,
+so no need to set it.
+
+Cc: Alex Deucher <alexander.deucher@amd.com>
+Cc: Christian König <christian.koenig@amd.com>
+Signed-off-by: Noralf Trønnes <noralf@tronnes.org>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/1500837417-40580-30-git-send-email-noralf@tronnes.org
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 1397a47..3f68ca9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -895,7 +895,6 @@ static struct drm_driver kms_driver = {
+ .gem_close_object = amdgpu_gem_object_close,
+ .dumb_create = amdgpu_mode_dumb_create,
+ .dumb_map_offset = amdgpu_mode_dumb_mmap,
+- .dumb_destroy = drm_gem_dumb_destroy,
+ .fops = &amdgpu_driver_kms_fops,
+
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4137-drm-amd-dc-Add-dc-display-driver-v3.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4137-drm-amd-dc-Add-dc-display-driver-v3.patch
new file mode 100644
index 00000000..3c0a79d2
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4137-drm-amd-dc-Add-dc-display-driver-v3.patch
@@ -0,0 +1,159 @@
+From a9c2b51cc3d75fb871bcc685e203027ca6779f3a Mon Sep 17 00:00:00 2001
+From: Harry Wentland <harry.wentland@amd.com>
+Date: Tue, 12 Sep 2017 15:58:20 -0400
+Subject: [PATCH 4137/5725] drm/amd/dc: Add dc display driver (v3)
+
+Supported DCE versions: 8.0, 10.0, 11.0, 11.2
+
+Added functions get_norm_pix_clk & calculate_phy_pix_clks; there is
+no need to keep track of unreffed clk sources.
+
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 91 ++++++++++++++---------
+ 1 file changed, 57 insertions(+), 34 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index 4ab04c7..81c6de7 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -265,30 +265,23 @@ bool resource_construct(
+
+ return true;
+ }
+-static int find_matching_clock_source(
+- const struct resource_pool *pool,
+- struct clock_source *clock_source)
+-{
+-
+- int i;
+-
+- for (i = 0; i < pool->clk_src_count; i++) {
+- if (pool->clock_sources[i] == clock_source)
+- return i;
+- }
+- return -1;
+-}
+
+ void resource_unreference_clock_source(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct clock_source *clock_source)
+ {
+- int i = find_matching_clock_source(pool, clock_source);
++ int i;
+
+- if (i > -1)
++ for (i = 0; i < pool->clk_src_count; i++) {
++ if (pool->clock_sources[i] != clock_source)
++ continue;
++
+ res_ctx->clock_source_ref_count[i]--;
+
++ break;
++ }
++
+ if (pool->dp_clock_source == clock_source)
+ res_ctx->dp_clock_source_ref_count--;
+ }
+@@ -298,31 +291,19 @@ void resource_reference_clock_source(
+ const struct resource_pool *pool,
+ struct clock_source *clock_source)
+ {
+- int i = find_matching_clock_source(pool, clock_source);
+-
+- if (i > -1)
++ int i;
++ for (i = 0; i < pool->clk_src_count; i++) {
++ if (pool->clock_sources[i] != clock_source)
++ continue;
++
+ res_ctx->clock_source_ref_count[i]++;
++ break;
++ }
+
+ if (pool->dp_clock_source == clock_source)
+ res_ctx->dp_clock_source_ref_count++;
+ }
+
+-int resource_get_clock_source_reference(
+- struct resource_context *res_ctx,
+- const struct resource_pool *pool,
+- struct clock_source *clock_source)
+-{
+- int i = find_matching_clock_source(pool, clock_source);
+-
+- if (i > -1)
+- return res_ctx->clock_source_ref_count[i];
+-
+- if (pool->dp_clock_source == clock_source)
+- return res_ctx->dp_clock_source_ref_count;
+-
+- return -1;
+-}
+-
+ bool resource_are_streams_timing_synchronizable(
+ struct dc_stream_state *stream1,
+ struct dc_stream_state *stream2)
+@@ -1648,6 +1629,46 @@ static struct dc_stream_state *find_pll_sharable_stream(
+ return NULL;
+ }
+
++static int get_norm_pix_clk(const struct dc_crtc_timing *timing)
++{
++ uint32_t pix_clk = timing->pix_clk_khz;
++ uint32_t normalized_pix_clk = pix_clk;
++
++ if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
++ pix_clk /= 2;
++ if (timing->pixel_encoding != PIXEL_ENCODING_YCBCR422) {
++ switch (timing->display_color_depth) {
++ case COLOR_DEPTH_888:
++ normalized_pix_clk = pix_clk;
++ break;
++ case COLOR_DEPTH_101010:
++ normalized_pix_clk = (pix_clk * 30) / 24;
++ break;
++ case COLOR_DEPTH_121212:
++ normalized_pix_clk = (pix_clk * 36) / 24;
++ break;
++ case COLOR_DEPTH_161616:
++ normalized_pix_clk = (pix_clk * 48) / 24;
++ break;
++ default:
++ ASSERT(0);
++ break;
++ }
++ }
++ return normalized_pix_clk;
++}
++
++static void calculate_phy_pix_clks(struct dc_stream_state *stream)
++{
++ /* update actual pixel clock on all streams */
++ if (dc_is_hdmi_signal(stream->signal))
++ stream->phy_pix_clk = get_norm_pix_clk(
++ &stream->timing);
++ else
++ stream->phy_pix_clk =
++ stream->timing.pix_clk_khz;
++}
++
+ enum dc_status resource_map_pool_resources(
+ const struct dc *dc,
+ struct dc_state *context,
+@@ -2550,6 +2571,8 @@ enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream)
+ struct timing_generator *tg = core_dc->res_pool->timing_generators[0];
+ enum dc_status res = DC_OK;
+
++ calculate_phy_pix_clks(stream);
++
+ if (!tg->funcs->validate_timing(tg, &stream->timing))
+ res = DC_FAIL_CONTROLLER_VALIDATE;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4138-drm-amd-display-No-need-to-keep-track-of-unreffed-cl.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4138-drm-amd-display-No-need-to-keep-track-of-unreffed-cl.patch
new file mode 100644
index 00000000..222c5b97
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4138-drm-amd-display-No-need-to-keep-track-of-unreffed-cl.patch
@@ -0,0 +1,36 @@
+From 948decc05f43acba3a626a85db237111598c0064 Mon Sep 17 00:00:00 2001
+From: Harry Wentland <harry.wentland@amd.com>
+Date: Tue, 5 Sep 2017 15:50:48 -0400
+Subject: [PATCH 4138/5725] drm/amd/display: No need to keep track of unreffed
+ clk sources V2
+
+This simplifies clock source reprogramming a bit.
+
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <Harry.Wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index 858edb3..b17afcf 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -1939,10 +1939,7 @@ static void dce110_reset_hw_ctx_wrap(
+ pipe_ctx_old->stream_res.tg->funcs->disable_crtc(pipe_ctx_old->stream_res.tg);
+ pipe_ctx_old->plane_res.mi->funcs->free_mem_input(
+ pipe_ctx_old->plane_res.mi, dc->current_state->stream_count);
+-
+- if (old_clk && 0 == resource_get_clock_source_reference(&context->res_ctx,
+- dc->res_pool,
+- old_clk))
++ if (old_clk)
+ old_clk->funcs->cs_power_down(old_clk);
+
+ dc->hwss.disable_plane(dc, pipe_ctx_old);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4139-dma-buf-keep-only-not-signaled-fence-in-reservation_.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4139-dma-buf-keep-only-not-signaled-fence-in-reservation_.patch
new file mode 100644
index 00000000..f8c4c6ad
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4139-dma-buf-keep-only-not-signaled-fence-in-reservation_.patch
@@ -0,0 +1,217 @@
+From 4257a32da4d8822eb49b8f726d8168282536ea5f Mon Sep 17 00:00:00 2001
+From: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+Date: Wed, 9 Jan 2019 18:15:08 +0530
+Subject: [PATCH 4139/5725] dma-buf: keep only not signaled fence in
+ reservation_object_add_shared_replace v3
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The amdgpu issue of also needing signaled fences in the reservation
+objects should be fixed by now.
+
+Optimize the list by keeping around only the fences that are not yet signaled.
+
+v2: temporary put the signaled fences at the end of the new container
+v3: put the old fence at the end of the new container as well.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
+Tested-by: Chris Wilson <chris@chris-wilson.co.uk>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20171114142436.1360-1-christian.koenig@amd.com
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+Signed-off-by: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+---
+ drivers/dma-buf/reservation.c | 87 ++++++++++++++++++++++++++++---------------
+ 1 file changed, 58 insertions(+), 29 deletions(-)
+ mode change 100644 => 100755 drivers/dma-buf/reservation.c
+
+diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
+old mode 100644
+new mode 100755
+index 012fa3d..1bc15f6
+--- a/drivers/dma-buf/reservation.c
++++ b/drivers/dma-buf/reservation.c
+@@ -104,7 +104,8 @@ reservation_object_add_shared_inplace(struct reservation_object *obj,
+ struct reservation_object_list *fobj,
+ struct dma_fence *fence)
+ {
+- u32 i;
++ struct dma_fence *signaled = NULL;
++ u32 i, signaled_idx;
+
+ dma_fence_get(fence);
+
+@@ -126,17 +127,28 @@ reservation_object_add_shared_inplace(struct reservation_object *obj,
+ dma_fence_put(old_fence);
+ return;
+ }
++
++ if (!signaled && dma_fence_is_signaled(old_fence)) {
++ signaled = old_fence;
++ signaled_idx = i;
++ }
+ }
+
+ /*
+ * memory barrier is added by write_seqcount_begin,
+ * fobj->shared_count is protected by this lock too
+ */
+- RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
+- fobj->shared_count++;
++ if (signaled) {
++ RCU_INIT_POINTER(fobj->shared[signaled_idx], fence);
++ } else {
++ RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
++ fobj->shared_count++;
++ }
+
+ write_seqcount_end(&obj->seq);
+ preempt_enable();
++
++ dma_fence_put(signaled);
+ }
+
+ static void
+@@ -145,8 +157,7 @@ reservation_object_add_shared_replace(struct reservation_object *obj,
+ struct reservation_object_list *fobj,
+ struct dma_fence *fence)
+ {
+- unsigned i;
+- struct dma_fence *old_fence = NULL;
++ unsigned i, j, k;
+
+ dma_fence_get(fence);
+
+@@ -162,24 +173,21 @@ reservation_object_add_shared_replace(struct reservation_object *obj,
+ * references from the old struct are carried over to
+ * the new.
+ */
+- fobj->shared_count = old->shared_count;
+-
+- for (i = 0; i < old->shared_count; ++i) {
++ for (i = 0, j = 0, k = fobj->shared_max; i < old->shared_count; ++i) {
+ struct dma_fence *check;
+
+ check = rcu_dereference_protected(old->shared[i],
+ reservation_object_held(obj));
+
+- if (!old_fence && check->context == fence->context) {
+- old_fence = check;
+- RCU_INIT_POINTER(fobj->shared[i], fence);
+- } else
+- RCU_INIT_POINTER(fobj->shared[i], check);
+- }
+- if (!old_fence) {
+- RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
+- fobj->shared_count++;
++ if (check->context == fence->context ||
++ dma_fence_is_signaled(check))
++ RCU_INIT_POINTER(fobj->shared[--k], check);
++ else
++ RCU_INIT_POINTER(fobj->shared[j++], check);
+ }
++ fobj->shared_count = j;
++ RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
++ fobj->shared_count++;
+
+ done:
+ preempt_disable();
+@@ -192,10 +200,18 @@ reservation_object_add_shared_replace(struct reservation_object *obj,
+ write_seqcount_end(&obj->seq);
+ preempt_enable();
+
+- if (old)
+- kfree_rcu(old, rcu);
++ if (!old)
++ return;
+
+- dma_fence_put(old_fence);
++ /* Drop the references to the signaled fences */
++ for (i = k; i < fobj->shared_max; ++i) {
++ struct dma_fence *f;
++
++ f = rcu_dereference_protected(fobj->shared[i],
++ reservation_object_held(obj));
++ dma_fence_put(f);
++ }
++ kfree_rcu(old, rcu);
+ }
+
+ /**
+@@ -358,8 +374,9 @@ EXPORT_SYMBOL(reservation_object_copy_fences);
+ * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
+ * the required size, and must be freed by caller)
+ *
+- * RETURNS
+- * Zero or -errno
++ * Retrieve all fences from the reservation object. If the pointer for the
++ * exclusive fence is not specified the fence is put into the array of the
++ * shared fences as well. Returns either zero or -ENOMEM.
+ */
+ int reservation_object_get_fences_rcu(struct reservation_object *obj,
+ struct dma_fence **pfence_excl,
+@@ -373,8 +390,8 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
+
+ do {
+ struct reservation_object_list *fobj;
+- unsigned seq;
+- unsigned int i;
++ unsigned int i, seq;
++ size_t sz = 0;
+
+ shared_count = i = 0;
+
+@@ -386,9 +403,14 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
+ goto unlock;
+
+ fobj = rcu_dereference(obj->fence);
+- if (fobj) {
++ if (fobj)
++ sz += sizeof(*shared) * fobj->shared_max;
++
++ if (!pfence_excl && fence_excl)
++ sz += sizeof(*shared);
++
++ if (sz) {
+ struct dma_fence **nshared;
+- size_t sz = sizeof(*shared) * fobj->shared_max;
+
+ nshared = krealloc(shared, sz,
+ GFP_NOWAIT | __GFP_NOWARN);
+@@ -404,13 +426,19 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
+ break;
+ }
+ shared = nshared;
+- shared_count = fobj->shared_count;
+-
++ shared_count = fobj ? fobj->shared_count : 0;
+ for (i = 0; i < shared_count; ++i) {
+ shared[i] = rcu_dereference(fobj->shared[i]);
+ if (!dma_fence_get_rcu(shared[i]))
+ break;
+ }
++
++ if (!pfence_excl && fence_excl) {
++ shared[i] = fence_excl;
++ fence_excl = NULL;
++ ++i;
++ ++shared_count;
++ }
+ }
+
+ if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
+@@ -432,7 +460,8 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
+
+ *pshared_count = shared_count;
+ *pshared = shared;
+- *pfence_excl = fence_excl;
++ if (pfence_excl)
++ *pfence_excl = fence_excl;
+
+ return ret;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4140-drm-amdgpu-Restore-scalable-VM-size-calculation.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4140-drm-amdgpu-Restore-scalable-VM-size-calculation.patch
new file mode 100644
index 00000000..652fb84d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4140-drm-amdgpu-Restore-scalable-VM-size-calculation.patch
@@ -0,0 +1,68 @@
+From d02621a5c25d78ced0d763bacec2f8846574511c Mon Sep 17 00:00:00 2001
+From: ozeng <oak.zeng@amd.com>
+Date: Thu, 6 Jul 2017 15:07:50 -0500
+Subject: [PATCH 4140/5725] drm/amdgpu: Restore scalable VM size calculation
+
+fa2f1a68af48de01c775585c552438aba795b2f5 made both the GART size
+and VM size scalable to system memory size. This was reverted later
+to solve SWDEV-123010. However, the revert caused a KFD huge buffer
+test failure reported in SWDEV-126381. This restores the scalable
+VM size calculation to allow KFD to allocate huge buffers.
+
+BUG: SWDEV-126381
+
+Change-Id: I68cf765dfcb94921aae1a815ab896a367e3f3005
+Signed-off-by: Oak Zeng <oak.zeng@amd.com>
+Reviewed-by: Roger.He <Hongbo.He@amd.com>
+
+ Conflicts:
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 24 ++++++++++++++++++++++++
+ 1 file changed, 24 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 968ec87..2bac39ee 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -982,6 +982,9 @@ static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
+ */
+ static void amdgpu_device_check_arguments(struct amdgpu_device *adev)
+ {
++ struct sysinfo si;
++ int phys_ram_gb, amdgpu_vm_size_aligned;
++
+ if (amdgpu_sched_jobs < 4) {
+ dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
+ amdgpu_sched_jobs);
+@@ -1006,6 +1009,27 @@ static void amdgpu_device_check_arguments(struct amdgpu_device *adev)
+ amdgpu_gtt_size = -1;
+ }
+
++ /* Compute the GPU VM space only if the user
++ * hasn't changed it from the default.
++ */
++ if (amdgpu_vm_size == -1) {
++ /* Computation depends on the amount of physical RAM available.
++ * Cannot exceed 1TB.
++ */
++ si_meminfo(&si);
++ phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit) >> 30;
++ amdgpu_vm_size = min(phys_ram_gb * 3 + 16, 1024);
++
++ /* GPUVM sizes are almost never perfect powers of two.
++ * Round up to nearest power of two starting from
++ * the minimum allowed but aligned size of 32GB */
++ amdgpu_vm_size_aligned = 32;
++ while (amdgpu_vm_size > amdgpu_vm_size_aligned)
++ amdgpu_vm_size_aligned *= 2;
++
++ amdgpu_vm_size = amdgpu_vm_size_aligned;
++ }
++
+ /* valid range is between 4 and 9 inclusive */
+ if (amdgpu_vm_fragment_size != -1 &&
+ (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4141-drm-amdgpu-fix-and-cleanup-UVD-IB-generation-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4141-drm-amdgpu-fix-and-cleanup-UVD-IB-generation-v2.patch
new file mode 100644
index 00000000..532f67c5
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4141-drm-amdgpu-fix-and-cleanup-UVD-IB-generation-v2.patch
@@ -0,0 +1,43 @@
+From 6fa2c740af47c0bb5aa6305569ca669c61f5d6d6 Mon Sep 17 00:00:00 2001
+From: christian koenig <ckoenig.leichtzumerken@gmail.com>
+Date: Wed, 7 Feb 2018 20:48:21 +0100
+Subject: [PATCH 4141/5725] drm/amdgpu: fix and cleanup UVD IB generation(v2)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Moving the structure assignment to inside the if condition.
+
+Change-Id: I4956f054cd23736b605ab058acacb078207a53cb
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Leo Liu <leo.liu@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+index 0685e18..6cf5ccf 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+@@ -959,7 +959,6 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
+ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
+ bool direct, struct dma_fence **fence)
+ {
+- struct ttm_operation_ctx ctx = { true, false };
+ struct amdgpu_device *adev = ring->adev;
+ struct dma_fence *f = NULL;
+ struct amdgpu_job *job;
+@@ -973,6 +972,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
+ amdgpu_bo_unpin(bo);
+
+ if (!ring->adev->uvd.address_64_bit) {
++ struct ttm_operation_ctx ctx = { true, false };
++
+ amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
+ amdgpu_uvd_force_into_uvd_segment(bo);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4142-drm-amdgpu-cleanup-VCN-IB-generation-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4142-drm-amdgpu-cleanup-VCN-IB-generation-v2.patch
new file mode 100644
index 00000000..80e4dc42
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4142-drm-amdgpu-cleanup-VCN-IB-generation-v2.patch
@@ -0,0 +1,34 @@
+From 5044370805caf7ff40a4debecf3cb8cde73443fa Mon Sep 17 00:00:00 2001
+From: christian koenig <ckoenig.leichtzumerken@gmail.com>
+Date: Wed, 7 Feb 2018 20:48:22 +0100
+Subject: [PATCH 4142/5725] drm/amdgpu: cleanup VCN IB generation (v2)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Removed struct ttm_operation_ctx ctx = { true, false }; which
+is not used in the function.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Tested-and-Reviewed-by: Leo Liu <leo.liu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+index 3ed7926..01cc8de 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+@@ -277,7 +277,6 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
+ struct amdgpu_bo *bo, bool direct,
+ struct dma_fence **fence)
+ {
+- struct ttm_operation_ctx ctx = { true, false };
+ struct amdgpu_device *adev = ring->adev;
+ struct dma_fence *f = NULL;
+ struct amdgpu_job *job;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4143-drm-amdkfd-Disable-the-perf-counters-for-old-kernels.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4143-drm-amdkfd-Disable-the-perf-counters-for-old-kernels.patch
new file mode 100644
index 00000000..62035ec7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4143-drm-amdkfd-Disable-the-perf-counters-for-old-kernels.patch
@@ -0,0 +1,32 @@
+From 9335b27a248873ac43730ef4f649c403880b9518 Mon Sep 17 00:00:00 2001
+From: Yong Zhao <Yong.Zhao@amd.com>
+Date: Fri, 28 Apr 2017 18:08:09 -0400
+Subject: [PATCH 4143/5725] drm/amdkfd: Disable the perf counters for old
+ kernels
+
+Because IOMMU functions are missing for old kernels such as 3.10 on
+Redhat 7.3, we choose to disable the performance counter feature on
+those kernels.
+
+Change-Id: Ie159d61a9b36cc38bd306b5e28fa5a3b83646d09
+Signed-off-by: Yong Zhao <Yong.Zhao@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_topology.h | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+index f4d29c4..4c518fe8 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+@@ -190,4 +190,8 @@ struct kfd_topology_device *kfd_create_topology_device(
+ struct list_head *device_list);
+ void kfd_release_topology_device_list(struct list_head *device_list);
+
++extern bool amd_iommu_pc_supported(void);
++extern u8 amd_iommu_pc_get_max_banks(u16 devid);
++extern u8 amd_iommu_pc_get_max_counters(u16 devid);
++
+ #endif /* __KFD_TOPOLOGY_H__ */
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4144-drm-amd-display-fix-Polaris-12-bw-bounding-box-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4144-drm-amd-display-fix-Polaris-12-bw-bounding-box-v2.patch
new file mode 100644
index 00000000..40b109e8
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4144-drm-amd-display-fix-Polaris-12-bw-bounding-box-v2.patch
@@ -0,0 +1,36 @@
+From 2f8ebd8ec602b5680b96fac7880d634c125b795c Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Thu, 8 Mar 2018 12:08:01 -0500
+Subject: [PATCH 4144/5725] drm/amd/display: fix Polaris 12 bw bounding box
+ (v2)
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Reviewed-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+index 0a5eb32..56f46a0 100644
+--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
++++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+@@ -53,10 +53,11 @@ static enum bw_calcs_version bw_calcs_version_from_asic_id(struct hw_asic_id asi
+ return BW_CALCS_VERSION_CARRIZO;
+
+ case FAMILY_VI:
++ if (ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev))
++ return BW_CALCS_VERSION_POLARIS12;
+ if (ASIC_REV_IS_POLARIS10_P(asic_id.hw_internal_rev))
+ return BW_CALCS_VERSION_POLARIS10;
+- if (ASIC_REV_IS_POLARIS11_M(asic_id.hw_internal_rev) ||
+- ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev))
++ if (ASIC_REV_IS_POLARIS11_M(asic_id.hw_internal_rev))
+ return BW_CALCS_VERSION_POLARIS11;
+ return BW_CALCS_VERSION_INVALID;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4145-drm-amdkfd-Fix-and-simplify-sync-object-handling-for.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4145-drm-amdkfd-Fix-and-simplify-sync-object-handling-for.patch
new file mode 100644
index 00000000..8dc10249
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4145-drm-amdkfd-Fix-and-simplify-sync-object-handling-for.patch
@@ -0,0 +1,100 @@
+From fb9d4b3b795048b0b9793508f284c313410eafa8 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Tue, 13 Mar 2018 16:05:59 -0400
+Subject: [PATCH 4145/5725] drm/amdkfd: Fix and simplify sync object handling
+ for KFD
+
+The adev parameter in amdgpu_sync_fence and amdgpu_sync_resv is only
+needed for updating sync->last_vm_update. This breaks if different
+adevs are passed to calls for the same sync object.
+
+Always pass NULL for calls from KFD because sync objects used for
+KFD don't belong to any particular device, and KFD doesn't need the
+sync->last_vm_update fence.
+
+This fixes kernel log warnings on multi-GPU systems after recent
+changes in amdgpu_amdkfd_gpuvm_restore_process_bos.
+
+Change-Id: I5739c5761f65bdd0e6dd749210960df1472be9df
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 28 +++++-------------------
+ 1 file changed, 5 insertions(+), 23 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index 769fdcf..9061f44 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -417,23 +417,6 @@ static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
+ return 0;
+ }
+
+-static int sync_vm_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+- struct dma_fence *f)
+-{
+- int ret = amdgpu_sync_fence(adev, sync, f, false);
+-
+- /* Sync objects can't handle multiple GPUs (contexts) updating
+- * sync->last_vm_update. Fortunately we don't need it for
+- * KFD's purposes, so we can just drop that fence.
+- */
+- if (sync->last_vm_update) {
+- dma_fence_put(sync->last_vm_update);
+- sync->last_vm_update = NULL;
+- }
+-
+- return ret;
+-}
+-
+ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
+ {
+ struct amdgpu_bo *pd = vm->root.base.bo;
+@@ -444,7 +427,7 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
+ if (ret)
+ return ret;
+
+- return sync_vm_fence(adev, sync, vm->last_update);
++ return amdgpu_sync_fence(NULL, sync, vm->last_update, false);
+ }
+
+ /* add_bo_to_vm - Add a BO to a VM
+@@ -850,7 +833,7 @@ static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
+ /* Add the eviction fence back */
+ amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);
+
+- sync_vm_fence(adev, sync, bo_va->last_pt_update);
++ amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
+
+ return 0;
+ }
+@@ -875,7 +858,7 @@ static int update_gpuvm_pte(struct amdgpu_device *adev,
+ return ret;
+ }
+
+- return sync_vm_fence(adev, sync, bo_va->last_pt_update);
++ return amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
+ }
+
+ static int map_bo_to_gpuvm(struct amdgpu_device *adev,
+@@ -953,7 +936,7 @@ static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
+ vm_list_node) {
+ struct amdgpu_bo *pd = peer_vm->root.base.bo;
+
+- ret = amdgpu_sync_resv(amdgpu_ttm_adev(pd->tbo.bdev),
++ ret = amdgpu_sync_resv(NULL,
+ sync, pd->tbo.resv,
+ AMDGPU_FENCE_OWNER_UNDEFINED, false);
+ if (ret)
+@@ -2329,8 +2312,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
+ pr_debug("Memory eviction: Validate BOs failed. Try again\n");
+ goto validate_map_fail;
+ }
+- ret = amdgpu_sync_fence(amdgpu_ttm_adev(bo->tbo.bdev),
+- &sync_obj, bo->tbo.moving, false);
++ ret = amdgpu_sync_fence(NULL, &sync_obj, bo->tbo.moving, false);
+ if (ret) {
+ pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
+ goto validate_map_fail;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4146-drm-amdkfd-use-px-to-print-user-space-address-instea.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4146-drm-amdkfd-use-px-to-print-user-space-address-instea.patch
new file mode 100644
index 00000000..f3d8dfd1
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4146-drm-amdkfd-use-px-to-print-user-space-address-instea.patch
@@ -0,0 +1,55 @@
+From e80daa88b18d5b7fdbd2e408dbb94d5f2e127a64 Mon Sep 17 00:00:00 2001
+From: Philip Yang <Philip.Yang@amd.com>
+Date: Tue, 20 Mar 2018 10:45:26 -0400
+Subject: [PATCH 4146/5725] drm/amdkfd: use %px to print user space address
+ instead of %p
+
+Change-Id: I003ad6f543ca472dafb67ad986ff36a56a225494
+Signed-off-by: Philip Yang <Philip.Yang@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 2 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_queue.c | 8 ++++----
+ 2 files changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index 01c8b19..0c89373 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -247,7 +247,7 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
+ pr_debug("Queue Size: 0x%llX, %u\n",
+ q_properties->queue_size, args->ring_size);
+
+- pr_debug("Queue r/w Pointers: %p, %p\n",
++ pr_debug("Queue r/w Pointers: %px, %px\n",
+ q_properties->read_ptr,
+ q_properties->write_ptr);
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
+index a5315d4..6dcd621 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
+@@ -36,8 +36,8 @@ void print_queue_properties(struct queue_properties *q)
+ pr_debug("Queue Address: 0x%llX\n", q->queue_address);
+ pr_debug("Queue Id: %u\n", q->queue_id);
+ pr_debug("Queue Process Vmid: %u\n", q->vmid);
+- pr_debug("Queue Read Pointer: 0x%p\n", q->read_ptr);
+- pr_debug("Queue Write Pointer: 0x%p\n", q->write_ptr);
++ pr_debug("Queue Read Pointer: 0x%px\n", q->read_ptr);
++ pr_debug("Queue Write Pointer: 0x%px\n", q->write_ptr);
+ pr_debug("Queue Doorbell Pointer: 0x%p\n", q->doorbell_ptr);
+ pr_debug("Queue Doorbell Offset: %u\n", q->doorbell_off);
+ }
+@@ -53,8 +53,8 @@ void print_queue(struct queue *q)
+ pr_debug("Queue Address: 0x%llX\n", q->properties.queue_address);
+ pr_debug("Queue Id: %u\n", q->properties.queue_id);
+ pr_debug("Queue Process Vmid: %u\n", q->properties.vmid);
+- pr_debug("Queue Read Pointer: 0x%p\n", q->properties.read_ptr);
+- pr_debug("Queue Write Pointer: 0x%p\n", q->properties.write_ptr);
++ pr_debug("Queue Read Pointer: 0x%px\n", q->properties.read_ptr);
++ pr_debug("Queue Write Pointer: 0x%px\n", q->properties.write_ptr);
+ pr_debug("Queue Doorbell Pointer: 0x%p\n", q->properties.doorbell_ptr);
+ pr_debug("Queue Doorbell Offset: %u\n", q->properties.doorbell_off);
+ pr_debug("Queue MQD Address: 0x%p\n", q->mqd);
+--
+2.7.4
+
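
The patch above switches the queue read/write pointer prints from %p to %px. On kernels where %p output is hashed (and %px is available), plain %p makes such debug output useless for correlating with user-space addresses. The following standalone module sketch only illustrates the difference in output; it is not part of this patch series and the module name is invented.

/* ptr_fmt_demo.c - hypothetical module, for illustration only */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>

static int __init ptr_fmt_demo_init(void)
{
	void *buf = kmalloc(64, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/* %p prints a hashed value, stable per boot but not a real address */
	pr_info("hashed pointer: %p\n", buf);
	/* %px prints the raw address; only acceptable for debug output */
	pr_info("raw pointer:    %px\n", buf);

	kfree(buf);
	return 0;
}

static void __exit ptr_fmt_demo_exit(void)
{
}

module_init(ptr_fmt_demo_init);
module_exit(ptr_fmt_demo_exit);
MODULE_LICENSE("GPL");
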
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4147-drm-amdgpu-Fix-unbalanced-memory-accounting-in-error.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4147-drm-amdgpu-Fix-unbalanced-memory-accounting-in-error.patch
new file mode 100644
index 00000000..9bac88e7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4147-drm-amdgpu-Fix-unbalanced-memory-accounting-in-error.patch
@@ -0,0 +1,31 @@
+From e0c9c35342db9ece530982054a5541786c6c87f8 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Mon, 19 Mar 2018 18:00:47 -0400
+Subject: [PATCH 4147/5725] drm/amdgpu: Fix unbalanced memory accounting in
+ error case
+
+When the userptr BO has already been created, unreffing the BO will
+unreserve the memory limit. Don't do it twice.
+
+Change-Id: Ibdcd972ef9177756d83b9bebf0244200e7099748
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index 9061f44..b3112fb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -1297,6 +1297,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
+
+ allocate_init_user_pages_failed:
+ amdgpu_bo_unref(&bo);
++ /* Don't unreserve system mem limit twice */
++ goto err_reserve_limit;
+ err_bo_create:
+ if (!sg)
+ unreserve_system_mem_limit(adev, size, alloc_domain);
+--
+2.7.4
+
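
The commit message above describes why the error path must not unreserve the memory limit again once amdgpu_bo_unref() has already done so. A minimal user-space C analogue of that error-unwinding pattern is sketched below; all names are invented and it only models the accounting, not the amdgpu code.

/* error_unwind_demo.c - user-space analogue, names are invented */
#include <stdio.h>
#include <stdlib.h>

static long reserved_bytes;                   /* stands in for the accounted mem limit */

static void unreserve_limit(long size) { reserved_bytes -= size; }

/* Stands in for amdgpu_bo_unref(): releasing the object also returns
 * its accounting, so callers must not unreserve a second time. */
static void object_unref(void *obj, long size)
{
	free(obj);
	unreserve_limit(size);
}

static int alloc_and_init(long size, int fail_init)
{
	void *obj;

	reserved_bytes += size;               /* reserve_limit() */

	obj = malloc(size);
	if (!obj)
		goto err_create;              /* nothing owns the reservation yet */

	if (fail_init)
		goto err_init;                /* object exists: unref covers the accounting */

	object_unref(obj, size);              /* normal teardown for the demo */
	return 0;

err_init:
	object_unref(obj, size);
	/* Don't fall through to err_create: that would unreserve twice. */
	goto out;
err_create:
	unreserve_limit(size);
out:
	return -1;
}

int main(void)
{
	alloc_and_init(4096, 1);
	printf("reserved after failed init: %ld (expect 0)\n", reserved_bytes);
	return 0;
}
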
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4148-drm-amdkfd-Take-reference-to-gtt-usertask.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4148-drm-amdkfd-Take-reference-to-gtt-usertask.patch
new file mode 100644
index 00000000..a5c54077
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4148-drm-amdkfd-Take-reference-to-gtt-usertask.patch
@@ -0,0 +1,47 @@
+From 2f624615743214a059c25bde1525daac702c88d0 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Mon, 19 Mar 2018 16:01:16 -0400
+Subject: [PATCH 4148/5725] drm/amdkfd: Take reference to gtt->usertask
+
+Theoretically the task struct can be destroyed before the BO. Use
+the task_struct's reference count to prevent that.
+
+Change-Id: I20f0c42c8f521347bc93987d0703db1f07696000
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index 71a5c67..6c81f2d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -1024,6 +1024,9 @@ static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
+ {
+ struct amdgpu_ttm_tt *gtt = (void *)ttm;
+
++ if (gtt->usertask)
++ put_task_struct(gtt->usertask);
++
+ ttm_dma_tt_fini(&gtt->ttm);
+ kfree(gtt);
+ }
+@@ -1112,8 +1115,13 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
+ return -EINVAL;
+
+ gtt->userptr = addr;
+- gtt->usertask = current->group_leader;
+ gtt->userflags = flags;
++
++ if (gtt->usertask)
++ put_task_struct(gtt->usertask);
++ gtt->usertask = current->group_leader;
++ get_task_struct(gtt->usertask);
++
+ spin_lock_init(&gtt->guptasklock);
+ INIT_LIST_HEAD(&gtt->guptasks);
+ atomic_set(&gtt->mmu_invalidations, 0);
+--
+2.7.4
+
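
The pattern used by the patch above is the usual "pin an object for as long as another structure points at it" refcounting: take a reference when storing the pointer, drop the old one when replacing it, and drop it again on destroy. A small user-space analogue follows (invented names, plain integer refcount instead of the kernel's task_struct usage counter).

/* refcount_demo.c - user-space analogue of get_task_struct()/put_task_struct() */
#include <stdio.h>
#include <stdlib.h>

struct task {                       /* stands in for struct task_struct */
	int refcount;
	const char *name;
};

static struct task *task_get(struct task *t) { t->refcount++; return t; }

static void task_put(struct task *t)
{
	if (--t->refcount == 0) {
		printf("freeing task %s\n", t->name);
		free(t);
	}
}

struct tt {                         /* stands in for struct amdgpu_ttm_tt */
	struct task *usertask;
};

/* Mirrors amdgpu_ttm_tt_set_userptr(): drop any old reference, then
 * take one on the new task so it outlives the tt that points at it. */
static void tt_set_usertask(struct tt *gtt, struct task *t)
{
	if (gtt->usertask)
		task_put(gtt->usertask);
	gtt->usertask = task_get(t);
}

/* Mirrors amdgpu_ttm_backend_destroy(): release the pinned task. */
static void tt_destroy(struct tt *gtt)
{
	if (gtt->usertask)
		task_put(gtt->usertask);
}

int main(void)
{
	struct task *t = malloc(sizeof(*t));
	struct tt gtt = { 0 };

	if (!t)
		return 1;
	t->refcount = 1;                /* the "process" itself holds one ref */
	t->name = "demo";

	tt_set_usertask(&gtt, t);
	task_put(t);                    /* the process exits... */
	tt_destroy(&gtt);               /* ...but the task survives until the tt lets go */
	return 0;
}
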
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4149-drm-amdgpu-Avoid-GFP_NOIO.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4149-drm-amdgpu-Avoid-GFP_NOIO.patch
new file mode 100644
index 00000000..64e0b3b0
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4149-drm-amdgpu-Avoid-GFP_NOIO.patch
@@ -0,0 +1,59 @@
+From 2e383fd7906e921c6b5a680de88757f4d53b8b60 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Mon, 19 Mar 2018 16:10:38 -0400
+Subject: [PATCH 4149/5725] drm/amdgpu: Avoid GFP_NOIO
+
+Instead allocate the memory before taking the problematic lock and
+free it later if it's not needed.
+
+Change-Id: I2ab8af1d5071401447942cbc0968113b8d87ea68
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 17 +++++++++--------
+ 1 file changed, 9 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+index 3168565..3fc0917 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+@@ -412,7 +412,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
+ enum amdgpu_mn_type type =
+ bo->kfd_bo ? AMDGPU_MN_TYPE_HSA : AMDGPU_MN_TYPE_GFX;
+ struct amdgpu_mn *rmn;
+- struct amdgpu_mn_node *node = NULL;
++ struct amdgpu_mn_node *node = NULL, *new_node;
+ struct list_head bos;
+ struct interval_tree_node *it;
+
+@@ -420,6 +420,10 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
+ if (IS_ERR(rmn))
+ return PTR_ERR(rmn);
+
++ new_node = kmalloc(sizeof(*new_node), GFP_KERNEL);
++ if (!new_node)
++ return -ENOMEM;
++
+ INIT_LIST_HEAD(&bos);
+
+ down_write(&rmn->lock);
+@@ -433,13 +437,10 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
+ list_splice(&node->bos, &bos);
+ }
+
+- if (!node) {
+- node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_NOIO);
+- if (!node) {
+- up_write(&rmn->lock);
+- return -ENOMEM;
+- }
+- }
++ if (!node)
++ node = new_node;
++ else
++ kfree(new_node);
+
+ bo->mn = rmn;
+
+--
+2.7.4
+
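
The fix above follows a common locking pattern: perform the blocking allocation before taking the lock, then either consume the pre-allocated node under the lock or free it if an existing node was found. A self-contained user-space sketch of the same idea (a pthread mutex standing in for rmn->lock, invented names):

/* prealloc_demo.c - user-space analogue of "allocate before taking the lock" */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { int key; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *existing;           /* stands in for the interval-tree lookup */

static int register_range(int key)
{
	struct node *node, *new_node;

	/* Allocate while no lock is held, where a full blocking allocation
	 * is safe; the kernel equivalent is using GFP_KERNEL here instead
	 * of GFP_NOIO under rmn->lock. */
	new_node = malloc(sizeof(*new_node));
	if (!new_node)
		return -1;

	pthread_mutex_lock(&lock);

	node = existing;                /* did someone already insert one? */
	if (!node) {
		node = new_node;        /* use the pre-allocated node */
		node->key = key;
		existing = node;
	} else {
		free(new_node);         /* not needed after all */
	}

	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	register_range(1);
	register_range(1);              /* second call reuses the existing node */
	printf("registered key %d\n", existing->key);
	free(existing);
	return 0;
}
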
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4150-drm-amdgpu-Fix-acquiring-VM-on-large-BAR-systems.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4150-drm-amdgpu-Fix-acquiring-VM-on-large-BAR-systems.patch
new file mode 100644
index 00000000..666873f7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4150-drm-amdgpu-Fix-acquiring-VM-on-large-BAR-systems.patch
@@ -0,0 +1,32 @@
+From 4e9babb2ea3572f2c4bbf0739bb4968d90dc1290 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Wed, 21 Mar 2018 16:59:34 -0400
+Subject: [PATCH 4150/5725] drm/amdgpu: Fix acquiring VM on large-BAR systems
+
+On large-BAR systems the VM page tables for compute are accessed by
+the CPU. Always allow CPU access to the page directory so that it can
+be used later by the CPU when a VM is converted to a compute VM.
+
+Change-Id: I5f2876c3d7a159bda2ddabb6014b4a3123275071
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 6c071ae..1a7d9aba 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -2454,8 +2454,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ if (vm->use_cpu_for_update)
+ flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ else
+- flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
+- AMDGPU_GEM_CREATE_SHADOW);
++ flags |= AMDGPU_GEM_CREATE_SHADOW;
+
+ size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level);
+ r = amdgpu_bo_create(adev, size, align, AMDGPU_GEM_DOMAIN_VRAM, flags,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4151-drm-amdkfd-Simplify-dGPU-event-page-allocation.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4151-drm-amdkfd-Simplify-dGPU-event-page-allocation.patch
new file mode 100644
index 00000000..d2794767
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4151-drm-amdkfd-Simplify-dGPU-event-page-allocation.patch
@@ -0,0 +1,284 @@
+From 25edfba75db6c5455606aa0e626c427c7e2477dd Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Mon, 19 Mar 2018 18:02:07 -0400
+Subject: [PATCH 4151/5725] drm/amdkfd: Simplify dGPU event page allocation
+
+Deal with all the events page allocation in kfd_chardev.c and remove
+unnecessary checks for APU. This will also potentially allow mixed
+configurations of dGPUs with APUs.
+
+Explicitly set the events page in the ioctl instead of doing it
+implicitly in kfd_event_create. This also fixes a potential memory
+leak if the events page was already set in a previous call; such a
+call will now fail.
+
+Explicitly remember how the events page was allocated so it can be
+freed correctly.
+
+Change-Id: I77ecd0b699c20d2e9a1ff7226e387df143ad6a5b
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 84 +++++++++++++++++++-------------
+ drivers/gpu/drm/amd/amdkfd/kfd_events.c | 69 +++++++++++---------------
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 4 +-
+ 3 files changed, 80 insertions(+), 77 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index 0c89373..c5e6488 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -976,55 +976,69 @@ static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
+ void *data)
+ {
+ struct kfd_ioctl_create_event_args *args = data;
+- struct kfd_dev *kfd;
+- struct kfd_process_device *pdd;
+- int err = -EINVAL;
+- void *mem, *kern_addr = NULL;
+-
+- pr_debug("Event page offset 0x%llx\n", args->event_page_offset);
++ int err;
+
++ /* For dGPUs the event page is allocated in user mode. The
++ * handle is passed to KFD with the first call to this IOCTL
++ * through the event_page_offset field.
++ */
+ if (args->event_page_offset) {
++ struct kfd_dev *kfd;
++ struct kfd_process_device *pdd;
++ void *mem, *kern_addr;
++
++ if (p->signal_page) {
++ pr_err("Event page is already set\n");
++ return -EINVAL;
++ }
++
+ kfd = kfd_device_by_id(GET_GPU_ID(args->event_page_offset));
+ if (!kfd) {
+ pr_err("Getting device by id failed in %s\n", __func__);
+- return -EFAULT;
++ return -EINVAL;
+ }
+- if (!kfd->device_info->needs_iommu_device) {
+- mutex_lock(&p->mutex);
+- pdd = kfd_bind_process_to_device(kfd, p);
+- if (IS_ERR(pdd)) {
+- err = PTR_ERR(pdd);
+- goto out_upwrite;
+- }
+- mem = kfd_process_device_translate_handle(pdd,
++
++ mutex_lock(&p->mutex);
++ pdd = kfd_bind_process_to_device(kfd, p);
++ if (IS_ERR(pdd)) {
++ err = PTR_ERR(pdd);
++ goto out_unlock;
++ }
++
++ mem = kfd_process_device_translate_handle(pdd,
+ GET_IDR_HANDLE(args->event_page_offset));
+- if (!mem) {
+- pr_err("Can't find BO, offset is 0x%llx\n",
+- args->event_page_offset);
+- err = -EFAULT;
+- goto out_upwrite;
+- }
+- mutex_unlock(&p->mutex);
++ if (!mem) {
++ pr_err("Can't find BO, offset is 0x%llx\n",
++ args->event_page_offset);
++ err = -EINVAL;
++ goto out_unlock;
++ }
++ mutex_unlock(&p->mutex);
+
+- /* Map dGPU gtt BO to kernel */
+- kfd->kfd2kgd->map_gtt_bo_to_kernel(kfd->kgd,
+- mem, &kern_addr, NULL);
++ err = kfd->kfd2kgd->map_gtt_bo_to_kernel(kfd->kgd,
++ mem, &kern_addr, NULL);
++ if (err) {
++ pr_err("Failed to map event page to kernel\n");
++ return err;
++ }
++
++ err = kfd_event_page_set(p, kern_addr);
++ if (err) {
++ pr_err("Failed to set event page\n");
++ return err;
+ }
+ }
+
+- err = kfd_event_create(filp, p,
+- args->event_type,
+- args->auto_reset != 0,
+- args->node_id,
+- &args->event_id,
+- &args->event_trigger_data,
+- &args->event_page_offset,
+- &args->event_slot_index,
+- kern_addr);
++
++ err = kfd_event_create(filp, p, args->event_type,
++ args->auto_reset != 0, args->node_id,
++ &args->event_id, &args->event_trigger_data,
++ &args->event_page_offset,
++ &args->event_slot_index);
+
+ return err;
+
+-out_upwrite:
++out_unlock:
+ mutex_unlock(&p->mutex);
+ return err;
+ }
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+index a92ca78..d002016 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+@@ -51,8 +51,8 @@ struct kfd_event_waiter {
+ */
+ struct kfd_signal_page {
+ uint64_t *kernel_address;
+- uint64_t handle;
+ uint64_t __user *user_address;
++ bool need_to_free_pages;
+ };
+
+
+@@ -80,6 +80,7 @@ static struct kfd_signal_page *allocate_signal_page(struct kfd_process *p)
+ KFD_SIGNAL_EVENT_LIMIT * 8);
+
+ page->kernel_address = backing_store;
++ page->need_to_free_pages = true;
+ pr_debug("Allocated new event signal page at %p, for process %p\n",
+ page, p);
+
+@@ -112,29 +113,6 @@ static int allocate_event_notification_slot(struct kfd_process *p,
+ return 0;
+ }
+
+-static struct kfd_signal_page *allocate_signal_page_dgpu(
+- struct kfd_process *p, uint64_t *kernel_address, uint64_t handle)
+-{
+- struct kfd_signal_page *my_page;
+-
+- my_page = kzalloc(sizeof(*my_page), GFP_KERNEL);
+- if (!my_page)
+- return NULL;
+-
+- /* Initialize all events to unsignaled */
+- memset(kernel_address, (uint8_t) UNSIGNALED_EVENT_SLOT,
+- KFD_SIGNAL_EVENT_LIMIT * 8);
+-
+- my_page->kernel_address = kernel_address;
+- my_page->handle = handle;
+- my_page->user_address = NULL;
+-
+- pr_debug("Allocated new event signal page at %p, for process %p\n",
+- my_page, p);
+-
+- return my_page;
+-}
+-
+ /*
+ * Assumes that p->event_mutex is held and of course that p is not going
+ * away (current or locked).
+@@ -284,9 +262,9 @@ static void shutdown_signal_page(struct kfd_process *p)
+ struct kfd_signal_page *page = p->signal_page;
+
+ if (page) {
+- if (page->user_address)
++ if (page->need_to_free_pages)
+ free_pages((unsigned long)page->kernel_address,
+- get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
++ get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
+ kfree(page);
+ }
+ }
+@@ -308,11 +286,32 @@ static bool event_can_be_cpu_signaled(const struct kfd_event *ev)
+ return ev->type == KFD_EVENT_TYPE_SIGNAL;
+ }
+
++int kfd_event_page_set(struct kfd_process *p, void *kernel_address)
++{
++ struct kfd_signal_page *page;
++
++ if (p->signal_page)
++ return -EBUSY;
++
++ page = kzalloc(sizeof(*page), GFP_KERNEL);
++ if (!page)
++ return -ENOMEM;
++
++ /* Initialize all events to unsignaled */
++ memset(kernel_address, (uint8_t) UNSIGNALED_EVENT_SLOT,
++ KFD_SIGNAL_EVENT_LIMIT * 8);
++
++ page->kernel_address = kernel_address;
++
++ p->signal_page = page;
++
++ return 0;
++}
++
+ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
+ uint32_t event_type, bool auto_reset, uint32_t node_id,
+ uint32_t *event_id, uint32_t *event_trigger_data,
+- uint64_t *event_page_offset, uint32_t *event_slot_index,
+- void *kern_addr)
++ uint64_t *event_page_offset, uint32_t *event_slot_index)
+ {
+ int ret = 0;
+ struct kfd_event *ev = kzalloc(sizeof(*ev), GFP_KERNEL);
+@@ -326,19 +325,10 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
+
+ init_waitqueue_head(&ev->wq);
+
+- mutex_lock(&p->event_mutex);
+-
+- if (kern_addr && !p->signal_page) {
+- p->signal_page = allocate_signal_page_dgpu(p, kern_addr,
+- *event_page_offset);
+- if (!p->signal_page) {
+- ret = -ENOMEM;
+- goto out;
+- }
+- }
+-
+ *event_page_offset = 0;
+
++ mutex_lock(&p->event_mutex);
++
+ switch (event_type) {
+ case KFD_EVENT_TYPE_SIGNAL:
+ case KFD_EVENT_TYPE_DEBUG:
+@@ -361,7 +351,6 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
+ kfree(ev);
+ }
+
+-out:
+ mutex_unlock(&p->event_mutex);
+
+ return ret;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index b2ef0f5..5928080 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -1056,11 +1056,11 @@ void kfd_signal_iommu_event(struct kfd_dev *dev,
+ void kfd_signal_hw_exception_event(unsigned int pasid);
+ int kfd_set_event(struct kfd_process *p, uint32_t event_id);
+ int kfd_reset_event(struct kfd_process *p, uint32_t event_id);
++int kfd_event_page_set(struct kfd_process *p, void *kernel_address);
+ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
+ uint32_t event_type, bool auto_reset, uint32_t node_id,
+ uint32_t *event_id, uint32_t *event_trigger_data,
+- uint64_t *event_page_offset, uint32_t *event_slot_index,
+- void *kern_addr);
++ uint64_t *event_page_offset, uint32_t *event_slot_index);
+ int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);
+
+ void kfd_signal_vm_fault_event(struct kfd_dev *dev, unsigned int pasid,
+--
+2.7.4
+
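
One part of the patch above is the need_to_free_pages flag: the teardown path frees the backing store only if this code allocated it, because on dGPUs the event page is supplied by user mode. A small user-space analogue of that ownership-tracking pattern (invented names, not the KFD code itself):

/* ownership_demo.c - user-space analogue of the need_to_free_pages flag */
#include <stdlib.h>
#include <string.h>

struct signal_page {
	void *kernel_address;
	int   need_to_free_pages;   /* did we allocate the backing store ourselves? */
};

/* APU-style path: the page is allocated here, so we must free it later. */
static struct signal_page *page_alloc(size_t size)
{
	struct signal_page *p = calloc(1, sizeof(*p));

	if (!p)
		return NULL;
	p->kernel_address = malloc(size);
	if (!p->kernel_address) {
		free(p);
		return NULL;
	}
	memset(p->kernel_address, 0xff, size);   /* all slots unsignaled */
	p->need_to_free_pages = 1;
	return p;
}

/* dGPU-style path: user mode supplied the backing store, only wrap it. */
static struct signal_page *page_set(void *kernel_address, size_t size)
{
	struct signal_page *p = calloc(1, sizeof(*p));

	if (!p)
		return NULL;
	memset(kernel_address, 0xff, size);
	p->kernel_address = kernel_address;      /* need_to_free_pages stays 0 */
	return p;
}

static void page_shutdown(struct signal_page *p)
{
	if (p->need_to_free_pages)
		free(p->kernel_address);         /* only free what we allocated */
	free(p);
}

int main(void)
{
	static char user_buf[4096];              /* stands in for the user-allocated mapping */
	struct signal_page *a = page_alloc(4096);
	struct signal_page *b = page_set(user_buf, sizeof(user_buf));

	if (a)
		page_shutdown(a);
	if (b)
		page_shutdown(b);                /* must not free the caller-owned buffer */
	return 0;
}
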
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4152-drm-amdkfd-Backwards-compatibility-with-old-Thunk.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4152-drm-amdkfd-Backwards-compatibility-with-old-Thunk.patch
new file mode 100644
index 00000000..4244e447
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4152-drm-amdkfd-Backwards-compatibility-with-old-Thunk.patch
@@ -0,0 +1,148 @@
+From d4a55f30acc79996138639c9f9c79a137ff43e14 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Mon, 19 Mar 2018 18:35:35 -0400
+Subject: [PATCH 4152/5725] drm/amdkfd: Backwards compatibility with old Thunk
+
+Don't assume a fixed events page size. Old upstream KFD versions and
+corresponding Thunk builds used a smaller size. Instead use the size
+of the actual allocation or mapping to determine the event limit.
+
+Change-Id: I759095f15c2d5cd9414dc9c292fd1e2889ef45a0
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 5 +++--
+ drivers/gpu/drm/amd/amdkfd/kfd_events.c | 28 ++++++++++++++++++++++------
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 4 +++-
+ 3 files changed, 28 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index c5e6488..73aec76 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -986,6 +986,7 @@ static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
+ struct kfd_dev *kfd;
+ struct kfd_process_device *pdd;
+ void *mem, *kern_addr;
++ uint64_t size;
+
+ if (p->signal_page) {
+ pr_err("Event page is already set\n");
+@@ -1016,13 +1017,13 @@ static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
+ mutex_unlock(&p->mutex);
+
+ err = kfd->kfd2kgd->map_gtt_bo_to_kernel(kfd->kgd,
+- mem, &kern_addr, NULL);
++ mem, &kern_addr, &size);
+ if (err) {
+ pr_err("Failed to map event page to kernel\n");
+ return err;
+ }
+
+- err = kfd_event_page_set(p, kern_addr);
++ err = kfd_event_page_set(p, kern_addr, size);
+ if (err) {
+ pr_err("Failed to set event page\n");
+ return err;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+index d002016..644ce9d 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+@@ -100,9 +100,17 @@ static int allocate_event_notification_slot(struct kfd_process *p,
+ p->signal_page = allocate_signal_page(p);
+ if (!p->signal_page)
+ return -ENOMEM;
++ /* Oldest user mode expects 256 event slots */
++ p->signal_mapped_size = 256*8;
+ }
+
+- id = idr_alloc(&p->event_idr, ev, 0, KFD_SIGNAL_EVENT_LIMIT,
++ /*
++ * Compatibility with old user mode: Only use signal slots
++ * user mode has mapped, may be less than
++ * KFD_SIGNAL_EVENT_LIMIT. This also allows future increase
++ * of the event limit without breaking user mode.
++ */
++ id = idr_alloc(&p->event_idr, ev, 0, p->signal_mapped_size / 8,
+ GFP_KERNEL);
+ if (id < 0)
+ return id;
+@@ -176,7 +184,8 @@ static int create_signal_event(struct file *devkfd,
+ {
+ int ret;
+
+- if (p->signal_event_count == KFD_SIGNAL_EVENT_LIMIT) {
++ if (p->signal_mapped_size &&
++ p->signal_event_count == p->signal_mapped_size / 8) {
+ if (!p->signal_event_limit_reached) {
+ pr_warn("Signal event wasn't created because limit was reached\n");
+ p->signal_event_limit_reached = true;
+@@ -286,7 +295,8 @@ static bool event_can_be_cpu_signaled(const struct kfd_event *ev)
+ return ev->type == KFD_EVENT_TYPE_SIGNAL;
+ }
+
+-int kfd_event_page_set(struct kfd_process *p, void *kernel_address)
++int kfd_event_page_set(struct kfd_process *p, void *kernel_address,
++ uint64_t size)
+ {
+ struct kfd_signal_page *page;
+
+@@ -304,6 +314,7 @@ int kfd_event_page_set(struct kfd_process *p, void *kernel_address)
+ page->kernel_address = kernel_address;
+
+ p->signal_page = page;
++ p->signal_mapped_size = size;
+
+ return 0;
+ }
+@@ -769,9 +780,10 @@ int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
+
+ unsigned long pfn;
+ struct kfd_signal_page *page;
++ int ret;
+
+- /* check required size is logical */
+- if (get_order(KFD_SIGNAL_EVENT_LIMIT * 8) !=
++ /* check required size doesn't exceed the allocated size */
++ if (get_order(KFD_SIGNAL_EVENT_LIMIT * 8) <
+ get_order(vma->vm_end - vma->vm_start)) {
+ pr_err("Event page mmap requested illegal size\n");
+ return -EINVAL;
+@@ -801,8 +813,12 @@ int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
+ page->user_address = (uint64_t __user *)vma->vm_start;
+
+ /* mapping the page to user process */
+- return remap_pfn_range(vma, vma->vm_start, pfn,
++ ret = remap_pfn_range(vma, vma->vm_start, pfn,
+ vma->vm_end - vma->vm_start, vma->vm_page_prot);
++ if (!ret)
++ p->signal_mapped_size = vma->vm_end - vma->vm_start;
++
++ return ret;
+ }
+
+ /*
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index 5928080..a33984c 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -712,6 +712,7 @@ struct kfd_process {
+ struct idr event_idr;
+ /* Event page */
+ struct kfd_signal_page *signal_page;
++ size_t signal_mapped_size;
+ size_t signal_event_count;
+ bool signal_event_limit_reached;
+
+@@ -1056,7 +1057,8 @@ void kfd_signal_iommu_event(struct kfd_dev *dev,
+ void kfd_signal_hw_exception_event(unsigned int pasid);
+ int kfd_set_event(struct kfd_process *p, uint32_t event_id);
+ int kfd_reset_event(struct kfd_process *p, uint32_t event_id);
+-int kfd_event_page_set(struct kfd_process *p, void *kernel_address);
++int kfd_event_page_set(struct kfd_process *p, void *kernel_address,
++ uint64_t size);
+ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
+ uint32_t event_type, bool auto_reset, uint32_t node_id,
+ uint32_t *event_id, uint32_t *event_trigger_data,
+--
+2.7.4
+
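
The core of the compatibility fix above is arithmetic: the number of usable signal slots is derived from the size user mode actually mapped, capped by the driver's own limit, instead of assuming a fixed page size. A tiny standalone sketch of that calculation (the constants are illustrative, not the real KFD values):

/* slot_limit_demo.c - deriving the usable event-slot count from the mapped size */
#include <stdio.h>

#define SLOT_SIZE 8                     /* one 64-bit signal slot per event */
#define KFD_SIGNAL_EVENT_LIMIT 4096     /* illustrative compile-time maximum */

/* Old Thunk builds map a smaller signal page; only hand out event IDs
 * that fall inside what user mode has actually mapped. */
static unsigned int usable_event_slots(unsigned long signal_mapped_size)
{
	unsigned long slots = signal_mapped_size / SLOT_SIZE;

	return slots < KFD_SIGNAL_EVENT_LIMIT ? (unsigned int)slots
					      : KFD_SIGNAL_EVENT_LIMIT;
}

int main(void)
{
	printf("old thunk (256 slots mapped): %u usable slots\n",
	       usable_event_slots(256UL * SLOT_SIZE));
	printf("new thunk (full event page):  %u usable slots\n",
	       usable_event_slots((unsigned long)KFD_SIGNAL_EVENT_LIMIT * SLOT_SIZE));
	return 0;
}
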
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4153-drm-amdkfd-Remove-pm_map_process_scratch_cik.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4153-drm-amdkfd-Remove-pm_map_process_scratch_cik.patch
new file mode 100644
index 00000000..4d92b829
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4153-drm-amdkfd-Remove-pm_map_process_scratch_cik.patch
@@ -0,0 +1,115 @@
+From edf9c86fdd95914288fcb7744381724ee3523ed8 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Tue, 27 Mar 2018 15:07:49 -0400
+Subject: [PATCH 4153/5725] drm/amdkfd: Remove pm_map_process_scratch_cik
+
+The packet structure is identical with the VI packet. So we can use
+pm_map_process_vi instead.
+
+Change-Id: Ifff68999017d86f91869ab40435b9f973e37dd3b
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c | 42 ++---------------------
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c | 2 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 2 ++
+ 3 files changed, 5 insertions(+), 41 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c
+index 2808422..b8a7c4a 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c
+@@ -85,47 +85,10 @@ static int pm_map_process_cik(struct packet_manager *pm, uint32_t *buffer,
+ return 0;
+ }
+
+-static int pm_map_process_scratch_cik(struct packet_manager *pm,
+- uint32_t *buffer, struct qcm_process_device *qpd)
+-{
+- struct pm4_map_process_scratch_kv *packet;
+-
+- packet = (struct pm4_map_process_scratch_kv *)buffer;
+-
+- memset(buffer, 0, sizeof(struct pm4_map_process_scratch_kv));
+-
+- packet->header.u32all = pm_build_pm4_header(IT_MAP_PROCESS,
+- sizeof(struct pm4_map_process_scratch_kv));
+- packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
+- packet->bitfields2.process_quantum = 1;
+- packet->bitfields2.pasid = qpd->pqm->process->pasid;
+- packet->bitfields3.page_table_base = qpd->page_table_base;
+- packet->bitfields14.gds_size = qpd->gds_size;
+- packet->bitfields14.num_gws = qpd->num_gws;
+- packet->bitfields14.num_oac = qpd->num_oac;
+- packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;
+-
+- packet->sh_mem_config = qpd->sh_mem_config;
+- packet->sh_mem_bases = qpd->sh_mem_bases;
+- packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base;
+- packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit;
+-
+- packet->sh_hidden_private_base_vmid = qpd->sh_hidden_private_base;
+-
+- packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
+- packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);
+-
+- return 0;
+-}
+-
+ static uint32_t pm_get_map_process_packet_size_cik(void)
+ {
+ return sizeof(struct pm4_map_process);
+ }
+-static uint32_t pm_get_map_process_scratch_packet_size_cik(void)
+-{
+- return sizeof(struct pm4_map_process_scratch_kv);
+-}
+
+
+ static struct packet_manager_funcs kfd_cik_pm_funcs = {
+@@ -146,15 +109,14 @@ static struct packet_manager_funcs kfd_cik_pm_funcs = {
+ };
+
+ static struct packet_manager_funcs kfd_cik_scratch_pm_funcs = {
+- .map_process = pm_map_process_scratch_cik,
++ .map_process = pm_map_process_vi,
+ .runlist = pm_runlist_vi,
+ .set_resources = pm_set_resources_vi,
+ .map_queues = pm_map_queues_vi,
+ .unmap_queues = pm_unmap_queues_vi,
+ .query_status = pm_query_status_vi,
+ .release_mem = pm_release_mem_vi,
+- .get_map_process_packet_size =
+- pm_get_map_process_scratch_packet_size_cik,
++ .get_map_process_packet_size = pm_get_map_process_packet_size_vi,
+ .get_runlist_packet_size = pm_get_runlist_packet_size_vi,
+ .get_set_resources_packet_size = pm_get_set_resources_packet_size_vi,
+ .get_map_queues_packet_size = pm_get_map_queues_packet_size_vi,
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
+index 9022ecb..13ff604d 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
+@@ -67,7 +67,7 @@ static void submit_packet_vi(struct kernel_queue *kq)
+ kq->pending_wptr);
+ }
+
+-static int pm_map_process_vi(struct packet_manager *pm,
++int pm_map_process_vi(struct packet_manager *pm,
+ uint32_t *buffer, struct qcm_process_device *qpd)
+ {
+ struct pm4_mes_map_process *packet;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index a33984c..795bec1 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -1004,6 +1004,8 @@ void pm_release_ib(struct packet_manager *pm);
+
+ /* Following PM funcs can be shared among CIK and VI */
+ unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size);
++int pm_map_process_vi(struct packet_manager *pm,
++ uint32_t *buffer, struct qcm_process_device *qpd);
+ int pm_runlist_vi(struct packet_manager *pm, uint32_t *buffer,
+ uint64_t ib, size_t ib_size_in_dwords, bool chain);
+ int pm_map_queues_vi(struct packet_manager *pm, uint32_t *buffer,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4154-drm-amdgpu-Remove-pm_map_process_cik.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4154-drm-amdgpu-Remove-pm_map_process_cik.patch
new file mode 100644
index 00000000..e5077657
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4154-drm-amdgpu-Remove-pm_map_process_cik.patch
@@ -0,0 +1,347 @@
+From af9ee366994d3ccab78f486715e44161941950c3 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Tue, 27 Mar 2018 15:23:19 -0400
+Subject: [PATCH 4154/5725] drm/amdgpu: Remove pm_map_process_cik
+
+This deprecated packet format does not support scratch memory, which
+has long been required by the runtime. It was not upstreamed and can
+be removed.
+
+Now CIK and VI use the same packets across the board, so there is no
+more need to maintain a separate function table for CIK. The FW
+version check is also no longer needed.
+
+Change-Id: Icb2d9fb0e83eb0dc1547fd85bf4cd971b4b08fec
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 4 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c | 79 ----------------------
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c | 2 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c | 26 +++----
+ drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 10 ++-
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 31 +--------
+ 6 files changed, 23 insertions(+), 129 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index 8c04f7a2..b0c159a 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -885,7 +885,7 @@ static void uninitialize(struct device_queue_manager *dqm)
+ static int start_nocpsch(struct device_queue_manager *dqm)
+ {
+ init_interrupts(dqm);
+- return pm_init(&dqm->packets, dqm, dqm->dev->mec_fw_version);
++ return pm_init(&dqm->packets, dqm);
+ }
+
+ static int stop_nocpsch(struct device_queue_manager *dqm)
+@@ -1030,7 +1030,7 @@ static int start_cpsch(struct device_queue_manager *dqm)
+
+ retval = 0;
+
+- retval = pm_init(&dqm->packets, dqm, dqm->dev->mec_fw_version);
++ retval = pm_init(&dqm->packets, dqm);
+ if (retval)
+ goto fail_packet_manager_init;
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c
+index b8a7c4a..b48c29f 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c
+@@ -53,82 +53,3 @@ static void submit_packet_cik(struct kernel_queue *kq)
+ write_kernel_doorbell(kq->queue->properties.doorbell_ptr,
+ kq->pending_wptr);
+ }
+-
+-static int pm_map_process_cik(struct packet_manager *pm, uint32_t *buffer,
+- struct qcm_process_device *qpd)
+-{
+- struct pm4_map_process *packet;
+-
+- packet = (struct pm4_map_process *)buffer;
+-
+- memset(buffer, 0, sizeof(struct pm4_map_process));
+-
+- packet->header.u32all = pm_build_pm4_header(IT_MAP_PROCESS,
+- sizeof(struct pm4_map_process));
+- packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
+- packet->bitfields2.process_quantum = 1;
+- packet->bitfields2.pasid = qpd->pqm->process->pasid;
+- packet->bitfields3.page_table_base = qpd->page_table_base;
+- packet->bitfields10.gds_size = qpd->gds_size;
+- packet->bitfields10.num_gws = qpd->num_gws;
+- packet->bitfields10.num_oac = qpd->num_oac;
+- packet->bitfields10.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;
+-
+- packet->sh_mem_config = qpd->sh_mem_config;
+- packet->sh_mem_bases = qpd->sh_mem_bases;
+- packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base;
+- packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit;
+-
+- packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
+- packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);
+-
+- return 0;
+-}
+-
+-static uint32_t pm_get_map_process_packet_size_cik(void)
+-{
+- return sizeof(struct pm4_map_process);
+-}
+-
+-
+-static struct packet_manager_funcs kfd_cik_pm_funcs = {
+- .map_process = pm_map_process_cik,
+- .runlist = pm_runlist_vi,
+- .set_resources = pm_set_resources_vi,
+- .map_queues = pm_map_queues_vi,
+- .unmap_queues = pm_unmap_queues_vi,
+- .query_status = pm_query_status_vi,
+- .release_mem = pm_release_mem_vi,
+- .get_map_process_packet_size = pm_get_map_process_packet_size_cik,
+- .get_runlist_packet_size = pm_get_runlist_packet_size_vi,
+- .get_set_resources_packet_size = pm_get_set_resources_packet_size_vi,
+- .get_map_queues_packet_size = pm_get_map_queues_packet_size_vi,
+- .get_unmap_queues_packet_size = pm_get_unmap_queues_packet_size_vi,
+- .get_query_status_packet_size = pm_get_query_status_packet_size_vi,
+- .get_release_mem_packet_size = pm_get_release_mem_packet_size_vi,
+-};
+-
+-static struct packet_manager_funcs kfd_cik_scratch_pm_funcs = {
+- .map_process = pm_map_process_vi,
+- .runlist = pm_runlist_vi,
+- .set_resources = pm_set_resources_vi,
+- .map_queues = pm_map_queues_vi,
+- .unmap_queues = pm_unmap_queues_vi,
+- .query_status = pm_query_status_vi,
+- .release_mem = pm_release_mem_vi,
+- .get_map_process_packet_size = pm_get_map_process_packet_size_vi,
+- .get_runlist_packet_size = pm_get_runlist_packet_size_vi,
+- .get_set_resources_packet_size = pm_get_set_resources_packet_size_vi,
+- .get_map_queues_packet_size = pm_get_map_queues_packet_size_vi,
+- .get_unmap_queues_packet_size = pm_get_unmap_queues_packet_size_vi,
+- .get_query_status_packet_size = pm_get_query_status_packet_size_vi,
+- .get_release_mem_packet_size = pm_get_release_mem_packet_size_vi,
+-};
+-
+-void kfd_pm_func_init_cik(struct packet_manager *pm, uint16_t fw_ver)
+-{
+- if (fw_ver >= KFD_SCRATCH_KV_FW_VER)
+- pm->pmf = &kfd_cik_scratch_pm_funcs;
+- else
+- pm->pmf = &kfd_cik_pm_funcs;
+-}
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
+index 5fe4f60..b53e5ee 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
+@@ -370,7 +370,7 @@ static struct packet_manager_funcs kfd_v9_pm_funcs = {
+ .get_release_mem_packet_size = pm_get_release_mem_packet_size_v9,
+ };
+
+-void kfd_pm_func_init_v9(struct packet_manager *pm, uint16_t fw_ver)
++void kfd_pm_func_init_v9(struct packet_manager *pm)
+ {
+ pm->pmf = &kfd_v9_pm_funcs;
+ }
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
+index 13ff604d..e798873 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
+@@ -67,7 +67,7 @@ static void submit_packet_vi(struct kernel_queue *kq)
+ kq->pending_wptr);
+ }
+
+-int pm_map_process_vi(struct packet_manager *pm,
++static int pm_map_process_vi(struct packet_manager *pm,
+ uint32_t *buffer, struct qcm_process_device *qpd)
+ {
+ struct pm4_mes_map_process *packet;
+@@ -112,7 +112,7 @@ unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size)
+ return header.u32All;
+ }
+
+-int pm_runlist_vi(struct packet_manager *pm, uint32_t *buffer,
++static int pm_runlist_vi(struct packet_manager *pm, uint32_t *buffer,
+ uint64_t ib, size_t ib_size_in_dwords, bool chain)
+ {
+ struct pm4_mes_runlist *packet;
+@@ -150,7 +150,7 @@ int pm_runlist_vi(struct packet_manager *pm, uint32_t *buffer,
+ return 0;
+ }
+
+-int pm_map_queues_vi(struct packet_manager *pm, uint32_t *buffer,
++static int pm_map_queues_vi(struct packet_manager *pm, uint32_t *buffer,
+ struct queue *q, bool is_static)
+ {
+ struct pm4_mes_map_queues *packet;
+@@ -237,7 +237,7 @@ int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer,
+ return 0;
+ }
+
+-int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer,
++static int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer,
+ enum kfd_queue_type type,
+ enum kfd_unmap_queues_filter filter,
+ uint32_t filter_param, bool reset,
+@@ -302,7 +302,7 @@ int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer,
+
+ }
+
+-int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer,
++static int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer,
+ uint64_t fence_address, uint32_t fence_value)
+ {
+ struct pm4_mes_query_status *packet;
+@@ -329,7 +329,7 @@ int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer,
+ }
+
+
+-uint32_t pm_release_mem_vi(uint64_t gpu_addr, uint32_t *buffer)
++static uint32_t pm_release_mem_vi(uint64_t gpu_addr, uint32_t *buffer)
+ {
+ struct pm4_mec_release_mem *packet;
+
+@@ -358,12 +358,12 @@ uint32_t pm_release_mem_vi(uint64_t gpu_addr, uint32_t *buffer)
+ return sizeof(struct pm4_mec_release_mem) / sizeof(unsigned int);
+ }
+
+-uint32_t pm_get_map_process_packet_size_vi(void)
++static uint32_t pm_get_map_process_packet_size_vi(void)
+ {
+ return sizeof(struct pm4_mes_map_process);
+ }
+
+-uint32_t pm_get_runlist_packet_size_vi(void)
++static uint32_t pm_get_runlist_packet_size_vi(void)
+ {
+ return sizeof(struct pm4_mes_runlist);
+ }
+@@ -373,22 +373,22 @@ uint32_t pm_get_set_resources_packet_size_vi(void)
+ return sizeof(struct pm4_mes_set_resources);
+ }
+
+-uint32_t pm_get_map_queues_packet_size_vi(void)
++static uint32_t pm_get_map_queues_packet_size_vi(void)
+ {
+ return sizeof(struct pm4_mes_map_queues);
+ }
+
+-uint32_t pm_get_unmap_queues_packet_size_vi(void)
++static uint32_t pm_get_unmap_queues_packet_size_vi(void)
+ {
+ return sizeof(struct pm4_mes_unmap_queues);
+ }
+
+-uint32_t pm_get_query_status_packet_size_vi(void)
++static uint32_t pm_get_query_status_packet_size_vi(void)
+ {
+ return sizeof(struct pm4_mes_query_status);
+ }
+
+-uint32_t pm_get_release_mem_packet_size_vi(void)
++static uint32_t pm_get_release_mem_packet_size_vi(void)
+ {
+ return sizeof(struct pm4_mec_release_mem);
+ }
+@@ -411,7 +411,7 @@ static struct packet_manager_funcs kfd_vi_pm_funcs = {
+ .get_release_mem_packet_size = pm_get_release_mem_packet_size_vi,
+ };
+
+-void kfd_pm_func_init_vi(struct packet_manager *pm, uint16_t fw_ver)
++void kfd_pm_func_init_vi(struct packet_manager *pm)
+ {
+ pm->pmf = &kfd_vi_pm_funcs;
+ }
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+index 98c89d2..8abefd7 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+@@ -217,8 +217,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
+ return retval;
+ }
+
+-int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm,
+- uint16_t fw_ver)
++int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
+ {
+ pm->dqm = dqm;
+ mutex_init(&pm->lock);
+@@ -232,18 +231,17 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm,
+ switch (pm->dqm->dev->device_info->asic_family) {
+ case CHIP_KAVERI:
+ case CHIP_HAWAII:
+- kfd_pm_func_init_cik(pm, fw_ver);
+- break;
++ /* PM4 packet structures on CIK are the same as on VI */
+ case CHIP_CARRIZO:
+ case CHIP_TONGA:
+ case CHIP_FIJI:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+- kfd_pm_func_init_vi(pm, fw_ver);
++ kfd_pm_func_init_vi(pm);
+ break;
+ case CHIP_VEGA10:
+ case CHIP_RAVEN:
+- kfd_pm_func_init_v9(pm, fw_ver);
++ kfd_pm_func_init_v9(pm);
+ break;
+ default:
+ BUG();
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index 795bec1..641ea82 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -986,8 +986,7 @@ struct packet_manager_funcs {
+
+ };
+
+-int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm,
+- uint16_t fw_ver);
++int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
+ void pm_uninit(struct packet_manager *pm);
+ int pm_send_set_resources(struct packet_manager *pm,
+ struct scheduling_resources *res);
+@@ -1004,36 +1003,12 @@ void pm_release_ib(struct packet_manager *pm);
+
+ /* Following PM funcs can be shared among CIK and VI */
+ unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size);
+-int pm_map_process_vi(struct packet_manager *pm,
+- uint32_t *buffer, struct qcm_process_device *qpd);
+-int pm_runlist_vi(struct packet_manager *pm, uint32_t *buffer,
+- uint64_t ib, size_t ib_size_in_dwords, bool chain);
+-int pm_map_queues_vi(struct packet_manager *pm, uint32_t *buffer,
+- struct queue *q, bool is_static);
+ int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer,
+ struct scheduling_resources *res);
+-int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer,
+- enum kfd_queue_type type,
+- enum kfd_unmap_queues_filter filter,
+- uint32_t filter_param, bool reset,
+- unsigned int sdma_engine);
+-int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer,
+- uint64_t fence_address, uint32_t fence_value);
+-uint32_t pm_release_mem_vi(uint64_t gpu_addr, uint32_t *buffer);
+-
+-uint32_t pm_get_map_process_packet_size_vi(void);
+-uint32_t pm_get_runlist_packet_size_vi(void);
+ uint32_t pm_get_set_resources_packet_size_vi(void);
+-uint32_t pm_get_map_queues_packet_size_vi(void);
+-uint32_t pm_get_unmap_queues_packet_size_vi(void);
+-uint32_t pm_get_query_status_packet_size_vi(void);
+-uint32_t pm_get_release_mem_packet_size_vi(void);
+-
+-
+-void kfd_pm_func_init_vi(struct packet_manager *pm, uint16_t fw_ver);
+-void kfd_pm_func_init_cik(struct packet_manager *pm, uint16_t fw_ver);
+
+-void kfd_pm_func_init_v9(struct packet_manager *pm, uint16_t fw_ver);
++void kfd_pm_func_init_vi(struct packet_manager *pm);
++void kfd_pm_func_init_v9(struct packet_manager *pm);
+
+
+ uint64_t kfd_get_number_elems(struct kfd_dev *kfd);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4155-drm-amdkfd-Put-packet-sizes-directly-into-packet_man.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4155-drm-amdkfd-Put-packet-sizes-directly-into-packet_man.patch
new file mode 100644
index 00000000..1d1518dd
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4155-drm-amdkfd-Put-packet-sizes-directly-into-packet_man.patch
@@ -0,0 +1,290 @@
+From b853e66d368457a681f3d0981e4eda1ab113dcdd Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Tue, 27 Mar 2018 15:50:08 -0400
+Subject: [PATCH 4155/5725] drm/amdkfd: Put packet sizes directly into
+ packet_manager_funcs
+
+This is more efficient than indirectly calling a size query function
+that just returns the constant size.
+
+Change-Id: Ifbab7d7ea74b66e7de56e061a2c8fa78cfc0db47
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c | 58 ++++++---------------
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c | 64 ++++++------------------
+ drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 20 ++++----
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 17 +++----
+ 4 files changed, 46 insertions(+), 113 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
+index b53e5ee..f311f13 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
+@@ -323,51 +323,21 @@ static uint32_t pm_release_mem_v9(uint64_t gpu_addr, uint32_t *buffer)
+ return sizeof(struct pm4_mec_release_mem) / sizeof(unsigned int);
+ }
+
+-static uint32_t pm_get_map_process_packet_size_v9(void)
+-{
+- return sizeof(struct pm4_mes_map_process);
+-}
+-
+-static uint32_t pm_get_runlist_packet_size_v9(void)
+-{
+- return sizeof(struct pm4_mes_runlist);
+-}
+-
+-static uint32_t pm_get_map_queues_packet_size_v9(void)
+-{
+- return sizeof(struct pm4_mes_map_queues);
+-}
+-
+-static uint32_t pm_get_unmap_queues_packet_size_v9(void)
+-{
+- return sizeof(struct pm4_mes_unmap_queues);
+-}
+-
+-static uint32_t pm_get_query_status_packet_size_v9(void)
+-{
+- return sizeof(struct pm4_mes_query_status);
+-}
+-
+-static uint32_t pm_get_release_mem_packet_size_v9(void)
+-{
+- return sizeof(struct pm4_mec_release_mem);
+-}
+-
+ static struct packet_manager_funcs kfd_v9_pm_funcs = {
+- .map_process = pm_map_process_v9,
+- .runlist = pm_runlist_v9,
+- .set_resources = pm_set_resources_vi,
+- .map_queues = pm_map_queues_v9,
+- .unmap_queues = pm_unmap_queues_v9,
+- .query_status = pm_query_status_v9,
+- .release_mem = pm_release_mem_v9,
+- .get_map_process_packet_size = pm_get_map_process_packet_size_v9,
+- .get_runlist_packet_size = pm_get_runlist_packet_size_v9,
+- .get_set_resources_packet_size = pm_get_set_resources_packet_size_vi,
+- .get_map_queues_packet_size = pm_get_map_queues_packet_size_v9,
+- .get_unmap_queues_packet_size = pm_get_unmap_queues_packet_size_v9,
+- .get_query_status_packet_size = pm_get_query_status_packet_size_v9,
+- .get_release_mem_packet_size = pm_get_release_mem_packet_size_v9,
++ .map_process = pm_map_process_v9,
++ .runlist = pm_runlist_v9,
++ .set_resources = pm_set_resources_vi,
++ .map_queues = pm_map_queues_v9,
++ .unmap_queues = pm_unmap_queues_v9,
++ .query_status = pm_query_status_v9,
++ .release_mem = pm_release_mem_v9,
++ .map_process_size = sizeof(struct pm4_mes_map_process),
++ .runlist_size = sizeof(struct pm4_mes_runlist),
++ .set_resources_size = sizeof(struct pm4_mes_set_resources),
++ .map_queues_size = sizeof(struct pm4_mes_map_queues),
++ .unmap_queues_size = sizeof(struct pm4_mes_unmap_queues),
++ .query_status_size = sizeof(struct pm4_mes_query_status),
++ .release_mem_size = sizeof(struct pm4_mec_release_mem)
+ };
+
+ void kfd_pm_func_init_v9(struct packet_manager *pm)
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
+index e798873..178c5d0 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
+@@ -358,57 +358,21 @@ static uint32_t pm_release_mem_vi(uint64_t gpu_addr, uint32_t *buffer)
+ return sizeof(struct pm4_mec_release_mem) / sizeof(unsigned int);
+ }
+
+-static uint32_t pm_get_map_process_packet_size_vi(void)
+-{
+- return sizeof(struct pm4_mes_map_process);
+-}
+-
+-static uint32_t pm_get_runlist_packet_size_vi(void)
+-{
+- return sizeof(struct pm4_mes_runlist);
+-}
+-
+-uint32_t pm_get_set_resources_packet_size_vi(void)
+-{
+- return sizeof(struct pm4_mes_set_resources);
+-}
+-
+-static uint32_t pm_get_map_queues_packet_size_vi(void)
+-{
+- return sizeof(struct pm4_mes_map_queues);
+-}
+-
+-static uint32_t pm_get_unmap_queues_packet_size_vi(void)
+-{
+- return sizeof(struct pm4_mes_unmap_queues);
+-}
+-
+-static uint32_t pm_get_query_status_packet_size_vi(void)
+-{
+- return sizeof(struct pm4_mes_query_status);
+-}
+-
+-static uint32_t pm_get_release_mem_packet_size_vi(void)
+-{
+- return sizeof(struct pm4_mec_release_mem);
+-}
+-
+-
+ static struct packet_manager_funcs kfd_vi_pm_funcs = {
+- .map_process = pm_map_process_vi,
+- .runlist = pm_runlist_vi,
+- .set_resources = pm_set_resources_vi,
+- .map_queues = pm_map_queues_vi,
+- .unmap_queues = pm_unmap_queues_vi,
+- .query_status = pm_query_status_vi,
+- .release_mem = pm_release_mem_vi,
+- .get_map_process_packet_size = pm_get_map_process_packet_size_vi,
+- .get_runlist_packet_size = pm_get_runlist_packet_size_vi,
+- .get_set_resources_packet_size = pm_get_set_resources_packet_size_vi,
+- .get_map_queues_packet_size = pm_get_map_queues_packet_size_vi,
+- .get_unmap_queues_packet_size = pm_get_unmap_queues_packet_size_vi,
+- .get_query_status_packet_size = pm_get_query_status_packet_size_vi,
+- .get_release_mem_packet_size = pm_get_release_mem_packet_size_vi,
++ .map_process = pm_map_process_vi,
++ .runlist = pm_runlist_vi,
++ .set_resources = pm_set_resources_vi,
++ .map_queues = pm_map_queues_vi,
++ .unmap_queues = pm_unmap_queues_vi,
++ .query_status = pm_query_status_vi,
++ .release_mem = pm_release_mem_vi,
++ .map_process_size = sizeof(struct pm4_mes_map_process),
++ .runlist_size = sizeof(struct pm4_mes_runlist),
++ .set_resources_size = sizeof(struct pm4_mes_set_resources),
++ .map_queues_size = sizeof(struct pm4_mes_map_queues),
++ .unmap_queues_size = sizeof(struct pm4_mes_unmap_queues),
++ .query_status_size = sizeof(struct pm4_mes_query_status),
++ .release_mem_size = sizeof(struct pm4_mec_release_mem)
+ };
+
+ void kfd_pm_func_init_vi(struct packet_manager *pm)
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+index 8abefd7..699352b 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+@@ -69,9 +69,9 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
+ pr_debug("Over subscribed runlist\n");
+ }
+
+- map_queue_size = pm->pmf->get_map_queues_packet_size();
++ map_queue_size = pm->pmf->map_queues_size;
+ /* calculate run list ib allocation size */
+- *rlib_size = process_count * pm->pmf->get_map_process_packet_size() +
++ *rlib_size = process_count * pm->pmf->map_process_size +
+ queue_count * map_queue_size;
+
+ /*
+@@ -79,7 +79,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
+ * when over subscription
+ */
+ if (*over_subscription)
+- *rlib_size += pm->pmf->get_runlist_packet_size();
++ *rlib_size += pm->pmf->runlist_size;
+
+ pr_debug("runlist ib size %d\n", *rlib_size);
+ }
+@@ -160,7 +160,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
+ return retval;
+
+ proccesses_mapped++;
+- inc_wptr(&rl_wptr, pm->pmf->get_map_process_packet_size(),
++ inc_wptr(&rl_wptr, pm->pmf->map_process_size,
+ alloc_size_bytes);
+
+ list_for_each_entry(kq, &qpd->priv_queue_list, list) {
+@@ -178,7 +178,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
+ return retval;
+
+ inc_wptr(&rl_wptr,
+- pm->pmf->get_map_queues_packet_size(),
++ pm->pmf->map_queues_size,
+ alloc_size_bytes);
+ }
+
+@@ -197,7 +197,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
+ return retval;
+
+ inc_wptr(&rl_wptr,
+- pm->pmf->get_map_queues_packet_size(),
++ pm->pmf->map_queues_size,
+ alloc_size_bytes);
+ }
+ }
+@@ -262,7 +262,7 @@ int pm_send_set_resources(struct packet_manager *pm,
+ uint32_t *buffer, size;
+ int retval = 0;
+
+- size = pm->pmf->get_set_resources_packet_size();
++ size = pm->pmf->set_resources_size;
+ mutex_lock(&pm->lock);
+ pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
+ size / sizeof(uint32_t),
+@@ -299,7 +299,7 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
+
+ pr_debug("runlist IB address: 0x%llX\n", rl_gpu_ib_addr);
+
+- packet_size_dwords = pm->pmf->get_runlist_packet_size() /
++ packet_size_dwords = pm->pmf->runlist_size /
+ sizeof(uint32_t);
+ mutex_lock(&pm->lock);
+
+@@ -337,7 +337,7 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
+ if (WARN_ON(!fence_address))
+ return -EFAULT;
+
+- size = pm->pmf->get_query_status_packet_size();
++ size = pm->pmf->query_status_size;
+ mutex_lock(&pm->lock);
+ pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
+ size / sizeof(uint32_t), (unsigned int **)&buffer);
+@@ -366,7 +366,7 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
+ uint32_t *buffer, size;
+ int retval = 0;
+
+- size = pm->pmf->get_unmap_queues_packet_size();
++ size = pm->pmf->unmap_queues_size;
+ mutex_lock(&pm->lock);
+ pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
+ size / sizeof(uint32_t), (unsigned int **)&buffer);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index 641ea82..0c2fa89 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -976,14 +976,14 @@ struct packet_manager_funcs {
+ uint64_t fence_address, uint32_t fence_value);
+ uint32_t (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);
+
+- uint32_t (*get_map_process_packet_size)(void);
+- uint32_t (*get_runlist_packet_size)(void);
+- uint32_t (*get_set_resources_packet_size)(void);
+- uint32_t (*get_map_queues_packet_size)(void);
+- uint32_t (*get_unmap_queues_packet_size)(void);
+- uint32_t (*get_query_status_packet_size)(void);
+- uint32_t (*get_release_mem_packet_size)(void);
+-
++ /* Packet sizes */
++ int map_process_size;
++ int runlist_size;
++ int set_resources_size;
++ int map_queues_size;
++ int unmap_queues_size;
++ int query_status_size;
++ int release_mem_size;
+ };
+
+ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
+@@ -1005,7 +1005,6 @@ void pm_release_ib(struct packet_manager *pm);
+ unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size);
+ int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer,
+ struct scheduling_resources *res);
+-uint32_t pm_get_set_resources_packet_size_vi(void);
+
+ void kfd_pm_func_init_vi(struct packet_manager *pm);
+ void kfd_pm_func_init_v9(struct packet_manager *pm);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4156-drm-amdkfd-GPU-recovery-support-from-KFD-step-1.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4156-drm-amdkfd-GPU-recovery-support-from-KFD-step-1.patch
new file mode 100644
index 00000000..e08063f5
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4156-drm-amdkfd-GPU-recovery-support-from-KFD-step-1.patch
@@ -0,0 +1,150 @@
+From 60aacf851af2af7e39ff8782112d59093dbbc11b Mon Sep 17 00:00:00 2001
+From: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Date: Wed, 28 Feb 2018 11:46:32 -0500
+Subject: [PATCH 4156/5725] drm/amdkfd: GPU recovery support from KFD (step 1)
+
+Lock KFD and evict existing queues on reset
+
+Change-Id: I0f0526b5beac68bd7a96ead58b95a57d4f7f8b13
+Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 5 ++++
+ drivers/gpu/drm/amd/amdkfd/kfd_device.c | 43 +++++++++++++++++++++++++++++---
+ drivers/gpu/drm/amd/amdkfd/kfd_events.c | 5 ++++
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 4 +++
+ 4 files changed, 54 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index 73aec76..fd62468 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -136,6 +136,11 @@ static int kfd_open(struct inode *inode, struct file *filep)
+ if (IS_ERR(process))
+ return PTR_ERR(process);
+
++ if (kfd_is_locked()) {
++ kfd_unref_process(process);
++ return -EAGAIN;
++ }
++
+ dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
+ process->pasid, process->is_32bit_user_mode);
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+index a9ad2a8..768373f 100755
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -32,7 +32,13 @@
+ #include "kfd_iommu.h"
+
+ #define MQD_SIZE_ALIGNED 768
+-static atomic_t kfd_device_suspended = ATOMIC_INIT(0);
++
++/*
++ * kfd_locked is used to lock the KFD driver during suspend or reset.
++ * Once locked, the KFD driver will stop any further GPU execution;
++ * process creation (open) will return -EAGAIN.
++ */
++static atomic_t kfd_locked = ATOMIC_INIT(0);
+
+ #ifdef KFD_SUPPORT_IOMMU_V2
+ static const struct kfd_device_info kaveri_device_info = {
+@@ -549,21 +555,52 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
+
+ int kgd2kfd_pre_reset(struct kfd_dev *kfd)
+ {
++ if (!kfd->init_complete)
++ return 0;
++ kgd2kfd_suspend(kfd);
++
++	/* hold dqm->lock to prevent further execution */
++ mutex_lock(&kfd->dqm->lock);
++
++ kfd_signal_reset_event(kfd);
+ return 0;
+ }
+
++/*
++ * FIXME: KFD won't be able to resume existing processes for now.
++ * We will keep all existing processes in an evicted state and
++ * wait for the processes to be terminated.
++ */
++
+ int kgd2kfd_post_reset(struct kfd_dev *kfd)
+ {
++ int ret, count;
++
++ if (!kfd->init_complete)
++ return 0;
++
++ mutex_unlock(&kfd->dqm->lock);
++
++ ret = kfd_resume(kfd);
++ if (ret)
++ return ret;
++ count = atomic_dec_return(&kfd_locked);
++ WARN_ONCE(count != 0, "KFD reset ref. error");
+ return 0;
+ }
+
++bool kfd_is_locked(void)
++{
++ return (atomic_read(&kfd_locked) > 0);
++}
++
+ void kgd2kfd_suspend(struct kfd_dev *kfd)
+ {
+ if (!kfd->init_complete)
+ return;
+
+ /* For first KFD device suspend all the KFD processes */
+- if (atomic_inc_return(&kfd_device_suspended) == 1)
++ if (atomic_inc_return(&kfd_locked) == 1)
+ kfd_suspend_all_processes();
+
+ kfd->dqm->ops.stop(kfd->dqm);
+@@ -582,7 +619,7 @@ int kgd2kfd_resume(struct kfd_dev *kfd)
+ if (ret)
+ return ret;
+
+- count = atomic_dec_return(&kfd_device_suspended);
++ count = atomic_dec_return(&kfd_locked);
+ WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
+ if (count == 0)
+ ret = kfd_resume_all_processes();
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+index 644ce9d..09c1c31 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+@@ -1009,3 +1009,8 @@ void kfd_signal_vm_fault_event(struct kfd_dev *dev, unsigned int pasid,
+ mutex_unlock(&p->event_mutex);
+ kfd_unref_process(p);
+ }
++
++void kfd_signal_reset_event(struct kfd_dev *dev)
++{
++ /*todo*/
++}
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index 0c2fa89..0a019a6 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -1044,10 +1044,14 @@ int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);
+ void kfd_signal_vm_fault_event(struct kfd_dev *dev, unsigned int pasid,
+ struct kfd_vm_fault_info *info);
+
++void kfd_signal_reset_event(struct kfd_dev *dev);
++
+ void kfd_flush_tlb(struct kfd_process_device *pdd);
+
+ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p);
+
++bool kfd_is_locked(void);
++
+ #define KFD_SCRATCH_KV_FW_VER 413
+
+ /* PeerDirect support */
+--
+2.7.4
+
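The kfd_locked counter above blocks new /dev/kfd opens while a suspend or reset is in flight: the first locker evicts running work, later lockers just bump the count, and open() fails with -EAGAIN until the count drops back to zero. A minimal user-space sketch of that gating pattern follows; the names (drv_locked, drv_open, and so on) are hypothetical and this is not part of the patch series.

#include <errno.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int drv_locked;                /* >0 while suspended or resetting */

static bool drv_is_locked(void)
{
        return atomic_load(&drv_locked) > 0;
}

static int drv_open(void)
{
        if (drv_is_locked())
                return -EAGAIN;              /* refuse new clients during reset */
        return 0;                            /* normal open path */
}

static void drv_suspend(void)
{
        /* The first locker would evict running work, akin to
         * kfd_suspend_all_processes() in the patch above. */
        atomic_fetch_add(&drv_locked, 1);
}

static void drv_resume(void)
{
        atomic_fetch_sub(&drv_locked, 1);
}

int main(void)
{
        drv_suspend();
        printf("open during reset: %d\n", drv_open());   /* -EAGAIN (-11 on Linux) */
        drv_resume();
        printf("open after resume: %d\n", drv_open());   /* 0 */
        return 0;
}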
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4157-drm-amdkfd-signal-hw_exception-event-on-GPU-reset.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4157-drm-amdkfd-signal-hw_exception-event-on-GPU-reset.patch
new file mode 100644
index 00000000..84f38194
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4157-drm-amdkfd-signal-hw_exception-event-on-GPU-reset.patch
@@ -0,0 +1,82 @@
+From a8850728a13479e09bde246d3399c8ccc90a48bc Mon Sep 17 00:00:00 2001
+From: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Date: Tue, 3 Apr 2018 16:11:00 -0400
+Subject: [PATCH 4157/5725] drm/amdkfd: signal hw_exception event on GPU reset
+
+Change-Id: I8fae18208103920796f81858f359a9cec563125c
+Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_events.c | 24 +++++++++++++++++++++++-
+ drivers/gpu/drm/amd/amdkfd/kfd_events.h | 1 +
+ include/uapi/linux/kfd_ioctl.h | 8 ++++++++
+ 3 files changed, 32 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+index 09c1c31..24d8a21 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+@@ -1012,5 +1012,27 @@ void kfd_signal_vm_fault_event(struct kfd_dev *dev, unsigned int pasid,
+
+ void kfd_signal_reset_event(struct kfd_dev *dev)
+ {
+- /*todo*/
++ struct kfd_hsa_hw_exception_data hw_exception_data;
++ struct kfd_process *p;
++ struct kfd_event *ev;
++ unsigned int temp;
++ uint32_t id, idx;
++
++	/* Whole GPU reset caused by a GPU hang; memory is lost */
++ memset(&hw_exception_data, 0, sizeof(hw_exception_data));
++ hw_exception_data.gpu_id = dev->id;
++ hw_exception_data.memory_lost = 1;
++
++ idx = srcu_read_lock(&kfd_processes_srcu);
++ hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
++ mutex_lock(&p->event_mutex);
++ id = KFD_FIRST_NONSIGNAL_EVENT_ID;
++ idr_for_each_entry_continue(&p->event_idr, ev, id)
++ if (ev->type == KFD_EVENT_TYPE_HW_EXCEPTION) {
++ ev->hw_exception_data = hw_exception_data;
++ set_event(ev);
++ }
++ mutex_unlock(&p->event_mutex);
++ }
++ srcu_read_unlock(&kfd_processes_srcu, idx);
+ }
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.h b/drivers/gpu/drm/amd/amdkfd/kfd_events.h
+index abca5bf..c7ac6c7 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.h
+@@ -66,6 +66,7 @@ struct kfd_event {
+ /* type specific data */
+ union {
+ struct kfd_hsa_memory_exception_data memory_exception_data;
++ struct kfd_hsa_hw_exception_data hw_exception_data;
+ };
+ };
+
+diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
+index ec0574e..85d833e 100644
+--- a/include/uapi/linux/kfd_ioctl.h
++++ b/include/uapi/linux/kfd_ioctl.h
+@@ -251,6 +251,14 @@ struct kfd_hsa_memory_exception_data {
+ uint32_t pad;
+ };
+
++/* hw exception data */
++struct kfd_hsa_hw_exception_data {
++ uint32_t reset_type;
++ uint32_t reset_cause;
++ uint32_t memory_lost;
++ uint32_t gpu_id;
++};
++
+ /* Event data */
+ struct kfd_event_data {
+ union {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4158-drm-amdkfd-remove-check-for-PCIe-upstream-bridge.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4158-drm-amdkfd-remove-check-for-PCIe-upstream-bridge.patch
new file mode 100644
index 00000000..9047efbe
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4158-drm-amdkfd-remove-check-for-PCIe-upstream-bridge.patch
@@ -0,0 +1,192 @@
+From b24f7a1d9fbb22a99f9ddef8dff05916fbc10ab6 Mon Sep 17 00:00:00 2001
+From: welu <Wei.Lu2@amd.com>
+Date: Wed, 4 Apr 2018 11:44:04 -0400
+Subject: [PATCH 4158/5725] drm/amdkfd: remove check for PCIe upstream bridge
+
+Atomic support for GFX9 GPUs:
+1. Set vega10 needs_pci_atomics to false because Vega10 does not need
+PCIe atomics.
+2. First try to enable atomics in pci_enable_atomic_ops_to_root(),
+and if this call fails while needs_pci_atomics is true,
+report the error and return NULL.
+
+Bug:SWDEV-149359
+
+Change-Id: I71cbbe63cb1f03f606f8f4b5e4b8c796e164e0d1
+Signed-off-by: welu <Wei.Lu2@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_device.c | 17 ++++++----
+ drivers/pci/pci.c | 57 +++++++++++++++------------------
+ include/linux/pci.h | 2 +-
+ 3 files changed, 37 insertions(+), 39 deletions(-)
+ mode change 100755 => 100644 drivers/gpu/drm/amd/amdkfd/kfd_device.c
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+old mode 100755
+new mode 100644
+index 768373f..4ee56ab
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -222,7 +222,7 @@ static const struct kfd_device_info vega10_device_info = {
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .supports_cwsr = true,
+ .needs_iommu_device = false,
+- .needs_pci_atomics = true,
++ .needs_pci_atomics = false,
+ .num_sdma_engines = 2,
+ };
+
+@@ -358,7 +358,7 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
+ struct pci_dev *pdev, const struct kfd2kgd_calls *f2g)
+ {
+ struct kfd_dev *kfd;
+-
++ int ret;
+ const struct kfd_device_info *device_info =
+ lookup_device_info(pdev->device);
+
+@@ -372,11 +372,14 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
+ * 32 and 64-bit requests are possible and must be
+ * supported.
+ */
+- if (pci_enable_atomic_ops_to_root(pdev) < 0) {
+- dev_info(kfd_device,
+- "skipped device %x:%x, PCI rejects atomics",
+- pdev->vendor, pdev->device);
+- return NULL;
++ ret = pci_enable_atomic_ops_to_root(pdev,
++ PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
++ PCI_EXP_DEVCAP2_ATOMIC_COMP64);
++ if (device_info->needs_pci_atomics && ret < 0) {
++ dev_info(kfd_device,
++ "skipped device %x:%x, PCI rejects atomics",
++ pdev->vendor, pdev->device);
++ return NULL;
+ }
+ }
+
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 06dfd52..1dde9da 100755
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -2985,24 +2985,34 @@ bool pci_acs_path_enabled(struct pci_dev *start,
+ /**
+ * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
+ * @dev: the PCI device
++ * @cap_mask: mask of desired AtomicOp sizes, including one or more of:
++ * PCI_EXP_DEVCAP2_ATOMIC_COMP32
++ * PCI_EXP_DEVCAP2_ATOMIC_COMP64
++ * PCI_EXP_DEVCAP2_ATOMIC_COMP128
++ *
++ * Return 0 if all upstream bridges support AtomicOp routing, egress
++ * blocking is disabled on all upstream ports, and the root port supports
++ * the requested completion capabilities (32-bit, 64-bit and/or 128-bit
++ * AtomicOp completion), or negative otherwise.
+ *
+- * Return 0 if the device is capable of generating AtomicOp requests,
+- * all upstream bridges support AtomicOp routing, egress blocking is disabled
+- * on all upstream ports, and the root port supports 32-bit, 64-bit and/or
+- * 128-bit AtomicOp completion, or negative otherwise.
+ */
+-int pci_enable_atomic_ops_to_root(struct pci_dev *dev)
++int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
+ {
+ struct pci_bus *bus = dev->bus;
++ struct pci_dev *bridge;
++ u32 cap, ctl2;
+
+ if (!pci_is_pcie(dev))
+ return -EINVAL;
+
+- switch (pci_pcie_type(dev)) {
+ /*
+- * PCIe 3.0, 6.15 specifies that endpoints and root ports are permitted
+- * to implement AtomicOp requester capabilities.
+- */
++ * Per PCIe r4.0, sec 6.15, endpoints and root ports may be
++ * AtomicOp requesters. For now, we only support endpoints as
++ * requesters and root ports as completers. No endpoints as
++ * completers, and no peer-to-peer.
++ */
++
++ switch (pci_pcie_type(dev)) {
+ case PCI_EXP_TYPE_ENDPOINT:
+ case PCI_EXP_TYPE_LEG_END:
+ case PCI_EXP_TYPE_RC_END:
+@@ -3012,44 +3022,30 @@ int pci_enable_atomic_ops_to_root(struct pci_dev *dev)
+ }
+
+ while (bus->parent) {
+- struct pci_dev *bridge = bus->self;
+- u32 cap;
++ bridge = bus->self;
+
+ pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
+
+ switch (pci_pcie_type(bridge)) {
+- /*
+- * Upstream, downstream and root ports may implement AtomicOp
+- * routing capabilities. AtomicOp routing via a root port is
+- * not considered.
+- */
++ /* Ensure switch ports support AtomicOp routing */
+ case PCI_EXP_TYPE_UPSTREAM:
+ case PCI_EXP_TYPE_DOWNSTREAM:
+ if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
+ return -EINVAL;
+ break;
+
+- /*
+- * Root ports are permitted to implement AtomicOp completion
+- * capabilities.
+- */
++ /* Ensure root port supports all the sizes we care about */
+ case PCI_EXP_TYPE_ROOT_PORT:
+- if (!(cap & (PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
+- PCI_EXP_DEVCAP2_ATOMIC_COMP64 |
+- PCI_EXP_DEVCAP2_ATOMIC_COMP128)))
++ if ((cap & cap_mask) != cap_mask)
+ return -EINVAL;
+ break;
+ }
+
+- /*
+- * Upstream ports may block AtomicOps on egress.
+- */
+- if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) {
+- u32 ctl2;
+-
++ /* Ensure upstream ports don't block AtomicOps on egress */
++ if (!bridge->has_secondary_link) {
+ pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
+ &ctl2);
+- if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_BLOCK)
++ if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
+ return -EINVAL;
+ }
+
+@@ -3058,7 +3054,6 @@ int pci_enable_atomic_ops_to_root(struct pci_dev *dev)
+
+ pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_ATOMIC_REQ);
+-
+ return 0;
+ }
+ EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index 339f5b7..76a681f 100755
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -2075,7 +2075,7 @@ void pci_request_acs(void);
+ bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
+ bool pci_acs_path_enabled(struct pci_dev *start,
+ struct pci_dev *end, u16 acs_flags);
+-int pci_enable_atomic_ops_to_root(struct pci_dev *dev);
++int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask);
+
+ #define PCI_VPD_LRDT 0x80 /* Large Resource Data Type */
+ #define PCI_VPD_LRDT_ID(x) ((x) | PCI_VPD_LRDT)
+--
+2.7.4
+
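The reworked pci_enable_atomic_ops_to_root() above only succeeds when the root port advertises every completer size the caller asked for, i.e. (cap & cap_mask) == cap_mask. Below is a small, self-contained sketch of just that mask check; the bit values are meant to mirror the PCIe DEVCAP2 completer bits, but the program itself is hypothetical user-space code, not part of the patch.

#include <stdio.h>
#include <stdint.h>

/* Intended to mirror PCI_EXP_DEVCAP2_ATOMIC_COMP{32,64,128}. */
#define ATOMIC_COMP32   0x0080u
#define ATOMIC_COMP64   0x0100u
#define ATOMIC_COMP128  0x0200u

/* A root port passes only if it advertises every requested size. */
static int root_port_ok(uint32_t devcap2, uint32_t cap_mask)
{
        return (devcap2 & cap_mask) == cap_mask;
}

int main(void)
{
        uint32_t devcap2 = ATOMIC_COMP32 | ATOMIC_COMP64;   /* no 128-bit support */

        printf("32+64-bit requested: %s\n",
               root_port_ok(devcap2, ATOMIC_COMP32 | ATOMIC_COMP64) ? "ok" : "rejected");
        printf("128-bit requested:   %s\n",
               root_port_ok(devcap2, ATOMIC_COMP128) ? "ok" : "rejected");
        return 0;
}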
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4159-drm-amdgpu-Enable-the-gpu-reset-from-amdkfd.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4159-drm-amdgpu-Enable-the-gpu-reset-from-amdkfd.patch
new file mode 100644
index 00000000..83ef4324
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4159-drm-amdgpu-Enable-the-gpu-reset-from-amdkfd.patch
@@ -0,0 +1,59 @@
+From f5a2ef7a15db3b770068feb323030dd55046812a Mon Sep 17 00:00:00 2001
+From: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Date: Wed, 4 Apr 2018 16:11:14 -0400
+Subject: [PATCH 4159/5725] drm/amdgpu: Enable the gpu reset from amdkfd
+
+Change-Id: Ia5a9d69c2e6cfa65e9f8a7d344169697adc69f35
+Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 3 ++-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 3 ++-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 3 ++-
+ 3 files changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+index 84f8f71..b04471b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+@@ -228,7 +228,8 @@ static const struct kfd2kgd_calls kfd2kgd = {
+ .get_tile_config = amdgpu_amdkfd_get_tile_config,
+ .restore_process_bos = amdgpu_amdkfd_gpuvm_restore_process_bos,
+ .copy_mem_to_mem = amdgpu_amdkfd_copy_mem_to_mem,
+- .get_vram_usage = amdgpu_amdkfd_get_vram_usage
++ .get_vram_usage = amdgpu_amdkfd_get_vram_usage,
++ .gpu_recover = amdgpu_amdkfd_gpu_reset
+ };
+
+ struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions()
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+index dfd0026..d723ae2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+@@ -201,7 +201,8 @@ static const struct kfd2kgd_calls kfd2kgd = {
+ .get_tile_config = amdgpu_amdkfd_get_tile_config,
+ .restore_process_bos = amdgpu_amdkfd_gpuvm_restore_process_bos,
+ .copy_mem_to_mem = amdgpu_amdkfd_copy_mem_to_mem,
+- .get_vram_usage = amdgpu_amdkfd_get_vram_usage
++ .get_vram_usage = amdgpu_amdkfd_get_vram_usage,
++ .gpu_recover = amdgpu_amdkfd_gpu_reset
+ };
+
+ struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions()
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+index f044739..49291d6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+@@ -251,7 +251,8 @@ static const struct kfd2kgd_calls kfd2kgd = {
+ .get_tile_config = amdgpu_amdkfd_get_tile_config,
+ .restore_process_bos = amdgpu_amdkfd_gpuvm_restore_process_bos,
+ .copy_mem_to_mem = amdgpu_amdkfd_copy_mem_to_mem,
+- .get_vram_usage = amdgpu_amdkfd_get_vram_usage
++ .get_vram_usage = amdgpu_amdkfd_get_vram_usage,
++ .gpu_recover = amdgpu_amdkfd_gpu_reset
+ };
+
+ struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions()
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4160-drm-amdkfd-CMA-Refactor-CMA-code.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4160-drm-amdkfd-CMA-Refactor-CMA-code.patch
new file mode 100644
index 00000000..67343863
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4160-drm-amdkfd-CMA-Refactor-CMA-code.patch
@@ -0,0 +1,428 @@
+From 4c5de602a4b2e3a49fe96f5ab6403697209e0121 Mon Sep 17 00:00:00 2001
+From: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+Date: Thu, 22 Mar 2018 17:25:54 -0400
+Subject: [PATCH 4160/5725] drm/amdkfd: CMA: Refactor CMA code
+
+This is similar to process_vm_rw() functions. This refactoring is also
+helpful for the special handling of userptr BOs (upcoming commits).
+
+This commit does not change any functionality.
+
+v2: Fix potential fence leak
+
+Change-Id: Ic8f9c6a7599d2beac54d768831618df0207f10e9
+Signed-off-by: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+
+Conflicts:
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 329 +++++++++++++++++--------------
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 17 ++
+ 2 files changed, 203 insertions(+), 143 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index fd62468..ebb862b 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -1706,22 +1706,164 @@ static int kfd_ioctl_ipc_import_handle(struct file *filep,
+ return r;
+ }
+
++/* Update cma_iter.cur_bo with the KFD BO that is associated with
++ * cma_iter.array.va_addr
++ */
++static int kfd_cma_iter_update_bo(struct cma_iter *ci)
++{
++ struct kfd_memory_range *arr = ci->array;
++ uint64_t va_end = arr->va_addr + arr->size - 1;
++
++ mutex_lock(&ci->p->mutex);
++ ci->cur_bo = kfd_process_find_bo_from_interval(ci->p, arr->va_addr,
++ va_end);
++ mutex_unlock(&ci->p->mutex);
++
++ if (!ci->cur_bo || va_end > ci->cur_bo->it.last) {
++ pr_err("CMA failed. Range out of bounds\n");
++ return -EFAULT;
++ }
++ return 0;
++}
++
++/* Advance iter by @size bytes. */
++static int kfd_cma_iter_advance(struct cma_iter *ci, unsigned long size)
++{
++ int ret = 0;
++
++ ci->offset += size;
++ if (WARN_ON(size > ci->total || ci->offset > ci->array->size))
++ return -EFAULT;
++ ci->total -= size;
++ /* If current range is copied, move to next range if available. */
++ if (ci->offset == ci->array->size) {
++
++ /* End of all ranges */
++ if (!(--ci->nr_segs))
++ return 0;
++
++ ci->array++;
++ ci->offset = 0;
++ ret = kfd_cma_iter_update_bo(ci);
++ if (ret)
++ return ret;
++ }
++ ci->bo_offset = (ci->array->va_addr + ci->offset) -
++ ci->cur_bo->it.start;
++ return ret;
++}
++
++static int kfd_cma_iter_init(struct kfd_memory_range *arr, unsigned long segs,
++ struct kfd_process *p, struct cma_iter *ci)
++{
++ int ret;
++ int nr;
++
++ if (!arr || !segs)
++ return -EINVAL;
++
++ memset(ci, 0, sizeof(*ci));
++ ci->array = arr;
++ ci->nr_segs = segs;
++ ci->p = p;
++ ci->offset = 0;
++ for (nr = 0; nr < segs; nr++)
++ ci->total += arr[nr].size;
++
++ /* Valid but size is 0. So copied will also be 0 */
++ if (!ci->total)
++ return 0;
++
++ ret = kfd_cma_iter_update_bo(ci);
++ if (!ret)
++ ci->bo_offset = arr->va_addr - ci->cur_bo->it.start;
++ return ret;
++}
++
++static bool kfd_cma_iter_end(struct cma_iter *ci)
++{
++ if (!(ci->nr_segs) || !(ci->total))
++ return true;
++ return false;
++}
++
++/* Copy single range from source iterator @si to destination iterator @di.
++ * @si will move to next range and @di will move by bytes copied.
++ * @return : 0 for success or -ve for failure
++ * @f: The last fence if any
++ * @copied: out: number of bytes copied
++ */
++static int kfd_copy_single_range(struct cma_iter *si, struct cma_iter *di,
++ bool cma_write, struct dma_fence **f,
++ uint64_t *copied)
++{
++ int err = 0;
++ uint64_t copy_size, n;
++ uint64_t size = si->array->size;
++ struct kfd_bo *src_bo = si->cur_bo;
++ struct dma_fence *lfence = NULL;
++
++ if (!src_bo || !di || !copied)
++ return -EINVAL;
++ *copied = 0;
++ if (f)
++ *f = NULL;
++
++ while (size && !kfd_cma_iter_end(di)) {
++ struct dma_fence *fence = NULL;
++ struct kfd_bo *dst_bo = di->cur_bo;
++
++ copy_size = min(size, (di->array->size - di->offset));
++
++ /* Check both BOs belong to same device */
++ if (src_bo->dev->kgd != dst_bo->dev->kgd) {
++ pr_err("CMA fail. Not same dev\n");
++ return -EINVAL;
++ }
++
++ err = dst_bo->dev->kfd2kgd->copy_mem_to_mem(src_bo->dev->kgd,
++ src_bo->mem, si->bo_offset, dst_bo->mem, di->bo_offset,
++ copy_size, &fence, &n);
++ if (err) {
++ pr_err("GPU CMA %d failed\n", err);
++ break;
++ }
++
++ if (fence) {
++ dma_fence_put(lfence);
++ lfence = fence;
++ }
++ size -= n;
++ *copied += n;
++ err = kfd_cma_iter_advance(si, n);
++ if (err)
++ break;
++ err = kfd_cma_iter_advance(di, n);
++ if (err)
++ break;
++ }
++
++ if (f)
++ *f = dma_fence_get(lfence);
++ dma_fence_put(lfence);
++
++ return err;
++}
++
+ static int kfd_ioctl_cross_memory_copy(struct file *filep,
+ struct kfd_process *local_p, void *data)
+ {
+ struct kfd_ioctl_cross_memory_copy_args *args = data;
+ struct kfd_memory_range *src_array, *dst_array;
+- struct kfd_bo *src_bo, *dst_bo;
+- struct kfd_process *remote_p, *src_p, *dst_p;
++ struct kfd_process *remote_p;
+ struct task_struct *remote_task;
+ struct mm_struct *remote_mm;
+ struct pid *remote_pid;
+- struct dma_fence *fence = NULL, *lfence = NULL;
+- uint64_t dst_va_addr;
+- uint64_t copied, total_copied = 0;
+- uint64_t src_offset, dst_offset, dst_va_addr_end;
++ struct dma_fence *lfence = NULL;
++ uint64_t copied = 0, total_copied = 0;
++ struct cma_iter di, si;
+ const char *cma_op;
+- int i, j = 0, err = 0;
++ int err = 0;
+
+ /* Check parameters */
+ if (args->src_mem_range_array == 0 || args->dst_mem_range_array == 0 ||
+@@ -1787,160 +1929,61 @@ static int kfd_ioctl_cross_memory_copy(struct file *filep,
+ err = -EINVAL;
+ goto kfd_process_fail;
+ }
+-
++	/* Initialise cma_iter @si and @di with the source and destination ranges. */
+ if (KFD_IS_CROSS_MEMORY_WRITE(args->flags)) {
+- src_p = local_p;
+- dst_p = remote_p;
+ cma_op = "WRITE";
+ pr_debug("CMA WRITE: local -> remote\n");
++ err = kfd_cma_iter_init(dst_array, args->dst_mem_array_size,
++ remote_p, &di);
++ if (err)
++ goto kfd_process_fail;
++ err = kfd_cma_iter_init(src_array, args->src_mem_array_size,
++ local_p, &si);
++ if (err)
++ goto kfd_process_fail;
+ } else {
+- src_p = remote_p;
+- dst_p = local_p;
+ cma_op = "READ";
+ pr_debug("CMA READ: remote -> local\n");
+- }
+
++ err = kfd_cma_iter_init(dst_array, args->dst_mem_array_size,
++ local_p, &di);
++ if (err)
++ goto kfd_process_fail;
++ err = kfd_cma_iter_init(src_array, args->src_mem_array_size,
++ remote_p, &si);
++ if (err)
++ goto kfd_process_fail;
++ }
+
+- /* For each source kfd_range:
+- * - Find the BO. Each range has to be within the same BO.
+- * - Copy this range to single or multiple destination BOs.
+- * - dst_va_addr - will point to next va address into which data will
+- * be copied.
+- * - dst_bo & src_bo - the current destination and source BOs
+- * - src_offset & dst_offset - offset into the respective BOs from
+- * data will be sourced or copied
++ /* Copy one si range at a time into di. After each call to
++ * kfd_copy_single_range() si will move to next range. di will be
++ * incremented by bytes copied
+ */
+- dst_va_addr = dst_array[0].va_addr;
+- dst_va_addr_end = dst_va_addr + dst_array[0].size - 1;
+- mutex_lock(&dst_p->mutex);
+- dst_bo = kfd_process_find_bo_from_interval(dst_p,
+- dst_va_addr,
+- dst_va_addr_end);
+- mutex_unlock(&dst_p->mutex);
+- if (!dst_bo || dst_va_addr_end > dst_bo->it.last) {
+- pr_err("CMA %s failed. Invalid dst range\n", cma_op);
+- err = -EFAULT;
+- goto kfd_process_fail;
+- }
+- dst_offset = dst_va_addr - dst_bo->it.start;
+-
+- for (i = 0; i < args->src_mem_array_size; i++) {
+- uint64_t src_va_addr_end = src_array[i].va_addr +
+- src_array[i].size - 1;
+- uint64_t src_size_to_copy = src_array[i].size;
+-
+- mutex_lock(&src_p->mutex);
+- src_bo = kfd_process_find_bo_from_interval(src_p,
+- src_array[i].va_addr,
+- src_va_addr_end);
+- mutex_unlock(&src_p->mutex);
+- if (!src_bo || src_va_addr_end > src_bo->it.last) {
+- pr_err("CMA %s failed. Invalid src range\n", cma_op);
+- err = -EFAULT;
+- break;
+- }
++ while (!kfd_cma_iter_end(&si) && !kfd_cma_iter_end(&di)) {
++ struct dma_fence *fence = NULL;
+
+- src_offset = src_array[i].va_addr - src_bo->it.start;
++ err = kfd_copy_single_range(&si, &di,
++ KFD_IS_CROSS_MEMORY_WRITE(args->flags),
++ &fence, &copied);
++ total_copied += copied;
+
+- /* Copy src_bo to one or multiple dst_bo(s) based on size and
+- * and current copy location.
+- */
+- while (j < args->dst_mem_array_size) {
+- uint64_t copy_size;
+- int64_t space_left;
+-
+- /* Find the current copy_size. This will be smaller of
+- * the following
+- * - space left in the current dest memory range
+- * - data left to copy from source range
+- */
+- space_left = (dst_array[j].va_addr + dst_array[j].size)
+- - dst_va_addr;
+- copy_size = (src_size_to_copy < space_left) ?
+- src_size_to_copy : space_left;
+-
+- /* Check both BOs belong to same device */
+- if (src_bo->dev->kgd != dst_bo->dev->kgd) {
+- pr_err("CMA %s fail. Not same dev\n", cma_op);
+- err = -EINVAL;
+- break;
+- }
++ if (err)
++ break;
+
+- /* Store prev fence. Release it when a later fence is
+- * created
+- */
++		/* Release the old fence if a later fence is created. If no
++		 * new fence is created, then keep the previous fence
++ */
++ if (fence) {
++ dma_fence_put(lfence);
+ lfence = fence;
+- fence = NULL;
+-
+- err = dst_bo->dev->kfd2kgd->copy_mem_to_mem(
+- src_bo->dev->kgd,
+- src_bo->mem, src_offset,
+- dst_bo->mem, dst_offset,
+- copy_size,
+- &fence, &copied);
+-
+- if (err) {
+- pr_err("GPU CMA %s failed\n", cma_op);
+- break;
+- }
+-
+- /* Later fence available. Release old fence */
+- if (fence && lfence) {
+- dma_fence_put(lfence);
+- lfence = NULL;
+- }
+-
+- total_copied += copied;
+- src_size_to_copy -= copied;
+- space_left -= copied;
+- dst_va_addr += copied;
+- dst_offset += copied;
+- src_offset += copied;
+- if (dst_va_addr > dst_bo->it.last + 1) {
+- pr_err("CMA %s fail. Mem overflow\n", cma_op);
+- err = -EFAULT;
+- break;
+- }
+-
+- /* If the cur dest range is full move to next one */
+- if (space_left <= 0) {
+- if (++j >= args->dst_mem_array_size)
+- break;
+-
+- dst_va_addr = dst_array[j].va_addr;
+- dst_va_addr_end = dst_va_addr +
+- dst_array[j].size - 1;
+- dst_bo = kfd_process_find_bo_from_interval(
+- dst_p,
+- dst_va_addr,
+- dst_va_addr_end);
+- if (!dst_bo ||
+- dst_va_addr_end > dst_bo->it.last) {
+- pr_err("CMA %s failed. Invalid dst range\n",
+- cma_op);
+- err = -EFAULT;
+- break;
+- }
+- dst_offset = dst_va_addr - dst_bo->it.start;
+- }
+-
+- /* If the cur src range is done, move to next one */
+- if (src_size_to_copy <= 0)
+- break;
+ }
+- if (err)
+- break;
+ }
+
+ /* Wait for the last fence irrespective of error condition */
+- if (fence) {
+- if (dma_fence_wait_timeout(fence, false, msecs_to_jiffies(1000))
+- < 0)
++ if (lfence) {
++ if (dma_fence_wait_timeout(lfence, false,
++ msecs_to_jiffies(1000)) < 0)
+ pr_err("CMA %s failed. BO timed out\n", cma_op);
+- dma_fence_put(fence);
+- } else if (lfence) {
+- pr_debug("GPU copy fail. But wait for prev DMA to finish\n");
+- dma_fence_wait_timeout(lfence, true, msecs_to_jiffies(1000));
+ dma_fence_put(lfence);
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index 0a019a6..da61ae8 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -303,6 +303,23 @@ struct kfd_bo {
+ struct kfd_ipc_obj *kfd_ipc_obj;
+ };
+
++/* Similar to iov_iter */
++struct cma_iter {
++ /* points to current entry of range array */
++ struct kfd_memory_range *array;
++ /* total number of entries in the initial array */
++ unsigned long nr_segs;
++	/* total amount of data pointed to by the kfd array */
++ unsigned long total;
++ /* offset into the entry pointed by cma_iter.array */
++ unsigned long offset;
++ struct kfd_process *p;
++ /* current kfd_bo associated with cma_iter.array.va_addr */
++ struct kfd_bo *cur_bo;
++ /* offset w.r.t cur_bo */
++ unsigned long bo_offset;
++};
++
+ /* KGD2KFD callbacks */
+ void kgd2kfd_exit(void);
+ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
+--
+2.7.4
+
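The cma_iter introduced above behaves much like an iov_iter: it walks an array of virtual address ranges and tracks how far the copy has progressed, so the main loop can copy one source range at a time into whatever destination ranges are left. The following stripped-down user-space model only illustrates that control flow; all names are made up and memcpy stands in for the GPU copy.

#include <stdio.h>
#include <string.h>

struct range { char *addr; size_t size; };

struct iter {
        struct range *arr;      /* current range */
        size_t nr_segs;         /* ranges left, including the current one */
        size_t offset;          /* offset into the current range */
        size_t total;           /* bytes left overall */
};

static void iter_init(struct iter *it, struct range *arr, size_t segs)
{
        it->arr = arr; it->nr_segs = segs; it->offset = 0; it->total = 0;
        for (size_t i = 0; i < segs; i++)
                it->total += arr[i].size;
}

static int iter_end(const struct iter *it)
{
        return !it->nr_segs || !it->total;
}

static void iter_advance(struct iter *it, size_t n)
{
        it->offset += n;
        it->total  -= n;
        if (it->offset == it->arr->size) {      /* range consumed: move to the next */
                it->arr++; it->nr_segs--; it->offset = 0;
        }
}

int main(void)
{
        char src1[] = "hello ", src2[] = "world";
        char dst[16] = {0};
        struct range s[] = { {src1, 6}, {src2, 5} };
        struct range d[] = { {dst, sizeof(dst)} };
        struct iter si, di;

        iter_init(&si, s, 2);
        iter_init(&di, d, 1);
        while (!iter_end(&si) && !iter_end(&di)) {
                size_t n = si.arr->size - si.offset;
                size_t room = di.arr->size - di.offset;
                if (n > room)
                        n = room;
                /* memcpy stands in for copy_mem_to_mem() in the patch */
                memcpy(di.arr->addr + di.offset, si.arr->addr + si.offset, n);
                iter_advance(&si, n);
                iter_advance(&di, n);
        }
        printf("%s\n", dst);    /* prints "hello world" */
        return 0;
}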
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4161-drm-amdkfd-CMA-Store-cpuva-in-KFD-BO.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4161-drm-amdkfd-CMA-Store-cpuva-in-KFD-BO.patch
new file mode 100644
index 00000000..0db3d337
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4161-drm-amdkfd-CMA-Store-cpuva-in-KFD-BO.patch
@@ -0,0 +1,120 @@
+From e29e43c08389af65610f248759519ba7f2b8b325 Mon Sep 17 00:00:00 2001
+From: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+Date: Thu, 22 Mar 2018 17:43:59 -0400
+Subject: [PATCH 4161/5725] drm/amdkfd: CMA: Store cpuva in KFD BO
+
+For userptr BOs, store the CPU VA in the KFD BO. This is needed to
+support CMA operations on userptr BOs.
+
+Change-Id: I95e96f487fbc64957ceaf3f2875bd773d2bf9970
+Signed-off-by: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 10 +++++++++-
+ drivers/gpu/drm/amd/amdkfd/kfd_ipc.c | 2 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 4 +++-
+ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 5 +++--
+ 4 files changed, 16 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index ebb862b..ef1bd27 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -1246,6 +1246,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
+ uint64_t offset = args->mmap_offset;
+ uint32_t flags = args->flags;
+ struct vm_area_struct *vma;
++ uint64_t cpuva = 0;
+
+ if (args->size == 0)
+ return -EINVAL;
+@@ -1275,6 +1276,13 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
+ flags |= KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL;
+ flags &= ~KFD_IOC_ALLOC_MEM_FLAGS_USERPTR;
+ offset = (pfn << PAGE_SHIFT);
++ } else {
++ if (offset & (PAGE_SIZE - 1)) {
++ pr_debug("Unaligned userptr address:%llx\n",
++ offset);
++ return -EINVAL;
++ }
++ cpuva = offset;
+ }
+ } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
+ if (args->size != kfd_doorbell_process_slice(dev))
+@@ -1299,7 +1307,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
+ goto err_unlock;
+
+ idr_handle = kfd_process_device_create_obj_handle(pdd, mem,
+- args->va_addr, args->size, NULL);
++ args->va_addr, args->size, cpuva, NULL);
+ if (idr_handle < 0) {
+ err = -EFAULT;
+ goto err_free;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_ipc.c b/drivers/gpu/drm/amd/amdkfd/kfd_ipc.c
+index 97806ed..845dbf7 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_ipc.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_ipc.c
+@@ -140,7 +140,7 @@ static int kfd_import_dmabuf_create_kfd_bo(struct kfd_dev *dev,
+ goto err_unlock;
+
+ idr_handle = kfd_process_device_create_obj_handle(pdd, mem,
+- va_addr, size,
++ va_addr, size, 0,
+ ipc_obj);
+ if (idr_handle < 0) {
+ r = -EFAULT;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index da61ae8..facd9d9 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -301,6 +301,8 @@ struct kfd_bo {
+ struct kfd_dev *dev;
+ struct list_head cb_data_head;
+ struct kfd_ipc_obj *kfd_ipc_obj;
++	/* page-aligned CPU virtual address */
++ uint64_t cpuva;
+ };
+
+ /* Similar to iov_iter */
+@@ -801,7 +803,7 @@ int kfd_reserved_mem_mmap(struct kfd_process *process,
+ /* KFD process API for creating and translating handles */
+ int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
+ void *mem, uint64_t start,
+- uint64_t length,
++ uint64_t length, uint64_t cpuva,
+ struct kfd_ipc_obj *ipc_obj);
+ void *kfd_process_device_translate_handle(struct kfd_process_device *p,
+ int handle);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index c627b63..5c6f124 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -140,7 +140,7 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
+ * created and the ioctls have not had the chance to run.
+ */
+ handle = kfd_process_device_create_obj_handle(
+- pdd, mem, gpu_va, size, NULL);
++ pdd, mem, gpu_va, size, 0, NULL);
+
+ if (handle < 0) {
+ err = handle;
+@@ -806,7 +806,7 @@ bool kfd_has_process_device_data(struct kfd_process *p)
+ */
+ int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
+ void *mem, uint64_t start,
+- uint64_t length,
++ uint64_t length, uint64_t cpuva,
+ struct kfd_ipc_obj *ipc_obj)
+ {
+ int handle;
+@@ -827,6 +827,7 @@ int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
+ buf_obj->mem = mem;
+ buf_obj->dev = pdd->dev;
+ buf_obj->kfd_ipc_obj = ipc_obj;
++ buf_obj->cpuva = cpuva;
+
+ INIT_LIST_HEAD(&buf_obj->cb_data_head);
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4162-drm-amdkfd-CMA-Handle-userptr-to-userptr-BO-copy.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4162-drm-amdkfd-CMA-Handle-userptr-to-userptr-BO-copy.patch
new file mode 100644
index 00000000..0ab2b27c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4162-drm-amdkfd-CMA-Handle-userptr-to-userptr-BO-copy.patch
@@ -0,0 +1,284 @@
+From 4d98ca2586f4857e43946b29175cb5d953d79b15 Mon Sep 17 00:00:00 2001
+From: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+Date: Mon, 26 Mar 2018 16:45:06 -0400
+Subject: [PATCH 4162/5725] drm/amdkfd: CMA: Handle userptr to userptr BO copy
+
+The CMA userptr implementation is incomplete because it doesn't properly
+handle the case where the BO is evicted. This patch handles the case where
+both source and destination BOs are userptrs. It is more efficient to use
+the CPU to do the copy in this case, very similar to the
+process_vm_read/write() functions.
+
+Change-Id: I5d01d906f04190d71e8663785718060411dede4e
+Signed-off-by: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+
+Conflicts:
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 179 +++++++++++++++++++++++++++++--
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 2 +
+ 2 files changed, 172 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index ef1bd27..bd09647 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -35,6 +35,7 @@
+ #include <linux/mman.h>
+ #include <asm/processor.h>
+ #include <linux/ptrace.h>
++#include <linux/pagemap.h>
+
+ #include "kfd_priv.h"
+ #include "kfd_device_queue_manager.h"
+@@ -1714,6 +1715,12 @@ static int kfd_ioctl_ipc_import_handle(struct file *filep,
+ return r;
+ }
+
++/* Maximum number of entries for process pages array which lives on stack */
++#define MAX_PP_STACK_COUNT 16
++/* Maximum number of pages kmalloc'd to hold struct page's during copy */
++#define MAX_KMALLOC_PAGES (PAGE_SIZE * 2)
++#define MAX_PP_KMALLOC_COUNT (MAX_KMALLOC_PAGES/sizeof(struct page *))
++
+ /* Update cma_iter.cur_bo with the KFD BO that is associated with
+ * cma_iter.array.va_addr
+ */
+@@ -1762,7 +1769,8 @@ static int kfd_cma_iter_advance(struct cma_iter *ci, unsigned long size)
+ }
+
+ static int kfd_cma_iter_init(struct kfd_memory_range *arr, unsigned long segs,
+- struct kfd_process *p, struct cma_iter *ci)
++ struct kfd_process *p, struct mm_struct *mm,
++ struct task_struct *task, struct cma_iter *ci)
+ {
+ int ret;
+ int nr;
+@@ -1775,6 +1783,8 @@ static int kfd_cma_iter_init(struct kfd_memory_range *arr, unsigned long segs,
+ ci->nr_segs = segs;
+ ci->p = p;
+ ci->offset = 0;
++ ci->mm = mm;
++ ci->task = task;
+ for (nr = 0; nr < segs; nr++)
+ ci->total += arr[nr].size;
+
+@@ -1795,6 +1805,159 @@ static bool kfd_cma_iter_end(struct cma_iter *ci)
+ return false;
+ }
+
++/* Copies @size bytes from the si->cur_bo BO to the di->cur_bo BO. The
++ * function assumes both source and dest. BOs are userptr BOs. Both BOs can
++ * belong to the current process, or one of the BOs can belong to a different
++ * process. @Returns 0 on success, -ve on failure
++ *
++ * @si: Source iter
++ * @di: Dest. iter
++ * @cma_write: Indicates if it is write to remote or read from remote
++ * @size: amount of bytes to be copied
++ * @copied: Return number of bytes actually copied.
++ */
++static int kfd_copy_userptr_bos(struct cma_iter *si, struct cma_iter *di,
++ bool cma_write, uint64_t size,
++ uint64_t *copied)
++{
++ int i, ret = 0, locked;
++ unsigned int nents, nl;
++ unsigned int offset_in_page;
++ struct page *pp_stack[MAX_PP_STACK_COUNT];
++ struct page **process_pages = pp_stack;
++ unsigned long rva, lva = 0, flags = 0;
++ uint64_t copy_size, to_copy = size;
++ struct cma_iter *li, *ri;
++
++ if (cma_write) {
++ ri = di;
++ li = si;
++ flags |= FOLL_WRITE;
++ } else {
++ li = di;
++ ri = si;
++ }
++ /* rva: remote virtual address. Page aligned to start page.
++ * rva + offset_in_page: Points to remote start address
++ * lva: local virtual address. Points to the start address.
++ * nents: computes number of remote pages to request
++ */
++ offset_in_page = ri->bo_offset & (PAGE_SIZE - 1);
++ rva = (ri->cur_bo->cpuva + ri->bo_offset) & PAGE_MASK;
++ lva = li->cur_bo->cpuva + li->bo_offset;
++
++ nents = (size + offset_in_page + PAGE_SIZE - 1) / PAGE_SIZE;
++
++ copy_size = min_t(uint64_t, size, PAGE_SIZE - offset_in_page);
++ *copied = 0;
++
++ if (nents > MAX_PP_STACK_COUNT) {
++ /* For reliability kmalloc only 2 pages worth */
++ process_pages = kmalloc(min_t(size_t, MAX_KMALLOC_PAGES,
++ sizeof(struct pages *)*nents),
++ GFP_KERNEL);
++
++ if (!process_pages)
++ return -ENOMEM;
++ }
++
++ while (nents && to_copy) {
++ nl = min_t(unsigned int, MAX_PP_KMALLOC_COUNT, nents);
++ locked = 1;
++ down_read(&ri->mm->mmap_sem);
++ nl = get_user_pages_remote(ri->task, ri->mm, rva, nl,
++ flags, process_pages, NULL,
++ &locked);
++ if (locked)
++ up_read(&ri->mm->mmap_sem);
++ if (nl <= 0) {
++ pr_err("CMA: Invalid virtual address 0x%lx\n", rva);
++ ret = -EFAULT;
++ break;
++ }
++
++ for (i = 0; i < nl; i++) {
++ unsigned int n;
++ void *kaddr = kmap_atomic(process_pages[i]);
++
++ if (cma_write) {
++ n = copy_from_user(kaddr+offset_in_page,
++ (void *)lva, copy_size);
++ set_page_dirty(process_pages[i]);
++ } else {
++ n = copy_to_user((void *)lva,
++ kaddr+offset_in_page,
++ copy_size);
++ }
++ kunmap_atomic(kaddr);
++ if (n) {
++ ret = -EFAULT;
++ break;
++ }
++ to_copy -= copy_size;
++ if (!to_copy)
++ break;
++ lva += copy_size;
++ rva += (copy_size + offset_in_page);
++ WARN_ONCE(rva & (PAGE_SIZE - 1),
++ "CMA: Error in remote VA computation");
++ offset_in_page = 0;
++ copy_size = min_t(uint64_t, to_copy, PAGE_SIZE);
++ }
++
++ for (i = 0; i < nl; i++)
++ put_page(process_pages[i]);
++
++ if (ret)
++ break;
++ nents -= nl;
++ }
++
++ if (process_pages != pp_stack)
++ kfree(process_pages);
++
++ *copied = (size - to_copy);
++ return ret;
++
++}
++
++/* Copies @size bytes from si->cur_bo to di->cur_bo starting at their
++ * respective offset.
++ * @si: Source iter
++ * @di: Dest. iter
++ * @cma_write: Indicates if it is write to remote or read from remote
++ * @size: amount of bytes to be copied
++ * @f: Return the last fence if any
++ * @copied: Return number of bytes actually copied.
++ */
++static int kfd_copy_bos(struct cma_iter *si, struct cma_iter *di,
++ int cma_write, uint64_t size,
++ struct dma_fence **f, uint64_t *copied)
++{
++ int err = 0;
++ struct kfd_bo *dst_bo = di->cur_bo, *src_bo = si->cur_bo;
++ uint64_t src_offset = si->bo_offset, dst_offset = di->bo_offset;
++ struct kgd_mem *src_mem = src_bo->mem, *dst_mem = dst_bo->mem;
++
++ *copied = 0;
++ if (f)
++ *f = NULL;
++ if (src_bo->cpuva && dst_bo->cpuva)
++ return kfd_copy_userptr_bos(si, di, cma_write, size, copied);
++
++ if (src_bo->dev->kgd != dst_bo->dev->kgd) {
++ pr_err("CMA %d fail. Not same dev\n", cma_write);
++ err = -EINVAL;
++ }
++
++ err = dst_bo->dev->kfd2kgd->copy_mem_to_mem(src_bo->dev->kgd, src_mem,
++ src_offset, dst_mem,
++ dst_offset, size, f,
++ copied);
++
++ return err;
++}
++
+ /* Copy single range from source iterator @si to destination iterator @di.
+ * @si will move to next range and @di will move by bytes copied.
+ * @return : 0 for success or -ve for failure
+@@ -1829,11 +1992,9 @@ static int kfd_copy_single_range(struct cma_iter *si, struct cma_iter *di,
+ return -EINVAL;
+ }
+
+- err = dst_bo->dev->kfd2kgd->copy_mem_to_mem(src_bo->dev->kgd,
+- src_bo->mem, si->bo_offset, dst_bo->mem, di->bo_offset,
+- copy_size, &fence, &n);
++ err = kfd_copy_bos(si, di, cma_write, copy_size, &fence, &n);
+ if (err) {
+- pr_err("GPU CMA %d failed\n", err);
++ pr_err("CMA %d failed\n", err);
+ break;
+ }
+
+@@ -1942,11 +2103,11 @@ static int kfd_ioctl_cross_memory_copy(struct file *filep,
+ cma_op = "WRITE";
+ pr_debug("CMA WRITE: local -> remote\n");
+ err = kfd_cma_iter_init(dst_array, args->dst_mem_array_size,
+- remote_p, &di);
++ remote_p, remote_mm, remote_task, &di);
+ if (err)
+ goto kfd_process_fail;
+ err = kfd_cma_iter_init(src_array, args->src_mem_array_size,
+- local_p, &si);
++ local_p, current->mm, current, &si);
+ if (err)
+ goto kfd_process_fail;
+ } else {
+@@ -1954,11 +2115,11 @@ static int kfd_ioctl_cross_memory_copy(struct file *filep,
+ pr_debug("CMA READ: remote -> local\n");
+
+ err = kfd_cma_iter_init(dst_array, args->dst_mem_array_size,
+- local_p, &di);
++ local_p, current->mm, current, &di);
+ if (err)
+ goto kfd_process_fail;
+ err = kfd_cma_iter_init(src_array, args->src_mem_array_size,
+- remote_p, &si);
++ remote_p, remote_mm, remote_task, &si);
+ if (err)
+ goto kfd_process_fail;
+ }
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index facd9d9..2744154 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -316,6 +316,8 @@ struct cma_iter {
+ /* offset into the entry pointed by cma_iter.array */
+ unsigned long offset;
+ struct kfd_process *p;
++ struct mm_struct *mm;
++ struct task_struct *task;
+ /* current kfd_bo associated with cma_iter.array.va_addr */
+ struct kfd_bo *cur_bo;
+ /* offset w.r.t cur_bo */
+--
+2.7.4
+
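In the userptr-to-userptr path above, the copy goes through pinned pages, so each step moves at most one page and the very first chunk is shortened by the offset into the first page. The arithmetic sketch below mirrors the offset_in_page / nents / copy_size computation with made-up numbers; it is illustrative only and not kernel code.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096u

int main(void)
{
        uint64_t bo_offset = 100;                 /* made-up offset into the remote BO */
        uint64_t size = 10000;                    /* made-up number of bytes to copy */
        uint64_t offset_in_page = bo_offset & (PAGE_SIZE - 1);
        uint64_t nents = (size + offset_in_page + PAGE_SIZE - 1) / PAGE_SIZE;
        uint64_t copy_size = size < PAGE_SIZE - offset_in_page ?
                             size : PAGE_SIZE - offset_in_page;
        uint64_t done = 0;

        printf("pinning %llu pages\n", (unsigned long long)nents);
        while (done < size) {
                /* first chunk is PAGE_SIZE - offset_in_page, later ones a full page */
                printf("copy %llu bytes at page offset %llu\n",
                       (unsigned long long)copy_size,
                       (unsigned long long)offset_in_page);
                done += copy_size;
                offset_in_page = 0;
                copy_size = (size - done) < PAGE_SIZE ? (size - done) : PAGE_SIZE;
        }
        return 0;
}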
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4163-drm-amdgpu-kfd2kgd-Support-BO-create-from-sg.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4163-drm-amdgpu-kfd2kgd-Support-BO-create-from-sg.patch
new file mode 100644
index 00000000..1a14d4dc
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4163-drm-amdgpu-kfd2kgd-Support-BO-create-from-sg.patch
@@ -0,0 +1,141 @@
+From 911066d0a097bd5cee306247cb953fb4359add37 Mon Sep 17 00:00:00 2001
+From: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+Date: Tue, 27 Mar 2018 11:28:55 -0400
+Subject: [PATCH 4163/5725] drm/amdgpu: kfd2kgd: Support BO create from sg
+
+Change-Id: I3d50a285f6c5645995dcd45b66129fb8837f2bd4
+Signed-off-by: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 23 +++++++++++++++++++----
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 2 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 3 ++-
+ drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 2 +-
+ 5 files changed, 24 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+index 63dbe3c..5c785ac 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+@@ -181,7 +181,7 @@ void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm);
+ uint32_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm);
+ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
+ struct kgd_dev *kgd, uint64_t va, uint64_t size,
+- void *vm, struct kgd_mem **mem,
++ void *vm, struct sg_table *sg, struct kgd_mem **mem,
+ uint64_t *offset, uint32_t flags);
+ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
+ struct kgd_dev *kgd, struct kgd_mem *mem);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index b3112fb..83ed761 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -911,6 +911,17 @@ static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
+ return sg;
+ }
+
++static bool check_sg_size(struct sg_table *sgt, uint64_t size)
++{
++ unsigned int count;
++ struct scatterlist *sg;
++
++ for_each_sg(sgt->sgl, sg, sgt->nents, count)
++ size -= sg->length;
++
++ return (size == 0);
++}
++
+ static int process_validate_vms(struct amdkfd_process_info *process_info)
+ {
+ struct amdgpu_vm *peer_vm;
+@@ -1158,13 +1169,12 @@ uint32_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
+
+ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
+ struct kgd_dev *kgd, uint64_t va, uint64_t size,
+- void *vm, struct kgd_mem **mem,
++ void *vm, struct sg_table *sg, struct kgd_mem **mem,
+ uint64_t *offset, uint32_t flags)
+ {
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
+ uint64_t user_addr = 0;
+- struct sg_table *sg = NULL;
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ struct amdgpu_bo *bo;
+ int byte_align;
+@@ -1185,6 +1195,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
+ } else if (flags & ALLOC_MEM_FLAGS_GTT) {
+ domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
+ alloc_flags = 0;
++ if (sg && !check_sg_size(sg, size))
++ return -EINVAL;
+ } else if (flags & ALLOC_MEM_FLAGS_USERPTR) {
+ domain = AMDGPU_GEM_DOMAIN_GTT;
+ alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
+@@ -1194,18 +1206,21 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
+ user_addr = *offset;
+ } else if (flags & ALLOC_MEM_FLAGS_DOORBELL) {
+ domain = AMDGPU_GEM_DOMAIN_GTT;
+- alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
+ alloc_flags = 0;
+ if (size > UINT_MAX)
+ return -EINVAL;
++ WARN_ON(sg);
+ sg = create_doorbell_sg(*offset, size);
+ if (!sg)
+ return -ENOMEM;
+- bo_type = ttm_bo_type_sg;
+ } else {
+ return -EINVAL;
+ }
+
++ if (sg) {
++ alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
++ bo_type = ttm_bo_type_sg;
++ }
+ *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
+ if (!*mem) {
+ ret = -ENOMEM;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index bd09647..1a35938 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -1301,7 +1301,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
+
+ err = dev->kfd2kgd->alloc_memory_of_gpu(
+ dev->kgd, args->va_addr, args->size,
+- pdd->vm, (struct kgd_mem **) &mem, &offset,
++ pdd->vm, NULL, (struct kgd_mem **) &mem, &offset,
+ flags);
+
+ if (err)
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index 5c6f124..ef71670 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -120,7 +120,8 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
+ int err;
+
+ err = kdev->kfd2kgd->alloc_memory_of_gpu(kdev->kgd, gpu_va, size,
+- pdd->vm, &mem, NULL, flags);
++ pdd->vm, NULL, &mem, NULL,
++ flags);
+ if (err)
+ goto err_alloc_mem;
+
+diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+index e164abb..5060052 100644
+--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
++++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+@@ -345,7 +345,7 @@ struct kfd2kgd_calls {
+ int (*sync_memory)(struct kgd_dev *kgd, struct kgd_mem *mem, bool intr);
+
+ int (*alloc_memory_of_gpu)(struct kgd_dev *kgd, uint64_t va,
+- uint64_t size, void *vm,
++ uint64_t size, void *vm, struct sg_table *sg,
+ struct kgd_mem **mem, uint64_t *offset,
+ uint32_t flags);
+ int (*free_memory_of_gpu)(struct kgd_dev *kgd, struct kgd_mem *mem);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4164-drm-amdgpu-CMA-Validate-BOs-before-use.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4164-drm-amdgpu-CMA-Validate-BOs-before-use.patch
new file mode 100644
index 00000000..9b2bada5
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4164-drm-amdgpu-CMA-Validate-BOs-before-use.patch
@@ -0,0 +1,53 @@
+From 7d3142e579c36a3d54034bee2d0f4ad1e52e06cd Mon Sep 17 00:00:00 2001
+From: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+Date: Mon, 26 Feb 2018 18:20:23 -0500
+Subject: [PATCH 4164/5725] drm/amdgpu: CMA: Validate BOs before use
+
+The CMA copy is submitted in Kernel mode. Before submitting the command
+ensure that the BOs are validated.
+
+Change-Id: I1ca03934486eac32d0947654e727439d13f20b8e
+Signed-off-by: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 18 ++++++++++++++++++
+ 1 file changed, 18 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index 83ed761..90e98c9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -2439,6 +2439,23 @@ int amdgpu_amdkfd_copy_mem_to_mem(struct kgd_dev *kgd, struct kgd_mem *src_mem,
+ return r;
+ }
+
++	/* The process to which the source and dest BOs belong could be
++	 * evicted and the BOs invalidated. So validate the BOs before use
++ */
++ r = amdgpu_amdkfd_bo_validate(src_mem->bo, src_mem->domain, false);
++ if (r) {
++ pr_err("CMA fail: SRC BO validate failed %d\n", r);
++ goto validate_fail;
++ }
++
++
++ r = amdgpu_amdkfd_bo_validate(dst_mem->bo, dst_mem->domain, false);
++ if (r) {
++ pr_err("CMA fail: DST BO validate failed %d\n", r);
++ goto validate_fail;
++ }
++
++
+ r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst, size, NULL,
+ &fence);
+ if (r)
+@@ -2453,6 +2470,7 @@ int amdgpu_amdkfd_copy_mem_to_mem(struct kgd_dev *kgd, struct kgd_mem *src_mem,
+ *f = dma_fence_get(fence);
+ dma_fence_put(fence);
+
++validate_fail:
+ ttm_eu_backoff_reservation(&ticket, &list);
+ return r;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4165-drm-amdkfd-CMA-Use-shadow-system-BO-for-userptr.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4165-drm-amdkfd-CMA-Use-shadow-system-BO-for-userptr.patch
new file mode 100644
index 00000000..9aaeb05c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4165-drm-amdkfd-CMA-Use-shadow-system-BO-for-userptr.patch
@@ -0,0 +1,311 @@
+From e3660ab379356393311bf7d36234d71012b61f0d Mon Sep 17 00:00:00 2001
+From: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+Date: Tue, 27 Mar 2018 14:36:18 -0400
+Subject: [PATCH 4165/5725] drm/amdkfd: CMA: Use shadow system BO for userptr
+
+Userptr BOs could be evicted during CMA operations. If one of the BOs
+involved is a userptr, then a shadow BO is created using its underlying
+pages. An sg table is created by pinning the backing system pages, and a
+system BO is created using this sg table. This temporary BO is used for
+the copy operation.
+
+v2: get_user_pages() could return fewer pages than requested. Handle
+this condition.
+
+Change-Id: Ied26bb481bfa8bb5b488f46f94451477b45746e0
+Signed-off-by: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 216 ++++++++++++++++++++++++++++++-
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 13 ++
+ 2 files changed, 227 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index 1a35938..a242208 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -1721,6 +1721,187 @@ static int kfd_ioctl_ipc_import_handle(struct file *filep,
+ #define MAX_KMALLOC_PAGES (PAGE_SIZE * 2)
+ #define MAX_PP_KMALLOC_COUNT (MAX_KMALLOC_PAGES/sizeof(struct page *))
+
++static void kfd_put_sg_table(struct sg_table *sg)
++{
++ unsigned int i;
++ struct scatterlist *s;
++
++ for_each_sg(sg->sgl, s, sg->nents, i)
++ put_page(sg_page(s));
++}
++
++
++/* Create an sg table for the given userptr BO by pinning its system pages
++ * @bo: userptr BO
++ * @offset: Offset into BO
++ * @mm/@task: mm_struct & task_struct of the process that holds the BO
++ * @size: in/out: desired size / actual size which could be smaller
++ * @sg_size: out: Size of sg table. This is ALIGN_UP(@size)
++ * @ret_sg: out sg table
++ */
++static int kfd_create_sg_table_from_userptr_bo(struct kfd_bo *bo,
++ int64_t offset, int cma_write,
++ struct mm_struct *mm,
++ struct task_struct *task,
++ uint64_t *size,
++ uint64_t *sg_size,
++ struct sg_table **ret_sg)
++{
++ int ret, locked = 1;
++ struct sg_table *sg = NULL;
++ unsigned int i, offset_in_page, flags = 0;
++ unsigned long nents, n;
++ unsigned long pa = (bo->cpuva + offset) & PAGE_MASK;
++ unsigned int cur_page = 0;
++ struct scatterlist *s;
++ uint64_t sz = *size;
++ struct page **process_pages;
++
++ *sg_size = 0;
++ sg = kmalloc(sizeof(*sg), GFP_KERNEL);
++ if (!sg)
++ return -ENOMEM;
++
++ offset_in_page = offset & (PAGE_SIZE - 1);
++ nents = (sz + offset_in_page + PAGE_SIZE - 1) / PAGE_SIZE;
++
++ ret = sg_alloc_table(sg, nents, GFP_KERNEL);
++ if (unlikely(ret)) {
++ ret = -ENOMEM;
++ goto sg_alloc_fail;
++ }
++ process_pages = kmalloc_array(nents, sizeof(struct pages *),
++ GFP_KERNEL);
++ if (!process_pages) {
++ ret = -ENOMEM;
++ goto page_alloc_fail;
++ }
++
++ if (cma_write)
++ flags = FOLL_WRITE;
++ locked = 1;
++ down_read(&mm->mmap_sem);
++ n = get_user_pages_remote(task, mm, pa, nents, flags, process_pages,
++ NULL, &locked);
++ if (locked)
++ up_read(&mm->mmap_sem);
++ if (n <= 0) {
++ pr_err("CMA: Invalid virtual address 0x%lx\n", pa);
++ ret = -EFAULT;
++ goto get_user_fail;
++ }
++ if (n != nents) {
++ /* Pages pinned < requested. Set the size accordingly */
++ *size = (n * PAGE_SIZE) - offset_in_page;
++ pr_debug("Requested %lx but pinned %lx\n", nents, n);
++ }
++
++ sz = 0;
++ for_each_sg(sg->sgl, s, n, i) {
++ sg_set_page(s, process_pages[cur_page], PAGE_SIZE,
++ offset_in_page);
++ sg_dma_address(s) = page_to_phys(process_pages[cur_page]);
++ offset_in_page = 0;
++ cur_page++;
++ sz += PAGE_SIZE;
++ }
++ *ret_sg = sg;
++ *sg_size = sz;
++
++ kfree(process_pages);
++ return 0;
++
++get_user_fail:
++ kfree(process_pages);
++page_alloc_fail:
++ sg_free_table(sg);
++sg_alloc_fail:
++ kfree(sg);
++ return ret;
++}
++
++static void kfd_free_cma_bos(struct cma_iter *ci)
++{
++ struct cma_system_bo *cma_bo, *tmp;
++
++ list_for_each_entry_safe(cma_bo, tmp, &ci->cma_list, list) {
++ struct kfd_dev *dev = cma_bo->dev;
++
++ /* sg table is deleted by free_memory_of_gpu */
++ kfd_put_sg_table(cma_bo->sg);
++ dev->kfd2kgd->free_memory_of_gpu(dev->kgd, cma_bo->mem);
++ list_del(&cma_bo->list);
++ kfree(cma_bo);
++ }
++}
++
++/* Create a system BO by pinning underlying system pages of the given userptr
++ * BO @ubo
++ * @ubo: Userptr BO
++ * @offset: Offset into ubo
++ * @size: in/out: The size of the new BO could be less than requested if all
++ * the pages couldn't be pinned. This would be reflected in @size
++ * @mm/@task: mm/task to which @ubo belongs to
++ * @cma_bo: out: new system BO
++ */
++static int kfd_create_cma_system_bo(struct kfd_dev *kdev, struct kfd_bo *ubo,
++ uint64_t *size, uint64_t offset,
++ int cma_write, struct kfd_process *p,
++ struct mm_struct *mm,
++ struct task_struct *task,
++ struct cma_system_bo **cma_bo)
++{
++ int ret;
++ struct kfd_process_device *pdd = NULL;
++ struct cma_system_bo *cbo;
++ uint64_t sg_size;
++
++ uint32_t flags = ALLOC_MEM_FLAGS_GTT | ALLOC_MEM_FLAGS_NONPAGED |
++ ALLOC_MEM_FLAGS_NO_SUBSTITUTE;
++
++ *cma_bo = NULL;
++ cbo = kzalloc(sizeof(**cma_bo), GFP_KERNEL);
++ if (!cbo)
++ return -ENOMEM;
++
++ INIT_LIST_HEAD(&cbo->list);
++ ret = kfd_create_sg_table_from_userptr_bo(ubo, offset, cma_write, mm,
++ task, size, &sg_size,
++ &cbo->sg);
++ if (ret) {
++ pr_err("Failed to create system BO. sg table error %d\n", ret);
++ return ret;
++ }
++
++ mutex_lock(&p->mutex);
++ pdd = kfd_get_process_device_data(kdev, p);
++ if (!pdd) {
++ pr_err("Process device data doesn't exist\n");
++ ret = -EINVAL;
++ goto pdd_fail;
++ }
++
++ ret = kdev->kfd2kgd->alloc_memory_of_gpu(kdev->kgd, 0ULL, sg_size,
++ pdd->vm, cbo->sg,
++ &cbo->mem, NULL, flags);
++ if (ret) {
++ pr_err("Failed to create shadow system BO %d\n", ret);
++ goto pdd_fail;
++ }
++ mutex_unlock(&p->mutex);
++ cbo->dev = kdev;
++ *cma_bo = cbo;
++
++ return ret;
++
++pdd_fail:
++ mutex_unlock(&p->mutex);
++ kfd_put_sg_table(cbo->sg);
++ sg_free_table(cbo->sg);
++ kfree(cbo->sg);
++ return ret;
++}
++
+ /* Update cma_iter.cur_bo with KFD BO that is assocaited with
+ * cma_iter.array.va_addr
+ */
+@@ -1779,6 +1960,7 @@ static int kfd_cma_iter_init(struct kfd_memory_range *arr, unsigned long segs,
+ return -EINVAL;
+
+ memset(ci, 0, sizeof(*ci));
++ INIT_LIST_HEAD(&ci->cma_list);
+ ci->array = arr;
+ ci->nr_segs = segs;
+ ci->p = p;
+@@ -1945,16 +2127,43 @@ static int kfd_copy_bos(struct cma_iter *si, struct cma_iter *di,
+ if (src_bo->cpuva && dst_bo->cpuva)
+ return kfd_copy_userptr_bos(si, di, cma_write, size, copied);
+
+- if (src_bo->dev->kgd != dst_bo->dev->kgd) {
++ /* If either source or dest. is userptr, create a shadow system BO
++ * by using the underlying userptr BO pages. Then use this shadow
++ * BO for copy. src_offset & dst_offset are adjusted because the new BO
++ * is only created for the window (offset, size) requested.
++ * The BOs are stored in cma_list for deferred cleanup. This minimizes
++ * fence waiting just to the last fence.
++ */
++ if (src_bo->cpuva) {
++ err = kfd_create_cma_system_bo(dst_bo->dev, src_bo, &size,
++ si->bo_offset, cma_write,
++ si->p, si->mm, si->task,
++ &si->cma_bo);
++ src_mem = si->cma_bo->mem;
++ src_offset = si->bo_offset & (PAGE_SIZE - 1);
++ list_add_tail(&si->cma_bo->list, &si->cma_list);
++ } else if (dst_bo->cpuva) {
++ err = kfd_create_cma_system_bo(src_bo->dev, dst_bo, &size,
++ di->bo_offset, cma_write,
++ di->p, di->mm, di->task,
++ &di->cma_bo);
++ dst_mem = di->cma_bo->mem;
++ dst_offset = di->bo_offset & (PAGE_SIZE - 1);
++ list_add_tail(&di->cma_bo->list, &di->cma_list);
++ } else if (src_bo->dev->kgd != dst_bo->dev->kgd) {
+ pr_err("CMA %d fail. Not same dev\n", cma_write);
+ err = -EINVAL;
+ }
+
++ if (err) {
++ pr_err("Failed to create system BO %d", err);
++ err = -EINVAL;
++ }
++
+ err = dst_bo->dev->kfd2kgd->copy_mem_to_mem(src_bo->dev->kgd, src_mem,
+ src_offset, dst_mem,
+ dst_offset, size, f,
+ copied);
+-
+ return err;
+ }
+
+@@ -2156,6 +2365,9 @@ static int kfd_ioctl_cross_memory_copy(struct file *filep,
+ dma_fence_put(lfence);
+ }
+
++ kfd_free_cma_bos(&si);
++ kfd_free_cma_bos(&di);
++
+ kfd_process_fail:
+ mmput(remote_mm);
+ mm_access_fail:
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index 2744154..cbb65b0 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -305,6 +305,13 @@ struct kfd_bo {
+ uint64_t cpuva;
+ };
+
++struct cma_system_bo {
++ struct kgd_mem *mem;
++ struct sg_table *sg;
++ struct kfd_dev *dev;
++ struct list_head list;
++};
++
+ /* Similar to iov_iter */
+ struct cma_iter {
+ /* points to current entry of range array */
+@@ -322,6 +329,12 @@ struct cma_iter {
+ struct kfd_bo *cur_bo;
+ /* offset w.r.t cur_bo */
+ unsigned long bo_offset;
++ /* If cur_bo is a userptr BO, then a shadow system BO is created
++ * using its underlying pages. cma_bo holds this BO. cma_list is a
++ * list cma_bos created in one session
++ */
++ struct cma_system_bo *cma_bo;
++ struct list_head cma_list;
+ };
+
+ /* KGD2KFD callbacks */
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4166-Fix-SVM-missing-on-Raven.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4166-Fix-SVM-missing-on-Raven.patch
new file mode 100644
index 00000000..f5ef7205
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4166-Fix-SVM-missing-on-Raven.patch
@@ -0,0 +1,85 @@
+From 709b7d9804f9a25e771b3aed96902313ca65b8ca Mon Sep 17 00:00:00 2001
+From: Yong Zhao <yong.zhao@amd.com>
+Date: Thu, 5 Apr 2018 15:37:09 -0400
+Subject: [PATCH 4166/5725] Fix SVM missing on Raven
+
+gpuvm_base and gpuvm_limit are used in Thunk to reserve SVM, but we
+accidentally set them to 0, resulting in no SVM on Raven. To fix that,
+we set both values the same as on Vega10.
+
+As part of the fix, we moved GPUVM aperture initialization into
+ASIC-specific kfd_init_apertures_* functions for all ASICs.
+
+Fix: SWDEV-149576
+
+Change-Id: I76ab262900ed8880944b755080f93dca5c8ea8bb
+Signed-off-by: Yong Zhao <yong.zhao@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c | 36 +++++++++++++++++++++-------
+ 1 file changed, 28 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+index 2c00711..5672710 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+@@ -332,9 +332,22 @@ void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id)
+ pdd->lds_base = MAKE_LDS_APP_BASE_VI();
+ pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base);
+
+- pdd->gpuvm_base = MAKE_GPUVM_APP_BASE_VI(id + 1);
+- pdd->gpuvm_limit = MAKE_GPUVM_APP_LIMIT(
+- pdd->gpuvm_base, pdd->dev->shared_resources.gpuvm_size);
++ if (!pdd->dev->device_info->needs_iommu_device) {
++ /* dGPUs: SVM aperture starting at 0
++ * with small reserved space for kernel.
++ * Set them to CANONICAL addresses.
++ */
++ pdd->gpuvm_base = SVM_USER_BASE;
++ pdd->gpuvm_limit =
++ pdd->dev->shared_resources.gpuvm_size - 1;
++ } else {
++ /* set them to non CANONICAL addresses, and no SVM is
++ * allocated.
++ */
++ pdd->gpuvm_base = MAKE_GPUVM_APP_BASE_VI(id + 1);
++ pdd->gpuvm_limit = MAKE_GPUVM_APP_LIMIT(pdd->gpuvm_base,
++ pdd->dev->shared_resources.gpuvm_size);
++ }
+
+ pdd->scratch_base = MAKE_SCRATCH_APP_BASE_VI();
+ pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base);
+@@ -345,6 +358,16 @@ void kfd_init_apertures_v9(struct kfd_process_device *pdd, uint8_t id)
+ pdd->lds_base = MAKE_LDS_APP_BASE_V9();
+ pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base);
+
++ /* Raven needs SVM to support graphic handle, etc. Leave the small
++ * reserved space before SVM on Raven as well, even though we don't
++ * have to.
++ * Set gpuvm_base and gpuvm_limit to CANONICAL addresses so that they
++ * are used in Thunk to reserve SVM.
++ */
++ pdd->gpuvm_base = SVM_USER_BASE;
++ pdd->gpuvm_limit =
++ pdd->dev->shared_resources.gpuvm_size - 1;
++
+ pdd->scratch_base = MAKE_SCRATCH_APP_BASE_V9();
+ pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base);
+ }
+@@ -397,12 +420,9 @@ int kfd_init_apertures(struct kfd_process *process)
+ }
+
+ if (!dev->device_info->needs_iommu_device) {
+- /* dGPUs: SVM aperture starting at 0
+- * with small reserved space for kernel
++ /* dGPUs: the reserved space for kernel
++ * before SVM
+ */
+- pdd->gpuvm_base = SVM_USER_BASE;
+- pdd->gpuvm_limit =
+- dev->shared_resources.gpuvm_size - 1;
+ pdd->qpd.cwsr_base = SVM_CWSR_BASE;
+ pdd->qpd.ib_base = SVM_IB_BASE;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4167-drm-amdkfd-Implement-SPI-debug-and-exception-support.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4167-drm-amdkfd-Implement-SPI-debug-and-exception-support.patch
new file mode 100644
index 00000000..96b4ea2d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4167-drm-amdkfd-Implement-SPI-debug-and-exception-support.patch
@@ -0,0 +1,587 @@
+From f1f7d96ba6f0de5623f489c3d28a22c3f95b5707 Mon Sep 17 00:00:00 2001
+From: Jay Cornwall <Jay.Cornwall@amd.com>
+Date: Tue, 3 Apr 2018 18:41:50 -0500
+Subject: [PATCH 4167/5725] drm/amdkfd: Implement SPI debug and exception
+ support in gfx9 trap handler
+
+The SPI can be configured to populate trap temporary SGPRs with data
+specific to individual wavefronts. These SGPRs are currently trashed
+by the context save/restore handler and trap/exception handler.
+
+- Shuffle some ttmp register usage to preserve SPI debug data
+- Save/restore SPI debug ttmps 6-11 and 13-15 in context save area
+- Propagate exceptions to second-level trap handler
+- Modify second-level jump protocol to preserve SPI debug ttmps
+- Defer VGPR XNACK mask save until VGPR save, clear mask before using
+- Save/restore scalar XNACK state
+
+Change-Id: I7699ea7a0e61b32c532e50c26a3e24976660960f
+Signed-off-by: Jay Cornwall <Jay.Cornwall@amd.com>
+---
+ .../gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm | 312 +++++++++++++--------
+ 1 file changed, 198 insertions(+), 114 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+index bd2957c..8ef6b44 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+@@ -122,11 +122,14 @@ var SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK = 0x800
+
+ var SQ_WAVE_IB_STS_RCNT_SHIFT = 16 //FIXME
+ var SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT = 15 //FIXME
++var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK = 0x1F8000
+ var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK_NEG = 0x00007FFF //FIXME
+
+ var SQ_BUF_RSRC_WORD1_ATC_SHIFT = 24
+ var SQ_BUF_RSRC_WORD3_MTYPE_SHIFT = 27
+
++var TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT = 26 // bits [31:26] unused by SPI debug data
++var TTMP11_SAVE_RCNT_FIRST_REPLAY_MASK = 0xFC000000
+
+ /* Save */
+ var S_SAVE_BUF_RSRC_WORD1_STRIDE = 0x00040000 //stride is 4 bytes
+@@ -151,7 +154,7 @@ var s_save_pc_lo = ttmp0 //{TTMP1, TTMP0} = {3
+ var s_save_pc_hi = ttmp1
+ var s_save_exec_lo = ttmp2
+ var s_save_exec_hi = ttmp3
+-var s_save_status = ttmp4
++var s_save_tmp = ttmp4
+ var s_save_trapsts = ttmp5 //not really used until the end of the SAVE routine
+ var s_save_xnack_mask_lo = ttmp6
+ var s_save_xnack_mask_hi = ttmp7
+@@ -159,11 +162,12 @@ var s_save_buf_rsrc0 = ttmp8
+ var s_save_buf_rsrc1 = ttmp9
+ var s_save_buf_rsrc2 = ttmp10
+ var s_save_buf_rsrc3 = ttmp11
+-
++var s_save_status = ttmp12
+ var s_save_mem_offset = ttmp14
+ var s_save_alloc_size = s_save_trapsts //conflict
+-var s_save_tmp = s_save_buf_rsrc2 //shared with s_save_buf_rsrc2 (conflict: should not use mem access with s_save_tmp at the same time)
+ var s_save_m0 = ttmp15
++var s_save_ttmps_lo = s_save_tmp //no conflict
++var s_save_ttmps_hi = s_save_trapsts //no conflict
+
+ /* Restore */
+ var S_RESTORE_BUF_RSRC_WORD1_STRIDE = S_SAVE_BUF_RSRC_WORD1_STRIDE
+@@ -186,7 +190,7 @@ var s_restore_spi_init_hi = exec_hi
+
+ var s_restore_mem_offset = ttmp12
+ var s_restore_alloc_size = ttmp3
+-var s_restore_tmp = ttmp6
++var s_restore_tmp = ttmp2
+ var s_restore_mem_offset_save = s_restore_tmp //no conflict
+
+ var s_restore_m0 = s_restore_alloc_size //no conflict
+@@ -205,6 +209,8 @@ var s_restore_buf_rsrc0 = ttmp8
+ var s_restore_buf_rsrc1 = ttmp9
+ var s_restore_buf_rsrc2 = ttmp10
+ var s_restore_buf_rsrc3 = ttmp11
++var s_restore_ttmps_lo = s_restore_tmp //no conflict
++var s_restore_ttmps_hi = s_restore_alloc_size //no conflict
+
+ /**************************************************************************/
+ /* trap handler entry points */
+@@ -235,25 +241,25 @@ L_SKIP_RESTORE:
+ s_getreg_b32 s_save_status, hwreg(HW_REG_STATUS) //save STATUS since we will change SCC
+ s_andn2_b32 s_save_status, s_save_status, SQ_WAVE_STATUS_SPI_PRIO_MASK //check whether this is for save
+ s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
+- s_and_b32 ttmp8, s_save_trapsts, SQ_WAVE_TRAPSTS_SAVECTX_MASK //check whether this is for save
++ s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_SAVECTX_MASK //check whether this is for save
+ s_cbranch_scc1 L_SAVE //this is the operation for save
+
+ // ********* Handle non-CWSR traps *******************
+ if (!EMU_RUN_HACK)
+ // Illegal instruction is a non-maskable exception which blocks context save.
+ // Halt the wavefront and return from the trap.
+- s_and_b32 ttmp8, s_save_trapsts, SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK
++ s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK
+ s_cbranch_scc1 L_HALT_WAVE
+
+ // If STATUS.MEM_VIOL is asserted then we cannot fetch from the TMA.
+ // Instead, halt the wavefront and return from the trap.
+- s_and_b32 ttmp8, s_save_trapsts, SQ_WAVE_TRAPSTS_MEM_VIOL_MASK
+- s_cbranch_scc0 L_NO_MEM_VIOL
++ s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_MEM_VIOL_MASK
++ s_cbranch_scc0 L_FETCH_2ND_TRAP
+
+ L_HALT_WAVE:
+ // If STATUS.HALT is set then this fault must come from SQC instruction fetch.
+ // We cannot prevent further faults so just terminate the wavefront.
+- s_and_b32 ttmp8, s_save_status, SQ_WAVE_STATUS_HALT_MASK
++ s_and_b32 ttmp2, s_save_status, SQ_WAVE_STATUS_HALT_MASK
+ s_cbranch_scc0 L_NOT_ALREADY_HALTED
+ s_endpgm
+ L_NOT_ALREADY_HALTED:
+@@ -264,19 +270,31 @@ L_NOT_ALREADY_HALTED:
+ s_sub_u32 ttmp0, ttmp0, 0x8
+ s_subb_u32 ttmp1, ttmp1, 0x0
+
+- s_branch L_EXCP_CASE
+-
+-L_NO_MEM_VIOL:
+- /* read tba and tma for next level trap handler, ttmp4 is used as s_save_status */
+- s_getreg_b32 ttmp14,hwreg(HW_REG_SQ_SHADER_TMA_LO)
+- s_getreg_b32 ttmp15,hwreg(HW_REG_SQ_SHADER_TMA_HI)
+- s_lshl_b64 [ttmp14, ttmp15], [ttmp14, ttmp15], 0x8
+- s_load_dwordx4 [ttmp8, ttmp9, ttmp10, ttmp11], [ttmp14, ttmp15], 0
+- s_waitcnt lgkmcnt(0)
+- s_or_b32 ttmp7, ttmp8, ttmp9
+- s_cbranch_scc0 L_NO_NEXT_TRAP //next level trap handler not been set
+- s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status //restore HW status(SCC)
+- s_setpc_b64 [ttmp8,ttmp9] //jump to next level trap handler
++L_FETCH_2ND_TRAP:
++ // Preserve and clear scalar XNACK state before issuing scalar reads.
++ // Save IB_STS.FIRST_REPLAY[15] and IB_STS.RCNT[20:16] into unused space ttmp11[31:26].
++ s_getreg_b32 ttmp2, hwreg(HW_REG_IB_STS)
++ s_and_b32 ttmp3, ttmp2, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
++ s_lshl_b32 ttmp3, ttmp3, (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT)
++ s_andn2_b32 ttmp11, ttmp11, TTMP11_SAVE_RCNT_FIRST_REPLAY_MASK
++ s_or_b32 ttmp11, ttmp11, ttmp3
++
++ s_andn2_b32 ttmp2, ttmp2, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
++ s_setreg_b32 hwreg(HW_REG_IB_STS), ttmp2
++
++ // Read second-level TBA/TMA from first-level TMA and jump if available.
++ // ttmp[2:5] and ttmp12 can be used (others hold SPI-initialized debug data)
++ // ttmp12 holds SQ_WAVE_STATUS
++ s_getreg_b32 ttmp4, hwreg(HW_REG_SQ_SHADER_TMA_LO)
++ s_getreg_b32 ttmp5, hwreg(HW_REG_SQ_SHADER_TMA_HI)
++ s_lshl_b64 [ttmp4, ttmp5], [ttmp4, ttmp5], 0x8
++ s_load_dwordx2 [ttmp2, ttmp3], [ttmp4, ttmp5], 0x0 glc:1 // second-level TBA
++ s_waitcnt lgkmcnt(0)
++ s_load_dwordx2 [ttmp4, ttmp5], [ttmp4, ttmp5], 0x8 glc:1 // second-level TMA
++ s_waitcnt lgkmcnt(0)
++ s_and_b64 [ttmp2, ttmp3], [ttmp2, ttmp3], [ttmp2, ttmp3]
++ s_cbranch_scc0 L_NO_NEXT_TRAP // second-level trap handler not been set
++ s_setpc_b64 [ttmp2, ttmp3] // jump to second-level trap handler
+
+ L_NO_NEXT_TRAP:
+ s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
+@@ -286,8 +304,18 @@ L_NO_NEXT_TRAP:
+ s_addc_u32 ttmp1, ttmp1, 0
+ L_EXCP_CASE:
+ s_and_b32 ttmp1, ttmp1, 0xFFFF
+- s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status //restore HW status(SCC)
+- s_rfe_b64 [ttmp0, ttmp1]
++
++ // Restore SQ_WAVE_IB_STS.
++ s_lshr_b32 ttmp2, ttmp11, (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT)
++ s_and_b32 ttmp2, ttmp2, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
++ s_setreg_b32 hwreg(HW_REG_IB_STS), ttmp2
++
++ // Restore SQ_WAVE_STATUS.
++ s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
++ s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
++ s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status
++
++ s_rfe_b64 [ttmp0, ttmp1]
+ end
+ // ********* End handling of non-CWSR traps *******************
+
+@@ -307,8 +335,6 @@ end
+ s_mov_b32 s_save_tmp, 0 //clear saveCtx bit
+ s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_SAVECTX_SHIFT, 1), s_save_tmp //clear saveCtx bit
+
+- s_mov_b32 s_save_xnack_mask_lo, xnack_mask_lo //save XNACK_MASK
+- s_mov_b32 s_save_xnack_mask_hi, xnack_mask_hi //save XNACK must before any memory operation
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_RCNT_SHIFT, SQ_WAVE_IB_STS_RCNT_SIZE) //save RCNT
+ s_lshl_b32 s_save_tmp, s_save_tmp, S_SAVE_PC_HI_RCNT_SHIFT
+ s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp
+@@ -350,7 +376,6 @@ if G8SR_DEBUG_TIMESTAMP
+ s_waitcnt lgkmcnt(0)
+ end
+
+- /* setup Resource Contants */
+ if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_SAVE_SINGLE_WAVE))
+ //calculate wd_addr using absolute thread id
+ v_readlane_b32 s_save_tmp, v9, 0
+@@ -368,7 +393,24 @@ end
+ else
+ end
+
++ // Save trap temporaries 6-11, 13-15 initialized by SPI debug dispatch logic
++ // ttmp SR memory offset : size(VGPR)+size(SGPR)+0x40
++ get_vgpr_size_bytes(s_save_ttmps_lo)
++ get_sgpr_size_bytes(s_save_ttmps_hi)
++ s_add_u32 s_save_ttmps_lo, s_save_ttmps_lo, s_save_ttmps_hi
++ s_add_u32 s_save_ttmps_lo, s_save_ttmps_lo, s_save_spi_init_lo
++ s_addc_u32 s_save_ttmps_hi, s_save_spi_init_hi, 0x0
++ s_and_b32 s_save_ttmps_hi, s_save_ttmps_hi, 0xFFFF
++ s_store_dwordx2 [ttmp6, ttmp7], [s_save_ttmps_lo, s_save_ttmps_hi], 0x40 glc:1
++ ack_sqc_store_workaround()
++ s_store_dwordx4 [ttmp8, ttmp9, ttmp10, ttmp11], [s_save_ttmps_lo, s_save_ttmps_hi], 0x48 glc:1
++ ack_sqc_store_workaround()
++ s_store_dword ttmp13, [s_save_ttmps_lo, s_save_ttmps_hi], 0x58 glc:1
++ ack_sqc_store_workaround()
++ s_store_dwordx2 [ttmp14, ttmp15], [s_save_ttmps_lo, s_save_ttmps_hi], 0x5C glc:1
++ ack_sqc_store_workaround()
+
++ /* setup Resource Contants */
+ s_mov_b32 s_save_buf_rsrc0, s_save_spi_init_lo //base_addr_lo
+ s_and_b32 s_save_buf_rsrc1, s_save_spi_init_hi, 0x0000FFFF //base_addr_hi
+ s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE
+@@ -425,8 +467,8 @@ end
+ s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
+ write_hwreg_to_mem(s_save_trapsts, s_save_buf_rsrc0, s_save_mem_offset) //TRAPSTS
+
+- write_hwreg_to_mem(s_save_xnack_mask_lo, s_save_buf_rsrc0, s_save_mem_offset) //XNACK_MASK_LO
+- write_hwreg_to_mem(s_save_xnack_mask_hi, s_save_buf_rsrc0, s_save_mem_offset) //XNACK_MASK_HI
++ write_hwreg_to_mem(xnack_mask_lo, s_save_buf_rsrc0, s_save_mem_offset) //XNACK_MASK_LO
++ write_hwreg_to_mem(xnack_mask_hi, s_save_buf_rsrc0, s_save_mem_offset) //XNACK_MASK_HI
+
+ //use s_save_tmp would introduce conflict here between s_save_tmp and s_save_buf_rsrc2
+ s_getreg_b32 s_save_m0, hwreg(HW_REG_MODE) //MODE
+@@ -502,6 +544,8 @@ end
+ s_mov_b32 s_save_mem_offset, 0
+ s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on
+ s_mov_b32 exec_hi, 0xFFFFFFFF
++ s_mov_b32 xnack_mask_lo, 0x0
++ s_mov_b32 xnack_mask_hi, 0x0
+
+ if (SWIZZLE_EN)
+ s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
+@@ -1038,6 +1082,21 @@ end
+ s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT, SQ_WAVE_TRAPSTS_POST_SAVECTX_SIZE), s_restore_m0
+ //s_setreg_b32 hwreg(HW_REG_TRAPSTS), s_restore_trapsts //don't overwrite SAVECTX bit as it may be set through external SAVECTX during restore
+ s_setreg_b32 hwreg(HW_REG_MODE), s_restore_mode
++
++ // Restore trap temporaries 6-11, 13-15 initialized by SPI debug dispatch logic
++ // ttmp SR memory offset : size(VGPR)+size(SGPR)+0x40
++ get_vgpr_size_bytes(s_restore_ttmps_lo)
++ get_sgpr_size_bytes(s_restore_ttmps_hi)
++ s_add_u32 s_restore_ttmps_lo, s_restore_ttmps_lo, s_restore_ttmps_hi
++ s_add_u32 s_restore_ttmps_lo, s_restore_ttmps_lo, s_restore_buf_rsrc0
++ s_addc_u32 s_restore_ttmps_hi, s_restore_buf_rsrc1, 0x0
++ s_and_b32 s_restore_ttmps_hi, s_restore_ttmps_hi, 0xFFFF
++ s_load_dwordx2 [ttmp6, ttmp7], [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x40 glc:1
++ s_load_dwordx4 [ttmp8, ttmp9, ttmp10, ttmp11], [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x48 glc:1
++ s_load_dword ttmp13, [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x58 glc:1
++ s_load_dwordx2 [ttmp14, ttmp15], [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x5C glc:1
++ s_waitcnt lgkmcnt(0)
++
+ //reuse s_restore_m0 as a temp register
+ s_and_b32 s_restore_m0, s_restore_pc_hi, S_SAVE_PC_HI_RCNT_MASK
+ s_lshr_b32 s_restore_m0, s_restore_m0, S_SAVE_PC_HI_RCNT_SHIFT
+@@ -1085,9 +1144,7 @@ function write_hwreg_to_mem(s, s_rsrc, s_mem_offset)
+ s_mov_b32 exec_lo, m0 //assuming exec_lo is not needed anymore from this point on
+ s_mov_b32 m0, s_mem_offset
+ s_buffer_store_dword s, s_rsrc, m0 glc:1
+-if ACK_SQC_STORE
+- s_waitcnt lgkmcnt(0)
+-end
++ ack_sqc_store_workaround()
+ s_add_u32 s_mem_offset, s_mem_offset, 4
+ s_mov_b32 m0, exec_lo
+ end
+@@ -1097,21 +1154,13 @@ end
+ function write_16sgpr_to_mem(s, s_rsrc, s_mem_offset)
+
+ s_buffer_store_dwordx4 s[0], s_rsrc, 0 glc:1
+-if ACK_SQC_STORE
+- s_waitcnt lgkmcnt(0)
+-end
++ ack_sqc_store_workaround()
+ s_buffer_store_dwordx4 s[4], s_rsrc, 16 glc:1
+-if ACK_SQC_STORE
+- s_waitcnt lgkmcnt(0)
+-end
++ ack_sqc_store_workaround()
+ s_buffer_store_dwordx4 s[8], s_rsrc, 32 glc:1
+-if ACK_SQC_STORE
+- s_waitcnt lgkmcnt(0)
+-end
++ ack_sqc_store_workaround()
+ s_buffer_store_dwordx4 s[12], s_rsrc, 48 glc:1
+-if ACK_SQC_STORE
+- s_waitcnt lgkmcnt(0)
+-end
++ ack_sqc_store_workaround()
+ s_add_u32 s_rsrc[0], s_rsrc[0], 4*16
+ s_addc_u32 s_rsrc[1], s_rsrc[1], 0x0 // +scc
+ end
+@@ -1151,56 +1200,80 @@ function get_hwreg_size_bytes
+ return 128 //HWREG size 128 bytes
+ end
+
++function ack_sqc_store_workaround
++ if ACK_SQC_STORE
++ s_waitcnt lgkmcnt(0)
++ end
++end
+
+
+ #endif
+
+ static const uint32_t cwsr_trap_gfx9_hex[] = {
+- 0xbf820001, 0xbf820130,
+- 0xb8f0f802, 0x89708670,
+- 0xb8f1f803, 0x8674ff71,
+- 0x00000400, 0xbf850023,
+- 0x8674ff71, 0x00000800,
+- 0xbf850003, 0x8674ff71,
+- 0x00000100, 0xbf840009,
+- 0x8674ff70, 0x00002000,
++ 0xbf820001, 0xbf820158,
++ 0xb8f8f802, 0x89788678,
++ 0xb8f1f803, 0x866eff71,
++ 0x00000400, 0xbf850034,
++ 0x866eff71, 0x00000800,
++ 0xbf850003, 0x866eff71,
++ 0x00000100, 0xbf840008,
++ 0x866eff78, 0x00002000,
+ 0xbf840001, 0xbf810000,
+- 0x8770ff70, 0x00002000,
++ 0x8778ff78, 0x00002000,
+ 0x80ec886c, 0x82ed806d,
+- 0xbf820010, 0xb8faf812,
+- 0xb8fbf813, 0x8efa887a,
+- 0xc00a1d3d, 0x00000000,
+- 0xbf8cc07f, 0x87737574,
+- 0xbf840002, 0xb970f802,
+- 0xbe801d74, 0xb8f1f803,
+- 0x8671ff71, 0x000001ff,
+- 0xbf850002, 0x806c846c,
+- 0x826d806d, 0x866dff6d,
+- 0x0000ffff, 0xb970f802,
+- 0xbe801f6c, 0x866dff6d,
+- 0x0000ffff, 0xbef60080,
+- 0xb9760283, 0xbef20068,
+- 0xbef30069, 0xb8f62407,
+- 0x8e769c76, 0x876d766d,
+- 0xb8f603c7, 0x8e769b76,
+- 0x876d766d, 0xb8f6f807,
+- 0x8676ff76, 0x00007fff,
+- 0xb976f807, 0xbeee007e,
+- 0xbeef007f, 0xbefe0180,
+- 0xbf900004, 0xbf8e0002,
+- 0xbf88fffe, 0xbef4007e,
++ 0xb8eef807, 0x866fff6e,
++ 0x001f8000, 0x8e6f8b6f,
++ 0x8977ff77, 0xfc000000,
++ 0x87776f77, 0x896eff6e,
++ 0x001f8000, 0xb96ef807,
++ 0xb8f0f812, 0xb8f1f813,
++ 0x8ef08870, 0xc0071bb8,
++ 0x00000000, 0xbf8cc07f,
++ 0xc0071c38, 0x00000008,
++ 0xbf8cc07f, 0x86ee6e6e,
++ 0xbf840001, 0xbe801d6e,
++ 0xb8f1f803, 0x8671ff71,
++ 0x000001ff, 0xbf850002,
++ 0x806c846c, 0x826d806d,
++ 0x866dff6d, 0x0000ffff,
++ 0x8f6e8b77, 0x866eff6e,
++ 0x001f8000, 0xb96ef807,
++ 0x86fe7e7e, 0x86ea6a6a,
++ 0xb978f802, 0xbe801f6c,
++ 0x866dff6d, 0x0000ffff,
++ 0xbef00080, 0xb9700283,
++ 0xb8f02407, 0x8e709c70,
++ 0x876d706d, 0xb8f003c7,
++ 0x8e709b70, 0x876d706d,
++ 0xb8f0f807, 0x8670ff70,
++ 0x00007fff, 0xb970f807,
++ 0xbeee007e, 0xbeef007f,
++ 0xbefe0180, 0xbf900004,
++ 0xbf8e0002, 0xbf88fffe,
++ 0xb8f02a05, 0x80708170,
++ 0x8e708a70, 0xb8f11605,
++ 0x80718171, 0x8e718671,
++ 0x80707170, 0x80707e70,
++ 0x8271807f, 0x8671ff71,
++ 0x0000ffff, 0xc0471cb8,
++ 0x00000040, 0xbf8cc07f,
++ 0xc04b1d38, 0x00000048,
++ 0xbf8cc07f, 0xc0431e78,
++ 0x00000058, 0xbf8cc07f,
++ 0xc0471eb8, 0x0000005c,
++ 0xbf8cc07f, 0xbef4007e,
+ 0x8675ff7f, 0x0000ffff,
+ 0x8775ff75, 0x00040000,
+ 0xbef60080, 0xbef700ff,
+- 0x00807fac, 0x8676ff7f,
+- 0x08000000, 0x8f768376,
+- 0x87777677, 0x8676ff7f,
+- 0x70000000, 0x8f768176,
+- 0x87777677, 0xbefb007c,
++ 0x00807fac, 0x8670ff7f,
++ 0x08000000, 0x8f708370,
++ 0x87777077, 0x8670ff7f,
++ 0x70000000, 0x8f708170,
++ 0x87777077, 0xbefb007c,
+ 0xbefa0080, 0xb8fa2a05,
+ 0x807a817a, 0x8e7a8a7a,
+- 0xb8f61605, 0x80768176,
+- 0x8e768676, 0x807a767a,
++ 0xb8f01605, 0x80708170,
++ 0x8e708670, 0x807a707a,
+ 0xbef60084, 0xbef600ff,
+ 0x01000000, 0xbefe007c,
+ 0xbefc007a, 0xc0611efa,
+@@ -1221,26 +1294,26 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
+ 0x0000007c, 0xbf8cc07f,
+ 0x807a847a, 0xbefc007e,
+ 0xbefe007c, 0xbefc007a,
+- 0xc0611c3a, 0x0000007c,
++ 0xc0611e3a, 0x0000007c,
+ 0xbf8cc07f, 0x807a847a,
+ 0xbefc007e, 0xb8f1f803,
+ 0xbefe007c, 0xbefc007a,
+ 0xc0611c7a, 0x0000007c,
+ 0xbf8cc07f, 0x807a847a,
+ 0xbefc007e, 0xbefe007c,
+- 0xbefc007a, 0xc0611cba,
++ 0xbefc007a, 0xc0611a3a,
+ 0x0000007c, 0xbf8cc07f,
+ 0x807a847a, 0xbefc007e,
+ 0xbefe007c, 0xbefc007a,
+- 0xc0611cfa, 0x0000007c,
++ 0xc0611a7a, 0x0000007c,
+ 0xbf8cc07f, 0x807a847a,
+ 0xbefc007e, 0xb8fbf801,
+ 0xbefe007c, 0xbefc007a,
+ 0xc0611efa, 0x0000007c,
+ 0xbf8cc07f, 0x807a847a,
+- 0xbefc007e, 0x8676ff7f,
++ 0xbefc007e, 0x8670ff7f,
+ 0x04000000, 0xbeef0080,
+- 0x876f6f76, 0xb8fa2a05,
++ 0x876f6f70, 0xb8fa2a05,
+ 0x807a817a, 0x8e7a8a7a,
+ 0xb8f11605, 0x80718171,
+ 0x8e718471, 0x8e768271,
+@@ -1262,6 +1335,7 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
+ 0xbf0a717c, 0xbf85ffe7,
+ 0xbef40172, 0xbefa0080,
+ 0xbefe00c1, 0xbeff00c1,
++ 0xbee80080, 0xbee90080,
+ 0xbef600ff, 0x01000000,
+ 0xe0724000, 0x7a1d0000,
+ 0xe0724100, 0x7a1d0100,
+@@ -1270,13 +1344,13 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
+ 0xbefe00c1, 0xbeff00c1,
+ 0xb8f14306, 0x8671c171,
+ 0xbf84002c, 0xbf8a0000,
+- 0x8676ff6f, 0x04000000,
++ 0x8670ff6f, 0x04000000,
+ 0xbf840028, 0x8e718671,
+ 0x8e718271, 0xbef60071,
+ 0xb8fa2a05, 0x807a817a,
+- 0x8e7a8a7a, 0xb8f61605,
+- 0x80768176, 0x8e768676,
+- 0x807a767a, 0x807aff7a,
++ 0x8e7a8a7a, 0xb8f01605,
++ 0x80708170, 0x8e708670,
++ 0x807a707a, 0x807aff7a,
+ 0x00000080, 0xbef600ff,
+ 0x01000000, 0xbefc0080,
+ 0xd28c0002, 0x000100c1,
+@@ -1308,24 +1382,24 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
+ 0x7a1d0300, 0x807c847c,
+ 0x807aff7a, 0x00000400,
+ 0xbf0a717c, 0xbf85ffef,
+- 0xbf9c0000, 0xbf8200c5,
++ 0xbf9c0000, 0xbf8200d9,
+ 0xbef4007e, 0x8675ff7f,
+ 0x0000ffff, 0x8775ff75,
+ 0x00040000, 0xbef60080,
+ 0xbef700ff, 0x00807fac,
+- 0x8672ff7f, 0x08000000,
+- 0x8f728372, 0x87777277,
+- 0x8672ff7f, 0x70000000,
+- 0x8f728172, 0x87777277,
+- 0x8672ff7f, 0x04000000,
++ 0x866eff7f, 0x08000000,
++ 0x8f6e836e, 0x87776e77,
++ 0x866eff7f, 0x70000000,
++ 0x8f6e816e, 0x87776e77,
++ 0x866eff7f, 0x04000000,
+ 0xbf84001e, 0xbefe00c1,
+ 0xbeff00c1, 0xb8ef4306,
+ 0x866fc16f, 0xbf840019,
+ 0x8e6f866f, 0x8e6f826f,
+ 0xbef6006f, 0xb8f82a05,
+ 0x80788178, 0x8e788a78,
+- 0xb8f21605, 0x80728172,
+- 0x8e728672, 0x80787278,
++ 0xb8ee1605, 0x806e816e,
++ 0x8e6e866e, 0x80786e78,
+ 0x8078ff78, 0x00000080,
+ 0xbef600ff, 0x01000000,
+ 0xbefc0080, 0xe0510000,
+@@ -1338,7 +1412,7 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
+ 0xb8ef2a05, 0x806f816f,
+ 0x8e6f826f, 0x8e76886f,
+ 0xbef600ff, 0x01000000,
+- 0xbef20078, 0x8078ff78,
++ 0xbeee0078, 0x8078ff78,
+ 0x00000400, 0xbefc0084,
+ 0xbf11087c, 0x806fff6f,
+ 0x00008000, 0xe0524000,
+@@ -1351,14 +1425,14 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
+ 0x807c847c, 0x8078ff78,
+ 0x00000400, 0xbf0a6f7c,
+ 0xbf85ffee, 0xbf9c0000,
+- 0xe0524000, 0x721d0000,
+- 0xe0524100, 0x721d0100,
+- 0xe0524200, 0x721d0200,
+- 0xe0524300, 0x721d0300,
++ 0xe0524000, 0x6e1d0000,
++ 0xe0524100, 0x6e1d0100,
++ 0xe0524200, 0x6e1d0200,
++ 0xe0524300, 0x6e1d0300,
+ 0xb8f82a05, 0x80788178,
+- 0x8e788a78, 0xb8f21605,
+- 0x80728172, 0x8e728672,
+- 0x80787278, 0x80f8c078,
++ 0x8e788a78, 0xb8ee1605,
++ 0x806e816e, 0x8e6e866e,
++ 0x80786e78, 0x80f8c078,
+ 0xb8ef1605, 0x806f816f,
+ 0x8e6f846f, 0x8e76826f,
+ 0xbef600ff, 0x01000000,
+@@ -1372,8 +1446,8 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
+ 0xbe8e2d0e, 0xbf06807c,
+ 0xbf84fff0, 0xb8f82a05,
+ 0x80788178, 0x8e788a78,
+- 0xb8f21605, 0x80728172,
+- 0x8e728672, 0x80787278,
++ 0xb8ee1605, 0x806e816e,
++ 0x8e6e866e, 0x80786e78,
+ 0xbef60084, 0xbef600ff,
+ 0x01000000, 0xc0211bfa,
+ 0x00000078, 0x80788478,
+@@ -1397,14 +1471,24 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
+ 0x000003ff, 0xb96f4803,
+ 0x866f71ff, 0xfffff800,
+ 0x8f6f8b6f, 0xb96fa2c3,
+- 0xb973f801, 0x866fff6d,
++ 0xb973f801, 0xb8ee2a05,
++ 0x806e816e, 0x8e6e8a6e,
++ 0xb8ef1605, 0x806f816f,
++ 0x8e6f866f, 0x806e6f6e,
++ 0x806e746e, 0x826f8075,
++ 0x866fff6f, 0x0000ffff,
++ 0xc0071cb7, 0x00000040,
++ 0xc00b1d37, 0x00000048,
++ 0xc0031e77, 0x00000058,
++ 0xc0071eb7, 0x0000005c,
++ 0xbf8cc07f, 0x866fff6d,
+ 0xf0000000, 0x8f6f9c6f,
+- 0x8e6f906f, 0xbef20080,
+- 0x87726f72, 0x866fff6d,
++ 0x8e6f906f, 0xbeee0080,
++ 0x876e6f6e, 0x866fff6d,
+ 0x08000000, 0x8f6f9b6f,
+- 0x8e6f8f6f, 0x87726f72,
++ 0x8e6f8f6f, 0x876e6f6e,
+ 0x866fff70, 0x00800000,
+- 0x8f6f976f, 0xb972f807,
++ 0x8f6f976f, 0xb96ef807,
+ 0x86fe7e7e, 0x86ea6a6a,
+ 0xb970f802, 0xbf8a0000,
+ 0x95806f6c, 0xbf810000,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4168-drm-amd-powerplay-initialzie-the-dpm-intial-enabled-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4168-drm-amd-powerplay-initialzie-the-dpm-intial-enabled-.patch
new file mode 100644
index 00000000..8405fc30
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4168-drm-amd-powerplay-initialzie-the-dpm-intial-enabled-.patch
@@ -0,0 +1,102 @@
+From ee1009ddef4548d44487aec9e90bbbe7369b7540 Mon Sep 17 00:00:00 2001
+From: Kenneth Feng <kenneth.feng@amd.com>
+Date: Tue, 10 Apr 2018 17:05:36 +0800
+Subject: [PATCH 4168/5725] drm/amd/powerplay: initialize the dpm initial
+ enabled state
+
+To expose the right dpm levels to sysfs
+
+Change-Id: I4dc2209a6236834df387eb3d198ad77242d4c561
+Signed-off-by: Kenneth Feng <kenneth.feng@amd.com>
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+index ce79b92..835d810 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+@@ -545,6 +545,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
+ return -EINVAL);
+
+ dpm_table->dpm_levels[i].value = clock;
++ dpm_table->dpm_levels[i].enabled = true;
+ }
+
+ vega12_init_dpm_state(&(dpm_table->dpm_state));
+@@ -564,6 +565,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
+ return -EINVAL);
+
+ dpm_table->dpm_levels[i].value = clock;
++ dpm_table->dpm_levels[i].enabled = true;
+ }
+
+ vega12_init_dpm_state(&(dpm_table->dpm_state));
+@@ -584,6 +586,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
+ return -EINVAL);
+
+ dpm_table->dpm_levels[i].value = clock;
++ dpm_table->dpm_levels[i].enabled = true;
+ }
+
+ vega12_init_dpm_state(&(dpm_table->dpm_state));
+@@ -604,6 +607,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
+ return -EINVAL);
+
+ dpm_table->dpm_levels[i].value = clock;
++ dpm_table->dpm_levels[i].enabled = true;
+ }
+
+ vega12_init_dpm_state(&(dpm_table->dpm_state));
+@@ -624,6 +628,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
+ return -EINVAL);
+
+ dpm_table->dpm_levels[i].value = clock;
++ dpm_table->dpm_levels[i].enabled = true;
+ }
+
+ vega12_init_dpm_state(&(dpm_table->dpm_state));
+@@ -644,6 +649,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
+ return -EINVAL);
+
+ dpm_table->dpm_levels[i].value = clock;
++ dpm_table->dpm_levels[i].enabled = true;
+ }
+
+ vega12_init_dpm_state(&(dpm_table->dpm_state));
+@@ -665,6 +671,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
+ return -EINVAL);
+
+ dpm_table->dpm_levels[i].value = clock;
++ dpm_table->dpm_levels[i].enabled = true;
+ }
+
+ vega12_init_dpm_state(&(dpm_table->dpm_state));
+@@ -685,6 +692,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
+ return -EINVAL);
+
+ dpm_table->dpm_levels[i].value = clock;
++ dpm_table->dpm_levels[i].enabled = true;
+ }
+
+ vega12_init_dpm_state(&(dpm_table->dpm_state));
+@@ -705,6 +713,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
+ return -EINVAL);
+
+ dpm_table->dpm_levels[i].value = clock;
++ dpm_table->dpm_levels[i].enabled = true;
+ }
+
+ vega12_init_dpm_state(&(dpm_table->dpm_state));
+@@ -725,6 +734,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
+ return -EINVAL);
+
+ dpm_table->dpm_levels[i].value = clock;
++ dpm_table->dpm_levels[i].enabled = true;
+ }
+
+ vega12_init_dpm_state(&(dpm_table->dpm_state));
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4169-drm-amd-powerplay-Get-more-than-8-level-gfxclk-state.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4169-drm-amd-powerplay-Get-more-than-8-level-gfxclk-state.patch
new file mode 100644
index 00000000..2a0d8225
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4169-drm-amd-powerplay-Get-more-than-8-level-gfxclk-state.patch
@@ -0,0 +1,31 @@
+From f12d403a68ec1c93d775d9864893130a3ef1c42f Mon Sep 17 00:00:00 2001
+From: Kenneth Feng <kenneth.feng@amd.com>
+Date: Wed, 4 Apr 2018 15:17:22 +0800
+Subject: [PATCH 4169/5725] drm/amd/powerplay: Get more than 8 level gfxclk
+ states
+
+Apply this on Vega12 to support more than 8 gfx dpm levels
+
+Change-Id: I0a0e1e044b35d27a28a3145b2de365d3be6132cd
+Signed-off-by: Kenneth Feng <kenneth.feng@amd.com>
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
+index bc98b1d..e81ded1 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
+@@ -33,7 +33,7 @@
+ #define WaterMarksExist 1
+ #define WaterMarksLoaded 2
+
+-#define VG12_PSUEDO_NUM_GFXCLK_DPM_LEVELS 8
++#define VG12_PSUEDO_NUM_GFXCLK_DPM_LEVELS 16
+ #define VG12_PSUEDO_NUM_SOCCLK_DPM_LEVELS 8
+ #define VG12_PSUEDO_NUM_DCEFCLK_DPM_LEVELS 8
+ #define VG12_PSUEDO_NUM_UCLK_DPM_LEVELS 4
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4170-amd-powerplay-implement-the-vega12_force_clock_level.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4170-amd-powerplay-implement-the-vega12_force_clock_level.patch
new file mode 100644
index 00000000..38fe6d81
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4170-amd-powerplay-implement-the-vega12_force_clock_level.patch
@@ -0,0 +1,80 @@
+From 0db89649363ff328da386101330377dc63d427ba Mon Sep 17 00:00:00 2001
+From: Kenneth Feng <kenneth.feng@amd.com>
+Date: Mon, 9 Apr 2018 14:53:51 +0800
+Subject: [PATCH 4170/5725] amd/powerplay: implement the
+ vega12_force_clock_level interface
+
+Implement pp_dpm_sclk/pp_dpm_mclk in sysfs to force the
+gfxclk/uclk dpm level on Vega12
+
+Change-Id: I69816de5da21de4264d3e6b6ead2c8ed3e00d742
+Signed-off-by: Kenneth Feng <kenneth.feng@amd.com>
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 42 +++++++++++++++++++++-
+ 1 file changed, 41 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+index 835d810..782e209 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+@@ -1001,15 +1001,55 @@ static uint32_t vega12_find_highest_dpm_level(
+
+ static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
+ {
++ struct vega12_hwmgr *data = hwmgr->backend;
++ if (data->smc_state_table.gfx_boot_level !=
++ data->dpm_table.gfx_table.dpm_state.soft_min_level) {
++ smum_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_SetSoftMinByFreq,
++ PPCLK_GFXCLK<<16 | data->dpm_table.gfx_table.dpm_levels[data->smc_state_table.gfx_boot_level].value);
++ data->dpm_table.gfx_table.dpm_state.soft_min_level =
++ data->smc_state_table.gfx_boot_level;
++ }
++
++ if (data->smc_state_table.mem_boot_level !=
++ data->dpm_table.mem_table.dpm_state.soft_min_level) {
++ smum_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_SetSoftMinByFreq,
++ PPCLK_UCLK<<16 | data->dpm_table.mem_table.dpm_levels[data->smc_state_table.mem_boot_level].value);
++ data->dpm_table.mem_table.dpm_state.soft_min_level =
++ data->smc_state_table.mem_boot_level;
++ }
++
+ return 0;
++
+ }
+
+ static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
+ {
++ struct vega12_hwmgr *data = hwmgr->backend;
++ if (data->smc_state_table.gfx_max_level !=
++ data->dpm_table.gfx_table.dpm_state.soft_max_level) {
++ smum_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_SetSoftMaxByFreq,
++ /* plus the vale by 1 to align the resolution */
++ PPCLK_GFXCLK<<16 | (data->dpm_table.gfx_table.dpm_levels[data->smc_state_table.gfx_max_level].value + 1));
++ data->dpm_table.gfx_table.dpm_state.soft_max_level =
++ data->smc_state_table.gfx_max_level;
++ }
++
++ if (data->smc_state_table.mem_max_level !=
++ data->dpm_table.mem_table.dpm_state.soft_max_level) {
++ smum_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_SetSoftMaxByFreq,
++ /* plus the vale by 1 to align the resolution */
++ PPCLK_UCLK<<16 | (data->dpm_table.mem_table.dpm_levels[data->smc_state_table.mem_max_level].value + 1));
++ data->dpm_table.mem_table.dpm_state.soft_max_level =
++ data->smc_state_table.mem_max_level;
++ }
++
+ return 0;
+ }
+
+-
+ int vega12_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
+ {
+ struct vega12_hwmgr *data =
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4171-drm-amd-display-Update-MST-edid-property-every-time.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4171-drm-amd-display-Update-MST-edid-property-every-time.patch
new file mode 100644
index 00000000..6bb5b0d7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4171-drm-amd-display-Update-MST-edid-property-every-time.patch
@@ -0,0 +1,44 @@
+From 89132ecfd25256cf80ec2502825a9c81baf8c6be Mon Sep 17 00:00:00 2001
+From: "Jerry (Fangzhi) Zuo" <Jerry.Zuo@amd.com>
+Date: Tue, 17 Apr 2018 15:24:49 -0400
+Subject: [PATCH 4171/5725] drm/amd/display: Update MST edid property every
+ time
+
+Extended fix to: "Don't read EDID in atomic_check"
+
+Fix the display property not being observed in the GUI after hot plug.
+
+Call drm_mode_connector_update_edid_property every time in the
+.get_modes hook, because the edid property is removed from the
+usermode ioctl DRM_IOCTL_MODE_GETCONNECTOR each time on
+hot unplug.
+
+Signed-off-by: Jerry (Fangzhi) Zuo <Jerry.Zuo@amd.com>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+index d30dc0c..e57f690 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -264,11 +264,11 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
+ if (aconnector->dc_sink)
+ amdgpu_dm_update_freesync_caps(
+ connector, edid);
+-
+- drm_mode_connector_update_edid_property(
+- &aconnector->base, edid);
+ }
+
++ drm_mode_connector_update_edid_property(
++ &aconnector->base, aconnector->edid);
++
+ ret = dm_connector_update_modes(connector, aconnector->edid);
+
+ return ret;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4172-drm-amd-display-Check-dc_sink-every-time-in-MST-hotp.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4172-drm-amd-display-Check-dc_sink-every-time-in-MST-hotp.patch
new file mode 100644
index 00000000..6f8502cb
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4172-drm-amd-display-Check-dc_sink-every-time-in-MST-hotp.patch
@@ -0,0 +1,68 @@
+From ea179b97793cc28c2d7d4020d3c9ddd86b11bd28 Mon Sep 17 00:00:00 2001
+From: "Jerry (Fangzhi) Zuo" <Jerry.Zuo@amd.com>
+Date: Tue, 17 Apr 2018 15:36:15 -0400
+Subject: [PATCH 4172/5725] drm/amd/display: Check dc_sink every time in MST
+ hotplug
+
+Extended fix to: "Don't read EDID in atomic_check"
+
+Fix the issue of a missing dc_sink in .mode_valid in the hot plug routine.
+
+The dc_sink needs to be checked every time in the .get_modes hook after
+checking the edid, since the edid is not removed on hot unplug but the
+dc_sink is.
+
+Signed-off-by: Jerry (Fangzhi) Zuo <Jerry.Zuo@amd.com>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 16 +++++++++-------
+ 1 file changed, 9 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+index e57f690..a9b76fd 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -237,10 +237,6 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
+
+ if (!aconnector->edid) {
+ struct edid *edid;
+- struct dc_sink *dc_sink;
+- struct dc_sink_init_data init_params = {
+- .link = aconnector->dc_link,
+- .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
+ edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
+
+ if (!edid) {
+@@ -251,11 +247,17 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
+ }
+
+ aconnector->edid = edid;
++ }
+
++ if (!aconnector->dc_sink) {
++ struct dc_sink *dc_sink;
++ struct dc_sink_init_data init_params = {
++ .link = aconnector->dc_link,
++ .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
+ dc_sink = dc_link_add_remote_sink(
+ aconnector->dc_link,
+- (uint8_t *)edid,
+- (edid->extensions + 1) * EDID_LENGTH,
++ (uint8_t *)aconnector->edid,
++ (aconnector->edid->extensions + 1) * EDID_LENGTH,
+ &init_params);
+
+ dc_sink->priv = aconnector;
+@@ -263,7 +265,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
+
+ if (aconnector->dc_sink)
+ amdgpu_dm_update_freesync_caps(
+- connector, edid);
++ connector, aconnector->edid);
+ }
+
+ drm_mode_connector_update_edid_property(
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4173-drm-amd-powerplay-header-file-interface-to-SMU-updat.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4173-drm-amd-powerplay-header-file-interface-to-SMU-updat.patch
new file mode 100644
index 00000000..9dbb35ed
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4173-drm-amd-powerplay-header-file-interface-to-SMU-updat.patch
@@ -0,0 +1,32 @@
+From 7e25e5cc7206056a03b56d2d7f449c72ca93e300 Mon Sep 17 00:00:00 2001
+From: Kenneth Feng <kenneth.feng@amd.com>
+Date: Tue, 17 Apr 2018 21:49:51 +0800
+Subject: [PATCH 4173/5725] drm/amd/powerplay: header file interface to SMU
+ update
+
+Change-Id: I6d309f651dff5f657c1aa424efec2048b9b64a0a
+Signed-off-by: Kenneth Feng <kenneth.feng@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
+index fb696e3..2f8a3b9 100644
+--- a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
+@@ -412,8 +412,10 @@ typedef struct {
+ QuadraticInt_t ReservedEquation2;
+ QuadraticInt_t ReservedEquation3;
+
++ uint16_t MinVoltageUlvGfx;
++ uint16_t MinVoltageUlvSoc;
+
+- uint32_t Reserved[15];
++ uint32_t Reserved[14];
+
+
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4174-drm-amd-powerplay-add-registry-key-to-disable-ACG.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4174-drm-amd-powerplay-add-registry-key-to-disable-ACG.patch
new file mode 100644
index 00000000..9a9d6f8f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4174-drm-amd-powerplay-add-registry-key-to-disable-ACG.patch
@@ -0,0 +1,34 @@
+From 52286b9d1c7e8042645cea9840297986cfb12e5a Mon Sep 17 00:00:00 2001
+From: Kenneth Feng <kenneth.feng@amd.com>
+Date: Fri, 20 Apr 2018 13:55:39 +0800
+Subject: [PATCH 4174/5725] drm/amd/powerplay: add registry key to disable ACG
+
+For the dummy ACG fuses, ACG needs to be disabled, otherwise
+corruption will be caused.
+
+Change-Id: Ic32b138720cada2de510cbda607c681ad409e748
+Signed-off-by: Kenneth Feng <kenneth.feng@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
+index 7fa1ba8..888ddca 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
+@@ -224,6 +224,11 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable
+ ppsmc_pptable->AcgGfxclkSpreadPercent = smc_dpm_table.acggfxclkspreadpercent;
+ ppsmc_pptable->AcgGfxclkSpreadFreq = smc_dpm_table.acggfxclkspreadfreq;
+
++ /* 0xFFFF will disable the ACG feature */
++ if (!(hwmgr->feature_mask & PP_ACG_MASK)) {
++ ppsmc_pptable->AcgThresholdFreqHigh = 0xFFFF;
++ ppsmc_pptable->AcgThresholdFreqLow = 0xFFFF;
++ }
+
+ return 0;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4175-drm-amdgpu-fix-null-pointer-panic-with-direct-fw-loa.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4175-drm-amdgpu-fix-null-pointer-panic-with-direct-fw-loa.patch
new file mode 100644
index 00000000..f197d500
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4175-drm-amdgpu-fix-null-pointer-panic-with-direct-fw-loa.patch
@@ -0,0 +1,55 @@
+From ed65d402d64c102d14ed02863ecec029b5ba955f Mon Sep 17 00:00:00 2001
+From: Huang Rui <ray.huang@amd.com>
+Date: Sun, 8 Apr 2018 14:39:18 +0800
+Subject: [PATCH 4175/5725] drm/amdgpu: fix null pointer panic with direct fw
+ loading on gpu reset
+
+When the system uses direct fw loading, the psp context structure won't
+be initialized, and it is also unable to execute a mode reset.
+
+[ 434.601474] amdgpu 0000:0c:00.0: GPU reset begin!
+[ 434.694326] amdgpu 0000:0c:00.0: GPU reset
+[ 434.743152] BUG: unable to handle kernel NULL pointer dereference at
+0000000000000058
+[ 434.838474] IP: psp_gpu_reset+0xc/0x30 [amdgpu]
+[ 434.893532] PGD 406ed9067
+[ 434.893533] P4D 406ed9067
+[ 434.926376] PUD 400b46067
+[ 434.959217] PMD 0
+[ 435.033379] Oops: 0000 [#1] SMP
+[ 435.072573] Modules linked in: amdgpu(OE) chash(OE) gpu_sched(OE) ttm(OE)
+drm_kms_helper(OE) drm(OE) fb_sys_fops syscopyarea sysfillrect sysimgblt
+rpcsec_gss_krb5 auth_rpcgss nfsv4 nfs lockd grace fscache snd_hda_codec_realtek
+snd_hda_codec_generic snd_hda_codec_hdmi snd_hda_intel snd_hda_codec
+snd_hda_core snd_hwdep snd_pcm edac_mce_amd snd_seq_midi snd_seq_midi_event
+kvm_amd snd_rawmidi kvm irqbypass crct10dif_pclmul crc32_pclmul snd_seq
+ghash_clmulni_intel snd_seq_device pcbc snd_timer eeepc_wmi aesni_intel snd
+asus_wmi aes_x86_64 sparse_keymap crypto_simd glue_helper joydev soundcore
+wmi_bmof cryptd video i2c_piix4 shpchp 8250_dw i2c_designware_platform mac_hid
+i2c_designware_core sunrpc parport_pc ppdev lp parport autofs4 hid_generic igb
+usbhid dca ptp mxm_wmi pps_core ahci hid i2c_algo_bit
+[ 435.931754] libahci wmi
+
+Signed-off-by: Huang Rui <ray.huang@amd.com>
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+index 5992024..4ce246c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+@@ -509,6 +509,9 @@ static int psp_resume(void *handle)
+
+ int psp_gpu_reset(struct amdgpu_device *adev)
+ {
++ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
++ return 0;
++
+ return psp_mode1_reset(&adev->psp);
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4176-drm-amdgpu-use-ctx-bytes_moved.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4176-drm-amdgpu-use-ctx-bytes_moved.patch
new file mode 100644
index 00000000..6f2d6529
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4176-drm-amdgpu-use-ctx-bytes_moved.patch
@@ -0,0 +1,54 @@
+From a9aaf2cea1b7d1e084b2580c0a019d509991d1d3 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Thu, 5 Apr 2018 14:46:41 +0200
+Subject: [PATCH 4176/5725] drm/amdgpu: use ctx bytes_moved
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Instead of the global (inaccurate) counter.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 10 +++-------
+ 1 file changed, 3 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index 8596b38..19265a9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -412,7 +412,6 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
+ struct amdgpu_bo_list_entry *candidate = p->evictable;
+ struct amdgpu_bo *bo = candidate->robj;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+- u64 initial_bytes_moved, bytes_moved;
+ bool update_bytes_moved_vis;
+ uint32_t other;
+
+@@ -436,18 +435,15 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
+ continue;
+
+ /* Good we can try to move this BO somewhere else */
+- amdgpu_ttm_placement_from_domain(bo, other);
+ update_bytes_moved_vis =
+ adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
+ bo->tbo.mem.mem_type == TTM_PL_VRAM &&
+ bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT;
+- initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
++ amdgpu_ttm_placement_from_domain(bo, other);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+- bytes_moved = atomic64_read(&adev->num_bytes_moved) -
+- initial_bytes_moved;
+- p->bytes_moved += bytes_moved;
++ p->bytes_moved += ctx.bytes_moved;
+ if (update_bytes_moved_vis)
+- p->bytes_moved_vis += bytes_moved;
++ p->bytes_moved_vis += ctx.bytes_moved;
+
+ if (unlikely(r))
+ break;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4177-drm-amdgpu-fix-and-cleanup-cpu-visible-VRAM-handling.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4177-drm-amdgpu-fix-and-cleanup-cpu-visible-VRAM-handling.patch
new file mode 100644
index 00000000..74b4f33b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4177-drm-amdgpu-fix-and-cleanup-cpu-visible-VRAM-handling.patch
@@ -0,0 +1,123 @@
+From 7a6ff6773c8cb0b0141e113d9b330896328a79f5 Mon Sep 17 00:00:00 2001
+From: christian koenig <christian.koenig@amd.com>
+Date: Thu, 5 Apr 2018 16:42:03 +0200
+Subject: [PATCH 4177/5725] drm/amdgpu: fix and cleanup cpu visible VRAM
+ handling
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The detection of whether a BO was placed in CPU visible VRAM was incorrect.
+
+Fix it and merge it with the correct detection in amdgpu_ttm.c.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 6 ++----
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 21 +++++++++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 19 +++----------------
+ 3 files changed, 26 insertions(+), 20 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index 19265a9..6a5534e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -382,8 +382,7 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
+
+ p->bytes_moved += ctx.bytes_moved;
+ if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
+- bo->tbo.mem.mem_type == TTM_PL_VRAM &&
+- bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
++ amdgpu_bo_in_cpu_visible_vram(bo))
+ p->bytes_moved_vis += ctx.bytes_moved;
+
+ if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains &&
+@@ -437,8 +436,7 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
+ /* Good we can try to move this BO somewhere else */
+ update_bytes_moved_vis =
+ adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
+- bo->tbo.mem.mem_type == TTM_PL_VRAM &&
+- bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT;
++ amdgpu_bo_in_cpu_visible_vram(bo);
+ amdgpu_ttm_placement_from_domain(bo, other);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ p->bytes_moved += ctx.bytes_moved;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+index 092e853..92873fa 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+@@ -207,6 +207,27 @@ static inline bool amdgpu_bo_gpu_accessible(struct amdgpu_bo *bo)
+ }
+
+ /**
++ * amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM
++ */
++static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
++{
++ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
++ unsigned fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
++ struct drm_mm_node *node = bo->tbo.mem.mm_node;
++ unsigned long pages_left;
++
++ if (bo->tbo.mem.mem_type != TTM_PL_VRAM)
++ return false;
++
++ for (pages_left = bo->tbo.mem.num_pages; pages_left;
++ pages_left -= node->size, node++)
++ if (node->start < fpfn)
++ return true;
++
++ return false;
++}
++
++/**
+ * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
+ */
+ static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index 6c81f2d..aa6d4e0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -241,20 +241,8 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
+ if (!adev->mman.buffer_funcs_enabled) {
+ amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
+ } else if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
+- !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
+- unsigned fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
+- struct drm_mm_node *node = bo->mem.mm_node;
+- unsigned long pages_left;
+-
+- for (pages_left = bo->mem.num_pages;
+- pages_left;
+- pages_left -= node->size, node++) {
+- if (node->start < fpfn)
+- break;
+- }
+-
+- if (!pages_left)
+- goto gtt;
++ !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
++ amdgpu_bo_in_cpu_visible_vram(abo)) {
+
+ /* Try evicting to the CPU inaccessible part of VRAM
+ * first, but only set GTT as busy placement, so this
+@@ -263,12 +251,11 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
+ */
+ amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT);
+- abo->placements[0].fpfn = fpfn;
++ abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
+ abo->placements[0].lpfn = 0;
+ abo->placement.busy_placement = &abo->placements[1];
+ abo->placement.num_busy_placement = 1;
+ } else {
+-gtt:
+ amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
+ }
+ break;
+--
+2.7.4
+
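A standalone sketch of the detection rule the patch above centralizes in amdgpu_bo_in_cpu_visible_vram(): a BO counts as CPU-visible if any of its VRAM nodes starts below the visible-VRAM page threshold. The struct below is a simplified placeholder, not the kernel's drm_mm_node.

#include <stdbool.h>
#include <stddef.h>

struct vram_node { unsigned long start, size; };   /* in pages, placeholder */

static bool in_cpu_visible_vram(const struct vram_node *nodes, size_t count,
                                unsigned long visible_pages)
{
        size_t i;

        /* Any node starting below the visible threshold makes the BO visible. */
        for (i = 0; i < count; i++)
                if (nodes[i].start < visible_pages)
                        return true;
        return false;
}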
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4178-drm-amd-display-Fix-64-bit-division-in-hwss_edp_powe.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4178-drm-amd-display-Fix-64-bit-division-in-hwss_edp_powe.patch
new file mode 100644
index 00000000..e498505a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4178-drm-amd-display-Fix-64-bit-division-in-hwss_edp_powe.patch
@@ -0,0 +1,28 @@
+From 6adefe7e01ef9731bf49bec5b830dd17536800ff Mon Sep 17 00:00:00 2001
+From: Harry Wentland <harry.wentland@amd.com>
+Date: Tue, 10 Apr 2018 16:08:44 -0400
+Subject: [PATCH 4178/5725] drm/amd/display: Fix 64-bit division in
+ hwss_edp_power_control
+
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index b17afcf..1e3ed0e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -857,7 +857,7 @@ void hwss_edp_power_control(
+ dm_get_elapse_time_in_ns(
+ ctx,
+ current_ts,
+- link->link_trace.time_stamp.edp_poweroff) / 1000000;
++ div64_u64(link->link_trace.time_stamp.edp_poweroff, 1000000));
+ unsigned long long wait_time_ms = 0;
+
+ /* max 500ms from LCDVDD off to on */
+--
+2.7.4
+
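For context on why the plain '/' is replaced above: on 32-bit kernels a 64-bit division compiles to a call into a compiler runtime routine the kernel does not link against, so u64 divisions go through the helpers in linux/math64.h. A minimal sketch of the intended conversion (elapsed_ns is a placeholder value, not the driver's variable):

#include <linux/math64.h>

/* Sketch: convert a 64-bit nanosecond delta to milliseconds without a
 * raw 64-bit '/', using the generic kernel helper. */
static inline u64 ns_to_ms(u64 elapsed_ns)
{
        return div64_u64(elapsed_ns, 1000000);
}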
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4179-drm-amd-display-Remove-PRE_VEGA-flag.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4179-drm-amd-display-Remove-PRE_VEGA-flag.patch
new file mode 100644
index 00000000..9d6c49da
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4179-drm-amd-display-Remove-PRE_VEGA-flag.patch
@@ -0,0 +1,51 @@
+From 97bdf63fde47ee460b46c2980613d2b94de8f44b Mon Sep 17 00:00:00 2001
+From: Harry Wentland <harry.wentland@amd.com>
+Date: Mon, 9 Apr 2018 14:27:46 -0400
+Subject: [PATCH 4179/5725] drm/amd/display: Remove PRE_VEGA flag
+
+This is now enabled by default upstream, so the flag is no longer needed.
+
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 ---
+ drivers/gpu/drm/amd/display/Kconfig | 8 --------
+ 2 files changed, 11 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 2bac39ee..1bf1219 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -2210,9 +2210,6 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
+ case CHIP_POLARIS12:
+ case CHIP_TONGA:
+ case CHIP_FIJI:
+-#if defined(CONFIG_DRM_AMD_DC_PRE_VEGA)
+- return amdgpu_dc != 0;
+-#endif
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
+ #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
+index 5f0a690..c3d49f8 100644
+--- a/drivers/gpu/drm/amd/display/Kconfig
++++ b/drivers/gpu/drm/amd/display/Kconfig
+@@ -9,14 +9,6 @@ config DRM_AMD_DC
+ support for AMDGPU.This adds required support for Vega and
+ Raven ASICs.
+
+-config DRM_AMD_DC_PRE_VEGA
+- bool "DC support for Polaris and older ASICs"
+- default y
+- help
+- Choose this option to enable the new DC support for older asics
+- by default. This includes Polaris, Carrizo, Tonga, Bonaire,
+- and Hawaii.
+-
+ config DRM_AMD_DC_FBC
+ bool "AMD FBC - Enable Frame Buffer Compression"
+ depends on DRM_AMD_DC
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4180-drm-amd-display-remove-dummy-is_blanked-to-optimise-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4180-drm-amd-display-remove-dummy-is_blanked-to-optimise-.patch
new file mode 100644
index 00000000..2fac72f0
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4180-drm-amd-display-remove-dummy-is_blanked-to-optimise-.patch
@@ -0,0 +1,75 @@
+From c49a9291e75a1396363d95e5bde011d35f913f29 Mon Sep 17 00:00:00 2001
+From: Shirish S <shirish.s@amd.com>
+Date: Wed, 28 Mar 2018 12:22:22 +0530
+Subject: [PATCH 4180/5725] drm/amd/display: remove dummy is_blanked() to
+ optimise boot time
+
+The is_blanked() hook is a dummy one for the underlay pipe, so when it
+is called the wait loop spins for ~300ms at boot.
+
+This patch removes the dummy implementation and adds missing NULL checks.
+
+Signed-off-by: Shirish S <shirish.s@amd.com>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c | 3 +++
+ drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c | 3 ++-
+ drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c | 7 -------
+ 3 files changed, 5 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+index ebc96b7..481f692 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+@@ -230,6 +230,9 @@ bool hwss_wait_for_blank_complete(
+ {
+ int counter;
+
++ /* Not applicable if the pipe is not primary, save 300ms of boot time */
++ if (!tg->funcs->is_blanked)
++ return true;
+ for (counter = 0; counter < 100; counter++) {
+ if (tg->funcs->is_blanked(tg))
+ break;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
+index 4877243..0275d6d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
+@@ -53,7 +53,8 @@ void dce_pipe_control_lock(struct dc *dc,
+ struct dce_hwseq *hws = dc->hwseq;
+
+ /* Not lock pipe when blank */
+- if (lock && pipe->stream_res.tg->funcs->is_blanked(pipe->stream_res.tg))
++ if (lock && pipe->stream_res.tg->funcs->is_blanked &&
++ pipe->stream_res.tg->funcs->is_blanked(pipe->stream_res.tg))
+ return;
+
+ val = REG_GET_4(BLND_V_UPDATE_LOCK[pipe->stream_res.tg->inst],
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
+index 8ad0481..a3cef60 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
+@@ -648,12 +648,6 @@ static void dce110_timing_generator_v_disable_vga(
+ return;
+ }
+
+-static bool dce110_tg_v_is_blanked(struct timing_generator *tg)
+-{
+- /* Signal comes from the primary pipe, underlay is never blanked. */
+- return false;
+-}
+-
+ /** ********************************************************************************************
+ *
+ * DCE11 Timing Generator Constructor / Destructor
+@@ -670,7 +664,6 @@ static const struct timing_generator_funcs dce110_tg_v_funcs = {
+ .set_early_control = dce110_timing_generator_v_set_early_control,
+ .wait_for_state = dce110_timing_generator_v_wait_for_state,
+ .set_blank = dce110_timing_generator_v_set_blank,
+- .is_blanked = dce110_tg_v_is_blanked,
+ .set_colors = dce110_timing_generator_v_set_colors,
+ .set_overscan_blank_color =
+ dce110_timing_generator_v_set_overscan_color_black,
+--
+2.7.4
+
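The fix above treats is_blanked as an optional hook: instead of installing a dummy implementation that always returns false, the pointer is left NULL and callers check it before dereferencing. A simplified sketch of the pattern (placeholder types, not the dc structures):

#include <stdbool.h>

struct tg;
struct tg_funcs { bool (*is_blanked)(struct tg *tg); };
struct tg { const struct tg_funcs *funcs; };

static bool wait_for_blank(struct tg *tg)
{
        /* No hook (e.g. underlay pipe): nothing to poll, report done. */
        if (!tg->funcs->is_blanked)
                return true;
        return tg->funcs->is_blanked(tg);
}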
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4181-drm-ttm-keep-a-reference-to-transfer-pipelined-BOs.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4181-drm-ttm-keep-a-reference-to-transfer-pipelined-BOs.patch
new file mode 100644
index 00000000..ebb62acd
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4181-drm-ttm-keep-a-reference-to-transfer-pipelined-BOs.patch
@@ -0,0 +1,115 @@
+From 1d6bd2922f4861f64799c61410028e3c86565194 Mon Sep 17 00:00:00 2001
+From: Christian König <christian.koenig@amd.com>
+Date: Fri, 9 Mar 2018 13:39:47 +0100
+Subject: [PATCH 4181/5725] drm/ttm: keep a reference to transfer pipelined BOs
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Make sure the transferred BO is never destroyed before the transfer BO.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Roger He <Hongbo.He@amd.com>
+
+Conflicts:
+ drivers/gpu/drm/ttm/ttm_bo_util.c
+
+Change-Id: I967267305604a98371dd395428ac6729412db03a
+---
+ drivers/gpu/drm/ttm/ttm_bo_util.c | 51 ++++++++++++++++++++++++---------------
+ 1 file changed, 31 insertions(+), 20 deletions(-)
+
+diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
+index de9161f..0b0a72e 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
++++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
+@@ -39,6 +39,11 @@
+ #include <linux/module.h>
+ #include <linux/reservation.h>
+
++struct ttm_transfer_obj {
++ struct ttm_buffer_object base;
++ struct ttm_buffer_object *bo;
++};
++
+ void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
+ {
+ ttm_bo_mem_put(bo, &bo->mem);
+@@ -435,7 +440,11 @@ EXPORT_SYMBOL(ttm_bo_move_memcpy);
+
+ static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
+ {
+- kfree(bo);
++ struct ttm_transfer_obj *fbo;
++
++ fbo = container_of(bo, struct ttm_transfer_obj, base);
++ ttm_bo_unref(&fbo->bo);
++ kfree(fbo);
+ }
+
+ /**
+@@ -456,14 +465,15 @@ static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
+ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
+ struct ttm_buffer_object **new_obj)
+ {
+- struct ttm_buffer_object *fbo;
++ struct ttm_transfer_obj *fbo;
+ int ret;
+
+ fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
+ if (!fbo)
+ return -ENOMEM;
+
+- *fbo = *bo;
++ fbo->base = *bo;
++ fbo->bo = ttm_bo_reference(bo);
+
+ /**
+ * Fix up members that we shouldn't copy directly:
+@@ -471,25 +481,26 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
+ */
+
+ atomic_inc(&bo->bdev->glob->bo_count);
+- INIT_LIST_HEAD(&fbo->ddestroy);
+- INIT_LIST_HEAD(&fbo->lru);
+- INIT_LIST_HEAD(&fbo->swap);
+- INIT_LIST_HEAD(&fbo->io_reserve_lru);
+- mutex_init(&fbo->wu_mutex);
+- fbo->moving = NULL;
+- drm_vma_node_reset(&fbo->vma_node);
+- atomic_set(&fbo->cpu_writers, 0);
+-
+- kref_init(&fbo->list_kref);
+- kref_init(&fbo->kref);
+- fbo->destroy = &ttm_transfered_destroy;
+- fbo->acc_size = 0;
+- fbo->resv = &fbo->ttm_resv;
+- reservation_object_init(fbo->resv);
+- ret = reservation_object_trylock(fbo->resv);
++ INIT_LIST_HEAD(&fbo->base.ddestroy);
++ INIT_LIST_HEAD(&fbo->base.lru);
++ INIT_LIST_HEAD(&fbo->base.swap);
++ INIT_LIST_HEAD(&fbo->base.io_reserve_lru);
++ mutex_init(&fbo->base.wu_mutex);
++ fbo->base.moving = NULL;
++ drm_vma_node_reset(&fbo->base.vma_node);
++ atomic_set(&fbo->base.cpu_writers, 0);
++
++ kref_init(&fbo->base.list_kref);
++ kref_init(&fbo->base.kref);
++ fbo->base.destroy = &ttm_transfered_destroy;
++ fbo->base.acc_size = 0;
++ fbo->base.resv = &fbo->base.ttm_resv;
++ reservation_object_init(fbo->base.resv);
++ ret = reservation_object_trylock(fbo->base.resv);
++
+ WARN_ON(!ret);
+
+- *new_obj = fbo;
++ *new_obj = &fbo->base;
+ return 0;
+ }
+
+--
+2.7.4
+
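The structural change above follows the usual kernel wrapper pattern: embed the base object in a container, hold a reference to the original object, and recover the container in the destroy callback via container_of(). A simplified, non-kernel sketch (placeholder types; reference counting reduced to a comment):

#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct base_obj { void (*destroy)(struct base_obj *bo); };

struct transfer_obj {
        struct base_obj base;     /* embedded base object */
        struct base_obj *origin;  /* reference kept until destroy */
};

static void transfer_destroy(struct base_obj *bo)
{
        struct transfer_obj *fbo = container_of(bo, struct transfer_obj, base);

        /* drop the reference on fbo->origin here, then free fbo (sketch only) */
        (void)fbo;
}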
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4182-drm-amdgpu-gfx9-cache-DB_DEBUG2-and-make-it-availabl.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4182-drm-amdgpu-gfx9-cache-DB_DEBUG2-and-make-it-availabl.patch
new file mode 100644
index 00000000..29d8d9a1
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4182-drm-amdgpu-gfx9-cache-DB_DEBUG2-and-make-it-availabl.patch
@@ -0,0 +1,69 @@
+From 2e02cffd6b608be5897b8e704eeecf36051feadf Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Tue, 10 Apr 2018 10:15:26 -0500
+Subject: [PATCH 4182/5725] drm/amdgpu/gfx9: cache DB_DEBUG2 and make it
+ available to userspace
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Userspace needs to query this value to work around a hw bug in
+certain cases.
+
+Acked-by: Nicolai Hähnle <nicolai.haehnle@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 ++
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 1 +
+ drivers/gpu/drm/amd/amdgpu/soc15.c | 3 +++
+ 3 files changed, 6 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index a866d5d..44ab364 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -899,6 +899,8 @@ struct amdgpu_gfx_config {
+
+ /* gfx configure feature */
+ uint32_t double_offchip_lds_buf;
++ /* cached value of DB_DEBUG2 */
++ uint32_t db_debug2;
+ };
+
+ struct amdgpu_cu_info {
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index f808372..ba036af 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -1675,6 +1675,7 @@ static void gfx_v9_0_gpu_init(struct amdgpu_device *adev)
+
+ gfx_v9_0_setup_rb(adev);
+ gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
++ adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2);
+
+ /* XXX SH_MEM regs */
+ /* where to put LDS, scratch, GPUVM in FSA64 space */
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index 0be00c4..9006576 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -287,6 +287,7 @@ static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
+ { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
+ { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
+ { SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
++ { SOC15_REG_ENTRY(GC, 0, mmDB_DEBUG2)},
+ };
+
+ static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
+@@ -315,6 +316,8 @@ static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
+ } else {
+ if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
+ return adev->gfx.config.gb_addr_config;
++ else if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2))
++ return adev->gfx.config.db_debug2;
+ return RREG32(reg_offset);
+ }
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4183-Revert-drm-amd-display-fix-dereferencing-possible-ER.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4183-Revert-drm-amd-display-fix-dereferencing-possible-ER.patch
new file mode 100644
index 00000000..be9f163c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4183-Revert-drm-amd-display-fix-dereferencing-possible-ER.patch
@@ -0,0 +1,37 @@
+From c703aec41498fd839b250e07cb5df747b027fe7d Mon Sep 17 00:00:00 2001
+From: Harry Wentland <harry.wentland@amd.com>
+Date: Thu, 12 Apr 2018 10:30:09 -0400
+Subject: [PATCH 4183/5725] Revert "drm/amd/display: fix dereferencing possible
+ ERR_PTR()"
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This reverts commit cd2d6c92a8e39d7e50a5af9fcc67d07e6a89e91d.
+
+Cc: Shirish S <shirish.s@amd.com>
+Cc: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 20a1890..4bcd7f4 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -5114,9 +5114,6 @@ static int dm_atomic_check_plane_state_fb(struct drm_atomic_state *state,
+ return -EDEADLK;
+
+ crtc_state = drm_atomic_get_crtc_state(plane_state->state, crtc);
+- if (IS_ERR(crtc_state))
+- return PTR_ERR(crtc_state);
+-
+ if (crtc->primary == plane && crtc_state->active) {
+ if (!plane_state->fb)
+ return -EINVAL;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4184-Revert-drm-amd-display-disable-CRTCs-with-NULL-FB-on.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4184-Revert-drm-amd-display-disable-CRTCs-with-NULL-FB-on.patch
new file mode 100644
index 00000000..c090d58e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4184-Revert-drm-amd-display-disable-CRTCs-with-NULL-FB-on.patch
@@ -0,0 +1,77 @@
+From c0192997187267019f2a5b514211c1839ec470a2 Mon Sep 17 00:00:00 2001
+From: Harry Wentland <harry.wentland@amd.com>
+Date: Thu, 12 Apr 2018 10:46:22 -0400
+Subject: [PATCH 4184/5725] Revert "drm/amd/display: disable CRTCs with NULL FB
+ on their primary plane (V2)"
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This seems to cause flickering and lock-ups for a wide range of users.
+Revert until we've found a proper fix for the flickering and lock-ups.
+
+This reverts commit 36cc549d59864b7161f0e23d710c1c4d1b9cf022.
+
+Cc: Shirish S <shirish.s@amd.com>
+Cc: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+
+Conflicts:
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+
+Change-Id: I200f4cc297e80d0a6673bf286853feaf5a8a8fe1
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 27 -----------------------
+ 1 file changed, 27 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 4bcd7f4..4760ff4 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -5098,30 +5098,6 @@ static int dm_update_planes_state(struct dc *dc,
+ return ret;
+ }
+
+-static int dm_atomic_check_plane_state_fb(struct drm_atomic_state *state,
+- struct drm_crtc *crtc)
+-{
+- struct drm_plane *plane;
+- struct drm_crtc_state *crtc_state;
+-
+- WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));
+-
+- drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
+- struct drm_plane_state *plane_state =
+- drm_atomic_get_plane_state(state, plane);
+-
+- if (IS_ERR(plane_state))
+- return -EDEADLK;
+-
+- crtc_state = drm_atomic_get_crtc_state(plane_state->state, crtc);
+- if (crtc->primary == plane && crtc_state->active) {
+- if (!plane_state->fb)
+- return -EINVAL;
+- }
+- }
+- return 0;
+-}
+-
+ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
+ {
+@@ -5148,9 +5124,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+- ret = dm_atomic_check_plane_state_fb(state, crtc);
+- if (ret)
+- goto fail;
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
+ !new_crtc_state->color_mgmt_changed &&
+ (dm_old_crtc_state->freesync_enabled == dm_new_crtc_state->freesync_enabled))
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4185-drm-amdgpu-add-emit_reg_write_reg_wait-ring-callback.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4185-drm-amdgpu-add-emit_reg_write_reg_wait-ring-callback.patch
new file mode 100644
index 00000000..f65bbc15
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4185-drm-amdgpu-add-emit_reg_write_reg_wait-ring-callback.patch
@@ -0,0 +1,96 @@
+From fee37d98f398e7adbf8ab7af3e2a3cbf2d63efc1 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Tue, 27 Mar 2018 11:58:14 -0500
+Subject: [PATCH 4185/5725] drm/amdgpu: add emit_reg_write_reg_wait ring
+ callback
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This callback writes a value to a register and then reads
+back another register and waits for a value in a single
+operation.
+
+Provide a helper function using two operations for engines
+that don't support this operation.
+
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 +
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 20 ++++++++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 7 +++++++
+ 3 files changed, 28 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 44ab364..526c7a0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -1891,6 +1891,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
+ #define amdgpu_ring_emit_rreg(r, d) (r)->funcs->emit_rreg((r), (d))
+ #define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
+ #define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m))
++#define amdgpu_ring_emit_reg_write_reg_wait(r, d0, d1, v, m) (r)->funcs->emit_reg_write_reg_wait((r), (d0), (d1), (v), (m))
+ #define amdgpu_ring_emit_tmz(r, b) (r)->funcs->emit_tmz((r), (b))
+ #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
+ #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+index d5f526f..49cad08 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+@@ -459,6 +459,26 @@ void amdgpu_ring_lru_touch(struct amdgpu_device *adev, struct amdgpu_ring *ring)
+ spin_unlock(&adev->ring_lru_list_lock);
+ }
+
++/**
++ * amdgpu_ring_emit_reg_write_reg_wait_helper - ring helper
++ *
++ * @adev: amdgpu_device pointer
++ * @reg0: register to write
++ * @reg1: register to wait on
++ * @ref: reference value to write/wait on
++ * @mask: mask to wait on
++ *
++ * Helper for rings that don't support write and wait in a
++ * single oneshot packet.
++ */
++void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
++ uint32_t reg0, uint32_t reg1,
++ uint32_t ref, uint32_t mask)
++{
++ amdgpu_ring_emit_wreg(ring, reg0, ref);
++ amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
++}
++
+ /*
+ * Debugfs info
+ */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+index 4dc3208..6ed21bd 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+@@ -154,6 +154,9 @@ struct amdgpu_ring_funcs {
+ void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
+ void (*emit_reg_wait)(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val, uint32_t mask);
++ void (*emit_reg_write_reg_wait)(struct amdgpu_ring *ring,
++ uint32_t reg0, uint32_t reg1,
++ uint32_t ref, uint32_t mask);
+ void (*emit_tmz)(struct amdgpu_ring *ring, bool start);
+ /* priority functions */
+ void (*set_priority) (struct amdgpu_ring *ring,
+@@ -228,6 +231,10 @@ int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type,
+ int *blacklist, int num_blacklist,
+ bool lru_pipe_order, struct amdgpu_ring **ring);
+ void amdgpu_ring_lru_touch(struct amdgpu_device *adev, struct amdgpu_ring *ring);
++void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
++ uint32_t reg0, uint32_t val0,
++ uint32_t reg1, uint32_t val1);
++
+ static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
+ {
+ int i = 0;
+--
+2.7.4
+
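How engines are expected to consume the new callback (the follow-up patches in this series do exactly this): rings with a combined write+wait packet implement it themselves, everything else points at the generic helper. Illustrative fragment only; the example_* names and the elided callbacks are placeholders.

static const struct amdgpu_ring_funcs example_ring_funcs = {
        /* ...existing callbacks elided... */
        .emit_wreg = example_ring_emit_wreg,
        .emit_reg_wait = example_ring_emit_reg_wait,
        /* no single write+wait packet on this engine: fall back to the helper */
        .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};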
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4186-drm-amdgpu-gfx9-add-emit_reg_write_reg_wait-ring-cal.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4186-drm-amdgpu-gfx9-add-emit_reg_write_reg_wait-ring-cal.patch
new file mode 100644
index 00000000..2fc05496
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4186-drm-amdgpu-gfx9-add-emit_reg_write_reg_wait-ring-cal.patch
@@ -0,0 +1,70 @@
+From 32e132e631bce83c114a4000765db9c6a5ebd027 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Tue, 27 Mar 2018 15:07:50 -0500
+Subject: [PATCH 4186/5725] drm/amdgpu/gfx9: add emit_reg_write_reg_wait ring
+ callback (v2)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This adds support for writing and reading back in a single
+oneshot packet. This is needed to send a tlb invalidation
+and wait for ack in a single operation.
+
+v2: squash the gfx ring stall fix
+
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Emily Deng <Emily.Deng@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index ba036af..c407f1f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -4220,6 +4220,15 @@ static void gfx_v9_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+ gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
+ }
+
++static void gfx_v9_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
++ uint32_t reg0, uint32_t reg1,
++ uint32_t ref, uint32_t mask)
++{
++ int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
++
++ gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1, ref, mask, 0x20);
++}
++
+ static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
+ enum amdgpu_interrupt_state state)
+ {
+@@ -4542,6 +4551,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
+ .emit_tmz = gfx_v9_0_ring_emit_tmz,
+ .emit_wreg = gfx_v9_0_ring_emit_wreg,
+ .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
++ .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
+ };
+
+ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
+@@ -4577,6 +4587,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
+ .set_priority = gfx_v9_0_ring_set_priority_compute,
+ .emit_wreg = gfx_v9_0_ring_emit_wreg,
+ .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
++ .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
+ };
+
+ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
+@@ -4607,6 +4618,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
+ .emit_rreg = gfx_v9_0_ring_emit_rreg,
+ .emit_wreg = gfx_v9_0_ring_emit_wreg,
+ .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
++ .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
+ };
+
+ static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4187-drm-amdgpu-sdma4-add-emit_reg_write_reg_wait-ring-ca.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4187-drm-amdgpu-sdma4-add-emit_reg_write_reg_wait-ring-ca.patch
new file mode 100644
index 00000000..7220a21f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4187-drm-amdgpu-sdma4-add-emit_reg_write_reg_wait-ring-ca.patch
@@ -0,0 +1,38 @@
+From 602ffa309bb1f8f3f84be3539807d8d879586ce2 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Tue, 27 Mar 2018 16:51:41 -0500
+Subject: [PATCH 4187/5725] drm/amdgpu/sdma4: add emit_reg_write_reg_wait ring
+ callback (v2)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This adds support for writing and reading back in a single
+oneshot packet. This is needed to send a tlb invalidation
+and wait for ack in a single operation.
+
+v2: squash sdma hang fix into this patch
+
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Emily Deng <Emily.Deng@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+index 320daec..f2a3800 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+@@ -1616,6 +1616,7 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
+ .pad_ib = sdma_v4_0_ring_pad_ib,
+ .emit_wreg = sdma_v4_0_ring_emit_wreg,
+ .emit_reg_wait = sdma_v4_0_ring_emit_reg_wait,
++ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ };
+
+ static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4188-drm-amdgpu-uvd7-add-emit_reg_write_reg_wait-ring-cal.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4188-drm-amdgpu-uvd7-add-emit_reg_write_reg_wait-ring-cal.patch
new file mode 100644
index 00000000..10ce5343
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4188-drm-amdgpu-uvd7-add-emit_reg_write_reg_wait-ring-cal.patch
@@ -0,0 +1,34 @@
+From 84a6a2a15806a827991ea194e8c75ba5ae894906 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Tue, 27 Mar 2018 17:05:19 -0500
+Subject: [PATCH 4188/5725] drm/amdgpu/uvd7: add emit_reg_write_reg_wait ring
+ callback
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This adds support for writing and reading back using the
+helper since the engine doesn't have a oneshot packet.
+
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+index 4a4fd4b..31e339c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+@@ -1735,6 +1735,7 @@ static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
+ .end_use = amdgpu_uvd_ring_end_use,
+ .emit_wreg = uvd_v7_0_enc_ring_emit_wreg,
+ .emit_reg_wait = uvd_v7_0_enc_ring_emit_reg_wait,
++ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ };
+
+ static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4189-drm-amdgpu-vce4-add-emit_reg_write_reg_wait-ring-cal.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4189-drm-amdgpu-vce4-add-emit_reg_write_reg_wait-ring-cal.patch
new file mode 100644
index 00000000..4ef961aa
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4189-drm-amdgpu-vce4-add-emit_reg_write_reg_wait-ring-cal.patch
@@ -0,0 +1,34 @@
+From 63b47c4664de5e3419043a00c2d16ab7c4df03d5 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Tue, 27 Mar 2018 17:06:33 -0500
+Subject: [PATCH 4189/5725] drm/amdgpu/vce4: add emit_reg_write_reg_wait ring
+ callback
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This adds support for writing and reading back using the
+helper since the engine doesn't have a oneshot packet.
+
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+index 73fd48d..8fd1b74 100755
+--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+@@ -1081,6 +1081,7 @@ static const struct amdgpu_ring_funcs vce_v4_0_ring_vm_funcs = {
+ .end_use = amdgpu_vce_ring_end_use,
+ .emit_wreg = vce_v4_0_emit_wreg,
+ .emit_reg_wait = vce_v4_0_emit_reg_wait,
++ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ };
+
+ static void vce_v4_0_set_ring_funcs(struct amdgpu_device *adev)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4190-drm-amdgpu-vcn1-add-emit_reg_write_reg_wait-ring-cal.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4190-drm-amdgpu-vcn1-add-emit_reg_write_reg_wait-ring-cal.patch
new file mode 100644
index 00000000..cbb042f4
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4190-drm-amdgpu-vcn1-add-emit_reg_write_reg_wait-ring-cal.patch
@@ -0,0 +1,34 @@
+From 48c8409329aaee0b1def4b56a93df3b50b588248 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Tue, 27 Mar 2018 17:06:52 -0500
+Subject: [PATCH 4190/5725] drm/amdgpu/vcn1: add emit_reg_write_reg_wait ring
+ callback
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This adds support for writing and reading back using the
+helper since the engine doesn't have a oneshot packet.
+
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index 9de2dac..48c469f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -1177,6 +1177,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
+ .end_use = amdgpu_vcn_ring_end_use,
+ .emit_wreg = vcn_v1_0_enc_ring_emit_wreg,
+ .emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait,
++ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ };
+
+ static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4191-drm-amdgpu-gmc9-use-amdgpu_ring_emit_reg_write_reg_w.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4191-drm-amdgpu-gmc9-use-amdgpu_ring_emit_reg_write_reg_w.patch
new file mode 100644
index 00000000..70130329
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4191-drm-amdgpu-gmc9-use-amdgpu_ring_emit_reg_write_reg_w.patch
@@ -0,0 +1,43 @@
+From b3e03fc1e3e59f737bd820241fcd7fb069d06183 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Tue, 27 Mar 2018 17:10:56 -0500
+Subject: [PATCH 4191/5725] drm/amdgpu/gmc9: use
+ amdgpu_ring_emit_reg_write_reg_wait in gpu tlb flush
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Use amdgpu_ring_emit_reg_write_reg_wait. On engines that support it,
+it provides a write and wait in a single packet which avoids a missed
+ack if a world switch happens between the request and waiting for the
+ack.
+
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 8 +++-----
+ 1 file changed, 3 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index 4017c9c..2d40733 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -384,11 +384,9 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
+ amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
+ upper_32_bits(pd_addr));
+
+- amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_req + eng, req);
+-
+- /* wait for the invalidate to complete */
+- amdgpu_ring_emit_reg_wait(ring, hub->vm_inv_eng0_ack + eng,
+- 1 << vmid, 1 << vmid);
++ amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
++ hub->vm_inv_eng0_ack + eng,
++ req, 1 << vmid);
+
+ return pd_addr;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4192-drm-amdgpu-gmc-steal-the-appropriate-amount-of-vram-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4192-drm-amdgpu-gmc-steal-the-appropriate-amount-of-vram-.patch
new file mode 100644
index 00000000..1fd03e1e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4192-drm-amdgpu-gmc-steal-the-appropriate-amount-of-vram-.patch
@@ -0,0 +1,280 @@
+From 902cb65faf2a55f952e0101e8aee55d2852bf1be Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexdeucher@gmail.com>
+Date: Fri, 6 Apr 2018 14:54:09 -0500
+Subject: [PATCH 4192/5725] drm/amdgpu/gmc: steal the appropriate amount of
+ vram for fw hand-over (v2)
+
+Steal 9 MB for vga emulation and fb if vga is enabled, otherwise,
+steal enough to cover the current display size as set by the vbios.
+
+If no memory is in use (e.g., a secondary or headless card), skip
+the stolen memory reservation.
+
+v2: skip reservation if vram is limited, address Christian's comments
+
+Reviewed-and-Tested-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com> (v2)
+Signed-off-by: Alex Deucher <alexdeucher@gmail.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 14 +++++----
+ drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 23 +++++++++++++--
+ drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 23 +++++++++++++--
+ drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 23 +++++++++++++--
+ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 51 +++++++++++++++++++++++++++++----
+ 5 files changed, 116 insertions(+), 18 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index aa6d4e0..53b0fcc 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -1642,12 +1642,14 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
+ return r;
+ }
+
+- r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE,
+- AMDGPU_GEM_DOMAIN_VRAM,
+- &adev->stolen_vga_memory,
+- NULL, NULL);
+- if (r)
+- return r;
++ if (adev->gmc.stolen_size) {
++ r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE,
++ AMDGPU_GEM_DOMAIN_VRAM,
++ &adev->stolen_vga_memory,
++ NULL, NULL);
++ if (r)
++ return r;
++ }
+
+ DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
+ (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+index 94c6b13..ef77f79 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+@@ -824,6 +824,25 @@ static int gmc_v6_0_late_init(void *handle)
+ return 0;
+ }
+
++static unsigned gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev)
++{
++ u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
++ unsigned size;
++
++ if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
++ size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
++ } else {
++ u32 viewport = RREG32(mmVIEWPORT_SIZE);
++ size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
++ REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
++ 4);
++ }
++ /* return 0 if the pre-OS buffer uses up most of vram */
++ if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
++ return 0;
++ return size;
++}
++
+ static int gmc_v6_0_sw_init(void *handle)
+ {
+ int r;
+@@ -850,8 +869,6 @@ static int gmc_v6_0_sw_init(void *handle)
+
+ adev->gmc.mc_mask = 0xffffffffffULL;
+
+- adev->gmc.stolen_size = 256 * 1024;
+-
+ adev->need_dma32 = false;
+ dma_bits = adev->need_dma32 ? 32 : 40;
+ r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
+@@ -876,6 +893,8 @@ static int gmc_v6_0_sw_init(void *handle)
+ if (r)
+ return r;
+
++ adev->gmc.stolen_size = gmc_v6_0_get_vbios_fb_size(adev);
++
+ r = amdgpu_bo_init(adev);
+ if (r)
+ return r;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+index a1348d5..cee1aec 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+@@ -966,6 +966,25 @@ static int gmc_v7_0_late_init(void *handle)
+ return 0;
+ }
+
++static unsigned gmc_v7_0_get_vbios_fb_size(struct amdgpu_device *adev)
++{
++ u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
++ unsigned size;
++
++ if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
++ size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
++ } else {
++ u32 viewport = RREG32(mmVIEWPORT_SIZE);
++ size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
++ REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
++ 4);
++ }
++ /* return 0 if the pre-OS buffer uses up most of vram */
++ if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
++ return 0;
++ return size;
++}
++
+ static int gmc_v7_0_sw_init(void *handle)
+ {
+ int r;
+@@ -1000,8 +1019,6 @@ static int gmc_v7_0_sw_init(void *handle)
+ */
+ adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
+
+- adev->gmc.stolen_size = 256 * 1024;
+-
+ /* set DMA mask + need_dma32 flags.
+ * PCIE - can handle 40-bits.
+ * IGP - can handle 40-bits
+@@ -1031,6 +1048,8 @@ static int gmc_v7_0_sw_init(void *handle)
+ if (r)
+ return r;
+
++ adev->gmc.stolen_size = gmc_v7_0_get_vbios_fb_size(adev);
++
+ /* Memory manager */
+ r = amdgpu_bo_init(adev);
+ if (r)
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+index 4e1b464..67f9b74 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+@@ -1061,6 +1061,25 @@ static int gmc_v8_0_late_init(void *handle)
+ return 0;
+ }
+
++static unsigned gmc_v8_0_get_vbios_fb_size(struct amdgpu_device *adev)
++{
++ u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
++ unsigned size;
++
++ if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
++ size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
++ } else {
++ u32 viewport = RREG32(mmVIEWPORT_SIZE);
++ size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
++ REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
++ 4);
++ }
++ /* return 0 if the pre-OS buffer uses up most of vram */
++ if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
++ return 0;
++ return size;
++}
++
+ #define mmMC_SEQ_MISC0_FIJI 0xA71
+
+ static int gmc_v8_0_sw_init(void *handle)
+@@ -1102,8 +1121,6 @@ static int gmc_v8_0_sw_init(void *handle)
+ */
+ adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
+
+- adev->gmc.stolen_size = 256 * 1024;
+-
+ /* set DMA mask + need_dma32 flags.
+ * PCIE - can handle 40-bits.
+ * IGP - can handle 40-bits
+@@ -1133,6 +1150,8 @@ static int gmc_v8_0_sw_init(void *handle)
+ if (r)
+ return r;
+
++ adev->gmc.stolen_size = gmc_v8_0_get_vbios_fb_size(adev);
++
+ /* Memory manager */
+ r = amdgpu_bo_init(adev);
+ if (r)
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index 2d40733..315045a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -56,6 +56,14 @@
+ #define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000700L
+ #define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L
+
++/* add these here since we already include dce12 headers and these are for DCN */
++#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x055d
++#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
++#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0
++#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
++#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL
++#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
++
+ /* XXX Move this macro to VEGA10 header file, which is like vid.h for VI.*/
+ #define AMDGPU_NUM_OF_VMIDS 8
+
+@@ -791,6 +799,41 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
+ return amdgpu_gart_table_vram_alloc(adev);
+ }
+
++static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
++{
++ u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
++ unsigned size;
++
++ if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
++ size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
++ } else {
++ u32 viewport;
++
++ switch (adev->asic_type) {
++ case CHIP_RAVEN:
++ viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
++ size = (REG_GET_FIELD(viewport,
++ HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
++ REG_GET_FIELD(viewport,
++ HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
++ 4);
++ break;
++ case CHIP_VEGA10:
++ case CHIP_VEGA12:
++ default:
++ viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
++ size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
++ REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
++ 4);
++ break;
++ }
++ }
++ /* return 0 if the pre-OS buffer uses up most of vram */
++ if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
++ return 0;
++ return size;
++}
++
+ static int gmc_v9_0_sw_init(void *handle)
+ {
+ int r;
+@@ -842,12 +885,6 @@ static int gmc_v9_0_sw_init(void *handle)
+ */
+ adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
+
+- /*
+- * It needs to reserve 8M stolen memory for vega10
+- * TODO: Figure out how to avoid that...
+- */
+- adev->gmc.stolen_size = 8 * 1024 * 1024;
+-
+ /* set DMA mask + need_dma32 flags.
+ * PCIE - can handle 44-bits.
+ * IGP - can handle 44-bits
+@@ -871,6 +908,8 @@ static int gmc_v9_0_sw_init(void *handle)
+ if (r)
+ return r;
+
++ adev->gmc.stolen_size = gmc_v9_0_get_vbios_fb_size(adev);
++
+ /* Memory manager */
+ r = amdgpu_bo_init(adev);
+ if (r)
+--
+2.7.4
+
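A worked example of the sizing rule added in the get_vbios_fb_size() helpers above: the non-VGA path reserves width * height * 4 bytes for the 32bpp pre-OS surface, and the reservation is dropped entirely if that would leave less than 8 MiB of VRAM. For a 1920x1080 vbios mode that is 1920 * 1080 * 4 = 8,294,400 bytes (about 7.9 MiB). A standalone sketch; the 256 MiB VRAM figure is just an example:

#include <stdio.h>

int main(void)
{
        unsigned long long vram = 256ULL * 1024 * 1024;  /* example VRAM size */
        unsigned long long size = 1920ULL * 1080 * 4;    /* pre-OS 32bpp surface */

        /* Mirror the rule above: skip the reservation if the pre-OS buffer
         * would eat almost all of VRAM. */
        if (vram - size < 8ULL * 1024 * 1024)
                size = 0;

        printf("stolen reservation: %llu bytes\n", size);
        return 0;
}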
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4193-drm-amdgpu-always-allocate-a-PASIDs-for-each-VM-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4193-drm-amdgpu-always-allocate-a-PASIDs-for-each-VM-v2.patch
new file mode 100644
index 00000000..92f9b303
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4193-drm-amdgpu-always-allocate-a-PASIDs-for-each-VM-v2.patch
@@ -0,0 +1,119 @@
+From fd7dbf22505881b9b994ac984dc888698e42479a Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Fri, 5 Jan 2018 14:17:08 +0100
+Subject: [PATCH 4193/5725] drm/amdgpu: always allocate a PASIDs for each VM v2
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Start to always allocate a pasid for each VM.
+
+v2: use dev_warn when we run out of PASIDs
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+
+Conflicts:
+ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+
+Change-Id: Ibf02af359ed52d625677b45b98e174ea3e566aef
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 43 ++++++++++++++++++++++-----------
+ 1 file changed, 29 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index a57cc20..efbed62 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -875,7 +875,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
+ {
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_fpriv *fpriv;
+- int r;
++ int r, pasid;
+
+ file_priv->driver_priv = NULL;
+
+@@ -889,28 +889,25 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
+ goto out_suspend;
+ }
+
+- r = amdgpu_vm_init(adev, &fpriv->vm,
+- AMDGPU_VM_CONTEXT_GFX, 0);
+- if (r) {
+- kfree(fpriv);
+- goto out_suspend;
++ pasid = amdgpu_pasid_alloc(16);
++ if (pasid < 0) {
++ dev_warn(adev->dev, "No more PASIDs available!");
++ pasid = 0;
+ }
++ r = amdgpu_vm_init(adev, &fpriv->vm, AMDGPU_VM_CONTEXT_GFX, pasid);
++ if (r)
++ goto error_pasid;
+
+ fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
+ if (!fpriv->prt_va) {
+ r = -ENOMEM;
+- amdgpu_vm_fini(adev, &fpriv->vm);
+- kfree(fpriv);
+- goto out_suspend;
++ goto error_vm;
+ }
+
+ if (amdgpu_sriov_vf(adev)) {
+ r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va);
+- if (r) {
+- amdgpu_vm_fini(adev, &fpriv->vm);
+- kfree(fpriv);
+- goto out_suspend;
+- }
++ if (r)
++ goto error_vm;
+ }
+
+ mutex_init(&fpriv->bo_list_lock);
+@@ -921,6 +918,16 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
+ amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);
+
+ file_priv->driver_priv = fpriv;
++ goto out_suspend;
++
++error_vm:
++ amdgpu_vm_fini(adev, &fpriv->vm);
++
++error_pasid:
++ if (pasid)
++ amdgpu_pasid_free(pasid);
++
++ kfree(fpriv);
+
+ out_suspend:
+ pm_runtime_mark_last_busy(dev->dev);
+@@ -944,6 +951,8 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
+ struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
+ struct amdgpu_bo_list *list;
+ struct amdgpu_sem *sem;
++ struct amdgpu_bo *pd;
++ unsigned int pasid;
+ int handle;
+
+ if (!fpriv)
+@@ -968,7 +977,13 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
+ amdgpu_bo_unreserve(adev->virt.csa_obj);
+ }
+
++ pasid = fpriv->vm.pasid;
++ pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);
++
+ amdgpu_vm_fini(adev, &fpriv->vm);
++ if (pasid)
++ amdgpu_pasid_free_delayed(pd->tbo.resv, pasid);
++ amdgpu_bo_unref(&pd);
+
+ idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
+ amdgpu_bo_list_free(list);
+--
+2.7.4
+
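The reshuffled open path above also switches to the common kernel unwind style: one success path, ordered error labels, and cleanup written once. A reduced sketch with placeholder helpers (alloc_pasid() returning a negative value stands in for PASID exhaustion, in which case the code continues with PASID 0, as the hunk does):

int alloc_pasid(void);
int init_vm(int pasid);
int add_bo(void);
void fini_vm(void);
void free_pasid(int pasid);

static int open_ctx(void)
{
        int r, pasid;

        pasid = alloc_pasid();
        if (pasid < 0)
                pasid = 0;              /* run without a PASID */

        r = init_vm(pasid);
        if (r)
                goto error_pasid;

        r = add_bo();
        if (r)
                goto error_vm;

        return 0;

error_vm:
        fini_vm();
error_pasid:
        if (pasid)
                free_pasid(pasid);
        return r;
}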
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4194-drm-amdgpu-Free-VGA-stolen-memory-as-soon-as-possibl.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4194-drm-amdgpu-Free-VGA-stolen-memory-as-soon-as-possibl.patch
new file mode 100644
index 00000000..9844443d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4194-drm-amdgpu-Free-VGA-stolen-memory-as-soon-as-possibl.patch
@@ -0,0 +1,200 @@
+From 529b33b4adb036be8b967661a21a34deb21b4c24 Mon Sep 17 00:00:00 2001
+From: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Date: Fri, 6 Apr 2018 14:54:10 -0500
+Subject: [PATCH 4194/5725] drm/amdgpu: Free VGA stolen memory as soon as
+ possible.
+
+Reserved VRAM is used to avoid overwriting the pre-OS framebuffer.
+Once our display stack takes over we don't need the reserved
+VRAM anymore.
+
+v2:
+Remove the comment; we actually know why we need to reserve the stolen VRAM.
+Fix return type for amdgpu_ttm_late_init.
+v3:
+Return 0 in amdgpu_bo_late_init, rebase on changes to previous patch
+v4: rebase
+v5:
+For GMC9 always reserve just 9M and keep the stolen memory around
+until the GART table corruption on S3 resume is resolved.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 7 +++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 1 +
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 7 +++++--
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 1 +
+ drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 2 ++
+ drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 2 ++
+ drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 2 ++
+ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 26 ++++++++++++++++++++++++++
+ 8 files changed, 46 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index 74d05cc..bdffe43 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -894,6 +894,13 @@ int amdgpu_bo_init(struct amdgpu_device *adev)
+ return amdgpu_ttm_init(adev);
+ }
+
++int amdgpu_bo_late_init(struct amdgpu_device *adev)
++{
++ amdgpu_ttm_late_init(adev);
++
++ return 0;
++}
++
+ void amdgpu_bo_fini(struct amdgpu_device *adev)
+ {
+ amdgpu_ttm_fini(adev);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+index 92873fa..291477d 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+@@ -262,6 +262,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
+ int amdgpu_bo_unpin(struct amdgpu_bo *bo);
+ int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
+ int amdgpu_bo_init(struct amdgpu_device *adev);
++int amdgpu_bo_late_init(struct amdgpu_device *adev);
+ void amdgpu_bo_fini(struct amdgpu_device *adev);
+ int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
+ struct vm_area_struct *vma);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index 53b0fcc..2f8d89f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -1724,14 +1724,17 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
+ return 0;
+ }
+
++void amdgpu_ttm_late_init(struct amdgpu_device *adev)
++{
++ amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
++}
++
+ void amdgpu_ttm_fini(struct amdgpu_device *adev)
+ {
+ if (!adev->mman.initialized)
+ return;
+
+ amdgpu_ttm_debugfs_fini(adev);
+-
+- amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
+ amdgpu_ttm_fw_reserve_vram_fini(adev);
+ if (adev->mman.aper_base_kaddr)
+ iounmap(adev->mman.aper_base_kaddr);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+index 9a364e7..265c3ed 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+@@ -81,6 +81,7 @@ uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
+ uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
+
+ int amdgpu_ttm_init(struct amdgpu_device *adev);
++void amdgpu_ttm_late_init(struct amdgpu_device *adev);
+ void amdgpu_ttm_fini(struct amdgpu_device *adev);
+ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,
+ bool enable);
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+index ef77f79..1170699 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+@@ -818,6 +818,8 @@ static int gmc_v6_0_late_init(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
++ amdgpu_bo_late_init(adev);
++
+ if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
+ return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
+ else
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+index cee1aec..22707a4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+@@ -960,6 +960,8 @@ static int gmc_v7_0_late_init(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
++ amdgpu_bo_late_init(adev);
++
+ if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
+ return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
+ else
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+index 67f9b74..2829ae8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+@@ -1055,6 +1055,8 @@ static int gmc_v8_0_late_init(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
++ amdgpu_bo_late_init(adev);
++
+ if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
+ return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
+ else
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index 315045a..263d3ab 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -664,6 +664,11 @@ static int gmc_v9_0_late_init(void *handle)
+ unsigned i;
+ int r;
+
++ /*
++ * TODO - Uncomment once GART corruption issue is fixed.
++ */
++ /* amdgpu_bo_late_init(adev); */
++
+ for(i = 0; i < adev->num_rings; ++i) {
+ struct amdgpu_ring *ring = adev->rings[i];
+ unsigned vmhub = ring->funcs->vmhub;
+@@ -804,6 +809,13 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
+ u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
+ unsigned size;
+
++ /*
++ * TODO Remove once GART corruption is resolved
++ * Check related code in gmc_v9_0_sw_fini
++ * */
++ size = 9 * 1024 * 1024;
++
++#if 0
+ if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
+ size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
+ } else {
+@@ -831,6 +843,8 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
+ /* return 0 if the pre-OS buffer uses up most of vram */
+ if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
+ return 0;
++
++#endif
+ return size;
+ }
+
+@@ -953,6 +967,18 @@ static int gmc_v9_0_sw_fini(void *handle)
+ amdgpu_gem_force_release(adev);
+ amdgpu_vm_manager_fini(adev);
+ gmc_v9_0_gart_fini(adev);
++
++ /*
++ * TODO:
++ * Currently there is a bug where some memory client outside
++ * of the driver writes to first 8M of VRAM on S3 resume,
++ * this overrides GART which by default gets placed in first 8M and
++ * causes VM_FAULTS once GTT is accessed.
++ * Keep the stolen memory reservation until the while this is not solved.
++ * Also check code in gmc_v9_0_get_vbios_fb_size and gmc_v9_0_late_init
++ */
++ amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
++
+ amdgpu_bo_fini(adev);
+
+ return 0;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4195-drm-gpu-sched-fix-force-APP-kill-hang-v4.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4195-drm-gpu-sched-fix-force-APP-kill-hang-v4.patch
new file mode 100644
index 00000000..b0e7a77c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4195-drm-gpu-sched-fix-force-APP-kill-hang-v4.patch
@@ -0,0 +1,398 @@
+From 6cddd831befde582b4da8634487287642e695a36 Mon Sep 17 00:00:00 2001
+From: Emily Deng <Emily.Deng@amd.com>
+Date: Mon, 16 Apr 2018 10:07:02 +0800
+Subject: [PATCH 4195/5725] drm/gpu-sched: fix force APP kill hang(v4)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+issue:
+VMC page faults occur if an application is force killed during a
+3dmark test. The cause is that in entity_fini we manually signal
+all the jobs in the entity's queue, which confuses the sync/dep
+mechanism:
+
+1) A page fault occurs in sdma's clear job, which operates on the
+shadow buffer, because the shadow buffer's GART table is cleaned by
+ttm_bo_release since the fence in its reservation was fake signaled
+by entity_fini() when SIGKILL is received.
+
+2) A page fault occurs in the gfx job because, during the lifetime
+of the gfx job, we manually fake signal all jobs from its entity
+in entity_fini(); thus the unmapping/clear-PTE jobs that depend on those
+result fences are satisfied and sdma starts clearing the PTEs, leading
+to a GFX page fault.
+
+fix:
+1) At least wait in entity_fini() for all already scheduled jobs to
+complete in the SIGKILL case.
+
+2) If a fence is signaled and tries to clear some entity's dependency,
+set that entity guilty to prevent its jobs from really running, since the
+dependency was fake signaled.
+
+v2:
+split drm_sched_entity_fini() into two functions:
+1) The first one does the waiting, removes the entity from the
+runqueue and returns an error when the process was killed.
+2) The second one then goes over the entity, installs it as a
+completion signal for the remaining jobs and signals all jobs
+with an error code.
+
+v3:
+1) Replace fini1 and fini2 with better names.
+2) Call the first part before the VM teardown in
+amdgpu_driver_postclose_kms() and the second part
+after the VM teardown.
+3) Keep the original function drm_sched_entity_fini to
+refine the code.
+
+v4:
+1) Rename entity->finished to entity->last_scheduled.
+2) Rename drm_sched_entity_fini_job_cb() to
+drm_sched_entity_kill_jobs_cb().
+3) Pass NULL to drm_sched_entity_fini_job_cb() if -ENOENT.
+4) Replace the type of entity->fini_status with "int".
+5) Remove the check about entity->finished.
+
+Signed-off-by: Monk Liu <Monk.Liu@amd.com>
+Signed-off-by: Emily Deng <Emily.Deng@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+
+Conflicts:
+ drivers/gpu/drm/scheduler/gpu_scheduler.c
+
+Change-Id: I8c859960c8faf8ab36210f098e6514b455bea171
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 +
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 64 ++++++++++++++++++++++++----
+ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 5 ++-
+ drivers/gpu/drm/scheduler/gpu_scheduler.c | 71 ++++++++++++++++++++++++++-----
+ include/drm/gpu_scheduler.h | 7 +++
+ 5 files changed, 128 insertions(+), 21 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 526c7a0..01496d7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -702,6 +702,8 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
+ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id);
+
+ void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
++void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr);
++void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
+ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
+
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+index 2e705f9..bdeec74 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+@@ -113,8 +113,9 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
+ return r;
+ }
+
+-static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
++static void amdgpu_ctx_fini(struct kref *ref)
+ {
++ struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
+ struct amdgpu_device *adev = ctx->adev;
+ unsigned i, j;
+
+@@ -142,13 +143,11 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
+ kfree(ctx->fences);
+ ctx->fences = NULL;
+
+- for (i = 0; i < adev->num_rings; i++)
+- drm_sched_entity_fini(&adev->rings[i]->sched,
+- &ctx->rings[i].entity);
+-
+ amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr);
+
+ mutex_destroy(&ctx->lock);
++
++ kfree(ctx);
+ }
+
+ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
+@@ -187,12 +186,15 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
+ static void amdgpu_ctx_do_release(struct kref *ref)
+ {
+ struct amdgpu_ctx *ctx;
++ u32 i;
+
+ ctx = container_of(ref, struct amdgpu_ctx, refcount);
+
+- amdgpu_ctx_fini(ctx);
++ for (i = 0; i < ctx->adev->num_rings; i++)
++ drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
++ &ctx->rings[i].entity);
+
+- kfree(ctx);
++ amdgpu_ctx_fini(ref);
+ }
+
+ static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
+@@ -452,16 +454,62 @@ void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
+ idr_init(&mgr->ctx_handles);
+ }
+
++void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
++{
++ struct amdgpu_ctx *ctx;
++ struct idr *idp;
++ uint32_t id, i;
++
++ idp = &mgr->ctx_handles;
++
++ idr_for_each_entry(idp, ctx, id) {
++
++ if (!ctx->adev)
++ return;
++
++ for (i = 0; i < ctx->adev->num_rings; i++)
++ if (kref_read(&ctx->refcount) == 1)
++ drm_sched_entity_do_release(&ctx->adev->rings[i]->sched,
++ &ctx->rings[i].entity);
++ else
++ DRM_ERROR("ctx %p is still alive\n", ctx);
++ }
++}
++
++void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr)
++{
++ struct amdgpu_ctx *ctx;
++ struct idr *idp;
++ uint32_t id, i;
++
++ idp = &mgr->ctx_handles;
++
++ idr_for_each_entry(idp, ctx, id) {
++
++ if (!ctx->adev)
++ return;
++
++ for (i = 0; i < ctx->adev->num_rings; i++)
++ if (kref_read(&ctx->refcount) == 1)
++ drm_sched_entity_cleanup(&ctx->adev->rings[i]->sched,
++ &ctx->rings[i].entity);
++ else
++ DRM_ERROR("ctx %p is still alive\n", ctx);
++ }
++}
++
+ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
+ {
+ struct amdgpu_ctx *ctx;
+ struct idr *idp;
+ uint32_t id;
+
++ amdgpu_ctx_mgr_entity_cleanup(mgr);
++
+ idp = &mgr->ctx_handles;
+
+ idr_for_each_entry(idp, ctx, id) {
+- if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
++ if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
+ DRM_ERROR("ctx %p is still alive\n", ctx);
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index efbed62..79d1060 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -959,8 +959,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
+ return;
+
+ pm_runtime_get_sync(dev->dev);
+-
+- amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
++ amdgpu_ctx_mgr_entity_fini(&fpriv->ctx_mgr);
+
+ if (adev->asic_type != CHIP_RAVEN) {
+ amdgpu_uvd_free_handles(adev, file_priv);
+@@ -981,6 +980,8 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
+ pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);
+
+ amdgpu_vm_fini(adev, &fpriv->vm);
++ amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
++
+ if (pasid)
+ amdgpu_pasid_free_delayed(pd->tbo.resv, pasid);
+ amdgpu_bo_unref(&pd);
+diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
+index 0d95888..f45b4fd 100644
+--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
++++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
+@@ -135,6 +135,8 @@ int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
+ entity->rq = rq;
+ entity->sched = sched;
+ entity->guilty = guilty;
++ entity->fini_status = 0;
++ entity->last_scheduled = NULL;
+
+ spin_lock_init(&entity->rq_lock);
+ spin_lock_init(&entity->queue_lock);
+@@ -196,19 +198,30 @@ static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
+ return true;
+ }
+
++static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
++ struct dma_fence_cb *cb)
++{
++ struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
++ finish_cb);
++ drm_sched_fence_finished(job->s_fence);
++ WARN_ON(job->s_fence->parent);
++ dma_fence_put(&job->s_fence->finished);
++ job->sched->ops->free_job(job);
++}
++
++
+ /**
+ * Destroy a context entity
+ *
+ * @sched Pointer to scheduler instance
+ * @entity The pointer to a valid scheduler entity
+ *
+- * Cleanup and free the allocated resources.
++ * Splitting drm_sched_entity_fini() into two functions, The first one is does the waiting,
++ * removes the entity from the runqueue and returns an error when the process was killed.
+ */
+-void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
++void drm_sched_entity_do_release(struct drm_gpu_scheduler *sched,
+ struct drm_sched_entity *entity)
+ {
+- int r;
+-
+ if (!drm_sched_entity_is_initialized(sched, entity))
+ return;
+ /**
+@@ -216,13 +229,28 @@ void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
+ * queued IBs or discard them on SIGKILL
+ */
+ if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL)
+- r = -ERESTARTSYS;
++ entity->fini_status = -ERESTARTSYS;
+ else
+- r = wait_event_killable(sched->job_scheduled,
++ entity->fini_status = wait_event_killable(sched->job_scheduled,
+ drm_sched_entity_is_idle(entity));
+ drm_sched_entity_set_rq(entity, NULL);
+- if (r) {
++}
++EXPORT_SYMBOL(drm_sched_entity_do_release);
++
++/**
++ * Destroy a context entity
++ *
++ * @sched Pointer to scheduler instance
++ * @entity The pointer to a valid scheduler entity
++ *
++ * The second one then goes over the entity and signals all jobs with an error code.
++ */
++void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
++ struct drm_sched_entity *entity)
++{
++ if (entity->fini_status) {
+ struct drm_sched_job *job;
++ int r;
+
+ /* Park the kernel for a moment to make sure it isn't processing
+ * our enity.
+@@ -240,13 +268,26 @@ void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
+ struct drm_sched_fence *s_fence = job->s_fence;
+ drm_sched_fence_scheduled(s_fence);
+ dma_fence_set_error(&s_fence->finished, -ESRCH);
+- drm_sched_fence_finished(s_fence);
+- WARN_ON(s_fence->parent);
+- dma_fence_put(&s_fence->finished);
+- sched->ops->free_job(job);
++ r = dma_fence_add_callback(entity->last_scheduled, &job->finish_cb,
++ drm_sched_entity_kill_jobs_cb);
++ if (r == -ENOENT)
++ drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
++ else if (r)
++ DRM_ERROR("fence add callback failed (%d)\n", r);
+ }
++
++ dma_fence_put(entity->last_scheduled);
++ entity->last_scheduled = NULL;
+ }
+ }
++EXPORT_SYMBOL(drm_sched_entity_cleanup);
++
++void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
++ struct drm_sched_entity *entity)
++{
++ drm_sched_entity_do_release(sched, entity);
++ drm_sched_entity_cleanup(sched, entity);
++}
+ EXPORT_SYMBOL(drm_sched_entity_fini);
+
+ static void drm_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
+@@ -529,6 +570,10 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
+ spin_unlock(&sched->job_list_lock);
+ fence = sched->ops->run_job(s_job);
+ atomic_inc(&sched->hw_rq_count);
++
++ dma_fence_put(s_job->entity->last_scheduled);
++ s_job->entity->last_scheduled = dma_fence_get(&s_fence->finished);
++
+ if (fence) {
+ s_fence->parent = dma_fence_get(fence);
+ r = dma_fence_add_callback(fence, &s_fence->cb,
+@@ -555,6 +600,7 @@ int drm_sched_job_init(struct drm_sched_job *job,
+ void *owner)
+ {
+ job->sched = sched;
++ job->entity = entity;
+ job->s_priority = entity->rq - sched->sched_rq;
+ job->s_fence = drm_sched_fence_create(entity, owner);
+ if (!job->s_fence)
+@@ -668,6 +714,9 @@ static int drm_sched_main(void *param)
+ fence = sched->ops->run_job(sched_job);
+ drm_sched_fence_scheduled(s_fence);
+
++ dma_fence_put(entity->last_scheduled);
++ entity->last_scheduled = dma_fence_get(&s_fence->finished);
++
+ if (fence) {
+ s_fence->parent = dma_fence_get(fence);
+ r = dma_fence_add_callback(fence, &s_fence->cb,
+diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
+index dfd54fb..1df6229 100644
+--- a/include/drm/gpu_scheduler.h
++++ b/include/drm/gpu_scheduler.h
+@@ -63,6 +63,8 @@ struct drm_sched_entity {
+ struct dma_fence *dependency;
+ struct dma_fence_cb cb;
+ atomic_t *guilty; /* points to ctx's guilty */
++ int fini_status;
++ struct dma_fence *last_scheduled;
+ };
+
+ /**
+@@ -99,6 +101,7 @@ struct drm_sched_job {
+ uint64_t id;
+ atomic_t karma;
+ enum drm_sched_priority s_priority;
++ struct drm_sched_entity *entity;
+ };
+
+ static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
+@@ -148,6 +151,10 @@ int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
+ struct drm_sched_entity *entity,
+ struct drm_sched_rq *rq,
+ uint32_t jobs, atomic_t *guilty);
++void drm_sched_entity_do_release(struct drm_gpu_scheduler *sched,
++ struct drm_sched_entity *entity);
++void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
++ struct drm_sched_entity *entity);
+ void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
+ struct drm_sched_entity *entity);
+ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4196-drm-amdgpu-revert-add-new-bo-flag-that-indicates-BOs.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4196-drm-amdgpu-revert-add-new-bo-flag-that-indicates-BOs.patch
new file mode 100644
index 00000000..5dd9b798
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4196-drm-amdgpu-revert-add-new-bo-flag-that-indicates-BOs.patch
@@ -0,0 +1,65 @@
+From 2718cc2a7419aed3681a854feeb4ac8ad656ee70 Mon Sep 17 00:00:00 2001
+From: christian koenig <christian.koenig@amd.com>
+Date: Tue, 10 Apr 2018 13:42:29 +0200
+Subject: [PATCH 4196/5725] drm/amdgpu: revert "add new bo flag that indicates
+ BOs don't need fallback (v2)"
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This reverts commit 6f51d28bfe8e1a676de5cd877639245bed3cc818.
+
+Makes fallback handling too complicated. This is just a feature for the
+GEM interface and shouldn't leak into the core BO create function.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Acked-by: Chunming Zhou <david1.zhou@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 5 ++---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 7 ++-----
+ 2 files changed, 4 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index 6a5534e..9aa47bd 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -384,9 +384,8 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
+ if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
+ amdgpu_bo_in_cpu_visible_vram(bo))
+ p->bytes_moved_vis += ctx.bytes_moved;
+-
+- if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains &&
+- !(bo->flags & AMDGPU_GEM_CREATE_NO_FALLBACK)) {
++
++ if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
+ domain = bo->allowed_domains;
+ goto retry;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index bdffe43..c8d4278 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -413,8 +413,6 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
+ drm_gem_private_object_init(adev->ddev, &bo->gem_base, size);
+ INIT_LIST_HEAD(&bo->shadow_list);
+ INIT_LIST_HEAD(&bo->va);flags;
+- bo->preferred_domains = preferred_domains;
+- bo->allowed_domains = allowed_domains;
+
+ bo->flags = flags;
+ #ifdef CONFIG_X86_32
+@@ -451,9 +449,8 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
+ r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
+ &bo->placement, page_align, &ctx, acc_size,
+ NULL, resv, &amdgpu_ttm_bo_destroy);
+-
+- if (unlikely(r && r != -ERESTARTSYS) && type == ttm_bo_type_device &&
+- !(flags & AMDGPU_GEM_CREATE_NO_FALLBACK)) {
++
++ if (unlikely(r && r != -ERESTARTSYS) && type == ttm_bo_type_device) {
+ if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
+ flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ goto retry;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4197-drm-amdgpu-revert-Don-t-change-preferred-domian-when.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4197-drm-amdgpu-revert-Don-t-change-preferred-domian-when.patch
new file mode 100644
index 00000000..467a832e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4197-drm-amdgpu-revert-Don-t-change-preferred-domian-when.patch
@@ -0,0 +1,139 @@
+From 995e96a33da84a0239a8c59fe2488673afb7ad95 Mon Sep 17 00:00:00 2001
+From: christian koenig <christian.koenig@amd.com>
+Date: Tue, 10 Apr 2018 13:42:38 +0200
+Subject: [PATCH 4197/5725] drm/amdgpu: revert "Don't change preferred domian
+ when fallback GTT v6"
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This reverts commit 7d1ca1325260a9e9329b10a21e3692e6f188936f.
+
+Makes fallback handling too complicated. This is just a feature for the
+GEM interface and shouldn't leak into the core BO create function.
+
+The intended change to preserve the preferred domains is implemented in
+a follow up patch.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Acked-by: Chunming Zhou <david1.zhou@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+
+Conflicts:
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+
+Change-Id: Ieb6b2bf7a421039a6ef6667566e8be9122242a04
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 16 +++++++++++--
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 38 +++++++++++-------------------
+ 2 files changed, 28 insertions(+), 26 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+index c3e71dd..fb95cba 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+@@ -83,11 +83,23 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
+ }
+ }
+
++retry:
+ r = amdgpu_bo_create(adev, size, alignment, initial_domain,
+ flags, type, resv, &bo);
+ if (r) {
+- DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
+- size, initial_domain, alignment, r);
++ if (r != -ERESTARTSYS) {
++ if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
++ flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
++ goto retry;
++ }
++
++ if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
++ initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
++ goto retry;
++ }
++ DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
++ size, initial_domain, alignment, r);
++ }
+ return r;
+ }
+ *obj = &bo->gem_base;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index c8d4278..f14b27a 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -379,7 +379,6 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
+ struct amdgpu_bo *bo;
+ unsigned long page_align;
+ size_t acc_size;
+- u32 domains, preferred_domains, allowed_domains;
+ int r;
+
+ page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
+@@ -393,7 +392,13 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
+ acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
+ sizeof(struct amdgpu_bo));
+
+- preferred_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
++ bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
++ if (bo == NULL)
++ return -ENOMEM;
++ drm_gem_private_object_init(adev->ddev, &bo->gem_base, size);
++ INIT_LIST_HEAD(&bo->shadow_list);
++ INIT_LIST_HEAD(&bo->va);
++ bo->preferred_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT |
+ AMDGPU_GEM_DOMAIN_CPU |
+ AMDGPU_GEM_DOMAIN_GDS |
+@@ -401,18 +406,11 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
+ AMDGPU_GEM_DOMAIN_OA |
+ AMDGPU_GEM_DOMAIN_DGMA |
+ AMDGPU_GEM_DOMAIN_DGMA_IMPORT);
+- allowed_domains = preferred_domains;
++ bo->allowed_domains = bo->preferred_domains;
++
+ if (type != ttm_bo_type_kernel &&
+- allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
+- allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
+- domains = preferred_domains;
+-retry:
+- bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
+- if (bo == NULL)
+- return -ENOMEM;
+- drm_gem_private_object_init(adev->ddev, &bo->gem_base, size);
+- INIT_LIST_HEAD(&bo->shadow_list);
+- INIT_LIST_HEAD(&bo->va);flags;
++ bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
++ bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
+
+ bo->flags = flags;
+ #ifdef CONFIG_X86_32
+@@ -445,21 +443,13 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
+
+ bo->tbo.bdev = &adev->mman.bdev;
+
+- amdgpu_ttm_placement_from_domain(bo, domains);
++ amdgpu_ttm_placement_from_domain(bo, domain);
++
+ r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
+ &bo->placement, page_align, &ctx, acc_size,
+ NULL, resv, &amdgpu_ttm_bo_destroy);
+
+- if (unlikely(r && r != -ERESTARTSYS) && type == ttm_bo_type_device) {
+- if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
+- flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+- goto retry;
+- } else if (domains != allowed_domains) {
+- domains = allowed_domains;
+- goto retry;
+- }
+- }
+- if (unlikely(r))
++ if (unlikely(r != 0))
+ return r;
+
+ if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4198-drm-amdgpu-re-validate-per-VM-BOs-if-required-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4198-drm-amdgpu-re-validate-per-VM-BOs-if-required-v2.patch
new file mode 100644
index 00000000..41107e4e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4198-drm-amdgpu-re-validate-per-VM-BOs-if-required-v2.patch
@@ -0,0 +1,49 @@
+From e95027d2e5580dcff825de9ddb9d5697f051a4c4 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Mon, 19 Mar 2018 11:49:14 +0100
+Subject: [PATCH 4198/5725] drm/amdgpu: re-validate per VM BOs if required v2
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+If a per-VM BO ends up in an allowed domain, it never moves back into the
+preferred domain.
+
+v2: move the extra handling into amdgpu_vm_bo_update when we exit the
+ state machine. Make memory type handling generic.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 15 ++++++++++++++-
+ 1 file changed, 14 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 1a7d9aba..93f929a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -1585,7 +1585,20 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
+ }
+
+ spin_lock(&vm->status_lock);
+- list_del_init(&bo_va->base.vm_status);
++ if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
++ unsigned mem_type = bo->tbo.mem.mem_type;
++
++ /* If the BO is not in its preferred location add it back to
++ * the evicted list so that it gets validated again on the
++ * next command submission.
++ */
++ if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type)))
++ list_add_tail(&bo_va->base.vm_status, &vm->evicted);
++ else
++ list_del_init(&bo_va->base.vm_status);
++ } else {
++ list_del_init(&bo_va->base.vm_status);
++ }
+ spin_unlock(&vm->status_lock);
+
+ list_splice_init(&bo_va->invalids, &bo_va->valids);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4199-drm-amdgpu-Code-Indentation-change-in-the-function.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4199-drm-amdgpu-Code-Indentation-change-in-the-function.patch
new file mode 100644
index 00000000..205b8151
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4199-drm-amdgpu-Code-Indentation-change-in-the-function.patch
@@ -0,0 +1,34 @@
+From 512e3dd42ad819e22c6f372baba293ce2a586c64 Mon Sep 17 00:00:00 2001
+From: Kalyan Alle <kalyan.alle@amd.com>
+Date: Mon, 16 Apr 2018 12:17:57 +0530
+Subject: [PATCH 4199/5725] drm/amdgpu: Code Indentation change in the function
+ amdgpu_device_ip_late_init
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Code indentation modified: a space was replaced by a tab in
+amdgpu_device_ip_late_init to clear warnings seen while doing
+git am.
+
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 1bf1219..6b221cc5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1819,7 +1819,7 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
+ }
+
+ queue_delayed_work(system_wq, &adev->late_init_work,
+- msecs_to_jiffies(AMDGPU_RESUME_MS));
++ msecs_to_jiffies(AMDGPU_RESUME_MS));
+
+ amdgpu_device_fill_reset_magic(adev);
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4200-drm-amd-display-dal-3.1.42.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4200-drm-amd-display-dal-3.1.42.patch
new file mode 100644
index 00000000..c4bd7045
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4200-drm-amd-display-dal-3.1.42.patch
@@ -0,0 +1,28 @@
+From 76e9453dcb4923806f7c3f4dbbd6889bca9eaea4 Mon Sep 17 00:00:00 2001
+From: Eric Yang <Eric.Yang2@amd.com>
+Date: Tue, 3 Apr 2018 11:36:14 -0400
+Subject: [PATCH 4200/5725] drm/amd/display: dal 3.1.42
+
+Signed-off-by: Eric Yang <Eric.Yang2@amd.com>
+Reviewed-by: Anthony Koo <Anthony.Koo@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index e1463aa..bcddb71 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -38,7 +38,7 @@
+ #include "inc/compressor.h"
+ #include "dml/display_mode_lib.h"
+
+-#define DC_VER "3.1.41"
++#define DC_VER "3.1.42"
+
+ #define MAX_SURFACES 3
+ #define MAX_STREAMS 6
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4201-drm-amd-display-fix-brightness-level-after-resume-fr.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4201-drm-amd-display-fix-brightness-level-after-resume-fr.patch
new file mode 100644
index 00000000..3936b07e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4201-drm-amd-display-fix-brightness-level-after-resume-fr.patch
@@ -0,0 +1,74 @@
+From e00879fc1239810ea04c474b80d0b7702e4ddf80 Mon Sep 17 00:00:00 2001
+From: Roman Li <Roman.Li@amd.com>
+Date: Thu, 29 Mar 2018 10:56:17 -0400
+Subject: [PATCH 4201/5725] drm/amd/display: fix brightness level after resume
+ from suspend
+
+Add a missing call to cache the current backlight values.
+Otherwise the brightness resets to the default value on resume.
+
+Signed-off-by: Roman Li <Roman.Li@amd.com>
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_link.c | 13 +++++++++++++
+ drivers/gpu/drm/amd/display/dc/dc_link.h | 2 ++
+ drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 4 +++-
+ 3 files changed, 18 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index 0cd286f..b44cf52 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -2018,6 +2018,19 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
+ return true;
+ }
+
++bool dc_link_set_abm_disable(const struct dc_link *link)
++{
++ struct dc *core_dc = link->ctx->dc;
++ struct abm *abm = core_dc->res_pool->abm;
++
++ if ((abm == NULL) || (abm->funcs->set_backlight_level == NULL))
++ return false;
++
++ abm->funcs->set_abm_immediate_disable(abm);
++
++ return true;
++}
++
+ bool dc_link_set_psr_enable(const struct dc_link *link, bool enable, bool wait)
+ {
+ struct dc *core_dc = link->ctx->dc;
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
+index eeff987..8a716baa 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
+@@ -141,6 +141,8 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_
+ bool dc_link_set_backlight_level(const struct dc_link *dc_link, uint32_t level,
+ uint32_t frame_ramp, const struct dc_stream_state *stream);
+
++bool dc_link_set_abm_disable(const struct dc_link *dc_link);
++
+ bool dc_link_set_psr_enable(const struct dc_link *dc_link, bool enable, bool wait);
+
+ bool dc_link_get_psr_state(const struct dc_link *dc_link, uint32_t *psr_state);
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index 1e3ed0e..5bed5a3 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -1046,8 +1046,10 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->sink->link;
+
+- if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP)
++ if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
+ link->dc->hwss.edp_backlight_control(link, false);
++ dc_link_set_abm_disable(link);
++ }
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->dp_blank(pipe_ctx->stream_res.stream_enc);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4202-drm-amd-display-Move-dp_pixel_encoding_type-to-strea.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4202-drm-amd-display-Move-dp_pixel_encoding_type-to-strea.patch
new file mode 100644
index 00000000..b8083a02
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4202-drm-amd-display-Move-dp_pixel_encoding_type-to-strea.patch
@@ -0,0 +1,87 @@
+From 4c0fc5f66b68f053eb80b5af79c5997a29b000de Mon Sep 17 00:00:00 2001
+From: Eric Bernstein <eric.bernstein@amd.com>
+Date: Tue, 3 Apr 2018 11:23:11 -0400
+Subject: [PATCH 4202/5725] drm/amd/display: Move dp_pixel_encoding_type to
+ stream_encoder include
+
+Signed-off-by: Eric Bernstein <eric.bernstein@amd.com>
+Reviewed-by: Nikola Cornij <Nikola.Cornij@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h | 17 -----------------
+ .../gpu/drm/amd/display/dc/inc/hw/stream_encoder.h | 19 +++++++++++++++++++
+ 2 files changed, 19 insertions(+), 17 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+index 9fe7302..cf7433e 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+@@ -186,23 +186,6 @@ enum controller_dp_test_pattern {
+ CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA
+ };
+
+-enum dp_pixel_encoding_type {
+- DP_PIXEL_ENCODING_TYPE_RGB444 = 0x00000000,
+- DP_PIXEL_ENCODING_TYPE_YCBCR422 = 0x00000001,
+- DP_PIXEL_ENCODING_TYPE_YCBCR444 = 0x00000002,
+- DP_PIXEL_ENCODING_TYPE_RGB_WIDE_GAMUT = 0x00000003,
+- DP_PIXEL_ENCODING_TYPE_Y_ONLY = 0x00000004,
+- DP_PIXEL_ENCODING_TYPE_YCBCR420 = 0x00000005
+-};
+-
+-enum dp_component_depth {
+- DP_COMPONENT_PIXEL_DEPTH_6BPC = 0x00000000,
+- DP_COMPONENT_PIXEL_DEPTH_8BPC = 0x00000001,
+- DP_COMPONENT_PIXEL_DEPTH_10BPC = 0x00000002,
+- DP_COMPONENT_PIXEL_DEPTH_12BPC = 0x00000003,
+- DP_COMPONENT_PIXEL_DEPTH_16BPC = 0x00000004
+-};
+-
+ enum dc_lut_mode {
+ LUT_BYPASS,
+ LUT_RAM_A,
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+index 5c21336..cfa7ec9 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+@@ -29,11 +29,29 @@
+ #define STREAM_ENCODER_H_
+
+ #include "audio_types.h"
++#include "hw_shared.h"
+
+ struct dc_bios;
+ struct dc_context;
+ struct dc_crtc_timing;
+
++enum dp_pixel_encoding_type {
++ DP_PIXEL_ENCODING_TYPE_RGB444 = 0x00000000,
++ DP_PIXEL_ENCODING_TYPE_YCBCR422 = 0x00000001,
++ DP_PIXEL_ENCODING_TYPE_YCBCR444 = 0x00000002,
++ DP_PIXEL_ENCODING_TYPE_RGB_WIDE_GAMUT = 0x00000003,
++ DP_PIXEL_ENCODING_TYPE_Y_ONLY = 0x00000004,
++ DP_PIXEL_ENCODING_TYPE_YCBCR420 = 0x00000005
++};
++
++enum dp_component_depth {
++ DP_COMPONENT_PIXEL_DEPTH_6BPC = 0x00000000,
++ DP_COMPONENT_PIXEL_DEPTH_8BPC = 0x00000001,
++ DP_COMPONENT_PIXEL_DEPTH_10BPC = 0x00000002,
++ DP_COMPONENT_PIXEL_DEPTH_12BPC = 0x00000003,
++ DP_COMPONENT_PIXEL_DEPTH_16BPC = 0x00000004
++};
++
+ struct encoder_info_frame {
+ /* auxiliary video information */
+ struct dc_info_packet avi;
+@@ -138,6 +156,7 @@ struct stream_encoder_funcs {
+
+ void (*set_avmute)(
+ struct stream_encoder *enc, bool enable);
++
+ };
+
+ #endif /* STREAM_ENCODER_H_ */
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4203-drm-amd-display-Fix-regamma-not-affecting-full-inten.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4203-drm-amd-display-Fix-regamma-not-affecting-full-inten.patch
new file mode 100644
index 00000000..566a23ce
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4203-drm-amd-display-Fix-regamma-not-affecting-full-inten.patch
@@ -0,0 +1,66 @@
+From f6c8e652bc51d783469d2c7a59e014b9af0b2396 Mon Sep 17 00:00:00 2001
+From: "Leo (Sunpeng) Li" <sunpeng.li@amd.com>
+Date: Tue, 3 Apr 2018 16:07:16 -0400
+Subject: [PATCH 4203/5725] drm/amd/display: Fix regamma not affecting
+ full-intensity color values
+
+Hardware understands the regamma LUT as a piecewise linear function,
+with points spaced exponentially along the range. We previously
+programmed the LUT for range [2^-10, 2^0). This causes (normalized)
+color values of 1 (=2^0) to miss the programmed LUT, and fall onto the
+end region.
+
+For DCE, the end region is extrapolated using a single (base, slope)
+pair, using the max y-value from the last point in the curve as base.
+This presents a problem, since this value affects all three color
+channels. Scaling down the intensity of, say, the blue regamma curve
+will not affect its end region. This is especially noticeable when
+using RedShift. It scales down the blue and green channels, but leaves
+full-intensity colors unshifted.
+
+Therefore, extend the range to cover [2^-10, 2^1) by programming another
+hardware segment, containing only one point. That way, we won't be
+hitting the end region.
+
+Note that things are a bit different for DCN, since the end region can
+be set per-channel.
+
+Signed-off-by: Leo (Sunpeng) Li <sunpeng.li@amd.com>
+Reviewed-by: Krunoslav Kovac <Krunoslav.Kovac@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index 5bed5a3..b95dd9f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -456,10 +456,13 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf,
+
+ } else {
+ /* 10 segments
+- * segment is from 2^-10 to 2^0
++ * segment is from 2^-10 to 2^1
++ * We include an extra segment for range [2^0, 2^1). This is to
++ * ensure that colors with normalized values of 1 don't miss the
++ * LUT.
+ */
+ region_start = -10;
+- region_end = 0;
++ region_end = 1;
+
+ seg_distr[0] = 4;
+ seg_distr[1] = 4;
+@@ -471,7 +474,7 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf,
+ seg_distr[7] = 4;
+ seg_distr[8] = 4;
+ seg_distr[9] = 4;
+- seg_distr[10] = -1;
++ seg_distr[10] = 0;
+ seg_distr[11] = -1;
+ seg_distr[12] = -1;
+ seg_distr[13] = -1;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4204-drm-amd-display-add-method-to-check-for-supported-ra.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4204-drm-amd-display-add-method-to-check-for-supported-ra.patch
new file mode 100644
index 00000000..856d4bd9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4204-drm-amd-display-add-method-to-check-for-supported-ra.patch
@@ -0,0 +1,119 @@
+From 88c7014a725334981d5f8c8f05866853601f7357 Mon Sep 17 00:00:00 2001
+From: Anthony Koo <Anthony.Koo@amd.com>
+Date: Wed, 4 Apr 2018 20:59:43 -0400
+Subject: [PATCH 4204/5725] drm/amd/display: add method to check for supported
+ range
+
+Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
+Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ .../drm/amd/display/modules/freesync/freesync.c | 64 ++++++++++++++++++++--
+ .../gpu/drm/amd/display/modules/inc/mod_freesync.h | 7 +++
+ 2 files changed, 65 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+index 5e12e46..4af73a7 100644
+--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
++++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+@@ -168,6 +168,21 @@ static unsigned int calc_v_total_from_duration(
+ return v_total;
+ }
+
++static unsigned long long calc_nominal_field_rate(const struct dc_stream_state *stream)
++{
++ unsigned long long nominal_field_rate_in_uhz = 0;
++
++ /* Calculate nominal field rate for stream */
++ nominal_field_rate_in_uhz = stream->timing.pix_clk_khz;
++ nominal_field_rate_in_uhz *= 1000ULL * 1000ULL * 1000ULL;
++ nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz,
++ stream->timing.h_total);
++ nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz,
++ stream->timing.v_total);
++
++ return nominal_field_rate_in_uhz;
++}
++
+ static void update_v_total_for_static_ramp(
+ struct core_freesync *core_freesync,
+ const struct dc_stream_state *stream,
+@@ -623,12 +638,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+
+ /* Calculate nominal field rate for stream */
+- nominal_field_rate_in_uhz = stream->timing.pix_clk_khz;
+- nominal_field_rate_in_uhz *= 1000ULL * 1000ULL * 1000ULL;
+- nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz,
+- stream->timing.h_total);
+- nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz,
+- stream->timing.v_total);
++ nominal_field_rate_in_uhz = calc_nominal_field_rate(stream);
+
+ min_refresh_in_uhz = in_config->min_refresh_in_uhz;
+ max_refresh_in_uhz = in_config->max_refresh_in_uhz;
+@@ -878,3 +888,45 @@ void mod_freesync_get_settings(struct mod_freesync *mod_freesync,
+ }
+ }
+
++bool mod_freesync_is_valid_range(struct mod_freesync *mod_freesync,
++ const struct dc_stream_state *stream,
++ uint32_t min_refresh_cap_in_uhz,
++ uint32_t max_refresh_cap_in_uhz,
++ uint32_t min_refresh_request_in_uhz,
++ uint32_t max_refresh_request_in_uhz)
++{
++ /* Calculate nominal field rate for stream */
++ unsigned long long nominal_field_rate_in_uhz =
++ calc_nominal_field_rate(stream);
++
++ // Check nominal is within range
++ if (nominal_field_rate_in_uhz > max_refresh_cap_in_uhz ||
++ nominal_field_rate_in_uhz < min_refresh_cap_in_uhz)
++ return false;
++
++ // If nominal is less than max, limit the max allowed refresh rate
++ if (nominal_field_rate_in_uhz < max_refresh_cap_in_uhz)
++ max_refresh_cap_in_uhz = nominal_field_rate_in_uhz;
++
++ // Don't allow min > max
++ if (min_refresh_request_in_uhz > max_refresh_request_in_uhz)
++ return false;
++
++ // Check min is within range
++ if (min_refresh_request_in_uhz > max_refresh_cap_in_uhz ||
++ min_refresh_request_in_uhz < min_refresh_cap_in_uhz)
++ return false;
++
++ // Check max is within range
++ if (max_refresh_request_in_uhz > max_refresh_cap_in_uhz ||
++ max_refresh_request_in_uhz < min_refresh_cap_in_uhz)
++ return false;
++
++ // For variable range, check for at least 10 Hz range
++ if ((max_refresh_request_in_uhz != min_refresh_request_in_uhz) &&
++ (max_refresh_request_in_uhz - min_refresh_request_in_uhz < 10000000))
++ return false;
++
++ return true;
++}
++
+diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
+index bd75ca5..e7d77bb 100644
+--- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
++++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
+@@ -159,4 +159,11 @@ void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync,
+ const struct dc_stream_state *stream,
+ struct mod_vrr_params *in_out_vrr);
+
++bool mod_freesync_is_valid_range(struct mod_freesync *mod_freesync,
++ const struct dc_stream_state *stream,
++ uint32_t min_refresh_cap_in_uhz,
++ uint32_t max_refresh_cap_in_uhz,
++ uint32_t min_refresh_request_in_uhz,
++ uint32_t max_refresh_request_in_uhz);
++
+ #endif
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4205-drm-amd-display-Fix-bug-where-refresh-rate-becomes-f.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4205-drm-amd-display-Fix-bug-where-refresh-rate-becomes-f.patch
new file mode 100644
index 00000000..fd56981a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4205-drm-amd-display-Fix-bug-where-refresh-rate-becomes-f.patch
@@ -0,0 +1,117 @@
+From 6709907d1154021d6f4b4eb3bd5a5c424f2321ce Mon Sep 17 00:00:00 2001
+From: Anthony Koo <Anthony.Koo@amd.com>
+Date: Wed, 4 Apr 2018 21:01:21 -0400
+Subject: [PATCH 4205/5725] drm/amd/display: Fix bug where refresh rate becomes
+ fixed
+
+This issue occurs if the refresh rate range is very small and LFC is not used.
+When frame spikes occur, the refresh rate becomes fixed and does not restore properly.
+
+Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
+Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ .../drm/amd/display/modules/freesync/freesync.c | 43 ++++++++++++----------
+ .../gpu/drm/amd/display/modules/inc/mod_freesync.h | 3 ++
+ 2 files changed, 26 insertions(+), 20 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+index 4af73a7..be6a6c6 100644
+--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
++++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+@@ -168,21 +168,6 @@ static unsigned int calc_v_total_from_duration(
+ return v_total;
+ }
+
+-static unsigned long long calc_nominal_field_rate(const struct dc_stream_state *stream)
+-{
+- unsigned long long nominal_field_rate_in_uhz = 0;
+-
+- /* Calculate nominal field rate for stream */
+- nominal_field_rate_in_uhz = stream->timing.pix_clk_khz;
+- nominal_field_rate_in_uhz *= 1000ULL * 1000ULL * 1000ULL;
+- nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz,
+- stream->timing.h_total);
+- nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz,
+- stream->timing.v_total);
+-
+- return nominal_field_rate_in_uhz;
+-}
+-
+ static void update_v_total_for_static_ramp(
+ struct core_freesync *core_freesync,
+ const struct dc_stream_state *stream,
+@@ -441,10 +426,11 @@ static void apply_fixed_refresh(struct core_freesync *core_freesync,
+ in_out_vrr->adjust.v_total_min;
+ } else {
+ in_out_vrr->adjust.v_total_min =
+- calc_v_total_from_refresh(
+- stream, in_out_vrr->max_refresh_in_uhz);
++ calc_v_total_from_refresh(stream,
++ in_out_vrr->max_refresh_in_uhz);
+ in_out_vrr->adjust.v_total_max =
+- in_out_vrr->adjust.v_total_min;
++ calc_v_total_from_refresh(stream,
++ in_out_vrr->min_refresh_in_uhz);
+ }
+ }
+ }
+@@ -638,7 +624,8 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+
+ /* Calculate nominal field rate for stream */
+- nominal_field_rate_in_uhz = calc_nominal_field_rate(stream);
++ nominal_field_rate_in_uhz =
++ mod_freesync_calc_nominal_field_rate(stream);
+
+ min_refresh_in_uhz = in_config->min_refresh_in_uhz;
+ max_refresh_in_uhz = in_config->max_refresh_in_uhz;
+@@ -888,6 +875,22 @@ void mod_freesync_get_settings(struct mod_freesync *mod_freesync,
+ }
+ }
+
++unsigned long long mod_freesync_calc_nominal_field_rate(
++ const struct dc_stream_state *stream)
++{
++ unsigned long long nominal_field_rate_in_uhz = 0;
++
++ /* Calculate nominal field rate for stream */
++ nominal_field_rate_in_uhz = stream->timing.pix_clk_khz;
++ nominal_field_rate_in_uhz *= 1000ULL * 1000ULL * 1000ULL;
++ nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz,
++ stream->timing.h_total);
++ nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz,
++ stream->timing.v_total);
++
++ return nominal_field_rate_in_uhz;
++}
++
+ bool mod_freesync_is_valid_range(struct mod_freesync *mod_freesync,
+ const struct dc_stream_state *stream,
+ uint32_t min_refresh_cap_in_uhz,
+@@ -897,7 +900,7 @@ bool mod_freesync_is_valid_range(struct mod_freesync *mod_freesync,
+ {
+ /* Calculate nominal field rate for stream */
+ unsigned long long nominal_field_rate_in_uhz =
+- calc_nominal_field_rate(stream);
++ mod_freesync_calc_nominal_field_rate(stream);
+
+ // Check nominal is within range
+ if (nominal_field_rate_in_uhz > max_refresh_cap_in_uhz ||
+diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
+index e7d77bb..85c98af 100644
+--- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
++++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
+@@ -159,6 +159,9 @@ void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync,
+ const struct dc_stream_state *stream,
+ struct mod_vrr_params *in_out_vrr);
+
++unsigned long long mod_freesync_calc_nominal_field_rate(
++ const struct dc_stream_state *stream);
++
+ bool mod_freesync_is_valid_range(struct mod_freesync *mod_freesync,
+ const struct dc_stream_state *stream,
+ uint32_t min_refresh_cap_in_uhz,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4206-drm-amd-display-fix-segfault-on-insufficient-TG-duri.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4206-drm-amd-display-fix-segfault-on-insufficient-TG-duri.patch
new file mode 100644
index 00000000..671dda26
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4206-drm-amd-display-fix-segfault-on-insufficient-TG-duri.patch
@@ -0,0 +1,29 @@
+From a0f68e9ed71f14e26b1811ebd9a360e7b282895d Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Thu, 29 Mar 2018 16:39:10 -0400
+Subject: [PATCH 4206/5725] drm/amd/display: fix segfault on insufficient TG
+ during validation
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index 81c6de7..7f79258 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -1699,7 +1699,7 @@ enum dc_status resource_map_pool_resources(
+ pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream);
+ #endif
+
+- if (pipe_idx < 0)
++ if (pipe_idx < 0 || context->res_ctx.pipe_ctx[pipe_idx].stream_res.tg == NULL)
+ return DC_NO_CONTROLLER_RESOURCE;
+
+ pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4207-drm-amd-display-Fix-bug-that-causes-black-screen.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4207-drm-amd-display-Fix-bug-that-causes-black-screen.patch
new file mode 100644
index 00000000..3f646100
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4207-drm-amd-display-Fix-bug-that-causes-black-screen.patch
@@ -0,0 +1,70 @@
+From f2cb2d95721f2c9a9f59fd999eb06a2bac93c41e Mon Sep 17 00:00:00 2001
+From: Anthony Koo <Anthony.Koo@amd.com>
+Date: Wed, 4 Apr 2018 21:04:42 -0400
+Subject: [PATCH 4207/5725] drm/amd/display: Fix bug that causes black screen
+
+The ignore-MSA bit on a DP display is usually set during SetTimings, but
+there was a case where the module thought the refresh rate was not valid
+and the ignore-MSA bit was not set.
+
+Later, a valid refresh rate range was requested, but since the ignore-MSA bit
+was not set, this caused a black screen.
+
+The issue is with how the module checked for VRR support. Fix up that logic.
+DM should call the new valid_range function to determine if the timing is supported.
+
+Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
+Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ .../gpu/drm/amd/display/modules/freesync/freesync.c | 18 ++++++------------
+ 1 file changed, 6 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+index be6a6c6..4887c88 100644
+--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
++++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+@@ -613,7 +613,6 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
+ {
+ struct core_freesync *core_freesync = NULL;
+ unsigned long long nominal_field_rate_in_uhz = 0;
+- bool nominal_field_rate_in_range = true;
+ unsigned int refresh_range = 0;
+ unsigned int min_refresh_in_uhz = 0;
+ unsigned int max_refresh_in_uhz = 0;
+@@ -638,15 +637,6 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
+ if (max_refresh_in_uhz > nominal_field_rate_in_uhz)
+ max_refresh_in_uhz = nominal_field_rate_in_uhz;
+
+- /* Allow for some rounding error of actual video timing by taking ceil.
+- * For example, 144 Hz mode timing may actually be 143.xxx Hz when
+- * calculated from pixel rate and vertical/horizontal totals, but
+- * this should be allowed instead of blocking FreeSync.
+- */
+- if ((min_refresh_in_uhz / 1000000) >
+- ((nominal_field_rate_in_uhz + 1000000 - 1) / 1000000))
+- nominal_field_rate_in_range = false;
+-
+ // Full range may be larger than current video timing, so cap at nominal
+ if (min_refresh_in_uhz > nominal_field_rate_in_uhz)
+ min_refresh_in_uhz = nominal_field_rate_in_uhz;
+@@ -658,10 +648,14 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
+
+ in_out_vrr->state = in_config->state;
+
+- if ((in_config->state == VRR_STATE_UNSUPPORTED) ||
+- (!nominal_field_rate_in_range)) {
++ if (in_config->state == VRR_STATE_UNSUPPORTED) {
+ in_out_vrr->state = VRR_STATE_UNSUPPORTED;
+ in_out_vrr->supported = false;
++ in_out_vrr->adjust.v_total_min = stream->timing.v_total;
++ in_out_vrr->adjust.v_total_max = stream->timing.v_total;
++
++ return;
++
+ } else {
+ in_out_vrr->min_refresh_in_uhz = min_refresh_in_uhz;
+ in_out_vrr->max_duration_in_us =
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4208-drm-amd-display-change-dml-init-to-use-default-struc.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4208-drm-amd-display-change-dml-init-to-use-default-struc.patch
new file mode 100644
index 00000000..0c1e6272
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4208-drm-amd-display-change-dml-init-to-use-default-struc.patch
@@ -0,0 +1,172 @@
+From 121d20cd5e105a6f99ad03b8f902eef800688697 Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Fri, 23 Mar 2018 15:25:43 -0400
+Subject: [PATCH 4208/5725] drm/amd/display: change dml init to use default
+ structs
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Eric Bernstein <Eric.Bernstein@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ .../gpu/drm/amd/display/dc/dml/display_mode_lib.c | 138 ++++++++++++---------
+ 1 file changed, 76 insertions(+), 62 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
+index c109b2c..fd9d97a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
+@@ -26,75 +26,89 @@
+ #include "display_mode_lib.h"
+ #include "dc_features.h"
+
++static const struct _vcs_dpi_ip_params_st dcn1_0_ip = {
++ .rob_buffer_size_kbytes = 64,
++ .det_buffer_size_kbytes = 164,
++ .dpte_buffer_size_in_pte_reqs = 42,
++ .dpp_output_buffer_pixels = 2560,
++ .opp_output_buffer_lines = 1,
++ .pixel_chunk_size_kbytes = 8,
++ .pte_enable = 1,
++ .pte_chunk_size_kbytes = 2,
++ .meta_chunk_size_kbytes = 2,
++ .writeback_chunk_size_kbytes = 2,
++ .line_buffer_size_bits = 589824,
++ .max_line_buffer_lines = 12,
++ .IsLineBufferBppFixed = 0,
++ .LineBufferFixedBpp = -1,
++ .writeback_luma_buffer_size_kbytes = 12,
++ .writeback_chroma_buffer_size_kbytes = 8,
++ .max_num_dpp = 4,
++ .max_num_wb = 2,
++ .max_dchub_pscl_bw_pix_per_clk = 4,
++ .max_pscl_lb_bw_pix_per_clk = 2,
++ .max_lb_vscl_bw_pix_per_clk = 4,
++ .max_vscl_hscl_bw_pix_per_clk = 4,
++ .max_hscl_ratio = 4,
++ .max_vscl_ratio = 4,
++ .hscl_mults = 4,
++ .vscl_mults = 4,
++ .max_hscl_taps = 8,
++ .max_vscl_taps = 8,
++ .dispclk_ramp_margin_percent = 1,
++ .underscan_factor = 1.10,
++ .min_vblank_lines = 14,
++ .dppclk_delay_subtotal = 90,
++ .dispclk_delay_subtotal = 42,
++ .dcfclk_cstate_latency = 10,
++ .max_inter_dcn_tile_repeaters = 8,
++ .can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = 0,
++ .bug_forcing_LC_req_same_size_fixed = 0,
++};
++
++static const struct _vcs_dpi_soc_bounding_box_st dcn1_0_soc = {
++ .sr_exit_time_us = 9.0,
++ .sr_enter_plus_exit_time_us = 11.0,
++ .urgent_latency_us = 4.0,
++ .writeback_latency_us = 12.0,
++ .ideal_dram_bw_after_urgent_percent = 80.0,
++ .max_request_size_bytes = 256,
++ .downspread_percent = 0.5,
++ .dram_page_open_time_ns = 50.0,
++ .dram_rw_turnaround_time_ns = 17.5,
++ .dram_return_buffer_per_channel_bytes = 8192,
++ .round_trip_ping_latency_dcfclk_cycles = 128,
++ .urgent_out_of_order_return_per_channel_bytes = 256,
++ .channel_interleave_bytes = 256,
++ .num_banks = 8,
++ .num_chans = 2,
++ .vmm_page_size_bytes = 4096,
++ .dram_clock_change_latency_us = 17.0,
++ .writeback_dram_clock_change_latency_us = 23.0,
++ .return_bus_width_bytes = 64,
++};
++
+ static void set_soc_bounding_box(struct _vcs_dpi_soc_bounding_box_st *soc, enum dml_project project)
+ {
+- if (project == DML_PROJECT_RAVEN1) {
+- soc->sr_exit_time_us = 9.0;
+- soc->sr_enter_plus_exit_time_us = 11.0;
+- soc->urgent_latency_us = 4.0;
+- soc->writeback_latency_us = 12.0;
+- soc->ideal_dram_bw_after_urgent_percent = 80.0;
+- soc->max_request_size_bytes = 256;
+- soc->downspread_percent = 0.5;
+- soc->dram_page_open_time_ns = 50.0;
+- soc->dram_rw_turnaround_time_ns = 17.5;
+- soc->dram_return_buffer_per_channel_bytes = 8192;
+- soc->round_trip_ping_latency_dcfclk_cycles = 128;
+- soc->urgent_out_of_order_return_per_channel_bytes = 256;
+- soc->channel_interleave_bytes = 256;
+- soc->num_banks = 8;
+- soc->num_chans = 2;
+- soc->vmm_page_size_bytes = 4096;
+- soc->dram_clock_change_latency_us = 17.0;
+- soc->writeback_dram_clock_change_latency_us = 23.0;
+- soc->return_bus_width_bytes = 64;
+- } else {
+- BREAK_TO_DEBUGGER(); /* Invalid Project Specified */
++ switch (project) {
++ case DML_PROJECT_RAVEN1:
++ *soc = dcn1_0_soc;
++ break;
++ default:
++ ASSERT(0);
++ break;
+ }
+ }
+
+ static void set_ip_params(struct _vcs_dpi_ip_params_st *ip, enum dml_project project)
+ {
+- if (project == DML_PROJECT_RAVEN1) {
+- ip->rob_buffer_size_kbytes = 64;
+- ip->det_buffer_size_kbytes = 164;
+- ip->dpte_buffer_size_in_pte_reqs = 42;
+- ip->dpp_output_buffer_pixels = 2560;
+- ip->opp_output_buffer_lines = 1;
+- ip->pixel_chunk_size_kbytes = 8;
+- ip->pte_enable = 1;
+- ip->pte_chunk_size_kbytes = 2;
+- ip->meta_chunk_size_kbytes = 2;
+- ip->writeback_chunk_size_kbytes = 2;
+- ip->line_buffer_size_bits = 589824;
+- ip->max_line_buffer_lines = 12;
+- ip->IsLineBufferBppFixed = 0;
+- ip->LineBufferFixedBpp = -1;
+- ip->writeback_luma_buffer_size_kbytes = 12;
+- ip->writeback_chroma_buffer_size_kbytes = 8;
+- ip->max_num_dpp = 4;
+- ip->max_num_wb = 2;
+- ip->max_dchub_pscl_bw_pix_per_clk = 4;
+- ip->max_pscl_lb_bw_pix_per_clk = 2;
+- ip->max_lb_vscl_bw_pix_per_clk = 4;
+- ip->max_vscl_hscl_bw_pix_per_clk = 4;
+- ip->max_hscl_ratio = 4;
+- ip->max_vscl_ratio = 4;
+- ip->hscl_mults = 4;
+- ip->vscl_mults = 4;
+- ip->max_hscl_taps = 8;
+- ip->max_vscl_taps = 8;
+- ip->dispclk_ramp_margin_percent = 1;
+- ip->underscan_factor = 1.10;
+- ip->min_vblank_lines = 14;
+- ip->dppclk_delay_subtotal = 90;
+- ip->dispclk_delay_subtotal = 42;
+- ip->dcfclk_cstate_latency = 10;
+- ip->max_inter_dcn_tile_repeaters = 8;
+- ip->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = 0;
+- ip->bug_forcing_LC_req_same_size_fixed = 0;
+- } else {
+- BREAK_TO_DEBUGGER(); /* Invalid Project Specified */
++ switch (project) {
++ case DML_PROJECT_RAVEN1:
++ *ip = dcn1_0_ip;
++ break;
++ default:
++ ASSERT(0);
++ break;
+ }
+ }
+
+--
+2.7.4
+
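The refactor in patch 4208 boils down to keeping one const, designated-initializer default struct per project and copying it wholesale, instead of assigning every field by hand inside an if/else. A minimal, self-contained sketch of that pattern follows; the type, enum, and field names are illustrative stand-ins, not the real DML structs.

    #include <assert.h>

    /* Illustrative stand-in for a per-project parameter block. */
    struct ip_params_sketch {
        unsigned int rob_buffer_size_kbytes;
        unsigned int det_buffer_size_kbytes;
        unsigned int max_num_dpp;
    };

    enum project_sketch { PROJECT_RAVEN1_SKETCH };

    /* One const default per project; unnamed fields are zero-initialized. */
    static const struct ip_params_sketch raven1_ip_defaults = {
        .rob_buffer_size_kbytes = 64,
        .det_buffer_size_kbytes = 164,
        .max_num_dpp = 4,
    };

    static void set_ip_params_sketch(struct ip_params_sketch *ip,
                                     enum project_sketch project)
    {
        switch (project) {
        case PROJECT_RAVEN1_SKETCH:
            *ip = raven1_ip_defaults;  /* whole-struct copy replaces field-by-field writes */
            break;
        default:
            assert(0);                 /* unknown project is a programming error */
            break;
        }
    }

Compared with per-field assignment, the whole-struct copy keeps all defaults in one table and makes it harder to forget a field when a new project is added.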
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4209-drm-amd-display-Add-back-code-to-allow-for-rounding-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4209-drm-amd-display-Add-back-code-to-allow-for-rounding-.patch
new file mode 100644
index 00000000..7b4f6345
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4209-drm-amd-display-Add-back-code-to-allow-for-rounding-.patch
@@ -0,0 +1,47 @@
+From 1c0ca771836d1cd9e113f1b7787c61de08ec1efa Mon Sep 17 00:00:00 2001
+From: Anthony Koo <Anthony.Koo@amd.com>
+Date: Thu, 5 Apr 2018 15:20:15 -0400
+Subject: [PATCH 4209/5725] drm/amd/display: Add back code to allow for
+ rounding error
+
+Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
+Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/modules/freesync/freesync.c | 13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+index 4887c88..abd5c93 100644
+--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
++++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+@@ -896,6 +896,17 @@ bool mod_freesync_is_valid_range(struct mod_freesync *mod_freesync,
+ unsigned long long nominal_field_rate_in_uhz =
+ mod_freesync_calc_nominal_field_rate(stream);
+
++ /* Allow for some rounding error of actual video timing by taking ceil.
++ * For example, 144 Hz mode timing may actually be 143.xxx Hz when
++ * calculated from pixel rate and vertical/horizontal totals, but
++ * this should be allowed instead of blocking FreeSync.
++ */
++ nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz, 1000000);
++ min_refresh_cap_in_uhz /= 1000000;
++ max_refresh_cap_in_uhz /= 1000000;
++ min_refresh_request_in_uhz /= 1000000;
++ max_refresh_request_in_uhz /= 1000000;
++
+ // Check nominal is within range
+ if (nominal_field_rate_in_uhz > max_refresh_cap_in_uhz ||
+ nominal_field_rate_in_uhz < min_refresh_cap_in_uhz)
+@@ -921,7 +932,7 @@ bool mod_freesync_is_valid_range(struct mod_freesync *mod_freesync,
+
+ // For variable range, check for at least 10 Hz range
+ if ((max_refresh_request_in_uhz != min_refresh_request_in_uhz) &&
+- (max_refresh_request_in_uhz - min_refresh_request_in_uhz < 10000000))
++ (max_refresh_request_in_uhz - min_refresh_request_in_uhz < 10))
+ return false;
+
+ return true;
+--
+2.7.4
+
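Patch 4209 restores tolerance for timings such as a 143.xxx Hz mode advertised as 144 Hz by comparing refresh rates at whole-Hz rather than micro-Hz granularity (note the minimum-range check correspondingly drops from 10000000 uHz to 10). A rough standalone sketch of that validation is below; the function and variable names are hypothetical, and plain integer division stands in for the kernel's div_u64().

    #include <stdbool.h>

    #define UHZ_PER_HZ 1000000ULL

    /* Hedged sketch: validate a requested VRR range against panel caps,
     * tolerating sub-Hz rounding error by truncating everything to whole Hz. */
    static bool is_valid_range_sketch(unsigned long long nominal_uhz,
                                      unsigned long long min_cap_uhz,
                                      unsigned long long max_cap_uhz,
                                      unsigned long long min_req_uhz,
                                      unsigned long long max_req_uhz)
    {
        unsigned long long nominal_hz = nominal_uhz / UHZ_PER_HZ;
        unsigned long long min_cap_hz = min_cap_uhz / UHZ_PER_HZ;
        unsigned long long max_cap_hz = max_cap_uhz / UHZ_PER_HZ;
        unsigned long long min_req_hz = min_req_uhz / UHZ_PER_HZ;
        unsigned long long max_req_hz = max_req_uhz / UHZ_PER_HZ;

        /* Nominal rate must sit inside the panel's capability window. */
        if (nominal_hz > max_cap_hz || nominal_hz < min_cap_hz)
            return false;

        /* A variable range must span at least 10 Hz (now compared in Hz). */
        if (max_req_hz != min_req_hz && max_req_hz - min_req_hz < 10)
            return false;

        return true;
    }

    /* Example: a 143.856 Hz actual timing validated against a 48-144 Hz panel:
     * is_valid_range_sketch(143856000ULL, 48000000ULL, 144000000ULL,
     *                       48000000ULL, 144000000ULL) returns true. */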
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4210-drm-amd-display-Check-lid-state-to-determine-fast-bo.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4210-drm-amd-display-Check-lid-state-to-determine-fast-bo.patch
new file mode 100644
index 00000000..f473c74b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4210-drm-amd-display-Check-lid-state-to-determine-fast-bo.patch
@@ -0,0 +1,82 @@
+From 9da4c2c7659cc5acbb68f55110fe41d87ef6ac8d Mon Sep 17 00:00:00 2001
+From: Yongqiang Sun <yongqiang.sun@amd.com>
+Date: Wed, 4 Apr 2018 17:27:18 -0400
+Subject: [PATCH 4210/5725] drm/amd/display: Check lid state to determine fast
+ boot optimization.
+
+On legacy boot with the lid closed, the eDP information cannot be
+read correctly via SBIOS_SCRATCH_3, so the eDP panel fails to light up
+properly once the lid is opened.
+Checking the lid state instead resolves the issue.
+
+Signed-off-by: Yongqiang Sun <yongqiang.sun@amd.com>
+Reviewed-by: Eric Yang <eric.yang2@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc_stream.h | 1 +
+ .../amd/display/dc/dce110/dce110_hw_sequencer.c | 24 ++++++++++++++--------
+ 2 files changed, 17 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+index aefc76b..4750768 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+@@ -98,6 +98,7 @@ struct dc_stream_state {
+ int phy_pix_clk;
+ enum signal_type signal;
+ bool dpms_off;
++ bool lid_state_closed;
+
+ struct dc_stream_status status;
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index b95dd9f..430d67b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -1481,6 +1481,17 @@ static void disable_vga_and_power_gate_all_controllers(
+ }
+ }
+
++static bool is_eDP_lid_closed(struct dc_state *context)
++{
++ int i;
++
++ for (i = 0; i < context->stream_count; i++) {
++ if (context->streams[i]->signal == SIGNAL_TYPE_EDP)
++ return context->streams[i]->lid_state_closed;
++ }
++ return false;
++}
++
+ static struct dc_link *get_link_for_edp_not_in_use(
+ struct dc *dc,
+ struct dc_state *context)
+@@ -1515,20 +1526,17 @@ static struct dc_link *get_link_for_edp_not_in_use(
+ */
+ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
+ {
+- struct dc_bios *dcb = dc->ctx->dc_bios;
+-
+- /* vbios already light up eDP, so we can leverage vbios and skip eDP
++ /* check eDP lid state:
++ * If lid is open, vbios already light up eDP, so we can leverage vbios and skip eDP
+ * programming
+ */
+- bool can_eDP_fast_boot_optimize =
+- (dcb->funcs->get_vga_enabled_displays(dc->ctx->dc_bios) == ATOM_DISPLAY_LCD1_ACTIVE);
+-
+- /* if OS doesn't light up eDP and eDP link is available, we want to disable */
++ bool lid_state_closed = is_eDP_lid_closed(context);
+ struct dc_link *edp_link_to_turnoff = NULL;
+
+- if (can_eDP_fast_boot_optimize) {
++ if (!lid_state_closed) {
+ edp_link_to_turnoff = get_link_for_edp_not_in_use(dc, context);
+
++ /* if OS doesn't light up eDP and eDP link is available, we want to disable */
+ if (!edp_link_to_turnoff)
+ dc->apply_edp_fast_boot_optimization = true;
+ }
+--
+2.7.4
+
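The change in patch 4210 makes the fast-boot decision hinge on the eDP lid state carried in the stream: only when the lid is open and no lit-but-unused eDP link has to be turned off is the VBIOS programming reused. A compact sketch of that decision with simplified stand-in types (the real code walks dc_state streams and links):

    #include <stdbool.h>

    /* Illustrative stand-ins for the stream/context types. */
    struct stream_sketch { bool is_edp; bool lid_closed; };
    struct context_sketch { int stream_count; struct stream_sketch *streams; };

    static bool edp_lid_closed_sketch(const struct context_sketch *ctx)
    {
        for (int i = 0; i < ctx->stream_count; i++)
            if (ctx->streams[i].is_edp)
                return ctx->streams[i].lid_closed;
        return false;   /* no eDP stream: treat the lid as open */
    }

    /* Reuse the VBIOS programming only when the lid is open and there is
     * no lit-but-unused eDP link that must be turned off first. */
    static bool can_fast_boot_sketch(const struct context_sketch *ctx,
                                     bool has_edp_link_to_turn_off)
    {
        return !edp_lid_closed_sketch(ctx) && !has_edp_link_to_turn_off;
    }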
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4211-drm-amd-display-Do-not-create-memory-allocation-if-s.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4211-drm-amd-display-Do-not-create-memory-allocation-if-s.patch
new file mode 100644
index 00000000..c4d533ed
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4211-drm-amd-display-Do-not-create-memory-allocation-if-s.patch
@@ -0,0 +1,54 @@
+From f06c43e6131a531e7518419117bef4ab69951975 Mon Sep 17 00:00:00 2001
+From: Anthony Koo <Anthony.Koo@amd.com>
+Date: Fri, 6 Apr 2018 12:07:19 -0400
+Subject: [PATCH 4211/5725] drm/amd/display: Do not create memory allocation if
+ stats not enabled
+
+Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
+Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/modules/stats/stats.c | 26 +++++++++++++----------
+ 1 file changed, 15 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/modules/stats/stats.c b/drivers/gpu/drm/amd/display/modules/stats/stats.c
+index ed5f680..48e0219 100644
+--- a/drivers/gpu/drm/amd/display/modules/stats/stats.c
++++ b/drivers/gpu/drm/amd/display/modules/stats/stats.c
+@@ -115,18 +115,22 @@ struct mod_stats *mod_stats_create(struct dc *dc)
+ &reg_data, sizeof(unsigned int), &flag))
+ core_stats->enabled = reg_data;
+
+- core_stats->entries = DAL_STATS_ENTRIES_REGKEY_DEFAULT;
+- if (dm_read_persistent_data(dc->ctx, NULL, NULL,
+- DAL_STATS_ENTRIES_REGKEY,
+- &reg_data, sizeof(unsigned int), &flag)) {
+- if (reg_data > DAL_STATS_ENTRIES_REGKEY_MAX)
+- core_stats->entries = DAL_STATS_ENTRIES_REGKEY_MAX;
+- else
+- core_stats->entries = reg_data;
+- }
++ if (core_stats->enabled) {
++ core_stats->entries = DAL_STATS_ENTRIES_REGKEY_DEFAULT;
++ if (dm_read_persistent_data(dc->ctx, NULL, NULL,
++ DAL_STATS_ENTRIES_REGKEY,
++ &reg_data, sizeof(unsigned int), &flag)) {
++ if (reg_data > DAL_STATS_ENTRIES_REGKEY_MAX)
++ core_stats->entries = DAL_STATS_ENTRIES_REGKEY_MAX;
++ else
++ core_stats->entries = reg_data;
++ }
+
+- core_stats->time = kzalloc(sizeof(struct stats_time_cache) * core_stats->entries,
+- GFP_KERNEL);
++ core_stats->time = kzalloc(sizeof(struct stats_time_cache) * core_stats->entries,
++ GFP_KERNEL);
++ } else {
++ core_stats->entries = 0;
++ }
+
+ if (core_stats->time == NULL)
+ goto fail_construct;
+--
+2.7.4
+
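Patch 4211 only sizes and allocates the stats entry cache once the enable flag read from persistent data says stats are on, clamping the entry count to a maximum. A minimal sketch of that bounded, conditional allocation; calloc stands in for kzalloc, and the constants and names are illustrative, not the DAL regkey values.

    #include <stdlib.h>

    #define ENTRIES_DEFAULT 4096u   /* illustrative defaults only */
    #define ENTRIES_MAX     8192u

    struct stats_sketch {
        unsigned int enabled;
        unsigned int entries;
        void *time;                 /* per-entry cache, allocated only when enabled */
    };

    static int stats_init_sketch(struct stats_sketch *s, unsigned int requested)
    {
        if (!s->enabled) {
            s->entries = 0;
            s->time = NULL;
            return 0;               /* nothing to allocate when stats are off */
        }

        s->entries = requested ? requested : ENTRIES_DEFAULT;
        if (s->entries > ENTRIES_MAX)
            s->entries = ENTRIES_MAX;   /* clamp externally supplied size */

        s->time = calloc(s->entries, sizeof(long long));
        return s->time ? 0 : -1;
    }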
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4212-drm-amd-display-Move-DCC-support-functions-into-dchu.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4212-drm-amd-display-Move-DCC-support-functions-into-dchu.patch
new file mode 100644
index 00000000..e6e9ca81
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4212-drm-amd-display-Move-DCC-support-functions-into-dchu.patch
@@ -0,0 +1,606 @@
+From 387e89d496d8c3e225642ce5beb744d95cb321ef Mon Sep 17 00:00:00 2001
+From: Eric Bernstein <eric.bernstein@amd.com>
+Date: Thu, 5 Apr 2018 17:09:20 -0400
+Subject: [PATCH 4212/5725] drm/amd/display: Move DCC support functions into
+ dchubbub
+
+Added dchubbub.h header file for common enum/struct definitions.
+Added new interface functions get_dcc_compression_cap,
+dcc_support_swizzle, dcc_support_pixel_format.
+
+Signed-off-by: Eric Bernstein <eric.bernstein@amd.com>
+Reviewed-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ .../gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c | 221 +++++++++++++++++++-
+ .../gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h | 7 +-
+ .../gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 231 +--------------------
+ drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h | 64 ++++++
+ 4 files changed, 291 insertions(+), 232 deletions(-)
+ create mode 100644 drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+index 738f67f..b9fb14a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+@@ -476,8 +476,227 @@ void hubbub1_toggle_watermark_change_req(struct hubbub *hubbub)
+ DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, watermark_change_req);
+ }
+
++static bool hubbub1_dcc_support_swizzle(
++ enum swizzle_mode_values swizzle,
++ unsigned int bytes_per_element,
++ enum segment_order *segment_order_horz,
++ enum segment_order *segment_order_vert)
++{
++ bool standard_swizzle = false;
++ bool display_swizzle = false;
++
++ switch (swizzle) {
++ case DC_SW_4KB_S:
++ case DC_SW_64KB_S:
++ case DC_SW_VAR_S:
++ case DC_SW_4KB_S_X:
++ case DC_SW_64KB_S_X:
++ case DC_SW_VAR_S_X:
++ standard_swizzle = true;
++ break;
++ case DC_SW_4KB_D:
++ case DC_SW_64KB_D:
++ case DC_SW_VAR_D:
++ case DC_SW_4KB_D_X:
++ case DC_SW_64KB_D_X:
++ case DC_SW_VAR_D_X:
++ display_swizzle = true;
++ break;
++ default:
++ break;
++ }
++
++ if (bytes_per_element == 1 && standard_swizzle) {
++ *segment_order_horz = segment_order__contiguous;
++ *segment_order_vert = segment_order__na;
++ return true;
++ }
++ if (bytes_per_element == 2 && standard_swizzle) {
++ *segment_order_horz = segment_order__non_contiguous;
++ *segment_order_vert = segment_order__contiguous;
++ return true;
++ }
++ if (bytes_per_element == 4 && standard_swizzle) {
++ *segment_order_horz = segment_order__non_contiguous;
++ *segment_order_vert = segment_order__contiguous;
++ return true;
++ }
++ if (bytes_per_element == 8 && standard_swizzle) {
++ *segment_order_horz = segment_order__na;
++ *segment_order_vert = segment_order__contiguous;
++ return true;
++ }
++ if (bytes_per_element == 8 && display_swizzle) {
++ *segment_order_horz = segment_order__contiguous;
++ *segment_order_vert = segment_order__non_contiguous;
++ return true;
++ }
++
++ return false;
++}
++
++static bool hubbub1_dcc_support_pixel_format(
++ enum surface_pixel_format format,
++ unsigned int *bytes_per_element)
++{
++ /* DML: get_bytes_per_element */
++ switch (format) {
++ case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
++ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
++ *bytes_per_element = 2;
++ return true;
++ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
++ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
++ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
++ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
++ *bytes_per_element = 4;
++ return true;
++ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
++ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
++ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
++ *bytes_per_element = 8;
++ return true;
++ default:
++ return false;
++ }
++}
++
++static void hubbub1_get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height,
++ unsigned int bytes_per_element)
++{
++ /* copied from DML. might want to refactor DML to leverage from DML */
++ /* DML : get_blk256_size */
++ if (bytes_per_element == 1) {
++ *blk256_width = 16;
++ *blk256_height = 16;
++ } else if (bytes_per_element == 2) {
++ *blk256_width = 16;
++ *blk256_height = 8;
++ } else if (bytes_per_element == 4) {
++ *blk256_width = 8;
++ *blk256_height = 8;
++ } else if (bytes_per_element == 8) {
++ *blk256_width = 8;
++ *blk256_height = 4;
++ }
++}
++
++static void hubbub1_det_request_size(
++ unsigned int height,
++ unsigned int width,
++ unsigned int bpe,
++ bool *req128_horz_wc,
++ bool *req128_vert_wc)
++{
++ unsigned int detile_buf_size = 164 * 1024; /* 164KB for DCN1.0 */
++
++ unsigned int blk256_height = 0;
++ unsigned int blk256_width = 0;
++ unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc;
++
++ hubbub1_get_blk256_size(&blk256_width, &blk256_height, bpe);
++
++ swath_bytes_horz_wc = height * blk256_height * bpe;
++ swath_bytes_vert_wc = width * blk256_width * bpe;
++
++ *req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ?
++ false : /* full 256B request */
++ true; /* half 128b request */
++
++ *req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ?
++ false : /* full 256B request */
++ true; /* half 128b request */
++}
++
++static bool hubbub1_get_dcc_compression_cap(struct hubbub *hubbub,
++ const struct dc_dcc_surface_param *input,
++ struct dc_surface_dcc_cap *output)
++{
++ struct dc *dc = hubbub->ctx->dc;
++ /* implement section 1.6.2.1 of DCN1_Programming_Guide.docx */
++ enum dcc_control dcc_control;
++ unsigned int bpe;
++ enum segment_order segment_order_horz, segment_order_vert;
++ bool req128_horz_wc, req128_vert_wc;
++
++ memset(output, 0, sizeof(*output));
++
++ if (dc->debug.disable_dcc == DCC_DISABLE)
++ return false;
++
++ if (!hubbub->funcs->dcc_support_pixel_format(input->format, &bpe))
++ return false;
++
++ if (!hubbub->funcs->dcc_support_swizzle(input->swizzle_mode, bpe,
++ &segment_order_horz, &segment_order_vert))
++ return false;
++
++ hubbub1_det_request_size(input->surface_size.height, input->surface_size.width,
++ bpe, &req128_horz_wc, &req128_vert_wc);
++
++ if (!req128_horz_wc && !req128_vert_wc) {
++ dcc_control = dcc_control__256_256_xxx;
++ } else if (input->scan == SCAN_DIRECTION_HORIZONTAL) {
++ if (!req128_horz_wc)
++ dcc_control = dcc_control__256_256_xxx;
++ else if (segment_order_horz == segment_order__contiguous)
++ dcc_control = dcc_control__128_128_xxx;
++ else
++ dcc_control = dcc_control__256_64_64;
++ } else if (input->scan == SCAN_DIRECTION_VERTICAL) {
++ if (!req128_vert_wc)
++ dcc_control = dcc_control__256_256_xxx;
++ else if (segment_order_vert == segment_order__contiguous)
++ dcc_control = dcc_control__128_128_xxx;
++ else
++ dcc_control = dcc_control__256_64_64;
++ } else {
++ if ((req128_horz_wc &&
++ segment_order_horz == segment_order__non_contiguous) ||
++ (req128_vert_wc &&
++ segment_order_vert == segment_order__non_contiguous))
++ /* access_dir not known, must use most constraining */
++ dcc_control = dcc_control__256_64_64;
++ else
++ /* reg128 is true for either horz and vert
++ * but segment_order is contiguous
++ */
++ dcc_control = dcc_control__128_128_xxx;
++ }
++
++ if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE &&
++ dcc_control != dcc_control__256_256_xxx)
++ return false;
++
++ switch (dcc_control) {
++ case dcc_control__256_256_xxx:
++ output->grph.rgb.max_uncompressed_blk_size = 256;
++ output->grph.rgb.max_compressed_blk_size = 256;
++ output->grph.rgb.independent_64b_blks = false;
++ break;
++ case dcc_control__128_128_xxx:
++ output->grph.rgb.max_uncompressed_blk_size = 128;
++ output->grph.rgb.max_compressed_blk_size = 128;
++ output->grph.rgb.independent_64b_blks = false;
++ break;
++ case dcc_control__256_64_64:
++ output->grph.rgb.max_uncompressed_blk_size = 256;
++ output->grph.rgb.max_compressed_blk_size = 64;
++ output->grph.rgb.independent_64b_blks = true;
++ break;
++ }
++
++ output->capable = true;
++ output->const_color_support = false;
++
++ return true;
++}
++
+ static const struct hubbub_funcs hubbub1_funcs = {
+- .update_dchub = hubbub1_update_dchub
++ .update_dchub = hubbub1_update_dchub,
++ .dcc_support_swizzle = hubbub1_dcc_support_swizzle,
++ .dcc_support_pixel_format = hubbub1_dcc_support_pixel_format,
++ .get_dcc_compression_cap = hubbub1_get_dcc_compression_cap,
+ };
+
+ void hubbub1_construct(struct hubbub *hubbub,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
+index a16e908..f479f54 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
+@@ -27,6 +27,7 @@
+ #define __DC_HUBBUB_DCN10_H__
+
+ #include "core_types.h"
++#include "dchubbub.h"
+
+ #define HUBHUB_REG_LIST_DCN()\
+ SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A),\
+@@ -173,12 +174,6 @@ struct dcn_hubbub_wm {
+ struct dcn_hubbub_wm_set sets[4];
+ };
+
+-struct hubbub_funcs {
+- void (*update_dchub)(
+- struct hubbub *hubbub,
+- struct dchub_init_data *dh_data);
+-};
+-
+ struct hubbub {
+ const struct hubbub_funcs *funcs;
+ struct dc_context *ctx;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+index f305f65..2c0a315 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+@@ -937,235 +937,16 @@ static struct pipe_ctx *dcn10_acquire_idle_pipe_for_layer(
+ return idle_pipe;
+ }
+
+-enum dcc_control {
+- dcc_control__256_256_xxx,
+- dcc_control__128_128_xxx,
+- dcc_control__256_64_64,
+-};
+-
+-enum segment_order {
+- segment_order__na,
+- segment_order__contiguous,
+- segment_order__non_contiguous,
+-};
+-
+-static bool dcc_support_pixel_format(
+- enum surface_pixel_format format,
+- unsigned int *bytes_per_element)
+-{
+- /* DML: get_bytes_per_element */
+- switch (format) {
+- case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+- case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+- *bytes_per_element = 2;
+- return true;
+- case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+- case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+- case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+- case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+- *bytes_per_element = 4;
+- return true;
+- case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+- case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+- case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+- *bytes_per_element = 8;
+- return true;
+- default:
+- return false;
+- }
+-}
+-
+-static bool dcc_support_swizzle(
+- enum swizzle_mode_values swizzle,
+- unsigned int bytes_per_element,
+- enum segment_order *segment_order_horz,
+- enum segment_order *segment_order_vert)
+-{
+- bool standard_swizzle = false;
+- bool display_swizzle = false;
+-
+- switch (swizzle) {
+- case DC_SW_4KB_S:
+- case DC_SW_64KB_S:
+- case DC_SW_VAR_S:
+- case DC_SW_4KB_S_X:
+- case DC_SW_64KB_S_X:
+- case DC_SW_VAR_S_X:
+- standard_swizzle = true;
+- break;
+- case DC_SW_4KB_D:
+- case DC_SW_64KB_D:
+- case DC_SW_VAR_D:
+- case DC_SW_4KB_D_X:
+- case DC_SW_64KB_D_X:
+- case DC_SW_VAR_D_X:
+- display_swizzle = true;
+- break;
+- default:
+- break;
+- }
+-
+- if (bytes_per_element == 1 && standard_swizzle) {
+- *segment_order_horz = segment_order__contiguous;
+- *segment_order_vert = segment_order__na;
+- return true;
+- }
+- if (bytes_per_element == 2 && standard_swizzle) {
+- *segment_order_horz = segment_order__non_contiguous;
+- *segment_order_vert = segment_order__contiguous;
+- return true;
+- }
+- if (bytes_per_element == 4 && standard_swizzle) {
+- *segment_order_horz = segment_order__non_contiguous;
+- *segment_order_vert = segment_order__contiguous;
+- return true;
+- }
+- if (bytes_per_element == 8 && standard_swizzle) {
+- *segment_order_horz = segment_order__na;
+- *segment_order_vert = segment_order__contiguous;
+- return true;
+- }
+- if (bytes_per_element == 8 && display_swizzle) {
+- *segment_order_horz = segment_order__contiguous;
+- *segment_order_vert = segment_order__non_contiguous;
+- return true;
+- }
+-
+- return false;
+-}
+-
+-static void get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height,
+- unsigned int bytes_per_element)
+-{
+- /* copied from DML. might want to refactor DML to leverage from DML */
+- /* DML : get_blk256_size */
+- if (bytes_per_element == 1) {
+- *blk256_width = 16;
+- *blk256_height = 16;
+- } else if (bytes_per_element == 2) {
+- *blk256_width = 16;
+- *blk256_height = 8;
+- } else if (bytes_per_element == 4) {
+- *blk256_width = 8;
+- *blk256_height = 8;
+- } else if (bytes_per_element == 8) {
+- *blk256_width = 8;
+- *blk256_height = 4;
+- }
+-}
+-
+-static void det_request_size(
+- unsigned int height,
+- unsigned int width,
+- unsigned int bpe,
+- bool *req128_horz_wc,
+- bool *req128_vert_wc)
+-{
+- unsigned int detile_buf_size = 164 * 1024; /* 164KB for DCN1.0 */
+-
+- unsigned int blk256_height = 0;
+- unsigned int blk256_width = 0;
+- unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc;
+-
+- get_blk256_size(&blk256_width, &blk256_height, bpe);
+-
+- swath_bytes_horz_wc = height * blk256_height * bpe;
+- swath_bytes_vert_wc = width * blk256_width * bpe;
+-
+- *req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ?
+- false : /* full 256B request */
+- true; /* half 128b request */
+-
+- *req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ?
+- false : /* full 256B request */
+- true; /* half 128b request */
+-}
+-
+-static bool get_dcc_compression_cap(const struct dc *dc,
++static bool dcn10_get_dcc_compression_cap(const struct dc *dc,
+ const struct dc_dcc_surface_param *input,
+ struct dc_surface_dcc_cap *output)
+ {
+- /* implement section 1.6.2.1 of DCN1_Programming_Guide.docx */
+- enum dcc_control dcc_control;
+- unsigned int bpe;
+- enum segment_order segment_order_horz, segment_order_vert;
+- bool req128_horz_wc, req128_vert_wc;
+-
+- memset(output, 0, sizeof(*output));
+-
+- if (dc->debug.disable_dcc == DCC_DISABLE)
+- return false;
+-
+- if (!dcc_support_pixel_format(input->format,
+- &bpe))
+- return false;
+-
+- if (!dcc_support_swizzle(input->swizzle_mode, bpe,
+- &segment_order_horz, &segment_order_vert))
+- return false;
+-
+- det_request_size(input->surface_size.height, input->surface_size.width,
+- bpe, &req128_horz_wc, &req128_vert_wc);
+-
+- if (!req128_horz_wc && !req128_vert_wc) {
+- dcc_control = dcc_control__256_256_xxx;
+- } else if (input->scan == SCAN_DIRECTION_HORIZONTAL) {
+- if (!req128_horz_wc)
+- dcc_control = dcc_control__256_256_xxx;
+- else if (segment_order_horz == segment_order__contiguous)
+- dcc_control = dcc_control__128_128_xxx;
+- else
+- dcc_control = dcc_control__256_64_64;
+- } else if (input->scan == SCAN_DIRECTION_VERTICAL) {
+- if (!req128_vert_wc)
+- dcc_control = dcc_control__256_256_xxx;
+- else if (segment_order_vert == segment_order__contiguous)
+- dcc_control = dcc_control__128_128_xxx;
+- else
+- dcc_control = dcc_control__256_64_64;
+- } else {
+- if ((req128_horz_wc &&
+- segment_order_horz == segment_order__non_contiguous) ||
+- (req128_vert_wc &&
+- segment_order_vert == segment_order__non_contiguous))
+- /* access_dir not known, must use most constraining */
+- dcc_control = dcc_control__256_64_64;
+- else
+- /* reg128 is true for either horz and vert
+- * but segment_order is contiguous
+- */
+- dcc_control = dcc_control__128_128_xxx;
+- }
+-
+- if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE &&
+- dcc_control != dcc_control__256_256_xxx)
+- return false;
+-
+- switch (dcc_control) {
+- case dcc_control__256_256_xxx:
+- output->grph.rgb.max_uncompressed_blk_size = 256;
+- output->grph.rgb.max_compressed_blk_size = 256;
+- output->grph.rgb.independent_64b_blks = false;
+- break;
+- case dcc_control__128_128_xxx:
+- output->grph.rgb.max_uncompressed_blk_size = 128;
+- output->grph.rgb.max_compressed_blk_size = 128;
+- output->grph.rgb.independent_64b_blks = false;
+- break;
+- case dcc_control__256_64_64:
+- output->grph.rgb.max_uncompressed_blk_size = 256;
+- output->grph.rgb.max_compressed_blk_size = 64;
+- output->grph.rgb.independent_64b_blks = true;
+- break;
+- }
+-
+- output->capable = true;
+- output->const_color_support = false;
+-
+- return true;
++ return dc->res_pool->hubbub->funcs->get_dcc_compression_cap(
++ dc->res_pool->hubbub,
++ input,
++ output);
+ }
+
+-
+ static void dcn10_destroy_resource_pool(struct resource_pool **pool)
+ {
+ struct dcn10_resource_pool *dcn10_pool = TO_DCN10_RES_POOL(*pool);
+@@ -1186,7 +967,7 @@ static enum dc_status dcn10_validate_plane(const struct dc_plane_state *plane_st
+ }
+
+ static struct dc_cap_funcs cap_funcs = {
+- .get_dcc_compression_cap = get_dcc_compression_cap
++ .get_dcc_compression_cap = dcn10_get_dcc_compression_cap
+ };
+
+ static struct resource_funcs dcn10_res_pool_funcs = {
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+new file mode 100644
+index 0000000..02f757d
+--- /dev/null
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+@@ -0,0 +1,64 @@
++/*
++ * Copyright 2012-15 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++#ifndef __DAL_DCHUBBUB_H__
++#define __DAL_DCHUBBUB_H__
++
++
++enum dcc_control {
++ dcc_control__256_256_xxx,
++ dcc_control__128_128_xxx,
++ dcc_control__256_64_64,
++};
++
++enum segment_order {
++ segment_order__na,
++ segment_order__contiguous,
++ segment_order__non_contiguous,
++};
++
++
++struct hubbub_funcs {
++ void (*update_dchub)(
++ struct hubbub *hubbub,
++ struct dchub_init_data *dh_data);
++
++ bool (*get_dcc_compression_cap)(struct hubbub *hubbub,
++ const struct dc_dcc_surface_param *input,
++ struct dc_surface_dcc_cap *output);
++
++ bool (*dcc_support_swizzle)(
++ enum swizzle_mode_values swizzle,
++ unsigned int bytes_per_element,
++ enum segment_order *segment_order_horz,
++ enum segment_order *segment_order_vert);
++
++ bool (*dcc_support_pixel_format)(
++ enum surface_pixel_format format,
++ unsigned int *bytes_per_element);
++};
++
++
++#endif
+--
+2.7.4
+
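The structural point of patch 4212 is that the DCC capability helpers now live behind the hubbub function table, so the resource layer simply dispatches through funcs pointers instead of keeping its own copies. A small sketch of that dispatch pattern with invented names (the real table is the struct hubbub_funcs shown above):

    #include <stdbool.h>

    struct hub_sketch;   /* forward declaration of the hardware block object */

    /* Function table: each hardware generation fills in its own callbacks. */
    struct hub_funcs_sketch {
        bool (*get_dcc_cap)(struct hub_sketch *hub, unsigned int format,
                            unsigned int *max_compressed_blk);
    };

    struct hub_sketch {
        const struct hub_funcs_sketch *funcs;
    };

    /* Generation-specific implementation, registered once at construction. */
    static bool gen1_get_dcc_cap(struct hub_sketch *hub, unsigned int format,
                                 unsigned int *max_compressed_blk)
    {
        (void)hub;
        (void)format;
        *max_compressed_blk = 256;
        return true;
    }

    static const struct hub_funcs_sketch gen1_funcs = {
        .get_dcc_cap = gen1_get_dcc_cap,
    };

    /* Callers stay generation-agnostic: they only ever go through the table. */
    static bool query_dcc_cap(struct hub_sketch *hub, unsigned int format,
                              unsigned int *max_compressed_blk)
    {
        return hub->funcs->get_dcc_cap(hub, format, max_compressed_blk);
    }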
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4213-drm-amd-display-fix-LFC-tearing-at-top-of-screen.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4213-drm-amd-display-fix-LFC-tearing-at-top-of-screen.patch
new file mode 100644
index 00000000..522a6aaa
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4213-drm-amd-display-fix-LFC-tearing-at-top-of-screen.patch
@@ -0,0 +1,41 @@
+From e788d2f033853a30166a123440ba2e161138f60f Mon Sep 17 00:00:00 2001
+From: Anthony Koo <Anthony.Koo@amd.com>
+Date: Fri, 6 Apr 2018 12:12:06 -0400
+Subject: [PATCH 4213/5725] drm/amd/display: fix LFC tearing at top of screen
+
+Tearing occurred because new VTOTAL MIN/MAX was being programmed
+too early.
+The flip can happen within the VUPDATE high region, and the new min/max
+would take effect immediately. But this means the frame is no longer
+variable, and tearing would occur when the flip actually happens.
+
+The fixed insert duration should be programmed on the first VUPDATE
+interrupt instead.
+
+Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
+Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/modules/freesync/freesync.c | 6 ------
+ 1 file changed, 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+index abd5c93..daad60e 100644
+--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
++++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+@@ -371,12 +371,6 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
+ inserted_frame_duration_in_us;
+ in_out_vrr->btr.frames_to_insert = frames_to_insert;
+ in_out_vrr->btr.frame_counter = frames_to_insert;
+-
+- in_out_vrr->adjust.v_total_min =
+- calc_v_total_from_duration(stream, in_out_vrr,
+- in_out_vrr->btr.inserted_duration_in_us);
+- in_out_vrr->adjust.v_total_max =
+- in_out_vrr->adjust.v_total_min;
+ }
+ }
+
+--
+2.7.4
+
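Patch 4213 removes the immediate VTOTAL MIN/MAX write from the flip path; the reasoning in the commit message maps onto a "latch now, program later" pattern in which the flip only records the desired values and the first VUPDATE interrupt applies them. A hedged sketch of that idea with hypothetical names (the real driver programs the values from its VUPDATE handler):

    #include <stdbool.h>

    /* Desired vertical-total window, latched at flip time. */
    struct vtotal_request_sketch {
        bool pending;
        unsigned int v_total_min;
        unsigned int v_total_max;
    };

    /* Flip path: do NOT touch the hardware; just remember what we want. */
    static void on_flip_sketch(struct vtotal_request_sketch *req,
                               unsigned int v_total_fixed)
    {
        req->v_total_min = v_total_fixed;
        req->v_total_max = v_total_fixed;
        req->pending = true;
    }

    /* VUPDATE interrupt: now it is safe to program the new window without
     * constraining the frame that is still being scanned out as variable. */
    static void on_vupdate_sketch(struct vtotal_request_sketch *req,
                                  void (*program_vtotal)(unsigned int min,
                                                         unsigned int max))
    {
        if (!req->pending)
            return;
        program_vtotal(req->v_total_min, req->v_total_max);
        req->pending = false;
    }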
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4214-drm-amd-display-HDMI-has-no-sound-after-Panel-power-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4214-drm-amd-display-HDMI-has-no-sound-after-Panel-power-.patch
new file mode 100644
index 00000000..0ed7099d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4214-drm-amd-display-HDMI-has-no-sound-after-Panel-power-.patch
@@ -0,0 +1,30 @@
+From 8551dbb39be200f208d8bdab2e9a273830d05e12 Mon Sep 17 00:00:00 2001
+From: Charlene Liu <charlene.liu@amd.com>
+Date: Fri, 6 Apr 2018 23:03:12 -0400
+Subject: [PATCH 4214/5725] drm/amd/display: HDMI has no sound after Panel
+ power off/on
+
+Signed-off-by: Charlene Liu <charlene.liu@amd.com>
+Reviewed-by: Krunoslav Kovac <Krunoslav.Kovac@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Cc: stable@vger.kernel.org
+---
+ drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+index 07c3242..84e26c8 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+@@ -718,6 +718,8 @@ static void dce110_stream_encoder_update_hdmi_info_packets(
+ if (info_frame->avi.valid) {
+ const uint32_t *content =
+ (const uint32_t *) &info_frame->avi.sb[0];
++ /*we need turn on clock before programming AFMT block*/
++ REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1);
+
+ REG_WRITE(AFMT_AVI_INFO0, content[0]);
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4215-drm-amd-display-refactor-vupdate-interrupt-registrat.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4215-drm-amd-display-refactor-vupdate-interrupt-registrat.patch
new file mode 100644
index 00000000..672d61cd
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4215-drm-amd-display-refactor-vupdate-interrupt-registrat.patch
@@ -0,0 +1,67 @@
+From 3e79e98b358b012f11f11c31377bc7838cdeb9be Mon Sep 17 00:00:00 2001
+From: Anthony Koo <Anthony.Koo@amd.com>
+Date: Fri, 6 Apr 2018 13:55:39 -0400
+Subject: [PATCH 4215/5725] drm/amd/display: refactor vupdate interrupt
+ registration
+
+We only need to register once the OS calls the interrupt control.
+Also, if we are entering static screen mode, disable after ramping is done.
+To keep things simple, the disable is driven by a 2 second timer,
+regardless of whether ramping has completed.
+
+Also, ramp to mid instead of min, since it gives better flicker performance.
+
+Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
+Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ .../gpu/drm/amd/display/modules/freesync/freesync.c | 19 ++++++++-----------
+ 1 file changed, 8 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+index daad60e..349387e 100644
+--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
++++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+@@ -109,12 +109,6 @@ static unsigned int calc_duration_in_us_from_v_total(
+ * 1000) * stream->timing.h_total,
+ stream->timing.pix_clk_khz));
+
+- if (duration_in_us < in_vrr->min_duration_in_us)
+- duration_in_us = in_vrr->min_duration_in_us;
+-
+- if (duration_in_us > in_vrr->max_duration_in_us)
+- duration_in_us = in_vrr->max_duration_in_us;
+-
+ return duration_in_us;
+ }
+
+@@ -230,10 +224,9 @@ static void update_v_total_for_static_ramp(
+ }
+ }
+
+- v_total = calc_v_total_from_duration(stream,
+- in_out_vrr,
+- current_duration_in_us);
+-
++ v_total = div64_u64(div64_u64(((unsigned long long)(
++ current_duration_in_us) * stream->timing.pix_clk_khz),
++ stream->timing.h_total), 1000);
+
+ in_out_vrr->adjust.v_total_min = v_total;
+ in_out_vrr->adjust.v_total_max = v_total;
+@@ -702,7 +695,11 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
+ } else if (in_out_vrr->state == VRR_STATE_ACTIVE_FIXED) {
+ in_out_vrr->fixed.target_refresh_in_uhz =
+ in_out_vrr->min_refresh_in_uhz;
+- if (in_out_vrr->fixed.ramping_active) {
++ if (in_out_vrr->fixed.ramping_active &&
++ in_out_vrr->fixed.fixed_active) {
++ /* Do not update vtotals if ramping is already active
++ * in order to continue ramp from current refresh.
++ */
+ in_out_vrr->fixed.fixed_active = true;
+ } else {
+ in_out_vrr->fixed.fixed_active = true;
+--
+2.7.4
+
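The inlined computation in patch 4215 is the usual duration-to-v_total conversion: v_total = duration_us * pix_clk_khz / (h_total * 1000), i.e. frame time divided by line time. A tiny standalone sketch of the arithmetic with a worked example; the values are illustrative, and the kernel uses div64_u64 for the 64-bit divisions.

    /* v_total = frame duration / line duration,
     * with duration in us, pixel clock in kHz and h_total in pixels. */
    static unsigned int calc_v_total_sketch(unsigned long long duration_in_us,
                                            unsigned long long pix_clk_khz,
                                            unsigned long long h_total)
    {
        return (unsigned int)(duration_in_us * pix_clk_khz / h_total / 1000);
    }

    /* Example: 16667 us (about 60 Hz) at a 148500 kHz pixel clock with
     * h_total = 2200 gives 1125 lines, the standard 1080p v_total:
     * calc_v_total_sketch(16667, 148500, 2200) == 1125 */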
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4216-drm-amd-display-Check-SCRATCH-reg-to-determine-S3-re.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4216-drm-amd-display-Check-SCRATCH-reg-to-determine-S3-re.patch
new file mode 100644
index 00000000..dea707bd
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4216-drm-amd-display-Check-SCRATCH-reg-to-determine-S3-re.patch
@@ -0,0 +1,74 @@
+From f2d21896f604143f0ffef1bdb8d138d9e0f22642 Mon Sep 17 00:00:00 2001
+From: Yongqiang Sun <yongqiang.sun@amd.com>
+Date: Fri, 6 Apr 2018 21:38:10 -0400
+Subject: [PATCH 4216/5725] drm/amd/display: Check SCRATCH reg to determine S3
+ resume.
+
+Using the lid state alone to decide on the fast boot optimization is not
+enough. On S3 resume the BIOS is not involved in boot, so eDP was never
+lit up even though the lid is open; applying the fast boot optimization
+would skip the link enable and leave a black screen after boot.
+Because the BIOS is not involved, BIOS_SCRATCH_3 is 0 on both UEFI and
+legacy boot, so use it to detect this case.
+
+Signed-off-by: Yongqiang Sun <yongqiang.sun@amd.com>
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ .../amd/display/dc/dce110/dce110_hw_sequencer.c | 33 ++++++++++++++++++----
+ 1 file changed, 28 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index 430d67b..3920310 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -1526,18 +1526,41 @@ static struct dc_link *get_link_for_edp_not_in_use(
+ */
+ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
+ {
+- /* check eDP lid state:
+- * If lid is open, vbios already light up eDP, so we can leverage vbios and skip eDP
+- * programming
++ /* check eDP lid state and BIOS_SCRATCH_3 to determine fast boot optimization
++ * UEFI boot
++ * edp_active_status_from_scratch fast boot optimization
++ * S4/S5 resume:
++ * Lid Open true true
++ * Lid Close false false
++ *
++ * S3/ resume:
++ * Lid Open false false
++ * Lid Close false false
++ *
++ * Legacy boot:
++ * edp_active_status_from_scratch fast boot optimization
++ * S4/S resume:
++ * Lid Open true true
++ * Lid Close true false
++ *
++ * S3/ resume:
++ * Lid Open false false
++ * Lid Close false false
+ */
++ struct dc_bios *dcb = dc->ctx->dc_bios;
+ bool lid_state_closed = is_eDP_lid_closed(context);
+ struct dc_link *edp_link_to_turnoff = NULL;
++ bool edp_active_status_from_scratch =
++ (dcb->funcs->get_vga_enabled_displays(dc->ctx->dc_bios) == ATOM_DISPLAY_LCD1_ACTIVE);
+
++ /*Lid open*/
+ if (!lid_state_closed) {
+ edp_link_to_turnoff = get_link_for_edp_not_in_use(dc, context);
+
+- /* if OS doesn't light up eDP and eDP link is available, we want to disable */
+- if (!edp_link_to_turnoff)
++ /* if OS doesn't light up eDP and eDP link is available, we want to disable
++ * If resume from S4/S5, should optimization.
++ */
++ if (!edp_link_to_turnoff && edp_active_status_from_scratch)
+ dc->apply_edp_fast_boot_optimization = true;
+ }
+
+--
+2.7.4
+
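The decision table in the comment block of patch 4216 reduces to one conjunction: fast boot is applied only when the lid is open, no lit-but-unused eDP link has to be turned off, and the BIOS scratch register still reports the eDP as active (which it does not on S3 resume, since the BIOS is not involved). A small sketch of that combined check with invented names:

    #include <stdbool.h>

    /* Hedged sketch of the S3-aware fast-boot decision.
     * edp_active_in_scratch models the ATOM_DISPLAY_LCD1_ACTIVE state read
     * from BIOS_SCRATCH_3; it is false after S3 resume because the BIOS
     * did not run, even if the lid is open. */
    static bool apply_fast_boot_sketch(bool lid_closed,
                                       bool edp_link_to_turn_off,
                                       bool edp_active_in_scratch)
    {
        if (lid_closed)
            return false;             /* lid closed: never reuse VBIOS programming */
        if (edp_link_to_turn_off)
            return false;             /* a lit-but-unused eDP must be shut down first */
        return edp_active_in_scratch; /* false on S3 resume -> do the full link enable */
    }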
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4217-drm-amd-display-add-rq-dlg-ttu-to-dtn-log.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4217-drm-amd-display-add-rq-dlg-ttu-to-dtn-log.patch
new file mode 100644
index 00000000..5c90693c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4217-drm-amd-display-add-rq-dlg-ttu-to-dtn-log.patch
@@ -0,0 +1,542 @@
+From 9d80ce040f018f9618b5ff5e3a96cb7cbe8ecd70 Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Thu, 29 Mar 2018 08:43:02 -0400
+Subject: [PATCH 4217/5725] drm/amd/display: add rq/dlg/ttu to dtn log
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc_helper.c | 59 ++++++++
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 153 ++++++++++++++++++++-
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 19 +--
+ .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 114 ++++++++++++++-
+ drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h | 20 +++
+ drivers/gpu/drm/amd/display/dc/inc/reg_helper.h | 56 ++++++++
+ 6 files changed, 401 insertions(+), 20 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
+index 48e1fcf5..bd0fda0 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
++++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
+@@ -117,6 +117,65 @@ uint32_t generic_reg_get5(const struct dc_context *ctx, uint32_t addr,
+ return reg_val;
+ }
+
++uint32_t generic_reg_get6(const struct dc_context *ctx, uint32_t addr,
++ uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
++ uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
++ uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
++ uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
++ uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
++ uint8_t shift6, uint32_t mask6, uint32_t *field_value6)
++{
++ uint32_t reg_val = dm_read_reg(ctx, addr);
++ *field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
++ *field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
++ *field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
++ *field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
++ *field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
++ *field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6);
++ return reg_val;
++}
++
++uint32_t generic_reg_get7(const struct dc_context *ctx, uint32_t addr,
++ uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
++ uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
++ uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
++ uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
++ uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
++ uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
++ uint8_t shift7, uint32_t mask7, uint32_t *field_value7)
++{
++ uint32_t reg_val = dm_read_reg(ctx, addr);
++ *field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
++ *field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
++ *field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
++ *field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
++ *field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
++ *field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6);
++ *field_value7 = get_reg_field_value_ex(reg_val, mask7, shift7);
++ return reg_val;
++}
++
++uint32_t generic_reg_get8(const struct dc_context *ctx, uint32_t addr,
++ uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
++ uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
++ uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
++ uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
++ uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
++ uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
++ uint8_t shift7, uint32_t mask7, uint32_t *field_value7,
++ uint8_t shift8, uint32_t mask8, uint32_t *field_value8)
++{
++ uint32_t reg_val = dm_read_reg(ctx, addr);
++ *field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
++ *field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
++ *field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
++ *field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
++ *field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
++ *field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6);
++ *field_value7 = get_reg_field_value_ex(reg_val, mask7, shift7);
++ *field_value8 = get_reg_field_value_ex(reg_val, mask8, shift8);
++ return reg_val;
++}
+ /* note: va version of this is pretty bad idea, since there is a output parameter pass by pointer
+ * compiler won't be able to check for size match and is prone to stack corruption type of bugs
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+index 4ca9b6e..5806217 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+@@ -756,9 +756,159 @@ void min_set_viewport(
+ PRI_VIEWPORT_Y_START_C, viewport_c->y);
+ }
+
+-void hubp1_read_state(struct dcn10_hubp *hubp1,
++void hubp1_read_state(struct hubp *hubp,
+ struct dcn_hubp_state *s)
+ {
++ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
++ struct _vcs_dpi_display_dlg_regs_st *dlg_attr = &s->dlg_attr;
++ struct _vcs_dpi_display_ttu_regs_st *ttu_attr = &s->ttu_attr;
++ struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
++
++ /* Requester */
++ REG_GET(HUBPRET_CONTROL,
++ DET_BUF_PLANE1_BASE_ADDRESS, &rq_regs->plane1_base_address);
++ REG_GET_4(DCN_EXPANSION_MODE,
++ DRQ_EXPANSION_MODE, &rq_regs->drq_expansion_mode,
++ PRQ_EXPANSION_MODE, &rq_regs->prq_expansion_mode,
++ MRQ_EXPANSION_MODE, &rq_regs->mrq_expansion_mode,
++ CRQ_EXPANSION_MODE, &rq_regs->crq_expansion_mode);
++ REG_GET_8(DCHUBP_REQ_SIZE_CONFIG,
++ CHUNK_SIZE, &rq_regs->rq_regs_l.chunk_size,
++ MIN_CHUNK_SIZE, &rq_regs->rq_regs_l.min_chunk_size,
++ META_CHUNK_SIZE, &rq_regs->rq_regs_l.meta_chunk_size,
++ MIN_META_CHUNK_SIZE, &rq_regs->rq_regs_l.min_meta_chunk_size,
++ DPTE_GROUP_SIZE, &rq_regs->rq_regs_l.dpte_group_size,
++ MPTE_GROUP_SIZE, &rq_regs->rq_regs_l.mpte_group_size,
++ SWATH_HEIGHT, &rq_regs->rq_regs_l.swath_height,
++ PTE_ROW_HEIGHT_LINEAR, &rq_regs->rq_regs_l.pte_row_height_linear);
++ REG_GET_8(DCHUBP_REQ_SIZE_CONFIG_C,
++ CHUNK_SIZE_C, &rq_regs->rq_regs_c.chunk_size,
++ MIN_CHUNK_SIZE_C, &rq_regs->rq_regs_c.min_chunk_size,
++ META_CHUNK_SIZE_C, &rq_regs->rq_regs_c.meta_chunk_size,
++ MIN_META_CHUNK_SIZE_C, &rq_regs->rq_regs_c.min_meta_chunk_size,
++ DPTE_GROUP_SIZE_C, &rq_regs->rq_regs_c.dpte_group_size,
++ MPTE_GROUP_SIZE_C, &rq_regs->rq_regs_c.mpte_group_size,
++ SWATH_HEIGHT_C, &rq_regs->rq_regs_c.swath_height,
++ PTE_ROW_HEIGHT_LINEAR_C, &rq_regs->rq_regs_c.pte_row_height_linear);
++
++ /* DLG - Per hubp */
++ REG_GET_2(BLANK_OFFSET_0,
++ REFCYC_H_BLANK_END, &dlg_attr->refcyc_h_blank_end,
++ DLG_V_BLANK_END, &dlg_attr->dlg_vblank_end);
++
++ REG_GET(BLANK_OFFSET_1,
++ MIN_DST_Y_NEXT_START, &dlg_attr->min_dst_y_next_start);
++
++ REG_GET(DST_DIMENSIONS,
++ REFCYC_PER_HTOTAL, &dlg_attr->refcyc_per_htotal);
++
++ REG_GET_2(DST_AFTER_SCALER,
++ REFCYC_X_AFTER_SCALER, &dlg_attr->refcyc_x_after_scaler,
++ DST_Y_AFTER_SCALER, &dlg_attr->dst_y_after_scaler);
++
++ if (REG(PREFETCH_SETTINS))
++ REG_GET_2(PREFETCH_SETTINS,
++ DST_Y_PREFETCH, &dlg_attr->dst_y_prefetch,
++ VRATIO_PREFETCH, &dlg_attr->vratio_prefetch);
++ else
++ REG_GET_2(PREFETCH_SETTINGS,
++ DST_Y_PREFETCH, &dlg_attr->dst_y_prefetch,
++ VRATIO_PREFETCH, &dlg_attr->vratio_prefetch);
++
++ REG_GET_2(VBLANK_PARAMETERS_0,
++ DST_Y_PER_VM_VBLANK, &dlg_attr->dst_y_per_vm_vblank,
++ DST_Y_PER_ROW_VBLANK, &dlg_attr->dst_y_per_row_vblank);
++
++ REG_GET(REF_FREQ_TO_PIX_FREQ,
++ REF_FREQ_TO_PIX_FREQ, &dlg_attr->ref_freq_to_pix_freq);
++
++ /* DLG - Per luma/chroma */
++ REG_GET(VBLANK_PARAMETERS_1,
++ REFCYC_PER_PTE_GROUP_VBLANK_L, &dlg_attr->refcyc_per_pte_group_vblank_l);
++
++ REG_GET(VBLANK_PARAMETERS_3,
++ REFCYC_PER_META_CHUNK_VBLANK_L, &dlg_attr->refcyc_per_meta_chunk_vblank_l);
++
++ if (REG(NOM_PARAMETERS_0))
++ REG_GET(NOM_PARAMETERS_0,
++ DST_Y_PER_PTE_ROW_NOM_L, &dlg_attr->dst_y_per_pte_row_nom_l);
++
++ if (REG(NOM_PARAMETERS_1))
++ REG_GET(NOM_PARAMETERS_1,
++ REFCYC_PER_PTE_GROUP_NOM_L, &dlg_attr->refcyc_per_pte_group_nom_l);
++
++ REG_GET(NOM_PARAMETERS_4,
++ DST_Y_PER_META_ROW_NOM_L, &dlg_attr->dst_y_per_meta_row_nom_l);
++
++ REG_GET(NOM_PARAMETERS_5,
++ REFCYC_PER_META_CHUNK_NOM_L, &dlg_attr->refcyc_per_meta_chunk_nom_l);
++
++ REG_GET_2(PER_LINE_DELIVERY_PRE,
++ REFCYC_PER_LINE_DELIVERY_PRE_L, &dlg_attr->refcyc_per_line_delivery_pre_l,
++ REFCYC_PER_LINE_DELIVERY_PRE_C, &dlg_attr->refcyc_per_line_delivery_pre_c);
++
++ REG_GET_2(PER_LINE_DELIVERY,
++ REFCYC_PER_LINE_DELIVERY_L, &dlg_attr->refcyc_per_line_delivery_l,
++ REFCYC_PER_LINE_DELIVERY_C, &dlg_attr->refcyc_per_line_delivery_c);
++
++ if (REG(PREFETCH_SETTINS_C))
++ REG_GET(PREFETCH_SETTINS_C,
++ VRATIO_PREFETCH_C, &dlg_attr->vratio_prefetch_c);
++ else
++ REG_GET(PREFETCH_SETTINGS_C,
++ VRATIO_PREFETCH_C, &dlg_attr->vratio_prefetch_c);
++
++ REG_GET(VBLANK_PARAMETERS_2,
++ REFCYC_PER_PTE_GROUP_VBLANK_C, &dlg_attr->refcyc_per_pte_group_vblank_c);
++
++ REG_GET(VBLANK_PARAMETERS_4,
++ REFCYC_PER_META_CHUNK_VBLANK_C, &dlg_attr->refcyc_per_meta_chunk_vblank_c);
++
++ if (REG(NOM_PARAMETERS_2))
++ REG_GET(NOM_PARAMETERS_2,
++ DST_Y_PER_PTE_ROW_NOM_C, &dlg_attr->dst_y_per_pte_row_nom_c);
++
++ if (REG(NOM_PARAMETERS_3))
++ REG_GET(NOM_PARAMETERS_3,
++ REFCYC_PER_PTE_GROUP_NOM_C, &dlg_attr->refcyc_per_pte_group_nom_c);
++
++ REG_GET(NOM_PARAMETERS_6,
++ DST_Y_PER_META_ROW_NOM_C, &dlg_attr->dst_y_per_meta_row_nom_c);
++
++ REG_GET(NOM_PARAMETERS_7,
++ REFCYC_PER_META_CHUNK_NOM_C, &dlg_attr->refcyc_per_meta_chunk_nom_c);
++
++ /* TTU - per hubp */
++ REG_GET_2(DCN_TTU_QOS_WM,
++ QoS_LEVEL_LOW_WM, &ttu_attr->qos_level_low_wm,
++ QoS_LEVEL_HIGH_WM, &ttu_attr->qos_level_high_wm);
++
++ REG_GET_2(DCN_GLOBAL_TTU_CNTL,
++ MIN_TTU_VBLANK, &ttu_attr->min_ttu_vblank,
++ QoS_LEVEL_FLIP, &ttu_attr->qos_level_flip);
++
++ /* TTU - per luma/chroma */
++ /* Assumed surf0 is luma and 1 is chroma */
++
++ REG_GET_3(DCN_SURF0_TTU_CNTL0,
++ REFCYC_PER_REQ_DELIVERY, &ttu_attr->refcyc_per_req_delivery_l,
++ QoS_LEVEL_FIXED, &ttu_attr->qos_level_fixed_l,
++ QoS_RAMP_DISABLE, &ttu_attr->qos_ramp_disable_l);
++
++ REG_GET(DCN_SURF0_TTU_CNTL1,
++ REFCYC_PER_REQ_DELIVERY_PRE,
++ &ttu_attr->refcyc_per_req_delivery_pre_l);
++
++ REG_GET_3(DCN_SURF1_TTU_CNTL0,
++ REFCYC_PER_REQ_DELIVERY, &ttu_attr->refcyc_per_req_delivery_c,
++ QoS_LEVEL_FIXED, &ttu_attr->qos_level_fixed_c,
++ QoS_RAMP_DISABLE, &ttu_attr->qos_ramp_disable_c);
++
++ REG_GET(DCN_SURF1_TTU_CNTL1,
++ REFCYC_PER_REQ_DELIVERY_PRE,
++ &ttu_attr->refcyc_per_req_delivery_pre_c);
++
++ /* Rest of hubp */
+ REG_GET(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, &s->pixel_format);
+
+@@ -956,6 +1106,7 @@ static struct hubp_funcs dcn10_hubp_funcs = {
+ .hubp_disconnect = hubp1_disconnect,
+ .hubp_clk_cntl = hubp1_clk_cntl,
+ .hubp_vtg_sel = hubp1_vtg_sel,
++ .hubp_read_state = hubp1_read_state,
+ };
+
+ /*****************************************/
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+index e0d6d32..920ae3a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+@@ -690,24 +690,7 @@ void dcn10_hubp_construct(
+ const struct dcn_mi_shift *hubp_shift,
+ const struct dcn_mi_mask *hubp_mask);
+
+-
+-struct dcn_hubp_state {
+- uint32_t pixel_format;
+- uint32_t inuse_addr_hi;
+- uint32_t viewport_width;
+- uint32_t viewport_height;
+- uint32_t rotation_angle;
+- uint32_t h_mirror_en;
+- uint32_t sw_mode;
+- uint32_t dcc_en;
+- uint32_t blank_en;
+- uint32_t underflow_status;
+- uint32_t ttu_disable;
+- uint32_t min_ttu_vblank;
+- uint32_t qos_level_low_wm;
+- uint32_t qos_level_high_wm;
+-};
+-void hubp1_read_state(struct dcn10_hubp *hubp1,
++void hubp1_read_state(struct hubp *hubp,
+ struct dcn_hubp_state *s);
+
+ enum cursor_pitch hubp1_get_cursor_pitch(unsigned int pitch);
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 1f59b27..c9d4e96 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -112,6 +112,104 @@ void dcn10_log_hubbub_state(struct dc *dc)
+ DTN_INFO("\n");
+ }
+
++static void print_rq_dlg_ttu_regs(struct dc_context *dc_ctx, struct dcn_hubp_state *s)
++{
++ struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
++ struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;
++ struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
++
++ DTN_INFO("========Requester========\n");
++ DTN_INFO("drq_expansion_mode = 0x%0x\n", rq_regs->drq_expansion_mode);
++ DTN_INFO("prq_expansion_mode = 0x%0x\n", rq_regs->prq_expansion_mode);
++ DTN_INFO("mrq_expansion_mode = 0x%0x\n", rq_regs->mrq_expansion_mode);
++ DTN_INFO("crq_expansion_mode = 0x%0x\n", rq_regs->crq_expansion_mode);
++ DTN_INFO("plane1_base_address = 0x%0x\n", rq_regs->plane1_base_address);
++ DTN_INFO("==<LUMA>==\n");
++ DTN_INFO("chunk_size = 0x%0x\n", rq_regs->rq_regs_l.chunk_size);
++ DTN_INFO("min_chunk_size = 0x%0x\n", rq_regs->rq_regs_l.min_chunk_size);
++ DTN_INFO("meta_chunk_size = 0x%0x\n", rq_regs->rq_regs_l.meta_chunk_size);
++ DTN_INFO("min_meta_chunk_size = 0x%0x\n", rq_regs->rq_regs_l.min_meta_chunk_size);
++ DTN_INFO("dpte_group_size = 0x%0x\n", rq_regs->rq_regs_l.dpte_group_size);
++ DTN_INFO("mpte_group_size = 0x%0x\n", rq_regs->rq_regs_l.mpte_group_size);
++ DTN_INFO("swath_height = 0x%0x\n", rq_regs->rq_regs_l.swath_height);
++ DTN_INFO("pte_row_height_linear = 0x%0x\n", rq_regs->rq_regs_l.pte_row_height_linear);
++ DTN_INFO("==<CHROMA>==\n");
++ DTN_INFO("chunk_size = 0x%0x\n", rq_regs->rq_regs_c.chunk_size);
++ DTN_INFO("min_chunk_size = 0x%0x\n", rq_regs->rq_regs_c.min_chunk_size);
++ DTN_INFO("meta_chunk_size = 0x%0x\n", rq_regs->rq_regs_c.meta_chunk_size);
++ DTN_INFO("min_meta_chunk_size = 0x%0x\n", rq_regs->rq_regs_c.min_meta_chunk_size);
++ DTN_INFO("dpte_group_size = 0x%0x\n", rq_regs->rq_regs_c.dpte_group_size);
++ DTN_INFO("mpte_group_size = 0x%0x\n", rq_regs->rq_regs_c.mpte_group_size);
++ DTN_INFO("swath_height = 0x%0x\n", rq_regs->rq_regs_c.swath_height);
++ DTN_INFO("pte_row_height_linear = 0x%0x\n", rq_regs->rq_regs_c.pte_row_height_linear);
++
++ DTN_INFO("========DLG========\n");
++ DTN_INFO("refcyc_h_blank_end = 0x%0x\n", dlg_regs->refcyc_h_blank_end);
++ DTN_INFO("dlg_vblank_end = 0x%0x\n", dlg_regs->dlg_vblank_end);
++ DTN_INFO("min_dst_y_next_start = 0x%0x\n", dlg_regs->min_dst_y_next_start);
++ DTN_INFO("refcyc_per_htotal = 0x%0x\n", dlg_regs->refcyc_per_htotal);
++ DTN_INFO("refcyc_x_after_scaler = 0x%0x\n", dlg_regs->refcyc_x_after_scaler);
++ DTN_INFO("dst_y_after_scaler = 0x%0x\n", dlg_regs->dst_y_after_scaler);
++ DTN_INFO("dst_y_prefetch = 0x%0x\n", dlg_regs->dst_y_prefetch);
++ DTN_INFO("dst_y_per_vm_vblank = 0x%0x\n", dlg_regs->dst_y_per_vm_vblank);
++ DTN_INFO("dst_y_per_row_vblank = 0x%0x\n", dlg_regs->dst_y_per_row_vblank);
++ DTN_INFO("dst_y_per_vm_flip = 0x%0x\n", dlg_regs->dst_y_per_vm_flip);
++ DTN_INFO("dst_y_per_row_flip = 0x%0x\n", dlg_regs->dst_y_per_row_flip);
++ DTN_INFO("ref_freq_to_pix_freq = 0x%0x\n", dlg_regs->ref_freq_to_pix_freq);
++ DTN_INFO("vratio_prefetch = 0x%0x\n", dlg_regs->vratio_prefetch);
++ DTN_INFO("vratio_prefetch_c = 0x%0x\n", dlg_regs->vratio_prefetch_c);
++ DTN_INFO("refcyc_per_pte_group_vblank_l = 0x%0x\n", dlg_regs->refcyc_per_pte_group_vblank_l);
++ DTN_INFO("refcyc_per_pte_group_vblank_c = 0x%0x\n", dlg_regs->refcyc_per_pte_group_vblank_c);
++ DTN_INFO("refcyc_per_meta_chunk_vblank_l = 0x%0x\n", dlg_regs->refcyc_per_meta_chunk_vblank_l);
++ DTN_INFO("refcyc_per_meta_chunk_vblank_c = 0x%0x\n", dlg_regs->refcyc_per_meta_chunk_vblank_c);
++ DTN_INFO("refcyc_per_pte_group_flip_l = 0x%0x\n", dlg_regs->refcyc_per_pte_group_flip_l);
++ DTN_INFO("refcyc_per_pte_group_flip_c = 0x%0x\n", dlg_regs->refcyc_per_pte_group_flip_c);
++ DTN_INFO("refcyc_per_meta_chunk_flip_l = 0x%0x\n", dlg_regs->refcyc_per_meta_chunk_flip_l);
++ DTN_INFO("refcyc_per_meta_chunk_flip_c = 0x%0x\n", dlg_regs->refcyc_per_meta_chunk_flip_c);
++ DTN_INFO("dst_y_per_pte_row_nom_l = 0x%0x\n", dlg_regs->dst_y_per_pte_row_nom_l);
++ DTN_INFO("dst_y_per_pte_row_nom_c = 0x%0x\n", dlg_regs->dst_y_per_pte_row_nom_c);
++ DTN_INFO("refcyc_per_pte_group_nom_l = 0x%0x\n", dlg_regs->refcyc_per_pte_group_nom_l);
++ DTN_INFO("refcyc_per_pte_group_nom_c = 0x%0x\n", dlg_regs->refcyc_per_pte_group_nom_c);
++ DTN_INFO("dst_y_per_meta_row_nom_l = 0x%0x\n", dlg_regs->dst_y_per_meta_row_nom_l);
++ DTN_INFO("dst_y_per_meta_row_nom_c = 0x%0x\n", dlg_regs->dst_y_per_meta_row_nom_c);
++ DTN_INFO("refcyc_per_meta_chunk_nom_l = 0x%0x\n", dlg_regs->refcyc_per_meta_chunk_nom_l);
++ DTN_INFO("refcyc_per_meta_chunk_nom_c = 0x%0x\n", dlg_regs->refcyc_per_meta_chunk_nom_c);
++ DTN_INFO("refcyc_per_line_delivery_pre_l = 0x%0x\n", dlg_regs->refcyc_per_line_delivery_pre_l);
++ DTN_INFO("refcyc_per_line_delivery_pre_c = 0x%0x\n", dlg_regs->refcyc_per_line_delivery_pre_c);
++ DTN_INFO("refcyc_per_line_delivery_l = 0x%0x\n", dlg_regs->refcyc_per_line_delivery_l);
++ DTN_INFO("refcyc_per_line_delivery_c = 0x%0x\n", dlg_regs->refcyc_per_line_delivery_c);
++ DTN_INFO("chunk_hdl_adjust_cur0 = 0x%0x\n", dlg_regs->chunk_hdl_adjust_cur0);
++ DTN_INFO("dst_y_offset_cur1 = 0x%0x\n", dlg_regs->dst_y_offset_cur1);
++ DTN_INFO("chunk_hdl_adjust_cur1 = 0x%0x\n", dlg_regs->chunk_hdl_adjust_cur1);
++ DTN_INFO("vready_after_vcount0 = 0x%0x\n", dlg_regs->vready_after_vcount0);
++ DTN_INFO("dst_y_delta_drq_limit = 0x%0x\n", dlg_regs->dst_y_delta_drq_limit);
++ DTN_INFO("xfc_reg_transfer_delay = 0x%0x\n", dlg_regs->xfc_reg_transfer_delay);
++ DTN_INFO("xfc_reg_precharge_delay = 0x%0x\n", dlg_regs->xfc_reg_precharge_delay);
++ DTN_INFO("xfc_reg_remote_surface_flip_latency = 0x%0x\n", dlg_regs->xfc_reg_remote_surface_flip_latency);
++
++ DTN_INFO("========TTU========\n");
++ DTN_INFO("qos_level_low_wm = 0x%0x\n", ttu_regs->qos_level_low_wm);
++ DTN_INFO("qos_level_high_wm = 0x%0x\n", ttu_regs->qos_level_high_wm);
++ DTN_INFO("min_ttu_vblank = 0x%0x\n", ttu_regs->min_ttu_vblank);
++ DTN_INFO("qos_level_flip = 0x%0x\n", ttu_regs->qos_level_flip);
++ DTN_INFO("refcyc_per_req_delivery_pre_l = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_pre_l);
++ DTN_INFO("refcyc_per_req_delivery_l = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_l);
++ DTN_INFO("refcyc_per_req_delivery_pre_c = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_pre_c);
++ DTN_INFO("refcyc_per_req_delivery_c = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_c);
++ DTN_INFO("refcyc_per_req_delivery_cur0 = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_cur0);
++ DTN_INFO("refcyc_per_req_delivery_pre_cur0 = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_pre_cur0);
++ DTN_INFO("refcyc_per_req_delivery_cur1 = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_cur1);
++ DTN_INFO("refcyc_per_req_delivery_pre_cur1 = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_pre_cur1);
++ DTN_INFO("qos_level_fixed_l = 0x%0x\n", ttu_regs->qos_level_fixed_l);
++ DTN_INFO("qos_ramp_disable_l = 0x%0x\n", ttu_regs->qos_ramp_disable_l);
++ DTN_INFO("qos_level_fixed_c = 0x%0x\n", ttu_regs->qos_level_fixed_c);
++ DTN_INFO("qos_ramp_disable_c = 0x%0x\n", ttu_regs->qos_ramp_disable_c);
++ DTN_INFO("qos_level_fixed_cur0 = 0x%0x\n", ttu_regs->qos_level_fixed_cur0);
++ DTN_INFO("qos_ramp_disable_cur0 = 0x%0x\n", ttu_regs->qos_ramp_disable_cur0);
++ DTN_INFO("qos_level_fixed_cur1 = 0x%0x\n", ttu_regs->qos_level_fixed_cur1);
++ DTN_INFO("qos_ramp_disable_cur1 = 0x%0x\n", ttu_regs->qos_ramp_disable_cur1);
++}
++
+ void dcn10_log_hw_state(struct dc *dc)
+ {
+ struct dc_context *dc_ctx = dc->ctx;
+@@ -129,7 +227,7 @@ void dcn10_log_hw_state(struct dc *dc)
+ struct hubp *hubp = pool->hubps[i];
+ struct dcn_hubp_state s;
+
+- hubp1_read_state(TO_DCN10_HUBP(hubp), &s);
++ hubp->funcs->hubp_read_state(hubp, &s);
+
+ DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh"
+ " %6d %8d %7d %8xh",
+@@ -201,6 +299,20 @@ void dcn10_log_hw_state(struct dc *dc)
+ }
+ DTN_INFO("\n");
+
++ for (i = 0; i < pool->pipe_count; i++) {
++ struct hubp *hubp = pool->hubps[i];
++ struct dcn_hubp_state s = {0};
++
++ if (!dc->current_state->res_ctx.pipe_ctx[i].stream)
++ continue;
++
++ hubp->funcs->hubp_read_state(hubp, &s);
++ DTN_INFO("RQ-DLG-TTU registers for HUBP%d:\n", i);
++ print_rq_dlg_ttu_regs(dc_ctx, &s);
++ DTN_INFO("\n");
++ }
++ DTN_INFO("\n");
++
+ log_mpc_crc(dc);
+
+ DTN_INFO_END();
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+index 9ced254..3866147 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+@@ -56,6 +56,25 @@ struct hubp {
+ bool power_gated;
+ };
+
++struct dcn_hubp_state {
++ struct _vcs_dpi_display_dlg_regs_st dlg_attr;
++ struct _vcs_dpi_display_ttu_regs_st ttu_attr;
++ struct _vcs_dpi_display_rq_regs_st rq_regs;
++ uint32_t pixel_format;
++ uint32_t inuse_addr_hi;
++ uint32_t viewport_width;
++ uint32_t viewport_height;
++ uint32_t rotation_angle;
++ uint32_t h_mirror_en;
++ uint32_t sw_mode;
++ uint32_t dcc_en;
++ uint32_t blank_en;
++ uint32_t underflow_status;
++ uint32_t ttu_disable;
++ uint32_t min_ttu_vblank;
++ uint32_t qos_level_low_wm;
++ uint32_t qos_level_high_wm;
++};
+
+ struct hubp_funcs {
+ void (*hubp_setup)(
+@@ -121,6 +140,7 @@ struct hubp_funcs {
+
+ void (*hubp_clk_cntl)(struct hubp *hubp, bool enable);
+ void (*hubp_vtg_sel)(struct hubp *hubp, uint32_t otg_inst);
++ void (*hubp_read_state)(struct hubp *hubp, struct dcn_hubp_state *s);
+
+ };
+
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
+index 77eb728..3306e7b 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
+@@ -183,6 +183,36 @@
+ FN(reg_name, f4), v4, \
+ FN(reg_name, f5), v5)
+
++#define REG_GET_6(reg_name, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6) \
++ generic_reg_get6(CTX, REG(reg_name), \
++ FN(reg_name, f1), v1, \
++ FN(reg_name, f2), v2, \
++ FN(reg_name, f3), v3, \
++ FN(reg_name, f4), v4, \
++ FN(reg_name, f5), v5, \
++ FN(reg_name, f6), v6)
++
++#define REG_GET_7(reg_name, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6, f7, v7) \
++ generic_reg_get7(CTX, REG(reg_name), \
++ FN(reg_name, f1), v1, \
++ FN(reg_name, f2), v2, \
++ FN(reg_name, f3), v3, \
++ FN(reg_name, f4), v4, \
++ FN(reg_name, f5), v5, \
++ FN(reg_name, f6), v6, \
++ FN(reg_name, f7), v7)
++
++#define REG_GET_8(reg_name, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6, f7, v7, f8, v8) \
++ generic_reg_get8(CTX, REG(reg_name), \
++ FN(reg_name, f1), v1, \
++ FN(reg_name, f2), v2, \
++ FN(reg_name, f3), v3, \
++ FN(reg_name, f4), v4, \
++ FN(reg_name, f5), v5, \
++ FN(reg_name, f6), v6, \
++ FN(reg_name, f7), v7, \
++ FN(reg_name, f8), v8)
++
+ /* macro to poll and wait for a register field to read back given value */
+
+ #define REG_WAIT(reg_name, field, val, delay_between_poll_us, max_try) \
+@@ -389,4 +419,30 @@ uint32_t generic_reg_get5(const struct dc_context *ctx, uint32_t addr,
+ uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
+ uint8_t shift5, uint32_t mask5, uint32_t *field_value5);
+
++uint32_t generic_reg_get6(const struct dc_context *ctx, uint32_t addr,
++ uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
++ uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
++ uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
++ uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
++ uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
++ uint8_t shift6, uint32_t mask6, uint32_t *field_value6);
++
++uint32_t generic_reg_get7(const struct dc_context *ctx, uint32_t addr,
++ uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
++ uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
++ uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
++ uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
++ uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
++ uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
++ uint8_t shift7, uint32_t mask7, uint32_t *field_value7);
++
++uint32_t generic_reg_get8(const struct dc_context *ctx, uint32_t addr,
++ uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
++ uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
++ uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
++ uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
++ uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
++ uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
++ uint8_t shift7, uint32_t mask7, uint32_t *field_value7,
++ uint8_t shift8, uint32_t mask8, uint32_t *field_value8);
+ #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_REG_HELPER_H_ */
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4218-drm-amd-display-add-calculated-clock-logging-to-DTN.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4218-drm-amd-display-add-calculated-clock-logging-to-DTN.patch
new file mode 100644
index 00000000..5cd610d4
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4218-drm-amd-display-add-calculated-clock-logging-to-DTN.patch
@@ -0,0 +1,38 @@
+From 9fc34a808f4602ba9421d53654cd4e6918c94c4b Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Wed, 4 Apr 2018 16:03:38 -0400
+Subject: [PATCH 4218/5725] drm/amd/display: add calculated clock logging to
+ DTN
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index c9d4e96..468113d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -311,7 +311,16 @@ void dcn10_log_hw_state(struct dc *dc)
+ print_rq_dlg_ttu_regs(dc_ctx, &s);
+ DTN_INFO("\n");
+ }
+- DTN_INFO("\n");
++
++ DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n"
++ "dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n",
++ dc->current_state->bw.dcn.calc_clk.dcfclk_khz,
++ dc->current_state->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
++ dc->current_state->bw.dcn.calc_clk.dispclk_khz,
++ dc->current_state->bw.dcn.calc_clk.dppclk_khz,
++ dc->current_state->bw.dcn.calc_clk.max_supported_dppclk_khz,
++ dc->current_state->bw.dcn.calc_clk.fclk_khz,
++ dc->current_state->bw.dcn.calc_clk.socclk_khz);
+
+ log_mpc_crc(dc);
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4219-drm-amd-display-add-missing-colorspace-for-set-black.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4219-drm-amd-display-add-missing-colorspace-for-set-black.patch
new file mode 100644
index 00000000..56414817
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4219-drm-amd-display-add-missing-colorspace-for-set-black.patch
@@ -0,0 +1,55 @@
+From 26f556956aacc7dd898ff8c8a573e2b303a36089 Mon Sep 17 00:00:00 2001
+From: Yue Hin Lau <Yuehin.Lau@amd.com>
+Date: Mon, 9 Apr 2018 14:46:32 -0400
+Subject: [PATCH 4219/5725] drm/amd/display: add missing colorspace for set
+ black color
+
+Signed-off-by: Yue Hin Lau <Yuehin.Lau@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ .../gpu/drm/amd/display/dc/core/dc_hw_sequencer.c | 21 ++++++++++++++++++++-
+ 1 file changed, 20 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+index 481f692..83d1215 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+@@ -208,6 +208,7 @@ void color_space_to_black_color(
+ case COLOR_SPACE_YCBCR709:
+ case COLOR_SPACE_YCBCR601_LIMITED:
+ case COLOR_SPACE_YCBCR709_LIMITED:
++ case COLOR_SPACE_2020_YCBCR:
+ *black_color = black_color_format[BLACK_COLOR_FORMAT_YUV_CV];
+ break;
+
+@@ -216,7 +217,25 @@ void color_space_to_black_color(
+ black_color_format[BLACK_COLOR_FORMAT_RGB_LIMITED];
+ break;
+
+- default:
++ /**
++ * Remove default and add case for all color space
++ * so when we forget to add new color space
++ * compiler will give a warning
++ */
++ case COLOR_SPACE_UNKNOWN:
++ case COLOR_SPACE_SRGB:
++ case COLOR_SPACE_XR_RGB:
++ case COLOR_SPACE_MSREF_SCRGB:
++ case COLOR_SPACE_XV_YCC_709:
++ case COLOR_SPACE_XV_YCC_601:
++ case COLOR_SPACE_2020_RGB_FULLRANGE:
++ case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
++ case COLOR_SPACE_ADOBERGB:
++ case COLOR_SPACE_DCIP3:
++ case COLOR_SPACE_DISPLAYNATIVE:
++ case COLOR_SPACE_DOLBYVISION:
++ case COLOR_SPACE_APPCTRL:
++ case COLOR_SPACE_CUSTOMPOINTS:
+ /* fefault is sRGB black (full range). */
+ *black_color =
+ black_color_format[BLACK_COLOR_FORMAT_RGB_FULLRANGE];
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4220-drm-amd-display-Use-dig-enable-to-determine-fast-boo.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4220-drm-amd-display-Use-dig-enable-to-determine-fast-boo.patch
new file mode 100644
index 00000000..2d8ded3e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4220-drm-amd-display-Use-dig-enable-to-determine-fast-boo.patch
@@ -0,0 +1,166 @@
+From 0666d10c5c6fa478c7fb0b229520cbef053377e2 Mon Sep 17 00:00:00 2001
+From: Yongqiang Sun <yongqiang.sun@amd.com>
+Date: Mon, 9 Apr 2018 16:15:20 -0400
+Subject: [PATCH 4220/5725] drm/amd/display: Use dig enable to determine fast
+ boot optimization.
+
+Linux doesn't know the lid state, so it is better to check the DIG
+enable value from the register.
+
+Signed-off-by: Yongqiang Sun <yongqiang.sun@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc_stream.h | 1 -
+ .../gpu/drm/amd/display/dc/dce/dce_link_encoder.c | 6 ++-
+ .../gpu/drm/amd/display/dc/dce/dce_link_encoder.h | 2 +
+ .../amd/display/dc/dce110/dce110_hw_sequencer.c | 47 +++++++---------------
+ .../gpu/drm/amd/display/dc/inc/hw/link_encoder.h | 1 +
+ 5 files changed, 21 insertions(+), 36 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+index 4750768..aefc76b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+@@ -98,7 +98,6 @@ struct dc_stream_state {
+ int phy_pix_clk;
+ enum signal_type signal;
+ bool dpms_off;
+- bool lid_state_closed;
+
+ struct dc_stream_status status;
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+index 8167cad..dbe3b26 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+@@ -113,6 +113,7 @@ static const struct link_encoder_funcs dce110_lnk_enc_funcs = {
+ .connect_dig_be_to_fe = dce110_link_encoder_connect_dig_be_to_fe,
+ .enable_hpd = dce110_link_encoder_enable_hpd,
+ .disable_hpd = dce110_link_encoder_disable_hpd,
++ .is_dig_enabled = dce110_is_dig_enabled,
+ .destroy = dce110_link_encoder_destroy
+ };
+
+@@ -535,8 +536,9 @@ void dce110_psr_program_secondary_packet(struct link_encoder *enc,
+ DP_SEC_GSP0_PRIORITY, 1);
+ }
+
+-static bool is_dig_enabled(const struct dce110_link_encoder *enc110)
++bool dce110_is_dig_enabled(struct link_encoder *enc)
+ {
++ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+ uint32_t value;
+
+ REG_GET(DIG_BE_EN_CNTL, DIG_ENABLE, &value);
+@@ -1031,7 +1033,7 @@ void dce110_link_encoder_disable_output(
+ struct bp_transmitter_control cntl = { 0 };
+ enum bp_result result;
+
+- if (!is_dig_enabled(enc110)) {
++ if (!dce110_is_dig_enabled(enc)) {
+ /* OF_SKIP_POWER_DOWN_INACTIVE_ENCODER */
+ return;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
+index 0ec3433..3470694 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
+@@ -263,4 +263,6 @@ void dce110_psr_program_dp_dphy_fast_training(struct link_encoder *enc,
+ void dce110_psr_program_secondary_packet(struct link_encoder *enc,
+ unsigned int sdp_transmit_line_num_deadline);
+
++bool dce110_is_dig_enabled(struct link_encoder *enc);
++
+ #endif /* __DC_LINK_ENCODER__DCE110_H__ */
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index 3920310..0c92348 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -1481,15 +1481,15 @@ static void disable_vga_and_power_gate_all_controllers(
+ }
+ }
+
+-static bool is_eDP_lid_closed(struct dc_state *context)
++static struct dc_link *get_link_for_edp(struct dc *dc)
+ {
+ int i;
+
+- for (i = 0; i < context->stream_count; i++) {
+- if (context->streams[i]->signal == SIGNAL_TYPE_EDP)
+- return context->streams[i]->lid_state_closed;
++ for (i = 0; i < dc->link_count; i++) {
++ if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP)
++ return dc->links[i];
+ }
+- return false;
++ return NULL;
+ }
+
+ static struct dc_link *get_link_for_edp_not_in_use(
+@@ -1526,41 +1526,22 @@ static struct dc_link *get_link_for_edp_not_in_use(
+ */
+ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
+ {
+- /* check eDP lid state and BIOS_SCRATCH_3 to determine fast boot optimization
+- * UEFI boot
+- * edp_active_status_from_scratch fast boot optimization
+- * S4/S5 resume:
+- * Lid Open true true
+- * Lid Close false false
+- *
+- * S3/ resume:
+- * Lid Open false false
+- * Lid Close false false
+- *
+- * Legacy boot:
+- * edp_active_status_from_scratch fast boot optimization
+- * S4/S resume:
+- * Lid Open true true
+- * Lid Close true false
+- *
+- * S3/ resume:
+- * Lid Open false false
+- * Lid Close false false
+- */
+- struct dc_bios *dcb = dc->ctx->dc_bios;
+- bool lid_state_closed = is_eDP_lid_closed(context);
+ struct dc_link *edp_link_to_turnoff = NULL;
+- bool edp_active_status_from_scratch =
+- (dcb->funcs->get_vga_enabled_displays(dc->ctx->dc_bios) == ATOM_DISPLAY_LCD1_ACTIVE);
++ struct dc_link *edp_link = get_link_for_edp(dc);
++ bool can_eDP_fast_boot_optimize = false;
++
++ if (edp_link) {
++ can_eDP_fast_boot_optimize =
++ edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc);
++ }
+
+- /*Lid open*/
+- if (!lid_state_closed) {
++ if (can_eDP_fast_boot_optimize) {
+ edp_link_to_turnoff = get_link_for_edp_not_in_use(dc, context);
+
+ /* if OS doesn't light up eDP and eDP link is available, we want to disable
+ * If resume from S4/S5, should optimization.
+ */
+- if (!edp_link_to_turnoff && edp_active_status_from_scratch)
++ if (!edp_link_to_turnoff)
+ dc->apply_edp_fast_boot_optimization = true;
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+index 54d8a13..cf6df2e 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+@@ -149,6 +149,7 @@ struct link_encoder_funcs {
+ bool connect);
+ void (*enable_hpd)(struct link_encoder *enc);
+ void (*disable_hpd)(struct link_encoder *enc);
++ bool (*is_dig_enabled)(struct link_encoder *enc);
+ void (*destroy)(struct link_encoder **enc);
+ };
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4221-drm-amdgpu-ifdef-unused-var.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4221-drm-amdgpu-ifdef-unused-var.patch
new file mode 100644
index 00000000..fe307667
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4221-drm-amdgpu-ifdef-unused-var.patch
@@ -0,0 +1,30 @@
+From 6e63106288eac2d18bcbcb24deb87a7053bb550e Mon Sep 17 00:00:00 2001
+From: Harry Wentland <harry.wentland@amd.com>
+Date: Tue, 17 Apr 2018 10:20:30 -0400
+Subject: [PATCH 4221/5725] drm/amdgpu: ifdef unused var
+
+The code that uses it is guarded with #if 0. Do the same for the unused var.
+
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index 263d3ab..b05fe26 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -806,7 +806,9 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
+
+ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
+ {
++#if 0
+ u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
++#endif
+ unsigned size;
+
+ /*
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4222-drm-amdgpu-add-amdgpu_bo_param.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4222-drm-amdgpu-add-amdgpu_bo_param.patch
new file mode 100644
index 00000000..7af150e5
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4222-drm-amdgpu-add-amdgpu_bo_param.patch
@@ -0,0 +1,207 @@
+From 717e59265bec03634dd7babb38e9f2556f044dbb Mon Sep 17 00:00:00 2001
+From: Chunming Zhou <david1.zhou@amd.com>
+Date: Mon, 16 Apr 2018 17:57:19 +0800
+Subject: [PATCH 4222/5725] drm/amdgpu: add amdgpu_bo_param
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+amdgpu_bo_create has too many parameters and is used in
+too many places. Collect them into one structure.
+
+Change-Id: Ib2aa98ee37a70f3cb0d61eef1d336e89187554d5
+Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+
+Conflicts:
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 69 +++++++++++++++++-------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 9 ++++
+ 2 files changed, 48 insertions(+), 30 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index f14b27a..4c29d69 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -363,28 +363,25 @@ static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
+ man->size << PAGE_SHIFT);
+ return false;
+ }
+-
+-static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
+- int byte_align, u32 domain,
+- u64 flags, enum ttm_bo_type type,
+- struct reservation_object *resv,
++static int amdgpu_bo_do_create(struct amdgpu_device *adev,
++ struct amdgpu_bo_param *bp,
+ struct amdgpu_bo **bo_ptr)
+ {
+ struct ttm_operation_ctx ctx = {
+- .interruptible = (type != ttm_bo_type_kernel),
++ .interruptible = (bp->type != ttm_bo_type_kernel),
+ .no_wait_gpu = false,
+- .resv = resv,
++ .resv = bp->resv,
+ .flags = TTM_OPT_FLAG_ALLOW_RES_EVICT
+ };
+ struct amdgpu_bo *bo;
+- unsigned long page_align;
++ unsigned long page_align, size = bp->size;
+ size_t acc_size;
+ int r;
+
+- page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
++ page_align = roundup(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
+ size = ALIGN(size, PAGE_SIZE);
+
+- if (!amdgpu_bo_validate_size(adev, size, domain))
++ if (!amdgpu_bo_validate_size(adev, size, bp->domain))
+ return -ENOMEM;
+
+ *bo_ptr = NULL;
+@@ -408,11 +405,11 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
+ AMDGPU_GEM_DOMAIN_DGMA_IMPORT);
+ bo->allowed_domains = bo->preferred_domains;
+
+- if (type != ttm_bo_type_kernel &&
++ if (bp->type != ttm_bo_type_kernel &&
+ bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
+ bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
+
+- bo->flags = flags;
++ bo->flags = bp->flags;
+ #ifdef CONFIG_X86_32
+ /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
+ * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
+@@ -442,12 +439,11 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
+ #endif
+
+ bo->tbo.bdev = &adev->mman.bdev;
++ amdgpu_ttm_placement_from_domain(bo, bp->domain);
+
+- amdgpu_ttm_placement_from_domain(bo, domain);
+-
+- r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
++ r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
+ &bo->placement, page_align, &ctx, acc_size,
+- NULL, resv, &amdgpu_ttm_bo_destroy);
++ NULL, bp->resv, &amdgpu_ttm_bo_destroy);
+
+ if (unlikely(r != 0))
+ return r;
+@@ -463,10 +459,10 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
+ if (domain & AMDGPU_GEM_DOMAIN_DGMA && adev->ssg.enabled)
+ bo->tbo.ssg_can_map = true;
+
+- if (type == ttm_bo_type_kernel)
++ if (bp->type == ttm_bo_type_kernel)
+ bo->tbo.priority = 1;
+
+- if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
++ if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
+ bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
+ struct dma_fence *fence;
+
+@@ -483,14 +479,14 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
+ #endif
+ dma_fence_put(fence);
+ }
+- if (!resv)
++ if (!bp->resv)
+ amdgpu_bo_unreserve(bo);
+ *bo_ptr = bo;
+
+ trace_amdgpu_bo_create(bo);
+
+ /* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
+- if (type == ttm_bo_type_device)
++ if (bp->type == ttm_bo_type_device)
+ bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+
+ if (((flags & AMDGPU_GEM_CREATE_NO_EVICT) && amdgpu_no_evict) ||
+@@ -505,7 +501,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
+ return 0;
+
+ fail_unreserve:
+- if (!resv)
++ if (!bp->resv)
+ ww_mutex_unlock(&bo->tbo.resv->lock);
+ amdgpu_bo_unref(&bo);
+ return r;
+@@ -515,16 +511,22 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
+ unsigned long size, int byte_align,
+ struct amdgpu_bo *bo)
+ {
++ struct amdgpu_bo_param bp = {
++ .size = size,
++ .byte_align = byte_align,
++ .domain = AMDGPU_GEM_DOMAIN_GTT,
++ .flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC |
++ AMDGPU_GEM_CREATE_SHADOW,
++ .type = ttm_bo_type_kernel,
++ .resv = bo->tbo.resv
++ };
+ int r;
+
+ if (bo->shadow)
+ return 0;
+-
+- r = amdgpu_bo_do_create(adev, size, byte_align, AMDGPU_GEM_DOMAIN_GTT,
+- AMDGPU_GEM_CREATE_CPU_GTT_USWC |
+- AMDGPU_GEM_CREATE_SHADOW,
+- ttm_bo_type_kernel,
+- bo->tbo.resv, &bo->shadow);
++
++ r = amdgpu_bo_do_create(adev, &bp, &bo->shadow);
++
+ if (!r) {
+ bo->shadow->parent = amdgpu_bo_ref(bo);
+ mutex_lock(&adev->shadow_list_lock);
+@@ -541,11 +543,18 @@ int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size,
+ struct reservation_object *resv,
+ struct amdgpu_bo **bo_ptr)
+ {
+- uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW;
++ struct amdgpu_bo_param bp = {
++ .size = size,
++ .byte_align = byte_align,
++ .domain = domain,
++ .flags = flags & ~AMDGPU_GEM_CREATE_SHADOW,
++ .type = type,
++ .resv = resv
++ };
+ int r;
++
++ r = amdgpu_bo_do_create(adev, &bp, bo_ptr);
+
+- r = amdgpu_bo_do_create(adev, size, byte_align, domain,
+- parent_flags, type, resv, bo_ptr);
+ if (r)
+ return r;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+index 291477d..7abf76b 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+@@ -33,6 +33,15 @@
+
+ #define AMDGPU_BO_INVALID_OFFSET LONG_MAX
+
++struct amdgpu_bo_param {
++ unsigned long size;
++ int byte_align;
++ u32 domain;
++ u64 flags;
++ enum ttm_bo_type type;
++ struct reservation_object *resv;
++};
++
+ /* bo virtual addresses in a vm */
+ struct amdgpu_bo_va_mapping {
+ struct amdgpu_bo_va *bo_va;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4223-drm-amdgpu-use-amdgpu_bo_param-for-amdgpu_bo_create-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4223-drm-amdgpu-use-amdgpu_bo_param-for-amdgpu_bo_create-.patch
new file mode 100644
index 00000000..e538006e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4223-drm-amdgpu-use-amdgpu_bo_param-for-amdgpu_bo_create-.patch
@@ -0,0 +1,487 @@
+From 55dbe1d69907ccd1e0eb062c787767117398a02f Mon Sep 17 00:00:00 2001
+From: Chunming Zhou <david1.zhou@amd.com>
+Date: Mon, 16 Apr 2018 18:27:50 +0800
+Subject: [PATCH 4223/5725] drm/amdgpu: use amdgpu_bo_param for
+ amdgpu_bo_create v2
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+After that, we can easily add new parameters when needed.
+
+v2:
+a) rebase.
+b) Initialize struct amdgpu_bo_param: a future new member may
+only be used in one particular case, but every member should
+have its own initial value.
+
+Change-Id: I6e80039c3801f163129ecc605d931483fdbc91db
+Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com> (v1)
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+Cc: christian.koenig@amd.com
+Cc: Felix.Kuehling@amd.com
+
+Conflicts:
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 14 ++++--
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 14 ++++--
+ drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c | 15 ++++--
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 17 ++++---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 13 ++++--
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 59 ++++++++++++------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 6 +--
+ drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 12 +++--
+ drivers/gpu/drm/amd/amdgpu/amdgpu_test.c | 18 +++++---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 15 ++++--
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 26 ++++++++---
+ 11 files changed, 132 insertions(+), 77 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+index bb1e7d0..99ef4ee 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+@@ -282,13 +282,19 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+ struct amdgpu_bo *bo = NULL;
++ struct amdgpu_bo_param bp;
+ int r;
+ uint64_t gpu_addr_tmp = 0;
+ void *cpu_ptr_tmp = NULL;
+-
+- r = amdgpu_bo_create(adev, size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
+- AMDGPU_GEM_CREATE_CPU_GTT_USWC, ttm_bo_type_kernel,
+- NULL, &bo);
++
++ memset(&bp, 0, sizeof(bp));
++ bp.size = size;
++ bp.byte_align = PAGE_SIZE;
++ bp.domain = AMDGPU_GEM_DOMAIN_GTT;
++ bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
++ bp.type = ttm_bo_type_kernel;
++ bp.resv = NULL;
++ r = amdgpu_bo_create(adev, &bp, &bo);
+ if (r) {
+ dev_err(adev->dev,
+ "failed to allocate BO for amdkfd (%d)\n", r);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index 90e98c9..11165a7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -1177,6 +1177,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
+ uint64_t user_addr = 0;
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ struct amdgpu_bo *bo;
++ struct amdgpu_bo_param bp;
+ int byte_align;
+ u32 domain, alloc_domain;
+ u64 alloc_flags;
+@@ -1269,11 +1270,14 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
+ pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
+ va, size, domain_string(alloc_domain));
+
+- /* Allocate buffer object. Userptr objects need to start out
+- * in the CPU domain, get moved to GTT when pinned.
+- */
+- ret = amdgpu_bo_create(adev, size, byte_align,
+- alloc_domain, alloc_flags, bo_type, NULL, &bo);
++ memset(&bp, 0, sizeof(bp));
++ bp.size = size;
++ bp.byte_align = byte_align;
++ bp.domain = alloc_domain;
++ bp.flags = alloc_flags;
++ bp.type = bo_type;
++ bp.resv = NULL;
++ ret = amdgpu_bo_create(adev, &bp, &bo);
+ if (ret) {
+ pr_debug("Failed to create BO on domain %s. ret %d\n",
+ domain_string(alloc_domain), ret);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+index 02b849b..19cfff3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+@@ -75,13 +75,20 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
+ {
+ struct amdgpu_bo *dobj = NULL;
+ struct amdgpu_bo *sobj = NULL;
++ struct amdgpu_bo_param bp;
+ uint64_t saddr, daddr;
+ int r, n;
+ int time;
+
++ memset(&bp, 0, sizeof(bp));
++ bp.size = size;
++ bp.byte_align = PAGE_SIZE;
++ bp.domain = sdomain;
++ bp.flags = 0;
++ bp.type = ttm_bo_type_kernel;
++ bp.resv = NULL;
+ n = AMDGPU_BENCHMARK_ITERATIONS;
+- r = amdgpu_bo_create(adev, size, PAGE_SIZE,sdomain, 0,
+- ttm_bo_type_kernel, NULL, &sobj);
++ r = amdgpu_bo_create(adev, &bp, &sobj);
+ if (r) {
+ goto out_cleanup;
+ }
+@@ -93,8 +100,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
+ if (r) {
+ goto out_cleanup;
+ }
+- r = amdgpu_bo_create(adev, size, PAGE_SIZE, ddomain, 0,
+- ttm_bo_type_kernel, NULL, &dobj);
++ bp.domain = ddomain;
++ r = amdgpu_bo_create(adev, &bp, &dobj);
+ if (r) {
+ goto out_cleanup;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+index cf0f186..17d6b9f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+@@ -113,12 +113,17 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
+ int r;
+
+ if (adev->gart.robj == NULL) {
+- r = amdgpu_bo_create(adev, adev->gart.table_size, PAGE_SIZE,
+- AMDGPU_GEM_DOMAIN_VRAM,
+- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
+- ttm_bo_type_kernel, NULL,
+- &adev->gart.robj);
++ struct amdgpu_bo_param bp;
++
++ memset(&bp, 0, sizeof(bp));
++ bp.size = adev->gart.table_size;
++ bp.byte_align = PAGE_SIZE;
++ bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
++ bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
++ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
++ bp.type = ttm_bo_type_kernel;
++ bp.resv = NULL;
++ r = amdgpu_bo_create(adev, &bp, &adev->gart.robj);
+ if (r) {
+ return r;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+index fb95cba..f1031f4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+@@ -58,8 +58,10 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
+ struct amdgpu_bo *robj;
+ struct amdgpu_bo *bo;
+ unsigned long max_size;
++ struct amdgpu_bo_param bp;
+ int r;
+-
++
++ memset(&bp, 0, sizeof(bp));
+ *obj = NULL;
+ /* At least align on page size */
+ if (alignment < PAGE_SIZE) {
+@@ -83,9 +85,14 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
+ }
+ }
+
++ bp.size = size;
++ bp.byte_align = alignment;
++ bp.type = type;
++ bp.resv = resv;
+ retry:
+- r = amdgpu_bo_create(adev, size, alignment, initial_domain,
+- flags, type, resv, &bo);
++ bp.flags = flags;
++ bp.domain = initial_domain;
++ r = amdgpu_bo_create(adev, &bp, &bo);
+ if (r) {
+ if (r != -ERESTARTSYS) {
+ if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index 4c29d69..b68f2b8 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -214,14 +214,21 @@ int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
+ u32 domain, struct amdgpu_bo **bo_ptr,
+ u64 *gpu_addr, void **cpu_addr)
+ {
++ struct amdgpu_bo_param bp;
+ bool free = false;
+ int r;
+
++ memset(&bp, 0, sizeof(bp));
++ bp.size = size;
++ bp.byte_align = align;
++ bp.domain = domain;
++ bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
++ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
++ bp.type = ttm_bo_type_kernel;
++ bp.resv = NULL;
++
+ if (!*bo_ptr) {
+- r = amdgpu_bo_create(adev, size, align, domain,
+- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
+- ttm_bo_type_kernel, NULL, bo_ptr);
++ r = amdgpu_bo_create(adev, &bp, bo_ptr);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
+ r);
+@@ -511,20 +518,21 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
+ unsigned long size, int byte_align,
+ struct amdgpu_bo *bo)
+ {
+- struct amdgpu_bo_param bp = {
+- .size = size,
+- .byte_align = byte_align,
+- .domain = AMDGPU_GEM_DOMAIN_GTT,
+- .flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC |
+- AMDGPU_GEM_CREATE_SHADOW,
+- .type = ttm_bo_type_kernel,
+- .resv = bo->tbo.resv
+- };
++ struct amdgpu_bo_param bp;
+ int r;
+
+ if (bo->shadow)
+ return 0;
+
++ memset(&bp, 0, sizeof(bp));
++ bp.size = size;
++ bp.byte_align = byte_align;
++ bp.domain = AMDGPU_GEM_DOMAIN_GTT;
++ bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC |
++ AMDGPU_GEM_CREATE_SHADOW;
++ bp.type = ttm_bo_type_kernel;
++ bp.resv = bo->tbo.resv;
++
+ r = amdgpu_bo_do_create(adev, &bp, &bo->shadow);
+
+ if (!r) {
+@@ -537,35 +545,26 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
+ return r;
+ }
+
+-int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size,
+- int byte_align, u32 domain,
+- u64 flags, enum ttm_bo_type type,
+- struct reservation_object *resv,
++int amdgpu_bo_create(struct amdgpu_device *adev,
++ struct amdgpu_bo_param *bp,
+ struct amdgpu_bo **bo_ptr)
+ {
+- struct amdgpu_bo_param bp = {
+- .size = size,
+- .byte_align = byte_align,
+- .domain = domain,
+- .flags = flags & ~AMDGPU_GEM_CREATE_SHADOW,
+- .type = type,
+- .resv = resv
+- };
++ u64 flags = bp->flags;
+ int r;
+
+- r = amdgpu_bo_do_create(adev, &bp, bo_ptr);
+-
++ bp->flags = bp->flags & ~AMDGPU_GEM_CREATE_SHADOW;
++ r = amdgpu_bo_do_create(adev, bp, bo_ptr);
+ if (r)
+ return r;
+
+ if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_need_backup(adev)) {
+- if (!resv)
++ if (!bp->resv)
+ WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv,
+ NULL));
+
+- r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr));
++ r = amdgpu_bo_create_shadow(adev, bp->size, bp->byte_align, (*bo_ptr));
+
+- if (!resv)
++ if (!bp->resv)
+ reservation_object_unlock((*bo_ptr)->tbo.resv);
+
+ if (r)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+index 7abf76b..951af42 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+@@ -244,10 +244,8 @@ static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
+ return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
+ }
+
+-int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size,
+- int byte_align, u32 domain,
+- u64 flags, enum ttm_bo_type type,
+- struct reservation_object *resv,
++int amdgpu_bo_create(struct amdgpu_device *adev,
++ struct amdgpu_bo_param *bp,
+ struct amdgpu_bo **bo_ptr);
+ int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
+ unsigned long size, int align,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+index 716f880..a7a0f0c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+@@ -100,12 +100,18 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
+ struct reservation_object *resv = attach->dmabuf->resv;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_bo *bo;
++ struct amdgpu_bo_param bp;
+ int ret;
+
++ memset(&bp, 0, sizeof(bp));
++ bp.size = attach->dmabuf->size;
++ bp.byte_align = PAGE_SIZE;
++ bp.domain = AMDGPU_GEM_DOMAIN_CPU;
++ bp.flags = 0;
++ bp.type = ttm_bo_type_sg;
++ bp.resv = resv;
+ ww_mutex_lock(&resv->lock, NULL);
+- ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE,
+- AMDGPU_GEM_DOMAIN_CPU, 0, ttm_bo_type_sg,
+- resv, &bo);
++ ret = amdgpu_bo_create(adev, &bp, &bo);
+ if (ret)
+ goto error;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+index 2dbe875..d167e8a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+@@ -33,6 +33,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
+ struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+ struct amdgpu_bo *vram_obj = NULL;
+ struct amdgpu_bo **gtt_obj = NULL;
++ struct amdgpu_bo_param bp;
+ uint64_t gart_addr, vram_addr;
+ unsigned n, size;
+ int i, r;
+@@ -58,9 +59,15 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
+ r = 1;
+ goto out_cleanup;
+ }
+-
+- r = amdgpu_bo_create(adev, size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 0,
+- ttm_bo_type_kernel, NULL, &vram_obj);
++ memset(&bp, 0, sizeof(bp));
++ bp.size = size;
++ bp.byte_align = PAGE_SIZE;
++ bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
++ bp.flags = 0;
++ bp.type = ttm_bo_type_kernel;
++ bp.resv = NULL;
++
++ r = amdgpu_bo_create(adev, &bp, &vram_obj);
+ if (r) {
+ DRM_ERROR("Failed to create VRAM object\n");
+ goto out_cleanup;
+@@ -79,9 +86,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
+ void **vram_start, **vram_end;
+ struct dma_fence *fence = NULL;
+
+- r = amdgpu_bo_create(adev, size, PAGE_SIZE,
+- AMDGPU_GEM_DOMAIN_GTT, 0,
+- ttm_bo_type_kernel, NULL, gtt_obj + i);
++ bp.domain = AMDGPU_GEM_DOMAIN_GTT;
++ r = amdgpu_bo_create(adev, &bp, gtt_obj + i);
+ if (r) {
+ DRM_ERROR("Failed to create GTT object %d\n", i);
+ goto out_lclean;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index 2f8d89f..80c4771 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -1517,6 +1517,7 @@ static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
+ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
+ {
+ struct ttm_operation_ctx ctx = { false, false };
++ struct amdgpu_bo_param bp;
+ int r = 0;
+ int i;
+ u64 vram_size = adev->gmc.visible_vram_size;
+@@ -1524,17 +1525,21 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
+ u64 size = adev->fw_vram_usage.size;
+ struct amdgpu_bo *bo;
+
++ memset(&bp, 0, sizeof(bp));
++ bp.size = adev->fw_vram_usage.size;
++ bp.byte_align = PAGE_SIZE;
++ bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
++ bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
++ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
++ bp.type = ttm_bo_type_kernel;
++ bp.resv = NULL;
+ adev->fw_vram_usage.va = NULL;
+ adev->fw_vram_usage.reserved_bo = NULL;
+
+ if (adev->fw_vram_usage.size > 0 &&
+ adev->fw_vram_usage.size <= vram_size) {
+
+- r = amdgpu_bo_create(adev, adev->fw_vram_usage.size, PAGE_SIZE,
+- AMDGPU_GEM_DOMAIN_VRAM,
+- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
+- ttm_bo_type_kernel, NULL,
++ r = amdgpu_bo_create(adev, &bp,
+ &adev->fw_vram_usage.reserved_bo);
+ if (r)
+ goto error_create;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 93f929a..d693066 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -412,11 +412,16 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
+ struct amdgpu_bo *pt;
+
+ if (!entry->base.bo) {
+- r = amdgpu_bo_create(adev,
+- amdgpu_vm_bo_size(adev, level),
+- AMDGPU_GPU_PAGE_SIZE,
+- AMDGPU_GEM_DOMAIN_VRAM, flags,
+- ttm_bo_type_kernel, resv, &pt);
++ struct amdgpu_bo_param bp;
++
++ memset(&bp, 0, sizeof(bp));
++ bp.size = amdgpu_vm_bo_size(adev, level);
++ bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
++ bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
++ bp.flags = flags;
++ bp.type = ttm_bo_type_kernel;
++ bp.resv = resv;
++ r = amdgpu_bo_create(adev, &bp, &pt);
+ if (r)
+ return r;
+
+@@ -2416,6 +2421,7 @@ static void amdgpu_inc_compute_vms(struct amdgpu_device *adev)
+ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ int vm_context, unsigned int pasid)
+ {
++ struct amdgpu_bo_param bp;
+ const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
+ AMDGPU_VM_PTE_COUNT(adev) * 8);
+ unsigned ring_instance;
+@@ -2470,8 +2476,14 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ flags |= AMDGPU_GEM_CREATE_SHADOW;
+
+ size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level);
+- r = amdgpu_bo_create(adev, size, align, AMDGPU_GEM_DOMAIN_VRAM, flags,
+- ttm_bo_type_kernel, NULL, &vm->root.base.bo);
++ memset(&bp, 0, sizeof(bp));
++ bp.size = size;
++ bp.byte_align = align;
++ bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
++ bp.flags = flags;
++ bp.type = ttm_bo_type_kernel;
++ bp.resv = NULL;
++ r = amdgpu_bo_create(adev, &bp, &vm->root.base.bo);
+ if (r)
+ goto error_free_sched_entity;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4224-drm-amdgpu-fix-amdgpu_bo_create-param-changed-for-tt.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4224-drm-amdgpu-fix-amdgpu_bo_create-param-changed-for-tt.patch
new file mode 100644
index 00000000..21505329
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4224-drm-amdgpu-fix-amdgpu_bo_create-param-changed-for-tt.patch
@@ -0,0 +1,52 @@
+From 5586b4999095fe79c1a20b3002302373c0770578 Mon Sep 17 00:00:00 2001
+From: Kevin Wang <Kevin1.Wang@amd.com>
+Date: Mon, 23 Apr 2018 10:17:34 +0800
+Subject: [PATCH 4224/5725] drm/amdgpu: fix amdgpu_bo_create param changed for
+ ttm module
+
+Change-Id: Ie26589cdd997583e5e5ff7ac4a0b9e7b6cc7d127
+Signed-off-by: Kevin Wang <Kevin1.Wang@amd.com>
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 16 ++++++++++++----
+ 1 file changed, 12 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index 80c4771..0ddb4c3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -1340,6 +1340,7 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
+ static int amdgpu_direct_gma_init(struct amdgpu_device *adev)
+ {
+ struct amdgpu_bo *abo;
++ struct amdgpu_bo_param bp;
+ unsigned long size;
+ int r;
+
+@@ -1347,11 +1348,18 @@ static int amdgpu_direct_gma_init(struct amdgpu_device *adev)
+ return 0;
+
+ size = (unsigned long)amdgpu_direct_gma_size << 20;
++
++ memset(&bp, 0, sizeof(bp));
++ bp.size = size;
++ bp.byte_align = PAGE_SIZE;
++ bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
++ bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
++ AMDGPU_GEM_CREATE_TOP_DOWN;
++ bp.type = ttm_bo_type_kernel;
++ bp.resv = NULL;
++
+ /* reserve in visible vram */
+- r = amdgpu_bo_create(adev, size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
+- AMDGPU_GEM_CREATE_TOP_DOWN,
+- ttm_bo_type_kernel, NULL, &abo);
++ r = amdgpu_bo_create(adev, &bp, &abo);
+ if (unlikely(r))
+ goto error_out;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4225-drm-amdkcl-fix-amdgpu_bo_param-changed-compile-error.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4225-drm-amdkcl-fix-amdgpu_bo_param-changed-compile-error.patch
new file mode 100644
index 00000000..70ff32e4
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4225-drm-amdkcl-fix-amdgpu_bo_param-changed-compile-error.patch
@@ -0,0 +1,45 @@
+From 15e16c601f8dac26a1656ab050652d0c37d14dc5 Mon Sep 17 00:00:00 2001
+From: Kevin Wang <Kevin1.Wang@amd.com>
+Date: Mon, 23 Apr 2018 10:34:33 +0800
+Subject: [PATCH 4225/5725] drm/amdkcl: fix amdgpu_bo_param changed compile
+ error
+
+Change-Id: Ife408c022d3bc7add402114984eefe0c7f5c2c97
+Signed-off-by: Kevin Wang <Kevin1.Wang@amd.com>
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index b68f2b8..ffe6b05 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -463,7 +463,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
+ else
+ amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);
+
+- if (domain & AMDGPU_GEM_DOMAIN_DGMA && adev->ssg.enabled)
++ if (bp->domain & AMDGPU_GEM_DOMAIN_DGMA && adev->ssg.enabled)
+ bo->tbo.ssg_can_map = true;
+
+ if (bp->type == ttm_bo_type_kernel)
+@@ -496,12 +496,12 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
+ if (bp->type == ttm_bo_type_device)
+ bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+
+- if (((flags & AMDGPU_GEM_CREATE_NO_EVICT) && amdgpu_no_evict) ||
+- domain & (AMDGPU_GEM_DOMAIN_DGMA | AMDGPU_GEM_DOMAIN_DGMA_IMPORT)) {
++ if (((bp->flags & AMDGPU_GEM_CREATE_NO_EVICT) && amdgpu_no_evict) ||
++ bp->domain & (AMDGPU_GEM_DOMAIN_DGMA | AMDGPU_GEM_DOMAIN_DGMA_IMPORT)) {
+ r = amdgpu_bo_reserve(bo, false);
+ if (unlikely(r != 0))
+ return r;
+- r = amdgpu_bo_pin(bo, domain, NULL);
++ r = amdgpu_bo_pin(bo, bp->domain, NULL);
+ amdgpu_bo_unreserve(bo);
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4226-drm-amdgpu-print-the-vbios-version-in-the-debugfs-fi.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4226-drm-amdgpu-print-the-vbios-version-in-the-debugfs-fi.patch
new file mode 100644
index 00000000..6aaf82d1
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4226-drm-amdgpu-print-the-vbios-version-in-the-debugfs-fi.patch
@@ -0,0 +1,48 @@
+From 74f559a43ff5801db2b0f37e278da1f7dc38710c Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Tue, 17 Apr 2018 08:55:44 -0500
+Subject: [PATCH 4226/5725] drm/amdgpu: print the vbios version in the debugfs
+ firmware info
+
+Useful for info gathering about what firmwares are in use in
+the driver.
+
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index 79d1060..73c2f7a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -31,6 +31,7 @@
+ #include "amdgpu_sched.h"
+ #include "amdgpu_uvd.h"
+ #include "amdgpu_vce.h"
++#include "atom.h"
+
+ #include <linux/vga_switcheroo.h>
+ #include <linux/slab.h>
+@@ -1143,6 +1144,7 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
+ struct amdgpu_device *adev = dev->dev_private;
+ struct drm_amdgpu_info_firmware fw_info;
+ struct drm_amdgpu_query_fw query_fw;
++ struct atom_context *ctx = adev->mode_info.atom_context;
+ int ret, i;
+
+ /* VCE */
+@@ -1289,6 +1291,9 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
+ seq_printf(m, "VCN feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
++
++ seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version);
++
+ return 0;
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4227-drm-scheduler-always-put-last_sched-fence-in-entity_.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4227-drm-scheduler-always-put-last_sched-fence-in-entity_.patch
new file mode 100644
index 00000000..33ac000e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4227-drm-scheduler-always-put-last_sched-fence-in-entity_.patch
@@ -0,0 +1,39 @@
+From 19c308db4c4f992d89ad7f6dff1bbc56d5244f6c Mon Sep 17 00:00:00 2001
+From: Pixel Ding <Pixel.Ding@amd.com>
+Date: Wed, 18 Apr 2018 04:33:26 -0400
+Subject: [PATCH 4227/5725] drm/scheduler: always put last_sched fence in
+ entity_fini
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Fix a potential memory leak, since the scheduler main thread always
+holds one last_sched fence.
+
+Signed-off-by: Pixel Ding <Pixel.Ding@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/scheduler/gpu_scheduler.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
+index f45b4fd..c8ec777 100644
+--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
++++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
+@@ -275,10 +275,10 @@ void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
+ else if (r)
+ DRM_ERROR("fence add callback failed (%d)\n", r);
+ }
+-
+- dma_fence_put(entity->last_scheduled);
+- entity->last_scheduled = NULL;
+ }
++
++ dma_fence_put(entity->last_scheduled);
++ entity->last_scheduled = NULL;
+ }
+ EXPORT_SYMBOL(drm_sched_entity_cleanup);
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4228-drm-scheduler-move-last_sched-fence-updating-prior-t.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4228-drm-scheduler-move-last_sched-fence-updating-prior-t.patch
new file mode 100644
index 00000000..97306f64
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4228-drm-scheduler-move-last_sched-fence-updating-prior-t.patch
@@ -0,0 +1,51 @@
+From 79410781814b5434dc9847d5af8f1e5b27a2971d Mon Sep 17 00:00:00 2001
+From: Pixel Ding <Pixel.Ding@amd.com>
+Date: Wed, 18 Apr 2018 04:37:40 -0400
+Subject: [PATCH 4228/5725] drm/scheduler: move last_sched fence updating prior
+ to job popping
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Make sure the main thread won't update the last_sched fence while the
+entity is being cleaned up.
+
+Fix a race caused by putting the last_sched fence twice. Running
+vulkaninfo in a tight loop can reproduce this issue, which shows up as
+a wild fence pointer.
+
+Signed-off-by: Pixel Ding <Pixel.Ding@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Monk Liu <Monk.Liu@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/scheduler/gpu_scheduler.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
+index c8ec777..1e40d3b 100644
+--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
++++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
+@@ -401,6 +401,9 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity)
+ if (entity->guilty && atomic_read(entity->guilty))
+ dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);
+
++ dma_fence_put(entity->last_scheduled);
++ entity->last_scheduled = dma_fence_get(&s_fence->finished);
++
+ spsc_queue_pop(&entity->job_queue);
+ return sched_job;
+ }
+@@ -714,9 +717,6 @@ static int drm_sched_main(void *param)
+ fence = sched->ops->run_job(sched_job);
+ drm_sched_fence_scheduled(s_fence);
+
+- dma_fence_put(entity->last_scheduled);
+- entity->last_scheduled = dma_fence_get(&s_fence->finished);
+-
+ if (fence) {
+ s_fence->parent = dma_fence_get(fence);
+ r = dma_fence_add_callback(fence, &s_fence->cb,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4229-drm-amdgpu-limit-reg_write_reg_wait-workaround-to-SR.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4229-drm-amdgpu-limit-reg_write_reg_wait-workaround-to-SR.patch
new file mode 100644
index 00000000..eced9ed1
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4229-drm-amdgpu-limit-reg_write_reg_wait-workaround-to-SR.patch
@@ -0,0 +1,40 @@
+From f3b9f7c97663a6c9f1947ba37088e99c032c1bb8 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Tue, 17 Apr 2018 14:47:42 +0200
+Subject: [PATCH 4229/5725] drm/amdgpu: limit reg_write_reg_wait workaround to
+ SRIOV v2
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+It turned out that this locks up some bare-metal Vega10 systems.
+
+v2: fix stupid typo
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index c407f1f..d04a78b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -4226,7 +4226,12 @@ static void gfx_v9_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
+ {
+ int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
+
+- gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1, ref, mask, 0x20);
++ if (amdgpu_sriov_vf(ring->adev))
++ gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
++ ref, mask, 0x20);
++ else
++ amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
++ ref, mask);
+ }
+
+ static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4230-drm-amdgpu-set-preferred_domain-independent-of-fallb.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4230-drm-amdgpu-set-preferred_domain-independent-of-fallb.patch
new file mode 100644
index 00000000..a09d296f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4230-drm-amdgpu-set-preferred_domain-independent-of-fallb.patch
@@ -0,0 +1,76 @@
+From 9f2208202d0f1b6fb0ecff582c4a6680299313d6 Mon Sep 17 00:00:00 2001
+From: Chunming Zhou <david1.zhou@amd.com>
+Date: Tue, 17 Apr 2018 11:52:53 +0800
+Subject: [PATCH 4230/5725] drm/amdgpu: set preferred_domain independent of
+ fallback handling
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+When GEM needs to fall back to GTT for VRAM BOs, we still want the
+preferred domain to be untouched so that the BO has a chance to move back
+to VRAM in the future.
+
+Change-Id: I8cfdf3f30532f7e5d80b8e4266b7800211de2f0b
+Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+
+Conflicts:
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 1 +
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 5 ++++-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 1 +
+ 3 files changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+index f1031f4..df85dcc 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+@@ -89,6 +89,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
+ bp.byte_align = alignment;
+ bp.type = type;
+ bp.resv = resv;
++ bp.preferred_domain = initial_domain;
+ retry:
+ bp.flags = flags;
+ bp.domain = initial_domain;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index ffe6b05..08fcc74 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -382,6 +382,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
+ };
+ struct amdgpu_bo *bo;
+ unsigned long page_align, size = bp->size;
++ u32 preferred_domains;
+ size_t acc_size;
+ int r;
+
+@@ -402,7 +403,9 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
+ drm_gem_private_object_init(adev->ddev, &bo->gem_base, size);
+ INIT_LIST_HEAD(&bo->shadow_list);
+ INIT_LIST_HEAD(&bo->va);
+- bo->preferred_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
++ preferred_domains = bp->preferred_domain ? bp->preferred_domain :
++ bp->domain;
++ bo->preferred_domains = preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT |
+ AMDGPU_GEM_DOMAIN_CPU |
+ AMDGPU_GEM_DOMAIN_GDS |
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+index 951af42..2c79c25 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+@@ -37,6 +37,7 @@ struct amdgpu_bo_param {
+ unsigned long size;
+ int byte_align;
+ u32 domain;
++ u32 preferred_domain;
+ u64 flags;
+ enum ttm_bo_type type;
+ struct reservation_object *resv;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4231-drm-amdgpu-handle-domain-mask-checking-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4231-drm-amdgpu-handle-domain-mask-checking-v2.patch
new file mode 100644
index 00000000..73bdabd5
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4231-drm-amdgpu-handle-domain-mask-checking-v2.patch
@@ -0,0 +1,107 @@
+From 5848a2162722409228abc7b3064f08baeab7d472 Mon Sep 17 00:00:00 2001
+From: Chunming Zhou <david1.zhou@amd.com>
+Date: Tue, 17 Apr 2018 18:34:40 +0800
+Subject: [PATCH 4231/5725] drm/amdgpu: handle domain mask checking v2
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+If the domain is invalid, we should return an error.
+v2:
+ remove duplicated domain checking.
+
+Change-Id: I65a738f5ac4fc34be76de867afb0db1d4bd27c24
+Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+
+Conflicts:
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+ include/uapi/drm/amdgpu_drm.h
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 8 +-------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 11 +----------
+ include/uapi/drm/amdgpu_drm.h | 10 ++++++++++
+ 3 files changed, 12 insertions(+), 17 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+index df85dcc..88de5c1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+@@ -262,13 +262,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
+ return -EINVAL;
+
+ /* reject invalid gem domains */
+- if (args->in.domains & ~(AMDGPU_GEM_DOMAIN_CPU |
+- AMDGPU_GEM_DOMAIN_GTT |
+- AMDGPU_GEM_DOMAIN_VRAM |
+- AMDGPU_GEM_DOMAIN_DGMA |
+- AMDGPU_GEM_DOMAIN_GDS |
+- AMDGPU_GEM_DOMAIN_GWS |
+- AMDGPU_GEM_DOMAIN_OA))
++ if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
+ return -EINVAL;
+
+ /* create a gem object to contain this object in */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index 08fcc74..af6f1c5 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -382,7 +382,6 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
+ };
+ struct amdgpu_bo *bo;
+ unsigned long page_align, size = bp->size;
+- u32 preferred_domains;
+ size_t acc_size;
+ int r;
+
+@@ -403,16 +402,8 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
+ drm_gem_private_object_init(adev->ddev, &bo->gem_base, size);
+ INIT_LIST_HEAD(&bo->shadow_list);
+ INIT_LIST_HEAD(&bo->va);
+- preferred_domains = bp->preferred_domain ? bp->preferred_domain :
++ bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
+ bp->domain;
+- bo->preferred_domains = preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
+- AMDGPU_GEM_DOMAIN_GTT |
+- AMDGPU_GEM_DOMAIN_CPU |
+- AMDGPU_GEM_DOMAIN_GDS |
+- AMDGPU_GEM_DOMAIN_GWS |
+- AMDGPU_GEM_DOMAIN_OA |
+- AMDGPU_GEM_DOMAIN_DGMA |
+- AMDGPU_GEM_DOMAIN_DGMA_IMPORT);
+ bo->allowed_domains = bo->preferred_domains;
+
+ if (bp->type != ttm_bo_type_kernel &&
+diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
+index be4fbb3..d04ef13 100644
+--- a/include/uapi/drm/amdgpu_drm.h
++++ b/include/uapi/drm/amdgpu_drm.h
+@@ -92,6 +92,12 @@ extern "C" {
+ #define AMDGPU_GEM_DOMAIN_OA 0x20
+ #define AMDGPU_GEM_DOMAIN_DGMA 0x40
+ #define AMDGPU_GEM_DOMAIN_DGMA_IMPORT 0x80
++#define AMDGPU_GEM_DOMAIN_MASK (AMDGPU_GEM_DOMAIN_CPU | \
++ AMDGPU_GEM_DOMAIN_GTT | \
++ AMDGPU_GEM_DOMAIN_VRAM | \
++ AMDGPU_GEM_DOMAIN_GDS | \
++ AMDGPU_GEM_DOMAIN_GWS | \
++ AMDGPU_GEM_DOMAIN_OA)
+
+ /* Flag that CPU access will be required for the case of VRAM domain */
+ #define AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED (1 << 0)
+@@ -589,6 +595,10 @@ union drm_amdgpu_cs {
+ /* Preempt flag, IB should set Pre_enb bit if PREEMPT flag detected */
+ #define AMDGPU_IB_FLAG_PREEMPT (1<<2)
+
++/* The IB fence should do the L2 writeback but not invalidate any shader
++ * caches (L2/vL1/sL1/I$). */
++#define AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE (1 << 3)
++
+ struct drm_amdgpu_cs_chunk_ib {
+ __u32 _pad;
+ /** AMDGPU_IB_FLAG_* */
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4232-drm-scheduler-fix-build-broken-by-move-last_sched-fe.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4232-drm-scheduler-fix-build-broken-by-move-last_sched-fe.patch
new file mode 100644
index 00000000..96267395
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4232-drm-scheduler-fix-build-broken-by-move-last_sched-fe.patch
@@ -0,0 +1,35 @@
+From 5607e808b3a5be1ea1ae6f6c5f47dacbfa0b5fbd Mon Sep 17 00:00:00 2001
+From: christian koenig <christian.koenig@amd.com>
+Date: Wed, 18 Apr 2018 12:05:05 +0200
+Subject: [PATCH 4232/5725] drm/scheduler: fix build broken by "move last_sched
+ fence updating prior to job popping"
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+We don't have s_fence as a local variable here.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+Acked-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/scheduler/gpu_scheduler.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
+index 1e40d3b..203f553 100644
+--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
++++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
+@@ -402,7 +402,7 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity)
+ dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);
+
+ dma_fence_put(entity->last_scheduled);
+- entity->last_scheduled = dma_fence_get(&s_fence->finished);
++ entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished);
+
+ spsc_queue_pop(&entity->job_queue);
+ return sched_job;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4233-drm-amdgpu-optionally-do-a-writeback-but-don-t-inval.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4233-drm-amdgpu-optionally-do-a-writeback-but-don-t-inval.patch
new file mode 100644
index 00000000..34b53207
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4233-drm-amdgpu-optionally-do-a-writeback-but-don-t-inval.patch
@@ -0,0 +1,176 @@
+From e0c57dec07e7d2f1733561c0bb8a692bd65ade48 Mon Sep 17 00:00:00 2001
+From: Marek Olsak <marek.olsak@amd.com>
+Date: Tue, 3 Apr 2018 13:05:03 -0400
+Subject: [PATCH 4233/5725] drm/amdgpu: optionally do a writeback but don't
+ invalidate TC for IB fences
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+There is a new IB flag that enables this new behavior.
+Full invalidation is unnecessary for RELEASE_MEM and doesn't make sense
+when draw calls from two adjacent gfx IBs run in parallel. This will be
+the new default for Mesa.
+
+v2: bump the version
+
+Signed-off-by: Marek Olšák <marek.olsak@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 3 ++-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 5 +++--
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 8 ++++++--
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 4 +++-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 11 +++++++----
+ drivers/gpu/drm/amd/amdgpu/soc15d.h | 1 +
+ 7 files changed, 23 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 3f68ca9..51e2928 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -75,9 +75,10 @@
+ * - 3.23.0 - Add query for VRAM lost counter
+ * - 3.24.0 - Add high priority compute support for gfx9
+ * - 3.25.0 - Add support for sensor query info (stable pstate sclk/mclk).
++ * - 3.26.0 - GFX9: Process AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE.
+ */
+ #define KMS_DRIVER_MAJOR 3
+-#define KMS_DRIVER_MINOR 25
++#define KMS_DRIVER_MINOR 26
+ #define KMS_DRIVER_PATCHLEVEL 0
+
+ #define AMDGPU_VERSION "18.20.2.15"
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+index 97449e0..4a3cef9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+@@ -131,7 +131,8 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
+ * Emits a fence command on the requested ring (all asics).
+ * Returns 0 on success, -ENOMEM on failure.
+ */
+-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f)
++int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
++ unsigned flags)
+ {
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_fence *fence;
+@@ -149,7 +150,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f)
+ adev->fence_context + ring->idx,
+ seq);
+ amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
+- seq, AMDGPU_FENCE_FLAG_INT);
++ seq, flags | AMDGPU_FENCE_FLAG_INT);
+
+ ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
+ /* This function can't be called concurrently anyway, otherwise
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+index 29c8015..3f7afcf 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+@@ -127,6 +127,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
+ struct amdgpu_vm *vm;
+ uint64_t fence_ctx;
+ uint32_t status = 0, alloc_size;
++ unsigned fence_flags = 0;
+
+ unsigned i;
+ int r = 0;
+@@ -234,7 +235,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
+ )
+ amdgpu_ring_emit_hdp_invalidate(ring);
+
+- r = amdgpu_fence_emit(ring, f);
++ if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE)
++ fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY;
++
++ r = amdgpu_fence_emit(ring, f, fence_flags);
+ if (r) {
+ dev_err(adev->dev, "failed to emit fence (%d)\n", r);
+ if (job && job->vmid)
+@@ -249,7 +253,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
+ /* wrap the last IB with fence */
+ if (job && job->uf_addr) {
+ amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
+- AMDGPU_FENCE_FLAG_64BIT);
++ fence_flags | AMDGPU_FENCE_FLAG_64BIT);
+ }
+
+ if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+index 6ed21bd..79ca5b7 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+@@ -41,6 +41,7 @@
+
+ #define AMDGPU_FENCE_FLAG_64BIT (1 << 0)
+ #define AMDGPU_FENCE_FLAG_INT (1 << 1)
++#define AMDGPU_FENCE_FLAG_TC_WB_ONLY (1 << 2)
+
+ enum amdgpu_ring_type {
+ AMDGPU_RING_TYPE_GFX,
+@@ -89,7 +90,8 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
+ unsigned irq_type);
+ void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
+ void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
+-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence);
++int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence,
++ unsigned flags);
+ int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s);
+ void amdgpu_fence_process(struct amdgpu_ring *ring);
+ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index d693066..56a8614 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -633,7 +633,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
+ amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
+
+ if (vm_flush_needed || pasid_mapping_needed) {
+- r = amdgpu_fence_emit(ring, &fence);
++ r = amdgpu_fence_emit(ring, &fence, 0);
+ if (r)
+ return r;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index d04a78b..cf1d206 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -3857,13 +3857,16 @@ static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
+ {
+ bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
+ bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
++ bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;
+
+ /* RELEASE_MEM - flush caches, send int */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
+- amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
+- EOP_TC_ACTION_EN |
+- EOP_TC_WB_ACTION_EN |
+- EOP_TC_MD_ACTION_EN |
++ amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN |
++ EOP_TC_NC_ACTION_EN) :
++ (EOP_TCL1_ACTION_EN |
++ EOP_TC_ACTION_EN |
++ EOP_TC_WB_ACTION_EN |
++ EOP_TC_MD_ACTION_EN)) |
+ EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
+ EVENT_INDEX(5)));
+ amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15d.h b/drivers/gpu/drm/amd/amdgpu/soc15d.h
+index f22f7a8..8dc2910 100755
+--- a/drivers/gpu/drm/amd/amdgpu/soc15d.h
++++ b/drivers/gpu/drm/amd/amdgpu/soc15d.h
+@@ -159,6 +159,7 @@
+ #define EOP_TC_WB_ACTION_EN (1 << 15) /* L2 */
+ #define EOP_TCL1_ACTION_EN (1 << 16)
+ #define EOP_TC_ACTION_EN (1 << 17) /* L2 */
++#define EOP_TC_NC_ACTION_EN (1 << 19)
+ #define EOP_TC_MD_ACTION_EN (1 << 21) /* L2 metadata */
+
+ #define DATA_SEL(x) ((x) << 29)
+--
+2.7.4
+
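The new flag above is consumed from userspace through the IB chunk of the CS
ioctl. The snippet below is a minimal, hedged sketch (not part of the patch
set) of how a submitter might set it, assuming the updated uapi header from
patch 4231 is installed as <libdrm/amdgpu_drm.h>; the field usage follows the
stock drm_amdgpu_cs_chunk_ib layout.

    #include <string.h>
    #include <libdrm/amdgpu_drm.h>

    /* Mark a gfx IB so its end-of-IB fence only performs the L2 writeback
     * instead of a full TC invalidation (KMS driver version 3.26+). */
    static void fill_ib_chunk(struct drm_amdgpu_cs_chunk_ib *ib,
                              __u64 va_start, __u32 ib_bytes)
    {
        memset(ib, 0, sizeof(*ib));
        ib->va_start = va_start;
        ib->ib_bytes = ib_bytes;
        ib->ip_type  = AMDGPU_HW_IP_GFX;
        ib->flags    = AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE;
    }

A real submitter would place this chunk in a drm_amdgpu_cs_chunk array and
query the KMS driver version first, leaving the flag cleared on kernels older
than 3.26.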
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4234-drm-amdgpu-fix-list-not-initialized.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4234-drm-amdgpu-fix-list-not-initialized.patch
new file mode 100644
index 00000000..f13f6512
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4234-drm-amdgpu-fix-list-not-initialized.patch
@@ -0,0 +1,36 @@
+From 9b6ae51744793eb3716cd9ab6996859a9731cae3 Mon Sep 17 00:00:00 2001
+From: Chunming Zhou <david1.zhou@amd.com>
+Date: Wed, 18 Apr 2018 18:35:09 +0800
+Subject: [PATCH 4234/5725] drm/amdgpu: fix list not initialized
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Otherwise, the CPU gets stuck for 22s with a kernel panic.
+
+Change-Id: I5b87cde662a4658c9ab253ba88d009c9628a44ca
+Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 56a8614..5543487 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -1597,10 +1597,9 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
+ * the evicted list so that it gets validated again on the
+ * next command submission.
+ */
++ list_del_init(&bo_va->base.vm_status);
+ if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type)))
+ list_add_tail(&bo_va->base.vm_status, &vm->evicted);
+- else
+- list_del_init(&bo_va->base.vm_status);
+ } else {
+ list_del_init(&bo_va->base.vm_status);
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4235-drm-amdgpu-init-gfx9-aperture-settings.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4235-drm-amdgpu-init-gfx9-aperture-settings.patch
new file mode 100644
index 00000000..b5fe9fe7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4235-drm-amdgpu-init-gfx9-aperture-settings.patch
@@ -0,0 +1,46 @@
+From 99f97e7738d66b731a0a61143ceb13449ac676a5 Mon Sep 17 00:00:00 2001
+From: Flora Cui <Flora.Cui@amd.com>
+Date: Wed, 18 Apr 2018 17:12:19 +0800
+Subject: [PATCH 4235/5725] drm/amdgpu: init gfx9 aperture settings
+
+Change-Id: Ie5683816e21d1dbf9b4f84f05ecbe056a321f06c
+Signed-off-by: Flora Cui <Flora.Cui@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 5 ++++-
+ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 3 +--
+ 2 files changed, 5 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index cf1d206..6105d81 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -1692,7 +1692,10 @@ static void gfx_v9_0_gpu_init(struct amdgpu_device *adev)
+ tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
+ SH_MEM_ALIGNMENT_MODE_UNALIGNED);
+ WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
+- tmp = adev->gmc.shared_aperture_start >> 48;
++ tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
++ (adev->gmc.private_aperture_start >> 48));
++ tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
++ (adev->gmc.shared_aperture_start >> 48));
+ WREG32_SOC15(GC, 0, mmSH_MEM_BASES, tmp);
+ }
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index b05fe26..108e06f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -561,8 +561,7 @@ static int gmc_v9_0_early_init(void *handle)
+ adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
+ adev->gmc.shared_aperture_end =
+ adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
+- adev->gmc.private_aperture_start =
+- adev->gmc.shared_aperture_end + 1;
++ adev->gmc.private_aperture_start = 0x1000000000000000ULL;
+ adev->gmc.private_aperture_end =
+ adev->gmc.private_aperture_start + (4ULL << 30) - 1;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4236-drm-amdgpu-simplify-bo_va-list-when-vm-bo-update-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4236-drm-amdgpu-simplify-bo_va-list-when-vm-bo-update-v2.patch
new file mode 100644
index 00000000..272cf93a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4236-drm-amdgpu-simplify-bo_va-list-when-vm-bo-update-v2.patch
@@ -0,0 +1,65 @@
+From ab0c4b83f2ecf4a80e93356f2f4f730a5f8fbb29 Mon Sep 17 00:00:00 2001
+From: Junwei Zhang <Jerry.Zhang@amd.com>
+Date: Thu, 19 Apr 2018 13:17:26 +0800
+Subject: [PATCH 4236/5725] drm/amdgpu: simplify bo_va list when vm bo update
+ (v2)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+v2: fix compiler warning
+
+Change-Id: I05145e726302b37e0519500ac1fd46bf03afbf75
+Signed-off-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+
+Conflicts:
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 22 ++++++++++------------
+ 1 file changed, 10 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 5543487..125860a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -1530,6 +1530,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
+ uint64_t flags;
+ uint64_t vram_base_offset = adev->vm_manager.vram_base_offset;
+ struct amdgpu_device *bo_adev;
++ uint32_t mem_type;
+ int r;
+
+ if (clear || !bo_va->base.bo) {
+@@ -1590,19 +1591,16 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
+ }
+
+ spin_lock(&vm->status_lock);
+- if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
+- unsigned mem_type = bo->tbo.mem.mem_type;
++ list_del_init(&bo_va->base.vm_status);
+
+- /* If the BO is not in its preferred location add it back to
+- * the evicted list so that it gets validated again on the
+- * next command submission.
+- */
+- list_del_init(&bo_va->base.vm_status);
+- if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type)))
+- list_add_tail(&bo_va->base.vm_status, &vm->evicted);
+- } else {
+- list_del_init(&bo_va->base.vm_status);
+- }
++ /* If the BO is not in its preferred location add it back to
++ * the evicted list so that it gets validated again on the
++ * next command submission.
++ */
++ mem_type = bo->tbo.mem.mem_type;
++ if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv &&
++ !(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type)))
++ list_add_tail(&bo_va->base.vm_status, &vm->evicted);
+ spin_unlock(&vm->status_lock);
+
+ list_splice_init(&bo_va->invalids, &bo_va->valids);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4237-drm-amdgpu-bo-could-be-null-when-access-in-vm-bo-upd.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4237-drm-amdgpu-bo-could-be-null-when-access-in-vm-bo-upd.patch
new file mode 100644
index 00000000..149670b8
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4237-drm-amdgpu-bo-could-be-null-when-access-in-vm-bo-upd.patch
@@ -0,0 +1,43 @@
+From 5dc100fa2bccc72447fb01dc9d1cea7425a2f008 Mon Sep 17 00:00:00 2001
+From: Junwei Zhang <Jerry.Zhang@amd.com>
+Date: Mon, 23 Apr 2018 17:21:21 +0800
+Subject: [PATCH 4237/5725] drm/amdgpu: bo could be null when access in vm bo
+ update
+
+Change-Id: Ifb19b5cb96816817f1ee99bcac3cb4ac3881a94c
+Signed-off-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Reviewed-by: David Zhou <david1.zhou@amd.com>
+
+Conflicts:
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 125860a..af645fb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -1530,7 +1530,6 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
+ uint64_t flags;
+ uint64_t vram_base_offset = adev->vm_manager.vram_base_offset;
+ struct amdgpu_device *bo_adev;
+- uint32_t mem_type;
+ int r;
+
+ if (clear || !bo_va->base.bo) {
+@@ -1597,9 +1596,9 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
+ * the evicted list so that it gets validated again on the
+ * next command submission.
+ */
+- mem_type = bo->tbo.mem.mem_type;
+ if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv &&
+- !(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type)))
++ !(bo->preferred_domains &
++ amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type)))
+ list_add_tail(&bo_va->base.vm_status, &vm->evicted);
+ spin_unlock(&vm->status_lock);
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4238-drm-amdgpu-print-DMA-buf-status-in-debugfs.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4238-drm-amdgpu-print-DMA-buf-status-in-debugfs.patch
new file mode 100644
index 00000000..b860d66f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4238-drm-amdgpu-print-DMA-buf-status-in-debugfs.patch
@@ -0,0 +1,48 @@
+From 5b793f8d74f275ffcaebfacf53a4514486868004 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Sun, 25 Mar 2018 10:10:25 +0200
+Subject: [PATCH 4238/5725] drm/amdgpu: print DMA-buf status in debugfs
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Just note if a BO was imported/exported.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+index 88de5c1..3621ff0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+@@ -931,6 +931,8 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
+ struct seq_file *m = data;
+
++ struct dma_buf_attachment *attachment;
++ struct dma_buf *dma_buf;
+ unsigned domain;
+ const char *placement;
+ unsigned pin_count;
+@@ -965,6 +967,15 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
+ pin_count = READ_ONCE(bo->pin_count);
+ if (pin_count)
+ seq_printf(m, " pin count %d", pin_count);
++
++ dma_buf = READ_ONCE(bo->gem_base.dma_buf);
++ attachment = READ_ONCE(bo->gem_base.import_attach);
++
++ if (attachment)
++ seq_printf(m, " imported from %p", dma_buf);
++ else if (dma_buf)
++ seq_printf(m, " exported as %p", dma_buf);
++
+ seq_printf(m, "\n");
+
+ return 0;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4239-drm-amdgpu-Rename-amdgpu_display_framebuffer_domains.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4239-drm-amdgpu-Rename-amdgpu_display_framebuffer_domains.patch
new file mode 100644
index 00000000..f7056906
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4239-drm-amdgpu-Rename-amdgpu_display_framebuffer_domains.patch
@@ -0,0 +1,105 @@
+From 4c1e23516e34c8563b5eba3c03fd92642e0a068a Mon Sep 17 00:00:00 2001
+From: Samuel Li <Samuel.Li@amd.com>
+Date: Wed, 18 Apr 2018 15:06:02 -0400
+Subject: [PATCH 4239/5725] drm/amdgpu: Rename
+ amdgpu_display_framebuffer_domains()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+It returns the supported domains for display; the domains actually used
+are decided later, when the BO is pinned.
+
+Signed-off-by: Samuel Li <Samuel.Li@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 4 ++--
+ drivers/gpu/drm/amd/amdgpu/amdgpu_display.h | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 2 +-
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3 +--
+ 5 files changed, 6 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+index 10b25d9..26a5a9c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+@@ -191,7 +191,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
+ goto cleanup;
+ }
+
+- r = amdgpu_bo_pin(new_abo, amdgpu_display_framebuffer_domains(adev), &base);
++ r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev), &base);
+ if (unlikely(r != 0)) {
+ DRM_ERROR("failed to pin new abo buffer before flip\n");
+ goto unreserve;
+@@ -505,7 +505,7 @@ static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
+ .create_handle = amdgpu_display_user_framebuffer_create_handle,
+ };
+
+-uint32_t amdgpu_display_framebuffer_domains(struct amdgpu_device *adev)
++uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev)
+ {
+ uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
+index 4471b2d..ec11434 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
+@@ -23,7 +23,7 @@
+ #ifndef __AMDGPU_DISPLAY_H__
+ #define __AMDGPU_DISPLAY_H__
+
+-uint32_t amdgpu_display_framebuffer_domains(struct amdgpu_device *adev);
++uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev);
+ struct drm_framebuffer *
+ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *file_priv,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+index 6c110e4..cc55fa5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+@@ -137,7 +137,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
+ /* need to align pitch with crtc limits */
+ mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, cpp,
+ fb_tiled);
+- domain = amdgpu_display_framebuffer_domains(adev);
++ domain = amdgpu_display_supported_domains(adev);
+
+ height = ALIGN(mode_cmd->height, 8);
+ size = mode_cmd->pitches[0] * height;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+index a7a0f0c..63d5a01 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+@@ -219,7 +219,7 @@ static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct ttm_operation_ctx ctx = { true, false };
+- u32 domain = amdgpu_display_framebuffer_domains(adev);
++ u32 domain = amdgpu_display_supported_domains(adev);
+ int ret;
+ bool reads = (direction == DMA_BIDIRECTIONAL ||
+ direction == DMA_FROM_DEVICE);
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 4760ff4..ab3fd8a 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -3221,12 +3221,11 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
+ return r;
+
+ if (plane->type != DRM_PLANE_TYPE_CURSOR)
+- domain = amdgpu_display_framebuffer_domains(adev);
++ domain = amdgpu_display_supported_domains(adev);
+ else
+ domain = AMDGPU_GEM_DOMAIN_VRAM;
+
+ r = amdgpu_bo_pin(rbo, domain, &afb->address);
+-
+ amdgpu_bo_unreserve(rbo);
+
+ if (unlikely(r != 0)) {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4240-drm-amdgpu-Remove-VRAM-from-shared-bo-domains.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4240-drm-amdgpu-Remove-VRAM-from-shared-bo-domains.patch
new file mode 100644
index 00000000..09ea4615
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4240-drm-amdgpu-Remove-VRAM-from-shared-bo-domains.patch
@@ -0,0 +1,42 @@
+From e45ae87a05e45e3da96c2900e2f8fba33f38a7aa Mon Sep 17 00:00:00 2001
+From: Samuel Li <Samuel.Li@amd.com>
+Date: Wed, 18 Apr 2018 16:26:18 -0400
+Subject: [PATCH 4240/5725] drm/amdgpu: Remove VRAM from shared bo domains.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This fixes an issue introduced by the change "allow framebuffer in GART
+memory as well", which could lead to a shared buffer ending up
+pinned in VRAM. Use GTT if it is included in the domain, otherwise
+return an error.
+
+Signed-off-by: Samuel Li <Samuel.Li@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index af6f1c5..dcf5acd 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -734,8 +734,12 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
+ return -EINVAL;
+
+ /* A shared bo cannot be migrated to VRAM */
+- if (bo->prime_shared_count && (domain == AMDGPU_GEM_DOMAIN_VRAM))
+- return -EINVAL;
++ if (bo->prime_shared_count) {
++ if (domain & AMDGPU_GEM_DOMAIN_GTT)
++ domain = AMDGPU_GEM_DOMAIN_GTT;
++ else
++ return -EINVAL;
++ }
+
+ if (bo->pin_count) {
+ uint32_t mem_type = bo->tbo.mem.mem_type;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4241-drm-amdgpu-pm-document-power_dpm_force_performance_l.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4241-drm-amdgpu-pm-document-power_dpm_force_performance_l.patch
new file mode 100644
index 00000000..8a60af5f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4241-drm-amdgpu-pm-document-power_dpm_force_performance_l.patch
@@ -0,0 +1,83 @@
+From 48f623d8f05bbae0112be5b4a43a1da80c630d14 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 19 Apr 2018 13:46:03 -0500
+Subject: [PATCH 4241/5725] drm/amdgpu/pm: document
+ power_dpm_force_performance_level
+
+Provide documentation for power_dpm_force_performance_level
+which is used to adjust things related to GPU power states.
+
+Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 53 ++++++++++++++++++++++++++++++++++
+ 1 file changed, 53 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+index 44a8bb2..ef1eda4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+@@ -131,6 +131,59 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
+ return count;
+ }
+
++
++/**
++ * DOC: power_dpm_force_performance_level
++ *
++ * The amdgpu driver provides a sysfs API for adjusting certain power
++ * related parameters. The file power_dpm_force_performance_level is
++ * used for this. It accepts the following arguments:
++ * - auto
++ * - low
++ * - high
++ * - manual
++ * - GPU fan
++ * - profile_standard
++ * - profile_min_sclk
++ * - profile_min_mclk
++ * - profile_peak
++ *
++ * auto
++ *
++ * When auto is selected, the driver will attempt to dynamically select
++ * the optimal power profile for current conditions in the driver.
++ *
++ * low
++ *
++ * When low is selected, the clocks are forced to the lowest power state.
++ *
++ * high
++ *
++ * When high is selected, the clocks are forced to the highest power state.
++ *
++ * manual
++ *
++ * When manual is selected, the user can manually adjust which power states
++ * are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk,
++ * and pp_dpm_pcie files and adjust the power state transition heuristics
++ * via the pp_power_profile_mode sysfs file.
++ *
++ * profile_standard
++ * profile_min_sclk
++ * profile_min_mclk
++ * profile_peak
++ *
++ * When the profiling modes are selected, clock and power gating are
++ * disabled and the clocks are set for different profiling cases. This
++ * mode is recommended for profiling specific work loads where you do
++ * not want clock or power gating for clock fluctuation to interfere
++ * with your results. profile_standard sets the clocks to a fixed clock
++ * level which varies from asic to asic. profile_min_sclk forces the sclk
++ * to the lowest level. profile_min_mclk forces the mclk to the lowest level.
++ * profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels.
++ *
++ */
++
+ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+--
+2.7.4
+
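As a hedged illustration of the sysfs interface documented above (not part of
the patch), the snippet below forces the manual level; the card0 path is an
assumption and varies with the GPU index.

    #include <stdio.h>

    int main(void)
    {
        const char *path =
            "/sys/class/drm/card0/device/power_dpm_force_performance_level";
        FILE *f = fopen(path, "w");

        if (!f) {
            perror("power_dpm_force_performance_level");
            return 1;
        }
        /* Needed before the pp_dpm_* and pp_power_profile_mode files
         * accept manual adjustments. */
        fputs("manual\n", f);
        return fclose(f) ? 1 : 0;
    }

Writing requires root; reading the same file back returns the currently
selected level.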
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4242-drm-amdgpu-pm-document-power_dpm_state.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4242-drm-amdgpu-pm-document-power_dpm_state.patch
new file mode 100644
index 00000000..5f886020
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4242-drm-amdgpu-pm-document-power_dpm_state.patch
@@ -0,0 +1,60 @@
+From ca54165717095b3d283a5d1ecd53ca480f8b4b30 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 19 Apr 2018 13:56:41 -0500
+Subject: [PATCH 4242/5725] drm/amdgpu/pm: document power_dpm_state
+
+This is a legacy file and is only provided for
+backwards compatibility.
+
+Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 31 +++++++++++++++++++++++++++++++
+ 1 file changed, 31 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+index ef1eda4..05d701b0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+@@ -77,6 +77,37 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
+ }
+ }
+
++/**
++ * DOC: power_dpm_state
++ *
++ * This is a legacy interface and is only provided for backwards compatibility.
++ * The amdgpu driver provides a sysfs API for adjusting certain power
++ * related parameters. The file power_dpm_state is used for this.
++ * It accepts the following arguments:
++ * - battery
++ * - balanced
++ * - performance
++ *
++ * battery
++ *
++ * On older GPUs, the vbios provided a special power state for battery
++ * operation. Selecting battery switched to this state. This is no
++ * longer provided on newer GPUs so the option does nothing in that case.
++ *
++ * balanced
++ *
++ * On older GPUs, the vbios provided a special power state for balanced
++ * operation. Selecting balanced switched to this state. This is no
++ * longer provided on newer GPUs so the option does nothing in that case.
++ *
++ * performance
++ *
++ * On older GPUs, the vbios provided a special power state for performance
++ * operation. Selecting performance switched to this state. This is no
++ * longer provided on newer GPUs so the option does nothing in that case.
++ *
++ */
++
+ static ssize_t amdgpu_get_dpm_state(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4243-drm-amdgpu-pm-document-pp_table.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4243-drm-amdgpu-pm-document-pp_table.patch
new file mode 100644
index 00000000..044f2739
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4243-drm-amdgpu-pm-document-pp_table.patch
@@ -0,0 +1,39 @@
+From 1990b877ce450e26bae85244b759875daebc035a Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 19 Apr 2018 14:02:52 -0500
+Subject: [PATCH 4243/5725] drm/amdgpu/pm: document pp_table
+
+This file is for uploading new powerplay tables.
+
+Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+index 05d701b0..b659f4b3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+@@ -408,6 +408,17 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
+ return count;
+ }
+
++/**
++ * DOC: pp_table
++ *
++ * The amdgpu driver provides a sysfs API for uploading new powerplay
++ * tables. The file pp_table is used for this. Reading the file
++ * will dump the current power play table. Writing to the file
++ * will attempt to upload a new powerplay table and re-initialize
++ * powerplay using that new table.
++ *
++ */
++
+ static ssize_t amdgpu_get_pp_table(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4244-drm-amdgpu-pm-document-pp_dpm_sclk-pp_dpm_mclk-pp_dp.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4244-drm-amdgpu-pm-document-pp_dpm_sclk-pp_dpm_mclk-pp_dp.patch
new file mode 100644
index 00000000..4e167213
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4244-drm-amdgpu-pm-document-pp_dpm_sclk-pp_dpm_mclk-pp_dp.patch
@@ -0,0 +1,48 @@
+From 70c8b518c22a49a9dd6caf85720dfb66c7dd7bb4 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 19 Apr 2018 14:22:24 -0500
+Subject: [PATCH 4244/5725] drm/amdgpu/pm: document pp_dpm_sclk pp_dpm_mclk
+ pp_dpm_pcie (v2)
+
+Used for manually masking dpm states.
+
+v2: drop comment about current state (Rex)
+
+Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 17 +++++++++++++++++
+ 1 file changed, 17 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+index b659f4b3..4480212 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+@@ -540,6 +540,23 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
+
+ }
+
++/**
++ * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_pcie
++ *
++ * The amdgpu driver provides a sysfs API for adjusting what power levels
++ * are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk,
++ * and pp_dpm_pcie are used for this.
++ *
++ * Reading back the files will show you the available power levels within
++ * the power state and the clock information for those levels.
++ *
++ * To manually adjust these states, first select manual using
++ * power_dpm_force_performance_level. Writing a string of the level
++ * numbers to the file will select which levels you want to enable.
++ * E.g., writing 456 to the file will enable levels 4, 5, and 6.
++ *
++ */
++
+ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+--
+2.7.4
+
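A hedged companion example for the mask files described above: dump the
available sclk levels, then enable levels 4, 5 and 6 (written space-separated
here, equivalent to the "456" form in the text). The card0 path and the chosen
levels are illustrative only.

    #include <stdio.h>

    int main(void)
    {
        const char *path = "/sys/class/drm/card0/device/pp_dpm_sclk";
        char line[128];
        FILE *f = fopen(path, "r");

        if (!f) {
            perror("pp_dpm_sclk");
            return 1;
        }
        while (fgets(line, sizeof(line), f))
            fputs(line, stdout);        /* list levels and their clocks */
        fclose(f);

        f = fopen(path, "w");           /* requires "manual" level and root */
        if (!f) {
            perror("pp_dpm_sclk");
            return 1;
        }
        fputs("4 5 6\n", f);            /* enable only levels 4, 5 and 6 */
        return fclose(f) ? 1 : 0;
    }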
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4245-drm-amdgpu-pm-document-pp_power_profile_mode.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4245-drm-amdgpu-pm-document-pp_power_profile_mode.patch
new file mode 100644
index 00000000..da022540
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4245-drm-amdgpu-pm-document-pp_power_profile_mode.patch
@@ -0,0 +1,48 @@
+From 802d1f256294057e380528b8859c4e5c24c042de Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 19 Apr 2018 14:38:31 -0500
+Subject: [PATCH 4245/5725] drm/amdgpu/pm: document pp_power_profile_mode
+
+sysfs file for adjusting power level heuristics.
+
+Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 20 ++++++++++++++++++++
+ 1 file changed, 20 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+index 4480212..3345760 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+@@ -781,6 +781,26 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
+ return count;
+ }
+
++/**
++ * DOC: pp_power_profile_mode
++ *
++ * The amdgpu driver provides a sysfs API for adjusting the heuristics
++ * related to switching between power levels in a power state. The file
++ * pp_power_profile_mode is used for this.
++ *
++ * Reading this file outputs a list of all of the predefined power profiles
++ * and the relevant heuristics settings for that profile.
++ *
++ * To select a profile or create a custom profile, first select manual using
++ * power_dpm_force_performance_level. Writing the number of a predefined
++ * profile to pp_power_profile_mode will enable those heuristics. To
++ * create a custom set of heuristics, write a string of numbers to the file
++ * starting with the number of the custom profile along with a setting
++ * for each heuristic parameter. Due to differences across asic families
++ * the heuristic parameters vary from family to family.
++ *
++ */
++
+ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4246-drm-amdgpu-pm-document-pp_od_clk_voltage.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4246-drm-amdgpu-pm-document-pp_od_clk_voltage.patch
new file mode 100644
index 00000000..48c548a3
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4246-drm-amdgpu-pm-document-pp_od_clk_voltage.patch
@@ -0,0 +1,51 @@
+From 51b14036234ae03f676aaf09dc1d23eab5b83928 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 19 Apr 2018 14:59:55 -0500
+Subject: [PATCH 4246/5725] drm/amdgpu/pm: document pp_od_clk_voltage
+
+sysfs interface for fine grained clock and voltage control.
+
+Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 23 +++++++++++++++++++++++
+ 1 file changed, 23 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+index 3345760..4cbce49 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+@@ -455,6 +455,29 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
+ return count;
+ }
+
++/**
++ * DOC: pp_od_clk_voltage
++ *
++ * The amdgpu driver provides a sysfs API for adjusting the clocks and voltages
++ * in each power level within a power state. The pp_od_clk_voltage is used for
++ * this.
++ *
++ * Reading the file will display:
++ * - a list of engine clock levels and voltages labeled OD_SCLK
++ * - a list of memory clock levels and voltages labeled OD_MCLK
++ * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE
++ *
++ * To manually adjust these settings, first select manual using
++ * power_dpm_force_performance_level. Enter a new value for each
++ * level by writing a string that contains "s/m level clock voltage" to
++ * the file. E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz
++ * at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at
++ * 810 mV. When you have edited all of the states as needed, write
++ * "c" (commit) to the file to commit your changes. If you want to reset to the
++ * default power levels, write "r" (reset) to the file to reset them.
++ *
++ */
++
+ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+--
+2.7.4
+
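To round out the documentation above, a hedged sketch of the overdrive
sequence: edit one sclk level, then commit. The numbers mirror the commit
message's example and are not tuning advice; the card0 path is an assumption
and the values must fall inside the ranges reported as OD_RANGE.

    #include <stdio.h>

    static int write_od(const char *cmd)
    {
        const char *path = "/sys/class/drm/card0/device/pp_od_clk_voltage";
        FILE *f = fopen(path, "w");

        if (!f) {
            perror("pp_od_clk_voltage");
            return -1;
        }
        fputs(cmd, f);
        return fclose(f);
    }

    int main(void)
    {
        /* Assumes "manual" was selected via power_dpm_force_performance_level. */
        if (write_od("s 1 500 820\n"))  /* sclk level 1: 500 MHz at 820 mV */
            return 1;
        if (write_od("c\n"))            /* commit the edited table */
            return 1;
        return 0;
    }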
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4247-drm-amd-pp-Change-voltage-clk-range-for-OD-feature-o.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4247-drm-amd-pp-Change-voltage-clk-range-for-OD-feature-o.patch
new file mode 100644
index 00000000..8dfa26b8
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4247-drm-amd-pp-Change-voltage-clk-range-for-OD-feature-o.patch
@@ -0,0 +1,43 @@
+From 837ff93e2b22e80dd59ee7456ef7f412c57a7c78 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Wed, 18 Apr 2018 21:09:35 +0800
+Subject: [PATCH 4247/5725] drm/amd/pp: Change voltage/clk range for OD feature
+ on VI
+
+Read the vddc range from the vbios.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c | 8 +++-----
+ 1 file changed, 3 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+index d3eeafb..7766f5c 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+@@ -1520,17 +1520,15 @@ void atomctrl_get_voltage_range(struct pp_hwmgr *hwmgr, uint32_t *max_vddc,
+ case CHIP_FIJI:
+ *max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMaxVddc/4);
+ *min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMinVddc/4);
+- return;
++ break;
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS12:
+ *max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMaxVddc/100);
+ *min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMinVddc/100);
+- return;
+- default:
+ break;
++ default:
++ return;
+ }
+ }
+- *max_vddc = 0;
+- *min_vddc = 0;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4248-drm-amdgpu-Enable-scatter-gather-display-support.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4248-drm-amdgpu-Enable-scatter-gather-display-support.patch
new file mode 100644
index 00000000..87f86d38
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4248-drm-amdgpu-Enable-scatter-gather-display-support.patch
@@ -0,0 +1,59 @@
+From 544d0d569ce1ea6549d4400cb92e90f65922c3d5 Mon Sep 17 00:00:00 2001
+From: Samuel Li <Samuel.Li@amd.com>
+Date: Wed, 18 Apr 2018 16:15:52 -0400
+Subject: [PATCH 4248/5725] drm/amdgpu: Enable scatter gather display support
+
+Enable scatter/gather (sg) display if the VRAM size is <= the
+threshold (256M); otherwise still use VRAM as the display buffer.
+This patch fixes some potential issues introduced by the change
+"allow framebuffer in GART memory as well" due to a CZ/ST hardware
+limitation.
+
+v2: Change default setting to auto.
+v3: Move some logic from amdgpu_display_framebuffer_domains()
+ to pin function, suggested by Christian.
+v4: Split into several patches.
+v5: Drop module parameter for now.
+
+Signed-off-by: Samuel Li <Samuel.Li@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 +
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 9 +++++++++
+ 2 files changed, 10 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 01496d7..7ba0b6b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -140,6 +140,7 @@ extern int amdgpu_si_support;
+ extern int amdgpu_cik_support;
+ #endif
+
++#define AMDGPU_SG_THRESHOLD (256*1024*1024)
+ #define AMDGPU_DEFAULT_GTT_SIZE_MB 3072ULL /* 3GB by default */
+ #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
+ #define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index dcf5acd..9d6c659 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -741,6 +741,15 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
+ return -EINVAL;
+ }
+
++ /* This assumes only APU display buffers are pinned with (VRAM|GTT).
++ * See function amdgpu_display_supported_domains()
++ */
++ if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
++ domain = AMDGPU_GEM_DOMAIN_VRAM;
++ if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
++ domain = AMDGPU_GEM_DOMAIN_GTT;
++ }
++
+ if (bo->pin_count) {
+ uint32_t mem_type = bo->tbo.mem.mem_type;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4249-Revert-drm-amdgpu-defer-test-IBs-on-the-rings-at-boo.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4249-Revert-drm-amdgpu-defer-test-IBs-on-the-rings-at-boo.patch
new file mode 100644
index 00000000..11225c16
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4249-Revert-drm-amdgpu-defer-test-IBs-on-the-rings-at-boo.patch
@@ -0,0 +1,78 @@
+From e8a38c7525b9f8e7f8b9951c3b9f9c8d2cad2194 Mon Sep 17 00:00:00 2001
+From: Kevin Wang <Kevin1.Wang@amd.com>
+Date: Wed, 25 Apr 2018 14:18:08 +0800
+Subject: [PATCH 4249/5725] Revert "drm/amdgpu: defer test IBs on the rings at
+ boot (V3)"
+
+This reverts commit 2e05f86270834085e052a2de0cf968fdcaef80ba.
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 17 +++++++++++------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 3 ---
+ 2 files changed, 11 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 6b221cc5..a23b1ec 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1752,10 +1752,6 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
+ if (amdgpu_emu_mode == 1)
+ return 0;
+
+- r = amdgpu_ib_ring_tests(adev);
+- if (r)
+- DRM_ERROR("ib ring test failed (%d).\n", r);
+-
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_blocks[i].status.valid)
+ continue;
+@@ -1818,8 +1814,8 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
+ }
+ }
+
+- queue_delayed_work(system_wq, &adev->late_init_work,
+- msecs_to_jiffies(AMDGPU_RESUME_MS));
++ mod_delayed_work(system_wq, &adev->late_init_work,
++ msecs_to_jiffies(AMDGPU_RESUME_MS));
+
+ amdgpu_device_fill_reset_magic(adev);
+
+@@ -2491,6 +2487,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
+ goto failed;
+ }
+
++ r = amdgpu_ib_ring_tests(adev);
++ if (r)
++ DRM_ERROR("ib ring test failed (%d).\n", r);
++
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_init_data_exchange(adev);
+
+@@ -2756,6 +2756,11 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
+
+ amdgpu_fence_driver_resume(adev);
+
++ if (resume) {
++ r = amdgpu_ib_ring_tests(adev);
++ if (r)
++ DRM_ERROR("ib ring test failed (%d).\n", r);
++ }
+
+ r = amdgpu_device_ip_late_init(adev);
+ if (r)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index 73c2f7a..ba812da 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -289,9 +289,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+ if (!info->return_size || !info->return_pointer)
+ return -EINVAL;
+
+- /* Ensure IB tests are run on ring */
+- flush_delayed_work(&adev->late_init_work);
+-
+ switch (info->query) {
+ case AMDGPU_INFO_VIRTUAL_RANGE: {
+ struct drm_amdgpu_virtual_range range_info;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4250-drm-amdkfd-Use-shared-IH-client-ID.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4250-drm-amdkfd-Use-shared-IH-client-ID.patch
new file mode 100644
index 00000000..a0d94c9d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4250-drm-amdkfd-Use-shared-IH-client-ID.patch
@@ -0,0 +1,65 @@
+From aa5ce7bc3bfeff05e39cd335be2d637292fa5f8b Mon Sep 17 00:00:00 2001
+From: Oak Zeng <Oak.Zeng@amd.com>
+Date: Thu, 8 Mar 2018 17:49:16 -0500
+Subject: [PATCH 4250/5725] drm/amdkfd: Use shared IH client ID
+
+Change-Id: I571f0febaea4d72f727951b7b60813c168b7defd
+Signed-off-by: Oak Zeng <Oak.Zeng@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/soc15_int.h | 39 +---------------------------------
+ 1 file changed, 1 insertion(+), 38 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/soc15_int.h b/drivers/gpu/drm/amd/amdkfd/soc15_int.h
+index e00d03d..011c14c 100644
+--- a/drivers/gpu/drm/amd/amdkfd/soc15_int.h
++++ b/drivers/gpu/drm/amd/amdkfd/soc15_int.h
+@@ -22,45 +22,8 @@
+
+ #ifndef HSA_SOC15_INT_H_INCLUDED
+ #define HSA_SOC15_INT_H_INCLUDED
+-/*
+- * vega10+ IH clients
+- */
+-enum soc15_ih_client_id {
+- SOC15_IH_CLIENTID_IH = 0x00,
+- SOC15_IH_CLIENTID_ACP = 0x01,
+- SOC15_IH_CLIENTID_ATHUB = 0x02,
+- SOC15_IH_CLIENTID_BIF = 0x03,
+- SOC15_IH_CLIENTID_DCE = 0x04,
+- SOC15_IH_CLIENTID_ISP = 0x05,
+- SOC15_IH_CLIENTID_PCIE0 = 0x06,
+- SOC15_IH_CLIENTID_RLC = 0x07,
+- SOC15_IH_CLIENTID_SDMA0 = 0x08,
+- SOC15_IH_CLIENTID_SDMA1 = 0x09,
+- SOC15_IH_CLIENTID_SE0SH = 0x0a,
+- SOC15_IH_CLIENTID_SE1SH = 0x0b,
+- SOC15_IH_CLIENTID_SE2SH = 0x0c,
+- SOC15_IH_CLIENTID_SE3SH = 0x0d,
+- SOC15_IH_CLIENTID_SYSHUB = 0x0e,
+- SOC15_IH_CLIENTID_THM = 0x0f,
+- SOC15_IH_CLIENTID_UVD = 0x10,
+- SOC15_IH_CLIENTID_VCE0 = 0x11,
+- SOC15_IH_CLIENTID_VMC = 0x12,
+- SOC15_IH_CLIENTID_XDMA = 0x13,
+- SOC15_IH_CLIENTID_GRBM_CP = 0x14,
+- SOC15_IH_CLIENTID_ATS = 0x15,
+- SOC15_IH_CLIENTID_ROM_SMUIO = 0x16,
+- SOC15_IH_CLIENTID_DF = 0x17,
+- SOC15_IH_CLIENTID_VCE1 = 0x18,
+- SOC15_IH_CLIENTID_PWR = 0x19,
+- SOC15_IH_CLIENTID_UTCL2 = 0x1b,
+- SOC15_IH_CLIENTID_EA = 0x1c,
+- SOC15_IH_CLIENTID_UTCL2LOG = 0x1d,
+- SOC15_IH_CLIENTID_MP0 = 0x1e,
+- SOC15_IH_CLIENTID_MP1 = 0x1f,
+-
+- SOC15_IH_CLIENTID_MAX
+-};
+
++#include "soc15_ih_clientid.h"
+
+ #define SOC15_INTSRC_CP_END_OF_PIPE 181
+ #define SOC15_INTSRC_CP_BAD_OPCODE 183
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4251-drm-amdkfd-Implement-hw_exception-work-thread-to-han.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4251-drm-amdkfd-Implement-hw_exception-work-thread-to-han.patch
new file mode 100644
index 00000000..2e680162
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4251-drm-amdkfd-Implement-hw_exception-work-thread-to-han.patch
@@ -0,0 +1,134 @@
+From 22f128a6d675e2eb759b07ee3eaeb0d56e2f0b48 Mon Sep 17 00:00:00 2001
+From: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Date: Thu, 5 Apr 2018 15:01:40 -0400
+Subject: [PATCH 4251/5725] drm/amdkfd: Implement hw_exception work thread to
+ handle hws hang
+
+Change-Id: I021fe1e875baa4242c5347e02559a414937dfa96
+Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 4 +---
+ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 22 +++++++++++++++++++++-
+ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 4 ++++
+ 3 files changed, 26 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index a242208..d1a18c9 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -137,10 +137,8 @@ static int kfd_open(struct inode *inode, struct file *filep)
+ if (IS_ERR(process))
+ return PTR_ERR(process);
+
+- if (kfd_is_locked()) {
+- kfd_unref_process(process);
++ if (kfd_is_locked())
+ return -EAGAIN;
+- }
+
+ dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
+ process->pasid, process->is_32bit_user_mode);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index b0c159a..82c7dbe 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -60,6 +60,8 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
+ static void deallocate_sdma_queue(struct device_queue_manager *dqm,
+ unsigned int sdma_queue_id);
+
++static void kfd_process_hw_exception(struct work_struct *work);
++
+ static inline
+ enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
+ {
+@@ -1021,6 +1023,8 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
+ dqm->active_runlist = false;
+ dqm->sdma_bitmap = (1 << get_num_sdma_queues(dqm)) - 1;
+
++ INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
++
+ return 0;
+ }
+
+@@ -1053,6 +1057,8 @@ static int start_cpsch(struct device_queue_manager *dqm)
+ init_interrupts(dqm);
+
+ mutex_lock(&dqm->lock);
++ /* clear hang status when the driver tries to start the hw scheduler */
++ dqm->is_hws_hang = false;
+ execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
+ mutex_unlock(&dqm->lock);
+
+@@ -1268,6 +1274,8 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
+ {
+ int retval = 0;
+
++ if (dqm->is_hws_hang)
++ return -EIO;
+ if (!dqm->active_runlist)
+ return retval;
+
+@@ -1306,9 +1314,13 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm,
+ {
+ int retval;
+
++ if (dqm->is_hws_hang)
++ return -EIO;
+ retval = unmap_queues_cpsch(dqm, filter, filter_param);
+ if (retval) {
+ pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
++ dqm->is_hws_hang = true;
++ schedule_work(&dqm->hw_exception_work);
+ return retval;
+ }
+
+@@ -1590,7 +1602,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
+ }
+
+ retval = execute_queues_cpsch(dqm, filter, 0);
+- if (retval || qpd->reset_wavefronts) {
++ if ((!dqm->is_hws_hang) && (retval || qpd->reset_wavefronts)) {
+ pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev);
+ dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process);
+ qpd->reset_wavefronts = false;
+@@ -1611,6 +1623,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
+
+ out:
+ mutex_unlock(&dqm->lock);
++
+ return retval;
+ }
+
+@@ -1744,6 +1757,13 @@ int kfd_process_vm_fault(struct device_queue_manager *dqm,
+ return ret;
+ }
+
++static void kfd_process_hw_exception(struct work_struct *work)
++{
++ struct device_queue_manager *dqm = container_of(work,
++ struct device_queue_manager, hw_exception_work);
++ dqm->dev->kfd2kgd->gpu_recover(dqm->dev->kgd);
++}
++
+ #if defined(CONFIG_DEBUG_FS)
+
+ static void seq_reg_dump(struct seq_file *m,
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+index 978458a..3f17e5e 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+@@ -195,6 +195,10 @@ struct device_queue_manager {
+ struct kfd_mem_obj *fence_mem;
+ bool active_runlist;
+ int sched_policy;
++
++ /* hw exception */
++ bool is_hws_hang;
++ struct work_struct hw_exception_work;
+ };
+
+ void device_queue_manager_init_cik(
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4252-drm-amdkfd-Remove-queue-node-when-destroy-queue-fail.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4252-drm-amdkfd-Remove-queue-node-when-destroy-queue-fail.patch
new file mode 100644
index 00000000..930dd525
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4252-drm-amdkfd-Remove-queue-node-when-destroy-queue-fail.patch
@@ -0,0 +1,54 @@
+From 49f0dbaafc217feae0226277fa4b1c65d8ec26e6 Mon Sep 17 00:00:00 2001
+From: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Date: Wed, 11 Apr 2018 13:24:05 -0400
+Subject: [PATCH 4252/5725] drm/amdkfd: Remove queue node when destroy queue
+ failed
+
+HWS may hang in the middle of destroying a queue. Remove the queue from the
+process queue list so it won't be freed again in the future.
+
+Change-Id: I5ef218616cf414ccb9a5be295d1e9860feeda490
+Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 14 +++++++++-----
+ 1 file changed, 9 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+index f259fd3..c950149 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+@@ -241,7 +241,8 @@ int pqm_create_queue(struct process_queue_manager *pqm,
+ }
+
+ if (retval != 0) {
+- pr_err("DQM create queue failed\n");
++ pr_err("Pasid %d DQM create queue %d failed. ret %d\n",
++ pqm->process->pasid, type, retval);
+ goto err_create_queue;
+ }
+
+@@ -317,13 +318,16 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
+
+ if (pqn->q) {
+ dqm = pqn->q->device->dqm;
+- kfree(pqn->q->properties.cu_mask);
+- pqn->q->properties.cu_mask = NULL;
+ retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
+ if (retval) {
+- pr_debug("Destroy queue failed, returned %d\n", retval);
+- goto err_destroy_queue;
++ pr_err("Pasid %d destroy queue %d failed, ret %d\n",
++ pqm->process->pasid,
++ pqn->q->properties.queue_id, retval);
++ if (retval != -ETIME)
++ goto err_destroy_queue;
+ }
++ kfree(pqn->q->properties.cu_mask);
++ pqn->q->properties.cu_mask = NULL;
+ uninit_queue(pqn->q);
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4253-drm-amdgpu-Always-call-kfd-post-reset-after-reset.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4253-drm-amdgpu-Always-call-kfd-post-reset-after-reset.patch
new file mode 100644
index 00000000..ed7c7c23
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4253-drm-amdgpu-Always-call-kfd-post-reset-after-reset.patch
@@ -0,0 +1,38 @@
+From 09e93809fdfc9cff10f07c64e8deba7ee261d568 Mon Sep 17 00:00:00 2001
+From: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Date: Wed, 11 Apr 2018 15:42:39 -0400
+Subject: [PATCH 4253/5725] drm/amdgpu: Always call kfd post reset after reset
+
+Even if the reset failed, kfd post reset needs to be called to keep the lock
+balanced on the kfd side.
+
+Change-Id: I8b6ef29d7527915611be0b96a9cd039bc75bb0a9
+Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index a23b1ec..8859f19 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -3321,10 +3321,10 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
+ } else {
+ dev_info(adev->dev, "GPU reset(%d) successed!\n",atomic_read(&adev->gpu_reset_counter));
+- /*unlock kfd after a successfully recovery*/
+- amdgpu_amdkfd_post_reset(adev);
+ }
+-
++
++ /*unlock kfd */
++ amdgpu_amdkfd_post_reset(adev);
+ amdgpu_vf_error_trans_all(adev);
+ adev->in_gpu_reset = 0;
+ mutex_unlock(&adev->lock_reset);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4254-drm-amdkfd-CMA-Remove-diff.-device-restriction.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4254-drm-amdkfd-CMA-Remove-diff.-device-restriction.patch
new file mode 100644
index 00000000..bb79eff3
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4254-drm-amdkfd-CMA-Remove-diff.-device-restriction.patch
@@ -0,0 +1,102 @@
+From ce3bd835c235507674e471acc612104da3eeec45 Mon Sep 17 00:00:00 2001
+From: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+Date: Fri, 6 Apr 2018 18:07:25 -0400
+Subject: [PATCH 4254/5725] drm/amdkfd: CMA: Remove diff. device restriction
+
+CMA is supported in certain situations if the BOs are registered to different
+devices. They are -
+
+a) If both source and destination are userptr then the device doesn't matter
+as the CPU is used to copy.
+b) If one of them is a userptr, then the shadow system BO will be created
+on the other device. So the copy will be done by that device.
+
+The unsupported cases are -
+
+a) The system BOs are always registered to the first device. So if one
+BO is system memory and the other BO is local memory on a different device
+then it is not supported currently.
+b) If both BOs are in local memory of different devices then it is not
+supported.
+
+BUG:SWDEV-146559
+
+Change-Id: I0ff5426402c147dd19ec15abafd18807ecca25fe
+Signed-off-by: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 21 ++++++++++-----------
+ 1 file changed, 10 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index d1a18c9..a216225 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -2118,6 +2118,7 @@ static int kfd_copy_bos(struct cma_iter *si, struct cma_iter *di,
+ struct kfd_bo *dst_bo = di->cur_bo, *src_bo = si->cur_bo;
+ uint64_t src_offset = si->bo_offset, dst_offset = di->bo_offset;
+ struct kgd_mem *src_mem = src_bo->mem, *dst_mem = dst_bo->mem;
++ struct kfd_dev *dev = dst_bo->dev;
+
+ *copied = 0;
+ if (f)
+@@ -2129,11 +2130,14 @@ static int kfd_copy_bos(struct cma_iter *si, struct cma_iter *di,
+ * by using the underlying userptr BO pages. Then use this shadow
+ * BO for copy. src_offset & dst_offset are adjusted because the new BO
+ * is only created for the window (offset, size) requested.
++ * The shadow BO is created on the other device. This means if the
++ * other BO is a device memory, the copy will be using that device.
+ * The BOs are stored in cma_list for deferred cleanup. This minimizes
+ * fence waiting just to the last fence.
+ */
+ if (src_bo->cpuva) {
+- err = kfd_create_cma_system_bo(dst_bo->dev, src_bo, &size,
++ dev = dst_bo->dev;
++ err = kfd_create_cma_system_bo(dev, src_bo, &size,
+ si->bo_offset, cma_write,
+ si->p, si->mm, si->task,
+ &si->cma_bo);
+@@ -2141,7 +2145,8 @@ static int kfd_copy_bos(struct cma_iter *si, struct cma_iter *di,
+ src_offset = si->bo_offset & (PAGE_SIZE - 1);
+ list_add_tail(&si->cma_bo->list, &si->cma_list);
+ } else if (dst_bo->cpuva) {
+- err = kfd_create_cma_system_bo(src_bo->dev, dst_bo, &size,
++ dev = src_bo->dev;
++ err = kfd_create_cma_system_bo(dev, dst_bo, &size,
+ di->bo_offset, cma_write,
+ di->p, di->mm, di->task,
+ &di->cma_bo);
+@@ -2150,15 +2155,15 @@ static int kfd_copy_bos(struct cma_iter *si, struct cma_iter *di,
+ list_add_tail(&di->cma_bo->list, &di->cma_list);
+ } else if (src_bo->dev->kgd != dst_bo->dev->kgd) {
+ pr_err("CMA %d fail. Not same dev\n", cma_write);
+- err = -EINVAL;
++ return -EINVAL;
+ }
+
+ if (err) {
+ pr_err("Failed to create system BO %d", err);
+- err = -EINVAL;
++ return -EINVAL;
+ }
+
+- err = dst_bo->dev->kfd2kgd->copy_mem_to_mem(src_bo->dev->kgd, src_mem,
++ err = dst_bo->dev->kfd2kgd->copy_mem_to_mem(dev->kgd, src_mem,
+ src_offset, dst_mem,
+ dst_offset, size, f,
+ copied);
+@@ -2193,12 +2198,6 @@ static int kfd_copy_single_range(struct cma_iter *si, struct cma_iter *di,
+
+ copy_size = min(size, (di->array->size - di->offset));
+
+- /* Check both BOs belong to same device */
+- if (src_bo->dev->kgd != dst_bo->dev->kgd) {
+- pr_err("CMA fail. Not same dev\n");
+- return -EINVAL;
+- }
+-
+ err = kfd_copy_bos(si, di, cma_write, copy_size, &fence, &n);
+ if (err) {
+ pr_err("CMA %d failed\n", err);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4255-drm-amdkfd-CMA-Store-mem_type-in-KFD-BO.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4255-drm-amdkfd-CMA-Store-mem_type-in-KFD-BO.patch
new file mode 100644
index 00000000..5af75f67
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4255-drm-amdkfd-CMA-Store-mem_type-in-KFD-BO.patch
@@ -0,0 +1,127 @@
+From f8525598b2b0c327686910133fcc7c3df5e4c154 Mon Sep 17 00:00:00 2001
+From: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+Date: Mon, 9 Apr 2018 16:03:21 -0400
+Subject: [PATCH 4255/5725] drm/amdkfd: CMA: Store mem_type in KFD BO
+
+It is needed for supporting CMA when the BOs belong to different
+devices.
+
+Change-Id: I9acc5595e574141d8955e36ff0a98e5bac9b6fc1
+Signed-off-by: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 7 ++++++-
+ drivers/gpu/drm/amd/amdkfd/kfd_ipc.c | 2 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 2 ++
+ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 10 +++++++++-
+ 4 files changed, 18 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index a216225..0972243 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -1246,6 +1246,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
+ uint32_t flags = args->flags;
+ struct vm_area_struct *vma;
+ uint64_t cpuva = 0;
++ unsigned int mem_type = 0;
+
+ if (args->size == 0)
+ return -EINVAL;
+@@ -1305,8 +1306,12 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
+ if (err)
+ goto err_unlock;
+
++ mem_type = flags & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM |
++ KFD_IOC_ALLOC_MEM_FLAGS_GTT |
++ KFD_IOC_ALLOC_MEM_FLAGS_USERPTR |
++ KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL);
+ idr_handle = kfd_process_device_create_obj_handle(pdd, mem,
+- args->va_addr, args->size, cpuva, NULL);
++ args->va_addr, args->size, cpuva, mem_type, NULL);
+ if (idr_handle < 0) {
+ err = -EFAULT;
+ goto err_free;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_ipc.c b/drivers/gpu/drm/amd/amdkfd/kfd_ipc.c
+index 845dbf7..a53d954 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_ipc.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_ipc.c
+@@ -140,7 +140,7 @@ static int kfd_import_dmabuf_create_kfd_bo(struct kfd_dev *dev,
+ goto err_unlock;
+
+ idr_handle = kfd_process_device_create_obj_handle(pdd, mem,
+- va_addr, size, 0,
++ va_addr, size, 0, 0,
+ ipc_obj);
+ if (idr_handle < 0) {
+ r = -EFAULT;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index cbb65b0..3f77bab 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -303,6 +303,7 @@ struct kfd_bo {
+ struct kfd_ipc_obj *kfd_ipc_obj;
+ /* page-aligned VA address */
+ uint64_t cpuva;
++ unsigned int mem_type;
+ };
+
+ struct cma_system_bo {
+@@ -819,6 +820,7 @@ int kfd_reserved_mem_mmap(struct kfd_process *process,
+ int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
+ void *mem, uint64_t start,
+ uint64_t length, uint64_t cpuva,
++ unsigned int mem_type,
+ struct kfd_ipc_obj *ipc_obj);
+ void *kfd_process_device_translate_handle(struct kfd_process_device *p,
+ int handle);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index ef71670..3650183 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -118,6 +118,7 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
+ struct kgd_mem *mem = NULL;
+ int handle;
+ int err;
++ unsigned int mem_type;
+
+ err = kdev->kfd2kgd->alloc_memory_of_gpu(kdev->kgd, gpu_va, size,
+ pdd->vm, NULL, &mem, NULL,
+@@ -135,13 +136,18 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
+ goto sync_memory_failed;
+ }
+
++ mem_type = flags & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM |
++ KFD_IOC_ALLOC_MEM_FLAGS_GTT |
++ KFD_IOC_ALLOC_MEM_FLAGS_USERPTR |
++ KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL);
++
+ /* Create an obj handle so kfd_process_device_remove_obj_handle
+ * will take care of the bo removal when the process finishes.
+ * We do not need to take p->mutex, because the process is just
+ * created and the ioctls have not had the chance to run.
+ */
+ handle = kfd_process_device_create_obj_handle(
+- pdd, mem, gpu_va, size, 0, NULL);
++ pdd, mem, gpu_va, size, 0, mem_type, NULL);
+
+ if (handle < 0) {
+ err = handle;
+@@ -808,6 +814,7 @@ bool kfd_has_process_device_data(struct kfd_process *p)
+ int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
+ void *mem, uint64_t start,
+ uint64_t length, uint64_t cpuva,
++ unsigned int mem_type,
+ struct kfd_ipc_obj *ipc_obj)
+ {
+ int handle;
+@@ -829,6 +836,7 @@ int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
+ buf_obj->dev = pdd->dev;
+ buf_obj->kfd_ipc_obj = ipc_obj;
+ buf_obj->cpuva = cpuva;
++ buf_obj->mem_type = mem_type;
+
+ INIT_LIST_HEAD(&buf_obj->cb_data_head);
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4256-drm-amdkfd-CMA-Support-for-diff.-devices.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4256-drm-amdkfd-CMA-Support-for-diff.-devices.patch
new file mode 100644
index 00000000..147d7aba
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4256-drm-amdkfd-CMA-Support-for-diff.-devices.patch
@@ -0,0 +1,40 @@
+From 6c5d72666e8df579b1d136158e44bf219d3f0e97 Mon Sep 17 00:00:00 2001
+From: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+Date: Mon, 9 Apr 2018 16:27:07 -0400
+Subject: [PATCH 4256/5725] drm/amdkfd: CMA: Support for diff. devices
+
+Support CMA between a System Memory BO and a Local Memory BO even if they
+are registered to separate devices. The copy will be done by the device
+to which the Local Memory BO belongs. The system memory BO will be
+temporarily mapped into this device's gart.
+
+Change-Id: Ief4af0db8b5f6af1a2fa1ed0596cf9e2fd953841
+Signed-off-by: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index 0972243..9be0ac1 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -2159,8 +2159,14 @@ static int kfd_copy_bos(struct cma_iter *si, struct cma_iter *di,
+ dst_offset = di->bo_offset & (PAGE_SIZE - 1);
+ list_add_tail(&di->cma_bo->list, &di->cma_list);
+ } else if (src_bo->dev->kgd != dst_bo->dev->kgd) {
+- pr_err("CMA %d fail. Not same dev\n", cma_write);
+- return -EINVAL;
++ /* This indicates that either or/both BOs are in local mem. */
++ if (src_bo->mem_type == KFD_IOC_ALLOC_MEM_FLAGS_VRAM &&
++ dst_bo->mem_type == KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
++ pr_err("CMA fail. Local mem & not in same dev\n");
++ return -EINVAL;
++ } else if (src_bo->mem_type == KFD_IOC_ALLOC_MEM_FLAGS_VRAM)
++ dev = src_bo->dev;
++ /* else already set to dst_bo->dev */
+ }
+
+ if (err) {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4257-drm-amdkfd-Remove-unused-variable.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4257-drm-amdkfd-Remove-unused-variable.patch
new file mode 100644
index 00000000..f29a5e41
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4257-drm-amdkfd-Remove-unused-variable.patch
@@ -0,0 +1,26 @@
+From b6f66788d04b9e6ca6bf692765ed2b64f38b7824 Mon Sep 17 00:00:00 2001
+From: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+Date: Mon, 9 Apr 2018 16:37:04 -0400
+Subject: [PATCH 4257/5725] drm/amdkfd: Remove unused variable
+
+Change-Id: Ic3d7beda97308b09b8765ce1dc69970814943dfe
+Signed-off-by: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index 9be0ac1..98d9b1b 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -2205,7 +2205,6 @@ static int kfd_copy_single_range(struct cma_iter *si, struct cma_iter *di,
+
+ while (size && !kfd_cma_iter_end(di)) {
+ struct dma_fence *fence = NULL;
+- struct kfd_bo *dst_bo = di->cur_bo;
+
+ copy_size = min(size, (di->array->size - di->offset));
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4258-drm-amdgpu-uvd7-add-emit_reg_write_reg_wait-ring-cal.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4258-drm-amdgpu-uvd7-add-emit_reg_write_reg_wait-ring-cal.patch
new file mode 100644
index 00000000..efa40f14
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4258-drm-amdgpu-uvd7-add-emit_reg_write_reg_wait-ring-cal.patch
@@ -0,0 +1,68 @@
+From fe0e6d3a17b050001b667bec61b540d15be3ba00 Mon Sep 17 00:00:00 2001
+From: Xiaojie Yuan <Xiaojie.Yuan@amd.com>
+Date: Thu, 26 Apr 2018 18:50:26 +0800
+Subject: [PATCH 4258/5725] drm/amdgpu/uvd7: add emit_reg_write_reg_wait ring
+ callback
+
+Fix the NULL pointer dereference while running amdgpu_test:
+
+[ 54.972246] BUG: unable to handle kernel NULL pointer dereference at 0000000000000000
+[ 54.972265] IP: (null)
+[ 54.972273] PGD 0 P4D 0
+[ 54.972280] Oops: 0010 [#1] SMP PTI
+[ 54.972288] Modules linked in: amdkfd amd_iommu_v2 amdgpu(OE) chash gpu_sched ttm drm_kms_helper drm i2c_algo_bit fb_sys_fops syscopyarea sysfillrect sysimgblt snd_hda_codec_realtek snd_hda_codec_generic snd_hda_codec_hdmi snd_hda_intel snd_hda_codec snd_hda_core snd_hwdep intel_rapl snd_pcm snd_seq_midi snd_seq_midi_event snd_rawmidi x86_pkg_temp_thermal intel_powerclamp coretemp kvm_intel snd_seq snd_seq_device kvm irqbypass snd_timer crct10dif_pclmul crc32_pclmul ghash_clmulni_intel pcbc snd soundcore joydev input_leds aesni_intel aes_x86_64 crypto_simd glue_helper cryptd idma64 virt_dma mei_me intel_lpss_pci serio_raw intel_cstate intel_rapl_perf shpchp intel_pch_thermal mei mac_hid intel_lpss acpi_pad parport_pc ppdev nfsd lp auth_rpcgss nfs_acl lockd grace sunrpc parport autofs4 hid_generic
+[ 54.972434] usbhid mxm_wmi e1000e psmouse ahci hid libahci wmi pinctrl_sunrisepoint video pinctrl_intel
+[ 54.972457] CPU: 6 PID: 1393 Comm: uvd Tainted: G OE 4.16.0-rc7-27fb84fda777 #1
+[ 54.972473] Hardware name: MSI MS-7984/Z170 KRAIT GAMING (MS-7984), BIOS B.80 05/11/2016
+[ 54.972489] RIP: 0010: (null)
+[ 54.972497] RSP: 0018:ffffaea002c8bcc0 EFLAGS: 00010202
+[ 54.972508] RAX: 0000000000000000 RBX: ffff9d30d3c56f60 RCX: 00000000007c0002
+[ 54.972522] RDX: 000000000001a6fb RSI: 000000000001a6e9 RDI: ffff9d30d3c56f60
+[ 54.972536] RBP: ffffaea002c8bd10 R08: 0000000000000002 R09: ffffffffc06977d0
+[ 54.972550] R10: 0000000000000040 R11: 0000000000000000 R12: 0000000000000002
+[ 54.972564] R13: ffff9d30d3c5001c R14: ffff9d30d3c50000 R15: 0000000000000006
+[ 54.972579] FS: 0000000000000000(0000) GS:ffff9d30eed80000(0000) knlGS:0000000000000000
+[ 54.972594] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 54.972606] CR2: 0000000000000000 CR3: 00000002dbc0a001 CR4: 00000000003606e0
+[ 54.972620] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+[ 54.972634] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+[ 54.972648] Call Trace:
+[ 54.972685] ? gmc_v9_0_emit_flush_gpu_tlb+0x111/0x140 [amdgpu]
+[ 54.972721] uvd_v7_0_ring_emit_vm_flush+0x31/0x70 [amdgpu]
+[ 54.972751] amdgpu_vm_flush+0x5dc/0x6c0 [amdgpu]
+[ 54.972787] ? pp_dpm_powergate_uvd+0x50/0x80 [amdgpu]
+[ 54.972816] amdgpu_ib_schedule+0x120/0x4e0 [amdgpu]
+[ 54.972850] amdgpu_job_run+0x17b/0x1c0 [amdgpu]
+[ 54.972861] drm_sched_main+0x2cc/0x490 [gpu_sched]
+[ 54.972873] ? wait_woken+0x80/0x80
+[ 54.972882] kthread+0x121/0x140
+[ 54.972891] ? drm_sched_job_finish+0xf0/0xf0 [gpu_sched]
+[ 54.972902] ? kthread_create_worker_on_cpu+0x70/0x70
+[ 54.972914] ret_from_fork+0x35/0x40
+[ 54.972922] Code: Bad RIP value.
+[ 54.972932] RIP: (null) RSP: ffffaea002c8bcc0
+[ 54.972943] CR2: 0000000000000000
+[ 54.972951] ---[ end trace 5feb349263bbf633 ]---
+
+Change-Id: I15708d781fdf84c8947ce04dbea904f1fbf1876a
+Signed-off-by: Xiaojie Yuan <Xiaojie.Yuan@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+index 31e339c..8245bb6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+@@ -1705,6 +1705,7 @@ static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
+ .end_use = amdgpu_uvd_ring_end_use,
+ .emit_wreg = uvd_v7_0_ring_emit_wreg,
+ .emit_reg_wait = uvd_v7_0_ring_emit_reg_wait,
++ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ };
+
+ static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4259-Hybrid-Version-18.30.0.15.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4259-Hybrid-Version-18.30.0.15.patch
new file mode 100644
index 00000000..c3159102
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4259-Hybrid-Version-18.30.0.15.patch
@@ -0,0 +1,27 @@
+From edaaebb6fe920fc2fe44a1ad063f90144346d599 Mon Sep 17 00:00:00 2001
+From: Junshan Fang <Junshan.Fang@amd.com>
+Date: Mon, 7 May 2018 11:12:09 +0800
+Subject: [PATCH 4259/5725] Hybrid Version: 18.30.0.15
+
+Change-Id: I1d7b123b7c519bf3a6e5f3f135431cc038ad47db
+Signed-off-by: Junshan Fang <Junshan.Fang@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 51e2928..d05760e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -81,7 +81,7 @@
+ #define KMS_DRIVER_MINOR 26
+ #define KMS_DRIVER_PATCHLEVEL 0
+
+-#define AMDGPU_VERSION "18.20.2.15"
++#define AMDGPU_VERSION "18.30.0.15"
+
+ int amdgpu_vram_limit = 0;
+ int amdgpu_vis_vram_limit = 0;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4260-Hybrid-Version-18.30.1.15.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4260-Hybrid-Version-18.30.1.15.patch
new file mode 100644
index 00000000..3e2bbde8
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4260-Hybrid-Version-18.30.1.15.patch
@@ -0,0 +1,27 @@
+From 8cb4b9016a4358f17da3ea3ada24cf6131b49256 Mon Sep 17 00:00:00 2001
+From: Junshan Fang <Junshan.Fang@amd.com>
+Date: Tue, 8 May 2018 10:59:45 +0800
+Subject: [PATCH 4260/5725] Hybrid Version: 18.30.1.15
+
+Change-Id: I9465b82d0fa9d83f17adf88f59c26e2d81fe5302
+Signed-off-by: Junshan Fang <Junshan.Fang@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index d05760e..677e62c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -81,7 +81,7 @@
+ #define KMS_DRIVER_MINOR 26
+ #define KMS_DRIVER_PATCHLEVEL 0
+
+-#define AMDGPU_VERSION "18.30.0.15"
++#define AMDGPU_VERSION "18.30.1.15"
+
+ int amdgpu_vram_limit = 0;
+ int amdgpu_vis_vram_limit = 0;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4261-Revert-drm-amdgpu-set-COMPUTE_PGM_RSRC1-for-SGPR-VGP.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4261-Revert-drm-amdgpu-set-COMPUTE_PGM_RSRC1-for-SGPR-VGP.patch
new file mode 100644
index 00000000..89be6ef3
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4261-Revert-drm-amdgpu-set-COMPUTE_PGM_RSRC1-for-SGPR-VGP.patch
@@ -0,0 +1,54 @@
+From b57fd7e8738e2cf8fca9896018115ff70a5f98e0 Mon Sep 17 00:00:00 2001
+From: Kevin1 Wang <Kevin1.Wang@amd.com>
+Date: Wed, 9 May 2018 00:52:01 -0400
+Subject: [PATCH 4261/5725] Revert "drm/amdgpu: set COMPUTE_PGM_RSRC1 for
+ SGPR/VGPR clearing shaders"
+
+This reverts commit c7915157243f06eca018fad9ef4f5a35d872b8f5.
+
+Change-Id: Idecc849a388c09a348af532dc863e1d91f5f96d3
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 7 ++-----
+ 1 file changed, 2 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index befc7a0..4c9ea8d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -1459,11 +1459,10 @@ static const u32 sgpr_init_compute_shader[] =
+ static const u32 vgpr_init_regs[] =
+ {
+ mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xffffffff,
+- mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
++ mmCOMPUTE_RESOURCE_LIMITS, 0,
+ mmCOMPUTE_NUM_THREAD_X, 256*4,
+ mmCOMPUTE_NUM_THREAD_Y, 1,
+ mmCOMPUTE_NUM_THREAD_Z, 1,
+- mmCOMPUTE_PGM_RSRC1, 0x100004f, /* VGPRS=15 (64 logical VGPRs), SGPRS=1 (16 SGPRs), BULKY=1 */
+ mmCOMPUTE_PGM_RSRC2, 20,
+ mmCOMPUTE_USER_DATA_0, 0xedcedc00,
+ mmCOMPUTE_USER_DATA_1, 0xedcedc01,
+@@ -1480,11 +1479,10 @@ static const u32 vgpr_init_regs[] =
+ static const u32 sgpr1_init_regs[] =
+ {
+ mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0x0f,
+- mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
++ mmCOMPUTE_RESOURCE_LIMITS, 0x1000000,
+ mmCOMPUTE_NUM_THREAD_X, 256*5,
+ mmCOMPUTE_NUM_THREAD_Y, 1,
+ mmCOMPUTE_NUM_THREAD_Z, 1,
+- mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
+ mmCOMPUTE_PGM_RSRC2, 20,
+ mmCOMPUTE_USER_DATA_0, 0xedcedc00,
+ mmCOMPUTE_USER_DATA_1, 0xedcedc01,
+@@ -1505,7 +1503,6 @@ static const u32 sgpr2_init_regs[] =
+ mmCOMPUTE_NUM_THREAD_X, 256*5,
+ mmCOMPUTE_NUM_THREAD_Y, 1,
+ mmCOMPUTE_NUM_THREAD_Z, 1,
+- mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
+ mmCOMPUTE_PGM_RSRC2, 20,
+ mmCOMPUTE_USER_DATA_0, 0xedcedc00,
+ mmCOMPUTE_USER_DATA_1, 0xedcedc01,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4262-drm-amdgpu-change-pp_dpm-clk-mclk-pcie-input-format.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4262-drm-amdgpu-change-pp_dpm-clk-mclk-pcie-input-format.patch
new file mode 100644
index 00000000..dd73107a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4262-drm-amdgpu-change-pp_dpm-clk-mclk-pcie-input-format.patch
@@ -0,0 +1,170 @@
+From 251a1de871bf614d36f5bfd4e2b4630836d06729 Mon Sep 17 00:00:00 2001
+From: welu <wei.lu2@amd.com>
+Date: Tue, 24 Apr 2018 09:13:20 -0400
+Subject: [PATCH 4262/5725] drm/amdgpu: change pp_dpm clk/mclk/pcie input
+ format.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+1. Support more than 8 values when setting pp_dpm_mclk/
+sclk/pcie. The former design only parsed a command format like
+"echo xxxx > pp_dpm_sclk"; the current one can also parse "echo xx xxx
+ xxxx > pp_dpm_sclk", which is more user-friendly
+and convenient and can accept more values.
+2. Stay compatible with the former format like "xx".
+3. Add DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_pcie
+Bug: KFD-385
+
+Change-Id: Ic2d4deebce03a84104d7da058b0b281c66407772
+Signed-off-by: welu <wei.lu2@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 103 +++++++++++++++++++--------------
+ 1 file changed, 59 insertions(+), 44 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+index 4cbce49..6f07b51 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+@@ -574,10 +574,10 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
+ * the power state and the clock information for those levels.
+ *
+ * To manually adjust these states, first select manual using
+- * power_dpm_force_performance_level. Writing a string of the level
+- * numbers to the file will select which levels you want to enable.
+- * E.g., writing 456 to the file will enable levels 4, 5, and 6.
+- *
++ * power_dpm_force_performance_level.
++ * Secondly, enter a new value for each level by inputting a string that
++ * contains "echo xx xx xx > pp_dpm_sclk/mclk/pcie".
++ * E.g., "echo 4 5 6 > pp_dpm_sclk" will enable sclk levels 4, 5, and 6.
+ */
+
+ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
+@@ -602,23 +602,27 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
+ struct amdgpu_device *adev = ddev->dev_private;
+ int ret;
+ long level;
+- uint32_t i, mask = 0;
+- char sub_str[2];
++ uint32_t mask = 0;
++ char *sub_str = NULL;
++ char *tmp;
++ char buf_cpy[count];
++ const char delimiter[3] = {' ', '\n', '\0'};
+
+- for (i = 0; i < strlen(buf); i++) {
+- if (*(buf + i) == '\n')
+- continue;
+- sub_str[0] = *(buf + i);
+- sub_str[1] = '\0';
+- ret = kstrtol(sub_str, 0, &level);
++ memcpy(buf_cpy, buf, count+1);
++ tmp = buf_cpy;
++ while (tmp[0]) {
++ sub_str = strsep(&tmp, delimiter);
++ if (strlen(sub_str)) {
++ ret = kstrtol(sub_str, 0, &level);
+
+- if (ret) {
+- count = -EINVAL;
+- goto fail;
+- }
+- mask |= 1 << level;
++ if (ret) {
++ count = -EINVAL;
++ goto fail;
++ }
++ mask |= 1 << level;
++ } else
++ break;
+ }
+-
+ if (adev->powerplay.pp_funcs->force_clock_level)
+ amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
+
+@@ -648,21 +652,26 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
+ struct amdgpu_device *adev = ddev->dev_private;
+ int ret;
+ long level;
+- uint32_t i, mask = 0;
+- char sub_str[2];
++ uint32_t mask = 0;
++ char *sub_str = NULL;
++ char *tmp;
++ char buf_cpy[count];
++ const char delimiter[3] = {' ', '\n', '\0'};
+
+- for (i = 0; i < strlen(buf); i++) {
+- if (*(buf + i) == '\n')
+- continue;
+- sub_str[0] = *(buf + i);
+- sub_str[1] = '\0';
+- ret = kstrtol(sub_str, 0, &level);
++ memcpy(buf_cpy, buf, count+1);
++ tmp = buf_cpy;
++ while (tmp[0]) {
++ sub_str = strsep(&tmp, delimiter);
++ if (strlen(sub_str)) {
++ ret = kstrtol(sub_str, 0, &level);
+
+- if (ret) {
+- count = -EINVAL;
+- goto fail;
+- }
+- mask |= 1 << level;
++ if (ret) {
++ count = -EINVAL;
++ goto fail;
++ }
++ mask |= 1 << level;
++ } else
++ break;
+ }
+ if (adev->powerplay.pp_funcs->force_clock_level)
+ amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
+@@ -693,21 +702,27 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
+ struct amdgpu_device *adev = ddev->dev_private;
+ int ret;
+ long level;
+- uint32_t i, mask = 0;
+- char sub_str[2];
++ uint32_t mask = 0;
++ char *sub_str = NULL;
++ char *tmp;
++ char buf_cpy[count];
++ const char delimiter[3] = {' ', '\n', '\0'};
+
+- for (i = 0; i < strlen(buf); i++) {
+- if (*(buf + i) == '\n')
+- continue;
+- sub_str[0] = *(buf + i);
+- sub_str[1] = '\0';
+- ret = kstrtol(sub_str, 0, &level);
++ memcpy(buf_cpy, buf, count+1);
++ tmp = buf_cpy;
+
+- if (ret) {
+- count = -EINVAL;
+- goto fail;
+- }
+- mask |= 1 << level;
++ while (tmp[0]) {
++ sub_str = strsep(&tmp, delimiter);
++ if (strlen(sub_str)) {
++ ret = kstrtol(sub_str, 0, &level);
++
++ if (ret) {
++ count = -EINVAL;
++ goto fail;
++ }
++ mask |= 1 << level;
++ } else
++ break;
+ }
+ if (adev->powerplay.pp_funcs->force_clock_level)
+ amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4263-drm-amdgpu-fix-amdgpu_atpx_get_client_id-s-return-ty.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4263-drm-amdgpu-fix-amdgpu_atpx_get_client_id-s-return-ty.patch
new file mode 100644
index 00000000..7302c44f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4263-drm-amdgpu-fix-amdgpu_atpx_get_client_id-s-return-ty.patch
@@ -0,0 +1,35 @@
+From fb342b9a640952577d8272e644ee5aa0d5119a38 Mon Sep 17 00:00:00 2001
+From: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
+Date: Tue, 24 Apr 2018 15:15:34 +0200
+Subject: [PATCH 4263/5725] drm/amdgpu: fix amdgpu_atpx_get_client_id()'s
+ return type
+
+The method struct vga_switcheroo_handler::get_client_id() is defined
+as returning an 'enum vga_switcheroo_client_id' but the implementation
+in this driver, amdgpu_atpx_get_client_id(), returns an 'int'.
+
+Fix this by returning 'enum vga_switcheroo_client_id' in this driver too.
+
+Change-Id: I715369ee7579a72692872688df25f364e93847e9
+Signed-off-by: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+index 6021dc4..d5186b9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+@@ -550,7 +550,7 @@ static int amdgpu_atpx_init(void)
+ * look up whether we are the integrated or discrete GPU (all asics).
+ * Returns the client id.
+ */
+-static int amdgpu_atpx_get_client_id(struct pci_dev *pdev)
++static enum vga_switcheroo_client_id amdgpu_atpx_get_client_id(struct pci_dev *pdev)
+ {
+ if (amdgpu_atpx_priv.dhandle == ACPI_HANDLE(&pdev->dev))
+ return VGA_SWITCHEROO_IGD;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4264-drm-amdgpu-Set-graphics-noretry-to-1.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4264-drm-amdgpu-Set-graphics-noretry-to-1.patch
new file mode 100644
index 00000000..9aee56c1
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4264-drm-amdgpu-Set-graphics-noretry-to-1.patch
@@ -0,0 +1,38 @@
+From 459511eca2dc3111d0d86069e59c4fb6958452df Mon Sep 17 00:00:00 2001
+From: Philip Yang <Philip.Yang@amd.com>
+Date: Tue, 27 Mar 2018 18:15:59 -0400
+Subject: [PATCH 4264/5725] drm/amdgpu: Set graphics noretry to 1
+
+Fix graphics hang issue while compute noretry is set to 1
+
+BUG: SWDEV-146501
+
+Change-Id: If3f5a692119a953c14cb18a823ac82e611886a98
+Signed-off-by: Philip Yang <Philip.Yang@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 6105d81..9a3d62b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -1686,11 +1686,15 @@ static void gfx_v9_0_gpu_init(struct amdgpu_device *adev)
+ if (i == 0) {
+ tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
+ SH_MEM_ALIGNMENT_MODE_UNALIGNED);
++ tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
++ 1);
+ WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
+ WREG32_SOC15(GC, 0, mmSH_MEM_BASES, 0);
+ } else {
+ tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
+ SH_MEM_ALIGNMENT_MODE_UNALIGNED);
++ tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
++ 1);
+ WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
+ tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
+ (adev->gmc.private_aperture_start >> 48));
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4265-drm-amdfd-Don-t-hard-code-wait-time.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4265-drm-amdfd-Don-t-hard-code-wait-time.patch
new file mode 100644
index 00000000..89988dd2
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4265-drm-amdfd-Don-t-hard-code-wait-time.patch
@@ -0,0 +1,56 @@
+From 807d1e07d5c965134b482c04314141d444dd75a7 Mon Sep 17 00:00:00 2001
+From: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+Date: Fri, 13 Apr 2018 14:48:10 -0400
+Subject: [PATCH 4265/5725] drm/amdfd: Don't hard code wait time
+
+Also, dma_fence_wait_timeout() returns 0 if the fence timed out. Handle that.
+
+Change-Id: Ia5f4f97f35d3dac0b5263449a366d9a051664598
+Signed-off-by: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 21 ++++++++++++++++++---
+ 1 file changed, 18 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index 98d9b1b..9426a66 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -1838,6 +1838,21 @@ static void kfd_free_cma_bos(struct cma_iter *ci)
+ }
+ }
+
++/* 1 second timeout */
++#define CMA_WAIT_TIMEOUT msecs_to_jiffies(1000)
++
++static int kfd_cma_fence_wait(struct dma_fence *f)
++{
++ int ret;
++
++ ret = dma_fence_wait_timeout(f, false, CMA_WAIT_TIMEOUT);
++ if (likely(ret > 0))
++ return 0;
++ if (!ret)
++ ret = -ETIME;
++ return ret;
++}
++
+ /* Create a system BO by pinning underlying system pages of the given userptr
+ * BO @ubo
+ * @ubo: Userptr BO
+@@ -2366,10 +2381,10 @@ static int kfd_ioctl_cross_memory_copy(struct file *filep,
+
+ /* Wait for the last fence irrespective of error condition */
+ if (lfence) {
+- if (dma_fence_wait_timeout(lfence, false,
+- msecs_to_jiffies(1000)) < 0)
+- pr_err("CMA %s failed. BO timed out\n", cma_op);
++ err = kfd_cma_fence_wait(lfence);
+ dma_fence_put(lfence);
++ if (err)
++ pr_err("CMA %s failed. BO timed out\n", cma_op);
+ }
+
+ kfd_free_cma_bos(&si);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4266-drm-amdkfd-CMA-Add-intermediate-wait-if-mGPU.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4266-drm-amdkfd-CMA-Add-intermediate-wait-if-mGPU.patch
new file mode 100644
index 00000000..8c8c2bbd
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4266-drm-amdkfd-CMA-Add-intermediate-wait-if-mGPU.patch
@@ -0,0 +1,77 @@
+From dcc8d6fb42976e899c75091092c3ecb1532b6afa Mon Sep 17 00:00:00 2001
+From: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+Date: Thu, 12 Apr 2018 14:24:22 -0400
+Subject: [PATCH 4266/5725] drm/amdkfd: CMA: Add intermediate wait if mGPU
+
+CMA can happen on multiple GPUs. The current approach of keeping track
+of only the latest fence is not sufficient. Before throwing away the old
+fence, check if it belongs to the same context. If not, wait before
+releasing it.
+
+The current approach will be suboptimal in an mGPU (> 2) system if the CMA
+ioctl is called with a long list of memory ranges where potentially each
+range copy could be done by a different GPU. In this situation, the better
+approach would be to call the ioctl repeatedly with shorter lists.
+
+Change-Id: Icf522cf8bfa648e24900745622600f920c0de320
+Signed-off-by: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 25 +++++++++++++++++++++++--
+ 1 file changed, 23 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index 9426a66..b07fe36 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -1853,6 +1853,20 @@ static int kfd_cma_fence_wait(struct dma_fence *f)
+ return ret;
+ }
+
++/* Put previous (old) fence @pf but it waits for @pf to signal if the context
++ * of the current fence @cf is different.
++ */
++static int kfd_fence_put_wait_if_diff_context(struct dma_fence *cf,
++ struct dma_fence *pf)
++{
++ int ret = 0;
++
++ if (pf && cf && cf->context != pf->context)
++ ret = kfd_cma_fence_wait(pf);
++ dma_fence_put(pf);
++ return ret;
++}
++
+ /* Create a system BO by pinning underlying system pages of the given userptr
+ * BO @ubo
+ * @ubo: Userptr BO
+@@ -2230,9 +2244,13 @@ static int kfd_copy_single_range(struct cma_iter *si, struct cma_iter *di,
+ }
+
+ if (fence) {
+- dma_fence_put(lfence);
++ err = kfd_fence_put_wait_if_diff_context(fence,
++ lfence);
+ lfence = fence;
++ if (err)
++ break;
+ }
++
+ size -= n;
+ *copied += n;
+ err = kfd_cma_iter_advance(si, n);
+@@ -2374,8 +2392,11 @@ static int kfd_ioctl_cross_memory_copy(struct file *filep,
+ * new fence is created, then keep the preivous fence
+ */
+ if (fence) {
+- dma_fence_put(lfence);
++ err = kfd_fence_put_wait_if_diff_context(fence,
++ lfence);
+ lfence = fence;
++ if (err)
++ break;
+ }
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4267-drm-amdkfd-CMA-Support-multi-device-VRAM-copy.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4267-drm-amdkfd-CMA-Support-multi-device-VRAM-copy.patch
new file mode 100644
index 00000000..33b35239
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4267-drm-amdkfd-CMA-Support-multi-device-VRAM-copy.patch
@@ -0,0 +1,223 @@
+From aae9664f4449916f2f353727bdddceb1e98c3752 Mon Sep 17 00:00:00 2001
+From: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+Date: Thu, 12 Apr 2018 14:56:17 -0400
+Subject: [PATCH 4267/5725] drm/amdkfd: CMA: Support multi device VRAM copy
+
+Support copy from VRAM on device1 to VRAM on device2. This is done using
+an intermediate System BO and double copy.
+ [VRAM]--gpu1-->[System BO]--gpu2-->[VRAM]
+
+BUG: SWDEV-150755
+
+Change-Id: I7edf2df3cc1688c1ebd1fa0ea8fa82d39cbf50d1
+Signed-off-by: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 124 +++++++++++++++++++++++--------
+ 1 file changed, 95 insertions(+), 29 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index b07fe36..66c294a 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -1831,7 +1831,8 @@ static void kfd_free_cma_bos(struct cma_iter *ci)
+ struct kfd_dev *dev = cma_bo->dev;
+
+ /* sg table is deleted by free_memory_of_gpu */
+- kfd_put_sg_table(cma_bo->sg);
++ if (cma_bo->sg)
++ kfd_put_sg_table(cma_bo->sg);
+ dev->kfd2kgd->free_memory_of_gpu(dev->kgd, cma_bo->mem);
+ list_del(&cma_bo->list);
+ kfree(cma_bo);
+@@ -1867,16 +1868,21 @@ static int kfd_fence_put_wait_if_diff_context(struct dma_fence *cf,
+ return ret;
+ }
+
+-/* Create a system BO by pinning underlying system pages of the given userptr
+- * BO @ubo
+- * @ubo: Userptr BO
+- * @offset: Offset into ubo
++#define MAX_SYSTEM_BO_SIZE (512*PAGE_SIZE)
++
++/* Create an equivalent system BO for the given @bo. If @bo is a userptr then
++ * create a new system BO by pinning underlying system pages of the given
++ * userptr BO. If @bo is in Local Memory then create an empty system BO and
++ * then copy @bo into this new BO.
++ * @bo: Userptr BO or Local Memory BO
++ * @offset: Offset into bo
+ * @size: in/out: The size of the new BO could be less than requested if all
+- * the pages couldn't be pinned. This would be reflected in @size
+- * @mm/@task: mm/task to which @ubo belongs to
++ * the pages couldn't be pinned or size > MAX_SYSTEM_BO_SIZE. This would
++ * be reflected in @size
++ * @mm/@task: mm/task to which @bo belongs to
+ * @cma_bo: out: new system BO
+ */
+-static int kfd_create_cma_system_bo(struct kfd_dev *kdev, struct kfd_bo *ubo,
++static int kfd_create_cma_system_bo(struct kfd_dev *kdev, struct kfd_bo *bo,
+ uint64_t *size, uint64_t offset,
+ int cma_write, struct kfd_process *p,
+ struct mm_struct *mm,
+@@ -1886,7 +1892,8 @@ static int kfd_create_cma_system_bo(struct kfd_dev *kdev, struct kfd_bo *ubo,
+ int ret;
+ struct kfd_process_device *pdd = NULL;
+ struct cma_system_bo *cbo;
+- uint64_t sg_size;
++ uint64_t bo_size = 0;
++ struct dma_fence *f;
+
+ uint32_t flags = ALLOC_MEM_FLAGS_GTT | ALLOC_MEM_FLAGS_NONPAGED |
+ ALLOC_MEM_FLAGS_NO_SUBSTITUTE;
+@@ -1897,40 +1904,75 @@ static int kfd_create_cma_system_bo(struct kfd_dev *kdev, struct kfd_bo *ubo,
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&cbo->list);
+- ret = kfd_create_sg_table_from_userptr_bo(ubo, offset, cma_write, mm,
+- task, size, &sg_size,
+- &cbo->sg);
+- if (ret) {
+- pr_err("Failed to create system BO. sg table error %d\n", ret);
+- return ret;
++ if (bo->mem_type == KFD_IOC_ALLOC_MEM_FLAGS_VRAM)
++ bo_size = min(*size, MAX_SYSTEM_BO_SIZE);
++ else if (bo->cpuva) {
++ ret = kfd_create_sg_table_from_userptr_bo(bo, offset,
++ cma_write, mm, task,
++ size, &bo_size,
++ &cbo->sg);
++ if (ret) {
++ pr_err("CMA: BO create with sg failed %d\n", ret);
++ goto sg_fail;
++ }
++ } else {
++ WARN_ON(1);
++ ret = -EINVAL;
++ goto sg_fail;
+ }
+-
+ mutex_lock(&p->mutex);
+ pdd = kfd_get_process_device_data(kdev, p);
+ if (!pdd) {
++ mutex_unlock(&p->mutex);
+ pr_err("Process device data doesn't exist\n");
+ ret = -EINVAL;
+ goto pdd_fail;
+ }
+
+- ret = kdev->kfd2kgd->alloc_memory_of_gpu(kdev->kgd, 0ULL, sg_size,
++ ret = kdev->kfd2kgd->alloc_memory_of_gpu(kdev->kgd, 0ULL, bo_size,
+ pdd->vm, cbo->sg,
+ &cbo->mem, NULL, flags);
++ mutex_unlock(&p->mutex);
+ if (ret) {
+ pr_err("Failed to create shadow system BO %d\n", ret);
+ goto pdd_fail;
+ }
+- mutex_unlock(&p->mutex);
++
++ if (bo->mem_type == KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
++ ret = kdev->kfd2kgd->copy_mem_to_mem(kdev->kgd, bo->mem,
++ offset, cbo->mem, 0,
++ bo_size, &f, size);
++ if (ret) {
++ pr_err("CMA: Intermediate copy failed %d\n", ret);
++ goto copy_fail;
++ }
++
++ /* Wait for the copy to finish as subsequent copy will be done
++ * by different device
++ */
++ ret = kfd_cma_fence_wait(f);
++ dma_fence_put(f);
++ if (ret) {
++ pr_err("CMA: Intermediate copy timed out %d\n", ret);
++ goto copy_fail;
++ }
++ }
++
+ cbo->dev = kdev;
+ *cma_bo = cbo;
+
+ return ret;
+
++copy_fail:
++ kdev->kfd2kgd->free_memory_of_gpu(kdev->kgd, bo->mem);
+ pdd_fail:
+- mutex_unlock(&p->mutex);
+- kfd_put_sg_table(cbo->sg);
+- sg_free_table(cbo->sg);
+- kfree(cbo->sg);
++ if (cbo->sg) {
++ kfd_put_sg_table(cbo->sg);
++ sg_free_table(cbo->sg);
++ kfree(cbo->sg);
++ }
++sg_fail:
++ kfree(cbo);
+ return ret;
+ }
+
+@@ -2153,6 +2195,7 @@ static int kfd_copy_bos(struct cma_iter *si, struct cma_iter *di,
+ uint64_t src_offset = si->bo_offset, dst_offset = di->bo_offset;
+ struct kgd_mem *src_mem = src_bo->mem, *dst_mem = dst_bo->mem;
+ struct kfd_dev *dev = dst_bo->dev;
++ struct cma_system_bo *tmp_bo = NULL;
+
+ *copied = 0;
+ if (f)
+@@ -2188,11 +2231,22 @@ static int kfd_copy_bos(struct cma_iter *si, struct cma_iter *di,
+ dst_offset = di->bo_offset & (PAGE_SIZE - 1);
+ list_add_tail(&di->cma_bo->list, &di->cma_list);
+ } else if (src_bo->dev->kgd != dst_bo->dev->kgd) {
+- /* This indicates that either or/both BOs are in local mem. */
++ /* This indicates that at least one of the BOs is in local mem.
++ * If both are in local mem of different devices then create an
++ * intermediate System BO and do a double copy
++ * [VRAM]--gpu1-->[System BO]--gpu2-->[VRAM].
++ * If only one BO is in VRAM then use that GPU to do the copy
++ */
+ if (src_bo->mem_type == KFD_IOC_ALLOC_MEM_FLAGS_VRAM &&
+ dst_bo->mem_type == KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
+- pr_err("CMA fail. Local mem & not in same dev\n");
+- return -EINVAL;
++ dev = dst_bo->dev;
++ err = kfd_create_cma_system_bo(src_bo->dev, src_bo,
++ &size, si->bo_offset,
++ cma_write, si->p,
++ si->mm, si->task,
++ &tmp_bo);
++ src_mem = tmp_bo->mem;
++ src_offset = 0;
+ } else if (src_bo->mem_type == KFD_IOC_ALLOC_MEM_FLAGS_VRAM)
+ dev = src_bo->dev;
+ /* else already set to dst_bo->dev */
+@@ -2203,10 +2257,22 @@ static int kfd_copy_bos(struct cma_iter *si, struct cma_iter *di,
+ return -EINVAL;
+ }
+
+- err = dst_bo->dev->kfd2kgd->copy_mem_to_mem(dev->kgd, src_mem,
+- src_offset, dst_mem,
+- dst_offset, size, f,
+- copied);
++ err = dev->kfd2kgd->copy_mem_to_mem(dev->kgd, src_mem, src_offset,
++ dst_mem, dst_offset, size, f,
++ copied);
++ /* The tmp_bo allocates additional memory, so it is better to wait and
++ * then delete it. Also, since multiple GPUs are involved, the copies are
++ * currently not pipelined.
++ */
++ if (tmp_bo) {
++ if (!err) {
++ kfd_cma_fence_wait(*f);
++ dma_fence_put(*f);
++ *f = NULL;
++ }
++ dev->kfd2kgd->free_memory_of_gpu(dev->kgd, tmp_bo->mem);
++ kfree(tmp_bo);
++ }
+ return err;
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4268-drm-amdkfd-Reduce-priority-of-context-saving-waves-b.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4268-drm-amdkfd-Reduce-priority-of-context-saving-waves-b.patch
new file mode 100644
index 00000000..9f40c73c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4268-drm-amdkfd-Reduce-priority-of-context-saving-waves-b.patch
@@ -0,0 +1,107 @@
+From eec7aa12bac1be97a2de95ec430b97ba8156c9a2 Mon Sep 17 00:00:00 2001
+From: Jay Cornwall <Jay.Cornwall@amd.com>
+Date: Mon, 16 Apr 2018 18:39:08 -0500
+Subject: [PATCH 4268/5725] drm/amdkfd: Reduce priority of context-saving waves
+ before spin-wait
+
+Synchronization between context-saving wavefronts is achieved by
+sending a SAVEWAVE message to the SPI and then spin-waiting for a
+response. These spin-waiting wavefronts may inhibit the progress
+of other wavefronts in the context save handler, leading to the
+synchronization condition never being achieved.
+
+Before spin-waiting, reduce the priority of each wavefront to guarantee
+forward progress in the others.
+
+Change-Id: Ibd10aa30f7d836a6c4890f68887c9b62b676aabc
+Signed-off-by: Jay Cornwall <Jay.Cornwall@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm | 10 ++++++++--
+ drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm | 8 +++++++-
+ 2 files changed, 15 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
+index 751cc2e..dec5ea4 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
+@@ -98,6 +98,7 @@ var SWIZZLE_EN = 0 //whether we use swi
+ /**************************************************************************/
+ var SQ_WAVE_STATUS_INST_ATC_SHIFT = 23
+ var SQ_WAVE_STATUS_INST_ATC_MASK = 0x00800000
++var SQ_WAVE_STATUS_SPI_PRIO_SHIFT = 1
+ var SQ_WAVE_STATUS_SPI_PRIO_MASK = 0x00000006
+
+ var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12
+@@ -319,6 +320,10 @@ end
+ s_sendmsg sendmsg(MSG_SAVEWAVE) //send SPI a message and wait for SPI's write to EXEC
+ end
+
++ // Set SPI_PRIO=2 to avoid starving instruction fetch in the waves we're waiting for.
++ s_or_b32 s_save_tmp, s_save_status, (2 << SQ_WAVE_STATUS_SPI_PRIO_SHIFT)
++ s_setreg_b32 hwreg(HW_REG_STATUS), s_save_tmp
++
+ L_SLEEP:
+ s_sleep 0x2 // sleep 1 (64clk) is not enough for 8 waves per SIMD, which will cause SQ hang, since the 7,8th wave could not get arbit to exec inst, while other waves are stuck into the sleep-loop and waiting for wrexec!=0
+
+@@ -1132,7 +1137,7 @@ end
+ #endif
+
+ static const uint32_t cwsr_trap_gfx8_hex[] = {
+- 0xbf820001, 0xbf820123,
++ 0xbf820001, 0xbf820125,
+ 0xb8f4f802, 0x89748674,
+ 0xb8f5f803, 0x8675ff75,
+ 0x00000400, 0xbf850011,
+@@ -1158,7 +1163,8 @@ static const uint32_t cwsr_trap_gfx8_hex[] = {
+ 0x867aff7a, 0x00007fff,
+ 0xb97af807, 0xbef2007e,
+ 0xbef3007f, 0xbefe0180,
+- 0xbf900004, 0xbf8e0002,
++ 0xbf900004, 0x877a8474,
++ 0xb97af802, 0xbf8e0002,
+ 0xbf88fffe, 0xbef8007e,
+ 0x8679ff7f, 0x0000ffff,
+ 0x8779ff79, 0x00040000,
+diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+index 8ef6b44..adb3308 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+@@ -97,6 +97,7 @@ var ACK_SQC_STORE = 1 //workaround for suspected SQC store bug causing
+ /**************************************************************************/
+ var SQ_WAVE_STATUS_INST_ATC_SHIFT = 23
+ var SQ_WAVE_STATUS_INST_ATC_MASK = 0x00800000
++var SQ_WAVE_STATUS_SPI_PRIO_SHIFT = 1
+ var SQ_WAVE_STATUS_SPI_PRIO_MASK = 0x00000006
+ var SQ_WAVE_STATUS_HALT_MASK = 0x2000
+
+@@ -362,6 +363,10 @@ end
+ s_sendmsg sendmsg(MSG_SAVEWAVE) //send SPI a message and wait for SPI's write to EXEC
+ end
+
++ // Set SPI_PRIO=2 to avoid starving instruction fetch in the waves we're waiting for.
++ s_or_b32 s_save_tmp, s_save_status, (2 << SQ_WAVE_STATUS_SPI_PRIO_SHIFT)
++ s_setreg_b32 hwreg(HW_REG_STATUS), s_save_tmp
++
+ L_SLEEP:
+ s_sleep 0x2 // sleep 1 (64clk) is not enough for 8 waves per SIMD, which will cause SQ hang, since the 7,8th wave could not get arbit to exec inst, while other waves are stuck into the sleep-loop and waiting for wrexec!=0
+
+@@ -1210,7 +1215,7 @@ end
+ #endif
+
+ static const uint32_t cwsr_trap_gfx9_hex[] = {
+- 0xbf820001, 0xbf820158,
++ 0xbf820001, 0xbf82015a,
+ 0xb8f8f802, 0x89788678,
+ 0xb8f1f803, 0x866eff71,
+ 0x00000400, 0xbf850034,
+@@ -1249,6 +1254,7 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
+ 0x00007fff, 0xb970f807,
+ 0xbeee007e, 0xbeef007f,
+ 0xbefe0180, 0xbf900004,
++ 0x87708478, 0xb970f802,
+ 0xbf8e0002, 0xbf88fffe,
+ 0xb8f02a05, 0x80708170,
+ 0x8e708a70, 0xb8f11605,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4269-drm-amdkfd-Introduce-kfd-kernel-module-parameter-hal.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4269-drm-amdkfd-Introduce-kfd-kernel-module-parameter-hal.patch
new file mode 100644
index 00000000..069ad7ed
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4269-drm-amdkfd-Introduce-kfd-kernel-module-parameter-hal.patch
@@ -0,0 +1,75 @@
+From 3590cde3de2f22ceb930666c3f59a58d81f2a458 Mon Sep 17 00:00:00 2001
+From: Yong Zhao <yong.zhao@amd.com>
+Date: Fri, 20 Apr 2018 15:45:02 -0400
+Subject: [PATCH 4269/5725] drm/amdkfd: Introduce kfd kernel module parameter
+ halt_if_hws_hang
+
+The parameter will enable developers to do scandumps without the need to
+change, rebuild and redeploy the kernel.
+
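+For example (a sketch, assuming the module is built as amdkfd and the
+parameter keeps its 0644 permissions), the flag can be toggled at runtime
+through sysfs:
+
+    echo 1 > /sys/module/amdkfd/parameters/halt_if_hws_hang
+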
+Signed-off-by: Yong Zhao <yong.zhao@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+
+Conflicts:
+ drivers/gpu/drm/amd/amdkfd/kfd_module.c
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+
+Change-Id: Iefe34cbaafb3831c3f008ca81bcbd0a3304e692a
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 7 +++++++
+ drivers/gpu/drm/amd/amdkfd/kfd_module.c | 4 ++++
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 5 +++++
+ 3 files changed, 16 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index 82c7dbe..e60aaf8 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -1230,6 +1230,13 @@ int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
+ while (*fence_addr != fence_value) {
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("qcm fence wait loop timeout expired\n");
++ /* In the HWS case, this is used to halt the driver thread
++ * in order not to mess up CP states before doing
++ * scandumps for FW debugging.
++ */
++ while (halt_if_hws_hang)
++ schedule();
++
+ return -ETIME;
+ }
+ schedule();
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+index 34d44ff..ab0bb2d 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+@@ -97,6 +97,10 @@ module_param(priv_cp_queues, int, 0644);
+ MODULE_PARM_DESC(priv_cp_queues,
+ "Enable privileged mode for CP queues (0 = off (default), 1 = on)");
+
++int halt_if_hws_hang;
++module_param(halt_if_hws_hang, int, 0644);
++MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (default), 1 = on)");
++
+ int kgd2kfd_init(unsigned int interface_version,
+ const struct kgd2kfd_calls **g2f)
+ {
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index 3f77bab..f721b99 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -152,6 +152,11 @@ extern int vega10_noretry;
+ */
+ extern int priv_cp_queues;
+
++/*
++ * Halt if HWS hang is detected
++ */
++extern int halt_if_hws_hang;
++
+ /**
+ * enum kfd_sched_policy
+ *
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4270-drm-amdkfd-Use-module-parameters-noretry-as-the-inte.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4270-drm-amdkfd-Use-module-parameters-noretry-as-the-inte.patch
new file mode 100644
index 00000000..4447f5a7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4270-drm-amdkfd-Use-module-parameters-noretry-as-the-inte.patch
@@ -0,0 +1,93 @@
+From 6739356b5880a9bad6ded31ec318abbafb3051a7 Mon Sep 17 00:00:00 2001
+From: Yong Zhao <yong.zhao@amd.com>
+Date: Fri, 20 Apr 2018 15:50:28 -0400
+Subject: [PATCH 4270/5725] drm/amdkfd: Use module parameters noretry as the
+ internal variable name
+
+This makes all module parameters use the same form. Meanwhile, clean up
+the surrounding code.
+
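+For example (a sketch, assuming the module is built as amdkfd), retry faults
+could be re-enabled under the renamed option from the kernel command line with:
+
+    amdkfd.noretry=0
+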
+Change-Id: I0f6d8db10e66256f3971cc4da4c1328a63b0101c
+Signed-off-by: Yong Zhao <yong.zhao@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c | 2 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_module.c | 14 ++++++++------
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 2 +-
+ 3 files changed, 10 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
+index 6198bf2..cc27190 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
+@@ -60,7 +60,7 @@ static int update_qpd_v9(struct device_queue_manager *dqm,
+ qpd->sh_mem_config =
+ SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+ SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
+- if (vega10_noretry &&
++ if (noretry &&
+ !dqm->dev->device_info->needs_iommu_device)
+ qpd->sh_mem_config |=
+ 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+index ab0bb2d..a05f734 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+@@ -63,7 +63,7 @@ MODULE_PARM_DESC(hws_max_conc_proc,
+
+ int cwsr_enable = 1;
+ module_param(cwsr_enable, int, 0444);
+-MODULE_PARM_DESC(cwsr_enable, "CWSR enable (0 = Off, 1 = On (Default))");
++MODULE_PARM_DESC(cwsr_enable, "CWSR enable (0 = off, 1 = on (default))");
+
+ int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT;
+ module_param(max_num_of_queues_per_device, int, 0444);
+@@ -75,8 +75,6 @@ module_param(send_sigterm, int, 0444);
+ MODULE_PARM_DESC(send_sigterm,
+ "Send sigterm to HSA process on unhandled exception (0 = disable, 1 = enable)");
+
+-static int amdkfd_init_completed;
+-
+ int debug_largebar;
+ module_param(debug_largebar, int, 0444);
+ MODULE_PARM_DESC(debug_largebar,
+@@ -87,10 +85,10 @@ module_param(ignore_crat, int, 0444);
+ MODULE_PARM_DESC(ignore_crat,
+ "Ignore CRAT table during KFD initialization (0 = use CRAT (default), 1 = ignore CRAT)");
+
+-int vega10_noretry = 1;
+-module_param_named(noretry, vega10_noretry, int, 0644);
++int noretry = 1;
++module_param(noretry, int, 0644);
+ MODULE_PARM_DESC(noretry,
+- "Set sh_mem_config.retry_disable on Vega10 (0 = retry enabled, 1 = retry disabled (default))");
++ "Set sh_mem_config.retry_disable on GFXv9+ dGPUs (0 = retry enabled, 1 = retry disabled (default))");
+
+ int priv_cp_queues;
+ module_param(priv_cp_queues, int, 0644);
+@@ -101,6 +99,10 @@ int halt_if_hws_hang;
+ module_param(halt_if_hws_hang, int, 0644);
+ MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (default), 1 = on)");
+
++
++static int amdkfd_init_completed;
++
++
+ int kgd2kfd_init(unsigned int interface_version,
+ const struct kgd2kfd_calls **g2f)
+ {
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index f721b99..7cd4819 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -145,7 +145,7 @@ extern int ignore_crat;
+ /*
+ * Set sh_mem_config.retry_disable on Vega10
+ */
+-extern int vega10_noretry;
++extern int noretry;
+
+ /*
+ * Enable privileged mode for all CP queues including user queues
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4271-drm-amdkfd-Separate-trap-handler-assembly-code-and-i.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4271-drm-amdkfd-Separate-trap-handler-assembly-code-and-i.patch
new file mode 100644
index 00000000..21fc68c6
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4271-drm-amdkfd-Separate-trap-handler-assembly-code-and-i.patch
@@ -0,0 +1,1220 @@
+From f52eb74c6603fea6c9c91c5d9c039528e4e683e7 Mon Sep 17 00:00:00 2001
+From: Yong Zhao <yong.zhao@amd.com>
+Date: Fri, 20 Apr 2018 14:57:04 -0400
+Subject: [PATCH 4271/5725] drm/amdkfd: Separate trap handler assembly code and
+ its hex values
+
+Since the assembly code is inside "#if 0", it is ineffective. Despite that,
+during debugging, we need to change the assembly code, extract it into
+a separate file and compile the new file into hex values using sp3.
+That process also requires us to remove "#if 0" and modify lines starting
+with "#", so that sp3 can successfully compile the new file.
+
+With this change, all of the above chores are no longer needed, and
+cwsr_trap_handler_gfx*.asm can be used directly by sp3 to generate the
+hex values.
+
+Change-Id: Iadcff11fc21beecfed215c12ff257d5a1d0f7486
+Signed-off-by: Yong Zhao <yong.zhao@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h | 560 +++++++++++++++++++++
+ .../gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm | 267 +---------
+ .../gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm | 300 +----------
+ drivers/gpu/drm/amd/amdkfd/kfd_device.c | 3 +-
+ 4 files changed, 575 insertions(+), 555 deletions(-)
+ create mode 100644 drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+new file mode 100644
+index 0000000..a546a21
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+@@ -0,0 +1,560 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++static const uint32_t cwsr_trap_gfx8_hex[] = {
++ 0xbf820001, 0xbf820125,
++ 0xb8f4f802, 0x89748674,
++ 0xb8f5f803, 0x8675ff75,
++ 0x00000400, 0xbf850011,
++ 0xc00a1e37, 0x00000000,
++ 0xbf8c007f, 0x87777978,
++ 0xbf840002, 0xb974f802,
++ 0xbe801d78, 0xb8f5f803,
++ 0x8675ff75, 0x000001ff,
++ 0xbf850002, 0x80708470,
++ 0x82718071, 0x8671ff71,
++ 0x0000ffff, 0xb974f802,
++ 0xbe801f70, 0xb8f5f803,
++ 0x8675ff75, 0x00000100,
++ 0xbf840006, 0xbefa0080,
++ 0xb97a0203, 0x8671ff71,
++ 0x0000ffff, 0x80f08870,
++ 0x82f18071, 0xbefa0080,
++ 0xb97a0283, 0xbef60068,
++ 0xbef70069, 0xb8fa1c07,
++ 0x8e7a9c7a, 0x87717a71,
++ 0xb8fa03c7, 0x8e7a9b7a,
++ 0x87717a71, 0xb8faf807,
++ 0x867aff7a, 0x00007fff,
++ 0xb97af807, 0xbef2007e,
++ 0xbef3007f, 0xbefe0180,
++ 0xbf900004, 0x877a8474,
++ 0xb97af802, 0xbf8e0002,
++ 0xbf88fffe, 0xbef8007e,
++ 0x8679ff7f, 0x0000ffff,
++ 0x8779ff79, 0x00040000,
++ 0xbefa0080, 0xbefb00ff,
++ 0x00807fac, 0x867aff7f,
++ 0x08000000, 0x8f7a837a,
++ 0x877b7a7b, 0x867aff7f,
++ 0x70000000, 0x8f7a817a,
++ 0x877b7a7b, 0xbeef007c,
++ 0xbeee0080, 0xb8ee2a05,
++ 0x806e816e, 0x8e6e8a6e,
++ 0xb8fa1605, 0x807a817a,
++ 0x8e7a867a, 0x806e7a6e,
++ 0xbefa0084, 0xbefa00ff,
++ 0x01000000, 0xbefe007c,
++ 0xbefc006e, 0xc0611bfc,
++ 0x0000007c, 0x806e846e,
++ 0xbefc007e, 0xbefe007c,
++ 0xbefc006e, 0xc0611c3c,
++ 0x0000007c, 0x806e846e,
++ 0xbefc007e, 0xbefe007c,
++ 0xbefc006e, 0xc0611c7c,
++ 0x0000007c, 0x806e846e,
++ 0xbefc007e, 0xbefe007c,
++ 0xbefc006e, 0xc0611cbc,
++ 0x0000007c, 0x806e846e,
++ 0xbefc007e, 0xbefe007c,
++ 0xbefc006e, 0xc0611cfc,
++ 0x0000007c, 0x806e846e,
++ 0xbefc007e, 0xbefe007c,
++ 0xbefc006e, 0xc0611d3c,
++ 0x0000007c, 0x806e846e,
++ 0xbefc007e, 0xb8f5f803,
++ 0xbefe007c, 0xbefc006e,
++ 0xc0611d7c, 0x0000007c,
++ 0x806e846e, 0xbefc007e,
++ 0xbefe007c, 0xbefc006e,
++ 0xc0611dbc, 0x0000007c,
++ 0x806e846e, 0xbefc007e,
++ 0xbefe007c, 0xbefc006e,
++ 0xc0611dfc, 0x0000007c,
++ 0x806e846e, 0xbefc007e,
++ 0xb8eff801, 0xbefe007c,
++ 0xbefc006e, 0xc0611bfc,
++ 0x0000007c, 0x806e846e,
++ 0xbefc007e, 0xbefe007c,
++ 0xbefc006e, 0xc0611b3c,
++ 0x0000007c, 0x806e846e,
++ 0xbefc007e, 0xbefe007c,
++ 0xbefc006e, 0xc0611b7c,
++ 0x0000007c, 0x806e846e,
++ 0xbefc007e, 0x867aff7f,
++ 0x04000000, 0xbef30080,
++ 0x8773737a, 0xb8ee2a05,
++ 0x806e816e, 0x8e6e8a6e,
++ 0xb8f51605, 0x80758175,
++ 0x8e758475, 0x8e7a8275,
++ 0xbefa00ff, 0x01000000,
++ 0xbef60178, 0x80786e78,
++ 0x82798079, 0xbefc0080,
++ 0xbe802b00, 0xbe822b02,
++ 0xbe842b04, 0xbe862b06,
++ 0xbe882b08, 0xbe8a2b0a,
++ 0xbe8c2b0c, 0xbe8e2b0e,
++ 0xc06b003c, 0x00000000,
++ 0xc06b013c, 0x00000010,
++ 0xc06b023c, 0x00000020,
++ 0xc06b033c, 0x00000030,
++ 0x8078c078, 0x82798079,
++ 0x807c907c, 0xbf0a757c,
++ 0xbf85ffeb, 0xbef80176,
++ 0xbeee0080, 0xbefe00c1,
++ 0xbeff00c1, 0xbefa00ff,
++ 0x01000000, 0xe0724000,
++ 0x6e1e0000, 0xe0724100,
++ 0x6e1e0100, 0xe0724200,
++ 0x6e1e0200, 0xe0724300,
++ 0x6e1e0300, 0xbefe00c1,
++ 0xbeff00c1, 0xb8f54306,
++ 0x8675c175, 0xbf84002c,
++ 0xbf8a0000, 0x867aff73,
++ 0x04000000, 0xbf840028,
++ 0x8e758675, 0x8e758275,
++ 0xbefa0075, 0xb8ee2a05,
++ 0x806e816e, 0x8e6e8a6e,
++ 0xb8fa1605, 0x807a817a,
++ 0x8e7a867a, 0x806e7a6e,
++ 0x806eff6e, 0x00000080,
++ 0xbefa00ff, 0x01000000,
++ 0xbefc0080, 0xd28c0002,
++ 0x000100c1, 0xd28d0003,
++ 0x000204c1, 0xd1060002,
++ 0x00011103, 0x7e0602ff,
++ 0x00000200, 0xbefc00ff,
++ 0x00010000, 0xbe80007b,
++ 0x867bff7b, 0xff7fffff,
++ 0x877bff7b, 0x00058000,
++ 0xd8ec0000, 0x00000002,
++ 0xbf8c007f, 0xe0765000,
++ 0x6e1e0002, 0x32040702,
++ 0xd0c9006a, 0x0000eb02,
++ 0xbf87fff7, 0xbefb0000,
++ 0xbeee00ff, 0x00000400,
++ 0xbefe00c1, 0xbeff00c1,
++ 0xb8f52a05, 0x80758175,
++ 0x8e758275, 0x8e7a8875,
++ 0xbefa00ff, 0x01000000,
++ 0xbefc0084, 0xbf0a757c,
++ 0xbf840015, 0xbf11017c,
++ 0x8075ff75, 0x00001000,
++ 0x7e000300, 0x7e020301,
++ 0x7e040302, 0x7e060303,
++ 0xe0724000, 0x6e1e0000,
++ 0xe0724100, 0x6e1e0100,
++ 0xe0724200, 0x6e1e0200,
++ 0xe0724300, 0x6e1e0300,
++ 0x807c847c, 0x806eff6e,
++ 0x00000400, 0xbf0a757c,
++ 0xbf85ffef, 0xbf9c0000,
++ 0xbf8200ca, 0xbef8007e,
++ 0x8679ff7f, 0x0000ffff,
++ 0x8779ff79, 0x00040000,
++ 0xbefa0080, 0xbefb00ff,
++ 0x00807fac, 0x8676ff7f,
++ 0x08000000, 0x8f768376,
++ 0x877b767b, 0x8676ff7f,
++ 0x70000000, 0x8f768176,
++ 0x877b767b, 0x8676ff7f,
++ 0x04000000, 0xbf84001e,
++ 0xbefe00c1, 0xbeff00c1,
++ 0xb8f34306, 0x8673c173,
++ 0xbf840019, 0x8e738673,
++ 0x8e738273, 0xbefa0073,
++ 0xb8f22a05, 0x80728172,
++ 0x8e728a72, 0xb8f61605,
++ 0x80768176, 0x8e768676,
++ 0x80727672, 0x8072ff72,
++ 0x00000080, 0xbefa00ff,
++ 0x01000000, 0xbefc0080,
++ 0xe0510000, 0x721e0000,
++ 0xe0510100, 0x721e0000,
++ 0x807cff7c, 0x00000200,
++ 0x8072ff72, 0x00000200,
++ 0xbf0a737c, 0xbf85fff6,
++ 0xbef20080, 0xbefe00c1,
++ 0xbeff00c1, 0xb8f32a05,
++ 0x80738173, 0x8e738273,
++ 0x8e7a8873, 0xbefa00ff,
++ 0x01000000, 0xbef60072,
++ 0x8072ff72, 0x00000400,
++ 0xbefc0084, 0xbf11087c,
++ 0x8073ff73, 0x00008000,
++ 0xe0524000, 0x721e0000,
++ 0xe0524100, 0x721e0100,
++ 0xe0524200, 0x721e0200,
++ 0xe0524300, 0x721e0300,
++ 0xbf8c0f70, 0x7e000300,
++ 0x7e020301, 0x7e040302,
++ 0x7e060303, 0x807c847c,
++ 0x8072ff72, 0x00000400,
++ 0xbf0a737c, 0xbf85ffee,
++ 0xbf9c0000, 0xe0524000,
++ 0x761e0000, 0xe0524100,
++ 0x761e0100, 0xe0524200,
++ 0x761e0200, 0xe0524300,
++ 0x761e0300, 0xb8f22a05,
++ 0x80728172, 0x8e728a72,
++ 0xb8f61605, 0x80768176,
++ 0x8e768676, 0x80727672,
++ 0x80f2c072, 0xb8f31605,
++ 0x80738173, 0x8e738473,
++ 0x8e7a8273, 0xbefa00ff,
++ 0x01000000, 0xbefc0073,
++ 0xc031003c, 0x00000072,
++ 0x80f2c072, 0xbf8c007f,
++ 0x80fc907c, 0xbe802d00,
++ 0xbe822d02, 0xbe842d04,
++ 0xbe862d06, 0xbe882d08,
++ 0xbe8a2d0a, 0xbe8c2d0c,
++ 0xbe8e2d0e, 0xbf06807c,
++ 0xbf84fff1, 0xb8f22a05,
++ 0x80728172, 0x8e728a72,
++ 0xb8f61605, 0x80768176,
++ 0x8e768676, 0x80727672,
++ 0xbefa0084, 0xbefa00ff,
++ 0x01000000, 0xc0211cfc,
++ 0x00000072, 0x80728472,
++ 0xc0211c3c, 0x00000072,
++ 0x80728472, 0xc0211c7c,
++ 0x00000072, 0x80728472,
++ 0xc0211bbc, 0x00000072,
++ 0x80728472, 0xc0211bfc,
++ 0x00000072, 0x80728472,
++ 0xc0211d3c, 0x00000072,
++ 0x80728472, 0xc0211d7c,
++ 0x00000072, 0x80728472,
++ 0xc0211a3c, 0x00000072,
++ 0x80728472, 0xc0211a7c,
++ 0x00000072, 0x80728472,
++ 0xc0211dfc, 0x00000072,
++ 0x80728472, 0xc0211b3c,
++ 0x00000072, 0x80728472,
++ 0xc0211b7c, 0x00000072,
++ 0x80728472, 0xbf8c007f,
++ 0x8671ff71, 0x0000ffff,
++ 0xbefc0073, 0xbefe006e,
++ 0xbeff006f, 0x867375ff,
++ 0x000003ff, 0xb9734803,
++ 0x867375ff, 0xfffff800,
++ 0x8f738b73, 0xb973a2c3,
++ 0xb977f801, 0x8673ff71,
++ 0xf0000000, 0x8f739c73,
++ 0x8e739073, 0xbef60080,
++ 0x87767376, 0x8673ff71,
++ 0x08000000, 0x8f739b73,
++ 0x8e738f73, 0x87767376,
++ 0x8673ff74, 0x00800000,
++ 0x8f739773, 0xb976f807,
++ 0x86fe7e7e, 0x86ea6a6a,
++ 0xb974f802, 0xbf8a0000,
++ 0x95807370, 0xbf810000,
++};
++
++
++static const uint32_t cwsr_trap_gfx9_hex[] = {
++ 0xbf820001, 0xbf82015a,
++ 0xb8f8f802, 0x89788678,
++ 0xb8f1f803, 0x866eff71,
++ 0x00000400, 0xbf850034,
++ 0x866eff71, 0x00000800,
++ 0xbf850003, 0x866eff71,
++ 0x00000100, 0xbf840008,
++ 0x866eff78, 0x00002000,
++ 0xbf840001, 0xbf810000,
++ 0x8778ff78, 0x00002000,
++ 0x80ec886c, 0x82ed806d,
++ 0xb8eef807, 0x866fff6e,
++ 0x001f8000, 0x8e6f8b6f,
++ 0x8977ff77, 0xfc000000,
++ 0x87776f77, 0x896eff6e,
++ 0x001f8000, 0xb96ef807,
++ 0xb8f0f812, 0xb8f1f813,
++ 0x8ef08870, 0xc0071bb8,
++ 0x00000000, 0xbf8cc07f,
++ 0xc0071c38, 0x00000008,
++ 0xbf8cc07f, 0x86ee6e6e,
++ 0xbf840001, 0xbe801d6e,
++ 0xb8f1f803, 0x8671ff71,
++ 0x000001ff, 0xbf850002,
++ 0x806c846c, 0x826d806d,
++ 0x866dff6d, 0x0000ffff,
++ 0x8f6e8b77, 0x866eff6e,
++ 0x001f8000, 0xb96ef807,
++ 0x86fe7e7e, 0x86ea6a6a,
++ 0xb978f802, 0xbe801f6c,
++ 0x866dff6d, 0x0000ffff,
++ 0xbef00080, 0xb9700283,
++ 0xb8f02407, 0x8e709c70,
++ 0x876d706d, 0xb8f003c7,
++ 0x8e709b70, 0x876d706d,
++ 0xb8f0f807, 0x8670ff70,
++ 0x00007fff, 0xb970f807,
++ 0xbeee007e, 0xbeef007f,
++ 0xbefe0180, 0xbf900004,
++ 0x87708478, 0xb970f802,
++ 0xbf8e0002, 0xbf88fffe,
++ 0xb8f02a05, 0x80708170,
++ 0x8e708a70, 0xb8f11605,
++ 0x80718171, 0x8e718671,
++ 0x80707170, 0x80707e70,
++ 0x8271807f, 0x8671ff71,
++ 0x0000ffff, 0xc0471cb8,
++ 0x00000040, 0xbf8cc07f,
++ 0xc04b1d38, 0x00000048,
++ 0xbf8cc07f, 0xc0431e78,
++ 0x00000058, 0xbf8cc07f,
++ 0xc0471eb8, 0x0000005c,
++ 0xbf8cc07f, 0xbef4007e,
++ 0x8675ff7f, 0x0000ffff,
++ 0x8775ff75, 0x00040000,
++ 0xbef60080, 0xbef700ff,
++ 0x00807fac, 0x8670ff7f,
++ 0x08000000, 0x8f708370,
++ 0x87777077, 0x8670ff7f,
++ 0x70000000, 0x8f708170,
++ 0x87777077, 0xbefb007c,
++ 0xbefa0080, 0xb8fa2a05,
++ 0x807a817a, 0x8e7a8a7a,
++ 0xb8f01605, 0x80708170,
++ 0x8e708670, 0x807a707a,
++ 0xbef60084, 0xbef600ff,
++ 0x01000000, 0xbefe007c,
++ 0xbefc007a, 0xc0611efa,
++ 0x0000007c, 0xbf8cc07f,
++ 0x807a847a, 0xbefc007e,
++ 0xbefe007c, 0xbefc007a,
++ 0xc0611b3a, 0x0000007c,
++ 0xbf8cc07f, 0x807a847a,
++ 0xbefc007e, 0xbefe007c,
++ 0xbefc007a, 0xc0611b7a,
++ 0x0000007c, 0xbf8cc07f,
++ 0x807a847a, 0xbefc007e,
++ 0xbefe007c, 0xbefc007a,
++ 0xc0611bba, 0x0000007c,
++ 0xbf8cc07f, 0x807a847a,
++ 0xbefc007e, 0xbefe007c,
++ 0xbefc007a, 0xc0611bfa,
++ 0x0000007c, 0xbf8cc07f,
++ 0x807a847a, 0xbefc007e,
++ 0xbefe007c, 0xbefc007a,
++ 0xc0611e3a, 0x0000007c,
++ 0xbf8cc07f, 0x807a847a,
++ 0xbefc007e, 0xb8f1f803,
++ 0xbefe007c, 0xbefc007a,
++ 0xc0611c7a, 0x0000007c,
++ 0xbf8cc07f, 0x807a847a,
++ 0xbefc007e, 0xbefe007c,
++ 0xbefc007a, 0xc0611a3a,
++ 0x0000007c, 0xbf8cc07f,
++ 0x807a847a, 0xbefc007e,
++ 0xbefe007c, 0xbefc007a,
++ 0xc0611a7a, 0x0000007c,
++ 0xbf8cc07f, 0x807a847a,
++ 0xbefc007e, 0xb8fbf801,
++ 0xbefe007c, 0xbefc007a,
++ 0xc0611efa, 0x0000007c,
++ 0xbf8cc07f, 0x807a847a,
++ 0xbefc007e, 0x8670ff7f,
++ 0x04000000, 0xbeef0080,
++ 0x876f6f70, 0xb8fa2a05,
++ 0x807a817a, 0x8e7a8a7a,
++ 0xb8f11605, 0x80718171,
++ 0x8e718471, 0x8e768271,
++ 0xbef600ff, 0x01000000,
++ 0xbef20174, 0x80747a74,
++ 0x82758075, 0xbefc0080,
++ 0xbf800000, 0xbe802b00,
++ 0xbe822b02, 0xbe842b04,
++ 0xbe862b06, 0xbe882b08,
++ 0xbe8a2b0a, 0xbe8c2b0c,
++ 0xbe8e2b0e, 0xc06b003a,
++ 0x00000000, 0xbf8cc07f,
++ 0xc06b013a, 0x00000010,
++ 0xbf8cc07f, 0xc06b023a,
++ 0x00000020, 0xbf8cc07f,
++ 0xc06b033a, 0x00000030,
++ 0xbf8cc07f, 0x8074c074,
++ 0x82758075, 0x807c907c,
++ 0xbf0a717c, 0xbf85ffe7,
++ 0xbef40172, 0xbefa0080,
++ 0xbefe00c1, 0xbeff00c1,
++ 0xbee80080, 0xbee90080,
++ 0xbef600ff, 0x01000000,
++ 0xe0724000, 0x7a1d0000,
++ 0xe0724100, 0x7a1d0100,
++ 0xe0724200, 0x7a1d0200,
++ 0xe0724300, 0x7a1d0300,
++ 0xbefe00c1, 0xbeff00c1,
++ 0xb8f14306, 0x8671c171,
++ 0xbf84002c, 0xbf8a0000,
++ 0x8670ff6f, 0x04000000,
++ 0xbf840028, 0x8e718671,
++ 0x8e718271, 0xbef60071,
++ 0xb8fa2a05, 0x807a817a,
++ 0x8e7a8a7a, 0xb8f01605,
++ 0x80708170, 0x8e708670,
++ 0x807a707a, 0x807aff7a,
++ 0x00000080, 0xbef600ff,
++ 0x01000000, 0xbefc0080,
++ 0xd28c0002, 0x000100c1,
++ 0xd28d0003, 0x000204c1,
++ 0xd1060002, 0x00011103,
++ 0x7e0602ff, 0x00000200,
++ 0xbefc00ff, 0x00010000,
++ 0xbe800077, 0x8677ff77,
++ 0xff7fffff, 0x8777ff77,
++ 0x00058000, 0xd8ec0000,
++ 0x00000002, 0xbf8cc07f,
++ 0xe0765000, 0x7a1d0002,
++ 0x68040702, 0xd0c9006a,
++ 0x0000e302, 0xbf87fff7,
++ 0xbef70000, 0xbefa00ff,
++ 0x00000400, 0xbefe00c1,
++ 0xbeff00c1, 0xb8f12a05,
++ 0x80718171, 0x8e718271,
++ 0x8e768871, 0xbef600ff,
++ 0x01000000, 0xbefc0084,
++ 0xbf0a717c, 0xbf840015,
++ 0xbf11017c, 0x8071ff71,
++ 0x00001000, 0x7e000300,
++ 0x7e020301, 0x7e040302,
++ 0x7e060303, 0xe0724000,
++ 0x7a1d0000, 0xe0724100,
++ 0x7a1d0100, 0xe0724200,
++ 0x7a1d0200, 0xe0724300,
++ 0x7a1d0300, 0x807c847c,
++ 0x807aff7a, 0x00000400,
++ 0xbf0a717c, 0xbf85ffef,
++ 0xbf9c0000, 0xbf8200d9,
++ 0xbef4007e, 0x8675ff7f,
++ 0x0000ffff, 0x8775ff75,
++ 0x00040000, 0xbef60080,
++ 0xbef700ff, 0x00807fac,
++ 0x866eff7f, 0x08000000,
++ 0x8f6e836e, 0x87776e77,
++ 0x866eff7f, 0x70000000,
++ 0x8f6e816e, 0x87776e77,
++ 0x866eff7f, 0x04000000,
++ 0xbf84001e, 0xbefe00c1,
++ 0xbeff00c1, 0xb8ef4306,
++ 0x866fc16f, 0xbf840019,
++ 0x8e6f866f, 0x8e6f826f,
++ 0xbef6006f, 0xb8f82a05,
++ 0x80788178, 0x8e788a78,
++ 0xb8ee1605, 0x806e816e,
++ 0x8e6e866e, 0x80786e78,
++ 0x8078ff78, 0x00000080,
++ 0xbef600ff, 0x01000000,
++ 0xbefc0080, 0xe0510000,
++ 0x781d0000, 0xe0510100,
++ 0x781d0000, 0x807cff7c,
++ 0x00000200, 0x8078ff78,
++ 0x00000200, 0xbf0a6f7c,
++ 0xbf85fff6, 0xbef80080,
++ 0xbefe00c1, 0xbeff00c1,
++ 0xb8ef2a05, 0x806f816f,
++ 0x8e6f826f, 0x8e76886f,
++ 0xbef600ff, 0x01000000,
++ 0xbeee0078, 0x8078ff78,
++ 0x00000400, 0xbefc0084,
++ 0xbf11087c, 0x806fff6f,
++ 0x00008000, 0xe0524000,
++ 0x781d0000, 0xe0524100,
++ 0x781d0100, 0xe0524200,
++ 0x781d0200, 0xe0524300,
++ 0x781d0300, 0xbf8c0f70,
++ 0x7e000300, 0x7e020301,
++ 0x7e040302, 0x7e060303,
++ 0x807c847c, 0x8078ff78,
++ 0x00000400, 0xbf0a6f7c,
++ 0xbf85ffee, 0xbf9c0000,
++ 0xe0524000, 0x6e1d0000,
++ 0xe0524100, 0x6e1d0100,
++ 0xe0524200, 0x6e1d0200,
++ 0xe0524300, 0x6e1d0300,
++ 0xb8f82a05, 0x80788178,
++ 0x8e788a78, 0xb8ee1605,
++ 0x806e816e, 0x8e6e866e,
++ 0x80786e78, 0x80f8c078,
++ 0xb8ef1605, 0x806f816f,
++ 0x8e6f846f, 0x8e76826f,
++ 0xbef600ff, 0x01000000,
++ 0xbefc006f, 0xc031003a,
++ 0x00000078, 0x80f8c078,
++ 0xbf8cc07f, 0x80fc907c,
++ 0xbf800000, 0xbe802d00,
++ 0xbe822d02, 0xbe842d04,
++ 0xbe862d06, 0xbe882d08,
++ 0xbe8a2d0a, 0xbe8c2d0c,
++ 0xbe8e2d0e, 0xbf06807c,
++ 0xbf84fff0, 0xb8f82a05,
++ 0x80788178, 0x8e788a78,
++ 0xb8ee1605, 0x806e816e,
++ 0x8e6e866e, 0x80786e78,
++ 0xbef60084, 0xbef600ff,
++ 0x01000000, 0xc0211bfa,
++ 0x00000078, 0x80788478,
++ 0xc0211b3a, 0x00000078,
++ 0x80788478, 0xc0211b7a,
++ 0x00000078, 0x80788478,
++ 0xc0211eba, 0x00000078,
++ 0x80788478, 0xc0211efa,
++ 0x00000078, 0x80788478,
++ 0xc0211c3a, 0x00000078,
++ 0x80788478, 0xc0211c7a,
++ 0x00000078, 0x80788478,
++ 0xc0211a3a, 0x00000078,
++ 0x80788478, 0xc0211a7a,
++ 0x00000078, 0x80788478,
++ 0xc0211cfa, 0x00000078,
++ 0x80788478, 0xbf8cc07f,
++ 0x866dff6d, 0x0000ffff,
++ 0xbefc006f, 0xbefe007a,
++ 0xbeff007b, 0x866f71ff,
++ 0x000003ff, 0xb96f4803,
++ 0x866f71ff, 0xfffff800,
++ 0x8f6f8b6f, 0xb96fa2c3,
++ 0xb973f801, 0xb8ee2a05,
++ 0x806e816e, 0x8e6e8a6e,
++ 0xb8ef1605, 0x806f816f,
++ 0x8e6f866f, 0x806e6f6e,
++ 0x806e746e, 0x826f8075,
++ 0x866fff6f, 0x0000ffff,
++ 0xc0071cb7, 0x00000040,
++ 0xc00b1d37, 0x00000048,
++ 0xc0031e77, 0x00000058,
++ 0xc0071eb7, 0x0000005c,
++ 0xbf8cc07f, 0x866fff6d,
++ 0xf0000000, 0x8f6f9c6f,
++ 0x8e6f906f, 0xbeee0080,
++ 0x876e6f6e, 0x866fff6d,
++ 0x08000000, 0x8f6f9b6f,
++ 0x8e6f8f6f, 0x876e6f6e,
++ 0x866fff70, 0x00800000,
++ 0x8f6f976f, 0xb96ef807,
++ 0x86fe7e7e, 0x86ea6a6a,
++ 0xb970f802, 0xbf8a0000,
++ 0x95806f6c, 0xbf810000,
++};
+diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
+index dec5ea4..6641348 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
+@@ -20,9 +20,12 @@
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-#if 0
+-HW (VI) source code for CWSR trap handler
+-#Version 18 + multiple trap handler
++/* To compile this assembly code:
++ * PROJECT=vi ./sp3 cwsr_trap_handler_gfx8.asm -hex tmp.hex
++ */
++
++/* HW (VI) source code for CWSR trap handler */
++/* Version 18 + multiple trap handler */
+
+ // this performance-optimal version was originally from Seven Xu at SRDC
+
+@@ -150,7 +153,7 @@ var s_save_spi_init_lo = exec_lo
+ var s_save_spi_init_hi = exec_hi
+
+ //tba_lo and tba_hi need to be saved/restored
+-var s_save_pc_lo = ttmp0 //{TTMP1, TTMP0} = {3¡¯h0,pc_rewind[3:0], HT[0],trapID[7:0], PC[47:0]}
++var s_save_pc_lo = ttmp0 //{TTMP1, TTMP0} = {3'h0,pc_rewind[3:0], HT[0],trapID[7:0], PC[47:0]}
+ var s_save_pc_hi = ttmp1
+ var s_save_exec_lo = ttmp2
+ var s_save_exec_hi = ttmp3
+@@ -1132,259 +1135,3 @@ end
+ function get_hwreg_size_bytes
+ return 128 //HWREG size 128 bytes
+ end
+-
+-
+-#endif
+-
+-static const uint32_t cwsr_trap_gfx8_hex[] = {
+- 0xbf820001, 0xbf820125,
+- 0xb8f4f802, 0x89748674,
+- 0xb8f5f803, 0x8675ff75,
+- 0x00000400, 0xbf850011,
+- 0xc00a1e37, 0x00000000,
+- 0xbf8c007f, 0x87777978,
+- 0xbf840002, 0xb974f802,
+- 0xbe801d78, 0xb8f5f803,
+- 0x8675ff75, 0x000001ff,
+- 0xbf850002, 0x80708470,
+- 0x82718071, 0x8671ff71,
+- 0x0000ffff, 0xb974f802,
+- 0xbe801f70, 0xb8f5f803,
+- 0x8675ff75, 0x00000100,
+- 0xbf840006, 0xbefa0080,
+- 0xb97a0203, 0x8671ff71,
+- 0x0000ffff, 0x80f08870,
+- 0x82f18071, 0xbefa0080,
+- 0xb97a0283, 0xbef60068,
+- 0xbef70069, 0xb8fa1c07,
+- 0x8e7a9c7a, 0x87717a71,
+- 0xb8fa03c7, 0x8e7a9b7a,
+- 0x87717a71, 0xb8faf807,
+- 0x867aff7a, 0x00007fff,
+- 0xb97af807, 0xbef2007e,
+- 0xbef3007f, 0xbefe0180,
+- 0xbf900004, 0x877a8474,
+- 0xb97af802, 0xbf8e0002,
+- 0xbf88fffe, 0xbef8007e,
+- 0x8679ff7f, 0x0000ffff,
+- 0x8779ff79, 0x00040000,
+- 0xbefa0080, 0xbefb00ff,
+- 0x00807fac, 0x867aff7f,
+- 0x08000000, 0x8f7a837a,
+- 0x877b7a7b, 0x867aff7f,
+- 0x70000000, 0x8f7a817a,
+- 0x877b7a7b, 0xbeef007c,
+- 0xbeee0080, 0xb8ee2a05,
+- 0x806e816e, 0x8e6e8a6e,
+- 0xb8fa1605, 0x807a817a,
+- 0x8e7a867a, 0x806e7a6e,
+- 0xbefa0084, 0xbefa00ff,
+- 0x01000000, 0xbefe007c,
+- 0xbefc006e, 0xc0611bfc,
+- 0x0000007c, 0x806e846e,
+- 0xbefc007e, 0xbefe007c,
+- 0xbefc006e, 0xc0611c3c,
+- 0x0000007c, 0x806e846e,
+- 0xbefc007e, 0xbefe007c,
+- 0xbefc006e, 0xc0611c7c,
+- 0x0000007c, 0x806e846e,
+- 0xbefc007e, 0xbefe007c,
+- 0xbefc006e, 0xc0611cbc,
+- 0x0000007c, 0x806e846e,
+- 0xbefc007e, 0xbefe007c,
+- 0xbefc006e, 0xc0611cfc,
+- 0x0000007c, 0x806e846e,
+- 0xbefc007e, 0xbefe007c,
+- 0xbefc006e, 0xc0611d3c,
+- 0x0000007c, 0x806e846e,
+- 0xbefc007e, 0xb8f5f803,
+- 0xbefe007c, 0xbefc006e,
+- 0xc0611d7c, 0x0000007c,
+- 0x806e846e, 0xbefc007e,
+- 0xbefe007c, 0xbefc006e,
+- 0xc0611dbc, 0x0000007c,
+- 0x806e846e, 0xbefc007e,
+- 0xbefe007c, 0xbefc006e,
+- 0xc0611dfc, 0x0000007c,
+- 0x806e846e, 0xbefc007e,
+- 0xb8eff801, 0xbefe007c,
+- 0xbefc006e, 0xc0611bfc,
+- 0x0000007c, 0x806e846e,
+- 0xbefc007e, 0xbefe007c,
+- 0xbefc006e, 0xc0611b3c,
+- 0x0000007c, 0x806e846e,
+- 0xbefc007e, 0xbefe007c,
+- 0xbefc006e, 0xc0611b7c,
+- 0x0000007c, 0x806e846e,
+- 0xbefc007e, 0x867aff7f,
+- 0x04000000, 0xbef30080,
+- 0x8773737a, 0xb8ee2a05,
+- 0x806e816e, 0x8e6e8a6e,
+- 0xb8f51605, 0x80758175,
+- 0x8e758475, 0x8e7a8275,
+- 0xbefa00ff, 0x01000000,
+- 0xbef60178, 0x80786e78,
+- 0x82798079, 0xbefc0080,
+- 0xbe802b00, 0xbe822b02,
+- 0xbe842b04, 0xbe862b06,
+- 0xbe882b08, 0xbe8a2b0a,
+- 0xbe8c2b0c, 0xbe8e2b0e,
+- 0xc06b003c, 0x00000000,
+- 0xc06b013c, 0x00000010,
+- 0xc06b023c, 0x00000020,
+- 0xc06b033c, 0x00000030,
+- 0x8078c078, 0x82798079,
+- 0x807c907c, 0xbf0a757c,
+- 0xbf85ffeb, 0xbef80176,
+- 0xbeee0080, 0xbefe00c1,
+- 0xbeff00c1, 0xbefa00ff,
+- 0x01000000, 0xe0724000,
+- 0x6e1e0000, 0xe0724100,
+- 0x6e1e0100, 0xe0724200,
+- 0x6e1e0200, 0xe0724300,
+- 0x6e1e0300, 0xbefe00c1,
+- 0xbeff00c1, 0xb8f54306,
+- 0x8675c175, 0xbf84002c,
+- 0xbf8a0000, 0x867aff73,
+- 0x04000000, 0xbf840028,
+- 0x8e758675, 0x8e758275,
+- 0xbefa0075, 0xb8ee2a05,
+- 0x806e816e, 0x8e6e8a6e,
+- 0xb8fa1605, 0x807a817a,
+- 0x8e7a867a, 0x806e7a6e,
+- 0x806eff6e, 0x00000080,
+- 0xbefa00ff, 0x01000000,
+- 0xbefc0080, 0xd28c0002,
+- 0x000100c1, 0xd28d0003,
+- 0x000204c1, 0xd1060002,
+- 0x00011103, 0x7e0602ff,
+- 0x00000200, 0xbefc00ff,
+- 0x00010000, 0xbe80007b,
+- 0x867bff7b, 0xff7fffff,
+- 0x877bff7b, 0x00058000,
+- 0xd8ec0000, 0x00000002,
+- 0xbf8c007f, 0xe0765000,
+- 0x6e1e0002, 0x32040702,
+- 0xd0c9006a, 0x0000eb02,
+- 0xbf87fff7, 0xbefb0000,
+- 0xbeee00ff, 0x00000400,
+- 0xbefe00c1, 0xbeff00c1,
+- 0xb8f52a05, 0x80758175,
+- 0x8e758275, 0x8e7a8875,
+- 0xbefa00ff, 0x01000000,
+- 0xbefc0084, 0xbf0a757c,
+- 0xbf840015, 0xbf11017c,
+- 0x8075ff75, 0x00001000,
+- 0x7e000300, 0x7e020301,
+- 0x7e040302, 0x7e060303,
+- 0xe0724000, 0x6e1e0000,
+- 0xe0724100, 0x6e1e0100,
+- 0xe0724200, 0x6e1e0200,
+- 0xe0724300, 0x6e1e0300,
+- 0x807c847c, 0x806eff6e,
+- 0x00000400, 0xbf0a757c,
+- 0xbf85ffef, 0xbf9c0000,
+- 0xbf8200ca, 0xbef8007e,
+- 0x8679ff7f, 0x0000ffff,
+- 0x8779ff79, 0x00040000,
+- 0xbefa0080, 0xbefb00ff,
+- 0x00807fac, 0x8676ff7f,
+- 0x08000000, 0x8f768376,
+- 0x877b767b, 0x8676ff7f,
+- 0x70000000, 0x8f768176,
+- 0x877b767b, 0x8676ff7f,
+- 0x04000000, 0xbf84001e,
+- 0xbefe00c1, 0xbeff00c1,
+- 0xb8f34306, 0x8673c173,
+- 0xbf840019, 0x8e738673,
+- 0x8e738273, 0xbefa0073,
+- 0xb8f22a05, 0x80728172,
+- 0x8e728a72, 0xb8f61605,
+- 0x80768176, 0x8e768676,
+- 0x80727672, 0x8072ff72,
+- 0x00000080, 0xbefa00ff,
+- 0x01000000, 0xbefc0080,
+- 0xe0510000, 0x721e0000,
+- 0xe0510100, 0x721e0000,
+- 0x807cff7c, 0x00000200,
+- 0x8072ff72, 0x00000200,
+- 0xbf0a737c, 0xbf85fff6,
+- 0xbef20080, 0xbefe00c1,
+- 0xbeff00c1, 0xb8f32a05,
+- 0x80738173, 0x8e738273,
+- 0x8e7a8873, 0xbefa00ff,
+- 0x01000000, 0xbef60072,
+- 0x8072ff72, 0x00000400,
+- 0xbefc0084, 0xbf11087c,
+- 0x8073ff73, 0x00008000,
+- 0xe0524000, 0x721e0000,
+- 0xe0524100, 0x721e0100,
+- 0xe0524200, 0x721e0200,
+- 0xe0524300, 0x721e0300,
+- 0xbf8c0f70, 0x7e000300,
+- 0x7e020301, 0x7e040302,
+- 0x7e060303, 0x807c847c,
+- 0x8072ff72, 0x00000400,
+- 0xbf0a737c, 0xbf85ffee,
+- 0xbf9c0000, 0xe0524000,
+- 0x761e0000, 0xe0524100,
+- 0x761e0100, 0xe0524200,
+- 0x761e0200, 0xe0524300,
+- 0x761e0300, 0xb8f22a05,
+- 0x80728172, 0x8e728a72,
+- 0xb8f61605, 0x80768176,
+- 0x8e768676, 0x80727672,
+- 0x80f2c072, 0xb8f31605,
+- 0x80738173, 0x8e738473,
+- 0x8e7a8273, 0xbefa00ff,
+- 0x01000000, 0xbefc0073,
+- 0xc031003c, 0x00000072,
+- 0x80f2c072, 0xbf8c007f,
+- 0x80fc907c, 0xbe802d00,
+- 0xbe822d02, 0xbe842d04,
+- 0xbe862d06, 0xbe882d08,
+- 0xbe8a2d0a, 0xbe8c2d0c,
+- 0xbe8e2d0e, 0xbf06807c,
+- 0xbf84fff1, 0xb8f22a05,
+- 0x80728172, 0x8e728a72,
+- 0xb8f61605, 0x80768176,
+- 0x8e768676, 0x80727672,
+- 0xbefa0084, 0xbefa00ff,
+- 0x01000000, 0xc0211cfc,
+- 0x00000072, 0x80728472,
+- 0xc0211c3c, 0x00000072,
+- 0x80728472, 0xc0211c7c,
+- 0x00000072, 0x80728472,
+- 0xc0211bbc, 0x00000072,
+- 0x80728472, 0xc0211bfc,
+- 0x00000072, 0x80728472,
+- 0xc0211d3c, 0x00000072,
+- 0x80728472, 0xc0211d7c,
+- 0x00000072, 0x80728472,
+- 0xc0211a3c, 0x00000072,
+- 0x80728472, 0xc0211a7c,
+- 0x00000072, 0x80728472,
+- 0xc0211dfc, 0x00000072,
+- 0x80728472, 0xc0211b3c,
+- 0x00000072, 0x80728472,
+- 0xc0211b7c, 0x00000072,
+- 0x80728472, 0xbf8c007f,
+- 0x8671ff71, 0x0000ffff,
+- 0xbefc0073, 0xbefe006e,
+- 0xbeff006f, 0x867375ff,
+- 0x000003ff, 0xb9734803,
+- 0x867375ff, 0xfffff800,
+- 0x8f738b73, 0xb973a2c3,
+- 0xb977f801, 0x8673ff71,
+- 0xf0000000, 0x8f739c73,
+- 0x8e739073, 0xbef60080,
+- 0x87767376, 0x8673ff71,
+- 0x08000000, 0x8f739b73,
+- 0x8e738f73, 0x87767376,
+- 0x8673ff74, 0x00800000,
+- 0x8f739773, 0xb976f807,
+- 0x86fe7e7e, 0x86ea6a6a,
+- 0xb974f802, 0xbf8a0000,
+- 0x95807370, 0xbf810000,
+-};
+-
+diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+index adb3308..e4e7c1d 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+@@ -20,9 +20,12 @@
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-#if 0
+-HW (GFX9) source code for CWSR trap handler
+-#Version 18 + multiple trap handler
++/* To compile this assembly code:
++ * PROJECT=greenland ./sp3 cwsr_trap_handler_gfx9.asm -hex tmp.hex
++ */
++
++/* HW (GFX9) source code for CWSR trap handler */
++/* Version 18 + multiple trap handler */
+
+ // this performance-optimal version was originally from Seven Xu at SRDC
+
+@@ -151,7 +154,7 @@ var S_SAVE_PC_HI_FIRST_REPLAY_MASK = 0x08000000 //FIXME
+ var s_save_spi_init_lo = exec_lo
+ var s_save_spi_init_hi = exec_hi
+
+-var s_save_pc_lo = ttmp0 //{TTMP1, TTMP0} = {3¡¯h0,pc_rewind[3:0], HT[0],trapID[7:0], PC[47:0]}
++var s_save_pc_lo = ttmp0 //{TTMP1, TTMP0} = {3'h0,pc_rewind[3:0], HT[0],trapID[7:0], PC[47:0]}
+ var s_save_pc_hi = ttmp1
+ var s_save_exec_lo = ttmp2
+ var s_save_exec_hi = ttmp3
+@@ -1210,292 +1213,3 @@ function ack_sqc_store_workaround
+ s_waitcnt lgkmcnt(0)
+ end
+ end
+-
+-
+-#endif
+-
+-static const uint32_t cwsr_trap_gfx9_hex[] = {
+- 0xbf820001, 0xbf82015a,
+- 0xb8f8f802, 0x89788678,
+- 0xb8f1f803, 0x866eff71,
+- 0x00000400, 0xbf850034,
+- 0x866eff71, 0x00000800,
+- 0xbf850003, 0x866eff71,
+- 0x00000100, 0xbf840008,
+- 0x866eff78, 0x00002000,
+- 0xbf840001, 0xbf810000,
+- 0x8778ff78, 0x00002000,
+- 0x80ec886c, 0x82ed806d,
+- 0xb8eef807, 0x866fff6e,
+- 0x001f8000, 0x8e6f8b6f,
+- 0x8977ff77, 0xfc000000,
+- 0x87776f77, 0x896eff6e,
+- 0x001f8000, 0xb96ef807,
+- 0xb8f0f812, 0xb8f1f813,
+- 0x8ef08870, 0xc0071bb8,
+- 0x00000000, 0xbf8cc07f,
+- 0xc0071c38, 0x00000008,
+- 0xbf8cc07f, 0x86ee6e6e,
+- 0xbf840001, 0xbe801d6e,
+- 0xb8f1f803, 0x8671ff71,
+- 0x000001ff, 0xbf850002,
+- 0x806c846c, 0x826d806d,
+- 0x866dff6d, 0x0000ffff,
+- 0x8f6e8b77, 0x866eff6e,
+- 0x001f8000, 0xb96ef807,
+- 0x86fe7e7e, 0x86ea6a6a,
+- 0xb978f802, 0xbe801f6c,
+- 0x866dff6d, 0x0000ffff,
+- 0xbef00080, 0xb9700283,
+- 0xb8f02407, 0x8e709c70,
+- 0x876d706d, 0xb8f003c7,
+- 0x8e709b70, 0x876d706d,
+- 0xb8f0f807, 0x8670ff70,
+- 0x00007fff, 0xb970f807,
+- 0xbeee007e, 0xbeef007f,
+- 0xbefe0180, 0xbf900004,
+- 0x87708478, 0xb970f802,
+- 0xbf8e0002, 0xbf88fffe,
+- 0xb8f02a05, 0x80708170,
+- 0x8e708a70, 0xb8f11605,
+- 0x80718171, 0x8e718671,
+- 0x80707170, 0x80707e70,
+- 0x8271807f, 0x8671ff71,
+- 0x0000ffff, 0xc0471cb8,
+- 0x00000040, 0xbf8cc07f,
+- 0xc04b1d38, 0x00000048,
+- 0xbf8cc07f, 0xc0431e78,
+- 0x00000058, 0xbf8cc07f,
+- 0xc0471eb8, 0x0000005c,
+- 0xbf8cc07f, 0xbef4007e,
+- 0x8675ff7f, 0x0000ffff,
+- 0x8775ff75, 0x00040000,
+- 0xbef60080, 0xbef700ff,
+- 0x00807fac, 0x8670ff7f,
+- 0x08000000, 0x8f708370,
+- 0x87777077, 0x8670ff7f,
+- 0x70000000, 0x8f708170,
+- 0x87777077, 0xbefb007c,
+- 0xbefa0080, 0xb8fa2a05,
+- 0x807a817a, 0x8e7a8a7a,
+- 0xb8f01605, 0x80708170,
+- 0x8e708670, 0x807a707a,
+- 0xbef60084, 0xbef600ff,
+- 0x01000000, 0xbefe007c,
+- 0xbefc007a, 0xc0611efa,
+- 0x0000007c, 0xbf8cc07f,
+- 0x807a847a, 0xbefc007e,
+- 0xbefe007c, 0xbefc007a,
+- 0xc0611b3a, 0x0000007c,
+- 0xbf8cc07f, 0x807a847a,
+- 0xbefc007e, 0xbefe007c,
+- 0xbefc007a, 0xc0611b7a,
+- 0x0000007c, 0xbf8cc07f,
+- 0x807a847a, 0xbefc007e,
+- 0xbefe007c, 0xbefc007a,
+- 0xc0611bba, 0x0000007c,
+- 0xbf8cc07f, 0x807a847a,
+- 0xbefc007e, 0xbefe007c,
+- 0xbefc007a, 0xc0611bfa,
+- 0x0000007c, 0xbf8cc07f,
+- 0x807a847a, 0xbefc007e,
+- 0xbefe007c, 0xbefc007a,
+- 0xc0611e3a, 0x0000007c,
+- 0xbf8cc07f, 0x807a847a,
+- 0xbefc007e, 0xb8f1f803,
+- 0xbefe007c, 0xbefc007a,
+- 0xc0611c7a, 0x0000007c,
+- 0xbf8cc07f, 0x807a847a,
+- 0xbefc007e, 0xbefe007c,
+- 0xbefc007a, 0xc0611a3a,
+- 0x0000007c, 0xbf8cc07f,
+- 0x807a847a, 0xbefc007e,
+- 0xbefe007c, 0xbefc007a,
+- 0xc0611a7a, 0x0000007c,
+- 0xbf8cc07f, 0x807a847a,
+- 0xbefc007e, 0xb8fbf801,
+- 0xbefe007c, 0xbefc007a,
+- 0xc0611efa, 0x0000007c,
+- 0xbf8cc07f, 0x807a847a,
+- 0xbefc007e, 0x8670ff7f,
+- 0x04000000, 0xbeef0080,
+- 0x876f6f70, 0xb8fa2a05,
+- 0x807a817a, 0x8e7a8a7a,
+- 0xb8f11605, 0x80718171,
+- 0x8e718471, 0x8e768271,
+- 0xbef600ff, 0x01000000,
+- 0xbef20174, 0x80747a74,
+- 0x82758075, 0xbefc0080,
+- 0xbf800000, 0xbe802b00,
+- 0xbe822b02, 0xbe842b04,
+- 0xbe862b06, 0xbe882b08,
+- 0xbe8a2b0a, 0xbe8c2b0c,
+- 0xbe8e2b0e, 0xc06b003a,
+- 0x00000000, 0xbf8cc07f,
+- 0xc06b013a, 0x00000010,
+- 0xbf8cc07f, 0xc06b023a,
+- 0x00000020, 0xbf8cc07f,
+- 0xc06b033a, 0x00000030,
+- 0xbf8cc07f, 0x8074c074,
+- 0x82758075, 0x807c907c,
+- 0xbf0a717c, 0xbf85ffe7,
+- 0xbef40172, 0xbefa0080,
+- 0xbefe00c1, 0xbeff00c1,
+- 0xbee80080, 0xbee90080,
+- 0xbef600ff, 0x01000000,
+- 0xe0724000, 0x7a1d0000,
+- 0xe0724100, 0x7a1d0100,
+- 0xe0724200, 0x7a1d0200,
+- 0xe0724300, 0x7a1d0300,
+- 0xbefe00c1, 0xbeff00c1,
+- 0xb8f14306, 0x8671c171,
+- 0xbf84002c, 0xbf8a0000,
+- 0x8670ff6f, 0x04000000,
+- 0xbf840028, 0x8e718671,
+- 0x8e718271, 0xbef60071,
+- 0xb8fa2a05, 0x807a817a,
+- 0x8e7a8a7a, 0xb8f01605,
+- 0x80708170, 0x8e708670,
+- 0x807a707a, 0x807aff7a,
+- 0x00000080, 0xbef600ff,
+- 0x01000000, 0xbefc0080,
+- 0xd28c0002, 0x000100c1,
+- 0xd28d0003, 0x000204c1,
+- 0xd1060002, 0x00011103,
+- 0x7e0602ff, 0x00000200,
+- 0xbefc00ff, 0x00010000,
+- 0xbe800077, 0x8677ff77,
+- 0xff7fffff, 0x8777ff77,
+- 0x00058000, 0xd8ec0000,
+- 0x00000002, 0xbf8cc07f,
+- 0xe0765000, 0x7a1d0002,
+- 0x68040702, 0xd0c9006a,
+- 0x0000e302, 0xbf87fff7,
+- 0xbef70000, 0xbefa00ff,
+- 0x00000400, 0xbefe00c1,
+- 0xbeff00c1, 0xb8f12a05,
+- 0x80718171, 0x8e718271,
+- 0x8e768871, 0xbef600ff,
+- 0x01000000, 0xbefc0084,
+- 0xbf0a717c, 0xbf840015,
+- 0xbf11017c, 0x8071ff71,
+- 0x00001000, 0x7e000300,
+- 0x7e020301, 0x7e040302,
+- 0x7e060303, 0xe0724000,
+- 0x7a1d0000, 0xe0724100,
+- 0x7a1d0100, 0xe0724200,
+- 0x7a1d0200, 0xe0724300,
+- 0x7a1d0300, 0x807c847c,
+- 0x807aff7a, 0x00000400,
+- 0xbf0a717c, 0xbf85ffef,
+- 0xbf9c0000, 0xbf8200d9,
+- 0xbef4007e, 0x8675ff7f,
+- 0x0000ffff, 0x8775ff75,
+- 0x00040000, 0xbef60080,
+- 0xbef700ff, 0x00807fac,
+- 0x866eff7f, 0x08000000,
+- 0x8f6e836e, 0x87776e77,
+- 0x866eff7f, 0x70000000,
+- 0x8f6e816e, 0x87776e77,
+- 0x866eff7f, 0x04000000,
+- 0xbf84001e, 0xbefe00c1,
+- 0xbeff00c1, 0xb8ef4306,
+- 0x866fc16f, 0xbf840019,
+- 0x8e6f866f, 0x8e6f826f,
+- 0xbef6006f, 0xb8f82a05,
+- 0x80788178, 0x8e788a78,
+- 0xb8ee1605, 0x806e816e,
+- 0x8e6e866e, 0x80786e78,
+- 0x8078ff78, 0x00000080,
+- 0xbef600ff, 0x01000000,
+- 0xbefc0080, 0xe0510000,
+- 0x781d0000, 0xe0510100,
+- 0x781d0000, 0x807cff7c,
+- 0x00000200, 0x8078ff78,
+- 0x00000200, 0xbf0a6f7c,
+- 0xbf85fff6, 0xbef80080,
+- 0xbefe00c1, 0xbeff00c1,
+- 0xb8ef2a05, 0x806f816f,
+- 0x8e6f826f, 0x8e76886f,
+- 0xbef600ff, 0x01000000,
+- 0xbeee0078, 0x8078ff78,
+- 0x00000400, 0xbefc0084,
+- 0xbf11087c, 0x806fff6f,
+- 0x00008000, 0xe0524000,
+- 0x781d0000, 0xe0524100,
+- 0x781d0100, 0xe0524200,
+- 0x781d0200, 0xe0524300,
+- 0x781d0300, 0xbf8c0f70,
+- 0x7e000300, 0x7e020301,
+- 0x7e040302, 0x7e060303,
+- 0x807c847c, 0x8078ff78,
+- 0x00000400, 0xbf0a6f7c,
+- 0xbf85ffee, 0xbf9c0000,
+- 0xe0524000, 0x6e1d0000,
+- 0xe0524100, 0x6e1d0100,
+- 0xe0524200, 0x6e1d0200,
+- 0xe0524300, 0x6e1d0300,
+- 0xb8f82a05, 0x80788178,
+- 0x8e788a78, 0xb8ee1605,
+- 0x806e816e, 0x8e6e866e,
+- 0x80786e78, 0x80f8c078,
+- 0xb8ef1605, 0x806f816f,
+- 0x8e6f846f, 0x8e76826f,
+- 0xbef600ff, 0x01000000,
+- 0xbefc006f, 0xc031003a,
+- 0x00000078, 0x80f8c078,
+- 0xbf8cc07f, 0x80fc907c,
+- 0xbf800000, 0xbe802d00,
+- 0xbe822d02, 0xbe842d04,
+- 0xbe862d06, 0xbe882d08,
+- 0xbe8a2d0a, 0xbe8c2d0c,
+- 0xbe8e2d0e, 0xbf06807c,
+- 0xbf84fff0, 0xb8f82a05,
+- 0x80788178, 0x8e788a78,
+- 0xb8ee1605, 0x806e816e,
+- 0x8e6e866e, 0x80786e78,
+- 0xbef60084, 0xbef600ff,
+- 0x01000000, 0xc0211bfa,
+- 0x00000078, 0x80788478,
+- 0xc0211b3a, 0x00000078,
+- 0x80788478, 0xc0211b7a,
+- 0x00000078, 0x80788478,
+- 0xc0211eba, 0x00000078,
+- 0x80788478, 0xc0211efa,
+- 0x00000078, 0x80788478,
+- 0xc0211c3a, 0x00000078,
+- 0x80788478, 0xc0211c7a,
+- 0x00000078, 0x80788478,
+- 0xc0211a3a, 0x00000078,
+- 0x80788478, 0xc0211a7a,
+- 0x00000078, 0x80788478,
+- 0xc0211cfa, 0x00000078,
+- 0x80788478, 0xbf8cc07f,
+- 0x866dff6d, 0x0000ffff,
+- 0xbefc006f, 0xbefe007a,
+- 0xbeff007b, 0x866f71ff,
+- 0x000003ff, 0xb96f4803,
+- 0x866f71ff, 0xfffff800,
+- 0x8f6f8b6f, 0xb96fa2c3,
+- 0xb973f801, 0xb8ee2a05,
+- 0x806e816e, 0x8e6e8a6e,
+- 0xb8ef1605, 0x806f816f,
+- 0x8e6f866f, 0x806e6f6e,
+- 0x806e746e, 0x826f8075,
+- 0x866fff6f, 0x0000ffff,
+- 0xc0071cb7, 0x00000040,
+- 0xc00b1d37, 0x00000048,
+- 0xc0031e77, 0x00000058,
+- 0xc0071eb7, 0x0000005c,
+- 0xbf8cc07f, 0x866fff6d,
+- 0xf0000000, 0x8f6f9c6f,
+- 0x8e6f906f, 0xbeee0080,
+- 0x876e6f6e, 0x866fff6d,
+- 0x08000000, 0x8f6f9b6f,
+- 0x8e6f8f6f, 0x876e6f6e,
+- 0x866fff70, 0x00800000,
+- 0x8f6f976f, 0xb96ef807,
+- 0x86fe7e7e, 0x86ea6a6a,
+- 0xb970f802, 0xbf8a0000,
+- 0x95806f6c, 0xbf810000,
+-};
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+index 4ee56ab..dc5017ff 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -27,8 +27,7 @@
+ #include "kfd_priv.h"
+ #include "kfd_device_queue_manager.h"
+ #include "kfd_pm4_headers_vi.h"
+-#include "cwsr_trap_handler_gfx8.asm"
+-#include "cwsr_trap_handler_gfx9.asm"
++#include "cwsr_trap_handler.h"
+ #include "kfd_iommu.h"
+
+ #define MQD_SIZE_ALIGNED 768
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4272-drm-amdkfd-Mellanox-Support-PeerSync-interface.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4272-drm-amdkfd-Mellanox-Support-PeerSync-interface.patch
new file mode 100644
index 00000000..a0aae18f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4272-drm-amdkfd-Mellanox-Support-PeerSync-interface.patch
@@ -0,0 +1,57 @@
+From 02368b53786d4e742117d188595626e91b750f7a Mon Sep 17 00:00:00 2001
+From: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+Date: Tue, 24 Apr 2018 17:57:53 -0400
+Subject: [PATCH 4272/5725] drm/amdkfd: Mellanox: Support PeerSync interface
+
+The Mellanox driver doesn't support memory invalidation for its new PeerSync
+interface. If a non-NULL pointer is passed into ib_register_peer_memory_client(),
+the Mellanox driver assumes the peer device (AMD) requires invalidation. This
+would result in the AMD device being ignored.
+
+The current KFD implementation of RDMA doesn't use invalidation and keeps the
+memory pinned for the entire duration, so passing NULL doesn't change the
+current behaviour. However, for a robust and secure solution this needs to be
+revisited.
+
+BUG: SWDEV-149064
+
+Change-Id: I6737331d65b1d2e63c2ebb970c40fe61d32f8d22
+Signed-off-by: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_peerdirect.c | 6 +-----
+ 1 file changed, 1 insertion(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_peerdirect.c b/drivers/gpu/drm/amd/amdkfd/kfd_peerdirect.c
+index fae8e8c..1b1a0ca 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_peerdirect.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_peerdirect.c
+@@ -137,7 +137,6 @@ static void (*pfn_ib_unregister_peer_memory_client)(void *reg_handle);
+
+ static const struct amd_rdma_interface *rdma_interface;
+
+-static invalidate_peer_memory ib_invalidate_callback;
+ static void *ib_reg_handle;
+
+ struct amd_mem_context {
+@@ -169,9 +168,6 @@ static void free_callback(void *client_priv)
+
+ pr_debug("mem_context->core_context 0x%p\n", mem_context->core_context);
+
+- /* Call back IB stack asking to invalidate memory */
+- (*ib_invalidate_callback) (ib_reg_handle, mem_context->core_context);
+-
+ /* amdkfd will free resources when we return from this callback.
+ * Set flag to inform that there is nothing to do on "put_pages", etc.
+ */
+@@ -478,7 +474,7 @@ void kfd_init_peer_direct(void)
+ strcpy(amd_mem_client.version, AMD_PEER_BRIDGE_DRIVER_VERSION);
+
+ ib_reg_handle = pfn_ib_register_peer_memory_client(&amd_mem_client,
+- &ib_invalidate_callback);
++ NULL);
+
+ if (!ib_reg_handle) {
+ pr_err("Cannot register peer memory client\n");
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4273-drm-amdkfd-Fix-CP-soft-hang-on-APUs.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4273-drm-amdkfd-Fix-CP-soft-hang-on-APUs.patch
new file mode 100644
index 00000000..6092eb2e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4273-drm-amdkfd-Fix-CP-soft-hang-on-APUs.patch
@@ -0,0 +1,103 @@
+From 4e83b01c57a19f196207e35e2c514fb025ab4a90 Mon Sep 17 00:00:00 2001
+From: Yong Zhao <yong.zhao@amd.com>
+Date: Wed, 25 Apr 2018 11:56:55 -0400
+Subject: [PATCH 4273/5725] drm/amdkfd: Fix CP soft hang on APUs
+
+The problem happens on Raven and Carrizo. The context save handler
+should not clear the high bits of PC_HI before extracting the bits
+of IB_STS.
+
+The bug is not relevant to VEGA10 until we enable demand paging.
+
+Fix: KFD-381
+
+Change-Id: I85615c9dad965972cc039074bfcd4c18e370ad34
+Signed-off-by: Jay Cornwall <Jay.Cornwall@amd.com>
+Signed-off-by: Yong Zhao <yong.zhao@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h | 4 ++--
+ drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm | 3 +--
+ drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm | 3 +--
+ 3 files changed, 4 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+index a546a21..f68aef0 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+@@ -253,7 +253,6 @@ static const uint32_t cwsr_trap_gfx8_hex[] = {
+ 0x00000072, 0x80728472,
+ 0xc0211b7c, 0x00000072,
+ 0x80728472, 0xbf8c007f,
+- 0x8671ff71, 0x0000ffff,
+ 0xbefc0073, 0xbefe006e,
+ 0xbeff006f, 0x867375ff,
+ 0x000003ff, 0xb9734803,
+@@ -267,6 +266,7 @@ static const uint32_t cwsr_trap_gfx8_hex[] = {
+ 0x8e738f73, 0x87767376,
+ 0x8673ff74, 0x00800000,
+ 0x8f739773, 0xb976f807,
++ 0x8671ff71, 0x0000ffff,
+ 0x86fe7e7e, 0x86ea6a6a,
+ 0xb974f802, 0xbf8a0000,
+ 0x95807370, 0xbf810000,
+@@ -530,7 +530,6 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
+ 0x00000078, 0x80788478,
+ 0xc0211cfa, 0x00000078,
+ 0x80788478, 0xbf8cc07f,
+- 0x866dff6d, 0x0000ffff,
+ 0xbefc006f, 0xbefe007a,
+ 0xbeff007b, 0x866f71ff,
+ 0x000003ff, 0xb96f4803,
+@@ -554,6 +553,7 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
+ 0x8e6f8f6f, 0x876e6f6e,
+ 0x866fff70, 0x00800000,
+ 0x8f6f976f, 0xb96ef807,
++ 0x866dff6d, 0x0000ffff,
+ 0x86fe7e7e, 0x86ea6a6a,
+ 0xb970f802, 0xbf8a0000,
+ 0x95806f6c, 0xbf810000,
+diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
+index 6641348..6302402 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
+@@ -1015,8 +1015,6 @@ end
+
+ s_waitcnt lgkmcnt(0) //from now on, it is safe to restore STATUS and IB_STS
+
+- s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS
+-
+ //for normal save & restore, the saved PC points to the next inst to execute, no adjustment needs to be made, otherwise:
+ if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL))
+ s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 8 //pc[31:0]+8 //two back-to-back s_trap are used (first for save and second for restore)
+@@ -1052,6 +1050,7 @@ end
+ s_lshr_b32 s_restore_m0, s_restore_m0, SQ_WAVE_STATUS_INST_ATC_SHIFT
+ s_setreg_b32 hwreg(HW_REG_IB_STS), s_restore_tmp
+
++ s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS
+ s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
+ s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
+ s_setreg_b32 hwreg(HW_REG_STATUS), s_restore_status // SCC is included, which is changed by previous salu
+diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+index e4e7c1d..fc62fb8 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+@@ -1067,8 +1067,6 @@ end
+
+ s_waitcnt lgkmcnt(0) //from now on, it is safe to restore STATUS and IB_STS
+
+- s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS
+-
+ //for normal save & restore, the saved PC points to the next inst to execute, no adjustment needs to be made, otherwise:
+ if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL))
+ s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 8 //pc[31:0]+8 //two back-to-back s_trap are used (first for save and second for restore)
+@@ -1119,6 +1117,7 @@ end
+ s_lshr_b32 s_restore_m0, s_restore_m0, SQ_WAVE_STATUS_INST_ATC_SHIFT
+ s_setreg_b32 hwreg(HW_REG_IB_STS), s_restore_tmp
+
++ s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS
+ s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
+ s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
+ s_setreg_b32 hwreg(HW_REG_STATUS), s_restore_status // SCC is included, which is changed by previous salu
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4274-drm-amdgpu-Don-t-use-kiq-to-send-invalid_tlbs-packag.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4274-drm-amdgpu-Don-t-use-kiq-to-send-invalid_tlbs-packag.patch
new file mode 100644
index 00000000..f0d6e064
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4274-drm-amdgpu-Don-t-use-kiq-to-send-invalid_tlbs-packag.patch
@@ -0,0 +1,29 @@
+From b25f2d7078a9aa344bec6253a625383e1fc2a5f2 Mon Sep 17 00:00:00 2001
+From: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Date: Mon, 23 Apr 2018 13:52:13 -0400
+Subject: [PATCH 4274/5725] drm/amdgpu: Don't use kiq to send invalid_tlbs
+ package on GPU reset
+
+Change-Id: I94d8e3800200aadc1fcb58ecf19a1bd29fc53251
+Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+index 49291d6..a81e301 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+@@ -989,7 +989,7 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
+ int vmid;
+ struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
+
+- if (ring->ready)
++ if (ring->ready && (!adev->in_gpu_reset))
+ return invalidate_tlbs_with_kiq(adev, pasid);
+
+ for (vmid = 0; vmid < 16; vmid++) {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4275-drm-amdgpu-Don-t-use-shadow-BO-for-compute-context.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4275-drm-amdgpu-Don-t-use-shadow-BO-for-compute-context.patch
new file mode 100644
index 00000000..d2f4d5f3
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4275-drm-amdgpu-Don-t-use-shadow-BO-for-compute-context.patch
@@ -0,0 +1,75 @@
+From b5bade0746d9a5cfb2ccf99d391f77132fbdc80a Mon Sep 17 00:00:00 2001
+From: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Date: Mon, 23 Apr 2018 13:53:33 -0400
+Subject: [PATCH 4275/5725] drm/amdgpu: Don't use shadow BO for compute context
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The shadow BO is located in GTT while its parent (PT and PD) BO may be located in VRAM.
+In some cases the BO in GTT can be evicted while its parent is not. The shadow BO
+then is not put on the eviction list and cannot be invalidated
+correctly.
+
+In its current state, KFD does not support recovering the process after a GPU reset.
+Avoid using the shadow BO for now. Further investigation is needed to fix the
+shadow BO invalidation issue.
+
+Change-Id: I9ab0fb70b5517738f0d0313ed19af8154ee39098
+Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index af645fb..edf2559 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -399,11 +399,12 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
+ eaddr = eaddr & ((1 << shift) - 1);
+
+ flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
++ if (vm->root.base.bo->shadow)
++ flags |= AMDGPU_GEM_CREATE_SHADOW;
+ if (vm->use_cpu_for_update)
+ flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ else
+- flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
+- AMDGPU_GEM_CREATE_SHADOW);
++ flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
+
+ /* walk over the address space and allocate the page tables */
+ for (pt_idx = from; pt_idx <= to; ++pt_idx) {
+@@ -2468,7 +2469,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+ if (vm->use_cpu_for_update)
+ flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+- else
++ else if (vm_context != AMDGPU_VM_CONTEXT_COMPUTE)
+ flags |= AMDGPU_GEM_CREATE_SHADOW;
+
+ size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level);
+@@ -2549,8 +2550,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ *
+ * Reinitializes the page directory to reflect the changed ATS
+ * setting. May also switch to the compute power profile if this is
+- * the first compute VM. May leave behind an unused shadow BO for the
+- * page directory when switching from SDMA updates to CPU updates.
++ * the first compute VM.
+ *
+ * Returns 0 for success, -errno for errors.
+ */
+@@ -2609,6 +2609,8 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+
+ /* Count the new compute VM */
+ amdgpu_inc_compute_vms(adev);
++ /* Free the shadow bo for compute VM */
++ amdgpu_bo_unref(&vm->root.base.bo->shadow);
+
+ error:
+ amdgpu_bo_unreserve(vm->root.base.bo);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4276-drm-amdkfd-Fix-typos-in-trap-handler-comments.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4276-drm-amdkfd-Fix-typos-in-trap-handler-comments.patch
new file mode 100644
index 00000000..ac468ff9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4276-drm-amdkfd-Fix-typos-in-trap-handler-comments.patch
@@ -0,0 +1,85 @@
+From d4b9431a6b77d4a141306c4a20371d20766150a1 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Tue, 17 Apr 2018 18:30:24 -0400
+Subject: [PATCH 4276/5725] drm/amdkfd: Fix typos in trap handler comments
+
+Fixed for upstreaming to avoid checkpatch warnings.
+
+Change-Id: Id0e78f1be11c3ee03b2d99bcb9c77cf1221c77fa
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm | 8 ++++----
+ drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm | 8 ++++----
+ 2 files changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
+index 6302402..a2a04bb 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
+@@ -77,7 +77,7 @@ var G8SR_RESTORE_BUF_RSRC_WORD1_STRIDE_DWx4 = G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_D
+ /*************************************************************************/
+ /* control on how to run the shader */
+ /*************************************************************************/
+-//any hack that needs to be made to run this code in EMU (either becasue various EMU code are not ready or no compute save & restore in EMU run)
++//any hack that needs to be made to run this code in EMU (either because various EMU code are not ready or no compute save & restore in EMU run)
+ var EMU_RUN_HACK = 0
+ var EMU_RUN_HACK_RESTORE_NORMAL = 0
+ var EMU_RUN_HACK_SAVE_NORMAL_EXIT = 0
+@@ -91,9 +91,9 @@ var WG_BASE_ADDR_HI = 0x0
+ var WAVE_SPACE = 0x5000 //memory size that each wave occupies in workgroup state mem
+ var CTX_SAVE_CONTROL = 0x0
+ var CTX_RESTORE_CONTROL = CTX_SAVE_CONTROL
+-var SIM_RUN_HACK = 0 //any hack that needs to be made to run this code in SIM (either becasue various RTL code are not ready or no compute save & restore in RTL run)
++var SIM_RUN_HACK = 0 //any hack that needs to be made to run this code in SIM (either because various RTL code are not ready or no compute save & restore in RTL run)
+ var SGPR_SAVE_USE_SQC = 1 //use SQC D$ to do the write
+-var USE_MTBUF_INSTEAD_OF_MUBUF = 0 //becasue TC EMU curently asserts on 0 of // overload DFMT field to carry 4 more bits of stride for MUBUF opcodes
++var USE_MTBUF_INSTEAD_OF_MUBUF = 0 //because TC EMU currently asserts on 0 of // overload DFMT field to carry 4 more bits of stride for MUBUF opcodes
+ var SWIZZLE_EN = 0 //whether we use swizzled buffer addressing
+
+ /**************************************************************************/
+@@ -1055,7 +1055,7 @@ end
+ s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
+ s_setreg_b32 hwreg(HW_REG_STATUS), s_restore_status // SCC is included, which is changed by previous salu
+
+- s_barrier //barrier to ensure the readiness of LDS before access attemps from any other wave in the same TG //FIXME not performance-optimal at this time
++ s_barrier //barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG //FIXME not performance-optimal at this time
+
+ if G8SR_DEBUG_TIMESTAMP
+ s_memrealtime s_g8sr_ts_restore_d
+diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+index fc62fb8..998be96 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+@@ -77,7 +77,7 @@ var G8SR_RESTORE_BUF_RSRC_WORD1_STRIDE_DWx4 = G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_D
+ /*************************************************************************/
+ /* control on how to run the shader */
+ /*************************************************************************/
+-//any hack that needs to be made to run this code in EMU (either becasue various EMU code are not ready or no compute save & restore in EMU run)
++//any hack that needs to be made to run this code in EMU (either because various EMU code are not ready or no compute save & restore in EMU run)
+ var EMU_RUN_HACK = 0
+ var EMU_RUN_HACK_RESTORE_NORMAL = 0
+ var EMU_RUN_HACK_SAVE_NORMAL_EXIT = 0
+@@ -89,9 +89,9 @@ var WG_BASE_ADDR_HI = 0x0
+ var WAVE_SPACE = 0x5000 //memory size that each wave occupies in workgroup state mem
+ var CTX_SAVE_CONTROL = 0x0
+ var CTX_RESTORE_CONTROL = CTX_SAVE_CONTROL
+-var SIM_RUN_HACK = 0 //any hack that needs to be made to run this code in SIM (either becasue various RTL code are not ready or no compute save & restore in RTL run)
++var SIM_RUN_HACK = 0 //any hack that needs to be made to run this code in SIM (either because various RTL code are not ready or no compute save & restore in RTL run)
+ var SGPR_SAVE_USE_SQC = 1 //use SQC D$ to do the write
+-var USE_MTBUF_INSTEAD_OF_MUBUF = 0 //becasue TC EMU curently asserts on 0 of // overload DFMT field to carry 4 more bits of stride for MUBUF opcodes
++var USE_MTBUF_INSTEAD_OF_MUBUF = 0 //because TC EMU currently asserts on 0 of // overload DFMT field to carry 4 more bits of stride for MUBUF opcodes
+ var SWIZZLE_EN = 0 //whether we use swizzled buffer addressing
+ var ACK_SQC_STORE = 1 //workaround for suspected SQC store bug causing incorrect stores under concurrency
+
+@@ -1122,7 +1122,7 @@ end
+ s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
+ s_setreg_b32 hwreg(HW_REG_STATUS), s_restore_status // SCC is included, which is changed by previous salu
+
+- s_barrier //barrier to ensure the readiness of LDS before access attemps from any other wave in the same TG //FIXME not performance-optimal at this time
++ s_barrier //barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG //FIXME not performance-optimal at this time
+
+ if G8SR_DEBUG_TIMESTAMP
+ s_memrealtime s_g8sr_ts_restore_d
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4277-drm-amdkfd-Align-Makefile-with-upstream.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4277-drm-amdkfd-Align-Makefile-with-upstream.patch
new file mode 100644
index 00000000..343e2236
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4277-drm-amdkfd-Align-Makefile-with-upstream.patch
@@ -0,0 +1,79 @@
+From 5d1782e6331406b75e1d4d3eca8fd1aacc9fb190 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Mon, 23 Apr 2018 21:16:21 -0400
+Subject: [PATCH 4277/5725] drm/amdkfd: Align Makefile with upstream
+
+Changed includes of amd_rdma.h to work with the upstream include paths
+and removed unnecessary inclusion in kfd_priv.h.
+
+Change-Id: Id624bf0e358bd416348986f756be7b51e242fc3c
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/Makefile | 7 ++-----
+ drivers/gpu/drm/amd/amdkfd/kfd_peerdirect.c | 2 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 1 -
+ drivers/gpu/drm/amd/amdkfd/kfd_rdma.c | 2 +-
+ 4 files changed, 4 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile
+index b65537a..66f1921 100644
+--- a/drivers/gpu/drm/amd/amdkfd/Makefile
++++ b/drivers/gpu/drm/amd/amdkfd/Makefile
+@@ -23,11 +23,8 @@
+ # Makefile for Heterogenous System Architecture support for AMD GPU devices
+ #
+
+-FULL_AMD_PATH=$(src)/..
+-
+-ccflags-y := -Iinclude/drm \
+- -I$(FULL_AMD_PATH)/include/ \
+- -I$(FULL_AMD_PATH)/include/asic_reg
++ccflags-y := -Idrivers/gpu/drm/amd/include/ \
++ -Idrivers/gpu/drm/amd/include/asic_reg
+
+ amdkfd-y := kfd_module.o kfd_device.o kfd_chardev.o kfd_topology.o \
+ kfd_pasid.o kfd_doorbell.o kfd_flat_memory.o \
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_peerdirect.c b/drivers/gpu/drm/amd/amdkfd/kfd_peerdirect.c
+index 1b1a0ca..87344cc 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_peerdirect.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_peerdirect.c
+@@ -49,9 +49,9 @@
+ #include <linux/slab.h>
+ #include <linux/scatterlist.h>
+ #include <linux/module.h>
++#include <drm/amd_rdma.h>
+
+ #include "kfd_priv.h"
+-#include "amd_rdma.h"
+
+
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index 7cd4819..25bfc4f 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -39,7 +39,6 @@
+ #include <linux/kfifo.h>
+ #include <kgd_kfd_interface.h>
+
+-#include "amd_rdma.h"
+ #include "amd_shared.h"
+
+ #define KFD_SYSFS_FILE_MODE 0444
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_rdma.c b/drivers/gpu/drm/amd/amdkfd/kfd_rdma.c
+index 985855f..3454514 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_rdma.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_rdma.c
+@@ -25,7 +25,7 @@
+ #include <linux/pid.h>
+ #include <linux/err.h>
+ #include <linux/slab.h>
+-#include "amd_rdma.h"
++#include <drm/amd_rdma.h>
+ #include "kfd_priv.h"
+
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4278-drm-amdkfd-Align-CIK-interrupt-processing-with-upstr.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4278-drm-amdkfd-Align-CIK-interrupt-processing-with-upstr.patch
new file mode 100644
index 00000000..728a6986
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4278-drm-amdkfd-Align-CIK-interrupt-processing-with-upstr.patch
@@ -0,0 +1,194 @@
+From d8dbdbca8ea2222761e366560ba69901a861f1a0 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Mon, 23 Apr 2018 21:59:05 -0400
+Subject: [PATCH 4278/5725] drm/amdkfd: Align CIK interrupt processing with
+ upstream
+
+Remove bitfields from struct cik_ih_ring_entry and use shifts and
+masks instead. Reorder the INTSRC definitions to match upstream.
+Minor clean-up and simplification of VM-fault related code that
+hasn't been upstreamed yet.
+
+Change-Id: I23ded8d8b3b2731bf28517bb84023fa8d1d893cf
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c | 73 +++++++++++++-----------
+ drivers/gpu/drm/amd/amdkfd/cik_int.h | 25 ++------
+ 2 files changed, 46 insertions(+), 52 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
+index 751c004..1261432 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
++++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
+@@ -24,18 +24,13 @@
+ #include "kfd_events.h"
+ #include "cik_int.h"
+
+-static bool is_cpc_vm_fault(struct kfd_dev *dev,
+- const uint32_t *ih_ring_entry)
++static bool is_cpc_vm_fault(struct kfd_dev *dev, uint32_t source_id,
++ unsigned int vmid)
+ {
+- const struct cik_ih_ring_entry *ihre =
+- (const struct cik_ih_ring_entry *)ih_ring_entry;
+-
+- if ((ihre->source_id == CIK_INTSRC_GFX_PAGE_INV_FAULT ||
+- ihre->source_id == CIK_INTSRC_GFX_MEM_PROT_FAULT) &&
+- ihre->vmid >= dev->vm_info.first_vmid_kfd &&
+- ihre->vmid <= dev->vm_info.last_vmid_kfd)
+- return true;
+- return false;
++ return (source_id == CIK_INTSRC_GFX_PAGE_INV_FAULT ||
++ source_id == CIK_INTSRC_GFX_MEM_PROT_FAULT) &&
++ vmid >= dev->vm_info.first_vmid_kfd &&
++ vmid <= dev->vm_info.last_vmid_kfd;
+ }
+
+ static bool cik_event_interrupt_isr(struct kfd_dev *dev,
+@@ -46,8 +41,7 @@ static bool cik_event_interrupt_isr(struct kfd_dev *dev,
+ const struct cik_ih_ring_entry *ihre =
+ (const struct cik_ih_ring_entry *)ih_ring_entry;
+ const struct kfd2kgd_calls *f2g = dev->kfd2kgd;
+- struct cik_ih_ring_entry *tmp_ihre =
+- (struct cik_ih_ring_entry *) patched_ihre;
++ unsigned int vmid, pasid;
+
+ /* This workaround is due to HW/FW limitation on Hawaii that
+ * VMID and PASID are not written into ih_ring_entry
+@@ -55,23 +49,34 @@ static bool cik_event_interrupt_isr(struct kfd_dev *dev,
+ if ((ihre->source_id == CIK_INTSRC_GFX_PAGE_INV_FAULT ||
+ ihre->source_id == CIK_INTSRC_GFX_MEM_PROT_FAULT) &&
+ dev->device_info->asic_family == CHIP_HAWAII) {
++ struct cik_ih_ring_entry *tmp_ihre =
++ (struct cik_ih_ring_entry *)patched_ihre;
++
+ *patched_flag = true;
+ *tmp_ihre = *ihre;
+
+- tmp_ihre->vmid = f2g->read_vmid_from_vmfault_reg(dev->kgd);
+- tmp_ihre->pasid = f2g->get_atc_vmid_pasid_mapping_pasid(
+- dev->kgd, tmp_ihre->vmid);
+- return (tmp_ihre->pasid != 0) &&
+- tmp_ihre->vmid >= dev->vm_info.first_vmid_kfd &&
+- tmp_ihre->vmid <= dev->vm_info.last_vmid_kfd;
++ vmid = f2g->read_vmid_from_vmfault_reg(dev->kgd);
++ pasid = f2g->get_atc_vmid_pasid_mapping_pasid(dev->kgd, vmid);
++
++ tmp_ihre->ring_id &= 0x000000ff;
++ tmp_ihre->ring_id |= vmid << 8;
++ tmp_ihre->ring_id |= pasid << 16;
++
++ return (pasid != 0) &&
++ vmid >= dev->vm_info.first_vmid_kfd &&
++ vmid <= dev->vm_info.last_vmid_kfd;
+ }
++
++ vmid = (ihre->ring_id & 0x0000ff00) >> 8;
++ pasid = (ihre->ring_id & 0xffff0000) >> 16;
++
+ /* Do not process in ISR, just request it to be forwarded to WQ. */
+- return (ihre->pasid != 0) &&
++ return (pasid != 0) &&
+ (ihre->source_id == CIK_INTSRC_CP_END_OF_PIPE ||
+- ihre->source_id == CIK_INTSRC_SDMA_TRAP ||
+- ihre->source_id == CIK_INTSRC_SQ_INTERRUPT_MSG ||
+- ihre->source_id == CIK_INTSRC_CP_BAD_OPCODE ||
+- is_cpc_vm_fault(dev, ih_ring_entry));
++ ihre->source_id == CIK_INTSRC_SDMA_TRAP ||
++ ihre->source_id == CIK_INTSRC_SQ_INTERRUPT_MSG ||
++ ihre->source_id == CIK_INTSRC_CP_BAD_OPCODE ||
++ is_cpc_vm_fault(dev, ihre->source_id, vmid));
+ }
+
+ static void cik_event_interrupt_wq(struct kfd_dev *dev,
+@@ -80,33 +85,35 @@ static void cik_event_interrupt_wq(struct kfd_dev *dev,
+ const struct cik_ih_ring_entry *ihre =
+ (const struct cik_ih_ring_entry *)ih_ring_entry;
+ uint32_t context_id = ihre->data & 0xfffffff;
++ unsigned int vmid = (ihre->ring_id & 0x0000ff00) >> 8;
++ unsigned int pasid = (ihre->ring_id & 0xffff0000) >> 16;
+
+- if (ihre->pasid == 0)
++ if (pasid == 0)
+ return;
+
+ if (ihre->source_id == CIK_INTSRC_CP_END_OF_PIPE)
+- kfd_signal_event_interrupt(ihre->pasid, context_id, 28);
++ kfd_signal_event_interrupt(pasid, context_id, 28);
+ else if (ihre->source_id == CIK_INTSRC_SDMA_TRAP)
+- kfd_signal_event_interrupt(ihre->pasid, context_id, 28);
++ kfd_signal_event_interrupt(pasid, context_id, 28);
+ else if (ihre->source_id == CIK_INTSRC_SQ_INTERRUPT_MSG)
+- kfd_signal_event_interrupt(ihre->pasid, context_id & 0xff, 8);
++ kfd_signal_event_interrupt(pasid, context_id & 0xff, 8);
+ else if (ihre->source_id == CIK_INTSRC_CP_BAD_OPCODE)
+- kfd_signal_hw_exception_event(ihre->pasid);
++ kfd_signal_hw_exception_event(pasid);
+ else if (ihre->source_id == CIK_INTSRC_GFX_PAGE_INV_FAULT ||
+ ihre->source_id == CIK_INTSRC_GFX_MEM_PROT_FAULT) {
+ struct kfd_vm_fault_info info;
+
+- kfd_process_vm_fault(dev->dqm, ihre->pasid);
++ kfd_process_vm_fault(dev->dqm, pasid);
+
+ memset(&info, 0, sizeof(info));
+ dev->kfd2kgd->get_vm_fault_info(dev->kgd, &info);
+ if (!info.page_addr && !info.status)
+ return;
+
+- if (info.vmid == ihre->vmid)
+- kfd_signal_vm_fault_event(dev, ihre->pasid, &info);
++ if (info.vmid == vmid)
++ kfd_signal_vm_fault_event(dev, pasid, &info);
+ else
+- kfd_signal_vm_fault_event(dev, ihre->pasid, NULL);
++ kfd_signal_vm_fault_event(dev, pasid, NULL);
+ }
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/cik_int.h b/drivers/gpu/drm/amd/amdkfd/cik_int.h
+index ff8255d..a2079a0 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cik_int.h
++++ b/drivers/gpu/drm/amd/amdkfd/cik_int.h
+@@ -26,32 +26,19 @@
+ #include <linux/types.h>
+
+ struct cik_ih_ring_entry {
+- uint32_t source_id:8;
+- uint32_t reserved1:8;
+- uint32_t reserved2:16;
+-
+- uint32_t data:28;
+- uint32_t reserved3:4;
+-
+- /* pipeid, meid and unused3 are officially called RINGID,
+- * but for our purposes, they always decode into pipe and ME.
+- */
+- uint32_t pipeid:2;
+- uint32_t meid:2;
+- uint32_t reserved4:4;
+- uint32_t vmid:8;
+- uint32_t pasid:16;
+-
+- uint32_t reserved5;
++ uint32_t source_id;
++ uint32_t data;
++ uint32_t ring_id;
++ uint32_t reserved;
+ };
+
+-#define CIK_INTSRC_DEQUEUE_COMPLETE 0xC6
+ #define CIK_INTSRC_CP_END_OF_PIPE 0xB5
+ #define CIK_INTSRC_CP_BAD_OPCODE 0xB7
++#define CIK_INTSRC_DEQUEUE_COMPLETE 0xC6
++#define CIK_INTSRC_SDMA_TRAP 0xE0
+ #define CIK_INTSRC_SQ_INTERRUPT_MSG 0xEF
+ #define CIK_INTSRC_GFX_PAGE_INV_FAULT 0x92
+ #define CIK_INTSRC_GFX_MEM_PROT_FAULT 0x93
+-#define CIK_INTSRC_SDMA_TRAP 0xE0
+
+ #endif
+
+--
+2.7.4
+
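As a rough illustration of the shift-and-mask decoding that replaces the old bitfields, the following self-contained userspace sketch (plain C, not the kernel code) pulls VMID from bits 15:8 and PASID from bits 31:16 of the RINGID dword, matching the masks used in the patch; the sample entry values are invented.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* dwords 0..3 of a CIK IH ring entry: SOURCE_ID, DATA, RING_ID, reserved.
	 * 0xB5 is CIK_INTSRC_CP_END_OF_PIPE; the other values are invented.
	 */
	uint32_t ih_ring_entry[4] = { 0xB5, 0x1234567, 0x00420800, 0 };
	uint32_t ring_id = ih_ring_entry[2];

	unsigned int vmid  = (ring_id & 0x0000ff00) >> 8;	/* RINGID bits 15:8  */
	unsigned int pasid = (ring_id & 0xffff0000) >> 16;	/* RINGID bits 31:16 */

	printf("source_id=0x%x vmid=%u pasid=%u\n", ih_ring_entry[0], vmid, pasid);
	return 0;
}
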
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4279-drm-amdkfd-Remove-IH-patching-workaround-for-Vega10.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4279-drm-amdkfd-Remove-IH-patching-workaround-for-Vega10.patch
new file mode 100644
index 00000000..dab57a4a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4279-drm-amdkfd-Remove-IH-patching-workaround-for-Vega10.patch
@@ -0,0 +1,92 @@
+From 6014dea5e5c76b3e44003773538e1b3bc85f7c25 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Wed, 25 Apr 2018 17:06:33 -0400
+Subject: [PATCH 4279/5725] drm/amdkfd: Remove IH patching workaround for
+ Vega10
+
+Early CP firmware during bring-up failed to set the pasid in the IH
+ring entries. We had a racy driver workaround at the time. Current
+production firmware no longer requires this hack.
+
+Change-Id: Iccd0a4412918645e0b985be9eb9bb2aaeb486d37
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c | 49 ++++---------------------
+ 1 file changed, 7 insertions(+), 42 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+index 009d6f4..728aaad 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+@@ -25,24 +25,12 @@
+ #include "soc15_int.h"
+
+
+-static uint32_t kfd_get_pasid_from_vmid(struct kfd_dev *dev, uint8_t vmid)
+-{
+- uint32_t pasid = 0;
+- const struct kfd2kgd_calls *f2g = dev->kfd2kgd;
+-
+- if (f2g->get_atc_vmid_pasid_mapping_valid(dev->kgd, vmid))
+- pasid = f2g->get_atc_vmid_pasid_mapping_pasid(dev->kgd, vmid);
+-
+- return pasid;
+-}
+-
+ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
+ const uint32_t *ih_ring_entry,
+ uint32_t *patched_ihre,
+ bool *patched_flag)
+ {
+ uint16_t source_id, client_id, pasid, vmid;
+- bool result = false;
+
+ source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry);
+ client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
+@@ -59,36 +47,13 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
+ data[4], data[5], data[6], data[7]);
+ }
+
+- if ((vmid >= dev->vm_info.first_vmid_kfd &&
+- vmid <= dev->vm_info.last_vmid_kfd) &&
+- (source_id == SOC15_INTSRC_CP_END_OF_PIPE ||
+- source_id == SOC15_INTSRC_SDMA_TRAP ||
+- source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG ||
+- source_id == SOC15_INTSRC_CP_BAD_OPCODE ||
+- client_id == SOC15_IH_CLIENTID_VMC ||
+- client_id == SOC15_IH_CLIENTID_UTCL2)) {
+-
+- /*
+- * KFD want to handle this INT, but MEC firmware did
+- * not send pasid. Try to get it from vmid mapping
+- * and patch the ih entry. It's a temp workaround.
+- */
+- WARN_ONCE((!pasid), "Fix me.\n");
+- if (!pasid) {
+- uint32_t temp = le32_to_cpu(ih_ring_entry[3]);
+-
+- pasid = kfd_get_pasid_from_vmid(dev, vmid);
+- memcpy(patched_ihre, ih_ring_entry,
+- dev->device_info->ih_ring_entry_size);
+- patched_ihre[3] = cpu_to_le32(temp | pasid);
+- *patched_flag = true;
+- }
+- result = pasid ? true : false;
+- }
+-
+- /* Do not process in ISR, just request it to be forwarded to WQ. */
+- return result;
+-
++ return (pasid != 0) &&
++ (source_id == SOC15_INTSRC_CP_END_OF_PIPE ||
++ source_id == SOC15_INTSRC_SDMA_TRAP ||
++ source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG ||
++ source_id == SOC15_INTSRC_CP_BAD_OPCODE ||
++ client_id == SOC15_IH_CLIENTID_VMC ||
++ client_id == SOC15_IH_CLIENTID_UTCL2);
+ }
+
+ static void event_interrupt_wq_v9(struct kfd_dev *dev,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4280-drm-amdkfd-Clean-up-mmap-handling.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4280-drm-amdkfd-Clean-up-mmap-handling.patch
new file mode 100644
index 00000000..3c18c870
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4280-drm-amdkfd-Clean-up-mmap-handling.patch
@@ -0,0 +1,142 @@
+From a7219a8bb0766dd741bad58e70862b5fdce6eac1 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Mon, 23 Apr 2018 22:36:47 -0400
+Subject: [PATCH 4280/5725] drm/amdkfd: Clean up mmap handling
+
+Remove reserved bits in mmap addresses. The mmap offset is no longer
+used for TTM/DRM mappings. So it makes no sense to encode TTM/DRM
+address space limitations.
+
+Centralize encoding and parsing of the GPU ID in the mmap offset.
+
+Cosmetic changes to match upstream.
+
+Change-Id: I5edb10d257006ee915534756d9b0e28381c889ef
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 27 +++++++++++++--------------
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 9 ++++-----
+ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 10 ++++------
+ 3 files changed, 21 insertions(+), 25 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index 66c294a..491652c 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -2711,34 +2711,33 @@ static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
+ static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
+ {
+ struct kfd_process *process;
+- struct kfd_dev *kfd;
++ struct kfd_dev *dev = NULL;
+ unsigned long vm_pgoff;
+- unsigned long long mmap_type;
++ unsigned int gpu_id;
+
+ process = kfd_get_process(current);
+ if (IS_ERR(process))
+ return PTR_ERR(process);
+
+ vm_pgoff = vma->vm_pgoff;
+- vma->vm_pgoff = KFD_MMAP_OFFSET_VALUE_GET(vma->vm_pgoff);
+- mmap_type = vm_pgoff & KFD_MMAP_TYPE_MASK;
++ vma->vm_pgoff = KFD_MMAP_OFFSET_VALUE_GET(vm_pgoff);
++ gpu_id = KFD_MMAP_GPU_ID_GET(vm_pgoff);
++ if (gpu_id)
++ dev = kfd_device_by_id(gpu_id);
+
+- switch (mmap_type) {
++ switch (vm_pgoff & KFD_MMAP_TYPE_MASK) {
+ case KFD_MMAP_TYPE_DOORBELL:
+- kfd = kfd_device_by_id(KFD_MMAP_GPU_ID_GET(vm_pgoff));
+- if (!kfd)
+- return -EFAULT;
+- return kfd_doorbell_mmap(kfd, process, vma);
++ if (!dev)
++ return -ENODEV;
++ return kfd_doorbell_mmap(dev, process, vma);
+
+ case KFD_MMAP_TYPE_EVENTS:
+ return kfd_event_mmap(process, vma);
+
+ case KFD_MMAP_TYPE_RESERVED_MEM:
+- return kfd_reserved_mem_mmap(process, vma);
+-
+- default:
+- pr_err("Unsupported kfd mmap type %llx\n", mmap_type);
+- break;
++ if (!dev)
++ return -ENODEV;
++ return kfd_reserved_mem_mmap(dev, process, vma);
+ }
+
+ return -EFAULT;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index 25bfc4f..25e8e5a 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -49,8 +49,7 @@
+ /* Use upper bits of mmap offset to store KFD driver specific information.
+ * BITS[63:62] - Encode MMAP type
+ * BITS[61:46] - Encode gpu_id. To identify to which GPU the offset belongs to
+- * BITS[45:40] - Reserved. Not Used.
+- * BITS[39:0] - MMAP offset value. Used by TTM.
++ * BITS[45:0] - MMAP offset value
+ *
+ * NOTE: struct vm_area_struct.vm_pgoff uses offset in pages. Hence, these
+ * defines are w.r.t to PAGE_SIZE
+@@ -69,7 +68,7 @@
+ #define KFD_MMAP_GPU_ID_GET(offset) ((offset & KFD_MMAP_GPU_ID_MASK) \
+ >> KFD_MMAP_GPU_ID_SHIFT)
+
+-#define KFD_MMAP_OFFSET_VALUE_MASK (0xFFFFFFFFFFULL >> PAGE_SHIFT)
++#define KFD_MMAP_OFFSET_VALUE_MASK (0x3FFFFFFFFFFFULL >> PAGE_SHIFT)
+ #define KFD_MMAP_OFFSET_VALUE_GET(offset) (offset & KFD_MMAP_OFFSET_VALUE_MASK)
+
+ /*
+@@ -817,8 +816,8 @@ struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
+ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
+ struct kfd_process *p);
+
+-int kfd_reserved_mem_mmap(struct kfd_process *process,
+- struct vm_area_struct *vma);
++int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
++ struct vm_area_struct *vma);
+
+ /* KFD process API for creating and translating handles */
+ int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index 3650183..0b04c63 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -482,7 +482,8 @@ static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
+ if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
+ continue;
+
+- offset = (dev->id | KFD_MMAP_TYPE_RESERVED_MEM) << PAGE_SHIFT;
++ offset = (KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id))
++ << PAGE_SHIFT;
+ qpd->tba_addr = (uint64_t)vm_mmap(filep, 0,
+ KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
+ MAP_SHARED, offset);
+@@ -1151,15 +1152,12 @@ static void restore_process_worker(struct work_struct *work)
+ pr_info("Finished restoring process of pasid %d\n", p->pasid);
+ }
+
+-int kfd_reserved_mem_mmap(struct kfd_process *process,
+- struct vm_area_struct *vma)
++int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
++ struct vm_area_struct *vma)
+ {
+- struct kfd_dev *dev = kfd_device_by_id(vma->vm_pgoff);
+ struct kfd_process_device *pdd;
+ struct qcm_process_device *qpd;
+
+- if (!dev)
+- return -EINVAL;
+ if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
+ pr_err("Incorrect CWSR mapping size.\n");
+ return -EINVAL;
+--
+2.7.4
+
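The bit layout described in the commit message (type in bits 63:62, gpu_id in bits 61:46, offset value in bits 45:0, all expressed in page units because vm_pgoff counts pages) can be exercised with a small standalone sketch. The macro values below restate the kfd_priv.h definitions touched by the patch, while PAGE_SHIFT, the sample gpu_id and main() are assumptions added purely for illustration.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages for this sketch */

/* vm_pgoff is in pages, so every field is shifted down by PAGE_SHIFT. */
#define KFD_MMAP_TYPE_SHIFT		(62 - PAGE_SHIFT)
#define KFD_MMAP_TYPE_MASK		(0x3ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_DOORBELL		(0x3ULL << KFD_MMAP_TYPE_SHIFT)

#define KFD_MMAP_GPU_ID_SHIFT		(46 - PAGE_SHIFT)
#define KFD_MMAP_GPU_ID_MASK		(0xFFFFULL << KFD_MMAP_GPU_ID_SHIFT)
#define KFD_MMAP_GPU_ID(gpu_id)		(((uint64_t)(gpu_id) << KFD_MMAP_GPU_ID_SHIFT) \
					 & KFD_MMAP_GPU_ID_MASK)
#define KFD_MMAP_GPU_ID_GET(offset)	(((offset) & KFD_MMAP_GPU_ID_MASK) \
					 >> KFD_MMAP_GPU_ID_SHIFT)

#define KFD_MMAP_OFFSET_VALUE_MASK	(0x3FFFFFFFFFFFULL >> PAGE_SHIFT)
#define KFD_MMAP_OFFSET_VALUE_GET(offset) ((offset) & KFD_MMAP_OFFSET_VALUE_MASK)

int main(void)
{
	uint64_t gpu_id = 0x2b47;	/* made-up GPU ID */
	uint64_t vm_pgoff = KFD_MMAP_TYPE_DOORBELL | KFD_MMAP_GPU_ID(gpu_id);

	printf("type bits : 0x%llx\n",
	       (unsigned long long)(vm_pgoff & KFD_MMAP_TYPE_MASK));
	printf("gpu_id    : 0x%llx\n",
	       (unsigned long long)KFD_MMAP_GPU_ID_GET(vm_pgoff));
	printf("offset val: 0x%llx\n",
	       (unsigned long long)KFD_MMAP_OFFSET_VALUE_GET(vm_pgoff));
	return 0;
}
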
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4281-drm-amdkfd-fix-uninitialized-variable-use.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4281-drm-amdkfd-fix-uninitialized-variable-use.patch
new file mode 100644
index 00000000..6668bb94
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4281-drm-amdkfd-fix-uninitialized-variable-use.patch
@@ -0,0 +1,42 @@
+From 399e87d6ca79deb20aad8b2b5cbc22b6b9089e2e Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Thu, 15 Mar 2018 17:49:40 +0100
+Subject: [PATCH 4281/5725] drm/amdkfd: fix uninitialized variable use
+
+When CONFIG_ACPI is disabled, we never initialize the acpi_table
+structure in kfd_create_crat_image_virtual:
+
+drivers/gpu/drm/amd/amdkfd/kfd_crat.c: In function 'kfd_create_crat_image_virtual':
+drivers/gpu/drm/amd/amdkfd/kfd_crat.c:888:40: error: 'acpi_table' may be used uninitialized in this function [-Werror=maybe-uninitialized]
+
+The undefined behavior also happens for any other acpi_get_table()
+failure, but then the compiler can't warn about it.
+
+This adds an error check that prevents the structure from
+being used in error, avoiding both the undefined behavior and
+the warning about it.
+
+Change-Id: Ib90f712f6964f4c94f65f8c3a9153ac51f65504c
+Fixes: 520b8fb755cc ("drm/amdkfd: Add topology support for CPUs")
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+index 24d0634..a803898 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+@@ -915,7 +915,7 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
+
+ #ifdef CONFIG_ACPI
+ status = acpi_get_table("DSDT", 0, &acpi_table);
+- if (status == AE_NOT_FOUND)
++ if (status != AE_OK)
+ pr_warn("DSDT table not found for OEM information\n");
+ else {
+ crat_table->oem_revision = acpi_table->revision;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4282-drm-amdkfd-remove-unused-parameter-from-quiesce_mm-r.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4282-drm-amdkfd-remove-unused-parameter-from-quiesce_mm-r.patch
new file mode 100644
index 00000000..2acea1bc
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4282-drm-amdkfd-remove-unused-parameter-from-quiesce_mm-r.patch
@@ -0,0 +1,157 @@
+From ca5e5df1c09f70fd8998caaf1c9155a5c1a42361 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Mon, 23 Apr 2018 23:22:36 -0400
+Subject: [PATCH 4282/5725] drm/amdkfd: remove unused parameter from
+ quiesce_mm/resume_mm
+
+Change-Id: Ib17d9f51e8fd154460cee8a3ee71fc88edaeef80
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 4 ++--
+ drivers/gpu/drm/amd/amdkfd/kfd_device.c | 30 +++++-------------------
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 9 ++++---
+ drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 4 ++--
+ 4 files changed, 14 insertions(+), 33 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index 11165a7..dfa909f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -1936,7 +1936,7 @@ int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
+ evicted_bos = atomic_inc_return(&process_info->evicted_bos);
+ if (evicted_bos == 1) {
+ /* First eviction, stop the queues */
+- r = kgd2kfd->quiesce_mm(NULL, mm);
++ r = kgd2kfd->quiesce_mm(mm);
+ if (r)
+ pr_err("Failed to quiesce KFD\n");
+ schedule_delayed_work(&process_info->work, 1);
+@@ -2226,7 +2226,7 @@ static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
+ evicted_bos)
+ goto unlock_out;
+ evicted_bos = 0;
+- if (kgd2kfd->resume_mm(NULL, mm)) {
++ if (kgd2kfd->resume_mm(mm)) {
+ pr_err("%s: Failed to resume KFD\n", __func__);
+ /* No recovery from this failure. Probably the CP is
+ * hanging. No point trying again.
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+index dc5017ff..5b22ae0 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -678,10 +678,9 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
+ spin_unlock(&kfd->interrupt_lock);
+ }
+
+-int kgd2kfd_quiesce_mm(struct kfd_dev *kfd, struct mm_struct *mm)
++int kgd2kfd_quiesce_mm(struct mm_struct *mm)
+ {
+ struct kfd_process *p;
+- struct kfd_process_device *pdd;
+ int r;
+
+ /* Because we are called from arbitrary context (workqueue) as opposed
+@@ -690,26 +689,17 @@ int kgd2kfd_quiesce_mm(struct kfd_dev *kfd, struct mm_struct *mm)
+ */
+ p = kfd_lookup_process_by_mm(mm);
+ if (!p)
+- return -ENODEV;
++ return -ESRCH;
+
+- if (kfd) {
+- r = -ENODEV;
+- pdd = kfd_get_process_device_data(kfd, p);
+- if (pdd)
+- r = kfd->dqm->ops.evict_process_queues(kfd->dqm,
+- &pdd->qpd);
+- } else {
+- r = kfd_process_evict_queues(p);
+- }
++ r = kfd_process_evict_queues(p);
+
+ kfd_unref_process(p);
+ return r;
+ }
+
+-int kgd2kfd_resume_mm(struct kfd_dev *kfd, struct mm_struct *mm)
++int kgd2kfd_resume_mm(struct mm_struct *mm)
+ {
+ struct kfd_process *p;
+- struct kfd_process_device *pdd;
+ int r;
+
+ /* Because we are called from arbitrary context (workqueue) as opposed
+@@ -718,17 +708,9 @@ int kgd2kfd_resume_mm(struct kfd_dev *kfd, struct mm_struct *mm)
+ */
+ p = kfd_lookup_process_by_mm(mm);
+ if (!p)
+- return -ENODEV;
++ return -ESRCH;
+
+- if (kfd) {
+- r = -ENODEV;
+- pdd = kfd_get_process_device_data(kfd, p);
+- if (pdd)
+- r = kfd->dqm->ops.restore_process_queues(kfd->dqm,
+- &pdd->qpd);
+- } else {
+- r = kfd_process_restore_queues(p);
+- }
++ r = kfd_process_restore_queues(p);
+
+ kfd_unref_process(p);
+ return r;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index 25e8e5a..e810a15 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -630,11 +630,10 @@ struct qcm_process_device {
+ /* Approx. time before evicting the process again */
+ #define PROCESS_ACTIVE_TIME_MS 10
+
++int kgd2kfd_quiesce_mm(struct mm_struct *mm);
++int kgd2kfd_resume_mm(struct mm_struct *mm);
+ int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
+ struct dma_fence *fence);
+-int kfd_process_evict_queues(struct kfd_process *p);
+-int kfd_process_restore_queues(struct kfd_process *p);
+-
+
+ /* 8 byte handle containing GPU ID in the most significant 4 bytes and
+ * idr_handle in the least significant 4 bytes
+@@ -804,6 +803,8 @@ struct kfd_process *kfd_get_process(const struct task_struct *task);
+ struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid);
+ struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm);
+ void kfd_unref_process(struct kfd_process *p);
++int kfd_process_evict_queues(struct kfd_process *p);
++int kfd_process_restore_queues(struct kfd_process *p);
+ void kfd_suspend_all_processes(void);
+ int kfd_resume_all_processes(void);
+
+@@ -971,8 +972,6 @@ int pqm_get_wave_state(struct process_queue_manager *pqm,
+ void __user *ctl_stack,
+ u32 *ctl_stack_used_size,
+ u32 *save_area_used_size);
+-int kgd2kfd_quiesce_mm(struct kfd_dev *kfd, struct mm_struct *mm);
+-int kgd2kfd_resume_mm(struct kfd_dev *kfd, struct mm_struct *mm);
+
+ int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
+ unsigned int fence_value,
+diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+index 5060052..dd0b3c7 100644
+--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
++++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+@@ -443,8 +443,8 @@ struct kgd2kfd_calls {
+ void (*interrupt)(struct kfd_dev *kfd, const void *ih_ring_entry);
+ void (*suspend)(struct kfd_dev *kfd);
+ int (*resume)(struct kfd_dev *kfd);
+- int (*quiesce_mm)(struct kfd_dev *kfd, struct mm_struct *mm);
+- int (*resume_mm)(struct kfd_dev *kfd, struct mm_struct *mm);
++ int (*quiesce_mm)(struct mm_struct *mm);
++ int (*resume_mm)(struct mm_struct *mm);
+ int (*schedule_evict_and_restore_process)(struct mm_struct *mm,
+ struct dma_fence *fence);
+ int (*pre_reset)(struct kfd_dev *kfd);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4283-drm-amdkfd-Fix-kernel-queue-rollback-for-64-bit-wptr.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4283-drm-amdkfd-Fix-kernel-queue-rollback-for-64-bit-wptr.patch
new file mode 100644
index 00000000..b315ea30
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4283-drm-amdkfd-Fix-kernel-queue-rollback-for-64-bit-wptr.patch
@@ -0,0 +1,34 @@
+From d59254eb69d5b89b8a4dc52c975b9f6fbc7a72bd Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Tue, 24 Apr 2018 00:05:49 -0400
+Subject: [PATCH 4283/5725] drm/amdkfd: Fix kernel queue rollback for 64-bit
+ wptr
+
+Change-Id: I41afaa534cf23ba77e522729bc113d258b402b46
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+index 8cf9d44..51b976d 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+@@ -315,7 +315,13 @@ static void submit_packet(struct kernel_queue *kq)
+
+ static void rollback_packet(struct kernel_queue *kq)
+ {
+- kq->pending_wptr = *kq->queue->properties.write_ptr;
++ if (kq->dev->device_info->doorbell_size == 8) {
++ kq->pending_wptr64 = *kq->wptr64_kernel;
++ kq->pending_wptr = *kq->wptr_kernel %
++ (kq->queue->properties.queue_size / 4);
++ } else {
++ kq->pending_wptr = *kq->wptr_kernel;
++ }
+ }
+
+ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4284-drm-amdkfd-Match-release_mem-interface-with-other-PM.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4284-drm-amdkfd-Match-release_mem-interface-with-other-PM.patch
new file mode 100644
index 00000000..6e49e651
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4284-drm-amdkfd-Match-release_mem-interface-with-other-PM.patch
@@ -0,0 +1,107 @@
+From fbbd81580282d599ce588fafa89e2be06ca393d0 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Tue, 24 Apr 2018 17:59:05 -0400
+Subject: [PATCH 4284/5725] drm/amdkfd: Match release_mem interface with other
+ PM functions
+
+Return an error status instead of the length of the packet. The
+packet size can be read from pmf->release_mem_size. This makes the
+interface consistent with the other packet manager functions.
+
+Change-Id: I1980dc11738b9233aa549044afd5f3c6c564ff60
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 11 +++++++----
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c | 4 ++--
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c | 4 ++--
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 2 +-
+ 4 files changed, 12 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index e60aaf8..8067092 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -208,16 +208,19 @@ static int allocate_vmid(struct device_queue_manager *dqm,
+ static int flush_texture_cache_nocpsch(struct kfd_dev *kdev,
+ struct qcm_process_device *qpd)
+ {
+- uint32_t len;
++ const struct packet_manager_funcs *pmf = qpd->dqm->packets.pmf;
++ int ret;
+
+ if (!qpd->ib_kaddr)
+ return -ENOMEM;
+
+- len = qpd->dqm->packets.pmf->release_mem(qpd->ib_base,
+- (uint32_t *)qpd->ib_kaddr);
++ ret = pmf->release_mem(qpd->ib_base, (uint32_t *)qpd->ib_kaddr);
++ if (ret)
++ return ret;
+
+ return kdev->kfd2kgd->submit_ib(kdev->kgd, KGD_ENGINE_MEC1, qpd->vmid,
+- qpd->ib_base, (uint32_t *)qpd->ib_kaddr, len);
++ qpd->ib_base, (uint32_t *)qpd->ib_kaddr,
++ pmf->release_mem_size / sizeof(uint32_t));
+ }
+
+ static void deallocate_vmid(struct device_queue_manager *dqm,
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
+index f311f13..c6d5a33 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
+@@ -295,7 +295,7 @@ static int pm_query_status_v9(struct packet_manager *pm, uint32_t *buffer,
+ }
+
+
+-static uint32_t pm_release_mem_v9(uint64_t gpu_addr, uint32_t *buffer)
++static int pm_release_mem_v9(uint64_t gpu_addr, uint32_t *buffer)
+ {
+ struct pm4_mec_release_mem *packet;
+
+@@ -320,7 +320,7 @@ static uint32_t pm_release_mem_v9(uint64_t gpu_addr, uint32_t *buffer)
+
+ packet->data_lo = 0;
+
+- return sizeof(struct pm4_mec_release_mem) / sizeof(unsigned int);
++ return 0;
+ }
+
+ static struct packet_manager_funcs kfd_v9_pm_funcs = {
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
+index 178c5d0..a1a2e7b 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
+@@ -329,7 +329,7 @@ static int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer,
+ }
+
+
+-static uint32_t pm_release_mem_vi(uint64_t gpu_addr, uint32_t *buffer)
++static int pm_release_mem_vi(uint64_t gpu_addr, uint32_t *buffer)
+ {
+ struct pm4_mec_release_mem *packet;
+
+@@ -355,7 +355,7 @@ static uint32_t pm_release_mem_vi(uint64_t gpu_addr, uint32_t *buffer)
+
+ packet->data_lo = 0;
+
+- return sizeof(struct pm4_mec_release_mem) / sizeof(unsigned int);
++ return 0;
+ }
+
+ static struct packet_manager_funcs kfd_vi_pm_funcs = {
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index e810a15..62bc7df 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -1012,7 +1012,7 @@ struct packet_manager_funcs {
+ unsigned int sdma_engine);
+ int (*query_status)(struct packet_manager *pm, uint32_t *buffer,
+ uint64_t fence_address, uint32_t fence_value);
+- uint32_t (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);
++ int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);
+
+ /* Packet sizes */
+ int map_process_size;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4285-drm-amdkfd-Simplify-packet-manager-initialization.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4285-drm-amdkfd-Simplify-packet-manager-initialization.patch
new file mode 100644
index 00000000..b48471e0
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4285-drm-amdkfd-Simplify-packet-manager-initialization.patch
@@ -0,0 +1,117 @@
+From 694226f4f18f76d296b149049528cc64d83cb664 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Tue, 24 Apr 2018 18:05:50 -0400
+Subject: [PATCH 4285/5725] drm/amdkfd: Simplify packet manager initialization
+
+Assign the function tables directly instead of doing it in a one-line
+function. Also making the tables const while I'm at it.
+
+Change-Id: If03fe1f89fc5badf15f3e9dd356b44601152bd7b
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c | 8 +-------
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c | 7 +------
+ drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 4 ++--
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 9 ++++-----
+ 4 files changed, 8 insertions(+), 20 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
+index c6d5a33..6724b1a 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
+@@ -323,7 +323,7 @@ static int pm_release_mem_v9(uint64_t gpu_addr, uint32_t *buffer)
+ return 0;
+ }
+
+-static struct packet_manager_funcs kfd_v9_pm_funcs = {
++const struct packet_manager_funcs kfd_v9_pm_funcs = {
+ .map_process = pm_map_process_v9,
+ .runlist = pm_runlist_v9,
+ .set_resources = pm_set_resources_vi,
+@@ -339,9 +339,3 @@ static struct packet_manager_funcs kfd_v9_pm_funcs = {
+ .query_status_size = sizeof(struct pm4_mes_query_status),
+ .release_mem_size = sizeof(struct pm4_mec_release_mem)
+ };
+-
+-void kfd_pm_func_init_v9(struct packet_manager *pm)
+-{
+- pm->pmf = &kfd_v9_pm_funcs;
+-}
+-
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
+index a1a2e7b..357478f 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
+@@ -358,7 +358,7 @@ static int pm_release_mem_vi(uint64_t gpu_addr, uint32_t *buffer)
+ return 0;
+ }
+
+-static struct packet_manager_funcs kfd_vi_pm_funcs = {
++const struct packet_manager_funcs kfd_vi_pm_funcs = {
+ .map_process = pm_map_process_vi,
+ .runlist = pm_runlist_vi,
+ .set_resources = pm_set_resources_vi,
+@@ -374,8 +374,3 @@ static struct packet_manager_funcs kfd_vi_pm_funcs = {
+ .query_status_size = sizeof(struct pm4_mes_query_status),
+ .release_mem_size = sizeof(struct pm4_mec_release_mem)
+ };
+-
+-void kfd_pm_func_init_vi(struct packet_manager *pm)
+-{
+- pm->pmf = &kfd_vi_pm_funcs;
+-}
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+index 699352b..bc6e854 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+@@ -237,11 +237,11 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
+ case CHIP_FIJI:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+- kfd_pm_func_init_vi(pm);
++ pm->pmf = &kfd_vi_pm_funcs;
+ break;
+ case CHIP_VEGA10:
+ case CHIP_RAVEN:
+- kfd_pm_func_init_v9(pm);
++ pm->pmf = &kfd_v9_pm_funcs;
+ break;
+ default:
+ BUG();
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index 62bc7df..bb05e95 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -992,7 +992,7 @@ struct packet_manager {
+ struct kfd_mem_obj *ib_buffer_obj;
+ unsigned int ib_size_bytes;
+
+- struct packet_manager_funcs *pmf;
++ const struct packet_manager_funcs *pmf;
+ };
+
+ struct packet_manager_funcs {
+@@ -1024,6 +1024,9 @@ struct packet_manager_funcs {
+ int release_mem_size;
+ };
+
++extern const struct packet_manager_funcs kfd_vi_pm_funcs;
++extern const struct packet_manager_funcs kfd_v9_pm_funcs;
++
+ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
+ void pm_uninit(struct packet_manager *pm);
+ int pm_send_set_resources(struct packet_manager *pm,
+@@ -1044,10 +1047,6 @@ unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size);
+ int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer,
+ struct scheduling_resources *res);
+
+-void kfd_pm_func_init_vi(struct packet_manager *pm);
+-void kfd_pm_func_init_v9(struct packet_manager *pm);
+-
+-
+ uint64_t kfd_get_number_elems(struct kfd_dev *kfd);
+
+ /* Events */
+--
+2.7.4
+
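The design choice here, pointing pm->pmf at a const per-ASIC function table instead of calling a per-ASIC init helper such as kfd_pm_func_init_v9(), can be sketched in a few lines of standalone C; the struct, field values and names below are simplified stand-ins rather than the real KFD definitions.

#include <stdint.h>
#include <stdio.h>

struct pm_funcs {
	int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);
	int release_mem_size;
};

static int release_mem_v9(uint64_t gpu_addr, uint32_t *buffer)
{
	(void)gpu_addr;
	(void)buffer;
	return 0;	/* the real driver builds the PM4 packet here */
}

/* One const table per ASIC generation; callers only ever read it. */
static const struct pm_funcs v9_pm_funcs = {
	.release_mem      = release_mem_v9,
	.release_mem_size = 28,	/* made-up packet size in bytes */
};

struct packet_manager {
	const struct pm_funcs *pmf;
};

int main(void)
{
	struct packet_manager pm;
	uint32_t buffer[16];

	pm.pmf = &v9_pm_funcs;	/* direct assignment replaces the init helper */

	printf("release_mem -> %d, size %d bytes\n",
	       pm.pmf->release_mem(0x1000, buffer), pm.pmf->release_mem_size);
	return 0;
}
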
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4286-drm-amdkfd-Fix-error-handling-in-pm_init.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4286-drm-amdkfd-Fix-error-handling-in-pm_init.patch
new file mode 100644
index 00000000..448a1270
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4286-drm-amdkfd-Fix-error-handling-in-pm_init.patch
@@ -0,0 +1,61 @@
+From 9a915f78444eaa270bc86d024a02285f491c633f Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Wed, 25 Apr 2018 17:50:18 -0400
+Subject: [PATCH 4286/5725] drm/amdkfd: Fix error handling in pm_init
+
+Avoid BUG_ON. To avoid the need for cleaning up the kernel queue in
+case of an error, do the pm->pmf initialization first.
+
+Change-Id: I8260eacbff5101205aeab26d28a6f106eff5b00b
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 24 +++++++++++++-----------
+ 1 file changed, 13 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+index bc6e854..c6f3218 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+@@ -219,16 +219,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
+
+ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
+ {
+- pm->dqm = dqm;
+- mutex_init(&pm->lock);
+- pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
+- if (!pm->priv_queue) {
+- mutex_destroy(&pm->lock);
+- return -ENOMEM;
+- }
+- pm->allocated = false;
+-
+- switch (pm->dqm->dev->device_info->asic_family) {
++ switch (dqm->dev->device_info->asic_family) {
+ case CHIP_KAVERI:
+ case CHIP_HAWAII:
+ /* PM4 packet structures on CIK are the same as on VI */
+@@ -244,9 +235,20 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
+ pm->pmf = &kfd_v9_pm_funcs;
+ break;
+ default:
+- BUG();
++ WARN(1, "Unexpected ASIC family %u",
++ dqm->dev->device_info->asic_family);
++ return -EINVAL;
+ }
+
++ pm->dqm = dqm;
++ mutex_init(&pm->lock);
++ pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
++ if (!pm->priv_queue) {
++ mutex_destroy(&pm->lock);
++ return -ENOMEM;
++ }
++ pm->allocated = false;
++
+ return 0;
+ }
+
+--
+2.7.4
+
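A hedged sketch of the ordering rule this fix applies: resolve the per-ASIC selection first (it can fail, but needs no cleanup and warns instead of crashing), and only then allocate the kernel queue, so every failure path stays trivial. None of the names below are real KFD symbols; they exist only to show the shape of the fix.

#include <stdio.h>
#include <stdlib.h>

struct manager {
	const char *pmf;	/* stands in for the per-ASIC function table */
	void *priv_queue;	/* stands in for the HIQ kernel queue */
};

static int manager_init(struct manager *m, int asic_family)
{
	/* Step 1: the selection that can fail but needs no cleanup.
	 * Warn and bail out instead of BUG()-ing on an unknown ASIC.
	 */
	switch (asic_family) {
	case 8:
		m->pmf = "vi";
		break;
	case 9:
		m->pmf = "v9";
		break;
	default:
		fprintf(stderr, "Unexpected ASIC family %d\n", asic_family);
		return -1;
	}

	/* Step 2: only now allocate; a failure here has nothing to undo. */
	m->priv_queue = malloc(64);
	if (!m->priv_queue)
		return -1;

	return 0;
}

int main(void)
{
	struct manager m = { 0 };

	printf("init(asic 9)  -> %d\n", manager_init(&m, 9));
	free(m.priv_queue);
	m.priv_queue = NULL;

	printf("init(asic 42) -> %d\n", manager_init(&m, 42));
	return 0;
}
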
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4287-drm-amdkfd-Fix-pm_debugfs_runlist.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4287-drm-amdkfd-Fix-pm_debugfs_runlist.patch
new file mode 100644
index 00000000..27cf6ddf
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4287-drm-amdkfd-Fix-pm_debugfs_runlist.patch
@@ -0,0 +1,49 @@
+From fff3b6983608a0e513eb2e874325827207e3f485 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Wed, 25 Apr 2018 17:53:13 -0400
+Subject: [PATCH 4287/5725] drm/amdkfd: Fix pm_debugfs_runlist
+
+Guard it with #if defined(CONFIG_DEBUG_FS) and take the pm->lock while
+dumping the runlist IB.
+
+Change-Id: If52078d2003c34b44e8f19996c0263d6211dea13
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+index c6f3218..cd380ad 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+@@ -400,17 +400,25 @@ void pm_release_ib(struct packet_manager *pm)
+ mutex_unlock(&pm->lock);
+ }
+
++#if defined(CONFIG_DEBUG_FS)
++
+ int pm_debugfs_runlist(struct seq_file *m, void *data)
+ {
+ struct packet_manager *pm = data;
+
++ mutex_lock(&pm->lock);
++
+ if (!pm->allocated) {
+ seq_puts(m, " No active runlist\n");
+- return 0;
++ goto out;
+ }
+
+ seq_hex_dump(m, " ", DUMP_PREFIX_OFFSET, 32, 4,
+ pm->ib_buffer_obj->cpu_ptr, pm->ib_size_bytes, false);
+
++out:
++ mutex_unlock(&pm->lock);
+ return 0;
+ }
++
++#endif
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4288-drm-amdkfd-Check-ctx_save_restore_area_address.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4288-drm-amdkfd-Check-ctx_save_restore_area_address.patch
new file mode 100644
index 00000000..0556b625
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4288-drm-amdkfd-Check-ctx_save_restore_area_address.patch
@@ -0,0 +1,63 @@
+From 4adaa84776afde96e636d6282977a73ec6c07951 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Wed, 25 Apr 2018 17:34:07 -0400
+Subject: [PATCH 4288/5725] drm/amdkfd: Check ctx_save_restore_area_address
+
+Only program cp_hqd_ctx_save_control if the save/restore area has a
+valid virtual address. Otherwise save/restore cannot be safely
+enabled for a queue.
+
+Change-Id: Ibcf19713068c5733988f8a4472755d56d2e72d8b
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 4 ++--
+ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c | 4 ++--
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+index f4e8efc..d556779 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+@@ -159,7 +159,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
+ (1 << COMPUTE_PGM_RSRC2__TRAP_PRESENT__SHIFT);
+ }
+
+- if (mm->dev->cwsr_enabled) {
++ if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address) {
+ m->cp_hqd_persistent_state |=
+ (1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT);
+ m->cp_hqd_ctx_save_base_addr_lo =
+@@ -249,7 +249,7 @@ static int update_mqd(struct mqd_manager *mm, void *mqd,
+ if (priv_cp_queues)
+ m->cp_hqd_pq_control |=
+ 1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT;
+- if (mm->dev->cwsr_enabled)
++ if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address)
+ m->cp_hqd_ctx_save_control = 0;
+
+ update_cu_mask(mm, mqd, q);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+index eff7580..c537f37 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+@@ -159,7 +159,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
+ (1 << COMPUTE_PGM_RSRC2__TRAP_PRESENT__SHIFT);
+ }
+
+- if (mm->dev->cwsr_enabled) {
++ if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address) {
+ m->cp_hqd_persistent_state |=
+ (1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT);
+ m->cp_hqd_ctx_save_base_addr_lo =
+@@ -254,7 +254,7 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
+ if (priv_cp_queues)
+ m->cp_hqd_pq_control |=
+ 1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT;
+- if (mm->dev->cwsr_enabled)
++ if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address)
+ m->cp_hqd_ctx_save_control =
+ atc_bit << CP_HQD_CTX_SAVE_CONTROL__ATC__SHIFT |
+ mtype << CP_HQD_CTX_SAVE_CONTROL__MTYPE__SHIFT;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4289-drm-amdkfd-Fix-error-handling-around-kfd_process_cre.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4289-drm-amdkfd-Fix-error-handling-around-kfd_process_cre.patch
new file mode 100644
index 00000000..fcd73a3f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4289-drm-amdkfd-Fix-error-handling-around-kfd_process_cre.patch
@@ -0,0 +1,64 @@
+From df9fb730247f6683baf44736e8ccc46be8483438 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Tue, 24 Apr 2018 18:26:51 -0400
+Subject: [PATCH 4289/5725] drm/amdkfd: Fix error handling around
+ kfd_process_create_wq
+
+Change-Id: Ic4c2b210db0cd248e82916f7f4b04b6c2071ed69
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_module.c | 5 +++--
+ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 7 ++++++-
+ 2 files changed, 9 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+index a05f734..261657f 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+@@ -102,7 +102,6 @@ MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (defau
+
+ static int amdkfd_init_completed;
+
+-
+ int kgd2kfd_init(unsigned int interface_version,
+ const struct kgd2kfd_calls **g2f)
+ {
+@@ -155,7 +154,7 @@ static int __init kfd_module_init(void)
+
+ err = kfd_ipc_init();
+ if (err < 0)
+- goto err_topology;
++ goto err_ipc;
+
+ err = kfd_process_create_wq();
+ if (err < 0)
+@@ -172,6 +171,8 @@ static int __init kfd_module_init(void)
+ return 0;
+
+ err_create_wq:
++err_ipc:
++ kfd_topology_shutdown();
+ err_topology:
+ kfd_chardev_exit();
+ err_ioctl:
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index 0b04c63..2208794 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -80,7 +80,12 @@ int kfd_process_create_wq(void)
+ if (!kfd_restore_wq)
+ kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq", 0);
+
+- return kfd_process_wq && kfd_restore_wq ? 0 : -ENOMEM;
++ if (!kfd_process_wq || !kfd_restore_wq) {
++ kfd_process_destroy_wq();
++ return -ENOMEM;
++ }
++
++ return 0;
+ }
+
+ void kfd_process_destroy_wq(void)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4290-drm-amdkfd-Fix-error-handling-in-APU-CWSR-mapping.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4290-drm-amdkfd-Fix-error-handling-in-APU-CWSR-mapping.patch
new file mode 100644
index 00000000..909e5559
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4290-drm-amdkfd-Fix-error-handling-in-APU-CWSR-mapping.patch
@@ -0,0 +1,34 @@
+From 773d8279e82ca7538a9b67ebdad96b6ff00d7ca6 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Wed, 25 Apr 2018 18:21:26 -0400
+Subject: [PATCH 4290/5725] drm/amdkfd: Fix error handling in APU CWSR mapping
+
+Change-Id: Id808e7e2161be85ae771440d7fbcff087ba6154b
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index 2208794..cf78bc6 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -494,11 +494,12 @@ static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
+ MAP_SHARED, offset);
+
+ if (IS_ERR_VALUE(qpd->tba_addr)) {
+- pr_err("Failure to set tba address. error -%d.\n",
+- (int)qpd->tba_addr);
++ int err = qpd->tba_addr;
++
++ pr_err("Failure to set tba address. error %d.\n", err);
+ qpd->tba_addr = 0;
+ qpd->cwsr_kaddr = NULL;
+- return -ENOMEM;
++ return err;
+ }
+
+ memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4291-drm-amdkfd-Simplify-error-handling-in-kfd_create_pro.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4291-drm-amdkfd-Simplify-error-handling-in-kfd_create_pro.patch
new file mode 100644
index 00000000..99b86839
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4291-drm-amdkfd-Simplify-error-handling-in-kfd_create_pro.patch
@@ -0,0 +1,54 @@
+From c57287ab1dd944d5276d470d0c5df7ef89d5f9de Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Wed, 25 Apr 2018 18:23:39 -0400
+Subject: [PATCH 4291/5725] drm/amdkfd: Simplify error handling in
+ kfd_create_process_device_data
+
+Call init_doorbell_bitmap early to avoid excessive cleanup on failure.
+
+Change-Id: I59667a6313b0fb8192761a3287461f0a3d438928
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 17 ++++++-----------
+ 1 file changed, 6 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index cf78bc6..ecee955 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -676,6 +676,12 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
+ if (!pdd)
+ return NULL;
+
++ if (init_doorbell_bitmap(&pdd->qpd, dev)) {
++ pr_err("Failed to init doorbell for process\n");
++ kfree(pdd);
++ return NULL;
++ }
++
+ pdd->dev = dev;
+ INIT_LIST_HEAD(&pdd->qpd.queues_list);
+ INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
+@@ -689,19 +695,8 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
+
+ /* Init idr used for memory handle translation */
+ idr_init(&pdd->alloc_idr);
+- if (init_doorbell_bitmap(&pdd->qpd, dev)) {
+- pr_err("Failed to init doorbell for process\n");
+- goto err_create_pdd;
+- }
+
+ return pdd;
+-
+-err_create_pdd:
+- kfree(pdd->qpd.doorbell_bitmap);
+- idr_destroy(&pdd->alloc_idr);
+- list_del(&pdd->per_device_list);
+- kfree(pdd);
+- return NULL;
+ }
+
+ /**
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4292-drm-amdkfd-Simplify-obj-handle-allocation.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4292-drm-amdkfd-Simplify-obj-handle-allocation.patch
new file mode 100644
index 00000000..28bfdfae
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4292-drm-amdkfd-Simplify-obj-handle-allocation.patch
@@ -0,0 +1,51 @@
+From 9a909d46cdc3369acaeab9768cf4e77b49409e6f Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Thu, 26 Apr 2018 15:11:48 -0400
+Subject: [PATCH 4292/5725] drm/amdkfd: Simplify obj handle allocation
+
+MIN and MAX_IDR_IDs aren't necessary because the entire ID range is
+fine for this purpose, including 0.
+
+Don't use the idr_preload functionality. It is meant for situations
+where the ID allocation happens in a context that cannot fail;
+idr_preload can then be called earlier, where it is still OK to fail.
+Here both happen in the same place, so it's not necessary to preload
+the memory allocation.
+
+Change-Id: I484657cd43904b546a5c605b766955925068ff99
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 10 +---------
+ 1 file changed, 1 insertion(+), 9 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index ecee955..f62baaa 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -60,9 +60,6 @@ static struct workqueue_struct *kfd_process_wq;
+ */
+ static struct workqueue_struct *kfd_restore_wq;
+
+-#define MIN_IDR_ID 1
+-#define MAX_IDR_ID 0 /*0 - for unlimited*/
+-
+ static struct kfd_process *find_process(const struct task_struct *thread,
+ bool ref);
+ static void kfd_process_ref_release(struct kref *ref);
+@@ -842,12 +839,7 @@ int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
+
+ INIT_LIST_HEAD(&buf_obj->cb_data_head);
+
+- idr_preload(GFP_KERNEL);
+-
+- handle = idr_alloc(&pdd->alloc_idr, buf_obj, MIN_IDR_ID, MAX_IDR_ID,
+- GFP_NOWAIT);
+-
+- idr_preload_end();
++ handle = idr_alloc(&pdd->alloc_idr, buf_obj, 0, 0, GFP_KERNEL);
+
+ if (handle < 0)
+ kfree(buf_obj);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4293-drm-amdkfd-Error-if-trying-to-acquire-VM-for-a-PDD-t.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4293-drm-amdkfd-Error-if-trying-to-acquire-VM-for-a-PDD-t.patch
new file mode 100644
index 00000000..9b7c1e86
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4293-drm-amdkfd-Error-if-trying-to-acquire-VM-for-a-PDD-t.patch
@@ -0,0 +1,37 @@
+From 50a8c659c0cf5fe01a2f8cb8a5ae86f291a87af7 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Thu, 26 Apr 2018 15:22:46 -0400
+Subject: [PATCH 4293/5725] drm/amdkfd: Error if trying to acquire VM for a PDD
+ twice
+
+Return an error in kfd_process_device_init_vm if an attempt is made
+to acquire a VM for a PDD that already has a VM. This could happen
+if kfd_ioctl_acquire_vm is called multiple times for the same device
+and process, or if it is called too late, after the process has
+already been bound to the device by another ioctl.
+
+Returning an error here can help detect potential future problems in
+user mode code instead of silently masking them.
+
+Change-Id: I55e46e2654e4d761ae4b43c194bd9a7f1dd3eefa
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index f62baaa..7a28c21 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -718,7 +718,7 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
+ int ret;
+
+ if (pdd->vm)
+- return 0;
++ return drm_file ? -EBUSY : 0;
+
+ p = pdd->process;
+ dev = pdd->dev;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4294-drm-amdkfd-Cosmetic-changes-to-match-upstream.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4294-drm-amdkfd-Cosmetic-changes-to-match-upstream.patch
new file mode 100644
index 00000000..460b39c5
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4294-drm-amdkfd-Cosmetic-changes-to-match-upstream.patch
@@ -0,0 +1,1825 @@
+From 9bde4e85bcb800b0f043f91f9092e8b9d6377e24 Mon Sep 17 00:00:00 2001
+From: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+Date: Wed, 9 Jan 2019 18:20:04 +0530
+Subject: [PATCH 4294/5725] drm/amdkfd: Cosmetic changes to match upstream
+
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+Signed-off-by: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 15 ++-
+ drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 57 ++++++---
+ drivers/gpu/drm/amd/amdkfd/kfd_crat.h | 48 ++++----
+ drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c | 2 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_device.c | 69 ++++++-----
+ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 24 ++--
+ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 9 +-
+ .../drm/amd/amdkfd/kfd_device_queue_manager_v9.c | 4 +-
+ .../drm/amd/amdkfd/kfd_device_queue_manager_vi.c | 29 ++---
+ drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c | 15 +--
+ drivers/gpu/drm/amd/amdkfd/kfd_events.c | 7 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c | 14 +--
+ drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c | 2 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_iommu.c | 3 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c | 2 -
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c | 5 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c | 97 ++++++++-------
+ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 11 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c | 1 -
+ drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 13 +--
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 40 +++----
+ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 130 ++++++++++-----------
+ .../gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 4 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 58 ++++-----
+ drivers/gpu/drm/amd/amdkfd/kfd_topology.h | 9 +-
+ drivers/gpu/drm/amd/amdkfd/soc15_int.h | 2 +-
+ 26 files changed, 334 insertions(+), 336 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index 491652c..bb38da1 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -24,6 +24,7 @@
+ #include <linux/export.h>
+ #include <linux/err.h>
+ #include <linux/fs.h>
++#include <linux/file.h>
+ #include <linux/sched.h>
+ #include <linux/sched/mm.h>
+ #include <linux/slab.h>
+@@ -45,7 +46,6 @@
+ static long kfd_ioctl(struct file *, unsigned int, unsigned long);
+ static int kfd_open(struct inode *, struct file *);
+ static int kfd_mmap(struct file *, struct vm_area_struct *);
+-static bool kfd_dev_is_large_bar(struct kfd_dev *dev);
+
+ static const char kfd_dev_name[] = "kfd";
+
+@@ -903,7 +903,7 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
+ mutex_lock(&p->mutex);
+
+ if (!kfd_has_process_device_data(p))
+- goto out_upwrite;
++ goto out_unlock;
+
+ /* Run over all pdd of the process */
+ pdd = kfd_get_first_process_device_data(p);
+@@ -912,7 +912,7 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
+ pdd = kfd_get_next_process_device_data(p, pdd);
+ } while (pdd);
+
+- goto out_upwrite;
++ goto out_unlock;
+ }
+
+ /* Fill in process-aperture information for all available
+@@ -929,7 +929,7 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
+ if (!kfd_has_process_device_data(p)) {
+ args->num_of_nodes = 0;
+ kfree(pa);
+- goto out_upwrite;
++ goto out_unlock;
+ }
+
+ /* Run over all pdd of the process */
+@@ -971,7 +971,7 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
+ kfree(pa);
+ return ret ? -EFAULT : 0;
+
+-out_upwrite:
++out_unlock:
+ mutex_unlock(&p->mutex);
+ return 0;
+ }
+@@ -1325,8 +1325,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
+ return 0;
+
+ err_free:
+- dev->kfd2kgd->free_memory_of_gpu(dev->kgd,
+- (struct kgd_mem *) mem);
++ dev->kfd2kgd->free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
+ err_unlock:
+ mutex_unlock(&p->mutex);
+ return err;
+@@ -1367,7 +1366,7 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
+ /* If freeing the buffer failed, leave the handle in place for
+ * clean-up during process tear-down.
+ */
+- if (ret == 0)
++ if (!ret)
+ kfd_process_device_remove_obj_handle(
+ pdd, GET_IDR_HANDLE(args->handle));
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+index a803898..6688882 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+@@ -1,7 +1,27 @@
+-#include <linux/kernel.h>
+-#include <linux/acpi.h>
+-#include <linux/mm.h>
++/*
++ * Copyright 2015-2017 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
+ #include <linux/pci.h>
++#include <linux/acpi.h>
+ #include "kfd_crat.h"
+ #include "kfd_priv.h"
+ #include "kfd_topology.h"
+@@ -266,6 +286,7 @@ static int kfd_parse_subtype_cache(struct crat_subtype_cache *cache,
+
+ id = cache->processor_id_low;
+
++ pr_debug("Found cache entry in CRAT table with processor_id=%d\n", id);
+ list_for_each_entry(dev, device_list, list) {
+ total_num_of_cu = (dev->node_props.array_count *
+ dev->node_props.cu_per_simd_array);
+@@ -415,11 +436,15 @@ static int kfd_parse_subtype(struct crat_subtype_generic *sub_type_hdr,
+ ret = kfd_parse_subtype_cache(cache, device_list);
+ break;
+ case CRAT_SUBTYPE_TLB_AFFINITY:
+- /* For now, nothing to do here */
++ /*
++ * For now, nothing to do here
++ */
+ pr_debug("Found TLB entry in CRAT table (not processing)\n");
+ break;
+ case CRAT_SUBTYPE_CCOMPUTE_AFFINITY:
+- /* For now, nothing to do here */
++ /*
++ * For now, nothing to do here
++ */
+ pr_debug("Found CCOMPUTE entry in CRAT table (not processing)\n");
+ break;
+ case CRAT_SUBTYPE_IOLINK_AFFINITY:
+@@ -444,9 +469,8 @@ static int kfd_parse_subtype(struct crat_subtype_generic *sub_type_hdr,
+ *
+ * Return - 0 if successful else -ve value
+ */
+-int kfd_parse_crat_table(void *crat_image,
+- struct list_head *device_list,
+- uint32_t proximity_domain)
++int kfd_parse_crat_table(void *crat_image, struct list_head *device_list,
++ uint32_t proximity_domain)
+ {
+ struct kfd_topology_device *top_dev = NULL;
+ struct crat_subtype_generic *sub_type_hdr;
+@@ -693,7 +717,7 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
+ * crat_image will be NULL
+ * @size: [OUT] size of crat_image
+ *
+- * Return 0 if successful else return -ve value
++ * Return 0 if successful else return error code
+ */
+ #ifdef CONFIG_ACPI
+ int kfd_create_crat_image_acpi(void **crat_image, size_t *size)
+@@ -725,10 +749,8 @@ int kfd_create_crat_image_acpi(void **crat_image, size_t *size)
+ }
+
+ pcrat_image = kmalloc(crat_table->length, GFP_KERNEL);
+- if (!pcrat_image) {
+- pr_err("No memory for allocating CRAT image\n");
++ if (!pcrat_image)
+ return -ENOMEM;
+- }
+
+ memcpy(pcrat_image, crat_table, crat_table->length);
+
+@@ -1072,8 +1094,8 @@ static int kfd_fill_gpu_direct_io_link(int *avail_size,
+ * [OUT] actual size of data filled in crat_image
+ */
+ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
+- size_t *size, struct kfd_dev *kdev,
+- uint32_t proximity_domain)
++ size_t *size, struct kfd_dev *kdev,
++ uint32_t proximity_domain)
+ {
+ struct crat_header *crat_table = (struct crat_header *)pcrat_image;
+ struct crat_subtype_generic *sub_type_hdr;
+@@ -1241,7 +1263,8 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
+ * Return 0 if successful else return -ve value
+ */
+ int kfd_create_crat_image_virtual(void **crat_image, size_t *size,
+- int flags, struct kfd_dev *kdev, uint32_t proximity_domain)
++ int flags, struct kfd_dev *kdev,
++ uint32_t proximity_domain)
+ {
+ void *pcrat_image = NULL;
+ int ret = 0;
+@@ -1271,8 +1294,8 @@ int kfd_create_crat_image_virtual(void **crat_image, size_t *size,
+ if (!pcrat_image)
+ return -ENOMEM;
+ *size = VCRAT_SIZE_FOR_GPU;
+- ret = kfd_create_vcrat_image_gpu(pcrat_image, size,
+- kdev, proximity_domain);
++ ret = kfd_create_vcrat_image_gpu(pcrat_image, size, kdev,
++ proximity_domain);
+ break;
+ case (COMPUTE_UNIT_CPU | COMPUTE_UNIT_GPU):
+ /* TODO: */
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
+index 00de41f..cd7ee6d 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
+@@ -24,7 +24,6 @@
+ #define KFD_CRAT_H_INCLUDED
+
+ #include <linux/types.h>
+-#include "kfd_priv.h"
+
+ #pragma pack(1)
+
+@@ -228,12 +227,12 @@ struct crat_subtype_ccompute {
+ /*
+ * HSA IO Link Affinity structure and definitions
+ */
+-#define CRAT_IOLINK_FLAGS_ENABLED (1 << 0)
+-#define CRAT_IOLINK_FLAGS_NON_COHERENT (1 << 1)
+-#define CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT (1 << 2)
+-#define CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT (1 << 3)
+-#define CRAT_IOLINK_FLAGS_NO_PEER_TO_PEER_DMA (1 << 4)
+-#define CRAT_IOLINK_FLAGS_RESERVED_MASK 0xffffffe0
++#define CRAT_IOLINK_FLAGS_ENABLED (1 << 0)
++#define CRAT_IOLINK_FLAGS_NON_COHERENT (1 << 1)
++#define CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT (1 << 2)
++#define CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT (1 << 3)
++#define CRAT_IOLINK_FLAGS_NO_PEER_TO_PEER_DMA (1 << 4)
++#define CRAT_IOLINK_FLAGS_RESERVED_MASK 0xffffffe0
+
+ /*
+ * IO interface types
+@@ -241,18 +240,18 @@ struct crat_subtype_ccompute {
+ #define CRAT_IOLINK_TYPE_UNDEFINED 0
+ #define CRAT_IOLINK_TYPE_HYPERTRANSPORT 1
+ #define CRAT_IOLINK_TYPE_PCIEXPRESS 2
+-#define CRAT_IOLINK_TYPE_AMBA 3
+-#define CRAT_IOLINK_TYPE_MIPI 4
+-#define CRAT_IOLINK_TYPE_QPI_1_1 5
+-#define CRAT_IOLINK_TYPE_RESERVED1 6
+-#define CRAT_IOLINK_TYPE_RESERVED2 7
+-#define CRAT_IOLINK_TYPE_RAPID_IO 8
+-#define CRAT_IOLINK_TYPE_INFINIBAND 9
+-#define CRAT_IOLINK_TYPE_RESERVED3 10
+-#define CRAT_IOLINK_TYPE_OTHER 11
+-#define CRAT_IOLINK_TYPE_MAX 255
+-
+-#define CRAT_IOLINK_RESERVED_LENGTH 24
++#define CRAT_IOLINK_TYPE_AMBA 3
++#define CRAT_IOLINK_TYPE_MIPI 4
++#define CRAT_IOLINK_TYPE_QPI_1_1 5
++#define CRAT_IOLINK_TYPE_RESERVED1 6
++#define CRAT_IOLINK_TYPE_RESERVED2 7
++#define CRAT_IOLINK_TYPE_RAPID_IO 8
++#define CRAT_IOLINK_TYPE_INFINIBAND 9
++#define CRAT_IOLINK_TYPE_RESERVED3 10
++#define CRAT_IOLINK_TYPE_OTHER 11
++#define CRAT_IOLINK_TYPE_MAX 255
++
++#define CRAT_IOLINK_RESERVED_LENGTH 24
+
+ struct crat_subtype_iolink {
+ uint8_t type;
+@@ -308,13 +307,16 @@ struct cdit_header {
+
+ #pragma pack()
+
++struct kfd_dev;
++
+ #ifdef CONFIG_ACPI
+ int kfd_create_crat_image_acpi(void **crat_image, size_t *size);
+ #endif
+ void kfd_destroy_crat_image(void *crat_image);
+-int kfd_parse_crat_table(void *crat_image,
+- struct list_head *device_list,
+- uint32_t proximity_domain);
++int kfd_parse_crat_table(void *crat_image, struct list_head *device_list,
++ uint32_t proximity_domain);
+ int kfd_create_crat_image_virtual(void **crat_image, size_t *size,
+- int flags, struct kfd_dev *kdev, uint32_t proximity_domain);
++ int flags, struct kfd_dev *kdev,
++ uint32_t proximity_domain);
++
+ #endif /* KFD_CRAT_H_INCLUDED */
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
+index 232e28f..4bd6ebf 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2014 Advanced Micro Devices, Inc.
++ * Copyright 2016-2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+index 5b22ae0..8fb7580 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -240,6 +240,7 @@ static const struct kfd_device_info vega10_vf_device_info = {
+ .num_sdma_engines = 2,
+ };
+
++
+ struct kfd_deviceid {
+ unsigned short did;
+ const struct kfd_device_info *device_info;
+@@ -288,35 +289,35 @@ static const struct kfd_deviceid supported_devices[] = {
+ { 0x67B9, &hawaii_device_info }, /* Hawaii */
+ { 0x67BA, &hawaii_device_info }, /* Hawaii */
+ { 0x67BE, &hawaii_device_info }, /* Hawaii */
+- { 0x6920, &tonga_device_info }, /* Tonga */
+- { 0x6921, &tonga_device_info }, /* Tonga */
+- { 0x6928, &tonga_device_info }, /* Tonga */
+- { 0x6929, &tonga_device_info }, /* Tonga */
+- { 0x692B, &tonga_device_info }, /* Tonga */
+- { 0x692F, &tonga_vf_device_info }, /* Tonga vf */
+- { 0x6938, &tonga_device_info }, /* Tonga */
+- { 0x6939, &tonga_device_info }, /* Tonga */
+- { 0x7300, &fiji_device_info }, /* Fiji */
+- { 0x730F, &fiji_vf_device_info }, /* Fiji vf*/
+- { 0x67C0, &polaris10_device_info }, /* Polaris10 */
+- { 0x67C1, &polaris10_device_info }, /* Polaris10 */
+- { 0x67C2, &polaris10_device_info }, /* Polaris10 */
++ { 0x6920, &tonga_device_info }, /* Tonga */
++ { 0x6921, &tonga_device_info }, /* Tonga */
++ { 0x6928, &tonga_device_info }, /* Tonga */
++ { 0x6929, &tonga_device_info }, /* Tonga */
++ { 0x692B, &tonga_device_info }, /* Tonga */
++ { 0x692F, &tonga_vf_device_info }, /* Tonga vf */
++ { 0x6938, &tonga_device_info }, /* Tonga */
++ { 0x6939, &tonga_device_info }, /* Tonga */
++ { 0x7300, &fiji_device_info }, /* Fiji */
++ { 0x730F, &fiji_vf_device_info }, /* Fiji vf*/
++ { 0x67C0, &polaris10_device_info }, /* Polaris10 */
++ { 0x67C1, &polaris10_device_info }, /* Polaris10 */
++ { 0x67C2, &polaris10_device_info }, /* Polaris10 */
+ { 0x67C4, &polaris10_device_info }, /* Polaris10 */
+ { 0x67C7, &polaris10_device_info }, /* Polaris10 */
+- { 0x67C8, &polaris10_device_info }, /* Polaris10 */
+- { 0x67C9, &polaris10_device_info }, /* Polaris10 */
+- { 0x67CA, &polaris10_device_info }, /* Polaris10 */
+- { 0x67CC, &polaris10_device_info }, /* Polaris10 */
+- { 0x67CF, &polaris10_device_info }, /* Polaris10 */
+- { 0x67D0, &polaris10_vf_device_info }, /* Polaris10 vf*/
++ { 0x67C8, &polaris10_device_info }, /* Polaris10 */
++ { 0x67C9, &polaris10_device_info }, /* Polaris10 */
++ { 0x67CA, &polaris10_device_info }, /* Polaris10 */
++ { 0x67CC, &polaris10_device_info }, /* Polaris10 */
++ { 0x67CF, &polaris10_device_info }, /* Polaris10 */
++ { 0x67D0, &polaris10_vf_device_info }, /* Polaris10 vf*/
+ { 0x67DF, &polaris10_device_info }, /* Polaris10 */
+- { 0x67E0, &polaris11_device_info }, /* Polaris11 */
+- { 0x67E1, &polaris11_device_info }, /* Polaris11 */
++ { 0x67E0, &polaris11_device_info }, /* Polaris11 */
++ { 0x67E1, &polaris11_device_info }, /* Polaris11 */
+ { 0x67E3, &polaris11_device_info }, /* Polaris11 */
+- { 0x67E7, &polaris11_device_info }, /* Polaris11 */
+- { 0x67E8, &polaris11_device_info }, /* Polaris11 */
+- { 0x67E9, &polaris11_device_info }, /* Polaris11 */
+- { 0x67EB, &polaris11_device_info }, /* Polaris11 */
++ { 0x67E7, &polaris11_device_info }, /* Polaris11 */
++ { 0x67E8, &polaris11_device_info }, /* Polaris11 */
++ { 0x67E9, &polaris11_device_info }, /* Polaris11 */
++ { 0x67EB, &polaris11_device_info }, /* Polaris11 */
+ { 0x67EF, &polaris11_device_info }, /* Polaris11 */
+ { 0x67FF, &polaris11_device_info }, /* Polaris11 */
+ { 0x6860, &vega10_device_info }, /* Vega10 */
+@@ -366,11 +367,10 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
+ return NULL;
+ }
+
+- if (device_info->needs_pci_atomics) {
+- /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
+- * 32 and 64-bit requests are possible and must be
+- * supported.
+- */
++ /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
++ * 32 and 64-bit requests are possible and must be
++ * supported.
++ */
+ ret = pci_enable_atomic_ops_to_root(pdev,
+ PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
+ PCI_EXP_DEVCAP2_ATOMIC_COMP64);
+@@ -379,7 +379,6 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
+ "skipped device %x:%x, PCI rejects atomics",
+ pdev->vendor, pdev->device);
+ return NULL;
+- }
+ }
+
+ kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
+@@ -427,7 +426,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
+ KGD_ENGINE_SDMA1);
+ kfd->shared_resources = *gpu_resources;
+
+- /* Usually first_vmid_kfd = 8, last_vmid_kfd = 15 */
+ kfd->vm_info.first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1;
+ kfd->vm_info.last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1;
+ kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd
+@@ -669,10 +667,11 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
+
+ spin_lock(&kfd->interrupt_lock);
+
+- if (kfd->interrupts_active && interrupt_is_wanted(kfd, ih_ring_entry,
+- patched_ihre, &is_patched)
++ if (kfd->interrupts_active
++ && interrupt_is_wanted(kfd, ih_ring_entry,
++ patched_ihre, &is_patched)
+ && enqueue_ih_ring_entry(kfd,
+- is_patched ? patched_ihre : ih_ring_entry))
++ is_patched ? patched_ihre : ih_ring_entry))
+ queue_work(kfd->ih_wq, &kfd->interrupt_work);
+
+ spin_unlock(&kfd->interrupt_lock);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index 8067092..d7822e2 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -21,10 +21,11 @@
+ *
+ */
+
++#include <linux/ratelimit.h>
++#include <linux/printk.h>
+ #include <linux/slab.h>
+ #include <linux/list.h>
+ #include <linux/types.h>
+-#include <linux/printk.h>
+ #include <linux/bitops.h>
+ #include <linux/sched.h>
+ #include "kfd_priv.h"
+@@ -199,7 +200,7 @@ static int allocate_vmid(struct device_queue_manager *dqm,
+ dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->kgd,
+ qpd->vmid,
+ qpd->page_table_base);
+- /*invalidate the VM context after pasid and vmid mapping is set up*/
++ /* invalidate the VM context after pasid and vmid mapping is set up */
+ kfd_flush_tlb(qpd_to_pdd(qpd));
+
+ return 0;
+@@ -289,7 +290,6 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
+ if (retval) {
+ if (list_empty(&qpd->queues_list))
+ deallocate_vmid(dqm, qpd, q);
+-
+ goto out_unlock;
+ }
+
+@@ -482,11 +482,9 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
+ int retval;
+ struct mqd_manager *mqd;
+ struct kfd_process_device *pdd;
+-
+ bool prev_active = false;
+
+ mutex_lock(&dqm->lock);
+-
+ pdd = kfd_get_process_device_data(q->device, q->process);
+ if (!pdd) {
+ retval = -ENODEV;
+@@ -502,7 +500,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
+ * Eviction state logic: we only mark active queues as evicted
+ * to avoid the overhead of restoring inactive queues later
+ */
+- if (pdd->qpd.evicted > 0)
++ if (pdd->qpd.evicted)
+ q->properties.is_evicted = (q->properties.queue_size > 0 &&
+ q->properties.queue_percent > 0 &&
+ q->properties.queue_address != 0);
+@@ -762,9 +760,9 @@ static int register_process(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+ {
+ struct device_process_node *n;
+- int retval;
+ struct kfd_process_device *pdd;
+ uint32_t pd_base;
++ int retval;
+
+ n = kzalloc(sizeof(*n), GFP_KERNEL);
+ if (!n)
+@@ -781,7 +779,6 @@ static int register_process(struct device_queue_manager *dqm,
+
+ /* Update PD Base in QPD */
+ qpd->page_table_base = pd_base;
+- pr_debug("Updated PD address to 0x%08x\n", pd_base);
+
+ retval = dqm->asic_ops.update_qpd(dqm, qpd);
+
+@@ -1076,9 +1073,7 @@ static int start_cpsch(struct device_queue_manager *dqm)
+ static int stop_cpsch(struct device_queue_manager *dqm)
+ {
+ mutex_lock(&dqm->lock);
+-
+ unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
+-
+ mutex_unlock(&dqm->lock);
+
+ kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
+@@ -1633,7 +1628,6 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
+
+ out:
+ mutex_unlock(&dqm->lock);
+-
+ return retval;
+ }
+
+@@ -1648,7 +1642,13 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
+ return NULL;
+
+ switch (dev->device_info->asic_family) {
++ /* HWS is not available on Hawaii. */
+ case CHIP_HAWAII:
++ /* HWS depends on CWSR for timely dequeue. CWSR is not
++ * available on Tonga.
++ *
++ * FIXME: This argument also applies to Kaveri.
++ */
+ case CHIP_TONGA:
+ dqm->sched_policy = KFD_SCHED_POLICY_NO_HWS;
+ break;
+@@ -1728,7 +1728,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
+
+ case CHIP_VEGA10:
+ case CHIP_RAVEN:
+- device_queue_manager_init_v9_vega10(&dqm->asic_ops);
++ device_queue_manager_init_v9(&dqm->asic_ops);
+ break;
+ default:
+ WARN(1, "Unexpected ASIC family %u",
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+index 3f17e5e..82fafd0 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+@@ -209,7 +209,7 @@ void device_queue_manager_init_vi(
+ struct device_queue_manager_asic_ops *asic_ops);
+ void device_queue_manager_init_vi_tonga(
+ struct device_queue_manager_asic_ops *asic_ops);
+-void device_queue_manager_init_v9_vega10(
++void device_queue_manager_init_v9(
+ struct device_queue_manager_asic_ops *asic_ops);
+ void program_sh_mem_settings(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd);
+@@ -218,18 +218,11 @@ unsigned int get_queues_per_pipe(struct device_queue_manager *dqm);
+ unsigned int get_pipes_per_mec(struct device_queue_manager *dqm);
+ unsigned int get_num_sdma_queues(struct device_queue_manager *dqm);
+
+-int process_evict_queues(struct device_queue_manager *dqm,
+- struct qcm_process_device *qpd);
+-int process_restore_queues(struct device_queue_manager *dqm,
+- struct qcm_process_device *qpd);
+-
+-
+ static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
+ {
+ return (pdd->lds_base >> 16) & 0xFF;
+ }
+
+-/* This function is only useful for GFXv7 and v8 */
+ static inline unsigned int
+ get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
+ {
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
+index cc27190..4175153 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2016 Advanced Micro Devices, Inc.
++ * Copyright 2016-2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+@@ -32,7 +32,7 @@ static int update_qpd_v9(struct device_queue_manager *dqm,
+ static void init_sdma_vm_v9(struct device_queue_manager *dqm, struct queue *q,
+ struct qcm_process_device *qpd);
+
+-void device_queue_manager_init_v9_vega10(
++void device_queue_manager_init_v9(
+ struct device_queue_manager_asic_ops *asic_ops)
+ {
+ asic_ops->update_qpd = update_qpd_v9;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
+index 030b014..fd60a11 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
+@@ -33,35 +33,22 @@ static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size);
+-static int update_qpd_vi(struct device_queue_manager *dqm,
+- struct qcm_process_device *qpd);
+-static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
+- struct qcm_process_device *qpd);
+-
+-/*
+- * Tonga device queue manager functions
+- */
+ static bool set_cache_memory_policy_vi_tonga(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size);
++static int update_qpd_vi(struct device_queue_manager *dqm,
++ struct qcm_process_device *qpd);
+ static int update_qpd_vi_tonga(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd);
++static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
++ struct qcm_process_device *qpd);
+ static void init_sdma_vm_tonga(struct device_queue_manager *dqm,
+ struct queue *q,
+ struct qcm_process_device *qpd);
+
+-void device_queue_manager_init_vi_tonga(
+- struct device_queue_manager_asic_ops *asic_ops)
+-{
+- asic_ops->set_cache_memory_policy = set_cache_memory_policy_vi_tonga;
+- asic_ops->update_qpd = update_qpd_vi_tonga;
+- asic_ops->init_sdma_vm = init_sdma_vm_tonga;
+-}
+-
+-
+ void device_queue_manager_init_vi(
+ struct device_queue_manager_asic_ops *asic_ops)
+ {
+@@ -70,6 +57,14 @@ void device_queue_manager_init_vi(
+ asic_ops->init_sdma_vm = init_sdma_vm;
+ }
+
++void device_queue_manager_init_vi_tonga(
++ struct device_queue_manager_asic_ops *asic_ops)
++{
++ asic_ops->set_cache_memory_policy = set_cache_memory_policy_vi_tonga;
++ asic_ops->update_qpd = update_qpd_vi_tonga;
++ asic_ops->init_sdma_vm = init_sdma_vm_tonga;
++}
++
+ static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
+ {
+ /* In 64-bit mode, we can only control the top 3 bits of the LDS,
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+index fc41689..c3744d8 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+@@ -115,7 +115,7 @@ int kfd_doorbell_init(struct kfd_dev *kfd)
+ pr_debug("doorbell aperture size == 0x%08lX\n",
+ kfd->shared_resources.doorbell_aperture_size);
+
+- pr_debug("doorbell kernel address == 0x%p\n", kfd->doorbell_kernel_ptr);
++ pr_debug("doorbell kernel address == %p\n", kfd->doorbell_kernel_ptr);
+
+ return 0;
+ }
+@@ -189,7 +189,7 @@ void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
+
+ pr_debug("Get kernel queue doorbell\n"
+ " doorbell offset == 0x%08X\n"
+- " kernel address == 0x%p\n",
++ " kernel address == %p\n",
+ *doorbell_off, (kfd->doorbell_kernel_ptr + inx));
+
+ return kfd->doorbell_kernel_ptr + inx;
+@@ -210,7 +210,7 @@ void write_kernel_doorbell(void __iomem *db, u32 value)
+ {
+ if (db) {
+ writel(value, db);
+- pr_debug("Writing %d to doorbell address 0x%p\n", value, db);
++ pr_debug("Writing %d to doorbell address %p\n", value, db);
+ }
+ }
+
+@@ -220,14 +220,10 @@ void write_kernel_doorbell64(void __iomem *db, u64 value)
+ WARN(((unsigned long)db & 7) != 0,
+ "Unaligned 64-bit doorbell");
+ writeq(value, (u64 __iomem *)db);
+- pr_debug("writing %llu to doorbell address 0x%p\n", value, db);
++ pr_debug("writing %llu to doorbell address %p\n", value, db);
+ }
+ }
+
+-/*
+- * queue_ids are in the range [0,MAX_PROCESS_QUEUES) and are mapped 1:1
+- * to doorbells with the process's doorbell page
+- */
+ unsigned int kfd_doorbell_id_to_offset(struct kfd_dev *kfd,
+ struct kfd_process *process,
+ unsigned int doorbell_id)
+@@ -239,7 +235,8 @@ unsigned int kfd_doorbell_id_to_offset(struct kfd_dev *kfd,
+ * units regardless of the ASIC-dependent doorbell size.
+ */
+ return kfd->doorbell_id_offset +
+- process->doorbell_index * (kfd_doorbell_process_slice(kfd)/sizeof(u32)) +
++ process->doorbell_index
++ * kfd_doorbell_process_slice(kfd) / sizeof(u32) +
+ doorbell_id * kfd->device_info->doorbell_size / sizeof(u32);
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+index 24d8a21..1dc1584 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+@@ -390,7 +390,11 @@ static void set_event(struct kfd_event *ev)
+ {
+ struct kfd_event_waiter *waiter;
+
+- /* Auto reset if the list is non-empty and we're waking someone. */
++ /* Auto reset if the list is non-empty and we're waking
++ * someone. waitqueue_active is safe here because we're
++ * protected by the p->event_mutex, which is also held when
++ * updating the wait queues in kfd_wait_on_events.
++ */
+ ev->signaled = !ev->auto_reset || !waitqueue_active(&ev->wq);
+
+ list_for_each_entry(waiter, &ev->wq.head, wait.entry)
+@@ -777,7 +781,6 @@ int kfd_wait_on_events(struct kfd_process *p,
+
+ int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
+ {
+-
+ unsigned long pfn;
+ struct kfd_signal_page *page;
+ int ret;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+index 5672710..0cae2e9 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+@@ -289,7 +289,6 @@
+
+ #define MAKE_LDS_APP_BASE_VI() \
+ (((uint64_t)(0x1UL) << 61) + 0x0)
+-
+ #define MAKE_LDS_APP_LIMIT(base) \
+ (((uint64_t)(base) & 0xFFFFFFFF00000000UL) | 0xFFFFFFFF)
+
+@@ -323,7 +322,7 @@ int kfd_set_process_dgpu_aperture(struct kfd_process_device *pdd,
+ return 0;
+ }
+
+-void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id)
++static void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id)
+ {
+ /*
+ * node id couldn't be 0 - the three MSB bits of
+@@ -353,7 +352,7 @@ void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id)
+ pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base);
+ }
+
+-void kfd_init_apertures_v9(struct kfd_process_device *pdd, uint8_t id)
++static void kfd_init_apertures_v9(struct kfd_process_device *pdd, uint8_t id)
+ {
+ pdd->lds_base = MAKE_LDS_APP_BASE_V9();
+ pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base);
+@@ -388,10 +387,10 @@ int kfd_init_apertures(struct kfd_process *process)
+ pdd = kfd_create_process_device_data(dev, process);
+ if (!pdd) {
+ pr_err("Failed to create process device data\n");
+- return -1;
++ return -ENOMEM;
+ }
+ /*
+- * For 64 bit process aperture will be statically reserved in
++ * For 64 bit process apertures will be statically reserved in
+ * the x86_64 non canonical process address space
+ * amdkfd doesn't currently support apertures for 32 bit process
+ */
+@@ -415,8 +414,9 @@ int kfd_init_apertures(struct kfd_process *process)
+ kfd_init_apertures_v9(pdd, id);
+ break;
+ default:
+- pr_err("Unknown chip in kfd_init_apertures\n");
+- return -1;
++ WARN(1, "Unexpected ASIC family %u",
++ dev->device_info->asic_family);
++ return -EINVAL;
+ }
+
+ if (!dev->device_info->needs_iommu_device) {
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+index 728aaad..5217e51 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2016 Advanced Micro Devices, Inc.
++ * Copyright 2016-2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
+index 5b798f9..7a61f38 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
+@@ -75,7 +75,8 @@ int kfd_iommu_device_init(struct kfd_dev *kfd)
+ }
+
+ if ((iommu_info.flags & required_iommu_flags) != required_iommu_flags) {
+- dev_err(kfd_device, "error required iommu flags ats %i, pri %i, pasid %i\n",
++ dev_err(kfd_device,
++ "error required iommu flags ats %i, pri %i, pasid %i\n",
+ (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP) != 0,
+ (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) != 0,
+ (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP)
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c
+index b48c29f..19e54ac 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c
+@@ -22,8 +22,6 @@
+ */
+
+ #include "kfd_kernel_queue.h"
+-#include "kfd_pm4_headers.h"
+-#include "kfd_pm4_opcodes.h"
+
+ static bool initialize_cik(struct kernel_queue *kq, struct kfd_dev *dev,
+ enum kfd_queue_type type, unsigned int queue_size);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
+index 6724b1a..684a3bf 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2016 Advanced Micro Devices, Inc.
++ * Copyright 2016-2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+@@ -44,7 +44,7 @@ static bool initialize_v9(struct kernel_queue *kq, struct kfd_dev *dev,
+ int retval;
+
+ retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem);
+- if (retval != 0)
++ if (retval)
+ return false;
+
+ kq->eop_gpu_addr = kq->eop_mem->gpu_addr;
+@@ -126,7 +126,6 @@ static int pm_runlist_v9(struct packet_manager *pm, uint32_t *buffer,
+ concurrent_proc_cnt = min(pm->dqm->processes_count,
+ kfd->max_proc_per_quantum);
+
+-
+ packet = (struct pm4_mes_runlist *)buffer;
+
+ memset(buffer, 0, sizeof(struct pm4_mes_runlist));
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
+index 357478f..bf20c6d 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
+@@ -67,12 +67,25 @@ static void submit_packet_vi(struct kernel_queue *kq)
+ kq->pending_wptr);
+ }
+
+-static int pm_map_process_vi(struct packet_manager *pm,
+- uint32_t *buffer, struct qcm_process_device *qpd)
++unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size)
++{
++ union PM4_MES_TYPE_3_HEADER header;
++
++ header.u32All = 0;
++ header.opcode = opcode;
++ header.count = packet_size / 4 - 2;
++ header.type = PM4_TYPE_3;
++
++ return header.u32All;
++}
++
++static int pm_map_process_vi(struct packet_manager *pm, uint32_t *buffer,
++ struct qcm_process_device *qpd)
+ {
+ struct pm4_mes_map_process *packet;
+
+ packet = (struct pm4_mes_map_process *)buffer;
++
+ memset(buffer, 0, sizeof(struct pm4_mes_map_process));
+
+ packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS,
+@@ -99,27 +112,16 @@ static int pm_map_process_vi(struct packet_manager *pm,
+ return 0;
+ }
+
+-
+-unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size)
+-{
+- union PM4_MES_TYPE_3_HEADER header;
+-
+- header.u32All = 0;
+- header.opcode = opcode;
+- header.count = packet_size / 4 - 2;
+- header.type = PM4_TYPE_3;
+-
+- return header.u32All;
+-}
+-
+ static int pm_runlist_vi(struct packet_manager *pm, uint32_t *buffer,
+ uint64_t ib, size_t ib_size_in_dwords, bool chain)
+ {
+ struct pm4_mes_runlist *packet;
+-
+ int concurrent_proc_cnt = 0;
+ struct kfd_dev *kfd = pm->dqm->dev;
+
++ if (WARN_ON(!ib))
++ return -EFAULT;
++
+ /* Determine the number of processes to map together to HW:
+ * it can not exceed the number of VMIDs available to the
+ * scheduler, and it is determined by the smaller of the number
+@@ -132,7 +134,6 @@ static int pm_runlist_vi(struct packet_manager *pm, uint32_t *buffer,
+ concurrent_proc_cnt = min(pm->dqm->processes_count,
+ kfd->max_proc_per_quantum);
+
+-
+ packet = (struct pm4_mes_runlist *)buffer;
+
+ memset(buffer, 0, sizeof(struct pm4_mes_runlist));
+@@ -150,6 +151,34 @@ static int pm_runlist_vi(struct packet_manager *pm, uint32_t *buffer,
+ return 0;
+ }
+
++int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer,
++ struct scheduling_resources *res)
++{
++ struct pm4_mes_set_resources *packet;
++
++ packet = (struct pm4_mes_set_resources *)buffer;
++ memset(buffer, 0, sizeof(struct pm4_mes_set_resources));
++
++ packet->header.u32All = pm_build_pm4_header(IT_SET_RESOURCES,
++ sizeof(struct pm4_mes_set_resources));
++
++ packet->bitfields2.queue_type =
++ queue_type__mes_set_resources__hsa_interface_queue_hiq;
++ packet->bitfields2.vmid_mask = res->vmid_mask;
++ packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY_MS / 100;
++ packet->bitfields7.oac_mask = res->oac_mask;
++ packet->bitfields8.gds_heap_base = res->gds_heap_base;
++ packet->bitfields8.gds_heap_size = res->gds_heap_size;
++
++ packet->gws_mask_lo = lower_32_bits(res->gws_mask);
++ packet->gws_mask_hi = upper_32_bits(res->gws_mask);
++
++ packet->queue_mask_lo = lower_32_bits(res->queue_mask);
++ packet->queue_mask_hi = upper_32_bits(res->queue_mask);
++
++ return 0;
++}
++
+ static int pm_map_queues_vi(struct packet_manager *pm, uint32_t *buffer,
+ struct queue *q, bool is_static)
+ {
+@@ -209,34 +238,6 @@ static int pm_map_queues_vi(struct packet_manager *pm, uint32_t *buffer,
+ return 0;
+ }
+
+-int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer,
+- struct scheduling_resources *res)
+-{
+- struct pm4_mes_set_resources *packet;
+-
+- packet = (struct pm4_mes_set_resources *)buffer;
+- memset(buffer, 0, sizeof(struct pm4_mes_set_resources));
+-
+- packet->header.u32All = pm_build_pm4_header(IT_SET_RESOURCES,
+- sizeof(struct pm4_mes_set_resources));
+-
+- packet->bitfields2.queue_type =
+- queue_type__mes_set_resources__hsa_interface_queue_hiq;
+- packet->bitfields2.vmid_mask = res->vmid_mask;
+- packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY_MS / 100;
+- packet->bitfields7.oac_mask = res->oac_mask;
+- packet->bitfields8.gds_heap_base = res->gds_heap_base;
+- packet->bitfields8.gds_heap_size = res->gds_heap_size;
+-
+- packet->gws_mask_lo = lower_32_bits(res->gws_mask);
+- packet->gws_mask_hi = upper_32_bits(res->gws_mask);
+-
+- packet->queue_mask_lo = lower_32_bits(res->queue_mask);
+- packet->queue_mask_hi = upper_32_bits(res->queue_mask);
+-
+- return 0;
+-}
+-
+ static int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer,
+ enum kfd_queue_type type,
+ enum kfd_unmap_queues_filter filter,
+@@ -310,7 +311,6 @@ static int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer,
+ packet = (struct pm4_mes_query_status *)buffer;
+ memset(buffer, 0, sizeof(struct pm4_mes_query_status));
+
+-
+ packet->header.u32All = pm_build_pm4_header(IT_QUERY_STATUS,
+ sizeof(struct pm4_mes_query_status));
+
+@@ -328,16 +328,15 @@ static int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer,
+ return 0;
+ }
+
+-
+ static int pm_release_mem_vi(uint64_t gpu_addr, uint32_t *buffer)
+ {
+ struct pm4_mec_release_mem *packet;
+
+ packet = (struct pm4_mec_release_mem *)buffer;
+- memset(buffer, 0, sizeof(struct pm4_mec_release_mem));
++ memset(buffer, 0, sizeof(*packet));
+
+ packet->header.u32All = pm_build_pm4_header(IT_RELEASE_MEM,
+- sizeof(struct pm4_mec_release_mem));
++ sizeof(*packet));
+
+ packet->bitfields2.event_type = CACHE_FLUSH_AND_INV_TS_EVENT;
+ packet->bitfields2.event_index = event_index___release_mem__end_of_pipe;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+index d556779..cc2c3fb 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2016 Advanced Micro Devices, Inc.
++ * Copyright 2016-2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+@@ -217,8 +217,9 @@ static int update_mqd(struct mqd_manager *mm, void *mqd,
+ pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
+ m->cp_hqd_pq_doorbell_control);
+
+- m->cp_hqd_ib_control = 3 << CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT |
+- 1 << CP_HQD_IB_CONTROL__IB_EXE_DISABLE__SHIFT;
++ m->cp_hqd_ib_control =
++ 3 << CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT |
++ 1 << CP_HQD_IB_CONTROL__IB_EXE_DISABLE__SHIFT;
+
+ /*
+ * HW does not clamp this field correctly. Maximum EOP queue size
+@@ -243,8 +244,8 @@ static int update_mqd(struct mqd_manager *mm, void *mqd,
+ 2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT |
+ 1 << CP_HQD_PQ_CONTROL__QUEUE_FULL_EN__SHIFT |
+ 1 << CP_HQD_PQ_CONTROL__WPP_CLAMP_EN__SHIFT;
+- m->cp_hqd_pq_doorbell_control |=
+- 1 << CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT;
++ m->cp_hqd_pq_doorbell_control |= 1 <<
++ CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT;
+ }
+ if (priv_cp_queues)
+ m->cp_hqd_pq_control |=
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+index c537f37..e3ae2d4 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+@@ -544,4 +544,3 @@ struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type,
+ mqd->update_mqd = update_mqd_tonga;
+ return mqd;
+ }
+-
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+index cd380ad..c317feb4 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+@@ -26,7 +26,6 @@
+ #include "kfd_device_queue_manager.h"
+ #include "kfd_kernel_queue.h"
+ #include "kfd_priv.h"
+-#include "kfd_pm4_opcodes.h"
+
+ static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
+ unsigned int buffer_size_bytes)
+@@ -45,8 +44,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
+ unsigned int process_count, queue_count, compute_queue_count;
+ unsigned int map_queue_size;
+ unsigned int max_proc_per_quantum = 1;
+-
+- struct kfd_dev *dev = pm->dqm->dev;
++ struct kfd_dev *dev = pm->dqm->dev;
+
+ process_count = pm->dqm->processes_count;
+ queue_count = pm->dqm->queue_count;
+@@ -57,14 +55,13 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
+ * hws_max_conc_proc has been done in
+ * kgd2kfd_device_init().
+ */
+-
+ *over_subscription = false;
+
+ if (dev->max_proc_per_quantum > 1)
+ max_proc_per_quantum = dev->max_proc_per_quantum;
+
+ if ((process_count > max_proc_per_quantum) ||
+- compute_queue_count > get_queues_num(pm->dqm)) {
++ compute_queue_count > get_queues_num(pm->dqm)) {
+ *over_subscription = true;
+ pr_debug("Over subscribed runlist\n");
+ }
+@@ -193,6 +190,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
+ &rl_buffer[rl_wptr],
+ q,
+ qpd->is_debug);
++
+ if (retval)
+ return retval;
+
+@@ -301,8 +299,7 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
+
+ pr_debug("runlist IB address: 0x%llX\n", rl_gpu_ib_addr);
+
+- packet_size_dwords = pm->pmf->runlist_size /
+- sizeof(uint32_t);
++ packet_size_dwords = pm->pmf->runlist_size / sizeof(uint32_t);
+ mutex_lock(&pm->lock);
+
+ retval = pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
+@@ -311,7 +308,7 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
+ goto fail_acquire_packet_buffer;
+
+ retval = pm->pmf->runlist(pm, rl_buffer, rl_gpu_ib_addr,
+- rl_ib_size / sizeof(uint32_t), false);
++ rl_ib_size / sizeof(uint32_t), false);
+ if (retval)
+ goto fail_create_runlist;
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index bb05e95..fffdec6 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -30,13 +30,13 @@
+ #include <linux/atomic.h>
+ #include <linux/workqueue.h>
+ #include <linux/spinlock.h>
+-#include <linux/idr.h>
+ #include <linux/kfd_ioctl.h>
+-#include <linux/pid.h>
+-#include <linux/interval_tree.h>
++#include <linux/idr.h>
+ #include <linux/seq_file.h>
+ #include <linux/kref.h>
+ #include <linux/kfifo.h>
++#include <linux/pid.h>
++#include <linux/interval_tree.h>
+ #include <kgd_kfd_interface.h>
+
+ #include "amd_shared.h"
+@@ -81,7 +81,6 @@
+ #define KFD_CIK_HIQ_PIPE 4
+ #define KFD_CIK_HIQ_QUEUE 0
+
+-
+ /* Macro for allocating structures */
+ #define kfd_alloc_struct(ptr_to_struct) \
+ ((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))
+@@ -114,14 +113,14 @@ extern int max_num_of_queues_per_device;
+ /* Kernel module parameter to specify the scheduling policy */
+ extern int sched_policy;
+
+-extern int cwsr_enable;
+-
+ /*
+ * Kernel module parameter to specify the maximum process
+ * number per HW scheduler
+ */
+ extern int hws_max_conc_proc;
+
++extern int cwsr_enable;
++
+ /*
+ * Kernel module parameter to specify whether to send sigterm to HSA process on
+ * unhandled exception
+@@ -442,7 +441,11 @@ enum KFD_QUEUE_PRIORITY {
+ * @is_interop: Defines if this is a interop queue. Interop queue means that
+ * the queue can access both graphics and compute resources.
+ *
+- * @is_active: Defines if the queue is active or not.
++ * @is_evicted: Defines if the queue is evicted. Only active queues
++ * are evicted, rendering them inactive.
++ *
++ * @is_active: Defines if the queue is active or not. @is_active and
++ * @is_evicted are protected by the DQM lock.
+ *
+ * @vmid: If the scheduling mode is no cp scheduling the field defines the vmid
+ * of the queue.
+@@ -464,7 +467,7 @@ struct queue_properties {
+ void __iomem *doorbell_ptr;
+ uint32_t doorbell_off;
+ bool is_interop;
+- bool is_evicted; /* true -> queue is evicted */
++ bool is_evicted;
+ bool is_active;
+ /* Not relevant for user mode queues in cp scheduling */
+ unsigned int vmid;
+@@ -583,7 +586,6 @@ struct qcm_process_device {
+ struct list_head priv_queue_list;
+
+ unsigned int queue_count;
+- /* a data field only meaningful for non-HWS case */
+ unsigned int vmid;
+ bool is_debug;
+ unsigned int evicted; /* eviction counter, 0=active */
+@@ -614,11 +616,11 @@ struct qcm_process_device {
+ uint64_t tma_addr;
+
+ /* IB memory */
+- uint64_t ib_base; /* ib_base+ib_size must be below cwsr_base */
++ uint64_t ib_base;
+ void *ib_kaddr;
+
+ /*doorbell resources per process per device*/
+- unsigned long *doorbell_bitmap;
++ unsigned long *doorbell_bitmap;
+ };
+
+ /* KFD Memory Eviction */
+@@ -756,7 +758,7 @@ struct kfd_process {
+ struct rb_root_cached bo_interval_tree;
+
+ /* Information used for memory eviction */
+- void *process_info;
++ void *kgd_process_info;
+ /* Eviction fence that is attached to all the BOs of this process. The
+ * fence will be triggered during eviction and new one will be created
+ * during restore
+@@ -799,7 +801,7 @@ struct amdkfd_ioctl_desc {
+ int kfd_process_create_wq(void);
+ void kfd_process_destroy_wq(void);
+ struct kfd_process *kfd_create_process(struct file *filep);
+-struct kfd_process *kfd_get_process(const struct task_struct *task);
++struct kfd_process *kfd_get_process(const struct task_struct *);
+ struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid);
+ struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm);
+ void kfd_unref_process(struct kfd_process *p);
+@@ -811,7 +813,7 @@ int kfd_resume_all_processes(void);
+ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
+ struct file *drm_file);
+ struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
+- struct kfd_process *p);
++ struct kfd_process *p);
+ struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
+ struct kfd_process *p);
+ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
+@@ -859,7 +861,7 @@ void kfd_pasid_free(unsigned int pasid);
+ size_t kfd_doorbell_process_slice(struct kfd_dev *kfd);
+ int kfd_doorbell_init(struct kfd_dev *kfd);
+ void kfd_doorbell_fini(struct kfd_dev *kfd);
+-int kfd_doorbell_mmap(struct kfd_dev *kfd, struct kfd_process *process,
++int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process,
+ struct vm_area_struct *vma);
+ void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
+ unsigned int *doorbell_off);
+@@ -982,8 +984,6 @@ int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
+ #define KFD_FENCE_COMPLETED (100)
+ #define KFD_FENCE_INIT (10)
+
+-struct packet_manager_func;
+-
+ struct packet_manager {
+ struct device_queue_manager *dqm;
+ struct kernel_queue *priv_queue;
+@@ -996,7 +996,7 @@ struct packet_manager {
+ };
+
+ struct packet_manager_funcs {
+- /* Support different firmware versions for PM4 packets */
++ /* Support ASIC-specific packet formats for PM4 packets */
+ int (*map_process)(struct packet_manager *pm, uint32_t *buffer,
+ struct qcm_process_device *qpd);
+ int (*runlist)(struct packet_manager *pm, uint32_t *buffer,
+@@ -1042,7 +1042,7 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
+
+ void pm_release_ib(struct packet_manager *pm);
+
+-/* Following PM funcs can be shared among CIK and VI */
++/* Following PM funcs can be shared among VI and AI */
+ unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size);
+ int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer,
+ struct scheduling_resources *res);
+@@ -1089,8 +1089,6 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p);
+
+ bool kfd_is_locked(void);
+
+-#define KFD_SCRATCH_KV_FW_VER 413
+-
+ /* PeerDirect support */
+ void kfd_init_peer_direct(void);
+ void kfd_close_peer_direct(void);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index 7a28c21..9477e50 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -30,6 +30,7 @@
+ #include <linux/notifier.h>
+ #include <linux/compat.h>
+ #include <linux/mman.h>
++#include <linux/file.h>
+ #include <asm/page.h>
+ #include "kfd_ipc.h"
+
+@@ -184,8 +185,8 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
+ /* kfd_process_device_reserve_ib_mem - Reserve memory inside the
+ * process for IB usage The memory reserved is for KFD to submit
+ * IB to AMDGPU from kernel. If the memory is reserved
+- * successfully, ib_kaddr_assigned will have the CPU/kernel
+- * address. Check ib_kaddr_assigned before accessing the memory.
++ * successfully, ib_kaddr will have the CPU/kernel
++ * address. Check ib_kaddr before accessing the memory.
+ */
+ static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
+ {
+@@ -212,7 +213,6 @@ static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
+ struct kfd_process *kfd_create_process(struct file *filep)
+ {
+ struct kfd_process *process;
+-
+ struct task_struct *thread = current;
+
+ if (!thread->mm)
+@@ -348,7 +348,9 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
+
+ list_for_each_entry_safe(pdd, temp, &p->per_device_data,
+ per_device_list) {
+- /* Destroy the GPUVM VM context */
++ pr_debug("Releasing pdd (topology id %d) for process (pasid %d)\n",
++ pdd->dev->id, p->pasid);
++
+ if (pdd->drm_file)
+ fput(pdd->drm_file);
+ else if (pdd->vm)
+@@ -401,9 +403,6 @@ static void kfd_process_ref_release(struct kref *ref)
+ {
+ struct kfd_process *p = container_of(ref, struct kfd_process, ref);
+
+- if (WARN_ON(!kfd_process_wq))
+- return;
+-
+ INIT_WORK(&p->release_work, kfd_process_wq_release);
+ queue_work(kfd_process_wq, &p->release_work);
+ }
+@@ -486,9 +485,9 @@ static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
+
+ offset = (KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id))
+ << PAGE_SHIFT;
+- qpd->tba_addr = (uint64_t)vm_mmap(filep, 0,
+- KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
+- MAP_SHARED, offset);
++ qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
++ KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
++ MAP_SHARED, offset);
+
+ if (IS_ERR_VALUE(qpd->tba_addr)) {
+ int err = qpd->tba_addr;
+@@ -725,10 +724,11 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
+
+ if (drm_file)
+ ret = dev->kfd2kgd->acquire_process_vm(
+- dev->kgd, drm_file, &pdd->vm, &p->process_info, &p->ef);
++ dev->kgd, drm_file,
++ &pdd->vm, &p->kgd_process_info, &p->ef);
+ else
+ ret = dev->kfd2kgd->create_process_vm(
+- dev->kgd, &pdd->vm, &p->process_info, &p->ef);
++ dev->kgd, &pdd->vm, &p->kgd_process_info, &p->ef);
+ if (ret) {
+ pr_err("Failed to create process VM object\n");
+ return ret;
+@@ -942,42 +942,6 @@ struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid)
+ return ret_p;
+ }
+
+-void kfd_suspend_all_processes(void)
+-{
+- struct kfd_process *p;
+- unsigned int temp;
+- int idx = srcu_read_lock(&kfd_processes_srcu);
+-
+- hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
+- cancel_delayed_work_sync(&p->eviction_work);
+- cancel_delayed_work_sync(&p->restore_work);
+-
+- if (kfd_process_evict_queues(p))
+- pr_err("Failed to suspend process %d\n", p->pasid);
+- dma_fence_signal(p->ef);
+- dma_fence_put(p->ef);
+- p->ef = NULL;
+- }
+- srcu_read_unlock(&kfd_processes_srcu, idx);
+-}
+-
+-int kfd_resume_all_processes(void)
+-{
+- struct kfd_process *p;
+- unsigned int temp;
+- int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu);
+-
+- hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
+- if (!queue_delayed_work(kfd_restore_wq, &p->restore_work, 0)) {
+- pr_err("Restore process %d failed during resume\n",
+- p->pasid);
+- ret = -EFAULT;
+- }
+- }
+- srcu_read_unlock(&kfd_processes_srcu, idx);
+- return ret;
+-}
+-
+ /* This increments the process->ref counter. */
+ struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
+ {
+@@ -1069,15 +1033,14 @@ static void evict_process_worker(struct work_struct *work)
+ "Eviction fence mismatch\n");
+
+ /* Narrow window of overlap between restore and evict work
+- * item is possible. Once
+- * amdgpu_amdkfd_gpuvm_restore_process_bos unreserves KFD BOs,
+- * it is possible to evicted again. But restore has few more
+- * steps of finish. So lets wait for any previous restore work
+- * to complete
++ * item is possible. Once amdgpu_amdkfd_gpuvm_restore_process_bos
++ * unreserves KFD BOs, it is possible to be evicted again. But
++ * restore has a few more steps to finish. So let's wait for any
++ * previous restore work to complete.
+ */
+ flush_delayed_work(&p->restore_work);
+
+- pr_info("Started evicting process of pasid %d\n", p->pasid);
++ pr_info("Started evicting pasid %d\n", p->pasid);
+ ret = kfd_process_evict_queues(p);
+ if (!ret) {
+ dma_fence_signal(p->ef);
+@@ -1086,10 +1049,9 @@ static void evict_process_worker(struct work_struct *work)
+ queue_delayed_work(kfd_restore_wq, &p->restore_work,
+ msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));
+
+- pr_info("Finished evicting process of pasid %d\n", p->pasid);
++ pr_info("Finished evicting pasid %d\n", p->pasid);
+ } else
+- pr_err("Failed to quiesce user queues. Cannot evict pasid %d\n",
+- p->pasid);
++ pr_err("Failed to evict queues of pasid %d\n", p->pasid);
+ }
+
+ static void restore_process_worker(struct work_struct *work)
+@@ -1115,7 +1077,7 @@ static void restore_process_worker(struct work_struct *work)
+ struct kfd_process_device,
+ per_device_list);
+
+- pr_info("Started restoring process of pasid %d\n", p->pasid);
++ pr_info("Started restoring pasid %d\n", p->pasid);
+
+ /* Setting last_restore_timestamp before successful restoration.
+ * Otherwise this would have to be set by KGD (restore_process_bos)
+@@ -1128,10 +1090,11 @@ static void restore_process_worker(struct work_struct *work)
+ */
+
+ p->last_restore_timestamp = get_jiffies_64();
+- ret = pdd->dev->kfd2kgd->restore_process_bos(p->process_info, &p->ef);
++ ret = pdd->dev->kfd2kgd->restore_process_bos(p->kgd_process_info,
++ &p->ef);
+ if (ret) {
+- pr_info("Restore failed, try again after %d ms\n",
+- PROCESS_BACK_OFF_TIME_MS);
++ pr_info("Failed to restore BOs of pasid %d, retry after %d ms\n",
++ p->pasid, PROCESS_BACK_OFF_TIME_MS);
+ ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
+ msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
+ WARN(!ret, "reschedule restore work failed\n");
+@@ -1139,10 +1102,46 @@ static void restore_process_worker(struct work_struct *work)
+ }
+
+ ret = kfd_process_restore_queues(p);
+- if (ret)
+- pr_err("Failed to resume user queues\n");
++ if (!ret)
++ pr_info("Finished restoring pasid %d\n", p->pasid);
++ else
++ pr_err("Failed to restore queues of pasid %d\n", p->pasid);
++}
++
++void kfd_suspend_all_processes(void)
++{
++ struct kfd_process *p;
++ unsigned int temp;
++ int idx = srcu_read_lock(&kfd_processes_srcu);
+
+- pr_info("Finished restoring process of pasid %d\n", p->pasid);
++ hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
++ cancel_delayed_work_sync(&p->eviction_work);
++ cancel_delayed_work_sync(&p->restore_work);
++
++ if (kfd_process_evict_queues(p))
++ pr_err("Failed to suspend process %d\n", p->pasid);
++ dma_fence_signal(p->ef);
++ dma_fence_put(p->ef);
++ p->ef = NULL;
++ }
++ srcu_read_unlock(&kfd_processes_srcu, idx);
++}
++
++int kfd_resume_all_processes(void)
++{
++ struct kfd_process *p;
++ unsigned int temp;
++ int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu);
++
++ hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
++ if (!queue_delayed_work(kfd_restore_wq, &p->restore_work, 0)) {
++ pr_err("Restore process %d failed during resume\n",
++ p->pasid);
++ ret = -EFAULT;
++ }
++ }
++ srcu_read_unlock(&kfd_processes_srcu, idx);
++ return ret;
+ }
+
+ int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
+@@ -1176,7 +1175,6 @@ int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
+ KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
+ }
+
+-
+ void kfd_flush_tlb(struct kfd_process_device *pdd)
+ {
+ struct kfd_dev *dev = pdd->dev;
+@@ -1211,7 +1209,7 @@ int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
+ r = pqm_debugfs_mqds(m, &p->pqm);
+ mutex_unlock(&p->mutex);
+
+- if (r != 0)
++ if (r)
+ break;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+index c950149..e18ed45 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+@@ -188,7 +188,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
+ case KFD_QUEUE_TYPE_SDMA:
+ if (dev->dqm->sdma_queue_count
+ >= get_num_sdma_queues(dev->dqm)) {
+- pr_debug("Over-subscription is not allowed for SDMA\n");
++ pr_debug("Over-subscription is not allowed for SDMA.\n");
+ retval = -EPERM;
+ goto err_create_queue;
+ }
+@@ -206,7 +206,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
+ case KFD_QUEUE_TYPE_COMPUTE:
+ /* check if there is over subscription */
+ if ((dev->dqm->sched_policy ==
+- KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
++ KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
+ ((dev->dqm->processes_count >= dev->vm_info.vmid_num_kfd) ||
+ (dev->dqm->queue_count >= get_queues_num(dev->dqm)))) {
+ pr_debug("Over-subscription is not allowed in radeon_kfd.sched_policy == 1\n");
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+index 320c8d3..82cff10 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+@@ -196,6 +196,7 @@ struct kfd_topology_device *kfd_create_topology_device(
+ return dev;
+ }
+
++
+ #define sysfs_show_gen_prop(buffer, fmt, ...) \
+ snprintf(buffer, PAGE_SIZE, "%s"fmt, buffer, __VA_ARGS__)
+ #define sysfs_show_32bit_prop(buffer, name, value) \
+@@ -739,7 +740,7 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
+ }
+
+ /* All hardware blocks have the same number of attributes. */
+- num_attrs = sizeof(perf_attr_iommu)/sizeof(struct kfd_perf_attr);
++ num_attrs = ARRAY_SIZE(perf_attr_iommu);
+ list_for_each_entry(perf, &dev->perf_props, list) {
+ perf->attr_group = kzalloc(sizeof(struct kfd_perf_attr)
+ * num_attrs + sizeof(struct attribute_group),
+@@ -890,7 +891,8 @@ static void kfd_debug_print_topology(void)
+ up_read(&topology_lock);
+ }
+
+-/* Helper function for intializing platform_xx members of kfd_system_properties
++/* Helper function for initializing platform_xx members of
++ * kfd_system_properties. Uses OEM info from the last CPU/APU node.
+ */
+ static void kfd_update_system_properties(void)
+ {
+@@ -1013,13 +1015,12 @@ int kfd_topology_init(void)
+ */
+ #ifdef CONFIG_ACPI
+ ret = kfd_create_crat_image_acpi(&crat_image, &image_size);
+- if (ret == 0) {
++ if (!ret) {
+ ret = kfd_parse_crat_table(crat_image,
+ &temp_topology_device_list,
+ proximity_domain);
+ if (ret ||
+- kfd_is_acpi_crat_invalid(&temp_topology_device_list)) {
+-
++ kfd_is_acpi_crat_invalid(&temp_topology_device_list)) {
+ kfd_release_topology_device_list(
+ &temp_topology_device_list);
+ kfd_destroy_crat_image(crat_image);
+@@ -1029,8 +1030,8 @@ int kfd_topology_init(void)
+ #endif
+ if (!crat_image) {
+ ret = kfd_create_crat_image_virtual(&crat_image, &image_size,
+- COMPUTE_UNIT_CPU, NULL,
+- proximity_domain);
++ COMPUTE_UNIT_CPU, NULL,
++ proximity_domain);
+ cpu_only_node = 1;
+ if (ret) {
+ pr_err("Error creating VCRAT table for CPU\n");
+@@ -1038,8 +1039,8 @@ int kfd_topology_init(void)
+ }
+
+ ret = kfd_parse_crat_table(crat_image,
+- &temp_topology_device_list,
+- proximity_domain);
++ &temp_topology_device_list,
++ proximity_domain);
+ if (ret) {
+ pr_err("Error parsing VCRAT table for CPU\n");
+ goto err;
+@@ -1051,12 +1052,12 @@ int kfd_topology_init(void)
+
+ down_write(&topology_lock);
+ kfd_topology_update_device_list(&temp_topology_device_list,
+- &topology_device_list);
++ &topology_device_list);
+ atomic_set(&topology_crat_proximity_domain, sys_props.num_devices-1);
+ ret = kfd_topology_update_sysfs();
+ up_write(&topology_lock);
+
+- if (ret == 0) {
++ if (!ret) {
+ sys_props.generation_count++;
+ kfd_update_system_properties();
+ kfd_debug_print_topology();
+@@ -1144,7 +1145,6 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
+ break;
+ }
+ up_write(&topology_lock);
+-
+ return out_dev;
+ }
+
+@@ -1212,8 +1212,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
+
+ pr_debug("Adding new GPU (ID: 0x%x) to topology\n", gpu_id);
+
+- proximity_domain = atomic_inc_return(&
+- topology_crat_proximity_domain);
++ proximity_domain = atomic_inc_return(&topology_crat_proximity_domain);
+
+ /* Check to see if this gpu device exists in the topology_device_list.
+ * If so, assign the gpu to that device,
+@@ -1224,15 +1223,16 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
+ dev = kfd_assign_gpu(gpu);
+ if (!dev) {
+ res = kfd_create_crat_image_virtual(&crat_image, &image_size,
+- COMPUTE_UNIT_GPU,
+- gpu, proximity_domain);
++ COMPUTE_UNIT_GPU, gpu,
++ proximity_domain);
+ if (res) {
+ pr_err("Error creating VCRAT for GPU (ID: 0x%x)\n",
+ gpu_id);
+ return res;
+ }
+ res = kfd_parse_crat_table(crat_image,
+- &temp_topology_device_list, proximity_domain);
++ &temp_topology_device_list,
++ proximity_domain);
+ if (res) {
+ pr_err("Error parsing VCRAT for GPU (ID: 0x%x)\n",
+ gpu_id);
+@@ -1249,14 +1249,13 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
+ res = kfd_topology_update_sysfs();
+ up_write(&topology_lock);
+
+- if (res == 0)
++ if (!res)
+ sys_props.generation_count++;
+ else
+ pr_err("Failed to update GPU (ID: 0x%x) to sysfs topology. res=%d\n",
+ gpu_id, res);
+ dev = kfd_assign_gpu(gpu);
+- if (!dev) {
+- pr_err("Could not assign GPU\n");
++ if (WARN_ON(!dev)) {
+ res = -ENODEV;
+ goto err;
+ }
+@@ -1315,14 +1314,15 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
+ HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
+ break;
+ default:
+- BUG();
++ WARN(1, "Unexpected ASIC family %u",
++ dev->gpu->device_info->asic_family);
+ }
+
+ /* Fix errors in CZ CRAT.
+- * simd_count: Carrizo CRAT reports wrong simd_count, probably because
+- * it doesn't consider masked out CUs
+- * max_waves_per_simd: Carrizo reports wrong max_waves_per_simd.
+- * capability flag: Carrizo CRAT doesn't report IOMMU flags.
++ * simd_count: Carrizo CRAT reports wrong simd_count, probably
++ * because it doesn't consider masked out CUs
++ * max_waves_per_simd: Carrizo reports wrong max_waves_per_simd
++ * capability flag: Carrizo CRAT doesn't report IOMMU flags
+ */
+ if (dev->gpu->device_info->asic_family == CHIP_CARRIZO) {
+ dev->node_props.simd_count =
+@@ -1362,7 +1362,7 @@ int kfd_topology_remove_device(struct kfd_dev *gpu)
+
+ up_write(&topology_lock);
+
+- if (res == 0)
++ if (!res)
+ kfd_notify_gpu_change(gpu_id, 0);
+
+ return res;
+@@ -1403,7 +1403,7 @@ static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask)
+ {
+ int first_cpu_of_numa_node;
+
+- if (!cpumask || (cpumask == cpu_none_mask))
++ if (!cpumask || cpumask == cpu_none_mask)
+ return -1;
+ first_cpu_of_numa_node = cpumask_first(cpumask);
+ if (first_cpu_of_numa_node >= nr_cpu_ids)
+@@ -1446,7 +1446,7 @@ int kfd_debugfs_hqds_by_device(struct seq_file *m, void *data)
+
+ seq_printf(m, "Node %u, gpu_id %x:\n", i++, dev->gpu->id);
+ r = dqm_debugfs_hqds(m, dev->gpu->dqm);
+- if (r != 0)
++ if (r)
+ break;
+ }
+
+@@ -1471,7 +1471,7 @@ int kfd_debugfs_rls_by_device(struct seq_file *m, void *data)
+
+ seq_printf(m, "Node %u, gpu_id %x:\n", i++, dev->gpu->id);
+ r = pm_debugfs_runlist(m, &dev->gpu->dqm->packets);
+- if (r != 0)
++ if (r)
+ break;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+index 4c518fe8..2b36baf 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+@@ -46,9 +46,6 @@
+ #define HSA_CAP_DOORBELL_TYPE_PRE_1_0 0x0
+ #define HSA_CAP_DOORBELL_TYPE_1_0 0x1
+ #define HSA_CAP_DOORBELL_TYPE_2_0 0x2
+-#define HSA_CAP_WATCH_POINTS_TOTALBITS_MASK 0x00000f00
+-#define HSA_CAP_WATCH_POINTS_TOTALBITS_SHIFT 8
+-#define HSA_CAP_DOORBELL_PACKET_TYPE 0x00001000
+ #define HSA_CAP_AQL_QUEUE_DOUBLE_MAP 0x00004000
+
+ struct kfd_node_properties {
+@@ -169,9 +166,9 @@ struct kfd_topology_device {
+ struct attribute attr_gpuid;
+ struct attribute attr_name;
+ struct attribute attr_props;
+- uint8_t oem_id[CRAT_OEMID_LENGTH];
+- uint8_t oem_table_id[CRAT_OEMTABLEID_LENGTH];
+- uint32_t oem_revision;
++ uint8_t oem_id[CRAT_OEMID_LENGTH];
++ uint8_t oem_table_id[CRAT_OEMTABLEID_LENGTH];
++ uint32_t oem_revision;
+ };
+
+ struct kfd_system_properties {
+diff --git a/drivers/gpu/drm/amd/amdkfd/soc15_int.h b/drivers/gpu/drm/amd/amdkfd/soc15_int.h
+index 011c14c..0bc0b25 100644
+--- a/drivers/gpu/drm/amd/amdkfd/soc15_int.h
++++ b/drivers/gpu/drm/amd/amdkfd/soc15_int.h
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2016 Advanced Micro Devices, Inc.
++ * Copyright 2016-2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4295-drm-amdkfd-Add-sanity-checks-in-IRQ-handlers.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4295-drm-amdkfd-Add-sanity-checks-in-IRQ-handlers.patch
new file mode 100644
index 00000000..3cbc5853
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4295-drm-amdkfd-Add-sanity-checks-in-IRQ-handlers.patch
@@ -0,0 +1,137 @@
+From 1a97cc79780e70d1e6624a8d2fdc3c1d5a56461f Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Mon, 30 Apr 2018 19:22:49 -0400
+Subject: [PATCH 4295/5725] drm/amdkfd: Add sanity checks in IRQ handlers
+
+Only accept interrupts from KFD VMIDs. Just checking for a PASID may
+not be enough because amdgpu started using PASIDs to map VM faults
+to processes.
+
+Warn if an IRQ doesn't have a valid PASID (indicating a firmware bug).
+
+Change-Id: I34ca5b4b03ffe51a23d03490fc65b6c946bbbf51
+Suggested-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Suggested-by: Oak Zeng <Oak.Zeng@amd.com>
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c | 33 +++++++++---------
+ drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c | 44 ++++++++++++++----------
+ 2 files changed, 43 insertions(+), 34 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
+index 1261432..5d2475d 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
++++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
+@@ -24,15 +24,6 @@
+ #include "kfd_events.h"
+ #include "cik_int.h"
+
+-static bool is_cpc_vm_fault(struct kfd_dev *dev, uint32_t source_id,
+- unsigned int vmid)
+-{
+- return (source_id == CIK_INTSRC_GFX_PAGE_INV_FAULT ||
+- source_id == CIK_INTSRC_GFX_MEM_PROT_FAULT) &&
+- vmid >= dev->vm_info.first_vmid_kfd &&
+- vmid <= dev->vm_info.last_vmid_kfd;
+-}
+-
+ static bool cik_event_interrupt_isr(struct kfd_dev *dev,
+ const uint32_t *ih_ring_entry,
+ uint32_t *patched_ihre,
+@@ -67,16 +58,26 @@ static bool cik_event_interrupt_isr(struct kfd_dev *dev,
+ vmid <= dev->vm_info.last_vmid_kfd;
+ }
+
++ /* Only handle interrupts from KFD VMIDs */
+ vmid = (ihre->ring_id & 0x0000ff00) >> 8;
++ if (vmid < dev->vm_info.first_vmid_kfd ||
++ vmid > dev->vm_info.last_vmid_kfd)
++ return 0;
++
++ /* If there is no valid PASID, it's likely a firmware bug */
+ pasid = (ihre->ring_id & 0xffff0000) >> 16;
++ if (WARN_ONCE(pasid == 0, "FW bug: No PASID in KFD interrupt"))
++ return 0;
+
+- /* Do not process in ISR, just request it to be forwarded to WQ. */
+- return (pasid != 0) &&
+- (ihre->source_id == CIK_INTSRC_CP_END_OF_PIPE ||
+- ihre->source_id == CIK_INTSRC_SDMA_TRAP ||
+- ihre->source_id == CIK_INTSRC_SQ_INTERRUPT_MSG ||
+- ihre->source_id == CIK_INTSRC_CP_BAD_OPCODE ||
+- is_cpc_vm_fault(dev, ihre->source_id, vmid));
++ /* Interrupt types we care about: various signals and faults.
++ * They will be forwarded to a work queue (see below).
++ */
++ return ihre->source_id == CIK_INTSRC_CP_END_OF_PIPE ||
++ ihre->source_id == CIK_INTSRC_SDMA_TRAP ||
++ ihre->source_id == CIK_INTSRC_SQ_INTERRUPT_MSG ||
++ ihre->source_id == CIK_INTSRC_CP_BAD_OPCODE ||
++ ihre->source_id == CIK_INTSRC_GFX_PAGE_INV_FAULT ||
++ ihre->source_id == CIK_INTSRC_GFX_MEM_PROT_FAULT;
+ }
+
+ static void cik_event_interrupt_wq(struct kfd_dev *dev,
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+index 5217e51..f836897 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+@@ -31,29 +31,37 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
+ bool *patched_flag)
+ {
+ uint16_t source_id, client_id, pasid, vmid;
++ const uint32_t *data = ih_ring_entry;
+
+- source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry);
+- client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
+- pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);
++ /* Only handle interrupts from KFD VMIDs */
+ vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
++ if (vmid < dev->vm_info.first_vmid_kfd ||
++ vmid > dev->vm_info.last_vmid_kfd)
++ return 0;
++
++ /* If there is no valid PASID, it's likely a firmware bug */
++ pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);
++ if (WARN_ONCE(pasid == 0, "FW bug: No PASID in KFD interrupt"))
++ return 0;
+
+- if (pasid) {
+- const uint32_t *data = ih_ring_entry;
++ source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry);
++ client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
+
+- pr_debug("client id 0x%x, source id %d, pasid 0x%x. raw data:\n",
+- client_id, source_id, pasid);
+- pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
+- data[0], data[1], data[2], data[3],
+- data[4], data[5], data[6], data[7]);
+- }
++ pr_debug("client id 0x%x, source id %d, pasid 0x%x. raw data:\n",
++ client_id, source_id, pasid);
++ pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
++ data[0], data[1], data[2], data[3],
++ data[4], data[5], data[6], data[7]);
+
+- return (pasid != 0) &&
+- (source_id == SOC15_INTSRC_CP_END_OF_PIPE ||
+- source_id == SOC15_INTSRC_SDMA_TRAP ||
+- source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG ||
+- source_id == SOC15_INTSRC_CP_BAD_OPCODE ||
+- client_id == SOC15_IH_CLIENTID_VMC ||
+- client_id == SOC15_IH_CLIENTID_UTCL2);
++ /* Interrupt types we care about: various signals and faults.
++ * They will be forwarded to a work queue (see below).
++ */
++ return source_id == SOC15_INTSRC_CP_END_OF_PIPE ||
++ source_id == SOC15_INTSRC_SDMA_TRAP ||
++ source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG ||
++ source_id == SOC15_INTSRC_CP_BAD_OPCODE ||
++ client_id == SOC15_IH_CLIENTID_VMC ||
++ client_id == SOC15_IH_CLIENTID_UTCL2;
+ }
+
+ static void event_interrupt_wq_v9(struct kfd_dev *dev,
+--
+2.7.4
+
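The ISR-level filtering introduced above reduces to a VMID range check followed
by a PASID sanity test, applied before the source and client IDs are examined.
A minimal standalone sketch of that ordering, using simplified stand-in types
and an arbitrary VMID range rather than the real KFD structures:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct vm_info { uint16_t first_vmid_kfd, last_vmid_kfd; };

    /* Mirror of the ISR decision: accept only KFD VMIDs with a valid PASID. */
    static bool kfd_isr_accepts(const struct vm_info *vm, uint16_t vmid,
                                uint16_t pasid, bool wanted_source)
    {
        /* Only handle interrupts from KFD-owned VMIDs */
        if (vmid < vm->first_vmid_kfd || vmid > vm->last_vmid_kfd)
            return false;

        /* A KFD interrupt without a PASID indicates a firmware bug */
        if (pasid == 0) {
            fprintf(stderr, "FW bug: No PASID in KFD interrupt\n");
            return false;
        }

        /* Signals and faults are then forwarded to the work queue */
        return wanted_source;
    }

    int main(void)
    {
        struct vm_info vm = { .first_vmid_kfd = 8, .last_vmid_kfd = 15 };

        printf("%d %d\n", kfd_isr_accepts(&vm, 9, 0x42, true),   /* 1 */
                          kfd_isr_accepts(&vm, 3, 0x42, true));  /* 0 */
        return 0;
    }
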
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4296-drm-amdgpu-Check-NULL-pointer-for-job-before-reset-j.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4296-drm-amdgpu-Check-NULL-pointer-for-job-before-reset-j.patch
new file mode 100644
index 00000000..320bdff0
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4296-drm-amdgpu-Check-NULL-pointer-for-job-before-reset-j.patch
@@ -0,0 +1,32 @@
+From 51d6742573db2d69a58afcd5c6c08a7dee34f77e Mon Sep 17 00:00:00 2001
+From: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Date: Mon, 23 Apr 2018 13:46:40 -0400
+Subject: [PATCH 4296/5725] drm/amdgpu: Check NULL pointer for job before reset
+ job's ring
+
+job could be NULL when amdgpu_device_gpu_recover is called
+
+Change-Id: Ie30e365d2616b8bb280c67c97d9d98dba5050472
+Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Reviewed-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 8859f19..0ece1c3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -3279,7 +3279,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ if (job && job->ring->idx != i)
+ continue;
+
+- drm_sched_hw_job_reset(&ring->sched, &job->base);
++ drm_sched_hw_job_reset(&ring->sched, job ? &job->base : NULL);
+
+ /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
+ amdgpu_fence_driver_force_completion(ring);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4297-drm-amd-amdgpu-vcn10-Add-callback-for-emit_reg_write.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4297-drm-amd-amdgpu-vcn10-Add-callback-for-emit_reg_write.patch
new file mode 100644
index 00000000..9b8444d4
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4297-drm-amd-amdgpu-vcn10-Add-callback-for-emit_reg_write.patch
@@ -0,0 +1,31 @@
+From 2dfe71d3485ea5420f93e830a47e7db0adadcd95 Mon Sep 17 00:00:00 2001
+From: Tom St Denis <tom.stdenis@amd.com>
+Date: Tue, 1 May 2018 10:15:16 -0400
+Subject: [PATCH 4297/5725] drm/amd/amdgpu: vcn10 Add callback for
+ emit_reg_write_reg_wait
+
+The callback .emit_reg_write_reg_wait was missing for vcn decode
+which resulted in a kernel oops.
+
+Change-Id: I77b68bb82c690394cd6e598c7876373c08336eb4
+Signed-off-by: Tom St Denis <tom.stdenis@amd.com>
+Reviewed-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index 48c469f..bc00178 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -1147,6 +1147,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
+ .end_use = amdgpu_vcn_ring_end_use,
+ .emit_wreg = vcn_v1_0_dec_ring_emit_wreg,
+ .emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait,
++ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ };
+
+ static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4298-drm-amd-amdgpu-Add-some-documentation-to-the-debugfs.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4298-drm-amd-amdgpu-Add-some-documentation-to-the-debugfs.patch
new file mode 100644
index 00000000..20607678
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4298-drm-amd-amdgpu-Add-some-documentation-to-the-debugfs.patch
@@ -0,0 +1,317 @@
+From ade3dea05e69e32798da34883f5e1e5167a0e08a Mon Sep 17 00:00:00 2001
+From: Tom St Denis <tom.stdenis@amd.com>
+Date: Wed, 2 May 2018 13:01:36 -0400
+Subject: [PATCH 4298/5725] drm/amd/amdgpu: Add some documentation to the
+ debugfs entries
+
+Change-Id: Id5d6ce7c6bfdd6672689258fb7b582d67c263f60
+Signed-off-by: Tom St Denis <tom.stdenis@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 192 +++++++++++++++++++++++++++-
+ 1 file changed, 189 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+index 2c58922e..cc19d6a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+@@ -28,9 +28,14 @@
+ #include <linux/debugfs.h>
+ #include "amdgpu.h"
+
+-/*
+- * Debugfs
+- */
++/**
++ * amdgpu_debugfs_add_files - Add simple debugfs entries
++ *
++ * @adev: Device to attach debugfs entries to
++ * @files: Array of function callbacks that respond to reads
++ * @nfiles: Number of callbacks to register
++ *
++*/
+ int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
+ const struct drm_info_list *files,
+ unsigned nfiles)
+@@ -64,6 +69,33 @@ int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
+
+ #if defined(CONFIG_DEBUG_FS)
+
++/**
++ * amdgpu_debugfs_process_reg_op - Handle MMIO register reads/writes
++ *
++ * @read: True if reading
++ * @f: open file handle
++ * @buf: User buffer to write/read to
++ * @size: Number of bytes to write/read
++ * @pos: Offset to seek to
++ *
++ * This debugfs entry has special meaning on the offset being sought.
++ * Various bits have different meanings:
++ *
++ * Bit 62: Indicates a GRBM bank switch is needed
++ * Bit 61: Indicates a SRBM bank switch is needed (implies bit 62 is
++ * zero)
++ * Bits 24..33: The SE or ME selector if needed
++ * Bits 34..43: The SH (or SA) or PIPE selector if needed
++ * Bits 44..53: The INSTANCE (or CU/WGP) or QUEUE selector if needed
++ *
++ * Bit 23: Indicates that the PM power gating lock should be held
++ * This is necessary to read registers that might be
++ * unreliable during a power gating transition.
++ *
++ * The lower bits are the BYTE offset of the register to read. This
++ * allows reading multiple registers in a single call and having
++ * the returned size reflect that.
++ */
+ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
+ char __user *buf, size_t size, loff_t *pos)
+ {
+@@ -163,12 +195,18 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
+ return result;
+ }
+
++/**
++ * amdgpu_debugfs_regs_read - Callback for reading MMIO registers
++*/
+ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+ {
+ return amdgpu_debugfs_process_reg_op(true, f, buf, size, pos);
+ }
+
++/**
++ * amdgpu_debugfs_regs_write - Callback for writing MMIO registers
++*/
+ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+ {
+@@ -176,6 +214,18 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
+ return amdgpu_debugfs_process_reg_op(false, f, (char __user *)buf, size, pos);
+ }
+
++/**
++ * amdgpu_debugfs_regs_pcie_read - Read from a PCIE register
++ *
++ * @f: open file handle
++ * @buf: User buffer to store read data in
++ * @size: Number of bytes to read
++ * @pos: Offset to seek to
++ *
++ * The lower bits are the BYTE offset of the register to read. This
++ * allows reading multiple registers in a single call and having
++ * the returned size reflect that.
++*/
+ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+ {
+@@ -203,6 +253,18 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
+ return result;
+ }
+
++/**
++ * amdgpu_debugfs_regs_pcie_write - Write to a PCIE register
++ *
++ * @f: open file handle
++ * @buf: User buffer to write data from
++ * @size: Number of bytes to write
++ * @pos: Offset to seek to
++ *
++ * The lower bits are the BYTE offset of the register to write. This
++ * allows writing multiple registers in a single call and having
++ * the returned size reflect that.
++*/
+ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+ {
+@@ -231,6 +293,18 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
+ return result;
+ }
+
++/**
++ * amdgpu_debugfs_regs_didt_read - Read from a DIDT register
++ *
++ * @f: open file handle
++ * @buf: User buffer to store read data in
++ * @size: Number of bytes to read
++ * @pos: Offset to seek to
++ *
++ * The lower bits are the BYTE offset of the register to read. This
++ * allows reading multiple registers in a single call and having
++ * the returned size reflect that.
++*/
+ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+ {
+@@ -258,6 +332,18 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
+ return result;
+ }
+
++/**
++ * amdgpu_debugfs_regs_didt_write - Write to a DIDT register
++ *
++ * @f: open file handle
++ * @buf: User buffer to write data from
++ * @size: Number of bytes to write
++ * @pos: Offset to seek to
++ *
++ * The lower bits are the BYTE offset of the register to write. This
++ * allows writing multiple registers in a single call and having
++ * the returned size reflect that.
++*/
+ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+ {
+@@ -286,6 +372,18 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
+ return result;
+ }
+
++/**
++ * amdgpu_debugfs_regs_smc_read - Read from a SMC register
++ *
++ * @f: open file handle
++ * @buf: User buffer to store read data in
++ * @size: Number of bytes to read
++ * @pos: Offset to seek to
++ *
++ * The lower bits are the BYTE offset of the register to read. This
++ * allows reading multiple registers in a single call and having
++ * the returned size reflect that.
++*/
+ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+ {
+@@ -313,6 +411,18 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
+ return result;
+ }
+
++/**
++ * amdgpu_debugfs_regs_smc_write - Write to a SMC register
++ *
++ * @f: open file handle
++ * @buf: User buffer to write data from
++ * @size: Number of bytes to write
++ * @pos: Offset to seek to
++ *
++ * The lower bits are the BYTE offset of the register to write. This
++ * allows writing multiple registers in a single call and having
++ * the returned size reflect that.
++*/
+ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+ {
+@@ -341,6 +451,20 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
+ return result;
+ }
+
++/**
++ * amdgpu_debugfs_gca_config_read - Read from gfx config data
++ *
++ * @f: open file handle
++ * @buf: User buffer to store read data in
++ * @size: Number of bytes to read
++ * @pos: Offset to seek to
++ *
++ * This file is used to access configuration data in a somewhat
++ * stable fashion. The format is a series of DWORDs with the first
++ * indicating which revision it is. New content is appended to the
++ * end so that older software can still read the data.
++*/
++
+ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+ {
+@@ -417,6 +541,19 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
+ return result;
+ }
+
++/**
++ * amdgpu_debugfs_sensor_read - Read from the powerplay sensors
++ *
++ * @f: open file handle
++ * @buf: User buffer to store read data in
++ * @size: Number of bytes to read
++ * @pos: Offset to seek to
++ *
++ * The offset is treated as the BYTE address of one of the sensors
++ * enumerated in amd/include/kgd_pp_interface.h under the
++ * 'amd_pp_sensors' enumeration. For instance to read the UVD VCLK
++ * you would use the offset 3 * 4 = 12.
++*/
+ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+ {
+@@ -456,6 +593,27 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
+ return !r ? outsize : r;
+ }
+
++/** amdgpu_debugfs_wave_read - Read WAVE STATUS data
++ *
++ * @f: open file handle
++ * @buf: User buffer to store read data in
++ * @size: Number of bytes to read
++ * @pos: Offset to seek to
++ *
++ * The offset being sought changes which wave that the status data
++ * will be returned for. The bits are used as follows:
++ *
++ * Bits 0..6: Byte offset into data
++ * Bits 7..14: SE selector
++ * Bits 15..22: SH/SA selector
++ * Bits 23..30: CU/{WGP+SIMD} selector
++ * Bits 31..36: WAVE ID selector
++ * Bits 37..44: SIMD ID selector
++ *
++ * The returned data begins with one DWORD of version information,
++ * followed by WAVE STATUS registers relevant to the GFX IP version
++ * being used. See gfx_v8_0_read_wave_data() for an example output.
++*/
+ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+ {
+@@ -506,6 +664,28 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
+ return result;
+ }
+
++/** amdgpu_debugfs_gpr_read - Read wave gprs
++ *
++ * @f: open file handle
++ * @buf: User buffer to store read data in
++ * @size: Number of bytes to read
++ * @pos: Offset to seek to
++ *
++ * The offset being sought changes which wave that the status data
++ * will be returned for. The bits are used as follows:
++ *
++ * Bits 0..11: Byte offset into data
++ * Bits 12..19: SE selector
++ * Bits 20..27: SH/SA selector
++ * Bits 28..35: CU/{WGP+SIMD} selector
++ * Bits 36..43: WAVE ID selector
++ * Bits 37..44: SIMD ID selector
++ * Bits 52..59: Thread selector
++ * Bits 60..61: Bank selector (VGPR=0,SGPR=1)
++ *
++ * The return data comes from the SGPR or VGPR register bank for
++ * the selected operational unit.
++*/
+ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+ {
+@@ -636,6 +816,12 @@ static const char *debugfs_regs_names[] = {
+ "amdgpu_gpr",
+ };
+
++/**
++ * amdgpu_debugfs_regs_init - Initialize debugfs entries that provide
++ * register access.
++ *
++ * @adev: The device to attach the debugfs entries to
++*/
+ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
+ {
+ struct drm_minor *minor = adev->ddev->primary;
+--
+2.7.4
+
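The offset encodings documented in the new comments can be driven from user
space by seeking to a composed 64-bit offset before reading the debugfs file.
A hypothetical example for the GRBM-bank form of amdgpu_regs; the debugfs
path and the register byte offset below are placeholders, not values taken
from the patch:

    #define _FILE_OFFSET_BITS 64
    #include <stdint.h>
    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>

    /* Compose an offset that requests a GRBM bank switch (bit 62) and
     * selects SE/SH/INSTANCE in the bit ranges described above.
     */
    static uint64_t grbm_offset(uint64_t reg_byte_off, uint64_t se,
                                uint64_t sh, uint64_t instance)
    {
        return reg_byte_off |
               (1ULL << 62) |       /* bit 62: GRBM bank switch */
               (se << 24) |         /* bits 24..33: SE selector */
               (sh << 34) |         /* bits 34..43: SH/SA selector */
               (instance << 44);    /* bits 44..53: INSTANCE selector */
    }

    int main(void)
    {
        uint32_t val;
        int fd = open("/sys/kernel/debug/dri/0/amdgpu_regs", O_RDONLY);

        if (fd < 0)
            return 1;
        /* Read one 32-bit register (placeholder offset 0x1234) on SE0/SH0. */
        if (pread(fd, &val, sizeof(val), grbm_offset(0x1234, 0, 0, 0)) ==
            sizeof(val))
            printf("reg = 0x%08x\n", val);
        close(fd);
        return 0;
    }
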
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4299-drm-amdgpu-abstract-bo_base-init-function.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4299-drm-amdgpu-abstract-bo_base-init-function.patch
new file mode 100644
index 00000000..e731b739
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4299-drm-amdgpu-abstract-bo_base-init-function.patch
@@ -0,0 +1,148 @@
+From 7791169a4e436b4414797b2cc9ae40c5290c26ee Mon Sep 17 00:00:00 2001
+From: Chunming Zhou <david1.zhou@amd.com>
+Date: Tue, 24 Apr 2018 12:14:39 +0800
+Subject: [PATCH 4299/5725] drm/amdgpu: abstract bo_base init function
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Change-Id: I6eb1f509c83fc0c5f03563afd230bbcd9308a3d0
+Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 73 ++++++++++++++++++----------------
+ 1 file changed, 38 insertions(+), 35 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index edf2559..dd84ed4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -94,6 +94,36 @@ struct amdgpu_prt_cb {
+ struct dma_fence_cb cb;
+ };
+
++static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
++ struct amdgpu_vm *vm,
++ struct amdgpu_bo *bo)
++{
++ base->vm = vm;
++ base->bo = bo;
++ INIT_LIST_HEAD(&base->bo_list);
++ INIT_LIST_HEAD(&base->vm_status);
++
++ if (!bo)
++ return;
++ list_add_tail(&base->bo_list, &bo->va);
++
++ if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
++ return;
++
++ if (bo->preferred_domains &
++ amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
++ return;
++
++ /*
++ * we checked all the prerequisites, but it looks like this per vm bo
++ * is currently evicted. add the bo to the evicted list to make sure it
++ * is validated on next vm use to avoid fault.
++ * */
++ spin_lock(&vm->status_lock);
++ list_move_tail(&base->vm_status, &vm->evicted);
++ spin_unlock(&vm->status_lock);
++}
++
+ /**
+ * amdgpu_vm_level_shift - return the addr shift for each level
+ *
+@@ -447,11 +477,9 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
+ */
+ pt->parent = amdgpu_bo_ref(parent->base.bo);
+
+- entry->base.vm = vm;
+- entry->base.bo = pt;
+- list_add_tail(&entry->base.bo_list, &pt->va);
++ amdgpu_vm_bo_base_init(&entry->base, vm, pt);
+ spin_lock(&vm->status_lock);
+- list_add(&entry->base.vm_status, &vm->relocated);
++ list_move(&entry->base.vm_status, &vm->relocated);
+ spin_unlock(&vm->status_lock);
+ }
+
+@@ -1871,36 +1899,12 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
+ if (bo_va == NULL) {
+ return NULL;
+ }
+- bo_va->base.vm = vm;
+- bo_va->base.bo = bo;
+- INIT_LIST_HEAD(&bo_va->base.bo_list);
+- INIT_LIST_HEAD(&bo_va->base.vm_status);
++ amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
+
+ bo_va->ref_count = 1;
+ INIT_LIST_HEAD(&bo_va->valids);
+ INIT_LIST_HEAD(&bo_va->invalids);
+
+- if (!bo)
+- return bo_va;
+-
+- list_add_tail(&bo_va->base.bo_list, &bo->va);
+-
+- if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
+- return bo_va;
+-
+- if (bo->preferred_domains &
+- amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
+- return bo_va;
+-
+- /*
+- * We checked all the prerequisites, but it looks like this per VM BO
+- * is currently evicted. add the BO to the evicted list to make sure it
+- * is validated on next VM use to avoid fault.
+- * */
+- spin_lock(&vm->status_lock);
+- list_move_tail(&bo_va->base.vm_status, &vm->evicted);
+- spin_unlock(&vm->status_lock);
+-
+ return bo_va;
+ }
+
+@@ -2419,6 +2423,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ int vm_context, unsigned int pasid)
+ {
+ struct amdgpu_bo_param bp;
++ struct amdgpu_bo *root;
+ const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
+ AMDGPU_VM_PTE_COUNT(adev) * 8);
+ unsigned ring_instance;
+@@ -2480,23 +2485,21 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ bp.flags = flags;
+ bp.type = ttm_bo_type_kernel;
+ bp.resv = NULL;
+- r = amdgpu_bo_create(adev, &bp, &vm->root.base.bo);
++ r = amdgpu_bo_create(adev, &bp, &root);
+ if (r)
+ goto error_free_sched_entity;
+
+- r = amdgpu_bo_reserve(vm->root.base.bo, true);
++ r = amdgpu_bo_reserve(root, true);
+ if (r)
+ goto error_free_root;
+
+- r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo,
++ r = amdgpu_vm_clear_bo(adev, vm, root,
+ adev->vm_manager.root_level,
+ vm->pte_support_ats);
+ if (r)
+ goto error_unreserve;
+
+- vm->root.base.vm = vm;
+- list_add_tail(&vm->root.base.bo_list, &vm->root.base.bo->va);
+- list_add_tail(&vm->root.base.vm_status, &vm->evicted);
++ amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
+ amdgpu_bo_unreserve(vm->root.base.bo);
+
+ if (pasid) {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4300-drm-amdgpu-Fix-KFD-doorbell-SG-BO-mapping.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4300-drm-amdgpu-Fix-KFD-doorbell-SG-BO-mapping.patch
new file mode 100644
index 00000000..7d0b79ca
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4300-drm-amdgpu-Fix-KFD-doorbell-SG-BO-mapping.patch
@@ -0,0 +1,34 @@
+From 15dca8b36276e246c15455cebbcdeef4c9ebcfb4 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Thu, 3 May 2018 17:37:56 -0400
+Subject: [PATCH 4300/5725] drm/amdgpu: Fix KFD doorbell SG BO mapping
+
+This type of BO was mistaken for an invalid userptr BO. Improve that
+check to test that it's actually a userptr BO so that SG BOs that
+are still in the CPU domain can be validated and mapped correctly.
+
+Bug: SWDEV-152552
+
+Change-Id: I563f86ffc9d0beabe065768a9195caef06577048
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index dfa909f..d43473e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -1468,7 +1468,8 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
+ * the queues are still stopped and we can leave mapping for
+ * the next restore worker
+ */
+- if (bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
++ if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
++ bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
+ is_invalid_userptr = true;
+
+ if (check_if_add_bo_to_vm(avm, mem)) {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4301-drm-amdkfd-Don-t-use-kmap_atomic.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4301-drm-amdkfd-Don-t-use-kmap_atomic.patch
new file mode 100644
index 00000000..00b9d4bb
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4301-drm-amdkfd-Don-t-use-kmap_atomic.patch
@@ -0,0 +1,52 @@
+From 9eeff2bb210e18fce442e9ff62ab050bc7bbe5e2 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Fri, 4 May 2018 18:59:38 -0400
+Subject: [PATCH 4301/5725] drm/amdkfd: Don't use kmap_atomic
+
+kmap_atomic is an optimization that's only useful for CONFIG_HIGHMEM
+which isn't applicable on x86_64. It also requires a lot more care
+because it disables page faults. This causes problems with
+copy_from_user in the atomic section unless pages are faulted in
+explicitly.
+
+Since KFD only supports 64-bit kernels, we don't need to handle the
+complication of HIGHMEM and kmap_atomic. Use plain kmap instead.
+
+Bug: SWDEV-138474
+
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+
+Conflicts:
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+
+Change-Id: I6ecbd7c10fb8b589dc1ab8af8795ff3e6d416db1
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index bb38da1..01a253c 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -2133,7 +2133,7 @@ static int kfd_copy_userptr_bos(struct cma_iter *si, struct cma_iter *di,
+
+ for (i = 0; i < nl; i++) {
+ unsigned int n;
+- void *kaddr = kmap_atomic(process_pages[i]);
++ void *kaddr = kmap(process_pages[i]);
+
+ if (cma_write) {
+ n = copy_from_user(kaddr+offset_in_page,
+@@ -2144,7 +2144,7 @@ static int kfd_copy_userptr_bos(struct cma_iter *si, struct cma_iter *di,
+ kaddr+offset_in_page,
+ copy_size);
+ }
+- kunmap_atomic(kaddr);
++ kunmap(kaddr);
+ if (n) {
+ ret = -EFAULT;
+ break;
+--
+2.7.4
+
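The point above is about execution context rather than the mapping itself:
kmap() may sleep, so copy_from_user()/copy_to_user() can still fault user
pages in while the mapping is held, which kmap_atomic() would forbid by
disabling page faults. A simplified kernel-context sketch of that pattern;
the helper name and signature are illustrative, not the driver's:

    #include <linux/errno.h>
    #include <linux/highmem.h>
    #include <linux/uaccess.h>

    /* Copy between a pinned page and user memory via a sleepable mapping. */
    static int copy_one_page(struct page *pg, void __user *uptr,
                             unsigned int off, unsigned int len, bool write)
    {
            void *kaddr = kmap(pg);  /* may sleep; page faults stay enabled */
            unsigned long left;

            if (write)
                    left = copy_from_user(kaddr + off, uptr, len);
            else
                    left = copy_to_user(uptr, kaddr + off, len);

            kunmap(pg);              /* balanced with the kmap() above */
            return left ? -EFAULT : 0;
    }
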
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4302-drm-amdkcl-fixed-can-t-find-kgd_kfd_interface.h-head.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4302-drm-amdkcl-fixed-can-t-find-kgd_kfd_interface.h-head.patch
new file mode 100644
index 00000000..2733d638
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4302-drm-amdkcl-fixed-can-t-find-kgd_kfd_interface.h-head.patch
@@ -0,0 +1,32 @@
+From def66e36a43f695e911b19bcca8798e9276cba17 Mon Sep 17 00:00:00 2001
+From: Kevin Wang <Kevin1.Wang@amd.com>
+Date: Wed, 9 May 2018 10:12:21 +0800
+Subject: [PATCH 4302/5725] drm/amdkcl: fixed can't find kgd_kfd_interface.h
+ header error
+
+Change-Id: Ic38c5e605ba98183f4efaf68e0523dfa8aa22d8c
+Signed-off-by: Kevin Wang <Kevin1.Wang@amd.com>
+Reviewed-by: Le Ma <Le.Ma@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/Makefile | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile
+index 66f1921..4804f9c 100644
+--- a/drivers/gpu/drm/amd/amdkfd/Makefile
++++ b/drivers/gpu/drm/amd/amdkfd/Makefile
+@@ -23,8 +23,9 @@
+ # Makefile for Heterogenous System Architecture support for AMD GPU devices
+ #
+
+-ccflags-y := -Idrivers/gpu/drm/amd/include/ \
+- -Idrivers/gpu/drm/amd/include/asic_reg
++FULL_AMD_PATH=$(src)/..
++ccflags-y := -I$(FULL_AMD_PATH)/include \
++ -I$(FULL_AMD_PATH)/include/asic_reg
+
+ amdkfd-y := kfd_module.o kfd_device.o kfd_chardev.o kfd_topology.o \
+ kfd_pasid.o kfd_doorbell.o kfd_flat_memory.o \
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4303-drm-amdgpu-set-COMPUTE_PGM_RSRC1-for-SGPR-VGPR-clear.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4303-drm-amdgpu-set-COMPUTE_PGM_RSRC1-for-SGPR-VGPR-clear.patch
new file mode 100644
index 00000000..23226725
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4303-drm-amdgpu-set-COMPUTE_PGM_RSRC1-for-SGPR-VGPR-clear.patch
@@ -0,0 +1,61 @@
+From 5fa5d542021ba3f890da59d79c01043fc1b29068 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Nicolai=20H=C3=A4hnle?= <nicolai.haehnle@amd.com>
+Date: Thu, 12 Apr 2018 16:34:19 +0200
+Subject: [PATCH 4303/5725] drm/amdgpu: set COMPUTE_PGM_RSRC1 for SGPR/VGPR
+ clearing shaders
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Otherwise, the SQ may skip some of the register writes, or shader waves may
+be allocated where we don't expect them, so that as a result we don't actually
+reset all of the register SRAMs. This can lead to spurious ECC errors later on
+if a shader uses an uninitialized register.
+
+Signed-off-by: Nicolai Hähnle <nicolai.haehnle@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index 4c9ea8d..befc7a0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -1459,10 +1459,11 @@ static const u32 sgpr_init_compute_shader[] =
+ static const u32 vgpr_init_regs[] =
+ {
+ mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xffffffff,
+- mmCOMPUTE_RESOURCE_LIMITS, 0,
++ mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
+ mmCOMPUTE_NUM_THREAD_X, 256*4,
+ mmCOMPUTE_NUM_THREAD_Y, 1,
+ mmCOMPUTE_NUM_THREAD_Z, 1,
++ mmCOMPUTE_PGM_RSRC1, 0x100004f, /* VGPRS=15 (64 logical VGPRs), SGPRS=1 (16 SGPRs), BULKY=1 */
+ mmCOMPUTE_PGM_RSRC2, 20,
+ mmCOMPUTE_USER_DATA_0, 0xedcedc00,
+ mmCOMPUTE_USER_DATA_1, 0xedcedc01,
+@@ -1479,10 +1480,11 @@ static const u32 vgpr_init_regs[] =
+ static const u32 sgpr1_init_regs[] =
+ {
+ mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0x0f,
+- mmCOMPUTE_RESOURCE_LIMITS, 0x1000000,
++ mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
+ mmCOMPUTE_NUM_THREAD_X, 256*5,
+ mmCOMPUTE_NUM_THREAD_Y, 1,
+ mmCOMPUTE_NUM_THREAD_Z, 1,
++ mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
+ mmCOMPUTE_PGM_RSRC2, 20,
+ mmCOMPUTE_USER_DATA_0, 0xedcedc00,
+ mmCOMPUTE_USER_DATA_1, 0xedcedc01,
+@@ -1503,6 +1505,7 @@ static const u32 sgpr2_init_regs[] =
+ mmCOMPUTE_NUM_THREAD_X, 256*5,
+ mmCOMPUTE_NUM_THREAD_Y, 1,
+ mmCOMPUTE_NUM_THREAD_Z, 1,
++ mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
+ mmCOMPUTE_PGM_RSRC2, 20,
+ mmCOMPUTE_USER_DATA_0, 0xedcedc00,
+ mmCOMPUTE_USER_DATA_1, 0xedcedc01,
+--
+2.7.4
+
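The hard-coded COMPUTE_PGM_RSRC1 values follow the GFX8 field granularities
assumed by the comments above: bits 5:0 encode VGPRs/4 - 1 and bits 9:6 encode
SGPRs/8 - 1, which is where "64 logical VGPRs", "16 SGPRs" and "80 GPRS" come
from. A small decode sketch of that arithmetic (field layout assumed, not
taken from the patch):

    #include <stdio.h>

    /* Print the VGPR/SGPR allocation implied by a COMPUTE_PGM_RSRC1 value,
     * assuming GFX8 granularity: (field + 1) * 4 VGPRs, (field + 1) * 8 SGPRs.
     */
    static void decode_rsrc1(unsigned int rsrc1)
    {
        unsigned int vgprs_field = rsrc1 & 0x3f;        /* bits 5:0 */
        unsigned int sgprs_field = (rsrc1 >> 6) & 0xf;  /* bits 9:6 */

        printf("0x%07x -> %u VGPRs, %u SGPRs\n", rsrc1,
               (vgprs_field + 1) * 4, (sgprs_field + 1) * 8);
    }

    int main(void)
    {
        decode_rsrc1(0x100004f); /* vgpr_init_regs: 64 VGPRs, 16 SGPRs */
        decode_rsrc1(0x240);     /* sgpr init tables: 80 SGPRs */
        return 0;
    }
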
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4304-drm-admgpu-fix-mode_valid-s-return-type.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4304-drm-admgpu-fix-mode_valid-s-return-type.patch
new file mode 100644
index 00000000..da4a1a66
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4304-drm-admgpu-fix-mode_valid-s-return-type.patch
@@ -0,0 +1,103 @@
+From de7d417043414cb86f8cfd00247c76c9ca628c88 Mon Sep 17 00:00:00 2001
+From: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
+Date: Tue, 24 Apr 2018 15:14:18 +0200
+Subject: [PATCH 4304/5725] drm/admgpu: fix mode_valid's return type
+
+The method struct drm_connector_helper_funcs::mode_valid is defined
+as returning an 'enum drm_mode_status' but the driver implementation
+for this method uses an 'int' for it.
+
+Fix this by using 'enum drm_mode_status' in the driver too.
+
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 8 ++++----
+ drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 2 +-
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 +-
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 2 +-
+ 4 files changed, 7 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+index 820d66b..c0b2ab9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+@@ -694,7 +694,7 @@ static int amdgpu_connector_lvds_get_modes(struct drm_connector *connector)
+ return ret;
+ }
+
+-static int amdgpu_connector_lvds_mode_valid(struct drm_connector *connector,
++static enum drm_mode_status amdgpu_connector_lvds_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+ {
+ struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
+@@ -842,7 +842,7 @@ static int amdgpu_connector_vga_get_modes(struct drm_connector *connector)
+ return ret;
+ }
+
+-static int amdgpu_connector_vga_mode_valid(struct drm_connector *connector,
++static enum drm_mode_status amdgpu_connector_vga_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+ {
+ struct drm_device *dev = connector->dev;
+@@ -1163,7 +1163,7 @@ static void amdgpu_connector_dvi_force(struct drm_connector *connector)
+ amdgpu_connector->use_digital = true;
+ }
+
+-static int amdgpu_connector_dvi_mode_valid(struct drm_connector *connector,
++static enum drm_mode_status amdgpu_connector_dvi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+ {
+ struct drm_device *dev = connector->dev;
+@@ -1435,7 +1435,7 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
+ return ret;
+ }
+
+-static int amdgpu_connector_dp_mode_valid(struct drm_connector *connector,
++static enum drm_mode_status amdgpu_connector_dp_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+ {
+ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+index bdcc7f8..6b8c746 100644
+--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
++++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+@@ -330,7 +330,7 @@ static int dce_virtual_get_modes(struct drm_connector *connector)
+ return 0;
+ }
+
+-static int dce_virtual_mode_valid(struct drm_connector *connector,
++static enum drm_mode_status dce_virtual_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+ {
+ return MODE_OK;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index ab3fd8a..448fd0b 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -3009,7 +3009,7 @@ static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
+ create_eml_sink(aconnector);
+ }
+
+-int amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
++enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+ {
+ int result = MODE_ERROR;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+index e66dabb..51c09a4 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+@@ -271,7 +271,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
+ struct dc_link *link,
+ int link_index);
+
+-int amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
++enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+
+ void dm_restore_drm_connector_state(struct drm_device *dev,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4305-drm-amdgpu-add-VEGAM-ASIC-type.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4305-drm-amdgpu-add-VEGAM-ASIC-type.patch
new file mode 100644
index 00000000..e4a832ed
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4305-drm-amdgpu-add-VEGAM-ASIC-type.patch
@@ -0,0 +1,27 @@
+From f2a5b53782ab5e25e9a3f080f6fb39e2a22d64de Mon Sep 17 00:00:00 2001
+From: Leo Liu <leo.liu@amd.com>
+Date: Thu, 9 Nov 2017 13:18:24 -0500
+Subject: [PATCH 4305/5725] drm/amdgpu: add VEGAM ASIC type
+
+Signed-off-by: Leo Liu <leo.liu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 0ece1c3..f2abcf1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -83,6 +83,7 @@ static const char *amdgpu_asic_name[] = {
+ "POLARIS10",
+ "POLARIS11",
+ "POLARIS12",
++ "VEGAM",
+ "VEGA10",
+ "VEGA12",
+ "RAVEN",
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4306-drm-amdgpu-bypass-GPU-info-firmware-load-for-VEGAM.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4306-drm-amdgpu-bypass-GPU-info-firmware-load-for-VEGAM.patch
new file mode 100644
index 00000000..66cb064d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4306-drm-amdgpu-bypass-GPU-info-firmware-load-for-VEGAM.patch
@@ -0,0 +1,45 @@
+From 25866d58978f3dae1b648bfff41d9a864215ed16 Mon Sep 17 00:00:00 2001
+From: Leo Liu <leo.liu@amd.com>
+Date: Thu, 9 Nov 2017 13:19:58 -0500
+Subject: [PATCH 4306/5725] drm/amdgpu: bypass GPU info firmware load for VEGAM
+
+Signed-off-by: Leo Liu <leo.liu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 ++-
+ include/drm/amd_asic_type.h | 1 +
+ 2 files changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index f2abcf1..7ad26d4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1417,9 +1417,10 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
+ case CHIP_TOPAZ:
+ case CHIP_TONGA:
+ case CHIP_FIJI:
+- case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
++ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
++ case CHIP_VEGAM:
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ #ifdef CONFIG_DRM_AMDGPU_SI
+diff --git a/include/drm/amd_asic_type.h b/include/drm/amd_asic_type.h
+index 6c731c5..695bde7 100644
+--- a/include/drm/amd_asic_type.h
++++ b/include/drm/amd_asic_type.h
+@@ -44,6 +44,7 @@ enum amd_asic_type {
+ CHIP_POLARIS10,
+ CHIP_POLARIS11,
+ CHIP_POLARIS12,
++ CHIP_VEGAM,
+ CHIP_VEGA10,
+ CHIP_VEGA12,
+ CHIP_RAVEN,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4307-drm-amdgpu-set-VEGAM-to-ASIC-family-and-ip-blocks.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4307-drm-amdgpu-set-VEGAM-to-ASIC-family-and-ip-blocks.patch
new file mode 100644
index 00000000..67862216
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4307-drm-amdgpu-set-VEGAM-to-ASIC-family-and-ip-blocks.patch
@@ -0,0 +1,31 @@
+From ffbfeb2d15f3148f2cb6c781a7e21b3ff744d178 Mon Sep 17 00:00:00 2001
+From: Leo Liu <leo.liu@amd.com>
+Date: Thu, 9 Nov 2017 13:22:54 -0500
+Subject: [PATCH 4307/5725] drm/amdgpu: set VEGAM to ASIC family and ip blocks
+
+Signed-off-by: Leo Liu <leo.liu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 7ad26d4..f18252b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1526,9 +1526,10 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
+ case CHIP_TOPAZ:
+ case CHIP_TONGA:
+ case CHIP_FIJI:
+- case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
++ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
++ case CHIP_VEGAM:
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4308-drm-amdgpu-specify-VEGAM-ucode-SMU-load-method.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4308-drm-amdgpu-specify-VEGAM-ucode-SMU-load-method.patch
new file mode 100644
index 00000000..5f4747cb
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4308-drm-amdgpu-specify-VEGAM-ucode-SMU-load-method.patch
@@ -0,0 +1,27 @@
+From 3da4c7bede18f0eb5fe9d2664d9f6ffe6b72073c Mon Sep 17 00:00:00 2001
+From: Leo Liu <leo.liu@amd.com>
+Date: Thu, 9 Nov 2017 13:26:54 -0500
+Subject: [PATCH 4308/5725] drm/amdgpu: specify VEGAM ucode SMU load method
+
+Signed-off-by: Leo Liu <leo.liu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+index 0c74c09..ee71c40 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+@@ -295,6 +295,7 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
++ case CHIP_VEGAM:
+ if (!load_type)
+ return AMDGPU_FW_LOAD_DIRECT;
+ else
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4309-drm-amdgpu-add-VEGAM-SMU-firmware-support.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4309-drm-amdgpu-add-VEGAM-SMU-firmware-support.patch
new file mode 100644
index 00000000..42dccb6a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4309-drm-amdgpu-add-VEGAM-SMU-firmware-support.patch
@@ -0,0 +1,42 @@
+From 194a989520c780af8b997cbbce45f9448c28a5bf Mon Sep 17 00:00:00 2001
+From: Leo Liu <leo.liu@amd.com>
+Date: Thu, 9 Nov 2017 13:24:47 -0500
+Subject: [PATCH 4309/5725] drm/amdgpu: add VEGAM SMU firmware support
+
+Signed-off-by: Leo Liu <leo.liu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 3 +++
+ drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | 1 +
+ 2 files changed, 4 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+index a8a942c..5b3d3bf 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+@@ -385,6 +385,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
+ case CHIP_POLARIS12:
+ strcpy(fw_name, "amdgpu/polaris12_smc.bin");
+ break;
++ case CHIP_VEGAM:
++ strcpy(fw_name, "amdgpu/vegam_smc.bin");
++ break;
+ case CHIP_VEGA10:
+ if ((adev->pdev->device == 0x687f) &&
+ ((adev->pdev->revision == 0xc0) ||
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+index c28b60a..ee236df 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+@@ -41,6 +41,7 @@ MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
+ MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");
+ MODULE_FIRMWARE("amdgpu/polaris11_k_smc.bin");
+ MODULE_FIRMWARE("amdgpu/polaris12_smc.bin");
++MODULE_FIRMWARE("amdgpu/vegam_smc.bin");
+ MODULE_FIRMWARE("amdgpu/vega10_smc.bin");
+ MODULE_FIRMWARE("amdgpu/vega10_acg_smc.bin");
+ MODULE_FIRMWARE("amdgpu/vega12_smc.bin");
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4310-drm-amdgpu-virtual_dce-add-VEGAM-support.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4310-drm-amdgpu-virtual_dce-add-VEGAM-support.patch
new file mode 100644
index 00000000..bebad951
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4310-drm-amdgpu-virtual_dce-add-VEGAM-support.patch
@@ -0,0 +1,30 @@
+From a68ab3347dd2307e8b90ba072be1262fcc93da37 Mon Sep 17 00:00:00 2001
+From: Leo Liu <leo.liu@amd.com>
+Date: Fri, 3 Nov 2017 14:22:16 -0400
+Subject: [PATCH 4310/5725] drm/amdgpu/virtual_dce: add VEGAM support
+
+Signed-off-by: Leo Liu <leo.liu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+index 6b8c746..8724edd 100644
+--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
++++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+@@ -463,8 +463,9 @@ static int dce_virtual_hw_init(void *handle)
+ break;
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+- case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
++ case CHIP_POLARIS11:
++ case CHIP_VEGAM:
+ dce_v11_0_disable_dce(adev);
+ break;
+ case CHIP_TOPAZ:
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4311-drm-amdgpu-add-VEGAM-dc-support-check.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4311-drm-amdgpu-add-VEGAM-dc-support-check.patch
new file mode 100644
index 00000000..8b3ee796
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4311-drm-amdgpu-add-VEGAM-dc-support-check.patch
@@ -0,0 +1,31 @@
+From 0d9ed94ed2cd751bcf5d579b68b2677778a69f5f Mon Sep 17 00:00:00 2001
+From: Leo Liu <leo.liu@amd.com>
+Date: Wed, 8 Nov 2017 18:07:12 -0500
+Subject: [PATCH 4311/5725] drm/amdgpu: add VEGAM dc support check
+
+Signed-off-by: Leo Liu <leo.liu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index f18252b..30e607e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -2204,9 +2204,10 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
+ case CHIP_MULLINS:
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+- case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
++ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
++ case CHIP_VEGAM:
+ case CHIP_TONGA:
+ case CHIP_FIJI:
+ case CHIP_VEGA10:
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4312-drm-amdgpu-skip-VEGAM-MC-firmware-load.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4312-drm-amdgpu-skip-VEGAM-MC-firmware-load.patch
new file mode 100644
index 00000000..abc73196
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4312-drm-amdgpu-skip-VEGAM-MC-firmware-load.patch
@@ -0,0 +1,29 @@
+From 8ad4b252545ed2be9a9768a9f9dbc54e20c2a833 Mon Sep 17 00:00:00 2001
+From: Leo Liu <leo.liu@amd.com>
+Date: Thu, 16 Nov 2017 13:15:12 -0500
+Subject: [PATCH 4312/5725] drm/amdgpu: skip VEGAM MC firmware load
+
+Directly loaded by VBIOS
+
+Signed-off-by: Leo Liu <leo.liu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+index 2829ae8..26f42fa 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+@@ -231,6 +231,7 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
+ case CHIP_FIJI:
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
++ case CHIP_VEGAM:
+ return 0;
+ default: BUG();
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4313-drm-amdgpu-add-VEGAM-GMC-golden-settings.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4313-drm-amdgpu-add-VEGAM-GMC-golden-settings.patch
new file mode 100644
index 00000000..1ec2c155
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4313-drm-amdgpu-add-VEGAM-GMC-golden-settings.patch
@@ -0,0 +1,27 @@
+From ef2da80861f7e4b349770838ed222342f24704f0 Mon Sep 17 00:00:00 2001
+From: Leo Liu <leo.liu@amd.com>
+Date: Wed, 11 Apr 2018 15:18:20 -0500
+Subject: [PATCH 4313/5725] drm/amdgpu: add VEGAM GMC golden settings
+
+Signed-off-by: Leo Liu <leo.liu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+index 26f42fa..dbda097 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+@@ -138,6 +138,7 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
+ break;
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
++ case CHIP_VEGAM:
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_polaris11_a11,
+ ARRAY_SIZE(golden_settings_polaris11_a11));
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4314-drm-amdgpu-initialize-VEGAM-GMC-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4314-drm-amdgpu-initialize-VEGAM-GMC-v2.patch
new file mode 100644
index 00000000..2f69c87c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4314-drm-amdgpu-initialize-VEGAM-GMC-v2.patch
@@ -0,0 +1,43 @@
+From ad3389bbfcb27a6840db8b5812892621e6319d51 Mon Sep 17 00:00:00 2001
+From: Leo Liu <leo.liu@amd.com>
+Date: Wed, 11 Apr 2018 15:20:35 -0500
+Subject: [PATCH 4314/5725] drm/amdgpu: initialize VEGAM GMC (v2)
+
+v2: use proper register rather than hardcoding.
+
+Signed-off-by: Leo Liu <leo.liu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+index dbda097..c696bad 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+@@ -573,9 +573,10 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
+ /* set the gart size */
+ if (amdgpu_gart_size == -1) {
+ switch (adev->asic_type) {
+- case CHIP_POLARIS11: /* all engines support GPUVM */
+ case CHIP_POLARIS10: /* all engines support GPUVM */
++ case CHIP_POLARIS11: /* all engines support GPUVM */
+ case CHIP_POLARIS12: /* all engines support GPUVM */
++ case CHIP_VEGAM: /* all engines support GPUVM */
+ default:
+ adev->gmc.gart_size = 256ULL << 20;
+ break;
+@@ -1097,7 +1098,8 @@ static int gmc_v8_0_sw_init(void *handle)
+ } else {
+ u32 tmp;
+
+- if (adev->asic_type == CHIP_FIJI)
++ if ((adev->asic_type == CHIP_FIJI) ||
++ (adev->asic_type == CHIP_VEGAM))
+ tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
+ else
+ tmp = RREG32(mmMC_SEQ_MISC0);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4315-drm-amdgpu-add-VEGAM-SDMA-firmware-support.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4315-drm-amdgpu-add-VEGAM-SDMA-firmware-support.patch
new file mode 100644
index 00000000..735a870b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4315-drm-amdgpu-add-VEGAM-SDMA-firmware-support.patch
@@ -0,0 +1,50 @@
+From aa3cf09b9870b3ebdfdc1a207ae77ad6ad72887e Mon Sep 17 00:00:00 2001
+From: Leo Liu <leo.liu@amd.com>
+Date: Thu, 9 Nov 2017 13:56:12 -0500
+Subject: [PATCH 4315/5725] drm/amdgpu: add VEGAM SDMA firmware support
+
+Signed-off-by: Leo Liu <leo.liu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+index 5a4f3c8..331ce0a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+@@ -62,6 +62,8 @@ MODULE_FIRMWARE("amdgpu/polaris11_sdma.bin");
+ MODULE_FIRMWARE("amdgpu/polaris11_sdma1.bin");
+ MODULE_FIRMWARE("amdgpu/polaris12_sdma.bin");
+ MODULE_FIRMWARE("amdgpu/polaris12_sdma1.bin");
++MODULE_FIRMWARE("amdgpu/vegam_sdma.bin");
++MODULE_FIRMWARE("amdgpu/vegam_sdma1.bin");
+
+
+ static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
+@@ -275,15 +277,18 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
+ case CHIP_FIJI:
+ chip_name = "fiji";
+ break;
+- case CHIP_POLARIS11:
+- chip_name = "polaris11";
+- break;
+ case CHIP_POLARIS10:
+ chip_name = "polaris10";
+ break;
++ case CHIP_POLARIS11:
++ chip_name = "polaris11";
++ break;
+ case CHIP_POLARIS12:
+ chip_name = "polaris12";
+ break;
++ case CHIP_VEGAM:
++ chip_name = "vegam";
++ break;
+ case CHIP_CARRIZO:
+ chip_name = "carrizo";
+ break;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4316-drm-amdgpu-add-VEGAM-SDMA-golden-settings.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4316-drm-amdgpu-add-VEGAM-SDMA-golden-settings.patch
new file mode 100644
index 00000000..2d2e2a6a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4316-drm-amdgpu-add-VEGAM-SDMA-golden-settings.patch
@@ -0,0 +1,27 @@
+From dedeaa91b9d49d4f92736f2976662d48a5310d83 Mon Sep 17 00:00:00 2001
+From: Leo Liu <leo.liu@amd.com>
+Date: Wed, 11 Apr 2018 15:22:20 -0500
+Subject: [PATCH 4316/5725] drm/amdgpu: add VEGAM SDMA golden settings
+
+Signed-off-by: Leo Liu <leo.liu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+index 331ce0a..d082751 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+@@ -211,6 +211,7 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
+ break;
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
++ case CHIP_VEGAM:
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_polaris11_a11,
+ ARRAY_SIZE(golden_settings_polaris11_a11));
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4317-drm-amdgpu-add-VEGAM-GFX-firmware-support.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4317-drm-amdgpu-add-VEGAM-GFX-firmware-support.patch
new file mode 100644
index 00000000..3817df10
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4317-drm-amdgpu-add-VEGAM-GFX-firmware-support.patch
@@ -0,0 +1,96 @@
+From 558cd297fe8508b9dd336cc04de01db2a6122145 Mon Sep 17 00:00:00 2001
+From: Leo Liu <leo.liu@amd.com>
+Date: Fri, 10 Nov 2017 11:04:09 -0500
+Subject: [PATCH 4317/5725] drm/amdgpu: add VEGAM GFX firmware support
+
+Signed-off-by: Leo Liu <leo.liu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 42 ++++++++++++++++++++++-------------
+ 1 file changed, 26 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index befc7a0..8e956a9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -125,18 +125,6 @@ MODULE_FIRMWARE("amdgpu/fiji_mec.bin");
+ MODULE_FIRMWARE("amdgpu/fiji_mec2.bin");
+ MODULE_FIRMWARE("amdgpu/fiji_rlc.bin");
+
+-MODULE_FIRMWARE("amdgpu/polaris11_ce.bin");
+-MODULE_FIRMWARE("amdgpu/polaris11_ce_2.bin");
+-MODULE_FIRMWARE("amdgpu/polaris11_pfp.bin");
+-MODULE_FIRMWARE("amdgpu/polaris11_pfp_2.bin");
+-MODULE_FIRMWARE("amdgpu/polaris11_me.bin");
+-MODULE_FIRMWARE("amdgpu/polaris11_me_2.bin");
+-MODULE_FIRMWARE("amdgpu/polaris11_mec.bin");
+-MODULE_FIRMWARE("amdgpu/polaris11_mec_2.bin");
+-MODULE_FIRMWARE("amdgpu/polaris11_mec2.bin");
+-MODULE_FIRMWARE("amdgpu/polaris11_mec2_2.bin");
+-MODULE_FIRMWARE("amdgpu/polaris11_rlc.bin");
+-
+ MODULE_FIRMWARE("amdgpu/polaris10_ce.bin");
+ MODULE_FIRMWARE("amdgpu/polaris10_ce_2.bin");
+ MODULE_FIRMWARE("amdgpu/polaris10_pfp.bin");
+@@ -149,6 +137,18 @@ MODULE_FIRMWARE("amdgpu/polaris10_mec2.bin");
+ MODULE_FIRMWARE("amdgpu/polaris10_mec2_2.bin");
+ MODULE_FIRMWARE("amdgpu/polaris10_rlc.bin");
+
++MODULE_FIRMWARE("amdgpu/polaris11_ce.bin");
++MODULE_FIRMWARE("amdgpu/polaris11_ce_2.bin");
++MODULE_FIRMWARE("amdgpu/polaris11_pfp.bin");
++MODULE_FIRMWARE("amdgpu/polaris11_pfp_2.bin");
++MODULE_FIRMWARE("amdgpu/polaris11_me.bin");
++MODULE_FIRMWARE("amdgpu/polaris11_me_2.bin");
++MODULE_FIRMWARE("amdgpu/polaris11_mec.bin");
++MODULE_FIRMWARE("amdgpu/polaris11_mec_2.bin");
++MODULE_FIRMWARE("amdgpu/polaris11_mec2.bin");
++MODULE_FIRMWARE("amdgpu/polaris11_mec2_2.bin");
++MODULE_FIRMWARE("amdgpu/polaris11_rlc.bin");
++
+ MODULE_FIRMWARE("amdgpu/polaris12_ce.bin");
+ MODULE_FIRMWARE("amdgpu/polaris12_ce_2.bin");
+ MODULE_FIRMWARE("amdgpu/polaris12_pfp.bin");
+@@ -161,6 +161,13 @@ MODULE_FIRMWARE("amdgpu/polaris12_mec2.bin");
+ MODULE_FIRMWARE("amdgpu/polaris12_mec2_2.bin");
+ MODULE_FIRMWARE("amdgpu/polaris12_rlc.bin");
+
++MODULE_FIRMWARE("amdgpu/vegam_ce.bin");
++MODULE_FIRMWARE("amdgpu/vegam_pfp.bin");
++MODULE_FIRMWARE("amdgpu/vegam_me.bin");
++MODULE_FIRMWARE("amdgpu/vegam_mec.bin");
++MODULE_FIRMWARE("amdgpu/vegam_mec2.bin");
++MODULE_FIRMWARE("amdgpu/vegam_rlc.bin");
++
+ static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
+ {
+ {mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
+@@ -918,17 +925,20 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
+ case CHIP_FIJI:
+ chip_name = "fiji";
+ break;
+- case CHIP_POLARIS11:
+- chip_name = "polaris11";
++ case CHIP_STONEY:
++ chip_name = "stoney";
+ break;
+ case CHIP_POLARIS10:
+ chip_name = "polaris10";
+ break;
++ case CHIP_POLARIS11:
++ chip_name = "polaris11";
++ break;
+ case CHIP_POLARIS12:
+ chip_name = "polaris12";
+ break;
+- case CHIP_STONEY:
+- chip_name = "stoney";
++ case CHIP_VEGAM:
++ chip_name = "vegam";
+ break;
+ default:
+ BUG();
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4318-drm-amdgpu-add-VEGAM-GFX-golden-settings.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4318-drm-amdgpu-add-VEGAM-GFX-golden-settings.patch
new file mode 100644
index 00000000..01b7edac
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4318-drm-amdgpu-add-VEGAM-GFX-golden-settings.patch
@@ -0,0 +1,72 @@
+From fbede9fd7f43521e96ef6ed200b5e0d0129716bc Mon Sep 17 00:00:00 2001
+From: Leo Liu <leo.liu@amd.com>
+Date: Thu, 16 Nov 2017 13:41:03 -0500
+Subject: [PATCH 4318/5725] drm/amdgpu: add VEGAM GFX golden settings
+
+Signed-off-by: Leo Liu <leo.liu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 39 +++++++++++++++++++++++++++++++++++
+ 1 file changed, 39 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index 8e956a9..58826de 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -299,6 +299,37 @@ static const u32 tonga_mgcg_cgcg_init[] =
+ mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
+ };
+
++static const u32 golden_settings_vegam_a11[] =
++{
++ mmCB_HW_CONTROL, 0x0001f3cf, 0x00007208,
++ mmCB_HW_CONTROL_2, 0x0f000000, 0x0d000000,
++ mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
++ mmDB_DEBUG2, 0xf00fffff, 0x00000400,
++ mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
++ mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
++ mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x3a00161a,
++ mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x0000002e,
++ mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
++ mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
++ mmSQ_CONFIG, 0x07f80000, 0x01180000,
++ mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
++ mmTCC_CTRL, 0x00100000, 0xf31fff7f,
++ mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f7,
++ mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
++ mmTCP_CHAN_STEER_LO, 0xffffffff, 0x32761054,
++ mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
++};
++
++static const u32 vegam_golden_common_all[] =
++{
++ mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
++ mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
++ mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
++ mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
++ mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
++ mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
++};
++
+ static const u32 golden_settings_polaris11_a11[] =
+ {
+ mmCB_HW_CONTROL, 0x0000f3cf, 0x00007208,
+@@ -719,6 +750,14 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
+ tonga_golden_common_all,
+ ARRAY_SIZE(tonga_golden_common_all));
+ break;
++ case CHIP_VEGAM:
++ amdgpu_device_program_register_sequence(adev,
++ golden_settings_vegam_a11,
++ ARRAY_SIZE(golden_settings_vegam_a11));
++ amdgpu_device_program_register_sequence(adev,
++ vegam_golden_common_all,
++ ARRAY_SIZE(vegam_golden_common_all));
++ break;
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
+ amdgpu_device_program_register_sequence(adev,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4319-drm-amdgpu-initialize-VEGAM-GFX.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4319-drm-amdgpu-initialize-VEGAM-GFX.patch
new file mode 100644
index 00000000..57c7fb1f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4319-drm-amdgpu-initialize-VEGAM-GFX.patch
@@ -0,0 +1,106 @@
+From 28efb2050770458db8da2d6abe34d912b1d962f3 Mon Sep 17 00:00:00 2001
+From: Leo Liu <leo.liu@amd.com>
+Date: Thu, 16 Nov 2017 13:49:56 -0500
+Subject: [PATCH 4319/5725] drm/amdgpu: initialize VEGAM GFX
+
+Signed-off-by: Leo Liu <leo.liu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 21 +++++++++++++++------
+ 1 file changed, 15 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index 58826de..3fa37a4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -1819,6 +1819,7 @@ static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
+ gb_addr_config = POLARIS11_GB_ADDR_CONFIG_GOLDEN;
+ break;
+ case CHIP_POLARIS10:
++ case CHIP_VEGAM:
+ ret = amdgpu_atombios_get_gfx_info(adev);
+ if (ret)
+ return ret;
+@@ -2006,12 +2007,13 @@ static int gfx_v8_0_sw_init(void *handle)
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ switch (adev->asic_type) {
+- case CHIP_FIJI:
+ case CHIP_TONGA:
++ case CHIP_CARRIZO:
++ case CHIP_FIJI:
++ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
+- case CHIP_POLARIS10:
+- case CHIP_CARRIZO:
++ case CHIP_VEGAM:
+ adev->gfx.mec.num_mec = 2;
+ break;
+ case CHIP_TOPAZ:
+@@ -2372,6 +2374,7 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
+
+ break;
+ case CHIP_FIJI:
++ case CHIP_VEGAM:
+ modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+@@ -3553,6 +3556,7 @@ gfx_v8_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
+ {
+ switch (adev->asic_type) {
+ case CHIP_FIJI:
++ case CHIP_VEGAM:
+ *rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) |
+ RB_XSEL2(1) | PKR_MAP(2) |
+ PKR_XSEL(1) | PKR_YSEL(1) |
+@@ -4120,7 +4124,8 @@ static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
+ gfx_v8_0_init_power_gating(adev);
+ WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
+ } else if ((adev->asic_type == CHIP_POLARIS11) ||
+- (adev->asic_type == CHIP_POLARIS12)) {
++ (adev->asic_type == CHIP_POLARIS12) ||
++ (adev->asic_type == CHIP_VEGAM)) {
+ gfx_v8_0_init_csb(adev);
+ gfx_v8_0_init_save_restore_list(adev);
+ gfx_v8_0_enable_save_restore_machine(adev);
+@@ -4195,7 +4200,8 @@ static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
+ WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);
+ if (adev->asic_type == CHIP_POLARIS11 ||
+ adev->asic_type == CHIP_POLARIS10 ||
+- adev->asic_type == CHIP_POLARIS12) {
++ adev->asic_type == CHIP_POLARIS12 ||
++ adev->asic_type == CHIP_VEGAM) {
+ tmp = RREG32(mmRLC_CGCG_CGLS_CTRL_3D);
+ tmp &= ~0x3;
+ WREG32(mmRLC_CGCG_CGLS_CTRL_3D, tmp);
+@@ -5547,7 +5553,8 @@ static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *ade
+ bool enable)
+ {
+ if ((adev->asic_type == CHIP_POLARIS11) ||
+- (adev->asic_type == CHIP_POLARIS12))
++ (adev->asic_type == CHIP_POLARIS12) ||
++ (adev->asic_type == CHIP_VEGAM))
+ /* Send msg to SMU via Powerplay */
+ amdgpu_device_ip_set_powergating_state(adev,
+ AMD_IP_BLOCK_TYPE_SMC,
+@@ -5637,6 +5644,7 @@ static int gfx_v8_0_set_powergating_state(void *handle,
+ break;
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
++ case CHIP_VEGAM:
+ if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
+ gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
+ else
+@@ -6203,6 +6211,7 @@ static int gfx_v8_0_set_clockgating_state(void *handle,
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
++ case CHIP_VEGAM:
+ gfx_v8_0_polaris_update_gfx_clock_gating(adev, state);
+ break;
+ default:
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4320-drm-amdgpu-add-VEGAM-UVD-firmware-support.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4320-drm-amdgpu-add-VEGAM-UVD-firmware-support.patch
new file mode 100644
index 00000000..6a4d2836
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4320-drm-amdgpu-add-VEGAM-UVD-firmware-support.patch
@@ -0,0 +1,45 @@
+From 4eb66e6ec2df18b5619ca8e7ed78a49b88bfedc2 Mon Sep 17 00:00:00 2001
+From: Leo Liu <leo.liu@amd.com>
+Date: Fri, 10 Nov 2017 12:27:40 -0500
+Subject: [PATCH 4320/5725] drm/amdgpu: add VEGAM UVD firmware support
+
+Signed-off-by: Leo Liu <leo.liu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+index 6cf5ccf..ff8a62a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+@@ -66,6 +66,7 @@
+ #define FIRMWARE_POLARIS10 "amdgpu/polaris10_uvd.bin"
+ #define FIRMWARE_POLARIS11 "amdgpu/polaris11_uvd.bin"
+ #define FIRMWARE_POLARIS12 "amdgpu/polaris12_uvd.bin"
++#define FIRMWARE_VEGAM "amdgpu/vegam_uvd.bin"
+
+ #define FIRMWARE_VEGA10 "amdgpu/vega10_uvd.bin"
+ #define FIRMWARE_VEGA12 "amdgpu/vega12_uvd.bin"
+@@ -109,6 +110,7 @@ MODULE_FIRMWARE(FIRMWARE_STONEY);
+ MODULE_FIRMWARE(FIRMWARE_POLARIS10);
+ MODULE_FIRMWARE(FIRMWARE_POLARIS11);
+ MODULE_FIRMWARE(FIRMWARE_POLARIS12);
++MODULE_FIRMWARE(FIRMWARE_VEGAM);
+
+ MODULE_FIRMWARE(FIRMWARE_VEGA10);
+ MODULE_FIRMWARE(FIRMWARE_VEGA12);
+@@ -172,6 +174,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
+ case CHIP_VEGA12:
+ fw_name = FIRMWARE_VEGA12;
+ break;
++ case CHIP_VEGAM:
++ fw_name = FIRMWARE_VEGAM;
++ break;
+ default:
+ return -EINVAL;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4321-drm-amdgpu-add-VEGAM-UVD-encode-support.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4321-drm-amdgpu-add-VEGAM-UVD-encode-support.patch
new file mode 100644
index 00000000..8a73b3be
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4321-drm-amdgpu-add-VEGAM-UVD-encode-support.patch
@@ -0,0 +1,28 @@
+From f71a255355fd9a1e47427f6dd27cd5f02ef9f1a6 Mon Sep 17 00:00:00 2001
+From: Leo Liu <leo.liu@amd.com>
+Date: Wed, 11 Apr 2018 15:24:01 -0500
+Subject: [PATCH 4321/5725] drm/amdgpu: add VEGAM UVD encode support
+
+Signed-off-by: Leo Liu <leo.liu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+index 981e233..3c58adc 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+@@ -62,7 +62,7 @@ static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
+ static inline bool uvd_v6_0_enc_support(struct amdgpu_device *adev)
+ {
+ return ((adev->asic_type >= CHIP_POLARIS10) &&
+- (adev->asic_type <= CHIP_POLARIS12) &&
++ (adev->asic_type <= CHIP_VEGAM) &&
+ (!adev->uvd.fw_version || adev->uvd.fw_version >= FW_1_130_16));
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4322-drm-amdgpu-add-VEGAM-VCE-firmware-support.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4322-drm-amdgpu-add-VEGAM-VCE-firmware-support.patch
new file mode 100644
index 00000000..99bd40bd
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4322-drm-amdgpu-add-VEGAM-VCE-firmware-support.patch
@@ -0,0 +1,49 @@
+From 597424491a72bcd2648246385a2035da3cbf36b2 Mon Sep 17 00:00:00 2001
+From: Leo Liu <leo.liu@amd.com>
+Date: Wed, 11 Apr 2018 15:25:57 -0500
+Subject: [PATCH 4322/5725] drm/amdgpu: add VEGAM VCE firmware support
+
+Signed-off-by: Leo Liu <leo.liu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+index d7261e0..e2186ed 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+@@ -51,8 +51,9 @@
+ #define FIRMWARE_FIJI "amdgpu/fiji_vce.bin"
+ #define FIRMWARE_STONEY "amdgpu/stoney_vce.bin"
+ #define FIRMWARE_POLARIS10 "amdgpu/polaris10_vce.bin"
+-#define FIRMWARE_POLARIS11 "amdgpu/polaris11_vce.bin"
+-#define FIRMWARE_POLARIS12 "amdgpu/polaris12_vce.bin"
++#define FIRMWARE_POLARIS11 "amdgpu/polaris11_vce.bin"
++#define FIRMWARE_POLARIS12 "amdgpu/polaris12_vce.bin"
++#define FIRMWARE_VEGAM "amdgpu/vegam_vce.bin"
+
+ #define FIRMWARE_VEGA10 "amdgpu/vega10_vce.bin"
+ #define FIRMWARE_VEGA12 "amdgpu/vega12_vce.bin"
+@@ -71,6 +72,7 @@ MODULE_FIRMWARE(FIRMWARE_STONEY);
+ MODULE_FIRMWARE(FIRMWARE_POLARIS10);
+ MODULE_FIRMWARE(FIRMWARE_POLARIS11);
+ MODULE_FIRMWARE(FIRMWARE_POLARIS12);
++MODULE_FIRMWARE(FIRMWARE_VEGAM);
+
+ MODULE_FIRMWARE(FIRMWARE_VEGA10);
+ MODULE_FIRMWARE(FIRMWARE_VEGA12);
+@@ -132,6 +134,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
+ case CHIP_POLARIS12:
+ fw_name = FIRMWARE_POLARIS12;
+ break;
++ case CHIP_VEGAM:
++ fw_name = FIRMWARE_VEGAM;
++ break;
+ case CHIP_VEGA10:
+ fw_name = FIRMWARE_VEGA10;
+ break;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4323-drm-amdgpu-add-VEGAM-to-VCE-harvest-config.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4323-drm-amdgpu-add-VEGAM-to-VCE-harvest-config.patch
new file mode 100644
index 00000000..5c767a8b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4323-drm-amdgpu-add-VEGAM-to-VCE-harvest-config.patch
@@ -0,0 +1,29 @@
+From e98cccc28cb81b8cff1e69e24a406cba2ea3db7b Mon Sep 17 00:00:00 2001
+From: Leo Liu <leo.liu@amd.com>
+Date: Fri, 10 Nov 2017 12:32:04 -0500
+Subject: [PATCH 4323/5725] drm/amdgpu: add VEGAM to VCE harvest config
+
+Signed-off-by: Leo Liu <leo.liu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+index 67294c6..f67822f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+@@ -388,7 +388,8 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
+ default:
+ if ((adev->asic_type == CHIP_POLARIS10) ||
+ (adev->asic_type == CHIP_POLARIS11) ||
+- (adev->asic_type == CHIP_POLARIS12))
++ (adev->asic_type == CHIP_POLARIS12) ||
++ (adev->asic_type == CHIP_VEGAM))
+ return AMDGPU_VCE_HARVEST_VCE1;
+
+ return 0;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4324-drm-amdgpu-add-VEGAM-support-to-vi.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4324-drm-amdgpu-add-VEGAM-support-to-vi.patch
new file mode 100644
index 00000000..cdead119
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4324-drm-amdgpu-add-VEGAM-support-to-vi.patch
@@ -0,0 +1,82 @@
+From 6ae8be24f47b13f3777b3ad83de91fc6fd1638ae Mon Sep 17 00:00:00 2001
+From: Leo Liu <leo.liu@amd.com>
+Date: Wed, 11 Apr 2018 15:28:28 -0500
+Subject: [PATCH 4324/5725] drm/amdgpu: add VEGAM support to vi
+
+Signed-off-by: Leo Liu <leo.liu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/vi.c | 31 +++++++++++++++++++++++++++++--
+ 1 file changed, 29 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
+index eab5d1e..a791b04 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vi.c
++++ b/drivers/gpu/drm/amd/amdgpu/vi.c
+@@ -305,9 +305,10 @@ static void vi_init_golden_registers(struct amdgpu_device *adev)
+ stoney_mgcg_cgcg_init,
+ ARRAY_SIZE(stoney_mgcg_cgcg_init));
+ break;
+- case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
++ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
++ case CHIP_VEGAM:
+ default:
+ break;
+ }
+@@ -1096,6 +1097,30 @@ static int vi_common_early_init(void *handle)
+ adev->pg_flags = 0;
+ adev->external_rev_id = adev->rev_id + 0x64;
+ break;
++ case CHIP_VEGAM:
++ adev->cg_flags = 0;
++ /*AMD_CG_SUPPORT_GFX_MGCG |
++ AMD_CG_SUPPORT_GFX_RLC_LS |
++ AMD_CG_SUPPORT_GFX_CP_LS |
++ AMD_CG_SUPPORT_GFX_CGCG |
++ AMD_CG_SUPPORT_GFX_CGLS |
++ AMD_CG_SUPPORT_GFX_3D_CGCG |
++ AMD_CG_SUPPORT_GFX_3D_CGLS |
++ AMD_CG_SUPPORT_SDMA_MGCG |
++ AMD_CG_SUPPORT_SDMA_LS |
++ AMD_CG_SUPPORT_BIF_MGCG |
++ AMD_CG_SUPPORT_BIF_LS |
++ AMD_CG_SUPPORT_HDP_MGCG |
++ AMD_CG_SUPPORT_HDP_LS |
++ AMD_CG_SUPPORT_ROM_MGCG |
++ AMD_CG_SUPPORT_MC_MGCG |
++ AMD_CG_SUPPORT_MC_LS |
++ AMD_CG_SUPPORT_DRM_LS |
++ AMD_CG_SUPPORT_UVD_MGCG |
++ AMD_CG_SUPPORT_VCE_MGCG;*/
++ adev->pg_flags = 0;
++ adev->external_rev_id = adev->rev_id + 0x6E;
++ break;
+ case CHIP_CARRIZO:
+ adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
+ AMD_CG_SUPPORT_GFX_MGCG |
+@@ -1491,6 +1516,7 @@ static int vi_common_set_clockgating_state(void *handle,
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
++ case CHIP_VEGAM:
+ vi_common_set_clockgating_state_by_smu(adev, state);
+ default:
+ break;
+@@ -1620,9 +1646,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
+ amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
+ }
+ break;
+- case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
++ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
++ case CHIP_VEGAM:
+ amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block);
+ amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4325-drm-amdgpu-add-VEGAM-pci-ids.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4325-drm-amdgpu-add-VEGAM-pci-ids.patch
new file mode 100644
index 00000000..061aad4c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4325-drm-amdgpu-add-VEGAM-pci-ids.patch
@@ -0,0 +1,29 @@
+From 901ed47135c622519491aef610c7367fccaa74d9 Mon Sep 17 00:00:00 2001
+From: Leo Liu <leo.liu@amd.com>
+Date: Thu, 9 Nov 2017 13:25:31 -0500
+Subject: [PATCH 4325/5725] drm/amdgpu: add VEGAM pci ids
+
+Signed-off-by: Leo Liu <leo.liu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 677e62c..92a8967 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -545,6 +545,9 @@ static const struct pci_device_id pciidlist[] = {
+ {0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+ {0x1002, 0x6997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+ {0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
++ /* VEGAM */
++ {0x1002, 0x694C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
++ {0x1002, 0x694E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
+ /* Vega 10 */
+ {0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x6861, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4326-drm-amd-display-Implement-VEGAM-device-IDs-in-DC.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4326-drm-amd-display-Implement-VEGAM-device-IDs-in-DC.patch
new file mode 100644
index 00000000..ecec65c8
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4326-drm-amd-display-Implement-VEGAM-device-IDs-in-DC.patch
@@ -0,0 +1,240 @@
+From fad0868d548991bf4e56db2a62a69e5975e89926 Mon Sep 17 00:00:00 2001
+From: "Jerry (Fangzhi) Zuo" <Jerry.Zuo@amd.com>
+Date: Wed, 11 Apr 2018 15:39:35 -0500
+Subject: [PATCH 4326/5725] drm/amd/display: Implement VEGAM device IDs in DC
+
+Implement device IDs for VEGAM
+
+Signed-off-by: Jerry (Fangzhi) Zuo <Jerry.Zuo@amd.com>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/display/Kconfig | 7 +++++++
+ drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c | 3 +++
+ drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c | 3 +++
+ drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c | 9 +++++++++
+ drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 7 +++++++
+ drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c | 6 ++++++
+ drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c | 3 +++
+ drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c | 3 +++
+ drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c | 3 +++
+ drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h | 3 +++
+ drivers/gpu/drm/amd/display/include/dal_asic_id.h | 7 +++++++
+ drivers/gpu/drm/amd/display/include/dal_types.h | 3 +++
+ 12 files changed, 57 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
+index c3d49f8..e5b309f 100644
+--- a/drivers/gpu/drm/amd/display/Kconfig
++++ b/drivers/gpu/drm/amd/display/Kconfig
+@@ -34,4 +34,11 @@ config DEBUG_KERNEL_DC
+ if you want to hit
+ kdgb_break in assert.
+
++config DRM_AMD_DC_VEGAM
++ bool "VEGAM support"
++ depends on DRM_AMD_DC
++ help
++ Choose this option if you want to have
++ VEGAM support for display engine
++
+ endmenu
+diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
+index 2979358..be066c4 100644
+--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
++++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
+@@ -51,6 +51,9 @@ bool dal_bios_parser_init_cmd_tbl_helper(
+ return true;
+
+ case DCE_VERSION_11_2:
++#if defined(CONFIG_DRM_AMD_DC_VEGAM)
++ case DCE_VERSION_11_22:
++#endif
+ *h = dal_cmd_tbl_helper_dce112_get_table();
+ return true;
+
+diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+index 9a4d30d..9b9e069 100644
+--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
++++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+@@ -52,6 +52,9 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
+ return true;
+
+ case DCE_VERSION_11_2:
++#if defined(CONFIG_DRM_AMD_DC_VEGAM)
++ case DCE_VERSION_11_22:
++#endif
+ *h = dal_cmd_tbl_helper_dce112_get_table2();
+ return true;
+ #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+index 56f46a0..4ee3c26 100644
+--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
++++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+@@ -59,6 +59,10 @@ static enum bw_calcs_version bw_calcs_version_from_asic_id(struct hw_asic_id asi
+ return BW_CALCS_VERSION_POLARIS10;
+ if (ASIC_REV_IS_POLARIS11_M(asic_id.hw_internal_rev))
+ return BW_CALCS_VERSION_POLARIS11;
++#if defined(CONFIG_DRM_AMD_DC_VEGAM)
++ if (ASIC_REV_IS_VEGAM(asic_id.hw_internal_rev))
++ return BW_CALCS_VERSION_VEGAM;
++#endif
+ return BW_CALCS_VERSION_INVALID;
+
+ case FAMILY_AI:
+@@ -2147,6 +2151,11 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
+ dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0); /* todo: this is a bug*/
+ break;
+ case BW_CALCS_VERSION_POLARIS10:
++#if defined(CONFIG_DRM_AMD_DC_VEGAM)
++ /* TODO: Treat VEGAM the same as P10 for now
++ * Need to tune the para for VEGAM if needed */
++ case BW_CALCS_VERSION_VEGAM:
++#endif
+ vbios.memory_type = bw_def_gddr5;
+ vbios.dram_channel_width_in_bits = 32;
+ vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index 7f79258..ad41b64 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -79,6 +79,10 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
+ ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev)) {
+ dc_version = DCE_VERSION_11_2;
+ }
++#if defined(CONFIG_DRM_AMD_DC_VEGAM)
++ if (ASIC_REV_IS_VEGAM(asic_id.hw_internal_rev))
++ dc_version = DCE_VERSION_11_22;
++#endif
+ break;
+ case FAMILY_AI:
+ dc_version = DCE_VERSION_12_0;
+@@ -125,6 +129,9 @@ struct resource_pool *dc_create_resource_pool(
+ num_virtual_links, dc, asic_id);
+ break;
+ case DCE_VERSION_11_2:
++#if defined(CONFIG_DRM_AMD_DC_VEGAM)
++ case DCE_VERSION_11_22:
++#endif
+ res_pool = dce112_create_resource_pool(
+ num_virtual_links, dc);
+ break;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+index 67dad7f..223db98 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+@@ -590,6 +590,9 @@ static uint32_t dce110_get_pix_clk_dividers(
+ pll_settings, pix_clk_params);
+ break;
+ case DCE_VERSION_11_2:
++#if defined(CONFIG_DRM_AMD_DC_VEGAM)
++ case DCE_VERSION_11_22:
++#endif
+ case DCE_VERSION_12_0:
+ #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ case DCN_VERSION_1_0:
+@@ -979,6 +982,9 @@ static bool dce110_program_pix_clk(
+
+ break;
+ case DCE_VERSION_11_2:
++#if defined(CONFIG_DRM_AMD_DC_VEGAM)
++ case DCE_VERSION_11_22:
++#endif
+ case DCE_VERSION_12_0:
+ #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ case DCN_VERSION_1_0:
+diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
+index 87b580f..61fe484 100644
+--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
++++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
+@@ -75,6 +75,9 @@ bool dal_hw_factory_init(
+ return true;
+ case DCE_VERSION_11_0:
+ case DCE_VERSION_11_2:
++#if defined(CONFIG_DRM_AMD_DC_VEGAM)
++ case DCE_VERSION_11_22:
++#endif
+ dal_hw_factory_dce110_init(factory);
+ return true;
+ case DCE_VERSION_12_0:
+diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
+index 0ae8ace..910ae2b7 100644
+--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
++++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
+@@ -72,6 +72,9 @@ bool dal_hw_translate_init(
+ case DCE_VERSION_10_0:
+ case DCE_VERSION_11_0:
+ case DCE_VERSION_11_2:
++#if defined(CONFIG_DRM_AMD_DC_VEGAM)
++ case DCE_VERSION_11_22:
++#endif
+ dal_hw_translate_dce110_init(translate);
+ return true;
+ case DCE_VERSION_12_0:
+diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
+index 5cbf662..c3d7c32 100644
+--- a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
++++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
+@@ -83,6 +83,9 @@ struct i2caux *dal_i2caux_create(
+ case DCE_VERSION_8_3:
+ return dal_i2caux_dce80_create(ctx);
+ case DCE_VERSION_11_2:
++#if defined(CONFIG_DRM_AMD_DC_VEGAM)
++ case DCE_VERSION_11_22:
++#endif
+ return dal_i2caux_dce112_create(ctx);
+ case DCE_VERSION_11_0:
+ return dal_i2caux_dce110_create(ctx);
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h
+index 0bd87f2..933ea7a 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h
+@@ -43,6 +43,9 @@ enum bw_calcs_version {
+ BW_CALCS_VERSION_POLARIS10,
+ BW_CALCS_VERSION_POLARIS11,
+ BW_CALCS_VERSION_POLARIS12,
++#if defined(CONFIG_DRM_AMD_DC_VEGAM)
++ BW_CALCS_VERSION_VEGAM,
++#endif
+ BW_CALCS_VERSION_STONEY,
+ BW_CALCS_VERSION_VEGA10
+ };
+diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+index 9831cb5..3e8e535 100644
+--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
++++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+@@ -98,7 +98,14 @@
+ (eChipRev < VI_POLARIS11_M_A0))
+ #define ASIC_REV_IS_POLARIS11_M(eChipRev) ((eChipRev >= VI_POLARIS11_M_A0) && \
+ (eChipRev < VI_POLARIS12_V_A0))
++#if defined(CONFIG_DRM_AMD_DC_VEGAM)
++#define VI_VEGAM_A0 110
++#define ASIC_REV_IS_POLARIS12_V(eChipRev) ((eChipRev >= VI_POLARIS12_V_A0) && \
++ (eChipRev < VI_VEGAM_A0))
++#define ASIC_REV_IS_VEGAM(eChipRev) (eChipRev >= VI_VEGAM_A0)
++#else
+ #define ASIC_REV_IS_POLARIS12_V(eChipRev) (eChipRev >= VI_POLARIS12_V_A0)
++#endif
+
+ /* DCE11 */
+ #define CZ_CARRIZO_A0 0x01
+diff --git a/drivers/gpu/drm/amd/display/include/dal_types.h b/drivers/gpu/drm/amd/display/include/dal_types.h
+index fa54396..5b1f8ce 100644
+--- a/drivers/gpu/drm/amd/display/include/dal_types.h
++++ b/drivers/gpu/drm/amd/display/include/dal_types.h
+@@ -40,6 +40,9 @@ enum dce_version {
+ DCE_VERSION_10_0,
+ DCE_VERSION_11_0,
+ DCE_VERSION_11_2,
++#if defined(CONFIG_DRM_AMD_DC_VEGAM)
++ DCE_VERSION_11_22,
++#endif
+ DCE_VERSION_12_0,
+ DCE_VERSION_MAX,
+ DCN_VERSION_1_0,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4327-drm-amd-display-Implement-VEGAM-device-IDs-in-DM.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4327-drm-amd-display-Implement-VEGAM-device-IDs-in-DM.patch
new file mode 100644
index 00000000..aa290658
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4327-drm-amd-display-Implement-VEGAM-device-IDs-in-DM.patch
@@ -0,0 +1,44 @@
+From 036bda0e07df150fb6c3bfc1b2bd20545f8b5d5b Mon Sep 17 00:00:00 2001
+From: "Jerry (Fangzhi) Zuo" <Jerry.Zuo@amd.com>
+Date: Thu, 9 Nov 2017 11:51:13 -0500
+Subject: [PATCH 4327/5725] drm/amd/display: Implement VEGAM device IDs in DM
+
+Add CHIP_VEGAM
+
+Signed-off-by: Jerry (Fangzhi) Zuo <Jerry.Zuo@amd.com>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 448fd0b..bd05986 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -1518,6 +1518,9 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS12:
++#if defined(CONFIG_DRM_AMD_DC_VEGAM)
++ case CHIP_VEGAM:
++#endif
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
+ if (dce110_register_irq_handlers(dm->adev)) {
+@@ -1762,6 +1765,9 @@ static int dm_early_init(void *handle)
+ adev->mode_info.plane_type = dm_plane_type_default;
+ break;
+ case CHIP_POLARIS10:
++#if defined(CONFIG_DRM_AMD_DC_VEGAM)
++ case CHIP_VEGAM:
++#endif
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4328-drm-amdgpu-Add-VEGAM-support-to-the-legacy-DCE-11-mo.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4328-drm-amdgpu-Add-VEGAM-support-to-the-legacy-DCE-11-mo.patch
new file mode 100644
index 00000000..d6a685bf
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4328-drm-amdgpu-Add-VEGAM-support-to-the-legacy-DCE-11-mo.patch
@@ -0,0 +1,82 @@
+From b05aa2ebe0482b6f231e951045ab8c84eec14094 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 19 Apr 2018 16:38:46 -0500
+Subject: [PATCH 4328/5725] drm/amdgpu: Add VEGAM support to the legacy DCE 11
+ module
+
+DC is preferred.
+
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 13 ++++++++++---
+ 1 file changed, 10 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+index f354281..cfb2ddb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+@@ -173,6 +173,7 @@ static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev)
+ ARRAY_SIZE(polaris11_golden_settings_a11));
+ break;
+ case CHIP_POLARIS10:
++ case CHIP_VEGAM:
+ amdgpu_device_program_register_sequence(adev,
+ polaris10_golden_settings_a11,
+ ARRAY_SIZE(polaris10_golden_settings_a11));
+@@ -473,6 +474,7 @@ static int dce_v11_0_get_num_crtc (struct amdgpu_device *adev)
+ num_crtc = 2;
+ break;
+ case CHIP_POLARIS10:
++ case CHIP_VEGAM:
+ num_crtc = 6;
+ break;
+ case CHIP_POLARIS11:
+@@ -1445,6 +1447,7 @@ static int dce_v11_0_audio_init(struct amdgpu_device *adev)
+ adev->mode_info.audio.num_pins = 7;
+ break;
+ case CHIP_POLARIS10:
++ case CHIP_VEGAM:
+ adev->mode_info.audio.num_pins = 8;
+ break;
+ case CHIP_POLARIS11:
+@@ -2253,7 +2256,8 @@ static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc)
+
+ if ((adev->asic_type == CHIP_POLARIS10) ||
+ (adev->asic_type == CHIP_POLARIS11) ||
+- (adev->asic_type == CHIP_POLARIS12)) {
++ (adev->asic_type == CHIP_POLARIS12) ||
++ (adev->asic_type == CHIP_VEGAM)) {
+ struct amdgpu_encoder *amdgpu_encoder =
+ to_amdgpu_encoder(amdgpu_crtc->encoder);
+ struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+@@ -2673,7 +2677,8 @@ static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc,
+
+ if ((adev->asic_type == CHIP_POLARIS10) ||
+ (adev->asic_type == CHIP_POLARIS11) ||
+- (adev->asic_type == CHIP_POLARIS12)) {
++ (adev->asic_type == CHIP_POLARIS12) ||
++ (adev->asic_type == CHIP_VEGAM)) {
+ struct amdgpu_encoder *amdgpu_encoder =
+ to_amdgpu_encoder(amdgpu_crtc->encoder);
+ int encoder_mode =
+@@ -2831,6 +2836,7 @@ static int dce_v11_0_early_init(void *handle)
+ adev->mode_info.num_dig = 9;
+ break;
+ case CHIP_POLARIS10:
++ case CHIP_VEGAM:
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ break;
+@@ -2950,7 +2956,8 @@ static int dce_v11_0_hw_init(void *handle)
+ amdgpu_atombios_encoder_init_dig(adev);
+ if ((adev->asic_type == CHIP_POLARIS10) ||
+ (adev->asic_type == CHIP_POLARIS11) ||
+- (adev->asic_type == CHIP_POLARIS12)) {
++ (adev->asic_type == CHIP_POLARIS12) ||
++ (adev->asic_type == CHIP_VEGAM)) {
+ amdgpu_atombios_crtc_set_dce_clock(adev, adev->clock.default_dispclk,
+ DCE_CLOCK_TYPE_DISPCLK, ATOM_GCK_DFS);
+ amdgpu_atombios_crtc_set_dce_clock(adev, 0,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4329-drm-amd-display-Use-HBR2-if-eDP-monitor-it-doesn-t-a.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4329-drm-amd-display-Use-HBR2-if-eDP-monitor-it-doesn-t-a.patch
new file mode 100644
index 00000000..38f2f9e3
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4329-drm-amd-display-Use-HBR2-if-eDP-monitor-it-doesn-t-a.patch
@@ -0,0 +1,39 @@
+From 45c687c5fafe29d1b3258e28c807826d1617ad17 Mon Sep 17 00:00:00 2001
+From: Harry Wentland <harry.wentland@amd.com>
+Date: Tue, 21 Nov 2017 13:34:48 -0500
+Subject: [PATCH 4329/5725] drm/amd/display: Use HBR2 if eDP monitor it doesn't
+ advertise link rate
+
+Some eDP displays use the extra link rate table to advertise link rate
+support. If they do that, they don't need to provide a link rate through
+the usual registers. Since we don't currently have support for the extra
+link rate table, default to HBR2 for the display in this case.
+
+Note that this is a HACK. Ultimately we need to teach DC to use the
+extra link rate table.
+
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+index a6dcd42..7f0fdf7 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+@@ -2396,6 +2396,10 @@ bool detect_dp_sink_caps(struct dc_link *link)
+ void detect_edp_sink_caps(struct dc_link *link)
+ {
+ retrieve_link_cap(link);
++
++ if (link->reported_link_cap.link_rate == LINK_RATE_UNKNOWN)
++ link->reported_link_cap.link_rate = LINK_RATE_HIGH2;
++
+ link->verified_link_cap = link->reported_link_cap;
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4330-drm-amd-powerplay-add-smu75-header-files.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4330-drm-amd-powerplay-add-smu75-header-files.patch
new file mode 100644
index 00000000..9fe6946a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4330-drm-amd-powerplay-add-smu75-header-files.patch
@@ -0,0 +1,1676 @@
+From 27db33307a4082558297f1d5ffc5ddfe4b866778 Mon Sep 17 00:00:00 2001
+From: Eric Huang <JinHuiEric.Huang@amd.com>
+Date: Thu, 9 Nov 2017 16:29:28 -0500
+Subject: [PATCH 4330/5725] drm/amd/powerplay: add smu75 header files
+
+Signed-off-by: Eric Huang <JinHuiEric.Huang@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/inc/smu75.h | 760 ++++++++++++++++++
+ drivers/gpu/drm/amd/powerplay/inc/smu75_discrete.h | 886 +++++++++++++++++++++
+ 2 files changed, 1646 insertions(+)
+ create mode 100644 drivers/gpu/drm/amd/powerplay/inc/smu75.h
+ create mode 100644 drivers/gpu/drm/amd/powerplay/inc/smu75_discrete.h
+
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu75.h b/drivers/gpu/drm/amd/powerplay/inc/smu75.h
+new file mode 100644
+index 0000000..7715230
+--- /dev/null
++++ b/drivers/gpu/drm/amd/powerplay/inc/smu75.h
+@@ -0,0 +1,760 @@
++/*
++ * Copyright 2017 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++#ifndef SMU75_H
++#define SMU75_H
++
++#pragma pack(push, 1)
++
++typedef struct {
++ uint32_t high;
++ uint32_t low;
++} data_64_t;
++
++typedef struct {
++ data_64_t high;
++ data_64_t low;
++} data_128_t;
++
++#define SMU__DGPU_ONLY
++
++#define SMU__NUM_SCLK_DPM_STATE 8
++#define SMU__NUM_MCLK_DPM_LEVELS 4
++#define SMU__NUM_LCLK_DPM_LEVELS 8
++#define SMU__NUM_PCIE_DPM_LEVELS 8
++
++#define SMU7_CONTEXT_ID_SMC 1
++#define SMU7_CONTEXT_ID_VBIOS 2
++
++#define SMU75_MAX_LEVELS_VDDC 16
++#define SMU75_MAX_LEVELS_VDDGFX 16
++#define SMU75_MAX_LEVELS_VDDCI 8
++#define SMU75_MAX_LEVELS_MVDD 4
++
++#define SMU_MAX_SMIO_LEVELS 4
++
++#define SMU75_MAX_LEVELS_GRAPHICS SMU__NUM_SCLK_DPM_STATE
++#define SMU75_MAX_LEVELS_MEMORY SMU__NUM_MCLK_DPM_LEVELS
++#define SMU75_MAX_LEVELS_GIO SMU__NUM_LCLK_DPM_LEVELS
++#define SMU75_MAX_LEVELS_LINK SMU__NUM_PCIE_DPM_LEVELS
++#define SMU75_MAX_LEVELS_UVD 8
++#define SMU75_MAX_LEVELS_VCE 8
++#define SMU75_MAX_LEVELS_ACP 8
++#define SMU75_MAX_LEVELS_SAMU 8
++#define SMU75_MAX_ENTRIES_SMIO 32
++
++#define DPM_NO_LIMIT 0
++#define DPM_NO_UP 1
++#define DPM_GO_DOWN 2
++#define DPM_GO_UP 3
++
++#define SMU7_FIRST_DPM_GRAPHICS_LEVEL 0
++#define SMU7_FIRST_DPM_MEMORY_LEVEL 0
++
++#define GPIO_CLAMP_MODE_VRHOT 1
++#define GPIO_CLAMP_MODE_THERM 2
++#define GPIO_CLAMP_MODE_DC 4
++
++#define SCRATCH_B_TARG_PCIE_INDEX_SHIFT 0
++#define SCRATCH_B_TARG_PCIE_INDEX_MASK (0x7<<SCRATCH_B_TARG_PCIE_INDEX_SHIFT)
++#define SCRATCH_B_CURR_PCIE_INDEX_SHIFT 3
++#define SCRATCH_B_CURR_PCIE_INDEX_MASK (0x7<<SCRATCH_B_CURR_PCIE_INDEX_SHIFT)
++#define SCRATCH_B_TARG_UVD_INDEX_SHIFT 6
++#define SCRATCH_B_TARG_UVD_INDEX_MASK (0x7<<SCRATCH_B_TARG_UVD_INDEX_SHIFT)
++#define SCRATCH_B_CURR_UVD_INDEX_SHIFT 9
++#define SCRATCH_B_CURR_UVD_INDEX_MASK (0x7<<SCRATCH_B_CURR_UVD_INDEX_SHIFT)
++#define SCRATCH_B_TARG_VCE_INDEX_SHIFT 12
++#define SCRATCH_B_TARG_VCE_INDEX_MASK (0x7<<SCRATCH_B_TARG_VCE_INDEX_SHIFT)
++#define SCRATCH_B_CURR_VCE_INDEX_SHIFT 15
++#define SCRATCH_B_CURR_VCE_INDEX_MASK (0x7<<SCRATCH_B_CURR_VCE_INDEX_SHIFT)
++#define SCRATCH_B_TARG_ACP_INDEX_SHIFT 18
++#define SCRATCH_B_TARG_ACP_INDEX_MASK (0x7<<SCRATCH_B_TARG_ACP_INDEX_SHIFT)
++#define SCRATCH_B_CURR_ACP_INDEX_SHIFT 21
++#define SCRATCH_B_CURR_ACP_INDEX_MASK (0x7<<SCRATCH_B_CURR_ACP_INDEX_SHIFT)
++#define SCRATCH_B_TARG_SAMU_INDEX_SHIFT 24
++#define SCRATCH_B_TARG_SAMU_INDEX_MASK (0x7<<SCRATCH_B_TARG_SAMU_INDEX_SHIFT)
++#define SCRATCH_B_CURR_SAMU_INDEX_SHIFT 27
++#define SCRATCH_B_CURR_SAMU_INDEX_MASK (0x7<<SCRATCH_B_CURR_SAMU_INDEX_SHIFT)
++
++/* Virtualization Defines */
++#define CG_XDMA_MASK 0x1
++#define CG_XDMA_SHIFT 0
++#define CG_UVD_MASK 0x2
++#define CG_UVD_SHIFT 1
++#define CG_VCE_MASK 0x4
++#define CG_VCE_SHIFT 2
++#define CG_SAMU_MASK 0x8
++#define CG_SAMU_SHIFT 3
++#define CG_GFX_MASK 0x10
++#define CG_GFX_SHIFT 4
++#define CG_SDMA_MASK 0x20
++#define CG_SDMA_SHIFT 5
++#define CG_HDP_MASK 0x40
++#define CG_HDP_SHIFT 6
++#define CG_MC_MASK 0x80
++#define CG_MC_SHIFT 7
++#define CG_DRM_MASK 0x100
++#define CG_DRM_SHIFT 8
++#define CG_ROM_MASK 0x200
++#define CG_ROM_SHIFT 9
++#define CG_BIF_MASK 0x400
++#define CG_BIF_SHIFT 10
++
++#if defined SMU__DGPU_ONLY
++#define SMU75_DTE_ITERATIONS 5
++#define SMU75_DTE_SOURCES 3
++#define SMU75_DTE_SINKS 1
++#define SMU75_NUM_CPU_TES 0
++#define SMU75_NUM_GPU_TES 1
++#define SMU75_NUM_NON_TES 2
++#define SMU75_DTE_FAN_SCALAR_MIN 0x100
++#define SMU75_DTE_FAN_SCALAR_MAX 0x166
++#define SMU75_DTE_FAN_TEMP_MAX 93
++#define SMU75_DTE_FAN_TEMP_MIN 83
++#endif
++#define SMU75_THERMAL_INPUT_LOOP_COUNT 2
++#define SMU75_THERMAL_CLAMP_MODE_COUNT 2
++
++#define EXP_M1_1 93
++#define EXP_M2_1 195759
++#define EXP_B_1 111176531
++
++#define EXP_M1_2 67
++#define EXP_M2_2 153720
++#define EXP_B_2 94415767
++
++#define EXP_M1_3 48
++#define EXP_M2_3 119796
++#define EXP_B_3 79195279
++
++#define EXP_M1_4 550
++#define EXP_M2_4 1484190
++#define EXP_B_4 1051432828
++
++#define EXP_M1_5 394
++#define EXP_M2_5 1143049
++#define EXP_B_5 864288432
++
++struct SMU7_HystController_Data {
++ uint16_t waterfall_up;
++ uint16_t waterfall_down;
++ uint16_t waterfall_limit;
++ uint16_t release_cnt;
++ uint16_t release_limit;
++ uint16_t spare;
++};
++
++typedef struct SMU7_HystController_Data SMU7_HystController_Data;
++
++struct SMU75_PIDController {
++ uint32_t Ki;
++ int32_t LFWindupUpperLim;
++ int32_t LFWindupLowerLim;
++ uint32_t StatePrecision;
++ uint32_t LfPrecision;
++ uint32_t LfOffset;
++ uint32_t MaxState;
++ uint32_t MaxLfFraction;
++ uint32_t StateShift;
++};
++
++typedef struct SMU75_PIDController SMU75_PIDController;
++
++struct SMU7_LocalDpmScoreboard {
++ uint32_t PercentageBusy;
++
++ int32_t PIDError;
++ int32_t PIDIntegral;
++ int32_t PIDOutput;
++
++ uint32_t SigmaDeltaAccum;
++ uint32_t SigmaDeltaOutput;
++ uint32_t SigmaDeltaLevel;
++
++ uint32_t UtilizationSetpoint;
++
++ uint8_t TdpClampMode;
++ uint8_t TdcClampMode;
++ uint8_t ThermClampMode;
++ uint8_t VoltageBusy;
++
++ int8_t CurrLevel;
++ int8_t TargLevel;
++ uint8_t LevelChangeInProgress;
++ uint8_t UpHyst;
++
++ uint8_t DownHyst;
++ uint8_t VoltageDownHyst;
++ uint8_t DpmEnable;
++ uint8_t DpmRunning;
++
++ uint8_t DpmForce;
++ uint8_t DpmForceLevel;
++ uint8_t DisplayWatermark;
++ uint8_t McArbIndex;
++
++ uint32_t MinimumPerfSclk;
++
++ uint8_t AcpiReq;
++ uint8_t AcpiAck;
++ uint8_t GfxClkSlow;
++ uint8_t GpioClampMode;
++
++ uint8_t EnableModeSwitchRLCNotification;
++ uint8_t EnabledLevelsChange;
++ uint8_t DteClampMode;
++ uint8_t FpsClampMode;
++
++ uint16_t LevelResidencyCounters [SMU75_MAX_LEVELS_GRAPHICS];
++ uint16_t LevelSwitchCounters [SMU75_MAX_LEVELS_GRAPHICS];
++
++ void (*TargetStateCalculator)(uint8_t);
++ void (*SavedTargetStateCalculator)(uint8_t);
++
++ uint16_t AutoDpmInterval;
++ uint16_t AutoDpmRange;
++
++ uint8_t FpsEnabled;
++ uint8_t MaxPerfLevel;
++ uint8_t AllowLowClkInterruptToHost;
++ uint8_t FpsRunning;
++
++ uint32_t MaxAllowedFrequency;
++
++ uint32_t FilteredSclkFrequency;
++ uint32_t LastSclkFrequency;
++ uint32_t FilteredSclkFrequencyCnt;
++
++ uint8_t MinPerfLevel;
++#ifdef SMU__FIRMWARE_SCKS_PRESENT__1
++ uint8_t ScksClampMode;
++ uint8_t padding[2];
++#else
++ uint8_t padding[3];
++#endif
++
++ uint16_t FpsAlpha;
++ uint16_t DeltaTime;
++ uint32_t CurrentFps;
++ uint32_t FilteredFps;
++ uint32_t FrameCount;
++ uint32_t FrameCountLast;
++ uint16_t FpsTargetScalar;
++ uint16_t FpsWaterfallLimitScalar;
++ uint16_t FpsAlphaScalar;
++ uint16_t spare8;
++ SMU7_HystController_Data HystControllerData;
++};
++
++typedef struct SMU7_LocalDpmScoreboard SMU7_LocalDpmScoreboard;
++
++#define SMU7_MAX_VOLTAGE_CLIENTS 12
++
++typedef uint8_t (*VoltageChangeHandler_t)(uint16_t, uint8_t);
++
++#define VDDC_MASK 0x00007FFF
++#define VDDC_SHIFT 0
++#define VDDCI_MASK 0x3FFF8000
++#define VDDCI_SHIFT 15
++#define PHASES_MASK 0xC0000000
++#define PHASES_SHIFT 30
++
++typedef uint32_t SMU_VoltageLevel;
++
++struct SMU7_VoltageScoreboard {
++ SMU_VoltageLevel TargetVoltage;
++ uint16_t MaxVid;
++ uint8_t HighestVidOffset;
++ uint8_t CurrentVidOffset;
++
++ uint16_t CurrentVddc;
++ uint16_t CurrentVddci;
++
++ uint8_t ControllerBusy;
++ uint8_t CurrentVid;
++ uint8_t CurrentVddciVid;
++ uint8_t padding;
++
++ SMU_VoltageLevel RequestedVoltage[SMU7_MAX_VOLTAGE_CLIENTS];
++ SMU_VoltageLevel TargetVoltageState;
++ uint8_t EnabledRequest[SMU7_MAX_VOLTAGE_CLIENTS];
++
++ uint8_t padding2;
++ uint8_t padding3;
++ uint8_t ControllerEnable;
++ uint8_t ControllerRunning;
++ uint16_t CurrentStdVoltageHiSidd;
++ uint16_t CurrentStdVoltageLoSidd;
++ uint8_t OverrideVoltage;
++ uint8_t padding4;
++ uint8_t padding5;
++ uint8_t CurrentPhases;
++
++ VoltageChangeHandler_t ChangeVddc;
++ VoltageChangeHandler_t ChangeVddci;
++ VoltageChangeHandler_t ChangePhase;
++ VoltageChangeHandler_t ChangeMvdd;
++
++ VoltageChangeHandler_t functionLinks[6];
++
++ uint16_t * VddcFollower1;
++ int16_t Driver_OD_RequestedVidOffset1;
++ int16_t Driver_OD_RequestedVidOffset2;
++};
++
++typedef struct SMU7_VoltageScoreboard SMU7_VoltageScoreboard;
++
++#define SMU7_MAX_PCIE_LINK_SPEEDS 3
++
++struct SMU7_PCIeLinkSpeedScoreboard {
++ uint8_t DpmEnable;
++ uint8_t DpmRunning;
++ uint8_t DpmForce;
++ uint8_t DpmForceLevel;
++
++ uint8_t CurrentLinkSpeed;
++ uint8_t EnabledLevelsChange;
++ uint16_t AutoDpmInterval;
++
++ uint16_t AutoDpmRange;
++ uint16_t AutoDpmCount;
++
++ uint8_t DpmMode;
++ uint8_t AcpiReq;
++ uint8_t AcpiAck;
++ uint8_t CurrentLinkLevel;
++};
++
++typedef struct SMU7_PCIeLinkSpeedScoreboard SMU7_PCIeLinkSpeedScoreboard;
++
++#define SMU7_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16
++#define SMU7_LKGE_LUT_NUM_OF_VOLT_ENTRIES 16
++
++#define SMU7_SCALE_I 7
++#define SMU7_SCALE_R 12
++
++struct SMU7_PowerScoreboard {
++ uint32_t GpuPower;
++
++ uint32_t VddcPower;
++ uint32_t VddcVoltage;
++ uint32_t VddcCurrent;
++
++ uint32_t VddciPower;
++ uint32_t VddciVoltage;
++ uint32_t VddciCurrent;
++
++ uint32_t RocPower;
++
++ uint16_t Telemetry_1_slope;
++ uint16_t Telemetry_2_slope;
++ int32_t Telemetry_1_offset;
++ int32_t Telemetry_2_offset;
++
++ uint8_t MCLK_patch_flag;
++ uint8_t reserved[3];
++};
++
++typedef struct SMU7_PowerScoreboard SMU7_PowerScoreboard;
++
++#define SMU7_SCLK_DPM_CONFIG_MASK 0x01
++#define SMU7_VOLTAGE_CONTROLLER_CONFIG_MASK 0x02
++#define SMU7_THERMAL_CONTROLLER_CONFIG_MASK 0x04
++#define SMU7_MCLK_DPM_CONFIG_MASK 0x08
++#define SMU7_UVD_DPM_CONFIG_MASK 0x10
++#define SMU7_VCE_DPM_CONFIG_MASK 0x20
++#define SMU7_ACP_DPM_CONFIG_MASK 0x40
++#define SMU7_SAMU_DPM_CONFIG_MASK 0x80
++#define SMU7_PCIEGEN_DPM_CONFIG_MASK 0x100
++
++#define SMU7_ACP_MCLK_HANDSHAKE_DISABLE 0x00000001
++#define SMU7_ACP_SCLK_HANDSHAKE_DISABLE 0x00000002
++#define SMU7_UVD_MCLK_HANDSHAKE_DISABLE 0x00000100
++#define SMU7_UVD_SCLK_HANDSHAKE_DISABLE 0x00000200
++#define SMU7_VCE_MCLK_HANDSHAKE_DISABLE 0x00010000
++#define SMU7_VCE_SCLK_HANDSHAKE_DISABLE 0x00020000
++
++struct SMU75_SoftRegisters {
++ uint32_t RefClockFrequency;
++ uint32_t PmTimerPeriod;
++ uint32_t FeatureEnables;
++#if defined (SMU__DGPU_ONLY)
++ uint32_t PreVBlankGap;
++ uint32_t VBlankTimeout;
++ uint32_t TrainTimeGap;
++ uint32_t MvddSwitchTime;
++ uint32_t LongestAcpiTrainTime;
++ uint32_t AcpiDelay;
++ uint32_t G5TrainTime;
++ uint32_t DelayMpllPwron;
++ uint32_t VoltageChangeTimeout;
++#endif
++ uint32_t HandshakeDisables;
++
++ uint8_t DisplayPhy1Config;
++ uint8_t DisplayPhy2Config;
++ uint8_t DisplayPhy3Config;
++ uint8_t DisplayPhy4Config;
++
++ uint8_t DisplayPhy5Config;
++ uint8_t DisplayPhy6Config;
++ uint8_t DisplayPhy7Config;
++ uint8_t DisplayPhy8Config;
++
++ uint32_t AverageGraphicsActivity;
++ uint32_t AverageMemoryActivity;
++ uint32_t AverageGioActivity;
++
++ uint8_t SClkDpmEnabledLevels;
++ uint8_t MClkDpmEnabledLevels;
++ uint8_t LClkDpmEnabledLevels;
++ uint8_t PCIeDpmEnabledLevels;
++
++ uint8_t UVDDpmEnabledLevels;
++ uint8_t SAMUDpmEnabledLevels;
++ uint8_t ACPDpmEnabledLevels;
++ uint8_t VCEDpmEnabledLevels;
++
++ uint32_t DRAM_LOG_ADDR_H;
++ uint32_t DRAM_LOG_ADDR_L;
++ uint32_t DRAM_LOG_PHY_ADDR_H;
++ uint32_t DRAM_LOG_PHY_ADDR_L;
++ uint32_t DRAM_LOG_BUFF_SIZE;
++ uint32_t UlvEnterCount;
++ uint32_t UlvTime;
++ uint32_t UcodeLoadStatus;
++ uint32_t AllowMvddSwitch;
++ uint8_t Activity_Weight;
++ uint8_t Reserved8[3];
++};
++
++typedef struct SMU75_SoftRegisters SMU75_SoftRegisters;
++
++struct SMU75_Firmware_Header {
++ uint32_t Digest[5];
++ uint32_t Version;
++ uint32_t HeaderSize;
++ uint32_t Flags;
++ uint32_t EntryPoint;
++ uint32_t CodeSize;
++ uint32_t ImageSize;
++
++ uint32_t Rtos;
++ uint32_t SoftRegisters;
++ uint32_t DpmTable;
++ uint32_t FanTable;
++ uint32_t CacConfigTable;
++ uint32_t CacStatusTable;
++ uint32_t mcRegisterTable;
++ uint32_t mcArbDramTimingTable;
++ uint32_t PmFuseTable;
++ uint32_t Globals;
++ uint32_t ClockStretcherTable;
++ uint32_t VftTable;
++ uint32_t Reserved1;
++ uint32_t AvfsCksOff_AvfsGbvTable;
++ uint32_t AvfsCksOff_BtcGbvTable;
++ uint32_t MM_AvfsTable;
++ uint32_t PowerSharingTable;
++ uint32_t AvfsTable;
++ uint32_t AvfsCksOffGbvTable;
++ uint32_t AvfsMeanNSigma;
++ uint32_t AvfsSclkOffsetTable;
++ uint32_t Reserved[12];
++ uint32_t Signature;
++};
++
++typedef struct SMU75_Firmware_Header SMU75_Firmware_Header;
++
++#define SMU7_FIRMWARE_HEADER_LOCATION 0x20000
++
++enum DisplayConfig {
++ PowerDown = 1,
++ DP54x4,
++ DP54x2,
++ DP54x1,
++ DP27x4,
++ DP27x2,
++ DP27x1,
++ HDMI297,
++ HDMI162,
++ LVDS,
++ DP324x4,
++ DP324x2,
++ DP324x1
++};
++
++#define MC_BLOCK_COUNT 1
++#define CPL_BLOCK_COUNT 5
++#define SE_BLOCK_COUNT 15
++#define GC_BLOCK_COUNT 24
++
++struct SMU7_Local_Cac {
++ uint8_t BlockId;
++ uint8_t SignalId;
++ uint8_t Threshold;
++ uint8_t Padding;
++};
++
++typedef struct SMU7_Local_Cac SMU7_Local_Cac;
++
++struct SMU7_Local_Cac_Table {
++ SMU7_Local_Cac CplLocalCac[CPL_BLOCK_COUNT];
++ SMU7_Local_Cac McLocalCac[MC_BLOCK_COUNT];
++ SMU7_Local_Cac SeLocalCac[SE_BLOCK_COUNT];
++ SMU7_Local_Cac GcLocalCac[GC_BLOCK_COUNT];
++};
++
++typedef struct SMU7_Local_Cac_Table SMU7_Local_Cac_Table;
++
++#pragma pack(pop)
++
++#define CG_SYS_BITMASK_FIRST_BIT 0
++#define CG_SYS_BITMASK_LAST_BIT 10
++#define CG_SYS_BIF_MGLS_SHIFT 0
++#define CG_SYS_ROM_SHIFT 1
++#define CG_SYS_MC_MGCG_SHIFT 2
++#define CG_SYS_MC_MGLS_SHIFT 3
++#define CG_SYS_SDMA_MGCG_SHIFT 4
++#define CG_SYS_SDMA_MGLS_SHIFT 5
++#define CG_SYS_DRM_MGCG_SHIFT 6
++#define CG_SYS_HDP_MGCG_SHIFT 7
++#define CG_SYS_HDP_MGLS_SHIFT 8
++#define CG_SYS_DRM_MGLS_SHIFT 9
++#define CG_SYS_BIF_MGCG_SHIFT 10
++
++#define CG_SYS_BIF_MGLS_MASK 0x1
++#define CG_SYS_ROM_MASK 0x2
++#define CG_SYS_MC_MGCG_MASK 0x4
++#define CG_SYS_MC_MGLS_MASK 0x8
++#define CG_SYS_SDMA_MGCG_MASK 0x10
++#define CG_SYS_SDMA_MGLS_MASK 0x20
++#define CG_SYS_DRM_MGCG_MASK 0x40
++#define CG_SYS_HDP_MGCG_MASK 0x80
++#define CG_SYS_HDP_MGLS_MASK 0x100
++#define CG_SYS_DRM_MGLS_MASK 0x200
++#define CG_SYS_BIF_MGCG_MASK 0x400
++
++#define CG_GFX_BITMASK_FIRST_BIT 16
++#define CG_GFX_BITMASK_LAST_BIT 24
++
++#define CG_GFX_CGCG_SHIFT 16
++#define CG_GFX_CGLS_SHIFT 17
++#define CG_CPF_MGCG_SHIFT 18
++#define CG_RLC_MGCG_SHIFT 19
++#define CG_GFX_OTHERS_MGCG_SHIFT 20
++#define CG_GFX_3DCG_SHIFT 21
++#define CG_GFX_3DLS_SHIFT 22
++#define CG_GFX_RLC_LS_SHIFT 23
++#define CG_GFX_CP_LS_SHIFT 24
++
++#define CG_GFX_CGCG_MASK 0x00010000
++#define CG_GFX_CGLS_MASK 0x00020000
++#define CG_CPF_MGCG_MASK 0x00040000
++#define CG_RLC_MGCG_MASK 0x00080000
++#define CG_GFX_OTHERS_MGCG_MASK 0x00100000
++#define CG_GFX_3DCG_MASK 0x00200000
++#define CG_GFX_3DLS_MASK 0x00400000
++#define CG_GFX_RLC_LS_MASK 0x00800000
++#define CG_GFX_CP_LS_MASK 0x01000000
++
++
++#define VRCONF_VDDC_MASK 0x000000FF
++#define VRCONF_VDDC_SHIFT 0
++#define VRCONF_VDDGFX_MASK 0x0000FF00
++#define VRCONF_VDDGFX_SHIFT 8
++#define VRCONF_VDDCI_MASK 0x00FF0000
++#define VRCONF_VDDCI_SHIFT 16
++#define VRCONF_MVDD_MASK 0xFF000000
++#define VRCONF_MVDD_SHIFT 24
++
++#define VR_MERGED_WITH_VDDC 0
++#define VR_SVI2_PLANE_1 1
++#define VR_SVI2_PLANE_2 2
++#define VR_SMIO_PATTERN_1 3
++#define VR_SMIO_PATTERN_2 4
++#define VR_STATIC_VOLTAGE 5
++
++#define CLOCK_STRETCHER_MAX_ENTRIES 0x4
++#define CKS_LOOKUPTable_MAX_ENTRIES 0x4
++
++#define CLOCK_STRETCHER_SETTING_DDT_MASK 0x01
++#define CLOCK_STRETCHER_SETTING_DDT_SHIFT 0x0
++#define CLOCK_STRETCHER_SETTING_STRETCH_AMOUNT_MASK 0x1E
++#define CLOCK_STRETCHER_SETTING_STRETCH_AMOUNT_SHIFT 0x1
++#define CLOCK_STRETCHER_SETTING_ENABLE_MASK 0x80
++#define CLOCK_STRETCHER_SETTING_ENABLE_SHIFT 0x7
++
++struct SMU_ClockStretcherDataTableEntry {
++ uint8_t minVID;
++ uint8_t maxVID;
++
++ uint16_t setting;
++};
++typedef struct SMU_ClockStretcherDataTableEntry SMU_ClockStretcherDataTableEntry;
++
++struct SMU_ClockStretcherDataTable {
++ SMU_ClockStretcherDataTableEntry ClockStretcherDataTableEntry[CLOCK_STRETCHER_MAX_ENTRIES];
++};
++typedef struct SMU_ClockStretcherDataTable SMU_ClockStretcherDataTable;
++
++struct SMU_CKS_LOOKUPTableEntry {
++ uint16_t minFreq;
++ uint16_t maxFreq;
++
++ uint8_t setting;
++ uint8_t padding[3];
++};
++typedef struct SMU_CKS_LOOKUPTableEntry SMU_CKS_LOOKUPTableEntry;
++
++struct SMU_CKS_LOOKUPTable {
++ SMU_CKS_LOOKUPTableEntry CKS_LOOKUPTableEntry[CKS_LOOKUPTable_MAX_ENTRIES];
++};
++typedef struct SMU_CKS_LOOKUPTable SMU_CKS_LOOKUPTable;
++
++struct AgmAvfsData_t {
++ uint16_t avgPsmCount[28];
++ uint16_t minPsmCount[28];
++};
++typedef struct AgmAvfsData_t AgmAvfsData_t;
++
++enum VFT_COLUMNS {
++ SCLK0,
++ SCLK1,
++ SCLK2,
++ SCLK3,
++ SCLK4,
++ SCLK5,
++ SCLK6,
++ SCLK7,
++
++ NUM_VFT_COLUMNS
++};
++enum {
++ SCS_FUSE_T0,
++ SCS_FUSE_T1,
++ NUM_SCS_FUSE_TEMPERATURE
++};
++enum {
++ SCKS_ON,
++ SCKS_OFF,
++ NUM_SCKS_STATE_TYPES
++};
++
++#define VFT_TABLE_DEFINED
++
++#define TEMP_RANGE_MAXSTEPS 12
++struct VFT_CELL_t {
++ uint16_t Voltage;
++};
++
++typedef struct VFT_CELL_t VFT_CELL_t;
++#ifdef SMU__FIRMWARE_SCKS_PRESENT__1
++struct SCS_CELL_t {
++ uint16_t PsmCnt[NUM_SCKS_STATE_TYPES];
++};
++typedef struct SCS_CELL_t SCS_CELL_t;
++#endif
++
++struct VFT_TABLE_t {
++ VFT_CELL_t Cell[TEMP_RANGE_MAXSTEPS][NUM_VFT_COLUMNS];
++ uint16_t AvfsGbv [NUM_VFT_COLUMNS];
++ uint16_t BtcGbv [NUM_VFT_COLUMNS];
++ int16_t Temperature [TEMP_RANGE_MAXSTEPS];
++
++#ifdef SMU__FIRMWARE_SCKS_PRESENT__1
++ SCS_CELL_t ScksCell[TEMP_RANGE_MAXSTEPS][NUM_VFT_COLUMNS];
++#endif
++
++ uint8_t NumTemperatureSteps;
++ uint8_t padding[3];
++};
++typedef struct VFT_TABLE_t VFT_TABLE_t;
++
++#define BTCGB_VDROOP_TABLE_MAX_ENTRIES 2
++#define AVFSGB_VDROOP_TABLE_MAX_ENTRIES 2
++
++struct GB_VDROOP_TABLE_t {
++ int32_t a0;
++ int32_t a1;
++ int32_t a2;
++ uint32_t spare;
++};
++typedef struct GB_VDROOP_TABLE_t GB_VDROOP_TABLE_t;
++
++struct SMU_QuadraticCoeffs {
++ int32_t m1;
++ int32_t b;
++
++ int16_t m2;
++ uint8_t m1_shift;
++ uint8_t m2_shift;
++};
++typedef struct SMU_QuadraticCoeffs SMU_QuadraticCoeffs;
++
++struct AVFS_Margin_t {
++ VFT_CELL_t Cell[NUM_VFT_COLUMNS];
++};
++typedef struct AVFS_Margin_t AVFS_Margin_t;
++
++struct AVFS_CksOff_Gbv_t {
++ VFT_CELL_t Cell[NUM_VFT_COLUMNS];
++};
++typedef struct AVFS_CksOff_Gbv_t AVFS_CksOff_Gbv_t;
++
++struct AVFS_CksOff_AvfsGbv_t {
++ VFT_CELL_t Cell[NUM_VFT_COLUMNS];
++};
++typedef struct AVFS_CksOff_AvfsGbv_t AVFS_CksOff_AvfsGbv_t;
++
++struct AVFS_CksOff_BtcGbv_t {
++ VFT_CELL_t Cell[NUM_VFT_COLUMNS];
++};
++typedef struct AVFS_CksOff_BtcGbv_t AVFS_CksOff_BtcGbv_t;
++
++struct AVFS_meanNsigma_t {
++ uint32_t Aconstant[3];
++ uint16_t DC_tol_sigma;
++ uint16_t Platform_mean;
++ uint16_t Platform_sigma;
++ uint16_t PSM_Age_CompFactor;
++ uint8_t Static_Voltage_Offset[NUM_VFT_COLUMNS];
++};
++typedef struct AVFS_meanNsigma_t AVFS_meanNsigma_t;
++
++struct AVFS_Sclk_Offset_t {
++ uint16_t Sclk_Offset[8];
++};
++typedef struct AVFS_Sclk_Offset_t AVFS_Sclk_Offset_t;
++
++struct Power_Sharing_t {
++ uint32_t EnergyCounter;
++ uint32_t EngeryThreshold;
++ uint64_t AM_SCLK_CNT;
++ uint64_t AM_0_BUSY_CNT;
++};
++typedef struct Power_Sharing_t Power_Sharing_t;
++
++
++#endif
++
++
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu75_discrete.h b/drivers/gpu/drm/amd/powerplay/inc/smu75_discrete.h
+new file mode 100644
+index 0000000..b64e58a2
+--- /dev/null
++++ b/drivers/gpu/drm/amd/powerplay/inc/smu75_discrete.h
+@@ -0,0 +1,886 @@
++/*
++ * Copyright 2017 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef SMU75_DISCRETE_H
++#define SMU75_DISCRETE_H
++
++#include "smu75.h"
++
++#pragma pack(push, 1)
++
++#define NUM_SCLK_RANGE 8
++
++#define VCO_3_6 1
++#define VCO_2_4 3
++
++#define POSTDIV_DIV_BY_1 0
++#define POSTDIV_DIV_BY_2 1
++#define POSTDIV_DIV_BY_4 2
++#define POSTDIV_DIV_BY_8 3
++#define POSTDIV_DIV_BY_16 4
++
++struct sclkFcwRange_t {
++ uint8_t vco_setting; /* 1: 3-6GHz, 3: 2-4GHz */
++ uint8_t postdiv; /* divide by 2^n */
++ uint16_t fcw_pcc;
++ uint16_t fcw_trans_upper;
++ uint16_t fcw_trans_lower;
++};
++typedef struct sclkFcwRange_t sclkFcwRange_t;
++
++struct SMIO_Pattern {
++ uint16_t Voltage;
++ uint8_t Smio;
++ uint8_t padding;
++};
++
++typedef struct SMIO_Pattern SMIO_Pattern;
++
++struct SMIO_Table {
++ SMIO_Pattern Pattern[SMU_MAX_SMIO_LEVELS];
++};
++
++typedef struct SMIO_Table SMIO_Table;
++
++struct SMU_SclkSetting {
++ uint32_t SclkFrequency;
++ uint16_t Fcw_int;
++ uint16_t Fcw_frac;
++ uint16_t Pcc_fcw_int;
++ uint8_t PllRange;
++ uint8_t SSc_En;
++ uint16_t Sclk_slew_rate;
++ uint16_t Pcc_up_slew_rate;
++ uint16_t Pcc_down_slew_rate;
++ uint16_t Fcw1_int;
++ uint16_t Fcw1_frac;
++ uint16_t Sclk_ss_slew_rate;
++};
++typedef struct SMU_SclkSetting SMU_SclkSetting;
++
++struct SMU75_Discrete_GraphicsLevel {
++ SMU_VoltageLevel MinVoltage;
++
++ uint8_t pcieDpmLevel;
++ uint8_t DeepSleepDivId;
++ uint16_t ActivityLevel;
++
++ uint32_t CgSpllFuncCntl3;
++ uint32_t CgSpllFuncCntl4;
++ uint32_t CcPwrDynRm;
++ uint32_t CcPwrDynRm1;
++
++ uint8_t SclkDid;
++ uint8_t padding;
++ uint8_t EnabledForActivity;
++ uint8_t EnabledForThrottle;
++ uint8_t UpHyst;
++ uint8_t DownHyst;
++ uint8_t VoltageDownHyst;
++ uint8_t PowerThrottle;
++
++ SMU_SclkSetting SclkSetting;
++
++ uint8_t ScksStretchThreshVid[NUM_SCKS_STATE_TYPES];
++ uint16_t Padding;
++};
++
++typedef struct SMU75_Discrete_GraphicsLevel SMU75_Discrete_GraphicsLevel;
++
++struct SMU75_Discrete_ACPILevel {
++ uint32_t Flags;
++ SMU_VoltageLevel MinVoltage;
++ uint32_t SclkFrequency;
++ uint8_t SclkDid;
++ uint8_t DisplayWatermark;
++ uint8_t DeepSleepDivId;
++ uint8_t padding;
++ uint32_t CcPwrDynRm;
++ uint32_t CcPwrDynRm1;
++
++ SMU_SclkSetting SclkSetting;
++};
++
++typedef struct SMU75_Discrete_ACPILevel SMU75_Discrete_ACPILevel;
++
++struct SMU75_Discrete_Ulv {
++ uint32_t CcPwrDynRm;
++ uint32_t CcPwrDynRm1;
++ uint16_t VddcOffset;
++ uint8_t VddcOffsetVid;
++ uint8_t VddcPhase;
++ uint16_t BifSclkDfs;
++ uint16_t Reserved;
++};
++
++typedef struct SMU75_Discrete_Ulv SMU75_Discrete_Ulv;
++
++struct SMU75_Discrete_MemoryLevel {
++ SMU_VoltageLevel MinVoltage;
++ uint32_t MinMvdd;
++
++ uint32_t MclkFrequency;
++
++ uint8_t StutterEnable;
++ uint8_t EnabledForThrottle;
++ uint8_t EnabledForActivity;
++ uint8_t padding_0;
++
++ uint8_t UpHyst;
++ uint8_t DownHyst;
++ uint8_t VoltageDownHyst;
++ uint8_t padding_1;
++
++ uint16_t ActivityLevel;
++ uint8_t DisplayWatermark;
++ uint8_t padding_2;
++
++ uint16_t Fcw_int;
++ uint16_t Fcw_frac;
++ uint8_t Postdiv;
++ uint8_t padding_3[3];
++};
++
++typedef struct SMU75_Discrete_MemoryLevel SMU75_Discrete_MemoryLevel;
++
++struct SMU75_Discrete_LinkLevel {
++ uint8_t PcieGenSpeed;
++ uint8_t PcieLaneCount;
++ uint8_t EnabledForActivity;
++ uint8_t SPC;
++ uint32_t DownThreshold;
++ uint32_t UpThreshold;
++ uint16_t BifSclkDfs;
++ uint16_t Reserved;
++};
++
++typedef struct SMU75_Discrete_LinkLevel SMU75_Discrete_LinkLevel;
++
++
++/* MC ARB DRAM Timing registers. */
++struct SMU75_Discrete_MCArbDramTimingTableEntry {
++ uint32_t McArbDramTiming;
++ uint32_t McArbDramTiming2;
++ uint32_t McArbBurstTime;
++ uint32_t McArbRfshRate;
++ uint32_t McArbMisc3;
++};
++
++typedef struct SMU75_Discrete_MCArbDramTimingTableEntry SMU75_Discrete_MCArbDramTimingTableEntry;
++
++struct SMU75_Discrete_MCArbDramTimingTable {
++ SMU75_Discrete_MCArbDramTimingTableEntry entries[SMU__NUM_SCLK_DPM_STATE][SMU__NUM_MCLK_DPM_LEVELS];
++};
++
++typedef struct SMU75_Discrete_MCArbDramTimingTable SMU75_Discrete_MCArbDramTimingTable;
++
++/* UVD VCLK/DCLK state (level) definition. */
++struct SMU75_Discrete_UvdLevel {
++ uint32_t VclkFrequency;
++ uint32_t DclkFrequency;
++ SMU_VoltageLevel MinVoltage;
++ uint8_t VclkDivider;
++ uint8_t DclkDivider;
++ uint8_t padding[2];
++};
++
++typedef struct SMU75_Discrete_UvdLevel SMU75_Discrete_UvdLevel;
++
++/* Clocks for other external blocks (VCE, ACP, SAMU). */
++struct SMU75_Discrete_ExtClkLevel {
++ uint32_t Frequency;
++ SMU_VoltageLevel MinVoltage;
++ uint8_t Divider;
++ uint8_t padding[3];
++};
++
++typedef struct SMU75_Discrete_ExtClkLevel SMU75_Discrete_ExtClkLevel;
++
++struct SMU75_Discrete_StateInfo {
++ uint32_t SclkFrequency;
++ uint32_t MclkFrequency;
++ uint32_t VclkFrequency;
++ uint32_t DclkFrequency;
++ uint32_t SamclkFrequency;
++ uint32_t AclkFrequency;
++ uint32_t EclkFrequency;
++ uint16_t MvddVoltage;
++ uint16_t padding16;
++ uint8_t DisplayWatermark;
++ uint8_t McArbIndex;
++ uint8_t McRegIndex;
++ uint8_t SeqIndex;
++ uint8_t SclkDid;
++ int8_t SclkIndex;
++ int8_t MclkIndex;
++ uint8_t PCIeGen;
++};
++
++typedef struct SMU75_Discrete_StateInfo SMU75_Discrete_StateInfo;
++
++struct SMU75_Discrete_DpmTable {
++ SMU75_PIDController GraphicsPIDController;
++ SMU75_PIDController MemoryPIDController;
++ SMU75_PIDController LinkPIDController;
++
++ uint32_t SystemFlags;
++
++ uint32_t VRConfig;
++ uint32_t SmioMask1;
++ uint32_t SmioMask2;
++ SMIO_Table SmioTable1;
++ SMIO_Table SmioTable2;
++
++ uint32_t MvddLevelCount;
++
++ uint8_t BapmVddcVidHiSidd [SMU75_MAX_LEVELS_VDDC];
++ uint8_t BapmVddcVidLoSidd [SMU75_MAX_LEVELS_VDDC];
++ uint8_t BapmVddcVidHiSidd2 [SMU75_MAX_LEVELS_VDDC];
++
++ uint8_t GraphicsDpmLevelCount;
++ uint8_t MemoryDpmLevelCount;
++ uint8_t LinkLevelCount;
++ uint8_t MasterDeepSleepControl;
++
++ uint8_t UvdLevelCount;
++ uint8_t VceLevelCount;
++ uint8_t AcpLevelCount;
++ uint8_t SamuLevelCount;
++
++ uint8_t ThermOutGpio;
++ uint8_t ThermOutPolarity;
++ uint8_t ThermOutMode;
++ uint8_t BootPhases;
++
++ uint8_t VRHotLevel;
++ uint8_t LdoRefSel;
++
++ uint8_t Reserved1[2];
++
++ uint16_t FanStartTemperature;
++ uint16_t FanStopTemperature;
++
++ uint16_t MaxVoltage;
++ uint16_t Reserved2;
++ uint32_t Reserved;
++
++ SMU75_Discrete_GraphicsLevel GraphicsLevel [SMU75_MAX_LEVELS_GRAPHICS];
++ SMU75_Discrete_MemoryLevel MemoryACPILevel;
++ SMU75_Discrete_MemoryLevel MemoryLevel [SMU75_MAX_LEVELS_MEMORY];
++ SMU75_Discrete_LinkLevel LinkLevel [SMU75_MAX_LEVELS_LINK];
++ SMU75_Discrete_ACPILevel ACPILevel;
++ SMU75_Discrete_UvdLevel UvdLevel [SMU75_MAX_LEVELS_UVD];
++ SMU75_Discrete_ExtClkLevel VceLevel [SMU75_MAX_LEVELS_VCE];
++ SMU75_Discrete_ExtClkLevel AcpLevel [SMU75_MAX_LEVELS_ACP];
++ SMU75_Discrete_ExtClkLevel SamuLevel [SMU75_MAX_LEVELS_SAMU];
++ SMU75_Discrete_Ulv Ulv;
++
++ uint8_t DisplayWatermark [SMU75_MAX_LEVELS_MEMORY][SMU75_MAX_LEVELS_GRAPHICS];
++
++ uint32_t SclkStepSize;
++ uint32_t Smio [SMU75_MAX_ENTRIES_SMIO];
++
++ uint8_t UvdBootLevel;
++ uint8_t VceBootLevel;
++ uint8_t AcpBootLevel;
++ uint8_t SamuBootLevel;
++
++ uint8_t GraphicsBootLevel;
++ uint8_t GraphicsVoltageChangeEnable;
++ uint8_t GraphicsThermThrottleEnable;
++ uint8_t GraphicsInterval;
++
++ uint8_t VoltageInterval;
++ uint8_t ThermalInterval;
++ uint16_t TemperatureLimitHigh;
++
++ uint16_t TemperatureLimitLow;
++ uint8_t MemoryBootLevel;
++ uint8_t MemoryVoltageChangeEnable;
++
++ uint16_t BootMVdd;
++ uint8_t MemoryInterval;
++ uint8_t MemoryThermThrottleEnable;
++
++ uint16_t VoltageResponseTime;
++ uint16_t PhaseResponseTime;
++
++ uint8_t PCIeBootLinkLevel;
++ uint8_t PCIeGenInterval;
++ uint8_t DTEInterval;
++ uint8_t DTEMode;
++
++ uint8_t SVI2Enable;
++ uint8_t VRHotGpio;
++ uint8_t AcDcGpio;
++ uint8_t ThermGpio;
++
++ uint16_t PPM_PkgPwrLimit;
++ uint16_t PPM_TemperatureLimit;
++
++ uint16_t DefaultTdp;
++ uint16_t TargetTdp;
++
++ uint16_t FpsHighThreshold;
++ uint16_t FpsLowThreshold;
++
++ uint16_t BAPMTI_R [SMU75_DTE_ITERATIONS][SMU75_DTE_SOURCES][SMU75_DTE_SINKS];
++ uint16_t BAPMTI_RC [SMU75_DTE_ITERATIONS][SMU75_DTE_SOURCES][SMU75_DTE_SINKS];
++
++ uint16_t TemperatureLimitEdge;
++ uint16_t TemperatureLimitHotspot;
++
++ uint16_t BootVddc;
++ uint16_t BootVddci;
++
++ uint16_t FanGainEdge;
++ uint16_t FanGainHotspot;
++
++ uint32_t LowSclkInterruptThreshold;
++ uint32_t VddGfxReChkWait;
++
++ uint8_t ClockStretcherAmount;
++ uint8_t Sclk_CKS_masterEn0_7;
++ uint8_t Sclk_CKS_masterEn8_15;
++ uint8_t DPMFreezeAndForced;
++
++ uint8_t Sclk_voltageOffset[8];
++
++ SMU_ClockStretcherDataTable ClockStretcherDataTable;
++ SMU_CKS_LOOKUPTable CKS_LOOKUPTable;
++
++ uint32_t CurrSclkPllRange;
++ sclkFcwRange_t SclkFcwRangeTable[NUM_SCLK_RANGE];
++
++ GB_VDROOP_TABLE_t BTCGB_VDROOP_TABLE[BTCGB_VDROOP_TABLE_MAX_ENTRIES];
++ SMU_QuadraticCoeffs AVFSGB_FUSE_TABLE[AVFSGB_VDROOP_TABLE_MAX_ENTRIES];
++};
++
++typedef struct SMU75_Discrete_DpmTable SMU75_Discrete_DpmTable;
++
++struct SMU75_Discrete_FanTable {
++ uint16_t FdoMode;
++ int16_t TempMin;
++ int16_t TempMed;
++ int16_t TempMax;
++ int16_t Slope1;
++ int16_t Slope2;
++ int16_t FdoMin;
++ int16_t HystUp;
++ int16_t HystDown;
++ int16_t HystSlope;
++ int16_t TempRespLim;
++ int16_t TempCurr;
++ int16_t SlopeCurr;
++ int16_t PwmCurr;
++ uint32_t RefreshPeriod;
++ int16_t FdoMax;
++ uint8_t TempSrc;
++ int8_t Padding;
++};
++
++typedef struct SMU75_Discrete_FanTable SMU75_Discrete_FanTable;
++
++#define SMU7_DISCRETE_GPIO_SCLK_DEBUG 4
++#define SMU7_DISCRETE_GPIO_SCLK_DEBUG_BIT (0x1 << SMU7_DISCRETE_GPIO_SCLK_DEBUG)
++
++
++
++struct SMU7_MclkDpmScoreboard {
++ uint32_t PercentageBusy;
++
++ int32_t PIDError;
++ int32_t PIDIntegral;
++ int32_t PIDOutput;
++
++ uint32_t SigmaDeltaAccum;
++ uint32_t SigmaDeltaOutput;
++ uint32_t SigmaDeltaLevel;
++
++ uint32_t UtilizationSetpoint;
++
++ uint8_t TdpClampMode;
++ uint8_t TdcClampMode;
++ uint8_t ThermClampMode;
++ uint8_t VoltageBusy;
++
++ int8_t CurrLevel;
++ int8_t TargLevel;
++ uint8_t LevelChangeInProgress;
++ uint8_t UpHyst;
++
++ uint8_t DownHyst;
++ uint8_t VoltageDownHyst;
++ uint8_t DpmEnable;
++ uint8_t DpmRunning;
++
++ uint8_t DpmForce;
++ uint8_t DpmForceLevel;
++ uint8_t padding2;
++ uint8_t McArbIndex;
++
++ uint32_t MinimumPerfMclk;
++
++ uint8_t AcpiReq;
++ uint8_t AcpiAck;
++ uint8_t MclkSwitchInProgress;
++ uint8_t MclkSwitchCritical;
++
++ uint8_t IgnoreVBlank;
++ uint8_t TargetMclkIndex;
++ uint8_t TargetMvddIndex;
++ uint8_t MclkSwitchResult;
++
++ uint16_t VbiFailureCount;
++ uint8_t VbiWaitCounter;
++ uint8_t EnabledLevelsChange;
++
++ uint16_t LevelResidencyCounters [SMU75_MAX_LEVELS_MEMORY];
++ uint16_t LevelSwitchCounters [SMU75_MAX_LEVELS_MEMORY];
++
++ void (*TargetStateCalculator)(uint8_t);
++ void (*SavedTargetStateCalculator)(uint8_t);
++
++ uint16_t AutoDpmInterval;
++ uint16_t AutoDpmRange;
++
++ uint16_t VbiTimeoutCount;
++ uint16_t MclkSwitchingTime;
++
++ uint8_t fastSwitch;
++ uint8_t Save_PIC_VDDGFX_EXIT;
++ uint8_t Save_PIC_VDDGFX_ENTER;
++ uint8_t VbiTimeout;
++
++ uint32_t HbmTempRegBackup;
++};
++
++typedef struct SMU7_MclkDpmScoreboard SMU7_MclkDpmScoreboard;
++
++struct SMU7_UlvScoreboard {
++ uint8_t EnterUlv;
++ uint8_t ExitUlv;
++ uint8_t UlvActive;
++ uint8_t WaitingForUlv;
++ uint8_t UlvEnable;
++ uint8_t UlvRunning;
++ uint8_t UlvMasterEnable;
++ uint8_t padding;
++ uint32_t UlvAbortedCount;
++ uint32_t UlvTimeStamp;
++};
++
++typedef struct SMU7_UlvScoreboard SMU7_UlvScoreboard;
++
++struct VddgfxSavedRegisters {
++ uint32_t GPU_DBG[3];
++ uint32_t MEC_BaseAddress_Hi;
++ uint32_t MEC_BaseAddress_Lo;
++ uint32_t THM_TMON0_CTRL2__RDIR_PRESENT;
++ uint32_t THM_TMON1_CTRL2__RDIR_PRESENT;
++ uint32_t CP_INT_CNTL;
++};
++
++typedef struct VddgfxSavedRegisters VddgfxSavedRegisters;
++
++struct SMU7_VddGfxScoreboard {
++ uint8_t VddGfxEnable;
++ uint8_t VddGfxActive;
++ uint8_t VPUResetOccured;
++ uint8_t padding;
++
++ uint32_t VddGfxEnteredCount;
++ uint32_t VddGfxAbortedCount;
++
++ uint32_t VddGfxVid;
++
++ VddgfxSavedRegisters SavedRegisters;
++};
++
++typedef struct SMU7_VddGfxScoreboard SMU7_VddGfxScoreboard;
++
++struct SMU7_TdcLimitScoreboard {
++ uint8_t Enable;
++ uint8_t Running;
++ uint16_t Alpha;
++ uint32_t FilteredIddc;
++ uint32_t IddcLimit;
++ uint32_t IddcHyst;
++ SMU7_HystController_Data HystControllerData;
++};
++
++typedef struct SMU7_TdcLimitScoreboard SMU7_TdcLimitScoreboard;
++
++struct SMU7_PkgPwrLimitScoreboard {
++ uint8_t Enable;
++ uint8_t Running;
++ uint16_t Alpha;
++ uint32_t FilteredPkgPwr;
++ uint32_t Limit;
++ uint32_t Hyst;
++ uint32_t LimitFromDriver;
++ uint8_t PowerSharingEnabled;
++ uint8_t PowerSharingCounter;
++ uint8_t PowerSharingINTEnabled;
++ uint8_t GFXActivityCounterEnabled;
++ uint32_t EnergyCount;
++ uint32_t PSACTCount;
++ uint8_t RollOverRequired;
++ uint8_t RollOverCount;
++ uint8_t padding[2];
++ SMU7_HystController_Data HystControllerData;
++};
++
++typedef struct SMU7_PkgPwrLimitScoreboard SMU7_PkgPwrLimitScoreboard;
++
++struct SMU7_BapmScoreboard {
++ uint32_t source_powers[SMU75_DTE_SOURCES];
++ uint32_t source_powers_last[SMU75_DTE_SOURCES];
++ int32_t entity_temperatures[SMU75_NUM_GPU_TES];
++ int32_t initial_entity_temperatures[SMU75_NUM_GPU_TES];
++ int32_t Limit;
++ int32_t Hyst;
++ int32_t therm_influence_coeff_table[SMU75_DTE_ITERATIONS * SMU75_DTE_SOURCES * SMU75_DTE_SINKS * 2];
++ int32_t therm_node_table[SMU75_DTE_ITERATIONS * SMU75_DTE_SOURCES * SMU75_DTE_SINKS];
++ uint16_t ConfigTDPPowerScalar;
++ uint16_t FanSpeedPowerScalar;
++ uint16_t OverDrivePowerScalar;
++ uint16_t OverDriveLimitScalar;
++ uint16_t FinalPowerScalar;
++ uint8_t VariantID;
++ uint8_t spare997;
++
++ SMU7_HystController_Data HystControllerData;
++
++ int32_t temperature_gradient_slope;
++ int32_t temperature_gradient;
++ uint32_t measured_temperature;
++};
++
++
++typedef struct SMU7_BapmScoreboard SMU7_BapmScoreboard;
++
++struct SMU7_AcpiScoreboard {
++ uint32_t SavedInterruptMask[2];
++ uint8_t LastACPIRequest;
++ uint8_t CgBifResp;
++ uint8_t RequestType;
++ uint8_t Padding;
++ SMU75_Discrete_ACPILevel D0Level;
++};
++
++typedef struct SMU7_AcpiScoreboard SMU7_AcpiScoreboard;
++
++struct SMU75_Discrete_PmFuses {
++ uint8_t BapmVddCVidHiSidd[8];
++
++ uint8_t BapmVddCVidLoSidd[8];
++
++ uint8_t VddCVid[8];
++
++ uint8_t SviLoadLineEn;
++ uint8_t SviLoadLineVddC;
++ uint8_t SviLoadLineTrimVddC;
++ uint8_t SviLoadLineOffsetVddC;
++
++ uint16_t TDC_VDDC_PkgLimit;
++ uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
++ uint8_t TDC_MAWt;
++
++ uint8_t TdcWaterfallCtl;
++ uint8_t LPMLTemperatureMin;
++ uint8_t LPMLTemperatureMax;
++ uint8_t Reserved;
++
++ uint8_t LPMLTemperatureScaler[16];
++
++ int16_t FuzzyFan_ErrorSetDelta;
++ int16_t FuzzyFan_ErrorRateSetDelta;
++ int16_t FuzzyFan_PwmSetDelta;
++ uint16_t Reserved6;
++
++ uint8_t GnbLPML[16];
++
++ uint8_t GnbLPMLMaxVid;
++ uint8_t GnbLPMLMinVid;
++ uint8_t Reserved1[2];
++
++ uint16_t BapmVddCBaseLeakageHiSidd;
++ uint16_t BapmVddCBaseLeakageLoSidd;
++
++ uint16_t VFT_Temp[3];
++ uint8_t Version;
++ uint8_t padding;
++
++ SMU_QuadraticCoeffs VFT_ATE[3];
++
++ SMU_QuadraticCoeffs AVFS_GB;
++ SMU_QuadraticCoeffs ATE_ACBTC_GB;
++
++ SMU_QuadraticCoeffs P2V;
++
++ uint32_t PsmCharzFreq;
++
++ uint16_t InversionVoltage;
++ uint16_t PsmCharzTemp;
++
++ uint32_t EnabledAvfsModules;
++
++ SMU_QuadraticCoeffs BtcGbv_CksOff;
++};
++
++typedef struct SMU75_Discrete_PmFuses SMU75_Discrete_PmFuses;
++
++struct SMU7_Discrete_Log_Header_Table {
++ uint32_t version;
++ uint32_t asic_id;
++ uint16_t flags;
++ uint16_t entry_size;
++ uint32_t total_size;
++ uint32_t num_of_entries;
++ uint8_t type;
++ uint8_t mode;
++ uint8_t filler_0[2];
++ uint32_t filler_1[2];
++};
++
++typedef struct SMU7_Discrete_Log_Header_Table SMU7_Discrete_Log_Header_Table;
++
++struct SMU7_Discrete_Log_Cntl {
++ uint8_t Enabled;
++ uint8_t Type;
++ uint8_t padding[2];
++ uint32_t BufferSize;
++ uint32_t SamplesLogged;
++ uint32_t SampleSize;
++ uint32_t AddrL;
++ uint32_t AddrH;
++};
++
++typedef struct SMU7_Discrete_Log_Cntl SMU7_Discrete_Log_Cntl;
++
++#if defined SMU__DGPU_ONLY
++#define CAC_ACC_NW_NUM_OF_SIGNALS 87
++#endif
++
++
++struct SMU7_Discrete_Cac_Collection_Table {
++ uint32_t temperature;
++ uint32_t cac_acc_nw[CAC_ACC_NW_NUM_OF_SIGNALS];
++};
++
++typedef struct SMU7_Discrete_Cac_Collection_Table SMU7_Discrete_Cac_Collection_Table;
++
++struct SMU7_Discrete_Cac_Verification_Table {
++ uint32_t VddcTotalPower;
++ uint32_t VddcLeakagePower;
++ uint32_t VddcConstantPower;
++ uint32_t VddcGfxDynamicPower;
++ uint32_t VddcUvdDynamicPower;
++ uint32_t VddcVceDynamicPower;
++ uint32_t VddcAcpDynamicPower;
++ uint32_t VddcPcieDynamicPower;
++ uint32_t VddcDceDynamicPower;
++ uint32_t VddcCurrent;
++ uint32_t VddcVoltage;
++ uint32_t VddciTotalPower;
++ uint32_t VddciLeakagePower;
++ uint32_t VddciConstantPower;
++ uint32_t VddciDynamicPower;
++ uint32_t Vddr1TotalPower;
++ uint32_t Vddr1LeakagePower;
++ uint32_t Vddr1ConstantPower;
++ uint32_t Vddr1DynamicPower;
++ uint32_t spare[4];
++ uint32_t temperature;
++};
++
++typedef struct SMU7_Discrete_Cac_Verification_Table SMU7_Discrete_Cac_Verification_Table;
++
++struct SMU7_Discrete_Pm_Status_Table {
++ int32_t T_meas_max[SMU75_THERMAL_INPUT_LOOP_COUNT];
++ int32_t T_meas_acc[SMU75_THERMAL_INPUT_LOOP_COUNT];
++
++ uint32_t I_calc_max;
++ uint32_t I_calc_acc;
++ uint32_t P_meas_acc;
++ uint32_t V_meas_load_acc;
++ uint32_t I_meas_acc;
++ uint32_t P_meas_acc_vddci;
++ uint32_t V_meas_load_acc_vddci;
++ uint32_t I_meas_acc_vddci;
++
++ uint16_t Sclk_dpm_residency[8];
++ uint16_t Uvd_dpm_residency[8];
++ uint16_t Vce_dpm_residency[8];
++ uint16_t Mclk_dpm_residency[4];
++
++ uint32_t P_roc_acc;
++ uint32_t PkgPwr_max;
++ uint32_t PkgPwr_acc;
++ uint32_t MclkSwitchingTime_max;
++ uint32_t MclkSwitchingTime_acc;
++ uint32_t FanPwm_acc;
++ uint32_t FanRpm_acc;
++ uint32_t Gfx_busy_acc;
++ uint32_t Mc_busy_acc;
++ uint32_t Fps_acc;
++
++ uint32_t AccCnt;
++};
++
++typedef struct SMU7_Discrete_Pm_Status_Table SMU7_Discrete_Pm_Status_Table;
++
++struct SMU7_Discrete_AutoWattMan_Status_Table {
++ int32_t T_meas_acc[SMU75_THERMAL_INPUT_LOOP_COUNT];
++ uint16_t Sclk_dpm_residency[8];
++ uint16_t Mclk_dpm_residency[4];
++ uint32_t TgpPwr_acc;
++ uint32_t Gfx_busy_acc;
++ uint32_t Mc_busy_acc;
++ uint32_t AccCnt;
++};
++
++typedef struct SMU7_Discrete_AutoWattMan_Status_Table SMU7_Discrete_AutoWattMan_Status_Table;
++
++#define SMU7_MAX_GFX_CU_COUNT 24
++#define SMU7_MIN_GFX_CU_COUNT 8
++#define SMU7_GFX_CU_PG_ENABLE_DC_MAX_CU_SHIFT 0
++#define SMU7_GFX_CU_PG_ENABLE_DC_MAX_CU_MASK (0xFFFF << SMU7_GFX_CU_PG_ENABLE_DC_MAX_CU_SHIFT)
++#define SMU7_GFX_CU_PG_ENABLE_AC_MAX_CU_SHIFT 16
++#define SMU7_GFX_CU_PG_ENABLE_AC_MAX_CU_MASK (0xFFFF << SMU7_GFX_CU_PG_ENABLE_AC_MAX_CU_SHIFT)
++
++struct SMU7_GfxCuPgScoreboard {
++ uint8_t Enabled;
++ uint8_t WaterfallUp;
++ uint8_t WaterfallDown;
++ uint8_t WaterfallLimit;
++ uint8_t CurrMaxCu;
++ uint8_t TargMaxCu;
++ uint8_t ClampMode;
++ uint8_t Active;
++ uint8_t MaxSupportedCu;
++ uint8_t MinSupportedCu;
++ uint8_t PendingGfxCuHostInterrupt;
++ uint8_t LastFilteredMaxCuInteger;
++ uint16_t FilteredMaxCu;
++ uint16_t FilteredMaxCuAlpha;
++ uint16_t FilterResetCount;
++ uint16_t FilterResetCountLimit;
++ uint8_t ForceCu;
++ uint8_t ForceCuCount;
++ uint8_t AcModeMaxCu;
++ uint8_t DcModeMaxCu;
++};
++
++typedef struct SMU7_GfxCuPgScoreboard SMU7_GfxCuPgScoreboard;
++
++#define SMU7_SCLK_CAC 0x561
++#define SMU7_MCLK_CAC 0xF9
++#define SMU7_VCLK_CAC 0x2DE
++#define SMU7_DCLK_CAC 0x2DE
++#define SMU7_ECLK_CAC 0x25E
++#define SMU7_ACLK_CAC 0x25E
++#define SMU7_SAMCLK_CAC 0x25E
++#define SMU7_DISPCLK_CAC 0x100
++#define SMU7_CAC_CONSTANT 0x2EE3430
++#define SMU7_CAC_CONSTANT_SHIFT 18
++
++#define SMU7_VDDCI_MCLK_CONST 1765
++#define SMU7_VDDCI_MCLK_CONST_SHIFT 16
++#define SMU7_VDDCI_VDDCI_CONST 50958
++#define SMU7_VDDCI_VDDCI_CONST_SHIFT 14
++#define SMU7_VDDCI_CONST 11781
++#define SMU7_VDDCI_STROBE_PWR 1331
++
++#define SMU7_VDDR1_CONST 693
++#define SMU7_VDDR1_CAC_WEIGHT 20
++#define SMU7_VDDR1_CAC_WEIGHT_SHIFT 19
++#define SMU7_VDDR1_STROBE_PWR 512
++
++#define SMU7_AREA_COEFF_UVD 0xA78
++#define SMU7_AREA_COEFF_VCE 0x190A
++#define SMU7_AREA_COEFF_ACP 0x22D1
++#define SMU7_AREA_COEFF_SAMU 0x534
++
++#define SMU7_THERM_OUT_MODE_DISABLE 0x0
++#define SMU7_THERM_OUT_MODE_THERM_ONLY 0x1
++#define SMU7_THERM_OUT_MODE_THERM_VRHOT 0x2
++
++#define SQ_Enable_MASK 0x1
++#define SQ_IR_MASK 0x2
++#define SQ_PCC_MASK 0x4
++#define SQ_EDC_MASK 0x8
++
++#define TCP_Enable_MASK 0x100
++#define TCP_IR_MASK 0x200
++#define TCP_PCC_MASK 0x400
++#define TCP_EDC_MASK 0x800
++
++#define TD_Enable_MASK 0x10000
++#define TD_IR_MASK 0x20000
++#define TD_PCC_MASK 0x40000
++#define TD_EDC_MASK 0x80000
++
++#define DB_Enable_MASK 0x1000000
++#define DB_IR_MASK 0x2000000
++#define DB_PCC_MASK 0x4000000
++#define DB_EDC_MASK 0x8000000
++
++#define SQ_Enable_SHIFT 0
++#define SQ_IR_SHIFT 1
++#define SQ_PCC_SHIFT 2
++#define SQ_EDC_SHIFT 3
++
++#define TCP_Enable_SHIFT 8
++#define TCP_IR_SHIFT 9
++#define TCP_PCC_SHIFT 10
++#define TCP_EDC_SHIFT 11
++
++#define TD_Enable_SHIFT 16
++#define TD_IR_SHIFT 17
++#define TD_PCC_SHIFT 18
++#define TD_EDC_SHIFT 19
++
++#define DB_Enable_SHIFT 24
++#define DB_IR_SHIFT 25
++#define DB_PCC_SHIFT 26
++#define DB_EDC_SHIFT 27
++
++#define PMFUSES_AVFSSIZE 104
++
++#define BTCGB0_Vdroop_Enable_MASK 0x1
++#define BTCGB1_Vdroop_Enable_MASK 0x2
++#define AVFSGB0_Vdroop_Enable_MASK 0x4
++#define AVFSGB1_Vdroop_Enable_MASK 0x8
++
++#define BTCGB0_Vdroop_Enable_SHIFT 0
++#define BTCGB1_Vdroop_Enable_SHIFT 1
++#define AVFSGB0_Vdroop_Enable_SHIFT 2
++#define AVFSGB1_Vdroop_Enable_SHIFT 3
++
++#pragma pack(pop)
++
++
++#endif
++
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4331-drm-amd-add-a-new-struct-in-atombios.h.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4331-drm-amd-add-a-new-struct-in-atombios.h.patch
new file mode 100644
index 00000000..9c0adf1a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4331-drm-amd-add-a-new-struct-in-atombios.h.patch
@@ -0,0 +1,33 @@
+From 5bb9b7e9b8821ec2141a4cf9a738621ced0bc874 Mon Sep 17 00:00:00 2001
+From: Eric Huang <JinHuiEric.Huang@amd.com>
+Date: Fri, 17 Nov 2017 11:17:48 -0500
+Subject: [PATCH 4331/5725] drm/amd: add a new struct in atombios.h
+
+Signed-off-by: Eric Huang <JinHuiEric.Huang@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/include/atombios.h | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/include/atombios.h b/drivers/gpu/drm/amd/include/atombios.h
+index f696bbb..7931502 100644
+--- a/drivers/gpu/drm/amd/include/atombios.h
++++ b/drivers/gpu/drm/amd/include/atombios.h
+@@ -632,6 +632,13 @@ typedef struct _COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2
+ ULONG ulReserved;
+ }COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2;
+
++typedef struct _COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_3
++{
++ COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 ulClock;
++ USHORT usMclk_fcw_frac; //fractional divider of fcw = usSclk_fcw_frac/65536
++ USHORT usMclk_fcw_int; //integer divider of fcwc
++}COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_3;
++
+ //Input parameter of DynamicMemorySettingsTable
+ //when ATOM_COMPUTE_CLOCK_FREQ.ulComputeClockFlag = COMPUTE_MEMORY_PLL_PARAM
+ typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4332-drm-amd-powerplay-update-ppatomctrl.c-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4332-drm-amd-powerplay-update-ppatomctrl.c-v2.patch
new file mode 100644
index 00000000..8e84d3ea
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4332-drm-amd-powerplay-update-ppatomctrl.c-v2.patch
@@ -0,0 +1,97 @@
+From 2715e8d18be7bd17bb5e8d36997c6d1a235d42c5 Mon Sep 17 00:00:00 2001
+From: Eric Huang <JinHuiEric.Huang@amd.com>
+Date: Fri, 17 Nov 2017 11:21:02 -0500
+Subject: [PATCH 4332/5725] drm/amd/powerplay: update ppatomctrl.c (v2)
+
+Used for calculating memory clocks in powerplay.
+
+v2: handle endian swapping of atom data (Alex)
+
+Signed-off-by: Eric Huang <JinHuiEric.Huang@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c | 31 ++++++++++++++++++++++++
+ drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h | 10 ++++++++
+ 2 files changed, 41 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+index 7766f5c..9c7625c 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+@@ -23,6 +23,7 @@
+ #include "pp_debug.h"
+ #include <linux/module.h>
+ #include <linux/slab.h>
++#include <linux/delay.h>
+ #include "atom.h"
+ #include "ppatomctrl.h"
+ #include "atombios.h"
+@@ -314,6 +315,36 @@ int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr,
+ return result;
+ }
+
++int atomctrl_get_memory_pll_dividers_ai(struct pp_hwmgr *hwmgr,
++ uint32_t clock_value,
++ pp_atomctrl_memory_clock_param_ai *mpll_param)
++{
++ struct amdgpu_device *adev = hwmgr->adev;
++ COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_3 mpll_parameters = {0};
++ int result;
++
++ mpll_parameters.ulClock.ulClock = cpu_to_le32(clock_value);
++
++ result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
++ GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
++ (uint32_t *)&mpll_parameters);
++
++ /* VEGAM's mpll takes sometime to finish computing */
++	/* VEGAM's mpll takes some time to finish computing */
++
++ if (!result) {
++ mpll_param->ulMclk_fcw_int =
++ le16_to_cpu(mpll_parameters.usMclk_fcw_int);
++ mpll_param->ulMclk_fcw_frac =
++ le16_to_cpu(mpll_parameters.usMclk_fcw_frac);
++ mpll_param->ulClock =
++ le32_to_cpu(mpll_parameters.ulClock.ulClock);
++ mpll_param->ulPostDiv = mpll_parameters.ulClock.ucPostDiv;
++ }
++
++ return result;
++}
++
+ int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr,
+ uint32_t clock_value,
+ pp_atomctrl_clock_dividers_kong *dividers)
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
+index e1b5d6b..3ee54f1 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
+@@ -146,6 +146,14 @@ struct pp_atomctrl_memory_clock_param {
+ };
+ typedef struct pp_atomctrl_memory_clock_param pp_atomctrl_memory_clock_param;
+
++struct pp_atomctrl_memory_clock_param_ai {
++ uint32_t ulClock;
++ uint32_t ulPostDiv;
++ uint16_t ulMclk_fcw_frac;
++ uint16_t ulMclk_fcw_int;
++};
++typedef struct pp_atomctrl_memory_clock_param_ai pp_atomctrl_memory_clock_param_ai;
++
+ struct pp_atomctrl_internal_ss_info {
+ uint32_t speed_spectrum_percentage; /* in 1/100 percentage */
+ uint32_t speed_spectrum_rate; /* in KHz */
+@@ -295,6 +303,8 @@ extern bool atomctrl_is_voltage_controlled_by_gpio_v3(struct pp_hwmgr *hwmgr, ui
+ extern int atomctrl_get_voltage_table_v3(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint8_t voltage_mode, pp_atomctrl_voltage_table *voltage_table);
+ extern int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr,
+ uint32_t clock_value, pp_atomctrl_memory_clock_param *mpll_param);
++extern int atomctrl_get_memory_pll_dividers_ai(struct pp_hwmgr *hwmgr,
++ uint32_t clock_value, pp_atomctrl_memory_clock_param_ai *mpll_param);
+ extern int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr,
+ uint32_t clock_value,
+ pp_atomctrl_clock_dividers_kong *dividers);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4333-drm-amd-powerplay-update-process-pptables.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4333-drm-amd-powerplay-update-process-pptables.patch
new file mode 100644
index 00000000..3d7f7647
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4333-drm-amd-powerplay-update-process-pptables.patch
@@ -0,0 +1,91 @@
+From 8b6a86ba907c75c31229df0f3ec4ce25b381376e Mon Sep 17 00:00:00 2001
+From: Eric Huang <JinHuiEric.Huang@amd.com>
+Date: Fri, 17 Nov 2017 11:31:09 -0500
+Subject: [PATCH 4333/5725] drm/amd/powerplay: update process pptables
+
+Add functionality to fetch gpio table from vbios.
+
+Signed-off-by: Eric Huang <JinHuiEric.Huang@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../amd/powerplay/hwmgr/process_pptables_v1_0.c | 37 ++++++++++++++++++++++
+ 1 file changed, 37 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+index 8516516..f0d48b1 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+@@ -728,6 +728,32 @@ static int get_mm_clock_voltage_table(
+ return 0;
+ }
+
++static int get_gpio_table(struct pp_hwmgr *hwmgr,
++ struct phm_ppt_v1_gpio_table **pp_tonga_gpio_table,
++ const ATOM_Tonga_GPIO_Table *atom_gpio_table)
++{
++ uint32_t table_size;
++ struct phm_ppt_v1_gpio_table *pp_gpio_table;
++ struct phm_ppt_v1_information *pp_table_information =
++ (struct phm_ppt_v1_information *)(hwmgr->pptable);
++
++ table_size = sizeof(struct phm_ppt_v1_gpio_table);
++ pp_gpio_table = kzalloc(table_size, GFP_KERNEL);
++ if (!pp_gpio_table)
++ return -ENOMEM;
++
++ if (pp_table_information->vdd_dep_on_sclk->count <
++ atom_gpio_table->ucVRHotTriggeredSclkDpmIndex)
++ PP_ASSERT_WITH_CODE(false,
++ "SCLK DPM index for VRHot cannot exceed the total sclk level count!",);
++ else
++ pp_gpio_table->vrhot_triggered_sclk_dpm_index =
++ atom_gpio_table->ucVRHotTriggeredSclkDpmIndex;
++
++ *pp_tonga_gpio_table = pp_gpio_table;
++
++ return 0;
++}
+ /**
+ * Private Function used during initialization.
+ * Initialize clock voltage dependency
+@@ -761,11 +787,15 @@ static int init_clock_voltage_dependency(
+ const PPTable_Generic_SubTable_Header *pcie_table =
+ (const PPTable_Generic_SubTable_Header *)(((unsigned long) powerplay_table) +
+ le16_to_cpu(powerplay_table->usPCIETableOffset));
++ const ATOM_Tonga_GPIO_Table *gpio_table =
++ (const ATOM_Tonga_GPIO_Table *)(((unsigned long) powerplay_table) +
++ le16_to_cpu(powerplay_table->usGPIOTableOffset));
+
+ pp_table_information->vdd_dep_on_sclk = NULL;
+ pp_table_information->vdd_dep_on_mclk = NULL;
+ pp_table_information->mm_dep_table = NULL;
+ pp_table_information->pcie_table = NULL;
++ pp_table_information->gpio_table = NULL;
+
+ if (powerplay_table->usMMDependencyTableOffset != 0)
+ result = get_mm_clock_voltage_table(hwmgr,
+@@ -810,6 +840,10 @@ static int init_clock_voltage_dependency(
+ result = get_valid_clk(hwmgr, &pp_table_information->valid_sclk_values,
+ pp_table_information->vdd_dep_on_sclk);
+
++ if (!result && gpio_table)
++ result = get_gpio_table(hwmgr, &pp_table_information->gpio_table,
++ gpio_table);
++
+ return result;
+ }
+
+@@ -1116,6 +1150,9 @@ static int pp_tables_v1_0_uninitialize(struct pp_hwmgr *hwmgr)
+ kfree(pp_table_information->pcie_table);
+ pp_table_information->pcie_table = NULL;
+
++ kfree(pp_table_information->gpio_table);
++ pp_table_information->gpio_table = NULL;
++
+ kfree(hwmgr->pptable);
+ hwmgr->pptable = NULL;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4334-drm-amd-powerplay-add-smumgr-support-for-VEGAM-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4334-drm-amd-powerplay-add-smumgr-support-for-VEGAM-v2.patch
new file mode 100644
index 00000000..308a2832
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4334-drm-amd-powerplay-add-smumgr-support-for-VEGAM-v2.patch
@@ -0,0 +1,2532 @@
+From d35f797a94ae4fd2255fbc41e4c3df5789cebf23 Mon Sep 17 00:00:00 2001
+From: Eric Huang <JinHuiEric.Huang@amd.com>
+Date: Wed, 11 Apr 2018 15:32:58 -0500
+Subject: [PATCH 4334/5725] drm/amd/powerplay: add smumgr support for VEGAM
+ (v2)
+
+The smumgr handles communication between the driver
+and the SMU for power management.
+
+v2: fix typo (Alex)
+
+Signed-off-by: Eric Huang <JinHuiEric.Huang@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 6 +
+ drivers/gpu/drm/amd/powerplay/smumgr/Makefile | 2 +-
+ .../gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c | 2382 ++++++++++++++++++++
+ .../gpu/drm/amd/powerplay/smumgr/vegam_smumgr.h | 75 +
+ 4 files changed, 2464 insertions(+), 1 deletion(-)
+ create mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
+ create mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.h
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+index 43f0ea8..71b4233 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+@@ -40,6 +40,7 @@ extern const struct pp_smumgr_func iceland_smu_funcs;
+ extern const struct pp_smumgr_func tonga_smu_funcs;
+ extern const struct pp_smumgr_func fiji_smu_funcs;
+ extern const struct pp_smumgr_func polaris10_smu_funcs;
++extern const struct pp_smumgr_func vegam_smu_funcs;
+ extern const struct pp_smumgr_func vega10_smu_funcs;
+ extern const struct pp_smumgr_func vega12_smu_funcs;
+ extern const struct pp_smumgr_func smu10_smu_funcs;
+@@ -136,6 +137,11 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
+ polaris_set_asic_special_caps(hwmgr);
+ hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK);
+ break;
++ case CHIP_VEGAM:
++ hwmgr->smumgr_funcs = &vegam_smu_funcs;
++ polaris_set_asic_special_caps(hwmgr);
++ hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK);
++ break;
+ default:
+ return -EINVAL;
+ }
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
+index 5010d49..5e9db66 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
+@@ -6,7 +6,7 @@
+ SMU_MGR = smumgr.o smu8_smumgr.o tonga_smumgr.o fiji_smumgr.o \
+ polaris10_smumgr.o iceland_smumgr.o \
+ smu7_smumgr.o vega10_smumgr.o smu10_smumgr.o ci_smumgr.o \
+- vega12_smumgr.o
++ vega12_smumgr.o vegam_smumgr.o
+
+ AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR))
+
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
+new file mode 100644
+index 0000000..c9a5633
+--- /dev/null
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
+@@ -0,0 +1,2382 @@
++/*
++ * Copyright 2017 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++#include "pp_debug.h"
++#include "smumgr.h"
++#include "smu_ucode_xfer_vi.h"
++#include "vegam_smumgr.h"
++#include "smu/smu_7_1_3_d.h"
++#include "smu/smu_7_1_3_sh_mask.h"
++#include "gmc/gmc_8_1_d.h"
++#include "gmc/gmc_8_1_sh_mask.h"
++#include "oss/oss_3_0_d.h"
++#include "gca/gfx_8_0_d.h"
++#include "bif/bif_5_0_d.h"
++#include "bif/bif_5_0_sh_mask.h"
++#include "ppatomctrl.h"
++#include "cgs_common.h"
++#include "smu7_ppsmc.h"
++
++#include "smu7_dyn_defaults.h"
++
++#include "smu7_hwmgr.h"
++#include "hardwaremanager.h"
++#include "ppatomctrl.h"
++#include "atombios.h"
++#include "pppcielanes.h"
++
++#include "dce/dce_11_2_d.h"
++#include "dce/dce_11_2_sh_mask.h"
++
++#define PPVEGAM_TARGETACTIVITY_DFLT 50
++
++#define VOLTAGE_VID_OFFSET_SCALE1 625
++#define VOLTAGE_VID_OFFSET_SCALE2 100
++#define POWERTUNE_DEFAULT_SET_MAX 1
++#define VDDC_VDDCI_DELTA 200
++#define MC_CG_ARB_FREQ_F1 0x0b
++
++#define STRAP_ASIC_RO_LSB 2168
++#define STRAP_ASIC_RO_MSB 2175
++
++#define PPSMC_MSG_ApplyAvfsCksOffVoltage ((uint16_t) 0x415)
++#define PPSMC_MSG_EnableModeSwitchRLCNotification ((uint16_t) 0x305)
++
++static const struct vegam_pt_defaults
++vegam_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
++ /* sviLoadLineEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
++ * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */
++ { 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
++ { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
++ { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } },
++};
++
++static const sclkFcwRange_t Range_Table[NUM_SCLK_RANGE] = {
++ {VCO_2_4, POSTDIV_DIV_BY_16, 75, 160, 112},
++ {VCO_3_6, POSTDIV_DIV_BY_16, 112, 224, 160},
++ {VCO_2_4, POSTDIV_DIV_BY_8, 75, 160, 112},
++ {VCO_3_6, POSTDIV_DIV_BY_8, 112, 224, 160},
++ {VCO_2_4, POSTDIV_DIV_BY_4, 75, 160, 112},
++ {VCO_3_6, POSTDIV_DIV_BY_4, 112, 216, 160},
++ {VCO_2_4, POSTDIV_DIV_BY_2, 75, 160, 108},
++ {VCO_3_6, POSTDIV_DIV_BY_2, 112, 216, 160} };
++
++static int vegam_smu_init(struct pp_hwmgr *hwmgr)
++{
++ struct vegam_smumgr *smu_data;
++
++ smu_data = kzalloc(sizeof(struct vegam_smumgr), GFP_KERNEL);
++ if (smu_data == NULL)
++ return -ENOMEM;
++
++ hwmgr->smu_backend = smu_data;
++
++ if (smu7_init(hwmgr)) {
++ kfree(smu_data);
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static int vegam_start_smu_in_protection_mode(struct pp_hwmgr *hwmgr)
++{
++ int result = 0;
++
++ /* Wait for smc boot up */
++ /* PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0) */
++
++ /* Assert reset */
++ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
++ SMC_SYSCON_RESET_CNTL, rst_reg, 1);
++
++ result = smu7_upload_smu_firmware_image(hwmgr);
++ if (result != 0)
++ return result;
++
++ /* Clear status */
++ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMU_STATUS, 0);
++
++ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
++ SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
++
++ /* De-assert reset */
++ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
++ SMC_SYSCON_RESET_CNTL, rst_reg, 0);
++
++
++ PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS, INTERRUPTS_ENABLED, 1);
++
++
++ /* Call Test SMU message with 0x20000 offset to trigger SMU start */
++ smu7_send_msg_to_smc_offset(hwmgr);
++
++ /* Wait done bit to be set */
++ /* Check pass/failed indicator */
++
++ PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, SMU_STATUS, SMU_DONE, 0);
++
++ if (1 != PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
++ SMU_STATUS, SMU_PASS))
++ PP_ASSERT_WITH_CODE(false, "SMU Firmware start failed!", return -1);
++
++ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixFIRMWARE_FLAGS, 0);
++
++ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
++ SMC_SYSCON_RESET_CNTL, rst_reg, 1);
++
++ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
++ SMC_SYSCON_RESET_CNTL, rst_reg, 0);
++
++ /* Wait for firmware to initialize */
++ PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1);
++
++ return result;
++}
++
++static int vegam_start_smu_in_non_protection_mode(struct pp_hwmgr *hwmgr)
++{
++ int result = 0;
++
++ /* wait for smc boot up */
++ PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0);
++
++ /* Clear firmware interrupt enable flag */
++ /* PHM_WRITE_VFPF_INDIRECT_FIELD(pSmuMgr, SMC_IND, SMC_SYSCON_MISC_CNTL, pre_fetcher_en, 1); */
++ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
++ ixFIRMWARE_FLAGS, 0);
++
++ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
++ SMC_SYSCON_RESET_CNTL,
++ rst_reg, 1);
++
++ result = smu7_upload_smu_firmware_image(hwmgr);
++ if (result != 0)
++ return result;
++
++ /* Set SMC instruction start point at 0x0 */
++ smu7_program_jump_on_start(hwmgr);
++
++ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
++ SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
++
++ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
++ SMC_SYSCON_RESET_CNTL, rst_reg, 0);
++
++ /* Wait for firmware to initialize */
++
++ PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND,
++ FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1);
++
++ return result;
++}
++
++static int vegam_start_smu(struct pp_hwmgr *hwmgr)
++{
++ int result = 0;
++ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
++
++ /* Only start SMC if SMC RAM is not running */
++ if (!smu7_is_smc_ram_running(hwmgr) && hwmgr->not_vf) {
++ smu_data->protected_mode = (uint8_t)(PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
++ CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE));
++ smu_data->smu7_data.security_hard_key = (uint8_t)(PHM_READ_VFPF_INDIRECT_FIELD(
++ hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL));
++
++ /* Check if SMU is running in protected mode */
++ if (smu_data->protected_mode == 0)
++ result = vegam_start_smu_in_non_protection_mode(hwmgr);
++ else
++ result = vegam_start_smu_in_protection_mode(hwmgr);
++
++ if (result != 0)
++ PP_ASSERT_WITH_CODE(0, "Failed to load SMU ucode.", return result);
++ }
++
++ /* Setup SoftRegsStart here for register lookup in case DummyBackEnd is used and ProcessFirmwareHeader is not executed */
++ smu7_read_smc_sram_dword(hwmgr,
++ SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU75_Firmware_Header, SoftRegisters),
++ &(smu_data->smu7_data.soft_regs_start),
++ 0x40000);
++
++ result = smu7_request_smu_load_fw(hwmgr);
++
++ return result;
++}
++
++static int vegam_process_firmware_header(struct pp_hwmgr *hwmgr)
++{
++ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
++ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
++ uint32_t tmp;
++ int result;
++ bool error = false;
++
++ result = smu7_read_smc_sram_dword(hwmgr,
++ SMU7_FIRMWARE_HEADER_LOCATION +
++ offsetof(SMU75_Firmware_Header, DpmTable),
++ &tmp, SMC_RAM_END);
++
++ if (0 == result)
++ smu_data->smu7_data.dpm_table_start = tmp;
++
++ error |= (0 != result);
++
++ result = smu7_read_smc_sram_dword(hwmgr,
++ SMU7_FIRMWARE_HEADER_LOCATION +
++ offsetof(SMU75_Firmware_Header, SoftRegisters),
++ &tmp, SMC_RAM_END);
++
++ if (!result) {
++ data->soft_regs_start = tmp;
++ smu_data->smu7_data.soft_regs_start = tmp;
++ }
++
++ error |= (0 != result);
++
++ result = smu7_read_smc_sram_dword(hwmgr,
++ SMU7_FIRMWARE_HEADER_LOCATION +
++ offsetof(SMU75_Firmware_Header, mcRegisterTable),
++ &tmp, SMC_RAM_END);
++
++ if (!result)
++ smu_data->smu7_data.mc_reg_table_start = tmp;
++
++ result = smu7_read_smc_sram_dword(hwmgr,
++ SMU7_FIRMWARE_HEADER_LOCATION +
++ offsetof(SMU75_Firmware_Header, FanTable),
++ &tmp, SMC_RAM_END);
++
++ if (!result)
++ smu_data->smu7_data.fan_table_start = tmp;
++
++ error |= (0 != result);
++
++ result = smu7_read_smc_sram_dword(hwmgr,
++ SMU7_FIRMWARE_HEADER_LOCATION +
++ offsetof(SMU75_Firmware_Header, mcArbDramTimingTable),
++ &tmp, SMC_RAM_END);
++
++ if (!result)
++ smu_data->smu7_data.arb_table_start = tmp;
++
++ error |= (0 != result);
++
++ result = smu7_read_smc_sram_dword(hwmgr,
++ SMU7_FIRMWARE_HEADER_LOCATION +
++ offsetof(SMU75_Firmware_Header, Version),
++ &tmp, SMC_RAM_END);
++
++ if (!result)
++ hwmgr->microcode_version_info.SMC = tmp;
++
++ error |= (0 != result);
++
++ return error ? -1 : 0;
++}
++
++static bool vegam_is_dpm_running(struct pp_hwmgr *hwmgr)
++{
++ return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
++ CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
++ ? true : false;
++}
++
++static uint32_t vegam_get_mac_definition(uint32_t value)
++{
++ switch (value) {
++ case SMU_MAX_LEVELS_GRAPHICS:
++ return SMU75_MAX_LEVELS_GRAPHICS;
++ case SMU_MAX_LEVELS_MEMORY:
++ return SMU75_MAX_LEVELS_MEMORY;
++ case SMU_MAX_LEVELS_LINK:
++ return SMU75_MAX_LEVELS_LINK;
++ case SMU_MAX_ENTRIES_SMIO:
++ return SMU75_MAX_ENTRIES_SMIO;
++ case SMU_MAX_LEVELS_VDDC:
++ return SMU75_MAX_LEVELS_VDDC;
++ case SMU_MAX_LEVELS_VDDGFX:
++ return SMU75_MAX_LEVELS_VDDGFX;
++ case SMU_MAX_LEVELS_VDDCI:
++ return SMU75_MAX_LEVELS_VDDCI;
++ case SMU_MAX_LEVELS_MVDD:
++ return SMU75_MAX_LEVELS_MVDD;
++ case SMU_UVD_MCLK_HANDSHAKE_DISABLE:
++ return SMU7_UVD_MCLK_HANDSHAKE_DISABLE |
++ SMU7_VCE_MCLK_HANDSHAKE_DISABLE;
++ }
++
++ pr_warn("can't get the mac of %x\n", value);
++ return 0;
++}
++
++static int vegam_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
++{
++ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
++ uint32_t mm_boot_level_offset, mm_boot_level_value;
++ struct phm_ppt_v1_information *table_info =
++ (struct phm_ppt_v1_information *)(hwmgr->pptable);
++
++ smu_data->smc_state_table.UvdBootLevel = 0;
++ if (table_info->mm_dep_table->count > 0)
++ smu_data->smc_state_table.UvdBootLevel =
++ (uint8_t) (table_info->mm_dep_table->count - 1);
++ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU75_Discrete_DpmTable,
++ UvdBootLevel);
++ mm_boot_level_offset /= 4;
++ mm_boot_level_offset *= 4;
++ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
++ CGS_IND_REG__SMC, mm_boot_level_offset);
++ mm_boot_level_value &= 0x00FFFFFF;
++ mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
++ cgs_write_ind_register(hwmgr->device,
++ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
++
++ if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_UVDDPM) ||
++ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_StablePState))
++ smum_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_UVDDPM_SetEnabledMask,
++ (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
++ return 0;
++}
++
++static int vegam_update_vce_smc_table(struct pp_hwmgr *hwmgr)
++{
++ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
++ uint32_t mm_boot_level_offset, mm_boot_level_value;
++ struct phm_ppt_v1_information *table_info =
++ (struct phm_ppt_v1_information *)(hwmgr->pptable);
++
++ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_StablePState))
++ smu_data->smc_state_table.VceBootLevel =
++ (uint8_t) (table_info->mm_dep_table->count - 1);
++ else
++ smu_data->smc_state_table.VceBootLevel = 0;
++
++ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
++ offsetof(SMU75_Discrete_DpmTable, VceBootLevel);
++ mm_boot_level_offset /= 4;
++ mm_boot_level_offset *= 4;
++ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
++ CGS_IND_REG__SMC, mm_boot_level_offset);
++ mm_boot_level_value &= 0xFF00FFFF;
++ mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
++ cgs_write_ind_register(hwmgr->device,
++ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
++
++ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
++ smum_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_VCEDPM_SetEnabledMask,
++ (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
++ return 0;
++}
++
++static int vegam_update_samu_smc_table(struct pp_hwmgr *hwmgr)
++{
++ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
++ uint32_t mm_boot_level_offset, mm_boot_level_value;
++
++
++ smu_data->smc_state_table.SamuBootLevel = 0;
++ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
++ offsetof(SMU75_Discrete_DpmTable, SamuBootLevel);
++
++ mm_boot_level_offset /= 4;
++ mm_boot_level_offset *= 4;
++ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
++ CGS_IND_REG__SMC, mm_boot_level_offset);
++ mm_boot_level_value &= 0xFFFFFF00;
++ mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
++ cgs_write_ind_register(hwmgr->device,
++ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
++
++ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_StablePState))
++ smum_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_SAMUDPM_SetEnabledMask,
++ (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
++ return 0;
++}
++
++
++static int vegam_update_bif_smc_table(struct pp_hwmgr *hwmgr)
++{
++ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
++ struct phm_ppt_v1_information *table_info =
++ (struct phm_ppt_v1_information *)(hwmgr->pptable);
++ struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
++ int max_entry, i;
++
++ max_entry = (SMU75_MAX_LEVELS_LINK < pcie_table->count) ?
++ SMU75_MAX_LEVELS_LINK :
++ pcie_table->count;
++ /* Setup BIF_SCLK levels */
++ for (i = 0; i < max_entry; i++)
++ smu_data->bif_sclk_table[i] = pcie_table->entries[i].pcie_sclk;
++ return 0;
++}
++
++static int vegam_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
++{
++ switch (type) {
++ case SMU_UVD_TABLE:
++ vegam_update_uvd_smc_table(hwmgr);
++ break;
++ case SMU_VCE_TABLE:
++ vegam_update_vce_smc_table(hwmgr);
++ break;
++ case SMU_SAMU_TABLE:
++ vegam_update_samu_smc_table(hwmgr);
++ break;
++ case SMU_BIF_TABLE:
++ vegam_update_bif_smc_table(hwmgr);
++ break;
++ default:
++ break;
++ }
++ return 0;
++}
++
++static void vegam_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
++{
++ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
++ struct phm_ppt_v1_information *table_info =
++ (struct phm_ppt_v1_information *)(hwmgr->pptable);
++
++ if (table_info &&
++ table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
++ table_info->cac_dtp_table->usPowerTuneDataSetID)
++ smu_data->power_tune_defaults =
++ &vegam_power_tune_data_set_array
++ [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
++ else
++ smu_data->power_tune_defaults = &vegam_power_tune_data_set_array[0];
++
++}
++
++static int vegam_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
++ SMU75_Discrete_DpmTable *table)
++{
++ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
++ uint32_t count, level;
++
++ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
++ count = data->mvdd_voltage_table.count;
++ if (count > SMU_MAX_SMIO_LEVELS)
++ count = SMU_MAX_SMIO_LEVELS;
++ for (level = 0; level < count; level++) {
++ table->SmioTable2.Pattern[level].Voltage = PP_HOST_TO_SMC_US(
++ data->mvdd_voltage_table.entries[level].value * VOLTAGE_SCALE);
++ /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
++ table->SmioTable2.Pattern[level].Smio =
++ (uint8_t) level;
++ table->Smio[level] |=
++ data->mvdd_voltage_table.entries[level].smio_low;
++ }
++ table->SmioMask2 = data->mvdd_voltage_table.mask_low;
++
++ table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count);
++ }
++
++ return 0;
++}
++
++static int vegam_populate_smc_vddci_table(struct pp_hwmgr *hwmgr,
++ struct SMU75_Discrete_DpmTable *table)
++{
++ uint32_t count, level;
++ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
++
++ count = data->vddci_voltage_table.count;
++
++ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
++ if (count > SMU_MAX_SMIO_LEVELS)
++ count = SMU_MAX_SMIO_LEVELS;
++ for (level = 0; level < count; ++level) {
++ table->SmioTable1.Pattern[level].Voltage = PP_HOST_TO_SMC_US(
++ data->vddci_voltage_table.entries[level].value * VOLTAGE_SCALE);
++ table->SmioTable1.Pattern[level].Smio = (uint8_t) level;
++
++ table->Smio[level] |= data->vddci_voltage_table.entries[level].smio_low;
++ }
++ }
++
++ table->SmioMask1 = data->vddci_voltage_table.mask_low;
++
++ return 0;
++}
++
++static int vegam_populate_cac_table(struct pp_hwmgr *hwmgr,
++ struct SMU75_Discrete_DpmTable *table)
++{
++ uint32_t count;
++ uint8_t index;
++ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
++ struct phm_ppt_v1_information *table_info =
++ (struct phm_ppt_v1_information *)(hwmgr->pptable);
++ struct phm_ppt_v1_voltage_lookup_table *lookup_table =
++ table_info->vddc_lookup_table;
++ /* the table is already byte-swapped, so in order to use a value from it,
++ * we need to swap it back.
++ * We are populating vddc CAC data to BapmVddc table
++ * in split and merged mode
++ */
++ for (count = 0; count < lookup_table->count; count++) {
++ index = phm_get_voltage_index(lookup_table,
++ data->vddc_voltage_table.entries[count].value);
++ table->BapmVddcVidLoSidd[count] =
++ convert_to_vid(lookup_table->entries[index].us_cac_low);
++ table->BapmVddcVidHiSidd[count] =
++ convert_to_vid(lookup_table->entries[index].us_cac_mid);
++ table->BapmVddcVidHiSidd2[count] =
++ convert_to_vid(lookup_table->entries[index].us_cac_high);
++ }
++
++ return 0;
++}
++
++static int vegam_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
++ struct SMU75_Discrete_DpmTable *table)
++{
++ vegam_populate_smc_vddci_table(hwmgr, table);
++ vegam_populate_smc_mvdd_table(hwmgr, table);
++ vegam_populate_cac_table(hwmgr, table);
++
++ return 0;
++}
++
++static int vegam_populate_ulv_level(struct pp_hwmgr *hwmgr,
++ struct SMU75_Discrete_Ulv *state)
++{
++ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
++ struct phm_ppt_v1_information *table_info =
++ (struct phm_ppt_v1_information *)(hwmgr->pptable);
++
++ state->CcPwrDynRm = 0;
++ state->CcPwrDynRm1 = 0;
++
++ state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
++ state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
++ VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
++
++ state->VddcPhase = data->vddc_phase_shed_control ^ 0x3;
++
++ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
++ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
++ CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
++
++ return 0;
++}
++
++static int vegam_populate_ulv_state(struct pp_hwmgr *hwmgr,
++ struct SMU75_Discrete_DpmTable *table)
++{
++ return vegam_populate_ulv_level(hwmgr, &table->Ulv);
++}
++
++static int vegam_populate_smc_link_level(struct pp_hwmgr *hwmgr,
++ struct SMU75_Discrete_DpmTable *table)
++{
++ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
++ struct vegam_smumgr *smu_data =
++ (struct vegam_smumgr *)(hwmgr->smu_backend);
++ struct smu7_dpm_table *dpm_table = &data->dpm_table;
++ int i;
++
++ /* Index (dpm_table->pcie_speed_table.count)
++ * is reserved for PCIE boot level. */
++ for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
++ table->LinkLevel[i].PcieGenSpeed =
++ (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
++ table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
++ dpm_table->pcie_speed_table.dpm_levels[i].param1);
++ table->LinkLevel[i].EnabledForActivity = 1;
++ table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
++ table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
++ table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
++ }
++
++ smu_data->smc_state_table.LinkLevelCount =
++ (uint8_t)dpm_table->pcie_speed_table.count;
++
++/* TODO: move to hwmgr */
++ data->dpm_level_enable_mask.pcie_dpm_enable_mask =
++ phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
++
++ return 0;
++}
++
++static int vegam_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
++ struct phm_ppt_v1_clock_voltage_dependency_table *dep_table,
++ uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
++{
++ uint32_t i;
++ uint16_t vddci;
++ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
++
++ *voltage = *mvdd = 0;
++
++ /* the clock-voltage dependency table is empty */
++ if (dep_table->count == 0)
++ return -EINVAL;
++
++ for (i = 0; i < dep_table->count; i++) {
++ /* find the first sclk entry at or above the requested clock */
++ if (dep_table->entries[i].clk >= clock) {
++ *voltage |= (dep_table->entries[i].vddc *
++ VOLTAGE_SCALE) << VDDC_SHIFT;
++ if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
++ *voltage |= (data->vbios_boot_state.vddci_bootup_value *
++ VOLTAGE_SCALE) << VDDCI_SHIFT;
++ else if (dep_table->entries[i].vddci)
++ *voltage |= (dep_table->entries[i].vddci *
++ VOLTAGE_SCALE) << VDDCI_SHIFT;
++ else {
++ vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
++ (dep_table->entries[i].vddc -
++ (uint16_t)VDDC_VDDCI_DELTA));
++ *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
++ }
++
++ if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
++ *mvdd = data->vbios_boot_state.mvdd_bootup_value *
++ VOLTAGE_SCALE;
++ else if (dep_table->entries[i].mvdd)
++ *mvdd = (uint32_t) dep_table->entries[i].mvdd *
++ VOLTAGE_SCALE;
++
++ *voltage |= 1 << PHASES_SHIFT;
++ return 0;
++ }
++ }
++
++ /* requested sclk exceeds the max sclk in the dependency table */
++ *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
++ vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
++ (dep_table->entries[i - 1].vddc -
++ (uint16_t)VDDC_VDDCI_DELTA));
++
++ if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
++ *voltage |= (data->vbios_boot_state.vddci_bootup_value *
++ VOLTAGE_SCALE) << VDDCI_SHIFT;
++ else if (dep_table->entries[i - 1].vddci)
++ *voltage |= (dep_table->entries[i - 1].vddci *
++ VOLTAGE_SCALE) << VDDCI_SHIFT;
++ else
++ *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
++
++ if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
++ *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
++ else if (dep_table->entries[i - 1].mvdd)
++ *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
++
++ return 0;
++}
++
++static void vegam_get_sclk_range_table(struct pp_hwmgr *hwmgr,
++ SMU75_Discrete_DpmTable *table)
++{
++ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
++ uint32_t i, ref_clk;
++
++ struct pp_atom_ctrl_sclk_range_table range_table_from_vbios = { { {0} } };
++
++ ref_clk = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
++
++ if (0 == atomctrl_get_smc_sclk_range_table(hwmgr, &range_table_from_vbios)) {
++ for (i = 0; i < NUM_SCLK_RANGE; i++) {
++ table->SclkFcwRangeTable[i].vco_setting =
++ range_table_from_vbios.entry[i].ucVco_setting;
++ table->SclkFcwRangeTable[i].postdiv =
++ range_table_from_vbios.entry[i].ucPostdiv;
++ table->SclkFcwRangeTable[i].fcw_pcc =
++ range_table_from_vbios.entry[i].usFcw_pcc;
++
++ table->SclkFcwRangeTable[i].fcw_trans_upper =
++ range_table_from_vbios.entry[i].usFcw_trans_upper;
++ table->SclkFcwRangeTable[i].fcw_trans_lower =
++ range_table_from_vbios.entry[i].usRcw_trans_lower;
++
++ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
++ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
++ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
++ }
++ return;
++ }
++
++ for (i = 0; i < NUM_SCLK_RANGE; i++) {
++ smu_data->range_table[i].trans_lower_frequency =
++ (ref_clk * Range_Table[i].fcw_trans_lower) >> Range_Table[i].postdiv;
++ smu_data->range_table[i].trans_upper_frequency =
++ (ref_clk * Range_Table[i].fcw_trans_upper) >> Range_Table[i].postdiv;
++
++ table->SclkFcwRangeTable[i].vco_setting = Range_Table[i].vco_setting;
++ table->SclkFcwRangeTable[i].postdiv = Range_Table[i].postdiv;
++ table->SclkFcwRangeTable[i].fcw_pcc = Range_Table[i].fcw_pcc;
++
++ table->SclkFcwRangeTable[i].fcw_trans_upper = Range_Table[i].fcw_trans_upper;
++ table->SclkFcwRangeTable[i].fcw_trans_lower = Range_Table[i].fcw_trans_lower;
++
++ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
++ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
++ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
++ }
++}
++
++static int vegam_calculate_sclk_params(struct pp_hwmgr *hwmgr,
++ uint32_t clock, SMU_SclkSetting *sclk_setting)
++{
++ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
++ const SMU75_Discrete_DpmTable *table = &(smu_data->smc_state_table);
++ struct pp_atomctrl_clock_dividers_ai dividers;
++ uint32_t ref_clock;
++ uint32_t pcc_target_percent, pcc_target_freq, ss_target_percent, ss_target_freq;
++ uint8_t i;
++ int result;
++ uint64_t temp;
++
++ sclk_setting->SclkFrequency = clock;
++ /* get the engine clock dividers for this clock value */
++ result = atomctrl_get_engine_pll_dividers_ai(hwmgr, clock, &dividers);
++ if (result == 0) {
++ sclk_setting->Fcw_int = dividers.usSclk_fcw_int;
++ sclk_setting->Fcw_frac = dividers.usSclk_fcw_frac;
++ sclk_setting->Pcc_fcw_int = dividers.usPcc_fcw_int;
++ sclk_setting->PllRange = dividers.ucSclkPllRange;
++ sclk_setting->Sclk_slew_rate = 0x400;
++ sclk_setting->Pcc_up_slew_rate = dividers.usPcc_fcw_slew_frac;
++ sclk_setting->Pcc_down_slew_rate = 0xffff;
++ sclk_setting->SSc_En = dividers.ucSscEnable;
++ sclk_setting->Fcw1_int = dividers.usSsc_fcw1_int;
++ sclk_setting->Fcw1_frac = dividers.usSsc_fcw1_frac;
++ sclk_setting->Sclk_ss_slew_rate = dividers.usSsc_fcw_slew_frac;
++ return result;
++ }
++
++ ref_clock = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
++
++ for (i = 0; i < NUM_SCLK_RANGE; i++) {
++ if (clock > smu_data->range_table[i].trans_lower_frequency
++ && clock <= smu_data->range_table[i].trans_upper_frequency) {
++ sclk_setting->PllRange = i;
++ break;
++ }
++ }
++
++ sclk_setting->Fcw_int = (uint16_t)
++ ((clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) /
++ ref_clock);
++ temp = clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
++ temp <<= 0x10;
++ do_div(temp, ref_clock);
++ sclk_setting->Fcw_frac = temp & 0xffff;
++
++ pcc_target_percent = 10; /* Hardcode 10% for now. */
++ pcc_target_freq = clock - (clock * pcc_target_percent / 100);
++ sclk_setting->Pcc_fcw_int = (uint16_t)
++ ((pcc_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) /
++ ref_clock);
++
++ ss_target_percent = 2; /* Hardcode 2% for now. */
++ sclk_setting->SSc_En = 0;
++ if (ss_target_percent) {
++ sclk_setting->SSc_En = 1;
++ ss_target_freq = clock - (clock * ss_target_percent / 100);
++ sclk_setting->Fcw1_int = (uint16_t)
++ ((ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) /
++ ref_clock);
++ temp = ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
++ temp <<= 0x10;
++ do_div(temp, ref_clock);
++ sclk_setting->Fcw1_frac = temp & 0xffff;
++ }
++
++ return 0;
++}
++
++static uint8_t vegam_get_sleep_divider_id_from_clock(uint32_t clock,
++ uint32_t clock_insr)
++{
++ uint8_t i;
++ uint32_t temp;
++ uint32_t min = max(clock_insr, (uint32_t)SMU7_MINIMUM_ENGINE_CLOCK);
++
++ PP_ASSERT_WITH_CODE((clock >= min),
++ "Engine clock can't satisfy stutter requirement!",
++ return 0);
++ for (i = 31; ; i--) {
++ temp = clock / (i + 1);
++
++ if (temp >= min || i == 0)
++ break;
++ }
++ return i;
++}
++
++static int vegam_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
++ uint32_t clock, struct SMU75_Discrete_GraphicsLevel *level)
++{
++ int result;
++ /* PP_Clocks minClocks; */
++ uint32_t mvdd;
++ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
++ struct phm_ppt_v1_information *table_info =
++ (struct phm_ppt_v1_information *)(hwmgr->pptable);
++ SMU_SclkSetting curr_sclk_setting = { 0 };
++
++ result = vegam_calculate_sclk_params(hwmgr, clock, &curr_sclk_setting);
++
++ /* populate graphics levels */
++ result = vegam_get_dependency_volt_by_clk(hwmgr,
++ table_info->vdd_dep_on_sclk, clock,
++ &level->MinVoltage, &mvdd);
++
++ PP_ASSERT_WITH_CODE((0 == result),
++ "can not find VDDC voltage value for "
++ "VDDC engine clock dependency table",
++ return result);
++ level->ActivityLevel = (uint16_t)(SclkDPMTuning_VEGAM >> DPMTuning_Activity_Shift);
++
++ level->CcPwrDynRm = 0;
++ level->CcPwrDynRm1 = 0;
++ level->EnabledForActivity = 0;
++ level->EnabledForThrottle = 1;
++ level->VoltageDownHyst = 0;
++ level->PowerThrottle = 0;
++ data->display_timing.min_clock_in_sr = hwmgr->display_config->min_core_set_clock_in_sr;
++
++ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
++ level->DeepSleepDivId = vegam_get_sleep_divider_id_from_clock(clock,
++ hwmgr->display_config->min_core_set_clock_in_sr);
++
++ level->SclkSetting = curr_sclk_setting;
++
++ CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
++ CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
++ CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
++ CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
++ CONVERT_FROM_HOST_TO_SMC_UL(level->SclkSetting.SclkFrequency);
++ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_int);
++ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_frac);
++ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_fcw_int);
++ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_slew_rate);
++ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_up_slew_rate);
++ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_down_slew_rate);
++ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_int);
++ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_frac);
++ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_ss_slew_rate);
++ return 0;
++}
++
++static int vegam_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
++{
++ struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
++ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
++ struct smu7_dpm_table *dpm_table = &hw_data->dpm_table;
++ struct phm_ppt_v1_information *table_info =
++ (struct phm_ppt_v1_information *)(hwmgr->pptable);
++ struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
++ uint8_t pcie_entry_cnt = (uint8_t) hw_data->dpm_table.pcie_speed_table.count;
++ int result = 0;
++ uint32_t array = smu_data->smu7_data.dpm_table_start +
++ offsetof(SMU75_Discrete_DpmTable, GraphicsLevel);
++ uint32_t array_size = sizeof(struct SMU75_Discrete_GraphicsLevel) *
++ SMU75_MAX_LEVELS_GRAPHICS;
++ struct SMU75_Discrete_GraphicsLevel *levels =
++ smu_data->smc_state_table.GraphicsLevel;
++ uint32_t i, max_entry;
++ uint8_t hightest_pcie_level_enabled = 0,
++ lowest_pcie_level_enabled = 0,
++ mid_pcie_level_enabled = 0,
++ count = 0;
++
++ vegam_get_sclk_range_table(hwmgr, &(smu_data->smc_state_table));
++
++ for (i = 0; i < dpm_table->sclk_table.count; i++) {
++
++ result = vegam_populate_single_graphic_level(hwmgr,
++ dpm_table->sclk_table.dpm_levels[i].value,
++ &(smu_data->smc_state_table.GraphicsLevel[i]));
++ if (result)
++ return result;
++
++ levels[i].UpHyst = (uint8_t)
++ (SclkDPMTuning_VEGAM >> DPMTuning_Uphyst_Shift);
++ levels[i].DownHyst = (uint8_t)
++ (SclkDPMTuning_VEGAM >> DPMTuning_Downhyst_Shift);
++ /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
++ if (i > 1)
++ levels[i].DeepSleepDivId = 0;
++ }
++ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_SPLLShutdownSupport))
++ smu_data->smc_state_table.GraphicsLevel[0].SclkSetting.SSc_En = 0;
++
++ smu_data->smc_state_table.GraphicsDpmLevelCount =
++ (uint8_t)dpm_table->sclk_table.count;
++ hw_data->dpm_level_enable_mask.sclk_dpm_enable_mask =
++ phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
++
++ for (i = 0; i < dpm_table->sclk_table.count; i++)
++ levels[i].EnabledForActivity =
++ (hw_data->dpm_level_enable_mask.sclk_dpm_enable_mask >> i) & 0x1;
++
++ if (pcie_table != NULL) {
++ PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
++ "There must be 1 or more PCIE levels defined in PPTable.",
++ return -EINVAL);
++ max_entry = pcie_entry_cnt - 1;
++ for (i = 0; i < dpm_table->sclk_table.count; i++)
++ levels[i].pcieDpmLevel =
++ (uint8_t) ((i < max_entry) ? i : max_entry);
++ } else {
++ while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
++ ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
++ (1 << (hightest_pcie_level_enabled + 1))) != 0))
++ hightest_pcie_level_enabled++;
++
++ while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
++ ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
++ (1 << lowest_pcie_level_enabled)) == 0))
++ lowest_pcie_level_enabled++;
++
++ while ((count < hightest_pcie_level_enabled) &&
++ ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
++ (1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
++ count++;
++
++ mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
++ hightest_pcie_level_enabled ?
++ (lowest_pcie_level_enabled + 1 + count) :
++ hightest_pcie_level_enabled;
++
++ /* set pcieDpmLevel to hightest_pcie_level_enabled */
++ for (i = 2; i < dpm_table->sclk_table.count; i++)
++ levels[i].pcieDpmLevel = hightest_pcie_level_enabled;
++
++ /* set pcieDpmLevel to lowest_pcie_level_enabled */
++ levels[0].pcieDpmLevel = lowest_pcie_level_enabled;
++
++ /* set pcieDpmLevel to mid_pcie_level_enabled */
++ levels[1].pcieDpmLevel = mid_pcie_level_enabled;
++ }
++ /* level count will send to smc once at init smc table and never change */
++ result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
++ (uint32_t)array_size, SMC_RAM_END);
++
++ return result;
++}
++
++static int vegam_calculate_mclk_params(struct pp_hwmgr *hwmgr,
++ uint32_t clock, struct SMU75_Discrete_MemoryLevel *mem_level)
++{
++ struct pp_atomctrl_memory_clock_param_ai mpll_param;
++
++ PP_ASSERT_WITH_CODE(!atomctrl_get_memory_pll_dividers_ai(hwmgr,
++ clock, &mpll_param),
++ "Failed to retrieve memory pll parameter.",
++ return -EINVAL);
++
++ mem_level->MclkFrequency = (uint32_t)mpll_param.ulClock;
++ mem_level->Fcw_int = (uint16_t)mpll_param.ulMclk_fcw_int;
++ mem_level->Fcw_frac = (uint16_t)mpll_param.ulMclk_fcw_frac;
++ mem_level->Postdiv = (uint8_t)mpll_param.ulPostDiv;
++
++ return 0;
++}
++
++static int vegam_populate_single_memory_level(struct pp_hwmgr *hwmgr,
++ uint32_t clock, struct SMU75_Discrete_MemoryLevel *mem_level)
++{
++ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
++ struct phm_ppt_v1_information *table_info =
++ (struct phm_ppt_v1_information *)(hwmgr->pptable);
++ int result = 0;
++ uint32_t mclk_stutter_mode_threshold = 60000;
++
++
++ if (table_info->vdd_dep_on_mclk) {
++ result = vegam_get_dependency_volt_by_clk(hwmgr,
++ table_info->vdd_dep_on_mclk, clock,
++ &mem_level->MinVoltage, &mem_level->MinMvdd);
++ PP_ASSERT_WITH_CODE(!result,
++ "can not find MinVddc voltage value from memory "
++ "VDDC voltage dependency table", return result);
++ }
++
++ result = vegam_calculate_mclk_params(hwmgr, clock, mem_level);
++ PP_ASSERT_WITH_CODE(!result,
++ "Failed to calculate mclk params.",
++ return -EINVAL);
++
++ mem_level->EnabledForThrottle = 1;
++ mem_level->EnabledForActivity = 0;
++ mem_level->VoltageDownHyst = 0;
++ mem_level->ActivityLevel = (uint16_t)
++ (MemoryDPMTuning_VEGAM >> DPMTuning_Activity_Shift);
++ mem_level->StutterEnable = false;
++ mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
++
++ data->display_timing.num_existing_displays = hwmgr->display_config->num_display;
++
++ if (mclk_stutter_mode_threshold &&
++ (clock <= mclk_stutter_mode_threshold) &&
++ (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
++ STUTTER_ENABLE) & 0x1))
++ mem_level->StutterEnable = true;
++
++ if (!result) {
++ CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
++ CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
++ CONVERT_FROM_HOST_TO_SMC_US(mem_level->Fcw_int);
++ CONVERT_FROM_HOST_TO_SMC_US(mem_level->Fcw_frac);
++ CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
++ CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
++ }
++
++ return result;
++}
++
++static int vegam_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
++{
++ struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
++ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
++ struct smu7_dpm_table *dpm_table = &hw_data->dpm_table;
++ int result;
++ /* populate MCLK dpm table to SMU7 */
++ uint32_t array = smu_data->smu7_data.dpm_table_start +
++ offsetof(SMU75_Discrete_DpmTable, MemoryLevel);
++ uint32_t array_size = sizeof(SMU75_Discrete_MemoryLevel) *
++ SMU75_MAX_LEVELS_MEMORY;
++ struct SMU75_Discrete_MemoryLevel *levels =
++ smu_data->smc_state_table.MemoryLevel;
++ uint32_t i;
++
++ for (i = 0; i < dpm_table->mclk_table.count; i++) {
++ PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
++ "can not populate memory level as memory clock is zero",
++ return -EINVAL);
++ result = vegam_populate_single_memory_level(hwmgr,
++ dpm_table->mclk_table.dpm_levels[i].value,
++ &levels[i]);
++
++ if (result)
++ return result;
++
++ levels[i].UpHyst = (uint8_t)
++ (MemoryDPMTuning_VEGAM >> DPMTuning_Uphyst_Shift);
++ levels[i].DownHyst = (uint8_t)
++ (MemoryDPMTuning_VEGAM >> DPMTuning_Downhyst_Shift);
++ }
++
++ smu_data->smc_state_table.MemoryDpmLevelCount =
++ (uint8_t)dpm_table->mclk_table.count;
++ hw_data->dpm_level_enable_mask.mclk_dpm_enable_mask =
++ phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
++
++ for (i = 0; i < dpm_table->mclk_table.count; i++)
++ levels[i].EnabledForActivity =
++ (hw_data->dpm_level_enable_mask.mclk_dpm_enable_mask >> i) & 0x1;
++
++ levels[dpm_table->mclk_table.count - 1].DisplayWatermark =
++ PPSMC_DISPLAY_WATERMARK_HIGH;
++
++ /* level count will send to smc once at init smc table and never change */
++ result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
++ (uint32_t)array_size, SMC_RAM_END);
++
++ return result;
++}
++
++static int vegam_populate_mvdd_value(struct pp_hwmgr *hwmgr,
++ uint32_t mclk, SMIO_Pattern *smio_pat)
++{
++ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
++ struct phm_ppt_v1_information *table_info =
++ (struct phm_ppt_v1_information *)(hwmgr->pptable);
++ uint32_t i = 0;
++
++ if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
++ /* find the first mvdd entry whose clock is at least the requested mclk */
++ for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
++ if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
++ smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
++ break;
++ }
++ }
++ PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
++ "MVDD Voltage is outside the supported range.",
++ return -EINVAL);
++ } else
++ return -EINVAL;
++
++ return 0;
++}
++
++static int vegam_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
++ SMU75_Discrete_DpmTable *table)
++{
++ int result = 0;
++ uint32_t sclk_frequency;
++ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
++ struct phm_ppt_v1_information *table_info =
++ (struct phm_ppt_v1_information *)(hwmgr->pptable);
++ SMIO_Pattern vol_level;
++ uint32_t mvdd;
++ uint16_t us_mvdd;
++
++ table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
++
++ /* Get MinVoltage and Frequency from DPM0,
++ * already converted to SMC_UL */
++ sclk_frequency = data->vbios_boot_state.sclk_bootup_value;
++ result = vegam_get_dependency_volt_by_clk(hwmgr,
++ table_info->vdd_dep_on_sclk,
++ sclk_frequency,
++ &table->ACPILevel.MinVoltage, &mvdd);
++ PP_ASSERT_WITH_CODE(!result,
++ "Cannot find ACPI VDDC voltage value "
++ "in Clock Dependency Table",
++ );
++
++ result = vegam_calculate_sclk_params(hwmgr, sclk_frequency,
++ &(table->ACPILevel.SclkSetting));
++ PP_ASSERT_WITH_CODE(!result,
++ "Error retrieving Engine Clock dividers from VBIOS.",
++ return result);
++
++ table->ACPILevel.DeepSleepDivId = 0;
++ table->ACPILevel.CcPwrDynRm = 0;
++ table->ACPILevel.CcPwrDynRm1 = 0;
++
++ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
++ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
++ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
++ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
++
++ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkSetting.SclkFrequency);
++ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_int);
++ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_frac);
++ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_fcw_int);
++ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_slew_rate);
++ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_up_slew_rate);
++ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_down_slew_rate);
++ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_int);
++ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac);
++ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate);
++
++
++ /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
++ table->MemoryACPILevel.MclkFrequency = data->vbios_boot_state.mclk_bootup_value;
++ result = vegam_get_dependency_volt_by_clk(hwmgr,
++ table_info->vdd_dep_on_mclk,
++ table->MemoryACPILevel.MclkFrequency,
++ &table->MemoryACPILevel.MinVoltage, &mvdd);
++ PP_ASSERT_WITH_CODE((0 == result),
++ "Cannot find ACPI VDDCI voltage value "
++ "in Clock Dependency Table",
++ );
++
++ us_mvdd = 0;
++ if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
++ (data->mclk_dpm_key_disabled))
++ us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
++ else {
++ if (!vegam_populate_mvdd_value(hwmgr,
++ data->dpm_table.mclk_table.dpm_levels[0].value,
++ &vol_level))
++ us_mvdd = vol_level.Voltage;
++ }
++
++ if (!vegam_populate_mvdd_value(hwmgr, 0, &vol_level))
++ table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage);
++ else
++ table->MemoryACPILevel.MinMvdd = 0;
++
++ table->MemoryACPILevel.StutterEnable = false;
++
++ table->MemoryACPILevel.EnabledForThrottle = 0;
++ table->MemoryACPILevel.EnabledForActivity = 0;
++ table->MemoryACPILevel.UpHyst = 0;
++ table->MemoryACPILevel.DownHyst = 100;
++ table->MemoryACPILevel.VoltageDownHyst = 0;
++ table->MemoryACPILevel.ActivityLevel =
++ PP_HOST_TO_SMC_US(data->current_profile_setting.mclk_activity);
++
++ CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
++ CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
++
++ return result;
++}
++
++static int vegam_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
++ SMU75_Discrete_DpmTable *table)
++{
++ int result = -EINVAL;
++ uint8_t count;
++ struct pp_atomctrl_clock_dividers_vi dividers;
++ struct phm_ppt_v1_information *table_info =
++ (struct phm_ppt_v1_information *)(hwmgr->pptable);
++ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
++ table_info->mm_dep_table;
++ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
++ uint32_t vddci;
++
++ table->VceLevelCount = (uint8_t)(mm_table->count);
++ table->VceBootLevel = 0;
++
++ for (count = 0; count < table->VceLevelCount; count++) {
++ table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
++ table->VceLevel[count].MinVoltage = 0;
++ table->VceLevel[count].MinVoltage |=
++ (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
++
++ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
++ vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
++ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
++ else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
++ vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
++ else
++ vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
++
++
++ table->VceLevel[count].MinVoltage |=
++ (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
++ table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
++
++ /* retrieve divider value for VBIOS */
++ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
++ table->VceLevel[count].Frequency, &dividers);
++ PP_ASSERT_WITH_CODE((0 == result),
++ "can not find divide id for VCE engine clock",
++ return result);
++
++ table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
++
++ CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
++ CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
++ }
++ return result;
++}
++
++static int vegam_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
++ SMU75_Discrete_DpmTable *table)
++{
++ int result = -EINVAL;
++ uint8_t count;
++ struct pp_atomctrl_clock_dividers_vi dividers;
++ struct phm_ppt_v1_information *table_info =
++ (struct phm_ppt_v1_information *)(hwmgr->pptable);
++ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
++ table_info->mm_dep_table;
++ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
++ uint32_t vddci;
++
++ table->SamuBootLevel = 0;
++ table->SamuLevelCount = (uint8_t)(mm_table->count);
++
++ for (count = 0; count < table->SamuLevelCount; count++) {
++ /* not sure whether we need evclk or not */
++ table->SamuLevel[count].MinVoltage = 0;
++ table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
++ table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
++ VOLTAGE_SCALE) << VDDC_SHIFT;
++
++ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
++ vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
++ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
++ else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
++ vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
++ else
++ vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
++
++ table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
++ table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
++
++ /* retrieve divider value for VBIOS */
++ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
++ table->SamuLevel[count].Frequency, &dividers);
++ PP_ASSERT_WITH_CODE((0 == result),
++ "can not find divide id for samu clock", return result);
++
++ table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
++
++ CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
++ CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
++ }
++ return result;
++}
++
++static int vegam_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
++ int32_t eng_clock, int32_t mem_clock,
++ SMU75_Discrete_MCArbDramTimingTableEntry *arb_regs)
++{
++ uint32_t dram_timing;
++ uint32_t dram_timing2;
++ uint32_t burst_time;
++ uint32_t rfsh_rate;
++ uint32_t misc3;
++
++ int result;
++
++ result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
++ eng_clock, mem_clock);
++ PP_ASSERT_WITH_CODE(result == 0,
++ "Error calling VBIOS to set DRAM_TIMING.",
++ return result);
++
++ dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
++ dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
++ burst_time = cgs_read_register(hwmgr->device, mmMC_ARB_BURST_TIME);
++ rfsh_rate = cgs_read_register(hwmgr->device, mmMC_ARB_RFSH_RATE);
++ misc3 = cgs_read_register(hwmgr->device, mmMC_ARB_MISC3);
++
++ arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing);
++ arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
++ arb_regs->McArbBurstTime = PP_HOST_TO_SMC_UL(burst_time);
++ arb_regs->McArbRfshRate = PP_HOST_TO_SMC_UL(rfsh_rate);
++ arb_regs->McArbMisc3 = PP_HOST_TO_SMC_UL(misc3);
++
++ return 0;
++}
++
++static int vegam_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
++{
++ struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
++ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
++ struct SMU75_Discrete_MCArbDramTimingTable arb_regs = {0};
++ uint32_t i, j;
++ int result = 0;
++
++ for (i = 0; i < hw_data->dpm_table.sclk_table.count; i++) {
++ for (j = 0; j < hw_data->dpm_table.mclk_table.count; j++) {
++ result = vegam_populate_memory_timing_parameters(hwmgr,
++ hw_data->dpm_table.sclk_table.dpm_levels[i].value,
++ hw_data->dpm_table.mclk_table.dpm_levels[j].value,
++ &arb_regs.entries[i][j]);
++ if (result)
++ return result;
++ }
++ }
++
++ result = smu7_copy_bytes_to_smc(
++ hwmgr,
++ smu_data->smu7_data.arb_table_start,
++ (uint8_t *)&arb_regs,
++ sizeof(SMU75_Discrete_MCArbDramTimingTable),
++ SMC_RAM_END);
++ return result;
++}
++
++static int vegam_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
++ struct SMU75_Discrete_DpmTable *table)
++{
++ int result = -EINVAL;
++ uint8_t count;
++ struct pp_atomctrl_clock_dividers_vi dividers;
++ struct phm_ppt_v1_information *table_info =
++ (struct phm_ppt_v1_information *)(hwmgr->pptable);
++ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
++ table_info->mm_dep_table;
++ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
++ uint32_t vddci;
++
++ table->UvdLevelCount = (uint8_t)(mm_table->count);
++ table->UvdBootLevel = 0;
++
++ for (count = 0; count < table->UvdLevelCount; count++) {
++ table->UvdLevel[count].MinVoltage = 0;
++ table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
++ table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
++ table->UvdLevel[count].MinVoltage |=
++ (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
++
++ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
++ vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
++ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
++ else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
++ vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
++ else
++ vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
++
++ table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
++ table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
++
++ /* retrieve divider value for VBIOS */
++ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
++ table->UvdLevel[count].VclkFrequency, &dividers);
++ PP_ASSERT_WITH_CODE((0 == result),
++ "can not find divide id for Vclk clock", return result);
++
++ table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
++
++ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
++ table->UvdLevel[count].DclkFrequency, &dividers);
++ PP_ASSERT_WITH_CODE((0 == result),
++ "can not find divide id for Dclk clock", return result);
++
++ table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
++
++ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
++ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
++ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
++ }
++
++ return result;
++}
++
++static int vegam_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
++ struct SMU75_Discrete_DpmTable *table)
++{
++ int result = 0;
++ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
++
++ table->GraphicsBootLevel = 0;
++ table->MemoryBootLevel = 0;
++
++ /* find boot level from dpm table */
++ result = phm_find_boot_level(&(data->dpm_table.sclk_table),
++ data->vbios_boot_state.sclk_bootup_value,
++ (uint32_t *)&(table->GraphicsBootLevel));
++
++ result = phm_find_boot_level(&(data->dpm_table.mclk_table),
++ data->vbios_boot_state.mclk_bootup_value,
++ (uint32_t *)&(table->MemoryBootLevel));
++
++ table->BootVddc = data->vbios_boot_state.vddc_bootup_value *
++ VOLTAGE_SCALE;
++ table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
++ VOLTAGE_SCALE;
++ table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value *
++ VOLTAGE_SCALE;
++
++ CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
++ CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
++ CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
++
++ return 0;
++}
++
++static int vegam_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
++{
++ struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
++ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
++ struct phm_ppt_v1_information *table_info =
++ (struct phm_ppt_v1_information *)(hwmgr->pptable);
++ uint8_t count, level;
++
++ count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
++
++ for (level = 0; level < count; level++) {
++ if (table_info->vdd_dep_on_sclk->entries[level].clk >=
++ hw_data->vbios_boot_state.sclk_bootup_value) {
++ smu_data->smc_state_table.GraphicsBootLevel = level;
++ break;
++ }
++ }
++
++ count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
++ for (level = 0; level < count; level++) {
++ if (table_info->vdd_dep_on_mclk->entries[level].clk >=
++ hw_data->vbios_boot_state.mclk_bootup_value) {
++ smu_data->smc_state_table.MemoryBootLevel = level;
++ break;
++ }
++ }
++
++ return 0;
++}
++
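++/* Scale a 0-100 fan-gain percentage onto a 0-4096 range (likely a 12-bit fixed-point fraction) */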
++static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
++{
++ uint32_t tmp;
++ tmp = raw_setting * 4096 / 100;
++ return (uint16_t)tmp;
++}
++
++static int vegam_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
++{
++ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
++
++ const struct vegam_pt_defaults *defaults = smu_data->power_tune_defaults;
++ SMU75_Discrete_DpmTable *table = &(smu_data->smc_state_table);
++ struct phm_ppt_v1_information *table_info =
++ (struct phm_ppt_v1_information *)(hwmgr->pptable);
++ struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
++ struct pp_advance_fan_control_parameters *fan_table =
++ &hwmgr->thermal_controller.advanceFanControlParameters;
++ int i, j, k;
++ const uint16_t *pdef1;
++ const uint16_t *pdef2;
++
++ table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
++ table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
++
++ PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
++ "Target Operating Temp is out of Range!",
++ );
++
++ table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
++ cac_dtp_table->usTargetOperatingTemp * 256);
++ table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
++ cac_dtp_table->usTemperatureLimitHotspot * 256);
++ table->FanGainEdge = PP_HOST_TO_SMC_US(
++ scale_fan_gain_settings(fan_table->usFanGainEdge));
++ table->FanGainHotspot = PP_HOST_TO_SMC_US(
++ scale_fan_gain_settings(fan_table->usFanGainHotspot));
++
++ pdef1 = defaults->BAPMTI_R;
++ pdef2 = defaults->BAPMTI_RC;
++
++ for (i = 0; i < SMU75_DTE_ITERATIONS; i++) {
++ for (j = 0; j < SMU75_DTE_SOURCES; j++) {
++ for (k = 0; k < SMU75_DTE_SINKS; k++) {
++ table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1);
++ table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2);
++ pdef1++;
++ pdef2++;
++ }
++ }
++ }
++
++ return 0;
++}
++
++static int vegam_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
++{
++ uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
++ struct vegam_smumgr *smu_data =
++ (struct vegam_smumgr *)(hwmgr->smu_backend);
++
++ uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0;
++ struct phm_ppt_v1_information *table_info =
++ (struct phm_ppt_v1_information *)(hwmgr->pptable);
++ struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
++ table_info->vdd_dep_on_sclk;
++ uint32_t mask = (1 << ((STRAP_ASIC_RO_MSB - STRAP_ASIC_RO_LSB) + 1)) - 1;
++
++ stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
++
++ atomctrl_read_efuse(hwmgr, STRAP_ASIC_RO_LSB, STRAP_ASIC_RO_MSB,
++ mask, &efuse);
++
++ min = 1200;
++ max = 2500;
++
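++ /* Map the fused RO reading (treated as 8-bit) linearly onto the [min, max] window */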
++ ro = efuse * (max - min) / 255 + min;
++
++ /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
++ for (i = 0; i < sclk_table->count; i++) {
++ smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
++ sclk_table->entries[i].cks_enable << i;
++ volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) *
++ 136418 - (ro - 70) * 1000000) /
++ (2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000));
++ volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 *
++ 3232 - (ro - 65) * 1000000) /
++ (2522480 - sclk_table->entries[i].clk/100 * 115764/100));
++
++ if (volt_without_cks >= volt_with_cks)
++ volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
++ sclk_table->entries[i].cks_voffset) * 100 + 624) / 625);
++
++ smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
++ }
++
++ smu_data->smc_state_table.LdoRefSel =
++ (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ?
++ table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 5;
++ /* Populate CKS Lookup Table */
++ if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
++ stretch_amount2 = 0;
++ else if (stretch_amount == 3 || stretch_amount == 4)
++ stretch_amount2 = 1;
++ else {
++ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_ClockStretcher);
++ PP_ASSERT_WITH_CODE(false,
++ "Stretch Amount in PPTable not supported\n",
++ return -EINVAL);
++ }
++
++ value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
++ value &= 0xFFFFFFFE;
++ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
++
++ return 0;
++}
++
++static bool vegam_is_hw_avfs_present(struct pp_hwmgr *hwmgr)
++{
++ uint32_t efuse;
++
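++ /* HW AVFS support is fused: check bit 0 of SMU efuse dword 49 */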
++ efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
++ ixSMU_EFUSE_0 + (49 * 4));
++ efuse &= 0x00000001;
++
++ if (efuse)
++ return true;
++
++ return false;
++}
++
++static int vegam_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
++{
++ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
++ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
++
++ SMU75_Discrete_DpmTable *table = &(smu_data->smc_state_table);
++ int result = 0;
++ struct pp_atom_ctrl__avfs_parameters avfs_params = {0};
++ AVFS_meanNsigma_t AVFS_meanNsigma = { {0} };
++ AVFS_Sclk_Offset_t AVFS_SclkOffset = { {0} };
++ uint32_t tmp, i;
++
++ struct phm_ppt_v1_information *table_info =
++ (struct phm_ppt_v1_information *)hwmgr->pptable;
++ struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
++ table_info->vdd_dep_on_sclk;
++
++ if (!hwmgr->avfs_supported)
++ return 0;
++
++ result = atomctrl_get_avfs_information(hwmgr, &avfs_params);
++
++ if (0 == result) {
++ table->BTCGB_VDROOP_TABLE[0].a0 =
++ PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0);
++ table->BTCGB_VDROOP_TABLE[0].a1 =
++ PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1);
++ table->BTCGB_VDROOP_TABLE[0].a2 =
++ PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2);
++ table->BTCGB_VDROOP_TABLE[1].a0 =
++ PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0);
++ table->BTCGB_VDROOP_TABLE[1].a1 =
++ PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1);
++ table->BTCGB_VDROOP_TABLE[1].a2 =
++ PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2);
++ table->AVFSGB_FUSE_TABLE[0].m1 =
++ PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1);
++ table->AVFSGB_FUSE_TABLE[0].m2 =
++ PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2);
++ table->AVFSGB_FUSE_TABLE[0].b =
++ PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b);
++ table->AVFSGB_FUSE_TABLE[0].m1_shift = 24;
++ table->AVFSGB_FUSE_TABLE[0].m2_shift = 12;
++ table->AVFSGB_FUSE_TABLE[1].m1 =
++ PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
++ table->AVFSGB_FUSE_TABLE[1].m2 =
++ PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2);
++ table->AVFSGB_FUSE_TABLE[1].b =
++ PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b);
++ table->AVFSGB_FUSE_TABLE[1].m1_shift = 24;
++ table->AVFSGB_FUSE_TABLE[1].m2_shift = 12;
++ table->MaxVoltage = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv);
++ AVFS_meanNsigma.Aconstant[0] =
++ PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant0);
++ AVFS_meanNsigma.Aconstant[1] =
++ PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant1);
++ AVFS_meanNsigma.Aconstant[2] =
++ PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant2);
++ AVFS_meanNsigma.DC_tol_sigma =
++ PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_DC_tol_sigma);
++ AVFS_meanNsigma.Platform_mean =
++ PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_mean);
++ AVFS_meanNsigma.PSM_Age_CompFactor =
++ PP_HOST_TO_SMC_US(avfs_params.usPSM_Age_ComFactor);
++ AVFS_meanNsigma.Platform_sigma =
++ PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_sigma);
++
++ for (i = 0; i < sclk_table->count; i++) {
++ AVFS_meanNsigma.Static_Voltage_Offset[i] =
++ (uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625);
++ AVFS_SclkOffset.Sclk_Offset[i] =
++ PP_HOST_TO_SMC_US((uint16_t)
++ (sclk_table->entries[i].sclk_offset) / 100);
++ }
++
++ result = smu7_read_smc_sram_dword(hwmgr,
++ SMU7_FIRMWARE_HEADER_LOCATION +
++ offsetof(SMU75_Firmware_Header, AvfsMeanNSigma),
++ &tmp, SMC_RAM_END);
++ smu7_copy_bytes_to_smc(hwmgr,
++ tmp,
++ (uint8_t *)&AVFS_meanNsigma,
++ sizeof(AVFS_meanNsigma_t),
++ SMC_RAM_END);
++
++ result = smu7_read_smc_sram_dword(hwmgr,
++ SMU7_FIRMWARE_HEADER_LOCATION +
++ offsetof(SMU75_Firmware_Header, AvfsSclkOffsetTable),
++ &tmp, SMC_RAM_END);
++ smu7_copy_bytes_to_smc(hwmgr,
++ tmp,
++ (uint8_t *)&AVFS_SclkOffset,
++ sizeof(AVFS_Sclk_Offset_t),
++ SMC_RAM_END);
++
++ data->avfs_vdroop_override_setting =
++ (avfs_params.ucEnableGB_VDROOP_TABLE_CKSON << BTCGB0_Vdroop_Enable_SHIFT) |
++ (avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) |
++ (avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) |
++ (avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT);
++ data->apply_avfs_cks_off_voltage =
++ (avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1) ? true : false;
++ }
++ return result;
++}
++
++static int vegam_populate_vr_config(struct pp_hwmgr *hwmgr,
++ struct SMU75_Discrete_DpmTable *table)
++{
++ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
++ struct vegam_smumgr *smu_data =
++ (struct vegam_smumgr *)(hwmgr->smu_backend);
++ uint16_t config;
++
++ config = VR_MERGED_WITH_VDDC;
++ table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
++
++ /* Set Vddc Voltage Controller */
++ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
++ config = VR_SVI2_PLANE_1;
++ table->VRConfig |= config;
++ } else {
++ PP_ASSERT_WITH_CODE(false,
++ "VDDC should be on SVI2 control in merged mode!",
++ );
++ }
++ /* Set Vddci Voltage Controller */
++ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
++ config = VR_SVI2_PLANE_2; /* only in merged mode */
++ table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
++ } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
++ config = VR_SMIO_PATTERN_1;
++ table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
++ } else {
++ config = VR_STATIC_VOLTAGE;
++ table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
++ }
++ /* Set Mvdd Voltage Controller */
++ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
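++ /* config still holds the VDDCI selection above; claim SVI2 plane 2 for MVDD only if VDDCI did not take it */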
++ if (config != VR_SVI2_PLANE_2) {
++ config = VR_SVI2_PLANE_2;
++ table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
++ cgs_write_ind_register(hwmgr->device,
++ CGS_IND_REG__SMC,
++ smu_data->smu7_data.soft_regs_start +
++ offsetof(SMU75_SoftRegisters, AllowMvddSwitch),
++ 0x1);
++ } else {
++ PP_ASSERT_WITH_CODE(false,
++ "SVI2 Plane 2 is already taken, set MVDD as Static",);
++ config = VR_STATIC_VOLTAGE;
++ table->VRConfig = (config << VRCONF_MVDD_SHIFT);
++ }
++ } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
++ config = VR_SMIO_PATTERN_2;
++ table->VRConfig = (config << VRCONF_MVDD_SHIFT);
++ cgs_write_ind_register(hwmgr->device,
++ CGS_IND_REG__SMC,
++ smu_data->smu7_data.soft_regs_start +
++ offsetof(SMU75_SoftRegisters, AllowMvddSwitch),
++ 0x1);
++ } else {
++ config = VR_STATIC_VOLTAGE;
++ table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
++ }
++
++ return 0;
++}
++
++static int vegam_populate_svi_load_line(struct pp_hwmgr *hwmgr)
++{
++ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
++ const struct vegam_pt_defaults *defaults = smu_data->power_tune_defaults;
++
++ smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
++ smu_data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
++ smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
++ smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
++
++ return 0;
++}
++
++static int vegam_populate_tdc_limit(struct pp_hwmgr *hwmgr)
++{
++ uint16_t tdc_limit;
++ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
++ struct phm_ppt_v1_information *table_info =
++ (struct phm_ppt_v1_information *)(hwmgr->pptable);
++ const struct vegam_pt_defaults *defaults = smu_data->power_tune_defaults;
++
++ tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
++ smu_data->power_tune_table.TDC_VDDC_PkgLimit =
++ CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
++ smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
++ defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
++ smu_data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
++
++ return 0;
++}
++
++static int vegam_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
++{
++ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
++ const struct vegam_pt_defaults *defaults = smu_data->power_tune_defaults;
++ uint32_t temp;
++
++ if (smu7_read_smc_sram_dword(hwmgr,
++ fuse_table_offset +
++ offsetof(SMU75_Discrete_PmFuses, TdcWaterfallCtl),
++ (uint32_t *)&temp, SMC_RAM_END))
++ PP_ASSERT_WITH_CODE(false,
++ "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
++ return -EINVAL);
++ else {
++ smu_data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
++ smu_data->power_tune_table.LPMLTemperatureMin =
++ (uint8_t)((temp >> 16) & 0xff);
++ smu_data->power_tune_table.LPMLTemperatureMax =
++ (uint8_t)((temp >> 8) & 0xff);
++ smu_data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
++ }
++ return 0;
++}
++
++static int vegam_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
++{
++ int i;
++ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
++
++ /* Currently not used. Set all to zero. */
++ for (i = 0; i < 16; i++)
++ smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
++
++ return 0;
++}
++
++static int vegam_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
++{
++ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
++
++/* TODO: move to hwmgr */
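++ /* Fall back to the default sensitivity when bit 15 is set or the value is zero */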
++ if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
++ || 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
++ hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
++ hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;
++
++ smu_data->power_tune_table.FuzzyFan_PwmSetDelta = PP_HOST_TO_SMC_US(
++ hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity);
++ return 0;
++}
++
++static int vegam_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
++{
++ int i;
++ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
++
++ /* Currently not used. Set all to zero. */
++ for (i = 0; i < 16; i++)
++ smu_data->power_tune_table.GnbLPML[i] = 0;
++
++ return 0;
++}
++
++static int vegam_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
++{
++ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
++ struct phm_ppt_v1_information *table_info =
++ (struct phm_ppt_v1_information *)(hwmgr->pptable);
++ uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
++ uint16_t lo_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
++ struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
++
++ hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
++ lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
++
++ smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
++ CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
++ smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
++ CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
++
++ return 0;
++}
++
++static int vegam_populate_pm_fuses(struct pp_hwmgr *hwmgr)
++{
++ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
++ uint32_t pm_fuse_table_offset;
++
++ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_PowerContainment)) {
++ if (smu7_read_smc_sram_dword(hwmgr,
++ SMU7_FIRMWARE_HEADER_LOCATION +
++ offsetof(SMU75_Firmware_Header, PmFuseTable),
++ &pm_fuse_table_offset, SMC_RAM_END))
++ PP_ASSERT_WITH_CODE(false,
++ "Attempt to get pm_fuse_table_offset Failed!",
++ return -EINVAL);
++
++ if (vegam_populate_svi_load_line(hwmgr))
++ PP_ASSERT_WITH_CODE(false,
++ "Attempt to populate SviLoadLine Failed!",
++ return -EINVAL);
++
++ if (vegam_populate_tdc_limit(hwmgr))
++ PP_ASSERT_WITH_CODE(false,
++ "Attempt to populate TDCLimit Failed!", return -EINVAL);
++
++ if (vegam_populate_dw8(hwmgr, pm_fuse_table_offset))
++ PP_ASSERT_WITH_CODE(false,
++ "Attempt to populate TdcWaterfallCtl, "
++ "LPMLTemperature Min and Max Failed!",
++ return -EINVAL);
++
++ if (0 != vegam_populate_temperature_scaler(hwmgr))
++ PP_ASSERT_WITH_CODE(false,
++ "Attempt to populate LPMLTemperatureScaler Failed!",
++ return -EINVAL);
++
++ if (vegam_populate_fuzzy_fan(hwmgr))
++ PP_ASSERT_WITH_CODE(false,
++ "Attempt to populate Fuzzy Fan Control parameters Failed!",
++ return -EINVAL);
++
++ if (vegam_populate_gnb_lpml(hwmgr))
++ PP_ASSERT_WITH_CODE(false,
++ "Attempt to populate GnbLPML Failed!",
++ return -EINVAL);
++
++ if (vegam_populate_bapm_vddc_base_leakage_sidd(hwmgr))
++ PP_ASSERT_WITH_CODE(false,
++ "Attempt to populate BapmVddCBaseLeakage Hi and Lo "
++ "Sidd Failed!", return -EINVAL);
++
++ if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
++ (uint8_t *)&smu_data->power_tune_table,
++ (sizeof(struct SMU75_Discrete_PmFuses) - PMFUSES_AVFSSIZE),
++ SMC_RAM_END))
++ PP_ASSERT_WITH_CODE(false,
++ "Attempt to download PmFuseTable Failed!",
++ return -EINVAL);
++ }
++ return 0;
++}
++
++static int vegam_enable_reconfig_cus(struct pp_hwmgr *hwmgr)
++{
++ struct amdgpu_device *adev = hwmgr->adev;
++
++ smum_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_EnableModeSwitchRLCNotification,
++ adev->gfx.cu_info.number);
++
++ return 0;
++}
++
++static int vegam_init_smc_table(struct pp_hwmgr *hwmgr)
++{
++ int result;
++ struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
++ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
++
++ struct phm_ppt_v1_information *table_info =
++ (struct phm_ppt_v1_information *)(hwmgr->pptable);
++ struct SMU75_Discrete_DpmTable *table = &(smu_data->smc_state_table);
++ uint8_t i;
++ struct pp_atomctrl_gpio_pin_assignment gpio_pin;
++ struct phm_ppt_v1_gpio_table *gpio_table =
++ (struct phm_ppt_v1_gpio_table *)table_info->gpio_table;
++ pp_atomctrl_clock_dividers_vi dividers;
++
++ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_AutomaticDCTransition);
++
++ vegam_initialize_power_tune_defaults(hwmgr);
++
++ if (SMU7_VOLTAGE_CONTROL_NONE != hw_data->voltage_control)
++ vegam_populate_smc_voltage_tables(hwmgr, table);
++
++ table->SystemFlags = 0;
++ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_AutomaticDCTransition))
++ table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
++
++ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_StepVddc))
++ table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
++
++ if (hw_data->is_memory_gddr5)
++ table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
++
++ if (hw_data->ulv_supported && table_info->us_ulv_voltage_offset) {
++ result = vegam_populate_ulv_state(hwmgr, table);
++ PP_ASSERT_WITH_CODE(!result,
++ "Failed to initialize ULV state!", return result);
++ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
++ ixCG_ULV_PARAMETER, SMU7_CGULVPARAMETER_DFLT);
++ }
++
++ result = vegam_populate_smc_link_level(hwmgr, table);
++ PP_ASSERT_WITH_CODE(!result,
++ "Failed to initialize Link Level!", return result);
++
++ result = vegam_populate_all_graphic_levels(hwmgr);
++ PP_ASSERT_WITH_CODE(!result,
++ "Failed to initialize Graphics Level!", return result);
++
++ result = vegam_populate_all_memory_levels(hwmgr);
++ PP_ASSERT_WITH_CODE(!result,
++ "Failed to initialize Memory Level!", return result);
++
++ result = vegam_populate_smc_acpi_level(hwmgr, table);
++ PP_ASSERT_WITH_CODE(!result,
++ "Failed to initialize ACPI Level!", return result);
++
++ result = vegam_populate_smc_vce_level(hwmgr, table);
++ PP_ASSERT_WITH_CODE(!result,
++ "Failed to initialize VCE Level!", return result);
++
++ result = vegam_populate_smc_samu_level(hwmgr, table);
++ PP_ASSERT_WITH_CODE(!result,
++ "Failed to initialize SAMU Level!", return result);
++
++ /* Since only the initial state is completely set up at this point
++ * (the other states are just copies of the boot state) we only
++ * need to populate the ARB settings for the initial state.
++ */
++ result = vegam_program_memory_timing_parameters(hwmgr);
++ PP_ASSERT_WITH_CODE(!result,
++ "Failed to Write ARB settings for the initial state.", return result);
++
++ result = vegam_populate_smc_uvd_level(hwmgr, table);
++ PP_ASSERT_WITH_CODE(!result,
++ "Failed to initialize UVD Level!", return result);
++
++ result = vegam_populate_smc_boot_level(hwmgr, table);
++ PP_ASSERT_WITH_CODE(!result,
++ "Failed to initialize Boot Level!", return result);
++
++ result = vegam_populate_smc_initial_state(hwmgr);
++ PP_ASSERT_WITH_CODE(!result,
++ "Failed to initialize Boot State!", return result);
++
++ result = vegam_populate_bapm_parameters_in_dpm_table(hwmgr);
++ PP_ASSERT_WITH_CODE(!result,
++ "Failed to populate BAPM Parameters!", return result);
++
++ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_ClockStretcher)) {
++ result = vegam_populate_clock_stretcher_data_table(hwmgr);
++ PP_ASSERT_WITH_CODE(!result,
++ "Failed to populate Clock Stretcher Data Table!",
++ return result);
++ }
++
++ result = vegam_populate_avfs_parameters(hwmgr);
++ PP_ASSERT_WITH_CODE(!result,
++ "Failed to populate AVFS Parameters!", return result;);
++
++ table->CurrSclkPllRange = 0xff;
++ table->GraphicsVoltageChangeEnable = 1;
++ table->GraphicsThermThrottleEnable = 1;
++ table->GraphicsInterval = 1;
++ table->VoltageInterval = 1;
++ table->ThermalInterval = 1;
++ table->TemperatureLimitHigh =
++ table_info->cac_dtp_table->usTargetOperatingTemp *
++ SMU7_Q88_FORMAT_CONVERSION_UNIT;
++ table->TemperatureLimitLow =
++ (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
++ SMU7_Q88_FORMAT_CONVERSION_UNIT;
++ table->MemoryVoltageChangeEnable = 1;
++ table->MemoryInterval = 1;
++ table->VoltageResponseTime = 0;
++ table->PhaseResponseTime = 0;
++ table->MemoryThermThrottleEnable = 1;
++
++ PP_ASSERT_WITH_CODE(hw_data->dpm_table.pcie_speed_table.count >= 1,
++ "There must be 1 or more PCIE levels defined in PPTable.",
++ return -EINVAL);
++ table->PCIeBootLinkLevel =
++ hw_data->dpm_table.pcie_speed_table.count;
++ table->PCIeGenInterval = 1;
++ table->VRConfig = 0;
++
++ result = vegam_populate_vr_config(hwmgr, table);
++ PP_ASSERT_WITH_CODE(!result,
++ "Failed to populate VRConfig setting!", return result);
++
++ table->ThermGpio = 17;
++ table->SclkStepSize = 0x4000;
++
++ if (atomctrl_get_pp_assign_pin(hwmgr,
++ VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
++ table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
++ if (gpio_table)
++ table->VRHotLevel =
++ table_info->gpio_table->vrhot_triggered_sclk_dpm_index;
++ } else {
++ table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
++ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_RegulatorHot);
++ }
++
++ if (atomctrl_get_pp_assign_pin(hwmgr,
++ PP_AC_DC_SWITCH_GPIO_PINID, &gpio_pin)) {
++ table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
++ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_AutomaticDCTransition) &&
++ !smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UseNewGPIOScheme))
++ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
++ } else {
++ table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
++ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_AutomaticDCTransition);
++ }
++
++ /* Thermal Output GPIO */
++ if (atomctrl_get_pp_assign_pin(hwmgr,
++ THERMAL_INT_OUTPUT_GPIO_PINID, &gpio_pin)) {
++ table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
++
++ /* For polarity, read GPIOPAD_A with the assigned GPIO pin;
++ * since the VBIOS programs this register to the 'inactive state',
++ * the driver can determine the 'active state' from it and
++ * program the SMU with the correct polarity
++ */
++ table->ThermOutPolarity =
++ (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
++ (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
++ table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
++
++ /* if required, combine VRHot/PCC with thermal out GPIO */
++ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_RegulatorHot) &&
++ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_CombinePCCWithThermalSignal))
++ table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
++ } else {
++ table->ThermOutGpio = 17;
++ table->ThermOutPolarity = 1;
++ table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
++ }
++
++ /* Populate BIF_SCLK levels into SMC DPM table */
++ for (i = 0; i <= hw_data->dpm_table.pcie_speed_table.count; i++) {
++ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
++ smu_data->bif_sclk_table[i], &dividers);
++ PP_ASSERT_WITH_CODE(!result,
++ "Can not find DFS divide id for Sclk",
++ return result);
++
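++ /* Entry 0 programs the ULV BIF_SCLK divider; entries 1..count map to LinkLevel[0..count-1] */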
++ if (i == 0)
++ table->Ulv.BifSclkDfs =
++ PP_HOST_TO_SMC_US((uint16_t)(dividers.pll_post_divider));
++ else
++ table->LinkLevel[i - 1].BifSclkDfs =
++ PP_HOST_TO_SMC_US((uint16_t)(dividers.pll_post_divider));
++ }
++
++ for (i = 0; i < SMU75_MAX_ENTRIES_SMIO; i++)
++ table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
++
++ CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
++ CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
++ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
++ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
++ CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
++ CONVERT_FROM_HOST_TO_SMC_UL(table->CurrSclkPllRange);
++ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
++ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
++ CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
++ CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
++
++ /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
++ result = smu7_copy_bytes_to_smc(hwmgr,
++ smu_data->smu7_data.dpm_table_start +
++ offsetof(SMU75_Discrete_DpmTable, SystemFlags),
++ (uint8_t *)&(table->SystemFlags),
++ sizeof(SMU75_Discrete_DpmTable) - 3 * sizeof(SMU75_PIDController),
++ SMC_RAM_END);
++ PP_ASSERT_WITH_CODE(!result,
++ "Failed to upload dpm data to SMC memory!", return result);
++
++ result = vegam_populate_pm_fuses(hwmgr);
++ PP_ASSERT_WITH_CODE(!result,
++ "Failed to populate PM fuses to SMC memory!", return result);
++
++ result = vegam_enable_reconfig_cus(hwmgr);
++ PP_ASSERT_WITH_CODE(!result,
++ "Failed to enable reconfigurable CUs!", return result);
++
++ return 0;
++}
++
++static uint32_t vegam_get_offsetof(uint32_t type, uint32_t member)
++{
++ switch (type) {
++ case SMU_SoftRegisters:
++ switch (member) {
++ case HandshakeDisables:
++ return offsetof(SMU75_SoftRegisters, HandshakeDisables);
++ case VoltageChangeTimeout:
++ return offsetof(SMU75_SoftRegisters, VoltageChangeTimeout);
++ case AverageGraphicsActivity:
++ return offsetof(SMU75_SoftRegisters, AverageGraphicsActivity);
++ case PreVBlankGap:
++ return offsetof(SMU75_SoftRegisters, PreVBlankGap);
++ case VBlankTimeout:
++ return offsetof(SMU75_SoftRegisters, VBlankTimeout);
++ case UcodeLoadStatus:
++ return offsetof(SMU75_SoftRegisters, UcodeLoadStatus);
++ case DRAM_LOG_ADDR_H:
++ return offsetof(SMU75_SoftRegisters, DRAM_LOG_ADDR_H);
++ case DRAM_LOG_ADDR_L:
++ return offsetof(SMU75_SoftRegisters, DRAM_LOG_ADDR_L);
++ case DRAM_LOG_PHY_ADDR_H:
++ return offsetof(SMU75_SoftRegisters, DRAM_LOG_PHY_ADDR_H);
++ case DRAM_LOG_PHY_ADDR_L:
++ return offsetof(SMU75_SoftRegisters, DRAM_LOG_PHY_ADDR_L);
++ case DRAM_LOG_BUFF_SIZE:
++ return offsetof(SMU75_SoftRegisters, DRAM_LOG_BUFF_SIZE);
++ }
++ case SMU_Discrete_DpmTable:
++ switch (member) {
++ case UvdBootLevel:
++ return offsetof(SMU75_Discrete_DpmTable, UvdBootLevel);
++ case VceBootLevel:
++ return offsetof(SMU75_Discrete_DpmTable, VceBootLevel);
++ case SamuBootLevel:
++ return offsetof(SMU75_Discrete_DpmTable, SamuBootLevel);
++ case LowSclkInterruptThreshold:
++ return offsetof(SMU75_Discrete_DpmTable, LowSclkInterruptThreshold);
++ }
++ }
++ pr_warn("can't get the offset of type %x member %x\n", type, member);
++ return 0;
++}
++
++static int vegam_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
++{
++ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
++
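++ /* The DPMTABLE_* update values are assumed to be distinct single-bit flags, so summing them builds the same mask as OR-ing them */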
++ if (data->need_update_smu7_dpm_table &
++ (DPMTABLE_OD_UPDATE_SCLK +
++ DPMTABLE_UPDATE_SCLK +
++ DPMTABLE_UPDATE_MCLK))
++ return vegam_program_memory_timing_parameters(hwmgr);
++
++ return 0;
++}
++
++static int vegam_update_sclk_threshold(struct pp_hwmgr *hwmgr)
++{
++ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
++ struct vegam_smumgr *smu_data =
++ (struct vegam_smumgr *)(hwmgr->smu_backend);
++ int result = 0;
++ uint32_t low_sclk_interrupt_threshold = 0;
++
++ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_SclkThrottleLowNotification)
++ && (data->low_sclk_interrupt_threshold != 0)) {
++ low_sclk_interrupt_threshold =
++ data->low_sclk_interrupt_threshold;
++
++ CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
++
++ result = smu7_copy_bytes_to_smc(
++ hwmgr,
++ smu_data->smu7_data.dpm_table_start +
++ offsetof(SMU75_Discrete_DpmTable,
++ LowSclkInterruptThreshold),
++ (uint8_t *)&low_sclk_interrupt_threshold,
++ sizeof(uint32_t),
++ SMC_RAM_END);
++ }
++ PP_ASSERT_WITH_CODE((result == 0),
++ "Failed to update SCLK threshold!", return result);
++
++ result = vegam_program_mem_timing_parameters(hwmgr);
++ PP_ASSERT_WITH_CODE((result == 0),
++ "Failed to program memory timing parameters!",
++ );
++
++ return result;
++}
++
++int vegam_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
++{
++ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
++ int ret;
++
++ if (!hwmgr->avfs_supported)
++ return 0;
++
++ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
++ if (!ret) {
++ if (data->apply_avfs_cks_off_voltage)
++ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ApplyAvfsCksOffVoltage);
++ }
++
++ return ret;
++}
++
++static int vegam_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
++{
++ PP_ASSERT_WITH_CODE(hwmgr->thermal_controller.fanInfo.bNoFan,
++ "VBIOS fan info is not correct!",
++ );
++ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_MicrocodeFanControl);
++ return 0;
++}
++
++const struct pp_smumgr_func vegam_smu_funcs = {
++ .smu_init = vegam_smu_init,
++ .smu_fini = smu7_smu_fini,
++ .start_smu = vegam_start_smu,
++ .check_fw_load_finish = smu7_check_fw_load_finish,
++ .request_smu_load_fw = smu7_reload_firmware,
++ .request_smu_load_specific_fw = NULL,
++ .send_msg_to_smc = smu7_send_msg_to_smc,
++ .send_msg_to_smc_with_parameter = smu7_send_msg_to_smc_with_parameter,
++ .process_firmware_header = vegam_process_firmware_header,
++ .is_dpm_running = vegam_is_dpm_running,
++ .get_mac_definition = vegam_get_mac_definition,
++ .update_smc_table = vegam_update_smc_table,
++ .init_smc_table = vegam_init_smc_table,
++ .get_offsetof = vegam_get_offsetof,
++ .populate_all_graphic_levels = vegam_populate_all_graphic_levels,
++ .populate_all_memory_levels = vegam_populate_all_memory_levels,
++ .update_sclk_threshold = vegam_update_sclk_threshold,
++ .is_hw_avfs_present = vegam_is_hw_avfs_present,
++ .thermal_avfs_enable = vegam_thermal_avfs_enable,
++ .thermal_setup_fan_table = vegam_thermal_setup_fan_table,
++};
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.h
+new file mode 100644
+index 0000000..2b65582
+--- /dev/null
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.h
+@@ -0,0 +1,75 @@
++/*
++ * Copyright 2017 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef _VEGAM_SMUMANAGER_H
++#define _VEGAM_SMUMANAGER_H
++
++
++#include <pp_endian.h>
++#include "smu75_discrete.h"
++#include "smu7_smumgr.h"
++
++#define SMC_RAM_END 0x40000
++
++#define DPMTuning_Uphyst_Shift 0
++#define DPMTuning_Downhyst_Shift 8
++#define DPMTuning_Activity_Shift 16
++
++#define GraphicsDPMTuning_VEGAM 0x001e6400
++#define MemoryDPMTuning_VEGAM 0x000f3c0a
++#define SclkDPMTuning_VEGAM 0x002d000a
++#define MclkDPMTuning_VEGAM 0x001f100a
++
++
++struct vegam_pt_defaults {
++ uint8_t SviLoadLineEn;
++ uint8_t SviLoadLineVddC;
++ uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
++ uint8_t TDC_MAWt;
++ uint8_t TdcWaterfallCtl;
++ uint8_t DTEAmbientTempBase;
++
++ uint32_t DisplayCac;
++ uint32_t BAPM_TEMP_GRADIENT;
++ uint16_t BAPMTI_R[SMU75_DTE_ITERATIONS * SMU75_DTE_SOURCES * SMU75_DTE_SINKS];
++ uint16_t BAPMTI_RC[SMU75_DTE_ITERATIONS * SMU75_DTE_SOURCES * SMU75_DTE_SINKS];
++};
++
++struct vegam_range_table {
++ uint32_t trans_lower_frequency; /* in 10khz */
++ uint32_t trans_upper_frequency;
++};
++
++struct vegam_smumgr {
++ struct smu7_smumgr smu7_data;
++ uint8_t protected_mode;
++ SMU75_Discrete_DpmTable smc_state_table;
++ struct SMU75_Discrete_Ulv ulv_setting;
++ struct SMU75_Discrete_PmFuses power_tune_table;
++ struct vegam_range_table range_table[NUM_SCLK_RANGE];
++ const struct vegam_pt_defaults *power_tune_defaults;
++ uint32_t bif_sclk_table[SMU75_MAX_LEVELS_LINK];
++};
++
++
++#endif
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4335-drm-amd-powerplay-add-specific-changes-for-VEGAM-in-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4335-drm-amd-powerplay-add-specific-changes-for-VEGAM-in-.patch
new file mode 100644
index 00000000..01792a68
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4335-drm-amd-powerplay-add-specific-changes-for-VEGAM-in-.patch
@@ -0,0 +1,167 @@
+From e30fb1e861663ff159ddd846b302d0ef8457ef77 Mon Sep 17 00:00:00 2001
+From: Eric Huang <JinHuiEric.Huang@amd.com>
+Date: Wed, 11 Apr 2018 15:38:11 -0500
+Subject: [PATCH 4335/5725] drm/amd/powerplay: add specific changes for VEGAM
+ in smu7_hwmgr.c
+
+VEGAM-specific changes for smu7:
+1. add AVFS control.
+2. add an SMC message different from the smu7 default.
+3. don't switch MC ARB memory timing.
+4. update the LCAC_MC0/1_CNTL values.
+
+Signed-off-by: Eric Huang <JinHuiEric.Huang@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 71 ++++++++++++++++++++----
+ 1 file changed, 61 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+index f1dabd7..194d45a 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+@@ -84,6 +84,14 @@ static const struct profile_mode_setting smu7_profiling[6] =
+ {0, 0, 0, 0, 0, 0, 0, 0},
+ };
+
++#define PPSMC_MSG_SetVBITimeout_VEGAM ((uint16_t) 0x310)
++
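++/* SVI2 plane-1 load register fields; the VEGAM path below clears PSI1 and PSI0_EN before enabling the SMC voltage controller */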
++#define ixPWR_SVI2_PLANE1_LOAD 0xC0200280
++#define PWR_SVI2_PLANE1_LOAD__PSI1_MASK 0x00000020L
++#define PWR_SVI2_PLANE1_LOAD__PSI0_EN_MASK 0x00000040L
++#define PWR_SVI2_PLANE1_LOAD__PSI1__SHIFT 0x00000005
++#define PWR_SVI2_PLANE1_LOAD__PSI0_EN__SHIFT 0x00000006
++
+ /** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
+ enum DPM_EVENT_SRC {
+ DPM_EVENT_SRC_ANALOG = 0,
+@@ -165,6 +173,13 @@ static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
+ */
+ static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
+ {
++ if (hwmgr->chip_id == CHIP_VEGAM) {
++ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
++ CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI1, 0);
++ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
++ CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI0_EN, 0);
++ }
++
+ if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable);
+
+@@ -966,6 +981,22 @@ static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
+ return 0;
+ }
+
++static int smu7_disable_sclk_vce_handshake(struct pp_hwmgr *hwmgr)
++{
++ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
++ uint32_t soft_register_value = 0;
++ uint32_t handshake_disables_offset = data->soft_regs_start
++ + smum_get_offsetof(hwmgr,
++ SMU_SoftRegisters, HandshakeDisables);
++
++ soft_register_value = cgs_read_ind_register(hwmgr->device,
++ CGS_IND_REG__SMC, handshake_disables_offset);
++ soft_register_value |= SMU7_VCE_SCLK_HANDSHAKE_DISABLE;
++ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
++ handshake_disables_offset, soft_register_value);
++ return 0;
++}
++
+ static int smu7_disable_handshake_uvd(struct pp_hwmgr *hwmgr)
+ {
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+@@ -989,6 +1020,9 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
+
+ /* enable SCLK dpm */
+ if (!data->sclk_dpm_key_disabled)
++ if (hwmgr->chip_id == CHIP_VEGAM)
++ smu7_disable_sclk_vce_handshake(hwmgr);
++
+ PP_ASSERT_WITH_CODE(
+ (0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable)),
+ "Failed to enable SCLK DPM during DPM Start Function!",
+@@ -998,13 +1032,15 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
+ if (0 == data->mclk_dpm_key_disabled) {
+ if (!(hwmgr->feature_mask & PP_UVD_HANDSHAKE_MASK))
+ smu7_disable_handshake_uvd(hwmgr);
++
+ PP_ASSERT_WITH_CODE(
+ (0 == smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_MCLKDPM_Enable)),
+ "Failed to enable MCLK DPM during DPM Start Function!",
+ return -EINVAL);
+
+- PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
++ if (hwmgr->chip_family != CHIP_VEGAM)
++ PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
+
+
+ if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
+@@ -1020,8 +1056,13 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
+ udelay(10);
+- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
+- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
++ if (hwmgr->chip_id == CHIP_VEGAM) {
++ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400009);
++ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400009);
++ } else {
++ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
++ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
++ }
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
+ }
+ }
+@@ -1262,10 +1303,12 @@ static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to process firmware header!", result = tmp_result);
+
+- tmp_result = smu7_initial_switch_from_arbf0_to_f1(hwmgr);
+- PP_ASSERT_WITH_CODE((0 == tmp_result),
+- "Failed to initialize switch from ArbF0 to F1!",
+- result = tmp_result);
++ if (hwmgr->chip_id != CHIP_VEGAM) {
++ tmp_result = smu7_initial_switch_from_arbf0_to_f1(hwmgr);
++ PP_ASSERT_WITH_CODE((0 == tmp_result),
++ "Failed to initialize switch from ArbF0 to F1!",
++ result = tmp_result);
++ }
+
+ result = smu7_setup_default_dpm_tables(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+@@ -2755,6 +2798,9 @@ static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr,
+ case CHIP_POLARIS12:
+ switch_limit_us = data->is_memory_gddr5 ? 190 : 150;
+ break;
++ case CHIP_VEGAM:
++ switch_limit_us = 30;
++ break;
+ default:
+ switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
+ break;
+@@ -3800,9 +3846,14 @@ static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr)
+ {
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+- if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK)
+- smum_send_msg_to_smc_with_parameter(hwmgr,
+- (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2);
++ if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK) {
++ if (hwmgr->chip_id == CHIP_VEGAM)
++ smum_send_msg_to_smc_with_parameter(hwmgr,
++ (PPSMC_Msg)PPSMC_MSG_SetVBITimeout_VEGAM, data->frame_time_x2);
++ else
++ smum_send_msg_to_smc_with_parameter(hwmgr,
++ (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2);
++ }
+ return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL;
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4336-drm-powerplay-Add-powertune-table-for-VEGAM.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4336-drm-powerplay-Add-powertune-table-for-VEGAM.patch
new file mode 100644
index 00000000..14404823
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4336-drm-powerplay-Add-powertune-table-for-VEGAM.patch
@@ -0,0 +1,224 @@
+From a610153ac7aae28f8e15dfe43a9783d62add93e6 Mon Sep 17 00:00:00 2001
+From: Eric Huang <JinHuiEric.Huang@amd.com>
+Date: Wed, 11 Apr 2018 18:23:54 -0500
+Subject: [PATCH 4336/5725] drm/powerplay: Add powertune table for VEGAM
+
+Add the powertune table for VEGAM.
+
+Signed-off-by: Eric Huang <JinHuiEric.Huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ .../gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c | 189 +++++++++++++++++++++
+ 1 file changed, 189 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+index 4a9c481..c3ac84f 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+@@ -623,6 +623,190 @@ static const struct gpu_pt_config_reg DIDTConfig_Polaris11_Kicker[] =
+ { 0xFFFFFFFF } /* End of list */
+ };
+
++static const struct gpu_pt_config_reg GCCACConfig_VegaM[] =
++{
++// ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
++// Offset Mask Shift Value Type
++// ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
++ // DIDT_SQ
++ //
++ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060013, GPU_CONFIGREG_GC_CAC_IND },
++ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860013, GPU_CONFIGREG_GC_CAC_IND },
++ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060013, GPU_CONFIGREG_GC_CAC_IND },
++ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860013, GPU_CONFIGREG_GC_CAC_IND },
++ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060013, GPU_CONFIGREG_GC_CAC_IND },
++ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860013, GPU_CONFIGREG_GC_CAC_IND },
++ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060013, GPU_CONFIGREG_GC_CAC_IND },
++ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860013, GPU_CONFIGREG_GC_CAC_IND },
++ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060013, GPU_CONFIGREG_GC_CAC_IND },
++
++ // DIDT_TD
++ //
++ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0013, GPU_CONFIGREG_GC_CAC_IND },
++ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0013, GPU_CONFIGREG_GC_CAC_IND },
++ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0013, GPU_CONFIGREG_GC_CAC_IND },
++ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0013, GPU_CONFIGREG_GC_CAC_IND },
++ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0013, GPU_CONFIGREG_GC_CAC_IND },
++
++ // DIDT_TCP
++ //
++ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00100013, GPU_CONFIGREG_GC_CAC_IND },
++ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00900013, GPU_CONFIGREG_GC_CAC_IND },
++ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01100013, GPU_CONFIGREG_GC_CAC_IND },
++ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01900013, GPU_CONFIGREG_GC_CAC_IND },
++ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02100013, GPU_CONFIGREG_GC_CAC_IND },
++ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02900013, GPU_CONFIGREG_GC_CAC_IND },
++
++ { 0xFFFFFFFF } // End of list
++};
++
++static const struct gpu_pt_config_reg DIDTConfig_VegaM[] =
++{
++// ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
++// Offset Mask Shift Value Type
++// ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
++ // DIDT_SQ
++ //
++ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x0073, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00ab, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0084, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND },
++
++ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x0067, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x0084, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x0027, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND },
++
++ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x00aa, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++
++ { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
++
++ { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
++
++ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++
++ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++
++ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++
++ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++
++ // DIDT_TD
++ //
++ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, GPU_CONFIGREG_DIDT_IND },
++
++ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++
++ { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
++
++ { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND },
++
++ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++
++ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++
++ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++
++ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0009, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0009, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++
++ // DIDT_TCP
++ //
++ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND },
++
++ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++
++ { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
++
++ { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
++
++ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++
++ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT,0x01aa, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++
++ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++
++ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++
++ { 0xFFFFFFFF } // End of list
++};
+ static int smu7_enable_didt(struct pp_hwmgr *hwmgr, const bool enable)
+ {
+ uint32_t en = enable ? 1 : 0;
+@@ -768,6 +952,11 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
+ PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", goto error);
+ result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris12);
+ PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", goto error);
++ } else if (hwmgr->chip_id == CHIP_VEGAM) {
++ result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_VegaM);
++ PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
++ result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_VegaM);
++ PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
+ }
+ }
+ cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, value2);
+--
+2.7.4
+
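For orientation, the VegaM GCCAC/DIDT tables above follow the usual powerplay pattern: each entry names an indirect register, a field mask, a shift and the value to program, and the list ends at a 0xFFFFFFFF sentinel. A minimal user-space model of how such a table is consumed is sketched below; it is illustrative only, and the real smu7_program_pt_config_registers() additionally distinguishes direct, indirect and cached register types.

    #include <stdint.h>
    #include <stdio.h>

    struct cfg_reg { uint32_t offset, mask, shift, value; };

    /* toy register file standing in for the GPU's indirect register space */
    static uint32_t regs[16];

    static const struct cfg_reg table[] = {
        { 0x1, 0x00ff, 0, 0x0004 },        /* WEIGHT0 = 0x04 */
        { 0x1, 0xff00, 8, 0x00aa },        /* WEIGHT1 = 0xaa */
        { 0xFFFFFFFF }                     /* end of list    */
    };

    static void program_table(const struct cfg_reg *t)
    {
        for (; t->offset != 0xFFFFFFFF; t++) {
            uint32_t v = regs[t->offset];
            v &= ~t->mask;                            /* clear the field   */
            v |= (t->value << t->shift) & t->mask;    /* insert new value  */
            regs[t->offset] = v;
        }
    }

    int main(void)
    {
        program_table(table);
        printf("reg[1] = 0x%08x\n", regs[1]);         /* prints 0x0000aa04 */
        return 0;
    }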
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4337-drm-scheduler-don-t-update-last-scheduled-fence-in-T.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4337-drm-scheduler-don-t-update-last-scheduled-fence-in-T.patch
new file mode 100644
index 00000000..839da139
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4337-drm-scheduler-don-t-update-last-scheduled-fence-in-T.patch
@@ -0,0 +1,40 @@
+From 6aed7c90e6e8e50b9477752782bd052d6ea256e8 Mon Sep 17 00:00:00 2001
+From: Pixel Ding <Pixel.Ding@amd.com>
+Date: Tue, 24 Apr 2018 22:52:45 -0400
+Subject: [PATCH 4337/5725] drm/scheduler: don't update last scheduled fence in
+ TDR
+
+The current sequence in the scheduler thread is:
+1. update last sched fence
+2. job begin (adding to mirror list)
+3. job finish (remove from mirror list)
+4. back to 1
+
+Since the last scheduled fence is updated before a job joins the
+mirror list, every job on the mirror list has already passed that
+fence. TDR only re-runs the jobs on the mirror list, so it must not
+update the last scheduled fences again.
+
+Signed-off-by: Pixel Ding <Pixel.Ding@amd.com>
+Reviewed-by: Monk Liu <monk.liu@amd.com>
+---
+ drivers/gpu/drm/scheduler/gpu_scheduler.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
+index 203f553..3d41246 100644
+--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
++++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
+@@ -574,9 +574,6 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
+ fence = sched->ops->run_job(s_job);
+ atomic_inc(&sched->hw_rq_count);
+
+- dma_fence_put(s_job->entity->last_scheduled);
+- s_job->entity->last_scheduled = dma_fence_get(&s_fence->finished);
+-
+ if (fence) {
+ s_fence->parent = dma_fence_get(fence);
+ r = dma_fence_add_callback(fence, &s_fence->cb,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4338-drm-amdgpu-For-sriov-reset-move-IB-test-into-exclusi.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4338-drm-amdgpu-For-sriov-reset-move-IB-test-into-exclusi.patch
new file mode 100644
index 00000000..c7b2de56
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4338-drm-amdgpu-For-sriov-reset-move-IB-test-into-exclusi.patch
@@ -0,0 +1,50 @@
+From 54c3274c63a32a0379f483b14ade2030885ae969 Mon Sep 17 00:00:00 2001
+From: Emily Deng <Emily.Deng@amd.com>
+Date: Thu, 26 Apr 2018 18:02:55 +0800
+Subject: [PATCH 4338/5725] drm/amdgpu: For sriov reset, move IB test into
+ exclusive mode
+
+When the IB test is run outside exclusive mode during an SR-IOV
+reset, it fails randomly. Outside exclusive mode, register reads and
+writes go through the KIQ; because of world switches, the KIQ access
+time is unpredictable and can exceed MAX_KIQ_REG_WAIT, so the
+register access fails and the IB test fails with it. Move the IB
+test back into exclusive mode to avoid this.
+
+Signed-off-by: Emily Deng <Emily.Deng@amd.com>
+Reviewed-by: Monk Liu <monk.liu@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 30e607e..405d3a8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -3213,19 +3213,19 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
+
+ /* now we are okay to resume SMC/CP/SDMA */
+ r = amdgpu_device_ip_reinit_late_sriov(adev);
+- amdgpu_virt_release_full_gpu(adev, true);
+ if (r)
+ goto error;
+
+ amdgpu_irq_gpu_reset_resume_helper(adev);
+ r = amdgpu_ib_ring_tests(adev);
++
++error:
++ amdgpu_virt_release_full_gpu(adev, true);
+ if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
+ atomic_inc(&adev->vram_lost_counter);
+ r = amdgpu_device_handle_vram_lost(adev);
+ }
+
+-error:
+-
+ return r;
+ }
+
+--
+2.7.4
+
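The failure mode described above is a plain bounded-wait timeout: outside exclusive mode each register access goes through the KIQ, and a world switch can push the completion past the wait budget. A toy model of that race is sketched below; MAX_WAIT stands in for MAX_KIQ_REG_WAIT and the numbers are made up.

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_WAIT 5   /* polls we are willing to spend on one access */

    /* A register read that needs 'latency' polls to complete; if the
     * latency exceeds the budget, the access "fails". */
    static bool kiq_read(int latency, unsigned int *out)
    {
        for (int i = 0; i < MAX_WAIT; i++)
            if (i >= latency) { *out = 0x1234; return true; }
        return false;    /* timed out */
    }

    int main(void)
    {
        unsigned int v;
        printf("fast path:          %s\n", kiq_read(2, &v) ? "ok" : "timeout");
        printf("after world switch: %s\n", kiq_read(9, &v) ? "ok" : "timeout");
        return 0;
    }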
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4339-drm-amdgpu-sriov-Need-to-set-in_gpu_reset-flag-to-ba.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4339-drm-amdgpu-sriov-Need-to-set-in_gpu_reset-flag-to-ba.patch
new file mode 100644
index 00000000..bc4711a9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4339-drm-amdgpu-sriov-Need-to-set-in_gpu_reset-flag-to-ba.patch
@@ -0,0 +1,34 @@
+From 623d0e2714f5a2134190949178bb185c5fe9faf3 Mon Sep 17 00:00:00 2001
+From: Emily Deng <Emily.Deng@amd.com>
+Date: Thu, 26 Apr 2018 18:02:14 +0800
+Subject: [PATCH 4339/5725] drm/amdgpu/sriov: Need to set in_gpu_reset flag to
+ back after gpu reset
+
+After the host OS completes the GPU reset, the in_gpu_reset flag
+needs to be set back to zero.
+
+Signed-off-by: Emily Deng <Emily.Deng@amd.com>
+Reviewed-by: Monk Liu <monk.liu@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+index 4933486..078f70f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
++++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+@@ -260,8 +260,10 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
+ } while (timeout > 1);
+
+ flr_done:
+- if (locked)
++ if (locked) {
++ adev->in_gpu_reset = 0;
+ mutex_unlock(&adev->lock_reset);
++ }
+
+ /* Trigger recovery for world switch failure if no TDR */
+ if (amdgpu_lockup_timeout == 0)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4340-drm-amd-display-Fix-deadlock-when-flushing-irq.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4340-drm-amd-display-Fix-deadlock-when-flushing-irq.patch
new file mode 100644
index 00000000..c85049ca
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4340-drm-amd-display-Fix-deadlock-when-flushing-irq.patch
@@ -0,0 +1,40 @@
+From 4d051e2c132c0baf8ea1167fb03202dbf718f135 Mon Sep 17 00:00:00 2001
+From: Mikita Lipski <mikita.lipski@amd.com>
+Date: Wed, 10 Jan 2018 10:01:38 -0500
+Subject: [PATCH 4340/5725] drm/amd/display: Fix deadlock when flushing irq
+
+Hold the IRQ table lock only while reading a work item from the
+queue, drop it to flush that work, then take it again for the next
+entry until all pending work has been cleared.
+
+Signed-off-by: Mikita Lipski <mikita.lipski@amd.com>
+Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+index 470c4e2..30bec90 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+@@ -329,14 +329,15 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
+ {
+ int src;
+ struct irq_list_head *lh;
++ unsigned long irq_table_flags;
+ DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n");
+-
+ for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
+-
++ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+ /* The handler was removed from the table,
+ * it means it is safe to flush all the 'work'
+ * (because no code can schedule a new one). */
+ lh = &adev->dm.irq_handler_list_low_tab[src];
++ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+ flush_work(&lh->work);
+ }
+ }
+--
+2.7.4
+
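The rule the fix follows: flush_work() may sleep, and the queued handler may need the IRQ table lock itself, so the lock has to be dropped before flushing. A schematic of the corrected loop is sketched below; it is a kernel-style fragment, not a buildable module, and the lock field name is illustrative (the driver hides it behind DM_IRQ_TABLE_LOCK/UNLOCK).

    struct irq_list_head *lh;
    unsigned long flags;
    int src;

    for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
        /* take the table lock only long enough to read the entry ... */
        spin_lock_irqsave(&adev->dm.irq_table_lock, flags);   /* illustrative field name */
        lh = &adev->dm.irq_handler_list_low_tab[src];
        spin_unlock_irqrestore(&adev->dm.irq_table_lock, flags);

        /* ... then flush with the lock dropped: flush_work() can sleep and
         * the queued handler may need the same lock, so flushing while
         * holding it would deadlock. */
        flush_work(&lh->work);
    }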
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4341-drm-amd-display-Unify-dm-resume-sequence-into-a-sing.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4341-drm-amd-display-Unify-dm-resume-sequence-into-a-sing.patch
new file mode 100644
index 00000000..f91991a3
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4341-drm-amd-display-Unify-dm-resume-sequence-into-a-sing.patch
@@ -0,0 +1,113 @@
+From b5fe5a0f9c8b1dd862e97ca0ded232b2ecbbc5fa Mon Sep 17 00:00:00 2001
+From: Mikita Lipski <mikita.lipski@amd.com>
+Date: Sat, 3 Feb 2018 14:18:07 -0500
+Subject: [PATCH 4341/5725] drm/amd/display: Unify dm resume sequence into a
+ single call
+
+Merge the amdgpu_dm_display_resume function into dm_resume, as it is
+no longer called anywhere else.
+
+The resume path was originally split into two functions to work
+around a cursor corruption issue. That issue is no longer visible,
+so dm_resume now performs the display resume itself.
+
+Signed-off-by: Mikita Lipski <mikita.lipski@amd.com>
+Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
+
+Conflicts:
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+
+Change-Id: Ic1ab06cf9fda834095a8d6952c3c5801de8d67ee
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 31 ++++++++---------------
+ 1 file changed, 11 insertions(+), 20 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index bd05986..f13b0c0 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -642,18 +642,6 @@ amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
+ static int dm_resume(void *handle)
+ {
+ struct amdgpu_device *adev = handle;
+- struct amdgpu_display_manager *dm = &adev->dm;
+- int ret = 0;
+-
+- /* power on hardware */
+- dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
+-
+- ret = amdgpu_dm_display_resume(adev);
+- return ret;
+-}
+-
+-int amdgpu_dm_display_resume(struct amdgpu_device *adev)
+-{
+ struct drm_device *ddev = adev->ddev;
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct amdgpu_dm_connector *aconnector;
+@@ -664,10 +652,12 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
+ struct drm_plane *plane;
+ struct drm_plane_state *new_plane_state;
+ struct dm_plane_state *dm_new_plane_state;
+-
+- int ret = 0;
++ int ret;
+ int i;
+
++ /* power on hardware */
++ dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
++
+ /* program HPD filter */
+ dc_resume(dm->dc);
+
+@@ -681,8 +671,7 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
+ amdgpu_dm_irq_resume_early(adev);
+
+ /* Do detection*/
+- list_for_each_entry(connector,
+- &ddev->mode_config.connector_list, head) {
++ list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ /*
+@@ -704,7 +693,7 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
+ }
+
+ /* Force mode set in atomic comit */
+- for_each_new_crtc_in_state(adev->dm.cached_state, crtc, new_crtc_state, i)
++ for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
+ new_crtc_state->active_changed = true;
+
+ /*
+@@ -712,7 +701,7 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
+ * them here, since they were duplicated as part of the suspend
+ * procedure.
+ */
+- for_each_new_crtc_in_state(adev->dm.cached_state, crtc, new_crtc_state, i) {
++ for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ if (dm_new_crtc_state->stream) {
+ WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
+@@ -721,7 +710,7 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
+ }
+ }
+
+- for_each_new_plane_in_state(adev->dm.cached_state, plane, new_plane_state, i) {
++ for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
+ dm_new_plane_state = to_dm_plane_state(new_plane_state);
+ if (dm_new_plane_state->dc_state) {
+ WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
+@@ -730,7 +719,9 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
+ }
+ }
+
+- ret = drm_atomic_helper_resume(ddev, adev->dm.cached_state);
++ ret = drm_atomic_helper_resume(ddev, dm->cached_state);
++
++ dm->cached_state = NULL;
+
+ amdgpu_dm_irq_resume_late(adev);
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4342-drm-amd-display-Disallow-enabling-CRTC-without-prima.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4342-drm-amd-display-Disallow-enabling-CRTC-without-prima.patch
new file mode 100644
index 00000000..d8538e71
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4342-drm-amd-display-Disallow-enabling-CRTC-without-prima.patch
@@ -0,0 +1,77 @@
+From eecb5859a6d33b1333b874f31c3f5f3792bac0a2 Mon Sep 17 00:00:00 2001
+From: Harry Wentland <harry.wentland@amd.com>
+Date: Mon, 16 Apr 2018 17:28:11 -0400
+Subject: [PATCH 4342/5725] drm/amd/display: Disallow enabling CRTC without
+ primary plane with FB
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The below commit
+
+ "drm/atomic: Try to preserve the crtc enabled state in drm_atomic_remove_fb, v2"
+
+introduces a slight behavioral change to rmfb. Instead of disabling a crtc
+when the primary plane is disabled, it now preserves it.
+
+Since DC is currently not equipped to handle this, we need to fail
+such a commit; otherwise we might see a corrupted screen.
+
+This is based on Shirish's previous approach but avoids adding all
+planes to the new atomic state, which would force a full update in DC
+for every commit and is not what we intend.
+
+Theoretically DM should be able to deal with states with fully populated planes,
+even for simple updates, such as cursor updates. This should still be
+addressed in the future.
+
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Tested-by: Michel Dänzer <michel.daenzer@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Cc: stable@vger.kernel.org
+
+Conflicts:
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+
+Change-Id: Ie77fd8f58b8058751278393e7849fc90f22209e0
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index f13b0c0..508424f 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -4795,6 +4795,7 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *aconnector = NULL;
+ struct drm_connector_state *new_con_state = NULL;
+ struct dm_connector_state *dm_conn_state = NULL;
++ struct drm_plane_state *new_plane_state = NULL;
+
+ new_stream = NULL;
+
+@@ -4802,6 +4803,12 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ acrtc = to_amdgpu_crtc(crtc);
+
++ new_plane_state = drm_atomic_get_new_plane_state(state, new_crtc_state->crtc->primary);
++
++ if (new_crtc_state->enable && new_plane_state && !new_plane_state->fb) {
++ ret = -EINVAL;
++ goto fail;
++ }
+ aconnector = amdgpu_dm_find_first_crtc_matching_connector(
+ state, crtc);
+
+@@ -5006,7 +5013,7 @@ static int dm_update_planes_state(struct dc *dc,
+ if (!dm_old_crtc_state->stream)
+ continue;
+
+- DRM_DEBUG_DRIVER("Disabling DRM plane: %d on DRM crtc %d\n",
++ DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
+ plane->base.id, old_plane_crtc->base.id);
+
+ if (!dc_remove_plane_from_context(
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4343-drm-amd-display-fix-issue-related-to-infopacket-was-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4343-drm-amd-display-fix-issue-related-to-infopacket-was-.patch
new file mode 100644
index 00000000..057e2671
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4343-drm-amd-display-fix-issue-related-to-infopacket-was-.patch
@@ -0,0 +1,161 @@
+From 348434db194aa17074389adc635aad83435f4780 Mon Sep 17 00:00:00 2001
+From: Anthony Koo <Anthony.Koo@amd.com>
+Date: Wed, 11 Apr 2018 13:19:56 -0400
+Subject: [PATCH 4343/5725] drm/amd/display: fix issue related to infopacket
+ was not transmitted
+
+The check in the code was incorrect, so the infopacket was only
+transmitted after the update function had been called multiple times.
+The purpose of the check is to determine whether any infopackets are
+being enabled and then enable the global control bit. Fix the code to
+do this.
+
+Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ .../drm/amd/display/dc/dce/dce_stream_encoder.c | 25 ++++++----------------
+ .../amd/display/dc/dcn10/dcn10_stream_encoder.c | 11 ++++++----
+ 2 files changed, 13 insertions(+), 23 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+index 84e26c8..e265a0a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+@@ -819,7 +819,7 @@ static void dce110_stream_encoder_update_dp_info_packets(
+ const struct encoder_info_frame *info_frame)
+ {
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+- uint32_t value = REG_READ(DP_SEC_CNTL);
++ uint32_t value = 0;
+
+ if (info_frame->vsc.valid)
+ dce110_update_generic_info_packet(
+@@ -853,6 +853,7 @@ static void dce110_stream_encoder_update_dp_info_packets(
+ * Therefore we need to enable master bit
+ * if at least on of the fields is not 0
+ */
++ value = REG_READ(DP_SEC_CNTL);
+ if (value)
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
+ }
+@@ -862,7 +863,7 @@ static void dce110_stream_encoder_stop_dp_info_packets(
+ {
+ /* stop generic packets on DP */
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+- uint32_t value = REG_READ(DP_SEC_CNTL);
++ uint32_t value = 0;
+
+ if (enc110->se_mask->DP_SEC_AVI_ENABLE) {
+ REG_SET_7(DP_SEC_CNTL, 0,
+@@ -875,25 +876,10 @@ static void dce110_stream_encoder_stop_dp_info_packets(
+ DP_SEC_STREAM_ENABLE, 0);
+ }
+
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+- if (enc110->se_mask->DP_SEC_GSP7_ENABLE) {
+- REG_SET_10(DP_SEC_CNTL, 0,
+- DP_SEC_GSP0_ENABLE, 0,
+- DP_SEC_GSP1_ENABLE, 0,
+- DP_SEC_GSP2_ENABLE, 0,
+- DP_SEC_GSP3_ENABLE, 0,
+- DP_SEC_GSP4_ENABLE, 0,
+- DP_SEC_GSP5_ENABLE, 0,
+- DP_SEC_GSP6_ENABLE, 0,
+- DP_SEC_GSP7_ENABLE, 0,
+- DP_SEC_MPG_ENABLE, 0,
+- DP_SEC_STREAM_ENABLE, 0);
+- }
+-#endif
+ /* this register shared with audio info frame.
+ * therefore we need to keep master enabled
+ * if at least one of the fields is not 0 */
+-
++ value = REG_READ(DP_SEC_CNTL);
+ if (value)
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
+
+@@ -1496,7 +1482,7 @@ static void dce110_se_disable_dp_audio(
+ struct stream_encoder *enc)
+ {
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+- uint32_t value = REG_READ(DP_SEC_CNTL);
++ uint32_t value = 0;
+
+ /* Disable Audio packets */
+ REG_UPDATE_5(DP_SEC_CNTL,
+@@ -1508,6 +1494,7 @@ static void dce110_se_disable_dp_audio(
+
+ /* This register shared with encoder info frame. Therefore we need to
+ keep master enabled if at least on of the fields is not 0 */
++ value = REG_READ(DP_SEC_CNTL);
+ if (value != 0)
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+index 9ec46f8..befd863 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+@@ -686,7 +686,7 @@ void enc1_stream_encoder_update_dp_info_packets(
+ const struct encoder_info_frame *info_frame)
+ {
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+- uint32_t value = REG_READ(DP_SEC_CNTL);
++ uint32_t value = 0;
+
+ if (info_frame->vsc.valid)
+ enc1_update_generic_info_packet(
+@@ -713,6 +713,7 @@ void enc1_stream_encoder_update_dp_info_packets(
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, info_frame->spd.valid);
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, info_frame->hdrsmd.valid);
+
++
+ /* This bit is the master enable bit.
+ * When enabling secondary stream engine,
+ * this master bit must also be set.
+@@ -720,6 +721,7 @@ void enc1_stream_encoder_update_dp_info_packets(
+ * Therefore we need to enable master bit
+ * if at least on of the fields is not 0
+ */
++ value = REG_READ(DP_SEC_CNTL);
+ if (value)
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
+ }
+@@ -729,7 +731,7 @@ void enc1_stream_encoder_stop_dp_info_packets(
+ {
+ /* stop generic packets on DP */
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+- uint32_t value = REG_READ(DP_SEC_CNTL);
++ uint32_t value = 0;
+
+ REG_SET_10(DP_SEC_CNTL, 0,
+ DP_SEC_GSP0_ENABLE, 0,
+@@ -746,7 +748,7 @@ void enc1_stream_encoder_stop_dp_info_packets(
+ /* this register shared with audio info frame.
+ * therefore we need to keep master enabled
+ * if at least one of the fields is not 0 */
+-
++ value = REG_READ(DP_SEC_CNTL);
+ if (value)
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
+
+@@ -1356,7 +1358,7 @@ static void enc1_se_disable_dp_audio(
+ struct stream_encoder *enc)
+ {
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+- uint32_t value = REG_READ(DP_SEC_CNTL);
++ uint32_t value = 0;
+
+ /* Disable Audio packets */
+ REG_UPDATE_5(DP_SEC_CNTL,
+@@ -1369,6 +1371,7 @@ static void enc1_se_disable_dp_audio(
+ /* This register shared with encoder info frame. Therefore we need to
+ * keep master enabled if at least on of the fields is not 0
+ */
++ value = REG_READ(DP_SEC_CNTL);
+ if (value != 0)
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
+
+--
+2.7.4
+
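The fix amounts to sampling DP_SEC_CNTL after the per-packet enable bits have been programmed, so the master-enable decision sees the new state instead of a stale one. A minimal user-space model of the old versus fixed behaviour is sketched below; the bit values are made up and the real code goes through REG_READ/REG_UPDATE.

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned int dp_sec_cntl;                 /* toy DP_SEC_CNTL       */
    #define GSP1_ENABLE   0x2u                       /* illustrative bits     */
    #define STREAM_ENABLE 0x80000000u

    static void update_info_packets(bool read_before_update)
    {
        unsigned int value = read_before_update ? dp_sec_cntl : 0;

        dp_sec_cntl |= GSP1_ENABLE;                  /* program a packet enable */

        if (!read_before_update)
            value = dp_sec_cntl;                     /* sample AFTER the update */
        if (value)                                   /* any field set?          */
            dp_sec_cntl |= STREAM_ENABLE;            /* master enable           */
    }

    int main(void)
    {
        dp_sec_cntl = 0;
        update_info_packets(true);                   /* old logic: stale read   */
        printf("old: stream enable = %d\n", !!(dp_sec_cntl & STREAM_ENABLE));

        dp_sec_cntl = 0;
        update_info_packets(false);                  /* fixed logic             */
        printf("new: stream enable = %d\n", !!(dp_sec_cntl & STREAM_ENABLE));
        return 0;
    }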
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4344-drm-amd-display-Make-program_output_csc-HWSS-interfa.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4344-drm-amd-display-Make-program_output_csc-HWSS-interfa.patch
new file mode 100644
index 00000000..1644eeab
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4344-drm-amd-display-Make-program_output_csc-HWSS-interfa.patch
@@ -0,0 +1,72 @@
+From da4055818ceaa2ad9975e1688b466bb418107796 Mon Sep 17 00:00:00 2001
+From: Eric Bernstein <eric.bernstein@amd.com>
+Date: Mon, 9 Apr 2018 15:47:42 -0400
+Subject: [PATCH 4344/5725] drm/amd/display: Make program_output_csc HWSS
+ interface function
+
+Signed-off-by: Eric Bernstein <eric.bernstein@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 5 +++--
+ drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 7 +++++++
+ 2 files changed, 10 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 468113d..4e442e9 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -1579,7 +1579,7 @@ static void program_csc_matrix(struct pipe_ctx *pipe_ctx,
+ }
+ }
+
+-static void program_output_csc(struct dc *dc,
++static void dcn10_program_output_csc(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ enum dc_color_space colorspace,
+ uint16_t *matrix,
+@@ -1932,7 +1932,7 @@ static void update_dchubp_dpp(
+ /*gamut remap*/
+ program_gamut_remap(pipe_ctx);
+
+- program_output_csc(dc,
++ dc->hwss.program_output_csc(dc,
+ pipe_ctx,
+ pipe_ctx->stream->output_color_space,
+ pipe_ctx->stream->csc_color_matrix.matrix,
+@@ -2690,6 +2690,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
+ .update_pending_status = dcn10_update_pending_status,
+ .set_input_transfer_func = dcn10_set_input_transfer_func,
+ .set_output_transfer_func = dcn10_set_output_transfer_func,
++ .program_output_csc = dcn10_program_output_csc,
+ .power_down = dce110_power_down,
+ .enable_accelerated_mode = dce110_enable_accelerated_mode,
+ .enable_timing_synchronization = dcn10_enable_timing_synchronization,
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+index f54d478..be6cf7e 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+@@ -95,6 +95,12 @@ struct hw_sequencer_funcs {
+ enum dc_color_space colorspace,
+ uint16_t *matrix);
+
++ void (*program_output_csc)(struct dc *dc,
++ struct pipe_ctx *pipe_ctx,
++ enum dc_color_space colorspace,
++ uint16_t *matrix,
++ int opp_id);
++
+ void (*update_plane_addr)(
+ const struct dc *dc,
+ struct pipe_ctx *pipe_ctx);
+@@ -203,6 +209,7 @@ struct hw_sequencer_funcs {
+
+ void (*set_cursor_position)(struct pipe_ctx *pipe);
+ void (*set_cursor_attribute)(struct pipe_ctx *pipe);
++
+ };
+
+ void color_space_to_black_color(
+--
+2.7.4
+
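This follows the usual HWSS pattern: per-ASIC behaviour is exposed as a function pointer in hw_sequencer_funcs and callers dispatch through dc->hwss instead of calling the DCN10 static directly. A stripped-down illustration of that pattern is sketched below; the names are simplified and not the actual DC structures.

    #include <stdio.h>

    struct dc;                                       /* opaque in this sketch */

    struct hw_sequencer_funcs {
        void (*program_output_csc)(struct dc *dc, int opp_id);
    };

    struct dc {
        struct hw_sequencer_funcs hwss;
    };

    static void dcn10_program_output_csc(struct dc *dc, int opp_id)
    {
        printf("DCN10 CSC programmed on OPP %d\n", opp_id);
    }

    int main(void)
    {
        struct dc dc = {
            .hwss = { .program_output_csc = dcn10_program_output_csc },
        };

        /* callers no longer reference the DCN10 static directly */
        dc.hwss.program_output_csc(&dc, 0);
        return 0;
    }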
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4345-drm-amd-display-Refactor-otg_blank-sequence.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4345-drm-amd-display-Refactor-otg_blank-sequence.patch
new file mode 100644
index 00000000..7508cade
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4345-drm-amd-display-Refactor-otg_blank-sequence.patch
@@ -0,0 +1,172 @@
+From a5cf7173ba18a223b523d5f0da123d842040db67 Mon Sep 17 00:00:00 2001
+From: Eric Bernstein <eric.bernstein@amd.com>
+Date: Mon, 9 Apr 2018 17:19:27 -0400
+Subject: [PATCH 4345/5725] drm/amd/display: Refactor otg_blank sequence
+
+Also rename otg_blank to blank_pixel_data.
+
+Signed-off-by: Eric Bernstein <eric.bernstein@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ .../amd/display/dc/dce110/dce110_hw_sequencer.c | 6 ++--
+ .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 35 +++++++++++-----------
+ drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 8 ++++-
+ 3 files changed, 28 insertions(+), 21 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index 0c92348..6a0ae02 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -1233,7 +1233,7 @@ static void program_scaler(const struct dc *dc,
+ &pipe_ctx->plane_res.scl_data);
+ }
+
+-static enum dc_status dce110_prog_pixclk_crtc_otg(
++static enum dc_status dce110_enable_stream_timing(
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct dc *dc)
+@@ -1309,7 +1309,7 @@ static enum dc_status apply_single_controller_ctx_to_hw(
+ pipe_ctx[pipe_ctx->pipe_idx];
+
+ /* */
+- dc->hwss.prog_pixclk_crtc_otg(pipe_ctx, context, dc);
++ dc->hwss.enable_stream_timing(pipe_ctx, context, dc);
+
+ /* FPGA does not program backend */
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+@@ -3058,7 +3058,7 @@ static const struct hw_sequencer_funcs dce110_funcs = {
+ .get_position = get_position,
+ .set_static_screen_control = set_static_screen_control,
+ .reset_hw_ctx_wrap = dce110_reset_hw_ctx_wrap,
+- .prog_pixclk_crtc_otg = dce110_prog_pixclk_crtc_otg,
++ .enable_stream_timing = dce110_enable_stream_timing,
+ .setup_stereo = NULL,
+ .set_avmute = dce110_set_avmute,
+ .wait_for_mpcc_disconnect = dce110_wait_for_mpcc_disconnect,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 4e442e9..48eaf6a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -593,7 +593,7 @@ static void false_optc_underflow_wa(
+ tg->funcs->clear_optc_underflow(tg);
+ }
+
+-static enum dc_status dcn10_prog_pixclk_crtc_otg(
++static enum dc_status dcn10_enable_stream_timing(
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct dc *dc)
+@@ -1965,9 +1965,9 @@ static void update_dchubp_dpp(
+ hubp->funcs->set_blank(hubp, false);
+ }
+
+-static void dcn10_otg_blank(
++static void dcn10_blank_pixel_data(
+ struct dc *dc,
+- struct stream_resource stream_res,
++ struct stream_resource *stream_res,
+ struct dc_stream_state *stream,
+ bool blank)
+ {
+@@ -1978,21 +1978,21 @@ static void dcn10_otg_blank(
+ color_space = stream->output_color_space;
+ color_space_to_black_color(dc, color_space, &black_color);
+
+- if (stream_res.tg->funcs->set_blank_color)
+- stream_res.tg->funcs->set_blank_color(
+- stream_res.tg,
++ if (stream_res->tg->funcs->set_blank_color)
++ stream_res->tg->funcs->set_blank_color(
++ stream_res->tg,
+ &black_color);
+
+ if (!blank) {
+- if (stream_res.tg->funcs->set_blank)
+- stream_res.tg->funcs->set_blank(stream_res.tg, blank);
+- if (stream_res.abm)
+- stream_res.abm->funcs->set_abm_level(stream_res.abm, stream->abm_level);
++ if (stream_res->tg->funcs->set_blank)
++ stream_res->tg->funcs->set_blank(stream_res->tg, blank);
++ if (stream_res->abm)
++ stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
+ } else if (blank) {
+- if (stream_res.abm)
+- stream_res.abm->funcs->set_abm_immediate_disable(stream_res.abm);
+- if (stream_res.tg->funcs->set_blank)
+- stream_res.tg->funcs->set_blank(stream_res.tg, blank);
++ if (stream_res->abm)
++ stream_res->abm->funcs->set_abm_immediate_disable(stream_res->abm);
++ if (stream_res->tg->funcs->set_blank)
++ stream_res->tg->funcs->set_blank(stream_res->tg, blank);
+ }
+ }
+
+@@ -2031,7 +2031,7 @@ static void program_all_pipe_in_tree(
+ pipe_ctx->stream_res.tg->funcs->program_global_sync(
+ pipe_ctx->stream_res.tg);
+
+- dcn10_otg_blank(dc, pipe_ctx->stream_res,
++ dc->hwss.blank_pixel_data(dc, &pipe_ctx->stream_res,
+ pipe_ctx->stream, blank);
+ }
+
+@@ -2151,7 +2151,7 @@ static void dcn10_apply_ctx_for_surface(
+
+ if (num_planes == 0) {
+ /* OTG blank before remove all front end */
+- dcn10_otg_blank(dc, top_pipe_to_program->stream_res, top_pipe_to_program->stream, true);
++ dc->hwss.blank_pixel_data(dc, &top_pipe_to_program->stream_res, top_pipe_to_program->stream, true);
+ }
+
+ /* Disconnect unused mpcc */
+@@ -2702,10 +2702,11 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
+ .blank_stream = dce110_blank_stream,
+ .enable_display_power_gating = dcn10_dummy_display_power_gating,
+ .disable_plane = dcn10_disable_plane,
++ .blank_pixel_data = dcn10_blank_pixel_data,
+ .pipe_control_lock = dcn10_pipe_control_lock,
+ .set_bandwidth = dcn10_set_bandwidth,
+ .reset_hw_ctx_wrap = reset_hw_ctx_wrap,
+- .prog_pixclk_crtc_otg = dcn10_prog_pixclk_crtc_otg,
++ .enable_stream_timing = dcn10_enable_stream_timing,
+ .set_drr = set_drr,
+ .get_position = get_position,
+ .set_static_screen_control = set_static_screen_control,
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+index be6cf7e..29abf3e 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+@@ -65,6 +65,7 @@ struct dchub_init_data;
+ struct dc_static_screen_events;
+ struct resource_pool;
+ struct resource_context;
++struct stream_resource;
+
+ struct hw_sequencer_funcs {
+
+@@ -162,6 +163,11 @@ struct hw_sequencer_funcs {
+ struct dc *dc,
+ struct pipe_ctx *pipe,
+ bool lock);
++ void (*blank_pixel_data)(
++ struct dc *dc,
++ struct stream_resource *stream_res,
++ struct dc_stream_state *stream,
++ bool blank);
+
+ void (*set_bandwidth)(
+ struct dc *dc,
+@@ -177,7 +183,7 @@ struct hw_sequencer_funcs {
+ void (*set_static_screen_control)(struct pipe_ctx **pipe_ctx,
+ int num_pipes, const struct dc_static_screen_events *events);
+
+- enum dc_status (*prog_pixclk_crtc_otg)(
++ enum dc_status (*enable_stream_timing)(
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct dc *dc);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4346-drm-amd-display-DP-link-validation-bug-for-YUV422.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4346-drm-amd-display-DP-link-validation-bug-for-YUV422.patch
new file mode 100644
index 00000000..1eb13ae4
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4346-drm-amd-display-DP-link-validation-bug-for-YUV422.patch
@@ -0,0 +1,78 @@
+From ecce06487bbc9787f429050e7a366b4bf646841e Mon Sep 17 00:00:00 2001
+From: Hersen Wu <hersenxs.wu@amd.com>
+Date: Wed, 11 Apr 2018 15:22:10 -0400
+Subject: [PATCH 4346/5725] drm/amd/display: DP link validation bug for YUV422
+
+Remove the workaround that limited YUV422 color depth to 24 bits;
+it was only needed for old ASICs.
+
+Signed-off-by: Hersen Wu <hersenxs.wu@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 47 +++++++++++-------------
+ 1 file changed, 21 insertions(+), 26 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+index 7f0fdf7..cb376cf 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+@@ -1379,34 +1379,29 @@ static uint32_t bandwidth_in_kbps_from_timing(
+ uint32_t bits_per_channel = 0;
+ uint32_t kbps;
+
+- if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
++ switch (timing->display_color_depth) {
++ case COLOR_DEPTH_666:
++ bits_per_channel = 6;
++ break;
++ case COLOR_DEPTH_888:
++ bits_per_channel = 8;
++ break;
++ case COLOR_DEPTH_101010:
++ bits_per_channel = 10;
++ break;
++ case COLOR_DEPTH_121212:
+ bits_per_channel = 12;
+- else{
+-
+- switch (timing->display_color_depth) {
+-
+- case COLOR_DEPTH_666:
+- bits_per_channel = 6;
+- break;
+- case COLOR_DEPTH_888:
+- bits_per_channel = 8;
+- break;
+- case COLOR_DEPTH_101010:
+- bits_per_channel = 10;
+- break;
+- case COLOR_DEPTH_121212:
+- bits_per_channel = 12;
+- break;
+- case COLOR_DEPTH_141414:
+- bits_per_channel = 14;
+- break;
+- case COLOR_DEPTH_161616:
+- bits_per_channel = 16;
+- break;
+- default:
+- break;
+- }
++ break;
++ case COLOR_DEPTH_141414:
++ bits_per_channel = 14;
++ break;
++ case COLOR_DEPTH_161616:
++ bits_per_channel = 16;
++ break;
++ default:
++ break;
+ }
++
+ ASSERT(bits_per_channel != 0);
+
+ kbps = timing->pix_clk_khz;
+--
+2.7.4
+
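As a sanity check on why the forced 12 bpc mattered, the link bandwidth estimate is roughly pixel clock times bits per channel times three components. The sketch below is a back-of-the-envelope model only; the real bandwidth_in_kbps_from_timing() applies further encoding-specific adjustments.

    #include <stdint.h>
    #include <stdio.h>

    /* Rough DP bandwidth estimate: pixel clock (kHz) * bpc * 3 components. */
    static uint64_t bw_kbps(uint32_t pix_clk_khz, uint32_t bits_per_channel)
    {
        return (uint64_t)pix_clk_khz * bits_per_channel * 3;
    }

    int main(void)
    {
        /* 4K60-class pixel clock, 10 bpc timing */
        printf("10 bpc: %llu kbps\n", (unsigned long long)bw_kbps(594000, 10));
        /* the old workaround would have silently used 12 bpc for YUV422 */
        printf("12 bpc: %llu kbps\n", (unsigned long long)bw_kbps(594000, 12));
        return 0;
    }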
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4347-drm-amd-display-dal-3.1.43.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4347-drm-amd-display-dal-3.1.43.patch
new file mode 100644
index 00000000..c637990c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4347-drm-amd-display-dal-3.1.43.patch
@@ -0,0 +1,28 @@
+From 3195bd9f0c9adad6643f208eb165e97663229f5d Mon Sep 17 00:00:00 2001
+From: Tony Cheng <tony.cheng@amd.com>
+Date: Mon, 16 Apr 2018 13:30:02 -0400
+Subject: [PATCH 4347/5725] drm/amd/display: dal 3.1.43
+
+Signed-off-by: Tony Cheng <tony.cheng@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index bcddb71..27c2ce0 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -38,7 +38,7 @@
+ #include "inc/compressor.h"
+ #include "dml/display_mode_lib.h"
+
+-#define DC_VER "3.1.42"
++#define DC_VER "3.1.43"
+
+ #define MAX_SURFACES 3
+ #define MAX_STREAMS 6
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4348-drm-amd-display-Add-user_regamma-to-color-module.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4348-drm-amd-display-Add-user_regamma-to-color-module.patch
new file mode 100644
index 00000000..5fc046e1
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4348-drm-amd-display-Add-user_regamma-to-color-module.patch
@@ -0,0 +1,469 @@
+From fb4c5dd9dfd9874d908943b4647ee1f5955de1e7 Mon Sep 17 00:00:00 2001
+From: Krunoslav Kovac <Krunoslav.Kovac@amd.com>
+Date: Fri, 13 Apr 2018 16:06:24 -0400
+Subject: [PATCH 4348/5725] drm/amd/display: Add user_regamma to color module
+
+Signed-off-by: Krunoslav Kovac <Krunoslav.Kovac@amd.com>
+Reviewed-by: Anthony Koo <Anthony.Koo@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ .../drm/amd/display/modules/color/color_gamma.c | 314 ++++++++++++++++++++-
+ .../drm/amd/display/modules/color/color_gamma.h | 48 +++-
+ 2 files changed, 348 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+index e7e374f..ad0ff50 100644
+--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
++++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+@@ -185,14 +185,14 @@ struct dividers {
+
+ static void build_coefficients(struct gamma_coefficients *coefficients, bool is_2_4)
+ {
+- static const int32_t numerator01[] = { 31308, 180000};
+- static const int32_t numerator02[] = { 12920, 4500};
+- static const int32_t numerator03[] = { 55, 99};
+- static const int32_t numerator04[] = { 55, 99};
+- static const int32_t numerator05[] = { 2400, 2200};
++ static const int32_t numerator01[] = { 31308, 180000};
++ static const int32_t numerator02[] = { 12920, 4500};
++ static const int32_t numerator03[] = { 55, 99};
++ static const int32_t numerator04[] = { 55, 99};
++ static const int32_t numerator05[] = { 2400, 2200};
+
+- uint32_t i = 0;
+- uint32_t index = is_2_4 == true ? 0:1;
++ uint32_t i = 0;
++ uint32_t index = is_2_4 == true ? 0:1;
+
+ do {
+ coefficients->a0[i] = dal_fixed31_32_from_fraction(
+@@ -691,7 +691,7 @@ static void build_degamma(struct pwl_float_data_ex *curve,
+ }
+ }
+
+-static bool scale_gamma(struct pwl_float_data *pwl_rgb,
++static void scale_gamma(struct pwl_float_data *pwl_rgb,
+ const struct dc_gamma *ramp,
+ struct dividers dividers)
+ {
+@@ -752,11 +752,9 @@ static bool scale_gamma(struct pwl_float_data *pwl_rgb,
+ dividers.divider3);
+ rgb->b = dal_fixed31_32_mul(rgb_last->b,
+ dividers.divider3);
+-
+- return true;
+ }
+
+-static bool scale_gamma_dx(struct pwl_float_data *pwl_rgb,
++static void scale_gamma_dx(struct pwl_float_data *pwl_rgb,
+ const struct dc_gamma *ramp,
+ struct dividers dividers)
+ {
+@@ -818,8 +816,71 @@ static bool scale_gamma_dx(struct pwl_float_data *pwl_rgb,
+ pwl_rgb[i-1].g, 2), pwl_rgb[i-2].g);
+ pwl_rgb[i].b = dal_fixed31_32_sub(dal_fixed31_32_mul_int(
+ pwl_rgb[i-1].b, 2), pwl_rgb[i-2].b);
++}
+
+- return true;
++/* todo: all these scale_gamma functions are inherently the same but
++ * take different structures as params or different format for ramp
++ * values. We could probably implement it in a more generic fashion
++ */
++static void scale_user_regamma_ramp(struct pwl_float_data *pwl_rgb,
++ const struct regamma_ramp *ramp,
++ struct dividers dividers)
++{
++ unsigned short max_driver = 0xFFFF;
++ unsigned short max_os = 0xFF00;
++ unsigned short scaler = max_os;
++ uint32_t i;
++ struct pwl_float_data *rgb = pwl_rgb;
++ struct pwl_float_data *rgb_last = rgb + GAMMA_RGB_256_ENTRIES - 1;
++
++ i = 0;
++ do {
++ if (ramp->gamma[i] > max_os ||
++ ramp->gamma[i + 256] > max_os ||
++ ramp->gamma[i + 512] > max_os) {
++ scaler = max_driver;
++ break;
++ }
++ i++;
++ } while (i != GAMMA_RGB_256_ENTRIES);
++
++ i = 0;
++ do {
++ rgb->r = dal_fixed31_32_from_fraction(
++ ramp->gamma[i], scaler);
++ rgb->g = dal_fixed31_32_from_fraction(
++ ramp->gamma[i + 256], scaler);
++ rgb->b = dal_fixed31_32_from_fraction(
++ ramp->gamma[i + 512], scaler);
++
++ ++rgb;
++ ++i;
++ } while (i != GAMMA_RGB_256_ENTRIES);
++
++ rgb->r = dal_fixed31_32_mul(rgb_last->r,
++ dividers.divider1);
++ rgb->g = dal_fixed31_32_mul(rgb_last->g,
++ dividers.divider1);
++ rgb->b = dal_fixed31_32_mul(rgb_last->b,
++ dividers.divider1);
++
++ ++rgb;
++
++ rgb->r = dal_fixed31_32_mul(rgb_last->r,
++ dividers.divider2);
++ rgb->g = dal_fixed31_32_mul(rgb_last->g,
++ dividers.divider2);
++ rgb->b = dal_fixed31_32_mul(rgb_last->b,
++ dividers.divider2);
++
++ ++rgb;
++
++ rgb->r = dal_fixed31_32_mul(rgb_last->r,
++ dividers.divider3);
++ rgb->g = dal_fixed31_32_mul(rgb_last->g,
++ dividers.divider3);
++ rgb->b = dal_fixed31_32_mul(rgb_last->b,
++ dividers.divider3);
+ }
+
+ /*
+@@ -949,7 +1010,7 @@ static inline void copy_rgb_regamma_to_coordinates_x(
+ uint32_t i = 0;
+ const struct pwl_float_data_ex *rgb_regamma = rgb_ex;
+
+- while (i <= hw_points_num) {
++ while (i <= hw_points_num + 1) {
+ coords->regamma_y_red = rgb_regamma->r;
+ coords->regamma_y_green = rgb_regamma->g;
+ coords->regamma_y_blue = rgb_regamma->b;
+@@ -1002,6 +1063,102 @@ static bool calculate_interpolated_hardware_curve(
+ return true;
+ }
+
++/* The "old" interpolation uses a complicated scheme to build an array of
++ * coefficients while also using an array of 0-255 normalized to 0-1
++ * Then there's another loop using both of the above + new scaled user ramp
++ * and we concatenate them. It also searches for points of interpolation and
++ * uses enums for positions.
++ *
++ * This function uses a different approach:
++ * user ramp is always applied on X with 0/255, 1/255, 2/255, ..., 255/255
++ * To find index for hwX , we notice the following:
++ * i/255 <= hwX < (i+1)/255 <=> i <= 255*hwX < i+1
++ * See apply_lut_1d which is the same principle, but on 4K entry 1D LUT
++ *
++ * Once the index is known, combined Y is simply:
++ * user_ramp(index) + (hwX-index/255)*(user_ramp(index+1) - user_ramp(index)
++ *
++ * We should switch to this method in all cases, it's simpler and faster
++ * ToDo one day - for now this only applies to ADL regamma to avoid regression
++ * for regular use cases (sRGB and PQ)
++ */
++static void interpolate_user_regamma(uint32_t hw_points_num,
++ struct pwl_float_data *rgb_user,
++ bool apply_degamma,
++ struct dc_transfer_func_distributed_points *tf_pts)
++{
++ uint32_t i;
++ uint32_t color = 0;
++ int32_t index;
++ int32_t index_next;
++ struct fixed31_32 *tf_point;
++ struct fixed31_32 hw_x;
++ struct fixed31_32 norm_factor =
++ dal_fixed31_32_from_int_nonconst(255);
++ struct fixed31_32 norm_x;
++ struct fixed31_32 index_f;
++ struct fixed31_32 lut1;
++ struct fixed31_32 lut2;
++ struct fixed31_32 delta_lut;
++ struct fixed31_32 delta_index;
++
++ i = 0;
++ /* fixed_pt library has problems handling too small values */
++ while (i != 32) {
++ tf_pts->red[i] = dal_fixed31_32_zero;
++ tf_pts->green[i] = dal_fixed31_32_zero;
++ tf_pts->blue[i] = dal_fixed31_32_zero;
++ ++i;
++ }
++ while (i <= hw_points_num + 1) {
++ for (color = 0; color < 3; color++) {
++ if (color == 0)
++ tf_point = &tf_pts->red[i];
++ else if (color == 1)
++ tf_point = &tf_pts->green[i];
++ else
++ tf_point = &tf_pts->blue[i];
++
++ if (apply_degamma) {
++ if (color == 0)
++ hw_x = coordinates_x[i].regamma_y_red;
++ else if (color == 1)
++ hw_x = coordinates_x[i].regamma_y_green;
++ else
++ hw_x = coordinates_x[i].regamma_y_blue;
++ } else
++ hw_x = coordinates_x[i].x;
++
++ norm_x = dal_fixed31_32_mul(norm_factor, hw_x);
++ index = dal_fixed31_32_floor(norm_x);
++ if (index < 0 || index > 255)
++ continue;
++
++ index_f = dal_fixed31_32_from_int_nonconst(index);
++ index_next = (index == 255) ? index : index + 1;
++
++ if (color == 0) {
++ lut1 = rgb_user[index].r;
++ lut2 = rgb_user[index_next].r;
++ } else if (color == 1) {
++ lut1 = rgb_user[index].g;
++ lut2 = rgb_user[index_next].g;
++ } else {
++ lut1 = rgb_user[index].b;
++ lut2 = rgb_user[index_next].b;
++ }
++
++ // we have everything now, so interpolate
++ delta_lut = dal_fixed31_32_sub(lut2, lut1);
++ delta_index = dal_fixed31_32_sub(norm_x, index_f);
++
++ *tf_point = dal_fixed31_32_add(lut1,
++ dal_fixed31_32_mul(delta_index, delta_lut));
++ }
++ ++i;
++ }
++}
++
+ static void build_new_custom_resulted_curve(
+ uint32_t hw_points_num,
+ struct dc_transfer_func_distributed_points *tf_pts)
+@@ -1025,6 +1182,29 @@ static void build_new_custom_resulted_curve(
+ }
+ }
+
++static void apply_degamma_for_user_regamma(struct pwl_float_data_ex *rgb_regamma,
++ uint32_t hw_points_num)
++{
++ uint32_t i;
++
++ struct gamma_coefficients coeff;
++ struct pwl_float_data_ex *rgb = rgb_regamma;
++ const struct hw_x_point *coord_x = coordinates_x;
++
++ build_coefficients(&coeff, true);
++
++ i = 0;
++ while (i != hw_points_num + 1) {
++ rgb->r = translate_from_linear_space_ex(
++ coord_x->x, &coeff, 0);
++ rgb->g = rgb->r;
++ rgb->b = rgb->r;
++ ++coord_x;
++ ++rgb;
++ ++i;
++ }
++}
++
+ static bool map_regamma_hw_to_x_user(
+ const struct dc_gamma *ramp,
+ struct pixel_gamma_point *coeff128,
+@@ -1062,6 +1242,7 @@ static bool map_regamma_hw_to_x_user(
+ }
+ }
+
++ /* this should be named differently, all it does is clamp to 0-1 */
+ build_new_custom_resulted_curve(hw_points_num, tf_pts);
+
+ return true;
+@@ -1168,6 +1349,113 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
+ return ret;
+ }
+
++bool calculate_user_regamma_coeff(struct dc_transfer_func *output_tf,
++ const struct regamma_lut *regamma)
++{
++ struct gamma_coefficients coeff;
++ const struct hw_x_point *coord_x = coordinates_x;
++ uint32_t i = 0;
++
++ do {
++ coeff.a0[i] = dal_fixed31_32_from_fraction(
++ regamma->coeff.A0[i], 10000000);
++ coeff.a1[i] = dal_fixed31_32_from_fraction(
++ regamma->coeff.A1[i], 1000);
++ coeff.a2[i] = dal_fixed31_32_from_fraction(
++ regamma->coeff.A2[i], 1000);
++ coeff.a3[i] = dal_fixed31_32_from_fraction(
++ regamma->coeff.A3[i], 1000);
++ coeff.user_gamma[i] = dal_fixed31_32_from_fraction(
++ regamma->coeff.gamma[i], 1000);
++
++ ++i;
++ } while (i != 3);
++
++ i = 0;
++ /* fixed_pt library has problems handling too small values */
++ while (i != 32) {
++ output_tf->tf_pts.red[i] = dal_fixed31_32_zero;
++ output_tf->tf_pts.green[i] = dal_fixed31_32_zero;
++ output_tf->tf_pts.blue[i] = dal_fixed31_32_zero;
++ ++coord_x;
++ ++i;
++ }
++ while (i != MAX_HW_POINTS + 1) {
++ output_tf->tf_pts.red[i] = translate_from_linear_space_ex(
++ coord_x->x, &coeff, 0);
++ output_tf->tf_pts.green[i] = translate_from_linear_space_ex(
++ coord_x->x, &coeff, 1);
++ output_tf->tf_pts.blue[i] = translate_from_linear_space_ex(
++ coord_x->x, &coeff, 2);
++ ++coord_x;
++ ++i;
++ }
++
++ // this function just clamps output to 0-1
++ build_new_custom_resulted_curve(MAX_HW_POINTS, &output_tf->tf_pts);
++ output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
++
++ return true;
++}
++
++bool calculate_user_regamma_ramp(struct dc_transfer_func *output_tf,
++ const struct regamma_lut *regamma)
++{
++ struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts;
++ struct dividers dividers;
++
++ struct pwl_float_data *rgb_user = NULL;
++ struct pwl_float_data_ex *rgb_regamma = NULL;
++ bool ret = false;
++
++ if (regamma == NULL)
++ return false;
++
++ output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
++
++ rgb_user = kzalloc(sizeof(*rgb_user) * (GAMMA_RGB_256_ENTRIES + _EXTRA_POINTS),
++ GFP_KERNEL);
++ if (!rgb_user)
++ goto rgb_user_alloc_fail;
++
++ rgb_regamma = kzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS + _EXTRA_POINTS),
++ GFP_KERNEL);
++ if (!rgb_regamma)
++ goto rgb_regamma_alloc_fail;
++
++ dividers.divider1 = dal_fixed31_32_from_fraction(3, 2);
++ dividers.divider2 = dal_fixed31_32_from_int(2);
++ dividers.divider3 = dal_fixed31_32_from_fraction(5, 2);
++
++ scale_user_regamma_ramp(rgb_user, &regamma->ramp, dividers);
++
++ if (regamma->flags.bits.applyDegamma == 1) {
++ apply_degamma_for_user_regamma(rgb_regamma, MAX_HW_POINTS);
++ copy_rgb_regamma_to_coordinates_x(coordinates_x,
++ MAX_HW_POINTS, rgb_regamma);
++ }
++
++ interpolate_user_regamma(MAX_HW_POINTS, rgb_user,
++ regamma->flags.bits.applyDegamma, tf_pts);
++
++ // no custom HDR curves!
++ tf_pts->end_exponent = 0;
++ tf_pts->x_point_at_y1_red = 1;
++ tf_pts->x_point_at_y1_green = 1;
++ tf_pts->x_point_at_y1_blue = 1;
++
++ // this function just clamps output to 0-1
++ build_new_custom_resulted_curve(MAX_HW_POINTS, tf_pts);
++
++ ret = true;
++
++ kfree(rgb_regamma);
++rgb_regamma_alloc_fail:
++ kfree(rgb_user);
++rgb_user_alloc_fail:
++ return ret;
++}
++
+ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
+ const struct dc_gamma *ramp, bool mapUserRamp)
+ {
+diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
+index b7f9bc2..b6404899 100644
+--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
++++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
+@@ -32,6 +32,47 @@ struct dc_transfer_func_distributed_points;
+ struct dc_rgb_fixed;
+ enum dc_transfer_func_predefined;
+
++/* For SetRegamma ADL interface support
++ * Must match escape type
++ */
++union regamma_flags {
++ unsigned int raw;
++ struct {
++ unsigned int gammaRampArray :1; // RegammaRamp is in use
++ unsigned int gammaFromEdid :1; //gamma from edid is in use
++ unsigned int gammaFromEdidEx :1; //gamma from edid is in use , but only for Display Id 1.2
++ unsigned int gammaFromUser :1; //user custom gamma is used
++ unsigned int coeffFromUser :1; //coeff. A0-A3 from user is in use
++ unsigned int coeffFromEdid :1; //coeff. A0-A3 from edid is in use
++ unsigned int applyDegamma :1; //flag for additional degamma correction in driver
++ unsigned int gammaPredefinedSRGB :1; //flag for SRGB gamma
++ unsigned int gammaPredefinedPQ :1; //flag for PQ gamma
++ unsigned int gammaPredefinedPQ2084Interim :1; //flag for PQ gamma, lower max nits
++ unsigned int gammaPredefined36 :1; //flag for 3.6 gamma
++ unsigned int gammaPredefinedReset :1; //flag to return to previous gamma
++ } bits;
++};
++
++struct regamma_ramp {
++ unsigned short gamma[256*3]; // gamma ramp packed in same way as OS windows ,r , g & b
++};
++
++struct regamma_coeff {
++ int gamma[3];
++ int A0[3];
++ int A1[3];
++ int A2[3];
++ int A3[3];
++};
++
++struct regamma_lut {
++ union regamma_flags flags;
++ union {
++ struct regamma_ramp ramp;
++ struct regamma_coeff coeff;
++ };
++};
++
+ void setup_x_points_distribution(void);
+ void precompute_pq(void);
+ void precompute_de_pq(void);
+@@ -45,9 +86,14 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *output_tf,
+ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
+ struct dc_transfer_func_distributed_points *points);
+
+-bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
++bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
+ struct dc_transfer_func_distributed_points *points);
+
++bool calculate_user_regamma_coeff(struct dc_transfer_func *output_tf,
++ const struct regamma_lut *regamma);
++
++bool calculate_user_regamma_ramp(struct dc_transfer_func *output_tf,
++ const struct regamma_lut *regamma);
+
+
+ #endif /* COLOR_MOD_COLOR_GAMMA_H_ */
+--
+2.7.4
+
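Restating the interpolation described in interpolate_user_regamma() above: the 256-entry user ramp is treated as samples at x = i/255, the bracketing index for a hardware point hwX is i = floor(255*hwX), and the result is lut[i] + (255*hwX - i) * (lut[i+1] - lut[i]). The sketch below is a plain floating-point model of that lookup; the driver performs the same computation in dal fixed31.32 arithmetic.

    #include <math.h>
    #include <stdio.h>

    /* Linear interpolation into a 256-entry ramp sampled at x = i/255. */
    static double interp_ramp(const double lut[256], double hw_x)
    {
        double norm = 255.0 * hw_x;
        int i = (int)floor(norm);

        if (i < 0)
            return lut[0];
        if (i >= 255)
            return lut[255];
        return lut[i] + (norm - i) * (lut[i + 1] - lut[i]);
    }

    int main(void)
    {
        double lut[256];

        for (int i = 0; i < 256; i++)       /* identity ramp for the demo */
            lut[i] = i / 255.0;

        printf("%f\n", interp_ramp(lut, 0.3));   /* ~0.3 */
        return 0;
    }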
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4349-drm-amd-display-add-cursor-TTU-CRQ-related.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4349-drm-amd-display-add-cursor-TTU-CRQ-related.patch
new file mode 100644
index 00000000..a77e7765
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4349-drm-amd-display-add-cursor-TTU-CRQ-related.patch
@@ -0,0 +1,69 @@
+From 07adcbd5cf7e8d35032e85ca550babc630ac9dd9 Mon Sep 17 00:00:00 2001
+From: Charlene Liu <charlene.liu@amd.com>
+Date: Mon, 16 Apr 2018 15:14:15 -0400
+Subject: [PATCH 4349/5725] drm/amd/display: add cursor TTU CRQ related
+
+Signed-off-by: Charlene Liu <charlene.liu@amd.com>
+Reviewed-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 7 +++++++
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 10 +++++++++-
+ 2 files changed, 16 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+index 5806217..759fcd1 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+@@ -613,6 +613,13 @@ void hubp1_program_deadline(
+ REG_SET(DCN_SURF1_TTU_CNTL1, 0,
+ REFCYC_PER_REQ_DELIVERY_PRE,
+ ttu_attr->refcyc_per_req_delivery_pre_c);
++
++ REG_SET_3(DCN_CUR0_TTU_CNTL0, 0,
++ REFCYC_PER_REQ_DELIVERY, ttu_attr->refcyc_per_req_delivery_cur0,
++ QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_cur0,
++ QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_cur0);
++ REG_SET(DCN_CUR0_TTU_CNTL1, 0,
++ REFCYC_PER_REQ_DELIVERY_PRE, ttu_attr->refcyc_per_req_delivery_pre_cur0);
+ }
+
+ static void hubp1_setup(
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+index 920ae3a..02045a8 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+@@ -93,6 +93,8 @@
+ SRI(DCN_SURF0_TTU_CNTL1, HUBPREQ, id),\
+ SRI(DCN_SURF1_TTU_CNTL0, HUBPREQ, id),\
+ SRI(DCN_SURF1_TTU_CNTL1, HUBPREQ, id),\
++ SRI(DCN_CUR0_TTU_CNTL0, HUBPREQ, id),\
++ SRI(DCN_CUR0_TTU_CNTL1, HUBPREQ, id),\
+ SRI(HUBP_CLK_CNTL, HUBP, id)
+
+ /* Register address initialization macro for ASICs with VM */
+@@ -203,6 +205,8 @@
+ uint32_t DCN_SURF0_TTU_CNTL1; \
+ uint32_t DCN_SURF1_TTU_CNTL0; \
+ uint32_t DCN_SURF1_TTU_CNTL1; \
++ uint32_t DCN_CUR0_TTU_CNTL0; \
++ uint32_t DCN_CUR0_TTU_CNTL1; \
+ uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB; \
+ uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LSB; \
+ uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_MSB; \
+@@ -368,7 +372,11 @@
+ HUBP_SF(HUBPREQ0_NOM_PARAMETERS_2, DST_Y_PER_PTE_ROW_NOM_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_NOM_PARAMETERS_3, REFCYC_PER_PTE_GROUP_NOM_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, mask_sh),\
+- HUBP_SF(HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, mask_sh)
++ HUBP_SF(HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, mask_sh),\
++ HUBP_SF(HUBPREQ0_DCN_CUR0_TTU_CNTL0, REFCYC_PER_REQ_DELIVERY, mask_sh),\
++ HUBP_SF(HUBPREQ0_DCN_CUR0_TTU_CNTL0, QoS_LEVEL_FIXED, mask_sh),\
++ HUBP_SF(HUBPREQ0_DCN_CUR0_TTU_CNTL0, QoS_RAMP_DISABLE, mask_sh),\
++ HUBP_SF(HUBPREQ0_DCN_CUR0_TTU_CNTL1, REFCYC_PER_REQ_DELIVERY_PRE, mask_sh)
+
+ #define HUBP_MASK_SH_LIST_DCN10(mask_sh)\
+ HUBP_MASK_SH_LIST_DCN(mask_sh),\
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4350-drm-amd-display-add-some-DTN-logs-for-input-and-outp.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4350-drm-amd-display-add-some-DTN-logs-for-input-and-outp.patch
new file mode 100644
index 00000000..68b3b737
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4350-drm-amd-display-add-some-DTN-logs-for-input-and-outp.patch
@@ -0,0 +1,208 @@
+From e7700a4c9891e43baf82f7f39189cef1f98c2936 Mon Sep 17 00:00:00 2001
+From: Anthony Koo <Anthony.Koo@amd.com>
+Date: Fri, 13 Apr 2018 09:40:21 -0400
+Subject: [PATCH 4350/5725] drm/amd/display: add some DTN logs for input and
+ output tf
+
+Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
+Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | 25 +++++++++++++
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h | 27 ++++++++++++++
+ .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 41 ++++++++++++++++++++++
+ drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h | 16 +++++++++
+ 4 files changed, 109 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+index c008a71..8c4d9e5 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+@@ -98,6 +98,30 @@ enum gamut_remap_select {
+ GAMUT_REMAP_COMB_COEFF
+ };
+
++void dpp_read_state(struct dpp *dpp_base,
++ struct dcn_dpp_state *s)
++{
++ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
++
++ REG_GET(CM_IGAM_CONTROL,
++ CM_IGAM_LUT_MODE, &s->igam_lut_mode);
++ REG_GET(CM_IGAM_CONTROL,
++ CM_IGAM_INPUT_FORMAT, &s->igam_input_format);
++ REG_GET(CM_DGAM_CONTROL,
++ CM_DGAM_LUT_MODE, &s->dgam_lut_mode);
++ REG_GET(CM_RGAM_CONTROL,
++ CM_RGAM_LUT_MODE, &s->rgam_lut_mode);
++ REG_GET(CM_GAMUT_REMAP_CONTROL,
++ CM_GAMUT_REMAP_MODE, &s->gamut_remap_mode);
++
++ s->gamut_remap_c11_c12 = REG_READ(CM_GAMUT_REMAP_C11_C12);
++ s->gamut_remap_c13_c14 = REG_READ(CM_GAMUT_REMAP_C13_C14);
++ s->gamut_remap_c21_c22 = REG_READ(CM_GAMUT_REMAP_C21_C22);
++ s->gamut_remap_c23_c24 = REG_READ(CM_GAMUT_REMAP_C23_C24);
++ s->gamut_remap_c31_c32 = REG_READ(CM_GAMUT_REMAP_C31_C32);
++ s->gamut_remap_c33_c34 = REG_READ(CM_GAMUT_REMAP_C33_C34);
++}
++
+ /* Program gamut remap in bypass mode */
+ void dpp_set_gamut_remap_bypass(struct dcn10_dpp *dpp)
+ {
+@@ -450,6 +474,7 @@ void dpp1_dppclk_control(
+ }
+
+ static const struct dpp_funcs dcn10_dpp_funcs = {
++ .dpp_read_state = dpp_read_state,
+ .dpp_reset = dpp_reset,
+ .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale,
+ .dpp_get_optimal_number_of_taps = dpp_get_optimal_number_of_taps,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+index 3fccf99..5944a3b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+@@ -44,6 +44,10 @@
+ #define TF_REG_LIST_DCN(id) \
+ SRI(CM_GAMUT_REMAP_CONTROL, CM, id),\
+ SRI(CM_GAMUT_REMAP_C11_C12, CM, id),\
++ SRI(CM_GAMUT_REMAP_C13_C14, CM, id),\
++ SRI(CM_GAMUT_REMAP_C21_C22, CM, id),\
++ SRI(CM_GAMUT_REMAP_C23_C24, CM, id),\
++ SRI(CM_GAMUT_REMAP_C31_C32, CM, id),\
+ SRI(CM_GAMUT_REMAP_C33_C34, CM, id),\
+ SRI(DSCL_EXT_OVERSCAN_LEFT_RIGHT, DSCL, id), \
+ SRI(DSCL_EXT_OVERSCAN_TOP_BOTTOM, DSCL, id), \
+@@ -177,6 +181,14 @@
+ TF_SF(CM0_CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C11_C12, CM_GAMUT_REMAP_C11, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C11_C12, CM_GAMUT_REMAP_C12, mask_sh),\
++ TF_SF(CM0_CM_GAMUT_REMAP_C13_C14, CM_GAMUT_REMAP_C13, mask_sh),\
++ TF_SF(CM0_CM_GAMUT_REMAP_C13_C14, CM_GAMUT_REMAP_C14, mask_sh),\
++ TF_SF(CM0_CM_GAMUT_REMAP_C21_C22, CM_GAMUT_REMAP_C21, mask_sh),\
++ TF_SF(CM0_CM_GAMUT_REMAP_C21_C22, CM_GAMUT_REMAP_C22, mask_sh),\
++ TF_SF(CM0_CM_GAMUT_REMAP_C23_C24, CM_GAMUT_REMAP_C23, mask_sh),\
++ TF_SF(CM0_CM_GAMUT_REMAP_C23_C24, CM_GAMUT_REMAP_C24, mask_sh),\
++ TF_SF(CM0_CM_GAMUT_REMAP_C31_C32, CM_GAMUT_REMAP_C31, mask_sh),\
++ TF_SF(CM0_CM_GAMUT_REMAP_C31_C32, CM_GAMUT_REMAP_C32, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C33_C34, CM_GAMUT_REMAP_C33, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C33_C34, CM_GAMUT_REMAP_C34, mask_sh),\
+ TF_SF(DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_LEFT, mask_sh),\
+@@ -524,6 +536,14 @@
+ type CM_GAMUT_REMAP_MODE; \
+ type CM_GAMUT_REMAP_C11; \
+ type CM_GAMUT_REMAP_C12; \
++ type CM_GAMUT_REMAP_C13; \
++ type CM_GAMUT_REMAP_C14; \
++ type CM_GAMUT_REMAP_C21; \
++ type CM_GAMUT_REMAP_C22; \
++ type CM_GAMUT_REMAP_C23; \
++ type CM_GAMUT_REMAP_C24; \
++ type CM_GAMUT_REMAP_C31; \
++ type CM_GAMUT_REMAP_C32; \
+ type CM_GAMUT_REMAP_C33; \
+ type CM_GAMUT_REMAP_C34; \
+ type CM_COMA_C11; \
+@@ -1095,6 +1115,10 @@ struct dcn_dpp_mask {
+ uint32_t RECOUT_SIZE; \
+ uint32_t CM_GAMUT_REMAP_CONTROL; \
+ uint32_t CM_GAMUT_REMAP_C11_C12; \
++ uint32_t CM_GAMUT_REMAP_C13_C14; \
++ uint32_t CM_GAMUT_REMAP_C21_C22; \
++ uint32_t CM_GAMUT_REMAP_C23_C24; \
++ uint32_t CM_GAMUT_REMAP_C31_C32; \
+ uint32_t CM_GAMUT_REMAP_C33_C34; \
+ uint32_t CM_COMA_C11_C12; \
+ uint32_t CM_COMA_C33_C34; \
+@@ -1407,6 +1431,9 @@ bool dpp_get_optimal_number_of_taps(
+ struct scaler_data *scl_data,
+ const struct scaling_taps *in_taps);
+
++void dpp_read_state(struct dpp *dpp_base,
++ struct dcn_dpp_state *s);
++
+ void dpp_reset(struct dpp *dpp_base);
+
+ void dpp1_cm_program_regamma_lut(
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 48eaf6a..9be751f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -250,6 +250,47 @@ void dcn10_log_hw_state(struct dc *dc)
+ }
+ DTN_INFO("\n");
+
++ DTN_INFO("DPP: IGAM format IGAM mode DGAM mode RGAM mode"
++ " GAMUT mode C11 C12 C13 C14 C21 C22 C23 C24 "
++ "C31 C32 C33 C34\n");
++ for (i = 0; i < pool->pipe_count; i++) {
++ struct dpp *dpp = pool->dpps[i];
++ struct dcn_dpp_state s;
++
++ dpp->funcs->dpp_read_state(dpp, &s);
++
++ DTN_INFO("[%2d]: %11xh %-11s %-11s %-11s"
++ "%08xh %08xh %08xh %08xh %08xh %08xh %08xh",
++ dpp->inst,
++ s.igam_input_format,
++ (s.igam_lut_mode == 0) ? "BypassFixed" :
++ ((s.igam_lut_mode == 1) ? "BypassFloat" :
++ ((s.igam_lut_mode == 2) ? "RAM" :
++ ((s.igam_lut_mode == 3) ? "RAM" :
++ "Unknown"))),
++ (s.dgam_lut_mode == 0) ? "Bypass" :
++ ((s.dgam_lut_mode == 1) ? "sRGB" :
++ ((s.dgam_lut_mode == 2) ? "Ycc" :
++ ((s.dgam_lut_mode == 3) ? "RAM" :
++ ((s.dgam_lut_mode == 4) ? "RAM" :
++ "Unknown")))),
++ (s.rgam_lut_mode == 0) ? "Bypass" :
++ ((s.rgam_lut_mode == 1) ? "sRGB" :
++ ((s.rgam_lut_mode == 2) ? "Ycc" :
++ ((s.rgam_lut_mode == 3) ? "RAM" :
++ ((s.rgam_lut_mode == 4) ? "RAM" :
++ "Unknown")))),
++ s.gamut_remap_mode,
++ s.gamut_remap_c11_c12,
++ s.gamut_remap_c13_c14,
++ s.gamut_remap_c21_c22,
++ s.gamut_remap_c23_c24,
++ s.gamut_remap_c31_c32,
++ s.gamut_remap_c33_c34);
++ DTN_INFO("\n");
++ }
++ DTN_INFO("\n");
++
+ DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE\n");
+ for (i = 0; i < pool->pipe_count; i++) {
+ struct mpcc_state s = {0};
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+index bb7af1b..582458f 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+@@ -44,7 +44,23 @@ struct dpp_grph_csc_adjustment {
+ enum graphics_gamut_adjust_type gamut_adjust_type;
+ };
+
++struct dcn_dpp_state {
++ uint32_t igam_lut_mode;
++ uint32_t igam_input_format;
++ uint32_t dgam_lut_mode;
++ uint32_t rgam_lut_mode;
++ uint32_t gamut_remap_mode;
++ uint32_t gamut_remap_c11_c12;
++ uint32_t gamut_remap_c13_c14;
++ uint32_t gamut_remap_c21_c22;
++ uint32_t gamut_remap_c23_c24;
++ uint32_t gamut_remap_c31_c32;
++ uint32_t gamut_remap_c33_c34;
++};
++
+ struct dpp_funcs {
++ void (*dpp_read_state)(struct dpp *dpp, struct dcn_dpp_state *s);
++
+ void (*dpp_reset)(struct dpp *dpp);
+
+ void (*dpp_set_scaler)(struct dpp *dpp,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4351-drm-amd-display-update-dtn-logging-and-goldens.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4351-drm-amd-display-update-dtn-logging-and-goldens.patch
new file mode 100644
index 00000000..309b2cdf
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4351-drm-amd-display-update-dtn-logging-and-goldens.patch
@@ -0,0 +1,28 @@
+From 13d4975a9f40210010f5d08fc2ea33eae0daacd6 Mon Sep 17 00:00:00 2001
+From: Anthony Koo <Anthony.Koo@amd.com>
+Date: Tue, 17 Apr 2018 12:12:56 -0400
+Subject: [PATCH 4351/5725] drm/amd/display: update dtn logging and goldens
+
+Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
+Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 9be751f..5e7498e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -260,7 +260,7 @@ void dcn10_log_hw_state(struct dc *dc)
+ dpp->funcs->dpp_read_state(dpp, &s);
+
+ DTN_INFO("[%2d]: %11xh %-11s %-11s %-11s"
+- "%08xh %08xh %08xh %08xh %08xh %08xh %08xh",
++ "%8x %08xh %08xh %08xh %08xh %08xh %08xh",
+ dpp->inst,
+ s.igam_input_format,
+ (s.igam_lut_mode == 0) ? "BypassFixed" :
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4352-drm-amd-display-Correct-rounding-calcs-in-mod_freesy.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4352-drm-amd-display-Correct-rounding-calcs-in-mod_freesy.patch
new file mode 100644
index 00000000..393b1e80
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4352-drm-amd-display-Correct-rounding-calcs-in-mod_freesy.patch
@@ -0,0 +1,67 @@
+From f694980e6ab2d0658efb2f14b48c798342237276 Mon Sep 17 00:00:00 2001
+From: Anthony Koo <Anthony.Koo@amd.com>
+Date: Tue, 17 Apr 2018 11:40:31 -0400
+Subject: [PATCH 4352/5725] drm/amd/display: Correct rounding calcs in
+ mod_freesync_is_valid_range
+
+Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
+Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ .../drm/amd/display/modules/freesync/freesync.c | 39 +++++++++++++++++++---
+ 1 file changed, 34 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+index 349387e..769f467 100644
+--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
++++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+@@ -887,12 +887,41 @@ bool mod_freesync_is_valid_range(struct mod_freesync *mod_freesync,
+ unsigned long long nominal_field_rate_in_uhz =
+ mod_freesync_calc_nominal_field_rate(stream);
+
+- /* Allow for some rounding error of actual video timing by taking ceil.
+- * For example, 144 Hz mode timing may actually be 143.xxx Hz when
+- * calculated from pixel rate and vertical/horizontal totals, but
+- * this should be allowed instead of blocking FreeSync.
++ /* Typically nominal refresh calculated can have some fractional part.
++ * Allow for some rounding error of actual video timing by taking floor
++ * of caps and request. Round the nominal refresh rate.
++ *
++ * Dividing will convert everything to units in Hz although input
++ * variable name is in uHz!
++ *
++ * Also note, this takes care of rounding error on the nominal refresh
++ * so by rounding error we only expect it to be off by a small amount,
++ * such as < 0.1 Hz. i.e. 143.9xxx or 144.1xxx.
++ *
++ * Example 1. Caps Min = 40 Hz, Max = 144 Hz
++ * Request Min = 40 Hz, Max = 144 Hz
++ * Nominal = 143.5x Hz rounded to 144 Hz
++ * This function should allow this as valid request
++ *
++ * Example 2. Caps Min = 40 Hz, Max = 144 Hz
++ * Request Min = 40 Hz, Max = 144 Hz
++ * Nominal = 144.4x Hz rounded to 144 Hz
++ * This function should allow this as valid request
++ *
++ * Example 3. Caps Min = 40 Hz, Max = 144 Hz
++ * Request Min = 40 Hz, Max = 144 Hz
++ * Nominal = 120.xx Hz rounded to 120 Hz
++ * This function should return NOT valid since the requested
++ * max is greater than current timing's nominal
++ *
++ * Example 4. Caps Min = 40 Hz, Max = 120 Hz
++ * Request Min = 40 Hz, Max = 120 Hz
++ * Nominal = 144.xx Hz rounded to 144 Hz
++ * This function should return NOT valid since the nominal
++ * is greater than the capability's max refresh
+ */
+- nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz, 1000000);
++ nominal_field_rate_in_uhz =
++ div_u64(nominal_field_rate_in_uhz + 500000, 1000000);
+ min_refresh_cap_in_uhz /= 1000000;
+ max_refresh_cap_in_uhz /= 1000000;
+ min_refresh_request_in_uhz /= 1000000;
+--
+2.7.4
+
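A minimal userspace sketch of the rounding the hunk above introduces (illustrative only, not part of the patch series; div_u64() is replaced by a plain 64-bit divide so the snippet builds outside the kernel):

    #include <stdint.h>
    #include <stdio.h>

    /* Convert a nominal field rate from uHz to Hz, rounding to the nearest
     * Hz instead of truncating, mirroring the patched line
     * div_u64(nominal_field_rate_in_uhz + 500000, 1000000). */
    static uint64_t nominal_uhz_to_hz(uint64_t rate_uhz)
    {
            return (rate_uhz + 500000ULL) / 1000000ULL;
    }

    int main(void)
    {
            /* 143.5xx Hz and 144.4xx Hz both land on 144 Hz, matching
             * examples 1 and 2 in the patch comment; 120.6 Hz rounds to
             * 121 Hz rather than truncating to 120 Hz. */
            printf("%llu\n", (unsigned long long)nominal_uhz_to_hz(143512345ULL));
            printf("%llu\n", (unsigned long long)nominal_uhz_to_hz(144412345ULL));
            printf("%llu\n", (unsigned long long)nominal_uhz_to_hz(120600000ULL));
            return 0;
    }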
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4353-drm-amd-display-compact-the-rq-dlg-ttu-log.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4353-drm-amd-display-compact-the-rq-dlg-ttu-log.patch
new file mode 100644
index 00000000..b979adfe
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4353-drm-amd-display-compact-the-rq-dlg-ttu-log.patch
@@ -0,0 +1,390 @@
+From 04151c4a461ca0336af26b19039f63923239b76e Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Wed, 11 Apr 2018 11:51:32 -0400
+Subject: [PATCH 4353/5725] drm/amd/display: compact the rq/dlg/ttu log
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 4 +-
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 24 ++-
+ .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 235 +++++++++------------
+ drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h | 22 +-
+ 4 files changed, 128 insertions(+), 157 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+index 759fcd1..159bebc 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+@@ -763,10 +763,10 @@ void min_set_viewport(
+ PRI_VIEWPORT_Y_START_C, viewport_c->y);
+ }
+
+-void hubp1_read_state(struct hubp *hubp,
+- struct dcn_hubp_state *s)
++void hubp1_read_state(struct hubp *hubp)
+ {
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
++ struct dcn_hubp_state *s = &hubp1->state;
+ struct _vcs_dpi_display_dlg_regs_st *dlg_attr = &s->dlg_attr;
+ struct _vcs_dpi_display_ttu_regs_st *ttu_attr = &s->ttu_attr;
+ struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+index 02045a8..fe9b8c4 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+@@ -619,8 +619,29 @@ struct dcn_mi_mask {
+ DCN_HUBP_REG_FIELD_LIST(uint32_t);
+ };
+
++struct dcn_hubp_state {
++ struct _vcs_dpi_display_dlg_regs_st dlg_attr;
++ struct _vcs_dpi_display_ttu_regs_st ttu_attr;
++ struct _vcs_dpi_display_rq_regs_st rq_regs;
++ uint32_t pixel_format;
++ uint32_t inuse_addr_hi;
++ uint32_t viewport_width;
++ uint32_t viewport_height;
++ uint32_t rotation_angle;
++ uint32_t h_mirror_en;
++ uint32_t sw_mode;
++ uint32_t dcc_en;
++ uint32_t blank_en;
++ uint32_t underflow_status;
++ uint32_t ttu_disable;
++ uint32_t min_ttu_vblank;
++ uint32_t qos_level_low_wm;
++ uint32_t qos_level_high_wm;
++};
++
+ struct dcn10_hubp {
+ struct hubp base;
++ struct dcn_hubp_state state;
+ const struct dcn_mi_registers *hubp_regs;
+ const struct dcn_mi_shift *hubp_shift;
+ const struct dcn_mi_mask *hubp_mask;
+@@ -698,8 +719,7 @@ void dcn10_hubp_construct(
+ const struct dcn_mi_shift *hubp_shift,
+ const struct dcn_mi_mask *hubp_mask);
+
+-void hubp1_read_state(struct hubp *hubp,
+- struct dcn_hubp_state *s);
++void hubp1_read_state(struct hubp *hubp);
+
+ enum cursor_pitch hubp1_get_cursor_pitch(unsigned int pitch);
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 5e7498e..fe52cbc 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -112,143 +112,127 @@ void dcn10_log_hubbub_state(struct dc *dc)
+ DTN_INFO("\n");
+ }
+
+-static void print_rq_dlg_ttu_regs(struct dc_context *dc_ctx, struct dcn_hubp_state *s)
+-{
+- struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
+- struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;
+- struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
+-
+- DTN_INFO("========Requester========\n");
+- DTN_INFO("drq_expansion_mode = 0x%0x\n", rq_regs->drq_expansion_mode);
+- DTN_INFO("prq_expansion_mode = 0x%0x\n", rq_regs->prq_expansion_mode);
+- DTN_INFO("mrq_expansion_mode = 0x%0x\n", rq_regs->mrq_expansion_mode);
+- DTN_INFO("crq_expansion_mode = 0x%0x\n", rq_regs->crq_expansion_mode);
+- DTN_INFO("plane1_base_address = 0x%0x\n", rq_regs->plane1_base_address);
+- DTN_INFO("==<LUMA>==\n");
+- DTN_INFO("chunk_size = 0x%0x\n", rq_regs->rq_regs_l.chunk_size);
+- DTN_INFO("min_chunk_size = 0x%0x\n", rq_regs->rq_regs_l.min_chunk_size);
+- DTN_INFO("meta_chunk_size = 0x%0x\n", rq_regs->rq_regs_l.meta_chunk_size);
+- DTN_INFO("min_meta_chunk_size = 0x%0x\n", rq_regs->rq_regs_l.min_meta_chunk_size);
+- DTN_INFO("dpte_group_size = 0x%0x\n", rq_regs->rq_regs_l.dpte_group_size);
+- DTN_INFO("mpte_group_size = 0x%0x\n", rq_regs->rq_regs_l.mpte_group_size);
+- DTN_INFO("swath_height = 0x%0x\n", rq_regs->rq_regs_l.swath_height);
+- DTN_INFO("pte_row_height_linear = 0x%0x\n", rq_regs->rq_regs_l.pte_row_height_linear);
+- DTN_INFO("==<CHROMA>==\n");
+- DTN_INFO("chunk_size = 0x%0x\n", rq_regs->rq_regs_c.chunk_size);
+- DTN_INFO("min_chunk_size = 0x%0x\n", rq_regs->rq_regs_c.min_chunk_size);
+- DTN_INFO("meta_chunk_size = 0x%0x\n", rq_regs->rq_regs_c.meta_chunk_size);
+- DTN_INFO("min_meta_chunk_size = 0x%0x\n", rq_regs->rq_regs_c.min_meta_chunk_size);
+- DTN_INFO("dpte_group_size = 0x%0x\n", rq_regs->rq_regs_c.dpte_group_size);
+- DTN_INFO("mpte_group_size = 0x%0x\n", rq_regs->rq_regs_c.mpte_group_size);
+- DTN_INFO("swath_height = 0x%0x\n", rq_regs->rq_regs_c.swath_height);
+- DTN_INFO("pte_row_height_linear = 0x%0x\n", rq_regs->rq_regs_c.pte_row_height_linear);
+-
+- DTN_INFO("========DLG========\n");
+- DTN_INFO("refcyc_h_blank_end = 0x%0x\n", dlg_regs->refcyc_h_blank_end);
+- DTN_INFO("dlg_vblank_end = 0x%0x\n", dlg_regs->dlg_vblank_end);
+- DTN_INFO("min_dst_y_next_start = 0x%0x\n", dlg_regs->min_dst_y_next_start);
+- DTN_INFO("refcyc_per_htotal = 0x%0x\n", dlg_regs->refcyc_per_htotal);
+- DTN_INFO("refcyc_x_after_scaler = 0x%0x\n", dlg_regs->refcyc_x_after_scaler);
+- DTN_INFO("dst_y_after_scaler = 0x%0x\n", dlg_regs->dst_y_after_scaler);
+- DTN_INFO("dst_y_prefetch = 0x%0x\n", dlg_regs->dst_y_prefetch);
+- DTN_INFO("dst_y_per_vm_vblank = 0x%0x\n", dlg_regs->dst_y_per_vm_vblank);
+- DTN_INFO("dst_y_per_row_vblank = 0x%0x\n", dlg_regs->dst_y_per_row_vblank);
+- DTN_INFO("dst_y_per_vm_flip = 0x%0x\n", dlg_regs->dst_y_per_vm_flip);
+- DTN_INFO("dst_y_per_row_flip = 0x%0x\n", dlg_regs->dst_y_per_row_flip);
+- DTN_INFO("ref_freq_to_pix_freq = 0x%0x\n", dlg_regs->ref_freq_to_pix_freq);
+- DTN_INFO("vratio_prefetch = 0x%0x\n", dlg_regs->vratio_prefetch);
+- DTN_INFO("vratio_prefetch_c = 0x%0x\n", dlg_regs->vratio_prefetch_c);
+- DTN_INFO("refcyc_per_pte_group_vblank_l = 0x%0x\n", dlg_regs->refcyc_per_pte_group_vblank_l);
+- DTN_INFO("refcyc_per_pte_group_vblank_c = 0x%0x\n", dlg_regs->refcyc_per_pte_group_vblank_c);
+- DTN_INFO("refcyc_per_meta_chunk_vblank_l = 0x%0x\n", dlg_regs->refcyc_per_meta_chunk_vblank_l);
+- DTN_INFO("refcyc_per_meta_chunk_vblank_c = 0x%0x\n", dlg_regs->refcyc_per_meta_chunk_vblank_c);
+- DTN_INFO("refcyc_per_pte_group_flip_l = 0x%0x\n", dlg_regs->refcyc_per_pte_group_flip_l);
+- DTN_INFO("refcyc_per_pte_group_flip_c = 0x%0x\n", dlg_regs->refcyc_per_pte_group_flip_c);
+- DTN_INFO("refcyc_per_meta_chunk_flip_l = 0x%0x\n", dlg_regs->refcyc_per_meta_chunk_flip_l);
+- DTN_INFO("refcyc_per_meta_chunk_flip_c = 0x%0x\n", dlg_regs->refcyc_per_meta_chunk_flip_c);
+- DTN_INFO("dst_y_per_pte_row_nom_l = 0x%0x\n", dlg_regs->dst_y_per_pte_row_nom_l);
+- DTN_INFO("dst_y_per_pte_row_nom_c = 0x%0x\n", dlg_regs->dst_y_per_pte_row_nom_c);
+- DTN_INFO("refcyc_per_pte_group_nom_l = 0x%0x\n", dlg_regs->refcyc_per_pte_group_nom_l);
+- DTN_INFO("refcyc_per_pte_group_nom_c = 0x%0x\n", dlg_regs->refcyc_per_pte_group_nom_c);
+- DTN_INFO("dst_y_per_meta_row_nom_l = 0x%0x\n", dlg_regs->dst_y_per_meta_row_nom_l);
+- DTN_INFO("dst_y_per_meta_row_nom_c = 0x%0x\n", dlg_regs->dst_y_per_meta_row_nom_c);
+- DTN_INFO("refcyc_per_meta_chunk_nom_l = 0x%0x\n", dlg_regs->refcyc_per_meta_chunk_nom_l);
+- DTN_INFO("refcyc_per_meta_chunk_nom_c = 0x%0x\n", dlg_regs->refcyc_per_meta_chunk_nom_c);
+- DTN_INFO("refcyc_per_line_delivery_pre_l = 0x%0x\n", dlg_regs->refcyc_per_line_delivery_pre_l);
+- DTN_INFO("refcyc_per_line_delivery_pre_c = 0x%0x\n", dlg_regs->refcyc_per_line_delivery_pre_c);
+- DTN_INFO("refcyc_per_line_delivery_l = 0x%0x\n", dlg_regs->refcyc_per_line_delivery_l);
+- DTN_INFO("refcyc_per_line_delivery_c = 0x%0x\n", dlg_regs->refcyc_per_line_delivery_c);
+- DTN_INFO("chunk_hdl_adjust_cur0 = 0x%0x\n", dlg_regs->chunk_hdl_adjust_cur0);
+- DTN_INFO("dst_y_offset_cur1 = 0x%0x\n", dlg_regs->dst_y_offset_cur1);
+- DTN_INFO("chunk_hdl_adjust_cur1 = 0x%0x\n", dlg_regs->chunk_hdl_adjust_cur1);
+- DTN_INFO("vready_after_vcount0 = 0x%0x\n", dlg_regs->vready_after_vcount0);
+- DTN_INFO("dst_y_delta_drq_limit = 0x%0x\n", dlg_regs->dst_y_delta_drq_limit);
+- DTN_INFO("xfc_reg_transfer_delay = 0x%0x\n", dlg_regs->xfc_reg_transfer_delay);
+- DTN_INFO("xfc_reg_precharge_delay = 0x%0x\n", dlg_regs->xfc_reg_precharge_delay);
+- DTN_INFO("xfc_reg_remote_surface_flip_latency = 0x%0x\n", dlg_regs->xfc_reg_remote_surface_flip_latency);
+-
+- DTN_INFO("========TTU========\n");
+- DTN_INFO("qos_level_low_wm = 0x%0x\n", ttu_regs->qos_level_low_wm);
+- DTN_INFO("qos_level_high_wm = 0x%0x\n", ttu_regs->qos_level_high_wm);
+- DTN_INFO("min_ttu_vblank = 0x%0x\n", ttu_regs->min_ttu_vblank);
+- DTN_INFO("qos_level_flip = 0x%0x\n", ttu_regs->qos_level_flip);
+- DTN_INFO("refcyc_per_req_delivery_pre_l = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_pre_l);
+- DTN_INFO("refcyc_per_req_delivery_l = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_l);
+- DTN_INFO("refcyc_per_req_delivery_pre_c = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_pre_c);
+- DTN_INFO("refcyc_per_req_delivery_c = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_c);
+- DTN_INFO("refcyc_per_req_delivery_cur0 = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_cur0);
+- DTN_INFO("refcyc_per_req_delivery_pre_cur0 = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_pre_cur0);
+- DTN_INFO("refcyc_per_req_delivery_cur1 = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_cur1);
+- DTN_INFO("refcyc_per_req_delivery_pre_cur1 = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_pre_cur1);
+- DTN_INFO("qos_level_fixed_l = 0x%0x\n", ttu_regs->qos_level_fixed_l);
+- DTN_INFO("qos_ramp_disable_l = 0x%0x\n", ttu_regs->qos_ramp_disable_l);
+- DTN_INFO("qos_level_fixed_c = 0x%0x\n", ttu_regs->qos_level_fixed_c);
+- DTN_INFO("qos_ramp_disable_c = 0x%0x\n", ttu_regs->qos_ramp_disable_c);
+- DTN_INFO("qos_level_fixed_cur0 = 0x%0x\n", ttu_regs->qos_level_fixed_cur0);
+- DTN_INFO("qos_ramp_disable_cur0 = 0x%0x\n", ttu_regs->qos_ramp_disable_cur0);
+- DTN_INFO("qos_level_fixed_cur1 = 0x%0x\n", ttu_regs->qos_level_fixed_cur1);
+- DTN_INFO("qos_ramp_disable_cur1 = 0x%0x\n", ttu_regs->qos_ramp_disable_cur1);
+-}
+-
+-void dcn10_log_hw_state(struct dc *dc)
++static void dcn10_log_hubp_states(struct dc *dc)
+ {
+ struct dc_context *dc_ctx = dc->ctx;
+ struct resource_pool *pool = dc->res_pool;
+ int i;
+
+- DTN_INFO_BEGIN();
+-
+- dcn10_log_hubbub_state(dc);
+-
+ DTN_INFO("HUBP: format addr_hi width height"
+ " rot mir sw_mode dcc_en blank_en ttu_dis underflow"
+ " min_ttu_vblank qos_low_wm qos_high_wm\n");
+ for (i = 0; i < pool->pipe_count; i++) {
+ struct hubp *hubp = pool->hubps[i];
+- struct dcn_hubp_state s;
++ struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
+
+- hubp->funcs->hubp_read_state(hubp, &s);
++ hubp->funcs->hubp_read_state(hubp);
+
+ DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh"
+ " %6d %8d %7d %8xh",
+ hubp->inst,
+- s.pixel_format,
+- s.inuse_addr_hi,
+- s.viewport_width,
+- s.viewport_height,
+- s.rotation_angle,
+- s.h_mirror_en,
+- s.sw_mode,
+- s.dcc_en,
+- s.blank_en,
+- s.ttu_disable,
+- s.underflow_status);
+- DTN_INFO_MICRO_SEC(s.min_ttu_vblank);
+- DTN_INFO_MICRO_SEC(s.qos_level_low_wm);
+- DTN_INFO_MICRO_SEC(s.qos_level_high_wm);
++ s->pixel_format,
++ s->inuse_addr_hi,
++ s->viewport_width,
++ s->viewport_height,
++ s->rotation_angle,
++ s->h_mirror_en,
++ s->sw_mode,
++ s->dcc_en,
++ s->blank_en,
++ s->ttu_disable,
++ s->underflow_status);
++ DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
++ DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
++ DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
+ DTN_INFO("\n");
+ }
++
++ DTN_INFO("\n=========RQ========\n");
++ DTN_INFO("HUBP: drq_exp_m prq_exp_m mrq_exp_m crq_exp_m plane1_ba L:chunk_s min_chu_s meta_ch_s"
++ " min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h C:chunk_s min_chu_s meta_ch_s"
++ " min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h\n");
++ for (i = 0; i < pool->pipe_count; i++) {
++ struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
++ struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
++
++ DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
++ i, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
++ rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
++ rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
++ rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
++ rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
++ rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
++ rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
++ rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
++ rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
++ }
++
++ DTN_INFO("========DLG========\n");
++ DTN_INFO("HUBP: rc_hbe dlg_vbe min_d_y_n rc_per_ht rc_x_a_s "
++ " dst_y_a_s dst_y_pf dst_y_vvb dst_y_rvb dst_y_vfl dst_y_rfl rf_pix_fq"
++ " vratio_pf vrat_pf_c rc_pg_vbl rc_pg_vbc rc_mc_vbl rc_mc_vbc rc_pg_fll"
++ " rc_pg_flc rc_mc_fll rc_mc_flc pr_nom_l pr_nom_c rc_pg_nl rc_pg_nc "
++ " mr_nom_l mr_nom_c rc_mc_nl rc_mc_nc rc_ld_pl rc_ld_pc rc_ld_l "
++ " rc_ld_c cha_cur0 ofst_cur1 cha_cur1 vr_af_vc0 ddrq_limt x_rt_dlay"
++ " x_rp_dlay x_rr_sfl\n");
++ for (i = 0; i < pool->pipe_count; i++) {
++ struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
++ struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
++
++ DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
++ "% 8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
++ " %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
++ i, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
++ dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
++ dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
++ dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
++ dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
++ dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
++ dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
++ dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
++ dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
++ dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
++ dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
++ dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
++ dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
++ dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
++ dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
++ dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
++ dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
++ dlg_regs->xfc_reg_remote_surface_flip_latency);
++ }
++
++ DTN_INFO("========TTU========\n");
++ DTN_INFO("HUBP: qos_ll_wm qos_lh_wm mn_ttu_vb qos_l_flp rc_rd_p_l rc_rd_l rc_rd_p_c"
++ " rc_rd_c rc_rd_c0 rc_rd_pc0 rc_rd_c1 rc_rd_pc1 qos_lf_l qos_rds_l"
++ " qos_lf_c qos_rds_c qos_lf_c0 qos_rds_c0 qos_lf_c1 qos_rds_c1\n");
++ for (i = 0; i < pool->pipe_count; i++) {
++ struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
++ struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;
++
++ DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
++ i, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
++ ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
++ ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
++ ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
++ ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
++ ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
++ ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
++ }
+ DTN_INFO("\n");
++}
++
++void dcn10_log_hw_state(struct dc *dc)
++{
++ struct dc_context *dc_ctx = dc->ctx;
++ struct resource_pool *pool = dc->res_pool;
++ int i;
++
++ DTN_INFO_BEGIN();
++
++ dcn10_log_hubbub_state(dc);
++
++ dcn10_log_hubp_states(dc);
+
+ DTN_INFO("DPP: IGAM format IGAM mode DGAM mode RGAM mode"
+ " GAMUT mode C11 C12 C13 C14 C21 C22 C23 C24 "
+@@ -340,19 +324,6 @@ void dcn10_log_hw_state(struct dc *dc)
+ }
+ DTN_INFO("\n");
+
+- for (i = 0; i < pool->pipe_count; i++) {
+- struct hubp *hubp = pool->hubps[i];
+- struct dcn_hubp_state s = {0};
+-
+- if (!dc->current_state->res_ctx.pipe_ctx[i].stream)
+- continue;
+-
+- hubp->funcs->hubp_read_state(hubp, &s);
+- DTN_INFO("RQ-DLG-TTU registers for HUBP%d:\n", i);
+- print_rq_dlg_ttu_regs(dc_ctx, &s);
+- DTN_INFO("\n");
+- }
+-
+ DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n"
+ "dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n",
+ dc->current_state->bw.dcn.calc_clk.dcfclk_khz,
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+index 3866147..331f8ff 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+@@ -56,26 +56,6 @@ struct hubp {
+ bool power_gated;
+ };
+
+-struct dcn_hubp_state {
+- struct _vcs_dpi_display_dlg_regs_st dlg_attr;
+- struct _vcs_dpi_display_ttu_regs_st ttu_attr;
+- struct _vcs_dpi_display_rq_regs_st rq_regs;
+- uint32_t pixel_format;
+- uint32_t inuse_addr_hi;
+- uint32_t viewport_width;
+- uint32_t viewport_height;
+- uint32_t rotation_angle;
+- uint32_t h_mirror_en;
+- uint32_t sw_mode;
+- uint32_t dcc_en;
+- uint32_t blank_en;
+- uint32_t underflow_status;
+- uint32_t ttu_disable;
+- uint32_t min_ttu_vblank;
+- uint32_t qos_level_low_wm;
+- uint32_t qos_level_high_wm;
+-};
+-
+ struct hubp_funcs {
+ void (*hubp_setup)(
+ struct hubp *hubp,
+@@ -140,7 +120,7 @@ struct hubp_funcs {
+
+ void (*hubp_clk_cntl)(struct hubp *hubp, bool enable);
+ void (*hubp_vtg_sel)(struct hubp *hubp, uint32_t otg_inst);
+- void (*hubp_read_state)(struct hubp *hubp, struct dcn_hubp_state *s);
++ void (*hubp_read_state)(struct hubp *hubp);
+
+ };
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4354-drm-amd-display-Add-assert-that-chroma-pitch-is-non-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4354-drm-amd-display-Add-assert-that-chroma-pitch-is-non-.patch
new file mode 100644
index 00000000..b9b0c57a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4354-drm-amd-display-Add-assert-that-chroma-pitch-is-non-.patch
@@ -0,0 +1,31 @@
+From 8a0914695a1a2a96308aa985c77515052b147864 Mon Sep 17 00:00:00 2001
+From: Julian Parkin <jparkin@amd.com>
+Date: Fri, 13 Apr 2018 13:23:02 -0400
+Subject: [PATCH 4354/5725] drm/amd/display: Add assert that chroma pitch is
+ non zero
+
+Signed-off-by: Julian Parkin <jparkin@amd.com>
+Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+index 159bebc..0cbc83e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+@@ -146,6 +146,9 @@ void hubp1_program_size_and_rotation(
+ * 444 or 420 luma
+ */
+ if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
++ ASSERT(plane_size->video.chroma_pitch != 0);
++ /* Chroma pitch zero can cause system hang! */
++
+ pitch = plane_size->video.luma_pitch - 1;
+ meta_pitch = dcc->video.meta_pitch_l - 1;
+ pitch_c = plane_size->video.chroma_pitch - 1;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4355-drm-amd-display-Update-MST-edid-property-every-time.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4355-drm-amd-display-Update-MST-edid-property-every-time.patch
new file mode 100644
index 00000000..1ac3e5b8
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4355-drm-amd-display-Update-MST-edid-property-every-time.patch
@@ -0,0 +1,45 @@
+From 737bdcb21b913c95313a0ae5e7fe5e1607d0d548 Mon Sep 17 00:00:00 2001
+From: "Jerry (Fangzhi) Zuo" <Jerry.Zuo@amd.com>
+Date: Tue, 17 Apr 2018 13:49:48 -0400
+Subject: [PATCH 4355/5725] drm/amd/display: Update MST edid property every
+ time
+
+Extended fix to: "Don't read EDID in atomic_check"
+
+Fix display property not observed in GUI display after hot plug.
+
+Call drm_mode_connector_update_edid_property every time in
+.get_modes hook, due to the fact that edid property is getting
+removed from usermode ioctl DRM_IOCTL_MODE_GETCONNECTOR each time
+in hot unplug.
+
+Signed-off-by: Jerry (Fangzhi) Zuo <Jerry.Zuo@amd.com>
+Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
+
+Conflicts:
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+
+Change-Id: Ibc3033985455f1dfff079a7c1cfc2aa14de294f0
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+index a9b76fd..131736e 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -269,9 +269,9 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
+ }
+
+ drm_mode_connector_update_edid_property(
+- &aconnector->base, aconnector->edid);
++ &aconnector->base, aconnector->edid);
+
+- ret = dm_connector_update_modes(connector, aconnector->edid);
++ ret = drm_add_edid_modes(connector, aconnector->edid);
+
+ return ret;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4356-drm-amd-display-reprogram-infoframe-during-apply_ctx.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4356-drm-amd-display-reprogram-infoframe-during-apply_ctx.patch
new file mode 100644
index 00000000..1e29e59b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4356-drm-amd-display-reprogram-infoframe-during-apply_ctx.patch
@@ -0,0 +1,64 @@
+From 0880629ed07eb5ff116ab0eeea26616144f413c8 Mon Sep 17 00:00:00 2001
+From: Julian Parkin <jparkin@amd.com>
+Date: Tue, 17 Apr 2018 11:49:06 -0400
+Subject: [PATCH 4356/5725] drm/amd/display: reprogram infoframe during
+ apply_ctx_to_hw
+
+To ensure the infoframe gets updated during an SDR/HDR switch,
+this change adds a new function to check if the HDR static
+metadata has changed and adds it to the is_timing_changed and
+pipe_need_reprogram checks.
+
+Signed-off-by: Julian Parkin <jparkin@amd.com>
+Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 18 ++++++++++++++++++
+ 1 file changed, 18 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index ad41b64..60cf748 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -1309,6 +1309,19 @@ bool dc_add_all_planes_for_stream(
+ }
+
+
++static bool is_hdr_static_meta_changed(struct dc_stream_state *cur_stream,
++ struct dc_stream_state *new_stream)
++{
++ if (cur_stream == NULL)
++ return true;
++
++ if (memcmp(&cur_stream->hdr_static_metadata,
++ &new_stream->hdr_static_metadata,
++ sizeof(struct dc_info_packet)) != 0)
++ return true;
++
++ return false;
++}
+
+ static bool is_timing_changed(struct dc_stream_state *cur_stream,
+ struct dc_stream_state *new_stream)
+@@ -1344,6 +1357,9 @@ static bool are_stream_backends_same(
+ if (is_timing_changed(stream_a, stream_b))
+ return false;
+
++ if (is_hdr_static_meta_changed(stream_a, stream_b))
++ return false;
++
+ return true;
+ }
+
+@@ -2441,6 +2457,8 @@ bool pipe_need_reprogram(
+ if (is_timing_changed(pipe_ctx_old->stream, pipe_ctx->stream))
+ return true;
+
++ if (is_hdr_static_meta_changed(pipe_ctx_old->stream, pipe_ctx->stream))
++ return true;
+
+ return false;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4357-drm-amd-display-Check-dc_sink-every-time-in-MST-hotp.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4357-drm-amd-display-Check-dc_sink-every-time-in-MST-hotp.patch
new file mode 100644
index 00000000..3254a599
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4357-drm-amd-display-Check-dc_sink-every-time-in-MST-hotp.patch
@@ -0,0 +1,44 @@
+From 186a555a1e2bfbd94d24d59ef7b42731d0e7fa70 Mon Sep 17 00:00:00 2001
+From: "Jerry (Fangzhi) Zuo" <Jerry.Zuo@amd.com>
+Date: Tue, 17 Apr 2018 14:39:09 -0400
+Subject: [PATCH 4357/5725] drm/amd/display: Check dc_sink every time in MST
+ hotplug
+
+Extended fix to: "Don't read EDID in atomic_check"
+
+Fix the issue of a missing dc_sink in .mode_valid during the hot plug routine.
+
+The dc_sink needs to be checked every time in the .get_modes hook after
+checking the edid, since the edid is not removed on hot unplug but the
+dc_sink is.
+
+Signed-off-by: Jerry (Fangzhi) Zuo <Jerry.Zuo@amd.com>
+Reviewed-by: Roman Li <Roman.Li@amd.com>
+Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
+
+Conflicts:
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+
+Change-Id: I8ff7af544334884b5df53dd95ed90c590cf7ceda
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+index 131736e..e3110d6 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -252,8 +252,8 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
+ if (!aconnector->dc_sink) {
+ struct dc_sink *dc_sink;
+ struct dc_sink_init_data init_params = {
+- .link = aconnector->dc_link,
+- .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
++ .link = aconnector->dc_link,
++ .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
+ dc_sink = dc_link_add_remote_sink(
+ aconnector->dc_link,
+ (uint8_t *)aconnector->edid,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4358-drm-amd-display-to-synchronize-the-hubp-and-dpp-prog.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4358-drm-amd-display-to-synchronize-the-hubp-and-dpp-prog.patch
new file mode 100644
index 00000000..8424dfa1
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4358-drm-amd-display-to-synchronize-the-hubp-and-dpp-prog.patch
@@ -0,0 +1,72 @@
+From 4c8f4ced643046fc074fe70ff45f8b9d95e88d1c Mon Sep 17 00:00:00 2001
+From: Martin Tsai <Martin.Tsai@amd.com>
+Date: Tue, 17 Apr 2018 17:20:06 -0400
+Subject: [PATCH 4358/5725] drm/amd/display: to synchronize the hubp and dpp
+ programming in cursor control
+
+Signed-off-by: Martin Tsai <Martin.Tsai@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 18 ++++++++++++++++++
+ 1 file changed, 18 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+index 3b2ddbd..3732a1d 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+@@ -178,6 +178,7 @@ bool dc_stream_set_cursor_attributes(
+ int i;
+ struct dc *core_dc;
+ struct resource_context *res_ctx;
++ struct pipe_ctx *pipe_to_program = NULL;
+
+ if (NULL == stream) {
+ dm_error("DC: dc_stream is NULL!\n");
+@@ -205,9 +206,17 @@ bool dc_stream_set_cursor_attributes(
+ if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
+ continue;
+
++ if (!pipe_to_program) {
++ pipe_to_program = pipe_ctx;
++ core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, true);
++ }
+
+ core_dc->hwss.set_cursor_attribute(pipe_ctx);
+ }
++
++ if (pipe_to_program)
++ core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, false);
++
+ return true;
+ }
+
+@@ -218,6 +227,7 @@ bool dc_stream_set_cursor_position(
+ int i;
+ struct dc *core_dc;
+ struct resource_context *res_ctx;
++ struct pipe_ctx *pipe_to_program = NULL;
+
+ if (NULL == stream) {
+ dm_error("DC: dc_stream is NULL!\n");
+@@ -243,9 +253,17 @@ bool dc_stream_set_cursor_position(
+ !pipe_ctx->plane_res.ipp)
+ continue;
+
++ if (!pipe_to_program) {
++ pipe_to_program = pipe_ctx;
++ core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, true);
++ }
++
+ core_dc->hwss.set_cursor_position(pipe_ctx);
+ }
+
++ if (pipe_to_program)
++ core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, false);
++
+ return true;
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4359-drm-amd-display-dal-3.1.44.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4359-drm-amd-display-dal-3.1.44.patch
new file mode 100644
index 00000000..28372b81
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4359-drm-amd-display-dal-3.1.44.patch
@@ -0,0 +1,28 @@
+From dbdf5994b4a27dabc1935257a09a6a414d1bec10 Mon Sep 17 00:00:00 2001
+From: Tony Cheng <tony.cheng@amd.com>
+Date: Mon, 16 Apr 2018 13:30:41 -0400
+Subject: [PATCH 4359/5725] drm/amd/display: dal 3.1.44
+
+Signed-off-by: Tony Cheng <tony.cheng@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 27c2ce0..936adbf 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -38,7 +38,7 @@
+ #include "inc/compressor.h"
+ #include "dml/display_mode_lib.h"
+
+-#define DC_VER "3.1.43"
++#define DC_VER "3.1.44"
+
+ #define MAX_SURFACES 3
+ #define MAX_STREAMS 6
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4360-drm-amd-display-Use-int-for-calculating-vline-start.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4360-drm-amd-display-Use-int-for-calculating-vline-start.patch
new file mode 100644
index 00000000..9688210d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4360-drm-amd-display-Use-int-for-calculating-vline-start.patch
@@ -0,0 +1,38 @@
+From 3c0f6f60e2ce5e82eb0a33aeca9e3914c31be55c Mon Sep 17 00:00:00 2001
+From: Harry Wentland <harry.wentland@amd.com>
+Date: Tue, 10 Apr 2018 16:06:34 -0400
+Subject: [PATCH 4360/5725] drm/amd/display: Use int for calculating vline
+ start
+
+We are not sure these calculations will never need negative numbers. Use
+signed integers and warn and cap at 0 if this ever happens.
+
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+index 2c5dbec..c734b7f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+@@ -96,10 +96,10 @@ static void optc1_disable_stereo(struct timing_generator *optc)
+ static uint32_t get_start_vline(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing)
+ {
+ struct dc_crtc_timing patched_crtc_timing;
+- uint32_t vesa_sync_start;
+- uint32_t asic_blank_end;
+- uint32_t interlace_factor;
+- uint32_t vertical_line_start;
++ int vesa_sync_start;
++ int asic_blank_end;
++ int interlace_factor;
++ int vertical_line_start;
+
+ patched_crtc_timing = *dc_crtc_timing;
+ optc1_apply_front_porch_workaround(optc, &patched_crtc_timing);
+--
+2.7.4
+
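A minimal sketch of why the switch from uint32_t to int matters in the hunk above (illustrative only, not part of the patch series; the variable names are hypothetical stand-ins for the timing terms used in get_start_vline):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* With unsigned arithmetic, an intermediate result that
             * should be -8 wraps around to a huge positive value. */
            uint32_t blank_end_u = 4, offset_u = 12;
            uint32_t start_u = blank_end_u - offset_u;   /* 4294967288 */

            /* With signed arithmetic, the negative value can be caught
             * and capped at 0, as the commit message describes. */
            int blank_end_s = 4, offset_s = 12;
            int start_s = blank_end_s - offset_s;        /* -8 */
            if (start_s < 0)
                    start_s = 0;

            printf("unsigned: %u, signed (capped): %d\n",
                   (unsigned int)start_u, start_s);
            return 0;
    }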
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4361-drm-amd-display-Couple-formatting-fixes.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4361-drm-amd-display-Couple-formatting-fixes.patch
new file mode 100644
index 00000000..f6737315
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4361-drm-amd-display-Couple-formatting-fixes.patch
@@ -0,0 +1,61 @@
+From 1fe610d6e52dc2375a0ec1414913c71c019b1474 Mon Sep 17 00:00:00 2001
+From: Harry Wentland <harry.wentland@amd.com>
+Date: Fri, 20 Apr 2018 10:53:50 -0400
+Subject: [PATCH 4361/5725] drm/amd/display: Couple formatting fixes
+
+Things such as mis-indentation and a space at the beginning of a line.
+
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 1 +
+ drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 6 +++---
+ drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c | 2 +-
+ 3 files changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 508424f..11b9601 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -2863,6 +2863,7 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
+ const struct dc_link *link = aconnector->dc_link;
+ struct amdgpu_device *adev = connector->dev->dev_private;
+ struct amdgpu_display_manager *dm = &adev->dm;
++
+ #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
+ defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index 60cf748..412b48b 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -1797,9 +1797,9 @@ enum dc_status dc_validate_global_state(
+ return DC_ERROR_UNEXPECTED;
+
+ if (dc->res_pool->funcs->validate_global) {
+- result = dc->res_pool->funcs->validate_global(dc, new_ctx);
+- if (result != DC_OK)
+- return result;
++ result = dc->res_pool->funcs->validate_global(dc, new_ctx);
++ if (result != DC_OK)
++ return result;
+ }
+
+ for (i = 0; i < new_ctx->stream_count; i++) {
+diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+index 0a47663..00c0a1e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+@@ -430,7 +430,7 @@ static struct stream_encoder *dce112_stream_encoder_create(
+
+ if (!enc110)
+ return NULL;
+-
++
+ dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id,
+ &stream_enc_regs[eng_id],
+ &se_shift, &se_mask);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4362-drm-amd-display-Add-VG12-ASIC-IDs.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4362-drm-amd-display-Add-VG12-ASIC-IDs.patch
new file mode 100644
index 00000000..890a9482
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4362-drm-amd-display-Add-VG12-ASIC-IDs.patch
@@ -0,0 +1,35 @@
+From 602f61c760885ef0151f7b99618d01d2368e37b5 Mon Sep 17 00:00:00 2001
+From: Harry Wentland <harry.wentland@amd.com>
+Date: Fri, 20 Apr 2018 11:05:07 -0400
+Subject: [PATCH 4362/5725] drm/amd/display: Add VG12 ASIC IDs
+
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/include/dal_asic_id.h | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+index 3e8e535..1b987b6 100644
+--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
++++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+@@ -120,9 +120,14 @@
+
+ #define AI_GREENLAND_P_A0 1
+ #define AI_GREENLAND_P_A1 2
++#define AI_UNKNOWN 0xFF
+
+-#define ASICREV_IS_GREENLAND_M(eChipRev) (eChipRev < AI_UNKNOWN)
+-#define ASICREV_IS_GREENLAND_P(eChipRev) (eChipRev < AI_UNKNOWN)
++#define AI_VEGA12_P_A0 20
++#define ASICREV_IS_GREENLAND_M(eChipRev) (eChipRev < AI_VEGA12_P_A0)
++#define ASICREV_IS_GREENLAND_P(eChipRev) (eChipRev < AI_VEGA12_P_A0)
++
++#define ASICREV_IS_VEGA12_P(eChipRev) ((eChipRev >= AI_VEGA12_P_A0) && (eChipRev < AI_UNKNOWN))
++#define ASICREV_IS_VEGA12_p(eChipRev) ((eChipRev >= AI_VEGA12_P_A0) && (eChipRev < AI_UNKNOWN))
+
+ /* DCN1_0 */
+ #define INTERNAL_REV_RAVEN_A0 0x00 /* First spin of Raven */
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4363-drm-amd-Add-BIOS-smu_info-v3_3-required-struct-def.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4363-drm-amd-Add-BIOS-smu_info-v3_3-required-struct-def.patch
new file mode 100644
index 00000000..d2f16ca8
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4363-drm-amd-Add-BIOS-smu_info-v3_3-required-struct-def.patch
@@ -0,0 +1,225 @@
+From b3cef6f7917e7e9d0452368db8c18b874835ce16 Mon Sep 17 00:00:00 2001
+From: "Jerry (Fangzhi) Zuo" <Jerry.Zuo@amd.com>
+Date: Mon, 5 Mar 2018 14:59:57 -0500
+Subject: [PATCH 4363/5725] drm/amd: Add BIOS smu_info v3_3 required struct
+ def.
+
+Signed-off-by: Jerry (Fangzhi) Zuo <Jerry.Zuo@amd.com>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/include/atomfirmware.h | 170 ++++++++++++++++++++++++++++-
+ 1 file changed, 168 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
+index 0f5ad54..de177ce 100644
+--- a/drivers/gpu/drm/amd/include/atomfirmware.h
++++ b/drivers/gpu/drm/amd/include/atomfirmware.h
+@@ -501,6 +501,32 @@ enum atom_cooling_solution_id{
+ LIQUID_COOLING = 0x01
+ };
+
++struct atom_firmware_info_v3_2 {
++ struct atom_common_table_header table_header;
++ uint32_t firmware_revision;
++ uint32_t bootup_sclk_in10khz;
++ uint32_t bootup_mclk_in10khz;
++ uint32_t firmware_capability; // enum atombios_firmware_capability
++ uint32_t main_call_parser_entry; /* direct address of main parser call in VBIOS binary. */
++ uint32_t bios_scratch_reg_startaddr; // 1st bios scratch register dword address
++ uint16_t bootup_vddc_mv;
++ uint16_t bootup_vddci_mv;
++ uint16_t bootup_mvddc_mv;
++ uint16_t bootup_vddgfx_mv;
++ uint8_t mem_module_id;
++ uint8_t coolingsolution_id; /*0: Air cooling; 1: Liquid cooling ... */
++ uint8_t reserved1[2];
++ uint32_t mc_baseaddr_high;
++ uint32_t mc_baseaddr_low;
++ uint8_t board_i2c_feature_id; // enum of atom_board_i2c_feature_id_def
++ uint8_t board_i2c_feature_gpio_id; // i2c id find in gpio_lut data table gpio_id
++ uint8_t board_i2c_feature_slave_addr;
++ uint8_t reserved3;
++ uint16_t bootup_mvddq_mv;
++ uint16_t bootup_mvpp_mv;
++ uint32_t zfbstartaddrin16mb;
++ uint32_t reserved2[3];
++};
+
+ /*
+ ***************************************************************************
+@@ -1169,7 +1195,29 @@ struct atom_gfx_info_v2_2
+ uint32_t rlc_gpu_timer_refclk;
+ };
+
+-
++struct atom_gfx_info_v2_3 {
++ struct atom_common_table_header table_header;
++ uint8_t gfxip_min_ver;
++ uint8_t gfxip_max_ver;
++ uint8_t max_shader_engines;
++ uint8_t max_tile_pipes;
++ uint8_t max_cu_per_sh;
++ uint8_t max_sh_per_se;
++ uint8_t max_backends_per_se;
++ uint8_t max_texture_channel_caches;
++ uint32_t regaddr_cp_dma_src_addr;
++ uint32_t regaddr_cp_dma_src_addr_hi;
++ uint32_t regaddr_cp_dma_dst_addr;
++ uint32_t regaddr_cp_dma_dst_addr_hi;
++ uint32_t regaddr_cp_dma_command;
++ uint32_t regaddr_cp_status;
++ uint32_t regaddr_rlc_gpu_clock_32;
++ uint32_t rlc_gpu_timer_refclk;
++ uint8_t active_cu_per_sh;
++ uint8_t active_rb_per_se;
++ uint16_t gcgoldenoffset;
++ uint32_t rm21_sram_vmin_value;
++};
+
+ /*
+ ***************************************************************************
+@@ -1198,6 +1246,76 @@ struct atom_smu_info_v3_1
+ uint8_t fw_ctf_polarity; // GPIO polarity for CTF
+ };
+
++struct atom_smu_info_v3_2 {
++ struct atom_common_table_header table_header;
++ uint8_t smuip_min_ver;
++ uint8_t smuip_max_ver;
++ uint8_t smu_rsd1;
++ uint8_t gpuclk_ss_mode;
++ uint16_t sclk_ss_percentage;
++ uint16_t sclk_ss_rate_10hz;
++ uint16_t gpuclk_ss_percentage; // in unit of 0.001%
++ uint16_t gpuclk_ss_rate_10hz;
++ uint32_t core_refclk_10khz;
++ uint8_t ac_dc_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for AC/DC switching, =0xff means invalid
++ uint8_t ac_dc_polarity; // GPIO polarity for AC/DC switching
++ uint8_t vr0hot_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for VR0 HOT event, =0xff means invalid
++ uint8_t vr0hot_polarity; // GPIO polarity for VR0 HOT event
++ uint8_t vr1hot_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for VR1 HOT event , =0xff means invalid
++ uint8_t vr1hot_polarity; // GPIO polarity for VR1 HOT event
++ uint8_t fw_ctf_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for CTF, =0xff means invalid
++ uint8_t fw_ctf_polarity; // GPIO polarity for CTF
++ uint8_t pcc_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for PCC, =0xff means invalid
++ uint8_t pcc_gpio_polarity; // GPIO polarity for CTF
++ uint16_t smugoldenoffset;
++ uint32_t gpupll_vco_freq_10khz;
++ uint32_t bootup_smnclk_10khz;
++ uint32_t bootup_socclk_10khz;
++ uint32_t bootup_mp0clk_10khz;
++ uint32_t bootup_mp1clk_10khz;
++ uint32_t bootup_lclk_10khz;
++ uint32_t bootup_dcefclk_10khz;
++ uint32_t ctf_threshold_override_value;
++ uint32_t reserved[5];
++};
++
++struct atom_smu_info_v3_3 {
++ struct atom_common_table_header table_header;
++ uint8_t smuip_min_ver;
++ uint8_t smuip_max_ver;
++ uint8_t smu_rsd1;
++ uint8_t gpuclk_ss_mode;
++ uint16_t sclk_ss_percentage;
++ uint16_t sclk_ss_rate_10hz;
++ uint16_t gpuclk_ss_percentage; // in unit of 0.001%
++ uint16_t gpuclk_ss_rate_10hz;
++ uint32_t core_refclk_10khz;
++ uint8_t ac_dc_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for AC/DC switching, =0xff means invalid
++ uint8_t ac_dc_polarity; // GPIO polarity for AC/DC switching
++ uint8_t vr0hot_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for VR0 HOT event, =0xff means invalid
++ uint8_t vr0hot_polarity; // GPIO polarity for VR0 HOT event
++ uint8_t vr1hot_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for VR1 HOT event , =0xff means invalid
++ uint8_t vr1hot_polarity; // GPIO polarity for VR1 HOT event
++ uint8_t fw_ctf_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for CTF, =0xff means invalid
++ uint8_t fw_ctf_polarity; // GPIO polarity for CTF
++ uint8_t pcc_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for PCC, =0xff means invalid
++ uint8_t pcc_gpio_polarity; // GPIO polarity for CTF
++ uint16_t smugoldenoffset;
++ uint32_t gpupll_vco_freq_10khz;
++ uint32_t bootup_smnclk_10khz;
++ uint32_t bootup_socclk_10khz;
++ uint32_t bootup_mp0clk_10khz;
++ uint32_t bootup_mp1clk_10khz;
++ uint32_t bootup_lclk_10khz;
++ uint32_t bootup_dcefclk_10khz;
++ uint32_t ctf_threshold_override_value;
++ uint32_t syspll3_0_vco_freq_10khz;
++ uint32_t syspll3_1_vco_freq_10khz;
++ uint32_t bootup_fclk_10khz;
++ uint32_t bootup_waflclk_10khz;
++ uint32_t reserved[3];
++};
++
+ /*
+ ***************************************************************************
+ Data Table smc_dpm_info structure
+@@ -1283,7 +1401,6 @@ struct atom_smc_dpm_info_v4_1
+ uint32_t boardreserved[10];
+ };
+
+-
+ /*
+ ***************************************************************************
+ Data Table asic_profiling_info structure
+@@ -1864,6 +1981,55 @@ enum atom_smu9_syspll0_clock_id
+ SMU9_SYSPLL0_DISPCLK_ID = 11, // DISPCLK
+ };
+
++enum atom_smu11_syspll_id {
++ SMU11_SYSPLL0_ID = 0,
++ SMU11_SYSPLL1_0_ID = 1,
++ SMU11_SYSPLL1_1_ID = 2,
++ SMU11_SYSPLL1_2_ID = 3,
++ SMU11_SYSPLL2_ID = 4,
++ SMU11_SYSPLL3_0_ID = 5,
++ SMU11_SYSPLL3_1_ID = 6,
++};
++
++
++enum atom_smu11_syspll0_clock_id {
++ SMU11_SYSPLL0_SOCCLK_ID = 0, // SOCCLK
++ SMU11_SYSPLL0_MP0CLK_ID = 1, // MP0CLK
++ SMU11_SYSPLL0_DCLK_ID = 2, // DCLK
++ SMU11_SYSPLL0_VCLK_ID = 3, // VCLK
++ SMU11_SYSPLL0_ECLK_ID = 4, // ECLK
++ SMU11_SYSPLL0_DCEFCLK_ID = 5, // DCEFCLK
++};
++
++
++enum atom_smu11_syspll1_0_clock_id {
++ SMU11_SYSPLL1_0_UCLKA_ID = 0, // UCLK_a
++};
++
++enum atom_smu11_syspll1_1_clock_id {
++ SMU11_SYSPLL1_0_UCLKB_ID = 0, // UCLK_b
++};
++
++enum atom_smu11_syspll1_2_clock_id {
++ SMU11_SYSPLL1_0_FCLK_ID = 0, // FCLK
++};
++
++enum atom_smu11_syspll2_clock_id {
++ SMU11_SYSPLL2_GFXCLK_ID = 0, // GFXCLK
++};
++
++enum atom_smu11_syspll3_0_clock_id {
++ SMU11_SYSPLL3_0_WAFCLK_ID = 0, // WAFCLK
++ SMU11_SYSPLL3_0_DISPCLK_ID = 1, // DISPCLK
++ SMU11_SYSPLL3_0_DPREFCLK_ID = 2, // DPREFCLK
++};
++
++enum atom_smu11_syspll3_1_clock_id {
++ SMU11_SYSPLL3_1_MP1CLK_ID = 0, // MP1CLK
++ SMU11_SYSPLL3_1_SMNCLK_ID = 1, // SMNCLK
++ SMU11_SYSPLL3_1_LCLK_ID = 2, // LCLK
++};
++
+ struct atom_get_smu_clock_info_output_parameters_v3_1
+ {
+ union {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4364-drm-amd-display-Add-get_firmware_info_v3_2-for-VG12.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4364-drm-amd-display-Add-get_firmware_info_v3_2-for-VG12.patch
new file mode 100644
index 00000000..5f4f325e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4364-drm-amd-display-Add-get_firmware_info_v3_2-for-VG12.patch
@@ -0,0 +1,128 @@
+From 7686f1f3937593e56f1a407f7f2270083a44d529 Mon Sep 17 00:00:00 2001
+From: Harry Wentland <harry.wentland@amd.com>
+Date: Fri, 20 Apr 2018 10:56:18 -0400
+Subject: [PATCH 4364/5725] drm/amd/display: Add get_firmware_info_v3_2 for
+ VG12
+
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 86 +++++++++++++++++++++-
+ 1 file changed, 85 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+index 985fe8c..10a5807 100644
+--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
++++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+@@ -70,6 +70,10 @@ static enum bp_result get_firmware_info_v3_1(
+ struct bios_parser *bp,
+ struct dc_firmware_info *info);
+
++static enum bp_result get_firmware_info_v3_2(
++ struct bios_parser *bp,
++ struct dc_firmware_info *info);
++
+ static struct atom_hpd_int_record *get_hpd_record(struct bios_parser *bp,
+ struct atom_display_object_path_v2 *object);
+
+@@ -1321,9 +1325,11 @@ static enum bp_result bios_parser_get_firmware_info(
+ case 3:
+ switch (revision.minor) {
+ case 1:
+- case 2:
+ result = get_firmware_info_v3_1(bp, info);
+ break;
++ case 2:
++ result = get_firmware_info_v3_2(bp, info);
++ break;
+ default:
+ break;
+ }
+@@ -1383,6 +1389,84 @@ static enum bp_result get_firmware_info_v3_1(
+ return BP_RESULT_OK;
+ }
+
++static enum bp_result get_firmware_info_v3_2(
++ struct bios_parser *bp,
++ struct dc_firmware_info *info)
++{
++ struct atom_firmware_info_v3_2 *firmware_info;
++ struct atom_display_controller_info_v4_1 *dce_info = NULL;
++ struct atom_common_table_header *header;
++ struct atom_data_revision revision;
++ struct atom_smu_info_v3_2 *smu_info_v3_2 = NULL;
++ struct atom_smu_info_v3_3 *smu_info_v3_3 = NULL;
++
++ if (!info)
++ return BP_RESULT_BADINPUT;
++
++ firmware_info = GET_IMAGE(struct atom_firmware_info_v3_2,
++ DATA_TABLES(firmwareinfo));
++
++ dce_info = GET_IMAGE(struct atom_display_controller_info_v4_1,
++ DATA_TABLES(dce_info));
++
++ if (!firmware_info || !dce_info)
++ return BP_RESULT_BADBIOSTABLE;
++
++ memset(info, 0, sizeof(*info));
++
++ header = GET_IMAGE(struct atom_common_table_header,
++ DATA_TABLES(smu_info));
++ get_atom_data_table_revision(header, &revision);
++
++ if (revision.minor == 2) {
++ /* Vega12 */
++ smu_info_v3_2 = GET_IMAGE(struct atom_smu_info_v3_2,
++ DATA_TABLES(smu_info));
++
++ if (!smu_info_v3_2)
++ return BP_RESULT_BADBIOSTABLE;
++
++ info->default_engine_clk = smu_info_v3_2->bootup_dcefclk_10khz * 10;
++ } else if (revision.minor == 3) {
++ /* Vega20 */
++ smu_info_v3_3 = GET_IMAGE(struct atom_smu_info_v3_3,
++ DATA_TABLES(smu_info));
++
++ if (!smu_info_v3_3)
++ return BP_RESULT_BADBIOSTABLE;
++
++ info->default_engine_clk = smu_info_v3_3->bootup_dcefclk_10khz * 10;
++ }
++
++ // We need to convert from 10KHz units into KHz units.
++ info->default_memory_clk = firmware_info->bootup_mclk_in10khz * 10;
++
++ /* 27MHz for Vega10 & Vega12; 100MHz for Vega20 */
++ info->pll_info.crystal_frequency = dce_info->dce_refclk_10khz * 10;
++ /* Hardcode frequency if BIOS gives no DCE Ref Clk */
++ if (info->pll_info.crystal_frequency == 0) {
++ if (revision.minor == 2)
++ info->pll_info.crystal_frequency = 27000;
++ else if (revision.minor == 3)
++ info->pll_info.crystal_frequency = 100000;
++ }
++ /*dp_phy_ref_clk is not correct for atom_display_controller_info_v4_2, but we don't use it*/
++ info->dp_phy_ref_clk = dce_info->dpphy_refclk_10khz * 10;
++ info->i2c_engine_ref_clk = dce_info->i2c_engine_refclk_10khz * 10;
++
++ /* Get GPU PLL VCO Clock */
++ if (bp->cmd_tbl.get_smu_clock_info != NULL) {
++ if (revision.minor == 2)
++ info->smu_gpu_pll_output_freq =
++ bp->cmd_tbl.get_smu_clock_info(bp, SMU9_SYSPLL0_ID) * 10;
++ else if (revision.minor == 3)
++ info->smu_gpu_pll_output_freq =
++ bp->cmd_tbl.get_smu_clock_info(bp, SMU11_SYSPLL3_0_ID) * 10;
++ }
++
++ return BP_RESULT_OK;
++}
++
+ static enum bp_result bios_parser_get_encoder_cap_info(
+ struct dc_bios *dcb,
+ struct graphics_object_id object_id,
+--
+2.7.4
+
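The multiplications by 10 in get_firmware_info_v3_2() convert the 10 kHz units reported
by the BIOS tables into the kHz units DC works with: a bootup_dcefclk_10khz of 60000, for
example, becomes 60000 * 10 = 600000 kHz (600 MHz). A trivial sketch of that conversion,
with a hypothetical helper name:

/* Hypothetical helper illustrating the unit conversion used above. */
static inline uint32_t bios_10khz_to_khz(uint32_t val_in_10khz)
{
	return val_in_10khz * 10;	/* e.g. 60000 -> 600000 kHz (600 MHz) */
}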
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4365-drm-amd-display-Don-t-return-ddc-result-and-read_byt.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4365-drm-amd-display-Don-t-return-ddc-result-and-read_byt.patch
new file mode 100644
index 00000000..d3f95179
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4365-drm-amd-display-Don-t-return-ddc-result-and-read_byt.patch
@@ -0,0 +1,141 @@
+From a1cae4e31f769e95aaafb9b98357a70d499fa362 Mon Sep 17 00:00:00 2001
+From: Harry Wentland <harry.wentland@amd.com>
+Date: Tue, 24 Apr 2018 10:49:20 -0400
+Subject: [PATCH 4365/5725] drm/amd/display: Don't return ddc result and
+ read_bytes in same return value
+
+The two ranges overlap.
+
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 20 ++++++++++++--------
+ drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c | 10 +++++++---
+ drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h | 5 +++--
+ 3 files changed, 22 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+index e3110d6..559bd2a 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -82,21 +82,22 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
+ enum i2c_mot_mode mot = (msg->request & DP_AUX_I2C_MOT) ?
+ I2C_MOT_TRUE : I2C_MOT_FALSE;
+ enum ddc_result res;
+- ssize_t read_bytes;
++ uint32_t read_bytes = msg->size;
+
+ if (WARN_ON(msg->size > 16))
+ return -E2BIG;
+
+ switch (msg->request & ~DP_AUX_I2C_MOT) {
+ case DP_AUX_NATIVE_READ:
+- read_bytes = dal_ddc_service_read_dpcd_data(
++ res = dal_ddc_service_read_dpcd_data(
+ TO_DM_AUX(aux)->ddc_service,
+ false,
+ I2C_MOT_UNDEF,
+ msg->address,
+ msg->buffer,
+- msg->size);
+- return read_bytes;
++ msg->size,
++ &read_bytes);
++ break;
+ case DP_AUX_NATIVE_WRITE:
+ res = dal_ddc_service_write_dpcd_data(
+ TO_DM_AUX(aux)->ddc_service,
+@@ -107,14 +108,15 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
+ msg->size);
+ break;
+ case DP_AUX_I2C_READ:
+- read_bytes = dal_ddc_service_read_dpcd_data(
++ res = dal_ddc_service_read_dpcd_data(
+ TO_DM_AUX(aux)->ddc_service,
+ true,
+ mot,
+ msg->address,
+ msg->buffer,
+- msg->size);
+- return read_bytes;
++ msg->size,
++ &read_bytes);
++ break;
+ case DP_AUX_I2C_WRITE:
+ res = dal_ddc_service_write_dpcd_data(
+ TO_DM_AUX(aux)->ddc_service,
+@@ -136,7 +138,9 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
+ r == DDC_RESULT_SUCESSFULL);
+ #endif
+
+- return msg->size;
++ if (res != DDC_RESULT_SUCESSFULL)
++ return -EIO;
++ return read_bytes;
+ }
+
+ static enum drm_connector_status
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+index 49c2fac..ae48d60 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+@@ -629,13 +629,14 @@ bool dal_ddc_service_query_ddc_data(
+ return ret;
+ }
+
+-ssize_t dal_ddc_service_read_dpcd_data(
++enum ddc_result dal_ddc_service_read_dpcd_data(
+ struct ddc_service *ddc,
+ bool i2c,
+ enum i2c_mot_mode mot,
+ uint32_t address,
+ uint8_t *data,
+- uint32_t len)
++ uint32_t len,
++ uint32_t *read)
+ {
+ struct aux_payload read_payload = {
+ .i2c_over_aux = i2c,
+@@ -652,6 +653,8 @@ ssize_t dal_ddc_service_read_dpcd_data(
+ .mot = mot
+ };
+
++ *read = 0;
++
+ if (len > DEFAULT_AUX_MAX_DATA_SIZE) {
+ BREAK_TO_DEBUGGER();
+ return DDC_RESULT_FAILED_INVALID_OPERATION;
+@@ -661,7 +664,8 @@ ssize_t dal_ddc_service_read_dpcd_data(
+ ddc->ctx->i2caux,
+ ddc->ddc_pin,
+ &command)) {
+- return (ssize_t)command.payloads->length;
++ *read = command.payloads->length;
++ return DDC_RESULT_SUCESSFULL;
+ }
+
+ return DDC_RESULT_FAILED_OPERATION;
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
+index 090b7a8..30b3a08 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
+@@ -102,13 +102,14 @@ bool dal_ddc_service_query_ddc_data(
+ uint8_t *read_buf,
+ uint32_t read_size);
+
+-ssize_t dal_ddc_service_read_dpcd_data(
++enum ddc_result dal_ddc_service_read_dpcd_data(
+ struct ddc_service *ddc,
+ bool i2c,
+ enum i2c_mot_mode mot,
+ uint32_t address,
+ uint8_t *data,
+- uint32_t len);
++ uint32_t len,
++ uint32_t *read);
+
+ enum ddc_result dal_ddc_service_write_dpcd_data(
+ struct ddc_service *ddc,
+--
+2.7.4
+
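The rework above replaces a single overloaded return value with a status enum plus an
out-parameter, because a negative errno and a small byte count occupy overlapping ranges
in one ssize_t. A short sketch of the resulting calling convention, assuming ddc, mot,
address, buffer and msg_size are in scope as in dm_dp_aux_transfer():

/* Sketch only: status and byte count now travel separately. */
uint32_t read_bytes = msg_size;
enum ddc_result res = dal_ddc_service_read_dpcd_data(ddc, true, mot, address,
						      buffer, msg_size, &read_bytes);
if (res != DDC_RESULT_SUCESSFULL)
	return -EIO;		/* a failure can no longer look like a short read */
return read_bytes;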
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4366-drm-amd-display-Use-kvzalloc-for-potentially-large-a.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4366-drm-amd-display-Use-kvzalloc-for-potentially-large-a.patch
new file mode 100644
index 00000000..e2409d19
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4366-drm-amd-display-Use-kvzalloc-for-potentially-large-a.patch
@@ -0,0 +1,250 @@
+From b17e81a56cb5dcef910104604238eaeb8aedc114 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Michel=20D=C3=A4nzer?= <michel.daenzer@amd.com>
+Date: Tue, 17 Apr 2018 12:25:22 +0200
+Subject: [PATCH 4366/5725] drm/amd/display: Use kvzalloc for potentially large
+ allocations
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Allocating up to 32 physically contiguous pages can easily fail (and has
+failed for me), and isn't necessary anyway.
+
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_surface.c | 14 ++---
+ .../drm/amd/display/modules/color/color_gamma.c | 72 ++++++++++++----------
+ 2 files changed, 45 insertions(+), 41 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+index 9593877..68a71ad 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+@@ -72,8 +72,8 @@ struct dc_plane_state *dc_create_plane_state(struct dc *dc)
+ {
+ struct dc *core_dc = dc;
+
+- struct dc_plane_state *plane_state = kzalloc(sizeof(*plane_state),
+- GFP_KERNEL);
++ struct dc_plane_state *plane_state = kvzalloc(sizeof(*plane_state),
++ GFP_KERNEL);
+
+ if (NULL == plane_state)
+ return NULL;
+@@ -126,7 +126,7 @@ static void dc_plane_state_free(struct kref *kref)
+ {
+ struct dc_plane_state *plane_state = container_of(kref, struct dc_plane_state, refcount);
+ destruct(plane_state);
+- kfree(plane_state);
++ kvfree(plane_state);
+ }
+
+ void dc_plane_state_release(struct dc_plane_state *plane_state)
+@@ -142,7 +142,7 @@ void dc_gamma_retain(struct dc_gamma *gamma)
+ static void dc_gamma_free(struct kref *kref)
+ {
+ struct dc_gamma *gamma = container_of(kref, struct dc_gamma, refcount);
+- kfree(gamma);
++ kvfree(gamma);
+ }
+
+ void dc_gamma_release(struct dc_gamma **gamma)
+@@ -153,7 +153,7 @@ void dc_gamma_release(struct dc_gamma **gamma)
+
+ struct dc_gamma *dc_create_gamma(void)
+ {
+- struct dc_gamma *gamma = kzalloc(sizeof(*gamma), GFP_KERNEL);
++ struct dc_gamma *gamma = kvzalloc(sizeof(*gamma), GFP_KERNEL);
+
+ if (gamma == NULL)
+ goto alloc_fail;
+@@ -173,7 +173,7 @@ void dc_transfer_func_retain(struct dc_transfer_func *tf)
+ static void dc_transfer_func_free(struct kref *kref)
+ {
+ struct dc_transfer_func *tf = container_of(kref, struct dc_transfer_func, refcount);
+- kfree(tf);
++ kvfree(tf);
+ }
+
+ void dc_transfer_func_release(struct dc_transfer_func *tf)
+@@ -183,7 +183,7 @@ void dc_transfer_func_release(struct dc_transfer_func *tf)
+
+ struct dc_transfer_func *dc_create_transfer_func()
+ {
+- struct dc_transfer_func *tf = kzalloc(sizeof(*tf), GFP_KERNEL);
++ struct dc_transfer_func *tf = kvzalloc(sizeof(*tf), GFP_KERNEL);
+
+ if (tf == NULL)
+ goto alloc_fail;
+diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+index ad0ff50..15e5b72 100644
+--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
++++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+@@ -1274,19 +1274,19 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
+
+ output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+
+- rgb_user = kzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS),
+- GFP_KERNEL);
++ rgb_user = kvzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS),
++ GFP_KERNEL);
+ if (!rgb_user)
+ goto rgb_user_alloc_fail;
+- rgb_regamma = kzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS + _EXTRA_POINTS),
+- GFP_KERNEL);
++ rgb_regamma = kvzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS + _EXTRA_POINTS),
++ GFP_KERNEL);
+ if (!rgb_regamma)
+ goto rgb_regamma_alloc_fail;
+- axix_x = kzalloc(sizeof(*axix_x) * (ramp->num_entries + 3),
+- GFP_KERNEL);
++ axix_x = kvzalloc(sizeof(*axix_x) * (ramp->num_entries + 3),
++ GFP_KERNEL);
+ if (!axix_x)
+ goto axix_x_alloc_fail;
+- coeff = kzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL);
++ coeff = kvzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL);
+ if (!coeff)
+ goto coeff_alloc_fail;
+
+@@ -1338,13 +1338,13 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
+
+ ret = true;
+
+- kfree(coeff);
++ kvfree(coeff);
+ coeff_alloc_fail:
+- kfree(axix_x);
++ kvfree(axix_x);
+ axix_x_alloc_fail:
+- kfree(rgb_regamma);
++ kvfree(rgb_regamma);
+ rgb_regamma_alloc_fail:
+- kfree(rgb_user);
++ kvfree(rgb_user);
+ rgb_user_alloc_fail:
+ return ret;
+ }
+@@ -1480,19 +1480,19 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
+
+ input_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+
+- rgb_user = kzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS),
+- GFP_KERNEL);
++ rgb_user = kvzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS),
++ GFP_KERNEL);
+ if (!rgb_user)
+ goto rgb_user_alloc_fail;
+- curve = kzalloc(sizeof(*curve) * (MAX_HW_POINTS + _EXTRA_POINTS),
+- GFP_KERNEL);
++ curve = kvzalloc(sizeof(*curve) * (MAX_HW_POINTS + _EXTRA_POINTS),
++ GFP_KERNEL);
+ if (!curve)
+ goto curve_alloc_fail;
+- axix_x = kzalloc(sizeof(*axix_x) * (ramp->num_entries + _EXTRA_POINTS),
+- GFP_KERNEL);
++ axix_x = kvzalloc(sizeof(*axix_x) * (ramp->num_entries + _EXTRA_POINTS),
++ GFP_KERNEL);
+ if (!axix_x)
+ goto axix_x_alloc_fail;
+- coeff = kzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL);
++ coeff = kvzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL);
+ if (!coeff)
+ goto coeff_alloc_fail;
+
+@@ -1534,13 +1534,13 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
+
+ ret = true;
+
+- kfree(coeff);
++ kvfree(coeff);
+ coeff_alloc_fail:
+- kfree(axix_x);
++ kvfree(axix_x);
+ axix_x_alloc_fail:
+- kfree(curve);
++ kvfree(curve);
+ curve_alloc_fail:
+- kfree(rgb_user);
++ kvfree(rgb_user);
+ rgb_user_alloc_fail:
+
+ return ret;
+@@ -1569,8 +1569,9 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
+ }
+ ret = true;
+ } else if (trans == TRANSFER_FUNCTION_PQ) {
+- rgb_regamma = kzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS +
+- _EXTRA_POINTS), GFP_KERNEL);
++ rgb_regamma = kvzalloc(sizeof(*rgb_regamma) *
++ (MAX_HW_POINTS + _EXTRA_POINTS),
++ GFP_KERNEL);
+ if (!rgb_regamma)
+ goto rgb_regamma_alloc_fail;
+ points->end_exponent = 7;
+@@ -1590,11 +1591,12 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
+ }
+ ret = true;
+
+- kfree(rgb_regamma);
++ kvfree(rgb_regamma);
+ } else if (trans == TRANSFER_FUNCTION_SRGB ||
+ trans == TRANSFER_FUNCTION_BT709) {
+- rgb_regamma = kzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS +
+- _EXTRA_POINTS), GFP_KERNEL);
++ rgb_regamma = kvzalloc(sizeof(*rgb_regamma) *
++ (MAX_HW_POINTS + _EXTRA_POINTS),
++ GFP_KERNEL);
+ if (!rgb_regamma)
+ goto rgb_regamma_alloc_fail;
+ points->end_exponent = 0;
+@@ -1612,7 +1614,7 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
+ }
+ ret = true;
+
+- kfree(rgb_regamma);
++ kvfree(rgb_regamma);
+ }
+ rgb_regamma_alloc_fail:
+ return ret;
+@@ -1636,8 +1638,9 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
+ }
+ ret = true;
+ } else if (trans == TRANSFER_FUNCTION_PQ) {
+- rgb_degamma = kzalloc(sizeof(*rgb_degamma) * (MAX_HW_POINTS +
+- _EXTRA_POINTS), GFP_KERNEL);
++ rgb_degamma = kvzalloc(sizeof(*rgb_degamma) *
++ (MAX_HW_POINTS + _EXTRA_POINTS),
++ GFP_KERNEL);
+ if (!rgb_degamma)
+ goto rgb_degamma_alloc_fail;
+
+@@ -1652,11 +1655,12 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
+ }
+ ret = true;
+
+- kfree(rgb_degamma);
++ kvfree(rgb_degamma);
+ } else if (trans == TRANSFER_FUNCTION_SRGB ||
+ trans == TRANSFER_FUNCTION_BT709) {
+- rgb_degamma = kzalloc(sizeof(*rgb_degamma) * (MAX_HW_POINTS +
+- _EXTRA_POINTS), GFP_KERNEL);
++ rgb_degamma = kvzalloc(sizeof(*rgb_degamma) *
++ (MAX_HW_POINTS + _EXTRA_POINTS),
++ GFP_KERNEL);
+ if (!rgb_degamma)
+ goto rgb_degamma_alloc_fail;
+
+@@ -1670,7 +1674,7 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
+ }
+ ret = true;
+
+- kfree(rgb_degamma);
++ kvfree(rgb_degamma);
+ }
+ points->end_exponent = 0;
+ points->x_point_at_y1_red = 1;
+--
+2.7.4
+
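kvzalloc() tries a regular kmalloc() first and falls back to vmalloc() when the request
is too large to satisfy with physically contiguous pages, which is why every matching
kfree() in the touched paths is converted to kvfree(). A minimal kernel-style sketch of
the pairing (stand-in type and helper names, not from the patch):

#include <linux/mm.h>		/* kvzalloc(), kvfree() */

struct point { int r, g, b; };	/* stand-in element type */

static struct point *alloc_points(unsigned int count)
{
	/* May be vmalloc-backed for large counts; no physical contiguity needed. */
	return kvzalloc(sizeof(struct point) * count, GFP_KERNEL);
}

static void free_points(struct point *p)
{
	kvfree(p);		/* correct for both kmalloc- and vmalloc-backed memory */
}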
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4367-drm-amd-display-disable-FBC-on-underlay-pipe.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4367-drm-amd-display-disable-FBC-on-underlay-pipe.patch
new file mode 100644
index 00000000..b7ce5007
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4367-drm-amd-display-disable-FBC-on-underlay-pipe.patch
@@ -0,0 +1,45 @@
+From d483aef9196e297eb956aeb8e62946ccf70a0458 Mon Sep 17 00:00:00 2001
+From: Shirish S <shirish.s@amd.com>
+Date: Wed, 25 Apr 2018 14:42:28 +0530
+Subject: [PATCH 4367/5725] drm/amd/display: disable FBC on underlay pipe
+
+FBC is not applicable to the underlay pipe,
+so disallow enabling and disabling it there.
+
+This also fixes the BUG hit due to sleeping in
+atomic context.
+
+Signed-off-by: Shirish S <shirish.s@amd.com>
+Reviewed-by: Roman Li <Roman.Li@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index 6a0ae02..45578d4 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -2764,6 +2764,9 @@ static void dce110_program_front_end_for_pipe(
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ struct xfm_grph_csc_adjustment adjust;
+ struct out_csc_color_matrix tbl_entry;
++#if defined(CONFIG_DRM_AMD_DC_FBC)
++ unsigned int underlay_idx = dc->res_pool->underlay_pipe_index;
++#endif
+ unsigned int i;
+ DC_LOGGER_INIT();
+ memset(&tbl_entry, 0, sizeof(tbl_entry));
+@@ -2805,7 +2808,9 @@ static void dce110_program_front_end_for_pipe(
+ program_scaler(dc, pipe_ctx);
+
+ #if defined(CONFIG_DRM_AMD_DC_FBC)
+- if (dc->fbc_compressor && old_pipe->stream) {
++ /* fbc not applicable on Underlay pipe */
++ if (dc->fbc_compressor && old_pipe->stream &&
++ pipe_ctx->pipe_idx != underlay_idx) {
+ if (plane_state->tiling_info.gfx8.array_mode == DC_ARRAY_LINEAR_GENERAL)
+ dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
+ else
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4368-drm-amdgpu-Switch-to-interruptable-wait-to-recover-f.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4368-drm-amdgpu-Switch-to-interruptable-wait-to-recover-f.patch
new file mode 100644
index 00000000..d550a8d8
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4368-drm-amdgpu-Switch-to-interruptable-wait-to-recover-f.patch
@@ -0,0 +1,47 @@
+From 15b840ee15fc8eb706ed9201b89afc12d6b9c614 Mon Sep 17 00:00:00 2001
+From: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Date: Mon, 30 Apr 2018 10:04:42 -0400
+Subject: [PATCH 4368/5725] drm/amdgpu: Switch to interruptable wait to recover
+ from ring hang.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+v2:
+Use dma_fence_wait instead of dma_fence_wait_timeout(...,MAX_SCHEDULE_TIMEOUT)
+Avoid printing error message for ERESTARTSYS
+
+Originally-by: David Panariti <David.Panariti@amd.com>
+Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+
+Conflicts:
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+
+Change-Id: I11ceb0cb9229dd3b6347cf89b8dec74b14ddf1da
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+index bdeec74..eb82bbf 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+@@ -438,9 +438,11 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id)
+
+ if (other) {
+ signed long r;
+- r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
++ r = dma_fence_wait(other, true);
+ if (r < 0) {
+- DRM_ERROR("Error (%ld) waiting for fence!\n", r);
++ if (r != -ERESTARTSYS)
++ DRM_ERROR("Error (%ld) waiting for fence!\n", r);
++
+ return r;
+ }
+ }
+--
+2.7.4
+
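dma_fence_wait(fence, true) sleeps interruptibly and returns 0 on success or a negative
error code; -ERESTARTSYS only means a signal arrived and the call will be restarted,
which is why it is filtered out of the error print above. A small kernel-style sketch of
the same pattern (hypothetical helper name):

#include <linux/dma-fence.h>

static long wait_for_prev_fence(struct dma_fence *fence)
{
	long r = dma_fence_wait(fence, true);	/* interruptible wait, no timeout */

	if (r < 0 && r != -ERESTARTSYS)
		pr_err("Error (%ld) waiting for fence!\n", r);
	return r;				/* 0 on success */
}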
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4369-drm-amd-amdgpu-Add-some-documentation-to-the-debugfs.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4369-drm-amd-amdgpu-Add-some-documentation-to-the-debugfs.patch
new file mode 100644
index 00000000..61a2ef0b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4369-drm-amd-amdgpu-Add-some-documentation-to-the-debugfs.patch
@@ -0,0 +1,32 @@
+From 1317e920ae7fd87909bd8d58caedd7176b656507 Mon Sep 17 00:00:00 2001
+From: Tom St Denis <tom.stdenis@amd.com>
+Date: Wed, 2 May 2018 13:01:36 -0400
+Subject: [PATCH 4369/5725] drm/amd/amdgpu: Add some documentation to the
+ debugfs entries
+
+Signed-off-by: Tom St Denis <tom.stdenis@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+
+Conflicts:
+ drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+
+Change-Id: Id1bdcd57048f62ca340e23facec1699de245129e
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+index cc19d6a..8a57c0c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+@@ -68,7 +68,6 @@ int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
+ }
+
+ #if defined(CONFIG_DEBUG_FS)
+-
+ /**
+ * amdgpu_debugfs_process_reg_op - Handle MMIO register reads/writes
+ *
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4370-drm-amdgpu-invalidate-parent-bo-when-shadow-bo-was-i.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4370-drm-amdgpu-invalidate-parent-bo-when-shadow-bo-was-i.patch
new file mode 100644
index 00000000..d8ef5a48
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4370-drm-amdgpu-invalidate-parent-bo-when-shadow-bo-was-i.patch
@@ -0,0 +1,42 @@
+From d5aeec65a69b4be760b53124c9027a7786d92203 Mon Sep 17 00:00:00 2001
+From: Chunming Zhou <david1.zhou@amd.com>
+Date: Tue, 24 Apr 2018 13:54:10 +0800
+Subject: [PATCH 4370/5725] drm/amdgpu: invalidate parent bo when shadow bo was
+ invalidated
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The shadow BO is located in GTT while its parent (PT or PD) BO may be located in VRAM.
+In some cases the BO in GTT is evicted while the parent is not, so the shadow BO may
+not be put on the evict list and cannot be invalidated correctly.
+v2: suggested by Christian
+
+Change-Id: Iad10d9a3031fa2b243879b9e58ee4d8c527eb433
+Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
+Reported-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index dd84ed4..336abd4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -2282,6 +2282,10 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
+ {
+ struct amdgpu_vm_bo_base *bo_base;
+
++ /* shadow bo doesn't have bo base, its validation needs its parent */
++ if (bo->parent && bo->parent->shadow == bo)
++ bo = bo->parent;
++
+ list_for_each_entry(bo_base, &bo->va, bo_list) {
+ struct amdgpu_vm *vm = bo_base->vm;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4371-drm-amd-powerplay-fix-spelling-mistake-contruct-cons.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4371-drm-amd-powerplay-fix-spelling-mistake-contruct-cons.patch
new file mode 100644
index 00000000..4f52e5d5
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4371-drm-amd-powerplay-fix-spelling-mistake-contruct-cons.patch
@@ -0,0 +1,44 @@
+From f4fb48618d13cd04a0350270c5b8d32e1d72fffa Mon Sep 17 00:00:00 2001
+From: Colin Ian King <colin.king@canonical.com>
+Date: Sat, 28 Apr 2018 23:21:55 +0100
+Subject: [PATCH 4371/5725] drm/amd/powerplay: fix spelling mistake: "contruct"
+ -> "construct"
+
+Trivial fix to spelling mistake in PP_ASSERT_WITH_CODE message text
+
+Signed-off-by: Colin Ian King <colin.king@canonical.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 2 +-
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+index 194d45a..3f0162d 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+@@ -1271,7 +1271,7 @@ static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+
+ tmp_result = smu7_construct_voltage_tables(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+- "Failed to contruct voltage tables!",
++ "Failed to construct voltage tables!",
+ result = tmp_result);
+ }
+ smum_initialize_mc_reg_table(hwmgr);
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+index 055c692..0ad2ca3 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+@@ -2860,7 +2860,7 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+
+ tmp_result = vega10_construct_voltage_tables(hwmgr);
+ PP_ASSERT_WITH_CODE(!tmp_result,
+- "Failed to contruct voltage tables!",
++ "Failed to construct voltage tables!",
+ result = tmp_result);
+
+ tmp_result = vega10_init_smc_table(hwmgr);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4372-drm-amd-display-clean-up-assignment-of-amdgpu_crtc.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4372-drm-amd-display-clean-up-assignment-of-amdgpu_crtc.patch
new file mode 100644
index 00000000..bfc0a802
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4372-drm-amd-display-clean-up-assignment-of-amdgpu_crtc.patch
@@ -0,0 +1,33 @@
+From cab6afb9f10ebe54445508755424b9193a03dc7b Mon Sep 17 00:00:00 2001
+From: Colin Ian King <colin.king@canonical.com>
+Date: Wed, 2 May 2018 15:43:16 +0100
+Subject: [PATCH 4372/5725] drm/amd/display: clean up assignment of amdgpu_crtc
+
+The declaration of pointer amdgpu_crtc has a redundant assignment to
+amdgpu_crtc. Clean this up by removing it.
+
+Detected by CoverityScan, CID#1460299 ("Evaluation order violation")
+
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Colin Ian King <colin.king@canonical.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 11b9601..17d9675 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -3931,7 +3931,7 @@ static void remove_stream(struct amdgpu_device *adev,
+ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct dc_cursor_position *position)
+ {
+- struct amdgpu_crtc *amdgpu_crtc = amdgpu_crtc = to_amdgpu_crtc(crtc);
++ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ int x, y;
+ int xorigin = 0, yorigin = 0;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4373-drm-ttm-remove-priority-hard-code-when-initializing-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4373-drm-ttm-remove-priority-hard-code-when-initializing-.patch
new file mode 100644
index 00000000..8290bafc
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4373-drm-ttm-remove-priority-hard-code-when-initializing-.patch
@@ -0,0 +1,35 @@
+From 022f519b9a7ff93526a316db8ee56760625e0031 Mon Sep 17 00:00:00 2001
+From: Junwei Zhang <Jerry.Zhang@amd.com>
+Date: Fri, 11 May 2018 10:54:40 +0800
+Subject: [PATCH 4373/5725] drm/ttm: remove priority hard code when
+ initializing ttm bo
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This allows the priority to be set before initialization.
+It requires the ttm bo to be kzalloc'd, which we always do anyway.
+
+Change-Id: I5d159e899d9f51afc66231b320be8b6ccadc1a52
+Signed-off-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Reviewed-by: David Zhou <david1.zhou@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/ttm/ttm_bo.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
+index 7f0f16b..d741f18 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo.c
++++ b/drivers/gpu/drm/ttm/ttm_bo.c
+@@ -1180,7 +1180,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
+ reservation_object_init(&bo->ttm_resv);
+ atomic_inc(&bo->bdev->glob->bo_count);
+ drm_vma_node_reset(&bo->vma_node);
+- bo->priority = 0;
+
+ /*
+ * For ttm_bo_type_device buffers, allocate
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4374-drm-amdgpu-set-ttm-bo-priority-before-initialization.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4374-drm-amdgpu-set-ttm-bo-priority-before-initialization.patch
new file mode 100644
index 00000000..3044a47c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4374-drm-amdgpu-set-ttm-bo-priority-before-initialization.patch
@@ -0,0 +1,46 @@
+From ebcff1f34fc75e1e4d7f01f11880a0c2d55ca654 Mon Sep 17 00:00:00 2001
+From: Junwei Zhang <Jerry.Zhang@amd.com>
+Date: Fri, 11 May 2018 11:02:23 +0800
+Subject: [PATCH 4374/5725] drm/amdgpu: set ttm bo priority before
+ initialization
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Change-Id: I98aff2c81c0211f7f9bc7c049e1fabba92bb5c53
+Signed-off-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Reviewed-by: David Zhou <david1.zhou@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+
+Conflicts:
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index 9d6c659..719423e 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -441,6 +441,8 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
+
+ bo->tbo.bdev = &adev->mman.bdev;
+ amdgpu_ttm_placement_from_domain(bo, bp->domain);
++ if (bp->type == ttm_bo_type_kernel)
++ bo->tbo.priority = 1;
+
+ r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
+ &bo->placement, page_align, &ctx, acc_size,
+@@ -460,9 +462,6 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
+ if (bp->domain & AMDGPU_GEM_DOMAIN_DGMA && adev->ssg.enabled)
+ bo->tbo.ssg_can_map = true;
+
+- if (bp->type == ttm_bo_type_kernel)
+- bo->tbo.priority = 1;
+-
+ if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
+ bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
+ struct dma_fence *fence;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4375-drm-amdgpu-gmc9-remove-unused-register-defs.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4375-drm-amdgpu-gmc9-remove-unused-register-defs.patch
new file mode 100644
index 00000000..cae60227
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4375-drm-amdgpu-gmc9-remove-unused-register-defs.patch
@@ -0,0 +1,45 @@
+From ca99f900301f56663ef331cf7aba6effa866f558 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 10 May 2018 15:10:14 -0500
+Subject: [PATCH 4375/5725] drm/amdgpu/gmc9: remove unused register defs
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+These were moved to the new df module and are no longer
+used in this file.
+
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 14 --------------
+ 1 file changed, 14 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index 108e06f..35cea8d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -42,20 +42,6 @@
+ #include "gfxhub_v1_0.h"
+ #include "mmhub_v1_0.h"
+
+-#define mmDF_CS_AON0_DramBaseAddress0 0x0044
+-#define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX 0
+-//DF_CS_AON0_DramBaseAddress0
+-#define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT 0x0
+-#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT 0x1
+-#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT 0x4
+-#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT 0x8
+-#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT 0xc
+-#define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK 0x00000001L
+-#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK 0x00000002L
+-#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK 0x000000F0L
+-#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000700L
+-#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L
+-
+ /* add these here since we already include dce12 headers and these are for DCN */
+ #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x055d
+ #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4376-drm-amdgpu-fix-null-pointer-for-bo-unmap-trace-funct.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4376-drm-amdgpu-fix-null-pointer-for-bo-unmap-trace-funct.patch
new file mode 100644
index 00000000..79a7c377
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4376-drm-amdgpu-fix-null-pointer-for-bo-unmap-trace-funct.patch
@@ -0,0 +1,32 @@
+From 39e9fa4b960848e21f3a6b59ae5edc6ff1af963b Mon Sep 17 00:00:00 2001
+From: Junwei Zhang <Jerry.Zhang@amd.com>
+Date: Fri, 11 May 2018 14:54:31 +0800
+Subject: [PATCH 4376/5725] drm/amdgpu: fix null pointer for bo unmap trace
+ function
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Change-Id: Ib1e901ec7f3c5449f23f7ec0e5ae9c50510180d8
+Signed-off-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+index 532263a..e96e26d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+@@ -275,7 +275,7 @@ TRACE_EVENT(amdgpu_vm_bo_unmap,
+ ),
+
+ TP_fast_assign(
+- __entry->bo = bo_va->base.bo;
++ __entry->bo = bo_va ? bo_va->base.bo : NULL;
+ __entry->start = mapping->start;
+ __entry->last = mapping->last;
+ __entry->offset = mapping->offset;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4377-drm-amd-display-remove-need-of-modeset-flag-for-over.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4377-drm-amd-display-remove-need-of-modeset-flag-for-over.patch
new file mode 100644
index 00000000..604db2ac
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4377-drm-amd-display-remove-need-of-modeset-flag-for-over.patch
@@ -0,0 +1,63 @@
+From df4cd0a954c56baeaf26a80f135ad012e68212e8 Mon Sep 17 00:00:00 2001
+From: Shirish S <shirish.s@amd.com>
+Date: Fri, 27 Apr 2018 15:47:21 +0530
+Subject: [PATCH 4377/5725] drm/amd/display: remove need of modeset flag for
+ overlay planes (V2)
+
+This patch continues the work started in
+"843e3c7 drm/amd/display: defer modeset check in dm_update_planes_state",
+where we began to eliminate the dependency on user space setting
+DRM_MODE_ATOMIC_ALLOW_MODESET, which is not mandatory.
+
+After deferring, this patch eliminates the dependency on the flag
+for overlay planes.
+
+This has to be done in stages, as it is fairly complex and requires thorough
+testing before we also free primary planes from the dependency on the
+modeset flag.
+
+V2: Simplified the plane type check.
+
+Signed-off-by: Shirish S <shirish.s@amd.com>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 17d9675..be3200c 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -4291,7 +4291,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+ }
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+- if (!pflip_needed) {
++ if (!pflip_needed || plane->type == DRM_PLANE_TYPE_OVERLAY) {
+ WARN_ON(!dm_new_plane_state->dc_state);
+
+ plane_states_constructed[planes_count] = dm_new_plane_state->dc_state;
+@@ -5001,7 +5001,8 @@ static int dm_update_planes_state(struct dc *dc,
+
+ /* Remove any changed/removed planes */
+ if (!enable) {
+- if (pflip_needed)
++ if (pflip_needed &&
++ plane->type != DRM_PLANE_TYPE_OVERLAY)
+ continue;
+
+ if (!old_plane_crtc)
+@@ -5048,7 +5049,8 @@ static int dm_update_planes_state(struct dc *dc,
+ if (!dm_new_crtc_state->stream)
+ continue;
+
+- if (pflip_needed)
++ if (pflip_needed &&
++ plane->type != DRM_PLANE_TYPE_OVERLAY)
+ continue;
+
+ WARN_ON(dm_new_plane_state->dc_state);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4378-drm-amdgpu-Add-support-to-change-mtype-for-2nd-part-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4378-drm-amdgpu-Add-support-to-change-mtype-for-2nd-part-.patch
new file mode 100644
index 00000000..846c3b0e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4378-drm-amdgpu-Add-support-to-change-mtype-for-2nd-part-.patch
@@ -0,0 +1,160 @@
+From 68a86a467ac670a4301ce893bcfbfeb20004a617 Mon Sep 17 00:00:00 2001
+From: Yong Zhao <yong.zhao@amd.com>
+Date: Mon, 14 May 2018 12:15:27 -0400
+Subject: [PATCH 4378/5725] drm/amdgpu: Add support to change mtype for 2nd
+ part of gart BOs on GFX9
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This change prepares for a workaround in amdkfd for a GFX9 HW bug. It
+requires the control stack memory of compute queues, which is allocated
+from the second page of MQD gart BOs, to have mtype NC, rather than
+the default UC.
+
+Change-Id: I5f91cbeb7e7ba380a7b9f272b1cee6e4c1840175
+Signed-off-by: Yong Zhao <yong.zhao@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 54 +++++++++++++++++++++++++++------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 5 +--
+ include/drm/amd_asic_type.h | 1 +
+ include/uapi/drm/amdgpu_drm.h | 6 ++--
+ 4 files changed, 52 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index 0ddb4c3..1bd7997 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -879,6 +879,45 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
+ sg_free_table(ttm->sg);
+ }
+
++int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
++ struct ttm_buffer_object *tbo,
++ uint64_t flags)
++{
++ struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
++ struct ttm_tt *ttm = tbo->ttm;
++ struct amdgpu_ttm_tt *gtt = (void *)ttm;
++ int r;
++
++ if (abo->flags & AMDGPU_GEM_CREATE_MQD_GFX9) {
++ uint64_t page_idx = 1;
++
++ r = amdgpu_gart_bind(adev, gtt->offset, page_idx,
++ ttm->pages, gtt->ttm.dma_address, flags);
++ if (r)
++ goto gart_bind_fail;
++
++ /* Patch mtype of the second part BO */
++ flags &= ~AMDGPU_PTE_MTYPE_MASK;
++ flags |= AMDGPU_PTE_MTYPE(AMDGPU_MTYPE_NC);
++
++ r = amdgpu_gart_bind(adev,
++ gtt->offset + (page_idx << PAGE_SHIFT),
++ ttm->num_pages - page_idx,
++ &ttm->pages[page_idx],
++ &(gtt->ttm.dma_address[page_idx]), flags);
++ } else {
++ r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
++ ttm->pages, gtt->ttm.dma_address, flags);
++ }
++
++gart_bind_fail:
++ if (r)
++ DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
++ ttm->num_pages, gtt->offset);
++
++ return r;
++}
++
+ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
+ struct ttm_mem_reg *bo_mem)
+ {
+@@ -953,8 +992,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
+
+ flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
+ gtt->offset = (u64)tmp.start << PAGE_SHIFT;
+- r = amdgpu_gart_bind(adev, gtt->offset, bo->ttm->num_pages,
+- bo->ttm->pages, gtt->ttm.dma_address, flags);
++ r = amdgpu_ttm_gart_bind(adev, bo, flags);
+ if (unlikely(r)) {
+ ttm_bo_mem_put(bo, &tmp);
+ return r;
+@@ -971,19 +1009,15 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
+ int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
+ {
+ struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
+- struct amdgpu_ttm_tt *gtt = (void *)tbo->ttm;
+ uint64_t flags;
+ int r;
+
+- if (!gtt)
++ if (!tbo->ttm)
+ return 0;
+
+- flags = amdgpu_ttm_tt_pte_flags(adev, &gtt->ttm.ttm, &tbo->mem);
+- r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
+- gtt->ttm.ttm.pages, gtt->ttm.dma_address, flags);
+- if (r)
+- DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
+- gtt->ttm.ttm.num_pages, gtt->offset);
++ flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, &tbo->mem);
++ r = amdgpu_ttm_gart_bind(adev, tbo, flags);
++
+ return r;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+index beba1a5..84658b5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+@@ -78,11 +78,12 @@ struct amdgpu_bo_list_entry;
+ /* PDE Block Fragment Size for VEGA10 */
+ #define AMDGPU_PDE_BFS(a) ((uint64_t)a << 59)
+
+-/* VEGA10 only */
++
++/* For GFX9 */
+ #define AMDGPU_PTE_MTYPE(a) ((uint64_t)a << 57)
+ #define AMDGPU_PTE_MTYPE_MASK AMDGPU_PTE_MTYPE(3ULL)
+
+-/* For Raven */
++#define AMDGPU_MTYPE_NC 0
+ #define AMDGPU_MTYPE_CC 2
+
+ #define AMDGPU_PTE_DEFAULT_ATC (AMDGPU_PTE_SYSTEM \
+diff --git a/include/drm/amd_asic_type.h b/include/drm/amd_asic_type.h
+index 695bde7..dd63d08 100644
+--- a/include/drm/amd_asic_type.h
++++ b/include/drm/amd_asic_type.h
+@@ -47,6 +47,7 @@ enum amd_asic_type {
+ CHIP_VEGAM,
+ CHIP_VEGA10,
+ CHIP_VEGA12,
++ CHIP_VEGA20,
+ CHIP_RAVEN,
+ CHIP_LAST,
+ };
+diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
+index d04ef13..ad3e1f2 100644
+--- a/include/uapi/drm/amdgpu_drm.h
++++ b/include/uapi/drm/amdgpu_drm.h
+@@ -115,8 +115,10 @@ extern "C" {
+ #define AMDGPU_GEM_CREATE_VM_ALWAYS_VALID (1 << 6)
+ /* Flag that BO sharing will be explicitly synchronized */
+ #define AMDGPU_GEM_CREATE_EXPLICIT_SYNC (1 << 7)
+-/* Flag that BO doesn't need fallback */
+-#define AMDGPU_GEM_CREATE_NO_FALLBACK (1 << 8)
++/* Flag that indicates allocating MQD gart on GFX9, where the mtype
++ * for the second page onward should be set to NC.
++*/
++#define AMDGPU_GEM_CREATE_MQD_GFX9 (1 << 8)
+
+ /* hybrid specific */
+ /* Flag that the memory allocation should be from top of domain */
+--
+2.7.4
+
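On GFX9 the mtype field occupies bits 57-58 of the PTE (AMDGPU_PTE_MTYPE() shifts by 57
and the mask covers two bits), so switching the second part of the BO to NC is a
clear-then-set on those bits, as done in amdgpu_ttm_gart_bind() above. A compact sketch,
where base_pte_flags is a hypothetical variable standing for the flags computed for the BO:

/* Sketch: rewrite only the mtype bits, keep every other PTE flag. */
uint64_t flags = base_pte_flags;

flags &= ~AMDGPU_PTE_MTYPE_MASK;		/* drop the default mtype (UC)  */
flags |= AMDGPU_PTE_MTYPE(AMDGPU_MTYPE_NC);	/* pages from index 1 on are NC */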
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4379-drm-amdgpu-drop-printing-the-BO-offset-in-the-gem-de.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4379-drm-amdgpu-drop-printing-the-BO-offset-in-the-gem-de.patch
new file mode 100644
index 00000000..0abc2aa9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4379-drm-amdgpu-drop-printing-the-BO-offset-in-the-gem-de.patch
@@ -0,0 +1,35 @@
+From b64ebf4c09c4088d8f28e4b9b9f94970171287d5 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Fri, 11 May 2018 23:13:39 +0800
+Subject: [PATCH 4379/5725] drm/amdgpu: drop printing the BO offset in the gem
+ debugfs
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+It is meaningless anyway.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 4 ----
+ 1 file changed, 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+index 3621ff0..22e1298 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+@@ -960,10 +960,6 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
+ seq_printf(m, "\t0x%08x: %12ld byte %s",
+ id, amdgpu_bo_size(bo), placement);
+
+- offset = READ_ONCE(bo->tbo.mem.start);
+- if (offset != AMDGPU_BO_INVALID_OFFSET)
+- seq_printf(m, " @ 0x%010Lx", offset);
+-
+ pin_count = READ_ONCE(bo->pin_count);
+ if (pin_count)
+ seq_printf(m, " pin count %d", pin_count);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4380-drm-amdgpu-print-the-BO-flags-in-the-gem-debugfs-ent.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4380-drm-amdgpu-print-the-BO-flags-in-the-gem-debugfs-ent.patch
new file mode 100644
index 00000000..661d84f4
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4380-drm-amdgpu-print-the-BO-flags-in-the-gem-debugfs-ent.patch
@@ -0,0 +1,53 @@
+From 48e718bf83742b864896f982345c4a94ebaa9fb0 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Fri, 11 May 2018 23:14:29 +0800
+Subject: [PATCH 4380/5725] drm/amdgpu: print the BO flags in the gem debugfs
+ entry
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Quite useful to know.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+index 22e1298..b432b21 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+@@ -925,6 +925,12 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
+ }
+
+ #if defined(CONFIG_DEBUG_FS)
++
++#define amdgpu_debugfs_gem_bo_print_flag(m, bo, flag) \
++ if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) { \
++ seq_printf((m), " " #flag); \
++ }
++
+ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
+ {
+ struct drm_gem_object *gobj = ptr;
+@@ -972,6 +978,15 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
+ else if (dma_buf)
+ seq_printf(m, " exported as %p", dma_buf);
+
++ amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED);
++ amdgpu_debugfs_gem_bo_print_flag(m, bo, NO_CPU_ACCESS);
++ amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_GTT_USWC);
++ amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CLEARED);
++ amdgpu_debugfs_gem_bo_print_flag(m, bo, SHADOW);
++ amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
++ amdgpu_debugfs_gem_bo_print_flag(m, bo, VM_ALWAYS_VALID);
++ amdgpu_debugfs_gem_bo_print_flag(m, bo, EXPLICIT_SYNC);
++
+ seq_printf(m, "\n");
+
+ return 0;
+--
+2.7.4
+
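The helper macro added above builds the full flag name with the ## token-pasting operator
and prints the short name with the # stringizing operator, keeping each call site to a
single line. A stand-alone illustration of the same trick in plain C (not driver code):

#include <stdio.h>

#define FLAG_CPU_ACCESS	(1u << 0)
#define FLAG_SHADOW	(1u << 1)

#define print_flag(flags, f)				\
	do {						\
		if ((flags) & (FLAG_ ## f))		\
			printf(" " #f);			\
	} while (0)

int main(void)
{
	unsigned int flags = FLAG_CPU_ACCESS | FLAG_SHADOW;

	printf("flags:");
	print_flag(flags, CPU_ACCESS);
	print_flag(flags, SHADOW);
	printf("\n");		/* prints: "flags: CPU_ACCESS SHADOW" */
	return 0;
}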
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4381-drm-amdgpu-gfx9-Update-golden-setting-for-gfx9_0.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4381-drm-amdgpu-gfx9-Update-golden-setting-for-gfx9_0.patch
new file mode 100644
index 00000000..b1ce2d1b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4381-drm-amdgpu-gfx9-Update-golden-setting-for-gfx9_0.patch
@@ -0,0 +1,56 @@
+From 6324ddc6661fc2ee3567fdbda98daf7e73ec054f Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Sat, 10 Mar 2018 05:15:18 +0800
+Subject: [PATCH 4381/5725] drm/amdgpu/gfx9: Update golden setting for gfx9_0.
+
+Update golden_settings_gc_9_0[].
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 17 +++++------------
+ 1 file changed, 5 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 9a3d62b..0b3d2e0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -72,29 +72,22 @@ MODULE_FIRMWARE("amdgpu/raven_rlc.bin");
+
+ static const struct soc15_reg_golden golden_settings_gc_9_0[] =
+ {
+- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
+- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
+- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
+- SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
+- SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
+- SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
+- SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
+- SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
+- SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
+- SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
+- SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff),
+- SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
+ };
+
+ static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
+--
+2.7.4
+
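Each SOC15_REG_GOLDEN_VALUE entry pairs a register with an AND mask and an OR value, and the driver applies the table as a read-modify-write sequence during hardware init. A minimal sketch of that pattern, assuming the two numeric arguments land in and_mask/or_mask style fields and using an illustrative helper for the offset lookup (the real code resolves offsets through the per-IP base tables):

    static void program_golden_regs(struct amdgpu_device *adev,
                                    const struct soc15_reg_golden *regs,
                                    unsigned int count)
    {
            unsigned int i;
            u32 tmp;

            for (i = 0; i < count; i++) {
                    tmp = RREG32(golden_reg_offset(adev, &regs[i])); /* current value */
                    tmp &= ~regs[i].and_mask;  /* clear the masked bits */
                    tmp |= regs[i].or_mask;    /* apply the golden value */
                    WREG32(golden_reg_offset(adev, &regs[i]), tmp);
            }
    }

The patch therefore changes which registers are touched and with what masks, not how the sequence is applied.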
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4382-drm-amd-powerplay-new-framework-to-honour-DAL-clock-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4382-drm-amd-powerplay-new-framework-to-honour-DAL-clock-.patch
new file mode 100644
index 00000000..72045dd7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4382-drm-amd-powerplay-new-framework-to-honour-DAL-clock-.patch
@@ -0,0 +1,87 @@
+From b22ad036507501190c9283aff6706bd9faa85daa Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Wed, 9 May 2018 10:57:53 +0800
+Subject: [PATCH 4382/5725] drm/amd/powerplay: new framework to honour DAL
+ clock limits
+
+This is needed for vega12 and vega20, which do not support the legacy
+powerstate interface. With this new framework, the DAL clock limits can
+also be honoured on these asics.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c | 9 +++++++++
+ drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c | 7 +++++++
+ drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h | 2 ++
+ drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 2 ++
+ 4 files changed, 20 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+index e411012..f5571e9f 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+@@ -132,6 +132,15 @@ int phm_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
+ return 0;
+ }
+
++int phm_apply_clock_adjust_rules(struct pp_hwmgr *hwmgr)
++{
++ PHM_FUNC_CHECK(hwmgr);
++
++ if (hwmgr->hwmgr_func->apply_clocks_adjust_rules != NULL)
++ return hwmgr->hwmgr_func->apply_clocks_adjust_rules(hwmgr);
++ return 0;
++}
++
+ int phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
+ {
+ PHM_FUNC_CHECK(hwmgr);
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
+index 308bff2..2a2955c 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
+@@ -265,6 +265,13 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip,
+ if (skip)
+ return 0;
+
++ if (!hwmgr->ps)
++ /*
++ * for vega12/vega20, which do not support the power state manager,
++ * the DAL clock limits should also be honoured
++ */
++ phm_apply_clock_adjust_rules(hwmgr);
++
+ phm_display_configuration_changed(hwmgr);
+
+ if (hwmgr->ps)
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+index 9bb8785..e029555 100644
+--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+@@ -410,6 +410,8 @@ extern int phm_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
+ struct pp_power_state *adjusted_ps,
+ const struct pp_power_state *current_ps);
+
++extern int phm_apply_clock_adjust_rules(struct pp_hwmgr *hwmgr);
++
+ extern int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level);
+ extern int phm_display_configuration_changed(struct pp_hwmgr *hwmgr);
+ extern int phm_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr);
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+index 3c321c7..9b6c6af 100644
+--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+@@ -229,6 +229,8 @@ struct pp_hwmgr_func {
+ struct pp_power_state *prequest_ps,
+ const struct pp_power_state *pcurrent_ps);
+
++ int (*apply_clocks_adjust_rules)(struct pp_hwmgr *hwmgr);
++
+ int (*force_dpm_level)(struct pp_hwmgr *hw_mgr,
+ enum amd_dpm_forced_level level);
+
+--
+2.7.4
+
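With this hook in place, an asic backend that has no legacy power states only needs to fill in apply_clocks_adjust_rules in its pp_hwmgr_func table; phm_apply_clock_adjust_rules() then dispatches to it from the dynamic adjustment path. A minimal sketch of how a backend might wire this up (the vega12 function name and its body are illustrative, not part of this patch):

    static int vega12_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
    {
            /* clamp the requested clocks to the limits DAL reported via the
             * display configuration; there is no power state to adjust */
            return 0;
    }

    static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
            /* ... other callbacks ... */
            .apply_clocks_adjust_rules = vega12_apply_clocks_adjust_rules,
    };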
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4383-drm-amd-powerplay-add-a-framework-for-perfroming-pre.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4383-drm-amd-powerplay-add-a-framework-for-perfroming-pre.patch
new file mode 100644
index 00000000..1602f304
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4383-drm-amd-powerplay-add-a-framework-for-perfroming-pre.patch
@@ -0,0 +1,77 @@
+From 65b30940581d3e841d8e34d987d2cc331bcbc79f Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Wed, 9 May 2018 11:08:29 +0800
+Subject: [PATCH 4383/5725] drm/amd/powerplay: add a framework for performing
+ pre display configuration change settings
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c | 10 ++++++++++
+ drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c | 2 ++
+ drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h | 1 +
+ drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 1 +
+ 4 files changed, 14 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+index f5571e9f..a0bb921 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+@@ -170,6 +170,16 @@ int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr)
+ return 0;
+ }
+
++int phm_pre_display_configuration_changed(struct pp_hwmgr *hwmgr)
++{
++ PHM_FUNC_CHECK(hwmgr);
++
++ if (NULL != hwmgr->hwmgr_func->pre_display_config_changed)
++ hwmgr->hwmgr_func->pre_display_config_changed(hwmgr);
++
++ return 0;
++
++}
+
+ int phm_display_configuration_changed(struct pp_hwmgr *hwmgr)
+ {
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
+index 2a2955c..0af13c1 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
+@@ -272,6 +272,8 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip,
+ */
+ phm_apply_clock_adjust_rules(hwmgr);
+
++ phm_pre_display_configuration_changed(hwmgr);
++
+ phm_display_configuration_changed(hwmgr);
+
+ if (hwmgr->ps)
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+index e029555..a202247 100644
+--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+@@ -413,6 +413,7 @@ extern int phm_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
+ extern int phm_apply_clock_adjust_rules(struct pp_hwmgr *hwmgr);
+
+ extern int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level);
++extern int phm_pre_display_configuration_changed(struct pp_hwmgr *hwmgr);
+ extern int phm_display_configuration_changed(struct pp_hwmgr *hwmgr);
+ extern int phm_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr);
+ extern int phm_register_irq_handlers(struct pp_hwmgr *hwmgr);
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+index 9b6c6af..b99fb8a 100644
+--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+@@ -254,6 +254,7 @@ struct pp_hwmgr_func {
+ const void *state);
+ int (*enable_clock_power_gating)(struct pp_hwmgr *hwmgr);
+ int (*notify_smc_display_config_after_ps_adjustment)(struct pp_hwmgr *hwmgr);
++ int (*pre_display_config_changed)(struct pp_hwmgr *hwmgr);
+ int (*display_config_changed)(struct pp_hwmgr *hwmgr);
+ int (*disable_clock_power_gating)(struct pp_hwmgr *hwmgr);
+ int (*update_clock_gatings)(struct pp_hwmgr *hwmgr,
+--
+2.7.4
+
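Combined with the previous patch, the dynamic adjustment path now runs two extra hooks ahead of the existing display notification when an asic provides them. Condensed from the hunks above, psm_adjust_power_state_dynamic() effectively becomes:

    if (!hwmgr->ps)
            /* no legacy power states: honour the DAL clock limits directly */
            phm_apply_clock_adjust_rules(hwmgr);

    phm_pre_display_configuration_changed(hwmgr);   /* new pre-change hook */
    phm_display_configuration_changed(hwmgr);

    /* the existing hwmgr->ps handling continues below, unchanged */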
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4384-drm-amdgpu-Drop-the-unused-header-files-in-soc15.c.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4384-drm-amdgpu-Drop-the-unused-header-files-in-soc15.c.patch
new file mode 100644
index 00000000..6bd8e6a7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4384-drm-amdgpu-Drop-the-unused-header-files-in-soc15.c.patch
@@ -0,0 +1,29 @@
+From e41ca33ca8f744c4b69528b89a15f32fb831b2f5 Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Fri, 11 May 2018 14:41:40 +0800
+Subject: [PATCH 4384/5725] drm/amdgpu: Drop the unused header files in
+ soc15.c.
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/soc15.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index 9006576..f31df18 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -41,8 +41,6 @@
+ #include "sdma1/sdma1_4_0_offset.h"
+ #include "hdp/hdp_4_0_offset.h"
+ #include "hdp/hdp_4_0_sh_mask.h"
+-#include "mp/mp_9_0_offset.h"
+-#include "mp/mp_9_0_sh_mask.h"
+ #include "smuio/smuio_9_0_offset.h"
+ #include "smuio/smuio_9_0_sh_mask.h"
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4385-drm-amdgpu-Fix-hardcoded-base-offset-of-vram-pages.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4385-drm-amdgpu-Fix-hardcoded-base-offset-of-vram-pages.patch
new file mode 100644
index 00000000..e84c50d8
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4385-drm-amdgpu-Fix-hardcoded-base-offset-of-vram-pages.patch
@@ -0,0 +1,35 @@
+From 4bb56c09293782eb85e5e7643b66496a6a119e34 Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Tue, 6 Feb 2018 12:29:23 +0800
+Subject: [PATCH 4385/5725] drm/amdgpu: Fix hardcoded base offset of vram pages
+
+In gmc_v9_0_vram_gtt_location(), the vram_base_offset is hardcoded
+to 0 for dGPUs. Fix it by reading mmMC_VM_FB_OFFSET, or by returning
+zfb_phys_addr if ZFB is enabled.
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Signed-off-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index 35cea8d..3c0a0c3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -692,10 +692,7 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
+ amdgpu_device_vram_location(adev, &adev->gmc, base);
+ amdgpu_device_gart_location(adev, mc);
+ /* base offset of vram pages */
+- if (adev->flags & AMD_IS_APU)
+- adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
+- else
+- adev->vm_manager.vram_base_offset = 0;
++ adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
+ }
+
+ /**
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4386-drm-amd-Add-vega20_ip_offset.h-headerfile-for-vega20.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4386-drm-amd-Add-vega20_ip_offset.h-headerfile-for-vega20.patch
new file mode 100644
index 00000000..3c433c9d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4386-drm-amd-Add-vega20_ip_offset.h-headerfile-for-vega20.patch
@@ -0,0 +1,1076 @@
+From 9c28627f6ee6bfc88a1524e74c4c07c521ac4885 Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Tue, 30 Jan 2018 10:59:23 +0800
+Subject: [PATCH 4386/5725] drm/amd: Add vega20_ip_offset.h header file for
+ vega20.
+
+This header file contains vega20's IP base addresses.
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/include/vega20_ip_offset.h | 1050 ++++++++++++++++++++++++
+ 1 file changed, 1050 insertions(+)
+ create mode 100644 drivers/gpu/drm/amd/include/vega20_ip_offset.h
+
+diff --git a/drivers/gpu/drm/amd/include/vega20_ip_offset.h b/drivers/gpu/drm/amd/include/vega20_ip_offset.h
+new file mode 100644
+index 0000000..2da2d97
+--- /dev/null
++++ b/drivers/gpu/drm/amd/include/vega20_ip_offset.h
+@@ -0,0 +1,1050 @@
++/*
++ * Copyright (C) 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
++ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++#ifndef _vega20_ip_offset_HEADER
++#define _vega20_ip_offset_HEADER
++
++#define MAX_INSTANCE 6
++#define MAX_SEGMENT 6
++
++
++struct IP_BASE_INSTANCE
++{
++ unsigned int segment[MAX_SEGMENT];
++};
++
++struct IP_BASE
++{
++ struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
++};
++
++
++static const struct IP_BASE ATHUB_BASE ={ { { { 0x00000C20, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } } } };
++static const struct IP_BASE CLK_BASE ={ { { { 0x00016C00, 0x00016E00, 0x00017000, 0x00017200, 0x0001B000, 0x0001B200 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } } } };
++static const struct IP_BASE DCE_BASE ={ { { { 0x00000012, 0x000000C0, 0x000034C0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } } } };
++static const struct IP_BASE DF_BASE ={ { { { 0x00007000, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } } } };
++static const struct IP_BASE FUSE_BASE ={ { { { 0x00017400, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } } } };
++static const struct IP_BASE GC_BASE ={ { { { 0x00002000, 0x0000A000, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } } } };
++static const struct IP_BASE HDP_BASE ={ { { { 0x00000F20, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } } } };
++static const struct IP_BASE MMHUB_BASE ={ { { { 0x0001A000, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } } } };
++static const struct IP_BASE MP0_BASE ={ { { { 0x00016000, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } } } };
++static const struct IP_BASE MP1_BASE ={ { { { 0x00016200, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } } } };
++static const struct IP_BASE NBIO_BASE ={ { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } } } };
++static const struct IP_BASE OSSSYS_BASE ={ { { { 0x000010A0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } } } };
++static const struct IP_BASE SDMA0_BASE ={ { { { 0x00001260, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } } } };
++static const struct IP_BASE SDMA1_BASE ={ { { { 0x00001860, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } } } };
++static const struct IP_BASE SMUIO_BASE ={ { { { 0x00016800, 0x00016A00, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } } } };
++static const struct IP_BASE THM_BASE ={ { { { 0x00016600, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } } } };
++static const struct IP_BASE UMC_BASE ={ { { { 0x00014000, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } } } };
++static const struct IP_BASE UVD_BASE ={ { { { 0x00007800, 0x00007E00, 0, 0, 0, 0 } },
++ { { 0, 0x00009000, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } } } };
++static const struct IP_BASE VCE_BASE ={ { { { 0x00008800, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } } } };
++static const struct IP_BASE XDMA_BASE ={ { { { 0x00003400, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } } } };
++static const struct IP_BASE RSMU_BASE ={ { { { 0x00012000, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } },
++ { { 0, 0, 0, 0, 0, 0 } } } };
++
++
++#define ATHUB_BASE__INST0_SEG0 0x00000C20
++#define ATHUB_BASE__INST0_SEG1 0
++#define ATHUB_BASE__INST0_SEG2 0
++#define ATHUB_BASE__INST0_SEG3 0
++#define ATHUB_BASE__INST0_SEG4 0
++#define ATHUB_BASE__INST0_SEG5 0
++
++#define ATHUB_BASE__INST1_SEG0 0
++#define ATHUB_BASE__INST1_SEG1 0
++#define ATHUB_BASE__INST1_SEG2 0
++#define ATHUB_BASE__INST1_SEG3 0
++#define ATHUB_BASE__INST1_SEG4 0
++#define ATHUB_BASE__INST1_SEG5 0
++
++#define ATHUB_BASE__INST2_SEG0 0
++#define ATHUB_BASE__INST2_SEG1 0
++#define ATHUB_BASE__INST2_SEG2 0
++#define ATHUB_BASE__INST2_SEG3 0
++#define ATHUB_BASE__INST2_SEG4 0
++#define ATHUB_BASE__INST2_SEG5 0
++
++#define ATHUB_BASE__INST3_SEG0 0
++#define ATHUB_BASE__INST3_SEG1 0
++#define ATHUB_BASE__INST3_SEG2 0
++#define ATHUB_BASE__INST3_SEG3 0
++#define ATHUB_BASE__INST3_SEG4 0
++#define ATHUB_BASE__INST3_SEG5 0
++
++#define ATHUB_BASE__INST4_SEG0 0
++#define ATHUB_BASE__INST4_SEG1 0
++#define ATHUB_BASE__INST4_SEG2 0
++#define ATHUB_BASE__INST4_SEG3 0
++#define ATHUB_BASE__INST4_SEG4 0
++#define ATHUB_BASE__INST4_SEG5 0
++
++#define ATHUB_BASE__INST5_SEG0 0
++#define ATHUB_BASE__INST5_SEG1 0
++#define ATHUB_BASE__INST5_SEG2 0
++#define ATHUB_BASE__INST5_SEG3 0
++#define ATHUB_BASE__INST5_SEG4 0
++#define ATHUB_BASE__INST5_SEG5 0
++
++#define CLK_BASE__INST0_SEG0 0x00016C00
++#define CLK_BASE__INST0_SEG1 0x00016E00
++#define CLK_BASE__INST0_SEG2 0x00017000
++#define CLK_BASE__INST0_SEG3 0x00017200
++#define CLK_BASE__INST0_SEG4 0x0001B000
++#define CLK_BASE__INST0_SEG5 0x0001B200
++
++#define CLK_BASE__INST1_SEG0 0
++#define CLK_BASE__INST1_SEG1 0
++#define CLK_BASE__INST1_SEG2 0
++#define CLK_BASE__INST1_SEG3 0
++#define CLK_BASE__INST1_SEG4 0
++#define CLK_BASE__INST1_SEG5 0
++
++#define CLK_BASE__INST2_SEG0 0
++#define CLK_BASE__INST2_SEG1 0
++#define CLK_BASE__INST2_SEG2 0
++#define CLK_BASE__INST2_SEG3 0
++#define CLK_BASE__INST2_SEG4 0
++#define CLK_BASE__INST2_SEG5 0
++
++#define CLK_BASE__INST3_SEG0 0
++#define CLK_BASE__INST3_SEG1 0
++#define CLK_BASE__INST3_SEG2 0
++#define CLK_BASE__INST3_SEG3 0
++#define CLK_BASE__INST3_SEG4 0
++#define CLK_BASE__INST3_SEG5 0
++
++#define CLK_BASE__INST4_SEG0 0
++#define CLK_BASE__INST4_SEG1 0
++#define CLK_BASE__INST4_SEG2 0
++#define CLK_BASE__INST4_SEG3 0
++#define CLK_BASE__INST4_SEG4 0
++#define CLK_BASE__INST4_SEG5 0
++
++#define CLK_BASE__INST5_SEG0 0
++#define CLK_BASE__INST5_SEG1 0
++#define CLK_BASE__INST5_SEG2 0
++#define CLK_BASE__INST5_SEG3 0
++#define CLK_BASE__INST5_SEG4 0
++#define CLK_BASE__INST5_SEG5 0
++
++#define DCE_BASE__INST0_SEG0 0x00000012
++#define DCE_BASE__INST0_SEG1 0x000000C0
++#define DCE_BASE__INST0_SEG2 0x000034C0
++#define DCE_BASE__INST0_SEG3 0
++#define DCE_BASE__INST0_SEG4 0
++#define DCE_BASE__INST0_SEG5 0
++
++#define DCE_BASE__INST1_SEG0 0
++#define DCE_BASE__INST1_SEG1 0
++#define DCE_BASE__INST1_SEG2 0
++#define DCE_BASE__INST1_SEG3 0
++#define DCE_BASE__INST1_SEG4 0
++#define DCE_BASE__INST1_SEG5 0
++
++#define DCE_BASE__INST2_SEG0 0
++#define DCE_BASE__INST2_SEG1 0
++#define DCE_BASE__INST2_SEG2 0
++#define DCE_BASE__INST2_SEG3 0
++#define DCE_BASE__INST2_SEG4 0
++#define DCE_BASE__INST2_SEG5 0
++
++#define DCE_BASE__INST3_SEG0 0
++#define DCE_BASE__INST3_SEG1 0
++#define DCE_BASE__INST3_SEG2 0
++#define DCE_BASE__INST3_SEG3 0
++#define DCE_BASE__INST3_SEG4 0
++#define DCE_BASE__INST3_SEG5 0
++
++#define DCE_BASE__INST4_SEG0 0
++#define DCE_BASE__INST4_SEG1 0
++#define DCE_BASE__INST4_SEG2 0
++#define DCE_BASE__INST4_SEG3 0
++#define DCE_BASE__INST4_SEG4 0
++#define DCE_BASE__INST4_SEG5 0
++
++#define DCE_BASE__INST5_SEG0 0
++#define DCE_BASE__INST5_SEG1 0
++#define DCE_BASE__INST5_SEG2 0
++#define DCE_BASE__INST5_SEG3 0
++#define DCE_BASE__INST5_SEG4 0
++#define DCE_BASE__INST5_SEG5 0
++
++#define DF_BASE__INST0_SEG0 0x00007000
++#define DF_BASE__INST0_SEG1 0
++#define DF_BASE__INST0_SEG2 0
++#define DF_BASE__INST0_SEG3 0
++#define DF_BASE__INST0_SEG4 0
++#define DF_BASE__INST0_SEG5 0
++
++#define DF_BASE__INST1_SEG0 0
++#define DF_BASE__INST1_SEG1 0
++#define DF_BASE__INST1_SEG2 0
++#define DF_BASE__INST1_SEG3 0
++#define DF_BASE__INST1_SEG4 0
++#define DF_BASE__INST1_SEG5 0
++
++#define DF_BASE__INST2_SEG0 0
++#define DF_BASE__INST2_SEG1 0
++#define DF_BASE__INST2_SEG2 0
++#define DF_BASE__INST2_SEG3 0
++#define DF_BASE__INST2_SEG4 0
++#define DF_BASE__INST2_SEG5 0
++
++#define DF_BASE__INST3_SEG0 0
++#define DF_BASE__INST3_SEG1 0
++#define DF_BASE__INST3_SEG2 0
++#define DF_BASE__INST3_SEG3 0
++#define DF_BASE__INST3_SEG4 0
++#define DF_BASE__INST3_SEG5 0
++
++#define DF_BASE__INST4_SEG0 0
++#define DF_BASE__INST4_SEG1 0
++#define DF_BASE__INST4_SEG2 0
++#define DF_BASE__INST4_SEG3 0
++#define DF_BASE__INST4_SEG4 0
++#define DF_BASE__INST4_SEG5 0
++
++#define DF_BASE__INST5_SEG0 0
++#define DF_BASE__INST5_SEG1 0
++#define DF_BASE__INST5_SEG2 0
++#define DF_BASE__INST5_SEG3 0
++#define DF_BASE__INST5_SEG4 0
++#define DF_BASE__INST5_SEG5 0
++
++#define FUSE_BASE__INST0_SEG0 0x00017400
++#define FUSE_BASE__INST0_SEG1 0
++#define FUSE_BASE__INST0_SEG2 0
++#define FUSE_BASE__INST0_SEG3 0
++#define FUSE_BASE__INST0_SEG4 0
++#define FUSE_BASE__INST0_SEG5 0
++
++#define FUSE_BASE__INST1_SEG0 0
++#define FUSE_BASE__INST1_SEG1 0
++#define FUSE_BASE__INST1_SEG2 0
++#define FUSE_BASE__INST1_SEG3 0
++#define FUSE_BASE__INST1_SEG4 0
++#define FUSE_BASE__INST1_SEG5 0
++
++#define FUSE_BASE__INST2_SEG0 0
++#define FUSE_BASE__INST2_SEG1 0
++#define FUSE_BASE__INST2_SEG2 0
++#define FUSE_BASE__INST2_SEG3 0
++#define FUSE_BASE__INST2_SEG4 0
++#define FUSE_BASE__INST2_SEG5 0
++
++#define FUSE_BASE__INST3_SEG0 0
++#define FUSE_BASE__INST3_SEG1 0
++#define FUSE_BASE__INST3_SEG2 0
++#define FUSE_BASE__INST3_SEG3 0
++#define FUSE_BASE__INST3_SEG4 0
++#define FUSE_BASE__INST3_SEG5 0
++
++#define FUSE_BASE__INST4_SEG0 0
++#define FUSE_BASE__INST4_SEG1 0
++#define FUSE_BASE__INST4_SEG2 0
++#define FUSE_BASE__INST4_SEG3 0
++#define FUSE_BASE__INST4_SEG4 0
++#define FUSE_BASE__INST4_SEG5 0
++
++#define FUSE_BASE__INST5_SEG0 0
++#define FUSE_BASE__INST5_SEG1 0
++#define FUSE_BASE__INST5_SEG2 0
++#define FUSE_BASE__INST5_SEG3 0
++#define FUSE_BASE__INST5_SEG4 0
++#define FUSE_BASE__INST5_SEG5 0
++
++#define GC_BASE__INST0_SEG0 0x00002000
++#define GC_BASE__INST0_SEG1 0x0000A000
++#define GC_BASE__INST0_SEG2 0
++#define GC_BASE__INST0_SEG3 0
++#define GC_BASE__INST0_SEG4 0
++#define GC_BASE__INST0_SEG5 0
++
++#define GC_BASE__INST1_SEG0 0
++#define GC_BASE__INST1_SEG1 0
++#define GC_BASE__INST1_SEG2 0
++#define GC_BASE__INST1_SEG3 0
++#define GC_BASE__INST1_SEG4 0
++#define GC_BASE__INST1_SEG5 0
++
++#define GC_BASE__INST2_SEG0 0
++#define GC_BASE__INST2_SEG1 0
++#define GC_BASE__INST2_SEG2 0
++#define GC_BASE__INST2_SEG3 0
++#define GC_BASE__INST2_SEG4 0
++#define GC_BASE__INST2_SEG5 0
++
++#define GC_BASE__INST3_SEG0 0
++#define GC_BASE__INST3_SEG1 0
++#define GC_BASE__INST3_SEG2 0
++#define GC_BASE__INST3_SEG3 0
++#define GC_BASE__INST3_SEG4 0
++#define GC_BASE__INST3_SEG5 0
++
++#define GC_BASE__INST4_SEG0 0
++#define GC_BASE__INST4_SEG1 0
++#define GC_BASE__INST4_SEG2 0
++#define GC_BASE__INST4_SEG3 0
++#define GC_BASE__INST4_SEG4 0
++#define GC_BASE__INST4_SEG5 0
++
++#define GC_BASE__INST5_SEG0 0
++#define GC_BASE__INST5_SEG1 0
++#define GC_BASE__INST5_SEG2 0
++#define GC_BASE__INST5_SEG3 0
++#define GC_BASE__INST5_SEG4 0
++#define GC_BASE__INST5_SEG5 0
++
++#define HDP_BASE__INST0_SEG0 0x00000F20
++#define HDP_BASE__INST0_SEG1 0
++#define HDP_BASE__INST0_SEG2 0
++#define HDP_BASE__INST0_SEG3 0
++#define HDP_BASE__INST0_SEG4 0
++#define HDP_BASE__INST0_SEG5 0
++
++#define HDP_BASE__INST1_SEG0 0
++#define HDP_BASE__INST1_SEG1 0
++#define HDP_BASE__INST1_SEG2 0
++#define HDP_BASE__INST1_SEG3 0
++#define HDP_BASE__INST1_SEG4 0
++#define HDP_BASE__INST1_SEG5 0
++
++#define HDP_BASE__INST2_SEG0 0
++#define HDP_BASE__INST2_SEG1 0
++#define HDP_BASE__INST2_SEG2 0
++#define HDP_BASE__INST2_SEG3 0
++#define HDP_BASE__INST2_SEG4 0
++#define HDP_BASE__INST2_SEG5 0
++
++#define HDP_BASE__INST3_SEG0 0
++#define HDP_BASE__INST3_SEG1 0
++#define HDP_BASE__INST3_SEG2 0
++#define HDP_BASE__INST3_SEG3 0
++#define HDP_BASE__INST3_SEG4 0
++#define HDP_BASE__INST3_SEG5 0
++
++#define HDP_BASE__INST4_SEG0 0
++#define HDP_BASE__INST4_SEG1 0
++#define HDP_BASE__INST4_SEG2 0
++#define HDP_BASE__INST4_SEG3 0
++#define HDP_BASE__INST4_SEG4 0
++#define HDP_BASE__INST4_SEG5 0
++
++#define HDP_BASE__INST5_SEG0 0
++#define HDP_BASE__INST5_SEG1 0
++#define HDP_BASE__INST5_SEG2 0
++#define HDP_BASE__INST5_SEG3 0
++#define HDP_BASE__INST5_SEG4 0
++#define HDP_BASE__INST5_SEG5 0
++
++#define MMHUB_BASE__INST0_SEG0 0x0001A000
++#define MMHUB_BASE__INST0_SEG1 0
++#define MMHUB_BASE__INST0_SEG2 0
++#define MMHUB_BASE__INST0_SEG3 0
++#define MMHUB_BASE__INST0_SEG4 0
++#define MMHUB_BASE__INST0_SEG5 0
++
++#define MMHUB_BASE__INST1_SEG0 0
++#define MMHUB_BASE__INST1_SEG1 0
++#define MMHUB_BASE__INST1_SEG2 0
++#define MMHUB_BASE__INST1_SEG3 0
++#define MMHUB_BASE__INST1_SEG4 0
++#define MMHUB_BASE__INST1_SEG5 0
++
++#define MMHUB_BASE__INST2_SEG0 0
++#define MMHUB_BASE__INST2_SEG1 0
++#define MMHUB_BASE__INST2_SEG2 0
++#define MMHUB_BASE__INST2_SEG3 0
++#define MMHUB_BASE__INST2_SEG4 0
++#define MMHUB_BASE__INST2_SEG5 0
++
++#define MMHUB_BASE__INST3_SEG0 0
++#define MMHUB_BASE__INST3_SEG1 0
++#define MMHUB_BASE__INST3_SEG2 0
++#define MMHUB_BASE__INST3_SEG3 0
++#define MMHUB_BASE__INST3_SEG4 0
++#define MMHUB_BASE__INST3_SEG5 0
++
++#define MMHUB_BASE__INST4_SEG0 0
++#define MMHUB_BASE__INST4_SEG1 0
++#define MMHUB_BASE__INST4_SEG2 0
++#define MMHUB_BASE__INST4_SEG3 0
++#define MMHUB_BASE__INST4_SEG4 0
++#define MMHUB_BASE__INST4_SEG5 0
++
++#define MMHUB_BASE__INST5_SEG0 0
++#define MMHUB_BASE__INST5_SEG1 0
++#define MMHUB_BASE__INST5_SEG2 0
++#define MMHUB_BASE__INST5_SEG3 0
++#define MMHUB_BASE__INST5_SEG4 0
++#define MMHUB_BASE__INST5_SEG5 0
++
++#define MP0_BASE__INST0_SEG0 0x00016000
++#define MP0_BASE__INST0_SEG1 0
++#define MP0_BASE__INST0_SEG2 0
++#define MP0_BASE__INST0_SEG3 0
++#define MP0_BASE__INST0_SEG4 0
++#define MP0_BASE__INST0_SEG5 0
++
++#define MP0_BASE__INST1_SEG0 0
++#define MP0_BASE__INST1_SEG1 0
++#define MP0_BASE__INST1_SEG2 0
++#define MP0_BASE__INST1_SEG3 0
++#define MP0_BASE__INST1_SEG4 0
++#define MP0_BASE__INST1_SEG5 0
++
++#define MP0_BASE__INST2_SEG0 0
++#define MP0_BASE__INST2_SEG1 0
++#define MP0_BASE__INST2_SEG2 0
++#define MP0_BASE__INST2_SEG3 0
++#define MP0_BASE__INST2_SEG4 0
++#define MP0_BASE__INST2_SEG5 0
++
++#define MP0_BASE__INST3_SEG0 0
++#define MP0_BASE__INST3_SEG1 0
++#define MP0_BASE__INST3_SEG2 0
++#define MP0_BASE__INST3_SEG3 0
++#define MP0_BASE__INST3_SEG4 0
++#define MP0_BASE__INST3_SEG5 0
++
++#define MP0_BASE__INST4_SEG0 0
++#define MP0_BASE__INST4_SEG1 0
++#define MP0_BASE__INST4_SEG2 0
++#define MP0_BASE__INST4_SEG3 0
++#define MP0_BASE__INST4_SEG4 0
++#define MP0_BASE__INST4_SEG5 0
++
++#define MP0_BASE__INST5_SEG0 0
++#define MP0_BASE__INST5_SEG1 0
++#define MP0_BASE__INST5_SEG2 0
++#define MP0_BASE__INST5_SEG3 0
++#define MP0_BASE__INST5_SEG4 0
++#define MP0_BASE__INST5_SEG5 0
++
++#define MP1_BASE__INST0_SEG0 0x00016200
++#define MP1_BASE__INST0_SEG1 0
++#define MP1_BASE__INST0_SEG2 0
++#define MP1_BASE__INST0_SEG3 0
++#define MP1_BASE__INST0_SEG4 0
++#define MP1_BASE__INST0_SEG5 0
++
++#define MP1_BASE__INST1_SEG0 0
++#define MP1_BASE__INST1_SEG1 0
++#define MP1_BASE__INST1_SEG2 0
++#define MP1_BASE__INST1_SEG3 0
++#define MP1_BASE__INST1_SEG4 0
++#define MP1_BASE__INST1_SEG5 0
++
++#define MP1_BASE__INST2_SEG0 0
++#define MP1_BASE__INST2_SEG1 0
++#define MP1_BASE__INST2_SEG2 0
++#define MP1_BASE__INST2_SEG3 0
++#define MP1_BASE__INST2_SEG4 0
++#define MP1_BASE__INST2_SEG5 0
++
++#define MP1_BASE__INST3_SEG0 0
++#define MP1_BASE__INST3_SEG1 0
++#define MP1_BASE__INST3_SEG2 0
++#define MP1_BASE__INST3_SEG3 0
++#define MP1_BASE__INST3_SEG4 0
++#define MP1_BASE__INST3_SEG5 0
++
++#define MP1_BASE__INST4_SEG0 0
++#define MP1_BASE__INST4_SEG1 0
++#define MP1_BASE__INST4_SEG2 0
++#define MP1_BASE__INST4_SEG3 0
++#define MP1_BASE__INST4_SEG4 0
++#define MP1_BASE__INST4_SEG5 0
++
++#define MP1_BASE__INST5_SEG0 0
++#define MP1_BASE__INST5_SEG1 0
++#define MP1_BASE__INST5_SEG2 0
++#define MP1_BASE__INST5_SEG3 0
++#define MP1_BASE__INST5_SEG4 0
++#define MP1_BASE__INST5_SEG5 0
++
++#define NBIO_BASE__INST0_SEG0 0x00000000
++#define NBIO_BASE__INST0_SEG1 0x00000014
++#define NBIO_BASE__INST0_SEG2 0x00000D20
++#define NBIO_BASE__INST0_SEG3 0x00010400
++#define NBIO_BASE__INST0_SEG4 0
++#define NBIO_BASE__INST0_SEG5 0
++
++#define NBIO_BASE__INST1_SEG0 0
++#define NBIO_BASE__INST1_SEG1 0
++#define NBIO_BASE__INST1_SEG2 0
++#define NBIO_BASE__INST1_SEG3 0
++#define NBIO_BASE__INST1_SEG4 0
++#define NBIO_BASE__INST1_SEG5 0
++
++#define NBIO_BASE__INST2_SEG0 0
++#define NBIO_BASE__INST2_SEG1 0
++#define NBIO_BASE__INST2_SEG2 0
++#define NBIO_BASE__INST2_SEG3 0
++#define NBIO_BASE__INST2_SEG4 0
++#define NBIO_BASE__INST2_SEG5 0
++
++#define NBIO_BASE__INST3_SEG0 0
++#define NBIO_BASE__INST3_SEG1 0
++#define NBIO_BASE__INST3_SEG2 0
++#define NBIO_BASE__INST3_SEG3 0
++#define NBIO_BASE__INST3_SEG4 0
++#define NBIO_BASE__INST3_SEG5 0
++
++#define NBIO_BASE__INST4_SEG0 0
++#define NBIO_BASE__INST4_SEG1 0
++#define NBIO_BASE__INST4_SEG2 0
++#define NBIO_BASE__INST4_SEG3 0
++#define NBIO_BASE__INST4_SEG4 0
++#define NBIO_BASE__INST4_SEG5 0
++
++#define NBIO_BASE__INST5_SEG0 0
++#define NBIO_BASE__INST5_SEG1 0
++#define NBIO_BASE__INST5_SEG2 0
++#define NBIO_BASE__INST5_SEG3 0
++#define NBIO_BASE__INST5_SEG4 0
++#define NBIO_BASE__INST5_SEG5 0
++
++#define OSSSYS_BASE__INST0_SEG0 0x000010A0
++#define OSSSYS_BASE__INST0_SEG1 0
++#define OSSSYS_BASE__INST0_SEG2 0
++#define OSSSYS_BASE__INST0_SEG3 0
++#define OSSSYS_BASE__INST0_SEG4 0
++#define OSSSYS_BASE__INST0_SEG5 0
++
++#define OSSSYS_BASE__INST1_SEG0 0
++#define OSSSYS_BASE__INST1_SEG1 0
++#define OSSSYS_BASE__INST1_SEG2 0
++#define OSSSYS_BASE__INST1_SEG3 0
++#define OSSSYS_BASE__INST1_SEG4 0
++#define OSSSYS_BASE__INST1_SEG5 0
++
++#define OSSSYS_BASE__INST2_SEG0 0
++#define OSSSYS_BASE__INST2_SEG1 0
++#define OSSSYS_BASE__INST2_SEG2 0
++#define OSSSYS_BASE__INST2_SEG3 0
++#define OSSSYS_BASE__INST2_SEG4 0
++#define OSSSYS_BASE__INST2_SEG5 0
++
++#define OSSSYS_BASE__INST3_SEG0 0
++#define OSSSYS_BASE__INST3_SEG1 0
++#define OSSSYS_BASE__INST3_SEG2 0
++#define OSSSYS_BASE__INST3_SEG3 0
++#define OSSSYS_BASE__INST3_SEG4 0
++#define OSSSYS_BASE__INST3_SEG5 0
++
++#define OSSSYS_BASE__INST4_SEG0 0
++#define OSSSYS_BASE__INST4_SEG1 0
++#define OSSSYS_BASE__INST4_SEG2 0
++#define OSSSYS_BASE__INST4_SEG3 0
++#define OSSSYS_BASE__INST4_SEG4 0
++#define OSSSYS_BASE__INST4_SEG5 0
++
++#define OSSSYS_BASE__INST5_SEG0 0
++#define OSSSYS_BASE__INST5_SEG1 0
++#define OSSSYS_BASE__INST5_SEG2 0
++#define OSSSYS_BASE__INST5_SEG3 0
++#define OSSSYS_BASE__INST5_SEG4 0
++#define OSSSYS_BASE__INST5_SEG5 0
++
++#define SDMA0_BASE__INST0_SEG0 0x00001260
++#define SDMA0_BASE__INST0_SEG1 0
++#define SDMA0_BASE__INST0_SEG2 0
++#define SDMA0_BASE__INST0_SEG3 0
++#define SDMA0_BASE__INST0_SEG4 0
++#define SDMA0_BASE__INST0_SEG5 0
++
++#define SDMA0_BASE__INST1_SEG0 0
++#define SDMA0_BASE__INST1_SEG1 0
++#define SDMA0_BASE__INST1_SEG2 0
++#define SDMA0_BASE__INST1_SEG3 0
++#define SDMA0_BASE__INST1_SEG4 0
++#define SDMA0_BASE__INST1_SEG5 0
++
++#define SDMA0_BASE__INST2_SEG0 0
++#define SDMA0_BASE__INST2_SEG1 0
++#define SDMA0_BASE__INST2_SEG2 0
++#define SDMA0_BASE__INST2_SEG3 0
++#define SDMA0_BASE__INST2_SEG4 0
++#define SDMA0_BASE__INST2_SEG5 0
++
++#define SDMA0_BASE__INST3_SEG0 0
++#define SDMA0_BASE__INST3_SEG1 0
++#define SDMA0_BASE__INST3_SEG2 0
++#define SDMA0_BASE__INST3_SEG3 0
++#define SDMA0_BASE__INST3_SEG4 0
++#define SDMA0_BASE__INST3_SEG5 0
++
++#define SDMA0_BASE__INST4_SEG0 0
++#define SDMA0_BASE__INST4_SEG1 0
++#define SDMA0_BASE__INST4_SEG2 0
++#define SDMA0_BASE__INST4_SEG3 0
++#define SDMA0_BASE__INST4_SEG4 0
++#define SDMA0_BASE__INST4_SEG5 0
++
++#define SDMA0_BASE__INST5_SEG0 0
++#define SDMA0_BASE__INST5_SEG1 0
++#define SDMA0_BASE__INST5_SEG2 0
++#define SDMA0_BASE__INST5_SEG3 0
++#define SDMA0_BASE__INST5_SEG4 0
++#define SDMA0_BASE__INST5_SEG5 0
++
++#define SDMA1_BASE__INST0_SEG0 0x00001860
++#define SDMA1_BASE__INST0_SEG1 0
++#define SDMA1_BASE__INST0_SEG2 0
++#define SDMA1_BASE__INST0_SEG3 0
++#define SDMA1_BASE__INST0_SEG4 0
++#define SDMA1_BASE__INST0_SEG5 0
++
++#define SDMA1_BASE__INST1_SEG0 0
++#define SDMA1_BASE__INST1_SEG1 0
++#define SDMA1_BASE__INST1_SEG2 0
++#define SDMA1_BASE__INST1_SEG3 0
++#define SDMA1_BASE__INST1_SEG4 0
++#define SDMA1_BASE__INST1_SEG5 0
++
++#define SDMA1_BASE__INST2_SEG0 0
++#define SDMA1_BASE__INST2_SEG1 0
++#define SDMA1_BASE__INST2_SEG2 0
++#define SDMA1_BASE__INST2_SEG3 0
++#define SDMA1_BASE__INST2_SEG4 0
++#define SDMA1_BASE__INST2_SEG5 0
++
++#define SDMA1_BASE__INST3_SEG0 0
++#define SDMA1_BASE__INST3_SEG1 0
++#define SDMA1_BASE__INST3_SEG2 0
++#define SDMA1_BASE__INST3_SEG3 0
++#define SDMA1_BASE__INST3_SEG4 0
++#define SDMA1_BASE__INST3_SEG5 0
++
++#define SDMA1_BASE__INST4_SEG0 0
++#define SDMA1_BASE__INST4_SEG1 0
++#define SDMA1_BASE__INST4_SEG2 0
++#define SDMA1_BASE__INST4_SEG3 0
++#define SDMA1_BASE__INST4_SEG4 0
++#define SDMA1_BASE__INST4_SEG5 0
++
++#define SDMA1_BASE__INST5_SEG0 0
++#define SDMA1_BASE__INST5_SEG1 0
++#define SDMA1_BASE__INST5_SEG2 0
++#define SDMA1_BASE__INST5_SEG3 0
++#define SDMA1_BASE__INST5_SEG4 0
++#define SDMA1_BASE__INST5_SEG5 0
++
++#define SMUIO_BASE__INST0_SEG0 0x00016800
++#define SMUIO_BASE__INST0_SEG1 0x00016A00
++#define SMUIO_BASE__INST0_SEG2 0
++#define SMUIO_BASE__INST0_SEG3 0
++#define SMUIO_BASE__INST0_SEG4 0
++#define SMUIO_BASE__INST0_SEG5 0
++
++#define SMUIO_BASE__INST1_SEG0 0
++#define SMUIO_BASE__INST1_SEG1 0
++#define SMUIO_BASE__INST1_SEG2 0
++#define SMUIO_BASE__INST1_SEG3 0
++#define SMUIO_BASE__INST1_SEG4 0
++#define SMUIO_BASE__INST1_SEG5 0
++
++#define SMUIO_BASE__INST2_SEG0 0
++#define SMUIO_BASE__INST2_SEG1 0
++#define SMUIO_BASE__INST2_SEG2 0
++#define SMUIO_BASE__INST2_SEG3 0
++#define SMUIO_BASE__INST2_SEG4 0
++#define SMUIO_BASE__INST2_SEG5 0
++
++#define SMUIO_BASE__INST3_SEG0 0
++#define SMUIO_BASE__INST3_SEG1 0
++#define SMUIO_BASE__INST3_SEG2 0
++#define SMUIO_BASE__INST3_SEG3 0
++#define SMUIO_BASE__INST3_SEG4 0
++#define SMUIO_BASE__INST3_SEG5 0
++
++#define SMUIO_BASE__INST4_SEG0 0
++#define SMUIO_BASE__INST4_SEG1 0
++#define SMUIO_BASE__INST4_SEG2 0
++#define SMUIO_BASE__INST4_SEG3 0
++#define SMUIO_BASE__INST4_SEG4 0
++#define SMUIO_BASE__INST4_SEG5 0
++
++#define SMUIO_BASE__INST5_SEG0 0
++#define SMUIO_BASE__INST5_SEG1 0
++#define SMUIO_BASE__INST5_SEG2 0
++#define SMUIO_BASE__INST5_SEG3 0
++#define SMUIO_BASE__INST5_SEG4 0
++#define SMUIO_BASE__INST5_SEG5 0
++
++#define THM_BASE__INST0_SEG0 0x00016600
++#define THM_BASE__INST0_SEG1 0
++#define THM_BASE__INST0_SEG2 0
++#define THM_BASE__INST0_SEG3 0
++#define THM_BASE__INST0_SEG4 0
++#define THM_BASE__INST0_SEG5 0
++
++#define THM_BASE__INST1_SEG0 0
++#define THM_BASE__INST1_SEG1 0
++#define THM_BASE__INST1_SEG2 0
++#define THM_BASE__INST1_SEG3 0
++#define THM_BASE__INST1_SEG4 0
++#define THM_BASE__INST1_SEG5 0
++
++#define THM_BASE__INST2_SEG0 0
++#define THM_BASE__INST2_SEG1 0
++#define THM_BASE__INST2_SEG2 0
++#define THM_BASE__INST2_SEG3 0
++#define THM_BASE__INST2_SEG4 0
++#define THM_BASE__INST2_SEG5 0
++
++#define THM_BASE__INST3_SEG0 0
++#define THM_BASE__INST3_SEG1 0
++#define THM_BASE__INST3_SEG2 0
++#define THM_BASE__INST3_SEG3 0
++#define THM_BASE__INST3_SEG4 0
++#define THM_BASE__INST3_SEG5 0
++
++#define THM_BASE__INST4_SEG0 0
++#define THM_BASE__INST4_SEG1 0
++#define THM_BASE__INST4_SEG2 0
++#define THM_BASE__INST4_SEG3 0
++#define THM_BASE__INST4_SEG4 0
++#define THM_BASE__INST4_SEG5 0
++
++#define THM_BASE__INST5_SEG0 0
++#define THM_BASE__INST5_SEG1 0
++#define THM_BASE__INST5_SEG2 0
++#define THM_BASE__INST5_SEG3 0
++#define THM_BASE__INST5_SEG4 0
++#define THM_BASE__INST5_SEG5 0
++
++#define UMC_BASE__INST0_SEG0 0x00014000
++#define UMC_BASE__INST0_SEG1 0
++#define UMC_BASE__INST0_SEG2 0
++#define UMC_BASE__INST0_SEG3 0
++#define UMC_BASE__INST0_SEG4 0
++#define UMC_BASE__INST0_SEG5 0
++
++#define UMC_BASE__INST1_SEG0 0
++#define UMC_BASE__INST1_SEG1 0
++#define UMC_BASE__INST1_SEG2 0
++#define UMC_BASE__INST1_SEG3 0
++#define UMC_BASE__INST1_SEG4 0
++#define UMC_BASE__INST1_SEG5 0
++
++#define UMC_BASE__INST2_SEG0 0
++#define UMC_BASE__INST2_SEG1 0
++#define UMC_BASE__INST2_SEG2 0
++#define UMC_BASE__INST2_SEG3 0
++#define UMC_BASE__INST2_SEG4 0
++#define UMC_BASE__INST2_SEG5 0
++
++#define UMC_BASE__INST3_SEG0 0
++#define UMC_BASE__INST3_SEG1 0
++#define UMC_BASE__INST3_SEG2 0
++#define UMC_BASE__INST3_SEG3 0
++#define UMC_BASE__INST3_SEG4 0
++#define UMC_BASE__INST3_SEG5 0
++
++#define UMC_BASE__INST4_SEG0 0
++#define UMC_BASE__INST4_SEG1 0
++#define UMC_BASE__INST4_SEG2 0
++#define UMC_BASE__INST4_SEG3 0
++#define UMC_BASE__INST4_SEG4 0
++#define UMC_BASE__INST4_SEG5 0
++
++#define UMC_BASE__INST5_SEG0 0
++#define UMC_BASE__INST5_SEG1 0
++#define UMC_BASE__INST5_SEG2 0
++#define UMC_BASE__INST5_SEG3 0
++#define UMC_BASE__INST5_SEG4 0
++#define UMC_BASE__INST5_SEG5 0
++
++#define UVD_BASE__INST0_SEG0 0x00007800
++#define UVD_BASE__INST0_SEG1 0x00007E00
++#define UVD_BASE__INST0_SEG2 0
++#define UVD_BASE__INST0_SEG3 0
++#define UVD_BASE__INST0_SEG4 0
++#define UVD_BASE__INST0_SEG5 0
++
++#define UVD_BASE__INST1_SEG0 0
++#define UVD_BASE__INST1_SEG1 0x00009000
++#define UVD_BASE__INST1_SEG2 0
++#define UVD_BASE__INST1_SEG3 0
++#define UVD_BASE__INST1_SEG4 0
++#define UVD_BASE__INST1_SEG5 0
++
++#define UVD_BASE__INST2_SEG0 0
++#define UVD_BASE__INST2_SEG1 0
++#define UVD_BASE__INST2_SEG2 0
++#define UVD_BASE__INST2_SEG3 0
++#define UVD_BASE__INST2_SEG4 0
++#define UVD_BASE__INST2_SEG5 0
++
++#define UVD_BASE__INST3_SEG0 0
++#define UVD_BASE__INST3_SEG1 0
++#define UVD_BASE__INST3_SEG2 0
++#define UVD_BASE__INST3_SEG3 0
++#define UVD_BASE__INST3_SEG4 0
++#define UVD_BASE__INST3_SEG5 0
++
++#define UVD_BASE__INST4_SEG0 0
++#define UVD_BASE__INST4_SEG1 0
++#define UVD_BASE__INST4_SEG2 0
++#define UVD_BASE__INST4_SEG3 0
++#define UVD_BASE__INST4_SEG4 0
++#define UVD_BASE__INST4_SEG5 0
++
++#define UVD_BASE__INST5_SEG0 0
++#define UVD_BASE__INST5_SEG1 0
++#define UVD_BASE__INST5_SEG2 0
++#define UVD_BASE__INST5_SEG3 0
++#define UVD_BASE__INST5_SEG4 0
++#define UVD_BASE__INST5_SEG5 0
++
++#define VCE_BASE__INST0_SEG0 0x00008800
++#define VCE_BASE__INST0_SEG1 0
++#define VCE_BASE__INST0_SEG2 0
++#define VCE_BASE__INST0_SEG3 0
++#define VCE_BASE__INST0_SEG4 0
++#define VCE_BASE__INST0_SEG5 0
++
++#define VCE_BASE__INST1_SEG0 0
++#define VCE_BASE__INST1_SEG1 0
++#define VCE_BASE__INST1_SEG2 0
++#define VCE_BASE__INST1_SEG3 0
++#define VCE_BASE__INST1_SEG4 0
++#define VCE_BASE__INST1_SEG5 0
++
++#define VCE_BASE__INST2_SEG0 0
++#define VCE_BASE__INST2_SEG1 0
++#define VCE_BASE__INST2_SEG2 0
++#define VCE_BASE__INST2_SEG3 0
++#define VCE_BASE__INST2_SEG4 0
++#define VCE_BASE__INST2_SEG5 0
++
++#define VCE_BASE__INST3_SEG0 0
++#define VCE_BASE__INST3_SEG1 0
++#define VCE_BASE__INST3_SEG2 0
++#define VCE_BASE__INST3_SEG3 0
++#define VCE_BASE__INST3_SEG4 0
++#define VCE_BASE__INST3_SEG5 0
++
++#define VCE_BASE__INST4_SEG0 0
++#define VCE_BASE__INST4_SEG1 0
++#define VCE_BASE__INST4_SEG2 0
++#define VCE_BASE__INST4_SEG3 0
++#define VCE_BASE__INST4_SEG4 0
++#define VCE_BASE__INST4_SEG5 0
++
++#define VCE_BASE__INST5_SEG0 0
++#define VCE_BASE__INST5_SEG1 0
++#define VCE_BASE__INST5_SEG2 0
++#define VCE_BASE__INST5_SEG3 0
++#define VCE_BASE__INST5_SEG4 0
++#define VCE_BASE__INST5_SEG5 0
++
++#define XDMA_BASE__INST0_SEG0 0x00003400
++#define XDMA_BASE__INST0_SEG1 0
++#define XDMA_BASE__INST0_SEG2 0
++#define XDMA_BASE__INST0_SEG3 0
++#define XDMA_BASE__INST0_SEG4 0
++#define XDMA_BASE__INST0_SEG5 0
++
++#define XDMA_BASE__INST1_SEG0 0
++#define XDMA_BASE__INST1_SEG1 0
++#define XDMA_BASE__INST1_SEG2 0
++#define XDMA_BASE__INST1_SEG3 0
++#define XDMA_BASE__INST1_SEG4 0
++#define XDMA_BASE__INST1_SEG5 0
++
++#define XDMA_BASE__INST2_SEG0 0
++#define XDMA_BASE__INST2_SEG1 0
++#define XDMA_BASE__INST2_SEG2 0
++#define XDMA_BASE__INST2_SEG3 0
++#define XDMA_BASE__INST2_SEG4 0
++#define XDMA_BASE__INST2_SEG5 0
++
++#define XDMA_BASE__INST3_SEG0 0
++#define XDMA_BASE__INST3_SEG1 0
++#define XDMA_BASE__INST3_SEG2 0
++#define XDMA_BASE__INST3_SEG3 0
++#define XDMA_BASE__INST3_SEG4 0
++#define XDMA_BASE__INST3_SEG5 0
++
++#define XDMA_BASE__INST4_SEG0 0
++#define XDMA_BASE__INST4_SEG1 0
++#define XDMA_BASE__INST4_SEG2 0
++#define XDMA_BASE__INST4_SEG3 0
++#define XDMA_BASE__INST4_SEG4 0
++#define XDMA_BASE__INST4_SEG5 0
++
++#define XDMA_BASE__INST5_SEG0 0
++#define XDMA_BASE__INST5_SEG1 0
++#define XDMA_BASE__INST5_SEG2 0
++#define XDMA_BASE__INST5_SEG3 0
++#define XDMA_BASE__INST5_SEG4 0
++#define XDMA_BASE__INST5_SEG5 0
++
++#define RSMU_BASE__INST0_SEG0 0x00012000
++#define RSMU_BASE__INST0_SEG1 0
++#define RSMU_BASE__INST0_SEG2 0
++#define RSMU_BASE__INST0_SEG3 0
++#define RSMU_BASE__INST0_SEG4 0
++#define RSMU_BASE__INST0_SEG5 0
++
++#define RSMU_BASE__INST1_SEG0 0
++#define RSMU_BASE__INST1_SEG1 0
++#define RSMU_BASE__INST1_SEG2 0
++#define RSMU_BASE__INST1_SEG3 0
++#define RSMU_BASE__INST1_SEG4 0
++#define RSMU_BASE__INST1_SEG5 0
++
++#define RSMU_BASE__INST2_SEG0 0
++#define RSMU_BASE__INST2_SEG1 0
++#define RSMU_BASE__INST2_SEG2 0
++#define RSMU_BASE__INST2_SEG3 0
++#define RSMU_BASE__INST2_SEG4 0
++#define RSMU_BASE__INST2_SEG5 0
++
++#define RSMU_BASE__INST3_SEG0 0
++#define RSMU_BASE__INST3_SEG1 0
++#define RSMU_BASE__INST3_SEG2 0
++#define RSMU_BASE__INST3_SEG3 0
++#define RSMU_BASE__INST3_SEG4 0
++#define RSMU_BASE__INST3_SEG5 0
++
++#define RSMU_BASE__INST4_SEG0 0
++#define RSMU_BASE__INST4_SEG1 0
++#define RSMU_BASE__INST4_SEG2 0
++#define RSMU_BASE__INST4_SEG3 0
++#define RSMU_BASE__INST4_SEG4 0
++#define RSMU_BASE__INST4_SEG5 0
++
++#define RSMU_BASE__INST5_SEG0 0
++#define RSMU_BASE__INST5_SEG1 0
++#define RSMU_BASE__INST5_SEG2 0
++#define RSMU_BASE__INST5_SEG3 0
++#define RSMU_BASE__INST5_SEG4 0
++#define RSMU_BASE__INST5_SEG5 0
++
++#endif
++
+--
+2.7.4
+
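The header describes every vega20 IP block as up to MAX_INSTANCE instances of up to MAX_SEGMENT register apertures; a register's absolute dword offset is the segment base plus the register's offset within that segment. A minimal sketch of how such a table could be consumed (the helper is illustrative; the real driver copies these bases into adev->reg_offset and resolves registers through the SOC15 offset macros):

    /* illustrative lookup: base of one instance/segment plus the
     * register's offset inside that segment */
    static u32 vega20_reg_offset(const struct IP_BASE *ip,
                                 unsigned int inst, unsigned int seg,
                                 u32 reg)
    {
            return ip->instance[inst].segment[seg] + reg;
    }

    /* e.g. a GC register in instance 0, segment 0:
     * vega20_reg_offset(&GC_BASE, 0, 0, regSOME_GC_REG) */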
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4387-drm-amdgpu-Add-vega20-to-asic_type-enum.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4387-drm-amdgpu-Add-vega20-to-asic_type-enum.patch
new file mode 100644
index 00000000..4a27e142
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4387-drm-amdgpu-Add-vega20-to-asic_type-enum.patch
@@ -0,0 +1,30 @@
+From 0bb4609466186a070b8709401ec63a13bd387ab2 Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Fri, 20 Apr 2018 12:27:54 +0800
+Subject: [PATCH 4387/5725] drm/amdgpu: Add vega20 to asic_type enum.
+
+Add vega20 to amd_asic_type enum and amdgpu_asic_name[].
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 405d3a8..f2a038b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -86,6 +86,7 @@ static const char *amdgpu_asic_name[] = {
+ "VEGAM",
+ "VEGA10",
+ "VEGA12",
++ "VEGA20",
+ "RAVEN",
+ "LAST",
+ };
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4388-drm-amdgpu-Add-gpu_info-firmware-for-vega20.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4388-drm-amdgpu-Add-gpu_info-firmware-for-vega20.patch
new file mode 100644
index 00000000..c16a203b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4388-drm-amdgpu-Add-gpu_info-firmware-for-vega20.patch
@@ -0,0 +1,40 @@
+From f65218a86a3412c2349ee408fcc78846c97c66b8 Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Fri, 20 Apr 2018 12:31:04 +0800
+Subject: [PATCH 4388/5725] drm/amdgpu: Add gpu_info firmware for vega20.
+
+The vega20_gpu_info firmware stores the gpu configuration for vega20.
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index f2a038b..282260b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -60,6 +60,7 @@
+
+ MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
+ MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
++MODULE_FIRMWARE("amdgpu/vega20_gpu_info.bin");
+ MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
+
+ #define AMDGPU_RESUME_MS 2000
+@@ -1446,6 +1447,9 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
+ case CHIP_VEGA12:
+ chip_name = "vega12";
+ break;
++ case CHIP_VEGA20:
++ chip_name = "vega20";
++ break;
+ case CHIP_RAVEN:
+ chip_name = "raven";
+ break;
+--
+2.7.4
+
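The switch only picks a chip_name string; the rest of amdgpu_device_parse_gpu_info_fw() (not shown in the hunk) builds the firmware path from it and requests the blob, so the new case plus the MODULE_FIRMWARE() line is all vega20 needs here. Paraphrased, the continuation looks roughly like:

    snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
    err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
    if (err)
            goto out;   /* vega20 systems must ship vega20_gpu_info.bin */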
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4389-drm-amdgpu-set-asic-family-for-vega20.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4389-drm-amdgpu-set-asic-family-for-vega20.patch
new file mode 100644
index 00000000..75ad3d8b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4389-drm-amdgpu-set-asic-family-for-vega20.patch
@@ -0,0 +1,28 @@
+From 55c86d510ffee82a1fa5642c9348d386136674bf Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Fri, 20 Apr 2018 12:33:33 +0800
+Subject: [PATCH 4389/5725] drm/amdgpu: set asic family for vega20.
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 282260b..eaa8922 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1576,6 +1576,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
+ #endif
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
++ case CHIP_VEGA20:
+ case CHIP_RAVEN:
+ if (adev->asic_type == CHIP_RAVEN)
+ adev->family = AMDGPU_FAMILY_RV;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4390-drm-amdgpu-Add-smu-firmware-support-for-vega20.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4390-drm-amdgpu-Add-smu-firmware-support-for-vega20.patch
new file mode 100644
index 00000000..3ddac9a2
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4390-drm-amdgpu-Add-smu-firmware-support-for-vega20.patch
@@ -0,0 +1,43 @@
+From e781df0df1359e64b4e62035a7ef83203a1ec9a4 Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Fri, 20 Apr 2018 12:46:21 +0800
+Subject: [PATCH 4390/5725] drm/amdgpu: Add smu firmware support for vega20
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 3 +++
+ drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | 1 +
+ 2 files changed, 4 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+index 5b3d3bf..e950730 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+@@ -400,6 +400,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
+ case CHIP_VEGA12:
+ strcpy(fw_name, "amdgpu/vega12_smc.bin");
+ break;
++ case CHIP_VEGA20:
++ strcpy(fw_name, "amdgpu/vega20_smc.bin");
++ break;
+ default:
+ DRM_ERROR("SMC firmware not supported\n");
+ return -EINVAL;
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+index ee236df..c983793 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+@@ -45,6 +45,7 @@ MODULE_FIRMWARE("amdgpu/vegam_smc.bin");
+ MODULE_FIRMWARE("amdgpu/vega10_smc.bin");
+ MODULE_FIRMWARE("amdgpu/vega10_acg_smc.bin");
+ MODULE_FIRMWARE("amdgpu/vega12_smc.bin");
++MODULE_FIRMWARE("amdgpu/vega20_smc.bin");
+
+ int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
+ {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4391-drm-amdgpu-powerplay-Add-initial-vega20-support-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4391-drm-amdgpu-powerplay-Add-initial-vega20-support-v2.patch
new file mode 100644
index 00000000..dc349840
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4391-drm-amdgpu-powerplay-Add-initial-vega20-support-v2.patch
@@ -0,0 +1,30 @@
+From 488ed90009247f2783dc321dc205fddf9b31cdef Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Fri, 20 Apr 2018 13:32:46 +0800
+Subject: [PATCH 4391/5725] drm/amdgpu/powerplay: Add initial vega20 support v2
+
+Initial powerplay support is the same as vega10 for now.
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+index 71b4233..e63bc47 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+@@ -151,6 +151,7 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
+ hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
+ switch (hwmgr->chip_id) {
+ case CHIP_VEGA10:
++ case CHIP_VEGA20:
+ hwmgr->smumgr_funcs = &vega10_smu_funcs;
+ vega10_hwmgr_init(hwmgr);
+ break;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4392-drm-amdgpu-psp-Add-initial-psp-support-for-vega20.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4392-drm-amdgpu-psp-Add-initial-psp-support-for-vega20.patch
new file mode 100644
index 00000000..b76a2786
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4392-drm-amdgpu-psp-Add-initial-psp-support-for-vega20.patch
@@ -0,0 +1,45 @@
+From 47e49c4e3d1e7472abe8e7459c0855ec0e5d68f9 Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Fri, 20 Apr 2018 13:36:54 +0800
+Subject: [PATCH 4392/5725] drm/amdgpu/psp: Add initial psp support for vega20
+
+The same as vega10 for now.
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 1 +
+ drivers/gpu/drm/amd/amdgpu/psp_v3_1.c | 3 +++
+ 2 files changed, 4 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+index 4ce246c..a13cd21 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+@@ -52,6 +52,7 @@ static int psp_sw_init(void *handle)
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
++ case CHIP_VEGA20:
+ psp_v3_1_set_psp_funcs(psp);
+ break;
+ case CHIP_RAVEN:
+diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+index 196e75d..0c768e3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
++++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+@@ -41,6 +41,9 @@ MODULE_FIRMWARE("amdgpu/vega10_sos.bin");
+ MODULE_FIRMWARE("amdgpu/vega10_asd.bin");
+ MODULE_FIRMWARE("amdgpu/vega12_sos.bin");
+ MODULE_FIRMWARE("amdgpu/vega12_asd.bin");
++MODULE_FIRMWARE("amdgpu/vega20_sos.bin");
++MODULE_FIRMWARE("amdgpu/vega20_asd.bin");
++
+
+ #define smnMP1_FIRMWARE_FLAGS 0x3010028
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4393-drm-amdgpu-Add-vega20-ucode-loading-method.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4393-drm-amdgpu-Add-vega20-ucode-loading-method.patch
new file mode 100644
index 00000000..ff48deab
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4393-drm-amdgpu-Add-vega20-ucode-loading-method.patch
@@ -0,0 +1,30 @@
+From 63b929cea72759764b725c5e0b62e49824ae07e3 Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Fri, 20 Apr 2018 13:38:24 +0800
+Subject: [PATCH 4393/5725] drm/amdgpu: Add vega20 ucode loading method
+
+The same as vega10.
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+index ee71c40..63e2996 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+@@ -303,6 +303,7 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
+ case CHIP_VEGA10:
+ case CHIP_RAVEN:
+ case CHIP_VEGA12:
++ case CHIP_VEGA20:
+ if (!load_type)
+ return AMDGPU_FW_LOAD_DIRECT;
+ else
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4394-drm-amdgpu-Specify-vega20-uvd-firmware.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4394-drm-amdgpu-Specify-vega20-uvd-firmware.patch
new file mode 100644
index 00000000..d655d60a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4394-drm-amdgpu-Specify-vega20-uvd-firmware.patch
@@ -0,0 +1,46 @@
+From a77b04a1b85ff01e0a2d1b05e67c461fd53f04b4 Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Fri, 11 May 2018 13:44:09 -0500
+Subject: [PATCH 4394/5725] drm/amdgpu: Specify vega20 uvd firmware
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+index ff8a62a..b98bac9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+@@ -70,6 +70,7 @@
+
+ #define FIRMWARE_VEGA10 "amdgpu/vega10_uvd.bin"
+ #define FIRMWARE_VEGA12 "amdgpu/vega12_uvd.bin"
++#define FIRMWARE_VEGA20 "amdgpu/vega20_uvd.bin"
+
+ #define mmUVD_GPCOM_VCPU_DATA0_VEGA10 (0x03c4 + 0x7e00)
+ #define mmUVD_GPCOM_VCPU_DATA1_VEGA10 (0x03c5 + 0x7e00)
+@@ -114,6 +115,7 @@ MODULE_FIRMWARE(FIRMWARE_VEGAM);
+
+ MODULE_FIRMWARE(FIRMWARE_VEGA10);
+ MODULE_FIRMWARE(FIRMWARE_VEGA12);
++MODULE_FIRMWARE(FIRMWARE_VEGA20);
+
+ static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
+
+@@ -177,6 +179,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
+ case CHIP_VEGAM:
+ fw_name = FIRMWARE_VEGAM;
+ break;
++ case CHIP_VEGA20:
++ fw_name = FIRMWARE_VEGA20;
++ break;
+ default:
+ return -EINVAL;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4395-drm-amdgpu-Specify-vega20-vce-firmware.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4395-drm-amdgpu-Specify-vega20-vce-firmware.patch
new file mode 100644
index 00000000..b3d6727d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4395-drm-amdgpu-Specify-vega20-vce-firmware.patch
@@ -0,0 +1,46 @@
+From 8d07e6357e5c2134aed0bfdcf9b4b9d0cedbec31 Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Fri, 20 Apr 2018 13:46:49 +0800
+Subject: [PATCH 4395/5725] drm/amdgpu: Specify vega20 vce firmware
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+index e2186ed..1b1d8e1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+@@ -57,6 +57,7 @@
+
+ #define FIRMWARE_VEGA10 "amdgpu/vega10_vce.bin"
+ #define FIRMWARE_VEGA12 "amdgpu/vega12_vce.bin"
++#define FIRMWARE_VEGA20 "amdgpu/vega20_vce.bin"
+
+ #ifdef CONFIG_DRM_AMDGPU_CIK
+ MODULE_FIRMWARE(FIRMWARE_BONAIRE);
+@@ -76,6 +77,7 @@ MODULE_FIRMWARE(FIRMWARE_VEGAM);
+
+ MODULE_FIRMWARE(FIRMWARE_VEGA10);
+ MODULE_FIRMWARE(FIRMWARE_VEGA12);
++MODULE_FIRMWARE(FIRMWARE_VEGA20);
+
+ static void amdgpu_vce_idle_work_handler(struct work_struct *work);
+
+@@ -143,6 +145,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
+ case CHIP_VEGA12:
+ fw_name = FIRMWARE_VEGA12;
+ break;
++ case CHIP_VEGA20:
++ fw_name = FIRMWARE_VEGA20;
++ break;
+
+ default:
+ return -EINVAL;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4396-drm-amdgpu-virtual_dce-Add-vega20-support.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4396-drm-amdgpu-virtual_dce-Add-vega20-support.patch
new file mode 100644
index 00000000..552d1b65
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4396-drm-amdgpu-virtual_dce-Add-vega20-support.patch
@@ -0,0 +1,28 @@
+From 3c6023c6b4542bff56da7aa0d95fd1100eca8b56 Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Fri, 20 Apr 2018 13:48:23 +0800
+Subject: [PATCH 4396/5725] drm/amdgpu/virtual_dce: Add vega20 support
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+index 8724edd..28bf8cf 100644
+--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
++++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+@@ -476,6 +476,7 @@ static int dce_virtual_hw_init(void *handle)
+ break;
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
++ case CHIP_VEGA20:
+ break;
+ default:
+ DRM_ERROR("Virtual display unsupported ASIC type: 0x%X\n", adev->asic_type);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4397-drm-amdgpu-gmc9-Add-vega20-support.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4397-drm-amdgpu-gmc9-Add-vega20-support.patch
new file mode 100644
index 00000000..53610e09
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4397-drm-amdgpu-gmc9-Add-vega20-support.patch
@@ -0,0 +1,44 @@
+From 3c321188cc395acd23a68638bc5361e3354fdf87 Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Fri, 20 Apr 2018 13:56:43 +0800
+Subject: [PATCH 4397/5725] drm/amdgpu/gmc9: Add vega20 support
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index 3c0a0c3..1f3249b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -752,6 +752,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
+ switch (adev->asic_type) {
+ case CHIP_VEGA10: /* all engines support GPUVM */
+ case CHIP_VEGA12: /* all engines support GPUVM */
++ case CHIP_VEGA20:
+ default:
+ adev->gmc.gart_size = 512ULL << 20;
+ break;
+@@ -857,6 +858,7 @@ static int gmc_v9_0_sw_init(void *handle)
+ break;
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
++ case CHIP_VEGA20:
+ /*
+ * To fulfill 4-level page support,
+ * vm size is 256TB (48bit), maximum size of Vega10,
+@@ -973,6 +975,7 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
+
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
++ case CHIP_VEGA20:
+ soc15_program_register_sequence(adev,
+ golden_settings_mmhub_1_0_0,
+ ARRAY_SIZE(golden_settings_mmhub_1_0_0));
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4398-drm-amdgpu-mmhub-Add-clockgating-support-for-vega20.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4398-drm-amdgpu-mmhub-Add-clockgating-support-for-vega20.patch
new file mode 100644
index 00000000..6a656100
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4398-drm-amdgpu-mmhub-Add-clockgating-support-for-vega20.patch
@@ -0,0 +1,29 @@
+From d958b120b0cfe9c88995a163a9a7bb779d98d0e3 Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Fri, 20 Apr 2018 13:58:09 +0800
+Subject: [PATCH 4398/5725] drm/amdgpu/mmhub: Add clockgating support for
+ vega20
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+index 43f9257..3d53c44 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+@@ -734,6 +734,7 @@ int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
++ case CHIP_VEGA20:
+ case CHIP_RAVEN:
+ mmhub_v1_0_update_medium_grain_clock_gating(adev,
+ state == AMD_CG_STATE_GATE ? true : false);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4399-drm-amdgpu-sdma4-Specify-vega20-firmware.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4399-drm-amdgpu-sdma4-Specify-vega20-firmware.patch
new file mode 100644
index 00000000..4dff0627
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4399-drm-amdgpu-sdma4-Specify-vega20-firmware.patch
@@ -0,0 +1,39 @@
+From 242ab8f25bfe8264a4dd4c053aee43f3c0ff09f0 Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Fri, 20 Apr 2018 14:00:02 +0800
+Subject: [PATCH 4399/5725] drm/amdgpu/sdma4: Specify vega20 firmware
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+index f2a3800..3c10f54 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+@@ -42,6 +42,8 @@ MODULE_FIRMWARE("amdgpu/vega10_sdma.bin");
+ MODULE_FIRMWARE("amdgpu/vega10_sdma1.bin");
+ MODULE_FIRMWARE("amdgpu/vega12_sdma.bin");
+ MODULE_FIRMWARE("amdgpu/vega12_sdma1.bin");
++MODULE_FIRMWARE("amdgpu/vega20_sdma.bin");
++MODULE_FIRMWARE("amdgpu/vega20_sdma1.bin");
+ MODULE_FIRMWARE("amdgpu/raven_sdma.bin");
+
+ #define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK 0x000000F8L
+@@ -182,6 +184,9 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
+ case CHIP_VEGA12:
+ chip_name = "vega12";
+ break;
++ case CHIP_VEGA20:
++ chip_name = "vega20";
++ break;
+ case CHIP_RAVEN:
+ chip_name = "raven";
+ break;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4400-drm-amdgpu-sdma4-Add-vega20-golden-settings-v3.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4400-drm-amdgpu-sdma4-Add-vega20-golden-settings-v3.patch
new file mode 100644
index 00000000..0a449d4d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4400-drm-amdgpu-sdma4-Add-vega20-golden-settings-v3.patch
@@ -0,0 +1,64 @@
+From 472dd302f317f8ebd000451d3c068a919ad5c4d2 Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Tue, 23 Jan 2018 11:13:02 +0800
+Subject: [PATCH 4400/5725] drm/amdgpu/sdma4: Add vega20 golden settings (v3)
+
+v2: squash in updates (Alex)
+v3: squash in more updates (Alex)
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 27 +++++++++++++++++++++++++++
+ 1 file changed, 27 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+index 3c10f54..b1114e5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+@@ -109,6 +109,28 @@ static const struct soc15_reg_golden golden_settings_sdma_4_1[] =
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0)
+ };
+
++static const struct soc15_reg_golden golden_settings_sdma_4_2[] =
++{
++ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
++ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100),
++ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
++ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
++ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
++ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
++ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff0, 0x00403000),
++ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
++ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
++ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
++ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
++ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
++ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
++ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
++ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
++ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
++ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
++ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0)
++};
++
+ static const struct soc15_reg_golden golden_settings_sdma_rv1[] =
+ {
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00000002),
+@@ -141,6 +163,11 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
+ golden_settings_sdma_vg12,
+ ARRAY_SIZE(golden_settings_sdma_vg12));
+ break;
++ case CHIP_VEGA20:
++ soc15_program_register_sequence(adev,
++ golden_settings_sdma_4_2,
++ ARRAY_SIZE(golden_settings_sdma_4_2));
++ break;
+ case CHIP_RAVEN:
+ soc15_program_register_sequence(adev,
+ golden_settings_sdma_4_1,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4401-drm-amdgpu-sdma4-Add-clockgating-support-for-vega20.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4401-drm-amdgpu-sdma4-Add-clockgating-support-for-vega20.patch
new file mode 100644
index 00000000..b1b82f76
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4401-drm-amdgpu-sdma4-Add-clockgating-support-for-vega20.patch
@@ -0,0 +1,29 @@
+From f98e072bf9562c36ce1950231bfee27c636603a5 Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Tue, 23 Jan 2018 11:16:16 +0800
+Subject: [PATCH 4401/5725] drm/amdgpu/sdma4: Add clockgating support for
+ vega20
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+index b1114e5..0b076d4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+@@ -1552,6 +1552,7 @@ static int sdma_v4_0_set_clockgating_state(void *handle,
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
++ case CHIP_VEGA20:
+ case CHIP_RAVEN:
+ sdma_v4_0_update_medium_grain_clock_gating(adev,
+ state == AMD_CG_STATE_GATE ? true : false);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4402-drm-amdgpu-gfx9-Add-support-for-vega20-firmware.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4402-drm-amdgpu-gfx9-Add-support-for-vega20-firmware.patch
new file mode 100644
index 00000000..37626cb5
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4402-drm-amdgpu-gfx9-Add-support-for-vega20-firmware.patch
@@ -0,0 +1,44 @@
+From 0d033574ae6bf42ef3d2f9b63ad0be1b32406893 Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Fri, 20 Apr 2018 14:22:48 +0800
+Subject: [PATCH 4402/5725] drm/amdgpu/gfx9: Add support for vega20 firmware
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 0b3d2e0..47ab06a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -63,6 +63,13 @@ MODULE_FIRMWARE("amdgpu/vega12_mec.bin");
+ MODULE_FIRMWARE("amdgpu/vega12_mec2.bin");
+ MODULE_FIRMWARE("amdgpu/vega12_rlc.bin");
+
++MODULE_FIRMWARE("amdgpu/vega20_ce.bin");
++MODULE_FIRMWARE("amdgpu/vega20_pfp.bin");
++MODULE_FIRMWARE("amdgpu/vega20_me.bin");
++MODULE_FIRMWARE("amdgpu/vega20_mec.bin");
++MODULE_FIRMWARE("amdgpu/vega20_mec2.bin");
++MODULE_FIRMWARE("amdgpu/vega20_rlc.bin");
++
+ MODULE_FIRMWARE("amdgpu/raven_ce.bin");
+ MODULE_FIRMWARE("amdgpu/raven_pfp.bin");
+ MODULE_FIRMWARE("amdgpu/raven_me.bin");
+@@ -461,6 +468,9 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
+ case CHIP_VEGA12:
+ chip_name = "vega12";
+ break;
++ case CHIP_VEGA20:
++ chip_name = "vega20";
++ break;
+ case CHIP_RAVEN:
+ chip_name = "raven";
+ break;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4403-drm-amdgpu-gfx9-Add-vega20-golden-settings-v3.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4403-drm-amdgpu-gfx9-Add-vega20-golden-settings-v3.patch
new file mode 100644
index 00000000..4b2fb4ea
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4403-drm-amdgpu-gfx9-Add-vega20-golden-settings-v3.patch
@@ -0,0 +1,59 @@
+From 0db85cf51654aae688974fae4bce958db6d477e3 Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Tue, 23 Jan 2018 14:47:26 +0800
+Subject: [PATCH 4403/5725] drm/amdgpu/gfx9: Add vega20 golden settings (v3)
+
+v2: squash in updates (Alex)
+v3: squash in more updates (Alex)
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 22 ++++++++++++++++++++++
+ 1 file changed, 22 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 47ab06a..2019170 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -108,6 +108,20 @@ static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800)
+ };
+
++static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
++{
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x22014042),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x22014042),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0x00003e00, 0x00000400),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xff840000, 0x04040000),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00030000),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff010f, 0x01000107),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x000b0000, 0x000b0000),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01000000, 0x01000000)
++};
++
+ static const struct soc15_reg_golden golden_settings_gc_9_1[] =
+ {
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
+@@ -241,6 +255,14 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
+ golden_settings_gc_9_2_1_vg12,
+ ARRAY_SIZE(golden_settings_gc_9_2_1_vg12));
+ break;
++ case CHIP_VEGA20:
++ soc15_program_register_sequence(adev,
++ golden_settings_gc_9_0,
++ ARRAY_SIZE(golden_settings_gc_9_0));
++ soc15_program_register_sequence(adev,
++ golden_settings_gc_9_0_vg20,
++ ARRAY_SIZE(golden_settings_gc_9_0_vg20));
++ break;
+ case CHIP_RAVEN:
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_9_1,
+--
+2.7.4
+
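Context for the golden-settings patches above (sdma4 in 4400 and gfx9 in 4403): each SOC15_REG_GOLDEN_VALUE(ip, inst, reg, mask, value) entry is one element of a table that soc15_program_register_sequence() walks once at init, doing a masked read-modify-write per register. The sketch below is only an approximation of that helper; the struct field names (hwip, instance, segment, reg_offset, and_mask, or_mask) are assumptions inferred from the table layout in these patches, not a verbatim copy of soc15.c.

/* Hedged sketch of applying a soc15_reg_golden table; field names are
 * assumptions inferred from SOC15_REG_GOLDEN_VALUE() usage above. */
static void program_golden_regs(struct amdgpu_device *adev,
				const struct soc15_reg_golden *regs,
				u32 array_size)
{
	u32 i, reg, tmp;

	for (i = 0; i < array_size; i++) {
		/* resolve the IP-block-relative register to an absolute offset */
		reg = adev->reg_offset[regs[i].hwip][regs[i].instance]
				      [regs[i].segment] + regs[i].reg_offset;
		if (regs[i].and_mask == 0xffffffff) {
			tmp = regs[i].or_mask;		/* replace whole register */
		} else {
			tmp = RREG32(reg);
			tmp &= ~regs[i].and_mask;	/* clear the masked bits */
			tmp |= regs[i].or_mask;		/* apply the golden value */
		}
		WREG32(reg, tmp);
	}
}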
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4404-drm-amdgpu-gfx9-Add-gfx-config-for-vega20.-v3.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4404-drm-amdgpu-gfx9-Add-gfx-config-for-vega20.-v3.patch
new file mode 100644
index 00000000..5fd9bc7e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4404-drm-amdgpu-gfx9-Add-gfx-config-for-vega20.-v3.patch
@@ -0,0 +1,41 @@
+From 66a475ed75c98aa5d5cabc8bc1ed913abd206eee Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Fri, 20 Apr 2018 14:40:11 +0800
+Subject: [PATCH 4404/5725] drm/amdgpu/gfx9: Add gfx config for vega20. (v3)
+
+v2: clean up (Alex)
+v3: additional cleanups (Alex)
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 2019170..5c9e44f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -1137,6 +1137,17 @@ static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
+ gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN;
+ DRM_INFO("fix gfx.config for vega12\n");
+ break;
++ case CHIP_VEGA20:
++ adev->gfx.config.max_hw_contexts = 8;
++ adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
++ adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
++ adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
++ adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
++ //TODO: Need to update this for vega20
++ gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
++ gb_addr_config &= ~0xf3e777ff;
++ gb_addr_config |= 0x22014042;
++ break;
+ case CHIP_RAVEN:
+ adev->gfx.config.max_hw_contexts = 8;
+ adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4405-drm-amdgpu-gfx9-Add-support-for-vega20.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4405-drm-amdgpu-gfx9-Add-support-for-vega20.patch
new file mode 100644
index 00000000..826e8c1a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4405-drm-amdgpu-gfx9-Add-support-for-vega20.patch
@@ -0,0 +1,36 @@
+From edc70851db3532e8c02e30ff5dcbe25ac69945e9 Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Fri, 20 Apr 2018 15:51:26 +0800
+Subject: [PATCH 4405/5725] drm/amdgpu/gfx9: Add support for vega20
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 5c9e44f..8ab13c3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -1430,6 +1430,7 @@ static int gfx_v9_0_sw_init(void *handle)
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
++ case CHIP_VEGA20:
+ case CHIP_RAVEN:
+ adev->gfx.mec.num_mec = 2;
+ break;
+@@ -4725,6 +4726,7 @@ static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
++ case CHIP_VEGA20:
+ case CHIP_RAVEN:
+ adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
+ break;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4406-drm-amdgpu-gfx9-Add-clockgatting-support-for-vega20.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4406-drm-amdgpu-gfx9-Add-clockgatting-support-for-vega20.patch
new file mode 100644
index 00000000..c1fbca7a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4406-drm-amdgpu-gfx9-Add-clockgatting-support-for-vega20.patch
@@ -0,0 +1,29 @@
+From b58a0e72bdd835b862bf75b34a8eea776b9160e8 Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Tue, 23 Jan 2018 15:03:36 +0800
+Subject: [PATCH 4406/5725] drm/amdgpu/gfx9: Add clockgatting support for
+ vega20
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 8ab13c3..5f812dd 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -3724,6 +3724,7 @@ static int gfx_v9_0_set_clockgating_state(void *handle,
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
++ case CHIP_VEGA20:
+ case CHIP_RAVEN:
+ gfx_v9_0_update_gfx_clock_gating(adev,
+ state == AMD_CG_STATE_GATE ? true : false);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4407-drm-amdgpu-soc15-Add-vega20-soc15_common_early_init-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4407-drm-amdgpu-soc15-Add-vega20-soc15_common_early_init-.patch
new file mode 100644
index 00000000..498e3c68
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4407-drm-amdgpu-soc15-Add-vega20-soc15_common_early_init-.patch
@@ -0,0 +1,35 @@
+From a34c8fe22f6bcd83ca345adcaa042df02201aa3c Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Fri, 26 Jan 2018 15:06:22 +0800
+Subject: [PATCH 4407/5725] drm/amdgpu/soc15:Add vega20 soc15_common_early_init
+ support
+
+Set external_rev_id and disable cg,pg for now.
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/soc15.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index f31df18..f45bea8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -658,6 +658,11 @@ static int soc15_common_early_init(void *handle)
+ adev->pg_flags = 0;
+ adev->external_rev_id = adev->rev_id + 0x14;
+ break;
++ case CHIP_VEGA20:
++ adev->cg_flags = 0;
++ adev->pg_flags = 0;
++ adev->external_rev_id = adev->rev_id + 0x28;
++ break;
+ case CHIP_RAVEN:
+ adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_MGLS |
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4408-drm-amdgpu-soc15-Set-common-clockgating-for-vega20.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4408-drm-amdgpu-soc15-Set-common-clockgating-for-vega20.patch
new file mode 100644
index 00000000..5736f044
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4408-drm-amdgpu-soc15-Set-common-clockgating-for-vega20.patch
@@ -0,0 +1,30 @@
+From 9e4ebe01e8add6020e279b76a1a91857a086ed9d Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Fri, 26 Jan 2018 15:10:55 +0800
+Subject: [PATCH 4408/5725] drm/amdgpu/soc15: Set common clockgating for
+ vega20.
+
+Same as vega10 for now.
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/soc15.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index f45bea8..1fd75f5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -875,6 +875,7 @@ static int soc15_common_set_clockgating_state(void *handle,
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
++ case CHIP_VEGA20:
+ adev->nbio_funcs->update_medium_grain_clock_gating(adev,
+ state == AMD_CG_STATE_GATE ? true : false);
+ adev->nbio_funcs->update_medium_grain_light_sleep(adev,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4409-drm-amdgpu-soc15-dynamic-initialize-ip-offset-for-ve.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4409-drm-amdgpu-soc15-dynamic-initialize-ip-offset-for-ve.patch
new file mode 100644
index 00000000..f83d1293
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4409-drm-amdgpu-soc15-dynamic-initialize-ip-offset-for-ve.patch
@@ -0,0 +1,122 @@
+From aa0f9d4161502946d3813537cfabde0e26722b48 Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Fri, 23 Mar 2018 14:42:28 -0500
+Subject: [PATCH 4409/5725] drm/amdgpu/soc15: dynamic initialize ip offset for
+ vega20
+
+Vega20 needs a separate vega20_reg_init.c due to IP base
+offset differences.
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/Makefile | 3 +-
+ drivers/gpu/drm/amd/amdgpu/soc15.c | 3 ++
+ drivers/gpu/drm/amd/amdgpu/soc15.h | 1 +
+ drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c | 53 ++++++++++++++++++++++++++++
+ 4 files changed, 59 insertions(+), 1 deletion(-)
+ create mode 100644 drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
+index 4a558d6..53b246a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/Makefile
++++ b/drivers/gpu/drm/amd/amdgpu/Makefile
+@@ -41,7 +41,8 @@ amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
+ amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o si_dpm.o si_smc.o
+
+ amdgpu-y += \
+- vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o
++ vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \
++ vega20_reg_init.o
+
+ # add DF block
+ amdgpu-y += \
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index 1fd75f5..c3133d1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -487,6 +487,9 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
+ case CHIP_RAVEN:
+ vega10_reg_base_init(adev);
+ break;
++ case CHIP_VEGA20:
++ vega20_reg_base_init(adev);
++ break;
+ default:
+ return -EINVAL;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
+index f70da8a..1f714b7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
+@@ -55,5 +55,6 @@ void soc15_program_register_sequence(struct amdgpu_device *adev,
+ const u32 array_size);
+
+ int vega10_reg_base_init(struct amdgpu_device *adev);
++int vega20_reg_base_init(struct amdgpu_device *adev);
+
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
+new file mode 100644
+index 0000000..52778de
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
+@@ -0,0 +1,53 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++#include "amdgpu.h"
++#include "soc15.h"
++
++#include "soc15_common.h"
++#include "soc15_hw_ip.h"
++#include "vega20_ip_offset.h"
++
++int vega20_reg_base_init(struct amdgpu_device *adev)
++{
++	/* HW has more IP blocks; only initialize the blocks needed by our driver */
++ uint32_t i;
++ for (i = 0 ; i < MAX_INSTANCE ; ++i) {
++ adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
++ adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
++ adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
++ adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
++ adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
++ adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
++ adev->reg_offset[UVD_HWIP][i] = (uint32_t *)(&(UVD_BASE.instance[i]));
++ adev->reg_offset[VCE_HWIP][i] = (uint32_t *)(&(VCE_BASE.instance[i]));
++ adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
++ adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DCE_BASE.instance[i]));
++ adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
++ adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(SDMA0_BASE.instance[i]));
++ adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(SDMA1_BASE.instance[i]));
++ adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
++ }
++ return 0;
++}
++
++
+--
+2.7.4
+
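How the tables filled in by vega20_reg_base_init() above are consumed: the SOC15 register accessors turn a logical (IP block, instance, register) reference into an absolute MMIO offset by adding the register's segment base from adev->reg_offset. The macro shapes below are an illustrative approximation of soc15_common.h, not a verbatim copy.

/* Approximate shape of the SOC15 lookup (illustrative only): the per-ASIC
 * reg_offset tables installed by vega20_reg_base_init() supply the segment
 * base that is added to the IP-relative register offset. */
#define SOC15_REG_OFFSET(ip, inst, reg) \
	(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)
#define RREG32_SOC15(ip, inst, reg) \
	RREG32(SOC15_REG_OFFSET(ip, inst, reg))

/* e.g. RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0) reads the register at
 * the NBIO instance-0 segment base plus mmRCC_DEV0_EPF0_STRAP0, so the same
 * driver code works on vega10 and vega20 despite different IP base offsets. */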
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4410-drm-amdgpu-soc15-Add-ip-blocks-for-vega20-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4410-drm-amdgpu-soc15-Add-ip-blocks-for-vega20-v2.patch
new file mode 100644
index 00000000..662ea56d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4410-drm-amdgpu-soc15-Add-ip-blocks-for-vega20-v2.patch
@@ -0,0 +1,32 @@
+From 57253ff3882935ee4e5b253cf969491f3502314a Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Fri, 20 Apr 2018 18:35:42 +0800
+Subject: [PATCH 4410/5725] drm/amdgpu/soc15: Add ip blocks for vega20 (v2)
+
+Same as vega10 now.
+
+v2: squash in typo fix
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/soc15.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index c3133d1..10337fb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -508,6 +508,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
++ case CHIP_VEGA20:
+ amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4411-drm-amdgpu-Add-nbio-support-for-vega20-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4411-drm-amdgpu-Add-nbio-support-for-vega20-v2.patch
new file mode 100644
index 00000000..1622d2a1
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4411-drm-amdgpu-Add-nbio-support-for-vega20-v2.patch
@@ -0,0 +1,86 @@
+From d5b0a4f04720bc2af575bb8e6abe080823ae2fef Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Fri, 23 Mar 2018 14:44:28 -0500
+Subject: [PATCH 4411/5725] drm/amdgpu: Add nbio support for vega20 (v2)
+
+Some register offsets in nbio v7.4 differ from those in v7.0.
+
+v2: Use nbio7.0 for now.
+
+TODO: add a new nbio 7.4 module (Alex)
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c | 18 +++++++++++++++++-
+ drivers/gpu/drm/amd/amdgpu/soc15.c | 2 ++
+ 2 files changed, 19 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
+index df34dc7..365517c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
+@@ -34,10 +34,19 @@
+ #define smnCPM_CONTROL 0x11180460
+ #define smnPCIE_CNTL2 0x11180070
+
++/* vega20 */
++#define mmRCC_DEV0_EPF0_STRAP0_VG20 0x0011
++#define mmRCC_DEV0_EPF0_STRAP0_VG20_BASE_IDX 2
++
+ static u32 nbio_v7_0_get_rev_id(struct amdgpu_device *adev)
+ {
+ u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
+
++ if (adev->asic_type == CHIP_VEGA20)
++ tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0_VG20);
++ else
++ tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
++
+ tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
+ tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;
+
+@@ -75,10 +84,14 @@ static void nbio_v7_0_sdma_doorbell_range(struct amdgpu_device *adev, int instan
+ SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE);
+
+ u32 doorbell_range = RREG32(reg);
++ u32 range = 2;
++
++ if (adev->asic_type == CHIP_VEGA20)
++ range = 8;
+
+ if (use_doorbell) {
+ doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
+- doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 2);
++ doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, range);
+ } else
+ doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);
+
+@@ -133,6 +146,9 @@ static void nbio_v7_0_update_medium_grain_clock_gating(struct amdgpu_device *ade
+ {
+ uint32_t def, data;
+
++ if (adev->asic_type == CHIP_VEGA20)
++ return;
++
+ /* NBIF_MGCG_CTRL_LCLK */
+ def = data = RREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index 10337fb..4e065c6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -496,6 +496,8 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
+
+ if (adev->flags & AMD_IS_APU)
+ adev->nbio_funcs = &nbio_v7_0_funcs;
++ else if (adev->asic_type == CHIP_VEGA20)
++ adev->nbio_funcs = &nbio_v7_0_funcs;
+ else
+ adev->nbio_funcs = &nbio_v6_1_funcs;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4412-drm-amdgpu-Add-vega20-soc-init-sequence-on-emulator-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4412-drm-amdgpu-Add-vega20-soc-init-sequence-on-emulator-.patch
new file mode 100644
index 00000000..7b1256a4
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4412-drm-amdgpu-Add-vega20-soc-init-sequence-on-emulator-.patch
@@ -0,0 +1,10123 @@
+From 447b4c7773043906f623c25874c1020aa67643b7 Mon Sep 17 00:00:00 2001
+From: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Date: Wed, 7 Feb 2018 14:54:39 -0500
+Subject: [PATCH 4412/5725] drm/amdgpu: Add vega20 soc init sequence on
+ emulator (v3)
+
+v2: cleanups (Alex)
+v3: make it vega20 only (Alex)
+
+Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/emu_soc.c | 10091 +++++++++++++++++++++++++++++++++
+ 1 file changed, 10091 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/emu_soc.c b/drivers/gpu/drm/amd/amdgpu/emu_soc.c
+index d72c25c..91f00fb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/emu_soc.c
++++ b/drivers/gpu/drm/amd/amdgpu/emu_soc.c
+@@ -26,8 +26,10099 @@
+ #include "soc15_common.h"
+ #include "soc15_hw_ip.h"
+
++static void wreg32_idx_byteoffset(struct amdgpu_device *adev, u32 offset, u32 value) {
++
++ static u32 maxoffset = 0;
++ static int count = 0;
++
++ WREG32(0xc, offset);
++ RREG32(0xc);
++ WREG32(0xd, value);
++ RREG32(0xd);
++
++ if (offset > maxoffset)
++ maxoffset = offset;
++
++ count++;
++ if (count % 100 == 0) {
++ DRM_INFO("%5d registers written, max offset %08x\n", count, maxoffset);
++ msleep(1);
++ }
++
++}
++
++static void vg20_lsd_soc_init_with_umc(struct amdgpu_device *adev)
++{
++
++ wreg32_idx_byteoffset(adev, 0x10131800, 0x40a40);
++ wreg32_idx_byteoffset(adev, 0x10141010, 0x10000000);
++ wreg32_idx_byteoffset(adev, 0x10134008, 0xfa042021);
++ wreg32_idx_byteoffset(adev, 0x387C, 0x3);
++ wreg32_idx_byteoffset(adev, 0x381C, 0x30000);
++ wreg32_idx_byteoffset(adev, 0x101237A4, 0x8000100f);
++ wreg32_idx_byteoffset(adev, 0x101237A8, 0x140f);
++ wreg32_idx_byteoffset(adev, 0x3780, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1403BC0, 0x100040);
++ wreg32_idx_byteoffset(adev, 0x1403BC4, 0x100140);
++ wreg32_idx_byteoffset(adev, 0x1403BC8, 0x100240);
++ wreg32_idx_byteoffset(adev, 0x1403BCC, 0x100300);
++ /* End nbio_init_sequence on GPU 0 */
++
++ wreg32_idx_byteoffset(adev, 0x11100060, 0x2830);
++ wreg32_idx_byteoffset(adev, 0x3780, 0x1);
++ /* Done with NBIO Init on GPU 0 */
++
++ /* Start with DF Init on GPU 0*/
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C668, 0x31a1b74);
++ wreg32_idx_byteoffset(adev, 0x1C66C, 0x36666332);
++ wreg32_idx_byteoffset(adev, 0x1C7F8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C72C, 0xffffffff);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x460001);
++ wreg32_idx_byteoffset(adev, 0x1CE94, 0xaf);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x470001);
++ wreg32_idx_byteoffset(adev, 0x1CE94, 0xaf);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C730, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C600, 0x1000001);
++ wreg32_idx_byteoffset(adev, 0x1C604, 0x2001);
++ wreg32_idx_byteoffset(adev, 0x1C608, 0x380007f);
++ wreg32_idx_byteoffset(adev, 0x1C60C, 0x7000007);
++ wreg32_idx_byteoffset(adev, 0x1C4A8, 0x40);
++ wreg32_idx_byteoffset(adev, 0x1C73C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C598, 0x138);
++ wreg32_idx_byteoffset(adev, 0x1C59C, 0x44041);
++ wreg32_idx_byteoffset(adev, 0x1C594, 0x40);
++ wreg32_idx_byteoffset(adev, 0x1C584, 0x138);
++ wreg32_idx_byteoffset(adev, 0x1C588, 0x44041);
++ wreg32_idx_byteoffset(adev, 0x1C58C, 0x30);
++ wreg32_idx_byteoffset(adev, 0x1C590, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C5F0, 0x90);
++ wreg32_idx_byteoffset(adev, 0x1C5F4, 0x8000);
++ wreg32_idx_byteoffset(adev, 0x1C5F8, 0x8e);
++ wreg32_idx_byteoffset(adev, 0x1C5FC, 0x8000);
++ wreg32_idx_byteoffset(adev, 0x1C4AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C5EC, 0x4090c);
++ wreg32_idx_byteoffset(adev, 0x1C490, 0xbe);
++ wreg32_idx_byteoffset(adev, 0x1C494, 0x8000);
++ wreg32_idx_byteoffset(adev, 0x1C498, 0xac);
++ wreg32_idx_byteoffset(adev, 0x1C49C, 0x8000);
++ wreg32_idx_byteoffset(adev, 0x1C488, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C48C, 0x540980c);
++ wreg32_idx_byteoffset(adev, 0x1C4B0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C4B4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C4B8, 0x8);
++ wreg32_idx_byteoffset(adev, 0x1C4BC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C4C0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C4C4, 0x6fc81);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x480001);
++ wreg32_idx_byteoffset(adev, 0x1DDA0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x490001);
++ wreg32_idx_byteoffset(adev, 0x1DDA0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x4a0001);
++ wreg32_idx_byteoffset(adev, 0x1DDA0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x4b0001);
++ wreg32_idx_byteoffset(adev, 0x1DDA0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x4c0001);
++ wreg32_idx_byteoffset(adev, 0x1DDA0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x4d0001);
++ wreg32_idx_byteoffset(adev, 0x1DDA0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x4e0001);
++ wreg32_idx_byteoffset(adev, 0x1DDA0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x4f0001);
++ wreg32_idx_byteoffset(adev, 0x1DDA0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x500001);
++ wreg32_idx_byteoffset(adev, 0x1DDA0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x510001);
++ wreg32_idx_byteoffset(adev, 0x1DDA0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x520001);
++ wreg32_idx_byteoffset(adev, 0x1DDA0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x530001);
++ wreg32_idx_byteoffset(adev, 0x1DDA0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x540001);
++ wreg32_idx_byteoffset(adev, 0x1DDA0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x550001);
++ wreg32_idx_byteoffset(adev, 0x1DDA0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x560001);
++ wreg32_idx_byteoffset(adev, 0x1DDA0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x570001);
++ wreg32_idx_byteoffset(adev, 0x1DDA0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x580001);
++ wreg32_idx_byteoffset(adev, 0x1DDA0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x590001);
++ wreg32_idx_byteoffset(adev, 0x1DDA0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x5a0001);
++ wreg32_idx_byteoffset(adev, 0x1DDA0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x5b0001);
++ wreg32_idx_byteoffset(adev, 0x1DDA0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x5c0001);
++ wreg32_idx_byteoffset(adev, 0x1DDA0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x5d0001);
++ wreg32_idx_byteoffset(adev, 0x1DDA0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x5e0001);
++ wreg32_idx_byteoffset(adev, 0x1DDA0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x5f0001);
++ wreg32_idx_byteoffset(adev, 0x1DDA0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x600001);
++ wreg32_idx_byteoffset(adev, 0x1DDA0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x610001);
++ wreg32_idx_byteoffset(adev, 0x1DDA0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x620001);
++ wreg32_idx_byteoffset(adev, 0x1DDA0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x630001);
++ wreg32_idx_byteoffset(adev, 0x1DDA0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x640001);
++ wreg32_idx_byteoffset(adev, 0x1DDA0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x650001);
++ wreg32_idx_byteoffset(adev, 0x1DDA0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x660001);
++ wreg32_idx_byteoffset(adev, 0x1DDA0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x480001);
++ wreg32_idx_byteoffset(adev, 0x1DDF0, 0x4000003);
++ wreg32_idx_byteoffset(adev, 0x1DDF4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DDF8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1DD14, 0x4107800);
++ wreg32_idx_byteoffset(adev, 0x1DD18, 0x1001818);
++ wreg32_idx_byteoffset(adev, 0x1DD1C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1DD08, 0x40a11);
++ wreg32_idx_byteoffset(adev, 0x1DD0C, 0x40a11);
++ wreg32_idx_byteoffset(adev, 0x1DD04, 0x40a11);
++ wreg32_idx_byteoffset(adev, 0x1DD20, 0x1018);
++ wreg32_idx_byteoffset(adev, 0x1DD24, 0xc12);
++ wreg32_idx_byteoffset(adev, 0x1DD38, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD40, 0x60606);
++ wreg32_idx_byteoffset(adev, 0x1DD28, 0x80808);
++ wreg32_idx_byteoffset(adev, 0x1DD48, 0x5020000);
++ wreg32_idx_byteoffset(adev, 0x1DD30, 0x6040000);
++ wreg32_idx_byteoffset(adev, 0x1DD3C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD44, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD2C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD4C, 0x5);
++ wreg32_idx_byteoffset(adev, 0x1DD34, 0x6);
++ wreg32_idx_byteoffset(adev, 0x1C510, 0x7cfffff);
++ wreg32_idx_byteoffset(adev, 0x1C51C, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C514, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C520, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1DC40, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DC60, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC80, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DCA0, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC44, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DC64, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC84, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DCA4, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC48, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC68, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1DC88, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCA8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC4C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC6C, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC8C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCAC, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC50, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC70, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC90, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCB0, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x490001);
++ wreg32_idx_byteoffset(adev, 0x1DDF0, 0x8000007);
++ wreg32_idx_byteoffset(adev, 0x1DDF4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DDF8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1DD14, 0x24107800);
++ wreg32_idx_byteoffset(adev, 0x1DD18, 0x1003031);
++ wreg32_idx_byteoffset(adev, 0x1DD1C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1DD08, 0x142209);
++ wreg32_idx_byteoffset(adev, 0x1DD0C, 0x142209);
++ wreg32_idx_byteoffset(adev, 0x1DD04, 0x142209);
++ wreg32_idx_byteoffset(adev, 0x1DD20, 0x161c);
++ wreg32_idx_byteoffset(adev, 0x1DD24, 0x1015);
++ wreg32_idx_byteoffset(adev, 0x1DD38, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD40, 0x6060603);
++ wreg32_idx_byteoffset(adev, 0x1DD28, 0x8080804);
++ wreg32_idx_byteoffset(adev, 0x1DD48, 0x2000004);
++ wreg32_idx_byteoffset(adev, 0x1DD30, 0x4000006);
++ wreg32_idx_byteoffset(adev, 0x1DD3C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD44, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD2C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD4C, 0x505);
++ wreg32_idx_byteoffset(adev, 0x1DD34, 0x606);
++ wreg32_idx_byteoffset(adev, 0x1C510, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C51C, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C514, 0x7fffff3);
++ wreg32_idx_byteoffset(adev, 0x1C520, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1DC40, 0x10800);
++ wreg32_idx_byteoffset(adev, 0x1DC60, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC80, 0x10400);
++ wreg32_idx_byteoffset(adev, 0x1DCA0, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC44, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DC64, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC84, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DCA4, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC48, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DC68, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC88, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DCA8, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC4C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC6C, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1DC8C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCAC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC50, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC70, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC90, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCB0, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC54, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC74, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC94, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCB4, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x4a0001);
++ wreg32_idx_byteoffset(adev, 0x1DDF0, 0x8000007);
++ wreg32_idx_byteoffset(adev, 0x1DDF4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DDF8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1DD14, 0x84107800);
++ wreg32_idx_byteoffset(adev, 0x1DD18, 0x1003031);
++ wreg32_idx_byteoffset(adev, 0x1DD1C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1DD08, 0x142209);
++ wreg32_idx_byteoffset(adev, 0x1DD0C, 0x142209);
++ wreg32_idx_byteoffset(adev, 0x1DD04, 0x142209);
++ wreg32_idx_byteoffset(adev, 0x1DD20, 0x161c);
++ wreg32_idx_byteoffset(adev, 0x1DD24, 0x1015);
++ wreg32_idx_byteoffset(adev, 0x1DD38, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD40, 0x6060603);
++ wreg32_idx_byteoffset(adev, 0x1DD28, 0x8080804);
++ wreg32_idx_byteoffset(adev, 0x1DD48, 0x2000004);
++ wreg32_idx_byteoffset(adev, 0x1DD30, 0x4000006);
++ wreg32_idx_byteoffset(adev, 0x1DD3C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD44, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD2C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD4C, 0x505);
++ wreg32_idx_byteoffset(adev, 0x1DD34, 0x606);
++ wreg32_idx_byteoffset(adev, 0x1C510, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C51C, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C514, 0x7fffff3);
++ wreg32_idx_byteoffset(adev, 0x1C520, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1DC40, 0x10800);
++ wreg32_idx_byteoffset(adev, 0x1DC60, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC80, 0x10400);
++ wreg32_idx_byteoffset(adev, 0x1DCA0, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC44, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DC64, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC84, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DCA4, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC48, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DC68, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC88, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DCA8, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC4C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC6C, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1DC8C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCAC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC50, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC70, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC90, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCB0, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC54, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC74, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC94, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCB4, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x4b0001);
++ wreg32_idx_byteoffset(adev, 0x1DDF0, 0x4000003);
++ wreg32_idx_byteoffset(adev, 0x1DDF4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DDF8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1DD14, 0x74107800);
++ wreg32_idx_byteoffset(adev, 0x1DD18, 0x1001818);
++ wreg32_idx_byteoffset(adev, 0x1DD1C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1DD08, 0x40a11);
++ wreg32_idx_byteoffset(adev, 0x1DD0C, 0x40a11);
++ wreg32_idx_byteoffset(adev, 0x1DD04, 0x40a11);
++ wreg32_idx_byteoffset(adev, 0x1DD20, 0x1018);
++ wreg32_idx_byteoffset(adev, 0x1DD24, 0xc12);
++ wreg32_idx_byteoffset(adev, 0x1DD38, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD40, 0x60606);
++ wreg32_idx_byteoffset(adev, 0x1DD28, 0x80808);
++ wreg32_idx_byteoffset(adev, 0x1DD48, 0x5020000);
++ wreg32_idx_byteoffset(adev, 0x1DD30, 0x6040000);
++ wreg32_idx_byteoffset(adev, 0x1DD3C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD44, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD2C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD4C, 0x5);
++ wreg32_idx_byteoffset(adev, 0x1DD34, 0x6);
++ wreg32_idx_byteoffset(adev, 0x1C510, 0x7cfffff);
++ wreg32_idx_byteoffset(adev, 0x1C51C, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C514, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C520, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1DC40, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DC60, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC80, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DCA0, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC44, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DC64, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC84, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DCA4, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC48, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC68, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1DC88, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCA8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC4C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC6C, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC8C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCAC, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC50, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC70, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC90, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCB0, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x4c0001);
++ wreg32_idx_byteoffset(adev, 0x1DDF0, 0x8000007);
++ wreg32_idx_byteoffset(adev, 0x1DDF4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DDF8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1DD14, 0x44107800);
++ wreg32_idx_byteoffset(adev, 0x1DD18, 0x1003031);
++ wreg32_idx_byteoffset(adev, 0x1DD1C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1DD08, 0x142209);
++ wreg32_idx_byteoffset(adev, 0x1DD0C, 0x142209);
++ wreg32_idx_byteoffset(adev, 0x1DD04, 0x142209);
++ wreg32_idx_byteoffset(adev, 0x1DD20, 0x161c);
++ wreg32_idx_byteoffset(adev, 0x1DD24, 0x1015);
++ wreg32_idx_byteoffset(adev, 0x1DD38, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD40, 0x6060603);
++ wreg32_idx_byteoffset(adev, 0x1DD28, 0x8080804);
++ wreg32_idx_byteoffset(adev, 0x1DD48, 0x2000004);
++ wreg32_idx_byteoffset(adev, 0x1DD30, 0x4000006);
++ wreg32_idx_byteoffset(adev, 0x1DD3C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD44, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD2C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD4C, 0x505);
++ wreg32_idx_byteoffset(adev, 0x1DD34, 0x606);
++ wreg32_idx_byteoffset(adev, 0x1C510, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C51C, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C514, 0x7fffff3);
++ wreg32_idx_byteoffset(adev, 0x1C520, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1DC40, 0x10800);
++ wreg32_idx_byteoffset(adev, 0x1DC60, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC80, 0x10400);
++ wreg32_idx_byteoffset(adev, 0x1DCA0, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC44, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DC64, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC84, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DCA4, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC48, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DC68, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC88, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DCA8, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC4C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC6C, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1DC8C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCAC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC50, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC70, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC90, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCB0, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC54, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC74, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC94, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCB4, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x4d0001);
++ wreg32_idx_byteoffset(adev, 0x1DDF0, 0x8000007);
++ wreg32_idx_byteoffset(adev, 0x1DDF4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DDF8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1DD14, 0xb4107800);
++ wreg32_idx_byteoffset(adev, 0x1DD18, 0x1003031);
++ wreg32_idx_byteoffset(adev, 0x1DD1C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1DD08, 0x142209);
++ wreg32_idx_byteoffset(adev, 0x1DD0C, 0x142209);
++ wreg32_idx_byteoffset(adev, 0x1DD04, 0x142209);
++ wreg32_idx_byteoffset(adev, 0x1DD20, 0x161c);
++ wreg32_idx_byteoffset(adev, 0x1DD24, 0x1015);
++ wreg32_idx_byteoffset(adev, 0x1DD38, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD40, 0x6060603);
++ wreg32_idx_byteoffset(adev, 0x1DD28, 0x8080804);
++ wreg32_idx_byteoffset(adev, 0x1DD48, 0x2000004);
++ wreg32_idx_byteoffset(adev, 0x1DD30, 0x4000006);
++ wreg32_idx_byteoffset(adev, 0x1DD3C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD44, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD2C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD4C, 0x505);
++ wreg32_idx_byteoffset(adev, 0x1DD34, 0x606);
++ wreg32_idx_byteoffset(adev, 0x1C510, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C51C, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C514, 0x7fffff3);
++ wreg32_idx_byteoffset(adev, 0x1C520, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1DC40, 0x10800);
++ wreg32_idx_byteoffset(adev, 0x1DC60, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC80, 0x10400);
++ wreg32_idx_byteoffset(adev, 0x1DCA0, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC44, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DC64, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC84, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DCA4, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC48, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DC68, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC88, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DCA8, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC4C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC6C, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1DC8C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCAC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC50, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC70, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC90, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCB0, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC54, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC74, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC94, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCB4, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x4e0001);
++ wreg32_idx_byteoffset(adev, 0x1DDF0, 0x8000007);
++ wreg32_idx_byteoffset(adev, 0x1DDF4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DDF8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1DD14, 0x44107800);
++ wreg32_idx_byteoffset(adev, 0x1DD18, 0x1003031);
++ wreg32_idx_byteoffset(adev, 0x1DD1C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1DD08, 0x142209);
++ wreg32_idx_byteoffset(adev, 0x1DD0C, 0x142209);
++ wreg32_idx_byteoffset(adev, 0x1DD04, 0x142209);
++ wreg32_idx_byteoffset(adev, 0x1DD20, 0x161c);
++ wreg32_idx_byteoffset(adev, 0x1DD24, 0x1015);
++ wreg32_idx_byteoffset(adev, 0x1DD38, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD40, 0x6060603);
++ wreg32_idx_byteoffset(adev, 0x1DD28, 0x8080804);
++ wreg32_idx_byteoffset(adev, 0x1DD48, 0x2000004);
++ wreg32_idx_byteoffset(adev, 0x1DD30, 0x4000006);
++ wreg32_idx_byteoffset(adev, 0x1DD3C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD44, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD2C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD4C, 0x505);
++ wreg32_idx_byteoffset(adev, 0x1DD34, 0x606);
++ wreg32_idx_byteoffset(adev, 0x1C510, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C51C, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C514, 0x7fffff3);
++ wreg32_idx_byteoffset(adev, 0x1C520, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1DC40, 0x10800);
++ wreg32_idx_byteoffset(adev, 0x1DC60, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC80, 0x10400);
++ wreg32_idx_byteoffset(adev, 0x1DCA0, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC44, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DC64, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC84, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DCA4, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC48, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DC68, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC88, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DCA8, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC4C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC6C, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1DC8C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCAC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC50, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC70, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC90, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCB0, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC54, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC74, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC94, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCB4, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x4f0001);
++ wreg32_idx_byteoffset(adev, 0x1DDF0, 0x8000007);
++ wreg32_idx_byteoffset(adev, 0x1DDF4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DDF8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1DD14, 0x84107800);
++ wreg32_idx_byteoffset(adev, 0x1DD18, 0x1003031);
++ wreg32_idx_byteoffset(adev, 0x1DD1C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1DD08, 0x142209);
++ wreg32_idx_byteoffset(adev, 0x1DD0C, 0x142209);
++ wreg32_idx_byteoffset(adev, 0x1DD04, 0x142209);
++ wreg32_idx_byteoffset(adev, 0x1DD20, 0x161c);
++ wreg32_idx_byteoffset(adev, 0x1DD24, 0x1015);
++ wreg32_idx_byteoffset(adev, 0x1DD38, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD40, 0x6060603);
++ wreg32_idx_byteoffset(adev, 0x1DD28, 0x8080804);
++ wreg32_idx_byteoffset(adev, 0x1DD48, 0x2000004);
++ wreg32_idx_byteoffset(adev, 0x1DD30, 0x4000006);
++ wreg32_idx_byteoffset(adev, 0x1DD3C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD44, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD2C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD4C, 0x505);
++ wreg32_idx_byteoffset(adev, 0x1DD34, 0x606);
++ wreg32_idx_byteoffset(adev, 0x1C510, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C51C, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C514, 0x7fffff3);
++ wreg32_idx_byteoffset(adev, 0x1C520, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1DC40, 0x10800);
++ wreg32_idx_byteoffset(adev, 0x1DC60, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC80, 0x10400);
++ wreg32_idx_byteoffset(adev, 0x1DCA0, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC44, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DC64, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC84, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DCA4, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC48, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DC68, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC88, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DCA8, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC4C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC6C, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1DC8C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCAC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC50, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC70, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC90, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCB0, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC54, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC74, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC94, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCB4, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x500001);
++ wreg32_idx_byteoffset(adev, 0x1DDF0, 0x4000003);
++ wreg32_idx_byteoffset(adev, 0x1DDF4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DDF8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1DD14, 0x54107800);
++ wreg32_idx_byteoffset(adev, 0x1DD18, 0x1001818);
++ wreg32_idx_byteoffset(adev, 0x1DD1C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1DD08, 0x40a11);
++ wreg32_idx_byteoffset(adev, 0x1DD0C, 0x40a11);
++ wreg32_idx_byteoffset(adev, 0x1DD04, 0x40a11);
++ wreg32_idx_byteoffset(adev, 0x1DD20, 0x1018);
++ wreg32_idx_byteoffset(adev, 0x1DD24, 0xc12);
++ wreg32_idx_byteoffset(adev, 0x1DD38, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD40, 0x60606);
++ wreg32_idx_byteoffset(adev, 0x1DD28, 0x80808);
++ wreg32_idx_byteoffset(adev, 0x1DD48, 0x5020000);
++ wreg32_idx_byteoffset(adev, 0x1DD30, 0x6040000);
++ wreg32_idx_byteoffset(adev, 0x1DD3C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD44, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD2C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD4C, 0x5);
++ wreg32_idx_byteoffset(adev, 0x1DD34, 0x6);
++ wreg32_idx_byteoffset(adev, 0x1C510, 0x7cfffff);
++ wreg32_idx_byteoffset(adev, 0x1C51C, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C514, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C520, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1DC40, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DC60, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC80, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DCA0, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC44, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DC64, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC84, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DCA4, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC48, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC68, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1DC88, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCA8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC4C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC6C, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC8C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCAC, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC50, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC70, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC90, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCB0, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x510001);
++ wreg32_idx_byteoffset(adev, 0x1DDF0, 0x8000007);
++ wreg32_idx_byteoffset(adev, 0x1DDF4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DDF8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1DD14, 0x34107800);
++ wreg32_idx_byteoffset(adev, 0x1DD18, 0x1003031);
++ wreg32_idx_byteoffset(adev, 0x1DD1C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1DD08, 0x142209);
++ wreg32_idx_byteoffset(adev, 0x1DD0C, 0x142209);
++ wreg32_idx_byteoffset(adev, 0x1DD04, 0x142209);
++ wreg32_idx_byteoffset(adev, 0x1DD20, 0x161c);
++ wreg32_idx_byteoffset(adev, 0x1DD24, 0x1015);
++ wreg32_idx_byteoffset(adev, 0x1DD38, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD40, 0x6060603);
++ wreg32_idx_byteoffset(adev, 0x1DD28, 0x8080804);
++ wreg32_idx_byteoffset(adev, 0x1DD48, 0x2000004);
++ wreg32_idx_byteoffset(adev, 0x1DD30, 0x4000006);
++ wreg32_idx_byteoffset(adev, 0x1DD3C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD44, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD2C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD4C, 0x505);
++ wreg32_idx_byteoffset(adev, 0x1DD34, 0x606);
++ wreg32_idx_byteoffset(adev, 0x1C510, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C51C, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C514, 0x7fffff3);
++ wreg32_idx_byteoffset(adev, 0x1C520, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1DC40, 0x10800);
++ wreg32_idx_byteoffset(adev, 0x1DC60, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC80, 0x10400);
++ wreg32_idx_byteoffset(adev, 0x1DCA0, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC44, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DC64, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC84, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DCA4, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC48, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DC68, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC88, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DCA8, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC4C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC6C, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1DC8C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCAC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC50, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC70, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC90, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCB0, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC54, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC74, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC94, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCB4, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x520001);
++ wreg32_idx_byteoffset(adev, 0x1DDF0, 0x8000007);
++ wreg32_idx_byteoffset(adev, 0x1DDF4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DDF8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1DD14, 0x44107800);
++ wreg32_idx_byteoffset(adev, 0x1DD18, 0x1003031);
++ wreg32_idx_byteoffset(adev, 0x1DD1C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1DD08, 0x142209);
++ wreg32_idx_byteoffset(adev, 0x1DD0C, 0x142209);
++ wreg32_idx_byteoffset(adev, 0x1DD04, 0x142209);
++ wreg32_idx_byteoffset(adev, 0x1DD20, 0x161c);
++ wreg32_idx_byteoffset(adev, 0x1DD24, 0x1015);
++ wreg32_idx_byteoffset(adev, 0x1DD38, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD40, 0x6060603);
++ wreg32_idx_byteoffset(adev, 0x1DD28, 0x8080804);
++ wreg32_idx_byteoffset(adev, 0x1DD48, 0x2000004);
++ wreg32_idx_byteoffset(adev, 0x1DD30, 0x4000006);
++ wreg32_idx_byteoffset(adev, 0x1DD3C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD44, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD2C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD4C, 0x505);
++ wreg32_idx_byteoffset(adev, 0x1DD34, 0x606);
++ wreg32_idx_byteoffset(adev, 0x1C510, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C51C, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C514, 0x7fffff3);
++ wreg32_idx_byteoffset(adev, 0x1C520, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1DC40, 0x10800);
++ wreg32_idx_byteoffset(adev, 0x1DC60, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC80, 0x10400);
++ wreg32_idx_byteoffset(adev, 0x1DCA0, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC44, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DC64, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC84, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DCA4, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC48, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DC68, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC88, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DCA8, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC4C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC6C, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1DC8C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCAC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC50, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC70, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC90, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCB0, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC54, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC74, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC94, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCB4, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x530001);
++ wreg32_idx_byteoffset(adev, 0x1DDF0, 0x4000003);
++ wreg32_idx_byteoffset(adev, 0x1DDF4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DDF8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1DD14, 0x64107800);
++ wreg32_idx_byteoffset(adev, 0x1DD18, 0x1001818);
++ wreg32_idx_byteoffset(adev, 0x1DD1C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1DD08, 0x40a11);
++ wreg32_idx_byteoffset(adev, 0x1DD0C, 0x40a11);
++ wreg32_idx_byteoffset(adev, 0x1DD04, 0x40a11);
++ wreg32_idx_byteoffset(adev, 0x1DD20, 0x1018);
++ wreg32_idx_byteoffset(adev, 0x1DD24, 0xc12);
++ wreg32_idx_byteoffset(adev, 0x1DD38, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD40, 0x60606);
++ wreg32_idx_byteoffset(adev, 0x1DD28, 0x80808);
++ wreg32_idx_byteoffset(adev, 0x1DD48, 0x5020000);
++ wreg32_idx_byteoffset(adev, 0x1DD30, 0x6040000);
++ wreg32_idx_byteoffset(adev, 0x1DD3C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD44, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD2C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD4C, 0x5);
++ wreg32_idx_byteoffset(adev, 0x1DD34, 0x6);
++ wreg32_idx_byteoffset(adev, 0x1C510, 0x7cfffff);
++ wreg32_idx_byteoffset(adev, 0x1C51C, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C514, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C520, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1DC40, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DC60, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC80, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DCA0, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC44, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DC64, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC84, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DCA4, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC48, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC68, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1DC88, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCA8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC4C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC6C, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC8C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCAC, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC50, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC70, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC90, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCB0, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x540001);
++ wreg32_idx_byteoffset(adev, 0x1DDF0, 0x8000007);
++ wreg32_idx_byteoffset(adev, 0x1DDF4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DDF8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1DD14, 0x84107800);
++ wreg32_idx_byteoffset(adev, 0x1DD18, 0x1003031);
++ wreg32_idx_byteoffset(adev, 0x1DD1C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1DD08, 0x142209);
++ wreg32_idx_byteoffset(adev, 0x1DD0C, 0x142209);
++ wreg32_idx_byteoffset(adev, 0x1DD04, 0x142209);
++ wreg32_idx_byteoffset(adev, 0x1DD20, 0x161c);
++ wreg32_idx_byteoffset(adev, 0x1DD24, 0x1015);
++ wreg32_idx_byteoffset(adev, 0x1DD38, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD40, 0x6060603);
++ wreg32_idx_byteoffset(adev, 0x1DD28, 0x8080804);
++ wreg32_idx_byteoffset(adev, 0x1DD48, 0x2000004);
++ wreg32_idx_byteoffset(adev, 0x1DD30, 0x4000006);
++ wreg32_idx_byteoffset(adev, 0x1DD3C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD44, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD2C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD4C, 0x505);
++ wreg32_idx_byteoffset(adev, 0x1DD34, 0x606);
++ wreg32_idx_byteoffset(adev, 0x1C510, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C51C, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C514, 0x7fffff3);
++ wreg32_idx_byteoffset(adev, 0x1C520, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1DC40, 0x10800);
++ wreg32_idx_byteoffset(adev, 0x1DC60, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC80, 0x10400);
++ wreg32_idx_byteoffset(adev, 0x1DCA0, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC44, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DC64, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC84, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DCA4, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC48, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DC68, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC88, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DCA8, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC4C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC6C, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1DC8C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCAC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC50, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC70, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC90, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCB0, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC54, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC74, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC94, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCB4, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x550001);
++ wreg32_idx_byteoffset(adev, 0x1DDF0, 0x8000007);
++ wreg32_idx_byteoffset(adev, 0x1DDF4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DDF8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1DD14, 0x4107800);
++ wreg32_idx_byteoffset(adev, 0x1DD18, 0x1003031);
++ wreg32_idx_byteoffset(adev, 0x1DD1C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1DD08, 0x142209);
++ wreg32_idx_byteoffset(adev, 0x1DD0C, 0x142209);
++ wreg32_idx_byteoffset(adev, 0x1DD04, 0x142209);
++ wreg32_idx_byteoffset(adev, 0x1DD20, 0x161c);
++ wreg32_idx_byteoffset(adev, 0x1DD24, 0x1015);
++ wreg32_idx_byteoffset(adev, 0x1DD38, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD40, 0x6060603);
++ wreg32_idx_byteoffset(adev, 0x1DD28, 0x8080804);
++ wreg32_idx_byteoffset(adev, 0x1DD48, 0x2000004);
++ wreg32_idx_byteoffset(adev, 0x1DD30, 0x4000006);
++ wreg32_idx_byteoffset(adev, 0x1DD3C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD44, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD2C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD4C, 0x505);
++ wreg32_idx_byteoffset(adev, 0x1DD34, 0x606);
++ wreg32_idx_byteoffset(adev, 0x1C510, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C51C, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C514, 0x7fffff3);
++ wreg32_idx_byteoffset(adev, 0x1C520, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1DC40, 0x10800);
++ wreg32_idx_byteoffset(adev, 0x1DC60, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC80, 0x10400);
++ wreg32_idx_byteoffset(adev, 0x1DCA0, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC44, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DC64, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC84, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DCA4, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC48, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DC68, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC88, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DCA8, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC4C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC6C, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1DC8C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCAC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC50, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC70, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC90, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCB0, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC54, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC74, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC94, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCB4, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x560001);
++ wreg32_idx_byteoffset(adev, 0x1DDF0, 0x8000007);
++ wreg32_idx_byteoffset(adev, 0x1DDF4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DDF8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1DD14, 0x84107800);
++ wreg32_idx_byteoffset(adev, 0x1DD18, 0x1003031);
++ wreg32_idx_byteoffset(adev, 0x1DD1C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1DD08, 0x142209);
++ wreg32_idx_byteoffset(adev, 0x1DD0C, 0x142209);
++ wreg32_idx_byteoffset(adev, 0x1DD04, 0x142209);
++ wreg32_idx_byteoffset(adev, 0x1DD20, 0x161c);
++ wreg32_idx_byteoffset(adev, 0x1DD24, 0x1015);
++ wreg32_idx_byteoffset(adev, 0x1DD38, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD40, 0x6060603);
++ wreg32_idx_byteoffset(adev, 0x1DD28, 0x8080804);
++ wreg32_idx_byteoffset(adev, 0x1DD48, 0x2000004);
++ wreg32_idx_byteoffset(adev, 0x1DD30, 0x4000006);
++ wreg32_idx_byteoffset(adev, 0x1DD3C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD44, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD2C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD4C, 0x505);
++ wreg32_idx_byteoffset(adev, 0x1DD34, 0x606);
++ wreg32_idx_byteoffset(adev, 0x1C510, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C51C, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C514, 0x7fffff3);
++ wreg32_idx_byteoffset(adev, 0x1C520, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1DC40, 0x10800);
++ wreg32_idx_byteoffset(adev, 0x1DC60, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC80, 0x10400);
++ wreg32_idx_byteoffset(adev, 0x1DCA0, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC44, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DC64, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC84, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DCA4, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC48, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DC68, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC88, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DCA8, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC4C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC6C, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1DC8C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCAC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC50, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC70, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC90, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCB0, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC54, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC74, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC94, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCB4, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x570001);
++ wreg32_idx_byteoffset(adev, 0x1DDF0, 0x8000007);
++ wreg32_idx_byteoffset(adev, 0x1DDF4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DDF8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1DD14, 0xf4107800);
++ wreg32_idx_byteoffset(adev, 0x1DD18, 0x1003031);
++ wreg32_idx_byteoffset(adev, 0x1DD1C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1DD08, 0x142209);
++ wreg32_idx_byteoffset(adev, 0x1DD0C, 0x142209);
++ wreg32_idx_byteoffset(adev, 0x1DD04, 0x142209);
++ wreg32_idx_byteoffset(adev, 0x1DD20, 0x161c);
++ wreg32_idx_byteoffset(adev, 0x1DD24, 0x1015);
++ wreg32_idx_byteoffset(adev, 0x1DD38, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD40, 0x6060603);
++ wreg32_idx_byteoffset(adev, 0x1DD28, 0x8080804);
++ wreg32_idx_byteoffset(adev, 0x1DD48, 0x2000004);
++ wreg32_idx_byteoffset(adev, 0x1DD30, 0x4000006);
++ wreg32_idx_byteoffset(adev, 0x1DD3C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD44, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD2C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD4C, 0x505);
++ wreg32_idx_byteoffset(adev, 0x1DD34, 0x606);
++ wreg32_idx_byteoffset(adev, 0x1C510, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C51C, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C514, 0x7fffff3);
++ wreg32_idx_byteoffset(adev, 0x1C520, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1DC40, 0x10800);
++ wreg32_idx_byteoffset(adev, 0x1DC60, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC80, 0x10400);
++ wreg32_idx_byteoffset(adev, 0x1DCA0, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC44, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DC64, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC84, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DCA4, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC48, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DC68, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC88, 0x8400);
++ wreg32_idx_byteoffset(adev, 0x1DCA8, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1DC4C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC6C, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1DC8C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCAC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC50, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC70, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC90, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCB0, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC54, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC74, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC94, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCB4, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x580001);
++ wreg32_idx_byteoffset(adev, 0x1DDF0, 0x2000001);
++ wreg32_idx_byteoffset(adev, 0x1DDF4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DDF8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1DD14, 0xd4107800);
++ wreg32_idx_byteoffset(adev, 0x1DD18, 0x1030001);
++ wreg32_idx_byteoffset(adev, 0x1DD1C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1DD08, 0x201);
++ wreg32_idx_byteoffset(adev, 0x1DD0C, 0x201);
++ wreg32_idx_byteoffset(adev, 0x1DD04, 0x201);
++ wreg32_idx_byteoffset(adev, 0x1DD20, 0x1c0f);
++ wreg32_idx_byteoffset(adev, 0x1DD24, 0x1e11);
++ wreg32_idx_byteoffset(adev, 0x1DD38, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD40, 0x1001);
++ wreg32_idx_byteoffset(adev, 0x1DD28, 0xe01);
++ wreg32_idx_byteoffset(adev, 0x1DD48, 0x618);
++ wreg32_idx_byteoffset(adev, 0x1DD30, 0x616);
++ wreg32_idx_byteoffset(adev, 0x1C510, 0x7ffe7ff);
++ wreg32_idx_byteoffset(adev, 0x1C51C, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C514, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C520, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1DC40, 0x42000);
++ wreg32_idx_byteoffset(adev, 0x1DC60, 0x400000);
++ wreg32_idx_byteoffset(adev, 0x1DC80, 0x48400);
++ wreg32_idx_byteoffset(adev, 0x1DCA0, 0x400000);
++ wreg32_idx_byteoffset(adev, 0x1DC44, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC64, 0x3);
++ wreg32_idx_byteoffset(adev, 0x1DC84, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCA4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x590001);
++ wreg32_idx_byteoffset(adev, 0x1DDF0, 0x8000003);
++ wreg32_idx_byteoffset(adev, 0x1DDF4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DDF8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1DD14, 0x94107800);
++ wreg32_idx_byteoffset(adev, 0x1DD18, 0x10a0003);
++ wreg32_idx_byteoffset(adev, 0x1DD1C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1DD08, 0xc0201);
++ wreg32_idx_byteoffset(adev, 0x1DD0C, 0xc0201);
++ wreg32_idx_byteoffset(adev, 0x1DD04, 0xc0201);
++ wreg32_idx_byteoffset(adev, 0x1DD20, 0x1c1f);
++ wreg32_idx_byteoffset(adev, 0x1DD24, 0x1621);
++ wreg32_idx_byteoffset(adev, 0x1DD38, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD40, 0x18000603);
++ wreg32_idx_byteoffset(adev, 0x1DD28, 0x16000603);
++ wreg32_idx_byteoffset(adev, 0x1DD48, 0x9000904);
++ wreg32_idx_byteoffset(adev, 0x1DD30, 0xa000c06);
++ wreg32_idx_byteoffset(adev, 0x1C510, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C51C, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C514, 0x7fffff3);
++ wreg32_idx_byteoffset(adev, 0x1C520, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1DC40, 0x10800);
++ wreg32_idx_byteoffset(adev, 0x1DC60, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC80, 0x10400);
++ wreg32_idx_byteoffset(adev, 0x1DCA0, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC44, 0x21000);
++ wreg32_idx_byteoffset(adev, 0x1DC64, 0x200000);
++ wreg32_idx_byteoffset(adev, 0x1DC84, 0x20400);
++ wreg32_idx_byteoffset(adev, 0x1DCA4, 0x200000);
++ wreg32_idx_byteoffset(adev, 0x1DC48, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC68, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC88, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCA8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC4C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC6C, 0x4);
++ wreg32_idx_byteoffset(adev, 0x1DC8C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCAC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x5a0001);
++ wreg32_idx_byteoffset(adev, 0x1DDF0, 0x2000001);
++ wreg32_idx_byteoffset(adev, 0x1DDF4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DDF8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1DD14, 0xf4107800);
++ wreg32_idx_byteoffset(adev, 0x1DD18, 0x1060001);
++ wreg32_idx_byteoffset(adev, 0x1DD1C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1DD08, 0x40201);
++ wreg32_idx_byteoffset(adev, 0x1DD0C, 0x40201);
++ wreg32_idx_byteoffset(adev, 0x1DD04, 0x40201);
++ wreg32_idx_byteoffset(adev, 0x1DD20, 0x1e1f);
++ wreg32_idx_byteoffset(adev, 0x1DD24, 0x1618);
++ wreg32_idx_byteoffset(adev, 0x1DD38, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD40, 0xc0903);
++ wreg32_idx_byteoffset(adev, 0x1DD28, 0x100c03);
++ wreg32_idx_byteoffset(adev, 0x1DD48, 0xc0604);
++ wreg32_idx_byteoffset(adev, 0x1DD30, 0x100806);
++ wreg32_idx_byteoffset(adev, 0x1C510, 0x7ffe7ff);
++ wreg32_idx_byteoffset(adev, 0x1C51C, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C514, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C520, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1DC40, 0x10800);
++ wreg32_idx_byteoffset(adev, 0x1DC60, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC80, 0x10400);
++ wreg32_idx_byteoffset(adev, 0x1DCA0, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC44, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC64, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1DC84, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCA4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC48, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC68, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC88, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCA8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x5b0001);
++ wreg32_idx_byteoffset(adev, 0x1DDF0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DDF4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DDF8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1DD14, 0xd4107800);
++ wreg32_idx_byteoffset(adev, 0x1DD18, 0x1030000);
++ wreg32_idx_byteoffset(adev, 0x1DD1C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1DD08, 0x201);
++ wreg32_idx_byteoffset(adev, 0x1DD0C, 0x201);
++ wreg32_idx_byteoffset(adev, 0x1DD04, 0x201);
++ wreg32_idx_byteoffset(adev, 0x1DD20, 0x2020);
++ wreg32_idx_byteoffset(adev, 0x1DD24, 0x1212);
++ wreg32_idx_byteoffset(adev, 0x1DD38, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD40, 0xc06);
++ wreg32_idx_byteoffset(adev, 0x1DD28, 0x160a);
++ wreg32_idx_byteoffset(adev, 0x1DD48, 0xc06);
++ wreg32_idx_byteoffset(adev, 0x1DD30, 0x160a);
++ wreg32_idx_byteoffset(adev, 0x1C510, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C51C, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C514, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C520, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1DC40, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC60, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC80, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCA0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC44, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC64, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC84, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCA4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x5c0001);
++ wreg32_idx_byteoffset(adev, 0x1DDF0, 0x2000001);
++ wreg32_idx_byteoffset(adev, 0x1DDF4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DDF8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1DD14, 0x64107800);
++ wreg32_idx_byteoffset(adev, 0x1DD18, 0x1030001);
++ wreg32_idx_byteoffset(adev, 0x1DD1C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1DD08, 0x201);
++ wreg32_idx_byteoffset(adev, 0x1DD0C, 0x201);
++ wreg32_idx_byteoffset(adev, 0x1DD04, 0x201);
++ wreg32_idx_byteoffset(adev, 0x1DD20, 0x1c0f);
++ wreg32_idx_byteoffset(adev, 0x1DD24, 0x1e11);
++ wreg32_idx_byteoffset(adev, 0x1DD38, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD40, 0x1001);
++ wreg32_idx_byteoffset(adev, 0x1DD28, 0xe01);
++ wreg32_idx_byteoffset(adev, 0x1DD48, 0x618);
++ wreg32_idx_byteoffset(adev, 0x1DD30, 0x616);
++ wreg32_idx_byteoffset(adev, 0x1C510, 0x7ffe7ff);
++ wreg32_idx_byteoffset(adev, 0x1C51C, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C514, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C520, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1DC40, 0x42000);
++ wreg32_idx_byteoffset(adev, 0x1DC60, 0x400000);
++ wreg32_idx_byteoffset(adev, 0x1DC80, 0x48400);
++ wreg32_idx_byteoffset(adev, 0x1DCA0, 0x400000);
++ wreg32_idx_byteoffset(adev, 0x1DC44, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC64, 0x3);
++ wreg32_idx_byteoffset(adev, 0x1DC84, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCA4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x5d0001);
++ wreg32_idx_byteoffset(adev, 0x1DDF0, 0x4000003);
++ wreg32_idx_byteoffset(adev, 0x1DDF4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DDF8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1DD14, 0x74107800);
++ wreg32_idx_byteoffset(adev, 0x1DD18, 0x1060003);
++ wreg32_idx_byteoffset(adev, 0x1DD1C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1DD08, 0x40201);
++ wreg32_idx_byteoffset(adev, 0x1DD0C, 0x40201);
++ wreg32_idx_byteoffset(adev, 0x1DD04, 0x40201);
++ wreg32_idx_byteoffset(adev, 0x1DD20, 0x1c1f);
++ wreg32_idx_byteoffset(adev, 0x1DD24, 0x1621);
++ wreg32_idx_byteoffset(adev, 0x1DD38, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD40, 0x180603);
++ wreg32_idx_byteoffset(adev, 0x1DD28, 0x160603);
++ wreg32_idx_byteoffset(adev, 0x1DD48, 0x90904);
++ wreg32_idx_byteoffset(adev, 0x1DD30, 0xa0c06);
++ wreg32_idx_byteoffset(adev, 0x1C510, 0x7cfffff);
++ wreg32_idx_byteoffset(adev, 0x1C51C, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C514, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C520, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1DC40, 0x10800);
++ wreg32_idx_byteoffset(adev, 0x1DC60, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC80, 0x10400);
++ wreg32_idx_byteoffset(adev, 0x1DCA0, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC44, 0x21000);
++ wreg32_idx_byteoffset(adev, 0x1DC64, 0x200000);
++ wreg32_idx_byteoffset(adev, 0x1DC84, 0x20400);
++ wreg32_idx_byteoffset(adev, 0x1DCA4, 0x200000);
++ wreg32_idx_byteoffset(adev, 0x1DC48, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC68, 0x4);
++ wreg32_idx_byteoffset(adev, 0x1DC88, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCA8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x5e0001);
++ wreg32_idx_byteoffset(adev, 0x1DDF0, 0x2000001);
++ wreg32_idx_byteoffset(adev, 0x1DDF4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DDF8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1DD14, 0x14107800);
++ wreg32_idx_byteoffset(adev, 0x1DD18, 0x1060001);
++ wreg32_idx_byteoffset(adev, 0x1DD1C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1DD08, 0x40201);
++ wreg32_idx_byteoffset(adev, 0x1DD0C, 0x40201);
++ wreg32_idx_byteoffset(adev, 0x1DD04, 0x40201);
++ wreg32_idx_byteoffset(adev, 0x1DD20, 0x1e1f);
++ wreg32_idx_byteoffset(adev, 0x1DD24, 0x1618);
++ wreg32_idx_byteoffset(adev, 0x1DD38, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD40, 0xc0903);
++ wreg32_idx_byteoffset(adev, 0x1DD28, 0x100c03);
++ wreg32_idx_byteoffset(adev, 0x1DD48, 0xc0604);
++ wreg32_idx_byteoffset(adev, 0x1DD30, 0x100806);
++ wreg32_idx_byteoffset(adev, 0x1C510, 0x7ffe7ff);
++ wreg32_idx_byteoffset(adev, 0x1C51C, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C514, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C520, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1DC40, 0x10800);
++ wreg32_idx_byteoffset(adev, 0x1DC60, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC80, 0x10400);
++ wreg32_idx_byteoffset(adev, 0x1DCA0, 0x110000);
++ wreg32_idx_byteoffset(adev, 0x1DC44, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC64, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1DC84, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCA4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC48, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC68, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC88, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCA8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x5f0001);
++ wreg32_idx_byteoffset(adev, 0x1DDF0, 0x2000000);
++ wreg32_idx_byteoffset(adev, 0x1DDF4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DDF8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1DD14, 0x24107800);
++ wreg32_idx_byteoffset(adev, 0x1DD18, 0x1030000);
++ wreg32_idx_byteoffset(adev, 0x1DD1C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1DD08, 0x201);
++ wreg32_idx_byteoffset(adev, 0x1DD0C, 0x201);
++ wreg32_idx_byteoffset(adev, 0x1DD04, 0x201);
++ wreg32_idx_byteoffset(adev, 0x1DD20, 0x2020);
++ wreg32_idx_byteoffset(adev, 0x1DD24, 0x1212);
++ wreg32_idx_byteoffset(adev, 0x1DD38, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD40, 0xc06);
++ wreg32_idx_byteoffset(adev, 0x1DD28, 0x160a);
++ wreg32_idx_byteoffset(adev, 0x1DD48, 0xc06);
++ wreg32_idx_byteoffset(adev, 0x1DD30, 0x160a);
++ wreg32_idx_byteoffset(adev, 0x1C510, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C51C, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C514, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C520, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1DC40, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC60, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC80, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCA0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC44, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC64, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC84, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCA4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x600001);
++ wreg32_idx_byteoffset(adev, 0x1DDF0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DDF4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DDF8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1DD14, 0xa4107800);
++ wreg32_idx_byteoffset(adev, 0x1DD18, 0x1050000);
++ wreg32_idx_byteoffset(adev, 0x1DD1C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1DD08, 0x40201);
++ wreg32_idx_byteoffset(adev, 0x1DD0C, 0x40201);
++ wreg32_idx_byteoffset(adev, 0x1DD04, 0x40201);
++ wreg32_idx_byteoffset(adev, 0x1DD20, 0x2020);
++ wreg32_idx_byteoffset(adev, 0x1DD24, 0x1a1a);
++ wreg32_idx_byteoffset(adev, 0x1DD38, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD40, 0xa0808);
++ wreg32_idx_byteoffset(adev, 0x1DD28, 0xc0a0a);
++ wreg32_idx_byteoffset(adev, 0x1DD48, 0xa0808);
++ wreg32_idx_byteoffset(adev, 0x1DD30, 0xc0a0a);
++ wreg32_idx_byteoffset(adev, 0x1C510, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C51C, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C514, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C520, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1DC40, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC60, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC80, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCA0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC44, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC64, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC84, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCA4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC48, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC68, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC88, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCA8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x610001);
++ wreg32_idx_byteoffset(adev, 0x1DDF0, 0x1000000);
++ wreg32_idx_byteoffset(adev, 0x1DDF4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DDF8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1DD14, 0xd4107800);
++ wreg32_idx_byteoffset(adev, 0x1DD18, 0x1050000);
++ wreg32_idx_byteoffset(adev, 0x1DD1C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1DD08, 0x40201);
++ wreg32_idx_byteoffset(adev, 0x1DD0C, 0x40201);
++ wreg32_idx_byteoffset(adev, 0x1DD04, 0x40201);
++ wreg32_idx_byteoffset(adev, 0x1DD20, 0x2020);
++ wreg32_idx_byteoffset(adev, 0x1DD24, 0x1a1a);
++ wreg32_idx_byteoffset(adev, 0x1DD38, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD40, 0xa0808);
++ wreg32_idx_byteoffset(adev, 0x1DD28, 0xc0a0a);
++ wreg32_idx_byteoffset(adev, 0x1DD48, 0xa0808);
++ wreg32_idx_byteoffset(adev, 0x1DD30, 0xc0a0a);
++ wreg32_idx_byteoffset(adev, 0x1C510, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C51C, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C514, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C520, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1DC40, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC60, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC80, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCA0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC44, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC64, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC84, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCA4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC48, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC68, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC88, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCA8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x620001);
++ wreg32_idx_byteoffset(adev, 0x1DDF0, 0x2000001);
++ wreg32_idx_byteoffset(adev, 0x1DDF4, 0x4);
++ wreg32_idx_byteoffset(adev, 0x1DDF8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1DD14, 0xc4107800);
++ wreg32_idx_byteoffset(adev, 0x1DD18, 0x1030001);
++ wreg32_idx_byteoffset(adev, 0x1DD1C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1DD08, 0x40201);
++ wreg32_idx_byteoffset(adev, 0x1DD0C, 0x40201);
++ wreg32_idx_byteoffset(adev, 0x1DD04, 0x40201);
++ wreg32_idx_byteoffset(adev, 0x1DD20, 0x1819);
++ wreg32_idx_byteoffset(adev, 0x1DD24, 0x1915);
++ wreg32_idx_byteoffset(adev, 0x1DD38, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD40, 0xe0601);
++ wreg32_idx_byteoffset(adev, 0x1DD28, 0xe0a01);
++ wreg32_idx_byteoffset(adev, 0x1DD48, 0x910);
++ wreg32_idx_byteoffset(adev, 0x1DD30, 0xa0e);
++ wreg32_idx_byteoffset(adev, 0x1C510, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C51C, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C514, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C520, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1DC40, 0x31800);
++ wreg32_idx_byteoffset(adev, 0x1DC60, 0x300000);
++ wreg32_idx_byteoffset(adev, 0x1DC80, 0x30400);
++ wreg32_idx_byteoffset(adev, 0x1DCA0, 0x300000);
++ wreg32_idx_byteoffset(adev, 0x1DC44, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC64, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1DC84, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCA4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC48, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC68, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC88, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCA8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x630001);
++ wreg32_idx_byteoffset(adev, 0x1DDF0, 0x2000001);
++ wreg32_idx_byteoffset(adev, 0x1DDF4, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1DDF8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1DD14, 0xb4107800);
++ wreg32_idx_byteoffset(adev, 0x1DD18, 0x1030001);
++ wreg32_idx_byteoffset(adev, 0x1DD1C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1DD08, 0x40201);
++ wreg32_idx_byteoffset(adev, 0x1DD0C, 0x40201);
++ wreg32_idx_byteoffset(adev, 0x1DD04, 0x40201);
++ wreg32_idx_byteoffset(adev, 0x1DD20, 0x1e14);
++ wreg32_idx_byteoffset(adev, 0x1DD24, 0x2218);
++ wreg32_idx_byteoffset(adev, 0x1DD38, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD40, 0xc0c);
++ wreg32_idx_byteoffset(adev, 0x1DD28, 0xa0a);
++ wreg32_idx_byteoffset(adev, 0x1DD48, 0xa0c0c);
++ wreg32_idx_byteoffset(adev, 0x1DD30, 0xa0a0a);
++ wreg32_idx_byteoffset(adev, 0x1C510, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C51C, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C514, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C520, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1DC40, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC60, 0x200000);
++ wreg32_idx_byteoffset(adev, 0x1DC80, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCA0, 0x200000);
++ wreg32_idx_byteoffset(adev, 0x1DC44, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC64, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1DC84, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCA4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC48, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC68, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC88, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCA8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x640001);
++ wreg32_idx_byteoffset(adev, 0x1DDF0, 0x2000001);
++ wreg32_idx_byteoffset(adev, 0x1DDF4, 0x5);
++ wreg32_idx_byteoffset(adev, 0x1DDF8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1DD14, 0x54107800);
++ wreg32_idx_byteoffset(adev, 0x1DD18, 0x1030001);
++ wreg32_idx_byteoffset(adev, 0x1DD1C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1DD08, 0x40201);
++ wreg32_idx_byteoffset(adev, 0x1DD0C, 0x40201);
++ wreg32_idx_byteoffset(adev, 0x1DD04, 0x40201);
++ wreg32_idx_byteoffset(adev, 0x1DD20, 0x2022);
++ wreg32_idx_byteoffset(adev, 0x1DD24, 0x2115);
++ wreg32_idx_byteoffset(adev, 0x1DD38, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD40, 0xe0106);
++ wreg32_idx_byteoffset(adev, 0x1DD28, 0xe0a0a);
++ wreg32_idx_byteoffset(adev, 0x1DD48, 0x1809);
++ wreg32_idx_byteoffset(adev, 0x1DD30, 0x160a);
++ wreg32_idx_byteoffset(adev, 0x1C510, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C51C, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C514, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C520, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1DC40, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC60, 0x200000);
++ wreg32_idx_byteoffset(adev, 0x1DC80, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCA0, 0x200000);
++ wreg32_idx_byteoffset(adev, 0x1DC44, 0x42000);
++ wreg32_idx_byteoffset(adev, 0x1DC64, 0x4);
++ wreg32_idx_byteoffset(adev, 0x1DC84, 0x40400);
++ wreg32_idx_byteoffset(adev, 0x1DCA4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC48, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC68, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC88, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCA8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x650001);
++ wreg32_idx_byteoffset(adev, 0x1DDF0, 0x4000003);
++ wreg32_idx_byteoffset(adev, 0x1DDF4, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1DDF8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1DD14, 0x84107800);
++ wreg32_idx_byteoffset(adev, 0x1DD18, 0x1030003);
++ wreg32_idx_byteoffset(adev, 0x1DD1C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1DD08, 0xc0201);
++ wreg32_idx_byteoffset(adev, 0x1DD0C, 0xc0201);
++ wreg32_idx_byteoffset(adev, 0x1DD04, 0xc0201);
++ wreg32_idx_byteoffset(adev, 0x1DD20, 0x1a23);
++ wreg32_idx_byteoffset(adev, 0x1DD24, 0x1819);
++ wreg32_idx_byteoffset(adev, 0x1DD38, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD40, 0x118);
++ wreg32_idx_byteoffset(adev, 0x1DD28, 0x8040116);
++ wreg32_idx_byteoffset(adev, 0x1DD48, 0x1008);
++ wreg32_idx_byteoffset(adev, 0x1DD30, 0x20e0a);
++ wreg32_idx_byteoffset(adev, 0x1C510, 0x7fffff3);
++ wreg32_idx_byteoffset(adev, 0x1C51C, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C514, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C520, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1DC40, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC60, 0x400000);
++ wreg32_idx_byteoffset(adev, 0x1DC80, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCA0, 0x400000);
++ wreg32_idx_byteoffset(adev, 0x1DC44, 0x31800);
++ wreg32_idx_byteoffset(adev, 0x1DC64, 0x300000);
++ wreg32_idx_byteoffset(adev, 0x1DC84, 0x30400);
++ wreg32_idx_byteoffset(adev, 0x1DCA4, 0x300000);
++ wreg32_idx_byteoffset(adev, 0x1DC48, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC68, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC88, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCA8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC4C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC6C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC8C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCAC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x660001);
++ wreg32_idx_byteoffset(adev, 0x1DDF0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DDF4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DDF8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1DD14, 0x24107800);
++ wreg32_idx_byteoffset(adev, 0x1DD18, 0x1030000);
++ wreg32_idx_byteoffset(adev, 0x1DD1C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1DD08, 0x201);
++ wreg32_idx_byteoffset(adev, 0x1DD0C, 0x201);
++ wreg32_idx_byteoffset(adev, 0x1DD04, 0x201);
++ wreg32_idx_byteoffset(adev, 0x1DD20, 0x2020);
++ wreg32_idx_byteoffset(adev, 0x1DD24, 0x1818);
++ wreg32_idx_byteoffset(adev, 0x1DD38, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DD40, 0xc0c);
++ wreg32_idx_byteoffset(adev, 0x1DD28, 0x1010);
++ wreg32_idx_byteoffset(adev, 0x1DD48, 0xc0c);
++ wreg32_idx_byteoffset(adev, 0x1DD30, 0x1010);
++ wreg32_idx_byteoffset(adev, 0x1C510, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C51C, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C514, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1C520, 0x7ffffff);
++ wreg32_idx_byteoffset(adev, 0x1DC40, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC60, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC80, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCA0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC44, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC64, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DC84, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1DCA4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C840, 0x800000);
++ wreg32_idx_byteoffset(adev, 0x1C844, 0xec404206);
++ wreg32_idx_byteoffset(adev, 0x1C848, 0x220201f6);
++ wreg32_idx_byteoffset(adev, 0x1C84C, 0xd);
++ wreg32_idx_byteoffset(adev, 0x1C850, 0x83e4e08);
++ wreg32_idx_byteoffset(adev, 0x1C894, 0x1450001);
++ wreg32_idx_byteoffset(adev, 0x1D9F8, 0xffffffff);
++ wreg32_idx_byteoffset(adev, 0x1D9FC, 0xffffffff);
++ wreg32_idx_byteoffset(adev, 0x1D0F0, 0xfe861ee0);
++ wreg32_idx_byteoffset(adev, 0x1D0FC, 0x2f);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x1001011);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9DC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9CC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9EC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9F0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x10001);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9DC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9CC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9EC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9F0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x20001);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9DC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9CC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9EC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9F0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x30001);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x1001011);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9DC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9CC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9EC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9F0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x40001);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9DC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9CC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9EC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9F0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x50001);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x1001011);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9DC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9CC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9EC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9F0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x60001);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x1001011);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9DC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9CC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9EC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9F0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x70001);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9DC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9CC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9EC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9F0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x80001);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9DC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9CC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9EC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9F0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x90001);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x1001011);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9DC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9CC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9EC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9F0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0xa0001);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x1001011);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9DC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9CC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9EC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9F0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0xb0001);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9DC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9CC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9EC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9F0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0xc0001);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x1001011);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9DC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9CC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9EC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9F0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0xd0001);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9DC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9CC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9EC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9F0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0xe0001);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9DC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9CC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9EC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9F0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0xf0001);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x1001011);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9DC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9CC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9EC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9F0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x100001);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x1001011);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9DC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9CC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9EC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9F0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x110001);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9DC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9CC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9EC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9F0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x120001);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9DC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9CC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9EC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9F0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x130001);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x1001011);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9DC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9CC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9EC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9F0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x140001);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9DC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9CC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9EC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9F0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x150001);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x1001011);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9DC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9CC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9EC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9F0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x160001);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x1001011);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9DC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9CC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9EC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9F0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x170001);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9DC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9CC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9EC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9F0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x180001);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9DC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9CC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9EC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9F0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x190001);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x1001011);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9DC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9CC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9EC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9F0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x1a0001);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x1001011);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9DC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9CC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9EC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9F0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x1b0001);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9DC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9CC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9EC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9F0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x1c0001);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x1001011);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9DC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9CC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9EC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9F0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x1d0001);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9DC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9CC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9EC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9F0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x1e0001);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9DC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9CC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9EC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9F0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x1f0001);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C4A0, 0x42424202);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x1001011);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x20);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9C8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9DC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9CC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9E8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9EC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C9F0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x200001);
++ wreg32_idx_byteoffset(adev, 0x1CE00, 0xa2000);
++ wreg32_idx_byteoffset(adev, 0x1CE0C, 0x86);
++ wreg32_idx_byteoffset(adev, 0x1C468, 0x43);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C460, 0x450045);
++ wreg32_idx_byteoffset(adev, 0x1C464, 0x430043);
++ wreg32_idx_byteoffset(adev, 0x1C4E0, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E4, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E8, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1C4D0, 0x21818);
++ wreg32_idx_byteoffset(adev, 0x1C4A4, 0x1ff0173);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x10001);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x80000);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x210001);
++ wreg32_idx_byteoffset(adev, 0x1CE00, 0xa2000);
++ wreg32_idx_byteoffset(adev, 0x1CE0C, 0x86);
++ wreg32_idx_byteoffset(adev, 0x1C468, 0x43);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C460, 0x450045);
++ wreg32_idx_byteoffset(adev, 0x1C464, 0x430043);
++ wreg32_idx_byteoffset(adev, 0x1C4E0, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E4, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E8, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1C4D0, 0x21818);
++ wreg32_idx_byteoffset(adev, 0x1C4A4, 0x1ff0173);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x80000);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x220001);
++ wreg32_idx_byteoffset(adev, 0x1CE00, 0xa2000);
++ wreg32_idx_byteoffset(adev, 0x1CE0C, 0x86);
++ wreg32_idx_byteoffset(adev, 0x1C468, 0x43);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C460, 0x450045);
++ wreg32_idx_byteoffset(adev, 0x1C464, 0x430043);
++ wreg32_idx_byteoffset(adev, 0x1C4E0, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E4, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E8, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1C4D0, 0x21818);
++ wreg32_idx_byteoffset(adev, 0x1C4A4, 0x1ff0173);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x80000);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x230001);
++ wreg32_idx_byteoffset(adev, 0x1CE00, 0xa2000);
++ wreg32_idx_byteoffset(adev, 0x1CE0C, 0x86);
++ wreg32_idx_byteoffset(adev, 0x1C468, 0x43);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C460, 0x450045);
++ wreg32_idx_byteoffset(adev, 0x1C464, 0x430043);
++ wreg32_idx_byteoffset(adev, 0x1C4E0, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E4, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E8, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1C4D0, 0x21818);
++ wreg32_idx_byteoffset(adev, 0x1C4A4, 0x1ff0173);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x10001);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x80000);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x240001);
++ wreg32_idx_byteoffset(adev, 0x1CE00, 0xa2000);
++ wreg32_idx_byteoffset(adev, 0x1CE0C, 0x86);
++ wreg32_idx_byteoffset(adev, 0x1C468, 0x43);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C460, 0x450045);
++ wreg32_idx_byteoffset(adev, 0x1C464, 0x430043);
++ wreg32_idx_byteoffset(adev, 0x1C4E0, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E4, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E8, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1C4D0, 0x21818);
++ wreg32_idx_byteoffset(adev, 0x1C4A4, 0x1ff0173);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x80000);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x250001);
++ wreg32_idx_byteoffset(adev, 0x1CE00, 0xa2000);
++ wreg32_idx_byteoffset(adev, 0x1CE0C, 0x86);
++ wreg32_idx_byteoffset(adev, 0x1C468, 0x43);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C460, 0x450045);
++ wreg32_idx_byteoffset(adev, 0x1C464, 0x430043);
++ wreg32_idx_byteoffset(adev, 0x1C4E0, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E4, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E8, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1C4D0, 0x21818);
++ wreg32_idx_byteoffset(adev, 0x1C4A4, 0x1ff0173);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x10001);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x80000);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x260001);
++ wreg32_idx_byteoffset(adev, 0x1CE00, 0xa2000);
++ wreg32_idx_byteoffset(adev, 0x1CE0C, 0x86);
++ wreg32_idx_byteoffset(adev, 0x1C468, 0x43);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C460, 0x450045);
++ wreg32_idx_byteoffset(adev, 0x1C464, 0x430043);
++ wreg32_idx_byteoffset(adev, 0x1C4E0, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E4, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E8, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1C4D0, 0x21818);
++ wreg32_idx_byteoffset(adev, 0x1C4A4, 0x1ff0173);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x10001);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x80000);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x270001);
++ wreg32_idx_byteoffset(adev, 0x1CE00, 0xa2000);
++ wreg32_idx_byteoffset(adev, 0x1CE0C, 0x86);
++ wreg32_idx_byteoffset(adev, 0x1C468, 0x43);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C460, 0x450045);
++ wreg32_idx_byteoffset(adev, 0x1C464, 0x430043);
++ wreg32_idx_byteoffset(adev, 0x1C4E0, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E4, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E8, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1C4D0, 0x21818);
++ wreg32_idx_byteoffset(adev, 0x1C4A4, 0x1ff0173);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x80000);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x280001);
++ wreg32_idx_byteoffset(adev, 0x1CE00, 0xa2000);
++ wreg32_idx_byteoffset(adev, 0x1CE0C, 0x86);
++ wreg32_idx_byteoffset(adev, 0x1C468, 0x43);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C460, 0x450045);
++ wreg32_idx_byteoffset(adev, 0x1C464, 0x430043);
++ wreg32_idx_byteoffset(adev, 0x1C4E0, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E4, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E8, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1C4D0, 0x21818);
++ wreg32_idx_byteoffset(adev, 0x1C4A4, 0x1ff0173);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x80000);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x290001);
++ wreg32_idx_byteoffset(adev, 0x1CE00, 0xa2000);
++ wreg32_idx_byteoffset(adev, 0x1CE0C, 0x86);
++ wreg32_idx_byteoffset(adev, 0x1C468, 0x43);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C460, 0x450045);
++ wreg32_idx_byteoffset(adev, 0x1C464, 0x430043);
++ wreg32_idx_byteoffset(adev, 0x1C4E0, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E4, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E8, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1C4D0, 0x21818);
++ wreg32_idx_byteoffset(adev, 0x1C4A4, 0x1ff0173);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x10001);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x80000);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x2a0001);
++ wreg32_idx_byteoffset(adev, 0x1CE00, 0xa2000);
++ wreg32_idx_byteoffset(adev, 0x1CE0C, 0x86);
++ wreg32_idx_byteoffset(adev, 0x1C468, 0x43);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C460, 0x450045);
++ wreg32_idx_byteoffset(adev, 0x1C464, 0x430043);
++ wreg32_idx_byteoffset(adev, 0x1C4E0, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E4, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E8, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1C4D0, 0x21818);
++ wreg32_idx_byteoffset(adev, 0x1C4A4, 0x1ff0173);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x10001);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x80000);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x2b0001);
++ wreg32_idx_byteoffset(adev, 0x1CE00, 0xa2000);
++ wreg32_idx_byteoffset(adev, 0x1CE0C, 0x86);
++ wreg32_idx_byteoffset(adev, 0x1C468, 0x43);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C460, 0x450045);
++ wreg32_idx_byteoffset(adev, 0x1C464, 0x430043);
++ wreg32_idx_byteoffset(adev, 0x1C4E0, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E4, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E8, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1C4D0, 0x21818);
++ wreg32_idx_byteoffset(adev, 0x1C4A4, 0x1ff0173);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x80000);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x2c0001);
++ wreg32_idx_byteoffset(adev, 0x1CE00, 0xa2000);
++ wreg32_idx_byteoffset(adev, 0x1CE0C, 0x86);
++ wreg32_idx_byteoffset(adev, 0x1C468, 0x43);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C460, 0x450045);
++ wreg32_idx_byteoffset(adev, 0x1C464, 0x430043);
++ wreg32_idx_byteoffset(adev, 0x1C4E0, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E4, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E8, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1C4D0, 0x21818);
++ wreg32_idx_byteoffset(adev, 0x1C4A4, 0x1ff0173);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x10001);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x80000);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x2d0001);
++ wreg32_idx_byteoffset(adev, 0x1CE00, 0xa2000);
++ wreg32_idx_byteoffset(adev, 0x1CE0C, 0x86);
++ wreg32_idx_byteoffset(adev, 0x1C468, 0x43);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C460, 0x450045);
++ wreg32_idx_byteoffset(adev, 0x1C464, 0x430043);
++ wreg32_idx_byteoffset(adev, 0x1C4E0, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E4, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E8, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1C4D0, 0x21818);
++ wreg32_idx_byteoffset(adev, 0x1C4A4, 0x1ff0173);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x80000);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x2e0001);
++ wreg32_idx_byteoffset(adev, 0x1CE00, 0xa2000);
++ wreg32_idx_byteoffset(adev, 0x1CE0C, 0x86);
++ wreg32_idx_byteoffset(adev, 0x1C468, 0x43);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C460, 0x450045);
++ wreg32_idx_byteoffset(adev, 0x1C464, 0x430043);
++ wreg32_idx_byteoffset(adev, 0x1C4E0, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E4, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E8, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1C4D0, 0x21818);
++ wreg32_idx_byteoffset(adev, 0x1C4A4, 0x1ff0173);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x80000);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x2f0001);
++ wreg32_idx_byteoffset(adev, 0x1CE00, 0xa2000);
++ wreg32_idx_byteoffset(adev, 0x1CE0C, 0x86);
++ wreg32_idx_byteoffset(adev, 0x1C468, 0x43);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C460, 0x450045);
++ wreg32_idx_byteoffset(adev, 0x1C464, 0x430043);
++ wreg32_idx_byteoffset(adev, 0x1C4E0, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E4, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E8, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1C4D0, 0x21818);
++ wreg32_idx_byteoffset(adev, 0x1C4A4, 0x1ff0173);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x10001);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x80000);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x300001);
++ wreg32_idx_byteoffset(adev, 0x1CE00, 0xa2000);
++ wreg32_idx_byteoffset(adev, 0x1CE0C, 0x86);
++ wreg32_idx_byteoffset(adev, 0x1C468, 0x43);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C460, 0x450045);
++ wreg32_idx_byteoffset(adev, 0x1C464, 0x430043);
++ wreg32_idx_byteoffset(adev, 0x1C4E0, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E4, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E8, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1C4D0, 0x21818);
++ wreg32_idx_byteoffset(adev, 0x1C4A4, 0x1ff0173);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x10001);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x80000);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x310001);
++ wreg32_idx_byteoffset(adev, 0x1CE00, 0xa2000);
++ wreg32_idx_byteoffset(adev, 0x1CE0C, 0x86);
++ wreg32_idx_byteoffset(adev, 0x1C468, 0x43);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C460, 0x450045);
++ wreg32_idx_byteoffset(adev, 0x1C464, 0x430043);
++ wreg32_idx_byteoffset(adev, 0x1C4E0, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E4, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E8, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1C4D0, 0x21818);
++ wreg32_idx_byteoffset(adev, 0x1C4A4, 0x1ff0173);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x80000);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x320001);
++ wreg32_idx_byteoffset(adev, 0x1CE00, 0xa2000);
++ wreg32_idx_byteoffset(adev, 0x1CE0C, 0x86);
++ wreg32_idx_byteoffset(adev, 0x1C468, 0x43);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C460, 0x450045);
++ wreg32_idx_byteoffset(adev, 0x1C464, 0x430043);
++ wreg32_idx_byteoffset(adev, 0x1C4E0, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E4, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E8, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1C4D0, 0x21818);
++ wreg32_idx_byteoffset(adev, 0x1C4A4, 0x1ff0173);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x80000);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x330001);
++ wreg32_idx_byteoffset(adev, 0x1CE00, 0xa2000);
++ wreg32_idx_byteoffset(adev, 0x1CE0C, 0x86);
++ wreg32_idx_byteoffset(adev, 0x1C468, 0x43);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C460, 0x450045);
++ wreg32_idx_byteoffset(adev, 0x1C464, 0x430043);
++ wreg32_idx_byteoffset(adev, 0x1C4E0, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E4, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E8, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1C4D0, 0x21818);
++ wreg32_idx_byteoffset(adev, 0x1C4A4, 0x1ff0173);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x10001);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x80000);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x340001);
++ wreg32_idx_byteoffset(adev, 0x1CE00, 0xa2000);
++ wreg32_idx_byteoffset(adev, 0x1CE0C, 0x86);
++ wreg32_idx_byteoffset(adev, 0x1C468, 0x43);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C460, 0x450045);
++ wreg32_idx_byteoffset(adev, 0x1C464, 0x430043);
++ wreg32_idx_byteoffset(adev, 0x1C4E0, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E4, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E8, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1C4D0, 0x21818);
++ wreg32_idx_byteoffset(adev, 0x1C4A4, 0x1ff0173);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x80000);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x350001);
++ wreg32_idx_byteoffset(adev, 0x1CE00, 0xa2000);
++ wreg32_idx_byteoffset(adev, 0x1CE0C, 0x86);
++ wreg32_idx_byteoffset(adev, 0x1C468, 0x43);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C460, 0x450045);
++ wreg32_idx_byteoffset(adev, 0x1C464, 0x430043);
++ wreg32_idx_byteoffset(adev, 0x1C4E0, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E4, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E8, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1C4D0, 0x21818);
++ wreg32_idx_byteoffset(adev, 0x1C4A4, 0x1ff0173);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x10001);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x80000);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x360001);
++ wreg32_idx_byteoffset(adev, 0x1CE00, 0xa2000);
++ wreg32_idx_byteoffset(adev, 0x1CE0C, 0x86);
++ wreg32_idx_byteoffset(adev, 0x1C468, 0x43);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C460, 0x450045);
++ wreg32_idx_byteoffset(adev, 0x1C464, 0x430043);
++ wreg32_idx_byteoffset(adev, 0x1C4E0, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E4, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E8, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1C4D0, 0x21818);
++ wreg32_idx_byteoffset(adev, 0x1C4A4, 0x1ff0173);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x10001);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x80000);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x370001);
++ wreg32_idx_byteoffset(adev, 0x1CE00, 0xa2000);
++ wreg32_idx_byteoffset(adev, 0x1CE0C, 0x86);
++ wreg32_idx_byteoffset(adev, 0x1C468, 0x43);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C460, 0x450045);
++ wreg32_idx_byteoffset(adev, 0x1C464, 0x430043);
++ wreg32_idx_byteoffset(adev, 0x1C4E0, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E4, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E8, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1C4D0, 0x21818);
++ wreg32_idx_byteoffset(adev, 0x1C4A4, 0x1ff0173);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x80000);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x380001);
++ wreg32_idx_byteoffset(adev, 0x1CE00, 0xa2000);
++ wreg32_idx_byteoffset(adev, 0x1CE0C, 0x86);
++ wreg32_idx_byteoffset(adev, 0x1C468, 0x43);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C460, 0x450045);
++ wreg32_idx_byteoffset(adev, 0x1C464, 0x430043);
++ wreg32_idx_byteoffset(adev, 0x1C4E0, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E4, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E8, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1C4D0, 0x21818);
++ wreg32_idx_byteoffset(adev, 0x1C4A4, 0x1ff0173);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x80000);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x390001);
++ wreg32_idx_byteoffset(adev, 0x1CE00, 0xa2000);
++ wreg32_idx_byteoffset(adev, 0x1CE0C, 0x86);
++ wreg32_idx_byteoffset(adev, 0x1C468, 0x43);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C460, 0x450045);
++ wreg32_idx_byteoffset(adev, 0x1C464, 0x430043);
++ wreg32_idx_byteoffset(adev, 0x1C4E0, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E4, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E8, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1C4D0, 0x21818);
++ wreg32_idx_byteoffset(adev, 0x1C4A4, 0x1ff0173);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x10001);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x80000);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x3a0001);
++ wreg32_idx_byteoffset(adev, 0x1CE00, 0xa2000);
++ wreg32_idx_byteoffset(adev, 0x1CE0C, 0x86);
++ wreg32_idx_byteoffset(adev, 0x1C468, 0x43);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C460, 0x450045);
++ wreg32_idx_byteoffset(adev, 0x1C464, 0x430043);
++ wreg32_idx_byteoffset(adev, 0x1C4E0, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E4, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E8, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1C4D0, 0x21818);
++ wreg32_idx_byteoffset(adev, 0x1C4A4, 0x1ff0173);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x10001);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x80000);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x3b0001);
++ wreg32_idx_byteoffset(adev, 0x1CE00, 0xa2000);
++ wreg32_idx_byteoffset(adev, 0x1CE0C, 0x86);
++ wreg32_idx_byteoffset(adev, 0x1C468, 0x43);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C460, 0x450045);
++ wreg32_idx_byteoffset(adev, 0x1C464, 0x430043);
++ wreg32_idx_byteoffset(adev, 0x1C4E0, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E4, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E8, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1C4D0, 0x21818);
++ wreg32_idx_byteoffset(adev, 0x1C4A4, 0x1ff0173);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x80000);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x3c0001);
++ wreg32_idx_byteoffset(adev, 0x1CE00, 0xa2000);
++ wreg32_idx_byteoffset(adev, 0x1CE0C, 0x86);
++ wreg32_idx_byteoffset(adev, 0x1C468, 0x43);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C460, 0x450045);
++ wreg32_idx_byteoffset(adev, 0x1C464, 0x430043);
++ wreg32_idx_byteoffset(adev, 0x1C4E0, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E4, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E8, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1C4D0, 0x21818);
++ wreg32_idx_byteoffset(adev, 0x1C4A4, 0x1ff0173);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x10001);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x80000);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x3d0001);
++ wreg32_idx_byteoffset(adev, 0x1CE00, 0xa2000);
++ wreg32_idx_byteoffset(adev, 0x1CE0C, 0x86);
++ wreg32_idx_byteoffset(adev, 0x1C468, 0x43);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C460, 0x450045);
++ wreg32_idx_byteoffset(adev, 0x1C464, 0x430043);
++ wreg32_idx_byteoffset(adev, 0x1C4E0, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E4, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E8, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1C4D0, 0x21818);
++ wreg32_idx_byteoffset(adev, 0x1C4A4, 0x1ff0173);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x80000);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x3e0001);
++ wreg32_idx_byteoffset(adev, 0x1CE00, 0xa2000);
++ wreg32_idx_byteoffset(adev, 0x1CE0C, 0x86);
++ wreg32_idx_byteoffset(adev, 0x1C468, 0x43);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C460, 0x450045);
++ wreg32_idx_byteoffset(adev, 0x1C464, 0x430043);
++ wreg32_idx_byteoffset(adev, 0x1C4E0, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E4, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E8, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1C4D0, 0x21818);
++ wreg32_idx_byteoffset(adev, 0x1C4A4, 0x1ff0173);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x80000);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x3f0001);
++ wreg32_idx_byteoffset(adev, 0x1CE00, 0xa2000);
++ wreg32_idx_byteoffset(adev, 0x1CE0C, 0x86);
++ wreg32_idx_byteoffset(adev, 0x1C468, 0x43);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C460, 0x450045);
++ wreg32_idx_byteoffset(adev, 0x1C464, 0x430043);
++ wreg32_idx_byteoffset(adev, 0x1C4E0, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E4, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E8, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1C4D0, 0x21818);
++ wreg32_idx_byteoffset(adev, 0x1C4A4, 0x1ff0173);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x10001);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x80000);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x400001);
++ wreg32_idx_byteoffset(adev, 0x1C468, 0x43);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1CE00, 0xa2000);
++ wreg32_idx_byteoffset(adev, 0x1CE0C, 0x6);
++ wreg32_idx_byteoffset(adev, 0x1C460, 0x450045);
++ wreg32_idx_byteoffset(adev, 0x1C464, 0x430043);
++ wreg32_idx_byteoffset(adev, 0x1C4E0, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E4, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C4D0, 0x2020);
++ wreg32_idx_byteoffset(adev, 0x1C4A4, 0x1ff01ff);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x410001);
++ wreg32_idx_byteoffset(adev, 0x1C468, 0x43);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1CE00, 0xa2000);
++ wreg32_idx_byteoffset(adev, 0x1CE0C, 0x6);
++ wreg32_idx_byteoffset(adev, 0x1C460, 0x450045);
++ wreg32_idx_byteoffset(adev, 0x1C464, 0x430043);
++ wreg32_idx_byteoffset(adev, 0x1C4E0, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E4, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C4D0, 0x2020);
++ wreg32_idx_byteoffset(adev, 0x1C4A4, 0x1ff01ff);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D0, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x420001);
++ wreg32_idx_byteoffset(adev, 0x1C468, 0x43);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1CE00, 0xa2000);
++ wreg32_idx_byteoffset(adev, 0x1CE0C, 0x16);
++ wreg32_idx_byteoffset(adev, 0x1C460, 0x450045);
++ wreg32_idx_byteoffset(adev, 0x1C464, 0x430043);
++ wreg32_idx_byteoffset(adev, 0x1C4E0, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E4, 0x10);
++ wreg32_idx_byteoffset(adev, 0x1C4E8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C4D0, 0x18);
++ wreg32_idx_byteoffset(adev, 0x1C4A4, 0x1ff01ff);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D0D0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1D0D4, 0xc);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x460001);
++ wreg32_idx_byteoffset(adev, 0x1CE98, 0x3);
++ wreg32_idx_byteoffset(adev, 0x1CE80, 0x4f);
++ wreg32_idx_byteoffset(adev, 0x1CE84, 0x9f);
++ wreg32_idx_byteoffset(adev, 0x1CE9C, 0x25243888);
++ wreg32_idx_byteoffset(adev, 0x1CEA4, 0x11282894);
++ wreg32_idx_byteoffset(adev, 0x1CEA8, 0xe0000f);
++ wreg32_idx_byteoffset(adev, 0x1CE90, 0x40640010);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C618, 0x2000);
++ wreg32_idx_byteoffset(adev, 0x1C6E0, 0x7e6a14e9);
++ wreg32_idx_byteoffset(adev, 0x1C6E4, 0x94a0c408);
++ wreg32_idx_byteoffset(adev, 0x1C6E8, 0xc1a9625a);
++ wreg32_idx_byteoffset(adev, 0x1C6EC, 0xe43ee220);
++ wreg32_idx_byteoffset(adev, 0x1C6F0, 0x60316b41);
++ wreg32_idx_byteoffset(adev, 0x1C6F4, 0xd5b85760);
++ wreg32_idx_byteoffset(adev, 0x1C6F8, 0x2b589527);
++ wreg32_idx_byteoffset(adev, 0x1C6FC, 0xc7a447aa);
++ wreg32_idx_byteoffset(adev, 0x1C6D0, 0x83103ba1);
++ wreg32_idx_byteoffset(adev, 0x1C6D4, 0x668446d5);
++ wreg32_idx_byteoffset(adev, 0x1C6D8, 0xf6592b5f);
++ wreg32_idx_byteoffset(adev, 0x1C6DC, 0xfce059a);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x5614351);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1CE88, 0x37e20000);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x470001);
++ wreg32_idx_byteoffset(adev, 0x1CE98, 0x3);
++ wreg32_idx_byteoffset(adev, 0x1CE80, 0x4f);
++ wreg32_idx_byteoffset(adev, 0x1CE84, 0x9f);
++ wreg32_idx_byteoffset(adev, 0x1CE9C, 0x25243888);
++ wreg32_idx_byteoffset(adev, 0x1CEA4, 0x11282894);
++ wreg32_idx_byteoffset(adev, 0x1CEA8, 0x60000f);
++ wreg32_idx_byteoffset(adev, 0x1CE90, 0x9d000010);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C618, 0x2000);
++ wreg32_idx_byteoffset(adev, 0x1C6E0, 0x62c06669);
++ wreg32_idx_byteoffset(adev, 0x1C6E4, 0xa1b42b58);
++ wreg32_idx_byteoffset(adev, 0x1C6E8, 0x75f0e8fd);
++ wreg32_idx_byteoffset(adev, 0x1C6EC, 0xa73b2946);
++ wreg32_idx_byteoffset(adev, 0x1C6F0, 0xa30106a);
++ wreg32_idx_byteoffset(adev, 0x1C6F4, 0x44235d43);
++ wreg32_idx_byteoffset(adev, 0x1C6F8, 0xe83784c5);
++ wreg32_idx_byteoffset(adev, 0x1C6FC, 0xaee1454);
++ wreg32_idx_byteoffset(adev, 0x1C6D0, 0x778df19d);
++ wreg32_idx_byteoffset(adev, 0x1C6D4, 0x6e622afe);
++ wreg32_idx_byteoffset(adev, 0x1C6D8, 0xb2daede3);
++ wreg32_idx_byteoffset(adev, 0x1C6DC, 0x445cc41e);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x6224542);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1CE88, 0x2eb20000);
++ wreg32_idx_byteoffset(adev, 0x1C08C, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x430001);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C574, 0x410000e);
++ wreg32_idx_byteoffset(adev, 0x1C578, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C57C, 0x410000e);
++ wreg32_idx_byteoffset(adev, 0x1C580, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C560, 0x1010);
++ wreg32_idx_byteoffset(adev, 0x1C564, 0x410007f);
++ wreg32_idx_byteoffset(adev, 0x1C568, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C56C, 0x410007f);
++ wreg32_idx_byteoffset(adev, 0x1C570, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1CBF8, 0x4a2a400);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x440001);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C574, 0x4);
++ wreg32_idx_byteoffset(adev, 0x1C578, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C57C, 0x4);
++ wreg32_idx_byteoffset(adev, 0x1C580, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C560, 0x808);
++ wreg32_idx_byteoffset(adev, 0x1C564, 0x410417f);
++ wreg32_idx_byteoffset(adev, 0x1C568, 0x1041041);
++ wreg32_idx_byteoffset(adev, 0x1C56C, 0x410417f);
++ wreg32_idx_byteoffset(adev, 0x1C570, 0x1041041);
++ wreg32_idx_byteoffset(adev, 0x1CBF8, 0x4a2a400);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C3F8, 0x6650c);
++ wreg32_idx_byteoffset(adev, 0x1C350, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C354, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C358, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C35C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C360, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C364, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C114, 0x7f000);
++ wreg32_idx_byteoffset(adev, 0x1C110, 0x21);
++ wreg32_idx_byteoffset(adev, 0x1C11C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C118, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C124, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C120, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C12C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C128, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C134, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C130, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C13C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C138, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C144, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C140, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C14C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C148, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C114, 0x7f000);
++ wreg32_idx_byteoffset(adev, 0x1C110, 0x21);
++ wreg32_idx_byteoffset(adev, 0x1C11C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C118, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C1B4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C304, 0x7ff00);
++ wreg32_idx_byteoffset(adev, 0x1C308, 0x7ff3c);
++ wreg32_idx_byteoffset(adev, 0x1C30C, 0x3ff801);
++ wreg32_idx_byteoffset(adev, 0x1C300, 0xc0401);
++ wreg32_idx_byteoffset(adev, 0x1C314, 0x7ff40);
++ wreg32_idx_byteoffset(adev, 0x1C318, 0x7ffbc);
++ wreg32_idx_byteoffset(adev, 0x1C31C, 0x3ffa01);
++ wreg32_idx_byteoffset(adev, 0x1C310, 0x80401);
++ wreg32_idx_byteoffset(adev, 0x1C324, 0x7ffc0);
++ wreg32_idx_byteoffset(adev, 0x1C328, 0x7ffcc);
++ wreg32_idx_byteoffset(adev, 0x1C32C, 0x3ffe01);
++ wreg32_idx_byteoffset(adev, 0x1C320, 0x40401);
++ wreg32_idx_byteoffset(adev, 0x1C334, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C338, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C33C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C330, 0x400);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x10001);
++ wreg32_idx_byteoffset(adev, 0x1C114, 0x7f000);
++ wreg32_idx_byteoffset(adev, 0x1C110, 0x21);
++ wreg32_idx_byteoffset(adev, 0x1C11C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C118, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C1B4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C304, 0x7ff00);
++ wreg32_idx_byteoffset(adev, 0x1C308, 0x7ff3c);
++ wreg32_idx_byteoffset(adev, 0x1C30C, 0x3ff801);
++ wreg32_idx_byteoffset(adev, 0x1C300, 0xc0401);
++ wreg32_idx_byteoffset(adev, 0x1C314, 0x7ff40);
++ wreg32_idx_byteoffset(adev, 0x1C318, 0x7ffbc);
++ wreg32_idx_byteoffset(adev, 0x1C31C, 0x3ffa01);
++ wreg32_idx_byteoffset(adev, 0x1C310, 0x80401);
++ wreg32_idx_byteoffset(adev, 0x1C324, 0x7ffc0);
++ wreg32_idx_byteoffset(adev, 0x1C328, 0x7ffcc);
++ wreg32_idx_byteoffset(adev, 0x1C32C, 0x3ffe01);
++ wreg32_idx_byteoffset(adev, 0x1C320, 0x40401);
++ wreg32_idx_byteoffset(adev, 0x1C334, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C338, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C33C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C330, 0x400);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x20001);
++ wreg32_idx_byteoffset(adev, 0x1C114, 0x7f000);
++ wreg32_idx_byteoffset(adev, 0x1C110, 0x21);
++ wreg32_idx_byteoffset(adev, 0x1C11C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C118, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C1B4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C304, 0x7ff00);
++ wreg32_idx_byteoffset(adev, 0x1C308, 0x7ff3c);
++ wreg32_idx_byteoffset(adev, 0x1C30C, 0x3ff801);
++ wreg32_idx_byteoffset(adev, 0x1C300, 0xc0401);
++ wreg32_idx_byteoffset(adev, 0x1C314, 0x7ff40);
++ wreg32_idx_byteoffset(adev, 0x1C318, 0x7ffbc);
++ wreg32_idx_byteoffset(adev, 0x1C31C, 0x3ffa01);
++ wreg32_idx_byteoffset(adev, 0x1C310, 0x80401);
++ wreg32_idx_byteoffset(adev, 0x1C324, 0x7ffc0);
++ wreg32_idx_byteoffset(adev, 0x1C328, 0x7ffcc);
++ wreg32_idx_byteoffset(adev, 0x1C32C, 0x3ffe01);
++ wreg32_idx_byteoffset(adev, 0x1C320, 0x40401);
++ wreg32_idx_byteoffset(adev, 0x1C334, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C338, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C33C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C330, 0x400);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x30001);
++ wreg32_idx_byteoffset(adev, 0x1C114, 0x7f000);
++ wreg32_idx_byteoffset(adev, 0x1C110, 0x21);
++ wreg32_idx_byteoffset(adev, 0x1C11C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C118, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C1B4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C304, 0x7ff00);
++ wreg32_idx_byteoffset(adev, 0x1C308, 0x7ff3c);
++ wreg32_idx_byteoffset(adev, 0x1C30C, 0x3ff801);
++ wreg32_idx_byteoffset(adev, 0x1C300, 0xc0401);
++ wreg32_idx_byteoffset(adev, 0x1C314, 0x7ff40);
++ wreg32_idx_byteoffset(adev, 0x1C318, 0x7ffbc);
++ wreg32_idx_byteoffset(adev, 0x1C31C, 0x3ffa01);
++ wreg32_idx_byteoffset(adev, 0x1C310, 0x80401);
++ wreg32_idx_byteoffset(adev, 0x1C324, 0x7ffc0);
++ wreg32_idx_byteoffset(adev, 0x1C328, 0x7ffcc);
++ wreg32_idx_byteoffset(adev, 0x1C32C, 0x3ffe01);
++ wreg32_idx_byteoffset(adev, 0x1C320, 0x40401);
++ wreg32_idx_byteoffset(adev, 0x1C334, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C338, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C33C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C330, 0x400);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x40001);
++ wreg32_idx_byteoffset(adev, 0x1C114, 0x7f000);
++ wreg32_idx_byteoffset(adev, 0x1C110, 0x21);
++ wreg32_idx_byteoffset(adev, 0x1C11C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C118, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C1B4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C304, 0x7ff00);
++ wreg32_idx_byteoffset(adev, 0x1C308, 0x7ff3c);
++ wreg32_idx_byteoffset(adev, 0x1C30C, 0x3ff801);
++ wreg32_idx_byteoffset(adev, 0x1C300, 0xc0401);
++ wreg32_idx_byteoffset(adev, 0x1C314, 0x7ff40);
++ wreg32_idx_byteoffset(adev, 0x1C318, 0x7ffbc);
++ wreg32_idx_byteoffset(adev, 0x1C31C, 0x3ffa01);
++ wreg32_idx_byteoffset(adev, 0x1C310, 0x80401);
++ wreg32_idx_byteoffset(adev, 0x1C324, 0x7ffc0);
++ wreg32_idx_byteoffset(adev, 0x1C328, 0x7ffcc);
++ wreg32_idx_byteoffset(adev, 0x1C32C, 0x3ffe01);
++ wreg32_idx_byteoffset(adev, 0x1C320, 0x40401);
++ wreg32_idx_byteoffset(adev, 0x1C334, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C338, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C33C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C330, 0x400);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x50001);
++ wreg32_idx_byteoffset(adev, 0x1C114, 0x7f000);
++ wreg32_idx_byteoffset(adev, 0x1C110, 0x21);
++ wreg32_idx_byteoffset(adev, 0x1C11C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C118, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C1B4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C304, 0x7ff00);
++ wreg32_idx_byteoffset(adev, 0x1C308, 0x7ff3c);
++ wreg32_idx_byteoffset(adev, 0x1C30C, 0x3ff801);
++ wreg32_idx_byteoffset(adev, 0x1C300, 0xc0401);
++ wreg32_idx_byteoffset(adev, 0x1C314, 0x7ff40);
++ wreg32_idx_byteoffset(adev, 0x1C318, 0x7ffbc);
++ wreg32_idx_byteoffset(adev, 0x1C31C, 0x3ffa01);
++ wreg32_idx_byteoffset(adev, 0x1C310, 0x80401);
++ wreg32_idx_byteoffset(adev, 0x1C324, 0x7ffc0);
++ wreg32_idx_byteoffset(adev, 0x1C328, 0x7ffcc);
++ wreg32_idx_byteoffset(adev, 0x1C32C, 0x3ffe01);
++ wreg32_idx_byteoffset(adev, 0x1C320, 0x40401);
++ wreg32_idx_byteoffset(adev, 0x1C334, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C338, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C33C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C330, 0x400);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x60001);
++ wreg32_idx_byteoffset(adev, 0x1C114, 0x7f000);
++ wreg32_idx_byteoffset(adev, 0x1C110, 0x21);
++ wreg32_idx_byteoffset(adev, 0x1C11C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C118, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C1B4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C304, 0x7ff00);
++ wreg32_idx_byteoffset(adev, 0x1C308, 0x7ff3c);
++ wreg32_idx_byteoffset(adev, 0x1C30C, 0x3ff801);
++ wreg32_idx_byteoffset(adev, 0x1C300, 0xc0401);
++ wreg32_idx_byteoffset(adev, 0x1C314, 0x7ff40);
++ wreg32_idx_byteoffset(adev, 0x1C318, 0x7ffbc);
++ wreg32_idx_byteoffset(adev, 0x1C31C, 0x3ffa01);
++ wreg32_idx_byteoffset(adev, 0x1C310, 0x80401);
++ wreg32_idx_byteoffset(adev, 0x1C324, 0x7ffc0);
++ wreg32_idx_byteoffset(adev, 0x1C328, 0x7ffcc);
++ wreg32_idx_byteoffset(adev, 0x1C32C, 0x3ffe01);
++ wreg32_idx_byteoffset(adev, 0x1C320, 0x40401);
++ wreg32_idx_byteoffset(adev, 0x1C334, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C338, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C33C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C330, 0x400);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x70001);
++ wreg32_idx_byteoffset(adev, 0x1C114, 0x7f000);
++ wreg32_idx_byteoffset(adev, 0x1C110, 0x21);
++ wreg32_idx_byteoffset(adev, 0x1C11C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C118, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C1B4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C304, 0x7ff00);
++ wreg32_idx_byteoffset(adev, 0x1C308, 0x7ff3c);
++ wreg32_idx_byteoffset(adev, 0x1C30C, 0x3ff801);
++ wreg32_idx_byteoffset(adev, 0x1C300, 0xc0401);
++ wreg32_idx_byteoffset(adev, 0x1C314, 0x7ff40);
++ wreg32_idx_byteoffset(adev, 0x1C318, 0x7ffbc);
++ wreg32_idx_byteoffset(adev, 0x1C31C, 0x3ffa01);
++ wreg32_idx_byteoffset(adev, 0x1C310, 0x80401);
++ wreg32_idx_byteoffset(adev, 0x1C324, 0x7ffc0);
++ wreg32_idx_byteoffset(adev, 0x1C328, 0x7ffcc);
++ wreg32_idx_byteoffset(adev, 0x1C32C, 0x3ffe01);
++ wreg32_idx_byteoffset(adev, 0x1C320, 0x40401);
++ wreg32_idx_byteoffset(adev, 0x1C334, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C338, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C33C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C330, 0x400);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x80001);
++ wreg32_idx_byteoffset(adev, 0x1C114, 0x7f000);
++ wreg32_idx_byteoffset(adev, 0x1C110, 0x21);
++ wreg32_idx_byteoffset(adev, 0x1C11C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C118, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C1B4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C304, 0x7ff00);
++ wreg32_idx_byteoffset(adev, 0x1C308, 0x7ff3c);
++ wreg32_idx_byteoffset(adev, 0x1C30C, 0x3ff801);
++ wreg32_idx_byteoffset(adev, 0x1C300, 0xc0401);
++ wreg32_idx_byteoffset(adev, 0x1C314, 0x7ff40);
++ wreg32_idx_byteoffset(adev, 0x1C318, 0x7ffbc);
++ wreg32_idx_byteoffset(adev, 0x1C31C, 0x3ffa01);
++ wreg32_idx_byteoffset(adev, 0x1C310, 0x80401);
++ wreg32_idx_byteoffset(adev, 0x1C324, 0x7ffc0);
++ wreg32_idx_byteoffset(adev, 0x1C328, 0x7ffcc);
++ wreg32_idx_byteoffset(adev, 0x1C32C, 0x3ffe01);
++ wreg32_idx_byteoffset(adev, 0x1C320, 0x40401);
++ wreg32_idx_byteoffset(adev, 0x1C334, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C338, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C33C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C330, 0x400);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x90001);
++ wreg32_idx_byteoffset(adev, 0x1C114, 0x7f000);
++ wreg32_idx_byteoffset(adev, 0x1C110, 0x21);
++ wreg32_idx_byteoffset(adev, 0x1C11C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C118, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C1B4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C304, 0x7ff00);
++ wreg32_idx_byteoffset(adev, 0x1C308, 0x7ff3c);
++ wreg32_idx_byteoffset(adev, 0x1C30C, 0x3ff801);
++ wreg32_idx_byteoffset(adev, 0x1C300, 0xc0401);
++ wreg32_idx_byteoffset(adev, 0x1C314, 0x7ff40);
++ wreg32_idx_byteoffset(adev, 0x1C318, 0x7ffbc);
++ wreg32_idx_byteoffset(adev, 0x1C31C, 0x3ffa01);
++ wreg32_idx_byteoffset(adev, 0x1C310, 0x80401);
++ wreg32_idx_byteoffset(adev, 0x1C324, 0x7ffc0);
++ wreg32_idx_byteoffset(adev, 0x1C328, 0x7ffcc);
++ wreg32_idx_byteoffset(adev, 0x1C32C, 0x3ffe01);
++ wreg32_idx_byteoffset(adev, 0x1C320, 0x40401);
++ wreg32_idx_byteoffset(adev, 0x1C334, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C338, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C33C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C330, 0x400);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0xa0001);
++ wreg32_idx_byteoffset(adev, 0x1C114, 0x7f000);
++ wreg32_idx_byteoffset(adev, 0x1C110, 0x21);
++ wreg32_idx_byteoffset(adev, 0x1C11C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C118, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C1B4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C304, 0x7ff00);
++ wreg32_idx_byteoffset(adev, 0x1C308, 0x7ff3c);
++ wreg32_idx_byteoffset(adev, 0x1C30C, 0x3ff801);
++ wreg32_idx_byteoffset(adev, 0x1C300, 0xc0401);
++ wreg32_idx_byteoffset(adev, 0x1C314, 0x7ff40);
++ wreg32_idx_byteoffset(adev, 0x1C318, 0x7ffbc);
++ wreg32_idx_byteoffset(adev, 0x1C31C, 0x3ffa01);
++ wreg32_idx_byteoffset(adev, 0x1C310, 0x80401);
++ wreg32_idx_byteoffset(adev, 0x1C324, 0x7ffc0);
++ wreg32_idx_byteoffset(adev, 0x1C328, 0x7ffcc);
++ wreg32_idx_byteoffset(adev, 0x1C32C, 0x3ffe01);
++ wreg32_idx_byteoffset(adev, 0x1C320, 0x40401);
++ wreg32_idx_byteoffset(adev, 0x1C334, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C338, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C33C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C330, 0x400);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0xb0001);
++ wreg32_idx_byteoffset(adev, 0x1C114, 0x7f000);
++ wreg32_idx_byteoffset(adev, 0x1C110, 0x21);
++ wreg32_idx_byteoffset(adev, 0x1C11C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C118, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C1B4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C304, 0x7ff00);
++ wreg32_idx_byteoffset(adev, 0x1C308, 0x7ff3c);
++ wreg32_idx_byteoffset(adev, 0x1C30C, 0x3ff801);
++ wreg32_idx_byteoffset(adev, 0x1C300, 0xc0401);
++ wreg32_idx_byteoffset(adev, 0x1C314, 0x7ff40);
++ wreg32_idx_byteoffset(adev, 0x1C318, 0x7ffbc);
++ wreg32_idx_byteoffset(adev, 0x1C31C, 0x3ffa01);
++ wreg32_idx_byteoffset(adev, 0x1C310, 0x80401);
++ wreg32_idx_byteoffset(adev, 0x1C324, 0x7ffc0);
++ wreg32_idx_byteoffset(adev, 0x1C328, 0x7ffcc);
++ wreg32_idx_byteoffset(adev, 0x1C32C, 0x3ffe01);
++ wreg32_idx_byteoffset(adev, 0x1C320, 0x40401);
++ wreg32_idx_byteoffset(adev, 0x1C334, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C338, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C33C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C330, 0x400);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0xc0001);
++ wreg32_idx_byteoffset(adev, 0x1C114, 0x7f000);
++ wreg32_idx_byteoffset(adev, 0x1C110, 0x21);
++ wreg32_idx_byteoffset(adev, 0x1C11C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C118, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C1B4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C304, 0x7ff00);
++ wreg32_idx_byteoffset(adev, 0x1C308, 0x7ff3c);
++ wreg32_idx_byteoffset(adev, 0x1C30C, 0x3ff801);
++ wreg32_idx_byteoffset(adev, 0x1C300, 0xc0401);
++ wreg32_idx_byteoffset(adev, 0x1C314, 0x7ff40);
++ wreg32_idx_byteoffset(adev, 0x1C318, 0x7ffbc);
++ wreg32_idx_byteoffset(adev, 0x1C31C, 0x3ffa01);
++ wreg32_idx_byteoffset(adev, 0x1C310, 0x80401);
++ wreg32_idx_byteoffset(adev, 0x1C324, 0x7ffc0);
++ wreg32_idx_byteoffset(adev, 0x1C328, 0x7ffcc);
++ wreg32_idx_byteoffset(adev, 0x1C32C, 0x3ffe01);
++ wreg32_idx_byteoffset(adev, 0x1C320, 0x40401);
++ wreg32_idx_byteoffset(adev, 0x1C334, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C338, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C33C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C330, 0x400);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0xd0001);
++ wreg32_idx_byteoffset(adev, 0x1C114, 0x7f000);
++ wreg32_idx_byteoffset(adev, 0x1C110, 0x21);
++ wreg32_idx_byteoffset(adev, 0x1C11C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C118, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C1B4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C304, 0x7ff00);
++ wreg32_idx_byteoffset(adev, 0x1C308, 0x7ff3c);
++ wreg32_idx_byteoffset(adev, 0x1C30C, 0x3ff801);
++ wreg32_idx_byteoffset(adev, 0x1C300, 0xc0401);
++ wreg32_idx_byteoffset(adev, 0x1C314, 0x7ff40);
++ wreg32_idx_byteoffset(adev, 0x1C318, 0x7ffbc);
++ wreg32_idx_byteoffset(adev, 0x1C31C, 0x3ffa01);
++ wreg32_idx_byteoffset(adev, 0x1C310, 0x80401);
++ wreg32_idx_byteoffset(adev, 0x1C324, 0x7ffc0);
++ wreg32_idx_byteoffset(adev, 0x1C328, 0x7ffcc);
++ wreg32_idx_byteoffset(adev, 0x1C32C, 0x3ffe01);
++ wreg32_idx_byteoffset(adev, 0x1C320, 0x40401);
++ wreg32_idx_byteoffset(adev, 0x1C334, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C338, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C33C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C330, 0x400);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0xe0001);
++ wreg32_idx_byteoffset(adev, 0x1C114, 0x7f000);
++ wreg32_idx_byteoffset(adev, 0x1C110, 0x21);
++ wreg32_idx_byteoffset(adev, 0x1C11C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C118, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C1B4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C304, 0x7ff00);
++ wreg32_idx_byteoffset(adev, 0x1C308, 0x7ff3c);
++ wreg32_idx_byteoffset(adev, 0x1C30C, 0x3ff801);
++ wreg32_idx_byteoffset(adev, 0x1C300, 0xc0401);
++ wreg32_idx_byteoffset(adev, 0x1C314, 0x7ff40);
++ wreg32_idx_byteoffset(adev, 0x1C318, 0x7ffbc);
++ wreg32_idx_byteoffset(adev, 0x1C31C, 0x3ffa01);
++ wreg32_idx_byteoffset(adev, 0x1C310, 0x80401);
++ wreg32_idx_byteoffset(adev, 0x1C324, 0x7ffc0);
++ wreg32_idx_byteoffset(adev, 0x1C328, 0x7ffcc);
++ wreg32_idx_byteoffset(adev, 0x1C32C, 0x3ffe01);
++ wreg32_idx_byteoffset(adev, 0x1C320, 0x40401);
++ wreg32_idx_byteoffset(adev, 0x1C334, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C338, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C33C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C330, 0x400);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0xf0001);
++ wreg32_idx_byteoffset(adev, 0x1C114, 0x7f000);
++ wreg32_idx_byteoffset(adev, 0x1C110, 0x21);
++ wreg32_idx_byteoffset(adev, 0x1C11C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C118, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C1B4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C304, 0x7ff00);
++ wreg32_idx_byteoffset(adev, 0x1C308, 0x7ff3c);
++ wreg32_idx_byteoffset(adev, 0x1C30C, 0x3ff801);
++ wreg32_idx_byteoffset(adev, 0x1C300, 0xc0401);
++ wreg32_idx_byteoffset(adev, 0x1C314, 0x7ff40);
++ wreg32_idx_byteoffset(adev, 0x1C318, 0x7ffbc);
++ wreg32_idx_byteoffset(adev, 0x1C31C, 0x3ffa01);
++ wreg32_idx_byteoffset(adev, 0x1C310, 0x80401);
++ wreg32_idx_byteoffset(adev, 0x1C324, 0x7ffc0);
++ wreg32_idx_byteoffset(adev, 0x1C328, 0x7ffcc);
++ wreg32_idx_byteoffset(adev, 0x1C32C, 0x3ffe01);
++ wreg32_idx_byteoffset(adev, 0x1C320, 0x40401);
++ wreg32_idx_byteoffset(adev, 0x1C334, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C338, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C33C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C330, 0x400);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x100001);
++ wreg32_idx_byteoffset(adev, 0x1C114, 0x7f000);
++ wreg32_idx_byteoffset(adev, 0x1C110, 0x21);
++ wreg32_idx_byteoffset(adev, 0x1C11C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C118, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C1B4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C304, 0x7ff00);
++ wreg32_idx_byteoffset(adev, 0x1C308, 0x7ff3c);
++ wreg32_idx_byteoffset(adev, 0x1C30C, 0x3ff801);
++ wreg32_idx_byteoffset(adev, 0x1C300, 0xc0401);
++ wreg32_idx_byteoffset(adev, 0x1C314, 0x7ff40);
++ wreg32_idx_byteoffset(adev, 0x1C318, 0x7ffbc);
++ wreg32_idx_byteoffset(adev, 0x1C31C, 0x3ffa01);
++ wreg32_idx_byteoffset(adev, 0x1C310, 0x80401);
++ wreg32_idx_byteoffset(adev, 0x1C324, 0x7ffc0);
++ wreg32_idx_byteoffset(adev, 0x1C328, 0x7ffcc);
++ wreg32_idx_byteoffset(adev, 0x1C32C, 0x3ffe01);
++ wreg32_idx_byteoffset(adev, 0x1C320, 0x40401);
++ wreg32_idx_byteoffset(adev, 0x1C334, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C338, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C33C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C330, 0x400);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x110001);
++ wreg32_idx_byteoffset(adev, 0x1C114, 0x7f000);
++ wreg32_idx_byteoffset(adev, 0x1C110, 0x21);
++ wreg32_idx_byteoffset(adev, 0x1C11C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C118, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C1B4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C304, 0x7ff00);
++ wreg32_idx_byteoffset(adev, 0x1C308, 0x7ff3c);
++ wreg32_idx_byteoffset(adev, 0x1C30C, 0x3ff801);
++ wreg32_idx_byteoffset(adev, 0x1C300, 0xc0401);
++ wreg32_idx_byteoffset(adev, 0x1C314, 0x7ff40);
++ wreg32_idx_byteoffset(adev, 0x1C318, 0x7ffbc);
++ wreg32_idx_byteoffset(adev, 0x1C31C, 0x3ffa01);
++ wreg32_idx_byteoffset(adev, 0x1C310, 0x80401);
++ wreg32_idx_byteoffset(adev, 0x1C324, 0x7ffc0);
++ wreg32_idx_byteoffset(adev, 0x1C328, 0x7ffcc);
++ wreg32_idx_byteoffset(adev, 0x1C32C, 0x3ffe01);
++ wreg32_idx_byteoffset(adev, 0x1C320, 0x40401);
++ wreg32_idx_byteoffset(adev, 0x1C334, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C338, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C33C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C330, 0x400);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x120001);
++ wreg32_idx_byteoffset(adev, 0x1C114, 0x7f000);
++ wreg32_idx_byteoffset(adev, 0x1C110, 0x21);
++ wreg32_idx_byteoffset(adev, 0x1C11C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C118, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C1B4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C304, 0x7ff00);
++ wreg32_idx_byteoffset(adev, 0x1C308, 0x7ff3c);
++ wreg32_idx_byteoffset(adev, 0x1C30C, 0x3ff801);
++ wreg32_idx_byteoffset(adev, 0x1C300, 0xc0401);
++ wreg32_idx_byteoffset(adev, 0x1C314, 0x7ff40);
++ wreg32_idx_byteoffset(adev, 0x1C318, 0x7ffbc);
++ wreg32_idx_byteoffset(adev, 0x1C31C, 0x3ffa01);
++ wreg32_idx_byteoffset(adev, 0x1C310, 0x80401);
++ wreg32_idx_byteoffset(adev, 0x1C324, 0x7ffc0);
++ wreg32_idx_byteoffset(adev, 0x1C328, 0x7ffcc);
++ wreg32_idx_byteoffset(adev, 0x1C32C, 0x3ffe01);
++ wreg32_idx_byteoffset(adev, 0x1C320, 0x40401);
++ wreg32_idx_byteoffset(adev, 0x1C334, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C338, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C33C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C330, 0x400);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x130001);
++ wreg32_idx_byteoffset(adev, 0x1C114, 0x7f000);
++ wreg32_idx_byteoffset(adev, 0x1C110, 0x21);
++ wreg32_idx_byteoffset(adev, 0x1C11C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C118, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C1B4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C304, 0x7ff00);
++ wreg32_idx_byteoffset(adev, 0x1C308, 0x7ff3c);
++ wreg32_idx_byteoffset(adev, 0x1C30C, 0x3ff801);
++ wreg32_idx_byteoffset(adev, 0x1C300, 0xc0401);
++ wreg32_idx_byteoffset(adev, 0x1C314, 0x7ff40);
++ wreg32_idx_byteoffset(adev, 0x1C318, 0x7ffbc);
++ wreg32_idx_byteoffset(adev, 0x1C31C, 0x3ffa01);
++ wreg32_idx_byteoffset(adev, 0x1C310, 0x80401);
++ wreg32_idx_byteoffset(adev, 0x1C324, 0x7ffc0);
++ wreg32_idx_byteoffset(adev, 0x1C328, 0x7ffcc);
++ wreg32_idx_byteoffset(adev, 0x1C32C, 0x3ffe01);
++ wreg32_idx_byteoffset(adev, 0x1C320, 0x40401);
++ wreg32_idx_byteoffset(adev, 0x1C334, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C338, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C33C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C330, 0x400);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x140001);
++ wreg32_idx_byteoffset(adev, 0x1C114, 0x7f000);
++ wreg32_idx_byteoffset(adev, 0x1C110, 0x21);
++ wreg32_idx_byteoffset(adev, 0x1C11C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C118, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C1B4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C304, 0x7ff00);
++ wreg32_idx_byteoffset(adev, 0x1C308, 0x7ff3c);
++ wreg32_idx_byteoffset(adev, 0x1C30C, 0x3ff801);
++ wreg32_idx_byteoffset(adev, 0x1C300, 0xc0401);
++ wreg32_idx_byteoffset(adev, 0x1C314, 0x7ff40);
++ wreg32_idx_byteoffset(adev, 0x1C318, 0x7ffbc);
++ wreg32_idx_byteoffset(adev, 0x1C31C, 0x3ffa01);
++ wreg32_idx_byteoffset(adev, 0x1C310, 0x80401);
++ wreg32_idx_byteoffset(adev, 0x1C324, 0x7ffc0);
++ wreg32_idx_byteoffset(adev, 0x1C328, 0x7ffcc);
++ wreg32_idx_byteoffset(adev, 0x1C32C, 0x3ffe01);
++ wreg32_idx_byteoffset(adev, 0x1C320, 0x40401);
++ wreg32_idx_byteoffset(adev, 0x1C334, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C338, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C33C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C330, 0x400);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x150001);
++ wreg32_idx_byteoffset(adev, 0x1C114, 0x7f000);
++ wreg32_idx_byteoffset(adev, 0x1C110, 0x21);
++ wreg32_idx_byteoffset(adev, 0x1C11C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C118, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C1B4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C304, 0x7ff00);
++ wreg32_idx_byteoffset(adev, 0x1C308, 0x7ff3c);
++ wreg32_idx_byteoffset(adev, 0x1C30C, 0x3ff801);
++ wreg32_idx_byteoffset(adev, 0x1C300, 0xc0401);
++ wreg32_idx_byteoffset(adev, 0x1C314, 0x7ff40);
++ wreg32_idx_byteoffset(adev, 0x1C318, 0x7ffbc);
++ wreg32_idx_byteoffset(adev, 0x1C31C, 0x3ffa01);
++ wreg32_idx_byteoffset(adev, 0x1C310, 0x80401);
++ wreg32_idx_byteoffset(adev, 0x1C324, 0x7ffc0);
++ wreg32_idx_byteoffset(adev, 0x1C328, 0x7ffcc);
++ wreg32_idx_byteoffset(adev, 0x1C32C, 0x3ffe01);
++ wreg32_idx_byteoffset(adev, 0x1C320, 0x40401);
++ wreg32_idx_byteoffset(adev, 0x1C334, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C338, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C33C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C330, 0x400);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x160001);
++ wreg32_idx_byteoffset(adev, 0x1C114, 0x7f000);
++ wreg32_idx_byteoffset(adev, 0x1C110, 0x21);
++ wreg32_idx_byteoffset(adev, 0x1C11C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C118, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C1B4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C304, 0x7ff00);
++ wreg32_idx_byteoffset(adev, 0x1C308, 0x7ff3c);
++ wreg32_idx_byteoffset(adev, 0x1C30C, 0x3ff801);
++ wreg32_idx_byteoffset(adev, 0x1C300, 0xc0401);
++ wreg32_idx_byteoffset(adev, 0x1C314, 0x7ff40);
++ wreg32_idx_byteoffset(adev, 0x1C318, 0x7ffbc);
++ wreg32_idx_byteoffset(adev, 0x1C31C, 0x3ffa01);
++ wreg32_idx_byteoffset(adev, 0x1C310, 0x80401);
++ wreg32_idx_byteoffset(adev, 0x1C324, 0x7ffc0);
++ wreg32_idx_byteoffset(adev, 0x1C328, 0x7ffcc);
++ wreg32_idx_byteoffset(adev, 0x1C32C, 0x3ffe01);
++ wreg32_idx_byteoffset(adev, 0x1C320, 0x40401);
++ wreg32_idx_byteoffset(adev, 0x1C334, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C338, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C33C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C330, 0x400);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x170001);
++ wreg32_idx_byteoffset(adev, 0x1C114, 0x7f000);
++ wreg32_idx_byteoffset(adev, 0x1C110, 0x21);
++ wreg32_idx_byteoffset(adev, 0x1C11C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C118, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C1B4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C304, 0x7ff00);
++ wreg32_idx_byteoffset(adev, 0x1C308, 0x7ff3c);
++ wreg32_idx_byteoffset(adev, 0x1C30C, 0x3ff801);
++ wreg32_idx_byteoffset(adev, 0x1C300, 0xc0401);
++ wreg32_idx_byteoffset(adev, 0x1C314, 0x7ff40);
++ wreg32_idx_byteoffset(adev, 0x1C318, 0x7ffbc);
++ wreg32_idx_byteoffset(adev, 0x1C31C, 0x3ffa01);
++ wreg32_idx_byteoffset(adev, 0x1C310, 0x80401);
++ wreg32_idx_byteoffset(adev, 0x1C324, 0x7ffc0);
++ wreg32_idx_byteoffset(adev, 0x1C328, 0x7ffcc);
++ wreg32_idx_byteoffset(adev, 0x1C32C, 0x3ffe01);
++ wreg32_idx_byteoffset(adev, 0x1C320, 0x40401);
++ wreg32_idx_byteoffset(adev, 0x1C334, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C338, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C33C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C330, 0x400);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x180001);
++ wreg32_idx_byteoffset(adev, 0x1C114, 0x7f000);
++ wreg32_idx_byteoffset(adev, 0x1C110, 0x21);
++ wreg32_idx_byteoffset(adev, 0x1C11C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C118, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C1B4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C304, 0x7ff00);
++ wreg32_idx_byteoffset(adev, 0x1C308, 0x7ff3c);
++ wreg32_idx_byteoffset(adev, 0x1C30C, 0x3ff801);
++ wreg32_idx_byteoffset(adev, 0x1C300, 0xc0401);
++ wreg32_idx_byteoffset(adev, 0x1C314, 0x7ff40);
++ wreg32_idx_byteoffset(adev, 0x1C318, 0x7ffbc);
++ wreg32_idx_byteoffset(adev, 0x1C31C, 0x3ffa01);
++ wreg32_idx_byteoffset(adev, 0x1C310, 0x80401);
++ wreg32_idx_byteoffset(adev, 0x1C324, 0x7ffc0);
++ wreg32_idx_byteoffset(adev, 0x1C328, 0x7ffcc);
++ wreg32_idx_byteoffset(adev, 0x1C32C, 0x3ffe01);
++ wreg32_idx_byteoffset(adev, 0x1C320, 0x40401);
++ wreg32_idx_byteoffset(adev, 0x1C334, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C338, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C33C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C330, 0x400);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x190001);
++ wreg32_idx_byteoffset(adev, 0x1C114, 0x7f000);
++ wreg32_idx_byteoffset(adev, 0x1C110, 0x21);
++ wreg32_idx_byteoffset(adev, 0x1C11C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C118, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C1B4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C304, 0x7ff00);
++ wreg32_idx_byteoffset(adev, 0x1C308, 0x7ff3c);
++ wreg32_idx_byteoffset(adev, 0x1C30C, 0x3ff801);
++ wreg32_idx_byteoffset(adev, 0x1C300, 0xc0401);
++ wreg32_idx_byteoffset(adev, 0x1C314, 0x7ff40);
++ wreg32_idx_byteoffset(adev, 0x1C318, 0x7ffbc);
++ wreg32_idx_byteoffset(adev, 0x1C31C, 0x3ffa01);
++ wreg32_idx_byteoffset(adev, 0x1C310, 0x80401);
++ wreg32_idx_byteoffset(adev, 0x1C324, 0x7ffc0);
++ wreg32_idx_byteoffset(adev, 0x1C328, 0x7ffcc);
++ wreg32_idx_byteoffset(adev, 0x1C32C, 0x3ffe01);
++ wreg32_idx_byteoffset(adev, 0x1C320, 0x40401);
++ wreg32_idx_byteoffset(adev, 0x1C334, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C338, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C33C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C330, 0x400);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x1a0001);
++ wreg32_idx_byteoffset(adev, 0x1C114, 0x7f000);
++ wreg32_idx_byteoffset(adev, 0x1C110, 0x21);
++ wreg32_idx_byteoffset(adev, 0x1C11C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C118, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C1B4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C304, 0x7ff00);
++ wreg32_idx_byteoffset(adev, 0x1C308, 0x7ff3c);
++ wreg32_idx_byteoffset(adev, 0x1C30C, 0x3ff801);
++ wreg32_idx_byteoffset(adev, 0x1C300, 0xc0401);
++ wreg32_idx_byteoffset(adev, 0x1C314, 0x7ff40);
++ wreg32_idx_byteoffset(adev, 0x1C318, 0x7ffbc);
++ wreg32_idx_byteoffset(adev, 0x1C31C, 0x3ffa01);
++ wreg32_idx_byteoffset(adev, 0x1C310, 0x80401);
++ wreg32_idx_byteoffset(adev, 0x1C324, 0x7ffc0);
++ wreg32_idx_byteoffset(adev, 0x1C328, 0x7ffcc);
++ wreg32_idx_byteoffset(adev, 0x1C32C, 0x3ffe01);
++ wreg32_idx_byteoffset(adev, 0x1C320, 0x40401);
++ wreg32_idx_byteoffset(adev, 0x1C334, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C338, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C33C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C330, 0x400);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x1b0001);
++ wreg32_idx_byteoffset(adev, 0x1C114, 0x7f000);
++ wreg32_idx_byteoffset(adev, 0x1C110, 0x21);
++ wreg32_idx_byteoffset(adev, 0x1C11C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C118, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C1B4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C304, 0x7ff00);
++ wreg32_idx_byteoffset(adev, 0x1C308, 0x7ff3c);
++ wreg32_idx_byteoffset(adev, 0x1C30C, 0x3ff801);
++ wreg32_idx_byteoffset(adev, 0x1C300, 0xc0401);
++ wreg32_idx_byteoffset(adev, 0x1C314, 0x7ff40);
++ wreg32_idx_byteoffset(adev, 0x1C318, 0x7ffbc);
++ wreg32_idx_byteoffset(adev, 0x1C31C, 0x3ffa01);
++ wreg32_idx_byteoffset(adev, 0x1C310, 0x80401);
++ wreg32_idx_byteoffset(adev, 0x1C324, 0x7ffc0);
++ wreg32_idx_byteoffset(adev, 0x1C328, 0x7ffcc);
++ wreg32_idx_byteoffset(adev, 0x1C32C, 0x3ffe01);
++ wreg32_idx_byteoffset(adev, 0x1C320, 0x40401);
++ wreg32_idx_byteoffset(adev, 0x1C334, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C338, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C33C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C330, 0x400);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x1c0001);
++ wreg32_idx_byteoffset(adev, 0x1C114, 0x7f000);
++ wreg32_idx_byteoffset(adev, 0x1C110, 0x21);
++ wreg32_idx_byteoffset(adev, 0x1C11C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C118, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C1B4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C304, 0x7ff00);
++ wreg32_idx_byteoffset(adev, 0x1C308, 0x7ff3c);
++ wreg32_idx_byteoffset(adev, 0x1C30C, 0x3ff801);
++ wreg32_idx_byteoffset(adev, 0x1C300, 0xc0401);
++ wreg32_idx_byteoffset(adev, 0x1C314, 0x7ff40);
++ wreg32_idx_byteoffset(adev, 0x1C318, 0x7ffbc);
++ wreg32_idx_byteoffset(adev, 0x1C31C, 0x3ffa01);
++ wreg32_idx_byteoffset(adev, 0x1C310, 0x80401);
++ wreg32_idx_byteoffset(adev, 0x1C324, 0x7ffc0);
++ wreg32_idx_byteoffset(adev, 0x1C328, 0x7ffcc);
++ wreg32_idx_byteoffset(adev, 0x1C32C, 0x3ffe01);
++ wreg32_idx_byteoffset(adev, 0x1C320, 0x40401);
++ wreg32_idx_byteoffset(adev, 0x1C334, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C338, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C33C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C330, 0x400);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x1d0001);
++ wreg32_idx_byteoffset(adev, 0x1C114, 0x7f000);
++ wreg32_idx_byteoffset(adev, 0x1C110, 0x21);
++ wreg32_idx_byteoffset(adev, 0x1C11C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C118, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C1B4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C304, 0x7ff00);
++ wreg32_idx_byteoffset(adev, 0x1C308, 0x7ff3c);
++ wreg32_idx_byteoffset(adev, 0x1C30C, 0x3ff801);
++ wreg32_idx_byteoffset(adev, 0x1C300, 0xc0401);
++ wreg32_idx_byteoffset(adev, 0x1C314, 0x7ff40);
++ wreg32_idx_byteoffset(adev, 0x1C318, 0x7ffbc);
++ wreg32_idx_byteoffset(adev, 0x1C31C, 0x3ffa01);
++ wreg32_idx_byteoffset(adev, 0x1C310, 0x80401);
++ wreg32_idx_byteoffset(adev, 0x1C324, 0x7ffc0);
++ wreg32_idx_byteoffset(adev, 0x1C328, 0x7ffcc);
++ wreg32_idx_byteoffset(adev, 0x1C32C, 0x3ffe01);
++ wreg32_idx_byteoffset(adev, 0x1C320, 0x40401);
++ wreg32_idx_byteoffset(adev, 0x1C334, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C338, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C33C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C330, 0x400);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x1e0001);
++ wreg32_idx_byteoffset(adev, 0x1C114, 0x7f000);
++ wreg32_idx_byteoffset(adev, 0x1C110, 0x21);
++ wreg32_idx_byteoffset(adev, 0x1C11C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C118, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C1B4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C304, 0x7ff00);
++ wreg32_idx_byteoffset(adev, 0x1C308, 0x7ff3c);
++ wreg32_idx_byteoffset(adev, 0x1C30C, 0x3ff801);
++ wreg32_idx_byteoffset(adev, 0x1C300, 0xc0401);
++ wreg32_idx_byteoffset(adev, 0x1C314, 0x7ff40);
++ wreg32_idx_byteoffset(adev, 0x1C318, 0x7ffbc);
++ wreg32_idx_byteoffset(adev, 0x1C31C, 0x3ffa01);
++ wreg32_idx_byteoffset(adev, 0x1C310, 0x80401);
++ wreg32_idx_byteoffset(adev, 0x1C324, 0x7ffc0);
++ wreg32_idx_byteoffset(adev, 0x1C328, 0x7ffcc);
++ wreg32_idx_byteoffset(adev, 0x1C32C, 0x3ffe01);
++ wreg32_idx_byteoffset(adev, 0x1C320, 0x40401);
++ wreg32_idx_byteoffset(adev, 0x1C334, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C338, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C33C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C330, 0x400);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x1f0001);
++ wreg32_idx_byteoffset(adev, 0x1C114, 0x7f000);
++ wreg32_idx_byteoffset(adev, 0x1C110, 0x21);
++ wreg32_idx_byteoffset(adev, 0x1C11C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C118, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C1B4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C304, 0x7ff00);
++ wreg32_idx_byteoffset(adev, 0x1C308, 0x7ff3c);
++ wreg32_idx_byteoffset(adev, 0x1C30C, 0x3ff801);
++ wreg32_idx_byteoffset(adev, 0x1C300, 0xc0401);
++ wreg32_idx_byteoffset(adev, 0x1C314, 0x7ff40);
++ wreg32_idx_byteoffset(adev, 0x1C318, 0x7ffbc);
++ wreg32_idx_byteoffset(adev, 0x1C31C, 0x3ffa01);
++ wreg32_idx_byteoffset(adev, 0x1C310, 0x80401);
++ wreg32_idx_byteoffset(adev, 0x1C324, 0x7ffc0);
++ wreg32_idx_byteoffset(adev, 0x1C328, 0x7ffcc);
++ wreg32_idx_byteoffset(adev, 0x1C32C, 0x3ffe01);
++ wreg32_idx_byteoffset(adev, 0x1C320, 0x40401);
++ wreg32_idx_byteoffset(adev, 0x1C334, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C338, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C33C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C330, 0x400);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x400001);
++ wreg32_idx_byteoffset(adev, 0x1C300, 0xc0401);
++ wreg32_idx_byteoffset(adev, 0x1C310, 0x80401);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x410001);
++ wreg32_idx_byteoffset(adev, 0x1C300, 0xc0401);
++ wreg32_idx_byteoffset(adev, 0x1C310, 0x80401);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x450001);
++ wreg32_idx_byteoffset(adev, 0x1C300, 0x40401);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x450001);
++ wreg32_idx_byteoffset(adev, 0x1C3F0, 0x1);
++ wreg32_idx_byteoffset(adev, 0x1C760, 0x400000);
++ wreg32_idx_byteoffset(adev, 0x1C764, 0x400000);
++ wreg32_idx_byteoffset(adev, 0x1C768, 0x400000);
++ wreg32_idx_byteoffset(adev, 0x1C76C, 0x400000);
++ wreg32_idx_byteoffset(adev, 0x1C770, 0x400000);
++ wreg32_idx_byteoffset(adev, 0x1C774, 0x400000);
++ wreg32_idx_byteoffset(adev, 0x1C778, 0x400000);
++ wreg32_idx_byteoffset(adev, 0x1C77C, 0x400000);
++ wreg32_idx_byteoffset(adev, 0x1C78C, 0x200);
++ wreg32_idx_byteoffset(adev, 0x1C740, 0x22312223);
++ wreg32_idx_byteoffset(adev, 0x1C744, 0x22312223);
++ wreg32_idx_byteoffset(adev, 0x1C748, 0x22312223);
++ wreg32_idx_byteoffset(adev, 0x1C74C, 0x22312223);
++ wreg32_idx_byteoffset(adev, 0x1C788, 0xff);
++ wreg32_idx_byteoffset(adev, 0x1C738, 0x26d400);
++ wreg32_idx_byteoffset(adev, 0x1D490, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1D504, 0x4000000);
++ wreg32_idx_byteoffset(adev, 0x1D4FC, 0xe0000280);
++ wreg32_idx_byteoffset(adev, 0x1D4F8, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D508, 0xc0700420);
++ wreg32_idx_byteoffset(adev, 0x1C468, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C678, 0x1010011);
++ wreg32_idx_byteoffset(adev, 0x1C67C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C5AC, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D7C4, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C780, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C784, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C46C, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C724, 0xffffffff);
++ wreg32_idx_byteoffset(adev, 0x1C728, 0xe);
++ wreg32_idx_byteoffset(adev, 0x1C504, 0x2);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1D040, 0x0);
++ wreg32_idx_byteoffset(adev, 0x1C504, 0x3);
++ wreg32_idx_byteoffset(adev, 0x11EE0864, 0x0);
++ wreg32_idx_byteoffset(adev, 0x11EE1864, 0x0);
++ wreg32_idx_byteoffset(adev, 0x11EE2864, 0x0);
++ wreg32_idx_byteoffset(adev, 0x11EE3864, 0x0);
++ wreg32_idx_byteoffset(adev, 0x11EF0034, 0x1);
++ wreg32_idx_byteoffset(adev, 0x111402D8, 0x340079c);
++
++
++ wreg32_idx_byteoffset(adev, 0x681B8, 0x3);
++ wreg32_idx_byteoffset(adev, 0x683B8, 0x3);
++
++ /* Start with InitMmhub on GPU 0 */
++ wreg32_idx_byteoffset(adev, 0x68E18, 0x0007a640);
++ wreg32_idx_byteoffset(adev, 0x68E14, 0x00000009);
++ wreg32_idx_byteoffset(adev, 0x68E18, 0x0002a64a);
++ wreg32_idx_byteoffset(adev, 0x68E14, 0x0000000d);
++ wreg32_idx_byteoffset(adev, 0x68E18, 0x0002a680);
++ wreg32_idx_byteoffset(adev, 0x68E14, 0x00000011);
++ wreg32_idx_byteoffset(adev, 0x68E18, 0x0006a684);
++ wreg32_idx_byteoffset(adev, 0x68E14, 0x00000019);
++ wreg32_idx_byteoffset(adev, 0x68E18, 0x000ea68e);
++ wreg32_idx_byteoffset(adev, 0x68E14, 0x00000029);
++ wreg32_idx_byteoffset(adev, 0x68E18, 0x0000a69e);
++ wreg32_idx_byteoffset(adev, 0x68E14, 0x0000002b);
++ wreg32_idx_byteoffset(adev, 0x68E18, 0x0034a6c0);
++ wreg32_idx_byteoffset(adev, 0x68E14, 0x00000061);
++ wreg32_idx_byteoffset(adev, 0x68E18, 0x0083a707);
++ wreg32_idx_byteoffset(adev, 0x68E14, 0x000000e6);
++ wreg32_idx_byteoffset(adev, 0x68E18, 0x0008a7a4);
++ wreg32_idx_byteoffset(adev, 0x68E14, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x68E18, 0x0001a7b8);
++ wreg32_idx_byteoffset(adev, 0x68E14, 0x000000f3);
++ wreg32_idx_byteoffset(adev, 0x68E18, 0x0000a7dc);
++ wreg32_idx_byteoffset(adev, 0x68E14, 0x000000f5);
++ wreg32_idx_byteoffset(adev, 0x68E18, 0x0012a7f5);
++ wreg32_idx_byteoffset(adev, 0x68E14, 0x00000109);
++ wreg32_idx_byteoffset(adev, 0x68E18, 0x0012a810);
++ wreg32_idx_byteoffset(adev, 0x68E14, 0x0000011d);
++ wreg32_idx_byteoffset(adev, 0x68E18, 0x0007a82c);
++ wreg32_idx_byteoffset(adev, 0x68E1C, 0x00494000);
++ wreg32_idx_byteoffset(adev, 0x68E20, 0x00019000);
++ wreg32_idx_byteoffset(adev, 0x68E44, 0x0025a000);
++ wreg32_idx_byteoffset(adev, 0x68E40, 0x00000027);
++ wreg32_idx_byteoffset(adev, 0x68E44, 0x002ca02c);
++ wreg32_idx_byteoffset(adev, 0x68E40, 0x00000055);
++ wreg32_idx_byteoffset(adev, 0x68E44, 0x0002a061);
++ wreg32_idx_byteoffset(adev, 0x68E40, 0x00000059);
++ wreg32_idx_byteoffset(adev, 0x68E44, 0x0017a068);
++ wreg32_idx_byteoffset(adev, 0x68E40, 0x00000072);
++ wreg32_idx_byteoffset(adev, 0x68E44, 0x0018a100);
++ wreg32_idx_byteoffset(adev, 0x68E40, 0x0000008c);
++ wreg32_idx_byteoffset(adev, 0x68E44, 0x0004a132);
++ wreg32_idx_byteoffset(adev, 0x68E40, 0x00000092);
++ wreg32_idx_byteoffset(adev, 0x68E44, 0x000ca141);
++ wreg32_idx_byteoffset(adev, 0x68E40, 0x000000a0);
++ wreg32_idx_byteoffset(adev, 0x68E44, 0x002fa158);
++ wreg32_idx_byteoffset(adev, 0x68E40, 0x000000d1);
++ wreg32_idx_byteoffset(adev, 0x68E44, 0x0017a1d0);
++ wreg32_idx_byteoffset(adev, 0x68E40, 0x000000ea);
++ wreg32_idx_byteoffset(adev, 0x68E44, 0x0001a1e9);
++ wreg32_idx_byteoffset(adev, 0x68E40, 0x000000ed);
++ wreg32_idx_byteoffset(adev, 0x68E44, 0x0013a1ec);
++ wreg32_idx_byteoffset(adev, 0x68E40, 0x00000102);
++ wreg32_idx_byteoffset(adev, 0x68E44, 0x0007a201);
++ wreg32_idx_byteoffset(adev, 0x68E40, 0x0000010b);
++ wreg32_idx_byteoffset(adev, 0x68E44, 0x0003a20a);
++ wreg32_idx_byteoffset(adev, 0x68E40, 0x00000110);
++ wreg32_idx_byteoffset(adev, 0x68E44, 0x0007a580);
++ wreg32_idx_byteoffset(adev, 0x68E40, 0x00000119);
++ wreg32_idx_byteoffset(adev, 0x68E44, 0x0000a590);
++ wreg32_idx_byteoffset(adev, 0x68E40, 0x0000011b);
++ wreg32_idx_byteoffset(adev, 0x68E44, 0x0004a594);
++ wreg32_idx_byteoffset(adev, 0x68E40, 0x00000121);
++ wreg32_idx_byteoffset(adev, 0x68E44, 0x0001a59c);
++ wreg32_idx_byteoffset(adev, 0x68E40, 0x00000124);
++ wreg32_idx_byteoffset(adev, 0x68E44, 0x0007a82c);
++ wreg32_idx_byteoffset(adev, 0x68E48, 0x00258000);
++ wreg32_idx_byteoffset(adev, 0x68E4C, 0x00004800);
++ wreg32_idx_byteoffset(adev, 0x68E70, 0x0025a080);
++ wreg32_idx_byteoffset(adev, 0x68E6C, 0x00000027);
++ wreg32_idx_byteoffset(adev, 0x68E70, 0x002ca0ac);
++ wreg32_idx_byteoffset(adev, 0x68E6C, 0x00000055);
++ wreg32_idx_byteoffset(adev, 0x68E70, 0x0002a0e1);
++ wreg32_idx_byteoffset(adev, 0x68E6C, 0x00000059);
++ wreg32_idx_byteoffset(adev, 0x68E70, 0x0017a0e8);
++ wreg32_idx_byteoffset(adev, 0x68E6C, 0x00000072);
++ wreg32_idx_byteoffset(adev, 0x68E70, 0x0018a240);
++ wreg32_idx_byteoffset(adev, 0x68E6C, 0x0000008c);
++ wreg32_idx_byteoffset(adev, 0x68E70, 0x0004a272);
++ wreg32_idx_byteoffset(adev, 0x68E6C, 0x00000092);
++ wreg32_idx_byteoffset(adev, 0x68E70, 0x000ca281);
++ wreg32_idx_byteoffset(adev, 0x68E6C, 0x000000a0);
++ wreg32_idx_byteoffset(adev, 0x68E70, 0x002fa298);
++ wreg32_idx_byteoffset(adev, 0x68E6C, 0x000000d1);
++ wreg32_idx_byteoffset(adev, 0x68E70, 0x0017a310);
++ wreg32_idx_byteoffset(adev, 0x68E6C, 0x000000ea);
++ wreg32_idx_byteoffset(adev, 0x68E70, 0x0001a329);
++ wreg32_idx_byteoffset(adev, 0x68E6C, 0x000000ed);
++ wreg32_idx_byteoffset(adev, 0x68E70, 0x0013a32c);
++ wreg32_idx_byteoffset(adev, 0x68E6C, 0x00000102);
++ wreg32_idx_byteoffset(adev, 0x68E70, 0x0007a341);
++ wreg32_idx_byteoffset(adev, 0x68E6C, 0x0000010b);
++ wreg32_idx_byteoffset(adev, 0x68E70, 0x0003a34a);
++ wreg32_idx_byteoffset(adev, 0x68E6C, 0x00000110);
++ wreg32_idx_byteoffset(adev, 0x68E70, 0x0007a580);
++ wreg32_idx_byteoffset(adev, 0x68E6C, 0x00000119);
++ wreg32_idx_byteoffset(adev, 0x68E70, 0x0000a590);
++ wreg32_idx_byteoffset(adev, 0x68E6C, 0x0000011b);
++ wreg32_idx_byteoffset(adev, 0x68E70, 0x0004a594);
++ wreg32_idx_byteoffset(adev, 0x68E6C, 0x00000121);
++ wreg32_idx_byteoffset(adev, 0x68E70, 0x0001a59c);
++ wreg32_idx_byteoffset(adev, 0x68E6C, 0x00000124);
++ wreg32_idx_byteoffset(adev, 0x68E70, 0x0007a82c);
++ wreg32_idx_byteoffset(adev, 0x68E74, 0x00258000);
++ wreg32_idx_byteoffset(adev, 0x68E78, 0x00004800);
++ wreg32_idx_byteoffset(adev, 0x68000, 0xfe5fe0f8);
++ wreg32_idx_byteoffset(adev, 0x68400, 0x55555554);
++ wreg32_idx_byteoffset(adev, 0x68038, 0xfe5fe0fa);
++ wreg32_idx_byteoffset(adev, 0x68400, 0x65555554);
++ wreg32_idx_byteoffset(adev, 0x68008, 0xfe5fe0f8);
++ wreg32_idx_byteoffset(adev, 0x68400, 0x65555544);
++ wreg32_idx_byteoffset(adev, 0x6800C, 0xfe5fe0f8);
++ wreg32_idx_byteoffset(adev, 0x68400, 0x65555504);
++ wreg32_idx_byteoffset(adev, 0x68010, 0xfe5fe0f8);
++ wreg32_idx_byteoffset(adev, 0x68400, 0x65555404);
++ wreg32_idx_byteoffset(adev, 0x68014, 0xfe5fe0f8);
++ wreg32_idx_byteoffset(adev, 0x68400, 0x65555004);
++ wreg32_idx_byteoffset(adev, 0x68200, 0xfe5fe0f8);
++ wreg32_idx_byteoffset(adev, 0x68900, 0x55555554);
++ wreg32_idx_byteoffset(adev, 0x68204, 0xfe5fe0f8);
++ wreg32_idx_byteoffset(adev, 0x68900, 0x55555550);
++ wreg32_idx_byteoffset(adev, 0x68230, 0xfe5fe0f8);
++ wreg32_idx_byteoffset(adev, 0x68900, 0x54555550);
++ wreg32_idx_byteoffset(adev, 0x68290, 0x7800e408);
++ wreg32_idx_byteoffset(adev, 0x68230, 0xfe5fe0f0);
++ wreg32_idx_byteoffset(adev, 0x680B0, 0xfe5fe0f8);
++ wreg32_idx_byteoffset(adev, 0x68408, 0x55555554);
++ wreg32_idx_byteoffset(adev, 0x680E8, 0xfe5fe0fa);
++ wreg32_idx_byteoffset(adev, 0x68408, 0x65555554);
++ wreg32_idx_byteoffset(adev, 0x680B8, 0xfe5fe0f8);
++ wreg32_idx_byteoffset(adev, 0x68408, 0x65555544);
++ wreg32_idx_byteoffset(adev, 0x680BC, 0xfe5fe0f8);
++ wreg32_idx_byteoffset(adev, 0x68408, 0x65555504);
++ wreg32_idx_byteoffset(adev, 0x680C0, 0xfe5fe0f8);
++ wreg32_idx_byteoffset(adev, 0x68408, 0x65555404);
++ wreg32_idx_byteoffset(adev, 0x680C4, 0xfe5fe0f8);
++ wreg32_idx_byteoffset(adev, 0x68408, 0x65555004);
++ wreg32_idx_byteoffset(adev, 0x680C8, 0xfe5fe0f8);
++ wreg32_idx_byteoffset(adev, 0x68408, 0x65554004);
++ wreg32_idx_byteoffset(adev, 0x682B0, 0xfe5fe0f8);
++ wreg32_idx_byteoffset(adev, 0x68908, 0x55555554);
++ wreg32_idx_byteoffset(adev, 0x682B4, 0xfe5fe0f8);
++ wreg32_idx_byteoffset(adev, 0x68908, 0x55555550);
++ wreg32_idx_byteoffset(adev, 0x68CC8, 0x000107bf);
++ wreg32_idx_byteoffset(adev, 0x68CC8, 0x000707bf);
++ wreg32_idx_byteoffset(adev, 0x681B8, 0x00000003);
++ wreg32_idx_byteoffset(adev, 0x683B8, 0x00000003);
++ wreg32_idx_byteoffset(adev, 0x68090, 0x7be0e408);
++ wreg32_idx_byteoffset(adev, 0x687C8, 0x000107bf);
++ wreg32_idx_byteoffset(adev, 0x687C8, 0x000707bf);
++ wreg32_idx_byteoffset(adev, 0x68004, 0xfe5fe069);
++ wreg32_idx_byteoffset(adev, 0x68004, 0xfe5fe669);
++ wreg32_idx_byteoffset(adev, 0x68008, 0xfe5fe0a8);
++ wreg32_idx_byteoffset(adev, 0x68008, 0xfe5feaa8);
++ wreg32_idx_byteoffset(adev, 0x6800C, 0xfe5fe0a8);
++ wreg32_idx_byteoffset(adev, 0x6800C, 0xfe5feaa8);
++ wreg32_idx_byteoffset(adev, 0x68010, 0xfe5fe0a8);
++ wreg32_idx_byteoffset(adev, 0x68010, 0xfe5feaa8);
++ wreg32_idx_byteoffset(adev, 0x68014, 0xfe5fe0a8);
++ wreg32_idx_byteoffset(adev, 0x68014, 0xfe5feaa8);
++ wreg32_idx_byteoffset(adev, 0x68200, 0xfe5fe0a8);
++ wreg32_idx_byteoffset(adev, 0x68200, 0xfe5feaa8);
++ wreg32_idx_byteoffset(adev, 0x68204, 0xfe5fe0a8);
++ wreg32_idx_byteoffset(adev, 0x68204, 0xfe5feaa8);
++ wreg32_idx_byteoffset(adev, 0x680B4, 0xfe5fe069);
++ wreg32_idx_byteoffset(adev, 0x680B4, 0xfe5fe669);
++ wreg32_idx_byteoffset(adev, 0x680B8, 0xfe5fe0c8);
++ wreg32_idx_byteoffset(adev, 0x680B8, 0xfe5fecc8);
++ wreg32_idx_byteoffset(adev, 0x680BC, 0xfe5fe0a8);
++ wreg32_idx_byteoffset(adev, 0x680BC, 0xfe5feaa8);
++ wreg32_idx_byteoffset(adev, 0x680C0, 0xfe5fe0a8);
++ wreg32_idx_byteoffset(adev, 0x680C0, 0xfe5feaa8);
++ wreg32_idx_byteoffset(adev, 0x680C4, 0xfe5fe0a8);
++ wreg32_idx_byteoffset(adev, 0x680C4, 0xfe5feaa8);
++ wreg32_idx_byteoffset(adev, 0x680C8, 0xfe5fe0a8);
++ wreg32_idx_byteoffset(adev, 0x680C8, 0xfe5feaa8);
++ wreg32_idx_byteoffset(adev, 0x682B0, 0xfe5fe0a8);
++ wreg32_idx_byteoffset(adev, 0x682B0, 0xfe5feaa8);
++ wreg32_idx_byteoffset(adev, 0x682B4, 0xfe5fe0a8);
++ wreg32_idx_byteoffset(adev, 0x682B4, 0xfe5feaa8);
++ wreg32_idx_byteoffset(adev, 0x682B8, 0xfe5fe069);
++ wreg32_idx_byteoffset(adev, 0x682B8, 0xfe5fe669);
++ wreg32_idx_byteoffset(adev, 0x6A0B0, 0x00101010);
++ wreg32_idx_byteoffset(adev, 0x6A0B4, 0x0010101f);
++ wreg32_idx_byteoffset(adev, 0x6A0CC, 0x00002541);
++ wreg32_idx_byteoffset(adev, 0x6A0CC, 0x00002141);
++ wreg32_idx_byteoffset(adev, 0x69A00, 0x00080603);
++ wreg32_idx_byteoffset(adev, 0x69A5C, 0x00000081);
++ wreg32_idx_byteoffset(adev, 0x69A5C, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x69B04, 0x007ffe81);
++ wreg32_idx_byteoffset(adev, 0x69D34, 0x01010000);
++ wreg32_idx_byteoffset(adev, 0x69DB4, 0x01011fff);
++ wreg32_idx_byteoffset(adev, 0x69B40, 0x00000004);
++ wreg32_idx_byteoffset(adev, 0x69B40, 0x0000000c);
++ wreg32_idx_byteoffset(adev, 0x69B40, 0x0000001c);
++ wreg32_idx_byteoffset(adev, 0x69B40, 0x0000003c);
++ wreg32_idx_byteoffset(adev, 0x69B40, 0x0000007c);
++ wreg32_idx_byteoffset(adev, 0x69B40, 0x000000fc);
++ wreg32_idx_byteoffset(adev, 0x69B40, 0x000001fc);
++ wreg32_idx_byteoffset(adev, 0x69B40, 0x000003fc);
++ wreg32_idx_byteoffset(adev, 0x69B40, 0x000007fc);
++ wreg32_idx_byteoffset(adev, 0x69B40, 0x00000ffc);
++ wreg32_idx_byteoffset(adev, 0x69B40, 0x00001ffc);
++ wreg32_idx_byteoffset(adev, 0x69B40, 0x00003ffc);
++ wreg32_idx_byteoffset(adev, 0x69B40, 0x00007ffc);
++ wreg32_idx_byteoffset(adev, 0x69B40, 0x0000fffc);
++ wreg32_idx_byteoffset(adev, 0x69B40, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x69600, 0x00050b00);
++ wreg32_idx_byteoffset(adev, 0x69D38, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x69DB8, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x69CB8, 0x00000000);
++ /*## End with InitMmhub on GPU 0*/
++
++ /* insert MMHUB seq here.*/
++ wreg32_idx_byteoffset(adev, 0x3D80, 0x10100000);
++ wreg32_idx_byteoffset(adev, 0x3D88, 0x00000010);
++ /* Done with MMHUB Init on GPU 0*/
++
++ /*Start with UMC Init on GPU 0*/
++ wreg32_idx_byteoffset(adev, 0x4B644, 0x800000ff);
++ /* Start with UmcInit() on GPU 0*/
++ wreg32_idx_byteoffset(adev, 0x57200, 0x00000101);
++ wreg32_idx_byteoffset(adev, 0x157200, 0x00000101);
++ wreg32_idx_byteoffset(adev, 0x257200, 0x00000101);
++ wreg32_idx_byteoffset(adev, 0x357200, 0x00000101);
++ wreg32_idx_byteoffset(adev, 0x457200, 0x00000101);
++ wreg32_idx_byteoffset(adev, 0x557200, 0x00000101);
++ wreg32_idx_byteoffset(adev, 0x657200, 0x00000101);
++ wreg32_idx_byteoffset(adev, 0x757200, 0x00000101);
++ wreg32_idx_byteoffset(adev, 0x571E8, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x1571E8, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x2571E8, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x3571E8, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x4571E8, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x5571E8, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x6571E8, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x7571E8, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x571E0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x1571E0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x2571E0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x3571E0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x4571E0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5571E0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x6571E0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x7571E0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x571E4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x1571E4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x2571E4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x3571E4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x4571E4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5571E4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x6571E4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x7571E4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x57170, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x157170, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x257170, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x357170, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x457170, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x557170, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x657170, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x757170, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5717C, 0x040e7d98);
++ wreg32_idx_byteoffset(adev, 0x15717C, 0x040e7d98);
++ wreg32_idx_byteoffset(adev, 0x25717C, 0x040e7d98);
++ wreg32_idx_byteoffset(adev, 0x35717C, 0x040e7d98);
++ wreg32_idx_byteoffset(adev, 0x45717C, 0x040e7d98);
++ wreg32_idx_byteoffset(adev, 0x55717C, 0x040e7d98);
++ wreg32_idx_byteoffset(adev, 0x65717C, 0x040e7d98);
++ wreg32_idx_byteoffset(adev, 0x75717C, 0x040e7d98);
++ wreg32_idx_byteoffset(adev, 0x57178, 0x00e3950b);
++ wreg32_idx_byteoffset(adev, 0x157178, 0x00e3950b);
++ wreg32_idx_byteoffset(adev, 0x257178, 0x00e3950b);
++ wreg32_idx_byteoffset(adev, 0x357178, 0x00e3950b);
++ wreg32_idx_byteoffset(adev, 0x457178, 0x00e3950b);
++ wreg32_idx_byteoffset(adev, 0x557178, 0x00e3950b);
++ wreg32_idx_byteoffset(adev, 0x657178, 0x00e3950b);
++ wreg32_idx_byteoffset(adev, 0x757178, 0x00e3950b);
++ wreg32_idx_byteoffset(adev, 0x57180, 0x001bff78);
++ wreg32_idx_byteoffset(adev, 0x157180, 0x001bff78);
++ wreg32_idx_byteoffset(adev, 0x257180, 0x001bff78);
++ wreg32_idx_byteoffset(adev, 0x357180, 0x001bff78);
++ wreg32_idx_byteoffset(adev, 0x457180, 0x001bff78);
++ wreg32_idx_byteoffset(adev, 0x557180, 0x001bff78);
++ wreg32_idx_byteoffset(adev, 0x657180, 0x001bff78);
++ wreg32_idx_byteoffset(adev, 0x757180, 0x001bff78);
++ wreg32_idx_byteoffset(adev, 0x57184, 0x00e8d63e);
++ wreg32_idx_byteoffset(adev, 0x157184, 0x00e8d63e);
++ wreg32_idx_byteoffset(adev, 0x257184, 0x00e8d63e);
++ wreg32_idx_byteoffset(adev, 0x357184, 0x00e8d63e);
++ wreg32_idx_byteoffset(adev, 0x457184, 0x00e8d63e);
++ wreg32_idx_byteoffset(adev, 0x557184, 0x00e8d63e);
++ wreg32_idx_byteoffset(adev, 0x657184, 0x00e8d63e);
++ wreg32_idx_byteoffset(adev, 0x757184, 0x00e8d63e);
++ wreg32_idx_byteoffset(adev, 0x57174, 0x04538e50);
++ wreg32_idx_byteoffset(adev, 0x157174, 0x04538e50);
++ wreg32_idx_byteoffset(adev, 0x257174, 0x04538e50);
++ wreg32_idx_byteoffset(adev, 0x357174, 0x04538e50);
++ wreg32_idx_byteoffset(adev, 0x457174, 0x04538e50);
++ wreg32_idx_byteoffset(adev, 0x557174, 0x04538e50);
++ wreg32_idx_byteoffset(adev, 0x657174, 0x04538e50);
++ wreg32_idx_byteoffset(adev, 0x757174, 0x04538e50);
++ wreg32_idx_byteoffset(adev, 0x57188, 0x04493f6b);
++ wreg32_idx_byteoffset(adev, 0x157188, 0x04493f6b);
++ wreg32_idx_byteoffset(adev, 0x257188, 0x04493f6b);
++ wreg32_idx_byteoffset(adev, 0x357188, 0x04493f6b);
++ wreg32_idx_byteoffset(adev, 0x457188, 0x04493f6b);
++ wreg32_idx_byteoffset(adev, 0x557188, 0x04493f6b);
++ wreg32_idx_byteoffset(adev, 0x657188, 0x04493f6b);
++ wreg32_idx_byteoffset(adev, 0x757188, 0x04493f6b);
++ wreg32_idx_byteoffset(adev, 0x5718C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x15718C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x25718C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x35718C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x45718C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x55718C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x65718C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x75718C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x571A8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x1571A8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x2571A8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x3571A8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x4571A8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5571A8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x6571A8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x7571A8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x571C4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x1571C4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x2571C4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x3571C4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x4571C4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5571C4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x6571C4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x7571C4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x57198, 0x038a57ee);
++ wreg32_idx_byteoffset(adev, 0x157198, 0x038a57ee);
++ wreg32_idx_byteoffset(adev, 0x257198, 0x038a57ee);
++ wreg32_idx_byteoffset(adev, 0x357198, 0x038a57ee);
++ wreg32_idx_byteoffset(adev, 0x457198, 0x038a57ee);
++ wreg32_idx_byteoffset(adev, 0x557198, 0x038a57ee);
++ wreg32_idx_byteoffset(adev, 0x657198, 0x038a57ee);
++ wreg32_idx_byteoffset(adev, 0x757198, 0x038a57ee);
++ wreg32_idx_byteoffset(adev, 0x571B4, 0x0040c703);
++ wreg32_idx_byteoffset(adev, 0x1571B4, 0x0040c703);
++ wreg32_idx_byteoffset(adev, 0x2571B4, 0x0040c703);
++ wreg32_idx_byteoffset(adev, 0x3571B4, 0x0040c703);
++ wreg32_idx_byteoffset(adev, 0x4571B4, 0x0040c703);
++ wreg32_idx_byteoffset(adev, 0x5571B4, 0x0040c703);
++ wreg32_idx_byteoffset(adev, 0x6571B4, 0x0040c703);
++ wreg32_idx_byteoffset(adev, 0x7571B4, 0x0040c703);
++ wreg32_idx_byteoffset(adev, 0x571D0, 0x0178f557);
++ wreg32_idx_byteoffset(adev, 0x1571D0, 0x0178f557);
++ wreg32_idx_byteoffset(adev, 0x2571D0, 0x0178f557);
++ wreg32_idx_byteoffset(adev, 0x3571D0, 0x0178f557);
++ wreg32_idx_byteoffset(adev, 0x4571D0, 0x0178f557);
++ wreg32_idx_byteoffset(adev, 0x5571D0, 0x0178f557);
++ wreg32_idx_byteoffset(adev, 0x6571D0, 0x0178f557);
++ wreg32_idx_byteoffset(adev, 0x7571D0, 0x0178f557);
++ wreg32_idx_byteoffset(adev, 0x57194, 0x04be3ff0);
++ wreg32_idx_byteoffset(adev, 0x157194, 0x04be3ff0);
++ wreg32_idx_byteoffset(adev, 0x257194, 0x04be3ff0);
++ wreg32_idx_byteoffset(adev, 0x357194, 0x04be3ff0);
++ wreg32_idx_byteoffset(adev, 0x457194, 0x04be3ff0);
++ wreg32_idx_byteoffset(adev, 0x557194, 0x04be3ff0);
++ wreg32_idx_byteoffset(adev, 0x657194, 0x04be3ff0);
++ wreg32_idx_byteoffset(adev, 0x757194, 0x04be3ff0);
++ wreg32_idx_byteoffset(adev, 0x571B0, 0x04673dc5);
++ wreg32_idx_byteoffset(adev, 0x1571B0, 0x04673dc5);
++ wreg32_idx_byteoffset(adev, 0x2571B0, 0x04673dc5);
++ wreg32_idx_byteoffset(adev, 0x3571B0, 0x04673dc5);
++ wreg32_idx_byteoffset(adev, 0x4571B0, 0x04673dc5);
++ wreg32_idx_byteoffset(adev, 0x5571B0, 0x04673dc5);
++ wreg32_idx_byteoffset(adev, 0x6571B0, 0x04673dc5);
++ wreg32_idx_byteoffset(adev, 0x7571B0, 0x04673dc5);
++ wreg32_idx_byteoffset(adev, 0x571CC, 0x05b51842);
++ wreg32_idx_byteoffset(adev, 0x1571CC, 0x05b51842);
++ wreg32_idx_byteoffset(adev, 0x2571CC, 0x05b51842);
++ wreg32_idx_byteoffset(adev, 0x3571CC, 0x05b51842);
++ wreg32_idx_byteoffset(adev, 0x4571CC, 0x05b51842);
++ wreg32_idx_byteoffset(adev, 0x5571CC, 0x05b51842);
++ wreg32_idx_byteoffset(adev, 0x6571CC, 0x05b51842);
++ wreg32_idx_byteoffset(adev, 0x7571CC, 0x05b51842);
++ wreg32_idx_byteoffset(adev, 0x5719C, 0x063c818f);
++ wreg32_idx_byteoffset(adev, 0x15719C, 0x063c818f);
++ wreg32_idx_byteoffset(adev, 0x25719C, 0x063c818f);
++ wreg32_idx_byteoffset(adev, 0x35719C, 0x063c818f);
++ wreg32_idx_byteoffset(adev, 0x45719C, 0x063c818f);
++ wreg32_idx_byteoffset(adev, 0x55719C, 0x063c818f);
++ wreg32_idx_byteoffset(adev, 0x65719C, 0x063c818f);
++ wreg32_idx_byteoffset(adev, 0x75719C, 0x063c818f);
++ wreg32_idx_byteoffset(adev, 0x571B8, 0x07512627);
++ wreg32_idx_byteoffset(adev, 0x1571B8, 0x07512627);
++ wreg32_idx_byteoffset(adev, 0x2571B8, 0x07512627);
++ wreg32_idx_byteoffset(adev, 0x3571B8, 0x07512627);
++ wreg32_idx_byteoffset(adev, 0x4571B8, 0x07512627);
++ wreg32_idx_byteoffset(adev, 0x5571B8, 0x07512627);
++ wreg32_idx_byteoffset(adev, 0x6571B8, 0x07512627);
++ wreg32_idx_byteoffset(adev, 0x7571B8, 0x07512627);
++ wreg32_idx_byteoffset(adev, 0x571D4, 0x07473b3c);
++ wreg32_idx_byteoffset(adev, 0x1571D4, 0x07473b3c);
++ wreg32_idx_byteoffset(adev, 0x2571D4, 0x07473b3c);
++ wreg32_idx_byteoffset(adev, 0x3571D4, 0x07473b3c);
++ wreg32_idx_byteoffset(adev, 0x4571D4, 0x07473b3c);
++ wreg32_idx_byteoffset(adev, 0x5571D4, 0x07473b3c);
++ wreg32_idx_byteoffset(adev, 0x6571D4, 0x07473b3c);
++ wreg32_idx_byteoffset(adev, 0x7571D4, 0x07473b3c);
++ wreg32_idx_byteoffset(adev, 0x571A0, 0x03dbd9ed);
++ wreg32_idx_byteoffset(adev, 0x1571A0, 0x03dbd9ed);
++ wreg32_idx_byteoffset(adev, 0x2571A0, 0x03dbd9ed);
++ wreg32_idx_byteoffset(adev, 0x3571A0, 0x03dbd9ed);
++ wreg32_idx_byteoffset(adev, 0x4571A0, 0x03dbd9ed);
++ wreg32_idx_byteoffset(adev, 0x5571A0, 0x03dbd9ed);
++ wreg32_idx_byteoffset(adev, 0x6571A0, 0x03dbd9ed);
++ wreg32_idx_byteoffset(adev, 0x7571A0, 0x03dbd9ed);
++ wreg32_idx_byteoffset(adev, 0x571BC, 0x03e3b861);
++ wreg32_idx_byteoffset(adev, 0x1571BC, 0x03e3b861);
++ wreg32_idx_byteoffset(adev, 0x2571BC, 0x03e3b861);
++ wreg32_idx_byteoffset(adev, 0x3571BC, 0x03e3b861);
++ wreg32_idx_byteoffset(adev, 0x4571BC, 0x03e3b861);
++ wreg32_idx_byteoffset(adev, 0x5571BC, 0x03e3b861);
++ wreg32_idx_byteoffset(adev, 0x6571BC, 0x03e3b861);
++ wreg32_idx_byteoffset(adev, 0x7571BC, 0x03e3b861);
++ wreg32_idx_byteoffset(adev, 0x571D8, 0x043c67fe);
++ wreg32_idx_byteoffset(adev, 0x1571D8, 0x043c67fe);
++ wreg32_idx_byteoffset(adev, 0x2571D8, 0x043c67fe);
++ wreg32_idx_byteoffset(adev, 0x3571D8, 0x043c67fe);
++ wreg32_idx_byteoffset(adev, 0x4571D8, 0x043c67fe);
++ wreg32_idx_byteoffset(adev, 0x5571D8, 0x043c67fe);
++ wreg32_idx_byteoffset(adev, 0x6571D8, 0x043c67fe);
++ wreg32_idx_byteoffset(adev, 0x7571D8, 0x043c67fe);
++ wreg32_idx_byteoffset(adev, 0x57190, 0x050be7f7);
++ wreg32_idx_byteoffset(adev, 0x157190, 0x050be7f7);
++ wreg32_idx_byteoffset(adev, 0x257190, 0x050be7f7);
++ wreg32_idx_byteoffset(adev, 0x357190, 0x050be7f7);
++ wreg32_idx_byteoffset(adev, 0x457190, 0x050be7f7);
++ wreg32_idx_byteoffset(adev, 0x557190, 0x050be7f7);
++ wreg32_idx_byteoffset(adev, 0x657190, 0x050be7f7);
++ wreg32_idx_byteoffset(adev, 0x757190, 0x050be7f7);
++ wreg32_idx_byteoffset(adev, 0x571AC, 0x00bd2fdb);
++ wreg32_idx_byteoffset(adev, 0x1571AC, 0x00bd2fdb);
++ wreg32_idx_byteoffset(adev, 0x2571AC, 0x00bd2fdb);
++ wreg32_idx_byteoffset(adev, 0x3571AC, 0x00bd2fdb);
++ wreg32_idx_byteoffset(adev, 0x4571AC, 0x00bd2fdb);
++ wreg32_idx_byteoffset(adev, 0x5571AC, 0x00bd2fdb);
++ wreg32_idx_byteoffset(adev, 0x6571AC, 0x00bd2fdb);
++ wreg32_idx_byteoffset(adev, 0x7571AC, 0x00bd2fdb);
++ wreg32_idx_byteoffset(adev, 0x571C8, 0x018411be);
++ wreg32_idx_byteoffset(adev, 0x1571C8, 0x018411be);
++ wreg32_idx_byteoffset(adev, 0x2571C8, 0x018411be);
++ wreg32_idx_byteoffset(adev, 0x3571C8, 0x018411be);
++ wreg32_idx_byteoffset(adev, 0x4571C8, 0x018411be);
++ wreg32_idx_byteoffset(adev, 0x5571C8, 0x018411be);
++ wreg32_idx_byteoffset(adev, 0x6571C8, 0x018411be);
++ wreg32_idx_byteoffset(adev, 0x7571C8, 0x018411be);
++ wreg32_idx_byteoffset(adev, 0x571A4, 0x0103733f);
++ wreg32_idx_byteoffset(adev, 0x1571A4, 0x0103733f);
++ wreg32_idx_byteoffset(adev, 0x2571A4, 0x0103733f);
++ wreg32_idx_byteoffset(adev, 0x3571A4, 0x0103733f);
++ wreg32_idx_byteoffset(adev, 0x4571A4, 0x0103733f);
++ wreg32_idx_byteoffset(adev, 0x5571A4, 0x0103733f);
++ wreg32_idx_byteoffset(adev, 0x6571A4, 0x0103733f);
++ wreg32_idx_byteoffset(adev, 0x7571A4, 0x0103733f);
++ wreg32_idx_byteoffset(adev, 0x571C0, 0x03e82c2d);
++ wreg32_idx_byteoffset(adev, 0x1571C0, 0x03e82c2d);
++ wreg32_idx_byteoffset(adev, 0x2571C0, 0x03e82c2d);
++ wreg32_idx_byteoffset(adev, 0x3571C0, 0x03e82c2d);
++ wreg32_idx_byteoffset(adev, 0x4571C0, 0x03e82c2d);
++ wreg32_idx_byteoffset(adev, 0x5571C0, 0x03e82c2d);
++ wreg32_idx_byteoffset(adev, 0x6571C0, 0x03e82c2d);
++ wreg32_idx_byteoffset(adev, 0x7571C0, 0x03e82c2d);
++ wreg32_idx_byteoffset(adev, 0x571DC, 0x01886ca5);
++ wreg32_idx_byteoffset(adev, 0x1571DC, 0x01886ca5);
++ wreg32_idx_byteoffset(adev, 0x2571DC, 0x01886ca5);
++ wreg32_idx_byteoffset(adev, 0x3571DC, 0x01886ca5);
++ wreg32_idx_byteoffset(adev, 0x4571DC, 0x01886ca5);
++ wreg32_idx_byteoffset(adev, 0x5571DC, 0x01886ca5);
++ wreg32_idx_byteoffset(adev, 0x6571DC, 0x01886ca5);
++ wreg32_idx_byteoffset(adev, 0x7571DC, 0x01886ca5);
++ wreg32_idx_byteoffset(adev, 0x57020, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x157020, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x257020, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x357020, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x457020, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x557020, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x657020, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x757020, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x57010, 0x00000004);
++ wreg32_idx_byteoffset(adev, 0x157010, 0x00000004);
++ wreg32_idx_byteoffset(adev, 0x257010, 0x00000004);
++ wreg32_idx_byteoffset(adev, 0x357010, 0x00000004);
++ wreg32_idx_byteoffset(adev, 0x457010, 0x00000004);
++ wreg32_idx_byteoffset(adev, 0x557010, 0x00000004);
++ wreg32_idx_byteoffset(adev, 0x657010, 0x00000004);
++ wreg32_idx_byteoffset(adev, 0x757010, 0x00000004);
++ wreg32_idx_byteoffset(adev, 0x5721C, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x50104, 0x30408008);
++ wreg32_idx_byteoffset(adev, 0x52104, 0x30408008);
++ wreg32_idx_byteoffset(adev, 0x54104, 0x30408008);
++ wreg32_idx_byteoffset(adev, 0x56104, 0x30408008);
++ wreg32_idx_byteoffset(adev, 0x150104, 0x30408008);
++ wreg32_idx_byteoffset(adev, 0x152104, 0x30408008);
++ wreg32_idx_byteoffset(adev, 0x154104, 0x30408008);
++ wreg32_idx_byteoffset(adev, 0x156104, 0x30408008);
++ wreg32_idx_byteoffset(adev, 0x250104, 0x30408008);
++ wreg32_idx_byteoffset(adev, 0x252104, 0x30408008);
++ wreg32_idx_byteoffset(adev, 0x254104, 0x30408008);
++ wreg32_idx_byteoffset(adev, 0x256104, 0x30408008);
++ wreg32_idx_byteoffset(adev, 0x350104, 0x30408008);
++ wreg32_idx_byteoffset(adev, 0x352104, 0x30408008);
++ wreg32_idx_byteoffset(adev, 0x354104, 0x30408008);
++ wreg32_idx_byteoffset(adev, 0x356104, 0x30408008);
++ wreg32_idx_byteoffset(adev, 0x450104, 0x30408008);
++ wreg32_idx_byteoffset(adev, 0x452104, 0x30408008);
++ wreg32_idx_byteoffset(adev, 0x454104, 0x30408008);
++ wreg32_idx_byteoffset(adev, 0x456104, 0x30408008);
++ wreg32_idx_byteoffset(adev, 0x550104, 0x30408008);
++ wreg32_idx_byteoffset(adev, 0x552104, 0x30408008);
++ wreg32_idx_byteoffset(adev, 0x554104, 0x30408008);
++ wreg32_idx_byteoffset(adev, 0x556104, 0x30408008);
++ wreg32_idx_byteoffset(adev, 0x650104, 0x30408008);
++ wreg32_idx_byteoffset(adev, 0x652104, 0x30408008);
++ wreg32_idx_byteoffset(adev, 0x654104, 0x30408008);
++ wreg32_idx_byteoffset(adev, 0x656104, 0x30408008);
++ wreg32_idx_byteoffset(adev, 0x750104, 0x30408008);
++ wreg32_idx_byteoffset(adev, 0x752104, 0x30408008);
++ wreg32_idx_byteoffset(adev, 0x754104, 0x30408008);
++ wreg32_idx_byteoffset(adev, 0x756104, 0x30408008);
++ wreg32_idx_byteoffset(adev, 0x50104, 0xb0408008);
++ wreg32_idx_byteoffset(adev, 0x52104, 0xb0408008);
++ wreg32_idx_byteoffset(adev, 0x54104, 0xb0408008);
++ wreg32_idx_byteoffset(adev, 0x56104, 0xb0408008);
++ wreg32_idx_byteoffset(adev, 0x150104, 0xb0408008);
++ wreg32_idx_byteoffset(adev, 0x152104, 0xb0408008);
++ wreg32_idx_byteoffset(adev, 0x154104, 0xb0408008);
++ wreg32_idx_byteoffset(adev, 0x156104, 0xb0408008);
++ wreg32_idx_byteoffset(adev, 0x250104, 0xb0408008);
++ wreg32_idx_byteoffset(adev, 0x252104, 0xb0408008);
++ wreg32_idx_byteoffset(adev, 0x254104, 0xb0408008);
++ wreg32_idx_byteoffset(adev, 0x256104, 0xb0408008);
++ wreg32_idx_byteoffset(adev, 0x350104, 0xb0408008);
++ wreg32_idx_byteoffset(adev, 0x352104, 0xb0408008);
++ wreg32_idx_byteoffset(adev, 0x354104, 0xb0408008);
++ wreg32_idx_byteoffset(adev, 0x356104, 0xb0408008);
++ wreg32_idx_byteoffset(adev, 0x450104, 0xb0408008);
++ wreg32_idx_byteoffset(adev, 0x452104, 0xb0408008);
++ wreg32_idx_byteoffset(adev, 0x454104, 0xb0408008);
++ wreg32_idx_byteoffset(adev, 0x456104, 0xb0408008);
++ wreg32_idx_byteoffset(adev, 0x550104, 0xb0408008);
++ wreg32_idx_byteoffset(adev, 0x552104, 0xb0408008);
++ wreg32_idx_byteoffset(adev, 0x554104, 0xb0408008);
++ wreg32_idx_byteoffset(adev, 0x556104, 0xb0408008);
++ wreg32_idx_byteoffset(adev, 0x650104, 0xb0408008);
++ wreg32_idx_byteoffset(adev, 0x652104, 0xb0408008);
++ wreg32_idx_byteoffset(adev, 0x654104, 0xb0408008);
++ wreg32_idx_byteoffset(adev, 0x656104, 0xb0408008);
++ wreg32_idx_byteoffset(adev, 0x750104, 0xb0408008);
++ wreg32_idx_byteoffset(adev, 0x752104, 0xb0408008);
++ wreg32_idx_byteoffset(adev, 0x754104, 0xb0408008);
++ wreg32_idx_byteoffset(adev, 0x756104, 0xb0408008);
++ wreg32_idx_byteoffset(adev, 0x50100, 0x00000103);
++ wreg32_idx_byteoffset(adev, 0x52100, 0x00000103);
++ wreg32_idx_byteoffset(adev, 0x54100, 0x00000103);
++ wreg32_idx_byteoffset(adev, 0x56100, 0x00000103);
++ wreg32_idx_byteoffset(adev, 0x150100, 0x00000103);
++ wreg32_idx_byteoffset(adev, 0x152100, 0x00000103);
++ wreg32_idx_byteoffset(adev, 0x154100, 0x00000103);
++ wreg32_idx_byteoffset(adev, 0x156100, 0x00000103);
++ wreg32_idx_byteoffset(adev, 0x250100, 0x00000103);
++ wreg32_idx_byteoffset(adev, 0x252100, 0x00000103);
++ wreg32_idx_byteoffset(adev, 0x254100, 0x00000103);
++ wreg32_idx_byteoffset(adev, 0x256100, 0x00000103);
++ wreg32_idx_byteoffset(adev, 0x350100, 0x00000103);
++ wreg32_idx_byteoffset(adev, 0x352100, 0x00000103);
++ wreg32_idx_byteoffset(adev, 0x354100, 0x00000103);
++ wreg32_idx_byteoffset(adev, 0x356100, 0x00000103);
++ wreg32_idx_byteoffset(adev, 0x450100, 0x00000103);
++ wreg32_idx_byteoffset(adev, 0x452100, 0x00000103);
++ wreg32_idx_byteoffset(adev, 0x454100, 0x00000103);
++ wreg32_idx_byteoffset(adev, 0x456100, 0x00000103);
++ wreg32_idx_byteoffset(adev, 0x550100, 0x00000103);
++ wreg32_idx_byteoffset(adev, 0x552100, 0x00000103);
++ wreg32_idx_byteoffset(adev, 0x554100, 0x00000103);
++ wreg32_idx_byteoffset(adev, 0x556100, 0x00000103);
++ wreg32_idx_byteoffset(adev, 0x650100, 0x00000103);
++ wreg32_idx_byteoffset(adev, 0x652100, 0x00000103);
++ wreg32_idx_byteoffset(adev, 0x654100, 0x00000103);
++ wreg32_idx_byteoffset(adev, 0x656100, 0x00000103);
++ wreg32_idx_byteoffset(adev, 0x750100, 0x00000103);
++ wreg32_idx_byteoffset(adev, 0x752100, 0x00000103);
++ wreg32_idx_byteoffset(adev, 0x754100, 0x00000103);
++ wreg32_idx_byteoffset(adev, 0x756100, 0x00000103);
++ wreg32_idx_byteoffset(adev, 0x50000, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x52000, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x54000, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x56000, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x150000, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x152000, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x154000, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x156000, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x250000, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x252000, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x254000, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x256000, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x350000, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x352000, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x354000, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x356000, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x450000, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x452000, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x454000, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x456000, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x550000, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x552000, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x554000, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x556000, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x650000, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x652000, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x654000, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x656000, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x750000, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x752000, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x754000, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x756000, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x50010, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x52010, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x54010, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x56010, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x150010, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x152010, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x154010, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x156010, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x250010, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x252010, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x254010, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x256010, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x350010, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x352010, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x354010, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x356010, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x450010, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x452010, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x454010, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x456010, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x550010, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x552010, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x554010, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x556010, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x650010, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x652010, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x654010, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x656010, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x750010, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x752010, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x754010, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x756010, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x50004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x52004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x54004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x56004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x150004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x152004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x154004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x156004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x250004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x252004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x254004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x256004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x350004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x352004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x354004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x356004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x450004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x452004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x454004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x456004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x550004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x552004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x554004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x556004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x650004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x652004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x654004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x656004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x750004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x752004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x754004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x756004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x50014, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x52014, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x54014, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x56014, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x150014, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x152014, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x154014, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x156014, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x250014, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x252014, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x254014, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x256014, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x350014, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x352014, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x354014, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x356014, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x450014, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x452014, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x454014, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x456014, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x550014, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x552014, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x554014, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x556014, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x650014, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x652014, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x654014, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x656014, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x750014, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x752014, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x754014, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x756014, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x50030, 0x00100408);
++ wreg32_idx_byteoffset(adev, 0x52030, 0x00100408);
++ wreg32_idx_byteoffset(adev, 0x54030, 0x00100408);
++ wreg32_idx_byteoffset(adev, 0x56030, 0x00100408);
++ wreg32_idx_byteoffset(adev, 0x150030, 0x00100408);
++ wreg32_idx_byteoffset(adev, 0x152030, 0x00100408);
++ wreg32_idx_byteoffset(adev, 0x154030, 0x00100408);
++ wreg32_idx_byteoffset(adev, 0x156030, 0x00100408);
++ wreg32_idx_byteoffset(adev, 0x250030, 0x00100408);
++ wreg32_idx_byteoffset(adev, 0x252030, 0x00100408);
++ wreg32_idx_byteoffset(adev, 0x254030, 0x00100408);
++ wreg32_idx_byteoffset(adev, 0x256030, 0x00100408);
++ wreg32_idx_byteoffset(adev, 0x350030, 0x00100408);
++ wreg32_idx_byteoffset(adev, 0x352030, 0x00100408);
++ wreg32_idx_byteoffset(adev, 0x354030, 0x00100408);
++ wreg32_idx_byteoffset(adev, 0x356030, 0x00100408);
++ wreg32_idx_byteoffset(adev, 0x450030, 0x00100408);
++ wreg32_idx_byteoffset(adev, 0x452030, 0x00100408);
++ wreg32_idx_byteoffset(adev, 0x454030, 0x00100408);
++ wreg32_idx_byteoffset(adev, 0x456030, 0x00100408);
++ wreg32_idx_byteoffset(adev, 0x550030, 0x00100408);
++ wreg32_idx_byteoffset(adev, 0x552030, 0x00100408);
++ wreg32_idx_byteoffset(adev, 0x554030, 0x00100408);
++ wreg32_idx_byteoffset(adev, 0x556030, 0x00100408);
++ wreg32_idx_byteoffset(adev, 0x650030, 0x00100408);
++ wreg32_idx_byteoffset(adev, 0x652030, 0x00100408);
++ wreg32_idx_byteoffset(adev, 0x654030, 0x00100408);
++ wreg32_idx_byteoffset(adev, 0x656030, 0x00100408);
++ wreg32_idx_byteoffset(adev, 0x750030, 0x00100408);
++ wreg32_idx_byteoffset(adev, 0x752030, 0x00100408);
++ wreg32_idx_byteoffset(adev, 0x754030, 0x00100408);
++ wreg32_idx_byteoffset(adev, 0x756030, 0x00100408);
++ wreg32_idx_byteoffset(adev, 0x50020, 0xfffffffe);
++ wreg32_idx_byteoffset(adev, 0x52020, 0xfffffffe);
++ wreg32_idx_byteoffset(adev, 0x54020, 0xfffffffe);
++ wreg32_idx_byteoffset(adev, 0x56020, 0xfffffffe);
++ wreg32_idx_byteoffset(adev, 0x150020, 0xfffffffe);
++ wreg32_idx_byteoffset(adev, 0x152020, 0xfffffffe);
++ wreg32_idx_byteoffset(adev, 0x154020, 0xfffffffe);
++ wreg32_idx_byteoffset(adev, 0x156020, 0xfffffffe);
++ wreg32_idx_byteoffset(adev, 0x250020, 0xfffffffe);
++ wreg32_idx_byteoffset(adev, 0x252020, 0xfffffffe);
++ wreg32_idx_byteoffset(adev, 0x254020, 0xfffffffe);
++ wreg32_idx_byteoffset(adev, 0x256020, 0xfffffffe);
++ wreg32_idx_byteoffset(adev, 0x350020, 0xfffffffe);
++ wreg32_idx_byteoffset(adev, 0x352020, 0xfffffffe);
++ wreg32_idx_byteoffset(adev, 0x354020, 0xfffffffe);
++ wreg32_idx_byteoffset(adev, 0x356020, 0xfffffffe);
++ wreg32_idx_byteoffset(adev, 0x450020, 0xfffffffe);
++ wreg32_idx_byteoffset(adev, 0x452020, 0xfffffffe);
++ wreg32_idx_byteoffset(adev, 0x454020, 0xfffffffe);
++ wreg32_idx_byteoffset(adev, 0x456020, 0xfffffffe);
++ wreg32_idx_byteoffset(adev, 0x550020, 0xfffffffe);
++ wreg32_idx_byteoffset(adev, 0x552020, 0xfffffffe);
++ wreg32_idx_byteoffset(adev, 0x554020, 0xfffffffe);
++ wreg32_idx_byteoffset(adev, 0x556020, 0xfffffffe);
++ wreg32_idx_byteoffset(adev, 0x650020, 0xfffffffe);
++ wreg32_idx_byteoffset(adev, 0x652020, 0xfffffffe);
++ wreg32_idx_byteoffset(adev, 0x654020, 0xfffffffe);
++ wreg32_idx_byteoffset(adev, 0x656020, 0xfffffffe);
++ wreg32_idx_byteoffset(adev, 0x750020, 0xfffffffe);
++ wreg32_idx_byteoffset(adev, 0x752020, 0xfffffffe);
++ wreg32_idx_byteoffset(adev, 0x754020, 0xfffffffe);
++ wreg32_idx_byteoffset(adev, 0x756020, 0xfffffffe);
++ wreg32_idx_byteoffset(adev, 0x50028, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x52028, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x54028, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x56028, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x150028, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x152028, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x154028, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x156028, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x250028, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x252028, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x254028, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x256028, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x350028, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x352028, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x354028, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x356028, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x450028, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x452028, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x454028, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x456028, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x550028, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x552028, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x554028, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x556028, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x650028, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x652028, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x654028, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x656028, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x750028, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x752028, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x754028, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x756028, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x50040, 0x03055376);
++ wreg32_idx_byteoffset(adev, 0x52040, 0x03055376);
++ wreg32_idx_byteoffset(adev, 0x54040, 0x03055376);
++ wreg32_idx_byteoffset(adev, 0x56040, 0x03055376);
++ wreg32_idx_byteoffset(adev, 0x150040, 0x03055376);
++ wreg32_idx_byteoffset(adev, 0x152040, 0x03055376);
++ wreg32_idx_byteoffset(adev, 0x154040, 0x03055376);
++ wreg32_idx_byteoffset(adev, 0x156040, 0x03055376);
++ wreg32_idx_byteoffset(adev, 0x250040, 0x03055376);
++ wreg32_idx_byteoffset(adev, 0x252040, 0x03055376);
++ wreg32_idx_byteoffset(adev, 0x254040, 0x03055376);
++ wreg32_idx_byteoffset(adev, 0x256040, 0x03055376);
++ wreg32_idx_byteoffset(adev, 0x350040, 0x03055376);
++ wreg32_idx_byteoffset(adev, 0x352040, 0x03055376);
++ wreg32_idx_byteoffset(adev, 0x354040, 0x03055376);
++ wreg32_idx_byteoffset(adev, 0x356040, 0x03055376);
++ wreg32_idx_byteoffset(adev, 0x450040, 0x03055376);
++ wreg32_idx_byteoffset(adev, 0x452040, 0x03055376);
++ wreg32_idx_byteoffset(adev, 0x454040, 0x03055376);
++ wreg32_idx_byteoffset(adev, 0x456040, 0x03055376);
++ wreg32_idx_byteoffset(adev, 0x550040, 0x03055376);
++ wreg32_idx_byteoffset(adev, 0x552040, 0x03055376);
++ wreg32_idx_byteoffset(adev, 0x554040, 0x03055376);
++ wreg32_idx_byteoffset(adev, 0x556040, 0x03055376);
++ wreg32_idx_byteoffset(adev, 0x650040, 0x03055376);
++ wreg32_idx_byteoffset(adev, 0x652040, 0x03055376);
++ wreg32_idx_byteoffset(adev, 0x654040, 0x03055376);
++ wreg32_idx_byteoffset(adev, 0x656040, 0x03055376);
++ wreg32_idx_byteoffset(adev, 0x750040, 0x03055376);
++ wreg32_idx_byteoffset(adev, 0x752040, 0x03055376);
++ wreg32_idx_byteoffset(adev, 0x754040, 0x03055376);
++ wreg32_idx_byteoffset(adev, 0x756040, 0x03055376);
++ wreg32_idx_byteoffset(adev, 0x50054, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x52054, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x54054, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x56054, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x150054, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x152054, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x154054, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x156054, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x250054, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x252054, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x254054, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x256054, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x350054, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x352054, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x354054, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x356054, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x450054, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x452054, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x454054, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x456054, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x550054, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x552054, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x554054, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x556054, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x650054, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x652054, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x654054, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x656054, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x750054, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x752054, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x754054, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x756054, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x50050, 0xfe4cb753);
++ wreg32_idx_byteoffset(adev, 0x52050, 0xfe4cb753);
++ wreg32_idx_byteoffset(adev, 0x54050, 0xfe4cb753);
++ wreg32_idx_byteoffset(adev, 0x56050, 0xfe4cb753);
++ wreg32_idx_byteoffset(adev, 0x150050, 0xfe4cb753);
++ wreg32_idx_byteoffset(adev, 0x152050, 0xfe4cb753);
++ wreg32_idx_byteoffset(adev, 0x154050, 0xfe4cb753);
++ wreg32_idx_byteoffset(adev, 0x156050, 0xfe4cb753);
++ wreg32_idx_byteoffset(adev, 0x250050, 0xfe4cb753);
++ wreg32_idx_byteoffset(adev, 0x252050, 0xfe4cb753);
++ wreg32_idx_byteoffset(adev, 0x254050, 0xfe4cb753);
++ wreg32_idx_byteoffset(adev, 0x256050, 0xfe4cb753);
++ wreg32_idx_byteoffset(adev, 0x350050, 0xfe4cb753);
++ wreg32_idx_byteoffset(adev, 0x352050, 0xfe4cb753);
++ wreg32_idx_byteoffset(adev, 0x354050, 0xfe4cb753);
++ wreg32_idx_byteoffset(adev, 0x356050, 0xfe4cb753);
++ wreg32_idx_byteoffset(adev, 0x450050, 0xfe4cb753);
++ wreg32_idx_byteoffset(adev, 0x452050, 0xfe4cb753);
++ wreg32_idx_byteoffset(adev, 0x454050, 0xfe4cb753);
++ wreg32_idx_byteoffset(adev, 0x456050, 0xfe4cb753);
++ wreg32_idx_byteoffset(adev, 0x550050, 0xfe4cb753);
++ wreg32_idx_byteoffset(adev, 0x552050, 0xfe4cb753);
++ wreg32_idx_byteoffset(adev, 0x554050, 0xfe4cb753);
++ wreg32_idx_byteoffset(adev, 0x556050, 0xfe4cb753);
++ wreg32_idx_byteoffset(adev, 0x650050, 0xfe4cb753);
++ wreg32_idx_byteoffset(adev, 0x652050, 0xfe4cb753);
++ wreg32_idx_byteoffset(adev, 0x654050, 0xfe4cb753);
++ wreg32_idx_byteoffset(adev, 0x656050, 0xfe4cb753);
++ wreg32_idx_byteoffset(adev, 0x750050, 0xfe4cb753);
++ wreg32_idx_byteoffset(adev, 0x752050, 0xfe4cb753);
++ wreg32_idx_byteoffset(adev, 0x754050, 0xfe4cb753);
++ wreg32_idx_byteoffset(adev, 0x756050, 0xfe4cb753);
++ wreg32_idx_byteoffset(adev, 0x500C8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x520C8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x540C8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x560C8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x1500C8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x1520C8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x1540C8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x1560C8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x2500C8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x2520C8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x2540C8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x2560C8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x3500C8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x3520C8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x3540C8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x3560C8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x4500C8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x4520C8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x4540C8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x4560C8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5500C8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5520C8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5540C8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5560C8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x6500C8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x6520C8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x6540C8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x6560C8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x7500C8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x7520C8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x7540C8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x7560C8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x500CC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x520CC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x540CC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x560CC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x1500CC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x1520CC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x1540CC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x1560CC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x2500CC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x2520CC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x2540CC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x2560CC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x3500CC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x3520CC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x3540CC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x3560CC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x4500CC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x4520CC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x4540CC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x4560CC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5500CC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5520CC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5540CC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5560CC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x6500CC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x6520CC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x6540CC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x6560CC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x7500CC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x7520CC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x7540CC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x7560CC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x500D0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x520D0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x540D0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x560D0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x1500D0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x1520D0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x1540D0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x1560D0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x2500D0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x2520D0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x2540D0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x2560D0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x3500D0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x3520D0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x3540D0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x3560D0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x4500D0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x4520D0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x4540D0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x4560D0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5500D0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5520D0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5540D0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5560D0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x6500D0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x6520D0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x6540D0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x6560D0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x7500D0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x7520D0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x7540D0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x7560D0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x500D4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x520D4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x540D4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x560D4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x1500D4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x1520D4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x1540D4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x1560D4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x2500D4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x2520D4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x2540D4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x2560D4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x3500D4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x3520D4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x3540D4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x3560D4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x4500D4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x4520D4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x4540D4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x4560D4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5500D4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5520D4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5540D4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5560D4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x6500D4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x6520D4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x6540D4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x6560D4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x7500D4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x7520D4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x7540D4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x7560D4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x500D8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x520D8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x540D8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x560D8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x1500D8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x1520D8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x1540D8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x1560D8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x2500D8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x2520D8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x2540D8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x2560D8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x3500D8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x3520D8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x3540D8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x3560D8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x4500D8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x4520D8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x4540D8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x4560D8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5500D8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5520D8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5540D8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5560D8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x6500D8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x6520D8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x6540D8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x6560D8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x7500D8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x7520D8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x7540D8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x7560D8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x500E8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x520E8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x540E8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x560E8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x1500E8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x1520E8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x1540E8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x1560E8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x2500E8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x2520E8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x2540E8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x2560E8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x3500E8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x3520E8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x3540E8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x3560E8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x4500E8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x4520E8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x4540E8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x4560E8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5500E8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5520E8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5540E8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5560E8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x6500E8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x6520E8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x6540E8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x6560E8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x7500E8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x7520E8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x7540E8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x7560E8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x500E0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x520E0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x540E0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x560E0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x1500E0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x1520E0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x1540E0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x1560E0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x2500E0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x2520E0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x2540E0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x2560E0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x3500E0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x3520E0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x3540E0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x3560E0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x4500E0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x4520E0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x4540E0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x4560E0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5500E0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5520E0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5540E0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5560E0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x6500E0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x6520E0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x6540E0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x6560E0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x7500E0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x7520E0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x7540E0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x7560E0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x500E4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x520E4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x540E4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x560E4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x1500E4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x1520E4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x1540E4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x1560E4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x2500E4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x2520E4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x2540E4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x2560E4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x3500E4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x3520E4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x3540E4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x3560E4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x4500E4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x4520E4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x4540E4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x4560E4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5500E4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5520E4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5540E4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5560E4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x6500E4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x6520E4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x6540E4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x6560E4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x7500E4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x7520E4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x7540E4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x7560E4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x50C14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x52C14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x54C14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x56C14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x150C14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x152C14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x154C14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x156C14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x250C14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x252C14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x254C14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x256C14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x350C14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x352C14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x354C14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x356C14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x450C14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x452C14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x454C14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x456C14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x550C14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x552C14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x554C14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x556C14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x650C14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x652C14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x654C14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x656C14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x750C14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x752C14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x754C14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x756C14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x50204, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x52204, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x54204, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x56204, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x150204, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x152204, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x154204, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x156204, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x250204, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x252204, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x254204, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x256204, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x350204, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x352204, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x354204, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x356204, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x450204, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x452204, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x454204, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x456204, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x550204, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x552204, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x554204, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x556204, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x650204, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x652204, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x654204, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x656204, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x750204, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x752204, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x754204, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x756204, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x50208, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x52208, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x54208, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x56208, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x150208, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x152208, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x154208, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x156208, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x250208, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x252208, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x254208, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x256208, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x350208, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x352208, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x354208, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x356208, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x450208, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x452208, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x454208, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x456208, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x550208, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x552208, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x554208, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x556208, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x650208, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x652208, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x654208, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x656208, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x750208, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x752208, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x754208, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x756208, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x5020C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x5220C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x5420C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x5620C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x15020C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x15220C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x15420C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x15620C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x25020C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x25220C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x25420C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x25620C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x35020C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x35220C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x35420C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x35620C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x45020C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x45220C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x45420C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x45620C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x55020C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x55220C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x55420C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x55620C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x65020C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x65220C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x65420C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x65620C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x75020C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x75220C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x75420C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x75620C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x50210, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x52210, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x54210, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x56210, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x150210, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x152210, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x154210, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x156210, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x250210, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x252210, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x254210, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x256210, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x350210, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x352210, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x354210, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x356210, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x450210, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x452210, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x454210, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x456210, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x550210, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x552210, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x554210, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x556210, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x650210, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x652210, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x654210, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x656210, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x750210, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x752210, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x754210, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x756210, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x50214, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x52214, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x54214, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x56214, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x150214, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x152214, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x154214, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x156214, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x250214, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x252214, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x254214, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x256214, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x350214, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x352214, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x354214, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x356214, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x450214, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x452214, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x454214, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x456214, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x550214, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x552214, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x554214, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x556214, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x650214, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x652214, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x654214, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x656214, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x750214, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x752214, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x754214, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x756214, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x50218, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x52218, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x54218, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x56218, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x150218, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x152218, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x154218, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x156218, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x250218, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x252218, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x254218, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x256218, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x350218, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x352218, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x354218, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x356218, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x450218, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x452218, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x454218, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x456218, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x550218, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x552218, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x554218, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x556218, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x650218, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x652218, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x654218, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x656218, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x750218, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x752218, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x754218, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x756218, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x5021C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5221C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5421C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5621C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x15021C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x15221C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x15421C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x15621C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x25021C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x25221C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x25421C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x25621C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x35021C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x35221C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x35421C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x35621C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x45021C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x45221C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x45421C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x45621C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x55021C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x55221C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x55421C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x55621C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x65021C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x65221C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x65421C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x65621C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x75021C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x75221C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x75421C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x75621C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x50220, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x52220, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x54220, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x56220, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x150220, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x152220, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x154220, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x156220, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x250220, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x252220, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x254220, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x256220, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x350220, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x352220, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x354220, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x356220, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x450220, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x452220, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x454220, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x456220, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x550220, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x552220, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x554220, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x556220, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x650220, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x652220, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x654220, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x656220, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x750220, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x752220, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x754220, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x756220, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x50224, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x52224, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x54224, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x56224, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x150224, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x152224, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x154224, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x156224, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x250224, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x252224, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x254224, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x256224, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x350224, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x352224, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x354224, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x356224, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x450224, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x452224, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x454224, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x456224, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x550224, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x552224, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x554224, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x556224, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x650224, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x652224, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x654224, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x656224, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x750224, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x752224, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x754224, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x756224, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x50228, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x52228, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x54228, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x56228, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x150228, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x152228, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x154228, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x156228, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x250228, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x252228, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x254228, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x256228, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x350228, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x352228, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x354228, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x356228, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x450228, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x452228, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x454228, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x456228, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x550228, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x552228, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x554228, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x556228, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x650228, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x652228, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x654228, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x656228, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x750228, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x752228, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x754228, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x756228, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x50230, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x52230, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x54230, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x56230, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x150230, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x152230, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x154230, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x156230, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x250230, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x252230, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x254230, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x256230, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x350230, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x352230, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x354230, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x356230, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x450230, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x452230, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x454230, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x456230, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x550230, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x552230, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x554230, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x556230, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x650230, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x652230, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x654230, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x656230, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x750230, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x752230, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x754230, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x756230, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x50234, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x52234, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x54234, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x56234, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x150234, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x152234, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x154234, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x156234, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x250234, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x252234, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x254234, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x256234, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x350234, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x352234, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x354234, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x356234, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x450234, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x452234, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x454234, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x456234, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x550234, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x552234, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x554234, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x556234, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x650234, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x652234, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x654234, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x656234, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x750234, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x752234, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x754234, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x756234, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x50238, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x52238, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x54238, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x56238, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x150238, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x152238, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x154238, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x156238, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x250238, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x252238, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x254238, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x256238, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x350238, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x352238, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x354238, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x356238, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x450238, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x452238, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x454238, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x456238, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x550238, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x552238, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x554238, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x556238, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x650238, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x652238, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x654238, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x656238, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x750238, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x752238, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x754238, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x756238, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x5023C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5223C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5423C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5623C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x15023C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x15223C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x15423C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x15623C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x25023C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x25223C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x25423C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x25623C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x35023C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x35223C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x35423C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x35623C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x45023C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x45223C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x45423C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x45623C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x55023C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x55223C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x55423C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x55623C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x65023C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x65223C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x65423C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x65623C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x75023C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x75223C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x75423C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x75623C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x50240, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x52240, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x54240, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x56240, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x150240, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x152240, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x154240, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x156240, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x250240, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x252240, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x254240, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x256240, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x350240, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x352240, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x354240, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x356240, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x450240, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x452240, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x454240, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x456240, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x550240, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x552240, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x554240, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x556240, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x650240, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x652240, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x654240, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x656240, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x750240, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x752240, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x754240, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x756240, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x50244, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x52244, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x54244, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x56244, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x150244, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x152244, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x154244, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x156244, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x250244, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x252244, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x254244, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x256244, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x350244, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x352244, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x354244, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x356244, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x450244, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x452244, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x454244, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x456244, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x550244, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x552244, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x554244, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x556244, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x650244, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x652244, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x654244, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x656244, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x750244, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x752244, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x754244, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x756244, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x50250, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x52250, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x54250, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x56250, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x150250, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x152250, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x154250, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x156250, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x250250, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x252250, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x254250, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x256250, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x350250, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x352250, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x354250, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x356250, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x450250, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x452250, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x454250, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x456250, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x550250, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x552250, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x554250, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x556250, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x650250, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x652250, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x654250, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x656250, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x750250, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x752250, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x754250, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x756250, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x50254, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x52254, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x54254, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x56254, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x150254, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x152254, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x154254, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x156254, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x250254, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x252254, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x254254, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x256254, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x350254, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x352254, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x354254, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x356254, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x450254, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x452254, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x454254, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x456254, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x550254, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x552254, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x554254, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x556254, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x650254, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x652254, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x654254, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x656254, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x750254, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x752254, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x754254, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x756254, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x50258, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x52258, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x54258, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x56258, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x150258, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x152258, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x154258, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x156258, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x250258, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x252258, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x254258, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x256258, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x350258, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x352258, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x354258, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x356258, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x450258, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x452258, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x454258, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x456258, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x550258, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x552258, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x554258, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x556258, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x650258, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x652258, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x654258, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x656258, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x750258, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x752258, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x754258, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x756258, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x5025C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x5225C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x5425C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x5625C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x15025C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x15225C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x15425C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x15625C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x25025C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x25225C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x25425C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x25625C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x35025C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x35225C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x35425C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x35625C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x45025C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x45225C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x45425C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x45625C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x55025C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x55225C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x55425C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x55625C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x65025C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x65225C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x65425C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x65625C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x75025C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x75225C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x75425C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x75625C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x50304, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x52304, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x54304, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x56304, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x150304, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x152304, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x154304, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x156304, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x250304, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x252304, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x254304, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x256304, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x350304, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x352304, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x354304, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x356304, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x450304, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x452304, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x454304, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x456304, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x550304, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x552304, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x554304, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x556304, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x650304, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x652304, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x654304, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x656304, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x750304, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x752304, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x754304, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x756304, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x50308, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x52308, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x54308, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x56308, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x150308, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x152308, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x154308, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x156308, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x250308, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x252308, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x254308, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x256308, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x350308, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x352308, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x354308, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x356308, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x450308, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x452308, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x454308, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x456308, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x550308, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x552308, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x554308, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x556308, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x650308, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x652308, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x654308, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x656308, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x750308, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x752308, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x754308, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x756308, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x5030C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x5230C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x5430C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x5630C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x15030C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x15230C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x15430C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x15630C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x25030C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x25230C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x25430C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x25630C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x35030C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x35230C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x35430C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x35630C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x45030C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x45230C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x45430C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x45630C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x55030C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x55230C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x55430C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x55630C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x65030C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x65230C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x65430C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x65630C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x75030C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x75230C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x75430C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x75630C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x50310, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x52310, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x54310, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x56310, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x150310, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x152310, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x154310, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x156310, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x250310, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x252310, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x254310, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x256310, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x350310, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x352310, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x354310, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x356310, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x450310, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x452310, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x454310, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x456310, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x550310, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x552310, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x554310, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x556310, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x650310, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x652310, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x654310, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x656310, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x750310, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x752310, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x754310, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x756310, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x50314, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x52314, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x54314, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x56314, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x150314, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x152314, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x154314, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x156314, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x250314, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x252314, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x254314, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x256314, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x350314, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x352314, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x354314, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x356314, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x450314, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x452314, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x454314, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x456314, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x550314, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x552314, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x554314, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x556314, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x650314, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x652314, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x654314, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x656314, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x750314, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x752314, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x754314, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x756314, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x50318, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x52318, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x54318, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x56318, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x150318, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x152318, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x154318, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x156318, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x250318, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x252318, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x254318, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x256318, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x350318, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x352318, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x354318, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x356318, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x450318, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x452318, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x454318, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x456318, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x550318, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x552318, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x554318, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x556318, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x650318, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x652318, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x654318, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x656318, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x750318, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x752318, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x754318, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x756318, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x5031C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5231C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5431C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5631C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x15031C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x15231C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x15431C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x15631C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x25031C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x25231C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x25431C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x25631C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x35031C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x35231C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x35431C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x35631C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x45031C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x45231C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x45431C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x45631C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x55031C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x55231C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x55431C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x55631C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x65031C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x65231C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x65431C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x65631C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x75031C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x75231C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x75431C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x75631C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x50320, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x52320, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x54320, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x56320, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x150320, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x152320, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x154320, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x156320, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x250320, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x252320, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x254320, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x256320, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x350320, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x352320, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x354320, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x356320, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x450320, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x452320, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x454320, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x456320, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x550320, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x552320, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x554320, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x556320, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x650320, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x652320, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x654320, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x656320, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x750320, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x752320, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x754320, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x756320, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x50324, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x52324, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x54324, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x56324, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x150324, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x152324, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x154324, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x156324, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x250324, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x252324, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x254324, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x256324, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x350324, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x352324, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x354324, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x356324, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x450324, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x452324, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x454324, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x456324, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x550324, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x552324, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x554324, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x556324, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x650324, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x652324, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x654324, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x656324, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x750324, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x752324, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x754324, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x756324, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x50328, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x52328, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x54328, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x56328, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x150328, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x152328, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x154328, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x156328, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x250328, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x252328, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x254328, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x256328, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x350328, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x352328, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x354328, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x356328, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x450328, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x452328, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x454328, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x456328, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x550328, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x552328, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x554328, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x556328, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x650328, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x652328, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x654328, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x656328, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x750328, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x752328, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x754328, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x756328, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x50330, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x52330, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x54330, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x56330, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x150330, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x152330, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x154330, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x156330, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x250330, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x252330, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x254330, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x256330, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x350330, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x352330, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x354330, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x356330, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x450330, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x452330, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x454330, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x456330, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x550330, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x552330, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x554330, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x556330, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x650330, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x652330, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x654330, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x656330, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x750330, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x752330, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x754330, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x756330, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x50334, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x52334, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x54334, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x56334, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x150334, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x152334, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x154334, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x156334, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x250334, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x252334, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x254334, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x256334, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x350334, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x352334, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x354334, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x356334, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x450334, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x452334, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x454334, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x456334, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x550334, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x552334, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x554334, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x556334, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x650334, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x652334, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x654334, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x656334, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x750334, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x752334, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x754334, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x756334, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x50338, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x52338, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x54338, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x56338, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x150338, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x152338, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x154338, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x156338, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x250338, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x252338, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x254338, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x256338, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x350338, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x352338, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x354338, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x356338, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x450338, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x452338, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x454338, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x456338, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x550338, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x552338, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x554338, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x556338, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x650338, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x652338, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x654338, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x656338, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x750338, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x752338, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x754338, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x756338, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x5033C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5233C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5433C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5633C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x15033C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x15233C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x15433C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x15633C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x25033C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x25233C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x25433C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x25633C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x35033C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x35233C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x35433C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x35633C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x45033C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x45233C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x45433C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x45633C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x55033C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x55233C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x55433C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x55633C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x65033C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x65233C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x65433C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x65633C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x75033C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x75233C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x75433C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x75633C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x50340, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x52340, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x54340, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x56340, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x150340, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x152340, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x154340, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x156340, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x250340, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x252340, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x254340, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x256340, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x350340, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x352340, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x354340, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x356340, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x450340, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x452340, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x454340, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x456340, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x550340, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x552340, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x554340, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x556340, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x650340, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x652340, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x654340, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x656340, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x750340, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x752340, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x754340, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x756340, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x50344, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x52344, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x54344, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x56344, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x150344, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x152344, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x154344, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x156344, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x250344, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x252344, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x254344, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x256344, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x350344, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x352344, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x354344, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x356344, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x450344, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x452344, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x454344, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x456344, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x550344, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x552344, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x554344, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x556344, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x650344, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x652344, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x654344, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x656344, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x750344, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x752344, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x754344, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x756344, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x50350, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x52350, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x54350, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x56350, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x150350, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x152350, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x154350, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x156350, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x250350, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x252350, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x254350, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x256350, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x350350, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x352350, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x354350, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x356350, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x450350, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x452350, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x454350, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x456350, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x550350, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x552350, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x554350, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x556350, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x650350, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x652350, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x654350, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x656350, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x750350, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x752350, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x754350, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x756350, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x50354, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x52354, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x54354, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x56354, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x150354, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x152354, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x154354, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x156354, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x250354, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x252354, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x254354, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x256354, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x350354, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x352354, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x354354, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x356354, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x450354, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x452354, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x454354, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x456354, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x550354, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x552354, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x554354, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x556354, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x650354, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x652354, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x654354, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x656354, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x750354, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x752354, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x754354, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x756354, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x50358, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x52358, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x54358, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x56358, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x150358, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x152358, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x154358, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x156358, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x250358, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x252358, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x254358, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x256358, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x350358, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x352358, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x354358, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x356358, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x450358, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x452358, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x454358, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x456358, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x550358, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x552358, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x554358, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x556358, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x650358, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x652358, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x654358, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x656358, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x750358, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x752358, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x754358, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x756358, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x5035C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x5235C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x5435C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x5635C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x15035C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x15235C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x15435C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x15635C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x25035C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x25235C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x25435C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x25635C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x35035C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x35235C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x35435C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x35635C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x45035C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x45235C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x45435C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x45635C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x55035C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x55235C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x55435C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x55635C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x65035C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x65235C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x65435C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x65635C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x75035C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x75235C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x75435C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x75635C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x50404, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x52404, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x54404, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x56404, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x150404, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x152404, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x154404, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x156404, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x250404, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x252404, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x254404, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x256404, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x350404, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x352404, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x354404, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x356404, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x450404, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x452404, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x454404, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x456404, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x550404, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x552404, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x554404, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x556404, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x650404, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x652404, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x654404, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x656404, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x750404, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x752404, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x754404, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x756404, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x50408, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x52408, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x54408, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x56408, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x150408, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x152408, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x154408, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x156408, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x250408, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x252408, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x254408, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x256408, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x350408, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x352408, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x354408, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x356408, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x450408, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x452408, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x454408, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x456408, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x550408, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x552408, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x554408, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x556408, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x650408, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x652408, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x654408, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x656408, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x750408, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x752408, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x754408, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x756408, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x5040C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x5240C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x5440C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x5640C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x15040C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x15240C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x15440C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x15640C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x25040C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x25240C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x25440C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x25640C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x35040C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x35240C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x35440C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x35640C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x45040C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x45240C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x45440C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x45640C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x55040C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x55240C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x55440C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x55640C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x65040C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x65240C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x65440C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x65640C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x75040C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x75240C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x75440C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x75640C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x50410, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x52410, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x54410, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x56410, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x150410, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x152410, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x154410, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x156410, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x250410, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x252410, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x254410, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x256410, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x350410, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x352410, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x354410, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x356410, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x450410, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x452410, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x454410, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x456410, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x550410, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x552410, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x554410, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x556410, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x650410, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x652410, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x654410, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x656410, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x750410, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x752410, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x754410, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x756410, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x50414, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x52414, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x54414, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x56414, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x150414, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x152414, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x154414, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x156414, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x250414, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x252414, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x254414, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x256414, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x350414, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x352414, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x354414, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x356414, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x450414, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x452414, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x454414, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x456414, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x550414, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x552414, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x554414, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x556414, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x650414, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x652414, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x654414, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x656414, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x750414, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x752414, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x754414, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x756414, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x50418, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x52418, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x54418, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x56418, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x150418, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x152418, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x154418, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x156418, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x250418, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x252418, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x254418, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x256418, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x350418, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x352418, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x354418, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x356418, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x450418, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x452418, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x454418, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x456418, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x550418, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x552418, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x554418, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x556418, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x650418, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x652418, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x654418, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x656418, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x750418, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x752418, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x754418, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x756418, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x5041C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5241C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5441C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5641C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x15041C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x15241C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x15441C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x15641C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x25041C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x25241C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x25441C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x25641C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x35041C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x35241C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x35441C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x35641C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x45041C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x45241C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x45441C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x45641C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x55041C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x55241C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x55441C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x55641C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x65041C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x65241C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x65441C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x65641C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x75041C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x75241C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x75441C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x75641C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x50420, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x52420, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x54420, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x56420, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x150420, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x152420, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x154420, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x156420, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x250420, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x252420, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x254420, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x256420, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x350420, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x352420, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x354420, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x356420, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x450420, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x452420, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x454420, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x456420, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x550420, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x552420, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x554420, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x556420, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x650420, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x652420, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x654420, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x656420, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x750420, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x752420, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x754420, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x756420, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x50424, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x52424, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x54424, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x56424, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x150424, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x152424, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x154424, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x156424, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x250424, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x252424, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x254424, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x256424, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x350424, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x352424, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x354424, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x356424, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x450424, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x452424, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x454424, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x456424, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x550424, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x552424, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x554424, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x556424, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x650424, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x652424, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x654424, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x656424, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x750424, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x752424, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x754424, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x756424, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x50428, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x52428, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x54428, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x56428, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x150428, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x152428, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x154428, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x156428, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x250428, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x252428, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x254428, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x256428, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x350428, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x352428, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x354428, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x356428, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x450428, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x452428, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x454428, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x456428, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x550428, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x552428, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x554428, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x556428, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x650428, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x652428, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x654428, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x656428, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x750428, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x752428, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x754428, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x756428, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x50430, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x52430, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x54430, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x56430, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x150430, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x152430, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x154430, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x156430, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x250430, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x252430, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x254430, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x256430, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x350430, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x352430, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x354430, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x356430, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x450430, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x452430, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x454430, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x456430, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x550430, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x552430, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x554430, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x556430, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x650430, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x652430, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x654430, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x656430, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x750430, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x752430, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x754430, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x756430, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x50434, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x52434, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x54434, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x56434, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x150434, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x152434, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x154434, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x156434, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x250434, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x252434, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x254434, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x256434, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x350434, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x352434, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x354434, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x356434, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x450434, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x452434, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x454434, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x456434, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x550434, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x552434, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x554434, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x556434, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x650434, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x652434, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x654434, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x656434, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x750434, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x752434, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x754434, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x756434, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x50438, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x52438, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x54438, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x56438, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x150438, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x152438, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x154438, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x156438, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x250438, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x252438, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x254438, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x256438, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x350438, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x352438, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x354438, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x356438, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x450438, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x452438, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x454438, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x456438, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x550438, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x552438, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x554438, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x556438, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x650438, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x652438, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x654438, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x656438, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x750438, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x752438, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x754438, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x756438, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x5043C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5243C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5443C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5643C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x15043C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x15243C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x15443C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x15643C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x25043C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x25243C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x25443C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x25643C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x35043C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x35243C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x35443C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x35643C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x45043C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x45243C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x45443C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x45643C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x55043C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x55243C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x55443C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x55643C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x65043C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x65243C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x65443C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x65643C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x75043C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x75243C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x75443C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x75643C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x50440, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x52440, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x54440, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x56440, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x150440, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x152440, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x154440, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x156440, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x250440, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x252440, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x254440, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x256440, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x350440, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x352440, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x354440, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x356440, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x450440, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x452440, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x454440, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x456440, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x550440, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x552440, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x554440, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x556440, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x650440, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x652440, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x654440, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x656440, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x750440, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x752440, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x754440, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x756440, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x50444, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x52444, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x54444, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x56444, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x150444, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x152444, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x154444, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x156444, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x250444, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x252444, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x254444, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x256444, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x350444, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x352444, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x354444, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x356444, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x450444, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x452444, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x454444, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x456444, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x550444, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x552444, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x554444, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x556444, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x650444, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x652444, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x654444, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x656444, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x750444, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x752444, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x754444, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x756444, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x50450, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x52450, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x54450, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x56450, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x150450, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x152450, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x154450, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x156450, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x250450, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x252450, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x254450, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x256450, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x350450, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x352450, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x354450, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x356450, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x450450, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x452450, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x454450, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x456450, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x550450, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x552450, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x554450, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x556450, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x650450, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x652450, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x654450, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x656450, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x750450, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x752450, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x754450, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x756450, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x50454, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x52454, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x54454, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x56454, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x150454, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x152454, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x154454, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x156454, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x250454, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x252454, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x254454, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x256454, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x350454, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x352454, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x354454, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x356454, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x450454, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x452454, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x454454, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x456454, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x550454, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x552454, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x554454, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x556454, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x650454, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x652454, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x654454, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x656454, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x750454, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x752454, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x754454, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x756454, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x50458, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x52458, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x54458, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x56458, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x150458, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x152458, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x154458, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x156458, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x250458, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x252458, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x254458, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x256458, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x350458, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x352458, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x354458, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x356458, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x450458, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x452458, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x454458, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x456458, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x550458, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x552458, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x554458, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x556458, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x650458, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x652458, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x654458, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x656458, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x750458, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x752458, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x754458, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x756458, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x5045C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x5245C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x5445C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x5645C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x15045C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x15245C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x15445C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x15645C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x25045C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x25245C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x25445C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x25645C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x35045C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x35245C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x35445C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x35645C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x45045C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x45245C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x45445C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x45645C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x55045C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x55245C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x55445C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x55645C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x65045C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x65245C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x65445C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x65645C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x75045C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x75245C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x75445C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x75645C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x50504, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x52504, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x54504, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x56504, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x150504, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x152504, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x154504, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x156504, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x250504, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x252504, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x254504, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x256504, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x350504, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x352504, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x354504, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x356504, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x450504, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x452504, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x454504, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x456504, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x550504, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x552504, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x554504, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x556504, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x650504, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x652504, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x654504, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x656504, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x750504, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x752504, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x754504, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x756504, 0x0c0c0c04);
++ wreg32_idx_byteoffset(adev, 0x50508, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x52508, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x54508, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x56508, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x150508, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x152508, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x154508, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x156508, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x250508, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x252508, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x254508, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x256508, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x350508, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x352508, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x354508, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x356508, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x450508, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x452508, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x454508, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x456508, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x550508, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x552508, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x554508, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x556508, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x650508, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x652508, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x654508, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x656508, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x750508, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x752508, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x754508, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x756508, 0x10100010);
++ wreg32_idx_byteoffset(adev, 0x5050C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x5250C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x5450C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x5650C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x15050C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x15250C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x15450C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x15650C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x25050C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x25250C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x25450C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x25650C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x35050C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x35250C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x35450C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x35650C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x45050C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x45250C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x45450C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x45650C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x55050C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x55250C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x55450C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x55650C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x65050C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x65250C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x65450C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x65650C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x75050C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x75250C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x75450C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x75650C, 0x0e040404);
++ wreg32_idx_byteoffset(adev, 0x50510, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x52510, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x54510, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x56510, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x150510, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x152510, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x154510, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x156510, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x250510, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x252510, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x254510, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x256510, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x350510, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x352510, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x354510, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x356510, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x450510, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x452510, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x454510, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x456510, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x550510, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x552510, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x554510, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x556510, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x650510, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x652510, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x654510, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x656510, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x750510, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x752510, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x754510, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x756510, 0x48900024);
++ wreg32_idx_byteoffset(adev, 0x50514, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x52514, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x54514, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x56514, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x150514, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x152514, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x154514, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x156514, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x250514, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x252514, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x254514, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x256514, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x350514, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x352514, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x354514, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x356514, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x450514, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x452514, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x454514, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x456514, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x550514, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x552514, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x554514, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x556514, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x650514, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x652514, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x654514, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x656514, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x750514, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x752514, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x754514, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x756514, 0x000e0a04);
++ wreg32_idx_byteoffset(adev, 0x50518, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x52518, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x54518, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x56518, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x150518, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x152518, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x154518, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x156518, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x250518, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x252518, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x254518, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x256518, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x350518, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x352518, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x354518, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x356518, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x450518, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x452518, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x454518, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x456518, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x550518, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x552518, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x554518, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x556518, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x650518, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x652518, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x654518, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x656518, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x750518, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x752518, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x754518, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x756518, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x5051C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5251C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5451C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5651C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x15051C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x15251C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x15451C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x15651C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x25051C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x25251C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x25451C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x25651C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x35051C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x35251C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x35451C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x35651C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x45051C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x45251C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x45451C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x45651C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x55051C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x55251C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x55451C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x55651C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x65051C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x65251C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x65451C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x65651C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x75051C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x75251C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x75451C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x75651C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x50520, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x52520, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x54520, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x56520, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x150520, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x152520, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x154520, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x156520, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x250520, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x252520, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x254520, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x256520, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x350520, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x352520, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x354520, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x356520, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x450520, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x452520, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x454520, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x456520, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x550520, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x552520, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x554520, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x556520, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x650520, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x652520, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x654520, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x656520, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x750520, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x752520, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x754520, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x756520, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x50524, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x52524, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x54524, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x56524, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x150524, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x152524, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x154524, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x156524, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x250524, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x252524, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x254524, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x256524, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x350524, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x352524, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x354524, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x356524, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x450524, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x452524, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x454524, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x456524, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x550524, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x552524, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x554524, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x556524, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x650524, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x652524, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x654524, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x656524, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x750524, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x752524, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x754524, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x756524, 0x03110300);
++ wreg32_idx_byteoffset(adev, 0x50528, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x52528, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x54528, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x56528, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x150528, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x152528, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x154528, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x156528, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x250528, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x252528, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x254528, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x256528, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x350528, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x352528, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x354528, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x356528, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x450528, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x452528, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x454528, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x456528, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x550528, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x552528, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x554528, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x556528, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x650528, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x652528, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x654528, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x656528, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x750528, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x752528, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x754528, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x756528, 0x000a080a);
++ wreg32_idx_byteoffset(adev, 0x50530, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x52530, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x54530, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x56530, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x150530, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x152530, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x154530, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x156530, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x250530, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x252530, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x254530, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x256530, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x350530, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x352530, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x354530, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x356530, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x450530, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x452530, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x454530, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x456530, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x550530, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x552530, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x554530, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x556530, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x650530, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x652530, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x654530, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x656530, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x750530, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x752530, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x754530, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x756530, 0x00001e78);
++ wreg32_idx_byteoffset(adev, 0x50534, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x52534, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x54534, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x56534, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x150534, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x152534, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x154534, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x156534, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x250534, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x252534, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x254534, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x256534, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x350534, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x352534, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x354534, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x356534, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x450534, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x452534, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x454534, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x456534, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x550534, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x552534, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x554534, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x556534, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x650534, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x652534, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x654534, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x656534, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x750534, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x752534, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x754534, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x756534, 0x20201008);
++ wreg32_idx_byteoffset(adev, 0x50538, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x52538, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x54538, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x56538, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x150538, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x152538, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x154538, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x156538, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x250538, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x252538, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x254538, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x256538, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x350538, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x352538, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x354538, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x356538, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x450538, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x452538, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x454538, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x456538, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x550538, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x552538, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x554538, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x556538, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x650538, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x652538, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x654538, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x656538, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x750538, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x752538, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x754538, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x756538, 0x0000010e);
++ wreg32_idx_byteoffset(adev, 0x5053C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5253C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5453C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5653C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x15053C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x15253C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x15453C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x15653C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x25053C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x25253C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x25453C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x25653C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x35053C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x35253C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x35453C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x35653C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x45053C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x45253C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x45453C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x45653C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x55053C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x55253C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x55453C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x55653C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x65053C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x65253C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x65453C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x65653C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x75053C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x75253C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x75453C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x75653C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x50540, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x52540, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x54540, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x56540, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x150540, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x152540, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x154540, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x156540, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x250540, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x252540, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x254540, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x256540, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x350540, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x352540, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x354540, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x356540, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x450540, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x452540, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x454540, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x456540, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x550540, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x552540, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x554540, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x556540, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x650540, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x652540, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x654540, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x656540, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x750540, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x752540, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x754540, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x756540, 0x000000f0);
++ wreg32_idx_byteoffset(adev, 0x50544, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x52544, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x54544, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x56544, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x150544, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x152544, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x154544, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x156544, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x250544, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x252544, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x254544, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x256544, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x350544, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x352544, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x354544, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x356544, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x450544, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x452544, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x454544, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x456544, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x550544, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x552544, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x554544, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x556544, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x650544, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x652544, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x654544, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x656544, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x750544, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x752544, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x754544, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x756544, 0x3a3a0008);
++ wreg32_idx_byteoffset(adev, 0x50550, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x52550, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x54550, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x56550, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x150550, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x152550, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x154550, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x156550, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x250550, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x252550, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x254550, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x256550, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x350550, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x352550, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x354550, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x356550, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x450550, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x452550, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x454550, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x456550, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x550550, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x552550, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x554550, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x556550, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x650550, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x652550, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x654550, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x656550, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x750550, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x752550, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x754550, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x756550, 0x000a0000);
++ wreg32_idx_byteoffset(adev, 0x50554, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x52554, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x54554, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x56554, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x150554, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x152554, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x154554, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x156554, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x250554, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x252554, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x254554, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x256554, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x350554, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x352554, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x354554, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x356554, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x450554, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x452554, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x454554, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x456554, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x550554, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x552554, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x554554, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x556554, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x650554, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x652554, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x654554, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x656554, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x750554, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x752554, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x754554, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x756554, 0x0a00000a);
++ wreg32_idx_byteoffset(adev, 0x50558, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x52558, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x54558, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x56558, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x150558, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x152558, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x154558, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x156558, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x250558, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x252558, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x254558, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x256558, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x350558, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x352558, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x354558, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x356558, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x450558, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x452558, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x454558, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x456558, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x550558, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x552558, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x554558, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x556558, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x650558, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x652558, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x654558, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x656558, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x750558, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x752558, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x754558, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x756558, 0x010c0202);
++ wreg32_idx_byteoffset(adev, 0x5055C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x5255C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x5455C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x5655C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x15055C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x15255C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x15455C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x15655C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x25055C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x25255C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x25455C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x25655C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x35055C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x35255C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x35455C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x35655C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x45055C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x45255C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x45455C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x45655C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x55055C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x55255C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x55455C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x55655C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x65055C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x65255C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x65455C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x65655C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x75055C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x75255C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x75455C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x75655C, 0x000c2a2a);
++ wreg32_idx_byteoffset(adev, 0x50260, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x52260, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x54260, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x56260, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x150260, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x152260, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x154260, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x156260, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x250260, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x252260, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x254260, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x256260, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x350260, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x352260, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x354260, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x356260, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x450260, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x452260, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x454260, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x456260, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x550260, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x552260, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x554260, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x556260, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x650260, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x652260, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x654260, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x656260, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x750260, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x752260, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x754260, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x756260, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x50360, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x52360, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x54360, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x56360, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x150360, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x152360, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x154360, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x156360, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x250360, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x252360, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x254360, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x256360, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x350360, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x352360, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x354360, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x356360, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x450360, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x452360, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x454360, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x456360, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x550360, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x552360, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x554360, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x556360, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x650360, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x652360, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x654360, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x656360, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x750360, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x752360, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x754360, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x756360, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x50460, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x52460, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x54460, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x56460, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x150460, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x152460, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x154460, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x156460, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x250460, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x252460, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x254460, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x256460, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x350460, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x352460, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x354460, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x356460, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x450460, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x452460, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x454460, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x456460, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x550460, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x552460, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x554460, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x556460, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x650460, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x652460, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x654460, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x656460, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x750460, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x752460, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x754460, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x756460, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x50560, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x52560, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x54560, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x56560, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x150560, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x152560, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x154560, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x156560, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x250560, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x252560, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x254560, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x256560, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x350560, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x352560, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x354560, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x356560, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x450560, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x452560, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x454560, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x456560, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x550560, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x552560, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x554560, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x556560, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x650560, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x652560, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x654560, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x656560, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x750560, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x752560, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x754560, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x756560, 0x00000100);
++ wreg32_idx_byteoffset(adev, 0x50200, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x52200, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x54200, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x56200, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x150200, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x152200, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x154200, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x156200, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x250200, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x252200, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x254200, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x256200, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x350200, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x352200, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x354200, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x356200, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x450200, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x452200, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x454200, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x456200, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x550200, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x552200, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x554200, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x556200, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x650200, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x652200, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x654200, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x656200, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x750200, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x752200, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x754200, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x756200, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x50300, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x52300, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x54300, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x56300, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x150300, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x152300, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x154300, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x156300, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x250300, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x252300, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x254300, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x256300, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x350300, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x352300, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x354300, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x356300, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x450300, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x452300, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x454300, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x456300, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x550300, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x552300, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x554300, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x556300, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x650300, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x652300, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x654300, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x656300, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x750300, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x752300, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x754300, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x756300, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x50400, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x52400, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x54400, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x56400, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x150400, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x152400, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x154400, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x156400, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x250400, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x252400, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x254400, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x256400, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x350400, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x352400, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x354400, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x356400, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x450400, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x452400, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x454400, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x456400, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x550400, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x552400, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x554400, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x556400, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x650400, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x652400, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x654400, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x656400, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x750400, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x752400, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x754400, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x756400, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x50500, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x52500, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x54500, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x56500, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x150500, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x152500, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x154500, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x156500, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x250500, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x252500, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x254500, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x256500, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x350500, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x352500, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x354500, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x356500, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x450500, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x452500, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x454500, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x456500, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x550500, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x552500, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x554500, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x556500, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x650500, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x652500, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x654500, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x656500, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x750500, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x752500, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x754500, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x756500, 0x0000011e);
++ wreg32_idx_byteoffset(adev, 0x5010C, 0x00101004);
++ wreg32_idx_byteoffset(adev, 0x5210C, 0x00101004);
++ wreg32_idx_byteoffset(adev, 0x5410C, 0x00101004);
++ wreg32_idx_byteoffset(adev, 0x5610C, 0x00101004);
++ wreg32_idx_byteoffset(adev, 0x15010C, 0x00101004);
++ wreg32_idx_byteoffset(adev, 0x15210C, 0x00101004);
++ wreg32_idx_byteoffset(adev, 0x15410C, 0x00101004);
++ wreg32_idx_byteoffset(adev, 0x15610C, 0x00101004);
++ wreg32_idx_byteoffset(adev, 0x25010C, 0x00101004);
++ wreg32_idx_byteoffset(adev, 0x25210C, 0x00101004);
++ wreg32_idx_byteoffset(adev, 0x25410C, 0x00101004);
++ wreg32_idx_byteoffset(adev, 0x25610C, 0x00101004);
++ wreg32_idx_byteoffset(adev, 0x35010C, 0x00101004);
++ wreg32_idx_byteoffset(adev, 0x35210C, 0x00101004);
++ wreg32_idx_byteoffset(adev, 0x35410C, 0x00101004);
++ wreg32_idx_byteoffset(adev, 0x35610C, 0x00101004);
++ wreg32_idx_byteoffset(adev, 0x45010C, 0x00101004);
++ wreg32_idx_byteoffset(adev, 0x45210C, 0x00101004);
++ wreg32_idx_byteoffset(adev, 0x45410C, 0x00101004);
++ wreg32_idx_byteoffset(adev, 0x45610C, 0x00101004);
++ wreg32_idx_byteoffset(adev, 0x55010C, 0x00101004);
++ wreg32_idx_byteoffset(adev, 0x55210C, 0x00101004);
++ wreg32_idx_byteoffset(adev, 0x55410C, 0x00101004);
++ wreg32_idx_byteoffset(adev, 0x55610C, 0x00101004);
++ wreg32_idx_byteoffset(adev, 0x65010C, 0x00101004);
++ wreg32_idx_byteoffset(adev, 0x65210C, 0x00101004);
++ wreg32_idx_byteoffset(adev, 0x65410C, 0x00101004);
++ wreg32_idx_byteoffset(adev, 0x65610C, 0x00101004);
++ wreg32_idx_byteoffset(adev, 0x75010C, 0x00101004);
++ wreg32_idx_byteoffset(adev, 0x75210C, 0x00101004);
++ wreg32_idx_byteoffset(adev, 0x75410C, 0x00101004);
++ wreg32_idx_byteoffset(adev, 0x75610C, 0x00101004);
++ wreg32_idx_byteoffset(adev, 0x50110, 0x00101820);
++ wreg32_idx_byteoffset(adev, 0x52110, 0x00101820);
++ wreg32_idx_byteoffset(adev, 0x54110, 0x00101820);
++ wreg32_idx_byteoffset(adev, 0x56110, 0x00101820);
++ wreg32_idx_byteoffset(adev, 0x150110, 0x00101820);
++ wreg32_idx_byteoffset(adev, 0x152110, 0x00101820);
++ wreg32_idx_byteoffset(adev, 0x154110, 0x00101820);
++ wreg32_idx_byteoffset(adev, 0x156110, 0x00101820);
++ wreg32_idx_byteoffset(adev, 0x250110, 0x00101820);
++ wreg32_idx_byteoffset(adev, 0x252110, 0x00101820);
++ wreg32_idx_byteoffset(adev, 0x254110, 0x00101820);
++ wreg32_idx_byteoffset(adev, 0x256110, 0x00101820);
++ wreg32_idx_byteoffset(adev, 0x350110, 0x00101820);
++ wreg32_idx_byteoffset(adev, 0x352110, 0x00101820);
++ wreg32_idx_byteoffset(adev, 0x354110, 0x00101820);
++ wreg32_idx_byteoffset(adev, 0x356110, 0x00101820);
++ wreg32_idx_byteoffset(adev, 0x450110, 0x00101820);
++ wreg32_idx_byteoffset(adev, 0x452110, 0x00101820);
++ wreg32_idx_byteoffset(adev, 0x454110, 0x00101820);
++ wreg32_idx_byteoffset(adev, 0x456110, 0x00101820);
++ wreg32_idx_byteoffset(adev, 0x550110, 0x00101820);
++ wreg32_idx_byteoffset(adev, 0x552110, 0x00101820);
++ wreg32_idx_byteoffset(adev, 0x554110, 0x00101820);
++ wreg32_idx_byteoffset(adev, 0x556110, 0x00101820);
++ wreg32_idx_byteoffset(adev, 0x650110, 0x00101820);
++ wreg32_idx_byteoffset(adev, 0x652110, 0x00101820);
++ wreg32_idx_byteoffset(adev, 0x654110, 0x00101820);
++ wreg32_idx_byteoffset(adev, 0x656110, 0x00101820);
++ wreg32_idx_byteoffset(adev, 0x750110, 0x00101820);
++ wreg32_idx_byteoffset(adev, 0x752110, 0x00101820);
++ wreg32_idx_byteoffset(adev, 0x754110, 0x00101820);
++ wreg32_idx_byteoffset(adev, 0x756110, 0x00101820);
++ wreg32_idx_byteoffset(adev, 0x50114, 0x18182018);
++ wreg32_idx_byteoffset(adev, 0x52114, 0x18182018);
++ wreg32_idx_byteoffset(adev, 0x54114, 0x18182018);
++ wreg32_idx_byteoffset(adev, 0x56114, 0x18182018);
++ wreg32_idx_byteoffset(adev, 0x150114, 0x18182018);
++ wreg32_idx_byteoffset(adev, 0x152114, 0x18182018);
++ wreg32_idx_byteoffset(adev, 0x154114, 0x18182018);
++ wreg32_idx_byteoffset(adev, 0x156114, 0x18182018);
++ wreg32_idx_byteoffset(adev, 0x250114, 0x18182018);
++ wreg32_idx_byteoffset(adev, 0x252114, 0x18182018);
++ wreg32_idx_byteoffset(adev, 0x254114, 0x18182018);
++ wreg32_idx_byteoffset(adev, 0x256114, 0x18182018);
++ wreg32_idx_byteoffset(adev, 0x350114, 0x18182018);
++ wreg32_idx_byteoffset(adev, 0x352114, 0x18182018);
++ wreg32_idx_byteoffset(adev, 0x354114, 0x18182018);
++ wreg32_idx_byteoffset(adev, 0x356114, 0x18182018);
++ wreg32_idx_byteoffset(adev, 0x450114, 0x18182018);
++ wreg32_idx_byteoffset(adev, 0x452114, 0x18182018);
++ wreg32_idx_byteoffset(adev, 0x454114, 0x18182018);
++ wreg32_idx_byteoffset(adev, 0x456114, 0x18182018);
++ wreg32_idx_byteoffset(adev, 0x550114, 0x18182018);
++ wreg32_idx_byteoffset(adev, 0x552114, 0x18182018);
++ wreg32_idx_byteoffset(adev, 0x554114, 0x18182018);
++ wreg32_idx_byteoffset(adev, 0x556114, 0x18182018);
++ wreg32_idx_byteoffset(adev, 0x650114, 0x18182018);
++ wreg32_idx_byteoffset(adev, 0x652114, 0x18182018);
++ wreg32_idx_byteoffset(adev, 0x654114, 0x18182018);
++ wreg32_idx_byteoffset(adev, 0x656114, 0x18182018);
++ wreg32_idx_byteoffset(adev, 0x750114, 0x18182018);
++ wreg32_idx_byteoffset(adev, 0x752114, 0x18182018);
++ wreg32_idx_byteoffset(adev, 0x754114, 0x18182018);
++ wreg32_idx_byteoffset(adev, 0x756114, 0x18182018);
++ wreg32_idx_byteoffset(adev, 0x50168, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x52168, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x54168, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x56168, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x150168, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x152168, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x154168, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x156168, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x250168, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x252168, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x254168, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x256168, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x350168, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x352168, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x354168, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x356168, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x450168, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x452168, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x454168, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x456168, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x550168, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x552168, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x554168, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x556168, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x650168, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x652168, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x654168, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x656168, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x750168, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x752168, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x754168, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x756168, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5016C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5216C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5416C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5616C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x15016C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x15216C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x15416C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x15616C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x25016C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x25216C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x25416C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x25616C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x35016C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x35216C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x35416C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x35616C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x45016C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x45216C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x45416C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x45616C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x55016C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x55216C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x55416C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x55616C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x65016C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x65216C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x65416C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x65616C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x75016C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x75216C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x75416C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x75616C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x501E0, 0x00000002);
++ wreg32_idx_byteoffset(adev, 0x521E0, 0x00000002);
++ wreg32_idx_byteoffset(adev, 0x541E0, 0x00000002);
++ wreg32_idx_byteoffset(adev, 0x561E0, 0x00000002);
++ wreg32_idx_byteoffset(adev, 0x1501E0, 0x00000002);
++ wreg32_idx_byteoffset(adev, 0x1521E0, 0x00000002);
++ wreg32_idx_byteoffset(adev, 0x1541E0, 0x00000002);
++ wreg32_idx_byteoffset(adev, 0x1561E0, 0x00000002);
++ wreg32_idx_byteoffset(adev, 0x2501E0, 0x00000002);
++ wreg32_idx_byteoffset(adev, 0x2521E0, 0x00000002);
++ wreg32_idx_byteoffset(adev, 0x2541E0, 0x00000002);
++ wreg32_idx_byteoffset(adev, 0x2561E0, 0x00000002);
++ wreg32_idx_byteoffset(adev, 0x3501E0, 0x00000002);
++ wreg32_idx_byteoffset(adev, 0x3521E0, 0x00000002);
++ wreg32_idx_byteoffset(adev, 0x3541E0, 0x00000002);
++ wreg32_idx_byteoffset(adev, 0x3561E0, 0x00000002);
++ wreg32_idx_byteoffset(adev, 0x4501E0, 0x00000002);
++ wreg32_idx_byteoffset(adev, 0x4521E0, 0x00000002);
++ wreg32_idx_byteoffset(adev, 0x4541E0, 0x00000002);
++ wreg32_idx_byteoffset(adev, 0x4561E0, 0x00000002);
++ wreg32_idx_byteoffset(adev, 0x5501E0, 0x00000002);
++ wreg32_idx_byteoffset(adev, 0x5521E0, 0x00000002);
++ wreg32_idx_byteoffset(adev, 0x5541E0, 0x00000002);
++ wreg32_idx_byteoffset(adev, 0x5561E0, 0x00000002);
++ wreg32_idx_byteoffset(adev, 0x6501E0, 0x00000002);
++ wreg32_idx_byteoffset(adev, 0x6521E0, 0x00000002);
++ wreg32_idx_byteoffset(adev, 0x6541E0, 0x00000002);
++ wreg32_idx_byteoffset(adev, 0x6561E0, 0x00000002);
++ wreg32_idx_byteoffset(adev, 0x7501E0, 0x00000002);
++ wreg32_idx_byteoffset(adev, 0x7521E0, 0x00000002);
++ wreg32_idx_byteoffset(adev, 0x7541E0, 0x00000002);
++ wreg32_idx_byteoffset(adev, 0x7561E0, 0x00000002);
++ wreg32_idx_byteoffset(adev, 0x50070, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x52070, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x54070, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x56070, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x150070, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x152070, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x154070, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x156070, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x250070, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x252070, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x254070, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x256070, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x350070, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x352070, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x354070, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x356070, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x450070, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x452070, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x454070, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x456070, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x550070, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x552070, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x554070, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x556070, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x650070, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x652070, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x654070, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x656070, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x750070, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x752070, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x754070, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x756070, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x50078, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x52078, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x54078, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x56078, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x150078, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x152078, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x154078, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x156078, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x250078, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x252078, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x254078, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x256078, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x350078, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x352078, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x354078, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x356078, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x450078, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x452078, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x454078, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x456078, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x550078, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x552078, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x554078, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x556078, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x650078, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x652078, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x654078, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x656078, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x750078, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x752078, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x754078, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x756078, 0x00001000);
++ wreg32_idx_byteoffset(adev, 0x5111C, 0x40000008);
++ wreg32_idx_byteoffset(adev, 0x50120, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x52120, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x54120, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x56120, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x150120, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x152120, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x154120, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x156120, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x250120, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x252120, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x254120, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x256120, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x350120, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x352120, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x354120, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x356120, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x450120, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x452120, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x454120, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x456120, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x550120, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x552120, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x554120, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x556120, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x650120, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x652120, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x654120, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x656120, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x750120, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x752120, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x754120, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x756120, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x51124, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x50DC0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x52DC0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x54DC0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x56DC0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x150DC0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x152DC0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x154DC0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x156DC0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x250DC0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x252DC0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x254DC0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x256DC0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x350DC0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x352DC0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x354DC0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x356DC0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x450DC0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x452DC0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x454DC0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x456DC0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x550DC0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x552DC0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x554DC0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x556DC0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x650DC0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x652DC0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x654DC0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x656DC0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x750DC0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x752DC0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x754DC0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x756DC0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x50164, 0x00000270);
++ wreg32_idx_byteoffset(adev, 0x52164, 0x00000270);
++ wreg32_idx_byteoffset(adev, 0x54164, 0x00000270);
++ wreg32_idx_byteoffset(adev, 0x56164, 0x00000270);
++ wreg32_idx_byteoffset(adev, 0x150164, 0x00000270);
++ wreg32_idx_byteoffset(adev, 0x152164, 0x00000270);
++ wreg32_idx_byteoffset(adev, 0x154164, 0x00000270);
++ wreg32_idx_byteoffset(adev, 0x156164, 0x00000270);
++ wreg32_idx_byteoffset(adev, 0x250164, 0x00000270);
++ wreg32_idx_byteoffset(adev, 0x252164, 0x00000270);
++ wreg32_idx_byteoffset(adev, 0x254164, 0x00000270);
++ wreg32_idx_byteoffset(adev, 0x256164, 0x00000270);
++ wreg32_idx_byteoffset(adev, 0x350164, 0x00000270);
++ wreg32_idx_byteoffset(adev, 0x352164, 0x00000270);
++ wreg32_idx_byteoffset(adev, 0x354164, 0x00000270);
++ wreg32_idx_byteoffset(adev, 0x356164, 0x00000270);
++ wreg32_idx_byteoffset(adev, 0x450164, 0x00000270);
++ wreg32_idx_byteoffset(adev, 0x452164, 0x00000270);
++ wreg32_idx_byteoffset(adev, 0x454164, 0x00000270);
++ wreg32_idx_byteoffset(adev, 0x456164, 0x00000270);
++ wreg32_idx_byteoffset(adev, 0x550164, 0x00000270);
++ wreg32_idx_byteoffset(adev, 0x552164, 0x00000270);
++ wreg32_idx_byteoffset(adev, 0x554164, 0x00000270);
++ wreg32_idx_byteoffset(adev, 0x556164, 0x00000270);
++ wreg32_idx_byteoffset(adev, 0x650164, 0x00000270);
++ wreg32_idx_byteoffset(adev, 0x652164, 0x00000270);
++ wreg32_idx_byteoffset(adev, 0x654164, 0x00000270);
++ wreg32_idx_byteoffset(adev, 0x656164, 0x00000270);
++ wreg32_idx_byteoffset(adev, 0x750164, 0x00000270);
++ wreg32_idx_byteoffset(adev, 0x752164, 0x00000270);
++ wreg32_idx_byteoffset(adev, 0x754164, 0x00000270);
++ wreg32_idx_byteoffset(adev, 0x756164, 0x00000270);
++ wreg32_idx_byteoffset(adev, 0x50284, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x52284, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x54284, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x56284, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x150284, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x152284, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x154284, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x156284, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x250284, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x252284, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x254284, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x256284, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x350284, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x352284, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x354284, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x356284, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x450284, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x452284, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x454284, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x456284, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x550284, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x552284, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x554284, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x556284, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x650284, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x652284, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x654284, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x656284, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x750284, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x752284, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x754284, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x756284, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x50384, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x52384, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x54384, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x56384, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x150384, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x152384, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x154384, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x156384, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x250384, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x252384, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x254384, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x256384, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x350384, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x352384, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x354384, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x356384, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x450384, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x452384, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x454384, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x456384, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x550384, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x552384, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x554384, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x556384, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x650384, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x652384, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x654384, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x656384, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x750384, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x752384, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x754384, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x756384, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x50484, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x52484, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x54484, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x56484, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x150484, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x152484, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x154484, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x156484, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x250484, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x252484, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x254484, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x256484, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x350484, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x352484, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x354484, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x356484, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x450484, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x452484, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x454484, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x456484, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x550484, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x552484, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x554484, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x556484, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x650484, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x652484, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x654484, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x656484, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x750484, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x752484, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x754484, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x756484, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x50584, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x52584, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x54584, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x56584, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x150584, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x152584, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x154584, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x156584, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x250584, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x252584, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x254584, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x256584, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x350584, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x352584, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x354584, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x356584, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x450584, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x452584, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x454584, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x456584, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x550584, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x552584, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x554584, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x556584, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x650584, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x652584, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x654584, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x656584, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x750584, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x752584, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x754584, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x756584, 0x00000777);
++ wreg32_idx_byteoffset(adev, 0x50148, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x52148, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x54148, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x56148, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x150148, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x152148, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x154148, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x156148, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x250148, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x252148, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x254148, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x256148, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x350148, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x352148, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x354148, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x356148, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x450148, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x452148, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x454148, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x456148, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x550148, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x552148, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x554148, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x556148, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x650148, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x652148, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x654148, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x656148, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x750148, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x752148, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x754148, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x756148, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x50080, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x52080, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x54080, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x56080, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x150080, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x152080, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x154080, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x156080, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x250080, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x252080, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x254080, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x256080, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x350080, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x352080, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x354080, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x356080, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x450080, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x452080, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x454080, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x456080, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x550080, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x552080, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x554080, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x556080, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x650080, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x652080, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x654080, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x656080, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x750080, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x752080, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x754080, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x756080, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5014C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5214C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5414C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5614C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x15014C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x15214C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x15414C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x15614C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x25014C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x25214C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x25414C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x25614C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x35014C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x35214C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x35414C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x35614C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x45014C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x45214C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x45414C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x45614C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x55014C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x55214C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x55414C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x55614C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x65014C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x65214C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x65414C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x65614C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x75014C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x75214C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x75414C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x75614C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x50D80, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x52D80, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x54D80, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x56D80, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x150D80, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x152D80, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x154D80, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x156D80, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x250D80, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x252D80, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x254D80, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x256D80, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x350D80, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x352D80, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x354D80, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x356D80, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x450D80, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x452D80, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x454D80, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x456D80, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x550D80, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x552D80, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x554D80, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x556D80, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x650D80, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x652D80, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x654D80, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x656D80, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x750D80, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x752D80, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x754D80, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x756D80, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x50D88, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x52D88, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x54D88, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x56D88, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x150D88, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x152D88, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x154D88, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x156D88, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x250D88, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x252D88, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x254D88, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x256D88, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x350D88, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x352D88, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x354D88, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x356D88, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x450D88, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x452D88, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x454D88, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x456D88, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x550D88, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x552D88, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x554D88, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x556D88, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x650D88, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x652D88, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x654D88, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x656D88, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x750D88, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x752D88, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x754D88, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x756D88, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x50DB0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x52DB0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x54DB0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x56DB0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x150DB0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x152DB0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x154DB0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x156DB0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x250DB0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x252DB0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x254DB0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x256DB0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x350DB0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x352DB0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x354DB0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x356DB0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x450DB0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x452DB0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x454DB0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x456DB0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x550DB0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x552DB0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x554DB0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x556DB0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x650DB0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x652DB0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x654DB0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x656DB0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x750DB0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x752DB0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x754DB0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x756DB0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x50DB4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x52DB4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x54DB4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x56DB4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x150DB4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x152DB4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x154DB4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x156DB4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x250DB4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x252DB4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x254DB4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x256DB4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x350DB4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x352DB4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x354DB4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x356DB4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x450DB4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x452DB4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x454DB4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x456DB4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x550DB4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x552DB4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x554DB4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x556DB4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x650DB4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x652DB4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x654DB4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x656DB4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x750DB4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x752DB4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x754DB4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x756DB4, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x50DB8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x52DB8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x54DB8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x56DB8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x150DB8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x152DB8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x154DB8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x156DB8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x250DB8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x252DB8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x254DB8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x256DB8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x350DB8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x352DB8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x354DB8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x356DB8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x450DB8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x452DB8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x454DB8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x456DB8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x550DB8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x552DB8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x554DB8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x556DB8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x650DB8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x652DB8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x654DB8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x656DB8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x750DB8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x752DB8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x754DB8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x756DB8, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x50DBC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x52DBC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x54DBC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x56DBC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x150DBC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x152DBC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x154DBC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x156DBC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x250DBC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x252DBC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x254DBC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x256DBC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x350DBC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x352DBC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x354DBC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x356DBC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x450DBC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x452DBC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x454DBC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x456DBC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x550DBC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x552DBC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x554DBC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x556DBC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x650DBC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x652DBC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x654DBC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x656DBC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x750DBC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x752DBC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x754DBC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x756DBC, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x50138, 0x03204060);
++ wreg32_idx_byteoffset(adev, 0x52138, 0x03204060);
++ wreg32_idx_byteoffset(adev, 0x54138, 0x03204060);
++ wreg32_idx_byteoffset(adev, 0x56138, 0x03204060);
++ wreg32_idx_byteoffset(adev, 0x150138, 0x03204060);
++ wreg32_idx_byteoffset(adev, 0x152138, 0x03204060);
++ wreg32_idx_byteoffset(adev, 0x154138, 0x03204060);
++ wreg32_idx_byteoffset(adev, 0x156138, 0x03204060);
++ wreg32_idx_byteoffset(adev, 0x250138, 0x03204060);
++ wreg32_idx_byteoffset(adev, 0x252138, 0x03204060);
++ wreg32_idx_byteoffset(adev, 0x254138, 0x03204060);
++ wreg32_idx_byteoffset(adev, 0x256138, 0x03204060);
++ wreg32_idx_byteoffset(adev, 0x350138, 0x03204060);
++ wreg32_idx_byteoffset(adev, 0x352138, 0x03204060);
++ wreg32_idx_byteoffset(adev, 0x354138, 0x03204060);
++ wreg32_idx_byteoffset(adev, 0x356138, 0x03204060);
++ wreg32_idx_byteoffset(adev, 0x450138, 0x03204060);
++ wreg32_idx_byteoffset(adev, 0x452138, 0x03204060);
++ wreg32_idx_byteoffset(adev, 0x454138, 0x03204060);
++ wreg32_idx_byteoffset(adev, 0x456138, 0x03204060);
++ wreg32_idx_byteoffset(adev, 0x550138, 0x03204060);
++ wreg32_idx_byteoffset(adev, 0x552138, 0x03204060);
++ wreg32_idx_byteoffset(adev, 0x554138, 0x03204060);
++ wreg32_idx_byteoffset(adev, 0x556138, 0x03204060);
++ wreg32_idx_byteoffset(adev, 0x650138, 0x03204060);
++ wreg32_idx_byteoffset(adev, 0x652138, 0x03204060);
++ wreg32_idx_byteoffset(adev, 0x654138, 0x03204060);
++ wreg32_idx_byteoffset(adev, 0x656138, 0x03204060);
++ wreg32_idx_byteoffset(adev, 0x750138, 0x03204060);
++ wreg32_idx_byteoffset(adev, 0x752138, 0x03204060);
++ wreg32_idx_byteoffset(adev, 0x754138, 0x03204060);
++ wreg32_idx_byteoffset(adev, 0x756138, 0x03204060);
++ wreg32_idx_byteoffset(adev, 0x5013C, 0x00000017);
++ wreg32_idx_byteoffset(adev, 0x5213C, 0x00000017);
++ wreg32_idx_byteoffset(adev, 0x5413C, 0x00000017);
++ wreg32_idx_byteoffset(adev, 0x5613C, 0x00000017);
++ wreg32_idx_byteoffset(adev, 0x15013C, 0x00000017);
++ wreg32_idx_byteoffset(adev, 0x15213C, 0x00000017);
++ wreg32_idx_byteoffset(adev, 0x15413C, 0x00000017);
++ wreg32_idx_byteoffset(adev, 0x15613C, 0x00000017);
++ wreg32_idx_byteoffset(adev, 0x25013C, 0x00000017);
++ wreg32_idx_byteoffset(adev, 0x25213C, 0x00000017);
++ wreg32_idx_byteoffset(adev, 0x25413C, 0x00000017);
++ wreg32_idx_byteoffset(adev, 0x25613C, 0x00000017);
++ wreg32_idx_byteoffset(adev, 0x35013C, 0x00000017);
++ wreg32_idx_byteoffset(adev, 0x35213C, 0x00000017);
++ wreg32_idx_byteoffset(adev, 0x35413C, 0x00000017);
++ wreg32_idx_byteoffset(adev, 0x35613C, 0x00000017);
++ wreg32_idx_byteoffset(adev, 0x45013C, 0x00000017);
++ wreg32_idx_byteoffset(adev, 0x45213C, 0x00000017);
++ wreg32_idx_byteoffset(adev, 0x45413C, 0x00000017);
++ wreg32_idx_byteoffset(adev, 0x45613C, 0x00000017);
++ wreg32_idx_byteoffset(adev, 0x55013C, 0x00000017);
++ wreg32_idx_byteoffset(adev, 0x55213C, 0x00000017);
++ wreg32_idx_byteoffset(adev, 0x55413C, 0x00000017);
++ wreg32_idx_byteoffset(adev, 0x55613C, 0x00000017);
++ wreg32_idx_byteoffset(adev, 0x65013C, 0x00000017);
++ wreg32_idx_byteoffset(adev, 0x65213C, 0x00000017);
++ wreg32_idx_byteoffset(adev, 0x65413C, 0x00000017);
++ wreg32_idx_byteoffset(adev, 0x65613C, 0x00000017);
++ wreg32_idx_byteoffset(adev, 0x75013C, 0x00000017);
++ wreg32_idx_byteoffset(adev, 0x75213C, 0x00000017);
++ wreg32_idx_byteoffset(adev, 0x75413C, 0x00000017);
++ wreg32_idx_byteoffset(adev, 0x75613C, 0x00000017);
++ wreg32_idx_byteoffset(adev, 0x50130, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x52130, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x54130, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x56130, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x150130, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x152130, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x154130, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x156130, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x250130, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x252130, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x254130, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x256130, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x350130, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x352130, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x354130, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x356130, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x450130, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x452130, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x454130, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x456130, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x550130, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x552130, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x554130, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x556130, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x650130, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x652130, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x654130, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x656130, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x750130, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x752130, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x754130, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x756130, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x500F4, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x520F4, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x540F4, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x560F4, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x1500F4, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x1520F4, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x1540F4, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x1560F4, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x2500F4, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x2520F4, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x2540F4, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x2560F4, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x3500F4, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x3520F4, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x3540F4, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x3560F4, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x4500F4, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x4520F4, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x4540F4, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x4560F4, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x5500F4, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x5520F4, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x5540F4, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x5560F4, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x6500F4, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x6520F4, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x6540F4, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x6560F4, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x7500F4, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x7520F4, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x7540F4, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x7560F4, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x5012C, 0x41180469);
++ wreg32_idx_byteoffset(adev, 0x5212C, 0x41180469);
++ wreg32_idx_byteoffset(adev, 0x5412C, 0x41180469);
++ wreg32_idx_byteoffset(adev, 0x5612C, 0x41180469);
++ wreg32_idx_byteoffset(adev, 0x15012C, 0x41180469);
++ wreg32_idx_byteoffset(adev, 0x15212C, 0x41180469);
++ wreg32_idx_byteoffset(adev, 0x15412C, 0x41180469);
++ wreg32_idx_byteoffset(adev, 0x15612C, 0x41180469);
++ wreg32_idx_byteoffset(adev, 0x25012C, 0x41180469);
++ wreg32_idx_byteoffset(adev, 0x25212C, 0x41180469);
++ wreg32_idx_byteoffset(adev, 0x25412C, 0x41180469);
++ wreg32_idx_byteoffset(adev, 0x25612C, 0x41180469);
++ wreg32_idx_byteoffset(adev, 0x35012C, 0x41180469);
++ wreg32_idx_byteoffset(adev, 0x35212C, 0x41180469);
++ wreg32_idx_byteoffset(adev, 0x35412C, 0x41180469);
++ wreg32_idx_byteoffset(adev, 0x35612C, 0x41180469);
++ wreg32_idx_byteoffset(adev, 0x45012C, 0x41180469);
++ wreg32_idx_byteoffset(adev, 0x45212C, 0x41180469);
++ wreg32_idx_byteoffset(adev, 0x45412C, 0x41180469);
++ wreg32_idx_byteoffset(adev, 0x45612C, 0x41180469);
++ wreg32_idx_byteoffset(adev, 0x55012C, 0x41180469);
++ wreg32_idx_byteoffset(adev, 0x55212C, 0x41180469);
++ wreg32_idx_byteoffset(adev, 0x55412C, 0x41180469);
++ wreg32_idx_byteoffset(adev, 0x55612C, 0x41180469);
++ wreg32_idx_byteoffset(adev, 0x65012C, 0x41180469);
++ wreg32_idx_byteoffset(adev, 0x65212C, 0x41180469);
++ wreg32_idx_byteoffset(adev, 0x65412C, 0x41180469);
++ wreg32_idx_byteoffset(adev, 0x65612C, 0x41180469);
++ wreg32_idx_byteoffset(adev, 0x75012C, 0x41180469);
++ wreg32_idx_byteoffset(adev, 0x75212C, 0x41180469);
++ wreg32_idx_byteoffset(adev, 0x75412C, 0x41180469);
++ wreg32_idx_byteoffset(adev, 0x75612C, 0x41180469);
++ wreg32_idx_byteoffset(adev, 0x50DD0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x52DD0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x54DD0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x56DD0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x150DD0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x152DD0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x154DD0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x156DD0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x250DD0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x252DD0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x254DD0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x256DD0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x350DD0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x352DD0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x354DD0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x356DD0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x450DD0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x452DD0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x454DD0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x456DD0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x550DD0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x552DD0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x554DD0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x556DD0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x650DD0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x652DD0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x654DD0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x656DD0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x750DD0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x752DD0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x754DD0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x756DD0, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x50288, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x52288, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x54288, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x56288, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x150288, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x152288, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x154288, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x156288, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x250288, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x252288, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x254288, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x256288, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x350288, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x352288, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x354288, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x356288, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x450288, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x452288, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x454288, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x456288, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x550288, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x552288, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x554288, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x556288, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x650288, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x652288, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x654288, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x656288, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x750288, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x752288, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x754288, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x756288, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x50388, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x52388, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x54388, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x56388, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x150388, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x152388, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x154388, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x156388, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x250388, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x252388, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x254388, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x256388, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x350388, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x352388, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x354388, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x356388, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x450388, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x452388, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x454388, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x456388, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x550388, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x552388, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x554388, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x556388, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x650388, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x652388, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x654388, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x656388, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x750388, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x752388, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x754388, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x756388, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x50488, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x52488, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x54488, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x56488, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x150488, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x152488, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x154488, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x156488, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x250488, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x252488, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x254488, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x256488, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x350488, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x352488, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x354488, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x356488, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x450488, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x452488, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x454488, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x456488, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x550488, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x552488, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x554488, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x556488, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x650488, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x652488, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x654488, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x656488, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x750488, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x752488, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x754488, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x756488, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x50588, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x52588, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x54588, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x56588, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x150588, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x152588, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x154588, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x156588, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x250588, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x252588, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x254588, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x256588, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x350588, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x352588, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x354588, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x356588, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x450588, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x452588, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x454588, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x456588, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x550588, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x552588, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x554588, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x556588, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x650588, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x652588, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x654588, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x656588, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x750588, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x752588, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x754588, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x756588, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x50108, 0x0c400001);
++ wreg32_idx_byteoffset(adev, 0x52108, 0x0c400001);
++ wreg32_idx_byteoffset(adev, 0x54108, 0x0c400001);
++ wreg32_idx_byteoffset(adev, 0x56108, 0x0c400001);
++ wreg32_idx_byteoffset(adev, 0x150108, 0x0c400001);
++ wreg32_idx_byteoffset(adev, 0x152108, 0x0c400001);
++ wreg32_idx_byteoffset(adev, 0x154108, 0x0c400001);
++ wreg32_idx_byteoffset(adev, 0x156108, 0x0c400001);
++ wreg32_idx_byteoffset(adev, 0x250108, 0x0c400001);
++ wreg32_idx_byteoffset(adev, 0x252108, 0x0c400001);
++ wreg32_idx_byteoffset(adev, 0x254108, 0x0c400001);
++ wreg32_idx_byteoffset(adev, 0x256108, 0x0c400001);
++ wreg32_idx_byteoffset(adev, 0x350108, 0x0c400001);
++ wreg32_idx_byteoffset(adev, 0x352108, 0x0c400001);
++ wreg32_idx_byteoffset(adev, 0x354108, 0x0c400001);
++ wreg32_idx_byteoffset(adev, 0x356108, 0x0c400001);
++ wreg32_idx_byteoffset(adev, 0x450108, 0x0c400001);
++ wreg32_idx_byteoffset(adev, 0x452108, 0x0c400001);
++ wreg32_idx_byteoffset(adev, 0x454108, 0x0c400001);
++ wreg32_idx_byteoffset(adev, 0x456108, 0x0c400001);
++ wreg32_idx_byteoffset(adev, 0x550108, 0x0c400001);
++ wreg32_idx_byteoffset(adev, 0x552108, 0x0c400001);
++ wreg32_idx_byteoffset(adev, 0x554108, 0x0c400001);
++ wreg32_idx_byteoffset(adev, 0x556108, 0x0c400001);
++ wreg32_idx_byteoffset(adev, 0x650108, 0x0c400001);
++ wreg32_idx_byteoffset(adev, 0x652108, 0x0c400001);
++ wreg32_idx_byteoffset(adev, 0x654108, 0x0c400001);
++ wreg32_idx_byteoffset(adev, 0x656108, 0x0c400001);
++ wreg32_idx_byteoffset(adev, 0x750108, 0x0c400001);
++ wreg32_idx_byteoffset(adev, 0x752108, 0x0c400001);
++ wreg32_idx_byteoffset(adev, 0x754108, 0x0c400001);
++ wreg32_idx_byteoffset(adev, 0x756108, 0x0c400001);
++ wreg32_idx_byteoffset(adev, 0x57080, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x157080, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x257080, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x357080, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x457080, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x557080, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x657080, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x757080, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5703C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x15703C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x25703C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x35703C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x45703C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x55703C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x65703C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x75703C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x57038, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x157038, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x257038, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x357038, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x457038, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x557038, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x657038, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x757038, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x57040, 0x0067220a);
++ wreg32_idx_byteoffset(adev, 0x157040, 0x0067220a);
++ wreg32_idx_byteoffset(adev, 0x257040, 0x0067220a);
++ wreg32_idx_byteoffset(adev, 0x357040, 0x0067220a);
++ wreg32_idx_byteoffset(adev, 0x457040, 0x0067220a);
++ wreg32_idx_byteoffset(adev, 0x557040, 0x0067220a);
++ wreg32_idx_byteoffset(adev, 0x657040, 0x0067220a);
++ wreg32_idx_byteoffset(adev, 0x757040, 0x0067220a);
++ wreg32_idx_byteoffset(adev, 0x57044, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x157044, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x257044, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x357044, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x457044, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x557044, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x657044, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x757044, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x57048, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x157048, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x257048, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x357048, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x457048, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x557048, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x657048, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x757048, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5704C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x15704C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x25704C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x35704C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x45704C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x55704C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x65704C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x75704C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x50D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x52D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x54D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x56D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x150D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x152D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x154D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x156D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x250D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x252D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x254D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x256D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x350D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x352D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x354D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x356D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x450D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x452D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x454D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x456D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x550D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x552D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x554D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x556D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x650D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x652D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x654D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x656D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x750D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x752D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x754D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x756D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x50118, 0x00000047);
++ wreg32_idx_byteoffset(adev, 0x52118, 0x00000047);
++ wreg32_idx_byteoffset(adev, 0x54118, 0x00000047);
++ wreg32_idx_byteoffset(adev, 0x56118, 0x00000047);
++ wreg32_idx_byteoffset(adev, 0x150118, 0x00000047);
++ wreg32_idx_byteoffset(adev, 0x152118, 0x00000047);
++ wreg32_idx_byteoffset(adev, 0x154118, 0x00000047);
++ wreg32_idx_byteoffset(adev, 0x156118, 0x00000047);
++ wreg32_idx_byteoffset(adev, 0x250118, 0x00000047);
++ wreg32_idx_byteoffset(adev, 0x252118, 0x00000047);
++ wreg32_idx_byteoffset(adev, 0x254118, 0x00000047);
++ wreg32_idx_byteoffset(adev, 0x256118, 0x00000047);
++ wreg32_idx_byteoffset(adev, 0x350118, 0x00000047);
++ wreg32_idx_byteoffset(adev, 0x352118, 0x00000047);
++ wreg32_idx_byteoffset(adev, 0x354118, 0x00000047);
++ wreg32_idx_byteoffset(adev, 0x356118, 0x00000047);
++ wreg32_idx_byteoffset(adev, 0x450118, 0x00000047);
++ wreg32_idx_byteoffset(adev, 0x452118, 0x00000047);
++ wreg32_idx_byteoffset(adev, 0x454118, 0x00000047);
++ wreg32_idx_byteoffset(adev, 0x456118, 0x00000047);
++ wreg32_idx_byteoffset(adev, 0x550118, 0x00000047);
++ wreg32_idx_byteoffset(adev, 0x552118, 0x00000047);
++ wreg32_idx_byteoffset(adev, 0x554118, 0x00000047);
++ wreg32_idx_byteoffset(adev, 0x556118, 0x00000047);
++ wreg32_idx_byteoffset(adev, 0x650118, 0x00000047);
++ wreg32_idx_byteoffset(adev, 0x652118, 0x00000047);
++ wreg32_idx_byteoffset(adev, 0x654118, 0x00000047);
++ wreg32_idx_byteoffset(adev, 0x656118, 0x00000047);
++ wreg32_idx_byteoffset(adev, 0x750118, 0x00000047);
++ wreg32_idx_byteoffset(adev, 0x752118, 0x00000047);
++ wreg32_idx_byteoffset(adev, 0x754118, 0x00000047);
++ wreg32_idx_byteoffset(adev, 0x756118, 0x00000047);
++ wreg32_idx_byteoffset(adev, 0x50140, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x52140, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x54140, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x56140, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x150140, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x152140, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x154140, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x156140, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x250140, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x252140, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x254140, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x256140, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x350140, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x352140, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x354140, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x356140, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x450140, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x452140, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x454140, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x456140, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x550140, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x552140, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x554140, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x556140, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x650140, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x652140, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x654140, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x656140, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x750140, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x752140, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x754140, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x756140, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x50C20, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x52C20, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x54C20, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x56C20, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x150C20, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x152C20, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x154C20, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x156C20, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x250C20, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x252C20, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x254C20, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x256C20, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x350C20, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x352C20, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x354C20, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x356C20, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x450C20, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x452C20, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x454C20, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x456C20, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x550C20, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x552C20, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x554C20, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x556C20, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x650C20, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x652C20, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x654C20, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x656C20, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x750C20, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x752C20, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x754C20, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x756C20, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x50D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x52D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x54D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x56D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x150D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x152D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x154D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x156D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x250D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x252D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x254D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x256D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x350D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x352D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x354D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x356D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x450D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x452D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x454D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x456D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x550D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x552D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x554D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x556D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x650D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x652D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x654D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x656D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x750D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x752D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x754D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x756D04, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x50D2C, 0x0001678d);
++ wreg32_idx_byteoffset(adev, 0x52D2C, 0x0001678d);
++ wreg32_idx_byteoffset(adev, 0x54D2C, 0x0001678d);
++ wreg32_idx_byteoffset(adev, 0x56D2C, 0x0001678d);
++ wreg32_idx_byteoffset(adev, 0x150D2C, 0x0001678d);
++ wreg32_idx_byteoffset(adev, 0x152D2C, 0x0001678d);
++ wreg32_idx_byteoffset(adev, 0x154D2C, 0x0001678d);
++ wreg32_idx_byteoffset(adev, 0x156D2C, 0x0001678d);
++ wreg32_idx_byteoffset(adev, 0x250D2C, 0x0001678d);
++ wreg32_idx_byteoffset(adev, 0x252D2C, 0x0001678d);
++ wreg32_idx_byteoffset(adev, 0x254D2C, 0x0001678d);
++ wreg32_idx_byteoffset(adev, 0x256D2C, 0x0001678d);
++ wreg32_idx_byteoffset(adev, 0x350D2C, 0x0001678d);
++ wreg32_idx_byteoffset(adev, 0x352D2C, 0x0001678d);
++ wreg32_idx_byteoffset(adev, 0x354D2C, 0x0001678d);
++ wreg32_idx_byteoffset(adev, 0x356D2C, 0x0001678d);
++ wreg32_idx_byteoffset(adev, 0x450D2C, 0x0001678d);
++ wreg32_idx_byteoffset(adev, 0x452D2C, 0x0001678d);
++ wreg32_idx_byteoffset(adev, 0x454D2C, 0x0001678d);
++ wreg32_idx_byteoffset(adev, 0x456D2C, 0x0001678d);
++ wreg32_idx_byteoffset(adev, 0x550D2C, 0x0001678d);
++ wreg32_idx_byteoffset(adev, 0x552D2C, 0x0001678d);
++ wreg32_idx_byteoffset(adev, 0x554D2C, 0x0001678d);
++ wreg32_idx_byteoffset(adev, 0x556D2C, 0x0001678d);
++ wreg32_idx_byteoffset(adev, 0x650D2C, 0x0001678d);
++ wreg32_idx_byteoffset(adev, 0x652D2C, 0x0001678d);
++ wreg32_idx_byteoffset(adev, 0x654D2C, 0x0001678d);
++ wreg32_idx_byteoffset(adev, 0x656D2C, 0x0001678d);
++ wreg32_idx_byteoffset(adev, 0x750D2C, 0x0001678d);
++ wreg32_idx_byteoffset(adev, 0x752D2C, 0x0001678d);
++ wreg32_idx_byteoffset(adev, 0x754D2C, 0x0001678d);
++ wreg32_idx_byteoffset(adev, 0x756D2C, 0x0001678d);
++ wreg32_idx_byteoffset(adev, 0x50D28, 0xd4fd61c8);
++ wreg32_idx_byteoffset(adev, 0x52D28, 0xd4fd61c8);
++ wreg32_idx_byteoffset(adev, 0x54D28, 0xd4fd61c8);
++ wreg32_idx_byteoffset(adev, 0x56D28, 0xd4fd61c8);
++ wreg32_idx_byteoffset(adev, 0x150D28, 0xd4fd61c8);
++ wreg32_idx_byteoffset(adev, 0x152D28, 0xd4fd61c8);
++ wreg32_idx_byteoffset(adev, 0x154D28, 0xd4fd61c8);
++ wreg32_idx_byteoffset(adev, 0x156D28, 0xd4fd61c8);
++ wreg32_idx_byteoffset(adev, 0x250D28, 0xd4fd61c8);
++ wreg32_idx_byteoffset(adev, 0x252D28, 0xd4fd61c8);
++ wreg32_idx_byteoffset(adev, 0x254D28, 0xd4fd61c8);
++ wreg32_idx_byteoffset(adev, 0x256D28, 0xd4fd61c8);
++ wreg32_idx_byteoffset(adev, 0x350D28, 0xd4fd61c8);
++ wreg32_idx_byteoffset(adev, 0x352D28, 0xd4fd61c8);
++ wreg32_idx_byteoffset(adev, 0x354D28, 0xd4fd61c8);
++ wreg32_idx_byteoffset(adev, 0x356D28, 0xd4fd61c8);
++ wreg32_idx_byteoffset(adev, 0x450D28, 0xd4fd61c8);
++ wreg32_idx_byteoffset(adev, 0x452D28, 0xd4fd61c8);
++ wreg32_idx_byteoffset(adev, 0x454D28, 0xd4fd61c8);
++ wreg32_idx_byteoffset(adev, 0x456D28, 0xd4fd61c8);
++ wreg32_idx_byteoffset(adev, 0x550D28, 0xd4fd61c8);
++ wreg32_idx_byteoffset(adev, 0x552D28, 0xd4fd61c8);
++ wreg32_idx_byteoffset(adev, 0x554D28, 0xd4fd61c8);
++ wreg32_idx_byteoffset(adev, 0x556D28, 0xd4fd61c8);
++ wreg32_idx_byteoffset(adev, 0x650D28, 0xd4fd61c8);
++ wreg32_idx_byteoffset(adev, 0x652D28, 0xd4fd61c8);
++ wreg32_idx_byteoffset(adev, 0x654D28, 0xd4fd61c8);
++ wreg32_idx_byteoffset(adev, 0x656D28, 0xd4fd61c8);
++ wreg32_idx_byteoffset(adev, 0x750D28, 0xd4fd61c8);
++ wreg32_idx_byteoffset(adev, 0x752D28, 0xd4fd61c8);
++ wreg32_idx_byteoffset(adev, 0x754D28, 0xd4fd61c8);
++ wreg32_idx_byteoffset(adev, 0x756D28, 0xd4fd61c8);
++ wreg32_idx_byteoffset(adev, 0x50D08, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x52D08, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x54D08, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x56D08, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x150D08, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x152D08, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x154D08, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x156D08, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x250D08, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x252D08, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x254D08, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x256D08, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x350D08, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x352D08, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x354D08, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x356D08, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x450D08, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x452D08, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x454D08, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x456D08, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x550D08, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x552D08, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x554D08, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x556D08, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x650D08, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x652D08, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x654D08, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x656D08, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x750D08, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x752D08, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x754D08, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x756D08, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x50D34, 0x00013fdd);
++ wreg32_idx_byteoffset(adev, 0x52D34, 0x00013fdd);
++ wreg32_idx_byteoffset(adev, 0x54D34, 0x00013fdd);
++ wreg32_idx_byteoffset(adev, 0x56D34, 0x00013fdd);
++ wreg32_idx_byteoffset(adev, 0x150D34, 0x00013fdd);
++ wreg32_idx_byteoffset(adev, 0x152D34, 0x00013fdd);
++ wreg32_idx_byteoffset(adev, 0x154D34, 0x00013fdd);
++ wreg32_idx_byteoffset(adev, 0x156D34, 0x00013fdd);
++ wreg32_idx_byteoffset(adev, 0x250D34, 0x00013fdd);
++ wreg32_idx_byteoffset(adev, 0x252D34, 0x00013fdd);
++ wreg32_idx_byteoffset(adev, 0x254D34, 0x00013fdd);
++ wreg32_idx_byteoffset(adev, 0x256D34, 0x00013fdd);
++ wreg32_idx_byteoffset(adev, 0x350D34, 0x00013fdd);
++ wreg32_idx_byteoffset(adev, 0x352D34, 0x00013fdd);
++ wreg32_idx_byteoffset(adev, 0x354D34, 0x00013fdd);
++ wreg32_idx_byteoffset(adev, 0x356D34, 0x00013fdd);
++ wreg32_idx_byteoffset(adev, 0x450D34, 0x00013fdd);
++ wreg32_idx_byteoffset(adev, 0x452D34, 0x00013fdd);
++ wreg32_idx_byteoffset(adev, 0x454D34, 0x00013fdd);
++ wreg32_idx_byteoffset(adev, 0x456D34, 0x00013fdd);
++ wreg32_idx_byteoffset(adev, 0x550D34, 0x00013fdd);
++ wreg32_idx_byteoffset(adev, 0x552D34, 0x00013fdd);
++ wreg32_idx_byteoffset(adev, 0x554D34, 0x00013fdd);
++ wreg32_idx_byteoffset(adev, 0x556D34, 0x00013fdd);
++ wreg32_idx_byteoffset(adev, 0x650D34, 0x00013fdd);
++ wreg32_idx_byteoffset(adev, 0x652D34, 0x00013fdd);
++ wreg32_idx_byteoffset(adev, 0x654D34, 0x00013fdd);
++ wreg32_idx_byteoffset(adev, 0x656D34, 0x00013fdd);
++ wreg32_idx_byteoffset(adev, 0x750D34, 0x00013fdd);
++ wreg32_idx_byteoffset(adev, 0x752D34, 0x00013fdd);
++ wreg32_idx_byteoffset(adev, 0x754D34, 0x00013fdd);
++ wreg32_idx_byteoffset(adev, 0x756D34, 0x00013fdd);
++ wreg32_idx_byteoffset(adev, 0x50D30, 0x96b97167);
++ wreg32_idx_byteoffset(adev, 0x52D30, 0x96b97167);
++ wreg32_idx_byteoffset(adev, 0x54D30, 0x96b97167);
++ wreg32_idx_byteoffset(adev, 0x56D30, 0x96b97167);
++ wreg32_idx_byteoffset(adev, 0x150D30, 0x96b97167);
++ wreg32_idx_byteoffset(adev, 0x152D30, 0x96b97167);
++ wreg32_idx_byteoffset(adev, 0x154D30, 0x96b97167);
++ wreg32_idx_byteoffset(adev, 0x156D30, 0x96b97167);
++ wreg32_idx_byteoffset(adev, 0x250D30, 0x96b97167);
++ wreg32_idx_byteoffset(adev, 0x252D30, 0x96b97167);
++ wreg32_idx_byteoffset(adev, 0x254D30, 0x96b97167);
++ wreg32_idx_byteoffset(adev, 0x256D30, 0x96b97167);
++ wreg32_idx_byteoffset(adev, 0x350D30, 0x96b97167);
++ wreg32_idx_byteoffset(adev, 0x352D30, 0x96b97167);
++ wreg32_idx_byteoffset(adev, 0x354D30, 0x96b97167);
++ wreg32_idx_byteoffset(adev, 0x356D30, 0x96b97167);
++ wreg32_idx_byteoffset(adev, 0x450D30, 0x96b97167);
++ wreg32_idx_byteoffset(adev, 0x452D30, 0x96b97167);
++ wreg32_idx_byteoffset(adev, 0x454D30, 0x96b97167);
++ wreg32_idx_byteoffset(adev, 0x456D30, 0x96b97167);
++ wreg32_idx_byteoffset(adev, 0x550D30, 0x96b97167);
++ wreg32_idx_byteoffset(adev, 0x552D30, 0x96b97167);
++ wreg32_idx_byteoffset(adev, 0x554D30, 0x96b97167);
++ wreg32_idx_byteoffset(adev, 0x556D30, 0x96b97167);
++ wreg32_idx_byteoffset(adev, 0x650D30, 0x96b97167);
++ wreg32_idx_byteoffset(adev, 0x652D30, 0x96b97167);
++ wreg32_idx_byteoffset(adev, 0x654D30, 0x96b97167);
++ wreg32_idx_byteoffset(adev, 0x656D30, 0x96b97167);
++ wreg32_idx_byteoffset(adev, 0x750D30, 0x96b97167);
++ wreg32_idx_byteoffset(adev, 0x752D30, 0x96b97167);
++ wreg32_idx_byteoffset(adev, 0x754D30, 0x96b97167);
++ wreg32_idx_byteoffset(adev, 0x756D30, 0x96b97167);
++ wreg32_idx_byteoffset(adev, 0x50D0C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x52D0C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x54D0C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x56D0C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x150D0C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x152D0C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x154D0C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x156D0C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x250D0C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x252D0C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x254D0C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x256D0C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x350D0C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x352D0C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x354D0C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x356D0C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x450D0C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x452D0C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x454D0C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x456D0C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x550D0C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x552D0C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x554D0C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x556D0C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x650D0C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x652D0C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x654D0C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x656D0C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x750D0C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x752D0C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x754D0C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x756D0C, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x50D3C, 0x0000fa3b);
++ wreg32_idx_byteoffset(adev, 0x52D3C, 0x0000fa3b);
++ wreg32_idx_byteoffset(adev, 0x54D3C, 0x0000fa3b);
++ wreg32_idx_byteoffset(adev, 0x56D3C, 0x0000fa3b);
++ wreg32_idx_byteoffset(adev, 0x150D3C, 0x0000fa3b);
++ wreg32_idx_byteoffset(adev, 0x152D3C, 0x0000fa3b);
++ wreg32_idx_byteoffset(adev, 0x154D3C, 0x0000fa3b);
++ wreg32_idx_byteoffset(adev, 0x156D3C, 0x0000fa3b);
++ wreg32_idx_byteoffset(adev, 0x250D3C, 0x0000fa3b);
++ wreg32_idx_byteoffset(adev, 0x252D3C, 0x0000fa3b);
++ wreg32_idx_byteoffset(adev, 0x254D3C, 0x0000fa3b);
++ wreg32_idx_byteoffset(adev, 0x256D3C, 0x0000fa3b);
++ wreg32_idx_byteoffset(adev, 0x350D3C, 0x0000fa3b);
++ wreg32_idx_byteoffset(adev, 0x352D3C, 0x0000fa3b);
++ wreg32_idx_byteoffset(adev, 0x354D3C, 0x0000fa3b);
++ wreg32_idx_byteoffset(adev, 0x356D3C, 0x0000fa3b);
++ wreg32_idx_byteoffset(adev, 0x450D3C, 0x0000fa3b);
++ wreg32_idx_byteoffset(adev, 0x452D3C, 0x0000fa3b);
++ wreg32_idx_byteoffset(adev, 0x454D3C, 0x0000fa3b);
++ wreg32_idx_byteoffset(adev, 0x456D3C, 0x0000fa3b);
++ wreg32_idx_byteoffset(adev, 0x550D3C, 0x0000fa3b);
++ wreg32_idx_byteoffset(adev, 0x552D3C, 0x0000fa3b);
++ wreg32_idx_byteoffset(adev, 0x554D3C, 0x0000fa3b);
++ wreg32_idx_byteoffset(adev, 0x556D3C, 0x0000fa3b);
++ wreg32_idx_byteoffset(adev, 0x650D3C, 0x0000fa3b);
++ wreg32_idx_byteoffset(adev, 0x652D3C, 0x0000fa3b);
++ wreg32_idx_byteoffset(adev, 0x654D3C, 0x0000fa3b);
++ wreg32_idx_byteoffset(adev, 0x656D3C, 0x0000fa3b);
++ wreg32_idx_byteoffset(adev, 0x750D3C, 0x0000fa3b);
++ wreg32_idx_byteoffset(adev, 0x752D3C, 0x0000fa3b);
++ wreg32_idx_byteoffset(adev, 0x754D3C, 0x0000fa3b);
++ wreg32_idx_byteoffset(adev, 0x756D3C, 0x0000fa3b);
++ wreg32_idx_byteoffset(adev, 0x50D38, 0x2f8dd6cd);
++ wreg32_idx_byteoffset(adev, 0x52D38, 0x2f8dd6cd);
++ wreg32_idx_byteoffset(adev, 0x54D38, 0x2f8dd6cd);
++ wreg32_idx_byteoffset(adev, 0x56D38, 0x2f8dd6cd);
++ wreg32_idx_byteoffset(adev, 0x150D38, 0x2f8dd6cd);
++ wreg32_idx_byteoffset(adev, 0x152D38, 0x2f8dd6cd);
++ wreg32_idx_byteoffset(adev, 0x154D38, 0x2f8dd6cd);
++ wreg32_idx_byteoffset(adev, 0x156D38, 0x2f8dd6cd);
++ wreg32_idx_byteoffset(adev, 0x250D38, 0x2f8dd6cd);
++ wreg32_idx_byteoffset(adev, 0x252D38, 0x2f8dd6cd);
++ wreg32_idx_byteoffset(adev, 0x254D38, 0x2f8dd6cd);
++ wreg32_idx_byteoffset(adev, 0x256D38, 0x2f8dd6cd);
++ wreg32_idx_byteoffset(adev, 0x350D38, 0x2f8dd6cd);
++ wreg32_idx_byteoffset(adev, 0x352D38, 0x2f8dd6cd);
++ wreg32_idx_byteoffset(adev, 0x354D38, 0x2f8dd6cd);
++ wreg32_idx_byteoffset(adev, 0x356D38, 0x2f8dd6cd);
++ wreg32_idx_byteoffset(adev, 0x450D38, 0x2f8dd6cd);
++ wreg32_idx_byteoffset(adev, 0x452D38, 0x2f8dd6cd);
++ wreg32_idx_byteoffset(adev, 0x454D38, 0x2f8dd6cd);
++ wreg32_idx_byteoffset(adev, 0x456D38, 0x2f8dd6cd);
++ wreg32_idx_byteoffset(adev, 0x550D38, 0x2f8dd6cd);
++ wreg32_idx_byteoffset(adev, 0x552D38, 0x2f8dd6cd);
++ wreg32_idx_byteoffset(adev, 0x554D38, 0x2f8dd6cd);
++ wreg32_idx_byteoffset(adev, 0x556D38, 0x2f8dd6cd);
++ wreg32_idx_byteoffset(adev, 0x650D38, 0x2f8dd6cd);
++ wreg32_idx_byteoffset(adev, 0x652D38, 0x2f8dd6cd);
++ wreg32_idx_byteoffset(adev, 0x654D38, 0x2f8dd6cd);
++ wreg32_idx_byteoffset(adev, 0x656D38, 0x2f8dd6cd);
++ wreg32_idx_byteoffset(adev, 0x750D38, 0x2f8dd6cd);
++ wreg32_idx_byteoffset(adev, 0x752D38, 0x2f8dd6cd);
++ wreg32_idx_byteoffset(adev, 0x754D38, 0x2f8dd6cd);
++ wreg32_idx_byteoffset(adev, 0x756D38, 0x2f8dd6cd);
++ wreg32_idx_byteoffset(adev, 0x50D10, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x52D10, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x54D10, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x56D10, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x150D10, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x152D10, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x154D10, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x156D10, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x250D10, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x252D10, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x254D10, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x256D10, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x350D10, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x352D10, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x354D10, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x356D10, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x450D10, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x452D10, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x454D10, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x456D10, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x550D10, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x552D10, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x554D10, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x556D10, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x650D10, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x652D10, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x654D10, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x656D10, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x750D10, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x752D10, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x754D10, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x756D10, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x50D44, 0x00010589);
++ wreg32_idx_byteoffset(adev, 0x52D44, 0x00010589);
++ wreg32_idx_byteoffset(adev, 0x54D44, 0x00010589);
++ wreg32_idx_byteoffset(adev, 0x56D44, 0x00010589);
++ wreg32_idx_byteoffset(adev, 0x150D44, 0x00010589);
++ wreg32_idx_byteoffset(adev, 0x152D44, 0x00010589);
++ wreg32_idx_byteoffset(adev, 0x154D44, 0x00010589);
++ wreg32_idx_byteoffset(adev, 0x156D44, 0x00010589);
++ wreg32_idx_byteoffset(adev, 0x250D44, 0x00010589);
++ wreg32_idx_byteoffset(adev, 0x252D44, 0x00010589);
++ wreg32_idx_byteoffset(adev, 0x254D44, 0x00010589);
++ wreg32_idx_byteoffset(adev, 0x256D44, 0x00010589);
++ wreg32_idx_byteoffset(adev, 0x350D44, 0x00010589);
++ wreg32_idx_byteoffset(adev, 0x352D44, 0x00010589);
++ wreg32_idx_byteoffset(adev, 0x354D44, 0x00010589);
++ wreg32_idx_byteoffset(adev, 0x356D44, 0x00010589);
++ wreg32_idx_byteoffset(adev, 0x450D44, 0x00010589);
++ wreg32_idx_byteoffset(adev, 0x452D44, 0x00010589);
++ wreg32_idx_byteoffset(adev, 0x454D44, 0x00010589);
++ wreg32_idx_byteoffset(adev, 0x456D44, 0x00010589);
++ wreg32_idx_byteoffset(adev, 0x550D44, 0x00010589);
++ wreg32_idx_byteoffset(adev, 0x552D44, 0x00010589);
++ wreg32_idx_byteoffset(adev, 0x554D44, 0x00010589);
++ wreg32_idx_byteoffset(adev, 0x556D44, 0x00010589);
++ wreg32_idx_byteoffset(adev, 0x650D44, 0x00010589);
++ wreg32_idx_byteoffset(adev, 0x652D44, 0x00010589);
++ wreg32_idx_byteoffset(adev, 0x654D44, 0x00010589);
++ wreg32_idx_byteoffset(adev, 0x656D44, 0x00010589);
++ wreg32_idx_byteoffset(adev, 0x750D44, 0x00010589);
++ wreg32_idx_byteoffset(adev, 0x752D44, 0x00010589);
++ wreg32_idx_byteoffset(adev, 0x754D44, 0x00010589);
++ wreg32_idx_byteoffset(adev, 0x756D44, 0x00010589);
++ wreg32_idx_byteoffset(adev, 0x50D40, 0x5fc68e4e);
++ wreg32_idx_byteoffset(adev, 0x52D40, 0x5fc68e4e);
++ wreg32_idx_byteoffset(adev, 0x54D40, 0x5fc68e4e);
++ wreg32_idx_byteoffset(adev, 0x56D40, 0x5fc68e4e);
++ wreg32_idx_byteoffset(adev, 0x150D40, 0x5fc68e4e);
++ wreg32_idx_byteoffset(adev, 0x152D40, 0x5fc68e4e);
++ wreg32_idx_byteoffset(adev, 0x154D40, 0x5fc68e4e);
++ wreg32_idx_byteoffset(adev, 0x156D40, 0x5fc68e4e);
++ wreg32_idx_byteoffset(adev, 0x250D40, 0x5fc68e4e);
++ wreg32_idx_byteoffset(adev, 0x252D40, 0x5fc68e4e);
++ wreg32_idx_byteoffset(adev, 0x254D40, 0x5fc68e4e);
++ wreg32_idx_byteoffset(adev, 0x256D40, 0x5fc68e4e);
++ wreg32_idx_byteoffset(adev, 0x350D40, 0x5fc68e4e);
++ wreg32_idx_byteoffset(adev, 0x352D40, 0x5fc68e4e);
++ wreg32_idx_byteoffset(adev, 0x354D40, 0x5fc68e4e);
++ wreg32_idx_byteoffset(adev, 0x356D40, 0x5fc68e4e);
++ wreg32_idx_byteoffset(adev, 0x450D40, 0x5fc68e4e);
++ wreg32_idx_byteoffset(adev, 0x452D40, 0x5fc68e4e);
++ wreg32_idx_byteoffset(adev, 0x454D40, 0x5fc68e4e);
++ wreg32_idx_byteoffset(adev, 0x456D40, 0x5fc68e4e);
++ wreg32_idx_byteoffset(adev, 0x550D40, 0x5fc68e4e);
++ wreg32_idx_byteoffset(adev, 0x552D40, 0x5fc68e4e);
++ wreg32_idx_byteoffset(adev, 0x554D40, 0x5fc68e4e);
++ wreg32_idx_byteoffset(adev, 0x556D40, 0x5fc68e4e);
++ wreg32_idx_byteoffset(adev, 0x650D40, 0x5fc68e4e);
++ wreg32_idx_byteoffset(adev, 0x652D40, 0x5fc68e4e);
++ wreg32_idx_byteoffset(adev, 0x654D40, 0x5fc68e4e);
++ wreg32_idx_byteoffset(adev, 0x656D40, 0x5fc68e4e);
++ wreg32_idx_byteoffset(adev, 0x750D40, 0x5fc68e4e);
++ wreg32_idx_byteoffset(adev, 0x752D40, 0x5fc68e4e);
++ wreg32_idx_byteoffset(adev, 0x754D40, 0x5fc68e4e);
++ wreg32_idx_byteoffset(adev, 0x756D40, 0x5fc68e4e);
++ wreg32_idx_byteoffset(adev, 0x50D14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x52D14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x54D14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x56D14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x150D14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x152D14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x154D14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x156D14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x250D14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x252D14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x254D14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x256D14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x350D14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x352D14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x354D14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x356D14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x450D14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x452D14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x454D14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x456D14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x550D14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x552D14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x554D14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x556D14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x650D14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x652D14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x654D14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x656D14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x750D14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x752D14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x754D14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x756D14, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x50D4C, 0x0000881b);
++ wreg32_idx_byteoffset(adev, 0x52D4C, 0x0000881b);
++ wreg32_idx_byteoffset(adev, 0x54D4C, 0x0000881b);
++ wreg32_idx_byteoffset(adev, 0x56D4C, 0x0000881b);
++ wreg32_idx_byteoffset(adev, 0x150D4C, 0x0000881b);
++ wreg32_idx_byteoffset(adev, 0x152D4C, 0x0000881b);
++ wreg32_idx_byteoffset(adev, 0x154D4C, 0x0000881b);
++ wreg32_idx_byteoffset(adev, 0x156D4C, 0x0000881b);
++ wreg32_idx_byteoffset(adev, 0x250D4C, 0x0000881b);
++ wreg32_idx_byteoffset(adev, 0x252D4C, 0x0000881b);
++ wreg32_idx_byteoffset(adev, 0x254D4C, 0x0000881b);
++ wreg32_idx_byteoffset(adev, 0x256D4C, 0x0000881b);
++ wreg32_idx_byteoffset(adev, 0x350D4C, 0x0000881b);
++ wreg32_idx_byteoffset(adev, 0x352D4C, 0x0000881b);
++ wreg32_idx_byteoffset(adev, 0x354D4C, 0x0000881b);
++ wreg32_idx_byteoffset(adev, 0x356D4C, 0x0000881b);
++ wreg32_idx_byteoffset(adev, 0x450D4C, 0x0000881b);
++ wreg32_idx_byteoffset(adev, 0x452D4C, 0x0000881b);
++ wreg32_idx_byteoffset(adev, 0x454D4C, 0x0000881b);
++ wreg32_idx_byteoffset(adev, 0x456D4C, 0x0000881b);
++ wreg32_idx_byteoffset(adev, 0x550D4C, 0x0000881b);
++ wreg32_idx_byteoffset(adev, 0x552D4C, 0x0000881b);
++ wreg32_idx_byteoffset(adev, 0x554D4C, 0x0000881b);
++ wreg32_idx_byteoffset(adev, 0x556D4C, 0x0000881b);
++ wreg32_idx_byteoffset(adev, 0x650D4C, 0x0000881b);
++ wreg32_idx_byteoffset(adev, 0x652D4C, 0x0000881b);
++ wreg32_idx_byteoffset(adev, 0x654D4C, 0x0000881b);
++ wreg32_idx_byteoffset(adev, 0x656D4C, 0x0000881b);
++ wreg32_idx_byteoffset(adev, 0x750D4C, 0x0000881b);
++ wreg32_idx_byteoffset(adev, 0x752D4C, 0x0000881b);
++ wreg32_idx_byteoffset(adev, 0x754D4C, 0x0000881b);
++ wreg32_idx_byteoffset(adev, 0x756D4C, 0x0000881b);
++ wreg32_idx_byteoffset(adev, 0x50D48, 0x02182082);
++ wreg32_idx_byteoffset(adev, 0x52D48, 0x02182082);
++ wreg32_idx_byteoffset(adev, 0x54D48, 0x02182082);
++ wreg32_idx_byteoffset(adev, 0x56D48, 0x02182082);
++ wreg32_idx_byteoffset(adev, 0x150D48, 0x02182082);
++ wreg32_idx_byteoffset(adev, 0x152D48, 0x02182082);
++ wreg32_idx_byteoffset(adev, 0x154D48, 0x02182082);
++ wreg32_idx_byteoffset(adev, 0x156D48, 0x02182082);
++ wreg32_idx_byteoffset(adev, 0x250D48, 0x02182082);
++ wreg32_idx_byteoffset(adev, 0x252D48, 0x02182082);
++ wreg32_idx_byteoffset(adev, 0x254D48, 0x02182082);
++ wreg32_idx_byteoffset(adev, 0x256D48, 0x02182082);
++ wreg32_idx_byteoffset(adev, 0x350D48, 0x02182082);
++ wreg32_idx_byteoffset(adev, 0x352D48, 0x02182082);
++ wreg32_idx_byteoffset(adev, 0x354D48, 0x02182082);
++ wreg32_idx_byteoffset(adev, 0x356D48, 0x02182082);
++ wreg32_idx_byteoffset(adev, 0x450D48, 0x02182082);
++ wreg32_idx_byteoffset(adev, 0x452D48, 0x02182082);
++ wreg32_idx_byteoffset(adev, 0x454D48, 0x02182082);
++ wreg32_idx_byteoffset(adev, 0x456D48, 0x02182082);
++ wreg32_idx_byteoffset(adev, 0x550D48, 0x02182082);
++ wreg32_idx_byteoffset(adev, 0x552D48, 0x02182082);
++ wreg32_idx_byteoffset(adev, 0x554D48, 0x02182082);
++ wreg32_idx_byteoffset(adev, 0x556D48, 0x02182082);
++ wreg32_idx_byteoffset(adev, 0x650D48, 0x02182082);
++ wreg32_idx_byteoffset(adev, 0x652D48, 0x02182082);
++ wreg32_idx_byteoffset(adev, 0x654D48, 0x02182082);
++ wreg32_idx_byteoffset(adev, 0x656D48, 0x02182082);
++ wreg32_idx_byteoffset(adev, 0x750D48, 0x02182082);
++ wreg32_idx_byteoffset(adev, 0x752D48, 0x02182082);
++ wreg32_idx_byteoffset(adev, 0x754D48, 0x02182082);
++ wreg32_idx_byteoffset(adev, 0x756D48, 0x02182082);
++ wreg32_idx_byteoffset(adev, 0x50144, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x52144, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x54144, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x56144, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x150144, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x152144, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x154144, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x156144, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x250144, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x252144, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x254144, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x256144, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x350144, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x352144, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x354144, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x356144, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x450144, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x452144, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x454144, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x456144, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x550144, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x552144, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x554144, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x556144, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x650144, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x652144, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x654144, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x656144, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x750144, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x752144, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x754144, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x756144, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x57018, 0x00004200);
++ wreg32_idx_byteoffset(adev, 0x157018, 0x00004200);
++ wreg32_idx_byteoffset(adev, 0x257018, 0x00004200);
++ wreg32_idx_byteoffset(adev, 0x357018, 0x00004200);
++ wreg32_idx_byteoffset(adev, 0x457018, 0x00004200);
++ wreg32_idx_byteoffset(adev, 0x557018, 0x00004200);
++ wreg32_idx_byteoffset(adev, 0x657018, 0x00004200);
++ wreg32_idx_byteoffset(adev, 0x757018, 0x00004200);
++ wreg32_idx_byteoffset(adev, 0x57018, 0x00004600);
++ wreg32_idx_byteoffset(adev, 0x157018, 0x00004600);
++ wreg32_idx_byteoffset(adev, 0x257018, 0x00004600);
++ wreg32_idx_byteoffset(adev, 0x357018, 0x00004600);
++ wreg32_idx_byteoffset(adev, 0x457018, 0x00004600);
++ wreg32_idx_byteoffset(adev, 0x557018, 0x00004600);
++ wreg32_idx_byteoffset(adev, 0x657018, 0x00004600);
++ wreg32_idx_byteoffset(adev, 0x757018, 0x00004600);
++ wreg32_idx_byteoffset(adev, 0x57018, 0x00004400);
++ wreg32_idx_byteoffset(adev, 0x157018, 0x00004400);
++ wreg32_idx_byteoffset(adev, 0x257018, 0x00004400);
++ wreg32_idx_byteoffset(adev, 0x357018, 0x00004400);
++ wreg32_idx_byteoffset(adev, 0x457018, 0x00004400);
++ wreg32_idx_byteoffset(adev, 0x557018, 0x00004400);
++ wreg32_idx_byteoffset(adev, 0x657018, 0x00004400);
++ wreg32_idx_byteoffset(adev, 0x757018, 0x00004400);
++ wreg32_idx_byteoffset(adev, 0x57010, 0x00000024);
++ wreg32_idx_byteoffset(adev, 0x157010, 0x00000024);
++ wreg32_idx_byteoffset(adev, 0x257010, 0x00000024);
++ wreg32_idx_byteoffset(adev, 0x357010, 0x00000024);
++ wreg32_idx_byteoffset(adev, 0x457010, 0x00000024);
++ wreg32_idx_byteoffset(adev, 0x557010, 0x00000024);
++ wreg32_idx_byteoffset(adev, 0x657010, 0x00000024);
++ wreg32_idx_byteoffset(adev, 0x757010, 0x00000024);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00720080);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000343);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0000f00b);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x0000c800);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0000f00e);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x0000c800);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0000f011);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x0000c800);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0000f014);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x0000c800);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0000f008);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000080);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x000900de);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x0000fff0);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x000900df);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x0000ffff);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x000900e0);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x0000fff0);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x000900e1);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x0000ffef);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x000900f7);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00003210);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x000900fe);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00004000);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x000900d4);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000400);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x000900d7);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000400);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x000900f2);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x000009c4);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x001900f2);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x000009c4);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x002900f2);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x000009c4);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x003900f2);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x000009c4);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x000900f3);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x000000fa);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x001900f3);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x000000fa);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x002900f3);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x000000fa);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x003900f3);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x000000fa);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x000900f4);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x0000007d);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x001900f4);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x0000007d);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x002900f4);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x0000007d);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x003900f4);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x0000007d);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x000200aa);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000018);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x001200aa);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000018);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x002200aa);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000018);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x003200aa);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000018);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0002f087);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0002003f);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000080);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0002003c);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00020040);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090000);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090001);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000498);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090002);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00008410);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090003);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000008);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090004);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090005);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000c00);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090006);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x000007ff);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090007);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000740);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090008);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00008c10);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090009);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000430);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009000a);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000540);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009000b);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00008c10);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009000c);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000438);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009000d);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000540);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009000e);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00008c10);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009000f);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090010);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000440);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090011);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00008c10);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090012);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090013);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090014);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00001c00);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090015);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x000007ff);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090016);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000740);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090017);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00009c10);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090018);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x000004b0);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090019);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000540);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009001a);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00009c10);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009001b);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000008);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009001c);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000618);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009001d);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00009c10);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009001e);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009001f);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000440);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090020);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00009c10);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090021);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090022);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090023);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000400);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090024);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x000007f8);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090025);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000440);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090026);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00009c10);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090027);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000007);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090028);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x0000009a);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090029);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00001410);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009002a);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000020);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009002b);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000788);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009002c);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00008448);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009002d);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000008);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009002e);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000004);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009002f);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00001400);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090030);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000007);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090031);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000618);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090032);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00008410);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090033);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000437);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090034);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000540);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090035);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00008c10);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090036);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000570);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090037);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000540);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090038);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00009410);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090039);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000008);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009003a);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000004);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009003b);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00001400);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009003c);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000530);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009003d);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000540);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009003e);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00009410);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009003f);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090040);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000004);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090041);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00001400);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090042);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000430);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090043);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000540);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090044);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00009410);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090045);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090046);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000004);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090047);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000400);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090048);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090049);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000004);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009004a);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000c00);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009004b);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000410);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009004c);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000540);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009004d);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00009410);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009004e);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000008);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009004f);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000658);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090050);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00009410);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090051);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000008);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090052);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090053);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000400);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090054);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000038);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090055);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000788);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090056);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00009448);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090057);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x0000000a);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090058);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090059);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00001400);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009005a);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x0000000b);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009005b);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000048);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009005c);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00009410);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009005d);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x0000001c);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009005e);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000340);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009005f);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00009410);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090060);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x0000001d);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090061);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000048);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090062);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00009410);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090063);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000004);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090064);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000340);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090065);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00009410);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090066);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090067);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000004);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090068);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00001400);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090069);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009006a);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009006b);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00001400);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009006c);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x0000000c);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009006d);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000048);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009006e);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00009410);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009006f);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000004);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090070);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000048);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090071);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00009410);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090072);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000007);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090073);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000740);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090074);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00009410);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090075);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000014);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090076);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000048);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090077);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00009410);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090078);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x000007f8);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090079);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000440);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009007a);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00009410);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009007b);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000010);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009007c);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000788);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009007d);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00009448);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009007e);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x0000ff80);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x0009007f);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x000006ff);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090080);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00008448);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090081);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x0000fff8);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090082);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x000006f7);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090083);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00008448);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090084);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090085);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x5700C, 0x00090086);
++ wreg32_idx_byteoffset(adev, 0x57004, 0x00000000);
++ wreg32_idx_byteoffset(adev, 0x570C8, 0x00001bd5);
++ wreg32_idx_byteoffset(adev, 0x570C8, 0x00000bd5);
++ /* Poll register 0x570c8 until the DFI_INIT_COMPLETE bit (0x4000) is set. */
++ while (1) {
++ u32 temp_val = RREG32(0x570c8 / 4);
++
++ if (temp_val & 0x4000)
++ break;
++
++ pr_err("waiting for DFI_INIT_COMPLETE...\n");
++ udelay(10);
++ }
++ pr_err("DFI init success.\n");
++
++ wreg32_idx_byteoffset(adev, 0x51124, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0x5111C, 0x00000008);
++ wreg32_idx_byteoffset(adev, 0x57018, 0x0000c400);
++ wreg32_idx_byteoffset(adev, 0x157018, 0x0000c400);
++ wreg32_idx_byteoffset(adev, 0x257018, 0x0000c400);
++ wreg32_idx_byteoffset(adev, 0x357018, 0x0000c400);
++ wreg32_idx_byteoffset(adev, 0x457018, 0x0000c400);
++ wreg32_idx_byteoffset(adev, 0x557018, 0x0000c400);
++ wreg32_idx_byteoffset(adev, 0x657018, 0x0000c400);
++ wreg32_idx_byteoffset(adev, 0x757018, 0x0000c400);
++ wreg32_idx_byteoffset(adev, 0x5111C, 0x00030008);
++ wreg32_idx_byteoffset(adev, 0x5111C, 0x00020008);
++ wreg32_idx_byteoffset(adev, 0x57000, 0x0008e80f);
++ wreg32_idx_byteoffset(adev, 0x5716C, 0x00100000);
++ wreg32_idx_byteoffset(adev, 0x5716C, 0x00110010);
++ wreg32_idx_byteoffset(adev, 0x5716C, 0x00120013);
++ wreg32_idx_byteoffset(adev, 0x5716C, 0x001300cc);
++ wreg32_idx_byteoffset(adev, 0x5716C, 0x00140000);
++ wreg32_idx_byteoffset(adev, 0x57000, 0x0008e800);
++ wreg32_idx_byteoffset(adev, 0x50100, 0x80000103);
++ wreg32_idx_byteoffset(adev, 0x52100, 0x80000103);
++ wreg32_idx_byteoffset(adev, 0x54100, 0x80000103);
++ wreg32_idx_byteoffset(adev, 0x56100, 0x80000103);
++ wreg32_idx_byteoffset(adev, 0x150100, 0x80000103);
++ wreg32_idx_byteoffset(adev, 0x152100, 0x80000103);
++ wreg32_idx_byteoffset(adev, 0x154100, 0x80000103);
++ wreg32_idx_byteoffset(adev, 0x156100, 0x80000103);
++ wreg32_idx_byteoffset(adev, 0x250100, 0x80000103);
++ wreg32_idx_byteoffset(adev, 0x252100, 0x80000103);
++ wreg32_idx_byteoffset(adev, 0x254100, 0x80000103);
++ wreg32_idx_byteoffset(adev, 0x256100, 0x80000103);
++ wreg32_idx_byteoffset(adev, 0x350100, 0x80000103);
++ wreg32_idx_byteoffset(adev, 0x352100, 0x80000103);
++ wreg32_idx_byteoffset(adev, 0x354100, 0x80000103);
++ wreg32_idx_byteoffset(adev, 0x356100, 0x80000103);
++ wreg32_idx_byteoffset(adev, 0x450100, 0x80000103);
++ wreg32_idx_byteoffset(adev, 0x452100, 0x80000103);
++ wreg32_idx_byteoffset(adev, 0x454100, 0x80000103);
++ wreg32_idx_byteoffset(adev, 0x456100, 0x80000103);
++ wreg32_idx_byteoffset(adev, 0x550100, 0x80000103);
++ wreg32_idx_byteoffset(adev, 0x552100, 0x80000103);
++ wreg32_idx_byteoffset(adev, 0x554100, 0x80000103);
++ wreg32_idx_byteoffset(adev, 0x556100, 0x80000103);
++ wreg32_idx_byteoffset(adev, 0x650100, 0x80000103);
++ wreg32_idx_byteoffset(adev, 0x652100, 0x80000103);
++ wreg32_idx_byteoffset(adev, 0x654100, 0x80000103);
++ wreg32_idx_byteoffset(adev, 0x656100, 0x80000103);
++ wreg32_idx_byteoffset(adev, 0x750100, 0x80000103);
++ wreg32_idx_byteoffset(adev, 0x752100, 0x80000103);
++ wreg32_idx_byteoffset(adev, 0x754100, 0x80000103);
++ wreg32_idx_byteoffset(adev, 0x756100, 0x80000103);
++ /* Done with UMC Init on GPU 0 */
++
++ /* Start with GC Init on GPU 0 */
++ wreg32_idx_byteoffset(adev, 0xABC8, 0x003f3fbf);
++ wreg32_idx_byteoffset(adev, 0x9C68, 0x00000001);
++ wreg32_idx_byteoffset(adev, 0xABE8, 0x0d40bff0);
++ wreg32_idx_byteoffset(adev, 0xA31C, 0xffffffff);
++ wreg32_idx_byteoffset(adev, 0xA320, 0x0000001f);
++ wreg32_idx_byteoffset(adev, 0xA324, 0xffffffff);
++ wreg32_idx_byteoffset(adev, 0xA328, 0x0000001f);
++ wreg32_idx_byteoffset(adev, 0xA32C, 0xffffffff);
++ wreg32_idx_byteoffset(adev, 0xA330, 0x0000001f);
++ wreg32_idx_byteoffset(adev, 0xA334, 0xffffffff);
++ wreg32_idx_byteoffset(adev, 0xA338, 0x0000001f);
++ wreg32_idx_byteoffset(adev, 0xA33C, 0xffffffff);
++ wreg32_idx_byteoffset(adev, 0xA340, 0x0000001f);
++ wreg32_idx_byteoffset(adev, 0xA344, 0xffffffff);
++ wreg32_idx_byteoffset(adev, 0xA348, 0x0000001f);
++ wreg32_idx_byteoffset(adev, 0xA34C, 0xffffffff);
++ wreg32_idx_byteoffset(adev, 0xA350, 0x0000001f);
++ wreg32_idx_byteoffset(adev, 0xA354, 0xffffffff);
++ wreg32_idx_byteoffset(adev, 0xA358, 0x0000001f);
++ wreg32_idx_byteoffset(adev, 0xA35C, 0xffffffff);
++ wreg32_idx_byteoffset(adev, 0xA360, 0x0000001f);
++ wreg32_idx_byteoffset(adev, 0xA364, 0xffffffff);
++ wreg32_idx_byteoffset(adev, 0xA368, 0x0000001f);
++ wreg32_idx_byteoffset(adev, 0xA36C, 0xffffffff);
++ wreg32_idx_byteoffset(adev, 0xA370, 0x0000001f);
++ wreg32_idx_byteoffset(adev, 0xA374, 0xffffffff);
++ wreg32_idx_byteoffset(adev, 0xA378, 0x0000001f);
++ wreg32_idx_byteoffset(adev, 0xA37C, 0xffffffff);
++ wreg32_idx_byteoffset(adev, 0xA380, 0x0000001f);
++ wreg32_idx_byteoffset(adev, 0xA384, 0xffffffff);
++ wreg32_idx_byteoffset(adev, 0xA388, 0x0000001f);
++ wreg32_idx_byteoffset(adev, 0xA38C, 0xffffffff);
++ wreg32_idx_byteoffset(adev, 0xA390, 0x0000001f);
++ wreg32_idx_byteoffset(adev, 0xA394, 0xffffffff);
++ wreg32_idx_byteoffset(adev, 0xA398, 0x0000001f);
++ wreg32_idx_byteoffset(adev, 0xA39C, 0xffffffff);
++ wreg32_idx_byteoffset(adev, 0xA3A0, 0x0000001f);
++ wreg32_idx_byteoffset(adev, 0xA3A4, 0xffffffff);
++ wreg32_idx_byteoffset(adev, 0xA3A8, 0x0000001f);
++ wreg32_idx_byteoffset(adev, 0xA120, 0x00020000);
++ wreg32_idx_byteoffset(adev, 0xA100, 0x00080602);
++ wreg32_idx_byteoffset(adev, 0xA140, 0x60e40feb);
++ /* Done with GC Init on GPU 0 */
++
++}
++
++static const u32 vega20_golden_init[] =
++{
++ 0xde3, 0xffffffff, 0x00000100,
++ 0xde4, 0xffffffff, 0x00000000,
++ 0xde3, 0xffffffff, 0x00000100,
++ 0xde3, 0xffffffff, 0x00000100,
++ 0x296b, 0xffffffff, 0x00000000,
++ 0x2680, 0xffffffff, 0x00014104,
++ 0x2682, 0xffffffff, 0x0a000000,
++ 0x2688, 0xffffffff, 0x04000080,
++ 0x263e, 0xffffffff, 0x2a114042,
++ 0x2542, 0xffffffff, 0x010b0000,
++ 0x2b03, 0xffffffff, 0xb5d3f197,
++ 0x2b04, 0xffffffff, 0x4a2c0e68,
++ 0x260d, 0xffffffff, 0x00000420,
++ 0x22fc, 0xffffffff, 0x00000001,
++ 0x22fd, 0xffffffff, 0x04040000,
++ 0xc281, 0xffffffff, 0x00000000,
++ 0x127a, 0xffffffff, 0x40000051,
++ 0x127b, 0xffffffff, 0x3f000100,
++ 0x127d, 0xffffffff, 0x02831f07,
++ 0x12e7, 0xffffffff, 0x00403000,
++ 0x1347, 0xffffffff, 0x00403000,
++ 0x13a7, 0xffffffff, 0x00403000,
++ 0x1407, 0xffffffff, 0x00403000,
++ 0x1467, 0xffffffff, 0x00403000,
++ 0x14c7, 0xffffffff, 0x00403000,
++ 0x1527, 0xffffffff, 0x00403000,
++ 0x1587, 0xffffffff, 0x00403000,
++ 0x15e7, 0xffffffff, 0x00403000,
++ 0x1647, 0xffffffff, 0x00403000,
++ 0x187a, 0xffffffff, 0x0003c800,
++ 0x187b, 0xffffffff, 0x3f000100,
++ 0x187d, 0xffffffff, 0x02831f07,
++ 0x18e7, 0xffffffff, 0x00403000,
++ 0x1947, 0xffffffff, 0x00403000,
++ 0x19a7, 0xffffffff, 0x00403000,
++ 0x1a07, 0xffffffff, 0x00403000,
++ 0x1a67, 0xffffffff, 0x00403000,
++ 0x1ac7, 0xffffffff, 0x00403000,
++ 0x1b27, 0xffffffff, 0x00403000,
++ 0x1b87, 0xffffffff, 0x00403000,
++ 0x1be7, 0xffffffff, 0x00403000,
++ 0x1c47, 0xffffffff, 0x00403000,
++ 0xde3, 0xffffffff, 0x00000100,
++ 0xde4, 0xffffffff, 0x00000000,
++ 0xde3, 0xffffffff, 0x00000100,
++ 0xeca2, 0xffffffff, 0x00000000,
++ 0xeca3, 0xffffffff, 0x00000000,
++ 0xeca4, 0xffffffff, 0x00000000,
++ 0x263e, 0xffffffff, 0x28104042,
++ 0x263e, 0xffffffff, 0x28104042,
++ 0xc200, 0xffffffff, 0xe0000000,
++ 0x263e, 0xffffffff, 0x28104042,
++ 0x2b03, 0xffffffff, 0xb5d3f197,
++ 0x2b04, 0xffffffff, 0x4a2c0e68,
++ 0x2711, 0xffffffff, 0x000000ff,
++ 0x260c, 0xffffffff, 0x00000000,
++ 0x21d8, 0xffffffff, 0x00002b16,
++ 0x21d9, 0xffffffff, 0x00008040,
++ 0xc441, 0xffffffff, 0x01000108,
++ 0xc24d, 0xffffffff, 0x00000001,
++ 0xd808, 0xffffffff, 0x00000000,
++ 0x2300, 0xffffffff, 0x01180000,
++ 0xc248, 0xffffffff, 0xffffffff,
++ 0x2231, 0xffffffff, 0x09000000,
++ 0xc330, 0xffffffff, 0x00000000,
++ 0xc332, 0xffffffff, 0x0000df80,
++ 0xc336, 0xffffffff, 0x02249249,
++ 0xc281, 0xffffffff, 0x00000000,
++ 0xdc02, 0xffffffff, 0x00000000,
++ 0xf084, 0xffffffff, 0x00018100,
++ 0x2285, 0xffffffff, 0x00000007,
++ 0x22fc, 0xffffffff, 0x00000001,
++ 0x22c9, 0xffffffff, 0x00ffffff,
++ 0x2987, 0xffffffff, 0x00002500,
++ 0x2985, 0xffffffff, 0x00000001,
++ 0x2986, 0xffffffff, 0x00000000,
++ 0x2974, 0xffffffff, 0x00000000,
++ 0x2975, 0xffffffff, 0x000000ff,
++ 0x2980, 0xffffffff, 0x00000090,
++ 0x2981, 0xffffffff, 0x0000018f,
++ 0x2880, 0xffffffff, 0x00000000,
++ 0x2881, 0xffffffff, 0x00000000,
++ 0x2971, 0xffffffff, 0x00000000,
++ 0x2972, 0xffffffff, 0x000fffff,
++ 0x2984, 0xffffffff, 0x00000000,
++ 0x2983, 0xffffffff, 0x00000002,
++ 0x2982, 0xffffffff, 0x00000000,
++ 0x2987, 0xffffffff, 0x00002001,
++ 0x1a833, 0xffffffff, 0x00002500,
++ 0x1a831, 0xffffffff, 0x00000001,
++ 0x1a832, 0xffffffff, 0x00000000,
++ 0x1a820, 0xffffffff, 0x00000000,
++ 0x1a821, 0xffffffff, 0x000000ff,
++ 0x1a82c, 0xffffffff, 0x00000090,
++ 0x1a82d, 0xffffffff, 0x0000018f,
++ 0x1a6c0, 0xffffffff, 0x00000000,
++ 0x1a6c1, 0xffffffff, 0x00000000,
++ 0x1a81d, 0xffffffff, 0x00000000,
++ 0x1a81e, 0xffffffff, 0x000fffff,
++ 0x1a830, 0xffffffff, 0x00000000,
++ 0x1a82f, 0xffffffff, 0x00000002,
++ 0x1a82e, 0xffffffff, 0x00000000,
++ 0x1a833, 0xffffffff, 0x00002001,
++ 0xf60, 0xffffffff, 0x00900000,
++ 0xf62, 0xffffffff, 0x00000000,
++ 0xf61, 0xffffffff, 0x00000000,
++ 0xff3, 0xffffffff, 0x2d600861,
++ 0xde0, 0xffffffff, 0x00000001,
++ 0xe1f, 0xffffffff, 0x00000003,
++};
++
+ int emu_soc_asic_init(struct amdgpu_device *adev)
+ {
++ if (adev->asic_type == CHIP_VEGA20) {
++ vg20_lsd_soc_init_with_umc(adev);
++
++ amdgpu_device_program_register_sequence(adev,
++ (const u32 *)vega20_golden_init,
++ ARRAY_SIZE(vega20_golden_init));
++ }
++
+ return 0;
+ }
+
+--
+2.7.4
+
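Side note (not part of the patch above): emu_soc_asic_init() hands the vega20_golden_init table to amdgpu_device_program_register_sequence(), which consumes it as (register offset, and_mask, or_value) triples. The userspace sketch below only models the usual semantics of such a table — write the value directly when the mask covers the whole register, otherwise read-modify-write; the apply_golden_sequence() helper, the regfile array, and the test harness are illustrative assumptions, not amdgpu API.

    #include <stdint.h>
    #include <stdio.h>

    /* Apply (reg, and_mask, or_value) triples to an in-memory register file
     * standing in for MMIO space. */
    static void apply_golden_sequence(uint32_t *regfile,
                                      const uint32_t *table, unsigned int len)
    {
        unsigned int i;

        for (i = 0; i + 2 < len; i += 3) {
            uint32_t reg = table[i], and_mask = table[i + 1];
            uint32_t or_value = table[i + 2], tmp;

            if (and_mask == 0xffffffff)
                tmp = or_value;                          /* plain write */
            else
                tmp = (regfile[reg] & ~and_mask) | or_value; /* read-modify-write */
            regfile[reg] = tmp;
        }
    }

    int main(void)
    {
        static uint32_t regfile[0x3000];
        /* First two entries of vega20_golden_init from the patch above. */
        const uint32_t table[] = {
            0xde3, 0xffffffff, 0x00000100,
            0xde4, 0xffffffff, 0x00000000,
        };

        apply_golden_sequence(regfile, table, 6);
        printf("reg 0xde3 = 0x%08x\n", (unsigned int)regfile[0xde3]);
        return 0;
    }

Running this prints reg 0xde3 = 0x00000100, matching the first triple of the table; with a partial and_mask only the masked bits would change.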
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4413-drm-amd-display-dm-Add-vega20-support.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4413-drm-amd-display-dm-Add-vega20-support.patch
new file mode 100644
index 00000000..46a8faf9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4413-drm-amd-display-dm-Add-vega20-support.patch
@@ -0,0 +1,53 @@
+From 942541c09ff3ff6dca3ac6d7126df3657bade3b9 Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Fri, 20 Apr 2018 19:50:01 +0800
+Subject: [PATCH 4413/5725] drm/amd/display/dm: Add vega20 support
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index be3200c..21be9fd 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -1111,6 +1111,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
+
+ if (adev->asic_type == CHIP_VEGA10 ||
+ adev->asic_type == CHIP_VEGA12 ||
++ adev->asic_type == CHIP_VEGA20 ||
+ adev->asic_type == CHIP_RAVEN)
+ client_id = SOC15_IH_CLIENTID_DCE;
+
+@@ -1514,6 +1515,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
+ #endif
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
++ case CHIP_VEGA20:
+ if (dce110_register_irq_handlers(dm->adev)) {
+ DRM_ERROR("DM: Failed to initialize IRQ\n");
+ goto fail;
+@@ -1766,6 +1768,7 @@ static int dm_early_init(void *handle)
+ break;
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
++ case CHIP_VEGA20:
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+@@ -2014,6 +2017,7 @@ static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
+
+ if (adev->asic_type == CHIP_VEGA10 ||
+ adev->asic_type == CHIP_VEGA12 ||
++ adev->asic_type == CHIP_VEGA20 ||
+ adev->asic_type == CHIP_RAVEN) {
+ /* Fill GFX9 params */
+ plane_state->tiling_info.gfx9.num_pipes =
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4414-drm-amdgpu-Add-vega20-to-dc-support-check.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4414-drm-amdgpu-Add-vega20-to-dc-support-check.patch
new file mode 100644
index 00000000..96066a05
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4414-drm-amdgpu-Add-vega20-to-dc-support-check.patch
@@ -0,0 +1,28 @@
+From 0c2b9c3c203ce89f5ef55ce7008a66eae9bcbc0e Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Sat, 3 Feb 2018 12:19:46 +0800
+Subject: [PATCH 4414/5725] drm/amdgpu: Add vega20 to dc support check
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index eaa8922..0eee9f2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -2218,6 +2218,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
+ case CHIP_FIJI:
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
++ case CHIP_VEGA20:
+ #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ case CHIP_RAVEN:
+ #endif
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4415-drm-amd-Add-dce-12.1-gpio-aux-registers.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4415-drm-amd-Add-dce-12.1-gpio-aux-registers.patch
new file mode 100644
index 00000000..32559430
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4415-drm-amd-Add-dce-12.1-gpio-aux-registers.patch
@@ -0,0 +1,211 @@
+From 40cae14a8e7ec5c3d70797580e9b03b43b7b1851 Mon Sep 17 00:00:00 2001
+From: Roman Li <Roman.Li@amd.com>
+Date: Wed, 14 Feb 2018 17:20:54 -0500
+Subject: [PATCH 4415/5725] drm/amd: Add dce-12.1 gpio aux registers
+
+Updating dce12 register headers by adding dc registers
+required for potential DP LTTPR support.
+
+Signed-off-by: Roman Li <Roman.Li@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../drm/amd/include/asic_reg/dce/dce_12_0_offset.h | 12 ++
+ .../amd/include/asic_reg/dce/dce_12_0_sh_mask.h | 152 +++++++++++++++++++++
+ 2 files changed, 164 insertions(+)
+ mode change 100644 => 100755 drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h
+ mode change 100644 => 100755 drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_sh_mask.h
+
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h
+old mode 100644
+new mode 100755
+index f730d06..b6f74bf
+--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h
++++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h
+@@ -2095,6 +2095,18 @@
+ #define mmDC_GPIO_AUX_CTRL_2_BASE_IDX 2
+ #define mmDC_GPIO_RXEN 0x212f
+ #define mmDC_GPIO_RXEN_BASE_IDX 2
++#define mmDC_GPIO_AUX_CTRL_3 0x2130
++#define mmDC_GPIO_AUX_CTRL_3_BASE_IDX 2
++#define mmDC_GPIO_AUX_CTRL_4 0x2131
++#define mmDC_GPIO_AUX_CTRL_4_BASE_IDX 2
++#define mmDC_GPIO_AUX_CTRL_5 0x2132
++#define mmDC_GPIO_AUX_CTRL_5_BASE_IDX 2
++#define mmAUXI2C_PAD_ALL_PWR_OK 0x2133
++#define mmAUXI2C_PAD_ALL_PWR_OK_BASE_IDX 2
++#define mmDC_GPIO_PULLUPEN 0x2134
++#define mmDC_GPIO_PULLUPEN_BASE_IDX 2
++#define mmDC_GPIO_AUX_CTRL_6 0x2135
++#define mmDC_GPIO_AUX_CTRL_6_BASE_IDX 2
+ #define mmBPHYC_DAC_MACRO_CNTL 0x2136
+ #define mmBPHYC_DAC_MACRO_CNTL_BASE_IDX 2
+ #define mmDAC_MACRO_CNTL_RESERVED0 0x2136
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_sh_mask.h
+old mode 100644
+new mode 100755
+index 6d3162c..bcd190a
+--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_sh_mask.h
++++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_sh_mask.h
+@@ -10971,6 +10971,158 @@
+ #define DC_GPIO_RXEN__DC_GPIO_BLON_RXEN_MASK 0x00100000L
+ #define DC_GPIO_RXEN__DC_GPIO_DIGON_RXEN_MASK 0x00200000L
+ #define DC_GPIO_RXEN__DC_GPIO_ENA_BL_RXEN_MASK 0x00400000L
++//DC_GPIO_AUX_CTRL_3
++#define DC_GPIO_AUX_CTRL_3__AUX1_NEN_RTERM__SHIFT 0x0
++#define DC_GPIO_AUX_CTRL_3__AUX2_NEN_RTERM__SHIFT 0x1
++#define DC_GPIO_AUX_CTRL_3__AUX3_NEN_RTERM__SHIFT 0x2
++#define DC_GPIO_AUX_CTRL_3__AUX4_NEN_RTERM__SHIFT 0x3
++#define DC_GPIO_AUX_CTRL_3__AUX5_NEN_RTERM__SHIFT 0x4
++#define DC_GPIO_AUX_CTRL_3__AUX6_NEN_RTERM__SHIFT 0x5
++#define DC_GPIO_AUX_CTRL_3__AUX1_DP_DN_SWAP__SHIFT 0x8
++#define DC_GPIO_AUX_CTRL_3__AUX2_DP_DN_SWAP__SHIFT 0x9
++#define DC_GPIO_AUX_CTRL_3__AUX3_DP_DN_SWAP__SHIFT 0xa
++#define DC_GPIO_AUX_CTRL_3__AUX4_DP_DN_SWAP__SHIFT 0xb
++#define DC_GPIO_AUX_CTRL_3__AUX5_DP_DN_SWAP__SHIFT 0xc
++#define DC_GPIO_AUX_CTRL_3__AUX6_DP_DN_SWAP__SHIFT 0xd
++#define DC_GPIO_AUX_CTRL_3__AUX1_HYS_TUNE__SHIFT 0x10
++#define DC_GPIO_AUX_CTRL_3__AUX2_HYS_TUNE__SHIFT 0x12
++#define DC_GPIO_AUX_CTRL_3__AUX3_HYS_TUNE__SHIFT 0x14
++#define DC_GPIO_AUX_CTRL_3__AUX4_HYS_TUNE__SHIFT 0x16
++#define DC_GPIO_AUX_CTRL_3__AUX5_HYS_TUNE__SHIFT 0x18
++#define DC_GPIO_AUX_CTRL_3__AUX6_HYS_TUNE__SHIFT 0x1a
++#define DC_GPIO_AUX_CTRL_3__AUX1_NEN_RTERM_MASK 0x00000001L
++#define DC_GPIO_AUX_CTRL_3__AUX2_NEN_RTERM_MASK 0x00000002L
++#define DC_GPIO_AUX_CTRL_3__AUX3_NEN_RTERM_MASK 0x00000004L
++#define DC_GPIO_AUX_CTRL_3__AUX4_NEN_RTERM_MASK 0x00000008L
++#define DC_GPIO_AUX_CTRL_3__AUX5_NEN_RTERM_MASK 0x00000010L
++#define DC_GPIO_AUX_CTRL_3__AUX6_NEN_RTERM_MASK 0x00000020L
++#define DC_GPIO_AUX_CTRL_3__AUX1_DP_DN_SWAP_MASK 0x00000100L
++#define DC_GPIO_AUX_CTRL_3__AUX2_DP_DN_SWAP_MASK 0x00000200L
++#define DC_GPIO_AUX_CTRL_3__AUX3_DP_DN_SWAP_MASK 0x00000400L
++#define DC_GPIO_AUX_CTRL_3__AUX4_DP_DN_SWAP_MASK 0x00000800L
++#define DC_GPIO_AUX_CTRL_3__AUX5_DP_DN_SWAP_MASK 0x00001000L
++#define DC_GPIO_AUX_CTRL_3__AUX6_DP_DN_SWAP_MASK 0x00002000L
++#define DC_GPIO_AUX_CTRL_3__AUX1_HYS_TUNE_MASK 0x00030000L
++#define DC_GPIO_AUX_CTRL_3__AUX2_HYS_TUNE_MASK 0x000C0000L
++#define DC_GPIO_AUX_CTRL_3__AUX3_HYS_TUNE_MASK 0x00300000L
++#define DC_GPIO_AUX_CTRL_3__AUX4_HYS_TUNE_MASK 0x00C00000L
++#define DC_GPIO_AUX_CTRL_3__AUX5_HYS_TUNE_MASK 0x03000000L
++#define DC_GPIO_AUX_CTRL_3__AUX6_HYS_TUNE_MASK 0x0C000000L
++//DC_GPIO_AUX_CTRL_4
++#define DC_GPIO_AUX_CTRL_4__AUX1_AUX_CTRL__SHIFT 0x0
++#define DC_GPIO_AUX_CTRL_4__AUX2_AUX_CTRL__SHIFT 0x4
++#define DC_GPIO_AUX_CTRL_4__AUX3_AUX_CTRL__SHIFT 0x8
++#define DC_GPIO_AUX_CTRL_4__AUX4_AUX_CTRL__SHIFT 0xc
++#define DC_GPIO_AUX_CTRL_4__AUX5_AUX_CTRL__SHIFT 0x10
++#define DC_GPIO_AUX_CTRL_4__AUX6_AUX_CTRL__SHIFT 0x14
++#define DC_GPIO_AUX_CTRL_4__AUX1_AUX_CTRL_MASK 0x0000000FL
++#define DC_GPIO_AUX_CTRL_4__AUX2_AUX_CTRL_MASK 0x000000F0L
++#define DC_GPIO_AUX_CTRL_4__AUX3_AUX_CTRL_MASK 0x00000F00L
++#define DC_GPIO_AUX_CTRL_4__AUX4_AUX_CTRL_MASK 0x0000F000L
++#define DC_GPIO_AUX_CTRL_4__AUX5_AUX_CTRL_MASK 0x000F0000L
++#define DC_GPIO_AUX_CTRL_4__AUX6_AUX_CTRL_MASK 0x00F00000L
++//DC_GPIO_AUX_CTRL_5
++#define DC_GPIO_AUX_CTRL_5__AUX1_VOD_TUNE__SHIFT 0x0
++#define DC_GPIO_AUX_CTRL_5__AUX2_VOD_TUNE__SHIFT 0x2
++#define DC_GPIO_AUX_CTRL_5__AUX3_VOD_TUNE__SHIFT 0x4
++#define DC_GPIO_AUX_CTRL_5__AUX4_VOD_TUNE__SHIFT 0x6
++#define DC_GPIO_AUX_CTRL_5__AUX5_VOD_TUNE__SHIFT 0x8
++#define DC_GPIO_AUX_CTRL_5__AUX6_VOD_TUNE__SHIFT 0xa
++#define DC_GPIO_AUX_CTRL_5__DDC_PAD1_I2CMODE__SHIFT 0xc
++#define DC_GPIO_AUX_CTRL_5__DDC_PAD2_I2CMODE__SHIFT 0xd
++#define DC_GPIO_AUX_CTRL_5__DDC_PAD3_I2CMODE__SHIFT 0xe
++#define DC_GPIO_AUX_CTRL_5__DDC_PAD4_I2CMODE__SHIFT 0xf
++#define DC_GPIO_AUX_CTRL_5__DDC_PAD5_I2CMODE__SHIFT 0x10
++#define DC_GPIO_AUX_CTRL_5__DDC_PAD6_I2CMODE__SHIFT 0x11
++#define DC_GPIO_AUX_CTRL_5__DDC1_I2C_VPH_1V2_EN__SHIFT 0x12
++#define DC_GPIO_AUX_CTRL_5__DDC2_I2C_VPH_1V2_EN__SHIFT 0x13
++#define DC_GPIO_AUX_CTRL_5__DDC3_I2C_VPH_1V2_EN__SHIFT 0x14
++#define DC_GPIO_AUX_CTRL_5__DDC4_I2C_VPH_1V2_EN__SHIFT 0x15
++#define DC_GPIO_AUX_CTRL_5__DDC5_I2C_VPH_1V2_EN__SHIFT 0x16
++#define DC_GPIO_AUX_CTRL_5__DDC6_I2C_VPH_1V2_EN__SHIFT 0x17
++#define DC_GPIO_AUX_CTRL_5__DDC1_PAD_I2C_CTRL__SHIFT 0x18
++#define DC_GPIO_AUX_CTRL_5__DDC2_PAD_I2C_CTRL__SHIFT 0x19
++#define DC_GPIO_AUX_CTRL_5__DDC3_PAD_I2C_CTRL__SHIFT 0x1a
++#define DC_GPIO_AUX_CTRL_5__DDC4_PAD_I2C_CTRL__SHIFT 0x1b
++#define DC_GPIO_AUX_CTRL_5__DDC5_PAD_I2C_CTRL__SHIFT 0x1c
++#define DC_GPIO_AUX_CTRL_5__DDC6_PAD_I2C_CTRL__SHIFT 0x1d
++#define DC_GPIO_AUX_CTRL_5__AUX1_VOD_TUNE_MASK 0x00000003L
++#define DC_GPIO_AUX_CTRL_5__AUX2_VOD_TUNE_MASK 0x0000000CL
++#define DC_GPIO_AUX_CTRL_5__AUX3_VOD_TUNE_MASK 0x00000030L
++#define DC_GPIO_AUX_CTRL_5__AUX4_VOD_TUNE_MASK 0x000000C0L
++#define DC_GPIO_AUX_CTRL_5__AUX5_VOD_TUNE_MASK 0x00000300L
++#define DC_GPIO_AUX_CTRL_5__AUX6_VOD_TUNE_MASK 0x00000C00L
++#define DC_GPIO_AUX_CTRL_5__DDC_PAD1_I2CMODE_MASK 0x00001000L
++#define DC_GPIO_AUX_CTRL_5__DDC_PAD2_I2CMODE_MASK 0x00002000L
++#define DC_GPIO_AUX_CTRL_5__DDC_PAD3_I2CMODE_MASK 0x00004000L
++#define DC_GPIO_AUX_CTRL_5__DDC_PAD4_I2CMODE_MASK 0x00008000L
++#define DC_GPIO_AUX_CTRL_5__DDC_PAD5_I2CMODE_MASK 0x00010000L
++#define DC_GPIO_AUX_CTRL_5__DDC_PAD6_I2CMODE_MASK 0x00020000L
++#define DC_GPIO_AUX_CTRL_5__DDC1_I2C_VPH_1V2_EN_MASK 0x00040000L
++#define DC_GPIO_AUX_CTRL_5__DDC2_I2C_VPH_1V2_EN_MASK 0x00080000L
++#define DC_GPIO_AUX_CTRL_5__DDC3_I2C_VPH_1V2_EN_MASK 0x00100000L
++#define DC_GPIO_AUX_CTRL_5__DDC4_I2C_VPH_1V2_EN_MASK 0x00200000L
++#define DC_GPIO_AUX_CTRL_5__DDC5_I2C_VPH_1V2_EN_MASK 0x00400000L
++#define DC_GPIO_AUX_CTRL_5__DDC6_I2C_VPH_1V2_EN_MASK 0x00800000L
++#define DC_GPIO_AUX_CTRL_5__DDC1_PAD_I2C_CTRL_MASK 0x01000000L
++#define DC_GPIO_AUX_CTRL_5__DDC2_PAD_I2C_CTRL_MASK 0x02000000L
++#define DC_GPIO_AUX_CTRL_5__DDC3_PAD_I2C_CTRL_MASK 0x04000000L
++#define DC_GPIO_AUX_CTRL_5__DDC4_PAD_I2C_CTRL_MASK 0x08000000L
++#define DC_GPIO_AUX_CTRL_5__DDC5_PAD_I2C_CTRL_MASK 0x10000000L
++#define DC_GPIO_AUX_CTRL_5__DDC6_PAD_I2C_CTRL_MASK 0x20000000L
++//AUXI2C_PAD_ALL_PWR_OK
++#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY1_ALL_PWR_OK__SHIFT 0x0
++#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY2_ALL_PWR_OK__SHIFT 0x1
++#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY3_ALL_PWR_OK__SHIFT 0x2
++#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY4_ALL_PWR_OK__SHIFT 0x3
++#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY5_ALL_PWR_OK__SHIFT 0x4
++#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY6_ALL_PWR_OK__SHIFT 0x5
++#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY1_ALL_PWR_OK_MASK 0x00000001L
++#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY2_ALL_PWR_OK_MASK 0x00000002L
++#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY3_ALL_PWR_OK_MASK 0x00000004L
++#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY4_ALL_PWR_OK_MASK 0x00000008L
++#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY5_ALL_PWR_OK_MASK 0x00000010L
++#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY6_ALL_PWR_OK_MASK 0x00000020L
++//DC_GPIO_PULLUPEN
++#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICA_PU_EN__SHIFT 0x0
++#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICB_PU_EN__SHIFT 0x1
++#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICC_PU_EN__SHIFT 0x2
++#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICD_PU_EN__SHIFT 0x3
++#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICE_PU_EN__SHIFT 0x4
++#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICF_PU_EN__SHIFT 0x5
++#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICG_PU_EN__SHIFT 0x6
++#define DC_GPIO_PULLUPEN__DC_GPIO_HSYNCA_PU_EN__SHIFT 0x8
++#define DC_GPIO_PULLUPEN__DC_GPIO_VSYNCA_PU_EN__SHIFT 0x9
++#define DC_GPIO_PULLUPEN__DC_GPIO_HPD1_PU_EN__SHIFT 0xe
++#define DC_GPIO_PULLUPEN__DC_GPIO_BLON_PU_EN__SHIFT 0x14
++#define DC_GPIO_PULLUPEN__DC_GPIO_DIGON_PU_EN__SHIFT 0x15
++#define DC_GPIO_PULLUPEN__DC_GPIO_ENA_BL_PU_EN__SHIFT 0x16
++#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICA_PU_EN_MASK 0x00000001L
++#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICB_PU_EN_MASK 0x00000002L
++#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICC_PU_EN_MASK 0x00000004L
++#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICD_PU_EN_MASK 0x00000008L
++#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICE_PU_EN_MASK 0x00000010L
++#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICF_PU_EN_MASK 0x00000020L
++#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICG_PU_EN_MASK 0x00000040L
++#define DC_GPIO_PULLUPEN__DC_GPIO_HSYNCA_PU_EN_MASK 0x00000100L
++#define DC_GPIO_PULLUPEN__DC_GPIO_VSYNCA_PU_EN_MASK 0x00000200L
++#define DC_GPIO_PULLUPEN__DC_GPIO_HPD1_PU_EN_MASK 0x00004000L
++#define DC_GPIO_PULLUPEN__DC_GPIO_BLON_PU_EN_MASK 0x00100000L
++#define DC_GPIO_PULLUPEN__DC_GPIO_DIGON_PU_EN_MASK 0x00200000L
++#define DC_GPIO_PULLUPEN__DC_GPIO_ENA_BL_PU_EN_MASK 0x00400000L
++//DC_GPIO_AUX_CTRL_6
++#define DC_GPIO_AUX_CTRL_6__AUX1_PAD_RXSEL__SHIFT 0x0
++#define DC_GPIO_AUX_CTRL_6__AUX2_PAD_RXSEL__SHIFT 0x2
++#define DC_GPIO_AUX_CTRL_6__AUX3_PAD_RXSEL__SHIFT 0x4
++#define DC_GPIO_AUX_CTRL_6__AUX4_PAD_RXSEL__SHIFT 0x6
++#define DC_GPIO_AUX_CTRL_6__AUX5_PAD_RXSEL__SHIFT 0x8
++#define DC_GPIO_AUX_CTRL_6__AUX6_PAD_RXSEL__SHIFT 0xa
++#define DC_GPIO_AUX_CTRL_6__AUX1_PAD_RXSEL_MASK 0x00000003L
++#define DC_GPIO_AUX_CTRL_6__AUX2_PAD_RXSEL_MASK 0x0000000CL
++#define DC_GPIO_AUX_CTRL_6__AUX3_PAD_RXSEL_MASK 0x00000030L
++#define DC_GPIO_AUX_CTRL_6__AUX4_PAD_RXSEL_MASK 0x000000C0L
++#define DC_GPIO_AUX_CTRL_6__AUX5_PAD_RXSEL_MASK 0x00000300L
++#define DC_GPIO_AUX_CTRL_6__AUX6_PAD_RXSEL_MASK 0x00000C00L
+ //BPHYC_DAC_MACRO_CNTL
+ #define BPHYC_DAC_MACRO_CNTL__BPHYC_DAC_WHITE_LEVEL__SHIFT 0x0
+ #define BPHYC_DAC_MACRO_CNTL__BPHYC_DAC_WHITE_FINE_CONTROL__SHIFT 0x8
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4416-drm-amd-display-Add-Vega20-config.-support.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4416-drm-amd-display-Add-Vega20-config.-support.patch
new file mode 100644
index 00000000..b2757659
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4416-drm-amd-display-Add-Vega20-config.-support.patch
@@ -0,0 +1,34 @@
+From 6d8a620884cc42750d7bfff11f8c4b5448530234 Mon Sep 17 00:00:00 2001
+From: "Jerry (Fangzhi) Zuo" <Jerry.Zuo@amd.com>
+Date: Fri, 11 May 2018 13:46:19 -0500
+Subject: [PATCH 4416/5725] drm/amd/display: Add Vega20 config. support
+
+Signed-off-by: Jerry (Fangzhi) Zuo <Jerry.Zuo@amd.com>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/display/Kconfig | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
+index e5b309f..23fbd99 100644
+--- a/drivers/gpu/drm/amd/display/Kconfig
++++ b/drivers/gpu/drm/amd/display/Kconfig
+@@ -41,4 +41,12 @@ config DEBUG_KERNEL_DC
+ Choose this option if you want to have
+ VEGAM support for display engine
+
++config DRM_AMD_DC_VG20
++ bool "Vega20 support"
++ depends on DRM_AMD_DC
++ help
++ Choose this option if you want to have
++ Vega20 support for display engine
++
++
+ endmenu
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4417-drm-amd-display-Remove-COMBO_DISPLAY_PLL0-from-Vega2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4417-drm-amd-display-Remove-COMBO_DISPLAY_PLL0-from-Vega2.patch
new file mode 100644
index 00000000..c63951ea
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4417-drm-amd-display-Remove-COMBO_DISPLAY_PLL0-from-Vega2.patch
@@ -0,0 +1,68 @@
+From c1bdeb4c0296d7092c2f1a12f308b77ec45f1be4 Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Fri, 20 Apr 2018 21:03:10 +0800
+Subject: [PATCH 4417/5725] drm/amd/display: Remove COMBO_DISPLAY_PLL0 from
+ Vega20
+
+Signed-off-by: Jerry (Fangzhi) Zuo <Jerry.Zuo@amd.com>
+Reviewed-by: Hersen Wu <hersenxs.wu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c | 11 ++++++++++-
+ drivers/gpu/drm/amd/display/include/dal_asic_id.h | 6 ++++++
+ 2 files changed, 16 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+index 78e6beb..aa4cf30 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+@@ -35,7 +35,7 @@
+ #endif
+ #include "core_types.h"
+ #include "dc_types.h"
+-
++#include "dal_asic_id.h"
+
+ #define TO_DCE_CLOCKS(clocks)\
+ container_of(clocks, struct dce_disp_clk, base)
+@@ -413,9 +413,18 @@ static int dce112_set_clock(
+ /*VBIOS will determine DPREFCLK frequency, so we don't set it*/
+ dce_clk_params.target_clock_frequency = 0;
+ dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK;
++#ifndef CONFIG_DRM_AMD_DC_VG20
+ dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK =
+ (dce_clk_params.pll_id ==
+ CLOCK_SOURCE_COMBO_DISPLAY_PLL0);
++#else
++ if (!ASICREV_IS_VEGA20_P(clk->ctx->asic_id.hw_internal_rev))
++ dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK =
++ (dce_clk_params.pll_id ==
++ CLOCK_SOURCE_COMBO_DISPLAY_PLL0);
++ else
++ dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = false;
++#endif
+
+ bp->funcs->set_dce_clock(bp, &dce_clk_params);
+
+diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+index 1b987b6..77d2856 100644
+--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
++++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+@@ -117,6 +117,12 @@
+ ((rev >= STONEY_A0) && (rev < CZ_UNKNOWN))
+
+ /* DCE12 */
++#define AI_UNKNOWN 0xFF
++
++#ifdef CONFIG_DRM_AMD_DC_VG20
++#define AI_VEGA20_P_A0 40
++#define ASICREV_IS_VEGA20_P(eChipRev) ((eChipRev >= AI_VEGA20_P_A0) && (eChipRev < AI_UNKNOWN))
++#endif
+
+ #define AI_GREENLAND_P_A0 1
+ #define AI_GREENLAND_P_A1 2
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4418-drm-amd-display-Add-BIOS-smu_info-v3_3-support-for-V.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4418-drm-amd-display-Add-BIOS-smu_info-v3_3-support-for-V.patch
new file mode 100644
index 00000000..6ee69b2d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4418-drm-amd-display-Add-BIOS-smu_info-v3_3-support-for-V.patch
@@ -0,0 +1,34 @@
+From 810f529ebfec34e49d73ae535cc05f570f86cfcf Mon Sep 17 00:00:00 2001
+From: "Jerry (Fangzhi) Zuo" <Jerry.Zuo@amd.com>
+Date: Fri, 11 May 2018 13:51:43 -0500
+Subject: [PATCH 4418/5725] drm/amd/display: Add BIOS smu_info v3_3 support for
+ Vega20
+
+Signed-off-by: Jerry (Fangzhi) Zuo <Jerry.Zuo@amd.com>
+Reviewed-by: Hersen Wu <hersenxs.wu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+index 10a5807..4561673 100644
+--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
++++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+@@ -1330,6 +1330,11 @@ static enum bp_result bios_parser_get_firmware_info(
+ case 2:
+ result = get_firmware_info_v3_2(bp, info);
+ break;
++ case 3:
++#ifdef CONFIG_DRM_AMD_DC_VG20
++ result = get_firmware_info_v3_2(bp, info);
++#endif
++ break;
+ default:
+ break;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4419-drm-amd-display-Add-harvest-IP-support-for-Vega20.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4419-drm-amd-display-Add-harvest-IP-support-for-Vega20.patch
new file mode 100644
index 00000000..f8a42bfc
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4419-drm-amd-display-Add-harvest-IP-support-for-Vega20.patch
@@ -0,0 +1,247 @@
+From 65af737105bd64feb320ebbad85e3fdf8d0d4afd Mon Sep 17 00:00:00 2001
+From: "Jerry (Fangzhi) Zuo" <Jerry.Zuo@amd.com>
+Date: Mon, 5 Mar 2018 16:12:23 -0500
+Subject: [PATCH 4419/5725] drm/amd/display: Add harvest IP support for Vega20
+
+Retrieve fuses to determine the availability of pipes, and
+eliminate pipes that cannot be used.
+
+Signed-off-by: Jerry (Fangzhi) Zuo <Jerry.Zuo@amd.com>
+Reviewed-by: Hersen Wu <hersenxs.wu@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+---
+ .../drm/amd/display/dc/dce120/dce120_resource.c | 208 +++++++++++++++++++++
+ 1 file changed, 208 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+index fda0157..545f35f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+@@ -814,6 +814,213 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
+ dm_pp_notify_wm_clock_changes(dc->ctx, &clk_ranges);
+ }
+
++#ifdef CONFIG_DRM_AMD_DC_VG20
++static uint32_t read_pipe_fuses(struct dc_context *ctx)
++{
++ uint32_t value = dm_read_reg_soc15(ctx, mmCC_DC_PIPE_DIS, 0);
++ /* VG20 support max 6 pipes */
++ value = value & 0x3f;
++ return value;
++}
++
++static bool construct(
++ uint8_t num_virtual_links,
++ struct dc *dc,
++ struct dce110_resource_pool *pool)
++{
++ unsigned int i;
++ int j;
++ struct dc_context *ctx = dc->ctx;
++ struct irq_service_init_data irq_init_data;
++ bool harvest_enabled = ASICREV_IS_VEGA20_P(ctx->asic_id.hw_internal_rev);
++ uint32_t pipe_fuses;
++
++ ctx->dc_bios->regs = &bios_regs;
++
++ pool->base.res_cap = &res_cap;
++ pool->base.funcs = &dce120_res_pool_funcs;
++
++ /* TODO: Fill more data from GreenlandAsicCapability.cpp */
++ pool->base.pipe_count = res_cap.num_timing_generator;
++ pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator;
++ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
++
++ dc->caps.max_downscale_ratio = 200;
++ dc->caps.i2c_speed_in_khz = 100;
++ dc->caps.max_cursor_size = 128;
++ dc->caps.dual_link_dvi = true;
++
++ dc->debug = debug_defaults;
++
++ /*************************************************
++ * Create resources *
++ *************************************************/
++
++ pool->base.clock_sources[DCE120_CLK_SRC_PLL0] =
++ dce120_clock_source_create(ctx, ctx->dc_bios,
++ CLOCK_SOURCE_COMBO_PHY_PLL0,
++ &clk_src_regs[0], false);
++ pool->base.clock_sources[DCE120_CLK_SRC_PLL1] =
++ dce120_clock_source_create(ctx, ctx->dc_bios,
++ CLOCK_SOURCE_COMBO_PHY_PLL1,
++ &clk_src_regs[1], false);
++ pool->base.clock_sources[DCE120_CLK_SRC_PLL2] =
++ dce120_clock_source_create(ctx, ctx->dc_bios,
++ CLOCK_SOURCE_COMBO_PHY_PLL2,
++ &clk_src_regs[2], false);
++ pool->base.clock_sources[DCE120_CLK_SRC_PLL3] =
++ dce120_clock_source_create(ctx, ctx->dc_bios,
++ CLOCK_SOURCE_COMBO_PHY_PLL3,
++ &clk_src_regs[3], false);
++ pool->base.clock_sources[DCE120_CLK_SRC_PLL4] =
++ dce120_clock_source_create(ctx, ctx->dc_bios,
++ CLOCK_SOURCE_COMBO_PHY_PLL4,
++ &clk_src_regs[4], false);
++ pool->base.clock_sources[DCE120_CLK_SRC_PLL5] =
++ dce120_clock_source_create(ctx, ctx->dc_bios,
++ CLOCK_SOURCE_COMBO_PHY_PLL5,
++ &clk_src_regs[5], false);
++ pool->base.clk_src_count = DCE120_CLK_SRC_TOTAL;
++
++ pool->base.dp_clock_source =
++ dce120_clock_source_create(ctx, ctx->dc_bios,
++ CLOCK_SOURCE_ID_DP_DTO,
++ &clk_src_regs[0], true);
++
++ for (i = 0; i < pool->base.clk_src_count; i++) {
++ if (pool->base.clock_sources[i] == NULL) {
++ dm_error("DC: failed to create clock sources!\n");
++ BREAK_TO_DEBUGGER();
++ goto clk_src_create_fail;
++ }
++ }
++
++ pool->base.display_clock = dce120_disp_clk_create(ctx);
++ if (pool->base.display_clock == NULL) {
++ dm_error("DC: failed to create display clock!\n");
++ BREAK_TO_DEBUGGER();
++ goto disp_clk_create_fail;
++ }
++
++ pool->base.dmcu = dce_dmcu_create(ctx,
++ &dmcu_regs,
++ &dmcu_shift,
++ &dmcu_mask);
++ if (pool->base.dmcu == NULL) {
++ dm_error("DC: failed to create dmcu!\n");
++ BREAK_TO_DEBUGGER();
++ goto res_create_fail;
++ }
++
++ pool->base.abm = dce_abm_create(ctx,
++ &abm_regs,
++ &abm_shift,
++ &abm_mask);
++ if (pool->base.abm == NULL) {
++ dm_error("DC: failed to create abm!\n");
++ BREAK_TO_DEBUGGER();
++ goto res_create_fail;
++ }
++
++ irq_init_data.ctx = dc->ctx;
++ pool->base.irqs = dal_irq_service_dce120_create(&irq_init_data);
++ if (!pool->base.irqs)
++ goto irqs_create_fail;
++
++ /* retrieve valid pipe fuses */
++ if (harvest_enabled)
++ pipe_fuses = read_pipe_fuses(ctx);
++
++ /* index to valid pipe resource */
++ j = 0;
++ for (i = 0; i < pool->base.pipe_count; i++) {
++ if (harvest_enabled) {
++ if ((pipe_fuses & (1 << i)) != 0) {
++ dm_error("DC: skip invalid pipe %d!\n", i);
++ continue;
++ }
++ }
++
++ pool->base.timing_generators[j] =
++ dce120_timing_generator_create(
++ ctx,
++ i,
++ &dce120_tg_offsets[i]);
++ if (pool->base.timing_generators[j] == NULL) {
++ BREAK_TO_DEBUGGER();
++ dm_error("DC: failed to create tg!\n");
++ goto controller_create_fail;
++ }
++
++ pool->base.mis[j] = dce120_mem_input_create(ctx, i);
++
++ if (pool->base.mis[j] == NULL) {
++ BREAK_TO_DEBUGGER();
++ dm_error(
++ "DC: failed to create memory input!\n");
++ goto controller_create_fail;
++ }
++
++ pool->base.ipps[j] = dce120_ipp_create(ctx, i);
++ if (pool->base.ipps[i] == NULL) {
++ BREAK_TO_DEBUGGER();
++ dm_error(
++ "DC: failed to create input pixel processor!\n");
++ goto controller_create_fail;
++ }
++
++ pool->base.transforms[j] = dce120_transform_create(ctx, i);
++ if (pool->base.transforms[i] == NULL) {
++ BREAK_TO_DEBUGGER();
++ dm_error(
++ "DC: failed to create transform!\n");
++ goto res_create_fail;
++ }
++
++ pool->base.opps[j] = dce120_opp_create(
++ ctx,
++ i);
++ if (pool->base.opps[j] == NULL) {
++ BREAK_TO_DEBUGGER();
++ dm_error(
++ "DC: failed to create output pixel processor!\n");
++ }
++
++ /* check next valid pipe */
++ j++;
++ }
++
++ /* valid pipe num */
++ pool->base.pipe_count = j;
++ pool->base.timing_generator_count = j;
++
++ if (!resource_construct(num_virtual_links, dc, &pool->base,
++ &res_create_funcs))
++ goto res_create_fail;
++
++ /* Create hardware sequencer */
++ if (!dce120_hw_sequencer_create(dc))
++ goto controller_create_fail;
++
++ dc->caps.max_planes = pool->base.pipe_count;
++
++ bw_calcs_init(dc->bw_dceip, dc->bw_vbios, dc->ctx->asic_id);
++
++ bw_calcs_data_update_from_pplib(dc);
++
++ return true;
++
++irqs_create_fail:
++controller_create_fail:
++disp_clk_create_fail:
++clk_src_create_fail:
++res_create_fail:
++
++ destruct(pool);
++
++ return false;
++}
++#else
+ static bool construct(
+ uint8_t num_virtual_links,
+ struct dc *dc,
+@@ -988,6 +1195,7 @@ static bool construct(
+
+ return false;
+ }
++#endif
+
+ struct resource_pool *dce120_create_resource_pool(
+ uint8_t num_virtual_links,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4420-drm-amdgpu-atomfirmware-add-new-gfx_info-data-table-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4420-drm-amdgpu-atomfirmware-add-new-gfx_info-data-table-.patch
new file mode 100644
index 00000000..001aa7bb
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4420-drm-amdgpu-atomfirmware-add-new-gfx_info-data-table-.patch
@@ -0,0 +1,64 @@
+From 6d948c28f25321d6d9195e60faed17f9dea77bf5 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 15 Mar 2018 21:32:27 -0500
+Subject: [PATCH 4420/5725] drm/amdgpu/atomfirmware: add new gfx_info data
+ table v2.4 (v2)
+
+Adds additional gfx configuration data.
+
+v2: fix typo
+
+Reviewed-by: Hawking Zhang <Hawking.Zhang>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/include/atomfirmware.h | 34 ++++++++++++++++++++++++++++++
+ 1 file changed, 34 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
+index de177ce..fd5e80c 100644
+--- a/drivers/gpu/drm/amd/include/atomfirmware.h
++++ b/drivers/gpu/drm/amd/include/atomfirmware.h
+@@ -1219,6 +1219,40 @@ struct atom_gfx_info_v2_3 {
+ uint32_t rm21_sram_vmin_value;
+ };
+
++struct atom_gfx_info_v2_4 {
++ struct atom_common_table_header table_header;
++ uint8_t gfxip_min_ver;
++ uint8_t gfxip_max_ver;
++ uint8_t gc_num_se;
++ uint8_t max_tile_pipes;
++ uint8_t gc_num_cu_per_sh;
++ uint8_t gc_num_sh_per_se;
++ uint8_t gc_num_rb_per_se;
++ uint8_t gc_num_tccs;
++ uint32_t regaddr_cp_dma_src_addr;
++ uint32_t regaddr_cp_dma_src_addr_hi;
++ uint32_t regaddr_cp_dma_dst_addr;
++ uint32_t regaddr_cp_dma_dst_addr_hi;
++ uint32_t regaddr_cp_dma_command;
++ uint32_t regaddr_cp_status;
++ uint32_t regaddr_rlc_gpu_clock_32;
++ uint32_t rlc_gpu_timer_refclk;
++ uint8_t active_cu_per_sh;
++ uint8_t active_rb_per_se;
++ uint16_t gcgoldenoffset;
++ uint32_t rm21_sram_vmin_value;
++ uint16_t gc_num_gprs;
++ uint16_t gc_gsprim_buff_depth;
++ uint16_t gc_parameter_cache_depth;
++ uint16_t gc_wave_size;
++ uint16_t gc_max_waves_per_simd;
++ uint16_t gc_lds_size;
++ uint8_t gc_num_max_gs_thds;
++ uint8_t gc_gs_table_depth;
++ uint8_t gc_double_offchip_lds_buffer;
++ uint8_t gc_max_scratch_slots_per_cu;
++};
++
+ /*
+ ***************************************************************************
+ Data Table smu_info structure
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4421-drm-amdgpu-atomfirmware-add-parser-for-gfx_info-tabl.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4421-drm-amdgpu-atomfirmware-add-parser-for-gfx_info-tabl.patch
new file mode 100644
index 00000000..c091b1e6
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4421-drm-amdgpu-atomfirmware-add-parser-for-gfx_info-tabl.patch
@@ -0,0 +1,83 @@
+From 136824c9c5d1a828ce6619f1134318f2ca79e59f Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Tue, 20 Mar 2018 12:24:03 -0500
+Subject: [PATCH 4421/5725] drm/amdgpu/atomfirmware: add parser for gfx_info
+ table
+
+Add support for the gfx_info table on boards that use atomfirmware.
+
+Reviewed-by: Hawking Zhang <Hawking.Zhang>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 46 ++++++++++++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h | 1 +
+ 2 files changed, 47 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+index a0f48cb..7014d58 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+@@ -322,3 +322,49 @@ int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev)
+
+ return ret;
+ }
++
++union gfx_info {
++ struct atom_gfx_info_v2_4 v24;
++};
++
++int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev)
++{
++ struct amdgpu_mode_info *mode_info = &adev->mode_info;
++ int index;
++ uint8_t frev, crev;
++ uint16_t data_offset;
++
++ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
++ gfx_info);
++ if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
++ &frev, &crev, &data_offset)) {
++ union gfx_info *gfx_info = (union gfx_info *)
++ (mode_info->atom_context->bios + data_offset);
++ switch (crev) {
++ case 4:
++ adev->gfx.config.max_shader_engines = gfx_info->v24.gc_num_se;
++ adev->gfx.config.max_cu_per_sh = gfx_info->v24.gc_num_cu_per_sh;
++ adev->gfx.config.max_sh_per_se = gfx_info->v24.gc_num_sh_per_se;
++ adev->gfx.config.max_backends_per_se = gfx_info->v24.gc_num_rb_per_se;
++ adev->gfx.config.max_texture_channel_caches = gfx_info->v24.gc_num_tccs;
++ adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v24.gc_num_gprs);
++ adev->gfx.config.max_gs_threads = gfx_info->v24.gc_num_max_gs_thds;
++ adev->gfx.config.gs_vgt_table_depth = gfx_info->v24.gc_gs_table_depth;
++ adev->gfx.config.gs_prim_buffer_depth =
++ le16_to_cpu(gfx_info->v24.gc_gsprim_buff_depth);
++ adev->gfx.config.double_offchip_lds_buf =
++ gfx_info->v24.gc_double_offchip_lds_buffer;
++ adev->gfx.cu_info.wave_front_size = gfx_info->v24.gc_wave_size;
++ adev->gfx.cu_info.max_waves_per_simd =
++ le16_to_cpu(gfx_info->v24.gc_max_waves_per_simd);
++ adev->gfx.cu_info.max_scratch_slots_per_cu =
++ gfx_info->v24.gc_max_scratch_slots_per_cu;
++ adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v24.gc_lds_size);
++ return 0;
++ default:
++ return -EINVAL;
++ }
++
++ }
++ return -EINVAL;
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
+index 7689c96..20f158f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
+@@ -30,5 +30,6 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev);
+ int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev);
+ int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev);
+ int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev);
++int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev);
+
+ #endif
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4422-drm-amdgpu-vg20-fallback-to-vbios-table-if-gpu-info-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4422-drm-amdgpu-vg20-fallback-to-vbios-table-if-gpu-info-.patch
new file mode 100644
index 00000000..bd589e9d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4422-drm-amdgpu-vg20-fallback-to-vbios-table-if-gpu-info-.patch
@@ -0,0 +1,92 @@
+From f9276c221a64b92acf7ab7d190cde2adb40bec9c Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Tue, 20 Mar 2018 13:24:30 -0500
+Subject: [PATCH 4422/5725] drm/amdgpu/vg20: fallback to vbios table if gpu
+ info fw is not available (v2)
+
+First try and fetch the gpu info firmware and then fall back to
+the vbios table if the gpu info firmware is not available.
+
+v2: warning fix (Alex)
+
+Reviewed-by: Hawking Zhang <Hawking.Zhang>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 +++
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 16 ++++++++++++++--
+ 2 files changed, 17 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 0eee9f2..cfb66cc 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1508,6 +1508,9 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
+ goto out;
+ }
+ out:
++ /* fall back to vbios tables for vega20 */
++ if (adev->asic_type == CHIP_VEGA20)
++ return 0;
+ return err;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 5f812dd..e6616e7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -27,6 +27,7 @@
+ #include "amdgpu_gfx.h"
+ #include "soc15.h"
+ #include "soc15d.h"
++#include "amdgpu_atomfirmware.h"
+
+ #include "gc/gc_9_0_offset.h"
+ #include "gc/gc_9_0_sh_mask.h"
+@@ -1113,9 +1114,10 @@ static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
+ .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q
+ };
+
+-static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
++static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
+ {
+ u32 gb_addr_config;
++ int err;
+
+ adev->gfx.funcs = &gfx_v9_0_gfx_funcs;
+
+@@ -1147,6 +1149,12 @@ static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
+ gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
+ gb_addr_config &= ~0xf3e777ff;
+ gb_addr_config |= 0x22014042;
++ /* check vbios table if gpu info is not available */
++ if (!adev->gfx.config.max_shader_engines) {
++ err = amdgpu_atomfirmware_get_gfx_info(adev);
++ if (err)
++ return err;
++ }
+ break;
+ case CHIP_RAVEN:
+ adev->gfx.config.max_hw_contexts = 8;
+@@ -1197,6 +1205,8 @@ static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
+ adev->gfx.config.gb_addr_config,
+ GB_ADDR_CONFIG,
+ PIPE_INTERLEAVE_SIZE));
++
++ return 0;
+ }
+
+ static int gfx_v9_0_ngg_create_buf(struct amdgpu_device *adev,
+@@ -1558,7 +1568,9 @@ static int gfx_v9_0_sw_init(void *handle)
+
+ adev->gfx.ce_ram_size = 0x8000;
+
+- gfx_v9_0_gpu_early_init(adev);
++ r = gfx_v9_0_gpu_early_init(adev);
++ if (r)
++ return r;
+
+ r = gfx_v9_0_ngg_init(adev);
+ if (r)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4423-drm-amdgpu-drop-gpu_info-firmware-for-vega20.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4423-drm-amdgpu-drop-gpu_info-firmware-for-vega20.patch
new file mode 100644
index 00000000..df8b08a8
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4423-drm-amdgpu-drop-gpu_info-firmware-for-vega20.patch
@@ -0,0 +1,76 @@
+From 758b3a555359a7b5dcafe83fc0b79d73e3744d87 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Wed, 18 Apr 2018 11:05:12 -0500
+Subject: [PATCH 4423/5725] drm/amdgpu: drop gpu_info firmware for vega20
+
+No longer required.
+
+Reviewed-by: Amber Lin <Amber.Lin@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 8 +-------
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 8 +++-----
+ 2 files changed, 4 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index cfb66cc..8267990 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -60,7 +60,6 @@
+
+ MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
+ MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
+-MODULE_FIRMWARE("amdgpu/vega20_gpu_info.bin");
+ MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
+
+ #define AMDGPU_RESUME_MS 2000
+@@ -1439,6 +1438,7 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ #endif
++ case CHIP_VEGA20:
+ default:
+ return 0;
+ case CHIP_VEGA10:
+@@ -1447,9 +1447,6 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
+ case CHIP_VEGA12:
+ chip_name = "vega12";
+ break;
+- case CHIP_VEGA20:
+- chip_name = "vega20";
+- break;
+ case CHIP_RAVEN:
+ chip_name = "raven";
+ break;
+@@ -1508,9 +1505,6 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
+ goto out;
+ }
+ out:
+- /* fall back to vbios tables for vega20 */
+- if (adev->asic_type == CHIP_VEGA20)
+- return 0;
+ return err;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index e6616e7..dcb916b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -1150,11 +1150,9 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
+ gb_addr_config &= ~0xf3e777ff;
+ gb_addr_config |= 0x22014042;
+ /* check vbios table if gpu info is not available */
+- if (!adev->gfx.config.max_shader_engines) {
+- err = amdgpu_atomfirmware_get_gfx_info(adev);
+- if (err)
+- return err;
+- }
++ err = amdgpu_atomfirmware_get_gfx_info(adev);
++ if (err)
++ return err;
+ break;
+ case CHIP_RAVEN:
+ adev->gfx.config.max_hw_contexts = 8;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4424-drm-amdgpu-Set-vega20-load_type-to-AMDGPU_FW_LOAD_DI.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4424-drm-amdgpu-Set-vega20-load_type-to-AMDGPU_FW_LOAD_DI.patch
new file mode 100644
index 00000000..83756bf5
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4424-drm-amdgpu-Set-vega20-load_type-to-AMDGPU_FW_LOAD_DI.patch
@@ -0,0 +1,36 @@
+From fd7010b531eec13cc7fba117739580790d5386e0 Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Tue, 24 Apr 2018 11:11:16 +0800
+Subject: [PATCH 4424/5725] drm/amdgpu: Set vega20 load_type to
+ AMDGPU_FW_LOAD_DIRECT.
+
+Please revert this patch when psp load fw is enabled.
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+index 63e2996..abcc163 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+@@ -303,11 +303,12 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
+ case CHIP_VEGA10:
+ case CHIP_RAVEN:
+ case CHIP_VEGA12:
+- case CHIP_VEGA20:
+ if (!load_type)
+ return AMDGPU_FW_LOAD_DIRECT;
+ else
+ return AMDGPU_FW_LOAD_PSP;
++ case CHIP_VEGA20:
++ return AMDGPU_FW_LOAD_DIRECT;
+ default:
+ DRM_ERROR("Unknow firmware load type\n");
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4425-drm-amd-powerplay-update-vega20-cg-flags.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4425-drm-amd-powerplay-update-vega20-cg-flags.patch
new file mode 100644
index 00000000..1e31b90d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4425-drm-amd-powerplay-update-vega20-cg-flags.patch
@@ -0,0 +1,45 @@
+From f39069076bd355c8080fe31e95a91fd9c4b48269 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Mon, 26 Mar 2018 11:43:04 +0800
+Subject: [PATCH 4425/5725] drm/amd/powerplay: update vega20 cg flags
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/soc15.c | 19 ++++++++++++++++++-
+ 1 file changed, 18 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index 4e065c6..91d87f8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -665,7 +665,24 @@ static int soc15_common_early_init(void *handle)
+ adev->external_rev_id = adev->rev_id + 0x14;
+ break;
+ case CHIP_VEGA20:
+- adev->cg_flags = 0;
++ adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
++ AMD_CG_SUPPORT_GFX_MGLS |
++ AMD_CG_SUPPORT_GFX_CGCG |
++ AMD_CG_SUPPORT_GFX_CGLS |
++ AMD_CG_SUPPORT_GFX_3D_CGCG |
++ AMD_CG_SUPPORT_GFX_3D_CGLS |
++ AMD_CG_SUPPORT_GFX_CP_LS |
++ AMD_CG_SUPPORT_MC_LS |
++ AMD_CG_SUPPORT_MC_MGCG |
++ AMD_CG_SUPPORT_SDMA_MGCG |
++ AMD_CG_SUPPORT_SDMA_LS |
++ AMD_CG_SUPPORT_BIF_MGCG |
++ AMD_CG_SUPPORT_BIF_LS |
++ AMD_CG_SUPPORT_HDP_MGCG |
++ AMD_CG_SUPPORT_BIF_LS |
++ AMD_CG_SUPPORT_ROM_MGCG |
++ AMD_CG_SUPPORT_VCE_MGCG |
++ AMD_CG_SUPPORT_UVD_MGCG;
+ adev->pg_flags = 0;
+ adev->external_rev_id = adev->rev_id + 0x28;
+ break;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4426-drm-include-Fix-MP1_BASE-address-for-vega20.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4426-drm-include-Fix-MP1_BASE-address-for-vega20.patch
new file mode 100644
index 00000000..b03e6867
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4426-drm-include-Fix-MP1_BASE-address-for-vega20.patch
@@ -0,0 +1,37 @@
+From ff894c7b5b2785d69978dbbafb49cf3862e9c463 Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Wed, 18 Apr 2018 10:52:44 +0800
+Subject: [PATCH 4426/5725] drm/include: Fix MP1_BASE address for vega20
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/include/vega20_ip_offset.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/include/vega20_ip_offset.h b/drivers/gpu/drm/amd/include/vega20_ip_offset.h
+index 2da2d97..97db93c 100644
+--- a/drivers/gpu/drm/amd/include/vega20_ip_offset.h
++++ b/drivers/gpu/drm/amd/include/vega20_ip_offset.h
+@@ -90,7 +90,7 @@ static const struct IP_BASE MP0_BASE ={ { { { 0x00016000, 0, 0, 0, 0,
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } } } };
+-static const struct IP_BASE MP1_BASE ={ { { { 0x00016200, 0, 0, 0, 0, 0 } },
++static const struct IP_BASE MP1_BASE ={ { { { 0x00016000, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+@@ -542,7 +542,7 @@ static const struct IP_BASE RSMU_BASE ={ { { { 0x00012000, 0, 0, 0, 0
+ #define MP0_BASE__INST5_SEG4 0
+ #define MP0_BASE__INST5_SEG5 0
+
+-#define MP1_BASE__INST0_SEG0 0x00016200
++#define MP1_BASE__INST0_SEG0 0x00016000
+ #define MP1_BASE__INST0_SEG1 0
+ #define MP1_BASE__INST0_SEG2 0
+ #define MP1_BASE__INST0_SEG3 0
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4427-drm-amd-include-vg20-adjust-VCE_BASE-to-reuse-vce-4..patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4427-drm-amd-include-vg20-adjust-VCE_BASE-to-reuse-vce-4..patch
new file mode 100644
index 00000000..90971f6c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4427-drm-amd-include-vg20-adjust-VCE_BASE-to-reuse-vce-4..patch
@@ -0,0 +1,35 @@
+From dc1eddcea6f1d72e73e9f7536e8c3319556e163e Mon Sep 17 00:00:00 2001
+From: James Zhu <James.Zhu@amd.com>
+Date: Tue, 17 Apr 2018 16:25:58 -0400
+Subject: [PATCH 4427/5725] drm/amd/include/vg20: adjust VCE_BASE to reuse vce
+ 4.0 header files
+
+Vega20 uses the vce 4.1 engine, and all the registers have the
+same absolute offsets as vce 4.0. By adjusting the vega20
+VCE_BASE, vce 4.1 can reuse the vce 4.0 header files.
+
+Signed-off-by: James Zhu <James.Zhu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/include/vega20_ip_offset.h | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/include/vega20_ip_offset.h b/drivers/gpu/drm/amd/include/vega20_ip_offset.h
+index 97db93c..2a2a9cc 100644
+--- a/drivers/gpu/drm/amd/include/vega20_ip_offset.h
++++ b/drivers/gpu/drm/amd/include/vega20_ip_offset.h
+@@ -144,7 +144,8 @@ static const struct IP_BASE UVD_BASE ={ { { { 0x00007800, 0x00007E00,
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } } } };
+-static const struct IP_BASE VCE_BASE ={ { { { 0x00008800, 0, 0, 0, 0, 0 } },
++/* Adjust VCE_BASE to make vce_4_1 use vce_4_0 offset header files*/
++static const struct IP_BASE VCE_BASE ={ { { { 0x00007E00/* 0x00008800 */, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4428-drm-amdgpu-Disable-ip-modules-that-are-not-ready-yet.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4428-drm-amdgpu-Disable-ip-modules-that-are-not-ready-yet.patch
new file mode 100644
index 00000000..c68dc3fe
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4428-drm-amdgpu-Disable-ip-modules-that-are-not-ready-yet.patch
@@ -0,0 +1,50 @@
+From de1f7371d3f64370230a4864b72e3b364bd75d5d Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Tue, 24 Apr 2018 11:20:16 +0800
+Subject: [PATCH 4428/5725] drm/amdgpu: Disable ip modules that are not ready
+ yet
+
+Please enable the above IPs in soc15.c when they're available.
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/soc15.c | 14 +++++++++-----
+ 1 file changed, 9 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index 91d87f8..8d0d054 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -514,9 +514,11 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
+ amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
+- amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
+- if (!amdgpu_sriov_vf(adev))
+- amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
++ if (adev->asic_type != CHIP_VEGA20) {
++ amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
++ if (!amdgpu_sriov_vf(adev))
++ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
++ }
+ if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+ #if defined(CONFIG_DRM_AMD_DC)
+@@ -527,8 +529,10 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
+ #endif
+ amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
+- amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
+- amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
++ if (adev->asic_type != CHIP_VEGA20) {
++ amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
++ amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
++ }
+ break;
+ case CHIP_RAVEN:
+ amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4429-drm-amdgpu-vg20-Restruct-uvd-to-support-multiple-uvd.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4429-drm-amdgpu-vg20-Restruct-uvd-to-support-multiple-uvd.patch
new file mode 100644
index 00000000..681e8ebf
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4429-drm-amdgpu-vg20-Restruct-uvd-to-support-multiple-uvd.patch
@@ -0,0 +1,1358 @@
+From 528d51d0e60140ff7d6569e88539acf3039eca00 Mon Sep 17 00:00:00 2001
+From: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+Date: Wed, 9 Jan 2019 18:27:46 +0530
+Subject: [PATCH 4429/5725] drm/amdgpu/vg20:Restruct uvd to support multiple
+ uvds
+
+Vega20 has dual UVD. amdgpu_device::uvd needs to be restructured to
+support multiple UVDs. There are no logical changes here.
+
+Signed-off-by: James Zhu <James.Zhu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+
+Signed-off-by: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 6 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 4 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c | 4 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 107 ++++++++++----------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h | 19 ++--
+ drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 27 +++---
+ drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 25 ++---
+ drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 77 +++++++--------
+ drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 135 +++++++++++++-------------
+ 9 files changed, 208 insertions(+), 196 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+index 4a3cef9..fafe54a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+@@ -376,14 +376,14 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
+ struct amdgpu_device *adev = ring->adev;
+ uint64_t index;
+
+- if (ring != &adev->uvd.ring) {
++ if (ring != &adev->uvd.inst->ring) {
+ ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
+ ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
+ } else {
+ /* put fence directly behind firmware */
+ index = ALIGN(adev->uvd.fw->size, 8);
+- ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index;
+- ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
++ ring->fence_drv.cpu_addr = adev->uvd.inst->cpu_addr + index;
++ ring->fence_drv.gpu_addr = adev->uvd.inst->gpu_addr + index;
+ }
+ amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
+ amdgpu_irq_get(adev, irq_src, irq_type);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index ba812da..46cfddf 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -359,7 +359,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+ break;
+ case AMDGPU_HW_IP_UVD:
+ type = AMD_IP_BLOCK_TYPE_UVD;
+- ring_mask = adev->uvd.ring.ready ? 1 : 0;
++ ring_mask = adev->uvd.inst->ring.ready ? 1 : 0;
+ ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
+ ib_size_alignment = 16;
+ break;
+@@ -373,7 +373,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+ case AMDGPU_HW_IP_UVD_ENC:
+ type = AMD_IP_BLOCK_TYPE_UVD;
+ for (i = 0; i < adev->uvd.num_enc_rings; i++)
+- ring_mask |= ((adev->uvd.ring_enc[i].ready ? 1 : 0) << i);
++ ring_mask |= ((adev->uvd.inst->ring_enc[i].ready ? 1 : 0) << i);
+ ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
+ ib_size_alignment = 1;
+ break;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
+index 262c126..2458d38 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
+@@ -77,13 +77,13 @@ static int amdgpu_identity_map(struct amdgpu_device *adev,
+ *out_ring = &adev->sdma.instance[ring].ring;
+ break;
+ case AMDGPU_HW_IP_UVD:
+- *out_ring = &adev->uvd.ring;
++ *out_ring = &adev->uvd.inst->ring;
+ break;
+ case AMDGPU_HW_IP_VCE:
+ *out_ring = &adev->vce.ring[ring];
+ break;
+ case AMDGPU_HW_IP_UVD_ENC:
+- *out_ring = &adev->uvd.ring_enc[ring];
++ *out_ring = &adev->uvd.inst->ring_enc[ring];
+ break;
+ case AMDGPU_HW_IP_VCN_DEC:
+ *out_ring = &adev->vcn.ring_dec;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+index b98bac9..263cd945 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+@@ -129,7 +129,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
+ unsigned version_major, version_minor, family_id;
+ int i, r;
+
+- INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);
++ INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, amdgpu_uvd_idle_work_handler);
+
+ switch (adev->asic_type) {
+ #ifdef CONFIG_DRM_AMDGPU_CIK
+@@ -237,16 +237,16 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
+ bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
+
+ r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
+- AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.vcpu_bo,
+- &adev->uvd.gpu_addr, &adev->uvd.cpu_addr);
++ AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst->vcpu_bo,
++ &adev->uvd.inst->gpu_addr, &adev->uvd.inst->cpu_addr);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
+ return r;
+ }
+
+- ring = &adev->uvd.ring;
++ ring = &adev->uvd.inst->ring;
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+- r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity,
++ r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity,
+ rq, amdgpu_sched_jobs, NULL);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up UVD run queue.\n");
+@@ -254,8 +254,8 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
+ }
+
+ for (i = 0; i < adev->uvd.max_handles; ++i) {
+- atomic_set(&adev->uvd.handles[i], 0);
+- adev->uvd.filp[i] = NULL;
++ atomic_set(&adev->uvd.inst->handles[i], 0);
++ adev->uvd.inst->filp[i] = NULL;
+ }
+
+ /* from uvd v5.0 HW addressing capacity increased to 64 bits */
+@@ -285,18 +285,18 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
+ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
+ {
+ int i;
+- kfree(adev->uvd.saved_bo);
++ kfree(adev->uvd.inst->saved_bo);
+
+- drm_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);
++ drm_sched_entity_fini(&adev->uvd.inst->ring.sched, &adev->uvd.inst->entity);
+
+- amdgpu_bo_free_kernel(&adev->uvd.vcpu_bo,
+- &adev->uvd.gpu_addr,
+- (void **)&adev->uvd.cpu_addr);
++ amdgpu_bo_free_kernel(&adev->uvd.inst->vcpu_bo,
++ &adev->uvd.inst->gpu_addr,
++ (void **)&adev->uvd.inst->cpu_addr);
+
+- amdgpu_ring_fini(&adev->uvd.ring);
++ amdgpu_ring_fini(&adev->uvd.inst->ring);
+
+ for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
+- amdgpu_ring_fini(&adev->uvd.ring_enc[i]);
++ amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]);
+
+ release_firmware(adev->uvd.fw);
+
+@@ -309,29 +309,30 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
+ void *ptr;
+ int i;
+
+- if (adev->uvd.vcpu_bo == NULL)
++ if (adev->uvd.inst->vcpu_bo == NULL)
+ return 0;
+- cancel_delayed_work_sync(&adev->uvd.idle_work);
++
++ cancel_delayed_work_sync(&adev->uvd.inst->idle_work);
+
+ /* only valid for physical mode */
+ if (adev->asic_type < CHIP_POLARIS10) {
+ for (i = 0; i < adev->uvd.max_handles; ++i)
+- if (atomic_read(&adev->uvd.handles[i]))
++ if (atomic_read(&adev->uvd.inst->handles[i]))
+ break;
+
+ if (i == adev->uvd.max_handles)
+ return 0;
+ }
+
+- size = amdgpu_bo_size(adev->uvd.vcpu_bo);
+- ptr = adev->uvd.cpu_addr;
++ size = amdgpu_bo_size(adev->uvd.inst->vcpu_bo);
++ ptr = adev->uvd.inst->cpu_addr;
+
+- adev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
+- if (!adev->uvd.saved_bo)
++ adev->uvd.inst->saved_bo = kmalloc(size, GFP_KERNEL);
++ if (!adev->uvd.inst->saved_bo)
+ return -ENOMEM;
+-
+- memcpy_fromio(adev->uvd.saved_bo, ptr, size);
+-
++
++ memcpy_fromio(adev->uvd.inst->saved_bo, ptr, size);
++
+ return 0;
+ }
+
+@@ -340,16 +341,16 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
+ unsigned size;
+ void *ptr;
+
+- if (adev->uvd.vcpu_bo == NULL)
++ if (adev->uvd.inst->vcpu_bo == NULL)
+ return -EINVAL;
+
+- size = amdgpu_bo_size(adev->uvd.vcpu_bo);
+- ptr = adev->uvd.cpu_addr;
++ size = amdgpu_bo_size(adev->uvd.inst->vcpu_bo);
++ ptr = adev->uvd.inst->cpu_addr;
+
+- if (adev->uvd.saved_bo != NULL) {
+- memcpy_toio(ptr, adev->uvd.saved_bo, size);
+- kfree(adev->uvd.saved_bo);
+- adev->uvd.saved_bo = NULL;
++ if (adev->uvd.inst->saved_bo != NULL) {
++ memcpy_toio(ptr, adev->uvd.inst->saved_bo, size);
++ kfree(adev->uvd.inst->saved_bo);
++ adev->uvd.inst->saved_bo = NULL;
+ } else {
+ const struct common_firmware_header *hdr;
+ unsigned offset;
+@@ -357,14 +358,14 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
+ hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+ offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
+- memcpy_toio(adev->uvd.cpu_addr, adev->uvd.fw->data + offset,
++ memcpy_toio(adev->uvd.inst->cpu_addr, adev->uvd.fw->data + offset,
+ le32_to_cpu(hdr->ucode_size_bytes));
+ size -= le32_to_cpu(hdr->ucode_size_bytes);
+ ptr += le32_to_cpu(hdr->ucode_size_bytes);
+ }
+ memset_io(ptr, 0, size);
+ /* to restore uvd fence seq */
+- amdgpu_fence_driver_force_completion(&adev->uvd.ring);
++ amdgpu_fence_driver_force_completion(&adev->uvd.inst->ring);
+ }
+
+ return 0;
+@@ -372,12 +373,12 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
+
+ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
+ {
+- struct amdgpu_ring *ring = &adev->uvd.ring;
++ struct amdgpu_ring *ring = &adev->uvd.inst->ring;
+ int i, r;
+
+ for (i = 0; i < adev->uvd.max_handles; ++i) {
+- uint32_t handle = atomic_read(&adev->uvd.handles[i]);
+- if (handle != 0 && adev->uvd.filp[i] == filp) {
++ uint32_t handle = atomic_read(&adev->uvd.inst->handles[i]);
++ if (handle != 0 && adev->uvd.inst->filp[i] == filp) {
+ struct dma_fence *fence;
+
+ r = amdgpu_uvd_get_destroy_msg(ring, handle,
+@@ -390,8 +391,8 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+
+- adev->uvd.filp[i] = NULL;
+- atomic_set(&adev->uvd.handles[i], 0);
++ adev->uvd.inst->filp[i] = NULL;
++ atomic_set(&adev->uvd.inst->handles[i], 0);
+ }
+ }
+ }
+@@ -695,13 +696,13 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
+
+ /* try to alloc a new handle */
+ for (i = 0; i < adev->uvd.max_handles; ++i) {
+- if (atomic_read(&adev->uvd.handles[i]) == handle) {
++ if (atomic_read(&adev->uvd.inst->handles[i]) == handle) {
+ DRM_ERROR("Handle 0x%x already in use!\n", handle);
+ return -EINVAL;
+ }
+
+- if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
+- adev->uvd.filp[i] = ctx->parser->filp;
++ if (!atomic_cmpxchg(&adev->uvd.inst->handles[i], 0, handle)) {
++ adev->uvd.inst->filp[i] = ctx->parser->filp;
+ return 0;
+ }
+ }
+@@ -718,8 +719,8 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
+
+ /* validate the handle */
+ for (i = 0; i < adev->uvd.max_handles; ++i) {
+- if (atomic_read(&adev->uvd.handles[i]) == handle) {
+- if (adev->uvd.filp[i] != ctx->parser->filp) {
++ if (atomic_read(&adev->uvd.inst->handles[i]) == handle) {
++ if (adev->uvd.inst->filp[i] != ctx->parser->filp) {
+ DRM_ERROR("UVD handle collision detected!\n");
+ return -EINVAL;
+ }
+@@ -733,7 +734,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
+ case 2:
+ /* it's a destroy msg, free the handle */
+ for (i = 0; i < adev->uvd.max_handles; ++i)
+- atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
++ atomic_cmpxchg(&adev->uvd.inst->handles[i], handle, 0);
+ amdgpu_bo_kunmap(bo);
+ return 0;
+
+@@ -809,7 +810,7 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
+ }
+
+ if ((cmd == 0 || cmd == 0x3) &&
+- (start >> 28) != (ctx->parser->adev->uvd.gpu_addr >> 28)) {
++ (start >> 28) != (ctx->parser->adev->uvd.inst->gpu_addr >> 28)) {
+ DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
+ start, end);
+ return -EINVAL;
+@@ -1042,7 +1043,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
+ if (r)
+ goto err_free;
+
+- r = amdgpu_job_submit(job, ring, &adev->uvd.entity,
++ r = amdgpu_job_submit(job, ring, &adev->uvd.inst->entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+ if (r)
+ goto err_free;
+@@ -1132,8 +1133,8 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
+ {
+ struct amdgpu_device *adev =
+- container_of(work, struct amdgpu_device, uvd.idle_work.work);
+- unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.ring);
++ container_of(work, struct amdgpu_device, uvd.inst->idle_work.work);
++ unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.inst->ring);
+
+ if (fences == 0) {
+ if (adev->pm.dpm_enabled) {
+@@ -1147,7 +1148,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
+ AMD_CG_STATE_GATE);
+ }
+ } else {
+- schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
++ schedule_delayed_work(&adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
+ }
+ }
+
+@@ -1159,7 +1160,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
+ if (amdgpu_sriov_vf(adev))
+ return;
+
+- set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
++ set_clocks = !cancel_delayed_work_sync(&adev->uvd.inst->idle_work);
+ if (set_clocks) {
+ if (adev->pm.dpm_enabled) {
+ amdgpu_dpm_enable_uvd(adev, true);
+@@ -1176,7 +1177,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
+ void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
+ {
+ if (!amdgpu_sriov_vf(ring->adev))
+- schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
++ schedule_delayed_work(&ring->adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
+ }
+
+ /**
+@@ -1210,7 +1211,7 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+ } else if (r < 0) {
+ DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
+ } else {
+- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
++ DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+ r = 0;
+ }
+
+@@ -1238,7 +1239,7 @@ uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev)
+ * necessarily linear. So we need to count
+ * all non-zero handles.
+ */
+- if (atomic_read(&adev->uvd.handles[i]))
++ if (atomic_read(&adev->uvd.inst->handles[i]))
+ used_handles++;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+index 32ea20b..b1579fb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+@@ -31,30 +31,37 @@
+ #define AMDGPU_UVD_SESSION_SIZE (50*1024)
+ #define AMDGPU_UVD_FIRMWARE_OFFSET 256
+
++#define AMDGPU_MAX_UVD_INSTANCES 2
++
+ #define AMDGPU_UVD_FIRMWARE_SIZE(adev) \
+ (AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(((const struct common_firmware_header *)(adev)->uvd.fw->data)->ucode_size_bytes) + \
+ 8) - AMDGPU_UVD_FIRMWARE_OFFSET)
+
+-struct amdgpu_uvd {
++struct amdgpu_uvd_inst {
+ struct amdgpu_bo *vcpu_bo;
+ void *cpu_addr;
+ uint64_t gpu_addr;
+- unsigned fw_version;
+ void *saved_bo;
+- unsigned max_handles;
+ atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
+ struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
+ struct delayed_work idle_work;
+- const struct firmware *fw; /* UVD firmware */
+ struct amdgpu_ring ring;
+ struct amdgpu_ring ring_enc[AMDGPU_MAX_UVD_ENC_RINGS];
+ struct amdgpu_irq_src irq;
+- bool address_64_bit;
+- bool use_ctx_buf;
+ struct drm_sched_entity entity;
+ struct drm_sched_entity entity_enc;
+ uint32_t srbm_soft_reset;
++};
++
++struct amdgpu_uvd {
++ const struct firmware *fw; /* UVD firmware */
++ unsigned fw_version;
++ unsigned max_handles;
+ unsigned num_enc_rings;
++ uint8_t num_uvd_inst;
++ bool address_64_bit;
++ bool use_ctx_buf;
++ struct amdgpu_uvd_inst inst[AMDGPU_MAX_UVD_INSTANCES];
+ };
+
+ int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+index 64a633d..4ee0c10 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+@@ -93,6 +93,7 @@ static void uvd_v4_2_ring_set_wptr(struct amdgpu_ring *ring)
+ static int uvd_v4_2_early_init(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
++ adev->uvd.num_uvd_inst = 1;
+
+ uvd_v4_2_set_ring_funcs(adev);
+ uvd_v4_2_set_irq_funcs(adev);
+@@ -107,7 +108,7 @@ static int uvd_v4_2_sw_init(void *handle)
+ int r;
+
+ /* UVD TRAP */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
+ if (r)
+ return r;
+
+@@ -119,9 +120,9 @@ static int uvd_v4_2_sw_init(void *handle)
+ if (r)
+ return r;
+
+- ring = &adev->uvd.ring;
++ ring = &adev->uvd.inst->ring;
+ sprintf(ring->name, "uvd");
+- r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
++ r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+
+ return r;
+ }
+@@ -150,7 +151,7 @@ static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
+ static int uvd_v4_2_hw_init(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+- struct amdgpu_ring *ring = &adev->uvd.ring;
++ struct amdgpu_ring *ring = &adev->uvd.inst->ring;
+ uint32_t tmp;
+ int r;
+
+@@ -208,7 +209,7 @@ static int uvd_v4_2_hw_init(void *handle)
+ static int uvd_v4_2_hw_fini(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+- struct amdgpu_ring *ring = &adev->uvd.ring;
++ struct amdgpu_ring *ring = &adev->uvd.inst->ring;
+
+ if (RREG32(mmUVD_STATUS) != 0)
+ uvd_v4_2_stop(adev);
+@@ -251,7 +252,7 @@ static int uvd_v4_2_resume(void *handle)
+ */
+ static int uvd_v4_2_start(struct amdgpu_device *adev)
+ {
+- struct amdgpu_ring *ring = &adev->uvd.ring;
++ struct amdgpu_ring *ring = &adev->uvd.inst->ring;
+ uint32_t rb_bufsz;
+ int i, j, r;
+ u32 tmp;
+@@ -562,7 +563,7 @@ static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
+ uint32_t size;
+
+ /* programm the VCPU memory controller bits 0-27 */
+- addr = (adev->uvd.gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3;
++ addr = (adev->uvd.inst->gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3;
+ size = AMDGPU_UVD_FIRMWARE_SIZE(adev) >> 3;
+ WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr);
+ WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
+@@ -579,11 +580,11 @@ static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
+ WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
+
+ /* bits 28-31 */
+- addr = (adev->uvd.gpu_addr >> 28) & 0xF;
++ addr = (adev->uvd.inst->gpu_addr >> 28) & 0xF;
+ WREG32(mmUVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));
+
+ /* bits 32-39 */
+- addr = (adev->uvd.gpu_addr >> 32) & 0xFF;
++ addr = (adev->uvd.inst->gpu_addr >> 32) & 0xFF;
+ WREG32(mmUVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
+
+ WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
+@@ -690,7 +691,7 @@ static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry)
+ {
+ DRM_DEBUG("IH: UVD TRAP\n");
+- amdgpu_fence_process(&adev->uvd.ring);
++ amdgpu_fence_process(&adev->uvd.inst->ring);
+ return 0;
+ }
+
+@@ -783,7 +784,7 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
+
+ static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
+ {
+- adev->uvd.ring.funcs = &uvd_v4_2_ring_funcs;
++ adev->uvd.inst->ring.funcs = &uvd_v4_2_ring_funcs;
+ }
+
+ static const struct amdgpu_irq_src_funcs uvd_v4_2_irq_funcs = {
+@@ -793,8 +794,8 @@ static const struct amdgpu_irq_src_funcs uvd_v4_2_irq_funcs = {
+
+ static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev)
+ {
+- adev->uvd.irq.num_types = 1;
+- adev->uvd.irq.funcs = &uvd_v4_2_irq_funcs;
++ adev->uvd.inst->irq.num_types = 1;
++ adev->uvd.inst->irq.funcs = &uvd_v4_2_irq_funcs;
+ }
+
+ const struct amdgpu_ip_block_version uvd_v4_2_ip_block =
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+index c1fe30c..01810f2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+@@ -89,6 +89,7 @@ static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
+ static int uvd_v5_0_early_init(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
++ adev->uvd.num_uvd_inst = 1;
+
+ uvd_v5_0_set_ring_funcs(adev);
+ uvd_v5_0_set_irq_funcs(adev);
+@@ -103,7 +104,7 @@ static int uvd_v5_0_sw_init(void *handle)
+ int r;
+
+ /* UVD TRAP */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
+ if (r)
+ return r;
+
+@@ -115,9 +116,9 @@ static int uvd_v5_0_sw_init(void *handle)
+ if (r)
+ return r;
+
+- ring = &adev->uvd.ring;
++ ring = &adev->uvd.inst->ring;
+ sprintf(ring->name, "uvd");
+- r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
++ r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+
+ return r;
+ }
+@@ -144,7 +145,7 @@ static int uvd_v5_0_sw_fini(void *handle)
+ static int uvd_v5_0_hw_init(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+- struct amdgpu_ring *ring = &adev->uvd.ring;
++ struct amdgpu_ring *ring = &adev->uvd.inst->ring;
+ uint32_t tmp;
+ int r;
+
+@@ -204,7 +205,7 @@ static int uvd_v5_0_hw_init(void *handle)
+ static int uvd_v5_0_hw_fini(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+- struct amdgpu_ring *ring = &adev->uvd.ring;
++ struct amdgpu_ring *ring = &adev->uvd.inst->ring;
+
+ if (RREG32(mmUVD_STATUS) != 0)
+ uvd_v5_0_stop(adev);
+@@ -253,9 +254,9 @@ static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
+
+ /* programm memory controller bits 0-27 */
+ WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+- lower_32_bits(adev->uvd.gpu_addr));
++ lower_32_bits(adev->uvd.inst->gpu_addr));
+ WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+- upper_32_bits(adev->uvd.gpu_addr));
++ upper_32_bits(adev->uvd.inst->gpu_addr));
+
+ offset = AMDGPU_UVD_FIRMWARE_OFFSET;
+ size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
+@@ -287,7 +288,7 @@ static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
+ */
+ static int uvd_v5_0_start(struct amdgpu_device *adev)
+ {
+- struct amdgpu_ring *ring = &adev->uvd.ring;
++ struct amdgpu_ring *ring = &adev->uvd.inst->ring;
+ uint32_t rb_bufsz, tmp;
+ uint32_t lmi_swap_cntl;
+ uint32_t mp_swap_cntl;
+@@ -612,7 +613,7 @@ static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry)
+ {
+ DRM_DEBUG("IH: UVD TRAP\n");
+- amdgpu_fence_process(&adev->uvd.ring);
++ amdgpu_fence_process(&adev->uvd.inst->ring);
+ return 0;
+ }
+
+@@ -891,7 +892,7 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
+
+ static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
+ {
+- adev->uvd.ring.funcs = &uvd_v5_0_ring_funcs;
++ adev->uvd.inst->ring.funcs = &uvd_v5_0_ring_funcs;
+ }
+
+ static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = {
+@@ -901,8 +902,8 @@ static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = {
+
+ static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev)
+ {
+- adev->uvd.irq.num_types = 1;
+- adev->uvd.irq.funcs = &uvd_v5_0_irq_funcs;
++ adev->uvd.inst->irq.num_types = 1;
++ adev->uvd.inst->irq.funcs = &uvd_v5_0_irq_funcs;
+ }
+
+ const struct amdgpu_ip_block_version uvd_v5_0_ip_block =
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+index 3c58adc..2778e48 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+@@ -91,7 +91,7 @@ static uint64_t uvd_v6_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
+ {
+ struct amdgpu_device *adev = ring->adev;
+
+- if (ring == &adev->uvd.ring_enc[0])
++ if (ring == &adev->uvd.inst->ring_enc[0])
+ return RREG32(mmUVD_RB_RPTR);
+ else
+ return RREG32(mmUVD_RB_RPTR2);
+@@ -121,7 +121,7 @@ static uint64_t uvd_v6_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
+ {
+ struct amdgpu_device *adev = ring->adev;
+
+- if (ring == &adev->uvd.ring_enc[0])
++ if (ring == &adev->uvd.inst->ring_enc[0])
+ return RREG32(mmUVD_RB_WPTR);
+ else
+ return RREG32(mmUVD_RB_WPTR2);
+@@ -152,7 +152,7 @@ static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
+ {
+ struct amdgpu_device *adev = ring->adev;
+
+- if (ring == &adev->uvd.ring_enc[0])
++ if (ring == &adev->uvd.inst->ring_enc[0])
+ WREG32(mmUVD_RB_WPTR,
+ lower_32_bits(ring->wptr));
+ else
+@@ -375,6 +375,7 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+ static int uvd_v6_0_early_init(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
++ adev->uvd.num_uvd_inst = 1;
+
+ if (!(adev->flags & AMD_IS_APU) &&
+ (RREG32_SMC(ixCC_HARVEST_FUSES) & CC_HARVEST_FUSES__UVD_DISABLE_MASK))
+@@ -399,14 +400,14 @@ static int uvd_v6_0_sw_init(void *handle)
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ /* UVD TRAP */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
+ if (r)
+ return r;
+
+ /* UVD ENC TRAP */
+ if (uvd_v6_0_enc_support(adev)) {
+ for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 119, &adev->uvd.irq);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 119, &adev->uvd.inst->irq);
+ if (r)
+ return r;
+ }
+@@ -418,17 +419,17 @@ static int uvd_v6_0_sw_init(void *handle)
+
+ if (!uvd_v6_0_enc_support(adev)) {
+ for (i = 0; i < adev->uvd.num_enc_rings; ++i)
+- adev->uvd.ring_enc[i].funcs = NULL;
++ adev->uvd.inst->ring_enc[i].funcs = NULL;
+
+- adev->uvd.irq.num_types = 1;
++ adev->uvd.inst->irq.num_types = 1;
+ adev->uvd.num_enc_rings = 0;
+
+ DRM_INFO("UVD ENC is disabled\n");
+ } else {
+ struct drm_sched_rq *rq;
+- ring = &adev->uvd.ring_enc[0];
++ ring = &adev->uvd.inst->ring_enc[0];
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+- r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
++ r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity_enc,
+ rq, amdgpu_sched_jobs, NULL);
+ if (r) {
+ DRM_ERROR("Failed setting up UVD ENC run queue.\n");
+@@ -440,17 +441,17 @@ static int uvd_v6_0_sw_init(void *handle)
+ if (r)
+ return r;
+
+- ring = &adev->uvd.ring;
++ ring = &adev->uvd.inst->ring;
+ sprintf(ring->name, "uvd");
+- r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
++ r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+ if (r)
+ return r;
+
+ if (uvd_v6_0_enc_support(adev)) {
+ for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
+- ring = &adev->uvd.ring_enc[i];
++ ring = &adev->uvd.inst->ring_enc[i];
+ sprintf(ring->name, "uvd_enc%d", i);
+- r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
++ r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+ if (r)
+ return r;
+ }
+@@ -469,10 +470,10 @@ static int uvd_v6_0_sw_fini(void *handle)
+ return r;
+
+ if (uvd_v6_0_enc_support(adev)) {
+- drm_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc);
++ drm_sched_entity_fini(&adev->uvd.inst->ring_enc[0].sched, &adev->uvd.inst->entity_enc);
+
+ for (i = 0; i < adev->uvd.num_enc_rings; ++i)
+- amdgpu_ring_fini(&adev->uvd.ring_enc[i]);
++ amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]);
+ }
+
+ return amdgpu_uvd_sw_fini(adev);
+@@ -488,7 +489,7 @@ static int uvd_v6_0_sw_fini(void *handle)
+ static int uvd_v6_0_hw_init(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+- struct amdgpu_ring *ring = &adev->uvd.ring;
++ struct amdgpu_ring *ring = &adev->uvd.inst->ring;
+ uint32_t tmp;
+ int i, r;
+
+@@ -532,7 +533,7 @@ static int uvd_v6_0_hw_init(void *handle)
+
+ if (uvd_v6_0_enc_support(adev)) {
+ for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
+- ring = &adev->uvd.ring_enc[i];
++ ring = &adev->uvd.inst->ring_enc[i];
+ ring->ready = true;
+ r = amdgpu_ring_test_ring(ring);
+ if (r) {
+@@ -563,7 +564,7 @@ static int uvd_v6_0_hw_init(void *handle)
+ static int uvd_v6_0_hw_fini(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+- struct amdgpu_ring *ring = &adev->uvd.ring;
++ struct amdgpu_ring *ring = &adev->uvd.inst->ring;
+
+ if (RREG32(mmUVD_STATUS) != 0)
+ uvd_v6_0_stop(adev);
+@@ -611,9 +612,9 @@ static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
+
+ /* programm memory controller bits 0-27 */
+ WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+- lower_32_bits(adev->uvd.gpu_addr));
++ lower_32_bits(adev->uvd.inst->gpu_addr));
+ WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+- upper_32_bits(adev->uvd.gpu_addr));
++ upper_32_bits(adev->uvd.inst->gpu_addr));
+
+ offset = AMDGPU_UVD_FIRMWARE_OFFSET;
+ size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
+@@ -726,7 +727,7 @@ static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
+ */
+ static int uvd_v6_0_start(struct amdgpu_device *adev)
+ {
+- struct amdgpu_ring *ring = &adev->uvd.ring;
++ struct amdgpu_ring *ring = &adev->uvd.inst->ring;
+ uint32_t rb_bufsz, tmp;
+ uint32_t lmi_swap_cntl;
+ uint32_t mp_swap_cntl;
+@@ -866,14 +867,14 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
+ WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);
+
+ if (uvd_v6_0_enc_support(adev)) {
+- ring = &adev->uvd.ring_enc[0];
++ ring = &adev->uvd.inst->ring_enc[0];
+ WREG32(mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
+ WREG32(mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+ WREG32(mmUVD_RB_BASE_LO, ring->gpu_addr);
+ WREG32(mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32(mmUVD_RB_SIZE, ring->ring_size / 4);
+
+- ring = &adev->uvd.ring_enc[1];
++ ring = &adev->uvd.inst->ring_enc[1];
+ WREG32(mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
+ WREG32(mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+ WREG32(mmUVD_RB_BASE_LO2, ring->gpu_addr);
+@@ -1174,10 +1175,10 @@ static bool uvd_v6_0_check_soft_reset(void *handle)
+ srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
+
+ if (srbm_soft_reset) {
+- adev->uvd.srbm_soft_reset = srbm_soft_reset;
++ adev->uvd.inst->srbm_soft_reset = srbm_soft_reset;
+ return true;
+ } else {
+- adev->uvd.srbm_soft_reset = 0;
++ adev->uvd.inst->srbm_soft_reset = 0;
+ return false;
+ }
+ }
+@@ -1186,7 +1187,7 @@ static int uvd_v6_0_pre_soft_reset(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+- if (!adev->uvd.srbm_soft_reset)
++ if (!adev->uvd.inst->srbm_soft_reset)
+ return 0;
+
+ uvd_v6_0_stop(adev);
+@@ -1198,9 +1199,9 @@ static int uvd_v6_0_soft_reset(void *handle)
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 srbm_soft_reset;
+
+- if (!adev->uvd.srbm_soft_reset)
++ if (!adev->uvd.inst->srbm_soft_reset)
+ return 0;
+- srbm_soft_reset = adev->uvd.srbm_soft_reset;
++ srbm_soft_reset = adev->uvd.inst->srbm_soft_reset;
+
+ if (srbm_soft_reset) {
+ u32 tmp;
+@@ -1228,7 +1229,7 @@ static int uvd_v6_0_post_soft_reset(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+- if (!adev->uvd.srbm_soft_reset)
++ if (!adev->uvd.inst->srbm_soft_reset)
+ return 0;
+
+ mdelay(5);
+@@ -1254,17 +1255,17 @@ static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
+
+ switch (entry->src_id) {
+ case 124:
+- amdgpu_fence_process(&adev->uvd.ring);
++ amdgpu_fence_process(&adev->uvd.inst->ring);
+ break;
+ case 119:
+ if (likely(uvd_v6_0_enc_support(adev)))
+- amdgpu_fence_process(&adev->uvd.ring_enc[0]);
++ amdgpu_fence_process(&adev->uvd.inst->ring_enc[0]);
+ else
+ int_handled = false;
+ break;
+ case 120:
+ if (likely(uvd_v6_0_enc_support(adev)))
+- amdgpu_fence_process(&adev->uvd.ring_enc[1]);
++ amdgpu_fence_process(&adev->uvd.inst->ring_enc[1]);
+ else
+ int_handled = false;
+ break;
+@@ -1633,10 +1634,10 @@ static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = {
+ static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
+ {
+ if (adev->asic_type >= CHIP_POLARIS10) {
+- adev->uvd.ring.funcs = &uvd_v6_0_ring_vm_funcs;
++ adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_vm_funcs;
+ DRM_INFO("UVD is enabled in VM mode\n");
+ } else {
+- adev->uvd.ring.funcs = &uvd_v6_0_ring_phys_funcs;
++ adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_phys_funcs;
+ DRM_INFO("UVD is enabled in physical mode\n");
+ }
+ }
+@@ -1646,7 +1647,7 @@ static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev)
+ int i;
+
+ for (i = 0; i < adev->uvd.num_enc_rings; ++i)
+- adev->uvd.ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs;
++ adev->uvd.inst->ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs;
+
+ DRM_INFO("UVD ENC is enabled in VM mode\n");
+ }
+@@ -1659,11 +1660,11 @@ static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
+ static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
+ {
+ if (uvd_v6_0_enc_support(adev))
+- adev->uvd.irq.num_types = adev->uvd.num_enc_rings + 1;
++ adev->uvd.inst->irq.num_types = adev->uvd.num_enc_rings + 1;
+ else
+- adev->uvd.irq.num_types = 1;
++ adev->uvd.inst->irq.num_types = 1;
+
+- adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs;
++ adev->uvd.inst->irq.funcs = &uvd_v6_0_irq_funcs;
+ }
+
+ const struct amdgpu_ip_block_version uvd_v6_0_ip_block =
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+index 8245bb6..debf206 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+@@ -72,7 +72,7 @@ static uint64_t uvd_v7_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
+ {
+ struct amdgpu_device *adev = ring->adev;
+
+- if (ring == &adev->uvd.ring_enc[0])
++ if (ring == &adev->uvd.inst->ring_enc[0])
+ return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
+ else
+ return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
+@@ -106,7 +106,7 @@ static uint64_t uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
+ if (ring->use_doorbell)
+ return adev->wb.wb[ring->wptr_offs];
+
+- if (ring == &adev->uvd.ring_enc[0])
++ if (ring == &adev->uvd.inst->ring_enc[0])
+ return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
+ else
+ return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
+@@ -144,7 +144,7 @@ static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
+ return;
+ }
+
+- if (ring == &adev->uvd.ring_enc[0])
++ if (ring == &adev->uvd.inst->ring_enc[0])
+ WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR,
+ lower_32_bits(ring->wptr));
+ else
+@@ -170,8 +170,8 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
+
+ r = amdgpu_ring_alloc(ring, 16);
+ if (r) {
+- DRM_ERROR("amdgpu: uvd enc failed to lock ring %d (%d).\n",
+- ring->idx, r);
++ DRM_ERROR("amdgpu: uvd enc failed to lock (%d)ring %d (%d).\n",
++ ring->me, ring->idx, r);
+ return r;
+ }
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
+@@ -184,11 +184,11 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
+ }
+
+ if (i < adev->usec_timeout) {
+- DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
+- ring->idx, i);
++ DRM_DEBUG("(%d)ring test on %d succeeded in %d usecs\n",
++ ring->me, ring->idx, i);
+ } else {
+- DRM_ERROR("amdgpu: ring %d test failed\n",
+- ring->idx);
++ DRM_ERROR("amdgpu: (%d)ring %d test failed\n",
++ ring->me, ring->idx);
+ r = -ETIMEDOUT;
+ }
+
+@@ -342,24 +342,24 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+
+ r = uvd_v7_0_enc_get_create_msg(ring, 1, NULL);
+ if (r) {
+- DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
++ DRM_ERROR("amdgpu: (%d)failed to get create msg (%ld).\n", ring->me, r);
+ goto error;
+ }
+
+ r = uvd_v7_0_enc_get_destroy_msg(ring, 1, true, &fence);
+ if (r) {
+- DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
++ DRM_ERROR("amdgpu: (%d)failed to get destroy ib (%ld).\n", ring->me, r);
+ goto error;
+ }
+
+ r = dma_fence_wait_timeout(fence, false, timeout);
+ if (r == 0) {
+- DRM_ERROR("amdgpu: IB test timed out.\n");
++ DRM_ERROR("amdgpu: (%d)IB test timed out.\n", ring->me);
+ r = -ETIMEDOUT;
+ } else if (r < 0) {
+- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
++ DRM_ERROR("amdgpu: (%d)fence wait failed (%ld).\n", ring->me, r);
+ } else {
+- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
++ DRM_DEBUG("ib test on (%d)ring %d succeeded\n", ring->me, ring->idx);
+ r = 0;
+ }
+ error:
+@@ -370,6 +370,7 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+ static int uvd_v7_0_early_init(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
++ adev->uvd.num_uvd_inst = 1;
+
+ if (amdgpu_sriov_vf(adev))
+ adev->uvd.num_enc_rings = 1;
+@@ -390,13 +391,13 @@ static int uvd_v7_0_sw_init(void *handle)
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ /* UVD TRAP */
+- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, 124, &adev->uvd.irq);
++ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, 124, &adev->uvd.inst->irq);
+ if (r)
+ return r;
+
+ /* UVD ENC TRAP */
+ for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
+- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, i + 119, &adev->uvd.irq);
++ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, i + 119, &adev->uvd.inst->irq);
+ if (r)
+ return r;
+ }
+@@ -415,9 +416,9 @@ static int uvd_v7_0_sw_init(void *handle)
+ DRM_INFO("PSP loading UVD firmware\n");
+ }
+
+- ring = &adev->uvd.ring_enc[0];
++ ring = &adev->uvd.inst->ring_enc[0];
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+- r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
++ r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity_enc,
+ rq, amdgpu_sched_jobs, NULL);
+ if (r) {
+ DRM_ERROR("Failed setting up UVD ENC run queue.\n");
+@@ -428,15 +429,15 @@ static int uvd_v7_0_sw_init(void *handle)
+ if (r)
+ return r;
+ if (!amdgpu_sriov_vf(adev)) {
+- ring = &adev->uvd.ring;
++ ring = &adev->uvd.inst->ring;
+ sprintf(ring->name, "uvd");
+- r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
++ r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+ if (r)
+ return r;
+ }
+
+ for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
+- ring = &adev->uvd.ring_enc[i];
++ ring = &adev->uvd.inst->ring_enc[i];
+ sprintf(ring->name, "uvd_enc%d", i);
+ if (amdgpu_sriov_vf(adev)) {
+ ring->use_doorbell = true;
+@@ -449,7 +450,7 @@ static int uvd_v7_0_sw_init(void *handle)
+ else
+ ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING2_3 * 2 + 1;
+ }
+- r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
++ r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+ if (r)
+ return r;
+ }
+@@ -472,10 +473,10 @@ static int uvd_v7_0_sw_fini(void *handle)
+ if (r)
+ return r;
+
+- drm_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc);
++ drm_sched_entity_fini(&adev->uvd.inst->ring_enc[0].sched, &adev->uvd.inst->entity_enc);
+
+ for (i = 0; i < adev->uvd.num_enc_rings; ++i)
+- amdgpu_ring_fini(&adev->uvd.ring_enc[i]);
++ amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]);
+
+ return amdgpu_uvd_sw_fini(adev);
+ }
+@@ -490,7 +491,7 @@ static int uvd_v7_0_sw_fini(void *handle)
+ static int uvd_v7_0_hw_init(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+- struct amdgpu_ring *ring = &adev->uvd.ring;
++ struct amdgpu_ring *ring = &adev->uvd.inst->ring;
+ uint32_t tmp;
+ int i, r;
+
+@@ -543,7 +544,7 @@ static int uvd_v7_0_hw_init(void *handle)
+ }
+
+ for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
+- ring = &adev->uvd.ring_enc[i];
++ ring = &adev->uvd.inst->ring_enc[i];
+ ring->ready = true;
+ r = amdgpu_ring_test_ring(ring);
+ if (r) {
+@@ -569,7 +570,7 @@ static int uvd_v7_0_hw_init(void *handle)
+ static int uvd_v7_0_hw_fini(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+- struct amdgpu_ring *ring = &adev->uvd.ring;
++ struct amdgpu_ring *ring = &adev->uvd.inst->ring;
+
+ if (!amdgpu_sriov_vf(adev))
+ uvd_v7_0_stop(adev);
+@@ -627,9 +628,9 @@ static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
+ offset = 0;
+ } else {
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+- lower_32_bits(adev->uvd.gpu_addr));
++ lower_32_bits(adev->uvd.inst->gpu_addr));
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+- upper_32_bits(adev->uvd.gpu_addr));
++ upper_32_bits(adev->uvd.inst->gpu_addr));
+ offset = size;
+ }
+
+@@ -638,16 +639,16 @@ static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
+ WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);
+
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
+- lower_32_bits(adev->uvd.gpu_addr + offset));
++ lower_32_bits(adev->uvd.inst->gpu_addr + offset));
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
+- upper_32_bits(adev->uvd.gpu_addr + offset));
++ upper_32_bits(adev->uvd.inst->gpu_addr + offset));
+ WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, (1 << 21));
+ WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_UVD_HEAP_SIZE);
+
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
+- lower_32_bits(adev->uvd.gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
++ lower_32_bits(adev->uvd.inst->gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
+- upper_32_bits(adev->uvd.gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
++ upper_32_bits(adev->uvd.inst->gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
+ WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, (2 << 21));
+ WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2,
+ AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
+@@ -688,10 +689,10 @@ static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
+ /* 4, set resp to zero */
+ WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);
+
+- WDOORBELL32(adev->uvd.ring_enc[0].doorbell_index, 0);
+- adev->wb.wb[adev->uvd.ring_enc[0].wptr_offs] = 0;
+- adev->uvd.ring_enc[0].wptr = 0;
+- adev->uvd.ring_enc[0].wptr_old = 0;
++ WDOORBELL32(adev->uvd.inst->ring_enc[0].doorbell_index, 0);
++ adev->wb.wb[adev->uvd.inst->ring_enc[0].wptr_offs] = 0;
++ adev->uvd.inst->ring_enc[0].wptr = 0;
++ adev->uvd.inst->ring_enc[0].wptr_old = 0;
+
+ /* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
+ WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001);
+@@ -742,7 +743,7 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
+
+ init_table += header->uvd_table_offset;
+
+- ring = &adev->uvd.ring;
++ ring = &adev->uvd.inst->ring;
+ ring->wptr = 0;
+ size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
+
+@@ -757,9 +758,9 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
+ offset = 0;
+ } else {
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+- lower_32_bits(adev->uvd.gpu_addr));
++ lower_32_bits(adev->uvd.inst->gpu_addr));
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+- upper_32_bits(adev->uvd.gpu_addr));
++ upper_32_bits(adev->uvd.inst->gpu_addr));
+ offset = size;
+ }
+
+@@ -768,16 +769,16 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size);
+
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
+- lower_32_bits(adev->uvd.gpu_addr + offset));
++ lower_32_bits(adev->uvd.inst->gpu_addr + offset));
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
+- upper_32_bits(adev->uvd.gpu_addr + offset));
++ upper_32_bits(adev->uvd.inst->gpu_addr + offset));
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21));
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE);
+
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
+- lower_32_bits(adev->uvd.gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
++ lower_32_bits(adev->uvd.inst->gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
+- upper_32_bits(adev->uvd.gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
++ upper_32_bits(adev->uvd.inst->gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21));
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE2),
+ AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
+@@ -841,7 +842,7 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), tmp);
+
+- ring = &adev->uvd.ring_enc[0];
++ ring = &adev->uvd.inst->ring_enc[0];
+ ring->wptr = 0;
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_LO), ring->gpu_addr);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
+@@ -874,7 +875,7 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
+ */
+ static int uvd_v7_0_start(struct amdgpu_device *adev)
+ {
+- struct amdgpu_ring *ring = &adev->uvd.ring;
++ struct amdgpu_ring *ring = &adev->uvd.inst->ring;
+ uint32_t rb_bufsz, tmp;
+ uint32_t lmi_swap_cntl;
+ uint32_t mp_swap_cntl;
+@@ -1027,14 +1028,14 @@ static int uvd_v7_0_start(struct amdgpu_device *adev)
+ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
+ ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
+
+- ring = &adev->uvd.ring_enc[0];
++ ring = &adev->uvd.inst->ring_enc[0];
+ WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
+ WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+ WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
+ WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
+
+- ring = &adev->uvd.ring_enc[1];
++ ring = &adev->uvd.inst->ring_enc[1];
+ WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
+ WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+ WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
+@@ -1183,8 +1184,8 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
+ WREG32_SOC15(UVD, 0, mmUVD_CONTEXT_ID, 0xCAFEDEAD);
+ r = amdgpu_ring_alloc(ring, 3);
+ if (r) {
+- DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
+- ring->idx, r);
++ DRM_ERROR("amdgpu: (%d)cp failed to lock ring %d (%d).\n",
++ ring->me, ring->idx, r);
+ return r;
+ }
+ amdgpu_ring_write(ring,
+@@ -1199,11 +1200,11 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
+ }
+
+ if (i < adev->usec_timeout) {
+- DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
+- ring->idx, i);
++ DRM_DEBUG("(%d)ring test on %d succeeded in %d usecs\n",
++ ring->me, ring->idx, i);
+ } else {
+- DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
+- ring->idx, tmp);
++ DRM_ERROR("(%d)amdgpu: ring %d test failed (0x%08X)\n",
++ ring->me, ring->idx, tmp);
+ r = -EINVAL;
+ }
+ return r;
+@@ -1386,10 +1387,10 @@ static bool uvd_v7_0_check_soft_reset(void *handle)
+ SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
+
+ if (srbm_soft_reset) {
+- adev->uvd.srbm_soft_reset = srbm_soft_reset;
++ adev->uvd.inst->srbm_soft_reset = srbm_soft_reset;
+ return true;
+ } else {
+- adev->uvd.srbm_soft_reset = 0;
++ adev->uvd.inst->srbm_soft_reset = 0;
+ return false;
+ }
+ }
+@@ -1398,7 +1399,7 @@ static int uvd_v7_0_pre_soft_reset(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+- if (!adev->uvd.srbm_soft_reset)
++ if (!adev->uvd.inst->srbm_soft_reset)
+ return 0;
+
+ uvd_v7_0_stop(adev);
+@@ -1410,9 +1411,9 @@ static int uvd_v7_0_soft_reset(void *handle)
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 srbm_soft_reset;
+
+- if (!adev->uvd.srbm_soft_reset)
++ if (!adev->uvd.inst->srbm_soft_reset)
+ return 0;
+- srbm_soft_reset = adev->uvd.srbm_soft_reset;
++ srbm_soft_reset = adev->uvd.inst->srbm_soft_reset;
+
+ if (srbm_soft_reset) {
+ u32 tmp;
+@@ -1440,7 +1441,7 @@ static int uvd_v7_0_post_soft_reset(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+- if (!adev->uvd.srbm_soft_reset)
++ if (!adev->uvd.inst->srbm_soft_reset)
+ return 0;
+
+ mdelay(5);
+@@ -1465,14 +1466,14 @@ static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev,
+ DRM_DEBUG("IH: UVD TRAP\n");
+ switch (entry->src_id) {
+ case 124:
+- amdgpu_fence_process(&adev->uvd.ring);
++ amdgpu_fence_process(&adev->uvd.inst->ring);
+ break;
+ case 119:
+- amdgpu_fence_process(&adev->uvd.ring_enc[0]);
++ amdgpu_fence_process(&adev->uvd.inst->ring_enc[0]);
+ break;
+ case 120:
+ if (!amdgpu_sriov_vf(adev))
+- amdgpu_fence_process(&adev->uvd.ring_enc[1]);
++ amdgpu_fence_process(&adev->uvd.inst->ring_enc[1]);
+ break;
+ default:
+ DRM_ERROR("Unhandled interrupt: %d %d\n",
+@@ -1741,7 +1742,7 @@ static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
+
+ static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
+ {
+- adev->uvd.ring.funcs = &uvd_v7_0_ring_vm_funcs;
++ adev->uvd.inst->ring.funcs = &uvd_v7_0_ring_vm_funcs;
+ DRM_INFO("UVD is enabled in VM mode\n");
+ }
+
+@@ -1750,7 +1751,7 @@ static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev)
+ int i;
+
+ for (i = 0; i < adev->uvd.num_enc_rings; ++i)
+- adev->uvd.ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
++ adev->uvd.inst->ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
+
+ DRM_INFO("UVD ENC is enabled in VM mode\n");
+ }
+@@ -1762,8 +1763,8 @@ static const struct amdgpu_irq_src_funcs uvd_v7_0_irq_funcs = {
+
+ static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
+ {
+- adev->uvd.irq.num_types = adev->uvd.num_enc_rings + 1;
+- adev->uvd.irq.funcs = &uvd_v7_0_irq_funcs;
++ adev->uvd.inst->irq.num_types = adev->uvd.num_enc_rings + 1;
++ adev->uvd.inst->irq.funcs = &uvd_v7_0_irq_funcs;
+ }
+
+ const struct amdgpu_ip_block_version uvd_v7_0_ip_block =
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4430-drm-amdgpu-vg20-Restruct-uvd.inst-to-support-multipl.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4430-drm-amdgpu-vg20-Restruct-uvd.inst-to-support-multipl.patch
new file mode 100644
index 00000000..586cc880
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4430-drm-amdgpu-vg20-Restruct-uvd.inst-to-support-multipl.patch
@@ -0,0 +1,1874 @@
+From b120d0a52e56b00f6c667dcffb6fba1455db2af6 Mon Sep 17 00:00:00 2001
+From: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+Date: Wed, 9 Jan 2019 19:29:02 +0530
+Subject: [PATCH 4430/5725] drm/amdgpu/vg20:Restruct uvd.inst to support
+ multiple instances
+
+Vega20 has dual UVD. Add multiple-instance support for UVD by
+restructuring uvd.inst: use uvd.inst[0] in place of uvd.inst->.
+Repurpose amdgpu_ring::me as the instance index and initialize it to 0.
+There are no logical changes here.
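+
+A minimal sketch of the access pattern this moves toward (illustrative
+only, not part of the applied diff; the helper name is hypothetical):
+
+	/* Per-instance UVD state is reached through the ring's instance
+	 * index; ring->me is initialized to 0, so uvd.inst[ring->me]
+	 * behaves like the old uvd.inst-> accesses on single-UVD parts.
+	 */
+	static inline struct amdgpu_uvd_inst *
+	to_uvd_inst(struct amdgpu_ring *ring)
+	{
+		return &ring->adev->uvd.inst[ring->me];
+	}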
+
+Signed-off-by: James Zhu <James.Zhu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 6 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 12 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 1 +
+ drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 231 +++----
+ drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 1002 +++++++++++++++--------------
+ 5 files changed, 661 insertions(+), 591 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+index fafe54a..bed6d77 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+@@ -376,14 +376,14 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
+ struct amdgpu_device *adev = ring->adev;
+ uint64_t index;
+
+- if (ring != &adev->uvd.inst->ring) {
++ if (ring != &adev->uvd.inst[ring->me].ring) {
+ ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
+ ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
+ } else {
+ /* put fence directly behind firmware */
+ index = ALIGN(adev->uvd.fw->size, 8);
+- ring->fence_drv.cpu_addr = adev->uvd.inst->cpu_addr + index;
+- ring->fence_drv.gpu_addr = adev->uvd.inst->gpu_addr + index;
++ ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index;
++ ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
+ }
+ amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
+ amdgpu_irq_get(adev, irq_src, irq_type);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index 46cfddf..9de27ce 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -283,7 +283,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+ struct drm_crtc *crtc;
+ uint32_t ui32 = 0;
+ uint64_t ui64 = 0;
+- int i, found;
++ int i, j, found;
+ int ui32_size = sizeof(ui32);
+
+ if (!info->return_size || !info->return_pointer)
+@@ -359,7 +359,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+ break;
+ case AMDGPU_HW_IP_UVD:
+ type = AMD_IP_BLOCK_TYPE_UVD;
+- ring_mask = adev->uvd.inst->ring.ready ? 1 : 0;
++ for (i = 0; i < adev->uvd.num_uvd_inst; i++)
++ ring_mask |= ((adev->uvd.inst[i].ring.ready ? 1 : 0) << i);
+ ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
+ ib_size_alignment = 16;
+ break;
+@@ -372,8 +373,11 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+ break;
+ case AMDGPU_HW_IP_UVD_ENC:
+ type = AMD_IP_BLOCK_TYPE_UVD;
+- for (i = 0; i < adev->uvd.num_enc_rings; i++)
+- ring_mask |= ((adev->uvd.inst->ring_enc[i].ready ? 1 : 0) << i);
++ for (i = 0; i < adev->uvd.num_uvd_inst; i++)
++ for (j = 0; j < adev->uvd.num_enc_rings; j++)
++ ring_mask |=
++ ((adev->uvd.inst[i].ring_enc[j].ready ? 1 : 0) <<
++ (j + i * adev->uvd.num_enc_rings));
+ ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
+ ib_size_alignment = 1;
+ break;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+index 49cad08..c6850b6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+@@ -362,6 +362,7 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
+
+ dma_fence_put(ring->vmid_wait);
+ ring->vmid_wait = NULL;
++ ring->me = 0;
+
+ ring->adev->rings[ring->idx] = NULL;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+index 263cd945..c9ed917 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+@@ -127,7 +127,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
+ const char *fw_name;
+ const struct common_firmware_header *hdr;
+ unsigned version_major, version_minor, family_id;
+- int i, r;
++ int i, j, r;
+
+ INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, amdgpu_uvd_idle_work_handler);
+
+@@ -236,28 +236,30 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
+ bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
+
+- r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
+- AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst->vcpu_bo,
+- &adev->uvd.inst->gpu_addr, &adev->uvd.inst->cpu_addr);
+- if (r) {
+- dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
+- return r;
+- }
++ for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+
+- ring = &adev->uvd.inst->ring;
+- rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+- r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity,
+- rq, amdgpu_sched_jobs, NULL);
+- if (r != 0) {
+- DRM_ERROR("Failed setting up UVD run queue.\n");
+- return r;
+- }
++ r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
++ AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst[j].vcpu_bo,
++ &adev->uvd.inst[j].gpu_addr, &adev->uvd.inst[j].cpu_addr);
++ if (r) {
++ dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
++ return r;
++ }
+
+- for (i = 0; i < adev->uvd.max_handles; ++i) {
+- atomic_set(&adev->uvd.inst->handles[i], 0);
+- adev->uvd.inst->filp[i] = NULL;
+- }
++ ring = &adev->uvd.inst[j].ring;
++ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
++ r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity,
++ rq, amdgpu_sched_jobs, NULL);
++ if (r != 0) {
++ DRM_ERROR("Failed setting up UVD(%d) run queue.\n", j);
++ return r;
++ }
+
++ for (i = 0; i < adev->uvd.max_handles; ++i) {
++ atomic_set(&adev->uvd.inst[j].handles[i], 0);
++ adev->uvd.inst[j].filp[i] = NULL;
++ }
++ }
+ /* from uvd v5.0 HW addressing capacity increased to 64 bits */
+ if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
+ adev->uvd.address_64_bit = true;
+@@ -284,20 +286,22 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
+
+ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
+ {
+- int i;
+- kfree(adev->uvd.inst->saved_bo);
++ int i, j;
+
+- drm_sched_entity_fini(&adev->uvd.inst->ring.sched, &adev->uvd.inst->entity);
++ for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
++ kfree(adev->uvd.inst[j].saved_bo);
+
+- amdgpu_bo_free_kernel(&adev->uvd.inst->vcpu_bo,
+- &adev->uvd.inst->gpu_addr,
+- (void **)&adev->uvd.inst->cpu_addr);
++ drm_sched_entity_fini(&adev->uvd.inst[j].ring.sched, &adev->uvd.inst[j].entity);
+
+- amdgpu_ring_fini(&adev->uvd.inst->ring);
++ amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
++ &adev->uvd.inst[j].gpu_addr,
++ (void **)&adev->uvd.inst[j].cpu_addr);
+
+- for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
+- amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]);
++ amdgpu_ring_fini(&adev->uvd.inst[j].ring);
+
++ for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
++ amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
++ }
+ release_firmware(adev->uvd.fw);
+
+ return 0;
+@@ -307,32 +311,33 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
+ {
+ unsigned size;
+ void *ptr;
+- int i;
++ int i, j;
+
+- if (adev->uvd.inst->vcpu_bo == NULL)
+- return 0;
++ for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
++ if (adev->uvd.inst[j].vcpu_bo == NULL)
++ continue;
+
+- cancel_delayed_work_sync(&adev->uvd.inst->idle_work);
++ cancel_delayed_work_sync(&adev->uvd.inst[j].idle_work);
+
+- /* only valid for physical mode */
+- if (adev->asic_type < CHIP_POLARIS10) {
+- for (i = 0; i < adev->uvd.max_handles; ++i)
+- if (atomic_read(&adev->uvd.inst->handles[i]))
+- break;
++ /* only valid for physical mode */
++ if (adev->asic_type < CHIP_POLARIS10) {
++ for (i = 0; i < adev->uvd.max_handles; ++i)
++ if (atomic_read(&adev->uvd.inst[j].handles[i]))
++ break;
+
+- if (i == adev->uvd.max_handles)
+- return 0;
+- }
++ if (i == adev->uvd.max_handles)
++ continue;
++ }
+
+- size = amdgpu_bo_size(adev->uvd.inst->vcpu_bo);
+- ptr = adev->uvd.inst->cpu_addr;
++ size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo);
++ ptr = adev->uvd.inst[j].cpu_addr;
++
++ adev->uvd.inst[j].saved_bo = kmalloc(size, GFP_KERNEL);
++ if (!adev->uvd.inst[j].saved_bo)
++ return -ENOMEM;
++ memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size);
++ }
+
+- adev->uvd.inst->saved_bo = kmalloc(size, GFP_KERNEL);
+- if (!adev->uvd.inst->saved_bo)
+- return -ENOMEM;
+-
+- memcpy_fromio(adev->uvd.inst->saved_bo, ptr, size);
+-
+ return 0;
+ }
+
+@@ -340,59 +345,65 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
+ {
+ unsigned size;
+ void *ptr;
++ int i;
+
+- if (adev->uvd.inst->vcpu_bo == NULL)
+- return -EINVAL;
++ for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
++ if (adev->uvd.inst[i].vcpu_bo == NULL)
++ return -EINVAL;
+
+- size = amdgpu_bo_size(adev->uvd.inst->vcpu_bo);
+- ptr = adev->uvd.inst->cpu_addr;
++ size = amdgpu_bo_size(adev->uvd.inst[i].vcpu_bo);
++ ptr = adev->uvd.inst[i].cpu_addr;
+
+- if (adev->uvd.inst->saved_bo != NULL) {
+- memcpy_toio(ptr, adev->uvd.inst->saved_bo, size);
+- kfree(adev->uvd.inst->saved_bo);
+- adev->uvd.inst->saved_bo = NULL;
+- } else {
+- const struct common_firmware_header *hdr;
+- unsigned offset;
+-
+- hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
+- if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+- offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
+- memcpy_toio(adev->uvd.inst->cpu_addr, adev->uvd.fw->data + offset,
+- le32_to_cpu(hdr->ucode_size_bytes));
+- size -= le32_to_cpu(hdr->ucode_size_bytes);
+- ptr += le32_to_cpu(hdr->ucode_size_bytes);
++ if (adev->uvd.inst[i].saved_bo != NULL) {
++ memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size);
++ kfree(adev->uvd.inst[i].saved_bo);
++ adev->uvd.inst[i].saved_bo = NULL;
++ } else {
++ const struct common_firmware_header *hdr;
++ unsigned offset;
++
++ hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
++ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
++ offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
++ memcpy_toio(adev->uvd.inst[i].cpu_addr, adev->uvd.fw->data + offset,
++ le32_to_cpu(hdr->ucode_size_bytes));
++ size -= le32_to_cpu(hdr->ucode_size_bytes);
++ ptr += le32_to_cpu(hdr->ucode_size_bytes);
++ }
++ memset_io(ptr, 0, size);
++ /* to restore uvd fence seq */
++ amdgpu_fence_driver_force_completion(&adev->uvd.inst[i].ring);
+ }
+- memset_io(ptr, 0, size);
+- /* to restore uvd fence seq */
+- amdgpu_fence_driver_force_completion(&adev->uvd.inst->ring);
+ }
+-
+ return 0;
+ }
+
+ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
+ {
+- struct amdgpu_ring *ring = &adev->uvd.inst->ring;
+- int i, r;
++ struct amdgpu_ring *ring;
++ int i, j, r;
+
+- for (i = 0; i < adev->uvd.max_handles; ++i) {
+- uint32_t handle = atomic_read(&adev->uvd.inst->handles[i]);
+- if (handle != 0 && adev->uvd.inst->filp[i] == filp) {
+- struct dma_fence *fence;
+-
+- r = amdgpu_uvd_get_destroy_msg(ring, handle,
+- false, &fence);
+- if (r) {
+- DRM_ERROR("Error destroying UVD (%d)!\n", r);
+- continue;
+- }
++ for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
++ ring = &adev->uvd.inst[j].ring;
+
+- dma_fence_wait(fence, false);
+- dma_fence_put(fence);
++ for (i = 0; i < adev->uvd.max_handles; ++i) {
++ uint32_t handle = atomic_read(&adev->uvd.inst[j].handles[i]);
++ if (handle != 0 && adev->uvd.inst[j].filp[i] == filp) {
++ struct dma_fence *fence;
++
++ r = amdgpu_uvd_get_destroy_msg(ring, handle,
++ false, &fence);
++ if (r) {
++ DRM_ERROR("Error destroying UVD(%d) %d!\n", j, r);
++ continue;
++ }
+
+- adev->uvd.inst->filp[i] = NULL;
+- atomic_set(&adev->uvd.inst->handles[i], 0);
++ dma_fence_wait(fence, false);
++ dma_fence_put(fence);
++
++ adev->uvd.inst[j].filp[i] = NULL;
++ atomic_set(&adev->uvd.inst[j].handles[i], 0);
++ }
+ }
+ }
+ }
+@@ -667,15 +678,16 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
+ void *ptr;
+ long r;
+ int i;
++ uint32_t ip_instance = ctx->parser->job->ring->me;
+
+ if (offset & 0x3F) {
+- DRM_ERROR("UVD messages must be 64 byte aligned!\n");
++ DRM_ERROR("UVD(%d) messages must be 64 byte aligned!\n", ip_instance);
+ return -EINVAL;
+ }
+
+ r = amdgpu_bo_kmap(bo, &ptr);
+ if (r) {
+- DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r);
++ DRM_ERROR("Failed mapping the UVD(%d) message (%ld)!\n", ip_instance, r);
+ return r;
+ }
+
+@@ -685,7 +697,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
+ handle = msg[2];
+
+ if (handle == 0) {
+- DRM_ERROR("Invalid UVD handle!\n");
++ DRM_ERROR("Invalid UVD(%d) handle!\n", ip_instance);
+ return -EINVAL;
+ }
+
+@@ -696,18 +708,18 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
+
+ /* try to alloc a new handle */
+ for (i = 0; i < adev->uvd.max_handles; ++i) {
+- if (atomic_read(&adev->uvd.inst->handles[i]) == handle) {
+- DRM_ERROR("Handle 0x%x already in use!\n", handle);
++ if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) {
++ DRM_ERROR("(%d)Handle 0x%x already in use!\n", ip_instance, handle);
+ return -EINVAL;
+ }
+
+- if (!atomic_cmpxchg(&adev->uvd.inst->handles[i], 0, handle)) {
+- adev->uvd.inst->filp[i] = ctx->parser->filp;
++ if (!atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], 0, handle)) {
++ adev->uvd.inst[ip_instance].filp[i] = ctx->parser->filp;
+ return 0;
+ }
+ }
+
+- DRM_ERROR("No more free UVD handles!\n");
++ DRM_ERROR("No more free UVD(%d) handles!\n", ip_instance);
+ return -ENOSPC;
+
+ case 1:
+@@ -719,27 +731,27 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
+
+ /* validate the handle */
+ for (i = 0; i < adev->uvd.max_handles; ++i) {
+- if (atomic_read(&adev->uvd.inst->handles[i]) == handle) {
+- if (adev->uvd.inst->filp[i] != ctx->parser->filp) {
+- DRM_ERROR("UVD handle collision detected!\n");
++ if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) {
++ if (adev->uvd.inst[ip_instance].filp[i] != ctx->parser->filp) {
++ DRM_ERROR("UVD(%d) handle collision detected!\n", ip_instance);
+ return -EINVAL;
+ }
+ return 0;
+ }
+ }
+
+- DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
++ DRM_ERROR("Invalid UVD(%d) handle 0x%x!\n", ip_instance, handle);
+ return -ENOENT;
+
+ case 2:
+ /* it's a destroy msg, free the handle */
+ for (i = 0; i < adev->uvd.max_handles; ++i)
+- atomic_cmpxchg(&adev->uvd.inst->handles[i], handle, 0);
++ atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], handle, 0);
+ amdgpu_bo_kunmap(bo);
+ return 0;
+
+ default:
+- DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
++ DRM_ERROR("Illegal UVD(%d) message type (%d)!\n", ip_instance, msg_type);
+ return -EINVAL;
+ }
+ BUG();
+@@ -1043,7 +1055,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
+ if (r)
+ goto err_free;
+
+- r = amdgpu_job_submit(job, ring, &adev->uvd.inst->entity,
++ r = amdgpu_job_submit(job, ring, &adev->uvd.inst[ring->me].entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+ if (r)
+ goto err_free;
+@@ -1191,27 +1203,28 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+ {
+ struct dma_fence *fence;
+ long r;
++ uint32_t ip_instance = ring->me;
+
+ r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
+ if (r) {
+- DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
++ DRM_ERROR("amdgpu: (%d)failed to get create msg (%ld).\n", ip_instance, r);
+ goto error;
+ }
+
+ r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
+ if (r) {
+- DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
++ DRM_ERROR("amdgpu: (%d)failed to get destroy ib (%ld).\n", ip_instance, r);
+ goto error;
+ }
+
+ r = dma_fence_wait_timeout(fence, false, timeout);
+ if (r == 0) {
+- DRM_ERROR("amdgpu: IB test timed out.\n");
++ DRM_ERROR("amdgpu: (%d)IB test timed out.\n", ip_instance);
+ r = -ETIMEDOUT;
+ } else if (r < 0) {
+- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
++ DRM_ERROR("amdgpu: (%d)fence wait failed (%ld).\n", ip_instance, r);
+ } else {
+- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
++ DRM_DEBUG("ib test on (%d)ring %d succeeded\n", ip_instance, ring->idx);
+ r = 0;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+index debf206..38816227 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+@@ -58,7 +58,7 @@ static uint64_t uvd_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
+ {
+ struct amdgpu_device *adev = ring->adev;
+
+- return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
++ return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
+ }
+
+ /**
+@@ -72,10 +72,10 @@ static uint64_t uvd_v7_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
+ {
+ struct amdgpu_device *adev = ring->adev;
+
+- if (ring == &adev->uvd.inst->ring_enc[0])
+- return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
++ if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
++ return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR);
+ else
+- return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
++ return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2);
+ }
+
+ /**
+@@ -89,7 +89,7 @@ static uint64_t uvd_v7_0_ring_get_wptr(struct amdgpu_ring *ring)
+ {
+ struct amdgpu_device *adev = ring->adev;
+
+- return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR);
++ return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR);
+ }
+
+ /**
+@@ -106,10 +106,10 @@ static uint64_t uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
+ if (ring->use_doorbell)
+ return adev->wb.wb[ring->wptr_offs];
+
+- if (ring == &adev->uvd.inst->ring_enc[0])
+- return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
++ if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
++ return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
+ else
+- return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
++ return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
+ }
+
+ /**
+@@ -123,7 +123,7 @@ static void uvd_v7_0_ring_set_wptr(struct amdgpu_ring *ring)
+ {
+ struct amdgpu_device *adev = ring->adev;
+
+- WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
++ WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
+ }
+
+ /**
+@@ -144,11 +144,11 @@ static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
+ return;
+ }
+
+- if (ring == &adev->uvd.inst->ring_enc[0])
+- WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR,
++ if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
++ WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR,
+ lower_32_bits(ring->wptr));
+ else
+- WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2,
++ WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2,
+ lower_32_bits(ring->wptr));
+ }
+
+@@ -387,19 +387,21 @@ static int uvd_v7_0_sw_init(void *handle)
+ {
+ struct amdgpu_ring *ring;
+ struct drm_sched_rq *rq;
+- int i, r;
++ int i, j, r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+- /* UVD TRAP */
+- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, 124, &adev->uvd.inst->irq);
+- if (r)
+- return r;
+-
+- /* UVD ENC TRAP */
+- for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
+- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, i + 119, &adev->uvd.inst->irq);
++ for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
++ /* UVD TRAP */
++ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, 124, &adev->uvd.inst[j].irq);
+ if (r)
+ return r;
++
++ /* UVD ENC TRAP */
++ for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
++ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, i + 119, &adev->uvd.inst[j].irq);
++ if (r)
++ return r;
++ }
+ }
+
+ r = amdgpu_uvd_sw_init(adev);
+@@ -416,43 +418,48 @@ static int uvd_v7_0_sw_init(void *handle)
+ DRM_INFO("PSP loading UVD firmware\n");
+ }
+
+- ring = &adev->uvd.inst->ring_enc[0];
+- rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+- r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity_enc,
+- rq, amdgpu_sched_jobs, NULL);
+- if (r) {
+- DRM_ERROR("Failed setting up UVD ENC run queue.\n");
+- return r;
++ for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
++ ring = &adev->uvd.inst[j].ring_enc[0];
++ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
++ r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity_enc,
++ rq, amdgpu_sched_jobs, NULL);
++ if (r) {
++ DRM_ERROR("(%d)Failed setting up UVD ENC run queue.\n", j);
++ return r;
++ }
+ }
+
+ r = amdgpu_uvd_resume(adev);
+ if (r)
+ return r;
+- if (!amdgpu_sriov_vf(adev)) {
+- ring = &adev->uvd.inst->ring;
+- sprintf(ring->name, "uvd");
+- r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+- if (r)
+- return r;
+- }
+
+- for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
+- ring = &adev->uvd.inst->ring_enc[i];
+- sprintf(ring->name, "uvd_enc%d", i);
+- if (amdgpu_sriov_vf(adev)) {
+- ring->use_doorbell = true;
+-
+- /* currently only use the first enconding ring for
+- * sriov, so set unused location for other unused rings.
+- */
+- if (i == 0)
+- ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING0_1 * 2;
+- else
+- ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING2_3 * 2 + 1;
++ for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
++ if (!amdgpu_sriov_vf(adev)) {
++ ring = &adev->uvd.inst[j].ring;
++ sprintf(ring->name, "uvd<%d>", j);
++ r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
++ if (r)
++ return r;
++ }
++
++ for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
++ ring = &adev->uvd.inst[j].ring_enc[i];
++ sprintf(ring->name, "uvd_enc%d<%d>", i, j);
++ if (amdgpu_sriov_vf(adev)) {
++ ring->use_doorbell = true;
++
++ /* currently only use the first encoding ring for
++ * sriov, so set unused location for other unused rings.
++ */
++ if (i == 0)
++ ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING0_1 * 2;
++ else
++ ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING2_3 * 2 + 1;
++ }
++ r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
++ if (r)
++ return r;
+ }
+- r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+- if (r)
+- return r;
+ }
+
+ r = amdgpu_virt_alloc_mm_table(adev);
+@@ -464,7 +471,7 @@ static int uvd_v7_0_sw_init(void *handle)
+
+ static int uvd_v7_0_sw_fini(void *handle)
+ {
+- int i, r;
++ int i, j, r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ amdgpu_virt_free_mm_table(adev);
+@@ -473,11 +480,12 @@ static int uvd_v7_0_sw_fini(void *handle)
+ if (r)
+ return r;
+
+- drm_sched_entity_fini(&adev->uvd.inst->ring_enc[0].sched, &adev->uvd.inst->entity_enc);
+-
+- for (i = 0; i < adev->uvd.num_enc_rings; ++i)
+- amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]);
++ for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
++ drm_sched_entity_fini(&adev->uvd.inst[j].ring_enc[0].sched, &adev->uvd.inst[j].entity_enc);
+
++ for (i = 0; i < adev->uvd.num_enc_rings; ++i)
++ amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
++ }
+ return amdgpu_uvd_sw_fini(adev);
+ }
+
+@@ -491,9 +499,9 @@ static int uvd_v7_0_sw_fini(void *handle)
+ static int uvd_v7_0_hw_init(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+- struct amdgpu_ring *ring = &adev->uvd.inst->ring;
++ struct amdgpu_ring *ring;
+ uint32_t tmp;
+- int i, r;
++ int i, j, r;
+
+ if (amdgpu_sriov_vf(adev))
+ r = uvd_v7_0_sriov_start(adev);
+@@ -502,57 +510,60 @@ static int uvd_v7_0_hw_init(void *handle)
+ if (r)
+ goto done;
+
+- if (!amdgpu_sriov_vf(adev)) {
+- ring->ready = true;
+- r = amdgpu_ring_test_ring(ring);
+- if (r) {
+- ring->ready = false;
+- goto done;
++ for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
++ ring = &adev->uvd.inst[j].ring;
++
++ if (!amdgpu_sriov_vf(adev)) {
++ ring->ready = true;
++ r = amdgpu_ring_test_ring(ring);
++ if (r) {
++ ring->ready = false;
++ goto done;
++ }
++
++ r = amdgpu_ring_alloc(ring, 10);
++ if (r) {
++ DRM_ERROR("amdgpu: (%d)ring failed to lock UVD ring (%d).\n", j, r);
++ goto done;
++ }
++
++ tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
++ mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL), 0);
++ amdgpu_ring_write(ring, tmp);
++ amdgpu_ring_write(ring, 0xFFFFF);
++
++ tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
++ mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL), 0);
++ amdgpu_ring_write(ring, tmp);
++ amdgpu_ring_write(ring, 0xFFFFF);
++
++ tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
++ mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL), 0);
++ amdgpu_ring_write(ring, tmp);
++ amdgpu_ring_write(ring, 0xFFFFF);
++
++ /* Clear timeout status bits */
++ amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
++ mmUVD_SEMA_TIMEOUT_STATUS), 0));
++ amdgpu_ring_write(ring, 0x8);
++
++ amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
++ mmUVD_SEMA_CNTL), 0));
++ amdgpu_ring_write(ring, 3);
++
++ amdgpu_ring_commit(ring);
+ }
+
+- r = amdgpu_ring_alloc(ring, 10);
+- if (r) {
+- DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
+- goto done;
++ for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
++ ring = &adev->uvd.inst[j].ring_enc[i];
++ ring->ready = true;
++ r = amdgpu_ring_test_ring(ring);
++ if (r) {
++ ring->ready = false;
++ goto done;
++ }
+ }
+-
+- tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0,
+- mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL), 0);
+- amdgpu_ring_write(ring, tmp);
+- amdgpu_ring_write(ring, 0xFFFFF);
+-
+- tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0,
+- mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL), 0);
+- amdgpu_ring_write(ring, tmp);
+- amdgpu_ring_write(ring, 0xFFFFF);
+-
+- tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0,
+- mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL), 0);
+- amdgpu_ring_write(ring, tmp);
+- amdgpu_ring_write(ring, 0xFFFFF);
+-
+- /* Clear timeout status bits */
+- amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0,
+- mmUVD_SEMA_TIMEOUT_STATUS), 0));
+- amdgpu_ring_write(ring, 0x8);
+-
+- amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0,
+- mmUVD_SEMA_CNTL), 0));
+- amdgpu_ring_write(ring, 3);
+-
+- amdgpu_ring_commit(ring);
+ }
+-
+- for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
+- ring = &adev->uvd.inst->ring_enc[i];
+- ring->ready = true;
+- r = amdgpu_ring_test_ring(ring);
+- if (r) {
+- ring->ready = false;
+- goto done;
+- }
+- }
+-
+ done:
+ if (!r)
+ DRM_INFO("UVD and UVD ENC initialized successfully.\n");
+@@ -570,7 +581,7 @@ static int uvd_v7_0_hw_init(void *handle)
+ static int uvd_v7_0_hw_fini(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+- struct amdgpu_ring *ring = &adev->uvd.inst->ring;
++ int i;
+
+ if (!amdgpu_sriov_vf(adev))
+ uvd_v7_0_stop(adev);
+@@ -579,7 +590,8 @@ static int uvd_v7_0_hw_fini(void *handle)
+ DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
+ }
+
+- ring->ready = false;
++ for (i = 0; i < adev->uvd.num_uvd_inst; ++i)
++ adev->uvd.inst[i].ring.ready = false;
+
+ return 0;
+ }
+@@ -619,48 +631,51 @@ static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
+ {
+ uint32_t size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
+ uint32_t offset;
++ int i;
+
+- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+- WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+- lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
+- WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+- upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
+- offset = 0;
+- } else {
+- WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+- lower_32_bits(adev->uvd.inst->gpu_addr));
+- WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+- upper_32_bits(adev->uvd.inst->gpu_addr));
+- offset = size;
+- }
++ for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
++ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
++ WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
++ lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
++ WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
++ upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
++ offset = 0;
++ } else {
++ WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
++ lower_32_bits(adev->uvd.inst[i].gpu_addr));
++ WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
++ upper_32_bits(adev->uvd.inst[i].gpu_addr));
++ offset = size;
++ }
+
+- WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
+- AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+- WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);
+-
+- WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
+- lower_32_bits(adev->uvd.inst->gpu_addr + offset));
+- WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
+- upper_32_bits(adev->uvd.inst->gpu_addr + offset));
+- WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, (1 << 21));
+- WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_UVD_HEAP_SIZE);
+-
+- WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
+- lower_32_bits(adev->uvd.inst->gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
+- WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
+- upper_32_bits(adev->uvd.inst->gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
+- WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, (2 << 21));
+- WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2,
+- AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
+-
+- WREG32_SOC15(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
+- adev->gfx.config.gb_addr_config);
+- WREG32_SOC15(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
+- adev->gfx.config.gb_addr_config);
+- WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
+- adev->gfx.config.gb_addr_config);
+-
+- WREG32_SOC15(UVD, 0, mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
++ WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
++ AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
++ WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size);
++
++ WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
++ lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
++ WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
++ upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
++ WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, (1 << 21));
++ WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_UVD_HEAP_SIZE);
++
++ WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
++ lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
++ WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
++ upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
++ WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, (2 << 21));
++ WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2,
++ AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
++
++ WREG32_SOC15(UVD, i, mmUVD_UDEC_ADDR_CONFIG,
++ adev->gfx.config.gb_addr_config);
++ WREG32_SOC15(UVD, i, mmUVD_UDEC_DB_ADDR_CONFIG,
++ adev->gfx.config.gb_addr_config);
++ WREG32_SOC15(UVD, i, mmUVD_UDEC_DBW_ADDR_CONFIG,
++ adev->gfx.config.gb_addr_config);
++
++ WREG32_SOC15(UVD, i, mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
++ }
+ }
+
+ static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
+@@ -670,6 +685,7 @@ static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
+ uint64_t addr = table->gpu_addr;
+ struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)table->cpu_addr;
+ uint32_t size;
++ int i;
+
+ size = header->header_size + header->vce_table_size + header->uvd_table_size;
+
+@@ -689,11 +705,12 @@ static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
+ /* 4, set resp to zero */
+ WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);
+
+- WDOORBELL32(adev->uvd.inst->ring_enc[0].doorbell_index, 0);
+- adev->wb.wb[adev->uvd.inst->ring_enc[0].wptr_offs] = 0;
+- adev->uvd.inst->ring_enc[0].wptr = 0;
+- adev->uvd.inst->ring_enc[0].wptr_old = 0;
+-
++ for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
++ WDOORBELL32(adev->uvd.inst[i].ring_enc[0].doorbell_index, 0);
++ adev->wb.wb[adev->uvd.inst[i].ring_enc[0].wptr_offs] = 0;
++ adev->uvd.inst[i].ring_enc[0].wptr = 0;
++ adev->uvd.inst[i].ring_enc[0].wptr_old = 0;
++ }
+ /* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
+ WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001);
+
+@@ -726,6 +743,7 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
+ struct mmsch_v1_0_cmd_end end = { {0} };
+ uint32_t *init_table = adev->virt.mm_table.cpu_addr;
+ struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)init_table;
++ uint8_t i = 0;
+
+ direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
+ direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
+@@ -743,120 +761,121 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
+
+ init_table += header->uvd_table_offset;
+
+- ring = &adev->uvd.inst->ring;
+- ring->wptr = 0;
+- size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
+-
+- MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS),
+- 0xFFFFFFFF, 0x00000004);
+- /* mc resume*/
+- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+- lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
+- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+- upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
+- offset = 0;
+- } else {
+- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+- lower_32_bits(adev->uvd.inst->gpu_addr));
+- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+- upper_32_bits(adev->uvd.inst->gpu_addr));
+- offset = size;
++ for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
++ ring = &adev->uvd.inst[i].ring;
++ ring->wptr = 0;
++ size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
++
++ MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
++ 0xFFFFFFFF, 0x00000004);
++ /* mc resume*/
++ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
++ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
++ lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
++ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
++ upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
++ offset = 0;
++ } else {
++ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
++ lower_32_bits(adev->uvd.inst[i].gpu_addr));
++ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
++ upper_32_bits(adev->uvd.inst[i].gpu_addr));
++ offset = size;
++ }
++
++ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0),
++ AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
++ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0), size);
++
++ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
++ lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
++ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
++ upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
++ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21));
++ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE);
++
++ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
++ lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
++ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
++ upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
++ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21));
++ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
++ AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
++
++ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_GP_SCRATCH4), adev->uvd.max_handles);
++ /* mc resume end*/
++
++ /* disable clock gating */
++ MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_CGC_CTRL),
++ ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK, 0);
++
++ /* disable interrupt */
++ MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
++ ~UVD_MASTINT_EN__VCPU_EN_MASK, 0);
++
++ /* stall UMC and register bus before resetting VCPU */
++ MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
++ ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
++ UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
++
++ /* put LMI, VCPU, RBC etc... into reset */
++ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
++ (uint32_t)(UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
++ UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
++ UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
++ UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
++ UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
++ UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
++ UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
++ UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK));
++
++ /* initialize UVD memory controller */
++ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL),
++ (uint32_t)((0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
++ UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
++ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
++ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
++ UVD_LMI_CTRL__REQ_MODE_MASK |
++ 0x00100000L));
++
++ /* take all subblocks out of reset, except VCPU */
++ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
++ UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
++
++ /* enable VCPU clock */
++ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
++ UVD_VCPU_CNTL__CLK_EN_MASK);
++
++ /* enable master interrupt */
++ MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
++ ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
++ (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
++
++ /* clear the bit 4 of UVD_STATUS */
++ MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
++ ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT), 0);
++
++ /* force RBC into idle state */
++ size = order_base_2(ring->ring_size);
++ tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size);
++ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
++ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);
++
++ ring = &adev->uvd.inst[i].ring_enc[0];
++ ring->wptr = 0;
++ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO), ring->gpu_addr);
++ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
++ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE), ring->ring_size / 4);
++
++ /* boot up the VCPU */
++ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET), 0);
++
++ /* enable UMC */
++ MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
++ ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);
++
++ MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0x02, 0x02);
+ }
+-
+- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
+- AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size);
+-
+- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
+- lower_32_bits(adev->uvd.inst->gpu_addr + offset));
+- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
+- upper_32_bits(adev->uvd.inst->gpu_addr + offset));
+- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21));
+- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE);
+-
+- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
+- lower_32_bits(adev->uvd.inst->gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
+- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
+- upper_32_bits(adev->uvd.inst->gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
+- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21));
+- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE2),
+- AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
+-
+- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH4), adev->uvd.max_handles);
+- /* mc resume end*/
+-
+- /* disable clock gating */
+- MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_CGC_CTRL),
+- ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK, 0);
+-
+- /* disable interupt */
+- MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
+- ~UVD_MASTINT_EN__VCPU_EN_MASK, 0);
+-
+- /* stall UMC and register bus before resetting VCPU */
+- MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
+- ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
+- UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+-
+- /* put LMI, VCPU, RBC etc... into reset */
+- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
+- (uint32_t)(UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
+- UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
+- UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
+- UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
+- UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
+- UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
+- UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
+- UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK));
+-
+- /* initialize UVD memory controller */
+- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL),
+- (uint32_t)((0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
+- UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+- UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+- UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
+- UVD_LMI_CTRL__REQ_MODE_MASK |
+- 0x00100000L));
+-
+- /* take all subblocks out of reset, except VCPU */
+- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
+- UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+-
+- /* enable VCPU clock */
+- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL),
+- UVD_VCPU_CNTL__CLK_EN_MASK);
+-
+- /* enable master interrupt */
+- MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
+- ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
+- (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
+-
+- /* clear the bit 4 of UVD_STATUS */
+- MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS),
+- ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT), 0);
+-
+- /* force RBC into idle state */
+- size = order_base_2(ring->ring_size);
+- tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size);
+- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
+- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), tmp);
+-
+- ring = &adev->uvd.inst->ring_enc[0];
+- ring->wptr = 0;
+- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_LO), ring->gpu_addr);
+- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
+- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_SIZE), ring->ring_size / 4);
+-
+- /* boot up the VCPU */
+- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0);
+-
+- /* enable UMC */
+- MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
+- ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);
+-
+- MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0x02, 0x02);
+-
+ /* add end packet */
+ memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
+ table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
+@@ -875,15 +894,17 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
+ */
+ static int uvd_v7_0_start(struct amdgpu_device *adev)
+ {
+- struct amdgpu_ring *ring = &adev->uvd.inst->ring;
++ struct amdgpu_ring *ring;
+ uint32_t rb_bufsz, tmp;
+ uint32_t lmi_swap_cntl;
+ uint32_t mp_swap_cntl;
+- int i, j, r;
++ int i, j, k, r;
+
+- /* disable DPG */
+- WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
+- ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
++ for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
++ /* disable DPG */
++ WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_POWER_STATUS), 0,
++ ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
++ }
+
+ /* disable byte swapping */
+ lmi_swap_cntl = 0;
+@@ -891,157 +912,159 @@ static int uvd_v7_0_start(struct amdgpu_device *adev)
+
+ uvd_v7_0_mc_resume(adev);
+
+- /* disable clock gating */
+- WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_CGC_CTRL), 0,
+- ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK);
+-
+- /* disable interupt */
+- WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
+- ~UVD_MASTINT_EN__VCPU_EN_MASK);
+-
+- /* stall UMC and register bus before resetting VCPU */
+- WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
+- UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
+- ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+- mdelay(1);
+-
+- /* put LMI, VCPU, RBC etc... into reset */
+- WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
+- UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
+- UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
+- UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
+- UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
+- UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
+- UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
+- UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
+- UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
+- mdelay(5);
++ for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
++ ring = &adev->uvd.inst[k].ring;
++ /* disable clock gating */
++ WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_CGC_CTRL), 0,
++ ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK);
+
+- /* initialize UVD memory controller */
+- WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL,
+- (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
+- UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+- UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+- UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
+- UVD_LMI_CTRL__REQ_MODE_MASK |
+- 0x00100000L);
++ /* disable interrupt */
++ WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN), 0,
++ ~UVD_MASTINT_EN__VCPU_EN_MASK);
++
++ /* stall UMC and register bus before resetting VCPU */
++ WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2),
++ UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
++ ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
++ mdelay(1);
++
++ /* put LMI, VCPU, RBC etc... into reset */
++ WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
++ UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
++ UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
++ UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
++ UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
++ UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
++ UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
++ UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
++ UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
++ mdelay(5);
++
++ /* initialize UVD memory controller */
++ WREG32_SOC15(UVD, k, mmUVD_LMI_CTRL,
++ (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
++ UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
++ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
++ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
++ UVD_LMI_CTRL__REQ_MODE_MASK |
++ 0x00100000L);
+
+ #ifdef __BIG_ENDIAN
+- /* swap (8 in 32) RB and IB */
+- lmi_swap_cntl = 0xa;
+- mp_swap_cntl = 0;
++ /* swap (8 in 32) RB and IB */
++ lmi_swap_cntl = 0xa;
++ mp_swap_cntl = 0;
+ #endif
+- WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
+- WREG32_SOC15(UVD, 0, mmUVD_MP_SWAP_CNTL, mp_swap_cntl);
+-
+- WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0, 0x40c2040);
+- WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA1, 0x0);
+- WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0, 0x40c2040);
+- WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB1, 0x0);
+- WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_ALU, 0);
+- WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX, 0x88);
+-
+- /* take all subblocks out of reset, except VCPU */
+- WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
+- UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+- mdelay(5);
++ WREG32_SOC15(UVD, k, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
++ WREG32_SOC15(UVD, k, mmUVD_MP_SWAP_CNTL, mp_swap_cntl);
+
+- /* enable VCPU clock */
+- WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL,
+- UVD_VCPU_CNTL__CLK_EN_MASK);
++ WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA0, 0x40c2040);
++ WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA1, 0x0);
++ WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB0, 0x40c2040);
++ WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB1, 0x0);
++ WREG32_SOC15(UVD, k, mmUVD_MPC_SET_ALU, 0);
++ WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUX, 0x88);
+
+- /* enable UMC */
+- WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
+- ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
++ /* take all subblocks out of reset, except VCPU */
++ WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
++ UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
++ mdelay(5);
+
+- /* boot up the VCPU */
+- WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, 0);
+- mdelay(10);
++ /* enable VCPU clock */
++ WREG32_SOC15(UVD, k, mmUVD_VCPU_CNTL,
++ UVD_VCPU_CNTL__CLK_EN_MASK);
+
+- for (i = 0; i < 10; ++i) {
+- uint32_t status;
++ /* enable UMC */
++ WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2), 0,
++ ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+
+- for (j = 0; j < 100; ++j) {
+- status = RREG32_SOC15(UVD, 0, mmUVD_STATUS);
++ /* boot up the VCPU */
++ WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET, 0);
++ mdelay(10);
++
++ for (i = 0; i < 10; ++i) {
++ uint32_t status;
++
++ for (j = 0; j < 100; ++j) {
++ status = RREG32_SOC15(UVD, k, mmUVD_STATUS);
++ if (status & 2)
++ break;
++ mdelay(10);
++ }
++ r = 0;
+ if (status & 2)
+ break;
++
++ DRM_ERROR("UVD(%d) not responding, trying to reset the VCPU!!!\n", k);
++ WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET),
++ UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
++ ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
++ mdelay(10);
++ WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET), 0,
++ ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+ mdelay(10);
++ r = -1;
+ }
+- r = 0;
+- if (status & 2)
+- break;
+
+- DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
+- WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
+- UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
+- ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+- mdelay(10);
+- WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
+- ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+- mdelay(10);
+- r = -1;
+- }
+-
+- if (r) {
+- DRM_ERROR("UVD not responding, giving up!!!\n");
+- return r;
+- }
+- /* enable master interrupt */
+- WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
+- (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
+- ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
+-
+- /* clear the bit 4 of UVD_STATUS */
+- WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0,
+- ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
+-
+- /* force RBC into idle state */
+- rb_bufsz = order_base_2(ring->ring_size);
+- tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
+- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
+- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
+- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
+- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
+- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
+- WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
+-
+- /* set the write pointer delay */
+- WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);
+-
+- /* set the wb address */
+- WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
+- (upper_32_bits(ring->gpu_addr) >> 2));
+-
+- /* programm the RB_BASE for ring buffer */
+- WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
+- lower_32_bits(ring->gpu_addr));
+- WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
+- upper_32_bits(ring->gpu_addr));
+-
+- /* Initialize the ring buffer's read and write pointers */
+- WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);
+-
+- ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
+- WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
+- lower_32_bits(ring->wptr));
+-
+- WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
+- ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
+-
+- ring = &adev->uvd.inst->ring_enc[0];
+- WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
+- WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+- WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
+- WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+- WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
++ if (r) {
++ DRM_ERROR("UVD(%d) not responding, giving up!!!\n", k);
++ return r;
++ }
++ /* enable master interrupt */
++ WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN),
++ (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
++ ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
+
+- ring = &adev->uvd.inst->ring_enc[1];
+- WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
+- WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+- WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
+- WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+- WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
++ /* clear the bit 4 of UVD_STATUS */
++ WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_STATUS), 0,
++ ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
+
++ /* force RBC into idle state */
++ rb_bufsz = order_base_2(ring->ring_size);
++ tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
++ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
++ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
++ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
++ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
++ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
++ WREG32_SOC15(UVD, k, mmUVD_RBC_RB_CNTL, tmp);
++
++ /* set the write pointer delay */
++ WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR_CNTL, 0);
++
++ /* set the wb address */
++ WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR_ADDR,
++ (upper_32_bits(ring->gpu_addr) >> 2));
++
++ /* program the RB_BASE for ring buffer */
++ WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
++ lower_32_bits(ring->gpu_addr));
++ WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
++ upper_32_bits(ring->gpu_addr));
++
++ /* Initialize the ring buffer's read and write pointers */
++ WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR, 0);
++
++ ring->wptr = RREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR);
++ WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR,
++ lower_32_bits(ring->wptr));
++
++ WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_RBC_RB_CNTL), 0,
++ ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
++
++ ring = &adev->uvd.inst[k].ring_enc[0];
++ WREG32_SOC15(UVD, k, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
++ WREG32_SOC15(UVD, k, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
++ WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO, ring->gpu_addr);
++ WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
++ WREG32_SOC15(UVD, k, mmUVD_RB_SIZE, ring->ring_size / 4);
++
++ ring = &adev->uvd.inst[k].ring_enc[1];
++ WREG32_SOC15(UVD, k, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
++ WREG32_SOC15(UVD, k, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
++ WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO2, ring->gpu_addr);
++ WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
++ WREG32_SOC15(UVD, k, mmUVD_RB_SIZE2, ring->ring_size / 4);
++ }
+ return 0;
+ }
+
+@@ -1054,26 +1077,30 @@ static int uvd_v7_0_start(struct amdgpu_device *adev)
+ */
+ static void uvd_v7_0_stop(struct amdgpu_device *adev)
+ {
+- /* force RBC into idle state */
+- WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, 0x11010101);
+-
+- /* Stall UMC and register bus before resetting VCPU */
+- WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
+- UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
+- ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+- mdelay(1);
+-
+- /* put VCPU into reset */
+- WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
+- UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+- mdelay(5);
++ uint8_t i = 0;
++
++ for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
++ /* force RBC into idle state */
++ WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, 0x11010101);
+
+- /* disable VCPU clock */
+- WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, 0x0);
++ /* Stall UMC and register bus before resetting VCPU */
++ WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
++ UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
++ ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
++ mdelay(1);
+
+- /* Unstall UMC and register bus */
+- WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
+- ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
++ /* put VCPU into reset */
++ WREG32_SOC15(UVD, i, mmUVD_SOFT_RESET,
++ UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
++ mdelay(5);
++
++ /* disable VCPU clock */
++ WREG32_SOC15(UVD, i, mmUVD_VCPU_CNTL, 0x0);
++
++ /* Unstall UMC and register bus */
++ WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0,
++ ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
++ }
+ }
+
+ /**
+@@ -1092,26 +1119,26 @@ static void uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
+ WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
+
+ amdgpu_ring_write(ring,
+- PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
++ PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
+ amdgpu_ring_write(ring, seq);
+ amdgpu_ring_write(ring,
+- PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
++ PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
+ amdgpu_ring_write(ring, addr & 0xffffffff);
+ amdgpu_ring_write(ring,
+- PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
++ PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
+ amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
+ amdgpu_ring_write(ring,
+- PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
++ PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
+ amdgpu_ring_write(ring, 0);
+
+ amdgpu_ring_write(ring,
+- PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
++ PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring,
+- PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
++ PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring,
+- PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
++ PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
+ amdgpu_ring_write(ring, 2);
+ }
+
+@@ -1181,7 +1208,7 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
+ unsigned i;
+ int r;
+
+- WREG32_SOC15(UVD, 0, mmUVD_CONTEXT_ID, 0xCAFEDEAD);
++ WREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID, 0xCAFEDEAD);
+ r = amdgpu_ring_alloc(ring, 3);
+ if (r) {
+ DRM_ERROR("amdgpu: (%d)cp failed to lock ring %d (%d).\n",
+@@ -1189,11 +1216,11 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
+ return r;
+ }
+ amdgpu_ring_write(ring,
+- PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
++ PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
+ amdgpu_ring_write(ring, 0xDEADBEEF);
+ amdgpu_ring_commit(ring);
+ for (i = 0; i < adev->usec_timeout; i++) {
+- tmp = RREG32_SOC15(UVD, 0, mmUVD_CONTEXT_ID);
++ tmp = RREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+@@ -1225,17 +1252,17 @@ static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_device *adev = ring->adev;
+
+ amdgpu_ring_write(ring,
+- PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
++ PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_VMID), 0));
+ amdgpu_ring_write(ring, vmid);
+
+ amdgpu_ring_write(ring,
+- PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
++ PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
+ amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
+ amdgpu_ring_write(ring,
+- PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
++ PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
+ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+ amdgpu_ring_write(ring,
+- PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_IB_SIZE), 0));
++ PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_RBC_IB_SIZE), 0));
+ amdgpu_ring_write(ring, ib->length_dw);
+ }
+
+@@ -1263,13 +1290,13 @@ static void uvd_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
+ struct amdgpu_device *adev = ring->adev;
+
+ amdgpu_ring_write(ring,
+- PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
++ PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
+ amdgpu_ring_write(ring, reg << 2);
+ amdgpu_ring_write(ring,
+- PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
++ PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
+ amdgpu_ring_write(ring, val);
+ amdgpu_ring_write(ring,
+- PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
++ PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
+ amdgpu_ring_write(ring, 8);
+ }
+
+@@ -1279,16 +1306,16 @@ static void uvd_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+ struct amdgpu_device *adev = ring->adev;
+
+ amdgpu_ring_write(ring,
+- PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
++ PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
+ amdgpu_ring_write(ring, reg << 2);
+ amdgpu_ring_write(ring,
+- PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
++ PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
+ amdgpu_ring_write(ring, val);
+ amdgpu_ring_write(ring,
+- PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH8), 0));
++ PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GP_SCRATCH8), 0));
+ amdgpu_ring_write(ring, mask);
+ amdgpu_ring_write(ring,
+- PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
++ PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
+ amdgpu_ring_write(ring, 12);
+ }
+
+@@ -1313,7 +1340,7 @@ static void uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+ struct amdgpu_device *adev = ring->adev;
+
+ for (i = 0; i < count; i++)
+- amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0));
++ amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_NO_OP), 0));
+
+ }
+
+@@ -1381,16 +1408,16 @@ static bool uvd_v7_0_check_soft_reset(void *handle)
+
+ if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
+ REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
+- (RREG32_SOC15(UVD, 0, mmUVD_STATUS) &
++ (RREG32_SOC15(UVD, ring->me, mmUVD_STATUS) &
+ AMDGPU_UVD_STATUS_BUSY_MASK))
+ srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
+ SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
+
+ if (srbm_soft_reset) {
+- adev->uvd.inst->srbm_soft_reset = srbm_soft_reset;
++ adev->uvd.inst[ring->me].srbm_soft_reset = srbm_soft_reset;
+ return true;
+ } else {
+- adev->uvd.inst->srbm_soft_reset = 0;
++ adev->uvd.inst[ring->me].srbm_soft_reset = 0;
+ return false;
+ }
+ }
+@@ -1399,7 +1426,7 @@ static int uvd_v7_0_pre_soft_reset(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+- if (!adev->uvd.inst->srbm_soft_reset)
++ if (!adev->uvd.inst[ring->me].srbm_soft_reset)
+ return 0;
+
+ uvd_v7_0_stop(adev);
+@@ -1411,9 +1438,9 @@ static int uvd_v7_0_soft_reset(void *handle)
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 srbm_soft_reset;
+
+- if (!adev->uvd.inst->srbm_soft_reset)
++ if (!adev->uvd.inst[ring->me].srbm_soft_reset)
+ return 0;
+- srbm_soft_reset = adev->uvd.inst->srbm_soft_reset;
++ srbm_soft_reset = adev->uvd.inst[ring->me].srbm_soft_reset;
+
+ if (srbm_soft_reset) {
+ u32 tmp;
+@@ -1441,7 +1468,7 @@ static int uvd_v7_0_post_soft_reset(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+- if (!adev->uvd.inst->srbm_soft_reset)
++ if (!adev->uvd.inst[ring->me].srbm_soft_reset)
+ return 0;
+
+ mdelay(5);
+@@ -1463,17 +1490,29 @@ static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+ {
++ uint32_t ip_instance;
++
++ switch (entry->client_id) {
++ case SOC15_IH_CLIENTID_UVD:
++ ip_instance = 0;
++ break;
++ default:
++ DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
++ return 0;
++ }
++
+ DRM_DEBUG("IH: UVD TRAP\n");
++
+ switch (entry->src_id) {
+ case 124:
+- amdgpu_fence_process(&adev->uvd.inst->ring);
++ amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring);
+ break;
+ case 119:
+- amdgpu_fence_process(&adev->uvd.inst->ring_enc[0]);
++ amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[0]);
+ break;
+ case 120:
+ if (!amdgpu_sriov_vf(adev))
+- amdgpu_fence_process(&adev->uvd.inst->ring_enc[1]);
++ amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[1]);
+ break;
+ default:
+ DRM_ERROR("Unhandled interrupt: %d %d\n",
+@@ -1489,9 +1528,9 @@ static void uvd_v7_0_set_sw_clock_gating(struct amdgpu_device *adev)
+ {
+ uint32_t data, data1, data2, suvd_flags;
+
+- data = RREG32_SOC15(UVD, 0, mmUVD_CGC_CTRL);
+- data1 = RREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE);
+- data2 = RREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_CTRL);
++ data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL);
++ data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);
++ data2 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL);
+
+ data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
+ UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
+@@ -1535,18 +1574,18 @@ static void uvd_v7_0_set_sw_clock_gating(struct amdgpu_device *adev)
+ UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
+ data1 |= suvd_flags;
+
+- WREG32_SOC15(UVD, 0, mmUVD_CGC_CTRL, data);
+- WREG32_SOC15(UVD, 0, mmUVD_CGC_GATE, 0);
+- WREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE, data1);
+- WREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_CTRL, data2);
++ WREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL, data);
++ WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, 0);
++ WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1);
++ WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL, data2);
+ }
+
+ static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev)
+ {
+ uint32_t data, data1, cgc_flags, suvd_flags;
+
+- data = RREG32_SOC15(UVD, 0, mmUVD_CGC_GATE);
+- data1 = RREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE);
++ data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE);
++ data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);
+
+ cgc_flags = UVD_CGC_GATE__SYS_MASK |
+ UVD_CGC_GATE__UDEC_MASK |
+@@ -1578,8 +1617,8 @@ static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev)
+ data |= cgc_flags;
+ data1 |= suvd_flags;
+
+- WREG32_SOC15(UVD, 0, mmUVD_CGC_GATE, data);
+- WREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE, data1);
++ WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, data);
++ WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1);
+ }
+
+ static void uvd_v7_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
+@@ -1638,7 +1677,7 @@ static int uvd_v7_0_set_powergating_state(void *handle,
+ if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
+ return 0;
+
+- WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);
++ WREG32_SOC15(UVD, ring->me, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);
+
+ if (state == AMD_PG_STATE_GATE) {
+ uvd_v7_0_stop(adev);
+@@ -1742,18 +1781,27 @@ static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
+
+ static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
+ {
+- adev->uvd.inst->ring.funcs = &uvd_v7_0_ring_vm_funcs;
+- DRM_INFO("UVD is enabled in VM mode\n");
++ int i;
++
++ for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
++ adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs;
++ adev->uvd.inst[i].ring.me = i;
++ DRM_INFO("UVD(%d) is enabled in VM mode\n", i);
++ }
+ }
+
+ static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev)
+ {
+- int i;
++ int i, j;
+
+- for (i = 0; i < adev->uvd.num_enc_rings; ++i)
+- adev->uvd.inst->ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
++ for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
++ for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
++ adev->uvd.inst[j].ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
++ adev->uvd.inst[j].ring_enc[i].me = j;
++ }
+
+- DRM_INFO("UVD ENC is enabled in VM mode\n");
++ DRM_INFO("UVD(%d) ENC is enabled in VM mode\n", j);
++ }
+ }
+
+ static const struct amdgpu_irq_src_funcs uvd_v7_0_irq_funcs = {
+@@ -1763,8 +1811,12 @@ static const struct amdgpu_irq_src_funcs uvd_v7_0_irq_funcs = {
+
+ static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
+ {
+- adev->uvd.inst->irq.num_types = adev->uvd.num_enc_rings + 1;
+- adev->uvd.inst->irq.funcs = &uvd_v7_0_irq_funcs;
++ int i;
++
++ for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
++ adev->uvd.inst[i].irq.num_types = adev->uvd.num_enc_rings + 1;
++ adev->uvd.inst[i].irq.funcs = &uvd_v7_0_irq_funcs;
++ }
+ }
+
+ const struct amdgpu_ip_block_version uvd_v7_0_ip_block =
+--
+2.7.4
+
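Every hunk in the patch above applies the same transformation: register, ring and irq accesses that were hard-coded to UVD instance 0 (adev->uvd.inst->..., SOC15 instance argument 0) become indexed either by a loop over adev->uvd.num_uvd_inst or by ring->me. A minimal stand-alone C sketch of that pattern follows; the struct layout, names and register values are illustrative assumptions, not the driver's.

    /* Illustrative model only: program every UVD instance in a loop
     * instead of touching a hard-coded instance 0.  Values are made up. */
    #include <stdio.h>

    #define MAX_UVD_INST 2

    struct uvd_inst { unsigned long ring_base; };   /* hypothetical */
    struct uvd_hw   { unsigned num_uvd_inst; struct uvd_inst inst[MAX_UVD_INST]; };

    static void program_instance(const struct uvd_hw *hw, unsigned i)
    {
            /* In the real driver this is a WREG32_SOC15(UVD, i, ...) write. */
            printf("instance %u: ring base 0x%lx\n", i, hw->inst[i].ring_base);
    }

    int main(void)
    {
            struct uvd_hw hw = { .num_uvd_inst = 2,
                                 .inst = { { 0x7e00 }, { 0x9000 } } };
            unsigned i;

            for (i = 0; i < hw.num_uvd_inst; ++i)   /* was: instance 0 only */
                    program_instance(&hw, i);
            return 0;
    }
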
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4431-drm-amdgpu-vg20-Restruct-uvd.idle_work-to-support-mu.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4431-drm-amdgpu-vg20-Restruct-uvd.idle_work-to-support-mu.patch
new file mode 100644
index 00000000..c42be53d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4431-drm-amdgpu-vg20-Restruct-uvd.idle_work-to-support-mu.patch
@@ -0,0 +1,124 @@
+From 324b2b32fef0f7032c728cfe6709e978220e2d2e Mon Sep 17 00:00:00 2001
+From: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+Date: Wed, 9 Jan 2019 19:31:08 +0530
+Subject: [PATCH 4431/5725] drm/amdgpu/vg20:Restruct uvd.idle_work to support
+ multiple instance (v2)
+
+The Vega20 dual-UVD hardware needs two idle_works; restructure the driver
+to support multiple instances.
+
+v2: squash in indentation fix
+
+Signed-off-by: James Zhu <James.Zhu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 17 +++++++++--------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h | 7 ++++++-
+ 2 files changed, 15 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+index c9ed917..3c040f3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+@@ -129,8 +129,6 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
+ unsigned version_major, version_minor, family_id;
+ int i, j, r;
+
+- INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, amdgpu_uvd_idle_work_handler);
+-
+ switch (adev->asic_type) {
+ #ifdef CONFIG_DRM_AMDGPU_CIK
+ case CHIP_BONAIRE:
+@@ -237,6 +235,8 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
+ bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
+
+ for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
++ adev->uvd.inst[j].delayed_work.ip_instance = j;
++ INIT_DELAYED_WORK(&adev->uvd.inst[j].delayed_work.idle_work, amdgpu_uvd_idle_work_handler);
+
+ r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst[j].vcpu_bo,
+@@ -317,7 +317,7 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
+ if (adev->uvd.inst[j].vcpu_bo == NULL)
+ continue;
+
+- cancel_delayed_work_sync(&adev->uvd.inst[j].idle_work);
++ cancel_delayed_work_sync(&adev->uvd.inst[j].delayed_work.idle_work);
+
+ /* only valid for physical mode */
+ if (adev->asic_type < CHIP_POLARIS10) {
+@@ -1144,9 +1144,10 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+
+ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
+ {
++ struct amdgpu_delayed_work *my_work = (struct amdgpu_delayed_work *)work;
+ struct amdgpu_device *adev =
+- container_of(work, struct amdgpu_device, uvd.inst->idle_work.work);
+- unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.inst->ring);
++ container_of(work, struct amdgpu_device, uvd.inst[my_work->ip_instance].delayed_work.idle_work.work);
++ unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.inst[my_work->ip_instance].ring);
+
+ if (fences == 0) {
+ if (adev->pm.dpm_enabled) {
+@@ -1160,7 +1161,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
+ AMD_CG_STATE_GATE);
+ }
+ } else {
+- schedule_delayed_work(&adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
++ schedule_delayed_work(&adev->uvd.inst[my_work->ip_instance].delayed_work.idle_work, UVD_IDLE_TIMEOUT);
+ }
+ }
+
+@@ -1172,7 +1173,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
+ if (amdgpu_sriov_vf(adev))
+ return;
+
+- set_clocks = !cancel_delayed_work_sync(&adev->uvd.inst->idle_work);
++ set_clocks = !cancel_delayed_work_sync(&adev->uvd.inst[ring->me].delayed_work.idle_work);
+ if (set_clocks) {
+ if (adev->pm.dpm_enabled) {
+ amdgpu_dpm_enable_uvd(adev, true);
+@@ -1189,7 +1190,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
+ void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
+ {
+ if (!amdgpu_sriov_vf(ring->adev))
+- schedule_delayed_work(&ring->adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
++ schedule_delayed_work(&ring->adev->uvd.inst[ring->me].delayed_work.idle_work, UVD_IDLE_TIMEOUT);
+ }
+
+ /**
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+index b1579fb..7801eb8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+@@ -37,6 +37,11 @@
+ (AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(((const struct common_firmware_header *)(adev)->uvd.fw->data)->ucode_size_bytes) + \
+ 8) - AMDGPU_UVD_FIRMWARE_OFFSET)
+
++struct amdgpu_delayed_work{
++ struct delayed_work idle_work;
++ unsigned ip_instance;
++};
++
+ struct amdgpu_uvd_inst {
+ struct amdgpu_bo *vcpu_bo;
+ void *cpu_addr;
+@@ -44,12 +49,12 @@ struct amdgpu_uvd_inst {
+ void *saved_bo;
+ atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
+ struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
+- struct delayed_work idle_work;
+ struct amdgpu_ring ring;
+ struct amdgpu_ring ring_enc[AMDGPU_MAX_UVD_ENC_RINGS];
+ struct amdgpu_irq_src irq;
+ struct drm_sched_entity entity;
+ struct drm_sched_entity entity_enc;
++ struct amdgpu_delayed_work delayed_work;
+ uint32_t srbm_soft_reset;
+ };
+
+--
+2.7.4
+
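The patch above pairs each instance's delayed work with its instance index so the shared idle-work handler can tell which UVD instance scheduled it. Below is a hedged user-space model of that recovery pattern; the names are illustrative (the driver itself uses struct amdgpu_delayed_work and recovers the wrapper from the embedded work pointer).

    /* Illustrative model: recover the owning wrapper, and hence the
     * instance index, from a pointer to the embedded work item. */
    #include <stdio.h>
    #include <stddef.h>

    struct work { int pending; };           /* stand-in for struct delayed_work */

    struct inst_work {                      /* mirrors the wrapper idea above */
            struct work idle_work;
            unsigned ip_instance;
    };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    static void idle_handler(struct work *w)
    {
            /* The handler only receives the embedded work pointer and walks
             * back to the wrapper to learn which instance it belongs to. */
            struct inst_work *iw = container_of(w, struct inst_work, idle_work);

            printf("idle work for UVD instance %u\n", iw->ip_instance);
    }

    int main(void)
    {
            struct inst_work inst[2] = { { { 0 }, 0 }, { { 0 }, 1 } };

            idle_handler(&inst[0].idle_work);
            idle_handler(&inst[1].idle_work);
            return 0;
    }

Keeping the index next to the work item avoids any global lookup table and scales naturally with num_uvd_inst.
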
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4432-drm-amdgpu-vg20-increase-3-rings-for-AMDGPU_MAX_RING.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4432-drm-amdgpu-vg20-increase-3-rings-for-AMDGPU_MAX_RING.patch
new file mode 100644
index 00000000..54a8e012
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4432-drm-amdgpu-vg20-increase-3-rings-for-AMDGPU_MAX_RING.patch
@@ -0,0 +1,33 @@
+From 7faf936d871beae746523b381734780ec7051f30 Mon Sep 17 00:00:00 2001
+From: James Zhu <James.Zhu@amd.com>
+Date: Mon, 23 Apr 2018 19:11:46 -0400
+Subject: [PATCH 4432/5725] drm/amdgpu/vg20:increase 3 rings for
+ AMDGPU_MAX_RINGS
+
+Vega20 has two UVD hardware instances. The additional UVD instance
+adds one decode ring and two encode rings, so AMDGPU_MAX_RINGS needs
+to increase by 3.
+
+Signed-off-by: James Zhu <James.Zhu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+index 79ca5b7..dce4b82 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+@@ -28,7 +28,7 @@
+ #include <drm/gpu_scheduler.h>
+
+ /* max number of rings */
+-#define AMDGPU_MAX_RINGS 18
++#define AMDGPU_MAX_RINGS 21
+ #define AMDGPU_MAX_GFX_RINGS 1
+ #define AMDGPU_MAX_COMPUTE_RINGS 8
+ #define AMDGPU_MAX_VCE_RINGS 3
+--
+2.7.4
+
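The arithmetic behind the hunk above: the previous limit was 18 rings, and the second UVD instance contributes one decode ring plus two encode rings, so the new AMDGPU_MAX_RINGS value is 18 + 1 + 2 = 21.
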
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4433-drm-amdgpu-vg20-Enable-the-2nd-instance-for-uvd.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4433-drm-amdgpu-vg20-Enable-the-2nd-instance-for-uvd.patch
new file mode 100644
index 00000000..6f14d5ce
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4433-drm-amdgpu-vg20-Enable-the-2nd-instance-for-uvd.patch
@@ -0,0 +1,104 @@
+From bd96fc812b85409c88884902eb5845dee2e49bee Mon Sep 17 00:00:00 2001
+From: James Zhu <James.Zhu@amd.com>
+Date: Fri, 11 May 2018 13:56:44 -0500
+Subject: [PATCH 4433/5725] drm/amdgpu/vg20:Enable the 2nd instance for uvd
+
+For Vega20, set the number of UVD instances to 2 to enable the 2nd instance.
+The built-in registers used by the IB test need updating for the Vega20 2nd instance.
+
+Signed-off-by: James Zhu <James.Zhu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 32 +++++++++++++++++---------------
+ drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 7 ++++++-
+ 2 files changed, 23 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+index 3c040f3..c973b10 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+@@ -72,11 +72,12 @@
+ #define FIRMWARE_VEGA12 "amdgpu/vega12_uvd.bin"
+ #define FIRMWARE_VEGA20 "amdgpu/vega20_uvd.bin"
+
+-#define mmUVD_GPCOM_VCPU_DATA0_VEGA10 (0x03c4 + 0x7e00)
+-#define mmUVD_GPCOM_VCPU_DATA1_VEGA10 (0x03c5 + 0x7e00)
+-#define mmUVD_GPCOM_VCPU_CMD_VEGA10 (0x03c3 + 0x7e00)
+-#define mmUVD_NO_OP_VEGA10 (0x03ff + 0x7e00)
+-#define mmUVD_ENGINE_CNTL_VEGA10 (0x03c6 + 0x7e00)
++/* These are common relative offsets for all asics, from uvd_7_0_offset.h, */
++#define UVD_GPCOM_VCPU_CMD 0x03c3
++#define UVD_GPCOM_VCPU_DATA0 0x03c4
++#define UVD_GPCOM_VCPU_DATA1 0x03c5
++#define UVD_NO_OP 0x03ff
++#define UVD_BASE_SI 0x3800
+
+ /**
+ * amdgpu_uvd_cs_ctx - Command submission parser context
+@@ -990,7 +991,9 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
+ uint64_t addr;
+ long r;
+ int i;
+-
++ unsigned offset_idx = 0;
++ unsigned offset[3] = { UVD_BASE_SI, 0, 0 };
++
+ amdgpu_bo_kunmap(bo);
+ amdgpu_bo_unpin(bo);
+
+@@ -1009,17 +1012,16 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
+ goto err;
+
+ if (adev->asic_type >= CHIP_VEGA10) {
+- data[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0_VEGA10, 0);
+- data[1] = PACKET0(mmUVD_GPCOM_VCPU_DATA1_VEGA10, 0);
+- data[2] = PACKET0(mmUVD_GPCOM_VCPU_CMD_VEGA10, 0);
+- data[3] = PACKET0(mmUVD_NO_OP_VEGA10, 0);
+- } else {
+- data[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0);
+- data[1] = PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0);
+- data[2] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0);
+- data[3] = PACKET0(mmUVD_NO_OP, 0);
++ offset_idx = 1 + ring->me;
++ offset[1] = adev->reg_offset[UVD_HWIP][0][1];
++ offset[2] = adev->reg_offset[UVD_HWIP][1][1];
+ }
+
++ data[0] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA0, 0);
++ data[1] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA1, 0);
++ data[2] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_CMD, 0);
++ data[3] = PACKET0(offset[offset_idx] + UVD_NO_OP, 0);
++
+ ib = &job->ibs[0];
+ addr = amdgpu_bo_gpu_offset(bo);
+ ib->ptr[0] = data[0];
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+index 38816227..bc4db67 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+@@ -40,6 +40,8 @@
+ #include "mmhub/mmhub_1_0_offset.h"
+ #include "mmhub/mmhub_1_0_sh_mask.h"
+
++#define UVD7_MAX_HW_INSTANCES_VEGA20 2
++
+ static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev);
+ static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev);
+ static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev);
+@@ -370,7 +372,10 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+ static int uvd_v7_0_early_init(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+- adev->uvd.num_uvd_inst = 1;
++ if (adev->asic_type == CHIP_VEGA20)
++ adev->uvd.num_uvd_inst = UVD7_MAX_HW_INSTANCES_VEGA20;
++ else
++ adev->uvd.num_uvd_inst = 1;
+
+ if (amdgpu_sriov_vf(adev))
+ adev->uvd.num_enc_rings = 1;
+--
+2.7.4
+
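A side note on the offset logic in the patch above: pre-Vega10 ASICs address the UVD registers through the single UVD_BASE_SI base, while Vega10 and later pick a per-instance base matching the ring's hardware instance (ring->me). Below is a minimal, hedged C sketch of that selection; the base[] array is a hypothetical stand-in for the driver's adev->reg_offset[UVD_HWIP][...][1] lookups.

    /*
     * Hedged sketch: choose the register base for one UVD instance.
     * base[0] is the legacy pre-Vega10 base (UVD_BASE_SI); base[1] and
     * base[2] stand in for the per-instance offsets the driver reads
     * from its reg_offset table.
     */
    static unsigned int uvd_reg_base(int vega10_or_later, unsigned int instance,
                                     const unsigned int base[3])
    {
            unsigned int idx = vega10_or_later ? 1 + instance : 0;

            return base[idx];
    }

A caller would then form the packet headers as in the patch, e.g. PACKET0(uvd_reg_base(...) + UVD_GPCOM_VCPU_DATA0, 0).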
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4434-drm-amdgpu-vg20-Add-IH-client-ID-for-the-2nd-UVD.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4434-drm-amdgpu-vg20-Add-IH-client-ID-for-the-2nd-UVD.patch
new file mode 100644
index 00000000..ceb9521a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4434-drm-amdgpu-vg20-Add-IH-client-ID-for-the-2nd-UVD.patch
@@ -0,0 +1,30 @@
+From e446c5faec01e9a2be675a5629a2da1e3297f1e1 Mon Sep 17 00:00:00 2001
+From: James Zhu <James.Zhu@amd.com>
+Date: Mon, 23 Apr 2018 20:49:28 -0400
+Subject: [PATCH 4434/5725] drm/amdgpu/vg20:Add IH client ID for the 2nd UVD
+
+For Vega20, there are two UVD hardware instances. Add
+the 2nd IH client ID for the 2nd UVD hardware.
+
+Signed-off-by: James Zhu <James.Zhu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/include/soc15_ih_clientid.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/include/soc15_ih_clientid.h b/drivers/gpu/drm/amd/include/soc15_ih_clientid.h
+index a12d4f2..12e196c 100644
+--- a/drivers/gpu/drm/amd/include/soc15_ih_clientid.h
++++ b/drivers/gpu/drm/amd/include/soc15_ih_clientid.h
+@@ -43,6 +43,7 @@ enum soc15_ih_clientid {
+ SOC15_IH_CLIENTID_SE2SH = 0x0c,
+ SOC15_IH_CLIENTID_SE3SH = 0x0d,
+ SOC15_IH_CLIENTID_SYSHUB = 0x0e,
++ SOC15_IH_CLIENTID_UVD1 = 0x0e,
+ SOC15_IH_CLIENTID_THM = 0x0f,
+ SOC15_IH_CLIENTID_UVD = 0x10,
+ SOC15_IH_CLIENTID_VCE0 = 0x11,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4435-drm-amdgpu-vg20-Enable-the-2nd-instance-IRQ-for-uvd-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4435-drm-amdgpu-vg20-Enable-the-2nd-instance-IRQ-for-uvd-.patch
new file mode 100644
index 00000000..5c7d9f11
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4435-drm-amdgpu-vg20-Enable-the-2nd-instance-IRQ-for-uvd-.patch
@@ -0,0 +1,61 @@
+From ecb8fd50b8366505079e3bc405b3f21fd01cdd89 Mon Sep 17 00:00:00 2001
+From: James Zhu <James.Zhu@amd.com>
+Date: Mon, 23 Apr 2018 20:56:01 -0400
+Subject: [PATCH 4435/5725] drm/amdgpu/vg20:Enable the 2nd instance IRQ for uvd
+ 7.2
+
+For Vega20, the 2nd UVD instance IRQ uses a different client ID.
+Enable the 2nd instance IRQ for UVD 7.2.
+
+Signed-off-by: James Zhu <James.Zhu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 12 ++++++++++--
+ 1 file changed, 10 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+index bc4db67..47a6af5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+@@ -49,6 +49,11 @@ static int uvd_v7_0_start(struct amdgpu_device *adev);
+ static void uvd_v7_0_stop(struct amdgpu_device *adev);
+ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev);
+
++static int amdgpu_ih_clientid_uvds[] = {
++ SOC15_IH_CLIENTID_UVD,
++ SOC15_IH_CLIENTID_UVD1
++};
++
+ /**
+ * uvd_v7_0_ring_get_rptr - get read pointer
+ *
+@@ -397,13 +402,13 @@ static int uvd_v7_0_sw_init(void *handle)
+
+ for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+ /* UVD TRAP */
+- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, 124, &adev->uvd.inst[j].irq);
++ r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], 124, &adev->uvd.inst[j].irq);
+ if (r)
+ return r;
+
+ /* UVD ENC TRAP */
+ for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
+- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, i + 119, &adev->uvd.inst[j].irq);
++ r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], i + 119, &adev->uvd.inst[j].irq);
+ if (r)
+ return r;
+ }
+@@ -1501,6 +1506,9 @@ static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev,
+ case SOC15_IH_CLIENTID_UVD:
+ ip_instance = 0;
+ break;
++ case SOC15_IH_CLIENTID_UVD1:
++ ip_instance = 1;
++ break;
+ default:
+ DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
+ return 0;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4436-drm-amdgpu-vg20-Enable-2nd-instance-queue-maping-for.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4436-drm-amdgpu-vg20-Enable-2nd-instance-queue-maping-for.patch
new file mode 100644
index 00000000..a932314e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4436-drm-amdgpu-vg20-Enable-2nd-instance-queue-maping-for.patch
@@ -0,0 +1,69 @@
+From 53c04cd97b0d585d6d72d8f9d6ffd7ae41d19be0 Mon Sep 17 00:00:00 2001
+From: James Zhu <James.Zhu@amd.com>
+Date: Mon, 23 Apr 2018 21:00:58 -0400
+Subject: [PATCH 4436/5725] drm/amdgpu/vg20:Enable 2nd instance queue maping
+ for uvd 7.2
+
+Enable 2nd instance UVD queue mapping for UVD 7.2. To user space, only one
+UVD instance is presented: there are two rings for UVD decode and
+four rings for UVD encode.
+
+Signed-off-by: James Zhu <James.Zhu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c | 14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
+index 2458d38..8af16e8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
+@@ -66,6 +66,8 @@ static int amdgpu_identity_map(struct amdgpu_device *adev,
+ u32 ring,
+ struct amdgpu_ring **out_ring)
+ {
++ u32 instance;
++
+ switch (mapper->hw_ip) {
+ case AMDGPU_HW_IP_GFX:
+ *out_ring = &adev->gfx.gfx_ring[ring];
+@@ -77,13 +79,16 @@ static int amdgpu_identity_map(struct amdgpu_device *adev,
+ *out_ring = &adev->sdma.instance[ring].ring;
+ break;
+ case AMDGPU_HW_IP_UVD:
+- *out_ring = &adev->uvd.inst->ring;
++ instance = ring;
++ *out_ring = &adev->uvd.inst[instance].ring;
+ break;
+ case AMDGPU_HW_IP_VCE:
+ *out_ring = &adev->vce.ring[ring];
+ break;
+ case AMDGPU_HW_IP_UVD_ENC:
+- *out_ring = &adev->uvd.inst->ring_enc[ring];
++ instance = ring / adev->uvd.num_enc_rings;
++ *out_ring =
++ &adev->uvd.inst[instance].ring_enc[ring%adev->uvd.num_enc_rings];
+ break;
+ case AMDGPU_HW_IP_VCN_DEC:
+ *out_ring = &adev->vcn.ring_dec;
+@@ -240,13 +245,14 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
+ ip_num_rings = adev->sdma.num_instances;
+ break;
+ case AMDGPU_HW_IP_UVD:
+- ip_num_rings = 1;
++ ip_num_rings = adev->uvd.num_uvd_inst;
+ break;
+ case AMDGPU_HW_IP_VCE:
+ ip_num_rings = adev->vce.num_rings;
+ break;
+ case AMDGPU_HW_IP_UVD_ENC:
+- ip_num_rings = adev->uvd.num_enc_rings;
++ ip_num_rings =
++ adev->uvd.num_enc_rings * adev->uvd.num_uvd_inst;
+ break;
+ case AMDGPU_HW_IP_VCN_DEC:
+ ip_num_rings = 1;
+--
+2.7.4
+
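The identity-mapping change in the patch above exposes all UVD rings through one flat, user-visible index: decode index i selects instance i's decode ring, while encode index i selects ring i % num_enc_rings on instance i / num_enc_rings. A small hedged sketch of that split, with plain integers standing in for the driver's ring structures:

    /*
     * Hedged sketch: split a flat user-visible UVD encode ring index into
     * (hardware instance, ring within that instance). With two instances
     * and two encode rings each, flat indices 0..3 map to (0,0), (0,1),
     * (1,0) and (1,1).
     */
    struct uvd_enc_sel {
            unsigned int instance;
            unsigned int ring;
    };

    static struct uvd_enc_sel uvd_enc_select(unsigned int flat_idx,
                                             unsigned int num_enc_rings)
    {
            struct uvd_enc_sel sel = {
                    .instance = flat_idx / num_enc_rings,
                    .ring     = flat_idx % num_enc_rings,
            };

            return sel;
    }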
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4437-drm-amdgpu-vg20-Enable-UVD-VCE-for-Vega20.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4437-drm-amdgpu-vg20-Enable-UVD-VCE-for-Vega20.patch
new file mode 100644
index 00000000..04938d5d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4437-drm-amdgpu-vg20-Enable-UVD-VCE-for-Vega20.patch
@@ -0,0 +1,36 @@
+From d602c3578bc76ff5b7280c5216db866436ce72e5 Mon Sep 17 00:00:00 2001
+From: James Zhu <James.Zhu@amd.com>
+Date: Mon, 30 Apr 2018 08:43:12 -0400
+Subject: [PATCH 4437/5725] drm/amdgpu/vg20:Enable UVD/VCE for Vega20
+
+The Vega20 ucode load type defaults to AMDGPU_FW_LOAD_DIRECT, so
+UVD/VCE does not need the PSP IP block to be up. UVD/VCE for Vega20
+can therefore be enabled at this point.
+
+Signed-off-by: James Zhu <James.Zhu@amd.com>
+Reviewed-by: Leo Liu <leo.liu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/soc15.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index 8d0d054..7935484 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -529,10 +529,8 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
+ #endif
+ amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
+- if (adev->asic_type != CHIP_VEGA20) {
+- amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
+- amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
+- }
++ amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
++ amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
+ break;
+ case CHIP_RAVEN:
+ amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4438-drm-amdgpu-add-df-3.6-headers.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4438-drm-amdgpu-add-df-3.6-headers.patch
new file mode 100644
index 00000000..41e9af4e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4438-drm-amdgpu-add-df-3.6-headers.patch
@@ -0,0 +1,145 @@
+From 718426b3a2a9b75466cf8c6242f1e730e32f9622 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Mon, 14 May 2018 11:50:46 -0500
+Subject: [PATCH 4438/5725] drm/amdgpu: add df 3.6 headers
+
+Needed for vega20.
+
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../drm/amd/include/asic_reg/df/df_3_6_default.h | 26 ++++++++++++
+ .../drm/amd/include/asic_reg/df/df_3_6_offset.h | 33 +++++++++++++++
+ .../drm/amd/include/asic_reg/df/df_3_6_sh_mask.h | 48 ++++++++++++++++++++++
+ 3 files changed, 107 insertions(+)
+ create mode 100644 drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_default.h
+ create mode 100644 drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h
+ create mode 100644 drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h
+
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_default.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_default.h
+new file mode 100644
+index 0000000..e58c207
+--- /dev/null
++++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_default.h
+@@ -0,0 +1,26 @@
++/*
++ * Copyright (C) 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
++ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++#ifndef _df_3_6_DEFAULT_HEADER
++#define _df_3_6_DEFAULT_HEADER
++
++#define mmFabricConfigAccessControl_DEFAULT 0x00000000
++
++#endif
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h
+new file mode 100644
+index 0000000..a9575db
+--- /dev/null
++++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h
+@@ -0,0 +1,33 @@
++/*
++ * Copyright (C) 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
++ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++#ifndef _df_3_6_OFFSET_HEADER
++#define _df_3_6_OFFSET_HEADER
++
++#define mmFabricConfigAccessControl 0x0410
++#define mmFabricConfigAccessControl_BASE_IDX 0
++
++#define mmDF_PIE_AON0_DfGlobalClkGater 0x00fc
++#define mmDF_PIE_AON0_DfGlobalClkGater_BASE_IDX 0
++
++#define mmDF_CS_UMC_AON0_DramBaseAddress0 0x0044
++#define mmDF_CS_UMC_AON0_DramBaseAddress0_BASE_IDX 0
++
++#endif
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h
+new file mode 100644
+index 0000000..88f7c69
+--- /dev/null
++++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h
+@@ -0,0 +1,48 @@
++/*
++ * Copyright (C) 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
++ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++#ifndef _df_3_6_SH_MASK_HEADER
++#define _df_3_6_SH_MASK_HEADER
++
++/* FabricConfigAccessControl */
++#define FabricConfigAccessControl__CfgRegInstAccEn__SHIFT 0x0
++#define FabricConfigAccessControl__CfgRegInstAccRegLock__SHIFT 0x1
++#define FabricConfigAccessControl__CfgRegInstID__SHIFT 0x10
++#define FabricConfigAccessControl__CfgRegInstAccEn_MASK 0x00000001L
++#define FabricConfigAccessControl__CfgRegInstAccRegLock_MASK 0x00000002L
++#define FabricConfigAccessControl__CfgRegInstID_MASK 0x00FF0000L
++
++/* DF_PIE_AON0_DfGlobalClkGater */
++#define DF_PIE_AON0_DfGlobalClkGater__MGCGMode__SHIFT 0x0
++#define DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK 0x0000000FL
++
++/* DF_CS_AON0_DramBaseAddress0 */
++#define DF_CS_UMC_AON0_DramBaseAddress0__AddrRngVal__SHIFT 0x0
++#define DF_CS_UMC_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT 0x1
++#define DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan__SHIFT 0x4
++#define DF_CS_UMC_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT 0x8
++#define DF_CS_UMC_AON0_DramBaseAddress0__DramBaseAddr__SHIFT 0xc
++#define DF_CS_UMC_AON0_DramBaseAddress0__AddrRngVal_MASK 0x00000001L
++#define DF_CS_UMC_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK 0x00000002L
++#define DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan_MASK 0x000000F0L
++#define DF_CS_UMC_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000700L
++#define DF_CS_UMC_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L
++
++#endif
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4439-drm-amdgpu-df-implement-df-v3_6-callback-functions-v.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4439-drm-amdgpu-df-implement-df-v3_6-callback-functions-v.patch
new file mode 100644
index 00000000..5e894d03
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4439-drm-amdgpu-df-implement-df-v3_6-callback-functions-v.patch
@@ -0,0 +1,207 @@
+From 09a0d883ca7a3d39b7aebfa67ba8ff0a59f9b291 Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Wed, 4 Apr 2018 14:30:28 +0800
+Subject: [PATCH 4439/5725] drm/amdgpu/df: implement df v3_6 callback functions
+ (v2)
+
+New df helpers for 3.6.
+
+v2: switch to using df 3.6 headers (Alex)
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/Makefile | 3 +-
+ drivers/gpu/drm/amd/amdgpu/df_v3_6.c | 116 +++++++++++++++++++++++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/df_v3_6.h | 40 ++++++++++++
+ 3 files changed, 158 insertions(+), 1 deletion(-)
+ create mode 100644 drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+ create mode 100644 drivers/gpu/drm/amd/amdgpu/df_v3_6.h
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
+index 53b246a..b16581d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/Makefile
++++ b/drivers/gpu/drm/amd/amdgpu/Makefile
+@@ -46,7 +46,8 @@ amdgpu-y += \
+
+ # add DF block
+ amdgpu-y += \
+- df_v1_7.o
++ df_v1_7.o \
++ df_v3_6.o
+
+ # add GMC block
+ amdgpu-y += \
+diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+new file mode 100644
+index 0000000..60608b3
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+@@ -0,0 +1,116 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++#include "amdgpu.h"
++#include "df_v3_6.h"
++
++#include "df/df_3_6_default.h"
++#include "df/df_3_6_offset.h"
++#include "df/df_3_6_sh_mask.h"
++
++static u32 df_v3_6_channel_number[] = {1, 2, 0, 4, 0, 8, 0,
++ 16, 32, 0, 0, 0, 2, 4, 8};
++
++static void df_v3_6_init(struct amdgpu_device *adev)
++{
++}
++
++static void df_v3_6_enable_broadcast_mode(struct amdgpu_device *adev,
++ bool enable)
++{
++ u32 tmp;
++
++ if (enable) {
++ tmp = RREG32_SOC15(DF, 0, mmFabricConfigAccessControl);
++ tmp &= ~FabricConfigAccessControl__CfgRegInstAccEn_MASK;
++ WREG32_SOC15(DF, 0, mmFabricConfigAccessControl, tmp);
++ } else
++ WREG32_SOC15(DF, 0, mmFabricConfigAccessControl,
++ mmFabricConfigAccessControl_DEFAULT);
++}
++
++static u32 df_v3_6_get_fb_channel_number(struct amdgpu_device *adev)
++{
++ u32 tmp;
++
++ tmp = RREG32_SOC15(DF, 0, mmDF_CS_UMC_AON0_DramBaseAddress0);
++ tmp &= DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan_MASK;
++ tmp >>= DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
++
++ return tmp;
++}
++
++static u32 df_v3_6_get_hbm_channel_number(struct amdgpu_device *adev)
++{
++ int fb_channel_number;
++
++ fb_channel_number = adev->df_funcs->get_fb_channel_number(adev);
++ if (fb_channel_number > ARRAY_SIZE(df_v3_6_channel_number))
++ fb_channel_number = 0;
++
++ return df_v3_6_channel_number[fb_channel_number];
++}
++
++static void df_v3_6_update_medium_grain_clock_gating(struct amdgpu_device *adev,
++ bool enable)
++{
++ u32 tmp;
++
++ /* Put DF on broadcast mode */
++ adev->df_funcs->enable_broadcast_mode(adev, true);
++
++ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) {
++ tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
++ tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
++ tmp |= DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY;
++ WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp);
++ } else {
++ tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
++ tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
++ tmp |= DF_V3_6_MGCG_DISABLE;
++ WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp);
++ }
++
++ /* Exit broadcast mode */
++ adev->df_funcs->enable_broadcast_mode(adev, false);
++}
++
++static void df_v3_6_get_clockgating_state(struct amdgpu_device *adev,
++ u32 *flags)
++{
++ u32 tmp;
++
++ /* AMD_CG_SUPPORT_DF_MGCG */
++ tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
++ if (tmp & DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY)
++ *flags |= AMD_CG_SUPPORT_DF_MGCG;
++}
++
++const struct amdgpu_df_funcs df_v3_6_funcs = {
++ .init = df_v3_6_init,
++ .enable_broadcast_mode = df_v3_6_enable_broadcast_mode,
++ .get_fb_channel_number = df_v3_6_get_fb_channel_number,
++ .get_hbm_channel_number = df_v3_6_get_hbm_channel_number,
++ .update_medium_grain_clock_gating =
++ df_v3_6_update_medium_grain_clock_gating,
++ .get_clockgating_state = df_v3_6_get_clockgating_state,
++};
+diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.h b/drivers/gpu/drm/amd/amdgpu/df_v3_6.h
+new file mode 100644
+index 0000000..e79c58e
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.h
+@@ -0,0 +1,40 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef __DF_V3_6_H__
++#define __DF_V3_6_H__
++
++#include "soc15_common.h"
++
++enum DF_V3_6_MGCG {
++ DF_V3_6_MGCG_DISABLE = 0,
++ DF_V3_6_MGCG_ENABLE_00_CYCLE_DELAY = 1,
++ DF_V3_6_MGCG_ENABLE_01_CYCLE_DELAY = 2,
++ DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY = 13,
++ DF_V3_6_MGCG_ENABLE_31_CYCLE_DELAY = 14,
++ DF_V3_6_MGCG_ENABLE_63_CYCLE_DELAY = 15
++};
++
++extern const struct amdgpu_df_funcs df_v3_6_funcs;
++
++#endif
+--
+2.7.4
+
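To illustrate the get_hbm_channel_number() helper added above as a standalone snippet: the IntLvNumChan field of DramBaseAddress0 is extracted and used as an index into the channel-number table, falling back to entry 0 when it lands outside the table. The sketch below is hedged — the RREG32_SOC15 read is replaced by a plain parameter, and the mask/shift constants are copied from the df 3.6 headers above.

    /*
     * Hedged sketch: decode the HBM channel count from a raw
     * DramBaseAddress0 register value. The mask/shift mirror the df 3.6
     * sh_mask header above; the register read is replaced by 'reg'.
     */
    #define INTLV_NUM_CHAN_MASK   0x000000F0u
    #define INTLV_NUM_CHAN_SHIFT  4

    static const unsigned int channel_number[] = {1, 2, 0, 4, 0, 8, 0,
                                                   16, 32, 0, 0, 0, 2, 4, 8};

    static unsigned int hbm_channel_number(unsigned int reg)
    {
            unsigned int idx = (reg & INTLV_NUM_CHAN_MASK) >> INTLV_NUM_CHAN_SHIFT;

            if (idx >= sizeof(channel_number) / sizeof(channel_number[0]))
                    idx = 0;

            return channel_number[idx];
    }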
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4440-drm-amdgpu-Switch-to-use-df_v3_6_funcs-for-vega20-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4440-drm-amdgpu-Switch-to-use-df_v3_6_funcs-for-vega20-v2.patch
new file mode 100644
index 00000000..ab9bda7a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4440-drm-amdgpu-Switch-to-use-df_v3_6_funcs-for-vega20-v2.patch
@@ -0,0 +1,43 @@
+From a552f7119fbcc156caf6c9b0ba90e34f60824ea6 Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Wed, 4 Apr 2018 14:32:10 +0800
+Subject: [PATCH 4440/5725] drm/amdgpu: Switch to use df_v3_6_funcs for vega20
+ (v2)
+
+v2: fix whitespace (Alex)
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/soc15.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index 7935484..8ccbcf9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -51,6 +51,7 @@
+ #include "gfxhub_v1_0.h"
+ #include "mmhub_v1_0.h"
+ #include "df_v1_7.h"
++#include "df_v3_6.h"
+ #include "vega10_ih.h"
+ #include "sdma_v4_0.h"
+ #include "uvd_v7_0.h"
+@@ -501,7 +502,10 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
+ else
+ adev->nbio_funcs = &nbio_v6_1_funcs;
+
+- adev->df_funcs = &df_v1_7_funcs;
++ if (adev->asic_type == CHIP_VEGA20)
++ adev->df_funcs = &df_v3_6_funcs;
++ else
++ adev->df_funcs = &df_v1_7_funcs;
+ adev->nbio_funcs->detect_hw_virt(adev);
+
+ if (amdgpu_sriov_vf(adev))
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4441-drm-amdgpu-Add-vega20-pci-ids.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4441-drm-amdgpu-Add-vega20-pci-ids.patch
new file mode 100644
index 00000000..7bc7e26c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4441-drm-amdgpu-Add-vega20-pci-ids.patch
@@ -0,0 +1,34 @@
+From 14fbf997a81cc2646c9c85491cb66a4a519fee28 Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Mon, 22 Jan 2018 19:08:33 +0800
+Subject: [PATCH 4441/5725] drm/amdgpu: Add vega20 pci ids
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 92a8967..1b19061 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -564,6 +564,13 @@ static const struct pci_device_id pciidlist[] = {
+ {0x1002, 0x69A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
+ {0x1002, 0x69A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
+ {0x1002, 0x69AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
++ /* Vega 20 */
++ {0x1002, 0x66A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
++ {0x1002, 0x66A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
++ {0x1002, 0x66A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
++ {0x1002, 0x66A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
++ {0x1002, 0x66A7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
++ {0x1002, 0x66AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
+ /* Raven */
+ {0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU},
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4442-drm-amdgpu-flag-Vega20-as-experimental.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4442-drm-amdgpu-flag-Vega20-as-experimental.patch
new file mode 100644
index 00000000..755e3838
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4442-drm-amdgpu-flag-Vega20-as-experimental.patch
@@ -0,0 +1,39 @@
+From 32db2da5a018a9a19a00b28f527368728e2e6733 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Mon, 14 May 2018 11:28:04 -0500
+Subject: [PATCH 4442/5725] drm/amdgpu: flag Vega20 as experimental
+
+Must set amdgpu.exp_hw_support=1 on the kernel command line in
+grub to enable support.
+
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 1b19061..8222902 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -565,12 +565,12 @@ static const struct pci_device_id pciidlist[] = {
+ {0x1002, 0x69A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
+ {0x1002, 0x69AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
+ /* Vega 20 */
+- {0x1002, 0x66A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
+- {0x1002, 0x66A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
+- {0x1002, 0x66A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
+- {0x1002, 0x66A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
+- {0x1002, 0x66A7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
+- {0x1002, 0x66AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
++ {0x1002, 0x66A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT},
++ {0x1002, 0x66A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT},
++ {0x1002, 0x66A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT},
++ {0x1002, 0x66A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT},
++ {0x1002, 0x66A7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT},
++ {0x1002, 0x66AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT},
+ /* Raven */
+ {0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU},
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4443-drm-amdgpu-gem-remove-unused-variable.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4443-drm-amdgpu-gem-remove-unused-variable.patch
new file mode 100644
index 00000000..5a5b9d45
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4443-drm-amdgpu-gem-remove-unused-variable.patch
@@ -0,0 +1,27 @@
+From 2815c7fa7f71eddd2ec98a4b50cccaea32147b17 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Tue, 15 May 2018 14:09:21 -0500
+Subject: [PATCH 4443/5725] drm/amdgpu/gem: remove unused variable
+
+Trivial.
+
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+index b432b21..6c41cf9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+@@ -942,7 +942,6 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
+ unsigned domain;
+ const char *placement;
+ unsigned pin_count;
+- uint64_t offset;
+
+ domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+ switch (domain) {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4444-drm-amdgpu-Skip-drm_sched_entity-related-ops-for-KIQ.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4444-drm-amdgpu-Skip-drm_sched_entity-related-ops-for-KIQ.patch
new file mode 100644
index 00000000..466b30aa
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4444-drm-amdgpu-Skip-drm_sched_entity-related-ops-for-KIQ.patch
@@ -0,0 +1,76 @@
+From 88c448421d08af06b2a6c38a6e41b7f01f6ed55b Mon Sep 17 00:00:00 2001
+From: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Date: Tue, 15 May 2018 14:12:21 -0400
+Subject: [PATCH 4444/5725] drm/amdgpu: Skip drm_sched_entity related ops for
+ KIQ ring.
+
+Following change 75fbed2 we never initialize or use the GPU
+scheduler for KIQ, and hence we need to skip the KIQ ring when iterating
+over amdgpu_ctx's scheduler entities.
+
+Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 20 +++++++++++++++++---
+ 1 file changed, 17 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+index eb82bbf..51fb09f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+@@ -190,9 +190,13 @@ static void amdgpu_ctx_do_release(struct kref *ref)
+
+ ctx = container_of(ref, struct amdgpu_ctx, refcount);
+
+- for (i = 0; i < ctx->adev->num_rings; i++)
++ for (i = 0; i < ctx->adev->num_rings; i++) {
++ if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
++ continue;
++
+ drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
+ &ctx->rings[i].entity);
++ }
+
+ amdgpu_ctx_fini(ref);
+ }
+@@ -469,12 +473,17 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
+ if (!ctx->adev)
+ return;
+
+- for (i = 0; i < ctx->adev->num_rings; i++)
++ for (i = 0; i < ctx->adev->num_rings; i++) {
++
++ if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
++ continue;
++
+ if (kref_read(&ctx->refcount) == 1)
+ drm_sched_entity_do_release(&ctx->adev->rings[i]->sched,
+ &ctx->rings[i].entity);
+ else
+ DRM_ERROR("ctx %p is still alive\n", ctx);
++ }
+ }
+ }
+
+@@ -491,12 +500,17 @@ void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr)
+ if (!ctx->adev)
+ return;
+
+- for (i = 0; i < ctx->adev->num_rings; i++)
++ for (i = 0; i < ctx->adev->num_rings; i++) {
++
++ if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
++ continue;
++
+ if (kref_read(&ctx->refcount) == 1)
+ drm_sched_entity_cleanup(&ctx->adev->rings[i]->sched,
+ &ctx->rings[i].entity);
+ else
+ DRM_ERROR("ctx %p is still alive\n", ctx);
++ }
+ }
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4445-drm-scheduler-remove-unused-parameter.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4445-drm-scheduler-remove-unused-parameter.patch
new file mode 100644
index 00000000..d5ac0d1e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4445-drm-scheduler-remove-unused-parameter.patch
@@ -0,0 +1,183 @@
+From 603db0d48cc228d234a2ba69eca4d01b9e518972 Mon Sep 17 00:00:00 2001
+From: Nayan Deshmukh <nayan26deshmukh@gmail.com>
+Date: Thu, 29 Mar 2018 22:36:32 +0530
+Subject: [PATCH 4445/5725] drm/scheduler: remove unused parameter
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This patch also affects the amdgpu and etnaviv drivers, which
+use the function drm_sched_entity_init.
+
+Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
+Suggested-by: Christian König <christian.koenig@amd.com>
+Acked-by: Lucas Stach <l.stach@pengutronix.de>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+
+Conflicts:
+ drivers/gpu/drm/etnaviv/etnaviv_drv.c
+ drivers/gpu/drm/scheduler/gpu_scheduler.c
+
+Change-Id: I15f949005824c8553a768bdd26a4ce686dcafefb
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 4 ++--
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 2 +-
+ drivers/gpu/drm/scheduler/gpu_scheduler.c | 3 +--
+ include/drm/gpu_scheduler.h | 2 +-
+ 10 files changed, 11 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+index 51fb09f..58795d4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+@@ -93,7 +93,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
+ continue;
+
+ r = drm_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
+- rq, amdgpu_sched_jobs, &ctx->guilty);
++ rq, &ctx->guilty);
+ if (r)
+ goto failed;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index 1bd7997..cead212 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -111,7 +111,7 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
+ ring = adev->mman.buffer_funcs_ring;
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+ r = drm_sched_entity_init(&ring->sched, &adev->mman.entity,
+- rq, amdgpu_sched_jobs, NULL);
++ rq, NULL);
+ if (r) {
+ DRM_ERROR("Failed setting up TTM BO move run queue.\n");
+ goto error_entity;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+index c973b10..4ab11bb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+@@ -250,7 +250,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
+ ring = &adev->uvd.inst[j].ring;
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity,
+- rq, amdgpu_sched_jobs, NULL);
++ rq, NULL);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up UVD(%d) run queue.\n", j);
+ return r;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+index 1b1d8e1..23d960e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+@@ -191,7 +191,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
+ ring = &adev->vce.ring[0];
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ r = drm_sched_entity_init(&ring->sched, &adev->vce.entity,
+- rq, amdgpu_sched_jobs, NULL);
++ rq, NULL);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up VCE run queue.\n");
+ return r;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+index 01cc8de..be15303 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+@@ -106,7 +106,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
+ ring = &adev->vcn.ring_dec;
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_dec,
+- rq, amdgpu_sched_jobs, NULL);
++ rq, NULL);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up VCN dec run queue.\n");
+ return r;
+@@ -115,7 +115,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
+ ring = &adev->vcn.ring_enc[0];
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_enc,
+- rq, amdgpu_sched_jobs, NULL);
++ rq, NULL);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up VCN enc run queue.\n");
+ return r;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 336abd4..caf5f61 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -2453,7 +2453,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ ring = adev->vm_manager.vm_pte_rings[ring_instance];
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+ r = drm_sched_entity_init(&ring->sched, &vm->entity,
+- rq, amdgpu_sched_jobs, NULL);
++ rq, NULL);
+ if (r)
+ return r;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+index 2778e48..8ce51946 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+@@ -430,7 +430,7 @@ static int uvd_v6_0_sw_init(void *handle)
+ ring = &adev->uvd.inst->ring_enc[0];
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity_enc,
+- rq, amdgpu_sched_jobs, NULL);
++ rq, NULL);
+ if (r) {
+ DRM_ERROR("Failed setting up UVD ENC run queue.\n");
+ return r;
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+index 47a6af5..a0080d7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+@@ -432,7 +432,7 @@ static int uvd_v7_0_sw_init(void *handle)
+ ring = &adev->uvd.inst[j].ring_enc[0];
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity_enc,
+- rq, amdgpu_sched_jobs, NULL);
++ rq, NULL);
+ if (r) {
+ DRM_ERROR("(%d)Failed setting up UVD ENC run queue.\n", j);
+ return r;
+diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
+index 3d41246..026e0d8 100644
+--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
++++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
+@@ -118,14 +118,13 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
+ * @entity The pointer to a valid drm_sched_entity
+ * @rq The run queue this entity belongs
+ * @kernel If this is an entity for the kernel
+- * @jobs The max number of jobs in the job queue
+ *
+ * return 0 if succeed. negative error code on failure
+ */
+ int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
+ struct drm_sched_entity *entity,
+ struct drm_sched_rq *rq,
+- uint32_t jobs, atomic_t *guilty)
++ atomic_t *guilty)
+ {
+ if (!(sched && entity && rq))
+ return -EINVAL;
+diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
+index 1df6229..86c7344 100644
+--- a/include/drm/gpu_scheduler.h
++++ b/include/drm/gpu_scheduler.h
+@@ -150,7 +150,7 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched);
+ int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
+ struct drm_sched_entity *entity,
+ struct drm_sched_rq *rq,
+- uint32_t jobs, atomic_t *guilty);
++ atomic_t *guilty);
+ void drm_sched_entity_do_release(struct drm_gpu_scheduler *sched,
+ struct drm_sched_entity *entity);
+ void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4446-drm-amdgpu-remove-unused-member.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4446-drm-amdgpu-remove-unused-member.patch
new file mode 100644
index 00000000..a316a6d9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4446-drm-amdgpu-remove-unused-member.patch
@@ -0,0 +1,33 @@
+From ccbeba8f72c3e3371845efd9bd71401eb4977cf6 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Thu, 19 Apr 2018 09:57:21 +0200
+Subject: [PATCH 4446/5725] drm/amdgpu: remove unused member
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This lock isn't used any more.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 3 ---
+ 1 file changed, 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+index 84658b5..3492ab7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+@@ -190,9 +190,6 @@ struct amdgpu_vm {
+ struct amdgpu_vm_pt root;
+ struct dma_fence *last_update;
+
+- /* protecting freed */
+- spinlock_t freed_lock;
+-
+ /* Scheduler entity for page table updates */
+ struct drm_sched_entity entity;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4447-drm-scheduler-Remove-obsolete-spinlock.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4447-drm-scheduler-Remove-obsolete-spinlock.patch
new file mode 100644
index 00000000..1bfe7393
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4447-drm-scheduler-Remove-obsolete-spinlock.patch
@@ -0,0 +1,76 @@
+From 34147464b6e29c6468a3bec16b2e1e2f8b3200eb Mon Sep 17 00:00:00 2001
+From: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Date: Tue, 15 May 2018 14:42:20 -0400
+Subject: [PATCH 4447/5725] drm/scheduler: Remove obsolete spinlock.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This spinlock is superfluous; any call to drm_sched_entity_push_job
+should already be under a lock, together with the matching drm_sched_job_init,
+so that the order of insertion into the queue matches the job's fence
+sequence number.
+
+v2:
+Improve patch description.
+Add functions documentation describing the locking considerations
+
+Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Acked-by: Chunming Zhou <david1.zhou@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/scheduler/gpu_scheduler.c | 15 ++++++++++-----
+ 1 file changed, 10 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
+index 026e0d8..be5d321 100644
+--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
++++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
+@@ -138,7 +138,6 @@ int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
+ entity->last_scheduled = NULL;
+
+ spin_lock_init(&entity->rq_lock);
+- spin_lock_init(&entity->queue_lock);
+ spsc_queue_init(&entity->job_queue);
+
+ atomic_set(&entity->fence_seq, 0);
+@@ -412,6 +411,10 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity)
+ *
+ * @sched_job The pointer to job required to submit
+ *
++ * Note: To guarantee that the order of insertion to queue matches
++ * the job's fence sequence number this function should be
++ * called with drm_sched_job_init under common lock.
++ *
+ * Returns 0 for success, negative error code otherwise.
+ */
+ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
+@@ -422,11 +425,8 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
+
+ trace_drm_sched_job(sched_job, entity);
+
+- spin_lock(&entity->queue_lock);
+ first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
+
+- spin_unlock(&entity->queue_lock);
+-
+ /* first job wakes up scheduler */
+ if (first) {
+ /* Add the entity to the run queue */
+@@ -592,7 +592,12 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
+ }
+ EXPORT_SYMBOL(drm_sched_job_recovery);
+
+-/* init a sched_job with basic field */
++/**
++ * Init a sched_job with basic field
++ *
++ * Note: Refer to drm_sched_entity_push_job documentation
++ * for locking considerations.
++ */
+ int drm_sched_job_init(struct drm_sched_job *job,
+ struct drm_gpu_scheduler *sched,
+ struct drm_sched_entity *entity,
+--
+2.7.4
+
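The locking rule this patch relies on can be sketched from the caller's side: one common lock is held across drm_sched_job_init() and drm_sched_entity_push_job(), so the order of insertion into the entity's queue matches the fence sequence numbers handed out at init time. In the hedged sketch below, submit_lock and the owner argument are illustrative placeholders, not taken from any particular driver.

    #include <linux/mutex.h>
    #include <drm/gpu_scheduler.h>

    /*
     * Hedged sketch of the caller-side pattern the commit message relies on:
     * one lock spans job init and push so that queue order matches fence
     * sequence order. 'submit_lock' and 'owner' are illustrative only.
     */
    static int submit_job_locked(struct mutex *submit_lock,
                                 struct drm_sched_job *job,
                                 struct drm_gpu_scheduler *sched,
                                 struct drm_sched_entity *entity,
                                 void *owner)
    {
            int r;

            mutex_lock(submit_lock);
            r = drm_sched_job_init(job, sched, entity, owner);
            if (!r)
                    drm_sched_entity_push_job(job, entity);
            mutex_unlock(submit_lock);

            return r;
    }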
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4448-drm-amd-amdgpu-Code-comments-for-the-amdgpu_ttm.c-dr.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4448-drm-amd-amdgpu-Code-comments-for-the-amdgpu_ttm.c-dr.patch
new file mode 100644
index 00000000..dcc3c341
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4448-drm-amd-amdgpu-Code-comments-for-the-amdgpu_ttm.c-dr.patch
@@ -0,0 +1,934 @@
+From e9e25d149146e3c0461a3112e9aa99e91920cd11 Mon Sep 17 00:00:00 2001
+From: Tom St Denis <tom.stdenis@amd.com>
+Date: Wed, 9 May 2018 14:22:29 -0400
+Subject: [PATCH 4448/5725] drm/amd/amdgpu: Code comments for the amdgpu_ttm.c
+ driver. (v2)
+
+NFC just comments.
+
+(v2): Updated based on feedback from Alex Deucher.
+
+Signed-off-by: Tom St Denis <tom.stdenis@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+
+Conflicts:
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+
+Change-Id: I9d65d90517f52abcfe7ce356c58ebe2f561a642d
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 368 ++++++++++++++++++++++++++++++--
+ 1 file changed, 351 insertions(+), 17 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index cead212..acaa441 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -63,16 +63,44 @@ static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
+ /*
+ * Global memory.
+ */
++
++/**
++ * amdgpu_ttm_mem_global_init - Initialize and acquire reference to
++ * memory object
++ *
++ * @ref: Object for initialization.
++ *
++ * This is called by drm_global_item_ref() when an object is being
++ * initialized.
++ */
+ static int amdgpu_ttm_mem_global_init(struct drm_global_reference *ref)
+ {
+ return ttm_mem_global_init(ref->object);
+ }
+
++/**
++ * amdgpu_ttm_mem_global_release - Drop reference to a memory object
++ *
++ * @ref: Object being removed
++ *
++ * This is called by drm_global_item_unref() when an object is being
++ * released.
++ */
+ static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
+ {
+ ttm_mem_global_release(ref->object);
+ }
+
++/**
++ * amdgpu_ttm_global_init - Initialize global TTM memory reference
++ * structures.
++ *
++ * @adev: AMDGPU device for which the global structures need to be
++ * registered.
++ *
++ * This is called as part of the AMDGPU ttm init from amdgpu_ttm_init()
++ * during bring up.
++ */
+ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
+ {
+ struct drm_global_reference *global_ref;
+@@ -80,7 +108,9 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
+ struct drm_sched_rq *rq;
+ int r;
+
++ /* ensure reference is false in case init fails */
+ adev->mman.mem_global_referenced = false;
++
+ global_ref = &adev->mman.mem_global_ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+ global_ref->size = sizeof(struct ttm_mem_global);
+@@ -146,6 +176,18 @@ static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
+ return 0;
+ }
+
++/**
++ * amdgpu_init_mem_type - Initialize a memory manager for a specific
++ * type of memory request.
++ *
++ * @bdev: The TTM BO device object (contains a reference to
++ * amdgpu_device)
++ * @type: The type of memory requested
++ * @man:
++ *
++ * This is called by ttm_bo_init_mm() when a buffer object is being
++ * initialized.
++ */
+ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+ struct ttm_mem_type_manager *man)
+ {
+@@ -161,6 +203,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ case TTM_PL_TT:
++ /* GTT memory */
+ man->func = &amdgpu_gtt_mgr_func;
+ man->gpu_offset = adev->gmc.gart_start;
+ man->available_caching = TTM_PL_MASK_CACHING;
+@@ -210,6 +253,14 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+ return 0;
+ }
+
++/**
++ * amdgpu_evict_flags - Compute placement flags
++ *
++ * @bo: The buffer object to evict
++ * @placement: Possible destination(s) for evicted BO
++ *
++ * Fill in placement data when ttm_bo_evict() is called
++ */
+ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement)
+ {
+@@ -221,12 +272,14 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
+ .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
+ };
+
++ /* Don't handle scatter gather BOs */
+ if (bo->type == ttm_bo_type_sg) {
+ placement->num_placement = 0;
+ placement->num_busy_placement = 0;
+ return;
+ }
+
++ /* Object isn't an AMDGPU object so ignore */
+ if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
+ placement->placement = &placements;
+ placement->busy_placement = &placements;
+@@ -234,11 +287,13 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
+ placement->num_busy_placement = 1;
+ return;
+ }
++
+ abo = ttm_to_amdgpu_bo(bo);
+ switch (bo->mem.mem_type) {
+ case TTM_PL_VRAM:
+ case AMDGPU_PL_DGMA:
+ if (!adev->mman.buffer_funcs_enabled) {
++ /* Move to system memory */
+ amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
+ } else if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
+ !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
+@@ -256,6 +311,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
+ abo->placement.busy_placement = &abo->placements[1];
+ abo->placement.num_busy_placement = 1;
+ } else {
++ /* Move to GTT memory */
+ amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
+ }
+ break;
+@@ -267,6 +323,15 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
+ *placement = abo->placement;
+ }
+
++/**
++ * amdgpu_verify_access - Verify access for a mmap call
++ *
++ * @bo: The buffer object to map
++ * @filp: The file pointer from the process performing the mmap
++ *
++ * This is called by ttm_bo_mmap() to verify whether a process
++ * has the right to mmap a BO to their process space.
++ */
+ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+ {
+ struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
+@@ -285,6 +350,15 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+ filp->private_data);
+ }
+
++/**
++ * amdgpu_move_null - Register memory for a buffer object
++ *
++ * @bo: The bo to assign the memory to
++ * @new_mem: The memory to be assigned.
++ *
++ * Assign the memory from new_mem to the memory of the buffer object
++ * bo.
++*/
+ static void amdgpu_move_null(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *new_mem)
+ {
+@@ -295,6 +369,10 @@ static void amdgpu_move_null(struct ttm_buffer_object *bo,
+ new_mem->mm_node = NULL;
+ }
+
++/**
++ * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT
++ * buffer.
++*/
+ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
+ struct drm_mm_node *mm_node,
+ struct ttm_mem_reg *mem)
+@@ -309,9 +387,10 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
+ }
+
+ /**
+- * amdgpu_find_mm_node - Helper function finds the drm_mm_node
+- * corresponding to @offset. It also modifies the offset to be
+- * within the drm_mm_node returned
++ * amdgpu_find_mm_node - Helper function finds the drm_mm_node
++ * corresponding to @offset. It also modifies
++ * the offset to be within the drm_mm_node
++ * returned
+ */
+ static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
+ unsigned long *offset)
+@@ -450,7 +529,12 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
+ return r;
+ }
+
+-
++/**
++ * amdgpu_move_blit - Copy an entire buffer to another buffer
++ *
++ * This is a helper called by amdgpu_bo_move() and
++ * amdgpu_move_vram_ram() to help move buffers to and from VRAM.
++ */
+ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
+ bool evict, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem,
+@@ -485,6 +569,11 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
+ return r;
+ }
+
++/**
++ * amdgpu_move_vram_ram - Copy VRAM buffer to RAM buffer
++ *
++ * Called by amdgpu_bo_move().
++*/
+ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
+ struct ttm_operation_ctx *ctx,
+ struct ttm_mem_reg *new_mem)
+@@ -495,8 +584,10 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
+ struct ttm_place placements;
+ struct ttm_placement placement;
+ int r;
+-
++
+ adev = amdgpu_ttm_adev(bo->bdev);
++
++ /* create space/pages for new_mem in GTT space */
+ tmp_mem = *new_mem;
+ tmp_mem.mm_node = NULL;
+ placement.num_placement = 1;
+@@ -510,26 +601,37 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
+ if (unlikely(r)) {
+ return r;
+ }
+-
++
++ /* set caching flags */
+ r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
+ if (unlikely(r)) {
+ goto out_cleanup;
+ }
+-
++
++ /* Bind the memory to the GTT space */
+ r = ttm_tt_bind(bo->ttm, &tmp_mem, ctx);
+ if (unlikely(r)) {
+ goto out_cleanup;
+ }
++
++ /* blit VRAM to GTT */
+ r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, &tmp_mem, old_mem);
+ if (unlikely(r)) {
+ goto out_cleanup;
+ }
++
++ /* move BO (in tmp_mem) to new_mem */
+ r = ttm_bo_move_ttm(bo, ctx, new_mem);
+ out_cleanup:
+ ttm_bo_mem_put(bo, &tmp_mem);
+ return r;
+ }
+
++/**
++ * amdgpu_move_ram_vram - Copy buffer from RAM to VRAM
++ *
++ * Called by amdgpu_bo_move().
++*/
+ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
+ struct ttm_operation_ctx *ctx,
+ struct ttm_mem_reg *new_mem)
+@@ -542,6 +644,8 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
+ int r;
+
+ adev = amdgpu_ttm_adev(bo->bdev);
++
++ /* make space in GTT for old_mem buffer */
+ tmp_mem = *new_mem;
+ tmp_mem.mm_node = NULL;
+ placement.num_placement = 1;
+@@ -555,10 +659,14 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
+ if (unlikely(r)) {
+ return r;
+ }
++
++ /* move/bind old memory to GTT space */
+ r = ttm_bo_move_ttm(bo, ctx, &tmp_mem);
+ if (unlikely(r)) {
+ goto out_cleanup;
+ }
++
++ /* copy to VRAM */
+ r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, new_mem, old_mem);
+ if (unlikely(r)) {
+ goto out_cleanup;
+@@ -568,6 +676,11 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
+ return r;
+ }
+
++/**
++ * amdgpu_bo_move - Move a buffer object to a new memory location
++ *
++ * Called by ttm_bo_handle_move_mem()
++*/
+ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
+ struct ttm_operation_ctx *ctx,
+ struct ttm_mem_reg *new_mem)
+@@ -637,6 +750,11 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
+ return 0;
+ }
+
++/**
++ * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault
++ *
++ * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
++*/
+ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+ {
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+@@ -731,6 +849,14 @@ struct amdgpu_ttm_tt {
+ uint32_t last_set_pages;
+ };
+
++/**
++ * amdgpu_ttm_tt_get_user_pages - Pin the pages of memory pointed to
++ * by a USERPTR
++ *
++ * Called by amdgpu_gem_userptr_ioctl() and amdgpu_cs_parser_bos().
++ * This provides a wrapper around the get_user_pages() call to provide
++ * device accessible pages that back user memory.
++*/
+ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
+ {
+ struct amdgpu_ttm_tt *gtt = (void *)ttm;
+@@ -760,6 +886,7 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
+ }
+ }
+
++ /* loop enough times using contiguous pages of memory */
+ do {
+ unsigned num_pages = ttm->num_pages - pinned;
+ uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
+@@ -793,6 +920,14 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
+ return r;
+ }
+
++/**
++ * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages
++ * as necessary.
++ *
++ * Called by amdgpu_cs_list_validate(). This creates the page list
++ * that backs user memory and will ultimately be mapped into the device
++ * address space.
++*/
+ void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
+ {
+ struct amdgpu_ttm_tt *gtt = (void *)ttm;
+@@ -807,6 +942,11 @@ void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
+ }
+ }
+
++/**
++ * amdgpu_ttm_tt_mark_user_pages - Mark pages as dirty
++ *
++ * Called while unpinning userptr pages
++*/
+ void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm)
+ {
+ struct amdgpu_ttm_tt *gtt = (void *)ttm;
+@@ -825,7 +965,12 @@ void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm)
+ }
+ }
+
+-/* prepare the sg table with the user pages */
++/**
++ * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the
++ * user pages
++ *
++ * Called by amdgpu_ttm_backend_bind()
++**/
+ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
+ {
+ struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
+@@ -837,17 +982,20 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
+ enum dma_data_direction direction = write ?
+ DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+
++ /* Allocate an SG array and squash pages into it */
+ r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
+ ttm->num_pages << PAGE_SHIFT,
+ GFP_KERNEL);
+ if (r)
+ goto release_sg;
+-
++
++ /* Map SG to device */
+ r = -ENOMEM;
+ nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
+ if (nents != ttm->sg->nents)
+ goto release_sg;
+-
++
++ /* convert SG to linear array of pages and dma addresses */
+ drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
+ gtt->ttm.dma_address, ttm->num_pages);
+
+@@ -858,6 +1006,9 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
+ return r;
+ }
+
++/**
++ * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
++*/
+ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
+ {
+ struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
+@@ -871,9 +1022,10 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
+ if (!ttm->sg->sgl)
+ return;
+
+- /* free the sg table and pages again */
++ /* unmap the pages mapped to the device */
+ dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
+-
++
++ /* mark the pages as dirty */
+ amdgpu_ttm_tt_mark_user_pages(ttm);
+
+ sg_free_table(ttm->sg);
+@@ -918,6 +1070,12 @@ int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
+ return r;
+ }
+
++/**
++ * amdgpu_ttm_backend_bind - Bind GTT memory
++ *
++ * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
++ * This handles binding GTT memory to the device address space.
++*/
+ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
+ struct ttm_mem_reg *bo_mem)
+ {
+@@ -947,8 +1105,11 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
+ gtt->offset = AMDGPU_BO_INVALID_OFFSET;
+ return 0;
+ }
+-
++
++ /* compute PTE flags relevant to this BO memory */
+ flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
++
++ /* bind pages into GART page tables */
+ gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
+ r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
+ ttm->pages, gtt->ttm.dma_address, flags);
+@@ -959,6 +1120,9 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
+ return r;
+ }
+
++/**
++ * amdgpu_ttm_alloc_gart - Allocate GART memory for buffer object
++*/
+ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
+ {
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+@@ -975,6 +1139,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
+ amdgpu_gtt_mgr_has_gart_addr(&bo->mem))
+ return 0;
+
++ /* allocate GTT space */
+ tmp = bo->mem;
+ tmp.mm_node = NULL;
+ placement.num_placement = 1;
+@@ -989,8 +1154,11 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
+ r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
+ if (unlikely(r))
+ return r;
+-
++
++ /* compute PTE flags for this buffer object */
+ flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
++
++ /* Bind pages */
+ gtt->offset = (u64)tmp.start << PAGE_SHIFT;
+ r = amdgpu_ttm_gart_bind(adev, bo, flags);
+ if (unlikely(r)) {
+@@ -1006,6 +1174,12 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
+ return 0;
+ }
+
++/**
++ * amdgpu_ttm_recover_gart - Rebind GTT pages
++ *
++ * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
++ * rebind GTT pages during a GPU reset.
++*/
+ int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
+ {
+ struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
+@@ -1021,12 +1195,19 @@ int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
+ return r;
+ }
+
++/**
++ * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages
++ *
++ * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
++ * ttm_tt_destroy().
++*/
+ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
+ {
+ struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
+ struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ int r;
+-
++
++ /* if the pages have userptr pinning then clear that first */
+ if (gtt->userptr)
+ amdgpu_ttm_tt_unpin_userptr(ttm);
+
+@@ -1058,6 +1239,13 @@ static struct ttm_backend_func amdgpu_backend_func = {
+ .destroy = &amdgpu_ttm_backend_destroy,
+ };
+
++/**
++ * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
++ *
++ * @bo: The buffer object to create a GTT ttm_tt object around
++ *
++ * Called by ttm_tt_create().
++*/
+ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
+ uint32_t page_flags)
+ {
+@@ -1071,6 +1259,7 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
+ return NULL;
+ }
+ gtt->ttm.ttm.func = &amdgpu_backend_func;
++ /* allocate space for the uninitialized page entries */
+ if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) {
+ kfree(gtt);
+ return NULL;
+@@ -1078,13 +1267,20 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
+ return &gtt->ttm.ttm;
+ }
+
++/**
++ * amdgpu_ttm_tt_populate - Map GTT pages visible to the device
++ *
++ * Map the pages of a ttm_tt object to an address space visible
++ * to the underlying device.
++*/
+ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
+ struct ttm_operation_ctx *ctx)
+ {
+ struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
+ struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+-
++
++ /* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
+ if (gtt && gtt->userptr) {
+ ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!ttm->sg)
+@@ -1103,9 +1299,17 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
+ return 0;
+ }
+
++ /* fall back to generic helper to populate the page array
++ * and map them to the device */
+ return ttm_populate_and_map_pages(adev->dev, &gtt->ttm, ctx);
+ }
+
++/**
++ * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays
++ *
++ * Unmaps pages of a ttm_tt object from the device address space and
++ * unpopulates the page array backing it.
++*/
+ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
+ {
+ struct amdgpu_device *adev;
+@@ -1124,9 +1328,21 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
+
+ adev = amdgpu_ttm_adev(ttm->bdev);
+
++ /* fall back to generic helper to unmap and unpopulate array */
+ ttm_unmap_and_unpopulate_pages(adev->dev, &gtt->ttm);
+ }
+
++/**
++ * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt
++ * for the current task
++ *
++ * @ttm: The ttm_tt object to bind this userptr object to
++ * @addr: The address in the current tasks VM space to use
++ * @flags: Requirements of userptr object.
++ *
++ * Called by amdgpu_gem_userptr_ioctl() to bind userptr pages
++ * to the current task
++*/
+ int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
+ uint32_t flags)
+ {
+@@ -1151,6 +1367,9 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
+ return 0;
+ }
+
++/**
++ * amdgpu_ttm_tt_get_usermm - Return memory manager for ttm_tt object
++*/
+ struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
+ {
+ struct amdgpu_ttm_tt *gtt = (void *)ttm;
+@@ -1164,6 +1383,12 @@ struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
+ return gtt->usertask->mm;
+ }
+
++/**
++ * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lies
++ * inside an address range for the
++ * current task.
++ *
++*/
+ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
+ unsigned long end)
+ {
+@@ -1174,10 +1399,16 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
+ if (gtt == NULL || !gtt->userptr)
+ return false;
+
++ /* Return false if no part of the ttm_tt object lies within
++ * the range
++ */
+ size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
+ if (gtt->userptr > end || gtt->userptr + size <= start)
+ return false;
+
++ /* Search the lists of tasks that hold this mapping and see
++ * if current is one of them. If it is return false.
++ */
+ spin_lock(&gtt->guptasklock);
+ list_for_each_entry(entry, &gtt->guptasks, list) {
+ if (entry->task == current) {
+@@ -1192,6 +1423,10 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
+ return true;
+ }
+
++/**
++ * amdgpu_ttm_tt_userptr_invalidated - Has the ttm_tt object been
++ * invalidated?
++*/
+ bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
+ int *last_invalidated)
+ {
+@@ -1202,6 +1437,12 @@ bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
+ return prev_invalidated != *last_invalidated;
+ }
+
++/**
++ * amdgpu_ttm_tt_userptr_needs_pages - Have the pages backing this
++ * ttm_tt object been invalidated
++ * since the last time they've
++ * been set?
++*/
+ bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm)
+ {
+ struct amdgpu_ttm_tt *gtt = (void *)ttm;
+@@ -1212,6 +1453,9 @@ bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm)
+ return atomic_read(&gtt->mmu_invalidations) != gtt->last_set_pages;
+ }
+
++/**
++ * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only?
++*/
+ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
+ {
+ struct amdgpu_ttm_tt *gtt = (void *)ttm;
+@@ -1222,6 +1466,12 @@ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
+ return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
+ }
+
++/**
++ * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object
++ *
++ * @ttm: The ttm_tt object to compute the flags for
++ * @mem: The memory registry backing this ttm_tt object
++*/
+ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
+ struct ttm_mem_reg *mem)
+ {
+@@ -1249,6 +1499,16 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
+ return flags;
+ }
+
++/**
++ * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict
++ * a buffer object.
++ *
++ * Return true if eviction is sensible. Called by
++ * ttm_mem_evict_first() on behalf of ttm_bo_mem_force_space()
++ * which tries to evict buffer objects until it can find space
++ * for a new object and by ttm_bo_force_list_clean() which is
++ * used to clean out a memory space.
++*/
+ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
+ const struct ttm_place *place)
+ {
+@@ -1295,6 +1555,19 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
+ return ttm_bo_eviction_valuable(bo, place);
+ }
+
++/**
++ * amdgpu_ttm_access_memory - Read or Write memory that backs a
++ * buffer object.
++ *
++ * @bo: The buffer object to read/write
++ * @offset: Offset into buffer object
++ * @buf: Secondary buffer to write/read from
++ * @len: Length in bytes of access
++ * @write: true if writing
++ *
++ * This is used to access VRAM that backs a buffer object via MMIO
++ * access for debugging purposes.
++*/
+ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
+ unsigned long offset,
+ void *buf, int len, int write)
+@@ -1634,12 +1907,22 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
+ return r;
+ }
+
++/**
++ * amdgpu_ttm_init - Init the memory management (ttm) as well as
++ * various gtt/vram related fields.
++ *
++ * This initializes all of the memory space pools that the TTM layer
++ * will need such as the GTT space (system memory mapped to the device),
++ * VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which
++ * can be mapped per VMID.
++*/
+ int amdgpu_ttm_init(struct amdgpu_device *adev)
+ {
+ uint64_t gtt_size;
+ int r;
+ u64 vis_vram_limit;
+
++ /* initialize global references for vram/gtt */
+ r = amdgpu_ttm_global_init(adev);
+ if (r) {
+ return r;
+@@ -1659,7 +1942,8 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
+
+ /* We opt to avoid OOM on system pages allocations */
+ adev->mman.bdev.no_retry = true;
+-
++
++ /* Initialize VRAM pool with all of VRAM divided into pages */
+ r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
+ adev->gmc.real_vram_size >> PAGE_SHIFT);
+ if (r) {
+@@ -1689,6 +1973,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
+ return r;
+ }
+
++ /* allocate memory as required for VGA
++ * This is used for VGA emulation and pre-OS scanout buffers to
++ * avoid display artifacts while transitioning between pre-OS
++ * and driver. */
+ if (adev->gmc.stolen_size) {
+ r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+@@ -1701,6 +1989,8 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
+ DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
+ (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
+
++ /* Compute GTT size, either based on 3/4th of the RAM size
++ * or whatever the user passed on module init */
+ if (amdgpu_gtt_size == -1) {
+ struct sysinfo si;
+
+@@ -1713,6 +2003,8 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
+
+ /* reserve for DGMA import domain */
+ gtt_size -= (uint64_t)amdgpu_direct_gma_size << 20;
++
++ /* Initialize GTT memory pool */
+ r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
+ if (r) {
+ DRM_ERROR("Failed initializing GTT heap.\n");
+@@ -1724,6 +2016,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
+ amdgpu_direct_gma_init(adev);
+ amdgpu_ssg_init(adev);
+
++ /* Initialize various on-chip memory pools */
+ adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT;
+ adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT;
+ adev->gds.mem.cs_partition_size = adev->gds.mem.cs_partition_size << AMDGPU_GDS_SHIFT;
+@@ -1763,6 +2056,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
+ }
+ }
+
++ /* Register debugfs entries for amdgpu_ttm */
+ r = amdgpu_ttm_debugfs_init(adev);
+ if (r) {
+ DRM_ERROR("Failed to init debugfs\n");
+@@ -1771,11 +2065,19 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
+ return 0;
+ }
+
++/**
++ * amdgpu_ttm_late_init - Handle any late initialization for
++ * amdgpu_ttm
++*/
+ void amdgpu_ttm_late_init(struct amdgpu_device *adev)
+ {
++ /* return the VGA stolen memory (if any) back to VRAM */
+ amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
+ }
+
++/**
++ * amdgpu_ttm_fini - De-initialize the TTM memory pools
++*/
+ void amdgpu_ttm_fini(struct amdgpu_device *adev)
+ {
+ if (!adev->mman.initialized)
+@@ -2113,6 +2415,11 @@ static const struct drm_info_list amdgpu_ttm_dgma_debugfs_list[] = {
+ {"amdgpu_dgma_import_mm", amdgpu_mm_dump_table, 0, &ttm_pl_dgma_import}
+ };
+
++/**
++ * amdgpu_ttm_vram_read - Linear read access to VRAM
++ *
++ * Accesses VRAM via MMIO for debugging purposes.
++*/
+ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+ {
+@@ -2152,6 +2459,11 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
+ return result;
+ }
+
++/**
++ * amdgpu_ttm_vram_write - Linear write access to VRAM
++ *
++ * Accesses VRAM via MMIO for debugging purposes.
++*/
+ static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+ {
+@@ -2200,6 +2512,9 @@ static const struct file_operations amdgpu_ttm_vram_fops = {
+
+ #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
+
++/**
++ * amdgpu_ttm_gtt_read - Linear read access to GTT memory
++*/
+ static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+ {
+@@ -2249,6 +2564,13 @@ static const struct file_operations amdgpu_ttm_gtt_fops = {
+
+ #if !defined(OS_NAME_RHEL_6)
+
++/**
++ * amdgpu_iomem_read - Virtual read access to GPU mapped memory
++ *
++ * This function is used to read memory that has been mapped to the
++ * GPU and the known addresses are not physical addresses but instead
++ * bus addresses (e.g., what you'd put in an IB or ring buffer).
++*/
+ static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+ {
+@@ -2257,6 +2579,7 @@ static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
+ ssize_t result = 0;
+ int r;
+
++ /* retrieve the IOMMU domain if any for this device */
+ dom = iommu_get_domain_for_dev(adev->dev);
+
+ while (size) {
+@@ -2269,6 +2592,10 @@ static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
+
+ bytes = bytes < size ? bytes : size;
+
++ /* Translate the bus address to a physical address. If
++ * the domain is NULL it means there is no IOMMU active
++ * and the address translation is the identity
++ */
+ addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
+
+ pfn = addr >> PAGE_SHIFT;
+@@ -2293,6 +2620,13 @@ static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
+ return result;
+ }
+
++/**
++ * amdgpu_iomem_write - Virtual write access to GPU mapped memory
++ *
++ * This function is used to write memory that has been mapped to the
++ * GPU and the known addresses are not physical addresses but instead
++ * bus addresses (e.g., what you'd put in an IB or ring buffer).
++*/
+ static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+ {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4449-drm-amdgpu-display-remove-VEGAM-config-option.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4449-drm-amdgpu-display-remove-VEGAM-config-option.patch
new file mode 100644
index 00000000..0e5ecdaf
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4449-drm-amdgpu-display-remove-VEGAM-config-option.patch
@@ -0,0 +1,277 @@
+From 66404f69b0911ef3cda961eacf957a2d6ff34ed8 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Wed, 16 May 2018 08:39:58 -0500
+Subject: [PATCH 4449/5725] drm/amdgpu/display: remove VEGAM config option
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Leftover from bringup. No need to keep it around for
+upstream.
+
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/display/Kconfig | 7 -------
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 4 ----
+ drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c | 2 --
+ drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c | 2 --
+ drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c | 4 ----
+ drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 4 ----
+ drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c | 4 ----
+ drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c | 2 --
+ drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c | 2 --
+ drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c | 2 --
+ drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h | 2 --
+ drivers/gpu/drm/amd/display/include/dal_asic_id.h | 6 +-----
+ drivers/gpu/drm/amd/display/include/dal_types.h | 2 --
+ 13 files changed, 1 insertion(+), 42 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
+index 23fbd99..499f7dc 100644
+--- a/drivers/gpu/drm/amd/display/Kconfig
++++ b/drivers/gpu/drm/amd/display/Kconfig
+@@ -34,13 +34,6 @@ config DEBUG_KERNEL_DC
+ if you want to hit
+ kdgb_break in assert.
+
+-config DRM_AMD_DC_VEGAM
+- bool "VEGAM support"
+- depends on DRM_AMD_DC
+- help
+- Choose this option if you want to have
+- VEGAM support for display engine
+-
+ config DRM_AMD_DC_VG20
+ bool "Vega20 support"
+ depends on DRM_AMD_DC
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 21be9fd..b37005e 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -1510,9 +1510,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS12:
+-#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+ case CHIP_VEGAM:
+-#endif
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
+ case CHIP_VEGA20:
+@@ -1758,9 +1756,7 @@ static int dm_early_init(void *handle)
+ adev->mode_info.plane_type = dm_plane_type_default;
+ break;
+ case CHIP_POLARIS10:
+-#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+ case CHIP_VEGAM:
+-#endif
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
+index be066c4..253bbb1 100644
+--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
++++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
+@@ -51,9 +51,7 @@ bool dal_bios_parser_init_cmd_tbl_helper(
+ return true;
+
+ case DCE_VERSION_11_2:
+-#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+ case DCE_VERSION_11_22:
+-#endif
+ *h = dal_cmd_tbl_helper_dce112_get_table();
+ return true;
+
+diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+index 9b9e069..bbbcef5 100644
+--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
++++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+@@ -52,9 +52,7 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
+ return true;
+
+ case DCE_VERSION_11_2:
+-#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+ case DCE_VERSION_11_22:
+-#endif
+ *h = dal_cmd_tbl_helper_dce112_get_table2();
+ return true;
+ #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+index 4ee3c26..2c4e8f0 100644
+--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
++++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+@@ -59,10 +59,8 @@ static enum bw_calcs_version bw_calcs_version_from_asic_id(struct hw_asic_id asi
+ return BW_CALCS_VERSION_POLARIS10;
+ if (ASIC_REV_IS_POLARIS11_M(asic_id.hw_internal_rev))
+ return BW_CALCS_VERSION_POLARIS11;
+-#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+ if (ASIC_REV_IS_VEGAM(asic_id.hw_internal_rev))
+ return BW_CALCS_VERSION_VEGAM;
+-#endif
+ return BW_CALCS_VERSION_INVALID;
+
+ case FAMILY_AI:
+@@ -2151,11 +2149,9 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
+ dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0); /* todo: this is a bug*/
+ break;
+ case BW_CALCS_VERSION_POLARIS10:
+-#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+ /* TODO: Treat VEGAM the same as P10 for now
+ * Need to tune the para for VEGAM if needed */
+ case BW_CALCS_VERSION_VEGAM:
+-#endif
+ vbios.memory_type = bw_def_gddr5;
+ vbios.dram_channel_width_in_bits = 32;
+ vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index 412b48b..db4fdf6 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -79,10 +79,8 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
+ ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev)) {
+ dc_version = DCE_VERSION_11_2;
+ }
+-#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+ if (ASIC_REV_IS_VEGAM(asic_id.hw_internal_rev))
+ dc_version = DCE_VERSION_11_22;
+-#endif
+ break;
+ case FAMILY_AI:
+ dc_version = DCE_VERSION_12_0;
+@@ -129,9 +127,7 @@ struct resource_pool *dc_create_resource_pool(
+ num_virtual_links, dc, asic_id);
+ break;
+ case DCE_VERSION_11_2:
+-#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+ case DCE_VERSION_11_22:
+-#endif
+ res_pool = dce112_create_resource_pool(
+ num_virtual_links, dc);
+ break;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+index 223db98..0570e7e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+@@ -590,9 +590,7 @@ static uint32_t dce110_get_pix_clk_dividers(
+ pll_settings, pix_clk_params);
+ break;
+ case DCE_VERSION_11_2:
+-#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+ case DCE_VERSION_11_22:
+-#endif
+ case DCE_VERSION_12_0:
+ #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ case DCN_VERSION_1_0:
+@@ -982,9 +980,7 @@ static bool dce110_program_pix_clk(
+
+ break;
+ case DCE_VERSION_11_2:
+-#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+ case DCE_VERSION_11_22:
+-#endif
+ case DCE_VERSION_12_0:
+ #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ case DCN_VERSION_1_0:
+diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
+index 61fe484..0caee35 100644
+--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
++++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
+@@ -75,9 +75,7 @@ bool dal_hw_factory_init(
+ return true;
+ case DCE_VERSION_11_0:
+ case DCE_VERSION_11_2:
+-#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+ case DCE_VERSION_11_22:
+-#endif
+ dal_hw_factory_dce110_init(factory);
+ return true;
+ case DCE_VERSION_12_0:
+diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
+index 910ae2b7..55c7074 100644
+--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
++++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
+@@ -72,9 +72,7 @@ bool dal_hw_translate_init(
+ case DCE_VERSION_10_0:
+ case DCE_VERSION_11_0:
+ case DCE_VERSION_11_2:
+-#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+ case DCE_VERSION_11_22:
+-#endif
+ dal_hw_translate_dce110_init(translate);
+ return true;
+ case DCE_VERSION_12_0:
+diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
+index c3d7c32..14dc8c9 100644
+--- a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
++++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
+@@ -83,9 +83,7 @@ struct i2caux *dal_i2caux_create(
+ case DCE_VERSION_8_3:
+ return dal_i2caux_dce80_create(ctx);
+ case DCE_VERSION_11_2:
+-#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+ case DCE_VERSION_11_22:
+-#endif
+ return dal_i2caux_dce112_create(ctx);
+ case DCE_VERSION_11_0:
+ return dal_i2caux_dce110_create(ctx);
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h
+index 933ea7a..eece165 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h
+@@ -43,9 +43,7 @@ enum bw_calcs_version {
+ BW_CALCS_VERSION_POLARIS10,
+ BW_CALCS_VERSION_POLARIS11,
+ BW_CALCS_VERSION_POLARIS12,
+-#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+ BW_CALCS_VERSION_VEGAM,
+-#endif
+ BW_CALCS_VERSION_STONEY,
+ BW_CALCS_VERSION_VEGA10
+ };
+diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+index 77d2856..6aeb5a2 100644
+--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
++++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+@@ -86,6 +86,7 @@
+ #define VI_POLARIS10_P_A0 80
+ #define VI_POLARIS11_M_A0 90
+ #define VI_POLARIS12_V_A0 100
++#define VI_VEGAM_A0 110
+
+ #define VI_UNKNOWN 0xFF
+
+@@ -98,14 +99,9 @@
+ (eChipRev < VI_POLARIS11_M_A0))
+ #define ASIC_REV_IS_POLARIS11_M(eChipRev) ((eChipRev >= VI_POLARIS11_M_A0) && \
+ (eChipRev < VI_POLARIS12_V_A0))
+-#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+-#define VI_VEGAM_A0 110
+ #define ASIC_REV_IS_POLARIS12_V(eChipRev) ((eChipRev >= VI_POLARIS12_V_A0) && \
+ (eChipRev < VI_VEGAM_A0))
+ #define ASIC_REV_IS_VEGAM(eChipRev) (eChipRev >= VI_VEGAM_A0)
+-#else
+-#define ASIC_REV_IS_POLARIS12_V(eChipRev) (eChipRev >= VI_POLARIS12_V_A0)
+-#endif
+
+ /* DCE11 */
+ #define CZ_CARRIZO_A0 0x01
+diff --git a/drivers/gpu/drm/amd/display/include/dal_types.h b/drivers/gpu/drm/amd/display/include/dal_types.h
+index 5b1f8ce..840142b 100644
+--- a/drivers/gpu/drm/amd/display/include/dal_types.h
++++ b/drivers/gpu/drm/amd/display/include/dal_types.h
+@@ -40,9 +40,7 @@ enum dce_version {
+ DCE_VERSION_10_0,
+ DCE_VERSION_11_0,
+ DCE_VERSION_11_2,
+-#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+ DCE_VERSION_11_22,
+-#endif
+ DCE_VERSION_12_0,
+ DCE_VERSION_MAX,
+ DCN_VERSION_1_0,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4450-drm-amdgpu-display-remove-VEGA20-config-option.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4450-drm-amdgpu-display-remove-VEGA20-config-option.patch
new file mode 100644
index 00000000..6a73842a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4450-drm-amdgpu-display-remove-VEGA20-config-option.patch
@@ -0,0 +1,286 @@
+From fa457245eee8dc5b9077a385222775f4ecb10f42 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Wed, 16 May 2018 15:28:59 -0500
+Subject: [PATCH 4450/5725] drm/amdgpu/display: remove VEGA20 config option
+
+Leftover from bringup. No need to keep it around for
+upstream.
+
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/display/Kconfig | 8 -
+ drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 2 -
+ drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c | 6 -
+ .../drm/amd/display/dc/dce120/dce120_resource.c | 177 ---------------------
+ drivers/gpu/drm/amd/display/include/dal_asic_id.h | 2 -
+ 5 files changed, 195 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
+index 499f7dc..c3d49f8 100644
+--- a/drivers/gpu/drm/amd/display/Kconfig
++++ b/drivers/gpu/drm/amd/display/Kconfig
+@@ -34,12 +34,4 @@ config DEBUG_KERNEL_DC
+ if you want to hit
+ kdgb_break in assert.
+
+-config DRM_AMD_DC_VG20
+- bool "Vega20 support"
+- depends on DRM_AMD_DC
+- help
+- Choose this option if you want to have
+- Vega20 support for display engine
+-
+-
+ endmenu
+diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+index 4561673..b8cef7a 100644
+--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
++++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+@@ -1331,9 +1331,7 @@ static enum bp_result bios_parser_get_firmware_info(
+ result = get_firmware_info_v3_2(bp, info);
+ break;
+ case 3:
+-#ifdef CONFIG_DRM_AMD_DC_VG20
+ result = get_firmware_info_v3_2(bp, info);
+-#endif
+ break;
+ default:
+ break;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+index aa4cf30..f043e5e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+@@ -413,18 +413,12 @@ static int dce112_set_clock(
+ /*VBIOS will determine DPREFCLK frequency, so we don't set it*/
+ dce_clk_params.target_clock_frequency = 0;
+ dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK;
+-#ifndef CONFIG_DRM_AMD_DC_VG20
+- dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK =
+- (dce_clk_params.pll_id ==
+- CLOCK_SOURCE_COMBO_DISPLAY_PLL0);
+-#else
+ if (!ASICREV_IS_VEGA20_P(clk->ctx->asic_id.hw_internal_rev))
+ dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK =
+ (dce_clk_params.pll_id ==
+ CLOCK_SOURCE_COMBO_DISPLAY_PLL0);
+ else
+ dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = false;
+-#endif
+
+ bp->funcs->set_dce_clock(bp, &dce_clk_params);
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+index 545f35f..2d58dac 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+@@ -814,7 +814,6 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
+ dm_pp_notify_wm_clock_changes(dc->ctx, &clk_ranges);
+ }
+
+-#ifdef CONFIG_DRM_AMD_DC_VG20
+ static uint32_t read_pipe_fuses(struct dc_context *ctx)
+ {
+ uint32_t value = dm_read_reg_soc15(ctx, mmCC_DC_PIPE_DIS, 0);
+@@ -1020,182 +1019,6 @@ static bool construct(
+
+ return false;
+ }
+-#else
+-static bool construct(
+- uint8_t num_virtual_links,
+- struct dc *dc,
+- struct dce110_resource_pool *pool)
+-{
+- unsigned int i;
+- struct dc_context *ctx = dc->ctx;
+- struct irq_service_init_data irq_init_data;
+-
+- ctx->dc_bios->regs = &bios_regs;
+-
+- pool->base.res_cap = &res_cap;
+- pool->base.funcs = &dce120_res_pool_funcs;
+-
+- /* TODO: Fill more data from GreenlandAsicCapability.cpp */
+- pool->base.pipe_count = res_cap.num_timing_generator;
+- pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator;
+- pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+-
+- dc->caps.max_downscale_ratio = 200;
+- dc->caps.i2c_speed_in_khz = 100;
+- dc->caps.max_cursor_size = 128;
+- dc->caps.dual_link_dvi = true;
+-
+- dc->debug = debug_defaults;
+-
+- /*************************************************
+- * Create resources *
+- *************************************************/
+-
+- pool->base.clock_sources[DCE120_CLK_SRC_PLL0] =
+- dce120_clock_source_create(ctx, ctx->dc_bios,
+- CLOCK_SOURCE_COMBO_PHY_PLL0,
+- &clk_src_regs[0], false);
+- pool->base.clock_sources[DCE120_CLK_SRC_PLL1] =
+- dce120_clock_source_create(ctx, ctx->dc_bios,
+- CLOCK_SOURCE_COMBO_PHY_PLL1,
+- &clk_src_regs[1], false);
+- pool->base.clock_sources[DCE120_CLK_SRC_PLL2] =
+- dce120_clock_source_create(ctx, ctx->dc_bios,
+- CLOCK_SOURCE_COMBO_PHY_PLL2,
+- &clk_src_regs[2], false);
+- pool->base.clock_sources[DCE120_CLK_SRC_PLL3] =
+- dce120_clock_source_create(ctx, ctx->dc_bios,
+- CLOCK_SOURCE_COMBO_PHY_PLL3,
+- &clk_src_regs[3], false);
+- pool->base.clock_sources[DCE120_CLK_SRC_PLL4] =
+- dce120_clock_source_create(ctx, ctx->dc_bios,
+- CLOCK_SOURCE_COMBO_PHY_PLL4,
+- &clk_src_regs[4], false);
+- pool->base.clock_sources[DCE120_CLK_SRC_PLL5] =
+- dce120_clock_source_create(ctx, ctx->dc_bios,
+- CLOCK_SOURCE_COMBO_PHY_PLL5,
+- &clk_src_regs[5], false);
+- pool->base.clk_src_count = DCE120_CLK_SRC_TOTAL;
+-
+- pool->base.dp_clock_source =
+- dce120_clock_source_create(ctx, ctx->dc_bios,
+- CLOCK_SOURCE_ID_DP_DTO,
+- &clk_src_regs[0], true);
+-
+- for (i = 0; i < pool->base.clk_src_count; i++) {
+- if (pool->base.clock_sources[i] == NULL) {
+- dm_error("DC: failed to create clock sources!\n");
+- BREAK_TO_DEBUGGER();
+- goto clk_src_create_fail;
+- }
+- }
+-
+- pool->base.display_clock = dce120_disp_clk_create(ctx);
+- if (pool->base.display_clock == NULL) {
+- dm_error("DC: failed to create display clock!\n");
+- BREAK_TO_DEBUGGER();
+- goto disp_clk_create_fail;
+- }
+-
+- pool->base.dmcu = dce_dmcu_create(ctx,
+- &dmcu_regs,
+- &dmcu_shift,
+- &dmcu_mask);
+- if (pool->base.dmcu == NULL) {
+- dm_error("DC: failed to create dmcu!\n");
+- BREAK_TO_DEBUGGER();
+- goto res_create_fail;
+- }
+-
+- pool->base.abm = dce_abm_create(ctx,
+- &abm_regs,
+- &abm_shift,
+- &abm_mask);
+- if (pool->base.abm == NULL) {
+- dm_error("DC: failed to create abm!\n");
+- BREAK_TO_DEBUGGER();
+- goto res_create_fail;
+- }
+-
+- irq_init_data.ctx = dc->ctx;
+- pool->base.irqs = dal_irq_service_dce120_create(&irq_init_data);
+- if (!pool->base.irqs)
+- goto irqs_create_fail;
+-
+- for (i = 0; i < pool->base.pipe_count; i++) {
+- pool->base.timing_generators[i] =
+- dce120_timing_generator_create(
+- ctx,
+- i,
+- &dce120_tg_offsets[i]);
+- if (pool->base.timing_generators[i] == NULL) {
+- BREAK_TO_DEBUGGER();
+- dm_error("DC: failed to create tg!\n");
+- goto controller_create_fail;
+- }
+-
+- pool->base.mis[i] = dce120_mem_input_create(ctx, i);
+-
+- if (pool->base.mis[i] == NULL) {
+- BREAK_TO_DEBUGGER();
+- dm_error(
+- "DC: failed to create memory input!\n");
+- goto controller_create_fail;
+- }
+-
+- pool->base.ipps[i] = dce120_ipp_create(ctx, i);
+- if (pool->base.ipps[i] == NULL) {
+- BREAK_TO_DEBUGGER();
+- dm_error(
+- "DC: failed to create input pixel processor!\n");
+- goto controller_create_fail;
+- }
+-
+- pool->base.transforms[i] = dce120_transform_create(ctx, i);
+- if (pool->base.transforms[i] == NULL) {
+- BREAK_TO_DEBUGGER();
+- dm_error(
+- "DC: failed to create transform!\n");
+- goto res_create_fail;
+- }
+-
+- pool->base.opps[i] = dce120_opp_create(
+- ctx,
+- i);
+- if (pool->base.opps[i] == NULL) {
+- BREAK_TO_DEBUGGER();
+- dm_error(
+- "DC: failed to create output pixel processor!\n");
+- }
+- }
+-
+- if (!resource_construct(num_virtual_links, dc, &pool->base,
+- &res_create_funcs))
+- goto res_create_fail;
+-
+- /* Create hardware sequencer */
+- if (!dce120_hw_sequencer_create(dc))
+- goto controller_create_fail;
+-
+- dc->caps.max_planes = pool->base.pipe_count;
+-
+- bw_calcs_init(dc->bw_dceip, dc->bw_vbios, dc->ctx->asic_id);
+-
+- bw_calcs_data_update_from_pplib(dc);
+-
+- return true;
+-
+-irqs_create_fail:
+-controller_create_fail:
+-disp_clk_create_fail:
+-clk_src_create_fail:
+-res_create_fail:
+-
+- destruct(pool);
+-
+- return false;
+-}
+-#endif
+
+ struct resource_pool *dce120_create_resource_pool(
+ uint8_t num_virtual_links,
+diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+index 6aeb5a2..cac069d 100644
+--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
++++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+@@ -115,10 +115,8 @@
+ /* DCE12 */
+ #define AI_UNKNOWN 0xFF
+
+-#ifdef CONFIG_DRM_AMD_DC_VG20
+ #define AI_VEGA20_P_A0 40
+ #define ASICREV_IS_VEGA20_P(eChipRev) ((eChipRev >= AI_VEGA20_P_A0) && (eChipRev < AI_UNKNOWN))
+-#endif
+
+ #define AI_GREENLAND_P_A0 1
+ #define AI_GREENLAND_P_A1 2
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4451-drm-amdgpu-display-fix-vega12-20-handling-in-dal_asi.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4451-drm-amdgpu-display-fix-vega12-20-handling-in-dal_asi.patch
new file mode 100644
index 00000000..c2f3cd35
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4451-drm-amdgpu-display-fix-vega12-20-handling-in-dal_asi.patch
@@ -0,0 +1,45 @@
+From f02c33ab6a3199958c27034641d2c52a5d078b6d Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Wed, 16 May 2018 15:34:19 -0500
+Subject: [PATCH 4451/5725] drm/amdgpu/display: fix vega12/20 handling in
+ dal_asic_id.h
+
+- Remove unused ASICREV_IS_VEGA12_p() macro
+- Fix ASICREV_IS_VEGA12_P() macro to properly check against vega20
+
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/include/dal_asic_id.h | 8 +++-----
+ 1 file changed, 3 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+index cac069d..25029ed 100644
+--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
++++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+@@ -115,19 +115,17 @@
+ /* DCE12 */
+ #define AI_UNKNOWN 0xFF
+
+-#define AI_VEGA20_P_A0 40
+-#define ASICREV_IS_VEGA20_P(eChipRev) ((eChipRev >= AI_VEGA20_P_A0) && (eChipRev < AI_UNKNOWN))
+-
+ #define AI_GREENLAND_P_A0 1
+ #define AI_GREENLAND_P_A1 2
+ #define AI_UNKNOWN 0xFF
+
+ #define AI_VEGA12_P_A0 20
++#define AI_VEGA20_P_A0 40
+ #define ASICREV_IS_GREENLAND_M(eChipRev) (eChipRev < AI_VEGA12_P_A0)
+ #define ASICREV_IS_GREENLAND_P(eChipRev) (eChipRev < AI_VEGA12_P_A0)
+
+-#define ASICREV_IS_VEGA12_P(eChipRev) ((eChipRev >= AI_VEGA12_P_A0) && (eChipRev < AI_UNKNOWN))
+-#define ASICREV_IS_VEGA12_p(eChipRev) ((eChipRev >= AI_VEGA12_P_A0) && (eChipRev < AI_UNKNOWN))
++#define ASICREV_IS_VEGA12_P(eChipRev) ((eChipRev >= AI_VEGA12_P_A0) && (eChipRev < AI_VEGA20_P_A0))
++#define ASICREV_IS_VEGA20_P(eChipRev) ((eChipRev >= AI_VEGA20_P_A0) && (eChipRev < AI_UNKNOWN))
+
+ /* DCN1_0 */
+ #define INTERNAL_REV_RAVEN_A0 0x00 /* First spin of Raven */
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4452-drm-amd-pp-missing-curly-braces-in-smu7_enable_sclk_.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4452-drm-amd-pp-missing-curly-braces-in-smu7_enable_sclk_.patch
new file mode 100644
index 00000000..6b39953a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4452-drm-amd-pp-missing-curly-braces-in-smu7_enable_sclk_.patch
@@ -0,0 +1,40 @@
+From 20cdcc8b5ad6d1e7567a2daac46771012f76da3a Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Thu, 17 May 2018 15:56:05 +0300
+Subject: [PATCH 4452/5725] drm/amd/pp: missing curly braces in
+ smu7_enable_sclk_mclk_dpm()
+
+We added some more lines of code to this if statement but forgot to add
+curly braces.
+
+Fixes: 0c24e7ef233b ("drm/amd/powerplay: add specific changes for VEGAM in smu7_hwmgr.c")
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+index 3f0162d..f697a56 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+@@ -1019,7 +1019,7 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ /* enable SCLK dpm */
+- if (!data->sclk_dpm_key_disabled)
++ if (!data->sclk_dpm_key_disabled) {
+ if (hwmgr->chip_id == CHIP_VEGAM)
+ smu7_disable_sclk_vce_handshake(hwmgr);
+
+@@ -1027,6 +1027,7 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
+ (0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable)),
+ "Failed to enable SCLK DPM during DPM Start Function!",
+ return -EINVAL);
++ }
+
+ /* enable MCLK dpm */
+ if (0 == data->mclk_dpm_key_disabled) {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4453-drm-scheduler-fix-function-name-prefix-in-comments.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4453-drm-scheduler-fix-function-name-prefix-in-comments.patch
new file mode 100644
index 00000000..1d906b6e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4453-drm-scheduler-fix-function-name-prefix-in-comments.patch
@@ -0,0 +1,51 @@
+From d5c62b8ea051ce9124881dd894e0cc0185a8562d Mon Sep 17 00:00:00 2001
+From: Nayan Deshmukh <nayan26deshmukh@gmail.com>
+Date: Wed, 16 May 2018 18:54:18 +0530
+Subject: [PATCH 4453/5725] drm/scheduler: fix function name prefix in comments
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+That got missed while moving the files outside of amdgpu.
+
+Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/scheduler/sched_fence.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
+index d799e63..b3d7b82 100644
+--- a/drivers/gpu/drm/scheduler/sched_fence.c
++++ b/drivers/gpu/drm/scheduler/sched_fence.c
+@@ -87,7 +87,7 @@ static bool drm_sched_fence_enable_signaling(struct dma_fence *f)
+ }
+
+ /**
+- * amd_sched_fence_free - free up the fence memory
++ * drm_sched_fence_free - free up the fence memory
+ *
+ * @rcu: RCU callback head
+ *
+@@ -103,7 +103,7 @@ static void drm_sched_fence_free(struct rcu_head *rcu)
+ }
+
+ /**
+- * amd_sched_fence_release_scheduled - callback that fence can be freed
++ * drm_sched_fence_release_scheduled - callback that fence can be freed
+ *
+ * @fence: fence
+ *
+@@ -118,7 +118,7 @@ static void drm_sched_fence_release_scheduled(struct dma_fence *f)
+ }
+
+ /**
+- * amd_sched_fence_release_finished - drop extra reference
++ * drm_sched_fence_release_finished - drop extra reference
+ *
+ * @f: fence
+ *
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4454-drm-amd-display-Cleanup-unused-SetPlaneConfig.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4454-drm-amd-display-Cleanup-unused-SetPlaneConfig.patch
new file mode 100644
index 00000000..b1bc3554
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4454-drm-amd-display-Cleanup-unused-SetPlaneConfig.patch
@@ -0,0 +1,148 @@
+From d70c2583735eb9e63155c67ec8cbc752a2234a4e Mon Sep 17 00:00:00 2001
+From: Anthony Koo <Anthony.Koo@amd.com>
+Date: Thu, 12 Apr 2018 22:40:02 -0400
+Subject: [PATCH 4454/5725] drm/amd/display: Cleanup unused SetPlaneConfig
+
+Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
+Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ .../amd/display/dc/dce110/dce110_hw_sequencer.c | 69 ----------------------
+ .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 10 ----
+ drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 5 --
+ 3 files changed, 84 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index 45578d4..8a61b10 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -2286,74 +2286,6 @@ static void program_gamut_remap(struct pipe_ctx *pipe_ctx)
+
+ pipe_ctx->plane_res.xfm->funcs->transform_set_gamut_remap(pipe_ctx->plane_res.xfm, &adjust);
+ }
+-
+-/**
+- * TODO REMOVE, USE UPDATE INSTEAD
+- */
+-static void set_plane_config(
+- const struct dc *dc,
+- struct pipe_ctx *pipe_ctx,
+- struct resource_context *res_ctx)
+-{
+- struct mem_input *mi = pipe_ctx->plane_res.mi;
+- struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+- struct xfm_grph_csc_adjustment adjust;
+- struct out_csc_color_matrix tbl_entry;
+- unsigned int i;
+-
+- memset(&adjust, 0, sizeof(adjust));
+- memset(&tbl_entry, 0, sizeof(tbl_entry));
+- adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
+-
+- dce_enable_fe_clock(dc->hwseq, mi->inst, true);
+-
+- set_default_colors(pipe_ctx);
+- if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
+- tbl_entry.color_space =
+- pipe_ctx->stream->output_color_space;
+-
+- for (i = 0; i < 12; i++)
+- tbl_entry.regval[i] =
+- pipe_ctx->stream->csc_color_matrix.matrix[i];
+-
+- pipe_ctx->plane_res.xfm->funcs->opp_set_csc_adjustment
+- (pipe_ctx->plane_res.xfm, &tbl_entry);
+- }
+-
+- if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
+- adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
+-
+- for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
+- adjust.temperature_matrix[i] =
+- pipe_ctx->stream->gamut_remap_matrix.matrix[i];
+- }
+-
+- pipe_ctx->plane_res.xfm->funcs->transform_set_gamut_remap(pipe_ctx->plane_res.xfm, &adjust);
+-
+- pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0;
+- program_scaler(dc, pipe_ctx);
+-
+- program_surface_visibility(dc, pipe_ctx);
+-
+- mi->funcs->mem_input_program_surface_config(
+- mi,
+- plane_state->format,
+- &plane_state->tiling_info,
+- &plane_state->plane_size,
+- plane_state->rotation,
+- NULL,
+- false);
+- if (mi->funcs->set_blank)
+- mi->funcs->set_blank(mi, pipe_ctx->plane_state->visible);
+-
+- if (dc->config.gpu_vm_support)
+- mi->funcs->mem_input_program_pte_vm(
+- pipe_ctx->plane_res.mi,
+- plane_state->format,
+- &plane_state->tiling_info,
+- plane_state->rotation);
+-}
+-
+ static void update_plane_addr(const struct dc *dc,
+ struct pipe_ctx *pipe_ctx)
+ {
+@@ -3040,7 +2972,6 @@ static const struct hw_sequencer_funcs dce110_funcs = {
+ .init_hw = init_hw,
+ .apply_ctx_to_hw = dce110_apply_ctx_to_hw,
+ .apply_ctx_for_surface = dce110_apply_ctx_for_surface,
+- .set_plane_config = set_plane_config,
+ .update_plane_addr = update_plane_addr,
+ .update_pending_status = dce110_update_pending_status,
+ .set_input_transfer_func = dce110_set_input_transfer_func,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index fe52cbc..b88e020 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -2510,15 +2510,6 @@ static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
+ set_static_screen_control(pipe_ctx[i]->stream_res.tg, value);
+ }
+
+-static void set_plane_config(
+- const struct dc *dc,
+- struct pipe_ctx *pipe_ctx,
+- struct resource_context *res_ctx)
+-{
+- /* TODO */
+- program_gamut_remap(pipe_ctx);
+-}
+-
+ static void dcn10_config_stereo_parameters(
+ struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
+ {
+@@ -2696,7 +2687,6 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
+ .init_hw = dcn10_init_hw,
+ .apply_ctx_to_hw = dce110_apply_ctx_to_hw,
+ .apply_ctx_for_surface = dcn10_apply_ctx_for_surface,
+- .set_plane_config = set_plane_config,
+ .update_plane_addr = dcn10_update_plane_addr,
+ .update_dchub = dcn10_update_dchub,
+ .update_pending_status = dcn10_update_pending_status,
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+index 29abf3e..63fc6c4 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+@@ -83,11 +83,6 @@ struct hw_sequencer_funcs {
+ int num_planes,
+ struct dc_state *context);
+
+- void (*set_plane_config)(
+- const struct dc *dc,
+- struct pipe_ctx *pipe_ctx,
+- struct resource_context *res_ctx);
+-
+ void (*program_gamut_remap)(
+ struct pipe_ctx *pipe_ctx);
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4455-drm-amd-display-get-rid-of-32.32-unsigned-fixed-poin.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4455-drm-amd-display-get-rid-of-32.32-unsigned-fixed-poin.patch
new file mode 100644
index 00000000..f6efc6db
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4455-drm-amd-display-get-rid-of-32.32-unsigned-fixed-poin.patch
@@ -0,0 +1,3536 @@
+From f8dd6c111ee12e09a00ca4ef0de5933421fb0ad2 Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Wed, 18 Apr 2018 11:37:53 -0400
+Subject: [PATCH 4455/5725] drm/amd/display: get rid of 32.32 unsigned fixed
+ point
+
+32.32 is redundant, 31.32 does everything we use 32.32 for
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ .../drm/amd/display/amdgpu_dm/amdgpu_dm_color.c | 14 +-
+ drivers/gpu/drm/amd/display/dc/basics/Makefile | 2 +-
+ drivers/gpu/drm/amd/display/dc/basics/conversion.c | 28 +-
+ drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c | 176 ++++----
+ drivers/gpu/drm/amd/display/dc/basics/fixpt32_32.c | 161 --------
+ .../gpu/drm/amd/display/dc/calcs/custom_float.c | 46 +--
+ drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 8 +-
+ drivers/gpu/drm/amd/display/dc/core/dc_link.c | 12 +-
+ drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 68 ++--
+ drivers/gpu/drm/amd/display/dc/dc_dp_types.h | 2 +
+ drivers/gpu/drm/amd/display/dc/dc_types.h | 2 +-
+ drivers/gpu/drm/amd/display/dc/dce/dce_abm.c | 2 +-
+ .../gpu/drm/amd/display/dc/dce/dce_clock_source.c | 60 +--
+ drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c | 26 +-
+ drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c | 2 +-
+ drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c | 6 +-
+ .../gpu/drm/amd/display/dc/dce/dce_scl_filters.c | 48 +--
+ .../drm/amd/display/dc/dce/dce_stream_encoder.c | 8 +-
+ drivers/gpu/drm/amd/display/dc/dce/dce_transform.c | 26 +-
+ .../amd/display/dc/dce110/dce110_hw_sequencer.c | 36 +-
+ .../drm/amd/display/dc/dce110/dce110_transform_v.c | 8 +-
+ .../gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c | 86 ++--
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | 2 +-
+ .../gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c | 6 +-
+ .../gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c | 38 +-
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 4 +-
+ .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 28 +-
+ .../amd/display/dc/dcn10/dcn10_stream_encoder.c | 8 +-
+ drivers/gpu/drm/amd/display/dc/irq_types.h | 2 +
+ drivers/gpu/drm/amd/display/include/fixed31_32.h | 118 +++---
+ drivers/gpu/drm/amd/display/include/fixed32_32.h | 129 ------
+ .../drm/amd/display/modules/color/color_gamma.c | 446 ++++++++++-----------
+ 32 files changed, 661 insertions(+), 947 deletions(-)
+ delete mode 100644 drivers/gpu/drm/amd/display/dc/basics/fixpt32_32.c
+ delete mode 100644 drivers/gpu/drm/amd/display/include/fixed32_32.h
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+index e3d90e9..b329393 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+@@ -88,9 +88,9 @@ static void __drm_lut_to_dc_gamma(struct drm_color_lut *lut,
+ g = drm_color_lut_extract(lut[i].green, 16);
+ b = drm_color_lut_extract(lut[i].blue, 16);
+
+- gamma->entries.red[i] = dal_fixed31_32_from_int(r);
+- gamma->entries.green[i] = dal_fixed31_32_from_int(g);
+- gamma->entries.blue[i] = dal_fixed31_32_from_int(b);
++ gamma->entries.red[i] = dc_fixpt_from_int(r);
++ gamma->entries.green[i] = dc_fixpt_from_int(g);
++ gamma->entries.blue[i] = dc_fixpt_from_int(b);
+ }
+ return;
+ }
+@@ -101,9 +101,9 @@ static void __drm_lut_to_dc_gamma(struct drm_color_lut *lut,
+ g = drm_color_lut_extract(lut[i].green, 16);
+ b = drm_color_lut_extract(lut[i].blue, 16);
+
+- gamma->entries.red[i] = dal_fixed31_32_from_fraction(r, MAX_DRM_LUT_VALUE);
+- gamma->entries.green[i] = dal_fixed31_32_from_fraction(g, MAX_DRM_LUT_VALUE);
+- gamma->entries.blue[i] = dal_fixed31_32_from_fraction(b, MAX_DRM_LUT_VALUE);
++ gamma->entries.red[i] = dc_fixpt_from_fraction(r, MAX_DRM_LUT_VALUE);
++ gamma->entries.green[i] = dc_fixpt_from_fraction(g, MAX_DRM_LUT_VALUE);
++ gamma->entries.blue[i] = dc_fixpt_from_fraction(b, MAX_DRM_LUT_VALUE);
+ }
+ }
+
+@@ -208,7 +208,7 @@ void amdgpu_dm_set_ctm(struct dm_crtc_state *crtc)
+ for (i = 0; i < 12; i++) {
+ /* Skip 4th element */
+ if (i % 4 == 3) {
+- stream->gamut_remap_matrix.matrix[i] = dal_fixed31_32_zero;
++ stream->gamut_remap_matrix.matrix[i] = dc_fixpt_zero;
+ continue;
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/basics/Makefile b/drivers/gpu/drm/amd/display/dc/basics/Makefile
+index ddc7fab..fbf4dbf 100644
+--- a/drivers/gpu/drm/amd/display/dc/basics/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/basics/Makefile
+@@ -3,7 +3,7 @@
+ # It provides the general basic services required by other DAL
+ # subcomponents.
+
+-BASICS = conversion.o fixpt31_32.o fixpt32_32.o \
++BASICS = conversion.o fixpt31_32.o \
+ logger.o log_helpers.o vector.o
+
+ AMD_DAL_BASICS = $(addprefix $(AMDDALPATH)/dc/basics/,$(BASICS))
+diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.c b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
+index 3109649..50b47f1 100644
+--- a/drivers/gpu/drm/amd/display/dc/basics/conversion.c
++++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
+@@ -41,22 +41,22 @@ uint16_t fixed_point_to_int_frac(
+
+ uint16_t result;
+
+- uint16_t d = (uint16_t)dal_fixed31_32_floor(
+- dal_fixed31_32_abs(
++ uint16_t d = (uint16_t)dc_fixpt_floor(
++ dc_fixpt_abs(
+ arg));
+
+ if (d <= (uint16_t)(1 << integer_bits) - (1 / (uint16_t)divisor))
+- numerator = (uint16_t)dal_fixed31_32_round(
+- dal_fixed31_32_mul_int(
++ numerator = (uint16_t)dc_fixpt_round(
++ dc_fixpt_mul_int(
+ arg,
+ divisor));
+ else {
+- numerator = dal_fixed31_32_floor(
+- dal_fixed31_32_sub(
+- dal_fixed31_32_from_int(
++ numerator = dc_fixpt_floor(
++ dc_fixpt_sub(
++ dc_fixpt_from_int(
+ 1LL << integer_bits),
+- dal_fixed31_32_recip(
+- dal_fixed31_32_from_int(
++ dc_fixpt_recip(
++ dc_fixpt_from_int(
+ divisor))));
+ }
+
+@@ -66,8 +66,8 @@ uint16_t fixed_point_to_int_frac(
+ result = (uint16_t)(
+ (1 << (integer_bits + fractional_bits + 1)) + numerator);
+
+- if ((result != 0) && dal_fixed31_32_lt(
+- arg, dal_fixed31_32_zero))
++ if ((result != 0) && dc_fixpt_lt(
++ arg, dc_fixpt_zero))
+ result |= 1 << (integer_bits + fractional_bits);
+
+ return result;
+@@ -84,15 +84,15 @@ void convert_float_matrix(
+ uint32_t buffer_size)
+ {
+ const struct fixed31_32 min_2_13 =
+- dal_fixed31_32_from_fraction(S2D13_MIN, DIVIDER);
++ dc_fixpt_from_fraction(S2D13_MIN, DIVIDER);
+ const struct fixed31_32 max_2_13 =
+- dal_fixed31_32_from_fraction(S2D13_MAX, DIVIDER);
++ dc_fixpt_from_fraction(S2D13_MAX, DIVIDER);
+ uint32_t i;
+
+ for (i = 0; i < buffer_size; ++i) {
+ uint32_t reg_value =
+ fixed_point_to_int_frac(
+- dal_fixed31_32_clamp(
++ dc_fixpt_clamp(
+ flt[i],
+ min_2_13,
+ max_2_13),
+diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
+index 7191c32..e398ecd 100644
+--- a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
++++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
+@@ -64,7 +64,7 @@ static inline unsigned long long complete_integer_division_u64(
+ #define GET_FRACTIONAL_PART(x) \
+ (FRACTIONAL_PART_MASK & (x))
+
+-struct fixed31_32 dal_fixed31_32_from_fraction(
++struct fixed31_32 dc_fixpt_from_fraction(
+ long long numerator,
+ long long denominator)
+ {
+@@ -118,7 +118,7 @@ struct fixed31_32 dal_fixed31_32_from_fraction(
+ return res;
+ }
+
+-struct fixed31_32 dal_fixed31_32_from_int_nonconst(
++struct fixed31_32 dc_fixpt_from_int_nonconst(
+ long long arg)
+ {
+ struct fixed31_32 res;
+@@ -130,7 +130,7 @@ struct fixed31_32 dal_fixed31_32_from_int_nonconst(
+ return res;
+ }
+
+-struct fixed31_32 dal_fixed31_32_shl(
++struct fixed31_32 dc_fixpt_shl(
+ struct fixed31_32 arg,
+ unsigned char shift)
+ {
+@@ -144,7 +144,7 @@ struct fixed31_32 dal_fixed31_32_shl(
+ return res;
+ }
+
+-struct fixed31_32 dal_fixed31_32_add(
++struct fixed31_32 dc_fixpt_add(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+ {
+@@ -158,7 +158,7 @@ struct fixed31_32 dal_fixed31_32_add(
+ return res;
+ }
+
+-struct fixed31_32 dal_fixed31_32_sub(
++struct fixed31_32 dc_fixpt_sub(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+ {
+@@ -172,7 +172,7 @@ struct fixed31_32 dal_fixed31_32_sub(
+ return res;
+ }
+
+-struct fixed31_32 dal_fixed31_32_mul(
++struct fixed31_32 dc_fixpt_mul(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+ {
+@@ -213,7 +213,7 @@ struct fixed31_32 dal_fixed31_32_mul(
+ tmp = arg1_fra * arg2_fra;
+
+ tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) +
+- (tmp >= (unsigned long long)dal_fixed31_32_half.value);
++ (tmp >= (unsigned long long)dc_fixpt_half.value);
+
+ ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
+
+@@ -225,7 +225,7 @@ struct fixed31_32 dal_fixed31_32_mul(
+ return res;
+ }
+
+-struct fixed31_32 dal_fixed31_32_sqr(
++struct fixed31_32 dc_fixpt_sqr(
+ struct fixed31_32 arg)
+ {
+ struct fixed31_32 res;
+@@ -257,7 +257,7 @@ struct fixed31_32 dal_fixed31_32_sqr(
+ tmp = arg_fra * arg_fra;
+
+ tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) +
+- (tmp >= (unsigned long long)dal_fixed31_32_half.value);
++ (tmp >= (unsigned long long)dc_fixpt_half.value);
+
+ ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
+
+@@ -266,7 +266,7 @@ struct fixed31_32 dal_fixed31_32_sqr(
+ return res;
+ }
+
+-struct fixed31_32 dal_fixed31_32_recip(
++struct fixed31_32 dc_fixpt_recip(
+ struct fixed31_32 arg)
+ {
+ /*
+@@ -276,41 +276,41 @@ struct fixed31_32 dal_fixed31_32_recip(
+
+ ASSERT(arg.value);
+
+- return dal_fixed31_32_from_fraction(
+- dal_fixed31_32_one.value,
++ return dc_fixpt_from_fraction(
++ dc_fixpt_one.value,
+ arg.value);
+ }
+
+-struct fixed31_32 dal_fixed31_32_sinc(
++struct fixed31_32 dc_fixpt_sinc(
+ struct fixed31_32 arg)
+ {
+ struct fixed31_32 square;
+
+- struct fixed31_32 res = dal_fixed31_32_one;
++ struct fixed31_32 res = dc_fixpt_one;
+
+ int n = 27;
+
+ struct fixed31_32 arg_norm = arg;
+
+- if (dal_fixed31_32_le(
+- dal_fixed31_32_two_pi,
+- dal_fixed31_32_abs(arg))) {
+- arg_norm = dal_fixed31_32_sub(
++ if (dc_fixpt_le(
++ dc_fixpt_two_pi,
++ dc_fixpt_abs(arg))) {
++ arg_norm = dc_fixpt_sub(
+ arg_norm,
+- dal_fixed31_32_mul_int(
+- dal_fixed31_32_two_pi,
++ dc_fixpt_mul_int(
++ dc_fixpt_two_pi,
+ (int)div64_s64(
+ arg_norm.value,
+- dal_fixed31_32_two_pi.value)));
++ dc_fixpt_two_pi.value)));
+ }
+
+- square = dal_fixed31_32_sqr(arg_norm);
++ square = dc_fixpt_sqr(arg_norm);
+
+ do {
+- res = dal_fixed31_32_sub(
+- dal_fixed31_32_one,
+- dal_fixed31_32_div_int(
+- dal_fixed31_32_mul(
++ res = dc_fixpt_sub(
++ dc_fixpt_one,
++ dc_fixpt_div_int(
++ dc_fixpt_mul(
+ square,
+ res),
+ n * (n - 1)));
+@@ -319,37 +319,37 @@ struct fixed31_32 dal_fixed31_32_sinc(
+ } while (n > 2);
+
+ if (arg.value != arg_norm.value)
+- res = dal_fixed31_32_div(
+- dal_fixed31_32_mul(res, arg_norm),
++ res = dc_fixpt_div(
++ dc_fixpt_mul(res, arg_norm),
+ arg);
+
+ return res;
+ }
+
+-struct fixed31_32 dal_fixed31_32_sin(
++struct fixed31_32 dc_fixpt_sin(
+ struct fixed31_32 arg)
+ {
+- return dal_fixed31_32_mul(
++ return dc_fixpt_mul(
+ arg,
+- dal_fixed31_32_sinc(arg));
++ dc_fixpt_sinc(arg));
+ }
+
+-struct fixed31_32 dal_fixed31_32_cos(
++struct fixed31_32 dc_fixpt_cos(
+ struct fixed31_32 arg)
+ {
+ /* TODO implement argument normalization */
+
+- const struct fixed31_32 square = dal_fixed31_32_sqr(arg);
++ const struct fixed31_32 square = dc_fixpt_sqr(arg);
+
+- struct fixed31_32 res = dal_fixed31_32_one;
++ struct fixed31_32 res = dc_fixpt_one;
+
+ int n = 26;
+
+ do {
+- res = dal_fixed31_32_sub(
+- dal_fixed31_32_one,
+- dal_fixed31_32_div_int(
+- dal_fixed31_32_mul(
++ res = dc_fixpt_sub(
++ dc_fixpt_one,
++ dc_fixpt_div_int(
++ dc_fixpt_mul(
+ square,
+ res),
+ n * (n - 1)));
+@@ -372,31 +372,31 @@ static struct fixed31_32 fixed31_32_exp_from_taylor_series(
+ {
+ unsigned int n = 9;
+
+- struct fixed31_32 res = dal_fixed31_32_from_fraction(
++ struct fixed31_32 res = dc_fixpt_from_fraction(
+ n + 2,
+ n + 1);
+ /* TODO find correct res */
+
+- ASSERT(dal_fixed31_32_lt(arg, dal_fixed31_32_one));
++ ASSERT(dc_fixpt_lt(arg, dc_fixpt_one));
+
+ do
+- res = dal_fixed31_32_add(
+- dal_fixed31_32_one,
+- dal_fixed31_32_div_int(
+- dal_fixed31_32_mul(
++ res = dc_fixpt_add(
++ dc_fixpt_one,
++ dc_fixpt_div_int(
++ dc_fixpt_mul(
+ arg,
+ res),
+ n));
+ while (--n != 1);
+
+- return dal_fixed31_32_add(
+- dal_fixed31_32_one,
+- dal_fixed31_32_mul(
++ return dc_fixpt_add(
++ dc_fixpt_one,
++ dc_fixpt_mul(
+ arg,
+ res));
+ }
+
+-struct fixed31_32 dal_fixed31_32_exp(
++struct fixed31_32 dc_fixpt_exp(
+ struct fixed31_32 arg)
+ {
+ /*
+@@ -406,44 +406,44 @@ struct fixed31_32 dal_fixed31_32_exp(
+ * where m = round(x / ln(2)), r = x - m * ln(2)
+ */
+
+- if (dal_fixed31_32_le(
+- dal_fixed31_32_ln2_div_2,
+- dal_fixed31_32_abs(arg))) {
+- int m = dal_fixed31_32_round(
+- dal_fixed31_32_div(
++ if (dc_fixpt_le(
++ dc_fixpt_ln2_div_2,
++ dc_fixpt_abs(arg))) {
++ int m = dc_fixpt_round(
++ dc_fixpt_div(
+ arg,
+- dal_fixed31_32_ln2));
++ dc_fixpt_ln2));
+
+- struct fixed31_32 r = dal_fixed31_32_sub(
++ struct fixed31_32 r = dc_fixpt_sub(
+ arg,
+- dal_fixed31_32_mul_int(
+- dal_fixed31_32_ln2,
++ dc_fixpt_mul_int(
++ dc_fixpt_ln2,
+ m));
+
+ ASSERT(m != 0);
+
+- ASSERT(dal_fixed31_32_lt(
+- dal_fixed31_32_abs(r),
+- dal_fixed31_32_one));
++ ASSERT(dc_fixpt_lt(
++ dc_fixpt_abs(r),
++ dc_fixpt_one));
+
+ if (m > 0)
+- return dal_fixed31_32_shl(
++ return dc_fixpt_shl(
+ fixed31_32_exp_from_taylor_series(r),
+ (unsigned char)m);
+ else
+- return dal_fixed31_32_div_int(
++ return dc_fixpt_div_int(
+ fixed31_32_exp_from_taylor_series(r),
+ 1LL << -m);
+ } else if (arg.value != 0)
+ return fixed31_32_exp_from_taylor_series(arg);
+ else
+- return dal_fixed31_32_one;
++ return dc_fixpt_one;
+ }
+
+-struct fixed31_32 dal_fixed31_32_log(
++struct fixed31_32 dc_fixpt_log(
+ struct fixed31_32 arg)
+ {
+- struct fixed31_32 res = dal_fixed31_32_neg(dal_fixed31_32_one);
++ struct fixed31_32 res = dc_fixpt_neg(dc_fixpt_one);
+ /* TODO improve 1st estimation */
+
+ struct fixed31_32 error;
+@@ -453,15 +453,15 @@ struct fixed31_32 dal_fixed31_32_log(
+ /* TODO if arg is zero, return -INF */
+
+ do {
+- struct fixed31_32 res1 = dal_fixed31_32_add(
+- dal_fixed31_32_sub(
++ struct fixed31_32 res1 = dc_fixpt_add(
++ dc_fixpt_sub(
+ res,
+- dal_fixed31_32_one),
+- dal_fixed31_32_div(
++ dc_fixpt_one),
++ dc_fixpt_div(
+ arg,
+- dal_fixed31_32_exp(res)));
++ dc_fixpt_exp(res)));
+
+- error = dal_fixed31_32_sub(
++ error = dc_fixpt_sub(
+ res,
+ res1);
+
+@@ -472,17 +472,17 @@ struct fixed31_32 dal_fixed31_32_log(
+ return res;
+ }
+
+-struct fixed31_32 dal_fixed31_32_pow(
++struct fixed31_32 dc_fixpt_pow(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+ {
+- return dal_fixed31_32_exp(
+- dal_fixed31_32_mul(
+- dal_fixed31_32_log(arg1),
++ return dc_fixpt_exp(
++ dc_fixpt_mul(
++ dc_fixpt_log(arg1),
+ arg2));
+ }
+
+-int dal_fixed31_32_floor(
++int dc_fixpt_floor(
+ struct fixed31_32 arg)
+ {
+ unsigned long long arg_value = abs_i64(arg.value);
+@@ -493,12 +493,12 @@ int dal_fixed31_32_floor(
+ return -(int)GET_INTEGER_PART(arg_value);
+ }
+
+-int dal_fixed31_32_round(
++int dc_fixpt_round(
+ struct fixed31_32 arg)
+ {
+ unsigned long long arg_value = abs_i64(arg.value);
+
+- const long long summand = dal_fixed31_32_half.value;
++ const long long summand = dc_fixpt_half.value;
+
+ ASSERT(LLONG_MAX - (long long)arg_value >= summand);
+
+@@ -510,13 +510,13 @@ int dal_fixed31_32_round(
+ return -(int)GET_INTEGER_PART(arg_value);
+ }
+
+-int dal_fixed31_32_ceil(
++int dc_fixpt_ceil(
+ struct fixed31_32 arg)
+ {
+ unsigned long long arg_value = abs_i64(arg.value);
+
+- const long long summand = dal_fixed31_32_one.value -
+- dal_fixed31_32_epsilon.value;
++ const long long summand = dc_fixpt_one.value -
++ dc_fixpt_epsilon.value;
+
+ ASSERT(LLONG_MAX - (long long)arg_value >= summand);
+
+@@ -531,7 +531,7 @@ int dal_fixed31_32_ceil(
+ /* this function is a generic helper to translate fixed point value to
+ * specified integer format that will consist of integer_bits integer part and
+ * fractional_bits fractional part. For example it is used in
+- * dal_fixed31_32_u2d19 to receive 2 bits integer part and 19 bits fractional
++ * dc_fixpt_u2d19 to receive 2 bits integer part and 19 bits fractional
+ * part in 32 bits. It is used in hw programming (scaler)
+ */
+
+@@ -570,35 +570,35 @@ static inline unsigned int clamp_ux_dy(
+ return min_clamp;
+ }
+
+-unsigned int dal_fixed31_32_u2d19(
++unsigned int dc_fixpt_u2d19(
+ struct fixed31_32 arg)
+ {
+ return ux_dy(arg.value, 2, 19);
+ }
+
+-unsigned int dal_fixed31_32_u0d19(
++unsigned int dc_fixpt_u0d19(
+ struct fixed31_32 arg)
+ {
+ return ux_dy(arg.value, 0, 19);
+ }
+
+-unsigned int dal_fixed31_32_clamp_u0d14(
++unsigned int dc_fixpt_clamp_u0d14(
+ struct fixed31_32 arg)
+ {
+ return clamp_ux_dy(arg.value, 0, 14, 1);
+ }
+
+-unsigned int dal_fixed31_32_clamp_u0d10(
++unsigned int dc_fixpt_clamp_u0d10(
+ struct fixed31_32 arg)
+ {
+ return clamp_ux_dy(arg.value, 0, 10, 1);
+ }
+
+-int dal_fixed31_32_s4d19(
++int dc_fixpt_s4d19(
+ struct fixed31_32 arg)
+ {
+ if (arg.value < 0)
+- return -(int)ux_dy(dal_fixed31_32_abs(arg).value, 4, 19);
++ return -(int)ux_dy(dc_fixpt_abs(arg).value, 4, 19);
+ else
+ return ux_dy(arg.value, 4, 19);
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt32_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt32_32.c
+deleted file mode 100644
+index 4d3aaa8..0000000
+--- a/drivers/gpu/drm/amd/display/dc/basics/fixpt32_32.c
++++ /dev/null
+@@ -1,161 +0,0 @@
+-/*
+- * Copyright 2012-15 Advanced Micro Devices, Inc.
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included in
+- * all copies or substantial portions of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+- * OTHER DEALINGS IN THE SOFTWARE.
+- *
+- * Authors: AMD
+- *
+- */
+-
+-#include "dm_services.h"
+-#include "include/fixed32_32.h"
+-
+-static uint64_t u64_div(uint64_t n, uint64_t d)
+-{
+- uint32_t i = 0;
+- uint64_t r;
+- uint64_t q = div64_u64_rem(n, d, &r);
+-
+- for (i = 0; i < 32; ++i) {
+- uint64_t sbit = q & (1ULL<<63);
+-
+- r <<= 1;
+- r |= sbit ? 1 : 0;
+- q <<= 1;
+- if (r >= d) {
+- r -= d;
+- q |= 1;
+- }
+- }
+-
+- if (2*r >= d)
+- q += 1;
+- return q;
+-}
+-
+-struct fixed32_32 dal_fixed32_32_from_fraction(uint32_t n, uint32_t d)
+-{
+- struct fixed32_32 fx;
+-
+- fx.value = u64_div((uint64_t)n << 32, (uint64_t)d << 32);
+- return fx;
+-}
+-
+-struct fixed32_32 dal_fixed32_32_add(
+- struct fixed32_32 lhs,
+- struct fixed32_32 rhs)
+-{
+- struct fixed32_32 fx = {lhs.value + rhs.value};
+-
+- ASSERT(fx.value >= rhs.value);
+- return fx;
+-}
+-
+-struct fixed32_32 dal_fixed32_32_add_int(struct fixed32_32 lhs, uint32_t rhs)
+-{
+- struct fixed32_32 fx = {lhs.value + ((uint64_t)rhs << 32)};
+-
+- ASSERT(fx.value >= (uint64_t)rhs << 32);
+- return fx;
+-
+-}
+-struct fixed32_32 dal_fixed32_32_sub(
+- struct fixed32_32 lhs,
+- struct fixed32_32 rhs)
+-{
+- struct fixed32_32 fx;
+-
+- ASSERT(lhs.value >= rhs.value);
+- fx.value = lhs.value - rhs.value;
+- return fx;
+-}
+-
+-struct fixed32_32 dal_fixed32_32_sub_int(struct fixed32_32 lhs, uint32_t rhs)
+-{
+- struct fixed32_32 fx;
+-
+- ASSERT(lhs.value >= ((uint64_t)rhs<<32));
+- fx.value = lhs.value - ((uint64_t)rhs<<32);
+- return fx;
+-}
+-
+-struct fixed32_32 dal_fixed32_32_mul(
+- struct fixed32_32 lhs,
+- struct fixed32_32 rhs)
+-{
+- struct fixed32_32 fx;
+- uint64_t lhs_int = lhs.value>>32;
+- uint64_t lhs_frac = (uint32_t)lhs.value;
+- uint64_t rhs_int = rhs.value>>32;
+- uint64_t rhs_frac = (uint32_t)rhs.value;
+- uint64_t ahbh = lhs_int * rhs_int;
+- uint64_t ahbl = lhs_int * rhs_frac;
+- uint64_t albh = lhs_frac * rhs_int;
+- uint64_t albl = lhs_frac * rhs_frac;
+-
+- ASSERT((ahbh>>32) == 0);
+-
+- fx.value = (ahbh<<32) + ahbl + albh + (albl>>32);
+- return fx;
+-
+-}
+-
+-struct fixed32_32 dal_fixed32_32_mul_int(struct fixed32_32 lhs, uint32_t rhs)
+-{
+- struct fixed32_32 fx;
+- uint64_t lhsi = (lhs.value>>32) * (uint64_t)rhs;
+- uint64_t lhsf;
+-
+- ASSERT((lhsi>>32) == 0);
+- lhsf = ((uint32_t)lhs.value) * (uint64_t)rhs;
+- ASSERT((lhsi<<32) + lhsf >= lhsf);
+- fx.value = (lhsi<<32) + lhsf;
+- return fx;
+-}
+-
+-struct fixed32_32 dal_fixed32_32_div(
+- struct fixed32_32 lhs,
+- struct fixed32_32 rhs)
+-{
+- struct fixed32_32 fx;
+-
+- fx.value = u64_div(lhs.value, rhs.value);
+- return fx;
+-}
+-
+-struct fixed32_32 dal_fixed32_32_div_int(struct fixed32_32 lhs, uint32_t rhs)
+-{
+- struct fixed32_32 fx;
+-
+- fx.value = u64_div(lhs.value, (uint64_t)rhs << 32);
+- return fx;
+-}
+-
+-uint32_t dal_fixed32_32_ceil(struct fixed32_32 v)
+-{
+- ASSERT((uint32_t)v.value ? (v.value >> 32) + 1 >= 1 : true);
+- return (v.value>>32) + ((uint32_t)v.value ? 1 : 0);
+-}
+-
+-uint32_t dal_fixed32_32_round(struct fixed32_32 v)
+-{
+- ASSERT(v.value + (1ULL<<31) >= (1ULL<<31));
+- return (v.value + (1ULL<<31))>>32;
+-}
+-
+diff --git a/drivers/gpu/drm/amd/display/dc/calcs/custom_float.c b/drivers/gpu/drm/amd/display/dc/calcs/custom_float.c
+index 7243c37..31d167b 100644
+--- a/drivers/gpu/drm/amd/display/dc/calcs/custom_float.c
++++ b/drivers/gpu/drm/amd/display/dc/calcs/custom_float.c
+@@ -36,41 +36,41 @@ static bool build_custom_float(
+ uint32_t exp_offset = (1 << (format->exponenta_bits - 1)) - 1;
+
+ const struct fixed31_32 mantissa_constant_plus_max_fraction =
+- dal_fixed31_32_from_fraction(
++ dc_fixpt_from_fraction(
+ (1LL << (format->mantissa_bits + 1)) - 1,
+ 1LL << format->mantissa_bits);
+
+ struct fixed31_32 mantiss;
+
+- if (dal_fixed31_32_eq(
++ if (dc_fixpt_eq(
+ value,
+- dal_fixed31_32_zero)) {
++ dc_fixpt_zero)) {
+ *negative = false;
+ *mantissa = 0;
+ *exponenta = 0;
+ return true;
+ }
+
+- if (dal_fixed31_32_lt(
++ if (dc_fixpt_lt(
+ value,
+- dal_fixed31_32_zero)) {
++ dc_fixpt_zero)) {
+ *negative = format->sign;
+- value = dal_fixed31_32_neg(value);
++ value = dc_fixpt_neg(value);
+ } else {
+ *negative = false;
+ }
+
+- if (dal_fixed31_32_lt(
++ if (dc_fixpt_lt(
+ value,
+- dal_fixed31_32_one)) {
++ dc_fixpt_one)) {
+ uint32_t i = 1;
+
+ do {
+- value = dal_fixed31_32_shl(value, 1);
++ value = dc_fixpt_shl(value, 1);
+ ++i;
+- } while (dal_fixed31_32_lt(
++ } while (dc_fixpt_lt(
+ value,
+- dal_fixed31_32_one));
++ dc_fixpt_one));
+
+ --i;
+
+@@ -81,15 +81,15 @@ static bool build_custom_float(
+ }
+
+ *exponenta = exp_offset - i;
+- } else if (dal_fixed31_32_le(
++ } else if (dc_fixpt_le(
+ mantissa_constant_plus_max_fraction,
+ value)) {
+ uint32_t i = 1;
+
+ do {
+- value = dal_fixed31_32_shr(value, 1);
++ value = dc_fixpt_shr(value, 1);
+ ++i;
+- } while (dal_fixed31_32_lt(
++ } while (dc_fixpt_lt(
+ mantissa_constant_plus_max_fraction,
+ value));
+
+@@ -98,23 +98,23 @@ static bool build_custom_float(
+ *exponenta = exp_offset;
+ }
+
+- mantiss = dal_fixed31_32_sub(
++ mantiss = dc_fixpt_sub(
+ value,
+- dal_fixed31_32_one);
++ dc_fixpt_one);
+
+- if (dal_fixed31_32_lt(
++ if (dc_fixpt_lt(
+ mantiss,
+- dal_fixed31_32_zero) ||
+- dal_fixed31_32_lt(
+- dal_fixed31_32_one,
++ dc_fixpt_zero) ||
++ dc_fixpt_lt(
++ dc_fixpt_one,
+ mantiss))
+- mantiss = dal_fixed31_32_zero;
++ mantiss = dc_fixpt_zero;
+ else
+- mantiss = dal_fixed31_32_shl(
++ mantiss = dc_fixpt_shl(
+ mantiss,
+ format->mantissa_bits);
+
+- *mantissa = dal_fixed31_32_floor(mantiss);
++ *mantissa = dc_fixpt_floor(mantiss);
+
+ return true;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+index a102c19..49a4ea4 100644
+--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
++++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+@@ -873,14 +873,14 @@ bool dcn_validate_bandwidth(
+ }
+
+ if (pipe->plane_state->rotation % 2 == 0) {
+- ASSERT(pipe->plane_res.scl_data.ratios.horz.value != dal_fixed31_32_one.value
++ ASSERT(pipe->plane_res.scl_data.ratios.horz.value != dc_fixpt_one.value
+ || v->scaler_rec_out_width[input_idx] == v->viewport_width[input_idx]);
+- ASSERT(pipe->plane_res.scl_data.ratios.vert.value != dal_fixed31_32_one.value
++ ASSERT(pipe->plane_res.scl_data.ratios.vert.value != dc_fixpt_one.value
+ || v->scaler_recout_height[input_idx] == v->viewport_height[input_idx]);
+ } else {
+- ASSERT(pipe->plane_res.scl_data.ratios.horz.value != dal_fixed31_32_one.value
++ ASSERT(pipe->plane_res.scl_data.ratios.horz.value != dc_fixpt_one.value
+ || v->scaler_recout_height[input_idx] == v->viewport_width[input_idx]);
+- ASSERT(pipe->plane_res.scl_data.ratios.vert.value != dal_fixed31_32_one.value
++ ASSERT(pipe->plane_res.scl_data.ratios.vert.value != dc_fixpt_one.value
+ || v->scaler_rec_out_width[input_idx] == v->viewport_height[input_idx]);
+ }
+ v->dcc_enable[input_idx] = pipe->plane_state->dcc.enable ? dcn_bw_yes : dcn_bw_no;
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index b44cf52..ea5d5ff 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -631,7 +631,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
+ /* Need to setup mst link_cap struct here
+ * otherwise dc_link_detect() will leave mst link_cap
+ * empty which leads to allocate_mst_payload() has "0"
+- * pbn_per_slot value leading to exception on dal_fixed31_32_div()
++ * pbn_per_slot value leading to exception on dc_fixpt_div()
+ */
+ link->verified_link_cap = link->reported_link_cap;
+ return false;
+@@ -2059,10 +2059,10 @@ static struct fixed31_32 get_pbn_per_slot(struct dc_stream_state *stream)
+ &stream->sink->link->cur_link_settings;
+ uint32_t link_rate_in_mbps =
+ link_settings->link_rate * LINK_RATE_REF_FREQ_IN_MHZ;
+- struct fixed31_32 mbps = dal_fixed31_32_from_int(
++ struct fixed31_32 mbps = dc_fixpt_from_int(
+ link_rate_in_mbps * link_settings->lane_count);
+
+- return dal_fixed31_32_div_int(mbps, 54);
++ return dc_fixpt_div_int(mbps, 54);
+ }
+
+ static int get_color_depth(enum dc_color_depth color_depth)
+@@ -2103,7 +2103,7 @@ static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx)
+ numerator = 64 * PEAK_FACTOR_X1000;
+ denominator = 54 * 8 * 1000 * 1000;
+ kbps *= numerator;
+- peak_kbps = dal_fixed31_32_from_fraction(kbps, denominator);
++ peak_kbps = dc_fixpt_from_fraction(kbps, denominator);
+
+ return peak_kbps;
+ }
+@@ -2230,7 +2230,7 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
+ /* slot X.Y for only current stream */
+ pbn_per_slot = get_pbn_per_slot(stream);
+ pbn = get_pbn_from_timing(pipe_ctx);
+- avg_time_slots_per_mtp = dal_fixed31_32_div(pbn, pbn_per_slot);
++ avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot);
+
+ stream_encoder->funcs->set_mst_bandwidth(
+ stream_encoder,
+@@ -2247,7 +2247,7 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
+ struct link_encoder *link_encoder = link->link_enc;
+ struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc;
+ struct dp_mst_stream_allocation_table proposed_table = {0};
+- struct fixed31_32 avg_time_slots_per_mtp = dal_fixed31_32_from_int(0);
++ struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0);
+ uint8_t i;
+ bool mst_mode = (link->type == dc_connection_mst_branch);
+ DC_LOGGER_INIT(link->ctx->logger);
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index db4fdf6..68c1a99 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -495,9 +495,9 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
+ data->viewport_c.x = data->viewport.x / vpc_div;
+ data->viewport_c.y = data->viewport.y / vpc_div;
+ data->inits.h_c = (data->viewport.x % vpc_div) != 0 ?
+- dal_fixed31_32_half : dal_fixed31_32_zero;
++ dc_fixpt_half : dc_fixpt_zero;
+ data->inits.v_c = (data->viewport.y % vpc_div) != 0 ?
+- dal_fixed31_32_half : dal_fixed31_32_zero;
++ dc_fixpt_half : dc_fixpt_zero;
+ /* Round up, assume original video size always even dimensions */
+ data->viewport_c.width = (data->viewport.width + vpc_div - 1) / vpc_div;
+ data->viewport_c.height = (data->viewport.height + vpc_div - 1) / vpc_div;
+@@ -626,10 +626,10 @@ static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
+ pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
+ rect_swap_helper(&surf_src);
+
+- pipe_ctx->plane_res.scl_data.ratios.horz = dal_fixed31_32_from_fraction(
++ pipe_ctx->plane_res.scl_data.ratios.horz = dc_fixpt_from_fraction(
+ surf_src.width,
+ plane_state->dst_rect.width);
+- pipe_ctx->plane_res.scl_data.ratios.vert = dal_fixed31_32_from_fraction(
++ pipe_ctx->plane_res.scl_data.ratios.vert = dc_fixpt_from_fraction(
+ surf_src.height,
+ plane_state->dst_rect.height);
+
+@@ -687,32 +687,32 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
+ * init_bot = init + scaling_ratio
+ * init_c = init + truncated_vp_c_offset(from calculate viewport)
+ */
+- data->inits.h = dal_fixed31_32_div_int(
+- dal_fixed31_32_add_int(data->ratios.horz, data->taps.h_taps + 1), 2);
++ data->inits.h = dc_fixpt_div_int(
++ dc_fixpt_add_int(data->ratios.horz, data->taps.h_taps + 1), 2);
+
+- data->inits.h_c = dal_fixed31_32_add(data->inits.h_c, dal_fixed31_32_div_int(
+- dal_fixed31_32_add_int(data->ratios.horz_c, data->taps.h_taps_c + 1), 2));
++ data->inits.h_c = dc_fixpt_add(data->inits.h_c, dc_fixpt_div_int(
++ dc_fixpt_add_int(data->ratios.horz_c, data->taps.h_taps_c + 1), 2));
+
+- data->inits.v = dal_fixed31_32_div_int(
+- dal_fixed31_32_add_int(data->ratios.vert, data->taps.v_taps + 1), 2);
++ data->inits.v = dc_fixpt_div_int(
++ dc_fixpt_add_int(data->ratios.vert, data->taps.v_taps + 1), 2);
+
+- data->inits.v_c = dal_fixed31_32_add(data->inits.v_c, dal_fixed31_32_div_int(
+- dal_fixed31_32_add_int(data->ratios.vert_c, data->taps.v_taps_c + 1), 2));
++ data->inits.v_c = dc_fixpt_add(data->inits.v_c, dc_fixpt_div_int(
++ dc_fixpt_add_int(data->ratios.vert_c, data->taps.v_taps_c + 1), 2));
+
+
+ /* Adjust for viewport end clip-off */
+ if ((data->viewport.x + data->viewport.width) < (src.x + src.width) && !flip_horz_scan_dir) {
+ int vp_clip = src.x + src.width - data->viewport.width - data->viewport.x;
+- int int_part = dal_fixed31_32_floor(
+- dal_fixed31_32_sub(data->inits.h, data->ratios.horz));
++ int int_part = dc_fixpt_floor(
++ dc_fixpt_sub(data->inits.h, data->ratios.horz));
+
+ int_part = int_part > 0 ? int_part : 0;
+ data->viewport.width += int_part < vp_clip ? int_part : vp_clip;
+ }
+ if ((data->viewport.y + data->viewport.height) < (src.y + src.height) && !flip_vert_scan_dir) {
+ int vp_clip = src.y + src.height - data->viewport.height - data->viewport.y;
+- int int_part = dal_fixed31_32_floor(
+- dal_fixed31_32_sub(data->inits.v, data->ratios.vert));
++ int int_part = dc_fixpt_floor(
++ dc_fixpt_sub(data->inits.v, data->ratios.vert));
+
+ int_part = int_part > 0 ? int_part : 0;
+ data->viewport.height += int_part < vp_clip ? int_part : vp_clip;
+@@ -720,8 +720,8 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
+ if ((data->viewport_c.x + data->viewport_c.width) < (src.x + src.width) / vpc_div && !flip_horz_scan_dir) {
+ int vp_clip = (src.x + src.width) / vpc_div -
+ data->viewport_c.width - data->viewport_c.x;
+- int int_part = dal_fixed31_32_floor(
+- dal_fixed31_32_sub(data->inits.h_c, data->ratios.horz_c));
++ int int_part = dc_fixpt_floor(
++ dc_fixpt_sub(data->inits.h_c, data->ratios.horz_c));
+
+ int_part = int_part > 0 ? int_part : 0;
+ data->viewport_c.width += int_part < vp_clip ? int_part : vp_clip;
+@@ -729,8 +729,8 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
+ if ((data->viewport_c.y + data->viewport_c.height) < (src.y + src.height) / vpc_div && !flip_vert_scan_dir) {
+ int vp_clip = (src.y + src.height) / vpc_div -
+ data->viewport_c.height - data->viewport_c.y;
+- int int_part = dal_fixed31_32_floor(
+- dal_fixed31_32_sub(data->inits.v_c, data->ratios.vert_c));
++ int int_part = dc_fixpt_floor(
++ dc_fixpt_sub(data->inits.v_c, data->ratios.vert_c));
+
+ int_part = int_part > 0 ? int_part : 0;
+ data->viewport_c.height += int_part < vp_clip ? int_part : vp_clip;
+@@ -740,9 +740,9 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
+ if (data->viewport.x && !flip_horz_scan_dir) {
+ int int_part;
+
+- data->inits.h = dal_fixed31_32_add(data->inits.h, dal_fixed31_32_mul_int(
++ data->inits.h = dc_fixpt_add(data->inits.h, dc_fixpt_mul_int(
+ data->ratios.horz, recout_skip->width));
+- int_part = dal_fixed31_32_floor(data->inits.h) - data->viewport.x;
++ int_part = dc_fixpt_floor(data->inits.h) - data->viewport.x;
+ if (int_part < data->taps.h_taps) {
+ int int_adj = data->viewport.x >= (data->taps.h_taps - int_part) ?
+ (data->taps.h_taps - int_part) : data->viewport.x;
+@@ -755,15 +755,15 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
+ int_part = data->taps.h_taps;
+ }
+ data->inits.h.value &= 0xffffffff;
+- data->inits.h = dal_fixed31_32_add_int(data->inits.h, int_part);
++ data->inits.h = dc_fixpt_add_int(data->inits.h, int_part);
+ }
+
+ if (data->viewport_c.x && !flip_horz_scan_dir) {
+ int int_part;
+
+- data->inits.h_c = dal_fixed31_32_add(data->inits.h_c, dal_fixed31_32_mul_int(
++ data->inits.h_c = dc_fixpt_add(data->inits.h_c, dc_fixpt_mul_int(
+ data->ratios.horz_c, recout_skip->width));
+- int_part = dal_fixed31_32_floor(data->inits.h_c) - data->viewport_c.x;
++ int_part = dc_fixpt_floor(data->inits.h_c) - data->viewport_c.x;
+ if (int_part < data->taps.h_taps_c) {
+ int int_adj = data->viewport_c.x >= (data->taps.h_taps_c - int_part) ?
+ (data->taps.h_taps_c - int_part) : data->viewport_c.x;
+@@ -776,15 +776,15 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
+ int_part = data->taps.h_taps_c;
+ }
+ data->inits.h_c.value &= 0xffffffff;
+- data->inits.h_c = dal_fixed31_32_add_int(data->inits.h_c, int_part);
++ data->inits.h_c = dc_fixpt_add_int(data->inits.h_c, int_part);
+ }
+
+ if (data->viewport.y && !flip_vert_scan_dir) {
+ int int_part;
+
+- data->inits.v = dal_fixed31_32_add(data->inits.v, dal_fixed31_32_mul_int(
++ data->inits.v = dc_fixpt_add(data->inits.v, dc_fixpt_mul_int(
+ data->ratios.vert, recout_skip->height));
+- int_part = dal_fixed31_32_floor(data->inits.v) - data->viewport.y;
++ int_part = dc_fixpt_floor(data->inits.v) - data->viewport.y;
+ if (int_part < data->taps.v_taps) {
+ int int_adj = data->viewport.y >= (data->taps.v_taps - int_part) ?
+ (data->taps.v_taps - int_part) : data->viewport.y;
+@@ -797,15 +797,15 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
+ int_part = data->taps.v_taps;
+ }
+ data->inits.v.value &= 0xffffffff;
+- data->inits.v = dal_fixed31_32_add_int(data->inits.v, int_part);
++ data->inits.v = dc_fixpt_add_int(data->inits.v, int_part);
+ }
+
+ if (data->viewport_c.y && !flip_vert_scan_dir) {
+ int int_part;
+
+- data->inits.v_c = dal_fixed31_32_add(data->inits.v_c, dal_fixed31_32_mul_int(
++ data->inits.v_c = dc_fixpt_add(data->inits.v_c, dc_fixpt_mul_int(
+ data->ratios.vert_c, recout_skip->height));
+- int_part = dal_fixed31_32_floor(data->inits.v_c) - data->viewport_c.y;
++ int_part = dc_fixpt_floor(data->inits.v_c) - data->viewport_c.y;
+ if (int_part < data->taps.v_taps_c) {
+ int int_adj = data->viewport_c.y >= (data->taps.v_taps_c - int_part) ?
+ (data->taps.v_taps_c - int_part) : data->viewport_c.y;
+@@ -818,12 +818,12 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
+ int_part = data->taps.v_taps_c;
+ }
+ data->inits.v_c.value &= 0xffffffff;
+- data->inits.v_c = dal_fixed31_32_add_int(data->inits.v_c, int_part);
++ data->inits.v_c = dc_fixpt_add_int(data->inits.v_c, int_part);
+ }
+
+ /* Interlaced inits based on final vert inits */
+- data->inits.v_bot = dal_fixed31_32_add(data->inits.v, data->ratios.vert);
+- data->inits.v_c_bot = dal_fixed31_32_add(data->inits.v_c, data->ratios.vert_c);
++ data->inits.v_bot = dc_fixpt_add(data->inits.v, data->ratios.vert);
++ data->inits.v_c_bot = dc_fixpt_add(data->inits.v_c, data->ratios.vert_c);
+
+ if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
+ pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) {
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+index 2726b02..90bccd5 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+@@ -26,6 +26,8 @@
+ #ifndef DC_DP_TYPES_H
+ #define DC_DP_TYPES_H
+
++#include "os_types.h"
++
+ enum dc_lane_count {
+ LANE_COUNT_UNKNOWN = 0,
+ LANE_COUNT_ONE = 1,
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
+index 5e041b7..f530871 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
+@@ -25,7 +25,7 @@
+ #ifndef DC_TYPES_H_
+ #define DC_TYPES_H_
+
+-#include "fixed32_32.h"
++#include "os_types.h"
+ #include "fixed31_32.h"
+ #include "irq_types.h"
+ #include "dc_dp_types.h"
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+index fe92a12..29294db 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+@@ -26,7 +26,7 @@
+ #include "dce_abm.h"
+ #include "dm_services.h"
+ #include "reg_helper.h"
+-#include "fixed32_32.h"
++#include "fixed31_32.h"
+ #include "dc.h"
+
+ #include "atom.h"
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+index 0570e7e..599c7ab 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+@@ -657,12 +657,12 @@ static uint32_t dce110_get_d_to_pixel_rate_in_hz(
+ return 0;
+ }
+
+- pix_rate = dal_fixed31_32_from_int(clk_src->ref_freq_khz);
+- pix_rate = dal_fixed31_32_mul_int(pix_rate, 1000);
+- pix_rate = dal_fixed31_32_mul_int(pix_rate, phase);
+- pix_rate = dal_fixed31_32_div_int(pix_rate, modulo);
++ pix_rate = dc_fixpt_from_int(clk_src->ref_freq_khz);
++ pix_rate = dc_fixpt_mul_int(pix_rate, 1000);
++ pix_rate = dc_fixpt_mul_int(pix_rate, phase);
++ pix_rate = dc_fixpt_div_int(pix_rate, modulo);
+
+- return dal_fixed31_32_round(pix_rate);
++ return dc_fixpt_round(pix_rate);
+ } else {
+ return dce110_get_dp_pixel_rate_from_combo_phy_pll(cs, pix_clk_params, pll_settings);
+ }
+@@ -711,12 +711,12 @@ static bool calculate_ss(
+ const struct spread_spectrum_data *ss_data,
+ struct delta_sigma_data *ds_data)
+ {
+- struct fixed32_32 fb_div;
+- struct fixed32_32 ss_amount;
+- struct fixed32_32 ss_nslip_amount;
+- struct fixed32_32 ss_ds_frac_amount;
+- struct fixed32_32 ss_step_size;
+- struct fixed32_32 modulation_time;
++ struct fixed31_32 fb_div;
++ struct fixed31_32 ss_amount;
++ struct fixed31_32 ss_nslip_amount;
++ struct fixed31_32 ss_ds_frac_amount;
++ struct fixed31_32 ss_step_size;
++ struct fixed31_32 modulation_time;
+
+ if (ds_data == NULL)
+ return false;
+@@ -731,42 +731,42 @@ static bool calculate_ss(
+
+ /* compute SS_AMOUNT_FBDIV & SS_AMOUNT_NFRAC_SLIP & SS_AMOUNT_DSFRAC*/
+ /* 6 decimal point support in fractional feedback divider */
+- fb_div = dal_fixed32_32_from_fraction(
++ fb_div = dc_fixpt_from_fraction(
+ pll_settings->fract_feedback_divider, 1000000);
+- fb_div = dal_fixed32_32_add_int(fb_div, pll_settings->feedback_divider);
++ fb_div = dc_fixpt_add_int(fb_div, pll_settings->feedback_divider);
+
+ ds_data->ds_frac_amount = 0;
+ /*spreadSpectrumPercentage is in the unit of .01%,
+ * so have to divided by 100 * 100*/
+- ss_amount = dal_fixed32_32_mul(
+- fb_div, dal_fixed32_32_from_fraction(ss_data->percentage,
++ ss_amount = dc_fixpt_mul(
++ fb_div, dc_fixpt_from_fraction(ss_data->percentage,
+ 100 * ss_data->percentage_divider));
+- ds_data->feedback_amount = dal_fixed32_32_floor(ss_amount);
++ ds_data->feedback_amount = dc_fixpt_floor(ss_amount);
+
+- ss_nslip_amount = dal_fixed32_32_sub(ss_amount,
+- dal_fixed32_32_from_int(ds_data->feedback_amount));
+- ss_nslip_amount = dal_fixed32_32_mul_int(ss_nslip_amount, 10);
+- ds_data->nfrac_amount = dal_fixed32_32_floor(ss_nslip_amount);
++ ss_nslip_amount = dc_fixpt_sub(ss_amount,
++ dc_fixpt_from_int(ds_data->feedback_amount));
++ ss_nslip_amount = dc_fixpt_mul_int(ss_nslip_amount, 10);
++ ds_data->nfrac_amount = dc_fixpt_floor(ss_nslip_amount);
+
+- ss_ds_frac_amount = dal_fixed32_32_sub(ss_nslip_amount,
+- dal_fixed32_32_from_int(ds_data->nfrac_amount));
+- ss_ds_frac_amount = dal_fixed32_32_mul_int(ss_ds_frac_amount, 65536);
+- ds_data->ds_frac_amount = dal_fixed32_32_floor(ss_ds_frac_amount);
++ ss_ds_frac_amount = dc_fixpt_sub(ss_nslip_amount,
++ dc_fixpt_from_int(ds_data->nfrac_amount));
++ ss_ds_frac_amount = dc_fixpt_mul_int(ss_ds_frac_amount, 65536);
++ ds_data->ds_frac_amount = dc_fixpt_floor(ss_ds_frac_amount);
+
+ /* compute SS_STEP_SIZE_DSFRAC */
+- modulation_time = dal_fixed32_32_from_fraction(
++ modulation_time = dc_fixpt_from_fraction(
+ pll_settings->reference_freq * 1000,
+ pll_settings->reference_divider * ss_data->modulation_freq_hz);
+
+ if (ss_data->flags.CENTER_SPREAD)
+- modulation_time = dal_fixed32_32_div_int(modulation_time, 4);
++ modulation_time = dc_fixpt_div_int(modulation_time, 4);
+ else
+- modulation_time = dal_fixed32_32_div_int(modulation_time, 2);
++ modulation_time = dc_fixpt_div_int(modulation_time, 2);
+
+- ss_step_size = dal_fixed32_32_div(ss_amount, modulation_time);
++ ss_step_size = dc_fixpt_div(ss_amount, modulation_time);
+ /* SS_STEP_SIZE_DSFRAC_DEC = Int(SS_STEP_SIZE * 2 ^ 16 * 10)*/
+- ss_step_size = dal_fixed32_32_mul_int(ss_step_size, 65536 * 10);
+- ds_data->ds_frac_size = dal_fixed32_32_floor(ss_step_size);
++ ss_step_size = dc_fixpt_mul_int(ss_step_size, 65536 * 10);
++ ds_data->ds_frac_size = dc_fixpt_floor(ss_step_size);
+
+ return true;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+index f043e5e..8a581c6 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+@@ -26,7 +26,7 @@
+ #include "dce_clocks.h"
+ #include "dm_services.h"
+ #include "reg_helper.h"
+-#include "fixed32_32.h"
++#include "fixed31_32.h"
+ #include "bios_parser_interface.h"
+ #include "dc.h"
+ #include "dmcu.h"
+@@ -228,19 +228,19 @@ static int dce_clocks_get_dp_ref_freq(struct display_clock *clk)
+ generated according to average value (case as with previous ASICs)
+ */
+ if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) {
+- struct fixed32_32 ss_percentage = dal_fixed32_32_div_int(
+- dal_fixed32_32_from_fraction(
++ struct fixed31_32 ss_percentage = dc_fixpt_div_int(
++ dc_fixpt_from_fraction(
+ clk_dce->dprefclk_ss_percentage,
+ clk_dce->dprefclk_ss_divider), 200);
+- struct fixed32_32 adj_dp_ref_clk_khz;
++ struct fixed31_32 adj_dp_ref_clk_khz;
+
+- ss_percentage = dal_fixed32_32_sub(dal_fixed32_32_one,
++ ss_percentage = dc_fixpt_sub(dc_fixpt_one,
+ ss_percentage);
+ adj_dp_ref_clk_khz =
+- dal_fixed32_32_mul_int(
++ dc_fixpt_mul_int(
+ ss_percentage,
+ dp_ref_clk_khz);
+- dp_ref_clk_khz = dal_fixed32_32_floor(adj_dp_ref_clk_khz);
++ dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
+ }
+
+ return dp_ref_clk_khz;
+@@ -256,19 +256,19 @@ static int dce_clocks_get_dp_ref_freq_wrkaround(struct display_clock *clk)
+ int dp_ref_clk_khz = 600000;
+
+ if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) {
+- struct fixed32_32 ss_percentage = dal_fixed32_32_div_int(
+- dal_fixed32_32_from_fraction(
++ struct fixed31_32 ss_percentage = dc_fixpt_div_int(
++ dc_fixpt_from_fraction(
+ clk_dce->dprefclk_ss_percentage,
+ clk_dce->dprefclk_ss_divider), 200);
+- struct fixed32_32 adj_dp_ref_clk_khz;
++ struct fixed31_32 adj_dp_ref_clk_khz;
+
+- ss_percentage = dal_fixed32_32_sub(dal_fixed32_32_one,
++ ss_percentage = dc_fixpt_sub(dc_fixpt_one,
+ ss_percentage);
+ adj_dp_ref_clk_khz =
+- dal_fixed32_32_mul_int(
++ dc_fixpt_mul_int(
+ ss_percentage,
+ dp_ref_clk_khz);
+- dp_ref_clk_khz = dal_fixed32_32_floor(adj_dp_ref_clk_khz);
++ dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
+ }
+
+ return dp_ref_clk_khz;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+index 2ee3d9b..a576b8b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+@@ -28,7 +28,7 @@
+ #include "dce_dmcu.h"
+ #include "dm_services.h"
+ #include "reg_helper.h"
+-#include "fixed32_32.h"
++#include "fixed31_32.h"
+ #include "dc.h"
+
+ #define TO_DCE_DMCU(dmcu)\
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c b/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c
+index d737e91..5d9506b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c
+@@ -195,13 +195,13 @@ static void dce_ipp_program_input_lut(
+
+ for (i = 0; i < gamma->num_entries; i++) {
+ REG_SET(DC_LUT_SEQ_COLOR, 0, DC_LUT_SEQ_COLOR,
+- dal_fixed31_32_round(
++ dc_fixpt_round(
+ gamma->entries.red[i]));
+ REG_SET(DC_LUT_SEQ_COLOR, 0, DC_LUT_SEQ_COLOR,
+- dal_fixed31_32_round(
++ dc_fixpt_round(
+ gamma->entries.green[i]));
+ REG_SET(DC_LUT_SEQ_COLOR, 0, DC_LUT_SEQ_COLOR,
+- dal_fixed31_32_round(
++ dc_fixpt_round(
+ gamma->entries.blue[i]));
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c b/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c
+index 6243450..48862be 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c
+@@ -1014,11 +1014,11 @@ static const uint16_t filter_8tap_64p_183[264] = {
+
+ const uint16_t *get_filter_3tap_16p(struct fixed31_32 ratio)
+ {
+- if (ratio.value < dal_fixed31_32_one.value)
++ if (ratio.value < dc_fixpt_one.value)
+ return filter_3tap_16p_upscale;
+- else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
++ else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
+ return filter_3tap_16p_117;
+- else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
++ else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
+ return filter_3tap_16p_150;
+ else
+ return filter_3tap_16p_183;
+@@ -1026,11 +1026,11 @@ const uint16_t *get_filter_3tap_16p(struct fixed31_32 ratio)
+
+ const uint16_t *get_filter_3tap_64p(struct fixed31_32 ratio)
+ {
+- if (ratio.value < dal_fixed31_32_one.value)
++ if (ratio.value < dc_fixpt_one.value)
+ return filter_3tap_64p_upscale;
+- else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
++ else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
+ return filter_3tap_64p_117;
+- else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
++ else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
+ return filter_3tap_64p_150;
+ else
+ return filter_3tap_64p_183;
+@@ -1038,11 +1038,11 @@ const uint16_t *get_filter_3tap_64p(struct fixed31_32 ratio)
+
+ const uint16_t *get_filter_4tap_16p(struct fixed31_32 ratio)
+ {
+- if (ratio.value < dal_fixed31_32_one.value)
++ if (ratio.value < dc_fixpt_one.value)
+ return filter_4tap_16p_upscale;
+- else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
++ else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
+ return filter_4tap_16p_117;
+- else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
++ else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
+ return filter_4tap_16p_150;
+ else
+ return filter_4tap_16p_183;
+@@ -1050,11 +1050,11 @@ const uint16_t *get_filter_4tap_16p(struct fixed31_32 ratio)
+
+ const uint16_t *get_filter_4tap_64p(struct fixed31_32 ratio)
+ {
+- if (ratio.value < dal_fixed31_32_one.value)
++ if (ratio.value < dc_fixpt_one.value)
+ return filter_4tap_64p_upscale;
+- else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
++ else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
+ return filter_4tap_64p_117;
+- else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
++ else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
+ return filter_4tap_64p_150;
+ else
+ return filter_4tap_64p_183;
+@@ -1062,11 +1062,11 @@ const uint16_t *get_filter_4tap_64p(struct fixed31_32 ratio)
+
+ const uint16_t *get_filter_5tap_64p(struct fixed31_32 ratio)
+ {
+- if (ratio.value < dal_fixed31_32_one.value)
++ if (ratio.value < dc_fixpt_one.value)
+ return filter_5tap_64p_upscale;
+- else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
++ else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
+ return filter_5tap_64p_117;
+- else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
++ else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
+ return filter_5tap_64p_150;
+ else
+ return filter_5tap_64p_183;
+@@ -1074,11 +1074,11 @@ const uint16_t *get_filter_5tap_64p(struct fixed31_32 ratio)
+
+ const uint16_t *get_filter_6tap_64p(struct fixed31_32 ratio)
+ {
+- if (ratio.value < dal_fixed31_32_one.value)
++ if (ratio.value < dc_fixpt_one.value)
+ return filter_6tap_64p_upscale;
+- else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
++ else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
+ return filter_6tap_64p_117;
+- else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
++ else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
+ return filter_6tap_64p_150;
+ else
+ return filter_6tap_64p_183;
+@@ -1086,11 +1086,11 @@ const uint16_t *get_filter_6tap_64p(struct fixed31_32 ratio)
+
+ const uint16_t *get_filter_7tap_64p(struct fixed31_32 ratio)
+ {
+- if (ratio.value < dal_fixed31_32_one.value)
++ if (ratio.value < dc_fixpt_one.value)
+ return filter_7tap_64p_upscale;
+- else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
++ else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
+ return filter_7tap_64p_117;
+- else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
++ else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
+ return filter_7tap_64p_150;
+ else
+ return filter_7tap_64p_183;
+@@ -1098,11 +1098,11 @@ const uint16_t *get_filter_7tap_64p(struct fixed31_32 ratio)
+
+ const uint16_t *get_filter_8tap_64p(struct fixed31_32 ratio)
+ {
+- if (ratio.value < dal_fixed31_32_one.value)
++ if (ratio.value < dc_fixpt_one.value)
+ return filter_8tap_64p_upscale;
+- else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
++ else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
+ return filter_8tap_64p_117;
+- else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
++ else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
+ return filter_8tap_64p_150;
+ else
+ return filter_8tap_64p_183;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+index e265a0a..0a6d483 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+@@ -683,11 +683,11 @@ static void dce110_stream_encoder_set_mst_bandwidth(
+ struct fixed31_32 avg_time_slots_per_mtp)
+ {
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+- uint32_t x = dal_fixed31_32_floor(
++ uint32_t x = dc_fixpt_floor(
+ avg_time_slots_per_mtp);
+- uint32_t y = dal_fixed31_32_ceil(
+- dal_fixed31_32_shl(
+- dal_fixed31_32_sub_int(
++ uint32_t y = dc_fixpt_ceil(
++ dc_fixpt_shl(
++ dc_fixpt_sub_int(
+ avg_time_slots_per_mtp,
+ x),
+ 26));
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+index 832c5da..a02e719 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+@@ -41,7 +41,7 @@
+ #define DC_LOGGER \
+ xfm_dce->base.ctx->logger
+
+-#define IDENTITY_RATIO(ratio) (dal_fixed31_32_u2d19(ratio) == (1 << 19))
++#define IDENTITY_RATIO(ratio) (dc_fixpt_u2d19(ratio) == (1 << 19))
+ #define GAMUT_MATRIX_SIZE 12
+ #define SCL_PHASES 16
+
+@@ -256,27 +256,27 @@ static void calculate_inits(
+ struct fixed31_32 v_init;
+
+ inits->h_int_scale_ratio =
+- dal_fixed31_32_u2d19(data->ratios.horz) << 5;
++ dc_fixpt_u2d19(data->ratios.horz) << 5;
+ inits->v_int_scale_ratio =
+- dal_fixed31_32_u2d19(data->ratios.vert) << 5;
++ dc_fixpt_u2d19(data->ratios.vert) << 5;
+
+ h_init =
+- dal_fixed31_32_div_int(
+- dal_fixed31_32_add(
++ dc_fixpt_div_int(
++ dc_fixpt_add(
+ data->ratios.horz,
+- dal_fixed31_32_from_int(data->taps.h_taps + 1)),
++ dc_fixpt_from_int(data->taps.h_taps + 1)),
+ 2);
+- inits->h_init.integer = dal_fixed31_32_floor(h_init);
+- inits->h_init.fraction = dal_fixed31_32_u0d19(h_init) << 5;
++ inits->h_init.integer = dc_fixpt_floor(h_init);
++ inits->h_init.fraction = dc_fixpt_u0d19(h_init) << 5;
+
+ v_init =
+- dal_fixed31_32_div_int(
+- dal_fixed31_32_add(
++ dc_fixpt_div_int(
++ dc_fixpt_add(
+ data->ratios.vert,
+- dal_fixed31_32_from_int(data->taps.v_taps + 1)),
++ dc_fixpt_from_int(data->taps.v_taps + 1)),
+ 2);
+- inits->v_init.integer = dal_fixed31_32_floor(v_init);
+- inits->v_init.fraction = dal_fixed31_32_u0d19(v_init) << 5;
++ inits->v_init.integer = dc_fixpt_floor(v_init);
++ inits->v_init.fraction = dc_fixpt_u0d19(v_init) << 5;
+ }
+
+ static void program_scl_ratios_inits(
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index 8a61b10..5fcb67a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -509,19 +509,19 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf,
+ rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
+ rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
+
+- arr_points[0].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
+- dal_fixed31_32_from_int(region_start));
+- arr_points[1].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
+- dal_fixed31_32_from_int(region_end));
++ arr_points[0].x = dc_fixpt_pow(dc_fixpt_from_int(2),
++ dc_fixpt_from_int(region_start));
++ arr_points[1].x = dc_fixpt_pow(dc_fixpt_from_int(2),
++ dc_fixpt_from_int(region_end));
+
+ y_r = rgb_resulted[0].red;
+ y_g = rgb_resulted[0].green;
+ y_b = rgb_resulted[0].blue;
+
+- y1_min = dal_fixed31_32_min(y_r, dal_fixed31_32_min(y_g, y_b));
++ y1_min = dc_fixpt_min(y_r, dc_fixpt_min(y_g, y_b));
+
+ arr_points[0].y = y1_min;
+- arr_points[0].slope = dal_fixed31_32_div(arr_points[0].y,
++ arr_points[0].slope = dc_fixpt_div(arr_points[0].y,
+ arr_points[0].x);
+
+ y_r = rgb_resulted[hw_points - 1].red;
+@@ -531,21 +531,21 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf,
+ /* see comment above, m_arrPoints[1].y should be the Y value for the
+ * region end (m_numOfHwPoints), not last HW point(m_numOfHwPoints - 1)
+ */
+- y3_max = dal_fixed31_32_max(y_r, dal_fixed31_32_max(y_g, y_b));
++ y3_max = dc_fixpt_max(y_r, dc_fixpt_max(y_g, y_b));
+
+ arr_points[1].y = y3_max;
+
+- arr_points[1].slope = dal_fixed31_32_zero;
++ arr_points[1].slope = dc_fixpt_zero;
+
+ if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
+ /* for PQ, we want to have a straight line from last HW X point,
+ * and the slope to be such that we hit 1.0 at 10000 nits.
+ */
+- const struct fixed31_32 end_value = dal_fixed31_32_from_int(125);
++ const struct fixed31_32 end_value = dc_fixpt_from_int(125);
+
+- arr_points[1].slope = dal_fixed31_32_div(
+- dal_fixed31_32_sub(dal_fixed31_32_one, arr_points[1].y),
+- dal_fixed31_32_sub(end_value, arr_points[1].x));
++ arr_points[1].slope = dc_fixpt_div(
++ dc_fixpt_sub(dc_fixpt_one, arr_points[1].y),
++ dc_fixpt_sub(end_value, arr_points[1].x));
+ }
+
+ regamma_params->hw_points_num = hw_points;
+@@ -569,16 +569,16 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf,
+ i = 1;
+
+ while (i != hw_points + 1) {
+- if (dal_fixed31_32_lt(rgb_plus_1->red, rgb->red))
++ if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
+ rgb_plus_1->red = rgb->red;
+- if (dal_fixed31_32_lt(rgb_plus_1->green, rgb->green))
++ if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
+ rgb_plus_1->green = rgb->green;
+- if (dal_fixed31_32_lt(rgb_plus_1->blue, rgb->blue))
++ if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
+ rgb_plus_1->blue = rgb->blue;
+
+- rgb->delta_red = dal_fixed31_32_sub(rgb_plus_1->red, rgb->red);
+- rgb->delta_green = dal_fixed31_32_sub(rgb_plus_1->green, rgb->green);
+- rgb->delta_blue = dal_fixed31_32_sub(rgb_plus_1->blue, rgb->blue);
++ rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red);
++ rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
++ rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue);
+
+ ++rgb_plus_1;
+ ++rgb;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
+index 8ba3c12..a7dce06 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
+@@ -373,13 +373,13 @@ static void calculate_inits(
+ struct rect *chroma_viewport)
+ {
+ inits->h_int_scale_ratio_luma =
+- dal_fixed31_32_u2d19(data->ratios.horz) << 5;
++ dc_fixpt_u2d19(data->ratios.horz) << 5;
+ inits->v_int_scale_ratio_luma =
+- dal_fixed31_32_u2d19(data->ratios.vert) << 5;
++ dc_fixpt_u2d19(data->ratios.vert) << 5;
+ inits->h_int_scale_ratio_chroma =
+- dal_fixed31_32_u2d19(data->ratios.horz_c) << 5;
++ dc_fixpt_u2d19(data->ratios.horz_c) << 5;
+ inits->v_int_scale_ratio_chroma =
+- dal_fixed31_32_u2d19(data->ratios.vert_c) << 5;
++ dc_fixpt_u2d19(data->ratios.vert_c) << 5;
+
+ inits->h_init_luma.integer = 1;
+ inits->v_init_luma.integer = 1;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+index 96d5878..5d95a99 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+@@ -169,7 +169,7 @@ bool cm_helper_convert_to_custom_float(
+ }
+
+ if (fixpoint == true)
+- arr_points[1].custom_float_y = dal_fixed31_32_clamp_u0d14(arr_points[1].y);
++ arr_points[1].custom_float_y = dc_fixpt_clamp_u0d14(arr_points[1].y);
+ else if (!convert_to_custom_float_format(arr_points[1].y, &fmt,
+ &arr_points[1].custom_float_y)) {
+ BREAK_TO_DEBUGGER();
+@@ -327,19 +327,19 @@ bool cm_helper_translate_curve_to_hw_format(
+ rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
+ rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
+
+- arr_points[0].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
+- dal_fixed31_32_from_int(region_start));
+- arr_points[1].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
+- dal_fixed31_32_from_int(region_end));
++ arr_points[0].x = dc_fixpt_pow(dc_fixpt_from_int(2),
++ dc_fixpt_from_int(region_start));
++ arr_points[1].x = dc_fixpt_pow(dc_fixpt_from_int(2),
++ dc_fixpt_from_int(region_end));
+
+ y_r = rgb_resulted[0].red;
+ y_g = rgb_resulted[0].green;
+ y_b = rgb_resulted[0].blue;
+
+- y1_min = dal_fixed31_32_min(y_r, dal_fixed31_32_min(y_g, y_b));
++ y1_min = dc_fixpt_min(y_r, dc_fixpt_min(y_g, y_b));
+
+ arr_points[0].y = y1_min;
+- arr_points[0].slope = dal_fixed31_32_div(arr_points[0].y, arr_points[0].x);
++ arr_points[0].slope = dc_fixpt_div(arr_points[0].y, arr_points[0].x);
+ y_r = rgb_resulted[hw_points - 1].red;
+ y_g = rgb_resulted[hw_points - 1].green;
+ y_b = rgb_resulted[hw_points - 1].blue;
+@@ -347,22 +347,22 @@ bool cm_helper_translate_curve_to_hw_format(
+ /* see comment above, m_arrPoints[1].y should be the Y value for the
+ * region end (m_numOfHwPoints), not last HW point(m_numOfHwPoints - 1)
+ */
+- y3_max = dal_fixed31_32_max(y_r, dal_fixed31_32_max(y_g, y_b));
++ y3_max = dc_fixpt_max(y_r, dc_fixpt_max(y_g, y_b));
+
+ arr_points[1].y = y3_max;
+
+- arr_points[1].slope = dal_fixed31_32_zero;
++ arr_points[1].slope = dc_fixpt_zero;
+
+ if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
+ /* for PQ, we want to have a straight line from last HW X point,
+ * and the slope to be such that we hit 1.0 at 10000 nits.
+ */
+ const struct fixed31_32 end_value =
+- dal_fixed31_32_from_int(125);
++ dc_fixpt_from_int(125);
+
+- arr_points[1].slope = dal_fixed31_32_div(
+- dal_fixed31_32_sub(dal_fixed31_32_one, arr_points[1].y),
+- dal_fixed31_32_sub(end_value, arr_points[1].x));
++ arr_points[1].slope = dc_fixpt_div(
++ dc_fixpt_sub(dc_fixpt_one, arr_points[1].y),
++ dc_fixpt_sub(end_value, arr_points[1].x));
+ }
+
+ lut_params->hw_points_num = hw_points;
+@@ -386,24 +386,24 @@ bool cm_helper_translate_curve_to_hw_format(
+
+ i = 1;
+ while (i != hw_points + 1) {
+- if (dal_fixed31_32_lt(rgb_plus_1->red, rgb->red))
++ if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
+ rgb_plus_1->red = rgb->red;
+- if (dal_fixed31_32_lt(rgb_plus_1->green, rgb->green))
++ if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
+ rgb_plus_1->green = rgb->green;
+- if (dal_fixed31_32_lt(rgb_plus_1->blue, rgb->blue))
++ if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
+ rgb_plus_1->blue = rgb->blue;
+
+- rgb->delta_red = dal_fixed31_32_sub(rgb_plus_1->red, rgb->red);
+- rgb->delta_green = dal_fixed31_32_sub(rgb_plus_1->green, rgb->green);
+- rgb->delta_blue = dal_fixed31_32_sub(rgb_plus_1->blue, rgb->blue);
++ rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red);
++ rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
++ rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue);
+
+ if (fixpoint == true) {
+- rgb->delta_red_reg = dal_fixed31_32_clamp_u0d10(rgb->delta_red);
+- rgb->delta_green_reg = dal_fixed31_32_clamp_u0d10(rgb->delta_green);
+- rgb->delta_blue_reg = dal_fixed31_32_clamp_u0d10(rgb->delta_blue);
+- rgb->red_reg = dal_fixed31_32_clamp_u0d14(rgb->red);
+- rgb->green_reg = dal_fixed31_32_clamp_u0d14(rgb->green);
+- rgb->blue_reg = dal_fixed31_32_clamp_u0d14(rgb->blue);
++ rgb->delta_red_reg = dc_fixpt_clamp_u0d10(rgb->delta_red);
++ rgb->delta_green_reg = dc_fixpt_clamp_u0d10(rgb->delta_green);
++ rgb->delta_blue_reg = dc_fixpt_clamp_u0d10(rgb->delta_blue);
++ rgb->red_reg = dc_fixpt_clamp_u0d14(rgb->red);
++ rgb->green_reg = dc_fixpt_clamp_u0d14(rgb->green);
++ rgb->blue_reg = dc_fixpt_clamp_u0d14(rgb->blue);
+ }
+
+ ++rgb_plus_1;
+@@ -489,19 +489,19 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
+ rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
+ rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
+
+- arr_points[0].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
+- dal_fixed31_32_from_int(region_start));
+- arr_points[1].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
+- dal_fixed31_32_from_int(region_end));
++ arr_points[0].x = dc_fixpt_pow(dc_fixpt_from_int(2),
++ dc_fixpt_from_int(region_start));
++ arr_points[1].x = dc_fixpt_pow(dc_fixpt_from_int(2),
++ dc_fixpt_from_int(region_end));
+
+ y_r = rgb_resulted[0].red;
+ y_g = rgb_resulted[0].green;
+ y_b = rgb_resulted[0].blue;
+
+- y1_min = dal_fixed31_32_min(y_r, dal_fixed31_32_min(y_g, y_b));
++ y1_min = dc_fixpt_min(y_r, dc_fixpt_min(y_g, y_b));
+
+ arr_points[0].y = y1_min;
+- arr_points[0].slope = dal_fixed31_32_div(arr_points[0].y, arr_points[0].x);
++ arr_points[0].slope = dc_fixpt_div(arr_points[0].y, arr_points[0].x);
+ y_r = rgb_resulted[hw_points - 1].red;
+ y_g = rgb_resulted[hw_points - 1].green;
+ y_b = rgb_resulted[hw_points - 1].blue;
+@@ -509,22 +509,22 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
+ /* see comment above, m_arrPoints[1].y should be the Y value for the
+ * region end (m_numOfHwPoints), not last HW point(m_numOfHwPoints - 1)
+ */
+- y3_max = dal_fixed31_32_max(y_r, dal_fixed31_32_max(y_g, y_b));
++ y3_max = dc_fixpt_max(y_r, dc_fixpt_max(y_g, y_b));
+
+ arr_points[1].y = y3_max;
+
+- arr_points[1].slope = dal_fixed31_32_zero;
++ arr_points[1].slope = dc_fixpt_zero;
+
+ if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
+ /* for PQ, we want to have a straight line from last HW X point,
+ * and the slope to be such that we hit 1.0 at 10000 nits.
+ */
+ const struct fixed31_32 end_value =
+- dal_fixed31_32_from_int(125);
++ dc_fixpt_from_int(125);
+
+- arr_points[1].slope = dal_fixed31_32_div(
+- dal_fixed31_32_sub(dal_fixed31_32_one, arr_points[1].y),
+- dal_fixed31_32_sub(end_value, arr_points[1].x));
++ arr_points[1].slope = dc_fixpt_div(
++ dc_fixpt_sub(dc_fixpt_one, arr_points[1].y),
++ dc_fixpt_sub(end_value, arr_points[1].x));
+ }
+
+ lut_params->hw_points_num = hw_points;
+@@ -548,16 +548,16 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
+
+ i = 1;
+ while (i != hw_points + 1) {
+- if (dal_fixed31_32_lt(rgb_plus_1->red, rgb->red))
++ if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
+ rgb_plus_1->red = rgb->red;
+- if (dal_fixed31_32_lt(rgb_plus_1->green, rgb->green))
++ if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
+ rgb_plus_1->green = rgb->green;
+- if (dal_fixed31_32_lt(rgb_plus_1->blue, rgb->blue))
++ if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
+ rgb_plus_1->blue = rgb->blue;
+
+- rgb->delta_red = dal_fixed31_32_sub(rgb_plus_1->red, rgb->red);
+- rgb->delta_green = dal_fixed31_32_sub(rgb_plus_1->green, rgb->green);
+- rgb->delta_blue = dal_fixed31_32_sub(rgb_plus_1->blue, rgb->blue);
++ rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red);
++ rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
++ rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue);
+
+ ++rgb_plus_1;
+ ++rgb;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+index 8c4d9e5..20796da 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+@@ -130,7 +130,7 @@ void dpp_set_gamut_remap_bypass(struct dcn10_dpp *dpp)
+ /* Gamut remap in bypass */
+ }
+
+-#define IDENTITY_RATIO(ratio) (dal_fixed31_32_u2d19(ratio) == (1 << 19))
++#define IDENTITY_RATIO(ratio) (dc_fixpt_u2d19(ratio) == (1 << 19))
+
+
+ bool dpp_get_optimal_number_of_taps(
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
+index 4f373c9..116977e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
+@@ -811,13 +811,13 @@ void dpp1_program_input_lut(
+ REG_UPDATE(CM_IGAM_LUT_RW_INDEX, CM_IGAM_LUT_RW_INDEX, 0);
+ for (i = 0; i < gamma->num_entries; i++) {
+ REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR,
+- dal_fixed31_32_round(
++ dc_fixpt_round(
+ gamma->entries.red[i]));
+ REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR,
+- dal_fixed31_32_round(
++ dc_fixpt_round(
+ gamma->entries.green[i]));
+ REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR,
+- dal_fixed31_32_round(
++ dc_fixpt_round(
+ gamma->entries.blue[i]));
+ }
+ // Power off LUT memory
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+index 3eb824d..4ddd627 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+@@ -169,7 +169,7 @@ static enum dscl_mode_sel dpp1_dscl_get_dscl_mode(
+ const struct scaler_data *data,
+ bool dbg_always_scale)
+ {
+- const long long one = dal_fixed31_32_one.value;
++ const long long one = dc_fixpt_one.value;
+
+ if (dpp_base->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT) {
+ /* DSCL is processing data in fixed format */
+@@ -464,8 +464,8 @@ static enum lb_memory_config dpp1_dscl_find_lb_memory_config(struct dcn10_dpp *d
+ int num_part_y, num_part_c;
+ int vtaps = scl_data->taps.v_taps;
+ int vtaps_c = scl_data->taps.v_taps_c;
+- int ceil_vratio = dal_fixed31_32_ceil(scl_data->ratios.vert);
+- int ceil_vratio_c = dal_fixed31_32_ceil(scl_data->ratios.vert_c);
++ int ceil_vratio = dc_fixpt_ceil(scl_data->ratios.vert);
++ int ceil_vratio_c = dc_fixpt_ceil(scl_data->ratios.vert_c);
+ enum lb_memory_config mem_cfg = LB_MEMORY_CONFIG_0;
+
+ if (dpp->base.ctx->dc->debug.use_max_lb)
+@@ -565,52 +565,52 @@ static void dpp1_dscl_set_manual_ratio_init(
+ uint32_t init_int = 0;
+
+ REG_SET(SCL_HORZ_FILTER_SCALE_RATIO, 0,
+- SCL_H_SCALE_RATIO, dal_fixed31_32_u2d19(data->ratios.horz) << 5);
++ SCL_H_SCALE_RATIO, dc_fixpt_u2d19(data->ratios.horz) << 5);
+
+ REG_SET(SCL_VERT_FILTER_SCALE_RATIO, 0,
+- SCL_V_SCALE_RATIO, dal_fixed31_32_u2d19(data->ratios.vert) << 5);
++ SCL_V_SCALE_RATIO, dc_fixpt_u2d19(data->ratios.vert) << 5);
+
+ REG_SET(SCL_HORZ_FILTER_SCALE_RATIO_C, 0,
+- SCL_H_SCALE_RATIO_C, dal_fixed31_32_u2d19(data->ratios.horz_c) << 5);
++ SCL_H_SCALE_RATIO_C, dc_fixpt_u2d19(data->ratios.horz_c) << 5);
+
+ REG_SET(SCL_VERT_FILTER_SCALE_RATIO_C, 0,
+- SCL_V_SCALE_RATIO_C, dal_fixed31_32_u2d19(data->ratios.vert_c) << 5);
++ SCL_V_SCALE_RATIO_C, dc_fixpt_u2d19(data->ratios.vert_c) << 5);
+
+ /*
+ * 0.24 format for fraction, first five bits zeroed
+ */
+- init_frac = dal_fixed31_32_u0d19(data->inits.h) << 5;
+- init_int = dal_fixed31_32_floor(data->inits.h);
++ init_frac = dc_fixpt_u0d19(data->inits.h) << 5;
++ init_int = dc_fixpt_floor(data->inits.h);
+ REG_SET_2(SCL_HORZ_FILTER_INIT, 0,
+ SCL_H_INIT_FRAC, init_frac,
+ SCL_H_INIT_INT, init_int);
+
+- init_frac = dal_fixed31_32_u0d19(data->inits.h_c) << 5;
+- init_int = dal_fixed31_32_floor(data->inits.h_c);
++ init_frac = dc_fixpt_u0d19(data->inits.h_c) << 5;
++ init_int = dc_fixpt_floor(data->inits.h_c);
+ REG_SET_2(SCL_HORZ_FILTER_INIT_C, 0,
+ SCL_H_INIT_FRAC_C, init_frac,
+ SCL_H_INIT_INT_C, init_int);
+
+- init_frac = dal_fixed31_32_u0d19(data->inits.v) << 5;
+- init_int = dal_fixed31_32_floor(data->inits.v);
++ init_frac = dc_fixpt_u0d19(data->inits.v) << 5;
++ init_int = dc_fixpt_floor(data->inits.v);
+ REG_SET_2(SCL_VERT_FILTER_INIT, 0,
+ SCL_V_INIT_FRAC, init_frac,
+ SCL_V_INIT_INT, init_int);
+
+- init_frac = dal_fixed31_32_u0d19(data->inits.v_bot) << 5;
+- init_int = dal_fixed31_32_floor(data->inits.v_bot);
++ init_frac = dc_fixpt_u0d19(data->inits.v_bot) << 5;
++ init_int = dc_fixpt_floor(data->inits.v_bot);
+ REG_SET_2(SCL_VERT_FILTER_INIT_BOT, 0,
+ SCL_V_INIT_FRAC_BOT, init_frac,
+ SCL_V_INIT_INT_BOT, init_int);
+
+- init_frac = dal_fixed31_32_u0d19(data->inits.v_c) << 5;
+- init_int = dal_fixed31_32_floor(data->inits.v_c);
++ init_frac = dc_fixpt_u0d19(data->inits.v_c) << 5;
++ init_int = dc_fixpt_floor(data->inits.v_c);
+ REG_SET_2(SCL_VERT_FILTER_INIT_C, 0,
+ SCL_V_INIT_FRAC_C, init_frac,
+ SCL_V_INIT_INT_C, init_int);
+
+- init_frac = dal_fixed31_32_u0d19(data->inits.v_c_bot) << 5;
+- init_int = dal_fixed31_32_floor(data->inits.v_c_bot);
++ init_frac = dc_fixpt_u0d19(data->inits.v_c_bot) << 5;
++ init_int = dc_fixpt_floor(data->inits.v_c_bot);
+ REG_SET_2(SCL_VERT_FILTER_INIT_BOT_C, 0,
+ SCL_V_INIT_FRAC_BOT_C, init_frac,
+ SCL_V_INIT_INT_BOT_C, init_int);
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+index 0cbc83e..185f93b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+@@ -1054,8 +1054,8 @@ void hubp1_cursor_set_position(
+ ASSERT(param->h_scale_ratio.value);
+
+ if (param->h_scale_ratio.value)
+- dst_x_offset = dal_fixed31_32_floor(dal_fixed31_32_div(
+- dal_fixed31_32_from_int(dst_x_offset),
++ dst_x_offset = dc_fixpt_floor(dc_fixpt_div(
++ dc_fixpt_from_int(dst_x_offset),
+ param->h_scale_ratio));
+
+ if (src_x_offset >= (int)param->viewport_width)
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index b88e020..d3fc1a2 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -1700,22 +1700,22 @@ static uint16_t fixed_point_to_int_frac(
+
+ uint16_t result;
+
+- uint16_t d = (uint16_t)dal_fixed31_32_floor(
+- dal_fixed31_32_abs(
++ uint16_t d = (uint16_t)dc_fixpt_floor(
++ dc_fixpt_abs(
+ arg));
+
+ if (d <= (uint16_t)(1 << integer_bits) - (1 / (uint16_t)divisor))
+- numerator = (uint16_t)dal_fixed31_32_floor(
+- dal_fixed31_32_mul_int(
++ numerator = (uint16_t)dc_fixpt_floor(
++ dc_fixpt_mul_int(
+ arg,
+ divisor));
+ else {
+- numerator = dal_fixed31_32_floor(
+- dal_fixed31_32_sub(
+- dal_fixed31_32_from_int(
++ numerator = dc_fixpt_floor(
++ dc_fixpt_sub(
++ dc_fixpt_from_int(
+ 1LL << integer_bits),
+- dal_fixed31_32_recip(
+- dal_fixed31_32_from_int(
++ dc_fixpt_recip(
++ dc_fixpt_from_int(
+ divisor))));
+ }
+
+@@ -1725,8 +1725,8 @@ static uint16_t fixed_point_to_int_frac(
+ result = (uint16_t)(
+ (1 << (integer_bits + fractional_bits + 1)) + numerator);
+
+- if ((result != 0) && dal_fixed31_32_lt(
+- arg, dal_fixed31_32_zero))
++ if ((result != 0) && dc_fixpt_lt(
++ arg, dc_fixpt_zero))
+ result |= 1 << (integer_bits + fractional_bits);
+
+ return result;
+@@ -1740,8 +1740,8 @@ void build_prescale_params(struct dc_bias_and_scale *bias_and_scale,
+ && plane_state->input_csc_color_matrix.enable_adjustment
+ && plane_state->coeff_reduction_factor.value != 0) {
+ bias_and_scale->scale_blue = fixed_point_to_int_frac(
+- dal_fixed31_32_mul(plane_state->coeff_reduction_factor,
+- dal_fixed31_32_from_fraction(256, 255)),
++ dc_fixpt_mul(plane_state->coeff_reduction_factor,
++ dc_fixpt_from_fraction(256, 255)),
+ 2,
+ 13);
+ bias_and_scale->scale_red = bias_and_scale->scale_blue;
+@@ -2010,7 +2010,7 @@ static void dcn10_blank_pixel_data(
+
+ static void set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
+ {
+- struct fixed31_32 multiplier = dal_fixed31_32_from_fraction(
++ struct fixed31_32 multiplier = dc_fixpt_from_fraction(
+ pipe_ctx->plane_state->sdr_white_level, 80);
+ uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
+ struct custom_float_format fmt;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+index befd863..653b7b2 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+@@ -603,11 +603,11 @@ void enc1_stream_encoder_set_mst_bandwidth(
+ struct fixed31_32 avg_time_slots_per_mtp)
+ {
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+- uint32_t x = dal_fixed31_32_floor(
++ uint32_t x = dc_fixpt_floor(
+ avg_time_slots_per_mtp);
+- uint32_t y = dal_fixed31_32_ceil(
+- dal_fixed31_32_shl(
+- dal_fixed31_32_sub_int(
++ uint32_t y = dc_fixpt_ceil(
++ dc_fixpt_shl(
++ dc_fixpt_sub_int(
+ avg_time_slots_per_mtp,
+ x),
+ 26));
+diff --git a/drivers/gpu/drm/amd/display/dc/irq_types.h b/drivers/gpu/drm/amd/display/dc/irq_types.h
+index cc3b1bc..0b5f3a2 100644
+--- a/drivers/gpu/drm/amd/display/dc/irq_types.h
++++ b/drivers/gpu/drm/amd/display/dc/irq_types.h
+@@ -26,6 +26,8 @@
+ #ifndef __DAL_IRQ_TYPES_H__
+ #define __DAL_IRQ_TYPES_H__
+
++#include "os_types.h"
++
+ struct dc_context;
+
+ typedef void (*interrupt_handler)(void *);
+diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h
+index 16cbdb4..b5b8d7d 100644
+--- a/drivers/gpu/drm/amd/display/include/fixed31_32.h
++++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h
+@@ -50,16 +50,16 @@ struct fixed31_32 {
+ * Useful constants
+ */
+
+-static const struct fixed31_32 dal_fixed31_32_zero = { 0 };
+-static const struct fixed31_32 dal_fixed31_32_epsilon = { 1LL };
+-static const struct fixed31_32 dal_fixed31_32_half = { 0x80000000LL };
+-static const struct fixed31_32 dal_fixed31_32_one = { 0x100000000LL };
++static const struct fixed31_32 dc_fixpt_zero = { 0 };
++static const struct fixed31_32 dc_fixpt_epsilon = { 1LL };
++static const struct fixed31_32 dc_fixpt_half = { 0x80000000LL };
++static const struct fixed31_32 dc_fixpt_one = { 0x100000000LL };
+
+-static const struct fixed31_32 dal_fixed31_32_pi = { 13493037705LL };
+-static const struct fixed31_32 dal_fixed31_32_two_pi = { 26986075409LL };
+-static const struct fixed31_32 dal_fixed31_32_e = { 11674931555LL };
+-static const struct fixed31_32 dal_fixed31_32_ln2 = { 2977044471LL };
+-static const struct fixed31_32 dal_fixed31_32_ln2_div_2 = { 1488522236LL };
++static const struct fixed31_32 dc_fixpt_pi = { 13493037705LL };
++static const struct fixed31_32 dc_fixpt_two_pi = { 26986075409LL };
++static const struct fixed31_32 dc_fixpt_e = { 11674931555LL };
++static const struct fixed31_32 dc_fixpt_ln2 = { 2977044471LL };
++static const struct fixed31_32 dc_fixpt_ln2_div_2 = { 1488522236LL };
+
+ /*
+ * @brief
+@@ -70,7 +70,7 @@ static const struct fixed31_32 dal_fixed31_32_ln2_div_2 = { 1488522236LL };
+ * @brief
+ * result = numerator / denominator
+ */
+-struct fixed31_32 dal_fixed31_32_from_fraction(
++struct fixed31_32 dc_fixpt_from_fraction(
+ long long numerator,
+ long long denominator);
+
+@@ -78,8 +78,8 @@ struct fixed31_32 dal_fixed31_32_from_fraction(
+ * @brief
+ * result = arg
+ */
+-struct fixed31_32 dal_fixed31_32_from_int_nonconst(long long arg);
+-static inline struct fixed31_32 dal_fixed31_32_from_int(long long arg)
++struct fixed31_32 dc_fixpt_from_int_nonconst(long long arg);
++static inline struct fixed31_32 dc_fixpt_from_int(long long arg)
+ {
+ if (__builtin_constant_p(arg)) {
+ struct fixed31_32 res;
+@@ -87,7 +87,7 @@ static inline struct fixed31_32 dal_fixed31_32_from_int(long long arg)
+ res.value = arg << FIXED31_32_BITS_PER_FRACTIONAL_PART;
+ return res;
+ } else
+- return dal_fixed31_32_from_int_nonconst(arg);
++ return dc_fixpt_from_int_nonconst(arg);
+ }
+
+ /*
+@@ -99,7 +99,7 @@ static inline struct fixed31_32 dal_fixed31_32_from_int(long long arg)
+ * @brief
+ * result = -arg
+ */
+-static inline struct fixed31_32 dal_fixed31_32_neg(struct fixed31_32 arg)
++static inline struct fixed31_32 dc_fixpt_neg(struct fixed31_32 arg)
+ {
+ struct fixed31_32 res;
+
+@@ -112,10 +112,10 @@ static inline struct fixed31_32 dal_fixed31_32_neg(struct fixed31_32 arg)
+ * @brief
+ * result = abs(arg) := (arg >= 0) ? arg : -arg
+ */
+-static inline struct fixed31_32 dal_fixed31_32_abs(struct fixed31_32 arg)
++static inline struct fixed31_32 dc_fixpt_abs(struct fixed31_32 arg)
+ {
+ if (arg.value < 0)
+- return dal_fixed31_32_neg(arg);
++ return dc_fixpt_neg(arg);
+ else
+ return arg;
+ }
+@@ -129,7 +129,7 @@ static inline struct fixed31_32 dal_fixed31_32_abs(struct fixed31_32 arg)
+ * @brief
+ * result = arg1 < arg2
+ */
+-static inline bool dal_fixed31_32_lt(struct fixed31_32 arg1,
++static inline bool dc_fixpt_lt(struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+ {
+ return arg1.value < arg2.value;
+@@ -139,7 +139,7 @@ static inline bool dal_fixed31_32_lt(struct fixed31_32 arg1,
+ * @brief
+ * result = arg1 <= arg2
+ */
+-static inline bool dal_fixed31_32_le(struct fixed31_32 arg1,
++static inline bool dc_fixpt_le(struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+ {
+ return arg1.value <= arg2.value;
+@@ -149,7 +149,7 @@ static inline bool dal_fixed31_32_le(struct fixed31_32 arg1,
+ * @brief
+ * result = arg1 == arg2
+ */
+-static inline bool dal_fixed31_32_eq(struct fixed31_32 arg1,
++static inline bool dc_fixpt_eq(struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+ {
+ return arg1.value == arg2.value;
+@@ -159,7 +159,7 @@ static inline bool dal_fixed31_32_eq(struct fixed31_32 arg1,
+ * @brief
+ * result = min(arg1, arg2) := (arg1 <= arg2) ? arg1 : arg2
+ */
+-static inline struct fixed31_32 dal_fixed31_32_min(struct fixed31_32 arg1,
++static inline struct fixed31_32 dc_fixpt_min(struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+ {
+ if (arg1.value <= arg2.value)
+@@ -172,7 +172,7 @@ static inline struct fixed31_32 dal_fixed31_32_min(struct fixed31_32 arg1,
+ * @brief
+ * result = max(arg1, arg2) := (arg1 <= arg2) ? arg2 : arg1
+ */
+-static inline struct fixed31_32 dal_fixed31_32_max(struct fixed31_32 arg1,
++static inline struct fixed31_32 dc_fixpt_max(struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+ {
+ if (arg1.value <= arg2.value)
+@@ -187,14 +187,14 @@ static inline struct fixed31_32 dal_fixed31_32_max(struct fixed31_32 arg1,
+ * result = | arg, when min_value < arg < max_value
+ * | max_value, when arg >= max_value
+ */
+-static inline struct fixed31_32 dal_fixed31_32_clamp(
++static inline struct fixed31_32 dc_fixpt_clamp(
+ struct fixed31_32 arg,
+ struct fixed31_32 min_value,
+ struct fixed31_32 max_value)
+ {
+- if (dal_fixed31_32_le(arg, min_value))
++ if (dc_fixpt_le(arg, min_value))
+ return min_value;
+- else if (dal_fixed31_32_le(max_value, arg))
++ else if (dc_fixpt_le(max_value, arg))
+ return max_value;
+ else
+ return arg;
+@@ -209,7 +209,7 @@ static inline struct fixed31_32 dal_fixed31_32_clamp(
+ * @brief
+ * result = arg << shift
+ */
+-struct fixed31_32 dal_fixed31_32_shl(
++struct fixed31_32 dc_fixpt_shl(
+ struct fixed31_32 arg,
+ unsigned char shift);
+
+@@ -217,7 +217,7 @@ struct fixed31_32 dal_fixed31_32_shl(
+ * @brief
+ * result = arg >> shift
+ */
+-static inline struct fixed31_32 dal_fixed31_32_shr(
++static inline struct fixed31_32 dc_fixpt_shr(
+ struct fixed31_32 arg,
+ unsigned char shift)
+ {
+@@ -235,7 +235,7 @@ static inline struct fixed31_32 dal_fixed31_32_shr(
+ * @brief
+ * result = arg1 + arg2
+ */
+-struct fixed31_32 dal_fixed31_32_add(
++struct fixed31_32 dc_fixpt_add(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2);
+
+@@ -243,18 +243,18 @@ struct fixed31_32 dal_fixed31_32_add(
+ * @brief
+ * result = arg1 + arg2
+ */
+-static inline struct fixed31_32 dal_fixed31_32_add_int(struct fixed31_32 arg1,
++static inline struct fixed31_32 dc_fixpt_add_int(struct fixed31_32 arg1,
+ int arg2)
+ {
+- return dal_fixed31_32_add(arg1,
+- dal_fixed31_32_from_int(arg2));
++ return dc_fixpt_add(arg1,
++ dc_fixpt_from_int(arg2));
+ }
+
+ /*
+ * @brief
+ * result = arg1 - arg2
+ */
+-struct fixed31_32 dal_fixed31_32_sub(
++struct fixed31_32 dc_fixpt_sub(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2);
+
+@@ -262,11 +262,11 @@ struct fixed31_32 dal_fixed31_32_sub(
+ * @brief
+ * result = arg1 - arg2
+ */
+-static inline struct fixed31_32 dal_fixed31_32_sub_int(struct fixed31_32 arg1,
++static inline struct fixed31_32 dc_fixpt_sub_int(struct fixed31_32 arg1,
+ int arg2)
+ {
+- return dal_fixed31_32_sub(arg1,
+- dal_fixed31_32_from_int(arg2));
++ return dc_fixpt_sub(arg1,
++ dc_fixpt_from_int(arg2));
+ }
+
+
+@@ -279,7 +279,7 @@ static inline struct fixed31_32 dal_fixed31_32_sub_int(struct fixed31_32 arg1,
+ * @brief
+ * result = arg1 * arg2
+ */
+-struct fixed31_32 dal_fixed31_32_mul(
++struct fixed31_32 dc_fixpt_mul(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2);
+
+@@ -288,39 +288,39 @@ struct fixed31_32 dal_fixed31_32_mul(
+ * @brief
+ * result = arg1 * arg2
+ */
+-static inline struct fixed31_32 dal_fixed31_32_mul_int(struct fixed31_32 arg1,
++static inline struct fixed31_32 dc_fixpt_mul_int(struct fixed31_32 arg1,
+ int arg2)
+ {
+- return dal_fixed31_32_mul(arg1,
+- dal_fixed31_32_from_int(arg2));
++ return dc_fixpt_mul(arg1,
++ dc_fixpt_from_int(arg2));
+ }
+
+ /*
+ * @brief
+ * result = square(arg) := arg * arg
+ */
+-struct fixed31_32 dal_fixed31_32_sqr(
++struct fixed31_32 dc_fixpt_sqr(
+ struct fixed31_32 arg);
+
+ /*
+ * @brief
+ * result = arg1 / arg2
+ */
+-static inline struct fixed31_32 dal_fixed31_32_div_int(struct fixed31_32 arg1,
++static inline struct fixed31_32 dc_fixpt_div_int(struct fixed31_32 arg1,
+ long long arg2)
+ {
+- return dal_fixed31_32_from_fraction(arg1.value,
+- dal_fixed31_32_from_int(arg2).value);
++ return dc_fixpt_from_fraction(arg1.value,
++ dc_fixpt_from_int(arg2).value);
+ }
+
+ /*
+ * @brief
+ * result = arg1 / arg2
+ */
+-static inline struct fixed31_32 dal_fixed31_32_div(struct fixed31_32 arg1,
++static inline struct fixed31_32 dc_fixpt_div(struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+ {
+- return dal_fixed31_32_from_fraction(arg1.value,
++ return dc_fixpt_from_fraction(arg1.value,
+ arg2.value);
+ }
+
+@@ -336,7 +336,7 @@ static inline struct fixed31_32 dal_fixed31_32_div(struct fixed31_32 arg1,
+ * @note
+ * No special actions taken in case argument is zero.
+ */
+-struct fixed31_32 dal_fixed31_32_recip(
++struct fixed31_32 dc_fixpt_recip(
+ struct fixed31_32 arg);
+
+ /*
+@@ -352,7 +352,7 @@ struct fixed31_32 dal_fixed31_32_recip(
+ * Argument specified in radians,
+ * internally it's normalized to [-2pi...2pi] range.
+ */
+-struct fixed31_32 dal_fixed31_32_sinc(
++struct fixed31_32 dc_fixpt_sinc(
+ struct fixed31_32 arg);
+
+ /*
+@@ -363,7 +363,7 @@ struct fixed31_32 dal_fixed31_32_sinc(
+ * Argument specified in radians,
+ * internally it's normalized to [-2pi...2pi] range.
+ */
+-struct fixed31_32 dal_fixed31_32_sin(
++struct fixed31_32 dc_fixpt_sin(
+ struct fixed31_32 arg);
+
+ /*
+@@ -376,7 +376,7 @@ struct fixed31_32 dal_fixed31_32_sin(
+ * passing arguments outside that range
+ * will cause incorrect result!
+ */
+-struct fixed31_32 dal_fixed31_32_cos(
++struct fixed31_32 dc_fixpt_cos(
+ struct fixed31_32 arg);
+
+ /*
+@@ -391,7 +391,7 @@ struct fixed31_32 dal_fixed31_32_cos(
+ * @note
+ * Currently, function is verified for abs(arg) <= 1.
+ */
+-struct fixed31_32 dal_fixed31_32_exp(
++struct fixed31_32 dc_fixpt_exp(
+ struct fixed31_32 arg);
+
+ /*
+@@ -404,7 +404,7 @@ struct fixed31_32 dal_fixed31_32_exp(
+ * Currently, no special actions taken
+ * in case of invalid argument(s). Take care!
+ */
+-struct fixed31_32 dal_fixed31_32_log(
++struct fixed31_32 dc_fixpt_log(
+ struct fixed31_32 arg);
+
+ /*
+@@ -419,7 +419,7 @@ struct fixed31_32 dal_fixed31_32_log(
+ * @note
+ * Currently, abs(arg1) should be less than 1. Take care!
+ */
+-struct fixed31_32 dal_fixed31_32_pow(
++struct fixed31_32 dc_fixpt_pow(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2);
+
+@@ -432,21 +432,21 @@ struct fixed31_32 dal_fixed31_32_pow(
+ * @brief
+ * result = floor(arg) := greatest integer lower than or equal to arg
+ */
+-int dal_fixed31_32_floor(
++int dc_fixpt_floor(
+ struct fixed31_32 arg);
+
+ /*
+ * @brief
+ * result = round(arg) := integer nearest to arg
+ */
+-int dal_fixed31_32_round(
++int dc_fixpt_round(
+ struct fixed31_32 arg);
+
+ /*
+ * @brief
+ * result = ceil(arg) := lowest integer greater than or equal to arg
+ */
+-int dal_fixed31_32_ceil(
++int dc_fixpt_ceil(
+ struct fixed31_32 arg);
+
+ /* the following two function are used in scaler hw programming to convert fixed
+@@ -455,20 +455,20 @@ int dal_fixed31_32_ceil(
+ * fractional
+ */
+
+-unsigned int dal_fixed31_32_u2d19(
++unsigned int dc_fixpt_u2d19(
+ struct fixed31_32 arg);
+
+-unsigned int dal_fixed31_32_u0d19(
++unsigned int dc_fixpt_u0d19(
+ struct fixed31_32 arg);
+
+
+-unsigned int dal_fixed31_32_clamp_u0d14(
++unsigned int dc_fixpt_clamp_u0d14(
+ struct fixed31_32 arg);
+
+-unsigned int dal_fixed31_32_clamp_u0d10(
++unsigned int dc_fixpt_clamp_u0d10(
+ struct fixed31_32 arg);
+
+-int dal_fixed31_32_s4d19(
++int dc_fixpt_s4d19(
+ struct fixed31_32 arg);
+
+ #endif
+diff --git a/drivers/gpu/drm/amd/display/include/fixed32_32.h b/drivers/gpu/drm/amd/display/include/fixed32_32.h
+deleted file mode 100644
+index 9c70341..0000000
+--- a/drivers/gpu/drm/amd/display/include/fixed32_32.h
++++ /dev/null
+@@ -1,129 +0,0 @@
+-/*
+- * Copyright 2012-15 Advanced Micro Devices, Inc.
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included in
+- * all copies or substantial portions of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+- * OTHER DEALINGS IN THE SOFTWARE.
+- *
+- * Authors: AMD
+- *
+- */
+-
+-
+-#ifndef __DAL_FIXED32_32_H__
+-#define __DAL_FIXED32_32_H__
+-
+-#include "os_types.h"
+-
+-struct fixed32_32 {
+- uint64_t value;
+-};
+-
+-static const struct fixed32_32 dal_fixed32_32_zero = { 0 };
+-static const struct fixed32_32 dal_fixed32_32_one = { 0x100000000LL };
+-static const struct fixed32_32 dal_fixed32_32_half = { 0x80000000LL };
+-
+-struct fixed32_32 dal_fixed32_32_from_fraction(uint32_t n, uint32_t d);
+-static inline struct fixed32_32 dal_fixed32_32_from_int(uint32_t value)
+-{
+- struct fixed32_32 fx;
+-
+- fx.value = (uint64_t)value<<32;
+- return fx;
+-}
+-
+-struct fixed32_32 dal_fixed32_32_add(
+- struct fixed32_32 lhs,
+- struct fixed32_32 rhs);
+-struct fixed32_32 dal_fixed32_32_add_int(
+- struct fixed32_32 lhs,
+- uint32_t rhs);
+-struct fixed32_32 dal_fixed32_32_sub(
+- struct fixed32_32 lhs,
+- struct fixed32_32 rhs);
+-struct fixed32_32 dal_fixed32_32_sub_int(
+- struct fixed32_32 lhs,
+- uint32_t rhs);
+-struct fixed32_32 dal_fixed32_32_mul(
+- struct fixed32_32 lhs,
+- struct fixed32_32 rhs);
+-struct fixed32_32 dal_fixed32_32_mul_int(
+- struct fixed32_32 lhs,
+- uint32_t rhs);
+-struct fixed32_32 dal_fixed32_32_div(
+- struct fixed32_32 lhs,
+- struct fixed32_32 rhs);
+-struct fixed32_32 dal_fixed32_32_div_int(
+- struct fixed32_32 lhs,
+- uint32_t rhs);
+-
+-static inline struct fixed32_32 dal_fixed32_32_min(struct fixed32_32 lhs,
+- struct fixed32_32 rhs)
+-{
+- return (lhs.value < rhs.value) ? lhs : rhs;
+-}
+-
+-static inline struct fixed32_32 dal_fixed32_32_max(struct fixed32_32 lhs,
+- struct fixed32_32 rhs)
+-{
+- return (lhs.value > rhs.value) ? lhs : rhs;
+-}
+-
+-static inline bool dal_fixed32_32_gt(struct fixed32_32 lhs, struct fixed32_32 rhs)
+-{
+- return lhs.value > rhs.value;
+-}
+-
+-static inline bool dal_fixed32_32_gt_int(struct fixed32_32 lhs, uint32_t rhs)
+-{
+- return lhs.value > ((uint64_t)rhs<<32);
+-}
+-
+-static inline bool dal_fixed32_32_lt(struct fixed32_32 lhs, struct fixed32_32 rhs)
+-{
+- return lhs.value < rhs.value;
+-}
+-
+-static inline bool dal_fixed32_32_lt_int(struct fixed32_32 lhs, uint32_t rhs)
+-{
+- return lhs.value < ((uint64_t)rhs<<32);
+-}
+-
+-static inline bool dal_fixed32_32_le(struct fixed32_32 lhs, struct fixed32_32 rhs)
+-{
+- return lhs.value <= rhs.value;
+-}
+-
+-static inline bool dal_fixed32_32_le_int(struct fixed32_32 lhs, uint32_t rhs)
+-{
+- return lhs.value <= ((uint64_t)rhs<<32);
+-}
+-
+-static inline bool dal_fixed32_32_eq(struct fixed32_32 lhs, struct fixed32_32 rhs)
+-{
+- return lhs.value == rhs.value;
+-}
+-
+-uint32_t dal_fixed32_32_ceil(struct fixed32_32 value);
+-static inline uint32_t dal_fixed32_32_floor(struct fixed32_32 value)
+-{
+- return value.value>>32;
+-}
+-
+-uint32_t dal_fixed32_32_round(struct fixed32_32 value);
+-
+-#endif
+diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+index 15e5b72..29d2ec8 100644
+--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
++++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+@@ -43,7 +43,7 @@ static bool de_pq_initialized; /* = false; */
+ /* one-time setup of X points */
+ void setup_x_points_distribution(void)
+ {
+- struct fixed31_32 region_size = dal_fixed31_32_from_int(128);
++ struct fixed31_32 region_size = dc_fixpt_from_int(128);
+ int32_t segment;
+ uint32_t seg_offset;
+ uint32_t index;
+@@ -53,8 +53,8 @@ void setup_x_points_distribution(void)
+ coordinates_x[MAX_HW_POINTS + 1].x = region_size;
+
+ for (segment = 6; segment > (6 - NUM_REGIONS); segment--) {
+- region_size = dal_fixed31_32_div_int(region_size, 2);
+- increment = dal_fixed31_32_div_int(region_size,
++ region_size = dc_fixpt_div_int(region_size, 2);
++ increment = dc_fixpt_div_int(region_size,
+ NUM_PTS_IN_REGION);
+ seg_offset = (segment + (NUM_REGIONS - 7)) * NUM_PTS_IN_REGION;
+ coordinates_x[seg_offset].x = region_size;
+@@ -62,7 +62,7 @@ void setup_x_points_distribution(void)
+ for (index = seg_offset + 1;
+ index < seg_offset + NUM_PTS_IN_REGION;
+ index++) {
+- coordinates_x[index].x = dal_fixed31_32_add
++ coordinates_x[index].x = dc_fixpt_add
+ (coordinates_x[index-1].x, increment);
+ }
+ }
+@@ -72,63 +72,63 @@ static void compute_pq(struct fixed31_32 in_x, struct fixed31_32 *out_y)
+ {
+ /* consts for PQ gamma formula. */
+ const struct fixed31_32 m1 =
+- dal_fixed31_32_from_fraction(159301758, 1000000000);
++ dc_fixpt_from_fraction(159301758, 1000000000);
+ const struct fixed31_32 m2 =
+- dal_fixed31_32_from_fraction(7884375, 100000);
++ dc_fixpt_from_fraction(7884375, 100000);
+ const struct fixed31_32 c1 =
+- dal_fixed31_32_from_fraction(8359375, 10000000);
++ dc_fixpt_from_fraction(8359375, 10000000);
+ const struct fixed31_32 c2 =
+- dal_fixed31_32_from_fraction(188515625, 10000000);
++ dc_fixpt_from_fraction(188515625, 10000000);
+ const struct fixed31_32 c3 =
+- dal_fixed31_32_from_fraction(186875, 10000);
++ dc_fixpt_from_fraction(186875, 10000);
+
+ struct fixed31_32 l_pow_m1;
+ struct fixed31_32 base;
+
+- if (dal_fixed31_32_lt(in_x, dal_fixed31_32_zero))
+- in_x = dal_fixed31_32_zero;
++ if (dc_fixpt_lt(in_x, dc_fixpt_zero))
++ in_x = dc_fixpt_zero;
+
+- l_pow_m1 = dal_fixed31_32_pow(in_x, m1);
+- base = dal_fixed31_32_div(
+- dal_fixed31_32_add(c1,
+- (dal_fixed31_32_mul(c2, l_pow_m1))),
+- dal_fixed31_32_add(dal_fixed31_32_one,
+- (dal_fixed31_32_mul(c3, l_pow_m1))));
+- *out_y = dal_fixed31_32_pow(base, m2);
++ l_pow_m1 = dc_fixpt_pow(in_x, m1);
++ base = dc_fixpt_div(
++ dc_fixpt_add(c1,
++ (dc_fixpt_mul(c2, l_pow_m1))),
++ dc_fixpt_add(dc_fixpt_one,
++ (dc_fixpt_mul(c3, l_pow_m1))));
++ *out_y = dc_fixpt_pow(base, m2);
+ }
+
+ static void compute_de_pq(struct fixed31_32 in_x, struct fixed31_32 *out_y)
+ {
+ /* consts for dePQ gamma formula. */
+ const struct fixed31_32 m1 =
+- dal_fixed31_32_from_fraction(159301758, 1000000000);
++ dc_fixpt_from_fraction(159301758, 1000000000);
+ const struct fixed31_32 m2 =
+- dal_fixed31_32_from_fraction(7884375, 100000);
++ dc_fixpt_from_fraction(7884375, 100000);
+ const struct fixed31_32 c1 =
+- dal_fixed31_32_from_fraction(8359375, 10000000);
++ dc_fixpt_from_fraction(8359375, 10000000);
+ const struct fixed31_32 c2 =
+- dal_fixed31_32_from_fraction(188515625, 10000000);
++ dc_fixpt_from_fraction(188515625, 10000000);
+ const struct fixed31_32 c3 =
+- dal_fixed31_32_from_fraction(186875, 10000);
++ dc_fixpt_from_fraction(186875, 10000);
+
+ struct fixed31_32 l_pow_m1;
+ struct fixed31_32 base, div;
+
+
+- if (dal_fixed31_32_lt(in_x, dal_fixed31_32_zero))
+- in_x = dal_fixed31_32_zero;
++ if (dc_fixpt_lt(in_x, dc_fixpt_zero))
++ in_x = dc_fixpt_zero;
+
+- l_pow_m1 = dal_fixed31_32_pow(in_x,
+- dal_fixed31_32_div(dal_fixed31_32_one, m2));
+- base = dal_fixed31_32_sub(l_pow_m1, c1);
++ l_pow_m1 = dc_fixpt_pow(in_x,
++ dc_fixpt_div(dc_fixpt_one, m2));
++ base = dc_fixpt_sub(l_pow_m1, c1);
+
+- if (dal_fixed31_32_lt(base, dal_fixed31_32_zero))
+- base = dal_fixed31_32_zero;
++ if (dc_fixpt_lt(base, dc_fixpt_zero))
++ base = dc_fixpt_zero;
+
+- div = dal_fixed31_32_sub(c2, dal_fixed31_32_mul(c3, l_pow_m1));
++ div = dc_fixpt_sub(c2, dc_fixpt_mul(c3, l_pow_m1));
+
+- *out_y = dal_fixed31_32_pow(dal_fixed31_32_div(base, div),
+- dal_fixed31_32_div(dal_fixed31_32_one, m1));
++ *out_y = dc_fixpt_pow(dc_fixpt_div(base, div),
++ dc_fixpt_div(dc_fixpt_one, m1));
+
+ }
+ /* one-time pre-compute PQ values - only for sdr_white_level 80 */
+@@ -138,14 +138,14 @@ void precompute_pq(void)
+ struct fixed31_32 x;
+ const struct hw_x_point *coord_x = coordinates_x + 32;
+ struct fixed31_32 scaling_factor =
+- dal_fixed31_32_from_fraction(80, 10000);
++ dc_fixpt_from_fraction(80, 10000);
+
+ /* pow function has problems with arguments too small */
+ for (i = 0; i < 32; i++)
+- pq_table[i] = dal_fixed31_32_zero;
++ pq_table[i] = dc_fixpt_zero;
+
+ for (i = 32; i <= MAX_HW_POINTS; i++) {
+- x = dal_fixed31_32_mul(coord_x->x, scaling_factor);
++ x = dc_fixpt_mul(coord_x->x, scaling_factor);
+ compute_pq(x, &pq_table[i]);
+ ++coord_x;
+ }
+@@ -158,7 +158,7 @@ void precompute_de_pq(void)
+ struct fixed31_32 y;
+ uint32_t begin_index, end_index;
+
+- struct fixed31_32 scaling_factor = dal_fixed31_32_from_int(125);
++ struct fixed31_32 scaling_factor = dc_fixpt_from_int(125);
+
+ /* X points is 2^-25 to 2^7
+ * De-gamma X is 2^-12 to 2^0 – we are skipping first -12-(-25) = 13 regions
+@@ -167,11 +167,11 @@ void precompute_de_pq(void)
+ end_index = begin_index + 12 * NUM_PTS_IN_REGION;
+
+ for (i = 0; i <= begin_index; i++)
+- de_pq_table[i] = dal_fixed31_32_zero;
++ de_pq_table[i] = dc_fixpt_zero;
+
+ for (; i <= end_index; i++) {
+ compute_de_pq(coordinates_x[i].x, &y);
+- de_pq_table[i] = dal_fixed31_32_mul(y, scaling_factor);
++ de_pq_table[i] = dc_fixpt_mul(y, scaling_factor);
+ }
+
+ for (; i <= MAX_HW_POINTS; i++)
+@@ -195,15 +195,15 @@ static void build_coefficients(struct gamma_coefficients *coefficients, bool is_
+ uint32_t index = is_2_4 == true ? 0:1;
+
+ do {
+- coefficients->a0[i] = dal_fixed31_32_from_fraction(
++ coefficients->a0[i] = dc_fixpt_from_fraction(
+ numerator01[index], 10000000);
+- coefficients->a1[i] = dal_fixed31_32_from_fraction(
++ coefficients->a1[i] = dc_fixpt_from_fraction(
+ numerator02[index], 1000);
+- coefficients->a2[i] = dal_fixed31_32_from_fraction(
++ coefficients->a2[i] = dc_fixpt_from_fraction(
+ numerator03[index], 1000);
+- coefficients->a3[i] = dal_fixed31_32_from_fraction(
++ coefficients->a3[i] = dc_fixpt_from_fraction(
+ numerator04[index], 1000);
+- coefficients->user_gamma[i] = dal_fixed31_32_from_fraction(
++ coefficients->user_gamma[i] = dc_fixpt_from_fraction(
+ numerator05[index], 1000);
+
+ ++i;
+@@ -218,33 +218,33 @@ static struct fixed31_32 translate_from_linear_space(
+ struct fixed31_32 a3,
+ struct fixed31_32 gamma)
+ {
+- const struct fixed31_32 one = dal_fixed31_32_from_int(1);
++ const struct fixed31_32 one = dc_fixpt_from_int(1);
+
+- if (dal_fixed31_32_lt(one, arg))
++ if (dc_fixpt_lt(one, arg))
+ return one;
+
+- if (dal_fixed31_32_le(arg, dal_fixed31_32_neg(a0)))
+- return dal_fixed31_32_sub(
++ if (dc_fixpt_le(arg, dc_fixpt_neg(a0)))
++ return dc_fixpt_sub(
+ a2,
+- dal_fixed31_32_mul(
+- dal_fixed31_32_add(
++ dc_fixpt_mul(
++ dc_fixpt_add(
+ one,
+ a3),
+- dal_fixed31_32_pow(
+- dal_fixed31_32_neg(arg),
+- dal_fixed31_32_recip(gamma))));
+- else if (dal_fixed31_32_le(a0, arg))
+- return dal_fixed31_32_sub(
+- dal_fixed31_32_mul(
+- dal_fixed31_32_add(
++ dc_fixpt_pow(
++ dc_fixpt_neg(arg),
++ dc_fixpt_recip(gamma))));
++ else if (dc_fixpt_le(a0, arg))
++ return dc_fixpt_sub(
++ dc_fixpt_mul(
++ dc_fixpt_add(
+ one,
+ a3),
+- dal_fixed31_32_pow(
++ dc_fixpt_pow(
+ arg,
+- dal_fixed31_32_recip(gamma))),
++ dc_fixpt_recip(gamma))),
+ a2);
+ else
+- return dal_fixed31_32_mul(
++ return dc_fixpt_mul(
+ arg,
+ a1);
+ }
+@@ -259,25 +259,25 @@ static struct fixed31_32 translate_to_linear_space(
+ {
+ struct fixed31_32 linear;
+
+- a0 = dal_fixed31_32_mul(a0, a1);
+- if (dal_fixed31_32_le(arg, dal_fixed31_32_neg(a0)))
++ a0 = dc_fixpt_mul(a0, a1);
++ if (dc_fixpt_le(arg, dc_fixpt_neg(a0)))
+
+- linear = dal_fixed31_32_neg(
+- dal_fixed31_32_pow(
+- dal_fixed31_32_div(
+- dal_fixed31_32_sub(a2, arg),
+- dal_fixed31_32_add(
+- dal_fixed31_32_one, a3)), gamma));
++ linear = dc_fixpt_neg(
++ dc_fixpt_pow(
++ dc_fixpt_div(
++ dc_fixpt_sub(a2, arg),
++ dc_fixpt_add(
++ dc_fixpt_one, a3)), gamma));
+
+- else if (dal_fixed31_32_le(dal_fixed31_32_neg(a0), arg) &&
+- dal_fixed31_32_le(arg, a0))
+- linear = dal_fixed31_32_div(arg, a1);
++ else if (dc_fixpt_le(dc_fixpt_neg(a0), arg) &&
++ dc_fixpt_le(arg, a0))
++ linear = dc_fixpt_div(arg, a1);
+ else
+- linear = dal_fixed31_32_pow(
+- dal_fixed31_32_div(
+- dal_fixed31_32_add(a2, arg),
+- dal_fixed31_32_add(
+- dal_fixed31_32_one, a3)), gamma);
++ linear = dc_fixpt_pow(
++ dc_fixpt_div(
++ dc_fixpt_add(a2, arg),
++ dc_fixpt_add(
++ dc_fixpt_one, a3)), gamma);
+
+ return linear;
+ }
+@@ -352,8 +352,8 @@ static bool find_software_points(
+ right = axis_x[max_number - 1].b;
+ }
+
+- if (dal_fixed31_32_le(left, hw_point) &&
+- dal_fixed31_32_le(hw_point, right)) {
++ if (dc_fixpt_le(left, hw_point) &&
++ dc_fixpt_le(hw_point, right)) {
+ *index_to_start = i;
+ *index_left = i;
+
+@@ -366,7 +366,7 @@ static bool find_software_points(
+
+ return true;
+ } else if ((i == *index_to_start) &&
+- dal_fixed31_32_le(hw_point, left)) {
++ dc_fixpt_le(hw_point, left)) {
+ *index_to_start = i;
+ *index_left = i;
+ *index_right = i;
+@@ -375,7 +375,7 @@ static bool find_software_points(
+
+ return true;
+ } else if ((i == max_number - 1) &&
+- dal_fixed31_32_le(right, hw_point)) {
++ dc_fixpt_le(right, hw_point)) {
+ *index_to_start = i;
+ *index_left = i;
+ *index_right = i;
+@@ -457,17 +457,17 @@ static bool build_custom_gamma_mapping_coefficients_worker(
+ }
+
+ if (hw_pos == HW_POINT_POSITION_MIDDLE)
+- point->coeff = dal_fixed31_32_div(
+- dal_fixed31_32_sub(
++ point->coeff = dc_fixpt_div(
++ dc_fixpt_sub(
+ coord_x,
+ left_pos),
+- dal_fixed31_32_sub(
++ dc_fixpt_sub(
+ right_pos,
+ left_pos));
+ else if (hw_pos == HW_POINT_POSITION_LEFT)
+- point->coeff = dal_fixed31_32_zero;
++ point->coeff = dc_fixpt_zero;
+ else if (hw_pos == HW_POINT_POSITION_RIGHT)
+- point->coeff = dal_fixed31_32_from_int(2);
++ point->coeff = dc_fixpt_from_int(2);
+ else {
+ BREAK_TO_DEBUGGER();
+ return false;
+@@ -502,45 +502,45 @@ static struct fixed31_32 calculate_mapped_value(
+
+ if ((point->left_index < 0) || (point->left_index > max_index)) {
+ BREAK_TO_DEBUGGER();
+- return dal_fixed31_32_zero;
++ return dc_fixpt_zero;
+ }
+
+ if ((point->right_index < 0) || (point->right_index > max_index)) {
+ BREAK_TO_DEBUGGER();
+- return dal_fixed31_32_zero;
++ return dc_fixpt_zero;
+ }
+
+ if (point->pos == HW_POINT_POSITION_MIDDLE)
+ if (channel == CHANNEL_NAME_RED)
+- result = dal_fixed31_32_add(
+- dal_fixed31_32_mul(
++ result = dc_fixpt_add(
++ dc_fixpt_mul(
+ point->coeff,
+- dal_fixed31_32_sub(
++ dc_fixpt_sub(
+ rgb[point->right_index].r,
+ rgb[point->left_index].r)),
+ rgb[point->left_index].r);
+ else if (channel == CHANNEL_NAME_GREEN)
+- result = dal_fixed31_32_add(
+- dal_fixed31_32_mul(
++ result = dc_fixpt_add(
++ dc_fixpt_mul(
+ point->coeff,
+- dal_fixed31_32_sub(
++ dc_fixpt_sub(
+ rgb[point->right_index].g,
+ rgb[point->left_index].g)),
+ rgb[point->left_index].g);
+ else
+- result = dal_fixed31_32_add(
+- dal_fixed31_32_mul(
++ result = dc_fixpt_add(
++ dc_fixpt_mul(
+ point->coeff,
+- dal_fixed31_32_sub(
++ dc_fixpt_sub(
+ rgb[point->right_index].b,
+ rgb[point->left_index].b)),
+ rgb[point->left_index].b);
+ else if (point->pos == HW_POINT_POSITION_LEFT) {
+ BREAK_TO_DEBUGGER();
+- result = dal_fixed31_32_zero;
++ result = dc_fixpt_zero;
+ } else {
+ BREAK_TO_DEBUGGER();
+- result = dal_fixed31_32_one;
++ result = dc_fixpt_one;
+ }
+
+ return result;
+@@ -558,7 +558,7 @@ static void build_pq(struct pwl_float_data_ex *rgb_regamma,
+ struct fixed31_32 x;
+ struct fixed31_32 output;
+ struct fixed31_32 scaling_factor =
+- dal_fixed31_32_from_fraction(sdr_white_level, 10000);
++ dc_fixpt_from_fraction(sdr_white_level, 10000);
+
+ if (!pq_initialized && sdr_white_level == 80) {
+ precompute_pq();
+@@ -579,15 +579,15 @@ static void build_pq(struct pwl_float_data_ex *rgb_regamma,
+ if (sdr_white_level == 80) {
+ output = pq_table[i];
+ } else {
+- x = dal_fixed31_32_mul(coord_x->x, scaling_factor);
++ x = dc_fixpt_mul(coord_x->x, scaling_factor);
+ compute_pq(x, &output);
+ }
+
+ /* should really not happen? */
+- if (dal_fixed31_32_lt(output, dal_fixed31_32_zero))
+- output = dal_fixed31_32_zero;
+- else if (dal_fixed31_32_lt(dal_fixed31_32_one, output))
+- output = dal_fixed31_32_one;
++ if (dc_fixpt_lt(output, dc_fixpt_zero))
++ output = dc_fixpt_zero;
++ else if (dc_fixpt_lt(dc_fixpt_one, output))
++ output = dc_fixpt_one;
+
+ rgb->r = output;
+ rgb->g = output;
+@@ -605,7 +605,7 @@ static void build_de_pq(struct pwl_float_data_ex *de_pq,
+ uint32_t i;
+ struct fixed31_32 output;
+
+- struct fixed31_32 scaling_factor = dal_fixed31_32_from_int(125);
++ struct fixed31_32 scaling_factor = dc_fixpt_from_int(125);
+
+ if (!de_pq_initialized) {
+ precompute_de_pq();
+@@ -616,9 +616,9 @@ static void build_de_pq(struct pwl_float_data_ex *de_pq,
+ for (i = 0; i <= hw_points_num; i++) {
+ output = de_pq_table[i];
+ /* should really not happen? */
+- if (dal_fixed31_32_lt(output, dal_fixed31_32_zero))
+- output = dal_fixed31_32_zero;
+- else if (dal_fixed31_32_lt(scaling_factor, output))
++ if (dc_fixpt_lt(output, dc_fixpt_zero))
++ output = dc_fixpt_zero;
++ else if (dc_fixpt_lt(scaling_factor, output))
+ output = scaling_factor;
+ de_pq[i].r = output;
+ de_pq[i].g = output;
+@@ -670,9 +670,9 @@ static void build_degamma(struct pwl_float_data_ex *curve,
+ end_index = begin_index + 12 * NUM_PTS_IN_REGION;
+
+ while (i != begin_index) {
+- curve[i].r = dal_fixed31_32_zero;
+- curve[i].g = dal_fixed31_32_zero;
+- curve[i].b = dal_fixed31_32_zero;
++ curve[i].r = dc_fixpt_zero;
++ curve[i].g = dc_fixpt_zero;
++ curve[i].b = dc_fixpt_zero;
+ i++;
+ }
+
+@@ -684,9 +684,9 @@ static void build_degamma(struct pwl_float_data_ex *curve,
+ i++;
+ }
+ while (i != hw_points_num + 1) {
+- curve[i].r = dal_fixed31_32_one;
+- curve[i].g = dal_fixed31_32_one;
+- curve[i].b = dal_fixed31_32_one;
++ curve[i].r = dc_fixpt_one;
++ curve[i].g = dc_fixpt_one;
++ curve[i].b = dc_fixpt_one;
+ i++;
+ }
+ }
+@@ -695,8 +695,8 @@ static void scale_gamma(struct pwl_float_data *pwl_rgb,
+ const struct dc_gamma *ramp,
+ struct dividers dividers)
+ {
+- const struct fixed31_32 max_driver = dal_fixed31_32_from_int(0xFFFF);
+- const struct fixed31_32 max_os = dal_fixed31_32_from_int(0xFF00);
++ const struct fixed31_32 max_driver = dc_fixpt_from_int(0xFFFF);
++ const struct fixed31_32 max_os = dc_fixpt_from_int(0xFF00);
+ struct fixed31_32 scaler = max_os;
+ uint32_t i;
+ struct pwl_float_data *rgb = pwl_rgb;
+@@ -705,9 +705,9 @@ static void scale_gamma(struct pwl_float_data *pwl_rgb,
+ i = 0;
+
+ do {
+- if (dal_fixed31_32_lt(max_os, ramp->entries.red[i]) ||
+- dal_fixed31_32_lt(max_os, ramp->entries.green[i]) ||
+- dal_fixed31_32_lt(max_os, ramp->entries.blue[i])) {
++ if (dc_fixpt_lt(max_os, ramp->entries.red[i]) ||
++ dc_fixpt_lt(max_os, ramp->entries.green[i]) ||
++ dc_fixpt_lt(max_os, ramp->entries.blue[i])) {
+ scaler = max_driver;
+ break;
+ }
+@@ -717,40 +717,40 @@ static void scale_gamma(struct pwl_float_data *pwl_rgb,
+ i = 0;
+
+ do {
+- rgb->r = dal_fixed31_32_div(
++ rgb->r = dc_fixpt_div(
+ ramp->entries.red[i], scaler);
+- rgb->g = dal_fixed31_32_div(
++ rgb->g = dc_fixpt_div(
+ ramp->entries.green[i], scaler);
+- rgb->b = dal_fixed31_32_div(
++ rgb->b = dc_fixpt_div(
+ ramp->entries.blue[i], scaler);
+
+ ++rgb;
+ ++i;
+ } while (i != ramp->num_entries);
+
+- rgb->r = dal_fixed31_32_mul(rgb_last->r,
++ rgb->r = dc_fixpt_mul(rgb_last->r,
+ dividers.divider1);
+- rgb->g = dal_fixed31_32_mul(rgb_last->g,
++ rgb->g = dc_fixpt_mul(rgb_last->g,
+ dividers.divider1);
+- rgb->b = dal_fixed31_32_mul(rgb_last->b,
++ rgb->b = dc_fixpt_mul(rgb_last->b,
+ dividers.divider1);
+
+ ++rgb;
+
+- rgb->r = dal_fixed31_32_mul(rgb_last->r,
++ rgb->r = dc_fixpt_mul(rgb_last->r,
+ dividers.divider2);
+- rgb->g = dal_fixed31_32_mul(rgb_last->g,
++ rgb->g = dc_fixpt_mul(rgb_last->g,
+ dividers.divider2);
+- rgb->b = dal_fixed31_32_mul(rgb_last->b,
++ rgb->b = dc_fixpt_mul(rgb_last->b,
+ dividers.divider2);
+
+ ++rgb;
+
+- rgb->r = dal_fixed31_32_mul(rgb_last->r,
++ rgb->r = dc_fixpt_mul(rgb_last->r,
+ dividers.divider3);
+- rgb->g = dal_fixed31_32_mul(rgb_last->g,
++ rgb->g = dc_fixpt_mul(rgb_last->g,
+ dividers.divider3);
+- rgb->b = dal_fixed31_32_mul(rgb_last->b,
++ rgb->b = dc_fixpt_mul(rgb_last->b,
+ dividers.divider3);
+ }
+
+@@ -759,62 +759,62 @@ static void scale_gamma_dx(struct pwl_float_data *pwl_rgb,
+ struct dividers dividers)
+ {
+ uint32_t i;
+- struct fixed31_32 min = dal_fixed31_32_zero;
+- struct fixed31_32 max = dal_fixed31_32_one;
++ struct fixed31_32 min = dc_fixpt_zero;
++ struct fixed31_32 max = dc_fixpt_one;
+
+- struct fixed31_32 delta = dal_fixed31_32_zero;
+- struct fixed31_32 offset = dal_fixed31_32_zero;
++ struct fixed31_32 delta = dc_fixpt_zero;
++ struct fixed31_32 offset = dc_fixpt_zero;
+
+ for (i = 0 ; i < ramp->num_entries; i++) {
+- if (dal_fixed31_32_lt(ramp->entries.red[i], min))
++ if (dc_fixpt_lt(ramp->entries.red[i], min))
+ min = ramp->entries.red[i];
+
+- if (dal_fixed31_32_lt(ramp->entries.green[i], min))
++ if (dc_fixpt_lt(ramp->entries.green[i], min))
+ min = ramp->entries.green[i];
+
+- if (dal_fixed31_32_lt(ramp->entries.blue[i], min))
++ if (dc_fixpt_lt(ramp->entries.blue[i], min))
+ min = ramp->entries.blue[i];
+
+- if (dal_fixed31_32_lt(max, ramp->entries.red[i]))
++ if (dc_fixpt_lt(max, ramp->entries.red[i]))
+ max = ramp->entries.red[i];
+
+- if (dal_fixed31_32_lt(max, ramp->entries.green[i]))
++ if (dc_fixpt_lt(max, ramp->entries.green[i]))
+ max = ramp->entries.green[i];
+
+- if (dal_fixed31_32_lt(max, ramp->entries.blue[i]))
++ if (dc_fixpt_lt(max, ramp->entries.blue[i]))
+ max = ramp->entries.blue[i];
+ }
+
+- if (dal_fixed31_32_lt(min, dal_fixed31_32_zero))
+- delta = dal_fixed31_32_neg(min);
++ if (dc_fixpt_lt(min, dc_fixpt_zero))
++ delta = dc_fixpt_neg(min);
+
+- offset = dal_fixed31_32_add(min, max);
++ offset = dc_fixpt_add(min, max);
+
+ for (i = 0 ; i < ramp->num_entries; i++) {
+- pwl_rgb[i].r = dal_fixed31_32_div(
+- dal_fixed31_32_add(
++ pwl_rgb[i].r = dc_fixpt_div(
++ dc_fixpt_add(
+ ramp->entries.red[i], delta), offset);
+- pwl_rgb[i].g = dal_fixed31_32_div(
+- dal_fixed31_32_add(
++ pwl_rgb[i].g = dc_fixpt_div(
++ dc_fixpt_add(
+ ramp->entries.green[i], delta), offset);
+- pwl_rgb[i].b = dal_fixed31_32_div(
+- dal_fixed31_32_add(
++ pwl_rgb[i].b = dc_fixpt_div(
++ dc_fixpt_add(
+ ramp->entries.blue[i], delta), offset);
+
+ }
+
+- pwl_rgb[i].r = dal_fixed31_32_sub(dal_fixed31_32_mul_int(
++ pwl_rgb[i].r = dc_fixpt_sub(dc_fixpt_mul_int(
+ pwl_rgb[i-1].r, 2), pwl_rgb[i-2].r);
+- pwl_rgb[i].g = dal_fixed31_32_sub(dal_fixed31_32_mul_int(
++ pwl_rgb[i].g = dc_fixpt_sub(dc_fixpt_mul_int(
+ pwl_rgb[i-1].g, 2), pwl_rgb[i-2].g);
+- pwl_rgb[i].b = dal_fixed31_32_sub(dal_fixed31_32_mul_int(
++ pwl_rgb[i].b = dc_fixpt_sub(dc_fixpt_mul_int(
+ pwl_rgb[i-1].b, 2), pwl_rgb[i-2].b);
+ ++i;
+- pwl_rgb[i].r = dal_fixed31_32_sub(dal_fixed31_32_mul_int(
++ pwl_rgb[i].r = dc_fixpt_sub(dc_fixpt_mul_int(
+ pwl_rgb[i-1].r, 2), pwl_rgb[i-2].r);
+- pwl_rgb[i].g = dal_fixed31_32_sub(dal_fixed31_32_mul_int(
++ pwl_rgb[i].g = dc_fixpt_sub(dc_fixpt_mul_int(
+ pwl_rgb[i-1].g, 2), pwl_rgb[i-2].g);
+- pwl_rgb[i].b = dal_fixed31_32_sub(dal_fixed31_32_mul_int(
++ pwl_rgb[i].b = dc_fixpt_sub(dc_fixpt_mul_int(
+ pwl_rgb[i-1].b, 2), pwl_rgb[i-2].b);
+ }
+
+@@ -846,40 +846,40 @@ static void scale_user_regamma_ramp(struct pwl_float_data *pwl_rgb,
+
+ i = 0;
+ do {
+- rgb->r = dal_fixed31_32_from_fraction(
++ rgb->r = dc_fixpt_from_fraction(
+ ramp->gamma[i], scaler);
+- rgb->g = dal_fixed31_32_from_fraction(
++ rgb->g = dc_fixpt_from_fraction(
+ ramp->gamma[i + 256], scaler);
+- rgb->b = dal_fixed31_32_from_fraction(
++ rgb->b = dc_fixpt_from_fraction(
+ ramp->gamma[i + 512], scaler);
+
+ ++rgb;
+ ++i;
+ } while (i != GAMMA_RGB_256_ENTRIES);
+
+- rgb->r = dal_fixed31_32_mul(rgb_last->r,
++ rgb->r = dc_fixpt_mul(rgb_last->r,
+ dividers.divider1);
+- rgb->g = dal_fixed31_32_mul(rgb_last->g,
++ rgb->g = dc_fixpt_mul(rgb_last->g,
+ dividers.divider1);
+- rgb->b = dal_fixed31_32_mul(rgb_last->b,
++ rgb->b = dc_fixpt_mul(rgb_last->b,
+ dividers.divider1);
+
+ ++rgb;
+
+- rgb->r = dal_fixed31_32_mul(rgb_last->r,
++ rgb->r = dc_fixpt_mul(rgb_last->r,
+ dividers.divider2);
+- rgb->g = dal_fixed31_32_mul(rgb_last->g,
++ rgb->g = dc_fixpt_mul(rgb_last->g,
+ dividers.divider2);
+- rgb->b = dal_fixed31_32_mul(rgb_last->b,
++ rgb->b = dc_fixpt_mul(rgb_last->b,
+ dividers.divider2);
+
+ ++rgb;
+
+- rgb->r = dal_fixed31_32_mul(rgb_last->r,
++ rgb->r = dc_fixpt_mul(rgb_last->r,
+ dividers.divider3);
+- rgb->g = dal_fixed31_32_mul(rgb_last->g,
++ rgb->g = dc_fixpt_mul(rgb_last->g,
+ dividers.divider3);
+- rgb->b = dal_fixed31_32_mul(rgb_last->b,
++ rgb->b = dc_fixpt_mul(rgb_last->b,
+ dividers.divider3);
+ }
+
+@@ -913,7 +913,7 @@ static void apply_lut_1d(
+ struct fixed31_32 lut2;
+ const int max_lut_index = 4095;
+ const struct fixed31_32 max_lut_index_f =
+- dal_fixed31_32_from_int_nonconst(max_lut_index);
++ dc_fixpt_from_int_nonconst(max_lut_index);
+ int32_t index = 0, index_next = 0;
+ struct fixed31_32 index_f;
+ struct fixed31_32 delta_lut;
+@@ -931,10 +931,10 @@ static void apply_lut_1d(
+ else
+ regamma_y = &tf_pts->blue[i];
+
+- norm_y = dal_fixed31_32_mul(max_lut_index_f,
++ norm_y = dc_fixpt_mul(max_lut_index_f,
+ *regamma_y);
+- index = dal_fixed31_32_floor(norm_y);
+- index_f = dal_fixed31_32_from_int_nonconst(index);
++ index = dc_fixpt_floor(norm_y);
++ index_f = dc_fixpt_from_int_nonconst(index);
+
+ if (index < 0 || index > max_lut_index)
+ continue;
+@@ -953,11 +953,11 @@ static void apply_lut_1d(
+ }
+
+ // we have everything now, so interpolate
+- delta_lut = dal_fixed31_32_sub(lut2, lut1);
+- delta_index = dal_fixed31_32_sub(norm_y, index_f);
++ delta_lut = dc_fixpt_sub(lut2, lut1);
++ delta_index = dc_fixpt_sub(norm_y, index_f);
+
+- *regamma_y = dal_fixed31_32_add(lut1,
+- dal_fixed31_32_mul(delta_index, delta_lut));
++ *regamma_y = dc_fixpt_add(lut1,
++ dc_fixpt_mul(delta_index, delta_lut));
+ }
+ }
+ }
+@@ -973,7 +973,7 @@ static void build_evenly_distributed_points(
+ uint32_t i = 0;
+
+ do {
+- struct fixed31_32 value = dal_fixed31_32_from_fraction(i,
++ struct fixed31_32 value = dc_fixpt_from_fraction(i,
+ numberof_points - 1);
+
+ p->r = value;
+@@ -984,21 +984,21 @@ static void build_evenly_distributed_points(
+ ++i;
+ } while (i != numberof_points);
+
+- p->r = dal_fixed31_32_div(p_last->r, dividers.divider1);
+- p->g = dal_fixed31_32_div(p_last->g, dividers.divider1);
+- p->b = dal_fixed31_32_div(p_last->b, dividers.divider1);
++ p->r = dc_fixpt_div(p_last->r, dividers.divider1);
++ p->g = dc_fixpt_div(p_last->g, dividers.divider1);
++ p->b = dc_fixpt_div(p_last->b, dividers.divider1);
+
+ ++p;
+
+- p->r = dal_fixed31_32_div(p_last->r, dividers.divider2);
+- p->g = dal_fixed31_32_div(p_last->g, dividers.divider2);
+- p->b = dal_fixed31_32_div(p_last->b, dividers.divider2);
++ p->r = dc_fixpt_div(p_last->r, dividers.divider2);
++ p->g = dc_fixpt_div(p_last->g, dividers.divider2);
++ p->b = dc_fixpt_div(p_last->b, dividers.divider2);
+
+ ++p;
+
+- p->r = dal_fixed31_32_div(p_last->r, dividers.divider3);
+- p->g = dal_fixed31_32_div(p_last->g, dividers.divider3);
+- p->b = dal_fixed31_32_div(p_last->b, dividers.divider3);
++ p->r = dc_fixpt_div(p_last->r, dividers.divider3);
++ p->g = dc_fixpt_div(p_last->g, dividers.divider3);
++ p->b = dc_fixpt_div(p_last->b, dividers.divider3);
+ }
+
+ static inline void copy_rgb_regamma_to_coordinates_x(
+@@ -1094,7 +1094,7 @@ static void interpolate_user_regamma(uint32_t hw_points_num,
+ struct fixed31_32 *tf_point;
+ struct fixed31_32 hw_x;
+ struct fixed31_32 norm_factor =
+- dal_fixed31_32_from_int_nonconst(255);
++ dc_fixpt_from_int_nonconst(255);
+ struct fixed31_32 norm_x;
+ struct fixed31_32 index_f;
+ struct fixed31_32 lut1;
+@@ -1105,9 +1105,9 @@ static void interpolate_user_regamma(uint32_t hw_points_num,
+ i = 0;
+ /* fixed_pt library has problems handling too small values */
+ while (i != 32) {
+- tf_pts->red[i] = dal_fixed31_32_zero;
+- tf_pts->green[i] = dal_fixed31_32_zero;
+- tf_pts->blue[i] = dal_fixed31_32_zero;
++ tf_pts->red[i] = dc_fixpt_zero;
++ tf_pts->green[i] = dc_fixpt_zero;
++ tf_pts->blue[i] = dc_fixpt_zero;
+ ++i;
+ }
+ while (i <= hw_points_num + 1) {
+@@ -1129,12 +1129,12 @@ static void interpolate_user_regamma(uint32_t hw_points_num,
+ } else
+ hw_x = coordinates_x[i].x;
+
+- norm_x = dal_fixed31_32_mul(norm_factor, hw_x);
+- index = dal_fixed31_32_floor(norm_x);
++ norm_x = dc_fixpt_mul(norm_factor, hw_x);
++ index = dc_fixpt_floor(norm_x);
+ if (index < 0 || index > 255)
+ continue;
+
+- index_f = dal_fixed31_32_from_int_nonconst(index);
++ index_f = dc_fixpt_from_int_nonconst(index);
+ index_next = (index == 255) ? index : index + 1;
+
+ if (color == 0) {
+@@ -1149,11 +1149,11 @@ static void interpolate_user_regamma(uint32_t hw_points_num,
+ }
+
+ // we have everything now, so interpolate
+- delta_lut = dal_fixed31_32_sub(lut2, lut1);
+- delta_index = dal_fixed31_32_sub(norm_x, index_f);
++ delta_lut = dc_fixpt_sub(lut2, lut1);
++ delta_index = dc_fixpt_sub(norm_x, index_f);
+
+- *tf_point = dal_fixed31_32_add(lut1,
+- dal_fixed31_32_mul(delta_index, delta_lut));
++ *tf_point = dc_fixpt_add(lut1,
++ dc_fixpt_mul(delta_index, delta_lut));
+ }
+ ++i;
+ }
+@@ -1168,15 +1168,15 @@ static void build_new_custom_resulted_curve(
+ i = 0;
+
+ while (i != hw_points_num + 1) {
+- tf_pts->red[i] = dal_fixed31_32_clamp(
+- tf_pts->red[i], dal_fixed31_32_zero,
+- dal_fixed31_32_one);
+- tf_pts->green[i] = dal_fixed31_32_clamp(
+- tf_pts->green[i], dal_fixed31_32_zero,
+- dal_fixed31_32_one);
+- tf_pts->blue[i] = dal_fixed31_32_clamp(
+- tf_pts->blue[i], dal_fixed31_32_zero,
+- dal_fixed31_32_one);
++ tf_pts->red[i] = dc_fixpt_clamp(
++ tf_pts->red[i], dc_fixpt_zero,
++ dc_fixpt_one);
++ tf_pts->green[i] = dc_fixpt_clamp(
++ tf_pts->green[i], dc_fixpt_zero,
++ dc_fixpt_one);
++ tf_pts->blue[i] = dc_fixpt_clamp(
++ tf_pts->blue[i], dc_fixpt_zero,
++ dc_fixpt_one);
+
+ ++i;
+ }
+@@ -1290,9 +1290,9 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
+ if (!coeff)
+ goto coeff_alloc_fail;
+
+- dividers.divider1 = dal_fixed31_32_from_fraction(3, 2);
+- dividers.divider2 = dal_fixed31_32_from_int(2);
+- dividers.divider3 = dal_fixed31_32_from_fraction(5, 2);
++ dividers.divider1 = dc_fixpt_from_fraction(3, 2);
++ dividers.divider2 = dc_fixpt_from_int(2);
++ dividers.divider3 = dc_fixpt_from_fraction(5, 2);
+
+ tf = output_tf->tf;
+
+@@ -1357,15 +1357,15 @@ bool calculate_user_regamma_coeff(struct dc_transfer_func *output_tf,
+ uint32_t i = 0;
+
+ do {
+- coeff.a0[i] = dal_fixed31_32_from_fraction(
++ coeff.a0[i] = dc_fixpt_from_fraction(
+ regamma->coeff.A0[i], 10000000);
+- coeff.a1[i] = dal_fixed31_32_from_fraction(
++ coeff.a1[i] = dc_fixpt_from_fraction(
+ regamma->coeff.A1[i], 1000);
+- coeff.a2[i] = dal_fixed31_32_from_fraction(
++ coeff.a2[i] = dc_fixpt_from_fraction(
+ regamma->coeff.A2[i], 1000);
+- coeff.a3[i] = dal_fixed31_32_from_fraction(
++ coeff.a3[i] = dc_fixpt_from_fraction(
+ regamma->coeff.A3[i], 1000);
+- coeff.user_gamma[i] = dal_fixed31_32_from_fraction(
++ coeff.user_gamma[i] = dc_fixpt_from_fraction(
+ regamma->coeff.gamma[i], 1000);
+
+ ++i;
+@@ -1374,9 +1374,9 @@ bool calculate_user_regamma_coeff(struct dc_transfer_func *output_tf,
+ i = 0;
+ /* fixed_pt library has problems handling too small values */
+ while (i != 32) {
+- output_tf->tf_pts.red[i] = dal_fixed31_32_zero;
+- output_tf->tf_pts.green[i] = dal_fixed31_32_zero;
+- output_tf->tf_pts.blue[i] = dal_fixed31_32_zero;
++ output_tf->tf_pts.red[i] = dc_fixpt_zero;
++ output_tf->tf_pts.green[i] = dc_fixpt_zero;
++ output_tf->tf_pts.blue[i] = dc_fixpt_zero;
+ ++coord_x;
+ ++i;
+ }
+@@ -1423,9 +1423,9 @@ bool calculate_user_regamma_ramp(struct dc_transfer_func *output_tf,
+ if (!rgb_regamma)
+ goto rgb_regamma_alloc_fail;
+
+- dividers.divider1 = dal_fixed31_32_from_fraction(3, 2);
+- dividers.divider2 = dal_fixed31_32_from_int(2);
+- dividers.divider3 = dal_fixed31_32_from_fraction(5, 2);
++ dividers.divider1 = dc_fixpt_from_fraction(3, 2);
++ dividers.divider2 = dc_fixpt_from_int(2);
++ dividers.divider3 = dc_fixpt_from_fraction(5, 2);
+
+ scale_user_regamma_ramp(rgb_user, &regamma->ramp, dividers);
+
+@@ -1496,9 +1496,9 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
+ if (!coeff)
+ goto coeff_alloc_fail;
+
+- dividers.divider1 = dal_fixed31_32_from_fraction(3, 2);
+- dividers.divider2 = dal_fixed31_32_from_int(2);
+- dividers.divider3 = dal_fixed31_32_from_fraction(5, 2);
++ dividers.divider1 = dc_fixpt_from_fraction(3, 2);
++ dividers.divider2 = dc_fixpt_from_int(2);
++ dividers.divider3 = dc_fixpt_from_fraction(5, 2);
+
+ tf = input_tf->tf;
+
+--
+2.7.4
+
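The hunks above are a mechanical rename of the dal_fixed31_32_* helpers to dc_fixpt_*; the linear interpolation they implement in apply_lut_1d and interpolate_user_regamma is unchanged. As a minimal sketch of that interpolation pattern (plain C, with double standing in for struct fixed31_32; the function and variable names below are illustrative only, not taken from the patch):

    /* Interpolate between two neighbouring LUT samples, mirroring the
     * lut1/lut2/delta_index pattern in the hunks above (sketch only). */
    static double interp_lut(const double *lut, int max_index, double norm_y)
    {
        int index = (int)norm_y;            /* floor, since norm_y >= 0 here */
        int index_next;
        double delta_lut, delta_index;

        if (index < 0 || index > max_index)
            return norm_y;                  /* out of range: leave the value as-is */

        index_next = (index == max_index) ? index : index + 1;
        delta_lut = lut[index_next] - lut[index];
        delta_index = norm_y - (double)index;

        return lut[index] + delta_index * delta_lut;
    }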
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4456-drm-amd-display-inline-more-of-fixed-point-code.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4456-drm-amd-display-inline-more-of-fixed-point-code.patch
new file mode 100644
index 00000000..6bf386ca
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4456-drm-amd-display-inline-more-of-fixed-point-code.patch
@@ -0,0 +1,721 @@
+From f2c680e3bbfdb2bbfc0f285635005e0e80f2e571 Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Wed, 18 Apr 2018 13:54:24 -0400
+Subject: [PATCH 4456/5725] drm/amd/display: inline more of fixed point code
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c | 156 ++--------------
+ drivers/gpu/drm/amd/display/include/fixed31_32.h | 207 ++++++++++++---------
+ .../drm/amd/display/modules/color/color_gamma.c | 8 +-
+ 3 files changed, 135 insertions(+), 236 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
+index e398ecd..e61dd97d 100644
+--- a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
++++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
+@@ -64,9 +64,7 @@ static inline unsigned long long complete_integer_division_u64(
+ #define GET_FRACTIONAL_PART(x) \
+ (FRACTIONAL_PART_MASK & (x))
+
+-struct fixed31_32 dc_fixpt_from_fraction(
+- long long numerator,
+- long long denominator)
++struct fixed31_32 dc_fixpt_from_fraction(long long numerator, long long denominator)
+ {
+ struct fixed31_32 res;
+
+@@ -118,63 +116,7 @@ struct fixed31_32 dc_fixpt_from_fraction(
+ return res;
+ }
+
+-struct fixed31_32 dc_fixpt_from_int_nonconst(
+- long long arg)
+-{
+- struct fixed31_32 res;
+-
+- ASSERT((LONG_MIN <= arg) && (arg <= LONG_MAX));
+-
+- res.value = arg << FIXED31_32_BITS_PER_FRACTIONAL_PART;
+-
+- return res;
+-}
+-
+-struct fixed31_32 dc_fixpt_shl(
+- struct fixed31_32 arg,
+- unsigned char shift)
+-{
+- struct fixed31_32 res;
+-
+- ASSERT(((arg.value >= 0) && (arg.value <= LLONG_MAX >> shift)) ||
+- ((arg.value < 0) && (arg.value >= LLONG_MIN >> shift)));
+-
+- res.value = arg.value << shift;
+-
+- return res;
+-}
+-
+-struct fixed31_32 dc_fixpt_add(
+- struct fixed31_32 arg1,
+- struct fixed31_32 arg2)
+-{
+- struct fixed31_32 res;
+-
+- ASSERT(((arg1.value >= 0) && (LLONG_MAX - arg1.value >= arg2.value)) ||
+- ((arg1.value < 0) && (LLONG_MIN - arg1.value <= arg2.value)));
+-
+- res.value = arg1.value + arg2.value;
+-
+- return res;
+-}
+-
+-struct fixed31_32 dc_fixpt_sub(
+- struct fixed31_32 arg1,
+- struct fixed31_32 arg2)
+-{
+- struct fixed31_32 res;
+-
+- ASSERT(((arg2.value >= 0) && (LLONG_MIN + arg2.value <= arg1.value)) ||
+- ((arg2.value < 0) && (LLONG_MAX + arg2.value >= arg1.value)));
+-
+- res.value = arg1.value - arg2.value;
+-
+- return res;
+-}
+-
+-struct fixed31_32 dc_fixpt_mul(
+- struct fixed31_32 arg1,
+- struct fixed31_32 arg2)
++struct fixed31_32 dc_fixpt_mul(struct fixed31_32 arg1, struct fixed31_32 arg2)
+ {
+ struct fixed31_32 res;
+
+@@ -225,8 +167,7 @@ struct fixed31_32 dc_fixpt_mul(
+ return res;
+ }
+
+-struct fixed31_32 dc_fixpt_sqr(
+- struct fixed31_32 arg)
++struct fixed31_32 dc_fixpt_sqr(struct fixed31_32 arg)
+ {
+ struct fixed31_32 res;
+
+@@ -266,8 +207,7 @@ struct fixed31_32 dc_fixpt_sqr(
+ return res;
+ }
+
+-struct fixed31_32 dc_fixpt_recip(
+- struct fixed31_32 arg)
++struct fixed31_32 dc_fixpt_recip(struct fixed31_32 arg)
+ {
+ /*
+ * @note
+@@ -281,8 +221,7 @@ struct fixed31_32 dc_fixpt_recip(
+ arg.value);
+ }
+
+-struct fixed31_32 dc_fixpt_sinc(
+- struct fixed31_32 arg)
++struct fixed31_32 dc_fixpt_sinc(struct fixed31_32 arg)
+ {
+ struct fixed31_32 square;
+
+@@ -326,16 +265,14 @@ struct fixed31_32 dc_fixpt_sinc(
+ return res;
+ }
+
+-struct fixed31_32 dc_fixpt_sin(
+- struct fixed31_32 arg)
++struct fixed31_32 dc_fixpt_sin(struct fixed31_32 arg)
+ {
+ return dc_fixpt_mul(
+ arg,
+ dc_fixpt_sinc(arg));
+ }
+
+-struct fixed31_32 dc_fixpt_cos(
+- struct fixed31_32 arg)
++struct fixed31_32 dc_fixpt_cos(struct fixed31_32 arg)
+ {
+ /* TODO implement argument normalization */
+
+@@ -367,8 +304,7 @@ struct fixed31_32 dc_fixpt_cos(
+ *
+ * Calculated as Taylor series.
+ */
+-static struct fixed31_32 fixed31_32_exp_from_taylor_series(
+- struct fixed31_32 arg)
++static struct fixed31_32 fixed31_32_exp_from_taylor_series(struct fixed31_32 arg)
+ {
+ unsigned int n = 9;
+
+@@ -396,8 +332,7 @@ static struct fixed31_32 fixed31_32_exp_from_taylor_series(
+ res));
+ }
+
+-struct fixed31_32 dc_fixpt_exp(
+- struct fixed31_32 arg)
++struct fixed31_32 dc_fixpt_exp(struct fixed31_32 arg)
+ {
+ /*
+ * @brief
+@@ -440,8 +375,7 @@ struct fixed31_32 dc_fixpt_exp(
+ return dc_fixpt_one;
+ }
+
+-struct fixed31_32 dc_fixpt_log(
+- struct fixed31_32 arg)
++struct fixed31_32 dc_fixpt_log(struct fixed31_32 arg)
+ {
+ struct fixed31_32 res = dc_fixpt_neg(dc_fixpt_one);
+ /* TODO improve 1st estimation */
+@@ -472,61 +406,6 @@ struct fixed31_32 dc_fixpt_log(
+ return res;
+ }
+
+-struct fixed31_32 dc_fixpt_pow(
+- struct fixed31_32 arg1,
+- struct fixed31_32 arg2)
+-{
+- return dc_fixpt_exp(
+- dc_fixpt_mul(
+- dc_fixpt_log(arg1),
+- arg2));
+-}
+-
+-int dc_fixpt_floor(
+- struct fixed31_32 arg)
+-{
+- unsigned long long arg_value = abs_i64(arg.value);
+-
+- if (arg.value >= 0)
+- return (int)GET_INTEGER_PART(arg_value);
+- else
+- return -(int)GET_INTEGER_PART(arg_value);
+-}
+-
+-int dc_fixpt_round(
+- struct fixed31_32 arg)
+-{
+- unsigned long long arg_value = abs_i64(arg.value);
+-
+- const long long summand = dc_fixpt_half.value;
+-
+- ASSERT(LLONG_MAX - (long long)arg_value >= summand);
+-
+- arg_value += summand;
+-
+- if (arg.value >= 0)
+- return (int)GET_INTEGER_PART(arg_value);
+- else
+- return -(int)GET_INTEGER_PART(arg_value);
+-}
+-
+-int dc_fixpt_ceil(
+- struct fixed31_32 arg)
+-{
+- unsigned long long arg_value = abs_i64(arg.value);
+-
+- const long long summand = dc_fixpt_one.value -
+- dc_fixpt_epsilon.value;
+-
+- ASSERT(LLONG_MAX - (long long)arg_value >= summand);
+-
+- arg_value += summand;
+-
+- if (arg.value >= 0)
+- return (int)GET_INTEGER_PART(arg_value);
+- else
+- return -(int)GET_INTEGER_PART(arg_value);
+-}
+
+ /* this function is a generic helper to translate fixed point value to
+ * specified integer format that will consist of integer_bits integer part and
+@@ -570,32 +449,27 @@ static inline unsigned int clamp_ux_dy(
+ return min_clamp;
+ }
+
+-unsigned int dc_fixpt_u2d19(
+- struct fixed31_32 arg)
++unsigned int dc_fixpt_u2d19(struct fixed31_32 arg)
+ {
+ return ux_dy(arg.value, 2, 19);
+ }
+
+-unsigned int dc_fixpt_u0d19(
+- struct fixed31_32 arg)
++unsigned int dc_fixpt_u0d19(struct fixed31_32 arg)
+ {
+ return ux_dy(arg.value, 0, 19);
+ }
+
+-unsigned int dc_fixpt_clamp_u0d14(
+- struct fixed31_32 arg)
++unsigned int dc_fixpt_clamp_u0d14(struct fixed31_32 arg)
+ {
+ return clamp_ux_dy(arg.value, 0, 14, 1);
+ }
+
+-unsigned int dc_fixpt_clamp_u0d10(
+- struct fixed31_32 arg)
++unsigned int dc_fixpt_clamp_u0d10(struct fixed31_32 arg)
+ {
+ return clamp_ux_dy(arg.value, 0, 10, 1);
+ }
+
+-int dc_fixpt_s4d19(
+- struct fixed31_32 arg)
++int dc_fixpt_s4d19(struct fixed31_32 arg)
+ {
+ if (arg.value < 0)
+ return -(int)ux_dy(dc_fixpt_abs(arg).value, 4, 19);
+diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h
+index b5b8d7d..ebfd33e 100644
+--- a/drivers/gpu/drm/amd/display/include/fixed31_32.h
++++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h
+@@ -70,24 +70,19 @@ static const struct fixed31_32 dc_fixpt_ln2_div_2 = { 1488522236LL };
+ * @brief
+ * result = numerator / denominator
+ */
+-struct fixed31_32 dc_fixpt_from_fraction(
+- long long numerator,
+- long long denominator);
++struct fixed31_32 dc_fixpt_from_fraction(long long numerator, long long denominator);
+
+ /*
+ * @brief
+ * result = arg
+ */
+-struct fixed31_32 dc_fixpt_from_int_nonconst(long long arg);
+-static inline struct fixed31_32 dc_fixpt_from_int(long long arg)
++static inline struct fixed31_32 dc_fixpt_from_int(int arg)
+ {
+- if (__builtin_constant_p(arg)) {
+- struct fixed31_32 res;
+- BUILD_BUG_ON((LONG_MIN > arg) || (arg > LONG_MAX));
+- res.value = arg << FIXED31_32_BITS_PER_FRACTIONAL_PART;
+- return res;
+- } else
+- return dc_fixpt_from_int_nonconst(arg);
++ struct fixed31_32 res;
++
++ res.value = (long long) arg << FIXED31_32_BITS_PER_FRACTIONAL_PART;
++
++ return res;
+ }
+
+ /*
+@@ -129,8 +124,7 @@ static inline struct fixed31_32 dc_fixpt_abs(struct fixed31_32 arg)
+ * @brief
+ * result = arg1 < arg2
+ */
+-static inline bool dc_fixpt_lt(struct fixed31_32 arg1,
+- struct fixed31_32 arg2)
++static inline bool dc_fixpt_lt(struct fixed31_32 arg1, struct fixed31_32 arg2)
+ {
+ return arg1.value < arg2.value;
+ }
+@@ -139,8 +133,7 @@ static inline bool dc_fixpt_lt(struct fixed31_32 arg1,
+ * @brief
+ * result = arg1 <= arg2
+ */
+-static inline bool dc_fixpt_le(struct fixed31_32 arg1,
+- struct fixed31_32 arg2)
++static inline bool dc_fixpt_le(struct fixed31_32 arg1, struct fixed31_32 arg2)
+ {
+ return arg1.value <= arg2.value;
+ }
+@@ -149,8 +142,7 @@ static inline bool dc_fixpt_le(struct fixed31_32 arg1,
+ * @brief
+ * result = arg1 == arg2
+ */
+-static inline bool dc_fixpt_eq(struct fixed31_32 arg1,
+- struct fixed31_32 arg2)
++static inline bool dc_fixpt_eq(struct fixed31_32 arg1, struct fixed31_32 arg2)
+ {
+ return arg1.value == arg2.value;
+ }
+@@ -159,8 +151,7 @@ static inline bool dc_fixpt_eq(struct fixed31_32 arg1,
+ * @brief
+ * result = min(arg1, arg2) := (arg1 <= arg2) ? arg1 : arg2
+ */
+-static inline struct fixed31_32 dc_fixpt_min(struct fixed31_32 arg1,
+- struct fixed31_32 arg2)
++static inline struct fixed31_32 dc_fixpt_min(struct fixed31_32 arg1, struct fixed31_32 arg2)
+ {
+ if (arg1.value <= arg2.value)
+ return arg1;
+@@ -172,8 +163,7 @@ static inline struct fixed31_32 dc_fixpt_min(struct fixed31_32 arg1,
+ * @brief
+ * result = max(arg1, arg2) := (arg1 <= arg2) ? arg2 : arg1
+ */
+-static inline struct fixed31_32 dc_fixpt_max(struct fixed31_32 arg1,
+- struct fixed31_32 arg2)
++static inline struct fixed31_32 dc_fixpt_max(struct fixed31_32 arg1, struct fixed31_32 arg2)
+ {
+ if (arg1.value <= arg2.value)
+ return arg2;
+@@ -209,17 +199,23 @@ static inline struct fixed31_32 dc_fixpt_clamp(
+ * @brief
+ * result = arg << shift
+ */
+-struct fixed31_32 dc_fixpt_shl(
+- struct fixed31_32 arg,
+- unsigned char shift);
++static inline struct fixed31_32 dc_fixpt_shl(struct fixed31_32 arg, unsigned char shift)
++{
++ struct fixed31_32 res;
++
++ ASSERT(((arg.value >= 0) && (arg.value <= LLONG_MAX >> shift)) ||
++ ((arg.value < 0) && (arg.value >= LLONG_MIN >> shift)));
++
++ res.value = arg.value << shift;
++
++ return res;
++}
+
+ /*
+ * @brief
+ * result = arg >> shift
+ */
+-static inline struct fixed31_32 dc_fixpt_shr(
+- struct fixed31_32 arg,
+- unsigned char shift)
++static inline struct fixed31_32 dc_fixpt_shr(struct fixed31_32 arg, unsigned char shift)
+ {
+ struct fixed31_32 res;
+ res.value = arg.value >> shift;
+@@ -235,38 +231,50 @@ static inline struct fixed31_32 dc_fixpt_shr(
+ * @brief
+ * result = arg1 + arg2
+ */
+-struct fixed31_32 dc_fixpt_add(
+- struct fixed31_32 arg1,
+- struct fixed31_32 arg2);
++static inline struct fixed31_32 dc_fixpt_add(struct fixed31_32 arg1, struct fixed31_32 arg2)
++{
++ struct fixed31_32 res;
++
++ ASSERT(((arg1.value >= 0) && (LLONG_MAX - arg1.value >= arg2.value)) ||
++ ((arg1.value < 0) && (LLONG_MIN - arg1.value <= arg2.value)));
++
++ res.value = arg1.value + arg2.value;
++
++ return res;
++}
+
+ /*
+ * @brief
+ * result = arg1 + arg2
+ */
+-static inline struct fixed31_32 dc_fixpt_add_int(struct fixed31_32 arg1,
+- int arg2)
++static inline struct fixed31_32 dc_fixpt_add_int(struct fixed31_32 arg1, int arg2)
+ {
+- return dc_fixpt_add(arg1,
+- dc_fixpt_from_int(arg2));
++ return dc_fixpt_add(arg1, dc_fixpt_from_int(arg2));
+ }
+
+ /*
+ * @brief
+ * result = arg1 - arg2
+ */
+-struct fixed31_32 dc_fixpt_sub(
+- struct fixed31_32 arg1,
+- struct fixed31_32 arg2);
++static inline struct fixed31_32 dc_fixpt_sub(struct fixed31_32 arg1, struct fixed31_32 arg2)
++{
++ struct fixed31_32 res;
++
++ ASSERT(((arg2.value >= 0) && (LLONG_MIN + arg2.value <= arg1.value)) ||
++ ((arg2.value < 0) && (LLONG_MAX + arg2.value >= arg1.value)));
++
++ res.value = arg1.value - arg2.value;
++
++ return res;
++}
+
+ /*
+ * @brief
+ * result = arg1 - arg2
+ */
+-static inline struct fixed31_32 dc_fixpt_sub_int(struct fixed31_32 arg1,
+- int arg2)
++static inline struct fixed31_32 dc_fixpt_sub_int(struct fixed31_32 arg1, int arg2)
+ {
+- return dc_fixpt_sub(arg1,
+- dc_fixpt_from_int(arg2));
++ return dc_fixpt_sub(arg1, dc_fixpt_from_int(arg2));
+ }
+
+
+@@ -279,49 +287,40 @@ static inline struct fixed31_32 dc_fixpt_sub_int(struct fixed31_32 arg1,
+ * @brief
+ * result = arg1 * arg2
+ */
+-struct fixed31_32 dc_fixpt_mul(
+- struct fixed31_32 arg1,
+- struct fixed31_32 arg2);
++struct fixed31_32 dc_fixpt_mul(struct fixed31_32 arg1, struct fixed31_32 arg2);
+
+
+ /*
+ * @brief
+ * result = arg1 * arg2
+ */
+-static inline struct fixed31_32 dc_fixpt_mul_int(struct fixed31_32 arg1,
+- int arg2)
++static inline struct fixed31_32 dc_fixpt_mul_int(struct fixed31_32 arg1, int arg2)
+ {
+- return dc_fixpt_mul(arg1,
+- dc_fixpt_from_int(arg2));
++ return dc_fixpt_mul(arg1, dc_fixpt_from_int(arg2));
+ }
+
+ /*
+ * @brief
+ * result = square(arg) := arg * arg
+ */
+-struct fixed31_32 dc_fixpt_sqr(
+- struct fixed31_32 arg);
++struct fixed31_32 dc_fixpt_sqr(struct fixed31_32 arg);
+
+ /*
+ * @brief
+ * result = arg1 / arg2
+ */
+-static inline struct fixed31_32 dc_fixpt_div_int(struct fixed31_32 arg1,
+- long long arg2)
++static inline struct fixed31_32 dc_fixpt_div_int(struct fixed31_32 arg1, long long arg2)
+ {
+- return dc_fixpt_from_fraction(arg1.value,
+- dc_fixpt_from_int(arg2).value);
++ return dc_fixpt_from_fraction(arg1.value, dc_fixpt_from_int(arg2).value);
+ }
+
+ /*
+ * @brief
+ * result = arg1 / arg2
+ */
+-static inline struct fixed31_32 dc_fixpt_div(struct fixed31_32 arg1,
+- struct fixed31_32 arg2)
++static inline struct fixed31_32 dc_fixpt_div(struct fixed31_32 arg1, struct fixed31_32 arg2)
+ {
+- return dc_fixpt_from_fraction(arg1.value,
+- arg2.value);
++ return dc_fixpt_from_fraction(arg1.value, arg2.value);
+ }
+
+ /*
+@@ -336,8 +335,7 @@ static inline struct fixed31_32 dc_fixpt_div(struct fixed31_32 arg1,
+ * @note
+ * No special actions taken in case argument is zero.
+ */
+-struct fixed31_32 dc_fixpt_recip(
+- struct fixed31_32 arg);
++struct fixed31_32 dc_fixpt_recip(struct fixed31_32 arg);
+
+ /*
+ * @brief
+@@ -352,8 +350,7 @@ struct fixed31_32 dc_fixpt_recip(
+ * Argument specified in radians,
+ * internally it's normalized to [-2pi...2pi] range.
+ */
+-struct fixed31_32 dc_fixpt_sinc(
+- struct fixed31_32 arg);
++struct fixed31_32 dc_fixpt_sinc(struct fixed31_32 arg);
+
+ /*
+ * @brief
+@@ -363,8 +360,7 @@ struct fixed31_32 dc_fixpt_sinc(
+ * Argument specified in radians,
+ * internally it's normalized to [-2pi...2pi] range.
+ */
+-struct fixed31_32 dc_fixpt_sin(
+- struct fixed31_32 arg);
++struct fixed31_32 dc_fixpt_sin(struct fixed31_32 arg);
+
+ /*
+ * @brief
+@@ -376,8 +372,7 @@ struct fixed31_32 dc_fixpt_sin(
+ * passing arguments outside that range
+ * will cause incorrect result!
+ */
+-struct fixed31_32 dc_fixpt_cos(
+- struct fixed31_32 arg);
++struct fixed31_32 dc_fixpt_cos(struct fixed31_32 arg);
+
+ /*
+ * @brief
+@@ -391,8 +386,7 @@ struct fixed31_32 dc_fixpt_cos(
+ * @note
+ * Currently, function is verified for abs(arg) <= 1.
+ */
+-struct fixed31_32 dc_fixpt_exp(
+- struct fixed31_32 arg);
++struct fixed31_32 dc_fixpt_exp(struct fixed31_32 arg);
+
+ /*
+ * @brief
+@@ -404,8 +398,7 @@ struct fixed31_32 dc_fixpt_exp(
+ * Currently, no special actions taken
+ * in case of invalid argument(s). Take care!
+ */
+-struct fixed31_32 dc_fixpt_log(
+- struct fixed31_32 arg);
++struct fixed31_32 dc_fixpt_log(struct fixed31_32 arg);
+
+ /*
+ * @brief
+@@ -419,9 +412,13 @@ struct fixed31_32 dc_fixpt_log(
+ * @note
+ * Currently, abs(arg1) should be less than 1. Take care!
+ */
+-struct fixed31_32 dc_fixpt_pow(
+- struct fixed31_32 arg1,
+- struct fixed31_32 arg2);
++static inline struct fixed31_32 dc_fixpt_pow(struct fixed31_32 arg1, struct fixed31_32 arg2)
++{
++ return dc_fixpt_exp(
++ dc_fixpt_mul(
++ dc_fixpt_log(arg1),
++ arg2));
++}
+
+ /*
+ * @brief
+@@ -432,22 +429,56 @@ struct fixed31_32 dc_fixpt_pow(
+ * @brief
+ * result = floor(arg) := greatest integer lower than or equal to arg
+ */
+-int dc_fixpt_floor(
+- struct fixed31_32 arg);
++static inline int dc_fixpt_floor(struct fixed31_32 arg)
++{
++ unsigned long long arg_value = arg.value > 0 ? arg.value : -arg.value;
++
++ if (arg.value >= 0)
++ return (int)(arg_value >> FIXED31_32_BITS_PER_FRACTIONAL_PART);
++ else
++ return -(int)(arg_value >> FIXED31_32_BITS_PER_FRACTIONAL_PART);
++}
+
+ /*
+ * @brief
+ * result = round(arg) := integer nearest to arg
+ */
+-int dc_fixpt_round(
+- struct fixed31_32 arg);
++static inline int dc_fixpt_round(struct fixed31_32 arg)
++{
++ unsigned long long arg_value = arg.value > 0 ? arg.value : -arg.value;
++
++ const long long summand = dc_fixpt_half.value;
++
++ ASSERT(LLONG_MAX - (long long)arg_value >= summand);
++
++ arg_value += summand;
++
++ if (arg.value >= 0)
++ return (int)(arg_value >> FIXED31_32_BITS_PER_FRACTIONAL_PART);
++ else
++ return -(int)(arg_value >> FIXED31_32_BITS_PER_FRACTIONAL_PART);
++}
+
+ /*
+ * @brief
+ * result = ceil(arg) := lowest integer greater than or equal to arg
+ */
+-int dc_fixpt_ceil(
+- struct fixed31_32 arg);
++static inline int dc_fixpt_ceil(struct fixed31_32 arg)
++{
++ unsigned long long arg_value = arg.value > 0 ? arg.value : -arg.value;
++
++ const long long summand = dc_fixpt_one.value -
++ dc_fixpt_epsilon.value;
++
++ ASSERT(LLONG_MAX - (long long)arg_value >= summand);
++
++ arg_value += summand;
++
++ if (arg.value >= 0)
++ return (int)(arg_value >> FIXED31_32_BITS_PER_FRACTIONAL_PART);
++ else
++ return -(int)(arg_value >> FIXED31_32_BITS_PER_FRACTIONAL_PART);
++}
+
+ /* the following two function are used in scaler hw programming to convert fixed
+ * point value to format 2 bits from integer part and 19 bits from fractional
+@@ -455,20 +486,14 @@ int dc_fixpt_ceil(
+ * fractional
+ */
+
+-unsigned int dc_fixpt_u2d19(
+- struct fixed31_32 arg);
+-
+-unsigned int dc_fixpt_u0d19(
+- struct fixed31_32 arg);
++unsigned int dc_fixpt_u2d19(struct fixed31_32 arg);
+
++unsigned int dc_fixpt_u0d19(struct fixed31_32 arg);
+
+-unsigned int dc_fixpt_clamp_u0d14(
+- struct fixed31_32 arg);
++unsigned int dc_fixpt_clamp_u0d14(struct fixed31_32 arg);
+
+-unsigned int dc_fixpt_clamp_u0d10(
+- struct fixed31_32 arg);
++unsigned int dc_fixpt_clamp_u0d10(struct fixed31_32 arg);
+
+-int dc_fixpt_s4d19(
+- struct fixed31_32 arg);
++int dc_fixpt_s4d19(struct fixed31_32 arg);
+
+ #endif
+diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+index 29d2ec8..e803b37 100644
+--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
++++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+@@ -913,7 +913,7 @@ static void apply_lut_1d(
+ struct fixed31_32 lut2;
+ const int max_lut_index = 4095;
+ const struct fixed31_32 max_lut_index_f =
+- dc_fixpt_from_int_nonconst(max_lut_index);
++ dc_fixpt_from_int(max_lut_index);
+ int32_t index = 0, index_next = 0;
+ struct fixed31_32 index_f;
+ struct fixed31_32 delta_lut;
+@@ -934,7 +934,7 @@ static void apply_lut_1d(
+ norm_y = dc_fixpt_mul(max_lut_index_f,
+ *regamma_y);
+ index = dc_fixpt_floor(norm_y);
+- index_f = dc_fixpt_from_int_nonconst(index);
++ index_f = dc_fixpt_from_int(index);
+
+ if (index < 0 || index > max_lut_index)
+ continue;
+@@ -1094,7 +1094,7 @@ static void interpolate_user_regamma(uint32_t hw_points_num,
+ struct fixed31_32 *tf_point;
+ struct fixed31_32 hw_x;
+ struct fixed31_32 norm_factor =
+- dc_fixpt_from_int_nonconst(255);
++ dc_fixpt_from_int(255);
+ struct fixed31_32 norm_x;
+ struct fixed31_32 index_f;
+ struct fixed31_32 lut1;
+@@ -1134,7 +1134,7 @@ static void interpolate_user_regamma(uint32_t hw_points_num,
+ if (index < 0 || index > 255)
+ continue;
+
+- index_f = dc_fixpt_from_int_nonconst(index);
++ index_f = dc_fixpt_from_int(index);
+ index_next = (index == 255) ? index : index + 1;
+
+ if (color == 0) {
+--
+2.7.4
+
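All of the helpers being inlined above operate on a signed Q31.32 value packed into a single long long (value = real_value * 2^32). A self-contained sketch of that representation, assuming plain C99, with FRAC_BITS standing in for FIXED31_32_BITS_PER_FRACTIONAL_PART and the q_* names being illustrative rather than the driver's:

    #include <stdio.h>

    #define FRAC_BITS 32                              /* fractional bits of Q31.32 */

    static long long q_from_int(int x)                { return (long long)x << FRAC_BITS; }
    static long long q_add(long long a, long long b)  { return a + b; }
    static int       q_floor_nonneg(long long x)      { return (int)(x >> FRAC_BITS); }

    int main(void)
    {
        long long half = 1LL << (FRAC_BITS - 1);      /* 0.5 in Q31.32 */
        long long v = q_add(q_from_int(2), half);     /* 2.5 */

        printf("floor(2.5) = %d\n", q_floor_nonneg(v));  /* prints 2 */
        return 0;
    }

The patch keeps the arithmetic identical; it only moves small helpers like these out of fixpt31_32.c into the header as static inline functions.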
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4457-drm-amd-display-Make-DisplayStats-work-with-just-DC-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4457-drm-amd-display-Make-DisplayStats-work-with-just-DC-.patch
new file mode 100644
index 00000000..b4dcd0be
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4457-drm-amd-display-Make-DisplayStats-work-with-just-DC-.patch
@@ -0,0 +1,140 @@
+From 41ab3ae07811858df73db8eb7830297415204b0b Mon Sep 17 00:00:00 2001
+From: Anthony Koo <Anthony.Koo@amd.com>
+Date: Thu, 19 Apr 2018 10:05:22 -0400
+Subject: [PATCH 4457/5725] drm/amd/display: Make DisplayStats work with just
+ DC DisplayStats minor
+
+Remove dependency on the old FREESYNC_SW_STATS log mask used by DAL2
+Also rename the log type from Profiling to DisplayStats
+
+Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
+Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/basics/logger.c | 2 +-
+ drivers/gpu/drm/amd/display/include/logger_types.h | 2 +-
+ drivers/gpu/drm/amd/display/modules/stats/stats.c | 81 ++++++++++++----------
+ 3 files changed, 46 insertions(+), 39 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/basics/logger.c b/drivers/gpu/drm/amd/display/dc/basics/logger.c
+index 31bee05..0001a3c 100644
+--- a/drivers/gpu/drm/amd/display/dc/basics/logger.c
++++ b/drivers/gpu/drm/amd/display/dc/basics/logger.c
+@@ -61,7 +61,7 @@ static const struct dc_log_type_info log_type_info_tbl[] = {
+ {LOG_EVENT_UNDERFLOW, "Underflow"},
+ {LOG_IF_TRACE, "InterfaceTrace"},
+ {LOG_DTN, "DTN"},
+- {LOG_PROFILING, "Profiling"}
++ {LOG_DISPLAYSTATS, "DisplayStats"}
+ };
+
+
+diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
+index b608a08..0a540b9 100644
+--- a/drivers/gpu/drm/amd/display/include/logger_types.h
++++ b/drivers/gpu/drm/amd/display/include/logger_types.h
+@@ -98,7 +98,7 @@ enum dc_log_type {
+ LOG_EVENT_UNDERFLOW,
+ LOG_IF_TRACE,
+ LOG_PERF_TRACE,
+- LOG_PROFILING,
++ LOG_DISPLAYSTATS,
+
+ LOG_SECTION_TOTAL_COUNT
+ };
+diff --git a/drivers/gpu/drm/amd/display/modules/stats/stats.c b/drivers/gpu/drm/amd/display/modules/stats/stats.c
+index 48e0219..d16aac7 100644
+--- a/drivers/gpu/drm/amd/display/modules/stats/stats.c
++++ b/drivers/gpu/drm/amd/display/modules/stats/stats.c
+@@ -177,44 +177,51 @@ void mod_stats_dump(struct mod_stats *mod_stats)
+ logger = dc->ctx->logger;
+ time = core_stats->time;
+
+- //LogEntry* pLog = GetLog()->Open(LogMajor_ISR, LogMinor_ISR_FreeSyncSW);
+-
+- //if (!pLog->IsDummyEntry())
+- {
+- dm_logger_write(logger, LOG_PROFILING, "==Display Caps==\n");
+- dm_logger_write(logger, LOG_PROFILING, "\n");
+- dm_logger_write(logger, LOG_PROFILING, "\n");
+-
+- dm_logger_write(logger, LOG_PROFILING, "==Stats==\n");
+- dm_logger_write(logger, LOG_PROFILING,
+- "render avgRender minWindow midPoint maxWindow vsyncToFlip flipToVsync #vsyncBetweenFlip #frame insertDuration vTotalMin vTotalMax eventTrigs vSyncTime1 vSyncTime2 vSyncTime3 vSyncTime4 vSyncTime5 flags\n");
+-
+- for (int i = 0; i < core_stats->index && i < core_stats->entries; i++) {
+- dm_logger_write(logger, LOG_PROFILING,
+- "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
+- time[i].render_time_in_us,
+- time[i].avg_render_time_in_us_last_ten,
+- time[i].min_window,
+- time[i].lfc_mid_point_in_us,
+- time[i].max_window,
+- time[i].vsync_to_flip_time_in_us,
+- time[i].flip_to_vsync_time_in_us,
+- time[i].num_vsync_between_flips,
+- time[i].num_frames_inserted,
+- time[i].inserted_duration_in_us,
+- time[i].v_total_min,
+- time[i].v_total_max,
+- time[i].event_triggers,
+- time[i].v_sync_time_in_us[0],
+- time[i].v_sync_time_in_us[1],
+- time[i].v_sync_time_in_us[2],
+- time[i].v_sync_time_in_us[3],
+- time[i].v_sync_time_in_us[4],
+- time[i].flags);
+- }
++ dm_logger_write(logger, LOG_DISPLAYSTATS, "==Display Caps==");
++ dm_logger_write(logger, LOG_DISPLAYSTATS, " ");
++
++ dm_logger_write(logger, LOG_DISPLAYSTATS, "==Display Stats==");
++ dm_logger_write(logger, LOG_DISPLAYSTATS, " ");
++
++ dm_logger_write(logger, LOG_DISPLAYSTATS,
++ "%10s %10s %10s %10s %10s"
++ " %11s %11s %17s %10s %14s"
++ " %10s %10s %10s %10s %10s"
++ " %10s %10s %10s %10s",
++ "render", "avgRender",
++ "minWindow", "midPoint", "maxWindow",
++ "vsyncToFlip", "flipToVsync", "vsyncsBetweenFlip",
++ "numFrame", "insertDuration",
++ "vTotalMin", "vTotalMax", "eventTrigs",
++ "vSyncTime1", "vSyncTime2", "vSyncTime3",
++ "vSyncTime4", "vSyncTime5", "flags");
++
++ for (int i = 0; i < core_stats->index && i < core_stats->entries; i++) {
++ dm_logger_write(logger, LOG_DISPLAYSTATS,
++ "%10u %10u %10u %10u %10u"
++ " %11u %11u %17u %10u %14u"
++ " %10u %10u %10u %10u %10u"
++ " %10u %10u %10u %10u",
++ time[i].render_time_in_us,
++ time[i].avg_render_time_in_us_last_ten,
++ time[i].min_window,
++ time[i].lfc_mid_point_in_us,
++ time[i].max_window,
++ time[i].vsync_to_flip_time_in_us,
++ time[i].flip_to_vsync_time_in_us,
++ time[i].num_vsync_between_flips,
++ time[i].num_frames_inserted,
++ time[i].inserted_duration_in_us,
++ time[i].v_total_min,
++ time[i].v_total_max,
++ time[i].event_triggers,
++ time[i].v_sync_time_in_us[0],
++ time[i].v_sync_time_in_us[1],
++ time[i].v_sync_time_in_us[2],
++ time[i].v_sync_time_in_us[3],
++ time[i].v_sync_time_in_us[4],
++ time[i].flags);
+ }
+- //GetLog()->Close(pLog);
+- //GetLog()->UnSetLogMask(LogMajor_ISR, LogMinor_ISR_FreeSyncSW);
+ }
+
+ void mod_stats_reset_data(struct mod_stats *mod_stats)
+--
+2.7.4
+
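The reworked dump in this patch gets its column alignment purely from fixed-width printf conversions: a "%10s ..." header row matched field-for-field by "%10u ..." data rows. A tiny standalone illustration of the same technique (column names mirror the hunk above; the numbers are made up):

    #include <stdio.h>

    int main(void)
    {
        /* Header and data rows use matching field widths so columns line up. */
        printf("%10s %10s %11s\n", "render", "avgRender", "vsyncToFlip");
        printf("%10u %10u %11u\n", 16666u, 16700u, 1200u);
        printf("%10u %10u %11u\n", 33333u, 16900u, 900u);
        return 0;
    }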
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4458-drm-amd-display-add-fixed-point-fractional-bit-trunc.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4458-drm-amd-display-add-fixed-point-fractional-bit-trunc.patch
new file mode 100644
index 00000000..ac6b3b60
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4458-drm-amd-display-add-fixed-point-fractional-bit-trunc.patch
@@ -0,0 +1,42 @@
+From 087833951a00439a47fd83d4ce587e55e83acde9 Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Wed, 18 Apr 2018 14:11:43 -0400
+Subject: [PATCH 4458/5725] drm/amd/display: add fixed point fractional bit
+ truncation function
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/include/fixed31_32.h | 17 +++++++++++++++++
+ 1 file changed, 17 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h
+index ebfd33e..61f11e2 100644
+--- a/drivers/gpu/drm/amd/display/include/fixed31_32.h
++++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h
+@@ -496,4 +496,21 @@ unsigned int dc_fixpt_clamp_u0d10(struct fixed31_32 arg);
+
+ int dc_fixpt_s4d19(struct fixed31_32 arg);
+
++static inline struct fixed31_32 dc_fixpt_truncate(struct fixed31_32 arg, unsigned int frac_bits)
++{
++ bool negative = arg.value < 0;
++
++ if (frac_bits >= FIXED31_32_BITS_PER_FRACTIONAL_PART) {
++ ASSERT(frac_bits == FIXED31_32_BITS_PER_FRACTIONAL_PART);
++ return arg;
++ }
++
++ if (negative)
++ arg.value = -arg.value;
++ arg.value &= (~0LL) << (FIXED31_32_BITS_PER_FRACTIONAL_PART - frac_bits);
++ if (negative)
++ arg.value = -arg.value;
++ return arg;
++}
++
+ #endif
+--
+2.7.4
+
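The new dc_fixpt_truncate above keeps only the top frac_bits of the 32 fractional bits, folding the sign out and back in so the mask operates on a magnitude. Sketched standalone (plain C; the q_* names are illustrative, and the mask is built without shifting a negative constant):

    #define Q_FRAC_BITS 32

    /* Keep only 'frac_bits' fractional bits of a Q31.32 value by clearing
     * the low (32 - frac_bits) bits of its magnitude (sketch only). */
    static long long q_truncate(long long value, unsigned int frac_bits)
    {
        int negative = value < 0;

        if (frac_bits >= Q_FRAC_BITS)
            return value;

        if (negative)
            value = -value;
        value &= ~((1LL << (Q_FRAC_BITS - frac_bits)) - 1);
        if (negative)
            value = -value;
        return value;
    }

For example, q_truncate(v, 19) clears the low 13 fractional bits, quantizing v to multiples of 2^-19, which is how the following patch uses the helper on scaler ratios.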
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4459-drm-amd-display-truncate-scaling-ratios-and-inits-to.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4459-drm-amd-display-truncate-scaling-ratios-and-inits-to.patch
new file mode 100644
index 00000000..1ec12351
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4459-drm-amd-display-truncate-scaling-ratios-and-inits-to.patch
@@ -0,0 +1,62 @@
+From 0f0c02ca8ccb66fe6b834c9e8e7b8338be5d2600 Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Wed, 18 Apr 2018 14:19:23 -0400
+Subject: [PATCH 4459/5725] drm/amd/display: truncate scaling ratios and inits
+ to 19 bit precision
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 25 +++++++++++++++--------
+ 1 file changed, 17 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index 68c1a99..f02f366 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -651,6 +651,14 @@ static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
+ pipe_ctx->plane_res.scl_data.ratios.horz_c.value /= 2;
+ pipe_ctx->plane_res.scl_data.ratios.vert_c.value /= 2;
+ }
++ pipe_ctx->plane_res.scl_data.ratios.horz = dc_fixpt_truncate(
++ pipe_ctx->plane_res.scl_data.ratios.horz, 19);
++ pipe_ctx->plane_res.scl_data.ratios.vert = dc_fixpt_truncate(
++ pipe_ctx->plane_res.scl_data.ratios.vert, 19);
++ pipe_ctx->plane_res.scl_data.ratios.horz_c = dc_fixpt_truncate(
++ pipe_ctx->plane_res.scl_data.ratios.horz_c, 19);
++ pipe_ctx->plane_res.scl_data.ratios.vert_c = dc_fixpt_truncate(
++ pipe_ctx->plane_res.scl_data.ratios.vert_c, 19);
+ }
+
+ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *recout_skip)
+@@ -687,17 +695,18 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
+ * init_bot = init + scaling_ratio
+ * init_c = init + truncated_vp_c_offset(from calculate viewport)
+ */
+- data->inits.h = dc_fixpt_div_int(
+- dc_fixpt_add_int(data->ratios.horz, data->taps.h_taps + 1), 2);
++ data->inits.h = dc_fixpt_truncate(dc_fixpt_div_int(
++ dc_fixpt_add_int(data->ratios.horz, data->taps.h_taps + 1), 2), 19);
+
+- data->inits.h_c = dc_fixpt_add(data->inits.h_c, dc_fixpt_div_int(
+- dc_fixpt_add_int(data->ratios.horz_c, data->taps.h_taps_c + 1), 2));
++ data->inits.h_c = dc_fixpt_truncate(dc_fixpt_add(data->inits.h_c, dc_fixpt_div_int(
++ dc_fixpt_add_int(data->ratios.horz_c, data->taps.h_taps_c + 1), 2)), 19);
+
+- data->inits.v = dc_fixpt_div_int(
+- dc_fixpt_add_int(data->ratios.vert, data->taps.v_taps + 1), 2);
++ data->inits.v = dc_fixpt_truncate(dc_fixpt_div_int(
++ dc_fixpt_add_int(data->ratios.vert, data->taps.v_taps + 1), 2), 19);
++
++ data->inits.v_c = dc_fixpt_truncate(dc_fixpt_add(data->inits.v_c, dc_fixpt_div_int(
++ dc_fixpt_add_int(data->ratios.vert_c, data->taps.v_taps_c + 1), 2)), 19);
+
+- data->inits.v_c = dc_fixpt_add(data->inits.v_c, dc_fixpt_div_int(
+- dc_fixpt_add_int(data->ratios.vert_c, data->taps.v_taps_c + 1), 2));
+
+
+ /* Adjust for viewport end clip-off */
+--
+2.7.4
+
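As a worked example of what this truncation costs: 19 fractional bits quantize the ratios and inits to steps of 2^-19 = 1/524288, roughly 0.0000019, so the low 13 of the 32 fractional bits are dropped before programming. The choice of 19 bits presumably matches the 19 fractional bits used by the scaler register formats (compare the u2d19/s4d19 conversion helpers in the fixed-point header).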
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4460-drm-amd-display-underflow-blankscreen-recovery.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4460-drm-amd-display-underflow-blankscreen-recovery.patch
new file mode 100644
index 00000000..429487ce
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4460-drm-amd-display-underflow-blankscreen-recovery.patch
@@ -0,0 +1,307 @@
+From 40113a9e16c7dad25263c1f3a37a1102f06ad21d Mon Sep 17 00:00:00 2001
+From: Charlene Liu <charlene.liu@amd.com>
+Date: Wed, 18 Apr 2018 14:31:41 -0400
+Subject: [PATCH 4460/5725] drm/amd/display: underflow/blankscreen recovery
+
+[Description]
+If the driver detects a HUBP underflow for any reason, and a debug
+option enabling recovery is set, it will kick in a sequence of
+recovery steps.
+
+Signed-off-by: Charlene Liu <charlene.liu@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc.h | 2 +
+ .../gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c | 8 ++
+ .../gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h | 7 +-
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 24 ++++++
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 3 +
+ .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 90 +++++++++++++++++++++-
+ .../gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 1 +
+ drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h | 2 +
+ 8 files changed, 135 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 936adbf..28c6210 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -239,6 +239,8 @@ struct dc_debug {
+ bool az_endpoint_mute_only;
+ bool always_use_regamma;
+ bool p010_mpo_support;
++ bool recovery_enabled;
++
+ };
+ struct dc_state;
+ struct resource_pool;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+index b9fb14a..943143e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+@@ -476,6 +476,14 @@ void hubbub1_toggle_watermark_change_req(struct hubbub *hubbub)
+ DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, watermark_change_req);
+ }
+
++void hubbub1_soft_reset(struct hubbub *hubbub, bool reset)
++{
++ uint32_t reset_en = reset ? 1 : 0;
++
++ REG_UPDATE(DCHUBBUB_SOFT_RESET,
++ DCHUBBUB_GLOBAL_SOFT_RESET, reset_en);
++}
++
+ static bool hubbub1_dcc_support_swizzle(
+ enum swizzle_mode_values swizzle,
+ unsigned int bytes_per_element,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
+index f479f54..6315a0e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
+@@ -48,7 +48,8 @@
+ SR(DCHUBBUB_ARB_DF_REQ_OUTSTAND),\
+ SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \
+ SR(DCHUBBUB_TEST_DEBUG_INDEX), \
+- SR(DCHUBBUB_TEST_DEBUG_DATA)
++ SR(DCHUBBUB_TEST_DEBUG_DATA),\
++ SR(DCHUBBUB_SOFT_RESET)
+
+ #define HUBBUB_SR_WATERMARK_REG_LIST()\
+ SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A),\
+@@ -105,6 +106,7 @@ struct dcn_hubbub_registers {
+ uint32_t DCHUBBUB_SDPIF_AGP_BOT;
+ uint32_t DCHUBBUB_SDPIF_AGP_TOP;
+ uint32_t DCHUBBUB_CRC_CTRL;
++ uint32_t DCHUBBUB_SOFT_RESET;
+ };
+
+ /* set field name */
+@@ -114,6 +116,7 @@ struct dcn_hubbub_registers {
+
+ #define HUBBUB_MASK_SH_LIST_DCN(mask_sh)\
+ HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, mask_sh), \
++ HUBBUB_SF(DCHUBBUB_SOFT_RESET, DCHUBBUB_GLOBAL_SOFT_RESET, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE, mask_sh), \
+@@ -143,6 +146,7 @@ struct dcn_hubbub_registers {
+ type DCHUBBUB_ARB_SAT_LEVEL;\
+ type DCHUBBUB_ARB_MIN_REQ_OUTSTAND;\
+ type DCHUBBUB_GLOBAL_TIMER_REFDIV;\
++ type DCHUBBUB_GLOBAL_SOFT_RESET; \
+ type SDPIF_FB_TOP;\
+ type SDPIF_FB_BASE;\
+ type SDPIF_FB_OFFSET;\
+@@ -201,6 +205,7 @@ void hubbub1_toggle_watermark_change_req(
+ void hubbub1_wm_read_state(struct hubbub *hubbub,
+ struct dcn_hubbub_wm *wm);
+
++void hubbub1_soft_reset(struct hubbub *hubbub, bool reset);
+ void hubbub1_construct(struct hubbub *hubbub,
+ struct dc_context *ctx,
+ const struct dcn_hubbub_registers *hubbub_regs,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+index 185f93b..d2ab78b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+@@ -78,6 +78,27 @@ static void hubp1_disconnect(struct hubp *hubp)
+ CURSOR_ENABLE, 0);
+ }
+
++static void hubp1_disable_control(struct hubp *hubp, bool disable_hubp)
++{
++ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
++ uint32_t disable = disable_hubp ? 1 : 0;
++
++ REG_UPDATE(DCHUBP_CNTL,
++ HUBP_DISABLE, disable);
++}
++
++static unsigned int hubp1_get_underflow_status(struct hubp *hubp)
++{
++ uint32_t hubp_underflow = 0;
++ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
++
++ REG_GET(DCHUBP_CNTL,
++ HUBP_UNDERFLOW_STATUS,
++ &hubp_underflow);
++
++ return hubp_underflow;
++}
++
+ static void hubp1_set_hubp_blank_en(struct hubp *hubp, bool blank)
+ {
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+@@ -1117,6 +1138,9 @@ static struct hubp_funcs dcn10_hubp_funcs = {
+ .hubp_clk_cntl = hubp1_clk_cntl,
+ .hubp_vtg_sel = hubp1_vtg_sel,
+ .hubp_read_state = hubp1_read_state,
++ .hubp_disable_control = hubp1_disable_control,
++ .hubp_get_underflow_status = hubp1_get_underflow_status,
++
+ };
+
+ /*****************************************/
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+index fe9b8c4..af38403 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+@@ -253,6 +253,7 @@
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_UNDERFLOW_STATUS, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_NO_OUTSTANDING_REQ, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_VTG_SEL, mask_sh),\
++ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_DISABLE, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_PIPES, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_BANKS, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, PIPE_INTERLEAVE, mask_sh),\
+@@ -421,6 +422,7 @@
+
+ #define DCN_HUBP_REG_FIELD_LIST(type) \
+ type HUBP_BLANK_EN;\
++ type HUBP_DISABLE;\
+ type HUBP_TTU_DISABLE;\
+ type HUBP_NO_OUTSTANDING_REQ;\
+ type HUBP_VTG_SEL;\
+@@ -723,4 +725,5 @@ void hubp1_read_state(struct hubp *hubp);
+
+ enum cursor_pitch hubp1_get_cursor_pitch(unsigned int pitch);
+
++
+ #endif
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index d3fc1a2..ada55a9 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -762,6 +762,90 @@ static void reset_back_end_for_pipe(
+ pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
+ }
+
++static bool dcn10_hw_wa_force_recovery(struct dc *dc)
++{
++ struct hubp *hubp ;
++ unsigned int i;
++ bool need_recover = true;
++
++ if (!dc->debug.recovery_enabled)
++ return false;
++
++ for (i = 0; i < dc->res_pool->pipe_count; i++) {
++ struct pipe_ctx *pipe_ctx =
++ &dc->current_state->res_ctx.pipe_ctx[i];
++ if (pipe_ctx != NULL) {
++ hubp = pipe_ctx->plane_res.hubp;
++ if (hubp != NULL) {
++ if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) {
++ /* one pipe underflow, we will reset all the pipes*/
++ need_recover = true;
++ }
++ }
++ }
++ }
++ if (!need_recover)
++ return false;
++ /*
++ DCHUBP_CNTL:HUBP_BLANK_EN=1
++ DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
++ DCHUBP_CNTL:HUBP_DISABLE=1
++ DCHUBP_CNTL:HUBP_DISABLE=0
++ DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
++ DCSURF_PRIMARY_SURFACE_ADDRESS
++ DCHUBP_CNTL:HUBP_BLANK_EN=0
++ */
++
++ for (i = 0; i < dc->res_pool->pipe_count; i++) {
++ struct pipe_ctx *pipe_ctx =
++ &dc->current_state->res_ctx.pipe_ctx[i];
++ if (pipe_ctx != NULL) {
++ hubp = pipe_ctx->plane_res.hubp;
++ /*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
++ if (hubp != NULL)
++ hubp->funcs->set_hubp_blank_en(hubp, true);
++ }
++ }
++ /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
++ hubbub1_soft_reset(dc->res_pool->hubbub, true);
++
++ for (i = 0; i < dc->res_pool->pipe_count; i++) {
++ struct pipe_ctx *pipe_ctx =
++ &dc->current_state->res_ctx.pipe_ctx[i];
++ if (pipe_ctx != NULL) {
++ hubp = pipe_ctx->plane_res.hubp;
++ /*DCHUBP_CNTL:HUBP_DISABLE=1*/
++ if (hubp != NULL)
++ hubp->funcs->hubp_disable_control(hubp, true);
++ }
++ }
++ for (i = 0; i < dc->res_pool->pipe_count; i++) {
++ struct pipe_ctx *pipe_ctx =
++ &dc->current_state->res_ctx.pipe_ctx[i];
++ if (pipe_ctx != NULL) {
++ hubp = pipe_ctx->plane_res.hubp;
++ /*DCHUBP_CNTL:HUBP_DISABLE=0*/
++ if (hubp != NULL)
++ hubp->funcs->hubp_disable_control(hubp, true);
++ }
++ }
++ /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
++ hubbub1_soft_reset(dc->res_pool->hubbub, false);
++ for (i = 0; i < dc->res_pool->pipe_count; i++) {
++ struct pipe_ctx *pipe_ctx =
++ &dc->current_state->res_ctx.pipe_ctx[i];
++ if (pipe_ctx != NULL) {
++ hubp = pipe_ctx->plane_res.hubp;
++ /*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
++ if (hubp != NULL)
++ hubp->funcs->set_hubp_blank_en(hubp, true);
++ }
++ }
++ return true;
++
++}
++
++
+ static void dcn10_verify_allow_pstate_change_high(struct dc *dc)
+ {
+ static bool should_log_hw_state; /* prevent hw state log by default */
+@@ -770,8 +854,12 @@ static void dcn10_verify_allow_pstate_change_high(struct dc *dc)
+ if (should_log_hw_state) {
+ dcn10_log_hw_state(dc);
+ }
+-
+ BREAK_TO_DEBUGGER();
++ if (dcn10_hw_wa_force_recovery(dc)) {
++ /*check again*/
++ if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub))
++ BREAK_TO_DEBUGGER();
++ }
+ }
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+index 2c0a315..16c84e9 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+@@ -446,6 +446,7 @@ static const struct dc_debug debug_defaults_drv = {
+ .vsr_support = true,
+ .performance_trace = false,
+ .az_endpoint_mute_only = true,
++ .recovery_enabled = false, /*enable this by default after testing.*/
+ };
+
+ static const struct dc_debug debug_defaults_diags = {
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+index 331f8ff..97df82c 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+@@ -121,6 +121,8 @@ struct hubp_funcs {
+ void (*hubp_clk_cntl)(struct hubp *hubp, bool enable);
+ void (*hubp_vtg_sel)(struct hubp *hubp, uint32_t otg_inst);
+ void (*hubp_read_state)(struct hubp *hubp);
++ void (*hubp_disable_control)(struct hubp *hubp, bool disable_hubp);
++ unsigned int (*hubp_get_underflow_status)(struct hubp *hubp);
+
+ };
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4461-drm-amd-display-Update-HW-sequencer-initialization.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4461-drm-amd-display-Update-HW-sequencer-initialization.patch
new file mode 100644
index 00000000..240b49c2
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4461-drm-amd-display-Update-HW-sequencer-initialization.patch
@@ -0,0 +1,125 @@
+From 05d312b066d366f999d81938bd4bc5f33b705263 Mon Sep 17 00:00:00 2001
+From: Eric Bernstein <eric.bernstein@amd.com>
+Date: Tue, 17 Apr 2018 16:50:28 -0400
+Subject: [PATCH 4461/5725] drm/amd/display: Update HW sequencer initialization
+
+Signed-off-by: Eric Bernstein <eric.bernstein@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 6 +++---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h | 2 ++
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 10 +++++-----
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h | 10 ++++++++++
+ 4 files changed, 20 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index ada55a9..858529e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -864,7 +864,7 @@ static void dcn10_verify_allow_pstate_change_high(struct dc *dc)
+ }
+
+ /* trigger HW to start disconnect plane from stream on the next vsync */
+-static void plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
++void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
+ {
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ int dpp_id = pipe_ctx->plane_res.dpp->inst;
+@@ -1047,7 +1047,7 @@ static void dcn10_init_hw(struct dc *dc)
+ dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
+ pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
+
+- plane_atomic_disconnect(dc, pipe_ctx);
++ hwss1_plane_atomic_disconnect(dc, pipe_ctx);
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+@@ -2282,7 +2282,7 @@ static void dcn10_apply_ctx_for_surface(
+ old_pipe_ctx->plane_state &&
+ old_pipe_ctx->stream_res.tg == tg) {
+
+- plane_atomic_disconnect(dc, old_pipe_ctx);
++ hwss1_plane_atomic_disconnect(dc, old_pipe_ctx);
+ removed_pipe[i] = true;
+
+ DC_LOG_DC(
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+index 6c526b5..44f734b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+@@ -37,4 +37,6 @@ extern void fill_display_configs(
+
+ bool is_rgb_cspace(enum dc_color_space output_color_space);
+
++void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx);
++
+ #endif /* __DC_HWSS_DCN10_H__ */
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+index c734b7f..f2fbce0 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+@@ -360,7 +360,7 @@ void optc1_program_timing(
+
+ }
+
+-static void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enable)
++void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enable)
+ {
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+@@ -1257,20 +1257,20 @@ void optc1_read_otg_state(struct optc *optc1,
+ OPTC_UNDERFLOW_OCCURRED_STATUS, &s->underflow_occurred_status);
+ }
+
+-static void optc1_clear_optc_underflow(struct timing_generator *optc)
++void optc1_clear_optc_underflow(struct timing_generator *optc)
+ {
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_UPDATE(OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_CLEAR, 1);
+ }
+
+-static void optc1_tg_init(struct timing_generator *optc)
++void optc1_tg_init(struct timing_generator *optc)
+ {
+ optc1_set_blank_data_double_buffer(optc, true);
+ optc1_clear_optc_underflow(optc);
+ }
+
+-static bool optc1_is_tg_enabled(struct timing_generator *optc)
++bool optc1_is_tg_enabled(struct timing_generator *optc)
+ {
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ uint32_t otg_enabled = 0;
+@@ -1281,7 +1281,7 @@ static bool optc1_is_tg_enabled(struct timing_generator *optc)
+
+ }
+
+-static bool optc1_is_optc_underflow_occurred(struct timing_generator *optc)
++bool optc1_is_optc_underflow_occurred(struct timing_generator *optc)
+ {
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ uint32_t underflow_occurred = 0;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+index 89e09e5..c62052f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+@@ -497,4 +497,14 @@ void optc1_program_stereo(struct timing_generator *optc,
+
+ bool optc1_is_stereo_left_eye(struct timing_generator *optc);
+
++void optc1_clear_optc_underflow(struct timing_generator *optc);
++
++void optc1_tg_init(struct timing_generator *optc);
++
++bool optc1_is_tg_enabled(struct timing_generator *optc);
++
++bool optc1_is_optc_underflow_occurred(struct timing_generator *optc);
++
++void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enable);
++
+ #endif /* __DC_TIMING_GENERATOR_DCN10_H__ */
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4462-drm-amd-display-fix-31_32_fixpt-shift-functions.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4462-drm-amd-display-fix-31_32_fixpt-shift-functions.patch
new file mode 100644
index 00000000..4a7dfe5e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4462-drm-amd-display-fix-31_32_fixpt-shift-functions.patch
@@ -0,0 +1,76 @@
+From faddf1ee373fe6439c4cc2df86ca81fafefbd483 Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Mon, 23 Apr 2018 12:41:34 -0400
+Subject: [PATCH 4462/5725] drm/amd/display: fix 31_32_fixpt shift functions
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Eric Yang <eric.yang2@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/include/fixed31_32.h | 26 ++++++++++++++++--------
+ 1 file changed, 18 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h
+index 61f11e2..bd8a304 100644
+--- a/drivers/gpu/drm/amd/display/include/fixed31_32.h
++++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h
+@@ -27,6 +27,12 @@
+ #define __DAL_FIXED31_32_H__
+
+ #define FIXED31_32_BITS_PER_FRACTIONAL_PART 32
++#ifndef LLONG_MIN
++#define LLONG_MIN (1LL<<63)
++#endif
++#ifndef LLONG_MAX
++#define LLONG_MAX (-1LL>>1)
++#endif
+
+ /*
+ * @brief
+@@ -45,6 +51,7 @@ struct fixed31_32 {
+ long long value;
+ };
+
++
+ /*
+ * @brief
+ * Useful constants
+@@ -201,14 +208,12 @@ static inline struct fixed31_32 dc_fixpt_clamp(
+ */
+ static inline struct fixed31_32 dc_fixpt_shl(struct fixed31_32 arg, unsigned char shift)
+ {
+- struct fixed31_32 res;
+-
+ ASSERT(((arg.value >= 0) && (arg.value <= LLONG_MAX >> shift)) ||
+- ((arg.value < 0) && (arg.value >= LLONG_MIN >> shift)));
++ ((arg.value < 0) && (arg.value >= (LLONG_MIN / (1 << shift)))));
+
+- res.value = arg.value << shift;
++ arg.value = arg.value << shift;
+
+- return res;
++ return arg;
+ }
+
+ /*
+@@ -217,9 +222,14 @@ static inline struct fixed31_32 dc_fixpt_shl(struct fixed31_32 arg, unsigned cha
+ */
+ static inline struct fixed31_32 dc_fixpt_shr(struct fixed31_32 arg, unsigned char shift)
+ {
+- struct fixed31_32 res;
+- res.value = arg.value >> shift;
+- return res;
++ bool negative = arg.value < 0;
++
++ if (negative)
++ arg.value = -arg.value;
++ arg.value = arg.value >> shift;
++ if (negative)
++ arg.value = -arg.value;
++ return arg;
+ }
+
+ /*
+--
+2.7.4
+
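The dc_fixpt_shr change negates a negative value, shifts, then negates back, so the right shift rounds toward zero instead of toward negative infinity. A standalone user-space sketch of that difference, using assumed example numbers rather than driver code (note that right-shifting a negative value is implementation-defined in ISO C; common compilers perform an arithmetic shift):

    #include <stdio.h>

    int main(void)
    {
            long long v = -3;

            long long arith    = v >> 1;          /* -2: arithmetic shift rounds toward -inf */
            long long sign_mag = -((-v) >> 1);    /* -1: negate, shift, negate back -> rounds toward zero */

            printf("%lld %lld\n", arith, sign_mag);   /* prints "-2 -1" */
            return 0;
    }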
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4463-drm-amd-display-fix-a-32-bit-shift-meant-to-be-64-wa.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4463-drm-amd-display-fix-a-32-bit-shift-meant-to-be-64-wa.patch
new file mode 100644
index 00000000..991fd3bf
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4463-drm-amd-display-fix-a-32-bit-shift-meant-to-be-64-wa.patch
@@ -0,0 +1,29 @@
+From 9c000464a9f5cb8ee6f6d7ffdb9bbc0f8e0c47f3 Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Mon, 23 Apr 2018 14:39:23 -0400
+Subject: [PATCH 4463/5725] drm/amd/display: fix a 32 bit shift meant to be 64
+ warning
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/include/fixed31_32.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h
+index bd8a304..76f64e9 100644
+--- a/drivers/gpu/drm/amd/display/include/fixed31_32.h
++++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h
+@@ -209,7 +209,7 @@ static inline struct fixed31_32 dc_fixpt_clamp(
+ static inline struct fixed31_32 dc_fixpt_shl(struct fixed31_32 arg, unsigned char shift)
+ {
+ ASSERT(((arg.value >= 0) && (arg.value <= LLONG_MAX >> shift)) ||
+- ((arg.value < 0) && (arg.value >= (LLONG_MIN / (1 << shift)))));
++ ((arg.value < 0) && (arg.value >= (LLONG_MIN / (1LL << shift)))));
+
+ arg.value = arg.value << shift;
+
+--
+2.7.4
+
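The one-line fix promotes the shift operand to 64 bits: with a plain int literal, `1 << shift` is evaluated in 32-bit arithmetic and is undefined once the shift count reaches 32, even though the other operand (LLONG_MIN) is 64-bit. A small standalone sketch with assumed values, not driver code:

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned char shift = 40;

            /* long long bad = LLONG_MIN / (1 << shift);   32-bit shift: undefined for shift >= 32 */
            long long good = LLONG_MIN / (1LL << shift);   /* whole expression stays 64-bit */

            printf("%lld\n", good);   /* -8388608 */
            return 0;
    }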
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4464-drm-amd-display-Add-dc-cap-to-restrict-VSR-downscali.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4464-drm-amd-display-Add-dc-cap-to-restrict-VSR-downscali.patch
new file mode 100644
index 00000000..56269d6a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4464-drm-amd-display-Add-dc-cap-to-restrict-VSR-downscali.patch
@@ -0,0 +1,73 @@
+From 481b210e42288c12e4a6a5f73282345f3df79caa Mon Sep 17 00:00:00 2001
+From: Xingyue Tao <xingyue.tao@amd.com>
+Date: Thu, 19 Apr 2018 16:23:12 -0400
+Subject: [PATCH 4464/5725] drm/amd/display: Add dc cap to restrict VSR
+ downscaling src size
+
+- Adds int max_downscale_src_width to the dc debug options
+- Rejects the mode if the downscale source size exceeds 4K (width > 3840)
+
+Signed-off-by: Xingyue Tao <xingyue.tao@amd.com>
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc.h | 1 +
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | 16 +++++++++++-----
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 1 +
+ 3 files changed, 13 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 28c6210..2023080 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -203,6 +203,7 @@ struct dc_debug {
+ bool clock_trace;
+ bool validation_trace;
+ bool bandwidth_calcs_trace;
++ int max_downscale_src_width;
+
+ /* stutter efficiency related */
+ bool disable_stutter;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+index 20796da..2da1389 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+@@ -145,12 +145,18 @@ bool dpp_get_optimal_number_of_taps(
+ else
+ pixel_width = scl_data->viewport.width;
+
+- /* Some ASICs does not support FP16 scaling, so we reject modes require this*/
+ if (scl_data->viewport.width != scl_data->h_active &&
+- scl_data->viewport.height != scl_data->v_active &&
+- dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT &&
+- scl_data->format == PIXEL_FORMAT_FP16)
+- return false;
++ scl_data->viewport.height != scl_data->v_active) {
++
++		/* Some ASICs do not support FP16 scaling, so we reject modes that require it */
++ if (dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT &&
++ scl_data->format == PIXEL_FORMAT_FP16)
++ return false;
++
++ if (dpp->ctx->dc->debug.max_downscale_src_width != 0 &&
++ scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width)
++ return false;
++ }
+
+ /* TODO: add lb check */
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+index 16c84e9..f69f3a5 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+@@ -447,6 +447,7 @@ static const struct dc_debug debug_defaults_drv = {
+ .performance_trace = false,
+ .az_endpoint_mute_only = true,
+ .recovery_enabled = false, /*enable this by default after testing.*/
++ .max_downscale_src_width = 3840,
+ };
+
+ static const struct dc_debug debug_defaults_diags = {
+--
+2.7.4
+
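A minimal sketch of the check this patch adds to dpp_get_optimal_number_of_taps, pulled out into a standalone helper with simplified, hypothetical types (the real code reads the cap from dpp->ctx->dc->debug):

    #include <stdbool.h>

    struct example_scl_data {
            unsigned int viewport_width, viewport_height;
            unsigned int h_active, v_active;
    };

    bool example_downscale_supported(const struct example_scl_data *scl,
                                     int max_downscale_src_width)
    {
            /* mirrors the patch: only applies when viewport differs from active size */
            bool scaling = scl->viewport_width != scl->h_active &&
                           scl->viewport_height != scl->v_active;

            /* a cap of 0 disables the restriction */
            if (scaling && max_downscale_src_width != 0 &&
                scl->viewport_width > (unsigned int)max_downscale_src_width)
                    return false;

            return true;
    }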
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4465-drm-amd-display-disable-mpo-if-brightness-adjusted.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4465-drm-amd-display-disable-mpo-if-brightness-adjusted.patch
new file mode 100644
index 00000000..8ef41d27
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4465-drm-amd-display-disable-mpo-if-brightness-adjusted.patch
@@ -0,0 +1,40 @@
+From e787d94827498d9a06832ee124f8298f6ae3ea2e Mon Sep 17 00:00:00 2001
+From: Yue Hin Lau <Yuehin.Lau@amd.com>
+Date: Wed, 18 Apr 2018 16:07:04 -0400
+Subject: [PATCH 4465/5725] drm/amd/display: disable mpo if brightness adjusted
+
+Signed-off-by: Yue Hin Lau <Yuehin.Lau@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc.h | 1 +
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 1 +
+ 2 files changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 2023080..a3e2851 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -75,6 +75,7 @@ struct dc_caps {
+ bool dynamic_audio;
+ bool is_apu;
+ bool dual_link_dvi;
++ bool post_blend_color_processing;
+ };
+
+ struct dc_dcc_surface_param {
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+index f69f3a5..ace2e03 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+@@ -1023,6 +1023,7 @@ static bool construct(
+ dc->caps.max_cursor_size = 256;
+ dc->caps.max_slave_planes = 1;
+ dc->caps.is_apu = true;
++ dc->caps.post_blend_color_processing = false;
+
+ if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
+ dc->debug = debug_defaults_drv;
+--
+2.7.4
+
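This patch only introduces the post_blend_color_processing cap and defaults it to false on DCN1.0; the consuming check lives in the display manager layer outside this diff. A hypothetical sketch of the intended use implied by the subject line, with illustrative names only:

    #include <stdbool.h>

    struct example_caps {
            bool post_blend_color_processing;
    };

    bool example_allow_mpo(const struct example_caps *caps, bool brightness_adjusted)
    {
            /* without post-blend colour processing, a brightness adjustment would
             * only be applied to one plane, so multi-plane overlay is refused */
            if (brightness_adjusted && !caps->post_blend_color_processing)
                    return false;
            return true;
    }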
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4466-drm-amd-display-Log-DTN-only-after-the-atomic-commit.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4466-drm-amd-display-Log-DTN-only-after-the-atomic-commit.patch
new file mode 100644
index 00000000..a24cbdd9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4466-drm-amd-display-Log-DTN-only-after-the-atomic-commit.patch
@@ -0,0 +1,170 @@
+From fd8bb9249b5ebf86b833ce70e5975f163c516576 Mon Sep 17 00:00:00 2001
+From: Nikola Cornij <nikola.cornij@amd.com>
+Date: Mon, 23 Apr 2018 15:55:36 -0400
+Subject: [PATCH 4466/5725] drm/amd/display: Log DTN only after the atomic
+ commit in Diag
+
+Also print HUBP info only when the pipe is enabled. This fixes the DTN
+log differing depending on the test sequence.
+
+Signed-off-by: Nikola Cornij <nikola.cornij@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 119 +++++++++++----------
+ 1 file changed, 62 insertions(+), 57 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 858529e..400d0ca 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -127,24 +127,26 @@ static void dcn10_log_hubp_states(struct dc *dc)
+
+ hubp->funcs->hubp_read_state(hubp);
+
+- DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh"
+- " %6d %8d %7d %8xh",
+- hubp->inst,
+- s->pixel_format,
+- s->inuse_addr_hi,
+- s->viewport_width,
+- s->viewport_height,
+- s->rotation_angle,
+- s->h_mirror_en,
+- s->sw_mode,
+- s->dcc_en,
+- s->blank_en,
+- s->ttu_disable,
+- s->underflow_status);
+- DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
+- DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
+- DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
+- DTN_INFO("\n");
++ if (!s->blank_en) {
++ DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh"
++ " %6d %8d %7d %8xh",
++ hubp->inst,
++ s->pixel_format,
++ s->inuse_addr_hi,
++ s->viewport_width,
++ s->viewport_height,
++ s->rotation_angle,
++ s->h_mirror_en,
++ s->sw_mode,
++ s->dcc_en,
++ s->blank_en,
++ s->ttu_disable,
++ s->underflow_status);
++ DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
++ DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
++ DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
++ DTN_INFO("\n");
++ }
+ }
+
+ DTN_INFO("\n=========RQ========\n");
+@@ -155,16 +157,17 @@ static void dcn10_log_hubp_states(struct dc *dc)
+ struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
+ struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
+
+- DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
+- i, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
+- rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
+- rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
+- rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
+- rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
+- rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
+- rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
+- rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
+- rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
++ if (!s->blank_en)
++ DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
++ pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
++ rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
++ rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
++ rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
++ rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
++ rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
++ rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
++ rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
++ rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
+ }
+
+ DTN_INFO("========DLG========\n");
+@@ -179,27 +182,28 @@ static void dcn10_log_hubp_states(struct dc *dc)
+ struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
+
+- DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
+- "% 8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
+- " %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
+- i, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
+- dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
+- dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
+- dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
+- dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
+- dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
+- dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
+- dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
+- dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
+- dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
+- dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
+- dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
+- dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
+- dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
+- dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
+- dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
+- dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
+- dlg_regs->xfc_reg_remote_surface_flip_latency);
++ if (!s->blank_en)
++ DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
++ "% 8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
++ " %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
++ pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
++ dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
++ dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
++ dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
++ dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
++ dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
++ dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
++ dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
++ dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
++ dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
++ dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
++ dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
++ dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
++ dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
++ dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
++ dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
++ dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
++ dlg_regs->xfc_reg_remote_surface_flip_latency);
+ }
+
+ DTN_INFO("========TTU========\n");
+@@ -210,14 +214,15 @@ static void dcn10_log_hubp_states(struct dc *dc)
+ struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;
+
+- DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
+- i, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
+- ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
+- ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
+- ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
+- ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
+- ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
+- ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
++ if (!s->blank_en)
++ DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
++ pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
++ ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
++ ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
++ ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
++ ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
++ ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
++ ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
+ }
+ DTN_INFO("\n");
+ }
+--
+2.7.4
+
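The logging hunks above all follow the same pattern: skip the per-pipe dump when the pipe is blanked, and key each line by the HUBP instance number rather than the loop index. A simplified user-space sketch of that pattern, with hypothetical names (the real code uses DTN_INFO rather than printf):

    #include <stdio.h>

    struct example_hubp_state {
            unsigned int inst;
            unsigned int blank_en;
    };

    void example_log_hubps(const struct example_hubp_state *s, int count)
    {
            int i;

            for (i = 0; i < count; i++) {
                    if (s[i].blank_en)
                            continue;   /* blanked pipes are no longer dumped */

                    /* keyed by hardware instance, not loop index */
                    printf("[%2u]: active\n", s[i].inst);
            }
    }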
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4467-drm-amd-display-update-dml-to-allow-sync-with-DV.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4467-drm-amd-display-update-dml-to-allow-sync-with-DV.patch
new file mode 100644
index 00000000..190b4e15
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4467-drm-amd-display-update-dml-to-allow-sync-with-DV.patch
@@ -0,0 +1,1064 @@
+From ed9f9a71a1e4ddc5e126f82e70a62cd12297ad43 Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Tue, 20 Mar 2018 08:25:16 -0400
+Subject: [PATCH 4467/5725] drm/amd/display: update dml to allow sync with DV
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ .../drm/amd/display/dc/dml/display_mode_enums.h | 13 +
+ .../drm/amd/display/dc/dml/display_mode_structs.h | 962 +++++++++++----------
+ .../gpu/drm/amd/display/dc/dml/dml_inline_defs.h | 10 +
+ 3 files changed, 515 insertions(+), 470 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
+index b1ad355..47c19f8 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
++++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
+@@ -108,4 +108,17 @@ enum output_standard {
+ dm_std_uninitialized = 0, dm_std_cvtr2, dm_std_cvt
+ };
+
++enum mpc_combine_affinity {
++ dm_mpc_always_when_possible,
++ dm_mpc_reduce_voltage,
++ dm_mpc_reduce_voltage_and_clocks
++};
++
++enum self_refresh_affinity {
++ dm_try_to_allow_self_refresh_and_mclk_switch,
++ dm_allow_self_refresh_and_mclk_switch,
++ dm_allow_self_refresh,
++ dm_neither_self_refresh_nor_mclk_switch
++};
++
+ #endif
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+index ce750ed..7fa0375 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
++++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+@@ -25,39 +25,39 @@
+ #ifndef __DISPLAY_MODE_STRUCTS_H__
+ #define __DISPLAY_MODE_STRUCTS_H__
+
+-typedef struct _vcs_dpi_voltage_scaling_st voltage_scaling_st;
+-typedef struct _vcs_dpi_soc_bounding_box_st soc_bounding_box_st;
+-typedef struct _vcs_dpi_ip_params_st ip_params_st;
+-typedef struct _vcs_dpi_display_pipe_source_params_st display_pipe_source_params_st;
+-typedef struct _vcs_dpi_display_output_params_st display_output_params_st;
+-typedef struct _vcs_dpi_display_bandwidth_st display_bandwidth_st;
+-typedef struct _vcs_dpi_scaler_ratio_depth_st scaler_ratio_depth_st;
+-typedef struct _vcs_dpi_scaler_taps_st scaler_taps_st;
+-typedef struct _vcs_dpi_display_pipe_dest_params_st display_pipe_dest_params_st;
+-typedef struct _vcs_dpi_display_pipe_params_st display_pipe_params_st;
+-typedef struct _vcs_dpi_display_clocks_and_cfg_st display_clocks_and_cfg_st;
+-typedef struct _vcs_dpi_display_e2e_pipe_params_st display_e2e_pipe_params_st;
+-typedef struct _vcs_dpi_dchub_buffer_sizing_st dchub_buffer_sizing_st;
+-typedef struct _vcs_dpi_watermarks_perf_st watermarks_perf_st;
+-typedef struct _vcs_dpi_cstate_pstate_watermarks_st cstate_pstate_watermarks_st;
+-typedef struct _vcs_dpi_wm_calc_pipe_params_st wm_calc_pipe_params_st;
+-typedef struct _vcs_dpi_vratio_pre_st vratio_pre_st;
+-typedef struct _vcs_dpi_display_data_rq_misc_params_st display_data_rq_misc_params_st;
+-typedef struct _vcs_dpi_display_data_rq_sizing_params_st display_data_rq_sizing_params_st;
+-typedef struct _vcs_dpi_display_data_rq_dlg_params_st display_data_rq_dlg_params_st;
+-typedef struct _vcs_dpi_display_cur_rq_dlg_params_st display_cur_rq_dlg_params_st;
+-typedef struct _vcs_dpi_display_rq_dlg_params_st display_rq_dlg_params_st;
+-typedef struct _vcs_dpi_display_rq_sizing_params_st display_rq_sizing_params_st;
+-typedef struct _vcs_dpi_display_rq_misc_params_st display_rq_misc_params_st;
+-typedef struct _vcs_dpi_display_rq_params_st display_rq_params_st;
+-typedef struct _vcs_dpi_display_dlg_regs_st display_dlg_regs_st;
+-typedef struct _vcs_dpi_display_ttu_regs_st display_ttu_regs_st;
+-typedef struct _vcs_dpi_display_data_rq_regs_st display_data_rq_regs_st;
+-typedef struct _vcs_dpi_display_rq_regs_st display_rq_regs_st;
+-typedef struct _vcs_dpi_display_dlg_sys_params_st display_dlg_sys_params_st;
+-typedef struct _vcs_dpi_display_dlg_prefetch_param_st display_dlg_prefetch_param_st;
+-typedef struct _vcs_dpi_display_pipe_clock_st display_pipe_clock_st;
+-typedef struct _vcs_dpi_display_arb_params_st display_arb_params_st;
++typedef struct _vcs_dpi_voltage_scaling_st voltage_scaling_st;
++typedef struct _vcs_dpi_soc_bounding_box_st soc_bounding_box_st;
++typedef struct _vcs_dpi_ip_params_st ip_params_st;
++typedef struct _vcs_dpi_display_pipe_source_params_st display_pipe_source_params_st;
++typedef struct _vcs_dpi_display_output_params_st display_output_params_st;
++typedef struct _vcs_dpi_display_bandwidth_st display_bandwidth_st;
++typedef struct _vcs_dpi_scaler_ratio_depth_st scaler_ratio_depth_st;
++typedef struct _vcs_dpi_scaler_taps_st scaler_taps_st;
++typedef struct _vcs_dpi_display_pipe_dest_params_st display_pipe_dest_params_st;
++typedef struct _vcs_dpi_display_pipe_params_st display_pipe_params_st;
++typedef struct _vcs_dpi_display_clocks_and_cfg_st display_clocks_and_cfg_st;
++typedef struct _vcs_dpi_display_e2e_pipe_params_st display_e2e_pipe_params_st;
++typedef struct _vcs_dpi_dchub_buffer_sizing_st dchub_buffer_sizing_st;
++typedef struct _vcs_dpi_watermarks_perf_st watermarks_perf_st;
++typedef struct _vcs_dpi_cstate_pstate_watermarks_st cstate_pstate_watermarks_st;
++typedef struct _vcs_dpi_wm_calc_pipe_params_st wm_calc_pipe_params_st;
++typedef struct _vcs_dpi_vratio_pre_st vratio_pre_st;
++typedef struct _vcs_dpi_display_data_rq_misc_params_st display_data_rq_misc_params_st;
++typedef struct _vcs_dpi_display_data_rq_sizing_params_st display_data_rq_sizing_params_st;
++typedef struct _vcs_dpi_display_data_rq_dlg_params_st display_data_rq_dlg_params_st;
++typedef struct _vcs_dpi_display_cur_rq_dlg_params_st display_cur_rq_dlg_params_st;
++typedef struct _vcs_dpi_display_rq_dlg_params_st display_rq_dlg_params_st;
++typedef struct _vcs_dpi_display_rq_sizing_params_st display_rq_sizing_params_st;
++typedef struct _vcs_dpi_display_rq_misc_params_st display_rq_misc_params_st;
++typedef struct _vcs_dpi_display_rq_params_st display_rq_params_st;
++typedef struct _vcs_dpi_display_dlg_regs_st display_dlg_regs_st;
++typedef struct _vcs_dpi_display_ttu_regs_st display_ttu_regs_st;
++typedef struct _vcs_dpi_display_data_rq_regs_st display_data_rq_regs_st;
++typedef struct _vcs_dpi_display_rq_regs_st display_rq_regs_st;
++typedef struct _vcs_dpi_display_dlg_sys_params_st display_dlg_sys_params_st;
++typedef struct _vcs_dpi_display_dlg_prefetch_param_st display_dlg_prefetch_param_st;
++typedef struct _vcs_dpi_display_pipe_clock_st display_pipe_clock_st;
++typedef struct _vcs_dpi_display_arb_params_st display_arb_params_st;
+
+ struct _vcs_dpi_voltage_scaling_st {
+ int state;
+@@ -72,89 +72,107 @@ struct _vcs_dpi_voltage_scaling_st {
+ double dppclk_mhz;
+ };
+
+-struct _vcs_dpi_soc_bounding_box_st {
+- double sr_exit_time_us;
+- double sr_enter_plus_exit_time_us;
+- double urgent_latency_us;
+- double writeback_latency_us;
+- double ideal_dram_bw_after_urgent_percent;
+- unsigned int max_request_size_bytes;
+- double downspread_percent;
+- double dram_page_open_time_ns;
+- double dram_rw_turnaround_time_ns;
+- double dram_return_buffer_per_channel_bytes;
+- double dram_channel_width_bytes;
++struct _vcs_dpi_soc_bounding_box_st {
++ double sr_exit_time_us;
++ double sr_enter_plus_exit_time_us;
++ double urgent_latency_us;
++ double urgent_latency_pixel_data_only_us;
++ double urgent_latency_pixel_mixed_with_vm_data_us;
++ double urgent_latency_vm_data_only_us;
++ double writeback_latency_us;
++ double ideal_dram_bw_after_urgent_percent;
++ double pct_ideal_dram_sdp_bw_after_urgent_pixel_only; // PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly
++ double pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm;
++ double pct_ideal_dram_sdp_bw_after_urgent_vm_only;
++ double max_avg_sdp_bw_use_normal_percent;
++ double max_avg_dram_bw_use_normal_percent;
++ unsigned int max_request_size_bytes;
++ double downspread_percent;
++ double dram_page_open_time_ns;
++ double dram_rw_turnaround_time_ns;
++ double dram_return_buffer_per_channel_bytes;
++ double dram_channel_width_bytes;
+ double fabric_datapath_to_dcn_data_return_bytes;
+ double dcn_downspread_percent;
+ double dispclk_dppclk_vco_speed_mhz;
+ double dfs_vco_period_ps;
+- unsigned int round_trip_ping_latency_dcfclk_cycles;
+- unsigned int urgent_out_of_order_return_per_channel_bytes;
+- unsigned int channel_interleave_bytes;
+- unsigned int num_banks;
+- unsigned int num_chans;
+- unsigned int vmm_page_size_bytes;
+- double dram_clock_change_latency_us;
+- double writeback_dram_clock_change_latency_us;
+- unsigned int return_bus_width_bytes;
+- unsigned int voltage_override;
+- double xfc_bus_transport_time_us;
+- double xfc_xbuf_latency_tolerance_us;
++ unsigned int urgent_out_of_order_return_per_channel_pixel_only_bytes;
++ unsigned int urgent_out_of_order_return_per_channel_pixel_and_vm_bytes;
++ unsigned int urgent_out_of_order_return_per_channel_vm_only_bytes;
++ unsigned int round_trip_ping_latency_dcfclk_cycles;
++ unsigned int urgent_out_of_order_return_per_channel_bytes;
++ unsigned int channel_interleave_bytes;
++ unsigned int num_banks;
++ unsigned int num_chans;
++ unsigned int vmm_page_size_bytes;
++ double dram_clock_change_latency_us;
++ double writeback_dram_clock_change_latency_us;
++ unsigned int return_bus_width_bytes;
++ unsigned int voltage_override;
++ double xfc_bus_transport_time_us;
++ double xfc_xbuf_latency_tolerance_us;
++ int use_urgent_burst_bw;
+ struct _vcs_dpi_voltage_scaling_st clock_limits[7];
+ };
+
+-struct _vcs_dpi_ip_params_st {
+- unsigned int max_inter_dcn_tile_repeaters;
+- unsigned int num_dsc;
+- unsigned int odm_capable;
+- unsigned int rob_buffer_size_kbytes;
+- unsigned int det_buffer_size_kbytes;
+- unsigned int dpte_buffer_size_in_pte_reqs;
+- unsigned int pde_proc_buffer_size_64k_reqs;
+- unsigned int dpp_output_buffer_pixels;
+- unsigned int opp_output_buffer_lines;
+- unsigned int pixel_chunk_size_kbytes;
+- unsigned char pte_enable;
+- unsigned int pte_chunk_size_kbytes;
+- unsigned int meta_chunk_size_kbytes;
+- unsigned int writeback_chunk_size_kbytes;
+- unsigned int line_buffer_size_bits;
+- unsigned int max_line_buffer_lines;
+- unsigned int writeback_luma_buffer_size_kbytes;
+- unsigned int writeback_chroma_buffer_size_kbytes;
+- unsigned int writeback_chroma_line_buffer_width_pixels;
+- unsigned int max_page_table_levels;
+- unsigned int max_num_dpp;
+- unsigned int max_num_otg;
+- unsigned int cursor_chunk_size;
+- unsigned int cursor_buffer_size;
+- unsigned int max_num_wb;
+- unsigned int max_dchub_pscl_bw_pix_per_clk;
+- unsigned int max_pscl_lb_bw_pix_per_clk;
+- unsigned int max_lb_vscl_bw_pix_per_clk;
+- unsigned int max_vscl_hscl_bw_pix_per_clk;
+- double max_hscl_ratio;
+- double max_vscl_ratio;
+- unsigned int hscl_mults;
+- unsigned int vscl_mults;
+- unsigned int max_hscl_taps;
+- unsigned int max_vscl_taps;
+- unsigned int xfc_supported;
+- unsigned int xfc_fill_constant_bytes;
+- double dispclk_ramp_margin_percent;
+- double xfc_fill_bw_overhead_percent;
+- double underscan_factor;
+- unsigned int min_vblank_lines;
+- unsigned int dppclk_delay_subtotal;
+- unsigned int dispclk_delay_subtotal;
+- unsigned int dcfclk_cstate_latency;
+- unsigned int dppclk_delay_scl;
+- unsigned int dppclk_delay_scl_lb_only;
+- unsigned int dppclk_delay_cnvc_formatter;
+- unsigned int dppclk_delay_cnvc_cursor;
+- unsigned int is_line_buffer_bpp_fixed;
+- unsigned int line_buffer_fixed_bpp;
+- unsigned int dcc_supported;
++struct _vcs_dpi_ip_params_st {
++ bool gpuvm_enable;
++ bool hostvm_enable;
++ unsigned int gpuvm_max_page_table_levels;
++ unsigned int hostvm_max_page_table_levels;
++ unsigned int hostvm_cached_page_table_levels;
++ unsigned int pte_group_size_bytes;
++ unsigned int max_inter_dcn_tile_repeaters;
++ unsigned int num_dsc;
++ unsigned int odm_capable;
++ unsigned int rob_buffer_size_kbytes;
++ unsigned int det_buffer_size_kbytes;
++ unsigned int dpte_buffer_size_in_pte_reqs;
++ unsigned int pde_proc_buffer_size_64k_reqs;
++ unsigned int dpp_output_buffer_pixels;
++ unsigned int opp_output_buffer_lines;
++ unsigned int pixel_chunk_size_kbytes;
++ unsigned char pte_enable;
++ unsigned int pte_chunk_size_kbytes;
++ unsigned int meta_chunk_size_kbytes;
++ unsigned int writeback_chunk_size_kbytes;
++ unsigned int line_buffer_size_bits;
++ unsigned int max_line_buffer_lines;
++ unsigned int writeback_luma_buffer_size_kbytes;
++ unsigned int writeback_chroma_buffer_size_kbytes;
++ unsigned int writeback_chroma_line_buffer_width_pixels;
++ unsigned int max_page_table_levels;
++ unsigned int max_num_dpp;
++ unsigned int max_num_otg;
++ unsigned int cursor_chunk_size;
++ unsigned int cursor_buffer_size;
++ unsigned int max_num_wb;
++ unsigned int max_dchub_pscl_bw_pix_per_clk;
++ unsigned int max_pscl_lb_bw_pix_per_clk;
++ unsigned int max_lb_vscl_bw_pix_per_clk;
++ unsigned int max_vscl_hscl_bw_pix_per_clk;
++ double max_hscl_ratio;
++ double max_vscl_ratio;
++ unsigned int hscl_mults;
++ unsigned int vscl_mults;
++ unsigned int max_hscl_taps;
++ unsigned int max_vscl_taps;
++ unsigned int xfc_supported;
++ unsigned int xfc_fill_constant_bytes;
++ double dispclk_ramp_margin_percent;
++ double xfc_fill_bw_overhead_percent;
++ double underscan_factor;
++ unsigned int min_vblank_lines;
++ unsigned int dppclk_delay_subtotal;
++ unsigned int dispclk_delay_subtotal;
++ unsigned int dcfclk_cstate_latency;
++ unsigned int dppclk_delay_scl;
++ unsigned int dppclk_delay_scl_lb_only;
++ unsigned int dppclk_delay_cnvc_formatter;
++ unsigned int dppclk_delay_cnvc_cursor;
++ unsigned int is_line_buffer_bpp_fixed;
++ unsigned int line_buffer_fixed_bpp;
++ unsigned int dcc_supported;
+
+ unsigned int IsLineBufferBppFixed;
+ unsigned int LineBufferFixedBpp;
+@@ -169,41 +187,45 @@ struct _vcs_dpi_display_xfc_params_st {
+ int xfc_slv_chunk_size_bytes;
+ };
+
+-struct _vcs_dpi_display_pipe_source_params_st {
+- int source_format;
+- unsigned char dcc;
+- unsigned int dcc_override;
+- unsigned int dcc_rate;
+- unsigned char dcc_use_global;
+- unsigned char vm;
+- unsigned char vm_levels_force_en;
+- unsigned int vm_levels_force;
+- int source_scan;
+- int sw_mode;
+- int macro_tile_size;
+- unsigned char is_display_sw;
+- unsigned int viewport_width;
+- unsigned int viewport_height;
+- unsigned int viewport_y_y;
+- unsigned int viewport_y_c;
+- unsigned int viewport_width_c;
+- unsigned int viewport_height_c;
+- unsigned int data_pitch;
+- unsigned int data_pitch_c;
+- unsigned int meta_pitch;
+- unsigned int meta_pitch_c;
+- unsigned int cur0_src_width;
+- int cur0_bpp;
+- unsigned int cur1_src_width;
+- int cur1_bpp;
+- int num_cursors;
+- unsigned char is_hsplit;
+- unsigned char dynamic_metadata_enable;
+- unsigned int dynamic_metadata_lines_before_active;
+- unsigned int dynamic_metadata_xmit_bytes;
+- unsigned int hsplit_grp;
+- unsigned char xfc_enable;
+- unsigned char xfc_slave;
++struct _vcs_dpi_display_pipe_source_params_st {
++ int source_format;
++ unsigned char dcc;
++ unsigned int dcc_override;
++ unsigned int dcc_rate;
++ unsigned char dcc_use_global;
++ unsigned char vm;
++ bool gpuvm; // gpuvm enabled
++ bool hostvm; // hostvm enabled
++ bool gpuvm_levels_force_en;
++ unsigned int gpuvm_levels_force;
++ bool hostvm_levels_force_en;
++ unsigned int hostvm_levels_force;
++ int source_scan;
++ int sw_mode;
++ int macro_tile_size;
++ unsigned char is_display_sw;
++ unsigned int viewport_width;
++ unsigned int viewport_height;
++ unsigned int viewport_y_y;
++ unsigned int viewport_y_c;
++ unsigned int viewport_width_c;
++ unsigned int viewport_height_c;
++ unsigned int data_pitch;
++ unsigned int data_pitch_c;
++ unsigned int meta_pitch;
++ unsigned int meta_pitch_c;
++ unsigned int cur0_src_width;
++ int cur0_bpp;
++ unsigned int cur1_src_width;
++ int cur1_bpp;
++ int num_cursors;
++ unsigned char is_hsplit;
++ unsigned char dynamic_metadata_enable;
++ unsigned int dynamic_metadata_lines_before_active;
++ unsigned int dynamic_metadata_xmit_bytes;
++ unsigned int hsplit_grp;
++ unsigned char xfc_enable;
++ unsigned char xfc_slave;
+ struct _vcs_dpi_display_xfc_params_st xfc_params;
+ };
+ struct writeback_st {
+@@ -219,335 +241,335 @@ struct writeback_st {
+ double wb_vratio;
+ };
+
+-struct _vcs_dpi_display_output_params_st {
+- int dp_lanes;
+- int output_bpp;
+- int dsc_enable;
+- int wb_enable;
+- int num_active_wb;
+- int opp_input_bpc;
+- int output_type;
+- int output_format;
+- int output_standard;
+- int dsc_slices;
++struct _vcs_dpi_display_output_params_st {
++ int dp_lanes;
++ int output_bpp;
++ int dsc_enable;
++ int wb_enable;
++ int num_active_wb;
++ int output_bpc;
++ int output_type;
++ int output_format;
++ int output_standard;
++ int dsc_slices;
+ struct writeback_st wb;
+ };
+
+-struct _vcs_dpi_display_bandwidth_st {
+- double total_bw_consumed_gbps;
+- double guaranteed_urgent_return_bw_gbps;
+-};
+-
+-struct _vcs_dpi_scaler_ratio_depth_st {
+- double hscl_ratio;
+- double vscl_ratio;
+- double hscl_ratio_c;
+- double vscl_ratio_c;
+- double vinit;
+- double vinit_c;
+- double vinit_bot;
+- double vinit_bot_c;
+- int lb_depth;
+- int scl_enable;
+-};
+-
+-struct _vcs_dpi_scaler_taps_st {
+- unsigned int htaps;
+- unsigned int vtaps;
+- unsigned int htaps_c;
+- unsigned int vtaps_c;
+-};
+-
+-struct _vcs_dpi_display_pipe_dest_params_st {
+- unsigned int recout_width;
+- unsigned int recout_height;
+- unsigned int full_recout_width;
+- unsigned int full_recout_height;
+- unsigned int hblank_start;
+- unsigned int hblank_end;
+- unsigned int vblank_start;
+- unsigned int vblank_end;
+- unsigned int htotal;
+- unsigned int vtotal;
+- unsigned int vactive;
+- unsigned int hactive;
+- unsigned int vstartup_start;
+- unsigned int vupdate_offset;
+- unsigned int vupdate_width;
+- unsigned int vready_offset;
+- unsigned char interlaced;
+- unsigned char underscan;
+- double pixel_rate_mhz;
+- unsigned char synchronized_vblank_all_planes;
+- unsigned char otg_inst;
+- unsigned char odm_split_cnt;
+- unsigned char odm_combine;
+-};
+-
+-struct _vcs_dpi_display_pipe_params_st {
+- display_pipe_source_params_st src;
+- display_pipe_dest_params_st dest;
+- scaler_ratio_depth_st scale_ratio_depth;
+- scaler_taps_st scale_taps;
+-};
+-
+-struct _vcs_dpi_display_clocks_and_cfg_st {
+- int voltage;
+- double dppclk_mhz;
+- double refclk_mhz;
+- double dispclk_mhz;
+- double dcfclk_mhz;
+- double socclk_mhz;
+-};
+-
+-struct _vcs_dpi_display_e2e_pipe_params_st {
+- display_pipe_params_st pipe;
+- display_output_params_st dout;
+- display_clocks_and_cfg_st clks_cfg;
+-};
+-
+-struct _vcs_dpi_dchub_buffer_sizing_st {
+- unsigned int swath_width_y;
+- unsigned int swath_height_y;
+- unsigned int swath_height_c;
+- unsigned int detail_buffer_size_y;
+-};
+-
+-struct _vcs_dpi_watermarks_perf_st {
+- double stutter_eff_in_active_region_percent;
+- double urgent_latency_supported_us;
+- double non_urgent_latency_supported_us;
+- double dram_clock_change_margin_us;
+- double dram_access_eff_percent;
+-};
+-
+-struct _vcs_dpi_cstate_pstate_watermarks_st {
+- double cstate_exit_us;
+- double cstate_enter_plus_exit_us;
+- double pstate_change_us;
+-};
+-
+-struct _vcs_dpi_wm_calc_pipe_params_st {
+- unsigned int num_dpp;
+- int voltage;
+- int output_type;
+- double dcfclk_mhz;
+- double socclk_mhz;
+- double dppclk_mhz;
+- double pixclk_mhz;
+- unsigned char interlace_en;
+- unsigned char pte_enable;
+- unsigned char dcc_enable;
+- double dcc_rate;
+- double bytes_per_pixel_c;
+- double bytes_per_pixel_y;
+- unsigned int swath_width_y;
+- unsigned int swath_height_y;
+- unsigned int swath_height_c;
+- unsigned int det_buffer_size_y;
+- double h_ratio;
+- double v_ratio;
+- unsigned int h_taps;
+- unsigned int h_total;
+- unsigned int v_total;
+- unsigned int v_active;
+- unsigned int e2e_index;
+- double display_pipe_line_delivery_time;
+- double read_bw;
+- unsigned int lines_in_det_y;
+- unsigned int lines_in_det_y_rounded_down_to_swath;
+- double full_det_buffering_time;
+- double dcfclk_deepsleep_mhz_per_plane;
+-};
+-
+-struct _vcs_dpi_vratio_pre_st {
+- double vratio_pre_l;
+- double vratio_pre_c;
+-};
+-
+-struct _vcs_dpi_display_data_rq_misc_params_st {
+- unsigned int full_swath_bytes;
+- unsigned int stored_swath_bytes;
+- unsigned int blk256_height;
+- unsigned int blk256_width;
+- unsigned int req_height;
+- unsigned int req_width;
+-};
+-
+-struct _vcs_dpi_display_data_rq_sizing_params_st {
+- unsigned int chunk_bytes;
+- unsigned int min_chunk_bytes;
+- unsigned int meta_chunk_bytes;
+- unsigned int min_meta_chunk_bytes;
+- unsigned int mpte_group_bytes;
+- unsigned int dpte_group_bytes;
+-};
+-
+-struct _vcs_dpi_display_data_rq_dlg_params_st {
+- unsigned int swath_width_ub;
+- unsigned int swath_height;
+- unsigned int req_per_swath_ub;
+- unsigned int meta_pte_bytes_per_frame_ub;
+- unsigned int dpte_req_per_row_ub;
+- unsigned int dpte_groups_per_row_ub;
+- unsigned int dpte_row_height;
+- unsigned int dpte_bytes_per_row_ub;
+- unsigned int meta_chunks_per_row_ub;
+- unsigned int meta_req_per_row_ub;
+- unsigned int meta_row_height;
+- unsigned int meta_bytes_per_row_ub;
+-};
+-
+-struct _vcs_dpi_display_cur_rq_dlg_params_st {
+- unsigned char enable;
+- unsigned int swath_height;
+- unsigned int req_per_line;
+-};
+-
+-struct _vcs_dpi_display_rq_dlg_params_st {
+- display_data_rq_dlg_params_st rq_l;
+- display_data_rq_dlg_params_st rq_c;
+- display_cur_rq_dlg_params_st rq_cur0;
+-};
+-
+-struct _vcs_dpi_display_rq_sizing_params_st {
+- display_data_rq_sizing_params_st rq_l;
+- display_data_rq_sizing_params_st rq_c;
+-};
+-
+-struct _vcs_dpi_display_rq_misc_params_st {
+- display_data_rq_misc_params_st rq_l;
+- display_data_rq_misc_params_st rq_c;
+-};
+-
+-struct _vcs_dpi_display_rq_params_st {
+- unsigned char yuv420;
+- unsigned char yuv420_10bpc;
+- display_rq_misc_params_st misc;
+- display_rq_sizing_params_st sizing;
+- display_rq_dlg_params_st dlg;
+-};
+-
+-struct _vcs_dpi_display_dlg_regs_st {
+- unsigned int refcyc_h_blank_end;
+- unsigned int dlg_vblank_end;
+- unsigned int min_dst_y_next_start;
+- unsigned int refcyc_per_htotal;
+- unsigned int refcyc_x_after_scaler;
+- unsigned int dst_y_after_scaler;
+- unsigned int dst_y_prefetch;
+- unsigned int dst_y_per_vm_vblank;
+- unsigned int dst_y_per_row_vblank;
+- unsigned int dst_y_per_vm_flip;
+- unsigned int dst_y_per_row_flip;
+- unsigned int ref_freq_to_pix_freq;
+- unsigned int vratio_prefetch;
+- unsigned int vratio_prefetch_c;
+- unsigned int refcyc_per_pte_group_vblank_l;
+- unsigned int refcyc_per_pte_group_vblank_c;
+- unsigned int refcyc_per_meta_chunk_vblank_l;
+- unsigned int refcyc_per_meta_chunk_vblank_c;
+- unsigned int refcyc_per_pte_group_flip_l;
+- unsigned int refcyc_per_pte_group_flip_c;
+- unsigned int refcyc_per_meta_chunk_flip_l;
+- unsigned int refcyc_per_meta_chunk_flip_c;
+- unsigned int dst_y_per_pte_row_nom_l;
+- unsigned int dst_y_per_pte_row_nom_c;
+- unsigned int refcyc_per_pte_group_nom_l;
+- unsigned int refcyc_per_pte_group_nom_c;
+- unsigned int dst_y_per_meta_row_nom_l;
+- unsigned int dst_y_per_meta_row_nom_c;
+- unsigned int refcyc_per_meta_chunk_nom_l;
+- unsigned int refcyc_per_meta_chunk_nom_c;
+- unsigned int refcyc_per_line_delivery_pre_l;
+- unsigned int refcyc_per_line_delivery_pre_c;
+- unsigned int refcyc_per_line_delivery_l;
+- unsigned int refcyc_per_line_delivery_c;
+- unsigned int chunk_hdl_adjust_cur0;
+- unsigned int chunk_hdl_adjust_cur1;
+- unsigned int vready_after_vcount0;
+- unsigned int dst_y_offset_cur0;
+- unsigned int dst_y_offset_cur1;
+- unsigned int xfc_reg_transfer_delay;
+- unsigned int xfc_reg_precharge_delay;
+- unsigned int xfc_reg_remote_surface_flip_latency;
+- unsigned int xfc_reg_prefetch_margin;
+- unsigned int dst_y_delta_drq_limit;
+-};
+-
+-struct _vcs_dpi_display_ttu_regs_st {
+- unsigned int qos_level_low_wm;
+- unsigned int qos_level_high_wm;
+- unsigned int min_ttu_vblank;
+- unsigned int qos_level_flip;
+- unsigned int refcyc_per_req_delivery_l;
+- unsigned int refcyc_per_req_delivery_c;
+- unsigned int refcyc_per_req_delivery_cur0;
+- unsigned int refcyc_per_req_delivery_cur1;
+- unsigned int refcyc_per_req_delivery_pre_l;
+- unsigned int refcyc_per_req_delivery_pre_c;
+- unsigned int refcyc_per_req_delivery_pre_cur0;
+- unsigned int refcyc_per_req_delivery_pre_cur1;
+- unsigned int qos_level_fixed_l;
+- unsigned int qos_level_fixed_c;
+- unsigned int qos_level_fixed_cur0;
+- unsigned int qos_level_fixed_cur1;
+- unsigned int qos_ramp_disable_l;
+- unsigned int qos_ramp_disable_c;
+- unsigned int qos_ramp_disable_cur0;
+- unsigned int qos_ramp_disable_cur1;
+-};
+-
+-struct _vcs_dpi_display_data_rq_regs_st {
+- unsigned int chunk_size;
+- unsigned int min_chunk_size;
+- unsigned int meta_chunk_size;
+- unsigned int min_meta_chunk_size;
+- unsigned int dpte_group_size;
+- unsigned int mpte_group_size;
+- unsigned int swath_height;
+- unsigned int pte_row_height_linear;
+-};
+-
+-struct _vcs_dpi_display_rq_regs_st {
+- display_data_rq_regs_st rq_regs_l;
+- display_data_rq_regs_st rq_regs_c;
+- unsigned int drq_expansion_mode;
+- unsigned int prq_expansion_mode;
+- unsigned int mrq_expansion_mode;
+- unsigned int crq_expansion_mode;
+- unsigned int plane1_base_address;
+-};
+-
+-struct _vcs_dpi_display_dlg_sys_params_st {
+- double t_mclk_wm_us;
+- double t_urg_wm_us;
+- double t_sr_wm_us;
+- double t_extra_us;
+- double mem_trip_us;
+- double t_srx_delay_us;
+- double deepsleep_dcfclk_mhz;
+- double total_flip_bw;
+- unsigned int total_flip_bytes;
+-};
+-
+-struct _vcs_dpi_display_dlg_prefetch_param_st {
+- double prefetch_bw;
+- unsigned int flip_bytes;
+-};
+-
+-struct _vcs_dpi_display_pipe_clock_st {
+- double dcfclk_mhz;
+- double dispclk_mhz;
+- double socclk_mhz;
+- double dscclk_mhz[6];
+- double dppclk_mhz[6];
+-};
+-
+-struct _vcs_dpi_display_arb_params_st {
+- int max_req_outstanding;
+- int min_req_outstanding;
+- int sat_level_us;
++struct _vcs_dpi_display_bandwidth_st {
++ double total_bw_consumed_gbps;
++ double guaranteed_urgent_return_bw_gbps;
++};
++
++struct _vcs_dpi_scaler_ratio_depth_st {
++ double hscl_ratio;
++ double vscl_ratio;
++ double hscl_ratio_c;
++ double vscl_ratio_c;
++ double vinit;
++ double vinit_c;
++ double vinit_bot;
++ double vinit_bot_c;
++ int lb_depth;
++ int scl_enable;
++};
++
++struct _vcs_dpi_scaler_taps_st {
++ unsigned int htaps;
++ unsigned int vtaps;
++ unsigned int htaps_c;
++ unsigned int vtaps_c;
++};
++
++struct _vcs_dpi_display_pipe_dest_params_st {
++ unsigned int recout_width;
++ unsigned int recout_height;
++ unsigned int full_recout_width;
++ unsigned int full_recout_height;
++ unsigned int hblank_start;
++ unsigned int hblank_end;
++ unsigned int vblank_start;
++ unsigned int vblank_end;
++ unsigned int htotal;
++ unsigned int vtotal;
++ unsigned int vactive;
++ unsigned int hactive;
++ unsigned int vstartup_start;
++ unsigned int vupdate_offset;
++ unsigned int vupdate_width;
++ unsigned int vready_offset;
++ unsigned char interlaced;
++ unsigned char underscan;
++ double pixel_rate_mhz;
++ unsigned char synchronized_vblank_all_planes;
++ unsigned char otg_inst;
++ unsigned char odm_split_cnt;
++ unsigned char odm_combine;
++};
++
++struct _vcs_dpi_display_pipe_params_st {
++ display_pipe_source_params_st src;
++ display_pipe_dest_params_st dest;
++ scaler_ratio_depth_st scale_ratio_depth;
++ scaler_taps_st scale_taps;
++};
++
++struct _vcs_dpi_display_clocks_and_cfg_st {
++ int voltage;
++ double dppclk_mhz;
++ double refclk_mhz;
++ double dispclk_mhz;
++ double dcfclk_mhz;
++ double socclk_mhz;
++};
++
++struct _vcs_dpi_display_e2e_pipe_params_st {
++ display_pipe_params_st pipe;
++ display_output_params_st dout;
++ display_clocks_and_cfg_st clks_cfg;
++};
++
++struct _vcs_dpi_dchub_buffer_sizing_st {
++ unsigned int swath_width_y;
++ unsigned int swath_height_y;
++ unsigned int swath_height_c;
++ unsigned int detail_buffer_size_y;
++};
++
++struct _vcs_dpi_watermarks_perf_st {
++ double stutter_eff_in_active_region_percent;
++ double urgent_latency_supported_us;
++ double non_urgent_latency_supported_us;
++ double dram_clock_change_margin_us;
++ double dram_access_eff_percent;
++};
++
++struct _vcs_dpi_cstate_pstate_watermarks_st {
++ double cstate_exit_us;
++ double cstate_enter_plus_exit_us;
++ double pstate_change_us;
++};
++
++struct _vcs_dpi_wm_calc_pipe_params_st {
++ unsigned int num_dpp;
++ int voltage;
++ int output_type;
++ double dcfclk_mhz;
++ double socclk_mhz;
++ double dppclk_mhz;
++ double pixclk_mhz;
++ unsigned char interlace_en;
++ unsigned char pte_enable;
++ unsigned char dcc_enable;
++ double dcc_rate;
++ double bytes_per_pixel_c;
++ double bytes_per_pixel_y;
++ unsigned int swath_width_y;
++ unsigned int swath_height_y;
++ unsigned int swath_height_c;
++ unsigned int det_buffer_size_y;
++ double h_ratio;
++ double v_ratio;
++ unsigned int h_taps;
++ unsigned int h_total;
++ unsigned int v_total;
++ unsigned int v_active;
++ unsigned int e2e_index;
++ double display_pipe_line_delivery_time;
++ double read_bw;
++ unsigned int lines_in_det_y;
++ unsigned int lines_in_det_y_rounded_down_to_swath;
++ double full_det_buffering_time;
++ double dcfclk_deepsleep_mhz_per_plane;
++};
++
++struct _vcs_dpi_vratio_pre_st {
++ double vratio_pre_l;
++ double vratio_pre_c;
++};
++
++struct _vcs_dpi_display_data_rq_misc_params_st {
++ unsigned int full_swath_bytes;
++ unsigned int stored_swath_bytes;
++ unsigned int blk256_height;
++ unsigned int blk256_width;
++ unsigned int req_height;
++ unsigned int req_width;
++};
++
++struct _vcs_dpi_display_data_rq_sizing_params_st {
++ unsigned int chunk_bytes;
++ unsigned int min_chunk_bytes;
++ unsigned int meta_chunk_bytes;
++ unsigned int min_meta_chunk_bytes;
++ unsigned int mpte_group_bytes;
++ unsigned int dpte_group_bytes;
++};
++
++struct _vcs_dpi_display_data_rq_dlg_params_st {
++ unsigned int swath_width_ub;
++ unsigned int swath_height;
++ unsigned int req_per_swath_ub;
++ unsigned int meta_pte_bytes_per_frame_ub;
++ unsigned int dpte_req_per_row_ub;
++ unsigned int dpte_groups_per_row_ub;
++ unsigned int dpte_row_height;
++ unsigned int dpte_bytes_per_row_ub;
++ unsigned int meta_chunks_per_row_ub;
++ unsigned int meta_req_per_row_ub;
++ unsigned int meta_row_height;
++ unsigned int meta_bytes_per_row_ub;
++};
++
++struct _vcs_dpi_display_cur_rq_dlg_params_st {
++ unsigned char enable;
++ unsigned int swath_height;
++ unsigned int req_per_line;
++};
++
++struct _vcs_dpi_display_rq_dlg_params_st {
++ display_data_rq_dlg_params_st rq_l;
++ display_data_rq_dlg_params_st rq_c;
++ display_cur_rq_dlg_params_st rq_cur0;
++};
++
++struct _vcs_dpi_display_rq_sizing_params_st {
++ display_data_rq_sizing_params_st rq_l;
++ display_data_rq_sizing_params_st rq_c;
++};
++
++struct _vcs_dpi_display_rq_misc_params_st {
++ display_data_rq_misc_params_st rq_l;
++ display_data_rq_misc_params_st rq_c;
++};
++
++struct _vcs_dpi_display_rq_params_st {
++ unsigned char yuv420;
++ unsigned char yuv420_10bpc;
++ display_rq_misc_params_st misc;
++ display_rq_sizing_params_st sizing;
++ display_rq_dlg_params_st dlg;
++};
++
++struct _vcs_dpi_display_dlg_regs_st {
++ unsigned int refcyc_h_blank_end;
++ unsigned int dlg_vblank_end;
++ unsigned int min_dst_y_next_start;
++ unsigned int refcyc_per_htotal;
++ unsigned int refcyc_x_after_scaler;
++ unsigned int dst_y_after_scaler;
++ unsigned int dst_y_prefetch;
++ unsigned int dst_y_per_vm_vblank;
++ unsigned int dst_y_per_row_vblank;
++ unsigned int dst_y_per_vm_flip;
++ unsigned int dst_y_per_row_flip;
++ unsigned int ref_freq_to_pix_freq;
++ unsigned int vratio_prefetch;
++ unsigned int vratio_prefetch_c;
++ unsigned int refcyc_per_pte_group_vblank_l;
++ unsigned int refcyc_per_pte_group_vblank_c;
++ unsigned int refcyc_per_meta_chunk_vblank_l;
++ unsigned int refcyc_per_meta_chunk_vblank_c;
++ unsigned int refcyc_per_pte_group_flip_l;
++ unsigned int refcyc_per_pte_group_flip_c;
++ unsigned int refcyc_per_meta_chunk_flip_l;
++ unsigned int refcyc_per_meta_chunk_flip_c;
++ unsigned int dst_y_per_pte_row_nom_l;
++ unsigned int dst_y_per_pte_row_nom_c;
++ unsigned int refcyc_per_pte_group_nom_l;
++ unsigned int refcyc_per_pte_group_nom_c;
++ unsigned int dst_y_per_meta_row_nom_l;
++ unsigned int dst_y_per_meta_row_nom_c;
++ unsigned int refcyc_per_meta_chunk_nom_l;
++ unsigned int refcyc_per_meta_chunk_nom_c;
++ unsigned int refcyc_per_line_delivery_pre_l;
++ unsigned int refcyc_per_line_delivery_pre_c;
++ unsigned int refcyc_per_line_delivery_l;
++ unsigned int refcyc_per_line_delivery_c;
++ unsigned int chunk_hdl_adjust_cur0;
++ unsigned int chunk_hdl_adjust_cur1;
++ unsigned int vready_after_vcount0;
++ unsigned int dst_y_offset_cur0;
++ unsigned int dst_y_offset_cur1;
++ unsigned int xfc_reg_transfer_delay;
++ unsigned int xfc_reg_precharge_delay;
++ unsigned int xfc_reg_remote_surface_flip_latency;
++ unsigned int xfc_reg_prefetch_margin;
++ unsigned int dst_y_delta_drq_limit;
++};
++
++struct _vcs_dpi_display_ttu_regs_st {
++ unsigned int qos_level_low_wm;
++ unsigned int qos_level_high_wm;
++ unsigned int min_ttu_vblank;
++ unsigned int qos_level_flip;
++ unsigned int refcyc_per_req_delivery_l;
++ unsigned int refcyc_per_req_delivery_c;
++ unsigned int refcyc_per_req_delivery_cur0;
++ unsigned int refcyc_per_req_delivery_cur1;
++ unsigned int refcyc_per_req_delivery_pre_l;
++ unsigned int refcyc_per_req_delivery_pre_c;
++ unsigned int refcyc_per_req_delivery_pre_cur0;
++ unsigned int refcyc_per_req_delivery_pre_cur1;
++ unsigned int qos_level_fixed_l;
++ unsigned int qos_level_fixed_c;
++ unsigned int qos_level_fixed_cur0;
++ unsigned int qos_level_fixed_cur1;
++ unsigned int qos_ramp_disable_l;
++ unsigned int qos_ramp_disable_c;
++ unsigned int qos_ramp_disable_cur0;
++ unsigned int qos_ramp_disable_cur1;
++};
++
++struct _vcs_dpi_display_data_rq_regs_st {
++ unsigned int chunk_size;
++ unsigned int min_chunk_size;
++ unsigned int meta_chunk_size;
++ unsigned int min_meta_chunk_size;
++ unsigned int dpte_group_size;
++ unsigned int mpte_group_size;
++ unsigned int swath_height;
++ unsigned int pte_row_height_linear;
++};
++
++struct _vcs_dpi_display_rq_regs_st {
++ display_data_rq_regs_st rq_regs_l;
++ display_data_rq_regs_st rq_regs_c;
++ unsigned int drq_expansion_mode;
++ unsigned int prq_expansion_mode;
++ unsigned int mrq_expansion_mode;
++ unsigned int crq_expansion_mode;
++ unsigned int plane1_base_address;
++};
++
++struct _vcs_dpi_display_dlg_sys_params_st {
++ double t_mclk_wm_us;
++ double t_urg_wm_us;
++ double t_sr_wm_us;
++ double t_extra_us;
++ double mem_trip_us;
++ double t_srx_delay_us;
++ double deepsleep_dcfclk_mhz;
++ double total_flip_bw;
++ unsigned int total_flip_bytes;
++};
++
++struct _vcs_dpi_display_dlg_prefetch_param_st {
++ double prefetch_bw;
++ unsigned int flip_bytes;
++};
++
++struct _vcs_dpi_display_pipe_clock_st {
++ double dcfclk_mhz;
++ double dispclk_mhz;
++ double socclk_mhz;
++ double dscclk_mhz[6];
++ double dppclk_mhz[6];
++};
++
++struct _vcs_dpi_display_arb_params_st {
++ int max_req_outstanding;
++ int min_req_outstanding;
++ int sat_level_us;
+ };
+
+ #endif /*__DISPLAY_MODE_STRUCTS_H__*/
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
+index c1aa947..4bdd260 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
++++ b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
+@@ -35,6 +35,16 @@ static inline double dml_min(double a, double b)
+ return (double) dcn_bw_min2(a, b);
+ }
+
++static inline double dml_min3(double a, double b, double c)
++{
++ return dml_min(dml_min(a, b), c);
++}
++
++static inline double dml_min4(double a, double b, double c, double d)
++{
++ return dml_min(dml_min(a, b), dml_min(c, d));
++}
++
+ static inline double dml_max(double a, double b)
+ {
+ return (double) dcn_bw_max2(a, b);
+--
+2.7.4
+
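Besides the struct reflow and new bounding-box fields, this patch adds dml_min3/dml_min4 inline helpers that simply chain the existing two-argument dml_min. A hypothetical usage sketch, assuming dml_inline_defs.h is in scope (the function and parameter names are illustrative):

    #include "dml_inline_defs.h"

    double example_effective_bw(double sdp_bw, double dram_bw,
                                double urgent_bw, double request_bw)
    {
            /* same as dml_min(dml_min(sdp_bw, dram_bw), dml_min(urgent_bw, request_bw)) */
            return dml_min4(sdp_bw, dram_bw, urgent_bw, request_bw);
    }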
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4468-drm-amd-display-Fix-up-dm-logging-functionality.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4468-drm-amd-display-Fix-up-dm-logging-functionality.patch
new file mode 100644
index 00000000..4f39cf10
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4468-drm-amd-display-Fix-up-dm-logging-functionality.patch
@@ -0,0 +1,135 @@
+From a0945386d2cd7449215a3491c51045bde5131711 Mon Sep 17 00:00:00 2001
+From: Anthony Koo <Anthony.Koo@amd.com>
+Date: Tue, 24 Apr 2018 15:21:33 -0400
+Subject: [PATCH 4468/5725] drm/amd/display: Fix up dm logging functionality
+
+Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
+Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ .../drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 5 -----
+ .../gpu/drm/amd/display/dc/basics/log_helpers.c | 1 -
+ drivers/gpu/drm/amd/display/dc/basics/logger.c | 1 +
+ drivers/gpu/drm/amd/display/dc/dm_services.h | 4 ----
+ drivers/gpu/drm/amd/display/modules/stats/stats.c | 24 ++++++++++++++--------
+ 5 files changed, 17 insertions(+), 18 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+index ca0b08b..bd44935 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+@@ -330,11 +330,6 @@ bool dm_helpers_dp_mst_send_payload_allocation(
+ return true;
+ }
+
+-bool dm_helpers_dc_conn_log(struct dc_context *ctx, struct log_entry *entry, enum dc_log_type event)
+-{
+- return true;
+-}
+-
+ void dm_dtn_log_begin(struct dc_context *ctx)
+ {}
+
+diff --git a/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c b/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
+index 854678a..0214515 100644
+--- a/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
++++ b/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
+@@ -94,7 +94,6 @@ void dc_conn_log(struct dc_context *ctx,
+ dm_logger_append(&entry, "%2.2X ", hex_data[i]);
+
+ dm_logger_append(&entry, "^\n");
+- dm_helpers_dc_conn_log(ctx, &entry, event);
+
+ fail:
+ dm_logger_close(&entry);
+diff --git a/drivers/gpu/drm/amd/display/dc/basics/logger.c b/drivers/gpu/drm/amd/display/dc/basics/logger.c
+index 0001a3c..738a818 100644
+--- a/drivers/gpu/drm/amd/display/dc/basics/logger.c
++++ b/drivers/gpu/drm/amd/display/dc/basics/logger.c
+@@ -402,3 +402,4 @@ void dm_logger_close(struct log_entry *entry)
+ entry->max_buf_bytes = 0;
+ }
+ }
++
+diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h
+index 8eafe1a..4ff9b2b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dm_services.h
++++ b/drivers/gpu/drm/amd/display/dc/dm_services.h
+@@ -355,10 +355,6 @@ void dm_perf_trace_timestamp(const char *func_name, unsigned int line);
+ /*
+ * Debug and verification hooks
+ */
+-bool dm_helpers_dc_conn_log(
+- struct dc_context *ctx,
+- struct log_entry *entry,
+- enum dc_log_type event);
+
+ void dm_dtn_log_begin(struct dc_context *ctx);
+ void dm_dtn_log_append_v(struct dc_context *ctx, const char *msg, ...);
+diff --git a/drivers/gpu/drm/amd/display/modules/stats/stats.c b/drivers/gpu/drm/amd/display/modules/stats/stats.c
+index d16aac7..ae2d92b 100644
+--- a/drivers/gpu/drm/amd/display/modules/stats/stats.c
++++ b/drivers/gpu/drm/amd/display/modules/stats/stats.c
+@@ -168,6 +168,7 @@ void mod_stats_dump(struct mod_stats *mod_stats)
+ struct core_stats *core_stats = NULL;
+ struct stats_time_cache *time = NULL;
+ unsigned int index = 0;
++ struct log_entry log_entry;
+
+ if (mod_stats == NULL)
+ return;
+@@ -177,17 +178,22 @@ void mod_stats_dump(struct mod_stats *mod_stats)
+ logger = dc->ctx->logger;
+ time = core_stats->time;
+
+- dm_logger_write(logger, LOG_DISPLAYSTATS, "==Display Caps==");
+- dm_logger_write(logger, LOG_DISPLAYSTATS, " ");
++ dm_logger_open(
++ dc->ctx->logger,
++ &log_entry,
++ LOG_DISPLAYSTATS);
+
+- dm_logger_write(logger, LOG_DISPLAYSTATS, "==Display Stats==");
+- dm_logger_write(logger, LOG_DISPLAYSTATS, " ");
++ dm_logger_append(&log_entry, "==Display Caps==\n");
++ dm_logger_append(&log_entry, "\n");
+
+- dm_logger_write(logger, LOG_DISPLAYSTATS,
++ dm_logger_append(&log_entry, "==Display Stats==\n");
++ dm_logger_append(&log_entry, "\n");
++
++ dm_logger_append(&log_entry,
+ "%10s %10s %10s %10s %10s"
+ " %11s %11s %17s %10s %14s"
+ " %10s %10s %10s %10s %10s"
+- " %10s %10s %10s %10s",
++ " %10s %10s %10s %10s\n",
+ "render", "avgRender",
+ "minWindow", "midPoint", "maxWindow",
+ "vsyncToFlip", "flipToVsync", "vsyncsBetweenFlip",
+@@ -197,11 +203,11 @@ void mod_stats_dump(struct mod_stats *mod_stats)
+ "vSyncTime4", "vSyncTime5", "flags");
+
+ for (int i = 0; i < core_stats->index && i < core_stats->entries; i++) {
+- dm_logger_write(logger, LOG_DISPLAYSTATS,
++ dm_logger_append(&log_entry,
+ "%10u %10u %10u %10u %10u"
+ " %11u %11u %17u %10u %14u"
+ " %10u %10u %10u %10u %10u"
+- " %10u %10u %10u %10u",
++ " %10u %10u %10u %10u\n",
+ time[i].render_time_in_us,
+ time[i].avg_render_time_in_us_last_ten,
+ time[i].min_window,
+@@ -222,6 +228,8 @@ void mod_stats_dump(struct mod_stats *mod_stats)
+ time[i].v_sync_time_in_us[4],
+ time[i].flags);
+ }
++
++ dm_logger_close(&log_entry);
+ }
+
+ void mod_stats_reset_data(struct mod_stats *mod_stats)
+--
+2.7.4
+
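The change above batches the whole stats dump into one log entry through dm_logger_open()/dm_logger_append()/dm_logger_close() instead of issuing one dm_logger_write() per line. A minimal standalone sketch of that open/append/close batching, assuming a plain user-space buffer; log_buf, log_open, log_append and log_close are hypothetical names and not the DC logger API:

#include <stdarg.h>
#include <stddef.h>
#include <stdio.h>

struct log_buf {
        char data[1024];
        size_t len;
};

static void log_open(struct log_buf *b)
{
        b->len = 0;
        b->data[0] = '\0';
}

static void log_append(struct log_buf *b, const char *fmt, ...)
{
        va_list args;
        int n;

        if (b->len >= sizeof(b->data) - 1)
                return;                 /* buffer full: drop further appends */

        va_start(args, fmt);
        n = vsnprintf(b->data + b->len, sizeof(b->data) - b->len, fmt, args);
        va_end(args);

        if (n > 0)
                b->len += (size_t)n < sizeof(b->data) - b->len ?
                          (size_t)n : sizeof(b->data) - 1 - b->len;
}

static void log_close(struct log_buf *b)
{
        fputs(b->data, stderr);         /* one write for the whole entry */
}

int main(void)
{
        struct log_buf entry;

        log_open(&entry);
        log_append(&entry, "==Display Stats==\n");
        log_append(&entry, "%10s %10s\n", "render", "avgRender");
        log_close(&entry);
        return 0;
}

The point of the pattern is that the multi-line table arrives at the log sink as a single entry rather than being interleaved with unrelated messages.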
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4469-drm-amd-display-use-macro-for-logs.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4469-drm-amd-display-use-macro-for-logs.patch
new file mode 100644
index 00000000..7becfb27
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4469-drm-amd-display-use-macro-for-logs.patch
@@ -0,0 +1,81 @@
+From 16beaec768532a6bbd9d59dde59f8dad2365b6b6 Mon Sep 17 00:00:00 2001
+From: Anthony Koo <Anthony.Koo@amd.com>
+Date: Tue, 24 Apr 2018 15:36:27 -0400
+Subject: [PATCH 4469/5725] drm/amd/display: use macro for logs
+
+Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ .../gpu/drm/amd/display/include/logger_interface.h | 9 +++++++++
+ drivers/gpu/drm/amd/display/modules/stats/stats.c | 19 ++++++-------------
+ 2 files changed, 15 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/include/logger_interface.h b/drivers/gpu/drm/amd/display/include/logger_interface.h
+index 28dee96..dc98d6d 100644
+--- a/drivers/gpu/drm/amd/display/include/logger_interface.h
++++ b/drivers/gpu/drm/amd/display/include/logger_interface.h
+@@ -190,4 +190,13 @@ void context_clock_trace(
+ } \
+ } while (0)
+
++#define DISPLAY_STATS_BEGIN(entry) \
++ dm_logger_open(dc->ctx->logger, &entry, LOG_DISPLAYSTATS)
++
++#define DISPLAY_STATS(msg, ...) \
++ dm_logger_append(&log_entry, msg, ##__VA_ARGS__)
++
++#define DISPLAY_STATS_END(entry) \
++ dm_logger_close(&entry)
++
+ #endif /* __DAL_LOGGER_INTERFACE_H__ */
+diff --git a/drivers/gpu/drm/amd/display/modules/stats/stats.c b/drivers/gpu/drm/amd/display/modules/stats/stats.c
+index ae2d92b..45acdbc 100644
+--- a/drivers/gpu/drm/amd/display/modules/stats/stats.c
++++ b/drivers/gpu/drm/amd/display/modules/stats/stats.c
+@@ -178,19 +178,13 @@ void mod_stats_dump(struct mod_stats *mod_stats)
+ logger = dc->ctx->logger;
+ time = core_stats->time;
+
+- dm_logger_open(
+- dc->ctx->logger,
+- &log_entry,
+- LOG_DISPLAYSTATS);
++ DISPLAY_STATS_BEGIN(log_entry);
+
+- dm_logger_append(&log_entry, "==Display Caps==\n");
+- dm_logger_append(&log_entry, "\n");
++ DISPLAY_STATS("==Display Caps==\n");
+
+- dm_logger_append(&log_entry, "==Display Stats==\n");
+- dm_logger_append(&log_entry, "\n");
++ DISPLAY_STATS("==Display Stats==\n");
+
+- dm_logger_append(&log_entry,
+- "%10s %10s %10s %10s %10s"
++ DISPLAY_STATS("%10s %10s %10s %10s %10s"
+ " %11s %11s %17s %10s %14s"
+ " %10s %10s %10s %10s %10s"
+ " %10s %10s %10s %10s\n",
+@@ -203,8 +197,7 @@ void mod_stats_dump(struct mod_stats *mod_stats)
+ "vSyncTime4", "vSyncTime5", "flags");
+
+ for (int i = 0; i < core_stats->index && i < core_stats->entries; i++) {
+- dm_logger_append(&log_entry,
+- "%10u %10u %10u %10u %10u"
++ DISPLAY_STATS("%10u %10u %10u %10u %10u"
+ " %11u %11u %17u %10u %14u"
+ " %10u %10u %10u %10u %10u"
+ " %10u %10u %10u %10u\n",
+@@ -229,7 +222,7 @@ void mod_stats_dump(struct mod_stats *mod_stats)
+ time[i].flags);
+ }
+
+- dm_logger_close(&log_entry);
++ DISPLAY_STATS_END(log_entry);
+ }
+
+ void mod_stats_reset_data(struct mod_stats *mod_stats)
+--
+2.7.4
+
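The DISPLAY_STATS() macro above expands against a local variable named log_entry and relies on the GNU ##__VA_ARGS__ extension so the trailing comma disappears when no format arguments are supplied. A small self-contained illustration of that macro pattern; entry_append and STATS are made-up stand-ins rather than the DC interfaces:

#include <stdarg.h>
#include <stdio.h>

struct entry { int unused; };

static void entry_append(struct entry *e, const char *fmt, ...)
{
        va_list args;

        (void)e;                        /* a real logger would buffer into e */
        va_start(args, fmt);
        vprintf(fmt, args);
        va_end(args);
}

/* Like DISPLAY_STATS(): the macro names a local 'log_entry' that must be in
 * scope at the call site, and ##__VA_ARGS__ swallows the comma when the
 * variadic list is empty. */
#define STATS(msg, ...) entry_append(&log_entry, msg, ##__VA_ARGS__)

int main(void)
{
        struct entry log_entry;

        STATS("==Display Stats==\n");                   /* zero variadic args */
        STATS("%10s %10s\n", "render", "avgRender");
        return 0;
}

Because the macro hard-codes the local name, every caller of DISPLAY_STATS() must declare log_entry itself, which is exactly what mod_stats_dump() does.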
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4470-drm-amd-display-don-t-create-new-dc_sink-if-nothing-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4470-drm-amd-display-don-t-create-new-dc_sink-if-nothing-.patch
new file mode 100644
index 00000000..c06e641a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4470-drm-amd-display-don-t-create-new-dc_sink-if-nothing-.patch
@@ -0,0 +1,198 @@
+From a5fb0c76a967f9db863b611301ee9f236801a6ed Mon Sep 17 00:00:00 2001
+From: Samson Tam <Samson.Tam@amd.com>
+Date: Fri, 13 Apr 2018 18:38:56 -0400
+Subject: [PATCH 4470/5725] drm/amd/display: don't create new dc_sink if
+ nothing changed at detection
+
+Signed-off-by: Samson Tam <Samson.Tam@amd.com>
+Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_link.c | 95 ++++++++++++++++++++++-----
+ 1 file changed, 77 insertions(+), 18 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index ea5d5ff..2fa5218 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -469,6 +469,13 @@ static void link_disconnect_sink(struct dc_link *link)
+ link->dpcd_sink_count = 0;
+ }
+
++static void link_disconnect_remap(struct dc_sink *prev_sink, struct dc_link *link)
++{
++ dc_sink_release(link->local_sink);
++ link->local_sink = prev_sink;
++}
++
++
+ static bool detect_dp(
+ struct dc_link *link,
+ struct display_sink_capability *sink_caps,
+@@ -551,6 +558,17 @@ static bool detect_dp(
+ return true;
+ }
+
++static bool is_same_edid(struct dc_edid *old_edid, struct dc_edid *new_edid)
++{
++ if (old_edid->length != new_edid->length)
++ return false;
++
++ if (new_edid->length == 0)
++ return false;
++
++ return (memcmp(old_edid->raw_edid, new_edid->raw_edid, new_edid->length) == 0);
++}
++
+ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
+ {
+ struct dc_sink_init_data sink_init_data = { 0 };
+@@ -558,9 +576,13 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
+ uint8_t i;
+ bool converter_disable_audio = false;
+ struct audio_support *aud_support = &link->dc->res_pool->audio_support;
++ bool same_edid = false;
+ enum dc_edid_status edid_status;
+ struct dc_context *dc_ctx = link->ctx;
+ struct dc_sink *sink = NULL;
++ struct dc_sink *prev_sink = NULL;
++ struct dpcd_caps prev_dpcd_caps;
++ bool same_dpcd = true;
+ enum dc_connection_type new_connection_type = dc_connection_none;
+ DC_LOGGER_INIT(link->ctx->logger);
+ if (link->connector_signal == SIGNAL_TYPE_VIRTUAL)
+@@ -575,6 +597,11 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
+ link->local_sink)
+ return true;
+
++ prev_sink = link->local_sink;
++ if (prev_sink != NULL) {
++ dc_sink_retain(prev_sink);
++ memcpy(&prev_dpcd_caps, &link->dpcd_caps, sizeof(struct dpcd_caps));
++ }
+ link_disconnect_sink(link);
+
+ if (new_connection_type != dc_connection_none) {
+@@ -616,14 +643,25 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
+ link,
+ &sink_caps,
+ &converter_disable_audio,
+- aud_support, reason))
++ aud_support, reason)) {
++ if (prev_sink != NULL)
++ dc_sink_release(prev_sink);
+ return false;
++ }
+
++ // Check if dpcd block is the same

++ if (prev_sink != NULL) {
++ if (memcmp(&link->dpcd_caps, &prev_dpcd_caps, sizeof(struct dpcd_caps)))
++ same_dpcd = false;
++ }
+ /* Active dongle downstream unplug */
+ if (link->type == dc_connection_active_dongle
+ && link->dpcd_caps.sink_count.
+- bits.SINK_COUNT == 0)
++ bits.SINK_COUNT == 0) {
++ if (prev_sink != NULL)
++ dc_sink_release(prev_sink);
+ return true;
++ }
+
+ if (link->type == dc_connection_mst_branch) {
+ LINK_INFO("link=%d, mst branch is now Connected\n",
+@@ -634,6 +672,8 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
+ * pbn_per_slot value leading to exception on dc_fixpt_div()
+ */
+ link->verified_link_cap = link->reported_link_cap;
++ if (prev_sink != NULL)
++ dc_sink_release(prev_sink);
+ return false;
+ }
+
+@@ -643,6 +683,8 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
+ default:
+ DC_ERROR("Invalid connector type! signal:%d\n",
+ link->connector_signal);
++ if (prev_sink != NULL)
++ dc_sink_release(prev_sink);
+ return false;
+ } /* switch() */
+
+@@ -665,6 +707,8 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
+ sink = dc_sink_create(&sink_init_data);
+ if (!sink) {
+ DC_ERROR("Failed to create sink!\n");
++ if (prev_sink != NULL)
++ dc_sink_release(prev_sink);
+ return false;
+ }
+
+@@ -688,22 +732,33 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
+ break;
+ }
+
+- if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
+- sink_caps.transaction_type ==
+- DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
+- /*
+- * TODO debug why Dell 2413 doesn't like
+- * two link trainings
+- */
++ // Check if edid is the same
++ if ((prev_sink != NULL) && ((edid_status == EDID_THE_SAME) || (edid_status == EDID_OK)))
++ same_edid = is_same_edid(&prev_sink->dc_edid, &sink->dc_edid);
+
+- /* deal with non-mst cases */
+- dp_hbr_verify_link_cap(link, &link->reported_link_cap);
+- }
++ // If both edid and dpcd are the same, then discard new sink and revert back to original sink
++ if ((same_edid) && (same_dpcd)) {
++ link_disconnect_remap(prev_sink, link);
++ sink = prev_sink;
++ prev_sink = NULL;
++ } else {
++ if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
++ sink_caps.transaction_type ==
++ DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
++ /*
++ * TODO debug why Dell 2413 doesn't like
++ * two link trainings
++ */
++
++ /* deal with non-mst cases */
++ dp_hbr_verify_link_cap(link, &link->reported_link_cap);
++ }
+
+- /* HDMI-DVI Dongle */
+- if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A &&
+- !sink->edid_caps.edid_hdmi)
+- sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
++ /* HDMI-DVI Dongle */
++ if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A &&
++ !sink->edid_caps.edid_hdmi)
++ sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
++ }
+
+ /* Connectivity log: detection */
+ for (i = 0; i < sink->dc_edid.length / EDID_BLOCK_SIZE; i++) {
+@@ -762,10 +817,14 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
+ sink_caps.signal = SIGNAL_TYPE_NONE;
+ }
+
+- LINK_INFO("link=%d, dc_sink_in=%p is now %s\n",
++ LINK_INFO("link=%d, dc_sink_in=%p is now %s prev_sink=%p dpcd same=%d edid same=%d\n",
+ link->link_index, sink,
+ (sink_caps.signal == SIGNAL_TYPE_NONE ?
+- "Disconnected":"Connected"));
++ "Disconnected":"Connected"), prev_sink,
++ same_dpcd, same_edid);
++
++ if (prev_sink != NULL)
++ dc_sink_release(prev_sink);
+
+ return true;
+ }
+--
+2.7.4
+
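The detection change above hinges on two cheap comparisons: a memcmp() of the cached DPCD caps and the is_same_edid() check on the raw EDID bytes. A standalone restatement of that EDID comparison, using a hypothetical edid_blob type in place of dc_edid:

#include <stdbool.h>
#include <string.h>

struct edid_blob {
        unsigned int length;
        unsigned char raw[256];
};

/* Mirrors the shape of is_same_edid() above: differing lengths or an empty
 * new EDID count as "changed", so a failed read never aliases the old sink. */
static bool edid_unchanged(const struct edid_blob *prev,
                           const struct edid_blob *cur)
{
        if (prev->length != cur->length)
                return false;
        if (cur->length == 0)
                return false;
        return memcmp(prev->raw, cur->raw, cur->length) == 0;
}

The rest of the patch is reference-count bookkeeping: the previous sink is retained up front and released on every exit path, which is why each early return in dc_link_detect() gains a dc_sink_release(prev_sink) call.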
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4471-drm-amd-display-Only-limit-VSR-downscaling-when-actu.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4471-drm-amd-display-Only-limit-VSR-downscaling-when-actu.patch
new file mode 100644
index 00000000..e3f750f4
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4471-drm-amd-display-Only-limit-VSR-downscaling-when-actu.patch
@@ -0,0 +1,49 @@
+From 72a06baca99adf700cba16e1adbfaed9eec17219 Mon Sep 17 00:00:00 2001
+From: Xingyue Tao <xingyue.tao@amd.com>
+Date: Thu, 19 Apr 2018 16:23:12 -0400
+Subject: [PATCH 4471/5725] drm/amd/display: Only limit VSR downscaling when
+ actually downscaling
+
+Signed-off-by: Xingyue Tao <xingyue.tao@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | 21 ++++++++++-----------
+ 1 file changed, 10 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+index 2da1389..46a35c7 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+@@ -145,18 +145,17 @@ bool dpp_get_optimal_number_of_taps(
+ else
+ pixel_width = scl_data->viewport.width;
+
++ /* Some ASICs do not support FP16 scaling, so we reject modes that require it */
+ if (scl_data->viewport.width != scl_data->h_active &&
+- scl_data->viewport.height != scl_data->v_active) {
+-
+- /* Some ASICs does not support FP16 scaling, so we reject modes require this*/
+- if (dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT &&
+- scl_data->format == PIXEL_FORMAT_FP16)
+- return false;
+-
+- if (dpp->ctx->dc->debug.max_downscale_src_width != 0 &&
+- scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width)
+- return false;
+- }
++ scl_data->viewport.height != scl_data->v_active &&
++ dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT &&
++ scl_data->format == PIXEL_FORMAT_FP16)
++ return false;
++
++ if (scl_data->viewport.width > scl_data->h_active &&
++ dpp->ctx->dc->debug.max_downscale_src_width != 0 &&
++ scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width)
++ return false;
+
+ /* TODO: add lb check */
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4472-drm-amd-display-constify-a-few-dc_surface_update-fie.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4472-drm-amd-display-constify-a-few-dc_surface_update-fie.patch
new file mode 100644
index 00000000..dbd32197
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4472-drm-amd-display-constify-a-few-dc_surface_update-fie.patch
@@ -0,0 +1,46 @@
+From e19b6b3366904b89270657f6bb6d8b18359dca6f Mon Sep 17 00:00:00 2001
+From: Jun Lei <Jun.Lei@amd.com>
+Date: Thu, 26 Apr 2018 10:24:25 -0400
+Subject: [PATCH 4472/5725] drm/amd/display: constify a few dc_surface_update
+ fields
+
+Signed-off-by: Jun Lei <Jun.Lei@amd.com>
+Reviewed-by: Jun Lei <Jun.Lei@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc.h | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index a3e2851..85edb34 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -503,18 +503,18 @@ struct dc_surface_update {
+ struct dc_plane_state *surface;
+
+ /* isr safe update parameters. null means no updates */
+- struct dc_flip_addrs *flip_addr;
+- struct dc_plane_info *plane_info;
+- struct dc_scaling_info *scaling_info;
++ const struct dc_flip_addrs *flip_addr;
++ const struct dc_plane_info *plane_info;
++ const struct dc_scaling_info *scaling_info;
+
+ /* following updates require alloc/sleep/spin that is not isr safe,
+ * null means no updates
+ */
+- struct dc_gamma *gamma;
+- struct dc_transfer_func *in_transfer_func;
++ const struct dc_gamma *gamma;
++ const struct dc_transfer_func *in_transfer_func;
+
+- struct dc_csc_transform *input_csc_color_matrix;
+- struct fixed31_32 *coeff_reduction_factor;
++ const struct dc_csc_transform *input_csc_color_matrix;
++ const struct fixed31_32 *coeff_reduction_factor;
+ };
+
+ /*
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4473-drm-amd-display-Add-fullscreen-transitions-to-log.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4473-drm-amd-display-Add-fullscreen-transitions-to-log.patch
new file mode 100644
index 00000000..6276c931
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4473-drm-amd-display-Add-fullscreen-transitions-to-log.patch
@@ -0,0 +1,250 @@
+From 2a956fb9850033bf9f0d01a393f47e5baf5c1f4c Mon Sep 17 00:00:00 2001
+From: Anthony Koo <Anthony.Koo@amd.com>
+Date: Thu, 26 Apr 2018 10:03:44 -0400
+Subject: [PATCH 4473/5725] drm/amd/display: Add fullscreen transitions to log
+
+Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
+Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ .../gpu/drm/amd/display/modules/inc/mod_stats.h | 4 +
+ drivers/gpu/drm/amd/display/modules/stats/stats.c | 137 +++++++++++++++++----
+ 2 files changed, 114 insertions(+), 27 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h b/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h
+index 3230e2a..3812094 100644
+--- a/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h
++++ b/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h
+@@ -46,6 +46,10 @@ void mod_stats_dump(struct mod_stats *mod_stats);
+
+ void mod_stats_reset_data(struct mod_stats *mod_stats);
+
++void mod_stats_update_event(struct mod_stats *mod_stats,
++ char *event_string,
++ unsigned int length);
++
+ void mod_stats_update_flip(struct mod_stats *mod_stats,
+ unsigned long timestamp_in_ns);
+
+diff --git a/drivers/gpu/drm/amd/display/modules/stats/stats.c b/drivers/gpu/drm/amd/display/modules/stats/stats.c
+index 45acdbc..4b00bae 100644
+--- a/drivers/gpu/drm/amd/display/modules/stats/stats.c
++++ b/drivers/gpu/drm/amd/display/modules/stats/stats.c
+@@ -36,9 +36,14 @@
+ #define DAL_STATS_ENTRIES_REGKEY_DEFAULT 0x00350000
+ #define DAL_STATS_ENTRIES_REGKEY_MAX 0x01000000
+
++#define DAL_STATS_EVENT_ENTRIES_DEFAULT 0x00000100
++
+ #define MOD_STATS_NUM_VSYNCS 5
++#define MOD_STATS_EVENT_STRING_MAX 512
+
+ struct stats_time_cache {
++ unsigned int entry_id;
++
+ unsigned long flip_timestamp_in_ns;
+ unsigned long vupdate_timestamp_in_ns;
+
+@@ -63,15 +68,26 @@ struct stats_time_cache {
+ unsigned int flags;
+ };
+
++struct stats_event_cache {
++ unsigned int entry_id;
++ char event_string[MOD_STATS_EVENT_STRING_MAX];
++};
++
+ struct core_stats {
+ struct mod_stats public;
+ struct dc *dc;
+
++ bool enabled;
++ unsigned int entries;
++ unsigned int event_entries;
++ unsigned int entry_id;
++
+ struct stats_time_cache *time;
+ unsigned int index;
+
+- bool enabled;
+- unsigned int entries;
++ struct stats_event_cache *events;
++ unsigned int event_index;
++
+ };
+
+ #define MOD_STATS_TO_CORE(mod_stats)\
+@@ -125,9 +141,18 @@ struct mod_stats *mod_stats_create(struct dc *dc)
+ else
+ core_stats->entries = reg_data;
+ }
++ core_stats->time = kzalloc(
++ sizeof(struct stats_time_cache) *
++ core_stats->entries,
++ GFP_KERNEL);
+
+- core_stats->time = kzalloc(sizeof(struct stats_time_cache) * core_stats->entries,
++
++ core_stats->event_entries = DAL_STATS_EVENT_ENTRIES_DEFAULT;
++ core_stats->events = kzalloc(
++ sizeof(struct stats_event_cache) *
++ core_stats->event_entries,
+ GFP_KERNEL);
++
+ } else {
+ core_stats->entries = 0;
+ }
+@@ -139,6 +164,10 @@ struct mod_stats *mod_stats_create(struct dc *dc)
+ * handle calculation cases that depend on previous flip data.
+ */
+ core_stats->index = 1;
++ core_stats->event_index = 0;
++
++ // Keeps track of ordering within the different stats structures
++ core_stats->entry_id = 0;
+
+ return &core_stats->public;
+
+@@ -167,6 +196,9 @@ void mod_stats_dump(struct mod_stats *mod_stats)
+ struct dal_logger *logger = NULL;
+ struct core_stats *core_stats = NULL;
+ struct stats_time_cache *time = NULL;
++ struct stats_event_cache *events = NULL;
++ unsigned int time_index = 1;
++ unsigned int event_index = 0;
+ unsigned int index = 0;
+ struct log_entry log_entry;
+
+@@ -177,6 +209,7 @@ void mod_stats_dump(struct mod_stats *mod_stats)
+ dc = core_stats->dc;
+ logger = dc->ctx->logger;
+ time = core_stats->time;
++ events = core_stats->events;
+
+ DISPLAY_STATS_BEGIN(log_entry);
+
+@@ -196,30 +229,39 @@ void mod_stats_dump(struct mod_stats *mod_stats)
+ "vSyncTime1", "vSyncTime2", "vSyncTime3",
+ "vSyncTime4", "vSyncTime5", "flags");
+
+- for (int i = 0; i < core_stats->index && i < core_stats->entries; i++) {
+- DISPLAY_STATS("%10u %10u %10u %10u %10u"
+- " %11u %11u %17u %10u %14u"
+- " %10u %10u %10u %10u %10u"
+- " %10u %10u %10u %10u\n",
+- time[i].render_time_in_us,
+- time[i].avg_render_time_in_us_last_ten,
+- time[i].min_window,
+- time[i].lfc_mid_point_in_us,
+- time[i].max_window,
+- time[i].vsync_to_flip_time_in_us,
+- time[i].flip_to_vsync_time_in_us,
+- time[i].num_vsync_between_flips,
+- time[i].num_frames_inserted,
+- time[i].inserted_duration_in_us,
+- time[i].v_total_min,
+- time[i].v_total_max,
+- time[i].event_triggers,
+- time[i].v_sync_time_in_us[0],
+- time[i].v_sync_time_in_us[1],
+- time[i].v_sync_time_in_us[2],
+- time[i].v_sync_time_in_us[3],
+- time[i].v_sync_time_in_us[4],
+- time[i].flags);
++ for (int i = 0; i < core_stats->entry_id; i++) {
++ if (event_index < core_stats->event_index &&
++ i == events[event_index].entry_id) {
++ DISPLAY_STATS("%s\n", events[event_index].event_string);
++ event_index++;
++ } else if (time_index < core_stats->index &&
++ i == time[time_index].entry_id) {
++ DISPLAY_STATS("%10u %10u %10u %10u %10u"
++ " %11u %11u %17u %10u %14u"
++ " %10u %10u %10u %10u %10u"
++ " %10u %10u %10u %10u\n",
++ time[time_index].render_time_in_us,
++ time[time_index].avg_render_time_in_us_last_ten,
++ time[time_index].min_window,
++ time[time_index].lfc_mid_point_in_us,
++ time[time_index].max_window,
++ time[time_index].vsync_to_flip_time_in_us,
++ time[time_index].flip_to_vsync_time_in_us,
++ time[time_index].num_vsync_between_flips,
++ time[time_index].num_frames_inserted,
++ time[time_index].inserted_duration_in_us,
++ time[time_index].v_total_min,
++ time[time_index].v_total_max,
++ time[time_index].event_triggers,
++ time[time_index].v_sync_time_in_us[0],
++ time[time_index].v_sync_time_in_us[1],
++ time[time_index].v_sync_time_in_us[2],
++ time[time_index].v_sync_time_in_us[3],
++ time[time_index].v_sync_time_in_us[4],
++ time[time_index].flags);
++
++ time_index++;
++ }
+ }
+
+ DISPLAY_STATS_END(log_entry);
+@@ -239,7 +281,46 @@ void mod_stats_reset_data(struct mod_stats *mod_stats)
+ memset(core_stats->time, 0,
+ sizeof(struct stats_time_cache) * core_stats->entries);
+
++ memset(core_stats->events, 0,
++ sizeof(struct stats_event_cache) * core_stats->event_entries);
++
+ core_stats->index = 1;
++ core_stats->event_index = 0;
++
++ // Keeps track of ordering within the different stats structures
++ core_stats->entry_id = 0;
++}
++
++void mod_stats_update_event(struct mod_stats *mod_stats,
++ char *event_string,
++ unsigned int length)
++{
++ struct core_stats *core_stats = NULL;
++ struct stats_event_cache *events = NULL;
++ unsigned int index = 0;
++ unsigned int copy_length = 0;
++
++ if (mod_stats == NULL)
++ return;
++
++ core_stats = MOD_STATS_TO_CORE(mod_stats);
++
++ if (core_stats->index >= core_stats->entries)
++ return;
++
++ events = core_stats->events;
++ index = core_stats->event_index;
++
++ copy_length = length;
++ if (length > MOD_STATS_EVENT_STRING_MAX)
++ copy_length = MOD_STATS_EVENT_STRING_MAX;
++
++ memcpy(&events[index].event_string, event_string, copy_length);
++ events[index].event_string[copy_length - 1] = '\0';
++
++ events[index].entry_id = core_stats->entry_id;
++ core_stats->event_index++;
++ core_stats->entry_id++;
+ }
+
+ void mod_stats_update_flip(struct mod_stats *mod_stats,
+@@ -280,7 +361,9 @@ void mod_stats_update_flip(struct mod_stats *mod_stats,
+ (timestamp_in_ns -
+ time[index - 1].vupdate_timestamp_in_ns) / 1000;
+
++ time[index].entry_id = core_stats->entry_id;
+ core_stats->index++;
++ core_stats->entry_id++;
+ }
+
+ void mod_stats_update_vupdate(struct mod_stats *mod_stats,
+--
+2.7.4
+
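The dump loop above interleaves two append-only caches (time entries and event strings) by walking a shared, monotonically increasing entry_id and printing whichever cache owns the current id. A simplified standalone sketch of that merge; the struct names and the single printed field are invented here, and the real loop also starts from the reserved first time entry:

#include <stdio.h>

struct timed_entry { unsigned int id; unsigned int render_us; };
struct event_entry { unsigned int id; const char *text; };

static void dump(const struct timed_entry *t, unsigned int nt,
                 const struct event_entry *e, unsigned int ne,
                 unsigned int next_id)
{
        unsigned int ti = 0, ei = 0;
        unsigned int id;

        for (id = 0; id < next_id; id++) {
                if (ei < ne && e[ei].id == id)
                        printf("%s\n", e[ei++].text);
                else if (ti < nt && t[ti].id == id)
                        printf("%10u\n", t[ti++].render_us);
                /* ids owned by neither cache are silently skipped */
        }
}

int main(void)
{
        struct timed_entry t[] = { { 1, 16600 }, { 2, 16700 } };
        struct event_entry e[] = { { 0, "==fullscreen enter==" } };

        dump(t, 2, e, 1, 3);
        return 0;
}

Because both caches record the id at insertion time, the dump reproduces the global order of flips and events without ever sorting.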
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4474-drm-amd-display-fix-bug-with-index-check.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4474-drm-amd-display-fix-bug-with-index-check.patch
new file mode 100644
index 00000000..c69f8d78
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4474-drm-amd-display-fix-bug-with-index-check.patch
@@ -0,0 +1,28 @@
+From 3459fb42908937c9353f3ccb28ad96ca61677458 Mon Sep 17 00:00:00 2001
+From: Anthony Koo <Anthony.Koo@amd.com>
+Date: Fri, 27 Apr 2018 15:23:23 -0400
+Subject: [PATCH 4474/5725] drm/amd/display: fix bug with index check
+
+Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/modules/stats/stats.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/modules/stats/stats.c b/drivers/gpu/drm/amd/display/modules/stats/stats.c
+index 4b00bae..fe9e4b3 100644
+--- a/drivers/gpu/drm/amd/display/modules/stats/stats.c
++++ b/drivers/gpu/drm/amd/display/modules/stats/stats.c
+@@ -305,7 +305,7 @@ void mod_stats_update_event(struct mod_stats *mod_stats,
+
+ core_stats = MOD_STATS_TO_CORE(mod_stats);
+
+- if (core_stats->index >= core_stats->entries)
++ if (core_stats->event_index >= core_stats->event_entries)
+ return;
+
+ events = core_stats->events;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4475-drm-amd-display-Clear-underflow-status-for-debug-pur.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4475-drm-amd-display-Clear-underflow-status-for-debug-pur.patch
new file mode 100644
index 00000000..b28647e3
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4475-drm-amd-display-Clear-underflow-status-for-debug-pur.patch
@@ -0,0 +1,39 @@
+From fe08223921db486e3639e0d57c91a660c3d8dad3 Mon Sep 17 00:00:00 2001
+From: Nikola Cornij <nikola.cornij@amd.com>
+Date: Fri, 27 Apr 2018 17:26:25 -0400
+Subject: [PATCH 4475/5725] drm/amd/display: Clear underflow status for debug
+ purposes
+
+We want to keep the underflow sticky bit on for the longevity tests outside
+of the test environment. For debug purposes it is, however, useful to clear
+underflow status after the test that caused it so that the following
+tests are not affected. This change fulfils both requirements by clearing
+the underflow only from within Windows or Diags test environment.
+
+Signed-off-by: Nikola Cornij <nikola.cornij@amd.com>
+Reviewed-by: Nikola Cornij <Nikola.Cornij@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 400d0ca..c40993a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -326,6 +326,12 @@ void dcn10_log_hw_state(struct dc *dc)
+ s.h_total,
+ s.v_total,
+ s.underflow_occurred_status);
++
++ // Clear underflow for debug purposes
++ // We want to keep underflow sticky bit on for the longevity tests outside of test environment.
++ // This function is called only from Windows or Diags test environment, hence it's safe to clear
++ // it from here without affecting the original intent.
++ tg->funcs->clear_optc_underflow(tg);
+ }
+ DTN_INFO("\n");
+
+--
+2.7.4
+
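As the commit message explains, the sticky underflow bit stays armed for longevity runs and is only cleared from the debug dump path. In isolation, the general read-then-conditionally-clear pattern looks like the sketch below; fake_otg and its flag are invented for illustration, while the real clear goes through tg->funcs->clear_optc_underflow():

#include <stdbool.h>
#include <stdio.h>

struct fake_otg {
        bool underflow_sticky;  /* latched by hardware, stays set until cleared */
};

static bool log_underflow(struct fake_otg *otg, bool clearing_allowed)
{
        bool occurred = otg->underflow_sticky;

        printf("underflow_occurred_status: %d\n", occurred);

        /* Outside the debug dump the sticky bit is left alone, so a
         * long-running test can still tell that an underflow happened. */
        if (clearing_allowed)
                otg->underflow_sticky = false;

        return occurred;
}

int main(void)
{
        struct fake_otg otg = { .underflow_sticky = true };

        log_underflow(&otg, true);      /* debug dump: report and clear */
        log_underflow(&otg, false);     /* next read now comes up clean */
        return 0;
}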
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4476-drm-amd-display-DCN1-link-encoder.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4476-drm-amd-display-DCN1-link-encoder.patch
new file mode 100644
index 00000000..98999423
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4476-drm-amd-display-DCN1-link-encoder.patch
@@ -0,0 +1,1852 @@
+From 1db0a6ae01121409e272cc1659c50de7e11a7f14 Mon Sep 17 00:00:00 2001
+From: Eric Bernstein <eric.bernstein@amd.com>
+Date: Thu, 26 Apr 2018 14:06:00 -0400
+Subject: [PATCH 4476/5725] drm/amd/display: DCN1 link encoder
+
+Create DCN1 link encoder files and update AUX and HPD register access.
+
+Signed-off-by: Eric Bernstein <eric.bernstein@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c | 2 -
+ drivers/gpu/drm/amd/display/dc/dcn10/Makefile | 2 +-
+ .../drm/amd/display/dc/dcn10/dcn10_link_encoder.c | 1362 ++++++++++++++++++++
+ .../drm/amd/display/dc/dcn10/dcn10_link_encoder.h | 330 +++++
+ .../gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 43 +-
+ 5 files changed, 1716 insertions(+), 23 deletions(-)
+ create mode 100644 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+ create mode 100644 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+index 7c866a7..82cd1d6 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+@@ -11,8 +11,6 @@
+ #include "dc_link_dp.h"
+ #include "dc_link_ddc.h"
+ #include "dm_helpers.h"
+-#include "dce/dce_link_encoder.h"
+-#include "dce/dce_stream_encoder.h"
+ #include "dpcd_defs.h"
+
+ enum dc_status core_link_read_dpcd(
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
+index bb5a130..c08137e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
+@@ -5,7 +5,7 @@ DCN10 = dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o \
+ dcn10_dpp.o dcn10_opp.o dcn10_optc.o \
+ dcn10_hubp.o dcn10_mpc.o \
+ dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \
+- dcn10_hubbub.o dcn10_stream_encoder.o
++ dcn10_hubbub.o dcn10_stream_encoder.o dcn10_link_encoder.o
+
+ AMD_DAL_DCN10 = $(addprefix $(AMDDALPATH)/dc/dcn10/,$(DCN10))
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+new file mode 100644
+index 0000000..21fa40a
+--- /dev/null
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+@@ -0,0 +1,1362 @@
++/*
++ * Copyright 2012-15 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++#include "reg_helper.h"
++
++#include "core_types.h"
++#include "link_encoder.h"
++#include "dcn10_link_encoder.h"
++#include "stream_encoder.h"
++#include "i2caux_interface.h"
++#include "dc_bios_types.h"
++
++#include "gpio_service_interface.h"
++
++#define CTX \
++ enc10->base.ctx
++#define DC_LOGGER \
++ enc10->base.ctx->logger
++
++#define REG(reg)\
++ (enc10->link_regs->reg)
++
++#undef FN
++#define FN(reg_name, field_name) \
++ enc10->link_shift->field_name, enc10->link_mask->field_name
++
++
++/*
++ * @brief
++ * Trigger Source Select
++ * ASIC-dependent, actual values for register programming
++ */
++#define DCN10_DIG_FE_SOURCE_SELECT_INVALID 0x0
++#define DCN10_DIG_FE_SOURCE_SELECT_DIGA 0x1
++#define DCN10_DIG_FE_SOURCE_SELECT_DIGB 0x2
++#define DCN10_DIG_FE_SOURCE_SELECT_DIGC 0x4
++#define DCN10_DIG_FE_SOURCE_SELECT_DIGD 0x08
++#define DCN10_DIG_FE_SOURCE_SELECT_DIGE 0x10
++#define DCN10_DIG_FE_SOURCE_SELECT_DIGF 0x20
++#define DCN10_DIG_FE_SOURCE_SELECT_DIGG 0x40
++
++enum {
++ DP_MST_UPDATE_MAX_RETRY = 50
++};
++
++
++
++static void aux_initialize(struct dcn10_link_encoder *enc10);
++
++
++static const struct link_encoder_funcs dcn10_lnk_enc_funcs = {
++ .validate_output_with_stream =
++ dcn10_link_encoder_validate_output_with_stream,
++ .hw_init = dcn10_link_encoder_hw_init,
++ .setup = dcn10_link_encoder_setup,
++ .enable_tmds_output = dcn10_link_encoder_enable_tmds_output,
++ .enable_dp_output = dcn10_link_encoder_enable_dp_output,
++ .enable_dp_mst_output = dcn10_link_encoder_enable_dp_mst_output,
++ .disable_output = dcn10_link_encoder_disable_output,
++ .dp_set_lane_settings = dcn10_link_encoder_dp_set_lane_settings,
++ .dp_set_phy_pattern = dcn10_link_encoder_dp_set_phy_pattern,
++ .update_mst_stream_allocation_table =
++ dcn10_link_encoder_update_mst_stream_allocation_table,
++ .psr_program_dp_dphy_fast_training =
++ dcn10_psr_program_dp_dphy_fast_training,
++ .psr_program_secondary_packet = dcn10_psr_program_secondary_packet,
++ .connect_dig_be_to_fe = dcn10_link_encoder_connect_dig_be_to_fe,
++ .enable_hpd = dcn10_link_encoder_enable_hpd,
++ .disable_hpd = dcn10_link_encoder_disable_hpd,
++ .is_dig_enabled = dcn10_is_dig_enabled,
++ .destroy = dcn10_link_encoder_destroy
++};
++
++static enum bp_result link_transmitter_control(
++ struct dcn10_link_encoder *enc10,
++ struct bp_transmitter_control *cntl)
++{
++ enum bp_result result;
++ struct dc_bios *bp = enc10->base.ctx->dc_bios;
++
++ result = bp->funcs->transmitter_control(bp, cntl);
++
++ return result;
++}
++
++static void enable_phy_bypass_mode(
++ struct dcn10_link_encoder *enc10,
++ bool enable)
++{
++ /* This register resides in DP back end block;
++ * transmitter is used for the offset
++ */
++ REG_UPDATE(DP_DPHY_CNTL, DPHY_BYPASS, enable);
++
++}
++
++static void disable_prbs_symbols(
++ struct dcn10_link_encoder *enc10,
++ bool disable)
++{
++ /* This register resides in DP back end block;
++ * transmitter is used for the offset
++ */
++ REG_UPDATE_4(DP_DPHY_CNTL,
++ DPHY_ATEST_SEL_LANE0, disable,
++ DPHY_ATEST_SEL_LANE1, disable,
++ DPHY_ATEST_SEL_LANE2, disable,
++ DPHY_ATEST_SEL_LANE3, disable);
++}
++
++static void disable_prbs_mode(
++ struct dcn10_link_encoder *enc10)
++{
++ REG_UPDATE(DP_DPHY_PRBS_CNTL, DPHY_PRBS_EN, 0);
++}
++
++static void program_pattern_symbols(
++ struct dcn10_link_encoder *enc10,
++ uint16_t pattern_symbols[8])
++{
++ /* This register resides in DP back end block;
++ * transmitter is used for the offset
++ */
++ REG_SET_3(DP_DPHY_SYM0, 0,
++ DPHY_SYM1, pattern_symbols[0],
++ DPHY_SYM2, pattern_symbols[1],
++ DPHY_SYM3, pattern_symbols[2]);
++
++ /* This register resides in DP back end block;
++ * transmitter is used for the offset
++ */
++ REG_SET_3(DP_DPHY_SYM1, 0,
++ DPHY_SYM4, pattern_symbols[3],
++ DPHY_SYM5, pattern_symbols[4],
++ DPHY_SYM6, pattern_symbols[5]);
++
++ /* This register resides in DP back end block;
++ * transmitter is used for the offset
++ */
++ REG_SET_2(DP_DPHY_SYM2, 0,
++ DPHY_SYM7, pattern_symbols[6],
++ DPHY_SYM8, pattern_symbols[7]);
++}
++
++static void set_dp_phy_pattern_d102(
++ struct dcn10_link_encoder *enc10)
++{
++ /* Disable PHY Bypass mode to setup the test pattern */
++ enable_phy_bypass_mode(enc10, false);
++
++ /* For 10-bit PRBS or debug symbols
++ * please use the following sequence:
++ *
++ * Enable debug symbols on the lanes
++ */
++ disable_prbs_symbols(enc10, true);
++
++ /* Disable PRBS mode */
++ disable_prbs_mode(enc10);
++
++ /* Program debug symbols to be output */
++ {
++ uint16_t pattern_symbols[8] = {
++ 0x2AA, 0x2AA, 0x2AA, 0x2AA,
++ 0x2AA, 0x2AA, 0x2AA, 0x2AA
++ };
++
++ program_pattern_symbols(enc10, pattern_symbols);
++ }
++
++ /* Enable phy bypass mode to enable the test pattern */
++
++ enable_phy_bypass_mode(enc10, true);
++}
++
++static void set_link_training_complete(
++ struct dcn10_link_encoder *enc10,
++ bool complete)
++{
++ /* This register resides in DP back end block;
++ * transmitter is used for the offset
++ */
++ REG_UPDATE(DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, complete);
++
++}
++
++void dcn10_link_encoder_set_dp_phy_pattern_training_pattern(
++ struct link_encoder *enc,
++ uint32_t index)
++{
++ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
++ /* Write Training Pattern */
++
++ REG_WRITE(DP_DPHY_TRAINING_PATTERN_SEL, index);
++
++ /* Set HW Register Training Complete to false */
++
++ set_link_training_complete(enc10, false);
++
++ /* Disable PHY Bypass mode to output Training Pattern */
++
++ enable_phy_bypass_mode(enc10, false);
++
++ /* Disable PRBS mode */
++ disable_prbs_mode(enc10);
++}
++
++static void setup_panel_mode(
++ struct dcn10_link_encoder *enc10,
++ enum dp_panel_mode panel_mode)
++{
++ uint32_t value;
++
++ ASSERT(REG(DP_DPHY_INTERNAL_CTRL));
++ value = REG_READ(DP_DPHY_INTERNAL_CTRL);
++
++ switch (panel_mode) {
++ case DP_PANEL_MODE_EDP:
++ value = 0x1;
++ break;
++ case DP_PANEL_MODE_SPECIAL:
++ value = 0x11;
++ break;
++ default:
++ value = 0x0;
++ break;
++ }
++
++ REG_WRITE(DP_DPHY_INTERNAL_CTRL, value);
++}
++
++static void set_dp_phy_pattern_symbol_error(
++ struct dcn10_link_encoder *enc10)
++{
++ /* Disable PHY Bypass mode to setup the test pattern */
++ enable_phy_bypass_mode(enc10, false);
++
++ /* program correct panel mode*/
++ setup_panel_mode(enc10, DP_PANEL_MODE_DEFAULT);
++
++ /* A PRBS23 pattern is used for most DP electrical measurements. */
++
++ /* Enable PRBS symbols on the lanes */
++ disable_prbs_symbols(enc10, false);
++
++ /* For PRBS23 Set bit DPHY_PRBS_SEL=1 and Set bit DPHY_PRBS_EN=1 */
++ REG_UPDATE_2(DP_DPHY_PRBS_CNTL,
++ DPHY_PRBS_SEL, 1,
++ DPHY_PRBS_EN, 1);
++
++ /* Enable phy bypass mode to enable the test pattern */
++ enable_phy_bypass_mode(enc10, true);
++}
++
++static void set_dp_phy_pattern_prbs7(
++ struct dcn10_link_encoder *enc10)
++{
++ /* Disable PHY Bypass mode to setup the test pattern */
++ enable_phy_bypass_mode(enc10, false);
++
++ /* A PRBS7 pattern is used for most DP electrical measurements. */
++
++ /* Enable PRBS symbols on the lanes */
++ disable_prbs_symbols(enc10, false);
++
++ /* For PRBS7 Set bit DPHY_PRBS_SEL=0 and Set bit DPHY_PRBS_EN=1 */
++ REG_UPDATE_2(DP_DPHY_PRBS_CNTL,
++ DPHY_PRBS_SEL, 0,
++ DPHY_PRBS_EN, 1);
++
++ /* Enable phy bypass mode to enable the test pattern */
++ enable_phy_bypass_mode(enc10, true);
++}
++
++static void set_dp_phy_pattern_80bit_custom(
++ struct dcn10_link_encoder *enc10,
++ const uint8_t *pattern)
++{
++ /* Disable PHY Bypass mode to setup the test pattern */
++ enable_phy_bypass_mode(enc10, false);
++
++ /* Enable debug symbols on the lanes */
++
++ disable_prbs_symbols(enc10, true);
++
++ /* Enable PHY bypass mode to enable the test pattern */
++ /* TODO is it really needed ? */
++
++ enable_phy_bypass_mode(enc10, true);
++
++ /* Program 80 bit custom pattern */
++ {
++ uint16_t pattern_symbols[8];
++
++ pattern_symbols[0] =
++ ((pattern[1] & 0x03) << 8) | pattern[0];
++ pattern_symbols[1] =
++ ((pattern[2] & 0x0f) << 6) | ((pattern[1] >> 2) & 0x3f);
++ pattern_symbols[2] =
++ ((pattern[3] & 0x3f) << 4) | ((pattern[2] >> 4) & 0x0f);
++ pattern_symbols[3] =
++ (pattern[4] << 2) | ((pattern[3] >> 6) & 0x03);
++ pattern_symbols[4] =
++ ((pattern[6] & 0x03) << 8) | pattern[5];
++ pattern_symbols[5] =
++ ((pattern[7] & 0x0f) << 6) | ((pattern[6] >> 2) & 0x3f);
++ pattern_symbols[6] =
++ ((pattern[8] & 0x3f) << 4) | ((pattern[7] >> 4) & 0x0f);
++ pattern_symbols[7] =
++ (pattern[9] << 2) | ((pattern[8] >> 6) & 0x03);
++
++ program_pattern_symbols(enc10, pattern_symbols);
++ }
++
++ /* Enable phy bypass mode to enable the test pattern */
++
++ enable_phy_bypass_mode(enc10, true);
++}
++
++static void set_dp_phy_pattern_hbr2_compliance_cp2520_2(
++ struct dcn10_link_encoder *enc10,
++ unsigned int cp2520_pattern)
++{
++
++ /* previously there is a register DP_HBR2_EYE_PATTERN
++ * that is enabled to get the pattern.
++ * But it does not work with the latest spec change,
++ * so we are programming the following registers manually.
++ *
++ * The following settings have been confirmed
++ * by Nick Chorney and Sandra Liu
++ */
++
++ /* Disable PHY Bypass mode to setup the test pattern */
++
++ enable_phy_bypass_mode(enc10, false);
++
++ /* Setup DIG encoder in DP SST mode */
++ enc10->base.funcs->setup(&enc10->base, SIGNAL_TYPE_DISPLAY_PORT);
++
++ /* ensure normal panel mode. */
++ setup_panel_mode(enc10, DP_PANEL_MODE_DEFAULT);
++
++ /* no vbid after BS (SR)
++ * DP_LINK_FRAMING_CNTL changed history Sandra Liu
++ * 11000260 / 11000104 / 110000FC
++ */
++ REG_UPDATE_3(DP_LINK_FRAMING_CNTL,
++ DP_IDLE_BS_INTERVAL, 0xFC,
++ DP_VBID_DISABLE, 1,
++ DP_VID_ENHANCED_FRAME_MODE, 1);
++
++ /* swap every BS with SR */
++ REG_UPDATE(DP_DPHY_SCRAM_CNTL, DPHY_SCRAMBLER_BS_COUNT, 0);
++
++ /* select cp2520 patterns */
++ if (REG(DP_DPHY_HBR2_PATTERN_CONTROL))
++ REG_UPDATE(DP_DPHY_HBR2_PATTERN_CONTROL,
++ DP_DPHY_HBR2_PATTERN_CONTROL, cp2520_pattern);
++ else
++ /* pre-DCE11 can only generate CP2520 pattern 2 */
++ ASSERT(cp2520_pattern == 2);
++
++ /* set link training complete */
++ set_link_training_complete(enc10, true);
++
++ /* disable video stream */
++ REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0);
++
++ /* Disable PHY Bypass mode to setup the test pattern */
++ enable_phy_bypass_mode(enc10, false);
++}
++
++static void set_dp_phy_pattern_passthrough_mode(
++ struct dcn10_link_encoder *enc10,
++ enum dp_panel_mode panel_mode)
++{
++ /* program correct panel mode */
++ setup_panel_mode(enc10, panel_mode);
++
++ /* restore LINK_FRAMING_CNTL and DPHY_SCRAMBLER_BS_COUNT
++ * in case we were doing HBR2 compliance pattern before
++ */
++ REG_UPDATE_3(DP_LINK_FRAMING_CNTL,
++ DP_IDLE_BS_INTERVAL, 0x2000,
++ DP_VBID_DISABLE, 0,
++ DP_VID_ENHANCED_FRAME_MODE, 1);
++
++ REG_UPDATE(DP_DPHY_SCRAM_CNTL, DPHY_SCRAMBLER_BS_COUNT, 0x1FF);
++
++ /* set link training complete */
++ set_link_training_complete(enc10, true);
++
++ /* Disable PHY Bypass mode to setup the test pattern */
++ enable_phy_bypass_mode(enc10, false);
++
++ /* Disable PRBS mode */
++ disable_prbs_mode(enc10);
++}
++
++/* return value is bit-vector */
++static uint8_t get_frontend_source(
++ enum engine_id engine)
++{
++ switch (engine) {
++ case ENGINE_ID_DIGA:
++ return DCN10_DIG_FE_SOURCE_SELECT_DIGA;
++ case ENGINE_ID_DIGB:
++ return DCN10_DIG_FE_SOURCE_SELECT_DIGB;
++ case ENGINE_ID_DIGC:
++ return DCN10_DIG_FE_SOURCE_SELECT_DIGC;
++ case ENGINE_ID_DIGD:
++ return DCN10_DIG_FE_SOURCE_SELECT_DIGD;
++ case ENGINE_ID_DIGE:
++ return DCN10_DIG_FE_SOURCE_SELECT_DIGE;
++ case ENGINE_ID_DIGF:
++ return DCN10_DIG_FE_SOURCE_SELECT_DIGF;
++ case ENGINE_ID_DIGG:
++ return DCN10_DIG_FE_SOURCE_SELECT_DIGG;
++ default:
++ ASSERT_CRITICAL(false);
++ return DCN10_DIG_FE_SOURCE_SELECT_INVALID;
++ }
++}
++
++static void configure_encoder(
++ struct dcn10_link_encoder *enc10,
++ const struct dc_link_settings *link_settings)
++{
++ /* set number of lanes */
++
++ REG_SET(DP_CONFIG, 0,
++ DP_UDI_LANES, link_settings->lane_count - LANE_COUNT_ONE);
++
++ /* setup scrambler */
++ REG_UPDATE(DP_DPHY_SCRAM_CNTL, DPHY_SCRAMBLER_ADVANCE, 1);
++}
++
++void dcn10_psr_program_dp_dphy_fast_training(struct link_encoder *enc,
++ bool exit_link_training_required)
++{
++ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
++
++ if (exit_link_training_required)
++ REG_UPDATE(DP_DPHY_FAST_TRAINING,
++ DPHY_RX_FAST_TRAINING_CAPABLE, 1);
++ else {
++ REG_UPDATE(DP_DPHY_FAST_TRAINING,
++ DPHY_RX_FAST_TRAINING_CAPABLE, 0);
++ /*In DCE 11, we are able to pre-program a Force SR register
++ * to be able to trigger SR symbol after 5 idle patterns
++ * transmitted. Upon PSR Exit, DMCU can trigger
++ * DPHY_LOAD_BS_COUNT_START = 1. Upon writing 1 to
++ * DPHY_LOAD_BS_COUNT_START and the internal counter
++ * reaches DPHY_LOAD_BS_COUNT, the next BS symbol will be
++ * replaced by SR symbol once.
++ */
++
++ REG_UPDATE(DP_DPHY_BS_SR_SWAP_CNTL, DPHY_LOAD_BS_COUNT, 0x5);
++ }
++}
++
++void dcn10_psr_program_secondary_packet(struct link_encoder *enc,
++ unsigned int sdp_transmit_line_num_deadline)
++{
++ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
++
++ REG_UPDATE_2(DP_SEC_CNTL1,
++ DP_SEC_GSP0_LINE_NUM, sdp_transmit_line_num_deadline,
++ DP_SEC_GSP0_PRIORITY, 1);
++}
++
++bool dcn10_is_dig_enabled(struct link_encoder *enc)
++{
++ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
++ uint32_t value;
++
++ REG_GET(DIG_BE_EN_CNTL, DIG_ENABLE, &value);
++ return value;
++}
++
++static void link_encoder_disable(struct dcn10_link_encoder *enc10)
++{
++ /* reset training pattern */
++ REG_SET(DP_DPHY_TRAINING_PATTERN_SEL, 0,
++ DPHY_TRAINING_PATTERN_SEL, 0);
++
++ /* reset training complete */
++ REG_UPDATE(DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, 0);
++
++ /* reset panel mode */
++ setup_panel_mode(enc10, DP_PANEL_MODE_DEFAULT);
++}
++
++static void hpd_initialize(
++ struct dcn10_link_encoder *enc10)
++{
++ /* Associate HPD with DIG_BE */
++ enum hpd_source_id hpd_source = enc10->base.hpd_source;
++
++ REG_UPDATE(DIG_BE_CNTL, DIG_HPD_SELECT, hpd_source);
++}
++
++bool dcn10_link_encoder_validate_dvi_output(
++ const struct dcn10_link_encoder *enc10,
++ enum signal_type connector_signal,
++ enum signal_type signal,
++ const struct dc_crtc_timing *crtc_timing)
++{
++ uint32_t max_pixel_clock = TMDS_MAX_PIXEL_CLOCK;
++
++ if (signal == SIGNAL_TYPE_DVI_DUAL_LINK)
++ max_pixel_clock *= 2;
++
++ /* This handles the case of HDMI downgrade to DVI:
++ * we don't want to cap the pixel clock if the DDI is not DVI.
++ */
++ if (connector_signal != SIGNAL_TYPE_DVI_DUAL_LINK &&
++ connector_signal != SIGNAL_TYPE_DVI_SINGLE_LINK)
++ max_pixel_clock = enc10->base.features.max_hdmi_pixel_clock;
++
++ /* DVI only support RGB pixel encoding */
++ if (crtc_timing->pixel_encoding != PIXEL_ENCODING_RGB)
++ return false;
++
++ /* connect DVI via adapter's HDMI connector */
++ if ((connector_signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
++ connector_signal == SIGNAL_TYPE_HDMI_TYPE_A) &&
++ signal != SIGNAL_TYPE_HDMI_TYPE_A &&
++ crtc_timing->pix_clk_khz > TMDS_MAX_PIXEL_CLOCK)
++ return false;
++ if (crtc_timing->pix_clk_khz < TMDS_MIN_PIXEL_CLOCK)
++ return false;
++
++ if (crtc_timing->pix_clk_khz > max_pixel_clock)
++ return false;
++
++ /* DVI supports 6/8bpp single-link and 10/16bpp dual-link */
++ switch (crtc_timing->display_color_depth) {
++ case COLOR_DEPTH_666:
++ case COLOR_DEPTH_888:
++ break;
++ case COLOR_DEPTH_101010:
++ case COLOR_DEPTH_161616:
++ if (signal != SIGNAL_TYPE_DVI_DUAL_LINK)
++ return false;
++ break;
++ default:
++ return false;
++ }
++
++ return true;
++}
++
++static bool dcn10_link_encoder_validate_hdmi_output(
++ const struct dcn10_link_encoder *enc10,
++ const struct dc_crtc_timing *crtc_timing,
++ int adjusted_pix_clk_khz)
++{
++ enum dc_color_depth max_deep_color =
++ enc10->base.features.max_hdmi_deep_color;
++
++ if (max_deep_color < crtc_timing->display_color_depth)
++ return false;
++
++ if (crtc_timing->display_color_depth < COLOR_DEPTH_888)
++ return false;
++ if (adjusted_pix_clk_khz < TMDS_MIN_PIXEL_CLOCK)
++ return false;
++
++ if ((adjusted_pix_clk_khz == 0) ||
++ (adjusted_pix_clk_khz > enc10->base.features.max_hdmi_pixel_clock))
++ return false;
++
++ /* DCE11 HW does not support 420 */
++ if (!enc10->base.features.ycbcr420_supported &&
++ crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
++ return false;
++
++ if (!enc10->base.features.flags.bits.HDMI_6GB_EN &&
++ adjusted_pix_clk_khz >= 300000)
++ return false;
++ return true;
++}
++
++bool dcn10_link_encoder_validate_dp_output(
++ const struct dcn10_link_encoder *enc10,
++ const struct dc_crtc_timing *crtc_timing)
++{
++ /* default RGB only */
++ if (crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB)
++ return true;
++
++ if (enc10->base.features.flags.bits.IS_YCBCR_CAPABLE)
++ return true;
++
++ /* for DCE 8.x or later DP Y-only feature,
++ * we need ASIC cap + FeatureSupportDPYonly, not support 666
++ */
++ if (crtc_timing->flags.Y_ONLY &&
++ enc10->base.features.flags.bits.IS_YCBCR_CAPABLE &&
++ crtc_timing->display_color_depth != COLOR_DEPTH_666)
++ return true;
++
++ return false;
++}
++
++void dcn10_link_encoder_construct(
++ struct dcn10_link_encoder *enc10,
++ const struct encoder_init_data *init_data,
++ const struct encoder_feature_support *enc_features,
++ const struct dcn10_link_enc_registers *link_regs,
++ const struct dcn10_link_enc_aux_registers *aux_regs,
++ const struct dcn10_link_enc_hpd_registers *hpd_regs,
++ const struct dcn10_link_enc_shift *link_shift,
++ const struct dcn10_link_enc_mask *link_mask)
++{
++ struct bp_encoder_cap_info bp_cap_info = {0};
++ const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs;
++ enum bp_result result = BP_RESULT_OK;
++
++ enc10->base.funcs = &dcn10_lnk_enc_funcs;
++ enc10->base.ctx = init_data->ctx;
++ enc10->base.id = init_data->encoder;
++
++ enc10->base.hpd_source = init_data->hpd_source;
++ enc10->base.connector = init_data->connector;
++
++ enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
++
++ enc10->base.features = *enc_features;
++
++ enc10->base.transmitter = init_data->transmitter;
++
++ /* set the flag to indicate whether driver poll the I2C data pin
++ * while doing the DP sink detect
++ */
++
++/* if (dal_adapter_service_is_feature_supported(as,
++ FEATURE_DP_SINK_DETECT_POLL_DATA_PIN))
++ enc10->base.features.flags.bits.
++ DP_SINK_DETECT_POLL_DATA_PIN = true;*/
++
++ enc10->base.output_signals =
++ SIGNAL_TYPE_DVI_SINGLE_LINK |
++ SIGNAL_TYPE_DVI_DUAL_LINK |
++ SIGNAL_TYPE_LVDS |
++ SIGNAL_TYPE_DISPLAY_PORT |
++ SIGNAL_TYPE_DISPLAY_PORT_MST |
++ SIGNAL_TYPE_EDP |
++ SIGNAL_TYPE_HDMI_TYPE_A;
++
++ /* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE.
++ * SW always assign DIG_FE 1:1 mapped to DIG_FE for non-MST UNIPHY.
++ * SW assign DIG_FE to non-MST UNIPHY first and MST last. So prefer
++ * DIG is per UNIPHY and used by SST DP, eDP, HDMI, DVI and LVDS.
++ * Prefer DIG assignment is decided by board design.
++ * For DCE 8.0, there are only max 6 UNIPHYs, we assume board design
++ * and VBIOS will filter out 7 UNIPHY for DCE 8.0.
++ * By this, adding DIGG should not hurt DCE 8.0.
++ * This will let DCE 8.1 share DCE 8.0 as much as possible
++ */
++
++ enc10->link_regs = link_regs;
++ enc10->aux_regs = aux_regs;
++ enc10->hpd_regs = hpd_regs;
++ enc10->link_shift = link_shift;
++ enc10->link_mask = link_mask;
++
++ switch (enc10->base.transmitter) {
++ case TRANSMITTER_UNIPHY_A:
++ enc10->base.preferred_engine = ENGINE_ID_DIGA;
++ break;
++ case TRANSMITTER_UNIPHY_B:
++ enc10->base.preferred_engine = ENGINE_ID_DIGB;
++ break;
++ case TRANSMITTER_UNIPHY_C:
++ enc10->base.preferred_engine = ENGINE_ID_DIGC;
++ break;
++ case TRANSMITTER_UNIPHY_D:
++ enc10->base.preferred_engine = ENGINE_ID_DIGD;
++ break;
++ case TRANSMITTER_UNIPHY_E:
++ enc10->base.preferred_engine = ENGINE_ID_DIGE;
++ break;
++ case TRANSMITTER_UNIPHY_F:
++ enc10->base.preferred_engine = ENGINE_ID_DIGF;
++ break;
++ case TRANSMITTER_UNIPHY_G:
++ enc10->base.preferred_engine = ENGINE_ID_DIGG;
++ break;
++ default:
++ ASSERT_CRITICAL(false);
++ enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
++ }
++
++ /* default to one to mirror Windows behavior */
++ enc10->base.features.flags.bits.HDMI_6GB_EN = 1;
++
++ result = bp_funcs->get_encoder_cap_info(enc10->base.ctx->dc_bios,
++ enc10->base.id, &bp_cap_info);
++
++ /* Override features with DCE-specific values */
++ if (result == BP_RESULT_OK) {
++ enc10->base.features.flags.bits.IS_HBR2_CAPABLE =
++ bp_cap_info.DP_HBR2_EN;
++ enc10->base.features.flags.bits.IS_HBR3_CAPABLE =
++ bp_cap_info.DP_HBR3_EN;
++ enc10->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
++ } else {
++ DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
++ __func__,
++ result);
++ }
++}
++
++bool dcn10_link_encoder_validate_output_with_stream(
++ struct link_encoder *enc,
++ const struct dc_stream_state *stream)
++{
++ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
++ bool is_valid;
++
++ switch (stream->signal) {
++ case SIGNAL_TYPE_DVI_SINGLE_LINK:
++ case SIGNAL_TYPE_DVI_DUAL_LINK:
++ is_valid = dcn10_link_encoder_validate_dvi_output(
++ enc10,
++ stream->sink->link->connector_signal,
++ stream->signal,
++ &stream->timing);
++ break;
++ case SIGNAL_TYPE_HDMI_TYPE_A:
++ is_valid = dcn10_link_encoder_validate_hdmi_output(
++ enc10,
++ &stream->timing,
++ stream->phy_pix_clk);
++ break;
++ case SIGNAL_TYPE_DISPLAY_PORT:
++ case SIGNAL_TYPE_DISPLAY_PORT_MST:
++ is_valid = dcn10_link_encoder_validate_dp_output(
++ enc10, &stream->timing);
++ break;
++ case SIGNAL_TYPE_EDP:
++ is_valid = (stream->timing.pixel_encoding == PIXEL_ENCODING_RGB) ? true : false;
++ break;
++ case SIGNAL_TYPE_VIRTUAL:
++ is_valid = true;
++ break;
++ default:
++ is_valid = false;
++ break;
++ }
++
++ return is_valid;
++}
++
++void dcn10_link_encoder_hw_init(
++ struct link_encoder *enc)
++{
++ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
++ struct bp_transmitter_control cntl = { 0 };
++ enum bp_result result;
++
++ cntl.action = TRANSMITTER_CONTROL_INIT;
++ cntl.engine_id = ENGINE_ID_UNKNOWN;
++ cntl.transmitter = enc10->base.transmitter;
++ cntl.connector_obj_id = enc10->base.connector;
++ cntl.lanes_number = LANE_COUNT_FOUR;
++ cntl.coherent = false;
++ cntl.hpd_sel = enc10->base.hpd_source;
++
++ if (enc10->base.connector.id == CONNECTOR_ID_EDP)
++ cntl.signal = SIGNAL_TYPE_EDP;
++
++ result = link_transmitter_control(enc10, &cntl);
++
++ if (result != BP_RESULT_OK) {
++ DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n",
++ __func__);
++ BREAK_TO_DEBUGGER();
++ return;
++ }
++
++ if (enc10->base.connector.id == CONNECTOR_ID_LVDS) {
++ cntl.action = TRANSMITTER_CONTROL_BACKLIGHT_BRIGHTNESS;
++
++ result = link_transmitter_control(enc10, &cntl);
++
++ ASSERT(result == BP_RESULT_OK);
++
++ }
++ aux_initialize(enc10);
++
++ /* reinitialize HPD.
++ * hpd_initialize() will pass DIG_FE id to HW context.
++ * All other routine within HW context will use fe_engine_offset
++ * as DIG_FE id even caller pass DIG_FE id.
++ * So this routine must be called first.
++ */
++ hpd_initialize(enc10);
++}
++
++void dcn10_link_encoder_destroy(struct link_encoder **enc)
++{
++ kfree(TO_DCN10_LINK_ENC(*enc));
++ *enc = NULL;
++}
++
++void dcn10_link_encoder_setup(
++ struct link_encoder *enc,
++ enum signal_type signal)
++{
++ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
++
++ switch (signal) {
++ case SIGNAL_TYPE_EDP:
++ case SIGNAL_TYPE_DISPLAY_PORT:
++ /* DP SST */
++ REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 0);
++ break;
++ case SIGNAL_TYPE_LVDS:
++ /* LVDS */
++ REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 1);
++ break;
++ case SIGNAL_TYPE_DVI_SINGLE_LINK:
++ case SIGNAL_TYPE_DVI_DUAL_LINK:
++ /* TMDS-DVI */
++ REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 2);
++ break;
++ case SIGNAL_TYPE_HDMI_TYPE_A:
++ /* TMDS-HDMI */
++ REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 3);
++ break;
++ case SIGNAL_TYPE_DISPLAY_PORT_MST:
++ /* DP MST */
++ REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 5);
++ break;
++ default:
++ ASSERT_CRITICAL(false);
++ /* invalid mode ! */
++ break;
++ }
++
++}
++
++/* TODO: still need depth or just pass in adjusted pixel clock? */
++void dcn10_link_encoder_enable_tmds_output(
++ struct link_encoder *enc,
++ enum clock_source_id clock_source,
++ enum dc_color_depth color_depth,
++ enum signal_type signal,
++ uint32_t pixel_clock)
++{
++ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
++ struct bp_transmitter_control cntl = { 0 };
++ enum bp_result result;
++
++ /* Enable the PHY */
++
++ cntl.action = TRANSMITTER_CONTROL_ENABLE;
++ cntl.engine_id = enc->preferred_engine;
++ cntl.transmitter = enc10->base.transmitter;
++ cntl.pll_id = clock_source;
++ cntl.signal = signal;
++ if (cntl.signal == SIGNAL_TYPE_DVI_DUAL_LINK)
++ cntl.lanes_number = 8;
++ else
++ cntl.lanes_number = 4;
++
++ cntl.hpd_sel = enc10->base.hpd_source;
++
++ cntl.pixel_clock = pixel_clock;
++ cntl.color_depth = color_depth;
++
++ result = link_transmitter_control(enc10, &cntl);
++
++ if (result != BP_RESULT_OK) {
++ DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n",
++ __func__);
++ BREAK_TO_DEBUGGER();
++ }
++}
++
++/* enables DP PHY output */
++void dcn10_link_encoder_enable_dp_output(
++ struct link_encoder *enc,
++ const struct dc_link_settings *link_settings,
++ enum clock_source_id clock_source)
++{
++ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
++ struct bp_transmitter_control cntl = { 0 };
++ enum bp_result result;
++
++ /* Enable the PHY */
++
++ /* number_of_lanes is used for pixel clock adjust,
++ * but it's not passed to asic_control.
++ * We need to set number of lanes manually.
++ */
++ configure_encoder(enc10, link_settings);
++
++ cntl.action = TRANSMITTER_CONTROL_ENABLE;
++ cntl.engine_id = enc->preferred_engine;
++ cntl.transmitter = enc10->base.transmitter;
++ cntl.pll_id = clock_source;
++ cntl.signal = SIGNAL_TYPE_DISPLAY_PORT;
++ cntl.lanes_number = link_settings->lane_count;
++ cntl.hpd_sel = enc10->base.hpd_source;
++ cntl.pixel_clock = link_settings->link_rate
++ * LINK_RATE_REF_FREQ_IN_KHZ;
++ /* TODO: check if undefined works */
++ cntl.color_depth = COLOR_DEPTH_UNDEFINED;
++
++ result = link_transmitter_control(enc10, &cntl);
++
++ if (result != BP_RESULT_OK) {
++ DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n",
++ __func__);
++ BREAK_TO_DEBUGGER();
++ }
++}
++
++/* enables DP PHY output in MST mode */
++void dcn10_link_encoder_enable_dp_mst_output(
++ struct link_encoder *enc,
++ const struct dc_link_settings *link_settings,
++ enum clock_source_id clock_source)
++{
++ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
++ struct bp_transmitter_control cntl = { 0 };
++ enum bp_result result;
++
++ /* Enable the PHY */
++
++ /* number_of_lanes is used for pixel clock adjust,
++ * but it's not passed to asic_control.
++ * We need to set number of lanes manually.
++ */
++ configure_encoder(enc10, link_settings);
++
++ cntl.action = TRANSMITTER_CONTROL_ENABLE;
++ cntl.engine_id = ENGINE_ID_UNKNOWN;
++ cntl.transmitter = enc10->base.transmitter;
++ cntl.pll_id = clock_source;
++ cntl.signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
++ cntl.lanes_number = link_settings->lane_count;
++ cntl.hpd_sel = enc10->base.hpd_source;
++ cntl.pixel_clock = link_settings->link_rate
++ * LINK_RATE_REF_FREQ_IN_KHZ;
++ /* TODO: check if undefined works */
++ cntl.color_depth = COLOR_DEPTH_UNDEFINED;
++
++ result = link_transmitter_control(enc10, &cntl);
++
++ if (result != BP_RESULT_OK) {
++ DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n",
++ __func__);
++ BREAK_TO_DEBUGGER();
++ }
++}
++/*
++ * @brief
++ * Disable transmitter and its encoder
++ */
++void dcn10_link_encoder_disable_output(
++ struct link_encoder *enc,
++ enum signal_type signal)
++{
++ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
++ struct bp_transmitter_control cntl = { 0 };
++ enum bp_result result;
++
++ if (!dcn10_is_dig_enabled(enc)) {
++ /* OF_SKIP_POWER_DOWN_INACTIVE_ENCODER */
++ return;
++ }
++ /* Power-down RX and disable GPU PHY should be paired.
++ * Disabling PHY without powering down RX may cause
++ * symbol lock loss, on which we will get DP Sink interrupt.
++ */
++
++ /* There is a case for the DP active dongles
++ * where we want to disable the PHY but keep RX powered,
++ * for those we need to ignore DP Sink interrupt
++ * by checking lane count that has been set
++ * on the last do_enable_output().
++ */
++
++ /* disable transmitter */
++ cntl.action = TRANSMITTER_CONTROL_DISABLE;
++ cntl.transmitter = enc10->base.transmitter;
++ cntl.hpd_sel = enc10->base.hpd_source;
++ cntl.signal = signal;
++ cntl.connector_obj_id = enc10->base.connector;
++
++ result = link_transmitter_control(enc10, &cntl);
++
++ if (result != BP_RESULT_OK) {
++ DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n",
++ __func__);
++ BREAK_TO_DEBUGGER();
++ return;
++ }
++
++ /* disable encoder */
++ if (dc_is_dp_signal(signal))
++ link_encoder_disable(enc10);
++}
++
++void dcn10_link_encoder_dp_set_lane_settings(
++ struct link_encoder *enc,
++ const struct link_training_settings *link_settings)
++{
++ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
++ union dpcd_training_lane_set training_lane_set = { { 0 } };
++ int32_t lane = 0;
++ struct bp_transmitter_control cntl = { 0 };
++
++ if (!link_settings) {
++ BREAK_TO_DEBUGGER();
++ return;
++ }
++
++ cntl.action = TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS;
++ cntl.transmitter = enc10->base.transmitter;
++ cntl.connector_obj_id = enc10->base.connector;
++ cntl.lanes_number = link_settings->link_settings.lane_count;
++ cntl.hpd_sel = enc10->base.hpd_source;
++ cntl.pixel_clock = link_settings->link_settings.link_rate *
++ LINK_RATE_REF_FREQ_IN_KHZ;
++
++ for (lane = 0; lane < link_settings->link_settings.lane_count; lane++) {
++ /* translate lane settings */
++
++ training_lane_set.bits.VOLTAGE_SWING_SET =
++ link_settings->lane_settings[lane].VOLTAGE_SWING;
++ training_lane_set.bits.PRE_EMPHASIS_SET =
++ link_settings->lane_settings[lane].PRE_EMPHASIS;
++
++ /* post cursor 2 setting only applies to HBR2 link rate */
++ if (link_settings->link_settings.link_rate == LINK_RATE_HIGH2) {
++ /* this is passed to VBIOS
++ * to program post cursor 2 level
++ */
++ training_lane_set.bits.POST_CURSOR2_SET =
++ link_settings->lane_settings[lane].POST_CURSOR2;
++ }
++
++ cntl.lane_select = lane;
++ cntl.lane_settings = training_lane_set.raw;
++
++ /* call VBIOS table to set voltage swing and pre-emphasis */
++ link_transmitter_control(enc10, &cntl);
++ }
++}
++
++/* set DP PHY test and training patterns */
++void dcn10_link_encoder_dp_set_phy_pattern(
++ struct link_encoder *enc,
++ const struct encoder_set_dp_phy_pattern_param *param)
++{
++ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
++
++ switch (param->dp_phy_pattern) {
++ case DP_TEST_PATTERN_TRAINING_PATTERN1:
++ dcn10_link_encoder_set_dp_phy_pattern_training_pattern(enc, 0);
++ break;
++ case DP_TEST_PATTERN_TRAINING_PATTERN2:
++ dcn10_link_encoder_set_dp_phy_pattern_training_pattern(enc, 1);
++ break;
++ case DP_TEST_PATTERN_TRAINING_PATTERN3:
++ dcn10_link_encoder_set_dp_phy_pattern_training_pattern(enc, 2);
++ break;
++ case DP_TEST_PATTERN_TRAINING_PATTERN4:
++ dcn10_link_encoder_set_dp_phy_pattern_training_pattern(enc, 3);
++ break;
++ case DP_TEST_PATTERN_D102:
++ set_dp_phy_pattern_d102(enc10);
++ break;
++ case DP_TEST_PATTERN_SYMBOL_ERROR:
++ set_dp_phy_pattern_symbol_error(enc10);
++ break;
++ case DP_TEST_PATTERN_PRBS7:
++ set_dp_phy_pattern_prbs7(enc10);
++ break;
++ case DP_TEST_PATTERN_80BIT_CUSTOM:
++ set_dp_phy_pattern_80bit_custom(
++ enc10, param->custom_pattern);
++ break;
++ case DP_TEST_PATTERN_CP2520_1:
++ set_dp_phy_pattern_hbr2_compliance_cp2520_2(enc10, 1);
++ break;
++ case DP_TEST_PATTERN_CP2520_2:
++ set_dp_phy_pattern_hbr2_compliance_cp2520_2(enc10, 2);
++ break;
++ case DP_TEST_PATTERN_CP2520_3:
++ set_dp_phy_pattern_hbr2_compliance_cp2520_2(enc10, 3);
++ break;
++ case DP_TEST_PATTERN_VIDEO_MODE: {
++ set_dp_phy_pattern_passthrough_mode(
++ enc10, param->dp_panel_mode);
++ break;
++ }
++
++ default:
++ /* invalid phy pattern */
++ ASSERT_CRITICAL(false);
++ break;
++ }
++}
++
++static void fill_stream_allocation_row_info(
++ const struct link_mst_stream_allocation *stream_allocation,
++ uint32_t *src,
++ uint32_t *slots)
++{
++ const struct stream_encoder *stream_enc = stream_allocation->stream_enc;
++
++ if (stream_enc) {
++ *src = stream_enc->id;
++ *slots = stream_allocation->slot_count;
++ } else {
++ *src = 0;
++ *slots = 0;
++ }
++}
++
++/* programs DP MST VC payload allocation */
++void dcn10_link_encoder_update_mst_stream_allocation_table(
++ struct link_encoder *enc,
++ const struct link_mst_stream_allocation_table *table)
++{
++ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
++ uint32_t value0 = 0;
++ uint32_t value1 = 0;
++ uint32_t value2 = 0;
++ uint32_t slots = 0;
++ uint32_t src = 0;
++ uint32_t retries = 0;
++
++ /* For CZ, there are only 3 pipes, so there are at most 3 virtual channels. */
++
++ /* --- Set MSE Stream Attribute -
++ * Setup VC Payload Table on Tx Side,
++ * Issue allocation change trigger
++ * to commit payload on both tx and rx side
++ */
++
++ /* we should clean-up table each time */
++
++ if (table->stream_count >= 1) {
++ fill_stream_allocation_row_info(
++ &table->stream_allocations[0],
++ &src,
++ &slots);
++ } else {
++ src = 0;
++ slots = 0;
++ }
++
++ REG_UPDATE_2(DP_MSE_SAT0,
++ DP_MSE_SAT_SRC0, src,
++ DP_MSE_SAT_SLOT_COUNT0, slots);
++
++ if (table->stream_count >= 2) {
++ fill_stream_allocation_row_info(
++ &table->stream_allocations[1],
++ &src,
++ &slots);
++ } else {
++ src = 0;
++ slots = 0;
++ }
++
++ REG_UPDATE_2(DP_MSE_SAT0,
++ DP_MSE_SAT_SRC1, src,
++ DP_MSE_SAT_SLOT_COUNT1, slots);
++
++ if (table->stream_count >= 3) {
++ fill_stream_allocation_row_info(
++ &table->stream_allocations[2],
++ &src,
++ &slots);
++ } else {
++ src = 0;
++ slots = 0;
++ }
++
++ REG_UPDATE_2(DP_MSE_SAT1,
++ DP_MSE_SAT_SRC2, src,
++ DP_MSE_SAT_SLOT_COUNT2, slots);
++
++ if (table->stream_count >= 4) {
++ fill_stream_allocation_row_info(
++ &table->stream_allocations[3],
++ &src,
++ &slots);
++ } else {
++ src = 0;
++ slots = 0;
++ }
++
++ REG_UPDATE_2(DP_MSE_SAT1,
++ DP_MSE_SAT_SRC3, src,
++ DP_MSE_SAT_SLOT_COUNT3, slots);
++
++ /* --- wait for transaction finish */
++
++ /* send allocation change trigger (ACT) ?
++ * this step first sends the ACT,
++ * then double buffers the SAT into the hardware
++ * making the new allocation active on the DP MST mode link
++ */
++
++ /* DP_MSE_SAT_UPDATE:
++ * 0 - No Action
++ * 1 - Update SAT with trigger
++ * 2 - Update SAT without trigger
++ */
++ REG_UPDATE(DP_MSE_SAT_UPDATE,
++ DP_MSE_SAT_UPDATE, 1);
++
++ /* wait for update to complete
++ * (i.e. DP_MSE_SAT_UPDATE field is reset to 0)
++ * then wait for the transmission
++ * of at least 16 MTP headers on immediate local link.
++ * i.e. DP_MSE_16_MTP_KEEPOUT field (read only) is reset to 0
++ * a value of 1 indicates that DP MST mode
++ * is in the 16 MTP keepout region after a VC has been added.
++ * MST stream bandwidth (VC rate) can be configured
++ * after this bit is cleared
++ */
++ do {
++ udelay(10);
++
++ value0 = REG_READ(DP_MSE_SAT_UPDATE);
++
++ REG_GET(DP_MSE_SAT_UPDATE,
++ DP_MSE_SAT_UPDATE, &value1);
++
++ REG_GET(DP_MSE_SAT_UPDATE,
++ DP_MSE_16_MTP_KEEPOUT, &value2);
++
++ /* bit field DP_MSE_SAT_UPDATE is set to 1 already */
++ if (!value1 && !value2)
++ break;
++ ++retries;
++ } while (retries < DP_MST_UPDATE_MAX_RETRY);
++}
++
++void dcn10_link_encoder_connect_dig_be_to_fe(
++ struct link_encoder *enc,
++ enum engine_id engine,
++ bool connect)
++{
++ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
++ uint32_t field;
++
++ if (engine != ENGINE_ID_UNKNOWN) {
++
++ REG_GET(DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, &field);
++
++ if (connect)
++ field |= get_frontend_source(engine);
++ else
++ field &= ~get_frontend_source(engine);
++
++ REG_UPDATE(DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, field);
++ }
++}
++
++
++#define HPD_REG(reg)\
++ (enc10->hpd_regs->reg)
++
++#define HPD_REG_READ(reg_name) \
++ dm_read_reg(CTX, HPD_REG(reg_name))
++
++#define HPD_REG_UPDATE_N(reg_name, n, ...) \
++ generic_reg_update_ex(CTX, \
++ HPD_REG(reg_name), \
++ HPD_REG_READ(reg_name), \
++ n, __VA_ARGS__)
++
++#define HPD_REG_UPDATE(reg_name, field, val) \
++ HPD_REG_UPDATE_N(reg_name, 1, \
++ FN(reg_name, field), val)
++
++void dcn10_link_encoder_enable_hpd(struct link_encoder *enc)
++{
++ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
++
++ HPD_REG_UPDATE(DC_HPD_CONTROL,
++ DC_HPD_EN, 1);
++}
++
++void dcn10_link_encoder_disable_hpd(struct link_encoder *enc)
++{
++ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
++
++ HPD_REG_UPDATE(DC_HPD_CONTROL,
++ DC_HPD_EN, 0);
++}
++
++
++#define AUX_REG(reg)\
++ (enc10->aux_regs->reg)
++
++#define AUX_REG_READ(reg_name) \
++ dm_read_reg(CTX, AUX_REG(reg_name))
++
++#define AUX_REG_UPDATE_N(reg_name, n, ...) \
++ generic_reg_update_ex(CTX, \
++ AUX_REG(reg_name), \
++ AUX_REG_READ(reg_name), \
++ n, __VA_ARGS__)
++
++#define AUX_REG_UPDATE(reg_name, field, val) \
++ AUX_REG_UPDATE_N(reg_name, 1, \
++ FN(reg_name, field), val)
++
++#define AUX_REG_UPDATE_2(reg, f1, v1, f2, v2) \
++ AUX_REG_UPDATE_N(reg, 2,\
++ FN(reg, f1), v1,\
++ FN(reg, f2), v2)
++
++static void aux_initialize(
++ struct dcn10_link_encoder *enc10)
++{
++ enum hpd_source_id hpd_source = enc10->base.hpd_source;
++
++ AUX_REG_UPDATE_2(AUX_CONTROL,
++ AUX_HPD_SEL, hpd_source,
++ AUX_LS_READ_EN, 0);
++
++ /* 1/4 window (the maximum allowed) */
++ AUX_REG_UPDATE(AUX_DPHY_RX_CONTROL0,
++ AUX_RX_RECEIVE_WINDOW, 1);
++}
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
+new file mode 100644
+index 0000000..2a97cdb
+--- /dev/null
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
+@@ -0,0 +1,330 @@
++/*
++ * Copyright 2012-15 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++#ifndef __DC_LINK_ENCODER__DCN10_H__
++#define __DC_LINK_ENCODER__DCN10_H__
++
++#include "link_encoder.h"
++
++#define TO_DCN10_LINK_ENC(link_encoder)\
++ container_of(link_encoder, struct dcn10_link_encoder, base)
++
++
++#define AUX_REG_LIST(id)\
++ SRI(AUX_CONTROL, DP_AUX, id), \
++ SRI(AUX_DPHY_RX_CONTROL0, DP_AUX, id)
++
++#define HPD_REG_LIST(id)\
++ SRI(DC_HPD_CONTROL, HPD, id)
++
++#define LE_DCN_COMMON_REG_LIST(id) \
++ SRI(DIG_BE_CNTL, DIG, id), \
++ SRI(DIG_BE_EN_CNTL, DIG, id), \
++ SRI(DP_CONFIG, DP, id), \
++ SRI(DP_DPHY_CNTL, DP, id), \
++ SRI(DP_DPHY_PRBS_CNTL, DP, id), \
++ SRI(DP_DPHY_SCRAM_CNTL, DP, id),\
++ SRI(DP_DPHY_SYM0, DP, id), \
++ SRI(DP_DPHY_SYM1, DP, id), \
++ SRI(DP_DPHY_SYM2, DP, id), \
++ SRI(DP_DPHY_TRAINING_PATTERN_SEL, DP, id), \
++ SRI(DP_LINK_CNTL, DP, id), \
++ SRI(DP_LINK_FRAMING_CNTL, DP, id), \
++ SRI(DP_MSE_SAT0, DP, id), \
++ SRI(DP_MSE_SAT1, DP, id), \
++ SRI(DP_MSE_SAT2, DP, id), \
++ SRI(DP_MSE_SAT_UPDATE, DP, id), \
++ SRI(DP_SEC_CNTL, DP, id), \
++ SRI(DP_VID_STREAM_CNTL, DP, id), \
++ SRI(DP_DPHY_FAST_TRAINING, DP, id), \
++ SRI(DP_SEC_CNTL1, DP, id), \
++ SRI(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \
++ SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
++ SRI(DP_DPHY_HBR2_PATTERN_CONTROL, DP, id)
++
++#define LE_DCN10_REG_LIST(id)\
++ LE_DCN_COMMON_REG_LIST(id)
++
++struct dcn10_link_enc_aux_registers {
++ uint32_t AUX_CONTROL;
++ uint32_t AUX_DPHY_RX_CONTROL0;
++};
++
++struct dcn10_link_enc_hpd_registers {
++ uint32_t DC_HPD_CONTROL;
++};
++
++struct dcn10_link_enc_registers {
++ uint32_t DIG_BE_CNTL;
++ uint32_t DIG_BE_EN_CNTL;
++ uint32_t DP_CONFIG;
++ uint32_t DP_DPHY_CNTL;
++ uint32_t DP_DPHY_INTERNAL_CTRL;
++ uint32_t DP_DPHY_PRBS_CNTL;
++ uint32_t DP_DPHY_SCRAM_CNTL;
++ uint32_t DP_DPHY_SYM0;
++ uint32_t DP_DPHY_SYM1;
++ uint32_t DP_DPHY_SYM2;
++ uint32_t DP_DPHY_TRAINING_PATTERN_SEL;
++ uint32_t DP_LINK_CNTL;
++ uint32_t DP_LINK_FRAMING_CNTL;
++ uint32_t DP_MSE_SAT0;
++ uint32_t DP_MSE_SAT1;
++ uint32_t DP_MSE_SAT2;
++ uint32_t DP_MSE_SAT_UPDATE;
++ uint32_t DP_SEC_CNTL;
++ uint32_t DP_VID_STREAM_CNTL;
++ uint32_t DP_DPHY_FAST_TRAINING;
++ uint32_t DP_DPHY_BS_SR_SWAP_CNTL;
++ uint32_t DP_DPHY_HBR2_PATTERN_CONTROL;
++ uint32_t DP_SEC_CNTL1;
++};
++
++#define LE_SF(reg_name, field_name, post_fix)\
++ .field_name = reg_name ## __ ## field_name ## post_fix
++
++#define LINK_ENCODER_MASK_SH_LIST_DCN10(mask_sh)\
++ LE_SF(DIG0_DIG_BE_EN_CNTL, DIG_ENABLE, mask_sh),\
++ LE_SF(DIG0_DIG_BE_CNTL, DIG_HPD_SELECT, mask_sh),\
++ LE_SF(DIG0_DIG_BE_CNTL, DIG_MODE, mask_sh),\
++ LE_SF(DIG0_DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, mask_sh),\
++ LE_SF(DP0_DP_DPHY_CNTL, DPHY_BYPASS, mask_sh),\
++ LE_SF(DP0_DP_DPHY_CNTL, DPHY_ATEST_SEL_LANE0, mask_sh),\
++ LE_SF(DP0_DP_DPHY_CNTL, DPHY_ATEST_SEL_LANE1, mask_sh),\
++ LE_SF(DP0_DP_DPHY_CNTL, DPHY_ATEST_SEL_LANE2, mask_sh),\
++ LE_SF(DP0_DP_DPHY_CNTL, DPHY_ATEST_SEL_LANE3, mask_sh),\
++ LE_SF(DP0_DP_DPHY_PRBS_CNTL, DPHY_PRBS_EN, mask_sh),\
++ LE_SF(DP0_DP_DPHY_PRBS_CNTL, DPHY_PRBS_SEL, mask_sh),\
++ LE_SF(DP0_DP_DPHY_SYM0, DPHY_SYM1, mask_sh),\
++ LE_SF(DP0_DP_DPHY_SYM0, DPHY_SYM2, mask_sh),\
++ LE_SF(DP0_DP_DPHY_SYM0, DPHY_SYM3, mask_sh),\
++ LE_SF(DP0_DP_DPHY_SYM1, DPHY_SYM4, mask_sh),\
++ LE_SF(DP0_DP_DPHY_SYM1, DPHY_SYM5, mask_sh),\
++ LE_SF(DP0_DP_DPHY_SYM1, DPHY_SYM6, mask_sh),\
++ LE_SF(DP0_DP_DPHY_SYM2, DPHY_SYM7, mask_sh),\
++ LE_SF(DP0_DP_DPHY_SYM2, DPHY_SYM8, mask_sh),\
++ LE_SF(DP0_DP_DPHY_SCRAM_CNTL, DPHY_SCRAMBLER_BS_COUNT, mask_sh),\
++ LE_SF(DP0_DP_DPHY_SCRAM_CNTL, DPHY_SCRAMBLER_ADVANCE, mask_sh),\
++ LE_SF(DP0_DP_DPHY_FAST_TRAINING, DPHY_RX_FAST_TRAINING_CAPABLE, mask_sh),\
++ LE_SF(DP0_DP_DPHY_BS_SR_SWAP_CNTL, DPHY_LOAD_BS_COUNT, mask_sh),\
++ LE_SF(DP0_DP_DPHY_TRAINING_PATTERN_SEL, DPHY_TRAINING_PATTERN_SEL, mask_sh),\
++ LE_SF(DP0_DP_DPHY_HBR2_PATTERN_CONTROL, DP_DPHY_HBR2_PATTERN_CONTROL, mask_sh),\
++ LE_SF(DP0_DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, mask_sh),\
++ LE_SF(DP0_DP_LINK_FRAMING_CNTL, DP_IDLE_BS_INTERVAL, mask_sh),\
++ LE_SF(DP0_DP_LINK_FRAMING_CNTL, DP_VBID_DISABLE, mask_sh),\
++ LE_SF(DP0_DP_LINK_FRAMING_CNTL, DP_VID_ENHANCED_FRAME_MODE, mask_sh),\
++ LE_SF(DP0_DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, mask_sh),\
++ LE_SF(DP0_DP_CONFIG, DP_UDI_LANES, mask_sh),\
++ LE_SF(DP0_DP_SEC_CNTL1, DP_SEC_GSP0_LINE_NUM, mask_sh),\
++ LE_SF(DP0_DP_SEC_CNTL1, DP_SEC_GSP0_PRIORITY, mask_sh),\
++ LE_SF(DP0_DP_MSE_SAT0, DP_MSE_SAT_SRC0, mask_sh),\
++ LE_SF(DP0_DP_MSE_SAT0, DP_MSE_SAT_SRC1, mask_sh),\
++ LE_SF(DP0_DP_MSE_SAT0, DP_MSE_SAT_SLOT_COUNT0, mask_sh),\
++ LE_SF(DP0_DP_MSE_SAT0, DP_MSE_SAT_SLOT_COUNT1, mask_sh),\
++ LE_SF(DP0_DP_MSE_SAT1, DP_MSE_SAT_SRC2, mask_sh),\
++ LE_SF(DP0_DP_MSE_SAT1, DP_MSE_SAT_SRC3, mask_sh),\
++ LE_SF(DP0_DP_MSE_SAT1, DP_MSE_SAT_SLOT_COUNT2, mask_sh),\
++ LE_SF(DP0_DP_MSE_SAT1, DP_MSE_SAT_SLOT_COUNT3, mask_sh),\
++ LE_SF(DP0_DP_MSE_SAT_UPDATE, DP_MSE_SAT_UPDATE, mask_sh),\
++ LE_SF(DP0_DP_MSE_SAT_UPDATE, DP_MSE_16_MTP_KEEPOUT, mask_sh),\
++ LE_SF(DP_AUX0_AUX_CONTROL, AUX_HPD_SEL, mask_sh),\
++ LE_SF(DP_AUX0_AUX_CONTROL, AUX_LS_READ_EN, mask_sh),\
++ LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL0, AUX_RX_RECEIVE_WINDOW, mask_sh),\
++ LE_SF(HPD0_DC_HPD_CONTROL, DC_HPD_EN, mask_sh)
++
++#define DCN_LINK_ENCODER_REG_FIELD_LIST(type) \
++ type DIG_ENABLE;\
++ type DIG_HPD_SELECT;\
++ type DIG_MODE;\
++ type DIG_FE_SOURCE_SELECT;\
++ type DPHY_BYPASS;\
++ type DPHY_ATEST_SEL_LANE0;\
++ type DPHY_ATEST_SEL_LANE1;\
++ type DPHY_ATEST_SEL_LANE2;\
++ type DPHY_ATEST_SEL_LANE3;\
++ type DPHY_PRBS_EN;\
++ type DPHY_PRBS_SEL;\
++ type DPHY_SYM1;\
++ type DPHY_SYM2;\
++ type DPHY_SYM3;\
++ type DPHY_SYM4;\
++ type DPHY_SYM5;\
++ type DPHY_SYM6;\
++ type DPHY_SYM7;\
++ type DPHY_SYM8;\
++ type DPHY_SCRAMBLER_BS_COUNT;\
++ type DPHY_SCRAMBLER_ADVANCE;\
++ type DPHY_RX_FAST_TRAINING_CAPABLE;\
++ type DPHY_LOAD_BS_COUNT;\
++ type DPHY_TRAINING_PATTERN_SEL;\
++ type DP_DPHY_HBR2_PATTERN_CONTROL;\
++ type DP_LINK_TRAINING_COMPLETE;\
++ type DP_IDLE_BS_INTERVAL;\
++ type DP_VBID_DISABLE;\
++ type DP_VID_ENHANCED_FRAME_MODE;\
++ type DP_VID_STREAM_ENABLE;\
++ type DP_UDI_LANES;\
++ type DP_SEC_GSP0_LINE_NUM;\
++ type DP_SEC_GSP0_PRIORITY;\
++ type DP_MSE_SAT_SRC0;\
++ type DP_MSE_SAT_SRC1;\
++ type DP_MSE_SAT_SRC2;\
++ type DP_MSE_SAT_SRC3;\
++ type DP_MSE_SAT_SLOT_COUNT0;\
++ type DP_MSE_SAT_SLOT_COUNT1;\
++ type DP_MSE_SAT_SLOT_COUNT2;\
++ type DP_MSE_SAT_SLOT_COUNT3;\
++ type DP_MSE_SAT_UPDATE;\
++ type DP_MSE_16_MTP_KEEPOUT;\
++ type AUX_HPD_SEL;\
++ type AUX_LS_READ_EN;\
++ type AUX_RX_RECEIVE_WINDOW;\
++ type DC_HPD_EN
++
++struct dcn10_link_enc_shift {
++ DCN_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
++};
++
++struct dcn10_link_enc_mask {
++ DCN_LINK_ENCODER_REG_FIELD_LIST(uint32_t);
++};
++
++struct dcn10_link_encoder {
++ struct link_encoder base;
++ const struct dcn10_link_enc_registers *link_regs;
++ const struct dcn10_link_enc_aux_registers *aux_regs;
++ const struct dcn10_link_enc_hpd_registers *hpd_regs;
++ const struct dcn10_link_enc_shift *link_shift;
++ const struct dcn10_link_enc_mask *link_mask;
++};
++
++
++void dcn10_link_encoder_construct(
++ struct dcn10_link_encoder *enc10,
++ const struct encoder_init_data *init_data,
++ const struct encoder_feature_support *enc_features,
++ const struct dcn10_link_enc_registers *link_regs,
++ const struct dcn10_link_enc_aux_registers *aux_regs,
++ const struct dcn10_link_enc_hpd_registers *hpd_regs,
++ const struct dcn10_link_enc_shift *link_shift,
++ const struct dcn10_link_enc_mask *link_mask);
++
++bool dcn10_link_encoder_validate_dvi_output(
++ const struct dcn10_link_encoder *enc10,
++ enum signal_type connector_signal,
++ enum signal_type signal,
++ const struct dc_crtc_timing *crtc_timing);
++
++bool dcn10_link_encoder_validate_rgb_output(
++ const struct dcn10_link_encoder *enc10,
++ const struct dc_crtc_timing *crtc_timing);
++
++bool dcn10_link_encoder_validate_dp_output(
++ const struct dcn10_link_encoder *enc10,
++ const struct dc_crtc_timing *crtc_timing);
++
++bool dcn10_link_encoder_validate_wireless_output(
++ const struct dcn10_link_encoder *enc10,
++ const struct dc_crtc_timing *crtc_timing);
++
++bool dcn10_link_encoder_validate_output_with_stream(
++ struct link_encoder *enc,
++ const struct dc_stream_state *stream);
++
++/****************** HW programming ************************/
++
++/* initialize HW */ /* why do we initialize aux in here? */
++void dcn10_link_encoder_hw_init(struct link_encoder *enc);
++
++void dcn10_link_encoder_destroy(struct link_encoder **enc);
++
++/* program DIG_MODE in DIG_BE */
++/* TODO can this be combined with enable_output? */
++void dcn10_link_encoder_setup(
++ struct link_encoder *enc,
++ enum signal_type signal);
++
++/* enables TMDS PHY output */
++/* TODO: still need depth or just pass in adjusted pixel clock? */
++void dcn10_link_encoder_enable_tmds_output(
++ struct link_encoder *enc,
++ enum clock_source_id clock_source,
++ enum dc_color_depth color_depth,
++ enum signal_type signal,
++ uint32_t pixel_clock);
++
++/* enables DP PHY output */
++void dcn10_link_encoder_enable_dp_output(
++ struct link_encoder *enc,
++ const struct dc_link_settings *link_settings,
++ enum clock_source_id clock_source);
++
++/* enables DP PHY output in MST mode */
++void dcn10_link_encoder_enable_dp_mst_output(
++ struct link_encoder *enc,
++ const struct dc_link_settings *link_settings,
++ enum clock_source_id clock_source);
++
++/* disable PHY output */
++void dcn10_link_encoder_disable_output(
++ struct link_encoder *enc,
++ enum signal_type signal);
++
++/* set DP lane settings */
++void dcn10_link_encoder_dp_set_lane_settings(
++ struct link_encoder *enc,
++ const struct link_training_settings *link_settings);
++
++void dcn10_link_encoder_dp_set_phy_pattern(
++ struct link_encoder *enc,
++ const struct encoder_set_dp_phy_pattern_param *param);
++
++/* programs DP MST VC payload allocation */
++void dcn10_link_encoder_update_mst_stream_allocation_table(
++ struct link_encoder *enc,
++ const struct link_mst_stream_allocation_table *table);
++
++void dcn10_link_encoder_connect_dig_be_to_fe(
++ struct link_encoder *enc,
++ enum engine_id engine,
++ bool connect);
++
++void dcn10_link_encoder_set_dp_phy_pattern_training_pattern(
++ struct link_encoder *enc,
++ uint32_t index);
++
++void dcn10_link_encoder_enable_hpd(struct link_encoder *enc);
++
++void dcn10_link_encoder_disable_hpd(struct link_encoder *enc);
++
++void dcn10_psr_program_dp_dphy_fast_training(struct link_encoder *enc,
++ bool exit_link_training_required);
++
++void dcn10_psr_program_secondary_packet(struct link_encoder *enc,
++ unsigned int sdp_transmit_line_num_deadline);
++
++bool dcn10_is_dig_enabled(struct link_encoder *enc);
++
++#endif /* __DC_LINK_ENCODER__DCN10_H__ */
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+index ace2e03..df5cb2d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+@@ -38,7 +38,7 @@
+ #include "dcn10/dcn10_hw_sequencer.h"
+ #include "dce110/dce110_hw_sequencer.h"
+ #include "dcn10/dcn10_opp.h"
+-#include "dce/dce_link_encoder.h"
++#include "dcn10/dcn10_link_encoder.h"
+ #include "dcn10/dcn10_stream_encoder.h"
+ #include "dce/dce_clocks.h"
+ #include "dce/dce_clock_source.h"
+@@ -214,13 +214,11 @@ static const struct dce_aduio_mask audio_mask = {
+ AUX_REG_LIST(id)\
+ }
+
+-static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = {
++static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+ aux_regs(2),
+- aux_regs(3),
+- aux_regs(4),
+- aux_regs(5)
++ aux_regs(3)
+ };
+
+ #define hpd_regs(id)\
+@@ -228,13 +226,11 @@ static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = {
+ HPD_REG_LIST(id)\
+ }
+
+-static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = {
++static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
+ hpd_regs(0),
+ hpd_regs(1),
+ hpd_regs(2),
+- hpd_regs(3),
+- hpd_regs(4),
+- hpd_regs(5)
++ hpd_regs(3)
+ };
+
+ #define link_regs(id)\
+@@ -243,14 +239,19 @@ static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = {
+ SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
+ }
+
+-static const struct dce110_link_enc_registers link_enc_regs[] = {
++static const struct dcn10_link_enc_registers link_enc_regs[] = {
+ link_regs(0),
+ link_regs(1),
+ link_regs(2),
+- link_regs(3),
+- link_regs(4),
+- link_regs(5),
+- link_regs(6),
++ link_regs(3)
++};
++
++static const struct dcn10_link_enc_shift le_shift = {
++ LINK_ENCODER_MASK_SH_LIST_DCN10(__SHIFT)
++};
++
++static const struct dcn10_link_enc_mask le_mask = {
++ LINK_ENCODER_MASK_SH_LIST_DCN10(_MASK)
+ };
+
+ #define ipp_regs(id)\
+@@ -583,20 +584,22 @@ static const struct encoder_feature_support link_enc_feature = {
+ struct link_encoder *dcn10_link_encoder_create(
+ const struct encoder_init_data *enc_init_data)
+ {
+- struct dce110_link_encoder *enc110 =
+- kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
++ struct dcn10_link_encoder *enc10 =
++ kzalloc(sizeof(struct dcn10_link_encoder), GFP_KERNEL);
+
+- if (!enc110)
++ if (!enc10)
+ return NULL;
+
+- dce110_link_encoder_construct(enc110,
++ dcn10_link_encoder_construct(enc10,
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_aux_regs[enc_init_data->channel - 1],
+- &link_enc_hpd_regs[enc_init_data->hpd_source]);
++ &link_enc_hpd_regs[enc_init_data->hpd_source],
++ &le_shift,
++ &le_mask);
+
+- return &enc110->base;
++ return &enc10->base;
+ }
+
+ struct clock_source *dcn10_clock_source_create(
+--
+2.7.4
+
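The header added above builds its per-instance register tables with an X-macro pattern: DCN_LINK_ENCODER_REG_FIELD_LIST declares one member per bit field, and LINK_ENCODER_MASK_SH_LIST_DCN10 fills the le_shift/le_mask tables used in dcn10_resource.c through LE_SF, which pastes the register name, the field name and a __SHIFT or _MASK suffix into one symbol from the generated register headers. A minimal stand-alone sketch of the same idiom follows; the register name, field name and numeric shift/mask values are placeholders for the example, not the real generated values.

	#include <stdint.h>
	#include <stdio.h>

	/* Placeholder symbols standing in for the generated asic register headers. */
	#define DIG0_DIG_BE_EN_CNTL__DIG_ENABLE__SHIFT 0x0
	#define DIG0_DIG_BE_EN_CNTL__DIG_ENABLE_MASK   0x00000001

	/* One member per field: uint8_t for shifts, uint32_t for masks. */
	#define REG_FIELD_LIST(type) \
		type DIG_ENABLE

	#define SF(reg_name, field_name, post_fix) \
		.field_name = reg_name ## __ ## field_name ## post_fix

	struct enc_shift { REG_FIELD_LIST(uint8_t); };
	struct enc_mask  { REG_FIELD_LIST(uint32_t); };

	static const struct enc_shift le_shift = { SF(DIG0_DIG_BE_EN_CNTL, DIG_ENABLE, __SHIFT) };
	static const struct enc_mask  le_mask  = { SF(DIG0_DIG_BE_EN_CNTL, DIG_ENABLE, _MASK) };

	int main(void)
	{
		uint32_t reg = 0;

		/* Roughly what REG_UPDATE(DIG_BE_EN_CNTL, DIG_ENABLE, 1) reduces to:
		 * clear the field with the mask, then shift the new value into place.
		 */
		reg = (reg & ~le_mask.DIG_ENABLE) |
		      ((1u << le_shift.DIG_ENABLE) & le_mask.DIG_ENABLE);

		printf("DIG_BE_EN_CNTL = 0x%08x\n", reg);
		return 0;
	}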
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4477-drm-amd-display-fix-memory-leaks.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4477-drm-amd-display-fix-memory-leaks.patch
new file mode 100644
index 00000000..0688d226
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4477-drm-amd-display-fix-memory-leaks.patch
@@ -0,0 +1,87 @@
+From 334870b8e1f253e99e09958f356667f7d1a85264 Mon Sep 17 00:00:00 2001
+From: Anthony Koo <Anthony.Koo@amd.com>
+Date: Fri, 27 Apr 2018 20:50:07 -0400
+Subject: [PATCH 4477/5725] drm/amd/display: fix memory leaks
+
+Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/modules/stats/stats.c | 24 +++++++++++++++--------
+ 1 file changed, 16 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/modules/stats/stats.c b/drivers/gpu/drm/amd/display/modules/stats/stats.c
+index fe9e4b3..3f7d47f 100644
+--- a/drivers/gpu/drm/amd/display/modules/stats/stats.c
++++ b/drivers/gpu/drm/amd/display/modules/stats/stats.c
+@@ -115,12 +115,12 @@ struct mod_stats *mod_stats_create(struct dc *dc)
+ unsigned int reg_data;
+ int i = 0;
+
++ if (dc == NULL)
++ goto fail_construct;
++
+ core_stats = kzalloc(sizeof(struct core_stats), GFP_KERNEL);
+
+ if (core_stats == NULL)
+- goto fail_alloc_context;
+-
+- if (dc == NULL)
+ goto fail_construct;
+
+ core_stats->dc = dc;
+@@ -146,6 +146,8 @@ struct mod_stats *mod_stats_create(struct dc *dc)
+ core_stats->entries,
+ GFP_KERNEL);
+
++ if (core_stats->time == NULL)
++ goto fail_construct_time;
+
+ core_stats->event_entries = DAL_STATS_EVENT_ENTRIES_DEFAULT;
+ core_stats->events = kzalloc(
+@@ -153,13 +155,13 @@ struct mod_stats *mod_stats_create(struct dc *dc)
+ core_stats->event_entries,
+ GFP_KERNEL);
+
++ if (core_stats->events == NULL)
++ goto fail_construct_events;
++
+ } else {
+ core_stats->entries = 0;
+ }
+
+- if (core_stats->time == NULL)
+- goto fail_construct;
+-
+ /* Purposely leave index 0 unused so we don't need special logic to
+ * handle calculation cases that depend on previous flip data.
+ */
+@@ -171,10 +173,13 @@ struct mod_stats *mod_stats_create(struct dc *dc)
+
+ return &core_stats->public;
+
+-fail_construct:
++fail_construct_events:
++ kfree(core_stats->time);
++
++fail_construct_time:
+ kfree(core_stats);
+
+-fail_alloc_context:
++fail_construct:
+ return NULL;
+ }
+
+@@ -186,6 +191,9 @@ void mod_stats_destroy(struct mod_stats *mod_stats)
+ if (core_stats->time != NULL)
+ kfree(core_stats->time);
+
++ if (core_stats->events != NULL)
++ kfree(core_stats->events);
++
+ kfree(core_stats);
+ }
+ }
+--
+2.7.4
+
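The fix above reorders the error paths in mod_stats_create() so that each failure label frees exactly what was allocated before it, in reverse order. A minimal sketch of that staged-cleanup idiom, with a made-up stats struct rather than the driver's core_stats:

	#include <stdlib.h>

	struct stats {
		unsigned long *time;
		unsigned long *events;
	};

	static struct stats *stats_create(size_t entries)
	{
		struct stats *s = calloc(1, sizeof(*s));

		if (!s)
			goto fail_alloc;

		s->time = calloc(entries, sizeof(*s->time));
		if (!s->time)
			goto fail_time;

		s->events = calloc(entries, sizeof(*s->events));
		if (!s->events)
			goto fail_events;

		return s;

	fail_events:
		free(s->time);	/* undo only what was already allocated */
	fail_time:
		free(s);
	fail_alloc:
		return NULL;
	}

	int main(void)
	{
		struct stats *s = stats_create(64);

		if (s) {
			free(s->events);
			free(s->time);
			free(s);
		}
		return 0;
	}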
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4478-drm-amd-display-Clear-connector-s-edid-pointer.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4478-drm-amd-display-Clear-connector-s-edid-pointer.patch
new file mode 100644
index 00000000..9375c247
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4478-drm-amd-display-Clear-connector-s-edid-pointer.patch
@@ -0,0 +1,32 @@
+From 7d9bb56bda9b8157be16a50620821ffc2134d2c5 Mon Sep 17 00:00:00 2001
+From: Mikita Lipski <mikita.lipski@amd.com>
+Date: Fri, 27 Apr 2018 09:09:52 -0400
+Subject: [PATCH 4478/5725] drm/amd/display: Clear connector's edid pointer
+
+Clear the connector's edid pointer on connector update when unplugging
+the display.
+
+Fix poisoned EDID data when hotplugging on a previously used connector.
+
+Signed-off-by: Mikita Lipski <mikita.lipski@amd.com>
+Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
+Cc: stable@vger.kernel.org
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index b37005e..a796d56 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -907,6 +907,7 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
+ drm_mode_connector_update_edid_property(connector, NULL);
+ aconnector->num_modes = 0;
+ aconnector->dc_sink = NULL;
++ aconnector->edid = NULL;
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4479-drm-amd-pp-Fix-build-warning-in-vegam.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4479-drm-amd-pp-Fix-build-warning-in-vegam.patch
new file mode 100644
index 00000000..772c082e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4479-drm-amd-pp-Fix-build-warning-in-vegam.patch
@@ -0,0 +1,48 @@
+From 1af886baae76311596b6fc693339c06fa2dc0cb8 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Thu, 17 May 2018 20:21:42 +0800
+Subject: [PATCH 4479/5725] drm/amd/pp: Fix build warning in vegam
+
+warning: missing braces around initializer [-Wmissing-braces]
+
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c | 2 +-
+ drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c | 4 +++-
+ 2 files changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+index 9c7625c..c7927595 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+@@ -320,7 +320,7 @@ int atomctrl_get_memory_pll_dividers_ai(struct pp_hwmgr *hwmgr,
+ pp_atomctrl_memory_clock_param_ai *mpll_param)
+ {
+ struct amdgpu_device *adev = hwmgr->adev;
+- COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_3 mpll_parameters = {0};
++ COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_3 mpll_parameters = {{0}, 0, 0};
+ int result;
+
+ mpll_parameters.ulClock.ulClock = cpu_to_le32(clock_value);
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
+index c9a5633..a40f714 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
+@@ -1366,10 +1366,12 @@ static int vegam_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
+ {
+ struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+- struct SMU75_Discrete_MCArbDramTimingTable arb_regs = {0};
++ struct SMU75_Discrete_MCArbDramTimingTable arb_regs;
+ uint32_t i, j;
+ int result = 0;
+
++ memset(&arb_regs, 0, sizeof(SMU75_Discrete_MCArbDramTimingTable));
++
+ for (i = 0; i < hw_data->dpm_table.sclk_table.count; i++) {
+ for (j = 0; j < hw_data->dpm_table.mclk_table.count; j++) {
+ result = vegam_populate_memory_timing_parameters(hwmgr,
+--
+2.7.4
+
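The warning silenced above comes from initializing a struct whose first member is itself an aggregate with a plain {0}, which some GCC versions flag with -Wmissing-braces. A small illustration of the two fixes the patch uses; the struct layout is a stand-in, not the real COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_3 or SMU75_Discrete_MCArbDramTimingTable definition.

	#include <string.h>
	#include <stdio.h>

	struct clock_param {
		struct { unsigned int ulClock; } ulClock; /* aggregate first member */
		unsigned char ucPostDiv;
		unsigned char ucReserved;
	};

	int main(void)
	{
		/* struct clock_param a = {0};       -Wmissing-braces on some GCC versions */
		struct clock_param b = {{0}, 0, 0};  /* fully braced, as in ppatomctrl.c   */
		struct clock_param c;

		memset(&c, 0, sizeof(c));            /* the vegam_smumgr.c approach         */
		printf("%u %u\n", b.ulClock.ulClock, c.ulClock.ulClock);
		return 0;
	}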
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4480-drm-amdgpu-fix-insert-nop-for-VCN-decode-ring.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4480-drm-amdgpu-fix-insert-nop-for-VCN-decode-ring.patch
new file mode 100644
index 00000000..f3c249bc
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4480-drm-amdgpu-fix-insert-nop-for-VCN-decode-ring.patch
@@ -0,0 +1,63 @@
+From 5533e9bfa2a352f2add239f6d516704183319691 Mon Sep 17 00:00:00 2001
+From: Leo Liu <leo.liu@amd.com>
+Date: Thu, 17 May 2018 13:31:49 -0400
+Subject: [PATCH 4480/5725] drm/amdgpu: fix insert nop for VCN decode ring
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+NO_OP register should be written to 0
+
+Signed-off-by: Leo Liu <leo.liu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index bc00178..c8db6ad 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -1085,14 +1085,17 @@ static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
+ return 0;
+ }
+
+-static void vcn_v1_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
++static void vcn_v1_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+ {
+- int i;
+ struct amdgpu_device *adev = ring->adev;
++ int i;
+
+- for (i = 0; i < count; i++)
+- amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0));
++ WARN_ON(ring->wptr % 2 || count % 2);
+
++ for (i = 0; i < count / 2; i++) {
++ amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0));
++ amdgpu_ring_write(ring, 0);
++ }
+ }
+
+
+@@ -1119,7 +1122,6 @@ static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
+ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_VCN_DEC,
+ .align_mask = 0xf,
+- .nop = PACKET0(0x81ff, 0),
+ .support_64bit_ptrs = false,
+ .vmhub = AMDGPU_MMHUB,
+ .get_rptr = vcn_v1_0_dec_ring_get_rptr,
+@@ -1139,7 +1141,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
+ .emit_hdp_invalidate = vcn_v1_0_dec_ring_emit_hdp_invalidate,
+ .test_ring = amdgpu_vcn_dec_ring_test_ring,
+ .test_ib = amdgpu_vcn_dec_ring_test_ib,
+- .insert_nop = vcn_v1_0_ring_insert_nop,
++ .insert_nop = vcn_v1_0_dec_ring_insert_nop,
+ .insert_start = vcn_v1_0_dec_ring_insert_start,
+ .insert_end = vcn_v1_0_dec_ring_insert_end,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+--
+2.7.4
+
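The same fix is repeated for the UVD 7/6/5/4.2 rings in the patches that follow: on these rings a register write occupies two dwords (a PACKET0-style header naming the register, then the value), so NOP padding of count dwords has to be emitted as count/2 (header, 0) pairs, and the WARN_ON catches odd write pointers or counts. A stand-alone sketch of that pairing, with a simplified header encoding and a plain array standing in for the ring buffer:

	#include <stdint.h>
	#include <assert.h>
	#include <stdio.h>

	#define NO_OP_REG  0x81ff  /* register offset used only for this example */

	/* Simplified stand-in for the driver's PACKET0() header encoding. */
	static uint32_t pkt0_hdr(uint32_t reg)
	{
		return reg & 0xffff;
	}

	static void insert_nop(uint32_t *ring, uint32_t *wptr, uint32_t count)
	{
		uint32_t i;

		/* mirrors the WARN_ON in the patch: stay dword-pair aligned */
		assert(!(*wptr % 2) && !(count % 2));

		for (i = 0; i < count / 2; i++) {
			ring[(*wptr)++] = pkt0_hdr(NO_OP_REG); /* register header   */
			ring[(*wptr)++] = 0;                   /* NO_OP value is 0  */
		}
	}

	int main(void)
	{
		uint32_t ring[16] = { 0 };
		uint32_t wptr = 0;

		insert_nop(ring, &wptr, 6); /* emits 3 (header, 0) pairs */
		printf("wptr = %u\n", wptr);
		return 0;
	}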
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4481-drm-amdgpu-fix-insert-nop-for-UVD7-ring.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4481-drm-amdgpu-fix-insert-nop-for-UVD7-ring.patch
new file mode 100644
index 00000000..f396d58a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4481-drm-amdgpu-fix-insert-nop-for-UVD7-ring.patch
@@ -0,0 +1,51 @@
+From e587fa5662d5616531487a5b9b527f8cf7316e87 Mon Sep 17 00:00:00 2001
+From: Leo Liu <leo.liu@amd.com>
+Date: Thu, 17 May 2018 13:37:50 -0400
+Subject: [PATCH 4481/5725] drm/amdgpu: fix insert nop for UVD7 ring
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+NO_OP register should be written to 0
+
+Signed-off-by: Leo Liu <leo.liu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+index a0080d7..78b5111 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+@@ -1346,12 +1346,15 @@ static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+
+ static void uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+ {
+- int i;
+ struct amdgpu_device *adev = ring->adev;
++ int i;
+
+- for (i = 0; i < count; i++)
+- amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_NO_OP), 0));
++ WARN_ON(ring->wptr % 2 || count % 2);
+
++ for (i = 0; i < count / 2; i++) {
++ amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_NO_OP), 0));
++ amdgpu_ring_write(ring, 0);
++ }
+ }
+
+ static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
+@@ -1731,7 +1734,6 @@ const struct amd_ip_funcs uvd_v7_0_ip_funcs = {
+ static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_UVD,
+ .align_mask = 0xf,
+- .nop = PACKET0(0x81ff, 0),
+ .support_64bit_ptrs = false,
+ .vmhub = AMDGPU_MMHUB,
+ .get_rptr = uvd_v7_0_ring_get_rptr,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4482-drm-amdgpu-fix-insert-nop-for-UVD6-ring.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4482-drm-amdgpu-fix-insert-nop-for-UVD6-ring.patch
new file mode 100644
index 00000000..f9cdbae5
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4482-drm-amdgpu-fix-insert-nop-for-UVD6-ring.patch
@@ -0,0 +1,60 @@
+From 809c95b389309e8b558b7b42271cb1eb1cb52b50 Mon Sep 17 00:00:00 2001
+From: Leo Liu <leo.liu@amd.com>
+Date: Thu, 17 May 2018 13:44:28 -0400
+Subject: [PATCH 4482/5725] drm/amdgpu: fix insert nop for UVD6 ring
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+NO_OP register should be written to 0
+
+Signed-off-by: Leo Liu <leo.liu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 15 +++++++++++++--
+ 1 file changed, 13 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+index 8ce51946..37bb32b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+@@ -1116,6 +1116,18 @@ static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+ amdgpu_ring_write(ring, 0xE);
+ }
+
++static void uvd_v6_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
++{
++ int i;
++
++ WARN_ON(ring->wptr % 2 || count % 2);
++
++ for (i = 0; i < count / 2; i++) {
++ amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
++ amdgpu_ring_write(ring, 0);
++ }
++}
++
+ static void uvd_v6_0_enc_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+ {
+ uint32_t seq = ring->fence_drv.sync_seq;
+@@ -1548,7 +1560,6 @@ static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
+ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
+ .type = AMDGPU_RING_TYPE_UVD,
+ .align_mask = 0xf,
+- .nop = PACKET0(mmUVD_NO_OP, 0),
+ .support_64bit_ptrs = false,
+ .get_rptr = uvd_v6_0_ring_get_rptr,
+ .get_wptr = uvd_v6_0_ring_get_wptr,
+@@ -1567,7 +1578,7 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
+ .emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
+ .test_ring = uvd_v6_0_ring_test_ring,
+ .test_ib = amdgpu_uvd_ring_test_ib,
+- .insert_nop = amdgpu_ring_insert_nop,
++ .insert_nop = uvd_v6_0_ring_insert_nop,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_uvd_ring_begin_use,
+ .end_use = amdgpu_uvd_ring_end_use,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4483-drm-amdgpu-fix-insert-nop-for-UVD5-ring.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4483-drm-amdgpu-fix-insert-nop-for-UVD5-ring.patch
new file mode 100644
index 00000000..ce0a1b7c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4483-drm-amdgpu-fix-insert-nop-for-UVD5-ring.patch
@@ -0,0 +1,60 @@
+From 08fee9e5fc38ce4a48413a2716d689510b91a8d1 Mon Sep 17 00:00:00 2001
+From: Leo Liu <leo.liu@amd.com>
+Date: Thu, 17 May 2018 13:52:00 -0400
+Subject: [PATCH 4483/5725] drm/amdgpu: fix insert nop for UVD5 ring
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+NO_OP register should be written to 0
+
+Signed-off-by: Leo Liu <leo.liu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 15 +++++++++++++--
+ 1 file changed, 13 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+index 01810f2..693944f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+@@ -567,6 +567,18 @@ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
+ amdgpu_ring_write(ring, ib->length_dw);
+ }
+
++static void uvd_v5_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
++{
++ int i;
++
++ WARN_ON(ring->wptr % 2 || count % 2);
++
++ for (i = 0; i < count / 2; i++) {
++ amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
++ amdgpu_ring_write(ring, 0);
++ }
++}
++
+ static bool uvd_v5_0_is_idle(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+@@ -867,7 +879,6 @@ static const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
+ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_UVD,
+ .align_mask = 0xf,
+- .nop = PACKET0(mmUVD_NO_OP, 0),
+ .support_64bit_ptrs = false,
+ .get_rptr = uvd_v5_0_ring_get_rptr,
+ .get_wptr = uvd_v5_0_ring_get_wptr,
+@@ -884,7 +895,7 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
+ .emit_hdp_invalidate = uvd_v5_0_ring_emit_hdp_invalidate,
+ .test_ring = uvd_v5_0_ring_test_ring,
+ .test_ib = amdgpu_uvd_ring_test_ib,
+- .insert_nop = amdgpu_ring_insert_nop,
++ .insert_nop = uvd_v5_0_ring_insert_nop,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_uvd_ring_begin_use,
+ .end_use = amdgpu_uvd_ring_end_use,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4484-drm-amdgpu-fix-insert-nop-for-UVD4.2-ring.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4484-drm-amdgpu-fix-insert-nop-for-UVD4.2-ring.patch
new file mode 100644
index 00000000..eac551e7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4484-drm-amdgpu-fix-insert-nop-for-UVD4.2-ring.patch
@@ -0,0 +1,60 @@
+From f2dfc71461d4a2b779f56ccfcf295bad73cc1e41 Mon Sep 17 00:00:00 2001
+From: Leo Liu <leo.liu@amd.com>
+Date: Thu, 17 May 2018 13:54:21 -0400
+Subject: [PATCH 4484/5725] drm/amdgpu: fix insert nop for UVD4.2 ring
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+NO_OP register should be written to 0
+
+Signed-off-by: Leo Liu <leo.liu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 15 +++++++++++++--
+ 1 file changed, 13 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+index 4ee0c10..d2f6caa 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+@@ -550,6 +550,18 @@ static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
+ amdgpu_ring_write(ring, ib->length_dw);
+ }
+
++static void uvd_v4_2_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
++{
++ int i;
++
++ WARN_ON(ring->wptr % 2 || count % 2);
++
++ for (i = 0; i < count / 2; i++) {
++ amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
++ amdgpu_ring_write(ring, 0);
++ }
++}
++
+ /**
+ * uvd_v4_2_mc_resume - memory controller programming
+ *
+@@ -759,7 +771,6 @@ static const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
+ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_UVD,
+ .align_mask = 0xf,
+- .nop = PACKET0(mmUVD_NO_OP, 0),
+ .support_64bit_ptrs = false,
+ .get_rptr = uvd_v4_2_ring_get_rptr,
+ .get_wptr = uvd_v4_2_ring_get_wptr,
+@@ -776,7 +787,7 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
+ .emit_hdp_invalidate = uvd_v4_2_ring_emit_hdp_invalidate,
+ .test_ring = uvd_v4_2_ring_test_ring,
+ .test_ib = amdgpu_uvd_ring_test_ib,
+- .insert_nop = amdgpu_ring_insert_nop,
++ .insert_nop = uvd_v4_2_ring_insert_nop,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_uvd_ring_begin_use,
+ .end_use = amdgpu_uvd_ring_end_use,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4485-Remove-calls-to-suspend-resume-atomic-helpers-from-a.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4485-Remove-calls-to-suspend-resume-atomic-helpers-from-a.patch
new file mode 100644
index 00000000..0e762786
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4485-Remove-calls-to-suspend-resume-atomic-helpers-from-a.patch
@@ -0,0 +1,55 @@
+From 5d4c992cd144c786782ba7bcee5382655e815a28 Mon Sep 17 00:00:00 2001
+From: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Date: Thu, 17 May 2018 11:18:34 -0400
+Subject: [PATCH 4485/5725] Remove calls to suspend/resume atomic helpers from
+ amdgpu_device_gpu_recover.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+First, the helpers are already called from the display code via the amd_ip_funcs.suspend/resume hooks.
+Second, the place in amdgpu_device_gpu_recover where they are called is wrong for GPU stalls, since
+it is BEFORE we cancel and force completion of all in-flight jobs that were not yet processed.
+So, as Bas pointed out in the ticket, we end up waiting for a fence in amdgpu_pm_compute_clocks, but
+the pipe is hung, so we deadlock.
+
+Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=106500
+Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 10 ++--------
+ 1 file changed, 2 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 8267990..0adc774 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -3271,9 +3271,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+
+ /* block TTM */
+ resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
+- /* store modesetting */
+- if (amdgpu_device_has_dc_support(adev))
+- state = drm_atomic_helper_suspend(adev->ddev);
+
+ /* block all schedulers and reset given job's ring */
+ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+@@ -3313,11 +3310,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+
+ kthread_unpark(adev->rings[i]->sched.thread);
+ }
+-
+- if (amdgpu_device_has_dc_support(adev)) {
+- if (drm_atomic_helper_resume(adev->ddev, state))
+- dev_info(adev->dev, "drm resume failed:%d\n", r);
+- } else {
++
++ if (!amdgpu_device_has_dc_support(adev)) {
+ drm_helper_resume_force_mode(adev->ddev);
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4486-Revert-drm-amdgpu-vg20-Restruct-uvd.idle_work-to-sup.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4486-Revert-drm-amdgpu-vg20-Restruct-uvd.idle_work-to-sup.patch
new file mode 100644
index 00000000..f1dd533f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4486-Revert-drm-amdgpu-vg20-Restruct-uvd.idle_work-to-sup.patch
@@ -0,0 +1,127 @@
+From df8da20dd800ad6b5cc1c6016822fcefec44882c Mon Sep 17 00:00:00 2001
+From: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+Date: Wed, 9 Jan 2019 19:34:04 +0530
+Subject: [PATCH 4486/5725] Revert "drm/amdgpu/vg20:Restruct uvd.idle_work to
+ support multiple instance (v2)"
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This reverts commit 4f7b8507bb4ba19f994e0d72eedd6029961be402.
+
+We don't need separate idle work handles for UVD 7.2. Both instances are
+driven by the same clock and power.
+
+Reviewed-by: James Zhu <James.Zhu@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 17 ++++++++---------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h | 7 +------
+ 2 files changed, 9 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+index 4ab11bb..d347d63 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+@@ -130,6 +130,8 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
+ unsigned version_major, version_minor, family_id;
+ int i, j, r;
+
++ INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, amdgpu_uvd_idle_work_handler);
++
+ switch (adev->asic_type) {
+ #ifdef CONFIG_DRM_AMDGPU_CIK
+ case CHIP_BONAIRE:
+@@ -236,8 +238,6 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
+ bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
+
+ for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+- adev->uvd.inst[j].delayed_work.ip_instance = j;
+- INIT_DELAYED_WORK(&adev->uvd.inst[j].delayed_work.idle_work, amdgpu_uvd_idle_work_handler);
+
+ r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst[j].vcpu_bo,
+@@ -318,7 +318,7 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
+ if (adev->uvd.inst[j].vcpu_bo == NULL)
+ continue;
+
+- cancel_delayed_work_sync(&adev->uvd.inst[j].delayed_work.idle_work);
++ cancel_delayed_work_sync(&adev->uvd.inst[j].idle_work);
+
+ /* only valid for physical mode */
+ if (adev->asic_type < CHIP_POLARIS10) {
+@@ -1146,10 +1146,9 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+
+ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
+ {
+- struct amdgpu_delayed_work *my_work = (struct amdgpu_delayed_work *)work;
+ struct amdgpu_device *adev =
+- container_of(work, struct amdgpu_device, uvd.inst[my_work->ip_instance].delayed_work.idle_work.work);
+- unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.inst[my_work->ip_instance].ring);
++ container_of(work, struct amdgpu_device, uvd.inst->idle_work.work);
++ unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.inst->ring);
+
+ if (fences == 0) {
+ if (adev->pm.dpm_enabled) {
+@@ -1163,7 +1162,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
+ AMD_CG_STATE_GATE);
+ }
+ } else {
+- schedule_delayed_work(&adev->uvd.inst[my_work->ip_instance].delayed_work.idle_work, UVD_IDLE_TIMEOUT);
++ schedule_delayed_work(&adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
+ }
+ }
+
+@@ -1175,7 +1174,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
+ if (amdgpu_sriov_vf(adev))
+ return;
+
+- set_clocks = !cancel_delayed_work_sync(&adev->uvd.inst[ring->me].delayed_work.idle_work);
++ set_clocks = !cancel_delayed_work_sync(&adev->uvd.inst->idle_work);
+ if (set_clocks) {
+ if (adev->pm.dpm_enabled) {
+ amdgpu_dpm_enable_uvd(adev, true);
+@@ -1192,7 +1191,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
+ void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
+ {
+ if (!amdgpu_sriov_vf(ring->adev))
+- schedule_delayed_work(&ring->adev->uvd.inst[ring->me].delayed_work.idle_work, UVD_IDLE_TIMEOUT);
++ schedule_delayed_work(&ring->adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
+ }
+
+ /**
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+index 7801eb8..b1579fb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+@@ -37,11 +37,6 @@
+ (AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(((const struct common_firmware_header *)(adev)->uvd.fw->data)->ucode_size_bytes) + \
+ 8) - AMDGPU_UVD_FIRMWARE_OFFSET)
+
+-struct amdgpu_delayed_work{
+- struct delayed_work idle_work;
+- unsigned ip_instance;
+-};
+-
+ struct amdgpu_uvd_inst {
+ struct amdgpu_bo *vcpu_bo;
+ void *cpu_addr;
+@@ -49,12 +44,12 @@ struct amdgpu_uvd_inst {
+ void *saved_bo;
+ atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
+ struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
++ struct delayed_work idle_work;
+ struct amdgpu_ring ring;
+ struct amdgpu_ring ring_enc[AMDGPU_MAX_UVD_ENC_RINGS];
+ struct amdgpu_irq_src irq;
+ struct drm_sched_entity entity;
+ struct drm_sched_entity entity_enc;
+- struct amdgpu_delayed_work delayed_work;
+ uint32_t srbm_soft_reset;
+ };
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4487-drm-amdgpu-count-fences-from-all-uvd-instances-in-id.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4487-drm-amdgpu-count-fences-from-all-uvd-instances-in-id.patch
new file mode 100644
index 00000000..39a2cf1e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4487-drm-amdgpu-count-fences-from-all-uvd-instances-in-id.patch
@@ -0,0 +1,39 @@
+From 9a88796fd12958ccb4fa811dad3eaf8ffc9461cf Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 17 May 2018 12:33:34 -0500
+Subject: [PATCH 4487/5725] drm/amdgpu: count fences from all uvd instances in
+ idle handler
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Current multi-UVD hardware uses a single clock and power source
+so handle all instances in the idle handler.
+
+Reviewed-by: James Zhu <James.Zhu@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+index d347d63..ee80a90 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+@@ -1148,7 +1148,11 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
+ {
+ struct amdgpu_device *adev =
+ container_of(work, struct amdgpu_device, uvd.inst->idle_work.work);
+- unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.inst->ring);
++ unsigned fences = 0, i;
++
++ for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
++ fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring);
++ }
+
+ if (fences == 0) {
+ if (adev->pm.dpm_enabled) {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4488-drm-amdgpu-Take-uvd-encode-rings-into-account-in-idl.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4488-drm-amdgpu-Take-uvd-encode-rings-into-account-in-idl.patch
new file mode 100644
index 00000000..89bf1e3a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4488-drm-amdgpu-Take-uvd-encode-rings-into-account-in-idl.patch
@@ -0,0 +1,42 @@
+From a275abdf042926fbaac0084ac64a723af8243864 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 17 May 2018 12:45:52 -0500
+Subject: [PATCH 4488/5725] drm/amdgpu: Take uvd encode rings into account in
+ idle work (v2)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Take the encode rings into account in the idle work handler.
+
+v2: fix typo: s/num_uvd_inst/num_enc_rings/
+
+Reviewed-by: James Zhu <James.Zhu@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+index ee80a90..aadc494 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+@@ -1148,10 +1148,13 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
+ {
+ struct amdgpu_device *adev =
+ container_of(work, struct amdgpu_device, uvd.inst->idle_work.work);
+- unsigned fences = 0, i;
++ unsigned fences = 0, i, j;
+
+ for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+ fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring);
++ for (j = 0; j < adev->uvd.num_enc_rings; ++j) {
++ fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]);
++ }
+ }
+
+ if (fences == 0) {
+--
+2.7.4
+
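Taken together with the previous patch, the idle handler now sums outstanding fences across every UVD instance and each of its encode rings before deciding to gate clocks. A consolidated sketch of that accounting, assuming the field names used in the hunks above (an illustration only, not an additional patch):

/* Count fences still outstanding on all UVD decode and encode rings;
 * a result of 0 means the block is idle and may be clock/power gated.
 */
static unsigned int uvd_outstanding_fences(struct amdgpu_device *adev)
{
	unsigned int fences = 0, i, j;

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring);
		for (j = 0; j < adev->uvd.num_enc_rings; ++j)
			fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]);
	}
	return fences;
}

The next patch applies the same idea to the VCN idle handler (decode ring plus vcn.num_enc_rings encode rings).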
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4489-drm-amdgpu-Take-vcn-encode-rings-into-account-in-idl.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4489-drm-amdgpu-Take-vcn-encode-rings-into-account-in-idl.patch
new file mode 100644
index 00000000..e7f17bbf
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4489-drm-amdgpu-Take-vcn-encode-rings-into-account-in-idl.patch
@@ -0,0 +1,36 @@
+From 06219f25742fa32e4eb5822e0eaa1752804ebc42 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 17 May 2018 13:03:05 -0500
+Subject: [PATCH 4489/5725] drm/amdgpu: Take vcn encode rings into account in
+ idle work
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Take the encode rings into account in the idle work handler.
+
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+index be15303..ab4aa04 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+@@ -208,6 +208,11 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
+ struct amdgpu_device *adev =
+ container_of(work, struct amdgpu_device, vcn.idle_work.work);
+ unsigned fences = amdgpu_fence_count_emitted(&adev->vcn.ring_dec);
++ unsigned i;
++
++ for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
++ fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
++ }
+
+ if (fences == 0) {
+ if (adev->pm.dpm_enabled) {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4490-drm-amdkfd-Fix-kernel-queue-64-bit-doorbell-offset-c.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4490-drm-amdkfd-Fix-kernel-queue-64-bit-doorbell-offset-c.patch
new file mode 100644
index 00000000..1ea2f7b0
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4490-drm-amdkfd-Fix-kernel-queue-64-bit-doorbell-offset-c.patch
@@ -0,0 +1,45 @@
+From 1961ae561bd8c1588756f1f97a2c95bbe31b13ec Mon Sep 17 00:00:00 2001
+From: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Date: Mon, 7 May 2018 16:50:26 -0400
+Subject: [PATCH 4490/5725] drm/amdkfd: Fix kernel queue 64 bit doorbell offset
+ calculation
+
+The bitmap index calculation should reverse the logic used on allocation
+so that it clears the same bit that was set on allocation.
+
+Change-Id: Idae2b7df4eef7f51f61294223e883916ded619ed
+Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+index c3744d8..ebe79bf 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+@@ -188,9 +188,9 @@ void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
+ *doorbell_off = kfd->doorbell_id_offset + inx;
+
+ pr_debug("Get kernel queue doorbell\n"
+- " doorbell offset == 0x%08X\n"
+- " kernel address == %p\n",
+- *doorbell_off, (kfd->doorbell_kernel_ptr + inx));
++ " doorbell offset == 0x%08X\n"
++ " doorbell index == 0x%x\n",
++ *doorbell_off, inx);
+
+ return kfd->doorbell_kernel_ptr + inx;
+ }
+@@ -199,7 +199,8 @@ void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr)
+ {
+ unsigned int inx;
+
+- inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr);
++ inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr)
++ * sizeof(u32) / kfd->device_info->doorbell_size;
+
+ mutex_lock(&kfd->doorbell_mutex);
+ __clear_bit(inx, kfd->doorbell_available_index);
+--
+2.7.4
+
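The release-side index computation above has to invert the allocation-side pointer arithmetic: doorbell slots are kfd->device_info->doorbell_size bytes apart, while doorbell_kernel_ptr is typed as u32 *, so a plain pointer difference is in u32 units rather than in slots. A minimal standalone sketch of that inverse relationship (illustrative names, not the kfd helpers themselves):

/* Sketch: map a doorbell slot index to a kernel pointer and back.
 * base points into the doorbell aperture as u32 words; doorbell_size
 * is the per-slot stride in bytes (typically 4 for 32-bit doorbells,
 * 8 for 64-bit doorbells).
 */
#include <stdint.h>

static uint32_t *slot_to_ptr(uint32_t *base, unsigned int slot,
			     unsigned int doorbell_size)
{
	return base + slot * (doorbell_size / sizeof(uint32_t));
}

static unsigned int ptr_to_slot(uint32_t *base, uint32_t *addr,
				unsigned int doorbell_size)
{
	/* pointer difference counts u32 words; convert to bytes, then slots */
	return (unsigned int)(addr - base) * sizeof(uint32_t) / doorbell_size;
}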
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4491-drm-amdgpu-Avoid-invalidate-tlbs-when-gpu-is-on-rese.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4491-drm-amdgpu-Avoid-invalidate-tlbs-when-gpu-is-on-rese.patch
new file mode 100644
index 00000000..2e56d37b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4491-drm-amdgpu-Avoid-invalidate-tlbs-when-gpu-is-on-rese.patch
@@ -0,0 +1,61 @@
+From 1342dd9b236b14c296e97c37da02a60d18e153dd Mon Sep 17 00:00:00 2001
+From: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Date: Tue, 8 May 2018 11:48:06 -0400
+Subject: [PATCH 4491/5725] drm/amdgpu: Avoid invalidate tlbs when gpu is on
+ reset
+
+Change-Id: Iff018a8b01c9015f2b1e1368d1b1d515e62af6d5
+Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 3 +++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 3 +++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 5 ++++-
+ 3 files changed, 10 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+index b04471b..f833437 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+@@ -822,6 +822,9 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
+ int vmid;
+ unsigned int tmp;
+
++ if (adev->in_gpu_reset)
++ return -EIO;
++
+ for (vmid = 0; vmid < 16; vmid++) {
+ if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
+ continue;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+index d723ae2..0c7c248 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+@@ -791,6 +791,9 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
+ int vmid;
+ unsigned int tmp;
+
++ if (adev->in_gpu_reset)
++ return -EIO;
++
+ #ifdef V8_SUPPORT_IT_OFFICIAL
+ struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+index a81e301..7af8ee6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+@@ -989,7 +989,10 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
+ int vmid;
+ struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
+
+- if (ring->ready && (!adev->in_gpu_reset))
++ if (adev->in_gpu_reset)
++ return -EIO;
++
++ if (ring->ready)
+ return invalidate_tlbs_with_kiq(adev, pasid);
+
+ for (vmid = 0; vmid < 16; vmid++) {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4492-drm-amdkfd-Fix-race-between-scheduler-and-context-re.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4492-drm-amdkfd-Fix-race-between-scheduler-and-context-re.patch
new file mode 100644
index 00000000..605b4b97
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4492-drm-amdkfd-Fix-race-between-scheduler-and-context-re.patch
@@ -0,0 +1,665 @@
+From 4cc5dd11730a05c2ba5f0422cf721e6e7af361cf Mon Sep 17 00:00:00 2001
+From: Jay Cornwall <Jay.Cornwall@amd.com>
+Date: Fri, 11 May 2018 10:58:12 -0500
+Subject: [PATCH 4492/5725] drm/amdkfd: Fix race between scheduler and context
+ restore
+
+The scheduler may raise SQ_WAVE_STATUS.SPI_PRIO via SQ_CMD before
+context restore has completed. Restoring SPI_PRIO=0 after this point
+may cause context save to fail as the lower priority wavefronts
+are not selected for execution among spin-waiting wavefronts.
+
+Leave SPI_PRIO at its SPI-initialized or scheduler-raised value.
+
+v2: Also fix race with exception handler
+
+Change-Id: I82ad581824d956587f4110cdebc39e06c438d62f
+Signed-off-by: Jay Cornwall <Jay.Cornwall@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h | 458 +++++++++++----------
+ .../gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm | 18 +-
+ .../gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm | 16 +-
+ 3 files changed, 262 insertions(+), 230 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+index f68aef0..3621efb 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+@@ -21,18 +21,21 @@
+ */
+
+ static const uint32_t cwsr_trap_gfx8_hex[] = {
+- 0xbf820001, 0xbf820125,
++ 0xbf820001, 0xbf82012b,
+ 0xb8f4f802, 0x89748674,
+ 0xb8f5f803, 0x8675ff75,
+- 0x00000400, 0xbf850011,
++ 0x00000400, 0xbf850017,
+ 0xc00a1e37, 0x00000000,
+ 0xbf8c007f, 0x87777978,
+- 0xbf840002, 0xb974f802,
+- 0xbe801d78, 0xb8f5f803,
+- 0x8675ff75, 0x000001ff,
+- 0xbf850002, 0x80708470,
+- 0x82718071, 0x8671ff71,
+- 0x0000ffff, 0xb974f802,
++ 0xbf840005, 0x8f728374,
++ 0xb972e0c2, 0xbf800002,
++ 0xb9740002, 0xbe801d78,
++ 0xb8f5f803, 0x8675ff75,
++ 0x000001ff, 0xbf850002,
++ 0x80708470, 0x82718071,
++ 0x8671ff71, 0x0000ffff,
++ 0x8f728374, 0xb972e0c2,
++ 0xbf800002, 0xb9740002,
+ 0xbe801f70, 0xb8f5f803,
+ 0x8675ff75, 0x00000100,
+ 0xbf840006, 0xbefa0080,
+@@ -168,7 +171,7 @@ static const uint32_t cwsr_trap_gfx8_hex[] = {
+ 0x807c847c, 0x806eff6e,
+ 0x00000400, 0xbf0a757c,
+ 0xbf85ffef, 0xbf9c0000,
+- 0xbf8200ca, 0xbef8007e,
++ 0xbf8200cd, 0xbef8007e,
+ 0x8679ff7f, 0x0000ffff,
+ 0x8779ff79, 0x00040000,
+ 0xbefa0080, 0xbefb00ff,
+@@ -268,16 +271,18 @@ static const uint32_t cwsr_trap_gfx8_hex[] = {
+ 0x8f739773, 0xb976f807,
+ 0x8671ff71, 0x0000ffff,
+ 0x86fe7e7e, 0x86ea6a6a,
+- 0xb974f802, 0xbf8a0000,
+- 0x95807370, 0xbf810000,
++ 0x8f768374, 0xb976e0c2,
++ 0xbf800002, 0xb9740002,
++ 0xbf8a0000, 0x95807370,
++ 0xbf810000, 0x00000000,
+ };
+
+
+ static const uint32_t cwsr_trap_gfx9_hex[] = {
+- 0xbf820001, 0xbf82015a,
++ 0xbf820001, 0xbf82015d,
+ 0xb8f8f802, 0x89788678,
+ 0xb8f1f803, 0x866eff71,
+- 0x00000400, 0xbf850034,
++ 0x00000400, 0xbf850037,
+ 0x866eff71, 0x00000800,
+ 0xbf850003, 0x866eff71,
+ 0x00000100, 0xbf840008,
+@@ -303,258 +308,261 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
+ 0x8f6e8b77, 0x866eff6e,
+ 0x001f8000, 0xb96ef807,
+ 0x86fe7e7e, 0x86ea6a6a,
+- 0xb978f802, 0xbe801f6c,
+- 0x866dff6d, 0x0000ffff,
+- 0xbef00080, 0xb9700283,
+- 0xb8f02407, 0x8e709c70,
+- 0x876d706d, 0xb8f003c7,
+- 0x8e709b70, 0x876d706d,
+- 0xb8f0f807, 0x8670ff70,
+- 0x00007fff, 0xb970f807,
+- 0xbeee007e, 0xbeef007f,
+- 0xbefe0180, 0xbf900004,
+- 0x87708478, 0xb970f802,
+- 0xbf8e0002, 0xbf88fffe,
+- 0xb8f02a05, 0x80708170,
+- 0x8e708a70, 0xb8f11605,
+- 0x80718171, 0x8e718671,
+- 0x80707170, 0x80707e70,
+- 0x8271807f, 0x8671ff71,
+- 0x0000ffff, 0xc0471cb8,
+- 0x00000040, 0xbf8cc07f,
+- 0xc04b1d38, 0x00000048,
+- 0xbf8cc07f, 0xc0431e78,
+- 0x00000058, 0xbf8cc07f,
+- 0xc0471eb8, 0x0000005c,
+- 0xbf8cc07f, 0xbef4007e,
+- 0x8675ff7f, 0x0000ffff,
+- 0x8775ff75, 0x00040000,
+- 0xbef60080, 0xbef700ff,
+- 0x00807fac, 0x8670ff7f,
+- 0x08000000, 0x8f708370,
+- 0x87777077, 0x8670ff7f,
+- 0x70000000, 0x8f708170,
+- 0x87777077, 0xbefb007c,
+- 0xbefa0080, 0xb8fa2a05,
+- 0x807a817a, 0x8e7a8a7a,
+- 0xb8f01605, 0x80708170,
+- 0x8e708670, 0x807a707a,
+- 0xbef60084, 0xbef600ff,
+- 0x01000000, 0xbefe007c,
+- 0xbefc007a, 0xc0611efa,
+- 0x0000007c, 0xbf8cc07f,
+- 0x807a847a, 0xbefc007e,
++ 0x8f6e8378, 0xb96ee0c2,
++ 0xbf800002, 0xb9780002,
++ 0xbe801f6c, 0x866dff6d,
++ 0x0000ffff, 0xbef00080,
++ 0xb9700283, 0xb8f02407,
++ 0x8e709c70, 0x876d706d,
++ 0xb8f003c7, 0x8e709b70,
++ 0x876d706d, 0xb8f0f807,
++ 0x8670ff70, 0x00007fff,
++ 0xb970f807, 0xbeee007e,
++ 0xbeef007f, 0xbefe0180,
++ 0xbf900004, 0x87708478,
++ 0xb970f802, 0xbf8e0002,
++ 0xbf88fffe, 0xb8f02a05,
++ 0x80708170, 0x8e708a70,
++ 0xb8f11605, 0x80718171,
++ 0x8e718671, 0x80707170,
++ 0x80707e70, 0x8271807f,
++ 0x8671ff71, 0x0000ffff,
++ 0xc0471cb8, 0x00000040,
++ 0xbf8cc07f, 0xc04b1d38,
++ 0x00000048, 0xbf8cc07f,
++ 0xc0431e78, 0x00000058,
++ 0xbf8cc07f, 0xc0471eb8,
++ 0x0000005c, 0xbf8cc07f,
++ 0xbef4007e, 0x8675ff7f,
++ 0x0000ffff, 0x8775ff75,
++ 0x00040000, 0xbef60080,
++ 0xbef700ff, 0x00807fac,
++ 0x8670ff7f, 0x08000000,
++ 0x8f708370, 0x87777077,
++ 0x8670ff7f, 0x70000000,
++ 0x8f708170, 0x87777077,
++ 0xbefb007c, 0xbefa0080,
++ 0xb8fa2a05, 0x807a817a,
++ 0x8e7a8a7a, 0xb8f01605,
++ 0x80708170, 0x8e708670,
++ 0x807a707a, 0xbef60084,
++ 0xbef600ff, 0x01000000,
+ 0xbefe007c, 0xbefc007a,
+- 0xc0611b3a, 0x0000007c,
++ 0xc0611efa, 0x0000007c,
+ 0xbf8cc07f, 0x807a847a,
+ 0xbefc007e, 0xbefe007c,
+- 0xbefc007a, 0xc0611b7a,
++ 0xbefc007a, 0xc0611b3a,
+ 0x0000007c, 0xbf8cc07f,
+ 0x807a847a, 0xbefc007e,
+ 0xbefe007c, 0xbefc007a,
+- 0xc0611bba, 0x0000007c,
++ 0xc0611b7a, 0x0000007c,
+ 0xbf8cc07f, 0x807a847a,
+ 0xbefc007e, 0xbefe007c,
+- 0xbefc007a, 0xc0611bfa,
++ 0xbefc007a, 0xc0611bba,
+ 0x0000007c, 0xbf8cc07f,
+ 0x807a847a, 0xbefc007e,
+ 0xbefe007c, 0xbefc007a,
+- 0xc0611e3a, 0x0000007c,
+- 0xbf8cc07f, 0x807a847a,
+- 0xbefc007e, 0xb8f1f803,
+- 0xbefe007c, 0xbefc007a,
+- 0xc0611c7a, 0x0000007c,
++ 0xc0611bfa, 0x0000007c,
+ 0xbf8cc07f, 0x807a847a,
+ 0xbefc007e, 0xbefe007c,
+- 0xbefc007a, 0xc0611a3a,
++ 0xbefc007a, 0xc0611e3a,
++ 0x0000007c, 0xbf8cc07f,
++ 0x807a847a, 0xbefc007e,
++ 0xb8f1f803, 0xbefe007c,
++ 0xbefc007a, 0xc0611c7a,
+ 0x0000007c, 0xbf8cc07f,
+ 0x807a847a, 0xbefc007e,
+ 0xbefe007c, 0xbefc007a,
+- 0xc0611a7a, 0x0000007c,
+- 0xbf8cc07f, 0x807a847a,
+- 0xbefc007e, 0xb8fbf801,
+- 0xbefe007c, 0xbefc007a,
+- 0xc0611efa, 0x0000007c,
++ 0xc0611a3a, 0x0000007c,
+ 0xbf8cc07f, 0x807a847a,
+- 0xbefc007e, 0x8670ff7f,
+- 0x04000000, 0xbeef0080,
+- 0x876f6f70, 0xb8fa2a05,
++ 0xbefc007e, 0xbefe007c,
++ 0xbefc007a, 0xc0611a7a,
++ 0x0000007c, 0xbf8cc07f,
++ 0x807a847a, 0xbefc007e,
++ 0xb8fbf801, 0xbefe007c,
++ 0xbefc007a, 0xc0611efa,
++ 0x0000007c, 0xbf8cc07f,
++ 0x807a847a, 0xbefc007e,
++ 0x8670ff7f, 0x04000000,
++ 0xbeef0080, 0x876f6f70,
++ 0xb8fa2a05, 0x807a817a,
++ 0x8e7a8a7a, 0xb8f11605,
++ 0x80718171, 0x8e718471,
++ 0x8e768271, 0xbef600ff,
++ 0x01000000, 0xbef20174,
++ 0x80747a74, 0x82758075,
++ 0xbefc0080, 0xbf800000,
++ 0xbe802b00, 0xbe822b02,
++ 0xbe842b04, 0xbe862b06,
++ 0xbe882b08, 0xbe8a2b0a,
++ 0xbe8c2b0c, 0xbe8e2b0e,
++ 0xc06b003a, 0x00000000,
++ 0xbf8cc07f, 0xc06b013a,
++ 0x00000010, 0xbf8cc07f,
++ 0xc06b023a, 0x00000020,
++ 0xbf8cc07f, 0xc06b033a,
++ 0x00000030, 0xbf8cc07f,
++ 0x8074c074, 0x82758075,
++ 0x807c907c, 0xbf0a717c,
++ 0xbf85ffe7, 0xbef40172,
++ 0xbefa0080, 0xbefe00c1,
++ 0xbeff00c1, 0xbee80080,
++ 0xbee90080, 0xbef600ff,
++ 0x01000000, 0xe0724000,
++ 0x7a1d0000, 0xe0724100,
++ 0x7a1d0100, 0xe0724200,
++ 0x7a1d0200, 0xe0724300,
++ 0x7a1d0300, 0xbefe00c1,
++ 0xbeff00c1, 0xb8f14306,
++ 0x8671c171, 0xbf84002c,
++ 0xbf8a0000, 0x8670ff6f,
++ 0x04000000, 0xbf840028,
++ 0x8e718671, 0x8e718271,
++ 0xbef60071, 0xb8fa2a05,
+ 0x807a817a, 0x8e7a8a7a,
+- 0xb8f11605, 0x80718171,
+- 0x8e718471, 0x8e768271,
++ 0xb8f01605, 0x80708170,
++ 0x8e708670, 0x807a707a,
++ 0x807aff7a, 0x00000080,
+ 0xbef600ff, 0x01000000,
+- 0xbef20174, 0x80747a74,
+- 0x82758075, 0xbefc0080,
+- 0xbf800000, 0xbe802b00,
+- 0xbe822b02, 0xbe842b04,
+- 0xbe862b06, 0xbe882b08,
+- 0xbe8a2b0a, 0xbe8c2b0c,
+- 0xbe8e2b0e, 0xc06b003a,
+- 0x00000000, 0xbf8cc07f,
+- 0xc06b013a, 0x00000010,
+- 0xbf8cc07f, 0xc06b023a,
+- 0x00000020, 0xbf8cc07f,
+- 0xc06b033a, 0x00000030,
+- 0xbf8cc07f, 0x8074c074,
+- 0x82758075, 0x807c907c,
+- 0xbf0a717c, 0xbf85ffe7,
+- 0xbef40172, 0xbefa0080,
++ 0xbefc0080, 0xd28c0002,
++ 0x000100c1, 0xd28d0003,
++ 0x000204c1, 0xd1060002,
++ 0x00011103, 0x7e0602ff,
++ 0x00000200, 0xbefc00ff,
++ 0x00010000, 0xbe800077,
++ 0x8677ff77, 0xff7fffff,
++ 0x8777ff77, 0x00058000,
++ 0xd8ec0000, 0x00000002,
++ 0xbf8cc07f, 0xe0765000,
++ 0x7a1d0002, 0x68040702,
++ 0xd0c9006a, 0x0000e302,
++ 0xbf87fff7, 0xbef70000,
++ 0xbefa00ff, 0x00000400,
+ 0xbefe00c1, 0xbeff00c1,
+- 0xbee80080, 0xbee90080,
++ 0xb8f12a05, 0x80718171,
++ 0x8e718271, 0x8e768871,
+ 0xbef600ff, 0x01000000,
++ 0xbefc0084, 0xbf0a717c,
++ 0xbf840015, 0xbf11017c,
++ 0x8071ff71, 0x00001000,
++ 0x7e000300, 0x7e020301,
++ 0x7e040302, 0x7e060303,
+ 0xe0724000, 0x7a1d0000,
+ 0xe0724100, 0x7a1d0100,
+ 0xe0724200, 0x7a1d0200,
+ 0xe0724300, 0x7a1d0300,
++ 0x807c847c, 0x807aff7a,
++ 0x00000400, 0xbf0a717c,
++ 0xbf85ffef, 0xbf9c0000,
++ 0xbf8200dc, 0xbef4007e,
++ 0x8675ff7f, 0x0000ffff,
++ 0x8775ff75, 0x00040000,
++ 0xbef60080, 0xbef700ff,
++ 0x00807fac, 0x866eff7f,
++ 0x08000000, 0x8f6e836e,
++ 0x87776e77, 0x866eff7f,
++ 0x70000000, 0x8f6e816e,
++ 0x87776e77, 0x866eff7f,
++ 0x04000000, 0xbf84001e,
+ 0xbefe00c1, 0xbeff00c1,
+- 0xb8f14306, 0x8671c171,
+- 0xbf84002c, 0xbf8a0000,
+- 0x8670ff6f, 0x04000000,
+- 0xbf840028, 0x8e718671,
+- 0x8e718271, 0xbef60071,
+- 0xb8fa2a05, 0x807a817a,
+- 0x8e7a8a7a, 0xb8f01605,
+- 0x80708170, 0x8e708670,
+- 0x807a707a, 0x807aff7a,
++ 0xb8ef4306, 0x866fc16f,
++ 0xbf840019, 0x8e6f866f,
++ 0x8e6f826f, 0xbef6006f,
++ 0xb8f82a05, 0x80788178,
++ 0x8e788a78, 0xb8ee1605,
++ 0x806e816e, 0x8e6e866e,
++ 0x80786e78, 0x8078ff78,
+ 0x00000080, 0xbef600ff,
+ 0x01000000, 0xbefc0080,
+- 0xd28c0002, 0x000100c1,
+- 0xd28d0003, 0x000204c1,
+- 0xd1060002, 0x00011103,
+- 0x7e0602ff, 0x00000200,
+- 0xbefc00ff, 0x00010000,
+- 0xbe800077, 0x8677ff77,
+- 0xff7fffff, 0x8777ff77,
+- 0x00058000, 0xd8ec0000,
+- 0x00000002, 0xbf8cc07f,
+- 0xe0765000, 0x7a1d0002,
+- 0x68040702, 0xd0c9006a,
+- 0x0000e302, 0xbf87fff7,
+- 0xbef70000, 0xbefa00ff,
+- 0x00000400, 0xbefe00c1,
+- 0xbeff00c1, 0xb8f12a05,
+- 0x80718171, 0x8e718271,
+- 0x8e768871, 0xbef600ff,
+- 0x01000000, 0xbefc0084,
+- 0xbf0a717c, 0xbf840015,
+- 0xbf11017c, 0x8071ff71,
+- 0x00001000, 0x7e000300,
++ 0xe0510000, 0x781d0000,
++ 0xe0510100, 0x781d0000,
++ 0x807cff7c, 0x00000200,
++ 0x8078ff78, 0x00000200,
++ 0xbf0a6f7c, 0xbf85fff6,
++ 0xbef80080, 0xbefe00c1,
++ 0xbeff00c1, 0xb8ef2a05,
++ 0x806f816f, 0x8e6f826f,
++ 0x8e76886f, 0xbef600ff,
++ 0x01000000, 0xbeee0078,
++ 0x8078ff78, 0x00000400,
++ 0xbefc0084, 0xbf11087c,
++ 0x806fff6f, 0x00008000,
++ 0xe0524000, 0x781d0000,
++ 0xe0524100, 0x781d0100,
++ 0xe0524200, 0x781d0200,
++ 0xe0524300, 0x781d0300,
++ 0xbf8c0f70, 0x7e000300,
+ 0x7e020301, 0x7e040302,
+- 0x7e060303, 0xe0724000,
+- 0x7a1d0000, 0xe0724100,
+- 0x7a1d0100, 0xe0724200,
+- 0x7a1d0200, 0xe0724300,
+- 0x7a1d0300, 0x807c847c,
+- 0x807aff7a, 0x00000400,
+- 0xbf0a717c, 0xbf85ffef,
+- 0xbf9c0000, 0xbf8200d9,
+- 0xbef4007e, 0x8675ff7f,
+- 0x0000ffff, 0x8775ff75,
+- 0x00040000, 0xbef60080,
+- 0xbef700ff, 0x00807fac,
+- 0x866eff7f, 0x08000000,
+- 0x8f6e836e, 0x87776e77,
+- 0x866eff7f, 0x70000000,
+- 0x8f6e816e, 0x87776e77,
+- 0x866eff7f, 0x04000000,
+- 0xbf84001e, 0xbefe00c1,
+- 0xbeff00c1, 0xb8ef4306,
+- 0x866fc16f, 0xbf840019,
+- 0x8e6f866f, 0x8e6f826f,
+- 0xbef6006f, 0xb8f82a05,
++ 0x7e060303, 0x807c847c,
++ 0x8078ff78, 0x00000400,
++ 0xbf0a6f7c, 0xbf85ffee,
++ 0xbf9c0000, 0xe0524000,
++ 0x6e1d0000, 0xe0524100,
++ 0x6e1d0100, 0xe0524200,
++ 0x6e1d0200, 0xe0524300,
++ 0x6e1d0300, 0xb8f82a05,
+ 0x80788178, 0x8e788a78,
+ 0xb8ee1605, 0x806e816e,
+ 0x8e6e866e, 0x80786e78,
+- 0x8078ff78, 0x00000080,
+- 0xbef600ff, 0x01000000,
+- 0xbefc0080, 0xe0510000,
+- 0x781d0000, 0xe0510100,
+- 0x781d0000, 0x807cff7c,
+- 0x00000200, 0x8078ff78,
+- 0x00000200, 0xbf0a6f7c,
+- 0xbf85fff6, 0xbef80080,
+- 0xbefe00c1, 0xbeff00c1,
+- 0xb8ef2a05, 0x806f816f,
+- 0x8e6f826f, 0x8e76886f,
+- 0xbef600ff, 0x01000000,
+- 0xbeee0078, 0x8078ff78,
+- 0x00000400, 0xbefc0084,
+- 0xbf11087c, 0x806fff6f,
+- 0x00008000, 0xe0524000,
+- 0x781d0000, 0xe0524100,
+- 0x781d0100, 0xe0524200,
+- 0x781d0200, 0xe0524300,
+- 0x781d0300, 0xbf8c0f70,
+- 0x7e000300, 0x7e020301,
+- 0x7e040302, 0x7e060303,
+- 0x807c847c, 0x8078ff78,
+- 0x00000400, 0xbf0a6f7c,
+- 0xbf85ffee, 0xbf9c0000,
+- 0xe0524000, 0x6e1d0000,
+- 0xe0524100, 0x6e1d0100,
+- 0xe0524200, 0x6e1d0200,
+- 0xe0524300, 0x6e1d0300,
++ 0x80f8c078, 0xb8ef1605,
++ 0x806f816f, 0x8e6f846f,
++ 0x8e76826f, 0xbef600ff,
++ 0x01000000, 0xbefc006f,
++ 0xc031003a, 0x00000078,
++ 0x80f8c078, 0xbf8cc07f,
++ 0x80fc907c, 0xbf800000,
++ 0xbe802d00, 0xbe822d02,
++ 0xbe842d04, 0xbe862d06,
++ 0xbe882d08, 0xbe8a2d0a,
++ 0xbe8c2d0c, 0xbe8e2d0e,
++ 0xbf06807c, 0xbf84fff0,
+ 0xb8f82a05, 0x80788178,
+ 0x8e788a78, 0xb8ee1605,
+ 0x806e816e, 0x8e6e866e,
+- 0x80786e78, 0x80f8c078,
+- 0xb8ef1605, 0x806f816f,
+- 0x8e6f846f, 0x8e76826f,
++ 0x80786e78, 0xbef60084,
+ 0xbef600ff, 0x01000000,
+- 0xbefc006f, 0xc031003a,
+- 0x00000078, 0x80f8c078,
+- 0xbf8cc07f, 0x80fc907c,
+- 0xbf800000, 0xbe802d00,
+- 0xbe822d02, 0xbe842d04,
+- 0xbe862d06, 0xbe882d08,
+- 0xbe8a2d0a, 0xbe8c2d0c,
+- 0xbe8e2d0e, 0xbf06807c,
+- 0xbf84fff0, 0xb8f82a05,
+- 0x80788178, 0x8e788a78,
+- 0xb8ee1605, 0x806e816e,
+- 0x8e6e866e, 0x80786e78,
+- 0xbef60084, 0xbef600ff,
+- 0x01000000, 0xc0211bfa,
++ 0xc0211bfa, 0x00000078,
++ 0x80788478, 0xc0211b3a,
+ 0x00000078, 0x80788478,
+- 0xc0211b3a, 0x00000078,
+- 0x80788478, 0xc0211b7a,
++ 0xc0211b7a, 0x00000078,
++ 0x80788478, 0xc0211eba,
+ 0x00000078, 0x80788478,
+- 0xc0211eba, 0x00000078,
+- 0x80788478, 0xc0211efa,
++ 0xc0211efa, 0x00000078,
++ 0x80788478, 0xc0211c3a,
+ 0x00000078, 0x80788478,
+- 0xc0211c3a, 0x00000078,
+- 0x80788478, 0xc0211c7a,
++ 0xc0211c7a, 0x00000078,
++ 0x80788478, 0xc0211a3a,
+ 0x00000078, 0x80788478,
+- 0xc0211a3a, 0x00000078,
+- 0x80788478, 0xc0211a7a,
++ 0xc0211a7a, 0x00000078,
++ 0x80788478, 0xc0211cfa,
+ 0x00000078, 0x80788478,
+- 0xc0211cfa, 0x00000078,
+- 0x80788478, 0xbf8cc07f,
+- 0xbefc006f, 0xbefe007a,
+- 0xbeff007b, 0x866f71ff,
+- 0x000003ff, 0xb96f4803,
+- 0x866f71ff, 0xfffff800,
+- 0x8f6f8b6f, 0xb96fa2c3,
+- 0xb973f801, 0xb8ee2a05,
+- 0x806e816e, 0x8e6e8a6e,
+- 0xb8ef1605, 0x806f816f,
+- 0x8e6f866f, 0x806e6f6e,
+- 0x806e746e, 0x826f8075,
+- 0x866fff6f, 0x0000ffff,
+- 0xc0071cb7, 0x00000040,
+- 0xc00b1d37, 0x00000048,
+- 0xc0031e77, 0x00000058,
+- 0xc0071eb7, 0x0000005c,
+- 0xbf8cc07f, 0x866fff6d,
+- 0xf0000000, 0x8f6f9c6f,
+- 0x8e6f906f, 0xbeee0080,
+- 0x876e6f6e, 0x866fff6d,
+- 0x08000000, 0x8f6f9b6f,
+- 0x8e6f8f6f, 0x876e6f6e,
+- 0x866fff70, 0x00800000,
+- 0x8f6f976f, 0xb96ef807,
+- 0x866dff6d, 0x0000ffff,
+- 0x86fe7e7e, 0x86ea6a6a,
+- 0xb970f802, 0xbf8a0000,
++ 0xbf8cc07f, 0xbefc006f,
++ 0xbefe007a, 0xbeff007b,
++ 0x866f71ff, 0x000003ff,
++ 0xb96f4803, 0x866f71ff,
++ 0xfffff800, 0x8f6f8b6f,
++ 0xb96fa2c3, 0xb973f801,
++ 0xb8ee2a05, 0x806e816e,
++ 0x8e6e8a6e, 0xb8ef1605,
++ 0x806f816f, 0x8e6f866f,
++ 0x806e6f6e, 0x806e746e,
++ 0x826f8075, 0x866fff6f,
++ 0x0000ffff, 0xc0071cb7,
++ 0x00000040, 0xc00b1d37,
++ 0x00000048, 0xc0031e77,
++ 0x00000058, 0xc0071eb7,
++ 0x0000005c, 0xbf8cc07f,
++ 0x866fff6d, 0xf0000000,
++ 0x8f6f9c6f, 0x8e6f906f,
++ 0xbeee0080, 0x876e6f6e,
++ 0x866fff6d, 0x08000000,
++ 0x8f6f9b6f, 0x8e6f8f6f,
++ 0x876e6f6e, 0x866fff70,
++ 0x00800000, 0x8f6f976f,
++ 0xb96ef807, 0x866dff6d,
++ 0x0000ffff, 0x86fe7e7e,
++ 0x86ea6a6a, 0x8f6e8370,
++ 0xb96ee0c2, 0xbf800002,
++ 0xb9700002, 0xbf8a0000,
+ 0x95806f6c, 0xbf810000,
+ };
+diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
+index a2a04bb..abe1a5d 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
+@@ -103,6 +103,10 @@ var SQ_WAVE_STATUS_INST_ATC_SHIFT = 23
+ var SQ_WAVE_STATUS_INST_ATC_MASK = 0x00800000
+ var SQ_WAVE_STATUS_SPI_PRIO_SHIFT = 1
+ var SQ_WAVE_STATUS_SPI_PRIO_MASK = 0x00000006
++var SQ_WAVE_STATUS_PRE_SPI_PRIO_SHIFT = 0
++var SQ_WAVE_STATUS_PRE_SPI_PRIO_SIZE = 1
++var SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT = 3
++var SQ_WAVE_STATUS_POST_SPI_PRIO_SIZE = 29
+
+ var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12
+ var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 9
+@@ -251,7 +255,7 @@ if (!EMU_RUN_HACK)
+ s_waitcnt lgkmcnt(0)
+ s_or_b32 ttmp7, ttmp8, ttmp9
+ s_cbranch_scc0 L_NO_NEXT_TRAP //next level trap handler not been set
+- s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status //restore HW status(SCC)
++ set_status_without_spi_prio(s_save_status, ttmp2) //restore HW status(SCC)
+ s_setpc_b64 [ttmp8,ttmp9] //jump to next level trap handler
+
+ L_NO_NEXT_TRAP:
+@@ -262,7 +266,7 @@ L_NO_NEXT_TRAP:
+ s_addc_u32 ttmp1, ttmp1, 0
+ L_EXCP_CASE:
+ s_and_b32 ttmp1, ttmp1, 0xFFFF
+- s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status //restore HW status(SCC)
++ set_status_without_spi_prio(s_save_status, ttmp2) //restore HW status(SCC)
+ s_rfe_b64 [ttmp0, ttmp1]
+ end
+ // ********* End handling of non-CWSR traps *******************
+@@ -1053,7 +1057,7 @@ end
+ s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS
+ s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
+ s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
+- s_setreg_b32 hwreg(HW_REG_STATUS), s_restore_status // SCC is included, which is changed by previous salu
++ set_status_without_spi_prio(s_restore_status, s_restore_tmp) // SCC is included, which is changed by previous salu
+
+ s_barrier //barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG //FIXME not performance-optimal at this time
+
+@@ -1134,3 +1138,11 @@ end
+ function get_hwreg_size_bytes
+ return 128 //HWREG size 128 bytes
+ end
++
++function set_status_without_spi_prio(status, tmp)
++ // Do not restore STATUS.SPI_PRIO since scheduler may have raised it.
++ s_lshr_b32 tmp, status, SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT
++ s_setreg_b32 hwreg(HW_REG_STATUS, SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT, SQ_WAVE_STATUS_POST_SPI_PRIO_SIZE), tmp
++ s_nop 0x2 // avoid S_SETREG => S_SETREG hazard
++ s_setreg_b32 hwreg(HW_REG_STATUS, SQ_WAVE_STATUS_PRE_SPI_PRIO_SHIFT, SQ_WAVE_STATUS_PRE_SPI_PRIO_SIZE), status
++end
+diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+index 998be96..0bb9c57 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+@@ -103,6 +103,10 @@ var SQ_WAVE_STATUS_INST_ATC_MASK = 0x00800000
+ var SQ_WAVE_STATUS_SPI_PRIO_SHIFT = 1
+ var SQ_WAVE_STATUS_SPI_PRIO_MASK = 0x00000006
+ var SQ_WAVE_STATUS_HALT_MASK = 0x2000
++var SQ_WAVE_STATUS_PRE_SPI_PRIO_SHIFT = 0
++var SQ_WAVE_STATUS_PRE_SPI_PRIO_SIZE = 1
++var SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT = 3
++var SQ_WAVE_STATUS_POST_SPI_PRIO_SIZE = 29
+
+ var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12
+ var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 9
+@@ -317,7 +321,7 @@ L_EXCP_CASE:
+ // Restore SQ_WAVE_STATUS.
+ s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
+ s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
+- s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status
++ set_status_without_spi_prio(s_save_status, ttmp2)
+
+ s_rfe_b64 [ttmp0, ttmp1]
+ end
+@@ -1120,7 +1124,7 @@ end
+ s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS
+ s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
+ s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
+- s_setreg_b32 hwreg(HW_REG_STATUS), s_restore_status // SCC is included, which is changed by previous salu
++ set_status_without_spi_prio(s_restore_status, s_restore_tmp) // SCC is included, which is changed by previous salu
+
+ s_barrier //barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG //FIXME not performance-optimal at this time
+
+@@ -1212,3 +1216,11 @@ function ack_sqc_store_workaround
+ s_waitcnt lgkmcnt(0)
+ end
+ end
++
++function set_status_without_spi_prio(status, tmp)
++ // Do not restore STATUS.SPI_PRIO since scheduler may have raised it.
++ s_lshr_b32 tmp, status, SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT
++ s_setreg_b32 hwreg(HW_REG_STATUS, SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT, SQ_WAVE_STATUS_POST_SPI_PRIO_SIZE), tmp
++ s_nop 0x2 // avoid S_SETREG => S_SETREG hazard
++ s_setreg_b32 hwreg(HW_REG_STATUS, SQ_WAVE_STATUS_PRE_SPI_PRIO_SHIFT, SQ_WAVE_STATUS_PRE_SPI_PRIO_SIZE), status
++end
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4493-drm-amdkfd-Change-the-control-stack-mtype-from-UC-to.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4493-drm-amdkfd-Change-the-control-stack-mtype-from-UC-to.patch
new file mode 100644
index 00000000..eb1170f1
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4493-drm-amdkfd-Change-the-control-stack-mtype-from-UC-to.patch
@@ -0,0 +1,106 @@
+From 3d01b50aca2efb3e89c4412d7f1f390c423681f3 Mon Sep 17 00:00:00 2001
+From: Yong Zhao <yong.zhao@amd.com>
+Date: Mon, 14 May 2018 12:19:22 -0400
+Subject: [PATCH 4493/5725] drm/amdkfd: Change the control stack mtype from UC
+ to NC on GFX9
+
+Due to a HW bug on GFX9, the mtype of control stack buffers, which are
+allocated in mqd BOs on the VMID 0 gart and sit one page past the mqd
+start addresses, should be set to NC rather than the default gart
+mtype UC.
+
+Fix: KFD-381
+
+Change-Id: I865756efb038512ecb5d4071b2e3d3784db5d4ff
+Signed-off-by: Yong Zhao <yong.zhao@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 7 ++++++-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 2 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_device.c | 3 ++-
+ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 2 +-
+ drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 2 +-
+ 5 files changed, 11 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+index 99ef4ee..6a5d236 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+@@ -278,7 +278,7 @@ u32 pool_to_domain(enum kgd_memory_pool p)
+
+ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+ void **mem_obj, uint64_t *gpu_addr,
+- void **cpu_ptr)
++ void **cpu_ptr, bool mqd_gfx9)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+ struct amdgpu_bo *bo = NULL;
+@@ -294,7 +294,12 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+ bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+ bp.type = ttm_bo_type_kernel;
+ bp.resv = NULL;
++
++ if (mqd_gfx9)
++ bp.flags |= AMDGPU_GEM_CREATE_MQD_GFX9;
++
+ r = amdgpu_bo_create(adev, &bp, &bo);
++
+ if (r) {
+ dev_err(adev->dev,
+ "failed to allocate BO for amdkfd (%d)\n", r);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+index 5c785ac..c7116f6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+@@ -137,7 +137,7 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd);
+ /* Shared API */
+ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+ void **mem_obj, uint64_t *gpu_addr,
+- void **cpu_ptr);
++ void **cpu_ptr, bool mqd_gfx9);
+ void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
+ void get_local_mem_info(struct kgd_dev *kgd,
+ struct kfd_local_mem_info *mem_info);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+index 8fb7580..4ae2b07 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -462,7 +462,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
+
+ if (kfd->kfd2kgd->init_gtt_mem_allocation(
+ kfd->kgd, size, &kfd->gtt_mem,
+- &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr)){
++ &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr,
++ false)) {
+ dev_err(kfd_device, "Could not allocate %d bytes\n", size);
+ goto out;
+ }
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+index cc2c3fb..58ea1fe 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+@@ -115,7 +115,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
+ ALIGN(sizeof(struct v9_mqd), PAGE_SIZE),
+ &((*mqd_mem_obj)->gtt_mem),
+ &((*mqd_mem_obj)->gpu_addr),
+- (void *)&((*mqd_mem_obj)->cpu_ptr));
++ (void *)&((*mqd_mem_obj)->cpu_ptr), true);
+ } else
+ retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct v9_mqd),
+ mqd_mem_obj);
+diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+index dd0b3c7..d26bba5 100644
+--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
++++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+@@ -255,7 +255,7 @@ struct tile_config {
+ struct kfd2kgd_calls {
+ int (*init_gtt_mem_allocation)(struct kgd_dev *kgd, size_t size,
+ void **mem_obj, uint64_t *gpu_addr,
+- void **cpu_ptr);
++ void **cpu_ptr, bool mqd_gfx9);
+
+ void (*free_gtt_mem)(struct kgd_dev *kgd, void *mem_obj);
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4494-drm-amdgpu-Avoid-destroy-hqd-when-GPU-is-on-reset.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4494-drm-amdgpu-Avoid-destroy-hqd-when-GPU-is-on-reset.patch
new file mode 100644
index 00000000..59e21a03
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4494-drm-amdgpu-Avoid-destroy-hqd-when-GPU-is-on-reset.patch
@@ -0,0 +1,57 @@
+From a9cd1aa226d726aa2643b18129209e25e3efe2ba Mon Sep 17 00:00:00 2001
+From: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Date: Mon, 14 May 2018 14:53:03 -0400
+Subject: [PATCH 4494/5725] drm/amdgpu: Avoid destroy hqd when GPU is on reset
+
+Change-Id: Ifd0c43973c818e592c727e994dbc77fd48e50e68
+Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 3 +++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 3 +++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 2 ++
+ 3 files changed, 8 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+index f833437..468b940 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+@@ -586,6 +586,9 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
+ unsigned long flags, end_jiffies;
+ int retry;
+
++ if (adev->in_gpu_reset)
++ return -EIO;
++
+ acquire_queue(kgd, pipe_id, queue_id);
+ WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+index 0c7c248..e0c0e97 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+@@ -608,6 +608,9 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
+ int retry;
+ struct vi_mqd *m = get_mqd(mqd);
+
++ if (adev->in_gpu_reset)
++ return -EIO;
++
+ acquire_queue(kgd, pipe_id, queue_id);
+
+ if (m->cp_hqd_vmid == 0)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+index 7af8ee6..adef83d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+@@ -745,6 +745,8 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
+ unsigned long flags;
+ int retry;
+ #endif
++ if (adev->in_gpu_reset)
++ return -EIO;
+
+ acquire_queue(kgd, pipe_id, queue_id);
+
+--
+2.7.4
+
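This patch and 4491 above share one guard: register paths driven by KFD bail out early with -EIO while a GPU reset is in flight, since hardware state cannot be trusted until the reset completes. A minimal sketch of the pattern (illustrative function name):

/* Refuse MMIO work while the device is resetting; callers are
 * expected to retry or clean up once the reset has finished.
 */
static int kgd_mmio_op(struct amdgpu_device *adev)
{
	if (adev->in_gpu_reset)
		return -EIO;

	/* ... normal HQD / TLB register programming ... */
	return 0;
}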
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4495-drm-amd-pp-fix-a-couple-locking-issues.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4495-drm-amd-pp-fix-a-couple-locking-issues.patch
new file mode 100644
index 00000000..165b3b90
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4495-drm-amd-pp-fix-a-couple-locking-issues.patch
@@ -0,0 +1,34 @@
+From 6ba18f1f2c89ea3c28f1b503c85e9200af59f9ab Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Fri, 18 May 2018 14:59:46 +0800
+Subject: [PATCH 4495/5725] drm/amd/pp: fix a couple locking issues
+
+We should unlock on the error path before returning
+
+Change-Id: Ia1014864dfa03baeb39975568fa2259df942964c
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+index c3ac84f..c952845 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+@@ -954,9 +954,9 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
+ PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", goto error);
+ } else if (hwmgr->chip_id == CHIP_VEGAM) {
+ result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_VegaM);
+- PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
++ PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", goto error);
+ result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_VegaM);
+- PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
++ PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", goto error);
+ }
+ }
+ cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, value2);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4496-drm-amdgpu-skip-CG-for-VCN-when-late_init-fini.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4496-drm-amdgpu-skip-CG-for-VCN-when-late_init-fini.patch
new file mode 100644
index 00000000..e31e772e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4496-drm-amdgpu-skip-CG-for-VCN-when-late_init-fini.patch
@@ -0,0 +1,37 @@
+From 05a5f4e203c4c5b7475b5755ad0fbb4400dc6924 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Wed, 16 May 2018 20:06:53 +0800
+Subject: [PATCH 4496/5725] drm/amdgpu: skip CG for VCN when late_init/fini
+
+VCN clockgating is handled manually like VCE and UVD.
+
+Change-Id: Icb285a3acae6d2664f543d004486654373f0dc91
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 0adc774..1d20967 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1764,6 +1764,7 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
+ /* skip CG for VCE/UVD, it's handled specially */
+ if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
+ adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
++ adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
+ adev->ip_blocks[i].version->funcs->set_clockgating_state) {
+ /* enable clockgating to save power */
+ r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+@@ -1875,6 +1876,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
+
+ if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
+ adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
++ adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
+ adev->ip_blocks[i].version->funcs->set_clockgating_state) {
+ /* ungate blocks before hw fini so that we can shutdown the blocks safely */
+ r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4497-drm-amd-pp-Add-smu-support-for-VCN-powergating-on-RV.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4497-drm-amd-pp-Add-smu-support-for-VCN-powergating-on-RV.patch
new file mode 100644
index 00000000..2a2a3354
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4497-drm-amd-pp-Add-smu-support-for-VCN-powergating-on-RV.patch
@@ -0,0 +1,53 @@
+From e10dda05e76a3045dfa93cee7a33da8739693912 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Wed, 16 May 2018 20:09:09 +0800
+Subject: [PATCH 4497/5725] drm/amd/pp: Add smu support for VCN powergating on
+ RV
+
+Change-Id: I56283fa94c7bf3778a988369f18f83648ea2a55d
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 19 ++++++++++++++++++-
+ 1 file changed, 18 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+index fdb0282..2df791c 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+@@ -1128,6 +1128,23 @@ static int smu10_set_mmhub_powergating_by_smu(struct pp_hwmgr *hwmgr)
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub);
+ }
+
++static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate)
++{
++ if (bgate) {
++ amdgpu_device_ip_set_powergating_state(hwmgr->adev,
++ AMD_IP_BLOCK_TYPE_VCN,
++ AMD_PG_STATE_GATE);
++ smum_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_PowerDownVcn, 0);
++ } else {
++ smum_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_PowerUpVcn, 0);
++ amdgpu_device_ip_set_powergating_state(hwmgr->adev,
++ AMD_IP_BLOCK_TYPE_VCN,
++ AMD_PG_STATE_UNGATE);
++ }
++}
++
+ static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
+ .backend_init = smu10_hwmgr_backend_init,
+ .backend_fini = smu10_hwmgr_backend_fini,
+@@ -1136,7 +1153,7 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
+ .force_dpm_level = smu10_dpm_force_dpm_level,
+ .get_power_state_size = smu10_get_power_state_size,
+ .powerdown_uvd = NULL,
+- .powergate_uvd = NULL,
++ .powergate_uvd = smu10_powergate_vcn,
+ .powergate_vce = NULL,
+ .get_mclk = smu10_dpm_get_mclk,
+ .get_sclk = smu10_dpm_get_sclk,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4498-drm-amdgpu-Add-CG-PG-flags-for-VCN.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4498-drm-amdgpu-Add-CG-PG-flags-for-VCN.patch
new file mode 100644
index 00000000..9983fdbe
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4498-drm-amdgpu-Add-CG-PG-flags-for-VCN.patch
@@ -0,0 +1,36 @@
+From 9d1dd7d2b401367a01b5108884d60a34489a793e Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Wed, 16 May 2018 20:10:25 +0800
+Subject: [PATCH 4498/5725] drm/amdgpu: Add CG/PG flags for VCN
+
+Change-Id: I43d116a7bdccb50f375b0e7f35943a4133812b87
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/include/amd_shared.h | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
+index e3a6fad..0c30256 100644
+--- a/drivers/gpu/drm/amd/include/amd_shared.h
++++ b/drivers/gpu/drm/amd/include/amd_shared.h
+@@ -92,7 +92,7 @@ enum amd_powergating_state {
+ #define AMD_CG_SUPPORT_GFX_3D_CGLS (1 << 21)
+ #define AMD_CG_SUPPORT_DRM_MGCG (1 << 22)
+ #define AMD_CG_SUPPORT_DF_MGCG (1 << 23)
+-
++#define AMD_CG_SUPPORT_VCN_MGCG (1 << 24)
+ /* PG flags */
+ #define AMD_PG_SUPPORT_GFX_PG (1 << 0)
+ #define AMD_PG_SUPPORT_GFX_SMG (1 << 1)
+@@ -108,6 +108,7 @@ enum amd_powergating_state {
+ #define AMD_PG_SUPPORT_GFX_QUICK_MG (1 << 11)
+ #define AMD_PG_SUPPORT_GFX_PIPELINE (1 << 12)
+ #define AMD_PG_SUPPORT_MMHUB (1 << 13)
++#define AMD_PG_SUPPORT_VCN (1 << 14)
+
+ enum PP_FEATURE_MASK {
+ PP_SCLK_DPM_MASK = 0x1,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4499-drm-amdgpu-Add-SOC15_WAIT_ON_RREG-macro-define.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4499-drm-amdgpu-Add-SOC15_WAIT_ON_RREG-macro-define.patch
new file mode 100644
index 00000000..b5f35ad6
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4499-drm-amdgpu-Add-SOC15_WAIT_ON_RREG-macro-define.patch
@@ -0,0 +1,41 @@
+From 3552ed5de7c5230aece29c49a3ab7c8063268dee Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Thu, 17 May 2018 15:58:53 +0800
+Subject: [PATCH 4499/5725] drm/amdgpu: Add SOC15_WAIT_ON_RREG macro define
+
+Change-Id: Ic44aa31a34e24b07d9f18a17301b8969c01de321
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/soc15_common.h | 15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
+index def8650..0942f49 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h
++++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
+@@ -47,6 +47,21 @@
+ #define WREG32_SOC15_OFFSET(ip, inst, reg, offset, value) \
+ WREG32((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, value)
+
++#define SOC15_WAIT_ON_RREG(ip, inst, reg, expected_value, mask, ret) \
++ do { \
++ uint32_t tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \
++ uint32_t loop = adev->usec_timeout; \
++ while ((tmp_ & (mask)) != (expected_value)) { \
++ udelay(2); \
++ tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \
++ loop--; \
++ if (!loop) { \
++ ret = -ETIMEDOUT; \
++ break; \
++ } \
++ } \
++ } while (0)
++
+ #endif
+
+
+--
+2.7.4
+
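The macro busy-polls a SOC15 register (udelay(2) per iteration, up to adev->usec_timeout iterations) until the masked value equals the expected value, writing -ETIMEDOUT into ret on timeout. A usage sketch matching how the VCN patches below call it (UVD_STATUS__IDLE is the 0x2 idle bit defined later in this series):

/* Wait until UVD_STATUS reports idle; returns 0 or -ETIMEDOUT. */
static int vcn_wait_idle_example(struct amdgpu_device *adev)
{
	int ret = 0;

	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS,
			   UVD_STATUS__IDLE,	/* expected value */
			   UVD_STATUS__IDLE,	/* mask: only the idle bit */
			   ret);
	return ret;
}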
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4500-drm-amdgpu-Add-static-CG-control-for-VCN-on-RV.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4500-drm-amdgpu-Add-static-CG-control-for-VCN-on-RV.patch
new file mode 100644
index 00000000..a2988a91
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4500-drm-amdgpu-Add-static-CG-control-for-VCN-on-RV.patch
@@ -0,0 +1,136 @@
+From 06520ea3b049cee8fef273b9a509c177ba34f6a6 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Thu, 17 May 2018 11:11:22 +0800
+Subject: [PATCH 4500/5725] drm/amdgpu: Add static CG control for VCN on RV
+
+Change-Id: Iab9a0626250e13234730e85ea2b71de9ee748de5
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 50 +++++++++++++++++++++++++++--------
+ 1 file changed, 39 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index c8db6ad..ebc6bd7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -310,14 +310,14 @@ static void vcn_v1_0_mc_resume(struct amdgpu_device *adev)
+ *
+ * Disable clock gating for VCN block
+ */
+-static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev, bool sw)
++static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev)
+ {
+ uint32_t data;
+
+ /* JPEG disable CGC */
+ data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);
+
+- if (sw)
++ if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
+ data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+ else
+ data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK;
+@@ -332,7 +332,7 @@ static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev, bool sw)
+
+ /* UVD disable CGC */
+ data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
+- if (sw)
++ if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
+ data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+ else
+ data &= ~ UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
+@@ -437,13 +437,13 @@ static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev, bool sw)
+ *
+ * Enable clock gating for VCN block
+ */
+-static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev, bool sw)
++static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev)
+ {
+ uint32_t data = 0;
+
+ /* enable JPEG CGC */
+ data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);
+- if (sw)
++ if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
+ data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+ else
+ data |= 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+@@ -457,7 +457,7 @@ static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev, bool sw)
+
+ /* enable UVD CGC */
+ data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
+- if (sw)
++ if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
+ data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+ else
+ data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+@@ -522,7 +522,7 @@ static int vcn_v1_0_start(struct amdgpu_device *adev)
+ vcn_v1_0_mc_resume(adev);
+
+ /* disable clock gating */
+- vcn_v1_0_disable_clock_gating(adev, true);
++ vcn_v1_0_disable_clock_gating(adev);
+
+ /* disable interupt */
+ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
+@@ -703,15 +703,43 @@ static int vcn_v1_0_stop(struct amdgpu_device *adev)
+ ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+
+ /* enable clock gating */
+- vcn_v1_0_enable_clock_gating(adev, true);
++ vcn_v1_0_enable_clock_gating(adev);
+
+ return 0;
+ }
+
++bool vcn_v1_0_is_idle(void *handle)
++{
++ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
++
++ return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == 0x2);
++}
++
++int vcn_v1_0_wait_for_idle(void *handle)
++{
++ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
++ int ret = 0;
++
++ SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, 0x2, 0x2, ret);
++
++ return ret;
++}
++
+ static int vcn_v1_0_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+ {
+- /* needed for driver unload*/
++ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
++ bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
++
++ if (enable) {
++ /* wait for STATUS to clear */
++ if (vcn_v1_0_is_idle(handle))
++ return -EBUSY;
++ vcn_v1_0_enable_clock_gating(adev);
++ } else {
++ /* disable HW gating and enable Sw gating */
++ vcn_v1_0_disable_clock_gating(adev);
++ }
+ return 0;
+ }
+
+@@ -1109,8 +1137,8 @@ static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
+ .hw_fini = vcn_v1_0_hw_fini,
+ .suspend = vcn_v1_0_suspend,
+ .resume = vcn_v1_0_resume,
+- .is_idle = NULL /* vcn_v1_0_is_idle */,
+- .wait_for_idle = NULL /* vcn_v1_0_wait_for_idle */,
++ .is_idle = vcn_v1_0_is_idle,
++ .wait_for_idle = vcn_v1_0_wait_for_idle,
+ .check_soft_reset = NULL /* vcn_v1_0_check_soft_reset */,
+ .pre_soft_reset = NULL /* vcn_v1_0_pre_soft_reset */,
+ .soft_reset = NULL /* vcn_v1_0_soft_reset */,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4501-drm-amdgpu-Enable-VCN-CG-by-default-on-RV.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4501-drm-amdgpu-Enable-VCN-CG-by-default-on-RV.patch
new file mode 100644
index 00000000..6f9e28af
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4501-drm-amdgpu-Enable-VCN-CG-by-default-on-RV.patch
@@ -0,0 +1,29 @@
+From caa221576a5d12a0d7a25ecd50c44e7e0b80b77f Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Thu, 17 May 2018 11:13:51 +0800
+Subject: [PATCH 4501/5725] drm/amdgpu: Enable VCN CG by default on RV
+
+Change-Id: I75fb989aadfb0c5a56108615d75320d1cc8a368e
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/soc15.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index 8ccbcf9..485cb43 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -711,7 +711,8 @@ static int soc15_common_early_init(void *handle)
+ AMD_CG_SUPPORT_MC_MGCG |
+ AMD_CG_SUPPORT_MC_LS |
+ AMD_CG_SUPPORT_SDMA_MGCG |
+- AMD_CG_SUPPORT_SDMA_LS;
++ AMD_CG_SUPPORT_SDMA_LS |
++ AMD_CG_SUPPORT_VCN_MGCG;
+ adev->pg_flags = AMD_PG_SUPPORT_SDMA;
+
+ if (adev->powerplay.pp_feature & PP_GFXOFF_MASK)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4502-drm-amdgpu-Add-VCN-static-PG-support-on-RV.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4502-drm-amdgpu-Add-VCN-static-PG-support-on-RV.patch
new file mode 100644
index 00000000..abdaf2da
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4502-drm-amdgpu-Add-VCN-static-PG-support-on-RV.patch
@@ -0,0 +1,156 @@
+From 8e0cc14bd9d51bc7d9af94c954d2ed60e937bf3f Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Thu, 17 May 2018 16:07:02 +0800
+Subject: [PATCH 4502/5725] drm/amdgpu: Add VCN static PG support on RV
+
+Change-Id: I78b225771c41688500b5f04906329effd8af143e
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 11 ++++
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 92 ++++++++++++++++++++++++++++++++-
+ 2 files changed, 102 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+index 2fd7db8..181e6af 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+@@ -45,6 +45,17 @@
+ #define VCN_ENC_CMD_REG_WRITE 0x0000000b
+ #define VCN_ENC_CMD_REG_WAIT 0x0000000c
+
++enum engine_status_constants {
++ UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON = 0x2AAAA0,
++ UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON = 0x00000002,
++ UVD_STATUS__UVD_BUSY = 0x00000004,
++ GB_ADDR_CONFIG_DEFAULT = 0x26010011,
++ UVD_STATUS__IDLE = 0x2,
++ UVD_STATUS__BUSY = 0x5,
++ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF = 0x1,
++ UVD_STATUS__RBC_BUSY = 0x1,
++};
++
+ struct amdgpu_vcn {
+ struct amdgpu_bo *vcpu_bo;
+ void *cpu_addr;
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index ebc6bd7..5f660cd 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -502,6 +502,94 @@ static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev)
+ WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
+ }
+
++static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev)
++{
++ uint32_t data = 0;
++ int ret;
++
++ if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
++ data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
++ | 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
++ | 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
++ | 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
++ | 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
++ | 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
++ | 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
++ | 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
++ | 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
++ | 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
++ | 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);
++
++ WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
++ SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON, 0xFFFFFF, ret);
++ } else {
++ data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
++ | 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
++ | 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
++ | 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
++ | 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
++ | 1 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
++ | 1 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
++ | 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
++ | 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
++ | 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
++ | 1 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);
++ WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
++ SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0, 0xFFFFFFFF, ret);
++ }
++
++ /* polling UVD_PGFSM_STATUS to confirm UVDM_PWR_STATUS , UVDU_PWR_STATUS are 0 (power on) */
++
++ data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
++ data &= ~0x103;
++ if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
++ data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON | UVD_POWER_STATUS__UVD_PG_EN_MASK;
++
++ WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
++}
++
++static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev)
++{
++ uint32_t data = 0;
++ int ret;
++
++ if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
++ /* Before power off, this indicator has to be turned on */
++ data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
++ data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
++ data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
++ WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
++
++
++ data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
++ | 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
++ | 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
++ | 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
++ | 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
++ | 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
++ | 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
++ | 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
++ | 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
++ | 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
++ | 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);
++
++ WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
++
++ data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
++ | 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT
++ | 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
++ | 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT
++ | 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
++ | 2 << UVD_PGFSM_STATUS__UVDIL_PWR_STATUS__SHIFT
++ | 2 << UVD_PGFSM_STATUS__UVDIR_PWR_STATUS__SHIFT
++ | 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
++ | 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
++ | 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT
++ | 2 << UVD_PGFSM_STATUS__UVDW_PWR_STATUS__SHIFT);
++ SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFFFFF, ret);
++ }
++}
++
+ /**
+ * vcn_v1_0_start - start VCN block
+ *
+@@ -521,6 +609,7 @@ static int vcn_v1_0_start(struct amdgpu_device *adev)
+
+ vcn_v1_0_mc_resume(adev);
+
++ vcn_1_0_disable_static_power_gating(adev);
+ /* disable clock gating */
+ vcn_v1_0_disable_clock_gating(adev);
+
+@@ -703,8 +792,9 @@ static int vcn_v1_0_stop(struct amdgpu_device *adev)
+ ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+
+ /* enable clock gating */
+- vcn_v1_0_enable_clock_gating(adev);
+
++ vcn_v1_0_enable_clock_gating(adev);
++ vcn_1_0_enable_static_power_gating(adev);
+ return 0;
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4503-drm-amdgpu-Enable-VCN-static-PG-by-default-on-RV.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4503-drm-amdgpu-Enable-VCN-static-PG-by-default-on-RV.patch
new file mode 100644
index 00000000..eacb57b8
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4503-drm-amdgpu-Enable-VCN-static-PG-by-default-on-RV.patch
@@ -0,0 +1,29 @@
+From 32c934131ba12685af016d537aa7236f08e439a2 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Thu, 17 May 2018 16:03:47 +0800
+Subject: [PATCH 4503/5725] drm/amdgpu: Enable VCN static PG by default on RV
+
+Change-Id: Id36f84407cd1896434e47a7b34f741c6dcda51f3
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/soc15.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index 485cb43..9a7a85d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -713,7 +713,8 @@ static int soc15_common_early_init(void *handle)
+ AMD_CG_SUPPORT_SDMA_MGCG |
+ AMD_CG_SUPPORT_SDMA_LS |
+ AMD_CG_SUPPORT_VCN_MGCG;
+- adev->pg_flags = AMD_PG_SUPPORT_SDMA;
++
++ adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
+
+ if (adev->powerplay.pp_feature & PP_GFXOFF_MASK)
+ adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4504-drm-amdgpu-Add-runtime-VCN-PG-support.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4504-drm-amdgpu-Add-runtime-VCN-PG-support.patch
new file mode 100644
index 00000000..844a4312
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4504-drm-amdgpu-Add-runtime-VCN-PG-support.patch
@@ -0,0 +1,132 @@
+From 489c820a19bfcdb0b384d7bca7158427e2856664 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Wed, 16 May 2018 20:18:22 +0800
+Subject: [PATCH 4504/5725] drm/amdgpu: Add runtime VCN PG support
+
+Enable support for dynamically powering up/down VCN on demand.
+
+Change-Id: I41fce19c737d78294544b33ce219edb21769989a
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 16 +++++++++-------
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 32 +++++++++++++++++++++-----------
+ 2 files changed, 30 insertions(+), 18 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+index ab4aa04..21425669 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+@@ -215,11 +215,11 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
+ }
+
+ if (fences == 0) {
+- if (adev->pm.dpm_enabled) {
+- /* might be used when with pg/cg
++ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_uvd(adev, false);
+- */
+- }
++ else
++ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
++ AMD_PG_STATE_GATE);
+ } else {
+ schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
+ }
+@@ -231,9 +231,11 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
+ bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
+
+ if (set_clocks && adev->pm.dpm_enabled) {
+- /* might be used when with pg/cg
+- amdgpu_dpm_enable_uvd(adev, true);
+- */
++ if (adev->pm.dpm_enabled)
++ amdgpu_dpm_enable_uvd(adev, true);
++ else
++ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
++ AMD_PG_STATE_UNGATE);
+ }
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index 5f660cd..0f1570e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -35,7 +35,6 @@
+ #include "mmhub/mmhub_9_1_offset.h"
+ #include "mmhub/mmhub_9_1_sh_mask.h"
+
+-static int vcn_v1_0_start(struct amdgpu_device *adev);
+ static int vcn_v1_0_stop(struct amdgpu_device *adev);
+ static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
+ static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
+@@ -156,10 +155,6 @@ static int vcn_v1_0_hw_init(void *handle)
+ struct amdgpu_ring *ring = &adev->vcn.ring_dec;
+ int i, r;
+
+- r = vcn_v1_0_start(adev);
+- if (r)
+- goto done;
+-
+ ring->ready = true;
+ r = amdgpu_ring_test_ring(ring);
+ if (r) {
+@@ -195,11 +190,9 @@ static int vcn_v1_0_hw_fini(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_ring *ring = &adev->vcn.ring_dec;
+- int r;
+
+- r = vcn_v1_0_stop(adev);
+- if (r)
+- return r;
++ if (RREG32_SOC15(VCN, 0, mmUVD_STATUS))
++ vcn_v1_0_stop(adev);
+
+ ring->ready = false;
+
+@@ -791,7 +784,7 @@ static int vcn_v1_0_stop(struct amdgpu_device *adev)
+ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
+ ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+
+- /* enable clock gating */
++ WREG32_SOC15(VCN, 0, mmUVD_STATUS, 0);
+
+ vcn_v1_0_enable_clock_gating(adev);
+ vcn_1_0_enable_static_power_gating(adev);
+@@ -1216,6 +1209,23 @@ static void vcn_v1_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t coun
+ }
+ }
+
++static int vcn_v1_0_set_powergating_state(void *handle,
++ enum amd_powergating_state state)
++{
++ /* This doesn't actually powergate the VCN block.
++ * That's done in the dpm code via the SMC. This
++ * just re-inits the block as necessary. The actual
++ * gating still happens in the dpm code. We should
++ * revisit this when there is a cleaner line between
++ * the smc and the hw blocks
++ */
++ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
++
++ if (state == AMD_PG_STATE_GATE)
++ return vcn_v1_0_stop(adev);
++ else
++ return vcn_v1_0_start(adev);
++}
+
+ static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
+ .name = "vcn_v1_0",
+@@ -1234,7 +1244,7 @@ static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
+ .soft_reset = NULL /* vcn_v1_0_soft_reset */,
+ .post_soft_reset = NULL /* vcn_v1_0_post_soft_reset */,
+ .set_clockgating_state = vcn_v1_0_set_clockgating_state,
+- .set_powergating_state = NULL /* vcn_v1_0_set_powergating_state */,
++ .set_powergating_state = vcn_v1_0_set_powergating_state,
+ };
+
+ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4505-drm-amdgpu-rework-VM-state-machine-lock-handling-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4505-drm-amdgpu-rework-VM-state-machine-lock-handling-v2.patch
new file mode 100644
index 00000000..5cbd53ed
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4505-drm-amdgpu-rework-VM-state-machine-lock-handling-v2.patch
@@ -0,0 +1,299 @@
+From 28e3a7d57dba306b92dd09cf999e0cad064fcbf5 Mon Sep 17 00:00:00 2001
+From: christian koenig <christian.koenig@amd.com>
+Date: Thu, 19 Apr 2018 10:56:02 +0200
+Subject: [PATCH 4505/5725] drm/amdgpu: rework VM state machine lock handling
+ v2
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Only the moved state needs separate spin lock protection. All other
+states are protected by reserving the VM anyway.
+
+v2: fix some more incorrect cases
+
+Change-Id: I9cd5f107c46eea80091f8366234a6c3fb6f508be
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 66 +++++++++++-----------------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 4 +--
+ 2 files changed, 21 insertions(+), 49 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index caf5f61..048e39e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -119,9 +119,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
+ * is currently evicted. add the bo to the evicted list to make sure it
+ * is validated on next vm use to avoid fault.
+ * */
+- spin_lock(&vm->status_lock);
+ list_move_tail(&base->vm_status, &vm->evicted);
+- spin_unlock(&vm->status_lock);
+ }
+
+ /**
+@@ -228,7 +226,6 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct ttm_bo_global *glob = adev->mman.bdev.glob;
+ int r;
+
+- spin_lock(&vm->status_lock);
+ while (!list_empty(&vm->evicted)) {
+ struct amdgpu_vm_bo_base *bo_base;
+ struct amdgpu_bo *bo;
+@@ -236,10 +233,8 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ bo_base = list_first_entry(&vm->evicted,
+ struct amdgpu_vm_bo_base,
+ vm_status);
+- spin_unlock(&vm->status_lock);
+
+ bo = bo_base->bo;
+- BUG_ON(!bo);
+ if (bo->parent) {
+ r = validate(param, bo);
+ if (r)
+@@ -259,13 +254,14 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ return r;
+ }
+
+- spin_lock(&vm->status_lock);
+- if (bo->tbo.type != ttm_bo_type_kernel)
++ if (bo->tbo.type != ttm_bo_type_kernel) {
++ spin_lock(&vm->moved_lock);
+ list_move(&bo_base->vm_status, &vm->moved);
+- else
++ spin_unlock(&vm->moved_lock);
++ } else {
+ list_move(&bo_base->vm_status, &vm->relocated);
++ }
+ }
+- spin_unlock(&vm->status_lock);
+
+ return 0;
+ }
+@@ -279,13 +275,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ */
+ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
+ {
+- bool ready;
+-
+- spin_lock(&vm->status_lock);
+- ready = list_empty(&vm->evicted);
+- spin_unlock(&vm->status_lock);
+-
+- return ready;
++ return list_empty(&vm->evicted);
+ }
+
+ /**
+@@ -478,9 +468,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
+ pt->parent = amdgpu_bo_ref(parent->base.bo);
+
+ amdgpu_vm_bo_base_init(&entry->base, vm, pt);
+- spin_lock(&vm->status_lock);
+ list_move(&entry->base.vm_status, &vm->relocated);
+- spin_unlock(&vm->status_lock);
+ }
+
+ if (level < AMDGPU_VM_PTB) {
+@@ -927,10 +915,8 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev,
+ if (!entry->base.bo)
+ continue;
+
+- spin_lock(&vm->status_lock);
+ if (list_empty(&entry->base.vm_status))
+ list_add(&entry->base.vm_status, &vm->relocated);
+- spin_unlock(&vm->status_lock);
+ amdgpu_vm_invalidate_level(adev, vm, entry, level + 1);
+ }
+ }
+@@ -975,7 +961,6 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
+ params.func = amdgpu_vm_do_set_ptes;
+ }
+
+- spin_lock(&vm->status_lock);
+ while (!list_empty(&vm->relocated)) {
+ struct amdgpu_vm_bo_base *bo_base, *parent;
+ struct amdgpu_vm_pt *pt, *entry;
+@@ -985,13 +970,10 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
+ struct amdgpu_vm_bo_base,
+ vm_status);
+ list_del_init(&bo_base->vm_status);
+- spin_unlock(&vm->status_lock);
+
+ bo = bo_base->bo->parent;
+- if (!bo) {
+- spin_lock(&vm->status_lock);
++ if (!bo)
+ continue;
+- }
+
+ parent = list_first_entry(&bo->va, struct amdgpu_vm_bo_base,
+ bo_list);
+@@ -1000,12 +982,10 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
+
+ amdgpu_vm_update_pde(&params, vm, pt, entry);
+
+- spin_lock(&vm->status_lock);
+ if (!vm->use_cpu_for_update &&
+ (ndw - params.ib->length_dw) < 32)
+ break;
+ }
+- spin_unlock(&vm->status_lock);
+
+ if (vm->use_cpu_for_update) {
+ /* Flush HDP */
+@@ -1108,9 +1088,7 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
+ if (entry->huge) {
+ /* Add the entry to the relocated list to update it. */
+ entry->huge = false;
+- spin_lock(&p->vm->status_lock);
+ list_move(&entry->base.vm_status, &p->vm->relocated);
+- spin_unlock(&p->vm->status_lock);
+ }
+ return;
+ }
+@@ -1618,8 +1596,9 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
+ amdgpu_asic_flush_hdp(adev, NULL);
+ }
+
+- spin_lock(&vm->status_lock);
++ spin_lock(&vm->moved_lock);
+ list_del_init(&bo_va->base.vm_status);
++ spin_unlock(&vm->moved_lock);
+
+ /* If the BO is not in its preferred location add it back to
+ * the evicted list so that it gets validated again on the
+@@ -1629,7 +1608,6 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
+ !(bo->preferred_domains &
+ amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type)))
+ list_add_tail(&bo_va->base.vm_status, &vm->evicted);
+- spin_unlock(&vm->status_lock);
+
+ list_splice_init(&bo_va->invalids, &bo_va->valids);
+ bo_va->cleared = clear;
+@@ -1841,14 +1819,14 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
+ bool clear;
+ int r = 0;
+
+- spin_lock(&vm->status_lock);
++ spin_lock(&vm->moved_lock);
+ while (!list_empty(&vm->moved)) {
+ struct amdgpu_bo_va *bo_va;
+ struct reservation_object *resv;
+
+ bo_va = list_first_entry(&vm->moved,
+ struct amdgpu_bo_va, base.vm_status);
+- spin_unlock(&vm->status_lock);
++ spin_unlock(&vm->moved_lock);
+
+ resv = bo_va->base.bo->tbo.resv;
+
+@@ -1869,9 +1847,9 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
+ if (!clear && resv != vm->root.base.bo->tbo.resv)
+ reservation_object_unlock(resv);
+
+- spin_lock(&vm->status_lock);
++ spin_lock(&vm->moved_lock);
+ }
+- spin_unlock(&vm->status_lock);
++ spin_unlock(&vm->moved_lock);
+
+ return r;
+ }
+@@ -1933,10 +1911,10 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
+ amdgpu_vm_prt_get(adev);
+
+ if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
+- spin_lock(&vm->status_lock);
++ spin_lock(&vm->moved_lock);
+ if (list_empty(&bo_va->base.vm_status))
+ list_add(&bo_va->base.vm_status, &vm->moved);
+- spin_unlock(&vm->status_lock);
++ spin_unlock(&vm->moved_lock);
+ }
+ trace_amdgpu_vm_bo_map(bo_va, mapping);
+ }
+@@ -2246,9 +2224,9 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
+
+ list_del(&bo_va->base.bo_list);
+
+- spin_lock(&vm->status_lock);
++ spin_lock(&vm->moved_lock);
+ list_del(&bo_va->base.vm_status);
+- spin_unlock(&vm->status_lock);
++ spin_unlock(&vm->moved_lock);
+
+ list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
+ list_del(&mapping->list);
+@@ -2291,30 +2269,26 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
+
+ bo_base->moved = true;
+ if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
+- spin_lock(&bo_base->vm->status_lock);
+ if (bo->tbo.type == ttm_bo_type_kernel)
+ list_move(&bo_base->vm_status, &vm->evicted);
+ else
+ list_move_tail(&bo_base->vm_status,
+ &vm->evicted);
+- spin_unlock(&bo_base->vm->status_lock);
+ continue;
+ }
+
+ /* Don't add page tables to the moved state */
+ if (bo->tbo.type == ttm_bo_type_kernel) {
+- spin_lock(&bo_base->vm->status_lock);
+ if (list_empty(&bo_base->vm_status))
+ list_add(&bo_base->vm_status, &vm->relocated);
+- spin_unlock(&bo_base->vm->status_lock);
+
+ continue;
+ }
+
+- spin_lock(&bo_base->vm->status_lock);
++ spin_lock(&bo_base->vm->moved_lock);
+ if (list_empty(&bo_base->vm_status))
+ list_add(&bo_base->vm_status, &vm->moved);
+- spin_unlock(&bo_base->vm->status_lock);
++ spin_unlock(&bo_base->vm->moved_lock);
+ }
+ }
+
+@@ -2440,9 +2414,9 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ vm->va = RB_ROOT_CACHED;
+ for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
+ vm->reserved_vmid[i] = NULL;
+- spin_lock_init(&vm->status_lock);
+ INIT_LIST_HEAD(&vm->evicted);
+ INIT_LIST_HEAD(&vm->relocated);
++ spin_lock_init(&vm->moved_lock);
+ INIT_LIST_HEAD(&vm->moved);
+ INIT_LIST_HEAD(&vm->freed);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+index 3492ab7..7a4c967 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+@@ -171,9 +171,6 @@ struct amdgpu_vm {
+ /* tree of virtual addresses mapped */
+ struct rb_root_cached va;
+
+- /* protecting invalidated */
+- spinlock_t status_lock;
+-
+ /* BOs who needs a validation */
+ struct list_head evicted;
+
+@@ -182,6 +179,7 @@ struct amdgpu_vm {
+
+ /* BOs moved, but not yet updated in the PT */
+ struct list_head moved;
++ spinlock_t moved_lock;
+
+ /* BO mappings freed, but not yet updated in the PT */
+ struct list_head freed;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4506-drm-amdgpu-cleanup-amdgpu_vm_validate_pt_bos-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4506-drm-amdgpu-cleanup-amdgpu_vm_validate_pt_bos-v2.patch
new file mode 100644
index 00000000..c2f07200
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4506-drm-amdgpu-cleanup-amdgpu_vm_validate_pt_bos-v2.patch
@@ -0,0 +1,72 @@
+From c0d03122ca4119c4c19e6753930881f41b7cbd23 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Thu, 19 Apr 2018 11:02:54 +0200
+Subject: [PATCH 4506/5725] drm/amdgpu: cleanup amdgpu_vm_validate_pt_bos v2
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Use list_for_each_entry_safe here.
+
+v2: Drop the optimization, it doesn't work as expected.
+
+Change-Id: I7781e41382efc2f3ec8949b1abfd384953ca26ec
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 19 +++++++------------
+ 1 file changed, 7 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 048e39e..47dfcdb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -224,21 +224,16 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ void *param)
+ {
+ struct ttm_bo_global *glob = adev->mman.bdev.glob;
+- int r;
+-
+- while (!list_empty(&vm->evicted)) {
+- struct amdgpu_vm_bo_base *bo_base;
+- struct amdgpu_bo *bo;
++ struct amdgpu_vm_bo_base *bo_base, *tmp;
++ int r = 0;
+
+- bo_base = list_first_entry(&vm->evicted,
+- struct amdgpu_vm_bo_base,
+- vm_status);
++ list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
++ struct amdgpu_bo *bo = bo_base->bo;
+
+- bo = bo_base->bo;
+ if (bo->parent) {
+ r = validate(param, bo);
+ if (r)
+- return r;
++ break;
+
+ spin_lock(&glob->lru_lock);
+ ttm_bo_move_to_lru_tail(&bo->tbo);
+@@ -251,7 +246,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ vm->use_cpu_for_update) {
+ r = amdgpu_bo_kmap(bo, NULL);
+ if (r)
+- return r;
++ break;
+ }
+
+ if (bo->tbo.type != ttm_bo_type_kernel) {
+@@ -263,7 +258,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ }
+ }
+
+- return 0;
++ return r;
+ }
+
+ /**
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4507-drm-amdgpu-further-optimize-amdgpu_vm_handle_moved.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4507-drm-amdgpu-further-optimize-amdgpu_vm_handle_moved.patch
new file mode 100644
index 00000000..c0fc390a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4507-drm-amdgpu-further-optimize-amdgpu_vm_handle_moved.patch
@@ -0,0 +1,80 @@
+From 9542776d2b5d87c4c6e736867ef4254d58d2e041 Mon Sep 17 00:00:00 2001
+From: christian koenig <christian.koenig@amd.com>
+Date: Thu, 19 Apr 2018 11:08:24 +0200
+Subject: [PATCH 4507/5725] drm/amdgpu: further optimize amdgpu_vm_handle_moved
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Splice the moved list to a local one to avoid taking the lock over and
+over again.
+
+Change-Id: I690a56f9ef5914f781aa584407d9abff2c77f99f
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 29 +++++++++++++++--------------
+ 1 file changed, 15 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 47dfcdb..a8dbd20 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -1811,19 +1811,18 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
+ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm)
+ {
++ struct amdgpu_bo_va *bo_va, *tmp;
++ struct list_head moved;
+ bool clear;
+- int r = 0;
++ int r;
+
++ INIT_LIST_HEAD(&moved);
+ spin_lock(&vm->moved_lock);
+- while (!list_empty(&vm->moved)) {
+- struct amdgpu_bo_va *bo_va;
+- struct reservation_object *resv;
+-
+- bo_va = list_first_entry(&vm->moved,
+- struct amdgpu_bo_va, base.vm_status);
+- spin_unlock(&vm->moved_lock);
++ list_splice_init(&vm->moved, &moved);
++ spin_unlock(&vm->moved_lock);
+
+- resv = bo_va->base.bo->tbo.resv;
++ list_for_each_entry_safe(bo_va, tmp, &moved, base.vm_status) {
++ struct reservation_object *resv = bo_va->base.bo->tbo.resv;
+
+ /* Per VM BOs never need to bo cleared in the page tables */
+ if (resv == vm->root.base.bo->tbo.resv)
+@@ -1836,17 +1835,19 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
+ clear = true;
+
+ r = amdgpu_vm_bo_update(adev, bo_va, clear);
+- if (r)
+- return r;
++ if (r) {
++ spin_lock(&vm->moved_lock);
++ list_splice(&moved, &vm->moved);
++ spin_unlock(&vm->moved_lock);
++ return r;
++ }
+
+ if (!clear && resv != vm->root.base.bo->tbo.resv)
+ reservation_object_unlock(resv);
+
+- spin_lock(&vm->moved_lock);
+ }
+- spin_unlock(&vm->moved_lock);
+
+- return r;
++ return 0;
+ }
+
+ /**
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4508-drm-amdgpu-kmap-PDs-PTs-in-amdgpu_vm_update_director.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4508-drm-amdgpu-kmap-PDs-PTs-in-amdgpu_vm_update_director.patch
new file mode 100644
index 00000000..b9f6eb77
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4508-drm-amdgpu-kmap-PDs-PTs-in-amdgpu_vm_update_director.patch
@@ -0,0 +1,55 @@
+From 9aef9a096bba95093015d9fea547a7b5e15d6142 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Thu, 19 Apr 2018 13:58:42 +0200
+Subject: [PATCH 4508/5725] drm/amdgpu: kmap PDs/PTs in
+ amdgpu_vm_update_directories
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+In theory it is possible that PDs/PTs can move without eviction.
+
+Change-Id: I684783ce1ec306d2856f9e8a902d3fd88a0a6607
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 15 ++++++++-------
+ 1 file changed, 8 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index a8dbd20..cbf3a2f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -242,13 +242,6 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ spin_unlock(&glob->lru_lock);
+ }
+
+- if (bo->tbo.type == ttm_bo_type_kernel &&
+- vm->use_cpu_for_update) {
+- r = amdgpu_bo_kmap(bo, NULL);
+- if (r)
+- break;
+- }
+-
+ if (bo->tbo.type != ttm_bo_type_kernel) {
+ spin_lock(&vm->moved_lock);
+ list_move(&bo_base->vm_status, &vm->moved);
+@@ -941,6 +934,14 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
+ params.adev = adev;
+
+ if (vm->use_cpu_for_update) {
++ struct amdgpu_vm_bo_base *bo_base;
++
++ list_for_each_entry(bo_base, &vm->relocated, vm_status) {
++ r = amdgpu_bo_kmap(bo_base->bo, NULL);
++ if (unlikely(r))
++ return r;
++ }
++
+ r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
+ if (unlikely(r))
+ return r;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4509-drm-amdgpu-consistenly-use-VM-moved-flag.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4509-drm-amdgpu-consistenly-use-VM-moved-flag.patch
new file mode 100644
index 00000000..0b6299ee
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4509-drm-amdgpu-consistenly-use-VM-moved-flag.patch
@@ -0,0 +1,91 @@
+From f348c4327234195ee2100312e4063e0c560064de Mon Sep 17 00:00:00 2001
+From: christian koenig <christian.koenig@amd.com>
+Date: Thu, 19 Apr 2018 14:22:56 +0200
+Subject: [PATCH 4509/5725] drm/amdgpu: consistenly use VM moved flag
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Instead of sometimes checking if the vm_status is empty, use the moved
+flag and also reset it when the BO leaves the state machine.
+
+Change-Id: I6d617e5ad799fbbaf6e85b178e267bf689df52c1
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 28 ++++++++++++++++++----------
+ 1 file changed, 18 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index cbf3a2f..7ddbfe3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -903,8 +903,8 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev,
+ if (!entry->base.bo)
+ continue;
+
+- if (list_empty(&entry->base.vm_status))
+- list_add(&entry->base.vm_status, &vm->relocated);
++ if (!entry->base.moved)
++ list_move(&entry->base.vm_status, &vm->relocated);
+ amdgpu_vm_invalidate_level(adev, vm, entry, level + 1);
+ }
+ }
+@@ -965,6 +965,7 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
+ bo_base = list_first_entry(&vm->relocated,
+ struct amdgpu_vm_bo_base,
+ vm_status);
++ bo_base->moved = false;
+ list_del_init(&bo_base->vm_status);
+
+ bo = bo_base->bo->parent;
+@@ -1907,10 +1908,10 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
+ if (mapping->flags & AMDGPU_PTE_PRT)
+ amdgpu_vm_prt_get(adev);
+
+- if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
++ if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv &&
++ !bo_va->base.moved) {
+ spin_lock(&vm->moved_lock);
+- if (list_empty(&bo_va->base.vm_status))
+- list_add(&bo_va->base.vm_status, &vm->moved);
++ list_move(&bo_va->base.vm_status, &vm->moved);
+ spin_unlock(&vm->moved_lock);
+ }
+ trace_amdgpu_vm_bo_map(bo_va, mapping);
+@@ -2263,6 +2264,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
+
+ list_for_each_entry(bo_base, &bo->va, bo_list) {
+ struct amdgpu_vm *vm = bo_base->vm;
++ bool was_moved = bo_base->moved;
+
+ bo_base->moved = true;
+ if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
+@@ -2281,11 +2283,17 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
+
+ continue;
+ }
+-
+- spin_lock(&bo_base->vm->moved_lock);
+- if (list_empty(&bo_base->vm_status))
+- list_add(&bo_base->vm_status, &vm->moved);
+- spin_unlock(&bo_base->vm->moved_lock);
++
++ if (was_moved)
++ continue;
++
++ if (bo->tbo.type == ttm_bo_type_kernel) {
++ list_move(&bo_base->vm_status, &vm->relocated);
++ } else {
++ spin_lock(&bo_base->vm->moved_lock);
++ list_move(&bo_base->vm_status, &vm->moved);
++ spin_unlock(&bo_base->vm->moved_lock);
++ }
+ }
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4510-drm-amdgpu-move-VM-BOs-on-LRU-again.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4510-drm-amdgpu-move-VM-BOs-on-LRU-again.patch
new file mode 100644
index 00000000..470f4284
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4510-drm-amdgpu-move-VM-BOs-on-LRU-again.patch
@@ -0,0 +1,96 @@
+From af1e7bb6a6e39fb16ca3c1280c72945de913cb2e Mon Sep 17 00:00:00 2001
+From: christian koenig <christian.koenig@amd.com>
+Date: Thu, 19 Apr 2018 15:01:12 +0200
+Subject: [PATCH 4510/5725] drm/amdgpu: move VM BOs on LRU again
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Move all BOs belonging to a VM on the LRU with every submission.
+
+Change-Id: I3cf9aa8aad9f0c7de66c5f0639a2e7518b507318
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 28 +++++++++++++++++++++++-----
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 3 +++
+ 2 files changed, 26 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 7ddbfe3..e2f2892 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -251,6 +251,19 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ }
+ }
+
++ spin_lock(&glob->lru_lock);
++ list_for_each_entry(bo_base, &vm->idle, vm_status) {
++ struct amdgpu_bo *bo = bo_base->bo;
++
++ if (!bo->parent)
++ continue;
++
++ ttm_bo_move_to_lru_tail(&bo->tbo);
++ if (bo->shadow)
++ ttm_bo_move_to_lru_tail(&bo->shadow->tbo);
++ }
++ spin_unlock(&glob->lru_lock);
++
+ return r;
+ }
+
+@@ -966,7 +979,7 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
+ struct amdgpu_vm_bo_base,
+ vm_status);
+ bo_base->moved = false;
+- list_del_init(&bo_base->vm_status);
++ list_move(&bo_base->vm_status, &vm->idle);
+
+ bo = bo_base->bo->parent;
+ if (!bo)
+@@ -1601,10 +1614,14 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
+ * the evicted list so that it gets validated again on the
+ * next command submission.
+ */
+- if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv &&
+- !(bo->preferred_domains &
+- amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type)))
+- list_add_tail(&bo_va->base.vm_status, &vm->evicted);
++ if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
++ uint32_t mem_type = bo->tbo.mem.mem_type;
++
++ if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type)))
++ list_add_tail(&bo_va->base.vm_status, &vm->evicted);
++ else
++ list_add(&bo_va->base.vm_status, &vm->idle);
++ }
+
+ list_splice_init(&bo_va->invalids, &bo_va->valids);
+ bo_va->cleared = clear;
+@@ -2423,6 +2440,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ INIT_LIST_HEAD(&vm->relocated);
+ spin_lock_init(&vm->moved_lock);
+ INIT_LIST_HEAD(&vm->moved);
++ INIT_LIST_HEAD(&vm->idle);
+ INIT_LIST_HEAD(&vm->freed);
+
+ /* create scheduler entity for page table updates */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+index 7a4c967..617d84b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+@@ -181,6 +181,9 @@ struct amdgpu_vm {
+ struct list_head moved;
+ spinlock_t moved_lock;
+
++ /* All BOs of this VM not currently in the state machine */
++ struct list_head idle;
++
+ /* BO mappings freed, but not yet updated in the PT */
+ struct list_head freed;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4511-drm-amdgpu-add-rcu_barrier-after-entity-fini.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4511-drm-amdgpu-add-rcu_barrier-after-entity-fini.patch
new file mode 100644
index 00000000..d36af629
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4511-drm-amdgpu-add-rcu_barrier-after-entity-fini.patch
@@ -0,0 +1,60 @@
+From 7f44c8e51ddeeb40383070b71026fb859d449ae1 Mon Sep 17 00:00:00 2001
+From: Emily Deng <Emily.Deng@amd.com>
+Date: Wed, 23 May 2018 15:53:03 +0800
+Subject: [PATCH 4511/5725] drm/amdgpu: add rcu_barrier after entity fini
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Freeing a fence from amdgpu_fence_slab requires two call_rcu passes. To keep
+amdgpu_fence_slab_fini from calling kmem_cache_destroy(amdgpu_fence_slab) before
+kmem_cache_free(amdgpu_fence_slab, fence) has run, add an rcu_barrier after drm_sched_entity_fini.
+
+The call trace leading to kmem_cache_free(amdgpu_fence_slab, fence) is as follows:
+1.drm_sched_entity_fini ->
+drm_sched_entity_cleanup ->
+dma_fence_put(entity->last_scheduled) ->
+drm_sched_fence_release_finished ->
+drm_sched_fence_release_scheduled ->
+call_rcu(&fence->finished.rcu, drm_sched_fence_free)
+
+2.drm_sched_fence_free ->
+dma_fence_put(fence->parent) ->
+amdgpu_fence_release ->
+call_rcu(&f->rcu, amdgpu_fence_free) ->
+kmem_cache_free(amdgpu_fence_slab, fence);
+
+v2: put the barrier before the kmem_cache_destroy
+v3: put the dma_fence_put(fence->parent) before call_rcu in
+drm_sched_fence_release_scheduled
+
+Change-Id: Ice5c1eab913b9202f44ebd43576d2ff33755f990
+Signed-off-by: Emily Deng <Emily.Deng@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/scheduler/sched_fence.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
+index b3d7b82..a3dac29 100644
+--- a/drivers/gpu/drm/scheduler/sched_fence.c
++++ b/drivers/gpu/drm/scheduler/sched_fence.c
+@@ -98,7 +98,6 @@ static void drm_sched_fence_free(struct rcu_head *rcu)
+ struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
+ struct drm_sched_fence *fence = to_drm_sched_fence(f);
+
+- dma_fence_put(fence->parent);
+ kmem_cache_free(sched_fence_slab, fence);
+ }
+
+@@ -114,6 +113,7 @@ static void drm_sched_fence_release_scheduled(struct dma_fence *f)
+ {
+ struct drm_sched_fence *fence = to_drm_sched_fence(f);
+
++ dma_fence_put(fence->parent);
+ call_rcu(&fence->finished.rcu, drm_sched_fence_free);
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4512-drm-amdgpu-Remove-unused-variable-in-amdgpu_device_g.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4512-drm-amdgpu-Remove-unused-variable-in-amdgpu_device_g.patch
new file mode 100644
index 00000000..a8874c4a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4512-drm-amdgpu-Remove-unused-variable-in-amdgpu_device_g.patch
@@ -0,0 +1,31 @@
+From 8ad8f64c142a53111e0802863e33b3dab4694c6b Mon Sep 17 00:00:00 2001
+From: Harry Wentland <harry.wentland@amd.com>
+Date: Wed, 23 May 2018 13:05:00 -0400
+Subject: [PATCH 4512/5725] drm/amdgpu: Remove unused variable in
+ amdgpu_device_gpu_recover
+
+The state variable is unused and triggers a compiler warning.
+
+Change-Id: I3bb0b2b477273cf1799d682959033beb5134f851
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Reviewed-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 1d20967..4d2637e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -3248,7 +3248,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
+ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ struct amdgpu_job *job, bool force)
+ {
+- struct drm_atomic_state *state = NULL;
+ int i, r, resched;
+
+ if (!force && !amdgpu_device_ip_check_soft_reset(adev)) {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4513-drm-amdkfd-sriov-Put-the-pre-and-post-reset-in-exclu.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4513-drm-amdkfd-sriov-Put-the-pre-and-post-reset-in-exclu.patch
new file mode 100644
index 00000000..04ef754d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4513-drm-amdkfd-sriov-Put-the-pre-and-post-reset-in-exclu.patch
@@ -0,0 +1,84 @@
+From 0a86052460bbe6b1d5b9d50455845ab77ade39da Mon Sep 17 00:00:00 2001
+From: Emily Deng <Emily.Deng@amd.com>
+Date: Fri, 25 May 2018 10:44:45 +0800
+Subject: [PATCH 4513/5725] drm/amdkfd/sriov:Put the pre and post reset in
+ exclusive mode v2
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+For SR-IOV, the amdkfd_pre_reset and amdkfd_post_reset calls need to be
+made in exclusive mode.
+
+v2: To unify the code path for SR-IOV and bare metal, move
+the original pre and post reset into amdgpu_device_reset.
+
+Change-Id: Ic6f20e98fb5de1946b56d853b96d7271632d63c6
+Signed-off-by: Emily Deng <Emily.Deng@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 4d2637e..0e416d1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -3125,6 +3125,8 @@ static int amdgpu_device_reset(struct amdgpu_device *adev)
+ bool need_full_reset, vram_lost = 0;
+ int r;
+
++ amdgpu_amdkfd_pre_reset(adev);
++
+ need_full_reset = amdgpu_device_ip_need_full_reset(adev);
+
+ if (!need_full_reset) {
+@@ -3183,6 +3185,8 @@ static int amdgpu_device_reset(struct amdgpu_device *adev)
+ }
+ }
+
++ amdgpu_amdkfd_post_reset(adev);
++
+ if (!r && ((need_full_reset && !(adev->flags & AMD_IS_APU)) || vram_lost))
+ r = amdgpu_device_handle_vram_lost(adev);
+
+@@ -3209,6 +3213,8 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
+ if (r)
+ return r;
+
++ amdgpu_amdkfd_pre_reset(adev);
++
+ /* Resume IP prior to SMC */
+ r = amdgpu_device_ip_reinit_early_sriov(adev);
+ if (r)
+@@ -3224,6 +3230,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
+
+ amdgpu_irq_gpu_reset_resume_helper(adev);
+ r = amdgpu_ib_ring_tests(adev);
++ amdgpu_amdkfd_post_reset(adev);
+
+ error:
+ amdgpu_virt_release_full_gpu(adev, true);
+@@ -3267,9 +3274,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ atomic_inc(&adev->gpu_reset_counter);
+ adev->in_gpu_reset = 1;
+
+- /* Block kfd */
+- amdgpu_amdkfd_pre_reset(adev);
+-
+ /* block TTM */
+ resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
+
+@@ -3326,8 +3330,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ dev_info(adev->dev, "GPU reset(%d) successed!\n",atomic_read(&adev->gpu_reset_counter));
+ }
+
+- /*unlock kfd */
+- amdgpu_amdkfd_post_reset(adev);
+ amdgpu_vf_error_trans_all(adev);
+ adev->in_gpu_reset = 0;
+ mutex_unlock(&adev->lock_reset);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4514-drm-amdgpu-pp-remove-duplicate-assignment.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4514-drm-amdgpu-pp-remove-duplicate-assignment.patch
new file mode 100644
index 00000000..ef09eca4
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4514-drm-amdgpu-pp-remove-duplicate-assignment.patch
@@ -0,0 +1,31 @@
+From 0ee7084c714dead88e20e0ad9df1de97683470b2 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Mon, 21 May 2018 10:16:28 -0500
+Subject: [PATCH 4514/5725] drm/amdgpu/pp: remove duplicate assignment
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The is_dpm_running callback was assigned the same value
+twice. Drop the duplicate.
+
+Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
+index a40f714..2de4895 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
+@@ -2379,6 +2379,5 @@ const struct pp_smumgr_func vegam_smu_funcs = {
+ .update_sclk_threshold = vegam_update_sclk_threshold,
+ .is_hw_avfs_present = vegam_is_hw_avfs_present,
+ .thermal_avfs_enable = vegam_thermal_avfs_enable,
+- .is_dpm_running = vegam_is_dpm_running,
+ .thermal_setup_fan_table = vegam_thermal_setup_fan_table,
+ };
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4515-drm-amdgpu-Update-GFX-info-structure-to-match-what-v.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4515-drm-amdgpu-Update-GFX-info-structure-to-match-what-v.patch
new file mode 100644
index 00000000..ceeb5059
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4515-drm-amdgpu-Update-GFX-info-structure-to-match-what-v.patch
@@ -0,0 +1,59 @@
+From 0fc2e70b9160580bd55e2529fba3659512878cb4 Mon Sep 17 00:00:00 2001
+From: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Date: Tue, 22 May 2018 11:45:41 -0400
+Subject: [PATCH 4515/5725] drm/amdgpu: Update GFX info structure to match what
+ vega20 used
+
+Update to the latest version from the vbios team.
+
+Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 8 +++-----
+ drivers/gpu/drm/amd/include/atomfirmware.h | 3 ++-
+ 2 files changed, 5 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+index 7014d58..2369158 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+@@ -354,11 +354,9 @@ int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev)
+ le16_to_cpu(gfx_info->v24.gc_gsprim_buff_depth);
+ adev->gfx.config.double_offchip_lds_buf =
+ gfx_info->v24.gc_double_offchip_lds_buffer;
+- adev->gfx.cu_info.wave_front_size = gfx_info->v24.gc_wave_size;
+- adev->gfx.cu_info.max_waves_per_simd =
+- le16_to_cpu(gfx_info->v24.gc_max_waves_per_simd);
+- adev->gfx.cu_info.max_scratch_slots_per_cu =
+- gfx_info->v24.gc_max_scratch_slots_per_cu;
++ adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v24.gc_wave_size);
++ adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v24.gc_max_waves_per_simd);
++ adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v24.gc_max_scratch_slots_per_cu;
+ adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v24.gc_lds_size);
+ return 0;
+ default:
+diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
+index fd5e80c..c6c1666 100644
+--- a/drivers/gpu/drm/amd/include/atomfirmware.h
++++ b/drivers/gpu/drm/amd/include/atomfirmware.h
+@@ -1240,7 +1240,6 @@ struct atom_gfx_info_v2_4 {
+ uint8_t active_cu_per_sh;
+ uint8_t active_rb_per_se;
+ uint16_t gcgoldenoffset;
+- uint32_t rm21_sram_vmin_value;
+ uint16_t gc_num_gprs;
+ uint16_t gc_gsprim_buff_depth;
+ uint16_t gc_parameter_cache_depth;
+@@ -1251,6 +1250,8 @@ struct atom_gfx_info_v2_4 {
+ uint8_t gc_gs_table_depth;
+ uint8_t gc_double_offchip_lds_buffer;
+ uint8_t gc_max_scratch_slots_per_cu;
++ uint32_t sram_rm_fuses_val;
++ uint32_t sram_custom_rm_fuses_val;
+ };
+
+ /*
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4516-drm-amd-display-Remove-use-of-division-operator-for-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4516-drm-amd-display-Remove-use-of-division-operator-for-.patch
new file mode 100644
index 00000000..d7ec1464
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4516-drm-amd-display-Remove-use-of-division-operator-for-.patch
@@ -0,0 +1,35 @@
+From c5d1ed3079e37b5f11fb7e6d70cfb0084699374f Mon Sep 17 00:00:00 2001
+From: David Francis <David.Francis@amd.com>
+Date: Tue, 22 May 2018 16:01:08 -0400
+Subject: [PATCH 4516/5725] drm/amd/display: Remove use of division operator
+ for long longs
+
+In fixed31_32.h, dc_fixpt_shl used '/' to divide one long long int by
+another long long int. As there is no native long long division,
+gcc emits a call to its own helper function, but the kernel build
+does not link the library that provides it. To avoid this, use
+bitwise operators instead of '/'.
+
+Signed-off-by: David Francis <David.Francis@amd.com>
+Reviewed-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/include/fixed31_32.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h
+index 76f64e9..bb0d4eb 100644
+--- a/drivers/gpu/drm/amd/display/include/fixed31_32.h
++++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h
+@@ -209,7 +209,7 @@ static inline struct fixed31_32 dc_fixpt_clamp(
+ static inline struct fixed31_32 dc_fixpt_shl(struct fixed31_32 arg, unsigned char shift)
+ {
+ ASSERT(((arg.value >= 0) && (arg.value <= LLONG_MAX >> shift)) ||
+- ((arg.value < 0) && (arg.value >= (LLONG_MIN / (1LL << shift)))));
++ ((arg.value < 0) && (arg.value >= ~(LLONG_MAX >> shift))));
+
+ arg.value = arg.value << shift;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4517-drm-amd-display-Implement-dm_pp_get_clock_levels_by_.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4517-drm-amd-display-Implement-dm_pp_get_clock_levels_by_.patch
new file mode 100644
index 00000000..cfe6b8b1
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4517-drm-amd-display-Implement-dm_pp_get_clock_levels_by_.patch
@@ -0,0 +1,82 @@
+From e456b76c45edf3617d4151d52b74041694780e45 Mon Sep 17 00:00:00 2001
+From: Harry Wentland <harry.wentland@amd.com>
+Date: Fri, 18 May 2018 17:07:06 -0400
+Subject: [PATCH 4517/5725] drm/amd/display: Implement
+ dm_pp_get_clock_levels_by_type_with_latency
+
+This is required so we use the correct minimum clocks for Vega. Without
+this, pplib will never be able to enter the lowest clock states.
+
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../drm/amd/display/amdgpu_dm/amdgpu_dm_services.c | 46 +++++++++++++++++++++-
+ 1 file changed, 44 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+index 0229c7ed..ead3d21 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+@@ -234,6 +234,34 @@ static void pp_to_dc_clock_levels(
+ }
+ }
+
++static void pp_to_dc_clock_levels_with_latency(
++ const struct pp_clock_levels_with_latency *pp_clks,
++ struct dm_pp_clock_levels_with_latency *clk_level_info,
++ enum dm_pp_clock_type dc_clk_type)
++{
++ uint32_t i;
++
++ if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
++ DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
++ DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
++ pp_clks->num_levels,
++ DM_PP_MAX_CLOCK_LEVELS);
++
++ clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
++ } else
++ clk_level_info->num_levels = pp_clks->num_levels;
++
++ DRM_DEBUG("DM_PPLIB: values for %s clock\n",
++ DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
++
++ for (i = 0; i < clk_level_info->num_levels; i++) {
++ DRM_DEBUG("DM_PPLIB:\t %d\n", pp_clks->data[i].clocks_in_khz);
++ /* translate 10kHz to kHz */
++ clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
++		clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us;
++ }
++}
++
+ bool dm_pp_get_clock_levels_by_type(
+ const struct dc_context *ctx,
+ enum dm_pp_clock_type clk_type,
+@@ -311,8 +339,22 @@ bool dm_pp_get_clock_levels_by_type_with_latency(
+ enum dm_pp_clock_type clk_type,
+ struct dm_pp_clock_levels_with_latency *clk_level_info)
+ {
+- /* TODO: to be implemented */
+- return false;
++ struct amdgpu_device *adev = ctx->driver_context;
++ void *pp_handle = adev->powerplay.pp_handle;
++ struct pp_clock_levels_with_latency pp_clks = { 0 };
++ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
++
++ if (!pp_funcs->get_clock_by_type_with_latency)
++ return false;
++
++ if (pp_funcs->get_clock_by_type_with_latency(pp_handle,
++ dc_to_pp_clock_type(clk_type),
++ &pp_clks))
++ return false;
++
++ pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type);
++
++ return true;
+ }
+
+ bool dm_pp_get_clock_levels_by_type_with_voltage(
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4518-drm-amdgpu-vcn_v1_0_is_idle-can-be-static.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4518-drm-amdgpu-vcn_v1_0_is_idle-can-be-static.patch
new file mode 100644
index 00000000..80617880
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4518-drm-amdgpu-vcn_v1_0_is_idle-can-be-static.patch
@@ -0,0 +1,36 @@
+From 55cba7eb3de5dd970c8b090e2456b0d111efff36 Mon Sep 17 00:00:00 2001
+From: kbuild test robot <fengguang.wu@intel.com>
+Date: Fri, 25 May 2018 02:54:45 +0800
+Subject: [PATCH 4518/5725] drm/amdgpu: vcn_v1_0_is_idle() can be static
+
+Fixes: 9b4c412a654c ("drm/amdgpu: Add static CG control for VCN on RV")
+Signed-off-by: kbuild test robot <fengguang.wu@intel.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index 0f1570e..7756a93 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -791,14 +791,14 @@ static int vcn_v1_0_stop(struct amdgpu_device *adev)
+ return 0;
+ }
+
+-bool vcn_v1_0_is_idle(void *handle)
++static bool vcn_v1_0_is_idle(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == 0x2);
+ }
+
+-int vcn_v1_0_wait_for_idle(void *handle)
++static int vcn_v1_0_wait_for_idle(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int ret = 0;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4519-drm-amdkfd-Fix-a-copy-error-when-exit-compute-profil.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4519-drm-amdkfd-Fix-a-copy-error-when-exit-compute-profil.patch
new file mode 100644
index 00000000..9fc39b45
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4519-drm-amdkfd-Fix-a-copy-error-when-exit-compute-profil.patch
@@ -0,0 +1,33 @@
+From b54f5ca3b2774afa0564607fcac003f356d87df6 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Fri, 18 May 2018 11:39:45 +0800
+Subject: [PATCH 4519/5725] drm/amdkfd: Fix a copy error when exit compute
+ profile mode
+
+Change-Id: I75fe5d92cc9b0b5e442769f0dedb47c207913968
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+
+Conflicts:
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index e2f2892..1fcc586 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -2694,7 +2694,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+ /* Last KFD VM: enable graphics power profile */
+ if (adev->powerplay.pp_funcs->switch_power_profile)
+ amdgpu_dpm_switch_power_profile(adev,
+- PP_SMC_POWER_PROFILE_COMPUTE, true);
++ PP_SMC_POWER_PROFILE_COMPUTE, false);
+ }
+ mutex_unlock(&id_mgr->lock);
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4520-drm-amdkfd-Add-debugfs-interface-to-trigger-HWS-hang.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4520-drm-amdkfd-Add-debugfs-interface-to-trigger-HWS-hang.patch
new file mode 100644
index 00000000..b6234e83
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4520-drm-amdkfd-Add-debugfs-interface-to-trigger-HWS-hang.patch
@@ -0,0 +1,201 @@
+From 4cab61cd826409a97670a05d6e4bdf7557298b93 Mon Sep 17 00:00:00 2001
+From: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Date: Tue, 8 May 2018 18:30:56 -0400
+Subject: [PATCH 4520/5725] drm/amdkfd: Add debugfs interface to trigger HWS
+ hang
+
+Change-Id: I7c08975b93a734d3075654edecd716db3a8ee7ea
+Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c | 48 ++++++++++++++++++++++
+ drivers/gpu/drm/amd/amdkfd/kfd_device.c | 23 +++++++++++
+ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 12 ++++++
+ drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 26 ++++++++++++
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 4 ++
+ 5 files changed, 113 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
+index 4bd6ebf..ab37d36 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
+@@ -21,6 +21,8 @@
+ */
+
+ #include <linux/debugfs.h>
++#include <linux/uaccess.h>
++
+ #include "kfd_priv.h"
+
+ static struct dentry *debugfs_root;
+@@ -32,6 +34,38 @@ static int kfd_debugfs_open(struct inode *inode, struct file *file)
+ return single_open(file, show, NULL);
+ }
+
++static ssize_t kfd_debugfs_hang_hws_write(struct file *file,
++ const char __user *user_buf, size_t size, loff_t *ppos)
++{
++ struct kfd_dev *dev;
++ char tmp[16];
++ uint32_t gpu_id;
++ int ret = -EINVAL;
++
++ memset(tmp, 0, 16);
++ if (size >= 16) {
++ pr_err("Invalid input for gpu id.\n");
++ goto out;
++ }
++ if (copy_from_user(tmp, user_buf, size)) {
++ ret = -EFAULT;
++ goto out;
++ }
++ if (kstrtoint(tmp, 10, &gpu_id)) {
++ pr_err("Invalid input for gpu id.\n");
++ goto out;
++ }
++ dev = kfd_device_by_id(gpu_id);
++ if (dev) {
++ kfd_debugfs_hang_hws(dev);
++ ret = size;
++ } else
++ pr_err("Cannot find device %d.\n", gpu_id);
++
++out:
++ return ret;
++}
++
+ static const struct file_operations kfd_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = kfd_debugfs_open,
+@@ -40,6 +74,15 @@ static const struct file_operations kfd_debugfs_fops = {
+ .release = single_release,
+ };
+
++static const struct file_operations kfd_debugfs_hang_hws_fops = {
++ .owner = THIS_MODULE,
++ .open = kfd_debugfs_open,
++ .read = seq_read,
++ .write = kfd_debugfs_hang_hws_write,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
+ void kfd_debugfs_init(void)
+ {
+ struct dentry *ent;
+@@ -65,6 +108,11 @@ void kfd_debugfs_init(void)
+ ent = debugfs_create_file("rls", S_IFREG | 0444, debugfs_root,
+ kfd_debugfs_rls_by_device,
+ &kfd_debugfs_fops);
++
++ ent = debugfs_create_file("hang_hws", S_IFREG | 0644, debugfs_root,
++ NULL,
++ &kfd_debugfs_hang_hws_fops);
++
+ if (!ent)
+ pr_warn("Failed to create rls in kfd debugfs\n");
+ }
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+index 4ae2b07..0c4703c 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -945,3 +945,26 @@ int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
+ kfree(mem_obj);
+ return 0;
+ }
++
++#if defined(CONFIG_DEBUG_FS)
++
++/* This function will send a packet to HIQ to hang the HWS
++ * which will trigger a GPU reset and bring the HWS back to normal state
++ */
++int kfd_debugfs_hang_hws(struct kfd_dev *dev)
++{
++ int r = 0;
++
++ if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) {
++ pr_err("HWS is not enabled");
++ return -EINVAL;
++ }
++
++ r = pm_debugfs_hang_hws(&dev->dqm->packets);
++ if (!r)
++ r = dqm_debugfs_execute_queues(dev->dqm);
++
++ return r;
++}
++
++#endif
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index d7822e2..2c5d330 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -1855,4 +1855,16 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
+ return r;
+ }
+
++int dqm_debugfs_execute_queues(struct device_queue_manager *dqm)
++{
++ int r = 0;
++
++ mutex_lock(&dqm->lock);
++ dqm->active_runlist = true;
++ r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
++ mutex_unlock(&dqm->lock);
++
++ return r;
++}
++
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+index c317feb4..1092631 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+@@ -418,4 +418,30 @@ int pm_debugfs_runlist(struct seq_file *m, void *data)
+ return 0;
+ }
+
++int pm_debugfs_hang_hws(struct packet_manager *pm)
++{
++ uint32_t *buffer, size;
++ int r = 0;
++
++ size = pm->pmf->query_status_size;
++ mutex_lock(&pm->lock);
++ pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
++ size / sizeof(uint32_t), (unsigned int **)&buffer);
++ if (!buffer) {
++ pr_err("Failed to allocate buffer on kernel queue\n");
++ r = -ENOMEM;
++ goto out;
++ }
++ memset(buffer, 0x55, size);
++ pm->priv_queue->ops.submit_packet(pm->priv_queue);
++
++ pr_info("Submitting %x %x %x %x %x %x %x to HIQ to hang the HWS.",
++ buffer[0], buffer[1], buffer[2], buffer[3],
++ buffer[4], buffer[5], buffer[6]);
++out:
++ mutex_unlock(&pm->lock);
++ return r;
++}
++
++
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index fffdec6..c63a6b0 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -1108,6 +1108,10 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data);
+ int kfd_debugfs_rls_by_device(struct seq_file *m, void *data);
+ int pm_debugfs_runlist(struct seq_file *m, void *data);
+
++int kfd_debugfs_hang_hws(struct kfd_dev *dev);
++int pm_debugfs_hang_hws(struct packet_manager *pm);
++int dqm_debugfs_execute_queues(struct device_queue_manager *dqm);
++
+ #else
+
+ static inline void kfd_debugfs_init(void) {}
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4521-drm-amdkcl-4.17-fix-prime-bo-for-raven-A-A-issue.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4521-drm-amdkcl-4.17-fix-prime-bo-for-raven-A-A-issue.patch
new file mode 100644
index 00000000..39cc8986
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4521-drm-amdkcl-4.17-fix-prime-bo-for-raven-A-A-issue.patch
@@ -0,0 +1,46 @@
+From 3e5c5260d3f2310f7ba4b40aa8a5497051f944ce Mon Sep 17 00:00:00 2001
+From: Junwei Zhang <Jerry.Zhang@amd.com>
+Date: Tue, 22 May 2018 16:26:41 +0800
+Subject: [PATCH 4521/5725] drm/amdkcl: [4.17] fix prime bo for raven A+A issue
+
+For kernel < 4.17, drm prime functions are not exported.
+So retain the old path for prime bo sharing.
+
+Change-Id: Icb4122ac0fcbada9a06006fbe31b9e80f0f1c9e9
+Signed-off-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Reviewed-by: Le Ma <Le.Ma@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 --
+ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2 --
+ 2 files changed, 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 7ba0b6b..f683cb5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -397,8 +397,6 @@ struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf);
+ struct drm_gem_object *
+ amdgpu_gem_prime_foreign_bo(struct amdgpu_device *adev, struct amdgpu_bo *bo);
+-int amdgpu_gem_prime_pin(struct drm_gem_object *obj);
+-void amdgpu_gem_prime_unpin(struct drm_gem_object *obj);
+ struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
+ void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
+ void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 8222902..1c0cf9a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -915,8 +915,6 @@ static struct drm_driver kms_driver = {
+ .gem_prime_import = amdgpu_gem_prime_import,
+ #else
+ .gem_prime_import = drm_gem_prime_import,
+- .gem_prime_pin = amdgpu_gem_prime_pin,
+- .gem_prime_unpin = amdgpu_gem_prime_unpin,
+ #endif
+ .gem_prime_res_obj = amdgpu_gem_prime_res_obj,
+ .gem_prime_get_sg_table = amdgpu_gem_prime_get_sg_table,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4522-drm-amdgpu-defer-test-IBs-on-the-rings-at-boot-V3.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4522-drm-amdgpu-defer-test-IBs-on-the-rings-at-boot-V3.patch
new file mode 100644
index 00000000..959bcda8
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4522-drm-amdgpu-defer-test-IBs-on-the-rings-at-boot-V3.patch
@@ -0,0 +1,92 @@
+From 46b4b03858758ee548926f59948e937c2bf4f82e Mon Sep 17 00:00:00 2001
+From: Shirish S <shirish.s@amd.com>
+Date: Mon, 16 Apr 2018 12:17:57 +0530
+Subject: [PATCH 4522/5725] drm/amdgpu: defer test IBs on the rings at boot
+ (V3)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+amdgpu_ib_ring_tests(), which runs test IBs on the rings at boot,
+contributes to ~500 ms of the amdgpu driver's boot time.
+
+This patch defers it and ensures that it is executed
+in amdgpu_info_ioctl() if it wasn't scheduled.
+
+V2: Use queue_delayed_work() & flush_delayed_work().
+V3: removed usage of separate wq, ensure IB tests are
+ run before enabling clockgating.
+
+Signed-off-by: Shirish S <shirish.s@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 17 ++++++-----------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 3 +++
+ 2 files changed, 9 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 0e416d1..d02f2fa 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1758,6 +1758,10 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
+ if (amdgpu_emu_mode == 1)
+ return 0;
+
++ r = amdgpu_ib_ring_tests(adev);
++ if (r)
++ DRM_ERROR("ib ring test failed (%d).\n", r);
++
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_blocks[i].status.valid)
+ continue;
+@@ -1821,8 +1825,8 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
+ }
+ }
+
+- mod_delayed_work(system_wq, &adev->late_init_work,
+- msecs_to_jiffies(AMDGPU_RESUME_MS));
++ queue_delayed_work(system_wq, &adev->late_init_work,
++ msecs_to_jiffies(AMDGPU_RESUME_MS));
+
+ amdgpu_device_fill_reset_magic(adev);
+
+@@ -2497,10 +2501,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
+ goto failed;
+ }
+
+- r = amdgpu_ib_ring_tests(adev);
+- if (r)
+- DRM_ERROR("ib ring test failed (%d).\n", r);
+-
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_init_data_exchange(adev);
+
+@@ -2766,11 +2766,6 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
+
+ amdgpu_fence_driver_resume(adev);
+
+- if (resume) {
+- r = amdgpu_ib_ring_tests(adev);
+- if (r)
+- DRM_ERROR("ib ring test failed (%d).\n", r);
+- }
+
+ r = amdgpu_device_ip_late_init(adev);
+ if (r)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index 9de27ce..86087c1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -289,6 +289,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+ if (!info->return_size || !info->return_pointer)
+ return -EINVAL;
+
++ /* Ensure IB tests are run on ring */
++ flush_delayed_work(&adev->late_init_work);
++
+ switch (info->query) {
+ case AMDGPU_INFO_VIRTUAL_RANGE: {
+ struct drm_amdgpu_virtual_range range_info;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4523-drm-amd-display-Release-fake-sink.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4523-drm-amd-display-Release-fake-sink.patch
new file mode 100644
index 00000000..d1478b55
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4523-drm-amd-display-Release-fake-sink.patch
@@ -0,0 +1,107 @@
+From e027a0005137196219e591ac390881922364b335 Mon Sep 17 00:00:00 2001
+From: Mikita Lipski <mikita.lipski@amd.com>
+Date: Tue, 1 May 2018 11:33:25 -0400
+Subject: [PATCH 4523/5725] drm/amd/display: Release fake sink
+
+If the connector doesn't have a sink, a fake sink is created but
+never released, as it is assumed to be destroyed with the
+stream it is used for. Now the sink is released before the
+stream, maintaining refcount consistency.
+
+This way we also avoid assigning anything to the connector, keeping
+all the operations local.
+
+Signed-off-by: Mikita Lipski <mikita.lipski@amd.com>
+Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 31 ++++++++++++-----------
+ 1 file changed, 16 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index a796d56..3bf451f 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -2409,27 +2409,22 @@ decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
+ }
+ }
+
+-static int create_fake_sink(struct amdgpu_dm_connector *aconnector)
++static struct dc_sink *
++create_fake_sink(struct amdgpu_dm_connector *aconnector)
+ {
+- struct dc_sink *sink = NULL;
+ struct dc_sink_init_data sink_init_data = { 0 };
+-
++ struct dc_sink *sink = NULL;
+ sink_init_data.link = aconnector->dc_link;
+ sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
+
+ sink = dc_sink_create(&sink_init_data);
+ if (!sink) {
+ DRM_ERROR("Failed to create sink!\n");
+- return -ENOMEM;
++ return NULL;
+ }
+-
+ sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
+- aconnector->fake_enable = true;
+
+- aconnector->dc_sink = sink;
+- aconnector->dc_link->local_sink = sink;
+-
+- return 0;
++ return sink;
+ }
+
+ static void set_multisync_trigger_params(
+@@ -2556,7 +2551,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+ struct dc_stream_state *stream = NULL;
+ struct drm_display_mode mode = *drm_mode;
+ bool native_mode_found = false;
+-
++ struct dc_sink *sink = NULL;
+ if (aconnector == NULL) {
+ DRM_ERROR("aconnector is NULL!\n");
+ return stream;
+@@ -2574,15 +2569,18 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+ return stream;
+ }
+
+- if (create_fake_sink(aconnector))
++ sink = create_fake_sink(aconnector);
++ if (!sink)
+ return stream;
++ } else {
++ sink = aconnector->dc_sink;
+ }
+
+- stream = dc_create_stream_for_sink(aconnector->dc_sink);
++ stream = dc_create_stream_for_sink(sink);
+
+ if (stream == NULL) {
+ DRM_ERROR("Failed to create stream for sink!\n");
+- return stream;
++ goto finish;
+ }
+
+ list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
+@@ -2621,12 +2619,15 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+ fill_audio_info(
+ &stream->audio_info,
+ drm_connector,
+- aconnector->dc_sink);
++ sink);
+
+ update_stream_signal(stream);
+
+ if (dm_state && dm_state->freesync_capable)
+ stream->ignore_msa_timing_param = true;
++finish:
++ if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL)
++ dc_sink_release(sink);
+
+ return stream;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4524-drm-amd-display-pass-pipe_ctx-straight-to-blank_pixe.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4524-drm-amd-display-pass-pipe_ctx-straight-to-blank_pixe.patch
new file mode 100644
index 00000000..958d0f06
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4524-drm-amd-display-pass-pipe_ctx-straight-to-blank_pixe.patch
@@ -0,0 +1,70 @@
+From 77c7d43ad35ecc5030a36c4baf6eddb4a96fbdfd Mon Sep 17 00:00:00 2001
+From: Eric Bernstein <eric.bernstein@amd.com>
+Date: Tue, 1 May 2018 15:21:42 -0400
+Subject: [PATCH 4524/5725] drm/amd/display: pass pipe_ctx straight to
+ blank_pixel_data
+
+Signed-off-by: Eric Bernstein <eric.bernstein@amd.com>
+Reviewed-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 10 +++++-----
+ drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 3 +--
+ 2 files changed, 6 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index c40993a..b7ff538 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -2078,12 +2078,13 @@ static void update_dchubp_dpp(
+
+ static void dcn10_blank_pixel_data(
+ struct dc *dc,
+- struct stream_resource *stream_res,
+- struct dc_stream_state *stream,
++ struct pipe_ctx *pipe_ctx,
+ bool blank)
+ {
+ enum dc_color_space color_space;
+ struct tg_color black_color = {0};
++ struct stream_resource *stream_res = &pipe_ctx->stream_res;
++ struct dc_stream_state *stream = pipe_ctx->stream;
+
+ /* program otg blank color */
+ color_space = stream->output_color_space;
+@@ -2142,8 +2143,7 @@ static void program_all_pipe_in_tree(
+ pipe_ctx->stream_res.tg->funcs->program_global_sync(
+ pipe_ctx->stream_res.tg);
+
+- dc->hwss.blank_pixel_data(dc, &pipe_ctx->stream_res,
+- pipe_ctx->stream, blank);
++ dc->hwss.blank_pixel_data(dc, pipe_ctx, blank);
+ }
+
+ if (pipe_ctx->plane_state != NULL) {
+@@ -2262,7 +2262,7 @@ static void dcn10_apply_ctx_for_surface(
+
+ if (num_planes == 0) {
+ /* OTG blank before remove all front end */
+- dc->hwss.blank_pixel_data(dc, &top_pipe_to_program->stream_res, top_pipe_to_program->stream, true);
++ dc->hwss.blank_pixel_data(dc, top_pipe_to_program, true);
+ }
+
+ /* Disconnect unused mpcc */
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+index 63fc6c4..52db80f 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+@@ -160,8 +160,7 @@ struct hw_sequencer_funcs {
+ bool lock);
+ void (*blank_pixel_data)(
+ struct dc *dc,
+- struct stream_resource *stream_res,
+- struct dc_stream_state *stream,
++ struct pipe_ctx *pipe_ctx,
+ bool blank);
+
+ void (*set_bandwidth)(
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4525-drm-amd-display-add-register-offset-0-check.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4525-drm-amd-display-add-register-offset-0-check.patch
new file mode 100644
index 00000000..854f45a4
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4525-drm-amd-display-add-register-offset-0-check.patch
@@ -0,0 +1,45 @@
+From 29812d6cf0deecb4b9256debd6a256bf71acd1b9 Mon Sep 17 00:00:00 2001
+From: Charlene Liu <charlene.liu@amd.com>
+Date: Tue, 1 May 2018 19:49:03 -0400
+Subject: [PATCH 4525/5725] drm/amd/display: add register offset != 0 check.
+
+Signed-off-by: Charlene Liu <charlene.liu@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c | 3 ++-
+ drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 2 +-
+ 2 files changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+index 0a6d483..c063175 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+@@ -72,7 +72,8 @@ static void dce110_update_generic_info_packet(
+ uint32_t max_retries = 50;
+
+ /*we need turn on clock before programming AFMT block*/
+- REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1);
++ if (REG(AFMT_CNTL))
++ REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1);
+
+ if (REG(AFMT_VBI_PACKET_CONTROL1)) {
+ if (packet_index >= 8)
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index 5fcb67a..6b22a53 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -1004,9 +1004,9 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
+ /*don't free audio if it is from retrain or internal disable stream*/
+ if (option == FREE_ACQUIRED_RESOURCE && dc->caps.dynamic_audio == true) {
+ /*we have to dynamic arbitrate the audio endpoints*/
+- pipe_ctx->stream_res.audio = NULL;
+ /*we free the resource, need reset is_audio_acquired*/
+ update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false);
++ pipe_ctx->stream_res.audio = NULL;
+ }
+
+ /* TODO: notify audio driver for if audio modes list changed
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4526-drm-amd-display-Do-not-program-interrupt-status-on-d.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4526-drm-amd-display-Do-not-program-interrupt-status-on-d.patch
new file mode 100644
index 00000000..419dd0b8
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4526-drm-amd-display-Do-not-program-interrupt-status-on-d.patch
@@ -0,0 +1,32 @@
+From 75407f653b619f1830382cde35570dce2142c4b4 Mon Sep 17 00:00:00 2001
+From: Mikita Lipski <mikita.lipski@amd.com>
+Date: Thu, 3 May 2018 17:08:51 -0400
+Subject: [PATCH 4526/5725] drm/amd/display: Do not program interrupt status on
+ disabled crtc
+
+Prevent interrupt programming of a crtc on which the stream is disabled
+and which doesn't have an OTG to reference.
+
+Signed-off-by: Mikita Lipski <mikita.lipski@amd.com>
+Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+index 30bec90..034aa76 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+@@ -555,6 +555,9 @@ static inline int dm_irq_state(struct amdgpu_device *adev,
+ return 0;
+ }
+
++ if (acrtc->otg_inst == -1)
++ return 0;
++
+ irq_source = dal_irq_type + acrtc->otg_inst;
+
+ st = (state == AMDGPU_IRQ_STATE_ENABLE);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4527-drm-amd-display-Clean-up-submit_channel_request.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4527-drm-amd-display-Clean-up-submit_channel_request.patch
new file mode 100644
index 00000000..798638fe
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4527-drm-amd-display-Clean-up-submit_channel_request.patch
@@ -0,0 +1,64 @@
+From 9d6c30dc115a46aabac106ee0641e13d7c939174 Mon Sep 17 00:00:00 2001
+From: Charlene Liu <charlene.liu@amd.com>
+Date: Thu, 3 May 2018 17:51:07 -0400
+Subject: [PATCH 4527/5725] drm/amd/display: Clean up submit_channel_request
+
+Signed-off-by: Charlene Liu <charlene.liu@amd.com>
+Reviewed-by: Vitaly Prosyak <Vitaly.Prosyak@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ .../display/dc/i2caux/dce110/aux_engine_dce110.c | 34 +++++++++++-----------
+ 1 file changed, 17 insertions(+), 17 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
+index 5f47f6c..9053578 100644
+--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
++++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
+@@ -198,27 +198,27 @@ static void submit_channel_request(
+ ((request->type == AUX_TRANSACTION_TYPE_I2C) &&
+ ((request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE) ||
+ (request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT)));
++ if (REG(AUXN_IMPCAL)) {
++ /* clear_aux_error */
++ REG_UPDATE_SEQ(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK,
++ 1,
++ 0);
+
+- /* clear_aux_error */
+- REG_UPDATE_SEQ(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK,
+- 1,
+- 0);
+-
+- REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK,
+- 1,
+- 0);
+-
+- /* force_default_calibrate */
+- REG_UPDATE_1BY1_2(AUXN_IMPCAL,
+- AUXN_IMPCAL_ENABLE, 1,
+- AUXN_IMPCAL_OVERRIDE_ENABLE, 0);
++ REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK,
++ 1,
++ 0);
+
+- /* bug? why AUXN update EN and OVERRIDE_EN 1 by 1 while AUX P toggles OVERRIDE? */
++ /* force_default_calibrate */
++ REG_UPDATE_1BY1_2(AUXN_IMPCAL,
++ AUXN_IMPCAL_ENABLE, 1,
++ AUXN_IMPCAL_OVERRIDE_ENABLE, 0);
+
+- REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE,
+- 1,
+- 0);
++ /* bug? why AUXN update EN and OVERRIDE_EN 1 by 1 while AUX P toggles OVERRIDE? */
+
++ REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE,
++ 1,
++ 0);
++ }
+ /* set the delay and the number of bytes to write */
+
+ /* The length include
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4528-drm-amd-display-upgrade-scaler-math.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4528-drm-amd-display-upgrade-scaler-math.patch
new file mode 100644
index 00000000..364cb4a5
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4528-drm-amd-display-upgrade-scaler-math.patch
@@ -0,0 +1,525 @@
+From 28b39973cdb2d45d57e08e1c08e06d84b904bcdc Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Thu, 3 May 2018 13:42:43 -0400
+Subject: [PATCH 4528/5725] drm/amd/display: upgrade scaler math
+
+This change allows the viewport overlap to apply to rotated/
+mirrored surfaces. Viewport overlap results in extra pixels being
+added to the viewport, allowing the first few pixels to be scaled as
+if there were no cut-off (MPO or pipe split), and lets us get matching
+CRCs between the scaled split and unsplit outputs of the same content.
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 419 +++++++++++++++-------
+ 1 file changed, 289 insertions(+), 130 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index f02f366..ad09f0c 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -521,13 +521,12 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
+ }
+ }
+
+-static void calculate_recout(struct pipe_ctx *pipe_ctx, struct view *recout_skip)
++static void calculate_recout(struct pipe_ctx *pipe_ctx, struct rect *recout_full)
+ {
+ const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ const struct dc_stream_state *stream = pipe_ctx->stream;
+ struct rect surf_src = plane_state->src_rect;
+ struct rect surf_clip = plane_state->clip_rect;
+- int recout_full_x, recout_full_y;
+ bool pri_split = pipe_ctx->bottom_pipe &&
+ pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state;
+ bool sec_split = pipe_ctx->top_pipe &&
+@@ -596,20 +595,22 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx, struct view *recout_skip
+ }
+ }
+ /* Unclipped recout offset = stream dst offset + ((surf dst offset - stream surf_src offset)
+- * * 1/ stream scaling ratio) - (surf surf_src offset * 1/ full scl
+- * ratio)
++ * * 1/ stream scaling ratio) - (surf surf_src offset * 1/ full scl
++ * ratio)
+ */
+- recout_full_x = stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
++ recout_full->x = stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
+ * stream->dst.width / stream->src.width -
+ surf_src.x * plane_state->dst_rect.width / surf_src.width
+ * stream->dst.width / stream->src.width;
+- recout_full_y = stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
++ recout_full->y = stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
+ * stream->dst.height / stream->src.height -
+ surf_src.y * plane_state->dst_rect.height / surf_src.height
+ * stream->dst.height / stream->src.height;
+
+- recout_skip->width = pipe_ctx->plane_res.scl_data.recout.x - recout_full_x;
+- recout_skip->height = pipe_ctx->plane_res.scl_data.recout.y - recout_full_y;
++ recout_full->width = plane_state->dst_rect.width
++ * stream->dst.width / stream->src.width;
++ recout_full->height = plane_state->dst_rect.height
++ * stream->dst.height / stream->src.height;
+ }
+
+ static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
+@@ -661,7 +662,7 @@ static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
+ pipe_ctx->plane_res.scl_data.ratios.vert_c, 19);
+ }
+
+-static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *recout_skip)
++static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct rect *recout_full)
+ {
+ struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
+ struct rect src = pipe_ctx->plane_state->src_rect;
+@@ -679,15 +680,14 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
+ flip_vert_scan_dir = true;
+ else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
+ flip_horz_scan_dir = true;
+- if (pipe_ctx->plane_state->horizontal_mirror)
+- flip_horz_scan_dir = !flip_horz_scan_dir;
+
+ if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
+ pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) {
+ rect_swap_helper(&src);
+ rect_swap_helper(&data->viewport_c);
+ rect_swap_helper(&data->viewport);
+- }
++ } else if (pipe_ctx->plane_state->horizontal_mirror)
++ flip_horz_scan_dir = !flip_horz_scan_dir;
+
+ /*
+ * Init calculated according to formula:
+@@ -707,127 +707,286 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
+ data->inits.v_c = dc_fixpt_truncate(dc_fixpt_add(data->inits.v_c, dc_fixpt_div_int(
+ dc_fixpt_add_int(data->ratios.vert_c, data->taps.v_taps_c + 1), 2)), 19);
+
++ if (!flip_horz_scan_dir) {
++ /* Adjust for viewport end clip-off */
++ if ((data->viewport.x + data->viewport.width) < (src.x + src.width)) {
++ int vp_clip = src.x + src.width - data->viewport.width - data->viewport.x;
++ int int_part = dc_fixpt_floor(
++ dc_fixpt_sub(data->inits.h, data->ratios.horz));
+
++ int_part = int_part > 0 ? int_part : 0;
++ data->viewport.width += int_part < vp_clip ? int_part : vp_clip;
++ }
++ if ((data->viewport_c.x + data->viewport_c.width) < (src.x + src.width) / vpc_div) {
++ int vp_clip = (src.x + src.width) / vpc_div -
++ data->viewport_c.width - data->viewport_c.x;
++ int int_part = dc_fixpt_floor(
++ dc_fixpt_sub(data->inits.h_c, data->ratios.horz_c));
++
++ int_part = int_part > 0 ? int_part : 0;
++ data->viewport_c.width += int_part < vp_clip ? int_part : vp_clip;
++ }
+
+- /* Adjust for viewport end clip-off */
+- if ((data->viewport.x + data->viewport.width) < (src.x + src.width) && !flip_horz_scan_dir) {
+- int vp_clip = src.x + src.width - data->viewport.width - data->viewport.x;
+- int int_part = dc_fixpt_floor(
+- dc_fixpt_sub(data->inits.h, data->ratios.horz));
+-
+- int_part = int_part > 0 ? int_part : 0;
+- data->viewport.width += int_part < vp_clip ? int_part : vp_clip;
+- }
+- if ((data->viewport.y + data->viewport.height) < (src.y + src.height) && !flip_vert_scan_dir) {
+- int vp_clip = src.y + src.height - data->viewport.height - data->viewport.y;
+- int int_part = dc_fixpt_floor(
+- dc_fixpt_sub(data->inits.v, data->ratios.vert));
+-
+- int_part = int_part > 0 ? int_part : 0;
+- data->viewport.height += int_part < vp_clip ? int_part : vp_clip;
+- }
+- if ((data->viewport_c.x + data->viewport_c.width) < (src.x + src.width) / vpc_div && !flip_horz_scan_dir) {
+- int vp_clip = (src.x + src.width) / vpc_div -
+- data->viewport_c.width - data->viewport_c.x;
+- int int_part = dc_fixpt_floor(
+- dc_fixpt_sub(data->inits.h_c, data->ratios.horz_c));
+-
+- int_part = int_part > 0 ? int_part : 0;
+- data->viewport_c.width += int_part < vp_clip ? int_part : vp_clip;
+- }
+- if ((data->viewport_c.y + data->viewport_c.height) < (src.y + src.height) / vpc_div && !flip_vert_scan_dir) {
+- int vp_clip = (src.y + src.height) / vpc_div -
+- data->viewport_c.height - data->viewport_c.y;
+- int int_part = dc_fixpt_floor(
+- dc_fixpt_sub(data->inits.v_c, data->ratios.vert_c));
+-
+- int_part = int_part > 0 ? int_part : 0;
+- data->viewport_c.height += int_part < vp_clip ? int_part : vp_clip;
+- }
+-
+- /* Adjust for non-0 viewport offset */
+- if (data->viewport.x && !flip_horz_scan_dir) {
+- int int_part;
+-
+- data->inits.h = dc_fixpt_add(data->inits.h, dc_fixpt_mul_int(
+- data->ratios.horz, recout_skip->width));
+- int_part = dc_fixpt_floor(data->inits.h) - data->viewport.x;
+- if (int_part < data->taps.h_taps) {
+- int int_adj = data->viewport.x >= (data->taps.h_taps - int_part) ?
+- (data->taps.h_taps - int_part) : data->viewport.x;
+- data->viewport.x -= int_adj;
+- data->viewport.width += int_adj;
+- int_part += int_adj;
+- } else if (int_part > data->taps.h_taps) {
+- data->viewport.x += int_part - data->taps.h_taps;
+- data->viewport.width -= int_part - data->taps.h_taps;
+- int_part = data->taps.h_taps;
++ /* Adjust for non-0 viewport offset */
++ if (data->viewport.x) {
++ int int_part;
++
++ data->inits.h = dc_fixpt_add(data->inits.h, dc_fixpt_mul_int(
++ data->ratios.horz, data->recout.x - recout_full->x));
++ int_part = dc_fixpt_floor(data->inits.h) - data->viewport.x;
++ if (int_part < data->taps.h_taps) {
++ int int_adj = data->viewport.x >= (data->taps.h_taps - int_part) ?
++ (data->taps.h_taps - int_part) : data->viewport.x;
++ data->viewport.x -= int_adj;
++ data->viewport.width += int_adj;
++ int_part += int_adj;
++ } else if (int_part > data->taps.h_taps) {
++ data->viewport.x += int_part - data->taps.h_taps;
++ data->viewport.width -= int_part - data->taps.h_taps;
++ int_part = data->taps.h_taps;
++ }
++ data->inits.h.value &= 0xffffffff;
++ data->inits.h = dc_fixpt_add_int(data->inits.h, int_part);
+ }
+- data->inits.h.value &= 0xffffffff;
+- data->inits.h = dc_fixpt_add_int(data->inits.h, int_part);
+- }
+-
+- if (data->viewport_c.x && !flip_horz_scan_dir) {
+- int int_part;
+-
+- data->inits.h_c = dc_fixpt_add(data->inits.h_c, dc_fixpt_mul_int(
+- data->ratios.horz_c, recout_skip->width));
+- int_part = dc_fixpt_floor(data->inits.h_c) - data->viewport_c.x;
+- if (int_part < data->taps.h_taps_c) {
+- int int_adj = data->viewport_c.x >= (data->taps.h_taps_c - int_part) ?
+- (data->taps.h_taps_c - int_part) : data->viewport_c.x;
+- data->viewport_c.x -= int_adj;
+- data->viewport_c.width += int_adj;
+- int_part += int_adj;
+- } else if (int_part > data->taps.h_taps_c) {
+- data->viewport_c.x += int_part - data->taps.h_taps_c;
+- data->viewport_c.width -= int_part - data->taps.h_taps_c;
+- int_part = data->taps.h_taps_c;
++
++ if (data->viewport_c.x) {
++ int int_part;
++
++ data->inits.h_c = dc_fixpt_add(data->inits.h_c, dc_fixpt_mul_int(
++ data->ratios.horz_c, data->recout.x - recout_full->x));
++ int_part = dc_fixpt_floor(data->inits.h_c) - data->viewport_c.x;
++ if (int_part < data->taps.h_taps_c) {
++ int int_adj = data->viewport_c.x >= (data->taps.h_taps_c - int_part) ?
++ (data->taps.h_taps_c - int_part) : data->viewport_c.x;
++ data->viewport_c.x -= int_adj;
++ data->viewport_c.width += int_adj;
++ int_part += int_adj;
++ } else if (int_part > data->taps.h_taps_c) {
++ data->viewport_c.x += int_part - data->taps.h_taps_c;
++ data->viewport_c.width -= int_part - data->taps.h_taps_c;
++ int_part = data->taps.h_taps_c;
++ }
++ data->inits.h_c.value &= 0xffffffff;
++ data->inits.h_c = dc_fixpt_add_int(data->inits.h_c, int_part);
+ }
+- data->inits.h_c.value &= 0xffffffff;
+- data->inits.h_c = dc_fixpt_add_int(data->inits.h_c, int_part);
+- }
+-
+- if (data->viewport.y && !flip_vert_scan_dir) {
+- int int_part;
+-
+- data->inits.v = dc_fixpt_add(data->inits.v, dc_fixpt_mul_int(
+- data->ratios.vert, recout_skip->height));
+- int_part = dc_fixpt_floor(data->inits.v) - data->viewport.y;
+- if (int_part < data->taps.v_taps) {
+- int int_adj = data->viewport.y >= (data->taps.v_taps - int_part) ?
+- (data->taps.v_taps - int_part) : data->viewport.y;
+- data->viewport.y -= int_adj;
+- data->viewport.height += int_adj;
+- int_part += int_adj;
+- } else if (int_part > data->taps.v_taps) {
+- data->viewport.y += int_part - data->taps.v_taps;
+- data->viewport.height -= int_part - data->taps.v_taps;
+- int_part = data->taps.v_taps;
++ } else {
++ /* Adjust for non-0 viewport offset */
++ if (data->viewport.x) {
++ int int_part = dc_fixpt_floor(
++ dc_fixpt_sub(data->inits.h, data->ratios.horz));
++
++ int_part = int_part > 0 ? int_part : 0;
++ data->viewport.width += int_part < data->viewport.x ? int_part : data->viewport.x;
++ data->viewport.x -= int_part < data->viewport.x ? int_part : data->viewport.x;
++ }
++ if (data->viewport_c.x) {
++ int int_part = dc_fixpt_floor(
++ dc_fixpt_sub(data->inits.h_c, data->ratios.horz_c));
++
++ int_part = int_part > 0 ? int_part : 0;
++ data->viewport_c.width += int_part < data->viewport_c.x ? int_part : data->viewport_c.x;
++ data->viewport_c.x -= int_part < data->viewport_c.x ? int_part : data->viewport_c.x;
+ }
+- data->inits.v.value &= 0xffffffff;
+- data->inits.v = dc_fixpt_add_int(data->inits.v, int_part);
+- }
+-
+- if (data->viewport_c.y && !flip_vert_scan_dir) {
+- int int_part;
+-
+- data->inits.v_c = dc_fixpt_add(data->inits.v_c, dc_fixpt_mul_int(
+- data->ratios.vert_c, recout_skip->height));
+- int_part = dc_fixpt_floor(data->inits.v_c) - data->viewport_c.y;
+- if (int_part < data->taps.v_taps_c) {
+- int int_adj = data->viewport_c.y >= (data->taps.v_taps_c - int_part) ?
+- (data->taps.v_taps_c - int_part) : data->viewport_c.y;
+- data->viewport_c.y -= int_adj;
+- data->viewport_c.height += int_adj;
+- int_part += int_adj;
+- } else if (int_part > data->taps.v_taps_c) {
+- data->viewport_c.y += int_part - data->taps.v_taps_c;
+- data->viewport_c.height -= int_part - data->taps.v_taps_c;
+- int_part = data->taps.v_taps_c;
++
++ /* Adjust for viewport end clip-off */
++ if ((data->viewport.x + data->viewport.width) < (src.x + src.width)) {
++ int int_part;
++ int end_offset = src.x + src.width
++ - data->viewport.x - data->viewport.width;
++
++ /*
++ * this is init if vp had no offset, keep in mind this is from the
++ * right side of vp due to scan direction
++ */
++ data->inits.h = dc_fixpt_add(data->inits.h, dc_fixpt_mul_int(
++ data->ratios.horz, data->recout.x - recout_full->x));
++ /*
++ * this is the difference between first pixel of viewport available to read
++			 * and init position, taking into account scan direction
++ */
++ int_part = dc_fixpt_floor(data->inits.h) - end_offset;
++ if (int_part < data->taps.h_taps) {
++ int int_adj = end_offset >= (data->taps.h_taps - int_part) ?
++ (data->taps.h_taps - int_part) : end_offset;
++ data->viewport.width += int_adj;
++ int_part += int_adj;
++ } else if (int_part > data->taps.h_taps) {
++ data->viewport.width += int_part - data->taps.h_taps;
++ int_part = data->taps.h_taps;
++ }
++ data->inits.h.value &= 0xffffffff;
++ data->inits.h = dc_fixpt_add_int(data->inits.h, int_part);
++ }
++
++ if ((data->viewport_c.x + data->viewport_c.width) < (src.x + src.width) / vpc_div) {
++ int int_part;
++ int end_offset = (src.x + src.width) / vpc_div
++ - data->viewport_c.x - data->viewport_c.width;
++
++ /*
++ * this is init if vp had no offset, keep in mind this is from the
++ * right side of vp due to scan direction
++ */
++ data->inits.h_c = dc_fixpt_add(data->inits.h_c, dc_fixpt_mul_int(
++ data->ratios.horz_c, data->recout.x - recout_full->x));
++ /*
++ * this is the difference between first pixel of viewport available to read
++			 * and init position, taking into account scan direction
++ */
++ int_part = dc_fixpt_floor(data->inits.h_c) - end_offset;
++ if (int_part < data->taps.h_taps_c) {
++ int int_adj = end_offset >= (data->taps.h_taps_c - int_part) ?
++ (data->taps.h_taps_c - int_part) : end_offset;
++ data->viewport_c.width += int_adj;
++ int_part += int_adj;
++ } else if (int_part > data->taps.h_taps_c) {
++ data->viewport_c.width += int_part - data->taps.h_taps_c;
++ int_part = data->taps.h_taps_c;
++ }
++ data->inits.h_c.value &= 0xffffffff;
++ data->inits.h_c = dc_fixpt_add_int(data->inits.h_c, int_part);
++ }
++
++ }
++ if (!flip_vert_scan_dir) {
++ /* Adjust for viewport end clip-off */
++ if ((data->viewport.y + data->viewport.height) < (src.y + src.height)) {
++ int vp_clip = src.y + src.height - data->viewport.height - data->viewport.y;
++ int int_part = dc_fixpt_floor(
++ dc_fixpt_sub(data->inits.v, data->ratios.vert));
++
++ int_part = int_part > 0 ? int_part : 0;
++ data->viewport.height += int_part < vp_clip ? int_part : vp_clip;
++ }
++ if ((data->viewport_c.y + data->viewport_c.height) < (src.y + src.height) / vpc_div) {
++ int vp_clip = (src.y + src.height) / vpc_div -
++ data->viewport_c.height - data->viewport_c.y;
++ int int_part = dc_fixpt_floor(
++ dc_fixpt_sub(data->inits.v_c, data->ratios.vert_c));
++
++ int_part = int_part > 0 ? int_part : 0;
++ data->viewport_c.height += int_part < vp_clip ? int_part : vp_clip;
++ }
++
++ /* Adjust for non-0 viewport offset */
++ if (data->viewport.y) {
++ int int_part;
++
++ data->inits.v = dc_fixpt_add(data->inits.v, dc_fixpt_mul_int(
++ data->ratios.vert, data->recout.y - recout_full->y));
++ int_part = dc_fixpt_floor(data->inits.v) - data->viewport.y;
++ if (int_part < data->taps.v_taps) {
++ int int_adj = data->viewport.y >= (data->taps.v_taps - int_part) ?
++ (data->taps.v_taps - int_part) : data->viewport.y;
++ data->viewport.y -= int_adj;
++ data->viewport.height += int_adj;
++ int_part += int_adj;
++ } else if (int_part > data->taps.v_taps) {
++ data->viewport.y += int_part - data->taps.v_taps;
++ data->viewport.height -= int_part - data->taps.v_taps;
++ int_part = data->taps.v_taps;
++ }
++ data->inits.v.value &= 0xffffffff;
++ data->inits.v = dc_fixpt_add_int(data->inits.v, int_part);
++ }
++
++ if (data->viewport_c.y) {
++ int int_part;
++
++ data->inits.v_c = dc_fixpt_add(data->inits.v_c, dc_fixpt_mul_int(
++ data->ratios.vert_c, data->recout.y - recout_full->y));
++ int_part = dc_fixpt_floor(data->inits.v_c) - data->viewport_c.y;
++ if (int_part < data->taps.v_taps_c) {
++ int int_adj = data->viewport_c.y >= (data->taps.v_taps_c - int_part) ?
++ (data->taps.v_taps_c - int_part) : data->viewport_c.y;
++ data->viewport_c.y -= int_adj;
++ data->viewport_c.height += int_adj;
++ int_part += int_adj;
++ } else if (int_part > data->taps.v_taps_c) {
++ data->viewport_c.y += int_part - data->taps.v_taps_c;
++ data->viewport_c.height -= int_part - data->taps.v_taps_c;
++ int_part = data->taps.v_taps_c;
++ }
++ data->inits.v_c.value &= 0xffffffff;
++ data->inits.v_c = dc_fixpt_add_int(data->inits.v_c, int_part);
++ }
++ } else {
++ /* Adjust for non-0 viewport offset */
++ if (data->viewport.y) {
++ int int_part = dc_fixpt_floor(
++ dc_fixpt_sub(data->inits.v, data->ratios.vert));
++
++ int_part = int_part > 0 ? int_part : 0;
++ data->viewport.height += int_part < data->viewport.y ? int_part : data->viewport.y;
++ data->viewport.y -= int_part < data->viewport.y ? int_part : data->viewport.y;
++ }
++ if (data->viewport_c.y) {
++ int int_part = dc_fixpt_floor(
++ dc_fixpt_sub(data->inits.v_c, data->ratios.vert_c));
++
++ int_part = int_part > 0 ? int_part : 0;
++ data->viewport_c.height += int_part < data->viewport_c.y ? int_part : data->viewport_c.y;
++ data->viewport_c.y -= int_part < data->viewport_c.y ? int_part : data->viewport_c.y;
++ }
++
++ /* Adjust for viewport end clip-off */
++ if ((data->viewport.y + data->viewport.height) < (src.y + src.height)) {
++ int int_part;
++ int end_offset = src.y + src.height
++ - data->viewport.y - data->viewport.height;
++
++ /*
++ * this is init if vp had no offset, keep in mind this is from the
++ * right side of vp due to scan direction
++ */
++ data->inits.v = dc_fixpt_add(data->inits.v, dc_fixpt_mul_int(
++ data->ratios.vert, data->recout.y - recout_full->y));
++ /*
++ * this is the difference between first pixel of viewport available to read
++ * and init position, taking into account scan direction
++ */
++ int_part = dc_fixpt_floor(data->inits.v) - end_offset;
++ if (int_part < data->taps.v_taps) {
++ int int_adj = end_offset >= (data->taps.v_taps - int_part) ?
++ (data->taps.v_taps - int_part) : end_offset;
++ data->viewport.height += int_adj;
++ int_part += int_adj;
++ } else if (int_part > data->taps.v_taps) {
++ data->viewport.height += int_part - data->taps.v_taps;
++ int_part = data->taps.v_taps;
++ }
++ data->inits.v.value &= 0xffffffff;
++ data->inits.v = dc_fixpt_add_int(data->inits.v, int_part);
++ }
++
++ if ((data->viewport_c.y + data->viewport_c.height) < (src.y + src.height) / vpc_div) {
++ int int_part;
++ int end_offset = (src.y + src.height) / vpc_div
++ - data->viewport_c.y - data->viewport_c.height;
++
++ /*
++ * this is init if vp had no offset, keep in mind this is from the
++ * right side of vp due to scan direction
++ */
++ data->inits.v_c = dc_fixpt_add(data->inits.v_c, dc_fixpt_mul_int(
++ data->ratios.vert_c, data->recout.y - recout_full->y));
++ /*
++ * this is the difference between first pixel of viewport available to read
++ * and init position, taking into account scan direction
++ */
++ int_part = dc_fixpt_floor(data->inits.v_c) - end_offset;
++ if (int_part < data->taps.v_taps_c) {
++ int int_adj = end_offset >= (data->taps.v_taps_c - int_part) ?
++ (data->taps.v_taps_c - int_part) : end_offset;
++ data->viewport_c.height += int_adj;
++ int_part += int_adj;
++ } else if (int_part > data->taps.v_taps_c) {
++ data->viewport_c.height += int_part - data->taps.v_taps_c;
++ int_part = data->taps.v_taps_c;
++ }
++ data->inits.v_c.value &= 0xffffffff;
++ data->inits.v_c = dc_fixpt_add_int(data->inits.v_c, int_part);
+ }
+- data->inits.v_c.value &= 0xffffffff;
+- data->inits.v_c = dc_fixpt_add_int(data->inits.v_c, int_part);
+ }
+
+ /* Interlaced inits based on final vert inits */
+@@ -845,7 +1004,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
+ {
+ const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
+- struct view recout_skip = { 0 };
++ struct rect recout_full = { 0 };
+ bool res = false;
+ DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
+ /* Important: scaling ratio calculation requires pixel format,
+@@ -865,7 +1024,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
+ if (pipe_ctx->plane_res.scl_data.viewport.height < 16 || pipe_ctx->plane_res.scl_data.viewport.width < 16)
+ return false;
+
+- calculate_recout(pipe_ctx, &recout_skip);
++ calculate_recout(pipe_ctx, &recout_full);
+
+ /**
+ * Setting line buffer pixel depth to 24bpp yields banding
+@@ -909,7 +1068,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
+
+ if (res)
+ /* May need to re-check lb size after this in some obscure scenario */
+- calculate_inits_and_adj_vp(pipe_ctx, &recout_skip);
++ calculate_inits_and_adj_vp(pipe_ctx, &recout_full);
+
+ DC_LOG_SCALER(
+ "%s: Viewport:\nheight:%d width:%d x:%d "
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4529-drm-amd-display-dal-3.1.45.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4529-drm-amd-display-dal-3.1.45.patch
new file mode 100644
index 00000000..2038e74e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4529-drm-amd-display-dal-3.1.45.patch
@@ -0,0 +1,28 @@
+From a24c2998c4b94767f1b722e1dc5a256471cfed4d Mon Sep 17 00:00:00 2001
+From: Tony Cheng <tony.cheng@amd.com>
+Date: Mon, 16 Apr 2018 13:31:05 -0400
+Subject: [PATCH 4529/5725] drm/amd/display: dal 3.1.45
+
+Signed-off-by: Tony Cheng <tony.cheng@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 85edb34..81a576f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -38,7 +38,7 @@
+ #include "inc/compressor.h"
+ #include "dml/display_mode_lib.h"
+
+-#define DC_VER "3.1.44"
++#define DC_VER "3.1.45"
+
+ #define MAX_SURFACES 3
+ #define MAX_STREAMS 6
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4530-drm-amd-display-Prefix-event-prints-with-Event.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4530-drm-amd-display-Prefix-event-prints-with-Event.patch
new file mode 100644
index 00000000..b33b9d4f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4530-drm-amd-display-Prefix-event-prints-with-Event.patch
@@ -0,0 +1,28 @@
+From d55fbcfe9699249492bf9063a43fd365580d1105 Mon Sep 17 00:00:00 2001
+From: Anthony Koo <Anthony.Koo@amd.com>
+Date: Tue, 8 May 2018 11:24:05 -0400
+Subject: [PATCH 4530/5725] drm/amd/display: Prefix event prints with ==Event==
+
+Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/modules/stats/stats.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/modules/stats/stats.c b/drivers/gpu/drm/amd/display/modules/stats/stats.c
+index 3f7d47f..fa0665d 100644
+--- a/drivers/gpu/drm/amd/display/modules/stats/stats.c
++++ b/drivers/gpu/drm/amd/display/modules/stats/stats.c
+@@ -240,7 +240,7 @@ void mod_stats_dump(struct mod_stats *mod_stats)
+ for (int i = 0; i < core_stats->entry_id; i++) {
+ if (event_index < core_stats->event_index &&
+ i == events[event_index].entry_id) {
+- DISPLAY_STATS("%s\n", events[event_index].event_string);
++ DISPLAY_STATS("==Event==%s\n", events[event_index].event_string);
+ event_index++;
+ } else if (time_index < core_stats->index &&
+ i == time[time_index].entry_id) {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4531-drm-amd-display-Read-DPCD-link-caps-up-to-and-includ.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4531-drm-amd-display-Read-DPCD-link-caps-up-to-and-includ.patch
new file mode 100644
index 00000000..058f2eba
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4531-drm-amd-display-Read-DPCD-link-caps-up-to-and-includ.patch
@@ -0,0 +1,33 @@
+From 0eda956b0f7650651fc3b6b1a2b1dddc6814e05c Mon Sep 17 00:00:00 2001
+From: Nikola Cornij <nikola.cornij@amd.com>
+Date: Mon, 7 May 2018 15:35:15 -0400
+Subject: [PATCH 4531/5725] drm/amd/display: Read DPCD link caps up to and
+ including DP_ADAPTER_CAP
+
+DP 1.4 compliance requires 16 bytes to be read when reading link caps,
+i.e. it requires DP_ADAPTER_CAP to be included. Include it for all DP
+versions because reading more than required won't fail.
+
+Signed-off-by: Nikola Cornij <nikola.cornij@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+index cb376cf..163c887 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+@@ -2278,7 +2278,7 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,
+
+ static bool retrieve_link_cap(struct dc_link *link)
+ {
+- uint8_t dpcd_data[DP_TRAINING_AUX_RD_INTERVAL - DP_DPCD_REV + 1];
++ uint8_t dpcd_data[DP_ADAPTER_CAP - DP_DPCD_REV + 1];
+
+ union down_stream_port_count down_strm_port_count;
+ union edp_configuration_cap edp_config_cap;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4532-drm-amd-display-AUX-will-exit-when-HPD-LOW-detected.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4532-drm-amd-display-AUX-will-exit-when-HPD-LOW-detected.patch
new file mode 100644
index 00000000..09186ad9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4532-drm-amd-display-AUX-will-exit-when-HPD-LOW-detected.patch
@@ -0,0 +1,177 @@
+From a54cec55c5d512f336e4a4e5141203c04df376af Mon Sep 17 00:00:00 2001
+From: Hersen Wu <hersenxs.wu@amd.com>
+Date: Mon, 23 Apr 2018 19:21:45 -0400
+Subject: [PATCH 4532/5725] drm/amd/display: AUX will exit when HPD LOW
+ detected
+
+This change shortens the wait time when HPD is LOW. With HPD LOW, without
+this change, the AUX routine delay is 450us. With this change, it is 42us.
+
+Signed-off-by: Hersen Wu <hersenxs.wu@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c | 16 ++++++++++++++++
+ drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h | 5 ++++-
+ .../display/dc/i2caux/dce110/aux_engine_dce110.c | 22 +++++++++++++++++-----
+ drivers/gpu/drm/amd/display/dc/i2caux/engine.h | 3 ++-
+ .../drm/amd/display/include/ddc_service_types.h | 3 ++-
+ 5 files changed, 41 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c
+index bb526ad..1d73096 100644
+--- a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c
++++ b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c
+@@ -157,6 +157,10 @@ static void process_read_reply(
+ ctx->operation_succeeded = false;
+ }
+ break;
++ case AUX_TRANSACTION_REPLY_HPD_DISCON:
++ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
++ ctx->operation_succeeded = false;
++ break;
+ default:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
+ ctx->operation_succeeded = false;
+@@ -215,6 +219,10 @@ static void process_read_request(
+ * so we should not wait here */
+ }
+ break;
++ case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
++ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
++ ctx->operation_succeeded = false;
++ break;
+ default:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
+ ctx->operation_succeeded = false;
+@@ -370,6 +378,10 @@ static void process_write_reply(
+ ctx->operation_succeeded = false;
+ }
+ break;
++ case AUX_TRANSACTION_REPLY_HPD_DISCON:
++ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
++ ctx->operation_succeeded = false;
++ break;
+ default:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
+ ctx->operation_succeeded = false;
+@@ -422,6 +434,10 @@ static void process_write_request(
+ * so we should not wait here */
+ }
+ break;
++ case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
++ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
++ ctx->operation_succeeded = false;
++ break;
+ default:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
+ ctx->operation_succeeded = false;
+diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h
+index 8e71324..b9e35d0 100644
+--- a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h
++++ b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h
+@@ -51,6 +51,8 @@ enum aux_transaction_reply {
+ AUX_TRANSACTION_REPLY_I2C_NACK = 0x10,
+ AUX_TRANSACTION_REPLY_I2C_DEFER = 0x20,
+
++ AUX_TRANSACTION_REPLY_HPD_DISCON = 0x40,
++
+ AUX_TRANSACTION_REPLY_INVALID = 0xFF
+ };
+
+@@ -64,7 +66,8 @@ enum aux_channel_operation_result {
+ AUX_CHANNEL_OPERATION_SUCCEEDED,
+ AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN,
+ AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY,
+- AUX_CHANNEL_OPERATION_FAILED_TIMEOUT
++ AUX_CHANNEL_OPERATION_FAILED_TIMEOUT,
++ AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON
+ };
+
+ struct aux_engine;
+diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
+index 9053578..2b927f2 100644
+--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
++++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
+@@ -291,6 +291,12 @@ static void process_channel_reply(
+ value = REG_GET(AUX_SW_STATUS,
+ AUX_SW_REPLY_BYTE_COUNT, &bytes_replied);
+
++ /* in case HPD is LOW, exit AUX transaction */
++ if ((value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) {
++ reply->status = AUX_TRANSACTION_REPLY_HPD_DISCON;
++ return;
++ }
++
+ if (bytes_replied) {
+ uint32_t reply_result;
+
+@@ -347,8 +353,10 @@ static void process_channel_reply(
+ * because there was surely an error that was asserted
+ * that should have been handled
+ * for hot plug case, this could happens*/
+- if (!(value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
++ if (!(value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) {
++ reply->status = AUX_TRANSACTION_REPLY_INVALID;
+ ASSERT_CRITICAL(false);
++ }
+ }
+ }
+
+@@ -371,6 +379,10 @@ static enum aux_channel_operation_result get_channel_status(
+ value = REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 1,
+ 10, aux110->timeout_period/10);
+
++ /* in case HPD is LOW, exit AUX transaction */
++ if ((value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
++ return AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON;
++
+ /* Note that the following bits are set in 'status.bits'
+ * during CTS 4.2.1.2 (FW 3.3.1):
+ * AUX_SW_RX_MIN_COUNT_VIOL, AUX_SW_RX_INVALID_STOP,
+@@ -402,10 +414,10 @@ static enum aux_channel_operation_result get_channel_status(
+ return AUX_CHANNEL_OPERATION_SUCCEEDED;
+ }
+ } else {
+- /*time_elapsed >= aux_engine->timeout_period */
+- if (!(value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
+- ASSERT_CRITICAL(false);
+-
++ /*time_elapsed >= aux_engine->timeout_period
++ * AUX_SW_STATUS__AUX_SW_HPD_DISCON = at this point
++ */
++ ASSERT_CRITICAL(false);
+ return AUX_CHANNEL_OPERATION_FAILED_TIMEOUT;
+ }
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/engine.h
+index 33de8a8..c110970 100644
+--- a/drivers/gpu/drm/amd/display/dc/i2caux/engine.h
++++ b/drivers/gpu/drm/amd/display/dc/i2caux/engine.h
+@@ -53,7 +53,8 @@ enum i2caux_transaction_status {
+ I2CAUX_TRANSACTION_STATUS_FAILED_INCOMPLETE,
+ I2CAUX_TRANSACTION_STATUS_FAILED_OPERATION,
+ I2CAUX_TRANSACTION_STATUS_FAILED_INVALID_OPERATION,
+- I2CAUX_TRANSACTION_STATUS_FAILED_BUFFER_OVERFLOW
++ I2CAUX_TRANSACTION_STATUS_FAILED_BUFFER_OVERFLOW,
++ I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON
+ };
+
+ struct i2caux_transaction_request {
+diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
+index 019e7a0..d968956 100644
+--- a/drivers/gpu/drm/amd/display/include/ddc_service_types.h
++++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
+@@ -40,7 +40,8 @@ enum ddc_result {
+ DDC_RESULT_FAILED_INCOMPLETE,
+ DDC_RESULT_FAILED_OPERATION,
+ DDC_RESULT_FAILED_INVALID_OPERATION,
+- DDC_RESULT_FAILED_BUFFER_OVERFLOW
++ DDC_RESULT_FAILED_BUFFER_OVERFLOW,
++ DDC_RESULT_FAILED_HPD_DISCON
+ };
+
+ enum ddc_service_type {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4533-drm-amd-display-Add-function-to-get-optc-active-size.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4533-drm-amd-display-Add-function-to-get-optc-active-size.patch
new file mode 100644
index 00000000..d2d98337
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4533-drm-amd-display-Add-function-to-get-optc-active-size.patch
@@ -0,0 +1,95 @@
+From 873a1fd3c64ea82ba4ca184715316f507b61fe00 Mon Sep 17 00:00:00 2001
+From: Eric Bernstein <eric.bernstein@amd.com>
+Date: Tue, 8 May 2018 16:20:52 -0400
+Subject: [PATCH 4533/5725] drm/amd/display: Add function to get optc active
+ size
+
+Signed-off-by: Eric Bernstein <eric.bernstein@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 32 ++++++++++++++++++++++
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h | 4 +++
+ .../drm/amd/display/dc/inc/hw/timing_generator.h | 3 ++
+ 3 files changed, 39 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+index f2fbce0..e6a3ade 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+@@ -1257,6 +1257,37 @@ void optc1_read_otg_state(struct optc *optc1,
+ OPTC_UNDERFLOW_OCCURRED_STATUS, &s->underflow_occurred_status);
+ }
+
++bool optc1_get_otg_active_size(struct timing_generator *optc,
++ uint32_t *otg_active_width,
++ uint32_t *otg_active_height)
++{
++ uint32_t otg_enabled;
++ uint32_t v_blank_start;
++ uint32_t v_blank_end;
++ uint32_t h_blank_start;
++ uint32_t h_blank_end;
++ struct optc *optc1 = DCN10TG_FROM_TG(optc);
++
++
++ REG_GET(OTG_CONTROL,
++ OTG_MASTER_EN, &otg_enabled);
++
++ if (otg_enabled == 0)
++ return false;
++
++ REG_GET_2(OTG_V_BLANK_START_END,
++ OTG_V_BLANK_START, &v_blank_start,
++ OTG_V_BLANK_END, &v_blank_end);
++
++ REG_GET_2(OTG_H_BLANK_START_END,
++ OTG_H_BLANK_START, &h_blank_start,
++ OTG_H_BLANK_END, &h_blank_end);
++
++ *otg_active_width = v_blank_start - v_blank_end;
++ *otg_active_height = h_blank_start - h_blank_end;
++ return true;
++}
++
+ void optc1_clear_optc_underflow(struct timing_generator *optc)
+ {
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+@@ -1305,6 +1336,7 @@ static const struct timing_generator_funcs dcn10_tg_funcs = {
+ .get_position = optc1_get_position,
+ .get_frame_count = optc1_get_vblank_counter,
+ .get_scanoutpos = optc1_get_crtc_scanoutpos,
++ .get_otg_active_size = optc1_get_otg_active_size,
+ .set_early_control = optc1_set_early_control,
+ /* used by enable_timing_synchronization. Not need for FPGA */
+ .wait_for_state = optc1_wait_for_state,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+index c62052f..59ed272 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+@@ -507,4 +507,8 @@ bool optc1_is_optc_underflow_occurred(struct timing_generator *optc);
+
+ void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enable);
+
++bool optc1_get_otg_active_size(struct timing_generator *optc,
++ uint32_t *otg_active_width,
++ uint32_t *otg_active_height);
++
+ #endif /* __DC_TIMING_GENERATOR_DCN10_H__ */
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+index 69cb0a1..af700c7 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+@@ -156,6 +156,9 @@ struct timing_generator_funcs {
+ uint32_t *v_blank_end,
+ uint32_t *h_position,
+ uint32_t *v_position);
++ bool (*get_otg_active_size)(struct timing_generator *optc,
++ uint32_t *otg_active_width,
++ uint32_t *otg_active_height);
+ void (*set_early_control)(struct timing_generator *tg,
+ uint32_t early_cntl);
+ void (*wait_for_state)(struct timing_generator *tg,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4534-drm-amd-display-replace-msleep-with-udelay-in-fbc-pa.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4534-drm-amd-display-replace-msleep-with-udelay-in-fbc-pa.patch
new file mode 100644
index 00000000..9253aa6e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4534-drm-amd-display-replace-msleep-with-udelay-in-fbc-pa.patch
@@ -0,0 +1,55 @@
+From 5bacec606d24bf99af3a58f4be396fe092e23308 Mon Sep 17 00:00:00 2001
+From: Roman Li <Roman.Li@amd.com>
+Date: Thu, 3 May 2018 13:29:42 -0400
+Subject: [PATCH 4534/5725] drm/amd/display: replace msleep with udelay in fbc
+ path
+
+The FBC enable and disable paths contain msleep calls, which lead to
+a BUG being hit when called in atomic context, hence this patch
+replaces the msleeps with udelays appropriately.
+
+Signed-off-by: Shirish S <shirish.s@amd.com>
+Signed-off-by: Roman Li <Roman.Li@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
+index 9150d26..e2994d3 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
+@@ -121,10 +121,10 @@ static void reset_lb_on_vblank(struct dc_context *ctx)
+ frame_count = dm_read_reg(ctx, mmCRTC_STATUS_FRAME_COUNT);
+
+
+- for (retry = 100; retry > 0; retry--) {
++ for (retry = 10000; retry > 0; retry--) {
+ if (frame_count != dm_read_reg(ctx, mmCRTC_STATUS_FRAME_COUNT))
+ break;
+- msleep(1);
++ udelay(10);
+ }
+ if (!retry)
+ dm_error("Frame count did not increase for 100ms.\n");
+@@ -147,14 +147,14 @@ static void wait_for_fbc_state_changed(
+ uint32_t addr = mmFBC_STATUS;
+ uint32_t value;
+
+- while (counter < 10) {
++ while (counter < 1000) {
+ value = dm_read_reg(cp110->base.ctx, addr);
+ if (get_reg_field_value(
+ value,
+ FBC_STATUS,
+ FBC_ENABLE_STATUS) == enabled)
+ break;
+- msleep(10);
++ udelay(100);
+ counter++;
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4535-drm-amd-display-add-DPCD-read-for-Sink-ieee-OUI.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4535-drm-amd-display-add-DPCD-read-for-Sink-ieee-OUI.patch
new file mode 100644
index 00000000..cbf605a7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4535-drm-amd-display-add-DPCD-read-for-Sink-ieee-OUI.patch
@@ -0,0 +1,45 @@
+From 497ae2b9041949bbc5b6e239bbcabed6be93f6c3 Mon Sep 17 00:00:00 2001
+From: Anthony Koo <Anthony.Koo@amd.com>
+Date: Tue, 8 May 2018 17:08:57 -0400
+Subject: [PATCH 4535/5725] drm/amd/display: add DPCD read for Sink ieee OUI
+
+Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
+Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+index 163c887..0acc14f 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+@@ -2280,6 +2280,7 @@ static bool retrieve_link_cap(struct dc_link *link)
+ {
+ uint8_t dpcd_data[DP_ADAPTER_CAP - DP_DPCD_REV + 1];
+
++ struct dp_device_vendor_id sink_id;
+ union down_stream_port_count down_strm_port_count;
+ union edp_configuration_cap edp_config_cap;
+ union dp_downstream_port_present ds_port = { 0 };
+@@ -2369,6 +2370,17 @@ static bool retrieve_link_cap(struct dc_link *link)
+ &link->dpcd_caps.sink_count.raw,
+ sizeof(link->dpcd_caps.sink_count.raw));
+
++ /* read sink ieee oui */
++ core_link_read_dpcd(link,
++ DP_SINK_OUI,
++ (uint8_t *)(&sink_id),
++ sizeof(sink_id));
++
++ link->dpcd_caps.sink_dev_id =
++ (sink_id.ieee_oui[0] << 16) +
++ (sink_id.ieee_oui[1] << 8) +
++ (sink_id.ieee_oui[2]);
++
+ /* Connectivity log: detection */
+ CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: ");
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4536-drm-amd-display-add-config-for-sending-VSIF.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4536-drm-amd-display-add-config-for-sending-VSIF.patch
new file mode 100644
index 00000000..09409227
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4536-drm-amd-display-add-config-for-sending-VSIF.patch
@@ -0,0 +1,71 @@
+From 2f2068367c27c186c4bd2dd5ae6807022f32c7f5 Mon Sep 17 00:00:00 2001
+From: Anthony Koo <Anthony.Koo@amd.com>
+Date: Tue, 8 May 2018 17:09:49 -0400
+Subject: [PATCH 4536/5725] drm/amd/display: add config for sending VSIF
+
+Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
+Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 1 +
+ drivers/gpu/drm/amd/display/modules/freesync/freesync.c | 3 ++-
+ drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h | 2 ++
+ 3 files changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 3bf451f..a0b16ac 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -4763,6 +4763,7 @@ void set_freesync_on_stream(struct amdgpu_display_manager *dm,
+ aconnector->min_vfreq * 1000000;
+ config.max_refresh_in_uhz =
+ aconnector->max_vfreq * 1000000;
++ config.vsif_supported = true;
+ }
+
+ mod_freesync_build_vrr_params(dm->freesync_module,
+diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+index 769f467..e168890 100644
+--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
++++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+@@ -492,7 +492,7 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
+ /* Check if Freesync is supported. Return if false. If true,
+ * set the corresponding bit in the info packet
+ */
+- if (!vrr->supported)
++ if (!vrr->supported || !vrr->send_vsif)
+ return;
+
+ if (dc_is_hdmi_signal(stream->signal)) {
+@@ -634,6 +634,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
+ return;
+
+ in_out_vrr->state = in_config->state;
++ in_out_vrr->send_vsif = in_config->vsif_supported;
+
+ if (in_config->state == VRR_STATE_UNSUPPORTED) {
+ in_out_vrr->state = VRR_STATE_UNSUPPORTED;
+diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
+index 85c98af..a0f32cd 100644
+--- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
++++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
+@@ -78,6 +78,7 @@ enum mod_vrr_state {
+
+ struct mod_freesync_config {
+ enum mod_vrr_state state;
++ bool vsif_supported;
+ bool ramping;
+ bool btr;
+ unsigned int min_refresh_in_uhz;
+@@ -103,6 +104,7 @@ struct mod_vrr_params_fixed_refresh {
+
+ struct mod_vrr_params {
+ bool supported;
++ bool send_vsif;
+ enum mod_vrr_state state;
+
+ uint32_t min_refresh_in_uhz;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4537-drm-amd-display-Fix-indentation-in-dcn10-resource-co.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4537-drm-amd-display-Fix-indentation-in-dcn10-resource-co.patch
new file mode 100644
index 00000000..f2b983d3
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4537-drm-amd-display-Fix-indentation-in-dcn10-resource-co.patch
@@ -0,0 +1,30 @@
+From a9d5020a6579694bba7db5c283525fe9c7082118 Mon Sep 17 00:00:00 2001
+From: Hersen Wu <hersenxs.wu@amd.com>
+Date: Tue, 8 May 2018 16:35:09 -0400
+Subject: [PATCH 4537/5725] drm/amd/display: Fix indentation in dcn10 resource
+ constructor
+
+Signed-off-by: Hersen Wu <hersenxs.wu@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+index df5cb2d..99c223b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+@@ -1004,7 +1004,8 @@ static bool construct(
+
+ ctx->dc_bios->regs = &bios_regs;
+
+- pool->base.res_cap = &res_cap;
++ pool->base.res_cap = &res_cap;
++
+ pool->base.funcs = &dcn10_res_pool_funcs;
+
+ /*
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4538-drm-amd-display-Read-DP_SINK_COUNT_ESI-range-on-HPD-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4538-drm-amd-display-Read-DP_SINK_COUNT_ESI-range-on-HPD-.patch
new file mode 100644
index 00000000..c8e1defc
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4538-drm-amd-display-Read-DP_SINK_COUNT_ESI-range-on-HPD-.patch
@@ -0,0 +1,90 @@
+From 10b0f3a5fb46eb514cfe7391024fb01ff48c31d3 Mon Sep 17 00:00:00 2001
+From: Nikola Cornij <nikola.cornij@amd.com>
+Date: Wed, 9 May 2018 13:11:35 -0400
+Subject: [PATCH 4538/5725] drm/amd/display: Read DP_SINK_COUNT_ESI range on
+ HPD for DP 1.4
+
+DP 1.4 compliance now requires that registers at DP_SINK_COUNT_ESI range
+(0x2002-0x2003, 0x200c-0x200f) are read instead of DP_SINK_COUNT range
+(0x200-0x205).
+
+Signed-off-by: Nikola Cornij <nikola.cornij@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 35 ++++++++++++++++++++----
+ include/drm/drm_dp_helper.h | 5 ++++
+ 2 files changed, 35 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+index 0acc14f..9b66634 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+@@ -1630,17 +1630,42 @@ static enum dc_status read_hpd_rx_irq_data(
+ struct dc_link *link,
+ union hpd_irq_data *irq_data)
+ {
++ static enum dc_status retval;
++
+ /* The HW reads 16 bytes from 200h on HPD,
+ * but if we get an AUX_DEFER, the HW cannot retry
+ * and this causes the CTS tests 4.3.2.1 - 3.2.4 to
+ * fail, so we now explicitly read 6 bytes which is
+ * the req from the above mentioned test cases.
++ *
++ * For DP 1.4 we need to read those from 2002h range.
+ */
+- return core_link_read_dpcd(
+- link,
+- DP_SINK_COUNT,
+- irq_data->raw,
+- sizeof(union hpd_irq_data));
++ if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_14)
++ retval = core_link_read_dpcd(
++ link,
++ DP_SINK_COUNT,
++ irq_data->raw,
++ sizeof(union hpd_irq_data));
++ else {
++ /* Read 2 bytes at this location,... */
++ retval = core_link_read_dpcd(
++ link,
++ DP_SINK_COUNT_ESI,
++ irq_data->raw,
++ 2);
++
++ if (retval != DC_OK)
++ return retval;
++
++ /* ... then read remaining 4 at the other location */
++ retval = core_link_read_dpcd(
++ link,
++ DP_LANE0_1_STATUS_ESI,
++ &irq_data->raw[2],
++ 4);
++ }
++
++ return retval;
+ }
+
+ static bool allow_hpd_rx_irq(const struct dc_link *link)
+diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
+index 9a84c32..fb0db0f 100644
+--- a/include/drm/drm_dp_helper.h
++++ b/include/drm/drm_dp_helper.h
+@@ -756,6 +756,11 @@
+ #define DP_RECEIVER_ALPM_STATUS 0x200b /* eDP 1.4 */
+ # define DP_ALPM_LOCK_TIMEOUT_ERROR (1 << 0)
+
++#define DP_LANE0_1_STATUS_ESI 0x200c /* status same as 0x202 */
++#define DP_LANE2_3_STATUS_ESI 0x200d /* status same as 0x203 */
++#define DP_LANE_ALIGN_STATUS_UPDATED_ESI 0x200e /* status same as 0x204 */
++#define DP_SINK_STATUS_ESI 0x200f /* status same as 0x205 */
++
+ #define DP_DP13_DPCD_REV 0x2200
+ #define DP_DP13_MAX_LINK_RATE 0x2201
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4539-drm-amd-display-Default-log-masks-should-include-all.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4539-drm-amd-display-Default-log-masks-should-include-all.patch
new file mode 100644
index 00000000..b9ea50aa
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4539-drm-amd-display-Default-log-masks-should-include-all.patch
@@ -0,0 +1,192 @@
+From 8f0df9f43346466cd4d8cfacd122a7d730b82996 Mon Sep 17 00:00:00 2001
+From: Aric Cyr <aric.cyr@amd.com>
+Date: Wed, 9 May 2018 14:36:50 -0400
+Subject: [PATCH 4539/5725] drm/amd/display: Default log masks should include
+ all connectivity events
+
+Signed-off-by: Aric Cyr <aric.cyr@amd.com>
+Reviewed-by: Jun Lei <Jun.Lei@amd.com>
+Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ .../gpu/drm/amd/display/dc/basics/log_helpers.c | 2 +
+ drivers/gpu/drm/amd/display/dc/basics/logger.c | 83 +++++++++++-----------
+ .../gpu/drm/amd/display/include/logger_interface.h | 2 +
+ 3 files changed, 46 insertions(+), 41 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c b/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
+index 0214515..f6c00a5 100644
+--- a/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
++++ b/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
+@@ -78,6 +78,8 @@ void dc_conn_log(struct dc_context *ctx,
+ if (i == NUM_ELEMENTS(signal_type_info_tbl))
+ goto fail;
+
++ dm_logger_append_heading(&entry);
++
+ dm_logger_append(&entry, "[%s][ConnIdx:%d] ",
+ signal_type_info_tbl[i].name,
+ link->link_index);
+diff --git a/drivers/gpu/drm/amd/display/dc/basics/logger.c b/drivers/gpu/drm/amd/display/dc/basics/logger.c
+index 738a818..733bc5b 100644
+--- a/drivers/gpu/drm/amd/display/dc/basics/logger.c
++++ b/drivers/gpu/drm/amd/display/dc/basics/logger.c
+@@ -32,8 +32,9 @@
+ static const struct dc_log_type_info log_type_info_tbl[] = {
+ {LOG_ERROR, "Error"},
+ {LOG_WARNING, "Warning"},
+- {LOG_DEBUG, "Debug"},
++ {LOG_DEBUG, "Debug"},
+ {LOG_DC, "DC_Interface"},
++ {LOG_DTN, "DTN"},
+ {LOG_SURFACE, "Surface"},
+ {LOG_HW_HOTPLUG, "HW_Hotplug"},
+ {LOG_HW_LINK_TRAINING, "HW_LKTN"},
+@@ -60,7 +61,7 @@ static const struct dc_log_type_info log_type_info_tbl[] = {
+ {LOG_EVENT_LINK_LOSS, "LinkLoss"},
+ {LOG_EVENT_UNDERFLOW, "Underflow"},
+ {LOG_IF_TRACE, "InterfaceTrace"},
+- {LOG_DTN, "DTN"},
++ {LOG_PERF_TRACE, "PerfTrace"},
+ {LOG_DISPLAYSTATS, "DisplayStats"}
+ };
+
+@@ -128,8 +129,45 @@ uint32_t dal_logger_destroy(struct dal_logger **logger)
+ }
+
+ /* ------------------------------------------------------------------------ */
++void dm_logger_append_heading(struct log_entry *entry)
++{
++ int j;
++
++ for (j = 0; j < NUM_ELEMENTS(log_type_info_tbl); j++) {
+
++ const struct dc_log_type_info *info = &log_type_info_tbl[j];
+
++ if (info->type == entry->type)
++ dm_logger_append(entry, "[%s]\t", info->name);
++ }
++}
++
++
++/* Print everything unread existing in log_buffer to debug console*/
++void dm_logger_flush_buffer(struct dal_logger *logger, bool should_warn)
++{
++ char *string_start = &logger->log_buffer[logger->buffer_read_offset];
++
++ if (should_warn)
++ dm_output_to_console(
++ "---------------- FLUSHING LOG BUFFER ----------------\n");
++ while (logger->buffer_read_offset < logger->buffer_write_offset) {
++
++ if (logger->log_buffer[logger->buffer_read_offset] == '\0') {
++ dm_output_to_console("%s", string_start);
++ string_start = logger->log_buffer + logger->buffer_read_offset + 1;
++ }
++ logger->buffer_read_offset++;
++ }
++ if (should_warn)
++ dm_output_to_console(
++ "-------------- END FLUSHING LOG BUFFER --------------\n\n");
++}
++/* ------------------------------------------------------------------------ */
++
++/* Warning: Be careful that 'msg' is null terminated and the total size is
++ * less than DAL_LOGGER_BUFFER_MAX_LOG_LINE_SIZE (256) including '\0'
++ */
+ static bool dal_logger_should_log(
+ struct dal_logger *logger,
+ enum dc_log_type log_type)
+@@ -159,26 +197,6 @@ static void log_to_debug_console(struct log_entry *entry)
+ }
+ }
+
+-/* Print everything unread existing in log_buffer to debug console*/
+-void dm_logger_flush_buffer(struct dal_logger *logger, bool should_warn)
+-{
+- char *string_start = &logger->log_buffer[logger->buffer_read_offset];
+-
+- if (should_warn)
+- dm_output_to_console(
+- "---------------- FLUSHING LOG BUFFER ----------------\n");
+- while (logger->buffer_read_offset < logger->buffer_write_offset) {
+-
+- if (logger->log_buffer[logger->buffer_read_offset] == '\0') {
+- dm_output_to_console("%s", string_start);
+- string_start = logger->log_buffer + logger->buffer_read_offset + 1;
+- }
+- logger->buffer_read_offset++;
+- }
+- if (should_warn)
+- dm_output_to_console(
+- "-------------- END FLUSHING LOG BUFFER --------------\n\n");
+-}
+
+ static void log_to_internal_buffer(struct log_entry *entry)
+ {
+@@ -229,19 +247,6 @@ static void log_to_internal_buffer(struct log_entry *entry)
+ }
+ }
+
+-static void log_heading(struct log_entry *entry)
+-{
+- int j;
+-
+- for (j = 0; j < NUM_ELEMENTS(log_type_info_tbl); j++) {
+-
+- const struct dc_log_type_info *info = &log_type_info_tbl[j];
+-
+- if (info->type == entry->type)
+- dm_logger_append(entry, "[%s]\t", info->name);
+- }
+-}
+-
+ static void append_entry(
+ struct log_entry *entry,
+ char *buffer,
+@@ -259,11 +264,7 @@ static void append_entry(
+ entry->buf_offset += buf_size;
+ }
+
+-/* ------------------------------------------------------------------------ */
+
+-/* Warning: Be careful that 'msg' is null terminated and the total size is
+- * less than DAL_LOGGER_BUFFER_MAX_LOG_LINE_SIZE (256) including '\0'
+- */
+ void dm_logger_write(
+ struct dal_logger *logger,
+ enum dc_log_type log_type,
+@@ -287,7 +288,7 @@ void dm_logger_write(
+
+ entry.type = log_type;
+
+- log_heading(&entry);
++ dm_logger_append_heading(&entry);
+
+ size = dm_log_to_buffer(
+ buffer, LOG_MAX_LINE_SIZE - 1, msg, args);
+@@ -372,7 +373,7 @@ void dm_logger_open(
+
+ logger->open_count++;
+
+- log_heading(entry);
++ dm_logger_append_heading(entry);
+ }
+
+ void dm_logger_close(struct log_entry *entry)
+diff --git a/drivers/gpu/drm/amd/display/include/logger_interface.h b/drivers/gpu/drm/amd/display/include/logger_interface.h
+index dc98d6d..0f10ed7 100644
+--- a/drivers/gpu/drm/amd/display/include/logger_interface.h
++++ b/drivers/gpu/drm/amd/display/include/logger_interface.h
+@@ -62,6 +62,8 @@ void dm_logger_append_va(
+ const char *msg,
+ va_list args);
+
++void dm_logger_append_heading(struct log_entry *entry);
++
+ void dm_logger_open(
+ struct dal_logger *logger,
+ struct log_entry *entry,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4540-drm-amd-display-Optimize-DP_SINK_STATUS_ESI-range-re.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4540-drm-amd-display-Optimize-DP_SINK_STATUS_ESI-range-re.patch
new file mode 100644
index 00000000..2af3f786
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4540-drm-amd-display-Optimize-DP_SINK_STATUS_ESI-range-re.patch
@@ -0,0 +1,61 @@
+From 43aefd39dbea27c0f6e238ed519f90946fc1e78b Mon Sep 17 00:00:00 2001
+From: Nikola Cornij <nikola.cornij@amd.com>
+Date: Wed, 9 May 2018 17:07:36 -0400
+Subject: [PATCH 4540/5725] drm/amd/display: Optimize DP_SINK_STATUS_ESI range
+ read on HPD
+
+DP_SINK_STATUS_ESI range data is not contiguous, but rather than
+getting it in two AUX reads, it's quicker to read more bytes in one
+AUX read and then memcpy the required fields (it's only 8 more
+bytes to read).
+
+Signed-off-by: Nikola Cornij <nikola.cornij@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 22 +++++++++++++---------
+ 1 file changed, 13 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+index 9b66634..72a8a55 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+@@ -1647,22 +1647,26 @@ static enum dc_status read_hpd_rx_irq_data(
+ irq_data->raw,
+ sizeof(union hpd_irq_data));
+ else {
+- /* Read 2 bytes at this location,... */
++ /* Read 14 bytes in a single read and then copy only the required fields.
++ * This is more efficient than doing it in two separate AUX reads. */
++
++ uint8_t tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI + 1];
++
+ retval = core_link_read_dpcd(
+ link,
+ DP_SINK_COUNT_ESI,
+- irq_data->raw,
+- 2);
++ tmp,
++ sizeof(tmp));
+
+ if (retval != DC_OK)
+ return retval;
+
+- /* ... then read remaining 4 at the other location */
+- retval = core_link_read_dpcd(
+- link,
+- DP_LANE0_1_STATUS_ESI,
+- &irq_data->raw[2],
+- 4);
++ irq_data->bytes.sink_cnt.raw = tmp[DP_SINK_COUNT_ESI - DP_SINK_COUNT_ESI];
++ irq_data->bytes.device_service_irq.raw = tmp[DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 - DP_SINK_COUNT_ESI];
++ irq_data->bytes.lane01_status.raw = tmp[DP_LANE0_1_STATUS_ESI - DP_SINK_COUNT_ESI];
++ irq_data->bytes.lane23_status.raw = tmp[DP_LANE2_3_STATUS_ESI - DP_SINK_COUNT_ESI];
++ irq_data->bytes.lane_status_updated.raw = tmp[DP_LANE_ALIGN_STATUS_UPDATED_ESI - DP_SINK_COUNT_ESI];
++ irq_data->bytes.sink_status.raw = tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI];
+ }
+
+ return retval;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4541-drm-amd-display-Dynamic-HDR-metadata-mem-buffer.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4541-drm-amd-display-Dynamic-HDR-metadata-mem-buffer.patch
new file mode 100644
index 00000000..b718b09a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4541-drm-amd-display-Dynamic-HDR-metadata-mem-buffer.patch
@@ -0,0 +1,46 @@
+From a27b9e7b6de6661523d380138e3913c1fa28cc9a Mon Sep 17 00:00:00 2001
+From: Krunoslav Kovac <Krunoslav.Kovac@amd.com>
+Date: Tue, 8 May 2018 16:03:58 -0400
+Subject: [PATCH 4541/5725] drm/amd/display: Dynamic HDR metadata mem buffer
+
+Basic framework:
+- caps for reporting dynamic HDR metadata support
+- allocation of frame buffer memory and storage
+
+Signed-off-by: Krunoslav Kovac <Krunoslav.Kovac@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Reviewed-by: Anthony Koo <Anthony.Koo@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc.h | 1 +
+ drivers/gpu/drm/amd/display/dc/dc_stream.h | 2 ++
+ 2 files changed, 3 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 81a576f..693d828 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -68,6 +68,7 @@ struct dc_caps {
+ uint32_t max_planes;
+ uint32_t max_downscale_ratio;
+ uint32_t i2c_speed_in_khz;
++ uint32_t dmdata_alloc_size;
+ unsigned int max_cursor_size;
+ unsigned int max_video_width;
+ int linear_pitch_alignment;
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+index aefc76b..9468c2f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+@@ -65,6 +65,8 @@ struct dc_stream_state {
+ struct audio_info audio_info;
+
+ struct dc_info_packet hdr_static_metadata;
++ PHYSICAL_ADDRESS_LOC dmdata_address;
++
+ struct dc_transfer_func *out_transfer_func;
+ struct colorspace_transform gamut_remap_matrix;
+ struct dc_csc_transform csc_color_matrix;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4542-drm-amd-display-Refactor-audio-programming.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4542-drm-amd-display-Refactor-audio-programming.patch
new file mode 100644
index 00000000..10f7bdcc
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4542-drm-amd-display-Refactor-audio-programming.patch
@@ -0,0 +1,440 @@
+From 5a2e7ffaa966e64a7c1c07813010ac71b992ee86 Mon Sep 17 00:00:00 2001
+From: Anthony Koo <Anthony.Koo@amd.com>
+Date: Thu, 10 May 2018 14:21:47 -0400
+Subject: [PATCH 4542/5725] drm/amd/display: Refactor audio programming
+
+Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
+Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_link.c | 24 +-
+ .../amd/display/dc/dce110/dce110_hw_sequencer.c | 256 ++++++++++++---------
+ .../amd/display/dc/dce110/dce110_hw_sequencer.h | 4 +
+ .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 16 +-
+ drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 5 +
+ 5 files changed, 163 insertions(+), 142 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index 2fa5218..a9485c1 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -1861,28 +1861,6 @@ static enum dc_status enable_link(
+ break;
+ }
+
+- if (pipe_ctx->stream_res.audio && status == DC_OK) {
+- struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+- /* notify audio driver for audio modes of monitor */
+- struct pp_smu_funcs_rv *pp_smu = core_dc->res_pool->pp_smu;
+- unsigned int i, num_audio = 1;
+- for (i = 0; i < MAX_PIPES; i++) {
+- /*current_state not updated yet*/
+- if (core_dc->current_state->res_ctx.pipe_ctx[i].stream_res.audio != NULL)
+- num_audio++;
+- }
+-
+- pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio);
+-
+- if (num_audio == 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
+- /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
+- pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
+- /* un-mute audio */
+- /* TODO: audio should be per stream rather than per link */
+- pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
+- pipe_ctx->stream_res.stream_enc, false);
+- }
+-
+ return status;
+ }
+
+@@ -2415,6 +2393,8 @@ void core_link_enable_stream(
+ }
+ }
+
++ core_dc->hwss.enable_audio_stream(pipe_ctx);
++
+ /* turn off otg test pattern if enable */
+ pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
+ CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index 6b22a53..0544568 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -972,19 +972,35 @@ void hwss_edp_backlight_control(
+ edp_receiver_ready_T9(link);
+ }
+
+-void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
++void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
+ {
+- struct dc_stream_state *stream = pipe_ctx->stream;
+- struct dc_link *link = stream->sink->link;
+- struct dc *dc = pipe_ctx->stream->ctx->dc;
++ struct dc *core_dc = pipe_ctx->stream->ctx->dc;
++ /* notify audio driver for audio modes of monitor */
++ struct pp_smu_funcs_rv *pp_smu = core_dc->res_pool->pp_smu;
++ unsigned int i, num_audio = 1;
+
+- if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+- pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets(
+- pipe_ctx->stream_res.stream_enc);
++ if (pipe_ctx->stream_res.audio) {
++ for (i = 0; i < MAX_PIPES; i++) {
++ /*current_state not updated yet*/
++ if (core_dc->current_state->res_ctx.pipe_ctx[i].stream_res.audio != NULL)
++ num_audio++;
++ }
+
+- if (dc_is_dp_signal(pipe_ctx->stream->signal))
+- pipe_ctx->stream_res.stream_enc->funcs->stop_dp_info_packets(
+- pipe_ctx->stream_res.stream_enc);
++ pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio);
++
++ if (num_audio == 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
++ /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
++ pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
++ /* un-mute audio */
++ /* TODO: audio should be per stream rather than per link */
++ pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
++ pipe_ctx->stream_res.stream_enc, false);
++ }
++}
++
++void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
++{
++ struct dc *dc = pipe_ctx->stream->ctx->dc;
+
+ pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
+ pipe_ctx->stream_res.stream_enc, true);
+@@ -1015,7 +1031,23 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
+ * stream->stream_engine_id);
+ */
+ }
++}
+
++void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
++{
++ struct dc_stream_state *stream = pipe_ctx->stream;
++ struct dc_link *link = stream->sink->link;
++ struct dc *dc = pipe_ctx->stream->ctx->dc;
++
++ if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
++ pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets(
++ pipe_ctx->stream_res.stream_enc);
++
++ if (dc_is_dp_signal(pipe_ctx->stream->signal))
++ pipe_ctx->stream_res.stream_enc->funcs->stop_dp_info_packets(
++ pipe_ctx->stream_res.stream_enc);
++
++ dc->hwss.disable_audio_stream(pipe_ctx, option);
+
+ link->link_enc->funcs->connect_dig_be_to_fe(
+ link->link_enc,
+@@ -1308,6 +1340,30 @@ static enum dc_status apply_single_controller_ctx_to_hw(
+ struct pipe_ctx *pipe_ctx_old = &dc->current_state->res_ctx.
+ pipe_ctx[pipe_ctx->pipe_idx];
+
++ if (pipe_ctx->stream_res.audio != NULL) {
++ struct audio_output audio_output;
++
++ build_audio_output(context, pipe_ctx, &audio_output);
++
++ if (dc_is_dp_signal(pipe_ctx->stream->signal))
++ pipe_ctx->stream_res.stream_enc->funcs->dp_audio_setup(
++ pipe_ctx->stream_res.stream_enc,
++ pipe_ctx->stream_res.audio->inst,
++ &pipe_ctx->stream->audio_info);
++ else
++ pipe_ctx->stream_res.stream_enc->funcs->hdmi_audio_setup(
++ pipe_ctx->stream_res.stream_enc,
++ pipe_ctx->stream_res.audio->inst,
++ &pipe_ctx->stream->audio_info,
++ &audio_output.crtc_info);
++
++ pipe_ctx->stream_res.audio->funcs->az_configure(
++ pipe_ctx->stream_res.audio,
++ pipe_ctx->stream->signal,
++ &audio_output.crtc_info,
++ &pipe_ctx->stream->audio_info);
++ }
++
+ /* */
+ dc->hwss.enable_stream_timing(pipe_ctx, context, dc);
+
+@@ -1966,6 +2022,86 @@ static void dce110_reset_hw_ctx_wrap(
+ }
+ }
+
++static void dce110_setup_audio_dto(
++ struct dc *dc,
++ struct dc_state *context)
++{
++ int i;
++
++ /* program audio wall clock. use HDMI as clock source if HDMI
++ * audio active. Otherwise, use DP as clock source
++ * first, loop to find any HDMI audio, if not, loop find DP audio
++ */
++ /* Setup audio rate clock source */
++ /* Issue:
++ * Audio lag happened on DP monitor when unplug a HDMI monitor
++ *
++ * Cause:
++ * In case of DP and HDMI connected or HDMI only, DCCG_AUDIO_DTO_SEL
++ * is set to either dto0 or dto1, audio should work fine.
++ * In case of DP connected only, DCCG_AUDIO_DTO_SEL should be dto1,
++ * set to dto0 will cause audio lag.
++ *
++ * Solution:
++ * Not optimized audio wall dto setup. When mode set, iterate pipe_ctx,
++ * find first available pipe with audio, setup audio wall DTO per topology
++ * instead of per pipe.
++ */
++ for (i = 0; i < dc->res_pool->pipe_count; i++) {
++ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
++
++ if (pipe_ctx->stream == NULL)
++ continue;
++
++ if (pipe_ctx->top_pipe)
++ continue;
++
++ if (pipe_ctx->stream->signal != SIGNAL_TYPE_HDMI_TYPE_A)
++ continue;
++
++ if (pipe_ctx->stream_res.audio != NULL) {
++ struct audio_output audio_output;
++
++ build_audio_output(context, pipe_ctx, &audio_output);
++
++ pipe_ctx->stream_res.audio->funcs->wall_dto_setup(
++ pipe_ctx->stream_res.audio,
++ pipe_ctx->stream->signal,
++ &audio_output.crtc_info,
++ &audio_output.pll_info);
++ break;
++ }
++ }
++
++ /* no HDMI audio is found, try DP audio */
++ if (i == dc->res_pool->pipe_count) {
++ for (i = 0; i < dc->res_pool->pipe_count; i++) {
++ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
++
++ if (pipe_ctx->stream == NULL)
++ continue;
++
++ if (pipe_ctx->top_pipe)
++ continue;
++
++ if (!dc_is_dp_signal(pipe_ctx->stream->signal))
++ continue;
++
++ if (pipe_ctx->stream_res.audio != NULL) {
++ struct audio_output audio_output;
++
++ build_audio_output(context, pipe_ctx, &audio_output);
++
++ pipe_ctx->stream_res.audio->funcs->wall_dto_setup(
++ pipe_ctx->stream_res.audio,
++ pipe_ctx->stream->signal,
++ &audio_output.crtc_info,
++ &audio_output.pll_info);
++ break;
++ }
++ }
++ }
++}
+
+ enum dc_status dce110_apply_ctx_to_hw(
+ struct dc *dc,
+@@ -2057,79 +2193,8 @@ enum dc_status dce110_apply_ctx_to_hw(
+ dc->res_pool->display_clock,
+ context->bw.dce.dispclk_khz * 115 / 100);
+ }
+- /* program audio wall clock. use HDMI as clock source if HDMI
+- * audio active. Otherwise, use DP as clock source
+- * first, loop to find any HDMI audio, if not, loop find DP audio
+- */
+- /* Setup audio rate clock source */
+- /* Issue:
+- * Audio lag happened on DP monitor when unplug a HDMI monitor
+- *
+- * Cause:
+- * In case of DP and HDMI connected or HDMI only, DCCG_AUDIO_DTO_SEL
+- * is set to either dto0 or dto1, audio should work fine.
+- * In case of DP connected only, DCCG_AUDIO_DTO_SEL should be dto1,
+- * set to dto0 will cause audio lag.
+- *
+- * Solution:
+- * Not optimized audio wall dto setup. When mode set, iterate pipe_ctx,
+- * find first available pipe with audio, setup audio wall DTO per topology
+- * instead of per pipe.
+- */
+- for (i = 0; i < dc->res_pool->pipe_count; i++) {
+- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+-
+- if (pipe_ctx->stream == NULL)
+- continue;
+-
+- if (pipe_ctx->top_pipe)
+- continue;
+-
+- if (pipe_ctx->stream->signal != SIGNAL_TYPE_HDMI_TYPE_A)
+- continue;
+-
+- if (pipe_ctx->stream_res.audio != NULL) {
+- struct audio_output audio_output;
+-
+- build_audio_output(context, pipe_ctx, &audio_output);
+-
+- pipe_ctx->stream_res.audio->funcs->wall_dto_setup(
+- pipe_ctx->stream_res.audio,
+- pipe_ctx->stream->signal,
+- &audio_output.crtc_info,
+- &audio_output.pll_info);
+- break;
+- }
+- }
+-
+- /* no HDMI audio is found, try DP audio */
+- if (i == dc->res_pool->pipe_count) {
+- for (i = 0; i < dc->res_pool->pipe_count; i++) {
+- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+-
+- if (pipe_ctx->stream == NULL)
+- continue;
+-
+- if (pipe_ctx->top_pipe)
+- continue;
+-
+- if (!dc_is_dp_signal(pipe_ctx->stream->signal))
+- continue;
+-
+- if (pipe_ctx->stream_res.audio != NULL) {
+- struct audio_output audio_output;
+-
+- build_audio_output(context, pipe_ctx, &audio_output);
+
+- pipe_ctx->stream_res.audio->funcs->wall_dto_setup(
+- pipe_ctx->stream_res.audio,
+- pipe_ctx->stream->signal,
+- &audio_output.crtc_info,
+- &audio_output.pll_info);
+- break;
+- }
+- }
+- }
++ dce110_setup_audio_dto(dc, context);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx_old =
+@@ -2148,31 +2213,6 @@ enum dc_status dce110_apply_ctx_to_hw(
+ if (pipe_ctx->top_pipe)
+ continue;
+
+- if (context->res_ctx.pipe_ctx[i].stream_res.audio != NULL) {
+-
+- struct audio_output audio_output;
+-
+- build_audio_output(context, pipe_ctx, &audio_output);
+-
+- if (dc_is_dp_signal(pipe_ctx->stream->signal))
+- pipe_ctx->stream_res.stream_enc->funcs->dp_audio_setup(
+- pipe_ctx->stream_res.stream_enc,
+- pipe_ctx->stream_res.audio->inst,
+- &pipe_ctx->stream->audio_info);
+- else
+- pipe_ctx->stream_res.stream_enc->funcs->hdmi_audio_setup(
+- pipe_ctx->stream_res.stream_enc,
+- pipe_ctx->stream_res.audio->inst,
+- &pipe_ctx->stream->audio_info,
+- &audio_output.crtc_info);
+-
+- pipe_ctx->stream_res.audio->funcs->az_configure(
+- pipe_ctx->stream_res.audio,
+- pipe_ctx->stream->signal,
+- &audio_output.crtc_info,
+- &pipe_ctx->stream->audio_info);
+- }
+-
+ status = apply_single_controller_ctx_to_hw(
+ pipe_ctx,
+ context,
+@@ -2985,6 +3025,8 @@ static const struct hw_sequencer_funcs dce110_funcs = {
+ .disable_stream = dce110_disable_stream,
+ .unblank_stream = dce110_unblank_stream,
+ .blank_stream = dce110_blank_stream,
++ .enable_audio_stream = dce110_enable_audio_stream,
++ .disable_audio_stream = dce110_disable_audio_stream,
+ .enable_display_pipe_clock_gating = enable_display_pipe_clock_gating,
+ .enable_display_power_gating = dce110_enable_display_power_gating,
+ .disable_plane = dce110_power_down_fe,
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+index 5d7e9f5..f48d5a6 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+@@ -49,6 +49,10 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
+ struct dc_link_settings *link_settings);
+
+ void dce110_blank_stream(struct pipe_ctx *pipe_ctx);
++
++void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx);
++void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option);
++
+ void dce110_update_info_frame(struct pipe_ctx *pipe_ctx);
+
+ void dce110_set_avmute(struct pipe_ctx *pipe_ctx, bool enable);
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index b7ff538..72d0b6f6 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -734,19 +734,7 @@ static void reset_back_end_for_pipe(
+ if (!pipe_ctx->stream->dpms_off)
+ core_link_disable_stream(pipe_ctx, FREE_ACQUIRED_RESOURCE);
+ else if (pipe_ctx->stream_res.audio) {
+- /*
+- * if stream is already disabled outside of commit streams path,
+- * audio disable was skipped. Need to do it here
+- */
+- pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
+-
+- if (dc->caps.dynamic_audio == true) {
+- /*we have to dynamic arbitrate the audio endpoints*/
+- pipe_ctx->stream_res.audio = NULL;
+- /*we free the resource, need reset is_audio_acquired*/
+- update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false);
+- }
+-
++ dc->hwss.disable_audio_stream(pipe_ctx, FREE_ACQUIRED_RESOURCE);
+ }
+
+ }
+@@ -2801,6 +2789,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
+ .disable_stream = dce110_disable_stream,
+ .unblank_stream = dce110_unblank_stream,
+ .blank_stream = dce110_blank_stream,
++ .enable_audio_stream = dce110_enable_audio_stream,
++ .disable_audio_stream = dce110_disable_audio_stream,
+ .enable_display_power_gating = dcn10_dummy_display_power_gating,
+ .disable_plane = dcn10_disable_plane,
+ .blank_pixel_data = dcn10_blank_pixel_data,
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+index 52db80f..a71770e 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+@@ -154,6 +154,11 @@ struct hw_sequencer_funcs {
+ struct dc_link_settings *link_settings);
+
+ void (*blank_stream)(struct pipe_ctx *pipe_ctx);
++
++ void (*enable_audio_stream)(struct pipe_ctx *pipe_ctx);
++
++ void (*disable_audio_stream)(struct pipe_ctx *pipe_ctx, int option);
++
+ void (*pipe_control_lock)(
+ struct dc *dc,
+ struct pipe_ctx *pipe,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4543-drm-amd-display-HLG-support.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4543-drm-amd-display-HLG-support.patch
new file mode 100644
index 00000000..8b681f68
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4543-drm-amd-display-HLG-support.patch
@@ -0,0 +1,200 @@
+From b85078591bed8f2359bf04c7f77818d8877387c5 Mon Sep 17 00:00:00 2001
+From: Vitaly Prosyak <vitaly.prosyak@amd.com>
+Date: Thu, 10 May 2018 12:37:35 -0500
+Subject: [PATCH 4543/5725] drm/amd/display: HLG support
+
+Low level calculation methods.
+
+Signed-off-by: Vitaly Prosyak <vitaly.prosyak@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc.h | 2 +
+ .../drm/amd/display/modules/color/color_gamma.c | 137 +++++++++++++++++++++
+ 2 files changed, 139 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 693d828..cf076a6 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -378,6 +378,8 @@ enum dc_transfer_func_predefined {
+ TRANSFER_FUNCTION_PQ,
+ TRANSFER_FUNCTION_LINEAR,
+ TRANSFER_FUNCTION_UNITY,
++ TRANSFER_FUNCTION_HLG,
++ TRANSFER_FUNCTION_HLG12
+ };
+
+ struct dc_transfer_func {
+diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+index e803b37..fa9a199 100644
+--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
++++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+@@ -131,6 +131,63 @@ static void compute_de_pq(struct fixed31_32 in_x, struct fixed31_32 *out_y)
+ dc_fixpt_div(dc_fixpt_one, m1));
+
+ }
++
++/*de gamma, none linear to linear*/
++static void compute_hlg_oetf(struct fixed31_32 in_x, bool is_light0_12, struct fixed31_32 *out_y)
++{
++ struct fixed31_32 a;
++ struct fixed31_32 b;
++ struct fixed31_32 c;
++ struct fixed31_32 threshold;
++ struct fixed31_32 reference_white_level;
++
++ a = dc_fixpt_from_fraction(17883277, 100000000);
++ if (is_light0_12) {
++ /*light 0-12*/
++ b = dc_fixpt_from_fraction(28466892, 100000000);
++ c = dc_fixpt_from_fraction(55991073, 100000000);
++ threshold = dc_fixpt_one;
++ reference_white_level = dc_fixpt_half;
++ } else {
++ /*light 0-1*/
++ b = dc_fixpt_from_fraction(2372241, 100000000);
++ c = dc_fixpt_add(dc_fixpt_one, dc_fixpt_from_fraction(429347, 100000000));
++ threshold = dc_fixpt_from_fraction(1, 12);
++ reference_white_level = dc_fixpt_pow(dc_fixpt_from_fraction(3, 1), dc_fixpt_half);
++ }
++ if (dc_fixpt_lt(threshold, in_x))
++ *out_y = dc_fixpt_add(c, dc_fixpt_mul(a, dc_fixpt_log(dc_fixpt_sub(in_x, b))));
++ else
++ *out_y = dc_fixpt_mul(dc_fixpt_pow(in_x, dc_fixpt_half), reference_white_level);
++}
++
++/*re gamma, linear to none linear*/
++static void compute_hlg_eotf(struct fixed31_32 in_x, bool is_light0_12, struct fixed31_32 *out_y)
++{
++ struct fixed31_32 a;
++ struct fixed31_32 b;
++ struct fixed31_32 c;
++ struct fixed31_32 reference_white_level;
++
++ a = dc_fixpt_from_fraction(17883277, 100000000);
++ if (is_light0_12) {
++ /*light 0-12*/
++ b = dc_fixpt_from_fraction(28466892, 100000000);
++ c = dc_fixpt_from_fraction(55991073, 100000000);
++ reference_white_level = dc_fixpt_from_fraction(4, 1);
++ } else {
++ /*light 0-1*/
++ b = dc_fixpt_from_fraction(2372241, 100000000);
++ c = dc_fixpt_add(dc_fixpt_one, dc_fixpt_from_fraction(429347, 100000000));
++ reference_white_level = dc_fixpt_from_fraction(1, 3);
++ }
++ if (dc_fixpt_lt(dc_fixpt_half, in_x))
++ *out_y = dc_fixpt_add(dc_fixpt_exp(dc_fixpt_div(dc_fixpt_sub(in_x, c), a)), b);
++ else
++ *out_y = dc_fixpt_mul(dc_fixpt_pow(in_x, dc_fixpt_from_fraction(2, 1)), reference_white_level);
++}
++
++
+ /* one-time pre-compute PQ values - only for sdr_white_level 80 */
+ void precompute_pq(void)
+ {
+@@ -691,6 +748,48 @@ static void build_degamma(struct pwl_float_data_ex *curve,
+ }
+ }
+
++static void build_hlg_degamma(struct pwl_float_data_ex *degamma,
++ uint32_t hw_points_num,
++ const struct hw_x_point *coordinate_x, bool is_light0_12)
++{
++ uint32_t i;
++
++ struct pwl_float_data_ex *rgb = degamma;
++ const struct hw_x_point *coord_x = coordinate_x;
++
++ i = 0;
++
++ while (i != hw_points_num + 1) {
++ compute_hlg_oetf(coord_x->x, is_light0_12, &rgb->r);
++ rgb->g = rgb->r;
++ rgb->b = rgb->r;
++ ++coord_x;
++ ++rgb;
++ ++i;
++ }
++}
++
++static void build_hlg_regamma(struct pwl_float_data_ex *regamma,
++ uint32_t hw_points_num,
++ const struct hw_x_point *coordinate_x, bool is_light0_12)
++{
++ uint32_t i;
++
++ struct pwl_float_data_ex *rgb = regamma;
++ const struct hw_x_point *coord_x = coordinate_x;
++
++ i = 0;
++
++ while (i != hw_points_num + 1) {
++ compute_hlg_eotf(coord_x->x, is_light0_12, &rgb->r);
++ rgb->g = rgb->r;
++ rgb->b = rgb->r;
++ ++coord_x;
++ ++rgb;
++ ++i;
++ }
++}
++
+ static void scale_gamma(struct pwl_float_data *pwl_rgb,
+ const struct dc_gamma *ramp,
+ struct dividers dividers)
+@@ -1615,6 +1714,25 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
+ ret = true;
+
+ kvfree(rgb_regamma);
++ } else if (trans == TRANSFER_FUNCTION_HLG ||
++ trans == TRANSFER_FUNCTION_HLG12) {
++ rgb_regamma = kvzalloc(sizeof(*rgb_regamma) *
++ (MAX_HW_POINTS + _EXTRA_POINTS),
++ GFP_KERNEL);
++ if (!rgb_regamma)
++ goto rgb_regamma_alloc_fail;
++
++ build_hlg_regamma(rgb_regamma,
++ MAX_HW_POINTS,
++ coordinates_x,
++ trans == TRANSFER_FUNCTION_HLG12 ? true:false);
++ for (i = 0; i <= MAX_HW_POINTS ; i++) {
++ points->red[i] = rgb_regamma[i].r;
++ points->green[i] = rgb_regamma[i].g;
++ points->blue[i] = rgb_regamma[i].b;
++ }
++ ret = true;
++ kvfree(rgb_regamma);
+ }
+ rgb_regamma_alloc_fail:
+ return ret;
+@@ -1675,6 +1793,25 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
+ ret = true;
+
+ kvfree(rgb_degamma);
++ } else if (trans == TRANSFER_FUNCTION_HLG ||
++ trans == TRANSFER_FUNCTION_HLG12) {
++ rgb_degamma = kvzalloc(sizeof(*rgb_degamma) *
++ (MAX_HW_POINTS + _EXTRA_POINTS),
++ GFP_KERNEL);
++ if (!rgb_degamma)
++ goto rgb_degamma_alloc_fail;
++
++ build_hlg_degamma(rgb_degamma,
++ MAX_HW_POINTS,
++ coordinates_x,
++ trans == TRANSFER_FUNCTION_HLG12 ? true:false);
++ for (i = 0; i <= MAX_HW_POINTS ; i++) {
++ points->red[i] = rgb_degamma[i].r;
++ points->green[i] = rgb_degamma[i].g;
++ points->blue[i] = rgb_degamma[i].b;
++ }
++ ret = true;
++ kvfree(rgb_degamma);
+ }
+ points->end_exponent = 0;
+ points->x_point_at_y1_red = 1;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4544-drm-amd-display-DP-component-depth-16-bpc.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4544-drm-amd-display-DP-component-depth-16-bpc.patch
new file mode 100644
index 00000000..28f14651
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4544-drm-amd-display-DP-component-depth-16-bpc.patch
@@ -0,0 +1,33 @@
+From ae657b4d551bf3ba5d2a2b5f32641740cd33102d Mon Sep 17 00:00:00 2001
+From: Eric Bernstein <eric.bernstein@amd.com>
+Date: Thu, 10 May 2018 15:12:09 -0400
+Subject: [PATCH 4544/5725] drm/amd/display: DP component depth 16 bpc
+
+Add register programming to support 16bpc component
+depth for DP.
+
+Signed-off-by: Eric Bernstein <eric.bernstein@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+index 653b7b2..c928ee4 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+@@ -319,6 +319,10 @@ void enc1_stream_encoder_dp_set_stream_attribute(
+ REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
+ DP_COMPONENT_PIXEL_DEPTH_12BPC);
+ break;
++ case COLOR_DEPTH_161616:
++ REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
++ DP_COMPONENT_PIXEL_DEPTH_16BPC);
++ break;
+ default:
+ REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
+ DP_COMPONENT_PIXEL_DEPTH_6BPC);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4545-drm-amd-display-Added-documentation-for-some-DC-inte.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4545-drm-amd-display-Added-documentation-for-some-DC-inte.patch
new file mode 100644
index 00000000..907b773f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4545-drm-amd-display-Added-documentation-for-some-DC-inte.patch
@@ -0,0 +1,94 @@
+From c430f842b39dc315bc60f2e0e8b4ef7b1ed22270 Mon Sep 17 00:00:00 2001
+From: Yasir Al Shekerchi <YasirAl.Shekerchi@amd.com>
+Date: Fri, 4 May 2018 16:53:03 -0400
+Subject: [PATCH 4545/5725] drm/amd/display: Added documentation for some DC
+ interface functions
+
+Signed-off-by: Yasir Al Shekerchi <YasirAl.Shekerchi@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc.c | 16 ++++++++++++++++
+ drivers/gpu/drm/amd/display/dc/core/dc_link.c | 16 ++++++++++++++++
+ drivers/gpu/drm/amd/display/dc/core/dc_surface.c | 11 +++++++++++
+ 3 files changed, 43 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 898c801..3243d65 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -169,6 +169,22 @@ static bool create_links(
+ return false;
+ }
+
++/**
++ *****************************************************************************
++ * Function: dc_stream_adjust_vmin_vmax
++ *
++ * @brief
++ * Looks up the pipe context of dc_stream_state and updates the
++ * vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh
++ * Rate, which is a power-saving feature that targets reducing panel
++ * refresh rate while the screen is static
++ *
++ * @param [in] dc: dc reference
++ * @param [in] stream: Initial dc stream state
++ * @param [in] adjust: Updated parameters for vertical_total_min and
++ * vertical_total_max
++ *****************************************************************************
++ */
+ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_crtc_timing_adjust *adjust)
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index a9485c1..08b7ee5 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -2433,6 +2433,22 @@ void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
+ core_dc->hwss.set_avmute(pipe_ctx, enable);
+ }
+
++/**
++ *****************************************************************************
++ * Function: dc_link_enable_hpd_filter
++ *
++ * @brief
++ * If enable is true, programs HPD filter on associated HPD line using
++ * delay_on_disconnect/delay_on_connect values dependent on
++ * link->connector_signal
++ *
++ * If enable is false, programs HPD filter on associated HPD line with no
++ * delays on connect or disconnect
++ *
++ * @param [in] link: pointer to the dc link
++ * @param [in] enable: boolean specifying whether to enable hpd
++ *****************************************************************************
++ */
+ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)
+ {
+ struct gpio *hpd;
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+index 68a71ad..815dfb5 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+@@ -84,6 +84,17 @@ struct dc_plane_state *dc_create_plane_state(struct dc *dc)
+ return plane_state;
+ }
+
++/**
++ *****************************************************************************
++ * Function: dc_plane_get_status
++ *
++ * @brief
++ * Looks up the pipe context of plane_state and updates the pending status
++ * of the pipe context. Then returns plane_state->status
++ *
++ * @param [in] plane_state: pointer to the plane_state to get the status of
++ *****************************************************************************
++ */
+ const struct dc_plane_status *dc_plane_get_status(
+ const struct dc_plane_state *plane_state)
+ {
+--
+2.7.4
+
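Editor's note: as context for the DRR documentation added in the patch above, here is a minimal caller sketch (not part of the patch) showing how dc_stream_adjust_vmin_vmax might be invoked; the timing field names (v_total, v_total_min, v_total_max) are assumed from the surrounding DC code, and the helper itself is hypothetical.

/* Illustrative sketch only: widen the allowed vertical total so the panel
 * can drop its refresh rate while the image is static (DRR). */
static void example_enable_drr(struct dc *dc, struct dc_stream_state *stream)
{
	struct dc_crtc_timing_adjust adjust = {
		.v_total_min = stream->timing.v_total,      /* nominal refresh */
		.v_total_max = stream->timing.v_total * 2,  /* allow ~half rate */
	};

	if (!dc_stream_adjust_vmin_vmax(dc, stream, &adjust))
		DRM_WARN("DRR vmin/vmax adjustment failed\n");
}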
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4546-drm-amd-display-dal-3.1.46.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4546-drm-amd-display-dal-3.1.46.patch
new file mode 100644
index 00000000..9de8236a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4546-drm-amd-display-dal-3.1.46.patch
@@ -0,0 +1,28 @@
+From f7c6e156a9ee6b8269c2ed7ecf78137f8487eaed Mon Sep 17 00:00:00 2001
+From: Tony Cheng <tony.cheng@amd.com>
+Date: Tue, 8 May 2018 12:24:40 -0400
+Subject: [PATCH 4546/5725] drm/amd/display: dal 3.1.46
+
+Signed-off-by: Tony Cheng <tony.cheng@amd.com>
+Reviewed-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index cf076a6..3981837 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -38,7 +38,7 @@
+ #include "inc/compressor.h"
+ #include "dml/display_mode_lib.h"
+
+-#define DC_VER "3.1.45"
++#define DC_VER "3.1.46"
+
+ #define MAX_SURFACES 3
+ #define MAX_STREAMS 6
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4547-drm-amd-display-Set-TMZ-and-DCC-for-secondary-surfac.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4547-drm-amd-display-Set-TMZ-and-DCC-for-secondary-surfac.patch
new file mode 100644
index 00000000..f2d34c6d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4547-drm-amd-display-Set-TMZ-and-DCC-for-secondary-surfac.patch
@@ -0,0 +1,82 @@
+From 8998476887fd818786a42868fafa3300e8fbe0ba Mon Sep 17 00:00:00 2001
+From: Eric Bernstein <eric.bernstein@amd.com>
+Date: Mon, 14 May 2018 16:55:07 -0400
+Subject: [PATCH 4547/5725] drm/amd/display: Set TMZ and DCC for secondary
+ surface
+
+Add register programming to support TMZ and DCC on
+secondary surfaces.
+
+Signed-off-by: Eric Bernstein <eric.bernstein@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 14 ++++++++++----
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 8 ++++++++
+ 2 files changed, 18 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+index d2ab78b..c28085b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+@@ -396,11 +396,15 @@ bool hubp1_program_surface_flip_and_addr(
+ if (address->grph_stereo.right_addr.quad_part == 0)
+ break;
+
+- REG_UPDATE_4(DCSURF_SURFACE_CONTROL,
++ REG_UPDATE_8(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_TMZ, address->tmz_surface,
+ PRIMARY_SURFACE_TMZ_C, address->tmz_surface,
+ PRIMARY_META_SURFACE_TMZ, address->tmz_surface,
+- PRIMARY_META_SURFACE_TMZ_C, address->tmz_surface);
++ PRIMARY_META_SURFACE_TMZ_C, address->tmz_surface,
++ SECONDARY_SURFACE_TMZ, address->tmz_surface,
++ SECONDARY_SURFACE_TMZ_C, address->tmz_surface,
++ SECONDARY_META_SURFACE_TMZ, address->tmz_surface,
++ SECONDARY_META_SURFACE_TMZ_C, address->tmz_surface);
+
+ if (address->grph_stereo.right_meta_addr.quad_part != 0) {
+
+@@ -459,9 +463,11 @@ void hubp1_dcc_control(struct hubp *hubp, bool enable,
+ uint32_t dcc_ind_64b_blk = independent_64b_blks ? 1 : 0;
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+- REG_UPDATE_2(DCSURF_SURFACE_CONTROL,
++ REG_UPDATE_4(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_DCC_EN, dcc_en,
+- PRIMARY_SURFACE_DCC_IND_64B_BLK, dcc_ind_64b_blk);
++ PRIMARY_SURFACE_DCC_IND_64B_BLK, dcc_ind_64b_blk,
++ SECONDARY_SURFACE_DCC_EN, dcc_en,
++ SECONDARY_SURFACE_DCC_IND_64B_BLK, dcc_ind_64b_blk);
+ }
+
+ void hubp1_program_surface_config(
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+index af38403..d901d50 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+@@ -312,6 +312,12 @@
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_META_SURFACE_TMZ_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_DCC_EN, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_DCC_IND_64B_BLK, mask_sh),\
++ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_TMZ, mask_sh),\
++ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_TMZ_C, mask_sh),\
++ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_META_SURFACE_TMZ, mask_sh),\
++ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_META_SURFACE_TMZ_C, mask_sh),\
++ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_EN, mask_sh),\
++ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_IND_64B_BLK, mask_sh),\
+ HUBP_SF(HUBPRET0_HUBPRET_CONTROL, DET_BUF_PLANE1_BASE_ADDRESS, mask_sh),\
+ HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CB_B, mask_sh),\
+ HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CR_R, mask_sh),\
+@@ -489,6 +495,8 @@
+ type SECONDARY_META_SURFACE_TMZ_C;\
+ type PRIMARY_SURFACE_DCC_EN;\
+ type PRIMARY_SURFACE_DCC_IND_64B_BLK;\
++ type SECONDARY_SURFACE_DCC_EN;\
++ type SECONDARY_SURFACE_DCC_IND_64B_BLK;\
+ type DET_BUF_PLANE1_BASE_ADDRESS;\
+ type CROSSBAR_SRC_CB_B;\
+ type CROSSBAR_SRC_CR_R;\
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4548-drm-amd-display-Destroy-connector-state-on-reset.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4548-drm-amd-display-Destroy-connector-state-on-reset.patch
new file mode 100644
index 00000000..9424fe96
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4548-drm-amd-display-Destroy-connector-state-on-reset.patch
@@ -0,0 +1,43 @@
+From f6fc73f0a096c34b6043723e5b48657d04f2da9f Mon Sep 17 00:00:00 2001
+From: "Leo (Sunpeng) Li" <sunpeng.li@amd.com>
+Date: Wed, 16 May 2018 10:31:30 -0400
+Subject: [PATCH 4548/5725] drm/amd/display: Destroy connector state on reset
+
+When a DRM mode reset is called on resume, the connector state's
+destructor is not called. This leaves a dangling reference on the CRTC
+commit object, which was obtained by the connector state during commit
+setup.
+
+Signed-off-by: Leo (Sunpeng) Li <sunpeng.li@amd.com>
+Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index a0b16ac..0e24164 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -2886,6 +2886,9 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
+ struct dm_connector_state *state =
+ to_dm_connector_state(connector->state);
+
++ if (connector->state)
++ __drm_atomic_helper_connector_destroy_state(connector->state);
++
+ kfree(state);
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+@@ -2896,8 +2899,7 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
+ state->underscan_hborder = 0;
+ state->underscan_vborder = 0;
+
+- connector->state = &state->base;
+- connector->state->connector = connector;
++ __drm_atomic_helper_connector_reset(connector, &state->base);
+ }
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4549-drm-amd-display-Prefix-TIMING_STANDARD-entries-with-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4549-drm-amd-display-Prefix-TIMING_STANDARD-entries-with-.patch
new file mode 100644
index 00000000..48485868
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4549-drm-amd-display-Prefix-TIMING_STANDARD-entries-with-.patch
@@ -0,0 +1,154 @@
+From 2420d4df16aff8a37161333ef6be62091529fb11 Mon Sep 17 00:00:00 2001
+From: Reza Amini <Reza.Amini@amd.com>
+Date: Wed, 9 May 2018 15:41:47 -0400
+Subject: [PATCH 4549/5725] drm/amd/display: Prefix TIMING_STANDARD entries
+ with DC_
+
+Signed-off-by: Reza Amini <Reza.Amini@amd.com>
+Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc.c | 2 +-
+ drivers/gpu/drm/amd/display/dc/dc_ddc_types.h | 2 +-
+ drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 34 ++++++++++++------------
+ drivers/gpu/drm/amd/display/dc/dc_types.h | 18 ++++++-------
+ drivers/gpu/drm/amd/display/include/fixed31_32.h | 7 +++++
+ 5 files changed, 35 insertions(+), 28 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 3243d65..16e8f37 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -1629,7 +1629,7 @@ struct dc_sink *dc_link_add_remote_sink(
+ struct dc_sink *dc_sink;
+ enum dc_edid_status edid_status;
+
+- if (len > MAX_EDID_BUFFER_SIZE) {
++ if (len > DC_MAX_EDID_BUFFER_SIZE) {
+ dm_error("Max EDID buffer size breached!\n");
+ return NULL;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
+index e1affeb..ee04812 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
+@@ -109,7 +109,7 @@ struct ddc_service {
+
+ uint32_t address;
+ uint32_t edid_buf_len;
+- uint8_t edid_buf[MAX_EDID_BUFFER_SIZE];
++ uint8_t edid_buf[DC_MAX_EDID_BUFFER_SIZE];
+ };
+
+ #endif /* DC_DDC_TYPES_H_ */
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+index 4a05df3..7e5a41f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+@@ -567,25 +567,25 @@ struct scaling_taps {
+ };
+
+ enum dc_timing_standard {
+- TIMING_STANDARD_UNDEFINED,
+- TIMING_STANDARD_DMT,
+- TIMING_STANDARD_GTF,
+- TIMING_STANDARD_CVT,
+- TIMING_STANDARD_CVT_RB,
+- TIMING_STANDARD_CEA770,
+- TIMING_STANDARD_CEA861,
+- TIMING_STANDARD_HDMI,
+- TIMING_STANDARD_TV_NTSC,
+- TIMING_STANDARD_TV_NTSC_J,
+- TIMING_STANDARD_TV_PAL,
+- TIMING_STANDARD_TV_PAL_M,
+- TIMING_STANDARD_TV_PAL_CN,
+- TIMING_STANDARD_TV_SECAM,
+- TIMING_STANDARD_EXPLICIT,
++ DC_TIMING_STANDARD_UNDEFINED,
++ DC_TIMING_STANDARD_DMT,
++ DC_TIMING_STANDARD_GTF,
++ DC_TIMING_STANDARD_CVT,
++ DC_TIMING_STANDARD_CVT_RB,
++ DC_TIMING_STANDARD_CEA770,
++ DC_TIMING_STANDARD_CEA861,
++ DC_TIMING_STANDARD_HDMI,
++ DC_TIMING_STANDARD_TV_NTSC,
++ DC_TIMING_STANDARD_TV_NTSC_J,
++ DC_TIMING_STANDARD_TV_PAL,
++ DC_TIMING_STANDARD_TV_PAL_M,
++ DC_TIMING_STANDARD_TV_PAL_CN,
++ DC_TIMING_STANDARD_TV_SECAM,
++ DC_TIMING_STANDARD_EXPLICIT,
+ /*!< For explicit timings from EDID, VBIOS, etc.*/
+- TIMING_STANDARD_USER_OVERRIDE,
++ DC_TIMING_STANDARD_USER_OVERRIDE,
+ /*!< For mode timing override by user*/
+- TIMING_STANDARD_MAX
++ DC_TIMING_STANDARD_MAX
+ };
+
+ enum dc_color_depth {
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
+index f530871..f463d3a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
+@@ -98,7 +98,7 @@ struct dc_context {
+ };
+
+
+-#define MAX_EDID_BUFFER_SIZE 512
++#define DC_MAX_EDID_BUFFER_SIZE 512
+ #define EDID_BLOCK_SIZE 128
+ #define MAX_SURFACE_NUM 4
+ #define NUM_PIXEL_FORMATS 10
+@@ -137,13 +137,13 @@ enum plane_stereo_format {
+ */
+
+ enum dc_edid_connector_type {
+- EDID_CONNECTOR_UNKNOWN = 0,
+- EDID_CONNECTOR_ANALOG = 1,
+- EDID_CONNECTOR_DIGITAL = 10,
+- EDID_CONNECTOR_DVI = 11,
+- EDID_CONNECTOR_HDMIA = 12,
+- EDID_CONNECTOR_MDDI = 14,
+- EDID_CONNECTOR_DISPLAYPORT = 15
++ DC_EDID_CONNECTOR_UNKNOWN = 0,
++ DC_EDID_CONNECTOR_ANALOG = 1,
++ DC_EDID_CONNECTOR_DIGITAL = 10,
++ DC_EDID_CONNECTOR_DVI = 11,
++ DC_EDID_CONNECTOR_HDMIA = 12,
++ DC_EDID_CONNECTOR_MDDI = 14,
++ DC_EDID_CONNECTOR_DISPLAYPORT = 15
+ };
+
+ enum dc_edid_status {
+@@ -169,7 +169,7 @@ struct dc_cea_audio_mode {
+
+ struct dc_edid {
+ uint32_t length;
+- uint8_t raw_edid[MAX_EDID_BUFFER_SIZE];
++ uint8_t raw_edid[DC_MAX_EDID_BUFFER_SIZE];
+ };
+
+ /* When speaker location data block is not available, DEFAULT_SPEAKER_LOCATION
+diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h
+index bb0d4eb..2b1b29f 100644
+--- a/drivers/gpu/drm/amd/display/include/fixed31_32.h
++++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h
+@@ -26,6 +26,13 @@
+ #ifndef __DAL_FIXED31_32_H__
+ #define __DAL_FIXED31_32_H__
+
++#ifndef LLONG_MAX
++#define LLONG_MAX 9223372036854775807ll
++#endif
++#ifndef LLONG_MIN
++#define LLONG_MIN (-LLONG_MAX - 1ll)
++#endif
++
+ #define FIXED31_32_BITS_PER_FRACTIONAL_PART 32
+ #ifndef LLONG_MIN
+ #define LLONG_MIN (1LL<<63)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4550-drm-amd-display-DP-YCbCr-4-2-0-support.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4550-drm-amd-display-DP-YCbCr-4-2-0-support.patch
new file mode 100644
index 00000000..2e18f266
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4550-drm-amd-display-DP-YCbCr-4-2-0-support.patch
@@ -0,0 +1,52 @@
+From 3bf530f45e3d0284113006927a2289815cd0ca82 Mon Sep 17 00:00:00 2001
+From: Eric Bernstein <eric.bernstein@amd.com>
+Date: Mon, 14 May 2018 17:01:00 -0400
+Subject: [PATCH 4550/5725] drm/amd/display: DP YCbCr 4:2:0 support
+
+Update MSA MISC1 bit 6 programming to handle YCbCr 4:2:0
+and BT2020 cases.
+
+Signed-off-by: Eric Bernstein <eric.bernstein@amd.com>
+Reviewed-by: Hersen Wu <hersenxs.wu@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ .../gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+index c928ee4..147f614 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+@@ -298,9 +298,20 @@ void enc1_stream_encoder_dp_set_stream_attribute(
+ }
+
+ misc1 = REG_READ(DP_MSA_MISC);
++ /* For YCbCr420 and BT2020 Colorimetry Formats, VSC SDP shall be used.
++ * When MISC1, bit 6, is Set to 1, a Source device uses a VSC SDP to indicate the
++ * Pixel Encoding/Colorimetry Format and that a Sink device shall ignore MISC1, bit 7,
++ * and MISC0, bits 7:1 (MISC1, bit 7, and MISC0, bits 7:1, become "don't care").
++ */
++ if ((crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) ||
++ (output_color_space == COLOR_SPACE_2020_YCBCR) ||
++ (output_color_space == COLOR_SPACE_2020_RGB_FULLRANGE) ||
++ (output_color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE))
++ misc1 = misc1 | 0x40;
++ else
++ misc1 = misc1 & ~0x40;
+
+ /* set color depth */
+-
+ switch (crtc_timing->display_color_depth) {
+ case COLOR_DEPTH_666:
+ REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
+@@ -354,7 +365,6 @@ void enc1_stream_encoder_dp_set_stream_attribute(
+
+ switch (output_color_space) {
+ case COLOR_SPACE_SRGB:
+- misc0 = misc0 | 0x0;
+ misc1 = misc1 & ~0x80; /* bit7 = 0*/
+ dynamic_range_rgb = 0; /*full range*/
+ break;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4551-drm-amd-display-decouple-front-and-backend-pgm-using.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4551-drm-amd-display-decouple-front-and-backend-pgm-using.patch
new file mode 100644
index 00000000..80a0e260
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4551-drm-amd-display-decouple-front-and-backend-pgm-using.patch
@@ -0,0 +1,351 @@
+From f71c6762e8e370463f545eee939540bfc8bba2f7 Mon Sep 17 00:00:00 2001
+From: Samson Tam <Samson.Tam@amd.com>
+Date: Tue, 1 May 2018 10:39:26 -0400
+Subject: [PATCH 4551/5725] drm/amd/display: decouple front and backend pgm
+ using dpms_off as backend enable flag
+
+Signed-off-by: Samson Tam <Samson.Tam@amd.com>
+Reviewed-by: Anthony Koo <Anthony.Koo@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc.c | 125 +++++++++++++--------
+ drivers/gpu/drm/amd/display/dc/core/dc_link.c | 34 ++++++
+ drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 6 +
+ drivers/gpu/drm/amd/display/dc/dc_stream.h | 2 +
+ .../amd/display/dc/dce110/dce110_hw_sequencer.c | 38 +------
+ 5 files changed, 123 insertions(+), 82 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 16e8f37..a6a09b0 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -1227,6 +1227,9 @@ static enum surface_update_type check_update_surfaces_for_stream(
+
+ if (stream_update->abm_level)
+ return UPDATE_TYPE_FULL;
++
++ if (stream_update->dpms_off)
++ return UPDATE_TYPE_FULL;
+ }
+
+ for (i = 0 ; i < surface_count; i++) {
+@@ -1281,6 +1284,71 @@ static struct dc_stream_status *stream_get_status(
+ static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
+
+
++static void commit_planes_do_stream_update(struct dc *dc,
++ struct dc_stream_state *stream,
++ struct dc_stream_update *stream_update,
++ enum surface_update_type update_type,
++ struct dc_state *context)
++{
++ int j;
++
++ // Stream updates
++ for (j = 0; j < dc->res_pool->pipe_count; j++) {
++ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
++
++ if (!pipe_ctx->top_pipe &&
++ pipe_ctx->stream &&
++ pipe_ctx->stream == stream) {
++
++ /* Fast update*/
++ // VRR program can be done as part of FAST UPDATE
++ if (stream_update->adjust)
++ dc->hwss.set_drr(&pipe_ctx, 1,
++ stream_update->adjust->v_total_min,
++ stream_update->adjust->v_total_max);
++
++ /* Full fe update*/
++ if (update_type == UPDATE_TYPE_FAST)
++ continue;
++
++ if (stream_update->dpms_off) {
++ if (*stream_update->dpms_off) {
++ core_link_disable_stream(pipe_ctx, KEEP_ACQUIRED_RESOURCE);
++ dc->hwss.pplib_apply_display_requirements(
++ dc, dc->current_state);
++ } else {
++ dc->hwss.pplib_apply_display_requirements(
++ dc, dc->current_state);
++ core_link_enable_stream(dc->current_state, pipe_ctx);
++ }
++ }
++
++ if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
++ if (pipe_ctx->stream_res.tg->funcs->is_blanked) {
++ // if otg funcs defined check if blanked before programming
++ if (!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
++ pipe_ctx->stream_res.abm->funcs->set_abm_level(
++ pipe_ctx->stream_res.abm, stream->abm_level);
++ } else
++ pipe_ctx->stream_res.abm->funcs->set_abm_level(
++ pipe_ctx->stream_res.abm, stream->abm_level);
++ }
++
++ if (stream_update->periodic_fn_vsync_delta &&
++ pipe_ctx->stream_res.tg->funcs->program_vline_interrupt)
++ pipe_ctx->stream_res.tg->funcs->program_vline_interrupt(
++ pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing,
++ pipe_ctx->stream->periodic_fn_vsync_delta);
++
++ if (stream_update->hdr_static_metadata ||
++ stream_update->vrr_infopacket) {
++ resource_build_info_frame(pipe_ctx);
++ dc->hwss.update_info_frame(pipe_ctx);
++ }
++ }
++ }
++}
++
+ static void commit_planes_for_stream(struct dc *dc,
+ struct dc_surface_update *srf_updates,
+ int surface_count,
+@@ -1297,15 +1365,20 @@ static void commit_planes_for_stream(struct dc *dc,
+ context_clock_trace(dc, context);
+ }
+
++ // Stream updates
++ if (stream_update)
++ commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);
++
+ if (surface_count == 0) {
+ /*
+ * In case of turning off screen, no need to program front end a second time.
+- * just return after program front end.
++ * just return after program blank.
+ */
+- dc->hwss.apply_ctx_for_surface(dc, stream, surface_count, context);
++ dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
+ return;
+ }
+
++ // Update Type FULL, Surface updates
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+@@ -1319,13 +1392,6 @@ static void commit_planes_for_stream(struct dc *dc,
+ if (!pipe_ctx->plane_state)
+ continue;
+
+- /* Fast update*/
+- // VRR program can be done as part of FAST UPDATE
+- if (stream_update && stream_update->adjust)
+- dc->hwss.set_drr(&pipe_ctx, 1,
+- stream_update->adjust->v_total_min,
+- stream_update->adjust->v_total_max);
+-
+ /* Full fe update*/
+ if (update_type == UPDATE_TYPE_FAST)
+ continue;
+@@ -1335,34 +1401,18 @@ static void commit_planes_for_stream(struct dc *dc,
+
+ dc->hwss.apply_ctx_for_surface(
+ dc, pipe_ctx->stream, stream_status->plane_count, context);
+-
+- if (stream_update && stream_update->abm_level && pipe_ctx->stream_res.abm) {
+- if (pipe_ctx->stream_res.tg->funcs->is_blanked) {
+- // if otg funcs defined check if blanked before programming
+- if (!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
+- pipe_ctx->stream_res.abm->funcs->set_abm_level(
+- pipe_ctx->stream_res.abm, stream->abm_level);
+- } else
+- pipe_ctx->stream_res.abm->funcs->set_abm_level(
+- pipe_ctx->stream_res.abm, stream->abm_level);
+- }
+-
+- if (stream_update && stream_update->periodic_fn_vsync_delta &&
+- pipe_ctx->stream_res.tg->funcs->program_vline_interrupt)
+- pipe_ctx->stream_res.tg->funcs->program_vline_interrupt(
+- pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing,
+- pipe_ctx->stream->periodic_fn_vsync_delta);
+ }
+ }
+
+ if (update_type == UPDATE_TYPE_FULL)
+ context_timing_trace(dc, &context->res_ctx);
+
+- /* Lock the top pipe while updating plane addrs, since freesync requires
+- * plane addr update event triggers to be synchronized.
+- * top_pipe_to_program is expected to never be NULL
+- */
++ // Update Type FAST, Surface updates
+ if (update_type == UPDATE_TYPE_FAST) {
++ /* Lock the top pipe while updating plane addrs, since freesync requires
++ * plane addr update event triggers to be synchronized.
++ * top_pipe_to_program is expected to never be NULL
++ */
+ dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
+
+ /* Perform requested Updates */
+@@ -1385,21 +1435,6 @@ static void commit_planes_for_stream(struct dc *dc,
+
+ dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
+ }
+-
+- if (stream && stream_update)
+- for (j = 0; j < dc->res_pool->pipe_count; j++) {
+- struct pipe_ctx *pipe_ctx =
+- &context->res_ctx.pipe_ctx[j];
+-
+- if (pipe_ctx->stream != stream)
+- continue;
+-
+- if (stream_update->hdr_static_metadata ||
+- (stream_update->vrr_infopacket)) {
+- resource_build_info_frame(pipe_ctx);
+- dc->hwss.update_info_frame(pipe_ctx);
+- }
+- }
+ }
+
+ void dc_commit_updates_for_stream(struct dc *dc,
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index 08b7ee5..a81ae8d 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -2360,9 +2360,43 @@ void core_link_enable_stream(
+ struct pipe_ctx *pipe_ctx)
+ {
+ struct dc *core_dc = pipe_ctx->stream->ctx->dc;
++ struct dc_stream_state *stream = pipe_ctx->stream;
+ enum dc_status status;
+ DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
+
++ if (pipe_ctx->stream->signal != SIGNAL_TYPE_VIRTUAL) {
++ stream->sink->link->link_enc->funcs->setup(
++ stream->sink->link->link_enc,
++ pipe_ctx->stream->signal);
++ pipe_ctx->stream_res.stream_enc->funcs->setup_stereo_sync(
++ pipe_ctx->stream_res.stream_enc,
++ pipe_ctx->stream_res.tg->inst,
++ stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE);
++ }
++
++ if (dc_is_dp_signal(pipe_ctx->stream->signal))
++ pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(
++ pipe_ctx->stream_res.stream_enc,
++ &stream->timing,
++ stream->output_color_space);
++
++ if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
++ pipe_ctx->stream_res.stream_enc->funcs->hdmi_set_stream_attribute(
++ pipe_ctx->stream_res.stream_enc,
++ &stream->timing,
++ stream->phy_pix_clk,
++ pipe_ctx->stream_res.audio != NULL);
++
++ if (dc_is_dvi_signal(pipe_ctx->stream->signal))
++ pipe_ctx->stream_res.stream_enc->funcs->dvi_set_stream_attribute(
++ pipe_ctx->stream_res.stream_enc,
++ &stream->timing,
++ (pipe_ctx->stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK) ?
++ true : false);
++
++ resource_build_info_frame(pipe_ctx);
++ core_dc->hwss.update_info_frame(pipe_ctx);
++
+ /* eDP lit up by bios already, no need to enable again. */
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
+ core_dc->apply_edp_fast_boot_optimization) {
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index ad09f0c..b65aa6e 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -1524,6 +1524,9 @@ static bool are_stream_backends_same(
+ if (is_hdr_static_meta_changed(stream_a, stream_b))
+ return false;
+
++ if (stream_a->dpms_off != stream_b->dpms_off)
++ return false;
++
+ return true;
+ }
+
+@@ -2624,6 +2627,9 @@ bool pipe_need_reprogram(
+ if (is_hdr_static_meta_changed(pipe_ctx_old->stream, pipe_ctx->stream))
+ return true;
+
++ if (pipe_ctx_old->stream->dpms_off != pipe_ctx->stream->dpms_off)
++ return true;
++
+ return false;
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+index 9468c2f..d06fd41 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+@@ -127,6 +127,8 @@ struct dc_stream_update {
+
+ struct dc_crtc_timing_adjust *adjust;
+ struct dc_info_packet *vrr_infopacket;
++
++ bool *dpms_off;
+ };
+
+ bool dc_is_stream_unchanged(
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index 0544568..2c3b289 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -1337,8 +1337,6 @@ static enum dc_status apply_single_controller_ctx_to_hw(
+ struct dc *dc)
+ {
+ struct dc_stream_state *stream = pipe_ctx->stream;
+- struct pipe_ctx *pipe_ctx_old = &dc->current_state->res_ctx.
+- pipe_ctx[pipe_ctx->pipe_idx];
+
+ if (pipe_ctx->stream_res.audio != NULL) {
+ struct audio_output audio_output;
+@@ -1393,46 +1391,12 @@ static enum dc_status apply_single_controller_ctx_to_hw(
+ stream->timing.display_color_depth,
+ pipe_ctx->stream->signal);
+
+- if (pipe_ctx->stream->signal != SIGNAL_TYPE_VIRTUAL)
+- stream->sink->link->link_enc->funcs->setup(
+- stream->sink->link->link_enc,
+- pipe_ctx->stream->signal);
+-
+- if (pipe_ctx->stream->signal != SIGNAL_TYPE_VIRTUAL)
+- pipe_ctx->stream_res.stream_enc->funcs->setup_stereo_sync(
+- pipe_ctx->stream_res.stream_enc,
+- pipe_ctx->stream_res.tg->inst,
+- stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE);
+-
+-
+ pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
+ pipe_ctx->stream_res.opp,
+ &stream->bit_depth_params,
+ &stream->clamping);
+
+- if (dc_is_dp_signal(pipe_ctx->stream->signal))
+- pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(
+- pipe_ctx->stream_res.stream_enc,
+- &stream->timing,
+- stream->output_color_space);
+-
+- if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+- pipe_ctx->stream_res.stream_enc->funcs->hdmi_set_stream_attribute(
+- pipe_ctx->stream_res.stream_enc,
+- &stream->timing,
+- stream->phy_pix_clk,
+- pipe_ctx->stream_res.audio != NULL);
+-
+- if (dc_is_dvi_signal(pipe_ctx->stream->signal))
+- pipe_ctx->stream_res.stream_enc->funcs->dvi_set_stream_attribute(
+- pipe_ctx->stream_res.stream_enc,
+- &stream->timing,
+- (pipe_ctx->stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK) ?
+- true : false);
+-
+- resource_build_info_frame(pipe_ctx);
+- dce110_update_info_frame(pipe_ctx);
+- if (!pipe_ctx_old->stream)
++ if (!stream->dpms_off)
+ core_link_enable_stream(context, pipe_ctx);
+
+ pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4552-drm-amd-display-add-dentist-frequency-to-resource-po.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4552-drm-amd-display-add-dentist-frequency-to-resource-po.patch
new file mode 100644
index 00000000..c3cebdec
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4552-drm-amd-display-add-dentist-frequency-to-resource-po.patch
@@ -0,0 +1,28 @@
+From d92aa224da771a25aee57e21e99652883e2c2d2f Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Wed, 16 May 2018 08:51:11 -0400
+Subject: [PATCH 4552/5725] drm/amd/display: add dentist frequency to resource
+ pool
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/inc/core_types.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+index a94942d..4beddca0 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+@@ -148,6 +148,7 @@ struct resource_pool {
+ unsigned int underlay_pipe_index;
+ unsigned int stream_enc_count;
+ unsigned int ref_clock_inKhz;
++ unsigned int dentist_vco_freq_khz;
+ unsigned int timing_generator_count;
+
+ /*
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4553-drm-amd-display-fix-dscl_manual_ratio_init.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4553-drm-amd-display-fix-dscl_manual_ratio_init.patch
new file mode 100644
index 00000000..cb2b7fd8
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4553-drm-amd-display-fix-dscl_manual_ratio_init.patch
@@ -0,0 +1,108 @@
+From fd46b92701f0121bf0d504c58c9d9a61b8d92322 Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Thu, 17 May 2018 10:08:10 -0400
+Subject: [PATCH 4553/5725] drm/amd/display: fix dscl_manual_ratio_init
+
+This change will fix wb and display scaling when ratios of
+4 or more are involved
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c | 5 +++++
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | 3 +--
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h | 6 +-----
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c | 8 ++++----
+ drivers/gpu/drm/amd/display/include/fixed31_32.h | 2 ++
+ 5 files changed, 13 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
+index e61dd97d..f289898 100644
+--- a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
++++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
+@@ -449,6 +449,11 @@ static inline unsigned int clamp_ux_dy(
+ return min_clamp;
+ }
+
++unsigned int dc_fixpt_u3d19(struct fixed31_32 arg)
++{
++ return ux_dy(arg.value, 3, 19);
++}
++
+ unsigned int dc_fixpt_u2d19(struct fixed31_32 arg)
+ {
+ return ux_dy(arg.value, 2, 19);
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+index 46a35c7..c69fa4b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+@@ -132,8 +132,7 @@ void dpp_set_gamut_remap_bypass(struct dcn10_dpp *dpp)
+
+ #define IDENTITY_RATIO(ratio) (dc_fixpt_u2d19(ratio) == (1 << 19))
+
+-
+-bool dpp_get_optimal_number_of_taps(
++static bool dpp_get_optimal_number_of_taps(
+ struct dpp *dpp,
+ struct scaler_data *scl_data,
+ const struct scaling_taps *in_taps)
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+index 5944a3b..e862caf 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+@@ -1424,12 +1424,8 @@ void dpp1_set_degamma(
+ enum ipp_degamma_mode mode);
+
+ void dpp1_set_degamma_pwl(struct dpp *dpp_base,
+- const struct pwl_params *params);
++ const struct pwl_params *params);
+
+-bool dpp_get_optimal_number_of_taps(
+- struct dpp *dpp,
+- struct scaler_data *scl_data,
+- const struct scaling_taps *in_taps);
+
+ void dpp_read_state(struct dpp *dpp_base,
+ struct dcn_dpp_state *s);
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+index 4ddd627..f862fd1 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+@@ -565,16 +565,16 @@ static void dpp1_dscl_set_manual_ratio_init(
+ uint32_t init_int = 0;
+
+ REG_SET(SCL_HORZ_FILTER_SCALE_RATIO, 0,
+- SCL_H_SCALE_RATIO, dc_fixpt_u2d19(data->ratios.horz) << 5);
++ SCL_H_SCALE_RATIO, dc_fixpt_u3d19(data->ratios.horz) << 5);
+
+ REG_SET(SCL_VERT_FILTER_SCALE_RATIO, 0,
+- SCL_V_SCALE_RATIO, dc_fixpt_u2d19(data->ratios.vert) << 5);
++ SCL_V_SCALE_RATIO, dc_fixpt_u3d19(data->ratios.vert) << 5);
+
+ REG_SET(SCL_HORZ_FILTER_SCALE_RATIO_C, 0,
+- SCL_H_SCALE_RATIO_C, dc_fixpt_u2d19(data->ratios.horz_c) << 5);
++ SCL_H_SCALE_RATIO_C, dc_fixpt_u3d19(data->ratios.horz_c) << 5);
+
+ REG_SET(SCL_VERT_FILTER_SCALE_RATIO_C, 0,
+- SCL_V_SCALE_RATIO_C, dc_fixpt_u2d19(data->ratios.vert_c) << 5);
++ SCL_V_SCALE_RATIO_C, dc_fixpt_u3d19(data->ratios.vert_c) << 5);
+
+ /*
+ * 0.24 format for fraction, first five bits zeroed
+diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h
+index 2b1b29f..52a7333 100644
+--- a/drivers/gpu/drm/amd/display/include/fixed31_32.h
++++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h
+@@ -503,6 +503,8 @@ static inline int dc_fixpt_ceil(struct fixed31_32 arg)
+ * fractional
+ */
+
++unsigned int dc_fixpt_u3d19(struct fixed31_32 arg);
++
+ unsigned int dc_fixpt_u2d19(struct fixed31_32 arg);
+
+ unsigned int dc_fixpt_u0d19(struct fixed31_32 arg);
+--
+2.7.4
+
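Editor's note: for reference on the ratio encoding changed in the patch above, this is a simplified sketch (not from the patch) of how a scale ratio maps to the U3.19 format now programmed into the SCL_*_SCALE_RATIO fields; the rounding and clamping done by the real dc_fixpt_u3d19() helper are omitted, and the function name is hypothetical.

/* Simplified illustration: pack a ratio into unsigned 3.19 fixed point.
 * Three integer bits cover ratios up to (but not including) 8, whereas the
 * previous U2.19 encoding overflowed for downscale ratios of 4 or more. */
static inline unsigned int example_u3d19(double ratio)
{
	return (unsigned int)(ratio * (1 << 19)) & ((1u << 22) - 1);
}
/* example_u3d19(4.0) == 4 << 19, which no longer wraps as it did in U2.19 */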
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4554-drm-amd-display-check-if-audio-clk-enable-is-applica.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4554-drm-amd-display-check-if-audio-clk-enable-is-applica.patch
new file mode 100644
index 00000000..84b76855
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4554-drm-amd-display-check-if-audio-clk-enable-is-applica.patch
@@ -0,0 +1,32 @@
+From 1acfde46d075f5b0b5d00a20bda109338845fc46 Mon Sep 17 00:00:00 2001
+From: Roman Li <Roman.Li@amd.com>
+Date: Thu, 17 May 2018 18:08:54 -0400
+Subject: [PATCH 4554/5725] drm/amd/display: check if audio clk enable is
+ applicable
+
+Fixing warning on dce10 with HDMI display.
+
+Signed-off-by: Roman Li <Roman.Li@amd.com>
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+index c063175..c0e813c 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+@@ -720,7 +720,8 @@ static void dce110_stream_encoder_update_hdmi_info_packets(
+ const uint32_t *content =
+ (const uint32_t *) &info_frame->avi.sb[0];
+ /*we need turn on clock before programming AFMT block*/
+- REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1);
++ if (REG(AFMT_CNTL))
++ REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1);
+
+ REG_WRITE(AFMT_AVI_INFO0, content[0]);
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4555-drm-amd-display-Do-not-limit-color-depth-to-8bpc.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4555-drm-amd-display-Do-not-limit-color-depth-to-8bpc.patch
new file mode 100644
index 00000000..770e5ea1
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4555-drm-amd-display-Do-not-limit-color-depth-to-8bpc.patch
@@ -0,0 +1,34 @@
+From b1cd5f8f0f995ea4aebb84a6c5a08607f20a9c5e Mon Sep 17 00:00:00 2001
+From: Mikita Lipski <mikita.lipski@amd.com>
+Date: Wed, 16 May 2018 16:46:18 -0400
+Subject: [PATCH 4555/5725] drm/amd/display: Do not limit color depth to 8bpc
+
+Delete if statement that would force any display's color depth higher
+than 8 bpc to 8
+
+Signed-off-by: Mikita Lipski <mikita.lipski@amd.com>
+Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 6 ------
+ 1 file changed, 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 0e24164..48bd8f2 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -2145,12 +2145,6 @@ convert_color_depth_from_display_info(const struct drm_connector *connector)
+ {
+ uint32_t bpc = connector->display_info.bpc;
+
+- /* Limited color depth to 8bit
+- * TODO: Still need to handle deep color
+- */
+- if (bpc > 8)
+- bpc = 8;
+-
+ switch (bpc) {
+ case 0:
+ /* Temporary Work around, DRM don't parse color depth for
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4556-drm-amd-display-dal-3.1.47.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4556-drm-amd-display-dal-3.1.47.patch
new file mode 100644
index 00000000..bc452411
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4556-drm-amd-display-dal-3.1.47.patch
@@ -0,0 +1,28 @@
+From a985085cdd996deb4d049162ef4aa57eae3b1b0a Mon Sep 17 00:00:00 2001
+From: Tony Cheng <tony.cheng@amd.com>
+Date: Tue, 8 May 2018 12:25:02 -0400
+Subject: [PATCH 4556/5725] drm/amd/display: dal 3.1.47
+
+Signed-off-by: Tony Cheng <tony.cheng@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 3981837..0d64023 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -38,7 +38,7 @@
+ #include "inc/compressor.h"
+ #include "dml/display_mode_lib.h"
+
+-#define DC_VER "3.1.46"
++#define DC_VER "3.1.47"
+
+ #define MAX_SURFACES 3
+ #define MAX_STREAMS 6
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4557-drm-amd-display-Fix-wrong-latency-assignment-for-VEG.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4557-drm-amd-display-Fix-wrong-latency-assignment-for-VEG.patch
new file mode 100644
index 00000000..18aec796
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4557-drm-amd-display-Fix-wrong-latency-assignment-for-VEG.patch
@@ -0,0 +1,33 @@
+From 8c6d565627c82332119d70518dad00e9c362055f Mon Sep 17 00:00:00 2001
+From: Harry Wentland <harry.wentland@amd.com>
+Date: Tue, 29 May 2018 09:59:13 -0400
+Subject: [PATCH 4557/5725] drm/amd/display: Fix wrong latency assignment for
+ VEGA clock levels
+
+Also drop wrong 10kHz comment
+
+Fixes: drm/amd/display: Implement dm_pp_get_clock_levels_by_type_with_latency
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+index ead3d21..d5e6b45 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+@@ -256,9 +256,8 @@ static void pp_to_dc_clock_levels_with_latency(
+
+ for (i = 0; i < clk_level_info->num_levels; i++) {
+ DRM_DEBUG("DM_PPLIB:\t %d\n", pp_clks->data[i].clocks_in_khz);
+- /* translate 10kHz to kHz */
+ clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
+- clk_level_info->data[i].latency_in_us = pp_clks->data[i].clocks_in_khz;
++ clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us;
+ }
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4558-drm-amdgpu-display-check-if-ppfuncs-exists-before-us.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4558-drm-amdgpu-display-check-if-ppfuncs-exists-before-us.patch
new file mode 100644
index 00000000..78a73dcc
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4558-drm-amdgpu-display-check-if-ppfuncs-exists-before-us.patch
@@ -0,0 +1,30 @@
+From 12ca0be853d9133fbcdd14bf973179e6b5c73b2e Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Wed, 30 May 2018 09:34:23 -0500
+Subject: [PATCH 4558/5725] drm/amdgpu/display: check if ppfuncs exists before
+ using it
+
+Fixes a crash on asics without powerplay yet (e.g., vega20).
+
+Reviewed-by: Rex Zhu<rezhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+index d5e6b45..5a33461 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+@@ -343,7 +343,7 @@ bool dm_pp_get_clock_levels_by_type_with_latency(
+ struct pp_clock_levels_with_latency pp_clks = { 0 };
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+- if (!pp_funcs->get_clock_by_type_with_latency)
++ if (!pp_funcs || !pp_funcs->get_clock_by_type_with_latency)
+ return false;
+
+ if (pp_funcs->get_clock_by_type_with_latency(pp_handle,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4559-drm-amdgpu-display-drop-DRM_AMD_DC_FBC-kconfig-optio.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4559-drm-amdgpu-display-drop-DRM_AMD_DC_FBC-kconfig-optio.patch
new file mode 100644
index 00000000..4321a321
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4559-drm-amdgpu-display-drop-DRM_AMD_DC_FBC-kconfig-optio.patch
@@ -0,0 +1,292 @@
+From 2ed614c212983f8f593fd4fe3ad0d296ea8f027b Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 31 May 2018 09:09:59 -0500
+Subject: [PATCH 4559/5725] drm/amdgpu/display: drop DRM_AMD_DC_FBC kconfig
+ option
+
+Just enable it always. This was leftover from feature
+bring up.
+
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/display/Kconfig | 10 ----------
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 5 +----
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 5 +----
+ drivers/gpu/drm/amd/display/dc/dc.h | 2 --
+ drivers/gpu/drm/amd/display/dc/dc_types.h | 2 --
+ .../drm/amd/display/dc/dce110/dce110_compressor.c | 2 --
+ .../amd/display/dc/dce110/dce110_hw_sequencer.c | 22 ++--------------------
+ .../drm/amd/display/dc/dce110/dce110_resource.c | 7 +------
+ 8 files changed, 5 insertions(+), 50 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
+index c3d49f8..88e763f 100644
+--- a/drivers/gpu/drm/amd/display/Kconfig
++++ b/drivers/gpu/drm/amd/display/Kconfig
+@@ -9,16 +9,6 @@ config DRM_AMD_DC
+ support for AMDGPU.This adds required support for Vega and
+ Raven ASICs.
+
+-config DRM_AMD_DC_FBC
+- bool "AMD FBC - Enable Frame Buffer Compression"
+- depends on DRM_AMD_DC
+- help
+- Choose this option if you want to use frame buffer compression
+- support.
+- This is a power optimisation feature, check its availability
+- on your hardware before enabling this option.
+-
+-
+ config DRM_AMD_DC_DCN1_0
+ bool "DCN 1.0 Raven family"
+ depends on DRM_AMD_DC && X86
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 48bd8f2..792fd09 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -345,7 +345,6 @@ static void hotplug_notify_work_func(struct work_struct *work)
+ drm_kms_helper_hotplug_event(dev);
+ }
+
+-#if defined(CONFIG_DRM_AMD_DC_FBC)
+ /* Allocate memory for FBC compressed data */
+ static void amdgpu_dm_fbc_init(struct drm_connector *connector)
+ {
+@@ -386,7 +385,6 @@ static void amdgpu_dm_fbc_init(struct drm_connector *connector)
+ }
+
+ }
+-#endif
+
+
+ /* Init display KMS
+@@ -3617,9 +3615,8 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
+ amdgpu_dm_connector_ddc_get_modes(connector, edid);
+ amdgpu_dm_connector_add_common_modes(encoder, connector);
+
+-#if defined(CONFIG_DRM_AMD_DC_FBC)
+ amdgpu_dm_fbc_init(connector);
+-#endif
++
+ return amdgpu_dm_connector->num_modes;
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+index 51c09a4..88b646e 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+@@ -72,13 +72,11 @@ struct irq_list_head {
+ struct work_struct work;
+ };
+
+-#if defined(CONFIG_DRM_AMD_DC_FBC)
+ struct dm_comressor_info {
+ void *cpu_addr;
+ struct amdgpu_bo *bo_ptr;
+ uint64_t gpu_addr;
+ };
+-#endif
+
+ /**
+ * for_each_oldnew_plane_in_state_reverse - iterate over all planes in an atomic
+@@ -152,9 +150,8 @@ struct amdgpu_display_manager {
+ * Caches device atomic state for suspend/resume
+ */
+ struct drm_atomic_state *cached_state;
+-#if defined(CONFIG_DRM_AMD_DC_FBC)
++
+ struct dm_comressor_info compressor;
+-#endif
+ };
+
+ struct amdgpu_dm_connector {
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 0d64023..23a5045 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -289,9 +289,7 @@ struct dc {
+ bool apply_edp_fast_boot_optimization;
+
+ /* FBC compressor */
+-#if defined(CONFIG_DRM_AMD_DC_FBC)
+ struct compressor *fbc_compressor;
+-#endif
+ };
+
+ enum frame_buffer_mode {
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
+index f463d3a..40d620f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
+@@ -92,9 +92,7 @@ struct dc_context {
+ bool created_bios;
+ struct gpio_service *gpio_service;
+ struct i2caux *i2caux;
+-#if defined(CONFIG_DRM_AMD_DC_FBC)
+ uint64_t fbc_gpu_addr;
+-#endif
+ };
+
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
+index e2994d3..a79fc0b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
+@@ -551,9 +551,7 @@ void dce110_compressor_construct(struct dce110_compressor *compressor,
+ compressor->base.lpt_channels_num = 0;
+ compressor->base.attached_inst = 0;
+ compressor->base.is_enabled = false;
+-#if defined(CONFIG_DRM_AMD_DC_FBC)
+ compressor->base.funcs = &dce110_compressor_funcs;
+
+-#endif
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index 2c3b289..4cd6bc0 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -34,9 +34,7 @@
+ #include "dce/dce_hwseq.h"
+ #include "gpio_service_interface.h"
+
+-#if defined(CONFIG_DRM_AMD_DC_FBC)
+ #include "dce110_compressor.h"
+-#endif
+
+ #include "bios/bios_parser_helper.h"
+ #include "timing_generator.h"
+@@ -1471,10 +1469,8 @@ static void power_down_all_hw_blocks(struct dc *dc)
+
+ power_down_clock_sources(dc);
+
+-#if defined(CONFIG_DRM_AMD_DC_FBC)
+ if (dc->fbc_compressor)
+ dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
+-#endif
+ }
+
+ static void disable_vga_and_power_gate_all_controllers(
+@@ -1724,9 +1720,7 @@ static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
+ if (events->force_trigger)
+ value |= 0x1;
+
+-#if defined(CONFIG_DRM_AMD_DC_FBC)
+ value |= 0x84;
+-#endif
+
+ for (i = 0; i < num_pipes; i++)
+ pipe_ctx[i]->stream_res.tg->funcs->
+@@ -1854,8 +1848,6 @@ static void apply_min_clocks(
+ }
+ }
+
+-#if defined(CONFIG_DRM_AMD_DC_FBC)
+-
+ /*
+ * Check if FBC can be enabled
+ */
+@@ -1934,7 +1926,6 @@ static void enable_fbc(struct dc *dc,
+ compr->funcs->enable_fbc(compr, &params);
+ }
+ }
+-#endif
+
+ static void dce110_reset_hw_ctx_wrap(
+ struct dc *dc,
+@@ -2110,10 +2101,9 @@ enum dc_status dce110_apply_ctx_to_hw(
+
+ set_safe_displaymarks(&context->res_ctx, dc->res_pool);
+
+-#if defined(CONFIG_DRM_AMD_DC_FBC)
+ if (dc->fbc_compressor)
+ dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
+-#endif
++
+ /*TODO: when pplib works*/
+ apply_min_clocks(dc, context, &clocks_state, true);
+
+@@ -2191,12 +2181,9 @@ enum dc_status dce110_apply_ctx_to_hw(
+
+ dcb->funcs->set_scratch_critical_state(dcb, false);
+
+-#if defined(CONFIG_DRM_AMD_DC_FBC)
+ if (dc->fbc_compressor)
+ enable_fbc(dc, context);
+
+-#endif
+-
+ return DC_OK;
+ }
+
+@@ -2511,10 +2498,9 @@ static void init_hw(struct dc *dc)
+ abm->funcs->init_backlight(abm);
+ abm->funcs->abm_init(abm);
+ }
+-#if defined(CONFIG_DRM_AMD_DC_FBC)
++
+ if (dc->fbc_compressor)
+ dc->fbc_compressor->funcs->power_up_fbc(dc->fbc_compressor);
+-#endif
+
+ }
+
+@@ -2700,9 +2686,7 @@ static void dce110_program_front_end_for_pipe(
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ struct xfm_grph_csc_adjustment adjust;
+ struct out_csc_color_matrix tbl_entry;
+-#if defined(CONFIG_DRM_AMD_DC_FBC)
+ unsigned int underlay_idx = dc->res_pool->underlay_pipe_index;
+-#endif
+ unsigned int i;
+ DC_LOGGER_INIT();
+ memset(&tbl_entry, 0, sizeof(tbl_entry));
+@@ -2743,7 +2727,6 @@ static void dce110_program_front_end_for_pipe(
+
+ program_scaler(dc, pipe_ctx);
+
+-#if defined(CONFIG_DRM_AMD_DC_FBC)
+ /* fbc not applicable on Underlay pipe */
+ if (dc->fbc_compressor && old_pipe->stream &&
+ pipe_ctx->pipe_idx != underlay_idx) {
+@@ -2752,7 +2735,6 @@ static void dce110_program_front_end_for_pipe(
+ else
+ enable_fbc(dc, dc->current_state);
+ }
+-#endif
+
+ mi->funcs->mem_input_program_surface_config(
+ mi,
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+index ee33786..20c0290 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+@@ -54,9 +54,8 @@
+
+ #define DC_LOGGER \
+ dc->ctx->logger
+-#if defined(CONFIG_DRM_AMD_DC_FBC)
++
+ #include "dce110/dce110_compressor.h"
+-#endif
+
+ #include "reg_helper.h"
+
+@@ -1267,12 +1266,8 @@ static bool construct(
+ }
+ }
+
+-#if defined(CONFIG_DRM_AMD_DC_FBC)
+ dc->fbc_compressor = dce110_compressor_create(ctx);
+
+-
+-
+-#endif
+ if (!underlay_create(ctx, &pool->base))
+ goto res_create_fail;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4560-drm-amdgpu-display-enable-CONFIG_DRM_AMD_DC_DCN1_0-b.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4560-drm-amdgpu-display-enable-CONFIG_DRM_AMD_DC_DCN1_0-b.patch
new file mode 100644
index 00000000..e316b29f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4560-drm-amdgpu-display-enable-CONFIG_DRM_AMD_DC_DCN1_0-b.patch
@@ -0,0 +1,31 @@
+From cad9327973f1f66df48948784f5429e4f315aee3 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 31 May 2018 09:28:47 -0500
+Subject: [PATCH 4560/5725] drm/amdgpu/display: enable CONFIG_DRM_AMD_DC_DCN1_0
+ by default
+
+It's required for displays on Raven. The DCN bandwidth calcs use
+floating point, but DCN is APU only and it already depends on
+X86.
+
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
+index 88e763f..b23c89a 100644
+--- a/drivers/gpu/drm/amd/display/Kconfig
++++ b/drivers/gpu/drm/amd/display/Kconfig
+@@ -12,6 +12,7 @@ config DRM_AMD_DC
+ config DRM_AMD_DC_DCN1_0
+ bool "DCN 1.0 Raven family"
+ depends on DRM_AMD_DC && X86
++ default y
+ help
+ Choose this option if you want to have
+ RV family for display engine
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4561-drm-amd-display-avoid-sleeping-in-atomic-context-whi.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4561-drm-amd-display-avoid-sleeping-in-atomic-context-whi.patch
new file mode 100644
index 00000000..7c5bc75d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4561-drm-amd-display-avoid-sleeping-in-atomic-context-whi.patch
@@ -0,0 +1,58 @@
+From 5f44c6ee22170803f2caa43e83a3124a5936ddbd Mon Sep 17 00:00:00 2001
+From: Shirish S <shirish.s@amd.com>
+Date: Mon, 28 May 2018 16:00:34 +0530
+Subject: [PATCH 4561/5725] drm/amd/display: avoid sleeping in atomic context
+ while creating new state (V2)
+
+This patch fixes the warning messages that are caused due to calling
+sleep in atomic context as below:
+
+BUG: sleeping function called from invalid context at mm/slab.h:419
+in_atomic(): 1, irqs_disabled(): 1, pid: 5, name: kworker/u4:0
+CPU: 1 PID: 5 Comm: kworker/u4:0 Tainted: G W 4.14.35 #941
+Workqueue: events_unbound commit_work
+Call Trace:
+ dump_stack+0x4d/0x63
+ ___might_sleep+0x11f/0x12e
+ kmem_cache_alloc_trace+0x41/0xea
+ dc_create_state+0x1f/0x30
+ dc_commit_updates_for_stream+0x73/0x4cf
+ ? amdgpu_get_crtc_scanoutpos+0x82/0x16b
+ amdgpu_dm_do_flip+0x239/0x298
+ amdgpu_dm_commit_planes.isra.23+0x379/0x54b
+ ? dc_commit_state+0x3da/0x404
+ amdgpu_dm_atomic_commit_tail+0x4fc/0x5d2
+ ? wait_for_common+0x5b/0x69
+ commit_tail+0x42/0x64
+ process_one_work+0x1b0/0x314
+ worker_thread+0x1cb/0x2c1
+ ? create_worker+0x1da/0x1da
+ kthread+0x156/0x15e
+ ? kthread_flush_work+0xea/0xea
+ ret_from_fork+0x22/0x40
+
+V2: fix applicable only to dc_create_state() and not dc_create().
+
+Signed-off-by: Shirish S <shirish.s@amd.com>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index a6a09b0..36be7a3 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -970,7 +970,7 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
+ struct dc_state *dc_create_state(void)
+ {
+ struct dc_state *context = kzalloc(sizeof(struct dc_state),
+- GFP_KERNEL);
++ GFP_ATOMIC);
+
+ if (!context)
+ return NULL;
+--
+2.7.4
+
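Editor's note: the warning fixed by the patch above comes down to the standard kernel allocation-context rule; the following tiny illustration (not part of the patch) uses only the generic kzalloc() API, and the wrapper function is hypothetical.

/* GFP_KERNEL may sleep to reclaim memory, so it is only legal in process
 * context; GFP_ATOMIC never sleeps and must be used when a spinlock is
 * held or interrupts are disabled, as in the atomic commit path above. */
#include <linux/slab.h>

static void *alloc_for_context(size_t size, bool in_atomic_context)
{
	return kzalloc(size, in_atomic_context ? GFP_ATOMIC : GFP_KERNEL);
}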
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4562-drm-amdkcl-4.7-fix-__drm_atomic_helper_connector_des.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4562-drm-amdkcl-4.7-fix-__drm_atomic_helper_connector_des.patch
new file mode 100644
index 00000000..224c2642
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4562-drm-amdkcl-4.7-fix-__drm_atomic_helper_connector_des.patch
@@ -0,0 +1,434 @@
+From 5dc2ee59700d23d9a08413e762f6e8cd9db0ac89 Mon Sep 17 00:00:00 2001
+From: Kevin Wang <Kevin1.Wang@amd.com>
+Date: Mon, 11 Jun 2018 10:34:48 +0800
+Subject: [PATCH 4562/5725] drm/amdkcl: [4.7] fix
+ __drm_atomic_helper_connector_destroy_state api changed
+
+Change-Id: Id6001c09ba5ead870bc8e4bbb37da1c99c8d73bc
+Signed-off-by: Kevin Wang <Kevin1.Wang@amd.com>
+Reviewed-by: Le Ma <Le.Ma@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/scheduler/gpu_scheduler.c | 213 ++++++++++++++++++++++++------
+ 1 file changed, 172 insertions(+), 41 deletions(-)
+
+diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
+index be5d321..1edc323 100644
+--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
++++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
+@@ -21,6 +21,29 @@
+ *
+ */
+
++/**
++ * DOC: Overview
++ *
++ * The GPU scheduler provides entities which allow userspace to push jobs
++ * into software queues which are then scheduled on a hardware run queue.
++ * The software queues have a priority among them. The scheduler selects the entities
++ * from the run queue using a FIFO. The scheduler provides dependency handling
++ * features among jobs. The driver is supposed to provide callback functions for
++ * backend operations to the scheduler like submitting a job to hardware run queue,
++ * returning the dependencies of a job etc.
++ *
++ * The organisation of the scheduler is the following:
++ *
++ * 1. Each hw run queue has one scheduler
++ * 2. Each scheduler has multiple run queues with different priorities
++ * (e.g., HIGH_HW,HIGH_SW, KERNEL, NORMAL)
++ * 3. Each scheduler run queue has a queue of entities to schedule
++ * 4. Entities themselves maintain a queue of jobs that will be scheduled on
++ * the hardware.
++ *
++ * The jobs in a entity are always scheduled in the order that they were pushed.
++ */
++
+ #include <linux/kthread.h>
+ #include <linux/wait.h>
+ #include <linux/sched.h>
+@@ -39,7 +62,13 @@ static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
+ static void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
+ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
+
+-/* Initialize a given run queue struct */
++/**
++ * drm_sched_rq_init - initialize a given run queue struct
++ *
++ * @rq: scheduler run queue
++ *
++ * Initializes a scheduler runqueue.
++ */
+ static void drm_sched_rq_init(struct drm_sched_rq *rq)
+ {
+ spin_lock_init(&rq->lock);
+@@ -47,6 +76,14 @@ static void drm_sched_rq_init(struct drm_sched_rq *rq)
+ rq->current_entity = NULL;
+ }
+
++/**
++ * drm_sched_rq_add_entity - add an entity
++ *
++ * @rq: scheduler run queue
++ * @entity: scheduler entity
++ *
++ * Adds a scheduler entity to the run queue.
++ */
+ static void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
+ struct drm_sched_entity *entity)
+ {
+@@ -57,6 +94,14 @@ static void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
+ spin_unlock(&rq->lock);
+ }
+
++/**
++ * drm_sched_rq_remove_entity - remove an entity
++ *
++ * @rq: scheduler run queue
++ * @entity: scheduler entity
++ *
++ * Removes a scheduler entity from the run queue.
++ */
+ static void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
+ struct drm_sched_entity *entity)
+ {
+@@ -70,9 +115,9 @@ static void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
+ }
+
+ /**
+- * Select an entity which could provide a job to run
++ * drm_sched_rq_select_entity - Select an entity which could provide a job to run
+ *
+- * @rq The run queue to check.
++ * @rq: scheduler run queue to check.
+ *
+ * Try to find a ready entity, returns NULL if none found.
+ */
+@@ -112,14 +157,16 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
+ }
+
+ /**
+- * Init a context entity used by scheduler when submit to HW ring.
++ * drm_sched_entity_init - Init a context entity used by scheduler when
++ * submit to HW ring.
+ *
+- * @sched The pointer to the scheduler
+- * @entity The pointer to a valid drm_sched_entity
+- * @rq The run queue this entity belongs
+- * @kernel If this is an entity for the kernel
++ * @sched: scheduler instance
++ * @entity: scheduler entity to init
++ * @rq: the run queue this entity belongs
++ * @guilty: atomic_t set to 1 when a job on this queue
++ * is found to be guilty causing a timeout
+ *
+- * return 0 if succeed. negative error code on failure
++ * Returns 0 on success or a negative error code on failure.
+ */
+ int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
+ struct drm_sched_entity *entity,
+@@ -148,10 +195,10 @@ int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
+ EXPORT_SYMBOL(drm_sched_entity_init);
+
+ /**
+- * Query if entity is initialized
++ * drm_sched_entity_is_initialized - Query if entity is initialized
+ *
+- * @sched Pointer to scheduler instance
+- * @entity The pointer to a valid scheduler entity
++ * @sched: Pointer to scheduler instance
++ * @entity: The pointer to a valid scheduler entity
+ *
+ * return true if entity is initialized, false otherwise
+ */
+@@ -163,11 +210,11 @@ static bool drm_sched_entity_is_initialized(struct drm_gpu_scheduler *sched,
+ }
+
+ /**
+- * Check if entity is idle
++ * drm_sched_entity_is_idle - Check if entity is idle
+ *
+- * @entity The pointer to a valid scheduler entity
++ * @entity: scheduler entity
+ *
+- * Return true if entity don't has any unscheduled jobs.
++ * Returns true if the entity does not have any unscheduled jobs.
+ */
+ static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
+ {
+@@ -179,9 +226,9 @@ static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
+ }
+
+ /**
+- * Check if entity is ready
++ * drm_sched_entity_is_ready - Check if entity is ready
+ *
+- * @entity The pointer to a valid scheduler entity
++ * @entity: scheduler entity
+ *
+ * Return true if entity could provide a job.
+ */
+@@ -209,12 +256,12 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
+
+
+ /**
+- * Destroy a context entity
++ * drm_sched_entity_do_release - Destroy a context entity
+ *
+- * @sched Pointer to scheduler instance
+- * @entity The pointer to a valid scheduler entity
++ * @sched: scheduler instance
++ * @entity: scheduler entity
+ *
+- * Splitting drm_sched_entity_fini() into two functions, The first one is does the waiting,
++ * Splitting drm_sched_entity_fini() into two functions, The first one does the waiting,
+ * removes the entity from the runqueue and returns an error when the process was killed.
+ */
+ void drm_sched_entity_do_release(struct drm_gpu_scheduler *sched,
+@@ -236,12 +283,13 @@ void drm_sched_entity_do_release(struct drm_gpu_scheduler *sched,
+ EXPORT_SYMBOL(drm_sched_entity_do_release);
+
+ /**
+- * Destroy a context entity
++ * drm_sched_entity_cleanup - Destroy a context entity
+ *
+- * @sched Pointer to scheduler instance
+- * @entity The pointer to a valid scheduler entity
++ * @sched: scheduler instance
++ * @entity: scheduler entity
+ *
+- * The second one then goes over the entity and signals all jobs with an error code.
++ * This should be called after @drm_sched_entity_do_release. It goes over the
++ * entity and signals all jobs with an error code if the process was killed.
+ */
+ void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
+ struct drm_sched_entity *entity)
+@@ -280,6 +328,14 @@ void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
+ }
+ EXPORT_SYMBOL(drm_sched_entity_cleanup);
+
++/**
++ * drm_sched_entity_fini - Destroy a context entity
++ *
++ * @sched: scheduler instance
++ * @entity: scheduler entity
++ *
++ * Calls drm_sched_entity_do_release() and drm_sched_entity_cleanup()
++ */
+ void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
+ struct drm_sched_entity *entity)
+ {
+@@ -305,6 +361,15 @@ static void drm_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb
+ dma_fence_put(f);
+ }
+
++/**
++ * drm_sched_entity_set_rq - Sets the run queue for an entity
++ *
++ * @entity: scheduler entity
++ * @rq: scheduler run queue
++ *
++ * Sets the run queue for an entity and removes the entity from the previous
++ * run queue in which was present.
++ */
+ void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
+ struct drm_sched_rq *rq)
+ {
+@@ -324,6 +389,14 @@ void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
+ }
+ EXPORT_SYMBOL(drm_sched_entity_set_rq);
+
++/**
++ * drm_sched_dependency_optimized
++ *
++ * @fence: the dependency fence
++ * @entity: the entity which depends on the above fence
++ *
++ * Returns true if the dependency can be optimized and false otherwise
++ */
+ bool drm_sched_dependency_optimized(struct dma_fence* fence,
+ struct drm_sched_entity *entity)
+ {
+@@ -407,9 +480,10 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity)
+ }
+
+ /**
+- * Submit a job to the job queue
++ * drm_sched_entity_push_job - Submit a job to the entity's job queue
+ *
+- * @sched_job The pointer to job required to submit
++ * @sched_job: job to submit
++ * @entity: scheduler entity
+ *
+ * Note: To guarantee that the order of insertion to queue matches
+ * the job's fence sequence number this function should be
+@@ -500,6 +574,13 @@ static void drm_sched_job_timedout(struct work_struct *work)
+ job->sched->ops->timedout_job(job);
+ }
+
++/**
++ * drm_sched_hw_job_reset - stop the scheduler if it contains the bad job
++ *
++ * @sched: scheduler instance
++ * @bad: bad scheduler job
++ *
++ */
+ void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
+ {
+ struct drm_sched_job *s_job;
+@@ -544,6 +625,12 @@ void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_jo
+ }
+ EXPORT_SYMBOL(drm_sched_hw_job_reset);
+
++/**
++ * drm_sched_job_recovery - recover jobs after a reset
++ *
++ * @sched: scheduler instance
++ *
++ */
+ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
+ {
+ struct drm_sched_job *s_job, *tmp;
+@@ -593,10 +680,17 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
+ EXPORT_SYMBOL(drm_sched_job_recovery);
+
+ /**
+- * Init a sched_job with basic field
++ * drm_sched_job_init - init a scheduler job
+ *
+- * Note: Refer to drm_sched_entity_push_job documentation
++ * @job: scheduler job to init
++ * @sched: scheduler instance
++ * @entity: scheduler entity to use
++ * @owner: job owner for debugging
++ *
++ * Refer to drm_sched_entity_push_job() documentation
+ * for locking considerations.
++ *
++ * Returns 0 for success, negative error code otherwise.
+ */
+ int drm_sched_job_init(struct drm_sched_job *job,
+ struct drm_gpu_scheduler *sched,
+@@ -620,7 +714,11 @@ int drm_sched_job_init(struct drm_sched_job *job,
+ EXPORT_SYMBOL(drm_sched_job_init);
+
+ /**
+- * Return ture if we can push more jobs to the hw.
++ * drm_sched_ready - is the scheduler ready
++ *
++ * @sched: scheduler instance
++ *
++ * Return true if we can push more jobs to the hw, otherwise false.
+ */
+ static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
+ {
+@@ -629,7 +727,10 @@ static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
+ }
+
+ /**
+- * Wake up the scheduler when it is ready
++ * drm_sched_wakeup - Wake up the scheduler when it is ready
++ *
++ * @sched: scheduler instance
++ *
+ */
+ static void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
+ {
+@@ -638,8 +739,12 @@ static void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
+ }
+
+ /**
+- * Select next entity to process
+-*/
++ * drm_sched_select_entity - Select next entity to process
++ *
++ * @sched: scheduler instance
++ *
++ * Returns the entity to process or NULL if none are found.
++ */
+ static struct drm_sched_entity *
+ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
+ {
+@@ -659,6 +764,14 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
+ return entity;
+ }
+
++/**
++ * drm_sched_process_job - process a job
++ *
++ * @f: fence
++ * @cb: fence callbacks
++ *
++ * Called after job has finished execution.
++ */
+ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
+ {
+ struct drm_sched_fence *s_fence =
+@@ -674,6 +787,13 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
+ wake_up_interruptible(&sched->wake_up_worker);
+ }
+
++/**
++ * drm_sched_blocked - check if the scheduler is blocked
++ *
++ * @sched: scheduler instance
++ *
++ * Returns true if blocked, otherwise false.
++*/
+ static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
+ {
+ if (kthread_should_park()) {
+@@ -684,6 +804,13 @@ static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
+ return false;
+ }
+
++/**
++ * drm_sched_main - main scheduler thread
++ *
++ * @param: scheduler instance
++ *
++ * Returns 0.
++ */
+ static int drm_sched_main(void *param)
+ {
+ struct sched_param sparam = {.sched_priority = 1};
+@@ -738,15 +865,17 @@ static int drm_sched_main(void *param)
+ }
+
+ /**
+- * Init a gpu scheduler instance
++ * drm_sched_init - Init a gpu scheduler instance
+ *
+- * @sched The pointer to the scheduler
+- * @ops The backend operations for this scheduler.
+- * @hw_submissions Number of hw submissions to do.
+- * @name Name used for debugging
++ * @sched: scheduler instance
++ * @ops: backend operations for this scheduler
++ * @hw_submission: number of hw submissions that can be in flight
++ * @hang_limit: number of times to allow a job to hang before dropping it
++ * @timeout: timeout value in jiffies for the scheduler
++ * @name: name used for debugging
+ *
+ * Return 0 on success, otherwise error code.
+-*/
++ */
+ int drm_sched_init(struct drm_gpu_scheduler *sched,
+ const struct drm_sched_backend_ops *ops,
+ unsigned hw_submission,
+@@ -782,9 +911,11 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
+ EXPORT_SYMBOL(drm_sched_init);
+
+ /**
+- * Destroy a gpu scheduler
++ * drm_sched_fini - Destroy a gpu scheduler
++ *
++ * @sched: scheduler instance
+ *
+- * @sched The pointer to the scheduler
++ * Tears down and cleans up the scheduler.
+ */
+ void drm_sched_fini(struct drm_gpu_scheduler *sched)
+ {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4563-drm-scheduler-fix-a-corner-case-in-dependency-optimi.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4563-drm-scheduler-fix-a-corner-case-in-dependency-optimi.patch
new file mode 100644
index 00000000..9d637a07
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4563-drm-scheduler-fix-a-corner-case-in-dependency-optimi.patch
@@ -0,0 +1,42 @@
+From 1fc0a28e65a535f2dffe6d97caed348a139dcb31 Mon Sep 17 00:00:00 2001
+From: Nayan Deshmukh <nayan26deshmukh@gmail.com>
+Date: Fri, 25 May 2018 10:15:46 +0530
+Subject: [PATCH 4563/5725] drm/scheduler: fix a corner case in dependency
+ optimization
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+When checking whether a dependency fence belongs to the same entity,
+compare it with the scheduled as well as the finished fence. Earlier we
+were only comparing it with the scheduled fence.
+
+Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/scheduler/gpu_scheduler.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
+index 1edc323..a403e47 100644
+--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
++++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
+@@ -421,8 +421,13 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
+ struct dma_fence * fence = entity->dependency;
+ struct drm_sched_fence *s_fence;
+
+- if (fence->context == entity->fence_context) {
+- /* We can ignore fences from ourself */
++ if (fence->context == entity->fence_context ||
++ fence->context == entity->fence_context + 1) {
++ /*
++ * Fence is a scheduled/finished fence from a job
++ * which belongs to the same entity, we can ignore
++ * fences from ourself
++ */
+ dma_fence_put(entity->dependency);
+ return false;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4564-drm-amdgpu-remove-unnecessary-scheduler-entity-for-V.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4564-drm-amdgpu-remove-unnecessary-scheduler-entity-for-V.patch
new file mode 100644
index 00000000..c8e1df0c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4564-drm-amdgpu-remove-unnecessary-scheduler-entity-for-V.patch
@@ -0,0 +1,150 @@
+From 8c157e3b17b014c53856b9f949540a16eceb337c Mon Sep 17 00:00:00 2001
+From: Leo Liu <leo.liu@amd.com>
+Date: Fri, 25 May 2018 10:53:39 -0400
+Subject: [PATCH 4564/5725] drm/amdgpu: remove unnecessary scheduler entity for
+ VCN
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+It should be stateless, and there is no need for the scheduler to treat it specially.
+
+Signed-off-by: Leo Liu <leo.liu@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 51 +++++++--------------------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 2 --
+ 2 files changed, 10 insertions(+), 43 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+index 21425669..41a189d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+@@ -49,8 +49,6 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
+
+ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
+ {
+- struct amdgpu_ring *ring;
+- struct drm_sched_rq *rq;
+ unsigned long bo_size;
+ const char *fw_name;
+ const struct common_firmware_header *hdr;
+@@ -103,24 +101,6 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
+ return r;
+ }
+
+- ring = &adev->vcn.ring_dec;
+- rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+- r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_dec,
+- rq, NULL);
+- if (r != 0) {
+- DRM_ERROR("Failed setting up VCN dec run queue.\n");
+- return r;
+- }
+-
+- ring = &adev->vcn.ring_enc[0];
+- rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+- r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_enc,
+- rq, NULL);
+- if (r != 0) {
+- DRM_ERROR("Failed setting up VCN enc run queue.\n");
+- return r;
+- }
+-
+ return 0;
+ }
+
+@@ -130,10 +110,6 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
+
+ kfree(adev->vcn.saved_bo);
+
+- drm_sched_entity_fini(&adev->vcn.ring_dec.sched, &adev->vcn.entity_dec);
+-
+- drm_sched_entity_fini(&adev->vcn.ring_enc[0].sched, &adev->vcn.entity_enc);
+-
+ amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
+ &adev->vcn.gpu_addr,
+ (void **)&adev->vcn.cpu_addr);
+@@ -281,7 +257,7 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
+ }
+
+ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
+- struct amdgpu_bo *bo, bool direct,
++ struct amdgpu_bo *bo,
+ struct dma_fence **fence)
+ {
+ struct amdgpu_device *adev = ring->adev;
+@@ -309,19 +285,12 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
+ }
+ ib->length_dw = 16;
+
+- if (direct) {
+- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+- job->fence = dma_fence_get(f);
+- if (r)
+- goto err_free;
++ r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
++ job->fence = dma_fence_get(f);
++ if (r)
++ goto err_free;
+
+- amdgpu_job_free(job);
+- } else {
+- r = amdgpu_job_submit(job, ring, &adev->vcn.entity_dec,
+- AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+- if (r)
+- goto err_free;
+- }
++ amdgpu_job_free(job);
+
+ amdgpu_bo_fence(bo, f, false);
+ amdgpu_bo_unreserve(bo);
+@@ -374,11 +343,11 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
+ for (i = 14; i < 1024; ++i)
+ msg[i] = cpu_to_le32(0x0);
+
+- return amdgpu_vcn_dec_send_msg(ring, bo, true, fence);
++ return amdgpu_vcn_dec_send_msg(ring, bo, fence);
+ }
+
+ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+- bool direct, struct dma_fence **fence)
++ struct dma_fence **fence)
+ {
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_bo *bo = NULL;
+@@ -401,7 +370,7 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
+ for (i = 6; i < 1024; ++i)
+ msg[i] = cpu_to_le32(0x0);
+
+- return amdgpu_vcn_dec_send_msg(ring, bo, direct, fence);
++ return amdgpu_vcn_dec_send_msg(ring, bo, fence);
+ }
+
+ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+@@ -415,7 +384,7 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+ goto error;
+ }
+
+- r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, true, &fence);
++ r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
+ goto error;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+index 181e6af..773010b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+@@ -67,8 +67,6 @@ struct amdgpu_vcn {
+ struct amdgpu_ring ring_dec;
+ struct amdgpu_ring ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];
+ struct amdgpu_irq_src irq;
+- struct drm_sched_entity entity_dec;
+- struct drm_sched_entity entity_enc;
+ unsigned num_enc_rings;
+ };
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4565-drm-amd-pp-Add-cases-for-getting-phys-and-disp-clks-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4565-drm-amd-pp-Add-cases-for-getting-phys-and-disp-clks-.patch
new file mode 100644
index 00000000..8422ca3f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4565-drm-amd-pp-Add-cases-for-getting-phys-and-disp-clks-.patch
@@ -0,0 +1,35 @@
+From 880c25251b0135c0b3a6f444bd7227b644d32919 Mon Sep 17 00:00:00 2001
+From: Mikita Lipski <mikita.lipski@amd.com>
+Date: Mon, 28 May 2018 11:22:17 -0400
+Subject: [PATCH 4565/5725] drm/amd/pp: Add cases for getting phys and disp
+ clks for SMU10
+
+Add case options to retrieve either physical or display clocks with
+voltage from the SMU controller, as needed by the display driver.
+
+Signed-off-by: Mikita Lipski <mikita.lipski@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+index 2df791c..73c2e43 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+@@ -1000,6 +1000,12 @@ static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
+ case amd_pp_soc_clock:
+ pclk_vol_table = pinfo->vdd_dep_on_socclk;
+ break;
++ case amd_pp_disp_clock:
++ pclk_vol_table = pinfo->vdd_dep_on_dispclk;
++ break;
++ case amd_pp_phy_clock:
++ pclk_vol_table = pinfo->vdd_dep_on_phyclk;
++ break;
+ default:
+ return -EINVAL;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4566-drm-amdgpu-Use-GTT-for-dumb-buffer-if-sg-display-ena.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4566-drm-amdgpu-Use-GTT-for-dumb-buffer-if-sg-display-ena.patch
new file mode 100644
index 00000000..cfe608fe
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4566-drm-amdgpu-Use-GTT-for-dumb-buffer-if-sg-display-ena.patch
@@ -0,0 +1,60 @@
+From 51a0c473e5a12afd8529c2fe0b11625a174aa69a Mon Sep 17 00:00:00 2001
+From: Deepak Sharma <Deepak.Sharma@amd.com>
+Date: Tue, 22 May 2018 15:31:23 -0700
+Subject: [PATCH 4566/5725] drm/amdgpu: Use GTT for dumb buffer if sg display
+ enabled (v2)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+When vram size <= THRESHOLD (256M), use GTT for dumb buffer
+allocation. As SG will be enabled with vram size <= 256M,
+scan out will not be an issue.
+
+v2: Use amdgpu_display_supported_domains to get supported domain.
+
+Signed-off-by: Deepak Sharma <Deepak.Sharma@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+index 6c41cf9..2f4a6a7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+@@ -30,6 +30,7 @@
+ #include <drm/drmP.h>
+ #include <drm/amdgpu_drm.h>
+ #include "amdgpu.h"
++#include "amdgpu_display.h"
+
+ void amdgpu_gem_object_free(struct drm_gem_object *gobj)
+ {
+@@ -900,15 +901,20 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
+ struct amdgpu_device *adev = dev->dev_private;
+ struct drm_gem_object *gobj;
+ uint32_t handle;
++ u32 domain = amdgpu_display_supported_domains(adev);
+ int r;
+
+ args->pitch = amdgpu_align_pitch(adev, args->width,
+ DIV_ROUND_UP(args->bpp, 8), 0);
+ args->size = (u64)args->pitch * args->height;
+ args->size = ALIGN(args->size, PAGE_SIZE);
++ if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
++ domain = AMDGPU_GEM_DOMAIN_VRAM;
++ if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
++ domain = AMDGPU_GEM_DOMAIN_GTT;
++ }
+
+- r = amdgpu_gem_object_create(adev, args->size, 0,
+- AMDGPU_GEM_DOMAIN_VRAM,
++ r = amdgpu_gem_object_create(adev, args->size, 0, domain,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ false, NULL, &gobj);
+ if (r)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4567-drm-amdgpu-Add-helper-function-to-get-buffer-domain.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4567-drm-amdgpu-Add-helper-function-to-get-buffer-domain.patch
new file mode 100644
index 00000000..ad450fce
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4567-drm-amdgpu-Add-helper-function-to-get-buffer-domain.patch
@@ -0,0 +1,97 @@
+From 1c9feef9832189d2eff784341beaf6646ae55d85 Mon Sep 17 00:00:00 2001
+From: Deepak Sharma <Deepak.Sharma@amd.com>
+Date: Fri, 25 May 2018 17:12:29 -0700
+Subject: [PATCH 4567/5725] drm/amdgpu: Add helper function to get buffer
+ domain
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Move logic of getting supported domain to a helper
+function
+
+Signed-off-by: Deepak Sharma <Deepak.Sharma@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 10 +++-------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 17 ++++++++++++-----
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 3 ++-
+ 3 files changed, 17 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+index 2f4a6a7..275c316 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+@@ -901,19 +901,15 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
+ struct amdgpu_device *adev = dev->dev_private;
+ struct drm_gem_object *gobj;
+ uint32_t handle;
+- u32 domain = amdgpu_display_supported_domains(adev);
++ u32 domain;
+ int r;
+
+ args->pitch = amdgpu_align_pitch(adev, args->width,
+ DIV_ROUND_UP(args->bpp, 8), 0);
+ args->size = (u64)args->pitch * args->height;
+ args->size = ALIGN(args->size, PAGE_SIZE);
+- if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
+- domain = AMDGPU_GEM_DOMAIN_VRAM;
+- if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
+- domain = AMDGPU_GEM_DOMAIN_GTT;
+- }
+-
++ domain = amdgpu_bo_get_preferred_pin_domain(adev,
++ amdgpu_display_supported_domains(adev));
+ r = amdgpu_gem_object_create(adev, args->size, 0, domain,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ false, NULL, &gobj);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index 719423e..526f6e8 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -743,11 +743,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
+ /* This assumes only APU display buffers are pinned with (VRAM|GTT).
+ * See function amdgpu_display_supported_domains()
+ */
+- if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
+- domain = AMDGPU_GEM_DOMAIN_VRAM;
+- if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
+- domain = AMDGPU_GEM_DOMAIN_GTT;
+- }
++ domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);
+
+ if (bo->pin_count) {
+ uint32_t mem_type = bo->tbo.mem.mem_type;
+@@ -1107,3 +1103,14 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
+
+ return bo->tbo.offset;
+ }
++
++uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
++ uint32_t domain)
++{
++ if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
++ domain = AMDGPU_GEM_DOMAIN_VRAM;
++ if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
++ domain = AMDGPU_GEM_DOMAIN_GTT;
++ }
++ return domain;
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+index 2c79c25..40e7b42 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+@@ -300,7 +300,8 @@ int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
+ struct reservation_object *resv,
+ struct dma_fence **fence,
+ bool direct);
+-
++uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
++ uint32_t domain);
+
+ /*
+ * sub allocation
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4568-drm-amdgpu-To-get-gds-gws-and-oa-from-adev-gds.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4568-drm-amdgpu-To-get-gds-gws-and-oa-from-adev-gds.patch
new file mode 100644
index 00000000..f0eb732c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4568-drm-amdgpu-To-get-gds-gws-and-oa-from-adev-gds.patch
@@ -0,0 +1,86 @@
+From c371c240620adfb591862c8b0e8f001270e14d4a Mon Sep 17 00:00:00 2001
+From: Emily Deng <Emily.Deng@amd.com>
+Date: Wed, 30 May 2018 10:04:25 +0800
+Subject: [PATCH 4568/5725] drm/amdgpu: To get gds, gws and oa from adev->gds
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Now that the per-vm-bo feature is enabled, the user mode driver generally
+won't supply the bo_list; in this case gds_base, gds_size, gws_base, gws_size,
+oa_base and oa_size won't be set.
+
+Signed-off-by: Emily Deng <Emily.Deng@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 38 ++++++++++++++++++++--------------
+ 1 file changed, 23 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index 9aa47bd..8ea1f81 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -522,6 +522,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
+ struct amdgpu_bo_list_entry *e;
+ struct list_head duplicates;
+ unsigned i, tries = 10;
++ struct amdgpu_bo *gds;
++ struct amdgpu_bo *gws;
++ struct amdgpu_bo *oa;
+ int r;
+
+ INIT_LIST_HEAD(&p->validated);
+@@ -653,10 +656,11 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
+
+ amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
+ p->bytes_moved_vis);
++
+ if (p->bo_list) {
+- struct amdgpu_bo *gds = p->bo_list->gds_obj;
+- struct amdgpu_bo *gws = p->bo_list->gws_obj;
+- struct amdgpu_bo *oa = p->bo_list->oa_obj;
++ gds = p->bo_list->gds_obj;
++ gws = p->bo_list->gws_obj;
++ oa = p->bo_list->oa_obj;
+ struct amdgpu_vm *vm = &fpriv->vm;
+ unsigned i;
+
+@@ -665,19 +669,23 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
+
+ p->bo_list->array[i].bo_va = amdgpu_vm_bo_find(vm, bo);
+ }
++ } else {
++ gds = p->adev->gds.gds_gfx_bo;
++ gws = p->adev->gds.gws_gfx_bo;
++ oa = p->adev->gds.oa_gfx_bo;
++ }
+
+- if (gds) {
+- p->job->gds_base = amdgpu_bo_gpu_offset(gds);
+- p->job->gds_size = amdgpu_bo_size(gds);
+- }
+- if (gws) {
+- p->job->gws_base = amdgpu_bo_gpu_offset(gws);
+- p->job->gws_size = amdgpu_bo_size(gws);
+- }
+- if (oa) {
+- p->job->oa_base = amdgpu_bo_gpu_offset(oa);
+- p->job->oa_size = amdgpu_bo_size(oa);
+- }
++ if (gds) {
++ p->job->gds_base = amdgpu_bo_gpu_offset(gds);
++ p->job->gds_size = amdgpu_bo_size(gds);
++ }
++ if (gws) {
++ p->job->gws_base = amdgpu_bo_gpu_offset(gws);
++ p->job->gws_size = amdgpu_bo_size(gws);
++ }
++ if (oa) {
++ p->job->oa_base = amdgpu_bo_gpu_offset(oa);
++ p->job->oa_size = amdgpu_bo_size(oa);
+ }
+
+ if (!r && p->uf_entry.robj) {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4569-drm-amdgpu-correct-SMU11-SYSPLL0-clock-id-values.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4569-drm-amdgpu-correct-SMU11-SYSPLL0-clock-id-values.patch
new file mode 100644
index 00000000..7c254a82
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4569-drm-amdgpu-correct-SMU11-SYSPLL0-clock-id-values.patch
@@ -0,0 +1,44 @@
+From 4d743fca07bda579cb8d41aae3cc8511acfa80f9 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Mon, 28 May 2018 08:53:03 +0800
+Subject: [PATCH 4569/5725] drm/amdgpu: correct SMU11 SYSPLL0 clock id values
+
+The SMU11 SYSPLL0 clock ids were assigned wrong values.
+
+Change-Id: I8dfafcce9e4ed6dabb7025a5a822d1135de6cb8a
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+---
+ drivers/gpu/drm/amd/include/atomfirmware.h | 12 +++++-------
+ 1 file changed, 5 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
+index c6c1666..092d800 100644
+--- a/drivers/gpu/drm/amd/include/atomfirmware.h
++++ b/drivers/gpu/drm/amd/include/atomfirmware.h
+@@ -2026,17 +2026,15 @@ enum atom_smu11_syspll_id {
+ SMU11_SYSPLL3_1_ID = 6,
+ };
+
+-
+ enum atom_smu11_syspll0_clock_id {
+- SMU11_SYSPLL0_SOCCLK_ID = 0, // SOCCLK
+- SMU11_SYSPLL0_MP0CLK_ID = 1, // MP0CLK
+- SMU11_SYSPLL0_DCLK_ID = 2, // DCLK
+- SMU11_SYSPLL0_VCLK_ID = 3, // VCLK
+- SMU11_SYSPLL0_ECLK_ID = 4, // ECLK
++ SMU11_SYSPLL0_ECLK_ID = 0, // ECLK
++ SMU11_SYSPLL0_SOCCLK_ID = 1, // SOCCLK
++ SMU11_SYSPLL0_MP0CLK_ID = 2, // MP0CLK
++ SMU11_SYSPLL0_DCLK_ID = 3, // DCLK
++ SMU11_SYSPLL0_VCLK_ID = 4, // VCLK
+ SMU11_SYSPLL0_DCEFCLK_ID = 5, // DCEFCLK
+ };
+
+-
+ enum atom_smu11_syspll1_0_clock_id {
+ SMU11_SYSPLL1_0_UCLKA_ID = 0, // UCLK_a
+ };
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4570-drm-amd-powerplay-bug-fixs-for-getsmuclockinfo.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4570-drm-amd-powerplay-bug-fixs-for-getsmuclockinfo.patch
new file mode 100644
index 00000000..60f68872
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4570-drm-amd-powerplay-bug-fixs-for-getsmuclockinfo.patch
@@ -0,0 +1,41 @@
+From 6d299cd3423fe835d8536db71ced1a2b0b5348a2 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Mon, 28 May 2018 08:55:09 +0800
+Subject: [PATCH 4570/5725] drm/amd/powerplay: bug fixs for getsmuclockinfo
+
+The .syspll_id and .dfsdid fields are not initialized correctly, and a
+le32_to_cpu conversion is needed on the output.
+
+Change-Id: I491e70b276554f38e1a296970892347474814e75
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
+index c97b0e5..5325661 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
+@@ -496,7 +496,9 @@ int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, BIOS_CLKI
+ uint32_t ix;
+
+ parameters.clk_id = id;
++ parameters.syspll_id = 0;
+ parameters.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
++ parameters.dfsdid = 0;
+
+ ix = GetIndexIntoMasterCmdTable(getsmuclockinfo);
+
+@@ -505,7 +507,7 @@ int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, BIOS_CLKI
+ return -EINVAL;
+
+ output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&parameters;
+- *frequency = output->atom_smu_outputclkfreq.smu_clock_freq_hz / 10000;
++ *frequency = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
+
+ return 0;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4571-drm-amdgpu-typo-fix-for-vega20-cg-flags.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4571-drm-amdgpu-typo-fix-for-vega20-cg-flags.patch
new file mode 100644
index 00000000..20361a9d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4571-drm-amdgpu-typo-fix-for-vega20-cg-flags.patch
@@ -0,0 +1,31 @@
+From 89443406b49146529f203d3e4598d406689f3081 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Mon, 28 May 2018 09:22:09 +0800
+Subject: [PATCH 4571/5725] drm/amdgpu: typo fix for vega20 cg flags
+
+The AMD_CG_SUPPORT_HDP_LS was wrongly written as
+AMD_CG_SUPPORT_BIF_LS.
+
+Change-Id: If807cc3e9e9e10f8b132095c0a7b537be6285107
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/soc15.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index 9a7a85d..83f2717 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -685,7 +685,7 @@ static int soc15_common_early_init(void *handle)
+ AMD_CG_SUPPORT_BIF_MGCG |
+ AMD_CG_SUPPORT_BIF_LS |
+ AMD_CG_SUPPORT_HDP_MGCG |
+- AMD_CG_SUPPORT_BIF_LS |
++ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_ROM_MGCG |
+ AMD_CG_SUPPORT_VCE_MGCG |
+ AMD_CG_SUPPORT_UVD_MGCG;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4572-drm-amdgpu-fix-ISO-C90-forbids-mixed-declarations.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4572-drm-amdgpu-fix-ISO-C90-forbids-mixed-declarations.patch
new file mode 100644
index 00000000..ed1b6aa3
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4572-drm-amdgpu-fix-ISO-C90-forbids-mixed-declarations.patch
@@ -0,0 +1,44 @@
+From 37a7089d56eda1fb6b2d569927d57bb209d236a3 Mon Sep 17 00:00:00 2001
+From: Chunming Zhou <david1.zhou@amd.com>
+Date: Wed, 30 May 2018 11:07:34 +0800
+Subject: [PATCH 4572/5725] drm/amdgpu: fix 'ISO C90 forbids mixed
+ declarations'
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Fixing a compiler warning:
+drivers/gpu/drm/amd/amdgpu//amdgpu_cs.c:663:3: warning: ISO C90 forbids mixed declarations and code [-Wdeclaration-after-statement]
+ struct amdgpu_vm *vm = &fpriv->vm;
+ ^~~~~~
+
+Change-Id: I412f5783e2839c53841e6ab665f939236bdc5bf1
+Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index 8ea1f81..10e8dbb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -658,12 +658,12 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
+ p->bytes_moved_vis);
+
+ if (p->bo_list) {
+- gds = p->bo_list->gds_obj;
+- gws = p->bo_list->gws_obj;
+- oa = p->bo_list->oa_obj;
+ struct amdgpu_vm *vm = &fpriv->vm;
+ unsigned i;
+
++ gds = p->bo_list->gds_obj;
++ gws = p->bo_list->gws_obj;
++ oa = p->bo_list->oa_obj;
+ for (i = 0; i < p->bo_list->num_entries; i++) {
+ struct amdgpu_bo *bo = p->bo_list->array[i].robj;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4573-drm-amdgpu-gds-bo-must-not-be-per-vm-bo.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4573-drm-amdgpu-gds-bo-must-not-be-per-vm-bo.patch
new file mode 100644
index 00000000..2d6808b3
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4573-drm-amdgpu-gds-bo-must-not-be-per-vm-bo.patch
@@ -0,0 +1,40 @@
+From 5373139da9ccdecada62d15ff90f6b10851f933c Mon Sep 17 00:00:00 2001
+From: Chunming Zhou <david1.zhou@amd.com>
+Date: Wed, 30 May 2018 11:12:08 +0800
+Subject: [PATCH 4573/5725] drm/amdgpu: gds bo must not be per-vm-bo
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+In the per-vm-bo case, there could be no bo list.
+But a gds bo created from user space must be passed to the bo list.
+So add a check to prevent creating a gds bo as a per-vm-bo.
+
+Change-Id: Idfa58c40447df0db2883413f9f7ccf56b47579f5
+Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+index 275c316..0e022b2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+@@ -269,6 +269,13 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
+ /* create a gem object to contain this object in */
+ if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
+ AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
++ if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
++ /* if gds bo is created from user space, it must be
++ * passed to bo list
++ */
++ DRM_ERROR("GDS bo cannot be per-vm-bo\n");
++ return -EINVAL;
++ }
+ flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
+ if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
+ size = size << AMDGPU_GDS_SHIFT;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4574-drm-amd-pp-Connect-display_clock_voltage_request-to-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4574-drm-amd-pp-Connect-display_clock_voltage_request-to-.patch
new file mode 100644
index 00000000..f51c069a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4574-drm-amd-pp-Connect-display_clock_voltage_request-to-.patch
@@ -0,0 +1,115 @@
+From be6c44933d31fe66ad96d2291c917a8219e556af Mon Sep 17 00:00:00 2001
+From: Mikita Lipski <mikita.lipski@amd.com>
+Date: Tue, 29 May 2018 17:44:36 -0400
+Subject: [PATCH 4574/5725] drm/amd/pp: Connect display_clock_voltage_request
+ to a function pointer
+
+Get rid of an empty duplicate of smu10_display_clock_voltage_request
+
+Add display_clock_voltage_request to smu10 functions struct so it
+can be called from outside the class and connect the pointer to
+the function.
+
+That way Display driver can finally apply clock voltage requests
+when needed.
+
+Signed-off-by: Mikita Lipski <mikita.lipski@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 64 +++++++++++------------
+ 1 file changed, 31 insertions(+), 33 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+index 73c2e43..d4bc83e 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+@@ -53,8 +53,37 @@ static const unsigned long SMU10_Magic = (unsigned long) PHM_Rv_Magic;
+
+
+ static int smu10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
+- struct pp_display_clock_request *clock_req);
++ struct pp_display_clock_request *clock_req)
++{
++ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
++ enum amd_pp_clock_type clk_type = clock_req->clock_type;
++ uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
++ PPSMC_Msg msg;
+
++ switch (clk_type) {
++ case amd_pp_dcf_clock:
++ if (clk_freq == smu10_data->dcf_actual_hard_min_freq)
++ return 0;
++ msg = PPSMC_MSG_SetHardMinDcefclkByFreq;
++ smu10_data->dcf_actual_hard_min_freq = clk_freq;
++ break;
++ case amd_pp_soc_clock:
++ msg = PPSMC_MSG_SetHardMinSocclkByFreq;
++ break;
++ case amd_pp_f_clock:
++ if (clk_freq == smu10_data->f_actual_hard_min_freq)
++ return 0;
++ smu10_data->f_actual_hard_min_freq = clk_freq;
++ msg = PPSMC_MSG_SetHardMinFclkByFreq;
++ break;
++ default:
++ pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
++ return -EINVAL;
++ }
++ smum_send_msg_to_smc_with_parameter(hwmgr, msg, clk_freq);
++
++ return 0;
++}
+
+ static struct smu10_power_state *cast_smu10_ps(struct pp_hw_power_state *hw_ps)
+ {
+@@ -1023,39 +1052,7 @@ static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
+ return 0;
+ }
+
+-static int smu10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
+- struct pp_display_clock_request *clock_req)
+-{
+- struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+- enum amd_pp_clock_type clk_type = clock_req->clock_type;
+- uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
+- PPSMC_Msg msg;
+-
+- switch (clk_type) {
+- case amd_pp_dcf_clock:
+- if (clk_freq == smu10_data->dcf_actual_hard_min_freq)
+- return 0;
+- msg = PPSMC_MSG_SetHardMinDcefclkByFreq;
+- smu10_data->dcf_actual_hard_min_freq = clk_freq;
+- break;
+- case amd_pp_soc_clock:
+- msg = PPSMC_MSG_SetHardMinSocclkByFreq;
+- break;
+- case amd_pp_f_clock:
+- if (clk_freq == smu10_data->f_actual_hard_min_freq)
+- return 0;
+- smu10_data->f_actual_hard_min_freq = clk_freq;
+- msg = PPSMC_MSG_SetHardMinFclkByFreq;
+- break;
+- default:
+- pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
+- return -EINVAL;
+- }
+
+- smum_send_msg_to_smc_with_parameter(hwmgr, msg, clk_freq);
+-
+- return 0;
+-}
+
+ static int smu10_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
+ {
+@@ -1188,6 +1185,7 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
+ .set_mmhub_powergating_by_smu = smu10_set_mmhub_powergating_by_smu,
+ .smus_notify_pwe = smu10_smus_notify_pwe,
+ .gfx_off_control = smu10_gfx_off_control,
++ .display_clock_voltage_request = smu10_display_clock_voltage_request,
+ };
+
+ int smu10_init_function_pointers(struct pp_hwmgr *hwmgr)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4575-drm-amd-pp-Allow-underclocking-when-od-table-is-empt.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4575-drm-amd-pp-Allow-underclocking-when-od-table-is-empt.patch
new file mode 100644
index 00000000..eb05e16f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4575-drm-amd-pp-Allow-underclocking-when-od-table-is-empt.patch
@@ -0,0 +1,121 @@
+From 81a6e77c8965eabd30a4e416a30edbd3692afea9 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Wed, 30 May 2018 16:52:22 +0800
+Subject: [PATCH 4575/5725] drm/amd/pp: Allow underclocking when od table is
+ empty in vbios
+
+If the max od engine clock limit and memory clock limit are not set
+in the vbios, the driver will allow underclocking instead of disabling
+the od feature completely.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c | 6 ------
+ drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c | 6 ------
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 5 ++++-
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 6 ++++++
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c | 6 ------
+ 5 files changed, 10 insertions(+), 19 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+index f0d48b1..35bd987 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+@@ -870,12 +870,6 @@ static int init_over_drive_limits(
+ hwmgr->platform_descriptor.maxOverdriveVDDC = 0;
+ hwmgr->platform_descriptor.overdriveVDDCStep = 0;
+
+- if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0 \
+- || hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0) {
+- hwmgr->od_enabled = false;
+- pr_debug("OverDrive feature not support by VBIOS\n");
+- }
+-
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
+index ce64dfa..925e171 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
+@@ -1074,12 +1074,6 @@ static int init_overdrive_limits(struct pp_hwmgr *hwmgr,
+ powerplay_table,
+ (const ATOM_FIRMWARE_INFO_V2_1 *)fw_info);
+
+- if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0
+- && hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0) {
+- hwmgr->od_enabled = false;
+- pr_debug("OverDrive feature not support by VBIOS\n");
+- }
+-
+ return result;
+ }
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+index f697a56..46d6368 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+@@ -791,7 +791,8 @@ static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
+ data->dpm_table.sclk_table.count++;
+ }
+ }
+-
++ if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0)
++ hwmgr->platform_descriptor.overdriveLimit.engineClock = dep_sclk_table->entries[i-1].clk;
+ /* Initialize Mclk DPM table based on allow Mclk values */
+ data->dpm_table.mclk_table.count = 0;
+ for (i = 0; i < dep_mclk_table->count; i++) {
+@@ -806,6 +807,8 @@ static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
+ }
+ }
+
++ if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0)
++ hwmgr->platform_descriptor.overdriveLimit.memoryClock = dep_mclk_table->entries[i-1].clk;
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+index 0ad2ca3..896c10b 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+@@ -1311,6 +1311,9 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
+ vega10_setup_default_single_dpm_table(hwmgr,
+ dpm_table,
+ dep_gfx_table);
++ if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0)
++ hwmgr->platform_descriptor.overdriveLimit.engineClock =
++ dpm_table->dpm_levels[dpm_table->count-1].value;
+ vega10_init_dpm_state(&(dpm_table->dpm_state));
+
+ /* Initialize Mclk DPM table based on allow Mclk values */
+@@ -1319,6 +1322,9 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
+ vega10_setup_default_single_dpm_table(hwmgr,
+ dpm_table,
+ dep_mclk_table);
++ if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0)
++ hwmgr->platform_descriptor.overdriveLimit.memoryClock =
++ dpm_table->dpm_levels[dpm_table->count-1].value;
+ vega10_init_dpm_state(&(dpm_table->dpm_state));
+
+ data->dpm_table.eclk_table.count = 0;
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
+index 0768d25..16b1a9c 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
+@@ -267,12 +267,6 @@ static int init_over_drive_limits(
+ hwmgr->platform_descriptor.maxOverdriveVDDC = 0;
+ hwmgr->platform_descriptor.overdriveVDDCStep = 0;
+
+- if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0 ||
+- hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0) {
+- hwmgr->od_enabled = false;
+- pr_debug("OverDrive feature not support by VBIOS\n");
+- }
+-
+ return 0;
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4576-drm-gfx9-Update-gc-goldensetting-for-vega20.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4576-drm-gfx9-Update-gc-goldensetting-for-vega20.patch
new file mode 100644
index 00000000..ba17498a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4576-drm-gfx9-Update-gc-goldensetting-for-vega20.patch
@@ -0,0 +1,30 @@
+From 8f0f9d21c529385b3f2bf3e85df28f920d7204fb Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Thu, 24 May 2018 15:36:57 +0800
+Subject: [PATCH 4576/5725] drm/gfx9: Update gc goldensetting for vega20.
+
+Update mmCB_DCC_CONFIG register goldensetting.
+
+Change-Id: I45082847c55cc49b1f829f939a0aaac3b0024fb0
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index dcb916b..ea348fe 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -111,6 +111,7 @@ static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
+
+ static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
+ {
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x0f000080, 0x04000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x22014042),
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4577-drm-amdgpu-Fix-NULL-pointer-when-load-kfd-driver-wit.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4577-drm-amdgpu-Fix-NULL-pointer-when-load-kfd-driver-wit.patch
new file mode 100644
index 00000000..0b6e833f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4577-drm-amdgpu-Fix-NULL-pointer-when-load-kfd-driver-wit.patch
@@ -0,0 +1,62 @@
+From 679118a682b3ef96eb0e90cc2aad3d120a59dae3 Mon Sep 17 00:00:00 2001
+From: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+Date: Wed, 9 Jan 2019 19:39:05 +0530
+Subject: [PATCH 4577/5725] drm/amdgpu: Fix NULL pointer when load kfd driver
+ with PP block is disabled
+
+When the PP block is disabled, return a fixed value (100M) for mclk and
+sclk in bare-metal mode. This will cover the emulation mode as well.
+
+Change-Id: If34e3517b6cb6f31e898bbe7921485fbddb79fb9
+Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 18 +++++++-----------
+ 1 file changed, 7 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+index 6a5d236..893c492 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+@@ -383,15 +383,12 @@ void get_local_mem_info(struct kgd_dev *kgd,
+ mem_info->local_mem_size_public,
+ mem_info->local_mem_size_private);
+
+- if (amdgpu_emu_mode == 1) {
+- mem_info->mem_clk_max = 100;
+- return;
+- }
+-
+ if (amdgpu_sriov_vf(adev))
+ mem_info->mem_clk_max = adev->clock.default_mclk / 100;
+- else
++ else if (adev->powerplay.pp_funcs)
+ mem_info->mem_clk_max = amdgpu_dpm_get_mclk(adev, false) / 100;
++ else
++ mem_info->mem_clk_max = 100;
+ }
+
+ uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
+@@ -408,14 +405,13 @@ uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+ /* the sclk is in quantas of 10kHz */
+- if (amdgpu_emu_mode == 1)
+- return 100;
+-
+
+ if (amdgpu_sriov_vf(adev))
+ return adev->clock.default_sclk / 100;
+-
+- return amdgpu_dpm_get_sclk(adev, false) / 100;
++ else if (adev->powerplay.pp_funcs)
++ return amdgpu_dpm_get_sclk(adev, false) / 100;
++ else
++ return 100;
+ }
+
+ void get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4578-drm-amdgpu-add-kernel-doc-for-amdgpu_object.c.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4578-drm-amdgpu-add-kernel-doc-for-amdgpu_object.c.patch
new file mode 100644
index 00000000..a84c9d7a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4578-drm-amdgpu-add-kernel-doc-for-amdgpu_object.c.patch
@@ -0,0 +1,480 @@
+From f363b213745fe1ccbb24b11e5bacb04ed41b1a12 Mon Sep 17 00:00:00 2001
+From: Samuel Li <Samuel.Li@amd.com>
+Date: Thu, 17 May 2018 17:58:45 -0400
+Subject: [PATCH 4578/5725] drm/amdgpu: add kernel doc for amdgpu_object.c
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Document the amdgpu buffer object API.
+
+v2: Add a DOC section and some more clarification.
+v3: Add some clarification and fix a spelling.
+
+Suggested-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Samuel Li <Samuel.Li@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+
+Conflicts:
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+
+Change-Id: I286b181d79cefedd3c134d8f142ca67e23f44b63
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 269 +++++++++++++++++++++++++++++
+ 1 file changed, 269 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index 526f6e8..82ab087 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -38,6 +38,19 @@
+ #include "amdgpu_trace.h"
+ #include "amdgpu_amdkfd.h"
+
++/**
++ * DOC: amdgpu_object
++ *
++ * This defines the interfaces to operate on an &amdgpu_bo buffer object which
++ * represents memory used by driver (VRAM, system memory, etc.). The driver
++ * provides DRM/GEM APIs to userspace. DRM/GEM APIs then use these interfaces
++ * to create/destroy/set buffer object which are then managed by the kernel TTM
++ * memory manager.
++ * The interfaces are also used internally by kernel clients, including gfx,
++ * uvd, etc. for kernel managed allocations used by the GPU.
++ *
++*/
++
+ static bool amdgpu_need_backup(struct amdgpu_device *adev)
+ {
+ if (adev->flags & AMD_IS_APU)
+@@ -76,6 +89,15 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
+ kfree(bo);
+ }
+
++/**
++ * amdgpu_ttm_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
++ * @bo: buffer object to be checked
++ *
++ * Uses destroy function associated with the object to determine if this is
++ * an &amdgpu_bo.
++ *
++ * Returns true if the object belongs to &amdgpu_bo, false if not.
++ */
+ bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
+ {
+ if (bo->destroy == &amdgpu_ttm_bo_destroy)
+@@ -83,6 +105,14 @@ bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
+ return false;
+ }
+
++/**
++ * amdgpu_ttm_placement_from_domain - set buffer's placement
++ * @abo: &amdgpu_bo buffer object whose placement is to be set
++ * @domain: requested domain
++ *
++ * Sets buffer's placement according to requested domain and the buffer's
++ * flags.
++ */
+ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
+ {
+ struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
+@@ -538,6 +568,19 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
+ return r;
+ }
+
++/**
++ * amdgpu_bo_create - create an &amdgpu_bo buffer object
++ * @adev: amdgpu device object
++ * @bp: parameters to be used for the buffer object
++ * @bo_ptr: pointer to the buffer object pointer
++ *
++ * Creates an &amdgpu_bo buffer object; and if requested, also creates a
++ * shadow object.
++ * Shadow object is used to backup the original buffer object, and is always
++ * in GTT.
++ *
++ * Returns 0 for success or a negative error code on failure.
++ */
+ int amdgpu_bo_create(struct amdgpu_device *adev,
+ struct amdgpu_bo_param *bp,
+ struct amdgpu_bo **bo_ptr)
+@@ -567,6 +610,20 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
+ return r;
+ }
+
++/**
++ * amdgpu_bo_backup_to_shadow - Backs up an &amdgpu_bo buffer object
++ * @adev: amdgpu device object
++ * @ring: amdgpu_ring for the engine handling the buffer operations
++ * @bo: &amdgpu_bo buffer to be backed up
++ * @resv: reservation object with embedded fence
++ * @fence: dma_fence associated with the operation
++ * @direct: whether to submit the job directly
++ *
++ * Copies an &amdgpu_bo buffer object to its shadow object.
++ * Not used for now.
++ *
++ * Returns 0 for success or a negative error code on failure.
++ */
+ int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ struct amdgpu_bo *bo,
+@@ -599,6 +656,17 @@ int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
+ return r;
+ }
+
++/**
++ * amdgpu_bo_validate - validate an &amdgpu_bo buffer object
++ * @bo: pointer to the buffer object
++ *
++ * Sets placement according to domain; and changes placement and caching
++ * policy of the buffer object according to the placement.
++ * This is used for validating shadow bos. It calls ttm_bo_validate() to
++ * make sure the buffer is resident where it needs to be.
++ *
++ * Returns 0 for success or a negative error code on failure.
++ */
+ int amdgpu_bo_validate(struct amdgpu_bo *bo)
+ {
+ struct ttm_operation_ctx ctx = { false, false };
+@@ -621,6 +689,21 @@ int amdgpu_bo_validate(struct amdgpu_bo *bo)
+ return r;
+ }
+
++/**
++ * amdgpu_bo_restore_from_shadow - restore an &amdgpu_bo buffer object
++ * @adev: amdgpu device object
++ * @ring: amdgpu_ring for the engine handling the buffer operations
++ * @bo: &amdgpu_bo buffer to be restored
++ * @resv: reservation object with embedded fence
++ * @fence: dma_fence associated with the operation
++ * @direct: whether to submit the job directly
++ *
++ * Copies a buffer object's shadow content back to the object.
++ * This is used for recovering a buffer from its shadow in case of a gpu
++ * reset where vram context may be lost.
++ *
++ * Returns 0 for success or a negative error code on failure.
++ */
+ int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ struct amdgpu_bo *bo,
+@@ -653,6 +736,16 @@ int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
+ return r;
+ }
+
++/**
++ * amdgpu_bo_kmap - map an &amdgpu_bo buffer object
++ * @bo: &amdgpu_bo buffer object to be mapped
++ * @ptr: kernel virtual address to be returned
++ *
++ * Calls ttm_bo_kmap() to set up the kernel virtual mapping; calls
++ * amdgpu_bo_kptr() to get the kernel virtual address.
++ *
++ * Returns 0 for success or a negative error code on failure.
++ */
+ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
+ {
+ void *kptr;
+@@ -683,6 +776,14 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
+ return 0;
+ }
+
++/**
++ * amdgpu_bo_kptr - returns a kernel virtual address of the buffer object
++ * @bo: &amdgpu_bo buffer object
++ *
++ * Calls ttm_kmap_obj_virtual() to get the kernel virtual address
++ *
++ * Returns the virtual address of a buffer object area.
++ */
+ void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
+ {
+ bool is_iomem;
+@@ -690,12 +791,26 @@ void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
+ return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
+ }
+
++/**
++ * amdgpu_bo_kunmap - unmap an &amdgpu_bo buffer object
++ * @bo: &amdgpu_bo buffer object to be unmapped
++ *
++ * Unmaps a kernel map set up by amdgpu_bo_kmap().
++ */
+ void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
+ {
+ if (bo->kmap.bo)
+ ttm_bo_kunmap(&bo->kmap);
+ }
+
++/**
++ * amdgpu_bo_ref - reference an &amdgpu_bo buffer object
++ * @bo: &amdgpu_bo buffer object
++ *
++ * References the contained &ttm_buffer_object.
++ *
++ * Returns a refcounted pointer to the &amdgpu_bo buffer object.
++ */
+ struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
+ {
+ if (bo == NULL)
+@@ -705,6 +820,12 @@ struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
+ return bo;
+ }
+
++/**
++ * amdgpu_bo_unref - unreference an &amdgpu_bo buffer object
++ * @bo: &amdgpu_bo buffer object
++ *
++ * Unreferences the contained &ttm_buffer_object and clear the pointer
++ */
+ void amdgpu_bo_unref(struct amdgpu_bo **bo)
+ {
+ struct ttm_buffer_object *tbo;
+@@ -718,6 +839,28 @@ void amdgpu_bo_unref(struct amdgpu_bo **bo)
+ *bo = NULL;
+ }
+
++/**
++ * amdgpu_bo_pin_restricted - pin an &amdgpu_bo buffer object
++ * @bo: &amdgpu_bo buffer object to be pinned
++ * @domain: domain to be pinned to
++ * @min_offset: the start of requested address range
++ * @max_offset: the end of requested address range
++ * @gpu_addr: GPU offset of the &amdgpu_bo buffer object
++ *
++ * Pins the buffer object according to requested domain and address range. If
++ * the memory is unbound gart memory, binds the pages into gart table. Adjusts
++ * pin_count and pin_size accordingly.
++ *
++ * Pinning means to lock pages in memory along with keeping them at a fixed
++ * offset. It is required when a buffer can not be moved, for example, when
++ * a display buffer is being scanned out.
++ *
++ * Compared with amdgpu_bo_pin(), this function gives more flexibility on
++ * where to pin a buffer if there are specific restrictions on where a buffer
++ * must be located.
++ *
++ * Returns 0 for success or a negative error code on failure.
++ */
+ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
+ u64 min_offset, u64 max_offset,
+ u64 *gpu_addr)
+@@ -812,11 +955,32 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
+ return r;
+ }
+
++/**
++ * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
++ * @bo: &amdgpu_bo buffer object to be pinned
++ * @domain: domain to be pinned to
++ * @gpu_addr: GPU offset of the &amdgpu_bo buffer object
++ *
++ * A simple wrapper to amdgpu_bo_pin_restricted().
++ * Provides a simpler API for buffers that do not have any strict restrictions
++ * on where a buffer must be located.
++ *
++ * Returns 0 for success or a negative error code on failure.
++ */
+ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
+ {
+ return amdgpu_bo_pin_restricted(bo, domain, 0, 0, gpu_addr);
+ }
+
++/**
++ * amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object
++ * @bo: &amdgpu_bo buffer object to be unpinned
++ *
++ * Decreases the pin_count, and clears the flags if pin_count reaches 0.
++ * Changes placement and pin size accordingly.
++ *
++ * Returns 0 for success or a negative error code on failure.
++ */
+ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
+ {
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+@@ -852,6 +1016,15 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
+ return r;
+ }
+
++/**
++ * amdgpu_bo_evict_vram - evict VRAM buffers
++ * @adev: amdgpu device object
++ *
++ * Evicts all VRAM buffers on the lru list of the memory type.
++ * Mainly used for evicting vram at suspend time.
++ *
++ * Returns 0 for success or a negative error code on failure.
++ */
+ int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
+ {
+ /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
+@@ -874,6 +1047,14 @@ static const char *amdgpu_vram_names[] = {
+ "DDR4",
+ };
+
++/**
++ * amdgpu_bo_init - initialize memory manager
++ * @adev: amdgpu device object
++ *
++ * Calls amdgpu_ttm_init() to initialize amdgpu memory manager.
++ *
++ * Returns 0 for success or a negative error code on failure.
++ */
+ int amdgpu_bo_init(struct amdgpu_device *adev)
+ {
+ /* reserve PAT memory space to WC for VRAM */
+@@ -891,6 +1072,15 @@ int amdgpu_bo_init(struct amdgpu_device *adev)
+ return amdgpu_ttm_init(adev);
+ }
+
++/**
++ * amdgpu_bo_late_init - late init
++ * @adev: amdgpu device object
++ *
++ * Calls amdgpu_ttm_late_init() to free resources used earlier during
++ * initialization.
++ *
++ * Returns 0 for success or a negative error code on failure.
++ */
+ int amdgpu_bo_late_init(struct amdgpu_device *adev)
+ {
+ amdgpu_ttm_late_init(adev);
+@@ -898,6 +1088,12 @@ int amdgpu_bo_late_init(struct amdgpu_device *adev)
+ return 0;
+ }
+
++/**
++ * amdgpu_bo_fini - tear down memory manager
++ * @adev: amdgpu device object
++ *
++ * Reverses amdgpu_bo_init() to tear down memory manager.
++ */
+ void amdgpu_bo_fini(struct amdgpu_device *adev)
+ {
+ amdgpu_ttm_fini(adev);
+@@ -905,12 +1101,31 @@ void amdgpu_bo_fini(struct amdgpu_device *adev)
+ arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
+ }
+
++/**
++ * amdgpu_bo_fbdev_mmap - mmap fbdev memory
++ * @bo: &amdgpu_bo buffer object
++ * @vma: vma as input from the fbdev mmap method
++ *
++ * Calls ttm_fbdev_mmap() to mmap fbdev memory if it is backed by a bo.
++ *
++ * Returns 0 for success or a negative error code on failure.
++ */
+ int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
+ struct vm_area_struct *vma)
+ {
+ return ttm_fbdev_mmap(vma, &bo->tbo);
+ }
+
++/**
++ * amdgpu_bo_set_tiling_flags - set tiling flags
++ * @bo: &amdgpu_bo buffer object
++ * @tiling_flags: new flags
++ *
++ * Sets buffer object's tiling flags with the new one. Used by GEM ioctl or
++ * kernel driver to set the tiling flags on a buffer.
++ *
++ * Returns 0 for success or a negative error code on failure.
++ */
+ int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
+ {
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+@@ -923,6 +1138,14 @@ int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
+ return 0;
+ }
+
++/**
++ * amdgpu_bo_get_tiling_flags - get tiling flags
++ * @bo: &amdgpu_bo buffer object
++ * @tiling_flags: returned flags
++ *
++ * Gets buffer object's tiling flags. Used by GEM ioctl or kernel driver to
++ * set the tiling flags on a buffer.
++ */
+ void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
+ {
+ lockdep_assert_held(&bo->tbo.resv->lock.base);
+@@ -931,6 +1154,18 @@ void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
+ *tiling_flags = bo->tiling_flags;
+ }
+
++/**
++ * amdgpu_bo_set_metadata - set metadata
++ * @bo: &amdgpu_bo buffer object
++ * @metadata: new metadata
++ * @metadata_size: size of the new metadata
++ * @flags: flags of the new metadata
++ *
++ * Sets buffer object's metadata, its size and flags.
++ * Used via GEM ioctl.
++ *
++ * Returns 0 for success or a negative error code on failure.
++ */
+ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
+ uint32_t metadata_size, uint64_t flags)
+ {
+@@ -960,6 +1195,20 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
+ return 0;
+ }
+
++/**
++ * amdgpu_bo_get_metadata - get metadata
++ * @bo: &amdgpu_bo buffer object
++ * @buffer: returned metadata
++ * @buffer_size: size of the buffer
++ * @metadata_size: size of the returned metadata
++ * @flags: flags of the returned metadata
++ *
++ * Gets buffer object's metadata, its size and flags. buffer_size shall not be
++ * less than metadata_size.
++ * Used via GEM ioctl.
++ *
++ * Returns 0 for success or a negative error code on failure.
++ */
+ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
+ size_t buffer_size, uint32_t *metadata_size,
+ uint64_t *flags)
+@@ -983,6 +1232,16 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
+ return 0;
+ }
+
++/**
++ * amdgpu_bo_move_notify - notification about a memory move
++ * @bo: pointer to a buffer object
++ * @evict: if this move is evicting the buffer from the graphics address space
++ * @new_mem: new information of the buffer object
++ *
++ * Marks the corresponding &amdgpu_bo buffer object as invalid, also performs
++ * bookkeeping.
++ * TTM driver callback which is called when ttm moves a buffer.
++ */
+ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
+ bool evict,
+ struct ttm_mem_reg *new_mem)
+@@ -1011,6 +1270,16 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
+ trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
+ }
+
++/**
++ * amdgpu_bo_fault_reserve_notify - notification about a memory fault
++ * @bo: pointer to a buffer object
++ *
++ * Notifies the driver we are taking a fault on this BO and have reserved it,
++ * also performs bookkeeping.
++ * TTM driver callback for dealing with vm faults.
++ *
++ * Returns 0 for success or a negative error code on failure.
++ */
+ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+ {
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4579-drm-amdgpu-add-checking-for-sos-version.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4579-drm-amdgpu-add-checking-for-sos-version.patch
new file mode 100644
index 00000000..94fc4e9c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4579-drm-amdgpu-add-checking-for-sos-version.patch
@@ -0,0 +1,77 @@
+From ee9289575e95570caeefc6ffe2c9ef084c27e9b4 Mon Sep 17 00:00:00 2001
+From: Huang Rui <ray.huang@amd.com>
+Date: Sat, 12 May 2018 12:31:12 +0800
+Subject: [PATCH 4579/5725] drm/amdgpu: add checking for sos version
+
+The sos ucode version will be changed to align with the value of
+mmMP0_SMN_C2PMSG_58, so add a check for this. Meanwhile, we have to stay
+backwards compatible, so several recent legacy versions are added as a
+whitelist for the version check.
+
+Signed-off-by: Huang Rui <ray.huang@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/psp_v3_1.c | 27 ++++++++++++++++++++++++++-
+ 1 file changed, 26 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+index 0c768e3..727071f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
++++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+@@ -47,6 +47,8 @@ MODULE_FIRMWARE("amdgpu/vega20_asd.bin");
+
+ #define smnMP1_FIRMWARE_FLAGS 0x3010028
+
++static uint32_t sos_old_versions[] = {1517616, 1510592, 1448594, 1446554};
++
+ static int
+ psp_v3_1_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *type)
+ {
+@@ -210,12 +212,31 @@ static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
+ return ret;
+ }
+
++static bool psp_v3_1_match_version(struct amdgpu_device *adev, uint32_t ver)
++{
++ int i;
++
++ if (ver == adev->psp.sos_fw_version)
++ return true;
++
++ /*
++ * Double check if the latest four legacy versions.
++ * If yes, it is still the right version.
++ */
++ for (i = 0; i < sizeof(sos_old_versions) / sizeof(uint32_t); i++) {
++ if (sos_old_versions[i] == adev->psp.sos_fw_version)
++ return true;
++ }
++
++ return false;
++}
++
+ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
+ {
+ int ret;
+ unsigned int psp_gfxdrv_command_reg = 0;
+ struct amdgpu_device *adev = psp->adev;
+- uint32_t sol_reg;
++ uint32_t sol_reg, ver;
+
+ /* Check sOS sign of life register to confirm sys driver and sOS
+ * are already been loaded.
+@@ -248,6 +269,10 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
+ RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81),
+ 0, true);
+
++ ver = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
++ if (!psp_v3_1_match_version(adev, ver))
++ DRM_WARN("SOS version doesn't match\n");
++
+ return ret;
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4580-drm-amdgpu-fix-the-missed-vcn-fw-version-report.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4580-drm-amdgpu-fix-the-missed-vcn-fw-version-report.patch
new file mode 100644
index 00000000..094617c6
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4580-drm-amdgpu-fix-the-missed-vcn-fw-version-report.patch
@@ -0,0 +1,32 @@
+From 20673d1552d89f2fb6ce5fbbb9de7f0c6d4e8210 Mon Sep 17 00:00:00 2001
+From: Huang Rui <ray.huang@amd.com>
+Date: Wed, 23 May 2018 11:18:43 +0800
+Subject: [PATCH 4580/5725] drm/amdgpu: fix the missed vcn fw version report
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The vcn.fw_version setting was missed when initializing the vcn microcode; it is
+used to report the vcn ucode version via the amdgpu_firmware_info sysfs interface.
+
+Signed-off-by: Huang Rui <ray.huang@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+index 41a189d..8f2785a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+@@ -82,6 +82,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
+ }
+
+ hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
++ adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
+ family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
+ version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
+ version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4581-drm-amdgpu-df-fix-potential-array-out-of-bounds-read.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4581-drm-amdgpu-df-fix-potential-array-out-of-bounds-read.patch
new file mode 100644
index 00000000..0fb87676
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4581-drm-amdgpu-df-fix-potential-array-out-of-bounds-read.patch
@@ -0,0 +1,36 @@
+From b87c84aecc8e2692b7924be30c6723cd9c269fd7 Mon Sep 17 00:00:00 2001
+From: Colin Ian King <colin.king@canonical.com>
+Date: Wed, 30 May 2018 17:41:44 +0100
+Subject: [PATCH 4581/5725] drm/amdgpu/df: fix potential array out-of-bounds
+ read
+
+The comparison with the number of elements in the array df_v3_6_channel_number
+is off-by-one and can produce an array out-of-bounds read if
+fb_channel_number is equal to the number of elements of the array. Fix
+this by changing the comparison to >= instead of >.
+
+Detected by CoverityScan, CID#1469489 ("Out-of-bounds read")
+
+Fixes: 13b581502d51 ("drm/amdgpu/df: implement df v3_6 callback functions (v2)")
+Signed-off-by: Colin Ian King <colin.king@canonical.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/df_v3_6.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+index 60608b3..d5ebe56 100644
+--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
++++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+@@ -64,7 +64,7 @@ static u32 df_v3_6_get_hbm_channel_number(struct amdgpu_device *adev)
+ int fb_channel_number;
+
+ fb_channel_number = adev->df_funcs->get_fb_channel_number(adev);
+- if (fb_channel_number > ARRAY_SIZE(df_v3_6_channel_number))
++ if (fb_channel_number >= ARRAY_SIZE(df_v3_6_channel_number))
+ fb_channel_number = 0;
+
+ return df_v3_6_channel_number[fb_channel_number];
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4582-Revert-drm-amdgpu-Add-an-ATPX-quirk-for-hybrid-lapto.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4582-Revert-drm-amdgpu-Add-an-ATPX-quirk-for-hybrid-lapto.patch
new file mode 100644
index 00000000..a30e81cd
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4582-Revert-drm-amdgpu-Add-an-ATPX-quirk-for-hybrid-lapto.patch
@@ -0,0 +1,35 @@
+From 4b7eca48e0cec9f6a6e9ffe0dc2b4956669429c6 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 24 May 2018 14:37:39 -0500
+Subject: [PATCH 4582/5725] Revert "drm/amdgpu: Add an ATPX quirk for hybrid
+ laptop"
+
+This reverts commit 13b40935cf64f59b93cf1c716a2033488e5a228c.
+
+This was a workaround for a bug in the HDA driver that prevented
+the HDA audio chip from going into runtime pm which prevented
+the GPU from going into runtime pm.
+
+Bug: https://bugs.freedesktop.org/show_bug.cgi?id=106597
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+index d5186b9..7a22755 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+@@ -568,7 +568,6 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
+ { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
+- { 0x1002, 0x67DF, 0x1028, 0x0774, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ { 0x1002, 0x699F, 0x1028, 0x0814, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ { 0, 0, 0, 0, 0 },
+ };
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4583-Revert-drm-amdgpu-add-new-device-to-use-atpx-quirk.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4583-Revert-drm-amdgpu-add-new-device-to-use-atpx-quirk.patch
new file mode 100644
index 00000000..785a052d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4583-Revert-drm-amdgpu-add-new-device-to-use-atpx-quirk.patch
@@ -0,0 +1,35 @@
+From 606f715671e0a895f6c810ff1a1273ac2a3eaff3 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 24 May 2018 14:40:12 -0500
+Subject: [PATCH 4583/5725] Revert "drm/amdgpu: add new device to use atpx
+ quirk"
+
+This reverts commit 6c24a85d236eb2348d2e221993769fd93c168f65.
+
+This was a workaround for a bug in the HDA driver that prevented
+the HDA audio chip from going into runtime pm which prevented
+the GPU from going into runtime pm.
+
+Bug: https://bugs.freedesktop.org/show_bug.cgi?id=106597
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+index 7a22755..0a30c36 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+@@ -567,7 +567,6 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
+ /* HG _PR3 doesn't seem to work on this A+A weston board */
+ { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
+- { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ { 0x1002, 0x699F, 0x1028, 0x0814, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ { 0, 0, 0, 0, 0 },
+ };
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4584-Partially-revert-drm-amdgpu-add-atpx-quirk-handling-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4584-Partially-revert-drm-amdgpu-add-atpx-quirk-handling-.patch
new file mode 100644
index 00000000..d0bdc747
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4584-Partially-revert-drm-amdgpu-add-atpx-quirk-handling-.patch
@@ -0,0 +1,37 @@
+From 213f936d0b9f988790eff1d8a575c033aaa55af1 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 24 May 2018 14:43:03 -0500
+Subject: [PATCH 4584/5725] Partially revert: drm/amdgpu: add atpx quirk
+ handling (v2)
+
+Remove the platform specific quirks, but leave the quirk
+infrastructure in place.
+
+This was a workaround for a bug in the HDA driver that prevented
+the HDA audio chip from going into runtime pm which prevented
+the GPU from going into runtime pm.
+
+Bug: https://bugs.freedesktop.org/show_bug.cgi?id=106597
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+index 0a30c36..7f46d1a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+@@ -565,8 +565,6 @@ static const struct vga_switcheroo_handler amdgpu_atpx_handler = {
+
+ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
+ /* HG _PR3 doesn't seem to work on this A+A weston board */
+- { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
+- { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ { 0x1002, 0x699F, 0x1028, 0x0814, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ { 0, 0, 0, 0, 0 },
+ };
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4585-drm-amdgpu-pp-switch-the-default-dpm-implementation-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4585-drm-amdgpu-pp-switch-the-default-dpm-implementation-.patch
new file mode 100644
index 00000000..1f49dd06
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4585-drm-amdgpu-pp-switch-the-default-dpm-implementation-.patch
@@ -0,0 +1,49 @@
+From fa9fb4403bf47cf19de5fad9ea515c66bfc8f9dc Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 31 May 2018 12:46:08 -0500
+Subject: [PATCH 4585/5725] drm/amdgpu/pp: switch the default dpm
+ implementation for CI
+
+Switch hawaii and bonaire to use powerplay rather than the old
+dpm implementation. Powerplay supports more features and is
+better maintained. Ultimately, we can drop the older dpm
+implementation like we did for other older asics.
+
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Reviewed-by: Rex Zhu <rezhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/cik.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
+index 8ff4c60..702e257 100644
+--- a/drivers/gpu/drm/amd/amdgpu/cik.c
++++ b/drivers/gpu/drm/amd/amdgpu/cik.c
+@@ -2003,9 +2003,9 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
+ amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
+ if (amdgpu_dpm == -1)
+- amdgpu_device_ip_block_add(adev, &ci_smu_ip_block);
+- else
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
++ else
++ amdgpu_device_ip_block_add(adev, &ci_smu_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+ #if defined(CONFIG_DRM_AMD_DC)
+@@ -2024,9 +2024,9 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
+ amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
+ if (amdgpu_dpm == -1)
+- amdgpu_device_ip_block_add(adev, &ci_smu_ip_block);
+- else
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
++ else
++ amdgpu_device_ip_block_add(adev, &ci_smu_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+ #if defined(CONFIG_DRM_AMD_DC)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4586-drm-amdgpu-Add-documentation-for-PRIME-related-code.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4586-drm-amdgpu-Add-documentation-for-PRIME-related-code.patch
new file mode 100644
index 00000000..424039b1
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4586-drm-amdgpu-Add-documentation-for-PRIME-related-code.patch
@@ -0,0 +1,231 @@
+From 4bd7a3507a307848df7842f599c8295d45575948 Mon Sep 17 00:00:00 2001
+From: Michel Daenzer <michel.daenzer@amd.com>
+Date: Tue, 29 May 2018 18:33:41 +0200
+Subject: [PATCH 4586/5725] drm/amdgpu: Add documentation for PRIME related
+ code
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+
+Conflicts:
+ drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+
+Change-Id: I5bd9e7536250c9a1e9f914b87443c3b63aa2988f
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 119 ++++++++++++++++++++++++++++++
+ 1 file changed, 119 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+index 63d5a01..5472366 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+@@ -23,6 +23,14 @@
+ *
+ * Authors: Alex Deucher
+ */
++
++/**
++ * DOC: PRIME Buffer Sharing
++ *
++ * The following callback implementations are used for :ref:`sharing GEM buffer
++ * objects between different devices via PRIME <prime_buffer_sharing>`.
++ */
++
+ #include <drm/drmP.h>
+
+ #include "amdgpu.h"
+@@ -30,6 +38,14 @@
+ #include <drm/amdgpu_drm.h>
+ #include <linux/dma-buf.h>
+
++/**
++ * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
++ * implementation
++ * @obj: GEM buffer object
++ *
++ * Returns:
++ * A scatter/gather table for the pinned pages of the buffer object's memory.
++ */
+ struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
+ {
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+@@ -38,6 +54,15 @@ struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
+ return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
+ }
+
++/**
++ * amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation
++ * @obj: GEM buffer object
++ *
++ * Sets up an in-kernel virtual mapping of the buffer object's memory.
++ *
++ * Returns:
++ * The virtual address of the mapping or an error pointer.
++ */
+ void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
+ {
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+@@ -51,6 +76,13 @@ void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
+ return bo->dma_buf_vmap.virtual;
+ }
+
++/**
++ * amdgpu_gem_prime_vunmap - &dma_buf_ops.vunmap implementation
++ * @obj: GEM buffer object
++ * @vaddr: virtual address (unused)
++ *
++ * Tears down the in-kernel virtual mapping of the buffer object's memory.
++ */
+ void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+ {
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+@@ -58,6 +90,17 @@ void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+ ttm_bo_kunmap(&bo->dma_buf_vmap);
+ }
+
++/**
++ * amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation
++ * @obj: GEM buffer object
++ * @vma: virtual memory area
++ *
++ * Sets up a userspace mapping of the buffer object's memory in the given
++ * virtual memory area.
++ *
++ * Returns:
++ * 0 on success or negative error code.
++ */
+ int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+ {
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+@@ -92,6 +135,19 @@ int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma
+ return ret;
+ }
+
++/**
++ * amdgpu_gem_prime_import_sg_table - &drm_driver.gem_prime_import_sg_table
++ * implementation
++ * @dev: DRM device
++ * @attach: DMA-buf attachment
++ * @sg: Scatter/gather table
++ *
++ * Import shared DMA buffer memory exported by another device.
++ *
++ * Returns:
++ * A new GEM buffer object of the given DRM device, representing the memory
++ * described by the given DMA-buf attachment and scatter/gather table.
++ */
+ struct drm_gem_object *
+ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+@@ -135,6 +191,19 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
+ }
+
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) || !defined(BUILD_AS_DKMS)
++/**
++ * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
++ * @dma_buf: shared DMA buffer
++ * @target_dev: target device
++ * @attach: DMA-buf attachment
++ *
++ * Makes sure that the shared DMA buffer can be accessed by the target device.
++ * For now, simply pins it to the GTT domain, where it should be accessible by
++ * all DMA devices.
++ *
++ * Returns:
++ * 0 on success or negative error code.
++ */
+ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
+ struct device *target_dev,
+ struct dma_buf_attachment *attach)
+@@ -184,6 +253,14 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
+ return r;
+ }
+
++/**
++ * amdgpu_gem_map_detach - &dma_buf_ops.detach implementation
++ * @dma_buf: shared DMA buffer
++ * @attach: DMA-buf attachment
++ *
++ * This is called when a shared DMA buffer no longer needs to be accessible by
++ * the other device. For now, simply unpins the buffer from GTT.
++ */
+ static void amdgpu_gem_map_detach(struct dma_buf *dma_buf,
+ struct dma_buf_attachment *attach)
+ {
+@@ -205,6 +282,13 @@ static void amdgpu_gem_map_detach(struct dma_buf *dma_buf,
+ }
+ #endif
+
++/**
++ * amdgpu_gem_prime_res_obj - &drm_driver.gem_prime_res_obj implementation
++ * @obj: GEM buffer object
++ *
++ * Returns:
++ * The buffer object's reservation object.
++ */
+ struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
+ {
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+@@ -213,6 +297,18 @@ struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
+ }
+
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) || !defined(BUILD_AS_DKMS)
++/**
++ * amdgpu_gem_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
++ * @dma_buf: shared DMA buffer
++ * @direction: direction of DMA transfer
++ *
++ * This is called before CPU access to the shared DMA buffer's memory. If it's
++ * a read access, the buffer is moved to the GTT domain if possible, for optimal
++ * CPU read performance.
++ *
++ * Returns:
++ * 0 on success or negative error code.
++ */
+ static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
+ enum dma_data_direction direction)
+ {
+@@ -258,6 +354,18 @@ const struct dma_buf_ops amdgpu_dmabuf_ops = {
+ };
+ #endif
+
++/**
++ * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
++ * @dev: DRM device
++ * @gobj: GEM buffer object
++ * @flags: flags like DRM_CLOEXEC and DRM_RDWR
++ *
++ * The main work is done by the &drm_gem_prime_export helper, which in turn
++ * uses &amdgpu_gem_prime_res_obj.
++ *
++ * Returns:
++ * Shared DMA buffer representing the GEM buffer object from the given device.
++ */
+ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *gobj,
+ int flags)
+@@ -280,6 +388,17 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
+ }
+
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) || !defined(BUILD_AS_DKMS)
++/**
++ * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
++ * @dev: DRM device
++ * @dma_buf: Shared DMA buffer
++ *
++ * The main work is done by the &drm_gem_prime_import helper, which in turn
++ * uses &amdgpu_gem_prime_import_sg_table.
++ *
++ * Returns:
++ * GEM buffer object representing the shared DMA buffer for the given device.
++ */
+ struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+ {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4587-drm-amdgpu-replace-mutex-with-spin_lock-V2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4587-drm-amdgpu-replace-mutex-with-spin_lock-V2.patch
new file mode 100644
index 00000000..c2e8c782
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4587-drm-amdgpu-replace-mutex-with-spin_lock-V2.patch
@@ -0,0 +1,107 @@
+From 849199947b4d0c7c032a40cd23e30c8715f464bf Mon Sep 17 00:00:00 2001
+From: Shirish S <shirish.s@amd.com>
+Date: Mon, 28 May 2018 16:11:43 +0530
+Subject: [PATCH 4587/5725] drm/amdgpu: replace mutex with spin_lock (V2)
+
+Mutexes lead to sleeps, which should be avoided in
+atomic context.
+Hence this patch replaces them with spin_locks.
+
+Below is the stack trace:
+
+BUG: sleeping function called from invalid context at kernel/locking/mutex.c:**
+in_atomic(): 1, irqs_disabled(): 1, pid: 89, name: kworker/u4:3
+CPU: 1 PID: 89 Comm: kworker/u4:3 Tainted: G W 4.14.43 #8
+Workqueue: events_unbound commit_work
+Call Trace:
+ dump_stack+0x4d/0x63
+ ___might_sleep+0x11f/0x12e
+ mutex_lock+0x20/0x42
+ amdgpu_atom_execute_table+0x26/0x72
+ enable_disp_power_gating_v2_1+0x85/0xae
+ dce110_enable_display_power_gating+0x83/0x1b1
+ dce110_power_down_fe+0x4a/0x6d
+ dc_post_update_surfaces_to_stream+0x59/0x87
+ amdgpu_dm_do_flip+0x239/0x298
+ amdgpu_dm_commit_planes.isra.23+0x379/0x54b
+ ? drm_calc_timestamping_constants+0x14b/0x15c
+ amdgpu_dm_atomic_commit_tail+0x4fc/0x5d2
+ ? wait_for_common+0x5b/0x69
+ commit_tail+0x42/0x64
+ process_one_work+0x1b0/0x314
+ worker_thread+0x1cb/0x2c1
+ ? create_worker+0x1da/0x1da
+ kthread+0x156/0x15e
+ ? kthread_flush_work+0xea/0xea
+ ret_from_fork+0x22/0x40
+
+V2: Added stack trace in commit message.
+
+Change-Id: I3cb80912d854a8021286de778e844cbe687ed7f3
+Signed-off-by: Shirish S <shirish.s@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/atom.c | 4 ++--
+ drivers/gpu/drm/amd/amdgpu/atom.h | 3 ++-
+ 3 files changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+index b5773e8..5ebab72 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+@@ -2033,7 +2033,7 @@ int amdgpu_atombios_init(struct amdgpu_device *adev)
+ return -ENOMEM;
+ }
+
+- mutex_init(&adev->mode_info.atom_context->mutex);
++ spin_lock_init(&adev->mode_info.atom_context->lock);
+ if (adev->is_atom_fw) {
+ amdgpu_atomfirmware_scratch_regs_init(adev);
+ amdgpu_atomfirmware_allocate_fb_scratch(adev);
+diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
+index 69500a8..bfd98f0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/atom.c
++++ b/drivers/gpu/drm/amd/amdgpu/atom.c
+@@ -1261,7 +1261,7 @@ int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t * pa
+ {
+ int r;
+
+- mutex_lock(&ctx->mutex);
++ spin_lock(&ctx->lock);
+ /* reset data block */
+ ctx->data_block = 0;
+ /* reset reg block */
+@@ -1274,7 +1274,7 @@ int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t * pa
+ ctx->divmul[0] = 0;
+ ctx->divmul[1] = 0;
+ r = amdgpu_atom_execute_table_locked(ctx, index, params);
+- mutex_unlock(&ctx->mutex);
++ spin_unlock(&ctx->lock);
+ return r;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/atom.h b/drivers/gpu/drm/amd/amdgpu/atom.h
+index a391709..54063e2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/atom.h
++++ b/drivers/gpu/drm/amd/amdgpu/atom.h
+@@ -26,6 +26,7 @@
+ #define ATOM_H
+
+ #include <linux/types.h>
++#include <linux/spinlock_types.h>
+ #include <drm/drmP.h>
+
+ #define ATOM_BIOS_MAGIC 0xAA55
+@@ -125,7 +126,7 @@ struct card_info {
+
+ struct atom_context {
+ struct card_info *card;
+- struct mutex mutex;
++ spinlock_t lock;
+ void *bios;
+ uint32_t cmd_table, data_table;
+ uint16_t *iio;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4588-drm-amdgpu-pp-replace-mutex-with-spin_lock-V2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4588-drm-amdgpu-pp-replace-mutex-with-spin_lock-V2.patch
new file mode 100644
index 00000000..1aaba396
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4588-drm-amdgpu-pp-replace-mutex-with-spin_lock-V2.patch
@@ -0,0 +1,573 @@
+From 277d7dda6e7cf33e2cee6801564371b2894ea452 Mon Sep 17 00:00:00 2001
+From: Shirish S <shirish.s@amd.com>
+Date: Tue, 29 May 2018 09:36:44 +0530
+Subject: [PATCH 4588/5725] drm/amdgpu/pp: replace mutex with spin_lock (V2)
+
+This patch replaces the usage of a mutex with a spin_lock
+to avoid sleeping in atomic context.
+
+Below is the stack trace:
+
+BUG: sleeping function called from invalid context at kernel/locking/mutex.c:**
+in_atomic(): 1, irqs_disabled(): 1, pid: 5, name: kworker/u4:0
+CPU: 1 PID: 5 Comm: kworker/u4:0 Tainted: G W 4.14.43 #9
+Workqueue: events_unbound commit_work
+Call Trace:
+ dump_stack+0x4d/0x63
+ ___might_sleep+0x11f/0x12e
+ mutex_lock+0x20/0x42
+ amd_powerplay_display_configuration_change+0x32/0x51
+ dm_pp_apply_display_requirements+0x10b/0x118
+ dce110_set_bandwidth+0x1a1/0x1b5
+ dc_commit_updates_for_stream+0x14c/0x4cf
+ ? amdgpu_get_crtc_scanoutpos+0x82/0x16b
+ amdgpu_dm_do_flip+0x239/0x298
+ amdgpu_dm_commit_planes.isra.23+0x379/0x54b
+ ? drm_calc_timestamping_constants+0x14b/0x15c
+ amdgpu_dm_atomic_commit_tail+0x4fc/0x5d2
+ ? wait_for_common+0x5b/0x69
+ commit_tail+0x42/0x64
+ process_one_work+0x1b0/0x314
+ worker_thread+0x1cb/0x2c1
+ ? create_worker+0x1da/0x1da
+ kthread+0x156/0x15e
+ ? kthread_flush_work+0xea/0xea
+ ret_from_fork+0x22/0x40
+
+V2: Added stack trace in the commit message.
+
+Signed-off-by: Shirish S <shirish.s@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 152 +++++++++++++-------------
+ drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 2 +-
+ 2 files changed, 77 insertions(+), 77 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+index 76fc45f..3cdf852 100644
+--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
++++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+@@ -182,10 +182,10 @@ static int pp_late_init(void *handle)
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+
+ if (hwmgr && hwmgr->pm_en) {
+- mutex_lock(&hwmgr->smu_lock);
++ spin_lock(&hwmgr->smu_lock);
+ hwmgr_handle_task(hwmgr,
+ AMD_PP_TASK_COMPLETE_INIT, NULL);
+- mutex_unlock(&hwmgr->smu_lock);
++ spin_unlock(&hwmgr->smu_lock);
+ }
+
+ if (adev->pm.smu_prv_buffer_size != 0)
+@@ -368,11 +368,11 @@ static int pp_dpm_force_performance_level(void *handle,
+ if (level == hwmgr->dpm_level)
+ return 0;
+
+- mutex_lock(&hwmgr->smu_lock);
++ spin_lock(&hwmgr->smu_lock);
+ pp_dpm_en_umd_pstate(hwmgr, &level);
+ hwmgr->request_dpm_level = level;
+ hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
+- mutex_unlock(&hwmgr->smu_lock);
++ spin_unlock(&hwmgr->smu_lock);
+
+ return 0;
+ }
+@@ -386,9 +386,9 @@ static enum amd_dpm_forced_level pp_dpm_get_performance_level(
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
+
+- mutex_lock(&hwmgr->smu_lock);
++ spin_lock(&hwmgr->smu_lock);
+ level = hwmgr->dpm_level;
+- mutex_unlock(&hwmgr->smu_lock);
++ spin_unlock(&hwmgr->smu_lock);
+ return level;
+ }
+
+@@ -404,9 +404,9 @@ static uint32_t pp_dpm_get_sclk(void *handle, bool low)
+ pr_info("%s was not implemented.\n", __func__);
+ return 0;
+ }
+- mutex_lock(&hwmgr->smu_lock);
++ spin_lock(&hwmgr->smu_lock);
+ clk = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
+- mutex_unlock(&hwmgr->smu_lock);
++ spin_unlock(&hwmgr->smu_lock);
+ return clk;
+ }
+
+@@ -422,9 +422,9 @@ static uint32_t pp_dpm_get_mclk(void *handle, bool low)
+ pr_info("%s was not implemented.\n", __func__);
+ return 0;
+ }
+- mutex_lock(&hwmgr->smu_lock);
++ spin_lock(&hwmgr->smu_lock);
+ clk = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
+- mutex_unlock(&hwmgr->smu_lock);
++ spin_unlock(&hwmgr->smu_lock);
+ return clk;
+ }
+
+@@ -439,9 +439,9 @@ static void pp_dpm_powergate_vce(void *handle, bool gate)
+ pr_info("%s was not implemented.\n", __func__);
+ return;
+ }
+- mutex_lock(&hwmgr->smu_lock);
++ spin_lock(&hwmgr->smu_lock);
+ hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
+- mutex_unlock(&hwmgr->smu_lock);
++ spin_unlock(&hwmgr->smu_lock);
+ }
+
+ static void pp_dpm_powergate_uvd(void *handle, bool gate)
+@@ -455,9 +455,9 @@ static void pp_dpm_powergate_uvd(void *handle, bool gate)
+ pr_info("%s was not implemented.\n", __func__);
+ return;
+ }
+- mutex_lock(&hwmgr->smu_lock);
++ spin_lock(&hwmgr->smu_lock);
+ hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
+- mutex_unlock(&hwmgr->smu_lock);
++ spin_unlock(&hwmgr->smu_lock);
+ }
+
+ static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
+@@ -469,9 +469,9 @@ static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
+
+- mutex_lock(&hwmgr->smu_lock);
++ spin_lock(&hwmgr->smu_lock);
+ ret = hwmgr_handle_task(hwmgr, task_id, user_state);
+- mutex_unlock(&hwmgr->smu_lock);
++ spin_unlock(&hwmgr->smu_lock);
+
+ return ret;
+ }
+@@ -485,7 +485,7 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
+ if (!hwmgr || !hwmgr->pm_en || !hwmgr->current_ps)
+ return -EINVAL;
+
+- mutex_lock(&hwmgr->smu_lock);
++ spin_lock(&hwmgr->smu_lock);
+
+ state = hwmgr->current_ps;
+
+@@ -506,7 +506,7 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
+ pm_type = POWER_STATE_TYPE_DEFAULT;
+ break;
+ }
+- mutex_unlock(&hwmgr->smu_lock);
++ spin_unlock(&hwmgr->smu_lock);
+
+ return pm_type;
+ }
+@@ -522,9 +522,9 @@ static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
+ pr_info("%s was not implemented.\n", __func__);
+ return;
+ }
+- mutex_lock(&hwmgr->smu_lock);
++ spin_lock(&hwmgr->smu_lock);
+ hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
+- mutex_unlock(&hwmgr->smu_lock);
++ spin_unlock(&hwmgr->smu_lock);
+ }
+
+ static uint32_t pp_dpm_get_fan_control_mode(void *handle)
+@@ -539,9 +539,9 @@ static uint32_t pp_dpm_get_fan_control_mode(void *handle)
+ pr_info("%s was not implemented.\n", __func__);
+ return 0;
+ }
+- mutex_lock(&hwmgr->smu_lock);
++ spin_lock(&hwmgr->smu_lock);
+ mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
+- mutex_unlock(&hwmgr->smu_lock);
++ spin_unlock(&hwmgr->smu_lock);
+ return mode;
+ }
+
+@@ -557,9 +557,9 @@ static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
+ pr_info("%s was not implemented.\n", __func__);
+ return 0;
+ }
+- mutex_lock(&hwmgr->smu_lock);
++ spin_lock(&hwmgr->smu_lock);
+ ret = hwmgr->hwmgr_func->set_fan_speed_percent(hwmgr, percent);
+- mutex_unlock(&hwmgr->smu_lock);
++ spin_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+
+@@ -576,9 +576,9 @@ static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
+ return 0;
+ }
+
+- mutex_lock(&hwmgr->smu_lock);
++ spin_lock(&hwmgr->smu_lock);
+ ret = hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed);
+- mutex_unlock(&hwmgr->smu_lock);
++ spin_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+
+@@ -593,9 +593,9 @@ static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
+ if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
+ return -EINVAL;
+
+- mutex_lock(&hwmgr->smu_lock);
++ spin_lock(&hwmgr->smu_lock);
+ ret = hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
+- mutex_unlock(&hwmgr->smu_lock);
++ spin_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+
+@@ -610,7 +610,7 @@ static int pp_dpm_get_pp_num_states(void *handle,
+ if (!hwmgr || !hwmgr->pm_en ||!hwmgr->ps)
+ return -EINVAL;
+
+- mutex_lock(&hwmgr->smu_lock);
++ spin_lock(&hwmgr->smu_lock);
+
+ data->nums = hwmgr->num_ps;
+
+@@ -634,7 +634,7 @@ static int pp_dpm_get_pp_num_states(void *handle,
+ data->states[i] = POWER_STATE_TYPE_DEFAULT;
+ }
+ }
+- mutex_unlock(&hwmgr->smu_lock);
++ spin_unlock(&hwmgr->smu_lock);
+ return 0;
+ }
+
+@@ -646,10 +646,10 @@ static int pp_dpm_get_pp_table(void *handle, char **table)
+ if (!hwmgr || !hwmgr->pm_en ||!hwmgr->soft_pp_table)
+ return -EINVAL;
+
+- mutex_lock(&hwmgr->smu_lock);
++ spin_lock(&hwmgr->smu_lock);
+ *table = (char *)hwmgr->soft_pp_table;
+ size = hwmgr->soft_pp_table_size;
+- mutex_unlock(&hwmgr->smu_lock);
++ spin_unlock(&hwmgr->smu_lock);
+ return size;
+ }
+
+@@ -677,7 +677,7 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
+
+- mutex_lock(&hwmgr->smu_lock);
++ spin_lock(&hwmgr->smu_lock);
+ if (!hwmgr->hardcode_pp_table) {
+ hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
+ hwmgr->soft_pp_table_size,
+@@ -699,10 +699,10 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
+ if (ret)
+ goto err;
+ }
+- mutex_unlock(&hwmgr->smu_lock);
++ spin_unlock(&hwmgr->smu_lock);
+ return 0;
+ err:
+- mutex_unlock(&hwmgr->smu_lock);
++ spin_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+
+@@ -719,12 +719,12 @@ static int pp_dpm_force_clock_level(void *handle,
+ pr_info("%s was not implemented.\n", __func__);
+ return 0;
+ }
+- mutex_lock(&hwmgr->smu_lock);
++ spin_lock(&hwmgr->smu_lock);
+ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
+ ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
+ else
+ ret = -EINVAL;
+- mutex_unlock(&hwmgr->smu_lock);
++ spin_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+
+@@ -741,9 +741,9 @@ static int pp_dpm_print_clock_levels(void *handle,
+ pr_info("%s was not implemented.\n", __func__);
+ return 0;
+ }
+- mutex_lock(&hwmgr->smu_lock);
++ spin_lock(&hwmgr->smu_lock);
+ ret = hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
+- mutex_unlock(&hwmgr->smu_lock);
++ spin_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+
+@@ -759,9 +759,9 @@ static int pp_dpm_get_sclk_od(void *handle)
+ pr_info("%s was not implemented.\n", __func__);
+ return 0;
+ }
+- mutex_lock(&hwmgr->smu_lock);
++ spin_lock(&hwmgr->smu_lock);
+ ret = hwmgr->hwmgr_func->get_sclk_od(hwmgr);
+- mutex_unlock(&hwmgr->smu_lock);
++ spin_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+
+@@ -778,9 +778,9 @@ static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
+ return 0;
+ }
+
+- mutex_lock(&hwmgr->smu_lock);
++ spin_lock(&hwmgr->smu_lock);
+ ret = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
+- mutex_unlock(&hwmgr->smu_lock);
++ spin_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+
+@@ -796,9 +796,9 @@ static int pp_dpm_get_mclk_od(void *handle)
+ pr_info("%s was not implemented.\n", __func__);
+ return 0;
+ }
+- mutex_lock(&hwmgr->smu_lock);
++ spin_lock(&hwmgr->smu_lock);
+ ret = hwmgr->hwmgr_func->get_mclk_od(hwmgr);
+- mutex_unlock(&hwmgr->smu_lock);
++ spin_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+
+@@ -814,9 +814,9 @@ static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
+ pr_info("%s was not implemented.\n", __func__);
+ return 0;
+ }
+- mutex_lock(&hwmgr->smu_lock);
++ spin_lock(&hwmgr->smu_lock);
+ ret = hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
+- mutex_unlock(&hwmgr->smu_lock);
++ spin_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+
+@@ -837,9 +837,9 @@ static int pp_dpm_read_sensor(void *handle, int idx,
+ *((uint32_t *)value) = hwmgr->pstate_mclk;
+ return 0;
+ default:
+- mutex_lock(&hwmgr->smu_lock);
++ spin_lock(&hwmgr->smu_lock);
+ ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
+- mutex_unlock(&hwmgr->smu_lock);
++ spin_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+ }
+@@ -884,10 +884,10 @@ static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
+ pr_info("%s was not implemented.\n", __func__);
+ return ret;
+ }
+- mutex_lock(&hwmgr->smu_lock);
++ spin_lock(&hwmgr->smu_lock);
+ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
+ ret = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
+- mutex_unlock(&hwmgr->smu_lock);
++ spin_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+
+@@ -924,7 +924,7 @@ static int pp_dpm_switch_power_profile(void *handle,
+ if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
+ return -EINVAL;
+
+- mutex_lock(&hwmgr->smu_lock);
++ spin_lock(&hwmgr->smu_lock);
+
+ if (!en) {
+ hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
+@@ -940,7 +940,7 @@ static int pp_dpm_switch_power_profile(void *handle,
+
+ if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
+ hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
+- mutex_unlock(&hwmgr->smu_lock);
++ spin_unlock(&hwmgr->smu_lock);
+
+ return 0;
+ }
+@@ -963,10 +963,10 @@ static int pp_set_power_limit(void *handle, uint32_t limit)
+ if (limit > hwmgr->default_power_limit)
+ return -EINVAL;
+
+- mutex_lock(&hwmgr->smu_lock);
++ spin_lock(&hwmgr->smu_lock);
+ hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
+ hwmgr->power_limit = limit;
+- mutex_unlock(&hwmgr->smu_lock);
++ spin_unlock(&hwmgr->smu_lock);
+ return 0;
+ }
+
+@@ -977,14 +977,14 @@ static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
+ if (!hwmgr || !hwmgr->pm_en ||!limit)
+ return -EINVAL;
+
+- mutex_lock(&hwmgr->smu_lock);
++ spin_lock(&hwmgr->smu_lock);
+
+ if (default_limit)
+ *limit = hwmgr->default_power_limit;
+ else
+ *limit = hwmgr->power_limit;
+
+- mutex_unlock(&hwmgr->smu_lock);
++ spin_unlock(&hwmgr->smu_lock);
+
+ return 0;
+ }
+@@ -997,9 +997,9 @@ static int pp_display_configuration_change(void *handle,
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
+
+- mutex_lock(&hwmgr->smu_lock);
++ spin_lock(&hwmgr->smu_lock);
+ phm_store_dal_configuration_data(hwmgr, display_config);
+- mutex_unlock(&hwmgr->smu_lock);
++ spin_unlock(&hwmgr->smu_lock);
+ return 0;
+ }
+
+@@ -1012,9 +1012,9 @@ static int pp_get_display_power_level(void *handle,
+ if (!hwmgr || !hwmgr->pm_en ||!output)
+ return -EINVAL;
+
+- mutex_lock(&hwmgr->smu_lock);
++ spin_lock(&hwmgr->smu_lock);
+ ret = phm_get_dal_power_level(hwmgr, output);
+- mutex_unlock(&hwmgr->smu_lock);
++ spin_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+
+@@ -1029,7 +1029,7 @@ static int pp_get_current_clocks(void *handle,
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
+
+- mutex_lock(&hwmgr->smu_lock);
++ spin_lock(&hwmgr->smu_lock);
+
+ phm_get_dal_power_level(hwmgr, &simple_clocks);
+
+@@ -1043,7 +1043,7 @@ static int pp_get_current_clocks(void *handle,
+
+ if (ret) {
+ pr_info("Error in phm_get_clock_info \n");
+- mutex_unlock(&hwmgr->smu_lock);
++ spin_unlock(&hwmgr->smu_lock);
+ return -EINVAL;
+ }
+
+@@ -1063,7 +1063,7 @@ static int pp_get_current_clocks(void *handle,
+ clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
+ clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
+ }
+- mutex_unlock(&hwmgr->smu_lock);
++ spin_unlock(&hwmgr->smu_lock);
+ return 0;
+ }
+
+@@ -1078,9 +1078,9 @@ static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struc
+ if (clocks == NULL)
+ return -EINVAL;
+
+- mutex_lock(&hwmgr->smu_lock);
++ spin_lock(&hwmgr->smu_lock);
+ ret = phm_get_clock_by_type(hwmgr, type, clocks);
+- mutex_unlock(&hwmgr->smu_lock);
++ spin_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+
+@@ -1094,9 +1094,9 @@ static int pp_get_clock_by_type_with_latency(void *handle,
+ if (!hwmgr || !hwmgr->pm_en ||!clocks)
+ return -EINVAL;
+
+- mutex_lock(&hwmgr->smu_lock);
++ spin_lock(&hwmgr->smu_lock);
+ ret = phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
+- mutex_unlock(&hwmgr->smu_lock);
++ spin_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+
+@@ -1110,11 +1110,11 @@ static int pp_get_clock_by_type_with_voltage(void *handle,
+ if (!hwmgr || !hwmgr->pm_en ||!clocks)
+ return -EINVAL;
+
+- mutex_lock(&hwmgr->smu_lock);
++ spin_lock(&hwmgr->smu_lock);
+
+ ret = phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
+
+- mutex_unlock(&hwmgr->smu_lock);
++ spin_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+
+@@ -1127,10 +1127,10 @@ static int pp_set_watermarks_for_clocks_ranges(void *handle,
+ if (!hwmgr || !hwmgr->pm_en ||!wm_with_clock_ranges)
+ return -EINVAL;
+
+- mutex_lock(&hwmgr->smu_lock);
++ spin_lock(&hwmgr->smu_lock);
+ ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
+ wm_with_clock_ranges);
+- mutex_unlock(&hwmgr->smu_lock);
++ spin_unlock(&hwmgr->smu_lock);
+
+ return ret;
+ }
+@@ -1144,9 +1144,9 @@ static int pp_display_clock_voltage_request(void *handle,
+ if (!hwmgr || !hwmgr->pm_en ||!clock)
+ return -EINVAL;
+
+- mutex_lock(&hwmgr->smu_lock);
++ spin_lock(&hwmgr->smu_lock);
+ ret = phm_display_clock_voltage_request(hwmgr, clock);
+- mutex_unlock(&hwmgr->smu_lock);
++ spin_unlock(&hwmgr->smu_lock);
+
+ return ret;
+ }
+@@ -1160,12 +1160,12 @@ static int pp_get_display_mode_validation_clocks(void *handle,
+ if (!hwmgr || !hwmgr->pm_en ||!clocks)
+ return -EINVAL;
+
+- mutex_lock(&hwmgr->smu_lock);
++ spin_lock(&hwmgr->smu_lock);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
+ ret = phm_get_max_high_clocks(hwmgr, clocks);
+
+- mutex_unlock(&hwmgr->smu_lock);
++ spin_unlock(&hwmgr->smu_lock);
+
+ return ret;
+ }
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+index b99fb8a..37d1382 100644
+--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+@@ -709,7 +709,7 @@ struct pp_hwmgr {
+ uint32_t smu_version;
+ bool not_vf;
+ bool pm_en;
+- struct mutex smu_lock;
++ spinlock_t smu_lock;
+
+ uint32_t pp_table_version;
+ void *device;
+--
+2.7.4
+
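
Editor's note: the hunks above convert hwmgr->smu_lock from a struct mutex to a spinlock_t, which means changing both the field declaration and every lock/unlock call site. Below is a minimal kernel-style sketch of that kind of conversion; the struct and function names are illustrative only and are not the powerplay code. Note that patch 4590 further down reverts this conversion.

#include <linux/mutex.h>
#include <linux/spinlock.h>

struct demo_mgr {
        /* was: struct mutex lock; */
        spinlock_t lock;
        unsigned int sensor_value;
};

static void demo_mgr_init(struct demo_mgr *mgr)
{
        /* was: mutex_init(&mgr->lock); */
        spin_lock_init(&mgr->lock);
}

static int demo_mgr_read_sensor(struct demo_mgr *mgr, unsigned int *out)
{
        /* was: mutex_lock(&mgr->lock); */
        spin_lock(&mgr->lock);
        *out = mgr->sensor_value;
        /* was: mutex_unlock(&mgr->lock); */
        spin_unlock(&mgr->lock);
        return 0;
}

Whether a spinlock is appropriate depends on the critical sections never sleeping; the revert in patch 4590 below restores the mutex.
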
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4589-drm-amdgpu-avoid-sleep-while-executing-atombios-tabl.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4589-drm-amdgpu-avoid-sleep-while-executing-atombios-tabl.patch
new file mode 100644
index 00000000..415cbe31
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4589-drm-amdgpu-avoid-sleep-while-executing-atombios-tabl.patch
@@ -0,0 +1,71 @@
+From ad62d7bc1a8dc279b1687bc7bd14a77181f4ff7e Mon Sep 17 00:00:00 2001
+From: Shirish S <shirish.s@amd.com>
+Date: Tue, 29 May 2018 09:23:53 +0530
+Subject: [PATCH 4589/5725] drm/amdgpu: avoid sleep while executing atombios
+ table (V2)
+
+This patch changes kzalloc's flag from GFP_KERNEL to
+GFP_ATOMIC to avoid sleeping in atomic context.
+
+Below is the stack trace:
+
+BUG: sleeping function called from invalid context at mm/slab.h:***
+in_atomic(): 1, irqs_disabled(): 0, pid: 1137, name: DrmThread
+CPU: 1 PID: 1137 Comm: DrmThread Tainted: G W 4.14.43 #10
+Call Trace:
+ dump_stack+0x4d/0x63
+ ___might_sleep+0x11f/0x12e
+ __kmalloc+0x76/0x126
+ amdgpu_atom_execute_table_locked+0xfc/0x285
+ amdgpu_atom_execute_table+0x5d/0x72
+ transmitter_control_v1_5+0xef/0x11a
+ hwss_edp_backlight_control+0x132/0x151
+ dce110_disable_stream+0x133/0x16e
+ core_link_disable_stream+0x1c5/0x23b
+ dce110_reset_hw_ctx_wrap+0xb4/0x1aa
+ dce110_apply_ctx_to_hw+0x4e/0x6da
+ ? generic_reg_get+0x1f/0x33
+ dc_commit_state+0x33f/0x3d2
+ amdgpu_dm_atomic_commit_tail+0x2cf/0x5d2
+ ? wait_for_common+0x5b/0x69
+ commit_tail+0x42/0x64
+ drm_atomic_helper_commit+0xdc/0xf9
+ drm_atomic_helper_set_config+0x5c/0x76
+ __drm_mode_set_config_internal+0x64/0x105
+ drm_mode_setcrtc+0x474/0x56f
+ ? drm_mode_getcrtc+0x155/0x155
+ drm_ioctl_kernel+0x6c/0xa8
+ drm_ioctl+0x267/0x353
+ ? drm_mode_getcrtc+0x155/0x155
+ amdgpu_drm_ioctl+0x4f/0x7f
+ vfs_ioctl+0x21/0x2f
+ do_vfs_ioctl+0x4c4/0x4e7
+ ? security_file_ioctl+0x3b/0x4f
+ SyS_ioctl+0x57/0x79
+ do_syscall_64+0x64/0x72
+ entry_SYSCALL_64_after_hwframe+0x3d/0xa2
+
+V2: Added stack trace in commit message.
+
+Signed-off-by: Shirish S <shirish.s@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/atom.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
+index bfd98f0..da4558c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/atom.c
++++ b/drivers/gpu/drm/amd/amdgpu/atom.c
+@@ -1221,7 +1221,7 @@ static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index,
+ ectx.abort = false;
+ ectx.last_jump = 0;
+ if (ws)
+- ectx.ws = kzalloc(4 * ws, GFP_KERNEL);
++ ectx.ws = kzalloc(4 * ws, GFP_ATOMIC);
+ else
+ ectx.ws = NULL;
+
+--
+2.7.4
+
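
Editor's note: the one-line change above swaps the allocation flag so the workspace can be allocated from the atomic context shown in the stack trace. A hedged, generic illustration of the underlying rule (not the atom.c code) follows: GFP_KERNEL may sleep to reclaim memory and is therefore only valid in process context, while GFP_ATOMIC never sleeps but is more likely to fail under memory pressure.

#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

/* GFP_KERNEL may sleep to reclaim memory, so it must not be used while a
 * spinlock is held or in IRQ context; GFP_ATOMIC never sleeps but fails
 * more readily under memory pressure, so the result must be checked. */
static void *demo_alloc_in_atomic_section(size_t n)
{
        void *buf;

        spin_lock(&demo_lock);
        buf = kzalloc(n, GFP_ATOMIC);   /* GFP_KERNEL here would be a bug */
        spin_unlock(&demo_lock);

        return buf;                     /* may be NULL; caller must check */
}
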
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4590-drm-amdgpu-pp-Revert-replace-mutex-with-spin_lock-V2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4590-drm-amdgpu-pp-Revert-replace-mutex-with-spin_lock-V2.patch
new file mode 100644
index 00000000..fea83cb4
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4590-drm-amdgpu-pp-Revert-replace-mutex-with-spin_lock-V2.patch
@@ -0,0 +1,545 @@
+From d7a781f741fcd2ec96f4416711e0b1b49311af32 Mon Sep 17 00:00:00 2001
+From: Christian Koenig <christian.koenig@amd.com>
+Date: Mon, 4 Jun 2018 12:45:00 +0200
+Subject: [PATCH 4590/5725] drm/amdgpu/pp: Revert "replace mutex with spin_lock
+ (V2)"
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+It breaks compilation. This reverts commit c68c36ebb3aeca3febfa839fd3f95e82525687aa.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 152 +++++++++++++-------------
+ drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 2 +-
+ 2 files changed, 77 insertions(+), 77 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+index 3cdf852..76fc45f 100644
+--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
++++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+@@ -182,10 +182,10 @@ static int pp_late_init(void *handle)
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+
+ if (hwmgr && hwmgr->pm_en) {
+- spin_lock(&hwmgr->smu_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ hwmgr_handle_task(hwmgr,
+ AMD_PP_TASK_COMPLETE_INIT, NULL);
+- spin_unlock(&hwmgr->smu_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ }
+
+ if (adev->pm.smu_prv_buffer_size != 0)
+@@ -368,11 +368,11 @@ static int pp_dpm_force_performance_level(void *handle,
+ if (level == hwmgr->dpm_level)
+ return 0;
+
+- spin_lock(&hwmgr->smu_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ pp_dpm_en_umd_pstate(hwmgr, &level);
+ hwmgr->request_dpm_level = level;
+ hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
+- spin_unlock(&hwmgr->smu_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+
+ return 0;
+ }
+@@ -386,9 +386,9 @@ static enum amd_dpm_forced_level pp_dpm_get_performance_level(
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
+
+- spin_lock(&hwmgr->smu_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ level = hwmgr->dpm_level;
+- spin_unlock(&hwmgr->smu_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return level;
+ }
+
+@@ -404,9 +404,9 @@ static uint32_t pp_dpm_get_sclk(void *handle, bool low)
+ pr_info("%s was not implemented.\n", __func__);
+ return 0;
+ }
+- spin_lock(&hwmgr->smu_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ clk = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
+- spin_unlock(&hwmgr->smu_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return clk;
+ }
+
+@@ -422,9 +422,9 @@ static uint32_t pp_dpm_get_mclk(void *handle, bool low)
+ pr_info("%s was not implemented.\n", __func__);
+ return 0;
+ }
+- spin_lock(&hwmgr->smu_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ clk = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
+- spin_unlock(&hwmgr->smu_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return clk;
+ }
+
+@@ -439,9 +439,9 @@ static void pp_dpm_powergate_vce(void *handle, bool gate)
+ pr_info("%s was not implemented.\n", __func__);
+ return;
+ }
+- spin_lock(&hwmgr->smu_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
+- spin_unlock(&hwmgr->smu_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ }
+
+ static void pp_dpm_powergate_uvd(void *handle, bool gate)
+@@ -455,9 +455,9 @@ static void pp_dpm_powergate_uvd(void *handle, bool gate)
+ pr_info("%s was not implemented.\n", __func__);
+ return;
+ }
+- spin_lock(&hwmgr->smu_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
+- spin_unlock(&hwmgr->smu_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ }
+
+ static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
+@@ -469,9 +469,9 @@ static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
+
+- spin_lock(&hwmgr->smu_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ ret = hwmgr_handle_task(hwmgr, task_id, user_state);
+- spin_unlock(&hwmgr->smu_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+
+ return ret;
+ }
+@@ -485,7 +485,7 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
+ if (!hwmgr || !hwmgr->pm_en || !hwmgr->current_ps)
+ return -EINVAL;
+
+- spin_lock(&hwmgr->smu_lock);
++ mutex_lock(&hwmgr->smu_lock);
+
+ state = hwmgr->current_ps;
+
+@@ -506,7 +506,7 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
+ pm_type = POWER_STATE_TYPE_DEFAULT;
+ break;
+ }
+- spin_unlock(&hwmgr->smu_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+
+ return pm_type;
+ }
+@@ -522,9 +522,9 @@ static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
+ pr_info("%s was not implemented.\n", __func__);
+ return;
+ }
+- spin_lock(&hwmgr->smu_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
+- spin_unlock(&hwmgr->smu_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ }
+
+ static uint32_t pp_dpm_get_fan_control_mode(void *handle)
+@@ -539,9 +539,9 @@ static uint32_t pp_dpm_get_fan_control_mode(void *handle)
+ pr_info("%s was not implemented.\n", __func__);
+ return 0;
+ }
+- spin_lock(&hwmgr->smu_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
+- spin_unlock(&hwmgr->smu_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return mode;
+ }
+
+@@ -557,9 +557,9 @@ static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
+ pr_info("%s was not implemented.\n", __func__);
+ return 0;
+ }
+- spin_lock(&hwmgr->smu_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ ret = hwmgr->hwmgr_func->set_fan_speed_percent(hwmgr, percent);
+- spin_unlock(&hwmgr->smu_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+
+@@ -576,9 +576,9 @@ static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
+ return 0;
+ }
+
+- spin_lock(&hwmgr->smu_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ ret = hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed);
+- spin_unlock(&hwmgr->smu_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+
+@@ -593,9 +593,9 @@ static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
+ if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
+ return -EINVAL;
+
+- spin_lock(&hwmgr->smu_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ ret = hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
+- spin_unlock(&hwmgr->smu_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+
+@@ -610,7 +610,7 @@ static int pp_dpm_get_pp_num_states(void *handle,
+ if (!hwmgr || !hwmgr->pm_en ||!hwmgr->ps)
+ return -EINVAL;
+
+- spin_lock(&hwmgr->smu_lock);
++ mutex_lock(&hwmgr->smu_lock);
+
+ data->nums = hwmgr->num_ps;
+
+@@ -634,7 +634,7 @@ static int pp_dpm_get_pp_num_states(void *handle,
+ data->states[i] = POWER_STATE_TYPE_DEFAULT;
+ }
+ }
+- spin_unlock(&hwmgr->smu_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return 0;
+ }
+
+@@ -646,10 +646,10 @@ static int pp_dpm_get_pp_table(void *handle, char **table)
+ if (!hwmgr || !hwmgr->pm_en ||!hwmgr->soft_pp_table)
+ return -EINVAL;
+
+- spin_lock(&hwmgr->smu_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ *table = (char *)hwmgr->soft_pp_table;
+ size = hwmgr->soft_pp_table_size;
+- spin_unlock(&hwmgr->smu_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return size;
+ }
+
+@@ -677,7 +677,7 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
+
+- spin_lock(&hwmgr->smu_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ if (!hwmgr->hardcode_pp_table) {
+ hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
+ hwmgr->soft_pp_table_size,
+@@ -699,10 +699,10 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
+ if (ret)
+ goto err;
+ }
+- spin_unlock(&hwmgr->smu_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return 0;
+ err:
+- spin_unlock(&hwmgr->smu_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+
+@@ -719,12 +719,12 @@ static int pp_dpm_force_clock_level(void *handle,
+ pr_info("%s was not implemented.\n", __func__);
+ return 0;
+ }
+- spin_lock(&hwmgr->smu_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
+ ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
+ else
+ ret = -EINVAL;
+- spin_unlock(&hwmgr->smu_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+
+@@ -741,9 +741,9 @@ static int pp_dpm_print_clock_levels(void *handle,
+ pr_info("%s was not implemented.\n", __func__);
+ return 0;
+ }
+- spin_lock(&hwmgr->smu_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ ret = hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
+- spin_unlock(&hwmgr->smu_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+
+@@ -759,9 +759,9 @@ static int pp_dpm_get_sclk_od(void *handle)
+ pr_info("%s was not implemented.\n", __func__);
+ return 0;
+ }
+- spin_lock(&hwmgr->smu_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ ret = hwmgr->hwmgr_func->get_sclk_od(hwmgr);
+- spin_unlock(&hwmgr->smu_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+
+@@ -778,9 +778,9 @@ static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
+ return 0;
+ }
+
+- spin_lock(&hwmgr->smu_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ ret = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
+- spin_unlock(&hwmgr->smu_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+
+@@ -796,9 +796,9 @@ static int pp_dpm_get_mclk_od(void *handle)
+ pr_info("%s was not implemented.\n", __func__);
+ return 0;
+ }
+- spin_lock(&hwmgr->smu_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ ret = hwmgr->hwmgr_func->get_mclk_od(hwmgr);
+- spin_unlock(&hwmgr->smu_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+
+@@ -814,9 +814,9 @@ static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
+ pr_info("%s was not implemented.\n", __func__);
+ return 0;
+ }
+- spin_lock(&hwmgr->smu_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ ret = hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
+- spin_unlock(&hwmgr->smu_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+
+@@ -837,9 +837,9 @@ static int pp_dpm_read_sensor(void *handle, int idx,
+ *((uint32_t *)value) = hwmgr->pstate_mclk;
+ return 0;
+ default:
+- spin_lock(&hwmgr->smu_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
+- spin_unlock(&hwmgr->smu_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+ }
+@@ -884,10 +884,10 @@ static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
+ pr_info("%s was not implemented.\n", __func__);
+ return ret;
+ }
+- spin_lock(&hwmgr->smu_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
+ ret = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
+- spin_unlock(&hwmgr->smu_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+
+@@ -924,7 +924,7 @@ static int pp_dpm_switch_power_profile(void *handle,
+ if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
+ return -EINVAL;
+
+- spin_lock(&hwmgr->smu_lock);
++ mutex_lock(&hwmgr->smu_lock);
+
+ if (!en) {
+ hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
+@@ -940,7 +940,7 @@ static int pp_dpm_switch_power_profile(void *handle,
+
+ if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
+ hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
+- spin_unlock(&hwmgr->smu_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+
+ return 0;
+ }
+@@ -963,10 +963,10 @@ static int pp_set_power_limit(void *handle, uint32_t limit)
+ if (limit > hwmgr->default_power_limit)
+ return -EINVAL;
+
+- spin_lock(&hwmgr->smu_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
+ hwmgr->power_limit = limit;
+- spin_unlock(&hwmgr->smu_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return 0;
+ }
+
+@@ -977,14 +977,14 @@ static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
+ if (!hwmgr || !hwmgr->pm_en ||!limit)
+ return -EINVAL;
+
+- spin_lock(&hwmgr->smu_lock);
++ mutex_lock(&hwmgr->smu_lock);
+
+ if (default_limit)
+ *limit = hwmgr->default_power_limit;
+ else
+ *limit = hwmgr->power_limit;
+
+- spin_unlock(&hwmgr->smu_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+
+ return 0;
+ }
+@@ -997,9 +997,9 @@ static int pp_display_configuration_change(void *handle,
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
+
+- spin_lock(&hwmgr->smu_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ phm_store_dal_configuration_data(hwmgr, display_config);
+- spin_unlock(&hwmgr->smu_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return 0;
+ }
+
+@@ -1012,9 +1012,9 @@ static int pp_get_display_power_level(void *handle,
+ if (!hwmgr || !hwmgr->pm_en ||!output)
+ return -EINVAL;
+
+- spin_lock(&hwmgr->smu_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ ret = phm_get_dal_power_level(hwmgr, output);
+- spin_unlock(&hwmgr->smu_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+
+@@ -1029,7 +1029,7 @@ static int pp_get_current_clocks(void *handle,
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
+
+- spin_lock(&hwmgr->smu_lock);
++ mutex_lock(&hwmgr->smu_lock);
+
+ phm_get_dal_power_level(hwmgr, &simple_clocks);
+
+@@ -1043,7 +1043,7 @@ static int pp_get_current_clocks(void *handle,
+
+ if (ret) {
+ pr_info("Error in phm_get_clock_info \n");
+- spin_unlock(&hwmgr->smu_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return -EINVAL;
+ }
+
+@@ -1063,7 +1063,7 @@ static int pp_get_current_clocks(void *handle,
+ clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
+ clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
+ }
+- spin_unlock(&hwmgr->smu_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return 0;
+ }
+
+@@ -1078,9 +1078,9 @@ static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struc
+ if (clocks == NULL)
+ return -EINVAL;
+
+- spin_lock(&hwmgr->smu_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ ret = phm_get_clock_by_type(hwmgr, type, clocks);
+- spin_unlock(&hwmgr->smu_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+
+@@ -1094,9 +1094,9 @@ static int pp_get_clock_by_type_with_latency(void *handle,
+ if (!hwmgr || !hwmgr->pm_en ||!clocks)
+ return -EINVAL;
+
+- spin_lock(&hwmgr->smu_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ ret = phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
+- spin_unlock(&hwmgr->smu_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+
+@@ -1110,11 +1110,11 @@ static int pp_get_clock_by_type_with_voltage(void *handle,
+ if (!hwmgr || !hwmgr->pm_en ||!clocks)
+ return -EINVAL;
+
+- spin_lock(&hwmgr->smu_lock);
++ mutex_lock(&hwmgr->smu_lock);
+
+ ret = phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
+
+- spin_unlock(&hwmgr->smu_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+
+@@ -1127,10 +1127,10 @@ static int pp_set_watermarks_for_clocks_ranges(void *handle,
+ if (!hwmgr || !hwmgr->pm_en ||!wm_with_clock_ranges)
+ return -EINVAL;
+
+- spin_lock(&hwmgr->smu_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
+ wm_with_clock_ranges);
+- spin_unlock(&hwmgr->smu_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+
+ return ret;
+ }
+@@ -1144,9 +1144,9 @@ static int pp_display_clock_voltage_request(void *handle,
+ if (!hwmgr || !hwmgr->pm_en ||!clock)
+ return -EINVAL;
+
+- spin_lock(&hwmgr->smu_lock);
++ mutex_lock(&hwmgr->smu_lock);
+ ret = phm_display_clock_voltage_request(hwmgr, clock);
+- spin_unlock(&hwmgr->smu_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+
+ return ret;
+ }
+@@ -1160,12 +1160,12 @@ static int pp_get_display_mode_validation_clocks(void *handle,
+ if (!hwmgr || !hwmgr->pm_en ||!clocks)
+ return -EINVAL;
+
+- spin_lock(&hwmgr->smu_lock);
++ mutex_lock(&hwmgr->smu_lock);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
+ ret = phm_get_max_high_clocks(hwmgr, clocks);
+
+- spin_unlock(&hwmgr->smu_lock);
++ mutex_unlock(&hwmgr->smu_lock);
+
+ return ret;
+ }
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+index 37d1382..b99fb8a 100644
+--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+@@ -709,7 +709,7 @@ struct pp_hwmgr {
+ uint32_t smu_version;
+ bool not_vf;
+ bool pm_en;
+- spinlock_t smu_lock;
++ struct mutex smu_lock;
+
+ uint32_t pp_table_version;
+ void *device;
+--
+2.7.4
+
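
Editor's note: this revert restores the mutex because, per the commit message, the spinlock conversion broke the build; more generally, a mutex is the right primitive whenever the protected region may sleep. A small kernel-style sketch of that situation is below, with purely illustrative names; msleep() stands in for any call that can block.

#include <linux/mutex.h>
#include <linux/delay.h>

struct demo_smu {
        struct mutex smu_lock;          /* the field type the revert restores */
        unsigned int power_limit;
};

static int demo_set_power_limit(struct demo_smu *smu, unsigned int limit)
{
        mutex_lock(&smu->smu_lock);     /* may sleep: process context only */
        msleep(1);                      /* stand-in for a call that blocks */
        smu->power_limit = limit;
        mutex_unlock(&smu->smu_lock);
        return 0;
}
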
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4591-drm-amdgpu-Fix-ups-for-amdgpu_object.c-documentation.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4591-drm-amdgpu-Fix-ups-for-amdgpu_object.c-documentation.patch
new file mode 100644
index 00000000..863f54fd
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4591-drm-amdgpu-Fix-ups-for-amdgpu_object.c-documentation.patch
@@ -0,0 +1,42 @@
+From 2dda1c0568c6c85cf1cf4f6c95af71a03ec8a649 Mon Sep 17 00:00:00 2001
+From: Michel Daenzer <michel.daenzer@amd.com>
+Date: Fri, 1 Jun 2018 12:29:45 +0200
+Subject: [PATCH 4591/5725] drm/amdgpu: Fix-ups for amdgpu_object.c
+ documentation
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+* Fix format of return value descriptions
+* Document all parameters of amdgpu_bo_free_kernel
+* Document amdgpu_bo_get_preferred_pin_domain
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index 82ab087..8574f39 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -1373,6 +1373,14 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
+ return bo->tbo.offset;
+ }
+
++/**
++ * amdgpu_bo_get_preferred_pin_domain - get preferred domain for scanout
++ * @adev: amdgpu device object
++ * @domain: allowed :ref:`memory domains <amdgpu_memory_domains>`
++ *
++ * Returns:
++ * Which of the allowed domains is preferred for pinning the BO for scanout.
++ */
+ uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
+ uint32_t domain)
+ {
+--
+2.7.4
+
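
Editor's note: the documentation patch above brings the comments into kernel-doc form, including a structured Returns: section. A minimal, hypothetical example of that comment format (the function below is invented for illustration and is not part of amdgpu):

/**
 * demo_pick_domain - choose a placement domain for a buffer
 * @allowed: bitmask of domains the caller permits
 *
 * Returns:
 * The preferred domain out of @allowed, or 0 if none of them is usable.
 */
static unsigned int demo_pick_domain(unsigned int allowed)
{
        return allowed & 0x1;   /* placeholder policy for the example */
}
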
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4592-drm-scheduler-Avoid-using-wait_event_killable-for-dy.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4592-drm-scheduler-Avoid-using-wait_event_killable-for-dy.patch
new file mode 100644
index 00000000..8c2c49a2
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4592-drm-scheduler-Avoid-using-wait_event_killable-for-dy.patch
@@ -0,0 +1,178 @@
+From 113558d1a3710de4b73789b286fed4e4859cab4c Mon Sep 17 00:00:00 2001
+From: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Date: Wed, 30 May 2018 15:11:01 -0400
+Subject: [PATCH 4592/5725] drm/scheduler: Avoid using wait_event_killable for
+ dying process (V4)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+A dying process might be blocked from receiving any more signals,
+so avoid using wait_event_killable() for it.
+
+Also retire entity->fini_status and just check the SW queue;
+if it's not empty, do the fallback cleanup.
+
+Also handle the entity->last_scheduled == NULL case, which
+happens when the HW ring is already hung when a new entity
+tries to enqueue jobs.
+
+v2:
+Return the remaining timeout and use that as parameter for the next call.
+This way when we need to cleanup multiple queues we don't wait for the
+entire TO period for each queue but rather in total.
+Styling comments.
+Rebase.
+
+v3:
+Update types from unsigned to long.
+Work with jiffies instead of ms.
+Return 0 when TO expires.
+Rebase.
+
+v4:
+Remove unnecessary timeout calculation.
+
+Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+
+Conflicts:
+ drivers/gpu/drm/scheduler/gpu_scheduler.c
+
+Change-Id: I63143d46855d80439773f25e1a7cb3296eb755da
+---
+ drivers/gpu/drm/scheduler/gpu_scheduler.c | 67 ++++++++++++++++++++++---------
+ 1 file changed, 49 insertions(+), 18 deletions(-)
+
+diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
+index a403e47..9eb6cca 100644
+--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
++++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
+@@ -181,7 +181,6 @@ int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
+ entity->rq = rq;
+ entity->sched = sched;
+ entity->guilty = guilty;
+- entity->fini_status = 0;
+ entity->last_scheduled = NULL;
+
+ spin_lock_init(&entity->rq_lock);
+@@ -219,7 +218,8 @@ static bool drm_sched_entity_is_initialized(struct drm_gpu_scheduler *sched,
+ static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
+ {
+ rmb();
+- if (spsc_queue_peek(&entity->job_queue) == NULL)
++
++ if (!entity->rq || spsc_queue_peek(&entity->job_queue) == NULL)
+ return true;
+
+ return false;
+@@ -260,25 +260,39 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
+ *
+ * @sched: scheduler instance
+ * @entity: scheduler entity
++ * @timeout: time to wait in for Q to become empty in jiffies.
+ *
+ * Splitting drm_sched_entity_fini() into two functions, The first one does the waiting,
+ * removes the entity from the runqueue and returns an error when the process was killed.
++ *
++ * Returns the remaining time in jiffies left from the input timeout
+ */
+-void drm_sched_entity_do_release(struct drm_gpu_scheduler *sched,
+- struct drm_sched_entity *entity)
++long drm_sched_entity_do_release(struct drm_gpu_scheduler *sched,
++ struct drm_sched_entity *entity, long timeout)
+ {
++ long ret = timeout;
++
+ if (!drm_sched_entity_is_initialized(sched, entity))
+- return;
++ return ret;
+ /**
+ * The client will not queue more IBs during this fini, consume existing
+ * queued IBs or discard them on SIGKILL
+ */
+- if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL)
+- entity->fini_status = -ERESTARTSYS;
+- else
+- entity->fini_status = wait_event_killable(sched->job_scheduled,
+- drm_sched_entity_is_idle(entity));
+- drm_sched_entity_set_rq(entity, NULL);
++ if (current->flags & PF_EXITING) {
++ if (timeout)
++ ret = wait_event_timeout(
++ sched->job_scheduled,
++ drm_sched_entity_is_idle(entity),
++ timeout);
++ } else
++ wait_event_killable(sched->job_scheduled, drm_sched_entity_is_idle(entity));
++
++
++ /* For killed process disable any more IBs enqueue right now */
++ if ((current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
++ drm_sched_entity_set_rq(entity, NULL);
++
++ return ret;
+ }
+ EXPORT_SYMBOL(drm_sched_entity_do_release);
+
+@@ -290,11 +304,18 @@ EXPORT_SYMBOL(drm_sched_entity_do_release);
+ *
+ * This should be called after @drm_sched_entity_do_release. It goes over the
+ * entity and signals all jobs with an error code if the process was killed.
++ *
+ */
+ void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
+ struct drm_sched_entity *entity)
+ {
+- if (entity->fini_status) {
++
++ drm_sched_entity_set_rq(entity, NULL);
++
++ /* Consumption of existing IBs wasn't completed. Forcefully
++ * remove them here.
++ */
++ if (spsc_queue_peek(&entity->job_queue)) {
+ struct drm_sched_job *job;
+ int r;
+
+@@ -314,12 +335,22 @@ void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
+ struct drm_sched_fence *s_fence = job->s_fence;
+ drm_sched_fence_scheduled(s_fence);
+ dma_fence_set_error(&s_fence->finished, -ESRCH);
+- r = dma_fence_add_callback(entity->last_scheduled, &job->finish_cb,
+- drm_sched_entity_kill_jobs_cb);
+- if (r == -ENOENT)
++
++ /*
++ * When pipe is hanged by older entity, new entity might
++ * not even have chance to submit it's first job to HW
++ * and so entity->last_scheduled will remain NULL
++ */
++ if (!entity->last_scheduled) {
+ drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
+- else if (r)
+- DRM_ERROR("fence add callback failed (%d)\n", r);
++ } else {
++ r = dma_fence_add_callback(entity->last_scheduled, &job->finish_cb,
++ drm_sched_entity_kill_jobs_cb);
++ if (r == -ENOENT)
++ drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
++ else if (r)
++ DRM_ERROR("fence add callback failed (%d)\n", r);
++ }
+ }
+ }
+
+@@ -339,7 +370,7 @@ EXPORT_SYMBOL(drm_sched_entity_cleanup);
+ void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
+ struct drm_sched_entity *entity)
+ {
+- drm_sched_entity_do_release(sched, entity);
++ drm_sched_entity_do_release(sched, entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
+ drm_sched_entity_cleanup(sched, entity);
+ }
+ EXPORT_SYMBOL(drm_sched_entity_fini);
+--
+2.7.4
+
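
Editor's note: the scheduler change above waits with wait_event_timeout() for exiting tasks and returns the remaining jiffies, so that tearing down several entities shares one overall budget instead of waiting the full period per entity. A hedged sketch of that shared-budget pattern, with illustrative types and names, is shown below.

#include <linux/wait.h>
#include <linux/jiffies.h>

#define DEMO_TOTAL_WAIT msecs_to_jiffies(1000)

struct demo_queue {
        wait_queue_head_t wq;
        unsigned int pending;
};

/* Drain every queue against one shared budget: each wait_event_timeout()
 * call returns the jiffies it did not use, which become the budget for the
 * next queue; once it returns 0 the budget is exhausted. */
static void demo_drain_all(struct demo_queue *qs, int n)
{
        long budget = DEMO_TOTAL_WAIT;
        int i;

        for (i = 0; i < n && budget; i++)
                budget = wait_event_timeout(qs[i].wq,
                                            qs[i].pending == 0,
                                            budget);
}

This mirrors how the returned remainder is threaded into the next drm_sched_entity_do_release() call in the following patch.
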
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4593-drm-amdgpu-move-amdgpu_ctx_mgr_entity_fini-to-f_ops-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4593-drm-amdgpu-move-amdgpu_ctx_mgr_entity_fini-to-f_ops-.patch
new file mode 100644
index 00000000..8c09aa16
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4593-drm-amdgpu-move-amdgpu_ctx_mgr_entity_fini-to-f_ops-.patch
@@ -0,0 +1,145 @@
+From 083acdc7a3158786d53e46a0e43ff7cf55135f88 Mon Sep 17 00:00:00 2001
+From: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Date: Wed, 30 May 2018 15:28:52 -0400
+Subject: [PATCH 4593/5725] drm/amdgpu: move amdgpu_ctx_mgr_entity_fini to
+ f_ops flush hook (V4)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+With this we can now terminate jobs enqueued into the SW queue the moment
+the task is being killed, instead of waiting for the last user of the
+drm file to release it.
+
+Also stop checking for kref_read(&ctx->refcount) == 1 when
+calling drm_sched_entity_do_release, since another task
+might still hold a reference to this entity; but we don't
+care, since KILL means terminating job submission regardless
+of what other tasks are doing.
+
+v2:
+Use returned remaining timeout as parameter for the next call.
+Rebase.
+
+v3:
+Switch to working with jiffies.
+Streamline remainder TO usage.
+Rebase.
+
+v4:
+Rebase.
+
+Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 14 ++++++++------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 12 ++++++++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 1 -
+ include/drm/gpu_scheduler.h | 6 ++++--
+ 4 files changed, 24 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+index 58795d4..6f6fdab 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+@@ -465,26 +465,28 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
+ struct amdgpu_ctx *ctx;
+ struct idr *idp;
+ uint32_t id, i;
++ long max_wait = MAX_WAIT_SCHED_ENTITY_Q_EMPTY;
+
+ idp = &mgr->ctx_handles;
+
++ mutex_lock(&mgr->lock);
+ idr_for_each_entry(idp, ctx, id) {
+
+- if (!ctx->adev)
++ if (!ctx->adev) {
++ mutex_unlock(&mgr->lock);
+ return;
++ }
+
+ for (i = 0; i < ctx->adev->num_rings; i++) {
+
+ if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
+ continue;
+
+- if (kref_read(&ctx->refcount) == 1)
+- drm_sched_entity_do_release(&ctx->adev->rings[i]->sched,
+- &ctx->rings[i].entity);
+- else
+- DRM_ERROR("ctx %p is still alive\n", ctx);
++ max_wait = drm_sched_entity_do_release(&ctx->adev->rings[i]->sched,
++ &ctx->rings[i].entity, max_wait);
+ }
+ }
++ mutex_unlock(&mgr->lock);
+ }
+
+ void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 1c0cf9a..96694c8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -861,9 +861,21 @@ static const struct dev_pm_ops amdgpu_pm_ops = {
+ .runtime_idle = amdgpu_pmops_runtime_idle,
+ };
+
++static int amdgpu_flush(struct file *f, fl_owner_t id)
++{
++ struct drm_file *file_priv = f->private_data;
++ struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
++
++ amdgpu_ctx_mgr_entity_fini(&fpriv->ctx_mgr);
++
++ return 0;
++}
++
++
+ static const struct file_operations amdgpu_driver_kms_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
++ .flush = amdgpu_flush,
+ .release = drm_release,
+ .unlocked_ioctl = amdgpu_drm_ioctl,
+ .mmap = amdgpu_mmap,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index 86087c1..a39919c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -964,7 +964,6 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
+ return;
+
+ pm_runtime_get_sync(dev->dev);
+- amdgpu_ctx_mgr_entity_fini(&fpriv->ctx_mgr);
+
+ if (adev->asic_type != CHIP_RAVEN) {
+ amdgpu_uvd_free_handles(adev, file_priv);
+diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
+index 86c7344..7ae23fb 100644
+--- a/include/drm/gpu_scheduler.h
++++ b/include/drm/gpu_scheduler.h
+@@ -27,6 +27,8 @@
+ #include <drm/spsc_queue.h>
+ #include <linux/dma-fence.h>
+
++#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)
++
+ struct drm_gpu_scheduler;
+ struct drm_sched_rq;
+
+@@ -151,8 +153,8 @@ int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
+ struct drm_sched_entity *entity,
+ struct drm_sched_rq *rq,
+ atomic_t *guilty);
+-void drm_sched_entity_do_release(struct drm_gpu_scheduler *sched,
+- struct drm_sched_entity *entity);
++long drm_sched_entity_do_release(struct drm_gpu_scheduler *sched,
++ struct drm_sched_entity *entity, long timeout);
+ void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
+ struct drm_sched_entity *entity);
+ void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
+--
+2.7.4
+
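
Editor's note: the patch above moves entity teardown into the drm file's .flush hook so job submission stops as soon as the killed task's file descriptors are closed, rather than when the last reference to the file is released. A hedged, minimal sketch of wiring a .flush handler into a struct file_operations is below; everything named demo_* is illustrative.

#include <linux/fs.h>
#include <linux/module.h>

struct demo_priv {
        bool accepting_jobs;
};

static void demo_stop_accepting_jobs(struct demo_priv *priv)
{
        priv->accepting_jobs = false;   /* illustrative teardown */
}

/* .flush is called for every close of a file descriptor, including when the
 * kernel tears down a killed task's descriptor table, so teardown happens as
 * soon as the task dies. .release only runs at the last fput(). */
static int demo_flush(struct file *f, fl_owner_t id)
{
        struct demo_priv *priv = f->private_data;

        demo_stop_accepting_jobs(priv);
        return 0;
}

static int demo_release(struct inode *inode, struct file *f)
{
        return 0;
}

static const struct file_operations demo_fops = {
        .owner   = THIS_MODULE,
        .flush   = demo_flush,
        .release = demo_release,
};
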
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4594-drm-amdgpu-fix-clear_all-and-replace-handling-in-the.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4594-drm-amdgpu-fix-clear_all-and-replace-handling-in-the.patch
new file mode 100644
index 00000000..54c8f34e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4594-drm-amdgpu-fix-clear_all-and-replace-handling-in-the.patch
@@ -0,0 +1,48 @@
+From a55ecf052163d381cd821f8d96cd011c8e7b0475 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <ckoenig.leichtzumerken@gmail.com>
+Date: Mon, 4 Jun 2018 12:59:52 +0200
+Subject: [PATCH 4594/5725] drm/amdgpu: fix clear_all and replace handling in
+ the VM (v2)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+v2: assign bo_va as well
+
+We need to put the loose ends on the invalid list because it is possible
+that we need to split up huge pages for them.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Junwei Zhang <Jerry.Zhang@amd.com> (v2)
+Reviewed-by: David Zhou <david1.zhou@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 1fcc586..4889137 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -2153,7 +2153,8 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
+ before->last = saddr - 1;
+ before->offset = tmp->offset;
+ before->flags = tmp->flags;
+- list_add(&before->list, &tmp->list);
++ before->bo_va = tmp->bo_va;
++ list_add(&before->list, &tmp->bo_va->invalids);
+ }
+
+ /* Remember mapping split at the end */
+@@ -2163,7 +2164,8 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
+ after->offset = tmp->offset;
+ after->offset += after->start - tmp->start;
+ after->flags = tmp->flags;
+- list_add(&after->list, &tmp->list);
++ after->bo_va = tmp->bo_va;
++ list_add(&after->list, &tmp->bo_va->invalids);
+ }
+
+ list_del(&tmp->list);
+--
+2.7.4
+
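
Editor's note: the fix above assigns bo_va to the split "before"/"after" mappings and links them onto the bo_va's invalids list so the huge pages covering them can be split up later. A generic, hedged sketch of that relink pattern (illustrative types, not the amdgpu_vm structures) follows.

#include <linux/list.h>
#include <linux/slab.h>

struct demo_owner {
        struct list_head invalids;      /* mappings waiting for revalidation */
};

struct demo_mapping {
        struct list_head list;
        struct demo_owner *owner;
        unsigned long start;
        unsigned long last;
};

/* Split off the part of @tmp in front of @saddr and queue the new piece on
 * the owner's invalids list, mirroring the relink in the fix above. */
static struct demo_mapping *demo_split_front(struct demo_mapping *tmp,
                                             unsigned long saddr)
{
        struct demo_mapping *before = kzalloc(sizeof(*before), GFP_KERNEL);

        if (!before)
                return NULL;

        before->start = tmp->start;
        before->last = saddr - 1;
        before->owner = tmp->owner;
        list_add(&before->list, &tmp->owner->invalids);
        return before;
}
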
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4595-Revert-drm-amdgpu-fix-clear_all-and-replace-handling.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4595-Revert-drm-amdgpu-fix-clear_all-and-replace-handling.patch
new file mode 100644
index 00000000..28b0ac42
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4595-Revert-drm-amdgpu-fix-clear_all-and-replace-handling.patch
@@ -0,0 +1,44 @@
+From b67479ea6ec488ffadefa218c58bfa80131f120f Mon Sep 17 00:00:00 2001
+From: Junwei Zhang <Jerry.Zhang@amd.com>
+Date: Tue, 5 Jun 2018 17:02:21 +0800
+Subject: [PATCH 4595/5725] Revert "drm/amdgpu: fix clear_all and replace
+ handling in the VM (v2)"
+
+This reverts commit 6f422f9d0b42da8707fda42f789d6cf57056b444.
+
+Please ignore the patch below; it will be re-sent with the stable kernel tag
+
+ * 6f422f9 drm/amdgpu: fix clear_all and replace handling in the VM (v2)
+
+Signed-off-by: Junwei Zhang <Jerry.Zhang@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 4889137..1fcc586 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -2153,8 +2153,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
+ before->last = saddr - 1;
+ before->offset = tmp->offset;
+ before->flags = tmp->flags;
+- before->bo_va = tmp->bo_va;
+- list_add(&before->list, &tmp->bo_va->invalids);
++ list_add(&before->list, &tmp->list);
+ }
+
+ /* Remember mapping split at the end */
+@@ -2164,8 +2163,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
+ after->offset = tmp->offset;
+ after->offset += after->start - tmp->start;
+ after->flags = tmp->flags;
+- after->bo_va = tmp->bo_va;
+- list_add(&after->list, &tmp->bo_va->invalids);
++ list_add(&after->list, &tmp->list);
+ }
+
+ list_del(&tmp->list);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4596-drm-amdgpu-fix-clear_all-and-replace-handling-in-the.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4596-drm-amdgpu-fix-clear_all-and-replace-handling-in-the.patch
new file mode 100644
index 00000000..06fb3aab
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4596-drm-amdgpu-fix-clear_all-and-replace-handling-in-the.patch
@@ -0,0 +1,49 @@
+From 3c8a31771e69f89e368a8cb13fe9c1bb4c4c627e Mon Sep 17 00:00:00 2001
+From: Junwei Zhang <Jerry.Zhang@amd.com>
+Date: Tue, 5 Jun 2018 17:31:51 +0800
+Subject: [PATCH 4596/5725] drm/amdgpu: fix clear_all and replace handling in
+ the VM (v2)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+v2: assign bo_va as well
+
+We need to put the loose ends on the invalid list because it is possible
+that we need to split up huge pages for them.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Junwei Zhang <Jerry.Zhang@amd.com> (v2)
+Reviewed-by: David Zhou <david1.zhou@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 1fcc586..4889137 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -2153,7 +2153,8 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
+ before->last = saddr - 1;
+ before->offset = tmp->offset;
+ before->flags = tmp->flags;
+- list_add(&before->list, &tmp->list);
++ before->bo_va = tmp->bo_va;
++ list_add(&before->list, &tmp->bo_va->invalids);
+ }
+
+ /* Remember mapping split at the end */
+@@ -2163,7 +2164,8 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
+ after->offset = tmp->offset;
+ after->offset += after->start - tmp->start;
+ after->flags = tmp->flags;
+- list_add(&after->list, &tmp->list);
++ after->bo_va = tmp->bo_va;
++ list_add(&after->list, &tmp->bo_va->invalids);
+ }
+
+ list_del(&tmp->list);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4597-drm-amd-powerplay-fix-missed-hwmgr-check-warning-bef.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4597-drm-amd-powerplay-fix-missed-hwmgr-check-warning-bef.patch
new file mode 100644
index 00000000..79409fb2
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4597-drm-amd-powerplay-fix-missed-hwmgr-check-warning-bef.patch
@@ -0,0 +1,55 @@
+From 49f677cba8f2e2f1988357b77b2adf06aed1098d Mon Sep 17 00:00:00 2001
+From: Huang Rui <ray.huang@amd.com>
+Date: Fri, 18 May 2018 10:39:16 +0800
+Subject: [PATCH 4597/5725] drm/amd/powerplay: fix missed hwmgr check warning
+ before call gfx_off_control handler
+
+Patch 9667849bbb8d: "drm/amd/powerplay: add control gfxoff enabling in late
+init" from Mar 13, 2018, leads to the following static checker warning:
+
+ drivers/gpu/drm/amd/amdgpu/../powerplay/amd_powerplay.c:194
+pp_late_init()
+ error: we previously assumed 'hwmgr' could be null (see line 185)
+
+drivers/gpu/drm/amd/amdgpu/../powerplay/amd_powerplay.c
+
+This patch fixes the warning by adding a hwmgr check.
+
+Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Huang Rui <ray.huang@amd.com>
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+index 76fc45f..f60de8e 100644
+--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
++++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+@@ -180,6 +180,7 @@ static int pp_late_init(void *handle)
+ {
+ struct amdgpu_device *adev = handle;
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
++ int ret;
+
+ if (hwmgr && hwmgr->pm_en) {
+ mutex_lock(&hwmgr->smu_lock);
+@@ -191,6 +192,14 @@ static int pp_late_init(void *handle)
+ if (adev->pm.smu_prv_buffer_size != 0)
+ pp_reserve_vram_for_smu(adev);
+
++ if (hwmgr && hwmgr->hwmgr_func &&
++ hwmgr->hwmgr_func->gfx_off_control &&
++ (hwmgr->feature_mask & PP_GFXOFF_MASK)) {
++ ret = hwmgr->hwmgr_func->gfx_off_control(hwmgr, true);
++ if (ret)
++ pr_err("gfx off enabling failed!\n");
++ }
++
+ return 0;
+ }
+
+--
+2.7.4
+
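
Editor's note: the fix above keeps the gfx_off_control call behind the same hwmgr NULL check that guards the earlier dereference, which is what the static checker complained about. A hedged, self-contained sketch of the guard pattern with illustrative names:

#include <linux/types.h>

#define DEMO_GFXOFF_MASK 0x1u

struct demo_funcs {
        int (*gfx_off_control)(void *mgr, bool enable);
};

struct demo_mgr {
        const struct demo_funcs *funcs;
        unsigned int feature_mask;
};

/* Every dereference sits behind the same NULL checks, so the function is
 * safe even when the manager was never created. */
static int demo_late_init(struct demo_mgr *mgr)
{
        if (mgr && mgr->funcs && mgr->funcs->gfx_off_control &&
            (mgr->feature_mask & DEMO_GFXOFF_MASK))
                return mgr->funcs->gfx_off_control(mgr, true);

        return 0;
}
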
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4598-drm-amdgpu-define-vcn-jpeg-ring.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4598-drm-amdgpu-define-vcn-jpeg-ring.patch
new file mode 100644
index 00000000..38f23752
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4598-drm-amdgpu-define-vcn-jpeg-ring.patch
@@ -0,0 +1,33 @@
+From 86461a48b4a1ffb197cee9830890cada16335fe1 Mon Sep 17 00:00:00 2001
+From: Boyuan Zhang <boyuan.zhang@amd.com>
+Date: Mon, 30 Apr 2018 16:35:34 -0400
+Subject: [PATCH 4598/5725] drm/amdgpu: define vcn jpeg ring
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Add AMDGPU_RING_TYPE_VCN_JPEG ring define
+
+Signed-off-by: Boyuan Zhang <boyuan.zhang@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+index dce4b82..df20b61 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+@@ -52,7 +52,8 @@ enum amdgpu_ring_type {
+ AMDGPU_RING_TYPE_KIQ,
+ AMDGPU_RING_TYPE_UVD_ENC,
+ AMDGPU_RING_TYPE_VCN_DEC,
+- AMDGPU_RING_TYPE_VCN_ENC
++ AMDGPU_RING_TYPE_VCN_ENC,
++ AMDGPU_RING_TYPE_VCN_JPEG
+ };
+
+ struct amdgpu_device;
+--
+2.7.4
+
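
Editor's note: the define above appends AMDGPU_RING_TYPE_VCN_JPEG at the end of the ring-type enum, so all existing enumerator values keep their numeric meaning. Below is a generic, hedged C illustration of that convention (not the amdgpu enum); building with -Wswitch then flags any switch statement that has not learned about the new case.

/* New enumerators are appended so existing values stay stable. */
enum demo_ring_type {
        DEMO_RING_GFX,
        DEMO_RING_VCN_DEC,
        DEMO_RING_VCN_ENC,
        DEMO_RING_VCN_JPEG,     /* new member appended last */
};

static const char *demo_ring_name(enum demo_ring_type type)
{
        switch (type) {
        case DEMO_RING_GFX:      return "gfx";
        case DEMO_RING_VCN_DEC:  return "vcn_dec";
        case DEMO_RING_VCN_ENC:  return "vcn_enc";
        case DEMO_RING_VCN_JPEG: return "vcn_jpeg";
        }
        return "unknown";
}
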
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4599-drm-amdgpu-add-vcn-jpeg-ring.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4599-drm-amdgpu-add-vcn-jpeg-ring.patch
new file mode 100644
index 00000000..0af078f0
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4599-drm-amdgpu-add-vcn-jpeg-ring.patch
@@ -0,0 +1,33 @@
+From 6cdfed7b466e6ab042b7a0343855dcf377acc4a1 Mon Sep 17 00:00:00 2001
+From: Boyuan Zhang <boyuan.zhang@amd.com>
+Date: Wed, 30 May 2018 14:13:33 -0400
+Subject: [PATCH 4599/5725] drm/amdgpu: add vcn jpeg ring
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Add jpeg to amdgpu_vcn
+
+v2: remove unnecessary scheduler entity
+
+Signed-off-by: Boyuan Zhang <boyuan.zhang@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+index 773010b..6f3bed1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+@@ -66,6 +66,7 @@ struct amdgpu_vcn {
+ const struct firmware *fw; /* VCN firmware */
+ struct amdgpu_ring ring_dec;
+ struct amdgpu_ring ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];
++ struct amdgpu_ring ring_jpeg;
+ struct amdgpu_irq_src irq;
+ unsigned num_enc_rings;
+ };
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4600-drm-amdgpu-add-jpeg-packet-defines-to-soc15d.h.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4600-drm-amdgpu-add-jpeg-packet-defines-to-soc15d.h.patch
new file mode 100644
index 00000000..ece7b699
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4600-drm-amdgpu-add-jpeg-packet-defines-to-soc15d.h.patch
@@ -0,0 +1,53 @@
+From 9d5925c4d690032072a58695f2d9c7c427c4a9b2 Mon Sep 17 00:00:00 2001
+From: Boyuan Zhang <boyuan.zhang@amd.com>
+Date: Mon, 30 Apr 2018 16:51:33 -0400
+Subject: [PATCH 4600/5725] drm/amdgpu: add jpeg packet defines to soc15d.h
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Add new packet defines for vcn jpeg, including condition checks, types and the packet macro
+
+Signed-off-by: Boyuan Zhang <boyuan.zhang@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/soc15d.h | 23 +++++++++++++++++++++++
+ 1 file changed, 23 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15d.h b/drivers/gpu/drm/amd/amdgpu/soc15d.h
+index 8dc2910..edfe508 100755
+--- a/drivers/gpu/drm/amd/amdgpu/soc15d.h
++++ b/drivers/gpu/drm/amd/amdgpu/soc15d.h
+@@ -53,6 +53,29 @@
+
+ #define PACKET3_COMPUTE(op, n) (PACKET3(op, n) | 1 << 1)
+
++#define PACKETJ_CONDITION_CHECK0 0
++#define PACKETJ_CONDITION_CHECK1 1
++#define PACKETJ_CONDITION_CHECK2 2
++#define PACKETJ_CONDITION_CHECK3 3
++#define PACKETJ_CONDITION_CHECK4 4
++#define PACKETJ_CONDITION_CHECK5 5
++#define PACKETJ_CONDITION_CHECK6 6
++#define PACKETJ_CONDITION_CHECK7 7
++
++#define PACKETJ_TYPE0 0
++#define PACKETJ_TYPE1 1
++#define PACKETJ_TYPE2 2
++#define PACKETJ_TYPE3 3
++#define PACKETJ_TYPE4 4
++#define PACKETJ_TYPE5 5
++#define PACKETJ_TYPE6 6
++#define PACKETJ_TYPE7 7
++
++#define PACKETJ(reg, r, cond, type) ((reg & 0x3FFFF) | \
++ ((r & 0x3F) << 18) | \
++ ((cond & 0xF) << 24) | \
++ ((type & 0xF) << 28))
++
+ /* Packet 3 types */
+ #define PACKET3_NOP 0x10
+ #define PACKET3_SET_BASE 0x11
+--
+2.7.4
+
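
Editor's note: the PACKETJ() macro added above packs the register offset into bits 0-17, an auxiliary field into bits 18-23, the condition into bits 24-27 and the packet type into bits 28-31 of a single 32-bit command word. Below is a standalone, compilable illustration of that layout, with an invented macro name and arbitrary example values.

#include <stdint.h>
#include <stdio.h>

/* Invented name; same bit layout as the PACKETJ() macro above. */
#define DEMO_PACKETJ(reg, r, cond, type) (((reg) & 0x3FFFF) | \
                                          (((r) & 0x3F) << 18) | \
                                          (((cond) & 0xF) << 24) | \
                                          (((type) & 0xFu) << 28))

int main(void)
{
        /* Arbitrary example: register offset 0x0517, condition 0, type 0. */
        uint32_t word = DEMO_PACKETJ(0x0517, 0, 0, 0);

        printf("word = 0x%08x reg = 0x%x cond = %u type = %u\n",
               (unsigned int)word,
               (unsigned int)(word & 0x3FFFF),
               (unsigned int)((word >> 24) & 0xF),
               (unsigned int)((word >> 28) & 0xF));
        return 0;
}
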
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4601-drm-amdgpu-add-more-jpeg-register-offset-headers.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4601-drm-amdgpu-add-more-jpeg-register-offset-headers.patch
new file mode 100644
index 00000000..de218936
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4601-drm-amdgpu-add-more-jpeg-register-offset-headers.patch
@@ -0,0 +1,71 @@
+From 250d38b4e02a461e04fbda1558bb8ff8cc6691df Mon Sep 17 00:00:00 2001
+From: Boyuan Zhang <boyuan.zhang@amd.com>
+Date: Mon, 30 Apr 2018 16:55:39 -0400
+Subject: [PATCH 4601/5725] drm/amdgpu: add more jpeg register offset headers
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Add more jpeg register defines that are needed for the jpeg ring functions
+
+Signed-off-by: Boyuan Zhang <boyuan.zhang@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ .../drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h | 20 ++++++++++++++++++++
+ 1 file changed, 20 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h
+index 18a3247..fe0cbaa 100644
+--- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h
++++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h
+@@ -89,6 +89,8 @@
+ #define mmUVD_JPEG_RB_SIZE_BASE_IDX 1
+ #define mmUVD_JPEG_ADDR_CONFIG 0x021f
+ #define mmUVD_JPEG_ADDR_CONFIG_BASE_IDX 1
++#define mmUVD_JPEG_PITCH 0x0222
++#define mmUVD_JPEG_PITCH_BASE_IDX 1
+ #define mmUVD_JPEG_GPCOM_CMD 0x022c
+ #define mmUVD_JPEG_GPCOM_CMD_BASE_IDX 1
+ #define mmUVD_JPEG_GPCOM_DATA0 0x022d
+@@ -203,6 +205,8 @@
+ #define mmUVD_RB_WPTR4_BASE_IDX 1
+ #define mmUVD_JRBC_RB_RPTR 0x0457
+ #define mmUVD_JRBC_RB_RPTR_BASE_IDX 1
++#define mmUVD_LMI_JPEG_VMID 0x045d
++#define mmUVD_LMI_JPEG_VMID_BASE_IDX 1
+ #define mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH 0x045e
+ #define mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH_BASE_IDX 1
+ #define mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW 0x045f
+@@ -231,6 +235,8 @@
+ #define mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_BASE_IDX 1
+ #define mmUVD_LMI_JRBC_IB_VMID 0x0507
+ #define mmUVD_LMI_JRBC_IB_VMID_BASE_IDX 1
++#define mmUVD_LMI_JRBC_RB_VMID 0x0508
++#define mmUVD_LMI_JRBC_RB_VMID_BASE_IDX 1
+ #define mmUVD_JRBC_RB_WPTR 0x0509
+ #define mmUVD_JRBC_RB_WPTR_BASE_IDX 1
+ #define mmUVD_JRBC_RB_CNTL 0x050a
+@@ -239,6 +245,20 @@
+ #define mmUVD_JRBC_IB_SIZE_BASE_IDX 1
+ #define mmUVD_JRBC_LMI_SWAP_CNTL 0x050d
+ #define mmUVD_JRBC_LMI_SWAP_CNTL_BASE_IDX 1
++#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW 0x050e
++#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 1
++#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH 0x050f
++#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 1
++#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW 0x0510
++#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_BASE_IDX 1
++#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH 0x0511
++#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_BASE_IDX 1
++#define mmUVD_JRBC_RB_REF_DATA 0x0512
++#define mmUVD_JRBC_RB_REF_DATA_BASE_IDX 1
++#define mmUVD_JRBC_RB_COND_RD_TIMER 0x0513
++#define mmUVD_JRBC_RB_COND_RD_TIMER_BASE_IDX 1
++#define mmUVD_JRBC_EXTERNAL_REG_BASE 0x0517
++#define mmUVD_JRBC_EXTERNAL_REG_BASE_BASE_IDX 1
+ #define mmUVD_JRBC_SOFT_RESET 0x0519
+ #define mmUVD_JRBC_SOFT_RESET_BASE_IDX 1
+ #define mmUVD_JRBC_STATUS 0x051a
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4602-drm-amdgpu-implement-jpeg-ring-functions.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4602-drm-amdgpu-implement-jpeg-ring-functions.patch
new file mode 100644
index 00000000..8f430c3c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4602-drm-amdgpu-implement-jpeg-ring-functions.patch
@@ -0,0 +1,318 @@
+From c1807bc9cd2593bcd7ea4ceb4617c8b474257d8b Mon Sep 17 00:00:00 2001
+From: Boyuan Zhang <boyuan.zhang@amd.com>
+Date: Wed, 30 May 2018 14:39:07 -0400
+Subject: [PATCH 4602/5725] drm/amdgpu: implement jpeg ring functions
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Implement all ring functions needed for jpeg ring
+
+v2: remove unnecessary mem read function.
+
+Signed-off-by: Boyuan Zhang <boyuan.zhang@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 286 ++++++++++++++++++++++++++++++++++
+ 1 file changed, 286 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index 7756a93..32bd4c6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -1163,6 +1163,292 @@ static void vcn_v1_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
+ amdgpu_ring_write(ring, val);
+ }
+
++
++/**
++ * vcn_v1_0_jpeg_ring_get_rptr - get read pointer
++ *
++ * @ring: amdgpu_ring pointer
++ *
++ * Returns the current hardware read pointer
++ */
++static uint64_t vcn_v1_0_jpeg_ring_get_rptr(struct amdgpu_ring *ring)
++{
++ struct amdgpu_device *adev = ring->adev;
++
++ return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR);
++}
++
++/**
++ * vcn_v1_0_jpeg_ring_get_wptr - get write pointer
++ *
++ * @ring: amdgpu_ring pointer
++ *
++ * Returns the current hardware write pointer
++ */
++static uint64_t vcn_v1_0_jpeg_ring_get_wptr(struct amdgpu_ring *ring)
++{
++ struct amdgpu_device *adev = ring->adev;
++
++ return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
++}
++
++/**
++ * vcn_v1_0_jpeg_ring_set_wptr - set write pointer
++ *
++ * @ring: amdgpu_ring pointer
++ *
++ * Commits the write pointer to the hardware
++ */
++static void vcn_v1_0_jpeg_ring_set_wptr(struct amdgpu_ring *ring)
++{
++ struct amdgpu_device *adev = ring->adev;
++
++ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
++}
++
++/**
++ * vcn_v1_0_jpeg_ring_insert_start - insert a start command
++ *
++ * @ring: amdgpu_ring pointer
++ *
++ * Write a start command to the ring.
++ */
++static void vcn_v1_0_jpeg_ring_insert_start(struct amdgpu_ring *ring)
++{
++ struct amdgpu_device *adev = ring->adev;
++
++ amdgpu_ring_write(ring,
++ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
++ amdgpu_ring_write(ring, 0x68e04);
++
++ amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
++ amdgpu_ring_write(ring, 0x80010000);
++}
++
++/**
++ * vcn_v1_0_jpeg_ring_insert_end - insert a end command
++ *
++ * @ring: amdgpu_ring pointer
++ *
++ * Write a end command to the ring.
++ */
++static void vcn_v1_0_jpeg_ring_insert_end(struct amdgpu_ring *ring)
++{
++ struct amdgpu_device *adev = ring->adev;
++
++ amdgpu_ring_write(ring,
++ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
++ amdgpu_ring_write(ring, 0x68e04);
++
++ amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
++ amdgpu_ring_write(ring, 0x00010000);
++}
++
++/**
++ * vcn_v1_0_jpeg_ring_emit_fence - emit an fence & trap command
++ *
++ * @ring: amdgpu_ring pointer
++ * @fence: fence to emit
++ *
++ * Write a fence and a trap command to the ring.
++ */
++static void vcn_v1_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
++ unsigned flags)
++{
++ struct amdgpu_device *adev = ring->adev;
++
++ WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
++
++ amdgpu_ring_write(ring,
++ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_DATA0), 0, 0, PACKETJ_TYPE0));
++ amdgpu_ring_write(ring, seq);
++
++ amdgpu_ring_write(ring,
++ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_DATA1), 0, 0, PACKETJ_TYPE0));
++ amdgpu_ring_write(ring, seq);
++
++ amdgpu_ring_write(ring,
++ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
++ amdgpu_ring_write(ring, lower_32_bits(addr));
++
++ amdgpu_ring_write(ring,
++ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
++ amdgpu_ring_write(ring, upper_32_bits(addr));
++
++ amdgpu_ring_write(ring,
++ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_CMD), 0, 0, PACKETJ_TYPE0));
++ amdgpu_ring_write(ring, 0x8);
++
++ amdgpu_ring_write(ring,
++ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_CMD), 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4));
++ amdgpu_ring_write(ring, 0);
++
++ amdgpu_ring_write(ring,
++ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
++ amdgpu_ring_write(ring, 0x01400200);
++
++ amdgpu_ring_write(ring,
++ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
++ amdgpu_ring_write(ring, seq);
++
++ amdgpu_ring_write(ring,
++ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
++ amdgpu_ring_write(ring, lower_32_bits(addr));
++
++ amdgpu_ring_write(ring,
++ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
++ amdgpu_ring_write(ring, upper_32_bits(addr));
++
++ amdgpu_ring_write(ring,
++ PACKETJ(0, 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE2));
++ amdgpu_ring_write(ring, 0xffffffff);
++
++ amdgpu_ring_write(ring,
++ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
++ amdgpu_ring_write(ring, 0x3fbc);
++
++ amdgpu_ring_write(ring,
++ PACKETJ(0, 0, 0, PACKETJ_TYPE0));
++ amdgpu_ring_write(ring, 0x1);
++}
++
++/**
++ * vcn_v1_0_jpeg_ring_emit_ib - execute indirect buffer
++ *
++ * @ring: amdgpu_ring pointer
++ * @ib: indirect buffer to execute
++ *
++ * Write ring commands to execute the indirect buffer.
++ */
++static void vcn_v1_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring,
++ struct amdgpu_ib *ib,
++ unsigned vmid, bool ctx_switch)
++{
++ struct amdgpu_device *adev = ring->adev;
++
++ amdgpu_ring_write(ring,
++ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_VMID), 0, 0, PACKETJ_TYPE0));
++ amdgpu_ring_write(ring, (vmid | (vmid << 4)));
++
++ amdgpu_ring_write(ring,
++ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JPEG_VMID), 0, 0, PACKETJ_TYPE0));
++ amdgpu_ring_write(ring, (vmid | (vmid << 4)));
++
++ amdgpu_ring_write(ring,
++ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
++ amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
++
++ amdgpu_ring_write(ring,
++ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
++ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
++
++ amdgpu_ring_write(ring,
++ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_IB_SIZE), 0, 0, PACKETJ_TYPE0));
++ amdgpu_ring_write(ring, ib->length_dw);
++
++ amdgpu_ring_write(ring,
++ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
++ amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr));
++
++ amdgpu_ring_write(ring,
++ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
++ amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr));
++
++ amdgpu_ring_write(ring,
++ PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2));
++ amdgpu_ring_write(ring, 0);
++
++ amdgpu_ring_write(ring,
++ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
++ amdgpu_ring_write(ring, 0x01400200);
++
++ amdgpu_ring_write(ring,
++ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
++ amdgpu_ring_write(ring, 0x2);
++
++ amdgpu_ring_write(ring,
++ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_STATUS), 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3));
++ amdgpu_ring_write(ring, 0x2);
++}
++
++static void vcn_v1_0_jpeg_ring_emit_reg_wait(struct amdgpu_ring *ring,
++ uint32_t reg, uint32_t val,
++ uint32_t mask)
++{
++ struct amdgpu_device *adev = ring->adev;
++ uint32_t reg_offset = (reg << 2);
++
++ amdgpu_ring_write(ring,
++ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
++ amdgpu_ring_write(ring, 0x01400200);
++
++ amdgpu_ring_write(ring,
++ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
++ amdgpu_ring_write(ring, val);
++
++ amdgpu_ring_write(ring,
++ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
++ if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
++ ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
++ amdgpu_ring_write(ring, 0);
++ amdgpu_ring_write(ring,
++ PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3));
++ } else {
++ amdgpu_ring_write(ring, reg_offset);
++ amdgpu_ring_write(ring,
++ PACKETJ(0, 0, 0, PACKETJ_TYPE3));
++ }
++ amdgpu_ring_write(ring, mask);
++}
++
++static void vcn_v1_0_jpeg_ring_emit_vm_flush(struct amdgpu_ring *ring,
++ unsigned vmid, uint64_t pd_addr)
++{
++ struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
++ uint32_t data0, data1, mask;
++
++ pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
++
++ /* wait for register write */
++ data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
++ data1 = lower_32_bits(pd_addr);
++ mask = 0xffffffff;
++ vcn_v1_0_jpeg_ring_emit_reg_wait(ring, data0, data1, mask);
++}
++
++static void vcn_v1_0_jpeg_ring_emit_wreg(struct amdgpu_ring *ring,
++ uint32_t reg, uint32_t val)
++{
++ struct amdgpu_device *adev = ring->adev;
++ uint32_t reg_offset = (reg << 2);
++
++ amdgpu_ring_write(ring,
++ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
++ if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
++ ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
++ amdgpu_ring_write(ring, 0);
++ amdgpu_ring_write(ring,
++ PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0));
++ } else {
++ amdgpu_ring_write(ring, reg_offset);
++ amdgpu_ring_write(ring,
++ PACKETJ(0, 0, 0, PACKETJ_TYPE0));
++ }
++ amdgpu_ring_write(ring, val);
++}
++
++static void vcn_v1_0_jpeg_ring_nop(struct amdgpu_ring *ring, uint32_t count)
++{
++ int i;
++
++ WARN_ON(ring->wptr % 2 || count % 2);
++
++ for (i = 0; i < count / 2; i++) {
++ amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
++ amdgpu_ring_write(ring, 0);
++ }
++}
++
+ static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+--
+2.7.4
+
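The emit helpers above all drive the JPEG ring the same way: every register access is a pair of dwords, a PACKETJ header selecting the target register followed by a payload dword, with mmUVD_JRBC_EXTERNAL_REG_BASE acting as an indirection when the register offset falls outside the window the header can address directly. A condensed sketch of that pattern, mirroring vcn_v1_0_jpeg_ring_emit_wreg() from the hunk above (illustrative only; the helper name is made up and the offset ranges are copied from the hunk, not re-checked against the register spec):

static void jpeg_ring_wreg_sketch(struct amdgpu_ring *ring,
                                  uint32_t reg, uint32_t val)
{
        struct amdgpu_device *adev = ring->adev;   /* used by SOC15_REG_OFFSET() */
        uint32_t reg_offset = reg << 2;            /* register index -> byte offset */

        /* header dword: start by addressing the external register base */
        amdgpu_ring_write(ring,
                PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE),
                        0, 0, PACKETJ_TYPE0));

        if ((reg_offset >= 0x1f800 && reg_offset <= 0x21fff) ||
            (reg_offset >= 0x1e000 && reg_offset <= 0x1e1ff)) {
                /* offset fits in the PACKETJ header: clear the base and
                 * address the register directly in the next header */
                amdgpu_ring_write(ring, 0);
                amdgpu_ring_write(ring,
                        PACKETJ(reg_offset >> 2, 0, 0, PACKETJ_TYPE0));
        } else {
                /* otherwise program the byte offset into the external base
                 * and write relative to it */
                amdgpu_ring_write(ring, reg_offset);
                amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
        }

        amdgpu_ring_write(ring, val);              /* payload dword */
}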
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4603-drm-amdgpu-set-jpeg-ring-functions.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4603-drm-amdgpu-set-jpeg-ring-functions.patch
new file mode 100644
index 00000000..0db2e4ef
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4603-drm-amdgpu-set-jpeg-ring-functions.patch
@@ -0,0 +1,91 @@
+From bad91ddc41b9de711eca71ae5881886bf0f3965c Mon Sep 17 00:00:00 2001
+From: Boyuan Zhang <boyuan.zhang@amd.com>
+Date: Wed, 30 May 2018 14:23:33 -0400
+Subject: [PATCH 4603/5725] drm/amdgpu: set jpeg ring functions
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Set all vcn jpeg ring function pointers.
+
+Signed-off-by: Boyuan Zhang <boyuan.zhang@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 40 +++++++++++++++++++++++++++++++++++
+ 1 file changed, 40 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index 32bd4c6..318c9fc 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -38,6 +38,7 @@
+ static int vcn_v1_0_stop(struct amdgpu_device *adev);
+ static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
+ static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
++static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev);
+ static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
+
+ /**
+@@ -55,6 +56,7 @@ static int vcn_v1_0_early_init(void *handle)
+
+ vcn_v1_0_set_dec_ring_funcs(adev);
+ vcn_v1_0_set_enc_ring_funcs(adev);
++ vcn_v1_0_set_jpeg_ring_funcs(adev);
+ vcn_v1_0_set_irq_funcs(adev);
+
+ return 0;
+@@ -1597,6 +1599,38 @@ static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
+ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ };
+
++static const struct amdgpu_ring_funcs vcn_v1_0_jpeg_ring_vm_funcs = {
++ .type = AMDGPU_RING_TYPE_VCN_JPEG,
++ .align_mask = 0xf,
++ .nop = PACKET0(0x81ff, 0),
++ .support_64bit_ptrs = false,
++ .vmhub = AMDGPU_MMHUB,
++ .get_rptr = vcn_v1_0_jpeg_ring_get_rptr,
++ .get_wptr = vcn_v1_0_jpeg_ring_get_wptr,
++ .set_wptr = vcn_v1_0_jpeg_ring_set_wptr,
++ .emit_frame_size =
++ 6 + 6 + /* hdp invalidate / flush */
++ SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
++ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
++ 8 + /* vcn_v1_0_dec_ring_emit_vm_flush */
++ 14 + 14 + /* vcn_v1_0_dec_ring_emit_fence x2 vm fence */
++ 6,
++ .emit_ib_size = 22, /* vcn_v1_0_dec_ring_emit_ib */
++ .emit_ib = vcn_v1_0_jpeg_ring_emit_ib,
++ .emit_fence = vcn_v1_0_jpeg_ring_emit_fence,
++ .emit_vm_flush = vcn_v1_0_jpeg_ring_emit_vm_flush,
++ //.test_ring
++ //.test_ib
++ .insert_nop = vcn_v1_0_jpeg_ring_nop,
++ .insert_start = vcn_v1_0_jpeg_ring_insert_start,
++ .insert_end = vcn_v1_0_jpeg_ring_insert_end,
++ .pad_ib = amdgpu_ring_generic_pad_ib,
++ .begin_use = amdgpu_vcn_ring_begin_use,
++ .end_use = amdgpu_vcn_ring_end_use,
++ .emit_wreg = vcn_v1_0_jpeg_ring_emit_wreg,
++ .emit_reg_wait = vcn_v1_0_jpeg_ring_emit_reg_wait,
++};
++
+ static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
+ {
+ adev->vcn.ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs;
+@@ -1613,6 +1647,12 @@ static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev)
+ DRM_INFO("VCN encode is enabled in VM mode\n");
+ }
+
++static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev)
++{
++ adev->vcn.ring_jpeg.funcs = &vcn_v1_0_jpeg_ring_vm_funcs;
++ DRM_INFO("VCN jpeg decode is enabled in VM mode\n");
++}
++
+ static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
+ .set = vcn_v1_0_set_interrupt_state,
+ .process = vcn_v1_0_process_interrupt,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4604-drm-amdgpu-add-vcn-jpeg-irq-support.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4604-drm-amdgpu-add-vcn-jpeg-irq-support.patch
new file mode 100644
index 00000000..52bfedfc
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4604-drm-amdgpu-add-vcn-jpeg-irq-support.patch
@@ -0,0 +1,45 @@
+From 94d90b40394ec6814377f18589696e779ee131f6 Mon Sep 17 00:00:00 2001
+From: Boyuan Zhang <boyuan.zhang@amd.com>
+Date: Wed, 30 May 2018 14:42:33 -0400
+Subject: [PATCH 4604/5725] drm/amdgpu: add vcn jpeg irq support
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Add vcn jpeg irq support.
+
+Signed-off-by: Boyuan Zhang <boyuan.zhang@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index 318c9fc..4426393 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -88,6 +88,11 @@ static int vcn_v1_0_sw_init(void *handle)
+ return r;
+ }
+
++ /* VCN JPEG TRAP */
++ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 126, &adev->vcn.irq);
++ if (r)
++ return r;
++
+ r = amdgpu_vcn_sw_init(adev);
+ if (r)
+ return r;
+@@ -1475,6 +1480,9 @@ static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
+ case 120:
+ amdgpu_fence_process(&adev->vcn.ring_enc[1]);
+ break;
++ case 126:
++ amdgpu_fence_process(&adev->vcn.ring_jpeg);
++ break;
+ default:
+ DRM_ERROR("Unhandled interrupt: %d %d\n",
+ entry->src_id, entry->src_data[0]);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4605-drm-amdgpu-initialize-vcn-jpeg-ring.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4605-drm-amdgpu-initialize-vcn-jpeg-ring.patch
new file mode 100644
index 00000000..40e70996
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4605-drm-amdgpu-initialize-vcn-jpeg-ring.patch
@@ -0,0 +1,67 @@
+From d8cafcaa0e23ee60b242edbd2e75a2f77d0ba35b Mon Sep 17 00:00:00 2001
+From: Boyuan Zhang <boyuan.zhang@amd.com>
+Date: Wed, 30 May 2018 14:47:39 -0400
+Subject: [PATCH 4605/5725] drm/amdgpu: initialize vcn jpeg ring
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Add implementations for vcn jpeg ring initialization
+
+Signed-off-by: Boyuan Zhang <boyuan.zhang@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 23 +++++++++++++++++++++++
+ 1 file changed, 23 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index 4426393..9ded8ef 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -125,6 +125,12 @@ static int vcn_v1_0_sw_init(void *handle)
+ return r;
+ }
+
++ ring = &adev->vcn.ring_jpeg;
++ sprintf(ring->name, "vcn_jpeg");
++ r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
++ if (r)
++ return r;
++
+ return r;
+ }
+
+@@ -179,6 +185,14 @@ static int vcn_v1_0_hw_init(void *handle)
+ }
+ }
+
++ ring = &adev->vcn.ring_jpeg;
++ ring->ready = true;
++ r = amdgpu_ring_test_ring(ring);
++ if (r) {
++ ring->ready = false;
++ goto done;
++ }
++
+ done:
+ if (!r)
+ DRM_INFO("VCN decode and encode initialized successfully.\n");
+@@ -758,6 +772,15 @@ static int vcn_v1_0_start(struct amdgpu_device *adev)
+ WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
+
++ ring = &adev->vcn.ring_jpeg;
++ WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
++ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
++ WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr));
++ WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, upper_32_bits(ring->gpu_addr));
++ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, 0);
++ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, 0);
++ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, 0x00000002L);
++
+ return 0;
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4606-drm-amdgpu-implement-patch-for-fixing-a-known-bug.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4606-drm-amdgpu-implement-patch-for-fixing-a-known-bug.patch
new file mode 100644
index 00000000..90335bc9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4606-drm-amdgpu-implement-patch-for-fixing-a-known-bug.patch
@@ -0,0 +1,134 @@
+From f9dbee6ad7ff29c4dfd78657fb238d1e8b8a05cd Mon Sep 17 00:00:00 2001
+From: Boyuan Zhang <boyuan.zhang@amd.com>
+Date: Wed, 30 May 2018 14:57:16 -0400
+Subject: [PATCH 4606/5725] drm/amdgpu: implement patch for fixing a known bug
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Implement a patch to manually reset the read pointer.
+
+v2: using ring assignment instead of amdgpu_ring_write. adding comments
+for each step in the patch function.
+v3: fixing a typo bug.
+v4: fixing a bug in v3.
+
+Signed-off-by: Boyuan Zhang <boyuan.zhang@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 92 +++++++++++++++++++++++++++++++++++
+ 1 file changed, 92 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index 9ded8ef..5adb2d9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -40,6 +40,7 @@ static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
+ static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
+ static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev);
+ static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
++static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr);
+
+ /**
+ * vcn_v1_0_early_init - set function pointers
+@@ -1479,6 +1480,97 @@ static void vcn_v1_0_jpeg_ring_nop(struct amdgpu_ring *ring, uint32_t count)
+ }
+ }
+
++static void vcn_v1_0_jpeg_ring_patch_wreg(struct amdgpu_ring *ring, uint32_t *ptr, uint32_t reg_offset, uint32_t val)
++{
++ struct amdgpu_device *adev = ring->adev;
++ ring->ring[(*ptr)++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0);
++ if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
++ ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
++ ring->ring[(*ptr)++] = 0;
++ ring->ring[(*ptr)++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0);
++ } else {
++ ring->ring[(*ptr)++] = reg_offset;
++ ring->ring[(*ptr)++] = PACKETJ(0, 0, 0, PACKETJ_TYPE0);
++ }
++ ring->ring[(*ptr)++] = val;
++}
++
++static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr)
++{
++ struct amdgpu_device *adev = ring->adev;
++
++ uint32_t reg, reg_offset, val, mask, i;
++
++ // 1st: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW
++ reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW);
++ reg_offset = (reg << 2);
++ val = lower_32_bits(ring->gpu_addr);
++ vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
++
++ // 2nd: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH
++ reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH);
++ reg_offset = (reg << 2);
++ val = upper_32_bits(ring->gpu_addr);
++ vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
++
++ // 3rd to 5th: issue MEM_READ commands
++ for (i = 0; i <= 2; i++) {
++ ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE2);
++ ring->ring[ptr++] = 0;
++ }
++
++ // 6th: program mmUVD_JRBC_RB_CNTL register to enable NO_FETCH and RPTR write ability
++ reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
++ reg_offset = (reg << 2);
++ val = 0x13;
++ vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
++
++ // 7th: program mmUVD_JRBC_RB_REF_DATA
++ reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA);
++ reg_offset = (reg << 2);
++ val = 0x1;
++ vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
++
++ // 8th: issue conditional register read mmUVD_JRBC_RB_CNTL
++ reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
++ reg_offset = (reg << 2);
++ val = 0x1;
++ mask = 0x1;
++
++ ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0);
++ ring->ring[ptr++] = 0x01400200;
++ ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0);
++ ring->ring[ptr++] = val;
++ ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0);
++ if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
++ ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
++ ring->ring[ptr++] = 0;
++ ring->ring[ptr++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3);
++ } else {
++ ring->ring[ptr++] = reg_offset;
++ ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE3);
++ }
++ ring->ring[ptr++] = mask;
++
++ //9th to 21st: insert no-op
++ for (i = 0; i <= 12; i++) {
++ ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
++ ring->ring[ptr++] = 0;
++ }
++
++ //22nd: reset mmUVD_JRBC_RB_RPTR
++ reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_RPTR);
++ reg_offset = (reg << 2);
++ val = 0;
++ vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
++
++ //23rd: program mmUVD_JRBC_RB_CNTL to disable no_fetch
++ reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
++ reg_offset = (reg << 2);
++ val = 0x12;
++ vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
++}
++
+ static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4607-drm-amdgpu-define-and-add-extra-dword-for-jpeg-ring.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4607-drm-amdgpu-define-and-add-extra-dword-for-jpeg-ring.patch
new file mode 100644
index 00000000..8a61f26e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4607-drm-amdgpu-define-and-add-extra-dword-for-jpeg-ring.patch
@@ -0,0 +1,62 @@
+From 874f00cca5d3af57181b4cb6cd41dc107a3d1531 Mon Sep 17 00:00:00 2001
+From: Boyuan Zhang <boyuan.zhang@amd.com>
+Date: Fri, 1 Jun 2018 12:30:17 -0400
+Subject: [PATCH 4607/5725] drm/amdgpu: define and add extra dword for jpeg
+ ring
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Define an extra dword count for the jpeg ring. The jpeg ring will allocate these
+extra dwords to store the patch commands for fixing the known issue.
+
+v2: dropping extra_dw for rings other than jpeg.
+
+Signed-off-by: Boyuan Zhang <boyuan.zhang@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 1 +
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 1 +
+ 3 files changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+index c6850b6..19e45a3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+@@ -304,7 +304,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
+ 0xffffffffffffffff : ring->buf_mask;
+ /* Allocate ring buffer */
+ if (ring->ring_obj == NULL) {
+- r = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
++ r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_dw, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &ring->ring_obj,
+ &ring->gpu_addr,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+index df20b61..1c7ffc1 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+@@ -112,6 +112,7 @@ struct amdgpu_ring_funcs {
+ u32 nop;
+ bool support_64bit_ptrs;
+ unsigned vmhub;
++ unsigned extra_dw;
+
+ /* ring read/write ptr handling */
+ u64 (*get_rptr)(struct amdgpu_ring *ring);
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index 5adb2d9..24a869f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -1728,6 +1728,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_jpeg_ring_vm_funcs = {
+ .nop = PACKET0(0x81ff, 0),
+ .support_64bit_ptrs = false,
+ .vmhub = AMDGPU_MMHUB,
++ .extra_dw = 64,
+ .get_rptr = vcn_v1_0_jpeg_ring_get_rptr,
+ .get_wptr = vcn_v1_0_jpeg_ring_get_wptr,
+ .set_wptr = vcn_v1_0_jpeg_ring_set_wptr,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4608-drm-amdgpu-add-patch-to-jpeg-ring.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4608-drm-amdgpu-add-patch-to-jpeg-ring.patch
new file mode 100644
index 00000000..39f9e99a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4608-drm-amdgpu-add-patch-to-jpeg-ring.patch
@@ -0,0 +1,40 @@
+From 9c0b1624cf606ed507741592a2be3f1eb14b6036 Mon Sep 17 00:00:00 2001
+From: Boyuan Zhang <boyuan.zhang@amd.com>
+Date: Wed, 30 May 2018 15:19:52 -0400
+Subject: [PATCH 4608/5725] drm/amdgpu: add patch to jpeg ring
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Add patch commands to the jpeg ring by calling the set patch ring function.
+
+v2: remove modifications on max_dw, buf_mask and ptr_mask, since we are
+now using extra_dw for jpeg ring.
+
+Signed-off-by: Boyuan Zhang <boyuan.zhang@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index 24a869f..a7a166f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -782,6 +782,13 @@ static int vcn_v1_0_start(struct amdgpu_device *adev)
+ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, 0);
+ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, 0x00000002L);
+
++ /* initialize wptr */
++ ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
++
++ /* copy patch commands to the jpeg ring */
++ vcn_v1_0_jpeg_ring_set_patch_ring(ring,
++ (ring->wptr + ring->max_dw * amdgpu_sched_hw_submission));
++
+ return 0;
+ }
+
+--
+2.7.4
+
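Together with the two previous patches, the pre-built commands end up in the 64 extra dwords reserved behind the area the scheduler is allowed to fill, so normal submissions should not overwrite them. The offset handed to vcn_v1_0_jpeg_ring_set_patch_ring() above reduces to the arithmetic below (a sketch with a made-up helper name, not driver code):

/* Start of the patch-command region, per the call in vcn_v1_0_start() above:
 * the scheduler may fill up to max_dw dwords for each of the hw_submission
 * in-flight jobs, and the pre-built commands sit right behind that area,
 * inside the extra_dw space added in the previous patch. */
static uint32_t jpeg_patch_region_start(const struct amdgpu_ring *ring,
                                        unsigned int hw_submission)
{
        return (uint32_t)(ring->wptr + ring->max_dw * hw_submission);
}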
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4609-drm-amdgpu-add-vcn-jpeg-sw-finish.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4609-drm-amdgpu-add-vcn-jpeg-sw-finish.patch
new file mode 100644
index 00000000..3df6a71b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4609-drm-amdgpu-add-vcn-jpeg-sw-finish.patch
@@ -0,0 +1,34 @@
+From 9e029ced1170699f32db4292d770d62a14642ad4 Mon Sep 17 00:00:00 2001
+From: Boyuan Zhang <boyuan.zhang@amd.com>
+Date: Wed, 30 May 2018 15:32:16 -0400
+Subject: [PATCH 4609/5725] drm/amdgpu: add vcn jpeg sw finish
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Add software finish for vcn jpeg ring.
+
+v2: remove unnecessary scheduler entity.
+
+Signed-off-by: Boyuan Zhang <boyuan.zhang@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+index 8f2785a..50e2315 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+@@ -120,6 +120,8 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
+ for (i = 0; i < adev->vcn.num_enc_rings; ++i)
+ amdgpu_ring_fini(&adev->vcn.ring_enc[i]);
+
++ amdgpu_ring_fini(&adev->vcn.ring_jpeg);
++
+ release_firmware(adev->vcn.fw);
+
+ return 0;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4610-drm-amdgpu-add-vcn-jpeg-ring-test.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4610-drm-amdgpu-add-vcn-jpeg-ring-test.patch
new file mode 100644
index 00000000..202e3796
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4610-drm-amdgpu-add-vcn-jpeg-ring-test.patch
@@ -0,0 +1,93 @@
+From 9fdac1484e5147fd9cf5d584f962170e91933b44 Mon Sep 17 00:00:00 2001
+From: Boyuan Zhang <boyuan.zhang@amd.com>
+Date: Wed, 30 May 2018 15:49:51 -0400
+Subject: [PATCH 4610/5725] drm/amdgpu: add vcn jpeg ring test
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Add a ring test for vcn jpeg.
+
+Signed-off-by: Boyuan Zhang <boyuan.zhang@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 40 +++++++++++++++++++++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 2 ++
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 2 +-
+ 3 files changed, 43 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+index 50e2315..5bacf80 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+@@ -583,3 +583,43 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+ dma_fence_put(fence);
+ return r;
+ }
++
++int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
++{
++ struct amdgpu_device *adev = ring->adev;
++ uint32_t tmp = 0;
++ unsigned i;
++ int r;
++
++ WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0xCAFEDEAD);
++ r = amdgpu_ring_alloc(ring, 3);
++
++ if (r) {
++ DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
++ ring->idx, r);
++ return r;
++ }
++
++ amdgpu_ring_write(ring,
++ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0, 0, 0));
++ amdgpu_ring_write(ring, 0xDEADBEEF);
++ amdgpu_ring_commit(ring);
++
++ for (i = 0; i < adev->usec_timeout; i++) {
++ tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID));
++ if (tmp == 0xDEADBEEF)
++ break;
++ DRM_UDELAY(1);
++ }
++
++ if (i < adev->usec_timeout) {
++ DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
++ ring->idx, i);
++ } else {
++ DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
++ ring->idx, tmp);
++ r = -EINVAL;
++ }
++
++ return r;
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+index 6f3bed1..0447fae 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+@@ -84,4 +84,6 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout);
+ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring);
+ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout);
+
++int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring);
++
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index a7a166f..3d58236 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -1750,7 +1750,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_jpeg_ring_vm_funcs = {
+ .emit_ib = vcn_v1_0_jpeg_ring_emit_ib,
+ .emit_fence = vcn_v1_0_jpeg_ring_emit_fence,
+ .emit_vm_flush = vcn_v1_0_jpeg_ring_emit_vm_flush,
+- //.test_ring
++ .test_ring = amdgpu_vcn_jpeg_ring_test_ring,
+ //.test_ib
+ .insert_nop = vcn_v1_0_jpeg_ring_nop,
+ .insert_start = vcn_v1_0_jpeg_ring_insert_start,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4611-drm-amdgpu-add-vcn-jpeg-ib-test.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4611-drm-amdgpu-add-vcn-jpeg-ib-test.patch
new file mode 100644
index 00000000..18e86cd7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4611-drm-amdgpu-add-vcn-jpeg-ib-test.patch
@@ -0,0 +1,139 @@
+From bd4990c746a7060fab8bdf177fa942b6796a1685 Mon Sep 17 00:00:00 2001
+From: Boyuan Zhang <boyuan.zhang@amd.com>
+Date: Wed, 30 May 2018 15:56:43 -0400
+Subject: [PATCH 4611/5725] drm/amdgpu: add vcn jpeg ib test
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Add an ib test for vcn jpeg.
+
+Signed-off-by: Boyuan Zhang <boyuan.zhang@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 86 +++++++++++++++++++++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 1 +
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 2 +-
+ 3 files changed, 88 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+index 5bacf80..7ea85c9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+@@ -623,3 +623,89 @@ int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
+
+ return r;
+ }
++
++static int amdgpu_vcn_jpeg_set_reg(struct amdgpu_ring *ring, uint32_t handle,
++ struct dma_fence **fence)
++{
++ struct amdgpu_device *adev = ring->adev;
++ struct amdgpu_job *job;
++ struct amdgpu_ib *ib;
++ struct dma_fence *f = NULL;
++ const unsigned ib_size_dw = 16;
++ int i, r;
++
++ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
++ if (r)
++ return r;
++
++ ib = &job->ibs[0];
++
++ ib->ptr[0] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH), 0, 0, PACKETJ_TYPE0);
++ ib->ptr[1] = 0xDEADBEEF;
++ for (i = 2; i < 16; i += 2) {
++ ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
++ ib->ptr[i+1] = 0;
++ }
++ ib->length_dw = 16;
++
++ r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
++ job->fence = dma_fence_get(f);
++ if (r)
++ goto err;
++
++ amdgpu_job_free(job);
++ if (fence)
++ *fence = dma_fence_get(f);
++ dma_fence_put(f);
++
++ return 0;
++
++err:
++ amdgpu_job_free(job);
++ return r;
++}
++
++int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
++{
++ struct amdgpu_device *adev = ring->adev;
++ uint32_t tmp = 0;
++ unsigned i;
++ struct dma_fence *fence = NULL;
++ long r = 0;
++
++ r = amdgpu_vcn_jpeg_set_reg(ring, 1, &fence);
++ if (r) {
++ DRM_ERROR("amdgpu: failed to set jpeg register (%ld).\n", r);
++ goto error;
++ }
++
++ r = dma_fence_wait_timeout(fence, false, timeout);
++ if (r == 0) {
++ DRM_ERROR("amdgpu: IB test timed out.\n");
++ r = -ETIMEDOUT;
++ goto error;
++ } else if (r < 0) {
++ DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
++ goto error;
++ } else
++ r = 0;
++
++ for (i = 0; i < adev->usec_timeout; i++) {
++ tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH));
++ if (tmp == 0xDEADBEEF)
++ break;
++ DRM_UDELAY(1);
++ }
++
++ if (i < adev->usec_timeout)
++ DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
++ else {
++ DRM_ERROR("ib test failed (0x%08X)\n", tmp);
++ r = -EINVAL;
++ }
++
++ dma_fence_put(fence);
++
++error:
++ return r;
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+index 0447fae..0b0b863 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+@@ -85,5 +85,6 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring);
+ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout);
+
+ int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring);
++int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout);
+
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index 3d58236..d0c428f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -1751,7 +1751,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_jpeg_ring_vm_funcs = {
+ .emit_fence = vcn_v1_0_jpeg_ring_emit_fence,
+ .emit_vm_flush = vcn_v1_0_jpeg_ring_emit_vm_flush,
+ .test_ring = amdgpu_vcn_jpeg_ring_test_ring,
+- //.test_ib
++ .test_ib = amdgpu_vcn_jpeg_ring_test_ib,
+ .insert_nop = vcn_v1_0_jpeg_ring_nop,
+ .insert_start = vcn_v1_0_jpeg_ring_insert_start,
+ .insert_end = vcn_v1_0_jpeg_ring_insert_end,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4612-drm-amdgpu-enable-vcn-jpeg-ib-test.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4612-drm-amdgpu-enable-vcn-jpeg-ib-test.patch
new file mode 100644
index 00000000..561c7c96
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4612-drm-amdgpu-enable-vcn-jpeg-ib-test.patch
@@ -0,0 +1,33 @@
+From b08df78526f05f3cfb3ed9e4901e40b114deb0b8 Mon Sep 17 00:00:00 2001
+From: Boyuan Zhang <boyuan.zhang@amd.com>
+Date: Tue, 1 May 2018 14:40:24 -0400
+Subject: [PATCH 4612/5725] drm/amdgpu: enable vcn jpeg ib test
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Enable vcn jpeg ib ring test in amdgpu_ib.c
+
+Signed-off-by: Boyuan Zhang <boyuan.zhang@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+index 3f7afcf..5a21f9c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+@@ -360,7 +360,8 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
+ ring->funcs->type == AMDGPU_RING_TYPE_VCE ||
+ ring->funcs->type == AMDGPU_RING_TYPE_UVD_ENC ||
+ ring->funcs->type == AMDGPU_RING_TYPE_VCN_DEC ||
+- ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
++ ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
++ ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
+ tmo = tmo_mm;
+ else
+ tmo = tmo_gfx;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4613-drm-amdgpu-add-AMDGPU_HW_IP_VCN_JPEG-to-info-query.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4613-drm-amdgpu-add-AMDGPU_HW_IP_VCN_JPEG-to-info-query.patch
new file mode 100644
index 00000000..8cdb6e7b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4613-drm-amdgpu-add-AMDGPU_HW_IP_VCN_JPEG-to-info-query.patch
@@ -0,0 +1,60 @@
+From cd9a83bbc761e3a67d7f3daebd616286cdc89ec9 Mon Sep 17 00:00:00 2001
+From: Boyuan Zhang <boyuan.zhang@amd.com>
+Date: Tue, 1 May 2018 14:58:25 -0400
+Subject: [PATCH 4613/5725] drm/amdgpu: add AMDGPU_HW_IP_VCN_JPEG to info query
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Add AMDGPU_HW_IP_VCN_JPEG to info query
+
+Signed-off-by: Boyuan Zhang <boyuan.zhang@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 7 +++++++
+ include/uapi/drm/amdgpu_drm.h | 3 ++-
+ 2 files changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index a39919c..a1a53c6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -397,6 +397,12 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+ ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
+ ib_size_alignment = 1;
+ break;
++ case AMDGPU_HW_IP_VCN_JPEG:
++ type = AMD_IP_BLOCK_TYPE_VCN;
++ ring_mask = adev->vcn.ring_jpeg.ready ? 1 : 0;
++ ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
++ ib_size_alignment = 16;
++ break;
+ default:
+ return -EINVAL;
+ }
+@@ -441,6 +447,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+ break;
+ case AMDGPU_HW_IP_VCN_DEC:
+ case AMDGPU_HW_IP_VCN_ENC:
++ case AMDGPU_HW_IP_VCN_JPEG:
+ type = AMD_IP_BLOCK_TYPE_VCN;
+ break;
+ default:
+diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
+index ad3e1f2..f12f57a 100644
+--- a/include/uapi/drm/amdgpu_drm.h
++++ b/include/uapi/drm/amdgpu_drm.h
+@@ -550,7 +550,8 @@ struct drm_amdgpu_gem_va {
+ #define AMDGPU_HW_IP_UVD_ENC 5
+ #define AMDGPU_HW_IP_VCN_DEC 6
+ #define AMDGPU_HW_IP_VCN_ENC 7
+-#define AMDGPU_HW_IP_NUM 8
++#define AMDGPU_HW_IP_VCN_JPEG 8
++#define AMDGPU_HW_IP_NUM 9
+
+ #define AMDGPU_HW_IP_INSTANCE_MAX_COUNT 1
+
+--
+2.7.4
+
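With the info query in place, userspace can check whether a JPEG ring is actually exposed before queuing work on it. A hypothetical probe built on libdrm's amdgpu_query_hw_ip_info() (assumed API and field names; not part of this patch series, and it requires headers that already define AMDGPU_HW_IP_VCN_JPEG):

#include <amdgpu.h>
#include <amdgpu_drm.h>

/* Returns non-zero when the kernel reports at least one usable VCN JPEG ring. */
static int has_vcn_jpeg(amdgpu_device_handle dev)
{
        struct drm_amdgpu_info_hw_ip info = {0};

        if (amdgpu_query_hw_ip_info(dev, AMDGPU_HW_IP_VCN_JPEG, 0, &info))
                return 0;

        /* available_rings mirrors the ring_mask filled in amdgpu_info_ioctl() */
        return info.available_rings != 0;
}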
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4614-drm-amdgpu-add-AMDGPU_HW_IP_VCN_JPEG-to-queue-mgr.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4614-drm-amdgpu-add-AMDGPU_HW_IP_VCN_JPEG-to-queue-mgr.patch
new file mode 100644
index 00000000..460c94a6
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4614-drm-amdgpu-add-AMDGPU_HW_IP_VCN_JPEG-to-queue-mgr.patch
@@ -0,0 +1,51 @@
+From 2054d90f7d920d8c41189da351cc95806ee80b09 Mon Sep 17 00:00:00 2001
+From: Boyuan Zhang <boyuan.zhang@amd.com>
+Date: Tue, 1 May 2018 14:59:12 -0400
+Subject: [PATCH 4614/5725] drm/amdgpu: add AMDGPU_HW_IP_VCN_JPEG to queue mgr
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Add AMDGPU_HW_IP_VCN_JPEG to queue mgr
+
+Signed-off-by: Boyuan Zhang <boyuan.zhang@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
+index 8af16e8..ea9850c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
+@@ -96,6 +96,9 @@ static int amdgpu_identity_map(struct amdgpu_device *adev,
+ case AMDGPU_HW_IP_VCN_ENC:
+ *out_ring = &adev->vcn.ring_enc[ring];
+ break;
++ case AMDGPU_HW_IP_VCN_JPEG:
++ *out_ring = &adev->vcn.ring_jpeg;
++ break;
+ default:
+ *out_ring = NULL;
+ DRM_ERROR("unknown HW IP type: %d\n", mapper->hw_ip);
+@@ -260,6 +263,9 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
+ case AMDGPU_HW_IP_VCN_ENC:
+ ip_num_rings = adev->vcn.num_enc_rings;
+ break;
++ case AMDGPU_HW_IP_VCN_JPEG:
++ ip_num_rings = 1;
++ break;
+ default:
+ DRM_DEBUG("unknown ip type: %d\n", hw_ip);
+ return -EINVAL;
+@@ -287,6 +293,7 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
+ case AMDGPU_HW_IP_UVD_ENC:
+ case AMDGPU_HW_IP_VCN_DEC:
+ case AMDGPU_HW_IP_VCN_ENC:
++ case AMDGPU_HW_IP_VCN_JPEG:
+ r = amdgpu_identity_map(adev, mapper, ring, out_ring);
+ break;
+ case AMDGPU_HW_IP_DMA:
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4615-drm-amdgpu-Grab-put-runtime-PM-references-in-atomic_.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4615-drm-amdgpu-Grab-put-runtime-PM-references-in-atomic_.patch
new file mode 100644
index 00000000..53a182ba
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4615-drm-amdgpu-Grab-put-runtime-PM-references-in-atomic_.patch
@@ -0,0 +1,95 @@
+From 6158793586d3c3be6c015b9d2f18f16e37d95132 Mon Sep 17 00:00:00 2001
+From: Lyude Paul <lyude@redhat.com>
+Date: Mon, 4 Jun 2018 15:35:03 -0400
+Subject: [PATCH 4615/5725] drm/amdgpu: Grab/put runtime PM references in
+ atomic_commit_tail()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+So, unfortunately I recently made the discovery that in the upstream
+kernel, the only reason that amdgpu is not currently suffering from
+issues with runtime PM putting the GPU into suspend while it's driving
+displays is due to the fact that on most prime systems, we have sound
+devices associated with the GPU that hold their own runtime PM ref for
+the GPU.
+
+What this means however, is that in the event that there isn't any kind
+of sound device active (which can easily be reproduced by building a
+kernel with sound drivers disabled), the GPU will fall asleep even when
+there are displays active. This appears to be in part due to the fact that
+amdgpu has not actually ever relied on its rpm_idle() function to be
+the only thing keeping it running, and normally grabs its own power
+references whenever there are displays active (as can be seen with the
+original pre-DC codepath in amdgpu_display_crtc_set_config() in
+amdgpu_display.c). This means it's very likely that this bug was
+introduced during the switch over to DC.
+
+So to fix this, we start grabbing runtime PM references every time we
+enable a previously disabled CRTC in atomic_commit_tail(). This appears
+to be the correct solution, as it matches up with what i915 does in
+i915/intel_runtime_pm.c.
+
+The one side effect of this is that we ignore the variable that the
+pre-DC code used to use for tracking when it needed runtime PM refs,
+adev->have_disp_power_ref. This is mainly because there's no way for a
+driver to tell whether or not all of its CRTCs are enabled or disabled
+when we've begun committing an atomic state, as there may be CRTC
+commits happening in parallel that aren't contained within the atomic
+state being committed. So, it's safer to just get/put a reference for
+each CRTC being enabled or disabled in the new atomic state.
+
+Signed-off-by: Lyude Paul <lyude@redhat.com>
+Acked-by: Christian König <christian.koenig@amd.com>.
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+
+Conflicts:
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+
+Change-Id: I726d59525f00acfd1b13501271f0feb23c0d1af5
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 792fd09..c37871a 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -46,6 +46,7 @@
+ #include <linux/moduleparam.h>
+ #include <linux/version.h>
+ #include <linux/types.h>
++#include <linux/pm_runtime.h>
+
+ #include <drm/drmP.h>
+ #include <drm/drm_atomic.h>
+@@ -4466,6 +4467,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+ if (dm_old_crtc_state->stream)
+ remove_stream(adev, acrtc, dm_old_crtc_state->stream);
+
++ pm_runtime_get_noresume(dev->dev);
++
+ acrtc->enabled = true;
+ acrtc->hw_mode = new_crtc_state->mode;
+ crtc->hwmode = new_crtc_state->mode;
+@@ -4595,6 +4598,16 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+ drm_atomic_helper_wait_for_flip_done(dev, state);
+
+ drm_atomic_helper_cleanup_planes(dev, state);
++
++ /* Finally, drop a runtime PM reference for each newly disabled CRTC,
++ * so we can put the GPU into runtime suspend if we're not driving any
++ * displays anymore
++ */
++ pm_runtime_mark_last_busy(dev->dev);
++ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
++ if (old_crtc_state->active && !new_crtc_state->active)
++ pm_runtime_put_autosuspend(dev->dev);
++ }
+ }
+
+
+--
+2.7.4
+
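The pattern the commit describes boils down to taking one runtime PM reference for every CRTC that goes from disabled to enabled and dropping it with autosuspend semantics for every CRTC that goes the other way, so the reference count naturally tracks the number of active displays. A stripped-down sketch of that balancing (illustrative only, not the DM code itself; the real patch takes the reference in atomic_commit_tail() and drops it in the loop shown above):

#include <linux/pm_runtime.h>

/* Called once per CRTC while committing an atomic state (sketch only). */
static void crtc_runtime_pm_balance(struct device *dev,
                                    bool was_active, bool now_active)
{
        if (!was_active && now_active)
                /* an enabled display must keep the GPU out of runtime suspend */
                pm_runtime_get_noresume(dev);

        if (was_active && !now_active) {
                /* display went away: allow autosuspend once the timeout expires */
                pm_runtime_mark_last_busy(dev);
                pm_runtime_put_autosuspend(dev);
        }
}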
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4616-drm-amd-powerplay-fix-wrong-clock-adjust-sequence.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4616-drm-amd-powerplay-fix-wrong-clock-adjust-sequence.patch
new file mode 100644
index 00000000..3210f46d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4616-drm-amd-powerplay-fix-wrong-clock-adjust-sequence.patch
@@ -0,0 +1,49 @@
+From 76a5e22b4a31b667322d1195d300b95a28a39be6 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Wed, 6 Jun 2018 11:54:45 +0800
+Subject: [PATCH 4616/5725] drm/amd/powerplay: fix wrong clock adjust sequence
+
+The clocks should be adjusted after the display configuration has changed.
+Otherwise, the socclk and memclk may be forced to an unnecessarily high
+level.
+
+Change-Id: I0170e968ba1f64d15d4238b658eefc307adc2642
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c | 13 ++++++-------
+ 1 file changed, 6 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
+index 0af13c1..323990b 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
+@@ -265,19 +265,18 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip,
+ if (skip)
+ return 0;
+
+- if (!hwmgr->ps)
+- /*
+- * for vega12/vega20 which does not support power state manager
+- * DAL clock limits should also be honoured
+- */
+- phm_apply_clock_adjust_rules(hwmgr);
+-
+ phm_pre_display_configuration_changed(hwmgr);
+
+ phm_display_configuration_changed(hwmgr);
+
+ if (hwmgr->ps)
+ power_state_management(hwmgr, new_ps);
++ else
++ /*
++ * for vega12/vega20 which does not support power state manager
++ * DAL clock limits should also be honoured
++ */
++ phm_apply_clock_adjust_rules(hwmgr);
+
+ phm_notify_smc_display_config_after_ps_adjustment(hwmgr);
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4617-drm-amdgpu-rename-rmn-to-amn-in-the-MMU-notifier-cod.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4617-drm-amdgpu-rename-rmn-to-amn-in-the-MMU-notifier-cod.patch
new file mode 100644
index 00000000..2ce2b0eb
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4617-drm-amdgpu-rename-rmn-to-amn-in-the-MMU-notifier-cod.patch
@@ -0,0 +1,340 @@
+From 0649da717ce493d05db377795b66e2dfb4ceae34 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Fri, 1 Jun 2018 16:53:11 +0200
+Subject: [PATCH 4617/5725] drm/amdgpu: rename rmn to amn in the MMU notifier
+ code
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Just a copy&paste leftover from radeon.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
+
+Conflicts:
+ drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+
+Change-Id: I5ab6787fad32f873b859588661090e2b54c77a42
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 140 ++++++++++++++++-----------------
+ 1 file changed, 70 insertions(+), 70 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+index 3fc0917..211539f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+@@ -68,7 +68,7 @@ struct amdgpu_mn_node {
+ };
+
+ /**
+- * amdgpu_mn_destroy - destroy the rmn
++ * amdgpu_mn_destroy - destroy the amn
+ *
+ * @work: previously sheduled work item
+ *
+@@ -76,19 +76,19 @@ struct amdgpu_mn_node {
+ */
+ static void amdgpu_mn_destroy(struct work_struct *work)
+ {
+- struct amdgpu_mn *rmn = container_of(work, struct amdgpu_mn, work);
+- struct amdgpu_device *adev = rmn->adev;
++ struct amdgpu_mn *amn = container_of(work, struct amdgpu_mn, work);
++ struct amdgpu_device *adev = amn->adev;
+ struct amdgpu_mn_node *node, *next_node;
+ struct amdgpu_bo *bo, *next_bo;
+
+ mutex_lock(&adev->mn_lock);
+- down_write(&rmn->lock);
+- hash_del(&rmn->node);
++ down_write(&amn->lock);
++ hash_del(&amn->node);
+ rbtree_postorder_for_each_entry_safe(node, next_node,
+ #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)
+- &rmn->objects, it.rb) {
++ &amn->objects, it.rb) {
+ #else
+- &rmn->objects.rb_root, it.rb) {
++ &amn->objects.rb_root, it.rb) {
+ #endif
+ list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
+ bo->mn = NULL;
+@@ -96,10 +96,10 @@ static void amdgpu_mn_destroy(struct work_struct *work)
+ }
+ kfree(node);
+ }
+- up_write(&rmn->lock);
++ up_write(&amn->lock);
+ mutex_unlock(&adev->mn_lock);
+- mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm);
+- kfree(rmn);
++ mmu_notifier_unregister_no_release(&amn->mn, amn->mm);
++ kfree(amn);
+ }
+
+ /**
+@@ -113,9 +113,9 @@ static void amdgpu_mn_destroy(struct work_struct *work)
+ static void amdgpu_mn_release(struct mmu_notifier *mn,
+ struct mm_struct *mm)
+ {
+- struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
+- INIT_WORK(&rmn->work, amdgpu_mn_destroy);
+- schedule_work(&rmn->work);
++ struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
++ INIT_WORK(&amn->work, amdgpu_mn_destroy);
++ schedule_work(&amn->work);
+ }
+
+
+@@ -138,31 +138,31 @@ void amdgpu_mn_unlock(struct amdgpu_mn *mn)
+ }
+
+ /**
+- * amdgpu_mn_read_lock - take the rmn read lock
++ * amdgpu_mn_read_lock - take the amn read lock
+ *
+- * @rmn: our notifier
++ * @amn: our notifier
+ *
+- * Take the rmn read side lock.
++ * Take the amn read side lock.
+ */
+-static void amdgpu_mn_read_lock(struct amdgpu_mn *rmn)
++static void amdgpu_mn_read_lock(struct amdgpu_mn *amn)
+ {
+- mutex_lock(&rmn->read_lock);
+- if (atomic_inc_return(&rmn->recursion) == 1)
+- down_read_non_owner(&rmn->lock);
+- mutex_unlock(&rmn->read_lock);
++ mutex_lock(&amn->read_lock);
++ if (atomic_inc_return(&amn->recursion) == 1)
++ down_read_non_owner(&amn->lock);
++ mutex_unlock(&amn->read_lock);
+ }
+
+ /**
+- * amdgpu_mn_read_unlock - drop the rmn read lock
++ * amdgpu_mn_read_unlock - drop the amn read lock
+ *
+- * @rmn: our notifier
++ * @amn: our notifier
+ *
+- * Drop the rmn read side lock.
++ * Drop the amn read side lock.
+ */
+-static void amdgpu_mn_read_unlock(struct amdgpu_mn *rmn)
++static void amdgpu_mn_read_unlock(struct amdgpu_mn *amn)
+ {
+- if (atomic_dec_return(&rmn->recursion) == 0)
+- up_read_non_owner(&rmn->lock);
++ if (atomic_dec_return(&amn->recursion) == 0)
++ up_read_non_owner(&amn->lock);
+ }
+
+ /**
+@@ -210,15 +210,15 @@ static void amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
+ unsigned long start,
+ unsigned long end)
+ {
+- struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
++ struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
+ struct interval_tree_node *it;
+
+ /* notification is exclusive, but interval is inclusive */
+ end -= 1;
+
+- amdgpu_mn_read_lock(rmn);
++ amdgpu_mn_read_lock(amn);
+
+- it = interval_tree_iter_first(&rmn->objects, start, end);
++ it = interval_tree_iter_first(&amn->objects, start, end);
+ while (it) {
+ struct amdgpu_mn_node *node;
+
+@@ -244,9 +244,9 @@ static void amdgpu_mn_invalidate_range_end_gfx(struct mmu_notifier *mn,
+ unsigned long start,
+ unsigned long end)
+ {
+- struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
++ struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
+
+- amdgpu_mn_read_unlock(rmn);
++ amdgpu_mn_read_unlock(amn);
+ }
+
+ /**
+@@ -332,7 +332,7 @@ struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
+ enum amdgpu_mn_type type)
+ {
+ struct mm_struct *mm = current->mm;
+- struct amdgpu_mn *rmn;
++ struct amdgpu_mn *amn;
+ unsigned long key = AMDGPU_MN_KEY(mm, type);
+ int r;
+ #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)
+@@ -350,48 +350,48 @@ struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
+ #endif
+
+ #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)
+- hash_for_each_possible(adev->mn_hash, rmn, node, node, key)
++ hash_for_each_possible(adev->mn_hash, amn, node, node, key)
+ #else
+- hash_for_each_possible(adev->mn_hash, rmn, node, key)
++ hash_for_each_possible(adev->mn_hash, amn, node, key)
+ #endif
+- if (AMDGPU_MN_KEY(rmn->mm, rmn->type) == key)
++ if (AMDGPU_MN_KEY(amn->mm, amn->type) == key)
+ goto release_locks;
+
+- rmn = kzalloc(sizeof(*rmn), GFP_KERNEL);
+- if (!rmn) {
+- rmn = ERR_PTR(-ENOMEM);
++ amn = kzalloc(sizeof(*amn), GFP_KERNEL);
++ if (!amn) {
++ amn = ERR_PTR(-ENOMEM);
+ goto release_locks;
+ }
+
+- rmn->adev = adev;
+- rmn->mm = mm;
+- init_rwsem(&rmn->lock);
++ amn->adev = adev;
++ amn->mm = mm;
++ init_rwsem(&amn->lock);
+ #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)
+- rmn->objects = RB_ROOT;
++ amn->objects = RB_ROOT;
+ #else
+- rmn->objects = RB_ROOT_CACHED;
++ amn->objects = RB_ROOT_CACHED;
+ #endif
+- rmn->type = type;
+- rmn->mn.ops = &amdgpu_mn_ops[type];
+- mutex_init(&rmn->read_lock);
+- atomic_set(&rmn->recursion, 0);
++ amn->type = type;
++ amn->mn.ops = &amdgpu_mn_ops[type];
++ mutex_init(&amn->read_lock);
++ atomic_set(&amn->recursion, 0);
+
+- r = __mmu_notifier_register(&rmn->mn, mm);
++ r = __mmu_notifier_register(&amn->mn, mm);
+ if (r)
+- goto free_rmn;
++ goto free_amn;
+
+- hash_add(adev->mn_hash, &rmn->node, AMDGPU_MN_KEY(mm, type));
++ hash_add(adev->mn_hash, &amn->node, AMDGPU_MN_KEY(mm, type));
+
+ release_locks:
+ up_write(&mm->mmap_sem);
+ mutex_unlock(&adev->mn_lock);
+
+- return rmn;
++ return amn;
+
+-free_rmn:
++free_amn:
+ up_write(&mm->mmap_sem);
+ mutex_unlock(&adev->mn_lock);
+- kfree(rmn);
++ kfree(amn);
+
+ return ERR_PTR(r);
+ }
+@@ -411,14 +411,14 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ enum amdgpu_mn_type type =
+ bo->kfd_bo ? AMDGPU_MN_TYPE_HSA : AMDGPU_MN_TYPE_GFX;
+- struct amdgpu_mn *rmn;
++ struct amdgpu_mn *amn;
+ struct amdgpu_mn_node *node = NULL, *new_node;
+ struct list_head bos;
+ struct interval_tree_node *it;
+
+- rmn = amdgpu_mn_get(adev, type);
+- if (IS_ERR(rmn))
+- return PTR_ERR(rmn);
++ amn = amdgpu_mn_get(adev, type);
++ if (IS_ERR(amn))
++ return PTR_ERR(amn);
+
+ new_node = kmalloc(sizeof(*new_node), GFP_KERNEL);
+ if (!new_node)
+@@ -426,12 +426,12 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
+
+ INIT_LIST_HEAD(&bos);
+
+- down_write(&rmn->lock);
++ down_write(&amn->lock);
+
+- while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
++ while ((it = interval_tree_iter_first(&amn->objects, addr, end))) {
+ kfree(node);
+ node = container_of(it, struct amdgpu_mn_node, it);
+- interval_tree_remove(&node->it, &rmn->objects);
++ interval_tree_remove(&node->it, &amn->objects);
+ addr = min(it->start, addr);
+ end = max(it->last, end);
+ list_splice(&node->bos, &bos);
+@@ -442,7 +442,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
+ else
+ kfree(new_node);
+
+- bo->mn = rmn;
++ bo->mn = amn;
+
+ node->it.start = addr;
+ node->it.last = end;
+@@ -450,9 +450,9 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
+ list_splice(&bos, &node->bos);
+ list_add(&bo->mn_list, &node->bos);
+
+- interval_tree_insert(&node->it, &rmn->objects);
++ interval_tree_insert(&node->it, &amn->objects);
+
+- up_write(&rmn->lock);
++ up_write(&amn->lock);
+
+ return 0;
+ }
+@@ -467,18 +467,18 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
+ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
+ {
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+- struct amdgpu_mn *rmn;
++ struct amdgpu_mn *amn;
+ struct list_head *head;
+
+ mutex_lock(&adev->mn_lock);
+
+- rmn = bo->mn;
+- if (rmn == NULL) {
++ amn = bo->mn;
++ if (amn == NULL) {
+ mutex_unlock(&adev->mn_lock);
+ return;
+ }
+
+- down_write(&rmn->lock);
++ down_write(&amn->lock);
+
+ /* save the next list entry for later */
+ head = bo->mn_list.next;
+@@ -489,11 +489,11 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
+ if (list_empty(head)) {
+ struct amdgpu_mn_node *node;
+ node = container_of(head, struct amdgpu_mn_node, bos);
+- interval_tree_remove(&node->it, &rmn->objects);
++ interval_tree_remove(&node->it, &amn->objects);
+ kfree(node);
+ }
+
+- up_write(&rmn->lock);
++ up_write(&amn->lock);
+ mutex_unlock(&adev->mn_lock);
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4618-drm-amdgpu-fix-documentation-of-amdgpu_mn.c-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4618-drm-amdgpu-fix-documentation-of-amdgpu_mn.c-v2.patch
new file mode 100644
index 00000000..482cf4c3
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4618-drm-amdgpu-fix-documentation-of-amdgpu_mn.c-v2.patch
@@ -0,0 +1,203 @@
+From 2bb424e80da086b42ea6f6f00a8a7ab1b2fd63d1 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Tue, 5 Jun 2018 11:47:43 +0200
+Subject: [PATCH 4618/5725] drm/amdgpu: fix documentation of amdgpu_mn.c v2
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+And wire it up as well.
+
+v2: improve the wording, fix label mismatch
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 74 ++++++++++++++++++++++++++--------
+ 1 file changed, 58 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+index 211539f..7aaef93 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+@@ -28,6 +28,21 @@
+ * Christian König <christian.koenig@amd.com>
+ */
+
++/**
++ * DOC: MMU Notifier
++ *
++ * For coherent userptr handling, an MMU notifier is registered to inform the
++ * driver about updates to the page tables of a process.
++ *
++ * When somebody tries to invalidate the page tables we block the update until
++ * all operations on the pages in question are completed, then those pages are
++ * marked as accessed and also dirty if it wasn't a read only access.
++ *
++ * New command submissions using the userptrs in question are delayed until all
++ * page table invalidations are completed and we once more see a coherent process
++ * address space.
++ */
++
+ #include <linux/firmware.h>
+ #include <linux/module.h>
+ #include <linux/mmu_notifier.h>
+@@ -38,6 +53,21 @@
+ #include "amdgpu.h"
+ #include "amdgpu_amdkfd.h"
+
++/**
++ * struct amdgpu_mn
++ *
++ * @adev: amdgpu device pointer
++ * @mm: process address space
++ * @mn: MMU notifier structure
++ * @work: destruction work item
++ * @node: hash table node to find structure by adev and mn
++ * @lock: rw semaphore protecting the notifier nodes
++ * @objects: interval tree containing amdgpu_mn_nodes
++ * @read_lock: mutex for recursive locking of @lock
++ * @recursion: depth of recursion
++ *
++ * Data for each amdgpu device and process address space.
++ */
+ struct amdgpu_mn {
+ /* constant after initialisation */
+ struct amdgpu_device *adev;
+@@ -62,13 +92,21 @@ struct amdgpu_mn {
+ atomic_t recursion;
+ };
+
++/**
++ * struct amdgpu_mn_node
++ *
++ * @it: interval node defining start-last of the affected address range
++ * @bos: list of all BOs in the affected address range
++ *
++ * Manages all BOs which are affected by a certain range of address space.
++ */
+ struct amdgpu_mn_node {
+ struct interval_tree_node it;
+ struct list_head bos;
+ };
+
+ /**
+- * amdgpu_mn_destroy - destroy the amn
++ * amdgpu_mn_destroy - destroy the MMU notifier
+ *
+ * @work: previously sheduled work item
+ *
+@@ -106,7 +144,7 @@ static void amdgpu_mn_destroy(struct work_struct *work)
+ * amdgpu_mn_release - callback to notify about mm destruction
+ *
+ * @mn: our notifier
+- * @mn: the mm this callback is about
++ * @mm: the mm this callback is about
+ *
+ * Shedule a work item to lazy destroy our notifier.
+ */
+@@ -114,13 +152,16 @@ static void amdgpu_mn_release(struct mmu_notifier *mn,
+ struct mm_struct *mm)
+ {
+ struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
++
+ INIT_WORK(&amn->work, amdgpu_mn_destroy);
+ schedule_work(&amn->work);
+ }
+
+
+ /**
+- * amdgpu_mn_lock - take the write side lock for this mn
++ * amdgpu_mn_lock - take the write side lock for this notifier
++ *
++ * @mn: our notifier
+ */
+ void amdgpu_mn_lock(struct amdgpu_mn *mn)
+ {
+@@ -129,7 +170,9 @@ void amdgpu_mn_lock(struct amdgpu_mn *mn)
+ }
+
+ /**
+- * amdgpu_mn_unlock - drop the write side lock for this mn
++ * amdgpu_mn_unlock - drop the write side lock for this notifier
++ *
++ * @mn: our notifier
+ */
+ void amdgpu_mn_unlock(struct amdgpu_mn *mn)
+ {
+@@ -138,11 +181,9 @@ void amdgpu_mn_unlock(struct amdgpu_mn *mn)
+ }
+
+ /**
+- * amdgpu_mn_read_lock - take the amn read lock
++ * amdgpu_mn_read_lock - take the read side lock for this notifier
+ *
+ * @amn: our notifier
+- *
+- * Take the amn read side lock.
+ */
+ static void amdgpu_mn_read_lock(struct amdgpu_mn *amn)
+ {
+@@ -153,11 +194,9 @@ static void amdgpu_mn_read_lock(struct amdgpu_mn *amn)
+ }
+
+ /**
+- * amdgpu_mn_read_unlock - drop the amn read lock
++ * amdgpu_mn_read_unlock - drop the read side lock for this notifier
+ *
+ * @amn: our notifier
+- *
+- * Drop the amn read side lock.
+ */
+ static void amdgpu_mn_read_unlock(struct amdgpu_mn *amn)
+ {
+@@ -169,9 +208,11 @@ static void amdgpu_mn_read_unlock(struct amdgpu_mn *amn)
+ * amdgpu_mn_invalidate_node - unmap all BOs of a node
+ *
+ * @node: the node with the BOs to unmap
++ * @start: start of address range affected
++ * @end: end of address range affected
+ *
+- * We block for all BOs and unmap them by move them
+- * into system domain again.
++ * Block for operations on BOs to finish and mark pages as accessed and
++ * potentially dirty.
+ */
+ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
+ unsigned long start,
+@@ -198,12 +239,12 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
+ * amdgpu_mn_invalidate_range_start_gfx - callback to notify about mm change
+ *
+ * @mn: our notifier
+- * @mn: the mm this callback is about
++ * @mm: the mm this callback is about
+ * @start: start of updated range
+ * @end: end of updated range
+ *
+- * We block for all BOs between start and end to be idle and
+- * unmap them by move them into system domain again.
++ * Block for operations on BOs to finish and mark pages as accessed and
++ * potentially dirty.
+ */
+ static void amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+@@ -233,7 +274,7 @@ static void amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
+ * amdgpu_mn_invalidate_range_end_gfx - callback to notify about mm change
+ *
+ * @mn: our notifier
+- * @mn: the mm this callback is about
++ * @mm: the mm this callback is about
+ * @start: start of updated range
+ * @end: end of updated range
+ *
+@@ -488,6 +529,7 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
+
+ if (list_empty(head)) {
+ struct amdgpu_mn_node *node;
++
+ node = container_of(head, struct amdgpu_mn_node, bos);
+ interval_tree_remove(&node->it, &amn->objects);
+ kfree(node);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4619-drm-amdgpu-Correct-the-ndw-of-bo-update-mapping.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4619-drm-amdgpu-Correct-the-ndw-of-bo-update-mapping.patch
new file mode 100644
index 00000000..7bbda8d5
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4619-drm-amdgpu-Correct-the-ndw-of-bo-update-mapping.patch
@@ -0,0 +1,35 @@
+From 78b824fb71d647d763897f9a66fd65e411c786cb Mon Sep 17 00:00:00 2001
+From: Emily Deng <Emily.Deng@amd.com>
+Date: Fri, 8 Jun 2018 16:36:22 +0800
+Subject: [PATCH 4619/5725] drm/amdgpu: Correct the ndw of bo update mapping.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+For a buffer object that has a shadow buffer, twice as many commands are needed.
+
+Signed-off-by: Emily Deng <Emily.Deng@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>.
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 4889137..7dfb239 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -1325,7 +1325,10 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
+ ndw += ncmds * 10;
+
+ /* extra commands for begin/end fragments */
+- ndw += 2 * 10 * adev->vm_manager.fragment_size;
++ if (vm->root.base.bo->shadow)
++ ndw += 2 * 10 * adev->vm_manager.fragment_size * 2;
++ else
++ ndw += 2 * 10 * adev->vm_manager.fragment_size;
+
+ params.func = amdgpu_vm_do_set_ptes;
+ }
+--
+2.7.4
+
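The sizing fix above doubles the per-fragment command estimate whenever the root page directory has a shadow BO, since the page-table updates have to be written a second time into the shadow copy. Below is a minimal stand-alone sketch of that arithmetic; the fixed overhead, ncmds and fragment_size are illustrative values, not taken from the driver.

    #include <stdio.h>

    /* Rough model of the ndw estimate in amdgpu_vm_bo_update_mapping().
     * All numeric inputs here are assumptions for illustration only. */
    int main(void)
    {
        unsigned int ndw = 64;             /* assumed fixed overhead */
        unsigned int ncmds = 4;            /* assumed number of commands */
        unsigned int fragment_size = 9;    /* assumed vm_manager.fragment_size */
        int has_shadow = 1;                /* vm->root.base.bo->shadow != NULL */

        ndw += ncmds * 10;

        /* extra commands for begin/end fragments; doubled when the shadow
         * page tables must receive the same updates */
        if (has_shadow)
            ndw += 2 * 10 * fragment_size * 2;
        else
            ndw += 2 * 10 * fragment_size;

        printf("estimated ndw = %u\n", ndw);
        return 0;
    }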
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4620-drm-amdgpu-change-gfx8-ib-test-to-use-WB.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4620-drm-amdgpu-change-gfx8-ib-test-to-use-WB.patch
new file mode 100644
index 00000000..b10939c7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4620-drm-amdgpu-change-gfx8-ib-test-to-use-WB.patch
@@ -0,0 +1,107 @@
+From 440f18f6b7eade25540e6096193dd3f59fe55d5a Mon Sep 17 00:00:00 2001
+From: Shirish S <shirish.s@amd.com>
+Date: Fri, 8 Jun 2018 10:15:42 +0530
+Subject: [PATCH 4620/5725] drm/amdgpu: change gfx8 ib test to use WB
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This patch extends to gfx8's IB test the use of WB that was
+originally implemented in the upstream patch below:
+"ed9324a drm/amdgpu: change gfx9 ib test to use WB"
+
+For reference, the reasons for switching to WB are:
+
+1) When doing an IB test we don't want the KIQ health status to
+affect the result, and since SCRATCH register access goes through
+the KIQ, the GFX IB test could fail merely because the KIQ failed.
+
+2) Accessing a SCRATCH register costs much more time than the WB
+method, because SCRATCH register access runs through the KIQ, which
+can only begin after the GPU world-switches back to the current
+guest VF.
+
+Signed-off-by: Shirish S <shirish.s@amd.com>
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 35 +++++++++++++++++++++--------------
+ 1 file changed, 21 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index 3fa37a4..1ad8528 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -866,26 +866,32 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_ib ib;
+ struct dma_fence *f = NULL;
+- uint32_t scratch;
+- uint32_t tmp = 0;
++
++ unsigned int index;
++ uint64_t gpu_addr;
++ uint32_t tmp;
+ long r;
+
+- r = amdgpu_gfx_scratch_get(adev, &scratch);
++ r = amdgpu_device_wb_get(adev, &index);
+ if (r) {
+- DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
++ dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ return r;
+ }
+- WREG32(scratch, 0xCAFEDEAD);
++
++ gpu_addr = adev->wb.gpu_addr + (index * 4);
++ adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
+ memset(&ib, 0, sizeof(ib));
+- r = amdgpu_ib_get(adev, NULL, 256, &ib);
++ r = amdgpu_ib_get(adev, NULL, 16, &ib);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ goto err1;
+ }
+- ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
+- ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
+- ib.ptr[2] = 0xDEADBEEF;
+- ib.length_dw = 3;
++ ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
++ ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
++ ib.ptr[2] = lower_32_bits(gpu_addr);
++ ib.ptr[3] = upper_32_bits(gpu_addr);
++ ib.ptr[4] = 0xDEADBEEF;
++ ib.length_dw = 5;
+
+ r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
+ if (r)
+@@ -900,20 +906,21 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+ DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
+ goto err2;
+ }
+- tmp = RREG32(scratch);
++
++ tmp = adev->wb.wb[index];
+ if (tmp == 0xDEADBEEF) {
+ DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+ r = 0;
+ } else {
+- DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
+- scratch, tmp);
++ DRM_ERROR("ib test on ring %d failed\n", ring->idx);
+ r = -EINVAL;
+ }
++
+ err2:
+ amdgpu_ib_free(adev, &ib, NULL);
+ dma_fence_put(f);
+ err1:
+- amdgpu_gfx_scratch_free(adev, scratch);
++ amdgpu_device_wb_free(adev, index);
+ return r;
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4621-drm-amd-Update-KFD-Thunk-ioctl-ABI-to-match-upstream.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4621-drm-amd-Update-KFD-Thunk-ioctl-ABI-to-match-upstream.patch
new file mode 100644
index 00000000..cea8c76b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4621-drm-amd-Update-KFD-Thunk-ioctl-ABI-to-match-upstream.patch
@@ -0,0 +1,578 @@
+From aee31c54b48529865f078a01d3210b5cf66b2f89 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Thu, 24 May 2018 14:53:40 -0400
+Subject: [PATCH 4621/5725] drm/amd: Update KFD-Thunk ioctl ABI to match
+ upstream
+
+- Clean up and renumber scratch memory ioctl
+- Renumber get_tile_config ioctl
+- Renumber set_trap_handler ioctl
+- Update KFD_IOC_ALLOC_MEM_FLAGS
+- Renumber GPUVM memory management ioctls
+- Remove unused SET_PROCESS_DGPU_APERTURE ioctl
+- Update memory management ioctls
+ Replace device_ids_array_size (in bytes) with n_devices. Fix error
+ handling and use n_success to update device_id arrays in objects.
+
+This commit breaks the ABI and requires a corresponding Thunk change.
+
+Change-Id: I62149841f1603ec36143836d2eb5ab0fcaf37cf5
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 4 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 133 ++++++++---------------
+ drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c | 10 --
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 2 -
+ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 11 +-
+ drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 24 ++--
+ include/uapi/linux/kfd_ioctl.h | 91 ++++++++--------
+ 7 files changed, 116 insertions(+), 159 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index d43473e..85d0bfb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -1246,9 +1246,9 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
+ VI_BO_SIZE_ALIGN : 1;
+
+ mapping_flags = AMDGPU_VM_PAGE_READABLE;
+- if (!(flags & ALLOC_MEM_FLAGS_READONLY))
++ if (flags & ALLOC_MEM_FLAGS_WRITABLE)
+ mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
+- if (flags & ALLOC_MEM_FLAGS_EXECUTE_ACCESS)
++ if (flags & ALLOC_MEM_FLAGS_EXECUTABLE)
+ mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
+ if (flags & ALLOC_MEM_FLAGS_COHERENT)
+ mapping_flags |= AMDGPU_VM_MTYPE_UC;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index 01a253c..0eab007 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -1085,17 +1085,14 @@ static int kfd_ioctl_wait_events(struct file *filp, struct kfd_process *p,
+
+ return err;
+ }
+-static int kfd_ioctl_alloc_scratch_memory(struct file *filep,
++static int kfd_ioctl_set_scratch_backing_va(struct file *filep,
+ struct kfd_process *p, void *data)
+ {
+- struct kfd_ioctl_alloc_memory_of_scratch_args *args = data;
++ struct kfd_ioctl_set_scratch_backing_va_args *args = data;
+ struct kfd_process_device *pdd;
+ struct kfd_dev *dev;
+ long err;
+
+- if (args->size == 0)
+- return -EINVAL;
+-
+ dev = kfd_device_by_id(args->gpu_id);
+ if (!dev)
+ return -EINVAL;
+@@ -1383,31 +1380,30 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
+ void *mem;
+ struct kfd_dev *dev, *peer;
+ long err = 0;
+- int i, num_dev = 0;
++ int i;
+ uint32_t *devices_arr = NULL;
+
+ dev = kfd_device_by_id(GET_GPU_ID(args->handle));
+ if (!dev)
+ return -EINVAL;
+
+- if (args->device_ids_array_size == 0) {
+- pr_debug("Device ID array size is 0\n");
++ if (!args->n_devices) {
++ pr_debug("Device IDs array empty\n");
+ return -EINVAL;
+ }
+-
+- if (args->device_ids_array_size % sizeof(uint32_t)) {
+- pr_debug("Node IDs array size %u\n",
+- args->device_ids_array_size);
++ if (args->n_success > args->n_devices) {
++ pr_debug("n_success exceeds n_devices\n");
+ return -EINVAL;
+ }
+
+- devices_arr = kmalloc(args->device_ids_array_size, GFP_KERNEL);
++ devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr),
++ GFP_KERNEL);
+ if (!devices_arr)
+ return -ENOMEM;
+
+ err = copy_from_user(devices_arr,
+- (void __user *)args->device_ids_array_ptr,
+- args->device_ids_array_size);
++ (void __user *)args->device_ids_array_ptr,
++ args->n_devices * sizeof(*devices_arr));
+ if (err != 0) {
+ err = -EFAULT;
+ goto copy_from_user_failed;
+@@ -1428,12 +1424,11 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
+ goto get_mem_obj_from_handle_failed;
+ }
+
+- num_dev = args->device_ids_array_size / sizeof(uint32_t);
+- for (i = 0 ; i < num_dev; i++) {
++ for (i = args->n_success; i < args->n_devices; i++) {
+ peer = kfd_device_by_id(devices_arr[i]);
+ if (!peer) {
+ pr_debug("Getting device by id failed for 0x%x\n",
+- devices_arr[i]);
++ devices_arr[i]);
+ err = -EINVAL;
+ goto get_mem_obj_from_handle_failed;
+ }
+@@ -1444,12 +1439,13 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
+ goto get_mem_obj_from_handle_failed;
+ }
+ err = peer->kfd2kgd->map_memory_to_gpu(
+- peer->kgd, (struct kgd_mem *)mem, peer_pdd->vm);
+- if (err != 0) {
+- pr_err("Failed to map to gpu %d, num_dev=%d\n",
+- i, num_dev);
++ peer->kgd, (struct kgd_mem *)mem, peer_pdd->vm);
++ if (err) {
++ pr_err("Failed to map to gpu %d/%d\n",
++ i, args->n_devices);
+ goto map_memory_to_gpu_failed;
+ }
++ args->n_success = i+1;
+ }
+
+ mutex_unlock(&p->mutex);
+@@ -1461,7 +1457,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
+ }
+
+ /* Flush TLBs after waiting for the page table updates to complete */
+- for (i = 0; i < num_dev; i++) {
++ for (i = 0; i < args->n_devices; i++) {
+ peer = kfd_device_by_id(devices_arr[i]);
+ if (WARN_ON_ONCE(!peer))
+ continue;
+@@ -1494,30 +1490,29 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
+ void *mem;
+ struct kfd_dev *dev, *peer;
+ long err = 0;
+- uint32_t *devices_arr = NULL, num_dev, i;
++ uint32_t *devices_arr = NULL, i;
+
+ dev = kfd_device_by_id(GET_GPU_ID(args->handle));
+ if (!dev)
+ return -EINVAL;
+
+- if (args->device_ids_array_size == 0) {
+- pr_debug("Device ID array size is 0\n");
++ if (!args->n_devices) {
++ pr_debug("Device IDs array empty\n");
+ return -EINVAL;
+ }
+-
+- if (args->device_ids_array_size % sizeof(uint32_t)) {
+- pr_debug("Node IDs array size %u\n",
+- args->device_ids_array_size);
++ if (args->n_success > args->n_devices) {
++ pr_debug("n_success exceeds n_devices\n");
+ return -EINVAL;
+ }
+
+- devices_arr = kmalloc(args->device_ids_array_size, GFP_KERNEL);
++ devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr),
++ GFP_KERNEL);
+ if (!devices_arr)
+ return -ENOMEM;
+
+ err = copy_from_user(devices_arr,
+- (void __user *)args->device_ids_array_ptr,
+- args->device_ids_array_size);
++ (void __user *)args->device_ids_array_ptr,
++ args->n_devices * sizeof(*devices_arr));
+ if (err != 0) {
+ err = -EFAULT;
+ goto copy_from_user_failed;
+@@ -1527,8 +1522,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
+
+ pdd = kfd_get_process_device_data(dev, p);
+ if (!pdd) {
+- pr_debug("Process device data doesn't exist\n");
+- err = -ENODEV;
++ err = -EINVAL;
+ goto bind_process_to_device_failed;
+ }
+
+@@ -1539,8 +1533,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
+ goto get_mem_obj_from_handle_failed;
+ }
+
+- num_dev = args->device_ids_array_size / sizeof(uint32_t);
+- for (i = 0 ; i < num_dev; i++) {
++ for (i = args->n_success; i < args->n_devices; i++) {
+ peer = kfd_device_by_id(devices_arr[i]);
+ if (!peer) {
+ err = -EINVAL;
+@@ -1556,9 +1549,10 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
+ peer->kgd, (struct kgd_mem *)mem, peer_pdd->vm);
+ if (err) {
+ pr_err("Failed to unmap from gpu %d/%d\n",
+- i, num_dev);
++ i, args->n_devices);
+ goto unmap_memory_from_gpu_failed;
+ }
++ args->n_success = i+1;
+ }
+ kfree(devices_arr);
+
+@@ -1575,34 +1569,6 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
+ return err;
+ }
+
+-static int kfd_ioctl_set_process_dgpu_aperture(struct file *filep,
+- struct kfd_process *p, void *data)
+-{
+- struct kfd_ioctl_set_process_dgpu_aperture_args *args = data;
+- struct kfd_dev *dev;
+- struct kfd_process_device *pdd;
+- long err;
+-
+- dev = kfd_device_by_id(args->gpu_id);
+- if (!dev)
+- return -EINVAL;
+-
+- mutex_lock(&p->mutex);
+-
+- pdd = kfd_bind_process_to_device(dev, p);
+- if (IS_ERR(pdd)) {
+- err = PTR_ERR(pdd);
+- goto exit;
+- }
+-
+- err = kfd_set_process_dgpu_aperture(pdd, args->dgpu_base,
+- args->dgpu_limit);
+-
+-exit:
+- mutex_unlock(&p->mutex);
+- return err;
+-}
+-
+ static int kfd_ioctl_get_dmabuf_info(struct file *filep,
+ struct kfd_process *p, void *data)
+ {
+@@ -1894,7 +1860,7 @@ static int kfd_create_cma_system_bo(struct kfd_dev *kdev, struct kfd_bo *bo,
+ uint64_t bo_size = 0;
+ struct dma_fence *f;
+
+- uint32_t flags = ALLOC_MEM_FLAGS_GTT | ALLOC_MEM_FLAGS_NONPAGED |
++ uint32_t flags = ALLOC_MEM_FLAGS_GTT | ALLOC_MEM_FLAGS_WRITABLE |
+ ALLOC_MEM_FLAGS_NO_SUBSTITUTE;
+
+ *cma_bo = NULL;
+@@ -2564,6 +2530,21 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_WAVE_CONTROL,
+ kfd_ioctl_dbg_wave_control, 0),
+
++ AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_SCRATCH_BACKING_VA,
++ kfd_ioctl_set_scratch_backing_va, 0),
++
++ AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_TILE_CONFIG,
++ kfd_ioctl_get_tile_config, 0),
++
++ AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_TRAP_HANDLER,
++ kfd_ioctl_set_trap_handler, 0),
++
++ AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES_NEW,
++ kfd_ioctl_get_process_apertures_new, 0),
++
++ AMDKFD_IOCTL_DEF(AMDKFD_IOC_ACQUIRE_VM,
++ kfd_ioctl_acquire_vm, 0),
++
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_MEMORY_OF_GPU,
+ kfd_ioctl_alloc_memory_of_gpu, 0),
+
+@@ -2576,30 +2557,15 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU,
+ kfd_ioctl_unmap_memory_from_gpu, 0),
+
+- AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_MEMORY_OF_SCRATCH,
+- kfd_ioctl_alloc_scratch_memory, 0),
+-
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_CU_MASK,
+ kfd_ioctl_set_cu_mask, 0),
+
+- AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_PROCESS_DGPU_APERTURE,
+- kfd_ioctl_set_process_dgpu_aperture, 0),
+-
+- AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_TRAP_HANDLER,
+- kfd_ioctl_set_trap_handler, 0),
+-
+- AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES_NEW,
+- kfd_ioctl_get_process_apertures_new, 0),
+-
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_DMABUF_INFO,
+ kfd_ioctl_get_dmabuf_info, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_IMPORT_DMABUF,
+ kfd_ioctl_import_dmabuf, 0),
+
+- AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_TILE_CONFIG,
+- kfd_ioctl_get_tile_config, 0),
+-
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_IPC_IMPORT_HANDLE,
+ kfd_ioctl_ipc_import_handle, 0),
+
+@@ -2612,9 +2578,6 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_QUEUE_WAVE_STATE,
+ kfd_ioctl_get_queue_wave_state, 0),
+
+- AMDKFD_IOCTL_DEF(AMDKFD_IOC_ACQUIRE_VM,
+- kfd_ioctl_acquire_vm, 0)
+-
+ };
+
+ #define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls)
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+index 0cae2e9..f7de732 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+@@ -312,16 +312,6 @@
+ #define SVM_CWSR_BASE (SVM_USER_BASE - KFD_CWSR_TBA_TMA_SIZE)
+ #define SVM_IB_BASE (SVM_CWSR_BASE - PAGE_SIZE)
+
+-int kfd_set_process_dgpu_aperture(struct kfd_process_device *pdd,
+- uint64_t base, uint64_t limit)
+-{
+- if (base < SVM_USER_BASE) {
+- pr_err("Set dgpu vm base 0x%llx failed.\n", base);
+- return -EINVAL;
+- }
+- return 0;
+-}
+-
+ static void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id)
+ {
+ /*
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index c63a6b0..7bb56e2 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -918,8 +918,6 @@ int kgd2kfd_post_reset(struct kfd_dev *kfd);
+
+ /* amdkfd Apertures */
+ int kfd_init_apertures(struct kfd_process *process);
+-int kfd_set_process_dgpu_aperture(struct kfd_process_device *pdd,
+- uint64_t base, uint64_t limit);
+
+ /* Queue Context Management */
+ int init_queue(struct queue **q, const struct queue_properties *properties);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index 9477e50..e79479b 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -191,8 +191,10 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
+ static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
+ {
+ struct qcm_process_device *qpd = &pdd->qpd;
+- uint32_t flags = ALLOC_MEM_FLAGS_GTT | ALLOC_MEM_FLAGS_NONPAGED |
+- ALLOC_MEM_FLAGS_NO_SUBSTITUTE | ALLOC_MEM_FLAGS_EXECUTE_ACCESS;
++ uint32_t flags = ALLOC_MEM_FLAGS_GTT |
++ ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
++ ALLOC_MEM_FLAGS_WRITABLE |
++ ALLOC_MEM_FLAGS_EXECUTABLE;
+ void *kaddr;
+ int ret;
+
+@@ -512,9 +514,8 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
+ {
+ struct kfd_dev *dev = pdd->dev;
+ struct qcm_process_device *qpd = &pdd->qpd;
+- uint32_t flags = ALLOC_MEM_FLAGS_GTT | ALLOC_MEM_FLAGS_NONPAGED |
+- ALLOC_MEM_FLAGS_NO_SUBSTITUTE | ALLOC_MEM_FLAGS_READONLY |
+- ALLOC_MEM_FLAGS_EXECUTE_ACCESS;
++ uint32_t flags = ALLOC_MEM_FLAGS_GTT |
++ ALLOC_MEM_FLAGS_NO_SUBSTITUTE | ALLOC_MEM_FLAGS_EXECUTABLE;
+ void *kaddr;
+ int ret;
+
+diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+index d26bba5..fabcf1ef 100644
+--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
++++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+@@ -156,24 +156,26 @@ struct tile_config {
+ uint32_t num_ranks;
+ };
+
++
+ /*
+- * Allocation flag domains currently only VRAM and GTT domain supported
++ * Allocation flag domains
++ * NOTE: This must match the corresponding definitions in kfd_ioctl.h.
+ */
+-#define ALLOC_MEM_FLAGS_VRAM (1 << 0)
+-#define ALLOC_MEM_FLAGS_GTT (1 << 1)
+-#define ALLOC_MEM_FLAGS_USERPTR (1 << 2)
+-#define ALLOC_MEM_FLAGS_DOORBELL (1 << 3)
++#define ALLOC_MEM_FLAGS_VRAM (1 << 0)
++#define ALLOC_MEM_FLAGS_GTT (1 << 1)
++#define ALLOC_MEM_FLAGS_USERPTR (1 << 2)
++#define ALLOC_MEM_FLAGS_DOORBELL (1 << 3)
+
+ /*
+ * Allocation flags attributes/access options.
++ * NOTE: This must match the corresponding definitions in kfd_ioctl.h.
+ */
+-#define ALLOC_MEM_FLAGS_NONPAGED (1 << 31)
+-#define ALLOC_MEM_FLAGS_READONLY (1 << 30)
+-#define ALLOC_MEM_FLAGS_PUBLIC (1 << 29)
+-#define ALLOC_MEM_FLAGS_NO_SUBSTITUTE (1 << 28)
++#define ALLOC_MEM_FLAGS_WRITABLE (1 << 31)
++#define ALLOC_MEM_FLAGS_EXECUTABLE (1 << 30)
++#define ALLOC_MEM_FLAGS_PUBLIC (1 << 29)
++#define ALLOC_MEM_FLAGS_NO_SUBSTITUTE (1 << 28) /* TODO */
+ #define ALLOC_MEM_FLAGS_AQL_QUEUE_MEM (1 << 27)
+-#define ALLOC_MEM_FLAGS_EXECUTE_ACCESS (1 << 26)
+-#define ALLOC_MEM_FLAGS_COHERENT (1 << 25)
++#define ALLOC_MEM_FLAGS_COHERENT (1 << 26) /* For GFXv9 or later */
+
+ /**
+ * struct kfd2kgd_calls
+diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
+index 85d833e..7a6bb85 100644
+--- a/include/uapi/linux/kfd_ioctl.h
++++ b/include/uapi/linux/kfd_ioctl.h
+@@ -279,6 +279,12 @@ struct kfd_ioctl_wait_events_args {
+ uint32_t wait_result; /* from KFD */
+ };
+
++struct kfd_ioctl_set_scratch_backing_va_args {
++ __u64 va_addr; /* to KFD */
++ __u32 gpu_id; /* to KFD */
++ __u32 pad;
++};
++
+ struct kfd_ioctl_alloc_memory_of_scratch_args {
+ uint64_t va_addr; /* to KFD */
+ uint64_t size; /* to KFD */
+@@ -321,15 +327,15 @@ struct kfd_ioctl_free_memory_of_gpu_args {
+ struct kfd_ioctl_map_memory_to_gpu_args {
+ uint64_t handle; /* to KFD */
+ uint64_t device_ids_array_ptr; /* to KFD */
+- uint32_t device_ids_array_size; /* to KFD */
+- uint32_t pad;
++ uint32_t n_devices; /* to KFD */
++ uint32_t n_success; /* to/from KFD */
+ };
+
+ struct kfd_ioctl_unmap_memory_from_gpu_args {
+ uint64_t handle; /* to KFD */
+ uint64_t device_ids_array_ptr; /* to KFD */
+- uint32_t device_ids_array_size; /* to KFD */
+- uint32_t pad;
++ uint32_t n_devices; /* to KFD */
++ uint32_t n_success; /* to/from KFD */
+ };
+
+ struct kfd_ioctl_set_process_dgpu_aperture_args {
+@@ -478,62 +484,59 @@ struct kfd_ioctl_cross_memory_copy_args {
+ #define AMDKFD_IOC_DBG_ADDRESS_WATCH \
+ AMDKFD_IOW(0x0F, struct kfd_ioctl_dbg_address_watch_args)
+
+-#define AMDKFD_IOC_DBG_WAVE_CONTROL \
+- AMDKFD_IOW(0x10, struct kfd_ioctl_dbg_wave_control_args)
+-
+-#define AMDKFD_IOC_ALLOC_MEMORY_OF_GPU \
+- AMDKFD_IOWR(0x11, struct kfd_ioctl_alloc_memory_of_gpu_args)
++#define AMDKFD_IOC_DBG_WAVE_CONTROL \
++ AMDKFD_IOW(0x10, struct kfd_ioctl_dbg_wave_control_args)
+
+-#define AMDKFD_IOC_FREE_MEMORY_OF_GPU \
+- AMDKFD_IOWR(0x12, struct kfd_ioctl_free_memory_of_gpu_args)
++#define AMDKFD_IOC_SET_SCRATCH_BACKING_VA \
++ AMDKFD_IOWR(0x11, struct kfd_ioctl_set_scratch_backing_va_args)
+
+-#define AMDKFD_IOC_MAP_MEMORY_TO_GPU \
+- AMDKFD_IOWR(0x13, struct kfd_ioctl_map_memory_to_gpu_args)
++#define AMDKFD_IOC_GET_TILE_CONFIG \
++ AMDKFD_IOWR(0x12, struct kfd_ioctl_get_tile_config_args)
+
+-#define AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU \
+- AMDKFD_IOWR(0x14, struct kfd_ioctl_unmap_memory_from_gpu_args)
++#define AMDKFD_IOC_SET_TRAP_HANDLER \
++ AMDKFD_IOW(0x13, struct kfd_ioctl_set_trap_handler_args)
+
+-#define AMDKFD_IOC_ALLOC_MEMORY_OF_SCRATCH \
+- AMDKFD_IOWR(0x15, struct kfd_ioctl_alloc_memory_of_scratch_args)
++#define AMDKFD_IOC_GET_PROCESS_APERTURES_NEW \
++ AMDKFD_IOWR(0x14, \
++ struct kfd_ioctl_get_process_apertures_new_args)
+
+-#define AMDKFD_IOC_SET_CU_MASK \
+- AMDKFD_IOW(0x16, struct kfd_ioctl_set_cu_mask_args)
++#define AMDKFD_IOC_ACQUIRE_VM \
++ AMDKFD_IOW(0x15, struct kfd_ioctl_acquire_vm_args)
+
+-#define AMDKFD_IOC_SET_PROCESS_DGPU_APERTURE \
+- AMDKFD_IOW(0x17, \
+- struct kfd_ioctl_set_process_dgpu_aperture_args)
++#define AMDKFD_IOC_ALLOC_MEMORY_OF_GPU \
++ AMDKFD_IOWR(0x16, struct kfd_ioctl_alloc_memory_of_gpu_args)
+
+-#define AMDKFD_IOC_SET_TRAP_HANDLER \
+- AMDKFD_IOW(0x18, struct kfd_ioctl_set_trap_handler_args)
++#define AMDKFD_IOC_FREE_MEMORY_OF_GPU \
++ AMDKFD_IOW(0x17, struct kfd_ioctl_free_memory_of_gpu_args)
+
+-#define AMDKFD_IOC_GET_PROCESS_APERTURES_NEW \
+- AMDKFD_IOWR(0x19, struct kfd_ioctl_get_process_apertures_new_args)
++#define AMDKFD_IOC_MAP_MEMORY_TO_GPU \
++ AMDKFD_IOWR(0x18, struct kfd_ioctl_map_memory_to_gpu_args)
+
+-#define AMDKFD_IOC_GET_DMABUF_INFO \
+- AMDKFD_IOWR(0x1A, struct kfd_ioctl_get_dmabuf_info_args)
++#define AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU \
++ AMDKFD_IOWR(0x19, struct kfd_ioctl_unmap_memory_from_gpu_args)
+
+-#define AMDKFD_IOC_IMPORT_DMABUF \
+- AMDKFD_IOWR(0x1B, struct kfd_ioctl_import_dmabuf_args)
++#define AMDKFD_IOC_SET_CU_MASK \
++ AMDKFD_IOW(0x1A, struct kfd_ioctl_set_cu_mask_args)
+
+-#define AMDKFD_IOC_GET_TILE_CONFIG \
+- AMDKFD_IOWR(0x1C, struct kfd_ioctl_get_tile_config_args)
++#define AMDKFD_IOC_GET_QUEUE_WAVE_STATE \
++ AMDKFD_IOWR(0x1B, struct kfd_ioctl_get_queue_wave_state_args)
+
+-#define AMDKFD_IOC_IPC_IMPORT_HANDLE \
+- AMDKFD_IOWR(0x1D, struct kfd_ioctl_ipc_import_handle_args)
++#define AMDKFD_IOC_GET_DMABUF_INFO \
++ AMDKFD_IOWR(0x1C, struct kfd_ioctl_get_dmabuf_info_args)
+
+-#define AMDKFD_IOC_IPC_EXPORT_HANDLE \
+- AMDKFD_IOWR(0x1E, struct kfd_ioctl_ipc_export_handle_args)
++#define AMDKFD_IOC_IMPORT_DMABUF \
++ AMDKFD_IOWR(0x1D, struct kfd_ioctl_import_dmabuf_args)
+
+-#define AMDKFD_IOC_CROSS_MEMORY_COPY \
+- AMDKFD_IOWR(0x1F, struct kfd_ioctl_cross_memory_copy_args)
++#define AMDKFD_IOC_IPC_IMPORT_HANDLE \
++ AMDKFD_IOWR(0x1E, struct kfd_ioctl_ipc_import_handle_args)
+
+-#define AMDKFD_IOC_GET_QUEUE_WAVE_STATE \
+- AMDKFD_IOWR(0x20, struct kfd_ioctl_get_queue_wave_state_args)
++#define AMDKFD_IOC_IPC_EXPORT_HANDLE \
++ AMDKFD_IOWR(0x1F, struct kfd_ioctl_ipc_export_handle_args)
+
+-#define AMDKFD_IOC_ACQUIRE_VM \
+- AMDKFD_IOW(0x21, struct kfd_ioctl_acquire_vm_args)
++#define AMDKFD_IOC_CROSS_MEMORY_COPY \
++ AMDKFD_IOWR(0x20, struct kfd_ioctl_cross_memory_copy_args)
+
+-#define AMDKFD_COMMAND_START 0x01
+-#define AMDKFD_COMMAND_END 0x22
++#define AMDKFD_COMMAND_START 0x01
++#define AMDKFD_COMMAND_END 0x21
+
+ #endif
+--
+2.7.4
+
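The reworked map/unmap ioctls replace the byte-sized device_ids_array_size with an explicit n_devices count, and the kernel advances n_success as each device mapping completes, so user space can see how far a partially failed call got. A hedged user-space sketch of calling the renumbered map ioctl follows; the file descriptor, handle and GPU IDs are placeholders, and only the argument layout comes from the patch.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kfd_ioctl.h>    /* assumed install path of the uapi header */

    /* Map one KFD buffer handle to several GPUs; illustrative only. */
    static int map_bo_to_gpus(int kfd_fd, uint64_t handle,
                              uint32_t *gpu_ids, uint32_t n_gpus)
    {
        struct kfd_ioctl_map_memory_to_gpu_args args;

        memset(&args, 0, sizeof(args));
        args.handle = handle;
        args.device_ids_array_ptr = (uint64_t)(uintptr_t)gpu_ids;
        args.n_devices = n_gpus;    /* a count of IDs, no longer a byte size */
        args.n_success = 0;         /* kernel updates this per mapped device */

        if (ioctl(kfd_fd, AMDKFD_IOC_MAP_MEMORY_TO_GPU, &args) != 0) {
            fprintf(stderr, "mapped %u of %u devices before failing\n",
                    args.n_success, n_gpus);
            return -1;
        }
        return 0;
    }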
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4622-drm-amdgpu-Doorbell-assignment-for-8-sdma-user-queue.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4622-drm-amdgpu-Doorbell-assignment-for-8-sdma-user-queue.patch
new file mode 100644
index 00000000..9920b928
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4622-drm-amdgpu-Doorbell-assignment-for-8-sdma-user-queue.patch
@@ -0,0 +1,129 @@
+From 65bcf58ed2b71e9d4b9cf5c2c5b64532631d2a72 Mon Sep 17 00:00:00 2001
+From: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Date: Fri, 23 Mar 2018 16:20:41 -0500
+Subject: [PATCH 4622/5725] drm/amdgpu : Doorbell assignment for 8 sdma user
+ queue per engine
+
+Change-Id: I941c5a5e3f7885b77e2ee9efea511b62d3a72d9e
+Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 14 ++++-----
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 38 +++++++++++++++----------
+ drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 6 ++--
+ 3 files changed, 33 insertions(+), 25 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index f683cb5..89572c1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -541,16 +541,16 @@ typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
+ AMDGPU_DOORBELL64_GFX_RING0 = 0x8b,
+
+ /*
+- * Other graphics doorbells can be allocated here: from 0x8c to 0xef
++ * Other graphics doorbells can be allocated here: from 0x8c to 0xdf
+ * Graphics voltage island aperture 1
+- * default non-graphics QWORD index is 0xF0 - 0xFF inclusive
++ * default non-graphics QWORD index is 0xe0 - 0xFF inclusive
+ */
+
+- /* sDMA engines */
+- AMDGPU_DOORBELL64_sDMA_ENGINE0 = 0xF0,
+- AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE0 = 0xF1,
+- AMDGPU_DOORBELL64_sDMA_ENGINE1 = 0xF2,
+- AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE1 = 0xF3,
++ /* sDMA engines reserved from 0xe0 - 0xef */
++ AMDGPU_DOORBELL64_sDMA_ENGINE0 = 0xE0,
++ AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE0 = 0xE1,
++ AMDGPU_DOORBELL64_sDMA_ENGINE1 = 0xE8,
++ AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE1 = 0xE9,
+
+ /* Interrupt handler */
+ AMDGPU_DOORBELL64_IH = 0xF4, /* For legacy interrupt ring buffer */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+index 893c492..a9f2657 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+@@ -141,7 +141,7 @@ static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
+
+ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
+ {
+- int i;
++ int i, n;
+ int last_valid_bit;
+
+ if (adev->kfd) {
+@@ -183,7 +183,15 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
+ &gpu_resources.doorbell_physical_address,
+ &gpu_resources.doorbell_aperture_size,
+ &gpu_resources.doorbell_start_offset);
+- if (adev->asic_type >= CHIP_VEGA10) {
++
++ if (adev->asic_type < CHIP_VEGA10) {
++ kgd2kfd->device_init(adev->kfd, &gpu_resources);
++ return;
++ }
++
++ n = (adev->asic_type < CHIP_VEGA20) ? 2 : 8;
++
++ for (i = 0; i < n; i += 2) {
+ /* On SOC15 the BIF is involved in routing
+ * doorbells using the low 12 bits of the
+ * address. Communicate the assignments to
+@@ -191,20 +199,20 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
+ * process in case of 64-bit doorbells so we
+ * can use each doorbell assignment twice.
+ */
+- gpu_resources.sdma_doorbell[0][0] =
+- AMDGPU_DOORBELL64_sDMA_ENGINE0;
+- gpu_resources.sdma_doorbell[0][1] =
+- AMDGPU_DOORBELL64_sDMA_ENGINE0 + 0x200;
+- gpu_resources.sdma_doorbell[1][0] =
+- AMDGPU_DOORBELL64_sDMA_ENGINE1;
+- gpu_resources.sdma_doorbell[1][1] =
+- AMDGPU_DOORBELL64_sDMA_ENGINE1 + 0x200;
+- /* Doorbells 0x0f0-0ff and 0x2f0-2ff are reserved for
+- * SDMA, IH and VCN. So don't use them for the CP.
+- */
+- gpu_resources.reserved_doorbell_mask = 0x1f0;
+- gpu_resources.reserved_doorbell_val = 0x0f0;
++ gpu_resources.sdma_doorbell[0][i] =
++ AMDGPU_DOORBELL64_sDMA_ENGINE0 + (i >> 1);
++ gpu_resources.sdma_doorbell[0][i+1] =
++ AMDGPU_DOORBELL64_sDMA_ENGINE0 + 0x200 + (i >> 1);
++ gpu_resources.sdma_doorbell[1][i] =
++ AMDGPU_DOORBELL64_sDMA_ENGINE1 + (i >> 1);
++ gpu_resources.sdma_doorbell[1][i+1] =
++ AMDGPU_DOORBELL64_sDMA_ENGINE1 + 0x200 + (i >> 1);
+ }
++ /* Doorbells 0x0e0-0ff and 0x2e0-2ff are reserved for
++ * SDMA, IH and VCN. So don't use them for the CP.
++ */
++ gpu_resources.reserved_doorbell_mask = 0x1e0;
++ gpu_resources.reserved_doorbell_val = 0x0e0;
+
+ kgd2kfd->device_init(adev->kfd, &gpu_resources);
+ }
+diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+index fabcf1ef..0069b26 100644
+--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
++++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+@@ -124,10 +124,10 @@ struct kgd2kfd_shared_resources {
+ * is reserved: (D & reserved_doorbell_mask) == reserved_doorbell_val
+ *
+ * KFD currently uses 1024 (= 0x3ff) doorbells per process. If
+- * doorbells 0x0f0-0x0f7 and 0x2f-0x2f7 are reserved, that means
+- * mask would be set to 0x1f8 and val set to 0x0f0.
++ * doorbells 0x0e0-0x0ff and 0x2e0-0x2ff are reserved, that means
++ * mask would be set to 0x1e0 and val set to 0x0e0.
+ */
+- unsigned int sdma_doorbell[2][2];
++ unsigned int sdma_doorbell[2][8];
+ unsigned int reserved_doorbell_mask;
+ unsigned int reserved_doorbell_val;
+
+--
+2.7.4
+
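Because the 64-bit doorbells let each assignment be used twice, with the second use offset by 0x200, the loop above hands KFD pairs of indices per SDMA engine, engine 0 based at 0xE0 and engine 1 at 0xE8. A small stand-alone program that reproduces the computed assignments for 8 user queues per engine; the constants mirror the patch and nothing here is new hardware information.

    #include <stdio.h>

    #define DOORBELL64_sDMA_ENGINE0 0xE0   /* AMDGPU_DOORBELL64_sDMA_ENGINE0 */
    #define DOORBELL64_sDMA_ENGINE1 0xE8   /* AMDGPU_DOORBELL64_sDMA_ENGINE1 */

    int main(void)
    {
        unsigned int sdma_doorbell[2][8];
        int i, n = 8;    /* 8 SDMA user queues per engine (Vega20 and later) */

        for (i = 0; i < n; i += 2) {
            sdma_doorbell[0][i]     = DOORBELL64_sDMA_ENGINE0 + (i >> 1);
            sdma_doorbell[0][i + 1] = DOORBELL64_sDMA_ENGINE0 + 0x200 + (i >> 1);
            sdma_doorbell[1][i]     = DOORBELL64_sDMA_ENGINE1 + (i >> 1);
            sdma_doorbell[1][i + 1] = DOORBELL64_sDMA_ENGINE1 + 0x200 + (i >> 1);
        }

        for (i = 0; i < n; i++)
            printf("engine0[%d] = 0x%03x  engine1[%d] = 0x%03x\n",
                   i, sdma_doorbell[0][i], i, sdma_doorbell[1][i]);
        return 0;
    }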
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4623-drm-amdkfd-Make-the-number-of-SDMA-queues-variable.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4623-drm-amdkfd-Make-the-number-of-SDMA-queues-variable.patch
new file mode 100644
index 00000000..ef5d244b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4623-drm-amdkfd-Make-the-number-of-SDMA-queues-variable.patch
@@ -0,0 +1,180 @@
+From 4707ee0530e30301155707a539e11ee366cb57ef Mon Sep 17 00:00:00 2001
+From: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Date: Fri, 9 Feb 2018 16:29:14 -0500
+Subject: [PATCH 4623/5725] drm/amdkfd: Make the number of SDMA queues variable
+
+Vega20 supports 8 SDMA queues per engine
+
+Change-Id: I0df3b0c1a4df253e7b25aa4df3746334d39c8848
+Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_device.c | 14 +++++++++++++-
+ drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 6 ++++--
+ drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 1 -
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 1 +
+ 4 files changed, 18 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+index 0c4703c..f3afd4e 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -54,6 +54,7 @@ static const struct kfd_device_info kaveri_device_info = {
+ .needs_iommu_device = true,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 2,
++ .num_sdma_queues_per_engine = 2,
+ };
+
+ static const struct kfd_device_info carrizo_device_info = {
+@@ -70,6 +71,7 @@ static const struct kfd_device_info carrizo_device_info = {
+ .needs_iommu_device = true,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 2,
++ .num_sdma_queues_per_engine = 2,
+ };
+
+ static const struct kfd_device_info raven_device_info = {
+@@ -85,6 +87,7 @@ static const struct kfd_device_info raven_device_info = {
+ .needs_iommu_device = true,
+ .needs_pci_atomics = true,
+ .num_sdma_engines = 1,
++ .num_sdma_queues_per_engine = 2,
+ };
+ #endif
+
+@@ -102,6 +105,7 @@ static const struct kfd_device_info hawaii_device_info = {
+ .needs_iommu_device = false,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 2,
++ .num_sdma_queues_per_engine = 2,
+ };
+
+ static const struct kfd_device_info tonga_device_info = {
+@@ -117,6 +121,7 @@ static const struct kfd_device_info tonga_device_info = {
+ .needs_iommu_device = false,
+ .needs_pci_atomics = true,
+ .num_sdma_engines = 2,
++ .num_sdma_queues_per_engine = 2,
+ };
+
+ static const struct kfd_device_info tonga_vf_device_info = {
+@@ -132,6 +137,7 @@ static const struct kfd_device_info tonga_vf_device_info = {
+ .needs_iommu_device = false,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 2,
++ .num_sdma_queues_per_engine = 2,
+ };
+
+ static const struct kfd_device_info fiji_device_info = {
+@@ -147,6 +153,7 @@ static const struct kfd_device_info fiji_device_info = {
+ .needs_iommu_device = false,
+ .needs_pci_atomics = true,
+ .num_sdma_engines = 2,
++ .num_sdma_queues_per_engine = 2,
+ };
+
+ static const struct kfd_device_info fiji_vf_device_info = {
+@@ -162,6 +169,7 @@ static const struct kfd_device_info fiji_vf_device_info = {
+ .needs_iommu_device = false,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 2,
++ .num_sdma_queues_per_engine = 2,
+ };
+
+
+@@ -178,6 +186,7 @@ static const struct kfd_device_info polaris10_device_info = {
+ .needs_iommu_device = false,
+ .needs_pci_atomics = true,
+ .num_sdma_engines = 2,
++ .num_sdma_queues_per_engine = 2,
+ };
+
+ static const struct kfd_device_info polaris10_vf_device_info = {
+@@ -193,6 +202,7 @@ static const struct kfd_device_info polaris10_vf_device_info = {
+ .needs_iommu_device = false,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 2,
++ .num_sdma_queues_per_engine = 2,
+ };
+
+ static const struct kfd_device_info polaris11_device_info = {
+@@ -208,6 +218,7 @@ static const struct kfd_device_info polaris11_device_info = {
+ .needs_iommu_device = false,
+ .needs_pci_atomics = true,
+ .num_sdma_engines = 2,
++ .num_sdma_queues_per_engine = 2,
+ };
+
+ static const struct kfd_device_info vega10_device_info = {
+@@ -223,6 +234,7 @@ static const struct kfd_device_info vega10_device_info = {
+ .needs_iommu_device = false,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 2,
++ .num_sdma_queues_per_engine = 2,
+ };
+
+ static const struct kfd_device_info vega10_vf_device_info = {
+@@ -238,9 +250,9 @@ static const struct kfd_device_info vega10_vf_device_info = {
+ .needs_iommu_device = false,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 2,
++ .num_sdma_queues_per_engine = 2,
+ };
+
+-
+ struct kfd_deviceid {
+ unsigned short did;
+ const struct kfd_device_info *device_info;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index 2c5d330..91b88d2 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -109,7 +109,7 @@ static unsigned int get_num_sdma_engines(struct device_queue_manager *dqm)
+ unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
+ {
+ return dqm->dev->device_info->num_sdma_engines
+- * KFD_SDMA_QUEUES_PER_ENGINE;
++ * dqm->dev->device_info->num_sdma_queues_per_engine;
+ }
+
+ void program_sh_mem_settings(struct device_queue_manager *dqm,
+@@ -1838,7 +1838,9 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
+ }
+
+ for (pipe = 0; pipe < get_num_sdma_engines(dqm); pipe++) {
+- for (queue = 0; queue < KFD_SDMA_QUEUES_PER_ENGINE; queue++) {
++ for (queue = 0;
++ queue < dqm->dev->device_info->num_sdma_queues_per_engine;
++ queue++) {
+ r = dqm->dev->kfd2kgd->hqd_sdma_dump(
+ dqm->dev->kgd, pipe, queue, &dump, &n_regs);
+ if (r)
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+index 82fafd0..ad5c449 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+@@ -31,7 +31,6 @@
+
+ #define KFD_UNMAP_LATENCY_MS (4000)
+ #define QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS (2 * KFD_UNMAP_LATENCY_MS + 1000)
+-#define KFD_SDMA_QUEUES_PER_ENGINE (2)
+
+ struct device_process_node {
+ struct qcm_process_device *qpd;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index 7bb56e2..6b7f962 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -210,6 +210,7 @@ struct kfd_device_info {
+ bool needs_pci_atomics;
+ /* obtain from adev->sdma.num_instances */
+ unsigned int num_sdma_engines;
++ unsigned int num_sdma_queues_per_engine;
+ };
+
+ struct kfd_mem_obj {
+--
+2.7.4
+
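With the per-ASIC field in place, the queue count that KFD manages follows directly from the device table: a Tonga- or Vega10-class entry with 2 engines and 2 queues per engine yields 4 SDMA queues, Raven with a single engine yields 2, and a Vega20-class entry with num_sdma_queues_per_engine = 8 would yield 2 x 8 = 16 from the same get_num_sdma_queues() computation.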
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4624-drm-amdgpu-Fix-NULL-pointer-when-PP-block-is-disable.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4624-drm-amdgpu-Fix-NULL-pointer-when-PP-block-is-disable.patch
new file mode 100644
index 00000000..c9ec21c3
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4624-drm-amdgpu-Fix-NULL-pointer-when-PP-block-is-disable.patch
@@ -0,0 +1,44 @@
+From 111a8c79f79e31c709ddc626c64eb8fc31c4726c Mon Sep 17 00:00:00 2001
+From: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Date: Wed, 14 Mar 2018 14:44:58 -0400
+Subject: [PATCH 4624/5725] drm/amdgpu: Fix NULL pointer when PP block is
+ disabled
+
+When the PP block is disabled, return a fixed value (100 MHz) for mclk and
+sclk in bare-metal mode. This covers the emulation mode as well.
+
+Change-Id: Ibf4153544090ef6ec51b28d6625fa18c7ea3c58b
+Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 7dfb239..651acca 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -2408,7 +2408,8 @@ static void amdgpu_inc_compute_vms(struct amdgpu_device *adev)
+ if ((adev->vm_manager.n_compute_vms++ == 0) &&
+ (!amdgpu_sriov_vf(adev))) {
+ /* First Compute VM: enable compute power profile */
+- if (adev->powerplay.pp_funcs->switch_power_profile)
++ if (adev->powerplay.pp_funcs &&
++ adev->powerplay.pp_funcs->switch_power_profile)
+ amdgpu_dpm_switch_power_profile(adev,
+ PP_SMC_POWER_PROFILE_COMPUTE, true);
+ }
+@@ -2697,7 +2698,8 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+ if ((--adev->vm_manager.n_compute_vms == 0) &&
+ (!amdgpu_sriov_vf(adev))) {
+ /* Last KFD VM: enable graphics power profile */
+- if (adev->powerplay.pp_funcs->switch_power_profile)
++ if (adev->powerplay.pp_funcs &&
++ adev->powerplay.pp_funcs->switch_power_profile)
+ amdgpu_dpm_switch_power_profile(adev,
+ PP_SMC_POWER_PROFILE_COMPUTE, false);
+ }
+--
+2.7.4
+
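The fix is the standard guard for an optional ops table: test the table pointer itself before one of its members, because pp_funcs is NULL when the powerplay block is disabled. A generic sketch of the pattern follows; the structure and function names are illustrative stand-ins, not the amdgpu definitions.

    #include <stdio.h>

    struct power_funcs {
        void (*switch_power_profile)(int profile, int enable);
    };

    struct device_ctx {
        const struct power_funcs *pp_funcs;   /* NULL when PP is disabled */
    };

    static void enable_compute_profile(struct device_ctx *dev)
    {
        /* Check the table pointer before the member; calling through
         * dev->pp_funcs->switch_power_profile alone would crash here. */
        if (dev->pp_funcs && dev->pp_funcs->switch_power_profile)
            dev->pp_funcs->switch_power_profile(1 /* compute */, 1);
        else
            printf("powerplay disabled, skipping profile switch\n");
    }

    int main(void)
    {
        struct device_ctx dev = { .pp_funcs = NULL };
        enable_compute_profile(&dev);
        return 0;
    }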
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4625-drm-amd-Interface-change-to-support-64-bit-page_tabl.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4625-drm-amd-Interface-change-to-support-64-bit-page_tabl.patch
new file mode 100644
index 00000000..e57606f6
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4625-drm-amd-Interface-change-to-support-64-bit-page_tabl.patch
@@ -0,0 +1,263 @@
+From 80db7dbe15404c86fb466ef22a8dd47882fa197e Mon Sep 17 00:00:00 2001
+From: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Date: Tue, 13 Mar 2018 17:44:09 -0400
+Subject: [PATCH 4625/5725] drm/amd: Interface change to support 64 bit
+ page_table_base
+
+amdgpu_gpuvm_get_process_page_dir should return the page table address
+in the format expected by the pm4_map_process packet for all ASIC
+generations
+
+Change-Id: I1ac76285ce94cb73444c867b87f38a58160342c6
+Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 7 ++++---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 7 ++++---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 7 +++----
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 8 ++++++--
+ drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 11 ++++++-----
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c | 3 +--
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 2 +-
+ drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 4 ++--
+ 9 files changed, 28 insertions(+), 23 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+index c7116f6..cb0588d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+@@ -178,7 +178,7 @@ int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
+ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm);
+ void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm);
+-uint32_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm);
++uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm);
+ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
+ struct kgd_dev *kgd, uint64_t va, uint64_t size,
+ void *vm, struct sg_table *sg, struct kgd_mem **mem,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+index 468b940..c36a1ce 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+@@ -145,7 +145,7 @@ static int alloc_memory_of_scratch(struct kgd_dev *kgd,
+ static int write_config_static_mem(struct kgd_dev *kgd, bool swizzle_enable,
+ uint8_t element_size, uint8_t index_stride, uint8_t mtype);
+ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
+- uint32_t page_table_base);
++ uint64_t page_table_base);
+ static uint32_t read_vmid_from_vmfault_reg(struct kgd_dev *kgd);
+
+ /* Because of REG_GET_FIELD() being used, we put this function in the
+@@ -935,7 +935,7 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
+ }
+
+ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
+- uint32_t page_table_base)
++ uint64_t page_table_base)
+ {
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+@@ -943,7 +943,8 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
+ pr_err("trying to set page table base for wrong VMID\n");
+ return;
+ }
+- WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8, page_table_base);
++ WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8,
++ lower_32_bits(page_table_base));
+ }
+
+ /**
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+index e0c0e97..69ac7be 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+@@ -116,7 +116,7 @@ static int alloc_memory_of_scratch(struct kgd_dev *kgd,
+ static int write_config_static_mem(struct kgd_dev *kgd, bool swizzle_enable,
+ uint8_t element_size, uint8_t index_stride, uint8_t mtype);
+ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
+- uint32_t page_table_base);
++ uint64_t page_table_base);
+ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
+ static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
+
+@@ -1006,7 +1006,7 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
+ }
+
+ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
+- uint32_t page_table_base)
++ uint64_t page_table_base)
+ {
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+@@ -1014,5 +1014,6 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
+ pr_err("trying to set page table base for wrong VMID\n");
+ return;
+ }
+- WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8, page_table_base);
++ WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8,
++ lower_32_bits(page_table_base));
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+index adef83d..b6852a1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+@@ -159,7 +159,7 @@ static int alloc_memory_of_scratch(struct kgd_dev *kgd,
+ static int write_config_static_mem(struct kgd_dev *kgd, bool swizzle_enable,
+ uint8_t element_size, uint8_t index_stride, uint8_t mtype);
+ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
+- uint32_t page_table_base);
++ uint64_t page_table_base);
+ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
+ static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
+
+@@ -1188,11 +1188,10 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
+ }
+
+ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
+- uint32_t page_table_base)
++ uint64_t page_table_base)
+ {
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+- uint64_t base = (uint64_t)page_table_base << PAGE_SHIFT |
+- AMDGPU_PTE_VALID;
++ uint64_t base = page_table_base | AMDGPU_PTE_VALID;
+
+ if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
+ pr_err("trying to set page table base for wrong VMID %u\n",
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index 85d0bfb..0572e2d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -1160,11 +1160,15 @@ void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
+ kfree(vm);
+ }
+
+-uint32_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
++uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
+ {
+ struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
++ struct amdgpu_bo *pd = avm->root.base.bo;
++ struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
+
+- return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
++ if (adev->asic_type < CHIP_VEGA10)
++ return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
++ return avm->pd_phys_addr;
+ }
+
+ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index 91b88d2..bc80f28 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -656,7 +656,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
+ struct queue *q;
+ struct mqd_manager *mqd;
+ struct kfd_process_device *pdd;
+- uint32_t pd_base;
++ uint64_t pd_base;
+ int retval = 0;
+
+ pdd = qpd_to_pdd(qpd);
+@@ -676,7 +676,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
+
+ /* Update PD Base in QPD */
+ qpd->page_table_base = pd_base;
+- pr_debug("Updated PD address to 0x%08x\n", pd_base);
++ pr_debug("Updated PD address to 0x%llx\n", pd_base);
+
+ if (!list_empty(&qpd->queues_list)) {
+ dqm->dev->kfd2kgd->set_vm_context_page_table_base(
+@@ -717,7 +717,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
+ {
+ struct queue *q;
+ struct kfd_process_device *pdd;
+- uint32_t pd_base;
++ uint64_t pd_base;
+ int retval = 0;
+
+ pdd = qpd_to_pdd(qpd);
+@@ -737,7 +737,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
+
+ /* Update PD Base in QPD */
+ qpd->page_table_base = pd_base;
+- pr_debug("Updated PD address to 0x%08x\n", pd_base);
++ pr_debug("Updated PD address to 0x%llx\n", pd_base);
+
+ /* activate all active queues on the qpd */
+ list_for_each_entry(q, &qpd->queues_list, list) {
+@@ -761,7 +761,7 @@ static int register_process(struct device_queue_manager *dqm,
+ {
+ struct device_process_node *n;
+ struct kfd_process_device *pdd;
+- uint32_t pd_base;
++ uint64_t pd_base;
+ int retval;
+
+ n = kzalloc(sizeof(*n), GFP_KERNEL);
+@@ -779,6 +779,7 @@ static int register_process(struct device_queue_manager *dqm,
+
+ /* Update PD Base in QPD */
+ qpd->page_table_base = pd_base;
++ pr_debug("Updated PD address to 0x%llx\n", pd_base);
+
+ retval = dqm->asic_ops.update_qpd(dqm, qpd);
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
+index 684a3bf..33830b1 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
+@@ -71,8 +71,7 @@ static int pm_map_process_v9(struct packet_manager *pm,
+ uint32_t *buffer, struct qcm_process_device *qpd)
+ {
+ struct pm4_mes_map_process *packet;
+- uint64_t vm_page_table_base_addr =
+- (uint64_t)(qpd->page_table_base) << 12;
++ uint64_t vm_page_table_base_addr = qpd->page_table_base;
+
+ packet = (struct pm4_mes_map_process *)buffer;
+ memset(buffer, 0, sizeof(struct pm4_mes_map_process));
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index 6b7f962..7e55085 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -600,11 +600,11 @@ struct qcm_process_device {
+ * All the memory management data should be here too
+ */
+ uint64_t gds_context_area;
++ uint64_t page_table_base;
+ uint32_t sh_mem_config;
+ uint32_t sh_mem_bases;
+ uint32_t sh_mem_ape1_base;
+ uint32_t sh_mem_ape1_limit;
+- uint32_t page_table_base;
+ uint32_t gds_size;
+ uint32_t num_gws;
+ uint32_t num_oac;
+diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+index 0069b26..e81fdbc 100644
+--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
++++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+@@ -277,7 +277,7 @@ struct kfd2kgd_calls {
+ int (*create_process_gpumem)(struct kgd_dev *kgd, uint64_t va, size_t size, void *vm, struct kgd_mem **mem);
+ void (*destroy_process_gpumem)(struct kgd_dev *kgd, struct kgd_mem *mem);
+
+- uint32_t (*get_process_page_dir)(void *vm);
++ uint64_t (*get_process_page_dir)(void *vm);
+
+ int (*alloc_pasid)(unsigned int bits);
+ void (*free_pasid)(unsigned int pasid);
+@@ -368,7 +368,7 @@ struct kfd2kgd_calls {
+ int (*map_gtt_bo_to_kernel)(struct kgd_dev *kgd,
+ struct kgd_mem *mem, void **kptr, uint64_t *size);
+ void (*set_vm_context_page_table_base)(struct kgd_dev *kgd, uint32_t vmid,
+- uint32_t page_table_base);
++ uint64_t page_table_base);
+
+ int (*pin_get_sg_table_bo)(struct kgd_dev *kgd,
+ struct kgd_mem *mem, uint64_t offset,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4626-drm-amdgpu-Add-vega20-support-on-kfd-probe.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4626-drm-amdgpu-Add-vega20-support-on-kfd-probe.patch
new file mode 100644
index 00000000..37bb2e5d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4626-drm-amdgpu-Add-vega20-support-on-kfd-probe.patch
@@ -0,0 +1,36 @@
+From 973f6f6445cac12d1f9486ce93c99560863f4cec Mon Sep 17 00:00:00 2001
+From: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Date: Tue, 6 Feb 2018 15:37:53 -0500
+Subject: [PATCH 4626/5725] drm/amdgpu: Add vega20 support on kfd probe
+
+Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+index a9f2657..af2481a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+@@ -94,7 +94,14 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
+ kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
+ break;
+ case CHIP_VEGA10:
++ case CHIP_VEGA20:
+ case CHIP_RAVEN:
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)
++ if (adev->asic_type == CHIP_RAVEN) {
++ dev_dbg(adev->dev, "DKMS installed kfd does not support Raven for kernel < 4.16\n");
++ return;
++ }
++#endif
+ kfd2kgd = amdgpu_amdkfd_gfx_9_0_get_functions();
+ break;
+ default:
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4627-drm-amdkfd-Vega20-bring-up-on-amdkfd-side.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4627-drm-amdkfd-Vega20-bring-up-on-amdkfd-side.patch
new file mode 100644
index 00000000..336341fc
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4627-drm-amdkfd-Vega20-bring-up-on-amdkfd-side.patch
@@ -0,0 +1,148 @@
+From 43c20421e6e474085475f1ea32b1aaae87988587 Mon Sep 17 00:00:00 2001
+From: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Date: Tue, 31 Oct 2017 13:32:53 -0400
+Subject: [PATCH 4627/5725] drm/amdkfd: Vega20 bring up on amdkfd side
+
+Change-Id: I6a2572ad6caf92e3feaf29a7b14fd4f0daa16dbc
+Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 1 +
+ drivers/gpu/drm/amd/amdkfd/kfd_device.c | 22 ++++++++++++++++++++++
+ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 1 +
+ drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c | 1 +
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 1 +
+ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c | 1 +
+ drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 1 +
+ drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 1 +
+ 8 files changed, 29 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+index 6688882..c540b65 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+@@ -642,6 +642,7 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
+ num_of_cache_types = ARRAY_SIZE(polaris11_cache_info);
+ break;
+ case CHIP_VEGA10:
++ case CHIP_VEGA20:
+ pcache_info = vega10_cache_info;
+ num_of_cache_types = ARRAY_SIZE(vega10_cache_info);
+ break;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+index f3afd4e..a5b0e20 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -253,6 +253,22 @@ static const struct kfd_device_info vega10_vf_device_info = {
+ .num_sdma_queues_per_engine = 2,
+ };
+
++static const struct kfd_device_info vega20_device_info = {
++ .asic_family = CHIP_VEGA20,
++ .max_pasid_bits = 16,
++ .max_no_of_hqd = 24,
++ .doorbell_size = 8,
++ .ih_ring_entry_size = 8 * sizeof(uint32_t),
++ .event_interrupt_class = &event_interrupt_class_v9,
++ .num_of_watch_points = 4,
++ .mqd_size_aligned = MQD_SIZE_ALIGNED,
++ .supports_cwsr = true,
++ .needs_iommu_device = false,
++ .needs_pci_atomics = true,
++ .num_sdma_engines = 2,
++ .num_sdma_queues_per_engine = 8,
++};
++
+ struct kfd_deviceid {
+ unsigned short did;
+ const struct kfd_device_info *device_info;
+@@ -341,6 +357,12 @@ static const struct kfd_deviceid supported_devices[] = {
+ { 0x6868, &vega10_device_info }, /* Vega10 */
+ { 0x686C, &vega10_vf_device_info }, /* Vega10 vf*/
+ { 0x687F, &vega10_device_info }, /* Vega10 */
++ { 0x66a0, &vega20_device_info }, /* Vega20 */
++ { 0x66a1, &vega20_device_info }, /* Vega20 */
++ { 0x66a2, &vega20_device_info }, /* Vega20 */
++ { 0x66a3, &vega20_device_info }, /* Vega20 */
++ { 0x66a7, &vega20_device_info }, /* Vega20 */
++ { 0x66af, &vega20_device_info } /* Vega20 */
+ };
+
+ static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index bc80f28..b071667 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -1728,6 +1728,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
+ break;
+
+ case CHIP_VEGA10:
++ case CHIP_VEGA20:
+ case CHIP_RAVEN:
+ device_queue_manager_init_v9(&dqm->asic_ops);
+ break;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+index f7de732..8f123a2 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+@@ -400,6 +400,7 @@ int kfd_init_apertures(struct kfd_process *process)
+ kfd_init_apertures_vi(pdd, id);
+ break;
+ case CHIP_VEGA10:
++ case CHIP_VEGA20:
+ case CHIP_RAVEN:
+ kfd_init_apertures_v9(pdd, id);
+ break;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+index 51b976d..be038c5 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+@@ -355,6 +355,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
+ break;
+
+ case CHIP_VEGA10:
++ case CHIP_VEGA20:
+ case CHIP_RAVEN:
+ kernel_queue_init_v9(&kq->ops_asic_specific);
+ break;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+index 8279b74..d39e81c 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+@@ -81,6 +81,7 @@ struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
+ case CHIP_POLARIS11:
+ return mqd_manager_init_vi_tonga(type, dev);
+ case CHIP_VEGA10:
++ case CHIP_VEGA20:
+ case CHIP_RAVEN:
+ return mqd_manager_init_v9(type, dev);
+ default:
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+index 1092631..c6080ed3 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+@@ -229,6 +229,7 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
+ pm->pmf = &kfd_vi_pm_funcs;
+ break;
+ case CHIP_VEGA10:
++ case CHIP_VEGA20:
+ case CHIP_RAVEN:
+ pm->pmf = &kfd_v9_pm_funcs;
+ break;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+index 82cff10..4fe5ebc 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+@@ -1308,6 +1308,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
+ HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
+ break;
+ case CHIP_VEGA10:
++ case CHIP_VEGA20:
+ case CHIP_RAVEN:
+ dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 <<
+ HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4628-drm-amdkfd-reflect-atomic-support-in-IO-link-propert.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4628-drm-amdkfd-reflect-atomic-support-in-IO-link-propert.patch
new file mode 100644
index 00000000..0d6cc509
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4628-drm-amdkfd-reflect-atomic-support-in-IO-link-propert.patch
@@ -0,0 +1,118 @@
+From cd0f8fd132a5e81d8443e95e60ec8acf93c1fdf4 Mon Sep 17 00:00:00 2001
+From: Eric Huang <JinHuiEric.Huang@amd.com>
+Date: Mon, 4 Jun 2018 15:22:24 -0400
+Subject: [PATCH 4628/5725] drm/amdkfd: reflect atomic support in IO link
+ properties
+
+Add the IO link property flags according to ASIC type and PCIe
+capabilities.
+
+BUG: KFD-386
+
+Change-Id: I64c670d86c6a3992203948547eb87c5466662dfc
+Signed-off-by: Eric Huang <JinHuiEric.Huang@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_device.c | 12 ++++++-----
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 2 ++
+ drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 35 +++++++++++++++++++++++++------
+ 3 files changed, 38 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+index a5b0e20..10095087 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -400,6 +400,10 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
+ dev_err(kfd_device, "kgd2kfd_probe failed\n");
+ return NULL;
+ }
++
++ kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
++ if (!kfd)
++ return NULL;
+
+ /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
+ * 32 and 64-bit requests are possible and must be
+@@ -412,12 +416,10 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
+ dev_info(kfd_device,
+ "skipped device %x:%x, PCI rejects atomics",
+ pdev->vendor, pdev->device);
++ kfree(kfd);
+ return NULL;
+- }
+-
+- kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
+- if (!kfd)
+- return NULL;
++ } else if (!ret)
++ kfd->pci_atomic_requested = true;
+
+ kfd->kgd = kgd;
+ kfd->device_info = device_info;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index 7e55085..7869a9d 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -294,6 +294,8 @@ struct kfd_dev {
+ bool cwsr_enabled;
+ const void *cwsr_isa;
+ unsigned int cwsr_isa_size;
++
++ bool pci_atomic_requested;
+ };
+
+ struct kfd_ipc_obj;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+index 4fe5ebc..7702156 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+@@ -1182,17 +1182,40 @@ static void kfd_fill_mem_clk_max_info(struct kfd_topology_device *dev)
+
+ static void kfd_fill_iolink_non_crat_info(struct kfd_topology_device *dev)
+ {
+- struct kfd_iolink_properties *link;
++ struct kfd_iolink_properties *link, *cpu_link;
++ struct kfd_topology_device *cpu_dev;
++ uint32_t cap;
++ uint32_t cpu_flag = CRAT_IOLINK_FLAGS_ENABLED;
++ uint32_t flag = CRAT_IOLINK_FLAGS_ENABLED;
+
+ if (!dev || !dev->gpu)
+ return;
+
+- /* GPU only creates direck links so apply flags setting to all */
+- if (dev->gpu->device_info->asic_family == CHIP_HAWAII)
+- list_for_each_entry(link, &dev->io_link_props, list)
+- link->flags = CRAT_IOLINK_FLAGS_ENABLED |
+- CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT |
++ pcie_capability_read_dword(dev->gpu->pdev,
++ PCI_EXP_DEVCAP2, &cap);
++
++ if (!(cap & (PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
++ PCI_EXP_DEVCAP2_ATOMIC_COMP64)))
++ cpu_flag |= CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT |
++ CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT;
++
++ if (!dev->gpu->pci_atomic_requested ||
++ dev->gpu->device_info->asic_family == CHIP_HAWAII)
++ flag |= CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT |
+ CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT;
++
++ /* GPU only creates direct links so apply flags setting to all */
++ list_for_each_entry(link, &dev->io_link_props, list) {
++ link->flags = flag;
++ cpu_dev = kfd_topology_device_by_proximity_domain(
++ link->node_to);
++ if (cpu_dev) {
++ list_for_each_entry(cpu_link,
++ &cpu_dev->io_link_props, list)
++ if (cpu_link->node_to == link->node_from)
++ cpu_link->flags = cpu_flag;
++ }
++ }
+ }
+
+ int kfd_topology_add_device(struct kfd_dev *gpu)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4629-drm-amdgpu-Changed-CU-reservation-golden-settings.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4629-drm-amdgpu-Changed-CU-reservation-golden-settings.patch
new file mode 100644
index 00000000..0df58a98
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4629-drm-amdgpu-Changed-CU-reservation-golden-settings.patch
@@ -0,0 +1,39 @@
+From 31e74e6aa5cdb2136868174938fa3e6793410204 Mon Sep 17 00:00:00 2001
+From: Oak Zeng <Oak.Zeng@amd.com>
+Date: Fri, 1 Jun 2018 17:25:06 -0400
+Subject: [PATCH 4629/5725] drm/amdgpu: Changed CU reservation golden settings
+
+With the previous golden settings, compute tasks could not use
+the reserved LDS (32K) on CU0 and CU1. On a 64K LDS system,
+if a compute work group allocates more than 32K of LDS, it
+cannot be dispatched to CU0 or CU1 because of the
+reservation. This change enables compute tasks to use the
+reserved LDS on CU0 and CU1.
+
+Change-Id: I534981622f524fd22a38d0b0bcb302b9bc00a793
+Signed-off-by: Oak Zeng <Oak.Zeng@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Felix Kuehling <felix.kuehling@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index ea348fe..b093777 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -88,8 +88,8 @@ static const struct soc15_reg_golden golden_settings_gc_9_0[] =
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
+- SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
+- SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x00ffff87),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x00ffff8f),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4630-drm-amdkfd-Add-check-user-queue-busy-interface.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4630-drm-amdkfd-Add-check-user-queue-busy-interface.patch
new file mode 100644
index 00000000..2ce76792
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4630-drm-amdkfd-Add-check-user-queue-busy-interface.patch
@@ -0,0 +1,246 @@
+From 795d53fab8f5333b8f1b858ed3a40f085e5c5804 Mon Sep 17 00:00:00 2001
+From: Philip Yang <Philip.Yang@amd.com>
+Date: Mon, 28 May 2018 16:22:24 -0400
+Subject: [PATCH 4630/5725] drm/amdkfd: Add check user queue busy interface
+
+A process is idle if both conditions are met:
+ the queue's rptr equals its wptr
+ the control stack is empty, i.e. cntl_stack_offset = cntl_stack_size
+
+Change-Id: I316341eeea8ada302d216d1df36d2d8a6951c573
+Signed-off-by: Philip Yang <Philip.Yang@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h | 2 ++
+ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c | 28 +++++++++++++++
+ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 46 ++++++++++++++++++++++++
+ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c | 42 ++++++++++++++++++++++
+ 4 files changed, 118 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
+index dcaeda8..336ea9c 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
+@@ -94,6 +94,8 @@ struct mqd_manager {
+ u32 *ctl_stack_used_size,
+ u32 *save_area_used_size);
+
++ bool (*check_queue_active)(struct queue *q);
++
+ #if defined(CONFIG_DEBUG_FS)
+ int (*debugfs_show_mqd)(struct seq_file *m, void *data);
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+index bd44a23..2441834 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+@@ -42,6 +42,31 @@ static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
+ return (struct cik_sdma_rlc_registers *)mqd;
+ }
+
++static bool check_sdma_queue_active(struct queue *q)
++{
++ uint32_t rptr, wptr;
++ struct cik_sdma_rlc_registers *m = get_sdma_mqd(q->mqd);
++
++ rptr = m->sdma_rlc_rb_rptr;
++ wptr = m->sdma_rlc_rb_wptr;
++ pr_debug("rptr=%d, wptr=%d\n", rptr, wptr);
++
++ return (rptr != wptr);
++}
++
++static bool check_queue_active(struct queue *q)
++{
++ uint32_t rptr, wptr;
++ struct cik_mqd *m = get_mqd(q->mqd);
++
++ rptr = m->cp_hqd_pq_rptr;
++ wptr = m->cp_hqd_pq_wptr;
++
++ pr_debug("rptr=%d, wptr=%d\n", rptr, wptr);
++
++ return (rptr != wptr);
++}
++
+ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q)
+ {
+@@ -491,6 +516,7 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
+ mqd->update_mqd = update_mqd;
+ mqd->destroy_mqd = destroy_mqd;
+ mqd->is_occupied = is_occupied;
++ mqd->check_queue_active = check_queue_active;
+ #if defined(CONFIG_DEBUG_FS)
+ mqd->debugfs_show_mqd = debugfs_show_mqd;
+ #endif
+@@ -502,6 +528,7 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
+ mqd->update_mqd = update_mqd_hiq;
+ mqd->destroy_mqd = destroy_mqd;
+ mqd->is_occupied = is_occupied;
++ mqd->check_queue_active = check_queue_active;
+ #if defined(CONFIG_DEBUG_FS)
+ mqd->debugfs_show_mqd = debugfs_show_mqd;
+ #endif
+@@ -513,6 +540,7 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
+ mqd->update_mqd = update_mqd_sdma;
+ mqd->destroy_mqd = destroy_mqd_sdma;
+ mqd->is_occupied = is_occupied_sdma;
++ mqd->check_queue_active = check_sdma_queue_active;
+ #if defined(CONFIG_DEBUG_FS)
+ mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+index 58ea1fe..dcd24c4 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+@@ -41,6 +41,49 @@ static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
+ return (struct v9_sdma_mqd *)mqd;
+ }
+
++static bool check_sdma_queue_active(struct queue *q)
++{
++ uint32_t rptr, wptr;
++ uint32_t rptr_hi, wptr_hi;
++ struct v9_sdma_mqd *m = get_sdma_mqd(q->mqd);
++
++ rptr = m->sdmax_rlcx_rb_rptr;
++ wptr = m->sdmax_rlcx_rb_wptr;
++ rptr_hi = m->sdmax_rlcx_rb_rptr_hi;
++ wptr_hi = m->sdmax_rlcx_rb_wptr_hi;
++ pr_debug("rptr=%d, wptr=%d\n", rptr, wptr);
++ pr_debug("rptr_hi=%d, wptr_hi=%d\n", rptr_hi, wptr_hi);
++
++ return (rptr != wptr || rptr_hi != wptr_hi);
++}
++
++static bool check_queue_active(struct queue *q)
++{
++ uint32_t rptr, wptr;
++ uint32_t cntl_stack_offset, cntl_stack_size;
++ struct v9_mqd *m = get_mqd(q->mqd);
++
++ rptr = m->cp_hqd_pq_rptr;
++ wptr = m->cp_hqd_pq_wptr_lo % q->properties.queue_size;
++ cntl_stack_offset = m->cp_hqd_cntl_stack_offset;
++ cntl_stack_size = m->cp_hqd_cntl_stack_size;
++
++ pr_debug("rptr=%d, wptr=%d\n", rptr, wptr);
++ pr_debug("m->cp_hqd_cntl_stack_offset=0x%08x\n", cntl_stack_offset);
++ pr_debug("m->cp_hqd_cntl_stack_size=0x%08x\n", cntl_stack_size);
++
++ if ((rptr == 0 && wptr == 0) ||
++ cntl_stack_offset == 0xffffffff ||
++ cntl_stack_size > 0x5000)
++ return false;
++
++ /* Process is idle if both conditions are meet:
++ * queue's rptr equals to wptr
++ * control stack is empty, cntl_stack_offset = cntl_stack_size
++ */
++ return (rptr != wptr || cntl_stack_offset != cntl_stack_size);
++}
++
+ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q)
+ {
+@@ -489,6 +532,7 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
+ mqd->destroy_mqd = destroy_mqd;
+ mqd->is_occupied = is_occupied;
+ mqd->get_wave_state = get_wave_state;
++ mqd->check_queue_active = check_queue_active;
+ #if defined(CONFIG_DEBUG_FS)
+ mqd->debugfs_show_mqd = debugfs_show_mqd;
+ #endif
+@@ -500,6 +544,7 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
+ mqd->update_mqd = update_mqd_hiq;
+ mqd->destroy_mqd = destroy_mqd;
+ mqd->is_occupied = is_occupied;
++ mqd->check_queue_active = check_queue_active;
+ #if defined(CONFIG_DEBUG_FS)
+ mqd->debugfs_show_mqd = debugfs_show_mqd;
+ #endif
+@@ -511,6 +556,7 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
+ mqd->update_mqd = update_mqd_sdma;
+ mqd->destroy_mqd = destroy_mqd_sdma;
+ mqd->is_occupied = is_occupied_sdma;
++ mqd->check_queue_active = check_sdma_queue_active;
+ #if defined(CONFIG_DEBUG_FS)
+ mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+index e3ae2d4..246fe6c 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+@@ -44,6 +44,45 @@ static inline struct vi_sdma_mqd *get_sdma_mqd(void *mqd)
+ return (struct vi_sdma_mqd *)mqd;
+ }
+
++static bool check_sdma_queue_active(struct queue *q)
++{
++ uint32_t rptr, wptr;
++ struct vi_sdma_mqd *m = get_sdma_mqd(q->mqd);
++
++ rptr = m->sdmax_rlcx_rb_rptr;
++ wptr = m->sdmax_rlcx_rb_wptr;
++ pr_debug("rptr=%d, wptr=%d\n", rptr, wptr);
++
++ return (rptr != wptr);
++}
++
++static bool check_queue_active(struct queue *q)
++{
++ uint32_t rptr, wptr;
++ uint32_t cntl_stack_offset, cntl_stack_size;
++ struct vi_mqd *m = get_mqd(q->mqd);
++
++ rptr = m->cp_hqd_pq_rptr;
++ wptr = m->cp_hqd_pq_wptr;
++ cntl_stack_offset = m->cp_hqd_cntl_stack_offset;
++ cntl_stack_size = m->cp_hqd_cntl_stack_size;
++
++ pr_debug("rptr=%d, wptr=%d\n", rptr, wptr);
++ pr_debug("m->cp_hqd_cntl_stack_offset=0x%08x\n", cntl_stack_offset);
++ pr_debug("m->cp_hqd_cntl_stack_size=0x%08x\n", cntl_stack_size);
++
++ if ((rptr == 0 && wptr == 0) ||
++ cntl_stack_offset == 0xffffffff ||
++ cntl_stack_size > 0x5000)
++ return false;
++
++ /* Process is idle if both conditions are meet:
++ * queue's rptr equals to wptr
++ * control stack is empty, cntl_stack_offset = cntl_stack_size
++ */
++ return (rptr != wptr || cntl_stack_offset != cntl_stack_size);
++}
++
+ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q)
+ {
+@@ -498,6 +537,7 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
+ mqd->destroy_mqd = destroy_mqd;
+ mqd->is_occupied = is_occupied;
+ mqd->get_wave_state = get_wave_state;
++ mqd->check_queue_active = check_queue_active;
+ #if defined(CONFIG_DEBUG_FS)
+ mqd->debugfs_show_mqd = debugfs_show_mqd;
+ #endif
+@@ -509,6 +549,7 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
+ mqd->update_mqd = update_mqd_hiq;
+ mqd->destroy_mqd = destroy_mqd;
+ mqd->is_occupied = is_occupied;
++ mqd->check_queue_active = check_queue_active;
+ #if defined(CONFIG_DEBUG_FS)
+ mqd->debugfs_show_mqd = debugfs_show_mqd;
+ #endif
+@@ -520,6 +561,7 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
+ mqd->update_mqd = update_mqd_sdma;
+ mqd->destroy_mqd = destroy_mqd_sdma;
+ mqd->is_occupied = is_occupied_sdma;
++ mqd->check_queue_active = check_sdma_queue_active;
+ #if defined(CONFIG_DEBUG_FS)
+ mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
+ #endif
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4631-drm-amdkfd-Replace-mqd-with-mqd_mgr-as-the-variable-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4631-drm-amdkfd-Replace-mqd-with-mqd_mgr-as-the-variable-.patch
new file mode 100644
index 00000000..48650b48
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4631-drm-amdkfd-Replace-mqd-with-mqd_mgr-as-the-variable-.patch
@@ -0,0 +1,531 @@
+From 859fe34bdcf6638189c4ebee65d96107b0a839db Mon Sep 17 00:00:00 2001
+From: Yong Zhao <yong.zhao@amd.com>
+Date: Mon, 4 Jun 2018 14:33:13 -0400
+Subject: [PATCH 4631/5725] drm/amdkfd: Replace mqd with mqd_mgr as the
+ variable name for mqd_manager
+
+This will make reading code much easier.
+
+Change-Id: If57ec96c8b22d3e0c6dd0ff04a17dcb8ff3a27c4
+Signed-off-by: Yong Zhao <yong.zhao@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 117 +++++++++++----------
+ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 2 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 17 +--
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h | 2 +-
+ .../gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 8 +-
+ 5 files changed, 74 insertions(+), 72 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index b071667..ae6f7d8 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -359,10 +359,10 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+ {
+ int retval;
+- struct mqd_manager *mqd;
++ struct mqd_manager *mqd_mgr;
+
+- mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
+- if (!mqd)
++ mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
++ if (!mqd_mgr)
+ return -ENOMEM;
+
+ retval = allocate_hqd(dqm, q);
+@@ -373,7 +373,7 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
+ if (retval)
+ goto out_deallocate_hqd;
+
+- retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
++ retval = mqd_mgr->init_mqd(mqd_mgr, &q->mqd, &q->mqd_mem_obj,
+ &q->gart_mqd_addr, &q->properties);
+ if (retval)
+ goto out_deallocate_doorbell;
+@@ -387,15 +387,15 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
+ if (!q->properties.is_active)
+ return 0;
+
+- retval = mqd->load_mqd(mqd, q->mqd, q->pipe, q->queue, &q->properties,
+- q->process->mm);
++ retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
++ &q->properties, q->process->mm);
+ if (retval)
+ goto out_uninit_mqd;
+
+ return 0;
+
+ out_uninit_mqd:
+- mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
++ mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
+ out_deallocate_doorbell:
+ deallocate_doorbell(qpd, q);
+ out_deallocate_hqd:
+@@ -412,11 +412,11 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
+ struct queue *q)
+ {
+ int retval;
+- struct mqd_manager *mqd;
++ struct mqd_manager *mqd_mgr;
+
+- mqd = dqm->ops.get_mqd_manager(dqm,
++ mqd_mgr = dqm->ops.get_mqd_manager(dqm,
+ get_mqd_type_from_queue_type(q->properties.type));
+- if (!mqd)
++ if (!mqd_mgr)
+ return -ENOMEM;
+
+ if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
+@@ -433,14 +433,14 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
+
+ deallocate_doorbell(qpd, q);
+
+- retval = mqd->destroy_mqd(mqd, q->mqd,
++ retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
+ KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
+ KFD_UNMAP_LATENCY_MS,
+ q->pipe, q->queue);
+ if (retval == -ETIME)
+ qpd->reset_wavefronts = true;
+
+- mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
++ mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
+
+ list_del(&q->list);
+ if (list_empty(&qpd->queues_list)) {
+@@ -480,7 +480,7 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
+ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
+ {
+ int retval;
+- struct mqd_manager *mqd;
++ struct mqd_manager *mqd_mgr;
+ struct kfd_process_device *pdd;
+ bool prev_active = false;
+
+@@ -490,9 +490,9 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
+ retval = -ENODEV;
+ goto out_unlock;
+ }
+- mqd = dqm->ops.get_mqd_manager(dqm,
++ mqd_mgr = dqm->ops.get_mqd_manager(dqm,
+ get_mqd_type_from_queue_type(q->properties.type));
+- if (!mqd) {
++ if (!mqd_mgr) {
+ retval = -ENOMEM;
+ goto out_unlock;
+ }
+@@ -519,7 +519,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
+ } else if (prev_active &&
+ (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
+ q->properties.type == KFD_QUEUE_TYPE_SDMA)) {
+- retval = mqd->destroy_mqd(mqd, q->mqd,
++ retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
+ KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
+ KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
+ if (retval) {
+@@ -528,7 +528,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
+ }
+ }
+
+- retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
++ retval = mqd_mgr->update_mqd(mqd_mgr, q->mqd, &q->properties);
+
+ /*
+ * check active state vs. the previous state and modify
+@@ -546,7 +546,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
+ else if (q->properties.is_active &&
+ (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
+ q->properties.type == KFD_QUEUE_TYPE_SDMA))
+- retval = mqd->load_mqd(mqd, q->mqd, q->pipe, q->queue,
++ retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
+ &q->properties, q->process->mm);
+
+ out_unlock:
+@@ -557,29 +557,29 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
+ static struct mqd_manager *get_mqd_manager(
+ struct device_queue_manager *dqm, enum KFD_MQD_TYPE type)
+ {
+- struct mqd_manager *mqd;
++ struct mqd_manager *mqd_mgr;
+
+ if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
+ return NULL;
+
+ pr_debug("mqd type %d\n", type);
+
+- mqd = dqm->mqds[type];
+- if (!mqd) {
+- mqd = mqd_manager_init(type, dqm->dev);
+- if (!mqd)
++ mqd_mgr = dqm->mqd_mgrs[type];
++ if (!mqd_mgr) {
++ mqd_mgr = mqd_manager_init(type, dqm->dev);
++ if (!mqd_mgr)
+ pr_err("mqd manager is NULL");
+- dqm->mqds[type] = mqd;
++ dqm->mqd_mgrs[type] = mqd_mgr;
+ }
+
+- return mqd;
++ return mqd_mgr;
+ }
+
+ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+ {
+ struct queue *q;
+- struct mqd_manager *mqd;
++ struct mqd_manager *mqd_mgr;
+ struct kfd_process_device *pdd;
+ int retval = 0;
+
+@@ -595,16 +595,16 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
+ list_for_each_entry(q, &qpd->queues_list, list) {
+ if (!q->properties.is_active)
+ continue;
+- mqd = dqm->ops.get_mqd_manager(dqm,
++ mqd_mgr = dqm->ops.get_mqd_manager(dqm,
+ get_mqd_type_from_queue_type(q->properties.type));
+- if (!mqd) { /* should not be here */
++ if (!mqd_mgr) { /* should not be here */
+ pr_err("Cannot evict queue, mqd mgr is NULL\n");
+ retval = -ENOMEM;
+ goto out;
+ }
+ q->properties.is_evicted = true;
+ q->properties.is_active = false;
+- retval = mqd->destroy_mqd(mqd, q->mqd,
++ retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
+ KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
+ KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
+ if (retval)
+@@ -654,7 +654,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+ {
+ struct queue *q;
+- struct mqd_manager *mqd;
++ struct mqd_manager *mqd_mgr;
+ struct kfd_process_device *pdd;
+ uint64_t pd_base;
+ int retval = 0;
+@@ -690,16 +690,16 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
+ list_for_each_entry(q, &qpd->queues_list, list) {
+ if (!q->properties.is_evicted)
+ continue;
+- mqd = dqm->ops.get_mqd_manager(dqm,
++ mqd_mgr = dqm->ops.get_mqd_manager(dqm,
+ get_mqd_type_from_queue_type(q->properties.type));
+- if (!mqd) { /* should not be here */
++ if (!mqd_mgr) { /* should not be here */
+ pr_err("Cannot restore queue, mqd mgr is NULL\n");
+ retval = -ENOMEM;
+ goto out;
+ }
+ q->properties.is_evicted = false;
+ q->properties.is_active = true;
+- retval = mqd->load_mqd(mqd, q->mqd, q->pipe,
++ retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
+ q->queue, &q->properties,
+ q->process->mm);
+ if (retval)
+@@ -880,7 +880,7 @@ static void uninitialize(struct device_queue_manager *dqm)
+
+ kfree(dqm->allocated_queues);
+ for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
+- kfree(dqm->mqds[i]);
++ kfree(dqm->mqd_mgrs[i]);
+ mutex_destroy(&dqm->lock);
+ kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
+ }
+@@ -924,11 +924,11 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
+ struct queue *q,
+ struct qcm_process_device *qpd)
+ {
+- struct mqd_manager *mqd;
++ struct mqd_manager *mqd_mgr;
+ int retval;
+
+- mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
+- if (!mqd)
++ mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
++ if (!mqd_mgr)
+ return -ENOMEM;
+
+ retval = allocate_sdma_queue(dqm, &q->sdma_id);
+@@ -947,19 +947,20 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
+ pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
+
+ dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
+- retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
++ retval = mqd_mgr->init_mqd(mqd_mgr, &q->mqd, &q->mqd_mem_obj,
+ &q->gart_mqd_addr, &q->properties);
+ if (retval)
+ goto out_deallocate_doorbell;
+
+- retval = mqd->load_mqd(mqd, q->mqd, 0, 0, &q->properties, NULL);
++ retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, 0, 0, &q->properties,
++ NULL);
+ if (retval)
+ goto out_uninit_mqd;
+
+ return 0;
+
+ out_uninit_mqd:
+- mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
++ mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
+ out_deallocate_doorbell:
+ deallocate_doorbell(qpd, q);
+ out_deallocate_sdma_queue:
+@@ -1135,7 +1136,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
+ struct qcm_process_device *qpd)
+ {
+ int retval;
+- struct mqd_manager *mqd;
++ struct mqd_manager *mqd_mgr;
+
+ retval = 0;
+
+@@ -1162,10 +1163,10 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
+ if (retval)
+ goto out_deallocate_sdma_queue;
+
+- mqd = dqm->ops.get_mqd_manager(dqm,
++ mqd_mgr = dqm->ops.get_mqd_manager(dqm,
+ get_mqd_type_from_queue_type(q->properties.type));
+
+- if (!mqd) {
++ if (!mqd_mgr) {
+ retval = -ENOMEM;
+ goto out_deallocate_doorbell;
+ }
+@@ -1182,7 +1183,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
+
+ q->properties.tba_addr = qpd->tba_addr;
+ q->properties.tma_addr = qpd->tma_addr;
+- retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
++ retval = mqd_mgr->init_mqd(mqd_mgr, &q->mqd, &q->mqd_mem_obj,
+ &q->gart_mqd_addr, &q->properties);
+ if (retval)
+ goto out_deallocate_doorbell;
+@@ -1338,7 +1339,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
+ struct queue *q)
+ {
+ int retval;
+- struct mqd_manager *mqd;
++ struct mqd_manager *mqd_mgr;
+ bool preempt_all_queues;
+
+ preempt_all_queues = false;
+@@ -1358,9 +1359,9 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
+
+ }
+
+- mqd = dqm->ops.get_mqd_manager(dqm,
++ mqd_mgr = dqm->ops.get_mqd_manager(dqm,
+ get_mqd_type_from_queue_type(q->properties.type));
+- if (!mqd) {
++ if (!mqd_mgr) {
+ retval = -ENOMEM;
+ goto failed;
+ }
+@@ -1381,7 +1382,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
+ if (retval == -ETIME)
+ qpd->reset_wavefronts = true;
+
+- mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
++ mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
+
+ /*
+ * Unconditionally decrement this counter, regardless of the queue's
+@@ -1530,7 +1531,7 @@ static int get_wave_state(struct device_queue_manager *dqm,
+ u32 *ctl_stack_used_size,
+ u32 *save_area_used_size)
+ {
+- struct mqd_manager *mqd;
++ struct mqd_manager *mqd_mgr;
+ int r;
+
+ mutex_lock(&dqm->lock);
+@@ -1541,19 +1542,19 @@ static int get_wave_state(struct device_queue_manager *dqm,
+ goto dqm_unlock;
+ }
+
+- mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
+- if (!mqd) {
++ mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
++ if (!mqd_mgr) {
+ r = -ENOMEM;
+ goto dqm_unlock;
+ }
+
+- if (!mqd->get_wave_state) {
++ if (!mqd_mgr->get_wave_state) {
+ r = -EINVAL;
+ goto dqm_unlock;
+ }
+
+- r = mqd->get_wave_state(mqd, q->mqd, ctl_stack, ctl_stack_used_size,
+- save_area_used_size);
++ r = mqd_mgr->get_wave_state(mqd_mgr, q->mqd, ctl_stack,
++ ctl_stack_used_size, save_area_used_size);
+
+ dqm_unlock:
+ mutex_unlock(&dqm->lock);
+@@ -1566,7 +1567,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
+ int retval;
+ struct queue *q, *next;
+ struct kernel_queue *kq, *kq_next;
+- struct mqd_manager *mqd;
++ struct mqd_manager *mqd_mgr;
+ struct device_process_node *cur, *next_dpn;
+ enum kfd_unmap_queues_filter filter =
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;
+@@ -1616,15 +1617,15 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
+
+ /* lastly, free mqd resources */
+ list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
+- mqd = dqm->ops.get_mqd_manager(dqm,
++ mqd_mgr = dqm->ops.get_mqd_manager(dqm,
+ get_mqd_type_from_queue_type(q->properties.type));
+- if (!mqd) {
++ if (!mqd_mgr) {
+ retval = -ENOMEM;
+ goto out;
+ }
+ list_del(&q->list);
+ qpd->queue_count--;
+- mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
++ mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
+ }
+
+ out:
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+index ad5c449..1c4ef00 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+@@ -174,7 +174,7 @@ struct device_queue_manager {
+ struct device_queue_manager_ops ops;
+ struct device_queue_manager_asic_ops asic_ops;
+
+- struct mqd_manager *mqds[KFD_MQD_TYPE_MAX];
++ struct mqd_manager *mqd_mgrs[KFD_MQD_TYPE_MAX];
+ struct packet_manager packets;
+ struct kfd_dev *dev;
+ struct mutex lock;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+index be038c5..e78445d 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+@@ -59,7 +59,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
+ switch (type) {
+ case KFD_QUEUE_TYPE_DIQ:
+ case KFD_QUEUE_TYPE_HIQ:
+- kq->mqd = dev->dqm->ops.get_mqd_manager(dev->dqm,
++ kq->mqd_mgr = dev->dqm->ops.get_mqd_manager(dev->dqm,
+ KFD_MQD_TYPE_HIQ);
+ break;
+ default:
+@@ -67,7 +67,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
+ return false;
+ }
+
+- if (!kq->mqd)
++ if (!kq->mqd_mgr)
+ return false;
+
+ prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off);
+@@ -131,7 +131,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
+ kq->queue->device = dev;
+ kq->queue->process = kfd_get_process(current);
+
+- retval = kq->mqd->init_mqd(kq->mqd, &kq->queue->mqd,
++ retval = kq->mqd_mgr->init_mqd(kq->mqd_mgr, &kq->queue->mqd,
+ &kq->queue->mqd_mem_obj,
+ &kq->queue->gart_mqd_addr,
+ &kq->queue->properties);
+@@ -143,9 +143,9 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
+ pr_debug("Assigning hiq to hqd\n");
+ kq->queue->pipe = KFD_CIK_HIQ_PIPE;
+ kq->queue->queue = KFD_CIK_HIQ_QUEUE;
+- kq->mqd->load_mqd(kq->mqd, kq->queue->mqd, kq->queue->pipe,
+- kq->queue->queue, &kq->queue->properties,
+- NULL);
++ kq->mqd_mgr->load_mqd(kq->mqd_mgr, kq->queue->mqd,
++ kq->queue->pipe, kq->queue->queue,
++ &kq->queue->properties, NULL);
+ } else {
+ /* allocate fence for DIQ */
+
+@@ -183,7 +183,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
+ static void uninitialize(struct kernel_queue *kq)
+ {
+ if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ)
+- kq->mqd->destroy_mqd(kq->mqd,
++ kq->mqd_mgr->destroy_mqd(kq->mqd_mgr,
+ kq->queue->mqd,
+ KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
+ KFD_UNMAP_LATENCY_MS,
+@@ -192,7 +192,8 @@ static void uninitialize(struct kernel_queue *kq)
+ else if (kq->queue->properties.type == KFD_QUEUE_TYPE_DIQ)
+ kfd_gtt_sa_free(kq->dev, kq->fence_mem_obj);
+
+- kq->mqd->uninit_mqd(kq->mqd, kq->queue->mqd, kq->queue->mqd_mem_obj);
++ kq->mqd_mgr->uninit_mqd(kq->mqd_mgr, kq->queue->mqd,
++ kq->queue->mqd_mem_obj);
+
+ kfd_gtt_sa_free(kq->dev, kq->rptr_mem);
+ kfd_gtt_sa_free(kq->dev, kq->wptr_mem);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
+index 82c94a6..384d7a3 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
+@@ -80,7 +80,7 @@ struct kernel_queue {
+
+ /* data */
+ struct kfd_dev *dev;
+- struct mqd_manager *mqd;
++ struct mqd_manager *mqd_mgr;
+ struct queue *queue;
+ uint64_t pending_wptr64;
+ uint32_t pending_wptr;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+index e18ed45..8933323 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+@@ -439,7 +439,7 @@ int pqm_debugfs_mqds(struct seq_file *m, void *data)
+ struct process_queue_node *pqn;
+ struct queue *q;
+ enum KFD_MQD_TYPE mqd_type;
+- struct mqd_manager *mqd_manager;
++ struct mqd_manager *mqd_mgr;
+ int r = 0;
+
+ list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
+@@ -462,11 +462,11 @@ int pqm_debugfs_mqds(struct seq_file *m, void *data)
+ q->properties.type, q->device->id);
+ continue;
+ }
+- mqd_manager = q->device->dqm->ops.get_mqd_manager(
++ mqd_mgr = q->device->dqm->ops.get_mqd_manager(
+ q->device->dqm, mqd_type);
+ } else if (pqn->kq) {
+ q = pqn->kq->queue;
+- mqd_manager = pqn->kq->mqd;
++ mqd_mgr = pqn->kq->mqd_mgr;
+ switch (q->properties.type) {
+ case KFD_QUEUE_TYPE_DIQ:
+ seq_printf(m, " DIQ on device %x\n",
+@@ -486,7 +486,7 @@ int pqm_debugfs_mqds(struct seq_file *m, void *data)
+ continue;
+ }
+
+- r = mqd_manager->debugfs_show_mqd(m, q->mqd);
++ r = mqd_mgr->debugfs_show_mqd(m, q->mqd);
+ if (r != 0)
+ break;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4632-Hybrid-Version-18.30.2.15.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4632-Hybrid-Version-18.30.2.15.patch
new file mode 100644
index 00000000..646a78f7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4632-Hybrid-Version-18.30.2.15.patch
@@ -0,0 +1,27 @@
+From 35be4f6619eb2ab0216d5c2c51de1026e3b57723 Mon Sep 17 00:00:00 2001
+From: Junshan Fang <Junshan.Fang@amd.com>
+Date: Tue, 12 Jun 2018 10:18:16 +0800
+Subject: [PATCH 4632/5725] Hybrid Version: 18.30.2.15
+
+Change-Id: I83243cc1d9e9fd1508e46cee81e6d1f79b9648b6
+Signed-off-by: Junshan Fang <Junshan.Fang@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 96694c8..c41af58 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -81,7 +81,7 @@
+ #define KMS_DRIVER_MINOR 26
+ #define KMS_DRIVER_PATCHLEVEL 0
+
+-#define AMDGPU_VERSION "18.30.1.15"
++#define AMDGPU_VERSION "18.30.2.15"
+
+ int amdgpu_vram_limit = 0;
+ int amdgpu_vis_vram_limit = 0;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4633-Revert-drm-amdgpu-replace-mutex-with-spin_lock-V2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4633-Revert-drm-amdgpu-replace-mutex-with-spin_lock-V2.patch
new file mode 100644
index 00000000..ea0b86ec
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4633-Revert-drm-amdgpu-replace-mutex-with-spin_lock-V2.patch
@@ -0,0 +1,75 @@
+From 6e0ed7615685d183f017c9bdbd6907e2cff7009c Mon Sep 17 00:00:00 2001
+From: Shirish S <shirish.s@amd.com>
+Date: Mon, 11 Jun 2018 11:31:17 +0530
+Subject: [PATCH 4633/5725] Revert "drm/amdgpu: replace mutex with spin_lock
+ (V2)"
+
+This reverts commit 924d325820721def656ae5f203c7584a8576fd50.
+
+Reason: should not add spinlocks in critical section
+Signed-off-by: Shirish S <shirish.s@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/atom.c | 4 ++--
+ drivers/gpu/drm/amd/amdgpu/atom.h | 3 +--
+ 3 files changed, 4 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+index 5ebab72..b5773e8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+@@ -2033,7 +2033,7 @@ int amdgpu_atombios_init(struct amdgpu_device *adev)
+ return -ENOMEM;
+ }
+
+- spin_lock_init(&adev->mode_info.atom_context->lock);
++ mutex_init(&adev->mode_info.atom_context->mutex);
+ if (adev->is_atom_fw) {
+ amdgpu_atomfirmware_scratch_regs_init(adev);
+ amdgpu_atomfirmware_allocate_fb_scratch(adev);
+diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
+index da4558c..6cd518f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/atom.c
++++ b/drivers/gpu/drm/amd/amdgpu/atom.c
+@@ -1261,7 +1261,7 @@ int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t * pa
+ {
+ int r;
+
+- spin_lock(&ctx->lock);
++ mutex_lock(&ctx->mutex);
+ /* reset data block */
+ ctx->data_block = 0;
+ /* reset reg block */
+@@ -1274,7 +1274,7 @@ int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t * pa
+ ctx->divmul[0] = 0;
+ ctx->divmul[1] = 0;
+ r = amdgpu_atom_execute_table_locked(ctx, index, params);
+- spin_unlock(&ctx->lock);
++ mutex_unlock(&ctx->mutex);
+ return r;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/atom.h b/drivers/gpu/drm/amd/amdgpu/atom.h
+index 54063e2..a391709 100644
+--- a/drivers/gpu/drm/amd/amdgpu/atom.h
++++ b/drivers/gpu/drm/amd/amdgpu/atom.h
+@@ -26,7 +26,6 @@
+ #define ATOM_H
+
+ #include <linux/types.h>
+-#include <linux/spinlock_types.h>
+ #include <drm/drmP.h>
+
+ #define ATOM_BIOS_MAGIC 0xAA55
+@@ -126,7 +125,7 @@ struct card_info {
+
+ struct atom_context {
+ struct card_info *card;
+- spinlock_t lock;
++ struct mutex mutex;
+ void *bios;
+ uint32_t cmd_table, data_table;
+ uint16_t *iio;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4634-Revert-drm-amd-display-avoid-sleeping-in-atomic-cont.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4634-Revert-drm-amd-display-avoid-sleeping-in-atomic-cont.patch
new file mode 100644
index 00000000..b4afb896
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4634-Revert-drm-amd-display-avoid-sleeping-in-atomic-cont.patch
@@ -0,0 +1,30 @@
+From a47aecd59e7bc3ac1f887beb3a06ec81de59d6f3 Mon Sep 17 00:00:00 2001
+From: Shirish S <shirish.s@amd.com>
+Date: Mon, 11 Jun 2018 11:32:09 +0530
+Subject: [PATCH 4634/5725] Revert "drm/amd/display: avoid sleeping in atomic
+ context while creating new state (V2)"
+
+This reverts commit 1c01756cfd3e98ddefd2b5f4a601e9f70581b240.
+
+Reason: not the right fix.
+Signed-off-by: Shirish S <shirish.s@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 36be7a3..a6a09b0 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -970,7 +970,7 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
+ struct dc_state *dc_create_state(void)
+ {
+ struct dc_state *context = kzalloc(sizeof(struct dc_state),
+- GFP_ATOMIC);
++ GFP_KERNEL);
+
+ if (!context)
+ return NULL;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4635-drm-amdgpu-Added-ISR-for-CP-ECC-EDC-interrupt-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4635-drm-amdgpu-Added-ISR-for-CP-ECC-EDC-interrupt-v2.patch
new file mode 100644
index 00000000..7b8d7a1d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4635-drm-amdgpu-Added-ISR-for-CP-ECC-EDC-interrupt-v2.patch
@@ -0,0 +1,165 @@
+From 72b12e47e86a563761cfb11bb0255b3bd712ffa1 Mon Sep 17 00:00:00 2001
+From: David Panariti <David.Panariti@amd.com>
+Date: Tue, 15 May 2018 11:45:11 -0400
+Subject: [PATCH 4635/5725] drm/amdgpu: Added ISR for CP ECC/EDC interrupt v2.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The ISR will log the ECC error message via DRM_ERROR.
+
+v2:
+Remove CZ only limitation.
+Rebase.
+
+Signed-off-by: David Panariti <David.Panariti@amd.com>
+Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Reviewed-by: Nicolai Hähnle <nicolai.haehnle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 +
+ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 75 +++++++++++++++++++++++++++++++++++
+ 2 files changed, 76 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 89572c1..0571196 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -988,6 +988,7 @@ struct amdgpu_gfx {
+ struct amdgpu_irq_src eop_irq;
+ struct amdgpu_irq_src priv_reg_irq;
+ struct amdgpu_irq_src priv_inst_irq;
++ struct amdgpu_irq_src cp_ecc_error_irq;
+ /* gfx status */
+ uint32_t gfx_current_status;
+ /* ce ram size*/
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index 1ad8528..b92abc3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -2055,6 +2055,12 @@ static int gfx_v8_0_sw_init(void *handle)
+ if (r)
+ return r;
+
++ /* Add CP EDC/ECC irq */
++ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 197,
++ &adev->gfx.cp_ecc_error_irq);
++ if (r)
++ return r;
++
+ adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
+
+ gfx_v8_0_scratch_init(adev);
+@@ -5118,6 +5124,8 @@ static int gfx_v8_0_hw_fini(void *handle)
+ amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
+ amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+
++ amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
++
+ /* disable KCQ to avoid CPC touch memory not valid anymore */
+ for (i = 0; i < adev->gfx.num_compute_rings; i++)
+ gfx_v8_0_kcq_disable(&adev->gfx.kiq.ring, &adev->gfx.compute_ring[i]);
+@@ -5549,6 +5557,12 @@ static int gfx_v8_0_late_init(void *handle)
+ if (r)
+ return r;
+
++ r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
++ if (r) {
++ DRM_ERROR("amdgpu_irq_get() failed to get IRQ for EDC, r: %d.\n", r);
++ return r;
++ }
++
+ amdgpu_device_ip_set_powergating_state(adev,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_PG_STATE_GATE);
+@@ -6807,6 +6821,51 @@ static int gfx_v8_0_set_eop_interrupt_state(struct amdgpu_device *adev,
+ return 0;
+ }
+
++static int gfx_v8_0_set_cp_ecc_int_state(struct amdgpu_device *adev,
++ struct amdgpu_irq_src *source,
++ unsigned int type,
++ enum amdgpu_interrupt_state state)
++{
++ int enable_flag;
++
++ switch (state) {
++ case AMDGPU_IRQ_STATE_DISABLE:
++ enable_flag = 0;
++ break;
++
++ case AMDGPU_IRQ_STATE_ENABLE:
++ enable_flag = 1;
++ break;
++
++ default:
++ return -EINVAL;
++ }
++
++ WREG32_FIELD(CP_INT_CNTL, CP_ECC_ERROR_INT_ENABLE, enable_flag);
++ WREG32_FIELD(CP_INT_CNTL_RING0, CP_ECC_ERROR_INT_ENABLE, enable_flag);
++ WREG32_FIELD(CP_INT_CNTL_RING1, CP_ECC_ERROR_INT_ENABLE, enable_flag);
++ WREG32_FIELD(CP_INT_CNTL_RING2, CP_ECC_ERROR_INT_ENABLE, enable_flag);
++ WREG32_FIELD(CPC_INT_CNTL, CP_ECC_ERROR_INT_ENABLE, enable_flag);
++ WREG32_FIELD(CP_ME1_PIPE0_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
++ enable_flag);
++ WREG32_FIELD(CP_ME1_PIPE1_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
++ enable_flag);
++ WREG32_FIELD(CP_ME1_PIPE2_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
++ enable_flag);
++ WREG32_FIELD(CP_ME1_PIPE3_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
++ enable_flag);
++ WREG32_FIELD(CP_ME2_PIPE0_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
++ enable_flag);
++ WREG32_FIELD(CP_ME2_PIPE1_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
++ enable_flag);
++ WREG32_FIELD(CP_ME2_PIPE2_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
++ enable_flag);
++ WREG32_FIELD(CP_ME2_PIPE3_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
++ enable_flag);
++
++ return 0;
++}
++
+ static int gfx_v8_0_eop_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+@@ -6857,6 +6916,14 @@ static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev,
+ return 0;
+ }
+
++static int gfx_v8_0_cp_ecc_error_irq(struct amdgpu_device *adev,
++ struct amdgpu_irq_src *source,
++ struct amdgpu_iv_entry *entry)
++{
++ DRM_ERROR("ECC error detected.");
++ return 0;
++}
++
+ static int gfx_v8_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned int type,
+@@ -7059,6 +7126,11 @@ static const struct amdgpu_irq_src_funcs gfx_v8_0_kiq_irq_funcs = {
+ .process = gfx_v8_0_kiq_irq,
+ };
+
++static const struct amdgpu_irq_src_funcs gfx_v8_0_cp_ecc_error_irq_funcs = {
++ .set = gfx_v8_0_set_cp_ecc_int_state,
++ .process = gfx_v8_0_cp_ecc_error_irq,
++};
++
+ static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev)
+ {
+ adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
+@@ -7072,6 +7144,9 @@ static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev)
+
+ adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST;
+ adev->gfx.kiq.irq.funcs = &gfx_v8_0_kiq_irq_funcs;
++
++ adev->gfx.cp_ecc_error_irq.num_types = 1;
++ adev->gfx.cp_ecc_error_irq.funcs = &gfx_v8_0_cp_ecc_error_irq_funcs;
+ }
+
+ static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4636-drm-amdgpu-Add-interrupt-SQ-source-struct-to-amdgpu_.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4636-drm-amdgpu-Add-interrupt-SQ-source-struct-to-amdgpu_.patch
new file mode 100644
index 00000000..0fde6568
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4636-drm-amdgpu-Add-interrupt-SQ-source-struct-to-amdgpu_.patch
@@ -0,0 +1,37 @@
+From 08f4ba64765302b10cc4dc809ffe294b5f9d73a0 Mon Sep 17 00:00:00 2001
+From: David Panariti <David.Panariti@amd.com>
+Date: Tue, 22 May 2018 14:09:06 -0400
+Subject: [PATCH 4636/5725] drm/amdgpu: Add interrupt SQ source struct to
+ amdgpu_gfx struct v2.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+SQ can generate interrupts on EDC/ECC errors, and this struct controls
+how the interrupt is handled. The guts are filled in by the
+gfx_v<major>_<minor>.c files.
+
+v2:
+Rebase.
+
+Signed-off-by: David Panariti <David.Panariti@amd.com>
+Reviewed-by: Nicolai Hähnle <nicolai.haehnle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 0571196..dbc5570 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -989,6 +989,7 @@ struct amdgpu_gfx {
+ struct amdgpu_irq_src priv_reg_irq;
+ struct amdgpu_irq_src priv_inst_irq;
+ struct amdgpu_irq_src cp_ecc_error_irq;
++ struct amdgpu_irq_src sq_irq;
+ /* gfx status */
+ uint32_t gfx_current_status;
+ /* ce ram size*/
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4637-drm-amdgpu-Add-plumbing-for-handling-SQ-EDC-ECC-inte.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4637-drm-amdgpu-Add-plumbing-for-handling-SQ-EDC-ECC-inte.patch
new file mode 100644
index 00000000..d65570b7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4637-drm-amdgpu-Add-plumbing-for-handling-SQ-EDC-ECC-inte.patch
@@ -0,0 +1,190 @@
+From 6aa439ca33be766ec35b6fc3d851f4b5c8930fb3 Mon Sep 17 00:00:00 2001
+From: David Panariti <David.Panariti@amd.com>
+Date: Tue, 22 May 2018 14:25:49 -0400
+Subject: [PATCH 4637/5725] drm/amdgpu: Add plumbing for handling SQ EDC/ECC
+ interrupts v2.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+SQ can generate interrupts; install the ISR to
+handle them.
+
+Add parsing of the SQ data in the interrupt handler.
+
+v2:
+Remove CZ only limitation.
+Rebase.
+
+Signed-off-by: David Panariti <David.Panariti@amd.com>
+Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Reviewed-by: Nicolai Hähnle <nicolai.haehnle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 109 +++++++++++++++++++++++++++++++++-
+ 1 file changed, 108 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index b92abc3..7b63823 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -2061,6 +2061,14 @@ static int gfx_v8_0_sw_init(void *handle)
+ if (r)
+ return r;
+
++ /* SQ interrupts. */
++ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 239,
++ &adev->gfx.sq_irq);
++ if (r) {
++ DRM_ERROR("amdgpu_irq_add() for SQ failed: %d\n", r);
++ return r;
++ }
++
+ adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
+
+ gfx_v8_0_scratch_init(adev);
+@@ -5126,6 +5134,8 @@ static int gfx_v8_0_hw_fini(void *handle)
+
+ amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
+
++ amdgpu_irq_put(adev, &adev->gfx.sq_irq, 0);
++
+ /* disable KCQ to avoid CPC touch memory not valid anymore */
+ for (i = 0; i < adev->gfx.num_compute_rings; i++)
+ gfx_v8_0_kcq_disable(&adev->gfx.kiq.ring, &adev->gfx.compute_ring[i]);
+@@ -5563,6 +5573,14 @@ static int gfx_v8_0_late_init(void *handle)
+ return r;
+ }
+
++ r = amdgpu_irq_get(adev, &adev->gfx.sq_irq, 0);
++ if (r) {
++ DRM_ERROR(
++ "amdgpu_irq_get() failed to get IRQ for SQ, r: %d.\n",
++ r);
++ return r;
++ }
++
+ amdgpu_device_ip_set_powergating_state(adev,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_PG_STATE_GATE);
+@@ -6866,6 +6884,32 @@ static int gfx_v8_0_set_cp_ecc_int_state(struct amdgpu_device *adev,
+ return 0;
+ }
+
++static int gfx_v8_0_set_sq_int_state(struct amdgpu_device *adev,
++ struct amdgpu_irq_src *source,
++ unsigned int type,
++ enum amdgpu_interrupt_state state)
++{
++ int enable_flag;
++
++ switch (state) {
++ case AMDGPU_IRQ_STATE_DISABLE:
++ enable_flag = 1;
++ break;
++
++ case AMDGPU_IRQ_STATE_ENABLE:
++ enable_flag = 0;
++ break;
++
++ default:
++ return -EINVAL;
++ }
++
++ WREG32_FIELD(SQ_INTERRUPT_MSG_CTRL, STALL,
++ enable_flag);
++
++ return 0;
++}
++
+ static int gfx_v8_0_eop_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+@@ -6920,7 +6964,62 @@ static int gfx_v8_0_cp_ecc_error_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+ {
+- DRM_ERROR("ECC error detected.");
++ DRM_ERROR("CP EDC/ECC error detected.");
++ return 0;
++}
++
++static int gfx_v8_0_sq_irq(struct amdgpu_device *adev,
++ struct amdgpu_irq_src *source,
++ struct amdgpu_iv_entry *entry)
++{
++ u8 enc, se_id;
++ char type[20];
++
++ /* Parse all fields according to SQ_INTERRUPT* registers */
++ enc = (entry->src_data[0] >> 26) & 0x3;
++ se_id = (entry->src_data[0] >> 24) & 0x3;
++
++ switch (enc) {
++ case 0:
++ DRM_INFO("SQ general purpose intr detected:"
++ "se_id %d, immed_overflow %d, host_reg_overflow %d,"
++ "host_cmd_overflow %d, cmd_timestamp %d,"
++ "reg_timestamp %d, thread_trace_buff_full %d,"
++ "wlt %d, thread_trace %d.\n",
++ se_id,
++ (entry->src_data[0] >> 7) & 0x1,
++ (entry->src_data[0] >> 6) & 0x1,
++ (entry->src_data[0] >> 5) & 0x1,
++ (entry->src_data[0] >> 4) & 0x1,
++ (entry->src_data[0] >> 3) & 0x1,
++ (entry->src_data[0] >> 2) & 0x1,
++ (entry->src_data[0] >> 1) & 0x1,
++ entry->src_data[0] & 0x1
++ );
++ break;
++ case 1:
++ case 2:
++
++ if (enc == 1)
++ sprintf(type, "instruction intr");
++ else
++ sprintf(type, "EDC/ECC error");
++
++ DRM_INFO(
++ "SQ %s detected: "
++ "se_id %d, cu_id %d, simd_id %d, wave_id %d, vm_id %d\n",
++ type, se_id,
++ (entry->src_data[0] >> 20) & 0xf,
++ (entry->src_data[0] >> 18) & 0x3,
++ (entry->src_data[0] >> 14) & 0xf,
++ (entry->src_data[0] >> 10) & 0xf
++ );
++ break;
++ default:
++ DRM_ERROR("SQ invalid encoding type\n.");
++ return -EINVAL;
++ }
++
+ return 0;
+ }
+
+@@ -7131,6 +7230,11 @@ static const struct amdgpu_irq_src_funcs gfx_v8_0_cp_ecc_error_irq_funcs = {
+ .process = gfx_v8_0_cp_ecc_error_irq,
+ };
+
++static const struct amdgpu_irq_src_funcs gfx_v8_0_sq_irq_funcs = {
++ .set = gfx_v8_0_set_sq_int_state,
++ .process = gfx_v8_0_sq_irq,
++};
++
+ static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev)
+ {
+ adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
+@@ -7147,6 +7251,9 @@ static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev)
+
+ adev->gfx.cp_ecc_error_irq.num_types = 1;
+ adev->gfx.cp_ecc_error_irq.funcs = &gfx_v8_0_cp_ecc_error_irq_funcs;
++
++ adev->gfx.sq_irq.num_types = 1;
++ adev->gfx.sq_irq.funcs = &gfx_v8_0_sq_irq_funcs;
+ }
+
+ static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev)
+--
+2.7.4
+
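Two details in the patch above are easy to miss. First, the "enable" path
programs the STALL field of SQ_INTERRUPT_MSG_CTRL, so AMDGPU_IRQ_STATE_ENABLE
writes 0 and AMDGPU_IRQ_STATE_DISABLE writes 1: disabling means stalling the
SQ message path rather than clearing an enable bit. Second, the handler
decodes the IV payload purely by bit position. A self-contained sketch of
that decode, reusing the bit layout from gfx_v8_0_sq_irq() (the local names
are descriptive, not taken from a header):

    static void sq_decode_sketch(u32 data)
    {
        u8 encoding = (data >> 26) & 0x3;   /* 0 = general, 1 = instruction, 2 = EDC/ECC */
        u8 se_id    = (data >> 24) & 0x3;

        if (encoding == 1 || encoding == 2) {
            u8 cu_id   = (data >> 20) & 0xf;
            u8 simd_id = (data >> 18) & 0x3;
            u8 wave_id = (data >> 14) & 0xf;
            u8 vm_id   = (data >> 10) & 0xf;

            pr_info("SQ intr: se %u cu %u simd %u wave %u vmid %u\n",
                    se_id, cu_id, simd_id, wave_id, vm_id);
        }
    }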
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4638-drm-amdgpu-remove-unused-parameter-for-va-update.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4638-drm-amdgpu-remove-unused-parameter-for-va-update.patch
new file mode 100644
index 00000000..56a7b6c9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4638-drm-amdgpu-remove-unused-parameter-for-va-update.patch
@@ -0,0 +1,49 @@
+From 00540351fedfbe72ee403f10f25b495828c90298 Mon Sep 17 00:00:00 2001
+From: Junwei Zhang <Jerry.Zhang@amd.com>
+Date: Tue, 12 Jun 2018 13:57:45 +0800
+Subject: [PATCH 4638/5725] drm/amdgpu: remove unused parameter for va update
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The validation list is not needed any more.
+
+Signed-off-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Reviewed-by: David Zhou <david1.zhou@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+index 0e022b2..d89d43e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+@@ -661,7 +661,6 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
+ * @adev: amdgpu_device pointer
+ * @vm: vm to update
+ * @bo_va: bo_va to update
+- * @list: validation list
+ * @operation: map, unmap or clear
+ *
+ * Update the bo_va directly after setting its address. Errors are not
+@@ -670,7 +669,6 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
+ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo_va *bo_va,
+- struct list_head *list,
+ uint32_t operation)
+ {
+ int r;
+@@ -824,7 +822,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
+ break;
+ }
+ if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
+- amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va, &list,
++ amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
+ args->operation);
+
+ error_backoff:
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4639-drm-amd-pp-initialize-result-to-before-or-ing-in-dat.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4639-drm-amd-pp-initialize-result-to-before-or-ing-in-dat.patch
new file mode 100644
index 00000000..7c1f5162
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4639-drm-amd-pp-initialize-result-to-before-or-ing-in-dat.patch
@@ -0,0 +1,40 @@
+From 4d28f47d2370f7d0d3a4eb83383d60e43b472342 Mon Sep 17 00:00:00 2001
+From: Colin Ian King <colin.king@canonical.com>
+Date: Wed, 6 Jun 2018 13:18:31 +0100
+Subject: [PATCH 4639/5725] drm/amd/pp: initialize result to before or'ing in
+ data
+
+The current use of result is or'ing in values and checking for
+a non-zero result; however, result is never initialized, so it
+potentially contains garbage to start with. Fix this by
+initializing it to the first return from the call to
+vega10_program_didt_config_registers().
+
+Detected by cppcheck:
+"(error) Uninitialized variable: result"
+
+Fixes: 9b7b8154cdb8 ("drm/amd/powerplay: added didt support for vega10")
+Signed-off-by: Colin Ian King <colin.king@canonical.com>
+Acked-by: Huang Rui <ray.huang@amd.com>
+[Fix the subject as Colin's comment]
+Signed-off-by: Huang Rui <ray.huang@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
+index a9efd855..dbe4b1f 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
+@@ -1104,7 +1104,7 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
+ for (count = 0; count < num_se; count++) {
+ data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
+ WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
+- result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT);
++ result = vega10_program_didt_config_registers(hwmgr, PSMSEEDCStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+ result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCStallDelayConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+ result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCCtrlResetConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+ result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCCtrlConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+--
+2.7.4
+
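The bug class fixed above is OR-accumulating return codes into a variable
that was never given an initial value, so the first "result |= ..." reads
indeterminate data. A minimal illustration of the before/after pattern
(do_step_a() and do_step_b() are hypothetical helpers returning 0 on
success):

    static int accumulate_buggy(void)
    {
        int result;                 /* never initialized */

        result |= do_step_a();      /* reads indeterminate 'result' */
        result |= do_step_b();
        return result;
    }

    static int accumulate_fixed(void)
    {
        int result = do_step_a();   /* seed with the first call, as the patch does */

        result |= do_step_b();
        return result;
    }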
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4640-drm-amd-display-Fix-stale-buffer-object-bo-use.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4640-drm-amd-display-Fix-stale-buffer-object-bo-use.patch
new file mode 100644
index 00000000..55f562f6
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4640-drm-amd-display-Fix-stale-buffer-object-bo-use.patch
@@ -0,0 +1,48 @@
+From 2756029b9a7e31523a42cb96084da882664346b8 Mon Sep 17 00:00:00 2001
+From: Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>
+Date: Thu, 7 Jun 2018 11:48:40 +0530
+Subject: [PATCH 4640/5725] drm/amd/display: Fix stale buffer object (bo) use
+
+Fixes stale buffer object (bo) usage for cursor plane
+
+The cursor plane's bo operations are handled in DC code.
+Currently, atomic_commit() does not handle bo operations
+for the cursor plane; as a result, the bo assigned to the cursor
+plane in dm_plane_helper_prepare_fb() is not coherent
+with the updates made to it in DC code. This mismatch
+leads to bo corruption and hence crashes during S3 entry.
+
+This patch cleans up code that was added as a hack
+for the 4.9 kernel only.
+
+Reviewed-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Signed-off-by: Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 11 -----------
+ 1 file changed, 11 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index c37871a..14d0cce 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -3253,17 +3253,6 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
+ }
+ }
+
+- /* It's a hack for s3 since in 4.9 kernel filter out cursor buffer
+- * prepare and cleanup in drm_atomic_helper_prepare_planes
+- * and drm_atomic_helper_cleanup_planes because fb doens't in s3.
+- * IN 4.10 kernel this code should be removed and amdgpu_device_suspend
+- * code touching fram buffers should be avoided for DC.
+- */
+- if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+- struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_state->crtc);
+-
+- acrtc->cursor_bo = obj;
+- }
+ return 0;
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4641-drm-amd-pp-Fix-OD-feature-enable-failed-on-Vega10-wo.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4641-drm-amd-pp-Fix-OD-feature-enable-failed-on-Vega10-wo.patch
new file mode 100644
index 00000000..aa6d7eb0
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4641-drm-amd-pp-Fix-OD-feature-enable-failed-on-Vega10-wo.patch
@@ -0,0 +1,53 @@
+From 1572dafa69d7c575e814487ce37498d857eace05 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Tue, 12 Jun 2018 14:26:00 +0800
+Subject: [PATCH 4641/5725] drm/amd/pp: Fix OD feature enable failed on Vega10
+ workstation cards
+
+As the hardware requires, the SOC clock must be larger than the mclk,
+so we set the max SOC clock to the OD max memory clock.
+But on workstation cards the VBIOS does not support the OD feature and
+the OD max memory clock is 0. In that case the driver can still support
+underclocking and sets the OD max memory clock to the value of the
+highest memory dpm level, so the OD max memory clock ends up below the
+highest SOC clock and the driver should not change the SOC clock.
+
+caused by commit ca57b9b0a156
+("drm/amd/pp: Allow underclocking when od table is empty in vbios")
+
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+index 896c10b..f49e0d3 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+@@ -321,8 +321,12 @@ static int vega10_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
+ odn_table->min_vddc = dep_table[0]->entries[0].vddc;
+
+ i = od_table[2]->count - 1;
+- od_table[2]->entries[i].clk = hwmgr->platform_descriptor.overdriveLimit.memoryClock;
+- od_table[2]->entries[i].vddc = odn_table->max_vddc;
++ od_table[2]->entries[i].clk = hwmgr->platform_descriptor.overdriveLimit.memoryClock > od_table[2]->entries[i].clk ?
++ hwmgr->platform_descriptor.overdriveLimit.memoryClock :
++ od_table[2]->entries[i].clk;
++ od_table[2]->entries[i].vddc = odn_table->max_vddc > od_table[2]->entries[i].vddc ?
++ odn_table->max_vddc :
++ od_table[2]->entries[i].vddc;
+
+ return 0;
+ }
+@@ -1325,6 +1329,7 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
+ if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0)
+ hwmgr->platform_descriptor.overdriveLimit.memoryClock =
+ dpm_table->dpm_levels[dpm_table->count-1].value;
++
+ vega10_init_dpm_state(&(dpm_table->dpm_state));
+
+ data->dpm_table.eclk_table.count = 0;
+--
+2.7.4
+
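The two ternaries added above simply take the larger of the existing table
entry and the VBIOS overdrive limit. Written with the kernel's max() macro
the intent reads more directly; this is a sketch using the same names as the
patch, not the driver source itself:

    i = od_table[2]->count - 1;
    od_table[2]->entries[i].clk =
        max(od_table[2]->entries[i].clk,
            hwmgr->platform_descriptor.overdriveLimit.memoryClock);
    od_table[2]->entries[i].vddc =
        max(od_table[2]->entries[i].vddc, odn_table->max_vddc);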
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4642-drm-amdgpu-Update-function-level-documentation-for-G.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4642-drm-amdgpu-Update-function-level-documentation-for-G.patch
new file mode 100644
index 00000000..8a206f9c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4642-drm-amdgpu-Update-function-level-documentation-for-G.patch
@@ -0,0 +1,633 @@
+From 83b86c7573978a66325325b3736b084bed2256a8 Mon Sep 17 00:00:00 2001
+From: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Date: Mon, 11 Jun 2018 11:11:24 -0400
+Subject: [PATCH 4642/5725] drm/amdgpu: Update function level documentation for
+ GPUVM v3
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Add/update function level documentation and add reference to amdgpu_vm.c
+in amdgpu.rst
+
+v2:
+Fix reference in rst file.
+Fix compilation warnings.
+Add space between function names and params list where
+it's missing.
+
+v3:
+Fix some function comments.
+Add formatted documentation to structs.
+
+Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 253 ++++++++++++++++++++++++++++-----
+ 1 file changed, 215 insertions(+), 38 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 651acca..3c7b1de 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -34,8 +34,9 @@
+ #include "amdgpu_trace.h"
+ #include "amdgpu_amdkfd.h"
+
+-/*
+- * GPUVM
++/**
++ * DOC: GPUVM
++ *
+ * GPUVM is similar to the legacy gart on older asics, however
+ * rather than there being a single global gart table
+ * for the entire GPU, there are multiple VM page tables active
+@@ -63,37 +64,84 @@ INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
+ #undef START
+ #undef LAST
+
+-/* Local structure. Encapsulate some VM table update parameters to reduce
++/**
++ * struct amdgpu_pte_update_params - Local structure
++ *
++ * Encapsulate some VM table update parameters to reduce
+ * the number of function parameters
++ *
+ */
+ struct amdgpu_pte_update_params {
+- /* amdgpu device we do this update for */
++
++ /**
++ * @adev: amdgpu device we do this update for
++ */
+ struct amdgpu_device *adev;
+- /* optional amdgpu_vm we do this update for */
++
++ /**
++ * @vm: optional amdgpu_vm we do this update for
++ */
+ struct amdgpu_vm *vm;
+- /* address where to copy page table entries from */
++
++ /**
++ * @src: address where to copy page table entries from
++ */
+ uint64_t src;
+- /* indirect buffer to fill with commands */
++
++ /**
++ * @ib: indirect buffer to fill with commands
++ */
+ struct amdgpu_ib *ib;
+- /* Function which actually does the update */
++
++ /**
++ * @func: Function which actually does the update
++ */
+ void (*func)(struct amdgpu_pte_update_params *params,
+ struct amdgpu_bo *bo, uint64_t pe,
+ uint64_t addr, unsigned count, uint32_t incr,
+ uint64_t flags);
+- /* The next two are used during VM update by CPU
+- * DMA addresses to use for mapping
+- * Kernel pointer of PD/PT BO that needs to be updated
++ /**
++ * @pages_addr:
++ *
++ * DMA addresses to use for mapping, used during VM update by CPU
+ */
+ dma_addr_t *pages_addr;
++
++ /**
++ * @kptr:
++ *
++ * Kernel pointer of PD/PT BO that needs to be updated,
++ * used during VM update by CPU
++ */
+ void *kptr;
+ };
+
+-/* Helper to disable partial resident texture feature from a fence callback */
++/**
++ * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
++ */
+ struct amdgpu_prt_cb {
++
++ /**
++ * @adev: amdgpu device
++ */
+ struct amdgpu_device *adev;
++
++ /**
++ * @cb: callback
++ */
+ struct dma_fence_cb cb;
+ };
+
++/**
++ * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
++ *
++ * @base: base structure for tracking BO usage in a VM
++ * @vm: vm to which bo is to be added
++ * @bo: amdgpu buffer object
++ *
++ * Initialize a bo_va_base structure and add it to the appropriate lists
++ *
++ */
+ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo)
+@@ -126,8 +174,10 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
+ * amdgpu_vm_level_shift - return the addr shift for each level
+ *
+ * @adev: amdgpu_device pointer
++ * @level: VMPT level
+ *
+- * Returns the number of bits the pfn needs to be right shifted for a level.
++ * Returns:
++ * The number of bits the pfn needs to be right shifted for a level.
+ */
+ static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
+ unsigned level)
+@@ -155,8 +205,10 @@ static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
+ * amdgpu_vm_num_entries - return the number of entries in a PD/PT
+ *
+ * @adev: amdgpu_device pointer
++ * @level: VMPT level
+ *
+- * Calculate the number of entries in a page directory or page table.
++ * Returns:
++ * The number of entries in a page directory or page table.
+ */
+ static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
+ unsigned level)
+@@ -179,8 +231,10 @@ static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
+ * amdgpu_vm_bo_size - returns the size of the BOs in bytes
+ *
+ * @adev: amdgpu_device pointer
++ * @level: VMPT level
+ *
+- * Calculate the size of the BO for a page directory or page table in bytes.
++ * Returns:
++ * The size of the BO for a page directory or page table in bytes.
+ */
+ static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
+ {
+@@ -218,6 +272,9 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
+ * @param: parameter for the validation callback
+ *
+ * Validate the page table BOs on command submission if neccessary.
++ *
++ * Returns:
++ * Validation result.
+ */
+ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ int (*validate)(void *p, struct amdgpu_bo *bo),
+@@ -273,6 +330,9 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ * @vm: VM to check
+ *
+ * Check if all VM PDs/PTs are ready for updates
++ *
++ * Returns:
++ * True if eviction list is empty.
+ */
+ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
+ {
+@@ -283,10 +343,14 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
+ * amdgpu_vm_clear_bo - initially clear the PDs/PTs
+ *
+ * @adev: amdgpu_device pointer
++ * @vm: VM to clear BO from
+ * @bo: BO to clear
+ * @level: level this BO is at
+ *
+ * Root PD needs to be reserved when calling this.
++ *
++ * Returns:
++ * 0 on success, errno otherwise.
+ */
+ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm, struct amdgpu_bo *bo,
+@@ -382,10 +446,16 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: requested vm
++ * @parent: parent PT
+ * @saddr: start of the address range
+ * @eaddr: end of the address range
++ * @level: VMPT level
++ * @ats: indicate ATS support from PTE
+ *
+ * Make sure the page directories and page tables are allocated
++ *
++ * Returns:
++ * 0 on success, errno otherwise.
+ */
+ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+@@ -495,6 +565,9 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
+ * @size: Size from start address we need.
+ *
+ * Make sure the page tables are allocated.
++ *
++ * Returns:
++ * 0 on success, errno otherwise.
+ */
+ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+@@ -560,6 +633,15 @@ void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
+ }
+ }
+
++/**
++ * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
++ *
++ * @ring: ring on which the job will be submitted
++ * @job: job to submit
++ *
++ * Returns:
++ * True if sync is needed.
++ */
+ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
+ struct amdgpu_job *job)
+ {
+@@ -587,6 +669,14 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
+ return vm_flush_needed || gds_switch_needed;
+ }
+
++/**
++ * amdgpu_vm_is_large_bar - Check if BAR is large enough
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Returns:
++ * True if BAR is large enough.
++ */
+ static bool amdgpu_vm_is_large_bar(struct amdgpu_device *adev)
+ {
+ return (adev->gmc.real_vram_size == adev->gmc.visible_vram_size);
+@@ -596,10 +686,12 @@ static bool amdgpu_vm_is_large_bar(struct amdgpu_device *adev)
+ * amdgpu_vm_flush - hardware flush the vm
+ *
+ * @ring: ring to use for flush
+- * @vmid: vmid number to use
+- * @pd_addr: address of the page directory
++ * @need_pipe_sync: is pipe sync needed
+ *
+ * Emit a VM flush when it is necessary.
++ *
++ * Returns:
++ * 0 on success, errno otherwise.
+ */
+ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
+ {
+@@ -707,6 +799,9 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
+ * Returns the found bo_va or NULL if none is found
+ *
+ * Object has to be reserved!
++ *
++ * Returns:
++ * Found bo_va or NULL.
+ */
+ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo)
+@@ -788,7 +883,10 @@ static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
+ * @addr: the unmapped addr
+ *
+ * Look up the physical address of the page that the pte resolves
+- * to and return the pointer for the page table entry.
++ * to.
++ *
++ * Returns:
++ * The pointer for the page table entry.
+ */
+ static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
+ {
+@@ -841,6 +939,17 @@ static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
+ }
+ }
+
++
++/**
++ * amdgpu_vm_wait_pd - Wait for PT BOs to be free.
++ *
++ * @adev: amdgpu_device pointer
++ * @vm: related vm
++ * @owner: fence owner
++ *
++ * Returns:
++ * 0 on success, errno otherwise.
++ */
+ static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ void *owner)
+ {
+@@ -894,7 +1003,10 @@ static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
+ /*
+ * amdgpu_vm_invalidate_level - mark all PD levels as invalid
+ *
++ * @adev: amdgpu_device pointer
++ * @vm: related vm
+ * @parent: parent PD
++ * @level: VMPT level
+ *
+ * Mark all PD level as invalid after an error.
+ */
+@@ -929,7 +1041,9 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev,
+ * @vm: requested vm
+ *
+ * Makes sure all directories are up to date.
+- * Returns 0 for success, error for failure.
++ *
++ * Returns:
++ * 0 for success, error for failure.
+ */
+ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm)
+@@ -1116,14 +1230,15 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
+ * amdgpu_vm_update_ptes - make sure that page tables are valid
+ *
+ * @params: see amdgpu_pte_update_params definition
+- * @vm: requested vm
+ * @start: start of GPU address range
+ * @end: end of GPU address range
+ * @dst: destination address to map to, the next dst inside the function
+ * @flags: mapping flags
+ *
+ * Update the page tables in the range @start - @end.
+- * Returns 0 for success, -EINVAL for failure.
++ *
++ * Returns:
++ * 0 for success, -EINVAL for failure.
+ */
+ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
+ uint64_t start, uint64_t end,
+@@ -1177,7 +1292,9 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
+ * @end: last PTE to handle
+ * @dst: addr those PTEs should point to
+ * @flags: hw mapping flags
+- * Returns 0 for success, -EINVAL for failure.
++ *
++ * Returns:
++ * 0 for success, -EINVAL for failure.
+ */
+ static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
+ uint64_t start, uint64_t end,
+@@ -1249,7 +1366,9 @@ static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
+ * @fence: optional resulting fence
+ *
+ * Fill in the page table entries between @start and @last.
+- * Returns 0 for success, -EINVAL for failure.
++ *
++ * Returns:
++ * 0 for success, -EINVAL for failure.
+ */
+ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
+ struct dma_fence *exclusive,
+@@ -1404,7 +1523,9 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
+ *
+ * Split the mapping into smaller chunks so that each update fits
+ * into a SDMA IB.
+- * Returns 0 for success, -EINVAL for failure.
++ *
++ * Returns:
++ * 0 for success, -EINVAL for failure.
+ */
+ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
+ struct dma_fence *exclusive,
+@@ -1534,7 +1655,9 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
+ * @clear: if true clear the entries
+ *
+ * Fill in the page table entries for @bo_va.
+- * Returns 0 for success, -EINVAL for failure.
++ *
++ * Returns:
++ * 0 for success, -EINVAL for failure.
+ */
+ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
+ struct amdgpu_bo_va *bo_va,
+@@ -1639,6 +1762,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
+
+ /**
+ * amdgpu_vm_update_prt_state - update the global PRT state
++ *
++ * @adev: amdgpu_device pointer
+ */
+ static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
+ {
+@@ -1653,6 +1778,8 @@ static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
+
+ /**
+ * amdgpu_vm_prt_get - add a PRT user
++ *
++ * @adev: amdgpu_device pointer
+ */
+ static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
+ {
+@@ -1665,6 +1792,8 @@ static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
+
+ /**
+ * amdgpu_vm_prt_put - drop a PRT user
++ *
++ * @adev: amdgpu_device pointer
+ */
+ static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
+ {
+@@ -1674,6 +1803,8 @@ static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
+
+ /**
+ * amdgpu_vm_prt_cb - callback for updating the PRT status
++ *
++ * @fence: fence for the callback
+ */
+ static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
+ {
+@@ -1685,6 +1816,9 @@ static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
+
+ /**
+ * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
++ *
++ * @adev: amdgpu_device pointer
++ * @fence: fence for the callback
+ */
+ static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
+ struct dma_fence *fence)
+@@ -1776,9 +1910,11 @@ static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+ * or if an error occurred)
+ *
+ * Make sure all freed BOs are cleared in the PT.
+- * Returns 0 for success.
+- *
+ * PTs have to be reserved and mutex must be locked!
++ *
++ * Returns:
++ * 0 for success.
++ *
+ */
+ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+@@ -1823,10 +1959,11 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: requested vm
+- * @sync: sync object to add fences to
+ *
+ * Make sure all BOs which are moved are updated in the PTs.
+- * Returns 0 for success.
++ *
++ * Returns:
++ * 0 for success.
+ *
+ * PTs have to be reserved!
+ */
+@@ -1881,7 +2018,9 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
+ *
+ * Add @bo into the requested vm.
+ * Add @bo to the list of bos associated with the vm
+- * Returns newly added bo_va or NULL for failure
++ *
++ * Returns:
++ * Newly added bo_va or NULL for failure
+ *
+ * Object has to be reserved!
+ */
+@@ -1947,7 +2086,9 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
+ * @flags: attributes of pages (read/write/valid/etc.)
+ *
+ * Add a mapping of the BO at the specefied addr into the VM.
+- * Returns 0 for success, error for failure.
++ *
++ * Returns:
++ * 0 for success, error for failure.
+ *
+ * Object has to be reserved and unreserved outside!
+ */
+@@ -2009,7 +2150,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
+ *
+ * Add a mapping of the BO at the specefied addr into the VM. Replace existing
+ * mappings as we do so.
+- * Returns 0 for success, error for failure.
++ *
++ * Returns:
++ * 0 for success, error for failure.
+ *
+ * Object has to be reserved and unreserved outside!
+ */
+@@ -2066,7 +2209,9 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
+ * @saddr: where to the BO is mapped
+ *
+ * Remove a mapping of the BO at the specefied addr from the VM.
+- * Returns 0 for success, error for failure.
++ *
++ * Returns:
++ * 0 for success, error for failure.
+ *
+ * Object has to be reserved and unreserved outside!
+ */
+@@ -2120,7 +2265,9 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
+ * @size: size of the range
+ *
+ * Remove all mappings in a range, split them as appropriate.
+- * Returns 0 for success, error for failure.
++ *
++ * Returns:
++ * 0 for success, error for failure.
+ */
+ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+@@ -2219,6 +2366,10 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
+ * @vm: the requested VM
+ *
+ * Find a mapping by it's address.
++ *
++ * Returns:
++ * The amdgpu_bo_va_mapping matching for addr or NULL
++ *
+ */
+ struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
+ uint64_t addr)
+@@ -2270,7 +2421,6 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
+ * amdgpu_vm_bo_invalidate - mark the bo as invalid
+ *
+ * @adev: amdgpu_device pointer
+- * @vm: requested vm
+ * @bo: amdgpu buffer object
+ *
+ * Mark @bo as invalid.
+@@ -2319,6 +2469,14 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
+ }
+ }
+
++/**
++ * amdgpu_vm_get_block_size - calculate VM page table size as power of two
++ *
++ * @vm_size: VM size
++ *
++ * Returns:
++ * VM page table as power of two
++ */
+ static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
+ {
+ /* Total bits covered by PD + PTs */
+@@ -2424,6 +2582,9 @@ static void amdgpu_inc_compute_vms(struct amdgpu_device *adev)
+ * @vm_context: Indicates if it GFX or Compute context
+ *
+ * Init @vm fields.
++ *
++ * Returns:
++ * 0 for success, error for failure.
+ */
+ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ int vm_context, unsigned int pasid)
+@@ -2549,6 +2710,9 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ /**
+ * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
+ *
++ * @adev: amdgpu_device pointer
++ * @vm: requested vm
++ *
+ * This only works on GFX VMs that don't have any BOs added and no
+ * page tables allocated yet.
+ *
+@@ -2562,7 +2726,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ * setting. May also switch to the compute power profile if this is
+ * the first compute VM.
+ *
+- * Returns 0 for success, -errno for errors.
++ * Returns:
++ * 0 for success, -errno for errors.
+ */
+ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+ {
+@@ -2747,8 +2912,10 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+ * @adev: amdgpu_device pointer
+ * @pasid: PASID do identify the VM
+ *
+- * This function is expected to be called in interrupt context. Returns
+- * true if there was fault credit, false otherwise
++ * This function is expected to be called in interrupt context.
++ *
++ * Returns:
++ * True if there was fault credit, false otherwise
+ */
+ bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
+ unsigned int pasid)
+@@ -2832,6 +2999,16 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
+ amdgpu_vmid_mgr_fini(adev);
+ }
+
++/**
++ * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
++ *
++ * @dev: drm device pointer
++ * @data: drm_amdgpu_vm
++ * @filp: drm file pointer
++ *
++ * Returns:
++ * 0 for success, -errno for errors.
++ */
+ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+ {
+ union drm_amdgpu_vm *args = data;
+--
+2.7.4
+
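The layout the patch above converges on is the standard kernel-doc form: a
one-line summary, one @tag per parameter, a free-form description, and an
explicit Returns: section (plus DOC: blocks for overview text). A minimal
template with placeholder names:

    /**
     * my_example_func - one-line summary of what the function does
     *
     * @adev: amdgpu_device pointer
     * @flags: behaviour flags
     *
     * Free-form description of behaviour, locking rules and side effects.
     *
     * Returns:
     * 0 for success, -errno for failure.
     */
    static int my_example_func(struct amdgpu_device *adev, u32 flags);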
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4643-drm-amd-include-Update-df-3.6-mask-and-shift-definit.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4643-drm-amd-include-Update-df-3.6-mask-and-shift-definit.patch
new file mode 100644
index 00000000..b53565d5
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4643-drm-amd-include-Update-df-3.6-mask-and-shift-definit.patch
@@ -0,0 +1,40 @@
+From 4b7fdbcbd36dd6e3ec05da2f17186e674cc816ec Mon Sep 17 00:00:00 2001
+From: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Date: Tue, 12 Jun 2018 13:35:44 -0400
+Subject: [PATCH 4643/5725] drm/amd/include: Update df 3.6 mask and shift
+ definition
+
+The register field hsas been changed in df 3.6, update to correct setting
+
+Change-Id: Id625d7698b610c07081f421537964686f8f0b67c
+Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h
+index 88f7c69..06fac50 100644
+--- a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h
++++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h
+@@ -36,13 +36,13 @@
+ /* DF_CS_AON0_DramBaseAddress0 */
+ #define DF_CS_UMC_AON0_DramBaseAddress0__AddrRngVal__SHIFT 0x0
+ #define DF_CS_UMC_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT 0x1
+-#define DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan__SHIFT 0x4
+-#define DF_CS_UMC_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT 0x8
++#define DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan__SHIFT 0x2
++#define DF_CS_UMC_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT 0x9
+ #define DF_CS_UMC_AON0_DramBaseAddress0__DramBaseAddr__SHIFT 0xc
+ #define DF_CS_UMC_AON0_DramBaseAddress0__AddrRngVal_MASK 0x00000001L
+ #define DF_CS_UMC_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK 0x00000002L
+-#define DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan_MASK 0x000000F0L
+-#define DF_CS_UMC_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000700L
++#define DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan_MASK 0x0000003CL
++#define DF_CS_UMC_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000E00L
+ #define DF_CS_UMC_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L
+
+ #endif
+--
+2.7.4
+
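These __SHIFT/__MASK pairs are consumed by masking the register value and
shifting the field down; with the corrected values, IntLvNumChan now sits at
bits 2..5 instead of 4..7. A short sketch of a typical read (read_df_reg()
is a hypothetical accessor, not a real function):

    u32 regval = read_df_reg();     /* hypothetical read of DramBaseAddress0 */
    u32 intlv_num_chan =
        (regval & DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
        DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;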
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4644-drm-amdgpu-fix-parsing-indirect-register-list-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4644-drm-amdgpu-fix-parsing-indirect-register-list-v2.patch
new file mode 100644
index 00000000..b8f5e927
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4644-drm-amdgpu-fix-parsing-indirect-register-list-v2.patch
@@ -0,0 +1,71 @@
+From 3d6c9f00ceb322ba49087adef8fd892fb5704951 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Tue, 29 May 2018 16:31:05 +0800
+Subject: [PATCH 4644/5725] drm/amdgpu: fix parsing indirect register list v2
+
+WARN_ON a possible buffer overflow and avoid an unnecessary dereference.
+
+v2: change BUG_ON to WARN_ON
+
+Change-Id: I6666d7dcf60acf524f290460d2ffe3f1f5f46354
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 15 +++++++++------
+ 1 file changed, 9 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index b093777..4527150 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -1843,13 +1843,15 @@ static void gfx_v9_1_parse_ind_reg_list(int *register_list_format,
+ int indirect_offset,
+ int list_size,
+ int *unique_indirect_regs,
+- int *unique_indirect_reg_count,
++ int unique_indirect_reg_count,
+ int *indirect_start_offsets,
+- int *indirect_start_offsets_count)
++ int *indirect_start_offsets_count,
++ int max_start_offsets_count)
+ {
+ int idx;
+
+ for (; indirect_offset < list_size; indirect_offset++) {
++ WARN_ON(*indirect_start_offsets_count >= max_start_offsets_count);
+ indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset;
+ *indirect_start_offsets_count = *indirect_start_offsets_count + 1;
+
+@@ -1857,14 +1859,14 @@ static void gfx_v9_1_parse_ind_reg_list(int *register_list_format,
+ indirect_offset += 2;
+
+ /* look for the matching indice */
+- for (idx = 0; idx < *unique_indirect_reg_count; idx++) {
++ for (idx = 0; idx < unique_indirect_reg_count; idx++) {
+ if (unique_indirect_regs[idx] ==
+ register_list_format[indirect_offset] ||
+ !unique_indirect_regs[idx])
+ break;
+ }
+
+- BUG_ON(idx >= *unique_indirect_reg_count);
++ BUG_ON(idx >= unique_indirect_reg_count);
+
+ if (!unique_indirect_regs[idx])
+ unique_indirect_regs[idx] = register_list_format[indirect_offset];
+@@ -1899,9 +1901,10 @@ static int gfx_v9_1_init_rlc_save_restore_list(struct amdgpu_device *adev)
+ adev->gfx.rlc.reg_list_format_direct_reg_list_length,
+ adev->gfx.rlc.reg_list_format_size_bytes >> 2,
+ unique_indirect_regs,
+- &unique_indirect_reg_count,
++ unique_indirect_reg_count,
+ indirect_start_offsets,
+- &indirect_start_offsets_count);
++ &indirect_start_offsets_count,
++ ARRAY_SIZE(indirect_start_offsets));
+
+ /* enable auto inc in case it is disabled */
+ tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
+--
+2.7.4
+
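The fix above passes the capacity of indirect_start_offsets down to the
parser and warns before every write instead of dereferencing the count as a
pointer. The generic form of that bounds check looks like this (illustrative
names; the patch itself only warns and keeps going, while this sketch also
skips the out-of-range write):

    static void append_offset(int *out, int *n_out, int max_out, int value)
    {
        WARN_ON(*n_out >= max_out);     /* flag the overflow loudly */
        if (*n_out < max_out)
            out[(*n_out)++] = value;
    }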
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4645-drm-amd-powerplay-remove-uncessary-extra-gfxoff-cont.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4645-drm-amd-powerplay-remove-uncessary-extra-gfxoff-cont.patch
new file mode 100644
index 00000000..07eb3e6b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4645-drm-amd-powerplay-remove-uncessary-extra-gfxoff-cont.patch
@@ -0,0 +1,47 @@
+From 1b08b175bea11a7edb017f5ae5bda648d7520146 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Tue, 12 Jun 2018 17:01:23 +0800
+Subject: [PATCH 4645/5725] drm/amd/powerplay: remove uncessary extra gfxoff
+ control call
+
+Gfxoff is already enabled in amdgpu_device_ip_set_powergating_state.
+So, no need to enable it again in pp_late_init.
+
+Change-Id: Id33d2dac192645fc9dcdfaf5825420093a87f814
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 9 ---------
+ 1 file changed, 9 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+index f60de8e..76fc45f 100644
+--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
++++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+@@ -180,7 +180,6 @@ static int pp_late_init(void *handle)
+ {
+ struct amdgpu_device *adev = handle;
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+- int ret;
+
+ if (hwmgr && hwmgr->pm_en) {
+ mutex_lock(&hwmgr->smu_lock);
+@@ -192,14 +191,6 @@ static int pp_late_init(void *handle)
+ if (adev->pm.smu_prv_buffer_size != 0)
+ pp_reserve_vram_for_smu(adev);
+
+- if (hwmgr && hwmgr->hwmgr_func &&
+- hwmgr->hwmgr_func->gfx_off_control &&
+- (hwmgr->feature_mask & PP_GFXOFF_MASK)) {
+- ret = hwmgr->hwmgr_func->gfx_off_control(hwmgr, true);
+- if (ret)
+- pr_err("gfx off enabling failed!\n");
+- }
+-
+ return 0;
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4646-drm-amd-powerplay-Set-higher-SCLK-MCLK-frequency-tha.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4646-drm-amd-powerplay-Set-higher-SCLK-MCLK-frequency-tha.patch
new file mode 100644
index 00000000..bf3af7d7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4646-drm-amd-powerplay-Set-higher-SCLK-MCLK-frequency-tha.patch
@@ -0,0 +1,35 @@
+From f591ba9a8b68b500bafec5f0c3c380cedf683cf2 Mon Sep 17 00:00:00 2001
+From: Kenneth Feng <kenneth.feng@amd.com>
+Date: Tue, 12 Jun 2018 15:07:37 +0800
+Subject: [PATCH 4646/5725] drm/amd/powerplay: Set higher SCLK&MCLK frequency
+ than dpm7 in OD
+
+Fix the issue that SCLK&MCLK can't be set higher than dpm7 when
+OD is enabled in SMU7.
+
+Change-Id: If8249795739e29a063154cffce693b3c77cca151
+Signed-off-by: Kenneth Feng <kenneth.feng@amd.com>
+Acked-by: Rex Zhu<rezhu@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+index 46d6368..0f307a7 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+@@ -3761,7 +3761,10 @@ static int smu7_generate_dpm_level_enable_mask(
+ const struct smu7_power_state *smu7_ps =
+ cast_const_phw_smu7_power_state(states->pnew_state);
+
+- result = smu7_trim_dpm_states(hwmgr, smu7_ps);
++ /*skip the trim if od is enabled*/
++ if (!hwmgr->od_enabled)
++ result = smu7_trim_dpm_states(hwmgr, smu7_ps);
++
+ if (result)
+ return result;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4647-drm-amdgpu-Add-BRACKET_LAYOUT_ENUMs-to-ObjectID.h.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4647-drm-amdgpu-Add-BRACKET_LAYOUT_ENUMs-to-ObjectID.h.patch
new file mode 100644
index 00000000..cf5b3cb3
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4647-drm-amdgpu-Add-BRACKET_LAYOUT_ENUMs-to-ObjectID.h.patch
@@ -0,0 +1,42 @@
+From 5194a8715e8fc7aca80e482c3f9c76a06f411bf4 Mon Sep 17 00:00:00 2001
+From: Harry Wentland <harry.wentland@amd.com>
+Date: Thu, 7 Jun 2018 15:45:08 -0400
+Subject: [PATCH 4647/5725] drm/amdgpu: Add BRACKET_LAYOUT_ENUMs to ObjectID.h
+
+DC has an upcoming change that requires these to read the board layout.
+
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/ObjectID.h | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/ObjectID.h b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
+index 0619269..5b39362 100644
+--- a/drivers/gpu/drm/amd/amdgpu/ObjectID.h
++++ b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
+@@ -136,6 +136,7 @@
+ #define GENERIC_OBJECT_ID_PX2_NON_DRIVABLE 0x02
+ #define GENERIC_OBJECT_ID_MXM_OPM 0x03
+ #define GENERIC_OBJECT_ID_STEREO_PIN 0x04 //This object could show up from Misc Object table, it follows ATOM_OBJECT format, and contains one ATOM_OBJECT_GPIO_CNTL_RECORD for the stereo pin
++#define GENERIC_OBJECT_ID_BRACKET_LAYOUT 0x05
+
+ /****************************************************/
+ /* Graphics Object ENUM ID Definition */
+@@ -714,6 +715,13 @@
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ GENERIC_OBJECT_ID_STEREO_PIN << OBJECT_ID_SHIFT)
+
++#define GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++ GENERIC_OBJECT_ID_BRACKET_LAYOUT << OBJECT_ID_SHIFT)
++
++#define GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
++ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
++ GENERIC_OBJECT_ID_BRACKET_LAYOUT << OBJECT_ID_SHIFT)
+ /****************************************************/
+ /* Object Cap definition - Shared with BIOS */
+ /****************************************************/
+--
+2.7.4
+
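The new GENERICOBJECT_BRACKET_LAYOUT_ENUM_IDn values are packed the same way
as every other graphics object ID in this header: object type, enum ID and
object ID shifted into a single word. Unpacking one is just the reverse (the
*_SHIFT names come from ObjectID.h; the field widths below are assumptions
for the sketch):

    u32 id       = GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1;
    u32 obj_type = (id >> OBJECT_TYPE_SHIFT) & 0x7;   /* GRAPH_OBJECT_TYPE_GENERIC */
    u32 enum_id  = (id >> ENUM_ID_SHIFT)     & 0x7;   /* GRAPH_OBJECT_ENUM_ID1 */
    u32 obj_id   = (id >> OBJECT_ID_SHIFT)   & 0xff;  /* GENERIC_OBJECT_ID_BRACKET_LAYOUT */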
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4648-drm-amdgpu-update-documentation-for-amdgpu_irq.c-v3.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4648-drm-amdgpu-update-documentation-for-amdgpu_irq.c-v3.patch
new file mode 100644
index 00000000..769acc98
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4648-drm-amdgpu-update-documentation-for-amdgpu_irq.c-v3.patch
@@ -0,0 +1,465 @@
+From 6345beebc018c673e95f5048eda467076d290899 Mon Sep 17 00:00:00 2001
+From: Slava Abramov <slava.abramov@amd.com>
+Date: Thu, 7 Jun 2018 17:27:07 -0400
+Subject: [PATCH 4648/5725] drm/amdgpu: update documentation for amdgpu_irq.c
+ v3
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Add/update function level documentation and add reference to amdgpu_irq.c
+in amdgpu.rst
+
+v2:
+Added DOC comment
+Added more explanations for amdgpu_hotplug_work_func
+Properly formatted unused parameters
+Properly formatted return values
+Fixed usage of acronyms
+More consistent styling
+
+v3:
+Removed duplicate "not"
+Using '&' to refer to functions and types
+
+Change-Id: I59adf54cf036a2b5b89c1fb8efd5d98dd849a5f6
+Signed-off-by: Slava Abramov <slava.abramov@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 201 ++++++++++++++++++++++----------
+ 1 file changed, 139 insertions(+), 62 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+index 43a4d6a..da8eda8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+@@ -25,6 +25,23 @@
+ * Alex Deucher
+ * Jerome Glisse
+ */
++
++/**
++ * DOC: Interrupt Handling
++ *
++ * Interrupts generated within GPU hardware raise interrupt requests that are
++ * passed to amdgpu IRQ handler which is responsible for detecting source and
++ * type of the interrupt and dispatching matching handlers. If handling an
++ * interrupt requires calling kernel functions that may sleep, processing is
++ * dispatched to work handlers.
++ *
++ * If MSI functionality is not disabled by module parameter then MSI
++ * support will be enabled.
++ *
++ * For GPU interrupt sources that may be driven by another driver, IRQ domain
++ * support is used (with mapping between virtual and hardware IRQs).
++ */
++
+ #include <linux/irq.h>
+ #include <drm/drmP.h>
+ #include <drm/drm_crtc_helper.h>
+@@ -43,19 +60,21 @@
+
+ #define AMDGPU_WAIT_IDLE_TIMEOUT 200
+
+-/*
+- * Handle hotplug events outside the interrupt handler proper.
+- */
+ /**
+- * amdgpu_hotplug_work_func - display hotplug work handler
++ * amdgpu_hotplug_work_func - work handler for display hotplug event
+ *
+- * @work: work struct
++ * @work: work struct pointer
+ *
+- * This is the hot plug event work handler (all asics).
+- * The work gets scheduled from the irq handler if there
+- * was a hot plug interrupt. It walks the connector table
+- * and calls the hotplug handler for each one, then sends
+- * a drm hotplug event to alert userspace.
++ * This is the hotplug event work handler (all ASICs).
++ * The work gets scheduled from the IRQ handler if there
++ * was a hotplug interrupt. It walks through the connector table
++ * and calls hotplug handler for each connector. After this, it sends
++ * a DRM hotplug event to alert userspace.
++ *
++ * This design approach is required in order to defer hotplug event handling
++ * from the IRQ handler to a work handler because hotplug handler has to use
++ * mutexes which cannot be locked in an IRQ handler (since &mutex_lock may
++ * sleep).
+ */
+ static void amdgpu_hotplug_work_func(struct work_struct *work)
+ {
+@@ -74,13 +93,12 @@ static void amdgpu_hotplug_work_func(struct work_struct *work)
+ }
+
+ /**
+- * amdgpu_irq_reset_work_func - execute gpu reset
++ * amdgpu_irq_reset_work_func - execute GPU reset
+ *
+- * @work: work struct
++ * @work: work struct pointer
+ *
+- * Execute scheduled gpu reset (cayman+).
+- * This function is called when the irq handler
+- * thinks we need a gpu reset.
++ * Execute scheduled GPU reset (Cayman+).
++ * This function is called when the IRQ handler thinks we need a GPU reset.
+ */
+ static void amdgpu_irq_reset_work_func(struct work_struct *work)
+ {
+@@ -91,7 +109,13 @@ static void amdgpu_irq_reset_work_func(struct work_struct *work)
+ amdgpu_device_gpu_recover(adev, NULL, false);
+ }
+
+-/* Disable *all* interrupts */
++/**
++ * amdgpu_irq_disable_all - disable *all* interrupts
++ *
++ * @adev: amdgpu device pointer
++ *
++ * Disable all types of interrupts from all sources.
++ */
+ void amdgpu_irq_disable_all(struct amdgpu_device *adev)
+ {
+ unsigned long irqflags;
+@@ -123,11 +147,15 @@ void amdgpu_irq_disable_all(struct amdgpu_device *adev)
+ }
+
+ /**
+- * amdgpu_irq_handler - irq handler
++ * amdgpu_irq_handler - IRQ handler
++ *
++ * @irq: IRQ number (unused)
++ * @arg: pointer to DRM device
+ *
+- * @int irq, void *arg: args
++ * IRQ handler for amdgpu driver (all ASICs).
+ *
+- * This is the irq handler for the amdgpu driver (all asics).
++ * Returns:
++ * result of handling the IRQ, as defined by &irqreturn_t
+ */
+ irqreturn_t amdgpu_irq_handler(int irq, void *arg)
+ {
+@@ -142,18 +170,18 @@ irqreturn_t amdgpu_irq_handler(int irq, void *arg)
+ }
+
+ /**
+- * amdgpu_msi_ok - asic specific msi checks
++ * amdgpu_msi_ok - check whether MSI functionality is enabled
+ *
+- * @adev: amdgpu device pointer
++ * @adev: amdgpu device pointer (unused)
++ *
++ * Checks whether MSI functionality has been disabled via module parameter
++ * (all ASICs).
+ *
+- * Handles asic specific MSI checks to determine if
+- * MSIs should be enabled on a particular chip (all asics).
+- * Returns true if MSIs should be enabled, false if MSIs
+- * should not be enabled.
++ * Returns:
++ * *true* if MSIs are allowed to be enabled or *false* otherwise
+ */
+ static bool amdgpu_msi_ok(struct amdgpu_device *adev)
+ {
+- /* force MSI on */
+ if (amdgpu_msi == 1)
+ return true;
+ else if (amdgpu_msi == 0)
+@@ -163,12 +191,15 @@ static bool amdgpu_msi_ok(struct amdgpu_device *adev)
+ }
+
+ /**
+- * amdgpu_irq_init - init driver interrupt info
++ * amdgpu_irq_init - initialize interrupt handling
+ *
+ * @adev: amdgpu device pointer
+ *
+- * Sets up the work irq handlers, vblank init, MSIs, etc. (all asics).
+- * Returns 0 for success, error for failure.
++ * Sets up work functions for hotplug and reset interrupts, enables MSI
++ * functionality, initializes vblank, hotplug and reset interrupt handling.
++ *
++ * Returns:
++ * 0 on success or error code on failure
+ */
+ int amdgpu_irq_init(struct amdgpu_device *adev)
+ {
+@@ -176,7 +207,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
+
+ spin_lock_init(&adev->irq.lock);
+
+- /* enable msi */
++ /* Enable MSI if not disabled by module parameter */
+ adev->irq.msi_enabled = false;
+
+ if (amdgpu_msi_ok(adev)) {
+@@ -189,7 +220,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
+
+ if (!amdgpu_device_has_dc_support(adev)) {
+ if (!adev->enable_virtual_display)
+- /* Disable vblank irqs aggressively for power-saving */
++ /* Disable vblank IRQs aggressively for power-saving */
+ /* XXX: can this be enabled for DC? */
+ adev->ddev->vblank_disable_immediate = true;
+
+@@ -197,7 +228,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
+ if (r)
+ return r;
+
+- /* pre DCE11 */
++ /* Pre-DCE11 */
+ INIT_WORK(&adev->hotplug_work,
+ amdgpu_hotplug_work_func);
+ }
+@@ -220,11 +251,13 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
+ }
+
+ /**
+- * amdgpu_irq_fini - tear down driver interrupt info
++ * amdgpu_irq_fini - shut down interrupt handling
+ *
+ * @adev: amdgpu device pointer
+ *
+- * Tears down the work irq handlers, vblank handlers, MSIs, etc. (all asics).
++ * Tears down work functions for hotplug and reset interrupts, disables MSI
++ * functionality, shuts down vblank, hotplug and reset interrupt handling,
++ * turns off interrupts from all sources (all ASICs).
+ */
+ void amdgpu_irq_fini(struct amdgpu_device *adev)
+ {
+@@ -264,12 +297,17 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
+ }
+
+ /**
+- * amdgpu_irq_add_id - register irq source
++ * amdgpu_irq_add_id - register IRQ source
+ *
+ * @adev: amdgpu device pointer
+- * @src_id: source id for this source
+- * @source: irq source
++ * @client_id: client id
++ * @src_id: source id
++ * @source: IRQ source pointer
++ *
++ * Registers IRQ source on a client.
+ *
++ * Returns:
++ * 0 on success or error code otherwise
+ */
+ int amdgpu_irq_add_id(struct amdgpu_device *adev,
+ unsigned client_id, unsigned src_id,
+@@ -312,12 +350,12 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev,
+ }
+
+ /**
+- * amdgpu_irq_dispatch - dispatch irq to IP blocks
++ * amdgpu_irq_dispatch - dispatch IRQ to IP blocks
+ *
+ * @adev: amdgpu device pointer
+- * @entry: interrupt vector
++ * @entry: interrupt vector pointer
+ *
+- * Dispatches the irq to the different IP blocks
++ * Dispatches IRQ to IP blocks.
+ */
+ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry)
+@@ -361,13 +399,13 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
+ }
+
+ /**
+- * amdgpu_irq_update - update hw interrupt state
++ * amdgpu_irq_update - update hardware interrupt state
+ *
+ * @adev: amdgpu device pointer
+- * @src: interrupt src you want to enable
+- * @type: type of interrupt you want to update
++ * @src: interrupt source pointer
++ * @type: type of interrupt
+ *
+- * Updates the interrupt state for a specific src (all asics).
++ * Updates interrupt state for the specific source (all ASICs).
+ */
+ int amdgpu_irq_update(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src, unsigned type)
+@@ -378,7 +416,7 @@ int amdgpu_irq_update(struct amdgpu_device *adev,
+
+ spin_lock_irqsave(&adev->irq.lock, irqflags);
+
+- /* we need to determine after taking the lock, otherwise
++ /* We need to determine after taking the lock, otherwise
+ we might disable just enabled interrupts again */
+ if (amdgpu_irq_enabled(adev, src, type))
+ state = AMDGPU_IRQ_STATE_ENABLE;
+@@ -390,6 +428,14 @@ int amdgpu_irq_update(struct amdgpu_device *adev,
+ return r;
+ }
+
++/**
++ * amdgpu_irq_gpu_reset_resume_helper - update interrupt states on all sources
++ *
++ * @adev: amdgpu device pointer
++ *
++ * Updates state of all types of interrupts on all sources on resume after
++ * reset.
++ */
+ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
+ {
+ int i, j, k;
+@@ -413,10 +459,13 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
+ * amdgpu_irq_get - enable interrupt
+ *
+ * @adev: amdgpu device pointer
+- * @src: interrupt src you want to enable
+- * @type: type of interrupt you want to enable
++ * @src: interrupt source pointer
++ * @type: type of interrupt
+ *
+- * Enables the interrupt type for a specific src (all asics).
++ * Enables specified type of interrupt on the specified source (all ASICs).
++ *
++ * Returns:
++ * 0 on success or error code otherwise
+ */
+ int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
+ unsigned type)
+@@ -440,10 +489,13 @@ int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
+ * amdgpu_irq_put - disable interrupt
+ *
+ * @adev: amdgpu device pointer
+- * @src: interrupt src you want to disable
+- * @type: type of interrupt you want to disable
++ * @src: interrupt source pointer
++ * @type: type of interrupt
++ *
++ * Disables specified type of interrupt on the specified source (all ASICs).
+ *
+- * Disables the interrupt type for a specific src (all asics).
++ * Returns:
++ * 0 on success or error code otherwise
+ */
+ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
+ unsigned type)
+@@ -464,12 +516,17 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
+ }
+
+ /**
+- * amdgpu_irq_enabled - test if irq is enabled or not
++ * amdgpu_irq_enabled - check whether interrupt is enabled or not
+ *
+ * @adev: amdgpu device pointer
+- * @idx: interrupt src you want to test
++ * @src: interrupt source pointer
++ * @type: type of interrupt
+ *
+- * Tests if the given interrupt source is enabled or not
++ * Checks whether the given type of interrupt is enabled on the given source.
++ *
++ * Returns:
++ * *true* if interrupt is enabled, *false* if interrupt is disabled or on
++ * invalid parameters
+ */
+ bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
+ unsigned type)
+@@ -487,6 +544,7 @@ bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
+ }
+
+ /* gen irq */
++/* XXX: Generic IRQ handling */
+ static void amdgpu_irq_mask(struct irq_data *irqd)
+ {
+ /* XXX */
+@@ -497,12 +555,26 @@ static void amdgpu_irq_unmask(struct irq_data *irqd)
+ /* XXX */
+ }
+
++/* amdgpu hardware interrupt chip descriptor */
+ static struct irq_chip amdgpu_irq_chip = {
+ .name = "amdgpu-ih",
+ .irq_mask = amdgpu_irq_mask,
+ .irq_unmask = amdgpu_irq_unmask,
+ };
+
++/**
++ * amdgpu_irqdomain_map - create mapping between virtual and hardware IRQ numbers
++ *
++ * @d: amdgpu IRQ domain pointer (unused)
++ * @irq: virtual IRQ number
++ * @hwirq: hardware irq number
++ *
++ * The current implementation assigns a simple interrupt handler to the given
++ * virtual IRQ.
++ *
++ * Returns:
++ * 0 on success or error code otherwise
++ */
+ static int amdgpu_irqdomain_map(struct irq_domain *d,
+ unsigned int irq, irq_hw_number_t hwirq)
+ {
+@@ -514,17 +586,21 @@ static int amdgpu_irqdomain_map(struct irq_domain *d,
+ return 0;
+ }
+
++/* Implementation of methods for amdgpu IRQ domain */
+ static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
+ .map = amdgpu_irqdomain_map,
+ };
+
+ /**
+- * amdgpu_irq_add_domain - create a linear irq domain
++ * amdgpu_irq_add_domain - create a linear IRQ domain
+ *
+ * @adev: amdgpu device pointer
+ *
+- * Create an irq domain for GPU interrupt sources
++ * Creates an IRQ domain for GPU interrupt sources
+ * that may be driven by another driver (e.g., ACP).
++ *
++ * Returns:
++ * 0 on success or error code otherwise
+ */
+ int amdgpu_irq_add_domain(struct amdgpu_device *adev)
+ {
+@@ -539,11 +615,11 @@ int amdgpu_irq_add_domain(struct amdgpu_device *adev)
+ }
+
+ /**
+- * amdgpu_irq_remove_domain - remove the irq domain
++ * amdgpu_irq_remove_domain - remove the IRQ domain
+ *
+ * @adev: amdgpu device pointer
+ *
+- * Remove the irq domain for GPU interrupt sources
++ * Removes the IRQ domain for GPU interrupt sources
+ * that may be driven by another driver (e.g., ACP).
+ */
+ void amdgpu_irq_remove_domain(struct amdgpu_device *adev)
+@@ -555,16 +631,17 @@ void amdgpu_irq_remove_domain(struct amdgpu_device *adev)
+ }
+
+ /**
+- * amdgpu_irq_create_mapping - create a mapping between a domain irq and a
+- * Linux irq
++ * amdgpu_irq_create_mapping - create mapping between a domain IRQ and a Linux IRQ
+ *
+ * @adev: amdgpu device pointer
+ * @src_id: IH source id
+ *
+- * Create a mapping between a domain irq (GPU IH src id) and a Linux irq
++ * Creates mapping between a domain IRQ (GPU IH src id) and a Linux IRQ
+ * Use this for components that generate a GPU interrupt, but are driven
+ * by a different driver (e.g., ACP).
+- * Returns the Linux irq.
++ *
++ * Returns:
++ * Linux IRQ
+ */
+ unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id)
+ {
+--
+2.7.4
+
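The kernel-doc added above documents a reference-counted enable/disable pair:
amdgpu_irq_get() bumps a per-type counter on the source and, when that counter
goes from 0 to 1, programs the hardware through the source's ->funcs->set
callback, while amdgpu_irq_put() drops the counter and disables the interrupt
again when it reaches 0. A minimal sketch of how an IP block would typically
pair the two calls (the function names and the type index 0 are illustrative
only, not taken from the patch):

    /* Enable one interrupt type on a source during hardware init. */
    static int example_block_irq_init(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *src)
    {
            /* Refcount 0 -> 1 programs the hardware via src->funcs->set(). */
            return amdgpu_irq_get(adev, src, 0 /* interrupt type */);
    }

    /* Balance the get on teardown; refcount 1 -> 0 disables the IRQ again. */
    static void example_block_irq_fini(struct amdgpu_device *adev,
                                       struct amdgpu_irq_src *src)
    {
            amdgpu_irq_put(adev, src, 0 /* interrupt type */);
    }
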
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4649-drm-amdgpu-fix-typo-in-amdgpu_mn.c-comments.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4649-drm-amdgpu-fix-typo-in-amdgpu_mn.c-comments.patch
new file mode 100644
index 00000000..aba73fd3
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4649-drm-amdgpu-fix-typo-in-amdgpu_mn.c-comments.patch
@@ -0,0 +1,32 @@
+From 35632a900ec470b9a1bc957f1876d54a3e73912a Mon Sep 17 00:00:00 2001
+From: Slava Abramov <slava.abramov@amd.com>
+Date: Wed, 13 Jun 2018 10:50:31 -0400
+Subject: [PATCH 4649/5725] drm/amdgpu: fix typo in amdgpu_mn.c comments
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+In doc comments for struct amdgpu_mn: destrution -> destruction
+
+Signed-off-by: Slava Abramov <slava.abramov@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+index 7aaef93..2f3fb4f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+@@ -59,7 +59,7 @@
+ * @adev: amdgpu device pointer
+ * @mm: process address space
+ * @mn: MMU notifier structur
+- * @work: destrution work item
++ * @work: destruction work item
+ * @node: hash table node to find structure by adev and mn
+ * @lock: rw semaphore protecting the notifier nodes
+ * @objects: interval tree containing amdgpu_mn_nodes
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4650-drm-amdgpu-Consolidate-visible-vs.-real-vram-check-v.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4650-drm-amdgpu-Consolidate-visible-vs.-real-vram-check-v.patch
new file mode 100644
index 00000000..2937741f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4650-drm-amdgpu-Consolidate-visible-vs.-real-vram-check-v.patch
@@ -0,0 +1,188 @@
+From 618f7497d2013be59b679b92a1b5058f5595b39a Mon Sep 17 00:00:00 2001
+From: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Date: Tue, 12 Jun 2018 14:28:20 -0400
+Subject: [PATCH 4650/5725] drm/amdgpu: Consolidate visible vs. real vram check
+ v2.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Move all instances of this check into a function in amdgpu_gmc.h.
+Rename the original function to a more appropriate name.
+
+v2:
+Add more places to clean up.
+
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 11 ++++++-----
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 15 +++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 20 ++++----------------
+ 5 files changed, 27 insertions(+), 23 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index 10e8dbb..abe2bff 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -31,6 +31,7 @@
+ #include <drm/drm_syncobj.h>
+ #include "amdgpu.h"
+ #include "amdgpu_trace.h"
++#include "amdgpu_gmc.h"
+
+ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
+ struct drm_amdgpu_cs_chunk_fence *data,
+@@ -302,7 +303,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
+ *max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);
+
+ /* Do the same for visible VRAM if half of it is free */
+- if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size) {
++ if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
+ u64 total_vis_vram = adev->gmc.visible_vram_size;
+ u64 used_vis_vram =
+ amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
+@@ -359,7 +360,7 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
+ * to move it. Don't move anything if the threshold is zero.
+ */
+ if (p->bytes_moved < p->bytes_moved_threshold) {
+- if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
++ if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
+ (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
+ /* And don't move a CPU_ACCESS_REQUIRED BO to limited
+ * visible VRAM if we've depleted our allowance to do
+@@ -381,7 +382,7 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+
+ p->bytes_moved += ctx.bytes_moved;
+- if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
++ if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
+ amdgpu_bo_in_cpu_visible_vram(bo))
+ p->bytes_moved_vis += ctx.bytes_moved;
+
+@@ -434,8 +435,8 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
+
+ /* Good we can try to move this BO somewhere else */
+ update_bytes_moved_vis =
+- adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
+- amdgpu_bo_in_cpu_visible_vram(bo);
++ !amdgpu_gmc_vram_full_visible(&adev->gmc) &&
++ amdgpu_bo_in_cpu_visible_vram(bo);
+ amdgpu_ttm_placement_from_domain(bo, other);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ p->bytes_moved += ctx.bytes_moved;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+index f4fdeb2..5f1d59e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+@@ -112,4 +112,19 @@ struct amdgpu_gmc {
+ const struct amdgpu_gmc_funcs *gmc_funcs;
+ };
+
++/**
++ * amdgpu_gmc_vram_full_visible - Check if full VRAM is visible through the BAR
++ *
++ * @gmc: amdgpu_gmc structure pointer
++ *
++ * Returns:
++ * True if full VRAM is visible through the BAR
++ */
++static inline bool amdgpu_gmc_vram_full_visible(struct amdgpu_gmc *gmc)
++{
++ WARN_ON(gmc->real_vram_size < gmc->visible_vram_size);
++
++ return (gmc->real_vram_size == gmc->visible_vram_size);
++}
++
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index 8574f39..f218f1c 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -481,7 +481,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
+ if (unlikely(r != 0))
+ return r;
+
+- if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
++ if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
+ bo->tbo.mem.mem_type == TTM_PL_VRAM &&
+ bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
+ amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index acaa441..905f13b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -295,7 +295,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
+ if (!adev->mman.buffer_funcs_enabled) {
+ /* Move to system memory */
+ amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
+- } else if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
++ } else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
+ !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
+ amdgpu_bo_in_cpu_visible_vram(abo)) {
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 3c7b1de..3fb0340 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -33,6 +33,7 @@
+ #include "amdgpu.h"
+ #include "amdgpu_trace.h"
+ #include "amdgpu_amdkfd.h"
++#include "amdgpu_gmc.h"
+
+ /**
+ * DOC: GPUVM
+@@ -670,19 +671,6 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
+ }
+
+ /**
+- * amdgpu_vm_is_large_bar - Check if BAR is large enough
+- *
+- * @adev: amdgpu_device pointer
+- *
+- * Returns:
+- * True if BAR is large enough.
+- */
+-static bool amdgpu_vm_is_large_bar(struct amdgpu_device *adev)
+-{
+- return (adev->gmc.real_vram_size == adev->gmc.visible_vram_size);
+-}
+-
+-/**
+ * amdgpu_vm_flush - hardware flush the vm
+ *
+ * @ring: ring to use for flush
+@@ -2635,7 +2623,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ }
+ DRM_DEBUG_DRIVER("VM update mode is %s\n",
+ vm->use_cpu_for_update ? "CPU" : "SDMA");
+- WARN_ONCE((vm->use_cpu_for_update & !amdgpu_vm_is_large_bar(adev)),
++ WARN_ONCE((vm->use_cpu_for_update & !amdgpu_gmc_vram_full_visible(&adev->gmc)),
+ "CPU update of VM recommended only for large BAR system\n");
+ vm->last_update = NULL;
+
+@@ -2769,7 +2757,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+ vm->pte_support_ats = pte_support_ats;
+ DRM_DEBUG_DRIVER("VM update mode is %s\n",
+ vm->use_cpu_for_update ? "CPU" : "SDMA");
+- WARN_ONCE((vm->use_cpu_for_update & !amdgpu_vm_is_large_bar(adev)),
++ WARN_ONCE((vm->use_cpu_for_update & !amdgpu_gmc_vram_full_visible(&adev->gmc)),
+ "CPU update of VM recommended only for large BAR system\n");
+
+ if (vm->pasid) {
+@@ -2968,7 +2956,7 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
+ */
+ #ifdef CONFIG_X86_64
+ if (amdgpu_vm_update_mode == -1) {
+- if (amdgpu_vm_is_large_bar(adev))
++ if (amdgpu_gmc_vram_full_visible(&adev->gmc))
+ adev->vm_manager.vm_update_mode =
+ AMDGPU_VM_USE_CPU_FOR_COMPUTE;
+ else
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4651-drm-doc-Add-amdgpu-hwmon-power-documentation-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4651-drm-doc-Add-amdgpu-hwmon-power-documentation-v2.patch
new file mode 100644
index 00000000..a70f54f0
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4651-drm-doc-Add-amdgpu-hwmon-power-documentation-v2.patch
@@ -0,0 +1,136 @@
+From f7d9ae7a179c901b3a3b13936632a864c963f53e Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Fri, 1 Jun 2018 12:28:14 -0500
+Subject: [PATCH 4651/5725] drm/doc: Add amdgpu hwmon/power documentation (v2)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Document the hwmon and power control interfaces exposed
+by the amdgpu driver.
+
+v2: use section rather than chapter for now
+
+Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 45 ++++++++++++++++++++++++++++------
+ 1 file changed, 37 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+index 6f07b51..2c904a6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+@@ -80,12 +80,15 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
+ /**
+ * DOC: power_dpm_state
+ *
+- * This is a legacy interface and is only provided for backwards compatibility.
+- * The amdgpu driver provides a sysfs API for adjusting certain power
+- * related parameters. The file power_dpm_state is used for this.
++ * The power_dpm_state file is a legacy interface and is only provided for
++ * backwards compatibility. The amdgpu driver provides a sysfs API for adjusting
++ * certain power related parameters. The file power_dpm_state is used for this.
+ * It accepts the following arguments:
++ *
+ * - battery
++ *
+ * - balanced
++ *
+ * - performance
+ *
+ * battery
+@@ -169,14 +172,21 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
+ * The amdgpu driver provides a sysfs API for adjusting certain power
+ * related parameters. The file power_dpm_force_performance_level is
+ * used for this. It accepts the following arguments:
++ *
+ * - auto
++ *
+ * - low
++ *
+ * - high
++ *
+ * - manual
+- * - GPU fan
++ *
+ * - profile_standard
++ *
+ * - profile_min_sclk
++ *
+ * - profile_min_mclk
++ *
+ * - profile_peak
+ *
+ * auto
+@@ -463,8 +473,11 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
+ * this.
+ *
+ * Reading the file will display:
++ *
+ * - a list of engine clock levels and voltages labeled OD_SCLK
++ *
+ * - a list of memory clock levels and voltages labeled OD_MCLK
++ *
+ * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE
+ *
+ * To manually adjust these settings, first select manual using
+@@ -1285,35 +1298,51 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
+ * DOC: hwmon
+ *
+ * The amdgpu driver exposes the following sensor interfaces:
++ *
+ * - GPU temperature (via the on-die sensor)
++ *
+ * - GPU voltage
++ *
+ * - Northbridge voltage (APUs only)
++ *
+ * - GPU power
++ *
+ * - GPU fan
+ *
+ * hwmon interfaces for GPU temperature:
++ *
+ * - temp1_input: the on die GPU temperature in millidegrees Celsius
++ *
+ * - temp1_crit: temperature critical max value in millidegrees Celsius
++ *
+ * - temp1_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
+ *
+ * hwmon interfaces for GPU voltage:
++ *
+ * - in0_input: the voltage on the GPU in millivolts
++ *
+ * - in1_input: the voltage on the Northbridge in millivolts
+ *
+ * hwmon interfaces for GPU power:
++ *
+ * - power1_average: average power used by the GPU in microWatts
++ *
+ * - power1_cap_min: minimum cap supported in microWatts
++ *
+ * - power1_cap_max: maximum cap supported in microWatts
++ *
+ * - power1_cap: selected power cap in microWatts
+ *
+ * hwmon interfaces for GPU fan:
++ *
+ * - pwm1: pulse width modulation fan level (0-255)
+- * - pwm1_enable: pulse width modulation fan control method
+- * 0: no fan speed control
+- * 1: manual fan speed control using pwm interface
+- * 2: automatic fan speed control
++ *
++ * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control)
++ *
+ * - pwm1_min: pulse width modulation fan control minimum level (0)
++ *
+ * - pwm1_max: pulse width modulation fan control maximum level (255)
++ *
+ * - fan1_input: fan speed in RPM
+ *
+ * You can use hwmon tools like sensors to view this information on your system.
+--
+2.7.4
+
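Because the hwmon files documented above are ordinary sysfs attributes, they
can be read like any text file. A small user-space sketch reading the average
GPU power (the hwmon index in the path is system-dependent and only
illustrative):

    #include <stdio.h>

    int main(void)
    {
            /* power1_average reports microwatts, per the documentation above. */
            FILE *f = fopen("/sys/class/hwmon/hwmon0/power1_average", "r");
            unsigned long uw;

            if (!f)
                    return 1;
            if (fscanf(f, "%lu", &uw) != 1) {
                    fclose(f);
                    return 1;
            }
            fclose(f);
            printf("GPU power: %lu.%06lu W\n", uw / 1000000, uw % 1000000);
            return 0;
    }
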
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4652-drm-amdgpu-vg20-support-new-UVD-FW-version-naming-co.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4652-drm-amdgpu-vg20-support-new-UVD-FW-version-naming-co.patch
new file mode 100644
index 00000000..073a6a8b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4652-drm-amdgpu-vg20-support-new-UVD-FW-version-naming-co.patch
@@ -0,0 +1,60 @@
+From 59feb212fc82ef449af46186baabbe38546236c2 Mon Sep 17 00:00:00 2001
+From: James Zhu <James.Zhu@amd.com>
+Date: Wed, 6 Jun 2018 14:38:14 -0400
+Subject: [PATCH 4652/5725] drm/amdgpu/vg20:support new UVD FW version naming
+ convention
+
+Vega20 UVD Firmware has a new version naming convention:
+ [31, 30] for encode interface major
+ [29, 24] for encode interface minor
+ [15, 8] for decode interface minor
+ [7, 0] for hardware family id
+
+Signed-off-by: James Zhu <James.Zhu@amd.com>
+Reviewed-by: Leo Liu <leo.liu@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 21 ++++++++++++++++-----
+ 1 file changed, 16 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+index aadc494..9ceab34 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+@@ -208,10 +208,21 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
+
+ hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
+ family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
+- version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
+- version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
+- DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
+- version_major, version_minor, family_id);
++
++ if (adev->asic_type < CHIP_VEGA20) {
++ version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
++ version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
++ DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
++ version_major, version_minor, family_id);
++ } else {
++ unsigned int enc_major, enc_minor, dec_minor;
++
++ dec_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
++ enc_minor = (le32_to_cpu(hdr->ucode_version) >> 24) & 0x3f;
++ enc_major = (le32_to_cpu(hdr->ucode_version) >> 30) & 0x3;
++ DRM_INFO("Found UVD firmware ENC: %hu.%hu DEC: .%hu Family ID: %hu\n",
++ enc_major, enc_minor, dec_minor, family_id);
++ }
+
+ /*
+ * Limit the number of UVD handles depending on microcode major
+@@ -219,7 +230,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
+ * instances support is 1.80. So all subsequent versions should
+ * also have the same support.
+ */
+- if ((version_major > 0x01) ||
++ if (adev->asic_type >= CHIP_VEGA20 || (version_major > 0x01) ||
+ ((version_major == 0x01) && (version_minor >= 0x50)))
+ adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;
+
+--
+2.7.4
+
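As a worked example of the bit layout described in the commit message, a
hypothetical ucode_version word of 0x4701142E would decode to encode interface
1.7, decode interface minor 0x14 and hardware family id 0x2E:

    /* Illustrative decode of the Vega20 UVD version word; the value is made up. */
    uint32_t ucode_version = 0x4701142E;
    unsigned int enc_major = (ucode_version >> 30) & 0x3;   /* bits [31:30] -> 1    */
    unsigned int enc_minor = (ucode_version >> 24) & 0x3f;  /* bits [29:24] -> 7    */
    unsigned int dec_minor = (ucode_version >> 8)  & 0xff;  /* bits [15:8]  -> 0x14 */
    unsigned int family_id =  ucode_version        & 0xff;  /* bits [7:0]   -> 0x2e */
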
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4653-drm-amd-pp-Add-S3-support-for-OD-feature.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4653-drm-amd-pp-Add-S3-support-for-OD-feature.patch
new file mode 100644
index 00000000..de40eea9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4653-drm-amd-pp-Add-S3-support-for-OD-feature.patch
@@ -0,0 +1,296 @@
+From c3865754d985aa32866a01554c6f5b1b6f0b7e74 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Wed, 13 Jun 2018 18:26:38 +0800
+Subject: [PATCH 4653/5725] drm/amd/pp: Add S3 support for OD feature
+
+Make custom values survive S3 sleep transitions, so do not reset the OD
+table if it is not null.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 126 +++++++++++----------
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 79 +++++++------
+ 2 files changed, 107 insertions(+), 98 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+index 0f307a7..e8285f1 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+@@ -885,6 +885,60 @@ static void smu7_setup_voltage_range_from_vbios(struct pp_hwmgr *hwmgr)
+ data->odn_dpm_table.max_vddc = max_vddc;
+ }
+
++static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
++{
++ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
++ struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
++ struct phm_ppt_v1_information *table_info =
++ (struct phm_ppt_v1_information *)(hwmgr->pptable);
++ uint32_t i;
++
++ struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
++ struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;
++
++ if (table_info == NULL)
++ return;
++
++ for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
++ if (odn_table->odn_core_clock_dpm_levels.entries[i].clock !=
++ data->dpm_table.sclk_table.dpm_levels[i].value) {
++ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
++ break;
++ }
++ }
++
++ for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
++ if (odn_table->odn_memory_clock_dpm_levels.entries[i].clock !=
++ data->dpm_table.mclk_table.dpm_levels[i].value) {
++ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
++ break;
++ }
++ }
++
++ dep_table = table_info->vdd_dep_on_mclk;
++ odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk);
++
++ for (i = 0; i < dep_table->count; i++) {
++ if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
++ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
++ return;
++ }
++ }
++
++ dep_table = table_info->vdd_dep_on_sclk;
++ odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk);
++ for (i = 0; i < dep_table->count; i++) {
++ if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
++ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
++ return;
++ }
++ }
++ if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
++ data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
++ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
++ }
++}
++
+ static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
+ {
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+@@ -904,10 +958,13 @@ static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
+
+ /* initialize ODN table */
+ if (hwmgr->od_enabled) {
+- smu7_setup_voltage_range_from_vbios(hwmgr);
+- smu7_odn_initial_default_setting(hwmgr);
++ if (data->odn_dpm_table.max_vddc) {
++ smu7_check_dpm_table_updated(hwmgr);
++ } else {
++ smu7_setup_voltage_range_from_vbios(hwmgr);
++ smu7_odn_initial_default_setting(hwmgr);
++ }
+ }
+-
+ return 0;
+ }
+
+@@ -3716,8 +3773,9 @@ static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
+ uint32_t i;
+
+ for (i = 0; i < dpm_table->count; i++) {
+- if ((dpm_table->dpm_levels[i].value < low_limit)
+- || (dpm_table->dpm_levels[i].value > high_limit))
++ /*skip the trim if od is enabled*/
++ if (!hwmgr->od_enabled && (dpm_table->dpm_levels[i].value < low_limit
++ || dpm_table->dpm_levels[i].value > high_limit))
+ dpm_table->dpm_levels[i].enabled = false;
+ else
+ dpm_table->dpm_levels[i].enabled = true;
+@@ -3761,10 +3819,8 @@ static int smu7_generate_dpm_level_enable_mask(
+ const struct smu7_power_state *smu7_ps =
+ cast_const_phw_smu7_power_state(states->pnew_state);
+
+- /*skip the trim if od is enabled*/
+- if (!hwmgr->od_enabled)
+- result = smu7_trim_dpm_states(hwmgr, smu7_ps);
+
++ result = smu7_trim_dpm_states(hwmgr, smu7_ps);
+ if (result)
+ return result;
+
+@@ -4736,60 +4792,6 @@ static bool smu7_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
+ return true;
+ }
+
+-static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
+-{
+- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+- struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
+- struct phm_ppt_v1_information *table_info =
+- (struct phm_ppt_v1_information *)(hwmgr->pptable);
+- uint32_t i;
+-
+- struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
+- struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;
+-
+- if (table_info == NULL)
+- return;
+-
+- for (i=0; i<data->dpm_table.sclk_table.count; i++) {
+- if (odn_table->odn_core_clock_dpm_levels.entries[i].clock !=
+- data->dpm_table.sclk_table.dpm_levels[i].value) {
+- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+- break;
+- }
+- }
+-
+- for (i=0; i<data->dpm_table.mclk_table.count; i++) {
+- if (odn_table->odn_memory_clock_dpm_levels.entries[i].clock !=
+- data->dpm_table.mclk_table.dpm_levels[i].value) {
+- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
+- break;
+- }
+- }
+-
+- dep_table = table_info->vdd_dep_on_mclk;
+- odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk);
+-
+- for (i=0; i < dep_table->count; i++) {
+- if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
+- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
+- return;
+- }
+- }
+-
+- dep_table = table_info->vdd_dep_on_sclk;
+- odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk);
+- for (i=0; i < dep_table->count; i++) {
+- if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
+- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
+- return;
+- }
+- }
+- if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
+- data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
+- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
+- }
+-}
+-
+ static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long *input, uint32_t size)
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+index f49e0d3..0e3f3bb 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+@@ -2414,6 +2414,40 @@ static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr)
+ return result;
+ }
+
++static void vega10_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
++{
++ struct vega10_hwmgr *data = hwmgr->backend;
++ struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
++ struct phm_ppt_v2_information *table_info = hwmgr->pptable;
++ struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
++ struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;
++ uint32_t i;
++
++ dep_table = table_info->vdd_dep_on_mclk;
++ odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_mclk);
++
++ for (i = 0; i < dep_table->count; i++) {
++ if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
++ data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
++ return;
++ }
++ }
++
++ dep_table = table_info->vdd_dep_on_sclk;
++ odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_sclk);
++ for (i = 0; i < dep_table->count; i++) {
++ if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
++ data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
++ return;
++ }
++ }
++
++ if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
++ data->need_update_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
++ data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
++ }
++}
++
+ /**
+ * Initializes the SMC table and uploads it
+ *
+@@ -2430,6 +2464,7 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
+ PPTable_t *pp_table = &(data->smc_state_table.pp_table);
+ struct pp_atomfwctrl_voltage_table voltage_table;
+ struct pp_atomfwctrl_bios_boot_up_values boot_up_values;
++ struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
+
+ result = vega10_setup_default_dpm_tables(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+@@ -2437,8 +2472,14 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
+ return result);
+
+ /* initialize ODN table */
+- if (hwmgr->od_enabled)
+- vega10_odn_initial_default_setting(hwmgr);
++ if (hwmgr->od_enabled) {
++ if (odn_table->max_vddc) {
++ data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
++ vega10_check_dpm_table_updated(hwmgr);
++ } else {
++ vega10_odn_initial_default_setting(hwmgr);
++ }
++ }
+
+ pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC,
+ VOLTAGE_OBJ_SVID2, &voltage_table);
+@@ -4694,40 +4735,6 @@ static bool vega10_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
+ return true;
+ }
+
+-static void vega10_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
+-{
+- struct vega10_hwmgr *data = hwmgr->backend;
+- struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
+- struct phm_ppt_v2_information *table_info = hwmgr->pptable;
+- struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
+- struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;
+- uint32_t i;
+-
+- dep_table = table_info->vdd_dep_on_mclk;
+- odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_mclk);
+-
+- for (i = 0; i < dep_table->count; i++) {
+- if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
+- data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
+- return;
+- }
+- }
+-
+- dep_table = table_info->vdd_dep_on_sclk;
+- odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_sclk);
+- for (i = 0; i < dep_table->count; i++) {
+- if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
+- data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
+- return;
+- }
+- }
+-
+- if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
+- data->need_update_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
+- data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
+- }
+-}
+-
+ static void vega10_odn_update_soc_table(struct pp_hwmgr *hwmgr,
+ enum PP_OD_DPM_TABLE_COMMAND type)
+ {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4654-drm-amdkfd-Fix-the-case-when-a-process-is-NULL.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4654-drm-amdkfd-Fix-the-case-when-a-process-is-NULL.patch
new file mode 100644
index 00000000..1e937f1e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4654-drm-amdkfd-Fix-the-case-when-a-process-is-NULL.patch
@@ -0,0 +1,46 @@
+From 9a52bf4850153c60518ebf0797b4c3894ab5988b Mon Sep 17 00:00:00 2001
+From: Wei Lu <wei.lu2@amd.com>
+Date: Fri, 8 Jun 2018 17:01:29 -0400
+Subject: [PATCH 4654/5725] drm/amdkfd: Fix the case when a process is NULL
+
+Return ERR_PTR(-EINVAL) if kfd_get_process fails to
+find the process.
+
+BUG: KFD-399
+
+Change-Id: I7d34c2662b37fe061994bd8a35ea22f93ae3b76a
+Signed-off-by: Wei Lu <wei.lu2@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 2 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 2 ++
+ 2 files changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index 0eab007..89176ec 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -2372,7 +2372,7 @@ static int kfd_ioctl_cross_memory_copy(struct file *filep,
+ }
+
+ remote_p = kfd_get_process(remote_task);
+- if (!remote_p) {
++ if (IS_ERR(remote_p)) {
+ pr_err("Cross mem copy failed. Invalid kfd process %d\n",
+ args->pid);
+ err = -EINVAL;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index e79479b..735c96a 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -255,6 +255,8 @@ struct kfd_process *kfd_get_process(const struct task_struct *thread)
+ return ERR_PTR(-EINVAL);
+
+ process = find_process(thread, false);
++ if (!process)
++ return ERR_PTR(-EINVAL);
+
+ return process;
+ }
+--
+2.7.4
+
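The fix relies on the standard ERR_PTR()/IS_ERR() convention: after this
change kfd_get_process() never returns NULL, it returns either a valid pointer
or an encoded error, so callers must test the result with IS_ERR() rather than
a NULL check. A minimal sketch of the expected call pattern (the surrounding
code is hypothetical):

    struct kfd_process *p = kfd_get_process(current);

    if (IS_ERR(p))
            return PTR_ERR(p);      /* e.g. -EINVAL when no process is found */
    /* safe to dereference p from here on */
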
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4655-drm-amdgpu-band-aid-validating-VM-PTs.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4655-drm-amdgpu-band-aid-validating-VM-PTs.patch
new file mode 100644
index 00000000..2a219a35
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4655-drm-amdgpu-band-aid-validating-VM-PTs.patch
@@ -0,0 +1,35 @@
+From f1ffe31f8cc1af9e53ffffc6b06ad0ce81be974c Mon Sep 17 00:00:00 2001
+From: Jim Qu <Jim.Qu@amd.com>
+Date: Thu, 21 Jun 2018 10:18:32 +0800
+Subject: [PATCH 4655/5725] drm/amdgpu: band aid validating VM PTs
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Always validating the VM PTs takes too much time. Only always validate
+the per-VM BOs for now.
+
+Change-Id: I1dfa9c75966769c4bba6da7441a2333ec6d63e37
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Jim Qu <Jim.Qu@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 3fb0340..a74cd1d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -1081,7 +1081,7 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
+ struct amdgpu_vm_bo_base,
+ vm_status);
+ bo_base->moved = false;
+- list_move(&bo_base->vm_status, &vm->idle);
++ list_del_init(&bo_base->vm_status);
+
+ bo = bo_base->bo->parent;
+ if (!bo)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4656-drm-amd-pp-Fix-wrong-clock-unit-exported-to-Display.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4656-drm-amd-pp-Fix-wrong-clock-unit-exported-to-Display.patch
new file mode 100644
index 00000000..75d4cac1
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4656-drm-amd-pp-Fix-wrong-clock-unit-exported-to-Display.patch
@@ -0,0 +1,141 @@
+From f4ac39cc0ed4c86d1c99723fcebbd60c707a1109 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Mon, 18 Jun 2018 18:15:15 +0800
+Subject: [PATCH 4656/5725] drm/amd/pp: Fix wrong clock-unit exported to
+ Display
+
+Convert the 10 kHz units reported by the SMU into the kHz units needed by
+the Display component.
+
+This fixes an issue where a 4K monitor can't be lit up on Vega/Raven.
+
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 4 ++--
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 10 +++++-----
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 10 +++++-----
+ 3 files changed, 12 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+index d4bc83e..c905df4 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+@@ -993,7 +993,7 @@ static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
+
+ clocks->num_levels = 0;
+ for (i = 0; i < pclk_vol_table->count; i++) {
+- clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk;
++ clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk * 10;
+ clocks->data[i].latency_in_us = latency_required ?
+ smu10_get_mem_latency(hwmgr,
+ pclk_vol_table->entries[i].clk) :
+@@ -1044,7 +1044,7 @@ static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
+
+ clocks->num_levels = 0;
+ for (i = 0; i < pclk_vol_table->count; i++) {
+- clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk;
++ clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk * 10;
+ clocks->data[i].voltage_in_mv = pclk_vol_table->entries[i].vol;
+ clocks->num_levels++;
+ }
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+index 0e3f3bb..843dba9 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+@@ -4065,7 +4065,7 @@ static void vega10_get_sclks(struct pp_hwmgr *hwmgr,
+ for (i = 0; i < dep_table->count; i++) {
+ if (dep_table->entries[i].clk) {
+ clocks->data[clocks->num_levels].clocks_in_khz =
+- dep_table->entries[i].clk;
++ dep_table->entries[i].clk * 10;
+ clocks->num_levels++;
+ }
+ }
+@@ -4102,7 +4102,7 @@ static void vega10_get_memclocks(struct pp_hwmgr *hwmgr,
+ clocks->data[clocks->num_levels].clocks_in_khz =
+ data->mclk_latency_table.entries
+ [data->mclk_latency_table.count].frequency =
+- dep_table->entries[i].clk;
++ dep_table->entries[i].clk * 10;
+ clocks->data[clocks->num_levels].latency_in_us =
+ data->mclk_latency_table.entries
+ [data->mclk_latency_table.count].latency =
+@@ -4124,7 +4124,7 @@ static void vega10_get_dcefclocks(struct pp_hwmgr *hwmgr,
+ uint32_t i;
+
+ for (i = 0; i < dep_table->count; i++) {
+- clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
++ clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10;
+ clocks->data[i].latency_in_us = 0;
+ clocks->num_levels++;
+ }
+@@ -4140,7 +4140,7 @@ static void vega10_get_socclocks(struct pp_hwmgr *hwmgr,
+ uint32_t i;
+
+ for (i = 0; i < dep_table->count; i++) {
+- clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
++ clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10;
+ clocks->data[i].latency_in_us = 0;
+ clocks->num_levels++;
+ }
+@@ -4200,7 +4200,7 @@ static int vega10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
+ }
+
+ for (i = 0; i < dep_table->count; i++) {
+- clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
++ clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10;
+ clocks->data[i].voltage_in_mv = (uint32_t)(table_info->vddc_lookup_table->
+ entries[dep_table->entries[i].vddInd].us_vdd);
+ clocks->num_levels++;
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+index 782e209..d685ce7 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+@@ -1576,7 +1576,7 @@ static int vega12_get_sclks(struct pp_hwmgr *hwmgr,
+
+ for (i = 0; i < ucount; i++) {
+ clocks->data[i].clocks_in_khz =
+- dpm_table->dpm_levels[i].value * 100;
++ dpm_table->dpm_levels[i].value * 1000;
+
+ clocks->data[i].latency_in_us = 0;
+ }
+@@ -1608,7 +1608,7 @@ static int vega12_get_memclocks(struct pp_hwmgr *hwmgr,
+
+ for (i = 0; i < ucount; i++) {
+ clocks->data[i].clocks_in_khz =
+- dpm_table->dpm_levels[i].value * 100;
++ dpm_table->dpm_levels[i].value * 1000;
+
+ clocks->data[i].latency_in_us =
+ data->mclk_latency_table.entries[i].latency =
+@@ -1638,7 +1638,7 @@ static int vega12_get_dcefclocks(struct pp_hwmgr *hwmgr,
+
+ for (i = 0; i < ucount; i++) {
+ clocks->data[i].clocks_in_khz =
+- dpm_table->dpm_levels[i].value * 100;
++ dpm_table->dpm_levels[i].value * 1000;
+
+ clocks->data[i].latency_in_us = 0;
+ }
+@@ -1666,7 +1666,7 @@ static int vega12_get_socclocks(struct pp_hwmgr *hwmgr,
+
+ for (i = 0; i < ucount; i++) {
+ clocks->data[i].clocks_in_khz =
+- dpm_table->dpm_levels[i].value * 100;
++ dpm_table->dpm_levels[i].value * 1000;
+
+ clocks->data[i].latency_in_us = 0;
+ }
+@@ -1838,7 +1838,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
+ return -1);
+ for (i = 0; i < clocks.num_levels; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+- i, clocks.data[i].clocks_in_khz / 100,
++ i, clocks.data[i].clocks_in_khz / 1000,
+ (clocks.data[i].clocks_in_khz == now) ? "*" : "");
+ break;
+
+--
+2.7.4
+
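As a worked example of the unit fix, a Raven/Vega10 table entry of 30000 is in
10 kHz units, so the new code reports 30000 * 10 = 300000 kHz (300 MHz) to the
Display component; the Vega12 DPM level values appear to be in MHz, so a level
of 1500 becomes 1500 * 1000 = 1500000 kHz, which is also why the print path
now divides by 1000 to show MHz.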
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4657-drm-amd-display-use-the-get_crtc-instead-of-get-exis.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4657-drm-amd-display-use-the-get_crtc-instead-of-get-exis.patch
new file mode 100644
index 00000000..b473c987
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4657-drm-amd-display-use-the-get_crtc-instead-of-get-exis.patch
@@ -0,0 +1,52 @@
+From 085726ac4e489fa5ef30a983db19cb8d4e9a25a9 Mon Sep 17 00:00:00 2001
+From: Mikita Lipski <mikita.lipski@amd.com>
+Date: Wed, 13 Jun 2018 14:43:16 -0400
+Subject: [PATCH 4657/5725] drm/amd/display - use the get_crtc instead of get
+ existing crtc
+
+When notify_freesync gets called, we add all the planes through
+drm_atomic_add_affected_planes, but we get a warning about a missing crtc
+state, which can potentially cause an issue.
+
+This patch proposes a simple workaround which allows safely duplicating an
+existing crtc_state if one doesn't yet exist.
+
+Signed-off-by: Mikita Lipski <mikita.lipski@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 14 +++++++++++++-
+ 1 file changed, 13 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 14d0cce..1451026 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -1606,12 +1606,24 @@ static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
+
+ retry:
+ drm_for_each_crtc(crtc, dev) {
++ struct drm_plane *plane;
++
+ ret = drm_atomic_add_affected_connectors(state, crtc);
+ if (ret)
+ goto fail;
+
+ /* TODO rework amdgpu_dm_commit_planes so we don't need this */
+- ret = drm_atomic_add_affected_planes(state, crtc);
++ WARN_ON(!drm_atomic_get_crtc_state(state, crtc));
++
++ drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
++ struct drm_plane_state *plane_state =
++ drm_atomic_get_plane_state(state, plane);
++
++ if (IS_ERR(plane_state)) {
++ ret = PTR_ERR(plane_state);
++ break;
++ }
++ }
+ if (ret)
+ goto fail;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4658-drm-amdgpu-add-new-DF-1.7-register-defs.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4658-drm-amdgpu-add-new-DF-1.7-register-defs.patch
new file mode 100644
index 00000000..cb0da0f1
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4658-drm-amdgpu-add-new-DF-1.7-register-defs.patch
@@ -0,0 +1,42 @@
+From 670b5c6fe4167f157dc3cabb5130db25fba68e50 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 10 May 2018 14:45:12 -0500
+Subject: [PATCH 4658/5725] drm/amdgpu: add new DF 1.7 register defs
+
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_offset.h | 4 ++++
+ drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_sh_mask.h | 4 ++++
+ 2 files changed, 8 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_offset.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_offset.h
+index 2b305dd..e6044e2 100644
+--- a/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_offset.h
++++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_offset.h
+@@ -30,4 +30,8 @@
+ #define mmDF_CS_AON0_DramBaseAddress0 0x0044
+ #define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX 0
+
++#define mmDF_CS_AON0_CoherentSlaveModeCtrlA0 0x0214
++#define mmDF_CS_AON0_CoherentSlaveModeCtrlA0_BASE_IDX 0
++
++
+ #endif
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_sh_mask.h
+index 2ba8497..a78c994 100644
+--- a/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_sh_mask.h
++++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_sh_mask.h
+@@ -45,4 +45,8 @@
+ #define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000700L
+ #define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L
+
++//DF_CS_AON0_CoherentSlaveModeCtrlA0
++#define DF_CS_AON0_CoherentSlaveModeCtrlA0__ForceParWrRMW__SHIFT 0x3
++#define DF_CS_AON0_CoherentSlaveModeCtrlA0__ForceParWrRMW_MASK 0x00000008L
++
+ #endif
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4659-drm-amdgpu-add-new-DF-callback-for-ECC-setup.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4659-drm-amdgpu-add-new-DF-callback-for-ECC-setup.patch
new file mode 100644
index 00000000..d2afb8b2
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4659-drm-amdgpu-add-new-DF-callback-for-ECC-setup.patch
@@ -0,0 +1,31 @@
+From 92f6afe8862b0c6e0bf55647d24e96b365913675 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 10 May 2018 14:59:31 -0500
+Subject: [PATCH 4659/5725] drm/amdgpu: add new DF callback for ECC setup
+
+The ForceParWrRMW setting needs to be enabled for ECC, but disabled
+when ECC is not enabled.
+
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index dbc5570..dbd7a8c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -1462,6 +1462,8 @@ struct amdgpu_df_funcs {
+ bool enable);
+ void (*get_clockgating_state)(struct amdgpu_device *adev,
+ u32 *flags);
++ void (*enable_ecc_force_par_wr_rmw)(struct amdgpu_device *adev,
++ bool enable);
+ };
+ /* Define the HW IP blocks will be used in driver , add more if necessary */
+ enum amd_hw_ip_block_type {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4660-drm-amdgpu-add-a-df-1.7-implementation-of-enable_ecc.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4660-drm-amdgpu-add-a-df-1.7-implementation-of-enable_ecc.patch
new file mode 100644
index 00000000..b186bf56
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4660-drm-amdgpu-add-a-df-1.7-implementation-of-enable_ecc.patch
@@ -0,0 +1,43 @@
+From 5c719823b691014064ccec9bf4276ac6aa3401c9 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 10 May 2018 15:06:55 -0500
+Subject: [PATCH 4660/5725] drm/amdgpu: add a df 1.7 implementation of
+ enable_ecc_force_par_wr_rmw
+
+Needed for proper memory setup depending on whether ECC is
+enabled on a particular board.
+
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/df_v1_7.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
+index 4ffda99..9935371 100644
+--- a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
++++ b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
+@@ -102,6 +102,13 @@ static void df_v1_7_get_clockgating_state(struct amdgpu_device *adev,
+ *flags |= AMD_CG_SUPPORT_DF_MGCG;
+ }
+
++static void df_v1_7_enable_ecc_force_par_wr_rmw(struct amdgpu_device *adev,
++ bool enable)
++{
++ WREG32_FIELD15(DF, 0, DF_CS_AON0_CoherentSlaveModeCtrlA0,
++ ForceParWrRMW, enable);
++}
++
+ const struct amdgpu_df_funcs df_v1_7_funcs = {
+ .init = df_v1_7_init,
+ .enable_broadcast_mode = df_v1_7_enable_broadcast_mode,
+@@ -109,4 +116,5 @@ const struct amdgpu_df_funcs df_v1_7_funcs = {
+ .get_hbm_channel_number = df_v1_7_get_hbm_channel_number,
+ .update_medium_grain_clock_gating = df_v1_7_update_medium_grain_clock_gating,
+ .get_clockgating_state = df_v1_7_get_clockgating_state,
++ .enable_ecc_force_par_wr_rmw = df_v1_7_enable_ecc_force_par_wr_rmw,
+ };
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4661-drm-amdgpu-gmc9-disable-partial-wr-rmw-if-ECC-is-not.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4661-drm-amdgpu-gmc9-disable-partial-wr-rmw-if-ECC-is-not.patch
new file mode 100644
index 00000000..80ac368f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4661-drm-amdgpu-gmc9-disable-partial-wr-rmw-if-ECC-is-not.patch
@@ -0,0 +1,32 @@
+From 9c1ba9daa7a50291ab825e16d0226c8267e498ab Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 10 May 2018 15:15:12 -0500
+Subject: [PATCH 4661/5725] drm/amdgpu/gmc9: disable partial wr rmw if ECC is
+ not enabled
+
+The vbios mistakenly sets this bit on some boards without ECC.
+This can lead to reduced performance in some workloads. Disable
+the bit if the board does not have ECC.
+
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index 1f3249b..ad65e57c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -674,6 +674,7 @@ static int gmc_v9_0_late_init(void *handle)
+ DRM_INFO("ECC is active.\n");
+ } else if (r == 0) {
+ DRM_INFO("ECC is not present.\n");
++ adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false);
+ } else {
+ DRM_ERROR("gmc_v9_0_ecc_available() failed. r: %d\n", r);
+ return r;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4662-Revert-drm-amd-display-Implement-dm_pp_get_clock_lev.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4662-Revert-drm-amd-display-Implement-dm_pp_get_clock_lev.patch
new file mode 100644
index 00000000..32097784
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4662-Revert-drm-amd-display-Implement-dm_pp_get_clock_lev.patch
@@ -0,0 +1,88 @@
+From ec4f3289f18aa3898bbccc7f44adcbc73f6c7374 Mon Sep 17 00:00:00 2001
+From: Kevin Wang <Kevin1.Wang@amd.com>
+Date: Fri, 13 Jul 2018 11:05:37 +0800
+Subject: [PATCH 4662/5725] Revert "drm/amd/display: Implement
+ dm_pp_get_clock_levels_by_type_with_latency"
+
+This reverts commit b342bd14b90f31f3864ca8e6dba4a7174d53c1f0.
+
+Conflicts:
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+
+Change-Id: Ibefed56c4ccc604294deffafd9a59458e0ebd8e8
+
+this feature is not work well,so disable it.
+next release driver will enable it.
+
+Signed-off-by: Kevin Wang <Kevin1.Wang@amd.com>
+Reviewed-by: Evan Quan <Evan.Quan@amd.com>
+---
+ .../drm/amd/display/amdgpu_dm/amdgpu_dm_services.c | 45 +---------------------
+ 1 file changed, 2 insertions(+), 43 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+index 5a33461..0229c7ed 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+@@ -234,33 +234,6 @@ static void pp_to_dc_clock_levels(
+ }
+ }
+
+-static void pp_to_dc_clock_levels_with_latency(
+- const struct pp_clock_levels_with_latency *pp_clks,
+- struct dm_pp_clock_levels_with_latency *clk_level_info,
+- enum dm_pp_clock_type dc_clk_type)
+-{
+- uint32_t i;
+-
+- if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
+- DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
+- DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
+- pp_clks->num_levels,
+- DM_PP_MAX_CLOCK_LEVELS);
+-
+- clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
+- } else
+- clk_level_info->num_levels = pp_clks->num_levels;
+-
+- DRM_DEBUG("DM_PPLIB: values for %s clock\n",
+- DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
+-
+- for (i = 0; i < clk_level_info->num_levels; i++) {
+- DRM_DEBUG("DM_PPLIB:\t %d\n", pp_clks->data[i].clocks_in_khz);
+- clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
+- clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us;
+- }
+-}
+-
+ bool dm_pp_get_clock_levels_by_type(
+ const struct dc_context *ctx,
+ enum dm_pp_clock_type clk_type,
+@@ -338,22 +311,8 @@ bool dm_pp_get_clock_levels_by_type_with_latency(
+ enum dm_pp_clock_type clk_type,
+ struct dm_pp_clock_levels_with_latency *clk_level_info)
+ {
+- struct amdgpu_device *adev = ctx->driver_context;
+- void *pp_handle = adev->powerplay.pp_handle;
+- struct pp_clock_levels_with_latency pp_clks = { 0 };
+- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+-
+- if (!pp_funcs || !pp_funcs->get_clock_by_type_with_latency)
+- return false;
+-
+- if (pp_funcs->get_clock_by_type_with_latency(pp_handle,
+- dc_to_pp_clock_type(clk_type),
+- &pp_clks))
+- return false;
+-
+- pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type);
+-
+- return true;
++ /* TODO: to be implemented */
++ return false;
+ }
+
+ bool dm_pp_get_clock_levels_by_type_with_voltage(
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4663-Revert-drm-amdgpu-band-aid-validating-VM-PTs.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4663-Revert-drm-amdgpu-band-aid-validating-VM-PTs.patch
new file mode 100644
index 00000000..fc7ff9b1
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4663-Revert-drm-amdgpu-band-aid-validating-VM-PTs.patch
@@ -0,0 +1,31 @@
+From 5c91e0c2198e17566bf5a9592101fba669315324 Mon Sep 17 00:00:00 2001
+From: Aaron Liu <aaron.liu@amd.com>
+Date: Thu, 12 Jul 2018 18:51:41 +0800
+Subject: [PATCH 4663/5725] Revert "drm/amdgpu: band aid validating VM PTs"
+
+This reverts commit 8c04c9cab535ab38a2ccf4d99ce172e75d6ce316.
+
+Reason for revert: the performance drop (5%-10%) is still present, ref SWDEV-156548
+
+Change-Id: Icaf9c1a98ddd56ab199af6e148c9286742acfad3
+Signed-off-by: Aaron Liu <aaron.liu@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index a74cd1d..3fb0340 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -1081,7 +1081,7 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
+ struct amdgpu_vm_bo_base,
+ vm_status);
+ bo_base->moved = false;
+- list_del_init(&bo_base->vm_status);
++ list_move(&bo_base->vm_status, &vm->idle);
+
+ bo = bo_base->bo->parent;
+ if (!bo)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4664-Revert-drm-amdgpu-move-VM-BOs-on-LRU-again.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4664-Revert-drm-amdgpu-move-VM-BOs-on-LRU-again.patch
new file mode 100644
index 00000000..0266ea02
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4664-Revert-drm-amdgpu-move-VM-BOs-on-LRU-again.patch
@@ -0,0 +1,94 @@
+From 22f79d79e3e3be23b039021829da496afd683045 Mon Sep 17 00:00:00 2001
+From: Aaron Liu <aaron.liu@amd.com>
+Date: Thu, 12 Jul 2018 19:26:06 +0800
+Subject: [PATCH 4664/5725] Revert "drm/amdgpu: move VM BOs on LRU again"
+
+This reverts commit 198dd159998f3bfb65852366dcb110e5f63734b3.
+
+Reason for revert: the performance drop (5%-10%) is still present, ref SWDEV-156548
+
+Change-Id: I008b0218b36fbd2f974d14aac231cd2a68bce7ac
+Signed-off-by: Aaron Liu <aaron.liu@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 28 +++++-----------------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 3 ---
+ 2 files changed, 5 insertions(+), 26 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 3fb0340..57dd137 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -309,19 +309,6 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ }
+ }
+
+- spin_lock(&glob->lru_lock);
+- list_for_each_entry(bo_base, &vm->idle, vm_status) {
+- struct amdgpu_bo *bo = bo_base->bo;
+-
+- if (!bo->parent)
+- continue;
+-
+- ttm_bo_move_to_lru_tail(&bo->tbo);
+- if (bo->shadow)
+- ttm_bo_move_to_lru_tail(&bo->shadow->tbo);
+- }
+- spin_unlock(&glob->lru_lock);
+-
+ return r;
+ }
+
+@@ -1081,7 +1068,7 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
+ struct amdgpu_vm_bo_base,
+ vm_status);
+ bo_base->moved = false;
+- list_move(&bo_base->vm_status, &vm->idle);
++ list_del_init(&bo_base->vm_status);
+
+ bo = bo_base->bo->parent;
+ if (!bo)
+@@ -1728,14 +1715,10 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
+ * the evicted list so that it gets validated again on the
+ * next command submission.
+ */
+- if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
+- uint32_t mem_type = bo->tbo.mem.mem_type;
+-
+- if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type)))
+- list_add_tail(&bo_va->base.vm_status, &vm->evicted);
+- else
+- list_add(&bo_va->base.vm_status, &vm->idle);
+- }
++ if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv &&
++ !(bo->preferred_domains &
++ amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type)))
++ list_add_tail(&bo_va->base.vm_status, &vm->evicted);
+
+ list_splice_init(&bo_va->invalids, &bo_va->valids);
+ bo_va->cleared = clear;
+@@ -2595,7 +2578,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ INIT_LIST_HEAD(&vm->relocated);
+ spin_lock_init(&vm->moved_lock);
+ INIT_LIST_HEAD(&vm->moved);
+- INIT_LIST_HEAD(&vm->idle);
+ INIT_LIST_HEAD(&vm->freed);
+
+ /* create scheduler entity for page table updates */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+index 617d84b..7a4c967 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+@@ -181,9 +181,6 @@ struct amdgpu_vm {
+ struct list_head moved;
+ spinlock_t moved_lock;
+
+- /* All BOs of this VM not currently in the state machine */
+- struct list_head idle;
+-
+ /* BO mappings freed, but not yet updated in the PT */
+ struct list_head freed;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4665-drm-amdgpu-Make-sure-IB-tests-flushed-after-IP-resum.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4665-drm-amdgpu-Make-sure-IB-tests-flushed-after-IP-resum.patch
new file mode 100644
index 00000000..91875b0b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4665-drm-amdgpu-Make-sure-IB-tests-flushed-after-IP-resum.patch
@@ -0,0 +1,34 @@
+From 3b037896917e0d2315a8c6b51e9aff0ec58e8339 Mon Sep 17 00:00:00 2001
+From: Leo Liu <leo.liu@amd.com>
+Date: Fri, 13 Jul 2018 11:26:28 -0400
+Subject: [PATCH 4665/5725] drm/amdgpu: Make sure IB tests flushed after IP
+ resume
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Fixes: 2c773de2 (drm/amdgpu: defer test IBs on the rings at boot (V3))
+
+Signed-off-by: Leo Liu <leo.liu@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index d02f2fa..b39192f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -2792,6 +2792,9 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
+ if (r)
+ return r;
+
++ /* Make sure IB tests flushed */
++ flush_delayed_work(&adev->late_init_work);
++
+ /* blat the mode back in */
+ if (fbcon) {
+ if (!amdgpu_device_has_dc_support(adev)) {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4666-drm-amdgpu-gfx9-Update-golden-settings-for-vg10.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4666-drm-amdgpu-gfx9-Update-golden-settings-for-vg10.patch
new file mode 100644
index 00000000..e51e1069
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4666-drm-amdgpu-gfx9-Update-golden-settings-for-vg10.patch
@@ -0,0 +1,45 @@
+From 82f35afbb3d32a9f5a42a0d292b6c03db3634927 Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Tue, 17 Jul 2018 14:54:23 +0800
+Subject: [PATCH 4666/5725] drm/amdgpu/gfx9: Update golden settings for vg10.
+
+Add some UTCL registers' golden settings.
+
+Change-Id: I1aea720d4527604baf68bf82d432d93f35dd0cd4
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Tested-by: Kevin Wang <Kevin1.Wang@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 4527150..7a881ef 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -102,11 +102,22 @@ static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
+ {
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0x0000f000, 0x00012107),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x2a114042),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00008000, 0x00048000),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
+- SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800)
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
+ };
+
+ static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4667-drm-amd-display-Fix-Vega10-black-screen-after-mode-c.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4667-drm-amd-display-Fix-Vega10-black-screen-after-mode-c.patch
new file mode 100644
index 00000000..49d85c5a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4667-drm-amd-display-Fix-Vega10-black-screen-after-mode-c.patch
@@ -0,0 +1,77 @@
+From b4f0867610fc91e8fc7fc73625e438c096baeef7 Mon Sep 17 00:00:00 2001
+From: "Jerry (Fangzhi) Zuo" <Jerry.Zuo@amd.com>
+Date: Tue, 17 Jul 2018 14:34:23 -0400
+Subject: [PATCH 4667/5725] drm/amd/display: Fix Vega10 black screen after mode
+ change
+
+Do not update clocks if no stream is showing up in the context.
+
+Signed-off-by: Jerry (Fangzhi) Zuo <Jerry.Zuo@amd.com>
+Reviewed-by: Hersen Wu <hersenxs.wu@amd.com>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 2 +-
+ drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h | 5 +++++
+ drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c | 10 ++++++++++
+ 3 files changed, 16 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index 4cd6bc0..383b63d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -2661,7 +2661,7 @@ static void pplib_apply_display_requirements(
+ dc->prev_display_config = *pp_display_cfg;
+ }
+
+-static void dce110_set_bandwidth(
++void dce110_set_bandwidth(
+ struct dc *dc,
+ struct dc_state *context,
+ bool decrease_allowed)
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+index f48d5a6..1782757 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+@@ -64,6 +64,11 @@ void dce110_fill_display_configs(
+ const struct dc_state *context,
+ struct dm_pp_display_configuration *pp_display_cfg);
+
++void dce110_set_bandwidth(
++ struct dc *dc,
++ struct dc_state *context,
++ bool decrease_allowed);
++
+ uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context);
+
+ void dp_receiver_power_ctrl(struct dc_link *link, bool on);
+diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
+index e96ff86..5853522 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
+@@ -244,7 +244,16 @@ static void dce120_update_dchub(
+ dh_data->dchub_info_valid = false;
+ }
+
++static void dce120_set_bandwidth(
++ struct dc *dc,
++ struct dc_state *context,
++ bool decrease_allowed)
++{
++ if (context->stream_count <= 0)
++ return;
+
++ dce110_set_bandwidth(dc, context, decrease_allowed);
++}
+
+ void dce120_hw_sequencer_construct(struct dc *dc)
+ {
+@@ -254,5 +263,6 @@ void dce120_hw_sequencer_construct(struct dc *dc)
+ dce110_hw_sequencer_construct(dc);
+ dc->hwss.enable_display_power_gating = dce120_enable_display_power_gating;
+ dc->hwss.update_dchub = dce120_update_dchub;
++ dc->hwss.set_bandwidth = dce120_set_bandwidth;
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4668-drm-amd-pp-Read-vbios-vddc-limit-before-use-them.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4668-drm-amd-pp-Read-vbios-vddc-limit-before-use-them.patch
new file mode 100644
index 00000000..911961c9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4668-drm-amd-pp-Read-vbios-vddc-limit-before-use-them.patch
@@ -0,0 +1,46 @@
+From fd4be4b072d029c3af9f7aad58031d219aa0bdfe Mon Sep 17 00:00:00 2001
+From: Rex Zhu <rex.zhu@amd.com>
+Date: Thu, 19 Jul 2018 16:21:42 +0800
+Subject: [PATCH 4668/5725] drm/amd/pp: Read vbios vddc limit before use them
+
+Read the vddc limits from the vbios before they are used.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+index 843dba9..79f96a1 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+@@ -295,7 +295,15 @@ static int vega10_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
+ struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table;
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_table[3];
+ struct phm_ppt_v1_clock_voltage_dependency_table *od_table[3];
++ struct pp_atomfwctrl_avfs_parameters avfs_params = {0};
+ uint32_t i;
++ int result;
++
++ result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
++ if (!result) {
++ data->odn_dpm_table.max_vddc = avfs_params.ulMaxVddc;
++ data->odn_dpm_table.min_vddc = avfs_params.ulMinVddc;
++ }
+
+ od_lookup_table = &odn_table->vddc_lookup_table;
+ vddc_lookup_table = table_info->vddc_lookup_table;
+@@ -2078,9 +2086,6 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
+ if (data->smu_features[GNLD_AVFS].supported) {
+ result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
+ if (!result) {
+- data->odn_dpm_table.max_vddc = avfs_params.ulMaxVddc;
+- data->odn_dpm_table.min_vddc = avfs_params.ulMinVddc;
+-
+ pp_table->MinVoltageVid = (uint8_t)
+ convert_to_vid((uint16_t)(avfs_params.ulMinVddc));
+ pp_table->MaxVoltageVid = (uint8_t)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4669-drm-amd-pp-Update-clk-with-od-setting-when-set-power.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4669-drm-amd-pp-Update-clk-with-od-setting-when-set-power.patch
new file mode 100644
index 00000000..b1b37ff1
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4669-drm-amd-pp-Update-clk-with-od-setting-when-set-power.patch
@@ -0,0 +1,48 @@
+From 13edfac5ad0e424772244f3b03974020c110835b Mon Sep 17 00:00:00 2001
+From: Rex Zhu <rex.zhu@amd.com>
+Date: Thu, 19 Jul 2018 16:32:05 +0800
+Subject: [PATCH 4669/5725] drm/amd/pp: Update clk with od setting when set
+ power state
+
+This fixes the issue where, after resume from S3, the user's OD
+settings were reverted to default.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+index 79f96a1..8be5a71 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+@@ -3268,10 +3268,25 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
+ {
+ int result = 0;
+ struct vega10_hwmgr *data = hwmgr->backend;
++ struct vega10_dpm_table *dpm_table = &data->dpm_table;
++ struct vega10_odn_dpm_table *odn_table = &data->odn_dpm_table;
++ struct vega10_odn_clock_voltage_dependency_table *odn_clk_table = &odn_table->vdd_dep_on_sclk;
++ int count;
+
+ if (!data->need_update_dpm_table)
+ return 0;
+
++ if (hwmgr->od_enabled && data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
++ for (count = 0; count < dpm_table->gfx_table.count; count++)
++ dpm_table->gfx_table.dpm_levels[count].value = odn_clk_table->entries[count].clk;
++ }
++
++ odn_clk_table = &odn_table->vdd_dep_on_mclk;
++ if (hwmgr->od_enabled && data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
++ for (count = 0; count < dpm_table->mem_table.count; count++)
++ dpm_table->mem_table.dpm_levels[count].value = odn_clk_table->entries[count].clk;
++ }
++
+ if (data->need_update_dpm_table &
+ (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK + DPMTABLE_UPDATE_SOCCLK)) {
+ result = vega10_populate_all_graphic_levels(hwmgr);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4670-drm-amdgpu-Make-struct-amdgpu_atif-private-to-amdgpu.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4670-drm-amdgpu-Make-struct-amdgpu_atif-private-to-amdgpu.patch
new file mode 100644
index 00000000..0fbc3999
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4670-drm-amdgpu-Make-struct-amdgpu_atif-private-to-amdgpu.patch
@@ -0,0 +1,194 @@
+From d25afa620bfe37623b8a9c12b95f0e06381bc4ef Mon Sep 17 00:00:00 2001
+From: Lyude Paul <lyude@redhat.com>
+Date: Mon, 25 Jun 2018 21:09:04 -0400
+Subject: [PATCH 4670/5725] drm/amdgpu: Make struct amdgpu_atif private to
+ amdgpu_acpi.c
+
+Currently, there is nothing in amdgpu that actually uses these structs
+other than amdgpu_acpi.c. Additionally, since we're about to start
+saving the correct ACPI handle to use for calling ATIF in this struct
+this saves us from having to handle making sure that the acpi_handle
+(and by proxy, the type definition for acpi_handle and all of the other
+acpi headers) doesn't need to be included within the amdgpu_drv struct
+itself. This follows the example set by amdgpu_atpx_handler.c.
+
+Change-Id: I921f6d54341ded383c5d587137124bdfb969a800
+Signed-off-by: Lyude Paul <lyude@redhat.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 40 ++---------------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 54 ++++++++++++++++++++++++++++++--
+ 2 files changed, 53 insertions(+), 41 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index dbd7a8c..c76a95c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -193,6 +193,7 @@ struct amdgpu_irq_src;
+ struct amdgpu_fpriv;
+ struct amdgpu_bo_va_mapping;
+ struct kfd_vm_fault_info;
++struct amdgpu_atif;
+
+ enum amdgpu_cp_irq {
+ AMDGPU_CP_IRQ_GFX_EOP = 0,
+@@ -1307,43 +1308,6 @@ struct amdgpu_vram_scratch {
+ /*
+ * ACPI
+ */
+-struct amdgpu_atif_notification_cfg {
+- bool enabled;
+- int command_code;
+-};
+-
+-struct amdgpu_atif_notifications {
+- bool display_switch;
+- bool expansion_mode_change;
+- bool thermal_state;
+- bool forced_power_state;
+- bool system_power_state;
+- bool display_conf_change;
+- bool px_gfx_switch;
+- bool brightness_change;
+- bool dgpu_display_event;
+-};
+-
+-struct amdgpu_atif_functions {
+- bool system_params;
+- bool sbios_requests;
+- bool select_active_disp;
+- bool lid_state;
+- bool get_tv_standard;
+- bool set_tv_standard;
+- bool get_panel_expansion_mode;
+- bool set_panel_expansion_mode;
+- bool temperature_change;
+- bool graphics_device_types;
+-};
+-
+-struct amdgpu_atif {
+- struct amdgpu_atif_notifications notifications;
+- struct amdgpu_atif_functions functions;
+- struct amdgpu_atif_notification_cfg notification_cfg;
+- struct amdgpu_encoder *encoder_for_bl;
+-};
+-
+ struct amdgpu_atcs_functions {
+ bool get_ext_state;
+ bool pcie_perf_req;
+@@ -1526,7 +1490,7 @@ struct amdgpu_device {
+ #if defined(CONFIG_DEBUG_FS)
+ struct dentry *debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
+ #endif
+- struct amdgpu_atif atif;
++ struct amdgpu_atif *atif;
+ struct amdgpu_atcs atcs;
+ struct mutex srbm_mutex;
+ /* GRBM index mutex. Protects concurrent access to GRBM index */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+index 8fa850a..22c7e8e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+@@ -34,6 +34,43 @@
+ #include "amd_acpi.h"
+ #include "atom.h"
+
++struct amdgpu_atif_notification_cfg {
++ bool enabled;
++ int command_code;
++};
++
++struct amdgpu_atif_notifications {
++ bool display_switch;
++ bool expansion_mode_change;
++ bool thermal_state;
++ bool forced_power_state;
++ bool system_power_state;
++ bool display_conf_change;
++ bool px_gfx_switch;
++ bool brightness_change;
++ bool dgpu_display_event;
++};
++
++struct amdgpu_atif_functions {
++ bool system_params;
++ bool sbios_requests;
++ bool select_active_disp;
++ bool lid_state;
++ bool get_tv_standard;
++ bool set_tv_standard;
++ bool get_panel_expansion_mode;
++ bool set_panel_expansion_mode;
++ bool temperature_change;
++ bool graphics_device_types;
++};
++
++struct amdgpu_atif {
++ struct amdgpu_atif_notifications notifications;
++ struct amdgpu_atif_functions functions;
++ struct amdgpu_atif_notification_cfg notification_cfg;
++ struct amdgpu_encoder *encoder_for_bl;
++};
++
+ /* Call the ATIF method
+ */
+ /**
+@@ -292,7 +329,7 @@ static int amdgpu_atif_get_sbios_requests(acpi_handle handle,
+ static int amdgpu_atif_handler(struct amdgpu_device *adev,
+ struct acpi_bus_event *event)
+ {
+- struct amdgpu_atif *atif = &adev->atif;
++ struct amdgpu_atif *atif = adev->atif;
+ struct atif_sbios_requests req;
+ acpi_handle handle;
+ int count;
+@@ -303,7 +340,8 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
+ if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
+ return NOTIFY_DONE;
+
+- if (!atif->notification_cfg.enabled ||
++ if (!atif ||
++ !atif->notification_cfg.enabled ||
+ event->type != atif->notification_cfg.command_code)
+ /* Not our event */
+ return NOTIFY_DONE;
+@@ -642,7 +680,7 @@ static int amdgpu_acpi_event(struct notifier_block *nb,
+ int amdgpu_acpi_init(struct amdgpu_device *adev)
+ {
+ acpi_handle handle;
+- struct amdgpu_atif *atif = &adev->atif;
++ struct amdgpu_atif *atif;
+ struct amdgpu_atcs *atcs = &adev->atcs;
+ int ret;
+
+@@ -659,11 +697,19 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
+ }
+
+ /* Call the ATIF method */
++ atif = kzalloc(sizeof(*atif), GFP_KERNEL);
++ if (!atif) {
++ DRM_WARN("Not enough memory to initialize ATIF\n");
++ goto out;
++ }
++
+ ret = amdgpu_atif_verify_interface(handle, atif);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Call to ATIF verify_interface failed: %d\n", ret);
++ kfree(atif);
+ goto out;
+ }
++ adev->atif = atif;
+
+ if (atif->notifications.brightness_change) {
+ struct drm_encoder *tmp;
+@@ -720,4 +766,6 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
+ void amdgpu_acpi_fini(struct amdgpu_device *adev)
+ {
+ unregister_acpi_notifier(&adev->acpi_nb);
++ if (adev->atif)
++ kfree(adev->atif);
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4671-drm-amdgpu-s-disp_detetion_ports-disp_detection_port.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4671-drm-amdgpu-s-disp_detetion_ports-disp_detection_port.patch
new file mode 100644
index 00000000..af707137
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4671-drm-amdgpu-s-disp_detetion_ports-disp_detection_port.patch
@@ -0,0 +1,40 @@
+From 5365c6f700ca6b33ab8f4cb4b2119061e4e6df05 Mon Sep 17 00:00:00 2001
+From: Lyude Paul <lyude@redhat.com>
+Date: Mon, 25 Jun 2018 21:09:05 -0400
+Subject: [PATCH 4671/5725] drm/amdgpu:
+ s/disp_detetion_ports/disp_detection_ports/
+
+Fix typo.
+
+Reviewed-by: Jim Qu <Jim.Qu@amd.com>
+Signed-off-by: Lyude Paul <lyude@redhat.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+index 7f46d1a..7ddf169 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+@@ -32,7 +32,7 @@ struct amdgpu_atpx_functions {
+ bool switch_start;
+ bool switch_end;
+ bool disp_connectors_mapping;
+- bool disp_detetion_ports;
++ bool disp_detection_ports;
+ };
+
+ struct amdgpu_atpx {
+@@ -156,7 +156,7 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas
+ f->switch_start = mask & ATPX_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION_SUPPORTED;
+ f->switch_end = mask & ATPX_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION_SUPPORTED;
+ f->disp_connectors_mapping = mask & ATPX_GET_DISPLAY_CONNECTORS_MAPPING_SUPPORTED;
+- f->disp_detetion_ports = mask & ATPX_GET_DISPLAY_DETECTION_PORTS_SUPPORTED;
++ f->disp_detection_ports = mask & ATPX_GET_DISPLAY_DETECTION_PORTS_SUPPORTED;
+ }
+
+ /**
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4672-drm-amdgpu-Add-amdgpu_atpx_get_dhandle.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4672-drm-amdgpu-Add-amdgpu_atpx_get_dhandle.patch
new file mode 100644
index 00000000..e48e1b1b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4672-drm-amdgpu-Add-amdgpu_atpx_get_dhandle.patch
@@ -0,0 +1,54 @@
+From e86917fba3bd41305582a8203c3567494a20a22c Mon Sep 17 00:00:00 2001
+From: Lyude Paul <lyude@redhat.com>
+Date: Mon, 25 Jun 2018 21:09:06 -0400
+Subject: [PATCH 4672/5725] drm/amdgpu: Add amdgpu_atpx_get_dhandle()
+
+Since it seems that some vendors are storing the ATIF ACPI methods under
+the same handle that ATPX lives under instead of the device's own
+handle, we're going to need to be able to retrieve this handle later so
+we can probe for ATIF there.
+
+Signed-off-by: Lyude Paul <lyude@redhat.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 6 ++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 6 ++++++
+ 2 files changed, 12 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index c76a95c..ad566b7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -1926,6 +1926,12 @@ static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false;
+ static inline bool amdgpu_has_atpx(void) { return false; }
+ #endif
+
++#if defined(CONFIG_VGA_SWITCHEROO) && defined(CONFIG_ACPI)
++void *amdgpu_atpx_get_dhandle(void);
++#else
++static inline void *amdgpu_atpx_get_dhandle(void) { return NULL; }
++#endif
++
+ /*
+ * KMS
+ */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+index 7ddf169..8db67d4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+@@ -90,6 +90,12 @@ bool amdgpu_atpx_dgpu_req_power_for_displays(void) {
+ return amdgpu_atpx_priv.atpx.dgpu_req_power_for_displays;
+ }
+
++#if defined(CONFIG_ACPI)
++void *amdgpu_atpx_get_dhandle(void) {
++ return amdgpu_atpx_priv.dhandle;
++}
++#endif
++
+ /**
+ * amdgpu_atpx_call - call an ATPX method
+ *
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4673-drm-amdgpu-Dynamically-probe-for-ATIF-handle-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4673-drm-amdgpu-Dynamically-probe-for-ATIF-handle-v2.patch
new file mode 100644
index 00000000..d4d629d8
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4673-drm-amdgpu-Dynamically-probe-for-ATIF-handle-v2.patch
@@ -0,0 +1,234 @@
+From d928e306d055f05452f92a6b9cf6987969cee6a6 Mon Sep 17 00:00:00 2001
+From: Lyude Paul <lyude@redhat.com>
+Date: Mon, 25 Jun 2018 21:09:07 -0400
+Subject: [PATCH 4673/5725] drm/amdgpu: Dynamically probe for ATIF handle (v2)
+
+The other day I was testing one of the HP laptops at my office with an
+i915/amdgpu hybrid setup and noticed that hotplugging was non-functional
+on almost all of the display outputs. I eventually discovered that all
+of the external outputs were connected to the amdgpu device instead of
+i915, and that the hotplugs weren't being detected so long as the GPU
+was in runtime suspend. After some talking with folks at AMD, I learned
+that amdgpu is actually supposed to support hotplug detection in runtime
+suspend so long as the OEM has implemented it properly in the firmware.
+
+On this HP ZBook 15 G4 (the machine in question), amdgpu wasn't managing
+to find the ATIF handle at all despite the fact that I could see acpi
+events being sent in response to any hotplugging. After going through
+dumps of the firmware, I discovered that this machine did in fact
+support ATIF, but that its ATIF method lived in an entirely different
+namespace than this device's handle (the device handle was
+\_SB_.PCI0.PEG0.PEGP, but ATIF lives in ATPX's handle at
+\_SB_.PCI0.GFX0).
+
+So, fix this by probing ATPX's ACPI parent's namespace if we can't find
+ATIF elsewhere, along with storing a pointer to the proper handle to use
+for ATIF and using that instead of the device's handle.
+
+This fixes HPD detection while in runtime suspend for this ZBook!
+
+v2: Update the comment to reflect how the namespaces are arranged
+based on the system configuration. (Alex)
+
+Signed-off-by: Lyude Paul <lyude@redhat.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 79 +++++++++++++++++++++++---------
+ 1 file changed, 58 insertions(+), 21 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+index 22c7e8e..0d8c3fc 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+@@ -65,6 +65,8 @@ struct amdgpu_atif_functions {
+ };
+
+ struct amdgpu_atif {
++ acpi_handle handle;
++
+ struct amdgpu_atif_notifications notifications;
+ struct amdgpu_atif_functions functions;
+ struct amdgpu_atif_notification_cfg notification_cfg;
+@@ -83,8 +85,9 @@ struct amdgpu_atif {
+ * Executes the requested ATIF function (all asics).
+ * Returns a pointer to the acpi output buffer.
+ */
+-static union acpi_object *amdgpu_atif_call(acpi_handle handle, int function,
+- struct acpi_buffer *params)
++static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif,
++ int function,
++ struct acpi_buffer *params)
+ {
+ acpi_status status;
+ union acpi_object atif_arg_elements[2];
+@@ -107,7 +110,8 @@ static union acpi_object *amdgpu_atif_call(acpi_handle handle, int function,
+ atif_arg_elements[1].integer.value = 0;
+ }
+
+- status = acpi_evaluate_object(handle, "ATIF", &atif_arg, &buffer);
++ status = acpi_evaluate_object(atif->handle, NULL, &atif_arg,
++ &buffer);
+
+ /* Fail only if calling the method fails and ATIF is supported */
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+@@ -178,15 +182,14 @@ static void amdgpu_atif_parse_functions(struct amdgpu_atif_functions *f, u32 mas
+ * (all asics).
+ * returns 0 on success, error on failure.
+ */
+-static int amdgpu_atif_verify_interface(acpi_handle handle,
+- struct amdgpu_atif *atif)
++static int amdgpu_atif_verify_interface(struct amdgpu_atif *atif)
+ {
+ union acpi_object *info;
+ struct atif_verify_interface output;
+ size_t size;
+ int err = 0;
+
+- info = amdgpu_atif_call(handle, ATIF_FUNCTION_VERIFY_INTERFACE, NULL);
++ info = amdgpu_atif_call(atif, ATIF_FUNCTION_VERIFY_INTERFACE, NULL);
+ if (!info)
+ return -EIO;
+
+@@ -213,6 +216,35 @@ static int amdgpu_atif_verify_interface(acpi_handle handle,
+ return err;
+ }
+
++static acpi_handle amdgpu_atif_probe_handle(acpi_handle dhandle)
++{
++ acpi_handle handle = NULL;
++ char acpi_method_name[255] = { 0 };
++ struct acpi_buffer buffer = { sizeof(acpi_method_name), acpi_method_name };
++ acpi_status status;
++
++ /* For PX/HG systems, ATIF and ATPX are in the iGPU's namespace, on dGPU only
++ * systems, ATIF is in the dGPU's namespace.
++ */
++ status = acpi_get_handle(dhandle, "ATIF", &handle);
++ if (ACPI_SUCCESS(status))
++ goto out;
++
++ if (amdgpu_has_atpx()) {
++ status = acpi_get_handle(amdgpu_atpx_get_dhandle(), "ATIF",
++ &handle);
++ if (ACPI_SUCCESS(status))
++ goto out;
++ }
++
++ DRM_DEBUG_DRIVER("No ATIF handle found\n");
++ return NULL;
++out:
++ acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
++ DRM_DEBUG_DRIVER("Found ATIF handle %s\n", acpi_method_name);
++ return handle;
++}
++
+ /**
+ * amdgpu_atif_get_notification_params - determine notify configuration
+ *
+@@ -225,15 +257,16 @@ static int amdgpu_atif_verify_interface(acpi_handle handle,
+ * where n is specified in the result if a notifier is used.
+ * Returns 0 on success, error on failure.
+ */
+-static int amdgpu_atif_get_notification_params(acpi_handle handle,
+- struct amdgpu_atif_notification_cfg *n)
++static int amdgpu_atif_get_notification_params(struct amdgpu_atif *atif)
+ {
+ union acpi_object *info;
++ struct amdgpu_atif_notification_cfg *n = &atif->notification_cfg;
+ struct atif_system_params params;
+ size_t size;
+ int err = 0;
+
+- info = amdgpu_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS, NULL);
++ info = amdgpu_atif_call(atif, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS,
++ NULL);
+ if (!info) {
+ err = -EIO;
+ goto out;
+@@ -287,14 +320,15 @@ static int amdgpu_atif_get_notification_params(acpi_handle handle,
+ * (all asics).
+ * Returns 0 on success, error on failure.
+ */
+-static int amdgpu_atif_get_sbios_requests(acpi_handle handle,
+- struct atif_sbios_requests *req)
++static int amdgpu_atif_get_sbios_requests(struct amdgpu_atif *atif,
++ struct atif_sbios_requests *req)
+ {
+ union acpi_object *info;
+ size_t size;
+ int count = 0;
+
+- info = amdgpu_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS, NULL);
++ info = amdgpu_atif_call(atif, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS,
++ NULL);
+ if (!info)
+ return -EIO;
+
+@@ -327,11 +361,10 @@ static int amdgpu_atif_get_sbios_requests(acpi_handle handle,
+ * Returns NOTIFY code
+ */
+ static int amdgpu_atif_handler(struct amdgpu_device *adev,
+- struct acpi_bus_event *event)
++ struct acpi_bus_event *event)
+ {
+ struct amdgpu_atif *atif = adev->atif;
+ struct atif_sbios_requests req;
+- acpi_handle handle;
+ int count;
+
+ DRM_DEBUG_DRIVER("event, device_class = %s, type = %#x\n",
+@@ -347,8 +380,7 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
+ return NOTIFY_DONE;
+
+ /* Check pending SBIOS requests */
+- handle = ACPI_HANDLE(&adev->pdev->dev);
+- count = amdgpu_atif_get_sbios_requests(handle, &req);
++ count = amdgpu_atif_get_sbios_requests(atif, &req);
+
+ if (count <= 0)
+ return NOTIFY_DONE;
+@@ -679,7 +711,7 @@ static int amdgpu_acpi_event(struct notifier_block *nb,
+ */
+ int amdgpu_acpi_init(struct amdgpu_device *adev)
+ {
+- acpi_handle handle;
++ acpi_handle handle, atif_handle;
+ struct amdgpu_atif *atif;
+ struct amdgpu_atcs *atcs = &adev->atcs;
+ int ret;
+@@ -696,14 +728,20 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
+ DRM_DEBUG_DRIVER("Call to ATCS verify_interface failed: %d\n", ret);
+ }
+
+- /* Call the ATIF method */
++ /* Probe for ATIF, and initialize it if found */
++ atif_handle = amdgpu_atif_probe_handle(handle);
++ if (!atif_handle)
++ goto out;
++
+ atif = kzalloc(sizeof(*atif), GFP_KERNEL);
+ if (!atif) {
+ DRM_WARN("Not enough memory to initialize ATIF\n");
+ goto out;
+ }
++ atif->handle = atif_handle;
+
+- ret = amdgpu_atif_verify_interface(handle, atif);
++ /* Call the ATIF method */
++ ret = amdgpu_atif_verify_interface(atif);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Call to ATIF verify_interface failed: %d\n", ret);
+ kfree(atif);
+@@ -739,8 +777,7 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
+ }
+
+ if (atif->functions.system_params) {
+- ret = amdgpu_atif_get_notification_params(handle,
+- &atif->notification_cfg);
++ ret = amdgpu_atif_get_notification_params(atif);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Call to GET_SYSTEM_PARAMS failed: %d\n",
+ ret);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4674-drm-amdgpu-Grab-put-runtime-PM-references-in-atomic_.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4674-drm-amdgpu-Grab-put-runtime-PM-references-in-atomic_.patch
new file mode 100644
index 00000000..0b5fc1d5
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4674-drm-amdgpu-Grab-put-runtime-PM-references-in-atomic_.patch
@@ -0,0 +1,70 @@
+From 1f84d22a2744def4a3fea7b227b566c00c004971 Mon Sep 17 00:00:00 2001
+From: Lyude Paul <lyude@redhat.com>
+Date: Mon, 4 Jun 2018 15:35:03 -0400
+Subject: [PATCH 4674/5725] drm/amdgpu: Grab/put runtime PM references in
+ atomic_commit_tail()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+So, unfortunately I recently made the discovery that in the upstream
+kernel, the only reason that amdgpu is not currently suffering from
+issues with runtime PM putting the GPU into suspend while it's driving
+displays is due to the fact that on most prime systems, we have sound
+devices associated with the GPU that hold their own runtime PM ref for
+the GPU.
+
+What this means however, is that in the event that there isn't any kind
+of sound device active (which can easily be reproduced by building a
+kernel with sound drivers disabled), the GPU will fall asleep even when
+there's displays active. This appears to be in part due to the fact that
+amdgpu has never actually relied on its rpm_idle() function to be
+the only thing keeping it running, and normally grabs its own power
+references whenever there are displays active (as can be seen with the
+original pre-DC codepath in amdgpu_display_crtc_set_config() in
+amdgpu_display.c). This means it's very likely that this bug was
+introduced during the switch over to DC.
+
+So to fix this, we start grabbing runtime PM references every time we
+enable a previously disabled CRTC in atomic_commit_tail(). This appears
+to be the correct solution, as it matches up with what i915 does in
+i915/intel_runtime_pm.c.
+
+The one side effect of this is that we ignore the variable that the
+pre-DC code used to use for tracking when it needed runtime PM refs,
+adev->have_disp_power_ref. This is mainly because there's no way for a
+driver to tell whether or not all of its CRTCs are enabled or disabled
+when we've begun committing an atomic state, as there may be CRTC
+commits happening in parallel that aren't contained within the atomic
+state being committed. So, it's safer to just get/put a reference for
+each CRTC being enabled or disabled in the new atomic state.
+
+Signed-off-by: Lyude Paul <lyude@redhat.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+
+Conflicts:
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+
+Change-Id: I726d59525f00acfd1b13501271f0feb23c0d1af5
+Signed-off-by: Jim Qu <Jim.Qu@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 1451026..95ab236 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -4605,6 +4605,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+ * displays anymore
+ */
+ pm_runtime_mark_last_busy(dev->dev);
++
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ if (old_crtc_state->active && !new_crtc_state->active)
+ pm_runtime_put_autosuspend(dev->dev);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4675-drm-amdgpu-Count-disabled-CRTCs-in-commit-tail-earli.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4675-drm-amdgpu-Count-disabled-CRTCs-in-commit-tail-earli.patch
new file mode 100644
index 00000000..57f6185b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4675-drm-amdgpu-Count-disabled-CRTCs-in-commit-tail-earli.patch
@@ -0,0 +1,196 @@
+From e41ea6dc264ca01bf5693c43155e2908f5736fd3 Mon Sep 17 00:00:00 2001
+From: Lyude Paul <lyude@redhat.com>
+Date: Thu, 21 Jun 2018 16:48:26 -0400
+Subject: [PATCH 4675/5725] drm/amdgpu: Count disabled CRTCs in commit tail
+ earlier
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This fixes a regression I accidentally introduced that was picked up by
+kasan, where we were checking the CRTC atomic states after DRM's helpers
+had already freed them. Example:
+
+==================================================================
+BUG: KASAN: use-after-free in amdgpu_dm_atomic_commit_tail.cold.50+0x13d/0x15a [amdgpu]
+Read of size 1 at addr ffff8803a697b071 by task kworker/u16:0/7
+
+CPU: 7 PID: 7 Comm: kworker/u16:0 Tainted: G O 4.18.0-rc1Lyude-Upstream+ #1
+Hardware name: HP HP ZBook 15 G4/8275, BIOS P70 Ver. 01.21 05/02/2018
+Workqueue: events_unbound commit_work [drm_kms_helper]
+Call Trace:
+ dump_stack+0xc1/0x169
+ ? dump_stack_print_info.cold.1+0x42/0x42
+ ? kmsg_dump_rewind_nolock+0xd9/0xd9
+ ? printk+0x9f/0xc5
+ ? amdgpu_dm_atomic_commit_tail.cold.50+0x13d/0x15a [amdgpu]
+ print_address_description+0x6c/0x23c
+ ? amdgpu_dm_atomic_commit_tail.cold.50+0x13d/0x15a [amdgpu]
+ kasan_report.cold.6+0x241/0x2fd
+ amdgpu_dm_atomic_commit_tail.cold.50+0x13d/0x15a [amdgpu]
+ ? commit_planes_to_stream.constprop.45+0x13b0/0x13b0 [amdgpu]
+ ? cpu_load_update_active+0x290/0x290
+ ? finish_task_switch+0x2bd/0x840
+ ? __switch_to_asm+0x34/0x70
+ ? read_word_at_a_time+0xe/0x20
+ ? strscpy+0x14b/0x460
+ ? drm_atomic_helper_wait_for_dependencies+0x47d/0x7e0 [drm_kms_helper]
+ commit_tail+0x96/0xe0 [drm_kms_helper]
+ process_one_work+0x88a/0x1360
+ ? create_worker+0x540/0x540
+ ? __sched_text_start+0x8/0x8
+ ? move_queued_task+0x760/0x760
+ ? call_rcu_sched+0x20/0x20
+ ? vsnprintf+0xcda/0x1350
+ ? wait_woken+0x1c0/0x1c0
+ ? mutex_unlock+0x1d/0x40
+ ? init_timer_key+0x190/0x230
+ ? schedule+0xea/0x390--
+ ? __schedule+0x1ea0/0x1ea0
+ ? need_to_create_worker+0xe4/0x210
+ ? init_worker_pool+0x700/0x700
+ ? try_to_del_timer_sync+0xbf/0x110
+ ? del_timer+0x120/0x120
+ ? __mutex_lock_slowpath+0x10/0x10
+ worker_thread+0x196/0x11f0
+ ? flush_rcu_work+0x50/0x50
+ ? __switch_to_asm+0x34/0x70
+ ? __switch_to_asm+0x34/0x70
+ ? __switch_to_asm+0x40/0x70
+ ? __switch_to_asm+0x34/0x70
+ ? __switch_to_asm+0x40/0x70
+ ? __switch_to_asm+0x34/0x70
+ ? __switch_to_asm+0x40/0x70
+ ? __schedule+0x7d6/0x1ea0
+ ? migrate_swap_stop+0x850/0x880
+ ? __sched_text_start+0x8/0x8
+ ? save_stack+0x8c/0xb0
+ ? kasan_kmalloc+0xbf/0xe0
+ ? kmem_cache_alloc_trace+0xe4/0x190
+ ? kthread+0x98/0x390
+ ? ret_from_fork+0x35/0x40
+ ? ret_from_fork+0x35/0x40
+ ? deactivate_slab.isra.67+0x3c4/0x5c0
+ ? kthread+0x98/0x390
+ ? kthread+0x98/0x390
+ ? set_track+0x76/0x120
+ ? schedule+0xea/0x390
+ ? __schedule+0x1ea0/0x1ea0
+ ? wait_woken+0x1c0/0x1c0
+ ? kasan_unpoison_shadow+0x30/0x40
+ ? parse_args.cold.15+0x17a/0x17a
+ ? flush_rcu_work+0x50/0x50
+ kthread+0x2d4/0x390
+ ? kthread_create_worker_on_cpu+0xc0/0xc0
+ ret_from_fork+0x35/0x40
+
+Allocated by task 1124:
+ kasan_kmalloc+0xbf/0xe0
+ kmem_cache_alloc_trace+0xe4/0x190
+ dm_crtc_duplicate_state+0x78/0x130 [amdgpu]
+ drm_atomic_get_crtc_state+0x147/0x410 [drm]
+ page_flip_common+0x57/0x230 [drm_kms_helper]
+ drm_atomic_helper_page_flip+0xa6/0x110 [drm_kms_helper]
+ drm_mode_page_flip_ioctl+0xc4b/0x10a0 [drm]
+ drm_ioctl_kernel+0x1d4/0x260 [drm]
+ drm_ioctl+0x433/0x920 [drm]
+ amdgpu_drm_ioctl+0x11d/0x290 [amdgpu]
+ do_vfs_ioctl+0x1a1/0x13d0
+ ksys_ioctl+0x60/0x90
+ __x64_sys_ioctl+0x6f/0xb0
+ do_syscall_64+0x147/0x440
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+Freed by task 1124:
+ __kasan_slab_free+0x12e/0x180
+ kfree+0x92/0x1a0
+ drm_atomic_state_default_clear+0x315/0xc40 [drm]
+ __drm_atomic_state_free+0x35/0xd0 [drm]
+ drm_atomic_helper_update_plane+0xac/0x350 [drm_kms_helper]
+ __setplane_internal+0x2d6/0x840 [drm]
+ drm_mode_cursor_universal+0x41e/0xbe0 [drm]
+ drm_mode_cursor_common+0x49f/0x880 [drm]
+ drm_mode_cursor_ioctl+0xd8/0x130 [drm]
+ drm_ioctl_kernel+0x1d4/0x260 [drm]
+ drm_ioctl+0x433/0x920 [drm]
+ amdgpu_drm_ioctl+0x11d/0x290 [amdgpu]
+ do_vfs_ioctl+0x1a1/0x13d0
+ ksys_ioctl+0x60/0x90
+ __x64_sys_ioctl+0x6f/0xb0
+ do_syscall_64+0x147/0x440
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+The buggy address belongs to the object at ffff8803a697b068
+ which belongs to the cache kmalloc-1024 of size 1024
+The buggy address is located 9 bytes inside of
+ 1024-byte region [ffff8803a697b068, ffff8803a697b468)
+The buggy address belongs to the page:
+page:ffffea000e9a5e00 count:1 mapcount:0 mapping:ffff88041e00efc0 index:0x0 compound_mapcount: 0
+flags: 0x8000000000008100(slab|head)
+raw: 8000000000008100 ffffea000ecbc208 ffff88041e000c70 ffff88041e00efc0
+raw: 0000000000000000 0000000000170017 00000001ffffffff 0000000000000000
+page dumped because: kasan: bad access detected
+
+Memory state around the buggy address:
+ ffff8803a697af00: fb fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+ ffff8803a697af80: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+>ffff8803a697b000: fc fc fc fc fc fc fc fc fc fc fc fc fc fb fb fb
+ ^
+ ffff8803a697b080: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+ ffff8803a697b100: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+==================================================================
+
+So, we fix this by counting the number of CRTCs this atomic commit disabled
+early on in the function before their atomic states have been freed, then use
+that count later to do the appropriate number of RPM puts at the end of the
+function.
+
+Change-Id: I20cc63a10d3374ad1ad1e065b854fff4716e6686
+Acked-by: Michel Dänzer <michel.daenzer@amd.com>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Cc: stable@vger.kernel.org
+Fixes: 97028037a38ae ("drm/amdgpu: Grab/put runtime PM references in atomic_commit_tail()")
+Signed-off-by: Lyude Paul <lyude@redhat.com>
+Cc: Michel Dänzer <michel@daenzer.net>
+Reported-by: Michel Dänzer <michel@daenzer.net>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Jim Qu <Jim.Qu@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 95ab236..ef5a699 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -4404,6 +4404,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+ struct drm_connector *connector;
+ struct drm_connector_state *old_con_state, *new_con_state;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
++ int crtc_disable_count = 0;
+
+ drm_atomic_helper_update_legacy_modeset_state(dev, state);
+
+@@ -4557,6 +4558,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+ bool modeset_needed;
+
++ if (old_crtc_state->active && !new_crtc_state->active)
++ crtc_disable_count++;
++
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+ modeset_needed = modeset_required(
+@@ -4604,6 +4608,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+ * so we can put the GPU into runtime suspend if we're not driving any
+ * displays anymore
+ */
++ for (i = 0; i < crtc_disable_count; i++)
++ pm_runtime_put_autosuspend(dev->dev);
+ pm_runtime_mark_last_busy(dev->dev);
+
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4676-drm-amdgpu-delete-duplicated-code-about-runtime-PM-r.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4676-drm-amdgpu-delete-duplicated-code-about-runtime-PM-r.patch
new file mode 100644
index 00000000..6fcd7634
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4676-drm-amdgpu-delete-duplicated-code-about-runtime-PM-r.patch
@@ -0,0 +1,37 @@
+From ca1ca1e0053c68573fd4a3056ad676f19e70b32d Mon Sep 17 00:00:00 2001
+From: Jim Qu <Jim.Qu@amd.com>
+Date: Tue, 14 Aug 2018 13:56:24 +0800
+Subject: [PATCH 4676/5725] drm/amdgpu: delete duplicated code about runtime PM
+ reference
+
+Otherwise, it will unbalance the runtime PM ref, causing GPU suspend
+in some cases.
+
+Change-Id: Icaf240d84b71315547cd149d0b73cfb7764488e4
+
+Reviewed-by: Kevin Wang <Kevin1.Wang@amd.com>
+Signed-off-by: Jim Qu <Jim.Qu@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 5 -----
+ 1 file changed, 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index ef5a699..c7b5232 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -4611,11 +4611,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+ for (i = 0; i < crtc_disable_count; i++)
+ pm_runtime_put_autosuspend(dev->dev);
+ pm_runtime_mark_last_busy(dev->dev);
+-
+- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+- if (old_crtc_state->active && !new_crtc_state->active)
+- pm_runtime_put_autosuspend(dev->dev);
+- }
+ }
+
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4677-drm-amd-display-Fix-warning-observed-in-mode-change-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4677-drm-amd-display-Fix-warning-observed-in-mode-change-.patch
new file mode 100644
index 00000000..69e9b55b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4677-drm-amd-display-Fix-warning-observed-in-mode-change-.patch
@@ -0,0 +1,38 @@
+From e6635fc09721783e5deb63d450cd89005596f204 Mon Sep 17 00:00:00 2001
+From: "Jerry (Fangzhi) Zuo" <Jerry.Zuo@amd.com>
+Date: Tue, 31 Jul 2018 15:15:38 -0400
+Subject: [PATCH 4677/5725] drm/amd/display: Fix warning observed in mode
+ change on Vega
+
+[Why]
+DOUBLE_BUFFER_EN bit is getting cleared before enable blanking.
+That leads to CRTC_BLANK_DATA_EN is getting updated immediately.
+
+[How]
+Get DOUBLE_BUFFER_EN bit set, the same as DCE110.
+
+Change-Id: Ib88dc8b81b4e5dadca1fa086d2b509e491846396
+Signed-off-by: Jerry (Fangzhi) Zuo <Jerry.Zuo@amd.com>
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+Acked-by: Junwei Zhang <Jerry.Zhang@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
+index 2ea490f..04b866f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
++++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
+@@ -772,7 +772,7 @@ void dce120_tg_set_blank(struct timing_generator *tg,
+
+ CRTC_REG_SET(
+ CRTC0_CRTC_DOUBLE_BUFFER_CONTROL,
+- CRTC_BLANK_DATA_DOUBLE_BUFFER_EN, 0);
++ CRTC_BLANK_DATA_DOUBLE_BUFFER_EN, 1);
+
+ if (enable_blanking)
+ CRTC_REG_SET(CRTC0_CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4678-drm-amd-display-Fix-Edid-emulation-for-linux.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4678-drm-amd-display-Fix-Edid-emulation-for-linux.patch
new file mode 100644
index 00000000..db5e8287
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4678-drm-amd-display-Fix-Edid-emulation-for-linux.patch
@@ -0,0 +1,274 @@
+From 044c4f679ede41a14cb57629d58547f5a8f6bc08 Mon Sep 17 00:00:00 2001
+From: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Date: Tue, 11 Sep 2018 15:57:28 -0400
+Subject: [PATCH 4678/5725] drm/amd/display: Fix Edid emulation for linux
+
+[Why] EDID emulation didn't work properly on Linux, as we stop
+programming if nothing is connected physically.
+
+[How] We get a flag from DRM when we want to do EDID emulation. If
+this flag is true and nothing is connected physically, we only
+program the front end using VIRTUAL_SIGNAL.
+
+Change-Id: I6c928dfaf1cd6fdd6b039b268e5afe091d63bb20
+Signed-off-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 139 +++++++++++++++++++++-
+ drivers/gpu/drm/amd/display/dc/core/dc_link.c | 4 +-
+ drivers/gpu/drm/amd/display/dc/dc_link.h | 2 +
+ 3 files changed, 138 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index c7b5232..913e6c1 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -638,6 +638,87 @@ amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
+ return NULL;
+ }
+
++static void emulated_link_detect(struct dc_link *link)
++{
++ struct dc_sink_init_data sink_init_data = { 0 };
++ struct display_sink_capability sink_caps = { 0 };
++ enum dc_edid_status edid_status;
++ struct dc_context *dc_ctx = link->ctx;
++ struct dc_sink *sink = NULL;
++ struct dc_sink *prev_sink = NULL;
++
++ link->type = dc_connection_none;
++ prev_sink = link->local_sink;
++
++ if (prev_sink != NULL)
++ dc_sink_retain(prev_sink);
++
++ switch (link->connector_signal) {
++ case SIGNAL_TYPE_HDMI_TYPE_A: {
++ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
++ sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
++ break;
++ }
++
++ case SIGNAL_TYPE_DVI_SINGLE_LINK: {
++ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
++ sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
++ break;
++ }
++
++ case SIGNAL_TYPE_DVI_DUAL_LINK: {
++ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
++ sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
++ break;
++ }
++
++ case SIGNAL_TYPE_LVDS: {
++ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
++ sink_caps.signal = SIGNAL_TYPE_LVDS;
++ break;
++ }
++
++ case SIGNAL_TYPE_EDP: {
++ sink_caps.transaction_type =
++ DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
++ sink_caps.signal = SIGNAL_TYPE_EDP;
++ break;
++ }
++
++ case SIGNAL_TYPE_DISPLAY_PORT: {
++ sink_caps.transaction_type =
++ DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
++ sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
++ break;
++ }
++
++ default:
++ DC_ERROR("Invalid connector type! signal:%d\n",
++ link->connector_signal);
++ return;
++ }
++
++ sink_init_data.link = link;
++ sink_init_data.sink_signal = sink_caps.signal;
++
++ sink = dc_sink_create(&sink_init_data);
++ if (!sink) {
++ DC_ERROR("Failed to create sink!\n");
++ return;
++ }
++
++ link->local_sink = sink;
++
++ edid_status = dm_helpers_read_local_edid(
++ link->ctx,
++ link,
++ sink);
++
++ if (edid_status != EDID_OK)
++ DC_ERROR("Failed to read EDID");
++
++}
++
+ static int dm_resume(void *handle)
+ {
+ struct amdgpu_device *adev = handle;
+@@ -651,6 +732,7 @@ static int dm_resume(void *handle)
+ struct drm_plane *plane;
+ struct drm_plane_state *new_plane_state;
+ struct dm_plane_state *dm_new_plane_state;
++ enum dc_connection_type new_connection_type = dc_connection_none;
+ int ret;
+ int i;
+
+@@ -681,7 +763,13 @@ static int dm_resume(void *handle)
+ continue;
+
+ mutex_lock(&aconnector->hpd_lock);
+- dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
++ if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
++ DRM_ERROR("KMS: Failed to detect connector\n");
++
++ if (aconnector->base.force && new_connection_type == dc_connection_none)
++ emulated_link_detect(aconnector->dc_link);
++ else
++ dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+
+ if (aconnector->fake_enable && aconnector->dc_link->local_sink)
+ aconnector->fake_enable = false;
+@@ -917,6 +1005,7 @@ static void handle_hpd_irq(void *param)
+ struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_device *dev = connector->dev;
++ enum dc_connection_type new_connection_type = dc_connection_none;
+
+ /* In case of failure or MST no need to update connector status or notify the OS
+ * since (for MST case) MST does this in it's own context.
+@@ -926,7 +1015,21 @@ static void handle_hpd_irq(void *param)
+ if (aconnector->fake_enable)
+ aconnector->fake_enable = false;
+
+- if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
++ if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
++ DRM_ERROR("KMS: Failed to detect connector\n");
++
++ if (aconnector->base.force && new_connection_type == dc_connection_none) {
++ emulated_link_detect(aconnector->dc_link);
++
++
++ drm_modeset_lock_all(dev);
++ dm_restore_drm_connector_state(dev, connector);
++ drm_modeset_unlock_all(dev);
++
++ if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
++ drm_kms_helper_hotplug_event(dev);
++
++ } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
+ amdgpu_dm_update_connector_after_detect(aconnector);
+
+
+@@ -1026,6 +1129,7 @@ static void handle_hpd_rx_irq(void *param)
+ struct drm_device *dev = connector->dev;
+ struct dc_link *dc_link = aconnector->dc_link;
+ bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
++ enum dc_connection_type new_connection_type = dc_connection_none;
+
+ /* TODO:Temporary add mutex to protect hpd interrupt not have a gpio
+ * conflict, after implement i2c helper, this mutex should be
+@@ -1037,7 +1141,24 @@ static void handle_hpd_rx_irq(void *param)
+ if (dc_link_handle_hpd_rx_irq(dc_link, NULL) &&
+ !is_mst_root_connector) {
+ /* Downstream Port status changed. */
+- if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
++ if (!dc_link_detect_sink(dc_link, &new_connection_type))
++ DRM_ERROR("KMS: Failed to detect connector\n");
++
++ if (aconnector->base.force && new_connection_type == dc_connection_none) {
++ emulated_link_detect(dc_link);
++
++ if (aconnector->fake_enable)
++ aconnector->fake_enable = false;
++
++ amdgpu_dm_update_connector_after_detect(aconnector);
++
++
++ drm_modeset_lock_all(dev);
++ dm_restore_drm_connector_state(dev, connector);
++ drm_modeset_unlock_all(dev);
++
++ drm_kms_helper_hotplug_event(dev);
++ } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
+
+ if (aconnector->fake_enable)
+ aconnector->fake_enable = false;
+@@ -1422,6 +1543,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ uint32_t link_cnt;
+ int32_t total_overlay_planes, total_primary_planes;
++ enum dc_connection_type new_connection_type = dc_connection_none;
+
+ link_cnt = dm->dc->caps.max_links;
+ if (amdgpu_dm_mode_config_init(dm->adev)) {
+@@ -1488,7 +1610,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
+
+ link = dc_get_link_at_index(dm->dc, i);
+
+- if (dc_link_detect(link, DETECT_REASON_BOOT)) {
++ if (!dc_link_detect_sink(link, &new_connection_type))
++ DRM_ERROR("KMS: Failed to detect connector\n");
++
++ if (aconnector->base.force && new_connection_type == dc_connection_none) {
++ emulated_link_detect(link);
++ amdgpu_dm_update_connector_after_detect(aconnector);
++
++ } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
+ amdgpu_dm_update_connector_after_detect(aconnector);
+ register_backlight_device(dm, link);
+ }
+@@ -2631,7 +2760,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+ if (dm_state && dm_state->freesync_capable)
+ stream->ignore_msa_timing_param = true;
+ finish:
+- if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL)
++ if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL && aconnector->base.force != DRM_FORCE_ON)
+ dc_sink_release(sink);
+
+ return stream;
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index a81ae8d..53f9db9 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -187,7 +187,7 @@ static bool program_hpd_filter(
+ return result;
+ }
+
+-static bool detect_sink(struct dc_link *link, enum dc_connection_type *type)
++bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type)
+ {
+ uint32_t is_hpd_high = 0;
+ struct gpio *hpd_pin;
+@@ -588,7 +588,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
+ if (link->connector_signal == SIGNAL_TYPE_VIRTUAL)
+ return false;
+
+- if (false == detect_sink(link, &new_connection_type)) {
++ if (false == dc_link_detect_sink(link, &new_connection_type)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
+index 8a716baa..9404c6e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
+@@ -210,6 +210,8 @@ bool dc_link_dp_set_test_pattern(
+
+ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable);
+
++bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type);
++
+ /*
+ * DPCD access interfaces
+ */
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4679-drm-amd-display-fix-invalid-function-table-override.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4679-drm-amd-display-fix-invalid-function-table-override.patch
new file mode 100644
index 00000000..5aa95528
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4679-drm-amd-display-fix-invalid-function-table-override.patch
@@ -0,0 +1,73 @@
+From f5adcecec19eb742b92c3a24dac3d2672024b9fe Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Fri, 6 Jul 2018 13:46:05 +0200
+Subject: [PATCH 4679/5725] drm/amd/display: fix invalid function table
+ override
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Otherwise we try to program hardware with the wrong watermark functions
+when multiple DCE generations are installed in one system.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+---
+ drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c | 27 ++++++++++++++++++++--
+ 1 file changed, 25 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
+index b235a75..bae7523 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
+@@ -741,6 +741,29 @@ static struct mem_input_funcs dce_mi_funcs = {
+ .mem_input_is_flip_pending = dce_mi_is_flip_pending
+ };
+
++static struct mem_input_funcs dce112_mi_funcs = {
++ .mem_input_program_display_marks = dce112_mi_program_display_marks,
++ .allocate_mem_input = dce_mi_allocate_dmif,
++ .free_mem_input = dce_mi_free_dmif,
++ .mem_input_program_surface_flip_and_addr =
++ dce_mi_program_surface_flip_and_addr,
++ .mem_input_program_pte_vm = dce_mi_program_pte_vm,
++ .mem_input_program_surface_config =
++ dce_mi_program_surface_config,
++ .mem_input_is_flip_pending = dce_mi_is_flip_pending
++};
++
++static struct mem_input_funcs dce120_mi_funcs = {
++ .mem_input_program_display_marks = dce120_mi_program_display_marks,
++ .allocate_mem_input = dce_mi_allocate_dmif,
++ .free_mem_input = dce_mi_free_dmif,
++ .mem_input_program_surface_flip_and_addr =
++ dce_mi_program_surface_flip_and_addr,
++ .mem_input_program_pte_vm = dce_mi_program_pte_vm,
++ .mem_input_program_surface_config =
++ dce_mi_program_surface_config,
++ .mem_input_is_flip_pending = dce_mi_is_flip_pending
++};
+
+ void dce_mem_input_construct(
+ struct dce_mem_input *dce_mi,
+@@ -769,7 +792,7 @@ void dce112_mem_input_construct(
+ const struct dce_mem_input_mask *mi_mask)
+ {
+ dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask);
+- dce_mi->base.funcs->mem_input_program_display_marks = dce112_mi_program_display_marks;
++ dce_mi->base.funcs = &dce112_mi_funcs;
+ }
+
+ void dce120_mem_input_construct(
+@@ -781,5 +804,5 @@ void dce120_mem_input_construct(
+ const struct dce_mem_input_mask *mi_mask)
+ {
+ dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask);
+- dce_mi->base.funcs->mem_input_program_display_marks = dce120_mi_program_display_marks;
++ dce_mi->base.funcs = &dce120_mi_funcs;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4680-drm-amd-display-make-function-tables-const.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4680-drm-amd-display-make-function-tables-const.patch
new file mode 100644
index 00000000..7e588589
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4680-drm-amd-display-make-function-tables-const.patch
@@ -0,0 +1,80 @@
+From a6434605fc96c514bb77198ad487c2ee49360a63 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Fri, 6 Jul 2018 14:19:07 +0200
+Subject: [PATCH 4680/5725] drm/amd/display: make function tables const
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+It is good practice to make global function tables const to avoid
+accidental override.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c | 6 +++---
+ drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c | 2 +-
+ drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h | 2 +-
+ 3 files changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
+index bae7523..85686d9 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
+@@ -729,7 +729,7 @@ static bool dce_mi_program_surface_flip_and_addr(
+ return true;
+ }
+
+-static struct mem_input_funcs dce_mi_funcs = {
++static const struct mem_input_funcs dce_mi_funcs = {
+ .mem_input_program_display_marks = dce_mi_program_display_marks,
+ .allocate_mem_input = dce_mi_allocate_dmif,
+ .free_mem_input = dce_mi_free_dmif,
+@@ -741,7 +741,7 @@ static struct mem_input_funcs dce_mi_funcs = {
+ .mem_input_is_flip_pending = dce_mi_is_flip_pending
+ };
+
+-static struct mem_input_funcs dce112_mi_funcs = {
++static const struct mem_input_funcs dce112_mi_funcs = {
+ .mem_input_program_display_marks = dce112_mi_program_display_marks,
+ .allocate_mem_input = dce_mi_allocate_dmif,
+ .free_mem_input = dce_mi_free_dmif,
+@@ -753,7 +753,7 @@ static struct mem_input_funcs dce112_mi_funcs = {
+ .mem_input_is_flip_pending = dce_mi_is_flip_pending
+ };
+
+-static struct mem_input_funcs dce120_mi_funcs = {
++static const struct mem_input_funcs dce120_mi_funcs = {
+ .mem_input_program_display_marks = dce120_mi_program_display_marks,
+ .allocate_mem_input = dce_mi_allocate_dmif,
+ .free_mem_input = dce_mi_free_dmif,
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
+index 0564c8e..9b9fc3d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
+@@ -1011,7 +1011,7 @@ void dce110_free_mem_input_v(
+ {
+ }
+
+-static struct mem_input_funcs dce110_mem_input_v_funcs = {
++static const struct mem_input_funcs dce110_mem_input_v_funcs = {
+ .mem_input_program_display_marks =
+ dce_mem_input_v_program_display_marks,
+ .mem_input_program_chroma_display_marks =
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
+index 47f1dc5..da89c2e 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
+@@ -64,7 +64,7 @@ struct stutter_modes {
+ };
+
+ struct mem_input {
+- struct mem_input_funcs *funcs;
++ const struct mem_input_funcs *funcs;
+ struct dc_context *ctx;
+ struct dc_plane_address request_address;
+ struct dc_plane_address current_address;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4681-drm-amd-amdgpu-Removing-unwanted-code-from-the-below.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4681-drm-amd-amdgpu-Removing-unwanted-code-from-the-below.patch
new file mode 100644
index 00000000..30e24455
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4681-drm-amd-amdgpu-Removing-unwanted-code-from-the-below.patch
@@ -0,0 +1,191 @@
+From 40627be2ec8fbe7f0e2d34494c840a669fdca869 Mon Sep 17 00:00:00 2001
+From: Kalyan Alle <kalyan.alle@amd.com>
+Date: Wed, 26 Sep 2018 18:27:52 +0530
+Subject: [PATCH 4681/5725] drm/amd/amdgpu: Removing unwanted code from the
+ below files
+
+Removing the unwanted code:
+1. Some of the removed structure members are not used in the code.
+2. amdgpu_gem_prime_foreign_bo is not used anywhere in the amdgpu driver code.
+3. amdgpu_ttm_bind is not used in the code.
+
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 --
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 1 -
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 3 ---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 40 ------------------------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 1 -
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 1 -
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 8 ------
+ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 3 +--
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 2 +-
+ 9 files changed, 2 insertions(+), 59 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index ad566b7..c55e675 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -1675,8 +1675,6 @@ struct amdgpu_device {
+
+ u8 reset_magic[AMDGPU_RESET_MAGIC_NUM];
+
+- spinlock_t tlb_invalidation_lock;
+-
+ /* record last mm index being written through WREG32*/
+ unsigned long last_mm_index;
+ bool in_gpu_reset;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+index d89d43e..a1d52c4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+@@ -56,7 +56,6 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
+ struct reservation_object *resv,
+ struct drm_gem_object **obj)
+ {
+- struct amdgpu_bo *robj;
+ struct amdgpu_bo *bo;
+ unsigned long max_size;
+ struct amdgpu_bo_param bp;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+index 40e7b42..8505ad3 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+@@ -88,9 +88,6 @@ struct amdgpu_bo {
+ void *metadata;
+ u32 metadata_size;
+ unsigned prime_shared_count;
+- /* GEM objects refereing to this BO */
+- struct list_head gem_objects;
+-
+ /* list of all virtual address to which this bo is associated to */
+ struct list_head va;
+ /* Constant after initialization */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+index 5472366..37bd4f3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+@@ -420,43 +420,3 @@ struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
+ }
+ #endif
+
+-struct drm_gem_object *
+-amdgpu_gem_prime_foreign_bo(struct amdgpu_device *adev, struct amdgpu_bo *bo)
+-{
+- struct amdgpu_gem_object *gobj;
+- int r;
+-
+- ww_mutex_lock(&bo->tbo.resv->lock, NULL);
+-
+- list_for_each_entry(gobj, &bo->gem_objects, list) {
+- if (gobj->base.dev != adev->ddev)
+- continue;
+-
+- ww_mutex_unlock(&bo->tbo.resv->lock);
+- drm_gem_object_reference(&gobj->base);
+- return &gobj->base;
+- }
+-
+-
+- gobj = kzalloc(sizeof(struct amdgpu_gem_object), GFP_KERNEL);
+- if (unlikely(!gobj)) {
+- ww_mutex_unlock(&bo->tbo.resv->lock);
+- return ERR_PTR(-ENOMEM);
+- }
+-
+- r = drm_gem_object_init(adev->ddev, &gobj->base, amdgpu_bo_size(bo));
+- if (unlikely(r)) {
+- kfree(gobj);
+- ww_mutex_unlock(&bo->tbo.resv->lock);
+- return ERR_PTR(r);
+- }
+-
+- list_add(&gobj->list, &bo->gem_objects);
+- gobj->bo = amdgpu_bo_ref(bo);
+- bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+-
+- ww_mutex_unlock(&bo->tbo.resv->lock);
+-
+- return &gobj->base;
+-}
+-
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index 905f13b..e573325 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -838,7 +838,6 @@ struct amdgpu_ttm_gup_task_list {
+
+ struct amdgpu_ttm_tt {
+ struct ttm_dma_tt ttm;
+- struct amdgpu_device *adev;
+ u64 offset;
+ uint64_t userptr;
+ struct task_struct *usertask;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+index 265c3ed..aaf490e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+@@ -103,7 +103,6 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
+ struct dma_fence **fence);
+
+ int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
+-int amdgpu_ttm_bind(struct ttm_buffer_object *bo);
+ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo);
+ int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 57dd137..d9e3d48 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -2419,14 +2419,6 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
+ continue;
+ }
+
+- /* Don't add page tables to the moved state */
+- if (bo->tbo.type == ttm_bo_type_kernel) {
+- if (list_empty(&bo_base->vm_status))
+- list_add(&bo_base->vm_status, &vm->relocated);
+-
+- continue;
+- }
+-
+ if (was_moved)
+ continue;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index 7b63823..77350cd 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -1746,7 +1746,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
+ DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
+ goto fail;
+ }
+-#if 0
++
+ tmp = REG_SET_FIELD(tmp, GB_EDC_MODE, DED_MODE, 2);
+ tmp = REG_SET_FIELD(tmp, GB_EDC_MODE, PROP_FED, 1);
+ WREG32(mmGB_EDC_MODE, tmp);
+@@ -1754,7 +1754,6 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
+ tmp = RREG32(mmCC_GC_EDC_CONFIG);
+ tmp = REG_SET_FIELD(tmp, CC_GC_EDC_CONFIG, DIS_EDC, 0) | 1;
+ WREG32(mmCC_GC_EDC_CONFIG, tmp);
+-#endif
+
+ /* read back registers to clear the counters */
+ for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++)
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index 89176ec..56c1230 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -1870,7 +1870,7 @@ static int kfd_create_cma_system_bo(struct kfd_dev *kdev, struct kfd_bo *bo,
+
+ INIT_LIST_HEAD(&cbo->list);
+ if (bo->mem_type == KFD_IOC_ALLOC_MEM_FLAGS_VRAM)
+- bo_size = min(*size, MAX_SYSTEM_BO_SIZE);
++ bo_size = min_t(uint64_t, *size, MAX_SYSTEM_BO_SIZE);
+ else if (bo->cpuva) {
+ ret = kfd_create_sg_table_from_userptr_bo(bo, offset,
+ cma_write, mm, task,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4682-drm-amdgpu-Fix-vce-work-queue-was-not-cancelled-when.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4682-drm-amdgpu-Fix-vce-work-queue-was-not-cancelled-when.patch
new file mode 100644
index 00000000..969509ab
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4682-drm-amdgpu-Fix-vce-work-queue-was-not-cancelled-when.patch
@@ -0,0 +1,69 @@
+From 129370d4928b5c85cbdece39b654828686d705ca Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Thu, 27 Sep 2018 20:20:57 +0800
+Subject: [PATCH 4682/5725] drm/amdgpu: Fix vce work queue was not cancelled
+ when suspend
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The VCE cancel_delayed_work_sync was never called, since the
+driver only called the function in the error path.
+
+This caused an S3 hang on A+A when runtime pm is enabled.
+As the SMU will be visited in the idle queue, this will cause an
+SMU hang because the dGPU may be in suspend, and the dGPU
+will be woken up. As the SMU has hung, the dGPU resume
+will fail.
+
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Feifei Xu <Feifei.Xu@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Cherry-picked-by: Jack Gui <Jack.Gui@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 3 ++-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 4 ++--
+ 2 files changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+index 23d960e..acad299 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+@@ -246,6 +246,8 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
+ {
+ int i;
+
++ cancel_delayed_work_sync(&adev->vce.idle_work);
++
+ if (adev->vce.vcpu_bo == NULL)
+ return 0;
+
+@@ -256,7 +258,6 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
+ if (i == AMDGPU_MAX_VCE_HANDLES)
+ return 0;
+
+- cancel_delayed_work_sync(&adev->vce.idle_work);
+ /* TODO: suspending running encoding sessions isn't supported */
+ return -EINVAL;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+index 7ea85c9..e836d7c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+@@ -132,11 +132,11 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
+ unsigned size;
+ void *ptr;
+
++ cancel_delayed_work_sync(&adev->vcn.idle_work);
++
+ if (adev->vcn.vcpu_bo == NULL)
+ return 0;
+
+- cancel_delayed_work_sync(&adev->vcn.idle_work);
+-
+ size = amdgpu_bo_size(adev->vcn.vcpu_bo);
+ ptr = adev->vcn.cpu_addr;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4683-drm-amd-display-skip-multisync-for-slave-displays-ha.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4683-drm-amd-display-skip-multisync-for-slave-displays-ha.patch
new file mode 100644
index 00000000..989fdf4e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4683-drm-amd-display-skip-multisync-for-slave-displays-ha.patch
@@ -0,0 +1,39 @@
+From 2a748120e151e9e51f60f26f1a2e2c7d400f0f60 Mon Sep 17 00:00:00 2001
+From: Yogesh Mohan Marimuthu <yogesh.mohanmarimuthu@amd.com>
+Date: Wed, 26 Sep 2018 10:41:32 +0530
+Subject: [PATCH 4683/5725] drm/amd/display: skip multisync for slave displays
+ having same timing as master
+
+[Why]
+If other displays have the same timing as the multisync master display, there is
+no need to force vsync on those slave displays continuously; instead they can be
+force synced only once in the program_timing_sync() function.
+
+[How]
+In the enable_timing_multisync() function, skip displays which have the same timing
+as the master display.
+
+Signed-off-by: Yogesh Mohan Marimuthu <yogesh.mohanmarimuthu@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index a6a09b0..efddbf3 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -671,6 +671,11 @@ static void enable_timing_multisync(
+ continue;
+ if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
+ continue;
++ if (resource_are_streams_timing_synchronizable(
++ ctx->res_ctx.pipe_ctx[i].stream,
++ ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source))
++ continue;
++
+ multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
+ multisync_count++;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4684-drm-amd-display-multisync-should-be-enabled-only-for.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4684-drm-amd-display-multisync-should-be-enabled-only-for.patch
new file mode 100644
index 00000000..2dd20dbf
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4684-drm-amd-display-multisync-should-be-enabled-only-for.patch
@@ -0,0 +1,29 @@
+From f21e3679f809c34f710ef382979ccb4a25f668d3 Mon Sep 17 00:00:00 2001
+From: Yogesh Mohan Marimuthu <yogesh.mohanmarimuthu@amd.com>
+Date: Wed, 26 Sep 2018 14:13:11 +0530
+Subject: [PATCH 4684/5725] drm/amd/display: multisync should be enabled only
+ for bottom pipes
+
+multisync should be enabled only for bottom pipes
+
+Signed-off-by: Yogesh Mohan Marimuthu <yogesh.mohanmarimuthu@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index efddbf3..1f8f6446 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -666,7 +666,7 @@ static void enable_timing_multisync(
+ struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };
+
+ for (i = 0; i < pipe_count; i++) {
+- if (!ctx->res_ctx.pipe_ctx[i].stream ||
++ if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe ||
+ !ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
+ continue;
+ if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4685-drm-amd-display-skip-multisync-redo-for-already-enab.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4685-drm-amd-display-skip-multisync-redo-for-already-enab.patch
new file mode 100644
index 00000000..c2cb6868
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4685-drm-amd-display-skip-multisync-redo-for-already-enab.patch
@@ -0,0 +1,105 @@
+From 311c7aebbfb9fd57240286b9e6f9b16f62c1a77f Mon Sep 17 00:00:00 2001
+From: Yogesh Mohan Marimuthu <yogesh.mohanmarimuthu@amd.com>
+Date: Wed, 26 Sep 2018 14:41:00 +0530
+Subject: [PATCH 4685/5725] drm/amd/display: skip multisync redo for already
+ enabled displays
+
+[Why]
+During boot/hotplug, Xorg calls the modeset ioctl for the new displays being enabled
+one by one. As of now, multisync is re-enabled even for already enabled displays.
+This is not required.
+
+[How]
+In the enable_timing_multisync() function, skip displays that are already enabled.
+Also, if a new master stream is enumerated, then in the
+enable_timing_multisync() function, redo multisync for the already
+enabled displays.
+
+Signed-off-by: Yogesh Mohan Marimuthu <yogesh.mohanmarimuthu@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 18 +++++++++++++-----
+ drivers/gpu/drm/amd/display/dc/core/dc.c | 4 ++++
+ drivers/gpu/drm/amd/display/dc/inc/core_types.h | 1 +
+ 3 files changed, 18 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 913e6c1..3448aa0 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -2570,10 +2570,12 @@ static void set_multisync_trigger_params(
+ }
+ }
+
+-static void set_master_stream(struct dc_stream_state *stream_set[],
++static void set_master_stream(struct dc_state *context,
++ struct dc_stream_state *stream_set[],
+ int stream_count)
+ {
+- int j, highest_rfr = 0, master_stream = 0;
++ int j, highest_rfr = 0;
++ struct dc_stream_state *new_master_stream = NULL, *old_master_stream = NULL;
+
+ for (j = 0; j < stream_count; j++) {
+ if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
+@@ -2583,14 +2585,20 @@ static void set_master_stream(struct dc_stream_state *stream_set[],
+ (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
+ if (refresh_rate > highest_rfr) {
+ highest_rfr = refresh_rate;
+- master_stream = j;
++ new_master_stream = stream_set[j];
+ }
++
++ if (stream_set[j]->triggered_crtc_reset.event_source)
++ old_master_stream = stream_set[j]->triggered_crtc_reset.event_source;
+ }
+ }
+ for (j = 0; j < stream_count; j++) {
+ if (stream_set[j])
+- stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
++ stream_set[j]->triggered_crtc_reset.event_source = new_master_stream;
+ }
++
++ if (old_master_stream != new_master_stream)
++ context->mdvsync_master_changed = true;
+ }
+
+ bool dm_helpers_parse_amd_vsdb(struct edid *edid)
+@@ -2672,7 +2680,7 @@ static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
+ */
+ set_multisync_trigger_params(context->streams[i]);
+ }
+- set_master_stream(context->streams, context->stream_count);
++ set_master_stream(context, context->streams, context->stream_count);
+ }
+
+ static struct dc_stream_state *
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 1f8f6446..7da1e88 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -675,6 +675,10 @@ static void enable_timing_multisync(
+ ctx->res_ctx.pipe_ctx[i].stream,
+ ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source))
+ continue;
++ if ((ctx->mdvsync_master_changed == false) &&
++ ctx->res_ctx.pipe_ctx[i].stream_res.tg->funcs->is_blanked &&
++ !ctx->res_ctx.pipe_ctx[i].stream_res.tg->funcs->is_blanked(ctx->res_ctx.pipe_ctx[i].stream_res.tg))
++ continue;
+
+ multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
+ multisync_count++;
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+index 4beddca0..a4640e4a 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+@@ -270,6 +270,7 @@ struct dc_state {
+ struct dc_stream_state *streams[MAX_PIPES];
+ struct dc_stream_status stream_status[MAX_PIPES];
+ uint8_t stream_count;
++ uint8_t mdvsync_master_changed;
+
+ struct resource_context res_ctx;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4686-drm-amd-display-initialize-new_stream-status.primary.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4686-drm-amd-display-initialize-new_stream-status.primary.patch
new file mode 100644
index 00000000..5cc17f39
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4686-drm-amd-display-initialize-new_stream-status.primary.patch
@@ -0,0 +1,39 @@
+From d8f99be8e6e7b3676cfc70e338907c55cd7785a4 Mon Sep 17 00:00:00 2001
+From: Yogesh Mohan Marimuthu <yogesh.mohanmarimuthu@amd.com>
+Date: Wed, 26 Sep 2018 16:45:15 +0530
+Subject: [PATCH 4686/5725] drm/amd/display: initialize
+ new_stream->status.primary_otg_inst
+
+[Why]
+primary_otg_inst is used in the enable_timing_multisync() function, but
+this variable is not initialized in the code, leaving it at the default
+value of 0. Due to this, in enable_timing_multisync() the master
+is programmed as the first display. But if the first display is not the master,
+this results in incorrect hardware programming, and the displays with
+incorrect hardware programming blank.
+
+[How]
+In the resource_map_pool_resources() function, also initialize
+stream->status.primary_otg_inst.
+
+Signed-off-by: Yogesh Mohan Marimuthu <yogesh.mohanmarimuthu@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index b65aa6e..07e2c28 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -1927,6 +1927,8 @@ enum dc_status resource_map_pool_resources(
+ if (pipe_ctx->stream && dc_is_embedded_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.abm = pool->abm;
+
++ stream->status.primary_otg_inst = pipe_ctx->stream_res.tg->inst;
++
+ for (i = 0; i < context->stream_count; i++)
+ if (context->streams[i] == stream) {
+ context->stream_status[i].primary_otg_inst = pipe_ctx->stream_res.tg->inst;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4687-x86-MCE-AMD-mce-code-changes-to-fix-the-crash.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4687-x86-MCE-AMD-mce-code-changes-to-fix-the-crash.patch
new file mode 100644
index 00000000..32d5be4a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4687-x86-MCE-AMD-mce-code-changes-to-fix-the-crash.patch
@@ -0,0 +1,137 @@
+From 427a2867957665d24bd4216e35366c74cc057c82 Mon Sep 17 00:00:00 2001
+From: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+Date: Thu, 10 Jan 2019 14:57:12 +0530
+Subject: [PATCH 4687/5725] x86/MCE/AMD: mce code changes to fix the crash
+ which is
+
+occurring when doing suspend/resume on Dibbler V1000.
+
+Upstream patch commits from which the code is taken:
+11cf887728a3d1de77cc12ce247b64ef32608891: x86/MCE/AMD: Read MCx_MISC block addresses on any CPU
+8a331f4a0863bea758561c921b94b4d28f7c4029: x86/mce/AMD: Carve out SMCA get_block_address() code
+fbf96cf904dc154a28338fe68f72902e9af57afc: x86/MCE/AMD: Define a function to get SMCA bank type
+
+This patch fixes the below crash seen when the user gives the command
+"systemctl suspend" for S3.
+
+ CPU: 0 PID: 1830 Comm: systemd-sleep Not tainted 4.14.14-1-amd-oct3-allpatches+ #3
+ Hardware name: AMD Dibbler/Dibbler, BIOS RDB1107CB 09/13/2018
+ task: ffff96d541174500 task.stack: ffffb8bd41df0000
+ RIP: 0010:smp_call_function_single+0xe2/0xf0
+ RSP: 0018:ffffb8bd41df3c40 EFLAGS: 00010046
+ RAX: 0000000000000000 RBX: ffffb8bd41df3cd4 RCX: 0000000000000001
+ RDX: ffffb8bd41df3c90 RSI: ffffffff98c3c2a0 RDI: 0000000000000000
+ RBP: ffffb8bd41df3c70 R08: 0000000000000001 R09: 0000000000010000
+ R10: ffffb8bd41df3c90 R11: ffffffff99c6ad20 R12: 0000000000000001
+ R13: 0000000000000000 R14: 0000000000000000 R15: ffffb8bd41df3d14
+ FS: 00007fc832f29940(0000) GS:ffff96d54a200000(0000) knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 00005633e8039eb8 CR3: 00000001409ba000 CR4: 00000000003406f0
+ Call Trace:
+ ? rdmsr_safe_on_cpu+0x4b/0x70
+ rdmsr_safe_on_cpu+0x4b/0x70
+ get_block_address.isra.2+0x75/0xf0
+ mce_amd_feature_init+0x69/0x2b0
+ mce_syscore_resume+0x1e/0x30
+ syscore_resume+0x47/0x170
+ suspend_devices_and_enter+0x6fd/0x760
+ pm_suspend+0x30c/0x380
+ state_store+0x71/0xd0
+ kernfs_fop_write+0x10f/0x190
+ __vfs_write+0x26/0x150
+ ? common_file_perm+0x44/0x140
+ ? security_file_permission+0x3c/0xb0
+ vfs_write+0xb3/0x1a0
+ SyS_write+0x42/0x90
+ do_syscall_64+0x6e/0x120
+ entry_SYSCALL_64_after_hwframe+0x3d/0xa2
+ RIP: 0033:0x7fc832a4a154
+ RSP: 002b:00007ffe0a53c7e8 EFLAGS: 00000246 ORIG_RAX: 0000000000000001
+ RAX: ffffffffffffffda RBX: 0000000000000004 RCX: 00007fc832a4a154
+ RDX: 0000000000000004 RSI: 00005633e8038eb0 RDI: 0000000000000004
+ RBP: 00005633e8038eb0 R08: 00005633e8037370 R09: 00007fc832f29940
+ R10: 000000000000000a R11: 0000000000000246 R12: 00005633e8037290
+ R13: 0000000000000004 R14: 00007fc832d222a0 R15: 00007fc832d21760
+ Code: fe ff ff 8b 55 e8 83 e2 01 74 0a f3 90 8b 55 e8 83 e2 01 75 f6 48 83 c4 28 41 5a 5d 49 8d 62 f8 c3 8b 05 f2 76 9e 01 85 c0 75 85 <0f> 0b eb 81 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 44 00 00 f6 46
+ ---[ end trace 21ebe5b8c411c5d7 ]---
+
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+Signed-off-by: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+---
+ arch/x86/kernel/cpu/mcheck/mce_amd.c | 17 ++++++++---------
+ 1 file changed, 8 insertions(+), 9 deletions(-)
+ mode change 100644 => 100755 arch/x86/kernel/cpu/mcheck/mce_amd.c
+
+diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
+old mode 100644
+new mode 100755
+index dbcb010..63250b3
+--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
++++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
+@@ -434,8 +434,7 @@ static void deferred_error_interrupt_enable(struct cpuinfo_x86 *c)
+ wrmsr(MSR_CU_DEF_ERR, low, high);
+ }
+
+-static u32 smca_get_block_address(unsigned int cpu, unsigned int bank,
+- unsigned int block)
++static u32 smca_get_block_address(unsigned int bank, unsigned int block)
+ {
+ u32 low, high;
+ u32 addr = 0;
+@@ -454,13 +453,13 @@ static u32 smca_get_block_address(unsigned int cpu, unsigned int bank,
+ * For SMCA enabled processors, BLKPTR field of the first MISC register
+ * (MCx_MISC0) indicates presence of additional MISC regs set (MISC1-4).
+ */
+- if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high))
++ if (rdmsr_safe(MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high))
+ goto out;
+
+ if (!(low & MCI_CONFIG_MCAX))
+ goto out;
+
+- if (!rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) &&
++ if (!rdmsr_safe(MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) &&
+ (low & MASK_BLKPTR_LO))
+ addr = MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1);
+
+@@ -469,8 +468,8 @@ static u32 smca_get_block_address(unsigned int cpu, unsigned int bank,
+ return addr;
+ }
+
+-static u32 get_block_address(unsigned int cpu, u32 current_addr, u32 low, u32 high,
+- unsigned int bank, unsigned int block)
++static u32 get_block_address(u32 current_addr, u32 low, u32 high,
++ unsigned int bank, unsigned int block)
+ {
+ u32 addr = 0, offset = 0;
+
+@@ -478,7 +477,7 @@ static u32 get_block_address(unsigned int cpu, u32 current_addr, u32 low, u32 hi
+ return addr;
+
+ if (mce_flags.smca)
+- return smca_get_block_address(cpu, bank, block);
++ return smca_get_block_address(bank, block);
+
+ /* Fall back to method we used for older processors: */
+ switch (block) {
+@@ -556,7 +555,7 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
+ smca_configure(bank, cpu);
+
+ for (block = 0; block < NR_BLOCKS; ++block) {
+- address = get_block_address(cpu, address, low, high, bank, block);
++ address = get_block_address(address, low, high, bank, block);
+ if (!address)
+ break;
+
+@@ -1173,7 +1172,7 @@ static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
+ if (err)
+ goto out_free;
+ recurse:
+- address = get_block_address(cpu, address, low, high, bank, ++block);
++ address = get_block_address(address, low, high, bank, ++block);
+ if (!address)
+ return 0;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4688-drm-amdgpu-No-action-when-VCN-PG-state-is-unchanged.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4688-drm-amdgpu-No-action-when-VCN-PG-state-is-unchanged.patch
new file mode 100644
index 00000000..fdc5a03e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4688-drm-amdgpu-No-action-when-VCN-PG-state-is-unchanged.patch
@@ -0,0 +1,61 @@
+From e1e9c636bd5b19b538d05c8885a19e9355923e74 Mon Sep 17 00:00:00 2001
+From: James Zhu <jzhums@gmail.com>
+Date: Thu, 13 Sep 2018 16:55:44 -0400
+Subject: [PATCH 4688/5725] drm/amdgpu: No action when VCN PG state is unchanged
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+When the VCN PG state is unchanged, it is unnecessary to reset the power
+gate state.
+
+Signed-off-by: James Zhu <James.Zhu@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 1 +
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 12 ++++++++++--
+ 2 files changed, 11 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+index 0b0b863..d2219ab 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+@@ -69,6 +69,7 @@ struct amdgpu_vcn {
+ struct amdgpu_ring ring_jpeg;
+ struct amdgpu_irq_src irq;
+ unsigned num_enc_rings;
++ enum amd_powergating_state cur_state;
+ };
+
+ int amdgpu_vcn_sw_init(struct amdgpu_device *adev);
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index d0c428f..5b219e6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -1637,12 +1637,20 @@ static int vcn_v1_0_set_powergating_state(void *handle,
+ * revisit this when there is a cleaner line between
+ * the smc and the hw blocks
+ */
++ int ret;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
++ if(state == adev->vcn.cur_state)
++ return 0;
++
+ if (state == AMD_PG_STATE_GATE)
+- return vcn_v1_0_stop(adev);
++ ret = vcn_v1_0_stop(adev);
+ else
+- return vcn_v1_0_start(adev);
++ ret = vcn_v1_0_start(adev);
++
++ if(!ret)
++ adev->vcn.cur_state = state;
++ return ret;
+ }
+
+ static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4689-tpm-tpm_crb-Use-start-method-value-from-ACPI-table-d.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4689-tpm-tpm_crb-Use-start-method-value-from-ACPI-table-d.patch
new file mode 100644
index 00000000..54ca05c6
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4689-tpm-tpm_crb-Use-start-method-value-from-ACPI-table-d.patch
@@ -0,0 +1,175 @@
+From ee6a2634fcbf2d2639a5c30e4eee4bd5aae33970 Mon Sep 17 00:00:00 2001
+From: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+Date: Wed, 9 Jan 2019 20:07:02 +0530
+Subject: [PATCH 4689/5725] tpm/tpm_crb: Use start method value from ACPI table
+ directly
+
+This patch gets rid of dealing with an intermediate flag for the start method
+and uses the start method value from the ACPI table directly.
+
+For ARM64, the locality is handled by Trust Zone in FW. The layout
+does not have crb_regs_head. It is hitting the following line.
+dev_warn(dev, FW_BUG "Bad ACPI memory layout");
+
+Current code excludes CRB_FL_ACPI_START for this check. Now since
+ARM64 support for TPM CRB is added, CRB_FL_CRB_SMC_START should also be
+excluded from this check.
+
+For goIdle and cmdReady, where the code was excluding only CRB_FL_ACPI_START
+(do nothing for the ACPI start method), CRB_FL_CRB_SMC_START was also
+excluded, as the ARM64 SMC start method does not have TPM_CRB_CTRL_REQ.
+
+However, with the special PTT workaround requiring CRB_FL_CRB_START to be
+set in addition to CRB_FL_ACPI_START, and the additional flag of the SMC
+start method CRB_FL_CRB_SMC_START, the code has become difficult to
+maintain and understand. It is better to make the code deal with the start
+method value from the ACPI table directly.
+
+Signed-off-by: Jiandi An <anjiandi@codeaurora.org>
+Reviewed-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Tested-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Signed-off-by: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+---
+ drivers/char/tpm/tpm_crb.c | 59 +++++++++++++++++++++++-----------------------
+ 1 file changed, 29 insertions(+), 30 deletions(-)
+
+diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
+index 5c7ce5a..0962456 100644
+--- a/drivers/char/tpm/tpm_crb.c
++++ b/drivers/char/tpm/tpm_crb.c
+@@ -92,14 +92,9 @@ enum crb_status {
+ CRB_DRV_STS_COMPLETE = BIT(0),
+ };
+
+-enum crb_flags {
+- CRB_FL_ACPI_START = BIT(0),
+- CRB_FL_CRB_START = BIT(1),
+- CRB_FL_CRB_SMC_START = BIT(2),
+-};
+-
+ struct crb_priv {
+- unsigned int flags;
++ u32 sm;
++ const char *hid;
+ void __iomem *iobase;
+ struct crb_regs_head __iomem *regs_h;
+ struct crb_regs_tail __iomem *regs_t;
+@@ -147,14 +142,16 @@ static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
+ * Anyhow, we do not wait here as a consequent CMD_READY request
+ * will be handled correctly even if idle was not completed.
+ *
+- * The function does nothing for devices with ACPI-start method.
++ * The function does nothing for devices with ACPI-start method
++ * or SMC-start method.
+ *
+ * Return: 0 always
+ */
+ static int __crb_go_idle(struct device *dev, struct crb_priv *priv)
+ {
+- if ((priv->flags & CRB_FL_ACPI_START) ||
+- (priv->flags & CRB_FL_CRB_SMC_START))
++ if ((priv->sm == ACPI_TPM2_START_METHOD) ||
++ (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) ||
++ (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC))
+ return 0;
+
+ iowrite32(CRB_CTRL_REQ_GO_IDLE, &priv->regs_t->ctrl_req);
+@@ -189,13 +186,15 @@ static int crb_go_idle(struct tpm_chip *chip)
+ * The device should respond within TIMEOUT_C.
+ *
+ * The function does nothing for devices with ACPI-start method
++ * or SMC-start method.
+ *
+ * Return: 0 on success -ETIME on timeout;
+ */
+ static int __crb_cmd_ready(struct device *dev, struct crb_priv *priv)
+ {
+- if ((priv->flags & CRB_FL_ACPI_START) ||
+- (priv->flags & CRB_FL_CRB_SMC_START))
++ if ((priv->sm == ACPI_TPM2_START_METHOD) ||
++ (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) ||
++ (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC))
+ return 0;
+
+ iowrite32(CRB_CTRL_REQ_CMD_READY, &priv->regs_t->ctrl_req);
+@@ -371,13 +370,20 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len)
+ /* Make sure that cmd is populated before issuing start. */
+ wmb();
+
+- if (priv->flags & CRB_FL_CRB_START)
++ /* The reason for the extra quirk is that the PTT in 4th Gen Core CPUs
++ * report only ACPI start but in practice seems to require both
++ * CRB start, hence invoking CRB start method if hid == MSFT0101.
++ */
++ if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) ||
++ (priv->sm == ACPI_TPM2_MEMORY_MAPPED) ||
++ (!strcmp(priv->hid, "MSFT0101")))
+ iowrite32(CRB_START_INVOKE, &priv->regs_t->ctrl_start);
+
+- if (priv->flags & CRB_FL_ACPI_START)
++ if ((priv->sm == ACPI_TPM2_START_METHOD) ||
++ (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD))
+ rc = crb_do_acpi_start(chip);
+
+- if (priv->flags & CRB_FL_CRB_SMC_START) {
++ if (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC) {
+ iowrite32(CRB_START_INVOKE, &priv->regs_t->ctrl_start);
+ rc = tpm_crb_smc_start(&chip->dev, priv->smc_func_id);
+ }
+@@ -391,7 +397,9 @@ static void crb_cancel(struct tpm_chip *chip)
+
+ iowrite32(CRB_CANCEL_INVOKE, &priv->regs_t->ctrl_cancel);
+
+- if ((priv->flags & CRB_FL_ACPI_START) && crb_do_acpi_start(chip))
++ if (((priv->sm == ACPI_TPM2_START_METHOD) ||
++ (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD)) &&
++ crb_do_acpi_start(chip))
+ dev_err(&chip->dev, "ACPI Start failed\n");
+ }
+
+@@ -506,7 +514,8 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
+ * the control area, as one nice sane region except for some older
+ * stuff that puts the control area outside the ACPI IO region.
+ */
+- if (!(priv->flags & CRB_FL_ACPI_START)) {
++ if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) ||
++ (priv->sm == ACPI_TPM2_MEMORY_MAPPED)) {
+ if (buf->control_address == io_res.start +
+ sizeof(*priv->regs_h))
+ priv->regs_h = priv->iobase;
+@@ -606,18 +615,6 @@ static int crb_acpi_add(struct acpi_device *device)
+ if (!priv)
+ return -ENOMEM;
+
+- /* The reason for the extra quirk is that the PTT in 4th Gen Core CPUs
+- * report only ACPI start but in practice seems to require both
+- * ACPI start and CRB start.
+- */
+- if (sm == ACPI_TPM2_COMMAND_BUFFER || sm == ACPI_TPM2_MEMORY_MAPPED ||
+- !strcmp(acpi_device_hid(device), "MSFT0101"))
+- priv->flags |= CRB_FL_CRB_START;
+-
+- if (sm == ACPI_TPM2_START_METHOD ||
+- sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD)
+- priv->flags |= CRB_FL_ACPI_START;
+-
+ if (sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC) {
+ if (buf->header.length < (sizeof(*buf) + sizeof(*crb_smc))) {
+ dev_err(dev,
+@@ -628,9 +625,11 @@ static int crb_acpi_add(struct acpi_device *device)
+ }
+ crb_smc = ACPI_ADD_PTR(struct tpm2_crb_smc, buf, sizeof(*buf));
+ priv->smc_func_id = crb_smc->smc_func_id;
+- priv->flags |= CRB_FL_CRB_SMC_START;
+ }
+
++ priv->sm = sm;
++ priv->hid = acpi_device_hid(device);
++
+ rc = crb_map_io(device, priv, buf);
+ if (rc)
+ return rc;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4690-drm-amd-display-Fix-BUG_ON-during-CRTC-atomic-check-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4690-drm-amd-display-Fix-BUG_ON-during-CRTC-atomic-check-.patch
new file mode 100644
index 00000000..30e39e7a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4690-drm-amd-display-Fix-BUG_ON-during-CRTC-atomic-check-.patch
@@ -0,0 +1,59 @@
+From d4a93ea98ebb07a999f566769682a9647b18ff59 Mon Sep 17 00:00:00 2001
+From: "Leo (Sunpeng) Li" <sunpeng.li@amd.com>
+Date: Tue, 29 May 2018 09:51:51 -0400
+Subject: [PATCH 4690/5725] drm/amd/display: Fix BUG_ON during CRTC atomic
+ check update
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+For cases where the CRTC is inactive (DPMS off), where a modeset is not
+required, yet the CRTC is still in the atomic state, we should not
+attempt to update anything on it.
+
+Previously, we were relying on the modereset_required() helper to check
+the above condition. However, the function returns false immediately if
+a modeset is not required, ignoring the CRTC's enable/active state
+flags. The correct way to filter is by looking at these flags instead.
+
+Fixes: e277adc5a06c "drm/amd/display: Hookup color management functions"
+Bugzilla: https://bugs.freedesktop.org/106194
+
+Signed-off-by: Leo (Sunpeng) Li <sunpeng.li@amd.com>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Tested-by: Michel Dänzer <michel.daenzer@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 13 +++++++------
+ 1 file changed, 7 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 3448aa0..8fbe483 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -5091,15 +5091,16 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
+ * We want to do dc stream updates that do not require a
+ * full modeset below.
+ */
+- if (!enable || !aconnector || modereset_required(new_crtc_state))
++ if (!(enable && aconnector && new_crtc_state->enable &&
++ new_crtc_state->active))
+ continue;
+ /*
+ * Given above conditions, the dc state cannot be NULL because:
+- * 1. We're attempting to enable a CRTC. Which has a...
+- * 2. Valid connector attached, and
+- * 3. User does not want to reset it (disable or mark inactive,
+- * which can happen on a CRTC that's already disabled).
+- * => It currently exists.
++ * 1. We're in the process of enabling CRTCs (just been added
++ * to the dc context, or already is on the context)
++ * 2. Has a valid connector attached, and
++ * 3. Is currently active and enabled.
++ * => The dc stream state currently exists.
+ */
+ BUG_ON(dm_new_crtc_state->stream == NULL);
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4691-drm-amd-display-Make-atomic-check-validate-underscan.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4691-drm-amd-display-Make-atomic-check-validate-underscan.patch
new file mode 100644
index 00000000..3aee328f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4691-drm-amd-display-Make-atomic-check-validate-underscan.patch
@@ -0,0 +1,90 @@
+From fd7e79a872be3c1cdba1cab01898ee5ae0e76e42 Mon Sep 17 00:00:00 2001
+From: David Francis <David.Francis@amd.com>
+Date: Thu, 31 May 2018 13:48:31 -0400
+Subject: [PATCH 4691/5725] drm/amd/display: Make atomic-check validate
+ underscan changes
+
+When the underscan state was changed, atomic-check was triggering a
+validation but passing the old underscan values. This change adds a
+somewhat hacky check in dm_update_crtcs_state that will update the
+stream if the old and new underscan values are different.
+This was causing 4k on Fiji to allow underscan when it wasn't permitted.
+
+Signed-off-by: David Francis <David.Francis@amd.com>
+Reviewed-by: David Francis <David.Francis@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 25 +++++++++++++++--------
+ 1 file changed, 17 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 8fbe483..e308f0c 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -4943,8 +4943,8 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ struct amdgpu_crtc *acrtc = NULL;
+ struct amdgpu_dm_connector *aconnector = NULL;
+- struct drm_connector_state *new_con_state = NULL;
+- struct dm_connector_state *dm_conn_state = NULL;
++ struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
++ struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
+ struct drm_plane_state *new_plane_state = NULL;
+
+ new_stream = NULL;
+@@ -4965,19 +4965,23 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
+ /* TODO This hack should go away */
+ if (aconnector && enable) {
+ // Make sure fake sink is created in plug-in scenario
+- new_con_state = drm_atomic_get_connector_state(state,
++ drm_new_conn_state = drm_atomic_get_new_connector_state(state,
+ &aconnector->base);
+
+- if (IS_ERR(new_con_state)) {
+- ret = PTR_ERR_OR_ZERO(new_con_state);
++ drm_old_conn_state = drm_atomic_get_old_connector_state(state,
++ &aconnector->base);
++
++ if (IS_ERR(drm_new_conn_state)) {
++ ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
+ break;
+ }
+
+- dm_conn_state = to_dm_connector_state(new_con_state);
++ dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
++ dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
+
+ new_stream = create_stream_for_sink(aconnector,
+ &new_crtc_state->mode,
+- dm_conn_state);
++ dm_new_conn_state);
+
+ /*
+ * we can have no stream on ACTION_SET if a display
+@@ -4993,7 +4997,7 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
+ }
+
+ set_freesync_on_stream(dm, dm_new_crtc_state,
+- dm_conn_state, new_stream);
++ dm_new_conn_state, new_stream);
+
+ if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
+ dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
+@@ -5104,6 +5108,11 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
+ */
+ BUG_ON(dm_new_crtc_state->stream == NULL);
+
++ /* Scaling or underscan settings */
++ if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
++ update_stream_scaling_settings(
++ &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
++
+ /* Color managment settings */
+ if (dm_new_crtc_state->base.color_mgmt_changed) {
+ ret = amdgpu_dm_set_regamma_lut(dm_new_crtc_state);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4692-drm-amd-display-Update-color-props-when-modeset-is-r.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4692-drm-amd-display-Update-color-props-when-modeset-is-r.patch
new file mode 100644
index 00000000..0dca7ab7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4692-drm-amd-display-Update-color-props-when-modeset-is-r.patch
@@ -0,0 +1,39 @@
+From d6293434e012add7451d43dbaab419b85ce2f569 Mon Sep 17 00:00:00 2001
+From: "Leo (Sunpeng) Li" <sunpeng.li@amd.com>
+Date: Thu, 31 May 2018 10:23:37 -0400
+Subject: [PATCH 4692/5725] drm/amd/display: Update color props when modeset is
+ required
+
+This fixes issues where color management properties don't persist
+over DPMS on/off, or when the CRTC is moved across connectors.
+
+Signed-off-by: Leo (Sunpeng) Li <sunpeng.li@amd.com>
+Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index e308f0c..017fd80 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -5113,8 +5113,12 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
+ update_stream_scaling_settings(
+ &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
+
+- /* Color managment settings */
+- if (dm_new_crtc_state->base.color_mgmt_changed) {
++ /*
++ * Color management settings. We also update color properties
++ * when a modeset is needed, to ensure it gets reprogrammed.
++ */
++ if (dm_new_crtc_state->base.color_mgmt_changed ||
++ drm_atomic_crtc_needs_modeset(new_crtc_state)) {
+ ret = amdgpu_dm_set_regamma_lut(dm_new_crtc_state);
+ if (ret)
+ goto fail;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4693-drm-amd-powerplay-add-control-gfxoff-enabling-in-lat.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4693-drm-amd-powerplay-add-control-gfxoff-enabling-in-lat.patch
new file mode 100644
index 00000000..972f7827
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4693-drm-amd-powerplay-add-control-gfxoff-enabling-in-lat.patch
@@ -0,0 +1,50 @@
+From a9c4a96846cd940fdea416cb7ca65cdb8e8e569c Mon Sep 17 00:00:00 2001
+From: Huang Rui <ray.huang@amd.com>
+Date: Tue, 13 Mar 2018 18:32:39 +0800
+Subject: [PATCH 4693/5725] drm/amd/powerplay: add control gfxoff enabling in
+ late init
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Signed-off-by: Huang Rui <ray.huang@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+index 76fc45f..46ffe04 100644
+--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
++++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+@@ -180,7 +180,8 @@ static int pp_late_init(void *handle)
+ {
+ struct amdgpu_device *adev = handle;
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+-
++ int ret;
++
+ if (hwmgr && hwmgr->pm_en) {
+ mutex_lock(&hwmgr->smu_lock);
+ hwmgr_handle_task(hwmgr,
+@@ -191,6 +192,13 @@ static int pp_late_init(void *handle)
+ if (adev->pm.smu_prv_buffer_size != 0)
+ pp_reserve_vram_for_smu(adev);
+
++ if (hwmgr->hwmgr_func->gfx_off_control &&
++ (hwmgr->feature_mask & PP_GFXOFF_MASK)) {
++ ret = hwmgr->hwmgr_func->gfx_off_control(hwmgr, true);
++ if (ret)
++ pr_err("gfx off enabling failed!\n");
++ }
++
+ return 0;
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4694-drm-amd-powerplay-fix-missed-hwmgr-check-warning-bef.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4694-drm-amd-powerplay-fix-missed-hwmgr-check-warning-bef.patch
new file mode 100644
index 00000000..239a9846
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4694-drm-amd-powerplay-fix-missed-hwmgr-check-warning-bef.patch
@@ -0,0 +1,42 @@
+From a2cf4a17e0a8afcbd88c81d7393969cefc9ba702 Mon Sep 17 00:00:00 2001
+From: Huang Rui <ray.huang@amd.com>
+Date: Fri, 18 May 2018 10:39:16 +0800
+Subject: [PATCH 4694/5725] drm/amd/powerplay: fix missed hwmgr check warning
+ before call gfx_off_control handler
+
+Patch 9667849bbb8d: "drm/amd/powerplay: add control gfxoff enabling in late
+init" from Mar 13, 2018, leads to the following static checker warning:
+
+ drivers/gpu/drm/amd/amdgpu/../powerplay/amd_powerplay.c:194
+pp_late_init()
+ error: we previously assumed 'hwmgr' could be null (see line 185)
+
+drivers/gpu/drm/amd/amdgpu/../powerplay/amd_powerplay.c
+
+This patch fixes the warning by adding a check for hwmgr.
+
+Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Huang Rui <ray.huang@amd.com>
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+index 46ffe04..9e285ed 100644
+--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
++++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+@@ -192,7 +192,8 @@ static int pp_late_init(void *handle)
+ if (adev->pm.smu_prv_buffer_size != 0)
+ pp_reserve_vram_for_smu(adev);
+
+- if (hwmgr->hwmgr_func->gfx_off_control &&
++ if (hwmgr && hwmgr->hwmgr_func &&
++ hwmgr->hwmgr_func->gfx_off_control &&
+ (hwmgr->feature_mask & PP_GFXOFF_MASK)) {
+ ret = hwmgr->hwmgr_func->gfx_off_control(hwmgr, true);
+ if (ret)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4695-drm-amd-powerplay-Set-higher-SCLK-MCLK-frequency-tha.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4695-drm-amd-powerplay-Set-higher-SCLK-MCLK-frequency-tha.patch
new file mode 100644
index 00000000..61514bca
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4695-drm-amd-powerplay-Set-higher-SCLK-MCLK-frequency-tha.patch
@@ -0,0 +1,47 @@
+From e06f7a05fe35ab7f40a2ca6a456f9f20c6aec389 Mon Sep 17 00:00:00 2001
+From: Kenneth Feng <kenneth.feng@amd.com>
+Date: Tue, 12 Jun 2018 15:07:37 +0800
+Subject: [PATCH 4695/5725] drm/amd/powerplay: Set higher SCLK&MCLK frequency
+ than dpm7 in OD (v2)
+
+Fix the issue that SCLK&MCLK can't be set higher than dpm7 when
+OD is enabled in SMU7.
+
+v2: fix warning (Alex)
+
+Signed-off-by: Kenneth Feng <kenneth.feng@amd.com>
+Acked-by: Rex Zhu <rezhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+index e8285f1..4f1e7d6 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+@@ -3812,7 +3812,7 @@ static int smu7_trim_dpm_states(struct pp_hwmgr *hwmgr,
+ static int smu7_generate_dpm_level_enable_mask(
+ struct pp_hwmgr *hwmgr, const void *input)
+ {
+- int result;
++ int result = 0;
+ const struct phm_set_power_state_input *states =
+ (const struct phm_set_power_state_input *)input;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+@@ -3820,7 +3820,10 @@ static int smu7_generate_dpm_level_enable_mask(
+ cast_const_phw_smu7_power_state(states->pnew_state);
+
+
+- result = smu7_trim_dpm_states(hwmgr, smu7_ps);
++ /*skip the trim if od is enabled*/
++ if (!hwmgr->od_enabled)
++ result = smu7_trim_dpm_states(hwmgr, smu7_ps);
++
+ if (result)
+ return result;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4696-drm-amd-pp-Fix-uninitialized-variable.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4696-drm-amd-pp-Fix-uninitialized-variable.patch
new file mode 100644
index 00000000..ca96e3cc
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4696-drm-amd-pp-Fix-uninitialized-variable.patch
@@ -0,0 +1,30 @@
+From 3cb4ff19ed6799f8f2ef171214fef38fb9036a97 Mon Sep 17 00:00:00 2001
+From: Rajan Vaja <rajan.vaja@gmail.com>
+Date: Mon, 18 Jun 2018 13:01:02 +0530
+Subject: [PATCH 4696/5725] drm/amd/pp: Fix uninitialized variable
+
+Initialize variable to 0 before performing logical OR operation.
+
+Reviewed-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Rajan Vaja <rajan.vaja@gmail.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
+index dbe4b1f..2236487 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
+@@ -1090,7 +1090,7 @@ static int vega10_disable_se_edc_config(struct pp_hwmgr *hwmgr)
+ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
+ {
+ struct amdgpu_device *adev = hwmgr->adev;
+- int result;
++ int result = 0;
+ uint32_t num_se = 0;
+ uint32_t count, data;
+
+--
+2.7.4
+
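
The fix above is the classic initialize-before-accumulate pattern: when error codes are OR'ed (or otherwise accumulated) into a local, the local must start at 0 or the first read is of an indeterminate value. A minimal standalone C sketch of the idea, not taken from the driver:

    #include <stdio.h>

    /* Pretend hardware step; step 2 fails in this made-up example. */
    static int do_step(int step)
    {
        return step == 2 ? -1 : 0;
    }

    int main(void)
    {
        int result = 0;              /* without "= 0" the OR below reads garbage */
        int step;

        for (step = 0; step < 4; step++)
            result |= do_step(step); /* accumulate any error code */

        printf("result = %d\n", result);
        return 0;
    }
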
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4697-drm-amdgpu-Use-kvmalloc_array-for-allocating-VRAM-ma.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4697-drm-amdgpu-Use-kvmalloc_array-for-allocating-VRAM-ma.patch
new file mode 100644
index 00000000..ad7c450a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4697-drm-amdgpu-Use-kvmalloc_array-for-allocating-VRAM-ma.patch
@@ -0,0 +1,57 @@
+From d5871abd402e7f170814076860e95e2558b302b2 Mon Sep 17 00:00:00 2001
+From: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+Date: Wed, 9 Jan 2019 20:12:06 +0530
+Subject: [PATCH 4697/5725] drm/amdgpu: Use kvmalloc_array for allocating VRAM
+ manager nodes array
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+It can be quite big, and there's no need for it to be physically
+contiguous. This is less likely to fail under memory pressure (has
+actually happened while running piglit).
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+index 11dba00..aff8d2e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+@@ -136,7 +136,8 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
+ num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
+ }
+
+- nodes = kcalloc(num_nodes, sizeof(*nodes), GFP_KERNEL);
++ nodes = kvmalloc_array(num_nodes, sizeof(*nodes),
++ GFP_KERNEL | __GFP_ZERO);
+ if (!nodes)
+ return -ENOMEM;
+
+@@ -191,7 +192,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
+ drm_mm_remove_node(&nodes[i]);
+ spin_unlock(&mgr->lock);
+
+- kfree(nodes);
++ kvfree(nodes);
+ return r == -ENOSPC ? 0 : r;
+ }
+
+@@ -230,7 +231,7 @@ static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
+ atomic64_sub(usage, &mgr->usage);
+ atomic64_sub(vis_usage, &mgr->vis_usage);
+
+- kfree(mem->mm_node);
++ kvfree(mem->mm_node);
+ mem->mm_node = NULL;
+ }
+
+--
+2.7.4
+
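
For context, the allocation pattern the patch switches to looks roughly like the sketch below. kvmalloc_array() may fall back to vmalloc-backed memory when a physically contiguous allocation would fail, so the matching free must be kvfree(). This is a generic kernel-style illustration, not the VRAM manager code itself:

    #include <linux/errno.h>
    #include <linux/mm.h>        /* kvmalloc_array(), kvfree() */

    static int alloc_nodes_example(unsigned int num_nodes, size_t node_size)
    {
        void *nodes;

        /* Zeroed array; may be vmalloc-backed, so it does not need to be
         * physically contiguous and is less likely to fail under pressure. */
        nodes = kvmalloc_array(num_nodes, node_size, GFP_KERNEL | __GFP_ZERO);
        if (!nodes)
            return -ENOMEM;

        /* ... use nodes ... */

        kvfree(nodes);       /* correct for both kmalloc- and vmalloc-backed memory */
        return 0;
    }
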
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4698-drm-amdgpu-Don-t-default-to-DC-support-for-Kaveri-an.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4698-drm-amdgpu-Don-t-default-to-DC-support-for-Kaveri-an.patch
new file mode 100644
index 00000000..35450686
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4698-drm-amdgpu-Don-t-default-to-DC-support-for-Kaveri-an.patch
@@ -0,0 +1,60 @@
+From 324e4e833783474043fdc2cd1671d421399ec0cf Mon Sep 17 00:00:00 2001
+From: Harry Wentland <harry.wentland@amd.com>
+Date: Tue, 8 May 2018 11:33:42 -0400
+Subject: [PATCH 4698/5725] drm/amdgpu: Don't default to DC support for Kaveri
+ and older
+
+We've had a number of users report failures to detect and light up
+display with DC with LVDS and VGA. These connector types are not
+currently supported with DC. I'd like to add support but unfortunately
+don't have a system with LVDS or VGA available.
+
+In order not to cause regressions we should probably fall back to the
+non-DC driver for ASICs that support VGA and LVDS.
+
+These ASICs are:
+ * Bonaire
+ * Kabini
+ * Kaveri
+ * Mullins
+
+ASIC support can always be force enabled with amdgpu.dc=1
+
+v2: Keep Hawaii on DC
+v3: Added Mullins to the list
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index b39192f..3d23d8b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -2207,10 +2207,18 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
+ switch (asic_type) {
+ #if defined(CONFIG_DRM_AMD_DC)
+ case CHIP_BONAIRE:
+- case CHIP_HAWAII:
+ case CHIP_KAVERI:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
++ /*
++ * We have systems in the wild with these ASICs that require
++ * LVDS and VGA support which is not supported with DC.
++ *
++ * Fallback to the non-DC driver here by default so as not to
++ * cause regressions.
++ */
++ return amdgpu_dc > 0;
++ case CHIP_HAWAII:
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ case CHIP_POLARIS10:
+--
+2.7.4
+
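
The amdgpu.dc module parameter is a tri-state (assumed here as -1 = auto/default, 0 = force off, 1 = force on), so the `return amdgpu_dc > 0;` above means the listed ASICs only use DC when the user explicitly asks for it. A tiny standalone illustration of that check:

    #include <stdbool.h>
    #include <stdio.h>

    /* -1 = auto (default), 0 = force disable, 1 = force enable (assumed semantics) */
    static bool legacy_asic_uses_dc(int amdgpu_dc)
    {
        return amdgpu_dc > 0;    /* only an explicit amdgpu.dc=1 enables DC */
    }

    int main(void)
    {
        printf("auto -> %d\n", legacy_asic_uses_dc(-1));  /* 0: fall back to non-DC */
        printf("dc=0 -> %d\n", legacy_asic_uses_dc(0));   /* 0 */
        printf("dc=1 -> %d\n", legacy_asic_uses_dc(1));   /* 1: force enabled */
        return 0;
    }
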
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4699-drm-amdgpu-All-UVD-instances-share-one-idle_work-han.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4699-drm-amdgpu-All-UVD-instances-share-one-idle_work-han.patch
new file mode 100644
index 00000000..d3a9cedc
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4699-drm-amdgpu-All-UVD-instances-share-one-idle_work-han.patch
@@ -0,0 +1,109 @@
+From 5370cb347676e0af3d49786e6f1e710bc926d3ca Mon Sep 17 00:00:00 2001
+From: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+Date: Wed, 9 Jan 2019 20:15:32 +0530
+Subject: [PATCH 4699/5725] drm/amdgpu: All UVD instances share one idle_work
+ handle
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+All UVD instances have only one dpm control, so it is better
+to share one idle_work handle.
+
+Signed-off-by: James Zhu <James.Zhu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Tested-by: Stefan Agner <stefan@agner.ch>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 13 +++++++------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h | 2 +-
+ 2 files changed, 8 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+index 9ceab34..cb4efa8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+@@ -130,7 +130,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
+ unsigned version_major, version_minor, family_id;
+ int i, j, r;
+
+- INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, amdgpu_uvd_idle_work_handler);
++ INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);
+
+ switch (adev->asic_type) {
+ #ifdef CONFIG_DRM_AMDGPU_CIK
+@@ -325,11 +325,12 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
+ void *ptr;
+ int i, j;
+
++ cancel_delayed_work_sync(&adev->uvd.idle_work);
++
+ for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+ if (adev->uvd.inst[j].vcpu_bo == NULL)
+ continue;
+
+- cancel_delayed_work_sync(&adev->uvd.inst[j].idle_work);
+
+ /* only valid for physical mode */
+ if (adev->asic_type < CHIP_POLARIS10) {
+@@ -1158,7 +1159,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
+ {
+ struct amdgpu_device *adev =
+- container_of(work, struct amdgpu_device, uvd.inst->idle_work.work);
++ container_of(work, struct amdgpu_device, uvd.idle_work.work);
+ unsigned fences = 0, i, j;
+
+ for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+@@ -1180,7 +1181,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
+ AMD_CG_STATE_GATE);
+ }
+ } else {
+- schedule_delayed_work(&adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
++ schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
+ }
+ }
+
+@@ -1192,7 +1193,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
+ if (amdgpu_sriov_vf(adev))
+ return;
+
+- set_clocks = !cancel_delayed_work_sync(&adev->uvd.inst->idle_work);
++ set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
+ if (set_clocks) {
+ if (adev->pm.dpm_enabled) {
+ amdgpu_dpm_enable_uvd(adev, true);
+@@ -1209,7 +1210,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
+ void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
+ {
+ if (!amdgpu_sriov_vf(ring->adev))
+- schedule_delayed_work(&ring->adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
++ schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
+ }
+
+ /**
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+index b1579fb..8b23a1b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+@@ -44,7 +44,6 @@ struct amdgpu_uvd_inst {
+ void *saved_bo;
+ atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
+ struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
+- struct delayed_work idle_work;
+ struct amdgpu_ring ring;
+ struct amdgpu_ring ring_enc[AMDGPU_MAX_UVD_ENC_RINGS];
+ struct amdgpu_irq_src irq;
+@@ -62,6 +61,7 @@ struct amdgpu_uvd {
+ bool address_64_bit;
+ bool use_ctx_buf;
+ struct amdgpu_uvd_inst inst[AMDGPU_MAX_UVD_INSTANCES];
++ struct delayed_work idle_work;
+ };
+
+ int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4700-drm-amdgpu-Update-pin_size-values-before-unpinning-B.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4700-drm-amdgpu-Update-pin_size-values-before-unpinning-B.patch
new file mode 100644
index 00000000..6b77b22d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4700-drm-amdgpu-Update-pin_size-values-before-unpinning-B.patch
@@ -0,0 +1,59 @@
+From 00c69132d943be3650e1038c4ed9a3b9b9171075 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Michel=20D=C3=A4nzer?= <michel.daenzer@amd.com>
+Date: Fri, 15 Jun 2018 11:06:56 +0200
+Subject: [PATCH 4700/5725] drm/amdgpu: Update pin_size values before unpinning
+ BO
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+At least in theory, ttm_bo_validate may move the BO, in which case the
+pin_size accounting would be inconsistent with when the BO was pinned.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 18 ++++++++----------
+ 1 file changed, 8 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index f218f1c..4864d9c 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -994,15 +994,6 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+- for (i = 0; i < bo->placement.num_placement; i++) {
+- bo->placements[i].lpfn = 0;
+- bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
+- }
+- r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+- if (unlikely(r)) {
+- dev_err(adev->dev, "%p validate failed for unpin\n", bo);
+- goto error;
+- }
+
+ if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
+ adev->vram_pin_size -= amdgpu_bo_size(bo);
+@@ -1012,7 +1003,14 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
+ adev->gart_pin_size -= amdgpu_bo_size(bo);
+ }
+
+-error:
++ for (i = 0; i < bo->placement.num_placement; i++) {
++ bo->placements[i].lpfn = 0;
++ bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
++ }
++ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
++ if (unlikely(r))
++ dev_err(adev->dev, "%p validate failed for unpin\n", bo);
++
+ return r;
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4701-drm-amdgpu-Refactor-amdgpu_vram_mgr_bo_invisible_siz.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4701-drm-amdgpu-Refactor-amdgpu_vram_mgr_bo_invisible_siz.patch
new file mode 100644
index 00000000..509da8ed
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4701-drm-amdgpu-Refactor-amdgpu_vram_mgr_bo_invisible_siz.patch
@@ -0,0 +1,88 @@
+From 812a6d369d37d0cc600548f1d8d32d24ca70252c Mon Sep 17 00:00:00 2001
+From: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+Date: Wed, 9 Jan 2019 20:18:03 +0530
+Subject: [PATCH 4701/5725] drm/amdgpu: Refactor
+ amdgpu_vram_mgr_bo_invisible_size helper
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Preparation for the following fix, no functional change intended.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 6 ++----
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 1 +
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 16 ++++++++++++++++
+ 3 files changed, 19 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index 4864d9c..e85c07b 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -945,8 +945,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
+ domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+ if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
+ adev->vram_pin_size += amdgpu_bo_size(bo);
+- if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
+- adev->invisible_pin_size += amdgpu_bo_size(bo);
++ adev->invisible_pin_size += amdgpu_vram_mgr_bo_invisible_size(bo);
+ } else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
+ adev->gart_pin_size += amdgpu_bo_size(bo);
+ }
+@@ -997,8 +996,7 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
+
+ if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
+ adev->vram_pin_size -= amdgpu_bo_size(bo);
+- if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
+- adev->invisible_pin_size -= amdgpu_bo_size(bo);
++ adev->invisible_pin_size -= amdgpu_vram_mgr_bo_invisible_size(bo);
+ } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+ adev->gart_pin_size -= amdgpu_bo_size(bo);
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+index aaf490e..42136da 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+@@ -77,6 +77,7 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem);
+ uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
+ int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);
+
++u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo);
+ uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
+ uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+index aff8d2e..cce71d3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+@@ -98,6 +98,22 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
+
+
+ /**
++ * amdgpu_vram_mgr_bo_invisible_size - CPU invisible BO size
++ *
++ * @bo: &amdgpu_bo buffer object (must be in VRAM)
++ *
++ * Returns:
++ * How much of the given &amdgpu_bo buffer object lies in CPU invisible VRAM.
++ */
++u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo)
++{
++ if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
++ return amdgpu_bo_size(bo);
++
++ return 0;
++}
++
++/**
+ * amdgpu_vram_mgr_new - allocate new ranges
+ *
+ * @man: TTM memory type manager
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4702-drm-amdgpu-Make-amdgpu_vram_mgr_bo_invisible_size-al.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4702-drm-amdgpu-Make-amdgpu_vram_mgr_bo_invisible_size-al.patch
new file mode 100644
index 00000000..9571dfc8
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4702-drm-amdgpu-Make-amdgpu_vram_mgr_bo_invisible_size-al.patch
@@ -0,0 +1,60 @@
+From 7bdb23c7e8b5159862fff4872f4b0c7e55741f3b Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Michel=20D=C3=A4nzer?= <michel.daenzer@amd.com>
+Date: Thu, 14 Jun 2018 13:02:07 +0200
+Subject: [PATCH 4702/5725] drm/amdgpu: Make amdgpu_vram_mgr_bo_invisible_size
+ always accurate
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Even BOs with AMDGPU_GEM_CREATE_NO_CPU_ACCESS may end up at least
+partially in CPU visible VRAM, in particular when all VRAM is visible.
+
+v2:
+* Don't take VRAM mgr spinlock, not needed (Christian König)
+* Make loop logic simpler and clearer.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 20 ++++++++++++++++++--
+ 1 file changed, 18 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+index cce71d3..dc1cb22 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+@@ -107,10 +107,26 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
+ */
+ u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo)
+ {
+- if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
++ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
++ struct ttm_mem_reg *mem = &bo->tbo.mem;
++ struct drm_mm_node *nodes = mem->mm_node;
++ unsigned pages = mem->num_pages;
++ u64 usage = 0;
++
++ if (adev->gmc.visible_vram_size == adev->gmc.real_vram_size)
++ return 0;
++
++ if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
+ return amdgpu_bo_size(bo);
+
+- return 0;
++ while (nodes && pages) {
++ usage += nodes->size << PAGE_SHIFT;
++ usage -= amdgpu_vram_mgr_vis_size(adev, nodes);
++ pages -= nodes->size;
++ ++nodes;
++ }
++
++ return usage;
+ }
+
+ /**
+--
+2.7.4
+
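
As a rough model of the loop above: each drm_mm node covers a range of VRAM pages, and only the part below the CPU-visible window counts as visible, so the invisible size is the sum over nodes of (node bytes minus visible bytes). A standalone sketch with made-up numbers (names and the 4 KiB page size are assumptions for the example, not the driver's definitions):

    #include <stdint.h>
    #include <stdio.h>

    #define GPU_PAGE_SHIFT 12ULL     /* assume 4 KiB VRAM pages for the example */

    struct node { uint64_t start, size; };   /* both in pages */

    /* Bytes of one node that lie above the CPU-visible window. */
    static uint64_t node_invisible_bytes(struct node n, uint64_t visible_pages)
    {
        uint64_t vis = 0;

        if (n.start < visible_pages)
            vis = (n.start + n.size <= visible_pages) ? n.size
                                                      : visible_pages - n.start;
        return (n.size - vis) << GPU_PAGE_SHIFT;
    }

    int main(void)
    {
        /* 256 MiB visible window; one node straddling the boundary. */
        uint64_t visible_pages = (256ULL << 20) >> GPU_PAGE_SHIFT;
        struct node nodes[] = { { visible_pages - 16, 64 } };  /* 16 visible, 48 not */
        uint64_t invisible = 0;
        unsigned i;

        for (i = 0; i < sizeof(nodes) / sizeof(nodes[0]); i++)
            invisible += node_invisible_bytes(nodes[i], visible_pages);

        printf("invisible bytes: %llu\n", (unsigned long long)invisible);  /* 196608 */
        return 0;
    }
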
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4703-drm-amdgpu-GPU-vs-CPU-page-size-fixes-in-amdgpu_vm_b.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4703-drm-amdgpu-GPU-vs-CPU-page-size-fixes-in-amdgpu_vm_b.patch
new file mode 100644
index 00000000..64840c4e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4703-drm-amdgpu-GPU-vs-CPU-page-size-fixes-in-amdgpu_vm_b.patch
@@ -0,0 +1,63 @@
+From 9004422e8083985f599e8f05534514c0187092a5 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Michel=20D=C3=A4nzer?= <michel.daenzer@amd.com>
+Date: Thu, 21 Jun 2018 11:27:46 +0200
+Subject: [PATCH 4703/5725] drm/amdgpu: GPU vs CPU page size fixes in
+ amdgpu_vm_bo_split_mapping
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+start / last / max_entries are numbers of GPU pages, pfn / count are
+numbers of CPU pages. Convert between them accordingly.
+
+Fixes badness on systems with > 4K page size.
+
+Cc: stable@vger.kernel.org
+Bugzilla: https://bugs.freedesktop.org/106258
+Reported-by: Matt Corallo <freedesktop@bluematt.me>
+Tested-by: foxbat@ruin.net
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index d9e3d48..fcd52d8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -1561,7 +1561,9 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
+ case TTM_PL_TT:
+ max_entries = min(max_entries, 16ull * 1024ull);
+
+- for (count = 1; count < max_entries; ++count) {
++ for (count = 1;
++ count < max_entries / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
++ ++count) {
+ uint64_t idx = pfn + count;
+
+ if (pages_addr[idx] !=
+@@ -1574,7 +1576,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
+ dma_addr = pages_addr;
+ } else {
+ addr = pages_addr[pfn];
+- max_entries = count;
++ max_entries = count * (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+ }
+ break;
+ case AMDGPU_PL_DGMA_IMPORT:
+@@ -1610,7 +1612,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
+ if (r)
+ return r;
+
+- pfn += last - start + 1;
++ pfn += (last - start + 1) / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+ if (nodes && nodes->size == pfn) {
+ pfn = 0;
+ ++nodes;
+--
+2.7.4
+
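
The whole fix hinges on one conversion factor: with, say, 64 KiB CPU pages and 4 KiB GPU pages, PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE is 16, so a count expressed in GPU pages must be divided by that factor before it is used to advance the CPU pfn (and a CPU-page count multiplied by it to get GPU-page entries). A tiny standalone check of the arithmetic, with the page sizes assumed purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint64_t cpu_page = 64 * 1024;   /* e.g. PAGE_SIZE on a 64K-page kernel */
        const uint64_t gpu_page = 4 * 1024;    /* AMDGPU_GPU_PAGE_SIZE */
        const uint64_t factor   = cpu_page / gpu_page;          /* 16 */

        uint64_t gpu_pages_mapped  = 4096;                      /* entries in GPU pages */
        uint64_t cpu_pfns_consumed = gpu_pages_mapped / factor; /* advance pfn by this */

        printf("factor=%llu: %llu GPU pages -> %llu CPU pages\n",
               (unsigned long long)factor,
               (unsigned long long)gpu_pages_mapped,
               (unsigned long long)cpu_pfns_consumed);
        return 0;
    }
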
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4704-drm-amdgpu-fix-UBSAN-Undefined-behaviour-for-amdgpu_.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4704-drm-amdgpu-fix-UBSAN-Undefined-behaviour-for-amdgpu_.patch
new file mode 100644
index 00000000..7b05c75f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4704-drm-amdgpu-fix-UBSAN-Undefined-behaviour-for-amdgpu_.patch
@@ -0,0 +1,52 @@
+From f0c2ebbe64394bf2a0f409c3f1b7c2dd3563e4d0 Mon Sep 17 00:00:00 2001
+From: Leo Liu <leo.liu@amd.com>
+Date: Mon, 25 Jun 2018 14:56:06 -0400
+Subject: [PATCH 4704/5725] drm/amdgpu: fix UBSAN: Undefined behaviour for
+ amdgpu_fence.c
+
+Here is the UBSAN dump:
+
+[ 3.866656] index 2 is out of range for type 'amdgpu_uvd_inst [2]'
+[ 3.866693] Workqueue: events work_for_cpu_fn
+[ 3.866702] Call Trace:
+[ 3.866710] dump_stack+0x85/0xc5
+[ 3.866719] ubsan_epilogue+0x9/0x40
+[ 3.866727] __ubsan_handle_out_of_bounds+0x89/0x90
+[ 3.866737] ? rcu_read_lock_sched_held+0x58/0x60
+[ 3.866746] ? __kmalloc+0x26c/0x2d0
+[ 3.866846] amdgpu_fence_driver_start_ring+0x259/0x280 [amdgpu]
+[ 3.866896] amdgpu_ring_init+0x12c/0x710 [amdgpu]
+[ 3.866906] ? sprintf+0x42/0x50
+[ 3.866956] amdgpu_gfx_kiq_init_ring+0x1bc/0x3a0 [amdgpu]
+[ 3.867009] gfx_v8_0_sw_init+0x1ad3/0x2360 [amdgpu]
+[ 3.867062] ? smu7_init+0xec/0x160 [amdgpu]
+[ 3.867109] amdgpu_device_init+0x112c/0x1dc0 [amdgpu]
+
+'ring->me' might be set to 2 by 'amdgpu_gfx_kiq_init_ring', which would
+cause an out-of-range access of 'amdgpu_uvd_inst[2]'.
+
+v2: simplified with ring type
+
+Signed-off-by: Leo Liu <leo.liu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+index bed6d77..1ec9590 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+@@ -376,7 +376,7 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
+ struct amdgpu_device *adev = ring->adev;
+ uint64_t index;
+
+- if (ring != &adev->uvd.inst[ring->me].ring) {
++ if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) {
+ ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
+ ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
+ } else {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4705-drm-amdgpu-Support-new-VCN-FW-version-naming-convent.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4705-drm-amdgpu-Support-new-VCN-FW-version-naming-convent.patch
new file mode 100644
index 00000000..c5b94354
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4705-drm-amdgpu-Support-new-VCN-FW-version-naming-convent.patch
@@ -0,0 +1,81 @@
+From 176be73a8522e88634a9716765c93c7ef6682198 Mon Sep 17 00:00:00 2001
+From: James Zhu <James.Zhu@amd.com>
+Date: Tue, 19 Jun 2018 13:44:04 -0400
+Subject: [PATCH 4705/5725] drm/amdgpu:Support new VCN FW version naming
+ convention
+
+Support new VCN FW version naming convention:
+ [31, 28] for VEP interface major version if applicable
+ [27, 24] for decode interface major version
+ [23, 20] for encode interface major version
+ [19, 12] for encode interface minor version
+ [11, 0] for firmware revision
+Bits 20-23 hold the encode major version and are non-zero in the new naming convention.
+This field is part of version minor and DRM_DISABLED_FLAG in old naming
+convention. Since the latest version minor is 0x5B and DRM_DISABLED_FLAG
+is zero in old naming convention, this field is always zero so far.
+These four bits are used to tell which naming convention is present.
+
+Signed-off-by: James Zhu <James.Zhu@amd.com>
+Reviewed-by: Fang, Peter <Peter.Fang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 33 +++++++++++++++++++++++++++------
+ 1 file changed, 27 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+index e836d7c..be1cbba 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+@@ -52,7 +52,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
+ unsigned long bo_size;
+ const char *fw_name;
+ const struct common_firmware_header *hdr;
+- unsigned version_major, version_minor, family_id;
++ unsigned char fw_check;
+ int r;
+
+ INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
+@@ -83,12 +83,33 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
+
+ hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
+- family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
+- version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
+- version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
+- DRM_INFO("Found VCN firmware Version: %hu.%hu Family ID: %hu\n",
+- version_major, version_minor, family_id);
+
++ /* Bit 20-23, it is encode major and non-zero for new naming convention.
++ * This field is part of version minor and DRM_DISABLED_FLAG in old naming
++ * convention. Since the latest version minor is 0x5B and DRM_DISABLED_FLAG
++ * is zero in old naming convention, this field is always zero so far.
++ * These four bits are used to tell which naming convention is present.
++ */
++ fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
++ if (fw_check) {
++ unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;
++
++ fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
++ enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
++ enc_major = fw_check;
++ dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
++ vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
++ DRM_INFO("Found VCN firmware Version ENC: %hu.%hu DEC: %hu VEP: %hu Revision: %hu\n",
++ enc_major, enc_minor, dec_ver, vep, fw_rev);
++ } else {
++ unsigned int version_major, version_minor, family_id;
++
++ family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
++ version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
++ version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
++ DRM_INFO("Found VCN firmware Version: %hu.%hu Family ID: %hu\n",
++ version_major, version_minor, family_id);
++ }
+
+ bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
+ + AMDGPU_VCN_SESSION_SIZE * 40;
+--
+2.7.4
+
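
The bit layout described above can be checked with a small standalone decoder; the sample version word is invented purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t ver = 0x11101234;                /* hypothetical new-style version */
        uint32_t enc_major = (ver >> 20) & 0xf;   /* non-zero => new naming convention */

        if (enc_major)
            printf("VEP %u DEC %u ENC %u.%u rev %u\n",
                   (ver >> 28) & 0xf,             /* [31:28] VEP major         */
                   (ver >> 24) & 0xf,             /* [27:24] decode major      */
                   enc_major,                     /* [23:20] encode major      */
                   (ver >> 12) & 0xff,            /* [19:12] encode minor      */
                   ver & 0xfff);                  /* [11:0]  firmware revision */
        else
            printf("old naming: %u.%u family 0x%02x\n",
                   (ver >> 24) & 0xff, (ver >> 8) & 0xff, ver & 0xff);
        return 0;
    }
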
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4706-drm-amd-display-release-spinlock-before-committing-u.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4706-drm-amd-display-release-spinlock-before-committing-u.patch
new file mode 100644
index 00000000..5f073473
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4706-drm-amd-display-release-spinlock-before-committing-u.patch
@@ -0,0 +1,60 @@
+From 90c87d89b78bf28254b57772e03c5f24c9ee63f6 Mon Sep 17 00:00:00 2001
+From: Shirish S <shirish.s@amd.com>
+Date: Tue, 26 Jun 2018 09:32:39 +0530
+Subject: [PATCH 4706/5725] drm/amd/display: release spinlock before committing
+ updates to stream
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Currently, amdgpu_dm_do_flip() takes the crtc->dev->event_lock spinlock and
+releases it only after committing updates to the stream.
+
+dc_commit_updates_for_stream() should be moved out of
+spinlock for the below reasons:
+
+1. event_lock is supposed to protect access to acrtc->pflip_status _only_
+2. dc_commit_updates_for_stream() has potential sleeps
+ and it's also not appropriate to be in an atomic state
+ for such long sequences of code.
+
+Signed-off-by: Shirish S <shirish.s@amd.com>
+Suggested-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 017fd80..0543cb1 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -4256,10 +4256,11 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
+ if (acrtc->base.state->event)
+ prepare_flip_isr(acrtc);
+
++ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
++
+ surface_updates->surface = dc_stream_get_status(acrtc_state->stream)->plane_states[0];
+ surface_updates->flip_addr = &addr;
+
+-
+ dc_commit_updates_for_stream(adev->dm.dc,
+ surface_updates,
+ 1,
+@@ -4272,9 +4273,6 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
+ __func__,
+ addr.address.grph.addr.high_part,
+ addr.address.grph.addr.low_part);
+-
+-
+- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ }
+
+ /*
+--
+2.7.4
+
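
The general shape of the fix is: keep only the pflip bookkeeping that event_lock actually protects inside the critical section, and drop the lock before calling anything that may sleep. A simplified kernel-style sketch of that ordering (names are placeholders, not the driver's):

    #include <linux/spinlock.h>
    #include <linux/types.h>

    /* Placeholder for the call that may sleep (dc_commit_updates_for_stream()
     * in the real driver). */
    static void commit_updates_may_sleep(void)
    {
    }

    static void flip_example(spinlock_t *event_lock, bool *pflip_pending)
    {
        unsigned long flags;

        /* event_lock only guards the flip state ... */
        spin_lock_irqsave(event_lock, flags);
        *pflip_pending = true;
        spin_unlock_irqrestore(event_lock, flags);

        /* ... so release it before work that can sleep or take a long time. */
        commit_updates_may_sleep();
    }
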
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4707-drm-amd-powerplay-correct-vega12-thermal-support-as-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4707-drm-amd-powerplay-correct-vega12-thermal-support-as-.patch
new file mode 100644
index 00000000..6418a356
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4707-drm-amd-powerplay-correct-vega12-thermal-support-as-.patch
@@ -0,0 +1,30 @@
+From 9452765c7d12895a4ae9870d64b4be77decb1d71 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Mon, 11 Jun 2018 16:46:40 +0800
+Subject: [PATCH 4707/5725] drm/amd/powerplay: correct vega12 thermal support
+ as true
+
+Thermal support is enabled on vega12.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+index d685ce7..b38b1d9 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+@@ -81,6 +81,7 @@ static void vega12_set_default_registry_data(struct pp_hwmgr *hwmgr)
+
+ data->registry_data.disallowed_features = 0x0;
+ data->registry_data.od_state_in_dc_support = 0;
++ data->registry_data.thermal_support = 1;
+ data->registry_data.skip_baco_hardware = 0;
+
+ data->registry_data.log_avfs_param = 0;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4708-drm-amd-powerplay-correct-vega12-bootup-values-setti.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4708-drm-amd-powerplay-correct-vega12-bootup-values-setti.patch
new file mode 100644
index 00000000..28c69fb2
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4708-drm-amd-powerplay-correct-vega12-bootup-values-setti.patch
@@ -0,0 +1,183 @@
+From 56c86885423448422d8491e631b8cd80c29a7b0d Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Mon, 28 May 2018 08:59:16 +0800
+Subject: [PATCH 4708/5725] drm/amd/powerplay: correct vega12 bootup values
+ settings
+
+The vbios firmware structure changed between v3_1 and v3_2. So,
+the code that sets up the bootup values needs different paths based
+on header version.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c | 94 +++++++++++++++++++---
+ drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h | 3 +
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 3 +
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h | 3 +
+ 4 files changed, 91 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
+index 5325661..aa2faff 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
+@@ -512,14 +512,82 @@ int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, BIOS_CLKI
+ return 0;
+ }
+
++static void pp_atomfwctrl_copy_vbios_bootup_values_3_2(struct pp_hwmgr *hwmgr,
++ struct pp_atomfwctrl_bios_boot_up_values *boot_values,
++ struct atom_firmware_info_v3_2 *fw_info)
++{
++ uint32_t frequency = 0;
++
++ boot_values->ulRevision = fw_info->firmware_revision;
++ boot_values->ulGfxClk = fw_info->bootup_sclk_in10khz;
++ boot_values->ulUClk = fw_info->bootup_mclk_in10khz;
++ boot_values->usVddc = fw_info->bootup_vddc_mv;
++ boot_values->usVddci = fw_info->bootup_vddci_mv;
++ boot_values->usMvddc = fw_info->bootup_mvddc_mv;
++ boot_values->usVddGfx = fw_info->bootup_vddgfx_mv;
++ boot_values->ucCoolingID = fw_info->coolingsolution_id;
++ boot_values->ulSocClk = 0;
++ boot_values->ulDCEFClk = 0;
++
++ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_SOCCLK_ID, &frequency))
++ boot_values->ulSocClk = frequency;
++
++ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCEFCLK_ID, &frequency))
++ boot_values->ulDCEFClk = frequency;
++
++ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_ECLK_ID, &frequency))
++ boot_values->ulEClk = frequency;
++
++ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_VCLK_ID, &frequency))
++ boot_values->ulVClk = frequency;
++
++ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCLK_ID, &frequency))
++ boot_values->ulDClk = frequency;
++}
++
++static void pp_atomfwctrl_copy_vbios_bootup_values_3_1(struct pp_hwmgr *hwmgr,
++ struct pp_atomfwctrl_bios_boot_up_values *boot_values,
++ struct atom_firmware_info_v3_1 *fw_info)
++{
++ uint32_t frequency = 0;
++
++ boot_values->ulRevision = fw_info->firmware_revision;
++ boot_values->ulGfxClk = fw_info->bootup_sclk_in10khz;
++ boot_values->ulUClk = fw_info->bootup_mclk_in10khz;
++ boot_values->usVddc = fw_info->bootup_vddc_mv;
++ boot_values->usVddci = fw_info->bootup_vddci_mv;
++ boot_values->usMvddc = fw_info->bootup_mvddc_mv;
++ boot_values->usVddGfx = fw_info->bootup_vddgfx_mv;
++ boot_values->ucCoolingID = fw_info->coolingsolution_id;
++ boot_values->ulSocClk = 0;
++ boot_values->ulDCEFClk = 0;
++
++ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_SOCCLK_ID, &frequency))
++ boot_values->ulSocClk = frequency;
++
++ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCEFCLK_ID, &frequency))
++ boot_values->ulDCEFClk = frequency;
++
++ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_ECLK_ID, &frequency))
++ boot_values->ulEClk = frequency;
++
++ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_VCLK_ID, &frequency))
++ boot_values->ulVClk = frequency;
++
++ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCLK_ID, &frequency))
++ boot_values->ulDClk = frequency;
++}
++
+ int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
+ struct pp_atomfwctrl_bios_boot_up_values *boot_values)
+ {
+- struct atom_firmware_info_v3_1 *info = NULL;
++ struct atom_firmware_info_v3_2 *fwinfo_3_2;
++ struct atom_firmware_info_v3_1 *fwinfo_3_1;
++ struct atom_common_table_header *info = NULL;
+ uint16_t ix;
+
+ ix = GetIndexIntoMasterDataTable(firmwareinfo);
+- info = (struct atom_firmware_info_v3_1 *)
++ info = (struct atom_common_table_header *)
+ smu_atom_get_data_table(hwmgr->adev,
+ ix, NULL, NULL, NULL);
+
+@@ -528,16 +596,18 @@ int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
+ return -EINVAL;
+ }
+
+- boot_values->ulRevision = info->firmware_revision;
+- boot_values->ulGfxClk = info->bootup_sclk_in10khz;
+- boot_values->ulUClk = info->bootup_mclk_in10khz;
+- boot_values->usVddc = info->bootup_vddc_mv;
+- boot_values->usVddci = info->bootup_vddci_mv;
+- boot_values->usMvddc = info->bootup_mvddc_mv;
+- boot_values->usVddGfx = info->bootup_vddgfx_mv;
+- boot_values->ucCoolingID = info->coolingsolution_id;
+- boot_values->ulSocClk = 0;
+- boot_values->ulDCEFClk = 0;
++ if ((info->format_revision == 3) && (info->content_revision == 2)) {
++ fwinfo_3_2 = (struct atom_firmware_info_v3_2 *)info;
++ pp_atomfwctrl_copy_vbios_bootup_values_3_2(hwmgr,
++ boot_values, fwinfo_3_2);
++ } else if ((info->format_revision == 3) && (info->content_revision == 1)) {
++ fwinfo_3_1 = (struct atom_firmware_info_v3_1 *)info;
++ pp_atomfwctrl_copy_vbios_bootup_values_3_1(hwmgr,
++ boot_values, fwinfo_3_1);
++ } else {
++ pr_info("Fw info table revision does not match!");
++ return -EINVAL;
++ }
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
+index fe10aa4..745bd38 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
+@@ -136,6 +136,9 @@ struct pp_atomfwctrl_bios_boot_up_values {
+ uint32_t ulUClk;
+ uint32_t ulSocClk;
+ uint32_t ulDCEFClk;
++ uint32_t ulEClk;
++ uint32_t ulVClk;
++ uint32_t ulDClk;
+ uint16_t usVddc;
+ uint16_t usVddci;
+ uint16_t usMvddc;
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+index b38b1d9..6e22cb3 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+@@ -804,6 +804,9 @@ static int vega12_init_smc_table(struct pp_hwmgr *hwmgr)
+ data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
+ data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
+ data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID;
++ data->vbios_boot_state.eclock = boot_up_values.ulEClk;
++ data->vbios_boot_state.dclock = boot_up_values.ulDClk;
++ data->vbios_boot_state.vclock = boot_up_values.ulVClk;
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetMinDeepSleepDcefclk,
+ (uint32_t)(data->vbios_boot_state.dcef_clock / 100));
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
+index e81ded1..49b38df 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
+@@ -167,6 +167,9 @@ struct vega12_vbios_boot_state {
+ uint32_t mem_clock;
+ uint32_t soc_clock;
+ uint32_t dcef_clock;
++ uint32_t eclock;
++ uint32_t dclock;
++ uint32_t vclock;
+ };
+
+ #define DPMTABLE_OD_UPDATE_SCLK 0x00000001
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4709-drm-amd-powerplay-smc_dpm_info-structure-change.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4709-drm-amd-powerplay-smc_dpm_info-structure-change.patch
new file mode 100644
index 00000000..5454ac62
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4709-drm-amd-powerplay-smc_dpm_info-structure-change.patch
@@ -0,0 +1,91 @@
+From 748ff5b23d3fe68482caa6f37bcb16503a7e1c5f Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Mon, 11 Jun 2018 15:20:39 +0800
+Subject: [PATCH 4709/5725] drm/amd/powerplay: smc_dpm_info structure change
+
+A new member Vr2_I2C_address is added.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/include/atomfirmware.h | 5 ++++-
+ drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c | 2 ++
+ drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h | 2 ++
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c | 2 ++
+ drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h | 5 ++++-
+ 5 files changed, 14 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
+index 092d800..33b4de4 100644
+--- a/drivers/gpu/drm/amd/include/atomfirmware.h
++++ b/drivers/gpu/drm/amd/include/atomfirmware.h
+@@ -1433,7 +1433,10 @@ struct atom_smc_dpm_info_v4_1
+ uint8_t acggfxclkspreadpercent;
+ uint16_t acggfxclkspreadfreq;
+
+- uint32_t boardreserved[10];
++ uint8_t Vr2_I2C_address;
++ uint8_t padding_vr2[3];
++
++ uint32_t boardreserved[9];
+ };
+
+ /*
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
+index aa2faff..d27c1c9 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
+@@ -699,5 +699,7 @@ int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr,
+ param->acggfxclkspreadpercent = info->acggfxclkspreadpercent;
+ param->acggfxclkspreadfreq = info->acggfxclkspreadfreq;
+
++ param->Vr2_I2C_address = info->Vr2_I2C_address;
++
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
+index 745bd38..22e2166 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
+@@ -210,6 +210,8 @@ struct pp_atomfwctrl_smc_dpm_parameters
+ uint8_t acggfxclkspreadenabled;
+ uint8_t acggfxclkspreadpercent;
+ uint16_t acggfxclkspreadfreq;
++
++ uint8_t Vr2_I2C_address;
+ };
+
+ int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr,
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
+index 888ddca..2991470 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
+@@ -230,6 +230,8 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable
+ ppsmc_pptable->AcgThresholdFreqLow = 0xFFFF;
+ }
+
++ ppsmc_pptable->Vr2_I2C_address = smc_dpm_table.Vr2_I2C_address;
++
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
+index 2f8a3b9..b08526f 100644
+--- a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
+@@ -499,7 +499,10 @@ typedef struct {
+ uint8_t AcgGfxclkSpreadPercent;
+ uint16_t AcgGfxclkSpreadFreq;
+
+- uint32_t BoardReserved[10];
++ uint8_t Vr2_I2C_address;
++ uint8_t padding_vr2[3];
++
++ uint32_t BoardReserved[9];
+
+
+ uint32_t MmHubPadding[7];
+--
+2.7.4
+
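
The reserved array shrinks from 10 to 9 entries so the overall structure size (shared with the SMU firmware) stays the same: one uint8_t plus three explicit padding bytes occupies exactly the space of one uint32_t. A standalone sanity check with a trimmed-down stand-in for the real structure:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    struct layout_old {
        uint16_t acggfxclkspreadfreq;
        uint32_t boardreserved[10];
    };

    struct layout_new {
        uint16_t acggfxclkspreadfreq;
        uint8_t  Vr2_I2C_address;
        uint8_t  padding_vr2[3];      /* 1 + 3 bytes == one former reserved uint32_t */
        uint32_t boardreserved[9];
    };

    int main(void)
    {
        printf("old=%zu new=%zu\n",
               sizeof(struct layout_old), sizeof(struct layout_new));
        /* Same size, so the firmware-visible layout is preserved. */
        assert(sizeof(struct layout_old) == sizeof(struct layout_new));
        return 0;
    }
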
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4710-drm-amdgpu-fix-swapped-emit_ib_size-in-vce3.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4710-drm-amdgpu-fix-swapped-emit_ib_size-in-vce3.patch
new file mode 100644
index 00000000..e6eb0465
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4710-drm-amdgpu-fix-swapped-emit_ib_size-in-vce3.patch
@@ -0,0 +1,39 @@
+From f50e337580747d4cf06c9c7ec3e9175b8e9d7b75 Mon Sep 17 00:00:00 2001
+From: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+Date: Wed, 9 Jan 2019 20:22:20 +0530
+Subject: [PATCH 4710/5725] drm/amdgpu: fix swapped emit_ib_size in vce3
+
+The phys and vm versions had the values swapped.
+
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+index f67822f..a71b975 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+@@ -900,7 +900,7 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
+ .emit_frame_size =
+ 4 + /* vce_v3_0_emit_pipeline_sync */
+ 6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
+- .emit_ib_size = 5, /* amdgpu_vce_ring_emit_ib */
++ .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
+ .emit_ib = amdgpu_vce_ring_emit_ib,
+ .emit_fence = amdgpu_vce_ring_emit_fence,
+ .test_ring = amdgpu_vce_ring_test_ring,
+@@ -924,7 +924,7 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
+ 6 + /* vce_v3_0_emit_vm_flush */
+ 4 + /* vce_v3_0_emit_pipeline_sync */
+ 6 + 6, /* amdgpu_vce_ring_emit_fence x2 vm fence */
+- .emit_ib_size = 4, /* vce_v3_0_ring_emit_ib */
++ .emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */
+ .emit_ib = vce_v3_0_ring_emit_ib,
+ .emit_vm_flush = vce_v3_0_emit_vm_flush,
+ .emit_pipeline_sync = vce_v3_0_emit_pipeline_sync,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4711-drm-amdgpu-pm-fix-display-count-in-non-DC-path.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4711-drm-amdgpu-pm-fix-display-count-in-non-DC-path.patch
new file mode 100644
index 00000000..78686b1b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4711-drm-amdgpu-pm-fix-display-count-in-non-DC-path.patch
@@ -0,0 +1,30 @@
+From ce0aee34f1622f8c8a46f9516c1e047494f7d935 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 28 Jun 2018 12:44:25 -0500
+Subject: [PATCH 4711/5725] drm/amdgpu/pm: fix display count in non-DC path
+
+new_active_crtcs is a bitmask, new_active_crtc_count is the
+actual count.
+
+Reviewed-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+index 2c904a6..31fbbcd 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+@@ -1910,7 +1910,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
+ if (!amdgpu_device_has_dc_support(adev)) {
+ mutex_lock(&adev->pm.mutex);
+ amdgpu_dpm_get_active_displays(adev);
+- adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtcs;
++ adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
+ adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
+ adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
+ /* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
+--
+2.7.4
+
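Not part of the patch itself, but a minimal, self-contained C sketch of the bitmask-versus-count mix-up fixed above; the variable names only mirror the amdgpu fields for readability:

```c
/* Illustrative only: a bitmask says WHICH crtcs are active,
 * popcount of that mask says HOW MANY. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t new_active_crtcs = 0x5;   /* crtc 0 and crtc 2 active */

    unsigned int wrong_num_display = new_active_crtcs;                /* 5 */
    unsigned int new_active_crtc_count =
        __builtin_popcount(new_active_crtcs);                         /* 2 */

    printf("bitmask used as count: %u display(s)\n", wrong_num_display);
    printf("actual active count:   %u display(s)\n", new_active_crtc_count);
    return 0;
}
```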
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4712-drm-amdgpu-fix-user-fence-write-race-condition.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4712-drm-amdgpu-fix-user-fence-write-race-condition.patch
new file mode 100644
index 00000000..d9a10f3f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4712-drm-amdgpu-fix-user-fence-write-race-condition.patch
@@ -0,0 +1,58 @@
+From 2a3e01dbb09c6021cde101650d1555ca5d28b4e4 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Nicolai=20H=C3=A4hnle?= <nicolai.haehnle@amd.com>
+Date: Fri, 29 Jun 2018 13:23:25 +0200
+Subject: [PATCH 4712/5725] drm/amdgpu: fix user fence write race condition
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The buffer object backing the user fence is reserved using the non-user
+fence, i.e., as soon as the non-user fence is signaled, the user fence
+buffer object can be moved or even destroyed.
+
+Therefore, emit the user fence first.
+
+Both fences have the same cache invalidation behavior, so this should
+have no user-visible effect.
+
+Signed-off-by: Nicolai Hähnle <nicolai.haehnle@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+index 5a21f9c..5f05d15d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+@@ -238,6 +238,12 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
+ if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE)
+ fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY;
+
++ /* wrap the last IB with fence */
++ if (job && job->uf_addr) {
++ amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
++ fence_flags | AMDGPU_FENCE_FLAG_64BIT);
++ }
++
+ r = amdgpu_fence_emit(ring, f, fence_flags);
+ if (r) {
+ dev_err(adev->dev, "failed to emit fence (%d)\n", r);
+@@ -250,12 +256,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
+ if (ring->funcs->insert_end)
+ ring->funcs->insert_end(ring);
+
+- /* wrap the last IB with fence */
+- if (job && job->uf_addr) {
+- amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
+- fence_flags | AMDGPU_FENCE_FLAG_64BIT);
+- }
+-
+ if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
+ amdgpu_ring_patch_cond_exec(ring, patch_offset);
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4713-drm-amd-display-adding-ycbcr420-pixel-encoding-for-h.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4713-drm-amd-display-adding-ycbcr420-pixel-encoding-for-h.patch
new file mode 100644
index 00000000..a151faf3
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4713-drm-amd-display-adding-ycbcr420-pixel-encoding-for-h.patch
@@ -0,0 +1,52 @@
+From a47beb902eae45b2392cfedc8267fe13bcccefec Mon Sep 17 00:00:00 2001
+From: Mikita Lipski <mikita.lipski@amd.com>
+Date: Wed, 4 Jul 2018 17:26:26 -0400
+Subject: [PATCH 4713/5725] drm/amd/display: adding ycbcr420 pixel encoding for
+ hdmi
+
+[why]
+HDMI EDID's VSDB contains special timings specifically for the
+YCbCr 4:2:0 colour space. In those cases we need to verify
+whether the mode provided is one of the special ones that has
+to use YCbCr 4:2:0 pixel encoding for the display info.
+[how]
+Verify whether the mode uses the ycbcr420-only colour space with
+the help of a DRM helper function and, if so, assign the mode to
+use ycbcr420 pixel encoding.
+
+Tested-by: Mike Lothian <mike@fireburn.co.uk>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Mikita Lipski <mikita.lipski@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 0543cb1..8d60833 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -2413,6 +2413,7 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
+ const struct drm_connector *connector)
+ {
+ struct dc_crtc_timing *timing_out = &stream->timing;
++ const struct drm_display_info *info = &connector->display_info;
+
+ memset(timing_out, 0, sizeof(struct dc_crtc_timing));
+
+@@ -2421,8 +2422,10 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
+ timing_out->v_border_top = 0;
+ timing_out->v_border_bottom = 0;
+ /* TODO: un-hardcode */
+-
+- if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
++ if (drm_mode_is_420_only(info, mode_in)
++ && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
++ timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
++ else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
+ && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
+ else
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4714-drm-amd-display-add-a-check-for-display-depth-validi.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4714-drm-amd-display-add-a-check-for-display-depth-validi.patch
new file mode 100644
index 00000000..4e2afab8
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4714-drm-amd-display-add-a-check-for-display-depth-validi.patch
@@ -0,0 +1,90 @@
+From 7456cb4d8d7f538b679b05dfa0ee7a5975d9d404 Mon Sep 17 00:00:00 2001
+From: Mikita Lipski <mikita.lipski@amd.com>
+Date: Wed, 4 Jul 2018 17:27:56 -0400
+Subject: [PATCH 4714/5725] drm/amd/display: add a check for display depth
+ validity
+
+[why]
+HDMI 2.0 fails to validate 4K@60 timing with 10 bpc
+[how]
+Add a helper function that verifies whether the assigned display
+depth passes bandwidth validation.
+Drop the display depth by one level until the calculated pixel
+clock is lower than the maximum TMDS clock.
+
+Bugzilla: https://bugs.freedesktop.org/106959
+
+Tested-by: Mike Lothian <mike@fireburn.co.uk>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Mikita Lipski <mikita.lipski@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 44 +++++++++++++++++++++++
+ 1 file changed, 44 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 8d60833..2aa0041 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -2364,6 +2364,47 @@ get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
+ return color_space;
+ }
+
++static void reduce_mode_colour_depth(struct dc_crtc_timing *timing_out)
++{
++ if (timing_out->display_color_depth <= COLOR_DEPTH_888)
++ return;
++
++ timing_out->display_color_depth--;
++}
++
++static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_out,
++ const struct drm_display_info *info)
++{
++ int normalized_clk;
++ if (timing_out->display_color_depth <= COLOR_DEPTH_888)
++ return;
++ do {
++ normalized_clk = timing_out->pix_clk_khz;
++ /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
++ if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
++ normalized_clk /= 2;
++ /* Adjusting pix clock following on HDMI spec based on colour depth */
++ switch (timing_out->display_color_depth) {
++ case COLOR_DEPTH_101010:
++ normalized_clk = (normalized_clk * 30) / 24;
++ break;
++ case COLOR_DEPTH_121212:
++ normalized_clk = (normalized_clk * 36) / 24;
++ break;
++ case COLOR_DEPTH_161616:
++ normalized_clk = (normalized_clk * 48) / 24;
++ break;
++ default:
++ return;
++ }
++ if (normalized_clk <= info->max_tmds_clock)
++ return;
++ reduce_mode_colour_depth(timing_out);
++
++ } while (timing_out->display_color_depth > COLOR_DEPTH_888);
++
++}
++
+ /*****************************************************************************/
+
+ static int
+@@ -2462,6 +2503,9 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
+ stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
+ stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
+
++ if (stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
++ adjust_colour_depth_from_display_info(timing_out, info);
++
+ calculate_phy_pix_clks(stream);
+ }
+
+--
+2.7.4
+
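Purely illustrative worked example of the bandwidth check added above, using assumed round numbers (594 MHz pixel clock for 4K@60 RGB, 600 MHz HDMI 2.0 TMDS limit); the real helper operates on struct dc_crtc_timing and also halves the clock for YCbCr 4:2:0:

```c
/* Illustrative only: normalize the pixel clock for the requested colour
 * depth and drop the depth until it fits under the TMDS limit. */
#include <stdio.h>

enum depth { DEPTH_888 = 0, DEPTH_101010, DEPTH_121212, DEPTH_161616 };
static const int bits[] = { 24, 30, 36, 48 };

int main(void)
{
    int pix_clk_khz  = 594000;     /* 4K@60, RGB 4:4:4 (assumed) */
    int max_tmds_khz = 600000;     /* HDMI 2.0 limit (assumed) */
    enum depth d = DEPTH_101010;   /* EDID asked for 10 bpc */

    while (d > DEPTH_888) {
        long normalized = (long)pix_clk_khz * bits[d] / 24;
        printf("%d bpc -> %ld kHz (%s)\n", bits[d], normalized,
               normalized <= max_tmds_khz ? "ok" : "too high, dropping depth");
        if (normalized <= max_tmds_khz)
            break;
        d--;                       /* reduce_mode_colour_depth() equivalent */
    }
    printf("final colour depth: %d bpc\n", bits[d]);
    return 0;
}
```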
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4715-Revert-drm-amd-display-Don-t-return-ddc-result-and-r.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4715-Revert-drm-amd-display-Don-t-return-ddc-result-and-r.patch
new file mode 100644
index 00000000..dc708c8f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4715-Revert-drm-amd-display-Don-t-return-ddc-result-and-r.patch
@@ -0,0 +1,151 @@
+From fdb158543496f6cf08f58aea7b75a95bcb68fb4a Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Tue, 10 Jul 2018 12:56:45 -0500
+Subject: [PATCH 4715/5725] Revert "drm/amd/display: Don't return ddc result
+ and read_bytes in same return value"
+
+This reverts commit 018d82e5f02ef3583411bcaa4e00c69786f46f19.
+
+This breaks DDC in certain cases. Revert for 4.18 and previous kernels.
+For 4.19, this is fixed with the following more extensive patches:
+drm/amd/display: Serialize is_dp_sink_present
+drm/amd/display: Break out function to simply read aux reply
+drm/amd/display: Return aux replies directly to DRM
+drm/amd/display: Right shift AUX reply value sooner than later
+drm/amd/display: Read AUX channel even if only status byte is returned
+
+Link: https://lists.freedesktop.org/archives/amd-gfx/2018-July/023788.html
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+---
+ .../drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 20 ++++++++------------
+ drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c | 10 +++-------
+ drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h | 5 ++---
+ 3 files changed, 13 insertions(+), 22 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+index 559bd2a..e3110d6 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -82,22 +82,21 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
+ enum i2c_mot_mode mot = (msg->request & DP_AUX_I2C_MOT) ?
+ I2C_MOT_TRUE : I2C_MOT_FALSE;
+ enum ddc_result res;
+- uint32_t read_bytes = msg->size;
++ ssize_t read_bytes;
+
+ if (WARN_ON(msg->size > 16))
+ return -E2BIG;
+
+ switch (msg->request & ~DP_AUX_I2C_MOT) {
+ case DP_AUX_NATIVE_READ:
+- res = dal_ddc_service_read_dpcd_data(
++ read_bytes = dal_ddc_service_read_dpcd_data(
+ TO_DM_AUX(aux)->ddc_service,
+ false,
+ I2C_MOT_UNDEF,
+ msg->address,
+ msg->buffer,
+- msg->size,
+- &read_bytes);
+- break;
++ msg->size);
++ return read_bytes;
+ case DP_AUX_NATIVE_WRITE:
+ res = dal_ddc_service_write_dpcd_data(
+ TO_DM_AUX(aux)->ddc_service,
+@@ -108,15 +107,14 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
+ msg->size);
+ break;
+ case DP_AUX_I2C_READ:
+- res = dal_ddc_service_read_dpcd_data(
++ read_bytes = dal_ddc_service_read_dpcd_data(
+ TO_DM_AUX(aux)->ddc_service,
+ true,
+ mot,
+ msg->address,
+ msg->buffer,
+- msg->size,
+- &read_bytes);
+- break;
++ msg->size);
++ return read_bytes;
+ case DP_AUX_I2C_WRITE:
+ res = dal_ddc_service_write_dpcd_data(
+ TO_DM_AUX(aux)->ddc_service,
+@@ -138,9 +136,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
+ r == DDC_RESULT_SUCESSFULL);
+ #endif
+
+- if (res != DDC_RESULT_SUCESSFULL)
+- return -EIO;
+- return read_bytes;
++ return msg->size;
+ }
+
+ static enum drm_connector_status
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+index ae48d60..49c2fac 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+@@ -629,14 +629,13 @@ bool dal_ddc_service_query_ddc_data(
+ return ret;
+ }
+
+-enum ddc_result dal_ddc_service_read_dpcd_data(
++ssize_t dal_ddc_service_read_dpcd_data(
+ struct ddc_service *ddc,
+ bool i2c,
+ enum i2c_mot_mode mot,
+ uint32_t address,
+ uint8_t *data,
+- uint32_t len,
+- uint32_t *read)
++ uint32_t len)
+ {
+ struct aux_payload read_payload = {
+ .i2c_over_aux = i2c,
+@@ -653,8 +652,6 @@ enum ddc_result dal_ddc_service_read_dpcd_data(
+ .mot = mot
+ };
+
+- *read = 0;
+-
+ if (len > DEFAULT_AUX_MAX_DATA_SIZE) {
+ BREAK_TO_DEBUGGER();
+ return DDC_RESULT_FAILED_INVALID_OPERATION;
+@@ -664,8 +661,7 @@ enum ddc_result dal_ddc_service_read_dpcd_data(
+ ddc->ctx->i2caux,
+ ddc->ddc_pin,
+ &command)) {
+- *read = command.payloads->length;
+- return DDC_RESULT_SUCESSFULL;
++ return (ssize_t)command.payloads->length;
+ }
+
+ return DDC_RESULT_FAILED_OPERATION;
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
+index 30b3a08..090b7a8 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
+@@ -102,14 +102,13 @@ bool dal_ddc_service_query_ddc_data(
+ uint8_t *read_buf,
+ uint32_t read_size);
+
+-enum ddc_result dal_ddc_service_read_dpcd_data(
++ssize_t dal_ddc_service_read_dpcd_data(
+ struct ddc_service *ddc,
+ bool i2c,
+ enum i2c_mot_mode mot,
+ uint32_t address,
+ uint8_t *data,
+- uint32_t len,
+- uint32_t *read);
++ uint32_t len);
+
+ enum ddc_result dal_ddc_service_write_dpcd_data(
+ struct ddc_service *ddc,
+--
+2.7.4
+
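For context, a toy C sketch (not the DC interfaces themselves) of the two calling conventions involved: the ssize_t style restored by this revert, and the status-plus-out-parameter style the reverted commit had introduced:

```c
/* Illustrative only; toy functions standing in for the DDC read path. */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <stdint.h>

/* Style restored by the revert: byte count (or negative errno) comes back
 * directly through the return value. */
static ssize_t read_dpcd_return_bytes(uint8_t *buf, size_t len)
{
    const uint8_t reply[4] = { 0x11, 0x22, 0x33, 0x44 };
    size_t n = len < sizeof(reply) ? len : sizeof(reply);
    memcpy(buf, reply, n);
    return (ssize_t)n;
}

/* Style the reverted commit had introduced: a status code plus an
 * out-parameter carrying the number of bytes actually read. */
static int read_dpcd_with_out_param(uint8_t *buf, size_t len, uint32_t *read)
{
    ssize_t n = read_dpcd_return_bytes(buf, len);
    if (n < 0)
        return -EIO;
    *read = (uint32_t)n;
    return 0;
}

int main(void)
{
    uint8_t buf[16];
    uint32_t got = 0;

    printf("direct return style: %zd bytes\n",
           read_dpcd_return_bytes(buf, sizeof(buf)));
    if (read_dpcd_with_out_param(buf, sizeof(buf), &got) == 0)
        printf("status + out-param style: %u bytes\n", got);
    return 0;
}
```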
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4716-drm-amdgpu-Reserve-VM-root-shared-fence-slot-for-com.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4716-drm-amdgpu-Reserve-VM-root-shared-fence-slot-for-com.patch
new file mode 100644
index 00000000..dfa4aa0e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4716-drm-amdgpu-Reserve-VM-root-shared-fence-slot-for-com.patch
@@ -0,0 +1,46 @@
+From 30b6d754525da76550a36fb788aba8fd31e49526 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Michel=20D=C3=A4nzer?= <michel.daenzer@amd.com>
+Date: Mon, 25 Jun 2018 11:07:17 +0200
+Subject: [PATCH 4716/5725] drm/amdgpu: Reserve VM root shared fence slot for
+ command submission (v3)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Without this, there might not be enough slots, which could trigger the
+BUG_ON in reservation_object_add_shared_fence.
+
+v2:
+* Jump to the error label instead of returning directly (Jerry Zhang)
+v3:
+* Reserve slots for command submission after VM updates (Christian König)
+
+Cc: stable@vger.kernel.org
+Bugzilla: https://bugs.freedesktop.org/106418
+Reported-by: mikhail.v.gavrilov@gmail.com
+Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
+Signed-off-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index abe2bff..da11c95 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -930,6 +930,10 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
+ r = amdgpu_bo_vm_update_pte(p);
+ if (r)
+ return r;
++
++ r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
++ if (r)
++ return r;
+ }
+
+ return amdgpu_cs_sync_rings(p);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4717-drm-amdgpu-Verify-root-PD-is-mapped-into-kernel-addr.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4717-drm-amdgpu-Verify-root-PD-is-mapped-into-kernel-addr.patch
new file mode 100644
index 00000000..cdc18a53
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4717-drm-amdgpu-Verify-root-PD-is-mapped-into-kernel-addr.patch
@@ -0,0 +1,60 @@
+From 73a89d9f5a76214b75d6f2b5aff445c85cf7994a Mon Sep 17 00:00:00 2001
+From: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Date: Thu, 5 Jul 2018 14:49:34 -0400
+Subject: [PATCH 4717/5725] drm/amdgpu: Verify root PD is mapped into kernel
+ address space (v4)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Problem: When a PD/PT update was made by the CPU, the root PD was not yet
+mapped, causing a page fault.
+
+Fix: Verify root PD is mapped into CPU address space.
+
+v2:
+Make sure that we add the root PD to the relocated list,
+since it then gets mapped into the CPU address space by default
+in amdgpu_vm_update_directories.
+
+v3:
+Drop change to not move kernel type BOs to evicted list.
+
+v4:
+Remove redundant bo move to relocated list.
+
+Link: https://bugs.freedesktop.org/show_bug.cgi?id=107065
+Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index fcd52d8..9d4dfb2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -156,6 +156,9 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
+ return;
+ list_add_tail(&base->bo_list, &bo->va);
+
++ if (bo->tbo.type == ttm_bo_type_kernel)
++ list_move(&base->vm_status, &vm->relocated);
++
+ if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
+ return;
+
+@@ -527,7 +530,6 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
+ pt->parent = amdgpu_bo_ref(parent->base.bo);
+
+ amdgpu_vm_bo_base_init(&entry->base, vm, pt);
+- list_move(&entry->base.vm_status, &vm->relocated);
+ }
+
+ if (level < AMDGPU_VM_PTB) {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4718-amd-dc-dce100-On-dce100-set-clocks-to-0-on-suspend.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4718-amd-dc-dce100-On-dce100-set-clocks-to-0-on-suspend.patch
new file mode 100644
index 00000000..8700617e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4718-amd-dc-dce100-On-dce100-set-clocks-to-0-on-suspend.patch
@@ -0,0 +1,55 @@
+From 9bbe2abc4cba4cb6ac833f81d3e8b9632b64798c Mon Sep 17 00:00:00 2001
+From: David Francis <David.Francis@amd.com>
+Date: Thu, 12 Jul 2018 10:07:49 -0400
+Subject: [PATCH 4718/5725] amd/dc/dce100: On dce100, set clocks to 0 on
+ suspend
+
+[Why]
+When a dce100 asic was suspended, the clocks were not set to 0.
+Upon resume, the new clock was compared to the existing clock;
+they were found to be the same, and so the clock was not set.
+This resulted in a pernicious black screen.
+
+[How]
+In atomic commit, check whether there are any active pipes.
+If not, set the clocks to 0.
+
+Signed-off-by: David Francis <David.Francis@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../gpu/drm/amd/display/dc/dce100/dce100_resource.c | 19 ++++++++++++++++---
+ 1 file changed, 16 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+index 38ec0d6..344dd2e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+@@ -678,9 +678,22 @@ bool dce100_validate_bandwidth(
+ struct dc *dc,
+ struct dc_state *context)
+ {
+- /* TODO implement when needed but for now hardcode max value*/
+- context->bw.dce.dispclk_khz = 681000;
+- context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER;
++ int i;
++ bool at_least_one_pipe = false;
++
++ for (i = 0; i < dc->res_pool->pipe_count; i++) {
++ if (context->res_ctx.pipe_ctx[i].stream)
++ at_least_one_pipe = true;
++ }
++
++ if (at_least_one_pipe) {
++ /* TODO implement when needed but for now hardcode max value*/
++ context->bw.dce.dispclk_khz = 681000;
++ context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER;
++ } else {
++ context->bw.dce.dispclk_khz = 0;
++ context->bw.dce.yclk_khz = 0;
++ }
+
+ return true;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4719-drm-amdgpu-pp-smu7-use-a-local-variable-for-toc-inde.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4719-drm-amdgpu-pp-smu7-use-a-local-variable-for-toc-inde.patch
new file mode 100644
index 00000000..5653d36d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4719-drm-amdgpu-pp-smu7-use-a-local-variable-for-toc-inde.patch
@@ -0,0 +1,92 @@
+From 229563b0db31cb197e63197b5324b4c3ff8728ae Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 12 Jul 2018 08:38:09 -0500
+Subject: [PATCH 4719/5725] drm/amdgpu/pp/smu7: use a local variable for toc
+ indexing
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Rather than using the index variable stored in vram. If
+the device fails to come back online after a resume cycle,
+reads from vram will return all 1s, which will cause a
+segfault. Based on a patch from Thomas Martitz <kugel@rockbox.org>.
+This avoids the segfault, but we still need to sort out
+why the GPU does not come back online after a resume.
+
+Bug: https://bugs.freedesktop.org/show_bug.cgi?id=105760
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+---
+ drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | 23 +++++++++++-----------
+ 1 file changed, 12 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+index 64d33b7..e131add 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+@@ -383,6 +383,7 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
+ uint32_t fw_to_load;
+ int result = 0;
+ struct SMU_DRAMData_TOC *toc;
++ uint32_t num_entries = 0;
+
+ if (!hwmgr->reload_fw) {
+ pr_info("skip reloading...\n");
+@@ -424,41 +425,41 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
+ }
+
+ toc = (struct SMU_DRAMData_TOC *)smu_data->header;
+- toc->num_entries = 0;
+ toc->structure_version = 1;
+
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+- UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]),
++ UCODE_ID_RLC_G, &toc->entry[num_entries++]),
+ "Failed to Get Firmware Entry.", return -EINVAL);
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+- UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]),
++ UCODE_ID_CP_CE, &toc->entry[num_entries++]),
+ "Failed to Get Firmware Entry.", return -EINVAL);
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+- UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]),
++ UCODE_ID_CP_PFP, &toc->entry[num_entries++]),
+ "Failed to Get Firmware Entry.", return -EINVAL);
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+- UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]),
++ UCODE_ID_CP_ME, &toc->entry[num_entries++]),
+ "Failed to Get Firmware Entry.", return -EINVAL);
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+- UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]),
++ UCODE_ID_CP_MEC, &toc->entry[num_entries++]),
+ "Failed to Get Firmware Entry.", return -EINVAL);
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+- UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]),
++ UCODE_ID_CP_MEC_JT1, &toc->entry[num_entries++]),
+ "Failed to Get Firmware Entry.", return -EINVAL);
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+- UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]),
++ UCODE_ID_CP_MEC_JT2, &toc->entry[num_entries++]),
+ "Failed to Get Firmware Entry.", return -EINVAL);
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+- UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]),
++ UCODE_ID_SDMA0, &toc->entry[num_entries++]),
+ "Failed to Get Firmware Entry.", return -EINVAL);
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+- UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
++ UCODE_ID_SDMA1, &toc->entry[num_entries++]),
+ "Failed to Get Firmware Entry.", return -EINVAL);
+ if (!hwmgr->not_vf)
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+- UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]),
++ UCODE_ID_MEC_STORAGE, &toc->entry[num_entries++]),
+ "Failed to Get Firmware Entry.", return -EINVAL);
+
++ toc->num_entries = num_entries;
+ smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, upper_32_bits(smu_data->header_buffer.mc_addr));
+ smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, lower_32_bits(smu_data->header_buffer.mc_addr));
+
+--
+2.7.4
+
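A self-contained, host-side illustration of the failure mode described above; the struct and values are made up, but the pattern of keeping the index in a local variable and writing it back once mirrors the patch:

```c
/* Illustrative only: if the index lives in memory that can read back as
 * all 1s (a device that failed to resume), post-incrementing it yields a
 * wild array index.  A local counter is never read from the bad memory. */
#include <stdio.h>
#include <stdint.h>

#define MAX_ENTRIES 16

struct toc {
    uint32_t num_entries;
    uint32_t entry[MAX_ENTRIES];
};

int main(void)
{
    struct toc toc = { 0 };

    /* Simulate what a read from a hung device returns. */
    toc.num_entries = 0xffffffff;

    /* Buggy pattern: toc.entry[toc.num_entries++] would index with
     * 0xffffffff here.  Safe pattern: */
    uint32_t num_entries = 0;
    toc.entry[num_entries++] = 1;   /* e.g. UCODE_ID_RLC_G */
    toc.entry[num_entries++] = 2;   /* e.g. UCODE_ID_CP_CE */
    toc.num_entries = num_entries;  /* single write at the end */

    printf("entries populated: %u\n", toc.num_entries);
    return 0;
}
```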
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4720-drm-amd-display-Fix-DP-HBR2-Eye-Diagram-Pattern-on-C.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4720-drm-amd-display-Fix-DP-HBR2-Eye-Diagram-Pattern-on-C.patch
new file mode 100644
index 00000000..acdefaac
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4720-drm-amd-display-Fix-DP-HBR2-Eye-Diagram-Pattern-on-C.patch
@@ -0,0 +1,85 @@
+From e511ec9c7fcc6971a09ad67cec72840270b59585 Mon Sep 17 00:00:00 2001
+From: Hersen Wu <hersenxs.wu@amd.com>
+Date: Mon, 16 Jul 2018 11:21:12 -0400
+Subject: [PATCH 4720/5725] drm/amd/display: Fix DP HBR2 Eye Diagram Pattern on
+ Carrizo
+
+[why] The DP HBR2 eye diagram pattern for the Raven asic is not stable.
+The workaround is to use the TP4 pattern. But this should not be
+applied to asics before Raven.
+
+[how] Add a new bool variable in the asic caps. For the Raven asic,
+use the workaround. For Carrizo and Vega, do not use the workaround.
+
+Signed-off-by: Hersen Wu <hersenxs.wu@amd.com>
+Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 8 +++-----
+ drivers/gpu/drm/amd/display/dc/dc.h | 1 +
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 2 ++
+ 3 files changed, 6 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+index 72a8a55..d14b543 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+@@ -1771,12 +1771,10 @@ static void dp_test_send_link_training(struct dc_link *link)
+ dp_retrain_link_dp_test(link, &link_settings, false);
+ }
+
+-/* TODO hbr2 compliance eye output is unstable
++/* TODO Raven hbr2 compliance eye output is unstable
+ * (toggling on and off) with debugger break
+ * This caueses intermittent PHY automation failure
+ * Need to look into the root cause */
+-static uint8_t force_tps4_for_cp2520 = 1;
+-
+ static void dp_test_send_phy_test_pattern(struct dc_link *link)
+ {
+ union phy_test_pattern dpcd_test_pattern;
+@@ -1836,13 +1834,13 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
+ break;
+ case PHY_TEST_PATTERN_CP2520_1:
+ /* CP2520 pattern is unstable, temporarily use TPS4 instead */
+- test_pattern = (force_tps4_for_cp2520 == 1) ?
++ test_pattern = (link->dc->caps.force_dp_tps4_for_cp2520 == 1) ?
+ DP_TEST_PATTERN_TRAINING_PATTERN4 :
+ DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE;
+ break;
+ case PHY_TEST_PATTERN_CP2520_2:
+ /* CP2520 pattern is unstable, temporarily use TPS4 instead */
+- test_pattern = (force_tps4_for_cp2520 == 1) ?
++ test_pattern = (link->dc->caps.force_dp_tps4_for_cp2520 == 1) ?
+ DP_TEST_PATTERN_TRAINING_PATTERN4 :
+ DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE;
+ break;
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 23a5045..096098e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -77,6 +77,7 @@ struct dc_caps {
+ bool is_apu;
+ bool dual_link_dvi;
+ bool post_blend_color_processing;
++ bool force_dp_tps4_for_cp2520;
+ };
+
+ struct dc_dcc_surface_param {
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+index 99c223b..a2318ca 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+@@ -1028,6 +1028,8 @@ static bool construct(
+ dc->caps.max_slave_planes = 1;
+ dc->caps.is_apu = true;
+ dc->caps.post_blend_color_processing = false;
++ /* Raven DP PHY HBR2 eye diagram pattern is not stable. Use TP4 */
++ dc->caps.force_dp_tps4_for_cp2520 = true;
+
+ if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
+ dc->debug = debug_defaults_drv;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4721-drm-amdgpu-allocate-shared-fence-slot-in-VA-IOCTL.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4721-drm-amdgpu-allocate-shared-fence-slot-in-VA-IOCTL.patch
new file mode 100644
index 00000000..09ed705c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4721-drm-amdgpu-allocate-shared-fence-slot-in-VA-IOCTL.patch
@@ -0,0 +1,35 @@
+From c35f262eb0648fe6f39eabc6ec487e33ace4a381 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Mon, 11 Jun 2018 15:10:02 +0200
+Subject: [PATCH 4721/5725] drm/amdgpu: allocate shared fence slot in VA IOCTL
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Per VM BOs share the reservation object with the PD and so need to
+reserve a shared fence slot for the update.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+index a1d52c4..330c07a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+@@ -760,7 +760,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
+ return -ENOENT;
+ abo = gem_to_amdgpu_bo(gobj);
+ tv.bo = &abo->tbo;
+- tv.shared = false;
++ tv.shared = !!(abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID);
+ list_add(&tv.head, &list);
+ } else {
+ gobj = NULL;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4722-drm-amd-pp-Make-sure-clock_voltage_limit_table-on-dc.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4722-drm-amd-pp-Make-sure-clock_voltage_limit_table-on-dc.patch
new file mode 100644
index 00000000..96fe49b4
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4722-drm-amd-pp-Make-sure-clock_voltage_limit_table-on-dc.patch
@@ -0,0 +1,35 @@
+From 60a92e96817e7d96956110b61bf49c5d9b3e8802 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Wed, 13 Jun 2018 18:53:49 +0800
+Subject: [PATCH 4722/5725] drm/amd/pp: Make sure clock_voltage_limit_table on
+ dc is valid
+
+If the vbios does not set the max clock voltage limit table for DC mode,
+set the table to be the same as the table for AC mode.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+index e63bc47..4ef77ce 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+@@ -236,6 +236,11 @@ int hwmgr_hw_init(struct pp_hwmgr *hwmgr)
+ ret = hwmgr->hwmgr_func->backend_init(hwmgr);
+ if (ret)
+ goto err1;
++ /* make sure dc limits are valid */
++ if ((hwmgr->dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
++ (hwmgr->dyn_state.max_clock_voltage_on_dc.mclk == 0))
++ hwmgr->dyn_state.max_clock_voltage_on_dc =
++ hwmgr->dyn_state.max_clock_voltage_on_ac;
+
+ ret = psm_init_power_state_table(hwmgr);
+ if (ret)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4723-drm-amdgpu-Fix-uvd-firmware-version-information-for-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4723-drm-amdgpu-Fix-uvd-firmware-version-information-for-.patch
new file mode 100644
index 00000000..ba9bc0fa
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4723-drm-amdgpu-Fix-uvd-firmware-version-information-for-.patch
@@ -0,0 +1,96 @@
+From 274d47138d7a15c426abfb7b92158c6ee647b279 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 14 Jun 2018 10:24:06 -0500
+Subject: [PATCH 4723/5725] drm/amdgpu: Fix uvd firmware version information
+ for vega20 (v2)
+
+The uvd version information was not set correctly for vega20.
+Rearrange the logic to set it correctly and fix the warnings
+as a result.
+
+v2: fix version formatting for userspace based on feedback from Leo
+
+Fixes: 96ca7f298f (drm/amdgpu/vg20:support new UVD FW version naming convention)
+Reviewed-by: Leo Liu <leo.liu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 42 +++++++++++++++++++--------------
+ 1 file changed, 24 insertions(+), 18 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+index cb4efa8..212fec7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+@@ -127,7 +127,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
+ unsigned long bo_size;
+ const char *fw_name;
+ const struct common_firmware_header *hdr;
+- unsigned version_major, version_minor, family_id;
++ unsigned family_id;
+ int i, j, r;
+
+ INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);
+@@ -210,10 +210,31 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
+ family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
+
+ if (adev->asic_type < CHIP_VEGA20) {
++ unsigned version_major, version_minor;
++
+ version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
+ version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
+ DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
+ version_major, version_minor, family_id);
++
++ /*
++ * Limit the number of UVD handles depending on microcode major
++ * and minor versions. The firmware version which has 40 UVD
++ * instances support is 1.80. So all subsequent versions should
++ * also have the same support.
++ */
++ if ((version_major > 0x01) ||
++ ((version_major == 0x01) && (version_minor >= 0x50)))
++ adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;
++
++ adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
++ (family_id << 8));
++
++ if ((adev->asic_type == CHIP_POLARIS10 ||
++ adev->asic_type == CHIP_POLARIS11) &&
++ (adev->uvd.fw_version < FW_1_66_16))
++ DRM_ERROR("POLARIS10/11 UVD firmware version %hu.%hu is too old.\n",
++ version_major, version_minor);
+ } else {
+ unsigned int enc_major, enc_minor, dec_minor;
+
+@@ -222,26 +243,11 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
+ enc_major = (le32_to_cpu(hdr->ucode_version) >> 30) & 0x3;
+ DRM_INFO("Found UVD firmware ENC: %hu.%hu DEC: .%hu Family ID: %hu\n",
+ enc_major, enc_minor, dec_minor, family_id);
+- }
+
+- /*
+- * Limit the number of UVD handles depending on microcode major
+- * and minor versions. The firmware version which has 40 UVD
+- * instances support is 1.80. So all subsequent versions should
+- * also have the same support.
+- */
+- if (adev->asic_type >= CHIP_VEGA20 || (version_major > 0x01) ||
+- ((version_major == 0x01) && (version_minor >= 0x50)))
+ adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;
+
+- adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
+- (family_id << 8));
+-
+- if ((adev->asic_type == CHIP_POLARIS10 ||
+- adev->asic_type == CHIP_POLARIS11) &&
+- (adev->uvd.fw_version < FW_1_66_16))
+- DRM_ERROR("POLARIS10/11 UVD firmware version %hu.%hu is too old.\n",
+- version_major, version_minor);
++ adev->uvd.fw_version = le32_to_cpu(hdr->ucode_version);
++ }
+
+ bo_size = AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE
+ + AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles;
+--
+2.7.4
+
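Illustrative sketch of how the pre-Vega20 branch packs the reported firmware version after this rearrangement; the input value below is hypothetical:

```c
/* Illustrative only: decode the ucode_version fields and repack them into
 * the fw_version value reported to userspace, as in the pre-Vega20 path. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t ucode_version = 0x01005025;   /* hypothetical hdr->ucode_version */

    unsigned family_id     = ucode_version & 0xff;
    unsigned version_major = (ucode_version >> 24) & 0xff;
    unsigned version_minor = (ucode_version >> 8) & 0xff;

    uint32_t fw_version = (version_major << 24) | (version_minor << 16) |
                          (family_id << 8);

    printf("UVD firmware %u.%u, family 0x%02x -> fw_version 0x%08x\n",
           version_major, version_minor, family_id, fw_version);
    return 0;
}
```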
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4724-drm-amd-display-fix-type-of-variable.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4724-drm-amd-display-fix-type-of-variable.patch
new file mode 100644
index 00000000..891074c2
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4724-drm-amd-display-fix-type-of-variable.patch
@@ -0,0 +1,38 @@
+From 8933217efba2a57654d8f2d3763f4fc7884f3e4d Mon Sep 17 00:00:00 2001
+From: "Gustavo A. R. Silva" <gustavo@embeddedor.com>
+Date: Fri, 15 Jun 2018 08:32:28 -0500
+Subject: [PATCH 4724/5725] drm/amd/display: fix type of variable
+
+Currently, the maximum value that *counter* can reach is 255, and
+the code at line 150, while (counter < 1000) {, implies a bigger
+value is expected.
+
+Fix this by changing the type of variable *counter* from uint8_t
+to uint16_t.
+
+Addresses-Coverity-ID: 1470030 ("Operands don't affect result")
+Fixes: 2b6199a1d1b7 ("drm/amd/display: replace msleep with udelay in fbc path")
+Signed-off-by: Gustavo A. R. Silva <gustavo@embeddedor.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
+index a79fc0b..df02701 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
+@@ -143,7 +143,7 @@ static void wait_for_fbc_state_changed(
+ struct dce110_compressor *cp110,
+ bool enabled)
+ {
+- uint8_t counter = 0;
++ uint16_t counter = 0;
+ uint32_t addr = mmFBC_STATUS;
+ uint32_t value;
+
+--
+2.7.4
+
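A small stand-alone demonstration of why a uint8_t counter can never satisfy the 1000-iteration bound; not part of the patch:

```c
/* Illustrative only: a uint8_t wraps at 256, so an exit test of
 * "counter < 1000" based on it never becomes false. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint8_t  counter8  = 0;
    uint16_t counter16 = 0;

    for (int i = 0; i < 1000; i++) {  /* advance both, as the wait loop intends */
        counter8++;
        counter16++;
    }

    printf("uint8_t  counter after 1000 increments: %u\n", counter8);   /* 232 */
    printf("uint16_t counter after 1000 increments: %u\n", counter16);  /* 1000 */
    return 0;
}
```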
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4725-drm-amdgpu-Fix-ups-for-amdgpu_object.c-documentation.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4725-drm-amdgpu-Fix-ups-for-amdgpu_object.c-documentation.patch
new file mode 100644
index 00000000..f084e6bb
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4725-drm-amdgpu-Fix-ups-for-amdgpu_object.c-documentation.patch
@@ -0,0 +1,261 @@
+From 51392c1794ba799b582d8240d1c8a4ae0b3028f3 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Michel=20D=C3=A4nzer?= <michel.daenzer@amd.com>
+Date: Fri, 1 Jun 2018 12:29:45 +0200
+Subject: [PATCH 4725/5725] drm/amdgpu: Fix-ups for amdgpu_object.c
+ documentation
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+* Fix format of return value descriptions
+* Document all parameters of amdgpu_bo_free_kernel
+* Document amdgpu_bo_get_preferred_pin_domain
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 70 ++++++++++++++++++++----------
+ 1 file changed, 47 insertions(+), 23 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index e85c07b..31f8de1 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -96,7 +96,8 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
+ * Uses destroy function associated with the object to determine if this is
+ * an &amdgpu_bo.
+ *
+- * Returns true if the object belongs to &amdgpu_bo, false if not.
++ * Returns:
++ * true if the object belongs to &amdgpu_bo, false if not.
+ */
+ bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
+ {
+@@ -237,7 +238,8 @@ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
+ *
+ * Note: For bo_ptr new BO is only created if bo_ptr points to NULL.
+ *
+- * Returns 0 on success, negative error code otherwise.
++ * Returns:
++ * 0 on success, negative error code otherwise.
+ */
+ int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
+ unsigned long size, int align,
+@@ -314,7 +316,8 @@ int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
+ *
+ * Note: For bo_ptr new BO is only created if bo_ptr points to NULL.
+ *
+- * Returns 0 on success, negative error code otherwise.
++ * Returns:
++ * 0 on success, negative error code otherwise.
+ */
+ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
+ unsigned long size, int align,
+@@ -338,6 +341,8 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
+ * amdgpu_bo_free_kernel - free BO for kernel use
+ *
+ * @bo: amdgpu BO to free
++ * @gpu_addr: pointer to where the BO's GPU memory space address was stored
++ * @cpu_addr: pointer to where the BO's CPU memory space address was stored
+ *
+ * unmaps and unpin a BO for kernel internal use.
+ */
+@@ -579,7 +584,8 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
+ * Shadow object is used to backup the original buffer object, and is always
+ * in GTT.
+ *
+- * Returns 0 for success or a negative error code on failure.
++ * Returns:
++ * 0 for success or a negative error code on failure.
+ */
+ int amdgpu_bo_create(struct amdgpu_device *adev,
+ struct amdgpu_bo_param *bp,
+@@ -622,7 +628,8 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
+ * Copies an &amdgpu_bo buffer object to its shadow object.
+ * Not used for now.
+ *
+- * Returns 0 for success or a negative error code on failure.
++ * Returns:
++ * 0 for success or a negative error code on failure.
+ */
+ int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+@@ -665,7 +672,8 @@ int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
+ * This is used for validating shadow bos. It calls ttm_bo_validate() to
+ * make sure the buffer is resident where it needs to be.
+ *
+- * Returns 0 for success or a negative error code on failure.
++ * Returns:
++ * 0 for success or a negative error code on failure.
+ */
+ int amdgpu_bo_validate(struct amdgpu_bo *bo)
+ {
+@@ -702,7 +710,8 @@ int amdgpu_bo_validate(struct amdgpu_bo *bo)
+ * This is used for recovering a buffer from its shadow in case of a gpu
+ * reset where vram context may be lost.
+ *
+- * Returns 0 for success or a negative error code on failure.
++ * Returns:
++ * 0 for success or a negative error code on failure.
+ */
+ int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+@@ -744,7 +753,8 @@ int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
+ * Calls ttm_bo_kmap() to set up the kernel virtual mapping; calls
+ * amdgpu_bo_kptr() to get the kernel virtual address.
+ *
+- * Returns 0 for success or a negative error code on failure.
++ * Returns:
++ * 0 for success or a negative error code on failure.
+ */
+ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
+ {
+@@ -782,7 +792,8 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
+ *
+ * Calls ttm_kmap_obj_virtual() to get the kernel virtual address
+ *
+- * Returns the virtual address of a buffer object area.
++ * Returns:
++ * the virtual address of a buffer object area.
+ */
+ void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
+ {
+@@ -809,7 +820,8 @@ void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
+ *
+ * References the contained &ttm_buffer_object.
+ *
+- * Returns a refcounted pointer to the &amdgpu_bo buffer object.
++ * Returns:
++ * a refcounted pointer to the &amdgpu_bo buffer object.
+ */
+ struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
+ {
+@@ -859,7 +871,8 @@ void amdgpu_bo_unref(struct amdgpu_bo **bo)
+ * where to pin a buffer if there are specific restrictions on where a buffer
+ * must be located.
+ *
+- * Returns 0 for success or a negative error code on failure.
++ * Returns:
++ * 0 for success or a negative error code on failure.
+ */
+ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
+ u64 min_offset, u64 max_offset,
+@@ -964,7 +977,8 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
+ * Provides a simpler API for buffers that do not have any strict restrictions
+ * on where a buffer must be located.
+ *
+- * Returns 0 for success or a negative error code on failure.
++ * Returns:
++ * 0 for success or a negative error code on failure.
+ */
+ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
+ {
+@@ -978,7 +992,8 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
+ * Decreases the pin_count, and clears the flags if pin_count reaches 0.
+ * Changes placement and pin size accordingly.
+ *
+- * Returns 0 for success or a negative error code on failure.
++ * Returns:
++ * 0 for success or a negative error code on failure.
+ */
+ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
+ {
+@@ -1019,7 +1034,8 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
+ * Evicts all VRAM buffers on the lru list of the memory type.
+ * Mainly used for evicting vram at suspend time.
+ *
+- * Returns 0 for success or a negative error code on failure.
++ * Returns:
++ * 0 for success or a negative error code on failure.
+ */
+ int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
+ {
+@@ -1049,7 +1065,8 @@ static const char *amdgpu_vram_names[] = {
+ *
+ * Calls amdgpu_ttm_init() to initialize amdgpu memory manager.
+ *
+- * Returns 0 for success or a negative error code on failure.
++ * Returns:
++ * 0 for success or a negative error code on failure.
+ */
+ int amdgpu_bo_init(struct amdgpu_device *adev)
+ {
+@@ -1075,7 +1092,8 @@ int amdgpu_bo_init(struct amdgpu_device *adev)
+ * Calls amdgpu_ttm_late_init() to free resources used earlier during
+ * initialization.
+ *
+- * Returns 0 for success or a negative error code on failure.
++ * Returns:
++ * 0 for success or a negative error code on failure.
+ */
+ int amdgpu_bo_late_init(struct amdgpu_device *adev)
+ {
+@@ -1104,7 +1122,8 @@ void amdgpu_bo_fini(struct amdgpu_device *adev)
+ *
+ * Calls ttm_fbdev_mmap() to mmap fbdev memory if it is backed by a bo.
+ *
+- * Returns 0 for success or a negative error code on failure.
++ * Returns:
++ * 0 for success or a negative error code on failure.
+ */
+ int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
+ struct vm_area_struct *vma)
+@@ -1120,7 +1139,8 @@ int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
+ * Sets buffer object's tiling flags with the new one. Used by GEM ioctl or
+ * kernel driver to set the tiling flags on a buffer.
+ *
+- * Returns 0 for success or a negative error code on failure.
++ * Returns:
++ * 0 for success or a negative error code on failure.
+ */
+ int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
+ {
+@@ -1160,7 +1180,8 @@ void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
+ * Sets buffer object's metadata, its size and flags.
+ * Used via GEM ioctl.
+ *
+- * Returns 0 for success or a negative error code on failure.
++ * Returns:
++ * 0 for success or a negative error code on failure.
+ */
+ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
+ uint32_t metadata_size, uint64_t flags)
+@@ -1203,7 +1224,8 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
+ * less than metadata_size.
+ * Used via GEM ioctl.
+ *
+- * Returns 0 for success or a negative error code on failure.
++ * Returns:
++ * 0 for success or a negative error code on failure.
+ */
+ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
+ size_t buffer_size, uint32_t *metadata_size,
+@@ -1274,7 +1296,8 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
+ * also performs bookkeeping.
+ * TTM driver callback for dealing with vm faults.
+ *
+- * Returns 0 for success or a negative error code on failure.
++ * Returns:
++ * 0 for success or a negative error code on failure.
+ */
+ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+ {
+@@ -1349,10 +1372,11 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
+ * amdgpu_bo_gpu_offset - return GPU offset of bo
+ * @bo: amdgpu object for which we query the offset
+ *
+- * Returns current GPU offset of the object.
+- *
+ * Note: object should either be pinned or reserved when calling this
+ * function, it might be useful to add check for this for debugging.
++ *
++ * Returns:
++ * current GPU offset of the object.
+ */
+ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
+ {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4726-drm-amd-pp-Remove-SAMU-support-in-powerplay.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4726-drm-amd-pp-Remove-SAMU-support-in-powerplay.patch
new file mode 100644
index 00000000..f562c15a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4726-drm-amd-pp-Remove-SAMU-support-in-powerplay.patch
@@ -0,0 +1,755 @@
+From 5a052f04418b4c383651d42fe443050866cecc02 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Mon, 4 Jun 2018 13:33:14 +0800
+Subject: [PATCH 4726/5725] drm/amd/pp: Remove SAMU support in powerplay
+
+As the SAMU IP was not supported in Linux,
+delete the SAMU support in powerplay on the
+Bonaire/Hawaii/Tonga/Fiji/Polaris/vegam asics.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../amd/powerplay/hwmgr/smu7_clockpowergating.c | 54 --------------
+ .../amd/powerplay/hwmgr/smu7_clockpowergating.h | 1 -
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 1 -
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h | 1 -
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h | 1 -
+ drivers/gpu/drm/amd/powerplay/inc/smumgr.h | 2 -
+ drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c | 35 ---------
+ drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c | 74 -------------------
+ .../gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c | 10 ---
+ .../drm/amd/powerplay/smumgr/polaris10_smumgr.c | 86 ----------------------
+ .../gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c | 80 --------------------
+ .../gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c | 85 ---------------------
+ 12 files changed, 430 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
+index 5e3c264..a77cced 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
+@@ -39,13 +39,6 @@ static int smu7_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
+ PPSMC_MSG_VCEDPM_Disable);
+ }
+
+-static int smu7_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable)
+-{
+- return smum_send_msg_to_smc(hwmgr, enable ?
+- PPSMC_MSG_SAMUDPM_Enable :
+- PPSMC_MSG_SAMUDPM_Disable);
+-}
+-
+ static int smu7_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
+ {
+ if (!bgate)
+@@ -60,13 +53,6 @@ static int smu7_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate)
+ return smu7_enable_disable_vce_dpm(hwmgr, !bgate);
+ }
+
+-static int smu7_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate)
+-{
+- if (!bgate)
+- smum_update_smc_table(hwmgr, SMU_SAMU_TABLE);
+- return smu7_enable_disable_samu_dpm(hwmgr, !bgate);
+-}
+-
+ int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr)
+ {
+ if (phm_cf_want_uvd_power_gating(hwmgr))
+@@ -107,35 +93,15 @@ static int smu7_powerup_vce(struct pp_hwmgr *hwmgr)
+ return 0;
+ }
+
+-static int smu7_powerdown_samu(struct pp_hwmgr *hwmgr)
+-{
+- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+- PHM_PlatformCaps_SamuPowerGating))
+- return smum_send_msg_to_smc(hwmgr,
+- PPSMC_MSG_SAMPowerOFF);
+- return 0;
+-}
+-
+-static int smu7_powerup_samu(struct pp_hwmgr *hwmgr)
+-{
+- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+- PHM_PlatformCaps_SamuPowerGating))
+- return smum_send_msg_to_smc(hwmgr,
+- PPSMC_MSG_SAMPowerON);
+- return 0;
+-}
+-
+ int smu7_disable_clock_power_gating(struct pp_hwmgr *hwmgr)
+ {
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ data->uvd_power_gated = false;
+ data->vce_power_gated = false;
+- data->samu_power_gated = false;
+
+ smu7_powerup_uvd(hwmgr);
+ smu7_powerup_vce(hwmgr);
+- smu7_powerup_samu(hwmgr);
+
+ return 0;
+ }
+@@ -196,26 +162,6 @@ void smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
+ }
+ }
+
+-int smu7_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate)
+-{
+- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+-
+- if (data->samu_power_gated == bgate)
+- return 0;
+-
+- data->samu_power_gated = bgate;
+-
+- if (bgate) {
+- smu7_update_samu_dpm(hwmgr, true);
+- smu7_powerdown_samu(hwmgr);
+- } else {
+- smu7_powerup_samu(hwmgr);
+- smu7_update_samu_dpm(hwmgr, false);
+- }
+-
+- return 0;
+-}
+-
+ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
+ const uint32_t *msg_id)
+ {
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
+index 1ddce02..be7f66d 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
+@@ -29,7 +29,6 @@
+ void smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
+ void smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
+ int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr);
+-int smu7_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate);
+ int smu7_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate);
+ int smu7_disable_clock_power_gating(struct pp_hwmgr *hwmgr);
+ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+index 4f1e7d6..d464deb 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+@@ -4300,7 +4300,6 @@ static int smu7_init_power_gate_state(struct pp_hwmgr *hwmgr)
+
+ data->uvd_power_gated = false;
+ data->vce_power_gated = false;
+- data->samu_power_gated = false;
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
+index c91e75d..3784ce6 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
+@@ -310,7 +310,6 @@ struct smu7_hwmgr {
+ /* ---- Power Gating States ---- */
+ bool uvd_power_gated;
+ bool vce_power_gated;
+- bool samu_power_gated;
+ bool need_long_memory_training;
+
+ /* Application power optimization parameters */
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
+index aadd6cb..339820d 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
+@@ -370,7 +370,6 @@ struct vega10_hwmgr {
+ /* ---- Power Gating States ---- */
+ bool uvd_power_gated;
+ bool vce_power_gated;
+- bool samu_power_gated;
+ bool need_long_memory_training;
+
+ /* Internal settings to apply the application power optimization parameters */
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
+index 6c22ed9..89dfbf5 100644
+--- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
+@@ -29,7 +29,6 @@
+ enum SMU_TABLE {
+ SMU_UVD_TABLE = 0,
+ SMU_VCE_TABLE,
+- SMU_SAMU_TABLE,
+ SMU_BIF_TABLE,
+ };
+
+@@ -47,7 +46,6 @@ enum SMU_MEMBER {
+ UcodeLoadStatus,
+ UvdBootLevel,
+ VceBootLevel,
+- SamuBootLevel,
+ LowSclkInterruptThreshold,
+ DRAM_LOG_ADDR_H,
+ DRAM_LOG_ADDR_L,
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
+index 2d4ec8a..8cd21ac 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
+@@ -1614,37 +1614,6 @@ static int ci_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
+ return result;
+ }
+
+-static int ci_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+- SMU7_Discrete_DpmTable *table)
+-{
+- int result = -EINVAL;
+- uint8_t count;
+- struct pp_atomctrl_clock_dividers_vi dividers;
+- struct phm_samu_clock_voltage_dependency_table *samu_table =
+- hwmgr->dyn_state.samu_clock_voltage_dependency_table;
+-
+- table->SamuBootLevel = 0;
+- table->SamuLevelCount = (uint8_t)(samu_table->count);
+-
+- for (count = 0; count < table->SamuLevelCount; count++) {
+- table->SamuLevel[count].Frequency = samu_table->entries[count].samclk;
+- table->SamuLevel[count].MinVoltage = samu_table->entries[count].v * VOLTAGE_SCALE;
+- table->SamuLevel[count].MinPhases = 1;
+-
+- /* retrieve divider value for VBIOS */
+- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+- table->SamuLevel[count].Frequency, &dividers);
+- PP_ASSERT_WITH_CODE((0 == result),
+- "can not find divide id for samu clock", return result);
+-
+- table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+-
+- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
+- CONVERT_FROM_HOST_TO_SMC_US(table->SamuLevel[count].MinVoltage);
+- }
+- return result;
+-}
+-
+ static int ci_populate_memory_timing_parameters(
+ struct pp_hwmgr *hwmgr,
+ uint32_t engine_clock,
+@@ -2026,10 +1995,6 @@ static int ci_init_smc_table(struct pp_hwmgr *hwmgr)
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ACP Level!", return result);
+
+- result = ci_populate_smc_samu_level(hwmgr, table);
+- PP_ASSERT_WITH_CODE(0 == result,
+- "Failed to initialize SAMU Level!", return result);
+-
+ /* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */
+ /* need to populate the ARB settings for the initial state. */
+ result = ci_program_memory_timing_parameters(hwmgr);
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+index 53df940..18048f8 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+@@ -1503,44 +1503,6 @@ static int fiji_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
+ return result;
+ }
+
+-static int fiji_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+- SMU73_Discrete_DpmTable *table)
+-{
+- int result = -EINVAL;
+- uint8_t count;
+- struct pp_atomctrl_clock_dividers_vi dividers;
+- struct phm_ppt_v1_information *table_info =
+- (struct phm_ppt_v1_information *)(hwmgr->pptable);
+- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+- table_info->mm_dep_table;
+-
+- table->SamuBootLevel = 0;
+- table->SamuLevelCount = (uint8_t)(mm_table->count);
+-
+- for (count = 0; count < table->SamuLevelCount; count++) {
+- /* not sure whether we need evclk or not */
+- table->SamuLevel[count].MinVoltage = 0;
+- table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
+- table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+- VOLTAGE_SCALE) << VDDC_SHIFT;
+- table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
+- VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
+- table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+-
+- /* retrieve divider value for VBIOS */
+- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+- table->SamuLevel[count].Frequency, &dividers);
+- PP_ASSERT_WITH_CODE((0 == result),
+- "can not find divide id for samu clock", return result);
+-
+- table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+-
+- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
+- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
+- }
+- return result;
+-}
+-
+ static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
+ int32_t eng_clock, int32_t mem_clock,
+ struct SMU73_Discrete_MCArbDramTimingTableEntry *arb_regs)
+@@ -2028,10 +1990,6 @@ static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ACP Level!", return result);
+
+- result = fiji_populate_smc_samu_level(hwmgr, table);
+- PP_ASSERT_WITH_CODE(0 == result,
+- "Failed to initialize SAMU Level!", return result);
+-
+ /* Since only the initial state is completely set up at this point
+ * (the other states are just copies of the boot state) we only
+ * need to populate the ARB settings for the initial state.
+@@ -2378,8 +2336,6 @@ static uint32_t fiji_get_offsetof(uint32_t type, uint32_t member)
+ return offsetof(SMU73_Discrete_DpmTable, UvdBootLevel);
+ case VceBootLevel:
+ return offsetof(SMU73_Discrete_DpmTable, VceBootLevel);
+- case SamuBootLevel:
+- return offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
+ case LowSclkInterruptThreshold:
+ return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold);
+ }
+@@ -2478,33 +2434,6 @@ static int fiji_update_vce_smc_table(struct pp_hwmgr *hwmgr)
+ return 0;
+ }
+
+-static int fiji_update_samu_smc_table(struct pp_hwmgr *hwmgr)
+-{
+- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+- uint32_t mm_boot_level_offset, mm_boot_level_value;
+-
+-
+- smu_data->smc_state_table.SamuBootLevel = 0;
+- mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+- offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
+-
+- mm_boot_level_offset /= 4;
+- mm_boot_level_offset *= 4;
+- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+- CGS_IND_REG__SMC, mm_boot_level_offset);
+- mm_boot_level_value &= 0xFFFFFF00;
+- mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
+- cgs_write_ind_register(hwmgr->device,
+- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+-
+- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+- PHM_PlatformCaps_StablePState))
+- smum_send_msg_to_smc_with_parameter(hwmgr,
+- PPSMC_MSG_SAMUDPM_SetEnabledMask,
+- (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
+- return 0;
+-}
+-
+ static int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
+ {
+ switch (type) {
+@@ -2514,9 +2443,6 @@ static int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
+ case SMU_VCE_TABLE:
+ fiji_update_vce_smc_table(hwmgr);
+ break;
+- case SMU_SAMU_TABLE:
+- fiji_update_samu_smc_table(hwmgr);
+- break;
+ default:
+ break;
+ }
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
+index 415f691..9299b93 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
+@@ -1578,12 +1578,6 @@ static int iceland_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
+ return 0;
+ }
+
+-static int iceland_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+- SMU71_Discrete_DpmTable *table)
+-{
+- return 0;
+-}
+-
+ static int iceland_populate_memory_timing_parameters(
+ struct pp_hwmgr *hwmgr,
+ uint32_t engine_clock,
+@@ -1992,10 +1986,6 @@ static int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ACP Level!", return result;);
+
+- result = iceland_populate_smc_samu_level(hwmgr, table);
+- PP_ASSERT_WITH_CODE(0 == result,
+- "Failed to initialize SAMU Level!", return result;);
+-
+ /* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */
+ /* need to populate the ARB settings for the initial state. */
+ result = iceland_program_memory_timing_parameters(hwmgr);
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+index a8c6524..a4ce199 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+@@ -1337,55 +1337,6 @@ static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
+ return result;
+ }
+
+-
+-static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+- SMU74_Discrete_DpmTable *table)
+-{
+- int result = -EINVAL;
+- uint8_t count;
+- struct pp_atomctrl_clock_dividers_vi dividers;
+- struct phm_ppt_v1_information *table_info =
+- (struct phm_ppt_v1_information *)(hwmgr->pptable);
+- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+- table_info->mm_dep_table;
+- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+- uint32_t vddci;
+-
+- table->SamuBootLevel = 0;
+- table->SamuLevelCount = (uint8_t)(mm_table->count);
+-
+- for (count = 0; count < table->SamuLevelCount; count++) {
+- /* not sure whether we need evclk or not */
+- table->SamuLevel[count].MinVoltage = 0;
+- table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
+- table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+- VOLTAGE_SCALE) << VDDC_SHIFT;
+-
+- if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+- vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+- mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+- else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+- vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+- else
+- vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
+-
+- table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+- table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+-
+- /* retrieve divider value for VBIOS */
+- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+- table->SamuLevel[count].Frequency, &dividers);
+- PP_ASSERT_WITH_CODE((0 == result),
+- "can not find divide id for samu clock", return result);
+-
+- table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+-
+- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
+- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
+- }
+- return result;
+-}
+-
+ static int polaris10_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
+ int32_t eng_clock, int32_t mem_clock,
+ SMU74_Discrete_MCArbDramTimingTableEntry *arb_regs)
+@@ -1865,10 +1816,6 @@ static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize VCE Level!", return result);
+
+- result = polaris10_populate_smc_samu_level(hwmgr, table);
+- PP_ASSERT_WITH_CODE(0 == result,
+- "Failed to initialize SAMU Level!", return result);
+-
+ /* Since only the initial state is completely set up at this point
+ * (the other states are just copies of the boot state) we only
+ * need to populate the ARB settings for the initial state.
+@@ -2222,34 +2169,6 @@ static int polaris10_update_vce_smc_table(struct pp_hwmgr *hwmgr)
+ return 0;
+ }
+
+-static int polaris10_update_samu_smc_table(struct pp_hwmgr *hwmgr)
+-{
+- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+- uint32_t mm_boot_level_offset, mm_boot_level_value;
+-
+-
+- smu_data->smc_state_table.SamuBootLevel = 0;
+- mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+- offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
+-
+- mm_boot_level_offset /= 4;
+- mm_boot_level_offset *= 4;
+- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+- CGS_IND_REG__SMC, mm_boot_level_offset);
+- mm_boot_level_value &= 0xFFFFFF00;
+- mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
+- cgs_write_ind_register(hwmgr->device,
+- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+-
+- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+- PHM_PlatformCaps_StablePState))
+- smum_send_msg_to_smc_with_parameter(hwmgr,
+- PPSMC_MSG_SAMUDPM_SetEnabledMask,
+- (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
+- return 0;
+-}
+-
+-
+ static int polaris10_update_bif_smc_table(struct pp_hwmgr *hwmgr)
+ {
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+@@ -2276,9 +2195,6 @@ static int polaris10_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
+ case SMU_VCE_TABLE:
+ polaris10_update_vce_smc_table(hwmgr);
+ break;
+- case SMU_SAMU_TABLE:
+- polaris10_update_samu_smc_table(hwmgr);
+- break;
+ case SMU_BIF_TABLE:
+ polaris10_update_bif_smc_table(hwmgr);
+ default:
+@@ -2357,8 +2273,6 @@ static uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member)
+ return offsetof(SMU74_Discrete_DpmTable, UvdBootLevel);
+ case VceBootLevel:
+ return offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
+- case SamuBootLevel:
+- return offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
+ case LowSclkInterruptThreshold:
+ return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold);
+ }
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+index 782b19f..7dabc6c 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+@@ -1443,51 +1443,6 @@ static int tonga_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
+ return result;
+ }
+
+-static int tonga_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+- SMU72_Discrete_DpmTable *table)
+-{
+- int result = 0;
+- uint8_t count;
+- pp_atomctrl_clock_dividers_vi dividers;
+- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+- struct phm_ppt_v1_information *pptable_info =
+- (struct phm_ppt_v1_information *)(hwmgr->pptable);
+- phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+- pptable_info->mm_dep_table;
+-
+- table->SamuBootLevel = 0;
+- table->SamuLevelCount = (uint8_t) (mm_table->count);
+-
+- for (count = 0; count < table->SamuLevelCount; count++) {
+- /* not sure whether we need evclk or not */
+- table->SamuLevel[count].Frequency =
+- pptable_info->mm_dep_table->entries[count].samclock;
+- table->SamuLevel[count].MinVoltage.Vddc =
+- phm_get_voltage_index(pptable_info->vddc_lookup_table,
+- mm_table->entries[count].vddc);
+- table->SamuLevel[count].MinVoltage.VddGfx =
+- (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
+- phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
+- mm_table->entries[count].vddgfx) : 0;
+- table->SamuLevel[count].MinVoltage.Vddci =
+- phm_get_voltage_id(&data->vddci_voltage_table,
+- mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+- table->SamuLevel[count].MinVoltage.Phases = 1;
+-
+- /* retrieve divider value for VBIOS */
+- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+- table->SamuLevel[count].Frequency, &dividers);
+- PP_ASSERT_WITH_CODE((!result),
+- "can not find divide id for samu clock", return result);
+-
+- table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+-
+- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
+- }
+-
+- return result;
+-}
+-
+ static int tonga_populate_memory_timing_parameters(
+ struct pp_hwmgr *hwmgr,
+ uint32_t engine_clock,
+@@ -2323,10 +2278,6 @@ static int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize ACP Level !", return result);
+
+- result = tonga_populate_smc_samu_level(hwmgr, table);
+- PP_ASSERT_WITH_CODE(!result,
+- "Failed to initialize SAMU Level !", return result);
+-
+ /* Since only the initial state is completely set up at this
+ * point (the other states are just copies of the boot state) we only
+ * need to populate the ARB settings for the initial state.
+@@ -2673,8 +2624,6 @@ static uint32_t tonga_get_offsetof(uint32_t type, uint32_t member)
+ return offsetof(SMU72_Discrete_DpmTable, UvdBootLevel);
+ case VceBootLevel:
+ return offsetof(SMU72_Discrete_DpmTable, VceBootLevel);
+- case SamuBootLevel:
+- return offsetof(SMU72_Discrete_DpmTable, SamuBootLevel);
+ case LowSclkInterruptThreshold:
+ return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold);
+ }
+@@ -2773,32 +2722,6 @@ static int tonga_update_vce_smc_table(struct pp_hwmgr *hwmgr)
+ return 0;
+ }
+
+-static int tonga_update_samu_smc_table(struct pp_hwmgr *hwmgr)
+-{
+- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+- uint32_t mm_boot_level_offset, mm_boot_level_value;
+-
+- smu_data->smc_state_table.SamuBootLevel = 0;
+- mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+- offsetof(SMU72_Discrete_DpmTable, SamuBootLevel);
+-
+- mm_boot_level_offset /= 4;
+- mm_boot_level_offset *= 4;
+- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+- CGS_IND_REG__SMC, mm_boot_level_offset);
+- mm_boot_level_value &= 0xFFFFFF00;
+- mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
+- cgs_write_ind_register(hwmgr->device,
+- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+-
+- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+- PHM_PlatformCaps_StablePState))
+- smum_send_msg_to_smc_with_parameter(hwmgr,
+- PPSMC_MSG_SAMUDPM_SetEnabledMask,
+- (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
+- return 0;
+-}
+-
+ static int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
+ {
+ switch (type) {
+@@ -2808,9 +2731,6 @@ static int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
+ case SMU_VCE_TABLE:
+ tonga_update_vce_smc_table(hwmgr);
+ break;
+- case SMU_SAMU_TABLE:
+- tonga_update_samu_smc_table(hwmgr);
+- break;
+ default:
+ break;
+ }
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
+index 2de4895..57420d7 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
+@@ -393,34 +393,6 @@ static int vegam_update_vce_smc_table(struct pp_hwmgr *hwmgr)
+ return 0;
+ }
+
+-static int vegam_update_samu_smc_table(struct pp_hwmgr *hwmgr)
+-{
+- struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+- uint32_t mm_boot_level_offset, mm_boot_level_value;
+-
+-
+- smu_data->smc_state_table.SamuBootLevel = 0;
+- mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+- offsetof(SMU75_Discrete_DpmTable, SamuBootLevel);
+-
+- mm_boot_level_offset /= 4;
+- mm_boot_level_offset *= 4;
+- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+- CGS_IND_REG__SMC, mm_boot_level_offset);
+- mm_boot_level_value &= 0xFFFFFF00;
+- mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
+- cgs_write_ind_register(hwmgr->device,
+- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+-
+- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+- PHM_PlatformCaps_StablePState))
+- smum_send_msg_to_smc_with_parameter(hwmgr,
+- PPSMC_MSG_SAMUDPM_SetEnabledMask,
+- (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
+- return 0;
+-}
+-
+-
+ static int vegam_update_bif_smc_table(struct pp_hwmgr *hwmgr)
+ {
+ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+@@ -447,9 +419,6 @@ static int vegam_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
+ case SMU_VCE_TABLE:
+ vegam_update_vce_smc_table(hwmgr);
+ break;
+- case SMU_SAMU_TABLE:
+- vegam_update_samu_smc_table(hwmgr);
+- break;
+ case SMU_BIF_TABLE:
+ vegam_update_bif_smc_table(hwmgr);
+ break;
+@@ -1281,54 +1250,6 @@ static int vegam_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
+ return result;
+ }
+
+-static int vegam_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+- SMU75_Discrete_DpmTable *table)
+-{
+- int result = -EINVAL;
+- uint8_t count;
+- struct pp_atomctrl_clock_dividers_vi dividers;
+- struct phm_ppt_v1_information *table_info =
+- (struct phm_ppt_v1_information *)(hwmgr->pptable);
+- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+- table_info->mm_dep_table;
+- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+- uint32_t vddci;
+-
+- table->SamuBootLevel = 0;
+- table->SamuLevelCount = (uint8_t)(mm_table->count);
+-
+- for (count = 0; count < table->SamuLevelCount; count++) {
+- /* not sure whether we need evclk or not */
+- table->SamuLevel[count].MinVoltage = 0;
+- table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
+- table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+- VOLTAGE_SCALE) << VDDC_SHIFT;
+-
+- if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+- vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+- mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+- else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+- vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+- else
+- vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
+-
+- table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+- table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+-
+- /* retrieve divider value for VBIOS */
+- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+- table->SamuLevel[count].Frequency, &dividers);
+- PP_ASSERT_WITH_CODE((0 == result),
+- "can not find divide id for samu clock", return result);
+-
+- table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+-
+- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
+- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
+- }
+- return result;
+-}
+-
+ static int vegam_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
+ int32_t eng_clock, int32_t mem_clock,
+ SMU75_Discrete_MCArbDramTimingTableEntry *arb_regs)
+@@ -2062,10 +1983,6 @@ static int vegam_init_smc_table(struct pp_hwmgr *hwmgr)
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize VCE Level!", return result);
+
+- result = vegam_populate_smc_samu_level(hwmgr, table);
+- PP_ASSERT_WITH_CODE(!result,
+- "Failed to initialize SAMU Level!", return result);
+-
+ /* Since only the initial state is completely set up at this point
+ * (the other states are just copies of the boot state) we only
+ * need to populate the ARB settings for the initial state.
+@@ -2273,8 +2190,6 @@ static uint32_t vegam_get_offsetof(uint32_t type, uint32_t member)
+ return offsetof(SMU75_Discrete_DpmTable, UvdBootLevel);
+ case VceBootLevel:
+ return offsetof(SMU75_Discrete_DpmTable, VceBootLevel);
+- case SamuBootLevel:
+- return offsetof(SMU75_Discrete_DpmTable, SamuBootLevel);
+ case LowSclkInterruptThreshold:
+ return offsetof(SMU75_Discrete_DpmTable, LowSclkInterruptThreshold);
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4727-drm-amdgpu-Use-real-power-source-in-powerplay-instan.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4727-drm-amdgpu-Use-real-power-source-in-powerplay-instan.patch
new file mode 100644
index 00000000..432961b0
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4727-drm-amdgpu-Use-real-power-source-in-powerplay-instan.patch
@@ -0,0 +1,318 @@
+From a7e256e7ea16def7c5b52790ce6e7863d8617e7a Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Mon, 4 Jun 2018 16:39:38 +0800
+Subject: [PATCH 4727/5725] drm/amdgpu: Use real power source in powerplay
+ instand of hardcode
+
+1. Move ac_power from struct amdgpu_dpm to struct amdgpu_pm so it can be shared with powerplay.
+2. Remove power_source from powerplay and use adev->pm.ac_power instead.
+3. Update ac_power before dispatching a power task.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 21 +++++++------
+ drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 12 ++++----
+ drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/si_dpm.c | 4 +--
+ drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 1 -
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 6 ++--
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 5 +--
+ drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 2 --
+ .../gpu/drm/amd/powerplay/inc/pp_power_source.h | 36 ----------------------
+ 10 files changed, 27 insertions(+), 64 deletions(-)
+ delete mode 100644 drivers/gpu/drm/amd/powerplay/inc/pp_power_source.h
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+index dd6203a..9acfbee 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+@@ -402,7 +402,6 @@ struct amdgpu_dpm {
+ u32 tdp_adjustment;
+ u16 load_line_slope;
+ bool power_control;
+- bool ac_power;
+ /* special states active */
+ bool thermal_active;
+ bool uvd_active;
+@@ -439,6 +438,7 @@ struct amdgpu_pm {
+ struct amd_pp_display_configuration pm_display_cfg;/* set by dc */
+ uint32_t smu_prv_buffer_size;
+ struct amdgpu_bo *smu_prv_buffer;
++ bool ac_power;
+ };
+
+ #define R600_SSTU_DFLT 0
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+index 31fbbcd..bc2dd4f9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+@@ -68,11 +68,11 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
+ if (adev->pm.dpm_enabled) {
+ mutex_lock(&adev->pm.mutex);
+ if (power_supply_is_system_supplied() > 0)
+- adev->pm.dpm.ac_power = true;
++ adev->pm.ac_power = true;
+ else
+- adev->pm.dpm.ac_power = false;
++ adev->pm.ac_power = false;
+ if (adev->powerplay.pp_funcs->enable_bapm)
+- amdgpu_dpm_enable_bapm(adev, adev->pm.dpm.ac_power);
++ amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
+ mutex_unlock(&adev->pm.mutex);
+ }
+ }
+@@ -1906,6 +1906,14 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
+ amdgpu_fence_wait_empty(ring);
+ }
+
++ mutex_lock(&adev->pm.mutex);
++ /* update battery/ac status */
++ if (power_supply_is_system_supplied() > 0)
++ adev->pm.ac_power = true;
++ else
++ adev->pm.ac_power = false;
++ mutex_unlock(&adev->pm.mutex);
++
+ if (adev->powerplay.pp_funcs->dispatch_tasks) {
+ if (!amdgpu_device_has_dc_support(adev)) {
+ mutex_lock(&adev->pm.mutex);
+@@ -1926,14 +1934,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
+ } else {
+ mutex_lock(&adev->pm.mutex);
+ amdgpu_dpm_get_active_displays(adev);
+- /* update battery/ac status */
+- if (power_supply_is_system_supplied() > 0)
+- adev->pm.dpm.ac_power = true;
+- else
+- adev->pm.dpm.ac_power = false;
+-
+ amdgpu_dpm_change_power_state_locked(adev);
+-
+ mutex_unlock(&adev->pm.mutex);
+ }
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+index a266dcf..b6248c0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
++++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+@@ -951,12 +951,12 @@ static void ci_apply_state_adjust_rules(struct amdgpu_device *adev,
+ else
+ pi->battery_state = false;
+
+- if (adev->pm.dpm.ac_power)
++ if (adev->pm.ac_power)
+ max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+ else
+ max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
+
+- if (adev->pm.dpm.ac_power == false) {
++ if (adev->pm.ac_power == false) {
+ for (i = 0; i < ps->performance_level_count; i++) {
+ if (ps->performance_levels[i].mclk > max_limits->mclk)
+ ps->performance_levels[i].mclk = max_limits->mclk;
+@@ -4078,7 +4078,7 @@ static int ci_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
+ const struct amdgpu_clock_and_voltage_limits *max_limits;
+ int i;
+
+- if (adev->pm.dpm.ac_power)
++ if (adev->pm.ac_power)
+ max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+ else
+ max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
+@@ -4127,7 +4127,7 @@ static int ci_enable_vce_dpm(struct amdgpu_device *adev, bool enable)
+ const struct amdgpu_clock_and_voltage_limits *max_limits;
+ int i;
+
+- if (adev->pm.dpm.ac_power)
++ if (adev->pm.ac_power)
+ max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+ else
+ max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
+@@ -4160,7 +4160,7 @@ static int ci_enable_samu_dpm(struct amdgpu_device *adev, bool enable)
+ const struct amdgpu_clock_and_voltage_limits *max_limits;
+ int i;
+
+- if (adev->pm.dpm.ac_power)
++ if (adev->pm.ac_power)
+ max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+ else
+ max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
+@@ -4191,7 +4191,7 @@ static int ci_enable_acp_dpm(struct amdgpu_device *adev, bool enable)
+ const struct amdgpu_clock_and_voltage_limits *max_limits;
+ int i;
+
+- if (adev->pm.dpm.ac_power)
++ if (adev->pm.ac_power)
+ max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+ else
+ max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
+diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+index 17f7f07..d79e6f5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
++++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+@@ -1921,7 +1921,7 @@ static int kv_dpm_set_power_state(void *handle)
+ int ret;
+
+ if (pi->bapm_enable) {
+- ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.dpm.ac_power);
++ ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.ac_power);
+ if (ret) {
+ DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
+ return ret;
+diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+index b12d7c9..9567dd0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
++++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+@@ -3480,7 +3480,7 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
+ disable_sclk_switching = true;
+ }
+
+- if (adev->pm.dpm.ac_power)
++ if (adev->pm.ac_power)
+ max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+ else
+ max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
+@@ -3489,7 +3489,7 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
+ if (ps->performance_levels[i].vddc > ps->performance_levels[i+1].vddc)
+ ps->performance_levels[i].vddc = ps->performance_levels[i+1].vddc;
+ }
+- if (adev->pm.dpm.ac_power == false) {
++ if (adev->pm.ac_power == false) {
+ for (i = 0; i < ps->performance_level_count; i++) {
+ if (ps->performance_levels[i].mclk > max_limits->mclk)
+ ps->performance_levels[i].mclk = max_limits->mclk;
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+index 4ef77ce..9b675d9 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+@@ -81,7 +81,6 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
+ return -EINVAL;
+
+ hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
+- hwmgr->power_source = PP_PowerSource_AC;
+ hwmgr->pp_table_version = PP_TABLE_V1;
+ hwmgr->dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
+ hwmgr->request_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+index d464deb..2d83afe 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+@@ -2877,7 +2877,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
+ struct pp_power_state *request_ps,
+ const struct pp_power_state *current_ps)
+ {
+-
++ struct amdgpu_device *adev = hwmgr->adev;
+ struct smu7_power_state *smu7_ps =
+ cast_phw_smu7_power_state(&request_ps->hardware);
+ uint32_t sclk;
+@@ -2900,12 +2900,12 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
+ "VI should always have 2 performance levels",
+ );
+
+- max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
++ max_limits = adev->pm.ac_power ?
+ &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
+ &(hwmgr->dyn_state.max_clock_voltage_on_dc);
+
+ /* Cap clock DPM tables at DC MAX if it is in DC. */
+- if (PP_PowerSource_DC == hwmgr->power_source) {
++ if (!adev->pm.ac_power) {
+ for (i = 0; i < smu7_ps->performance_level_count; i++) {
+ if (smu7_ps->performance_levels[i].memory_clock > max_limits->mclk)
+ smu7_ps->performance_levels[i].memory_clock = max_limits->mclk;
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+index 8be5a71..da29871 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+@@ -3107,6 +3107,7 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
+ struct pp_power_state *request_ps,
+ const struct pp_power_state *current_ps)
+ {
++ struct amdgpu_device *adev = hwmgr->adev;
+ struct vega10_power_state *vega10_ps =
+ cast_phw_vega10_power_state(&request_ps->hardware);
+ uint32_t sclk;
+@@ -3132,12 +3133,12 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
+ if (vega10_ps->performance_level_count != 2)
+ pr_info("VI should always have 2 performance levels");
+
+- max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
++ max_limits = adev->pm.ac_power ?
+ &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
+ &(hwmgr->dyn_state.max_clock_voltage_on_dc);
+
+ /* Cap clock DPM tables at DC MAX if it is in DC. */
+- if (PP_PowerSource_DC == hwmgr->power_source) {
++ if (!adev->pm.ac_power) {
+ for (i = 0; i < vega10_ps->performance_level_count; i++) {
+ if (vega10_ps->performance_levels[i].mem_clock >
+ max_limits->mclk)
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+index b99fb8a..40c98ca 100644
+--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+@@ -26,7 +26,6 @@
+ #include <linux/seq_file.h>
+ #include "amd_powerplay.h"
+ #include "hardwaremanager.h"
+-#include "pp_power_source.h"
+ #include "hwmgr_ppt.h"
+ #include "ppatomctrl.h"
+ #include "hwmgr_ppt.h"
+@@ -741,7 +740,6 @@ struct pp_hwmgr {
+ const struct pp_table_func *pptable_func;
+
+ struct pp_power_state *ps;
+- enum pp_power_source power_source;
+ uint32_t num_ps;
+ struct pp_thermal_controller_info thermal_controller;
+ bool fan_ctrl_is_in_default_mode;
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_power_source.h b/drivers/gpu/drm/amd/powerplay/inc/pp_power_source.h
+deleted file mode 100644
+index b43315c..0000000
+--- a/drivers/gpu/drm/amd/powerplay/inc/pp_power_source.h
++++ /dev/null
+@@ -1,36 +0,0 @@
+-/*
+- * Copyright 2015 Advanced Micro Devices, Inc.
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included in
+- * all copies or substantial portions of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+- * OTHER DEALINGS IN THE SOFTWARE.
+- *
+- */
+-
+-#ifndef PP_POWERSOURCE_H
+-#define PP_POWERSOURCE_H
+-
+-enum pp_power_source {
+- PP_PowerSource_AC = 0,
+- PP_PowerSource_DC,
+- PP_PowerSource_LimitedPower,
+- PP_PowerSource_LimitedPower_2,
+- PP_PowerSource_Max
+-};
+-
+-
+-#endif
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4728-drm-amd-pp-Implement-update_smc_table-for-CI.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4728-drm-amd-pp-Implement-update_smc_table-for-CI.patch
new file mode 100644
index 00000000..dac45542
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4728-drm-amd-pp-Implement-update_smc_table-for-CI.patch
@@ -0,0 +1,118 @@
+From 62a291de085926c34ae0015ae0e84c768d77e6a8 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Mon, 4 Jun 2018 18:12:31 +0800
+Subject: [PATCH 4728/5725] drm/amd/pp: Implement update_smc_table for CI.
+
+The driver needs to update the UVD/VCE SMC tables before enabling
+UVD/VCE DPM.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c | 84 ++++++++++++++++++++++++
+ 1 file changed, 84 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
+index 8cd21ac..fbe3ef4 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
+@@ -2846,6 +2846,89 @@ static int ci_update_dpm_settings(struct pp_hwmgr *hwmgr,
+ return 0;
+ }
+
++static int ci_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
++{
++ struct amdgpu_device *adev = hwmgr->adev;
++ struct smu7_hwmgr *data = hwmgr->backend;
++ struct ci_smumgr *smu_data = hwmgr->smu_backend;
++ struct phm_uvd_clock_voltage_dependency_table *uvd_table =
++ hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
++ uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
++ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
++ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
++ AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
++ uint32_t max_vddc = adev->pm.ac_power ? hwmgr->dyn_state.max_clock_voltage_on_ac.vddc :
++ hwmgr->dyn_state.max_clock_voltage_on_dc.vddc;
++ int32_t i;
++
++ if (PP_CAP(PHM_PlatformCaps_UVDDPM) || uvd_table->count <= 0)
++ smu_data->smc_state_table.UvdBootLevel = 0;
++ else
++ smu_data->smc_state_table.UvdBootLevel = uvd_table->count - 1;
++
++ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, DPM_TABLE_475,
++ UvdBootLevel, smu_data->smc_state_table.UvdBootLevel);
++
++ data->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
++
++ for (i = uvd_table->count - 1; i >= 0; i--) {
++ if (uvd_table->entries[i].v <= max_vddc)
++ data->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
++ if (hwmgr->dpm_level & profile_mode_mask || !PP_CAP(PHM_PlatformCaps_UVDDPM))
++ break;
++ }
++ ci_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_UVDDPM_SetEnabledMask,
++ data->dpm_level_enable_mask.uvd_dpm_enable_mask);
++
++ return 0;
++}
++
++static int ci_update_vce_smc_table(struct pp_hwmgr *hwmgr)
++{
++ struct amdgpu_device *adev = hwmgr->adev;
++ struct smu7_hwmgr *data = hwmgr->backend;
++ struct phm_vce_clock_voltage_dependency_table *vce_table =
++ hwmgr->dyn_state.vce_clock_voltage_dependency_table;
++ uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
++ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
++ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
++ AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
++ uint32_t max_vddc = adev->pm.ac_power ? hwmgr->dyn_state.max_clock_voltage_on_ac.vddc :
++ hwmgr->dyn_state.max_clock_voltage_on_dc.vddc;
++ int32_t i;
++
++ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, DPM_TABLE_475,
++ VceBootLevel, 0); /* temp hard code to level 0, vce can set min evclk*/
++
++ data->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
++
++ for (i = vce_table->count - 1; i >= 0; i--) {
++ if (vce_table->entries[i].v <= max_vddc)
++ data->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
++ if (hwmgr->dpm_level & profile_mode_mask || !PP_CAP(PHM_PlatformCaps_VCEDPM))
++ break;
++ }
++ ci_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_VCEDPM_SetEnabledMask,
++ data->dpm_level_enable_mask.vce_dpm_enable_mask);
++
++ return 0;
++}
++
++static int ci_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
++{
++ switch (type) {
++ case SMU_UVD_TABLE:
++ ci_update_uvd_smc_table(hwmgr);
++ break;
++ case SMU_VCE_TABLE:
++ ci_update_vce_smc_table(hwmgr);
++ break;
++ default:
++ break;
++ }
++ return 0;
++}
++
+ const struct pp_smumgr_func ci_smu_funcs = {
+ .smu_init = ci_smu_init,
+ .smu_fini = ci_smu_fini,
+@@ -2868,4 +2951,5 @@ const struct pp_smumgr_func ci_smu_funcs = {
+ .initialize_mc_reg_table = ci_initialize_mc_reg_table,
+ .is_dpm_running = ci_is_dpm_running,
+ .update_dpm_settings = ci_update_dpm_settings,
++ .update_smc_table = ci_update_smc_table,
+ };
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4729-drm-amdgpu-Get-real-power-source-to-initizlize-ac_po.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4729-drm-amdgpu-Get-real-power-source-to-initizlize-ac_po.patch
new file mode 100644
index 00000000..428e8cf8
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4729-drm-amdgpu-Get-real-power-source-to-initizlize-ac_po.patch
@@ -0,0 +1,40 @@
+From 7bac726cbf64d5dcea4cb03d9fbe161c30766abb Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Mon, 4 Jun 2018 18:26:18 +0800
+Subject: [PATCH 4729/5725] drm/amdgpu: Get real power source to initizlize
+ ac_power
+
+The driver needs to know the real power source in order to do some power
+related configuration at initialization time.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 3d23d8b..a847b42 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -25,6 +25,7 @@
+ * Alex Deucher
+ * Jerome Glisse
+ */
++#include <linux/power_supply.h>
+ #include <linux/kthread.h>
+ #include <linux/console.h>
+ #include <linux/slab.h>
+@@ -2350,6 +2351,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
+ INIT_DELAYED_WORK(&adev->late_init_work,
+ amdgpu_device_ip_late_init_func_handler);
+
++ adev->pm.ac_power = power_supply_is_system_supplied() > 0 ? true : false;
++
+ /* Registers mapping */
+ /* TODO: block userspace mapping of io register */
+ if (adev->asic_type >= CHIP_BONAIRE) {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4730-drm-amdgpu-Update-function-level-documentation-for-G.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4730-drm-amdgpu-Update-function-level-documentation-for-G.patch
new file mode 100644
index 00000000..e54e6440
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4730-drm-amdgpu-Update-function-level-documentation-for-G.patch
@@ -0,0 +1,101 @@
+From a160c837bad29921ed01997b5cfec89ef399f7a3 Mon Sep 17 00:00:00 2001
+From: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Date: Wed, 13 Jun 2018 16:01:38 -0400
+Subject: [PATCH 4730/5725] drm/amdgpu: Update function level documentation for
+ GPUVM.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Add documentation for the missing parameters.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 9d4dfb2..b1586a7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -337,6 +337,7 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
+ * @vm: VM to clear BO from
+ * @bo: BO to clear
+ * @level: level this BO is at
++ * @pte_support_ats: indicate ATS support from PTE
+ *
+ * Root PD needs to be reserved when calling this.
+ *
+@@ -663,6 +664,7 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
+ * amdgpu_vm_flush - hardware flush the vm
+ *
+ * @ring: ring to use for flush
++ * @job: related job
+ * @need_pipe_sync: is pipe sync needed
+ *
+ * Emit a VM flush when it is necessary.
+@@ -1780,6 +1782,7 @@ static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
+ * amdgpu_vm_prt_cb - callback for updating the PRT status
+ *
+ * @fence: fence for the callback
++ * @_cb: the callback function
+ */
+ static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
+ {
+@@ -2058,6 +2061,7 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
+ * @bo_va: bo_va to store the address
+ * @saddr: where to map the BO
+ * @offset: requested offset in the BO
++ * @size: BO size in bytes
+ * @flags: attributes of pages (read/write/valid/etc.)
+ *
+ * Add a mapping of the BO at the specefied addr into the VM.
+@@ -2121,6 +2125,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
+ * @bo_va: bo_va to store the address
+ * @saddr: where to map the BO
+ * @offset: requested offset in the BO
++ * @size: BO size in bytes
+ * @flags: attributes of pages (read/write/valid/etc.)
+ *
+ * Add a mapping of the BO at the specefied addr into the VM. Replace existing
+@@ -2339,6 +2344,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
+ * amdgpu_vm_bo_lookup_mapping - find mapping by address
+ *
+ * @vm: the requested VM
++ * @addr: the address
+ *
+ * Find a mapping by it's address.
+ *
+@@ -2397,6 +2403,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
+ *
+ * @adev: amdgpu_device pointer
+ * @bo: amdgpu buffer object
++ * @evicted: is the BO evicted
+ *
+ * Mark @bo as invalid.
+ */
+@@ -2462,6 +2469,10 @@ static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
+ *
+ * @adev: amdgpu_device pointer
+ * @vm_size: the default vm size if it's set auto
++ * @fragment_size_default: Default PTE fragment size
++ * @max_level: max VMPT level
++ * @max_bits: max address space size in bits
++ *
+ */
+ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
+ uint32_t fragment_size_default, unsigned max_level,
+@@ -2547,6 +2558,7 @@ static void amdgpu_inc_compute_vms(struct amdgpu_device *adev)
+ * @adev: amdgpu_device pointer
+ * @vm: requested vm
+ * @vm_context: Indicates if it GFX or Compute context
++ * @pasid: Process address space identifier
+ *
+ * Init @vm fields.
+ *
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4731-drm-amd-display-Drop-to-fail-safe-mode-if-edid-is-ba.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4731-drm-amd-display-Drop-to-fail-safe-mode-if-edid-is-ba.patch
new file mode 100644
index 00000000..909d3356
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4731-drm-amd-display-Drop-to-fail-safe-mode-if-edid-is-ba.patch
@@ -0,0 +1,35 @@
+From 022bc228b3719d912336445cce84c28385806f45 Mon Sep 17 00:00:00 2001
+From: Mikita Lipski <mikita.lipski@amd.com>
+Date: Tue, 22 May 2018 15:55:43 -0400
+Subject: [PATCH 4731/5725] drm/amd/display: Drop to fail-safe mode if edid is
+ bad
+
+Provide the connector with a single fail-safe mode of 640x480 for CTS
+tests instead of providing a list of possible base modes.
+
+Signed-off-by: Mikita Lipski <mikita.lipski@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 2aa0041..8bc6736 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -3798,6 +3798,10 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
+ struct edid *edid = amdgpu_dm_connector->edid;
+
+ encoder = helper->best_encoder(connector);
++
++ if (!edid || !drm_edid_is_valid(edid))
++ return drm_add_modes_noedid(connector, 640, 480);
++
+ amdgpu_dm_connector_ddc_get_modes(connector, edid);
+ amdgpu_dm_connector_add_common_modes(encoder, connector);
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4732-drm-amd-display-Write-TEST_EDID_CHECKSUM_WRITE-for-E.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4732-drm-amd-display-Write-TEST_EDID_CHECKSUM_WRITE-for-E.patch
new file mode 100644
index 00000000..346d3558
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4732-drm-amd-display-Write-TEST_EDID_CHECKSUM_WRITE-for-E.patch
@@ -0,0 +1,86 @@
+From 9c86b692e86bc47641c63bbc62b24142aaba2df3 Mon Sep 17 00:00:00 2001
+From: Mikita Lipski <mikita.lipski@amd.com>
+Date: Thu, 17 May 2018 15:44:20 -0400
+Subject: [PATCH 4732/5725] drm/amd/display: Write TEST_EDID_CHECKSUM_WRITE for
+ EDID tests
+
+Extract edid's checksum and send it back for verification if EDID_TEST
+is requested.
+
+Also add a flag for the EDID checksum write in the TEST_RESPONSE structure,
+and fix a simple spelling error.
+
+Signed-off-by: Mikita Lipski <mikita.lipski@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 28 ++++++++++++++++++++++
+ drivers/gpu/drm/amd/display/dc/dc_dp_types.h | 5 ++--
+ 2 files changed, 31 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+index bd44935..dea49dc 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+@@ -497,6 +497,34 @@ enum dc_edid_status dm_helpers_read_local_edid(
+ DRM_ERROR("EDID err: %d, on connector: %s",
+ edid_status,
+ aconnector->base.name);
++ if (link->aux_mode) {
++ union test_request test_request = {0};
++ union test_response test_response = {0};
++
++ dm_helpers_dp_read_dpcd(ctx,
++ link,
++ DP_TEST_REQUEST,
++ &test_request.raw,
++ sizeof(union test_request));
++
++ if (!test_request.bits.EDID_READ)
++ return edid_status;
++
++ test_response.bits.EDID_CHECKSUM_WRITE = 1;
++
++ dm_helpers_dp_write_dpcd(ctx,
++ link,
++ DP_TEST_EDID_CHECKSUM,
++ &sink->dc_edid.raw_edid[sink->dc_edid.length-1],
++ 1);
++
++ dm_helpers_dp_write_dpcd(ctx,
++ link,
++ DP_TEST_RESPONSE,
++ &test_response.raw,
++ sizeof(test_response));
++
++ }
+
+ return edid_status;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+index 90bccd5..da93ab4 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+@@ -430,7 +430,7 @@ union test_request {
+ struct {
+ uint8_t LINK_TRAINING :1;
+ uint8_t LINK_TEST_PATTRN :1;
+- uint8_t EDID_REAT :1;
++ uint8_t EDID_READ :1;
+ uint8_t PHY_TEST_PATTERN :1;
+ uint8_t AUDIO_TEST_PATTERN :1;
+ uint8_t RESERVED :1;
+@@ -443,7 +443,8 @@ union test_response {
+ struct {
+ uint8_t ACK :1;
+ uint8_t NO_ACK :1;
+- uint8_t RESERVED :6;
++ uint8_t EDID_CHECKSUM_WRITE:1;
++ uint8_t RESERVED :5;
+ } bits;
+ uint8_t raw;
+ };
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4733-drm-amd-display-Stream-encoder-update.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4733-drm-amd-display-Stream-encoder-update.patch
new file mode 100644
index 00000000..2f7f8ec0
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4733-drm-amd-display-Stream-encoder-update.patch
@@ -0,0 +1,68 @@
+From 4021feb98c47aad8f783b687793078679fe7895e Mon Sep 17 00:00:00 2001
+From: Eric Bernstein <eric.bernstein@amd.com>
+Date: Tue, 22 May 2018 15:16:33 -0400
+Subject: [PATCH 4733/5725] drm/amd/display: Stream encoder update
+
+Update stream encoder based on feedback from HW team.
+
+Signed-off-by: Eric Bernstein <eric.bernstein@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../amd/display/dc/dcn10/dcn10_stream_encoder.c | 30 +---------------------
+ 1 file changed, 1 insertion(+), 29 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+index 147f614..32a4997 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+@@ -1097,27 +1097,6 @@ static union audio_cea_channels speakers_to_channels(
+ return cea_channels;
+ }
+
+-static uint32_t calc_max_audio_packets_per_line(
+- const struct audio_crtc_info *crtc_info)
+-{
+- uint32_t max_packets_per_line;
+-
+- max_packets_per_line =
+- crtc_info->h_total - crtc_info->h_active;
+-
+- if (crtc_info->pixel_repetition)
+- max_packets_per_line *= crtc_info->pixel_repetition;
+-
+- /* for other hdmi features */
+- max_packets_per_line -= 58;
+- /* for Control Period */
+- max_packets_per_line -= 16;
+- /* Number of Audio Packets per Line */
+- max_packets_per_line /= 32;
+-
+- return max_packets_per_line;
+-}
+-
+ static void get_audio_clock_info(
+ enum dc_color_depth color_depth,
+ uint32_t crtc_pixel_clock_in_khz,
+@@ -1211,16 +1190,9 @@ static void enc1_se_setup_hdmi_audio(
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ struct audio_clock_info audio_clock_info = {0};
+- uint32_t max_packets_per_line;
+-
+- /* For now still do calculation, although this field is ignored when
+- * above HDMI_PACKET_GEN_VERSION set to 1
+- */
+- max_packets_per_line = calc_max_audio_packets_per_line(crtc_info);
+
+ /* HDMI_AUDIO_PACKET_CONTROL */
+- REG_UPDATE_2(HDMI_AUDIO_PACKET_CONTROL,
+- HDMI_AUDIO_PACKETS_PER_LINE, max_packets_per_line,
++ REG_UPDATE(HDMI_AUDIO_PACKET_CONTROL,
+ HDMI_AUDIO_DELAY_EN, 1);
+
+ /* AFMT_AUDIO_PACKET_CONTROL */
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4734-drm-amd-display-Move-i2c-and-aux-structs-into-dc_ddc.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4734-drm-amd-display-Move-i2c-and-aux-structs-into-dc_ddc.patch
new file mode 100644
index 00000000..e8b20489
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4734-drm-amd-display-Move-i2c-and-aux-structs-into-dc_ddc.patch
@@ -0,0 +1,180 @@
+From 64d1b2306a12bc1bf3b890f410410ecee9461ef4 Mon Sep 17 00:00:00 2001
+From: Harry Wentland <harry.wentland@amd.com>
+Date: Wed, 9 May 2018 11:35:21 -0400
+Subject: [PATCH 4734/5725] drm/amd/display: Move i2c and aux structs into
+ dc_ddc_types.h
+
+We'd like to use some of them in dc_link_ddc and amdgpu_dm and should
+have them available in dc_ddc_types.h.
+
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc_ddc_types.h | 59 ++++++++++++++++++++++
+ drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h | 44 +---------------
+ drivers/gpu/drm/amd/display/dc/i2caux/engine.h | 15 +-----
+ 3 files changed, 62 insertions(+), 56 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
+index ee04812..05c8c31 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
+@@ -25,6 +25,65 @@
+ #ifndef DC_DDC_TYPES_H_
+ #define DC_DDC_TYPES_H_
+
++enum aux_transaction_type {
++ AUX_TRANSACTION_TYPE_DP,
++ AUX_TRANSACTION_TYPE_I2C
++};
++
++
++enum i2caux_transaction_action {
++ I2CAUX_TRANSACTION_ACTION_I2C_WRITE = 0x00,
++ I2CAUX_TRANSACTION_ACTION_I2C_READ = 0x10,
++ I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST = 0x20,
++
++ I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT = 0x40,
++ I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT = 0x50,
++ I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT = 0x60,
++
++ I2CAUX_TRANSACTION_ACTION_DP_WRITE = 0x80,
++ I2CAUX_TRANSACTION_ACTION_DP_READ = 0x90
++};
++
++enum aux_channel_operation_result {
++ AUX_CHANNEL_OPERATION_SUCCEEDED,
++ AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN,
++ AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY,
++ AUX_CHANNEL_OPERATION_FAILED_TIMEOUT,
++ AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON
++};
++
++
++struct aux_request_transaction_data {
++ enum aux_transaction_type type;
++ enum i2caux_transaction_action action;
++ /* 20-bit AUX channel transaction address */
++ uint32_t address;
++ /* delay, in 100-microsecond units */
++ uint8_t delay;
++ uint32_t length;
++ uint8_t *data;
++};
++
++enum aux_transaction_reply {
++ AUX_TRANSACTION_REPLY_AUX_ACK = 0x00,
++ AUX_TRANSACTION_REPLY_AUX_NACK = 0x01,
++ AUX_TRANSACTION_REPLY_AUX_DEFER = 0x02,
++
++ AUX_TRANSACTION_REPLY_I2C_ACK = 0x00,
++ AUX_TRANSACTION_REPLY_I2C_NACK = 0x10,
++ AUX_TRANSACTION_REPLY_I2C_DEFER = 0x20,
++
++ AUX_TRANSACTION_REPLY_HPD_DISCON = 0x40,
++
++ AUX_TRANSACTION_REPLY_INVALID = 0xFF
++};
++
++struct aux_reply_transaction_data {
++ enum aux_transaction_reply status;
++ uint32_t length;
++ uint8_t *data;
++};
++
+ struct i2c_payload {
+ bool write;
+ uint8_t address;
+diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h
+index b9e35d0..b01488f 100644
+--- a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h
++++ b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h
+@@ -26,49 +26,7 @@
+ #ifndef __DAL_AUX_ENGINE_H__
+ #define __DAL_AUX_ENGINE_H__
+
+-enum aux_transaction_type {
+- AUX_TRANSACTION_TYPE_DP,
+- AUX_TRANSACTION_TYPE_I2C
+-};
+-
+-struct aux_request_transaction_data {
+- enum aux_transaction_type type;
+- enum i2caux_transaction_action action;
+- /* 20-bit AUX channel transaction address */
+- uint32_t address;
+- /* delay, in 100-microsecond units */
+- uint8_t delay;
+- uint32_t length;
+- uint8_t *data;
+-};
+-
+-enum aux_transaction_reply {
+- AUX_TRANSACTION_REPLY_AUX_ACK = 0x00,
+- AUX_TRANSACTION_REPLY_AUX_NACK = 0x01,
+- AUX_TRANSACTION_REPLY_AUX_DEFER = 0x02,
+-
+- AUX_TRANSACTION_REPLY_I2C_ACK = 0x00,
+- AUX_TRANSACTION_REPLY_I2C_NACK = 0x10,
+- AUX_TRANSACTION_REPLY_I2C_DEFER = 0x20,
+-
+- AUX_TRANSACTION_REPLY_HPD_DISCON = 0x40,
+-
+- AUX_TRANSACTION_REPLY_INVALID = 0xFF
+-};
+-
+-struct aux_reply_transaction_data {
+- enum aux_transaction_reply status;
+- uint32_t length;
+- uint8_t *data;
+-};
+-
+-enum aux_channel_operation_result {
+- AUX_CHANNEL_OPERATION_SUCCEEDED,
+- AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN,
+- AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY,
+- AUX_CHANNEL_OPERATION_FAILED_TIMEOUT,
+- AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON
+-};
++#include "dc_ddc_types.h"
+
+ struct aux_engine;
+
+diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/engine.h
+index c110970..1e8a158 100644
+--- a/drivers/gpu/drm/amd/display/dc/i2caux/engine.h
++++ b/drivers/gpu/drm/amd/display/dc/i2caux/engine.h
+@@ -26,6 +26,8 @@
+ #ifndef __DAL_ENGINE_H__
+ #define __DAL_ENGINE_H__
+
++#include "dc_ddc_types.h"
++
+ enum i2caux_transaction_operation {
+ I2CAUX_TRANSACTION_READ,
+ I2CAUX_TRANSACTION_WRITE
+@@ -76,19 +78,6 @@ enum i2c_default_speed {
+ I2CAUX_DEFAULT_I2C_SW_SPEED = 50
+ };
+
+-enum i2caux_transaction_action {
+- I2CAUX_TRANSACTION_ACTION_I2C_WRITE = 0x00,
+- I2CAUX_TRANSACTION_ACTION_I2C_READ = 0x10,
+- I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST = 0x20,
+-
+- I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT = 0x40,
+- I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT = 0x50,
+- I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT = 0x60,
+-
+- I2CAUX_TRANSACTION_ACTION_DP_WRITE = 0x80,
+- I2CAUX_TRANSACTION_ACTION_DP_READ = 0x90
+-};
+-
+ struct engine;
+
+ struct engine_funcs {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4735-drm-amd-display-Add-use_dynamic_meta-flag-to-stream_.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4735-drm-amd-display-Add-use_dynamic_meta-flag-to-stream_.patch
new file mode 100644
index 00000000..bcd696ad
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4735-drm-amd-display-Add-use_dynamic_meta-flag-to-stream_.patch
@@ -0,0 +1,44 @@
+From 934ded05716413b33c979bf96b9821624f9e9ca1 Mon Sep 17 00:00:00 2001
+From: Krunoslav Kovac <Krunoslav.Kovac@amd.com>
+Date: Thu, 17 May 2018 14:50:12 -0400
+Subject: [PATCH 4735/5725] drm/amd/display: Add use_dynamic_meta flag to
+ stream_state
+
+Signed-off-by: Krunoslav Kovac <Krunoslav.Kovac@amd.com>
+Reviewed-by: Anthony Koo <Anthony.Koo@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 3 ++-
+ drivers/gpu/drm/amd/display/dc/dc_stream.h | 1 +
+ 2 files changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index 07e2c28..55711c6 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -2404,7 +2404,8 @@ static void set_hdr_static_info_packet(
+ {
+ /* HDR Static Metadata info packet for HDR10 */
+
+- if (!stream->hdr_static_metadata.valid)
++ if (!stream->hdr_static_metadata.valid ||
++ stream->use_dynamic_meta)
+ return;
+
+ *info_packet = stream->hdr_static_metadata;
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+index d06fd41..984a7a3 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+@@ -66,6 +66,7 @@ struct dc_stream_state {
+
+ struct dc_info_packet hdr_static_metadata;
+ PHYSICAL_ADDRESS_LOC dmdata_address;
++ bool use_dynamic_meta;
+
+ struct dc_transfer_func *out_transfer_func;
+ struct colorspace_transform gamut_remap_matrix;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4736-drm-amd-display-Drop-duplicate-dc_stream_set_static_.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4736-drm-amd-display-Drop-duplicate-dc_stream_set_static_.patch
new file mode 100644
index 00000000..465a7b12
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4736-drm-amd-display-Drop-duplicate-dc_stream_set_static_.patch
@@ -0,0 +1,31 @@
+From a5fed478e88922e6477e62b6f094841188293ecd Mon Sep 17 00:00:00 2001
+From: Krunoslav Kovac <Krunoslav.Kovac@amd.com>
+Date: Wed, 30 May 2018 10:56:32 -0400
+Subject: [PATCH 4736/5725] drm/amd/display: Drop duplicate
+ dc_stream_set_static_screen_events definition
+
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Krunoslav Kovac <Krunoslav.Kovac@amd.com>
+Reviewed-by: Anthony Koo <Anthony.Koo@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc_stream.h | 5 -----
+ 1 file changed, 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+index 984a7a3..597c6bf 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+@@ -304,9 +304,4 @@ bool dc_stream_get_crtc_position(struct dc *dc,
+ unsigned int *v_pos,
+ unsigned int *nom_v_pos);
+
+-void dc_stream_set_static_screen_events(struct dc *dc,
+- struct dc_stream_state **stream,
+- int num_streams,
+- const struct dc_static_screen_events *events);
+-
+ #endif /* DC_STREAM_H_ */
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4737-drm-amd-display-Make-it-more-clear-when-info-frames-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4737-drm-amd-display-Make-it-more-clear-when-info-frames-.patch
new file mode 100644
index 00000000..518728e1
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4737-drm-amd-display-Make-it-more-clear-when-info-frames-.patch
@@ -0,0 +1,49 @@
+From b6dd520fb68fdc0add7c8fd88b38c67dab096122 Mon Sep 17 00:00:00 2001
+From: Krunoslav Kovac <Krunoslav.Kovac@amd.com>
+Date: Wed, 30 May 2018 10:57:32 -0400
+Subject: [PATCH 4737/5725] drm/amd/display: Make it more clear when info
+ frames affect DP or HDMI
+
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Krunoslav Kovac <Krunoslav.Kovac@amd.com>
+Reviewed-by: Anthony Koo <Anthony.Koo@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 13 +++++++++++--
+ 1 file changed, 11 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index 383b63d..57d8113 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -665,16 +665,25 @@ static enum dc_status bios_parser_crtc_source_select(
+
+ void dce110_update_info_frame(struct pipe_ctx *pipe_ctx)
+ {
++ bool is_hdmi;
++ bool is_dp;
++
+ ASSERT(pipe_ctx->stream);
+
+ if (pipe_ctx->stream_res.stream_enc == NULL)
+ return; /* this is not root pipe */
+
+- if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
++ is_hdmi = dc_is_hdmi_signal(pipe_ctx->stream->signal);
++ is_dp = dc_is_dp_signal(pipe_ctx->stream->signal);
++
++ if (!is_hdmi && !is_dp)
++ return;
++
++ if (is_hdmi)
+ pipe_ctx->stream_res.stream_enc->funcs->update_hdmi_info_packets(
+ pipe_ctx->stream_res.stream_enc,
+ &pipe_ctx->stream_res.encoder_info_frame);
+- else if (dc_is_dp_signal(pipe_ctx->stream->signal))
++ else
+ pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets(
+ pipe_ctx->stream_res.stream_enc,
+ &pipe_ctx->stream_res.encoder_info_frame);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4738-drm-amd-display-Convert-quotes-to-Ascii-quotes.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4738-drm-amd-display-Convert-quotes-to-Ascii-quotes.patch
new file mode 100644
index 00000000..b20f94f0
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4738-drm-amd-display-Convert-quotes-to-Ascii-quotes.patch
@@ -0,0 +1,29 @@
+From a5843f41a5adbc00376073d263d27ad8e7225af5 Mon Sep 17 00:00:00 2001
+From: Krunoslav Kovac <Krunoslav.Kovac@amd.com>
+Date: Wed, 30 May 2018 10:59:11 -0400
+Subject: [PATCH 4738/5725] drm/amd/display: Convert quotes to Ascii quotes
+
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Krunoslav Kovac <Krunoslav.Kovac@amd.com>
+Reviewed-by: Anthony Koo <Anthony.Koo@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+index 32a4997..c6a13d0 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+@@ -301,7 +301,7 @@ void enc1_stream_encoder_dp_set_stream_attribute(
+ /* For YCbCr420 and BT2020 Colorimetry Formats, VSC SDP shall be used.
+ * When MISC1, bit 6, is Set to 1, a Source device uses a VSC SDP to indicate the
+ * Pixel Encoding/Colorimetry Format and that a Sink device shall ignore MISC1, bit 7,
+- * and MISC0, bits 7:1 (MISC1, bit 7, and MISC0, bits 7:1, become “don't care”).
++ * and MISC0, bits 7:1 (MISC1, bit 7, and MISC0, bits 7:1, become "don't care").
+ */
+ if ((crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) ||
+ (output_color_space == COLOR_SPACE_2020_YCBCR) ||
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4739-drm-amd-display-Disable-stats-by-default.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4739-drm-amd-display-Disable-stats-by-default.patch
new file mode 100644
index 00000000..4395f433
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4739-drm-amd-display-Disable-stats-by-default.patch
@@ -0,0 +1,29 @@
+From d4b730648e101d71b6e9c61432df17a6bd6f0e84 Mon Sep 17 00:00:00 2001
+From: Anthony Koo <Anthony.Koo@amd.com>
+Date: Thu, 24 May 2018 11:25:00 -0400
+Subject: [PATCH 4739/5725] drm/amd/display: Disable stats by default
+
+Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/modules/stats/stats.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/modules/stats/stats.c b/drivers/gpu/drm/amd/display/modules/stats/stats.c
+index fa0665d..480eb2c 100644
+--- a/drivers/gpu/drm/amd/display/modules/stats/stats.c
++++ b/drivers/gpu/drm/amd/display/modules/stats/stats.c
+@@ -29,7 +29,7 @@
+ #include "core_types.h"
+
+ #define DAL_STATS_ENABLE_REGKEY "DalStatsEnable"
+-#define DAL_STATS_ENABLE_REGKEY_DEFAULT 0x00000001
++#define DAL_STATS_ENABLE_REGKEY_DEFAULT 0x00000000
+ #define DAL_STATS_ENABLE_REGKEY_ENABLED 0x00000001
+
+ #define DAL_STATS_ENTRIES_REGKEY "DalStatsEntries"
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4740-drm-amd-display-Add-new-transfer-type-HWPWL.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4740-drm-amd-display-Add-new-transfer-type-HWPWL.patch
new file mode 100644
index 00000000..d508caf3
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4740-drm-amd-display-Add-new-transfer-type-HWPWL.patch
@@ -0,0 +1,48 @@
+From 753f0e0229ffe78b003ff5d143e01ee3345963cd Mon Sep 17 00:00:00 2001
+From: Vitaly Prosyak <vitaly.prosyak@amd.com>
+Date: Fri, 25 May 2018 08:37:36 -0500
+Subject: [PATCH 4740/5725] drm/amd/display: Add new transfer type HWPWL
+
+It is used when curve register settings are generated
+by 'matlab', i.e. they bypass the color module calculation.
+
+Signed-off-by: Vitaly Prosyak <vitaly.prosyak@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc.h | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 096098e..7cd4d5e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -358,6 +358,7 @@ enum dc_transfer_func_type {
+ TF_TYPE_PREDEFINED,
+ TF_TYPE_DISTRIBUTED_POINTS,
+ TF_TYPE_BYPASS,
++ TF_TYPE_HWPWL
+ };
+
+ struct dc_transfer_func_distributed_points {
+@@ -383,12 +384,15 @@ enum dc_transfer_func_predefined {
+
+ struct dc_transfer_func {
+ struct kref refcount;
+- struct dc_transfer_func_distributed_points tf_pts;
+ enum dc_transfer_func_type type;
+ enum dc_transfer_func_predefined tf;
+ /* FP16 1.0 reference level in nits, default is 80 nits, only for PQ*/
+ uint32_t sdr_ref_white_level;
+ struct dc_context *ctx;
++ union {
++ struct pwl_params pwl;
++ struct dc_transfer_func_distributed_points tf_pts;
++ };
+ };
+
+ /*
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4741-drm-amd-display-create-sink_id-in-dc_sink-structure-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4741-drm-amd-display-create-sink_id-in-dc_sink-structure-.patch
new file mode 100644
index 00000000..554552f3
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4741-drm-amd-display-create-sink_id-in-dc_sink-structure-.patch
@@ -0,0 +1,78 @@
+From 5a00ad09adddcab065c1006de3aa3ec034b59a5d Mon Sep 17 00:00:00 2001
+From: Alvin lee <alvin.lee3@amd.com>
+Date: Fri, 18 May 2018 14:14:38 -0400
+Subject: [PATCH 4741/5725] drm/amd/display: create sink_id in dc_sink
+ structure to identify all sinks
+
+Signed-off-by: Alvin lee <alvin.lee3@amd.com>
+Reviewed-by: Jun Lei <Jun.Lei@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc.c | 1 +
+ drivers/gpu/drm/amd/display/dc/core/dc_sink.c | 4 ++++
+ drivers/gpu/drm/amd/display/dc/dc.h | 6 +++++-
+ drivers/gpu/drm/amd/display/dc/dc_types.h | 1 +
+ 4 files changed, 11 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 7da1e88..16ebdc1 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -479,6 +479,7 @@ static bool construct(struct dc *dc,
+ dc_ctx->driver_context = init_params->driver;
+ dc_ctx->dc = dc;
+ dc_ctx->asic_id = init_params->asic_id;
++ dc_ctx->dc_sink_id_count = 0;
+ dc->ctx = dc_ctx;
+
+ dc->current_state = dc_create_state();
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
+index 25fae38..9971b51 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
+@@ -53,6 +53,10 @@ static bool construct(struct dc_sink *sink, const struct dc_sink_init_data *init
+ sink->dongle_max_pix_clk = init_params->dongle_max_pix_clk;
+ sink->converter_disable_audio = init_params->converter_disable_audio;
+ sink->dc_container_id = NULL;
++ sink->sink_id = init_params->link->ctx->dc_sink_id_count;
++ // increment dc_sink_id_count because we don't want two sinks with same ID
++ // unless they are actually the same
++ init_params->link->ctx->dc_sink_id_count++;
+
+ return true;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 7cd4d5e..56c1e2b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -679,9 +679,13 @@ struct dc_sink {
+ struct dc_link *link;
+ struct dc_context *ctx;
+
++ uint32_t sink_id;
++
+ /* private to dc_sink.c */
++ // refcount must be the last member in dc_sink, since we want the
++ // sink structure to be logically cloneable up to (but not including)
++ // refcount
+ struct kref refcount;
+-
+ };
+
+ void dc_sink_retain(struct dc_sink *sink);
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
+index 40d620f..9467249 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
+@@ -92,6 +92,7 @@ struct dc_context {
+ bool created_bios;
+ struct gpio_service *gpio_service;
+ struct i2caux *i2caux;
++ uint32_t dc_sink_id_count;
+ uint64_t fbc_gpu_addr;
+ };
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4742-drm-amd-display-Allow-DP-register-double-buffer.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4742-drm-amd-display-Allow-DP-register-double-buffer.patch
new file mode 100644
index 00000000..39405e58
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4742-drm-amd-display-Allow-DP-register-double-buffer.patch
@@ -0,0 +1,138 @@
+From 2bb61af9b1b1035849a1c869916188f9c8c08f0b Mon Sep 17 00:00:00 2001
+From: Eric Bernstein <eric.bernstein@amd.com>
+Date: Fri, 25 May 2018 11:57:26 -0400
+Subject: [PATCH 4742/5725] drm/amd/display: Allow DP register double buffer
+
+Remove setting DP_DB_DISABLE to avoid issues when changing
+bit depth after vbios takeover.
+Refactor the code to perform a single register update for both
+the pixel encoding and component depth fields.
+
+Signed-off-by: Eric Bernstein <eric.bernstein@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../drm/amd/display/dc/dce/dce_stream_encoder.c | 5 ---
+ .../amd/display/dc/dcn10/dcn10_stream_encoder.c | 44 ++++++++++------------
+ 2 files changed, 19 insertions(+), 30 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+index c0e813c..91642e6 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+@@ -289,11 +289,6 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
+
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+- if (REG(DP_DB_CNTL))
+- REG_UPDATE(DP_DB_CNTL, DP_DB_DISABLE, 1);
+-#endif
+-
+ /* set pixel encoding */
+ switch (crtc_timing->pixel_encoding) {
+ case PIXEL_ENCODING_YCBCR422:
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+index c6a13d0..6f9078f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+@@ -257,20 +257,18 @@ void enc1_stream_encoder_dp_set_stream_attribute(
+ uint8_t colorimetry_bpc;
+ uint8_t dynamic_range_rgb = 0; /*full range*/
+ uint8_t dynamic_range_ycbcr = 1; /*bt709*/
++ uint8_t dp_pixel_encoding = 0;
++ uint8_t dp_component_depth = 0;
+
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+- REG_UPDATE(DP_DB_CNTL, DP_DB_DISABLE, 1);
+-
+ /* set pixel encoding */
+ switch (crtc_timing->pixel_encoding) {
+ case PIXEL_ENCODING_YCBCR422:
+- REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
+- DP_PIXEL_ENCODING_TYPE_YCBCR422);
++ dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_YCBCR422;
+ break;
+ case PIXEL_ENCODING_YCBCR444:
+- REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
+- DP_PIXEL_ENCODING_TYPE_YCBCR444);
++ dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_YCBCR444;
+
+ if (crtc_timing->flags.Y_ONLY)
+ if (crtc_timing->display_color_depth != COLOR_DEPTH_666)
+@@ -278,8 +276,8 @@ void enc1_stream_encoder_dp_set_stream_attribute(
+ * Color depth of Y-only could be
+ * 8, 10, 12, 16 bits
+ */
+- REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
+- DP_PIXEL_ENCODING_TYPE_Y_ONLY);
++ dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_Y_ONLY;
++
+ /* Note: DP_MSA_MISC1 bit 7 is the indicator
+ * of Y-only mode.
+ * This bit is set in HW if register
+@@ -287,13 +285,11 @@ void enc1_stream_encoder_dp_set_stream_attribute(
+ */
+ break;
+ case PIXEL_ENCODING_YCBCR420:
+- REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
+- DP_PIXEL_ENCODING_TYPE_YCBCR420);
++ dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_YCBCR420;
+ REG_UPDATE(DP_VID_TIMING, DP_VID_N_MUL, 1);
+ break;
+ default:
+- REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
+- DP_PIXEL_ENCODING_TYPE_RGB444);
++ dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_RGB444;
+ break;
+ }
+
+@@ -314,32 +310,30 @@ void enc1_stream_encoder_dp_set_stream_attribute(
+ /* set color depth */
+ switch (crtc_timing->display_color_depth) {
+ case COLOR_DEPTH_666:
+- REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
+- 0);
++ dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_6BPC;
+ break;
+ case COLOR_DEPTH_888:
+- REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
+- DP_COMPONENT_PIXEL_DEPTH_8BPC);
++ dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_8BPC;
+ break;
+ case COLOR_DEPTH_101010:
+- REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
+- DP_COMPONENT_PIXEL_DEPTH_10BPC);
+-
++ dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_10BPC;
+ break;
+ case COLOR_DEPTH_121212:
+- REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
+- DP_COMPONENT_PIXEL_DEPTH_12BPC);
++ dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_12BPC;
+ break;
+ case COLOR_DEPTH_161616:
+- REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
+- DP_COMPONENT_PIXEL_DEPTH_16BPC);
++ dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_16BPC;
+ break;
+ default:
+- REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
+- DP_COMPONENT_PIXEL_DEPTH_6BPC);
++ dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_6BPC;
+ break;
+ }
+
++ /* Set DP pixel encoding and component depth */
++ REG_UPDATE_2(DP_PIXEL_FORMAT,
++ DP_PIXEL_ENCODING, dp_pixel_encoding,
++ DP_COMPONENT_DEPTH, dp_component_depth);
++
+ /* set dynamic range and YCbCr range */
+
+ switch (crtc_timing->display_color_depth) {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4743-drm-amd-display-Add-num_opp-to-resource_caps.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4743-drm-amd-display-Add-num_opp-to-resource_caps.patch
new file mode 100644
index 00000000..df1be129
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4743-drm-amd-display-Add-num_opp-to-resource_caps.patch
@@ -0,0 +1,44 @@
+From 1e2717edbc9cc215ad7292966c6a150b61e96863 Mon Sep 17 00:00:00 2001
+From: Eric Bernstein <eric.bernstein@amd.com>
+Date: Fri, 18 May 2018 10:49:33 -0400
+Subject: [PATCH 4743/5725] drm/amd/display: Add num_opp to resource_caps
+
+Number of OPPs to be instantiated is based on number
+of timing generators, not number of pipes.
+
+Signed-off-by: Eric Bernstein <eric.bernstein@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 1 +
+ drivers/gpu/drm/amd/display/dc/inc/resource.h | 1 +
+ 2 files changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+index a2318ca..704acc0 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+@@ -417,6 +417,7 @@ static const struct dce110_clk_src_mask cs_mask = {
+
+ static const struct resource_caps res_cap = {
+ .num_timing_generator = 4,
++ .num_opp = 4,
+ .num_video_plane = 4,
+ .num_audio = 4,
+ .num_stream_encoder = 4,
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
+index abf42a7..5b32100 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
+@@ -38,6 +38,7 @@ enum dce_version resource_parse_asic_id(
+
+ struct resource_caps {
+ int num_timing_generator;
++ int num_opp;
+ int num_video_plane;
+ int num_audio;
+ int num_stream_encoder;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4744-drm-amd-display-Do-not-skip-FBC-init-in-failsafe-mod.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4744-drm-amd-display-Do-not-skip-FBC-init-in-failsafe-mod.patch
new file mode 100644
index 00000000..e967d70e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4744-drm-amd-display-Do-not-skip-FBC-init-in-failsafe-mod.patch
@@ -0,0 +1,44 @@
+From 31f4f5970c6b6e5276a8ab7cc66e747bea98b799 Mon Sep 17 00:00:00 2001
+From: Mikita Lipski <mikita.lipski@amd.com>
+Date: Mon, 28 May 2018 10:08:30 -0400
+Subject: [PATCH 4744/5725] drm/amd/display: Do not skip FBC init in failsafe
+ mode
+
+Initially FBC would only be initialized if the display's edid was correct
+and all the modes were acquired from it, but in the case where the edid is
+corrupted or non-existent we must still initialize FBC.
+
+Signed-off-by: Mikita Lipski <mikita.lipski@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 8bc6736..985c3b9 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -3799,12 +3799,12 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
+
+ encoder = helper->best_encoder(connector);
+
+- if (!edid || !drm_edid_is_valid(edid))
+- return drm_add_modes_noedid(connector, 640, 480);
+-
+- amdgpu_dm_connector_ddc_get_modes(connector, edid);
+- amdgpu_dm_connector_add_common_modes(encoder, connector);
+-
++ if (!edid || !drm_edid_is_valid(edid)) {
++ drm_add_modes_noedid(connector, 640, 480);
++ } else {
++ amdgpu_dm_connector_ddc_get_modes(connector, edid);
++ amdgpu_dm_connector_add_common_modes(encoder, connector);
++ }
+ amdgpu_dm_fbc_init(connector);
+
+ return amdgpu_dm_connector->num_modes;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4745-amdgpu-display-use-modern-ktime-accessors.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4745-amdgpu-display-use-modern-ktime-accessors.patch
new file mode 100644
index 00000000..d5977370
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4745-amdgpu-display-use-modern-ktime-accessors.patch
@@ -0,0 +1,55 @@
+From daa8f4fc68de9599d26a21fbe18f9e365d89c4a4 Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Mon, 18 Jun 2018 17:35:10 +0200
+Subject: [PATCH 4745/5725] amdgpu: display: use modern ktime accessors
+
+getrawmonotonic64() is deprecated because of the nonstandard naming.
+
+The replacement function ktime_get_raw_ns() also simplifies the callers.
+
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c | 8 --------
+ drivers/gpu/drm/amd/display/dc/dm_services.h | 5 ++++-
+ 2 files changed, 4 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+index 0229c7ed..9f46421 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+@@ -35,14 +35,6 @@
+ #include "amdgpu_dm_irq.h"
+ #include "amdgpu_pm.h"
+
+-unsigned long long dm_get_timestamp(struct dc_context *ctx)
+-{
+- struct timespec64 time;
+-
+- getrawmonotonic64(&time);
+- return timespec64_to_ns(&time);
+-}
+-
+ unsigned long long dm_get_elapse_time_in_ns(struct dc_context *ctx,
+ unsigned long long current_time_stamp,
+ unsigned long long last_time_stamp)
+diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h
+index 4ff9b2b..eb5ab39 100644
+--- a/drivers/gpu/drm/amd/display/dc/dm_services.h
++++ b/drivers/gpu/drm/amd/display/dc/dm_services.h
+@@ -339,7 +339,10 @@ bool dm_dmcu_set_pipe(struct dc_context *ctx, unsigned int controller_id);
+ #define dm_log_to_buffer(buffer, size, fmt, args)\
+ vsnprintf(buffer, size, fmt, args)
+
+-unsigned long long dm_get_timestamp(struct dc_context *ctx);
++static inline unsigned long long dm_get_timestamp(struct dc_context *ctx)
++{
++ return ktime_get_raw_ns();
++}
+
+ unsigned long long dm_get_elapse_time_in_ns(struct dc_context *ctx,
+ unsigned long long current_time_stamp,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4746-drm-amdgpu-update-ib_start-size_alignment-same-as-wi.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4746-drm-amdgpu-update-ib_start-size_alignment-same-as-wi.patch
new file mode 100644
index 00000000..bbfe252b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4746-drm-amdgpu-update-ib_start-size_alignment-same-as-wi.patch
@@ -0,0 +1,105 @@
+From 62ca868522f57a526d3b6c4b99c18270ec397832 Mon Sep 17 00:00:00 2001
+From: Chunming Zhou <david1.zhou@amd.com>
+Date: Fri, 15 Jun 2018 14:39:57 +0800
+Subject: [PATCH 4746/5725] drm/amdgpu: update ib_start/size_alignment same as
+ windows used
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+PAGE_SIZE for start_alignment is far more than the hw requirement,
+so update to the experience values used on the Windows side.
+
+Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
+Acked-by: Marek Olšák <marek.olsak@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Acked-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 28 ++++++++++++++--------------
+ 1 file changed, 14 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index a1a53c6..a7a0be9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -343,35 +343,35 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+ type = AMD_IP_BLOCK_TYPE_GFX;
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++)
+ ring_mask |= ((adev->gfx.gfx_ring[i].ready ? 1 : 0) << i);
+- ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
+- ib_size_alignment = 8;
++ ib_start_alignment = 32;
++ ib_size_alignment = 32;
+ break;
+ case AMDGPU_HW_IP_COMPUTE:
+ type = AMD_IP_BLOCK_TYPE_GFX;
+ for (i = 0; i < adev->gfx.num_compute_rings; i++)
+ ring_mask |= ((adev->gfx.compute_ring[i].ready ? 1 : 0) << i);
+- ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
+- ib_size_alignment = 8;
++ ib_start_alignment = 32;
++ ib_size_alignment = 32;
+ break;
+ case AMDGPU_HW_IP_DMA:
+ type = AMD_IP_BLOCK_TYPE_SDMA;
+ for (i = 0; i < adev->sdma.num_instances; i++)
+ ring_mask |= ((adev->sdma.instance[i].ring.ready ? 1 : 0) << i);
+- ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
+- ib_size_alignment = 1;
++ ib_start_alignment = 256;
++ ib_size_alignment = 4;
+ break;
+ case AMDGPU_HW_IP_UVD:
+ type = AMD_IP_BLOCK_TYPE_UVD;
+ for (i = 0; i < adev->uvd.num_uvd_inst; i++)
+ ring_mask |= ((adev->uvd.inst[i].ring.ready ? 1 : 0) << i);
+- ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
+- ib_size_alignment = 16;
++ ib_start_alignment = 64;
++ ib_size_alignment = 64;
+ break;
+ case AMDGPU_HW_IP_VCE:
+ type = AMD_IP_BLOCK_TYPE_VCE;
+ for (i = 0; i < adev->vce.num_rings; i++)
+ ring_mask |= ((adev->vce.ring[i].ready ? 1 : 0) << i);
+- ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
++ ib_start_alignment = 4;
+ ib_size_alignment = 1;
+ break;
+ case AMDGPU_HW_IP_UVD_ENC:
+@@ -381,26 +381,26 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+ ring_mask |=
+ ((adev->uvd.inst[i].ring_enc[j].ready ? 1 : 0) <<
+ (j + i * adev->uvd.num_enc_rings));
+- ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
+- ib_size_alignment = 1;
++ ib_start_alignment = 64;
++ ib_size_alignment = 64;
+ break;
+ case AMDGPU_HW_IP_VCN_DEC:
+ type = AMD_IP_BLOCK_TYPE_VCN;
+ ring_mask = adev->vcn.ring_dec.ready ? 1 : 0;
+- ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
++ ib_start_alignment = 16;
+ ib_size_alignment = 16;
+ break;
+ case AMDGPU_HW_IP_VCN_ENC:
+ type = AMD_IP_BLOCK_TYPE_VCN;
+ for (i = 0; i < adev->vcn.num_enc_rings; i++)
+ ring_mask |= ((adev->vcn.ring_enc[i].ready ? 1 : 0) << i);
+- ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
++ ib_start_alignment = 64;
+ ib_size_alignment = 1;
+ break;
+ case AMDGPU_HW_IP_VCN_JPEG:
+ type = AMD_IP_BLOCK_TYPE_VCN;
+ ring_mask = adev->vcn.ring_jpeg.ready ? 1 : 0;
+- ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
++ ib_start_alignment = 16;
+ ib_size_alignment = 16;
+ break;
+ default:
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4747-drm-amdgpu-correct-GART-location-info.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4747-drm-amdgpu-correct-GART-location-info.patch
new file mode 100644
index 00000000..52fd5bb6
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4747-drm-amdgpu-correct-GART-location-info.patch
@@ -0,0 +1,70 @@
+From afb375810a3280c8783d8994677f0554cc07e17b Mon Sep 17 00:00:00 2001
+From: Junwei Zhang <Jerry.Zhang@amd.com>
+Date: Tue, 19 Jun 2018 11:22:18 +0800
+Subject: [PATCH 4747/5725] drm/amdgpu: correct GART location info
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Avoid confusing the GART with the GTT domain.
+
+Signed-off-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 14 ++++++--------
+ 1 file changed, 6 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index a847b42..5791f58 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -676,17 +676,15 @@ void amdgpu_device_vram_location(struct amdgpu_device *adev,
+ }
+
+ /**
+- * amdgpu_device_gart_location - try to find GTT location
++ * amdgpu_device_gart_location - try to find GART location
+ *
+ * @adev: amdgpu device structure holding all necessary informations
+ * @mc: memory controller structure holding memory informations
+ *
+- * Function will place try to place GTT before or after VRAM.
++ * Function will place try to place GART before or after VRAM.
+ *
+- * If GTT size is bigger than space left then we ajust GTT size.
++ * If GART size is bigger than space left then we ajust GART size.
+ * Thus function will never fails.
+- *
+- * FIXME: when reducing GTT size align new size on power of 2.
+ */
+ void amdgpu_device_gart_location(struct amdgpu_device *adev,
+ struct amdgpu_gmc *mc)
+@@ -699,13 +697,13 @@ void amdgpu_device_gart_location(struct amdgpu_device *adev,
+ size_bf = mc->vram_start;
+ if (size_bf > size_af) {
+ if (mc->gart_size > size_bf) {
+- dev_warn(adev->dev, "limiting GTT\n");
++ dev_warn(adev->dev, "limiting GART\n");
+ mc->gart_size = size_bf;
+ }
+ mc->gart_start = 0;
+ } else {
+ if (mc->gart_size > size_af) {
+- dev_warn(adev->dev, "limiting GTT\n");
++ dev_warn(adev->dev, "limiting GART\n");
+ mc->gart_size = size_af;
+ }
+ /* VCE doesn't like it when BOs cross a 4GB segment, so align
+@@ -714,7 +712,7 @@ void amdgpu_device_gart_location(struct amdgpu_device *adev,
+ mc->gart_start = ALIGN(mc->vram_end + 1, 0x100000000ULL);
+ }
+ mc->gart_end = mc->gart_start + mc->gart_size - 1;
+- dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
++ dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
+ mc->gart_size >> 20, mc->gart_start, mc->gart_end);
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4748-drm-amdgpu-Use-correct-enum-to-set-powergating-state.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4748-drm-amdgpu-Use-correct-enum-to-set-powergating-state.patch
new file mode 100644
index 00000000..c0712616
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4748-drm-amdgpu-Use-correct-enum-to-set-powergating-state.patch
@@ -0,0 +1,38 @@
+From 2f3efd16076cacf90aa033123d3e751ab2a28ffc Mon Sep 17 00:00:00 2001
+From: Stefan Agner <stefan@agner.ch>
+Date: Tue, 19 Jun 2018 11:16:56 +0200
+Subject: [PATCH 4748/5725] drm/amdgpu: Use correct enum to set powergating
+ state
+
+Use enum amd_powergating_state instead of enum amd_clockgating_state.
+The underlying value stays the same, so there is no functional change
+in practice. This fixes a warning seen with clang:
+drivers/gpu/drm/amd/amdgpu/amdgpu_device.c:1930:14: warning: implicit
+ conversion from enumeration type 'enum amd_clockgating_state' to
+ different enumeration type 'enum amd_powergating_state'
+ [-Wenum-conversion]
+ AMD_CG_STATE_UNGATE);
+ ^~~~~~~~~~~~~~~~~~~
+
+Signed-off-by: Stefan Agner <stefan@agner.ch>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 5791f58..78d6c99 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1974,7 +1974,7 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
+ if (adev->powerplay.pp_feature & PP_GFXOFF_MASK)
+ amdgpu_device_ip_set_powergating_state(adev,
+ AMD_IP_BLOCK_TYPE_SMC,
+- AMD_CG_STATE_UNGATE);
++ AMD_PG_STATE_UNGATE);
+
+ /* ungate SMC block first */
+ r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4749-drm-amd-amdgpu-Add-a-GPU_LOAD-entry-to-sysfs-v3.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4749-drm-amd-amdgpu-Add-a-GPU_LOAD-entry-to-sysfs-v3.patch
new file mode 100644
index 00000000..3c3d9f82
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4749-drm-amd-amdgpu-Add-a-GPU_LOAD-entry-to-sysfs-v3.patch
@@ -0,0 +1,94 @@
+From 59766c824785eb6236991fc9396ca44236d44e75 Mon Sep 17 00:00:00 2001
+From: Tom St Denis <tom.stdenis@amd.com>
+Date: Wed, 20 Jun 2018 07:55:39 -0400
+Subject: [PATCH 4749/5725] drm/amd/amdgpu: Add a GPU_LOAD entry to sysfs (v3)
+
+This adds what should be a stable interface to read GPU
+load from userspace.
+
+(v2): Fix comments and name of file per recommendations.
+(v3): Add chapter to amdgpu.rst as well.
+
+Signed-off-by: Tom St Denis <tom.stdenis@amd.com>
+Acked-by: Slava Abramov <slava.abramov@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 40 ++++++++++++++++++++++++++++++++++
+ 1 file changed, 40 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+index bc2dd4f9..dbc8300 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+@@ -918,6 +918,36 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
+ return -EINVAL;
+ }
+
++/**
++ * DOC: busy_percent
++ *
++ * The amdgpu driver provides a sysfs API for reading how busy the GPU
++ * is as a percentage. The file gpu_busy_percent is used for this.
++ * The SMU firmware computes a percentage of load based on the
++ * aggregate activity level in the IP cores.
++ */
++static ssize_t amdgpu_get_busy_percent(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ struct drm_device *ddev = dev_get_drvdata(dev);
++ struct amdgpu_device *adev = ddev->dev_private;
++ int r, value, size = sizeof(value);
++
++ /* sanity check PP is enabled */
++ if (!(adev->powerplay.pp_funcs &&
++ adev->powerplay.pp_funcs->read_sensor))
++ return -EINVAL;
++
++ /* read the IP busy sensor */
++ r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
++ (void *)&value, &size);
++ if (r)
++ return r;
++
++ return snprintf(buf, PAGE_SIZE, "%d\n", value);
++}
++
+ static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
+ static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
+ amdgpu_get_dpm_forced_performance_level,
+@@ -951,6 +981,8 @@ static DEVICE_ATTR(pp_power_profile_mode, S_IRUGO | S_IWUSR,
+ static DEVICE_ATTR(pp_od_clk_voltage, S_IRUGO | S_IWUSR,
+ amdgpu_get_pp_od_clk_voltage,
+ amdgpu_set_pp_od_clk_voltage);
++static DEVICE_ATTR(gpu_busy_percent, S_IRUGO,
++ amdgpu_get_busy_percent, NULL);
+
+ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
+ struct device_attribute *attr,
+@@ -1853,6 +1885,13 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
+ "pp_od_clk_voltage\n");
+ return ret;
+ }
++ ret = device_create_file(adev->dev,
++ &dev_attr_gpu_busy_percent);
++ if (ret) {
++ DRM_ERROR("failed to create device file "
++ "gpu_busy_level\n");
++ return ret;
++ }
+ ret = amdgpu_debugfs_pm_init(adev);
+ if (ret) {
+ DRM_ERROR("Failed to register debugfs file for dpm!\n");
+@@ -1888,6 +1927,7 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
+ &dev_attr_pp_power_profile_mode);
+ device_remove_file(adev->dev,
+ &dev_attr_pp_od_clk_voltage);
++ device_remove_file(adev->dev, &dev_attr_gpu_busy_percent);
+ }
+
+ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4750-drm-amdgpu-Polish-SQ-IH.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4750-drm-amdgpu-Polish-SQ-IH.patch
new file mode 100644
index 00000000..4e3e8af6
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4750-drm-amdgpu-Polish-SQ-IH.patch
@@ -0,0 +1,85 @@
+From 8be6402c521105b89cf08c57a0c8e7b0b7e4f146 Mon Sep 17 00:00:00 2001
+From: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Date: Mon, 18 Jun 2018 11:15:10 -0400
+Subject: [PATCH 4750/5725] drm/amdgpu: Polish SQ IH.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Switch to using reg field defines instead of magic values.
+Add SH_ID and PRIV field reads for the instr. and err cases.
+
+Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 36 +++++++++++++++++++----------------
+ 1 file changed, 20 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index 77350cd..ad28dd9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -6973,10 +6973,11 @@ static int gfx_v8_0_sq_irq(struct amdgpu_device *adev,
+ {
+ u8 enc, se_id;
+ char type[20];
++ unsigned ih_data = entry->src_data[0];
+
+- /* Parse all fields according to SQ_INTERRUPT* registers */
+- enc = (entry->src_data[0] >> 26) & 0x3;
+- se_id = (entry->src_data[0] >> 24) & 0x3;
++
++ enc = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_CMN, ENCODING);
++ se_id = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_CMN, SE_ID);
+
+ switch (enc) {
+ case 0:
+@@ -6986,14 +6987,14 @@ static int gfx_v8_0_sq_irq(struct amdgpu_device *adev,
+ "reg_timestamp %d, thread_trace_buff_full %d,"
+ "wlt %d, thread_trace %d.\n",
+ se_id,
+- (entry->src_data[0] >> 7) & 0x1,
+- (entry->src_data[0] >> 6) & 0x1,
+- (entry->src_data[0] >> 5) & 0x1,
+- (entry->src_data[0] >> 4) & 0x1,
+- (entry->src_data[0] >> 3) & 0x1,
+- (entry->src_data[0] >> 2) & 0x1,
+- (entry->src_data[0] >> 1) & 0x1,
+- entry->src_data[0] & 0x1
++ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, IMMED_OVERFLOW),
++ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, HOST_REG_OVERFLOW),
++ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, HOST_CMD_OVERFLOW),
++ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, CMD_TIMESTAMP),
++ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, REG_TIMESTAMP),
++ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, THREAD_TRACE_BUF_FULL),
++ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, WLT),
++ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, THREAD_TRACE)
+ );
+ break;
+ case 1:
+@@ -7006,12 +7007,15 @@ static int gfx_v8_0_sq_irq(struct amdgpu_device *adev,
+
+ DRM_INFO(
+ "SQ %s detected: "
+- "se_id %d, cu_id %d, simd_id %d, wave_id %d, vm_id %d\n",
++ "se_id %d, cu_id %d, simd_id %d, wave_id %d, vm_id %d\n"
++ "trap %s, sh_id %d. ",
+ type, se_id,
+- (entry->src_data[0] >> 20) & 0xf,
+- (entry->src_data[0] >> 18) & 0x3,
+- (entry->src_data[0] >> 14) & 0xf,
+- (entry->src_data[0] >> 10) & 0xf
++ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, CU_ID),
++ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, SIMD_ID),
++ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, WAVE_ID),
++ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, VM_ID),
++ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, PRIV) ? "true" : "false",
++ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, SH_ID)
+ );
+ break;
+ default:
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4751-drm-amdgpu-Add-parsing-SQ_EDC_INFO-to-SQ-IH-v3.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4751-drm-amdgpu-Add-parsing-SQ_EDC_INFO-to-SQ-IH-v3.patch
new file mode 100644
index 00000000..d437cc68
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4751-drm-amdgpu-Add-parsing-SQ_EDC_INFO-to-SQ-IH-v3.patch
@@ -0,0 +1,199 @@
+From 8bbabd0adfc498df205067032efc08d91df6305b Mon Sep 17 00:00:00 2001
+From: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Date: Tue, 19 Jun 2018 10:27:53 -0400
+Subject: [PATCH 4751/5725] drm/amdgpu: Add parsing SQ_EDC_INFO to SQ IH v3.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Access to SQ_EDC_INFO requires selecting a register instance and
+hence a mutex lock when accessing GRBM_GFX_INDEX, for which a work
+item is scheduled from the IH. But an SQ interrupt can be raised on many instances
+at once which means queuing work will usually succeed for the first one
+but fail for the rest since the work takes time to process. To avoid
+losing info about other interrupt instances call the parsing function
+directly from high IRQ when current work hasn't finished and avoid
+accessing SQ_EDC_INFO in that case.
+
+v2:
+Simplify high IRQ and BH handlers synchronization using work_pending.
+Remove {READ,WRITE}_ONCE notations since smp_{r,w}mb are implicit
+compiler barriers.
+
+v3:
+Remove explicit memory barriers as schedule_work has r/w barriers.
+
+Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 7 +++
+ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 82 +++++++++++++++++++++++++++++------
+ 2 files changed, 76 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index c55e675..3b456ce 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -951,6 +951,11 @@ struct amdgpu_ngg {
+ bool init;
+ };
+
++struct sq_work {
++ struct work_struct work;
++ unsigned ih_data;
++};
++
+ struct amdgpu_gfx {
+ struct mutex gpu_clock_mutex;
+ struct amdgpu_gfx_config config;
+@@ -991,6 +996,8 @@ struct amdgpu_gfx {
+ struct amdgpu_irq_src priv_inst_irq;
+ struct amdgpu_irq_src cp_ecc_error_irq;
+ struct amdgpu_irq_src sq_irq;
++ struct sq_work sq_work;
++
+ /* gfx status */
+ uint32_t gfx_current_status;
+ /* ce ram size*/
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index ad28dd9..294fa59 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -704,6 +704,17 @@ static const u32 stoney_mgcg_cgcg_init[] =
+ mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200,
+ };
+
++
++static const char * const sq_edc_source_names[] = {
++ "SQ_EDC_INFO_SOURCE_INVALID: No EDC error has occurred",
++ "SQ_EDC_INFO_SOURCE_INST: EDC source is Instruction Fetch",
++ "SQ_EDC_INFO_SOURCE_SGPR: EDC source is SGPR or SQC data return",
++ "SQ_EDC_INFO_SOURCE_VGPR: EDC source is VGPR",
++ "SQ_EDC_INFO_SOURCE_LDS: EDC source is LDS",
++ "SQ_EDC_INFO_SOURCE_GDS: EDC source is GDS",
++ "SQ_EDC_INFO_SOURCE_TA: EDC source is TA",
++};
++
+ static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev);
+ static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev);
+ static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev);
+@@ -2005,6 +2016,8 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
+ return 0;
+ }
+
++static void gfx_v8_0_sq_irq_work_func(struct work_struct *work);
++
+ static int gfx_v8_0_sw_init(void *handle)
+ {
+ int i, j, k, r, ring_id;
+@@ -2068,6 +2081,8 @@ static int gfx_v8_0_sw_init(void *handle)
+ return r;
+ }
+
++ INIT_WORK(&adev->gfx.sq_work.work, gfx_v8_0_sq_irq_work_func);
++
+ adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
+
+ gfx_v8_0_scratch_init(adev);
+@@ -6967,14 +6982,11 @@ static int gfx_v8_0_cp_ecc_error_irq(struct amdgpu_device *adev,
+ return 0;
+ }
+
+-static int gfx_v8_0_sq_irq(struct amdgpu_device *adev,
+- struct amdgpu_irq_src *source,
+- struct amdgpu_iv_entry *entry)
++static void gfx_v8_0_parse_sq_irq(struct amdgpu_device *adev, unsigned ih_data)
+ {
+- u8 enc, se_id;
++ u32 enc, se_id, sh_id, cu_id;
+ char type[20];
+- unsigned ih_data = entry->src_data[0];
+-
++ int sq_edc_source = -1;
+
+ enc = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_CMN, ENCODING);
+ se_id = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_CMN, SE_ID);
+@@ -7000,6 +7012,24 @@ static int gfx_v8_0_sq_irq(struct amdgpu_device *adev,
+ case 1:
+ case 2:
+
++ cu_id = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, CU_ID);
++ sh_id = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, SH_ID);
++
++ /*
++ * This function can be called either directly from ISR
++ * or from BH in which case we can access SQ_EDC_INFO
++ * instance
++ */
++ if (in_task()) {
++ mutex_lock(&adev->grbm_idx_mutex);
++ gfx_v8_0_select_se_sh(adev, se_id, sh_id, cu_id);
++
++ sq_edc_source = REG_GET_FIELD(RREG32(mmSQ_EDC_INFO), SQ_EDC_INFO, SOURCE);
++
++ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
++ mutex_unlock(&adev->grbm_idx_mutex);
++ }
++
+ if (enc == 1)
+ sprintf(type, "instruction intr");
+ else
+@@ -7007,20 +7037,46 @@ static int gfx_v8_0_sq_irq(struct amdgpu_device *adev,
+
+ DRM_INFO(
+ "SQ %s detected: "
+- "se_id %d, cu_id %d, simd_id %d, wave_id %d, vm_id %d\n"
+- "trap %s, sh_id %d. ",
+- type, se_id,
+- REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, CU_ID),
++ "se_id %d, sh_id %d, cu_id %d, simd_id %d, wave_id %d, vm_id %d "
++ "trap %s, sq_ed_info.source %s.\n",
++ type, se_id, sh_id, cu_id,
+ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, SIMD_ID),
+ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, WAVE_ID),
+ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, VM_ID),
+ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, PRIV) ? "true" : "false",
+- REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, SH_ID)
+- );
++ (sq_edc_source != -1) ? sq_edc_source_names[sq_edc_source] : "unavailable"
++ );
+ break;
+ default:
+ DRM_ERROR("SQ invalid encoding type\n.");
+- return -EINVAL;
++ }
++}
++
++static void gfx_v8_0_sq_irq_work_func(struct work_struct *work)
++{
++
++ struct amdgpu_device *adev = container_of(work, struct amdgpu_device, gfx.sq_work.work);
++ struct sq_work *sq_work = container_of(work, struct sq_work, work);
++
++ gfx_v8_0_parse_sq_irq(adev, sq_work->ih_data);
++}
++
++static int gfx_v8_0_sq_irq(struct amdgpu_device *adev,
++ struct amdgpu_irq_src *source,
++ struct amdgpu_iv_entry *entry)
++{
++ unsigned ih_data = entry->src_data[0];
++
++ /*
++ * Try to submit work so SQ_EDC_INFO can be accessed from
++ * BH. If previous work submission hasn't finished yet
++ * just print whatever info is possible directly from the ISR.
++ */
++ if (work_pending(&adev->gfx.sq_work.work)) {
++ gfx_v8_0_parse_sq_irq(adev, ih_data);
++ } else {
++ adev->gfx.sq_work.ih_data = ih_data;
++ schedule_work(&adev->gfx.sq_work.work);
+ }
+
+ return 0;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4752-drm-amd-display-replace-clocks_value-struct-with-dc_.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4752-drm-amd-display-replace-clocks_value-struct-with-dc_.patch
new file mode 100644
index 00000000..ef7a4525
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4752-drm-amd-display-replace-clocks_value-struct-with-dc_.patch
@@ -0,0 +1,378 @@
+From 2debb6972a470ed43081814f6f9161dd32ce7916 Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Wed, 23 May 2018 13:16:50 -0400
+Subject: [PATCH 4752/5725] drm/amd/display: replace clocks_value struct with
+ dc_clocks
+
+This will avoid structs with duplicate information. Also
+removes pixel clock voltage request. This has no effect since
+pixel clock does not affect dcn voltage and this function only
+matters for dcn.
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 34 ++++++++--------
+ drivers/gpu/drm/amd/display/dc/core/dc_link.c | 8 ++--
+ drivers/gpu/drm/amd/display/dc/dc.h | 5 +++
+ drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c | 46 ++++++++--------------
+ .../amd/display/dc/dce110/dce110_hw_sequencer.c | 18 +--------
+ drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h | 2 +-
+ .../gpu/drm/amd/display/dc/inc/hw/display_clock.h | 22 ++---------
+ 7 files changed, 49 insertions(+), 86 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+index 49a4ea4..d8a3165 100644
+--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
++++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+@@ -1145,10 +1145,10 @@ static unsigned int dcn_find_normalized_clock_vdd_Level(
+
+ switch (clocks_type) {
+ case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+- if (clocks_in_khz > dc->dcn_soc->max_dispclk_vmax0p9*1000) {
++ /*if (clocks_in_khz > dc->dcn_soc->max_dispclk_vmax0p9*1000) {
+ vdd_level = dcn_bw_v_max0p91;
+- BREAK_TO_DEBUGGER();
+- } else if (clocks_in_khz > dc->dcn_soc->max_dispclk_vnom0p8*1000) {
++ //BREAK_TO_DEBUGGER();
++ } else*/ if (clocks_in_khz > dc->dcn_soc->max_dispclk_vnom0p8*1000) {
+ vdd_level = dcn_bw_v_max0p9;
+ } else if (clocks_in_khz > dc->dcn_soc->max_dispclk_vmid0p72*1000) {
+ vdd_level = dcn_bw_v_nom0p8;
+@@ -1158,10 +1158,10 @@ static unsigned int dcn_find_normalized_clock_vdd_Level(
+ vdd_level = dcn_bw_v_min0p65;
+ break;
+ case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
+- if (clocks_in_khz > dc->dcn_soc->phyclkv_max0p9*1000) {
++ /*if (clocks_in_khz > dc->dcn_soc->phyclkv_max0p9*1000) {
+ vdd_level = dcn_bw_v_max0p91;
+ BREAK_TO_DEBUGGER();
+- } else if (clocks_in_khz > dc->dcn_soc->phyclkv_nom0p8*1000) {
++ } else*/ if (clocks_in_khz > dc->dcn_soc->phyclkv_nom0p8*1000) {
+ vdd_level = dcn_bw_v_max0p9;
+ } else if (clocks_in_khz > dc->dcn_soc->phyclkv_mid0p72*1000) {
+ vdd_level = dcn_bw_v_nom0p8;
+@@ -1172,10 +1172,10 @@ static unsigned int dcn_find_normalized_clock_vdd_Level(
+ break;
+
+ case DM_PP_CLOCK_TYPE_DPPCLK:
+- if (clocks_in_khz > dc->dcn_soc->max_dppclk_vmax0p9*1000) {
++ /*if (clocks_in_khz > dc->dcn_soc->max_dppclk_vmax0p9*1000) {
+ vdd_level = dcn_bw_v_max0p91;
+ BREAK_TO_DEBUGGER();
+- } else if (clocks_in_khz > dc->dcn_soc->max_dppclk_vnom0p8*1000) {
++ } else*/ if (clocks_in_khz > dc->dcn_soc->max_dppclk_vnom0p8*1000) {
+ vdd_level = dcn_bw_v_max0p9;
+ } else if (clocks_in_khz > dc->dcn_soc->max_dppclk_vmid0p72*1000) {
+ vdd_level = dcn_bw_v_nom0p8;
+@@ -1189,10 +1189,10 @@ static unsigned int dcn_find_normalized_clock_vdd_Level(
+ {
+ unsigned factor = (ddr4_dram_factor_single_Channel * dc->dcn_soc->number_of_channels);
+
+- if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9*1000000/factor) {
++ /*if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9*1000000/factor) {
+ vdd_level = dcn_bw_v_max0p91;
+ BREAK_TO_DEBUGGER();
+- } else if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8*1000000/factor) {
++ } else */if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8*1000000/factor) {
+ vdd_level = dcn_bw_v_max0p9;
+ } else if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72*1000000/factor) {
+ vdd_level = dcn_bw_v_nom0p8;
+@@ -1204,10 +1204,10 @@ static unsigned int dcn_find_normalized_clock_vdd_Level(
+ break;
+
+ case DM_PP_CLOCK_TYPE_DCFCLK:
+- if (clocks_in_khz > dc->dcn_soc->dcfclkv_max0p9*1000) {
++ /*if (clocks_in_khz > dc->dcn_soc->dcfclkv_max0p9*1000) {
+ vdd_level = dcn_bw_v_max0p91;
+ BREAK_TO_DEBUGGER();
+- } else if (clocks_in_khz > dc->dcn_soc->dcfclkv_nom0p8*1000) {
++ } else */if (clocks_in_khz > dc->dcn_soc->dcfclkv_nom0p8*1000) {
+ vdd_level = dcn_bw_v_max0p9;
+ } else if (clocks_in_khz > dc->dcn_soc->dcfclkv_mid0p72*1000) {
+ vdd_level = dcn_bw_v_nom0p8;
+@@ -1225,27 +1225,27 @@ static unsigned int dcn_find_normalized_clock_vdd_Level(
+
+ unsigned int dcn_find_dcfclk_suits_all(
+ const struct dc *dc,
+- struct clocks_value *clocks)
++ struct dc_clocks *clocks)
+ {
+ unsigned vdd_level, vdd_level_temp;
+ unsigned dcf_clk;
+
+ /*find a common supported voltage level*/
+ vdd_level = dcn_find_normalized_clock_vdd_Level(
+- dc, DM_PP_CLOCK_TYPE_DISPLAY_CLK, clocks->dispclk_in_khz);
++ dc, DM_PP_CLOCK_TYPE_DISPLAY_CLK, clocks->dispclk_khz);
+ vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
+- dc, DM_PP_CLOCK_TYPE_DISPLAYPHYCLK, clocks->phyclk_in_khz);
++ dc, DM_PP_CLOCK_TYPE_DISPLAYPHYCLK, clocks->phyclk_khz);
+
+ vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
+ vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
+- dc, DM_PP_CLOCK_TYPE_DPPCLK, clocks->dppclk_in_khz);
++ dc, DM_PP_CLOCK_TYPE_DPPCLK, clocks->dppclk_khz);
+ vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
+
+ vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
+- dc, DM_PP_CLOCK_TYPE_MEMORY_CLK, clocks->dcfclock_in_khz);
++ dc, DM_PP_CLOCK_TYPE_MEMORY_CLK, clocks->fclk_khz);
+ vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
+ vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
+- dc, DM_PP_CLOCK_TYPE_DCFCLK, clocks->dcfclock_in_khz);
++ dc, DM_PP_CLOCK_TYPE_DCFCLK, clocks->dcfclk_khz);
+
+ /*find that level conresponding dcfclk*/
+ vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index 53f9db9..af9e0db 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -1290,15 +1290,13 @@ static enum dc_status enable_link_dp(
+ state->dis_clk, DM_PP_CLOCKS_STATE_NOMINAL);
+ } else {
+ uint32_t dp_phyclk_in_khz;
+- const struct clocks_value clocks_value =
+- state->dis_clk->cur_clocks_value;
++ const struct dc_clocks clocks_value =
++ state->dis_clk->clks;
+
+ /* 27mhz = 27000000hz= 27000khz */
+ dp_phyclk_in_khz = link_settings.link_rate * 27000;
+
+- if (((clocks_value.max_non_dp_phyclk_in_khz != 0) &&
+- (dp_phyclk_in_khz > clocks_value.max_non_dp_phyclk_in_khz)) ||
+- (dp_phyclk_in_khz > clocks_value.max_dp_phyclk_in_khz)) {
++ if (dp_phyclk_in_khz > clocks_value.phyclk_khz) {
+ state->dis_clk->funcs->apply_clock_voltage_request(
+ state->dis_clk,
+ DM_PP_CLOCK_TYPE_DISPLAYPHYCLK,
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 56c1e2b..3471485 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -187,6 +187,10 @@ enum wm_report_mode {
+ WM_REPORT_OVERRIDE = 1,
+ };
+
++/*
++ * For any clocks that may differ per pipe
++ * only the max is stored in this structure
++ */
+ struct dc_clocks {
+ int dispclk_khz;
+ int max_supported_dppclk_khz;
+@@ -195,6 +199,7 @@ struct dc_clocks {
+ int socclk_khz;
+ int dcfclk_deep_sleep_khz;
+ int fclk_khz;
++ int phyclk_khz;
+ };
+
+ struct dc_debug {
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+index 8a581c6..b749a20 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+@@ -560,11 +560,9 @@ static bool dce_apply_clock_voltage_request(
+
+ switch (clocks_type) {
+ case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+- case DM_PP_CLOCK_TYPE_PIXELCLK:
+ case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
+ break;
+ default:
+- BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+@@ -575,31 +573,22 @@ static bool dce_apply_clock_voltage_request(
+ if (pre_mode_set) {
+ switch (clocks_type) {
+ case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+- if (clocks_in_khz > clk->cur_clocks_value.dispclk_in_khz) {
+- clk->cur_clocks_value.dispclk_notify_pplib_done = true;
++ if (clocks_in_khz > clk->clks.dispclk_khz) {
++ clk->dispclk_notify_pplib_done = true;
+ send_request = true;
+ } else
+- clk->cur_clocks_value.dispclk_notify_pplib_done = false;
++ clk->dispclk_notify_pplib_done = false;
+ /* no matter incrase or decrase clock, update current clock value */
+- clk->cur_clocks_value.dispclk_in_khz = clocks_in_khz;
+- break;
+- case DM_PP_CLOCK_TYPE_PIXELCLK:
+- if (clocks_in_khz > clk->cur_clocks_value.max_pixelclk_in_khz) {
+- clk->cur_clocks_value.pixelclk_notify_pplib_done = true;
+- send_request = true;
+- } else
+- clk->cur_clocks_value.pixelclk_notify_pplib_done = false;
+- /* no matter incrase or decrase clock, update current clock value */
+- clk->cur_clocks_value.max_pixelclk_in_khz = clocks_in_khz;
++ clk->clks.dispclk_khz = clocks_in_khz;
+ break;
+ case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
+- if (clocks_in_khz > clk->cur_clocks_value.max_non_dp_phyclk_in_khz) {
+- clk->cur_clocks_value.phyclk_notigy_pplib_done = true;
++ if (clocks_in_khz > clk->clks.phyclk_khz) {
++ clk->phyclk_notify_pplib_done = true;
+ send_request = true;
+ } else
+- clk->cur_clocks_value.phyclk_notigy_pplib_done = false;
++ clk->phyclk_notify_pplib_done = false;
+ /* no matter incrase or decrase clock, update current clock value */
+- clk->cur_clocks_value.max_non_dp_phyclk_in_khz = clocks_in_khz;
++ clk->clks.phyclk_khz = clocks_in_khz;
+ break;
+ default:
+ ASSERT(0);
+@@ -609,16 +598,14 @@ static bool dce_apply_clock_voltage_request(
+ } else {
+ switch (clocks_type) {
+ case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+- if (!clk->cur_clocks_value.dispclk_notify_pplib_done)
+- send_request = true;
+- break;
+- case DM_PP_CLOCK_TYPE_PIXELCLK:
+- if (!clk->cur_clocks_value.pixelclk_notify_pplib_done)
++ if (!clk->dispclk_notify_pplib_done)
+ send_request = true;
++ clk->dispclk_notify_pplib_done = true;
+ break;
+ case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
+- if (!clk->cur_clocks_value.phyclk_notigy_pplib_done)
++ if (!clk->phyclk_notify_pplib_done)
+ send_request = true;
++ clk->phyclk_notify_pplib_done = true;
+ break;
+ default:
+ ASSERT(0);
+@@ -627,20 +614,21 @@ static bool dce_apply_clock_voltage_request(
+ }
+ if (send_request) {
+ #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+- if (clk->ctx->dce_version >= DCN_VERSION_1_0) {
++ if (clk->ctx->dce_version >= DCN_VERSION_1_0
++ ) {
+ struct dc *core_dc = clk->ctx->dc;
+ /*use dcfclk request voltage*/
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
+ clock_voltage_req.clocks_in_khz =
+- dcn_find_dcfclk_suits_all(core_dc, &clk->cur_clocks_value);
++ dcn_find_dcfclk_suits_all(core_dc, &clk->clks);
+ }
+ #endif
+ dm_pp_apply_clock_for_voltage_request(
+ clk->ctx, &clock_voltage_req);
+ }
+ if (update_dp_phyclk && (clocks_in_khz >
+- clk->cur_clocks_value.max_dp_phyclk_in_khz))
+- clk->cur_clocks_value.max_dp_phyclk_in_khz = clocks_in_khz;
++ clk->clks.phyclk_khz))
++ clk->clks.phyclk_khz = clocks_in_khz;
+
+ return true;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index 57d8113..e540172 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -1800,21 +1800,14 @@ static void apply_min_clocks(
+ context->dis_clk->funcs->apply_clock_voltage_request(
+ context->dis_clk,
+ DM_PP_CLOCK_TYPE_DISPLAY_CLK,
+- context->dis_clk->cur_clocks_value.dispclk_in_khz,
+- pre_mode_set,
+- false);
+-
+- context->dis_clk->funcs->apply_clock_voltage_request(
+- context->dis_clk,
+- DM_PP_CLOCK_TYPE_PIXELCLK,
+- context->dis_clk->cur_clocks_value.max_pixelclk_in_khz,
++ context->dis_clk->clks.dispclk_khz,
+ pre_mode_set,
+ false);
+
+ context->dis_clk->funcs->apply_clock_voltage_request(
+ context->dis_clk,
+ DM_PP_CLOCK_TYPE_DISPLAYPHYCLK,
+- context->dis_clk->cur_clocks_value.max_non_dp_phyclk_in_khz,
++ context->dis_clk->clks.phyclk_khz,
+ pre_mode_set,
+ false);
+ return;
+@@ -1843,13 +1836,6 @@ static void apply_min_clocks(
+
+ context->dis_clk->funcs->apply_clock_voltage_request(
+ context->dis_clk,
+- DM_PP_CLOCK_TYPE_PIXELCLK,
+- req_clocks.pixel_clk_khz,
+- pre_mode_set,
+- false);
+-
+- context->dis_clk->funcs->apply_clock_voltage_request(
+- context->dis_clk,
+ DM_PP_CLOCK_TYPE_DISPLAYPHYCLK,
+ req_clocks.pixel_clk_khz,
+ pre_mode_set,
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+index 132d18d..ddbb673 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+@@ -625,7 +625,7 @@ bool dcn_validate_bandwidth(
+
+ unsigned int dcn_find_dcfclk_suits_all(
+ const struct dc *dc,
+- struct clocks_value *clocks);
++ struct dc_clocks *clocks);
+
+ void dcn_bw_update_from_pplib(struct dc *dc);
+ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc);
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h b/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
+index f5f69cd..6b9ca55 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
+@@ -27,23 +27,7 @@
+ #define __DISPLAY_CLOCK_H__
+
+ #include "dm_services_types.h"
+-
+-
+-struct clocks_value {
+- int dispclk_in_khz;
+- int max_pixelclk_in_khz;
+- int max_non_dp_phyclk_in_khz;
+- int max_dp_phyclk_in_khz;
+- bool dispclk_notify_pplib_done;
+- bool pixelclk_notify_pplib_done;
+- bool phyclk_notigy_pplib_done;
+- int dcfclock_in_khz;
+- int dppclk_in_khz;
+- int mclk_in_khz;
+- int phyclk_in_khz;
+- int common_vdd_level;
+-};
+-
++#include "dc.h"
+
+ /* Structure containing all state-dependent clocks
+ * (dependent on "enum clocks_state") */
+@@ -56,9 +40,11 @@ struct display_clock {
+ struct dc_context *ctx;
+ const struct display_clock_funcs *funcs;
+
++ bool dispclk_notify_pplib_done;
++ bool phyclk_notify_pplib_done;
+ enum dm_pp_clocks_state max_clks_state;
+ enum dm_pp_clocks_state cur_min_clks_state;
+- struct clocks_value cur_clocks_value;
++ struct dc_clocks clks;
+ };
+
+ struct display_clock_funcs {
+--
+2.7.4
+
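For reference, a minimal standalone sketch of the consumer side after this patch: per-display-clock state is now read through the shared struct dc_clocks (the new clks member) instead of the removed struct clocks_value, as in the dc_link.c hunk above. The struct definitions below are trimmed stand-ins for illustration, not the real dc headers, and the numeric values and link-rate code are made up.

#include <stdio.h>

/* trimmed stand-ins mirroring the fields this patch touches */
struct dc_clocks {
	int dispclk_khz;
	int phyclk_khz;		/* added by this patch: single max phyclk, DP or not */
};

struct display_clock {
	struct dc_clocks clks;	/* replaces struct clocks_value cur_clocks_value */
};

int main(void)
{
	struct display_clock dis_clk = {
		.clks = { .dispclk_khz = 600000, .phyclk_khz = 270000 },
	};
	int link_rate = 0x14;	/* hypothetical link-rate code from link_settings */
	/* 27mhz = 27000000hz = 27000khz, as in enable_link_dp() */
	int dp_phyclk_in_khz = link_rate * 27000;

	/* one comparison against clks.phyclk_khz replaces the old
	 * max_non_dp_phyclk_in_khz / max_dp_phyclk_in_khz pair */
	if (dp_phyclk_in_khz > dis_clk.clks.phyclk_khz)
		printf("request phyclk %d kHz\n", dp_phyclk_in_khz);

	return 0;
}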
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4753-drm-amd-display-redesign-dce-dcn-clock-voltage-updat.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4753-drm-amd-display-redesign-dce-dcn-clock-voltage-updat.patch
new file mode 100644
index 00000000..acea24b0
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4753-drm-amd-display-redesign-dce-dcn-clock-voltage-updat.patch
@@ -0,0 +1,918 @@
+From fc5d96cda0c50f44feef553e75df07cea74a00ee Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Wed, 23 May 2018 16:21:54 -0400
+Subject: [PATCH 4753/5725] drm/amd/display: redesign dce/dcn clock voltage
+ update request
+
+The goal of this change is to move clock programming and voltage
+requests to a single function. As of this change only dce is affected.
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 22 +-
+ drivers/gpu/drm/amd/display/dc/core/dc_link.c | 30 +--
+ drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c | 279 +++++++++++----------
+ drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h | 6 +-
+ .../amd/display/dc/dce100/dce100_hw_sequencer.c | 49 +++-
+ .../amd/display/dc/dce110/dce110_hw_sequencer.c | 150 ++---------
+ .../amd/display/dc/dce110/dce110_hw_sequencer.h | 4 +
+ .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 9 +-
+ .../gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 2 +-
+ .../gpu/drm/amd/display/dc/inc/hw/display_clock.h | 21 +-
+ 10 files changed, 250 insertions(+), 322 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+index d8a3165..2b70ac6 100644
+--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
++++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+@@ -1145,10 +1145,10 @@ static unsigned int dcn_find_normalized_clock_vdd_Level(
+
+ switch (clocks_type) {
+ case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+- /*if (clocks_in_khz > dc->dcn_soc->max_dispclk_vmax0p9*1000) {
++ if (clocks_in_khz > dc->dcn_soc->max_dispclk_vmax0p9*1000) {
+ vdd_level = dcn_bw_v_max0p91;
+- //BREAK_TO_DEBUGGER();
+- } else*/ if (clocks_in_khz > dc->dcn_soc->max_dispclk_vnom0p8*1000) {
++ BREAK_TO_DEBUGGER();
++ } else if (clocks_in_khz > dc->dcn_soc->max_dispclk_vnom0p8*1000) {
+ vdd_level = dcn_bw_v_max0p9;
+ } else if (clocks_in_khz > dc->dcn_soc->max_dispclk_vmid0p72*1000) {
+ vdd_level = dcn_bw_v_nom0p8;
+@@ -1158,10 +1158,10 @@ static unsigned int dcn_find_normalized_clock_vdd_Level(
+ vdd_level = dcn_bw_v_min0p65;
+ break;
+ case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
+- /*if (clocks_in_khz > dc->dcn_soc->phyclkv_max0p9*1000) {
++ if (clocks_in_khz > dc->dcn_soc->phyclkv_max0p9*1000) {
+ vdd_level = dcn_bw_v_max0p91;
+ BREAK_TO_DEBUGGER();
+- } else*/ if (clocks_in_khz > dc->dcn_soc->phyclkv_nom0p8*1000) {
++ } else if (clocks_in_khz > dc->dcn_soc->phyclkv_nom0p8*1000) {
+ vdd_level = dcn_bw_v_max0p9;
+ } else if (clocks_in_khz > dc->dcn_soc->phyclkv_mid0p72*1000) {
+ vdd_level = dcn_bw_v_nom0p8;
+@@ -1172,10 +1172,10 @@ static unsigned int dcn_find_normalized_clock_vdd_Level(
+ break;
+
+ case DM_PP_CLOCK_TYPE_DPPCLK:
+- /*if (clocks_in_khz > dc->dcn_soc->max_dppclk_vmax0p9*1000) {
++ if (clocks_in_khz > dc->dcn_soc->max_dppclk_vmax0p9*1000) {
+ vdd_level = dcn_bw_v_max0p91;
+ BREAK_TO_DEBUGGER();
+- } else*/ if (clocks_in_khz > dc->dcn_soc->max_dppclk_vnom0p8*1000) {
++ } else if (clocks_in_khz > dc->dcn_soc->max_dppclk_vnom0p8*1000) {
+ vdd_level = dcn_bw_v_max0p9;
+ } else if (clocks_in_khz > dc->dcn_soc->max_dppclk_vmid0p72*1000) {
+ vdd_level = dcn_bw_v_nom0p8;
+@@ -1189,10 +1189,10 @@ static unsigned int dcn_find_normalized_clock_vdd_Level(
+ {
+ unsigned factor = (ddr4_dram_factor_single_Channel * dc->dcn_soc->number_of_channels);
+
+- /*if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9*1000000/factor) {
++ if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9*1000000/factor) {
+ vdd_level = dcn_bw_v_max0p91;
+ BREAK_TO_DEBUGGER();
+- } else */if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8*1000000/factor) {
++ } else if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8*1000000/factor) {
+ vdd_level = dcn_bw_v_max0p9;
+ } else if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72*1000000/factor) {
+ vdd_level = dcn_bw_v_nom0p8;
+@@ -1204,10 +1204,10 @@ static unsigned int dcn_find_normalized_clock_vdd_Level(
+ break;
+
+ case DM_PP_CLOCK_TYPE_DCFCLK:
+- /*if (clocks_in_khz > dc->dcn_soc->dcfclkv_max0p9*1000) {
++ if (clocks_in_khz > dc->dcn_soc->dcfclkv_max0p9*1000) {
+ vdd_level = dcn_bw_v_max0p91;
+ BREAK_TO_DEBUGGER();
+- } else */if (clocks_in_khz > dc->dcn_soc->dcfclkv_nom0p8*1000) {
++ } else if (clocks_in_khz > dc->dcn_soc->dcfclkv_nom0p8*1000) {
+ vdd_level = dcn_bw_v_max0p9;
+ } else if (clocks_in_khz > dc->dcn_soc->dcfclkv_mid0p72*1000) {
+ vdd_level = dcn_bw_v_nom0p8;
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index af9e0db..1a6a7c5 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -1284,27 +1284,15 @@ static enum dc_status enable_link_dp(
+ max_link_rate = LINK_RATE_HIGH3;
+
+ if (link_settings.link_rate == max_link_rate) {
+- if (state->dis_clk->funcs->set_min_clocks_state) {
+- if (state->dis_clk->cur_min_clks_state < DM_PP_CLOCKS_STATE_NOMINAL)
+- state->dis_clk->funcs->set_min_clocks_state(
+- state->dis_clk, DM_PP_CLOCKS_STATE_NOMINAL);
+- } else {
+- uint32_t dp_phyclk_in_khz;
+- const struct dc_clocks clocks_value =
+- state->dis_clk->clks;
+-
+- /* 27mhz = 27000000hz= 27000khz */
+- dp_phyclk_in_khz = link_settings.link_rate * 27000;
+-
+- if (dp_phyclk_in_khz > clocks_value.phyclk_khz) {
+- state->dis_clk->funcs->apply_clock_voltage_request(
+- state->dis_clk,
+- DM_PP_CLOCK_TYPE_DISPLAYPHYCLK,
+- dp_phyclk_in_khz,
+- false,
+- true);
+- }
+- }
++ struct dc_clocks clocks = state->bw.dcn.calc_clk;
++
++ /* dce/dcn compat, do not update dispclk */
++ clocks.dispclk_khz = 0;
++ /* 27mhz = 27000000hz= 27000khz */
++ clocks.phyclk_khz = link_settings.link_rate * 27000;
++
++ state->dis_clk->funcs->update_clocks(
++ state->dis_clk, &clocks, false);
+ }
+
+ dp_enable_link_phy(
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+index b749a20..d3bbac8 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+@@ -275,7 +275,7 @@ static int dce_clocks_get_dp_ref_freq_wrkaround(struct display_clock *clk)
+ }
+ static enum dm_pp_clocks_state dce_get_required_clocks_state(
+ struct display_clock *clk,
+- struct state_dependent_clocks *req_clocks)
++ struct dc_clocks *req_clocks)
+ {
+ struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
+ int i;
+@@ -286,48 +286,25 @@ static enum dm_pp_clocks_state dce_get_required_clocks_state(
+ * all required clocks
+ */
+ for (i = clk->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--)
+- if (req_clocks->display_clk_khz >
++ if (req_clocks->dispclk_khz >
+ clk_dce->max_clks_by_state[i].display_clk_khz
+- || req_clocks->pixel_clk_khz >
++ || req_clocks->phyclk_khz >
+ clk_dce->max_clks_by_state[i].pixel_clk_khz)
+ break;
+
+ low_req_clk = i + 1;
+ if (low_req_clk > clk->max_clks_state) {
+- DC_LOG_WARNING("%s: clocks unsupported disp_clk %d pix_clk %d",
+- __func__,
+- req_clocks->display_clk_khz,
+- req_clocks->pixel_clk_khz);
+- low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
++ /* set max clock state for high phyclock, invalid on exceeding display clock */
++ if (clk_dce->max_clks_by_state[clk->max_clks_state].display_clk_khz
++ < req_clocks->dispclk_khz)
++ low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
++ else
++ low_req_clk = clk->max_clks_state;
+ }
+
+ return low_req_clk;
+ }
+
+-static bool dce_clock_set_min_clocks_state(
+- struct display_clock *clk,
+- enum dm_pp_clocks_state clocks_state)
+-{
+- struct dm_pp_power_level_change_request level_change_req = {
+- clocks_state };
+-
+- if (clocks_state > clk->max_clks_state) {
+- /*Requested state exceeds max supported state.*/
+- DC_LOG_WARNING("Requested state exceeds max supported state");
+- return false;
+- } else if (clocks_state == clk->cur_min_clks_state) {
+- /*if we're trying to set the same state, we can just return
+- * since nothing needs to be done*/
+- return true;
+- }
+-
+- /* get max clock state from PPLIB */
+- if (dm_pp_apply_power_level_change_request(clk->ctx, &level_change_req))
+- clk->cur_min_clks_state = clocks_state;
+-
+- return true;
+-}
+-
+ static int dce_set_clock(
+ struct display_clock *clk,
+ int requested_clk_khz)
+@@ -488,8 +465,6 @@ static void dce_clock_read_integrated_info(struct dce_disp_clk *clk_dce)
+ if (!debug->disable_dfs_bypass && bp->integrated_info)
+ if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
+ clk_dce->dfs_bypass_enabled = true;
+-
+- clk_dce->use_max_disp_clk = debug->max_disp_clk;
+ }
+
+ static void dce_clock_read_ss_info(struct dce_disp_clk *clk_dce)
+@@ -548,117 +523,160 @@ static void dce_clock_read_ss_info(struct dce_disp_clk *clk_dce)
+ }
+ }
+
+-static bool dce_apply_clock_voltage_request(
+- struct display_clock *clk,
+- enum dm_pp_clock_type clocks_type,
+- int clocks_in_khz,
+- bool pre_mode_set,
+- bool update_dp_phyclk)
++static void dce12_update_clocks(struct display_clock *dccg,
++ struct dc_clocks *new_clocks,
++ bool safe_to_lower)
+ {
+- bool send_request = false;
+ struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
+
+- switch (clocks_type) {
+- case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+- case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
+- break;
+- default:
+- return false;
++ if ((new_clocks->dispclk_khz < dccg->clks.dispclk_khz && safe_to_lower)
++ || new_clocks->dispclk_khz > dccg->clks.dispclk_khz) {
++ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
++ clock_voltage_req.clocks_in_khz = new_clocks->dispclk_khz;
++ dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
++ dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
++
++ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+ }
+
+- clock_voltage_req.clk_type = clocks_type;
+- clock_voltage_req.clocks_in_khz = clocks_in_khz;
+-
+- /* to pplib */
+- if (pre_mode_set) {
+- switch (clocks_type) {
+- case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+- if (clocks_in_khz > clk->clks.dispclk_khz) {
+- clk->dispclk_notify_pplib_done = true;
+- send_request = true;
+- } else
+- clk->dispclk_notify_pplib_done = false;
+- /* no matter incrase or decrase clock, update current clock value */
+- clk->clks.dispclk_khz = clocks_in_khz;
+- break;
+- case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
+- if (clocks_in_khz > clk->clks.phyclk_khz) {
+- clk->phyclk_notify_pplib_done = true;
+- send_request = true;
+- } else
+- clk->phyclk_notify_pplib_done = false;
+- /* no matter incrase or decrase clock, update current clock value */
+- clk->clks.phyclk_khz = clocks_in_khz;
+- break;
+- default:
+- ASSERT(0);
+- break;
+- }
++ if ((new_clocks->phyclk_khz < dccg->clks.phyclk_khz && safe_to_lower)
++ || new_clocks->phyclk_khz > dccg->clks.phyclk_khz) {
++ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK;
++ clock_voltage_req.clocks_in_khz = new_clocks->phyclk_khz;
++ dccg->clks.phyclk_khz = new_clocks->phyclk_khz;
+
+- } else {
+- switch (clocks_type) {
+- case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+- if (!clk->dispclk_notify_pplib_done)
+- send_request = true;
+- clk->dispclk_notify_pplib_done = true;
+- break;
+- case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
+- if (!clk->phyclk_notify_pplib_done)
+- send_request = true;
+- clk->phyclk_notify_pplib_done = true;
+- break;
+- default:
+- ASSERT(0);
+- break;
+- }
++ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+ }
+- if (send_request) {
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+- if (clk->ctx->dce_version >= DCN_VERSION_1_0
++}
++
++static void dcn_update_clocks(struct display_clock *dccg,
++ struct dc_clocks *new_clocks,
++ bool safe_to_lower)
++{
++ struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
++ bool send_request_to_increase = false;
++ bool send_request_to_lower = false;
++
++ if (new_clocks->dispclk_khz > dccg->clks.dispclk_khz
++ || new_clocks->phyclk_khz > dccg->clks.phyclk_khz
++ || new_clocks->fclk_khz > dccg->clks.fclk_khz
++ || new_clocks->dcfclk_khz > dccg->clks.dcfclk_khz)
++ send_request_to_increase = true;
++
++#ifdef CONFIG_DRM_AMD_DC_DCN1_0
++ if (send_request_to_increase
+ ) {
+- struct dc *core_dc = clk->ctx->dc;
+- /*use dcfclk request voltage*/
+- clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
+- clock_voltage_req.clocks_in_khz =
+- dcn_find_dcfclk_suits_all(core_dc, &clk->clks);
+- }
++ struct dc *core_dc = dccg->ctx->dc;
++
++ /*use dcfclk to request voltage*/
++ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
++ clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(core_dc, new_clocks);
++ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
++ }
+ #endif
+- dm_pp_apply_clock_for_voltage_request(
+- clk->ctx, &clock_voltage_req);
++
++ if ((new_clocks->dispclk_khz < dccg->clks.dispclk_khz && safe_to_lower)
++ || new_clocks->dispclk_khz > dccg->clks.dispclk_khz) {
++ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
++ clock_voltage_req.clocks_in_khz = new_clocks->dispclk_khz;
++ /* TODO: ramp up - dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);*/
++ dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
++
++ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
++ send_request_to_lower = true;
+ }
+- if (update_dp_phyclk && (clocks_in_khz >
+- clk->clks.phyclk_khz))
+- clk->clks.phyclk_khz = clocks_in_khz;
+
+- return true;
++ if ((new_clocks->phyclk_khz < dccg->clks.phyclk_khz && safe_to_lower)
++ || new_clocks->phyclk_khz > dccg->clks.phyclk_khz) {
++ dccg->clks.phyclk_khz = new_clocks->phyclk_khz;
++ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK;
++ clock_voltage_req.clocks_in_khz = new_clocks->phyclk_khz;
++
++ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
++ send_request_to_lower = true;
++ }
++
++ if ((new_clocks->fclk_khz < dccg->clks.fclk_khz && safe_to_lower)
++ || new_clocks->fclk_khz > dccg->clks.fclk_khz) {
++ dccg->clks.phyclk_khz = new_clocks->fclk_khz;
++ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_FCLK;
++ clock_voltage_req.clocks_in_khz = new_clocks->fclk_khz;
++
++ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
++ send_request_to_lower = true;
++ }
++
++ if ((new_clocks->dcfclk_khz < dccg->clks.dcfclk_khz && safe_to_lower)
++ || new_clocks->dcfclk_khz > dccg->clks.dcfclk_khz) {
++ dccg->clks.phyclk_khz = new_clocks->dcfclk_khz;
++ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
++ clock_voltage_req.clocks_in_khz = new_clocks->dcfclk_khz;
++
++ send_request_to_lower = true;
++ }
++
++#ifdef CONFIG_DRM_AMD_DC_DCN1_0
++ if (!send_request_to_increase && send_request_to_lower
++ ) {
++ struct dc *core_dc = dccg->ctx->dc;
++
++ /*use dcfclk to request voltage*/
++ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
++ clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(core_dc, new_clocks);
++ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
++ }
++#endif
++}
++
++static void dce_update_clocks(struct display_clock *dccg,
++ struct dc_clocks *new_clocks,
++ bool safe_to_lower)
++{
++ struct dm_pp_power_level_change_request level_change_req;
++
++ level_change_req.power_level = dce_get_required_clocks_state(dccg, new_clocks);
++ /* get max clock state from PPLIB */
++ if ((level_change_req.power_level < dccg->cur_min_clks_state && safe_to_lower)
++ || level_change_req.power_level > dccg->cur_min_clks_state) {
++ if (dm_pp_apply_power_level_change_request(dccg->ctx, &level_change_req))
++ dccg->cur_min_clks_state = level_change_req.power_level;
++ }
++
++ if ((new_clocks->dispclk_khz < dccg->clks.dispclk_khz && safe_to_lower)
++ || new_clocks->dispclk_khz > dccg->clks.dispclk_khz) {
++ dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
++ dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
++ }
+ }
+
++static const struct display_clock_funcs dcn_funcs = {
++ .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq_wrkaround,
++ .set_dispclk = dce112_set_clock,
++ .update_clocks = dcn_update_clocks
++};
+
+ static const struct display_clock_funcs dce120_funcs = {
+ .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq_wrkaround,
+- .apply_clock_voltage_request = dce_apply_clock_voltage_request,
+- .set_clock = dce112_set_clock
++ .set_dispclk = dce112_set_clock,
++ .update_clocks = dce12_update_clocks
+ };
+
+ static const struct display_clock_funcs dce112_funcs = {
+ .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq,
+- .get_required_clocks_state = dce_get_required_clocks_state,
+- .set_min_clocks_state = dce_clock_set_min_clocks_state,
+- .set_clock = dce112_set_clock
++ .set_dispclk = dce112_set_clock,
++ .update_clocks = dce_update_clocks
+ };
+
+ static const struct display_clock_funcs dce110_funcs = {
+ .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq,
+- .get_required_clocks_state = dce_get_required_clocks_state,
+- .set_min_clocks_state = dce_clock_set_min_clocks_state,
+- .set_clock = dce_psr_set_clock
++ .set_dispclk = dce_psr_set_clock,
++ .update_clocks = dce_update_clocks
+ };
+
+ static const struct display_clock_funcs dce_funcs = {
+ .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq,
+- .get_required_clocks_state = dce_get_required_clocks_state,
+- .set_min_clocks_state = dce_clock_set_min_clocks_state,
+- .set_clock = dce_set_clock
++ .set_dispclk = dce_set_clock,
++ .update_clocks = dce_update_clocks
+ };
+
+ static void dce_disp_clk_construct(
+@@ -785,7 +803,6 @@ struct display_clock *dce112_disp_clk_create(
+ struct display_clock *dce120_disp_clk_create(struct dc_context *ctx)
+ {
+ struct dce_disp_clk *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
+- struct dm_pp_clock_levels_with_voltage clk_level_info = {0};
+
+ if (clk_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+@@ -801,15 +818,23 @@ struct display_clock *dce120_disp_clk_create(struct dc_context *ctx)
+
+ clk_dce->base.funcs = &dce120_funcs;
+
+- /* new in dce120 */
+- if (!ctx->dc->debug.disable_pplib_clock_request &&
+- dm_pp_get_clock_levels_by_type_with_voltage(
+- ctx, DM_PP_CLOCK_TYPE_DISPLAY_CLK, &clk_level_info)
+- && clk_level_info.num_levels)
+- clk_dce->max_displ_clk_in_khz =
+- clk_level_info.data[clk_level_info.num_levels - 1].clocks_in_khz;
+- else
+- clk_dce->max_displ_clk_in_khz = 1133000;
++ return &clk_dce->base;
++}
++
++struct display_clock *dcn_disp_clk_create(struct dc_context *ctx)
++{
++ struct dce_disp_clk *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
++
++ if (clk_dce == NULL) {
++ BREAK_TO_DEBUGGER();
++ return NULL;
++ }
++
++ /* TODO strip out useful stuff out of dce constructor */
++ dce_disp_clk_construct(
++ clk_dce, ctx, NULL, NULL, NULL);
++
++ clk_dce->base.funcs = &dcn_funcs;
+
+ return &clk_dce->base;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
+index 0e717e0..f9b0020 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
+@@ -91,7 +91,6 @@ struct dce_disp_clk {
+ struct state_dependent_clocks max_clks_by_state[DM_PP_CLOCKS_MAX_STATES];
+ struct dce_divider_range divider_ranges[DIVIDER_RANGE_MAX];
+
+- bool use_max_disp_clk;
+ int dentist_vco_freq_khz;
+
+ /* Cache the status of DFS-bypass feature*/
+@@ -106,9 +105,6 @@ struct dce_disp_clk {
+ int dprefclk_ss_percentage;
+ /* DPREFCLK SS percentage Divider (100 or 1000) */
+ int dprefclk_ss_divider;
+-
+- /* max disp_clk from PPLIB for max validation display clock*/
+- int max_displ_clk_in_khz;
+ };
+
+
+@@ -132,6 +128,8 @@ struct display_clock *dce112_disp_clk_create(
+
+ struct display_clock *dce120_disp_clk_create(struct dc_context *ctx);
+
++struct display_clock *dcn_disp_clk_create(struct dc_context *ctx);
++
+ void dce_disp_clk_destroy(struct display_clock **disp_clk);
+
+ #endif /* _DCE_CLOCKS_H_ */
+diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
+index 41f83ec..aabf7ca 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
+@@ -125,17 +125,54 @@ static void dce100_pplib_apply_display_requirements(
+ dc->prev_display_config = *pp_display_cfg;
+ }
+
++/* unit: in_khz before mode set, get pixel clock from context. ASIC register
++ * may not be programmed yet
++ */
++static uint32_t get_max_pixel_clock_for_all_paths(
++ struct dc *dc,
++ struct dc_state *context)
++{
++ uint32_t max_pix_clk = 0;
++ int i;
++
++ for (i = 0; i < MAX_PIPES; i++) {
++ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
++
++ if (pipe_ctx->stream == NULL)
++ continue;
++
++ /* do not check under lay */
++ if (pipe_ctx->top_pipe)
++ continue;
++
++ if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk > max_pix_clk)
++ max_pix_clk =
++ pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
++ }
++
++ if (max_pix_clk == 0)
++ ASSERT(0);
++
++ return max_pix_clk;
++}
++
+ void dce100_set_bandwidth(
+ struct dc *dc,
+ struct dc_state *context,
+ bool decrease_allowed)
+ {
+- if (decrease_allowed || context->bw.dce.dispclk_khz > dc->current_state->bw.dce.dispclk_khz) {
+- dc->res_pool->display_clock->funcs->set_clock(
+- dc->res_pool->display_clock,
+- context->bw.dce.dispclk_khz * 115 / 100);
+- dc->current_state->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz;
+- }
++ struct dc_clocks req_clks;
++
++ req_clks.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
++ req_clks.phyclk_khz = get_max_pixel_clock_for_all_paths(dc, context);
++
++ dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
++
++ dc->res_pool->display_clock->funcs->update_clocks(
++ dc->res_pool->display_clock,
++ &req_clks,
++ decrease_allowed);
++
+ dce100_pplib_apply_display_requirements(dc, context);
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index e540172..73c03b7 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -1637,7 +1637,7 @@ static void dce110_set_displaymarks(
+ }
+ }
+
+-static void set_safe_displaymarks(
++void dce110_set_safe_displaymarks(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool)
+ {
+@@ -1737,23 +1737,15 @@ static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
+ }
+
+ /* unit: in_khz before mode set, get pixel clock from context. ASIC register
+- * may not be programmed yet.
+- * TODO: after mode set, pre_mode_set = false,
+- * may read PLL register to get pixel clock
++ * may not be programmed yet
+ */
+ static uint32_t get_max_pixel_clock_for_all_paths(
+ struct dc *dc,
+- struct dc_state *context,
+- bool pre_mode_set)
++ struct dc_state *context)
+ {
+ uint32_t max_pix_clk = 0;
+ int i;
+
+- if (!pre_mode_set) {
+- /* TODO: read ASIC register to get pixel clock */
+- ASSERT(0);
+- }
+-
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+@@ -1776,74 +1768,6 @@ static uint32_t get_max_pixel_clock_for_all_paths(
+ }
+
+ /*
+- * Find clock state based on clock requested. if clock value is 0, simply
+- * set clock state as requested without finding clock state by clock value
+- */
+-
+-static void apply_min_clocks(
+- struct dc *dc,
+- struct dc_state *context,
+- enum dm_pp_clocks_state *clocks_state,
+- bool pre_mode_set)
+-{
+- struct state_dependent_clocks req_clocks = {0};
+-
+- if (!pre_mode_set) {
+- /* set clock_state without verification */
+- if (context->dis_clk->funcs->set_min_clocks_state) {
+- context->dis_clk->funcs->set_min_clocks_state(
+- context->dis_clk, *clocks_state);
+- return;
+- }
+-
+- /* TODO: This is incorrect. Figure out how to fix. */
+- context->dis_clk->funcs->apply_clock_voltage_request(
+- context->dis_clk,
+- DM_PP_CLOCK_TYPE_DISPLAY_CLK,
+- context->dis_clk->clks.dispclk_khz,
+- pre_mode_set,
+- false);
+-
+- context->dis_clk->funcs->apply_clock_voltage_request(
+- context->dis_clk,
+- DM_PP_CLOCK_TYPE_DISPLAYPHYCLK,
+- context->dis_clk->clks.phyclk_khz,
+- pre_mode_set,
+- false);
+- return;
+- }
+-
+- /* get the required state based on state dependent clocks:
+- * display clock and pixel clock
+- */
+- req_clocks.display_clk_khz = context->bw.dce.dispclk_khz;
+-
+- req_clocks.pixel_clk_khz = get_max_pixel_clock_for_all_paths(
+- dc, context, true);
+-
+- if (context->dis_clk->funcs->get_required_clocks_state) {
+- *clocks_state = context->dis_clk->funcs->get_required_clocks_state(
+- context->dis_clk, &req_clocks);
+- context->dis_clk->funcs->set_min_clocks_state(
+- context->dis_clk, *clocks_state);
+- } else {
+- context->dis_clk->funcs->apply_clock_voltage_request(
+- context->dis_clk,
+- DM_PP_CLOCK_TYPE_DISPLAY_CLK,
+- req_clocks.display_clk_khz,
+- pre_mode_set,
+- false);
+-
+- context->dis_clk->funcs->apply_clock_voltage_request(
+- context->dis_clk,
+- DM_PP_CLOCK_TYPE_DISPLAYPHYCLK,
+- req_clocks.pixel_clk_khz,
+- pre_mode_set,
+- false);
+- }
+-}
+-
+-/*
+ * Check if FBC can be enabled
+ */
+ static bool should_enable_fbc(struct dc *dc,
+@@ -2060,7 +1984,6 @@ enum dc_status dce110_apply_ctx_to_hw(
+ struct dc_bios *dcb = dc->ctx->dc_bios;
+ enum dc_status status;
+ int i;
+- enum dm_pp_clocks_state clocks_state = DM_PP_CLOCKS_STATE_INVALID;
+
+ /* Reset old context */
+ /* look up the targets that have been removed since last commit */
+@@ -2094,54 +2017,10 @@ enum dc_status dce110_apply_ctx_to_hw(
+ PIPE_GATING_CONTROL_DISABLE);
+ }
+
+- set_safe_displaymarks(&context->res_ctx, dc->res_pool);
+-
+ if (dc->fbc_compressor)
+ dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
+
+- /*TODO: when pplib works*/
+- apply_min_clocks(dc, context, &clocks_state, true);
+-
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+- if (dc->ctx->dce_version >= DCN_VERSION_1_0) {
+- if (context->bw.dcn.calc_clk.fclk_khz
+- > dc->current_state->bw.dcn.cur_clk.fclk_khz) {
+- struct dm_pp_clock_for_voltage_req clock;
+-
+- clock.clk_type = DM_PP_CLOCK_TYPE_FCLK;
+- clock.clocks_in_khz = context->bw.dcn.calc_clk.fclk_khz;
+- dm_pp_apply_clock_for_voltage_request(dc->ctx, &clock);
+- dc->current_state->bw.dcn.cur_clk.fclk_khz = clock.clocks_in_khz;
+- context->bw.dcn.cur_clk.fclk_khz = clock.clocks_in_khz;
+- }
+- if (context->bw.dcn.calc_clk.dcfclk_khz
+- > dc->current_state->bw.dcn.cur_clk.dcfclk_khz) {
+- struct dm_pp_clock_for_voltage_req clock;
+-
+- clock.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
+- clock.clocks_in_khz = context->bw.dcn.calc_clk.dcfclk_khz;
+- dm_pp_apply_clock_for_voltage_request(dc->ctx, &clock);
+- dc->current_state->bw.dcn.cur_clk.dcfclk_khz = clock.clocks_in_khz;
+- context->bw.dcn.cur_clk.dcfclk_khz = clock.clocks_in_khz;
+- }
+- if (context->bw.dcn.calc_clk.dispclk_khz
+- > dc->current_state->bw.dcn.cur_clk.dispclk_khz) {
+- dc->res_pool->display_clock->funcs->set_clock(
+- dc->res_pool->display_clock,
+- context->bw.dcn.calc_clk.dispclk_khz);
+- dc->current_state->bw.dcn.cur_clk.dispclk_khz =
+- context->bw.dcn.calc_clk.dispclk_khz;
+- context->bw.dcn.cur_clk.dispclk_khz =
+- context->bw.dcn.calc_clk.dispclk_khz;
+- }
+- } else
+-#endif
+- if (context->bw.dce.dispclk_khz
+- > dc->current_state->bw.dce.dispclk_khz) {
+- dc->res_pool->display_clock->funcs->set_clock(
+- dc->res_pool->display_clock,
+- context->bw.dce.dispclk_khz * 115 / 100);
+- }
++ dc->hwss.set_bandwidth(dc, context, false);
+
+ dce110_setup_audio_dto(dc, context);
+
+@@ -2172,7 +2051,7 @@ enum dc_status dce110_apply_ctx_to_hw(
+ }
+
+ /* to save power */
+- apply_min_clocks(dc, context, &clocks_state, false);
++ dc->hwss.set_bandwidth(dc, context, true);
+
+ dcb->funcs->set_scratch_critical_state(dcb, false);
+
+@@ -2661,15 +2540,20 @@ void dce110_set_bandwidth(
+ struct dc_state *context,
+ bool decrease_allowed)
+ {
+- dce110_set_displaymarks(dc, context);
++ struct dc_clocks req_clks;
+
+- if (decrease_allowed || context->bw.dce.dispclk_khz > dc->current_state->bw.dce.dispclk_khz) {
+- dc->res_pool->display_clock->funcs->set_clock(
+- dc->res_pool->display_clock,
+- context->bw.dce.dispclk_khz * 115 / 100);
+- dc->current_state->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz;
+- }
++ req_clks.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
++ req_clks.phyclk_khz = get_max_pixel_clock_for_all_paths(dc, context);
++
++ if (decrease_allowed)
++ dce110_set_displaymarks(dc, context);
++ else
++ dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
+
++ dc->res_pool->display_clock->funcs->update_clocks(
++ dc->res_pool->display_clock,
++ &req_clks,
++ decrease_allowed);
+ pplib_apply_display_requirements(dc, context);
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+index 1782757..a226a3d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+@@ -60,6 +60,10 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context);
+
+ void dce110_power_down(struct dc *dc);
+
++void dce110_set_safe_displaymarks(
++ struct resource_context *res_ctx,
++ const struct resource_pool *pool);
++
+ void dce110_fill_display_configs(
+ const struct dc_state *context,
+ struct dm_pp_display_configuration *pp_display_cfg);
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 72d0b6f6..2fdec57f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -2439,7 +2439,7 @@ static void ramp_up_dispclk_with_dpp(struct dc *dc, struct dc_state *context)
+ int dispclk_to_dpp_threshold = determine_dppclk_threshold(dc, context);
+
+ /* set disp clk to dpp clk threshold */
+- dc->res_pool->display_clock->funcs->set_clock(
++ dc->res_pool->display_clock->funcs->set_dispclk(
+ dc->res_pool->display_clock,
+ dispclk_to_dpp_threshold);
+
+@@ -2458,7 +2458,7 @@ static void ramp_up_dispclk_with_dpp(struct dc *dc, struct dc_state *context)
+
+ /* If target clk not same as dppclk threshold, set to target clock */
+ if (dispclk_to_dpp_threshold != context->bw.dcn.calc_clk.dispclk_khz) {
+- dc->res_pool->display_clock->funcs->set_clock(
++ dc->res_pool->display_clock->funcs->set_dispclk(
+ dc->res_pool->display_clock,
+ context->bw.dcn.calc_clk.dispclk_khz);
+ }
+@@ -2488,6 +2488,11 @@ static void dcn10_set_bandwidth(
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
+ return;
+
++ dc->res_pool->display_clock->funcs->update_clocks(
++ dc->res_pool->display_clock,
++ &context->bw.dcn.calc_clk,
++ decrease_allowed);
++
+ if (should_set_clock(
+ decrease_allowed,
+ context->bw.dcn.calc_clk.dcfclk_khz,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+index 704acc0..e548ce5 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+@@ -1074,7 +1074,7 @@ static bool construct(
+ }
+ }
+
+- pool->base.display_clock = dce120_disp_clk_create(ctx);
++ pool->base.display_clock = dcn_disp_clk_create(ctx);
+ if (pool->base.display_clock == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h b/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
+index 6b9ca55..8ce106f 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
+@@ -40,32 +40,19 @@ struct display_clock {
+ struct dc_context *ctx;
+ const struct display_clock_funcs *funcs;
+
+- bool dispclk_notify_pplib_done;
+- bool phyclk_notify_pplib_done;
+ enum dm_pp_clocks_state max_clks_state;
+ enum dm_pp_clocks_state cur_min_clks_state;
+ struct dc_clocks clks;
+ };
+
+ struct display_clock_funcs {
+- int (*set_clock)(struct display_clock *disp_clk,
++ void (*update_clocks)(struct display_clock *dccg,
++ struct dc_clocks *new_clocks,
++ bool safe_to_lower);
++ int (*set_dispclk)(struct display_clock *disp_clk,
+ int requested_clock_khz);
+
+- enum dm_pp_clocks_state (*get_required_clocks_state)(
+- struct display_clock *disp_clk,
+- struct state_dependent_clocks *req_clocks);
+-
+- bool (*set_min_clocks_state)(struct display_clock *disp_clk,
+- enum dm_pp_clocks_state dm_pp_clocks_state);
+-
+ int (*get_dp_ref_clk_frequency)(struct display_clock *disp_clk);
+-
+- bool (*apply_clock_voltage_request)(
+- struct display_clock *disp_clk,
+- enum dm_pp_clock_type clocks_type,
+- int clocks_in_khz,
+- bool pre_mode_set,
+- bool update_dp_phyclk);
+ };
+
+ #endif /* __DISPLAY_CLOCK_H__ */
+--
+2.7.4
+
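The update_clocks() callbacks introduced above all gate each clock on the same rule: raise a clock as soon as it needs to go up, but only lower it when the caller passes safe_to_lower (the post-mode-set pass, to save power). A minimal standalone sketch of that rule follows; should_update() is a hypothetical helper name, the real callbacks inline the comparison per clock.

#include <stdbool.h>
#include <stdio.h>

/* mirrors the condition in dce12_update_clocks()/dcn_update_clocks():
 * (new < cur && safe_to_lower) || new > cur */
static bool should_update(int new_khz, int cur_khz, bool safe_to_lower)
{
	return (new_khz < cur_khz && safe_to_lower) || new_khz > cur_khz;
}

int main(void)
{
	int cur_dispclk_khz = 600000;

	/* pre-mode-set pass: lowering not yet safe */
	printf("%d\n", should_update(300000, cur_dispclk_khz, false)); /* 0: keep  */
	printf("%d\n", should_update(700000, cur_dispclk_khz, false)); /* 1: raise */

	/* post-mode-set pass (decrease_allowed): lowering is safe */
	printf("%d\n", should_update(300000, cur_dispclk_khz, true));  /* 1: lower */

	return 0;
}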
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4754-drm-amd-display-rename-display-clock-block-to-dccg.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4754-drm-amd-display-rename-display-clock-block-to-dccg.patch
new file mode 100644
index 00000000..24bfaea7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4754-drm-amd-display-rename-display-clock-block-to-dccg.patch
@@ -0,0 +1,755 @@
+From 2c42ccf2b33646a48ded58b4b7e916952cfb7145 Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Wed, 23 May 2018 16:44:26 -0400
+Subject: [PATCH 4754/5725] drm/amd/display: rename display clock block to dccg
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 2 +-
+ drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c | 78 +++++++++++-----------
+ drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h | 16 ++---
+ .../amd/display/dc/dce100/dce100_hw_sequencer.c | 4 +-
+ .../drm/amd/display/dc/dce100/dce100_resource.c | 10 +--
+ .../amd/display/dc/dce110/dce110_hw_sequencer.c | 4 +-
+ .../drm/amd/display/dc/dce110/dce110_resource.c | 10 +--
+ .../drm/amd/display/dc/dce112/dce112_resource.c | 10 +--
+ .../drm/amd/display/dc/dce120/dce120_resource.c | 12 ++--
+ .../gpu/drm/amd/display/dc/dce80/dce80_resource.c | 22 +++---
+ .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 12 ++--
+ .../gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 8 +--
+ drivers/gpu/drm/amd/display/dc/inc/core_types.h | 4 +-
+ .../gpu/drm/amd/display/dc/inc/hw/display_clock.h | 8 +--
+ 14 files changed, 100 insertions(+), 100 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index 55711c6..c4c7e00 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -1952,7 +1952,7 @@ void dc_resource_state_construct(
+ const struct dc *dc,
+ struct dc_state *dst_ctx)
+ {
+- dst_ctx->dis_clk = dc->res_pool->display_clock;
++ dst_ctx->dis_clk = dc->res_pool->dccg;
+ }
+
+ enum dc_status dc_validate_global_state(
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+index d3bbac8..890a3ec 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+@@ -38,7 +38,7 @@
+ #include "dal_asic_id.h"
+
+ #define TO_DCE_CLOCKS(clocks)\
+- container_of(clocks, struct dce_disp_clk, base)
++ container_of(clocks, struct dce_dccg, base)
+
+ #define REG(reg) \
+ (clk_dce->regs->reg)
+@@ -187,9 +187,9 @@ static int dce_divider_range_get_divider(
+ return div;
+ }
+
+-static int dce_clocks_get_dp_ref_freq(struct display_clock *clk)
++static int dce_clocks_get_dp_ref_freq(struct dccg *clk)
+ {
+- struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
++ struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
+ int dprefclk_wdivider;
+ int dprefclk_src_sel;
+ int dp_ref_clk_khz = 600000;
+@@ -250,9 +250,9 @@ static int dce_clocks_get_dp_ref_freq(struct display_clock *clk)
+ * or CLK0_CLK11 by SMU. For DCE120, it is wlays 600Mhz. Will re-visit
+ * clock implementation
+ */
+-static int dce_clocks_get_dp_ref_freq_wrkaround(struct display_clock *clk)
++static int dce_clocks_get_dp_ref_freq_wrkaround(struct dccg *clk)
+ {
+- struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
++ struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
+ int dp_ref_clk_khz = 600000;
+
+ if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) {
+@@ -274,10 +274,10 @@ static int dce_clocks_get_dp_ref_freq_wrkaround(struct display_clock *clk)
+ return dp_ref_clk_khz;
+ }
+ static enum dm_pp_clocks_state dce_get_required_clocks_state(
+- struct display_clock *clk,
++ struct dccg *clk,
+ struct dc_clocks *req_clocks)
+ {
+- struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
++ struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
+ int i;
+ enum dm_pp_clocks_state low_req_clk;
+
+@@ -306,10 +306,10 @@ static enum dm_pp_clocks_state dce_get_required_clocks_state(
+ }
+
+ static int dce_set_clock(
+- struct display_clock *clk,
++ struct dccg *clk,
+ int requested_clk_khz)
+ {
+- struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
++ struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
+ struct bp_pixel_clock_parameters pxl_clk_params = { 0 };
+ struct dc_bios *bp = clk->ctx->dc_bios;
+ int actual_clock = requested_clk_khz;
+@@ -341,10 +341,10 @@ static int dce_set_clock(
+ }
+
+ static int dce_psr_set_clock(
+- struct display_clock *clk,
++ struct dccg *clk,
+ int requested_clk_khz)
+ {
+- struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
++ struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
+ struct dc_context *ctx = clk_dce->base.ctx;
+ struct dc *core_dc = ctx->dc;
+ struct dmcu *dmcu = core_dc->res_pool->dmcu;
+@@ -357,10 +357,10 @@ static int dce_psr_set_clock(
+ }
+
+ static int dce112_set_clock(
+- struct display_clock *clk,
++ struct dccg *clk,
+ int requested_clk_khz)
+ {
+- struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
++ struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
+ struct bp_set_dce_clock_parameters dce_clk_params;
+ struct dc_bios *bp = clk->ctx->dc_bios;
+ struct dc *core_dc = clk->ctx->dc;
+@@ -409,7 +409,7 @@ static int dce112_set_clock(
+ return actual_clock;
+ }
+
+-static void dce_clock_read_integrated_info(struct dce_disp_clk *clk_dce)
++static void dce_clock_read_integrated_info(struct dce_dccg *clk_dce)
+ {
+ struct dc_debug *debug = &clk_dce->base.ctx->dc->debug;
+ struct dc_bios *bp = clk_dce->base.ctx->dc_bios;
+@@ -467,7 +467,7 @@ static void dce_clock_read_integrated_info(struct dce_disp_clk *clk_dce)
+ clk_dce->dfs_bypass_enabled = true;
+ }
+
+-static void dce_clock_read_ss_info(struct dce_disp_clk *clk_dce)
++static void dce_clock_read_ss_info(struct dce_dccg *clk_dce)
+ {
+ struct dc_bios *bp = clk_dce->base.ctx->dc_bios;
+ int ss_info_num = bp->funcs->get_ss_entry_number(
+@@ -523,7 +523,7 @@ static void dce_clock_read_ss_info(struct dce_disp_clk *clk_dce)
+ }
+ }
+
+-static void dce12_update_clocks(struct display_clock *dccg,
++static void dce12_update_clocks(struct dccg *dccg,
+ struct dc_clocks *new_clocks,
+ bool safe_to_lower)
+ {
+@@ -549,7 +549,7 @@ static void dce12_update_clocks(struct display_clock *dccg,
+ }
+ }
+
+-static void dcn_update_clocks(struct display_clock *dccg,
++static void dcn_update_clocks(struct dccg *dccg,
+ struct dc_clocks *new_clocks,
+ bool safe_to_lower)
+ {
+@@ -628,7 +628,7 @@ static void dcn_update_clocks(struct display_clock *dccg,
+ #endif
+ }
+
+-static void dce_update_clocks(struct display_clock *dccg,
++static void dce_update_clocks(struct dccg *dccg,
+ struct dc_clocks *new_clocks,
+ bool safe_to_lower)
+ {
+@@ -679,14 +679,14 @@ static const struct display_clock_funcs dce_funcs = {
+ .update_clocks = dce_update_clocks
+ };
+
+-static void dce_disp_clk_construct(
+- struct dce_disp_clk *clk_dce,
++static void dce_dccg_construct(
++ struct dce_dccg *clk_dce,
+ struct dc_context *ctx,
+ const struct dce_disp_clk_registers *regs,
+ const struct dce_disp_clk_shift *clk_shift,
+ const struct dce_disp_clk_mask *clk_mask)
+ {
+- struct display_clock *base = &clk_dce->base;
++ struct dccg *base = &clk_dce->base;
+
+ base->ctx = ctx;
+ base->funcs = &dce_funcs;
+@@ -727,13 +727,13 @@ static void dce_disp_clk_construct(
+ DIVIDER_RANGE_MAX_DIVIDER_ID);
+ }
+
+-struct display_clock *dce_disp_clk_create(
++struct dccg *dce_dccg_create(
+ struct dc_context *ctx,
+ const struct dce_disp_clk_registers *regs,
+ const struct dce_disp_clk_shift *clk_shift,
+ const struct dce_disp_clk_mask *clk_mask)
+ {
+- struct dce_disp_clk *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
++ struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
+
+ if (clk_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+@@ -744,19 +744,19 @@ struct display_clock *dce_disp_clk_create(
+ dce80_max_clks_by_state,
+ sizeof(dce80_max_clks_by_state));
+
+- dce_disp_clk_construct(
++ dce_dccg_construct(
+ clk_dce, ctx, regs, clk_shift, clk_mask);
+
+ return &clk_dce->base;
+ }
+
+-struct display_clock *dce110_disp_clk_create(
++struct dccg *dce110_dccg_create(
+ struct dc_context *ctx,
+ const struct dce_disp_clk_registers *regs,
+ const struct dce_disp_clk_shift *clk_shift,
+ const struct dce_disp_clk_mask *clk_mask)
+ {
+- struct dce_disp_clk *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
++ struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
+
+ if (clk_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+@@ -767,7 +767,7 @@ struct display_clock *dce110_disp_clk_create(
+ dce110_max_clks_by_state,
+ sizeof(dce110_max_clks_by_state));
+
+- dce_disp_clk_construct(
++ dce_dccg_construct(
+ clk_dce, ctx, regs, clk_shift, clk_mask);
+
+ clk_dce->base.funcs = &dce110_funcs;
+@@ -775,13 +775,13 @@ struct display_clock *dce110_disp_clk_create(
+ return &clk_dce->base;
+ }
+
+-struct display_clock *dce112_disp_clk_create(
++struct dccg *dce112_dccg_create(
+ struct dc_context *ctx,
+ const struct dce_disp_clk_registers *regs,
+ const struct dce_disp_clk_shift *clk_shift,
+ const struct dce_disp_clk_mask *clk_mask)
+ {
+- struct dce_disp_clk *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
++ struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
+
+ if (clk_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+@@ -792,7 +792,7 @@ struct display_clock *dce112_disp_clk_create(
+ dce112_max_clks_by_state,
+ sizeof(dce112_max_clks_by_state));
+
+- dce_disp_clk_construct(
++ dce_dccg_construct(
+ clk_dce, ctx, regs, clk_shift, clk_mask);
+
+ clk_dce->base.funcs = &dce112_funcs;
+@@ -800,9 +800,9 @@ struct display_clock *dce112_disp_clk_create(
+ return &clk_dce->base;
+ }
+
+-struct display_clock *dce120_disp_clk_create(struct dc_context *ctx)
++struct dccg *dce120_dccg_create(struct dc_context *ctx)
+ {
+- struct dce_disp_clk *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
++ struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
+
+ if (clk_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+@@ -813,7 +813,7 @@ struct display_clock *dce120_disp_clk_create(struct dc_context *ctx)
+ dce120_max_clks_by_state,
+ sizeof(dce120_max_clks_by_state));
+
+- dce_disp_clk_construct(
++ dce_dccg_construct(
+ clk_dce, ctx, NULL, NULL, NULL);
+
+ clk_dce->base.funcs = &dce120_funcs;
+@@ -821,9 +821,9 @@ struct display_clock *dce120_disp_clk_create(struct dc_context *ctx)
+ return &clk_dce->base;
+ }
+
+-struct display_clock *dcn_disp_clk_create(struct dc_context *ctx)
++struct dccg *dcn_dccg_create(struct dc_context *ctx)
+ {
+- struct dce_disp_clk *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
++ struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
+
+ if (clk_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+@@ -831,7 +831,7 @@ struct display_clock *dcn_disp_clk_create(struct dc_context *ctx)
+ }
+
+ /* TODO strip out useful stuff out of dce constructor */
+- dce_disp_clk_construct(
++ dce_dccg_construct(
+ clk_dce, ctx, NULL, NULL, NULL);
+
+ clk_dce->base.funcs = &dcn_funcs;
+@@ -839,10 +839,10 @@ struct display_clock *dcn_disp_clk_create(struct dc_context *ctx)
+ return &clk_dce->base;
+ }
+
+-void dce_disp_clk_destroy(struct display_clock **disp_clk)
++void dce_dccg_destroy(struct dccg **dccg)
+ {
+- struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(*disp_clk);
++ struct dce_dccg *clk_dce = TO_DCE_CLOCKS(*dccg);
+
+ kfree(clk_dce);
+- *disp_clk = NULL;
++ *dccg = NULL;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
+index f9b0020..c695b9c 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
+@@ -82,8 +82,8 @@ struct dce_divider_range {
+ int did_max;
+ };
+
+-struct dce_disp_clk {
+- struct display_clock base;
++struct dce_dccg {
++ struct dccg base;
+ const struct dce_disp_clk_registers *regs;
+ const struct dce_disp_clk_shift *clk_shift;
+ const struct dce_disp_clk_mask *clk_mask;
+@@ -108,28 +108,28 @@ struct dce_disp_clk {
+ };
+
+
+-struct display_clock *dce_disp_clk_create(
++struct dccg *dce_dccg_create(
+ struct dc_context *ctx,
+ const struct dce_disp_clk_registers *regs,
+ const struct dce_disp_clk_shift *clk_shift,
+ const struct dce_disp_clk_mask *clk_mask);
+
+-struct display_clock *dce110_disp_clk_create(
++struct dccg *dce110_dccg_create(
+ struct dc_context *ctx,
+ const struct dce_disp_clk_registers *regs,
+ const struct dce_disp_clk_shift *clk_shift,
+ const struct dce_disp_clk_mask *clk_mask);
+
+-struct display_clock *dce112_disp_clk_create(
++struct dccg *dce112_dccg_create(
+ struct dc_context *ctx,
+ const struct dce_disp_clk_registers *regs,
+ const struct dce_disp_clk_shift *clk_shift,
+ const struct dce_disp_clk_mask *clk_mask);
+
+-struct display_clock *dce120_disp_clk_create(struct dc_context *ctx);
++struct dccg *dce120_dccg_create(struct dc_context *ctx);
+
+-struct display_clock *dcn_disp_clk_create(struct dc_context *ctx);
++struct dccg *dcn_dccg_create(struct dc_context *ctx);
+
+-void dce_disp_clk_destroy(struct display_clock **disp_clk);
++void dce_dccg_destroy(struct dccg **dccg);
+
+ #endif /* _DCE_CLOCKS_H_ */
+diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
+index aabf7ca..ec32213 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
+@@ -168,8 +168,8 @@ void dce100_set_bandwidth(
+
+ dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
+
+- dc->res_pool->display_clock->funcs->update_clocks(
+- dc->res_pool->display_clock,
++ dc->res_pool->dccg->funcs->update_clocks(
++ dc->res_pool->dccg,
+ &req_clks,
+ decrease_allowed);
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+index 344dd2e..a90c9a6 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+@@ -644,8 +644,8 @@ static void destruct(struct dce110_resource_pool *pool)
+ dce_aud_destroy(&pool->base.audios[i]);
+ }
+
+- if (pool->base.display_clock != NULL)
+- dce_disp_clk_destroy(&pool->base.display_clock);
++ if (pool->base.dccg != NULL)
++ dce_dccg_destroy(&pool->base.dccg);
+
+ if (pool->base.abm != NULL)
+ dce_abm_destroy(&pool->base.abm);
+@@ -830,11 +830,11 @@ static bool construct(
+ }
+ }
+
+- pool->base.display_clock = dce_disp_clk_create(ctx,
++ pool->base.dccg = dce_dccg_create(ctx,
+ &disp_clk_regs,
+ &disp_clk_shift,
+ &disp_clk_mask);
+- if (pool->base.display_clock == NULL) {
++ if (pool->base.dccg == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+@@ -864,7 +864,7 @@ static bool construct(
+ * max_clock_state
+ */
+ if (dm_pp_get_static_clocks(ctx, &static_clk_info))
+- pool->base.display_clock->max_clks_state =
++ pool->base.dccg->max_clks_state =
+ static_clk_info.max_clocks_state;
+ {
+ struct irq_service_init_data init_data;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index 73c03b7..8a51b9e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -2550,8 +2550,8 @@ void dce110_set_bandwidth(
+ else
+ dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
+
+- dc->res_pool->display_clock->funcs->update_clocks(
+- dc->res_pool->display_clock,
++ dc->res_pool->dccg->funcs->update_clocks(
++ dc->res_pool->dccg,
+ &req_clks,
+ decrease_allowed);
+ pplib_apply_display_requirements(dc, context);
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+index 20c0290..71a401f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+@@ -679,8 +679,8 @@ static void destruct(struct dce110_resource_pool *pool)
+ if (pool->base.dmcu != NULL)
+ dce_dmcu_destroy(&pool->base.dmcu);
+
+- if (pool->base.display_clock != NULL)
+- dce_disp_clk_destroy(&pool->base.display_clock);
++ if (pool->base.dccg != NULL)
++ dce_dccg_destroy(&pool->base.dccg);
+
+ if (pool->base.irqs != NULL) {
+ dal_irq_service_destroy(&pool->base.irqs);
+@@ -1179,11 +1179,11 @@ static bool construct(
+ }
+ }
+
+- pool->base.display_clock = dce110_disp_clk_create(ctx,
++ pool->base.dccg = dce110_dccg_create(ctx,
+ &disp_clk_regs,
+ &disp_clk_shift,
+ &disp_clk_mask);
+- if (pool->base.display_clock == NULL) {
++ if (pool->base.dccg == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+@@ -1213,7 +1213,7 @@ static bool construct(
+ * max_clock_state
+ */
+ if (dm_pp_get_static_clocks(ctx, &static_clk_info))
+- pool->base.display_clock->max_clks_state =
++ pool->base.dccg->max_clks_state =
+ static_clk_info.max_clocks_state;
+
+ {
+diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+index 00c0a1e..ae5b19d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+@@ -668,8 +668,8 @@ static void destruct(struct dce110_resource_pool *pool)
+ if (pool->base.dmcu != NULL)
+ dce_dmcu_destroy(&pool->base.dmcu);
+
+- if (pool->base.display_clock != NULL)
+- dce_disp_clk_destroy(&pool->base.display_clock);
++ if (pool->base.dccg != NULL)
++ dce_dccg_destroy(&pool->base.dccg);
+
+ if (pool->base.irqs != NULL) {
+ dal_irq_service_destroy(&pool->base.irqs);
+@@ -1124,11 +1124,11 @@ static bool construct(
+ }
+ }
+
+- pool->base.display_clock = dce112_disp_clk_create(ctx,
++ pool->base.dccg = dce112_dccg_create(ctx,
+ &disp_clk_regs,
+ &disp_clk_shift,
+ &disp_clk_mask);
+- if (pool->base.display_clock == NULL) {
++ if (pool->base.dccg == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+@@ -1158,7 +1158,7 @@ static bool construct(
+ * max_clock_state
+ */
+ if (dm_pp_get_static_clocks(ctx, &static_clk_info))
+- pool->base.display_clock->max_clks_state =
++ pool->base.dccg->max_clks_state =
+ static_clk_info.max_clocks_state;
+
+ {
+diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+index 2d58dac..13c388a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+@@ -494,8 +494,8 @@ static void destruct(struct dce110_resource_pool *pool)
+ if (pool->base.dmcu != NULL)
+ dce_dmcu_destroy(&pool->base.dmcu);
+
+- if (pool->base.display_clock != NULL)
+- dce_disp_clk_destroy(&pool->base.display_clock);
++ if (pool->base.dccg != NULL)
++ dce_dccg_destroy(&pool->base.dccg);
+ }
+
+ static void read_dce_straps(
+@@ -894,11 +894,11 @@ static bool construct(
+ }
+ }
+
+- pool->base.display_clock = dce120_disp_clk_create(ctx);
+- if (pool->base.display_clock == NULL) {
++ pool->base.dccg = dce120_dccg_create(ctx);
++ if (pool->base.dccg == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+- goto disp_clk_create_fail;
++ goto dccg_create_fail;
+ }
+
+ pool->base.dmcu = dce_dmcu_create(ctx,
+@@ -1011,7 +1011,7 @@ static bool construct(
+
+ irqs_create_fail:
+ controller_create_fail:
+-disp_clk_create_fail:
++dccg_create_fail:
+ clk_src_create_fail:
+ res_create_fail:
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+index 48a0689..7070053 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+@@ -683,8 +683,8 @@ static void destruct(struct dce110_resource_pool *pool)
+ }
+ }
+
+- if (pool->base.display_clock != NULL)
+- dce_disp_clk_destroy(&pool->base.display_clock);
++ if (pool->base.dccg != NULL)
++ dce_dccg_destroy(&pool->base.dccg);
+
+ if (pool->base.irqs != NULL) {
+ dal_irq_service_destroy(&pool->base.irqs);
+@@ -822,11 +822,11 @@ static bool dce80_construct(
+ }
+ }
+
+- pool->base.display_clock = dce_disp_clk_create(ctx,
++ pool->base.dccg = dce_dccg_create(ctx,
+ &disp_clk_regs,
+ &disp_clk_shift,
+ &disp_clk_mask);
+- if (pool->base.display_clock == NULL) {
++ if (pool->base.dccg == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+@@ -852,7 +852,7 @@ static bool dce80_construct(
+ goto res_create_fail;
+ }
+ if (dm_pp_get_static_clocks(ctx, &static_clk_info))
+- pool->base.display_clock->max_clks_state =
++ pool->base.dccg->max_clks_state =
+ static_clk_info.max_clocks_state;
+
+ {
+@@ -1006,11 +1006,11 @@ static bool dce81_construct(
+ }
+ }
+
+- pool->base.display_clock = dce_disp_clk_create(ctx,
++ pool->base.dccg = dce_dccg_create(ctx,
+ &disp_clk_regs,
+ &disp_clk_shift,
+ &disp_clk_mask);
+- if (pool->base.display_clock == NULL) {
++ if (pool->base.dccg == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+@@ -1037,7 +1037,7 @@ static bool dce81_construct(
+ }
+
+ if (dm_pp_get_static_clocks(ctx, &static_clk_info))
+- pool->base.display_clock->max_clks_state =
++ pool->base.dccg->max_clks_state =
+ static_clk_info.max_clocks_state;
+
+ {
+@@ -1187,11 +1187,11 @@ static bool dce83_construct(
+ }
+ }
+
+- pool->base.display_clock = dce_disp_clk_create(ctx,
++ pool->base.dccg = dce_dccg_create(ctx,
+ &disp_clk_regs,
+ &disp_clk_shift,
+ &disp_clk_mask);
+- if (pool->base.display_clock == NULL) {
++ if (pool->base.dccg == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+@@ -1218,7 +1218,7 @@ static bool dce83_construct(
+ }
+
+ if (dm_pp_get_static_clocks(ctx, &static_clk_info))
+- pool->base.display_clock->max_clks_state =
++ pool->base.dccg->max_clks_state =
+ static_clk_info.max_clocks_state;
+
+ {
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 2fdec57f..65e4189 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -2439,8 +2439,8 @@ static void ramp_up_dispclk_with_dpp(struct dc *dc, struct dc_state *context)
+ int dispclk_to_dpp_threshold = determine_dppclk_threshold(dc, context);
+
+ /* set disp clk to dpp clk threshold */
+- dc->res_pool->display_clock->funcs->set_dispclk(
+- dc->res_pool->display_clock,
++ dc->res_pool->dccg->funcs->set_dispclk(
++ dc->res_pool->dccg,
+ dispclk_to_dpp_threshold);
+
+ /* update request dpp clk division option */
+@@ -2458,8 +2458,8 @@ static void ramp_up_dispclk_with_dpp(struct dc *dc, struct dc_state *context)
+
+ /* If target clk not same as dppclk threshold, set to target clock */
+ if (dispclk_to_dpp_threshold != context->bw.dcn.calc_clk.dispclk_khz) {
+- dc->res_pool->display_clock->funcs->set_dispclk(
+- dc->res_pool->display_clock,
++ dc->res_pool->dccg->funcs->set_dispclk(
++ dc->res_pool->dccg,
+ context->bw.dcn.calc_clk.dispclk_khz);
+ }
+
+@@ -2488,8 +2488,8 @@ static void dcn10_set_bandwidth(
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
+ return;
+
+- dc->res_pool->display_clock->funcs->update_clocks(
+- dc->res_pool->display_clock,
++ dc->res_pool->dccg->funcs->update_clocks(
++ dc->res_pool->dccg,
+ &context->bw.dcn.calc_clk,
+ decrease_allowed);
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+index e548ce5..6255e95 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+@@ -791,8 +791,8 @@ static void destruct(struct dcn10_resource_pool *pool)
+ if (pool->base.dmcu != NULL)
+ dce_dmcu_destroy(&pool->base.dmcu);
+
+- if (pool->base.display_clock != NULL)
+- dce_disp_clk_destroy(&pool->base.display_clock);
++ if (pool->base.dccg != NULL)
++ dce_dccg_destroy(&pool->base.dccg);
+
+ kfree(pool->base.pp_smu);
+ }
+@@ -1074,8 +1074,8 @@ static bool construct(
+ }
+ }
+
+- pool->base.display_clock = dcn_disp_clk_create(ctx);
+- if (pool->base.display_clock == NULL) {
++ pool->base.dccg = dcn_dccg_create(ctx);
++ if (pool->base.dccg == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto fail;
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+index a4640e4a..327ea4e 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+@@ -163,7 +163,7 @@ struct resource_pool {
+ unsigned int audio_count;
+ struct audio_support audio_support;
+
+- struct display_clock *display_clock;
++ struct dccg *dccg;
+ struct irq_service *irqs;
+
+ struct abm *abm;
+@@ -283,7 +283,7 @@ struct dc_state {
+ struct dcn_bw_internal_vars dcn_bw_vars;
+ #endif
+
+- struct display_clock *dis_clk;
++ struct dccg *dis_clk;
+
+ struct kref refcount;
+ };
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h b/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
+index 8ce106f..3c7ccb6 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
+@@ -36,7 +36,7 @@ struct state_dependent_clocks {
+ int pixel_clk_khz;
+ };
+
+-struct display_clock {
++struct dccg {
+ struct dc_context *ctx;
+ const struct display_clock_funcs *funcs;
+
+@@ -46,13 +46,13 @@ struct display_clock {
+ };
+
+ struct display_clock_funcs {
+- void (*update_clocks)(struct display_clock *dccg,
++ void (*update_clocks)(struct dccg *dccg,
+ struct dc_clocks *new_clocks,
+ bool safe_to_lower);
+- int (*set_dispclk)(struct display_clock *disp_clk,
++ int (*set_dispclk)(struct dccg *dccg,
+ int requested_clock_khz);
+
+- int (*get_dp_ref_clk_frequency)(struct display_clock *disp_clk);
++ int (*get_dp_ref_clk_frequency)(struct dccg *dccg);
+ };
+
+ #endif /* __DISPLAY_CLOCK_H__ */
+--
+2.7.4
+
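The hunks above rename struct display_clock to struct dccg and route every caller through the funcs table (dc->res_pool->dccg->funcs->update_clocks/set_dispclk). The following stand-alone sketch illustrates that function-pointer (vtable) pattern; the struct layouts, the example implementation and the main() driver are simplified placeholders for illustration only, not the real DC definitions.

#include <stdio.h>

struct dccg;                                    /* forward declaration, as in the header above */

struct display_clock_funcs {
        int (*set_dispclk)(struct dccg *dccg, int requested_clock_khz);
};

struct dccg {
        const struct display_clock_funcs *funcs;
        int dispclk_khz;                        /* last programmed value */
};

static int example_set_dispclk(struct dccg *dccg, int requested_clock_khz)
{
        dccg->dispclk_khz = requested_clock_khz;        /* hardware programming omitted */
        return requested_clock_khz;
}

static const struct display_clock_funcs example_funcs = {
        .set_dispclk = example_set_dispclk,
};

int main(void)
{
        struct dccg clk = { .funcs = &example_funcs };

        /* Callers only ever go through the funcs table, mirroring dce100_set_bandwidth(). */
        clk.funcs->set_dispclk(&clk, 600000);
        printf("dispclk now %d kHz\n", clk.dispclk_khz);
        return 0;
}

Keeping callers behind the funcs table is what lets each ASIC family (DCE80/110/112/120, DCN) plug in its own clock programming without touching the call sites.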
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4755-drm-amd-display-move-clock-programming-from-set_band.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4755-drm-amd-display-move-clock-programming-from-set_band.patch
new file mode 100644
index 00000000..c81d35a3
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4755-drm-amd-display-move-clock-programming-from-set_band.patch
@@ -0,0 +1,284 @@
+From c4e54f7f507c22e27215324c31b676fbaaa6fb63 Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Wed, 23 May 2018 17:52:04 -0400
+Subject: [PATCH 4755/5725] drm/amd/display: move clock programming from
+ set_bandwidth to dccg
+
+This change moves dcn clock programming (with the exception of dispclk)
+into dccg. This should have no functional effect.
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 2 +-
+ drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c | 57 +++++++++++++-------
+ .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 62 +++++-----------------
+ 3 files changed, 51 insertions(+), 70 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+index 2b70ac6..9acdd9d 100644
+--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
++++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+@@ -997,7 +997,7 @@ bool dcn_validate_bandwidth(
+ }
+
+ context->bw.dcn.calc_clk.dppclk_khz = context->bw.dcn.calc_clk.dispclk_khz / v->dispclk_dppclk_ratio;
+-
++ context->bw.dcn.calc_clk.phyclk_khz = v->phyclk_per_state[v->voltage_level];
+ switch (v->voltage_level) {
+ case 0:
+ context->bw.dcn.calc_clk.max_supported_dppclk_khz =
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+index 890a3ec..93e6063 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+@@ -523,14 +523,18 @@ static void dce_clock_read_ss_info(struct dce_dccg *clk_dce)
+ }
+ }
+
++static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
++{
++ return ((safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk);
++}
++
+ static void dce12_update_clocks(struct dccg *dccg,
+ struct dc_clocks *new_clocks,
+ bool safe_to_lower)
+ {
+ struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
+
+- if ((new_clocks->dispclk_khz < dccg->clks.dispclk_khz && safe_to_lower)
+- || new_clocks->dispclk_khz > dccg->clks.dispclk_khz) {
++ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
+ clock_voltage_req.clocks_in_khz = new_clocks->dispclk_khz;
+ dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
+@@ -539,8 +543,7 @@ static void dce12_update_clocks(struct dccg *dccg,
+ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+ }
+
+- if ((new_clocks->phyclk_khz < dccg->clks.phyclk_khz && safe_to_lower)
+- || new_clocks->phyclk_khz > dccg->clks.phyclk_khz) {
++ if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, dccg->clks.phyclk_khz)) {
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK;
+ clock_voltage_req.clocks_in_khz = new_clocks->phyclk_khz;
+ dccg->clks.phyclk_khz = new_clocks->phyclk_khz;
+@@ -553,6 +556,11 @@ static void dcn_update_clocks(struct dccg *dccg,
+ struct dc_clocks *new_clocks,
+ bool safe_to_lower)
+ {
++ struct dc *dc = dccg->ctx->dc;
++ struct pp_smu_display_requirement_rv *smu_req_cur =
++ &dc->res_pool->pp_smu_req;
++ struct pp_smu_display_requirement_rv smu_req = *smu_req_cur;
++ struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
+ struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
+ bool send_request_to_increase = false;
+ bool send_request_to_lower = false;
+@@ -566,17 +574,14 @@ static void dcn_update_clocks(struct dccg *dccg,
+ #ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ if (send_request_to_increase
+ ) {
+- struct dc *core_dc = dccg->ctx->dc;
+-
+ /*use dcfclk to request voltage*/
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
+- clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(core_dc, new_clocks);
++ clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
+ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+ }
+ #endif
+
+- if ((new_clocks->dispclk_khz < dccg->clks.dispclk_khz && safe_to_lower)
+- || new_clocks->dispclk_khz > dccg->clks.dispclk_khz) {
++ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
+ clock_voltage_req.clocks_in_khz = new_clocks->dispclk_khz;
+ /* TODO: ramp up - dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);*/
+@@ -586,8 +591,7 @@ static void dcn_update_clocks(struct dccg *dccg,
+ send_request_to_lower = true;
+ }
+
+- if ((new_clocks->phyclk_khz < dccg->clks.phyclk_khz && safe_to_lower)
+- || new_clocks->phyclk_khz > dccg->clks.phyclk_khz) {
++ if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, dccg->clks.phyclk_khz)) {
+ dccg->clks.phyclk_khz = new_clocks->phyclk_khz;
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK;
+ clock_voltage_req.clocks_in_khz = new_clocks->phyclk_khz;
+@@ -596,36 +600,50 @@ static void dcn_update_clocks(struct dccg *dccg,
+ send_request_to_lower = true;
+ }
+
+- if ((new_clocks->fclk_khz < dccg->clks.fclk_khz && safe_to_lower)
+- || new_clocks->fclk_khz > dccg->clks.fclk_khz) {
++ if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, dccg->clks.fclk_khz)) {
+ dccg->clks.phyclk_khz = new_clocks->fclk_khz;
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_FCLK;
+ clock_voltage_req.clocks_in_khz = new_clocks->fclk_khz;
++ smu_req.hard_min_fclk_khz = new_clocks->fclk_khz;
+
+ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+ send_request_to_lower = true;
+ }
+
+- if ((new_clocks->dcfclk_khz < dccg->clks.dcfclk_khz && safe_to_lower)
+- || new_clocks->dcfclk_khz > dccg->clks.dcfclk_khz) {
++ if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, dccg->clks.dcfclk_khz)) {
+ dccg->clks.phyclk_khz = new_clocks->dcfclk_khz;
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
+ clock_voltage_req.clocks_in_khz = new_clocks->dcfclk_khz;
++ smu_req.hard_min_dcefclk_khz = new_clocks->dcfclk_khz;
+
+ send_request_to_lower = true;
+ }
+
++ if (should_set_clock(safe_to_lower,
++ new_clocks->dcfclk_deep_sleep_khz, dccg->clks.dcfclk_deep_sleep_khz)) {
++ dccg->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
++ smu_req.min_deep_sleep_dcefclk_mhz = new_clocks->dcfclk_deep_sleep_khz;
++ }
++
+ #ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ if (!send_request_to_increase && send_request_to_lower
+ ) {
+- struct dc *core_dc = dccg->ctx->dc;
+-
+ /*use dcfclk to request voltage*/
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
+- clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(core_dc, new_clocks);
++ clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
+ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+ }
+ #endif
++
++ if (new_clocks->phyclk_khz)
++ smu_req.display_count = 1;
++ else
++ smu_req.display_count = 0;
++
++ if (pp_smu->set_display_requirement)
++ pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
++
++ *smu_req_cur = smu_req;
+ }
+
+ static void dce_update_clocks(struct dccg *dccg,
+@@ -642,8 +660,7 @@ static void dce_update_clocks(struct dccg *dccg,
+ dccg->cur_min_clks_state = level_change_req.power_level;
+ }
+
+- if ((new_clocks->dispclk_khz < dccg->clks.dispclk_khz && safe_to_lower)
+- || new_clocks->dispclk_khz > dccg->clks.dispclk_khz) {
++ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
+ dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
+ dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 65e4189..66ecb86 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -2168,11 +2168,11 @@ static void dcn10_pplib_apply_display_requirements(
+ {
+ struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
+
+- pp_display_cfg->min_engine_clock_khz = context->bw.dcn.cur_clk.dcfclk_khz;
+- pp_display_cfg->min_memory_clock_khz = context->bw.dcn.cur_clk.fclk_khz;
+- pp_display_cfg->min_engine_clock_deep_sleep_khz = context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz;
+- pp_display_cfg->min_dcfc_deep_sleep_clock_khz = context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz;
+- pp_display_cfg->min_dcfclock_khz = context->bw.dcn.cur_clk.dcfclk_khz;
++ pp_display_cfg->min_engine_clock_khz = dc->res_pool->dccg->clks.dcfclk_khz;
++ pp_display_cfg->min_memory_clock_khz = dc->res_pool->dccg->clks.fclk_khz;
++ pp_display_cfg->min_engine_clock_deep_sleep_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz;
++ pp_display_cfg->min_dcfc_deep_sleep_clock_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz;
++ pp_display_cfg->min_dcfclock_khz = dc->res_pool->dccg->clks.dcfclk_khz;
+ pp_display_cfg->disp_clk_khz = context->bw.dcn.cur_clk.dispclk_khz;
+ dce110_fill_display_configs(context, pp_display_cfg);
+
+@@ -2376,11 +2376,6 @@ static void dcn10_apply_ctx_for_surface(
+ */
+ }
+
+-static inline bool should_set_clock(bool decrease_allowed, int calc_clk, int cur_clk)
+-{
+- return ((decrease_allowed && calc_clk < cur_clk) || calc_clk > cur_clk);
+-}
+-
+ static int determine_dppclk_threshold(struct dc *dc, struct dc_state *context)
+ {
+ bool request_dpp_div = context->bw.dcn.calc_clk.dispclk_khz >
+@@ -2471,16 +2466,16 @@ static void ramp_up_dispclk_with_dpp(struct dc *dc, struct dc_state *context)
+ context->bw.dcn.calc_clk.max_supported_dppclk_khz;
+ }
+
++static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
++{
++ return ((safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk);
++}
++
+ static void dcn10_set_bandwidth(
+ struct dc *dc,
+ struct dc_state *context,
+ bool decrease_allowed)
+ {
+- struct pp_smu_display_requirement_rv *smu_req_cur =
+- &dc->res_pool->pp_smu_req;
+- struct pp_smu_display_requirement_rv smu_req = *smu_req_cur;
+- struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
+-
+ if (dc->debug.sanity_checks) {
+ dcn10_verify_allow_pstate_change_high(dc);
+ }
+@@ -2488,45 +2483,14 @@ static void dcn10_set_bandwidth(
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
+ return;
+
++ if (context->stream_count == 0)
++ context->bw.dcn.calc_clk.phyclk_khz = 0;
++
+ dc->res_pool->dccg->funcs->update_clocks(
+ dc->res_pool->dccg,
+ &context->bw.dcn.calc_clk,
+ decrease_allowed);
+
+- if (should_set_clock(
+- decrease_allowed,
+- context->bw.dcn.calc_clk.dcfclk_khz,
+- dc->current_state->bw.dcn.cur_clk.dcfclk_khz)) {
+- context->bw.dcn.cur_clk.dcfclk_khz =
+- context->bw.dcn.calc_clk.dcfclk_khz;
+- smu_req.hard_min_dcefclk_khz =
+- context->bw.dcn.calc_clk.dcfclk_khz;
+- }
+-
+- if (should_set_clock(
+- decrease_allowed,
+- context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
+- dc->current_state->bw.dcn.cur_clk.dcfclk_deep_sleep_khz)) {
+- context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz =
+- context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz;
+- }
+-
+- if (should_set_clock(
+- decrease_allowed,
+- context->bw.dcn.calc_clk.fclk_khz,
+- dc->current_state->bw.dcn.cur_clk.fclk_khz)) {
+- context->bw.dcn.cur_clk.fclk_khz =
+- context->bw.dcn.calc_clk.fclk_khz;
+- smu_req.hard_min_fclk_khz = context->bw.dcn.calc_clk.fclk_khz;
+- }
+-
+- smu_req.display_count = context->stream_count;
+-
+- if (pp_smu->set_display_requirement)
+- pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
+-
+- *smu_req_cur = smu_req;
+-
+ /* make sure dcf clk is before dpp clk to
+ * make sure we have enough voltage to run dpp clk
+ */
+--
+2.7.4
+
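Patch 4755 above factors the repeated lower/raise comparison into a should_set_clock() helper: a clock is reprogrammed when it must rise, or when it may fall and lowering has been declared safe. The stand-alone sketch below restates that rule with illustrative values; only the helper's body is taken from the patch, the rest is test scaffolding.

#include <stdbool.h>
#include <stdio.h>

static bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
{
        return (safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk;
}

int main(void)
{
        printf("%d\n", should_set_clock(false, 400000, 600000));  /* 0: would lower, not yet safe */
        printf("%d\n", should_set_clock(true,  400000, 600000));  /* 1: lower, and lowering is safe */
        printf("%d\n", should_set_clock(false, 800000, 600000));  /* 1: raising is always allowed */
        return 0;
}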
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4756-drm-amd-display-Adding-dm-pp-clocks-getting-by-volta.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4756-drm-amd-display-Adding-dm-pp-clocks-getting-by-volta.patch
new file mode 100644
index 00000000..62fd9db5
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4756-drm-amd-display-Adding-dm-pp-clocks-getting-by-volta.patch
@@ -0,0 +1,80 @@
+From 2d581ebb87e5ee7742b08c00ca284dfd53c218f9 Mon Sep 17 00:00:00 2001
+From: Mikita Lipski <mikita.lipski@amd.com>
+Date: Mon, 9 Apr 2018 09:48:15 -0400
+Subject: [PATCH 4756/5725] drm/amd/display: Adding dm-pp clocks getting by
+ voltage
+
+Function to get clock levels by voltage from PPLib
+
+Signed-off-by: Mikita Lipski <mikita.lipski@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../drm/amd/display/amdgpu_dm/amdgpu_dm_services.c | 43 +++++++++++++++++++++-
+ 1 file changed, 41 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+index 9f46421..9e923a4 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+@@ -226,6 +226,34 @@ static void pp_to_dc_clock_levels(
+ }
+ }
+
++static void pp_to_dc_clock_levels_with_voltage(
++ const struct pp_clock_levels_with_voltage *pp_clks,
++ struct dm_pp_clock_levels_with_voltage *clk_level_info,
++ enum dm_pp_clock_type dc_clk_type)
++{
++ uint32_t i;
++
++ if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
++ DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
++ DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
++ pp_clks->num_levels,
++ DM_PP_MAX_CLOCK_LEVELS);
++
++ clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
++ } else
++ clk_level_info->num_levels = pp_clks->num_levels;
++
++ DRM_INFO("DM_PPLIB: values for %s clock\n",
++ DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
++
++ for (i = 0; i < clk_level_info->num_levels; i++) {
++ DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->data[i].clocks_in_khz);
++ clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
++ clk_level_info->data[i].voltage_in_mv = pp_clks->data[i].voltage_in_mv;
++ }
++}
++
++
+ bool dm_pp_get_clock_levels_by_type(
+ const struct dc_context *ctx,
+ enum dm_pp_clock_type clk_type,
+@@ -312,8 +340,19 @@ bool dm_pp_get_clock_levels_by_type_with_voltage(
+ enum dm_pp_clock_type clk_type,
+ struct dm_pp_clock_levels_with_voltage *clk_level_info)
+ {
+- /* TODO: to be implemented */
+- return false;
++ struct amdgpu_device *adev = ctx->driver_context;
++ void *pp_handle = adev->powerplay.pp_handle;
++ struct pp_clock_levels_with_voltage pp_clk_info = {0};
++ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
++
++ if (pp_funcs->get_clock_by_type_with_voltage(pp_handle,
++ dc_to_pp_clock_type(clk_type),
++ &pp_clk_info))
++ return false;
++
++ pp_to_dc_clock_levels_with_voltage(&pp_clk_info, clk_level_info, clk_type);
++
++ return true;
+ }
+
+ bool dm_pp_notify_wm_clock_changes(
+--
+2.7.4
+
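The pp_to_dc_clock_levels_with_voltage() helper added above clamps the reported level count to DM_PP_MAX_CLOCK_LEVELS before copying the per-level clock and voltage values. A minimal sketch of that clamp-and-copy pattern, assuming simplified structures and an 8-level limit in place of the real dm_pp types:

#include <stdint.h>
#include <stdio.h>

#define MAX_LEVELS 8                            /* stand-in for DM_PP_MAX_CLOCK_LEVELS */

struct level { uint32_t clocks_in_khz; uint32_t voltage_in_mv; };
struct src_levels { uint32_t num_levels; struct level data[16]; };
struct dst_levels { uint32_t num_levels; struct level data[MAX_LEVELS]; };

static void copy_levels(const struct src_levels *in, struct dst_levels *out)
{
        uint32_t i;

        /* Never copy more levels than the destination can hold. */
        out->num_levels = in->num_levels > MAX_LEVELS ? MAX_LEVELS : in->num_levels;

        for (i = 0; i < out->num_levels; i++)
                out->data[i] = in->data[i];
}

int main(void)
{
        struct src_levels in = { .num_levels = 12 };
        struct dst_levels out;

        copy_levels(&in, &out);
        printf("copied %u levels\n", (unsigned)out.num_levels);  /* prints 8 */
        return 0;
}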
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4757-drm-amd-display-Apply-clock-for-voltage-request.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4757-drm-amd-display-Apply-clock-for-voltage-request.patch
new file mode 100644
index 00000000..517d0723
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4757-drm-amd-display-Apply-clock-for-voltage-request.patch
@@ -0,0 +1,61 @@
+From 395728c607e2bbac32ef84d89705716133c4fb50 Mon Sep 17 00:00:00 2001
+From: Mikita Lipski <mikita.lipski@amd.com>
+Date: Mon, 9 Apr 2018 13:40:00 -0400
+Subject: [PATCH 4757/5725] drm/amd/display: Apply clock for voltage request
+
+Translate dm_pp structure to pp type
+Call PP lib to apply clock voltage request for display
+
+Signed-off-by: Mikita Lipski <mikita.lipski@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../drm/amd/display/amdgpu_dm/amdgpu_dm_services.c | 31 ++++++++++++++++++++--
+ 1 file changed, 29 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+index 9e923a4..985e69e 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+@@ -375,8 +375,35 @@ bool dm_pp_apply_clock_for_voltage_request(
+ const struct dc_context *ctx,
+ struct dm_pp_clock_for_voltage_req *clock_for_voltage_req)
+ {
+- /* TODO: to be implemented */
+- return false;
++ struct amdgpu_device *adev = ctx->driver_context;
++ struct pp_display_clock_request *pp_clock_request = {0};
++ int ret = 0;
++ switch (clock_for_voltage_req->clk_type) {
++ case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
++ pp_clock_request->clock_type = amd_pp_disp_clock;
++ break;
++
++ case DM_PP_CLOCK_TYPE_DCEFCLK:
++ pp_clock_request->clock_type = amd_pp_dcef_clock;
++ break;
++
++ case DM_PP_CLOCK_TYPE_PIXELCLK:
++ pp_clock_request->clock_type = amd_pp_pixel_clock;
++ break;
++
++ default:
++ return false;
++ }
++
++ pp_clock_request->clock_freq_in_khz = clock_for_voltage_req->clocks_in_khz;
++
++ if (adev->powerplay.pp_funcs->display_clock_voltage_request)
++ ret = adev->powerplay.pp_funcs->display_clock_voltage_request(
++ adev->powerplay.pp_handle,
++ pp_clock_request);
++ if (ret)
++ return false;
++ return true;
+ }
+
+ bool dm_pp_get_static_clocks(
+--
+2.7.4
+
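dm_pp_apply_clock_for_voltage_request() above maps a dm_pp clock type onto the corresponding powerplay type in a switch and bails out with false for anything it does not recognize. The sketch below shows that translate-or-fail shape with invented enum values; it is not the actual amd_pp/dm_pp enum set.

#include <stdbool.h>
#include <stdio.h>

enum dm_clk { DM_DISPLAY_CLK, DM_DCEF_CLK, DM_PIXEL_CLK, DM_UNKNOWN_CLK };
enum pp_clk { PP_DISP_CLOCK, PP_DCEF_CLOCK, PP_PIXEL_CLOCK };

static bool translate_clk(enum dm_clk in, enum pp_clk *out)
{
        switch (in) {
        case DM_DISPLAY_CLK: *out = PP_DISP_CLOCK;  return true;
        case DM_DCEF_CLK:    *out = PP_DCEF_CLOCK;  return true;
        case DM_PIXEL_CLK:   *out = PP_PIXEL_CLOCK; return true;
        default:             return false;          /* unsupported type: reject the request */
        }
}

int main(void)
{
        enum pp_clk out;

        printf("%d\n", translate_clk(DM_DCEF_CLK, &out));     /* 1 */
        printf("%d\n", translate_clk(DM_UNKNOWN_CLK, &out));  /* 0 */
        return 0;
}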
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4758-drm-amd-display-Adding-Get-static-clocks-for-dm_pp-i.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4758-drm-amd-display-Adding-Get-static-clocks-for-dm_pp-i.patch
new file mode 100644
index 00000000..ab9baa42
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4758-drm-amd-display-Adding-Get-static-clocks-for-dm_pp-i.patch
@@ -0,0 +1,48 @@
+From a4f63bb0185794b377a28fbc4bbe0b2a99ee1ccf Mon Sep 17 00:00:00 2001
+From: Mikita Lipski <mikita.lipski@amd.com>
+Date: Wed, 18 Apr 2018 17:19:23 -0400
+Subject: [PATCH 4758/5725] drm/amd/display: Adding Get static clocks for dm_pp
+ interface
+
+Adding a call to powerplay to get system clocks and translate them to the dm structure
+
+Signed-off-by: Mikita Lipski <mikita.lipski@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c | 18 ++++++++++++++++--
+ 1 file changed, 16 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+index 985e69e..11894b7 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+@@ -410,8 +410,22 @@ bool dm_pp_get_static_clocks(
+ const struct dc_context *ctx,
+ struct dm_pp_static_clock_info *static_clk_info)
+ {
+- /* TODO: to be implemented */
+- return false;
++ struct amdgpu_device *adev = ctx->driver_context;
++ struct amd_pp_clock_info *pp_clk_info = {0};
++ int ret = 0;
++
++ if (adev->powerplay.pp_funcs->get_current_clocks)
++ ret = adev->powerplay.pp_funcs->get_current_clocks(
++ adev->powerplay.pp_handle,
++ pp_clk_info);
++ if (ret)
++ return false;
++
++ static_clk_info->max_clocks_state = pp_clk_info->max_clocks_state;
++ static_clk_info->max_mclk_khz = pp_clk_info->max_memory_clock;
++ static_clk_info->max_sclk_khz = pp_clk_info->max_engine_clock;
++
++ return true;
+ }
+
+ void dm_pp_get_funcs_rv(
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4759-drm-amd-display-dal-3.1.48.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4759-drm-amd-display-dal-3.1.48.patch
new file mode 100644
index 00000000..66a7646e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4759-drm-amd-display-dal-3.1.48.patch
@@ -0,0 +1,28 @@
+From 002ea8636806828754c3e5254827ccfbf3bcbf14 Mon Sep 17 00:00:00 2001
+From: Tony Cheng <tony.cheng@amd.com>
+Date: Tue, 8 May 2018 12:25:15 -0400
+Subject: [PATCH 4759/5725] drm/amd/display: dal 3.1.48
+
+Signed-off-by: Tony Cheng <tony.cheng@amd.com>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 3471485..94f7a7e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -38,7 +38,7 @@
+ #include "inc/compressor.h"
+ #include "dml/display_mode_lib.h"
+
+-#define DC_VER "3.1.47"
++#define DC_VER "3.1.48"
+
+ #define MAX_SURFACES 3
+ #define MAX_STREAMS 6
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4760-drm-amd-display-Introduce-pp-smu-raven-functions.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4760-drm-amd-display-Introduce-pp-smu-raven-functions.patch
new file mode 100644
index 00000000..da37ef10
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4760-drm-amd-display-Introduce-pp-smu-raven-functions.patch
@@ -0,0 +1,170 @@
+From 28e8af2cc6afe6672a97f49067b89a42268a9685 Mon Sep 17 00:00:00 2001
+From: Mikita Lipski <mikita.lipski@amd.com>
+Date: Wed, 11 Apr 2018 14:52:41 -0400
+Subject: [PATCH 4760/5725] drm/amd/display: Introduce pp-smu raven functions
+
+DM powerplay calls for DCN10, allowing DC to bypass PPLib
+and call the SMU functions directly.
+
+Signed-off-by: Mikita Lipski <mikita.lipski@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../drm/amd/display/amdgpu_dm/amdgpu_dm_services.c | 88 +++++++++++++++++++++-
+ drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 4 +-
+ drivers/gpu/drm/amd/display/dc/dm_pp_smu.h | 6 +-
+ 3 files changed, 92 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+index 11894b7..e7e3ed9 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+@@ -34,6 +34,11 @@
+ #include "amdgpu_dm.h"
+ #include "amdgpu_dm_irq.h"
+ #include "amdgpu_pm.h"
++#include "dm_pp_smu.h"
++#include "../../powerplay/inc/hwmgr.h"
++#include "../../powerplay/hwmgr/smu10_hwmgr.h"
++
++
+
+ unsigned long long dm_get_elapse_time_in_ns(struct dc_context *ctx,
+ unsigned long long current_time_stamp,
+@@ -428,9 +433,90 @@ bool dm_pp_get_static_clocks(
+ return true;
+ }
+
++void pp_rv_set_display_requirement(struct pp_smu *pp,
++ struct pp_smu_display_requirement_rv *req)
++{
++ struct amdgpu_device *adev = pp->ctx->driver_context;
++ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
++ int ret = 0;
++ if (hwmgr->hwmgr_func->set_deep_sleep_dcefclk)
++ ret = hwmgr->hwmgr_func->set_deep_sleep_dcefclk(hwmgr, req->hard_min_dcefclk_khz/10);
++ if (hwmgr->hwmgr_func->set_active_display_count)
++ ret = hwmgr->hwmgr_func->set_active_display_count(hwmgr, req->display_count);
++
++ //store_cc6 is not yet implemented in SMU level
++}
++
++void pp_rv_set_wm_ranges(struct pp_smu *pp,
++ struct pp_smu_wm_range_sets *ranges)
++{
++ struct amdgpu_device *adev = pp->ctx->driver_context;
++ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
++ struct pp_wm_sets_with_clock_ranges_soc15 ranges_soc15 = {0};
++ int i = 0;
++
++ if (!hwmgr->hwmgr_func->set_watermarks_for_clocks_ranges ||
++ !pp || !ranges)
++ return;
++
++ //not entirely sure if thats a correct assignment
++ ranges_soc15.num_wm_sets_dmif = ranges->num_reader_wm_sets;
++ ranges_soc15.num_wm_sets_mcif = ranges->num_writer_wm_sets;
++
++ for (i = 0; i < ranges_soc15.num_wm_sets_dmif; i++) {
++ if (ranges->reader_wm_sets[i].wm_inst > 3)
++ ranges_soc15.wm_sets_dmif[i].wm_set_id = DC_WM_SET_A;
++ else
++ ranges_soc15.wm_sets_dmif[i].wm_set_id =
++ ranges->reader_wm_sets[i].wm_inst;
++ ranges_soc15.wm_sets_dmif[i].wm_max_dcefclk_in_khz =
++ ranges->reader_wm_sets[i].max_drain_clk_khz;
++ ranges_soc15.wm_sets_dmif[i].wm_min_dcefclk_in_khz =
++ ranges->reader_wm_sets[i].min_drain_clk_khz;
++ ranges_soc15.wm_sets_dmif[i].wm_max_memclk_in_khz =
++ ranges->reader_wm_sets[i].max_fill_clk_khz;
++ ranges_soc15.wm_sets_dmif[i].wm_min_memclk_in_khz =
++ ranges->reader_wm_sets[i].min_fill_clk_khz;
++ }
++
++ for (i = 0; i < ranges_soc15.num_wm_sets_mcif; i++) {
++ if (ranges->writer_wm_sets[i].wm_inst > 3)
++ ranges_soc15.wm_sets_dmif[i].wm_set_id = DC_WM_SET_A;
++ else
++ ranges_soc15.wm_sets_mcif[i].wm_set_id =
++ ranges->writer_wm_sets[i].wm_inst;
++ ranges_soc15.wm_sets_mcif[i].wm_max_socclk_in_khz =
++ ranges->writer_wm_sets[i].max_fill_clk_khz;
++ ranges_soc15.wm_sets_mcif[i].wm_min_socclk_in_khz =
++ ranges->writer_wm_sets[i].min_fill_clk_khz;
++ ranges_soc15.wm_sets_mcif[i].wm_max_memclk_in_khz =
++ ranges->writer_wm_sets[i].max_fill_clk_khz;
++ ranges_soc15.wm_sets_mcif[i].wm_min_memclk_in_khz =
++ ranges->writer_wm_sets[i].min_fill_clk_khz;
++ }
++
++ hwmgr->hwmgr_func->set_watermarks_for_clocks_ranges(hwmgr, &ranges_soc15);
++
++}
++
++void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
++{
++ struct amdgpu_device *adev = pp->ctx->driver_context;
++ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
++
++ if (hwmgr->hwmgr_func->smus_notify_pwe)
++ hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);
++}
++
+ void dm_pp_get_funcs_rv(
+ struct dc_context *ctx,
+ struct pp_smu_funcs_rv *funcs)
+-{}
++{
++ funcs->pp_smu.ctx = ctx;
++ funcs->set_display_requirement = pp_rv_set_display_requirement;
++ funcs->set_wm_ranges = pp_rv_set_wm_ranges;
++ funcs->set_pme_wa_enable = pp_rv_set_pme_wa_enable;
++}
++
+
+ /**** end of power component interfaces ****/
+diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+index 9acdd9d..9ce329e 100644
+--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
++++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+@@ -1358,8 +1358,8 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
+ /* SOCCLK does not affect anytihng but writeback for DCN so for now we dont
+ * care what the value is, hence min to overdrive level
+ */
+- ranges.num_reader_wm_sets = WM_COUNT;
+- ranges.num_writer_wm_sets = WM_COUNT;
++ ranges.num_reader_wm_sets = WM_SET_COUNT;
++ ranges.num_writer_wm_sets = WM_SET_COUNT;
+ ranges.reader_wm_sets[0].wm_inst = WM_A;
+ ranges.reader_wm_sets[0].min_drain_clk_khz = min_dcfclk_khz;
+ ranges.reader_wm_sets[0].max_drain_clk_khz = max_dcfclk_khz;
+diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
+index eac4bfe..58ed205 100644
+--- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
++++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
+@@ -40,7 +40,7 @@ enum wm_set_id {
+ WM_B,
+ WM_C,
+ WM_D,
+- WM_COUNT,
++ WM_SET_COUNT,
+ };
+
+ struct pp_smu_wm_set_range {
+@@ -53,10 +53,10 @@ struct pp_smu_wm_set_range {
+
+ struct pp_smu_wm_range_sets {
+ uint32_t num_reader_wm_sets;
+- struct pp_smu_wm_set_range reader_wm_sets[WM_COUNT];
++ struct pp_smu_wm_set_range reader_wm_sets[WM_SET_COUNT];
+
+ uint32_t num_writer_wm_sets;
+- struct pp_smu_wm_set_range writer_wm_sets[WM_COUNT];
++ struct pp_smu_wm_set_range writer_wm_sets[WM_SET_COUNT];
+ };
+
+ struct pp_smu_display_requirement_rv {
+--
+2.7.4
+
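dm_pp_get_funcs_rv() above now stores the dc_context in pp_smu.ctx and registers the three Raven callbacks, and callers check each function pointer before invoking it. Below is a small sketch of that register-then-guard pattern; the single callback and all names are invented for illustration and are not the real pp_smu_funcs_rv layout.

#include <stdio.h>

struct pp_smu { void *ctx; };

struct pp_smu_funcs_example {
        struct pp_smu pp_smu;
        void (*set_display_count)(struct pp_smu *pp, int count);
};

static void example_set_display_count(struct pp_smu *pp, int count)
{
        (void)pp;
        printf("display_count = %d\n", count);
}

static void get_funcs(void *ctx, struct pp_smu_funcs_example *funcs)
{
        funcs->pp_smu.ctx = ctx;                                /* provider fills in its context... */
        funcs->set_display_count = example_set_display_count;   /* ...and its callbacks */
}

int main(void)
{
        struct pp_smu_funcs_example funcs;

        get_funcs(NULL, &funcs);
        if (funcs.set_display_count)            /* guard before calling, as the driver does */
                funcs.set_display_count(&funcs.pp_smu, 1);
        return 0;
}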
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4761-drm-amd-display-remove-invalid-assert-when-no-max_pi.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4761-drm-amd-display-remove-invalid-assert-when-no-max_pi.patch
new file mode 100644
index 00000000..1ad7bb98
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4761-drm-amd-display-remove-invalid-assert-when-no-max_pi.patch
@@ -0,0 +1,30 @@
+From 743886aa9e9b1f819edf6f5f83e9ad38b02d1d13 Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Mon, 28 May 2018 18:09:52 -0400
+Subject: [PATCH 4761/5725] drm/amd/display: remove invalid assert when no
+ max_pixel_clk is found
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index 8a51b9e..07633a1 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -1761,9 +1761,6 @@ static uint32_t get_max_pixel_clock_for_all_paths(
+ pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
+ }
+
+- if (max_pix_clk == 0)
+- ASSERT(0);
+-
+ return max_pix_clk;
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4762-drm-amd-display-Use-tg-count-for-opp-init.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4762-drm-amd-display-Use-tg-count-for-opp-init.patch
new file mode 100644
index 00000000..d79274f8
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4762-drm-amd-display-Use-tg-count-for-opp-init.patch
@@ -0,0 +1,32 @@
+From 07c16be1802e8399fbc7b111c3323d9e4a1aa225 Mon Sep 17 00:00:00 2001
+From: Yongqiang Sun <yongqiang.sun@amd.com>
+Date: Tue, 29 May 2018 07:18:27 -0700
+Subject: [PATCH 4762/5725] drm/amd/display: Use tg count for opp init.
+
+If the tg count is not equal to the FE pipe count, iterating over the tgs
+with the pipe count will cause a BSOD.
+
+Signed-off-by: Yongqiang Sun <yongqiang.sun@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 66ecb86..eae2fd7 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -1025,7 +1025,7 @@ static void dcn10_init_hw(struct dc *dc)
+ /* Reset all MPCC muxes */
+ dc->res_pool->mpc->funcs->mpc_init(dc->res_pool->mpc);
+
+- for (i = 0; i < dc->res_pool->pipe_count; i++) {
++ for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
+ struct timing_generator *tg = dc->res_pool->timing_generators[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ struct hubp *hubp = dc->res_pool->hubps[i];
+--
+2.7.4
+
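The one-line fix above iterates the timing generators with timing_generator_count instead of pipe_count, so the loop bound matches the array actually being indexed. A tiny stand-alone sketch of that bug class, with placeholder counts:

#include <stdio.h>

#define TG_COUNT   4
#define PIPE_COUNT 6    /* larger than TG_COUNT: using it as the bound would overrun the array */

int main(void)
{
        int timing_generators[TG_COUNT] = { 10, 11, 12, 13 };
        int i;

        for (i = 0; i < TG_COUNT; i++)          /* bound matches the array being walked */
                printf("tg[%d] = %d\n", i, timing_generators[i]);
        return 0;
}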
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4763-drm-amd-display-Use-local-structs-instead-of-struct-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4763-drm-amd-display-Use-local-structs-instead-of-struct-.patch
new file mode 100644
index 00000000..a3e8fa8b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4763-drm-amd-display-Use-local-structs-instead-of-struct-.patch
@@ -0,0 +1,87 @@
+From 6bd8c0cc42dc0dca31982a2f81ee4bbe5514449b Mon Sep 17 00:00:00 2001
+From: Mikita Lipski <mikita.lipski@amd.com>
+Date: Tue, 29 May 2018 16:15:12 -0400
+Subject: [PATCH 4763/5725] drm/amd/display: Use local structs instead of
+ struct pointers
+
+Change struct pointers to structs created on the stack.
+This fixes a mistake in a previous patch that introduced the dm_pplib functions.
+
+Signed-off-by: Mikita Lipski <mikita.lipski@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../drm/amd/display/amdgpu_dm/amdgpu_dm_services.c | 22 +++++++++++-----------
+ 1 file changed, 11 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+index e7e3ed9..4c85643 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+@@ -381,31 +381,31 @@ bool dm_pp_apply_clock_for_voltage_request(
+ struct dm_pp_clock_for_voltage_req *clock_for_voltage_req)
+ {
+ struct amdgpu_device *adev = ctx->driver_context;
+- struct pp_display_clock_request *pp_clock_request = {0};
++ struct pp_display_clock_request pp_clock_request = {0};
+ int ret = 0;
+ switch (clock_for_voltage_req->clk_type) {
+ case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+- pp_clock_request->clock_type = amd_pp_disp_clock;
++ pp_clock_request.clock_type = amd_pp_disp_clock;
+ break;
+
+ case DM_PP_CLOCK_TYPE_DCEFCLK:
+- pp_clock_request->clock_type = amd_pp_dcef_clock;
++ pp_clock_request.clock_type = amd_pp_dcef_clock;
+ break;
+
+ case DM_PP_CLOCK_TYPE_PIXELCLK:
+- pp_clock_request->clock_type = amd_pp_pixel_clock;
++ pp_clock_request.clock_type = amd_pp_pixel_clock;
+ break;
+
+ default:
+ return false;
+ }
+
+- pp_clock_request->clock_freq_in_khz = clock_for_voltage_req->clocks_in_khz;
++ pp_clock_request.clock_freq_in_khz = clock_for_voltage_req->clocks_in_khz;
+
+ if (adev->powerplay.pp_funcs->display_clock_voltage_request)
+ ret = adev->powerplay.pp_funcs->display_clock_voltage_request(
+ adev->powerplay.pp_handle,
+- pp_clock_request);
++ &pp_clock_request);
+ if (ret)
+ return false;
+ return true;
+@@ -416,19 +416,19 @@ bool dm_pp_get_static_clocks(
+ struct dm_pp_static_clock_info *static_clk_info)
+ {
+ struct amdgpu_device *adev = ctx->driver_context;
+- struct amd_pp_clock_info *pp_clk_info = {0};
++ struct amd_pp_clock_info pp_clk_info = {0};
+ int ret = 0;
+
+ if (adev->powerplay.pp_funcs->get_current_clocks)
+ ret = adev->powerplay.pp_funcs->get_current_clocks(
+ adev->powerplay.pp_handle,
+- pp_clk_info);
++ &pp_clk_info);
+ if (ret)
+ return false;
+
+- static_clk_info->max_clocks_state = pp_clk_info->max_clocks_state;
+- static_clk_info->max_mclk_khz = pp_clk_info->max_memory_clock;
+- static_clk_info->max_sclk_khz = pp_clk_info->max_engine_clock;
++ static_clk_info->max_clocks_state = pp_clk_info.max_clocks_state;
++ static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock;
++ static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock;
+
+ return true;
+ }
+--
+2.7.4
+
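Patches 4757 and 4758 declared the powerplay request/info objects as pointers initialized with {0} — i.e. NULL — and then dereferenced them; the patch above replaces them with zero-initialized structs on the stack whose addresses are passed down. A minimal sketch of the corrected pattern, with placeholder types and a stubbed powerplay callback:

#include <stdbool.h>
#include <stdio.h>

struct clock_request { int clock_type; int clock_freq_in_khz; };

/* stand-in for the powerplay display_clock_voltage_request callback */
static int stub_voltage_request(void *handle, struct clock_request *req)
{
        (void)handle;
        return req->clock_freq_in_khz > 0 ? 0 : -1;     /* 0 means success */
}

static bool apply_request(int khz)
{
        struct clock_request req = {0};                 /* stack object, safely zeroed */

        req.clock_freq_in_khz = khz;
        return stub_voltage_request(NULL, &req) == 0;   /* pass its address, never a NULL pointer */
}

int main(void)
{
        printf("%d\n", apply_request(600000));          /* 1 */
        return 0;
}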
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4764-drm-amd-display-Add-clock-types-to-applying-clk-for-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4764-drm-amd-display-Add-clock-types-to-applying-clk-for-.patch
new file mode 100644
index 00000000..41f3cba4
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4764-drm-amd-display-Add-clock-types-to-applying-clk-for-.patch
@@ -0,0 +1,44 @@
+From ae07d942bbe22d3bff4a06ddba7a5699ada1742c Mon Sep 17 00:00:00 2001
+From: Mikita Lipski <mikita.lipski@amd.com>
+Date: Tue, 29 May 2018 16:20:37 -0400
+Subject: [PATCH 4764/5725] drm/amd/display: Add clock types to applying clk
+ for voltage
+
+Add DCF and FCLK clock case statements for changing raven's
+clocks for voltage request.
+Also maintain DCEF clock for DCE120 calls.
+
+Signed-off-by: Mikita Lipski <mikita.lipski@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+index 4c85643..55fea6c 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+@@ -392,10 +392,18 @@ bool dm_pp_apply_clock_for_voltage_request(
+ pp_clock_request.clock_type = amd_pp_dcef_clock;
+ break;
+
++ case DM_PP_CLOCK_TYPE_DCFCLK:
++ pp_clock_request.clock_type = amd_pp_dcf_clock;
++ break;
++
+ case DM_PP_CLOCK_TYPE_PIXELCLK:
+ pp_clock_request.clock_type = amd_pp_pixel_clock;
+ break;
+
++ case DM_PP_CLOCK_TYPE_FCLK:
++ pp_clock_request.clock_type = amd_pp_f_clock;
++ break;
++
+ default:
+ return false;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4765-drm-amd-display-get-rid-of-cur_clks-from-dcn_bw_outp.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4765-drm-amd-display-get-rid-of-cur_clks-from-dcn_bw_outp.patch
new file mode 100644
index 00000000..b93826bb
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4765-drm-amd-display-get-rid-of-cur_clks-from-dcn_bw_outp.patch
@@ -0,0 +1,348 @@
+From c47d8492b5f4f593a22b512526c172adb6c29f6e Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Wed, 23 May 2018 18:02:27 -0400
+Subject: [PATCH 4765/5725] drm/amd/display: get rid of cur_clks from
+ dcn_bw_output
+
+Cleans up dcn_bw_output to only contain calculated info,
+actual programmed values will now be stored in respective blocks.
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Nikola Cornij <Nikola.Cornij@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 28 +++----
+ drivers/gpu/drm/amd/display/dc/core/dc_debug.c | 24 +++---
+ drivers/gpu/drm/amd/display/dc/core/dc_link.c | 2 +-
+ drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c | 4 +-
+ .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 85 +++++++++++-----------
+ drivers/gpu/drm/amd/display/dc/inc/core_types.h | 3 +-
+ 6 files changed, 72 insertions(+), 74 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+index 9ce329e..b8195e5 100644
+--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
++++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+@@ -977,42 +977,42 @@ bool dcn_validate_bandwidth(
+
+ display_pipe_configuration(v);
+ calc_wm_sets_and_perf_params(context, v);
+- context->bw.dcn.calc_clk.fclk_khz = (int)(bw_consumed * 1000000 /
++ context->bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 /
+ (ddr4_dram_factor_single_Channel * v->number_of_channels));
+ if (bw_consumed == v->fabric_and_dram_bandwidth_vmin0p65) {
+- context->bw.dcn.calc_clk.fclk_khz = (int)(bw_consumed * 1000000 / 32);
++ context->bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 / 32);
+ }
+
+- context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz = (int)(v->dcf_clk_deep_sleep * 1000);
+- context->bw.dcn.calc_clk.dcfclk_khz = (int)(v->dcfclk * 1000);
++ context->bw.dcn.clk.dcfclk_deep_sleep_khz = (int)(v->dcf_clk_deep_sleep * 1000);
++ context->bw.dcn.clk.dcfclk_khz = (int)(v->dcfclk * 1000);
+
+- context->bw.dcn.calc_clk.dispclk_khz = (int)(v->dispclk * 1000);
++ context->bw.dcn.clk.dispclk_khz = (int)(v->dispclk * 1000);
+ if (dc->debug.max_disp_clk == true)
+- context->bw.dcn.calc_clk.dispclk_khz = (int)(dc->dcn_soc->max_dispclk_vmax0p9 * 1000);
++ context->bw.dcn.clk.dispclk_khz = (int)(dc->dcn_soc->max_dispclk_vmax0p9 * 1000);
+
+- if (context->bw.dcn.calc_clk.dispclk_khz <
++ if (context->bw.dcn.clk.dispclk_khz <
+ dc->debug.min_disp_clk_khz) {
+- context->bw.dcn.calc_clk.dispclk_khz =
++ context->bw.dcn.clk.dispclk_khz =
+ dc->debug.min_disp_clk_khz;
+ }
+
+- context->bw.dcn.calc_clk.dppclk_khz = context->bw.dcn.calc_clk.dispclk_khz / v->dispclk_dppclk_ratio;
+- context->bw.dcn.calc_clk.phyclk_khz = v->phyclk_per_state[v->voltage_level];
++ context->bw.dcn.clk.dppclk_khz = context->bw.dcn.clk.dispclk_khz / v->dispclk_dppclk_ratio;
++ context->bw.dcn.clk.phyclk_khz = v->phyclk_per_state[v->voltage_level];
+ switch (v->voltage_level) {
+ case 0:
+- context->bw.dcn.calc_clk.max_supported_dppclk_khz =
++ context->bw.dcn.clk.max_supported_dppclk_khz =
+ (int)(dc->dcn_soc->max_dppclk_vmin0p65 * 1000);
+ break;
+ case 1:
+- context->bw.dcn.calc_clk.max_supported_dppclk_khz =
++ context->bw.dcn.clk.max_supported_dppclk_khz =
+ (int)(dc->dcn_soc->max_dppclk_vmid0p72 * 1000);
+ break;
+ case 2:
+- context->bw.dcn.calc_clk.max_supported_dppclk_khz =
++ context->bw.dcn.clk.max_supported_dppclk_khz =
+ (int)(dc->dcn_soc->max_dppclk_vnom0p8 * 1000);
+ break;
+ default:
+- context->bw.dcn.calc_clk.max_supported_dppclk_khz =
++ context->bw.dcn.clk.max_supported_dppclk_khz =
+ (int)(dc->dcn_soc->max_dppclk_vmax0p9 * 1000);
+ break;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+index 267c767..e1ebdf7 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+@@ -352,19 +352,19 @@ void context_clock_trace(
+ DC_LOGGER_INIT(dc->ctx->logger);
+ CLOCK_TRACE("Current: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
+ "dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n",
+- context->bw.dcn.calc_clk.dispclk_khz,
+- context->bw.dcn.calc_clk.dppclk_khz,
+- context->bw.dcn.calc_clk.dcfclk_khz,
+- context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
+- context->bw.dcn.calc_clk.fclk_khz,
+- context->bw.dcn.calc_clk.socclk_khz);
++ context->bw.dcn.clk.dispclk_khz,
++ context->bw.dcn.clk.dppclk_khz,
++ context->bw.dcn.clk.dcfclk_khz,
++ context->bw.dcn.clk.dcfclk_deep_sleep_khz,
++ context->bw.dcn.clk.fclk_khz,
++ context->bw.dcn.clk.socclk_khz);
+ CLOCK_TRACE("Calculated: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
+ "dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n",
+- context->bw.dcn.calc_clk.dispclk_khz,
+- context->bw.dcn.calc_clk.dppclk_khz,
+- context->bw.dcn.calc_clk.dcfclk_khz,
+- context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
+- context->bw.dcn.calc_clk.fclk_khz,
+- context->bw.dcn.calc_clk.socclk_khz);
++ context->bw.dcn.clk.dispclk_khz,
++ context->bw.dcn.clk.dppclk_khz,
++ context->bw.dcn.clk.dcfclk_khz,
++ context->bw.dcn.clk.dcfclk_deep_sleep_khz,
++ context->bw.dcn.clk.fclk_khz,
++ context->bw.dcn.clk.socclk_khz);
+ #endif
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index 1a6a7c5..27ee9bf 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -1284,7 +1284,7 @@ static enum dc_status enable_link_dp(
+ max_link_rate = LINK_RATE_HIGH3;
+
+ if (link_settings.link_rate == max_link_rate) {
+- struct dc_clocks clocks = state->bw.dcn.calc_clk;
++ struct dc_clocks clocks = state->bw.dcn.clk;
+
+ /* dce/dcn compat, do not update dispclk */
+ clocks.dispclk_khz = 0;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+index 93e6063..6b6570e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+@@ -584,8 +584,8 @@ static void dcn_update_clocks(struct dccg *dccg,
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
+ clock_voltage_req.clocks_in_khz = new_clocks->dispclk_khz;
+- /* TODO: ramp up - dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);*/
+- dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
++ /* TODO: ramp up - dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
++ dccg->clks.dispclk_khz = new_clocks->dispclk_khz;*/
+
+ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+ send_request_to_lower = true;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index eae2fd7..2c15854 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -337,13 +337,13 @@ void dcn10_log_hw_state(struct dc *dc)
+
+ DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n"
+ "dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n",
+- dc->current_state->bw.dcn.calc_clk.dcfclk_khz,
+- dc->current_state->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
+- dc->current_state->bw.dcn.calc_clk.dispclk_khz,
+- dc->current_state->bw.dcn.calc_clk.dppclk_khz,
+- dc->current_state->bw.dcn.calc_clk.max_supported_dppclk_khz,
+- dc->current_state->bw.dcn.calc_clk.fclk_khz,
+- dc->current_state->bw.dcn.calc_clk.socclk_khz);
++ dc->current_state->bw.dcn.clk.dcfclk_khz,
++ dc->current_state->bw.dcn.clk.dcfclk_deep_sleep_khz,
++ dc->current_state->bw.dcn.clk.dispclk_khz,
++ dc->current_state->bw.dcn.clk.dppclk_khz,
++ dc->current_state->bw.dcn.clk.max_supported_dppclk_khz,
++ dc->current_state->bw.dcn.clk.fclk_khz,
++ dc->current_state->bw.dcn.clk.socclk_khz);
+
+ log_mpc_crc(dc);
+
+@@ -1967,18 +1967,17 @@ static void update_dchubp_dpp(
+ * divided by 2
+ */
+ if (plane_state->update_flags.bits.full_update) {
+- bool should_divided_by_2 = context->bw.dcn.calc_clk.dppclk_khz <=
+- context->bw.dcn.cur_clk.dispclk_khz / 2;
++ bool should_divided_by_2 = context->bw.dcn.clk.dppclk_khz <=
++ dc->res_pool->dccg->clks.dispclk_khz / 2;
+
+ dpp->funcs->dpp_dppclk_control(
+ dpp,
+ should_divided_by_2,
+ true);
+
+- dc->current_state->bw.dcn.cur_clk.dppclk_khz =
+- should_divided_by_2 ?
+- context->bw.dcn.cur_clk.dispclk_khz / 2 :
+- context->bw.dcn.cur_clk.dispclk_khz;
++ dc->res_pool->dccg->clks.dppclk_khz = should_divided_by_2 ?
++ dc->res_pool->dccg->clks.dispclk_khz / 2 :
++ dc->res_pool->dccg->clks.dispclk_khz;
+ }
+
+ /* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
+@@ -2173,7 +2172,7 @@ static void dcn10_pplib_apply_display_requirements(
+ pp_display_cfg->min_engine_clock_deep_sleep_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz;
+ pp_display_cfg->min_dcfc_deep_sleep_clock_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz;
+ pp_display_cfg->min_dcfclock_khz = dc->res_pool->dccg->clks.dcfclk_khz;
+- pp_display_cfg->disp_clk_khz = context->bw.dcn.cur_clk.dispclk_khz;
++ pp_display_cfg->disp_clk_khz = dc->res_pool->dccg->clks.dispclk_khz;
+ dce110_fill_display_configs(context, pp_display_cfg);
+
+ if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof(
+@@ -2378,29 +2377,29 @@ static void dcn10_apply_ctx_for_surface(
+
+ static int determine_dppclk_threshold(struct dc *dc, struct dc_state *context)
+ {
+- bool request_dpp_div = context->bw.dcn.calc_clk.dispclk_khz >
+- context->bw.dcn.calc_clk.dppclk_khz;
+- bool dispclk_increase = context->bw.dcn.calc_clk.dispclk_khz >
+- context->bw.dcn.cur_clk.dispclk_khz;
+- int disp_clk_threshold = context->bw.dcn.calc_clk.max_supported_dppclk_khz;
+- bool cur_dpp_div = context->bw.dcn.cur_clk.dispclk_khz >
+- context->bw.dcn.cur_clk.dppclk_khz;
++ bool request_dpp_div = context->bw.dcn.clk.dispclk_khz >
++ context->bw.dcn.clk.dppclk_khz;
++ bool dispclk_increase = context->bw.dcn.clk.dispclk_khz >
++ dc->res_pool->dccg->clks.dispclk_khz;
++ int disp_clk_threshold = context->bw.dcn.clk.max_supported_dppclk_khz;
++ bool cur_dpp_div = dc->res_pool->dccg->clks.dispclk_khz >
++ dc->res_pool->dccg->clks.dppclk_khz;
+
+ /* increase clock, looking for div is 0 for current, request div is 1*/
+ if (dispclk_increase) {
+ /* already divided by 2, no need to reach target clk with 2 steps*/
+ if (cur_dpp_div)
+- return context->bw.dcn.calc_clk.dispclk_khz;
++ return context->bw.dcn.clk.dispclk_khz;
+
+ /* request disp clk is lower than maximum supported dpp clk,
+ * no need to reach target clk with two steps.
+ */
+- if (context->bw.dcn.calc_clk.dispclk_khz <= disp_clk_threshold)
+- return context->bw.dcn.calc_clk.dispclk_khz;
++ if (context->bw.dcn.clk.dispclk_khz <= disp_clk_threshold)
++ return context->bw.dcn.clk.dispclk_khz;
+
+ /* target dpp clk not request divided by 2, still within threshold */
+ if (!request_dpp_div)
+- return context->bw.dcn.calc_clk.dispclk_khz;
++ return context->bw.dcn.clk.dispclk_khz;
+
+ } else {
+ /* decrease clock, looking for current dppclk divided by 2,
+@@ -2409,17 +2408,17 @@ static int determine_dppclk_threshold(struct dc *dc, struct dc_state *context)
+
+ /* current dpp clk not divided by 2, no need to ramp*/
+ if (!cur_dpp_div)
+- return context->bw.dcn.calc_clk.dispclk_khz;
++ return context->bw.dcn.clk.dispclk_khz;
+
+ /* current disp clk is lower than current maximum dpp clk,
+ * no need to ramp
+ */
+- if (context->bw.dcn.cur_clk.dispclk_khz <= disp_clk_threshold)
+- return context->bw.dcn.calc_clk.dispclk_khz;
++ if (dc->res_pool->dccg->clks.dispclk_khz <= disp_clk_threshold)
++ return context->bw.dcn.clk.dispclk_khz;
+
+ /* request dpp clk need to be divided by 2 */
+ if (request_dpp_div)
+- return context->bw.dcn.calc_clk.dispclk_khz;
++ return context->bw.dcn.clk.dispclk_khz;
+ }
+
+ return disp_clk_threshold;
+@@ -2428,8 +2427,8 @@ static int determine_dppclk_threshold(struct dc *dc, struct dc_state *context)
+ static void ramp_up_dispclk_with_dpp(struct dc *dc, struct dc_state *context)
+ {
+ int i;
+- bool request_dpp_div = context->bw.dcn.calc_clk.dispclk_khz >
+- context->bw.dcn.calc_clk.dppclk_khz;
++ bool request_dpp_div = context->bw.dcn.clk.dispclk_khz >
++ context->bw.dcn.clk.dppclk_khz;
+
+ int dispclk_to_dpp_threshold = determine_dppclk_threshold(dc, context);
+
+@@ -2452,18 +2451,18 @@ static void ramp_up_dispclk_with_dpp(struct dc *dc, struct dc_state *context)
+ }
+
+ /* If target clk not same as dppclk threshold, set to target clock */
+- if (dispclk_to_dpp_threshold != context->bw.dcn.calc_clk.dispclk_khz) {
++ if (dispclk_to_dpp_threshold != context->bw.dcn.clk.dispclk_khz) {
+ dc->res_pool->dccg->funcs->set_dispclk(
+ dc->res_pool->dccg,
+- context->bw.dcn.calc_clk.dispclk_khz);
++ context->bw.dcn.clk.dispclk_khz);
+ }
+
+- context->bw.dcn.cur_clk.dispclk_khz =
+- context->bw.dcn.calc_clk.dispclk_khz;
+- context->bw.dcn.cur_clk.dppclk_khz =
+- context->bw.dcn.calc_clk.dppclk_khz;
+- context->bw.dcn.cur_clk.max_supported_dppclk_khz =
+- context->bw.dcn.calc_clk.max_supported_dppclk_khz;
++ dc->res_pool->dccg->clks.dispclk_khz =
++ context->bw.dcn.clk.dispclk_khz;
++ dc->res_pool->dccg->clks.dppclk_khz =
++ context->bw.dcn.clk.dppclk_khz;
++ dc->res_pool->dccg->clks.max_supported_dppclk_khz =
++ context->bw.dcn.clk.max_supported_dppclk_khz;
+ }
+
+ static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
+@@ -2484,11 +2483,11 @@ static void dcn10_set_bandwidth(
+ return;
+
+ if (context->stream_count == 0)
+- context->bw.dcn.calc_clk.phyclk_khz = 0;
++ context->bw.dcn.clk.phyclk_khz = 0;
+
+ dc->res_pool->dccg->funcs->update_clocks(
+ dc->res_pool->dccg,
+- &context->bw.dcn.calc_clk,
++ &context->bw.dcn.clk,
+ decrease_allowed);
+
+ /* make sure dcf clk is before dpp clk to
+@@ -2496,8 +2495,8 @@ static void dcn10_set_bandwidth(
+ */
+ if (should_set_clock(
+ decrease_allowed,
+- context->bw.dcn.calc_clk.dispclk_khz,
+- dc->current_state->bw.dcn.cur_clk.dispclk_khz)) {
++ context->bw.dcn.clk.dispclk_khz,
++ dc->res_pool->dccg->clks.dispclk_khz)) {
+
+ ramp_up_dispclk_with_dpp(dc, context);
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+index 327ea4e..0107aa2 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+@@ -256,8 +256,7 @@ struct dce_bw_output {
+ };
+
+ struct dcn_bw_output {
+- struct dc_clocks cur_clk;
+- struct dc_clocks calc_clk;
++ struct dc_clocks clk;
+ struct dcn_watermark_set watermarks;
+ };
+
+--
+2.7.4
+
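The consolidation in the patch above hinges on should_set_clock(), which now gates every clock write against the live dccg state rather than the removed cur_clk copy: a clock may always be raised, but is only lowered when the caller marks it safe. A minimal standalone sketch — the function body mirrors the patch, while the sample frequencies are illustrative only:

	/* Raise immediately, lower only when the caller says it is safe. */
	static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
	{
		return ((safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk);
	}

	/* e.g. with dispclk currently 600000 kHz and a newly calculated 400000 kHz:
	 *   should_set_clock(false, 400000, 600000) -> false, keep 600 MHz for now
	 *   should_set_clock(true,  400000, 600000) -> true,  drop to 400 MHz
	 * while any increase (calc_clk > cur_clk) is applied regardless of the flag.
	 */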
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4766-drm-amd-display-move-dcn1-dispclk-programming-to-dcc.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4766-drm-amd-display-move-dcn1-dispclk-programming-to-dcc.patch
new file mode 100644
index 00000000..305427f4
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4766-drm-amd-display-move-dcn1-dispclk-programming-to-dcc.patch
@@ -0,0 +1,315 @@
+From f450b4de968348f83b5b2436f5c6f1e8a52bdd5b Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Wed, 23 May 2018 18:18:50 -0400
+Subject: [PATCH 4766/5725] drm/amd/display: move dcn1 dispclk programming to
+ dccg
+
+No functional change.
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Nikola Cornij <Nikola.Cornij@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c | 95 ++++++++++++++++--
+ drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h | 2 +-
+ .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 106 ---------------------
+ .../gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 2 +-
+ 4 files changed, 90 insertions(+), 115 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+index 6b6570e..55f533cf 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+@@ -552,7 +552,85 @@ static void dce12_update_clocks(struct dccg *dccg,
+ }
+ }
+
+-static void dcn_update_clocks(struct dccg *dccg,
++static int dcn1_determine_dppclk_threshold(struct dccg *dccg, struct dc_clocks *new_clocks)
++{
++ bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
++ bool dispclk_increase = new_clocks->dispclk_khz > dccg->clks.dispclk_khz;
++ int disp_clk_threshold = new_clocks->max_supported_dppclk_khz;
++ bool cur_dpp_div = dccg->clks.dispclk_khz > dccg->clks.dppclk_khz;
++
++ /* increase clock, looking for div is 0 for current, request div is 1*/
++ if (dispclk_increase) {
++ /* already divided by 2, no need to reach target clk with 2 steps*/
++ if (cur_dpp_div)
++ return new_clocks->dispclk_khz;
++
++ /* request disp clk is lower than maximum supported dpp clk,
++ * no need to reach target clk with two steps.
++ */
++ if (new_clocks->dispclk_khz <= disp_clk_threshold)
++ return new_clocks->dispclk_khz;
++
++ /* target dpp clk not request divided by 2, still within threshold */
++ if (!request_dpp_div)
++ return new_clocks->dispclk_khz;
++
++ } else {
++ /* decrease clock, looking for current dppclk divided by 2,
++ * request dppclk not divided by 2.
++ */
++
++ /* current dpp clk not divided by 2, no need to ramp*/
++ if (!cur_dpp_div)
++ return new_clocks->dispclk_khz;
++
++ /* current disp clk is lower than current maximum dpp clk,
++ * no need to ramp
++ */
++ if (dccg->clks.dispclk_khz <= disp_clk_threshold)
++ return new_clocks->dispclk_khz;
++
++ /* request dpp clk need to be divided by 2 */
++ if (request_dpp_div)
++ return new_clocks->dispclk_khz;
++ }
++
++ return disp_clk_threshold;
++}
++
++static void dcn1_ramp_up_dispclk_with_dpp(struct dccg *dccg, struct dc_clocks *new_clocks)
++{
++ struct dc *dc = dccg->ctx->dc;
++ int dispclk_to_dpp_threshold = dcn1_determine_dppclk_threshold(dccg, new_clocks);
++ bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
++ int i;
++
++ /* set disp clk to dpp clk threshold */
++ dccg->funcs->set_dispclk(dccg, dispclk_to_dpp_threshold);
++
++ /* update request dpp clk division option */
++ for (i = 0; i < dc->res_pool->pipe_count; i++) {
++ struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
++
++ if (!pipe_ctx->plane_state)
++ continue;
++
++ pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control(
++ pipe_ctx->plane_res.dpp,
++ request_dpp_div,
++ true);
++ }
++
++ /* If target clk not same as dppclk threshold, set to target clock */
++ if (dispclk_to_dpp_threshold != new_clocks->dispclk_khz)
++ dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
++
++ dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
++ dccg->clks.dppclk_khz = new_clocks->dppclk_khz;
++ dccg->clks.max_supported_dppclk_khz = new_clocks->max_supported_dppclk_khz;
++}
++
++static void dcn1_update_clocks(struct dccg *dccg,
+ struct dc_clocks *new_clocks,
+ bool safe_to_lower)
+ {
+@@ -572,6 +650,9 @@ static void dcn_update_clocks(struct dccg *dccg,
+ send_request_to_increase = true;
+
+ #ifdef CONFIG_DRM_AMD_DC_DCN1_0
++ /* make sure dcf clk is before dpp clk to
++ * make sure we have enough voltage to run dpp clk
++ */
+ if (send_request_to_increase
+ ) {
+ /*use dcfclk to request voltage*/
+@@ -584,8 +665,8 @@ static void dcn_update_clocks(struct dccg *dccg,
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
+ clock_voltage_req.clocks_in_khz = new_clocks->dispclk_khz;
+- /* TODO: ramp up - dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
+- dccg->clks.dispclk_khz = new_clocks->dispclk_khz;*/
++ dcn1_ramp_up_dispclk_with_dpp(dccg, new_clocks);
++ dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
+
+ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+ send_request_to_lower = true;
+@@ -666,10 +747,10 @@ static void dce_update_clocks(struct dccg *dccg,
+ }
+ }
+
+-static const struct display_clock_funcs dcn_funcs = {
++static const struct display_clock_funcs dcn1_funcs = {
+ .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq_wrkaround,
+ .set_dispclk = dce112_set_clock,
+- .update_clocks = dcn_update_clocks
++ .update_clocks = dcn1_update_clocks
+ };
+
+ static const struct display_clock_funcs dce120_funcs = {
+@@ -838,7 +919,7 @@ struct dccg *dce120_dccg_create(struct dc_context *ctx)
+ return &clk_dce->base;
+ }
+
+-struct dccg *dcn_dccg_create(struct dc_context *ctx)
++struct dccg *dcn1_dccg_create(struct dc_context *ctx)
+ {
+ struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
+
+@@ -851,7 +932,7 @@ struct dccg *dcn_dccg_create(struct dc_context *ctx)
+ dce_dccg_construct(
+ clk_dce, ctx, NULL, NULL, NULL);
+
+- clk_dce->base.funcs = &dcn_funcs;
++ clk_dce->base.funcs = &dcn1_funcs;
+
+ return &clk_dce->base;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
+index c695b9c..7907c3c4 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
+@@ -128,7 +128,7 @@ struct dccg *dce112_dccg_create(
+
+ struct dccg *dce120_dccg_create(struct dc_context *ctx);
+
+-struct dccg *dcn_dccg_create(struct dc_context *ctx);
++struct dccg *dcn1_dccg_create(struct dc_context *ctx);
+
+ void dce_dccg_destroy(struct dccg **dccg);
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 2c15854..5d47b28 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -2375,101 +2375,6 @@ static void dcn10_apply_ctx_for_surface(
+ */
+ }
+
+-static int determine_dppclk_threshold(struct dc *dc, struct dc_state *context)
+-{
+- bool request_dpp_div = context->bw.dcn.clk.dispclk_khz >
+- context->bw.dcn.clk.dppclk_khz;
+- bool dispclk_increase = context->bw.dcn.clk.dispclk_khz >
+- dc->res_pool->dccg->clks.dispclk_khz;
+- int disp_clk_threshold = context->bw.dcn.clk.max_supported_dppclk_khz;
+- bool cur_dpp_div = dc->res_pool->dccg->clks.dispclk_khz >
+- dc->res_pool->dccg->clks.dppclk_khz;
+-
+- /* increase clock, looking for div is 0 for current, request div is 1*/
+- if (dispclk_increase) {
+- /* already divided by 2, no need to reach target clk with 2 steps*/
+- if (cur_dpp_div)
+- return context->bw.dcn.clk.dispclk_khz;
+-
+- /* request disp clk is lower than maximum supported dpp clk,
+- * no need to reach target clk with two steps.
+- */
+- if (context->bw.dcn.clk.dispclk_khz <= disp_clk_threshold)
+- return context->bw.dcn.clk.dispclk_khz;
+-
+- /* target dpp clk not request divided by 2, still within threshold */
+- if (!request_dpp_div)
+- return context->bw.dcn.clk.dispclk_khz;
+-
+- } else {
+- /* decrease clock, looking for current dppclk divided by 2,
+- * request dppclk not divided by 2.
+- */
+-
+- /* current dpp clk not divided by 2, no need to ramp*/
+- if (!cur_dpp_div)
+- return context->bw.dcn.clk.dispclk_khz;
+-
+- /* current disp clk is lower than current maximum dpp clk,
+- * no need to ramp
+- */
+- if (dc->res_pool->dccg->clks.dispclk_khz <= disp_clk_threshold)
+- return context->bw.dcn.clk.dispclk_khz;
+-
+- /* request dpp clk need to be divided by 2 */
+- if (request_dpp_div)
+- return context->bw.dcn.clk.dispclk_khz;
+- }
+-
+- return disp_clk_threshold;
+-}
+-
+-static void ramp_up_dispclk_with_dpp(struct dc *dc, struct dc_state *context)
+-{
+- int i;
+- bool request_dpp_div = context->bw.dcn.clk.dispclk_khz >
+- context->bw.dcn.clk.dppclk_khz;
+-
+- int dispclk_to_dpp_threshold = determine_dppclk_threshold(dc, context);
+-
+- /* set disp clk to dpp clk threshold */
+- dc->res_pool->dccg->funcs->set_dispclk(
+- dc->res_pool->dccg,
+- dispclk_to_dpp_threshold);
+-
+- /* update request dpp clk division option */
+- for (i = 0; i < dc->res_pool->pipe_count; i++) {
+- struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+-
+- if (!pipe_ctx->plane_state)
+- continue;
+-
+- pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control(
+- pipe_ctx->plane_res.dpp,
+- request_dpp_div,
+- true);
+- }
+-
+- /* If target clk not same as dppclk threshold, set to target clock */
+- if (dispclk_to_dpp_threshold != context->bw.dcn.clk.dispclk_khz) {
+- dc->res_pool->dccg->funcs->set_dispclk(
+- dc->res_pool->dccg,
+- context->bw.dcn.clk.dispclk_khz);
+- }
+-
+- dc->res_pool->dccg->clks.dispclk_khz =
+- context->bw.dcn.clk.dispclk_khz;
+- dc->res_pool->dccg->clks.dppclk_khz =
+- context->bw.dcn.clk.dppclk_khz;
+- dc->res_pool->dccg->clks.max_supported_dppclk_khz =
+- context->bw.dcn.clk.max_supported_dppclk_khz;
+-}
+-
+-static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
+-{
+- return ((safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk);
+-}
+-
+ static void dcn10_set_bandwidth(
+ struct dc *dc,
+ struct dc_state *context,
+@@ -2490,17 +2395,6 @@ static void dcn10_set_bandwidth(
+ &context->bw.dcn.clk,
+ decrease_allowed);
+
+- /* make sure dcf clk is before dpp clk to
+- * make sure we have enough voltage to run dpp clk
+- */
+- if (should_set_clock(
+- decrease_allowed,
+- context->bw.dcn.clk.dispclk_khz,
+- dc->res_pool->dccg->clks.dispclk_khz)) {
+-
+- ramp_up_dispclk_with_dpp(dc, context);
+- }
+-
+ dcn10_pplib_apply_display_requirements(dc, context);
+
+ if (dc->debug.sanity_checks) {
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+index 6255e95..d885988 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+@@ -1074,7 +1074,7 @@ static bool construct(
+ }
+ }
+
+- pool->base.dccg = dcn_dccg_create(ctx);
++ pool->base.dccg = dcn1_dccg_create(ctx);
+ if (pool->base.dccg == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+--
+2.7.4
+
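The ramp logic moved into dce_clocks.c by the patch above keeps the same two-step dispclk change whenever the DPP divider has to flip while dispclk rises past the maximum supported dppclk. A worked trace of dcn1_ramp_up_dispclk_with_dpp(), using made-up frequencies and assuming the semantics shown in the patch:

	/* current state : dispclk 400000 kHz, dppclk 400000 kHz  -> cur_dpp_div     = false
	 * requested     : dispclk 900000 kHz, dppclk 450000 kHz  -> request_dpp_div = true
	 * max_supported_dppclk_khz = 600000
	 *
	 * dcn1_determine_dppclk_threshold(): dispclk is increasing, the current state
	 * is not divided, the request exceeds the 600000 kHz threshold and wants the
	 * /2 divider, so the function returns 600000.
	 *
	 * dcn1_ramp_up_dispclk_with_dpp() then:
	 *   1. set_dispclk(600000)                  - step up to the threshold
	 *   2. dpp_dppclk_control(..., true, true)  - switch the DPPs with a plane to /2
	 *   3. set_dispclk(900000)                  - finish the ramp to the target
	 * leaving dccg->clks at dispclk 900000 / dppclk 450000 kHz.
	 */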
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4767-drm-amd-display-clean-up-dccg-divider-calc-and-dcn-c.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4767-drm-amd-display-clean-up-dccg-divider-calc-and-dcn-c.patch
new file mode 100644
index 00000000..a25bcaa5
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4767-drm-amd-display-clean-up-dccg-divider-calc-and-dcn-c.patch
@@ -0,0 +1,334 @@
+From a719cef7c38dbdad11c09ec745797aac170f64f9 Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Thu, 24 May 2018 15:50:18 -0400
+Subject: [PATCH 4767/5725] drm/amd/display: clean up dccg divider calc and dcn
+ constructor
+
+No functional change.
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c | 197 ++++++++----------------
+ drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h | 26 ----
+ 2 files changed, 68 insertions(+), 155 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+index 55f533cf..6e3bfdf 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+@@ -101,90 +101,42 @@ static const struct state_dependent_clocks dce120_max_clks_by_state[] = {
+ /*ClocksStatePerformance*/
+ { .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } };
+
+-/* Starting point for each divider range.*/
+-enum dce_divider_range_start {
+- DIVIDER_RANGE_01_START = 200, /* 2.00*/
+- DIVIDER_RANGE_02_START = 1600, /* 16.00*/
+- DIVIDER_RANGE_03_START = 3200, /* 32.00*/
+- DIVIDER_RANGE_SCALE_FACTOR = 100 /* Results are scaled up by 100.*/
++/* Starting DID for each range */
++enum dentist_base_divider_id {
++ dentist_base_divider_id_1 = 0x08,
++ dentist_base_divider_id_2 = 0x40,
++ dentist_base_divider_id_3 = 0x60,
++ dentist_max_divider_id = 0x80
+ };
+
+-/* Ranges for divider identifiers (Divider ID or DID)
+- mmDENTIST_DISPCLK_CNTL.DENTIST_DISPCLK_WDIVIDER*/
+-enum dce_divider_id_register_setting {
+- DIVIDER_RANGE_01_BASE_DIVIDER_ID = 0X08,
+- DIVIDER_RANGE_02_BASE_DIVIDER_ID = 0X40,
+- DIVIDER_RANGE_03_BASE_DIVIDER_ID = 0X60,
+- DIVIDER_RANGE_MAX_DIVIDER_ID = 0X80
++/* Starting point and step size for each divider range.*/
++enum dentist_divider_range {
++ dentist_divider_range_1_start = 8, /* 2.00 */
++ dentist_divider_range_1_step = 1, /* 0.25 */
++ dentist_divider_range_2_start = 64, /* 16.00 */
++ dentist_divider_range_2_step = 2, /* 0.50 */
++ dentist_divider_range_3_start = 128, /* 32.00 */
++ dentist_divider_range_3_step = 4, /* 1.00 */
++ dentist_divider_range_scale_factor = 4
+ };
+
+-/* Step size between each divider within a range.
+- Incrementing the DENTIST_DISPCLK_WDIVIDER by one
+- will increment the divider by this much.*/
+-enum dce_divider_range_step_size {
+- DIVIDER_RANGE_01_STEP_SIZE = 25, /* 0.25*/
+- DIVIDER_RANGE_02_STEP_SIZE = 50, /* 0.50*/
+- DIVIDER_RANGE_03_STEP_SIZE = 100 /* 1.00 */
+-};
+-
+-static bool dce_divider_range_construct(
+- struct dce_divider_range *div_range,
+- int range_start,
+- int range_step,
+- int did_min,
+- int did_max)
+-{
+- div_range->div_range_start = range_start;
+- div_range->div_range_step = range_step;
+- div_range->did_min = did_min;
+- div_range->did_max = did_max;
+-
+- if (div_range->div_range_step == 0) {
+- div_range->div_range_step = 1;
+- /*div_range_step cannot be zero*/
+- BREAK_TO_DEBUGGER();
+- }
+- /* Calculate this based on the other inputs.*/
+- /* See DividerRange.h for explanation of */
+- /* the relationship between divider id (DID) and a divider.*/
+- /* Number of Divider IDs = (Maximum Divider ID - Minimum Divider ID)*/
+- /* Maximum divider identified in this range =
+- * (Number of Divider IDs)*Step size between dividers
+- * + The start of this range.*/
+- div_range->div_range_end = (did_max - did_min) * range_step
+- + range_start;
+- return true;
+-}
+-
+-static int dce_divider_range_calc_divider(
+- struct dce_divider_range *div_range,
+- int did)
+-{
+- /* Is this DID within our range?*/
+- if ((did < div_range->did_min) || (did >= div_range->did_max))
+- return INVALID_DIVIDER;
+-
+- return ((did - div_range->did_min) * div_range->div_range_step)
+- + div_range->div_range_start;
+-
+-}
+-
+-static int dce_divider_range_get_divider(
+- struct dce_divider_range *div_range,
+- int ranges_num,
+- int did)
++static int dentist_get_divider_from_did(int did)
+ {
+- int div = INVALID_DIVIDER;
+- int i;
+-
+- for (i = 0; i < ranges_num; i++) {
+- /* Calculate divider with given divider ID*/
+- div = dce_divider_range_calc_divider(&div_range[i], did);
+- /* Found a valid return divider*/
+- if (div != INVALID_DIVIDER)
+- break;
++ if (did < dentist_base_divider_id_1)
++ did = dentist_base_divider_id_1;
++ if (did > dentist_max_divider_id)
++ did = dentist_max_divider_id;
++
++ if (did < dentist_base_divider_id_2) {
++ return dentist_divider_range_1_start + dentist_divider_range_1_step
++ * (did - dentist_base_divider_id_1);
++ } else if (did < dentist_base_divider_id_3) {
++ return dentist_divider_range_2_start + dentist_divider_range_2_step
++ * (did - dentist_base_divider_id_2);
++ } else {
++ return dentist_divider_range_3_start + dentist_divider_range_3_step
++ * (did - dentist_base_divider_id_3);
+ }
+- return div;
+ }
+
+ static int dce_clocks_get_dp_ref_freq(struct dccg *clk)
+@@ -193,7 +145,7 @@ static int dce_clocks_get_dp_ref_freq(struct dccg *clk)
+ int dprefclk_wdivider;
+ int dprefclk_src_sel;
+ int dp_ref_clk_khz = 600000;
+- int target_div = INVALID_DIVIDER;
++ int target_div;
+
+ /* ASSERT DP Reference Clock source is from DFS*/
+ REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel);
+@@ -204,16 +156,11 @@ static int dce_clocks_get_dp_ref_freq(struct dccg *clk)
+ REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider);
+
+ /* Convert DENTIST_DPREFCLK_WDIVIDERto actual divider*/
+- target_div = dce_divider_range_get_divider(
+- clk_dce->divider_ranges,
+- DIVIDER_RANGE_MAX,
+- dprefclk_wdivider);
+-
+- if (target_div != INVALID_DIVIDER) {
+- /* Calculate the current DFS clock, in kHz.*/
+- dp_ref_clk_khz = (DIVIDER_RANGE_SCALE_FACTOR
+- * clk_dce->dentist_vco_freq_khz) / target_div;
+- }
++ target_div = dentist_get_divider_from_did(dprefclk_wdivider);
++
++ /* Calculate the current DFS clock, in kHz.*/
++ dp_ref_clk_khz = (dentist_divider_range_scale_factor
++ * clk_dce->dentist_vco_freq_khz) / target_div;
+
+ /* SW will adjust DP REF Clock average value for all purposes
+ * (DP DTO / DP Audio DTO and DP GTC)
+@@ -229,17 +176,12 @@ static int dce_clocks_get_dp_ref_freq(struct dccg *clk)
+ */
+ if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) {
+ struct fixed31_32 ss_percentage = dc_fixpt_div_int(
+- dc_fixpt_from_fraction(
+- clk_dce->dprefclk_ss_percentage,
+- clk_dce->dprefclk_ss_divider), 200);
++ dc_fixpt_from_fraction(clk_dce->dprefclk_ss_percentage,
++ clk_dce->dprefclk_ss_divider), 200);
+ struct fixed31_32 adj_dp_ref_clk_khz;
+
+- ss_percentage = dc_fixpt_sub(dc_fixpt_one,
+- ss_percentage);
+- adj_dp_ref_clk_khz =
+- dc_fixpt_mul_int(
+- ss_percentage,
+- dp_ref_clk_khz);
++ ss_percentage = dc_fixpt_sub(dc_fixpt_one, ss_percentage);
++ adj_dp_ref_clk_khz = dc_fixpt_mul_int(ss_percentage, dp_ref_clk_khz);
+ dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
+ }
+
+@@ -257,17 +199,12 @@ static int dce_clocks_get_dp_ref_freq_wrkaround(struct dccg *clk)
+
+ if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) {
+ struct fixed31_32 ss_percentage = dc_fixpt_div_int(
+- dc_fixpt_from_fraction(
+- clk_dce->dprefclk_ss_percentage,
++ dc_fixpt_from_fraction(clk_dce->dprefclk_ss_percentage,
+ clk_dce->dprefclk_ss_divider), 200);
+ struct fixed31_32 adj_dp_ref_clk_khz;
+
+- ss_percentage = dc_fixpt_sub(dc_fixpt_one,
+- ss_percentage);
+- adj_dp_ref_clk_khz =
+- dc_fixpt_mul_int(
+- ss_percentage,
+- dp_ref_clk_khz);
++ ss_percentage = dc_fixpt_sub(dc_fixpt_one, ss_percentage);
++ adj_dp_ref_clk_khz = dc_fixpt_mul_int(ss_percentage, dp_ref_clk_khz);
+ dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
+ }
+
+@@ -804,25 +741,6 @@ static void dce_dccg_construct(
+
+ dce_clock_read_integrated_info(clk_dce);
+ dce_clock_read_ss_info(clk_dce);
+-
+- dce_divider_range_construct(
+- &clk_dce->divider_ranges[DIVIDER_RANGE_01],
+- DIVIDER_RANGE_01_START,
+- DIVIDER_RANGE_01_STEP_SIZE,
+- DIVIDER_RANGE_01_BASE_DIVIDER_ID,
+- DIVIDER_RANGE_02_BASE_DIVIDER_ID);
+- dce_divider_range_construct(
+- &clk_dce->divider_ranges[DIVIDER_RANGE_02],
+- DIVIDER_RANGE_02_START,
+- DIVIDER_RANGE_02_STEP_SIZE,
+- DIVIDER_RANGE_02_BASE_DIVIDER_ID,
+- DIVIDER_RANGE_03_BASE_DIVIDER_ID);
+- dce_divider_range_construct(
+- &clk_dce->divider_ranges[DIVIDER_RANGE_03],
+- DIVIDER_RANGE_03_START,
+- DIVIDER_RANGE_03_STEP_SIZE,
+- DIVIDER_RANGE_03_BASE_DIVIDER_ID,
+- DIVIDER_RANGE_MAX_DIVIDER_ID);
+ }
+
+ struct dccg *dce_dccg_create(
+@@ -921,6 +839,9 @@ struct dccg *dce120_dccg_create(struct dc_context *ctx)
+
+ struct dccg *dcn1_dccg_create(struct dc_context *ctx)
+ {
++ struct dc_debug *debug = &ctx->dc->debug;
++ struct dc_bios *bp = ctx->dc_bios;
++ struct dc_firmware_info fw_info = { { 0 } };
+ struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
+
+ if (clk_dce == NULL) {
+@@ -928,12 +849,30 @@ struct dccg *dcn1_dccg_create(struct dc_context *ctx)
+ return NULL;
+ }
+
+- /* TODO strip out useful stuff out of dce constructor */
+- dce_dccg_construct(
+- clk_dce, ctx, NULL, NULL, NULL);
+-
++ clk_dce->base.ctx = ctx;
+ clk_dce->base.funcs = &dcn1_funcs;
+
++ clk_dce->dfs_bypass_disp_clk = 0;
++
++ clk_dce->dprefclk_ss_percentage = 0;
++ clk_dce->dprefclk_ss_divider = 1000;
++ clk_dce->ss_on_dprefclk = false;
++
++ if (bp->integrated_info)
++ clk_dce->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
++ if (clk_dce->dentist_vco_freq_khz == 0) {
++ bp->funcs->get_firmware_info(bp, &fw_info);
++ clk_dce->dentist_vco_freq_khz = fw_info.smu_gpu_pll_output_freq;
++ if (clk_dce->dentist_vco_freq_khz == 0)
++ clk_dce->dentist_vco_freq_khz = 3600000;
++ }
++
++ if (!debug->disable_dfs_bypass && bp->integrated_info)
++ if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
++ clk_dce->dfs_bypass_enabled = true;
++
++ dce_clock_read_ss_info(clk_dce);
++
+ return &clk_dce->base;
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
+index 7907c3c4..04a9e3c 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
+@@ -57,31 +57,6 @@ struct dce_disp_clk_registers {
+ uint32_t DENTIST_DISPCLK_CNTL;
+ };
+
+-/* Array identifiers and count for the divider ranges.*/
+-enum dce_divider_range_count {
+- DIVIDER_RANGE_01 = 0,
+- DIVIDER_RANGE_02,
+- DIVIDER_RANGE_03,
+- DIVIDER_RANGE_MAX /* == 3*/
+-};
+-
+-enum dce_divider_error_types {
+- INVALID_DID = 0,
+- INVALID_DIVIDER = 1
+-};
+-
+-struct dce_divider_range {
+- int div_range_start;
+- /* The end of this range of dividers.*/
+- int div_range_end;
+- /* The distance between each divider in this range.*/
+- int div_range_step;
+- /* The divider id for the lowest divider.*/
+- int did_min;
+- /* The divider id for the highest divider.*/
+- int did_max;
+-};
+-
+ struct dce_dccg {
+ struct dccg base;
+ const struct dce_disp_clk_registers *regs;
+@@ -89,7 +64,6 @@ struct dce_dccg {
+ const struct dce_disp_clk_mask *clk_mask;
+
+ struct state_dependent_clocks max_clks_by_state[DM_PP_CLOCKS_MAX_STATES];
+- struct dce_divider_range divider_ranges[DIVIDER_RANGE_MAX];
+
+ int dentist_vco_freq_khz;
+
+--
+2.7.4
+
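The table-free mapping introduced in the patch above converts a dentist divider ID (DID) directly into a divider scaled by dentist_divider_range_scale_factor (4). Sketched as standalone C with the range constants written out numerically; the helper name and the example clock values are illustrative, not part of the driver:

	/* DID 0x08..0x3F step 0.25, 0x40..0x5F step 0.50, 0x60..0x80 step 1.00,
	 * all returned scaled by 4 (so a return value of 24 means a divider of 6.0). */
	static int did_to_scaled_divider(int did)
	{
		if (did < 0x08)
			did = 0x08;
		if (did > 0x80)
			did = 0x80;

		if (did < 0x40)
			return 8 + 1 * (did - 0x08);	/* 2.00 .. 15.75 */
		else if (did < 0x60)
			return 64 + 2 * (did - 0x40);	/* 16.00 .. 31.50 */
		else
			return 128 + 4 * (did - 0x60);	/* 32.00 .. 64.00 */
	}

	/* e.g. DENTIST_DPREFCLK_WDIVIDER = 0x18 gives 8 + (0x18 - 0x08) = 24
	 * (an actual divider of 6.0), and with a 3600000 kHz (3.6 GHz) dentist VCO:
	 *   dp_ref_clk_khz = 4 * 3600000 / 24 = 600000 kHz
	 * matching the 600 MHz default used elsewhere in the file.
	 */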
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4768-drm-amd-display-rename-dce_disp_clk-to-dccg.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4768-drm-amd-display-rename-dce_disp_clk-to-dccg.patch
new file mode 100644
index 00000000..a90c327a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4768-drm-amd-display-rename-dce_disp_clk-to-dccg.patch
@@ -0,0 +1,432 @@
+From 62d3670e317f8861c85aa87857e0f84af007120c Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Thu, 24 May 2018 16:48:38 -0400
+Subject: [PATCH 4768/5725] drm/amd/display: rename dce_disp_clk to dccg
+
+No functional change.
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Nikola Cornij <Nikola.Cornij@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c | 115 +++++++++------------
+ drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h | 41 +++++---
+ drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h | 3 -
+ .../drm/amd/display/dc/dce100/dce100_resource.c | 6 +-
+ .../drm/amd/display/dc/dce110/dce110_resource.c | 6 +-
+ .../drm/amd/display/dc/dce112/dce112_resource.c | 6 +-
+ .../gpu/drm/amd/display/dc/dce80/dce80_resource.c | 6 +-
+ 7 files changed, 88 insertions(+), 95 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+index 6e3bfdf..242e8ae 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+@@ -139,7 +139,34 @@ static int dentist_get_divider_from_did(int did)
+ }
+ }
+
+-static int dce_clocks_get_dp_ref_freq(struct dccg *clk)
++/* SW will adjust DP REF Clock average value for all purposes
++ * (DP DTO / DP Audio DTO and DP GTC)
++ if clock is spread for all cases:
++ -if SS enabled on DP Ref clock and HW de-spreading enabled with SW
++ calculations for DS_INCR/DS_MODULO (this is planned to be default case)
++ -if SS enabled on DP Ref clock and HW de-spreading enabled with HW
++ calculations (not planned to be used, but average clock should still
++ be valid)
++ -if SS enabled on DP Ref clock and HW de-spreading disabled
++ (should not be case with CIK) then SW should program all rates
++ generated according to average value (case as with previous ASICs)
++ */
++static int dccg_adjust_dp_ref_freq_for_ss(struct dce_dccg *clk_dce, int dp_ref_clk_khz)
++{
++ if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) {
++ struct fixed31_32 ss_percentage = dc_fixpt_div_int(
++ dc_fixpt_from_fraction(clk_dce->dprefclk_ss_percentage,
++ clk_dce->dprefclk_ss_divider), 200);
++ struct fixed31_32 adj_dp_ref_clk_khz;
++
++ ss_percentage = dc_fixpt_sub(dc_fixpt_one, ss_percentage);
++ adj_dp_ref_clk_khz = dc_fixpt_mul_int(ss_percentage, dp_ref_clk_khz);
++ dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
++ }
++ return dp_ref_clk_khz;
++}
++
++static int dce_get_dp_ref_freq_khz(struct dccg *clk)
+ {
+ struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
+ int dprefclk_wdivider;
+@@ -162,54 +189,16 @@ static int dce_clocks_get_dp_ref_freq(struct dccg *clk)
+ dp_ref_clk_khz = (dentist_divider_range_scale_factor
+ * clk_dce->dentist_vco_freq_khz) / target_div;
+
+- /* SW will adjust DP REF Clock average value for all purposes
+- * (DP DTO / DP Audio DTO and DP GTC)
+- if clock is spread for all cases:
+- -if SS enabled on DP Ref clock and HW de-spreading enabled with SW
+- calculations for DS_INCR/DS_MODULO (this is planned to be default case)
+- -if SS enabled on DP Ref clock and HW de-spreading enabled with HW
+- calculations (not planned to be used, but average clock should still
+- be valid)
+- -if SS enabled on DP Ref clock and HW de-spreading disabled
+- (should not be case with CIK) then SW should program all rates
+- generated according to average value (case as with previous ASICs)
+- */
+- if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) {
+- struct fixed31_32 ss_percentage = dc_fixpt_div_int(
+- dc_fixpt_from_fraction(clk_dce->dprefclk_ss_percentage,
+- clk_dce->dprefclk_ss_divider), 200);
+- struct fixed31_32 adj_dp_ref_clk_khz;
+-
+- ss_percentage = dc_fixpt_sub(dc_fixpt_one, ss_percentage);
+- adj_dp_ref_clk_khz = dc_fixpt_mul_int(ss_percentage, dp_ref_clk_khz);
+- dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
+- }
+-
+- return dp_ref_clk_khz;
++ return dccg_adjust_dp_ref_freq_for_ss(clk_dce, dp_ref_clk_khz);
+ }
+
+-/* TODO: This is DCN DPREFCLK: it could be program by DENTIST by VBIOS
+- * or CLK0_CLK11 by SMU. For DCE120, it is wlays 600Mhz. Will re-visit
+- * clock implementation
+- */
+-static int dce_clocks_get_dp_ref_freq_wrkaround(struct dccg *clk)
++static int dce12_get_dp_ref_freq_khz(struct dccg *clk)
+ {
+ struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
+- int dp_ref_clk_khz = 600000;
+-
+- if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) {
+- struct fixed31_32 ss_percentage = dc_fixpt_div_int(
+- dc_fixpt_from_fraction(clk_dce->dprefclk_ss_percentage,
+- clk_dce->dprefclk_ss_divider), 200);
+- struct fixed31_32 adj_dp_ref_clk_khz;
+-
+- ss_percentage = dc_fixpt_sub(dc_fixpt_one, ss_percentage);
+- adj_dp_ref_clk_khz = dc_fixpt_mul_int(ss_percentage, dp_ref_clk_khz);
+- dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
+- }
+
+- return dp_ref_clk_khz;
++ return dccg_adjust_dp_ref_freq_for_ss(clk_dce, 600000);
+ }
++
+ static enum dm_pp_clocks_state dce_get_required_clocks_state(
+ struct dccg *clk,
+ struct dc_clocks *req_clocks)
+@@ -590,8 +579,7 @@ static void dcn1_update_clocks(struct dccg *dccg,
+ /* make sure dcf clk is before dpp clk to
+ * make sure we have enough voltage to run dpp clk
+ */
+- if (send_request_to_increase
+- ) {
++ if (send_request_to_increase) {
+ /*use dcfclk to request voltage*/
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
+ clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
+@@ -644,8 +632,7 @@ static void dcn1_update_clocks(struct dccg *dccg,
+ }
+
+ #ifdef CONFIG_DRM_AMD_DC_DCN1_0
+- if (!send_request_to_increase && send_request_to_lower
+- ) {
++ if (!send_request_to_increase && send_request_to_lower) {
+ /*use dcfclk to request voltage*/
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
+ clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
+@@ -685,31 +672,31 @@ static void dce_update_clocks(struct dccg *dccg,
+ }
+
+ static const struct display_clock_funcs dcn1_funcs = {
+- .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq_wrkaround,
++ .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+ .set_dispclk = dce112_set_clock,
+ .update_clocks = dcn1_update_clocks
+ };
+
+ static const struct display_clock_funcs dce120_funcs = {
+- .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq_wrkaround,
++ .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+ .set_dispclk = dce112_set_clock,
+ .update_clocks = dce12_update_clocks
+ };
+
+ static const struct display_clock_funcs dce112_funcs = {
+- .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq,
++ .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
+ .set_dispclk = dce112_set_clock,
+ .update_clocks = dce_update_clocks
+ };
+
+ static const struct display_clock_funcs dce110_funcs = {
+- .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq,
++ .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
+ .set_dispclk = dce_psr_set_clock,
+ .update_clocks = dce_update_clocks
+ };
+
+ static const struct display_clock_funcs dce_funcs = {
+- .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq,
++ .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
+ .set_dispclk = dce_set_clock,
+ .update_clocks = dce_update_clocks
+ };
+@@ -717,9 +704,9 @@ static const struct display_clock_funcs dce_funcs = {
+ static void dce_dccg_construct(
+ struct dce_dccg *clk_dce,
+ struct dc_context *ctx,
+- const struct dce_disp_clk_registers *regs,
+- const struct dce_disp_clk_shift *clk_shift,
+- const struct dce_disp_clk_mask *clk_mask)
++ const struct dccg_registers *regs,
++ const struct dccg_shift *clk_shift,
++ const struct dccg_mask *clk_mask)
+ {
+ struct dccg *base = &clk_dce->base;
+
+@@ -745,9 +732,9 @@ static void dce_dccg_construct(
+
+ struct dccg *dce_dccg_create(
+ struct dc_context *ctx,
+- const struct dce_disp_clk_registers *regs,
+- const struct dce_disp_clk_shift *clk_shift,
+- const struct dce_disp_clk_mask *clk_mask)
++ const struct dccg_registers *regs,
++ const struct dccg_shift *clk_shift,
++ const struct dccg_mask *clk_mask)
+ {
+ struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
+
+@@ -768,9 +755,9 @@ struct dccg *dce_dccg_create(
+
+ struct dccg *dce110_dccg_create(
+ struct dc_context *ctx,
+- const struct dce_disp_clk_registers *regs,
+- const struct dce_disp_clk_shift *clk_shift,
+- const struct dce_disp_clk_mask *clk_mask)
++ const struct dccg_registers *regs,
++ const struct dccg_shift *clk_shift,
++ const struct dccg_mask *clk_mask)
+ {
+ struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
+
+@@ -793,9 +780,9 @@ struct dccg *dce110_dccg_create(
+
+ struct dccg *dce112_dccg_create(
+ struct dc_context *ctx,
+- const struct dce_disp_clk_registers *regs,
+- const struct dce_disp_clk_shift *clk_shift,
+- const struct dce_disp_clk_mask *clk_mask)
++ const struct dccg_registers *regs,
++ const struct dccg_shift *clk_shift,
++ const struct dccg_mask *clk_mask)
+ {
+ struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
+index 04a9e3c..be5b68d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
+@@ -33,6 +33,9 @@
+ .DPREFCLK_CNTL = mmDPREFCLK_CNTL, \
+ .DENTIST_DISPCLK_CNTL = mmDENTIST_DISPCLK_CNTL
+
++#define CLK_COMMON_REG_LIST_DCN_BASE() \
++ SR(DENTIST_DISPCLK_CNTL)
++
+ #define CLK_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+@@ -40,28 +43,34 @@
+ CLK_SF(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, mask_sh), \
+ CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, mask_sh)
+
++#define CLK_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(mask_sh) \
++ CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_WDIVIDER, mask_sh),\
++ CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, mask_sh)
++
+ #define CLK_REG_FIELD_LIST(type) \
+ type DPREFCLK_SRC_SEL; \
+- type DENTIST_DPREFCLK_WDIVIDER;
++ type DENTIST_DPREFCLK_WDIVIDER; \
++ type DENTIST_DISPCLK_WDIVIDER; \
++ type DENTIST_DPPCLK_WDIVIDER;
+
+-struct dce_disp_clk_shift {
++struct dccg_shift {
+ CLK_REG_FIELD_LIST(uint8_t)
+ };
+
+-struct dce_disp_clk_mask {
++struct dccg_mask {
+ CLK_REG_FIELD_LIST(uint32_t)
+ };
+
+-struct dce_disp_clk_registers {
++struct dccg_registers {
+ uint32_t DPREFCLK_CNTL;
+ uint32_t DENTIST_DISPCLK_CNTL;
+ };
+
+ struct dce_dccg {
+ struct dccg base;
+- const struct dce_disp_clk_registers *regs;
+- const struct dce_disp_clk_shift *clk_shift;
+- const struct dce_disp_clk_mask *clk_mask;
++ const struct dccg_registers *regs;
++ const struct dccg_shift *clk_shift;
++ const struct dccg_mask *clk_mask;
+
+ struct state_dependent_clocks max_clks_by_state[DM_PP_CLOCKS_MAX_STATES];
+
+@@ -84,21 +93,21 @@ struct dce_dccg {
+
+ struct dccg *dce_dccg_create(
+ struct dc_context *ctx,
+- const struct dce_disp_clk_registers *regs,
+- const struct dce_disp_clk_shift *clk_shift,
+- const struct dce_disp_clk_mask *clk_mask);
++ const struct dccg_registers *regs,
++ const struct dccg_shift *clk_shift,
++ const struct dccg_mask *clk_mask);
+
+ struct dccg *dce110_dccg_create(
+ struct dc_context *ctx,
+- const struct dce_disp_clk_registers *regs,
+- const struct dce_disp_clk_shift *clk_shift,
+- const struct dce_disp_clk_mask *clk_mask);
++ const struct dccg_registers *regs,
++ const struct dccg_shift *clk_shift,
++ const struct dccg_mask *clk_mask);
+
+ struct dccg *dce112_dccg_create(
+ struct dc_context *ctx,
+- const struct dce_disp_clk_registers *regs,
+- const struct dce_disp_clk_shift *clk_shift,
+- const struct dce_disp_clk_mask *clk_mask);
++ const struct dccg_registers *regs,
++ const struct dccg_shift *clk_shift,
++ const struct dccg_mask *clk_mask);
+
+ struct dccg *dce120_dccg_create(struct dc_context *ctx);
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+index 057b8af..0574078 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+@@ -249,7 +249,6 @@ struct dce_hwseq_registers {
+ uint32_t DISPCLK_FREQ_CHANGE_CNTL;
+ uint32_t RBBMIF_TIMEOUT_DIS;
+ uint32_t RBBMIF_TIMEOUT_DIS_2;
+- uint32_t DENTIST_DISPCLK_CNTL;
+ uint32_t DCHUBBUB_CRC_CTRL;
+ uint32_t DPP_TOP0_DPP_CRC_CTRL;
+ uint32_t DPP_TOP0_DPP_CRC_VAL_R_G;
+@@ -496,8 +495,6 @@ struct dce_hwseq_registers {
+ type DOMAIN7_PGFSM_PWR_STATUS; \
+ type DCFCLK_GATE_DIS; \
+ type DCHUBBUB_GLOBAL_TIMER_REFDIV; \
+- type DENTIST_DPPCLK_WDIVIDER; \
+- type DENTIST_DISPCLK_WDIVIDER; \
+ type VGA_TEST_ENABLE; \
+ type VGA_TEST_RENDER_START; \
+ type D1VGA_MODE_ENABLE; \
+diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+index a90c9a6..ad8ad4e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+@@ -135,15 +135,15 @@ static const struct dce110_timing_generator_offsets dce100_tg_offsets[] = {
+ .reg_name = mm ## block ## id ## _ ## reg_name
+
+
+-static const struct dce_disp_clk_registers disp_clk_regs = {
++static const struct dccg_registers disp_clk_regs = {
+ CLK_COMMON_REG_LIST_DCE_BASE()
+ };
+
+-static const struct dce_disp_clk_shift disp_clk_shift = {
++static const struct dccg_shift disp_clk_shift = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+ };
+
+-static const struct dce_disp_clk_mask disp_clk_mask = {
++static const struct dccg_mask disp_clk_mask = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+ };
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+index 71a401f..3edaa00 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+@@ -146,15 +146,15 @@ static const struct dce110_timing_generator_offsets dce110_tg_offsets[] = {
+ #define SRI(reg_name, block, id)\
+ .reg_name = mm ## block ## id ## _ ## reg_name
+
+-static const struct dce_disp_clk_registers disp_clk_regs = {
++static const struct dccg_registers disp_clk_regs = {
+ CLK_COMMON_REG_LIST_DCE_BASE()
+ };
+
+-static const struct dce_disp_clk_shift disp_clk_shift = {
++static const struct dccg_shift disp_clk_shift = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+ };
+
+-static const struct dce_disp_clk_mask disp_clk_mask = {
++static const struct dccg_mask disp_clk_mask = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+ };
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+index ae5b19d..7529100 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+@@ -146,15 +146,15 @@ static const struct dce110_timing_generator_offsets dce112_tg_offsets[] = {
+ .reg_name = mm ## block ## id ## _ ## reg_name
+
+
+-static const struct dce_disp_clk_registers disp_clk_regs = {
++static const struct dccg_registers disp_clk_regs = {
+ CLK_COMMON_REG_LIST_DCE_BASE()
+ };
+
+-static const struct dce_disp_clk_shift disp_clk_shift = {
++static const struct dccg_shift disp_clk_shift = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+ };
+
+-static const struct dce_disp_clk_mask disp_clk_mask = {
++static const struct dccg_mask disp_clk_mask = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+ };
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+index 7070053..2ac95ec 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+@@ -153,15 +153,15 @@ static const struct dce110_timing_generator_offsets dce80_tg_offsets[] = {
+ .reg_name = mm ## block ## id ## _ ## reg_name
+
+
+-static const struct dce_disp_clk_registers disp_clk_regs = {
++static const struct dccg_registers disp_clk_regs = {
+ CLK_COMMON_REG_LIST_DCE_BASE()
+ };
+
+-static const struct dce_disp_clk_shift disp_clk_shift = {
++static const struct dccg_shift disp_clk_shift = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+ };
+
+-static const struct dce_disp_clk_mask disp_clk_mask = {
++static const struct dccg_mask disp_clk_mask = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+ };
+
+--
+2.7.4
+
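Besides the rename, the patch above factors the spread-spectrum derating of the DP reference clock into dccg_adjust_dp_ref_freq_for_ss(). An integer approximation of that fixed-point math, with assumed example values (a 0.3 % spread stored as dprefclk_ss_percentage = 300 together with the default dprefclk_ss_divider of 1000 seen earlier in the constructor):

	/* Average DP ref clock under spread spectrum: derate by half of
	 * (ss_percentage / ss_divider) percent, i.e. divide by 200 overall.
	 * Integer sketch of the fixed31_32 code in the patch. */
	static int adjust_dp_ref_for_ss(int dp_ref_clk_khz, int ss_percentage, int ss_divider)
	{
		return dp_ref_clk_khz -
			(int)(((long long)dp_ref_clk_khz * ss_percentage) / (ss_divider * 200));
	}

	/* e.g. adjust_dp_ref_for_ss(600000, 300, 1000)
	 *        = 600000 - 600000 * 300 / 200000
	 *        = 600000 - 900 = 599100 kHz
	 */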
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4769-drm-amd-display-clean-up-set_bandwidth-usage.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4769-drm-amd-display-clean-up-set_bandwidth-usage.patch
new file mode 100644
index 00000000..0e72f1fb
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4769-drm-amd-display-clean-up-set_bandwidth-usage.patch
@@ -0,0 +1,100 @@
+From 81ef29b9bbc5d476a9025c0a0ac75939df530620 Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Wed, 23 May 2018 18:39:21 -0400
+Subject: [PATCH 4769/5725] drm/amd/display: clean up set_bandwidth usage
+
+This removes redundant set_bandwidth calls as well
+as fixes a bug in post_set_address_update where dcn1
+would never get to lower clocks.
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Nikola Cornij <Nikola.Cornij@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc.c | 5 -----
+ drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 5 -----
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 11 +++--------
+ 3 files changed, 3 insertions(+), 18 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 16ebdc1..4e9bd55 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -968,12 +968,7 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
+
+ dc->optimized_required = false;
+
+- /* 3rd param should be true, temp w/a for RV*/
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+- dc->hwss.set_bandwidth(dc, context, dc->ctx->dce_version < DCN_VERSION_1_0);
+-#else
+ dc->hwss.set_bandwidth(dc, context, true);
+-#endif
+ return true;
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index 07633a1..6c2b4cc 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -2017,8 +2017,6 @@ enum dc_status dce110_apply_ctx_to_hw(
+ if (dc->fbc_compressor)
+ dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
+
+- dc->hwss.set_bandwidth(dc, context, false);
+-
+ dce110_setup_audio_dto(dc, context);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+@@ -2047,9 +2045,6 @@ enum dc_status dce110_apply_ctx_to_hw(
+ return status;
+ }
+
+- /* to save power */
+- dc->hwss.set_bandwidth(dc, context, true);
+-
+ dcb->funcs->set_scratch_critical_state(dcb, false);
+
+ if (dc->fbc_compressor)
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 5d47b28..ef3969f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -2283,8 +2283,7 @@ static void dcn10_apply_ctx_for_surface(
+ hwss1_plane_atomic_disconnect(dc, old_pipe_ctx);
+ removed_pipe[i] = true;
+
+- DC_LOG_DC(
+- "Reset mpcc for pipe %d\n",
++ DC_LOG_DC("Reset mpcc for pipe %d\n",
+ old_pipe_ctx->pipe_idx);
+ }
+ }
+@@ -2380,9 +2379,8 @@ static void dcn10_set_bandwidth(
+ struct dc_state *context,
+ bool decrease_allowed)
+ {
+- if (dc->debug.sanity_checks) {
++ if (dc->debug.sanity_checks)
+ dcn10_verify_allow_pstate_change_high(dc);
+- }
+
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
+ return;
+@@ -2397,11 +2395,8 @@ static void dcn10_set_bandwidth(
+
+ dcn10_pplib_apply_display_requirements(dc, context);
+
+- if (dc->debug.sanity_checks) {
++ if (dc->debug.sanity_checks)
+ dcn10_verify_allow_pstate_change_high(dc);
+- }
+-
+- /* need to fix this function. not doing the right thing here */
+ }
+
+ static void set_drr(struct pipe_ctx **pipe_ctx,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4770-drm-amd-display-remove-unnecessary-pplib-volage-requ.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4770-drm-amd-display-remove-unnecessary-pplib-volage-requ.patch
new file mode 100644
index 00000000..0787ec5d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4770-drm-amd-display-remove-unnecessary-pplib-volage-requ.patch
@@ -0,0 +1,52 @@
+From 5046ee596526213198a03fa80e1807bd50c72f95 Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Thu, 31 May 2018 13:28:00 -0400
+Subject: [PATCH 4770/5725] drm/amd/display: remove unnecessary pplib voltage
+ requests that are asserting
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c | 8 --------
+ 1 file changed, 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+index 242e8ae..df6a37b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+@@ -588,21 +588,15 @@ static void dcn1_update_clocks(struct dccg *dccg,
+ #endif
+
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
+- clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
+- clock_voltage_req.clocks_in_khz = new_clocks->dispclk_khz;
+ dcn1_ramp_up_dispclk_with_dpp(dccg, new_clocks);
+ dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
+
+- dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+ send_request_to_lower = true;
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, dccg->clks.phyclk_khz)) {
+- dccg->clks.phyclk_khz = new_clocks->phyclk_khz;
+- clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK;
+ clock_voltage_req.clocks_in_khz = new_clocks->phyclk_khz;
+
+- dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+ send_request_to_lower = true;
+ }
+
+@@ -618,8 +612,6 @@ static void dcn1_update_clocks(struct dccg *dccg,
+
+ if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, dccg->clks.dcfclk_khz)) {
+ dccg->clks.phyclk_khz = new_clocks->dcfclk_khz;
+- clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
+- clock_voltage_req.clocks_in_khz = new_clocks->dcfclk_khz;
+ smu_req.hard_min_dcefclk_khz = new_clocks->dcfclk_khz;
+
+ send_request_to_lower = true;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4771-drm-amd-display-Temporarily-remove-Chroma-logs.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4771-drm-amd-display-Temporarily-remove-Chroma-logs.patch
new file mode 100644
index 00000000..d7e844e7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4771-drm-amd-display-Temporarily-remove-Chroma-logs.patch
@@ -0,0 +1,48 @@
+From fa4a50932008b4ca2ef7ba16b0557c797a7f826d Mon Sep 17 00:00:00 2001
+From: Wesley Chalmers <Wesley.Chalmers@amd.com>
+Date: Fri, 1 Jun 2018 10:54:53 -0400
+Subject: [PATCH 4771/5725] drm/amd/display: Temporarily remove Chroma logs
+
+To ensure tests continue to pass
+
+Signed-off-by: Wesley Chalmers <Wesley.Chalmers@amd.com>
+Reviewed-by: Shahin Khayyer <Shahin.Khayyer@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 8 ++------
+ 1 file changed, 2 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index ef3969f..db72b4d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -151,23 +151,19 @@ static void dcn10_log_hubp_states(struct dc *dc)
+
+ DTN_INFO("\n=========RQ========\n");
+ DTN_INFO("HUBP: drq_exp_m prq_exp_m mrq_exp_m crq_exp_m plane1_ba L:chunk_s min_chu_s meta_ch_s"
+- " min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h C:chunk_s min_chu_s meta_ch_s"
+ " min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h\n");
+ for (i = 0; i < pool->pipe_count; i++) {
+ struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
+ struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
+
+ if (!s->blank_en)
+- DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
++ DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
+ pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
+ rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
+ rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
+ rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
+ rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
+- rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
+- rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
+- rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
+- rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
++ rq_regs->rq_regs_l.pte_row_height_linear);
+ }
+
+ DTN_INFO("========DLG========\n");
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4772-drm-amd-display-Define-dp_alt_mode.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4772-drm-amd-display-Define-dp_alt_mode.patch
new file mode 100644
index 00000000..5507d020
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4772-drm-amd-display-Define-dp_alt_mode.patch
@@ -0,0 +1,112 @@
+From 598d85c7aad1e1be105f2dbb1325aa98e1cb2007 Mon Sep 17 00:00:00 2001
+From: Charlene Liu <charlene.liu@amd.com>
+Date: Wed, 30 May 2018 15:58:08 -0400
+Subject: [PATCH 4772/5725] drm/amd/display: Define dp_alt_mode
+
+Also cleanup command_table2.c. No need for a lot of forward
+declarations.
+
+Signed-off-by: Charlene Liu <charlene.liu@amd.com>
+Reviewed-by: Krunoslav Kovac <Krunoslav.Kovac@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../gpu/drm/amd/display/dc/bios/command_table2.c | 46 ++++++++--------------
+ .../drm/amd/display/dc/dcn10/dcn10_link_encoder.c | 2 +
+ .../gpu/drm/amd/display/include/grph_object_id.h | 5 +++
+ 3 files changed, 24 insertions(+), 29 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+index 752b08a..2b5dc49 100644
+--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
++++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+@@ -59,36 +59,7 @@
+ bios_cmd_table_para_revision(bp->base.ctx->driver_context, \
+ GET_INDEX_INTO_MASTER_TABLE(command, fname))
+
+-static void init_dig_encoder_control(struct bios_parser *bp);
+-static void init_transmitter_control(struct bios_parser *bp);
+-static void init_set_pixel_clock(struct bios_parser *bp);
+
+-static void init_set_crtc_timing(struct bios_parser *bp);
+-
+-static void init_select_crtc_source(struct bios_parser *bp);
+-static void init_enable_crtc(struct bios_parser *bp);
+-
+-static void init_external_encoder_control(struct bios_parser *bp);
+-static void init_enable_disp_power_gating(struct bios_parser *bp);
+-static void init_set_dce_clock(struct bios_parser *bp);
+-static void init_get_smu_clock_info(struct bios_parser *bp);
+-
+-void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp)
+-{
+- init_dig_encoder_control(bp);
+- init_transmitter_control(bp);
+- init_set_pixel_clock(bp);
+-
+- init_set_crtc_timing(bp);
+-
+- init_select_crtc_source(bp);
+- init_enable_crtc(bp);
+-
+- init_external_encoder_control(bp);
+- init_enable_disp_power_gating(bp);
+- init_set_dce_clock(bp);
+- init_get_smu_clock_info(bp);
+-}
+
+ static uint32_t bios_cmd_table_para_revision(void *dev,
+ uint32_t index)
+@@ -829,3 +800,20 @@ static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp, uint8_t id)
+ return 0;
+ }
+
++void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp)
++{
++ init_dig_encoder_control(bp);
++ init_transmitter_control(bp);
++ init_set_pixel_clock(bp);
++
++ init_set_crtc_timing(bp);
++
++ init_select_crtc_source(bp);
++ init_enable_crtc(bp);
++
++ init_external_encoder_control(bp);
++ init_enable_disp_power_gating(bp);
++ init_set_dce_clock(bp);
++ init_get_smu_clock_info(bp);
++
++}
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+index 21fa40a..fd9dc70 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+@@ -995,6 +995,8 @@ void dcn10_link_encoder_disable_output(
+
+ if (!dcn10_is_dig_enabled(enc)) {
+ /* OF_SKIP_POWER_DOWN_INACTIVE_ENCODER */
++ /* In the DP_Alt_No_Connect case the DIG is already turned off;
++ after executing the PHY w/a sequence, do not touch the PHY any more */
+ return;
+ }
+ /* Power-down RX and disable GPU PHY should be paired.
+diff --git a/drivers/gpu/drm/amd/display/include/grph_object_id.h b/drivers/gpu/drm/amd/display/include/grph_object_id.h
+index c419743..92cc6c1 100644
+--- a/drivers/gpu/drm/amd/display/include/grph_object_id.h
++++ b/drivers/gpu/drm/amd/display/include/grph_object_id.h
+@@ -197,6 +197,11 @@ enum transmitter_color_depth {
+ TRANSMITTER_COLOR_DEPTH_48 /* 16 bits */
+ };
+
++enum dp_alt_mode {
++ DP_Alt_mode__Unknown = 0,
++ DP_Alt_mode__Connect,
++ DP_Alt_mode__NoConnect,
++};
+ /*
+ *****************************************************************************
+ * graphics_object_id struct
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4773-drm-amd-display-fix-dccg-dcn1-ifdef.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4773-drm-amd-display-fix-dccg-dcn1-ifdef.patch
new file mode 100644
index 00000000..4ef8cdf4
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4773-drm-amd-display-fix-dccg-dcn1-ifdef.patch
@@ -0,0 +1,112 @@
+From 15ec7ed92dae7822be86da5ed4423d5c63d40a16 Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Fri, 1 Jun 2018 14:13:40 -0400
+Subject: [PATCH 4773/5725] drm/amd/display: fix dccg dcn1 ifdef
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Eric Yang <eric.yang2@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c | 10 ++++++----
+ drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h | 2 ++
+ 2 files changed, 8 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+index df6a37b..e62a21f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+@@ -478,6 +478,7 @@ static void dce12_update_clocks(struct dccg *dccg,
+ }
+ }
+
++#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ static int dcn1_determine_dppclk_threshold(struct dccg *dccg, struct dc_clocks *new_clocks)
+ {
+ bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
+@@ -575,7 +576,6 @@ static void dcn1_update_clocks(struct dccg *dccg,
+ || new_clocks->dcfclk_khz > dccg->clks.dcfclk_khz)
+ send_request_to_increase = true;
+
+-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ /* make sure dcf clk is before dpp clk to
+ * make sure we have enough voltage to run dpp clk
+ */
+@@ -585,7 +585,6 @@ static void dcn1_update_clocks(struct dccg *dccg,
+ clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
+ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+ }
+-#endif
+
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
+ dcn1_ramp_up_dispclk_with_dpp(dccg, new_clocks);
+@@ -623,14 +622,12 @@ static void dcn1_update_clocks(struct dccg *dccg,
+ smu_req.min_deep_sleep_dcefclk_mhz = new_clocks->dcfclk_deep_sleep_khz;
+ }
+
+-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ if (!send_request_to_increase && send_request_to_lower) {
+ /*use dcfclk to request voltage*/
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
+ clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
+ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+ }
+-#endif
+
+ if (new_clocks->phyclk_khz)
+ smu_req.display_count = 1;
+@@ -642,6 +639,7 @@ static void dcn1_update_clocks(struct dccg *dccg,
+
+ *smu_req_cur = smu_req;
+ }
++#endif
+
+ static void dce_update_clocks(struct dccg *dccg,
+ struct dc_clocks *new_clocks,
+@@ -663,11 +661,13 @@ static void dce_update_clocks(struct dccg *dccg,
+ }
+ }
+
++#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ static const struct display_clock_funcs dcn1_funcs = {
+ .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+ .set_dispclk = dce112_set_clock,
+ .update_clocks = dcn1_update_clocks
+ };
++#endif
+
+ static const struct display_clock_funcs dce120_funcs = {
+ .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+@@ -816,6 +816,7 @@ struct dccg *dce120_dccg_create(struct dc_context *ctx)
+ return &clk_dce->base;
+ }
+
++#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ struct dccg *dcn1_dccg_create(struct dc_context *ctx)
+ {
+ struct dc_debug *debug = &ctx->dc->debug;
+@@ -854,6 +855,7 @@ struct dccg *dcn1_dccg_create(struct dc_context *ctx)
+
+ return &clk_dce->base;
+ }
++#endif
+
+ void dce_dccg_destroy(struct dccg **dccg)
+ {
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
+index be5b68d..1f1899e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
+@@ -111,7 +111,9 @@ struct dccg *dce112_dccg_create(
+
+ struct dccg *dce120_dccg_create(struct dc_context *ctx);
+
++#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ struct dccg *dcn1_dccg_create(struct dc_context *ctx);
++#endif
+
+ void dce_dccg_destroy(struct dccg **dccg);
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4774-drm-amd-display-fix-pplib-voltage-request.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4774-drm-amd-display-fix-pplib-voltage-request.patch
new file mode 100644
index 00000000..bf9222f3
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4774-drm-amd-display-fix-pplib-voltage-request.patch
@@ -0,0 +1,124 @@
+From eb8578390ab985f51250d8f29dc3e963c0e065f1 Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Fri, 1 Jun 2018 15:01:32 -0400
+Subject: [PATCH 4774/5725] drm/amd/display: fix pplib voltage request
+
+This fixes incorrect clock caching and by extension fixes
+the clock reporting.
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Eric Yang <eric.yang2@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c | 59 ++++++++++++++-----------
+ 1 file changed, 32 insertions(+), 27 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+index e62a21f..0a4ae0f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+@@ -570,37 +570,25 @@ static void dcn1_update_clocks(struct dccg *dccg,
+ bool send_request_to_increase = false;
+ bool send_request_to_lower = false;
+
++ if (new_clocks->phyclk_khz)
++ smu_req.display_count = 1;
++ else
++ smu_req.display_count = 0;
++
+ if (new_clocks->dispclk_khz > dccg->clks.dispclk_khz
+ || new_clocks->phyclk_khz > dccg->clks.phyclk_khz
+ || new_clocks->fclk_khz > dccg->clks.fclk_khz
+ || new_clocks->dcfclk_khz > dccg->clks.dcfclk_khz)
+ send_request_to_increase = true;
+
+- /* make sure dcf clk is before dpp clk to
+- * make sure we have enough voltage to run dpp clk
+- */
+- if (send_request_to_increase) {
+- /*use dcfclk to request voltage*/
+- clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
+- clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
+- dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+- }
+-
+- if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
+- dcn1_ramp_up_dispclk_with_dpp(dccg, new_clocks);
+- dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
+-
+- send_request_to_lower = true;
+- }
+-
+ if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, dccg->clks.phyclk_khz)) {
+- clock_voltage_req.clocks_in_khz = new_clocks->phyclk_khz;
++ dccg->clks.phyclk_khz = new_clocks->phyclk_khz;
+
+ send_request_to_lower = true;
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, dccg->clks.fclk_khz)) {
+- dccg->clks.phyclk_khz = new_clocks->fclk_khz;
++ dccg->clks.fclk_khz = new_clocks->fclk_khz;
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_FCLK;
+ clock_voltage_req.clocks_in_khz = new_clocks->fclk_khz;
+ smu_req.hard_min_fclk_khz = new_clocks->fclk_khz;
+@@ -610,7 +598,7 @@ static void dcn1_update_clocks(struct dccg *dccg,
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, dccg->clks.dcfclk_khz)) {
+- dccg->clks.phyclk_khz = new_clocks->dcfclk_khz;
++ dccg->clks.dcfclk_khz = new_clocks->dcfclk_khz;
+ smu_req.hard_min_dcefclk_khz = new_clocks->dcfclk_khz;
+
+ send_request_to_lower = true;
+@@ -620,22 +608,39 @@ static void dcn1_update_clocks(struct dccg *dccg,
+ new_clocks->dcfclk_deep_sleep_khz, dccg->clks.dcfclk_deep_sleep_khz)) {
+ dccg->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
+ smu_req.min_deep_sleep_dcefclk_mhz = new_clocks->dcfclk_deep_sleep_khz;
++
++ send_request_to_lower = true;
+ }
+
+- if (!send_request_to_increase && send_request_to_lower) {
++ /* make sure dcf clk is before dpp clk to
++ * make sure we have enough voltage to run dpp clk
++ */
++ if (send_request_to_increase) {
+ /*use dcfclk to request voltage*/
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
+ clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
+ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
++ if (pp_smu->set_display_requirement)
++ pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
+ }
+
+- if (new_clocks->phyclk_khz)
+- smu_req.display_count = 1;
+- else
+- smu_req.display_count = 0;
++ /* dcn1 dppclk is tied to dispclk */
++ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
++ dcn1_ramp_up_dispclk_with_dpp(dccg, new_clocks);
++ dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
++
++ send_request_to_lower = true;
++ }
++
++ if (!send_request_to_increase && send_request_to_lower) {
++ /*use dcfclk to request voltage*/
++ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
++ clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
++ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
++ if (pp_smu->set_display_requirement)
++ pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
++ }
+
+- if (pp_smu->set_display_requirement)
+- pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
+
+ *smu_req_cur = smu_req;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4775-drm-amd-display-add-CHG_DONE-mash-sh-defines-for-den.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4775-drm-amd-display-add-CHG_DONE-mash-sh-defines-for-den.patch
new file mode 100644
index 00000000..e9f93a0b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4775-drm-amd-display-add-CHG_DONE-mash-sh-defines-for-den.patch
@@ -0,0 +1,41 @@
+From 9784fbbe2499f942ed63d89c03c02ecdb74cfadf Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Wed, 30 May 2018 17:12:16 -0400
+Subject: [PATCH 4775/5725] drm/amd/display: add CHG_DONE mask/sh defines for
+ dentist
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
+index 1f1899e..7ce0a54 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
+@@ -45,13 +45,17 @@
+
+ #define CLK_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(mask_sh) \
+ CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_WDIVIDER, mask_sh),\
+- CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, mask_sh)
++ CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, mask_sh),\
++ CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, mask_sh),\
++ CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_CHG_DONE, mask_sh)
+
+ #define CLK_REG_FIELD_LIST(type) \
+ type DPREFCLK_SRC_SEL; \
+ type DENTIST_DPREFCLK_WDIVIDER; \
+ type DENTIST_DISPCLK_WDIVIDER; \
+- type DENTIST_DPPCLK_WDIVIDER;
++ type DENTIST_DPPCLK_WDIVIDER; \
++ type DENTIST_DISPCLK_CHG_DONE; \
++ type DENTIST_DPPCLK_CHG_DONE;
+
+ struct dccg_shift {
+ CLK_REG_FIELD_LIST(uint8_t)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4776-drm-amd-display-change-dentist-DID-enum-values-to-up.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4776-drm-amd-display-change-dentist-DID-enum-values-to-up.patch
new file mode 100644
index 00000000..0f0f0948
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4776-drm-amd-display-change-dentist-DID-enum-values-to-up.patch
@@ -0,0 +1,94 @@
+From 13b90c08f783ed42c761174a375908d6f2c715e5 Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Wed, 30 May 2018 17:17:27 -0400
+Subject: [PATCH 4776/5725] drm/amd/display: change dentist DID enum values to
+ uppercase
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c | 50 ++++++++++++-------------
+ 1 file changed, 25 insertions(+), 25 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+index 0a4ae0f..6882dc9 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+@@ -103,39 +103,39 @@ static const struct state_dependent_clocks dce120_max_clks_by_state[] = {
+
+ /* Starting DID for each range */
+ enum dentist_base_divider_id {
+- dentist_base_divider_id_1 = 0x08,
+- dentist_base_divider_id_2 = 0x40,
+- dentist_base_divider_id_3 = 0x60,
+- dentist_max_divider_id = 0x80
++ DENTIST_BASE_DID_1 = 0x08,
++ DENTIST_BASE_DID_2 = 0x40,
++ DENTIST_BASE_DID_3 = 0x60,
++ DENTIST_MAX_DID = 0x80
+ };
+
+ /* Starting point and step size for each divider range.*/
+ enum dentist_divider_range {
+- dentist_divider_range_1_start = 8, /* 2.00 */
+- dentist_divider_range_1_step = 1, /* 0.25 */
+- dentist_divider_range_2_start = 64, /* 16.00 */
+- dentist_divider_range_2_step = 2, /* 0.50 */
+- dentist_divider_range_3_start = 128, /* 32.00 */
+- dentist_divider_range_3_step = 4, /* 1.00 */
+- dentist_divider_range_scale_factor = 4
++ DENTIST_DIVIDER_RANGE_1_START = 8, /* 2.00 */
++ DENTIST_DIVIDER_RANGE_1_STEP = 1, /* 0.25 */
++ DENTIST_DIVIDER_RANGE_2_START = 64, /* 16.00 */
++ DENTIST_DIVIDER_RANGE_2_STEP = 2, /* 0.50 */
++ DENTIST_DIVIDER_RANGE_3_START = 128, /* 32.00 */
++ DENTIST_DIVIDER_RANGE_3_STEP = 4, /* 1.00 */
++ DENTIST_DIVIDER_RANGE_SCALE_FACTOR = 4
+ };
+
+ static int dentist_get_divider_from_did(int did)
+ {
+- if (did < dentist_base_divider_id_1)
+- did = dentist_base_divider_id_1;
+- if (did > dentist_max_divider_id)
+- did = dentist_max_divider_id;
+-
+- if (did < dentist_base_divider_id_2) {
+- return dentist_divider_range_1_start + dentist_divider_range_1_step
+- * (did - dentist_base_divider_id_1);
+- } else if (did < dentist_base_divider_id_3) {
+- return dentist_divider_range_2_start + dentist_divider_range_2_step
+- * (did - dentist_base_divider_id_2);
++ if (did < DENTIST_BASE_DID_1)
++ did = DENTIST_BASE_DID_1;
++ if (did > DENTIST_MAX_DID)
++ did = DENTIST_MAX_DID;
++
++ if (did < DENTIST_BASE_DID_2) {
++ return DENTIST_DIVIDER_RANGE_1_START + DENTIST_DIVIDER_RANGE_1_STEP
++ * (did - DENTIST_BASE_DID_1);
++ } else if (did < DENTIST_BASE_DID_3) {
++ return DENTIST_DIVIDER_RANGE_2_START + DENTIST_DIVIDER_RANGE_2_STEP
++ * (did - DENTIST_BASE_DID_2);
+ } else {
+- return dentist_divider_range_3_start + dentist_divider_range_3_step
+- * (did - dentist_base_divider_id_3);
++ return DENTIST_DIVIDER_RANGE_3_START + DENTIST_DIVIDER_RANGE_3_STEP
++ * (did - DENTIST_BASE_DID_3);
+ }
+ }
+
+@@ -186,7 +186,7 @@ static int dce_get_dp_ref_freq_khz(struct dccg *clk)
+ target_div = dentist_get_divider_from_did(dprefclk_wdivider);
+
+ /* Calculate the current DFS clock, in kHz.*/
+- dp_ref_clk_khz = (dentist_divider_range_scale_factor
++ dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
+ * clk_dce->dentist_vco_freq_khz) / target_div;
+
+ return dccg_adjust_dp_ref_freq_for_ss(clk_dce, dp_ref_clk_khz);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4777-drm-amd-display-add-safe_to_lower-support-to-dcn-wm-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4777-drm-amd-display-add-safe_to_lower-support-to-dcn-wm-.patch
new file mode 100644
index 00000000..c0f1becc
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4777-drm-amd-display-add-safe_to_lower-support-to-dcn-wm-.patch
@@ -0,0 +1,463 @@
+From 9c64a4e35d3a3709b40cc2686859421ef8a42282 Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Thu, 24 May 2018 15:09:40 -0400
+Subject: [PATCH 4777/5725] drm/amd/display: add safe_to_lower support to dcn
+ wm programming
+
+This will prevent watermarks from lowering when unsafe to do so.
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c | 346 +++++++++++++--------
+ .../gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h | 4 +-
+ .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 2 +-
+ 3 files changed, 214 insertions(+), 138 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+index 943143e..63b75ac 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+@@ -193,7 +193,8 @@ static uint32_t convert_and_clamp(
+ void hubbub1_program_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+- unsigned int refclk_mhz)
++ unsigned int refclk_mhz,
++ bool safe_to_lower)
+ {
+ uint32_t force_en = hubbub->ctx->dc->debug.disable_stutter ? 1 : 0;
+ /*
+@@ -207,184 +208,257 @@ void hubbub1_program_watermarks(
+
+ /* Repeat for water mark set A, B, C and D. */
+ /* clock state A */
+- prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
+- refclk_mhz, 0x1fffff);
+- REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
+-
+- DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
+- "HW register value = 0x%x\n",
+- watermarks->a.urgent_ns, prog_wm_value);
++ if (safe_to_lower || watermarks->a.urgent_ns > hubbub->watermarks.a.urgent_ns) {
++ hubbub->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
++ prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
++ refclk_mhz, 0x1fffff);
++ REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
+
+- prog_wm_value = convert_and_clamp(watermarks->a.pte_meta_urgent_ns,
+- refclk_mhz, 0x1fffff);
+- REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A, prog_wm_value);
+- DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_A calculated =%d\n"
+- "HW register value = 0x%x\n",
+- watermarks->a.pte_meta_urgent_ns, prog_wm_value);
++ DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
++ "HW register value = 0x%x\n",
++ watermarks->a.urgent_ns, prog_wm_value);
++ }
+
+- if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A)) {
+- prog_wm_value = convert_and_clamp(
+- watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
++ if (safe_to_lower || watermarks->a.pte_meta_urgent_ns > hubbub->watermarks.a.pte_meta_urgent_ns) {
++ hubbub->watermarks.a.pte_meta_urgent_ns = watermarks->a.pte_meta_urgent_ns;
++ prog_wm_value = convert_and_clamp(watermarks->a.pte_meta_urgent_ns,
+ refclk_mhz, 0x1fffff);
+- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
+- DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
++ REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A, prog_wm_value);
++ DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_A calculated =%d\n"
+ "HW register value = 0x%x\n",
+- watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
++ watermarks->a.pte_meta_urgent_ns, prog_wm_value);
++ }
+
++ if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A)) {
++ if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
++ > hubbub->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) {
++ hubbub->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
++ watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
++ prog_wm_value = convert_and_clamp(
++ watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
++ refclk_mhz, 0x1fffff);
++ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
++ DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
++ "HW register value = 0x%x\n",
++ watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
++ }
++
++ if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
++ > hubbub->watermarks.a.cstate_pstate.cstate_exit_ns) {
++ hubbub->watermarks.a.cstate_pstate.cstate_exit_ns =
++ watermarks->a.cstate_pstate.cstate_exit_ns;
++ prog_wm_value = convert_and_clamp(
++ watermarks->a.cstate_pstate.cstate_exit_ns,
++ refclk_mhz, 0x1fffff);
++ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
++ DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
++ "HW register value = 0x%x\n",
++ watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
++ }
++ }
+
++ if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
++ > hubbub->watermarks.a.cstate_pstate.pstate_change_ns) {
++ hubbub->watermarks.a.cstate_pstate.pstate_change_ns =
++ watermarks->a.cstate_pstate.pstate_change_ns;
+ prog_wm_value = convert_and_clamp(
+- watermarks->a.cstate_pstate.cstate_exit_ns,
++ watermarks->a.cstate_pstate.pstate_change_ns,
+ refclk_mhz, 0x1fffff);
+- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
+- DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
+- "HW register value = 0x%x\n",
+- watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
++ REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
++ DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
++ "HW register value = 0x%x\n\n",
++ watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
+ }
+
+- prog_wm_value = convert_and_clamp(
+- watermarks->a.cstate_pstate.pstate_change_ns,
+- refclk_mhz, 0x1fffff);
+- REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
+- DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
+- "HW register value = 0x%x\n\n",
+- watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
+-
+-
+ /* clock state B */
+- prog_wm_value = convert_and_clamp(
+- watermarks->b.urgent_ns, refclk_mhz, 0x1fffff);
+- REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);
+- DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
+- "HW register value = 0x%x\n",
+- watermarks->b.urgent_ns, prog_wm_value);
+-
+-
+- prog_wm_value = convert_and_clamp(
+- watermarks->b.pte_meta_urgent_ns,
+- refclk_mhz, 0x1fffff);
+- REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B, prog_wm_value);
+- DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_B calculated =%d\n"
+- "HW register value = 0x%x\n",
+- watermarks->b.pte_meta_urgent_ns, prog_wm_value);
++ if (safe_to_lower || watermarks->b.urgent_ns > hubbub->watermarks.b.urgent_ns) {
++ hubbub->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
++ prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
++ refclk_mhz, 0x1fffff);
++ REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);
+
++ DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
++ "HW register value = 0x%x\n",
++ watermarks->b.urgent_ns, prog_wm_value);
++ }
+
+- if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B)) {
+- prog_wm_value = convert_and_clamp(
+- watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
++ if (safe_to_lower || watermarks->b.pte_meta_urgent_ns > hubbub->watermarks.b.pte_meta_urgent_ns) {
++ hubbub->watermarks.b.pte_meta_urgent_ns = watermarks->b.pte_meta_urgent_ns;
++ prog_wm_value = convert_and_clamp(watermarks->b.pte_meta_urgent_ns,
+ refclk_mhz, 0x1fffff);
+- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
+- DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_B calculated =%d\n"
++ REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B, prog_wm_value);
++ DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_B calculated =%d\n"
+ "HW register value = 0x%x\n",
+- watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
++ watermarks->b.pte_meta_urgent_ns, prog_wm_value);
++ }
+
++ if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B)) {
++ if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
++ > hubbub->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) {
++ hubbub->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
++ watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
++ prog_wm_value = convert_and_clamp(
++ watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
++ refclk_mhz, 0x1fffff);
++ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
++ DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
++ "HW register value = 0x%x\n",
++ watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
++ }
+
++ if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
++ > hubbub->watermarks.b.cstate_pstate.cstate_exit_ns) {
++ hubbub->watermarks.b.cstate_pstate.cstate_exit_ns =
++ watermarks->b.cstate_pstate.cstate_exit_ns;
++ prog_wm_value = convert_and_clamp(
++ watermarks->b.cstate_pstate.cstate_exit_ns,
++ refclk_mhz, 0x1fffff);
++ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
++ DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
++ "HW register value = 0x%x\n",
++ watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
++ }
++ }
++
++ if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
++ > hubbub->watermarks.b.cstate_pstate.pstate_change_ns) {
++ hubbub->watermarks.b.cstate_pstate.pstate_change_ns =
++ watermarks->b.cstate_pstate.pstate_change_ns;
+ prog_wm_value = convert_and_clamp(
+- watermarks->b.cstate_pstate.cstate_exit_ns,
++ watermarks->b.cstate_pstate.pstate_change_ns,
+ refclk_mhz, 0x1fffff);
+- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
+- DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
+- "HW register value = 0x%x\n",
+- watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
++ REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
++ DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
++ "HW register value = 0x%x\n\n",
++ watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
+ }
+
+- prog_wm_value = convert_and_clamp(
+- watermarks->b.cstate_pstate.pstate_change_ns,
+- refclk_mhz, 0x1fffff);
+- REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
+- DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n\n"
+- "HW register value = 0x%x\n",
+- watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
+-
+ /* clock state C */
+- prog_wm_value = convert_and_clamp(
+- watermarks->c.urgent_ns, refclk_mhz, 0x1fffff);
+- REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);
+- DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
+- "HW register value = 0x%x\n",
+- watermarks->c.urgent_ns, prog_wm_value);
+-
+-
+- prog_wm_value = convert_and_clamp(
+- watermarks->c.pte_meta_urgent_ns,
+- refclk_mhz, 0x1fffff);
+- REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C, prog_wm_value);
+- DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_C calculated =%d\n"
+- "HW register value = 0x%x\n",
+- watermarks->c.pte_meta_urgent_ns, prog_wm_value);
++ if (safe_to_lower || watermarks->c.urgent_ns > hubbub->watermarks.c.urgent_ns) {
++ hubbub->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
++ prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
++ refclk_mhz, 0x1fffff);
++ REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);
+
++ DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
++ "HW register value = 0x%x\n",
++ watermarks->c.urgent_ns, prog_wm_value);
++ }
+
+- if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C)) {
+- prog_wm_value = convert_and_clamp(
+- watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
++ if (safe_to_lower || watermarks->c.pte_meta_urgent_ns > hubbub->watermarks.c.pte_meta_urgent_ns) {
++ hubbub->watermarks.c.pte_meta_urgent_ns = watermarks->c.pte_meta_urgent_ns;
++ prog_wm_value = convert_and_clamp(watermarks->c.pte_meta_urgent_ns,
+ refclk_mhz, 0x1fffff);
+- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
+- DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_C calculated =%d\n"
++ REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C, prog_wm_value);
++ DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_C calculated =%d\n"
+ "HW register value = 0x%x\n",
+- watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
++ watermarks->c.pte_meta_urgent_ns, prog_wm_value);
++ }
+
++ if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C)) {
++ if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
++ > hubbub->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) {
++ hubbub->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
++ watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
++ prog_wm_value = convert_and_clamp(
++ watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
++ refclk_mhz, 0x1fffff);
++ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
++ DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
++ "HW register value = 0x%x\n",
++ watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
++ }
+
++ if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
++ > hubbub->watermarks.c.cstate_pstate.cstate_exit_ns) {
++ hubbub->watermarks.c.cstate_pstate.cstate_exit_ns =
++ watermarks->c.cstate_pstate.cstate_exit_ns;
++ prog_wm_value = convert_and_clamp(
++ watermarks->c.cstate_pstate.cstate_exit_ns,
++ refclk_mhz, 0x1fffff);
++ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
++ DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
++ "HW register value = 0x%x\n",
++ watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
++ }
++ }
++
++ if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
++ > hubbub->watermarks.c.cstate_pstate.pstate_change_ns) {
++ hubbub->watermarks.c.cstate_pstate.pstate_change_ns =
++ watermarks->c.cstate_pstate.pstate_change_ns;
+ prog_wm_value = convert_and_clamp(
+- watermarks->c.cstate_pstate.cstate_exit_ns,
++ watermarks->c.cstate_pstate.pstate_change_ns,
+ refclk_mhz, 0x1fffff);
+- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
+- DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
+- "HW register value = 0x%x\n",
+- watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
++ REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
++ DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
++ "HW register value = 0x%x\n\n",
++ watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
+ }
+
+- prog_wm_value = convert_and_clamp(
+- watermarks->c.cstate_pstate.pstate_change_ns,
+- refclk_mhz, 0x1fffff);
+- REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
+- DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n\n"
+- "HW register value = 0x%x\n",
+- watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
+-
+ /* clock state D */
+- prog_wm_value = convert_and_clamp(
+- watermarks->d.urgent_ns, refclk_mhz, 0x1fffff);
+- REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);
+- DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
+- "HW register value = 0x%x\n",
+- watermarks->d.urgent_ns, prog_wm_value);
+-
+- prog_wm_value = convert_and_clamp(
+- watermarks->d.pte_meta_urgent_ns,
+- refclk_mhz, 0x1fffff);
+- REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D, prog_wm_value);
+- DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_D calculated =%d\n"
+- "HW register value = 0x%x\n",
+- watermarks->d.pte_meta_urgent_ns, prog_wm_value);
++ if (safe_to_lower || watermarks->d.urgent_ns > hubbub->watermarks.d.urgent_ns) {
++ hubbub->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
++ prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
++ refclk_mhz, 0x1fffff);
++ REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);
+
++ DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
++ "HW register value = 0x%x\n",
++ watermarks->d.urgent_ns, prog_wm_value);
++ }
+
+- if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D)) {
+- prog_wm_value = convert_and_clamp(
+- watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
++ if (safe_to_lower || watermarks->d.pte_meta_urgent_ns > hubbub->watermarks.d.pte_meta_urgent_ns) {
++ hubbub->watermarks.d.pte_meta_urgent_ns = watermarks->d.pte_meta_urgent_ns;
++ prog_wm_value = convert_and_clamp(watermarks->d.pte_meta_urgent_ns,
+ refclk_mhz, 0x1fffff);
+- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
+- DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_D calculated =%d\n"
++ REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D, prog_wm_value);
++ DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_D calculated =%d\n"
+ "HW register value = 0x%x\n",
+- watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
++ watermarks->d.pte_meta_urgent_ns, prog_wm_value);
++ }
++
++ if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D)) {
++ if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
++ > hubbub->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) {
++ hubbub->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
++ watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
++ prog_wm_value = convert_and_clamp(
++ watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
++ refclk_mhz, 0x1fffff);
++ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
++ DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
++ "HW register value = 0x%x\n",
++ watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
++ }
+
++ if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
++ > hubbub->watermarks.d.cstate_pstate.cstate_exit_ns) {
++ hubbub->watermarks.d.cstate_pstate.cstate_exit_ns =
++ watermarks->d.cstate_pstate.cstate_exit_ns;
++ prog_wm_value = convert_and_clamp(
++ watermarks->d.cstate_pstate.cstate_exit_ns,
++ refclk_mhz, 0x1fffff);
++ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
++ DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
++ "HW register value = 0x%x\n",
++ watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
++ }
++ }
+
++ if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
++ > hubbub->watermarks.d.cstate_pstate.pstate_change_ns) {
++ hubbub->watermarks.d.cstate_pstate.pstate_change_ns =
++ watermarks->d.cstate_pstate.pstate_change_ns;
+ prog_wm_value = convert_and_clamp(
+- watermarks->d.cstate_pstate.cstate_exit_ns,
++ watermarks->d.cstate_pstate.pstate_change_ns,
+ refclk_mhz, 0x1fffff);
+- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
+- DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
+- "HW register value = 0x%x\n",
+- watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
++ REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
++ DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
++ "HW register value = 0x%x\n\n",
++ watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
+ }
+
+-
+- prog_wm_value = convert_and_clamp(
+- watermarks->d.cstate_pstate.pstate_change_ns,
+- refclk_mhz, 0x1fffff);
+- REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
+- DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
+- "HW register value = 0x%x\n\n",
+- watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
+-
+ REG_UPDATE(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
+ DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
+index 6315a0e..0ca39cb 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
+@@ -185,6 +185,7 @@ struct hubbub {
+ const struct dcn_hubbub_shift *shifts;
+ const struct dcn_hubbub_mask *masks;
+ unsigned int debug_test_index_pstate;
++ struct dcn_watermark_set watermarks;
+ };
+
+ void hubbub1_update_dchub(
+@@ -197,7 +198,8 @@ bool hubbub1_verify_allow_pstate_change_high(
+ void hubbub1_program_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+- unsigned int refclk_mhz);
++ unsigned int refclk_mhz,
++ bool safe_to_lower);
+
+ void hubbub1_toggle_watermark_change_req(
+ struct hubbub *hubbub);
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index db72b4d..1170ea0 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -2314,7 +2314,7 @@ static void dcn10_apply_ctx_for_surface(
+
+ /* watermark is for all pipes */
+ hubbub1_program_watermarks(dc->res_pool->hubbub,
+- &context->bw.dcn.watermarks, ref_clk_mhz);
++ &context->bw.dcn.watermarks, ref_clk_mhz, true);
+
+ if (dc->debug.sanity_checks) {
+ /* pstate stuck check after watermark update */
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4778-drm-amd-display-support-ACrYCb2101010.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4778-drm-amd-display-support-ACrYCb2101010.patch
new file mode 100644
index 00000000..c917639d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4778-drm-amd-display-support-ACrYCb2101010.patch
@@ -0,0 +1,42 @@
+From fda1e5d264009086c45e41cf7f7c28916cd91803 Mon Sep 17 00:00:00 2001
+From: "Zheng, XueLai(Eric)" <XueLai.Zheng@amd.com>
+Date: Tue, 8 May 2018 12:25:15 -0400
+Subject: [PATCH 4778/5725] drm/amd/display: support ACrYCb2101010
+
+Signed-off-by: XueLai(Eric), Zheng <XueLai.Zheng@amd.com>
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 1 +
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 2 +-
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+index 7e5a41f..f285d37 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+@@ -199,6 +199,7 @@ enum surface_pixel_format {
+ SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb,
+ SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr,
+ SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb,
++ SURFACE_PIXEL_FORMAT_SUBSAMPLE_END,
+ SURFACE_PIXEL_FORMAT_INVALID
+
+ /*grow 444 video here if necessary */
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+index c28085b..93f52c5 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+@@ -166,7 +166,7 @@ void hubp1_program_size_and_rotation(
+ /* Program data and meta surface pitch (calculation from addrlib)
+ * 444 or 420 luma
+ */
+- if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
++ if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN && format < SURFACE_PIXEL_FORMAT_SUBSAMPLE_END) {
+ ASSERT(plane_size->video.chroma_pitch != 0);
+ /* Chroma pitch zero can cause system hang! */
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4779-drm-amd-display-fix-use-of-uninitialized-memory.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4779-drm-amd-display-fix-use-of-uninitialized-memory.patch
new file mode 100644
index 00000000..a77dd6a8
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4779-drm-amd-display-fix-use-of-uninitialized-memory.patch
@@ -0,0 +1,36 @@
+From c1b8da392e8418714a9ccd6389fdf92d4ba8bb3e Mon Sep 17 00:00:00 2001
+From: Wesley Chalmers <Wesley.Chalmers@amd.com>
+Date: Tue, 29 May 2018 17:45:05 -0400
+Subject: [PATCH 4779/5725] drm/amd/display: fix use of uninitialized memory
+
+DML does not calculate chroma values for RQ when surface is not YUV, but DC
+will unconditionally use the uninitialized values for HW programming.
+This does not cause visual corruption since HW will ignore garbage chroma
+values when surface is not YUV, but causes presubmission tests to fail
+golden value comparison.
+
+Signed-off-by: Wesley Chalmers <Wesley.Chalmers@amd.com>
+Signed-off-by: Eryk Brol <eryk.brol@amd.com>
+Reviewed-by: Wenjing Liu <Wenjing.Liu@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
+index c2037da..0efbf41 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
+@@ -239,6 +239,8 @@ void dml1_extract_rq_regs(
+ extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), rq_param.sizing.rq_l);
+ if (rq_param.yuv420)
+ extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), rq_param.sizing.rq_c);
++ else
++ memset(&(rq_regs->rq_regs_c), 0, sizeof(rq_regs->rq_regs_c));
+
+ rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height);
+ rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4780-drm-amd-display-dal-3.1.49.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4780-drm-amd-display-dal-3.1.49.patch
new file mode 100644
index 00000000..4622c019
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4780-drm-amd-display-dal-3.1.49.patch
@@ -0,0 +1,28 @@
+From 1ec8eb2f40726397e5fc5649ab8e15edcfcd4869 Mon Sep 17 00:00:00 2001
+From: Tony Cheng <tony.cheng@amd.com>
+Date: Tue, 8 May 2018 12:25:29 -0400
+Subject: [PATCH 4780/5725] drm/amd/display: dal 3.1.49
+
+Signed-off-by: Tony Cheng <tony.cheng@amd.com>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 94f7a7e..4b5a92b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -38,7 +38,7 @@
+ #include "inc/compressor.h"
+ #include "dml/display_mode_lib.h"
+
+-#define DC_VER "3.1.48"
++#define DC_VER "3.1.49"
+
+ #define MAX_SURFACES 3
+ #define MAX_STREAMS 6
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4781-drm-amd-display-Add-front-end-for-dp-debugfs-files.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4781-drm-amd-display-Add-front-end-for-dp-debugfs-files.patch
new file mode 100644
index 00000000..1d6fa57a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4781-drm-amd-display-Add-front-end-for-dp-debugfs-files.patch
@@ -0,0 +1,284 @@
+From 2407761df9e633f358b6731d95c64e42a8f90f94 Mon Sep 17 00:00:00 2001
+From: David Francis <David.Francis@amd.com>
+Date: Fri, 1 Jun 2018 09:49:06 -0400
+Subject: [PATCH 4781/5725] drm/amd/display: Add front end for dp debugfs files
+
+As part of hardware certification, read-write access to
+the link rate, lane count, voltage swing, pre-emphasis,
+and PHY test pattern of DP connectors is required. This commit
+adds debugfs files that will correspond to these values.
+The file operations are not yet implemented: currently
+writing or reading them does nothing.
+
+Signed-off-by: David Francis <David.Francis@amd.com>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/Makefile | 2 +-
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 10 ++
+ .../drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 170 +++++++++++++++++++++
+ .../drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h | 34 +++++
+ 4 files changed, 215 insertions(+), 1 deletion(-)
+ create mode 100644 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+ create mode 100644 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
+index 8af5ccc..c54685e 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
+@@ -11,7 +11,7 @@ AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o
+ endif
+
+ ifneq ($(CONFIG_DEBUG_FS),)
+-AMDGPUDM += amdgpu_dm_crc.o
++AMDGPUDM += amdgpu_dm_crc.o amdgpu_dm_debugfs.o
+ endif
+
+ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 985c3b9..f51c1f2 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -39,6 +39,9 @@
+ #include "dm_helpers.h"
+ #include "dm_services_types.h"
+ #include "amdgpu_dm_mst_types.h"
++#if defined(CONFIG_DEBUG_FS)
++#include "amdgpu_dm_debugfs.h"
++#endif
+
+ #include "ivsrcid/ivsrcid_vislands30.h"
+
+@@ -4000,6 +4003,13 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
+ &aconnector->base, &aencoder->base);
+
+ drm_connector_register(&aconnector->base);
++#if defined(CONFIG_DEBUG_FS)
++ res = connector_debugfs_init(aconnector);
++ if (res) {
++ DRM_ERROR("Failed to create debugfs for connector");
++ goto out_free;
++ }
++#endif
+
+ if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
+ || connector_type == DRM_MODE_CONNECTOR_eDP)
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+new file mode 100644
+index 0000000..cf5ea69
+--- /dev/null
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+@@ -0,0 +1,170 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++#include <linux/debugfs.h>
++
++#include "dc.h"
++#include "dc_link.h"
++
++#include "amdgpu.h"
++#include "amdgpu_dm.h"
++#include "amdgpu_dm_debugfs.h"
++
++static ssize_t dp_link_rate_debugfs_read(struct file *f, char __user *buf,
++ size_t size, loff_t *pos)
++{
++ /* TODO: create method to read link rate */
++ return 1;
++}
++
++static ssize_t dp_link_rate_debugfs_write(struct file *f, const char __user *buf,
++ size_t size, loff_t *pos)
++{
++ /* TODO: create method to write link rate */
++ return 1;
++}
++
++static ssize_t dp_lane_count_debugfs_read(struct file *f, char __user *buf,
++ size_t size, loff_t *pos)
++{
++ /* TODO: create method to read lane count */
++ return 1;
++}
++
++static ssize_t dp_lane_count_debugfs_write(struct file *f, const char __user *buf,
++ size_t size, loff_t *pos)
++{
++ /* TODO: create method to write lane count */
++ return 1;
++}
++
++static ssize_t dp_voltage_swing_debugfs_read(struct file *f, char __user *buf,
++ size_t size, loff_t *pos)
++{
++ /* TODO: create method to read voltage swing */
++ return 1;
++}
++
++static ssize_t dp_voltage_swing_debugfs_write(struct file *f, const char __user *buf,
++ size_t size, loff_t *pos)
++{
++ /* TODO: create method to write voltage swing */
++ return 1;
++}
++
++static ssize_t dp_pre_emphasis_debugfs_read(struct file *f, char __user *buf,
++ size_t size, loff_t *pos)
++{
++ /* TODO: create method to read pre-emphasis */
++ return 1;
++}
++
++static ssize_t dp_pre_emphasis_debugfs_write(struct file *f, const char __user *buf,
++ size_t size, loff_t *pos)
++{
++ /* TODO: create method to write pre-emphasis */
++ return 1;
++}
++
++static ssize_t dp_phy_test_pattern_debugfs_read(struct file *f, char __user *buf,
++ size_t size, loff_t *pos)
++{
++ /* TODO: create method to read PHY test pattern */
++ return 1;
++}
++
++static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __user *buf,
++ size_t size, loff_t *pos)
++{
++ /* TODO: create method to write PHY test pattern */
++ return 1;
++}
++
++static const struct file_operations dp_link_rate_fops = {
++ .owner = THIS_MODULE,
++ .read = dp_link_rate_debugfs_read,
++ .write = dp_link_rate_debugfs_write,
++ .llseek = default_llseek
++};
++
++static const struct file_operations dp_lane_count_fops = {
++ .owner = THIS_MODULE,
++ .read = dp_lane_count_debugfs_read,
++ .write = dp_lane_count_debugfs_write,
++ .llseek = default_llseek
++};
++
++static const struct file_operations dp_voltage_swing_fops = {
++ .owner = THIS_MODULE,
++ .read = dp_voltage_swing_debugfs_read,
++ .write = dp_voltage_swing_debugfs_write,
++ .llseek = default_llseek
++};
++
++static const struct file_operations dp_pre_emphasis_fops = {
++ .owner = THIS_MODULE,
++ .read = dp_pre_emphasis_debugfs_read,
++ .write = dp_pre_emphasis_debugfs_write,
++ .llseek = default_llseek
++};
++
++static const struct file_operations dp_phy_test_pattern_fops = {
++ .owner = THIS_MODULE,
++ .read = dp_phy_test_pattern_debugfs_read,
++ .write = dp_phy_test_pattern_debugfs_write,
++ .llseek = default_llseek
++};
++
++static const struct {
++ char *name;
++ const struct file_operations *fops;
++} dp_debugfs_entries[] = {
++ {"link_rate", &dp_link_rate_fops},
++ {"lane_count", &dp_lane_count_fops},
++ {"voltage_swing", &dp_voltage_swing_fops},
++ {"pre_emphasis", &dp_pre_emphasis_fops},
++ {"phy_test_pattern", &dp_phy_test_pattern_fops}
++};
++
++int connector_debugfs_init(struct amdgpu_dm_connector *connector)
++{
++ int i;
++ struct dentry *ent, *dir = connector->base.debugfs_entry;
++
++ if (connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
++ for (i = 0; i < ARRAY_SIZE(dp_debugfs_entries); i++) {
++ ent = debugfs_create_file(dp_debugfs_entries[i].name,
++ 0644,
++ dir,
++ connector,
++ dp_debugfs_entries[i].fops);
++ if (IS_ERR(ent))
++ return PTR_ERR(ent);
++ }
++ }
++
++ return 0;
++}
++
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h
+new file mode 100644
+index 0000000..d9ed1b2
+--- /dev/null
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h
+@@ -0,0 +1,34 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++#ifndef __AMDGPU_DM_DEBUGFS_H__
++#define __AMDGPU_DM_DEBUGFS_H__
++
++#include "amdgpu.h"
++#include "amdgpu_dm.h"
++
++int connector_debugfs_init(struct amdgpu_dm_connector *connector);
++
++#endif
+--
+2.7.4
+
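The read and write handlers in the debugfs patch above are placeholders that simply return 1. For orientation, here is a minimal sketch of how such a debugfs read callback is typically filled in; the value, buffer size and helper names below are assumptions for illustration, not the driver's eventual implementation.

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/kernel.h>

static unsigned int example_link_rate = 0x0a;   /* hypothetical stand-in value */

static ssize_t example_debugfs_read(struct file *f, char __user *buf,
                                    size_t size, loff_t *pos)
{
        char tmp[16];
        int len;

        /* format the value into a kernel buffer ... */
        len = scnprintf(tmp, sizeof(tmp), "%u\n", example_link_rate);

        /* ... then copy at most 'size' bytes to user space, advancing *pos */
        return simple_read_from_buffer(buf, size, pos, tmp, len);
}

Once wired up through debugfs_create_file() as in connector_debugfs_init() above, such an entry can be read from user space with a plain cat, typically under /sys/kernel/debug/dri/<minor>/<connector>/.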
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4782-drm-amd-display-dal-3.1.50.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4782-drm-amd-display-dal-3.1.50.patch
new file mode 100644
index 00000000..80685727
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4782-drm-amd-display-dal-3.1.50.patch
@@ -0,0 +1,28 @@
+From 59d0348bd914bcfd6b704c0f6f45b3ee4dd7b87f Mon Sep 17 00:00:00 2001
+From: Tony Cheng <tony.cheng@amd.com>
+Date: Tue, 5 Jun 2018 09:13:56 -0400
+Subject: [PATCH 4782/5725] drm/amd/display: dal 3.1.50
+
+Signed-off-by: Tony Cheng <tony.cheng@amd.com>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 4b5a92b..93e746a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -38,7 +38,7 @@
+ #include "inc/compressor.h"
+ #include "dml/display_mode_lib.h"
+
+-#define DC_VER "3.1.49"
++#define DC_VER "3.1.50"
+
+ #define MAX_SURFACES 3
+ #define MAX_STREAMS 6
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4783-drm-amd-display-clean-rq-dlg-ttu-reg-structs-before-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4783-drm-amd-display-clean-rq-dlg-ttu-reg-structs-before-.patch
new file mode 100644
index 00000000..ccd44c67
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4783-drm-amd-display-clean-rq-dlg-ttu-reg-structs-before-.patch
@@ -0,0 +1,77 @@
+From c29a0f3b76327320ea69a7ec3b830609a0336f15 Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Tue, 5 Jun 2018 12:54:38 -0400
+Subject: [PATCH 4783/5725] drm/amd/display: clean rq/dlg/ttu reg structs
+ before calculations
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 4 ++++
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 8 ++++++--
+ drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c | 2 --
+ 3 files changed, 10 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+index b8195e5..ac4451a 100644
+--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
++++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+@@ -423,6 +423,10 @@ static void dcn_bw_calc_rq_dlg_ttu(
+ int total_flip_bytes = 0;
+ int i;
+
++ memset(dlg_regs, 0, sizeof(*dlg_regs));
++ memset(ttu_regs, 0, sizeof(*ttu_regs));
++ memset(rq_regs, 0, sizeof(*rq_regs));
++
+ for (i = 0; i < number_of_planes; i++) {
+ total_active_bw += v->read_bandwidth[i];
+ total_prefetch_bw += v->prefetch_bandwidth[i];
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 1170ea0..eaa8b0a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -151,19 +151,23 @@ static void dcn10_log_hubp_states(struct dc *dc)
+
+ DTN_INFO("\n=========RQ========\n");
+ DTN_INFO("HUBP: drq_exp_m prq_exp_m mrq_exp_m crq_exp_m plane1_ba L:chunk_s min_chu_s meta_ch_s"
++ " min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h C:chunk_s min_chu_s meta_ch_s"
+ " min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h\n");
+ for (i = 0; i < pool->pipe_count; i++) {
+ struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
+ struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
+
+ if (!s->blank_en)
+- DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
++ DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
+ pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
+ rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
+ rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
+ rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
+ rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
+- rq_regs->rq_regs_l.pte_row_height_linear);
++ rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
++ rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
++ rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
++ rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
+ }
+
+ DTN_INFO("========DLG========\n");
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
+index 0efbf41..c2037da 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
+@@ -239,8 +239,6 @@ void dml1_extract_rq_regs(
+ extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), rq_param.sizing.rq_l);
+ if (rq_param.yuv420)
+ extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), rq_param.sizing.rq_c);
+- else
+- memset(&(rq_regs->rq_regs_c), 0, sizeof(rq_regs->rq_regs_c));
+
+ rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height);
+ rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height);
+--
+2.7.4
+
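The patch above clears the rq/dlg/ttu register structs once, up front in dcn_bw_calc_rq_dlg_ttu(), instead of relying on individual code paths (such as the yuv420 branch in dml1_extract_rq_regs) to zero the fields they do not fill. A minimal sketch of that pattern, with hypothetical struct and field names:

#include <linux/string.h>
#include <linux/types.h>

struct example_regs {
        unsigned int luma;
        unsigned int chroma;
};

static void calc_example_regs(struct example_regs *out, bool has_chroma)
{
        memset(out, 0, sizeof(*out));   /* no stale or uninitialized fields */

        out->luma = 42;                 /* always computed */
        if (has_chroma)
                out->chroma = 7;        /* else stays 0, no per-branch memset needed */
}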
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4784-drm-amd-display-dal-3.1.51.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4784-drm-amd-display-dal-3.1.51.patch
new file mode 100644
index 00000000..861cbc71
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4784-drm-amd-display-dal-3.1.51.patch
@@ -0,0 +1,28 @@
+From 39cfd85229a6421acf6a73b15636582f63640627 Mon Sep 17 00:00:00 2001
+From: Tony Cheng <tony.cheng@amd.com>
+Date: Tue, 5 Jun 2018 09:14:22 -0400
+Subject: [PATCH 4784/5725] drm/amd/display: dal 3.1.51
+
+Signed-off-by: Tony Cheng <tony.cheng@amd.com>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 93e746a..997dbf9 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -38,7 +38,7 @@
+ #include "inc/compressor.h"
+ #include "dml/display_mode_lib.h"
+
+-#define DC_VER "3.1.50"
++#define DC_VER "3.1.51"
+
+ #define MAX_SURFACES 3
+ #define MAX_STREAMS 6
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4785-drm-amd-display-fix-potential-infinite-loop-in-fbc-p.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4785-drm-amd-display-fix-potential-infinite-loop-in-fbc-p.patch
new file mode 100644
index 00000000..500e1dad
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4785-drm-amd-display-fix-potential-infinite-loop-in-fbc-p.patch
@@ -0,0 +1,42 @@
+From 7a13914aacf6b31f699f3eb83b8510884a6426e4 Mon Sep 17 00:00:00 2001
+From: Roman Li <Roman.Li@amd.com>
+Date: Tue, 5 Jun 2018 14:24:53 -0400
+Subject: [PATCH 4785/5725] drm/amd/display: fix potential infinite loop in fbc
+ path
+
+- Fix an integer overflow bug in wait_for_fbc_state_changed()
+- Correct the max value of retries for the corresponding warning
+
+Signed-off-by: Roman Li <Roman.Li@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
+index df02701..1f7f250 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
+@@ -143,7 +143,7 @@ static void wait_for_fbc_state_changed(
+ struct dce110_compressor *cp110,
+ bool enabled)
+ {
+- uint16_t counter = 0;
++ uint32_t counter = 0;
+ uint32_t addr = mmFBC_STATUS;
+ uint32_t value;
+
+@@ -158,7 +158,7 @@ static void wait_for_fbc_state_changed(
+ counter++;
+ }
+
+- if (counter == 10) {
++ if (counter == 1000) {
+ DC_LOG_WARNING("%s: wait counter exceeded, changes to HW not applied",
+ __func__);
+ } else {
+--
+2.7.4
+
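The fix above widens the wait counter to 32 bits and raises the warning threshold so it matches the actual retry limit. A rough sketch of the bounded-polling pattern involved; the limit, delay and status helper below are assumptions for illustration, not the values used by dce110_compressor.c.

#include <linux/delay.h>
#include <linux/printk.h>
#include <linux/types.h>

#define EXAMPLE_FBC_MAX_RETRIES 1000            /* illustrative retry limit */

static bool example_fbc_enabled(void)           /* hypothetical status read */
{
        return true;
}

static void example_wait_for_fbc_state(bool enabled)
{
        u32 counter;                            /* wide enough for the limit */

        for (counter = 0; counter < EXAMPLE_FBC_MAX_RETRIES; counter++) {
                if (example_fbc_enabled() == enabled)
                        break;
                udelay(10);
        }

        if (counter == EXAMPLE_FBC_MAX_RETRIES)
                pr_warn("FBC state change not applied after %d retries\n",
                        EXAMPLE_FBC_MAX_RETRIES);
}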
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4786-drm-amd-display-Enable-PPLib-calls-from-DC-on-linux.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4786-drm-amd-display-Enable-PPLib-calls-from-DC-on-linux.patch
new file mode 100644
index 00000000..373523d2
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4786-drm-amd-display-Enable-PPLib-calls-from-DC-on-linux.patch
@@ -0,0 +1,34 @@
+From 894886432cdee6c66c871c5056b0657d38d806a4 Mon Sep 17 00:00:00 2001
+From: Mikita Lipski <mikita.lipski@amd.com>
+Date: Thu, 31 May 2018 14:49:00 -0400
+Subject: [PATCH 4786/5725] drm/amd/display: Enable PPLib calls from DC on
+ linux
+
+Set the powerplay debug flag to false for both Windows and Linux
+to allow the calls to pplib, so we can retrieve the clock values
+from powerplay instead of using default hardcoded values.
+
+Signed-off-by: Mikita Lipski <mikita.lipski@amd.com>
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+index d885988..35457af 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+@@ -437,7 +437,7 @@ static const struct dc_debug debug_defaults_drv = {
+ */
+ .min_disp_clk_khz = 100000,
+
+- .disable_pplib_clock_request = true,
++ .disable_pplib_clock_request = false,
+ .disable_pplib_wm_range = false,
+ .pplib_wm_report_mode = WM_REPORT_DEFAULT,
+ .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4787-drm-amd-display-Add-dmpp-clks-types-for-conversion.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4787-drm-amd-display-Add-dmpp-clks-types-for-conversion.patch
new file mode 100644
index 00000000..1b85512a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4787-drm-amd-display-Add-dmpp-clks-types-for-conversion.patch
@@ -0,0 +1,82 @@
+From b975dc4c7303d50c027a92f653d722a14c65609f Mon Sep 17 00:00:00 2001
+From: Mikita Lipski <mikita.lipski@amd.com>
+Date: Thu, 31 May 2018 17:31:14 -0400
+Subject: [PATCH 4787/5725] drm/amd/display: Add dmpp clks types for conversion
+
+Add more cases to the dm_pp-to-pp clock translator so we can
+pass the right structures to powerplay, and use the translator
+instead of a massive switch statement.
+
+Signed-off-by: Mikita Lipski <mikita.lipski@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../drm/amd/display/amdgpu_dm/amdgpu_dm_services.c | 41 ++++++++++------------
+ 1 file changed, 18 insertions(+), 23 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+index 55fea6c..08a0328 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+@@ -195,6 +195,21 @@ static enum amd_pp_clock_type dc_to_pp_clock_type(
+ case DM_PP_CLOCK_TYPE_MEMORY_CLK:
+ amd_pp_clk_type = amd_pp_mem_clock;
+ break;
++ case DM_PP_CLOCK_TYPE_DCEFCLK:
++ amd_pp_clk_type = amd_pp_dcef_clock;
++ break;
++ case DM_PP_CLOCK_TYPE_DCFCLK:
++ amd_pp_clk_type = amd_pp_dcf_clock;
++ break;
++ case DM_PP_CLOCK_TYPE_PIXELCLK:
++ amd_pp_clk_type = amd_pp_pixel_clock;
++ break;
++ case DM_PP_CLOCK_TYPE_FCLK:
++ amd_pp_clk_type = amd_pp_f_clock;
++ break;
++ case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
++ amd_pp_clk_type = amd_pp_dpp_clock;
++ break;
+ default:
+ DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
+ dm_pp_clk_type);
+@@ -383,32 +398,12 @@ bool dm_pp_apply_clock_for_voltage_request(
+ struct amdgpu_device *adev = ctx->driver_context;
+ struct pp_display_clock_request pp_clock_request = {0};
+ int ret = 0;
+- switch (clock_for_voltage_req->clk_type) {
+- case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+- pp_clock_request.clock_type = amd_pp_disp_clock;
+- break;
+-
+- case DM_PP_CLOCK_TYPE_DCEFCLK:
+- pp_clock_request.clock_type = amd_pp_dcef_clock;
+- break;
+
+- case DM_PP_CLOCK_TYPE_DCFCLK:
+- pp_clock_request.clock_type = amd_pp_dcf_clock;
+- break;
+-
+- case DM_PP_CLOCK_TYPE_PIXELCLK:
+- pp_clock_request.clock_type = amd_pp_pixel_clock;
+- break;
+-
+- case DM_PP_CLOCK_TYPE_FCLK:
+- pp_clock_request.clock_type = amd_pp_f_clock;
+- break;
++ pp_clock_request.clock_type = dc_to_pp_clock_type(clock_for_voltage_req->clk_type);
++ pp_clock_request.clock_freq_in_khz = clock_for_voltage_req->clocks_in_khz;
+
+- default:
++ if (!pp_clock_request.clock_type)
+ return false;
+- }
+-
+- pp_clock_request.clock_freq_in_khz = clock_for_voltage_req->clocks_in_khz;
+
+ if (adev->powerplay.pp_funcs->display_clock_voltage_request)
+ ret = adev->powerplay.pp_funcs->display_clock_voltage_request(
+--
+2.7.4
+
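The rewrite above funnels every request through dc_to_pp_clock_type() and then treats a zero return value as "unknown clock type". A compressed sketch of that translate-then-validate pattern follows; the enum values are hypothetical stand-ins, not the real amd_pp_* constants, and the sentinel check only works as long as 0 is never a valid target value.

enum example_dm_clk { EXAMPLE_DM_CLK_DISP = 1, EXAMPLE_DM_CLK_MEM };
enum example_pp_clk { EXAMPLE_PP_CLK_INVALID = 0, EXAMPLE_PP_CLK_DISP, EXAMPLE_PP_CLK_MEM };

static enum example_pp_clk example_dm_to_pp(enum example_dm_clk t)
{
        switch (t) {
        case EXAMPLE_DM_CLK_DISP: return EXAMPLE_PP_CLK_DISP;
        case EXAMPLE_DM_CLK_MEM:  return EXAMPLE_PP_CLK_MEM;
        default:                  return EXAMPLE_PP_CLK_INVALID; /* caller checks for 0 */
        }
}

static int example_request_clock(enum example_dm_clk t, unsigned int khz)
{
        enum example_pp_clk pp = example_dm_to_pp(t);

        if (!pp)        /* unknown type: refuse rather than guess */
                return -1;
        /* ... hand (pp, khz) to the platform layer here ... */
        return 0;
}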
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4788-drm-amd-display-Convert-10kHz-clks-from-PPLib-into-k.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4788-drm-amd-display-Convert-10kHz-clks-from-PPLib-into-k.patch
new file mode 100644
index 00000000..b597cedc
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4788-drm-amd-display-Convert-10kHz-clks-from-PPLib-into-k.patch
@@ -0,0 +1,48 @@
+From e79cbfc78a628087d4840631e9f0778ae6798d25 Mon Sep 17 00:00:00 2001
+From: Mikita Lipski <mikita.lipski@amd.com>
+Date: Thu, 31 May 2018 14:44:18 -0400
+Subject: [PATCH 4788/5725] drm/amd/display: Convert 10kHz clks from PPLib into
+ kHz
+
+The driver expects clock frequencies in kHz, while the SMU returns
+the values in units of 10 kHz, which causes bandwidth validation to fail.
+
+Signed-off-by: Mikita Lipski <mikita.lipski@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+index 08a0328..4f86f6f 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+@@ -267,8 +267,9 @@ static void pp_to_dc_clock_levels_with_voltage(
+ DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
+
+ for (i = 0; i < clk_level_info->num_levels; i++) {
+- DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->data[i].clocks_in_khz);
+- clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
++ DRM_INFO("DM_PPLIB:\t %d in 10kHz\n", pp_clks->data[i].clocks_in_khz);
++ /* translate 10kHz to kHz */
++ clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz * 10;
+ clk_level_info->data[i].voltage_in_mv = pp_clks->data[i].voltage_in_mv;
+ }
+ }
+@@ -430,8 +431,9 @@ bool dm_pp_get_static_clocks(
+ return false;
+
+ static_clk_info->max_clocks_state = pp_clk_info.max_clocks_state;
+- static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock;
+- static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock;
++ /* translate 10kHz to kHz */
++ static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock * 10;
++ static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock * 10;
+
+ return true;
+ }
+--
+2.7.4
+
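The change above is a straight unit conversion: the SMU/powerplay side reports clocks in units of 10 kHz while DC and the bandwidth code work in kHz, so every value crossing that boundary is multiplied by 10. A trivial example with made-up numbers:

/* Made-up values illustrating the 10 kHz -> kHz translation. */
unsigned int smu_clock_10khz = 60000;                 /* 600 MHz as reported by the SMU */
unsigned int dc_clock_khz    = smu_clock_10khz * 10;  /* 600000 kHz as expected by DC   */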
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4789-drm-amd-display-move-dml-defaults-to-respective-dcn-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4789-drm-amd-display-move-dml-defaults-to-respective-dcn-.patch
new file mode 100644
index 00000000..bc9789eb
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4789-drm-amd-display-move-dml-defaults-to-respective-dcn-.patch
@@ -0,0 +1,165 @@
+From 616a1a5bd34e9367eb3946584fb18af373522563 Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Tue, 5 Jun 2018 07:40:04 -0400
+Subject: [PATCH 4789/5725] drm/amd/display: move dml defaults to respective
+ dcn resource files
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 62 +++++++++++++++++++++
+ .../gpu/drm/amd/display/dc/dml/display_mode_lib.c | 63 +---------------------
+ 2 files changed, 64 insertions(+), 61 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+index 35457af..4081160 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+@@ -65,6 +65,68 @@
+ #include "dce/dce_abm.h"
+ #include "dce/dce_dmcu.h"
+
++const struct _vcs_dpi_ip_params_st dcn1_0_ip = {
++ .rob_buffer_size_kbytes = 64,
++ .det_buffer_size_kbytes = 164,
++ .dpte_buffer_size_in_pte_reqs = 42,
++ .dpp_output_buffer_pixels = 2560,
++ .opp_output_buffer_lines = 1,
++ .pixel_chunk_size_kbytes = 8,
++ .pte_enable = 1,
++ .pte_chunk_size_kbytes = 2,
++ .meta_chunk_size_kbytes = 2,
++ .writeback_chunk_size_kbytes = 2,
++ .line_buffer_size_bits = 589824,
++ .max_line_buffer_lines = 12,
++ .IsLineBufferBppFixed = 0,
++ .LineBufferFixedBpp = -1,
++ .writeback_luma_buffer_size_kbytes = 12,
++ .writeback_chroma_buffer_size_kbytes = 8,
++ .max_num_dpp = 4,
++ .max_num_wb = 2,
++ .max_dchub_pscl_bw_pix_per_clk = 4,
++ .max_pscl_lb_bw_pix_per_clk = 2,
++ .max_lb_vscl_bw_pix_per_clk = 4,
++ .max_vscl_hscl_bw_pix_per_clk = 4,
++ .max_hscl_ratio = 4,
++ .max_vscl_ratio = 4,
++ .hscl_mults = 4,
++ .vscl_mults = 4,
++ .max_hscl_taps = 8,
++ .max_vscl_taps = 8,
++ .dispclk_ramp_margin_percent = 1,
++ .underscan_factor = 1.10,
++ .min_vblank_lines = 14,
++ .dppclk_delay_subtotal = 90,
++ .dispclk_delay_subtotal = 42,
++ .dcfclk_cstate_latency = 10,
++ .max_inter_dcn_tile_repeaters = 8,
++ .can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = 0,
++ .bug_forcing_LC_req_same_size_fixed = 0,
++};
++
++const struct _vcs_dpi_soc_bounding_box_st dcn1_0_soc = {
++ .sr_exit_time_us = 9.0,
++ .sr_enter_plus_exit_time_us = 11.0,
++ .urgent_latency_us = 4.0,
++ .writeback_latency_us = 12.0,
++ .ideal_dram_bw_after_urgent_percent = 80.0,
++ .max_request_size_bytes = 256,
++ .downspread_percent = 0.5,
++ .dram_page_open_time_ns = 50.0,
++ .dram_rw_turnaround_time_ns = 17.5,
++ .dram_return_buffer_per_channel_bytes = 8192,
++ .round_trip_ping_latency_dcfclk_cycles = 128,
++ .urgent_out_of_order_return_per_channel_bytes = 256,
++ .channel_interleave_bytes = 256,
++ .num_banks = 8,
++ .num_chans = 2,
++ .vmm_page_size_bytes = 4096,
++ .dram_clock_change_latency_us = 17.0,
++ .writeback_dram_clock_change_latency_us = 23.0,
++ .return_bus_width_bytes = 64,
++};
++
+ #ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
+ #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f
+ #define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
+index fd9d97a..dddeb0d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
+@@ -26,67 +26,8 @@
+ #include "display_mode_lib.h"
+ #include "dc_features.h"
+
+-static const struct _vcs_dpi_ip_params_st dcn1_0_ip = {
+- .rob_buffer_size_kbytes = 64,
+- .det_buffer_size_kbytes = 164,
+- .dpte_buffer_size_in_pte_reqs = 42,
+- .dpp_output_buffer_pixels = 2560,
+- .opp_output_buffer_lines = 1,
+- .pixel_chunk_size_kbytes = 8,
+- .pte_enable = 1,
+- .pte_chunk_size_kbytes = 2,
+- .meta_chunk_size_kbytes = 2,
+- .writeback_chunk_size_kbytes = 2,
+- .line_buffer_size_bits = 589824,
+- .max_line_buffer_lines = 12,
+- .IsLineBufferBppFixed = 0,
+- .LineBufferFixedBpp = -1,
+- .writeback_luma_buffer_size_kbytes = 12,
+- .writeback_chroma_buffer_size_kbytes = 8,
+- .max_num_dpp = 4,
+- .max_num_wb = 2,
+- .max_dchub_pscl_bw_pix_per_clk = 4,
+- .max_pscl_lb_bw_pix_per_clk = 2,
+- .max_lb_vscl_bw_pix_per_clk = 4,
+- .max_vscl_hscl_bw_pix_per_clk = 4,
+- .max_hscl_ratio = 4,
+- .max_vscl_ratio = 4,
+- .hscl_mults = 4,
+- .vscl_mults = 4,
+- .max_hscl_taps = 8,
+- .max_vscl_taps = 8,
+- .dispclk_ramp_margin_percent = 1,
+- .underscan_factor = 1.10,
+- .min_vblank_lines = 14,
+- .dppclk_delay_subtotal = 90,
+- .dispclk_delay_subtotal = 42,
+- .dcfclk_cstate_latency = 10,
+- .max_inter_dcn_tile_repeaters = 8,
+- .can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = 0,
+- .bug_forcing_LC_req_same_size_fixed = 0,
+-};
+-
+-static const struct _vcs_dpi_soc_bounding_box_st dcn1_0_soc = {
+- .sr_exit_time_us = 9.0,
+- .sr_enter_plus_exit_time_us = 11.0,
+- .urgent_latency_us = 4.0,
+- .writeback_latency_us = 12.0,
+- .ideal_dram_bw_after_urgent_percent = 80.0,
+- .max_request_size_bytes = 256,
+- .downspread_percent = 0.5,
+- .dram_page_open_time_ns = 50.0,
+- .dram_rw_turnaround_time_ns = 17.5,
+- .dram_return_buffer_per_channel_bytes = 8192,
+- .round_trip_ping_latency_dcfclk_cycles = 128,
+- .urgent_out_of_order_return_per_channel_bytes = 256,
+- .channel_interleave_bytes = 256,
+- .num_banks = 8,
+- .num_chans = 2,
+- .vmm_page_size_bytes = 4096,
+- .dram_clock_change_latency_us = 17.0,
+- .writeback_dram_clock_change_latency_us = 23.0,
+- .return_bus_width_bytes = 64,
+-};
++extern const struct _vcs_dpi_ip_params_st dcn1_0_ip;
++extern const struct _vcs_dpi_soc_bounding_box_st dcn1_0_soc;
+
+ static void set_soc_bounding_box(struct _vcs_dpi_soc_bounding_box_st *soc, enum dml_project project)
+ {
+--
+2.7.4
+
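The move above defines dcn1_0_ip and dcn1_0_soc in the DCN 1.0 resource file and references them from display_mode_lib.c through extern declarations placed directly in that .c file rather than in a shared header. A compressed sketch of the definition/declaration split, with hypothetical names; the more common variant keeps the extern declaration in a header included by both files.

/* resource file -- owns the definition (hypothetical names) */
struct example_ip_params { int max_num_dpp; };
const struct example_ip_params example_ip_defaults = { .max_num_dpp = 4 };

/* mode-lib file -- only declares the symbol it links against */
extern const struct example_ip_params example_ip_defaults;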
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4790-drm-amd-display-Moving-powerplay-functions-to-a-sepa.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4790-drm-amd-display-Moving-powerplay-functions-to-a-sepa.patch
new file mode 100644
index 00000000..c61fa4ea
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4790-drm-amd-display-Moving-powerplay-functions-to-a-sepa.patch
@@ -0,0 +1,1035 @@
+From dd3df5a2d35287242a63c93f88219a29892ac44a Mon Sep 17 00:00:00 2001
+From: Mikita Lipski <mikita.lipski@amd.com>
+Date: Fri, 1 Jun 2018 15:02:55 -0400
+Subject: [PATCH 4790/5725] drm/amd/display: Moving powerplay functions to a
+ separate class
+
+Move the powerplay functions to a new amdgpu_dm_pp_smu class
+and remove the now-unused headers from the dm_services class.
+
+Signed-off-by: Mikita Lipski <mikita.lipski@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/Makefile | 2 +-
+ .../drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | 525 +++++++++++++++++++++
+ .../drm/amd/display/amdgpu_dm/amdgpu_dm_services.c | 452 ------------------
+ 3 files changed, 526 insertions(+), 453 deletions(-)
+ create mode 100644 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
+index c54685e..98854bc 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
+@@ -7,7 +7,7 @@
+ AMDGPUDM = amdgpu_dm.o amdgpu_dm_irq.o amdgpu_dm_mst_types.o amdgpu_dm_color.o
+
+ ifneq ($(CONFIG_DRM_AMD_DC),)
+-AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o
++AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o amdgpu_dm_pp_smu.o
+ endif
+
+ ifneq ($(CONFIG_DEBUG_FS),)
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+new file mode 100644
+index 0000000..ad96e2a
+--- /dev/null
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+@@ -0,0 +1,525 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ */
++#include <linux/string.h>
++#include <linux/acpi.h>
++
++#include <drm/drmP.h>
++#include <drm/drm_crtc_helper.h>
++#include <drm/amdgpu_drm.h>
++#include "dm_services.h"
++#include "amdgpu.h"
++#include "amdgpu_dm.h"
++#include "amdgpu_dm_irq.h"
++#include "amdgpu_pm.h"
++#include "dm_pp_smu.h"
++#include "../../powerplay/inc/hwmgr.h"
++#include "../../powerplay/hwmgr/smu10_hwmgr.h"
++
++
++bool dm_pp_apply_display_requirements(
++ const struct dc_context *ctx,
++ const struct dm_pp_display_configuration *pp_display_cfg)
++{
++ struct amdgpu_device *adev = ctx->driver_context;
++
++ if (adev->pm.dpm_enabled) {
++
++ memset(&adev->pm.pm_display_cfg, 0,
++ sizeof(adev->pm.pm_display_cfg));
++
++ adev->pm.pm_display_cfg.cpu_cc6_disable =
++ pp_display_cfg->cpu_cc6_disable;
++
++ adev->pm.pm_display_cfg.cpu_pstate_disable =
++ pp_display_cfg->cpu_pstate_disable;
++
++ adev->pm.pm_display_cfg.cpu_pstate_separation_time =
++ pp_display_cfg->cpu_pstate_separation_time;
++
++ adev->pm.pm_display_cfg.nb_pstate_switch_disable =
++ pp_display_cfg->nb_pstate_switch_disable;
++
++ adev->pm.pm_display_cfg.num_display =
++ pp_display_cfg->display_count;
++ adev->pm.pm_display_cfg.num_path_including_non_display =
++ pp_display_cfg->display_count;
++
++ adev->pm.pm_display_cfg.min_core_set_clock =
++ pp_display_cfg->min_engine_clock_khz/10;
++ adev->pm.pm_display_cfg.min_core_set_clock_in_sr =
++ pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
++ adev->pm.pm_display_cfg.min_mem_set_clock =
++ pp_display_cfg->min_memory_clock_khz/10;
++
++ adev->pm.pm_display_cfg.multi_monitor_in_sync =
++ pp_display_cfg->all_displays_in_sync;
++ adev->pm.pm_display_cfg.min_vblank_time =
++ pp_display_cfg->avail_mclk_switch_time_us;
++
++ adev->pm.pm_display_cfg.display_clk =
++ pp_display_cfg->disp_clk_khz/10;
++
++ adev->pm.pm_display_cfg.dce_tolerable_mclk_in_active_latency =
++ pp_display_cfg->avail_mclk_switch_time_in_disp_active_us;
++
++ adev->pm.pm_display_cfg.crtc_index = pp_display_cfg->crtc_index;
++ adev->pm.pm_display_cfg.line_time_in_us =
++ pp_display_cfg->line_time_in_us;
++
++ adev->pm.pm_display_cfg.vrefresh = pp_display_cfg->disp_configs[0].v_refresh;
++ adev->pm.pm_display_cfg.crossfire_display_index = -1;
++ adev->pm.pm_display_cfg.min_bus_bandwidth = 0;
++
++ /* TODO: complete implementation of
++ * pp_display_configuration_change().
++ * Follow example of:
++ * PHM_StoreDALConfigurationData - powerplay\hwmgr\hardwaremanager.c
++ * PP_IRI_DisplayConfigurationChange - powerplay\eventmgr\iri.c */
++ if (adev->powerplay.pp_funcs->display_configuration_change)
++ adev->powerplay.pp_funcs->display_configuration_change(
++ adev->powerplay.pp_handle,
++ &adev->pm.pm_display_cfg);
++
++ /* TODO: replace by a separate call to 'apply display cfg'? */
++ amdgpu_pm_compute_clocks(adev);
++ }
++
++ return true;
++}
++
++static void get_default_clock_levels(
++ enum dm_pp_clock_type clk_type,
++ struct dm_pp_clock_levels *clks)
++{
++ uint32_t disp_clks_in_khz[6] = {
++ 300000, 400000, 496560, 626090, 685720, 757900 };
++ uint32_t sclks_in_khz[6] = {
++ 300000, 360000, 423530, 514290, 626090, 720000 };
++ uint32_t mclks_in_khz[2] = { 333000, 800000 };
++
++ switch (clk_type) {
++ case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
++ clks->num_levels = 6;
++ memmove(clks->clocks_in_khz, disp_clks_in_khz,
++ sizeof(disp_clks_in_khz));
++ break;
++ case DM_PP_CLOCK_TYPE_ENGINE_CLK:
++ clks->num_levels = 6;
++ memmove(clks->clocks_in_khz, sclks_in_khz,
++ sizeof(sclks_in_khz));
++ break;
++ case DM_PP_CLOCK_TYPE_MEMORY_CLK:
++ clks->num_levels = 2;
++ memmove(clks->clocks_in_khz, mclks_in_khz,
++ sizeof(mclks_in_khz));
++ break;
++ default:
++ clks->num_levels = 0;
++ break;
++ }
++}
++
++static enum amd_pp_clock_type dc_to_pp_clock_type(
++ enum dm_pp_clock_type dm_pp_clk_type)
++{
++ enum amd_pp_clock_type amd_pp_clk_type = 0;
++
++ switch (dm_pp_clk_type) {
++ case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
++ amd_pp_clk_type = amd_pp_disp_clock;
++ break;
++ case DM_PP_CLOCK_TYPE_ENGINE_CLK:
++ amd_pp_clk_type = amd_pp_sys_clock;
++ break;
++ case DM_PP_CLOCK_TYPE_MEMORY_CLK:
++ amd_pp_clk_type = amd_pp_mem_clock;
++ break;
++ case DM_PP_CLOCK_TYPE_DCEFCLK:
++ amd_pp_clk_type = amd_pp_dcef_clock;
++ break;
++ case DM_PP_CLOCK_TYPE_DCFCLK:
++ amd_pp_clk_type = amd_pp_dcf_clock;
++ break;
++ case DM_PP_CLOCK_TYPE_PIXELCLK:
++ amd_pp_clk_type = amd_pp_pixel_clock;
++ break;
++ case DM_PP_CLOCK_TYPE_FCLK:
++ amd_pp_clk_type = amd_pp_f_clock;
++ break;
++ case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
++ amd_pp_clk_type = amd_pp_dpp_clock;
++ break;
++ default:
++ DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
++ dm_pp_clk_type);
++ break;
++ }
++
++ return amd_pp_clk_type;
++}
++
++static void pp_to_dc_clock_levels(
++ const struct amd_pp_clocks *pp_clks,
++ struct dm_pp_clock_levels *dc_clks,
++ enum dm_pp_clock_type dc_clk_type)
++{
++ uint32_t i;
++
++ if (pp_clks->count > DM_PP_MAX_CLOCK_LEVELS) {
++ DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
++ DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
++ pp_clks->count,
++ DM_PP_MAX_CLOCK_LEVELS);
++
++ dc_clks->num_levels = DM_PP_MAX_CLOCK_LEVELS;
++ } else
++ dc_clks->num_levels = pp_clks->count;
++
++ DRM_INFO("DM_PPLIB: values for %s clock\n",
++ DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
++
++ for (i = 0; i < dc_clks->num_levels; i++) {
++ DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->clock[i]);
++ /* translate 10kHz to kHz */
++ dc_clks->clocks_in_khz[i] = pp_clks->clock[i] * 10;
++ }
++}
++
++static void pp_to_dc_clock_levels_with_latency(
++ const struct pp_clock_levels_with_latency *pp_clks,
++ struct dm_pp_clock_levels_with_latency *clk_level_info,
++ enum dm_pp_clock_type dc_clk_type)
++{
++ uint32_t i;
++
++ if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
++ DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
++ DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
++ pp_clks->num_levels,
++ DM_PP_MAX_CLOCK_LEVELS);
++
++ clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
++ } else
++ clk_level_info->num_levels = pp_clks->num_levels;
++
++ DRM_DEBUG("DM_PPLIB: values for %s clock\n",
++ DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
++
++ for (i = 0; i < clk_level_info->num_levels; i++) {
++ DRM_DEBUG("DM_PPLIB:\t %d in 10kHz\n", pp_clks->data[i].clocks_in_khz);
++ /* translate 10kHz to kHz */
++ clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz * 10;
++ clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us;
++ }
++}
++
++static void pp_to_dc_clock_levels_with_voltage(
++ const struct pp_clock_levels_with_voltage *pp_clks,
++ struct dm_pp_clock_levels_with_voltage *clk_level_info,
++ enum dm_pp_clock_type dc_clk_type)
++{
++ uint32_t i;
++
++ if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
++ DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
++ DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
++ pp_clks->num_levels,
++ DM_PP_MAX_CLOCK_LEVELS);
++
++ clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
++ } else
++ clk_level_info->num_levels = pp_clks->num_levels;
++
++ DRM_INFO("DM_PPLIB: values for %s clock\n",
++ DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
++
++ for (i = 0; i < clk_level_info->num_levels; i++) {
++ DRM_INFO("DM_PPLIB:\t %d in 10kHz\n", pp_clks->data[i].clocks_in_khz);
++ /* translate 10kHz to kHz */
++ clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz * 10;
++ clk_level_info->data[i].voltage_in_mv = pp_clks->data[i].voltage_in_mv;
++ }
++}
++
++bool dm_pp_get_clock_levels_by_type(
++ const struct dc_context *ctx,
++ enum dm_pp_clock_type clk_type,
++ struct dm_pp_clock_levels *dc_clks)
++{
++ struct amdgpu_device *adev = ctx->driver_context;
++ void *pp_handle = adev->powerplay.pp_handle;
++ struct amd_pp_clocks pp_clks = { 0 };
++ struct amd_pp_simple_clock_info validation_clks = { 0 };
++ uint32_t i;
++
++ if (adev->powerplay.pp_funcs->get_clock_by_type) {
++ if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle,
++ dc_to_pp_clock_type(clk_type), &pp_clks)) {
++ /* Error in pplib. Provide default values. */
++ get_default_clock_levels(clk_type, dc_clks);
++ return true;
++ }
++ }
++
++ pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type);
++
++ if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks) {
++ if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks(
++ pp_handle, &validation_clks)) {
++ /* Error in pplib. Provide default values. */
++ DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
++ validation_clks.engine_max_clock = 72000;
++ validation_clks.memory_max_clock = 80000;
++ validation_clks.level = 0;
++ }
++ }
++
++ DRM_INFO("DM_PPLIB: Validation clocks:\n");
++ DRM_INFO("DM_PPLIB: engine_max_clock: %d\n",
++ validation_clks.engine_max_clock);
++ DRM_INFO("DM_PPLIB: memory_max_clock: %d\n",
++ validation_clks.memory_max_clock);
++ DRM_INFO("DM_PPLIB: level : %d\n",
++ validation_clks.level);
++
++ /* Translate 10 kHz to kHz. */
++ validation_clks.engine_max_clock *= 10;
++ validation_clks.memory_max_clock *= 10;
++
++ /* Determine the highest non-boosted level from the Validation Clocks */
++ if (clk_type == DM_PP_CLOCK_TYPE_ENGINE_CLK) {
++ for (i = 0; i < dc_clks->num_levels; i++) {
++ if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) {
++ /* This clock is higher the validation clock.
++ * Than means the previous one is the highest
++ * non-boosted one. */
++ DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n",
++ dc_clks->num_levels, i);
++ dc_clks->num_levels = i > 0 ? i : 1;
++ break;
++ }
++ }
++ } else if (clk_type == DM_PP_CLOCK_TYPE_MEMORY_CLK) {
++ for (i = 0; i < dc_clks->num_levels; i++) {
++ if (dc_clks->clocks_in_khz[i] > validation_clks.memory_max_clock) {
++ DRM_INFO("DM_PPLIB: reducing memory clock level from %d to %d\n",
++ dc_clks->num_levels, i);
++ dc_clks->num_levels = i > 0 ? i : 1;
++ break;
++ }
++ }
++ }
++
++ return true;
++}
++
++bool dm_pp_get_clock_levels_by_type_with_latency(
++ const struct dc_context *ctx,
++ enum dm_pp_clock_type clk_type,
++ struct dm_pp_clock_levels_with_latency *clk_level_info)
++{
++ struct amdgpu_device *adev = ctx->driver_context;
++ void *pp_handle = adev->powerplay.pp_handle;
++ struct pp_clock_levels_with_latency pp_clks = { 0 };
++ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
++
++ if (!pp_funcs || !pp_funcs->get_clock_by_type_with_latency)
++ return false;
++
++ if (pp_funcs->get_clock_by_type_with_latency(pp_handle,
++ dc_to_pp_clock_type(clk_type),
++ &pp_clks))
++ return false;
++
++ pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type);
++
++ return true;
++}
++
++bool dm_pp_get_clock_levels_by_type_with_voltage(
++ const struct dc_context *ctx,
++ enum dm_pp_clock_type clk_type,
++ struct dm_pp_clock_levels_with_voltage *clk_level_info)
++{
++ struct amdgpu_device *adev = ctx->driver_context;
++ void *pp_handle = adev->powerplay.pp_handle;
++ struct pp_clock_levels_with_voltage pp_clk_info = {0};
++ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
++
++ if (pp_funcs->get_clock_by_type_with_voltage(pp_handle,
++ dc_to_pp_clock_type(clk_type),
++ &pp_clk_info))
++ return false;
++
++ pp_to_dc_clock_levels_with_voltage(&pp_clk_info, clk_level_info, clk_type);
++
++ return true;
++}
++
++bool dm_pp_notify_wm_clock_changes(
++ const struct dc_context *ctx,
++ struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges)
++{
++ /* TODO: to be implemented */
++ return false;
++}
++
++bool dm_pp_apply_power_level_change_request(
++ const struct dc_context *ctx,
++ struct dm_pp_power_level_change_request *level_change_req)
++{
++ /* TODO: to be implemented */
++ return false;
++}
++
++bool dm_pp_apply_clock_for_voltage_request(
++ const struct dc_context *ctx,
++ struct dm_pp_clock_for_voltage_req *clock_for_voltage_req)
++{
++ struct amdgpu_device *adev = ctx->driver_context;
++ struct pp_display_clock_request pp_clock_request = {0};
++ int ret = 0;
++
++ pp_clock_request.clock_type = dc_to_pp_clock_type(clock_for_voltage_req->clk_type);
++ pp_clock_request.clock_freq_in_khz = clock_for_voltage_req->clocks_in_khz;
++
++ if (!pp_clock_request.clock_type)
++ return false;
++
++ if (adev->powerplay.pp_funcs->display_clock_voltage_request)
++ ret = adev->powerplay.pp_funcs->display_clock_voltage_request(
++ adev->powerplay.pp_handle,
++ &pp_clock_request);
++ if (ret)
++ return false;
++ return true;
++}
++
++bool dm_pp_get_static_clocks(
++ const struct dc_context *ctx,
++ struct dm_pp_static_clock_info *static_clk_info)
++{
++ struct amdgpu_device *adev = ctx->driver_context;
++ struct amd_pp_clock_info pp_clk_info = {0};
++ int ret = 0;
++
++ if (adev->powerplay.pp_funcs->get_current_clocks)
++ ret = adev->powerplay.pp_funcs->get_current_clocks(
++ adev->powerplay.pp_handle,
++ &pp_clk_info);
++ if (ret)
++ return false;
++
++ static_clk_info->max_clocks_state = pp_clk_info.max_clocks_state;
++ /* translate 10kHz to kHz */
++ static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock * 10;
++ static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock * 10;
++
++ return true;
++}
++
++void pp_rv_set_display_requirement(struct pp_smu *pp,
++ struct pp_smu_display_requirement_rv *req)
++{
++ struct amdgpu_device *adev = pp->ctx->driver_context;
++ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
++ int ret = 0;
++ if (hwmgr->hwmgr_func->set_deep_sleep_dcefclk)
++ ret = hwmgr->hwmgr_func->set_deep_sleep_dcefclk(hwmgr, req->hard_min_dcefclk_khz/10);
++ if (hwmgr->hwmgr_func->set_active_display_count)
++ ret = hwmgr->hwmgr_func->set_active_display_count(hwmgr, req->display_count);
++
++ //store_cc6 is not yet implemented in SMU level
++}
++
++void pp_rv_set_wm_ranges(struct pp_smu *pp,
++ struct pp_smu_wm_range_sets *ranges)
++{
++ struct amdgpu_device *adev = pp->ctx->driver_context;
++ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
++ struct pp_wm_sets_with_clock_ranges_soc15 ranges_soc15 = {0};
++ int i = 0;
++
++ if (!hwmgr->hwmgr_func->set_watermarks_for_clocks_ranges ||
++ !pp || !ranges)
++ return;
++
++ //not entirely sure if thats a correct assignment
++ ranges_soc15.num_wm_sets_dmif = ranges->num_reader_wm_sets;
++ ranges_soc15.num_wm_sets_mcif = ranges->num_writer_wm_sets;
++
++ for (i = 0; i < ranges_soc15.num_wm_sets_dmif; i++) {
++ if (ranges->reader_wm_sets[i].wm_inst > 3)
++ ranges_soc15.wm_sets_dmif[i].wm_set_id = DC_WM_SET_A;
++ else
++ ranges_soc15.wm_sets_dmif[i].wm_set_id =
++ ranges->reader_wm_sets[i].wm_inst;
++ ranges_soc15.wm_sets_dmif[i].wm_max_dcefclk_in_khz =
++ ranges->reader_wm_sets[i].max_drain_clk_khz;
++ ranges_soc15.wm_sets_dmif[i].wm_min_dcefclk_in_khz =
++ ranges->reader_wm_sets[i].min_drain_clk_khz;
++ ranges_soc15.wm_sets_dmif[i].wm_max_memclk_in_khz =
++ ranges->reader_wm_sets[i].max_fill_clk_khz;
++ ranges_soc15.wm_sets_dmif[i].wm_min_memclk_in_khz =
++ ranges->reader_wm_sets[i].min_fill_clk_khz;
++ }
++
++ for (i = 0; i < ranges_soc15.num_wm_sets_mcif; i++) {
++ if (ranges->writer_wm_sets[i].wm_inst > 3)
++ ranges_soc15.wm_sets_dmif[i].wm_set_id = DC_WM_SET_A;
++ else
++ ranges_soc15.wm_sets_mcif[i].wm_set_id =
++ ranges->writer_wm_sets[i].wm_inst;
++ ranges_soc15.wm_sets_mcif[i].wm_max_socclk_in_khz =
++ ranges->writer_wm_sets[i].max_fill_clk_khz;
++ ranges_soc15.wm_sets_mcif[i].wm_min_socclk_in_khz =
++ ranges->writer_wm_sets[i].min_fill_clk_khz;
++ ranges_soc15.wm_sets_mcif[i].wm_max_memclk_in_khz =
++ ranges->writer_wm_sets[i].max_fill_clk_khz;
++ ranges_soc15.wm_sets_mcif[i].wm_min_memclk_in_khz =
++ ranges->writer_wm_sets[i].min_fill_clk_khz;
++ }
++
++ hwmgr->hwmgr_func->set_watermarks_for_clocks_ranges(hwmgr, &ranges_soc15);
++
++}
++
++void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
++{
++ struct amdgpu_device *adev = pp->ctx->driver_context;
++ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
++
++ if (hwmgr->hwmgr_func->smus_notify_pwe)
++ hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);
++}
++
++void dm_pp_get_funcs_rv(
++ struct dc_context *ctx,
++ struct pp_smu_funcs_rv *funcs)
++{
++ funcs->pp_smu.ctx = ctx;
++ funcs->set_display_requirement = pp_rv_set_display_requirement;
++ funcs->set_wm_ranges = pp_rv_set_wm_ranges;
++ funcs->set_pme_wa_enable = pp_rv_set_pme_wa_enable;
++}
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+index 4f86f6f..19e7faa 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+@@ -34,9 +34,6 @@
+ #include "amdgpu_dm.h"
+ #include "amdgpu_dm_irq.h"
+ #include "amdgpu_pm.h"
+-#include "dm_pp_smu.h"
+-#include "../../powerplay/inc/hwmgr.h"
+-#include "../../powerplay/hwmgr/smu10_hwmgr.h"
+
+
+
+@@ -76,452 +73,3 @@ bool dm_read_persistent_data(struct dc_context *ctx,
+ }
+
+ /**** power component interfaces ****/
+-
+-bool dm_pp_apply_display_requirements(
+- const struct dc_context *ctx,
+- const struct dm_pp_display_configuration *pp_display_cfg)
+-{
+- struct amdgpu_device *adev = ctx->driver_context;
+-
+- if (adev->pm.dpm_enabled) {
+-
+- memset(&adev->pm.pm_display_cfg, 0,
+- sizeof(adev->pm.pm_display_cfg));
+-
+- adev->pm.pm_display_cfg.cpu_cc6_disable =
+- pp_display_cfg->cpu_cc6_disable;
+-
+- adev->pm.pm_display_cfg.cpu_pstate_disable =
+- pp_display_cfg->cpu_pstate_disable;
+-
+- adev->pm.pm_display_cfg.cpu_pstate_separation_time =
+- pp_display_cfg->cpu_pstate_separation_time;
+-
+- adev->pm.pm_display_cfg.nb_pstate_switch_disable =
+- pp_display_cfg->nb_pstate_switch_disable;
+-
+- adev->pm.pm_display_cfg.num_display =
+- pp_display_cfg->display_count;
+- adev->pm.pm_display_cfg.num_path_including_non_display =
+- pp_display_cfg->display_count;
+-
+- adev->pm.pm_display_cfg.min_core_set_clock =
+- pp_display_cfg->min_engine_clock_khz/10;
+- adev->pm.pm_display_cfg.min_core_set_clock_in_sr =
+- pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
+- adev->pm.pm_display_cfg.min_mem_set_clock =
+- pp_display_cfg->min_memory_clock_khz/10;
+-
+- adev->pm.pm_display_cfg.multi_monitor_in_sync =
+- pp_display_cfg->all_displays_in_sync;
+- adev->pm.pm_display_cfg.min_vblank_time =
+- pp_display_cfg->avail_mclk_switch_time_us;
+-
+- adev->pm.pm_display_cfg.display_clk =
+- pp_display_cfg->disp_clk_khz/10;
+-
+- adev->pm.pm_display_cfg.dce_tolerable_mclk_in_active_latency =
+- pp_display_cfg->avail_mclk_switch_time_in_disp_active_us;
+-
+- adev->pm.pm_display_cfg.crtc_index = pp_display_cfg->crtc_index;
+- adev->pm.pm_display_cfg.line_time_in_us =
+- pp_display_cfg->line_time_in_us;
+-
+- adev->pm.pm_display_cfg.vrefresh = pp_display_cfg->disp_configs[0].v_refresh;
+- adev->pm.pm_display_cfg.crossfire_display_index = -1;
+- adev->pm.pm_display_cfg.min_bus_bandwidth = 0;
+-
+- /* TODO: complete implementation of
+- * pp_display_configuration_change().
+- * Follow example of:
+- * PHM_StoreDALConfigurationData - powerplay\hwmgr\hardwaremanager.c
+- * PP_IRI_DisplayConfigurationChange - powerplay\eventmgr\iri.c */
+- if (adev->powerplay.pp_funcs->display_configuration_change)
+- adev->powerplay.pp_funcs->display_configuration_change(
+- adev->powerplay.pp_handle,
+- &adev->pm.pm_display_cfg);
+-
+- /* TODO: replace by a separate call to 'apply display cfg'? */
+- amdgpu_pm_compute_clocks(adev);
+- }
+-
+- return true;
+-}
+-
+-static void get_default_clock_levels(
+- enum dm_pp_clock_type clk_type,
+- struct dm_pp_clock_levels *clks)
+-{
+- uint32_t disp_clks_in_khz[6] = {
+- 300000, 400000, 496560, 626090, 685720, 757900 };
+- uint32_t sclks_in_khz[6] = {
+- 300000, 360000, 423530, 514290, 626090, 720000 };
+- uint32_t mclks_in_khz[2] = { 333000, 800000 };
+-
+- switch (clk_type) {
+- case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+- clks->num_levels = 6;
+- memmove(clks->clocks_in_khz, disp_clks_in_khz,
+- sizeof(disp_clks_in_khz));
+- break;
+- case DM_PP_CLOCK_TYPE_ENGINE_CLK:
+- clks->num_levels = 6;
+- memmove(clks->clocks_in_khz, sclks_in_khz,
+- sizeof(sclks_in_khz));
+- break;
+- case DM_PP_CLOCK_TYPE_MEMORY_CLK:
+- clks->num_levels = 2;
+- memmove(clks->clocks_in_khz, mclks_in_khz,
+- sizeof(mclks_in_khz));
+- break;
+- default:
+- clks->num_levels = 0;
+- break;
+- }
+-}
+-
+-static enum amd_pp_clock_type dc_to_pp_clock_type(
+- enum dm_pp_clock_type dm_pp_clk_type)
+-{
+- enum amd_pp_clock_type amd_pp_clk_type = 0;
+-
+- switch (dm_pp_clk_type) {
+- case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+- amd_pp_clk_type = amd_pp_disp_clock;
+- break;
+- case DM_PP_CLOCK_TYPE_ENGINE_CLK:
+- amd_pp_clk_type = amd_pp_sys_clock;
+- break;
+- case DM_PP_CLOCK_TYPE_MEMORY_CLK:
+- amd_pp_clk_type = amd_pp_mem_clock;
+- break;
+- case DM_PP_CLOCK_TYPE_DCEFCLK:
+- amd_pp_clk_type = amd_pp_dcef_clock;
+- break;
+- case DM_PP_CLOCK_TYPE_DCFCLK:
+- amd_pp_clk_type = amd_pp_dcf_clock;
+- break;
+- case DM_PP_CLOCK_TYPE_PIXELCLK:
+- amd_pp_clk_type = amd_pp_pixel_clock;
+- break;
+- case DM_PP_CLOCK_TYPE_FCLK:
+- amd_pp_clk_type = amd_pp_f_clock;
+- break;
+- case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
+- amd_pp_clk_type = amd_pp_dpp_clock;
+- break;
+- default:
+- DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
+- dm_pp_clk_type);
+- break;
+- }
+-
+- return amd_pp_clk_type;
+-}
+-
+-static void pp_to_dc_clock_levels(
+- const struct amd_pp_clocks *pp_clks,
+- struct dm_pp_clock_levels *dc_clks,
+- enum dm_pp_clock_type dc_clk_type)
+-{
+- uint32_t i;
+-
+- if (pp_clks->count > DM_PP_MAX_CLOCK_LEVELS) {
+- DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
+- DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
+- pp_clks->count,
+- DM_PP_MAX_CLOCK_LEVELS);
+-
+- dc_clks->num_levels = DM_PP_MAX_CLOCK_LEVELS;
+- } else
+- dc_clks->num_levels = pp_clks->count;
+-
+- DRM_INFO("DM_PPLIB: values for %s clock\n",
+- DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
+-
+- for (i = 0; i < dc_clks->num_levels; i++) {
+- DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->clock[i]);
+- /* translate 10kHz to kHz */
+- dc_clks->clocks_in_khz[i] = pp_clks->clock[i] * 10;
+- }
+-}
+-
+-static void pp_to_dc_clock_levels_with_voltage(
+- const struct pp_clock_levels_with_voltage *pp_clks,
+- struct dm_pp_clock_levels_with_voltage *clk_level_info,
+- enum dm_pp_clock_type dc_clk_type)
+-{
+- uint32_t i;
+-
+- if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
+- DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
+- DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
+- pp_clks->num_levels,
+- DM_PP_MAX_CLOCK_LEVELS);
+-
+- clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
+- } else
+- clk_level_info->num_levels = pp_clks->num_levels;
+-
+- DRM_INFO("DM_PPLIB: values for %s clock\n",
+- DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
+-
+- for (i = 0; i < clk_level_info->num_levels; i++) {
+- DRM_INFO("DM_PPLIB:\t %d in 10kHz\n", pp_clks->data[i].clocks_in_khz);
+- /* translate 10kHz to kHz */
+- clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz * 10;
+- clk_level_info->data[i].voltage_in_mv = pp_clks->data[i].voltage_in_mv;
+- }
+-}
+-
+-
+-bool dm_pp_get_clock_levels_by_type(
+- const struct dc_context *ctx,
+- enum dm_pp_clock_type clk_type,
+- struct dm_pp_clock_levels *dc_clks)
+-{
+- struct amdgpu_device *adev = ctx->driver_context;
+- void *pp_handle = adev->powerplay.pp_handle;
+- struct amd_pp_clocks pp_clks = { 0 };
+- struct amd_pp_simple_clock_info validation_clks = { 0 };
+- uint32_t i;
+-
+- if (adev->powerplay.pp_funcs->get_clock_by_type) {
+- if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle,
+- dc_to_pp_clock_type(clk_type), &pp_clks)) {
+- /* Error in pplib. Provide default values. */
+- get_default_clock_levels(clk_type, dc_clks);
+- return true;
+- }
+- }
+-
+- pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type);
+-
+- if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks) {
+- if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks(
+- pp_handle, &validation_clks)) {
+- /* Error in pplib. Provide default values. */
+- DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
+- validation_clks.engine_max_clock = 72000;
+- validation_clks.memory_max_clock = 80000;
+- validation_clks.level = 0;
+- }
+- }
+-
+- DRM_INFO("DM_PPLIB: Validation clocks:\n");
+- DRM_INFO("DM_PPLIB: engine_max_clock: %d\n",
+- validation_clks.engine_max_clock);
+- DRM_INFO("DM_PPLIB: memory_max_clock: %d\n",
+- validation_clks.memory_max_clock);
+- DRM_INFO("DM_PPLIB: level : %d\n",
+- validation_clks.level);
+-
+- /* Translate 10 kHz to kHz. */
+- validation_clks.engine_max_clock *= 10;
+- validation_clks.memory_max_clock *= 10;
+-
+- /* Determine the highest non-boosted level from the Validation Clocks */
+- if (clk_type == DM_PP_CLOCK_TYPE_ENGINE_CLK) {
+- for (i = 0; i < dc_clks->num_levels; i++) {
+- if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) {
+- /* This clock is higher the validation clock.
+- * Than means the previous one is the highest
+- * non-boosted one. */
+- DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n",
+- dc_clks->num_levels, i);
+- dc_clks->num_levels = i > 0 ? i : 1;
+- break;
+- }
+- }
+- } else if (clk_type == DM_PP_CLOCK_TYPE_MEMORY_CLK) {
+- for (i = 0; i < dc_clks->num_levels; i++) {
+- if (dc_clks->clocks_in_khz[i] > validation_clks.memory_max_clock) {
+- DRM_INFO("DM_PPLIB: reducing memory clock level from %d to %d\n",
+- dc_clks->num_levels, i);
+- dc_clks->num_levels = i > 0 ? i : 1;
+- break;
+- }
+- }
+- }
+-
+- return true;
+-}
+-
+-bool dm_pp_get_clock_levels_by_type_with_latency(
+- const struct dc_context *ctx,
+- enum dm_pp_clock_type clk_type,
+- struct dm_pp_clock_levels_with_latency *clk_level_info)
+-{
+- /* TODO: to be implemented */
+- return false;
+-}
+-
+-bool dm_pp_get_clock_levels_by_type_with_voltage(
+- const struct dc_context *ctx,
+- enum dm_pp_clock_type clk_type,
+- struct dm_pp_clock_levels_with_voltage *clk_level_info)
+-{
+- struct amdgpu_device *adev = ctx->driver_context;
+- void *pp_handle = adev->powerplay.pp_handle;
+- struct pp_clock_levels_with_voltage pp_clk_info = {0};
+- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+-
+- if (pp_funcs->get_clock_by_type_with_voltage(pp_handle,
+- dc_to_pp_clock_type(clk_type),
+- &pp_clk_info))
+- return false;
+-
+- pp_to_dc_clock_levels_with_voltage(&pp_clk_info, clk_level_info, clk_type);
+-
+- return true;
+-}
+-
+-bool dm_pp_notify_wm_clock_changes(
+- const struct dc_context *ctx,
+- struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges)
+-{
+- /* TODO: to be implemented */
+- return false;
+-}
+-
+-bool dm_pp_apply_power_level_change_request(
+- const struct dc_context *ctx,
+- struct dm_pp_power_level_change_request *level_change_req)
+-{
+- /* TODO: to be implemented */
+- return false;
+-}
+-
+-bool dm_pp_apply_clock_for_voltage_request(
+- const struct dc_context *ctx,
+- struct dm_pp_clock_for_voltage_req *clock_for_voltage_req)
+-{
+- struct amdgpu_device *adev = ctx->driver_context;
+- struct pp_display_clock_request pp_clock_request = {0};
+- int ret = 0;
+-
+- pp_clock_request.clock_type = dc_to_pp_clock_type(clock_for_voltage_req->clk_type);
+- pp_clock_request.clock_freq_in_khz = clock_for_voltage_req->clocks_in_khz;
+-
+- if (!pp_clock_request.clock_type)
+- return false;
+-
+- if (adev->powerplay.pp_funcs->display_clock_voltage_request)
+- ret = adev->powerplay.pp_funcs->display_clock_voltage_request(
+- adev->powerplay.pp_handle,
+- &pp_clock_request);
+- if (ret)
+- return false;
+- return true;
+-}
+-
+-bool dm_pp_get_static_clocks(
+- const struct dc_context *ctx,
+- struct dm_pp_static_clock_info *static_clk_info)
+-{
+- struct amdgpu_device *adev = ctx->driver_context;
+- struct amd_pp_clock_info pp_clk_info = {0};
+- int ret = 0;
+-
+- if (adev->powerplay.pp_funcs->get_current_clocks)
+- ret = adev->powerplay.pp_funcs->get_current_clocks(
+- adev->powerplay.pp_handle,
+- &pp_clk_info);
+- if (ret)
+- return false;
+-
+- static_clk_info->max_clocks_state = pp_clk_info.max_clocks_state;
+- /* translate 10kHz to kHz */
+- static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock * 10;
+- static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock * 10;
+-
+- return true;
+-}
+-
+-void pp_rv_set_display_requirement(struct pp_smu *pp,
+- struct pp_smu_display_requirement_rv *req)
+-{
+- struct amdgpu_device *adev = pp->ctx->driver_context;
+- struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+- int ret = 0;
+- if (hwmgr->hwmgr_func->set_deep_sleep_dcefclk)
+- ret = hwmgr->hwmgr_func->set_deep_sleep_dcefclk(hwmgr, req->hard_min_dcefclk_khz/10);
+- if (hwmgr->hwmgr_func->set_active_display_count)
+- ret = hwmgr->hwmgr_func->set_active_display_count(hwmgr, req->display_count);
+-
+- //store_cc6 is not yet implemented in SMU level
+-}
+-
+-void pp_rv_set_wm_ranges(struct pp_smu *pp,
+- struct pp_smu_wm_range_sets *ranges)
+-{
+- struct amdgpu_device *adev = pp->ctx->driver_context;
+- struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+- struct pp_wm_sets_with_clock_ranges_soc15 ranges_soc15 = {0};
+- int i = 0;
+-
+- if (!hwmgr->hwmgr_func->set_watermarks_for_clocks_ranges ||
+- !pp || !ranges)
+- return;
+-
+- //not entirely sure if thats a correct assignment
+- ranges_soc15.num_wm_sets_dmif = ranges->num_reader_wm_sets;
+- ranges_soc15.num_wm_sets_mcif = ranges->num_writer_wm_sets;
+-
+- for (i = 0; i < ranges_soc15.num_wm_sets_dmif; i++) {
+- if (ranges->reader_wm_sets[i].wm_inst > 3)
+- ranges_soc15.wm_sets_dmif[i].wm_set_id = DC_WM_SET_A;
+- else
+- ranges_soc15.wm_sets_dmif[i].wm_set_id =
+- ranges->reader_wm_sets[i].wm_inst;
+- ranges_soc15.wm_sets_dmif[i].wm_max_dcefclk_in_khz =
+- ranges->reader_wm_sets[i].max_drain_clk_khz;
+- ranges_soc15.wm_sets_dmif[i].wm_min_dcefclk_in_khz =
+- ranges->reader_wm_sets[i].min_drain_clk_khz;
+- ranges_soc15.wm_sets_dmif[i].wm_max_memclk_in_khz =
+- ranges->reader_wm_sets[i].max_fill_clk_khz;
+- ranges_soc15.wm_sets_dmif[i].wm_min_memclk_in_khz =
+- ranges->reader_wm_sets[i].min_fill_clk_khz;
+- }
+-
+- for (i = 0; i < ranges_soc15.num_wm_sets_mcif; i++) {
+- if (ranges->writer_wm_sets[i].wm_inst > 3)
+- ranges_soc15.wm_sets_dmif[i].wm_set_id = DC_WM_SET_A;
+- else
+- ranges_soc15.wm_sets_mcif[i].wm_set_id =
+- ranges->writer_wm_sets[i].wm_inst;
+- ranges_soc15.wm_sets_mcif[i].wm_max_socclk_in_khz =
+- ranges->writer_wm_sets[i].max_fill_clk_khz;
+- ranges_soc15.wm_sets_mcif[i].wm_min_socclk_in_khz =
+- ranges->writer_wm_sets[i].min_fill_clk_khz;
+- ranges_soc15.wm_sets_mcif[i].wm_max_memclk_in_khz =
+- ranges->writer_wm_sets[i].max_fill_clk_khz;
+- ranges_soc15.wm_sets_mcif[i].wm_min_memclk_in_khz =
+- ranges->writer_wm_sets[i].min_fill_clk_khz;
+- }
+-
+- hwmgr->hwmgr_func->set_watermarks_for_clocks_ranges(hwmgr, &ranges_soc15);
+-
+-}
+-
+-void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
+-{
+- struct amdgpu_device *adev = pp->ctx->driver_context;
+- struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+-
+- if (hwmgr->hwmgr_func->smus_notify_pwe)
+- hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);
+-}
+-
+-void dm_pp_get_funcs_rv(
+- struct dc_context *ctx,
+- struct pp_smu_funcs_rv *funcs)
+-{
+- funcs->pp_smu.ctx = ctx;
+- funcs->set_display_requirement = pp_rv_set_display_requirement;
+- funcs->set_wm_ranges = pp_rv_set_wm_ranges;
+- funcs->set_pme_wa_enable = pp_rv_set_pme_wa_enable;
+-}
+-
+-
+-/**** end of power component interfaces ****/
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4791-drm-amd-display-fix-dcn1-watermark-range-reporting.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4791-drm-amd-display-fix-dcn1-watermark-range-reporting.patch
new file mode 100644
index 00000000..863498c5
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4791-drm-amd-display-fix-dcn1-watermark-range-reporting.patch
@@ -0,0 +1,165 @@
+From 56ad2ac996f71c52abb6d2db8bec77aafc445045 Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Wed, 6 Jun 2018 13:19:39 -0400
+Subject: [PATCH 4791/5725] drm/amd/display: fix dcn1 watermark range reporting
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 102 ++++-------------------
+ 1 file changed, 18 insertions(+), 84 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+index ac4451a..8dc0773 100644
+--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
++++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+@@ -1335,21 +1335,14 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
+ {
+ struct pp_smu_funcs_rv *pp = dc->res_pool->pp_smu;
+ struct pp_smu_wm_range_sets ranges = {0};
+- int max_fclk_khz, nom_fclk_khz, mid_fclk_khz, min_fclk_khz;
+- int max_dcfclk_khz, min_dcfclk_khz;
+- int socclk_khz;
++ int min_fclk_khz, min_dcfclk_khz, socclk_khz;
+ const int overdrive = 5000000; /* 5 GHz to cover Overdrive */
+- unsigned factor = (ddr4_dram_factor_single_Channel * dc->dcn_soc->number_of_channels);
+
+ if (!pp->set_wm_ranges)
+ return;
+
+ kernel_fpu_begin();
+- max_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 * 1000000 / factor;
+- nom_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 * 1000000 / factor;
+- mid_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 * 1000000 / factor;
+ min_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 * 1000000 / 32;
+- max_dcfclk_khz = dc->dcn_soc->dcfclkv_max0p9 * 1000;
+ min_dcfclk_khz = dc->dcn_soc->dcfclkv_min0p65 * 1000;
+ socclk_khz = dc->dcn_soc->socclk * 1000;
+ kernel_fpu_end();
+@@ -1357,7 +1350,7 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
+ /* Now notify PPLib/SMU about which Watermarks sets they should select
+ * depending on DPM state they are in. And update BW MGR GFX Engine and
+ * Memory clock member variables for Watermarks calculations for each
+- * Watermark Set
++ * Watermark Set. Only one watermark set for dcn1 due to hw bug DEGVIDCN10-254.
+ */
+ /* SOCCLK does not affect anytihng but writeback for DCN so for now we dont
+ * care what the value is, hence min to overdrive level
+@@ -1366,96 +1359,37 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
+ ranges.num_writer_wm_sets = WM_SET_COUNT;
+ ranges.reader_wm_sets[0].wm_inst = WM_A;
+ ranges.reader_wm_sets[0].min_drain_clk_khz = min_dcfclk_khz;
+- ranges.reader_wm_sets[0].max_drain_clk_khz = max_dcfclk_khz;
++ ranges.reader_wm_sets[0].max_drain_clk_khz = overdrive;
+ ranges.reader_wm_sets[0].min_fill_clk_khz = min_fclk_khz;
+- ranges.reader_wm_sets[0].max_fill_clk_khz = min_fclk_khz;
++ ranges.reader_wm_sets[0].max_fill_clk_khz = overdrive;
+ ranges.writer_wm_sets[0].wm_inst = WM_A;
+ ranges.writer_wm_sets[0].min_fill_clk_khz = socclk_khz;
+ ranges.writer_wm_sets[0].max_fill_clk_khz = overdrive;
+ ranges.writer_wm_sets[0].min_drain_clk_khz = min_fclk_khz;
+- ranges.writer_wm_sets[0].max_drain_clk_khz = min_fclk_khz;
+-
+- ranges.reader_wm_sets[1].wm_inst = WM_B;
+- ranges.reader_wm_sets[1].min_drain_clk_khz = min_fclk_khz;
+- ranges.reader_wm_sets[1].max_drain_clk_khz = max_dcfclk_khz;
+- ranges.reader_wm_sets[1].min_fill_clk_khz = mid_fclk_khz;
+- ranges.reader_wm_sets[1].max_fill_clk_khz = mid_fclk_khz;
+- ranges.writer_wm_sets[1].wm_inst = WM_B;
+- ranges.writer_wm_sets[1].min_fill_clk_khz = socclk_khz;
+- ranges.writer_wm_sets[1].max_fill_clk_khz = overdrive;
+- ranges.writer_wm_sets[1].min_drain_clk_khz = mid_fclk_khz;
+- ranges.writer_wm_sets[1].max_drain_clk_khz = mid_fclk_khz;
+-
+-
+- ranges.reader_wm_sets[2].wm_inst = WM_C;
+- ranges.reader_wm_sets[2].min_drain_clk_khz = min_fclk_khz;
+- ranges.reader_wm_sets[2].max_drain_clk_khz = max_dcfclk_khz;
+- ranges.reader_wm_sets[2].min_fill_clk_khz = nom_fclk_khz;
+- ranges.reader_wm_sets[2].max_fill_clk_khz = nom_fclk_khz;
+- ranges.writer_wm_sets[2].wm_inst = WM_C;
+- ranges.writer_wm_sets[2].min_fill_clk_khz = socclk_khz;
+- ranges.writer_wm_sets[2].max_fill_clk_khz = overdrive;
+- ranges.writer_wm_sets[2].min_drain_clk_khz = nom_fclk_khz;
+- ranges.writer_wm_sets[2].max_drain_clk_khz = nom_fclk_khz;
+-
+- ranges.reader_wm_sets[3].wm_inst = WM_D;
+- ranges.reader_wm_sets[3].min_drain_clk_khz = min_fclk_khz;
+- ranges.reader_wm_sets[3].max_drain_clk_khz = max_dcfclk_khz;
+- ranges.reader_wm_sets[3].min_fill_clk_khz = max_fclk_khz;
+- ranges.reader_wm_sets[3].max_fill_clk_khz = max_fclk_khz;
+- ranges.writer_wm_sets[3].wm_inst = WM_D;
+- ranges.writer_wm_sets[3].min_fill_clk_khz = socclk_khz;
+- ranges.writer_wm_sets[3].max_fill_clk_khz = overdrive;
+- ranges.writer_wm_sets[3].min_drain_clk_khz = max_fclk_khz;
+- ranges.writer_wm_sets[3].max_drain_clk_khz = max_fclk_khz;
++ ranges.writer_wm_sets[0].max_drain_clk_khz = overdrive;
+
+ if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
+ ranges.reader_wm_sets[0].wm_inst = WM_A;
+ ranges.reader_wm_sets[0].min_drain_clk_khz = 300000;
+- ranges.reader_wm_sets[0].max_drain_clk_khz = 654000;
++ ranges.reader_wm_sets[0].max_drain_clk_khz = 5000000;
+ ranges.reader_wm_sets[0].min_fill_clk_khz = 800000;
+- ranges.reader_wm_sets[0].max_fill_clk_khz = 800000;
++ ranges.reader_wm_sets[0].max_fill_clk_khz = 5000000;
+ ranges.writer_wm_sets[0].wm_inst = WM_A;
+ ranges.writer_wm_sets[0].min_fill_clk_khz = 200000;
+- ranges.writer_wm_sets[0].max_fill_clk_khz = 757000;
++ ranges.writer_wm_sets[0].max_fill_clk_khz = 5000000;
+ ranges.writer_wm_sets[0].min_drain_clk_khz = 800000;
+- ranges.writer_wm_sets[0].max_drain_clk_khz = 800000;
+-
+- ranges.reader_wm_sets[1].wm_inst = WM_B;
+- ranges.reader_wm_sets[1].min_drain_clk_khz = 300000;
+- ranges.reader_wm_sets[1].max_drain_clk_khz = 654000;
+- ranges.reader_wm_sets[1].min_fill_clk_khz = 933000;
+- ranges.reader_wm_sets[1].max_fill_clk_khz = 933000;
+- ranges.writer_wm_sets[1].wm_inst = WM_B;
+- ranges.writer_wm_sets[1].min_fill_clk_khz = 200000;
+- ranges.writer_wm_sets[1].max_fill_clk_khz = 757000;
+- ranges.writer_wm_sets[1].min_drain_clk_khz = 933000;
+- ranges.writer_wm_sets[1].max_drain_clk_khz = 933000;
+-
+-
+- ranges.reader_wm_sets[2].wm_inst = WM_C;
+- ranges.reader_wm_sets[2].min_drain_clk_khz = 300000;
+- ranges.reader_wm_sets[2].max_drain_clk_khz = 654000;
+- ranges.reader_wm_sets[2].min_fill_clk_khz = 1067000;
+- ranges.reader_wm_sets[2].max_fill_clk_khz = 1067000;
+- ranges.writer_wm_sets[2].wm_inst = WM_C;
+- ranges.writer_wm_sets[2].min_fill_clk_khz = 200000;
+- ranges.writer_wm_sets[2].max_fill_clk_khz = 757000;
+- ranges.writer_wm_sets[2].min_drain_clk_khz = 1067000;
+- ranges.writer_wm_sets[2].max_drain_clk_khz = 1067000;
+-
+- ranges.reader_wm_sets[3].wm_inst = WM_D;
+- ranges.reader_wm_sets[3].min_drain_clk_khz = 300000;
+- ranges.reader_wm_sets[3].max_drain_clk_khz = 654000;
+- ranges.reader_wm_sets[3].min_fill_clk_khz = 1200000;
+- ranges.reader_wm_sets[3].max_fill_clk_khz = 1200000;
+- ranges.writer_wm_sets[3].wm_inst = WM_D;
+- ranges.writer_wm_sets[3].min_fill_clk_khz = 200000;
+- ranges.writer_wm_sets[3].max_fill_clk_khz = 757000;
+- ranges.writer_wm_sets[3].min_drain_clk_khz = 1200000;
+- ranges.writer_wm_sets[3].max_drain_clk_khz = 1200000;
++ ranges.writer_wm_sets[0].max_drain_clk_khz = 5000000;
+ }
+
++ ranges.reader_wm_sets[1] = ranges.writer_wm_sets[0];
++ ranges.reader_wm_sets[1].wm_inst = WM_B;
++
++ ranges.reader_wm_sets[2] = ranges.writer_wm_sets[0];
++ ranges.reader_wm_sets[2].wm_inst = WM_C;
++
++ ranges.reader_wm_sets[3] = ranges.writer_wm_sets[0];
++ ranges.reader_wm_sets[3].wm_inst = WM_D;
++
+ /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
+ pp->set_wm_ranges(&pp->pp_smu, &ranges);
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4792-drm-amd-display-remove-dcn1-watermark-sets-b-c-and-d.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4792-drm-amd-display-remove-dcn1-watermark-sets-b-c-and-d.patch
new file mode 100644
index 00000000..a4df213b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4792-drm-amd-display-remove-dcn1-watermark-sets-b-c-and-d.patch
@@ -0,0 +1,66 @@
+From 8745ebc0b599afc57a6c9e00ff0beca363de57de Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Tue, 5 Jun 2018 07:19:08 -0400
+Subject: [PATCH 4792/5725] drm/amd/display: remove dcn1 watermark sets b, c
+ and d
+
+Currently dcn1 will not switch between watermark sets so we can
+save time by not calculating 3 extra sets.
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 21 ++++++++++++++++++++-
+ 1 file changed, 20 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+index 8dc0773..12261fb 100644
+--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
++++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+@@ -505,6 +505,7 @@ static void split_stream_across_pipes(
+ resource_build_scaling_params(secondary_pipe);
+ }
+
++#if 0
+ static void calc_wm_sets_and_perf_params(
+ struct dc_state *context,
+ struct dcn_bw_internal_vars *v)
+@@ -586,6 +587,7 @@ static void calc_wm_sets_and_perf_params(
+ if (v->voltage_level >= 3)
+ context->bw.dcn.watermarks.d = context->bw.dcn.watermarks.a;
+ }
++#endif
+
+ static bool dcn_bw_apply_registry_override(struct dc *dc)
+ {
+@@ -980,7 +982,24 @@ bool dcn_validate_bandwidth(
+ bw_consumed = v->fabric_and_dram_bandwidth;
+
+ display_pipe_configuration(v);
+- calc_wm_sets_and_perf_params(context, v);
++ /*calc_wm_sets_and_perf_params(context, v);*/
++ /* Only 1 set is used by dcn since no noticeable
++ * performance improvement was measured and due to hw bug DEGVIDCN10-254
++ */
++ dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
++
++ context->bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns =
++ v->stutter_exit_watermark * 1000;
++ context->bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
++ v->stutter_enter_plus_exit_watermark * 1000;
++ context->bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns =
++ v->dram_clock_change_watermark * 1000;
++ context->bw.dcn.watermarks.a.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
++ context->bw.dcn.watermarks.a.urgent_ns = v->urgent_watermark * 1000;
++ context->bw.dcn.watermarks.b = context->bw.dcn.watermarks.a;
++ context->bw.dcn.watermarks.c = context->bw.dcn.watermarks.a;
++ context->bw.dcn.watermarks.d = context->bw.dcn.watermarks.a;
++
+ context->bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 /
+ (ddr4_dram_factor_single_Channel * v->number_of_channels));
+ if (bw_consumed == v->fabric_and_dram_bandwidth_vmin0p65) {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4793-drm-amd-display-separate-out-wm-change-request-dcn-w.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4793-drm-amd-display-separate-out-wm-change-request-dcn-w.patch
new file mode 100644
index 00000000..1371f92d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4793-drm-amd-display-separate-out-wm-change-request-dcn-w.patch
@@ -0,0 +1,108 @@
+From a8c7550543f613fdbc8411ef9eb37d1fd2ea71f7 Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Tue, 5 Jun 2018 13:14:13 -0400
+Subject: [PATCH 4793/5725] drm/amd/display: separate out wm change request dcn
+ workaround
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c | 11 ++++++-----
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h | 2 ++
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 3 +++
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 1 +
+ drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 1 +
+ 5 files changed, 13 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+index 63b75ac..623db09 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+@@ -190,6 +190,12 @@ static uint32_t convert_and_clamp(
+ }
+
+
++void hubbub1_wm_change_req_wa(struct hubbub *hubbub)
++{
++ REG_UPDATE_SEQ(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
++ DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 0, 1);
++}
++
+ void hubbub1_program_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+@@ -203,8 +209,6 @@ void hubbub1_program_watermarks(
+ */
+ uint32_t prog_wm_value;
+
+- REG_UPDATE(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
+- DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 0);
+
+ /* Repeat for water mark set A, B, C and D. */
+ /* clock state A */
+@@ -459,9 +463,6 @@ void hubbub1_program_watermarks(
+ watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
+ }
+
+- REG_UPDATE(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
+- DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
+-
+ REG_UPDATE(DCHUBBUB_ARB_SAT_LEVEL,
+ DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
+ REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
+index 0ca39cb..d6e596e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
+@@ -195,6 +195,8 @@ void hubbub1_update_dchub(
+ bool hubbub1_verify_allow_pstate_change_high(
+ struct hubbub *hubbub);
+
++void hubbub1_wm_change_req_wa(struct hubbub *hubbub);
++
+ void hubbub1_program_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index eaa8b0a..d78802e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -2320,6 +2320,9 @@ static void dcn10_apply_ctx_for_surface(
+ hubbub1_program_watermarks(dc->res_pool->hubbub,
+ &context->bw.dcn.watermarks, ref_clk_mhz, true);
+
++ if (dc->hwseq->wa.DEGVIDCN10_254)
++ hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
++
+ if (dc->debug.sanity_checks) {
+ /* pstate stuck check after watermark update */
+ dcn10_verify_allow_pstate_change_high(dc);
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+index 4081160..0a313dc 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+@@ -743,6 +743,7 @@ static struct dce_hwseq *dcn10_hwseq_create(
+ hws->masks = &hwseq_mask;
+ hws->wa.DEGVIDCN10_253 = true;
+ hws->wa.false_optc_underflow = true;
++ hws->wa.DEGVIDCN10_254 = true;
+ }
+ return hws;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+index a71770e..1c94dae 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+@@ -44,6 +44,7 @@ struct dce_hwseq_wa {
+ bool blnd_crtc_trigger;
+ bool DEGVIDCN10_253;
+ bool false_optc_underflow;
++ bool DEGVIDCN10_254;
+ };
+
+ struct hwseq_wa_state {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4794-drm-amd-display-move-dcn-watermark-programming-to-se.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4794-drm-amd-display-move-dcn-watermark-programming-to-se.patch
new file mode 100644
index 00000000..8c1f1ce0
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4794-drm-amd-display-move-dcn-watermark-programming-to-se.patch
@@ -0,0 +1,170 @@
+From 4b5cdf02087ab62f1ee31ff6758920ff8d7ecc92 Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Thu, 24 May 2018 14:39:01 -0400
+Subject: [PATCH 4794/5725] drm/amd/display: move dcn watermark programming to
+ set_bandwidth
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 107 ++++-----------------
+ drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 2 +-
+ 2 files changed, 19 insertions(+), 90 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index d78802e..da82c6a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -2234,8 +2234,6 @@ static void dcn10_apply_ctx_for_surface(
+ int i;
+ struct timing_generator *tg;
+ bool removed_pipe[4] = { false };
+- unsigned int ref_clk_mhz = dc->res_pool->ref_clock_inKhz/1000;
+- bool program_water_mark = false;
+ struct pipe_ctx *top_pipe_to_program =
+ find_top_pipe_for_stream(dc, context, stream);
+ DC_LOGGER_INIT(dc->ctx->logger);
+@@ -2296,107 +2294,38 @@ static void dcn10_apply_ctx_for_surface(
+ if (num_planes == 0)
+ false_optc_underflow_wa(dc, stream, tg);
+
+- for (i = 0; i < dc->res_pool->pipe_count; i++) {
+- struct pipe_ctx *old_pipe_ctx =
+- &dc->current_state->res_ctx.pipe_ctx[i];
+- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+-
+- if (pipe_ctx->stream == stream &&
+- pipe_ctx->plane_state &&
+- pipe_ctx->plane_state->update_flags.bits.full_update)
+- program_water_mark = true;
+-
++ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (removed_pipe[i])
+- dcn10_disable_plane(dc, old_pipe_ctx);
+- }
+-
+- if (program_water_mark) {
+- if (dc->debug.sanity_checks) {
+- /* pstate stuck check after watermark update */
+- dcn10_verify_allow_pstate_change_high(dc);
+- }
+-
+- /* watermark is for all pipes */
+- hubbub1_program_watermarks(dc->res_pool->hubbub,
+- &context->bw.dcn.watermarks, ref_clk_mhz, true);
+-
+- if (dc->hwseq->wa.DEGVIDCN10_254)
+- hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
++ dcn10_disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
+
+- if (dc->debug.sanity_checks) {
+- /* pstate stuck check after watermark update */
+- dcn10_verify_allow_pstate_change_high(dc);
+- }
+- }
+-/* DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
+- "\n============== Watermark parameters ==============\n"
+- "a.urgent_ns: %d \n"
+- "a.cstate_enter_plus_exit: %d \n"
+- "a.cstate_exit: %d \n"
+- "a.pstate_change: %d \n"
+- "a.pte_meta_urgent: %d \n"
+- "b.urgent_ns: %d \n"
+- "b.cstate_enter_plus_exit: %d \n"
+- "b.cstate_exit: %d \n"
+- "b.pstate_change: %d \n"
+- "b.pte_meta_urgent: %d \n",
+- context->bw.dcn.watermarks.a.urgent_ns,
+- context->bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns,
+- context->bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns,
+- context->bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns,
+- context->bw.dcn.watermarks.a.pte_meta_urgent_ns,
+- context->bw.dcn.watermarks.b.urgent_ns,
+- context->bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns,
+- context->bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns,
+- context->bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns,
+- context->bw.dcn.watermarks.b.pte_meta_urgent_ns
+- );
+- DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
+- "\nc.urgent_ns: %d \n"
+- "c.cstate_enter_plus_exit: %d \n"
+- "c.cstate_exit: %d \n"
+- "c.pstate_change: %d \n"
+- "c.pte_meta_urgent: %d \n"
+- "d.urgent_ns: %d \n"
+- "d.cstate_enter_plus_exit: %d \n"
+- "d.cstate_exit: %d \n"
+- "d.pstate_change: %d \n"
+- "d.pte_meta_urgent: %d \n"
+- "========================================================\n",
+- context->bw.dcn.watermarks.c.urgent_ns,
+- context->bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns,
+- context->bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns,
+- context->bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns,
+- context->bw.dcn.watermarks.c.pte_meta_urgent_ns,
+- context->bw.dcn.watermarks.d.urgent_ns,
+- context->bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns,
+- context->bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns,
+- context->bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns,
+- context->bw.dcn.watermarks.d.pte_meta_urgent_ns
+- );
+-*/
++ if (dc->hwseq->wa.DEGVIDCN10_254)
++ hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
+ }
+
+ static void dcn10_set_bandwidth(
+ struct dc *dc,
+ struct dc_state *context,
+- bool decrease_allowed)
++ bool safe_to_lower)
+ {
+ if (dc->debug.sanity_checks)
+ dcn10_verify_allow_pstate_change_high(dc);
+
+- if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
+- return;
++ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
++ if (context->stream_count == 0)
++ context->bw.dcn.clk.phyclk_khz = 0;
+
+- if (context->stream_count == 0)
+- context->bw.dcn.clk.phyclk_khz = 0;
++ dc->res_pool->dccg->funcs->update_clocks(
++ dc->res_pool->dccg,
++ &context->bw.dcn.clk,
++ safe_to_lower);
+
+- dc->res_pool->dccg->funcs->update_clocks(
+- dc->res_pool->dccg,
+- &context->bw.dcn.clk,
+- decrease_allowed);
++ dcn10_pplib_apply_display_requirements(dc, context);
++ }
+
+- dcn10_pplib_apply_display_requirements(dc, context);
++ hubbub1_program_watermarks(dc->res_pool->hubbub,
++ &context->bw.dcn.watermarks,
++ dc->res_pool->ref_clock_inKhz / 1000,
++ true);
+
+ if (dc->debug.sanity_checks)
+ dcn10_verify_allow_pstate_change_high(dc);
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+index 1c94dae..2506601 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+@@ -172,7 +172,7 @@ struct hw_sequencer_funcs {
+ void (*set_bandwidth)(
+ struct dc *dc,
+ struct dc_state *context,
+- bool decrease_allowed);
++ bool safe_to_lower);
+
+ void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes,
+ int vmin, int vmax);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4795-drm-amd-display-remove-soc_bounding_box.c.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4795-drm-amd-display-remove-soc_bounding_box.c.patch
new file mode 100644
index 00000000..affc7634
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4795-drm-amd-display-remove-soc_bounding_box.c.patch
@@ -0,0 +1,193 @@
+From 90850500a1c1154a957a242443a74f3797ef0f07 Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Tue, 5 Jun 2018 07:33:10 -0400
+Subject: [PATCH 4795/5725] drm/amd/display: remove soc_bounding_box.c
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dml/Makefile | 3 +-
+ .../gpu/drm/amd/display/dc/dml/display_mode_lib.h | 1 -
+ .../drm/amd/display/dc/dml/display_mode_structs.h | 3 +-
+ .../gpu/drm/amd/display/dc/dml/soc_bounding_box.c | 79 ----------------------
+ .../gpu/drm/amd/display/dc/dml/soc_bounding_box.h | 35 ----------
+ 5 files changed, 2 insertions(+), 119 deletions(-)
+ delete mode 100644 drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c
+ delete mode 100644 drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.h
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
+index a29fa6d..ec2475d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
+@@ -15,11 +15,10 @@ CFLAGS_display_mode_lib.o := $(dml_ccflags)
+ CFLAGS_display_pipe_clocks.o := $(dml_ccflags)
+ CFLAGS_dml1_display_rq_dlg_calc.o := $(dml_ccflags)
+ CFLAGS_display_rq_dlg_helpers.o := $(dml_ccflags)
+-CFLAGS_soc_bounding_box.o := $(dml_ccflags)
+ CFLAGS_dml_common_defs.o := $(dml_ccflags)
+
+ DML = display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o \
+- soc_bounding_box.o dml_common_defs.o
++ dml_common_defs.o
+
+ AMD_DAL_DML = $(addprefix $(AMDDALPATH)/dc/dml/,$(DML))
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
+index 3c2abcb..6352062 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
++++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
+@@ -27,7 +27,6 @@
+
+
+ #include "dml_common_defs.h"
+-#include "soc_bounding_box.h"
+ #include "dml1_display_rq_dlg_calc.h"
+
+ enum dml_project {
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+index 7fa0375..6943801 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
++++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+@@ -64,10 +64,9 @@ struct _vcs_dpi_voltage_scaling_st {
+ double dscclk_mhz;
+ double dcfclk_mhz;
+ double socclk_mhz;
+- double dram_speed_mhz;
++ double dram_speed_mts;
+ double fabricclk_mhz;
+ double dispclk_mhz;
+- double dram_bw_per_chan_gbps;
+ double phyclk_mhz;
+ double dppclk_mhz;
+ };
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c b/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c
+deleted file mode 100644
+index 324239c..0000000
+--- a/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c
++++ /dev/null
+@@ -1,79 +0,0 @@
+-/*
+- * Copyright 2017 Advanced Micro Devices, Inc.
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included in
+- * all copies or substantial portions of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+- * OTHER DEALINGS IN THE SOFTWARE.
+- *
+- * Authors: AMD
+- *
+- */
+-#include "soc_bounding_box.h"
+-#include "display_mode_lib.h"
+-#include "dc_features.h"
+-
+-#include "dml_inline_defs.h"
+-
+-/*
+- * NOTE:
+- * This file is gcc-parseable HW gospel, coming straight from HW engineers.
+- *
+- * It doesn't adhere to Linux kernel style and sometimes will do things in odd
+- * ways. Unless there is something clearly wrong with it the code should
+- * remain as-is as it provides us with a guarantee from HW that it is correct.
+- */
+-
+-void dml_socbb_set_latencies(soc_bounding_box_st *to_box, soc_bounding_box_st *from_box)
+-{
+- to_box->dram_clock_change_latency_us = from_box->dram_clock_change_latency_us;
+- to_box->sr_exit_time_us = from_box->sr_exit_time_us;
+- to_box->sr_enter_plus_exit_time_us = from_box->sr_enter_plus_exit_time_us;
+- to_box->urgent_latency_us = from_box->urgent_latency_us;
+- to_box->writeback_latency_us = from_box->writeback_latency_us;
+-}
+-
+-voltage_scaling_st dml_socbb_voltage_scaling(
+- const soc_bounding_box_st *soc,
+- enum voltage_state voltage)
+-{
+- const voltage_scaling_st *voltage_state;
+- const voltage_scaling_st * const voltage_end = soc->clock_limits + DC__VOLTAGE_STATES;
+-
+- for (voltage_state = soc->clock_limits;
+- voltage_state < voltage_end && voltage_state->state != voltage;
+- voltage_state++) {
+- }
+-
+- if (voltage_state < voltage_end)
+- return *voltage_state;
+- return soc->clock_limits[DC__VOLTAGE_STATES - 1];
+-}
+-
+-double dml_socbb_return_bw_mhz(soc_bounding_box_st *box, enum voltage_state voltage)
+-{
+- double return_bw;
+-
+- voltage_scaling_st state = dml_socbb_voltage_scaling(box, voltage);
+-
+- return_bw = dml_min((double) box->return_bus_width_bytes * state.dcfclk_mhz,
+- state.dram_bw_per_chan_gbps * 1000.0 * (double) box->num_chans
+- * box->ideal_dram_bw_after_urgent_percent / 100.0);
+-
+- return_bw = dml_min((double) box->return_bus_width_bytes * state.fabricclk_mhz, return_bw);
+-
+- return return_bw;
+-}
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.h b/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.h
+deleted file mode 100644
+index 7a65206..0000000
+--- a/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.h
++++ /dev/null
+@@ -1,35 +0,0 @@
+-/*
+- * Copyright 2017 Advanced Micro Devices, Inc.
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included in
+- * all copies or substantial portions of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+- * OTHER DEALINGS IN THE SOFTWARE.
+- *
+- * Authors: AMD
+- *
+- */
+-
+-#ifndef __SOC_BOUNDING_BOX_H__
+-#define __SOC_BOUNDING_BOX_H__
+-
+-#include "dml_common_defs.h"
+-
+-void dml_socbb_set_latencies(soc_bounding_box_st *to_box, soc_bounding_box_st *from_box);
+-voltage_scaling_st dml_socbb_voltage_scaling(const soc_bounding_box_st *box, enum voltage_state voltage);
+-double dml_socbb_return_bw_mhz(soc_bounding_box_st *box, enum voltage_state voltage);
+-
+-#endif
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4796-drm-amd-display-Check-scaling-ration-not-viewports-p.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4796-drm-amd-display-Check-scaling-ration-not-viewports-p.patch
new file mode 100644
index 00000000..a59b16a5
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4796-drm-amd-display-Check-scaling-ration-not-viewports-p.patch
@@ -0,0 +1,40 @@
+From 28adce9f6ed1135f7eca199ff9ed4557de48bd93 Mon Sep 17 00:00:00 2001
+From: Yongqiang Sun <yongqiang.sun@amd.com>
+Date: Fri, 8 Jun 2018 13:07:53 -0500
+Subject: [PATCH 4796/5725] drm/amd/display: Check scaling ration not viewports
+ params.
+
+In case of roation, width and height in viewport is difference
+between viewport and h_active and v_active, while this is not scaling.
+The right way is check ratios in scaling data,
+to determine it is a scaling case or not.
+
+Signed-off-by: Yongqiang Sun <yongqiang.sun@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+index c69fa4b..742fd49 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+@@ -145,10 +145,10 @@ static bool dpp_get_optimal_number_of_taps(
+ pixel_width = scl_data->viewport.width;
+
+ /* Some ASICs does not support FP16 scaling, so we reject modes require this*/
+- if (scl_data->viewport.width != scl_data->h_active &&
+- scl_data->viewport.height != scl_data->v_active &&
++ if (scl_data->format == PIXEL_FORMAT_FP16 &&
+ dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT &&
+- scl_data->format == PIXEL_FORMAT_FP16)
++ scl_data->ratios.horz.value != dc_fixpt_one.value &&
++ scl_data->ratios.vert.value != dc_fixpt_one.value)
+ return false;
+
+ if (scl_data->viewport.width > scl_data->h_active &&
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4797-drm-amd-display-dal-3.1.52.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4797-drm-amd-display-dal-3.1.52.patch
new file mode 100644
index 00000000..252455df
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4797-drm-amd-display-dal-3.1.52.patch
@@ -0,0 +1,28 @@
+From 657fdbd7a83d2c990ed8e11e05ea85f460e5a740 Mon Sep 17 00:00:00 2001
+From: Tony Cheng <tony.cheng@amd.com>
+Date: Tue, 5 Jun 2018 09:14:36 -0400
+Subject: [PATCH 4797/5725] drm/amd/display: dal 3.1.52
+
+Signed-off-by: Tony Cheng <tony.cheng@amd.com>
+Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 997dbf9..79edbb1 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -38,7 +38,7 @@
+ #include "inc/compressor.h"
+ #include "dml/display_mode_lib.h"
+
+-#define DC_VER "3.1.51"
++#define DC_VER "3.1.52"
+
+ #define MAX_SURFACES 3
+ #define MAX_STREAMS 6
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4798-drm-amd-display-add-valid-regoffset-and-NULL-pointer.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4798-drm-amd-display-add-valid-regoffset-and-NULL-pointer.patch
new file mode 100644
index 00000000..be7a95cd
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4798-drm-amd-display-add-valid-regoffset-and-NULL-pointer.patch
@@ -0,0 +1,191 @@
+From 69887e2ebbbe763f81de64ac68a71d930315d477 Mon Sep 17 00:00:00 2001
+From: Charlene Liu <charlene.liu@amd.com>
+Date: Sat, 9 Jun 2018 19:33:14 -0400
+Subject: [PATCH 4798/5725] drm/amd/display: add valid regoffset and NULL
+ pointer check
+
+Signed-off-by: Charlene Liu <charlene.liu@amd.com>
+Reviewed-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_link.c | 8 ++++---
+ drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 9 ++++----
+ .../amd/display/dc/dce110/dce110_hw_sequencer.c | 7 +++---
+ .../gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c | 5 +++++
+ .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 26 ++++++++++++++++------
+ 5 files changed, 38 insertions(+), 17 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index 27ee9bf..e88dc58 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -33,6 +33,7 @@
+ #include "dc_link_dp.h"
+ #include "dc_link_ddc.h"
+ #include "link_hwss.h"
++#include "opp.h"
+
+ #include "link_encoder.h"
+ #include "hw_sequencer.h"
+@@ -2416,9 +2417,10 @@ void core_link_enable_stream(
+ core_dc->hwss.enable_audio_stream(pipe_ctx);
+
+ /* turn off otg test pattern if enable */
+- pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
+- CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+- COLOR_DEPTH_UNDEFINED);
++ if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
++ pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
++ CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
++ COLOR_DEPTH_UNDEFINED);
+
+ core_dc->hwss.enable_stream(pipe_ctx);
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+index d14b543..dcac527 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+@@ -3,6 +3,7 @@
+ #include "dc.h"
+ #include "dc_link_dp.h"
+ #include "dm_helpers.h"
++#include "opp.h"
+
+ #include "inc/core_types.h"
+ #include "link_hwss.h"
+@@ -2512,8 +2513,8 @@ static void set_crtc_test_pattern(struct dc_link *link,
+ pipe_ctx->stream->bit_depth_params = params;
+ pipe_ctx->stream_res.opp->funcs->
+ opp_program_bit_depth_reduction(pipe_ctx->stream_res.opp, &params);
+-
+- pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
++ if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
++ pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
+ controller_test_pattern, color_depth);
+ }
+ break;
+@@ -2525,8 +2526,8 @@ static void set_crtc_test_pattern(struct dc_link *link,
+ pipe_ctx->stream->bit_depth_params = params;
+ pipe_ctx->stream_res.opp->funcs->
+ opp_program_bit_depth_reduction(pipe_ctx->stream_res.opp, &params);
+-
+- pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
++ if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
++ pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
+ CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+ color_depth);
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index 6c2b4cc..4059a4c 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -1449,7 +1449,7 @@ static void power_down_controllers(struct dc *dc)
+ {
+ int i;
+
+- for (i = 0; i < dc->res_pool->pipe_count; i++) {
++ for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
+ dc->res_pool->timing_generators[i]->funcs->disable_crtc(
+ dc->res_pool->timing_generators[i]);
+ }
+@@ -1489,12 +1489,13 @@ static void disable_vga_and_power_gate_all_controllers(
+ struct timing_generator *tg;
+ struct dc_context *ctx = dc->ctx;
+
+- for (i = 0; i < dc->res_pool->pipe_count; i++) {
++ for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
+ tg = dc->res_pool->timing_generators[i];
+
+ if (tg->funcs->disable_vga)
+ tg->funcs->disable_vga(tg);
+-
++ }
++ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ /* Enable CLOCK gating for each pipe BEFORE controller
+ * powergating. */
+ enable_display_pipe_clock_gating(ctx,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+index 623db09..1ea91e1 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+@@ -483,6 +483,11 @@ void hubbub1_update_dchub(
+ struct hubbub *hubbub,
+ struct dchub_init_data *dh_data)
+ {
++ if (REG(DCHUBBUB_SDPIF_FB_TOP) == 0) {
++ ASSERT(false);
++ /*should not come here*/
++ return;
++ }
+ /* TODO: port code from dal2 */
+ switch (dh_data->fb_mode) {
+ case FRAME_BUFFER_MODE_ZFB_ONLY:
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index da82c6a..12cb828 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -415,6 +415,8 @@ static void dpp_pg_control(
+
+ if (hws->ctx->dc->debug.disable_dpp_power_gate)
+ return;
++ if (REG(DOMAIN1_PG_CONFIG) == 0)
++ return;
+
+ switch (dpp_inst) {
+ case 0: /* DPP0 */
+@@ -465,6 +467,8 @@ static void hubp_pg_control(
+
+ if (hws->ctx->dc->debug.disable_hubp_power_gate)
+ return;
++ if (REG(DOMAIN0_PG_CONFIG) == 0)
++ return;
+
+ switch (hubp_inst) {
+ case 0: /* DCHUBP0 */
+@@ -880,7 +884,8 @@ void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
+ return;
+
+ mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
+- opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
++ if (opp != NULL)
++ opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
+
+ dc->optimized_required = true;
+
+@@ -1358,10 +1363,11 @@ static void dcn10_enable_per_frame_crtc_position_reset(
+
+ DC_SYNC_INFO("Setting up\n");
+ for (i = 0; i < group_size; i++)
+- grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
+- grouped_pipes[i]->stream_res.tg,
+- grouped_pipes[i]->stream->triggered_crtc_reset.event_source->status.primary_otg_inst,
+- &grouped_pipes[i]->stream->triggered_crtc_reset);
++ if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
++ grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
++ grouped_pipes[i]->stream_res.tg,
++ grouped_pipes[i]->stream->triggered_crtc_reset.event_source->status.primary_otg_inst,
++ &grouped_pipes[i]->stream->triggered_crtc_reset);
+
+ DC_SYNC_INFO("Waiting for trigger\n");
+
+@@ -2519,8 +2525,14 @@ static void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
+
+ static void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
+ {
+- if (hws->ctx->dc->res_pool->hubbub != NULL)
+- hubbub1_update_dchub(hws->ctx->dc->res_pool->hubbub, dh_data);
++ if (hws->ctx->dc->res_pool->hubbub != NULL) {
++ struct hubp *hubp = hws->ctx->dc->res_pool->hubps[0];
++
++ if (hubp->funcs->hubp_update_dchub)
++ hubp->funcs->hubp_update_dchub(hubp, dh_data);
++ else
++ hubbub1_update_dchub(hws->ctx->dc->res_pool->hubbub, dh_data);
++ }
+ }
+
+ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4799-drm-amd-display-get-board-layout-for-edid-emulation.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4799-drm-amd-display-get-board-layout-for-edid-emulation.patch
new file mode 100644
index 00000000..bfece697
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4799-drm-amd-display-get-board-layout-for-edid-emulation.patch
@@ -0,0 +1,583 @@
+From 8401f0d64d68522e6807550d5b7d38ff319b6b1a Mon Sep 17 00:00:00 2001
+From: Samson Tam <Samson.Tam@amd.com>
+Date: Wed, 30 May 2018 15:44:50 -0400
+Subject: [PATCH 4799/5725] drm/amd/display: get board layout for edid
+ emulation
+
+Signed-off-by: Samson Tam <Samson.Tam@amd.com>
+Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/bios/bios_parser.c | 196 ++++++++++++++++++
+ drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 218 ++++++++++++++++++++-
+ drivers/gpu/drm/amd/display/dc/dc_bios_types.h | 4 +
+ .../gpu/drm/amd/display/include/grph_object_defs.h | 46 +++++
+ .../gpu/drm/amd/display/include/grph_object_id.h | 11 ++
+ 5 files changed, 474 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+index c7f0b27..be8a249 100644
+--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
++++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+@@ -3762,6 +3762,200 @@ static struct integrated_info *bios_parser_create_integrated_info(
+ return NULL;
+ }
+
++enum bp_result update_slot_layout_info(
++ struct dc_bios *dcb,
++ unsigned int i,
++ struct slot_layout_info *slot_layout_info,
++ unsigned int record_offset)
++{
++ unsigned int j;
++ struct bios_parser *bp;
++ ATOM_BRACKET_LAYOUT_RECORD *record;
++ ATOM_COMMON_RECORD_HEADER *record_header;
++ enum bp_result result = BP_RESULT_NORECORD;
++
++ bp = BP_FROM_DCB(dcb);
++ record = NULL;
++ record_header = NULL;
++
++ for (;;) {
++
++ record_header = (ATOM_COMMON_RECORD_HEADER *)
++ GET_IMAGE(ATOM_COMMON_RECORD_HEADER, record_offset);
++ if (record_header == NULL) {
++ result = BP_RESULT_BADBIOSTABLE;
++ break;
++ }
++
++ /* the end of the list */
++ if (record_header->ucRecordType == 0xff ||
++ record_header->ucRecordSize == 0) {
++ break;
++ }
++
++ if (record_header->ucRecordType ==
++ ATOM_BRACKET_LAYOUT_RECORD_TYPE &&
++ sizeof(ATOM_BRACKET_LAYOUT_RECORD)
++ <= record_header->ucRecordSize) {
++ record = (ATOM_BRACKET_LAYOUT_RECORD *)
++ (record_header);
++ result = BP_RESULT_OK;
++ break;
++ }
++
++ record_offset += record_header->ucRecordSize;
++ }
++
++ /* return if the record not found */
++ if (result != BP_RESULT_OK)
++ return result;
++
++ /* get slot sizes */
++ slot_layout_info->length = record->ucLength;
++ slot_layout_info->width = record->ucWidth;
++
++ /* get info for each connector in the slot */
++ slot_layout_info->num_of_connectors = record->ucConnNum;
++ for (j = 0; j < slot_layout_info->num_of_connectors; ++j) {
++ slot_layout_info->connectors[j].connector_type =
++ (enum connector_layout_type)
++ (record->asConnInfo[j].ucConnectorType);
++ switch (record->asConnInfo[j].ucConnectorType) {
++ case CONNECTOR_TYPE_DVI_D:
++ slot_layout_info->connectors[j].connector_type =
++ CONNECTOR_LAYOUT_TYPE_DVI_D;
++ slot_layout_info->connectors[j].length =
++ CONNECTOR_SIZE_DVI;
++ break;
++
++ case CONNECTOR_TYPE_HDMI:
++ slot_layout_info->connectors[j].connector_type =
++ CONNECTOR_LAYOUT_TYPE_HDMI;
++ slot_layout_info->connectors[j].length =
++ CONNECTOR_SIZE_HDMI;
++ break;
++
++ case CONNECTOR_TYPE_DISPLAY_PORT:
++ slot_layout_info->connectors[j].connector_type =
++ CONNECTOR_LAYOUT_TYPE_DP;
++ slot_layout_info->connectors[j].length =
++ CONNECTOR_SIZE_DP;
++ break;
++
++ case CONNECTOR_TYPE_MINI_DISPLAY_PORT:
++ slot_layout_info->connectors[j].connector_type =
++ CONNECTOR_LAYOUT_TYPE_MINI_DP;
++ slot_layout_info->connectors[j].length =
++ CONNECTOR_SIZE_MINI_DP;
++ break;
++
++ default:
++ slot_layout_info->connectors[j].connector_type =
++ CONNECTOR_LAYOUT_TYPE_UNKNOWN;
++ slot_layout_info->connectors[j].length =
++ CONNECTOR_SIZE_UNKNOWN;
++ }
++
++ slot_layout_info->connectors[j].position =
++ record->asConnInfo[j].ucPosition;
++ slot_layout_info->connectors[j].connector_id =
++ object_id_from_bios_object_id(
++ record->asConnInfo[j].usConnectorObjectId);
++ }
++ return result;
++}
++
++
++enum bp_result get_bracket_layout_record(
++ struct dc_bios *dcb,
++ unsigned int bracket_layout_id,
++ struct slot_layout_info *slot_layout_info)
++{
++ unsigned int i;
++ unsigned int record_offset;
++ struct bios_parser *bp;
++ enum bp_result result;
++ ATOM_OBJECT *object;
++ ATOM_OBJECT_TABLE *object_table;
++ unsigned int genericTableOffset;
++
++ bp = BP_FROM_DCB(dcb);
++ object = NULL;
++ if (slot_layout_info == NULL) {
++ DC_LOG_DETECTION_EDID_PARSER("Invalid slot_layout_info\n");
++ return BP_RESULT_BADINPUT;
++ }
++
++
++ genericTableOffset = bp->object_info_tbl_offset +
++ bp->object_info_tbl.v1_3->usMiscObjectTableOffset;
++ object_table = (ATOM_OBJECT_TABLE *)
++ GET_IMAGE(ATOM_OBJECT_TABLE, genericTableOffset);
++ if (!object_table)
++ return BP_RESULT_FAILURE;
++
++ result = BP_RESULT_NORECORD;
++ for (i = 0; i < object_table->ucNumberOfObjects; ++i) {
++
++ if (bracket_layout_id ==
++ object_table->asObjects[i].usObjectID) {
++
++ object = &object_table->asObjects[i];
++ record_offset = object->usRecordOffset +
++ bp->object_info_tbl_offset;
++
++ result = update_slot_layout_info(dcb, i,
++ slot_layout_info, record_offset);
++ break;
++ }
++ }
++ return result;
++}
++
++static enum bp_result bios_get_board_layout_info(
++ struct dc_bios *dcb,
++ struct board_layout_info *board_layout_info)
++{
++ unsigned int i;
++ struct bios_parser *bp;
++ enum bp_result record_result;
++
++ const unsigned int slot_index_to_vbios_id[MAX_BOARD_SLOTS] = {
++ GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1,
++ GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2,
++ 0, 0
++ };
++
++ bp = BP_FROM_DCB(dcb);
++ if (board_layout_info == NULL) {
++ DC_LOG_DETECTION_EDID_PARSER("Invalid board_layout_info\n");
++ return BP_RESULT_BADINPUT;
++ }
++
++ board_layout_info->num_of_slots = 0;
++
++ for (i = 0; i < MAX_BOARD_SLOTS; ++i) {
++ record_result = get_bracket_layout_record(dcb,
++ slot_index_to_vbios_id[i],
++ &board_layout_info->slots[i]);
++
++ if (record_result == BP_RESULT_NORECORD && i > 0)
++ break; /* no more slots present in bios */
++ else if (record_result != BP_RESULT_OK)
++ return record_result; /* fail */
++
++ ++board_layout_info->num_of_slots;
++ }
++
++ /* all data is valid */
++ board_layout_info->is_number_of_slots_valid = 1;
++ board_layout_info->is_slots_size_valid = 1;
++ board_layout_info->is_connector_offsets_valid = 1;
++ board_layout_info->is_connector_lengths_valid = 1;
++
++ return BP_RESULT_OK;
++}
++
+ /******************************************************************************/
+
+ static const struct dc_vbios_funcs vbios_funcs = {
+@@ -3836,6 +4030,8 @@ static const struct dc_vbios_funcs vbios_funcs = {
+ .post_init = bios_parser_post_init, /* patch vbios table for mxm module by reading i2c */
+
+ .bios_parser_destroy = bios_parser_destroy,
++
++ .get_board_layout_info = bios_get_board_layout_info,
+ };
+
+ static bool bios_parser_construct(
+diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+index b8cef7a..aeb56e4 100644
+--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
++++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+@@ -43,6 +43,29 @@
+ #include "bios_parser_interface.h"
+
+ #include "bios_parser_common.h"
++
++/* Temporarily add in defines until ObjectID.h patch is updated in a few days */
++#ifndef GENERIC_OBJECT_ID_BRACKET_LAYOUT
++#define GENERIC_OBJECT_ID_BRACKET_LAYOUT 0x05
++#endif /* GENERIC_OBJECT_ID_BRACKET_LAYOUT */
++
++#ifndef GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1
++#define GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1 \
++ (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++ GENERIC_OBJECT_ID_BRACKET_LAYOUT << OBJECT_ID_SHIFT)
++#endif /* GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1 */
++
++#ifndef GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2
++#define GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2 \
++ (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
++ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
++ GENERIC_OBJECT_ID_BRACKET_LAYOUT << OBJECT_ID_SHIFT)
++#endif /* GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2 */
++
++#define DC_LOGGER \
++ bp->base.ctx->logger
++
+ #define LAST_RECORD_TYPE 0xff
+ #define SMU9_SYSPLL0_ID 0
+
+@@ -86,7 +109,6 @@ static struct atom_encoder_caps_record *get_encoder_cap_record(
+
+ #define DATA_TABLES(table) (bp->master_data_tbl->listOfdatatables.table)
+
+-
+ static void destruct(struct bios_parser *bp)
+ {
+ kfree(bp->base.bios_local_image);
+@@ -1854,6 +1876,198 @@ static struct integrated_info *bios_parser_create_integrated_info(
+ return NULL;
+ }
+
++static enum bp_result update_slot_layout_info(
++ struct dc_bios *dcb,
++ unsigned int i,
++ struct slot_layout_info *slot_layout_info)
++{
++ unsigned int record_offset;
++ unsigned int j;
++ struct atom_display_object_path_v2 *object;
++ struct atom_bracket_layout_record *record;
++ struct atom_common_record_header *record_header;
++ enum bp_result result;
++ struct bios_parser *bp;
++ struct object_info_table *tbl;
++ struct display_object_info_table_v1_4 *v1_4;
++
++ record = NULL;
++ record_header = NULL;
++ result = BP_RESULT_NORECORD;
++
++ bp = BP_FROM_DCB(dcb);
++ tbl = &bp->object_info_tbl;
++ v1_4 = tbl->v1_4;
++
++ object = &v1_4->display_path[i];
++ record_offset = (unsigned int)
++ (object->disp_recordoffset) +
++ (unsigned int)(bp->object_info_tbl_offset);
++
++ for (;;) {
++
++ record_header = (struct atom_common_record_header *)
++ GET_IMAGE(struct atom_common_record_header,
++ record_offset);
++ if (record_header == NULL) {
++ result = BP_RESULT_BADBIOSTABLE;
++ break;
++ }
++
++ /* the end of the list */
++ if (record_header->record_type == 0xff ||
++ record_header->record_size == 0) {
++ break;
++ }
++
++ if (record_header->record_type ==
++ ATOM_BRACKET_LAYOUT_RECORD_TYPE &&
++ sizeof(struct atom_bracket_layout_record)
++ <= record_header->record_size) {
++ record = (struct atom_bracket_layout_record *)
++ (record_header);
++ result = BP_RESULT_OK;
++ break;
++ }
++
++ record_offset += record_header->record_size;
++ }
++
++ /* return if the record not found */
++ if (result != BP_RESULT_OK)
++ return result;
++
++ /* get slot sizes */
++ slot_layout_info->length = record->bracketlen;
++ slot_layout_info->width = record->bracketwidth;
++
++ /* get info for each connector in the slot */
++ slot_layout_info->num_of_connectors = record->conn_num;
++ for (j = 0; j < slot_layout_info->num_of_connectors; ++j) {
++ slot_layout_info->connectors[j].connector_type =
++ (enum connector_layout_type)
++ (record->conn_info[j].connector_type);
++ switch (record->conn_info[j].connector_type) {
++ case CONNECTOR_TYPE_DVI_D:
++ slot_layout_info->connectors[j].connector_type =
++ CONNECTOR_LAYOUT_TYPE_DVI_D;
++ slot_layout_info->connectors[j].length =
++ CONNECTOR_SIZE_DVI;
++ break;
++
++ case CONNECTOR_TYPE_HDMI:
++ slot_layout_info->connectors[j].connector_type =
++ CONNECTOR_LAYOUT_TYPE_HDMI;
++ slot_layout_info->connectors[j].length =
++ CONNECTOR_SIZE_HDMI;
++ break;
++
++ case CONNECTOR_TYPE_DISPLAY_PORT:
++ slot_layout_info->connectors[j].connector_type =
++ CONNECTOR_LAYOUT_TYPE_DP;
++ slot_layout_info->connectors[j].length =
++ CONNECTOR_SIZE_DP;
++ break;
++
++ case CONNECTOR_TYPE_MINI_DISPLAY_PORT:
++ slot_layout_info->connectors[j].connector_type =
++ CONNECTOR_LAYOUT_TYPE_MINI_DP;
++ slot_layout_info->connectors[j].length =
++ CONNECTOR_SIZE_MINI_DP;
++ break;
++
++ default:
++ slot_layout_info->connectors[j].connector_type =
++ CONNECTOR_LAYOUT_TYPE_UNKNOWN;
++ slot_layout_info->connectors[j].length =
++ CONNECTOR_SIZE_UNKNOWN;
++ }
++
++ slot_layout_info->connectors[j].position =
++ record->conn_info[j].position;
++ slot_layout_info->connectors[j].connector_id =
++ object_id_from_bios_object_id(
++ record->conn_info[j].connectorobjid);
++ }
++ return result;
++}
++
++
++static enum bp_result get_bracket_layout_record(
++ struct dc_bios *dcb,
++ unsigned int bracket_layout_id,
++ struct slot_layout_info *slot_layout_info)
++{
++ unsigned int i;
++ struct bios_parser *bp = BP_FROM_DCB(dcb);
++ enum bp_result result;
++ struct object_info_table *tbl;
++ struct display_object_info_table_v1_4 *v1_4;
++
++ if (slot_layout_info == NULL) {
++ DC_LOG_DETECTION_EDID_PARSER("Invalid slot_layout_info\n");
++ return BP_RESULT_BADINPUT;
++ }
++ tbl = &bp->object_info_tbl;
++ v1_4 = tbl->v1_4;
++
++ result = BP_RESULT_NORECORD;
++ for (i = 0; i < v1_4->number_of_path; ++i) {
++
++ if (bracket_layout_id ==
++ v1_4->display_path[i].display_objid) {
++ result = update_slot_layout_info(dcb, i,
++ slot_layout_info);
++ break;
++ }
++ }
++ return result;
++}
++
++static enum bp_result bios_get_board_layout_info(
++ struct dc_bios *dcb,
++ struct board_layout_info *board_layout_info)
++{
++ unsigned int i;
++ struct bios_parser *bp;
++ enum bp_result record_result;
++
++ const unsigned int slot_index_to_vbios_id[MAX_BOARD_SLOTS] = {
++ GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1,
++ GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2,
++ 0, 0
++ };
++
++ bp = BP_FROM_DCB(dcb);
++ if (board_layout_info == NULL) {
++ DC_LOG_DETECTION_EDID_PARSER("Invalid board_layout_info\n");
++ return BP_RESULT_BADINPUT;
++ }
++
++ board_layout_info->num_of_slots = 0;
++
++ for (i = 0; i < MAX_BOARD_SLOTS; ++i) {
++ record_result = get_bracket_layout_record(dcb,
++ slot_index_to_vbios_id[i],
++ &board_layout_info->slots[i]);
++
++ if (record_result == BP_RESULT_NORECORD && i > 0)
++ break; /* no more slots present in bios */
++ else if (record_result != BP_RESULT_OK)
++ return record_result; /* fail */
++
++ ++board_layout_info->num_of_slots;
++ }
++
++ /* all data is valid */
++ board_layout_info->is_number_of_slots_valid = 1;
++ board_layout_info->is_slots_size_valid = 1;
++ board_layout_info->is_connector_offsets_valid = 1;
++ board_layout_info->is_connector_lengths_valid = 1;
++
++ return BP_RESULT_OK;
++}
++
+ static const struct dc_vbios_funcs vbios_funcs = {
+ .get_connectors_number = bios_parser_get_connectors_number,
+
+@@ -1925,6 +2139,8 @@ static const struct dc_vbios_funcs vbios_funcs = {
+ .bios_parser_destroy = firmware_parser_destroy,
+
+ .get_smu_clock_info = bios_parser_get_smu_clock_info,
++
++ .get_board_layout_info = bios_get_board_layout_info,
+ };
+
+ static bool bios_parser_construct(
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
+index d9b84ec..90082ba 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
+@@ -198,6 +198,10 @@ struct dc_vbios_funcs {
+ void (*post_init)(struct dc_bios *bios);
+
+ void (*bios_parser_destroy)(struct dc_bios **dcb);
++
++ enum bp_result (*get_board_layout_info)(
++ struct dc_bios *dcb,
++ struct board_layout_info *board_layout_info);
+ };
+
+ struct bios_registers {
+diff --git a/drivers/gpu/drm/amd/display/include/grph_object_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_defs.h
+index 2941b88..58bb42e 100644
+--- a/drivers/gpu/drm/amd/display/include/grph_object_defs.h
++++ b/drivers/gpu/drm/amd/display/include/grph_object_defs.h
+@@ -37,6 +37,10 @@
+ * ********************************************************************
+ */
+
++#define MAX_CONNECTOR_NUMBER_PER_SLOT (16)
++#define MAX_BOARD_SLOTS (4)
++#define INVALID_CONNECTOR_INDEX ((unsigned int)(-1))
++
+ /* HPD unit id - HW direct translation */
+ enum hpd_source_id {
+ HPD_SOURCEID1 = 0,
+@@ -136,5 +140,47 @@ enum sync_source {
+ SYNC_SOURCE_DUAL_GPU_PIN
+ };
+
++/* connector sizes in millimeters - from BiosParserTypes.hpp */
++#define CONNECTOR_SIZE_DVI 40
++#define CONNECTOR_SIZE_VGA 32
++#define CONNECTOR_SIZE_HDMI 16
++#define CONNECTOR_SIZE_DP 16
++#define CONNECTOR_SIZE_MINI_DP 9
++#define CONNECTOR_SIZE_UNKNOWN 30
++
++enum connector_layout_type {
++ CONNECTOR_LAYOUT_TYPE_UNKNOWN,
++ CONNECTOR_LAYOUT_TYPE_DVI_D,
++ CONNECTOR_LAYOUT_TYPE_DVI_I,
++ CONNECTOR_LAYOUT_TYPE_VGA,
++ CONNECTOR_LAYOUT_TYPE_HDMI,
++ CONNECTOR_LAYOUT_TYPE_DP,
++ CONNECTOR_LAYOUT_TYPE_MINI_DP,
++};
++struct connector_layout_info {
++ struct graphics_object_id connector_id;
++ enum connector_layout_type connector_type;
++ unsigned int length;
++ unsigned int position; /* offset in mm from right side of the board */
++};
++
++/* length and width in mm */
++struct slot_layout_info {
++ unsigned int length;
++ unsigned int width;
++ unsigned int num_of_connectors;
++ struct connector_layout_info connectors[MAX_CONNECTOR_NUMBER_PER_SLOT];
++};
++
++struct board_layout_info {
++ unsigned int num_of_slots;
+
++ /* indicates valid information in bracket layout structure. */
++ unsigned int is_number_of_slots_valid : 1;
++ unsigned int is_slots_size_valid : 1;
++ unsigned int is_connector_offsets_valid : 1;
++ unsigned int is_connector_lengths_valid : 1;
++
++ struct slot_layout_info slots[MAX_BOARD_SLOTS];
++};
+ #endif
+diff --git a/drivers/gpu/drm/amd/display/include/grph_object_id.h b/drivers/gpu/drm/amd/display/include/grph_object_id.h
+index 92cc6c1..33b3d75 100644
+--- a/drivers/gpu/drm/amd/display/include/grph_object_id.h
++++ b/drivers/gpu/drm/amd/display/include/grph_object_id.h
+@@ -292,4 +292,15 @@ static inline enum engine_id dal_graphics_object_id_get_engine_id(
+ return (enum engine_id) id.id;
+ return ENGINE_ID_UNKNOWN;
+ }
++
++static inline bool dal_graphics_object_id_equal(
++ struct graphics_object_id id_1,
++ struct graphics_object_id id_2)
++{
++ if ((id_1.id == id_2.id) && (id_1.enum_id == id_2.enum_id) &&
++ (id_1.type == id_2.type)) {
++ return true;
++ }
++ return false;
++}
+ #endif
+--
+2.7.4
+
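The hunks above wire a new get_board_layout_info callback into dc_vbios_funcs (for both bios_parser.c and bios_parser2.c) and define the board/slot/connector layout structures in grph_object_defs.h. The sketch below is illustrative only and is not part of the patch: it assumes dcb->funcs points at the vbios function table, as elsewhere in DC, and example_dump_board_layout is an invented name.

/*
 * Illustrative sketch only (not part of the patch): querying the board
 * layout through the new hook. Assumes dcb->funcs points at the
 * dc_vbios_funcs table; the struct fields are the ones added to
 * grph_object_defs.h above.
 */
static void example_dump_board_layout(struct dc_bios *dcb)
{
	struct board_layout_info info;
	unsigned int s, c;

	if (dcb->funcs->get_board_layout_info == NULL)
		return; /* hook not provided by this parser */

	if (dcb->funcs->get_board_layout_info(dcb, &info) != BP_RESULT_OK)
		return;

	if (!info.is_number_of_slots_valid)
		return;

	for (s = 0; s < info.num_of_slots; ++s) {
		struct slot_layout_info *slot = &info.slots[s];

		for (c = 0; c < slot->num_of_connectors; ++c)
			DRM_DEBUG("slot %u conn %u: type %d, %u mm from the right board edge\n",
				  s, c,
				  slot->connectors[c].connector_type,
				  slot->connectors[c].position);
	}
}

Callers would be expected to check the is_*_valid bits before trusting the corresponding fields, since the hook is an optional entry in the function table.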
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4800-drm-amd-display-Allow-option-to-use-worst-case-water.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4800-drm-amd-display-Allow-option-to-use-worst-case-water.patch
new file mode 100644
index 00000000..47efbaa0
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4800-drm-amd-display-Allow-option-to-use-worst-case-water.patch
@@ -0,0 +1,74 @@
+From 738e1ef59d4ea47feeacb3f17617ccfd3c992722 Mon Sep 17 00:00:00 2001
+From: Tony Cheng <tony.cheng@amd.com>
+Date: Fri, 8 Jun 2018 17:36:26 -0400
+Subject: [PATCH 4800/5725] drm/amd/display: Allow option to use worst-case
+ watermark
+
+Use worst-case watermark (considering both DCC and VM)
+to keep golden values consistent regardless of DCC.
+
+Signed-off-by: Tony Cheng <tony.cheng@amd.com>
+Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 23 ++++++++++++++++++++++-
+ drivers/gpu/drm/amd/display/dc/dc.h | 1 +
+ 2 files changed, 23 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+index 12261fb..e44b8d3 100644
+--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
++++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+@@ -31,6 +31,8 @@
+
+ #include "resource.h"
+ #include "dcn10/dcn10_resource.h"
++#include "dcn10/dcn10_hubbub.h"
++
+ #include "dcn_calc_math.h"
+
+ #define DC_LOGGER \
+@@ -889,7 +891,26 @@ bool dcn_validate_bandwidth(
+ ASSERT(pipe->plane_res.scl_data.ratios.vert.value != dc_fixpt_one.value
+ || v->scaler_rec_out_width[input_idx] == v->viewport_height[input_idx]);
+ }
+- v->dcc_enable[input_idx] = pipe->plane_state->dcc.enable ? dcn_bw_yes : dcn_bw_no;
++
++ if (dc->debug.optimized_watermark) {
++ /*
++ * this method requires us to always re-calculate watermark when dcc change
++ * between flip.
++ */
++ v->dcc_enable[input_idx] = pipe->plane_state->dcc.enable ? dcn_bw_yes : dcn_bw_no;
++ } else {
++ /*
++ * allow us to disable dcc on the fly without re-calculating WM
++ *
++ * extra overhead for DCC is quite small. for 1080p WM without
++ * DCC is only 0.417us lower (urgent goes from 6.979us to 6.562us)
++ */
++ unsigned int bpe;
++
++ v->dcc_enable[input_idx] = dc->res_pool->hubbub->funcs->dcc_support_pixel_format(
++ pipe->plane_state->format, &bpe) ? dcn_bw_yes : dcn_bw_no;
++ }
++
+ v->source_pixel_format[input_idx] = tl_pixel_format_to_bw_defs(
+ pipe->plane_state->format);
+ v->source_surface_mode[input_idx] = tl_sw_mode_to_bw_defs(
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 79edbb1..2af5e60 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -234,6 +234,7 @@ struct dc_debug {
+ int urgent_latency_ns;
+ int percent_of_ideal_drambw;
+ int dram_clock_change_latency_ns;
++ bool optimized_watermark;
+ int always_scale;
+ bool disable_pplib_clock_request;
+ bool disable_clock_gate;
+--
+2.7.4
+
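Patch 4800 keys the DCC input of bandwidth validation off the new dc->debug.optimized_watermark flag. The helper below is a hypothetical restatement of that decision (the driver keeps it inline in dcn_validate_bandwidth()); the fields and callbacks are the ones in the hunk, only the example_* name is invented.

/*
 * Hypothetical helper capturing the decision added to
 * dcn_validate_bandwidth() above; only the example_* name is invented.
 */
static bool example_assume_dcc_for_watermarks(const struct dc *dc,
					      const struct pipe_ctx *pipe)
{
	unsigned int bpe; /* bytes per element, filled by the hubbub query */

	if (dc->debug.optimized_watermark)
		/* exact mode: follow the plane's real DCC state, which means
		 * watermarks must be recalculated when DCC changes on a flip */
		return pipe->plane_state->dcc.enable;

	/* worst-case mode: assume DCC whenever the HUBBUB could support it
	 * for this pixel format, so DCC can be toggled on the fly without
	 * recomputing watermarks (the extra margin is small, ~0.417us urgent
	 * watermark at 1080p per the comment in the hunk) */
	return dc->res_pool->hubbub->funcs->dcc_support_pixel_format(
			pipe->plane_state->format, &bpe);
}

In the actual hunk the result is stored as dcn_bw_yes/dcn_bw_no in v->dcc_enable[input_idx].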
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4801-drm-amdgpu-Rename-entity-cleanup-finctions.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4801-drm-amdgpu-Rename-entity-cleanup-finctions.patch
new file mode 100644
index 00000000..3a40f288
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4801-drm-amdgpu-Rename-entity-cleanup-finctions.patch
@@ -0,0 +1,83 @@
+From 974fe593adb3b9c2b33fd8f587e90d90e32fe213 Mon Sep 17 00:00:00 2001
+From: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Date: Tue, 5 Jun 2018 12:56:26 -0400
+Subject: [PATCH 4801/5725] drm/amdgpu: Rename entity cleanup finctions.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Everything in the flush code path (i.e. waiting for the SW queue
+to become empty) is named with *_flush(),
+and everything in the release code path is named with *_fini().
+
+Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Suggested-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 6 +++---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2 +-
+ 3 files changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 3b456ce..da75fb2e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -702,8 +702,8 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
+ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id);
+
+ void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
+-void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr);
+ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
++void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr);
+ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
+
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+index 6f6fdab..38903ea 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+@@ -460,7 +460,7 @@ void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
+ idr_init(&mgr->ctx_handles);
+ }
+
+-void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
++void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr)
+ {
+ struct amdgpu_ctx *ctx;
+ struct idr *idp;
+@@ -489,7 +489,7 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
+ mutex_unlock(&mgr->lock);
+ }
+
+-void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr)
++void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
+ {
+ struct amdgpu_ctx *ctx;
+ struct idr *idp;
+@@ -522,7 +522,7 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
+ struct idr *idp;
+ uint32_t id;
+
+- amdgpu_ctx_mgr_entity_cleanup(mgr);
++ amdgpu_ctx_mgr_entity_fini(mgr);
+
+ idp = &mgr->ctx_handles;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index c41af58..b9b9850 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -866,7 +866,7 @@ static int amdgpu_flush(struct file *f, fl_owner_t id)
+ struct drm_file *file_priv = f->private_data;
+ struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
+
+- amdgpu_ctx_mgr_entity_fini(&fpriv->ctx_mgr);
++ amdgpu_ctx_mgr_entity_flush(&fpriv->ctx_mgr);
+
+ return 0;
+ }
+--
+2.7.4
+
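The convention patch 4801 establishes is easiest to see from the two call sites in the hunks: the file-flush path only drains work, while the teardown path frees it. The scaffolding below is illustrative; the example_* names are invented, while the amdgpu_ctx_mgr_* calls and the fpriv->ctx_mgr member are the ones in the diff.

/* flush path: the fd is being flushed, wait for queued jobs to drain */
static int example_flush(struct drm_file *file_priv)
{
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;

	amdgpu_ctx_mgr_entity_flush(&fpriv->ctx_mgr);
	return 0;
}

/* release path: contexts are being torn down for good */
static void example_release(struct amdgpu_fpriv *fpriv)
{
	/* amdgpu_ctx_mgr_fini() calls amdgpu_ctx_mgr_entity_fini() internally */
	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
}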
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4802-drm-amd-display-don-t-initialize-result.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4802-drm-amd-display-don-t-initialize-result.patch
new file mode 100644
index 00000000..02a05c7c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4802-drm-amd-display-don-t-initialize-result.patch
@@ -0,0 +1,40 @@
+From 5063a7048ae0881a38be529598b03c0bac2d5d9c Mon Sep 17 00:00:00 2001
+From: Stefan Agner <stefan@agner.ch>
+Date: Sun, 17 Jun 2018 10:53:38 +0200
+Subject: [PATCH 4802/5725] drm/amd/display: don't initialize result
+
+The wrong enum type is used to initialize the result, leading to a
+warning when using clang:
+drivers/gpu/drm/amd/amdgpu/../display/dc/core/dc_link_dp.c:1998:26: warning:
+ implicit conversion from enumeration type 'enum ddc_result' to different
+ enumeration type 'enum dc_status' [-Wenum-conversion]
+ enum dc_status result = DDC_RESULT_UNKNOWN;
+ ~~~~~~ ^~~~~~~~~~~~~~~~~~
+1 warning generated.
+
+Initialization of result is unnecessary anyway, just drop the
+initialization.
+
+Signed-off-by: Stefan Agner <stefan@agner.ch>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+index dcac527..68c1f65 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+@@ -1998,7 +1998,7 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
+ {
+ union hpd_irq_data hpd_irq_dpcd_data = {{{{0}}}};
+ union device_service_irq device_service_clear = { { 0 } };
+- enum dc_status result = DDC_RESULT_UNKNOWN;
++ enum dc_status result;
+ bool status = false;
+ /* For use cases related to down stream connection status change,
+ * PSR and device auto test, refer to function handle_sst_hpd_irq
+--
+2.7.4
+
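The warning patch 4802 silences comes from initializing a variable of one enum type with an enumerator of a different enum type. A minimal stand-alone reproduction (all names invented, unrelated to the kernel sources) that clang reports under -Wenum-conversion:

/* build with: clang -c -Wenum-conversion enum_conv_example.c */
enum example_dc_status { EXAMPLE_DC_OK, EXAMPLE_DC_ERROR };
enum example_ddc_result { EXAMPLE_DDC_RESULT_UNKNOWN, EXAMPLE_DDC_RESULT_OK };

int example(void)
{
	/* implicit conversion between unrelated enum types triggers the warning */
	enum example_dc_status result = EXAMPLE_DDC_RESULT_UNKNOWN;

	return (int)result;
}

Dropping the initializer, as the patch does, avoids the mixed-enum assignment entirely; per the commit message the initial value was never used anyway.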
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4803-drm-amdgpu-remove-duplicated-codes.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4803-drm-amdgpu-remove-duplicated-codes.patch
new file mode 100644
index 00000000..ff531817
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4803-drm-amdgpu-remove-duplicated-codes.patch
@@ -0,0 +1,35 @@
+From 23dfdc00f0e1ceb0bdb47f2573f3cfb57b0eab23 Mon Sep 17 00:00:00 2001
+From: Flora Cui <Flora.Cui@amd.com>
+Date: Wed, 27 Jun 2018 13:53:53 +0800
+Subject: [PATCH 4803/5725] drm/amdgpu: remove duplicated codes
+
+The fence_context and seqno are initialized in both amdgpu_vm_manager_init()
+and amdgpu_vmid_mgr_init(); remove the duplicate in amdgpu_vmid_mgr_init().
+
+Signed-off-by: Flora Cui <Flora.Cui@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 5 -----
+ 1 file changed, 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+index 46b541b..5cdbfe4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+@@ -582,11 +582,6 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
+ list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
+ }
+ }
+-
+- adev->vm_manager.fence_context =
+- dma_fence_context_alloc(AMDGPU_MAX_RINGS);
+- for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
+- adev->vm_manager.seqno[i] = 0;
+ }
+
+ /**
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4804-drm-amd-display-Drop-unnecessary-header-file.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4804-drm-amd-display-Drop-unnecessary-header-file.patch
new file mode 100644
index 00000000..2880fd4f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4804-drm-amd-display-Drop-unnecessary-header-file.patch
@@ -0,0 +1,32 @@
+From aab1cd9f80690889681c6d74f34f470d41d7ca04 Mon Sep 17 00:00:00 2001
+From: rex zhu <rex.zhu@amd.com>
+Date: Wed, 27 Jun 2018 17:35:40 +0800
+Subject: [PATCH 4804/5725] drm/amd/display: Drop unnecessary header file
+
+Powerplay implements the hwmgr function table for all supported ASICs in
+order to conceal the ASICs' implementation details, so there is no need to
+include smu10_hwmgr.h.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+index ad96e2a..2acbcc7 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+@@ -34,7 +34,6 @@
+ #include "amdgpu_pm.h"
+ #include "dm_pp_smu.h"
+ #include "../../powerplay/inc/hwmgr.h"
+-#include "../../powerplay/hwmgr/smu10_hwmgr.h"
+
+
+ bool dm_pp_apply_display_requirements(
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4805-drm-amd-display-Fix-dm-pp-clks-type-convert-error.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4805-drm-amd-display-Fix-dm-pp-clks-type-convert-error.patch
new file mode 100644
index 00000000..e44b213d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4805-drm-amd-display-Fix-dm-pp-clks-type-convert-error.patch
@@ -0,0 +1,32 @@
+From 848cfd686aeabb33589b9f05dce8d28f8e87b431 Mon Sep 17 00:00:00 2001
+From: rex zhu <rex.zhu@amd.com>
+Date: Wed, 27 Jun 2018 17:34:37 +0800
+Subject: [PATCH 4805/5725] drm/amd/display: Fix dm-pp clks type convert error
+
+Fix a typo when converting the DISPLAYPHYCLK clock type.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+index 2acbcc7..cf92d7a 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+@@ -167,6 +167,9 @@ static enum amd_pp_clock_type dc_to_pp_clock_type(
+ amd_pp_clk_type = amd_pp_f_clock;
+ break;
+ case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
++ amd_pp_clk_type = amd_pp_phy_clock;
++ break;
++ case DM_PP_CLOCK_TYPE_DPPCLK:
+ amd_pp_clk_type = amd_pp_dpp_clock;
+ break;
+ default:
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4806-drm-amdgpu-Rename-set_mmhub_powergating_by_smu-to-po.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4806-drm-amdgpu-Rename-set_mmhub_powergating_by_smu-to-po.patch
new file mode 100644
index 00000000..2f7ca07b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4806-drm-amdgpu-Rename-set_mmhub_powergating_by_smu-to-po.patch
@@ -0,0 +1,136 @@
+From 3d0017eb06bc8dca7319e1b3b77a8f7375150de1 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Tue, 5 Jun 2018 10:07:53 +0800
+Subject: [PATCH 4806/5725] drm/amdgpu: Rename set_mmhub_powergating_by_smu to
+ powergate_mmhub
+
+In order to stay consistent with powergate_uvd/vce.
+
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h | 4 ++--
+ drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 4 ++--
+ drivers/gpu/drm/amd/include/kgd_pp_interface.h | 2 +-
+ drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 8 ++++----
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 4 ++--
+ drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 2 +-
+ 6 files changed, 12 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+index 9acfbee..c6d6926 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+@@ -359,8 +359,8 @@ enum amdgpu_pcie_gen {
+ ((adev)->powerplay.pp_funcs->odn_edit_dpm_table(\
+ (adev)->powerplay.pp_handle, type, parameter, size))
+
+-#define amdgpu_dpm_set_mmhub_powergating_by_smu(adev) \
+- ((adev)->powerplay.pp_funcs->set_mmhub_powergating_by_smu( \
++#define amdgpu_dpm_powergate_mmhub(adev) \
++ ((adev)->powerplay.pp_funcs->powergate_mmhub( \
+ (adev)->powerplay.pp_handle))
+
+ struct amdgpu_dpm {
+diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+index 3d53c44..377f536 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+@@ -471,8 +471,8 @@ void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
+ RENG_EXECUTE_ON_REG_UPDATE, 1);
+ WREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE, pctl1_reng_execute);
+
+- if (adev->powerplay.pp_funcs->set_mmhub_powergating_by_smu)
+- amdgpu_dpm_set_mmhub_powergating_by_smu(adev);
++ if (adev->powerplay.pp_funcs->powergate_mmhub)
++ amdgpu_dpm_powergate_mmhub(adev);
+
+ } else {
+ pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute,
+diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+index 06f08f3..0f98862 100644
+--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
++++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+@@ -269,7 +269,7 @@ struct amd_pm_funcs {
+ int (*get_power_profile_mode)(void *handle, char *buf);
+ int (*set_power_profile_mode)(void *handle, long *input, uint32_t size);
+ int (*odn_edit_dpm_table)(void *handle, uint32_t type, long *input, uint32_t size);
+- int (*set_mmhub_powergating_by_smu)(void *handle);
++ int (*powergate_mmhub)(void *handle);
+ };
+
+ #endif
+diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+index 9e285ed..9732ae9 100644
+--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
++++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+@@ -1179,19 +1179,19 @@ static int pp_get_display_mode_validation_clocks(void *handle,
+ return ret;
+ }
+
+-static int pp_set_mmhub_powergating_by_smu(void *handle)
++static int pp_dpm_powergate_mmhub(void *handle)
+ {
+ struct pp_hwmgr *hwmgr = handle;
+
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
+
+- if (hwmgr->hwmgr_func->set_mmhub_powergating_by_smu == NULL) {
++ if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
+ pr_info("%s was not implemented.\n", __func__);
+ return 0;
+ }
+
+- return hwmgr->hwmgr_func->set_mmhub_powergating_by_smu(hwmgr);
++ return hwmgr->hwmgr_func->powergate_mmhub(hwmgr);
+ }
+
+ static const struct amd_pm_funcs pp_dpm_funcs = {
+@@ -1238,6 +1238,6 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
+ .set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
+ .display_clock_voltage_request = pp_display_clock_voltage_request,
+ .get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
+- .set_mmhub_powergating_by_smu = pp_set_mmhub_powergating_by_smu,
++ .powergate_mmhub = pp_dpm_powergate_mmhub,
+ };
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+index c905df4..b810d9c 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+@@ -1126,7 +1126,7 @@ static int smu10_smus_notify_pwe(struct pp_hwmgr *hwmgr)
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SetRccPfcPmeRestoreRegister);
+ }
+
+-static int smu10_set_mmhub_powergating_by_smu(struct pp_hwmgr *hwmgr)
++static int smu10_powergate_mmhub(struct pp_hwmgr *hwmgr)
+ {
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub);
+ }
+@@ -1182,7 +1182,7 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
+ .asic_setup = smu10_setup_asic_task,
+ .power_state_set = smu10_set_power_state_tasks,
+ .dynamic_state_management_disable = smu10_disable_dpm_tasks,
+- .set_mmhub_powergating_by_smu = smu10_set_mmhub_powergating_by_smu,
++ .powergate_mmhub = smu10_powergate_mmhub,
+ .smus_notify_pwe = smu10_smus_notify_pwe,
+ .gfx_off_control = smu10_gfx_off_control,
+ .display_clock_voltage_request = smu10_display_clock_voltage_request,
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+index 40c98ca..9b07d6e 100644
+--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+@@ -327,7 +327,7 @@ struct pp_hwmgr_func {
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long *input, uint32_t size);
+ int (*set_power_limit)(struct pp_hwmgr *hwmgr, uint32_t n);
+- int (*set_mmhub_powergating_by_smu)(struct pp_hwmgr *hwmgr);
++ int (*powergate_mmhub)(struct pp_hwmgr *hwmgr);
+ int (*smus_notify_pwe)(struct pp_hwmgr *hwmgr);
+ };
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4807-drm-amd-pp-Rename-enable_per_cu_power_gating-to-powe.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4807-drm-amd-pp-Rename-enable_per_cu_power_gating-to-powe.patch
new file mode 100644
index 00000000..b417f7dc
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4807-drm-amd-pp-Rename-enable_per_cu_power_gating-to-powe.patch
@@ -0,0 +1,94 @@
+From 11a48aee0e77a68bc26279855ec3693042768576 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Tue, 5 Jun 2018 11:28:03 +0800
+Subject: [PATCH 4807/5725] drm/amd/pp: Rename enable_per_cu_power_gating to
+ powergate_gfx
+
+Keep consistent with powergate_uvd/vce/mmhub.
+
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 6 +++---
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c | 2 +-
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h | 2 +-
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 2 +-
+ drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 2 +-
+ 5 files changed, 7 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+index 9732ae9..f9baa04 100644
+--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
++++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+@@ -246,13 +246,13 @@ static int pp_set_powergating_state(void *handle,
+ pr_err("gfx off control failed!\n");
+ }
+
+- if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) {
+- pr_debug("%s was not implemented.\n", __func__);
++ if (hwmgr->hwmgr_func->powergate_gfx == NULL) {
++ pr_info("%s was not implemented.\n", __func__);
+ return 0;
+ }
+
+ /* Enable/disable GFX per cu powergating through SMU */
+- return hwmgr->hwmgr_func->enable_per_cu_power_gating(hwmgr,
++ return hwmgr->hwmgr_func->powergate_gfx(hwmgr,
+ state == AMD_PG_STATE_GATE);
+ }
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
+index a77cced..11d71f1 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
+@@ -417,7 +417,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
+ * Powerplay will only control the static per CU Power Gating.
+ * Dynamic per CU Power Gating will be done in gfx.
+ */
+-int smu7_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable)
++int smu7_powergate_gfx(struct pp_hwmgr *hwmgr, bool enable)
+ {
+ struct amdgpu_device *adev = hwmgr->adev;
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
+index be7f66d..fc8f8a6 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
+@@ -33,6 +33,6 @@ int smu7_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate);
+ int smu7_disable_clock_power_gating(struct pp_hwmgr *hwmgr);
+ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
+ const uint32_t *msg_id);
+-int smu7_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable);
++int smu7_powergate_gfx(struct pp_hwmgr *hwmgr, bool enable);
+
+ #endif
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+index 2d83afe..bb3f80c 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+@@ -5044,7 +5044,7 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
+ .get_fan_control_mode = smu7_get_fan_control_mode,
+ .force_clock_level = smu7_force_clock_level,
+ .print_clock_levels = smu7_print_clock_levels,
+- .enable_per_cu_power_gating = smu7_enable_per_cu_power_gating,
++ .powergate_gfx = smu7_powergate_gfx,
+ .get_sclk_od = smu7_get_sclk_od,
+ .set_sclk_od = smu7_set_sclk_od,
+ .get_mclk_od = smu7_get_mclk_od,
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+index 9b07d6e..95e29a2 100644
+--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+@@ -302,7 +302,7 @@ struct pp_hwmgr_func {
+ int (*power_off_asic)(struct pp_hwmgr *hwmgr);
+ int (*force_clock_level)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask);
+ int (*print_clock_levels)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, char *buf);
+- int (*enable_per_cu_power_gating)(struct pp_hwmgr *hwmgr, bool enable);
++ int (*powergate_gfx)(struct pp_hwmgr *hwmgr, bool enable);
+ int (*get_sclk_od)(struct pp_hwmgr *hwmgr);
+ int (*set_sclk_od)(struct pp_hwmgr *hwmgr, uint32_t value);
+ int (*get_mclk_od)(struct pp_hwmgr *hwmgr);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4808-drm-amd-pp-Unify-powergate_uvd-vce-mmhub-to-set_powe.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4808-drm-amd-pp-Unify-powergate_uvd-vce-mmhub-to-set_powe.patch
new file mode 100644
index 00000000..7f482729
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4808-drm-amd-pp-Unify-powergate_uvd-vce-mmhub-to-set_powe.patch
@@ -0,0 +1,272 @@
+From 97c7ec99d8ee187c1228639080f682d8f9305f63 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Tue, 5 Jun 2018 13:06:11 +0800
+Subject: [PATCH 4808/5725] drm/amd/pp: Unify powergate_uvd/vce/mmhub to
+ set_powergating_by_smu
+
+Some HW IP blocks need to call the SMU to enter/leave the power gate state,
+so export a common set_powergating_by_smu interface.
+
+1. Keeps consistent with set_clockgating_by_smu.
+2. Scales easily to powergating other IPs (e.g. gfx) if necessary.
+
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h | 14 ++++---------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 8 +++----
+ drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 15 ++++++++++++-
+ drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 15 ++++++++++++-
+ drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 4 ++--
+ drivers/gpu/drm/amd/include/kgd_pp_interface.h | 5 ++---
+ drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 29 +++++++++++++++++++++++---
+ 7 files changed, 66 insertions(+), 24 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+index c6d6926..ff24e1c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+@@ -287,12 +287,6 @@ enum amdgpu_pcie_gen {
+ #define amdgpu_dpm_force_performance_level(adev, l) \
+ ((adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)))
+
+-#define amdgpu_dpm_powergate_uvd(adev, g) \
+- ((adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)))
+-
+-#define amdgpu_dpm_powergate_vce(adev, g) \
+- ((adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)))
+-
+ #define amdgpu_dpm_get_current_power_state(adev) \
+ ((adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle))
+
+@@ -347,6 +341,10 @@ enum amdgpu_pcie_gen {
+ ((adev)->powerplay.pp_funcs->set_clockgating_by_smu(\
+ (adev)->powerplay.pp_handle, msg_id))
+
++#define amdgpu_dpm_set_powergating_by_smu(adev, block_type, gate) \
++ ((adev)->powerplay.pp_funcs->set_powergating_by_smu(\
++ (adev)->powerplay.pp_handle, block_type, gate))
++
+ #define amdgpu_dpm_get_power_profile_mode(adev, buf) \
+ ((adev)->powerplay.pp_funcs->get_power_profile_mode(\
+ (adev)->powerplay.pp_handle, buf))
+@@ -359,10 +357,6 @@ enum amdgpu_pcie_gen {
+ ((adev)->powerplay.pp_funcs->odn_edit_dpm_table(\
+ (adev)->powerplay.pp_handle, type, parameter, size))
+
+-#define amdgpu_dpm_powergate_mmhub(adev) \
+- ((adev)->powerplay.pp_funcs->powergate_mmhub( \
+- (adev)->powerplay.pp_handle))
+-
+ struct amdgpu_dpm {
+ struct amdgpu_ps *ps;
+ /* number of valid power states */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+index dbc8300..f30e03f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+@@ -1728,10 +1728,10 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
+
+ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
+ {
+- if (adev->powerplay.pp_funcs->powergate_uvd) {
++ if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
+ /* enable/disable UVD */
+ mutex_lock(&adev->pm.mutex);
+- amdgpu_dpm_powergate_uvd(adev, !enable);
++ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
+ mutex_unlock(&adev->pm.mutex);
+ } else {
+ if (enable) {
+@@ -1750,10 +1750,10 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
+
+ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
+ {
+- if (adev->powerplay.pp_funcs->powergate_vce) {
++ if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
+ /* enable/disable VCE */
+ mutex_lock(&adev->pm.mutex);
+- amdgpu_dpm_powergate_vce(adev, !enable);
++ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
+ mutex_unlock(&adev->pm.mutex);
+ } else {
+ if (enable) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+index b6248c0..85b3f46 100644
+--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
++++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+@@ -6764,6 +6764,19 @@ static int ci_dpm_read_sensor(void *handle, int idx,
+ }
+ }
+
++static int ci_set_powergating_by_smu(void *handle,
++ uint32_t block_type, bool gate)
++{
++ switch (block_type) {
++ case AMD_IP_BLOCK_TYPE_UVD:
++ ci_dpm_powergate_uvd(handle, gate);
++ break;
++ default:
++ break;
++ }
++ return 0;
++}
++
+ static const struct amd_ip_funcs ci_dpm_ip_funcs = {
+ .name = "ci_dpm",
+ .early_init = ci_dpm_early_init,
+@@ -6801,7 +6814,7 @@ static const struct amd_pm_funcs ci_dpm_funcs = {
+ .debugfs_print_current_performance_level = &ci_dpm_debugfs_print_current_performance_level,
+ .force_performance_level = &ci_dpm_force_performance_level,
+ .vblank_too_short = &ci_dpm_vblank_too_short,
+- .powergate_uvd = &ci_dpm_powergate_uvd,
++ .set_powergating_by_smu = &ci_set_powergating_by_smu,
+ .set_fan_control_mode = &ci_dpm_set_fan_control_mode,
+ .get_fan_control_mode = &ci_dpm_get_fan_control_mode,
+ .set_fan_speed_percent = &ci_dpm_set_fan_speed_percent,
+diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+index d79e6f5..cee92f8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
++++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+@@ -3305,6 +3305,19 @@ static int kv_dpm_read_sensor(void *handle, int idx,
+ }
+ }
+
++static int kv_set_powergating_by_smu(void *handle,
++ uint32_t block_type, bool gate)
++{
++ switch (block_type) {
++ case AMD_IP_BLOCK_TYPE_UVD:
++ kv_dpm_powergate_uvd(handle, gate);
++ break;
++ default:
++ break;
++ }
++ return 0;
++}
++
+ static const struct amd_ip_funcs kv_dpm_ip_funcs = {
+ .name = "kv_dpm",
+ .early_init = kv_dpm_early_init,
+@@ -3341,7 +3354,7 @@ static const struct amd_pm_funcs kv_dpm_funcs = {
+ .print_power_state = &kv_dpm_print_power_state,
+ .debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level,
+ .force_performance_level = &kv_dpm_force_performance_level,
+- .powergate_uvd = &kv_dpm_powergate_uvd,
++ .set_powergating_by_smu = kv_set_powergating_by_smu,
+ .enable_bapm = &kv_dpm_enable_bapm,
+ .get_vce_clock_state = amdgpu_get_vce_clock_state,
+ .check_state_equal = kv_check_state_equal,
+diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+index 377f536..e70a0d4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+@@ -471,8 +471,8 @@ void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
+ RENG_EXECUTE_ON_REG_UPDATE, 1);
+ WREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE, pctl1_reng_execute);
+
+- if (adev->powerplay.pp_funcs->powergate_mmhub)
+- amdgpu_dpm_powergate_mmhub(adev);
++ if (adev->powerplay.pp_funcs->set_powergating_by_smu)
++ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GMC, true);
+
+ } else {
+ pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute,
+diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+index 0f98862..4535756 100644
+--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
++++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+@@ -232,13 +232,13 @@ struct amd_pm_funcs {
+ void (*debugfs_print_current_performance_level)(void *handle, struct seq_file *m);
+ int (*switch_power_profile)(void *handle, enum PP_SMC_POWER_PROFILE type, bool en);
+ /* export to amdgpu */
+- void (*powergate_uvd)(void *handle, bool gate);
+- void (*powergate_vce)(void *handle, bool gate);
+ struct amd_vce_state *(*get_vce_clock_state)(void *handle, u32 idx);
+ int (*dispatch_tasks)(void *handle, enum amd_pp_task task_id,
+ enum amd_pm_state_type *user_state);
+ int (*load_firmware)(void *handle);
+ int (*wait_for_fw_loading_complete)(void *handle);
++ int (*set_powergating_by_smu)(void *handle,
++ uint32_t block_type, bool gate);
+ int (*set_clockgating_by_smu)(void *handle, uint32_t msg_id);
+ int (*set_power_limit)(void *handle, uint32_t n);
+ int (*get_power_limit)(void *handle, uint32_t *limit, bool default_limit);
+@@ -269,7 +269,6 @@ struct amd_pm_funcs {
+ int (*get_power_profile_mode)(void *handle, char *buf);
+ int (*set_power_profile_mode)(void *handle, long *input, uint32_t size);
+ int (*odn_edit_dpm_table)(void *handle, uint32_t type, long *input, uint32_t size);
+- int (*powergate_mmhub)(void *handle);
+ };
+
+ #endif
+diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+index f9baa04..02ba7c9 100644
+--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
++++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+@@ -254,6 +254,7 @@ static int pp_set_powergating_state(void *handle,
+ /* Enable/disable GFX per cu powergating through SMU */
+ return hwmgr->hwmgr_func->powergate_gfx(hwmgr,
+ state == AMD_PG_STATE_GATE);
++
+ }
+
+ static int pp_suspend(void *handle)
+@@ -1194,14 +1195,36 @@ static int pp_dpm_powergate_mmhub(void *handle)
+ return hwmgr->hwmgr_func->powergate_mmhub(hwmgr);
+ }
+
++static int pp_set_powergating_by_smu(void *handle,
++ uint32_t block_type, bool gate)
++{
++ int ret = 0;
++
++ switch (block_type) {
++ case AMD_IP_BLOCK_TYPE_UVD:
++ case AMD_IP_BLOCK_TYPE_VCN:
++ pp_dpm_powergate_uvd(handle, gate);
++ break;
++ case AMD_IP_BLOCK_TYPE_VCE:
++ pp_dpm_powergate_vce(handle, gate);
++ break;
++ case AMD_IP_BLOCK_TYPE_GMC:
++ pp_dpm_powergate_mmhub(handle);
++ break;
++ case AMD_IP_BLOCK_TYPE_GFX:
++ break;
++ default:
++ break;
++ }
++ return ret;
++}
++
+ static const struct amd_pm_funcs pp_dpm_funcs = {
+ .load_firmware = pp_dpm_load_fw,
+ .wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
+ .force_performance_level = pp_dpm_force_performance_level,
+ .get_performance_level = pp_dpm_get_performance_level,
+ .get_current_power_state = pp_dpm_get_current_power_state,
+- .powergate_vce = pp_dpm_powergate_vce,
+- .powergate_uvd = pp_dpm_powergate_uvd,
+ .dispatch_tasks = pp_dpm_dispatch_tasks,
+ .set_fan_control_mode = pp_dpm_set_fan_control_mode,
+ .get_fan_control_mode = pp_dpm_get_fan_control_mode,
+@@ -1221,6 +1244,7 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
+ .get_vce_clock_state = pp_dpm_get_vce_clock_state,
+ .switch_power_profile = pp_dpm_switch_power_profile,
+ .set_clockgating_by_smu = pp_set_clockgating_by_smu,
++ .set_powergating_by_smu = pp_set_powergating_by_smu,
+ .get_power_profile_mode = pp_get_power_profile_mode,
+ .set_power_profile_mode = pp_set_power_profile_mode,
+ .odn_edit_dpm_table = pp_odn_edit_dpm_table,
+@@ -1238,6 +1262,5 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
+ .set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
+ .display_clock_voltage_request = pp_display_clock_voltage_request,
+ .get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
+- .powergate_mmhub = pp_dpm_powergate_mmhub,
+ };
+
+--
+2.7.4
+
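After patch 4808 the per-block powergate_uvd/vce/mmhub hooks collapse into a single set_powergating_by_smu entry keyed by AMD_IP_BLOCK_TYPE_*. The sketch below restates the caller-side pattern from the amdgpu_pm.c hunk above; only the example_* name is invented, and the legacy non-powerplay fallback is omitted.

/*
 * Caller-side pattern after the unification, simplified from
 * amdgpu_dpm_enable_uvd() in the hunk above.
 */
void example_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
		/* gate == true means "power the block down", hence !enable */
		mutex_lock(&adev->pm.mutex);
		amdgpu_dpm_set_powergating_by_smu(adev,
						  AMD_IP_BLOCK_TYPE_UVD,
						  !enable);
		mutex_unlock(&adev->pm.mutex);
	}
	/* legacy dpm fallback path omitted, see amdgpu_dpm_enable_uvd() */
}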
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4809-drm-amd-pp-Add-gfx-pg-support-in-smu-through-set_pow.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4809-drm-amd-pp-Add-gfx-pg-support-in-smu-through-set_pow.patch
new file mode 100644
index 00000000..e51bf8f0
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4809-drm-amd-pp-Add-gfx-pg-support-in-smu-through-set_pow.patch
@@ -0,0 +1,93 @@
+From 2e853a193b1d09eee6aed7f48e1e37b31345a307 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Thu, 14 Jun 2018 13:07:19 +0800
+Subject: [PATCH 4809/5725] drm/amd/pp: Add gfx pg support in smu through
+ set_powergating_by_smu
+
+The gfx IP block can call set_powergating_by_smu to set the gfx PG state
+if necessary.
+
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 10 ++++------
+ drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 25 +++++++++++++++++--------
+ 2 files changed, 21 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index 294fa59..f9c7247d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -5605,14 +5605,12 @@ static int gfx_v8_0_late_init(void *handle)
+ static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
+ bool enable)
+ {
+- if ((adev->asic_type == CHIP_POLARIS11) ||
++ if (((adev->asic_type == CHIP_POLARIS11) ||
+ (adev->asic_type == CHIP_POLARIS12) ||
+- (adev->asic_type == CHIP_VEGAM))
++ (adev->asic_type == CHIP_VEGAM)) &&
++ adev->powerplay.pp_funcs->set_powergating_by_smu)
+ /* Send msg to SMU via Powerplay */
+- amdgpu_device_ip_set_powergating_state(adev,
+- AMD_IP_BLOCK_TYPE_SMC,
+- enable ?
+- AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE);
++ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, enable);
+
+ WREG32_FIELD(RLC_PG_CNTL, STATIC_PER_CU_PG_ENABLE, enable ? 1 : 0);
+ }
+diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+index 02ba7c9..ae190f9 100644
+--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
++++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+@@ -246,14 +246,7 @@ static int pp_set_powergating_state(void *handle,
+ pr_err("gfx off control failed!\n");
+ }
+
+- if (hwmgr->hwmgr_func->powergate_gfx == NULL) {
+- pr_info("%s was not implemented.\n", __func__);
+- return 0;
+- }
+-
+- /* Enable/disable GFX per cu powergating through SMU */
+- return hwmgr->hwmgr_func->powergate_gfx(hwmgr,
+- state == AMD_PG_STATE_GATE);
++ return 0;
+
+ }
+
+@@ -1195,6 +1188,21 @@ static int pp_dpm_powergate_mmhub(void *handle)
+ return hwmgr->hwmgr_func->powergate_mmhub(hwmgr);
+ }
+
++static int pp_dpm_powergate_gfx(void *handle, bool gate)
++{
++ struct pp_hwmgr *hwmgr = handle;
++
++ if (!hwmgr || !hwmgr->pm_en)
++ return 0;
++
++ if (hwmgr->hwmgr_func->powergate_gfx == NULL) {
++ pr_info("%s was not implemented.\n", __func__);
++ return 0;
++ }
++
++ return hwmgr->hwmgr_func->powergate_gfx(hwmgr, gate);
++}
++
+ static int pp_set_powergating_by_smu(void *handle,
+ uint32_t block_type, bool gate)
+ {
+@@ -1212,6 +1220,7 @@ static int pp_set_powergating_by_smu(void *handle,
+ pp_dpm_powergate_mmhub(handle);
+ break;
+ case AMD_IP_BLOCK_TYPE_GFX:
++ ret = pp_dpm_powergate_gfx(handle, gate);
+ break;
+ default:
+ break;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4810-drm-amd-pp-Add-powergate_gfx-backend-function-on-Rav.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4810-drm-amd-pp-Add-powergate_gfx-backend-function-on-Rav.patch
new file mode 100644
index 00000000..fa533b20
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4810-drm-amd-pp-Add-powergate_gfx-backend-function-on-Rav.patch
@@ -0,0 +1,31 @@
+From 9db5e96710aea6f3ee3f1114c44042c4120809b8 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Tue, 5 Jun 2018 13:55:04 +0800
+Subject: [PATCH 4810/5725] drm/amd/pp: Add powergate_gfx backend function on
+ Raven
+
+Raven supports the gfx off feature instead of gfx powergating,
+so use smu10_gfx_off_control as the powergate_gfx backend function.
+
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+index b810d9c..07cc98c 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+@@ -1186,6 +1186,7 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
+ .smus_notify_pwe = smu10_smus_notify_pwe,
+ .gfx_off_control = smu10_gfx_off_control,
+ .display_clock_voltage_request = smu10_display_clock_voltage_request,
++ .powergate_gfx = smu10_gfx_off_control,
+ };
+
+ int smu10_init_function_pointers(struct pp_hwmgr *hwmgr)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4811-drm-amdgpu-Add-gfx_off-support-in-smu-through-pp_set.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4811-drm-amdgpu-Add-gfx_off-support-in-smu-through-pp_set.patch
new file mode 100644
index 00000000..445705d0
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4811-drm-amdgpu-Add-gfx_off-support-in-smu-through-pp_set.patch
@@ -0,0 +1,119 @@
+From 3e27fbb641f0383537857d80fa58093b301bb57b Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Tue, 5 Jun 2018 11:46:35 +0800
+Subject: [PATCH 4811/5725] drm/amdgpu: Add gfx_off support in smu through
+ pp_set_powergating_by_smu
+
+We can treat the gfx off feature as gfx power gating. The gfx off feature is
+also controlled by the SMU, so add gfx_off support in pp_set_powergating_by_smu.
+
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 19 +++++++------------
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 4 ++++
+ drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 16 ----------------
+ 3 files changed, 11 insertions(+), 28 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 78d6c99..5a815cf 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1780,16 +1780,11 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
+ }
+ }
+
+- if (adev->powerplay.pp_feature & PP_GFXOFF_MASK) {
++ if (adev->powerplay.pp_feature & PP_GFXOFF_MASK)
+ /* enable gfx powergating */
+ amdgpu_device_ip_set_powergating_state(adev,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_PG_STATE_GATE);
+- /* enable gfxoff */
+- amdgpu_device_ip_set_powergating_state(adev,
+- AMD_IP_BLOCK_TYPE_SMC,
+- AMD_PG_STATE_GATE);
+- }
+
+ return 0;
+ }
+@@ -1862,6 +1857,8 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
+ adev->ip_blocks[i].version->funcs->name, r);
+ return r;
+ }
++ if (adev->powerplay.pp_funcs->set_powergating_by_smu)
++ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false);
+ r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
+ /* XXX handle errors */
+ if (r) {
+@@ -1970,12 +1967,6 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_request_full_gpu(adev, false);
+
+- /* ungate SMC block powergating */
+- if (adev->powerplay.pp_feature & PP_GFXOFF_MASK)
+- amdgpu_device_ip_set_powergating_state(adev,
+- AMD_IP_BLOCK_TYPE_SMC,
+- AMD_PG_STATE_UNGATE);
+-
+ /* ungate SMC block first */
+ r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
+ AMD_CG_STATE_UNGATE);
+@@ -1983,6 +1974,10 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
+ DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
+ }
+
++ /* call smu to disable gfx off feature first when suspend */
++ if (adev->powerplay.pp_funcs->set_powergating_by_smu)
++ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false);
++
+ for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+ if (!adev->ip_blocks[i].status.valid)
+ continue;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 7a881ef..cd5668a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -3730,6 +3730,10 @@ static int gfx_v9_0_set_powergating_state(void *handle,
+
+ /* update mgcg state */
+ gfx_v9_0_update_gfx_mg_power_gating(adev, enable);
++
++ /* set gfx off through smu */
++ if (enable && adev->powerplay.pp_funcs->set_powergating_by_smu)
++ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true);
+ break;
+ default:
+ break;
+diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+index ae190f9..9e54bbe 100644
+--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
++++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+@@ -231,23 +231,7 @@ static int pp_sw_reset(void *handle)
+ static int pp_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+ {
+- struct amdgpu_device *adev = handle;
+- struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+- int ret;
+-
+- if (!hwmgr || !hwmgr->pm_en)
+- return 0;
+-
+- if (hwmgr->hwmgr_func->gfx_off_control) {
+- /* Enable/disable GFX off through SMU */
+- ret = hwmgr->hwmgr_func->gfx_off_control(hwmgr,
+- state == AMD_PG_STATE_GATE);
+- if (ret)
+- pr_err("gfx off control failed!\n");
+- }
+-
+ return 0;
+-
+ }
+
+ static int pp_suspend(void *handle)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4812-drm-amdgpu-Split-set_pg_state-into-separate-function.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4812-drm-amdgpu-Split-set_pg_state-into-separate-function.patch
new file mode 100644
index 00000000..77f2abb6
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4812-drm-amdgpu-Split-set_pg_state-into-separate-function.patch
@@ -0,0 +1,88 @@
+From 3859d5619f1fb0cbe5107aacf0e0ddedde8262f1 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Wed, 13 Jun 2018 19:30:40 +0800
+Subject: [PATCH 4812/5725] drm/amdgpu: Split set_pg_state into separate
+ function
+
+1. Add an amdgpu_device_ip_late_set_pg_state function for
+   setting the PG state.
+2. Delete the duplicate PG state setting in gfx_v8_0's late_init.
+
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 33 +++++++++++++++++++++++++-----
+ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 4 ----
+ 2 files changed, 28 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 5a815cf..41cff36 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1780,12 +1780,34 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
+ }
+ }
+
+- if (adev->powerplay.pp_feature & PP_GFXOFF_MASK)
+- /* enable gfx powergating */
+- amdgpu_device_ip_set_powergating_state(adev,
+- AMD_IP_BLOCK_TYPE_GFX,
+- AMD_PG_STATE_GATE);
++ return 0;
++}
++
++static int amdgpu_device_ip_late_set_pg_state(struct amdgpu_device *adev)
++{
++ int i = 0, r;
+
++ if (amdgpu_emu_mode == 1)
++ return 0;
++
++ for (i = 0; i < adev->num_ip_blocks; i++) {
++ if (!adev->ip_blocks[i].status.valid)
++ continue;
++ /* skip CG for VCE/UVD, it's handled specially */
++ if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
++ adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
++ adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
++ adev->ip_blocks[i].version->funcs->set_powergating_state) {
++ /* enable powergating to save power */
++ r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
++ AMD_PG_STATE_GATE);
++ if (r) {
++ DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
++ adev->ip_blocks[i].version->funcs->name, r);
++ return r;
++ }
++ }
++ }
+ return 0;
+ }
+
+@@ -1947,6 +1969,7 @@ static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
+ struct amdgpu_device *adev =
+ container_of(work, struct amdgpu_device, late_init_work.work);
+ amdgpu_device_ip_late_set_cg_state(adev);
++ amdgpu_device_ip_late_set_pg_state(adev);
+ }
+
+ /**
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index f9c7247d..92bda71 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -5595,10 +5595,6 @@ static int gfx_v8_0_late_init(void *handle)
+ return r;
+ }
+
+- amdgpu_device_ip_set_powergating_state(adev,
+- AMD_IP_BLOCK_TYPE_GFX,
+- AMD_PG_STATE_GATE);
+-
+ return 0;
+ }
+
+--
+2.7.4
+
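The loop added in amdgpu_device_ip_late_set_pg_state() works because each IP
block exposes an optional set_powergating_state callback through its
version->funcs table. A minimal sketch of that callback pattern, using
simplified, hypothetical names (demo_*) rather than the real amdgpu headers:

    enum demo_pg_state { DEMO_PG_STATE_UNGATE, DEMO_PG_STATE_GATE };

    struct demo_ip_funcs {
            const char *name;
            /* NULL when the block has nothing to power gate */
            int (*set_powergating_state)(void *handle, enum demo_pg_state state);
    };

    static int demo_gfx_set_powergating_state(void *handle,
                                              enum demo_pg_state state)
    {
            /* a real block would program its PG registers here */
            return 0;
    }

    static const struct demo_ip_funcs demo_gfx_ip_funcs = {
            .name = "demo_gfx",
            .set_powergating_state = demo_gfx_set_powergating_state,
    };

The late-init loop then only has to skip blocks with a NULL callback and the
UVD/VCE/VCN blocks, which the patch notes are handled specially.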
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4813-drm-amdgpu-Move-CG-PG-setting-out-of-delay-worker-th.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4813-drm-amdgpu-Move-CG-PG-setting-out-of-delay-worker-th.patch
new file mode 100644
index 00000000..fe153814
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4813-drm-amdgpu-Move-CG-PG-setting-out-of-delay-worker-th.patch
@@ -0,0 +1,69 @@
+From fc75b73652c45234c1b5cced6dbbebfe453c3104 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Wed, 6 Jun 2018 13:42:42 +0800
+Subject: [PATCH 4813/5725] drm/amdgpu: Move CG/PG setting out of delay worker
+ thread
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Partially revert commit 2dc80b00652f
+("drm/amdgpu: optimize amdgpu driver load & resume time").
+
+1. CG/PG enablement is part of GPU HW IP initialization, so we should
+wait for it to complete. Otherwise there are potential conflicts, for
+example suspend running concurrently with CG enablement.
+2. It is better to run the IB test after HW initialization completes,
+ i.e. the IB test should run after CG/PG enablement. Otherwise the test
+ will not cover the CG/PG/poweroff enabled case.
+
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 41cff36..c0c835a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1757,10 +1757,6 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
+ if (amdgpu_emu_mode == 1)
+ return 0;
+
+- r = amdgpu_ib_ring_tests(adev);
+- if (r)
+- DRM_ERROR("ib ring test failed (%d).\n", r);
+-
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_blocks[i].status.valid)
+ continue;
+@@ -1841,6 +1837,9 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
+ }
+ }
+
++ amdgpu_device_ip_late_set_cg_state(adev);
++ amdgpu_device_ip_late_set_pg_state(adev);
++
+ queue_delayed_work(system_wq, &adev->late_init_work,
+ msecs_to_jiffies(AMDGPU_RESUME_MS));
+
+@@ -1968,8 +1967,11 @@ static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
+ {
+ struct amdgpu_device *adev =
+ container_of(work, struct amdgpu_device, late_init_work.work);
+- amdgpu_device_ip_late_set_cg_state(adev);
+- amdgpu_device_ip_late_set_pg_state(adev);
++ int r;
++
++ r = amdgpu_ib_ring_tests(adev);
++ if (r)
++ DRM_ERROR("ib ring test failed (%d).\n", r);
+ }
+
+ /**
+--
+2.7.4
+
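After this change the delayed worker only runs the IB ring tests, while CG/PG
enablement happens synchronously in amdgpu_device_ip_late_init(). A small,
self-contained sketch of the delayed-work pattern the handler relies on (the
demo_* names and the 2000 ms delay are stand-ins, not the real driver values):

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    struct demo_dev {
            struct delayed_work late_init_work;
    };

    static void demo_late_init_handler(struct work_struct *work)
    {
            struct demo_dev *ddev =
                    container_of(work, struct demo_dev, late_init_work.work);

            /* only deferred, non-ordering-critical work belongs here,
             * e.g. the IB ring tests */
            (void)ddev;
    }

    static void demo_schedule_late_init(struct demo_dev *ddev)
    {
            INIT_DELAYED_WORK(&ddev->late_init_work, demo_late_init_handler);
            queue_delayed_work(system_wq, &ddev->late_init_work,
                               msecs_to_jiffies(2000));
    }

container_of() recovers the device from the embedded work struct, which is why
late_init_work has to live inside the device structure.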
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4814-drm-amdgpu-Add-stutter-mode-ctrl-in-module-parameter.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4814-drm-amdgpu-Add-stutter-mode-ctrl-in-module-parameter.patch
new file mode 100644
index 00000000..18a20bcd
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4814-drm-amdgpu-Add-stutter-mode-ctrl-in-module-parameter.patch
@@ -0,0 +1,53 @@
+From ed7b9cb95c7bdabdefae263ca25084a48a3e6bc2 Mon Sep 17 00:00:00 2001
+From: rex zhu <rex.zhu@amd.com>
+Date: Wed, 27 Jun 2018 18:08:43 +0800
+Subject: [PATCH 4814/5725] drm/amdgpu: Add stutter mode ctrl in module
+ parameter
+
+Enabling stutter mode can save power in low-DRAM-use cases,
+including but not limited to productivity applications,
+web browsing, and video playback.
+
+Currently this feature is disabled by default.
+
+Use bit 17 of the module parameter amdgpu_pp_feature_mask as the
+stutter mode mask, so users can enable/disable this feature easily.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 3 ++-
+ drivers/gpu/drm/amd/include/amd_shared.h | 1 +
+ 2 files changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index b9b9850..19f1a74 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -125,7 +125,8 @@ uint amdgpu_pg_mask = 0xffffffff;
+ uint amdgpu_sdma_phase_quantum = 32;
+ char *amdgpu_disable_cu = NULL;
+ char *amdgpu_virtual_display = NULL;
+-uint amdgpu_pp_feature_mask = 0xffff3fff; /* gfxoff (bit 15) disabled by default */
++/* OverDrive(bit 14),gfxoff(bit 15),stutter mode(bit 17) disabled by default*/
++uint amdgpu_pp_feature_mask = 0xfffd3fff;
+ int amdgpu_ngg = 0;
+ int amdgpu_prim_buf_per_se = 0;
+ int amdgpu_pos_buf_per_se = 0;
+diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
+index 0c30256..91459a6 100644
+--- a/drivers/gpu/drm/amd/include/amd_shared.h
++++ b/drivers/gpu/drm/amd/include/amd_shared.h
+@@ -128,6 +128,7 @@ enum PP_FEATURE_MASK {
+ PP_OVERDRIVE_MASK = 0x4000,
+ PP_GFXOFF_MASK = 0x8000,
+ PP_ACG_MASK = 0x10000,
++ PP_STUTTER_MODE = 0x20000,
+ };
+
+ struct amd_ip_funcs {
+--
+2.7.4
+
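The new default mask can be sanity-checked against the comment in the hunk
above: clearing the OverDrive (bit 14), gfxoff (bit 15) and stutter-mode
(bit 17) bits from an all-ones mask yields exactly 0xfffd3fff. A small sketch
(the helper name is made up; the bit values come from amd_shared.h in this
patch):

    #define PP_OVERDRIVE_MASK  0x4000u   /* bit 14 */
    #define PP_GFXOFF_MASK     0x8000u   /* bit 15 */
    #define PP_STUTTER_MODE    0x20000u  /* bit 17 */

    static unsigned int demo_default_pp_feature_mask(void)
    {
            /* 0xffffffff & ~0x0002c000 == 0xfffd3fff */
            return 0xffffffffu &
                   ~(PP_OVERDRIVE_MASK | PP_GFXOFF_MASK | PP_STUTTER_MODE);
    }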
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4815-drm-amd-display-Ctrl-stutter-mode-through-module-par.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4815-drm-amd-display-Ctrl-stutter-mode-through-module-par.patch
new file mode 100644
index 00000000..45a5ddab
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4815-drm-amd-display-Ctrl-stutter-mode-through-module-par.patch
@@ -0,0 +1,33 @@
+From b18ae154414b9d1079c21859969537282f74d916 Mon Sep 17 00:00:00 2001
+From: rex zhu <rex.zhu@amd.com>
+Date: Wed, 27 Jun 2018 18:19:08 +0800
+Subject: [PATCH 4815/5725] drm/amd/display: Ctrl stutter mode through module
+ parameter
+
+Use ppfeaturemask to enable/disable stutter mode.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index f51c1f2..7579008 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -1660,7 +1660,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
+ /*
+ * Temporary disable until pplib/smu interaction is implemented
+ */
+- dm->dc->debug.disable_stutter = true;
++ dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
+ break;
+ #endif
+ default:
+--
+2.7.4
+
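The ternary in the hunk above simply inverts the feature-mask test, i.e. it is
equivalent to this sketch (the demo_* name is made up; 0x20000 is the
PP_STUTTER_MODE bit added in patch 4814):

    /* assumes a kernel context where bool is available via linux/types.h */
    static bool demo_disable_stutter(unsigned int pp_feature_mask)
    {
            return !(pp_feature_mask & 0x20000u /* PP_STUTTER_MODE */);
    }

So loading the module with ppfeaturemask=0xffff3fff (the default 0xfffd3fff
with bit 17 set) should leave stutter mode enabled, while the default keeps it
disabled.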
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4816-drm-amd-display-Fix-a-typo-in-wm_min_memg_clk_in_khz.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4816-drm-amd-display-Fix-a-typo-in-wm_min_memg_clk_in_khz.patch
new file mode 100644
index 00000000..e55c6f81
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4816-drm-amd-display-Fix-a-typo-in-wm_min_memg_clk_in_khz.patch
@@ -0,0 +1,131 @@
+From c82007c11a6b78a775c96188b89ae1c31838e6aa Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Wed, 20 Jun 2018 12:52:43 +0800
+Subject: [PATCH 4816/5725] drm/amd/display: Fix a typo in
+ wm_min_memg_clk_in_khz
+
+change wm_min_memg_clk_in_khz -> wm_min_mem_clk_in_khz
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c | 8 ++++----
+ drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c | 8 ++++----
+ drivers/gpu/drm/amd/display/dc/dm_services_types.h | 6 +++---
+ 3 files changed, 11 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+index 7529100..9e1afb1 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+@@ -1000,7 +1000,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
+ eng_clks.data[0].clocks_in_khz;
+ clk_ranges.wm_clk_ranges[0].wm_max_eng_clk_in_khz =
+ eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1;
+- clk_ranges.wm_clk_ranges[0].wm_min_memg_clk_in_khz =
++ clk_ranges.wm_clk_ranges[0].wm_min_mem_clk_in_khz =
+ mem_clks.data[0].clocks_in_khz;
+ clk_ranges.wm_clk_ranges[0].wm_max_mem_clk_in_khz =
+ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1;
+@@ -1010,7 +1010,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
+ eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz;
+ /* 5 GHz instead of data[7].clockInKHz to cover Overdrive */
+ clk_ranges.wm_clk_ranges[1].wm_max_eng_clk_in_khz = 5000000;
+- clk_ranges.wm_clk_ranges[1].wm_min_memg_clk_in_khz =
++ clk_ranges.wm_clk_ranges[1].wm_min_mem_clk_in_khz =
+ mem_clks.data[0].clocks_in_khz;
+ clk_ranges.wm_clk_ranges[1].wm_max_mem_clk_in_khz =
+ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1;
+@@ -1020,7 +1020,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
+ eng_clks.data[0].clocks_in_khz;
+ clk_ranges.wm_clk_ranges[2].wm_max_eng_clk_in_khz =
+ eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1;
+- clk_ranges.wm_clk_ranges[2].wm_min_memg_clk_in_khz =
++ clk_ranges.wm_clk_ranges[2].wm_min_mem_clk_in_khz =
+ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz;
+ /* 5 GHz instead of data[2].clockInKHz to cover Overdrive */
+ clk_ranges.wm_clk_ranges[2].wm_max_mem_clk_in_khz = 5000000;
+@@ -1030,7 +1030,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
+ eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz;
+ /* 5 GHz instead of data[7].clockInKHz to cover Overdrive */
+ clk_ranges.wm_clk_ranges[3].wm_max_eng_clk_in_khz = 5000000;
+- clk_ranges.wm_clk_ranges[3].wm_min_memg_clk_in_khz =
++ clk_ranges.wm_clk_ranges[3].wm_min_mem_clk_in_khz =
+ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz;
+ /* 5 GHz instead of data[2].clockInKHz to cover Overdrive */
+ clk_ranges.wm_clk_ranges[3].wm_max_mem_clk_in_khz = 5000000;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+index 13c388a..8381f27 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+@@ -775,7 +775,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
+ eng_clks.data[0].clocks_in_khz;
+ clk_ranges.wm_clk_ranges[0].wm_max_eng_clk_in_khz =
+ eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1;
+- clk_ranges.wm_clk_ranges[0].wm_min_memg_clk_in_khz =
++ clk_ranges.wm_clk_ranges[0].wm_min_mem_clk_in_khz =
+ mem_clks.data[0].clocks_in_khz;
+ clk_ranges.wm_clk_ranges[0].wm_max_mem_clk_in_khz =
+ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1;
+@@ -785,7 +785,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
+ eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz;
+ /* 5 GHz instead of data[7].clockInKHz to cover Overdrive */
+ clk_ranges.wm_clk_ranges[1].wm_max_eng_clk_in_khz = 5000000;
+- clk_ranges.wm_clk_ranges[1].wm_min_memg_clk_in_khz =
++ clk_ranges.wm_clk_ranges[1].wm_min_mem_clk_in_khz =
+ mem_clks.data[0].clocks_in_khz;
+ clk_ranges.wm_clk_ranges[1].wm_max_mem_clk_in_khz =
+ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1;
+@@ -795,7 +795,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
+ eng_clks.data[0].clocks_in_khz;
+ clk_ranges.wm_clk_ranges[2].wm_max_eng_clk_in_khz =
+ eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1;
+- clk_ranges.wm_clk_ranges[2].wm_min_memg_clk_in_khz =
++ clk_ranges.wm_clk_ranges[2].wm_min_mem_clk_in_khz =
+ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz;
+ /* 5 GHz instead of data[2].clockInKHz to cover Overdrive */
+ clk_ranges.wm_clk_ranges[2].wm_max_mem_clk_in_khz = 5000000;
+@@ -805,7 +805,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
+ eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz;
+ /* 5 GHz instead of data[7].clockInKHz to cover Overdrive */
+ clk_ranges.wm_clk_ranges[3].wm_max_eng_clk_in_khz = 5000000;
+- clk_ranges.wm_clk_ranges[3].wm_min_memg_clk_in_khz =
++ clk_ranges.wm_clk_ranges[3].wm_min_mem_clk_in_khz =
+ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz;
+ /* 5 GHz instead of data[2].clockInKHz to cover Overdrive */
+ clk_ranges.wm_clk_ranges[3].wm_max_mem_clk_in_khz = 5000000;
+diff --git a/drivers/gpu/drm/amd/display/dc/dm_services_types.h b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
+index ab8c77d..2b83f92 100644
+--- a/drivers/gpu/drm/amd/display/dc/dm_services_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
+@@ -137,7 +137,7 @@ struct dm_pp_clock_range_for_wm_set {
+ enum dm_pp_wm_set_id wm_set_id;
+ uint32_t wm_min_eng_clk_in_khz;
+ uint32_t wm_max_eng_clk_in_khz;
+- uint32_t wm_min_memg_clk_in_khz;
++ uint32_t wm_min_mem_clk_in_khz;
+ uint32_t wm_max_mem_clk_in_khz;
+ };
+
+@@ -150,7 +150,7 @@ struct dm_pp_clock_range_for_dmif_wm_set_soc15 {
+ enum dm_pp_wm_set_id wm_set_id;
+ uint32_t wm_min_dcfclk_clk_in_khz;
+ uint32_t wm_max_dcfclk_clk_in_khz;
+- uint32_t wm_min_memg_clk_in_khz;
++ uint32_t wm_min_mem_clk_in_khz;
+ uint32_t wm_max_mem_clk_in_khz;
+ };
+
+@@ -158,7 +158,7 @@ struct dm_pp_clock_range_for_mcif_wm_set_soc15 {
+ enum dm_pp_wm_set_id wm_set_id;
+ uint32_t wm_min_socclk_clk_in_khz;
+ uint32_t wm_max_socclk_clk_in_khz;
+- uint32_t wm_min_memg_clk_in_khz;
++ uint32_t wm_min_mem_clk_in_khz;
+ uint32_t wm_max_mem_clk_in_khz;
+ };
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4817-drm-amd-powerplay-drop-the-acg-fix.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4817-drm-amd-powerplay-drop-the-acg-fix.patch
new file mode 100644
index 00000000..ca0329b2
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4817-drm-amd-powerplay-drop-the-acg-fix.patch
@@ -0,0 +1,34 @@
+From 3b87a366884511dcad870b8334a195e75fe58e3a Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Mon, 11 Jun 2018 17:38:54 +0800
+Subject: [PATCH 4817/5725] drm/amd/powerplay: drop the acg fix
+
+This workaround is not needed any more.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c | 6 ------
+ 1 file changed, 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
+index 2991470..f4f366b 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
+@@ -224,12 +224,6 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable
+ ppsmc_pptable->AcgGfxclkSpreadPercent = smc_dpm_table.acggfxclkspreadpercent;
+ ppsmc_pptable->AcgGfxclkSpreadFreq = smc_dpm_table.acggfxclkspreadfreq;
+
+- /* 0xFFFF will disable the ACG feature */
+- if (!(hwmgr->feature_mask & PP_ACG_MASK)) {
+- ppsmc_pptable->AcgThresholdFreqHigh = 0xFFFF;
+- ppsmc_pptable->AcgThresholdFreqLow = 0xFFFF;
+- }
+-
+ ppsmc_pptable->Vr2_I2C_address = smc_dpm_table.Vr2_I2C_address;
+
+ return 0;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4818-drm-amd-powerplay-revise-default-dpm-tables-setup.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4818-drm-amd-powerplay-revise-default-dpm-tables-setup.patch
new file mode 100644
index 00000000..8818771b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4818-drm-amd-powerplay-revise-default-dpm-tables-setup.patch
@@ -0,0 +1,424 @@
+From ae94f990c469147a418075791a822e5094b7b4f5 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Mon, 11 Jun 2018 15:25:37 +0800
+Subject: [PATCH 4818/5725] drm/amd/powerplay: revise default dpm tables setup
+
+Initialize the soft/hard min/max levels correctly and
+handle the DPM-disabled case.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 334 ++++++++-------------
+ 1 file changed, 132 insertions(+), 202 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+index 6e22cb3..8b5c581 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+@@ -454,37 +454,30 @@ static int vega12_setup_asic_task(struct pp_hwmgr *hwmgr)
+ */
+ static void vega12_init_dpm_state(struct vega12_dpm_state *dpm_state)
+ {
+- dpm_state->soft_min_level = 0xff;
+- dpm_state->soft_max_level = 0xff;
+- dpm_state->hard_min_level = 0xff;
+- dpm_state->hard_max_level = 0xff;
++ dpm_state->soft_min_level = 0x0;
++ dpm_state->soft_max_level = 0xffff;
++ dpm_state->hard_min_level = 0x0;
++ dpm_state->hard_max_level = 0xffff;
+ }
+
+-static int vega12_get_number_dpm_level(struct pp_hwmgr *hwmgr,
+- PPCLK_e clkID, uint32_t *num_dpm_level)
++static int vega12_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
++ PPCLK_e clk_id, uint32_t *num_of_levels)
+ {
+- int result;
+- /*
+- * SMU expects the Clock ID to be in the top 16 bits.
+- * Lower 16 bits specify the level however 0xFF is a
+- * special argument the returns the total number of levels
+- */
+- PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
+- PPSMC_MSG_GetDpmFreqByIndex, (clkID << 16 | 0xFF)) == 0,
+- "[GetNumberDpmLevel] Failed to get DPM levels from SMU for CLKID!",
+- return -EINVAL);
+-
+- result = vega12_read_arg_from_smc(hwmgr, num_dpm_level);
++ int ret = 0;
+
+- PP_ASSERT_WITH_CODE(*num_dpm_level < MAX_REGULAR_DPM_NUMBER,
+- "[GetNumberDPMLevel] Number of DPM levels is greater than limit",
+- return -EINVAL);
++ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_GetDpmFreqByIndex,
++ (clk_id << 16 | 0xFF));
++ PP_ASSERT_WITH_CODE(!ret,
++ "[GetNumOfDpmLevel] failed to get dpm levels!",
++ return ret);
+
+- PP_ASSERT_WITH_CODE(*num_dpm_level != 0,
+- "[GetNumberDPMLevel] Number of CLK Levels is zero!",
+- return -EINVAL);
++ vega12_read_arg_from_smc(hwmgr, num_of_levels);
++ PP_ASSERT_WITH_CODE(*num_of_levels > 0,
++ "[GetNumOfDpmLevel] number of clk levels is invalid!",
++ return -EINVAL);
+
+- return result;
++ return ret;
+ }
+
+ static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
+@@ -510,6 +503,31 @@ static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
+ return result;
+ }
+
++static int vega12_setup_single_dpm_table(struct pp_hwmgr *hwmgr,
++ struct vega12_single_dpm_table *dpm_table, PPCLK_e clk_id)
++{
++ int ret = 0;
++ uint32_t i, num_of_levels, clk;
++
++ ret = vega12_get_number_of_dpm_level(hwmgr, clk_id, &num_of_levels);
++ PP_ASSERT_WITH_CODE(!ret,
++ "[SetupSingleDpmTable] failed to get clk levels!",
++ return ret);
++
++ dpm_table->count = num_of_levels;
++
++ for (i = 0; i < num_of_levels; i++) {
++ ret = vega12_get_dpm_frequency_by_index(hwmgr, clk_id, i, &clk);
++ PP_ASSERT_WITH_CODE(!ret,
++ "[SetupSingleDpmTable] failed to get clk of specific level!",
++ return ret);
++ dpm_table->dpm_levels[i].value = clk;
++ dpm_table->dpm_levels[i].enabled = true;
++ }
++
++ return ret;
++}
++
+ /*
+ * This function is to initialize all DPM state tables
+ * for SMU based on the dependency table.
+@@ -520,224 +538,136 @@ static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
+ */
+ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
+ {
+- uint32_t num_levels, i, clock;
+
+ struct vega12_hwmgr *data =
+ (struct vega12_hwmgr *)(hwmgr->backend);
+-
+ struct vega12_single_dpm_table *dpm_table;
++ int ret = 0;
+
+ memset(&data->dpm_table, 0, sizeof(data->dpm_table));
+
+- /* Initialize Sclk DPM and SOC DPM table based on allow Sclk values */
++ /* socclk */
+ dpm_table = &(data->dpm_table.soc_table);
+-
+- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_SOCCLK,
+- &num_levels) == 0,
+- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for SOCCLK!",
+- return -EINVAL);
+-
+- dpm_table->count = num_levels;
+-
+- for (i = 0; i < num_levels; i++) {
+- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
+- PPCLK_SOCCLK, i, &clock) == 0,
+- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for SOCCLK!",
+- return -EINVAL);
+-
+- dpm_table->dpm_levels[i].value = clock;
+- dpm_table->dpm_levels[i].enabled = true;
++ if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
++ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_SOCCLK);
++ PP_ASSERT_WITH_CODE(!ret,
++ "[SetupDefaultDpmTable] failed to get socclk dpm levels!",
++ return ret);
++ } else {
++ dpm_table->count = 1;
++ dpm_table->dpm_levels[0].value = data->vbios_boot_state.soc_clock / 100;
+ }
+-
+ vega12_init_dpm_state(&(dpm_table->dpm_state));
+
++ /* gfxclk */
+ dpm_table = &(data->dpm_table.gfx_table);
+-
+- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_GFXCLK,
+- &num_levels) == 0,
+- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for GFXCLK!",
+- return -EINVAL);
+-
+- dpm_table->count = num_levels;
+- for (i = 0; i < num_levels; i++) {
+- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
+- PPCLK_GFXCLK, i, &clock) == 0,
+- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for GFXCLK!",
+- return -EINVAL);
+-
+- dpm_table->dpm_levels[i].value = clock;
+- dpm_table->dpm_levels[i].enabled = true;
++ if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
++ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_GFXCLK);
++ PP_ASSERT_WITH_CODE(!ret,
++ "[SetupDefaultDpmTable] failed to get gfxclk dpm levels!",
++ return ret);
++ } else {
++ dpm_table->count = 1;
++ dpm_table->dpm_levels[0].value = data->vbios_boot_state.gfx_clock / 100;
+ }
+-
+ vega12_init_dpm_state(&(dpm_table->dpm_state));
+- /* Initialize Mclk DPM table based on allow Mclk values */
+- dpm_table = &(data->dpm_table.mem_table);
+
+- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_UCLK,
+- &num_levels) == 0,
+- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for UCLK!",
+- return -EINVAL);
+-
+- dpm_table->count = num_levels;
+-
+- for (i = 0; i < num_levels; i++) {
+- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
+- PPCLK_UCLK, i, &clock) == 0,
+- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for UCLK!",
+- return -EINVAL);
+-
+- dpm_table->dpm_levels[i].value = clock;
+- dpm_table->dpm_levels[i].enabled = true;
++ /* memclk */
++ dpm_table = &(data->dpm_table.mem_table);
++ if (data->smu_features[GNLD_DPM_UCLK].enabled) {
++ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_UCLK);
++ PP_ASSERT_WITH_CODE(!ret,
++ "[SetupDefaultDpmTable] failed to get memclk dpm levels!",
++ return ret);
++ } else {
++ dpm_table->count = 1;
++ dpm_table->dpm_levels[0].value = data->vbios_boot_state.mem_clock / 100;
+ }
+-
+ vega12_init_dpm_state(&(dpm_table->dpm_state));
+
++ /* eclk */
+ dpm_table = &(data->dpm_table.eclk_table);
+-
+- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_ECLK,
+- &num_levels) == 0,
+- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for ECLK!",
+- return -EINVAL);
+-
+- dpm_table->count = num_levels;
+-
+- for (i = 0; i < num_levels; i++) {
+- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
+- PPCLK_ECLK, i, &clock) == 0,
+- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for ECLK!",
+- return -EINVAL);
+-
+- dpm_table->dpm_levels[i].value = clock;
+- dpm_table->dpm_levels[i].enabled = true;
++ if (data->smu_features[GNLD_DPM_VCE].enabled) {
++ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_ECLK);
++ PP_ASSERT_WITH_CODE(!ret,
++ "[SetupDefaultDpmTable] failed to get eclk dpm levels!",
++ return ret);
++ } else {
++ dpm_table->count = 1;
++ dpm_table->dpm_levels[0].value = data->vbios_boot_state.eclock / 100;
+ }
+-
+ vega12_init_dpm_state(&(dpm_table->dpm_state));
+
++ /* vclk */
+ dpm_table = &(data->dpm_table.vclk_table);
+-
+- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_VCLK,
+- &num_levels) == 0,
+- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for VCLK!",
+- return -EINVAL);
+-
+- dpm_table->count = num_levels;
+-
+- for (i = 0; i < num_levels; i++) {
+- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
+- PPCLK_VCLK, i, &clock) == 0,
+- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for VCLK!",
+- return -EINVAL);
+-
+- dpm_table->dpm_levels[i].value = clock;
+- dpm_table->dpm_levels[i].enabled = true;
++ if (data->smu_features[GNLD_DPM_UVD].enabled) {
++ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_VCLK);
++ PP_ASSERT_WITH_CODE(!ret,
++ "[SetupDefaultDpmTable] failed to get vclk dpm levels!",
++ return ret);
++ } else {
++ dpm_table->count = 1;
++ dpm_table->dpm_levels[0].value = data->vbios_boot_state.vclock / 100;
+ }
+-
+ vega12_init_dpm_state(&(dpm_table->dpm_state));
+
++ /* dclk */
+ dpm_table = &(data->dpm_table.dclk_table);
+-
+- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_DCLK,
+- &num_levels) == 0,
+- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCLK!",
+- return -EINVAL);
+-
+- dpm_table->count = num_levels;
+-
+- for (i = 0; i < num_levels; i++) {
+- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
+- PPCLK_DCLK, i, &clock) == 0,
+- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCLK!",
+- return -EINVAL);
+-
+- dpm_table->dpm_levels[i].value = clock;
+- dpm_table->dpm_levels[i].enabled = true;
++ if (data->smu_features[GNLD_DPM_UVD].enabled) {
++ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCLK);
++ PP_ASSERT_WITH_CODE(!ret,
++ "[SetupDefaultDpmTable] failed to get dclk dpm levels!",
++ return ret);
++ } else {
++ dpm_table->count = 1;
++ dpm_table->dpm_levels[0].value = data->vbios_boot_state.dclock / 100;
+ }
+-
+ vega12_init_dpm_state(&(dpm_table->dpm_state));
+
+- /* Assume there is no headless Vega12 for now */
++ /* dcefclk */
+ dpm_table = &(data->dpm_table.dcef_table);
+-
+- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr,
+- PPCLK_DCEFCLK, &num_levels) == 0,
+- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCEFCLK!",
+- return -EINVAL);
+-
+- dpm_table->count = num_levels;
+-
+- for (i = 0; i < num_levels; i++) {
+- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
+- PPCLK_DCEFCLK, i, &clock) == 0,
+- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCEFCLK!",
+- return -EINVAL);
+-
+- dpm_table->dpm_levels[i].value = clock;
+- dpm_table->dpm_levels[i].enabled = true;
++ if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
++ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCEFCLK);
++ PP_ASSERT_WITH_CODE(!ret,
++ "[SetupDefaultDpmTable] failed to get dcefclk dpm levels!",
++ return ret);
++ } else {
++ dpm_table->count = 1;
++ dpm_table->dpm_levels[0].value = data->vbios_boot_state.dcef_clock / 100;
+ }
+-
+ vega12_init_dpm_state(&(dpm_table->dpm_state));
+
++ /* pixclk */
+ dpm_table = &(data->dpm_table.pixel_table);
+-
+- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr,
+- PPCLK_PIXCLK, &num_levels) == 0,
+- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PIXCLK!",
+- return -EINVAL);
+-
+- dpm_table->count = num_levels;
+-
+- for (i = 0; i < num_levels; i++) {
+- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
+- PPCLK_PIXCLK, i, &clock) == 0,
+- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PIXCLK!",
+- return -EINVAL);
+-
+- dpm_table->dpm_levels[i].value = clock;
+- dpm_table->dpm_levels[i].enabled = true;
+- }
+-
++ if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
++ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PIXCLK);
++ PP_ASSERT_WITH_CODE(!ret,
++ "[SetupDefaultDpmTable] failed to get pixclk dpm levels!",
++ return ret);
++ } else
++ dpm_table->count = 0;
+ vega12_init_dpm_state(&(dpm_table->dpm_state));
+
++ /* dispclk */
+ dpm_table = &(data->dpm_table.display_table);
+-
+- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr,
+- PPCLK_DISPCLK, &num_levels) == 0,
+- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DISPCLK!",
+- return -EINVAL);
+-
+- dpm_table->count = num_levels;
+-
+- for (i = 0; i < num_levels; i++) {
+- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
+- PPCLK_DISPCLK, i, &clock) == 0,
+- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DISPCLK!",
+- return -EINVAL);
+-
+- dpm_table->dpm_levels[i].value = clock;
+- dpm_table->dpm_levels[i].enabled = true;
+- }
+-
++ if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
++ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DISPCLK);
++ PP_ASSERT_WITH_CODE(!ret,
++ "[SetupDefaultDpmTable] failed to get dispclk dpm levels!",
++ return ret);
++ } else
++ dpm_table->count = 0;
+ vega12_init_dpm_state(&(dpm_table->dpm_state));
+
++ /* phyclk */
+ dpm_table = &(data->dpm_table.phy_table);
+-
+- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr,
+- PPCLK_PHYCLK, &num_levels) == 0,
+- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PHYCLK!",
+- return -EINVAL);
+-
+- dpm_table->count = num_levels;
+-
+- for (i = 0; i < num_levels; i++) {
+- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
+- PPCLK_PHYCLK, i, &clock) == 0,
+- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PHYCLK!",
+- return -EINVAL);
+-
+- dpm_table->dpm_levels[i].value = clock;
+- dpm_table->dpm_levels[i].enabled = true;
+- }
+-
++ if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
++ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PHYCLK);
++ PP_ASSERT_WITH_CODE(!ret,
++ "[SetupDefaultDpmTable] failed to get phyclk dpm levels!",
++ return ret);
++ } else
++ dpm_table->count = 0;
+ vega12_init_dpm_state(&(dpm_table->dpm_state));
+
+ /* save a copy of the default DPM table */
+--
+2.7.4
+
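Both helpers introduced above reuse the SMU message encoding that the deleted
comment described: the clock ID sits in the upper 16 bits of the message
argument, the level index in the lower 16 bits, and index 0xFF asks the SMU for
the number of levels instead of a frequency. A small sketch (the demo_* name is
made up):

    static inline unsigned int demo_dpm_freq_arg(unsigned int clk_id,
                                                 unsigned int level)
    {
            return (clk_id << 16) | (level & 0xffff);
    }

    /* demo_dpm_freq_arg(PPCLK_SOCCLK, 0xFF) -> "how many SOCCLK levels?"
     * demo_dpm_freq_arg(PPCLK_SOCCLK, 3)    -> "frequency of level 3"    */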
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4819-drm-amd-powerplay-retrieve-all-clock-ranges-on-start.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4819-drm-amd-powerplay-retrieve-all-clock-ranges-on-start.patch
new file mode 100644
index 00000000..6384e1e0
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4819-drm-amd-powerplay-retrieve-all-clock-ranges-on-start.patch
@@ -0,0 +1,142 @@
+From 80658a01a6d126dfe5ca5b0ef05ee819282aa097 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Mon, 11 Jun 2018 15:41:44 +0800
+Subject: [PATCH 4819/5725] drm/amd/powerplay: retrieve all clock ranges on
+ startup
+
+Retrieve all clock ranges on startup so that we do not need to use
+PPSMC_MSG_GetMin/MaxDpmFreq at runtime, since that causes problems.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 69 +++++++++++++++++-----
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h | 8 +++
+ 2 files changed, 61 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+index 8b5c581..5c0935a 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+@@ -857,6 +857,48 @@ static int vega12_power_control_set_level(struct pp_hwmgr *hwmgr)
+ return result;
+ }
+
++static int vega12_get_all_clock_ranges_helper(struct pp_hwmgr *hwmgr,
++ PPCLK_e clkid, struct vega12_clock_range *clock)
++{
++ /* AC Max */
++ PP_ASSERT_WITH_CODE(
++ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clkid << 16)) == 0,
++ "[GetClockRanges] Failed to get max ac clock from SMC!",
++ return -EINVAL);
++ vega12_read_arg_from_smc(hwmgr, &(clock->ACMax));
++
++ /* AC Min */
++ PP_ASSERT_WITH_CODE(
++ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clkid << 16)) == 0,
++ "[GetClockRanges] Failed to get min ac clock from SMC!",
++ return -EINVAL);
++ vega12_read_arg_from_smc(hwmgr, &(clock->ACMin));
++
++ /* DC Max */
++ PP_ASSERT_WITH_CODE(
++ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDcModeMaxDpmFreq, (clkid << 16)) == 0,
++ "[GetClockRanges] Failed to get max dc clock from SMC!",
++ return -EINVAL);
++ vega12_read_arg_from_smc(hwmgr, &(clock->DCMax));
++
++ return 0;
++}
++
++static int vega12_get_all_clock_ranges(struct pp_hwmgr *hwmgr)
++{
++ struct vega12_hwmgr *data =
++ (struct vega12_hwmgr *)(hwmgr->backend);
++ uint32_t i;
++
++ for (i = 0; i < PPCLK_COUNT; i++)
++ PP_ASSERT_WITH_CODE(!vega12_get_all_clock_ranges_helper(hwmgr,
++ i, &(data->clk_range[i])),
++ "Failed to get clk range from SMC!",
++ return -EINVAL);
++
++ return 0;
++}
++
+ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+ {
+ int tmp_result, result = 0;
+@@ -884,6 +926,11 @@ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+ "Failed to power control set level!",
+ result = tmp_result);
+
++ result = vega12_get_all_clock_ranges(hwmgr);
++ PP_ASSERT_WITH_CODE(!result,
++ "Failed to get all clock ranges!",
++ return result);
++
+ result = vega12_odn_initialize_default_settings(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to power control set level!",
+@@ -1473,24 +1520,14 @@ static int vega12_get_clock_ranges(struct pp_hwmgr *hwmgr,
+ PPCLK_e clock_select,
+ bool max)
+ {
+- int result;
+- *clock = 0;
++ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+
+- if (max) {
+- PP_ASSERT_WITH_CODE(
+- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clock_select << 16)) == 0,
+- "[GetClockRanges] Failed to get max clock from SMC!",
+- return -1);
+- result = vega12_read_arg_from_smc(hwmgr, clock);
+- } else {
+- PP_ASSERT_WITH_CODE(
+- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clock_select << 16)) == 0,
+- "[GetClockRanges] Failed to get min clock from SMC!",
+- return -1);
+- result = vega12_read_arg_from_smc(hwmgr, clock);
+- }
++ if (max)
++ *clock = data->clk_range[clock_select].ACMax;
++ else
++ *clock = data->clk_range[clock_select].ACMin;
+
+- return result;
++ return 0;
+ }
+
+ static int vega12_get_sclks(struct pp_hwmgr *hwmgr,
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
+index 49b38df..e18c083 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
+@@ -304,6 +304,12 @@ struct vega12_odn_fan_table {
+ bool force_fan_pwm;
+ };
+
++struct vega12_clock_range {
++ uint32_t ACMax;
++ uint32_t ACMin;
++ uint32_t DCMax;
++};
++
+ struct vega12_hwmgr {
+ struct vega12_dpm_table dpm_table;
+ struct vega12_dpm_table golden_dpm_table;
+@@ -385,6 +391,8 @@ struct vega12_hwmgr {
+ uint32_t smu_version;
+ struct smu_features smu_features[GNLD_FEATURES_MAX];
+ struct vega12_smc_state_table smc_state_table;
++
++ struct vega12_clock_range clk_range[PPCLK_COUNT];
+ };
+
+ #define VEGA12_DPM2_NEAR_TDP_DEC 10
+--
+2.7.4
+
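With the ranges cached at startup, a runtime query such as
vega12_get_clock_ranges() no longer sends any SMU message; it is reduced to an
array read per clock domain. A simplified sketch of that lookup (the demo_*
names and field names are stand-ins for the vega12_clock_range cache added
above):

    struct demo_clock_range {
            unsigned int ac_max;
            unsigned int ac_min;
            unsigned int dc_max;
    };

    static unsigned int demo_get_clock_range(const struct demo_clock_range *cache,
                                             unsigned int clk_id, int want_max)
    {
            return want_max ? cache[clk_id].ac_max : cache[clk_id].ac_min;
    }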
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4820-drm-amd-powerplay-revise-clock-level-setup.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4820-drm-amd-powerplay-revise-clock-level-setup.patch
new file mode 100644
index 00000000..f4b48151
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4820-drm-amd-powerplay-revise-clock-level-setup.patch
@@ -0,0 +1,470 @@
+From 4085de30ccabd31f04c8e3bf51bd04a6cc611a53 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Mon, 11 Jun 2018 16:04:17 +0800
+Subject: [PATCH 4820/5725] drm/amd/powerplay: revise clock level setup
+
+Make sure the clock levels are set only when DPM is enabled. The
+uvd/vce/soc clocks are changed correspondingly as well.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 318 ++++++++++++++-------
+ 1 file changed, 211 insertions(+), 107 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+index 5c0935a..b27978b 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+@@ -959,76 +959,172 @@ static uint32_t vega12_find_lowest_dpm_level(
+ break;
+ }
+
++ if (i >= table->count) {
++ i = 0;
++ table->dpm_levels[i].enabled = true;
++ }
++
+ return i;
+ }
+
+ static uint32_t vega12_find_highest_dpm_level(
+ struct vega12_single_dpm_table *table)
+ {
+- uint32_t i = 0;
++ int32_t i = 0;
++ PP_ASSERT_WITH_CODE(table->count <= MAX_REGULAR_DPM_NUMBER,
++ "[FindHighestDPMLevel] DPM Table has too many entries!",
++ return MAX_REGULAR_DPM_NUMBER - 1);
+
+- if (table->count <= MAX_REGULAR_DPM_NUMBER) {
+- for (i = table->count; i > 0; i--) {
+- if (table->dpm_levels[i - 1].enabled)
+- return i - 1;
+- }
+- } else {
+- pr_info("DPM Table Has Too Many Entries!");
+- return MAX_REGULAR_DPM_NUMBER - 1;
++ for (i = table->count - 1; i >= 0; i--) {
++ if (table->dpm_levels[i].enabled)
++ break;
+ }
+
+- return i;
++ if (i < 0) {
++ i = 0;
++ table->dpm_levels[i].enabled = true;
++ }
++
++ return (uint32_t)i;
+ }
+
+ static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
+ {
+ struct vega12_hwmgr *data = hwmgr->backend;
+- if (data->smc_state_table.gfx_boot_level !=
+- data->dpm_table.gfx_table.dpm_state.soft_min_level) {
+- smum_send_msg_to_smc_with_parameter(hwmgr,
+- PPSMC_MSG_SetSoftMinByFreq,
+- PPCLK_GFXCLK<<16 | data->dpm_table.gfx_table.dpm_levels[data->smc_state_table.gfx_boot_level].value);
+- data->dpm_table.gfx_table.dpm_state.soft_min_level =
+- data->smc_state_table.gfx_boot_level;
++ uint32_t min_freq;
++ int ret = 0;
++
++ if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
++ min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level;
++ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
++ hwmgr, PPSMC_MSG_SetSoftMinByFreq,
++ (PPCLK_GFXCLK << 16) | (min_freq & 0xffff))),
++ "Failed to set soft min gfxclk !",
++ return ret);
+ }
+
+- if (data->smc_state_table.mem_boot_level !=
+- data->dpm_table.mem_table.dpm_state.soft_min_level) {
+- smum_send_msg_to_smc_with_parameter(hwmgr,
+- PPSMC_MSG_SetSoftMinByFreq,
+- PPCLK_UCLK<<16 | data->dpm_table.mem_table.dpm_levels[data->smc_state_table.mem_boot_level].value);
+- data->dpm_table.mem_table.dpm_state.soft_min_level =
+- data->smc_state_table.mem_boot_level;
++ if (data->smu_features[GNLD_DPM_UCLK].enabled) {
++ min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level;
++ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
++ hwmgr, PPSMC_MSG_SetSoftMinByFreq,
++ (PPCLK_UCLK << 16) | (min_freq & 0xffff))),
++ "Failed to set soft min memclk !",
++ return ret);
++
++ min_freq = data->dpm_table.mem_table.dpm_state.hard_min_level;
++ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
++ hwmgr, PPSMC_MSG_SetHardMinByFreq,
++ (PPCLK_UCLK << 16) | (min_freq & 0xffff))),
++ "Failed to set hard min memclk !",
++ return ret);
+ }
+
+- return 0;
++ if (data->smu_features[GNLD_DPM_UVD].enabled) {
++ min_freq = data->dpm_table.vclk_table.dpm_state.soft_min_level;
++
++ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
++ hwmgr, PPSMC_MSG_SetSoftMinByFreq,
++ (PPCLK_VCLK << 16) | (min_freq & 0xffff))),
++ "Failed to set soft min vclk!",
++ return ret);
++
++ min_freq = data->dpm_table.dclk_table.dpm_state.soft_min_level;
++
++ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
++ hwmgr, PPSMC_MSG_SetSoftMinByFreq,
++ (PPCLK_DCLK << 16) | (min_freq & 0xffff))),
++ "Failed to set soft min dclk!",
++ return ret);
++ }
++
++ if (data->smu_features[GNLD_DPM_VCE].enabled) {
++ min_freq = data->dpm_table.eclk_table.dpm_state.soft_min_level;
++
++ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
++ hwmgr, PPSMC_MSG_SetSoftMinByFreq,
++ (PPCLK_ECLK << 16) | (min_freq & 0xffff))),
++ "Failed to set soft min eclk!",
++ return ret);
++ }
++
++ if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
++ min_freq = data->dpm_table.soc_table.dpm_state.soft_min_level;
++
++ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
++ hwmgr, PPSMC_MSG_SetSoftMinByFreq,
++ (PPCLK_SOCCLK << 16) | (min_freq & 0xffff))),
++ "Failed to set soft min socclk!",
++ return ret);
++ }
++
++ return ret;
+
+ }
+
+ static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
+ {
+ struct vega12_hwmgr *data = hwmgr->backend;
+- if (data->smc_state_table.gfx_max_level !=
+- data->dpm_table.gfx_table.dpm_state.soft_max_level) {
+- smum_send_msg_to_smc_with_parameter(hwmgr,
+- PPSMC_MSG_SetSoftMaxByFreq,
+- /* plus the vale by 1 to align the resolution */
+- PPCLK_GFXCLK<<16 | (data->dpm_table.gfx_table.dpm_levels[data->smc_state_table.gfx_max_level].value + 1));
+- data->dpm_table.gfx_table.dpm_state.soft_max_level =
+- data->smc_state_table.gfx_max_level;
++ uint32_t max_freq;
++ int ret = 0;
++
++ if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
++ max_freq = data->dpm_table.gfx_table.dpm_state.soft_max_level;
++
++ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
++ hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
++ (PPCLK_GFXCLK << 16) | (max_freq & 0xffff))),
++ "Failed to set soft max gfxclk!",
++ return ret);
+ }
+
+- if (data->smc_state_table.mem_max_level !=
+- data->dpm_table.mem_table.dpm_state.soft_max_level) {
+- smum_send_msg_to_smc_with_parameter(hwmgr,
+- PPSMC_MSG_SetSoftMaxByFreq,
+- /* plus the vale by 1 to align the resolution */
+- PPCLK_UCLK<<16 | (data->dpm_table.mem_table.dpm_levels[data->smc_state_table.mem_max_level].value + 1));
+- data->dpm_table.mem_table.dpm_state.soft_max_level =
+- data->smc_state_table.mem_max_level;
++ if (data->smu_features[GNLD_DPM_UCLK].enabled) {
++ max_freq = data->dpm_table.mem_table.dpm_state.soft_max_level;
++
++ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
++ hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
++ (PPCLK_UCLK << 16) | (max_freq & 0xffff))),
++ "Failed to set soft max memclk!",
++ return ret);
+ }
+
+- return 0;
++ if (data->smu_features[GNLD_DPM_UVD].enabled) {
++ max_freq = data->dpm_table.vclk_table.dpm_state.soft_max_level;
++
++ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
++ hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
++ (PPCLK_VCLK << 16) | (max_freq & 0xffff))),
++ "Failed to set soft max vclk!",
++ return ret);
++
++ max_freq = data->dpm_table.dclk_table.dpm_state.soft_max_level;
++ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
++ hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
++ (PPCLK_DCLK << 16) | (max_freq & 0xffff))),
++ "Failed to set soft max dclk!",
++ return ret);
++ }
++
++ if (data->smu_features[GNLD_DPM_VCE].enabled) {
++ max_freq = data->dpm_table.eclk_table.dpm_state.soft_max_level;
++
++ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
++ hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
++ (PPCLK_ECLK << 16) | (max_freq & 0xffff))),
++ "Failed to set soft max eclk!",
++ return ret);
++ }
++
++ if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
++ max_freq = data->dpm_table.soc_table.dpm_state.soft_max_level;
++
++ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
++ hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
++ (PPCLK_SOCCLK << 16) | (max_freq & 0xffff))),
++ "Failed to set soft max socclk!",
++ return ret);
++ }
++
++ return ret;
+ }
+
+ int vega12_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
+@@ -1331,12 +1427,19 @@ static int vega12_force_dpm_highest(struct pp_hwmgr *hwmgr)
+ struct vega12_hwmgr *data =
+ (struct vega12_hwmgr *)(hwmgr->backend);
+
+- data->smc_state_table.gfx_boot_level =
+- data->smc_state_table.gfx_max_level =
+- vega12_find_highest_dpm_level(&(data->dpm_table.gfx_table));
+- data->smc_state_table.mem_boot_level =
+- data->smc_state_table.mem_max_level =
+- vega12_find_highest_dpm_level(&(data->dpm_table.mem_table));
++ uint32_t soft_level;
++
++ soft_level = vega12_find_highest_dpm_level(&(data->dpm_table.gfx_table));
++
++ data->dpm_table.gfx_table.dpm_state.soft_min_level =
++ data->dpm_table.gfx_table.dpm_state.soft_max_level =
++ data->dpm_table.gfx_table.dpm_levels[soft_level].value;
++
++ soft_level = vega12_find_highest_dpm_level(&(data->dpm_table.mem_table));
++
++ data->dpm_table.mem_table.dpm_state.soft_min_level =
++ data->dpm_table.mem_table.dpm_state.soft_max_level =
++ data->dpm_table.mem_table.dpm_levels[soft_level].value;
+
+ PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
+ "Failed to upload boot level to highest!",
+@@ -1353,13 +1456,19 @@ static int vega12_force_dpm_lowest(struct pp_hwmgr *hwmgr)
+ {
+ struct vega12_hwmgr *data =
+ (struct vega12_hwmgr *)(hwmgr->backend);
++ uint32_t soft_level;
++
++ soft_level = vega12_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
++
++ data->dpm_table.gfx_table.dpm_state.soft_min_level =
++ data->dpm_table.gfx_table.dpm_state.soft_max_level =
++ data->dpm_table.gfx_table.dpm_levels[soft_level].value;
+
+- data->smc_state_table.gfx_boot_level =
+- data->smc_state_table.gfx_max_level =
+- vega12_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
+- data->smc_state_table.mem_boot_level =
+- data->smc_state_table.mem_max_level =
+- vega12_find_lowest_dpm_level(&(data->dpm_table.mem_table));
++ soft_level = vega12_find_lowest_dpm_level(&(data->dpm_table.mem_table));
++
++ data->dpm_table.mem_table.dpm_state.soft_min_level =
++ data->dpm_table.mem_table.dpm_state.soft_max_level =
++ data->dpm_table.mem_table.dpm_levels[soft_level].value;
+
+ PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
+ "Failed to upload boot level to highest!",
+@@ -1375,17 +1484,6 @@ static int vega12_force_dpm_lowest(struct pp_hwmgr *hwmgr)
+
+ static int vega12_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
+ {
+- struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+-
+- data->smc_state_table.gfx_boot_level =
+- vega12_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
+- data->smc_state_table.gfx_max_level =
+- vega12_find_highest_dpm_level(&(data->dpm_table.gfx_table));
+- data->smc_state_table.mem_boot_level =
+- vega12_find_lowest_dpm_level(&(data->dpm_table.mem_table));
+- data->smc_state_table.mem_max_level =
+- vega12_find_highest_dpm_level(&(data->dpm_table.mem_table));
+-
+ PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
+ "Failed to upload DPM Bootup Levels!",
+ return -1);
+@@ -1393,22 +1491,28 @@ static int vega12_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
+ PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
+ "Failed to upload DPM Max Levels!",
+ return -1);
++
+ return 0;
+ }
+
+-#if 0
+ static int vega12_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
+ uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask)
+ {
+- struct phm_ppt_v2_information *table_info =
+- (struct phm_ppt_v2_information *)(hwmgr->pptable);
++ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
++ struct vega12_single_dpm_table *gfx_dpm_table = &(data->dpm_table.gfx_table);
++ struct vega12_single_dpm_table *mem_dpm_table = &(data->dpm_table.mem_table);
++ struct vega12_single_dpm_table *soc_dpm_table = &(data->dpm_table.soc_table);
++
++ *sclk_mask = 0;
++ *mclk_mask = 0;
++ *soc_mask = 0;
+
+- if (table_info->vdd_dep_on_sclk->count > VEGA12_UMD_PSTATE_GFXCLK_LEVEL &&
+- table_info->vdd_dep_on_socclk->count > VEGA12_UMD_PSTATE_SOCCLK_LEVEL &&
+- table_info->vdd_dep_on_mclk->count > VEGA12_UMD_PSTATE_MCLK_LEVEL) {
++ if (gfx_dpm_table->count > VEGA12_UMD_PSTATE_GFXCLK_LEVEL &&
++ mem_dpm_table->count > VEGA12_UMD_PSTATE_MCLK_LEVEL &&
++ soc_dpm_table->count > VEGA12_UMD_PSTATE_SOCCLK_LEVEL) {
+ *sclk_mask = VEGA12_UMD_PSTATE_GFXCLK_LEVEL;
+- *soc_mask = VEGA12_UMD_PSTATE_SOCCLK_LEVEL;
+ *mclk_mask = VEGA12_UMD_PSTATE_MCLK_LEVEL;
++ *soc_mask = VEGA12_UMD_PSTATE_SOCCLK_LEVEL;
+ }
+
+ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
+@@ -1416,13 +1520,13 @@ static int vega12_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_fo
+ } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
+ *mclk_mask = 0;
+ } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+- *sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
+- *soc_mask = table_info->vdd_dep_on_socclk->count - 1;
+- *mclk_mask = table_info->vdd_dep_on_mclk->count - 1;
++ *sclk_mask = gfx_dpm_table->count - 1;
++ *mclk_mask = mem_dpm_table->count - 1;
++ *soc_mask = soc_dpm_table->count - 1;
+ }
++
+ return 0;
+ }
+-#endif
+
+ static void vega12_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
+ {
+@@ -1446,11 +1550,9 @@ static int vega12_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
+ enum amd_dpm_forced_level level)
+ {
+ int ret = 0;
+-#if 0
+ uint32_t sclk_mask = 0;
+ uint32_t mclk_mask = 0;
+ uint32_t soc_mask = 0;
+-#endif
+
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_HIGH:
+@@ -1466,27 +1568,18 @@ static int vega12_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+-#if 0
+ ret = vega12_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
+ if (ret)
+ return ret;
+- vega12_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
+- vega12_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
+-#endif
++ vega12_force_clock_level(hwmgr, PP_SCLK, 1 << sclk_mask);
++ vega12_force_clock_level(hwmgr, PP_MCLK, 1 << mclk_mask);
+ break;
+ case AMD_DPM_FORCED_LEVEL_MANUAL:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
+ default:
+ break;
+ }
+-#if 0
+- if (!ret) {
+- if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+- vega12_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE);
+- else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+- vega12_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO);
+- }
+-#endif
++
+ return ret;
+ }
+
+@@ -1746,37 +1839,48 @@ static int vega12_force_clock_level(struct pp_hwmgr *hwmgr,
+ enum pp_clock_type type, uint32_t mask)
+ {
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+-
+- if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
+- AMD_DPM_FORCED_LEVEL_LOW |
+- AMD_DPM_FORCED_LEVEL_HIGH))
+- return -EINVAL;
++ uint32_t soft_min_level, soft_max_level;
++ int ret = 0;
+
+ switch (type) {
+ case PP_SCLK:
+- data->smc_state_table.gfx_boot_level = mask ? (ffs(mask) - 1) : 0;
+- data->smc_state_table.gfx_max_level = mask ? (fls(mask) - 1) : 0;
++ soft_min_level = mask ? (ffs(mask) - 1) : 0;
++ soft_max_level = mask ? (fls(mask) - 1) : 0;
++
++ data->dpm_table.gfx_table.dpm_state.soft_min_level =
++ data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
++ data->dpm_table.gfx_table.dpm_state.soft_max_level =
++ data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
+
+- PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
++ ret = vega12_upload_dpm_min_level(hwmgr);
++ PP_ASSERT_WITH_CODE(!ret,
+ "Failed to upload boot level to lowest!",
+- return -EINVAL);
++ return ret);
+
+- PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
++ ret = vega12_upload_dpm_max_level(hwmgr);
++ PP_ASSERT_WITH_CODE(!ret,
+ "Failed to upload dpm max level to highest!",
+- return -EINVAL);
++ return ret);
+ break;
+
+ case PP_MCLK:
+- data->smc_state_table.mem_boot_level = mask ? (ffs(mask) - 1) : 0;
+- data->smc_state_table.mem_max_level = mask ? (fls(mask) - 1) : 0;
++ soft_min_level = mask ? (ffs(mask) - 1) : 0;
++ soft_max_level = mask ? (fls(mask) - 1) : 0;
++
++ data->dpm_table.mem_table.dpm_state.soft_min_level =
++ data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
++ data->dpm_table.mem_table.dpm_state.soft_max_level =
++ data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
+
+- PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
++ ret = vega12_upload_dpm_min_level(hwmgr);
++ PP_ASSERT_WITH_CODE(!ret,
+ "Failed to upload boot level to lowest!",
+- return -EINVAL);
++ return ret);
+
+- PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
++ ret = vega12_upload_dpm_max_level(hwmgr);
++ PP_ASSERT_WITH_CODE(!ret,
+ "Failed to upload dpm max level to highest!",
+- return -EINVAL);
++ return ret);
+
+ break;
+
+--
+2.7.4
+
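vega12_force_clock_level() now derives the soft limits straight from the
user-supplied level bitmask: the lowest set bit selects the soft-min level and
the highest set bit the soft-max level, so a mask of 0x6 (levels 1 and 2 set)
clamps the clock to levels 1..2. A userspace-style sketch of that mapping,
using GCC builtins in place of the kernel's ffs()/fls() helpers:

    static void demo_mask_to_levels(unsigned int mask,
                                    unsigned int *soft_min,
                                    unsigned int *soft_max)
    {
            *soft_min = mask ? (unsigned int)__builtin_ffs(mask) - 1 : 0;
            *soft_max = mask ? 31u - (unsigned int)__builtin_clz(mask) : 0;
    }

    /* demo_mask_to_levels(0x6, ...) -> soft_min = 1, soft_max = 2 */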
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4821-drm-amd-powerplay-initialize-uvd-vce-powergate-statu.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4821-drm-amd-powerplay-initialize-uvd-vce-powergate-statu.patch
new file mode 100644
index 00000000..8de4762a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4821-drm-amd-powerplay-initialize-uvd-vce-powergate-statu.patch
@@ -0,0 +1,79 @@
+From 275cbd63bbdfc138ee1fdb583a8494c116b33185 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Mon, 11 Jun 2018 16:25:14 +0800
+Subject: [PATCH 4821/5725] drm/amd/powerplay: initialize uvd/vce powergate
+ status v4
+
+When UVD/VCE DPM is enabled/disabled, the powergate status is
+set to false/true, so that we will not try to ungate/gate them
+(enable/disable their DPM) again.
+
+v2: added check for uvd/vce powergate status before gating
+v3: fix typo in description
+v4: warning fix (Alex)
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 23 ++++++++++++++++++++++
+ 1 file changed, 23 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+index b27978b..39235f0 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+@@ -778,6 +778,21 @@ static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
+ return 0;
+ }
+
++static void vega12_init_powergate_state(struct pp_hwmgr *hwmgr)
++{
++ struct vega12_hwmgr *data =
++ (struct vega12_hwmgr *)(hwmgr->backend);
++
++ data->uvd_power_gated = true;
++ data->vce_power_gated = true;
++
++ if (data->smu_features[GNLD_DPM_UVD].enabled)
++ data->uvd_power_gated = false;
++
++ if (data->smu_features[GNLD_DPM_VCE].enabled)
++ data->vce_power_gated = false;
++}
++
+ static int vega12_enable_all_smu_features(struct pp_hwmgr *hwmgr)
+ {
+ struct vega12_hwmgr *data =
+@@ -802,6 +817,8 @@ static int vega12_enable_all_smu_features(struct pp_hwmgr *hwmgr)
+ }
+ }
+
++ vega12_init_powergate_state(hwmgr);
++
+ return 0;
+ }
+
+@@ -1986,6 +2003,9 @@ static void vega12_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
+ {
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+
++ if (data->vce_power_gated == bgate)
++ return;
++
+ data->vce_power_gated = bgate;
+ vega12_enable_disable_vce_dpm(hwmgr, !bgate);
+ }
+@@ -1994,6 +2014,9 @@ static void vega12_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
+ {
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+
++ if (data->uvd_power_gated == bgate)
++ return;
++
+ data->uvd_power_gated = bgate;
+ vega12_enable_disable_uvd_dpm(hwmgr, !bgate);
+ }
+--
+2.7.4
+
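The early return added to vega12_power_gate_uvd()/vce() is a plain
"already in the requested state" guard, so repeated gate or ungate requests no
longer trigger a redundant DPM toggle. A reduced sketch of the pattern (the
demo_* names are made up):

    struct demo_pg_state {
            bool uvd_power_gated;
    };

    static void demo_power_gate_uvd(struct demo_pg_state *s, bool bgate)
    {
            if (s->uvd_power_gated == bgate)
                    return;        /* nothing to change, skip the DPM toggle */

            s->uvd_power_gated = bgate;
            /* ...enable/disable UVD DPM here... */
    }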
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4822-drm-amd-powerplay-correct-smc-display-config-for-mul.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4822-drm-amd-powerplay-correct-smc-display-config-for-mul.patch
new file mode 100644
index 00000000..e16b08a9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4822-drm-amd-powerplay-correct-smc-display-config-for-mul.patch
@@ -0,0 +1,32 @@
+From a093037e6a5386cf03833e861e4a51b988bfa92b Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Wed, 20 Jun 2018 12:24:29 +0800
+Subject: [PATCH 4822/5725] drm/amd/powerplay: correct smc display config for
+ multi monitor
+
+Need to take into account multi-head with synced displays.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+index 39235f0..177fe78 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+@@ -1402,7 +1402,8 @@ static int vega12_notify_smc_display_config_after_ps_adjustment(
+ struct pp_display_clock_request clock_req;
+ uint32_t clk_request;
+
+- if (hwmgr->display_config->num_display > 1)
++ if ((hwmgr->display_config->num_display > 1) &&
++ !hwmgr->display_config->multi_monitor_in_sync)
+ vega12_notify_smc_display_change(hwmgr, false);
+ else
+ vega12_notify_smc_display_change(hwmgr, true);
+--
+2.7.4
+
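
The condition changed in the patch above treats several vblank-synced heads like a single display when deciding what to report to the SMC. A tiny standalone sketch of that decision follows; the names are illustrative, not the driver's.

#include <stdbool.h>
#include <stdio.h>

struct display_config {
	unsigned int num_display;
	bool multi_monitor_in_sync;
};

/* Returns true when the SMC should be told there is effectively a
 * single (or synced) display, false for independent multi-head.
 */
static bool single_display_for_smc(const struct display_config *cfg)
{
	return !(cfg->num_display > 1 && !cfg->multi_monitor_in_sync);
}

int main(void)
{
	struct display_config a = { .num_display = 2, .multi_monitor_in_sync = true };
	struct display_config b = { .num_display = 2, .multi_monitor_in_sync = false };

	printf("synced dual-head   -> %s\n", single_display_for_smc(&a) ? "single" : "multi");
	printf("unsynced dual-head -> %s\n", single_display_for_smc(&b) ? "single" : "multi");
	return 0;
}
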
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4823-drm-amd-powerplay-drop-unnecessary-uclk-hard-min-set.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4823-drm-amd-powerplay-drop-unnecessary-uclk-hard-min-set.patch
new file mode 100644
index 00000000..2ef10e81
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4823-drm-amd-powerplay-drop-unnecessary-uclk-hard-min-set.patch
@@ -0,0 +1,47 @@
+From 185d9c1590607f9522ee83f2f63e046d612a190e Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Wed, 20 Jun 2018 12:28:10 +0800
+Subject: [PATCH 4823/5725] drm/amd/powerplay: drop unnecessary uclk hard min
+ setting
+
+We don't need to set uclk hard min here because this will
+be set with other clocks on power state change.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 10 ----------
+ 1 file changed, 10 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+index 177fe78..ea0ad3e 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+@@ -1400,7 +1400,6 @@ static int vega12_notify_smc_display_config_after_ps_adjustment(
+ (struct vega12_hwmgr *)(hwmgr->backend);
+ struct PP_Clocks min_clocks = {0};
+ struct pp_display_clock_request clock_req;
+- uint32_t clk_request;
+
+ if ((hwmgr->display_config->num_display > 1) &&
+ !hwmgr->display_config->multi_monitor_in_sync)
+@@ -1428,15 +1427,6 @@ static int vega12_notify_smc_display_config_after_ps_adjustment(
+ }
+ }
+
+- if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+- clk_request = (PPCLK_UCLK << 16) | (min_clocks.memoryClock) / 100;
+- PP_ASSERT_WITH_CODE(
+- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinByFreq, clk_request) == 0,
+- "[PhwVega12_NotifySMCDisplayConfigAfterPowerStateAdjustment] Attempt to set UCLK HardMin Failed!",
+- return -1);
+- data->dpm_table.mem_table.dpm_state.hard_min_level = min_clocks.memoryClock;
+- }
+-
+ return 0;
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4824-drm-amd-powerplay-correct-vega12-max-num-of-dpm-leve.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4824-drm-amd-powerplay-correct-vega12-max-num-of-dpm-leve.patch
new file mode 100644
index 00000000..f2c36fc0
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4824-drm-amd-powerplay-correct-vega12-max-num-of-dpm-leve.patch
@@ -0,0 +1,72 @@
+From f59d46b2ada24965070d729c98de2d844b273b34 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Mon, 11 Jun 2018 16:33:40 +0800
+Subject: [PATCH 4824/5725] drm/amd/powerplay: correct vega12 max num of dpm
+ level
+
+Use MAX_NUM_CLOCKS instead of VG12_PSUEDO* macros for
+the max number of dpm levels.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 17 +++++++++--------
+ 1 file changed, 9 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+index ea0ad3e..d0e7081 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+@@ -1643,8 +1643,8 @@ static int vega12_get_sclks(struct pp_hwmgr *hwmgr,
+ return -1;
+
+ dpm_table = &(data->dpm_table.gfx_table);
+- ucount = (dpm_table->count > VG12_PSUEDO_NUM_GFXCLK_DPM_LEVELS) ?
+- VG12_PSUEDO_NUM_GFXCLK_DPM_LEVELS : dpm_table->count;
++ ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
++ MAX_NUM_CLOCKS : dpm_table->count;
+
+ for (i = 0; i < ucount; i++) {
+ clocks->data[i].clocks_in_khz =
+@@ -1675,11 +1675,12 @@ static int vega12_get_memclocks(struct pp_hwmgr *hwmgr,
+ return -1;
+
+ dpm_table = &(data->dpm_table.mem_table);
+- ucount = (dpm_table->count > VG12_PSUEDO_NUM_UCLK_DPM_LEVELS) ?
+- VG12_PSUEDO_NUM_UCLK_DPM_LEVELS : dpm_table->count;
++ ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
++ MAX_NUM_CLOCKS : dpm_table->count;
+
+ for (i = 0; i < ucount; i++) {
+ clocks->data[i].clocks_in_khz =
++ data->mclk_latency_table.entries[i].frequency =
+ dpm_table->dpm_levels[i].value * 1000;
+
+ clocks->data[i].latency_in_us =
+@@ -1705,8 +1706,8 @@ static int vega12_get_dcefclocks(struct pp_hwmgr *hwmgr,
+
+
+ dpm_table = &(data->dpm_table.dcef_table);
+- ucount = (dpm_table->count > VG12_PSUEDO_NUM_DCEFCLK_DPM_LEVELS) ?
+- VG12_PSUEDO_NUM_DCEFCLK_DPM_LEVELS : dpm_table->count;
++ ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
++ MAX_NUM_CLOCKS : dpm_table->count;
+
+ for (i = 0; i < ucount; i++) {
+ clocks->data[i].clocks_in_khz =
+@@ -1733,8 +1734,8 @@ static int vega12_get_socclocks(struct pp_hwmgr *hwmgr,
+
+
+ dpm_table = &(data->dpm_table.soc_table);
+- ucount = (dpm_table->count > VG12_PSUEDO_NUM_SOCCLK_DPM_LEVELS) ?
+- VG12_PSUEDO_NUM_SOCCLK_DPM_LEVELS : dpm_table->count;
++ ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
++ MAX_NUM_CLOCKS : dpm_table->count;
+
+ for (i = 0; i < ucount; i++) {
+ clocks->data[i].clocks_in_khz =
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4825-drm-amd-powerplay-apply-clocks-adjust-rules-on-power.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4825-drm-amd-powerplay-apply-clocks-adjust-rules-on-power.patch
new file mode 100644
index 00000000..fda9ff8a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4825-drm-amd-powerplay-apply-clocks-adjust-rules-on-power.patch
@@ -0,0 +1,213 @@
+From f63c81d3092beba4ed572b0d8c1ed188a1533d03 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Mon, 11 Jun 2018 16:40:57 +0800
+Subject: [PATCH 4825/5725] drm/amd/powerplay: apply clocks adjust rules on
+ power state change
+
+This adds the apply_clocks_adjust_rules callback, which is used
+to validate the clock settings on a power state change.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 162 +++++++++++++++++++++
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h | 2 +
+ 2 files changed, 164 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+index d0e7081..9125bc5 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+@@ -1951,6 +1951,166 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
+ return size;
+ }
+
++static int vega12_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
++{
++ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
++ struct vega12_single_dpm_table *dpm_table;
++ bool vblank_too_short = false;
++ bool disable_mclk_switching;
++ uint32_t i, latency;
++
++ disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
++ !hwmgr->display_config->multi_monitor_in_sync) ||
++ vblank_too_short;
++ latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
++
++ /* gfxclk */
++ dpm_table = &(data->dpm_table.gfx_table);
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
++ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++
++ if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
++ if (VEGA12_UMD_PSTATE_GFXCLK_LEVEL < dpm_table->count) {
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value;
++ }
++
++ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
++ }
++
++ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ }
++ }
++
++ /* memclk */
++ dpm_table = &(data->dpm_table.mem_table);
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
++ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++
++ if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
++ if (VEGA12_UMD_PSTATE_MCLK_LEVEL < dpm_table->count) {
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value;
++ }
++
++ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
++ }
++
++ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ }
++ }
++
++ /* honour DAL's UCLK Hardmin */
++ if (dpm_table->dpm_state.hard_min_level < (hwmgr->display_config->min_mem_set_clock / 100))
++ dpm_table->dpm_state.hard_min_level = hwmgr->display_config->min_mem_set_clock / 100;
++
++ /* Hardmin is dependent on displayconfig */
++ if (disable_mclk_switching) {
++ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ for (i = 0; i < data->mclk_latency_table.count - 1; i++) {
++ if (data->mclk_latency_table.entries[i].latency <= latency) {
++ if (dpm_table->dpm_levels[i].value >= (hwmgr->display_config->min_mem_set_clock / 100)) {
++ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[i].value;
++ break;
++ }
++ }
++ }
++ }
++
++ if (hwmgr->display_config->nb_pstate_switch_disable)
++ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++
++ /* vclk */
++ dpm_table = &(data->dpm_table.vclk_table);
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
++ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++
++ if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
++ if (VEGA12_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
++ }
++
++ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ }
++ }
++
++ /* dclk */
++ dpm_table = &(data->dpm_table.dclk_table);
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
++ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++
++ if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
++ if (VEGA12_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
++ }
++
++ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ }
++ }
++
++ /* socclk */
++ dpm_table = &(data->dpm_table.soc_table);
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
++ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++
++ if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
++ if (VEGA12_UMD_PSTATE_SOCCLK_LEVEL < dpm_table->count) {
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_SOCCLK_LEVEL].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_SOCCLK_LEVEL].value;
++ }
++
++ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ }
++ }
++
++ /* eclk */
++ dpm_table = &(data->dpm_table.eclk_table);
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
++ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++
++ if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
++ if (VEGA12_UMD_PSTATE_VCEMCLK_LEVEL < dpm_table->count) {
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_VCEMCLK_LEVEL].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_VCEMCLK_LEVEL].value;
++ }
++
++ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ }
++ }
++
++ return 0;
++}
++
+ static int vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
+ {
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+@@ -2203,6 +2363,8 @@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
+ .display_clock_voltage_request = vega12_display_clock_voltage_request,
+ .force_clock_level = vega12_force_clock_level,
+ .print_clock_levels = vega12_print_clock_levels,
++ .apply_clocks_adjust_rules =
++ vega12_apply_clocks_adjust_rules,
+ .display_config_changed = vega12_display_configuration_changed_task,
+ .powergate_uvd = vega12_power_gate_uvd,
+ .powergate_vce = vega12_power_gate_vce,
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
+index e18c083..e17237c 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
+@@ -443,6 +443,8 @@ struct vega12_hwmgr {
+ #define VEGA12_UMD_PSTATE_GFXCLK_LEVEL 0x3
+ #define VEGA12_UMD_PSTATE_SOCCLK_LEVEL 0x3
+ #define VEGA12_UMD_PSTATE_MCLK_LEVEL 0x2
++#define VEGA12_UMD_PSTATE_UVDCLK_LEVEL 0x3
++#define VEGA12_UMD_PSTATE_VCEMCLK_LEVEL 0x3
+
+ int vega12_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
+
+--
+2.7.4
+
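
One non-obvious part of the callback added above is the memory-clock hard-minimum selection when mclk switching has to be disabled: pick the lowest DPM level whose switch latency fits the display's tolerance and which still honours DAL's minimum memory clock, falling back to the highest level otherwise. The following standalone sketch models that selection with made-up example values and simplified types.

#include <stdio.h>

#define NUM_LEVELS 4

struct level {
	unsigned int clock_mhz;   /* DPM level frequency */
	unsigned int latency_us;  /* switch latency at this level */
};

static unsigned int pick_hard_min(const struct level *lvl, int count,
				  unsigned int tolerable_latency_us,
				  unsigned int dal_min_mhz)
{
	/* Default to the highest level (safe: no visible switch). */
	unsigned int hard_min = lvl[count - 1].clock_mhz;
	int i;

	for (i = 0; i < count - 1; i++) {
		if (lvl[i].latency_us <= tolerable_latency_us &&
		    lvl[i].clock_mhz >= dal_min_mhz) {
			hard_min = lvl[i].clock_mhz;  /* lowest acceptable level */
			break;
		}
	}
	return hard_min;
}

int main(void)
{
	const struct level table[NUM_LEVELS] = {
		{ 167, 900 }, { 500, 400 }, { 800, 150 }, { 1000, 50 },
	};

	printf("hard min = %u MHz\n",
	       pick_hard_min(table, NUM_LEVELS, 200 /* us */, 300 /* MHz */));
	return 0;
}
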
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4826-drm-amd-powerplay-set-vega12-pre-display-configurati.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4826-drm-amd-powerplay-set-vega12-pre-display-configurati.patch
new file mode 100644
index 00000000..73c30102
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4826-drm-amd-powerplay-set-vega12-pre-display-configurati.patch
@@ -0,0 +1,78 @@
+From 76bafbf7f72090f72c7bfff21f1b5851a7f92f15 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Mon, 11 Jun 2018 17:22:33 +0800
+Subject: [PATCH 4826/5725] drm/amd/powerplay: set vega12 pre display
+ configurations
+
+Set num_displays to 0 and force uclk high as part of the mode
+set sequence.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 41 ++++++++++++++++++++++
+ 1 file changed, 41 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+index 9125bc5..b673620 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+@@ -2111,6 +2111,45 @@ static int vega12_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
+ return 0;
+ }
+
++static int vega12_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr,
++ struct vega12_single_dpm_table *dpm_table)
++{
++ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
++ int ret = 0;
++
++ if (data->smu_features[GNLD_DPM_UCLK].enabled) {
++ PP_ASSERT_WITH_CODE(dpm_table->count > 0,
++ "[SetUclkToHightestDpmLevel] Dpm table has no entry!",
++ return -EINVAL);
++ PP_ASSERT_WITH_CODE(dpm_table->count <= NUM_UCLK_DPM_LEVELS,
++ "[SetUclkToHightestDpmLevel] Dpm table has too many entries!",
++ return -EINVAL);
++
++ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_SetHardMinByFreq,
++ (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level)),
++ "[SetUclkToHightestDpmLevel] Set hard min uclk failed!",
++ return ret);
++ }
++
++ return ret;
++}
++
++static int vega12_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
++{
++ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
++ int ret = 0;
++
++ smum_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_NumOfDisplays, 0);
++
++ ret = vega12_set_uclk_to_highest_dpm_level(hwmgr,
++ &data->dpm_table.mem_table);
++
++ return ret;
++}
++
+ static int vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
+ {
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+@@ -2365,6 +2404,8 @@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
+ .print_clock_levels = vega12_print_clock_levels,
+ .apply_clocks_adjust_rules =
+ vega12_apply_clocks_adjust_rules,
++ .pre_display_config_changed =
++ vega12_pre_display_configuration_changed_task,
+ .display_config_changed = vega12_display_configuration_changed_task,
+ .powergate_uvd = vega12_power_gate_uvd,
+ .powergate_vce = vega12_power_gate_vce,
+--
+2.7.4
+
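
The SetHardMinByFreq request used in the patch above packs its argument with the clock ID in the upper 16 bits and the requested frequency in the lower 16 bits. A small sketch of that packing follows; the clock ID constant is illustrative and not taken from the real PPSMC headers.

#include <stdint.h>
#include <stdio.h>

#define FAKE_PPCLK_UCLK 4u  /* illustrative clock ID, not the real enum value */

static uint32_t pack_hard_min(uint32_t clk_id, uint32_t freq)
{
	/* The frequency must fit in 16 bits for this message layout. */
	return (clk_id << 16) | (freq & 0xffffu);
}

int main(void)
{
	uint32_t freq = 1000;  /* e.g. the highest memory DPM level */
	uint32_t arg = pack_hard_min(FAKE_PPCLK_UCLK, freq);

	printf("message argument = 0x%08x\n", (unsigned)arg);
	return 0;
}
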
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4827-drm-amd-powerplay-cosmetic-fix.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4827-drm-amd-powerplay-cosmetic-fix.patch
new file mode 100644
index 00000000..3750bee6
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4827-drm-amd-powerplay-cosmetic-fix.patch
@@ -0,0 +1,104 @@
+From 01696c8ccbcb7718c2123a0d935cb1a3e493789e Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Mon, 11 Jun 2018 16:48:43 +0800
+Subject: [PATCH 4827/5725] drm/amd/powerplay: cosmetic fix
+
+Fix coding style and drop unused variable.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 10 +++-------
+ .../gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h | 18 +++++++++---------
+ 2 files changed, 12 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+index b673620..9e9a3db 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+@@ -811,9 +811,6 @@ static int vega12_enable_all_smu_features(struct pp_hwmgr *hwmgr)
+ enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ? true : false;
+ data->smu_features[i].enabled = enabled;
+ data->smu_features[i].supported = enabled;
+- PP_ASSERT(
+- !data->smu_features[i].allowed || enabled,
+- "[EnableAllSMUFeatures] Enabled feature is different from allowed, expected disabled!");
+ }
+ }
+
+@@ -1230,8 +1227,8 @@ static int vega12_get_current_gfx_clk_freq(struct pp_hwmgr *hwmgr, uint32_t *gfx
+
+ *gfx_freq = 0;
+
+- PP_ASSERT_WITH_CODE(
+- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16)) == 0,
++ PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16)) == 0,
+ "[GetCurrentGfxClkFreq] Attempt to get Current GFXCLK Frequency Failed!",
+ return -1);
+ PP_ASSERT_WITH_CODE(
+@@ -1790,7 +1787,6 @@ static int vega12_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
+ {
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ Watermarks_t *table = &(data->smc_state_table.water_marks_table);
+- int result = 0;
+ uint32_t i;
+
+ if (!data->registry_data.disable_water_mark &&
+@@ -1841,7 +1837,7 @@ static int vega12_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
+ data->water_marks_bitmap &= ~WaterMarksLoaded;
+ }
+
+- return result;
++ return 0;
+ }
+
+ static int vega12_force_clock_level(struct pp_hwmgr *hwmgr,
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
+index b08526f..b6ffd08 100644
+--- a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
+@@ -412,10 +412,10 @@ typedef struct {
+ QuadraticInt_t ReservedEquation2;
+ QuadraticInt_t ReservedEquation3;
+
+- uint16_t MinVoltageUlvGfx;
+- uint16_t MinVoltageUlvSoc;
++ uint16_t MinVoltageUlvGfx;
++ uint16_t MinVoltageUlvSoc;
+
+- uint32_t Reserved[14];
++ uint32_t Reserved[14];
+
+
+
+@@ -483,9 +483,9 @@ typedef struct {
+ uint8_t padding8_4;
+
+
+- uint8_t PllGfxclkSpreadEnabled;
+- uint8_t PllGfxclkSpreadPercent;
+- uint16_t PllGfxclkSpreadFreq;
++ uint8_t PllGfxclkSpreadEnabled;
++ uint8_t PllGfxclkSpreadPercent;
++ uint16_t PllGfxclkSpreadFreq;
+
+ uint8_t UclkSpreadEnabled;
+ uint8_t UclkSpreadPercent;
+@@ -495,9 +495,9 @@ typedef struct {
+ uint8_t SocclkSpreadPercent;
+ uint16_t SocclkSpreadFreq;
+
+- uint8_t AcgGfxclkSpreadEnabled;
+- uint8_t AcgGfxclkSpreadPercent;
+- uint16_t AcgGfxclkSpreadFreq;
++ uint8_t AcgGfxclkSpreadEnabled;
++ uint8_t AcgGfxclkSpreadPercent;
++ uint16_t AcgGfxclkSpreadFreq;
+
+ uint8_t Vr2_I2C_address;
+ uint8_t padding_vr2[3];
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4828-drm-amdgpu-Use-gmc_vram_full_visible-in-vram_mgr_bo_.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4828-drm-amdgpu-Use-gmc_vram_full_visible-in-vram_mgr_bo_.patch
new file mode 100644
index 00000000..66b776a2
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4828-drm-amdgpu-Use-gmc_vram_full_visible-in-vram_mgr_bo_.patch
@@ -0,0 +1,32 @@
+From 7ea32e90cb78fe5878bc7536908834d39d265e9b Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Michel=20D=C3=A4nzer?= <michel.daenzer@amd.com>
+Date: Fri, 15 Jun 2018 16:37:03 +0200
+Subject: [PATCH 4828/5725] drm/amdgpu: Use gmc_vram_full_visible in
+ vram_mgr_bo_invisible_size
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+index dc1cb22..e8790ea 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+@@ -113,7 +113,7 @@ u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo)
+ unsigned pages = mem->num_pages;
+ u64 usage = 0;
+
+- if (adev->gmc.visible_vram_size == adev->gmc.real_vram_size)
++ if (amdgpu_gmc_vram_full_visible(&adev->gmc))
+ return 0;
+
+ if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4829-drm-amdgpu-Remove-amdgpu_gem_map_attach-target_dev-d.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4829-drm-amdgpu-Remove-amdgpu_gem_map_attach-target_dev-d.patch
new file mode 100644
index 00000000..9f967f14
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4829-drm-amdgpu-Remove-amdgpu_gem_map_attach-target_dev-d.patch
@@ -0,0 +1,35 @@
+From c6c49b8cb6842f82a263818f26655ca6d74107ab Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Michel=20D=C3=A4nzer?= <michel.daenzer@amd.com>
+Date: Fri, 29 Jun 2018 11:27:11 +0200
+Subject: [PATCH 4829/5725] drm/amdgpu: Remove amdgpu_gem_map_attach target_dev
+ documentation
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The parameter was removed.
+
+Fixes: a19741e5e5a9 "dma_buf: remove device parameter from attach
+ callback v2"
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+index 37bd4f3..8a7a56f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+@@ -194,7 +194,6 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
+ /**
+ * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
+ * @dma_buf: shared DMA buffer
+- * @target_dev: target device
+ * @attach: DMA-buf attachment
+ *
+ * Makes sure that the shared DMA buffer can be accessed by the target device.
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4830-drm-amdgpu-pp-add-missing-byte-swapping-in-process_p.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4830-drm-amdgpu-pp-add-missing-byte-swapping-in-process_p.patch
new file mode 100644
index 00000000..5ac0765e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4830-drm-amdgpu-pp-add-missing-byte-swapping-in-process_p.patch
@@ -0,0 +1,36 @@
+From 65118457e9d0acd411250194b9c44e10559380c0 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 28 Jun 2018 12:48:10 -0500
+Subject: [PATCH 4830/5725] drm/amdgpu/pp: add missing byte swapping in
+ process_pptables_v1_0.c
+
+Values need to be swapped on big endian.
+
+Reviewed-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+index 35bd987..4e1fd53 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+@@ -183,10 +183,10 @@ static int get_vddc_lookup_table(
+ ATOM_Tonga_Voltage_Lookup_Record,
+ entries, vddc_lookup_pp_tables, i);
+ record->us_calculated = 0;
+- record->us_vdd = atom_record->usVdd;
+- record->us_cac_low = atom_record->usCACLow;
+- record->us_cac_mid = atom_record->usCACMid;
+- record->us_cac_high = atom_record->usCACHigh;
++ record->us_vdd = le16_to_cpu(atom_record->usVdd);
++ record->us_cac_low = le16_to_cpu(atom_record->usCACLow);
++ record->us_cac_mid = le16_to_cpu(atom_record->usCACMid);
++ record->us_cac_high = le16_to_cpu(atom_record->usCACHigh);
+ }
+
+ *lookup_table = table;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4831-drm-amdgpu-pp-fix-endian-swapping-in-atomctrl_get_vo.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4831-drm-amdgpu-pp-fix-endian-swapping-in-atomctrl_get_vo.patch
new file mode 100644
index 00000000..d2fca01b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4831-drm-amdgpu-pp-fix-endian-swapping-in-atomctrl_get_vo.patch
@@ -0,0 +1,41 @@
+From 4ce0e11ee202b80a4a188875a46444323acdcf44 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 28 Jun 2018 12:51:06 -0500
+Subject: [PATCH 4831/5725] drm/amdgpu/pp: fix endian swapping in
+ atomctrl_get_voltage_range
+
+Need to swap before doing arithmetic on the values.
+
+Reviewed-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+index c7927595..e2158fc 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+@@ -1549,14 +1549,14 @@ void atomctrl_get_voltage_range(struct pp_hwmgr *hwmgr, uint32_t *max_vddc,
+ switch (hwmgr->chip_id) {
+ case CHIP_TONGA:
+ case CHIP_FIJI:
+- *max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMaxVddc/4);
+- *min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMinVddc/4);
++ *max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMaxVddc) / 4;
++ *min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMinVddc) / 4;
+ break;
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS12:
+- *max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMaxVddc/100);
+- *min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMinVddc/100);
++ *max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMaxVddc) / 100;
++ *min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMinVddc) / 100;
+ break;
+ default:
+ return;
+--
+2.7.4
+
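
The fix above is purely about ordering: the little-endian value must be converted to CPU byte order before any arithmetic is applied, otherwise the division operates on swapped bytes on big-endian hosts. The standalone demonstration below uses a local byteswap32() helper to stand in for what le32_to_cpu() does on a big-endian machine.

#include <stdint.h>
#include <stdio.h>

static uint32_t byteswap32(uint32_t v)
{
	return ((v & 0x000000ffu) << 24) |
	       ((v & 0x0000ff00u) << 8)  |
	       ((v & 0x00ff0000u) >> 8)  |
	       ((v & 0xff000000u) >> 24);
}

int main(void)
{
	/* 4000 mV stored little-endian, as seen by a big-endian CPU. */
	uint32_t raw_le = byteswap32(4000);

	uint32_t wrong = byteswap32(raw_le / 4); /* old code: divide, then swap */
	uint32_t right = byteswap32(raw_le) / 4; /* fixed code: swap, then divide */

	printf("divide-then-swap: %u\n", (unsigned)wrong);
	printf("swap-then-divide: %u (expected 1000)\n", (unsigned)right);
	return 0;
}
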
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4832-drm-amdgpu-pp-fix-copy-paste-typo-in-smu7_init_dpm_d.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4832-drm-amdgpu-pp-fix-copy-paste-typo-in-smu7_init_dpm_d.patch
new file mode 100644
index 00000000..1d753cea
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4832-drm-amdgpu-pp-fix-copy-paste-typo-in-smu7_init_dpm_d.patch
@@ -0,0 +1,30 @@
+From 3adefb3d5a0114adcf42f4414441e5396b90c0c4 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 28 Jun 2018 12:52:43 -0500
+Subject: [PATCH 4832/5725] drm/amdgpu/pp: fix copy paste typo in
+ smu7_init_dpm_defaults
+
+Should be mclk rather than sclk.
+
+Reviewed-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+index bb3f80c..5bd6d51 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+@@ -1578,7 +1578,7 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
+ data->current_profile_setting.sclk_up_hyst = 0;
+ data->current_profile_setting.sclk_down_hyst = 100;
+ data->current_profile_setting.sclk_activity = SMU7_SCLK_TARGETACTIVITY_DFLT;
+- data->current_profile_setting.bupdate_sclk = 1;
++ data->current_profile_setting.bupdate_mclk = 1;
+ data->current_profile_setting.mclk_up_hyst = 0;
+ data->current_profile_setting.mclk_down_hyst = 100;
+ data->current_profile_setting.mclk_activity = SMU7_MCLK_TARGETACTIVITY_DFLT;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4833-drm-amdgpu-pp-fix-copy-paste-typo-in-smu7_get_pp_tab.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4833-drm-amdgpu-pp-fix-copy-paste-typo-in-smu7_get_pp_tab.patch
new file mode 100644
index 00000000..0f73b97f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4833-drm-amdgpu-pp-fix-copy-paste-typo-in-smu7_get_pp_tab.patch
@@ -0,0 +1,30 @@
+From 873c5af13ff23d36f1f665d054efb03004cb8617 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 28 Jun 2018 13:21:12 -0500
+Subject: [PATCH 4833/5725] drm/amdgpu/pp: fix copy paste typo in
+ smu7_get_pp_table_entry_callback_func_v1
+
+Should be using PCIELaneLow for the low clock level.
+
+Reviewed-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+index 5bd6d51..b57a5df 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+@@ -3182,7 +3182,7 @@ static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr,
+ performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
+ state_entry->ucPCIEGenLow);
+ performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
+- state_entry->ucPCIELaneHigh);
++ state_entry->ucPCIELaneLow);
+
+ performance_level = &(smu7_power_state->performance_levels
+ [smu7_power_state->performance_level_count++]);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4834-drm-amdgpu-sdma-simplify-sdma-instance-setup.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4834-drm-amdgpu-sdma-simplify-sdma-instance-setup.patch
new file mode 100644
index 00000000..73b3a02b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4834-drm-amdgpu-sdma-simplify-sdma-instance-setup.patch
@@ -0,0 +1,229 @@
+From b667099d39eb6e0314da588614f82f24712e4e6d Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Mon, 25 Jun 2018 12:24:10 -0500
+Subject: [PATCH 4834/5725] drm/amdgpu/sdma: simplify sdma instance setup
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Set the me instance in early init and use that rather than
+calculating the instance based on the ring pointer.
+
+Reviewed-by: James Zhu <James.Zhu@amd.com>
+Reviewed-by: Leo Liu <leo.liu@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 12 ++++++------
+ drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 12 ++++++------
+ drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 14 ++++++--------
+ drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 23 +++++++++++------------
+ 4 files changed, 29 insertions(+), 32 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+index c8144d1..1543e7e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
++++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+@@ -177,9 +177,8 @@ static uint64_t cik_sdma_ring_get_rptr(struct amdgpu_ring *ring)
+ static uint64_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
+ {
+ struct amdgpu_device *adev = ring->adev;
+- u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
+
+- return (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
++ return (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me]) & 0x3fffc) >> 2;
+ }
+
+ /**
+@@ -192,9 +191,8 @@ static uint64_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
+ static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring)
+ {
+ struct amdgpu_device *adev = ring->adev;
+- u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
+
+- WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me],
++ WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me],
+ (lower_32_bits(ring->wptr) << 2) & 0x3fffc);
+ }
+
+@@ -248,7 +246,7 @@ static void cik_sdma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+ SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
+ u32 ref_and_mask;
+
+- if (ring == &ring->adev->sdma.instance[0].ring)
++ if (ring->me == 0)
+ ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA0_MASK;
+ else
+ ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA1_MASK;
+@@ -1298,8 +1296,10 @@ static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
+ {
+ int i;
+
+- for (i = 0; i < adev->sdma.num_instances; i++)
++ for (i = 0; i < adev->sdma.num_instances; i++) {
+ adev->sdma.instance[i].ring.funcs = &cik_sdma_ring_funcs;
++ adev->sdma.instance[i].ring.me = i;
++ }
+ }
+
+ static const struct amdgpu_irq_src_funcs cik_sdma_trap_irq_funcs = {
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+index 702b949..096c6f2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+@@ -202,8 +202,7 @@ static uint64_t sdma_v2_4_ring_get_rptr(struct amdgpu_ring *ring)
+ static uint64_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring)
+ {
+ struct amdgpu_device *adev = ring->adev;
+- int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
+- u32 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;
++ u32 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me]) >> 2;
+
+ return wptr;
+ }
+@@ -218,9 +217,8 @@ static uint64_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring)
+ static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring)
+ {
+ struct amdgpu_device *adev = ring->adev;
+- int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
+
+- WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], lower_32_bits(ring->wptr) << 2);
++ WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me], lower_32_bits(ring->wptr) << 2);
+ }
+
+ static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+@@ -273,7 +271,7 @@ static void sdma_v2_4_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+ {
+ u32 ref_and_mask = 0;
+
+- if (ring == &ring->adev->sdma.instance[0].ring)
++ if (ring->me == 0)
+ ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
+ else
+ ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);
+@@ -1221,8 +1219,10 @@ static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
+ {
+ int i;
+
+- for (i = 0; i < adev->sdma.num_instances; i++)
++ for (i = 0; i < adev->sdma.num_instances; i++) {
+ adev->sdma.instance[i].ring.funcs = &sdma_v2_4_ring_funcs;
++ adev->sdma.instance[i].ring.me = i;
++ }
+ }
+
+ static const struct amdgpu_irq_src_funcs sdma_v2_4_trap_irq_funcs = {
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+index d082751..e074dea 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+@@ -365,9 +365,7 @@ static uint64_t sdma_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
+ /* XXX check if swapping is necessary on BE */
+ wptr = ring->adev->wb.wb[ring->wptr_offs] >> 2;
+ } else {
+- int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
+-
+- wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;
++ wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me]) >> 2;
+ }
+
+ return wptr;
+@@ -394,9 +392,7 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
+
+ WRITE_ONCE(*wb, (lower_32_bits(ring->wptr) << 2));
+ } else {
+- int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
+-
+- WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], lower_32_bits(ring->wptr) << 2);
++ WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me], lower_32_bits(ring->wptr) << 2);
+ }
+ }
+
+@@ -450,7 +446,7 @@ static void sdma_v3_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+ {
+ u32 ref_and_mask = 0;
+
+- if (ring == &ring->adev->sdma.instance[0].ring)
++ if (ring->me == 0)
+ ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
+ else
+ ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);
+@@ -1664,8 +1660,10 @@ static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
+ {
+ int i;
+
+- for (i = 0; i < adev->sdma.num_instances; i++)
++ for (i = 0; i < adev->sdma.num_instances; i++) {
+ adev->sdma.instance[i].ring.funcs = &sdma_v3_0_ring_funcs;
++ adev->sdma.instance[i].ring.me = i;
++ }
+ }
+
+ static const struct amdgpu_irq_src_funcs sdma_v3_0_trap_irq_funcs = {
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+index 0b076d4..55d2c17 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+@@ -296,13 +296,12 @@ static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
+ DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
+ } else {
+ u32 lowbit, highbit;
+- int me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
+
+- lowbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR)) >> 2;
+- highbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2;
++ lowbit = RREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR)) >> 2;
++ highbit = RREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2;
+
+ DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n",
+- me, highbit, lowbit);
++ ring->me, highbit, lowbit);
+ wptr = highbit;
+ wptr = wptr << 32;
+ wptr |= lowbit;
+@@ -339,17 +338,15 @@ static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
+ ring->doorbell_index, ring->wptr << 2);
+ WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
+ } else {
+- int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
+-
+ DRM_DEBUG("Not using doorbell -- "
+ "mmSDMA%i_GFX_RB_WPTR == 0x%08x "
+ "mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
+- me,
++ ring->me,
+ lower_32_bits(ring->wptr << 2),
+- me,
++ ring->me,
+ upper_32_bits(ring->wptr << 2));
+- WREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
+- WREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
++ WREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
++ WREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
+ }
+ }
+
+@@ -405,7 +402,7 @@ static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+ u32 ref_and_mask = 0;
+ const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
+
+- if (ring == &ring->adev->sdma.instance[0].ring)
++ if (ring->me == 0)
+ ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0;
+ else
+ ref_and_mask = nbio_hf_reg->ref_and_mask_sdma1;
+@@ -1656,8 +1653,10 @@ static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
+ {
+ int i;
+
+- for (i = 0; i < adev->sdma.num_instances; i++)
++ for (i = 0; i < adev->sdma.num_instances; i++) {
+ adev->sdma.instance[i].ring.funcs = &sdma_v4_0_ring_funcs;
++ adev->sdma.instance[i].ring.me = i;
++ }
+ }
+
+ static const struct amdgpu_irq_src_funcs sdma_v4_0_trap_irq_funcs = {
+--
+2.7.4
+
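
The change above replaces "which instance is this ring?" pointer comparisons with an index (ring->me) stored once during init. A simplified standalone sketch of the before/after, with reduced structures that only model the indexing:

#include <stdio.h>

#define NUM_INSTANCES 2

struct ring {
	unsigned int me;   /* instance index, assigned once at init */
};

struct instance {
	struct ring ring;
};

struct device {
	struct instance inst[NUM_INSTANCES];
};

/* Old style: derive the index from the pointer; this only works for
 * two instances and repeats the comparison at every register access.
 */
static unsigned int index_from_pointer(struct device *dev, struct ring *r)
{
	return (r == &dev->inst[0].ring) ? 0 : 1;
}

static void init_rings(struct device *dev)
{
	unsigned int i;

	for (i = 0; i < NUM_INSTANCES; i++)
		dev->inst[i].ring.me = i;   /* new style: set once at init */
}

int main(void)
{
	struct device dev;
	struct ring *r1 = &dev.inst[1].ring;

	init_rings(&dev);
	printf("pointer-derived index: %u\n", index_from_pointer(&dev, r1));
	printf("stored ring->me      : %u\n", r1->me);
	return 0;
}
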
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4835-drm-amdgpu-vce-simplify-vce-instance-setup.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4835-drm-amdgpu-vce-simplify-vce-instance-setup.patch
new file mode 100644
index 00000000..48f18591
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4835-drm-amdgpu-vce-simplify-vce-instance-setup.patch
@@ -0,0 +1,179 @@
+From a1c62379895d6c73c6265426fbaad3cd4ed6d569 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Mon, 25 Jun 2018 12:41:21 -0500
+Subject: [PATCH 4835/5725] drm/amdgpu/vce: simplify vce instance setup
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Set the me instance in early init and use that rather than
+calculating the instance based on the ring pointer.
+
+Reviewed-by: James Zhu <James.Zhu@amd.com>
+Reviewed-by: Leo Liu <leo.liu@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/vce_v2_0.c | 10 ++++++----
+ drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 20 ++++++++++++--------
+ drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 16 +++++++++-------
+ 3 files changed, 27 insertions(+), 19 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+index 47f7082..d48e877 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+@@ -56,7 +56,7 @@ static uint64_t vce_v2_0_ring_get_rptr(struct amdgpu_ring *ring)
+ {
+ struct amdgpu_device *adev = ring->adev;
+
+- if (ring == &adev->vce.ring[0])
++ if (ring->me == 0)
+ return RREG32(mmVCE_RB_RPTR);
+ else
+ return RREG32(mmVCE_RB_RPTR2);
+@@ -73,7 +73,7 @@ static uint64_t vce_v2_0_ring_get_wptr(struct amdgpu_ring *ring)
+ {
+ struct amdgpu_device *adev = ring->adev;
+
+- if (ring == &adev->vce.ring[0])
++ if (ring->me == 0)
+ return RREG32(mmVCE_RB_WPTR);
+ else
+ return RREG32(mmVCE_RB_WPTR2);
+@@ -90,7 +90,7 @@ static void vce_v2_0_ring_set_wptr(struct amdgpu_ring *ring)
+ {
+ struct amdgpu_device *adev = ring->adev;
+
+- if (ring == &adev->vce.ring[0])
++ if (ring->me == 0)
+ WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
+ else
+ WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
+@@ -627,8 +627,10 @@ static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
+ {
+ int i;
+
+- for (i = 0; i < adev->vce.num_rings; i++)
++ for (i = 0; i < adev->vce.num_rings; i++) {
+ adev->vce.ring[i].funcs = &vce_v2_0_ring_funcs;
++ adev->vce.ring[i].me = i;
++ }
+ }
+
+ static const struct amdgpu_irq_src_funcs vce_v2_0_irq_funcs = {
+diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+index a71b975..99604d0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+@@ -86,9 +86,9 @@ static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
+ else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
+ WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
+
+- if (ring == &adev->vce.ring[0])
++ if (ring->me == 0)
+ v = RREG32(mmVCE_RB_RPTR);
+- else if (ring == &adev->vce.ring[1])
++ else if (ring->me == 1)
+ v = RREG32(mmVCE_RB_RPTR2);
+ else
+ v = RREG32(mmVCE_RB_RPTR3);
+@@ -118,9 +118,9 @@ static uint64_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
+ else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
+ WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
+
+- if (ring == &adev->vce.ring[0])
++ if (ring->me == 0)
+ v = RREG32(mmVCE_RB_WPTR);
+- else if (ring == &adev->vce.ring[1])
++ else if (ring->me == 1)
+ v = RREG32(mmVCE_RB_WPTR2);
+ else
+ v = RREG32(mmVCE_RB_WPTR3);
+@@ -149,9 +149,9 @@ static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
+ else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
+ WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
+
+- if (ring == &adev->vce.ring[0])
++ if (ring->me == 0)
+ WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
+- else if (ring == &adev->vce.ring[1])
++ else if (ring->me == 1)
+ WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
+ else
+ WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
+@@ -942,12 +942,16 @@ static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
+ int i;
+
+ if (adev->asic_type >= CHIP_STONEY) {
+- for (i = 0; i < adev->vce.num_rings; i++)
++ for (i = 0; i < adev->vce.num_rings; i++) {
+ adev->vce.ring[i].funcs = &vce_v3_0_ring_vm_funcs;
++ adev->vce.ring[i].me = i;
++ }
+ DRM_INFO("VCE enabled in VM mode\n");
+ } else {
+- for (i = 0; i < adev->vce.num_rings; i++)
++ for (i = 0; i < adev->vce.num_rings; i++) {
+ adev->vce.ring[i].funcs = &vce_v3_0_ring_phys_funcs;
++ adev->vce.ring[i].me = i;
++ }
+ DRM_INFO("VCE enabled in physical mode\n");
+ }
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+index 8fd1b74..575bf97 100755
+--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+@@ -60,9 +60,9 @@ static uint64_t vce_v4_0_ring_get_rptr(struct amdgpu_ring *ring)
+ {
+ struct amdgpu_device *adev = ring->adev;
+
+- if (ring == &adev->vce.ring[0])
++ if (ring->me == 0)
+ return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR));
+- else if (ring == &adev->vce.ring[1])
++ else if (ring->me == 1)
+ return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR2));
+ else
+ return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR3));
+@@ -82,9 +82,9 @@ static uint64_t vce_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
+ if (ring->use_doorbell)
+ return adev->wb.wb[ring->wptr_offs];
+
+- if (ring == &adev->vce.ring[0])
++ if (ring->me == 0)
+ return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR));
+- else if (ring == &adev->vce.ring[1])
++ else if (ring->me == 1)
+ return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2));
+ else
+ return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR3));
+@@ -108,10 +108,10 @@ static void vce_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
+ return;
+ }
+
+- if (ring == &adev->vce.ring[0])
++ if (ring->me == 0)
+ WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR),
+ lower_32_bits(ring->wptr));
+- else if (ring == &adev->vce.ring[1])
++ else if (ring->me == 1)
+ WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2),
+ lower_32_bits(ring->wptr));
+ else
+@@ -1088,8 +1088,10 @@ static void vce_v4_0_set_ring_funcs(struct amdgpu_device *adev)
+ {
+ int i;
+
+- for (i = 0; i < adev->vce.num_rings; i++)
++ for (i = 0; i < adev->vce.num_rings; i++) {
+ adev->vce.ring[i].funcs = &vce_v4_0_ring_vm_funcs;
++ adev->vce.ring[i].me = i;
++ }
+ DRM_INFO("VCE enabled in VM mode\n");
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4836-drm-amd-Replace-drm_dev_unref-with-drm_dev_put.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4836-drm-amd-Replace-drm_dev_unref-with-drm_dev_put.patch
new file mode 100644
index 00000000..2776faa3
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4836-drm-amd-Replace-drm_dev_unref-with-drm_dev_put.patch
@@ -0,0 +1,80 @@
+From fa483a1f1886ec73160253b53f3fa330c941d8e1 Mon Sep 17 00:00:00 2001
+From: Thomas Zimmermann <contact@tzimmermann.org>
+Date: Thu, 28 Jun 2018 16:10:25 +0200
+Subject: [PATCH 4836/5725] drm/amd: Replace drm_dev_unref with drm_dev_put
+
+This patch unifies the naming of DRM functions for reference counting
+of struct drm_device. The resulting code is more aligned with the rest
+of the Linux kernel interfaces.
+
+Signed-off-by: Thomas Zimmermann <contact@tzimmermann.org>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 4 ++--
+ drivers/gpu/drm/drm_drv.c | 14 ++++++++++++++
+ include/drm/drm_drv.h | 1 +
+ 3 files changed, 17 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 19f1a74..d01d4f0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -669,7 +669,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
+ err_pci:
+ pci_disable_device(pdev);
+ err_free:
+- drm_dev_unref(dev);
++ drm_dev_put(dev);
+ return ret;
+ }
+
+@@ -679,7 +679,7 @@ amdgpu_pci_remove(struct pci_dev *pdev)
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ drm_dev_unregister(dev);
+- drm_dev_unref(dev);
++ drm_dev_put(dev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ }
+diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
+index a7b6734..d5a25e6 100644
+--- a/drivers/gpu/drm/drm_drv.c
++++ b/drivers/gpu/drm/drm_drv.c
+@@ -670,6 +670,20 @@ static void drm_dev_release(struct kref *ref)
+ }
+
+ /**
++ * drm_dev_put - Drop reference of a DRM device
++ * @dev: device to drop reference of or NULL
++ *
++ * This decreases the ref-count of @dev by one. The device is destroyed if the
++ * ref-count drops to zero.
++ */
++void drm_dev_put(struct drm_device *dev)
++{
++ if (dev)
++ kref_put(&dev->ref, drm_dev_release);
++}
++EXPORT_SYMBOL(drm_dev_put);
++
++/**
+ * drm_dev_ref - Take reference of a DRM device
+ * @dev: device to take reference of or NULL
+ *
+diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
+index f08273c..5c3d3f7 100644
+--- a/include/drm/drm_drv.h
++++ b/include/drm/drm_drv.h
+@@ -614,6 +614,7 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
+ int drm_dev_register(struct drm_device *dev, unsigned long flags);
+ void drm_dev_unregister(struct drm_device *dev);
+
++void drm_dev_put(struct drm_device *dev);
+ void drm_dev_ref(struct drm_device *dev);
+ void drm_dev_unref(struct drm_device *dev);
+ void drm_put_dev(struct drm_device *dev);
+--
+2.7.4
+
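
The drm_dev_put() helper introduced above is a NULL-tolerant reference drop that releases the device on the last reference. The sketch below models that behaviour with a plain counter instead of the kernel's struct kref; the names are illustrative.

#include <stdio.h>
#include <stdlib.h>

struct object {
	unsigned int refcount;
};

static struct object *object_get_new(void)
{
	struct object *obj = calloc(1, sizeof(*obj));

	if (obj)
		obj->refcount = 1;
	return obj;
}

/* Accepts NULL, like drm_dev_put(); frees the object on the last drop. */
static void object_put(struct object *obj)
{
	if (!obj)
		return;
	if (--obj->refcount == 0) {
		printf("last reference dropped, freeing\n");
		free(obj);
	}
}

int main(void)
{
	struct object *obj = object_get_new();

	object_put(NULL);  /* harmless no-op */
	object_put(obj);   /* frees */
	return 0;
}
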
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4837-drm-amd-add-SPDX-identifier-and-clarify-license.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4837-drm-amd-add-SPDX-identifier-and-clarify-license.patch
new file mode 100644
index 00000000..fcbb647f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4837-drm-amd-add-SPDX-identifier-and-clarify-license.patch
@@ -0,0 +1,26 @@
+From 6383b7b4222de7deb1b838eb9f554b81ad46b447 Mon Sep 17 00:00:00 2001
+From: "Dirk Hohndel (VMware)" <dirk@hohndel.org>
+Date: Mon, 7 May 2018 01:16:20 +0200
+Subject: [PATCH 4837/5725] drm/amd: add SPDX identifier and clarify license
+
+This is dual licensed under GPL-2.0 or MIT.
+
+Acked-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Dirk Hohndel (VMware) <dirk@hohndel.org>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_test.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+index d167e8a..4c600ba 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+@@ -1,3 +1,4 @@
++// SPDX-License-Identifier: GPL-2.0 OR MIT
+ /*
+ * Copyright 2009 VMware, Inc.
+ *
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4838-drm-amdgpu-fix-the-wrong-type-of-gem-object-creation.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4838-drm-amdgpu-fix-the-wrong-type-of-gem-object-creation.patch
new file mode 100644
index 00000000..697dfff7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4838-drm-amdgpu-fix-the-wrong-type-of-gem-object-creation.patch
@@ -0,0 +1,67 @@
+From 62dae4abbf71a52cf03938be0630ebda51854861 Mon Sep 17 00:00:00 2001
+From: Huang Rui <ray.huang@amd.com>
+Date: Thu, 28 Jun 2018 14:38:21 +0800
+Subject: [PATCH 4838/5725] drm/amdgpu: fix the wrong type of gem object
+ creation
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+We still pass the legacy bool type to gem object creation; it should be
+updated to use ttm_bo_type now.
+
+Signed-off-by: Huang Rui <ray.huang@amd.com>
+Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 6 +++---
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+index cc55fa5..6b65342 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+@@ -146,7 +146,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
+ AMDGPU_GEM_CREATE_VRAM_CLEARED,
+- true, NULL, &gobj);
++ ttm_bo_type_kernel, NULL, &gobj);
+ if (ret) {
+ pr_err("failed to allocate framebuffer (%d)\n", aligned_size);
+ return -ENOMEM;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+index 330c07a..868832e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+@@ -297,7 +297,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
+
+ r = amdgpu_gem_object_create(adev, size, args->in.alignment,
+ (u32)(0xffffffff & args->in.domains),
+- flags, false, resv, &gobj);
++ flags, ttm_bo_type_device, resv, &gobj);
+ if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
+ if (!r) {
+ struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
+@@ -410,7 +410,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
+
+ /* create a gem object to contain this object in */
+ r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
+- 0, 0, NULL, &gobj);
++ 0, ttm_bo_type_device, NULL, &gobj);
+ if (r)
+ return r;
+
+@@ -916,7 +916,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
+ amdgpu_display_supported_domains(adev));
+ r = amdgpu_gem_object_create(adev, args->size, 0, domain,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+- false, NULL, &gobj);
++ ttm_bo_type_device, NULL, &gobj);
+ if (r)
+ return -ENOMEM;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4839-drm-amdgpu-update-uvd_v6_0_ring_vm_funcs-to-use-new-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4839-drm-amdgpu-update-uvd_v6_0_ring_vm_funcs-to-use-new-.patch
new file mode 100644
index 00000000..019bece0
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4839-drm-amdgpu-update-uvd_v6_0_ring_vm_funcs-to-use-new-.patch
@@ -0,0 +1,41 @@
+From f9f2cf203abe830330ceb2445846cb6d787da1c6 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 28 Jun 2018 12:32:27 -0500
+Subject: [PATCH 4839/5725] drm/amdgpu: update uvd_v6_0_ring_vm_funcs to use
+ new nop packet
+
+Was missed when updating the uvd 6 module.
+
+Fixes: 1aac3c9180 (drm/amdgpu: fix insert nop for UVD6 ring)
+Reviewed-by: Leo Liu <leo.liu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+index 37bb32b..6f008a0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+@@ -1588,7 +1588,6 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
+ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_UVD,
+ .align_mask = 0xf,
+- .nop = PACKET0(mmUVD_NO_OP, 0),
+ .support_64bit_ptrs = false,
+ .get_rptr = uvd_v6_0_ring_get_rptr,
+ .get_wptr = uvd_v6_0_ring_get_wptr,
+@@ -1608,7 +1607,7 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
+ .emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
+ .test_ring = uvd_v6_0_ring_test_ring,
+ .test_ib = amdgpu_uvd_ring_test_ib,
+- .insert_nop = amdgpu_ring_insert_nop,
++ .insert_nop = uvd_v6_0_ring_insert_nop,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_uvd_ring_begin_use,
+ .end_use = amdgpu_uvd_ring_end_use,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4840-drm-amd-pp-Convert-clock-unit-to-KHz-as-defined.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4840-drm-amd-pp-Convert-clock-unit-to-KHz-as-defined.patch
new file mode 100644
index 00000000..f49acda3
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4840-drm-amd-pp-Convert-clock-unit-to-KHz-as-defined.patch
@@ -0,0 +1,165 @@
+From df6b61e83b70a0341e355c7002496e4c4781e1af Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Mon, 18 Jun 2018 18:15:15 +0800
+Subject: [PATCH 4840/5725] drm/amd/pp: Convert clock unit to KHz as defined
+
+Convert the clock unit from 10KHz to KHz as the data struct defines.
+e.g.
+struct pp_clock_with_latency {
+ uint32_t clocks_in_khz;
+ uint32_t latency_in_us;
+};
+Meanwhile, revert the corresponding conversion on the display side.
+
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ .../drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | 18 ++++++---------
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 27 ++++++++++------------
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 12 ++++------
+ 3 files changed, 24 insertions(+), 33 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+index cf92d7a..596d49d 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+@@ -203,8 +203,7 @@ static void pp_to_dc_clock_levels(
+
+ for (i = 0; i < dc_clks->num_levels; i++) {
+ DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->clock[i]);
+- /* translate 10kHz to kHz */
+- dc_clks->clocks_in_khz[i] = pp_clks->clock[i] * 10;
++ dc_clks->clocks_in_khz[i] = pp_clks->clock[i];
+ }
+ }
+
+@@ -229,9 +228,8 @@ static void pp_to_dc_clock_levels_with_latency(
+ DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
+
+ for (i = 0; i < clk_level_info->num_levels; i++) {
+- DRM_DEBUG("DM_PPLIB:\t %d in 10kHz\n", pp_clks->data[i].clocks_in_khz);
+- /* translate 10kHz to kHz */
+- clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz * 10;
++ DRM_DEBUG("DM_PPLIB:\t %d in kHz\n", pp_clks->data[i].clocks_in_khz);
++ clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
+ clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us;
+ }
+ }
+@@ -257,9 +255,8 @@ static void pp_to_dc_clock_levels_with_voltage(
+ DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
+
+ for (i = 0; i < clk_level_info->num_levels; i++) {
+- DRM_INFO("DM_PPLIB:\t %d in 10kHz\n", pp_clks->data[i].clocks_in_khz);
+- /* translate 10kHz to kHz */
+- clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz * 10;
++ DRM_INFO("DM_PPLIB:\t %d in kHz\n", pp_clks->data[i].clocks_in_khz);
++ clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
+ clk_level_info->data[i].voltage_in_mv = pp_clks->data[i].voltage_in_mv;
+ }
+ }
+@@ -434,9 +431,8 @@ bool dm_pp_get_static_clocks(
+ return false;
+
+ static_clk_info->max_clocks_state = pp_clk_info.max_clocks_state;
+- /* translate 10kHz to kHz */
+- static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock * 10;
+- static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock * 10;
++ static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock;
++ static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock;
+
+ return true;
+ }
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+index da29871..71a6e62 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+@@ -4082,7 +4082,7 @@ static void vega10_get_sclks(struct pp_hwmgr *hwmgr,
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
+ table_info->vdd_dep_on_sclk;
+ uint32_t i;
+-
++ clocks->num_levels = 0;
+ for (i = 0; i < dep_table->count; i++) {
+ if (dep_table->entries[i].clk) {
+ clocks->data[clocks->num_levels].clocks_in_khz =
+@@ -4113,26 +4113,23 @@ static void vega10_get_memclocks(struct pp_hwmgr *hwmgr,
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
+ table_info->vdd_dep_on_mclk;
+ struct vega10_hwmgr *data = hwmgr->backend;
++ uint32_t j = 0;
+ uint32_t i;
+
+- clocks->num_levels = 0;
+- data->mclk_latency_table.count = 0;
+-
+ for (i = 0; i < dep_table->count; i++) {
+ if (dep_table->entries[i].clk) {
+- clocks->data[clocks->num_levels].clocks_in_khz =
+- data->mclk_latency_table.entries
+- [data->mclk_latency_table.count].frequency =
+- dep_table->entries[i].clk * 10;
+- clocks->data[clocks->num_levels].latency_in_us =
+- data->mclk_latency_table.entries
+- [data->mclk_latency_table.count].latency =
+- vega10_get_mem_latency(hwmgr,
+- dep_table->entries[i].clk);
+- clocks->num_levels++;
+- data->mclk_latency_table.count++;
++ clocks->data[j].clocks_in_khz =
++ dep_table->entries[i].clk * 10;
++ data->mclk_latency_table.entries[j].frequency =
++ dep_table->entries[i].clk;
++ clocks->data[j].latency_in_us =
++ data->mclk_latency_table.entries[j].latency =
++ vega10_get_mem_latency(hwmgr,
++ dep_table->entries[i].clk);
++ j++;
+ }
+ }
++ clocks->num_levels = data->mclk_latency_table.count = j;
+ }
+
+ static void vega10_get_dcefclocks(struct pp_hwmgr *hwmgr,
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+index 9e9a3db..4cf2570 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+@@ -1676,10 +1676,8 @@ static int vega12_get_memclocks(struct pp_hwmgr *hwmgr,
+ MAX_NUM_CLOCKS : dpm_table->count;
+
+ for (i = 0; i < ucount; i++) {
+- clocks->data[i].clocks_in_khz =
+- data->mclk_latency_table.entries[i].frequency =
+- dpm_table->dpm_levels[i].value * 1000;
+-
++ clocks->data[i].clocks_in_khz = dpm_table->dpm_levels[i].value * 1000;
++ data->mclk_latency_table.entries[i].frequency = dpm_table->dpm_levels[i].value * 100;
+ clocks->data[i].latency_in_us =
+ data->mclk_latency_table.entries[i].latency =
+ vega12_get_mem_latency(hwmgr, dpm_table->dpm_levels[i].value);
+@@ -1919,7 +1917,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
+ for (i = 0; i < clocks.num_levels; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, clocks.data[i].clocks_in_khz / 1000,
+- (clocks.data[i].clocks_in_khz == now) ? "*" : "");
++ (clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : "");
+ break;
+
+ case PP_MCLK:
+@@ -1934,8 +1932,8 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
+ return -1);
+ for (i = 0; i < clocks.num_levels; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+- i, clocks.data[i].clocks_in_khz / 100,
+- (clocks.data[i].clocks_in_khz == now) ? "*" : "");
++ i, clocks.data[i].clocks_in_khz / 1000,
++ (clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : "");
+ break;
+
+ case PP_PCIE:
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4841-drm-amd-pp-Memory-Latency-is-always-25us-on-Vega10.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4841-drm-amd-pp-Memory-Latency-is-always-25us-on-Vega10.patch
new file mode 100644
index 00000000..6e198f30
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4841-drm-amd-pp-Memory-Latency-is-always-25us-on-Vega10.patch
@@ -0,0 +1,66 @@
+From 56e4a6b522f82beaf5cc326eb2115bc09a4b9702 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Mon, 18 Jun 2018 18:49:07 +0800
+Subject: [PATCH 4841/5725] drm/amd/pp: Memory Latency is always 25us on Vega10
+
+For HBM, a latency of 25us is enough for a memory clock switch.
+
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 22 +---------------------
+ 1 file changed, 1 insertion(+), 21 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+index 71a6e62..b293a68 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+@@ -55,12 +55,6 @@
+
+ static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
+
+-#define MEM_FREQ_LOW_LATENCY 25000
+-#define MEM_FREQ_HIGH_LATENCY 80000
+-#define MEM_LATENCY_HIGH 245
+-#define MEM_LATENCY_LOW 35
+-#define MEM_LATENCY_ERR 0xFFFF
+-
+ #define mmDF_CS_AON0_DramBaseAddress0 0x0044
+ #define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX 0
+
+@@ -4093,18 +4087,6 @@ static void vega10_get_sclks(struct pp_hwmgr *hwmgr,
+
+ }
+
+-static uint32_t vega10_get_mem_latency(struct pp_hwmgr *hwmgr,
+- uint32_t clock)
+-{
+- if (clock >= MEM_FREQ_LOW_LATENCY &&
+- clock < MEM_FREQ_HIGH_LATENCY)
+- return MEM_LATENCY_HIGH;
+- else if (clock >= MEM_FREQ_HIGH_LATENCY)
+- return MEM_LATENCY_LOW;
+- else
+- return MEM_LATENCY_ERR;
+-}
+-
+ static void vega10_get_memclocks(struct pp_hwmgr *hwmgr,
+ struct pp_clock_levels_with_latency *clocks)
+ {
+@@ -4123,9 +4105,7 @@ static void vega10_get_memclocks(struct pp_hwmgr *hwmgr,
+ data->mclk_latency_table.entries[j].frequency =
+ dep_table->entries[i].clk;
+ clocks->data[j].latency_in_us =
+- data->mclk_latency_table.entries[j].latency =
+- vega10_get_mem_latency(hwmgr,
+- dep_table->entries[i].clk);
++ data->mclk_latency_table.entries[j].latency = 25;
+ j++;
+ }
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4842-drm-amd-pp-Switch-the-tolerable-latency-for-display.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4842-drm-amd-pp-Switch-the-tolerable-latency-for-display.patch
new file mode 100644
index 00000000..06a0d8cb
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4842-drm-amd-pp-Switch-the-tolerable-latency-for-display.patch
@@ -0,0 +1,33 @@
+From d051c9d141387eeeabca17afd872c4e8fc32a5c5 Mon Sep 17 00:00:00 2001
+From: rex zhu <rex.zhu@amd.com>
+Date: Thu, 28 Jun 2018 13:55:46 +0800
+Subject: [PATCH 4842/5725] drm/amd/pp: Switch the tolerable latency for
+ display
+
+Select the lowest MCLK frequency that is within
+the tolerable latency defined in DISPLAY.
+
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+index b293a68..5f00760 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+@@ -3221,7 +3221,7 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
+ /* Find the lowest MCLK frequency that is within
+ * the tolerable latency defined in DAL
+ */
+- latency = 0;
++ latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
+ for (i = 0; i < data->mclk_latency_table.count; i++) {
+ if ((data->mclk_latency_table.entries[i].latency <= latency) &&
+ (data->mclk_latency_table.entries[i].frequency >=
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4843-drm-amd-display-Notify-powerplay-the-min_dcef-clock.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4843-drm-amd-display-Notify-powerplay-the-min_dcef-clock.patch
new file mode 100644
index 00000000..2b6eb2b2
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4843-drm-amd-display-Notify-powerplay-the-min_dcef-clock.patch
@@ -0,0 +1,35 @@
+From f1eeb7ae2d2232efd8b899c023d2e0eb5bf60e6c Mon Sep 17 00:00:00 2001
+From: rex zhu <rex.zhu@amd.com>
+Date: Mon, 2 Jul 2018 14:55:43 +0800
+Subject: [PATCH 4843/5725] drm/amd/display: Notify powerplay the min_dcef
+ clock
+
+powerplay can notify the smu to recalculate the maximum deep-sleep
+divider allowed for the display.
+
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+index 596d49d..2e801ba 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+@@ -71,6 +71,11 @@ bool dm_pp_apply_display_requirements(
+ adev->pm.pm_display_cfg.min_mem_set_clock =
+ pp_display_cfg->min_memory_clock_khz/10;
+
++ adev->pm.pm_display_cfg.min_dcef_deep_sleep_set_clk =
++ pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
++ adev->pm.pm_display_cfg.min_dcef_set_clk =
++ pp_display_cfg->min_dcfclock_khz/10;
++
+ adev->pm.pm_display_cfg.multi_monitor_in_sync =
+ pp_display_cfg->all_displays_in_sync;
+ adev->pm.pm_display_cfg.min_vblank_time =
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4844-drm-amd-display-Notify-powerplay-the-display-control.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4844-drm-amd-display-Notify-powerplay-the-display-control.patch
new file mode 100644
index 00000000..5911ae91
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4844-drm-amd-display-Notify-powerplay-the-display-control.patch
@@ -0,0 +1,43 @@
+From 578a424a263911f422a4b089dfc61e1f9681f40f Mon Sep 17 00:00:00 2001
+From: rex zhu <rex.zhu@amd.com>
+Date: Mon, 2 Jul 2018 16:20:56 +0800
+Subject: [PATCH 4844/5725] drm/amd/display: Notify powerplay the display
+ controller id
+
+powerplay can recalculate the number of active displays.
+
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+index 2e801ba..8184511 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+@@ -41,6 +41,7 @@ bool dm_pp_apply_display_requirements(
+ const struct dm_pp_display_configuration *pp_display_cfg)
+ {
+ struct amdgpu_device *adev = ctx->driver_context;
++ int i;
+
+ if (adev->pm.dpm_enabled) {
+
+@@ -95,6 +96,12 @@ bool dm_pp_apply_display_requirements(
+ adev->pm.pm_display_cfg.crossfire_display_index = -1;
+ adev->pm.pm_display_cfg.min_bus_bandwidth = 0;
+
++ for (i = 0; i < pp_display_cfg->display_count; i++) {
++ const struct dm_pp_single_disp_config *dc_cfg =
++ &pp_display_cfg->disp_configs[i];
++ adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1;
++ }
++
+ /* TODO: complete implementation of
+ * pp_display_configuration_change().
+ * Follow example of:
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4845-drm-amd-pp-Refine-the-interface-exported-to-display.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4845-drm-amd-pp-Refine-the-interface-exported-to-display.patch
new file mode 100644
index 00000000..42ed8bc7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4845-drm-amd-pp-Refine-the-interface-exported-to-display.patch
@@ -0,0 +1,169 @@
+From d26360e4a01d8b74a5e220f714c4747766b7d763 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Fri, 22 Jun 2018 18:26:52 +0800
+Subject: [PATCH 4845/5725] drm/amd/pp: Refine the interface exported to
+ display
+
+Use void * as the function parameter type to allow for future extension.
+
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/include/kgd_pp_interface.h | 3 +--
+ drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 6 +++---
+ drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c | 4 ++--
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 3 ++-
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 3 ++-
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 3 ++-
+ drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h | 2 +-
+ drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 3 +--
+ 8 files changed, 14 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+index 4535756..99ee3ed 100644
+--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
++++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+@@ -192,7 +192,6 @@ struct amd_pp_simple_clock_info;
+ struct amd_pp_display_configuration;
+ struct amd_pp_clock_info;
+ struct pp_display_clock_request;
+-struct pp_wm_sets_with_clock_ranges_soc15;
+ struct pp_clock_levels_with_voltage;
+ struct pp_clock_levels_with_latency;
+ struct amd_pp_clocks;
+@@ -261,7 +260,7 @@ struct amd_pm_funcs {
+ enum amd_pp_clock_type type,
+ struct pp_clock_levels_with_voltage *clocks);
+ int (*set_watermarks_for_clocks_ranges)(void *handle,
+- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges);
++ void *clock_ranges);
+ int (*display_clock_voltage_request)(void *handle,
+ struct pp_display_clock_request *clock);
+ int (*get_display_mode_validation_clocks)(void *handle,
+diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+index 9e54bbe..68c19d4 100644
+--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
++++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+@@ -1106,17 +1106,17 @@ static int pp_get_clock_by_type_with_voltage(void *handle,
+ }
+
+ static int pp_set_watermarks_for_clocks_ranges(void *handle,
+- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
++ void *clock_ranges)
+ {
+ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+
+- if (!hwmgr || !hwmgr->pm_en ||!wm_with_clock_ranges)
++ if (!hwmgr || !hwmgr->pm_en || !clock_ranges)
+ return -EINVAL;
+
+ mutex_lock(&hwmgr->smu_lock);
+ ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
+- wm_with_clock_ranges);
++ clock_ranges);
+ mutex_unlock(&hwmgr->smu_lock);
+
+ return ret;
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+index a0bb921..53207e7 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+@@ -435,7 +435,7 @@ int phm_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
+ }
+
+ int phm_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
+- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
++ void *clock_ranges)
+ {
+ PHM_FUNC_CHECK(hwmgr);
+
+@@ -443,7 +443,7 @@ int phm_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
+ return -EINVAL;
+
+ return hwmgr->hwmgr_func->set_watermarks_for_clocks_ranges(hwmgr,
+- wm_with_clock_ranges);
++ clock_ranges);
+ }
+
+ int phm_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+index 07cc98c..02fc0bc 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+@@ -1108,9 +1108,10 @@ static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
+ }
+
+ static int smu10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
+- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
++ void *clock_ranges)
+ {
+ struct smu10_hwmgr *data = hwmgr->backend;
++ struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges;
+ Watermarks_t *table = &(data->water_marks_table);
+ int result = 0;
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+index 5f00760..064b4467 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+@@ -4211,9 +4211,10 @@ static int vega10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
+ }
+
+ static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
+- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
++ void *clock_range)
+ {
+ struct vega10_hwmgr *data = hwmgr->backend;
++ struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_range;
+ Watermarks_t *table = &(data->smc_state_table.water_marks_table);
+ int result = 0;
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+index 4cf2570..448014b 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+@@ -1781,10 +1781,11 @@ static int vega12_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
+ }
+
+ static int vega12_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
+- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
++ void *clock_ranges)
+ {
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ Watermarks_t *table = &(data->smc_state_table.water_marks_table);
++ struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges;
+ uint32_t i;
+
+ if (!data->registry_data.disable_water_mark &&
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+index a202247..429c9c4 100644
+--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+@@ -455,7 +455,7 @@ extern int phm_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
+ enum amd_pp_clock_type type,
+ struct pp_clock_levels_with_voltage *clocks);
+ extern int phm_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
+- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges);
++ void *clock_ranges);
+ extern int phm_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
+ struct pp_display_clock_request *clock);
+
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+index 95e29a2..b3363f2 100644
+--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+@@ -293,8 +293,7 @@ struct pp_hwmgr_func {
+ int (*get_clock_by_type_with_voltage)(struct pp_hwmgr *hwmgr,
+ enum amd_pp_clock_type type,
+ struct pp_clock_levels_with_voltage *clocks);
+- int (*set_watermarks_for_clocks_ranges)(struct pp_hwmgr *hwmgr,
+- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges);
++ int (*set_watermarks_for_clocks_ranges)(struct pp_hwmgr *hwmgr, void *clock_ranges);
+ int (*display_clock_voltage_request)(struct pp_hwmgr *hwmgr,
+ struct pp_display_clock_request *clock);
+ int (*get_max_high_clocks)(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4846-drm-amd-pp-Remove-duplicate-code-in-vega12_hwmgr.c.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4846-drm-amd-pp-Remove-duplicate-code-in-vega12_hwmgr.c.patch
new file mode 100644
index 00000000..bdbc42f9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4846-drm-amd-pp-Remove-duplicate-code-in-vega12_hwmgr.c.patch
@@ -0,0 +1,76 @@
+From cc7be832e02076702a2ff63163d46a5e30704cb4 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Wed, 20 Jun 2018 13:36:58 +0800
+Subject: [PATCH 4846/5725] drm/amd/pp: Remove duplicate code in vega12_hwmgr.c
+
+Use the smu_helper function smu_set_watermarks_for_clocks_ranges
+in vega12_set_watermarks_for_clocks_ranges.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 43 +---------------------
+ 1 file changed, 1 insertion(+), 42 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+index 448014b..0a09075 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+@@ -1786,52 +1786,11 @@ static int vega12_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ Watermarks_t *table = &(data->smc_state_table.water_marks_table);
+ struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges;
+- uint32_t i;
+
+ if (!data->registry_data.disable_water_mark &&
+ data->smu_features[GNLD_DPM_DCEFCLK].supported &&
+ data->smu_features[GNLD_DPM_SOCCLK].supported) {
+- for (i = 0; i < wm_with_clock_ranges->num_wm_sets_dmif; i++) {
+- table->WatermarkRow[WM_DCEFCLK][i].MinClock =
+- cpu_to_le16((uint16_t)
+- (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_dcefclk_in_khz) /
+- 100);
+- table->WatermarkRow[WM_DCEFCLK][i].MaxClock =
+- cpu_to_le16((uint16_t)
+- (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_dcefclk_in_khz) /
+- 100);
+- table->WatermarkRow[WM_DCEFCLK][i].MinUclk =
+- cpu_to_le16((uint16_t)
+- (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_memclk_in_khz) /
+- 100);
+- table->WatermarkRow[WM_DCEFCLK][i].MaxUclk =
+- cpu_to_le16((uint16_t)
+- (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_memclk_in_khz) /
+- 100);
+- table->WatermarkRow[WM_DCEFCLK][i].WmSetting = (uint8_t)
+- wm_with_clock_ranges->wm_sets_dmif[i].wm_set_id;
+- }
+-
+- for (i = 0; i < wm_with_clock_ranges->num_wm_sets_mcif; i++) {
+- table->WatermarkRow[WM_SOCCLK][i].MinClock =
+- cpu_to_le16((uint16_t)
+- (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_socclk_in_khz) /
+- 100);
+- table->WatermarkRow[WM_SOCCLK][i].MaxClock =
+- cpu_to_le16((uint16_t)
+- (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_socclk_in_khz) /
+- 100);
+- table->WatermarkRow[WM_SOCCLK][i].MinUclk =
+- cpu_to_le16((uint16_t)
+- (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_memclk_in_khz) /
+- 100);
+- table->WatermarkRow[WM_SOCCLK][i].MaxUclk =
+- cpu_to_le16((uint16_t)
+- (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_memclk_in_khz) /
+- 100);
+- table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t)
+- wm_with_clock_ranges->wm_sets_mcif[i].wm_set_id;
+- }
++ smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
+ data->water_marks_bitmap |= WaterMarksExist;
+ data->water_marks_bitmap &= ~WaterMarksLoaded;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4847-drm-amdgpu-switch-firmware-path-for-CIK-parts-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4847-drm-amdgpu-switch-firmware-path-for-CIK-parts-v2.patch
new file mode 100644
index 00000000..88fab65a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4847-drm-amdgpu-switch-firmware-path-for-CIK-parts-v2.patch
@@ -0,0 +1,320 @@
+From 2a8cf751841f8bdc1fb51b20c752b6e7b4abd4f1 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Mon, 2 Jul 2018 14:32:28 -0500
+Subject: [PATCH 4847/5725] drm/amdgpu: switch firmware path for CIK parts (v2)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Use a separate firmware path for amdgpu to avoid conflicts
+with radeon on CIK parts.
+
+v2: squash in logic simplification (Alex)
+
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 8 ++--
+ drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 10 ++---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 10 ++---
+ drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 10 ++---
+ drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 24 +++++------
+ drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 72 ++++++++++++++++-----------------
+ drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 9 ++---
+ 7 files changed, 70 insertions(+), 73 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+index e950730..693ec5e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+@@ -314,17 +314,17 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
+ (adev->pdev->revision == 0x81) ||
+ (adev->pdev->device == 0x665f)) {
+ info->is_kicker = true;
+- strcpy(fw_name, "radeon/bonaire_k_smc.bin");
++ strcpy(fw_name, "amdgpu/bonaire_k_smc.bin");
+ } else {
+- strcpy(fw_name, "radeon/bonaire_smc.bin");
++ strcpy(fw_name, "amdgpu/bonaire_smc.bin");
+ }
+ break;
+ case CHIP_HAWAII:
+ if (adev->pdev->revision == 0x80) {
+ info->is_kicker = true;
+- strcpy(fw_name, "radeon/hawaii_k_smc.bin");
++ strcpy(fw_name, "amdgpu/hawaii_k_smc.bin");
+ } else {
+- strcpy(fw_name, "radeon/hawaii_smc.bin");
++ strcpy(fw_name, "amdgpu/hawaii_smc.bin");
+ }
+ break;
+ case CHIP_TOPAZ:
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+index 212fec7..1db0845 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+@@ -53,11 +53,11 @@
+
+ /* Firmware Names */
+ #ifdef CONFIG_DRM_AMDGPU_CIK
+-#define FIRMWARE_BONAIRE "radeon/bonaire_uvd.bin"
+-#define FIRMWARE_KABINI "radeon/kabini_uvd.bin"
+-#define FIRMWARE_KAVERI "radeon/kaveri_uvd.bin"
+-#define FIRMWARE_HAWAII "radeon/hawaii_uvd.bin"
+-#define FIRMWARE_MULLINS "radeon/mullins_uvd.bin"
++#define FIRMWARE_BONAIRE "amdgpu/bonaire_uvd.bin"
++#define FIRMWARE_KABINI "amdgpu/kabini_uvd.bin"
++#define FIRMWARE_KAVERI "amdgpu/kaveri_uvd.bin"
++#define FIRMWARE_HAWAII "amdgpu/hawaii_uvd.bin"
++#define FIRMWARE_MULLINS "amdgpu/mullins_uvd.bin"
+ #endif
+ #define FIRMWARE_TONGA "amdgpu/tonga_uvd.bin"
+ #define FIRMWARE_CARRIZO "amdgpu/carrizo_uvd.bin"
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+index acad299..b2a4cdb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+@@ -40,11 +40,11 @@
+
+ /* Firmware Names */
+ #ifdef CONFIG_DRM_AMDGPU_CIK
+-#define FIRMWARE_BONAIRE "radeon/bonaire_vce.bin"
+-#define FIRMWARE_KABINI "radeon/kabini_vce.bin"
+-#define FIRMWARE_KAVERI "radeon/kaveri_vce.bin"
+-#define FIRMWARE_HAWAII "radeon/hawaii_vce.bin"
+-#define FIRMWARE_MULLINS "radeon/mullins_vce.bin"
++#define FIRMWARE_BONAIRE "amdgpu/bonaire_vce.bin"
++#define FIRMWARE_KABINI "amdgpu/kabini_vce.bin"
++#define FIRMWARE_KAVERI "amdgpu/kaveri_vce.bin"
++#define FIRMWARE_HAWAII "amdgpu/hawaii_vce.bin"
++#define FIRMWARE_MULLINS "amdgpu/mullins_vce.bin"
+ #endif
+ #define FIRMWARE_TONGA "amdgpu/tonga_vce.bin"
+ #define FIRMWARE_CARRIZO "amdgpu/carrizo_vce.bin"
+diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+index 85b3f46..caaaabf 100644
+--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
++++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+@@ -49,10 +49,10 @@
+ #include "gmc/gmc_7_1_d.h"
+ #include "gmc/gmc_7_1_sh_mask.h"
+
+-MODULE_FIRMWARE("radeon/bonaire_smc.bin");
+-MODULE_FIRMWARE("radeon/bonaire_k_smc.bin");
+-MODULE_FIRMWARE("radeon/hawaii_smc.bin");
+-MODULE_FIRMWARE("radeon/hawaii_k_smc.bin");
++MODULE_FIRMWARE("amdgpu/bonaire_smc.bin");
++MODULE_FIRMWARE("amdgpu/bonaire_k_smc.bin");
++MODULE_FIRMWARE("amdgpu/hawaii_smc.bin");
++MODULE_FIRMWARE("amdgpu/hawaii_k_smc.bin");
+
+ #define MC_CG_ARB_FREQ_F0 0x0a
+ #define MC_CG_ARB_FREQ_F1 0x0b
+@@ -5814,7 +5814,7 @@ static int ci_dpm_init_microcode(struct amdgpu_device *adev)
+ default: BUG();
+ }
+
+- snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
+ err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+index 1543e7e..e1b56e7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
++++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+@@ -54,16 +54,16 @@ static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
+ static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);
+ static int cik_sdma_soft_reset(void *handle);
+
+-MODULE_FIRMWARE("radeon/bonaire_sdma.bin");
+-MODULE_FIRMWARE("radeon/bonaire_sdma1.bin");
+-MODULE_FIRMWARE("radeon/hawaii_sdma.bin");
+-MODULE_FIRMWARE("radeon/hawaii_sdma1.bin");
+-MODULE_FIRMWARE("radeon/kaveri_sdma.bin");
+-MODULE_FIRMWARE("radeon/kaveri_sdma1.bin");
+-MODULE_FIRMWARE("radeon/kabini_sdma.bin");
+-MODULE_FIRMWARE("radeon/kabini_sdma1.bin");
+-MODULE_FIRMWARE("radeon/mullins_sdma.bin");
+-MODULE_FIRMWARE("radeon/mullins_sdma1.bin");
++MODULE_FIRMWARE("amdgpu/bonaire_sdma.bin");
++MODULE_FIRMWARE("amdgpu/bonaire_sdma1.bin");
++MODULE_FIRMWARE("amdgpu/hawaii_sdma.bin");
++MODULE_FIRMWARE("amdgpu/hawaii_sdma1.bin");
++MODULE_FIRMWARE("amdgpu/kaveri_sdma.bin");
++MODULE_FIRMWARE("amdgpu/kaveri_sdma1.bin");
++MODULE_FIRMWARE("amdgpu/kabini_sdma.bin");
++MODULE_FIRMWARE("amdgpu/kabini_sdma1.bin");
++MODULE_FIRMWARE("amdgpu/mullins_sdma.bin");
++MODULE_FIRMWARE("amdgpu/mullins_sdma1.bin");
+
+ u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);
+
+@@ -132,9 +132,9 @@ static int cik_sdma_init_microcode(struct amdgpu_device *adev)
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (i == 0)
+- snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
+ else
+- snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma1.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
+ err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+index 703803f..46dfa24 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+@@ -57,36 +57,36 @@ static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev);
+ static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev);
+ static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev);
+
+-MODULE_FIRMWARE("radeon/bonaire_pfp.bin");
+-MODULE_FIRMWARE("radeon/bonaire_me.bin");
+-MODULE_FIRMWARE("radeon/bonaire_ce.bin");
+-MODULE_FIRMWARE("radeon/bonaire_rlc.bin");
+-MODULE_FIRMWARE("radeon/bonaire_mec.bin");
+-
+-MODULE_FIRMWARE("radeon/hawaii_pfp.bin");
+-MODULE_FIRMWARE("radeon/hawaii_me.bin");
+-MODULE_FIRMWARE("radeon/hawaii_ce.bin");
+-MODULE_FIRMWARE("radeon/hawaii_rlc.bin");
+-MODULE_FIRMWARE("radeon/hawaii_mec.bin");
+-
+-MODULE_FIRMWARE("radeon/kaveri_pfp.bin");
+-MODULE_FIRMWARE("radeon/kaveri_me.bin");
+-MODULE_FIRMWARE("radeon/kaveri_ce.bin");
+-MODULE_FIRMWARE("radeon/kaveri_rlc.bin");
+-MODULE_FIRMWARE("radeon/kaveri_mec.bin");
+-MODULE_FIRMWARE("radeon/kaveri_mec2.bin");
+-
+-MODULE_FIRMWARE("radeon/kabini_pfp.bin");
+-MODULE_FIRMWARE("radeon/kabini_me.bin");
+-MODULE_FIRMWARE("radeon/kabini_ce.bin");
+-MODULE_FIRMWARE("radeon/kabini_rlc.bin");
+-MODULE_FIRMWARE("radeon/kabini_mec.bin");
+-
+-MODULE_FIRMWARE("radeon/mullins_pfp.bin");
+-MODULE_FIRMWARE("radeon/mullins_me.bin");
+-MODULE_FIRMWARE("radeon/mullins_ce.bin");
+-MODULE_FIRMWARE("radeon/mullins_rlc.bin");
+-MODULE_FIRMWARE("radeon/mullins_mec.bin");
++MODULE_FIRMWARE("amdgpu/bonaire_pfp.bin");
++MODULE_FIRMWARE("amdgpu/bonaire_me.bin");
++MODULE_FIRMWARE("amdgpu/bonaire_ce.bin");
++MODULE_FIRMWARE("amdgpu/bonaire_rlc.bin");
++MODULE_FIRMWARE("amdgpu/bonaire_mec.bin");
++
++MODULE_FIRMWARE("amdgpu/hawaii_pfp.bin");
++MODULE_FIRMWARE("amdgpu/hawaii_me.bin");
++MODULE_FIRMWARE("amdgpu/hawaii_ce.bin");
++MODULE_FIRMWARE("amdgpu/hawaii_rlc.bin");
++MODULE_FIRMWARE("amdgpu/hawaii_mec.bin");
++
++MODULE_FIRMWARE("amdgpu/kaveri_pfp.bin");
++MODULE_FIRMWARE("amdgpu/kaveri_me.bin");
++MODULE_FIRMWARE("amdgpu/kaveri_ce.bin");
++MODULE_FIRMWARE("amdgpu/kaveri_rlc.bin");
++MODULE_FIRMWARE("amdgpu/kaveri_mec.bin");
++MODULE_FIRMWARE("amdgpu/kaveri_mec2.bin");
++
++MODULE_FIRMWARE("amdgpu/kabini_pfp.bin");
++MODULE_FIRMWARE("amdgpu/kabini_me.bin");
++MODULE_FIRMWARE("amdgpu/kabini_ce.bin");
++MODULE_FIRMWARE("amdgpu/kabini_rlc.bin");
++MODULE_FIRMWARE("amdgpu/kabini_mec.bin");
++
++MODULE_FIRMWARE("amdgpu/mullins_pfp.bin");
++MODULE_FIRMWARE("amdgpu/mullins_me.bin");
++MODULE_FIRMWARE("amdgpu/mullins_ce.bin");
++MODULE_FIRMWARE("amdgpu/mullins_rlc.bin");
++MODULE_FIRMWARE("amdgpu/mullins_mec.bin");
+
+ static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
+ {
+@@ -925,7 +925,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
+ default: BUG();
+ }
+
+- snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
+ err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+@@ -933,7 +933,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
+ if (err)
+ goto out;
+
+- snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
+ err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+@@ -941,7 +941,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
+ if (err)
+ goto out;
+
+- snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
+ err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+@@ -949,7 +949,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
+ if (err)
+ goto out;
+
+- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
+ err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+@@ -958,7 +958,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
+ goto out;
+
+ if (adev->asic_type == CHIP_KAVERI) {
+- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec2.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
+ err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+@@ -967,7 +967,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
+ goto out;
+ }
+
+- snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
+ err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+index 22707a4..4fd4081 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+@@ -47,8 +47,8 @@ static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev);
+ static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
+ static int gmc_v7_0_wait_for_idle(void *handle);
+
+-MODULE_FIRMWARE("radeon/bonaire_mc.bin");
+-MODULE_FIRMWARE("radeon/hawaii_mc.bin");
++MODULE_FIRMWARE("amdgpu/bonaire_mc.bin");
++MODULE_FIRMWARE("amdgpu/hawaii_mc.bin");
+ MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
+
+ static const u32 golden_settings_iceland_a11[] =
+@@ -147,10 +147,7 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
+ default: BUG();
+ }
+
+- if (adev->asic_type == CHIP_TOPAZ)
+- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
+- else
+- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
+
+ err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
+ if (err)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4848-drm-amdgpu-switch-firmware-path-for-SI-parts.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4848-drm-amdgpu-switch-firmware-path-for-SI-parts.patch
new file mode 100644
index 00000000..32a0f318
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4848-drm-amdgpu-switch-firmware-path-for-SI-parts.patch
@@ -0,0 +1,191 @@
+From ebe8951c35677696c279be3097e49a50e98324c8 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Mon, 2 Jul 2018 14:35:36 -0500
+Subject: [PATCH 4848/5725] drm/amdgpu: switch firmware path for SI parts
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Use a separate firmware path for amdgpu to avoid conflicts
+with radeon on SI parts.
+
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 56 +++++++++++++++++------------------
+ drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 14 ++++-----
+ drivers/gpu/drm/amd/amdgpu/si_dpm.c | 22 +++++++-------
+ 3 files changed, 46 insertions(+), 46 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+index 0005f70..4518021 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+@@ -44,30 +44,30 @@ static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev);
+ static void gfx_v6_0_set_irq_funcs(struct amdgpu_device *adev);
+ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev);
+
+-MODULE_FIRMWARE("radeon/tahiti_pfp.bin");
+-MODULE_FIRMWARE("radeon/tahiti_me.bin");
+-MODULE_FIRMWARE("radeon/tahiti_ce.bin");
+-MODULE_FIRMWARE("radeon/tahiti_rlc.bin");
+-
+-MODULE_FIRMWARE("radeon/pitcairn_pfp.bin");
+-MODULE_FIRMWARE("radeon/pitcairn_me.bin");
+-MODULE_FIRMWARE("radeon/pitcairn_ce.bin");
+-MODULE_FIRMWARE("radeon/pitcairn_rlc.bin");
+-
+-MODULE_FIRMWARE("radeon/verde_pfp.bin");
+-MODULE_FIRMWARE("radeon/verde_me.bin");
+-MODULE_FIRMWARE("radeon/verde_ce.bin");
+-MODULE_FIRMWARE("radeon/verde_rlc.bin");
+-
+-MODULE_FIRMWARE("radeon/oland_pfp.bin");
+-MODULE_FIRMWARE("radeon/oland_me.bin");
+-MODULE_FIRMWARE("radeon/oland_ce.bin");
+-MODULE_FIRMWARE("radeon/oland_rlc.bin");
+-
+-MODULE_FIRMWARE("radeon/hainan_pfp.bin");
+-MODULE_FIRMWARE("radeon/hainan_me.bin");
+-MODULE_FIRMWARE("radeon/hainan_ce.bin");
+-MODULE_FIRMWARE("radeon/hainan_rlc.bin");
++MODULE_FIRMWARE("amdgpu/tahiti_pfp.bin");
++MODULE_FIRMWARE("amdgpu/tahiti_me.bin");
++MODULE_FIRMWARE("amdgpu/tahiti_ce.bin");
++MODULE_FIRMWARE("amdgpu/tahiti_rlc.bin");
++
++MODULE_FIRMWARE("amdgpu/pitcairn_pfp.bin");
++MODULE_FIRMWARE("amdgpu/pitcairn_me.bin");
++MODULE_FIRMWARE("amdgpu/pitcairn_ce.bin");
++MODULE_FIRMWARE("amdgpu/pitcairn_rlc.bin");
++
++MODULE_FIRMWARE("amdgpu/verde_pfp.bin");
++MODULE_FIRMWARE("amdgpu/verde_me.bin");
++MODULE_FIRMWARE("amdgpu/verde_ce.bin");
++MODULE_FIRMWARE("amdgpu/verde_rlc.bin");
++
++MODULE_FIRMWARE("amdgpu/oland_pfp.bin");
++MODULE_FIRMWARE("amdgpu/oland_me.bin");
++MODULE_FIRMWARE("amdgpu/oland_ce.bin");
++MODULE_FIRMWARE("amdgpu/oland_rlc.bin");
++
++MODULE_FIRMWARE("amdgpu/hainan_pfp.bin");
++MODULE_FIRMWARE("amdgpu/hainan_me.bin");
++MODULE_FIRMWARE("amdgpu/hainan_ce.bin");
++MODULE_FIRMWARE("amdgpu/hainan_rlc.bin");
+
+ static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev);
+ static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
+@@ -335,7 +335,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
+ default: BUG();
+ }
+
+- snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
+ err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+@@ -346,7 +346,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
+ adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+
+- snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
+ err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+@@ -357,7 +357,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
+ adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+
+- snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
+ err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+@@ -368,7 +368,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
+ adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+
+- snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
+ err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+index 1170699..b2ad0c2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+@@ -40,11 +40,11 @@ static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev);
+ static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
+ static int gmc_v6_0_wait_for_idle(void *handle);
+
+-MODULE_FIRMWARE("radeon/tahiti_mc.bin");
+-MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
+-MODULE_FIRMWARE("radeon/verde_mc.bin");
+-MODULE_FIRMWARE("radeon/oland_mc.bin");
+-MODULE_FIRMWARE("radeon/si58_mc.bin");
++MODULE_FIRMWARE("amdgpu/tahiti_mc.bin");
++MODULE_FIRMWARE("amdgpu/pitcairn_mc.bin");
++MODULE_FIRMWARE("amdgpu/verde_mc.bin");
++MODULE_FIRMWARE("amdgpu/oland_mc.bin");
++MODULE_FIRMWARE("amdgpu/si58_mc.bin");
+
+ #define MC_SEQ_MISC0__MT__MASK 0xf0000000
+ #define MC_SEQ_MISC0__MT__GDDR1 0x10000000
+@@ -133,9 +133,9 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
+ is_58_fw = true;
+
+ if (is_58_fw)
+- snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
++ snprintf(fw_name, sizeof(fw_name), "amdgpu/si58_mc.bin");
+ else
+- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
+ err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+index 9567dd0..1026431 100644
+--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
++++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+@@ -56,16 +56,16 @@
+
+ #define BIOS_SCRATCH_4 0x5cd
+
+-MODULE_FIRMWARE("radeon/tahiti_smc.bin");
+-MODULE_FIRMWARE("radeon/pitcairn_smc.bin");
+-MODULE_FIRMWARE("radeon/pitcairn_k_smc.bin");
+-MODULE_FIRMWARE("radeon/verde_smc.bin");
+-MODULE_FIRMWARE("radeon/verde_k_smc.bin");
+-MODULE_FIRMWARE("radeon/oland_smc.bin");
+-MODULE_FIRMWARE("radeon/oland_k_smc.bin");
+-MODULE_FIRMWARE("radeon/hainan_smc.bin");
+-MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
+-MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");
++MODULE_FIRMWARE("amdgpu/tahiti_smc.bin");
++MODULE_FIRMWARE("amdgpu/pitcairn_smc.bin");
++MODULE_FIRMWARE("amdgpu/pitcairn_k_smc.bin");
++MODULE_FIRMWARE("amdgpu/verde_smc.bin");
++MODULE_FIRMWARE("amdgpu/verde_k_smc.bin");
++MODULE_FIRMWARE("amdgpu/oland_smc.bin");
++MODULE_FIRMWARE("amdgpu/oland_k_smc.bin");
++MODULE_FIRMWARE("amdgpu/hainan_smc.bin");
++MODULE_FIRMWARE("amdgpu/hainan_k_smc.bin");
++MODULE_FIRMWARE("amdgpu/banks_k_2_smc.bin");
+
+ static const struct amd_pm_funcs si_dpm_funcs;
+
+@@ -7664,7 +7664,7 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev)
+ default: BUG();
+ }
+
+- snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
+ err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4849-drm-amdgpu-update-amd_pcie.h-to-include-gen4-speeds.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4849-drm-amdgpu-update-amd_pcie.h-to-include-gen4-speeds.patch
new file mode 100644
index 00000000..a47ae1bd
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4849-drm-amdgpu-update-amd_pcie.h-to-include-gen4-speeds.patch
@@ -0,0 +1,41 @@
+From f08801a19f5fc3a87805ea33fc066be4c514f394 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Mon, 25 Jun 2018 13:03:51 -0500
+Subject: [PATCH 4849/5725] drm/amdgpu: update amd_pcie.h to include gen4
+ speeds
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Internal header used by the driver to specify the PCIe gen
+speeds of the ASIC and chipset.
+
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/include/amd_pcie.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/include/amd_pcie.h b/drivers/gpu/drm/amd/include/amd_pcie.h
+index 5eb895f..9cb9ceb 100644
+--- a/drivers/gpu/drm/amd/include/amd_pcie.h
++++ b/drivers/gpu/drm/amd/include/amd_pcie.h
+@@ -27,6 +27,7 @@
+ #define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 0x00010000
+ #define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 0x00020000
+ #define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 0x00040000
++#define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 0x00080000
+ #define CAIL_PCIE_LINK_SPEED_SUPPORT_MASK 0xFFFF0000
+ #define CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT 16
+
+@@ -34,6 +35,7 @@
+ #define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 0x00000001
+ #define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 0x00000002
+ #define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 0x00000004
++#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 0x00000008
+ #define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_MASK 0x0000FFFF
+ #define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_SHIFT 0
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4850-drm-amdgpu-use-pcie-functions-for-link-width-and-spe.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4850-drm-amdgpu-use-pcie-functions-for-link-width-and-spe.patch
new file mode 100644
index 00000000..c3257d6c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4850-drm-amdgpu-use-pcie-functions-for-link-width-and-spe.patch
@@ -0,0 +1,341 @@
+From 85ca399974d01aa27b21b393a70a04b30cfc5e09 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Mon, 25 Jun 2018 13:07:50 -0500
+Subject: [PATCH 4850/5725] drm/amdgpu: use pcie functions for link width and
+ speed
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Use the newly exported pci functions to get the link width
+and speed rather than the duplicated drm versions.
+
+Also query the GPU link caps directly rather than hardcoding
+them.
+
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 83 +++++++++++++++++++++---------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c | 7 ++-
+ drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 3 +-
+ drivers/gpu/drm/amd/amdgpu/si_dpm.c | 3 +-
+ drivers/pci/pci.c | 63 +++++++++++++++++++++++
+ include/linux/pci.h | 4 ++
+ include/uapi/linux/pci_regs.h | 2 +
+ 7 files changed, 134 insertions(+), 31 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index c0c835a..88c3879 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -3374,8 +3374,9 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ */
+ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
+ {
+- u32 mask;
+- int ret;
++ struct pci_dev *pdev;
++ enum pci_bus_speed speed_cap;
++ enum pcie_link_width link_width;
+
+ if (amdgpu_pcie_gen_cap)
+ adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
+@@ -3393,27 +3394,61 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
+ }
+
+ if (adev->pm.pcie_gen_mask == 0) {
+- ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
+- if (!ret) {
+- adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
++ /* asic caps */
++ pdev = adev->pdev;
++ speed_cap = pcie_get_speed_cap(pdev);
++ if (speed_cap == PCI_SPEED_UNKNOWN) {
++ adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+ CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+ CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
+-
+- if (mask & DRM_PCIE_SPEED_25)
+- adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
+- if (mask & DRM_PCIE_SPEED_50)
+- adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
+- if (mask & DRM_PCIE_SPEED_80)
+- adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
+ } else {
+- adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
++ if (speed_cap == PCIE_SPEED_16_0GT)
++ adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
++ CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
++ CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
++ CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
++ else if (speed_cap == PCIE_SPEED_8_0GT)
++ adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
++ CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
++ CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
++ else if (speed_cap == PCIE_SPEED_5_0GT)
++ adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
++ CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
++ else
++ adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
++ }
++ /* platform caps */
++ pdev = adev->ddev->pdev->bus->self;
++ speed_cap = pcie_get_speed_cap(pdev);
++ if (speed_cap == PCI_SPEED_UNKNOWN) {
++ adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
++ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
++ } else {
++ if (speed_cap == PCIE_SPEED_16_0GT)
++ adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
++ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
++ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
++ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
++ else if (speed_cap == PCIE_SPEED_8_0GT)
++ adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
++ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
++ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
++ else if (speed_cap == PCIE_SPEED_5_0GT)
++ adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
++ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
++ else
++ adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
++
+ }
+ }
+ if (adev->pm.pcie_mlw_mask == 0) {
+- ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
+- if (!ret) {
+- switch (mask) {
+- case 32:
++ pdev = adev->ddev->pdev->bus->self;
++ link_width = pcie_get_width_cap(pdev);
++ if (link_width == PCIE_LNK_WIDTH_UNKNOWN) {
++ adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
++ } else {
++ switch (link_width) {
++ case PCIE_LNK_X32:
+ adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
+@@ -3422,7 +3457,7 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+- case 16:
++ case PCIE_LNK_X16:
+ adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+@@ -3430,36 +3465,34 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+- case 12:
++ case PCIE_LNK_X12:
+ adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+- case 8:
++ case PCIE_LNK_X8:
+ adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+- case 4:
++ case PCIE_LNK_X4:
+ adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+- case 2:
++ case PCIE_LNK_X2:
+ adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+- case 1:
++ case PCIE_LNK_X1:
+ adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
+ break;
+ default:
+ break;
+ }
+- } else {
+- adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
+ }
+ }
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+index def1010..719061f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+@@ -28,6 +28,7 @@
+ #include "amdgpu_i2c.h"
+ #include "amdgpu_dpm.h"
+ #include "atom.h"
++#include "amd_pcie.h"
+
+ void amdgpu_dpm_print_class_info(u32 class, u32 class2)
+ {
+@@ -936,9 +937,11 @@ enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
+ case AMDGPU_PCIE_GEN3:
+ return AMDGPU_PCIE_GEN3;
+ default:
+- if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3))
++ if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
++ (default_gen == AMDGPU_PCIE_GEN3))
+ return AMDGPU_PCIE_GEN3;
+- else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2))
++ else if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) &&
++ (default_gen == AMDGPU_PCIE_GEN2))
+ return AMDGPU_PCIE_GEN2;
+ else
+ return AMDGPU_PCIE_GEN1;
+diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+index caaaabf..9bf0b24 100644
+--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
++++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+@@ -5845,8 +5845,7 @@ static int ci_dpm_init(struct amdgpu_device *adev)
+ adev->pm.dpm.priv = pi;
+
+ pi->sys_pcie_mask =
+- (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
+- CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
++ adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK;
+
+ pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+index 1026431..a32f6f6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
++++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+@@ -7317,8 +7317,7 @@ static int si_dpm_init(struct amdgpu_device *adev)
+ pi = &eg_pi->rv7xx;
+
+ si_pi->sys_pcie_mask =
+- (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
+- CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
++ adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK;
+ si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
+ si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev);
+
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 1dde9da..f0957d4 100755
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -5156,6 +5156,69 @@ int pcie_set_mps(struct pci_dev *dev, int mps)
+ EXPORT_SYMBOL(pcie_set_mps);
+
+ /**
++ * pcie_get_speed_cap - query for the PCI device's link speed capability
++ * @dev: PCI device to query
++ *
++ * Query the PCI device speed capability. Return the maximum link speed
++ * supported by the device.
++ */
++enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
++{
++ u32 lnkcap2, lnkcap;
++
++ /*
++ * PCIe r4.0 sec 7.5.3.18 recommends using the Supported Link
++ * Speeds Vector in Link Capabilities 2 when supported, falling
++ * back to Max Link Speed in Link Capabilities otherwise.
++ */
++ pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
++ if (lnkcap2) { /* PCIe r3.0-compliant */
++ if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB)
++ return PCIE_SPEED_16_0GT;
++ else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
++ return PCIE_SPEED_8_0GT;
++ else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
++ return PCIE_SPEED_5_0GT;
++ else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
++ return PCIE_SPEED_2_5GT;
++ return PCI_SPEED_UNKNOWN;
++ }
++ pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
++ if (lnkcap) {
++ if (lnkcap & PCI_EXP_LNKCAP_SLS_16_0GB)
++ return PCIE_SPEED_16_0GT;
++ else if (lnkcap & PCI_EXP_LNKCAP_SLS_8_0GB)
++ return PCIE_SPEED_8_0GT;
++ else if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB)
++ return PCIE_SPEED_5_0GT;
++ else if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB)
++ return PCIE_SPEED_2_5GT;
++ }
++
++ return PCI_SPEED_UNKNOWN;
++}
++EXPORT_SYMBOL(pcie_get_speed_cap);
++
++/**
++ * pcie_get_width_cap - query for the PCI device's link width capability
++ * @dev: PCI device to query
++ *
++ * Query the PCI device width capability. Return the maximum link width
++ * supported by the device.
++ */
++enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
++{
++ u32 lnkcap;
++
++ pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
++ if (lnkcap)
++ return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
++
++ return PCIE_LNK_WIDTH_UNKNOWN;
++}
++EXPORT_SYMBOL(pcie_get_width_cap);
++
++/**
+ * pcie_get_minimum_link - determine minimum link settings of a PCI device
+ * @dev: PCI device to query
+ * @speed: storage for minimum speed
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index 76a681f..907fafa 100755
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -264,9 +264,13 @@ enum pci_bus_speed {
+ PCIE_SPEED_2_5GT = 0x14,
+ PCIE_SPEED_5_0GT = 0x15,
+ PCIE_SPEED_8_0GT = 0x16,
++ PCIE_SPEED_16_0GT = 0x17,
+ PCI_SPEED_UNKNOWN = 0xff,
+ };
+
++enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev);
++enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
++
+ struct pci_cap_saved_data {
+ u16 cap_nr;
+ bool cap_extended;
+diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
+index 009a432..fe1a8c9 100755
+--- a/include/uapi/linux/pci_regs.h
++++ b/include/uapi/linux/pci_regs.h
+@@ -520,6 +520,7 @@
+ #define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001 /* LNKCAP2 SLS Vector bit 0 */
+ #define PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS Vector bit 1 */
+ #define PCI_EXP_LNKCAP_SLS_8_0GB 0x00000003 /* LNKCAP2 SLS Vector bit 2 */
++#define PCI_EXP_LNKCAP_SLS_16_0GB 0x00000004 /* LNKCAP2 SLS Vector bit 3 */
+ #define PCI_EXP_LNKCAP_MLW 0x000003f0 /* Maximum Link Width */
+ #define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */
+ #define PCI_EXP_LNKCAP_L0SEL 0x00007000 /* L0s Exit Latency */
+@@ -649,6 +650,7 @@
+ #define PCI_EXP_LNKCAP2_SLS_2_5GB 0x00000002 /* Supported Speed 2.5GT/s */
+ #define PCI_EXP_LNKCAP2_SLS_5_0GB 0x00000004 /* Supported Speed 5.0GT/s */
+ #define PCI_EXP_LNKCAP2_SLS_8_0GB 0x00000008 /* Supported Speed 8.0GT/s */
++#define PCI_EXP_LNKCAP2_SLS_16_0GB 0x00000010 /* Supported Speed 16GT/s */
+ #define PCI_EXP_LNKCAP2_CROSSLINK 0x00000100 /* Crosslink supported */
+ #define PCI_EXP_LNKCTL2 48 /* Link Control 2 */
+ #define PCI_EXP_LNKSTA2 50 /* Link Status 2 */
+--
+2.7.4
+
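This patch replaces the drm helpers with pcie_get_speed_cap()/pcie_get_width_cap() and then folds the returned enum pci_bus_speed into the CAIL gen mask. Below is a minimal host-side sketch of that folding step only; the enum values are copied from the include/linux/pci.h hunk above, while the helper name asic_gen_mask_from_speed() is an illustrative stand-in for the inline cascade added to amdgpu_device_get_pcie_info().

#include <stdint.h>
#include <stdio.h>

/* Values mirror the enum pci_bus_speed entries touched by this patch. */
enum pci_bus_speed {
        PCIE_SPEED_2_5GT  = 0x14,
        PCIE_SPEED_5_0GT  = 0x15,
        PCIE_SPEED_8_0GT  = 0x16,
        PCIE_SPEED_16_0GT = 0x17,
        PCI_SPEED_UNKNOWN = 0xff,
};

#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1  0x00000001
#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2  0x00000002
#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3  0x00000004
#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4  0x00000008

/* Illustrative helper: fold a speed capability into the ASIC half of the
 * gen mask, following the same cascade the patch adds to the driver. */
static uint32_t asic_gen_mask_from_speed(enum pci_bus_speed cap)
{
        switch (cap) {
        case PCI_SPEED_UNKNOWN:
                /* Unknown capability: assume the pre-Gen4 set, as the patch does. */
                return CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
                       CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
                       CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3;
        case PCIE_SPEED_16_0GT:
                return CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
                       CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
                       CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
                       CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4;
        case PCIE_SPEED_8_0GT:
                return CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
                       CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
                       CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3;
        case PCIE_SPEED_5_0GT:
                return CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
                       CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2;
        default:
                /* 2.5 GT/s or anything unrecognized: Gen1 only. */
                return CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
        }
}

int main(void)
{
        printf("8.0 GT/s asic mask = 0x%08x\n",
               asic_gen_mask_from_speed(PCIE_SPEED_8_0GT));
        return 0;
}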
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4851-drm-amd-pp-Export-notify_smu_enable_pwe-to-display.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4851-drm-amd-pp-Export-notify_smu_enable_pwe-to-display.patch
new file mode 100644
index 00000000..8a814dfb
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4851-drm-amd-pp-Export-notify_smu_enable_pwe-to-display.patch
@@ -0,0 +1,82 @@
+From 5e36318bb63ca76462d1446827540e2ddc12d433 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Fri, 22 Jun 2018 14:12:59 +0800
+Subject: [PATCH 4851/5725] drm/amd/pp: Export notify_smu_enable_pwe to display
+
+The display can notify the SMU to enable PWE after GPU suspend.
+It is used when the display resumes from S3 and wants to start the
+audio driver by enabling PWE.
+
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/include/kgd_pp_interface.h | 7 ++++---
+ drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 20 ++++++++++++++++++++
+ 2 files changed, 24 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+index 99ee3ed..6a41b81 100644
+--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
++++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+@@ -241,6 +241,9 @@ struct amd_pm_funcs {
+ int (*set_clockgating_by_smu)(void *handle, uint32_t msg_id);
+ int (*set_power_limit)(void *handle, uint32_t n);
+ int (*get_power_limit)(void *handle, uint32_t *limit, bool default_limit);
++ int (*get_power_profile_mode)(void *handle, char *buf);
++ int (*set_power_profile_mode)(void *handle, long *input, uint32_t size);
++ int (*odn_edit_dpm_table)(void *handle, uint32_t type, long *input, uint32_t size);
+ /* export to DC */
+ u32 (*get_sclk)(void *handle, bool low);
+ u32 (*get_mclk)(void *handle, bool low);
+@@ -265,9 +268,7 @@ struct amd_pm_funcs {
+ struct pp_display_clock_request *clock);
+ int (*get_display_mode_validation_clocks)(void *handle,
+ struct amd_pp_simple_clock_info *clocks);
+- int (*get_power_profile_mode)(void *handle, char *buf);
+- int (*set_power_profile_mode)(void *handle, long *input, uint32_t size);
+- int (*odn_edit_dpm_table)(void *handle, uint32_t type, long *input, uint32_t size);
++ int (*notify_smu_enable_pwe)(void *handle);
+ };
+
+ #endif
+diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+index 68c19d4..63db1ea 100644
+--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
++++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+@@ -1212,6 +1212,25 @@ static int pp_set_powergating_by_smu(void *handle,
+ return ret;
+ }
+
++static int pp_notify_smu_enable_pwe(void *handle)
++{
++ struct pp_hwmgr *hwmgr = handle;
++
++ if (!hwmgr || !hwmgr->pm_en)
++		return -EINVAL;
++
++ if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
++ pr_info("%s was not implemented.\n", __func__);
++		return -EINVAL;
++ }
++
++ mutex_lock(&hwmgr->smu_lock);
++ hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);
++ mutex_unlock(&hwmgr->smu_lock);
++
++ return 0;
++}
++
+ static const struct amd_pm_funcs pp_dpm_funcs = {
+ .load_firmware = pp_dpm_load_fw,
+ .wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
+@@ -1255,5 +1274,6 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
+ .set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
+ .display_clock_voltage_request = pp_display_clock_voltage_request,
+ .get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
++ .notify_smu_enable_pwe = pp_notify_smu_enable_pwe,
+ };
+
+--
+2.7.4
+
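The new callback is reached through the exported amd_pm_funcs table rather than powerplay internals, so callers have to treat it as optional and check the pointer before use. A small stand-alone sketch of that caller-side pattern follows; the trimmed struct, fake_notify_smu_enable_pwe() and notify_pwe() are illustrative stand-ins, not driver symbols.

#include <stdio.h>

/* Trimmed-down, illustrative view of the callback added to amd_pm_funcs. */
struct amd_pm_funcs {
        int (*notify_smu_enable_pwe)(void *handle);
};

/* Stand-in for the powerplay implementation registered in pp_dpm_funcs. */
static int fake_notify_smu_enable_pwe(void *handle)
{
        (void)handle;
        return 0;
}

/* Caller-side pattern: guard the optional callback before invoking it,
 * mirroring how the display code treats powerplay entry points. */
static int notify_pwe(const struct amd_pm_funcs *pp_funcs, void *pp_handle)
{
        if (!pp_funcs || !pp_funcs->notify_smu_enable_pwe)
                return -1;      /* callback not implemented for this asic */

        return pp_funcs->notify_smu_enable_pwe(pp_handle);
}

int main(void)
{
        struct amd_pm_funcs funcs = {
                .notify_smu_enable_pwe = fake_notify_smu_enable_pwe,
        };

        printf("notify_pwe returned %d\n", notify_pwe(&funcs, NULL));
        return 0;
}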
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4852-drm-amd-display-Refine-the-implementation-of-dm_pp_g.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4852-drm-amd-display-Refine-the-implementation-of-dm_pp_g.patch
new file mode 100644
index 00000000..85273b3e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4852-drm-amd-display-Refine-the-implementation-of-dm_pp_g.patch
@@ -0,0 +1,150 @@
+From 57a86817e6ad728c4d4d4b929f57788286855dfe Mon Sep 17 00:00:00 2001
+From: Rex Zhu <rex.zhu@amd.com>
+Date: Tue, 3 Jul 2018 16:31:35 +0800
+Subject: [PATCH 4852/5725] drm/amd/display: Refine the implementation of
+ dm_pp_get_funcs_rv
+
+powerplay/dpm exports all of its interfaces in struct amd_pm_funcs,
+so call the common exported interfaces instead of powerplay's internal ones.
+
+Also stop including the header file hwmgr.h.
+
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | 80 +++++++++++-----------
+ 1 file changed, 40 insertions(+), 40 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+index 8184511..9c105c0 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+@@ -33,7 +33,6 @@
+ #include "amdgpu_dm_irq.h"
+ #include "amdgpu_pm.h"
+ #include "dm_pp_smu.h"
+-#include "../../powerplay/inc/hwmgr.h"
+
+
+ bool dm_pp_apply_display_requirements(
+@@ -452,76 +451,77 @@ bool dm_pp_get_static_clocks(
+ void pp_rv_set_display_requirement(struct pp_smu *pp,
+ struct pp_smu_display_requirement_rv *req)
+ {
+- struct amdgpu_device *adev = pp->ctx->driver_context;
+- struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+- int ret = 0;
+- if (hwmgr->hwmgr_func->set_deep_sleep_dcefclk)
+- ret = hwmgr->hwmgr_func->set_deep_sleep_dcefclk(hwmgr, req->hard_min_dcefclk_khz/10);
+- if (hwmgr->hwmgr_func->set_active_display_count)
+- ret = hwmgr->hwmgr_func->set_active_display_count(hwmgr, req->display_count);
++ struct dc_context *ctx = pp->ctx;
++ struct amdgpu_device *adev = ctx->driver_context;
++ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
++
++ if (!pp_funcs || !pp_funcs->display_configuration_changed)
++ return;
+
+- //store_cc6 is not yet implemented in SMU level
++ amdgpu_dpm_display_configuration_changed(adev);
+ }
+
+ void pp_rv_set_wm_ranges(struct pp_smu *pp,
+ struct pp_smu_wm_range_sets *ranges)
+ {
+- struct amdgpu_device *adev = pp->ctx->driver_context;
+- struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+- struct pp_wm_sets_with_clock_ranges_soc15 ranges_soc15 = {0};
+- int i = 0;
+-
+- if (!hwmgr->hwmgr_func->set_watermarks_for_clocks_ranges ||
+- !pp || !ranges)
+- return;
++ struct dc_context *ctx = pp->ctx;
++ struct amdgpu_device *adev = ctx->driver_context;
++ void *pp_handle = adev->powerplay.pp_handle;
++ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
++ struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
++ struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks = wm_with_clock_ranges.wm_dmif_clocks_ranges;
++ struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks = wm_with_clock_ranges.wm_mcif_clocks_ranges;
++ int32_t i;
+
+- //not entirely sure if thats a correct assignment
+- ranges_soc15.num_wm_sets_dmif = ranges->num_reader_wm_sets;
+- ranges_soc15.num_wm_sets_mcif = ranges->num_writer_wm_sets;
++ wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
++ wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;
+
+- for (i = 0; i < ranges_soc15.num_wm_sets_dmif; i++) {
++ for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
+ if (ranges->reader_wm_sets[i].wm_inst > 3)
+- ranges_soc15.wm_sets_dmif[i].wm_set_id = DC_WM_SET_A;
++ wm_dce_clocks[i].wm_set_id = WM_SET_A;
+ else
+- ranges_soc15.wm_sets_dmif[i].wm_set_id =
++ wm_dce_clocks[i].wm_set_id =
+ ranges->reader_wm_sets[i].wm_inst;
+- ranges_soc15.wm_sets_dmif[i].wm_max_dcefclk_in_khz =
++ wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz =
+ ranges->reader_wm_sets[i].max_drain_clk_khz;
+- ranges_soc15.wm_sets_dmif[i].wm_min_dcefclk_in_khz =
++ wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz =
+ ranges->reader_wm_sets[i].min_drain_clk_khz;
+- ranges_soc15.wm_sets_dmif[i].wm_max_memclk_in_khz =
++ wm_dce_clocks[i].wm_max_mem_clk_in_khz =
+ ranges->reader_wm_sets[i].max_fill_clk_khz;
+- ranges_soc15.wm_sets_dmif[i].wm_min_memclk_in_khz =
++ wm_dce_clocks[i].wm_min_mem_clk_in_khz =
+ ranges->reader_wm_sets[i].min_fill_clk_khz;
+ }
+
+- for (i = 0; i < ranges_soc15.num_wm_sets_mcif; i++) {
++ for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
+ if (ranges->writer_wm_sets[i].wm_inst > 3)
+- ranges_soc15.wm_sets_dmif[i].wm_set_id = DC_WM_SET_A;
++ wm_soc_clocks[i].wm_set_id = WM_SET_A;
+ else
+- ranges_soc15.wm_sets_mcif[i].wm_set_id =
++ wm_soc_clocks[i].wm_set_id =
+ ranges->writer_wm_sets[i].wm_inst;
+- ranges_soc15.wm_sets_mcif[i].wm_max_socclk_in_khz =
++ wm_soc_clocks[i].wm_max_socclk_clk_in_khz =
+ ranges->writer_wm_sets[i].max_fill_clk_khz;
+- ranges_soc15.wm_sets_mcif[i].wm_min_socclk_in_khz =
++ wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
+ ranges->writer_wm_sets[i].min_fill_clk_khz;
+- ranges_soc15.wm_sets_mcif[i].wm_max_memclk_in_khz =
++ wm_soc_clocks[i].wm_max_mem_clk_in_khz =
+ ranges->writer_wm_sets[i].max_fill_clk_khz;
+- ranges_soc15.wm_sets_mcif[i].wm_min_memclk_in_khz =
++ wm_soc_clocks[i].wm_min_mem_clk_in_khz =
+ ranges->writer_wm_sets[i].min_fill_clk_khz;
+ }
+
+- hwmgr->hwmgr_func->set_watermarks_for_clocks_ranges(hwmgr, &ranges_soc15);
+-
++ pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, &wm_with_clock_ranges);
+ }
+
+ void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
+ {
+- struct amdgpu_device *adev = pp->ctx->driver_context;
+- struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
++ struct dc_context *ctx = pp->ctx;
++ struct amdgpu_device *adev = ctx->driver_context;
++ void *pp_handle = adev->powerplay.pp_handle;
++ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
++
++ if (!pp_funcs || !pp_funcs->notify_smu_enable_pwe)
++ return;
+
+- if (hwmgr->hwmgr_func->smus_notify_pwe)
+- hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);
++ pp_funcs->notify_smu_enable_pwe(pp_handle);
+ }
+
+ void dm_pp_get_funcs_rv(
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4853-drm-amd-display-Fix-copy-error-when-set-memory-clock.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4853-drm-amd-display-Fix-copy-error-when-set-memory-clock.patch
new file mode 100644
index 00000000..a57ff65a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4853-drm-amd-display-Fix-copy-error-when-set-memory-clock.patch
@@ -0,0 +1,34 @@
+From afe0cec5197d44d6d59671128eb8dde5d3ae22fc Mon Sep 17 00:00:00 2001
+From: Rex Zhu <rex.zhu@amd.com>
+Date: Tue, 3 Jul 2018 17:17:21 +0800
+Subject: [PATCH 4853/5725] drm/amd/display: Fix copy error when set memory
+ clocks
+
+Set memory clocks same as soc clocks
+
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+index 9c105c0..50e8630 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+@@ -503,9 +503,9 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp,
+ wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
+ ranges->writer_wm_sets[i].min_fill_clk_khz;
+ wm_soc_clocks[i].wm_max_mem_clk_in_khz =
+- ranges->writer_wm_sets[i].max_fill_clk_khz;
++ ranges->writer_wm_sets[i].max_drain_clk_khz;
+ wm_soc_clocks[i].wm_min_mem_clk_in_khz =
+- ranges->writer_wm_sets[i].min_fill_clk_khz;
++ ranges->writer_wm_sets[i].min_drain_clk_khz;
+ }
+
+ pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, &wm_with_clock_ranges);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4854-drm-amd-pp-Remove-the-same-struct-define-in-powerpla.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4854-drm-amd-pp-Remove-the-same-struct-define-in-powerpla.patch
new file mode 100644
index 00000000..e774d12d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4854-drm-amd-pp-Remove-the-same-struct-define-in-powerpla.patch
@@ -0,0 +1,212 @@
+From a469669039b295cd69144db8a224b7ca2e5a536c Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Wed, 20 Jun 2018 15:05:04 +0800
+Subject: [PATCH 4854/5725] drm/amd/pp: Remove the same struct define in
+ powerplay
+
+Delete the duplicated struct definition in powerplay and share the
+struct with display.
+
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/include/dm_pp_interface.h | 37 ++-----------------
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 2 +-
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c | 42 +++++++++++-----------
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h | 2 +-
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 2 +-
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 2 +-
+ 6 files changed, 27 insertions(+), 60 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/include/dm_pp_interface.h b/drivers/gpu/drm/amd/include/dm_pp_interface.h
+index 7852952..1d93a0c 100644
+--- a/drivers/gpu/drm/amd/include/dm_pp_interface.h
++++ b/drivers/gpu/drm/amd/include/dm_pp_interface.h
+@@ -23,6 +23,8 @@
+ #ifndef _DM_PP_INTERFACE_
+ #define _DM_PP_INTERFACE_
+
++#include "dm_services_types.h"
++
+ #define PP_MAX_CLOCK_LEVELS 16
+
+ enum amd_pp_display_config_type{
+@@ -189,39 +191,4 @@ struct pp_display_clock_request {
+ uint32_t clock_freq_in_khz;
+ };
+
+-#define PP_MAX_WM_SETS 4
+-
+-enum pp_wm_set_id {
+- DC_WM_SET_A = 0,
+- DC_WM_SET_B,
+- DC_WM_SET_C,
+- DC_WM_SET_D,
+- DC_WM_SET_INVALID = 0xffff,
+-};
+-
+-struct pp_wm_set_with_dmif_clock_range_soc15 {
+- enum pp_wm_set_id wm_set_id;
+- uint32_t wm_min_dcefclk_in_khz;
+- uint32_t wm_max_dcefclk_in_khz;
+- uint32_t wm_min_memclk_in_khz;
+- uint32_t wm_max_memclk_in_khz;
+-};
+-
+-struct pp_wm_set_with_mcif_clock_range_soc15 {
+- enum pp_wm_set_id wm_set_id;
+- uint32_t wm_min_socclk_in_khz;
+- uint32_t wm_max_socclk_in_khz;
+- uint32_t wm_min_memclk_in_khz;
+- uint32_t wm_max_memclk_in_khz;
+-};
+-
+-struct pp_wm_sets_with_clock_ranges_soc15 {
+- uint32_t num_wm_sets_dmif;
+- uint32_t num_wm_sets_mcif;
+- struct pp_wm_set_with_dmif_clock_range_soc15
+- wm_sets_dmif[PP_MAX_WM_SETS];
+- struct pp_wm_set_with_mcif_clock_range_soc15
+- wm_sets_mcif[PP_MAX_WM_SETS];
+-};
+-
+ #endif /* _DM_PP_INTERFACE_ */
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+index 02fc0bc..a63e006 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+@@ -1111,7 +1111,7 @@ static int smu10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
+ void *clock_ranges)
+ {
+ struct smu10_hwmgr *data = hwmgr->backend;
+- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges;
++ struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges;
+ Watermarks_t *table = &(data->water_marks_table);
+ int result = 0;
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
+index 93a3d02..3effb55 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
+@@ -652,7 +652,7 @@ int smu_get_voltage_dependency_table_ppt_v1(
+ }
+
+ int smu_set_watermarks_for_clocks_ranges(void *wt_table,
+- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
++ struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+ {
+ uint32_t i;
+ struct watermarks *table = wt_table;
+@@ -660,49 +660,49 @@ int smu_set_watermarks_for_clocks_ranges(void *wt_table,
+ if (!table || !wm_with_clock_ranges)
+ return -EINVAL;
+
+- if (wm_with_clock_ranges->num_wm_sets_dmif > 4 || wm_with_clock_ranges->num_wm_sets_mcif > 4)
++ if (wm_with_clock_ranges->num_wm_dmif_sets > 4 || wm_with_clock_ranges->num_wm_mcif_sets > 4)
+ return -EINVAL;
+
+- for (i = 0; i < wm_with_clock_ranges->num_wm_sets_dmif; i++) {
++ for (i = 0; i < wm_with_clock_ranges->num_wm_dmif_sets; i++) {
+ table->WatermarkRow[1][i].MinClock =
+ cpu_to_le16((uint16_t)
+- (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_dcefclk_in_khz) /
+- 100);
++ (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz) /
++ 1000);
+ table->WatermarkRow[1][i].MaxClock =
+ cpu_to_le16((uint16_t)
+- (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_dcefclk_in_khz) /
++ (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz) /
+ 100);
+ table->WatermarkRow[1][i].MinUclk =
+ cpu_to_le16((uint16_t)
+- (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_memclk_in_khz) /
+- 100);
++ (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz) /
++ 1000);
+ table->WatermarkRow[1][i].MaxUclk =
+ cpu_to_le16((uint16_t)
+- (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_memclk_in_khz) /
+- 100);
++ (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz) /
++ 1000);
+ table->WatermarkRow[1][i].WmSetting = (uint8_t)
+- wm_with_clock_ranges->wm_sets_dmif[i].wm_set_id;
++ wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
+ }
+
+- for (i = 0; i < wm_with_clock_ranges->num_wm_sets_mcif; i++) {
++ for (i = 0; i < wm_with_clock_ranges->num_wm_mcif_sets; i++) {
+ table->WatermarkRow[0][i].MinClock =
+ cpu_to_le16((uint16_t)
+- (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_socclk_in_khz) /
+- 100);
++ (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz) /
++ 1000);
+ table->WatermarkRow[0][i].MaxClock =
+ cpu_to_le16((uint16_t)
+- (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_socclk_in_khz) /
+- 100);
++ (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz) /
++ 1000);
+ table->WatermarkRow[0][i].MinUclk =
+ cpu_to_le16((uint16_t)
+- (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_memclk_in_khz) /
+- 100);
++ (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz) /
++ 1000);
+ table->WatermarkRow[0][i].MaxUclk =
+ cpu_to_le16((uint16_t)
+- (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_memclk_in_khz) /
+- 100);
++ (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz) /
++ 1000);
+ table->WatermarkRow[0][i].WmSetting = (uint8_t)
+- wm_with_clock_ranges->wm_sets_mcif[i].wm_set_id;
++ wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
+ }
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
+index 916cc01..5454289 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
+@@ -107,7 +107,7 @@ int smu_get_voltage_dependency_table_ppt_v1(
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_table);
+
+ int smu_set_watermarks_for_clocks_ranges(void *wt_table,
+- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges);
++ struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges);
+
+ #define PHM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
+ #define PHM_FIELD_MASK(reg, field) reg##__##field##_MASK
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+index 064b4467..2f628c3 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+@@ -4214,7 +4214,7 @@ static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
+ void *clock_range)
+ {
+ struct vega10_hwmgr *data = hwmgr->backend;
+- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_range;
++ struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_range;
+ Watermarks_t *table = &(data->smc_state_table.water_marks_table);
+ int result = 0;
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+index 0a09075..5749287 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+@@ -1785,7 +1785,7 @@ static int vega12_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
+ {
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ Watermarks_t *table = &(data->smc_state_table.water_marks_table);
+- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges;
++ struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges;
+
+ if (!data->registry_data.disable_water_mark &&
+ data->smu_features[GNLD_DPM_DCEFCLK].supported &&
+--
+2.7.4
+
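The rows changed in smu_set_watermarks_for_clocks_ranges() take clock values handed over in kHz and now divide by 1000 instead of 100 before storing them in the 16-bit watermark fields. A tiny host-side sketch of that conversion is shown below, assuming kHz input and a 16-bit table slot; khz_to_table_clk() is a made-up helper, and the cpu_to_le16() endian swap done in the kernel is omitted to keep the sketch host-only.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the arithmetic in the hunk above:
 * divide a kHz clock by 1000 and truncate it into a 16-bit field. */
static uint16_t khz_to_table_clk(uint32_t clk_khz)
{
        return (uint16_t)(clk_khz / 1000);
}

int main(void)
{
        uint32_t min_dcfclk_khz = 600000;       /* illustrative: 600000 kHz */

        printf("MinClock field = %u\n", khz_to_table_clk(min_dcfclk_khz));
        return 0;
}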
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4855-drm-amd-display-off-by-one-in-find_irq_source_info.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4855-drm-amd-display-off-by-one-in-find_irq_source_info.patch
new file mode 100644
index 00000000..6ccdfe45
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4855-drm-amd-display-off-by-one-in-find_irq_source_info.patch
@@ -0,0 +1,34 @@
+From a8d9fbbd656f7100ab49705302b7798b61558ecd Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Wed, 4 Jul 2018 12:46:15 +0300
+Subject: [PATCH 4855/5725] drm/amd/display: off by one in
+ find_irq_source_info()
+
+The ->info[] array has DAL_IRQ_SOURCES_NUMBER elements so this condition
+should be >= instead of > or we could read one element beyond the end of
+the array.
+
+Fixes: 4562236b3bc0 ("drm/amd/dc: Add dc display driver (v2)")
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/irq/irq_service.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
+index dcdfa0f..604bea0 100644
+--- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
++++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
+@@ -78,7 +78,7 @@ const struct irq_source_info *find_irq_source_info(
+ struct irq_service *irq_service,
+ enum dc_irq_source source)
+ {
+- if (source > DAL_IRQ_SOURCES_NUMBER || source < DC_IRQ_SOURCE_INVALID)
++ if (source >= DAL_IRQ_SOURCES_NUMBER || source < DC_IRQ_SOURCE_INVALID)
+ return NULL;
+
+ return &irq_service->info[source];
+--
+2.7.4
+
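The fix works because an array declared with DAL_IRQ_SOURCES_NUMBER entries has valid indices 0 through DAL_IRQ_SOURCES_NUMBER - 1, so the guard must also reject a value equal to the size. A self-contained illustration with a made-up size of 4 follows; the array contents and the lookup() helper are illustrative, not the driver's table.

#include <stdio.h>

#define DAL_IRQ_SOURCES_NUMBER 4        /* illustrative size */

static const char *info[DAL_IRQ_SOURCES_NUMBER] = { "a", "b", "c", "d" };

static const char *lookup(int source)
{
        /* Correct guard: source == DAL_IRQ_SOURCES_NUMBER is already one
         * element past the end, so it must be rejected along with larger
         * values; '>' alone would let it through. */
        if (source >= DAL_IRQ_SOURCES_NUMBER || source < 0)
                return NULL;
        return info[source];
}

int main(void)
{
        const char *ok  = lookup(3);
        const char *bad = lookup(4);

        printf("index 3: %s\n", ok  ? ok  : "(out of range)");
        printf("index 4: %s\n", bad ? bad : "(out of range)");
        return 0;
}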
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4856-Revert-drm-amd-display-Fix-indentation-in-dcn10-reso.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4856-Revert-drm-amd-display-Fix-indentation-in-dcn10-reso.patch
new file mode 100644
index 00000000..38c706fa
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4856-Revert-drm-amd-display-Fix-indentation-in-dcn10-reso.patch
@@ -0,0 +1,33 @@
+From 2f357ab39fe1c1fd0e059538f84e37dbf09527d7 Mon Sep 17 00:00:00 2001
+From: Harry Wentland <harry.wentland@amd.com>
+Date: Wed, 4 Jul 2018 13:47:07 -0400
+Subject: [PATCH 4856/5725] Revert "drm/amd/display: Fix indentation in dcn10
+ resource constructor"
+
+That change was a merge gone bad.
+
+This reverts commit cb1d7eacb58f7d1b7d0e57b26dc02d45eada4a3c.
+
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+index 0a313dc..c92a156 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+@@ -1068,8 +1068,7 @@ static bool construct(
+
+ ctx->dc_bios->regs = &bios_regs;
+
+- pool->base.res_cap = &res_cap;
+-
++ pool->base.res_cap = &res_cap;
+ pool->base.funcs = &dcn10_res_pool_funcs;
+
+ /*
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4857-drm-amd-display-dc-dce-Fix-multiple-potential-intege.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4857-drm-amd-display-dc-dce-Fix-multiple-potential-intege.patch
new file mode 100644
index 00000000..a4216c60
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4857-drm-amd-display-dc-dce-Fix-multiple-potential-intege.patch
@@ -0,0 +1,70 @@
+From 1f5b35820dc07720fdd328e6d434ff1b5c243514 Mon Sep 17 00:00:00 2001
+From: "Gustavo A. R. Silva" <gustavo@embeddedor.com>
+Date: Wed, 4 Jul 2018 08:22:11 -0500
+Subject: [PATCH 4857/5725] drm/amd/display/dc/dce: Fix multiple potential
+ integer overflows
+
+Add suffix ULL to constant 5 and cast variables target_pix_clk_khz and
+feedback_divider to uint64_t in order to avoid multiple potential integer
+overflows and give the compiler complete information about the proper
+arithmetic to use.
+
+Notice that such constant and variables are used in contexts that
+expect expressions of type uint64_t (64 bits, unsigned). The current
+casts to uint64_t effectively apply to each expression as a whole,
+but they do not prevent them from being evaluated using 32-bit
+arithmetic instead of 64-bit arithmetic.
+
+Also, once the expressions are properly evaluated using 64-bit
+arithmetic, there is no need for the parentheses that enclose
+them.
+
+Addresses-Coverity-ID: 1460245 ("Unintentional integer overflow")
+Addresses-Coverity-ID: 1460286 ("Unintentional integer overflow")
+Addresses-Coverity-ID: 1460401 ("Unintentional integer overflow")
+Fixes: 4562236b3bc0 ("drm/amd/dc: Add dc display driver (v2)")
+Signed-off-by: Gustavo A. R. Silva <gustavo@embeddedor.com>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+index 599c7ab..f72f331 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+@@ -133,7 +133,7 @@ static bool calculate_fb_and_fractional_fb_divider(
+ uint64_t feedback_divider;
+
+ feedback_divider =
+- (uint64_t)(target_pix_clk_khz * ref_divider * post_divider);
++ (uint64_t)target_pix_clk_khz * ref_divider * post_divider;
+ feedback_divider *= 10;
+ /* additional factor, since we divide by 10 afterwards */
+ feedback_divider *= (uint64_t)(calc_pll_cs->fract_fb_divider_factor);
+@@ -145,8 +145,8 @@ static bool calculate_fb_and_fractional_fb_divider(
+ * of fractional feedback decimal point and the fractional FB Divider precision
+ * is 2 then the equation becomes (ullfeedbackDivider + 5*100) / (10*100))*/
+
+- feedback_divider += (uint64_t)
+- (5 * calc_pll_cs->fract_fb_divider_precision_factor);
++ feedback_divider += 5ULL *
++ calc_pll_cs->fract_fb_divider_precision_factor;
+ feedback_divider =
+ div_u64(feedback_divider,
+ calc_pll_cs->fract_fb_divider_precision_factor * 10);
+@@ -203,8 +203,8 @@ static bool calc_fb_divider_checking_tolerance(
+ &fract_feedback_divider);
+
+ /*Actual calculated value*/
+- actual_calc_clk_khz = (uint64_t)(feedback_divider *
+- calc_pll_cs->fract_fb_divider_factor) +
++ actual_calc_clk_khz = (uint64_t)feedback_divider *
++ calc_pll_cs->fract_fb_divider_factor +
+ fract_feedback_divider;
+ actual_calc_clk_khz *= calc_pll_cs->ref_freq_khz;
+ actual_calc_clk_khz =
+--
+2.7.4
+
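The key point in the commit message is that casting the finished product to uint64_t is too late: the multiplication has already been evaluated in 32 bits. A short host-side demonstration follows, with made-up operand values chosen only so the 32-bit product wraps.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t target_pix_clk_khz = 600000;   /* illustrative values */
        uint32_t ref_divider = 200;
        uint32_t post_divider = 64;

        /* Wrong: the product is computed in 32 bits and wraps before the
         * cast widens the (already truncated) result. */
        uint64_t wrapped = (uint64_t)(target_pix_clk_khz * ref_divider * post_divider);

        /* Right: widening one operand forces 64-bit multiplication throughout,
         * which is what the patch achieves with casts and the ULL suffix. */
        uint64_t correct = (uint64_t)target_pix_clk_khz * ref_divider * post_divider;

        printf("wrapped: %llu  correct: %llu\n",
               (unsigned long long)wrapped, (unsigned long long)correct);
        return 0;
}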
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4858-drm-amd-Remove-errors-from-sphinx-documentation.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4858-drm-amd-Remove-errors-from-sphinx-documentation.patch
new file mode 100644
index 00000000..f58870cf
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4858-drm-amd-Remove-errors-from-sphinx-documentation.patch
@@ -0,0 +1,125 @@
+From 80b6289c7db5aa198e09c0aedaf4cb0b205181d1 Mon Sep 17 00:00:00 2001
+From: Darren Powell <darren.powell@amd.com>
+Date: Mon, 25 Jun 2018 19:04:03 -0400
+Subject: [PATCH 4858/5725] drm/amd: Remove errors from sphinx documentation
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Eliminating the warnings produced by sphinx when processing the sphinx comments in
+ amdgpu_device.c & amdgpu_mn.c
+
+Signed-off-by: Darren Powell <darren.powell@amd.com>
+Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 20 ++++++++++++--------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 5 +++--
+ 2 files changed, 15 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 88c3879..2ffb27c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1126,7 +1126,7 @@ static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
+ /**
+ * amdgpu_device_ip_set_clockgating_state - set the CG state
+ *
+- * @adev: amdgpu_device pointer
++ * @dev: amdgpu_device pointer
+ * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
+ * @state: clockgating state (gate or ungate)
+ *
+@@ -1160,7 +1160,7 @@ int amdgpu_device_ip_set_clockgating_state(void *dev,
+ /**
+ * amdgpu_device_ip_set_powergating_state - set the PG state
+ *
+- * @adev: amdgpu_device pointer
++ * @dev: amdgpu_device pointer
+ * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
+ * @state: powergating state (gate or ungate)
+ *
+@@ -1271,7 +1271,7 @@ bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
+ * amdgpu_device_ip_get_ip_block - get a hw IP pointer
+ *
+ * @adev: amdgpu_device pointer
+- * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
++ * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
+ *
+ * Returns a pointer to the hardware IP block structure
+ * if it exists for the asic, otherwise NULL.
+@@ -2278,7 +2278,7 @@ bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
+ * amdgpu_device_init - initialize the driver
+ *
+ * @adev: amdgpu_device pointer
+- * @pdev: drm dev pointer
++ * @ddev: drm dev pointer
+ * @pdev: pci dev pointer
+ * @flags: driver flags
+ *
+@@ -2654,8 +2654,9 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
+ /**
+ * amdgpu_device_suspend - initiate device suspend
+ *
+- * @pdev: drm dev pointer
+- * @state: suspend state
++ * @dev: drm dev pointer
++ * @suspend: suspend state
++ * @fbcon : notify the fbdev of suspend
+ *
+ * Puts the hw in the suspend state (all asics).
+ * Returns 0 for success or an error on failure.
+@@ -2753,7 +2754,9 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
+ /**
+ * amdgpu_device_resume - initiate device resume
+ *
+- * @pdev: drm dev pointer
++ * @dev: drm dev pointer
++ * @resume: resume state
++ * @fbcon : notify the fbdev of resume
+ *
+ * Bring the hw back to operating state (all asics).
+ * Returns 0 for success or an error on failure.
+@@ -3224,6 +3227,7 @@ static int amdgpu_device_reset(struct amdgpu_device *adev)
+ * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
+ *
+ * @adev: amdgpu device pointer
++ * @from_hypervisor: request from hypervisor
+ *
+ * do VF FLR and reinitialize Asic
+ * return 0 means successed otherwise failed
+@@ -3274,7 +3278,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
+ *
+ * @adev: amdgpu device pointer
+ * @job: which job trigger hang
+- * @force forces reset regardless of amdgpu_gpu_recovery
++ * @force: forces reset regardless of amdgpu_gpu_recovery
+ *
+ * Attempt to reset the GPU if it has hung (all asics).
+ * Returns 0 for success or an error on failure.
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+index 2f3fb4f..6b53a88 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+@@ -58,7 +58,8 @@
+ *
+ * @adev: amdgpu device pointer
+ * @mm: process address space
+- * @mn: MMU notifier structur
++ * @mn: MMU notifier structure
++ * @type: type of MMU notifier
+ * @work: destruction work item
+ * @node: hash table node to find structure by adev and mn
+ * @lock: rw semaphore protecting the notifier nodes
+@@ -294,7 +295,7 @@ static void amdgpu_mn_invalidate_range_end_gfx(struct mmu_notifier *mn,
+ * amdgpu_mn_invalidate_range_start_hsa - callback to notify about mm change
+ *
+ * @mn: our notifier
+- * @mn: the mm this callback is about
++ * @mm: the mm this callback is about
+ * @start: start of updated range
+ * @end: end of updated range
+ *
+--
+2.7.4
+
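The warnings being silenced come from kernel-doc blocks whose @parameter names do not match the function's actual parameters (for example @adev documented on a function whose parameter is dev). A minimal sketch of a well-formed kernel-doc comment on a hypothetical function is shown below, only to illustrate the naming rule; example_set_state() is not a real driver symbol.

/**
 * example_set_state() - set the state of a hypothetical block
 * @dev: opaque device handle (name must match the parameter below)
 * @state: new state to apply
 *
 * kernel-doc, and the sphinx build on top of it, warns when the name after
 * '@' does not correspond to a real parameter; that is the class of warning
 * the patch above removes.
 *
 * Return: 0 on success or a negative error code.
 */
int example_set_state(void *dev, int state)
{
        (void)dev;
        (void)state;
        return 0;
}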
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4859-drm-amdgpu-update-documentation-for-amdgpu_drv.c.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4859-drm-amdgpu-update-documentation-for-amdgpu_drv.c.patch
new file mode 100644
index 00000000..aa8e91a6
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4859-drm-amdgpu-update-documentation-for-amdgpu_drv.c.patch
@@ -0,0 +1,437 @@
+From ec791cfe3c78671a01b5e70fd847a96804f8970c Mon Sep 17 00:00:00 2001
+From: Sonny Jiang <sonny.jiang@amd.com>
+Date: Tue, 26 Jun 2018 15:48:34 -0400
+Subject: [PATCH 4859/5725] drm/amdgpu: update documentation for amdgpu_drv.c
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Signed-off-by: Sonny Jiang <sonny.jiang@amd.com>
+Acked-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 232 +++++++++++++++++++++++++++++++-
+ 1 file changed, 225 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index d01d4f0..966e337 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -1,10 +1,3 @@
+-/**
+- * \file amdgpu_drv.c
+- * AMD Amdgpu driver
+- *
+- * \author Gareth Hughes <gareth@valinux.com>
+- */
+-
+ /*
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+@@ -139,102 +132,239 @@ int amdgpu_gpu_recovery = -1; /* auto */
+ int amdgpu_emu_mode = 0;
+ uint amdgpu_smu_memory_pool_size = 0;
+
++/**
++ * DOC: vramlimit (int)
++ * Restrict the total amount of VRAM in MiB for testing. The default is 0 (Use full VRAM).
++ */
+ MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
+ module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
+
++/**
++ * DOC: vis_vramlimit (int)
++ * Restrict the amount of CPU visible VRAM in MiB for testing. The default is 0 (Use full CPU visible VRAM).
++ */
+ MODULE_PARM_DESC(vis_vramlimit, "Restrict visible VRAM for testing, in megabytes");
+ module_param_named(vis_vramlimit, amdgpu_vis_vram_limit, int, 0444);
+
++/**
++ * DOC: gartsize (uint)
++ * Restrict the size of GART in Mib (32, 64, etc.) for testing. The default is -1 (The size depends on asic).
++ */
+ MODULE_PARM_DESC(gartsize, "Size of GART to setup in megabytes (32, 64, etc., -1=auto)");
+ module_param_named(gartsize, amdgpu_gart_size, uint, 0600);
+
++/**
++ * DOC: gttsize (int)
++ * Restrict the size of GTT domain in MiB for testing. The default is -1 (It's VRAM size if 3GB < VRAM < 3/4 RAM,
++ * otherwise 3/4 RAM size).
++ */
+ MODULE_PARM_DESC(gttsize, "Size of the GTT domain in megabytes (-1 = auto)");
+ module_param_named(gttsize, amdgpu_gtt_size, int, 0600);
+
++/**
++ * DOC: moverate (int)
++ * Set maximum buffer migration rate in MB/s. The default is -1 (8 MB/s).
++ */
+ MODULE_PARM_DESC(moverate, "Maximum buffer migration rate in MB/s. (32, 64, etc., -1=auto, 0=1=disabled)");
+ module_param_named(moverate, amdgpu_moverate, int, 0600);
+
++/**
++ * DOC: benchmark (int)
++ * Run benchmarks. The default is 0 (Skip benchmarks).
++ */
+ MODULE_PARM_DESC(benchmark, "Run benchmark");
+ module_param_named(benchmark, amdgpu_benchmarking, int, 0444);
+
++/**
++ * DOC: test (int)
++ * Test BO GTT->VRAM and VRAM->GTT GPU copies. The default is 0 (Skip test, only set 1 to run test).
++ */
+ MODULE_PARM_DESC(test, "Run tests");
+ module_param_named(test, amdgpu_testing, int, 0444);
+
++/**
++ * DOC: audio (int)
++ * Set HDMI/DP audio. Only affects non-DC display handling. The default is -1 (Enabled); set 0 to disable it.
++ */
+ MODULE_PARM_DESC(audio, "Audio enable (-1 = auto, 0 = disable, 1 = enable)");
+ module_param_named(audio, amdgpu_audio, int, 0444);
+
++/**
++ * DOC: disp_priority (int)
++ * Set display Priority (1 = normal, 2 = high). Only affects non-DC display handling. The default is 0 (auto).
++ */
+ MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)");
+ module_param_named(disp_priority, amdgpu_disp_priority, int, 0444);
+
++/**
++ * DOC: hw_i2c (int)
++ * To enable hw i2c engine. Only affects non-DC display handling. The default is 0 (Disabled).
++ */
+ MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)");
+ module_param_named(hw_i2c, amdgpu_hw_i2c, int, 0444);
+
++/**
++ * DOC: pcie_gen2 (int)
++ * To disable PCIE Gen2/3 mode (0 = disable, 1 = enable). The default is -1 (auto, enabled).
++ */
+ MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (-1 = auto, 0 = disable, 1 = enable)");
+ module_param_named(pcie_gen2, amdgpu_pcie_gen2, int, 0444);
+
++/**
++ * DOC: msi (int)
++ * To disable Message Signaled Interrupts (MSI) functionality (1 = enable, 0 = disable). The default is -1 (auto, enabled).
++ */
+ MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
+ module_param_named(msi, amdgpu_msi, int, 0444);
+
++/**
++ * DOC: lockup_timeout (int)
++ * Set GPU scheduler timeout value in ms. Value 0 is invalidated, will be adjusted to 10000.
++ * Negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET). The default is 10000.
++ */
+ MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms > 0 (default 10000)");
+ module_param_named(lockup_timeout, amdgpu_lockup_timeout, int, 0444);
+
++/**
++ * DOC: dpm (int)
++ * Override for dynamic power management setting (1 = enable, 0 = disable). The default is -1 (auto).
++ */
+ MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)");
+ module_param_named(dpm, amdgpu_dpm, int, 0444);
+
++/**
++ * DOC: fw_load_type (int)
++ * Set different firmware loading type for debugging (0 = direct, 1 = SMU, 2 = PSP). The default is -1 (auto).
++ */
+ MODULE_PARM_DESC(fw_load_type, "firmware loading type (0 = direct, 1 = SMU, 2 = PSP, -1 = auto)");
+ module_param_named(fw_load_type, amdgpu_fw_load_type, int, 0444);
+
++/**
++ * DOC: aspm (int)
++ * To disable ASPM (1 = enable, 0 = disable). The default is -1 (auto, enabled).
++ */
+ MODULE_PARM_DESC(aspm, "ASPM support (1 = enable, 0 = disable, -1 = auto)");
+ module_param_named(aspm, amdgpu_aspm, int, 0444);
+
++/**
++ * DOC: runpm (int)
++ * Override for runtime power management control for dGPUs in PX/HG laptops. The amdgpu driver can dynamically power down
++ * the dGPU on PX/HG laptops when it is idle. The default is -1 (auto enable). Setting the value to 0 disables this functionality.
++ */
+ MODULE_PARM_DESC(runpm, "PX runtime pm (1 = force enable, 0 = disable, -1 = PX only default)");
+ module_param_named(runpm, amdgpu_runtime_pm, int, 0444);
+
++/**
++ * DOC: ip_block_mask (uint)
++ * Override what IP blocks are enabled on the GPU. Each GPU is a collection of IP blocks (gfx, display, video, etc.).
++ * Use this parameter to disable specific blocks. Note that the IP blocks do not have a fixed index. Some asics may not have
++ * some IPs or may include multiple instances of an IP so the ordering varies from asic to asic. See the driver output in
++ * the kernel log for the list of IPs on the asic. The default is 0xffffffff (enable all blocks on a device).
++ */
+ MODULE_PARM_DESC(ip_block_mask, "IP Block Mask (all blocks enabled (default))");
+ module_param_named(ip_block_mask, amdgpu_ip_block_mask, uint, 0444);
+
++/**
++ * DOC: bapm (int)
++ * Bidirectional Application Power Management (BAPM) used to dynamically share TDP between CPU and GPU. Set value 0 to disable it.
++ * The default is -1 (auto, enabled).
++ */
+ MODULE_PARM_DESC(bapm, "BAPM support (1 = enable, 0 = disable, -1 = auto)");
+ module_param_named(bapm, amdgpu_bapm, int, 0444);
+
++/**
++ * DOC: deep_color (int)
++ * Set 1 to enable Deep Color support. Only affects non-DC display handling. The default is 0 (disabled).
++ */
+ MODULE_PARM_DESC(deep_color, "Deep Color support (1 = enable, 0 = disable (default))");
+ module_param_named(deep_color, amdgpu_deep_color, int, 0444);
+
++/**
++ * DOC: vm_size (int)
++ * Override the size of the GPU's per client virtual address space in GiB. The default is -1 (automatic for each asic).
++ */
+ MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 64GB)");
+ module_param_named(vm_size, amdgpu_vm_size, int, 0444);
+
++/**
++ * DOC: vm_fragment_size (int)
++ * Override VM fragment size in bits (4, 5, etc. 4 = 64K, 9 = 2M). The default is -1 (automatic for each asic).
++ */
+ MODULE_PARM_DESC(vm_fragment_size, "VM fragment size in bits (4, 5, etc. 4 = 64K (default), Max 9 = 2M)");
+ module_param_named(vm_fragment_size, amdgpu_vm_fragment_size, int, 0444);
+
++/**
++ * DOC: vm_block_size (int)
++ * Override VM page table size in bits (default depending on vm_size and hw setup). The default is -1 (automatic for each asic).
++ */
+ MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default depending on vm_size)");
+ module_param_named(vm_block_size, amdgpu_vm_block_size, int, 0444);
+
++/**
++ * DOC: vm_fault_stop (int)
++ * Stop on VM fault for debugging (0 = never, 1 = print first, 2 = always). The default is 0 (No stop).
++ */
+ MODULE_PARM_DESC(vm_fault_stop, "Stop on VM fault (0 = never (default), 1 = print first, 2 = always)");
+ module_param_named(vm_fault_stop, amdgpu_vm_fault_stop, int, 0444);
+
++/**
++ * DOC: vm_debug (int)
++ * Debug VM handling (0 = disabled, 1 = enabled). The default is 0 (Disabled).
++ */
+ MODULE_PARM_DESC(vm_debug, "Debug VM handling (0 = disabled (default), 1 = enabled)");
+ module_param_named(vm_debug, amdgpu_vm_debug, int, 0644);
+
++/**
++ * DOC: vm_update_mode (int)
++ * Override VM update mode. VM updated by using CPU (0 = never, 1 = Graphics only, 2 = Compute only, 3 = Both). The default
++ * is -1 (Only in large BAR(LB) systems Compute VM tables will be updated by CPU, otherwise 0, never).
++ */
+ MODULE_PARM_DESC(vm_update_mode, "VM update using CPU (0 = never (default except for large BAR(LB)), 1 = Graphics only, 2 = Compute only (default for LB), 3 = Both");
+ module_param_named(vm_update_mode, amdgpu_vm_update_mode, int, 0444);
+
++/**
++ * DOC: vram_page_split (int)
++ * Override the number of pages after we split VRAM allocations (default 512, -1 = disable). The default is 512.
++ */
+ MODULE_PARM_DESC(vram_page_split, "Number of pages after we split VRAM allocations (default 512, -1 = disable)");
+ module_param_named(vram_page_split, amdgpu_vram_page_split, int, 0444);
+
++/**
++ * DOC: exp_hw_support (int)
++ * Enable experimental hw support (1 = enable). The default is 0 (disabled).
++ */
+ MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
+ module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
+
++/**
++ * DOC: dc (int)
++ * Disable/Enable Display Core driver for debugging (1 = enable, 0 = disable). The default is -1 (automatic for each asic).
++ */
+ MODULE_PARM_DESC(dc, "Display Core driver (1 = enable, 0 = disable, -1 = auto (default))");
+ module_param_named(dc, amdgpu_dc, int, 0444);
+
+ MODULE_PARM_DESC(dc_log, "Display Core Log Level (0 = minimal (default), 1 = chatty");
+ module_param_named(dc_log, amdgpu_dc_log, int, 0444);
+
++/**
++ * DOC: sched_jobs (int)
++ * Override the max number of jobs supported in the sw queue. The default is 32.
++ */
+ MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 32)");
+ module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
+
++/**
++ * DOC: sched_hw_submission (int)
++ * Override the max number of HW submissions. The default is 2.
++ */
+ MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)");
+ module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
+
++/**
++ * DOC: ppfeaturemask (uint)
++ * Override power features enabled. See enum PP_FEATURE_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
++ * The default is the current set of stable power features.
++ */
+ MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default))");
+ module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, uint, 0444);
+
+@@ -247,58 +377,135 @@ module_param_named(direct_gma_size, amdgpu_direct_gma_size, int, 0444);
+ MODULE_PARM_DESC(ssg, "SSG support (1 = enable, 0 = disable (default))");
+ module_param_named(ssg, amdgpu_ssg_enabled, int, 0444);
+
++/**
++ * DOC: pcie_gen_cap (uint)
++ * Override PCIE gen speed capabilities. See the CAIL flags in drivers/gpu/drm/amd/include/amd_pcie.h.
++ * The default is 0 (automatic for each asic).
++ */
+ MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))");
+ module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444);
+
++/**
++ * DOC: pcie_lane_cap (uint)
++ * Override PCIE lanes capabilities. See the CAIL flags in drivers/gpu/drm/amd/include/amd_pcie.h.
++ * The default is 0 (automatic for each asic).
++ */
+ MODULE_PARM_DESC(pcie_lane_cap, "PCIE Lane Caps (0: autodetect (default))");
+ module_param_named(pcie_lane_cap, amdgpu_pcie_lane_cap, uint, 0444);
+
++/**
++ * DOC: cg_mask (uint)
++ * Override Clockgating features enabled on GPU (0 = disable clock gating). See the AMD_CG_SUPPORT flags in
++ * drivers/gpu/drm/amd/include/amd_shared.h. The default is 0xffffffff (all enabled).
++ */
+ MODULE_PARM_DESC(cg_mask, "Clockgating flags mask (0 = disable clock gating)");
+ module_param_named(cg_mask, amdgpu_cg_mask, uint, 0444);
+
++/**
++ * DOC: pg_mask (uint)
++ * Override Powergating features enabled on GPU (0 = disable power gating). See the AMD_PG_SUPPORT flags in
++ * drivers/gpu/drm/amd/include/amd_shared.h. The default is 0xffffffff (all enabled).
++ */
+ MODULE_PARM_DESC(pg_mask, "Powergating flags mask (0 = disable power gating)");
+ module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444);
+
++/**
++ * DOC: sdma_phase_quantum (uint)
++ * Override SDMA context switch phase quantum (x 1K GPU clock cycles, 0 = no change). The default is 32.
++ */
+ MODULE_PARM_DESC(sdma_phase_quantum, "SDMA context switch phase quantum (x 1K GPU clock cycles, 0 = no change (default 32))");
+ module_param_named(sdma_phase_quantum, amdgpu_sdma_phase_quantum, uint, 0444);
+
++/**
++ * DOC: disable_cu (charp)
++ * Set to disable CUs, given as a comma-separated list of se.sh.cu entries. The default is NULL.
++ */
+ MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)");
+ module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444);
+
++/**
++ * DOC: virtual_display (charp)
++ * Set to enable the virtual display feature. This feature provides virtual display hardware on headless boards
++ * or in virtualized environments. The value is set as xxxx:xx:xx.x,x;xxxx:xx:xx.x,x, i.e. the PCI address of
++ * the device plus the number of crtcs to expose. E.g., 0000:26:00.0,4 would enable 4 virtual crtcs on the PCI
++ * device at 26:00.0. The default is NULL.
++ */
+ MODULE_PARM_DESC(virtual_display,
+ "Enable virtual display feature (the virtual_display will be set like xxxx:xx:xx.x,x;xxxx:xx:xx.x,x)");
+ module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444);
+
++/**
++ * DOC: ngg (int)
++ * Set to enable Next Generation Graphics (1 = enable). The default is 0 (disabled).
++ */
+ MODULE_PARM_DESC(ngg, "Next Generation Graphics (1 = enable, 0 = disable(default depending on gfx))");
+ module_param_named(ngg, amdgpu_ngg, int, 0444);
+
++/**
++ * DOC: prim_buf_per_se (int)
++ * Override the size of the Primitive Buffer per Shader Engine in bytes. The default is 0 (depending on gfx).
++ */
+ MODULE_PARM_DESC(prim_buf_per_se, "the size of Primitive Buffer per Shader Engine (default depending on gfx)");
+ module_param_named(prim_buf_per_se, amdgpu_prim_buf_per_se, int, 0444);
+
++/**
++ * DOC: pos_buf_per_se (int)
++ * Override the size of the Position Buffer per Shader Engine in bytes. The default is 0 (depending on gfx).
++ */
+ MODULE_PARM_DESC(pos_buf_per_se, "the size of Position Buffer per Shader Engine (default depending on gfx)");
+ module_param_named(pos_buf_per_se, amdgpu_pos_buf_per_se, int, 0444);
+
++/**
++ * DOC: cntl_sb_buf_per_se (int)
++ * Override the size of the Control Sideband per Shader Engine in bytes. The default is 0 (depending on gfx).
++ */
+ MODULE_PARM_DESC(cntl_sb_buf_per_se, "the size of Control Sideband per Shader Engine (default depending on gfx)");
+ module_param_named(cntl_sb_buf_per_se, amdgpu_cntl_sb_buf_per_se, int, 0444);
+
++/**
++ * DOC: param_buf_per_se (int)
++ * Override the size of the Off-Chip Parameter Cache per Shader Engine in bytes. The default is 0 (depending on gfx).
++ */
+ MODULE_PARM_DESC(param_buf_per_se, "the size of Off-Chip Parameter Cache per Shader Engine (default depending on gfx)");
+ module_param_named(param_buf_per_se, amdgpu_param_buf_per_se, int, 0444);
+
++/**
++ * DOC: job_hang_limit (int)
++ * Set how long a job is allowed to hang before it is dropped. The default is 0.
++ */
+ MODULE_PARM_DESC(job_hang_limit, "how long a job is allowed to hang before being dropped (default 0)");
+ module_param_named(job_hang_limit, amdgpu_job_hang_limit, int, 0444);
+
++/**
++ * DOC: lbpw (int)
++ * Override Load Balancing Per Watt (LBPW) support (1 = enable, 0 = disable). The default is -1 (auto, enabled).
++ */
+ MODULE_PARM_DESC(lbpw, "Load Balancing Per Watt (LBPW) support (1 = enable, 0 = disable, -1 = auto)");
+ module_param_named(lbpw, amdgpu_lbpw, int, 0444);
+
+ MODULE_PARM_DESC(compute_multipipe, "Force compute queues to be spread across pipes (1 = enable, 0 = disable, -1 = auto)");
+ module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444);
+
++/**
++ * DOC: gpu_recovery (int)
++ * Set to enable GPU recovery mechanism (1 = enable, 0 = disable). The default is -1 (auto, disabled except SRIOV).
++ */
+ MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism (1 = enable, 0 = disable, -1 = auto)");
+ module_param_named(gpu_recovery, amdgpu_gpu_recovery, int, 0444);
+
++/**
++ * DOC: emu_mode (int)
++ * Set to 1 to enable emulation mode. This is only needed when running on an emulator. The default is 0 (disabled).
++ */
+ MODULE_PARM_DESC(emu_mode, "Emulation mode, (1 = enable, 0 = disable)");
+ module_param_named(emu_mode, amdgpu_emu_mode, int, 0444);
+
++/**
++ * DOC: si_support (int)
++ * Select the driver for SI ASICs. This parameter only takes effect when CONFIG_DRM_AMDGPU_SI is set. When the radeon driver is
++ * also enabled, set the value to 0 to use the radeon driver or to 1 to use the amdgpu driver. The default is to use the radeon
++ * driver when it is available, otherwise the amdgpu driver.
++ */
+ #ifdef CONFIG_DRM_AMDGPU_SI
+
+ #if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)
+@@ -311,6 +518,12 @@ MODULE_PARM_DESC(si_support, "SI support (1 = enabled (default), 0 = disabled)")
+ module_param_named(si_support, amdgpu_si_support, int, 0444);
+ #endif
+
++/**
++ * DOC: cik_support (int)
++ * Select the driver for CIK ASICs. This parameter only takes effect when CONFIG_DRM_AMDGPU_CIK is set. When the radeon driver is
++ * also enabled, set the value to 0 to use the radeon driver or to 1 to use the amdgpu driver. The default is to use the radeon
++ * driver when it is available, otherwise the amdgpu driver.
++ */
+ #ifdef CONFIG_DRM_AMDGPU_CIK
+
+ #if (0 && (defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)))
+@@ -323,6 +536,11 @@ MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled (default), 0 = disabled)
+ module_param_named(cik_support, amdgpu_cik_support, int, 0444);
+ #endif
+
++/**
++ * DOC: smu_memory_pool_size (uint)
++ * Used to reserve GTT for SMU debug usage; set the value to 0 to disable it. The actual size is value * 256 MiB.
++ * E.g. 0x1 = 256 MiB, 0x2 = 512 MiB, 0x4 = 1 GiB, 0x8 = 2 GiB. The default is 0 (disabled).
++ */
+ MODULE_PARM_DESC(smu_memory_pool_size,
+ "reserve gtt for smu debug usage, 0 = disable,"
+ "0x1 = 256Mbyte, 0x2 = 512Mbyte, 0x4 = 1 Gbyte, 0x8 = 2GByte");
+--
+2.7.4
+
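Note: the DOC blocks above follow the pattern of pairing a kernel-doc comment with MODULE_PARM_DESC() and module_param_named(). The minimal sketch below shows that same pattern on a hypothetical parameter; the parameter name and its semantics are illustrative only and are not part of the amdgpu driver.

#include <linux/module.h>
#include <linux/moduleparam.h>

/* Hypothetical parameter, shown only to illustrate the pattern above. */
static int example_timeout = -1;

/**
 * DOC: example_timeout (int)
 * Override the example timeout in milliseconds (0 = disable).
 * The default is -1 (auto).
 */
MODULE_PARM_DESC(example_timeout,
	"example timeout in ms (0 = disable, -1 = auto (default))");
module_param_named(example_timeout, example_timeout, int, 0444);
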
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4860-drm-amd-Add-sphinx-documentation-for-amd_ip_funcs.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4860-drm-amd-Add-sphinx-documentation-for-amd_ip_funcs.patch
new file mode 100644
index 00000000..31f48579
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4860-drm-amd-Add-sphinx-documentation-for-amd_ip_funcs.patch
@@ -0,0 +1,95 @@
+From 27a023ae41e7df6c4bd994bce65396a4fd62ced6 Mon Sep 17 00:00:00 2001
+From: Darren Powell <darren.powell@amd.com>
+Date: Wed, 27 Jun 2018 17:05:20 -0400
+Subject: [PATCH 4860/5725] drm/amd: Add sphinx documentation for amd_ip_funcs
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Signed-off-by: Darren Powell <darren.powell@amd.com>
+Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/include/amd_shared.h | 45 +++++++++++++++++++-------------
+ 1 file changed, 27 insertions(+), 18 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
+index 91459a6..dc3d856 100644
+--- a/drivers/gpu/drm/amd/include/amd_shared.h
++++ b/drivers/gpu/drm/amd/include/amd_shared.h
+@@ -131,45 +131,54 @@ enum PP_FEATURE_MASK {
+ PP_STUTTER_MODE = 0x20000,
+ };
+
++/**
++ * struct amd_ip_funcs - general hooks for managing amdgpu IP Blocks
++ */
+ struct amd_ip_funcs {
+- /* Name of IP block */
++ /** @name: Name of IP block */
+ char *name;
+- /* sets up early driver state (pre sw_init), does not configure hw - Optional */
++ /**
++ * @early_init:
++ *
++ * sets up early driver state (pre sw_init),
++ * does not configure hw - Optional
++ */
+ int (*early_init)(void *handle);
+- /* sets up late driver/hw state (post hw_init) - Optional */
++ /** @late_init: sets up late driver/hw state (post hw_init) - Optional */
+ int (*late_init)(void *handle);
+- /* sets up driver state, does not configure hw */
++ /** @sw_init: sets up driver state, does not configure hw */
+ int (*sw_init)(void *handle);
+- /* tears down driver state, does not configure hw */
++ /** @sw_fini: tears down driver state, does not configure hw */
+ int (*sw_fini)(void *handle);
+- /* sets up the hw state */
++ /** @hw_init: sets up the hw state */
+ int (*hw_init)(void *handle);
+- /* tears down the hw state */
++ /** @hw_fini: tears down the hw state */
+ int (*hw_fini)(void *handle);
++ /** @late_fini: final cleanup */
+ void (*late_fini)(void *handle);
+- /* handles IP specific hw/sw changes for suspend */
++ /** @suspend: handles IP specific hw/sw changes for suspend */
+ int (*suspend)(void *handle);
+- /* handles IP specific hw/sw changes for resume */
++ /** @resume: handles IP specific hw/sw changes for resume */
+ int (*resume)(void *handle);
+- /* returns current IP block idle status */
++ /** @is_idle: returns current IP block idle status */
+ bool (*is_idle)(void *handle);
+- /* poll for idle */
++ /** @wait_for_idle: poll for idle */
+ int (*wait_for_idle)(void *handle);
+- /* check soft reset the IP block */
++ /** @check_soft_reset: check soft reset the IP block */
+ bool (*check_soft_reset)(void *handle);
+- /* pre soft reset the IP block */
++ /** @pre_soft_reset: pre soft reset the IP block */
+ int (*pre_soft_reset)(void *handle);
+- /* soft reset the IP block */
++ /** @soft_reset: soft reset the IP block */
+ int (*soft_reset)(void *handle);
+- /* post soft reset the IP block */
++ /** @post_soft_reset: post soft reset the IP block */
+ int (*post_soft_reset)(void *handle);
+- /* enable/disable cg for the IP block */
++ /** @set_clockgating_state: enable/disable cg for the IP block */
+ int (*set_clockgating_state)(void *handle,
+ enum amd_clockgating_state state);
+- /* enable/disable pg for the IP block */
++ /** @set_powergating_state: enable/disable pg for the IP block */
+ int (*set_powergating_state)(void *handle,
+ enum amd_powergating_state state);
+- /* get current clockgating status */
++ /** @get_clockgating_state: get current clockgating status */
+ void (*get_clockgating_state)(void *handle, u32 *flags);
+ };
+
+--
+2.7.4
+
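Note: amd_ip_funcs is the per-IP-block hook table that the driver walks during init/fini, suspend/resume and reset, and the patch above only documents its members. The sketch below shows how an IP block might wire up a subset of these hooks; the block name and functions are hypothetical, optional hooks are simply left NULL, and amd_shared.h is assumed to be on the include path.

#include "amd_shared.h"

/* Hypothetical IP block, illustration only. */
static int example_ip_early_init(void *handle)
{
	return 0;	/* set up pre-sw_init driver state, no hw access */
}

static int example_ip_sw_init(void *handle)
{
	return 0;	/* allocate driver structures, still no hw access */
}

static int example_ip_hw_init(void *handle)
{
	return 0;	/* program the hardware */
}

static bool example_ip_is_idle(void *handle)
{
	return true;	/* report current idle status */
}

static const struct amd_ip_funcs example_ip_funcs = {
	.name       = "example_ip",
	.early_init = example_ip_early_init,
	.sw_init    = example_ip_sw_init,
	.hw_init    = example_ip_hw_init,
	.is_idle    = example_ip_is_idle,
	/* remaining optional hooks intentionally left NULL */
};
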
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4861-drm-amdgpu-separate-gpu-address-from-bo-pin.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4861-drm-amdgpu-separate-gpu-address-from-bo-pin.patch
new file mode 100644
index 00000000..a16f2cca
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4861-drm-amdgpu-separate-gpu-address-from-bo-pin.patch
@@ -0,0 +1,550 @@
+From 9fafbdd9dd4cab370a1a399663f64d47cf8b1e5c Mon Sep 17 00:00:00 2001
+From: Junwei Zhang <Jerry.Zhang@amd.com>
+Date: Mon, 25 Jun 2018 12:51:14 +0800
+Subject: [PATCH 4861/5725] drm/amdgpu: separate gpu address from bo pin
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+It can be obtained via amdgpu_bo_gpu_offset() if needed.
+
+Signed-off-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 6 ++----
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 4 ++--
+ drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c | 6 ++++--
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 5 ++---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 5 ++---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 6 ++----
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 19 +++++++------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 5 ++---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_test.c | 6 ++++--
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 4 ++--
+ drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 10 +++++-----
+ drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 10 +++++-----
+ drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 10 +++++-----
+ drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 10 +++++-----
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3 ++-
+ 17 files changed, 53 insertions(+), 60 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+index af2481a..3593c35 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+@@ -299,7 +299,6 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+ struct amdgpu_bo *bo = NULL;
+ struct amdgpu_bo_param bp;
+ int r;
+- uint64_t gpu_addr_tmp = 0;
+ void *cpu_ptr_tmp = NULL;
+
+ memset(&bp, 0, sizeof(bp));
+@@ -328,8 +327,7 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+ goto allocate_mem_reserve_bo_failed;
+ }
+
+- r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT,
+- &gpu_addr_tmp);
++ r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to pin bo for amdkfd\n", r);
+ goto allocate_mem_pin_bo_failed;
+@@ -343,7 +341,7 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+ }
+
+ *mem_obj = bo;
+- *gpu_addr = gpu_addr_tmp;
++ *gpu_addr = amdgpu_bo_gpu_offset(bo);
+ *cpu_ptr = cpu_ptr_tmp;
+
+ amdgpu_bo_unreserve(bo);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index 0572e2d..1c616bd 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -1663,7 +1663,7 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
+ goto bo_reserve_failed;
+ }
+
+- ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
++ ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
+ if (ret) {
+ pr_err("Failed to pin bo. ret %d\n", ret);
+ goto pin_failed;
+@@ -1720,7 +1720,7 @@ static int pin_bo_wo_map(struct kgd_mem *mem)
+ if (unlikely(ret))
+ return ret;
+
+- ret = amdgpu_bo_pin(bo, mem->domain, NULL);
++ ret = amdgpu_bo_pin(bo, mem->domain);
+ amdgpu_bo_unreserve(bo);
+
+ return ret;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+index 19cfff3..cb88d7e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+@@ -95,7 +95,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
+ r = amdgpu_bo_reserve(sobj, false);
+ if (unlikely(r != 0))
+ goto out_cleanup;
+- r = amdgpu_bo_pin(sobj, sdomain, &saddr);
++ r = amdgpu_bo_pin(sobj, sdomain);
++ saddr = amdgpu_bo_gpu_offset(sobj);
+ amdgpu_bo_unreserve(sobj);
+ if (r) {
+ goto out_cleanup;
+@@ -108,7 +109,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
+ r = amdgpu_bo_reserve(dobj, false);
+ if (unlikely(r != 0))
+ goto out_cleanup;
+- r = amdgpu_bo_pin(dobj, ddomain, &daddr);
++ r = amdgpu_bo_pin(dobj, ddomain);
++ daddr = amdgpu_bo_gpu_offset(dobj);
+ amdgpu_bo_unreserve(dobj);
+ if (r) {
+ goto out_cleanup;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 2ffb27c..ebf370c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -2811,11 +2811,10 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
+ struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+ r = amdgpu_bo_reserve(aobj, true);
+ if (r == 0) {
+- r = amdgpu_bo_pin(aobj,
+- AMDGPU_GEM_DOMAIN_VRAM,
+- &amdgpu_crtc->cursor_addr);
++ r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
+ if (r != 0)
+ DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
++ amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
+ amdgpu_bo_unreserve(aobj);
+ }
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+index 26a5a9c..9dd5daf 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+@@ -157,7 +157,6 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
+ struct amdgpu_bo *new_abo;
+ unsigned long flags;
+ u64 tiling_flags;
+- u64 base;
+ int i, r;
+
+ work = kzalloc(sizeof *work, GFP_KERNEL);
+@@ -191,7 +190,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
+ goto cleanup;
+ }
+
+- r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev), &base);
++ r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev));
+ if (unlikely(r != 0)) {
+ DRM_ERROR("failed to pin new abo buffer before flip\n");
+ goto unreserve;
+@@ -208,7 +207,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
+ amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
+ amdgpu_bo_unreserve(new_abo);
+
+- work->base = base;
++ work->base = amdgpu_bo_gpu_offset(new_abo);
+ work->target_vblank = target - drm_crtc_vblank_count(crtc) +
+ amdgpu_get_vblank_counter_kms(dev, work->crtc_id);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+index 6b65342..92ef673 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+@@ -168,7 +168,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
+ }
+
+
+- ret = amdgpu_bo_pin(abo, domain, NULL);
++ ret = amdgpu_bo_pin(abo, domain);
+ if (ret) {
+ amdgpu_bo_unreserve(abo);
+ goto out_unref;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+index 17d6b9f..a14379c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+@@ -143,14 +143,12 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
+ */
+ int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
+ {
+- uint64_t gpu_addr;
+ int r;
+
+ r = amdgpu_bo_reserve(adev->gart.robj, false);
+ if (unlikely(r != 0))
+ return r;
+- r = amdgpu_bo_pin(adev->gart.robj,
+- AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
++ r = amdgpu_bo_pin(adev->gart.robj, AMDGPU_GEM_DOMAIN_VRAM);
+ if (r) {
+ amdgpu_bo_unreserve(adev->gart.robj);
+ return r;
+@@ -159,7 +157,7 @@ int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
+ if (r)
+ amdgpu_bo_unpin(adev->gart.robj);
+ amdgpu_bo_unreserve(adev->gart.robj);
+- adev->gart.table_addr = gpu_addr;
++ adev->gart.table_addr = amdgpu_bo_gpu_offset(adev->gart.robj);
+ return r;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index 31f8de1..dab26e10 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -275,11 +275,13 @@ int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
+ goto error_free;
+ }
+
+- r = amdgpu_bo_pin(*bo_ptr, domain, gpu_addr);
++ r = amdgpu_bo_pin(*bo_ptr, domain);
+ if (r) {
+ dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
+ goto error_unreserve;
+ }
++ if (gpu_addr)
++ *gpu_addr = amdgpu_bo_gpu_offset(*bo_ptr);
+
+ if (cpu_addr) {
+ r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
+@@ -529,7 +531,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
+ r = amdgpu_bo_reserve(bo, false);
+ if (unlikely(r != 0))
+ return r;
+- r = amdgpu_bo_pin(bo, bp->domain, NULL);
++ r = amdgpu_bo_pin(bo, bp->domain);
+ amdgpu_bo_unreserve(bo);
+ }
+
+@@ -857,7 +859,6 @@ void amdgpu_bo_unref(struct amdgpu_bo **bo)
+ * @domain: domain to be pinned to
+ * @min_offset: the start of requested address range
+ * @max_offset: the end of requested address range
+- * @gpu_addr: GPU offset of the &amdgpu_bo buffer object
+ *
+ * Pins the buffer object according to requested domain and address range. If
+ * the memory is unbound gart memory, binds the pages into gart table. Adjusts
+@@ -875,8 +876,7 @@ void amdgpu_bo_unref(struct amdgpu_bo **bo)
+ * 0 for success or a negative error code on failure.
+ */
+ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
+- u64 min_offset, u64 max_offset,
+- u64 *gpu_addr)
++ u64 min_offset, u64 max_offset)
+ {
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct ttm_operation_ctx ctx = { false, false };
+@@ -908,8 +908,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
+ return -EINVAL;
+
+ bo->pin_count++;
+- if (gpu_addr)
+- *gpu_addr = amdgpu_bo_gpu_offset(bo);
+
+ if (max_offset != 0) {
+ u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;
+@@ -952,8 +950,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
+ }
+
+ bo->pin_count = 1;
+- if (gpu_addr != NULL)
+- *gpu_addr = amdgpu_bo_gpu_offset(bo);
+
+ domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+ if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
+@@ -971,7 +967,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
+ * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
+ * @bo: &amdgpu_bo buffer object to be pinned
+ * @domain: domain to be pinned to
+- * @gpu_addr: GPU offset of the &amdgpu_bo buffer object
+ *
+ * A simple wrapper to amdgpu_bo_pin_restricted().
+ * Provides a simpler API for buffers that do not have any strict restrictions
+@@ -980,9 +975,9 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
+-int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
++int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
+ {
+- return amdgpu_bo_pin_restricted(bo, domain, 0, 0, gpu_addr);
++ return amdgpu_bo_pin_restricted(bo, domain, 0, 0);
+ }
+
+ /**
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+index 8505ad3..2c4fca6 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+@@ -260,10 +260,9 @@ void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
+ void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
+ struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
+ void amdgpu_bo_unref(struct amdgpu_bo **bo);
+-int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr);
++int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain);
+ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
+- u64 min_offset, u64 max_offset,
+- u64 *gpu_addr);
++ u64 min_offset, u64 max_offset);
+ int amdgpu_bo_unpin(struct amdgpu_bo *bo);
+ int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
+ int amdgpu_bo_init(struct amdgpu_device *adev);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+index 8a7a56f..81d3788 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+@@ -236,7 +236,7 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
+ }
+
+ /* pin buffer into GTT */
+- r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
++ r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
+ if (r)
+ goto error_unreserve;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+index 4c600ba..a66f81aa 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+@@ -76,11 +76,12 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
+ r = amdgpu_bo_reserve(vram_obj, false);
+ if (unlikely(r != 0))
+ goto out_unref;
+- r = amdgpu_bo_pin(vram_obj, AMDGPU_GEM_DOMAIN_VRAM, &vram_addr);
++ r = amdgpu_bo_pin(vram_obj, AMDGPU_GEM_DOMAIN_VRAM);
+ if (r) {
+ DRM_ERROR("Failed to pin VRAM object\n");
+ goto out_unres;
+ }
++ vram_addr = amdgpu_bo_gpu_offset(vram_obj);
+ for (i = 0; i < n; i++) {
+ void *gtt_map, *vram_map;
+ void **gart_start, **gart_end;
+@@ -97,11 +98,12 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
+ r = amdgpu_bo_reserve(gtt_obj[i], false);
+ if (unlikely(r != 0))
+ goto out_lclean_unref;
+- r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT, &gart_addr);
++ r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT);
+ if (r) {
+ DRM_ERROR("Failed to pin GTT object %d\n", i);
+ goto out_lclean_unres;
+ }
++ gart_addr = amdgpu_bo_gpu_offset(gtt_obj[i]);
+
+ r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
+ if (r) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index e573325..d0c95f2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -1673,7 +1673,7 @@ static int amdgpu_direct_gma_init(struct amdgpu_device *adev)
+ if (unlikely(r))
+ goto error_free;
+
+- r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, NULL);
++ r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
+ amdgpu_bo_unreserve(abo);
+ if (unlikely(r))
+ goto error_free;
+@@ -1882,7 +1882,7 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
+ AMDGPU_GEM_DOMAIN_VRAM,
+ adev->fw_vram_usage.start_offset,
+ (adev->fw_vram_usage.start_offset +
+- adev->fw_vram_usage.size), NULL);
++ adev->fw_vram_usage.size));
+ if (r)
+ goto error_pin;
+ r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
+diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+index 71c3e13..f9f9165 100644
+--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+@@ -1859,15 +1859,14 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
+ if (unlikely(r != 0))
+ return r;
+
+- if (atomic) {
+- fb_location = amdgpu_bo_gpu_offset(abo);
+- } else {
+- r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
++ if (!atomic) {
++ r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
+ if (unlikely(r != 0)) {
+ amdgpu_bo_unreserve(abo);
+ return -EINVAL;
+ }
+ }
++ fb_location = amdgpu_bo_gpu_offset(abo);
+
+ amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
+ amdgpu_bo_unreserve(abo);
+@@ -2375,13 +2374,14 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
+ return ret;
+ }
+
+- ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
++ ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
+ amdgpu_bo_unreserve(aobj);
+ if (ret) {
+ DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
+ drm_gem_object_put_unlocked(obj);
+ return ret;
+ }
++ amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
+
+ dce_v10_0_lock_cursor(crtc, true);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+index cfb2ddb..b123a99 100644
+--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+@@ -1901,15 +1901,14 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
+ if (unlikely(r != 0))
+ return r;
+
+- if (atomic) {
+- fb_location = amdgpu_bo_gpu_offset(abo);
+- } else {
+- r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
++ if (!atomic) {
++ r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
+ if (unlikely(r != 0)) {
+ amdgpu_bo_unreserve(abo);
+ return -EINVAL;
+ }
+ }
++ fb_location = amdgpu_bo_gpu_offset(abo);
+
+ amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
+ amdgpu_bo_unreserve(abo);
+@@ -2454,13 +2453,14 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
+ return ret;
+ }
+
+- ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
++ ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
+ amdgpu_bo_unreserve(aobj);
+ if (ret) {
+ DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
+ drm_gem_object_put_unlocked(obj);
+ return ret;
+ }
++ amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
+
+ dce_v11_0_lock_cursor(crtc, true);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+index fc580ef..1cbc20c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+@@ -1815,15 +1815,14 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
+ if (unlikely(r != 0))
+ return r;
+
+- if (atomic) {
+- fb_location = amdgpu_bo_gpu_offset(abo);
+- } else {
+- r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
++ if (!atomic) {
++ r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
+ if (unlikely(r != 0)) {
+ amdgpu_bo_unreserve(abo);
+ return -EINVAL;
+ }
+ }
++ fb_location = amdgpu_bo_gpu_offset(abo);
+
+ amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
+ amdgpu_bo_unreserve(abo);
+@@ -2268,13 +2267,14 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
+ return ret;
+ }
+
+- ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
++ ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
+ amdgpu_bo_unreserve(aobj);
+ if (ret) {
+ DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
+ drm_gem_object_put_unlocked(obj);
+ return ret;
+ }
++ amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
+
+ dce_v6_0_lock_cursor(crtc, true);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+index 1df1c89..26fcb39 100644
+--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+@@ -1790,15 +1790,14 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
+ if (unlikely(r != 0))
+ return r;
+
+- if (atomic) {
+- fb_location = amdgpu_bo_gpu_offset(abo);
+- } else {
+- r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
++ if (!atomic) {
++ r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
+ if (unlikely(r != 0)) {
+ amdgpu_bo_unreserve(abo);
+ return -EINVAL;
+ }
+ }
++ fb_location = amdgpu_bo_gpu_offset(abo);
+
+ amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
+ amdgpu_bo_unreserve(abo);
+@@ -2279,13 +2278,14 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
+ return ret;
+ }
+
+- ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
++ ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
+ amdgpu_bo_unreserve(aobj);
+ if (ret) {
+ DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
+ drm_gem_object_put_unlocked(obj);
+ return ret;
+ }
++ amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
+
+ dce_v8_0_lock_cursor(crtc, true);
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 7579008..1d7504a 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -3419,7 +3419,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
+ else
+ domain = AMDGPU_GEM_DOMAIN_VRAM;
+
+- r = amdgpu_bo_pin(rbo, domain, &afb->address);
++ r = amdgpu_bo_pin(rbo, domain);
+ amdgpu_bo_unreserve(rbo);
+
+ if (unlikely(r != 0)) {
+@@ -3427,6 +3427,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
+ DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
+ return r;
+ }
++ afb->address = amdgpu_bo_gpu_offset(rbo);
+
+ amdgpu_bo_ref(rbo);
+
+--
+2.7.4
+
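Note: the patch above removes the gpu_addr out-parameter from amdgpu_bo_pin()/amdgpu_bo_pin_restricted(); callers that need the address now query amdgpu_bo_gpu_offset() explicitly while the BO is still reserved. A sketch of the resulting calling pattern follows; the helper function itself is hypothetical and not part of the patch.

#include "amdgpu_object.h"

/* Illustrative helper, not part of the patch. */
static int example_pin_and_get_addr(struct amdgpu_bo *bo, u32 domain, u64 *addr)
{
	int r;

	r = amdgpu_bo_reserve(bo, false);
	if (r)
		return r;

	/* was: amdgpu_bo_pin(bo, domain, addr) before this patch */
	r = amdgpu_bo_pin(bo, domain);
	if (!r)
		*addr = amdgpu_bo_gpu_offset(bo);

	amdgpu_bo_unreserve(bo);
	return r;
}
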
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4862-drm-amdgpu-allocate-gart-memory-when-it-s-required-v.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4862-drm-amdgpu-allocate-gart-memory-when-it-s-required-v.patch
new file mode 100644
index 00000000..cfa3e473
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4862-drm-amdgpu-allocate-gart-memory-when-it-s-required-v.patch
@@ -0,0 +1,180 @@
+From 1cbe9fdbfb3204fe79d0e9f6496c0ac90d67bd5d Mon Sep 17 00:00:00 2001
+From: Junwei Zhang <Jerry.Zhang@amd.com>
+Date: Mon, 25 Jun 2018 13:32:24 +0800
+Subject: [PATCH 4862/5725] drm/amdgpu: allocate gart memory when it's required
+ (v3)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Instead of allocating GART address space on every bo pin,
+allocate it on demand.
+
+v2: fix error handling
+v3: drop the change on amdgpu_amdkfd_gpuvm.c, not needed.
+
+Signed-off-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 6 ++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c | 14 ++++++++++++--
+ drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 6 ++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | 8 ++++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 15 +++++++++------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_test.c | 5 +++++
+ 6 files changed, 46 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+index 3593c35..0adee23 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+@@ -333,6 +333,12 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+ goto allocate_mem_pin_bo_failed;
+ }
+
++ r = amdgpu_ttm_alloc_gart(&bo->tbo);
++ if (r) {
++ dev_err(adev->dev, "%p bind failed\n", bo);
++ goto allocate_mem_kmap_bo_failed;
++ }
++
+ r = amdgpu_bo_kmap(bo, &cpu_ptr_tmp);
+ if (r) {
+ dev_err(adev->dev,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+index cb88d7e..3079ea8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+@@ -96,11 +96,16 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
+ if (unlikely(r != 0))
+ goto out_cleanup;
+ r = amdgpu_bo_pin(sobj, sdomain);
+- saddr = amdgpu_bo_gpu_offset(sobj);
++ if (r) {
++ amdgpu_bo_unreserve(sobj);
++ goto out_cleanup;
++ }
++ r = amdgpu_ttm_alloc_gart(&sobj->tbo);
+ amdgpu_bo_unreserve(sobj);
+ if (r) {
+ goto out_cleanup;
+ }
++ saddr = amdgpu_bo_gpu_offset(sobj);
+ bp.domain = ddomain;
+ r = amdgpu_bo_create(adev, &bp, &dobj);
+ if (r) {
+@@ -110,11 +115,16 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
+ if (unlikely(r != 0))
+ goto out_cleanup;
+ r = amdgpu_bo_pin(dobj, ddomain);
+- daddr = amdgpu_bo_gpu_offset(dobj);
++ if (r) {
++ amdgpu_bo_unreserve(sobj);
++ goto out_cleanup;
++ }
++ r = amdgpu_ttm_alloc_gart(&dobj->tbo);
+ amdgpu_bo_unreserve(dobj);
+ if (r) {
+ goto out_cleanup;
+ }
++ daddr = amdgpu_bo_gpu_offset(dobj);
+
+ if (adev->mman.buffer_funcs) {
+ time = amdgpu_benchmark_do_move(adev, size, saddr, daddr, n);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+index 9dd5daf..36e3ddf 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+@@ -196,6 +196,12 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
+ goto unreserve;
+ }
+
++ r = amdgpu_ttm_alloc_gart(&new_abo->tbo);
++ if (unlikely(r != 0)) {
++ DRM_ERROR("%p bind failed\n", new_abo);
++ goto unpin;
++ }
++
+ r = reservation_object_get_fences_rcu(new_abo->tbo.resv, &work->excl,
+ &work->shared_count,
+ &work->shared);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+index 92ef673..f2c7dbd 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+@@ -173,6 +173,14 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
+ amdgpu_bo_unreserve(abo);
+ goto out_unref;
+ }
++
++ ret = amdgpu_ttm_alloc_gart(&abo->tbo);
++ if (ret) {
++ amdgpu_bo_unreserve(abo);
++ dev_err(adev->dev, "%p bind failed\n", abo);
++ goto out_unref;
++ }
++
+ ret = amdgpu_bo_kmap(abo, NULL);
+ amdgpu_bo_unreserve(abo);
+ if (ret) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index dab26e10..f6b58c5 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -280,6 +280,13 @@ int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
+ dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
+ goto error_unreserve;
+ }
++
++ r = amdgpu_ttm_alloc_gart(&(*bo_ptr)->tbo);
++ if (r) {
++ dev_err(adev->dev, "%p bind failed\n", *bo_ptr);
++ goto error_unpin;
++ }
++
+ if (gpu_addr)
+ *gpu_addr = amdgpu_bo_gpu_offset(*bo_ptr);
+
+@@ -293,6 +300,8 @@ int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
+
+ return 0;
+
++error_unpin:
++ amdgpu_bo_unpin(*bo_ptr);
+ error_unreserve:
+ amdgpu_bo_unreserve(*bo_ptr);
+
+@@ -943,12 +952,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
+ goto error;
+ }
+
+- r = amdgpu_ttm_alloc_gart(&bo->tbo);
+- if (unlikely(r)) {
+- dev_err(adev->dev, "%p bind failed\n", bo);
+- goto error;
+- }
+-
+ bo->pin_count = 1;
+
+ domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+index a66f81aa..af6c783 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+@@ -103,6 +103,11 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
+ DRM_ERROR("Failed to pin GTT object %d\n", i);
+ goto out_lclean_unres;
+ }
++ r = amdgpu_ttm_alloc_gart(&gtt_obj[i]->tbo);
++ if (r) {
++ DRM_ERROR("%p bind failed\n", gtt_obj[i]);
++ goto out_lclean_unpin;
++ }
+ gart_addr = amdgpu_bo_gpu_offset(gtt_obj[i]);
+
+ r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
+--
+2.7.4
+
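Note: with the patch above, pinning a BO into GTT no longer binds it into the GART automatically; callers that need a GART address must call amdgpu_ttm_alloc_gart() themselves before reading amdgpu_bo_gpu_offset(), as the hunks above do. A sketch of the full sequence; the helper name is illustrative only.

#include "amdgpu_object.h"

/* Illustrative helper, not part of the patch. */
static int example_pin_gtt(struct amdgpu_bo *bo, u64 *gart_addr)
{
	int r;

	r = amdgpu_bo_reserve(bo, false);
	if (r)
		return r;

	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (r)
		goto out_unreserve;

	/* bind the pages into the GART on demand */
	r = amdgpu_ttm_alloc_gart(&bo->tbo);
	if (r) {
		amdgpu_bo_unpin(bo);
		goto out_unreserve;
	}

	*gart_addr = amdgpu_bo_gpu_offset(bo);

out_unreserve:
	amdgpu_bo_unreserve(bo);
	return r;
}
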
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4863-drm-amdgpu-fix-kmap-error-handling-for-bo-creations.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4863-drm-amdgpu-fix-kmap-error-handling-for-bo-creations.patch
new file mode 100644
index 00000000..0592422b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4863-drm-amdgpu-fix-kmap-error-handling-for-bo-creations.patch
@@ -0,0 +1,34 @@
+From 9f555c718f60971dc2b50911afb6eac392131b03 Mon Sep 17 00:00:00 2001
+From: Junwei Zhang <Jerry.Zhang@amd.com>
+Date: Tue, 26 Jun 2018 16:23:48 +0800
+Subject: [PATCH 4863/5725] drm/amdgpu: fix kmap error handling for bo
+ creations
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+kmap happens after bo pin, so unpin is required on error
+
+Signed-off-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index f6b58c5..7b9da44 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -294,7 +294,7 @@ int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
+ r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
+ if (r) {
+ dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
+- goto error_unreserve;
++ goto error_unpin;
+ }
+ }
+
+--
+2.7.4
+
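Note: the one-line fix above matters because kmap happens after the pin, so a kmap failure must unpin before unreserving. A sketch of the intended unwind order, assuming the BO is already reserved by the caller; the helper is illustrative, not from the patch.

/* Illustrative only; mirrors the kmap error path fixed above. */
static int example_pin_and_map(struct amdgpu_bo *bo, void **cpu_addr)
{
	int r;

	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (r)
		return r;

	r = amdgpu_bo_kmap(bo, cpu_addr);
	if (r)
		goto error_unpin;	/* previously jumped past the unpin */

	return 0;

error_unpin:
	amdgpu_bo_unpin(bo);
	return r;
}
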
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4864-drm-amdgpu-Add-CLK-IP-base-offset.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4864-drm-amdgpu-Add-CLK-IP-base-offset.patch
new file mode 100644
index 00000000..abc86b25
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4864-drm-amdgpu-Add-CLK-IP-base-offset.patch
@@ -0,0 +1,43 @@
+From 4e0dbe3aa690388e7e0d8821521eff46982a0ee4 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <rex.zhu@amd.com>
+Date: Thu, 5 Jul 2018 16:34:13 +0800
+Subject: [PATCH 4864/5725] drm/amdgpu: Add CLK IP base offset
+
+so we can read/write the registers in the CLK domain
+through RREG32/WREG32_SOC15
+
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 +
+ drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c | 1 +
+ 2 files changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index da75fb2e..80b88c3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -1457,6 +1457,7 @@ enum amd_hw_ip_block_type {
+ PWR_HWIP,
+ NBIF_HWIP,
+ THM_HWIP,
++ CLK_HWIP,
+ MAX_HWIP
+ };
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
+index 45aafca..c5c9b2b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
++++ b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
+@@ -51,6 +51,7 @@ int vega10_reg_base_init(struct amdgpu_device *adev)
+ adev->reg_offset[PWR_HWIP][i] = (uint32_t *)(&(PWR_BASE.instance[i]));
+ adev->reg_offset[NBIF_HWIP][i] = (uint32_t *)(&(NBIF_BASE.instance[i]));
+ adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
++ adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i]));
+ }
+ return 0;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4865-drm-amd-pp-Convert-10KHz-to-KHz-as-variable-name.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4865-drm-amd-pp-Convert-10KHz-to-KHz-as-variable-name.patch
new file mode 100644
index 00000000..e1063910
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4865-drm-amd-pp-Convert-10KHz-to-KHz-as-variable-name.patch
@@ -0,0 +1,52 @@
+From b391f4c5e68604542ab9603027868de7b5648051 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <rex.zhu@amd.com>
+Date: Thu, 5 Jul 2018 16:45:21 +0800
+Subject: [PATCH 4865/5725] drm/amd/pp: Convert 10KHz to KHz as variable name
+
+The default clock unit in powerplay is 10KHz.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 2 +-
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 3 +--
+ 2 files changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+index 2f628c3..e94bffe 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+@@ -3820,7 +3820,7 @@ static int vega10_notify_smc_display_config_after_ps_adjustment(
+
+ if (i < dpm_table->count) {
+ clock_req.clock_type = amd_pp_dcef_clock;
+- clock_req.clock_freq_in_khz = dpm_table->dpm_levels[i].value;
++ clock_req.clock_freq_in_khz = dpm_table->dpm_levels[i].value * 10;
+ if (!vega10_display_clock_voltage_request(hwmgr, &clock_req)) {
+ smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+index 5749287..ed17c56 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+@@ -1361,7 +1361,6 @@ int vega12_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
+ if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
+ switch (clk_type) {
+ case amd_pp_dcef_clock:
+- clk_freq = clock_req->clock_freq_in_khz / 100;
+ clk_select = PPCLK_DCEFCLK;
+ break;
+ case amd_pp_disp_clock:
+@@ -1410,7 +1409,7 @@ static int vega12_notify_smc_display_config_after_ps_adjustment(
+
+ if (data->smu_features[GNLD_DPM_DCEFCLK].supported) {
+ clock_req.clock_type = amd_pp_dcef_clock;
+- clock_req.clock_freq_in_khz = min_clocks.dcefClock;
++ clock_req.clock_freq_in_khz = min_clocks.dcefClock/10;
+ if (!vega12_display_clock_voltage_request(hwmgr, &clock_req)) {
+ if (data->smu_features[GNLD_DS_DCEFCLK].supported)
+ PP_ASSERT_WITH_CODE(
+--
+2.7.4
+
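Note: the multiplications and divisions by 10 above exist because powerplay's DPM tables store clocks in units of 10 kHz, while the clock_freq_in_khz interface field expects kHz. A small worked example; the helper and values are made up.

#include <linux/types.h>

/* Illustration only: converting a DPM table entry (10 kHz units) to kHz. */
static u32 example_10khz_to_khz(u32 value_in_10khz)
{
	/* e.g. 60000 in 10 kHz units (600 MHz) -> 600000 kHz */
	return value_in_10khz * 10;
}
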
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4866-drm-amd-display-Make-function-pointer-structs-const.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4866-drm-amd-display-Make-function-pointer-structs-const.patch
new file mode 100644
index 00000000..e33557ca
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4866-drm-amd-display-Make-function-pointer-structs-const.patch
@@ -0,0 +1,96 @@
+From 3a7e64275186ad257fde0bf9cfe2aa9a5581fc12 Mon Sep 17 00:00:00 2001
+From: Harry Wentland <harry.wentland@amd.com>
+Date: Fri, 6 Jul 2018 09:49:05 -0400
+Subject: [PATCH 4866/5725] drm/amd/display: Make function pointer structs
+ const
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Make these function pointer structs const to avoid hard-to-find bugs
+where some function accidentally overrides a function pointer.
+
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 2 +-
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c | 2 +-
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c | 2 +-
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 4 ++--
+ drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h | 2 +-
+ 5 files changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+index 93f52c5..a281bed 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+@@ -1125,7 +1125,7 @@ void hubp1_vtg_sel(struct hubp *hubp, uint32_t otg_inst)
+ REG_UPDATE(DCHUBP_CNTL, HUBP_VTG_SEL, otg_inst);
+ }
+
+-static struct hubp_funcs dcn10_hubp_funcs = {
++static const struct hubp_funcs dcn10_hubp_funcs = {
+ .hubp_program_surface_flip_and_addr =
+ hubp1_program_surface_flip_and_addr,
+ .hubp_program_surface_config =
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
+index 9ca51ae..958994e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
+@@ -428,7 +428,7 @@ void mpc1_read_mpcc_state(
+ MPCC_BUSY, &s->busy);
+ }
+
+-const struct mpc_funcs dcn10_mpc_funcs = {
++static const struct mpc_funcs dcn10_mpc_funcs = {
+ .read_mpcc_state = mpc1_read_mpcc_state,
+ .insert_plane = mpc1_insert_plane,
+ .remove_mpcc = mpc1_remove_mpcc,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
+index 77a1a9d..ab958cf 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
+@@ -385,7 +385,7 @@ void opp1_destroy(struct output_pixel_processor **opp)
+ *opp = NULL;
+ }
+
+-static struct opp_funcs dcn10_opp_funcs = {
++static const struct opp_funcs dcn10_opp_funcs = {
+ .opp_set_dyn_expansion = opp1_set_dyn_expansion,
+ .opp_program_fmt = opp1_program_fmt,
+ .opp_program_bit_depth_reduction = opp1_program_bit_depth_reduction,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+index c92a156..f9246d4 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+@@ -1035,11 +1035,11 @@ static enum dc_status dcn10_validate_plane(const struct dc_plane_state *plane_st
+ return DC_OK;
+ }
+
+-static struct dc_cap_funcs cap_funcs = {
++static const struct dc_cap_funcs cap_funcs = {
+ .get_dcc_compression_cap = dcn10_get_dcc_compression_cap
+ };
+
+-static struct resource_funcs dcn10_res_pool_funcs = {
++static const struct resource_funcs dcn10_res_pool_funcs = {
+ .destroy = dcn10_destroy_resource_pool,
+ .link_enc_create = dcn10_link_encoder_create,
+ .validate_bandwidth = dcn_validate_bandwidth,
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+index 97df82c..5b7976f 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+@@ -43,7 +43,7 @@ enum cursor_lines_per_chunk {
+ };
+
+ struct hubp {
+- struct hubp_funcs *funcs;
++ const struct hubp_funcs *funcs;
+ struct dc_context *ctx;
+ struct dc_plane_address request_address;
+ struct dc_plane_address current_address;
+--
+2.7.4
+
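Note: marking these hook tables const turns an accidental runtime overwrite of a function pointer from a silent behaviour change into a compile error, and lets the structures live in read-only data. A small sketch of the failure mode, with hypothetical names.

/* Illustration only. */
struct example_funcs {
	int (*do_thing)(void *handle);
};

static int do_thing_v1(void *handle) { return 0; }
static int do_thing_v2(void *handle) { return 1; }

static const struct example_funcs example_funcs = {
	.do_thing = do_thing_v1,
};

static void example(void)
{
	/*
	 * With the struct declared const, this accidental override is
	 * rejected at compile time instead of silently changing behaviour:
	 *
	 *	example_funcs.do_thing = do_thing_v2;
	 */
	(void)do_thing_v2;
	(void)example_funcs;
}
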
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4867-drm-amdgpu-Add-support-for-logging-process-info-in-a.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4867-drm-amdgpu-Add-support-for-logging-process-info-in-a.patch
new file mode 100644
index 00000000..3220fa98
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4867-drm-amdgpu-Add-support-for-logging-process-info-in-a.patch
@@ -0,0 +1,110 @@
+From 322b5ce1ba16bec1517cdf3800fd36c5ec075b7c Mon Sep 17 00:00:00 2001
+From: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Date: Thu, 28 Jun 2018 22:51:32 -0400
+Subject: [PATCH 4867/5725] drm/amdgpu: Add support for logging process info in
+ amdgpu_vm.
+
+Add process and thread names and pids and a function to extract
+this info from relevant amdgpu_vm.
+
+v2: Add documentation and fix indentation.
+
+v3: Add getter and setter functions for amdgpu_task_info.
+
+Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Acked-by: Jim Qu <Jim.Qu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 39 ++++++++++++++++++++++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 16 ++++++++++++++
+ 2 files changed, 55 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index b1586a7..8b5dada 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -3010,3 +3010,42 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+
+ return 0;
+ }
++
++/**
++ * amdgpu_vm_get_task_info - Extracts task info for a PASID.
++ *
++ * @dev: drm device pointer
++ * @pasid: PASID identifier for VM
++ * @task_info: task_info to fill.
++ */
++void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
++ struct amdgpu_task_info *task_info)
++{
++ struct amdgpu_vm *vm;
++
++ spin_lock(&adev->vm_manager.pasid_lock);
++
++ vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
++ if (vm)
++ *task_info = vm->task_info;
++
++ spin_unlock(&adev->vm_manager.pasid_lock);
++}
++
++/**
++ * amdgpu_vm_set_task_info - Sets VMs task info.
++ *
++ * @vm: vm for which to set the info
++ */
++void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
++{
++ if (!vm->task_info.pid) {
++ vm->task_info.pid = current->pid;
++ get_task_comm(vm->task_info.task_name, current);
++
++ if (current->group_leader->mm == current->mm) {
++ vm->task_info.tgid = current->group_leader->pid;
++ get_task_comm(vm->task_info.process_name, current->group_leader);
++ }
++ }
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+index 7a4c967..9f70db6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+@@ -167,6 +167,14 @@ struct amdgpu_vm_pt {
+ #define AMDGPU_VM_FAULT_PASID(fault) ((u64)(fault) >> 48)
+ #define AMDGPU_VM_FAULT_ADDR(fault) ((u64)(fault) & 0xfffffffff000ULL)
+
++
++struct amdgpu_task_info {
++ char process_name[TASK_COMM_LEN];
++ char task_name[TASK_COMM_LEN];
++ pid_t pid;
++ pid_t tgid;
++};
++
+ struct amdgpu_vm {
+ /* tree of virtual addresses mapped */
+ struct rb_root_cached va;
+@@ -218,6 +226,9 @@ struct amdgpu_vm {
+
+ /* Valid while the PD is reserved or fenced */
+ uint64_t pd_phys_addr;
++
++ /* Some basic info about the task */
++ struct amdgpu_task_info task_info;
+ };
+
+ struct amdgpu_vm_manager {
+@@ -323,4 +334,9 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
+ struct amdgpu_job *job);
+ void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);
+
++void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
++ struct amdgpu_task_info *task_info);
++
++void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
++
+ #endif
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4868-drm-amdgpu-Present-amdgpu_task_info-in-VM_FAULTS.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4868-drm-amdgpu-Present-amdgpu_task_info-in-VM_FAULTS.patch
new file mode 100644
index 00000000..44bc68e6
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4868-drm-amdgpu-Present-amdgpu_task_info-in-VM_FAULTS.patch
@@ -0,0 +1,88 @@
+From baaf8570ec1d667f9f97570ee6110a1cc77e5c86 Mon Sep 17 00:00:00 2001
+From: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Date: Thu, 28 Jun 2018 22:55:27 -0400
+Subject: [PATCH 4868/5725] drm/amdgpu: Present amdgpu_task_info in VM_FAULTS.
+
+Extract and present the responsible process and thread when
+VM_FAULT happens.
+
+v2: Use getter and setter functions.
+
+Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Acked-by: Jim Qu <Jim.Qu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 4 ++++
+ drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 10 +++++++---
+ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 9 +++++++--
+ 3 files changed, 18 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index da11c95..62182e9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -187,6 +187,10 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
+ if (p->uf_entry.robj)
+ p->job->uf_addr = uf_offset;
+ kfree(chunk_array);
++
++ /* Use this opportunity to fill in task info for the vm */
++ amdgpu_vm_set_task_info(vm);
++
+ return 0;
+
+ free_all_kdata:
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+index c696bad..a3312f7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+@@ -44,7 +44,6 @@
+
+ #include "amdgpu_atombios.h"
+
+-
+ static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev);
+ static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
+ static int gmc_v8_0_wait_for_idle(void *handle);
+@@ -1459,8 +1458,13 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
+ gmc_v8_0_set_fault_enable_default(adev, false);
+
+ if (printk_ratelimit()) {
+- dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
+- entry->src_id, entry->src_data[0]);
++ struct amdgpu_task_info task_info = { 0 };
++
++ amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
++
++ dev_err(adev->dev, "GPU fault detected: %d 0x%08x for process %s pid %d thread %s pid %d\n",
++ entry->src_id, entry->src_data[0], task_info.process_name,
++ task_info.tgid, task_info.task_name, task_info.pid);
+ dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
+ addr);
+ dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index ad65e57c..95b0587 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -256,11 +256,16 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
+ }
+
+ if (printk_ratelimit()) {
++ struct amdgpu_task_info task_info = { 0 };
++
++ amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
++
+ dev_err(adev->dev,
+- "[%s] VMC page fault (src_id:%u ring:%u vmid:%u pasid:%u)\n",
++ "[%s] VMC page fault (src_id:%u ring:%u vmid:%u pasid:%u, for process %s pid %d thread %s pid %d\n)\n",
+ entry->vmid_src ? "mmhub" : "gfxhub",
+ entry->src_id, entry->ring_id, entry->vmid,
+- entry->pasid);
++ entry->pasid, task_info.process_name, task_info.tgid,
++ task_info.task_name, task_info.pid);
+ dev_err(adev->dev, " at page 0x%016llx from %d\n",
+ addr, entry->client_id);
+ if (!amdgpu_sriov_vf(adev))
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4869-drm-amd-pp-Send-khz-clock-values-to-DC-for-smu7-8.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4869-drm-amd-pp-Send-khz-clock-values-to-DC-for-smu7-8.patch
new file mode 100644
index 00000000..9ead0b59
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4869-drm-amd-pp-Send-khz-clock-values-to-DC-for-smu7-8.patch
@@ -0,0 +1,86 @@
+From 8ce31c35f040ae082511a37263d87ae42aa9edcf Mon Sep 17 00:00:00 2001
+From: Harry Wentland <harry.wentland@amd.com>
+Date: Mon, 9 Jul 2018 13:48:12 -0400
+Subject: [PATCH 4869/5725] drm/amd/pp: Send khz clock values to DC for smu7/8
+
+The previous change didn't cover smu7 and smu8, and therefore DC was
+seeing wrong clock values.
+
+This fixes an issue where the pipes seem to hang with a 4k DP and 1080p
+HDMI display.
+
+Fixes: c3df50abc84b ("drm/amd/pp: Convert clock unit to KHz as defined")
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: rex.zhu@amd.com
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 8 ++++----
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c | 6 +++---
+ 2 files changed, 7 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+index b57a5df..47a4bbc 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+@@ -4610,12 +4610,12 @@ static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
+ return -EINVAL;
+ dep_sclk_table = table_info->vdd_dep_on_sclk;
+ for (i = 0; i < dep_sclk_table->count; i++)
+- clocks->clock[i] = dep_sclk_table->entries[i].clk;
++ clocks->clock[i] = dep_sclk_table->entries[i].clk * 10;
+ clocks->count = dep_sclk_table->count;
+ } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
+ sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
+ for (i = 0; i < sclk_table->count; i++)
+- clocks->clock[i] = sclk_table->entries[i].clk;
++ clocks->clock[i] = sclk_table->entries[i].clk * 10;
+ clocks->count = sclk_table->count;
+ }
+
+@@ -4647,7 +4647,7 @@ static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
+ return -EINVAL;
+ dep_mclk_table = table_info->vdd_dep_on_mclk;
+ for (i = 0; i < dep_mclk_table->count; i++) {
+- clocks->clock[i] = dep_mclk_table->entries[i].clk;
++ clocks->clock[i] = dep_mclk_table->entries[i].clk * 10;
+ clocks->latency[i] = smu7_get_mem_latency(hwmgr,
+ dep_mclk_table->entries[i].clk);
+ }
+@@ -4655,7 +4655,7 @@ static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
+ } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
+ mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
+ for (i = 0; i < mclk_table->count; i++)
+- clocks->clock[i] = mclk_table->entries[i].clk;
++ clocks->clock[i] = mclk_table->entries[i].clk * 10;
+ clocks->count = mclk_table->count;
+ }
+ return 0;
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
+index 50690c7..288802f 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
+@@ -1604,17 +1604,17 @@ static int smu8_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type
+ switch (type) {
+ case amd_pp_disp_clock:
+ for (i = 0; i < clocks->count; i++)
+- clocks->clock[i] = data->sys_info.display_clock[i];
++ clocks->clock[i] = data->sys_info.display_clock[i] * 10;
+ break;
+ case amd_pp_sys_clock:
+ table = hwmgr->dyn_state.vddc_dependency_on_sclk;
+ for (i = 0; i < clocks->count; i++)
+- clocks->clock[i] = table->entries[i].clk;
++ clocks->clock[i] = table->entries[i].clk * 10;
+ break;
+ case amd_pp_mem_clock:
+ clocks->count = SMU8_NUM_NBPMEMORYCLOCK;
+ for (i = 0; i < clocks->count; i++)
+- clocks->clock[i] = data->sys_info.nbp_memory_clock[clocks->count - 1 - i];
++ clocks->clock[i] = data->sys_info.nbp_memory_clock[clocks->count - 1 - i] * 10;
+ break;
+ default:
+ return -1;
+--
+2.7.4
+
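A note on the scaling above: the powerplay dependency tables appear to store clock levels in 10 kHz units, so multiplying the entries by 10 yields the kHz values DC now expects. A minimal sketch of that conversion, with a hypothetical helper name that is not part of the patch:

    /* Hypothetical helper: convert a powerplay table entry (10 kHz units)
     * to the kHz value DC expects -- the same *10 scaling as the hunks above. */
    static inline unsigned int pp_clk_10khz_to_khz(unsigned int clk_10khz)
    {
            return clk_10khz * 10;
    }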
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4870-drm-amdgpu-Take-VCN-jpeg-ring-into-account-in-idle-w.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4870-drm-amdgpu-Take-VCN-jpeg-ring-into-account-in-idle-w.patch
new file mode 100644
index 00000000..536fcdbb
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4870-drm-amdgpu-Take-VCN-jpeg-ring-into-account-in-idle-w.patch
@@ -0,0 +1,35 @@
+From cbd4579b6363223b02b7e0c3f149e8024d4df1f5 Mon Sep 17 00:00:00 2001
+From: Leo Liu <leo.liu@amd.com>
+Date: Mon, 9 Jul 2018 11:59:01 -0400
+Subject: [PATCH 4870/5725] drm/amdgpu: Take VCN jpeg ring into account in idle
+ work handler
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+VCN won't get powered off when only the JPEG ring is active
+
+Signed-off-by: Leo Liu <leo.liu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+index be1cbba..9168d82 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+@@ -214,6 +214,8 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
+ fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
+ }
+
++ fences += amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg);
++
+ if (fences == 0) {
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_uvd(adev, false);
+--
+2.7.4
+
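The fix above folds the JPEG ring into the emitted-fence total that the idle worker already computes for the decode and encode rings, so VCN is only powered down once that total reaches zero. A simplified sketch of that decision, with plain counters standing in for amdgpu_fence_count_emitted():

    /* Hypothetical idle test: power down only when no VCN ring
     * (decode, encode or jpeg) still has emitted fences outstanding. */
    static bool vcn_rings_idle(unsigned int dec_fences,
                               unsigned int enc_fences,
                               unsigned int jpeg_fences)
    {
            return (dec_fences + enc_fences + jpeg_fences) == 0;
    }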
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4871-drm-amdgpu-move-cache-window-setup-after-power-and-c.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4871-drm-amdgpu-move-cache-window-setup-after-power-and-c.patch
new file mode 100644
index 00000000..6d682dd0
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4871-drm-amdgpu-move-cache-window-setup-after-power-and-c.patch
@@ -0,0 +1,41 @@
+From 8817360ddd6eb4aba2b9faab3121085c8341c99c Mon Sep 17 00:00:00 2001
+From: Leo Liu <leo.liu@amd.com>
+Date: Wed, 4 Jul 2018 13:43:38 -0400
+Subject: [PATCH 4871/5725] drm/amdgpu: move cache window setup after power and
+ clock resume
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+To make register reads/writes reliable
+
+Signed-off-by: Leo Liu <leo.liu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index 5b219e6..7bca18b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -622,12 +622,12 @@ static int vcn_v1_0_start(struct amdgpu_device *adev)
+ /* disable byte swapping */
+ lmi_swap_cntl = 0;
+
+- vcn_v1_0_mc_resume(adev);
+-
+ vcn_1_0_disable_static_power_gating(adev);
+ /* disable clock gating */
+ vcn_v1_0_disable_clock_gating(adev);
+
++ vcn_v1_0_mc_resume(adev);
++
+ /* disable interupt */
+ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4872-drm-amdgpu-get-VCN-start-to-process-in-the-dpm-disab.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4872-drm-amdgpu-get-VCN-start-to-process-in-the-dpm-disab.patch
new file mode 100644
index 00000000..1c123ff0
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4872-drm-amdgpu-get-VCN-start-to-process-in-the-dpm-disab.patch
@@ -0,0 +1,35 @@
+From d42de93980d102aba96d8dada7114e5835dfc93a Mon Sep 17 00:00:00 2001
+From: Leo Liu <leo.liu@amd.com>
+Date: Wed, 4 Jul 2018 13:35:56 -0400
+Subject: [PATCH 4872/5725] drm/amdgpu: get VCN start to process in the dpm
+ disabled case
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Fixes: 22cc6c5e19 ("drm/amdgpu: Add runtime VCN PG support")
+
+Signed-off-by: Leo Liu <leo.liu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+index 9168d82..e5bcdbb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+@@ -232,7 +232,7 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
+ struct amdgpu_device *adev = ring->adev;
+ bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
+
+- if (set_clocks && adev->pm.dpm_enabled) {
++ if (set_clocks) {
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_uvd(adev, true);
+ else
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4873-drm-amd-pp-fix-semicolon.cocci-warnings.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4873-drm-amd-pp-fix-semicolon.cocci-warnings.patch
new file mode 100644
index 00000000..e40a2629
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4873-drm-amd-pp-fix-semicolon.cocci-warnings.patch
@@ -0,0 +1,35 @@
+From abd8347b8e7c78e2624220a523ed1e9e6abc10f6 Mon Sep 17 00:00:00 2001
+From: kbuild test robot <fengguang.wu@intel.com>
+Date: Wed, 11 Jul 2018 01:11:08 +0800
+Subject: [PATCH 4873/5725] drm/amd/pp: fix semicolon.cocci warnings
+
+drivers/gpu/drm/amd/amdgpu/../powerplay/amd_powerplay.c:1209:17-18: Unneeded semicolon
+
+ Remove unneeded semicolon.
+
+Generated by: scripts/coccinelle/misc/semicolon.cocci
+
+Fixes: ea870e44415a ("drm/amd/pp: Export notify_smu_enable_pwe to display")
+CC: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: kbuild test robot <fengguang.wu@intel.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+index 63db1ea..2a479fa 100644
+--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
++++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+@@ -1217,7 +1217,7 @@ static int pp_notify_smu_enable_pwe(void *handle)
+ struct pp_hwmgr *hwmgr = handle;
+
+ if (!hwmgr || !hwmgr->pm_en)
+- return -EINVAL;;
++ return -EINVAL;
+
+ if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
+ pr_info("%s was not implemented.\n", __func__);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4874-drm-amdgpu-pin-the-csb-buffer-on-hw-init-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4874-drm-amdgpu-pin-the-csb-buffer-on-hw-init-v2.patch
new file mode 100644
index 00000000..e337d1c3
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4874-drm-amdgpu-pin-the-csb-buffer-on-hw-init-v2.patch
@@ -0,0 +1,93 @@
+From 8a909db2019c5a407176bd61f2ff601bef3ab72b Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Wed, 4 Jul 2018 16:21:52 +0800
+Subject: [PATCH 4874/5725] drm/amdgpu: pin the csb buffer on hw init v2
+
+Without this pin, the csb buffer will be filled with inconsistent
+data after S3 resume. That will cause a gfx hang on gfxoff
+exit since this csb will be executed then.
+
+v2: adapt to the amdgpu_bo_pin change (takes one less argument)
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 40 +++++++++++++++++++++++++++++++++++
+ 1 file changed, 40 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index cd5668a..f424cb8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -954,6 +954,7 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
+ dst_ptr = adev->gfx.rlc.cs_ptr;
+ gfx_v9_0_get_csb_buffer(adev, dst_ptr);
+ amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
++ amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
+ amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+ }
+
+@@ -982,6 +983,39 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
+ return 0;
+ }
+
++static int gfx_v9_0_csb_vram_pin(struct amdgpu_device *adev)
++{
++ int r;
++
++ r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
++ if (unlikely(r != 0))
++ return r;
++
++ r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj,
++ AMDGPU_GEM_DOMAIN_VRAM);
++ if (!r)
++ adev->gfx.rlc.clear_state_gpu_addr =
++ amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj);
++
++ amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
++
++ return r;
++}
++
++static void gfx_v9_0_csb_vram_unpin(struct amdgpu_device *adev)
++{
++ int r;
++
++ if (!adev->gfx.rlc.clear_state_obj)
++ return;
++
++ r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
++ if (likely(r == 0)) {
++ amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
++ amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
++ }
++}
++
+ static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
+ {
+ amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
+@@ -3132,6 +3166,10 @@ static int gfx_v9_0_hw_init(void *handle)
+
+ gfx_v9_0_gpu_init(adev);
+
++ r = gfx_v9_0_csb_vram_pin(adev);
++ if (r)
++ return r;
++
+ r = gfx_v9_0_rlc_resume(adev);
+ if (r)
+ return r;
+@@ -3240,6 +3278,8 @@ static int gfx_v9_0_hw_fini(void *handle)
+ gfx_v9_0_cp_enable(adev, false);
+ gfx_v9_0_rlc_stop(adev);
+
++ gfx_v9_0_csb_vram_unpin(adev);
++
+ return 0;
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4875-drm-amdgpu-init-CSIB-regardless-of-rlc-version-and-p.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4875-drm-amdgpu-init-CSIB-regardless-of-rlc-version-and-p.patch
new file mode 100644
index 00000000..438df0ba
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4875-drm-amdgpu-init-CSIB-regardless-of-rlc-version-and-p.patch
@@ -0,0 +1,40 @@
+From 2db9f271f03ed6b27869338031d62b305a1049c8 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Thu, 5 Jul 2018 11:17:48 +0800
+Subject: [PATCH 4875/5725] drm/amdgpu: init CSIB regardless of rlc version and
+ pg status
+
+CSIB init has no relation to the RLC version or PG status. It is
+needed regardless of them.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index f424cb8..cfacec8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -2198,6 +2198,8 @@ static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *ad
+
+ static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
+ {
++ gfx_v9_0_init_csb(adev);
++
+ if (!adev->gfx.rlc.is_rlc_v2_1)
+ return;
+
+@@ -2207,7 +2209,6 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
+ AMD_PG_SUPPORT_CP |
+ AMD_PG_SUPPORT_GDS |
+ AMD_PG_SUPPORT_RLC_SMU_HS)) {
+- gfx_v9_0_init_csb(adev);
+ gfx_v9_1_init_rlc_save_restore_list(adev);
+ gfx_v9_0_enable_save_restore_machine(adev);
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4876-drm-amdgpu-correct-rlc-save-restore-list-initializat.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4876-drm-amdgpu-correct-rlc-save-restore-list-initializat.patch
new file mode 100644
index 00000000..d2a76f28
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4876-drm-amdgpu-correct-rlc-save-restore-list-initializat.patch
@@ -0,0 +1,62 @@
+From 1e5c45f7fe58bafdb763606b5a4c846d9a79026b Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Thu, 5 Jul 2018 11:24:20 +0800
+Subject: [PATCH 4876/5725] drm/amdgpu: correct rlc save restore list
+ initialization for v2_1
+
+The save/restore list initialization does not have to be PG guarded.
+And some ASICs (e.g. Vega12) do not have cntl/gpm/srm lists.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Acked-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 18 ++++++++++++------
+ 1 file changed, 12 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index cfacec8..b322714 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -659,7 +659,10 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+
+- if (adev->gfx.rlc.is_rlc_v2_1) {
++ if (adev->gfx.rlc.is_rlc_v2_1 &&
++ adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
++ adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
++ adev->gfx.rlc.save_restore_list_srm_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
+ info->fw = adev->gfx.rlc_fw;
+@@ -2200,8 +2203,14 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
+ {
+ gfx_v9_0_init_csb(adev);
+
+- if (!adev->gfx.rlc.is_rlc_v2_1)
+- return;
++ /*
++ * Rlc save restore list is workable since v2_1.
++ * And it's needed by gfxoff feature.
++ */
++ if (adev->gfx.rlc.is_rlc_v2_1) {
++ gfx_v9_1_init_rlc_save_restore_list(adev);
++ gfx_v9_0_enable_save_restore_machine(adev);
++ }
+
+ if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
+ AMD_PG_SUPPORT_GFX_SMG |
+@@ -2209,9 +2218,6 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
+ AMD_PG_SUPPORT_CP |
+ AMD_PG_SUPPORT_GDS |
+ AMD_PG_SUPPORT_RLC_SMU_HS)) {
+- gfx_v9_1_init_rlc_save_restore_list(adev);
+- gfx_v9_0_enable_save_restore_machine(adev);
+-
+ WREG32(mmRLC_JUMP_TABLE_RESTORE,
+ adev->gfx.rlc.cp_table_gpu_addr >> 8);
+ gfx_v9_0_init_gfx_power_gating(adev);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4877-drm-amdgpu-drop-mmRLC_PG_CNTL-clear-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4877-drm-amdgpu-drop-mmRLC_PG_CNTL-clear-v2.patch
new file mode 100644
index 00000000..fff4d5a6
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4877-drm-amdgpu-drop-mmRLC_PG_CNTL-clear-v2.patch
@@ -0,0 +1,35 @@
+From 0608446c9ecb074f32eadaf07251b8501fb7edd5 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Wed, 4 Jul 2018 17:06:38 +0800
+Subject: [PATCH 4877/5725] drm/amdgpu: drop mmRLC_PG_CNTL clear v2
+
+The SMU owns this register, so the driver should not set it;
+touching it would break gfxoff.
+
+v2: update description
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index b322714..99d6005 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -2309,9 +2309,6 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
+ /* disable CG */
+ WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
+
+- /* disable PG */
+- WREG32_SOC15(GC, 0, mmRLC_PG_CNTL, 0);
+-
+ gfx_v9_0_rlc_reset(adev);
+
+ gfx_v9_0_init_pg(adev);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4878-drm-amdgpu-no-touch-for-the-reserved-bit-of-RLC_CGTT.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4878-drm-amdgpu-no-touch-for-the-reserved-bit-of-RLC_CGTT.patch
new file mode 100644
index 00000000..62596a43
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4878-drm-amdgpu-no-touch-for-the-reserved-bit-of-RLC_CGTT.patch
@@ -0,0 +1,54 @@
+From 152168a0dde590504efa2d0b826aeb5ec9b1db11 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Thu, 5 Jul 2018 10:26:48 +0800
+Subject: [PATCH 4878/5725] drm/amdgpu: no touch for the reserved bit of
+ RLC_CGTT_MGCG_OVERRIDE
+
+On Vega12, bit 0 of RLC_CGTT_MGCG_OVERRIDE is reserved.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 15 +++++++++++----
+ 1 file changed, 11 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 99d6005..38f0a5d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -3570,8 +3570,11 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
+ /* 1 - RLC_CGTT_MGCG_OVERRIDE */
+ def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
+- data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK |
+- RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
++
++ if (adev->asic_type != CHIP_VEGA12)
++ data &= ~RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
++
++ data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
+ RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
+ RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
+
+@@ -3601,11 +3604,15 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
+ } else {
+ /* 1 - MGCG_OVERRIDE */
+ def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
+- data |= (RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK |
+- RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
++
++ if (adev->asic_type != CHIP_VEGA12)
++ data |= RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
++
++ data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
+ RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
+ RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
+ RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
++
+ if (def != data)
+ WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4879-drm-amdgpu-reduce-the-idle-period-that-RLC-has-to-wa.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4879-drm-amdgpu-reduce-the-idle-period-that-RLC-has-to-wa.patch
new file mode 100644
index 00000000..e879dfca
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4879-drm-amdgpu-reduce-the-idle-period-that-RLC-has-to-wa.patch
@@ -0,0 +1,50 @@
+From 8e18525e1db111f0e6659ad16244d9cc84fd09fa Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Thu, 5 Jul 2018 10:30:36 +0800
+Subject: [PATCH 4879/5725] drm/amdgpu: reduce the idle period that RLC has to
+ wait before request CGCG
+
+The gfxoff feature may depend on CGCG (on Vega12, that's the case). This
+change helps enable the gfxoff feature more frequently.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 38f0a5d..5a719b0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -3648,9 +3648,11 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
+ /* update CGCG and CGLS override bits */
+ if (def != data)
+ WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
+- /* enable 3Dcgcg FSM(0x0020003f) */
++
++ /* enable 3Dcgcg FSM(0x0000363f) */
+ def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
+- data = (0x2000 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
++
++ data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
+ RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
+ data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
+@@ -3697,9 +3699,10 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
+ if (def != data)
+ WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
+
+- /* enable cgcg FSM(0x0020003F) */
++ /* enable cgcg FSM(0x0000363F) */
+ def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
+- data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
++
++ data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
+ RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
+ data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4880-drm-amd-powerplay-add-vega12-SMU-gfxoff-support-v3.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4880-drm-amd-powerplay-add-vega12-SMU-gfxoff-support-v3.patch
new file mode 100644
index 00000000..b3a4a8ce
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4880-drm-amd-powerplay-add-vega12-SMU-gfxoff-support-v3.patch
@@ -0,0 +1,115 @@
+From 9927e33a9ef6b25d569594d06204d0baffa5394c Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Wed, 4 Jul 2018 16:44:07 +0800
+Subject: [PATCH 4880/5725] drm/amd/powerplay: add vega12 SMU gfxoff support v3
+
+Export APIs for enabling/disabling SMU gfxoff support.
+
+v2: fit the latest gfxoff support framework
+v3: add feature_mask control
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 5 +++
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 38 ++++++++++++++++++++++
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h | 3 ++
+ 3 files changed, 46 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 5a719b0..e221eb0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -3789,6 +3789,11 @@ static int gfx_v9_0_set_powergating_state(void *handle,
+ if (enable && adev->powerplay.pp_funcs->set_powergating_by_smu)
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true);
+ break;
++ case CHIP_VEGA12:
++ /* set gfx off through smu */
++ if (enable && adev->powerplay.pp_funcs->set_powergating_by_smu)
++ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true);
++ break;
+ default:
+ break;
+ }
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+index ed17c56..cae76fe 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+@@ -423,6 +423,11 @@ static int vega12_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit *
+ hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100;
+
++ if (hwmgr->feature_mask & PP_GFXOFF_MASK)
++ data->gfxoff_controlled_by_driver = true;
++ else
++ data->gfxoff_controlled_by_driver = false;
++
+ return result;
+ }
+
+@@ -2328,6 +2333,38 @@ static int vega12_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
+ return 0;
+ }
+
++static int vega12_enable_gfx_off(struct pp_hwmgr *hwmgr)
++{
++ struct vega12_hwmgr *data =
++ (struct vega12_hwmgr *)(hwmgr->backend);
++ int ret = 0;
++
++ if (data->gfxoff_controlled_by_driver)
++ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_AllowGfxOff);
++
++ return ret;
++}
++
++static int vega12_disable_gfx_off(struct pp_hwmgr *hwmgr)
++{
++ struct vega12_hwmgr *data =
++ (struct vega12_hwmgr *)(hwmgr->backend);
++ int ret = 0;
++
++ if (data->gfxoff_controlled_by_driver)
++ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisallowGfxOff);
++
++ return ret;
++}
++
++static int vega12_gfx_off_control(struct pp_hwmgr *hwmgr, bool enable)
++{
++ if (enable)
++ return vega12_enable_gfx_off(hwmgr);
++ else
++ return vega12_disable_gfx_off(hwmgr);
++}
++
+ static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
+ .backend_init = vega12_hwmgr_backend_init,
+ .backend_fini = vega12_hwmgr_backend_fini,
+@@ -2377,6 +2414,7 @@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
+ .get_thermal_temperature_range = vega12_get_thermal_temperature_range,
+ .register_irq_handlers = smu9_register_irq_handlers,
+ .start_thermal_controller = vega12_start_thermal_controller,
++ .powergate_gfx = vega12_gfx_off_control,
+ };
+
+ int vega12_hwmgr_init(struct pp_hwmgr *hwmgr)
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
+index e17237c..b3e424d 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
+@@ -393,6 +393,9 @@ struct vega12_hwmgr {
+ struct vega12_smc_state_table smc_state_table;
+
+ struct vega12_clock_range clk_range[PPCLK_COUNT];
++
++ /* ---- Gfxoff ---- */
++ bool gfxoff_controlled_by_driver;
+ };
+
+ #define VEGA12_DPM2_NEAR_TDP_DEC 10
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4881-drm-amd-powerplay-no-need-to-mask-workable-gfxoff-fe.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4881-drm-amd-powerplay-no-need-to-mask-workable-gfxoff-fe.patch
new file mode 100644
index 00000000..6a15ad2c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4881-drm-amd-powerplay-no-need-to-mask-workable-gfxoff-fe.patch
@@ -0,0 +1,35 @@
+From 9ccb0b2ee210f91637e7be294df4041ed301114d Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Thu, 5 Jul 2018 10:44:33 +0800
+Subject: [PATCH 4881/5725] drm/amd/powerplay: no need to mask workable gfxoff
+ feature for vega12
+
+The gfxoff feature on Vega12 is functional, so there is no need to
+mask it any more.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+index 9b675d9..8994aa5 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+@@ -147,10 +147,10 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
+ smu7_init_function_pointers(hwmgr);
+ break;
+ case AMDGPU_FAMILY_AI:
+- hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
+ switch (hwmgr->chip_id) {
+ case CHIP_VEGA10:
+ case CHIP_VEGA20:
++ hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
+ hwmgr->smumgr_funcs = &vega10_smu_funcs;
+ vega10_hwmgr_init(hwmgr);
+ break;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4882-drm-amd-powerplay-convert-the-sclk-mclk-into-Mhz-for.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4882-drm-amd-powerplay-convert-the-sclk-mclk-into-Mhz-for.patch
new file mode 100644
index 00000000..7fd2a393
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4882-drm-amd-powerplay-convert-the-sclk-mclk-into-Mhz-for.patch
@@ -0,0 +1,42 @@
+From b8ecc8aa0d487fe215e348b2a5e2dff7dfc71ab6 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Tue, 10 Jul 2018 11:35:16 +0800
+Subject: [PATCH 4882/5725] drm/amd/powerplay: convert the sclk/mclk into Mhz
+ for comparison
+
+Convert the clocks into the right MHz unit. Otherwise, the comparison
+will miss the equal case.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+index cae76fe..c0ceb69 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+@@ -1881,7 +1881,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
+ for (i = 0; i < clocks.num_levels; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, clocks.data[i].clocks_in_khz / 1000,
+- (clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : "");
++ (clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : "");
+ break;
+
+ case PP_MCLK:
+@@ -1897,7 +1897,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
+ for (i = 0; i < clocks.num_levels; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, clocks.data[i].clocks_in_khz / 1000,
+- (clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : "");
++ (clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : "");
+ break;
+
+ case PP_PCIE:
+--
+2.7.4
+
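The comparison fixed above only matches when both sides are reduced to the same unit: clocks_in_khz / 1000 gives MHz, and the current clock read back (apparently in 10 kHz units) needs / 100 to reach MHz as well; otherwise the '*' marker for the active level is never printed. A small sketch of that unit handling, under the assumption that 'now' is in 10 kHz units as the hunk implies:

    /* Hypothetical check: reduce both values to MHz before comparing. */
    static bool level_is_current(unsigned int level_khz, unsigned int now_10khz)
    {
            return (level_khz / 1000) == (now_10khz / 100);
    }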
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4883-drm-amd-Add-interrupt-source-definitions-for-VI-v3.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4883-drm-amd-Add-interrupt-source-definitions-for-VI-v3.patch
new file mode 100644
index 00000000..0bfeea9f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4883-drm-amd-Add-interrupt-source-definitions-for-VI-v3.patch
@@ -0,0 +1,136 @@
+From 1f39557c0b2c981f44d97b204e74501e2f1b7122 Mon Sep 17 00:00:00 2001
+From: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Date: Wed, 11 Jul 2018 17:34:35 -0400
+Subject: [PATCH 4883/5725] drm/amd: Add interrupt source definitions for VI
+ v3.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Stop using 'magic numbers' when registering interrupt sources.
+
+v2:
+Clean redundant comments.
+Switch to kernel style comments.
+
+v3:
+Add CP_ECC_ERROR define
+
+Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../drm/amd/include/ivsrcid/ivsrcid_vislands30.h | 98 ++++++++++++++++++++++
+ 1 file changed, 98 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/include/ivsrcid/ivsrcid_vislands30.h b/drivers/gpu/drm/amd/include/ivsrcid/ivsrcid_vislands30.h
+index c6b6f97..aaed7f5 100644
+--- a/drivers/gpu/drm/amd/include/ivsrcid/ivsrcid_vislands30.h
++++ b/drivers/gpu/drm/amd/include/ivsrcid/ivsrcid_vislands30.h
+@@ -198,4 +198,102 @@
+ #define VISLANDS30_IV_SRCID_HPD_RX_F 42 // 0x2a
+ #define VISLANDS30_IV_EXTID_HPD_RX_F 11
+
++#define VISLANDS30_IV_SRCID_GPIO_19 0x00000053 /* 83 */
++
++#define VISLANDS30_IV_SRCID_SRBM_READ_TIMEOUT_ERR 0x00000060 /* 96 */
++#define VISLANDS30_IV_SRCID_SRBM_CTX_SWITCH 0x00000061 /* 97 */
++
++#define VISLANDS30_IV_SRBM_REG_ACCESS_ERROR 0x00000062 /* 98 */
++
++
++#define VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP 0x00000077 /* 119 */
++#define VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE 0x0000007c /* 124 */
++
++#define VISLANDS30_IV_SRCID_BIF_PF_VF_MSGBUF_VALID 0x00000087 /* 135 */
++
++#define VISLANDS30_IV_SRCID_BIF_VF_PF_MSGBUF_ACK 0x0000008a /* 138 */
++
++#define VISLANDS30_IV_SRCID_SYS_PAGE_INV_FAULT 0x0000008c /* 140 */
++#define VISLANDS30_IV_SRCID_SYS_MEM_PROT_FAULT 0x0000008d /* 141 */
++
++#define VISLANDS30_IV_SRCID_SEM_PAGE_INV_FAULT 0x00000090 /* 144 */
++#define VISLANDS30_IV_SRCID_SEM_MEM_PROT_FAULT 0x00000091 /* 145 */
++
++#define VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT 0x00000092 /* 146 */
++#define VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT 0x00000093 /* 147 */
++
++#define VISLANDS30_IV_SRCID_ACP 0x000000a2 /* 162 */
++
++#define VISLANDS30_IV_SRCID_VCE_TRAP 0x000000a7 /* 167 */
++#define VISLANDS30_IV_EXTID_VCE_TRAP_GENERAL_PURPOSE 0
++#define VISLANDS30_IV_EXTID_VCE_TRAP_LOW_LATENCY 1
++#define VISLANDS30_IV_EXTID_VCE_TRAP_REAL_TIME 2
++
++#define VISLANDS30_IV_SRCID_CP_INT_RB 0x000000b0 /* 176 */
++#define VISLANDS30_IV_SRCID_CP_INT_IB1 0x000000b1 /* 177 */
++#define VISLANDS30_IV_SRCID_CP_INT_IB2 0x000000b2 /* 178 */
++#define VISLANDS30_IV_SRCID_CP_PM4_RES_BITS_ERR 0x000000b4 /* 180 */
++#define VISLANDS30_IV_SRCID_CP_END_OF_PIPE 0x000000b5 /* 181 */
++#define VISLANDS30_IV_SRCID_CP_BAD_OPCODE 0x000000b7 /* 183 */
++#define VISLANDS30_IV_SRCID_CP_PRIV_REG_FAULT 0x000000b8 /* 184 */
++#define VISLANDS30_IV_SRCID_CP_PRIV_INSTR_FAULT 0x000000b9 /* 185 */
++#define VISLANDS30_IV_SRCID_CP_WAIT_MEM_SEM_FAULT 0x000000ba /* 186 */
++#define VISLANDS30_IV_SRCID_CP_GUI_IDLE 0x000000bb /* 187 */
++#define VISLANDS30_IV_SRCID_CP_GUI_BUSY 0x000000bc /* 188 */
++
++#define VISLANDS30_IV_SRCID_CP_COMPUTE_QUERY_STATUS 0x000000bf /* 191 */
++#define VISLANDS30_IV_SRCID_CP_ECC_ERROR 0x000000c5 /* 197 */
++
++#define CARRIZO_IV_SRCID_CP_COMPUTE_QUERY_STATUS 0x000000c7 /* 199 */
++
++#define VISLANDS30_IV_SRCID_CP_WAIT_REG_MEM_POLL_TIMEOUT 0x000000c0 /* 192 */
++#define VISLANDS30_IV_SRCID_CP_SEM_SIG_INCOMPL 0x000000c1 /* 193 */
++#define VISLANDS30_IV_SRCID_CP_PREEMPT_ACK 0x000000c2 /* 194 */
++#define VISLANDS30_IV_SRCID_CP_GENERAL_PROT_FAULT 0x000000c3 /* 195 */
++#define VISLANDS30_IV_SRCID_CP_GDS_ALLOC_ERROR 0x000000c4 /* 196 */
++#define VISLANDS30_IV_SRCID_CP_ECC_ERROR 0x000000c5 /* 197 */
++
++#define VISLANDS30_IV_SRCID_RLC_STRM_PERF_MONITOR 0x000000ca /* 202 */
++
++#define VISLANDS30_IV_SDMA_ATOMIC_SRC_ID 0x000000da /* 218 */
++
++#define VISLANDS30_IV_SRCID_SDMA_ECC_ERROR 0x000000dc /* 220 */
++
++#define VISLANDS30_IV_SRCID_SDMA_TRAP 0x000000e0 /* 224 */
++#define VISLANDS30_IV_SRCID_SDMA_SEM_INCOMPLETE 0x000000e1 /* 225 */
++#define VISLANDS30_IV_SRCID_SDMA_SEM_WAIT 0x000000e2 /* 226 */
++
++
++#define VISLANDS30_IV_SRCID_SMU_DISP_TIMER2_TRIGGER 0x000000e5 /* 229 */
++
++#define VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH 0x000000e6 /* 230 */
++#define VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW 0x000000e7 /* 231 */
++
++#define VISLANDS30_IV_SRCID_GRBM_READ_TIMEOUT_ERR 0x000000e8 /* 232 */
++#define VISLANDS30_IV_SRCID_GRBM_REG_GUI_IDLE 0x000000e9 /* 233 */
++
++#define VISLANDS30_IV_SRCID_SQ_INTERRUPT_MSG 0x000000ef /* 239 */
++
++#define VISLANDS30_IV_SRCID_SDMA_PREEMPT 0x000000f0 /* 240 */
++#define VISLANDS30_IV_SRCID_SDMA_VM_HOLE 0x000000f2 /* 242 */
++#define VISLANDS30_IV_SRCID_SDMA_CTXEMPTY 0x000000f3 /* 243 */
++#define VISLANDS30_IV_SRCID_SDMA_DOORBELL_INVALID 0x000000f4 /* 244 */
++#define VISLANDS30_IV_SRCID_SDMA_FROZEN 0x000000f5 /* 245 */
++#define VISLANDS30_IV_SRCID_SDMA_POLL_TIMEOUT 0x000000f6 /* 246 */
++#define VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE 0x000000f7 /* 247 */
++
++#define VISLANDS30_IV_SRCID_CG_THERMAL_TRIG 0x000000f8 /* 248 */
++
++#define VISLANDS30_IV_SRCID_SMU_DISP_TIMER_TRIGGER 0x000000fd /* 253 */
++
++/* These are not "real" source ids defined by HW */
++#define VISLANDS30_IV_SRCID_VM_CONTEXT_ALL 0x00000100 /* 256 */
++#define VISLANDS30_IV_EXTID_VM_CONTEXT0_ALL 0
++#define VISLANDS30_IV_EXTID_VM_CONTEXT1_ALL 1
++
++
++/* IV Extended IDs */
++#define VISLANDS30_IV_EXTID_NONE 0x00000000
++#define VISLANDS30_IV_EXTID_INVALID 0xffffffff
++
+ #endif // _IVSRCID_VISLANDS30_H_
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4884-drm-amd-Use-newly-added-interrupt-source-defs-for-VI.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4884-drm-amd-Use-newly-added-interrupt-source-defs-for-VI.patch
new file mode 100644
index 00000000..7301c4cb
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4884-drm-amd-Use-newly-added-interrupt-source-defs-for-VI.patch
@@ -0,0 +1,391 @@
+From 7de2293f66c15dd769d1dfe92274d51a04ddd52f Mon Sep 17 00:00:00 2001
+From: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Date: Fri, 25 May 2018 10:06:52 -0400
+Subject: [PATCH 4884/5725] drm/amd: Use newly added interrupt source defs for
+ VI v3.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+v2: Rebase
+v3: Use defines for CP_SQ and CP_ECC_ERROR interrupts.
+
+Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 6 ++++--
+ drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 6 ++++--
+ drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 3 ++-
+ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 14 ++++++++------
+ drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 6 ++++--
+ drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 6 ++++--
+ drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 6 ++++--
+ drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 6 ++++--
+ drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 3 ++-
+ drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 5 +++--
+ drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 3 ++-
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 8 +++++---
+ 12 files changed, 46 insertions(+), 26 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+index f9f9165..85649e9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+@@ -41,6 +41,8 @@
+ #include "gmc/gmc_8_1_d.h"
+ #include "gmc/gmc_8_1_sh_mask.h"
+
++#include "ivsrcid/ivsrcid_vislands30.h"
++
+ static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev);
+ static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev);
+
+@@ -2744,14 +2746,14 @@ static int dce_v10_0_sw_init(void *handle)
+ return r;
+ }
+
+- for (i = 8; i < 20; i += 2) {
++ for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i < 20; i += 2) {
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
+ if (r)
+ return r;
+ }
+
+ /* HPD hotplug */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
+ if (r)
+ return r;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+index b123a99..4e4e5fc 100644
+--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+@@ -41,6 +41,8 @@
+ #include "gmc/gmc_8_1_d.h"
+ #include "gmc/gmc_8_1_sh_mask.h"
+
++#include "ivsrcid/ivsrcid_vislands30.h"
++
+ static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev);
+ static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev);
+
+@@ -2866,14 +2868,14 @@ static int dce_v11_0_sw_init(void *handle)
+ return r;
+ }
+
+- for (i = 8; i < 20; i += 2) {
++ for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i < 20; i += 2) {
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
+ if (r)
+ return r;
+ }
+
+ /* HPD hotplug */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
+ if (r)
+ return r;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+index 28bf8cf..7145e7a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
++++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+@@ -36,6 +36,7 @@
+ #include "dce_v10_0.h"
+ #include "dce_v11_0.h"
+ #include "dce_virtual.h"
++#include "ivsrcid/ivsrcid_vislands30.h"
+
+ #define DCE_VIRTUAL_VBLANK_PERIOD 16666666
+
+@@ -381,7 +382,7 @@ static int dce_virtual_sw_init(void *handle)
+ int r, i;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 229, &adev->crtc_irq);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SMU_DISP_TIMER2_TRIGGER, &adev->crtc_irq);
+ if (r)
+ return r;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index 92bda71..96d517e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -51,6 +51,8 @@
+
+ #include "smu/smu_7_1_3_d.h"
+
++#include "ivsrcid/ivsrcid_vislands30.h"
++
+ #define GFX8_NUM_GFX_RINGS 1
+ #define GFX8_MEC_HPD_SIZE 2048
+
+@@ -2046,35 +2048,35 @@ static int gfx_v8_0_sw_init(void *handle)
+ adev->gfx.mec.num_queue_per_pipe = 8;
+
+ /* KIQ event */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 178, &adev->gfx.kiq.irq);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_INT_IB2, &adev->gfx.kiq.irq);
+ if (r)
+ return r;
+
+ /* EOP Event */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_END_OF_PIPE, &adev->gfx.eop_irq);
+ if (r)
+ return r;
+
+ /* Privileged reg */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 184,
++ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_REG_FAULT,
+ &adev->gfx.priv_reg_irq);
+ if (r)
+ return r;
+
+ /* Privileged inst */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 185,
++ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_INSTR_FAULT,
+ &adev->gfx.priv_inst_irq);
+ if (r)
+ return r;
+
+ /* Add CP EDC/ECC irq */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 197,
++ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_ECC_ERROR,
+ &adev->gfx.cp_ecc_error_irq);
+ if (r)
+ return r;
+
+ /* SQ interrupts. */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 239,
++ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SQ_INTERRUPT_MSG,
+ &adev->gfx.sq_irq);
+ if (r) {
+ DRM_ERROR("amdgpu_irq_add() for SQ failed: %d\n", r);
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+index 4fd4081..3040e8a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+@@ -43,6 +43,8 @@
+
+ #include "amdgpu_atombios.h"
+
++#include "ivsrcid/ivsrcid_vislands30.h"
++
+ static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev);
+ static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
+ static int gmc_v7_0_wait_for_idle(void *handle);
+@@ -998,11 +1000,11 @@ static int gmc_v7_0_sw_init(void *handle)
+ adev->gmc.vram_type = gmc_v7_0_convert_vram_type(tmp);
+ }
+
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
+ if (r)
+ return r;
+
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
+ if (r)
+ return r;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+index a3312f7..5476ddd 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+@@ -44,6 +44,8 @@
+
+ #include "amdgpu_atombios.h"
+
++#include "ivsrcid/ivsrcid_vislands30.h"
++
+ static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev);
+ static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
+ static int gmc_v8_0_wait_for_idle(void *handle);
+@@ -1106,11 +1108,11 @@ static int gmc_v8_0_sw_init(void *handle)
+ adev->gmc.vram_type = gmc_v8_0_convert_vram_type(tmp);
+ }
+
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
+ if (r)
+ return r;
+
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
+ if (r)
+ return r;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+index 096c6f2..cc22269 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+@@ -44,6 +44,8 @@
+
+ #include "iceland_sdma_pkt_open.h"
+
++#include "ivsrcid/ivsrcid_vislands30.h"
++
+ static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev);
+ static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev);
+ static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev);
+@@ -903,7 +905,7 @@ static int sdma_v2_4_sw_init(void *handle)
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ /* SDMA trap event */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224,
++ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP,
+ &adev->sdma.trap_irq);
+ if (r)
+ return r;
+@@ -915,7 +917,7 @@ static int sdma_v2_4_sw_init(void *handle)
+ return r;
+
+ /* SDMA Privileged inst */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 247,
++ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE,
+ &adev->sdma.illegal_inst_irq);
+ if (r)
+ return r;
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+index e074dea..4b7df45 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+@@ -44,6 +44,8 @@
+
+ #include "tonga_sdma_pkt_open.h"
+
++#include "ivsrcid/ivsrcid_vislands30.h"
++
+ static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev);
+ static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev);
+ static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev);
+@@ -1183,7 +1185,7 @@ static int sdma_v3_0_sw_init(void *handle)
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ /* SDMA trap event */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224,
++ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP,
+ &adev->sdma.trap_irq);
+ if (r)
+ return r;
+@@ -1195,7 +1197,7 @@ static int sdma_v3_0_sw_init(void *handle)
+ return r;
+
+ /* SDMA Privileged inst */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 247,
++ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE,
+ &adev->sdma.illegal_inst_irq);
+ if (r)
+ return r;
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+index 693944f..ab3ad86 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+@@ -35,6 +35,7 @@
+ #include "vi.h"
+ #include "smu/smu_7_1_2_d.h"
+ #include "smu/smu_7_1_2_sh_mask.h"
++#include "ivsrcid/ivsrcid_vislands30.h"
+
+ static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
+ static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
+@@ -104,7 +105,7 @@ static int uvd_v5_0_sw_init(void *handle)
+ int r;
+
+ /* UVD TRAP */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
+ if (r)
+ return r;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+index 6f008a0..bca9c63 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+@@ -36,6 +36,7 @@
+ #include "bif/bif_5_1_d.h"
+ #include "gmc/gmc_8_1_d.h"
+ #include "vi.h"
++#include "ivsrcid/ivsrcid_vislands30.h"
+
+ /* Polaris10/11/12 firmware version */
+ #define FW_1_130_16 ((1 << 24) | (130 << 16) | (16 << 8))
+@@ -400,14 +401,14 @@ static int uvd_v6_0_sw_init(void *handle)
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ /* UVD TRAP */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
+ if (r)
+ return r;
+
+ /* UVD ENC TRAP */
+ if (uvd_v6_0_enc_support(adev)) {
+ for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 119, &adev->uvd.inst->irq);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP, &adev->uvd.inst->irq);
+ if (r)
+ return r;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+index 99604d0..cc6ce6c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+@@ -39,6 +39,7 @@
+ #include "smu/smu_7_1_2_sh_mask.h"
+ #include "gca/gfx_8_0_d.h"
+ #include "gca/gfx_8_0_sh_mask.h"
++#include "ivsrcid/ivsrcid_vislands30.h"
+
+
+ #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04
+@@ -422,7 +423,7 @@ static int vce_v3_0_sw_init(void *handle)
+ int r, i;
+
+ /* VCE */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 167, &adev->vce.irq);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_VCE_TRAP, &adev->vce.irq);
+ if (r)
+ return r;
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+index 47a4bbc..e3c1eb4 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+@@ -48,6 +48,8 @@
+ #include "processpptables.h"
+ #include "pp_thermal.h"
+
++#include "ivsrcid/ivsrcid_vislands30.h"
++
+ #define MC_CG_ARB_FREQ_F0 0x0a
+ #define MC_CG_ARB_FREQ_F1 0x0b
+ #define MC_CG_ARB_FREQ_F2 0x0c
+@@ -4105,17 +4107,17 @@ static int smu7_register_irq_handlers(struct pp_hwmgr *hwmgr)
+
+ amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
+ AMDGPU_IH_CLIENTID_LEGACY,
+- 230,
++ VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH,
+ source);
+ amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
+ AMDGPU_IH_CLIENTID_LEGACY,
+- 231,
++ VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW,
+ source);
+
+ /* Register CTF(GPIO_19) interrupt */
+ amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
+ AMDGPU_IH_CLIENTID_LEGACY,
+- 83,
++ VISLANDS30_IV_SRCID_GPIO_19,
+ source);
+
+ return 0;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4885-drm-amd-Add-interrupt-source-definitions-for-SOC15-v.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4885-drm-amd-Add-interrupt-source-definitions-for-SOC15-v.patch
new file mode 100644
index 00000000..11c3ca22
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4885-drm-amd-Add-interrupt-source-definitions-for-SOC15-v.patch
@@ -0,0 +1,457 @@
+From 589b6eab7229ec7fc1ae7d58fffe74d320ed7175 Mon Sep 17 00:00:00 2001
+From: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Date: Fri, 25 May 2018 10:44:12 -0400
+Subject: [PATCH 4885/5725] drm/amd: Add interrupt source definitions for SOC15
+ v3.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Stop using 'magic numbers' when registering interrupt sources.
+
+v2: Switch to kernel style comments.
+
+v3:
+Rebase.
+
+Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_9_0.h | 55 ++++++++++++++++++++++
+ .../amd/include/ivsrcid/sdma0/irqsrcs_sdma0_4_0.h | 50 ++++++++++++++++++++
+ .../amd/include/ivsrcid/sdma1/irqsrcs_sdma1_4_0.h | 50 ++++++++++++++++++++
+ .../amd/include/ivsrcid/smuio/irqsrcs_smuio_9_0.h | 32 +++++++++++++
+ .../drm/amd/include/ivsrcid/thm/irqsrcs_thm_9_0.h | 33 +++++++++++++
+ .../drm/amd/include/ivsrcid/uvd/irqsrcs_uvd_7_0.h | 34 +++++++++++++
+ .../drm/amd/include/ivsrcid/vce/irqsrcs_vce_4_0.h | 34 +++++++++++++
+ .../drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_1_0.h | 34 +++++++++++++
+ .../drm/amd/include/ivsrcid/vmc/irqsrcs_vmc_1_0.h | 37 +++++++++++++++
+ 9 files changed, 359 insertions(+)
+ create mode 100644 drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_9_0.h
+ create mode 100644 drivers/gpu/drm/amd/include/ivsrcid/sdma0/irqsrcs_sdma0_4_0.h
+ create mode 100644 drivers/gpu/drm/amd/include/ivsrcid/sdma1/irqsrcs_sdma1_4_0.h
+ create mode 100644 drivers/gpu/drm/amd/include/ivsrcid/smuio/irqsrcs_smuio_9_0.h
+ create mode 100644 drivers/gpu/drm/amd/include/ivsrcid/thm/irqsrcs_thm_9_0.h
+ create mode 100644 drivers/gpu/drm/amd/include/ivsrcid/uvd/irqsrcs_uvd_7_0.h
+ create mode 100644 drivers/gpu/drm/amd/include/ivsrcid/vce/irqsrcs_vce_4_0.h
+ create mode 100644 drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_1_0.h
+ create mode 100644 drivers/gpu/drm/amd/include/ivsrcid/vmc/irqsrcs_vmc_1_0.h
+
+diff --git a/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_9_0.h b/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_9_0.h
+new file mode 100644
+index 0000000..36306c5
+--- /dev/null
++++ b/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_9_0.h
+@@ -0,0 +1,55 @@
++/*
++ * Copyright 2017 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++#ifndef __IRQSRCS_GFX_9_0_H__
++#define __IRQSRCS_GFX_9_0_H__
++
++
++#define GFX_9_0__SRCID__CP_RB_INTERRUPT_PKT 176 /* B0 CP_INTERRUPT pkt in RB */
++#define GFX_9_0__SRCID__CP_IB1_INTERRUPT_PKT 177 /* B1 CP_INTERRUPT pkt in IB1 */
++#define GFX_9_0__SRCID__CP_IB2_INTERRUPT_PKT 178 /* B2 CP_INTERRUPT pkt in IB2 */
++#define GFX_9_0__SRCID__CP_PM4_PKT_RSVD_BIT_ERROR 180 /* B4 PM4 Pkt Rsvd Bits Error */
++#define GFX_9_0__SRCID__CP_EOP_INTERRUPT 181 /* B5 End-of-Pipe Interrupt */
++#define GFX_9_0__SRCID__CP_BAD_OPCODE_ERROR 183 /* B7 Bad Opcode Error */
++#define GFX_9_0__SRCID__CP_PRIV_REG_FAULT 184 /* B8 Privileged Register Fault */
++#define GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT 185 /* B9 Privileged Instr Fault */
++#define GFX_9_0__SRCID__CP_WAIT_MEM_SEM_FAULT 186 /* BA Wait Memory Semaphore Fault (Synchronization Object Fault) */
++#define GFX_9_0__SRCID__CP_CTX_EMPTY_INTERRUPT 187 /* BB Context Empty Interrupt */
++#define GFX_9_0__SRCID__CP_CTX_BUSY_INTERRUPT 188 /* BC Context Busy Interrupt */
++#define GFX_9_0__SRCID__CP_ME_WAIT_REG_MEM_POLL_TIMEOUT 192 /* C0 CP.ME Wait_Reg_Mem Poll Timeout */
++#define GFX_9_0__SRCID__CP_SIG_INCOMPLETE 193 /* C1 "Surface Probe Fault Signal Incomplete" */
++#define GFX_9_0__SRCID__CP_PREEMPT_ACK 194 /* C2 Preemption Acknowledge */
++#define GFX_9_0__SRCID__CP_GPF 195 /* C3 General Protection Fault (GPF) */
++#define GFX_9_0__SRCID__CP_GDS_ALLOC_ERROR 196 /* C4 GDS Alloc Error */
++#define GFX_9_0__SRCID__CP_ECC_ERROR 197 /* C5 ECC Error */
++#define GFX_9_0__SRCID__CP_COMPUTE_QUERY_STATUS 199 /* C7 Compute query status */
++#define GFX_9_0__SRCID__CP_VM_DOORBELL 200 /* C8 Unattached VM Doorbell Received */
++#define GFX_9_0__SRCID__CP_FUE_ERROR 201 /* C9 ECC FUE Error */
++#define GFX_9_0__SRCID__RLC_STRM_PERF_MONITOR_INTERRUPT 202 /* CA Streaming Perf Monitor Interrupt */
++#define GFX_9_0__SRCID__GRBM_RD_TIMEOUT_ERROR 232 /* E8 CRead timeout error */
++#define GFX_9_0__SRCID__GRBM_REG_GUI_IDLE 233 /* E9 Register GUI Idle */
++#define GFX_9_0__SRCID__SQ_INTERRUPT_ID 239 /* EF SQ Interrupt (ttrace wrap, errors) */
++
++#endif /* __IRQSRCS_GFX_9_0_H__ */
+diff --git a/drivers/gpu/drm/amd/include/ivsrcid/sdma0/irqsrcs_sdma0_4_0.h b/drivers/gpu/drm/amd/include/ivsrcid/sdma0/irqsrcs_sdma0_4_0.h
+new file mode 100644
+index 0000000..8024138
+--- /dev/null
++++ b/drivers/gpu/drm/amd/include/ivsrcid/sdma0/irqsrcs_sdma0_4_0.h
+@@ -0,0 +1,50 @@
++/*
++ * Copyright 2017 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++#ifndef __IRQSRCS_SDMA0_4_0_H__
++#define __IRQSRCS_SDMA0_4_0_H__
++
++#define SDMA0_4_0__SRCID__SDMA_ATOMIC_RTN_DONE 217 /* 0xD9 SDMA atomic*_rtn ops complete */
++#define SDMA0_4_0__SRCID__SDMA_ATOMIC_TIMEOUT 218 /* 0xDA SDMA atomic CMPSWAP loop timeout */
++#define SDMA0_4_0__SRCID__SDMA_IB_PREEMPT 219 /* 0xDB sdma mid-command buffer preempt interrupt */
++#define SDMA0_4_0__SRCID__SDMA_ECC 220 /* 0xDC ECC Error */
++#define SDMA0_4_0__SRCID__SDMA_PAGE_FAULT 221 /* 0xDD Page Fault Error from UTCL2 when nack=3 */
++#define SDMA0_4_0__SRCID__SDMA_PAGE_NULL 222 /* 0xDE Page Null from UTCL2 when nack=2 */
++#define SDMA0_4_0__SRCID__SDMA_XNACK 223 /* 0xDF Page retry timeout after UTCL2 return nack=1 */
++#define SDMA0_4_0__SRCID__SDMA_TRAP 224 /* 0xE0 Trap */
++#define SDMA0_4_0__SRCID__SDMA_SEM_INCOMPLETE_TIMEOUT 225 /* 0xE1 GPF (Sem incomplete timeout) */
++#define SDMA0_4_0__SRCID__SDMA_SEM_WAIT_FAIL_TIMEOUT 226 /* 0xE2 Semaphore wait fail timeout */
++#define SDMA0_4_0__SRCID__SDMA_SRAM_ECC 228 /* 0xE4 SRAM ECC Error */
++#define SDMA0_4_0__SRCID__SDMA_PREEMPT 240 /* 0xF0 SDMA New Run List */
++#define SDMA0_4_0__SRCID__SDMA_VM_HOLE 242 /* 0xF2 MC or SEM address in VM hole */
++#define SDMA0_4_0__SRCID__SDMA_CTXEMPTY 243 /* 0xF3 Context Empty */
++#define SDMA0_4_0__SRCID__SDMA_DOORBELL_INVALID 244 /* 0xF4 Doorbell BE invalid */
++#define SDMA0_4_0__SRCID__SDMA_FROZEN 245 /* 0xF5 SDMA Frozen */
++#define SDMA0_4_0__SRCID__SDMA_POLL_TIMEOUT 246 /* 0xF6 SRBM read poll timeout */
++#define SDMA0_4_0__SRCID__SDMA_SRBMWRITE 247 /* 0xF7 SRBM write Protection */
++
++#endif /* __IRQSRCS_SDMA0_4_0_H__ */
++
++
+diff --git a/drivers/gpu/drm/amd/include/ivsrcid/sdma1/irqsrcs_sdma1_4_0.h b/drivers/gpu/drm/amd/include/ivsrcid/sdma1/irqsrcs_sdma1_4_0.h
+new file mode 100644
+index 0000000..d12a356
+--- /dev/null
++++ b/drivers/gpu/drm/amd/include/ivsrcid/sdma1/irqsrcs_sdma1_4_0.h
+@@ -0,0 +1,50 @@
++/*
++ * Copyright 2017 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++#ifndef __IRQSRCS_SDMA1_4_0_H__
++#define __IRQSRCS_SDMA1_4_0_H__
++
++#define SDMA1_4_0__SRCID__SDMA_ATOMIC_RTN_DONE 217 /* 0xD9 SDMA atomic*_rtn ops complete */
++#define SDMA1_4_0__SRCID__SDMA_ATOMIC_TIMEOUT 218 /* 0xDA SDMA atomic CMPSWAP loop timeout */
++#define SDMA1_4_0__SRCID__SDMA_IB_PREEMPT 219 /* 0xDB sdma mid-command buffer preempt interrupt */
++#define SDMA1_4_0__SRCID__SDMA_ECC 220 /* 0xDC ECC Error */
++#define SDMA1_4_0__SRCID__SDMA_PAGE_FAULT 221 /* 0xDD Page Fault Error from UTCL2 when nack=3 */
++#define SDMA1_4_0__SRCID__SDMA_PAGE_NULL 222 /* 0xDE Page Null from UTCL2 when nack=2 */
++#define SDMA1_4_0__SRCID__SDMA_XNACK 223 /* 0xDF Page retry timeout after UTCL2 return nack=1 */
++#define SDMA1_4_0__SRCID__SDMA_TRAP 224 /* 0xE0 Trap */
++#define SDMA1_4_0__SRCID__SDMA_SEM_INCOMPLETE_TIMEOUT 225 /* 0xE1 GPF (Sem incomplete timeout) */
++#define SDMA1_4_0__SRCID__SDMA_SEM_WAIT_FAIL_TIMEOUT 226 /* 0xE2 Semaphore wait fail timeout */
++#define SDMA1_4_0__SRCID__SDMA_SRAM_ECC 228 /* 0xE4 SRAM ECC Error */
++#define SDMA1_4_0__SRCID__SDMA_PREEMPT 240 /* 0xF0 SDMA New Run List */
++#define SDMA1_4_0__SRCID__SDMA_VM_HOLE 242 /* 0xF2 MC or SEM address in VM hole */
++#define SDMA1_4_0__SRCID__SDMA_CTXEMPTY 243 /* 0xF3 Context Empty */
++#define SDMA1_4_0__SRCID__SDMA_DOORBELL_INVALID 244 /* 0xF4 Doorbell BE invalid */
++#define SDMA1_4_0__SRCID__SDMA_FROZEN 245 /* 0xF5 SDMA Frozen */
++#define SDMA1_4_0__SRCID__SDMA_POLL_TIMEOUT 246 /* 0xF6 SRBM read poll timeout */
++#define SDMA1_4_0__SRCID__SDMA_SRBMWRITE 247 /* 0xF7 SRBM write Protection */
++
++#endif /* __IRQSRCS_SDMA1_4_0_H__ */
++
++
+diff --git a/drivers/gpu/drm/amd/include/ivsrcid/smuio/irqsrcs_smuio_9_0.h b/drivers/gpu/drm/amd/include/ivsrcid/smuio/irqsrcs_smuio_9_0.h
+new file mode 100644
+index 0000000..02bab46
+--- /dev/null
++++ b/drivers/gpu/drm/amd/include/ivsrcid/smuio/irqsrcs_smuio_9_0.h
+@@ -0,0 +1,32 @@
++/*
++ * Copyright 2017 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++#ifndef __IRQSRCS_SMUIO_9_0_H__
++#define __IRQSRCS_SMUIO_9_0_H__
++
++#define SMUIO_9_0__SRCID__SMUIO_GPIO19 83 /* GPIO19 interrupt */
++
++#endif /* __IRQSRCS_SMUIO_9_0_H__ */
++
+diff --git a/drivers/gpu/drm/amd/include/ivsrcid/thm/irqsrcs_thm_9_0.h b/drivers/gpu/drm/amd/include/ivsrcid/thm/irqsrcs_thm_9_0.h
+new file mode 100644
+index 0000000..5218bc5
+--- /dev/null
++++ b/drivers/gpu/drm/amd/include/ivsrcid/thm/irqsrcs_thm_9_0.h
+@@ -0,0 +1,33 @@
++/*
++ * Copyright 2017 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++#ifndef __IRQSRCS_THM_9_0_H__
++#define __IRQSRCS_THM_9_0_H__
++
++#define THM_9_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */
++#define THM_9_0__SRCID__THM_DIG_THERM_H2L 1 /* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */
++
++#endif /* __IRQSRCS_THM_9_0_H__ */
++
+diff --git a/drivers/gpu/drm/amd/include/ivsrcid/uvd/irqsrcs_uvd_7_0.h b/drivers/gpu/drm/amd/include/ivsrcid/uvd/irqsrcs_uvd_7_0.h
+new file mode 100644
+index 0000000..fb041ae
+--- /dev/null
++++ b/drivers/gpu/drm/amd/include/ivsrcid/uvd/irqsrcs_uvd_7_0.h
+@@ -0,0 +1,34 @@
++/*
++ * Copyright 2017 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++#ifndef __IRQSRCS_UVD_7_0_H__
++#define __IRQSRCS_UVD_7_0_H__
++
++#define UVD_7_0__SRCID__UVD_ENC_GEN_PURP 119
++#define UVD_7_0__SRCID__UVD_ENC_LOW_LATENCY 120
++#define UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT 124 /* UVD system message interrupt */
++
++#endif /* __IRQSRCS_UVD_7_0_H__ */
++
+diff --git a/drivers/gpu/drm/amd/include/ivsrcid/vce/irqsrcs_vce_4_0.h b/drivers/gpu/drm/amd/include/ivsrcid/vce/irqsrcs_vce_4_0.h
+new file mode 100644
+index 0000000..3440bab
+--- /dev/null
++++ b/drivers/gpu/drm/amd/include/ivsrcid/vce/irqsrcs_vce_4_0.h
+@@ -0,0 +1,34 @@
++/*
++ * Copyright 2017 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++#ifndef __IRQSRCS_VCE_4_0_H__
++#define __IRQSRCS_VCE_4_0_H__
++
++#define VCE_4_0__CTXID__VCE_TRAP_GENERAL_PURPOSE 0
++#define VCE_4_0__CTXID__VCE_TRAP_LOW_LATENCY 1
++#define VCE_4_0__CTXID__VCE_TRAP_REAL_TIME 2
++
++#endif /* __IRQSRCS_VCE_4_0_H__ */
++
+diff --git a/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_1_0.h b/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_1_0.h
+new file mode 100644
+index 0000000..e595170
+--- /dev/null
++++ b/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_1_0.h
+@@ -0,0 +1,34 @@
++/*
++ * Copyright 2017 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++#ifndef __IRQSRCS_VCN_1_0_H__
++#define __IRQSRCS_VCN_1_0_H__
++
++#define VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE 119 /* 0x77 Encoder General Purpose */
++#define VCN_1_0__SRCID__UVD_ENC_LOW_LATENCY 120 /* 0x78 Encoder Low Latency */
++#define VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT 124 /* 0x7c UVD system message interrupt */
++
++#endif /* __IRQSRCS_VCN_1_0_H__ */
++
+diff --git a/drivers/gpu/drm/amd/include/ivsrcid/vmc/irqsrcs_vmc_1_0.h b/drivers/gpu/drm/amd/include/ivsrcid/vmc/irqsrcs_vmc_1_0.h
+new file mode 100644
+index 0000000..d130936
+--- /dev/null
++++ b/drivers/gpu/drm/amd/include/ivsrcid/vmc/irqsrcs_vmc_1_0.h
+@@ -0,0 +1,37 @@
++/*
++ * Copyright 2017 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++#ifndef __IRQSRCS_VMC_1_0_H__
++#define __IRQSRCS_VMC_1_0_H__
++
++
++#define VMC_1_0__SRCID__VM_FAULT 0
++#define VMC_1_0__SRCID__VM_CONTEXT0_ALL 256
++#define VMC_1_0__SRCID__VM_CONTEXT1_ALL 257
++
++#define UTCL2_1_0__SRCID__FAULT 0 /* UTC L2 has encountered a fault or retry scenario */
++
++
++#endif /* __IRQSRCS_VMC_1_0_H__ */
+--
+2.7.4
+
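The headers added above only introduce named constants; nothing uses them yet. The intended call pattern is to hand them to amdgpu_irq_add_id() in place of the bare numbers, as the next patch in the series does. A minimal sketch for the GFX 9 EOP interrupt (mirroring the gfx_v9_0.c change that follows):

    #include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"

    /* EOP event: source 181 on the GRBM_CP client, now spelled by name */
    r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
                          GFX_9_0__SRCID__CP_EOP_INTERRUPT,
                          &adev->gfx.eop_irq);
    if (r)
            return r;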
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4886-drm-amd-Use-newly-added-interrupt-source-defs-for-SO.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4886-drm-amd-Use-newly-added-interrupt-source-defs-for-SO.patch
new file mode 100644
index 00000000..f8195b3e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4886-drm-amd-Use-newly-added-interrupt-source-defs-for-SO.patch
@@ -0,0 +1,226 @@
+From 2cc1f3ad95b6563f7c090bc9af8dbeb8b568cf59 Mon Sep 17 00:00:00 2001
+From: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Date: Fri, 25 May 2018 10:45:34 -0400
+Subject: [PATCH 4886/5725] drm/amd: Use newly added interrupt source defs for
+ SOC15.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 10 ++++++----
+ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 6 ++++--
+ drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 7 +++++--
+ drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 5 +++--
+ drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 2 ++
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 6 ++++--
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c | 8 +++++---
+ 7 files changed, 29 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index e221eb0..39b229e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -38,6 +38,8 @@
+ #include "clearstate_gfx9.h"
+ #include "v9_structs.h"
+
++#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"
++
+ #define GFX9_NUM_GFX_RINGS 1
+ #define GFX9_MEC_HPD_SIZE 2048
+ #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
+@@ -1500,23 +1502,23 @@ static int gfx_v9_0_sw_init(void *handle)
+ adev->gfx.mec.num_queue_per_pipe = 8;
+
+ /* KIQ event */
+- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 178, &adev->gfx.kiq.irq);
++ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_IB2_INTERRUPT_PKT, &adev->gfx.kiq.irq);
+ if (r)
+ return r;
+
+ /* EOP Event */
+- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 181, &adev->gfx.eop_irq);
++ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
+ if (r)
+ return r;
+
+ /* Privileged reg */
+- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 184,
++ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
+ &adev->gfx.priv_reg_irq);
+ if (r)
+ return r;
+
+ /* Privileged inst */
+- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 185,
++ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
+ &adev->gfx.priv_inst_irq);
+ if (r)
+ return r;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index 95b0587..5255647 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -42,6 +42,8 @@
+ #include "gfxhub_v1_0.h"
+ #include "mmhub_v1_0.h"
+
++#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
++
+ /* add these here since we already include dce12 headers and these are for DCN */
+ #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x055d
+ #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
+@@ -877,9 +879,9 @@ static int gmc_v9_0_sw_init(void *handle)
+ }
+
+ /* This interrupt is VMC page fault.*/
+- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, 0,
++ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
+ &adev->gmc.vm_fault);
+- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, 0,
++ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
+ &adev->gmc.vm_fault);
+
+ if (r)
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+index 55d2c17..d102712 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+@@ -38,6 +38,9 @@
+ #include "soc15.h"
+ #include "vega10_sdma_pkt_open.h"
+
++#include "ivsrcid/sdma0/irqsrcs_sdma0_4_0.h"
++#include "ivsrcid/sdma1/irqsrcs_sdma1_4_0.h"
++
+ MODULE_FIRMWARE("amdgpu/vega10_sdma.bin");
+ MODULE_FIRMWARE("amdgpu/vega10_sdma1.bin");
+ MODULE_FIRMWARE("amdgpu/vega12_sdma.bin");
+@@ -1229,13 +1232,13 @@ static int sdma_v4_0_sw_init(void *handle)
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ /* SDMA trap event */
+- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0, 224,
++ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0, SDMA0_4_0__SRCID__SDMA_TRAP,
+ &adev->sdma.trap_irq);
+ if (r)
+ return r;
+
+ /* SDMA trap event */
+- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA1, 224,
++ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA1, SDMA1_4_0__SRCID__SDMA_TRAP,
+ &adev->sdma.trap_irq);
+ if (r)
+ return r;
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+index 78b5111..07f5ccb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+@@ -39,6 +39,7 @@
+ #include "hdp/hdp_4_0_offset.h"
+ #include "mmhub/mmhub_1_0_offset.h"
+ #include "mmhub/mmhub_1_0_sh_mask.h"
++#include "ivsrcid/uvd/irqsrcs_uvd_7_0.h"
+
+ #define UVD7_MAX_HW_INSTANCES_VEGA20 2
+
+@@ -402,13 +403,13 @@ static int uvd_v7_0_sw_init(void *handle)
+
+ for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+ /* UVD TRAP */
+- r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], 124, &adev->uvd.inst[j].irq);
++ r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->uvd.inst[j].irq);
+ if (r)
+ return r;
+
+ /* UVD ENC TRAP */
+ for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
+- r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], i + 119, &adev->uvd.inst[j].irq);
++ r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], i + UVD_7_0__SRCID__UVD_ENC_GEN_PURP, &adev->uvd.inst[j].irq);
+ if (r)
+ return r;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+index 575bf97..65f8860 100755
+--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+@@ -39,6 +39,8 @@
+ #include "mmhub/mmhub_1_0_offset.h"
+ #include "mmhub/mmhub_1_0_sh_mask.h"
+
++#include "ivsrcid/vce/irqsrcs_vce_4_0.h"
++
+ #define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02
+
+ #define VCE_V4_0_FW_SIZE (384 * 1024)
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index 7bca18b..90103b0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -35,6 +35,8 @@
+ #include "mmhub/mmhub_9_1_offset.h"
+ #include "mmhub/mmhub_9_1_sh_mask.h"
+
++#include "ivsrcid/vcn/irqsrcs_vcn_1_0.h"
++
+ static int vcn_v1_0_stop(struct amdgpu_device *adev);
+ static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
+ static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
+@@ -77,13 +79,13 @@ static int vcn_v1_0_sw_init(void *handle)
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ /* VCN DEC TRAP */
+- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 124, &adev->vcn.irq);
++ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.irq);
+ if (r)
+ return r;
+
+ /* VCN ENC TRAP */
+ for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + 119,
++ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
+ &adev->vcn.irq);
+ if (r)
+ return r;
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
+index 3effb55..8eea49e 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
+@@ -25,6 +25,8 @@
+ #include "ppatomctrl.h"
+ #include "ppsmc.h"
+ #include "atom.h"
++#include "ivsrcid/thm/irqsrcs_thm_9_0.h"
++#include "ivsrcid/smuio/irqsrcs_smuio_9_0.h"
+
+ uint8_t convert_to_vid(uint16_t vddc)
+ {
+@@ -594,17 +596,17 @@ int smu9_register_irq_handlers(struct pp_hwmgr *hwmgr)
+
+ amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
+ SOC15_IH_CLIENTID_THM,
+- 0,
++ THM_9_0__SRCID__THM_DIG_THERM_L2H,
+ source);
+ amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
+ SOC15_IH_CLIENTID_THM,
+- 1,
++ THM_9_0__SRCID__THM_DIG_THERM_H2L,
+ source);
+
+ /* Register CTF(GPIO_19) interrupt */
+ amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
+ SOC15_IH_CLIENTID_ROM_SMUIO,
+- 83,
++ SMUIO_9_0__SRCID__SMUIO_GPIO19,
+ source);
+
+ return 0;
+--
+2.7.4
+
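One detail worth calling out in the UVD and VCN hunks: the encoder rings are registered as i + <ENC general purpose source>, which works because the general-purpose (119) and low-latency (120) source IDs are consecutive in the new headers. The VCN loop ends up looking like this (taken from the hunk above, comments added):

    /* VCN ENC TRAP: ring 0 -> 119 (general purpose), ring 1 -> 120 (low latency) */
    for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
            r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
                                  i + VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
                                  &adev->vcn.irq);
            if (r)
                    return r;
    }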
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4887-drm-amdgpu-fix-TTM-move-entity-init-order.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4887-drm-amdgpu-fix-TTM-move-entity-init-order.patch
new file mode 100644
index 00000000..4b758dfa
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4887-drm-amdgpu-fix-TTM-move-entity-init-order.patch
@@ -0,0 +1,101 @@
+From 21c33e6a7668dda24f6f7162ba98cb6380725e53 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Thu, 12 Jul 2018 14:31:25 +0200
+Subject: [PATCH 4887/5725] drm/amdgpu: fix TTM move entity init order
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+We are initializing the entity before the scheduler is actually
+initialized.
+
+This can lead to all kinds of problems, most notably a NULL pointer deref
+because of Nayan's scheduler work.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 37 +++++++++++++++++++--------------
+ 1 file changed, 21 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index d0c95f2..529ddda 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -104,8 +104,6 @@ static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
+ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
+ {
+ struct drm_global_reference *global_ref;
+- struct amdgpu_ring *ring;
+- struct drm_sched_rq *rq;
+ int r;
+
+ /* ensure reference is false in case init fails */
+@@ -138,21 +136,10 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
+
+ mutex_init(&adev->mman.gtt_window_lock);
+
+- ring = adev->mman.buffer_funcs_ring;
+- rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+- r = drm_sched_entity_init(&ring->sched, &adev->mman.entity,
+- rq, NULL);
+- if (r) {
+- DRM_ERROR("Failed setting up TTM BO move run queue.\n");
+- goto error_entity;
+- }
+-
+ adev->mman.mem_global_referenced = true;
+
+ return 0;
+
+-error_entity:
+- drm_global_item_unref(&adev->mman.bo_global_ref.ref);
+ error_bo:
+ drm_global_item_unref(&adev->mman.mem_global_ref);
+ error_mem:
+@@ -162,8 +149,6 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
+ static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
+ {
+ if (adev->mman.mem_global_referenced) {
+- drm_sched_entity_fini(adev->mman.entity.sched,
+- &adev->mman.entity);
+ mutex_destroy(&adev->mman.gtt_window_lock);
+ drm_global_item_unref(&adev->mman.bo_global_ref.ref);
+ drm_global_item_unref(&adev->mman.mem_global_ref);
+@@ -2118,10 +2103,30 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
+ {
+ struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_VRAM];
+ uint64_t size;
++ int r;
+
+- if (!adev->mman.initialized || adev->in_gpu_reset)
++ if (!adev->mman.initialized || adev->in_gpu_reset ||
++ adev->mman.buffer_funcs_enabled == enable)
+ return;
+
++ if (enable) {
++ struct amdgpu_ring *ring;
++ struct drm_sched_rq *rq;
++
++ ring = adev->mman.buffer_funcs_ring;
++ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
++ r = drm_sched_entity_init(&ring->sched, &adev->mman.entity,
++ rq, NULL);
++ if (r) {
++ DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
++ r);
++ return;
++ }
++ } else {
++ drm_sched_entity_fini(adev->mman.entity.sched,
++ &adev->mman.entity);
++ }
++
+ /* this just adjusts TTM size idea, which sets lpfn to the correct value */
+ if (enable)
+ size = adev->gmc.real_vram_size;
+--
+2.7.4
+
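The fix above moves the BO-move entity setup out of amdgpu_ttm_global_init() and into amdgpu_ttm_set_buffer_funcs_status(), so drm_sched_entity_init() only runs once the ring's scheduler actually exists, and the entity is torn down again when the buffer functions are disabled. Reduced to its core, the new flow is (sketch of the hunk above, error handling trimmed):

    if (enable) {
            struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
            struct drm_sched_rq *rq =
                    &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];

            /* ring->sched is initialized by now, so this can no longer
             * dereference a half-constructed scheduler */
            r = drm_sched_entity_init(&ring->sched, &adev->mman.entity, rq, NULL);
    } else {
            drm_sched_entity_fini(adev->mman.entity.sched, &adev->mman.entity);
    }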
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4888-drm-amdgpu-Keep-track-of-amount-of-pinned-CPU-visibl.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4888-drm-amdgpu-Keep-track-of-amount-of-pinned-CPU-visibl.patch
new file mode 100644
index 00000000..2d45a74a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4888-drm-amdgpu-Keep-track-of-amount-of-pinned-CPU-visibl.patch
@@ -0,0 +1,150 @@
+From 5d6f5f709a8ff95826eb0d023af0c6151230e6e5 Mon Sep 17 00:00:00 2001
+From: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+Date: Wed, 9 Jan 2019 20:25:53 +0530
+Subject: [PATCH 4888/5725] drm/amdgpu: Keep track of amount of pinned CPU
+ visible VRAM
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Track pinned CPU visible VRAM instead of CPU invisible VRAM. This is
+preparation for the following change; no functional change intended.
+
+v2:
+* Also change amdgpu_vram_mgr_bo_invisible_size to
+ amdgpu_vram_mgr_bo_visible_size, allowing further simplification
+ (Christian König)
+
+Cc: stable@vger.kernel.org
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 5 ++---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 4 ++--
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 20 ++++++++------------
+ 5 files changed, 14 insertions(+), 19 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 80b88c3..e056008 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -1652,7 +1652,7 @@ struct amdgpu_device {
+
+ /* tracking pinned memory */
+ u64 vram_pin_size;
+- u64 invisible_pin_size;
++ u64 visible_pin_size;
+ u64 gart_pin_size;
+
+ /* amdkfd interface */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index a7a0be9..02a5ef7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -518,7 +518,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+ vram_gtt.vram_size = adev->gmc.real_vram_size;
+ vram_gtt.vram_size -= adev->vram_pin_size;
+ vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size;
+- vram_gtt.vram_cpu_accessible_size -= (adev->vram_pin_size - adev->invisible_pin_size);
++ vram_gtt.vram_cpu_accessible_size -= adev->visible_pin_size;
+ vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
+ vram_gtt.gtt_size *= PAGE_SIZE;
+ vram_gtt.gtt_size -= adev->gart_pin_size;
+@@ -539,8 +539,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+ mem.cpu_accessible_vram.total_heap_size =
+ adev->gmc.visible_vram_size;
+ mem.cpu_accessible_vram.usable_heap_size =
+- adev->gmc.visible_vram_size -
+- (adev->vram_pin_size - adev->invisible_pin_size);
++ adev->gmc.visible_vram_size - adev->visible_pin_size;
+ mem.cpu_accessible_vram.heap_usage =
+ amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
+ mem.cpu_accessible_vram.max_allocation =
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index 7b9da44..51f08a8 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -957,7 +957,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
+ domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+ if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
+ adev->vram_pin_size += amdgpu_bo_size(bo);
+- adev->invisible_pin_size += amdgpu_vram_mgr_bo_invisible_size(bo);
++ adev->visible_pin_size += amdgpu_vram_mgr_bo_visible_size(bo);
+ } else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
+ adev->gart_pin_size += amdgpu_bo_size(bo);
+ }
+@@ -1009,7 +1009,7 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
+
+ if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
+ adev->vram_pin_size -= amdgpu_bo_size(bo);
+- adev->invisible_pin_size -= amdgpu_vram_mgr_bo_invisible_size(bo);
++ adev->visible_pin_size -= amdgpu_vram_mgr_bo_visible_size(bo);
+ } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+ adev->gart_pin_size -= amdgpu_bo_size(bo);
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+index 42136da..66251b6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+@@ -77,7 +77,7 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem);
+ uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
+ int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);
+
+-u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo);
++u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo);
+ uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
+ uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+index e8790ea..f949efb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+@@ -98,33 +98,29 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
+
+
+ /**
+- * amdgpu_vram_mgr_bo_invisible_size - CPU invisible BO size
++ * amdgpu_vram_mgr_bo_visible_size - CPU visible BO size
+ *
+ * @bo: &amdgpu_bo buffer object (must be in VRAM)
+ *
+ * Returns:
+- * How much of the given &amdgpu_bo buffer object lies in CPU invisible VRAM.
++ * How much of the given &amdgpu_bo buffer object lies in CPU visible VRAM.
+ */
+-u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo)
++u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
+ {
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct ttm_mem_reg *mem = &bo->tbo.mem;
+ struct drm_mm_node *nodes = mem->mm_node;
+ unsigned pages = mem->num_pages;
+- u64 usage = 0;
++ u64 usage;
+
+ if (amdgpu_gmc_vram_full_visible(&adev->gmc))
+- return 0;
++ return amdgpu_bo_size(bo);
+
+ if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
+- return amdgpu_bo_size(bo);
++ return 0;
+
+- while (nodes && pages) {
+- usage += nodes->size << PAGE_SHIFT;
+- usage -= amdgpu_vram_mgr_vis_size(adev, nodes);
+- pages -= nodes->size;
+- ++nodes;
+- }
++ for (usage = 0; nodes && pages; pages -= nodes->size, nodes++)
++ usage += amdgpu_vram_mgr_vis_size(adev, nodes);
+
+ return usage;
+ }
+--
+2.7.4
+
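The reworked helper returns how much of a BO sits in the CPU-visible window: the whole BO size when VRAM is fully visible, zero when the BO starts beyond the window, and otherwise the per-node visible portions summed up. A worked example, assuming a 256 MiB visible window and a BO backed by two 32 MiB drm_mm nodes (amdgpu_vram_mgr_vis_size() clamps each node to the window):

    node A: [240 MiB, 272 MiB)  ->  visible part = 256 - 240 = 16 MiB
    node B: [272 MiB, 304 MiB)  ->  visible part = 0 MiB
    amdgpu_vram_mgr_bo_visible_size() = 16 MiB

Pinning this BO therefore adds 64 MiB to vram_pin_size but only 16 MiB to visible_pin_size.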
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4889-drm-amdgpu-Make-pin_size-values-atomic.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4889-drm-amdgpu-Make-pin_size-values-atomic.patch
new file mode 100644
index 00000000..2dca34f6
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4889-drm-amdgpu-Make-pin_size-values-atomic.patch
@@ -0,0 +1,181 @@
+From 7647b85138330b2e7250f92cd97da55803bd84b5 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Michel=20D=C3=A4nzer?= <michel.daenzer@amd.com>
+Date: Wed, 11 Jul 2018 12:00:40 +0200
+Subject: [PATCH 4889/5725] drm/amdgpu: Make pin_size values atomic
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Concurrent execution of the non-atomic arithmetic could result in
+completely bogus values.
+
+v2:
+* Rebased on v2 of the previous patch
+
+Cc: stable@vger.kernel.org
+Bugzilla: https://bugs.freedesktop.org/106872
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 6 +++---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 22 +++++++++++-----------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 14 ++++++++------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 6 +++---
+ 5 files changed, 26 insertions(+), 24 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index e056008..2fa7976 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -1651,9 +1651,9 @@ struct amdgpu_device {
+ DECLARE_HASHTABLE(mn_hash, 7);
+
+ /* tracking pinned memory */
+- u64 vram_pin_size;
+- u64 visible_pin_size;
+- u64 gart_pin_size;
++ atomic64_t vram_pin_size;
++ atomic64_t visible_pin_size;
++ atomic64_t gart_pin_size;
+
+ /* amdkfd interface */
+ struct kfd_dev *kfd;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index 62182e9..a4aaf37 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -262,7 +262,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
+ return;
+ }
+
+- total_vram = adev->gmc.real_vram_size - adev->vram_pin_size;
++ total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
+ used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
+ free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index 02a5ef7..4cd3317 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -515,13 +515,13 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+ case AMDGPU_INFO_VRAM_GTT: {
+ struct drm_amdgpu_info_vram_gtt vram_gtt;
+
+- vram_gtt.vram_size = adev->gmc.real_vram_size;
+- vram_gtt.vram_size -= adev->vram_pin_size;
+- vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size;
+- vram_gtt.vram_cpu_accessible_size -= adev->visible_pin_size;
++ vram_gtt.vram_size = adev->gmc.real_vram_size -
++ atomic64_read(&adev->vram_pin_size);
++ vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size -
++ atomic64_read(&adev->visible_pin_size);
+ vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
+ vram_gtt.gtt_size *= PAGE_SIZE;
+- vram_gtt.gtt_size -= adev->gart_pin_size;
++ vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
+ return copy_to_user(out, &vram_gtt,
+ min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
+ }
+@@ -530,16 +530,16 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+
+ memset(&mem, 0, sizeof(mem));
+ mem.vram.total_heap_size = adev->gmc.real_vram_size;
+- mem.vram.usable_heap_size =
+- adev->gmc.real_vram_size - adev->vram_pin_size;
++ mem.vram.usable_heap_size = adev->gmc.real_vram_size -
++ atomic64_read(&adev->vram_pin_size);
+ mem.vram.heap_usage =
+ amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
+ mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
+
+ mem.cpu_accessible_vram.total_heap_size =
+ adev->gmc.visible_vram_size;
+- mem.cpu_accessible_vram.usable_heap_size =
+- adev->gmc.visible_vram_size - adev->visible_pin_size;
++ mem.cpu_accessible_vram.usable_heap_size = adev->gmc.visible_vram_size -
++ atomic64_read(&adev->visible_pin_size);
+ mem.cpu_accessible_vram.heap_usage =
+ amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
+ mem.cpu_accessible_vram.max_allocation =
+@@ -547,8 +547,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+
+ mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size;
+ mem.gtt.total_heap_size *= PAGE_SIZE;
+- mem.gtt.usable_heap_size = mem.gtt.total_heap_size
+- - adev->gart_pin_size;
++ mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
++ atomic64_read(&adev->gart_pin_size);
+ mem.gtt.heap_usage =
+ amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
+ mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index 51f08a8..b483732 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -956,10 +956,11 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
+
+ domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+ if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
+- adev->vram_pin_size += amdgpu_bo_size(bo);
+- adev->visible_pin_size += amdgpu_vram_mgr_bo_visible_size(bo);
++ atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
++ atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
++ &adev->visible_pin_size);
+ } else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
+- adev->gart_pin_size += amdgpu_bo_size(bo);
++ atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);
+ }
+
+ error:
+@@ -1008,10 +1009,11 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
+ return 0;
+
+ if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
+- adev->vram_pin_size -= amdgpu_bo_size(bo);
+- adev->visible_pin_size -= amdgpu_vram_mgr_bo_visible_size(bo);
++ atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
++ atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
++ &adev->visible_pin_size);
+ } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+- adev->gart_pin_size -= amdgpu_bo_size(bo);
++ atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
+ }
+
+ for (i = 0; i < bo->placement.num_placement; i++) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index 529ddda..cf7f380 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -1666,7 +1666,7 @@ static int amdgpu_direct_gma_init(struct amdgpu_device *adev)
+ adev->direct_gma.dgma_bo = abo;
+
+ /* reserve in gtt */
+- adev->gart_pin_size += size;
++ atomic64_add(size,&adev->gart_pin_size);
+ r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_DGMA, size >> PAGE_SHIFT);
+ if (unlikely(r))
+ goto error_put_node;
+@@ -1682,7 +1682,7 @@ static int amdgpu_direct_gma_init(struct amdgpu_device *adev)
+ ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_DGMA);
+
+ error_put_node:
+- adev->gart_pin_size -= size;
++ atomic64_sub(size,&adev->gart_pin_size);
+
+ error_free:
+ amdgpu_bo_unref(&abo);
+@@ -1710,7 +1710,7 @@ static void amdgpu_direct_gma_fini(struct amdgpu_device *adev)
+ amdgpu_bo_unreserve(adev->direct_gma.dgma_bo);
+ }
+ amdgpu_bo_unref(&adev->direct_gma.dgma_bo);
+- adev->gart_pin_size -= (u64)amdgpu_direct_gma_size << 20;
++ atomic64_sub((u64)amdgpu_direct_gma_size << 20,&adev->gart_pin_size);
+ }
+
+ #ifdef CONFIG_ENABLE_SSG
+--
+2.7.4
+
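The plain u64 counters were updated with read-modify-write sequences from several paths (pin, unpin, direct GMA init/fini), so two concurrent updates could lose one of them. Making the fields atomic64_t gives the pattern used throughout the hunks above:

    /* writers: pin/unpin adjust the counters with atomic RMW ops */
    atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
    atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);

    /* readers: take one snapshot, so a racing pin/unpin can no longer
     * produce a torn or stale intermediate value */
    vram_gtt.vram_size = adev->gmc.real_vram_size -
                         atomic64_read(&adev->vram_pin_size);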
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4890-drm-amdgpu-Warn-and-update-pin_size-values-when-dest.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4890-drm-amdgpu-Warn-and-update-pin_size-values-when-dest.patch
new file mode 100644
index 00000000..e3da7ea6
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4890-drm-amdgpu-Warn-and-update-pin_size-values-when-dest.patch
@@ -0,0 +1,84 @@
+From 398cee6e91a6e4dc849e126ae3582da85ea1bbf7 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Michel=20D=C3=A4nzer?= <michel.daenzer@amd.com>
+Date: Wed, 11 Jul 2018 12:42:55 +0200
+Subject: [PATCH 4890/5725] drm/amdgpu: Warn and update pin_size values when
+ destroying a pinned BO
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This shouldn't happen, but if it does, we'll get a backtrace of the
+caller, and update the pin_size values as needed.
+
+v2:
+* Check bo->pin_count instead of placement flags (Christian König)
+
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 33 +++++++++++++++++++++++-------
+ 1 file changed, 26 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index b483732..aecee98 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -63,6 +63,27 @@ static bool amdgpu_need_backup(struct amdgpu_device *adev)
+ return true;
+ }
+
++/**
++ * amdgpu_bo_subtract_pin_size - Remove BO from pin_size accounting
++ *
++ * @bo: &amdgpu_bo buffer object
++ *
++ * This function is called when a BO stops being pinned, and updates the
++ * &amdgpu_device pin_size values accordingly.
++ */
++static void amdgpu_bo_subtract_pin_size(struct amdgpu_bo *bo)
++{
++ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
++
++ if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
++ atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
++ atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
++ &adev->visible_pin_size);
++ } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
++ atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
++ }
++}
++
+ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
+ {
+ struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
+@@ -71,6 +92,10 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
+
+ if (bo->tbo.mem.mem_type == AMDGPU_PL_DGMA_IMPORT)
+ kfree(tbo->mem.bus.addr);
++
++ if (WARN_ON_ONCE(bo->pin_count > 0))
++ amdgpu_bo_subtract_pin_size(bo);
++
+ if (bo->kfd_bo)
+ amdgpu_amdkfd_unreserve_system_memory_limit(bo);
+
+@@ -1008,13 +1033,7 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
+ if (bo->pin_count)
+ return 0;
+
+- if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
+- atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
+- atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
+- &adev->visible_pin_size);
+- } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+- atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
+- }
++ amdgpu_bo_subtract_pin_size(bo);
+
+ for (i = 0; i < bo->placement.num_placement; i++) {
+ bo->placements[i].lpfn = 0;
+--
+2.7.4
+
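The new amdgpu_bo_subtract_pin_size() helper centralizes the accounting, and the destroy path reuses it under WARN_ON_ONCE so a BO that is freed while still pinned produces exactly one backtrace instead of flooding the log, while the pin_size counters still end up consistent:

    /* in amdgpu_ttm_bo_destroy(): a still-pinned BO is a driver bug */
    if (WARN_ON_ONCE(bo->pin_count > 0))
            amdgpu_bo_subtract_pin_size(bo);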
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4891-Revert-drm-amd-display-make-dm_dp_aux_transfer-retur.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4891-Revert-drm-amd-display-make-dm_dp_aux_transfer-retur.patch
new file mode 100644
index 00000000..ff00801f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4891-Revert-drm-amd-display-make-dm_dp_aux_transfer-retur.patch
@@ -0,0 +1,156 @@
+From 2ea2173581cf00e775a13a61b70a4dc2bcb650c8 Mon Sep 17 00:00:00 2001
+From: Harry Wentland <harry.wentland@amd.com>
+Date: Fri, 6 Jul 2018 10:54:33 -0400
+Subject: [PATCH 4891/5725] Revert "drm/amd/display: make dm_dp_aux_transfer
+ return payload bytes instead of size"
+
+This reverts commit cc195141133ac3e767d930bedd8294ceebf1f10b.
+
+This commit was problematic on other OSes. The real solution is to
+leave all the error checking to DRM and not do it in DC, which is
+addressed by "Return aux replies directly to DRM" later in this patchset.
+
+v2: Add reason for revert.
+
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 9 ++++-----
+ drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c | 7 +++----
+ drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c | 15 +++++++++++++--
+ drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c | 1 -
+ drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h | 2 +-
+ 5 files changed, 21 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+index e3110d6..d48a37a 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -82,21 +82,20 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
+ enum i2c_mot_mode mot = (msg->request & DP_AUX_I2C_MOT) ?
+ I2C_MOT_TRUE : I2C_MOT_FALSE;
+ enum ddc_result res;
+- ssize_t read_bytes;
+
+ if (WARN_ON(msg->size > 16))
+ return -E2BIG;
+
+ switch (msg->request & ~DP_AUX_I2C_MOT) {
+ case DP_AUX_NATIVE_READ:
+- read_bytes = dal_ddc_service_read_dpcd_data(
++ res = dal_ddc_service_read_dpcd_data(
+ TO_DM_AUX(aux)->ddc_service,
+ false,
+ I2C_MOT_UNDEF,
+ msg->address,
+ msg->buffer,
+ msg->size);
+- return read_bytes;
++ break;
+ case DP_AUX_NATIVE_WRITE:
+ res = dal_ddc_service_write_dpcd_data(
+ TO_DM_AUX(aux)->ddc_service,
+@@ -107,14 +106,14 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
+ msg->size);
+ break;
+ case DP_AUX_I2C_READ:
+- read_bytes = dal_ddc_service_read_dpcd_data(
++ res = dal_ddc_service_read_dpcd_data(
+ TO_DM_AUX(aux)->ddc_service,
+ true,
+ mot,
+ msg->address,
+ msg->buffer,
+ msg->size);
+- return read_bytes;
++ break;
+ case DP_AUX_I2C_WRITE:
+ res = dal_ddc_service_write_dpcd_data(
+ TO_DM_AUX(aux)->ddc_service,
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+index 49c2fac..d5294798b 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+@@ -629,7 +629,7 @@ bool dal_ddc_service_query_ddc_data(
+ return ret;
+ }
+
+-ssize_t dal_ddc_service_read_dpcd_data(
++enum ddc_result dal_ddc_service_read_dpcd_data(
+ struct ddc_service *ddc,
+ bool i2c,
+ enum i2c_mot_mode mot,
+@@ -660,9 +660,8 @@ ssize_t dal_ddc_service_read_dpcd_data(
+ if (dal_i2caux_submit_aux_command(
+ ddc->ctx->i2caux,
+ ddc->ddc_pin,
+- &command)) {
+- return (ssize_t)command.payloads->length;
+- }
++ &command))
++ return DDC_RESULT_SUCESSFULL;
+
+ return DDC_RESULT_FAILED_OPERATION;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c
+index 1d73096..0afd2fa 100644
+--- a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c
++++ b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c
+@@ -128,8 +128,20 @@ static void process_read_reply(
+ ctx->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+ ctx->operation_succeeded = false;
++ } else if (ctx->returned_byte < ctx->current_read_length) {
++ ctx->current_read_length -= ctx->returned_byte;
++
++ ctx->offset += ctx->returned_byte;
++
++ ++ctx->invalid_reply_retry_aux_on_ack;
++
++ if (ctx->invalid_reply_retry_aux_on_ack >
++ AUX_INVALID_REPLY_RETRY_COUNTER) {
++ ctx->status =
++ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
++ ctx->operation_succeeded = false;
++ }
+ } else {
+- ctx->current_read_length = ctx->returned_byte;
+ ctx->status = I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
+ ctx->transaction_complete = true;
+ ctx->operation_succeeded = true;
+@@ -290,7 +302,6 @@ static bool read_command(
+ ctx.operation_succeeded);
+ }
+
+- request->payload.length = ctx.reply.length;
+ return ctx.operation_succeeded;
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
+index 14dc8c9..9b0bcc6 100644
+--- a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
++++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
+@@ -254,7 +254,6 @@ bool dal_i2caux_submit_aux_command(
+ break;
+ }
+
+- cmd->payloads->length = request.payload.length;
+ ++index_of_payload;
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
+index 090b7a8..0bf73b7 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
+@@ -102,7 +102,7 @@ bool dal_ddc_service_query_ddc_data(
+ uint8_t *read_buf,
+ uint32_t read_size);
+
+-ssize_t dal_ddc_service_read_dpcd_data(
++enum ddc_result dal_ddc_service_read_dpcd_data(
+ struct ddc_service *ddc,
+ bool i2c,
+ enum i2c_mot_mode mot,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4892-drm-amd-display-Separate-HUBP-surface-size-and-rotat.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4892-drm-amd-display-Separate-HUBP-surface-size-and-rotat.patch
new file mode 100644
index 00000000..a052c7ed
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4892-drm-amd-display-Separate-HUBP-surface-size-and-rotat.patch
@@ -0,0 +1,122 @@
+From 548fa588dbfb81ae01e7573edb130d812bb6cfa5 Mon Sep 17 00:00:00 2001
+From: Eric Bernstein <eric.bernstein@amd.com>
+Date: Fri, 8 Jun 2018 15:01:59 -0400
+Subject: [PATCH 4892/5725] drm/amd/display: Separate HUBP surface size and
+ rotation/mirror programming
+
+Separate HUBP surface size and rotation/mirror programming so that
+HUBP revisions without mirror/rotation support do not access those
+register fields.
+
+Signed-off-by: Eric Bernstein <eric.bernstein@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 23 +++++++++++++++--------
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 13 ++++++++-----
+ 2 files changed, 23 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+index a281bed..ec8e833 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+@@ -152,16 +152,14 @@ void hubp1_program_tiling(
+ PIPE_ALIGNED, info->gfx9.pipe_aligned);
+ }
+
+-void hubp1_program_size_and_rotation(
++void hubp1_program_size(
+ struct hubp *hubp,
+- enum dc_rotation_angle rotation,
+ enum surface_pixel_format format,
+ const union plane_size *plane_size,
+- struct dc_plane_dcc_param *dcc,
+- bool horizontal_mirror)
++ struct dc_plane_dcc_param *dcc)
+ {
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+- uint32_t pitch, meta_pitch, pitch_c, meta_pitch_c, mirror;
++ uint32_t pitch, meta_pitch, pitch_c, meta_pitch_c;
+
+ /* Program data and meta surface pitch (calculation from addrlib)
+ * 444 or 420 luma
+@@ -192,13 +190,22 @@ void hubp1_program_size_and_rotation(
+ if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+ REG_UPDATE_2(DCSURF_SURFACE_PITCH_C,
+ PITCH_C, pitch_c, META_PITCH_C, meta_pitch_c);
++}
++
++void hubp1_program_rotation(
++ struct hubp *hubp,
++ enum dc_rotation_angle rotation,
++ bool horizontal_mirror)
++{
++ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
++ uint32_t mirror;
++
+
+ if (horizontal_mirror)
+ mirror = 1;
+ else
+ mirror = 0;
+
+-
+ /* Program rotation angle and horz mirror - no mirror */
+ if (rotation == ROTATION_ANGLE_0)
+ REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
+@@ -481,8 +488,8 @@ void hubp1_program_surface_config(
+ {
+ hubp1_dcc_control(hubp, dcc->enable, dcc->grph.independent_64b_blks);
+ hubp1_program_tiling(hubp, tiling_info, format);
+- hubp1_program_size_and_rotation(
+- hubp, rotation, format, plane_size, dcc, horizontal_mirror);
++ hubp1_program_size(hubp, format, plane_size, dcc);
++ hubp1_program_rotation(hubp, rotation, horizontal_mirror);
+ hubp1_program_pixel_format(hubp, format);
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+index d901d50..f689fea 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+@@ -268,8 +268,6 @@
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_PITCH, META_PITCH, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_PITCH_C, PITCH_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_PITCH_C, META_PITCH_C, mask_sh),\
+- HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, ROTATION_ANGLE, mask_sh),\
+- HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, H_MIRROR_EN, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, SURFACE_PIXEL_FORMAT, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_TYPE, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_PENDING, mask_sh),\
+@@ -388,6 +386,8 @@
+ #define HUBP_MASK_SH_LIST_DCN10(mask_sh)\
+ HUBP_MASK_SH_LIST_DCN(mask_sh),\
+ HUBP_MASK_SH_LIST_DCN_VM(mask_sh),\
++ HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, ROTATION_ANGLE, mask_sh),\
++ HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, H_MIRROR_EN, mask_sh),\
+ HUBP_SF(HUBPREQ0_PREFETCH_SETTINS, DST_Y_PREFETCH, mask_sh),\
+ HUBP_SF(HUBPREQ0_PREFETCH_SETTINS, VRATIO_PREFETCH, mask_sh),\
+ HUBP_SF(HUBPREQ0_PREFETCH_SETTINS_C, VRATIO_PREFETCH_C, mask_sh),\
+@@ -679,12 +679,15 @@ void hubp1_program_pixel_format(
+ struct hubp *hubp,
+ enum surface_pixel_format format);
+
+-void hubp1_program_size_and_rotation(
++void hubp1_program_size(
+ struct hubp *hubp,
+- enum dc_rotation_angle rotation,
+ enum surface_pixel_format format,
+ const union plane_size *plane_size,
+- struct dc_plane_dcc_param *dcc,
++ struct dc_plane_dcc_param *dcc);
++
++void hubp1_program_rotation(
++ struct hubp *hubp,
++ enum dc_rotation_angle rotation,
+ bool horizontal_mirror);
+
+ void hubp1_program_tiling(
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4893-drm-amd-display-Add-avoid_vbios_exec_table-debug-bit.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4893-drm-amd-display-Add-avoid_vbios_exec_table-debug-bit.patch
new file mode 100644
index 00000000..b17d9a63
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4893-drm-amd-display-Add-avoid_vbios_exec_table-debug-bit.patch
@@ -0,0 +1,29 @@
+From 08f3b79d6b583049b75b7fe9faa58ff5595a61da Mon Sep 17 00:00:00 2001
+From: Tony Cheng <tony.cheng@amd.com>
+Date: Thu, 14 Jun 2018 16:06:10 -0400
+Subject: [PATCH 4893/5725] drm/amd/display: Add avoid_vbios_exec_table debug
+ bit
+
+Signed-off-by: Tony Cheng <tony.cheng@amd.com>
+Reviewed-by: Yongqiang Sun <yongqiang.sun@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 2af5e60..4192bbd 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -250,6 +250,7 @@ struct dc_debug {
+ bool always_use_regamma;
+ bool p010_mpo_support;
+ bool recovery_enabled;
++ bool avoid_vbios_exec_table;
+
+ };
+ struct dc_state;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4894-drm-amd-display-support-access-ddc-for-mst-branch.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4894-drm-amd-display-support-access-ddc-for-mst-branch.patch
new file mode 100644
index 00000000..e24246cc
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4894-drm-amd-display-support-access-ddc-for-mst-branch.patch
@@ -0,0 +1,40 @@
+From 07cb2ad65e7c2bc5cb69c0ae21d3cfce96e247ab Mon Sep 17 00:00:00 2001
+From: Eric Yang <Eric.Yang2@amd.com>
+Date: Tue, 12 Jun 2018 18:37:12 -0400
+Subject: [PATCH 4894/5725] drm/amd/display: support access ddc for mst branch
+
+[Why]
+Megachip docking stations access the ddc line through the display
+driver when installing FW. Previously, every transaction would fail
+because links attached to an mst branch did not have their ddc
+transaction type set.
+
+[How]
+Set ddc transaction type when mst branch is connected.
+
+Signed-off-by: Eric Yang <Eric.Yang2@amd.com>
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_link.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index e88dc58..9d5ccb6 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -498,6 +498,10 @@ static bool detect_dp(
+ sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
+ link->type = dc_connection_mst_branch;
+
++ dal_ddc_service_set_transaction_type(
++ link->ddc,
++ sink_caps->transaction_type);
++
+ /*
+ * This call will initiate MST topology discovery. Which
+ * will detect MST ports and add new DRM connector DRM
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4895-drm-amd-display-Implement-cursor-multiplier.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4895-drm-amd-display-Implement-cursor-multiplier.patch
new file mode 100644
index 00000000..ef7b4afb
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4895-drm-amd-display-Implement-cursor-multiplier.patch
@@ -0,0 +1,117 @@
+From 4101acd1a4f21dc0f043feadd26ce296c35a3e09 Mon Sep 17 00:00:00 2001
+From: Krunoslav Kovac <Krunoslav.Kovac@amd.com>
+Date: Thu, 14 Jun 2018 15:08:58 -0400
+Subject: [PATCH 4895/5725] drm/amd/display: Implement cursor multiplier
+
+DCN allows cursor multiplier when blending FP16 surface.
+
+Signed-off-by: Krunoslav Kovac <Krunoslav.Kovac@amd.com>
+Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
+Reviewed-by: Anthony Koo <Anthony.Koo@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 1 +
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 15 +++++++++++++++
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 12 ++++++++----
+ 3 files changed, 24 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+index f285d37..7117f9f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+@@ -490,6 +490,7 @@ struct dc_cursor_attributes {
+ uint32_t height;
+
+ enum dc_cursor_color_format color_format;
++ uint32_t sdr_white_level; // for boosting (SDR) cursor in HDR mode
+
+ /* In case we support HW Cursor rotation in the future */
+ enum dc_rotation_angle rotation_angle;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+index ec8e833..9eb60a0 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+@@ -27,6 +27,7 @@
+ #include "reg_helper.h"
+ #include "basics/conversion.h"
+ #include "dcn10_hubp.h"
++#include "custom_float.h"
+
+ #define REG(reg)\
+ hubp1->hubp_regs->reg
+@@ -1038,6 +1039,18 @@ void hubp1_cursor_set_attributes(
+ enum cursor_pitch hw_pitch = hubp1_get_cursor_pitch(attr->pitch);
+ enum cursor_lines_per_chunk lpc = hubp1_get_lines_per_chunk(
+ attr->width, attr->color_format);
++ struct fixed31_32 multiplier;
++ uint32_t hw_mult = 0x3c00; // 1.0 default multiplier
++ struct custom_float_format fmt;
++
++ fmt.exponenta_bits = 5;
++ fmt.mantissa_bits = 10;
++ fmt.sign = true;
++
++ if (attr->sdr_white_level > 80) {
++ multiplier = dc_fixpt_from_fraction(attr->sdr_white_level, 80);
++ convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
++ }
+
+ hubp->curs_attr = *attr;
+
+@@ -1060,6 +1073,8 @@ void hubp1_cursor_set_attributes(
+ CURSOR0_DST_Y_OFFSET, 0,
+ /* used to shift the cursor chunk request deadline */
+ CURSOR0_CHUNK_HDL_ADJUST, 3);
++
++ REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, hw_mult);
+ }
+
+ void hubp1_cursor_set_position(
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+index f689fea..9991da5 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+@@ -133,7 +133,8 @@
+ SRI(CURSOR_CONTROL, CURSOR, id), \
+ SRI(CURSOR_POSITION, CURSOR, id), \
+ SRI(CURSOR_HOT_SPOT, CURSOR, id), \
+- SRI(CURSOR_DST_OFFSET, CURSOR, id)
++ SRI(CURSOR_DST_OFFSET, CURSOR, id), \
++ SRI(CURSOR0_FP_SCALE_BIAS, CNVC_CUR, id)
+
+ #define HUBP_COMMON_REG_VARIABLE_LIST \
+ uint32_t DCHUBP_CNTL; \
+@@ -241,7 +242,8 @@
+ uint32_t CURSOR_POSITION; \
+ uint32_t CURSOR_HOT_SPOT; \
+ uint32_t CURSOR_DST_OFFSET; \
+- uint32_t HUBP_CLK_CNTL
++ uint32_t HUBP_CLK_CNTL; \
++ uint32_t CURSOR0_FP_SCALE_BIAS
+
+ #define HUBP_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+@@ -424,7 +426,8 @@
+ HUBP_SF(CURSOR0_CURSOR_POSITION, CURSOR_Y_POSITION, mask_sh), \
+ HUBP_SF(CURSOR0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_X, mask_sh), \
+ HUBP_SF(CURSOR0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_Y, mask_sh), \
+- HUBP_SF(CURSOR0_CURSOR_DST_OFFSET, CURSOR_DST_X_OFFSET, mask_sh)
++ HUBP_SF(CURSOR0_CURSOR_DST_OFFSET, CURSOR_DST_X_OFFSET, mask_sh), \
++ HUBP_SF(CNVC_CUR0_CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, mask_sh)
+
+ #define DCN_HUBP_REG_FIELD_LIST(type) \
+ type HUBP_BLANK_EN;\
+@@ -615,7 +618,8 @@
+ type CURSOR_HOT_SPOT_X; \
+ type CURSOR_HOT_SPOT_Y; \
+ type CURSOR_DST_X_OFFSET; \
+- type OUTPUT_FP
++ type OUTPUT_FP; \
++ type CUR0_FP_SCALE
+
+ struct dcn_mi_registers {
+ HUBP_COMMON_REG_VARIABLE_LIST;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4896-drm-amd-display-Linux-Set-Read-link-rate-and-lane-co.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4896-drm-amd-display-Linux-Set-Read-link-rate-and-lane-co.patch
new file mode 100644
index 00000000..553065d8
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4896-drm-amd-display-Linux-Set-Read-link-rate-and-lane-co.patch
@@ -0,0 +1,124 @@
+From f4eb6343eee75da91eca0cc8c6677b9282934a79 Mon Sep 17 00:00:00 2001
+From: Hersen Wu <hersenxs.wu@amd.com>
+Date: Fri, 15 Jun 2018 09:28:34 -0400
+Subject: [PATCH 4896/5725] drm/amd/display: Linux Set/Read link rate and lane
+ count through debugfs
+
+Expose dc link functions so they can be called by the Linux DM.
+
+Signed-off-by: Hersen Wu <hersenxs.wu@amd.com>
+Reviewed-by: Sun peng Li <Sunpeng.Li@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc.c | 65 ++++++++++++++++++++++++++++++++
+ drivers/gpu/drm/amd/display/dc/dc_link.h | 17 +++++++++
+ 2 files changed, 82 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 4e9bd55..b14741d 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -382,6 +382,71 @@ void dc_stream_set_static_screen_events(struct dc *dc,
+ dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events);
+ }
+
++void dc_link_set_drive_settings(struct dc *dc,
++ struct link_training_settings *lt_settings,
++ const struct dc_link *link)
++{
++
++ int i;
++
++ for (i = 0; i < dc->link_count; i++) {
++ if (dc->links[i] == link)
++ break;
++ }
++
++ if (i >= dc->link_count)
++ ASSERT_CRITICAL(false);
++
++ dc_link_dp_set_drive_settings(dc->links[i], lt_settings);
++}
++
++void dc_link_perform_link_training(struct dc *dc,
++ struct dc_link_settings *link_setting,
++ bool skip_video_pattern)
++{
++ int i;
++
++ for (i = 0; i < dc->link_count; i++)
++ dc_link_dp_perform_link_training(
++ dc->links[i],
++ link_setting,
++ skip_video_pattern);
++}
++
++void dc_link_set_preferred_link_settings(struct dc *dc,
++ struct dc_link_settings *link_setting,
++ struct dc_link *link)
++{
++ link->preferred_link_setting = *link_setting;
++ dp_retrain_link_dp_test(link, link_setting, false);
++}
++
++void dc_link_enable_hpd(const struct dc_link *link)
++{
++ dc_link_dp_enable_hpd(link);
++}
++
++void dc_link_disable_hpd(const struct dc_link *link)
++{
++ dc_link_dp_disable_hpd(link);
++}
++
++
++void dc_link_set_test_pattern(struct dc_link *link,
++ enum dp_test_pattern test_pattern,
++ const struct link_training_settings *p_link_settings,
++ const unsigned char *p_custom_pattern,
++ unsigned int cust_pattern_size)
++{
++ if (link != NULL)
++ dc_link_dp_set_test_pattern(
++ link,
++ test_pattern,
++ p_link_settings,
++ p_custom_pattern,
++ cust_pattern_size);
++}
++
+ static void destruct(struct dc *dc)
+ {
+ dc_release_state(dc->current_state);
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
+index 9404c6e..db92387 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
+@@ -216,6 +216,23 @@ bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type);
+ * DPCD access interfaces
+ */
+
++void dc_link_set_drive_settings(struct dc *dc,
++ struct link_training_settings *lt_settings,
++ const struct dc_link *link);
++void dc_link_perform_link_training(struct dc *dc,
++ struct dc_link_settings *link_setting,
++ bool skip_video_pattern);
++void dc_link_set_preferred_link_settings(struct dc *dc,
++ struct dc_link_settings *link_setting,
++ struct dc_link *link);
++void dc_link_enable_hpd(const struct dc_link *link);
++void dc_link_disable_hpd(const struct dc_link *link);
++void dc_link_set_test_pattern(struct dc_link *link,
++ enum dp_test_pattern test_pattern,
++ const struct link_training_settings *p_link_settings,
++ const unsigned char *p_custom_pattern,
++ unsigned int cust_pattern_size);
++
+ bool dc_submit_i2c(
+ struct dc *dc,
+ uint32_t link_index,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4897-drm-amd-display-Move-common-GPIO-registers-into-a-co.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4897-drm-amd-display-Move-common-GPIO-registers-into-a-co.patch
new file mode 100644
index 00000000..1e389c61
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4897-drm-amd-display-Move-common-GPIO-registers-into-a-co.patch
@@ -0,0 +1,40 @@
+From ed0384b24c0391667d452d12676d500ef1ea2761 Mon Sep 17 00:00:00 2001
+From: Charlene Liu <charlene.liu@amd.com>
+Date: Fri, 15 Jun 2018 12:19:00 -0400
+Subject: [PATCH 4897/5725] drm/amd/display: Move common GPIO registers into a
+ common define
+
+Signed-off-by: Charlene Liu <charlene.liu@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
+index 9c4a56c..bf40725 100644
+--- a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
++++ b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
+@@ -82,13 +82,16 @@
+ DDC_GPIO_I2C_REG_LIST(cd),\
+ .ddc_setup = 0
+
+-#define DDC_MASK_SH_LIST(mask_sh) \
++#define DDC_MASK_SH_LIST_COMMON(mask_sh) \
+ SF_DDC(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE, mask_sh),\
+ SF_DDC(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_EDID_DETECT_ENABLE, mask_sh),\
+ SF_DDC(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_EDID_DETECT_MODE, mask_sh),\
+ SF_DDC(DC_GPIO_DDC1_MASK, DC_GPIO_DDC1DATA_PD_EN, mask_sh),\
+ SF_DDC(DC_GPIO_DDC1_MASK, DC_GPIO_DDC1CLK_PD_EN, mask_sh),\
+- SF_DDC(DC_GPIO_DDC1_MASK, AUX_PAD1_MODE, mask_sh),\
++ SF_DDC(DC_GPIO_DDC1_MASK, AUX_PAD1_MODE, mask_sh)
++
++#define DDC_MASK_SH_LIST(mask_sh) \
++ DDC_MASK_SH_LIST_COMMON(mask_sh),\
+ SF_DDC(DC_GPIO_I2CPAD_MASK, DC_GPIO_SDA_PD_DIS, mask_sh),\
+ SF_DDC(DC_GPIO_I2CPAD_MASK, DC_GPIO_SCL_PD_DIS, mask_sh)
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4898-drm-amd-display-fix-bug-where-we-are-creating-bogus-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4898-drm-amd-display-fix-bug-where-we-are-creating-bogus-.patch
new file mode 100644
index 00000000..155882fc
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4898-drm-amd-display-fix-bug-where-we-are-creating-bogus-.patch
@@ -0,0 +1,126 @@
+From 37a8f4093a6d92b6b4940e008781c11a8c5025ba Mon Sep 17 00:00:00 2001
+From: Tony Cheng <tony.cheng@amd.com>
+Date: Fri, 15 Jun 2018 17:53:35 -0400
+Subject: [PATCH 4898/5725] drm/amd/display: fix bug where we are creating
+ bogus i2c aux
+
+[WHY]
+we were using 6 instances based on i2caux_dce110.c
+
+[HOW]
+pass in how many instances to ctor
+
+Signed-off-by: Tony Cheng <tony.cheng@amd.com>
+Reviewed-by: Yongqiang Sun <yongqiang.sun@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c | 1 +
+ drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c | 6 ++++--
+ drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h | 1 +
+ drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c | 1 +
+ drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c | 1 +
+ drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c | 1 +
+ 6 files changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c
+index e8d3781..8b704ab 100644
+--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c
++++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c
+@@ -97,6 +97,7 @@ struct i2caux *dal_i2caux_dce100_create(
+
+ dal_i2caux_dce110_construct(i2caux_dce110,
+ ctx,
++ ARRAY_SIZE(dce100_aux_regs),
+ dce100_aux_regs,
+ dce100_hw_engine_regs,
+ &i2c_shift,
+diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c
+index 2a047f8..e0557d3 100644
+--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c
++++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c
+@@ -199,6 +199,7 @@ static const struct dce110_i2c_hw_engine_mask i2c_mask = {
+ void dal_i2caux_dce110_construct(
+ struct i2caux_dce110 *i2caux_dce110,
+ struct dc_context *ctx,
++ unsigned int num_i2caux_inst,
+ const struct dce110_aux_registers aux_regs[],
+ const struct dce110_i2c_hw_engine_registers i2c_hw_engine_regs[],
+ const struct dce110_i2c_hw_engine_shift *i2c_shift,
+@@ -251,7 +252,7 @@ void dal_i2caux_dce110_construct(
+ dal_i2c_hw_engine_dce110_create(&hw_arg_dce110);
+
+ ++i;
+- } while (i < ARRAY_SIZE(hw_ddc_lines));
++ } while (i < num_i2caux_inst);
+
+ /* Create AUX engines for all lines which has assisted HW AUX
+ * 'i' (loop counter) used as DDC/AUX engine_id */
+@@ -272,7 +273,7 @@ void dal_i2caux_dce110_construct(
+ dal_aux_engine_dce110_create(&aux_init_data);
+
+ ++i;
+- } while (i < ARRAY_SIZE(hw_aux_lines));
++ } while (i < num_i2caux_inst);
+
+ /*TODO Generic I2C SW and HW*/
+ }
+@@ -303,6 +304,7 @@ struct i2caux *dal_i2caux_dce110_create(
+
+ dal_i2caux_dce110_construct(i2caux_dce110,
+ ctx,
++ ARRAY_SIZE(dce110_aux_regs),
+ dce110_aux_regs,
+ i2c_hw_engine_regs,
+ &i2c_shift,
+diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h
+index 1b1f71c..d3d8cc5 100644
+--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h
++++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h
+@@ -45,6 +45,7 @@ struct i2caux *dal_i2caux_dce110_create(
+ void dal_i2caux_dce110_construct(
+ struct i2caux_dce110 *i2caux_dce110,
+ struct dc_context *ctx,
++ unsigned int num_i2caux_inst,
+ const struct dce110_aux_registers *aux_regs,
+ const struct dce110_i2c_hw_engine_registers *i2c_hw_engine_regs,
+ const struct dce110_i2c_hw_engine_shift *i2c_shift,
+diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c
+index dafc1a7..a9db047 100644
+--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c
++++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c
+@@ -93,6 +93,7 @@ static void construct(
+ {
+ dal_i2caux_dce110_construct(i2caux_dce110,
+ ctx,
++ ARRAY_SIZE(dce112_aux_regs),
+ dce112_aux_regs,
+ dce112_hw_engine_regs,
+ &i2c_shift,
+diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c
+index 0e7b182..6a4f344 100644
+--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c
++++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c
+@@ -111,6 +111,7 @@ struct i2caux *dal_i2caux_dce120_create(
+
+ dal_i2caux_dce110_construct(i2caux_dce110,
+ ctx,
++ ARRAY_SIZE(dce120_aux_regs),
+ dce120_aux_regs,
+ dce120_hw_engine_regs,
+ &i2c_shift,
+diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c b/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c
+index e44a890..a59c1f5 100644
+--- a/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c
++++ b/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c
+@@ -111,6 +111,7 @@ struct i2caux *dal_i2caux_dcn10_create(
+
+ dal_i2caux_dce110_construct(i2caux_dce110,
+ ctx,
++ ARRAY_SIZE(dcn10_aux_regs),
+ dcn10_aux_regs,
+ dcn10_hw_engine_regs,
+ &i2c_shift,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4899-drm-amd-display-generic-indirect-register-access.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4899-drm-amd-display-generic-indirect-register-access.patch
new file mode 100644
index 00000000..187a1d7b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4899-drm-amd-display-generic-indirect-register-access.patch
@@ -0,0 +1,136 @@
+From b5cce50c1cbbb46ec3b9fb1d87cfefd974f3ed69 Mon Sep 17 00:00:00 2001
+From: Tony Cheng <tony.cheng@amd.com>
+Date: Sat, 16 Jun 2018 19:43:41 -0400
+Subject: [PATCH 4899/5725] drm/amd/display: generic indirect register access
+
+Add generic indirect register access following our register access pattern.
+
+This will make it easier to review code and programming sequences,
+with all the complexity hidden in the macros.
+
+Signed-off-by: Tony Cheng <tony.cheng@amd.com>
+Reviewed-by: Yongqiang Sun <yongqiang.sun@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc_helper.c | 51 +++++++++++++++++++++++++
+ drivers/gpu/drm/amd/display/dc/inc/reg_helper.h | 46 ++++++++++++++++++++++
+ 2 files changed, 97 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
+index bd0fda0..e68077e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
++++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
+@@ -255,3 +255,54 @@ uint32_t generic_reg_wait(const struct dc_context *ctx,
+
+ return reg_val;
+ }
++
++void generic_write_indirect_reg(const struct dc_context *ctx,
++ uint32_t addr_index, uint32_t addr_data,
++ uint32_t index, uint32_t data)
++{
++ dm_write_reg(ctx, addr_index, index);
++ dm_write_reg(ctx, addr_data, data);
++}
++
++uint32_t generic_read_indirect_reg(const struct dc_context *ctx,
++ uint32_t addr_index, uint32_t addr_data,
++ uint32_t index)
++{
++ uint32_t value = 0;
++
++ dm_write_reg(ctx, addr_index, index);
++ value = dm_read_reg(ctx, addr_data);
++
++ return value;
++}
++
++
++uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
++ uint32_t addr_index, uint32_t addr_data,
++ uint32_t index, uint32_t reg_val, int n,
++ uint8_t shift1, uint32_t mask1, uint32_t field_value1,
++ ...)
++{
++ uint32_t shift, mask, field_value;
++ int i = 1;
++
++ va_list ap;
++
++ va_start(ap, field_value1);
++
++ reg_val = set_reg_field_value_ex(reg_val, field_value1, mask1, shift1);
++
++ while (i < n) {
++ shift = va_arg(ap, uint32_t);
++ mask = va_arg(ap, uint32_t);
++ field_value = va_arg(ap, uint32_t);
++
++ reg_val = set_reg_field_value_ex(reg_val, field_value, mask, shift);
++ i++;
++ }
++
++ generic_write_indirect_reg(ctx, addr_index, addr_data, index, reg_val);
++ va_end(ap);
++
++ return reg_val;
++}
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
+index 3306e7b..cf5a84b 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
+@@ -445,4 +445,50 @@ uint32_t generic_reg_get8(const struct dc_context *ctx, uint32_t addr,
+ uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
+ uint8_t shift7, uint32_t mask7, uint32_t *field_value7,
+ uint8_t shift8, uint32_t mask8, uint32_t *field_value8);
++
++
++/* indirect register access */
++
++#define IX_REG_SET_N(index_reg_name, data_reg_name, index, n, initial_val, ...) \
++ generic_indirect_reg_update_ex(CTX, \
++ REG(index_reg_name), REG(data_reg_name), IND_REG(index), \
++ initial_val, \
++ n, __VA_ARGS__)
++
++#define IX_REG_SET_2(index_reg_name, data_reg_name, index, init_value, f1, v1, f2, v2) \
++ IX_REG_SET_N(index_reg_name, data_reg_name, index, 2, init_value, \
++ FN(reg, f1), v1,\
++ FN(reg, f2), v2)
++
++
++#define IX_REG_READ(index_reg_name, data_reg_name, index) \
++ generic_read_indirect_reg(CTX, REG(index_reg_name), REG(data_reg_name), IND_REG(index))
++
++
++
++#define IX_REG_UPDATE_N(index_reg_name, data_reg_name, index, n, ...) \
++ generic_indirect_reg_update_ex(CTX, \
++ REG(index_reg_name), REG(data_reg_name), IND_REG(index), \
++ IX_REG_READ(index_reg_name, data_reg_name, index), \
++ n, __VA_ARGS__)
++
++#define IX_REG_UPDATE_2(index_reg_name, data_reg_name, index, f1, v1, f2, v2) \
++ IX_REG_UPDATE_N(index_reg_name, data_reg_name, index, 2,\
++ FN(reg, f1), v1,\
++ FN(reg, f2), v2)
++
++void generic_write_indirect_reg(const struct dc_context *ctx,
++ uint32_t addr_index, uint32_t addr_data,
++ uint32_t index, uint32_t data);
++
++uint32_t generic_read_indirect_reg(const struct dc_context *ctx,
++ uint32_t addr_index, uint32_t addr_data,
++ uint32_t index);
++
++uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
++ uint32_t addr_index, uint32_t addr_data,
++ uint32_t index, uint32_t reg_val, int n,
++ uint8_t shift1, uint32_t mask1, uint32_t field_value1,
++ ...);
++
+ #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_REG_HELPER_H_ */
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4900-drm-amd-display-fix-incorrect-check-for-atom-table-s.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4900-drm-amd-display-fix-incorrect-check-for-atom-table-s.patch
new file mode 100644
index 00000000..5c06c913
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4900-drm-amd-display-fix-incorrect-check-for-atom-table-s.patch
@@ -0,0 +1,32 @@
+From 5613d0609063c4ffc7520f0e6bb2bd272a165a27 Mon Sep 17 00:00:00 2001
+From: Tony Cheng <tony.cheng@amd.com>
+Date: Sun, 17 Jun 2018 13:26:27 -0400
+Subject: [PATCH 4900/5725] drm/amd/display: fix incorrect check for atom table
+ size
+
+If the table has very few pins, the check fails and we can't boot.
+
+Signed-off-by: Tony Cheng <tony.cheng@amd.com>
+Reviewed-by: Yongqiang Sun <yongqiang.sun@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+index aeb56e4..eab007e 100644
+--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
++++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+@@ -678,7 +678,7 @@ static enum bp_result bios_parser_get_gpio_pin_info(
+ return BP_RESULT_BADBIOSTABLE;
+
+ if (sizeof(struct atom_common_table_header) +
+- sizeof(struct atom_gpio_pin_lut_v2_1)
++ sizeof(struct atom_gpio_pin_assignment)
+ > le16_to_cpu(header->table_header.structuresize))
+ return BP_RESULT_BADBIOSTABLE;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4901-drm-amd-display-set-read-link-rate-and-lane-count-th.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4901-drm-amd-display-set-read-link-rate-and-lane-count-th.patch
new file mode 100644
index 00000000..b98a39e9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4901-drm-amd-display-set-read-link-rate-and-lane-count-th.patch
@@ -0,0 +1,323 @@
+From b043cb5f0eda885e7dff7b8f614b74a1609925d6 Mon Sep 17 00:00:00 2001
+From: Hersen Wu <hersenxs.wu@amd.com>
+Date: Fri, 15 Jun 2018 14:25:48 -0400
+Subject: [PATCH 4901/5725] drm/amd/display: set-read link rate and lane count
+ through debugfs
+
+ function description
+ get/ set DP configuration: lane_count, link_rate, spread_spectrum
+
+ valid lane count value: 1, 2, 4
+ valid link rate value:
+ 06h = 1.62Gbps per lane
+ 0Ah = 2.7Gbps per lane
+ 0Ch = 3.24Gbps per lane
+ 14h = 5.4Gbps per lane
+ 1Eh = 8.1Gbps per lane
+
+ debugfs is located at /sys/kernel/debug/dri/0/DP-x/link_settings
+
+ --- to get dp configuration
+
+ xxd -l 300 phy_settings
+
+ It will list current, verified, reported, preferred dp configuration.
+ current -- for current video mode
+ verified --- maximum configuration which pass link training
+ reported --- DP rx report caps (DPCD register offset 0, 1 2)
+ preferred --- user force settings
+
+ --- set (or force) dp configuration
+
+ echo <lane_count> <link_rate>
+
+ for example, to force to 2 lane, 2.7GHz,
+ echo 4 0xa > link_settings
+
+ spread_spectrum could not be changed dynamically.
+
+ in case invalid lane count, link rate are force, no hw programming will be
+ done. please check link settings after force operation to see if HW get
+ programming.
+
+ xxd -l 300 link_settings
+
+ check current and preferred settings.
+
+Signed-off-by: Hersen Wu <hersenxs.wu@amd.com>
+Reviewed-by: Hersen Wu <hersenxs.wu@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 227 ++++++++++++++++++---
+ 1 file changed, 196 insertions(+), 31 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+index cf5ea69..9ff8833 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+@@ -26,38 +26,211 @@
+ #include <linux/debugfs.h>
+
+ #include "dc.h"
+-#include "dc_link.h"
+
+ #include "amdgpu.h"
+ #include "amdgpu_dm.h"
+ #include "amdgpu_dm_debugfs.h"
+
+-static ssize_t dp_link_rate_debugfs_read(struct file *f, char __user *buf,
++/* function description
++ * get/ set DP configuration: lane_count, link_rate, spread_spectrum
++ *
++ * valid lane count value: 1, 2, 4
++ * valid link rate value:
++ * 06h = 1.62Gbps per lane
++ * 0Ah = 2.7Gbps per lane
++ * 0Ch = 3.24Gbps per lane
++ * 14h = 5.4Gbps per lane
++ * 1Eh = 8.1Gbps per lane
++ *
++ * debugfs is located at /sys/kernel/debug/dri/0/DP-x/link_settings
++ *
++ * --- to get dp configuration
++ *
++ * xxd -l 300 phy_settings
++ *
++ * It will list current, verified, reported, preferred dp configuration.
++ * current -- for current video mode
++ * verified --- maximum configuration which pass link training
++ * reported --- DP rx report caps (DPCD register offset 0, 1 2)
++ * preferred --- user force settings
++ *
++ * --- set (or force) dp configuration
++ *
++ * echo <lane_count> <link_rate>
++ *
++ * for example, to force to 2 lane, 2.7GHz,
++ * echo 4 0xa > link_settings
++ *
++ * spread_spectrum could not be changed dynamically.
++ *
++ * in case invalid lane count, link rate are force, no hw programming will be
++ * done. please check link settings after force operation to see if HW get
++ * programming.
++ *
++ * xxd -l 300 link_settings
++ *
++ * check current and preferred settings.
++ *
++ */
++static ssize_t dp_link_settings_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+ {
+- /* TODO: create method to read link rate */
+- return 1;
+-}
++ struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
++ struct dc_link *link = connector->dc_link;
++ char *rd_buf = NULL;
++ char *rd_buf_ptr = NULL;
++ uint32_t rd_buf_size = 320;
++ int bytes_to_user;
++ uint8_t str_len = 0;
++ int r;
+
+-static ssize_t dp_link_rate_debugfs_write(struct file *f, const char __user *buf,
+- size_t size, loff_t *pos)
+-{
+- /* TODO: create method to write link rate */
+- return 1;
+-}
++ if (size == 0)
++ return 0;
+
+-static ssize_t dp_lane_count_debugfs_read(struct file *f, char __user *buf,
+- size_t size, loff_t *pos)
+-{
+- /* TODO: create method to read lane count */
+- return 1;
++ rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
++ if (!rd_buf)
++ return 0;
++
++ rd_buf_ptr = rd_buf;
++
++ str_len = strlen("Current: %d %d %d ");
++ snprintf(rd_buf_ptr, str_len, "Current: %d %d %d ",
++ link->cur_link_settings.lane_count,
++ link->cur_link_settings.link_rate,
++ link->cur_link_settings.link_spread);
++ rd_buf_ptr = rd_buf_ptr + str_len;
++
++ str_len = strlen("Verified: %d %d %d ");
++ snprintf(rd_buf_ptr, str_len, "Verified: %d %d %d ",
++ link->verified_link_cap.lane_count,
++ link->verified_link_cap.link_rate,
++ link->verified_link_cap.link_spread);
++ rd_buf_ptr = rd_buf_ptr + str_len;
++
++ str_len = strlen("Reported: %d %d %d ");
++ snprintf(rd_buf_ptr, str_len, "Reported: %d %d %d ",
++ link->reported_link_cap.lane_count,
++ link->reported_link_cap.link_rate,
++ link->reported_link_cap.link_spread);
++ rd_buf_ptr = rd_buf_ptr + str_len;
++
++ str_len = strlen("Preferred: %d %d %d ");
++ snprintf(rd_buf_ptr, str_len, "Preferred: %d %d %d ",
++ link->preferred_link_setting.lane_count,
++ link->preferred_link_setting.link_rate,
++ link->preferred_link_setting.link_spread);
++
++ r = copy_to_user(buf, rd_buf, rd_buf_size);
++
++ bytes_to_user = rd_buf_size - r;
++
++ if (r > rd_buf_size) {
++ bytes_to_user = 0;
++ DRM_DEBUG_DRIVER("data not copy to user");
++ }
++
++ kfree(rd_buf);
++ return bytes_to_user;
+ }
+
+-static ssize_t dp_lane_count_debugfs_write(struct file *f, const char __user *buf,
++static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+ {
+- /* TODO: create method to write lane count */
+- return 1;
++ struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
++ struct dc_link *link = connector->dc_link;
++ struct dc *dc = (struct dc *)link->dc;
++ struct dc_link_settings prefer_link_settings;
++ char *wr_buf = NULL;
++ char *wr_buf_ptr = NULL;
++ uint32_t wr_buf_size = 40;
++ int r;
++ int bytes_from_user;
++ char *sub_str;
++ /* 0: lane_count; 1: link_rate */
++ uint8_t param_index = 0;
++ long param[2];
++ const char delimiter[3] = {' ', '\n', '\0'};
++ bool valid_input = false;
++
++ if (size == 0)
++ return 0;
++
++ wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
++ if (!wr_buf)
++ return 0;
++ wr_buf_ptr = wr_buf;
++
++ r = copy_from_user(wr_buf_ptr, buf, wr_buf_size);
++
++ /* r is bytes not be copied */
++ if (r >= wr_buf_size) {
++ kfree(wr_buf);
++ DRM_DEBUG_DRIVER("user data not read\n");
++ return 0;
++ }
++
++ bytes_from_user = wr_buf_size - r;
++
++ while (isspace(*wr_buf_ptr))
++ wr_buf_ptr++;
++
++ while ((*wr_buf_ptr != '\0') && (param_index < 2)) {
++
++ sub_str = strsep(&wr_buf_ptr, delimiter);
++
++ r = kstrtol(sub_str, 16, &param[param_index]);
++
++ if (r)
++ DRM_DEBUG_DRIVER(" -EINVAL convert error happens!\n");
++
++ param_index++;
++ while (isspace(*wr_buf_ptr))
++ wr_buf_ptr++;
++ }
++
++ DRM_DEBUG_DRIVER("Lane_count: %lx\n", param[0]);
++ DRM_DEBUG_DRIVER("link_rate: %lx\n", param[1]);
++
++ switch (param[0]) {
++ case LANE_COUNT_ONE:
++ case LANE_COUNT_TWO:
++ case LANE_COUNT_FOUR:
++ valid_input = true;
++ break;
++ default:
++ break;
++ }
++
++ switch (param[1]) {
++ case LINK_RATE_LOW:
++ case LINK_RATE_HIGH:
++ case LINK_RATE_RBR2:
++ case LINK_RATE_HIGH2:
++ case LINK_RATE_HIGH3:
++ valid_input = true;
++ break;
++ default:
++ break;
++ }
++
++ if (!valid_input) {
++ kfree(wr_buf);
++ DRM_DEBUG_DRIVER("Invalid Input value exceed No HW will be programmed\n");
++ return bytes_from_user;
++ }
++
++ /* save user force lane_count, link_rate to preferred settings
++ * spread spectrum will not be changed
++ */
++ prefer_link_settings.link_spread = link->cur_link_settings.link_spread;
++ prefer_link_settings.lane_count = param[0];
++ prefer_link_settings.link_rate = param[1];
++
++ dc_link_set_preferred_link_settings(dc, &prefer_link_settings, link);
++
++ kfree(wr_buf);
++
++ return bytes_from_user;
+ }
+
+ static ssize_t dp_voltage_swing_debugfs_read(struct file *f, char __user *buf,
+@@ -102,17 +275,10 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
+ return 1;
+ }
+
+-static const struct file_operations dp_link_rate_fops = {
+- .owner = THIS_MODULE,
+- .read = dp_link_rate_debugfs_read,
+- .write = dp_link_rate_debugfs_write,
+- .llseek = default_llseek
+-};
+-
+-static const struct file_operations dp_lane_count_fops = {
++static const struct file_operations dp_link_settings_debugfs_fops = {
+ .owner = THIS_MODULE,
+- .read = dp_lane_count_debugfs_read,
+- .write = dp_lane_count_debugfs_write,
++ .read = dp_link_settings_read,
++ .write = dp_link_settings_write,
+ .llseek = default_llseek
+ };
+
+@@ -141,8 +307,7 @@ static const struct {
+ char *name;
+ const struct file_operations *fops;
+ } dp_debugfs_entries[] = {
+- {"link_rate", &dp_link_rate_fops},
+- {"lane_count", &dp_lane_count_fops},
++ {"link_settings", &dp_link_settings_debugfs_fops},
+ {"voltage_swing", &dp_voltage_swing_fops},
+ {"pre_emphasis", &dp_pre_emphasis_fops},
+ {"phy_test_pattern", &dp_phy_test_pattern_fops}
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4902-drm-amd-display-dal-3.1.53.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4902-drm-amd-display-dal-3.1.53.patch
new file mode 100644
index 00000000..b575a26f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4902-drm-amd-display-dal-3.1.53.patch
@@ -0,0 +1,29 @@
+From f2050fb45e0d1da7b958cb2b936d080552235352 Mon Sep 17 00:00:00 2001
+From: Tony Cheng <tony.cheng@amd.com>
+Date: Tue, 5 Jun 2018 09:14:56 -0400
+Subject: [PATCH 4902/5725] drm/amd/display: dal 3.1.53
+
+Signed-off-by: Tony Cheng <tony.cheng@amd.com>
+Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 4192bbd..bb71717 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -38,7 +38,7 @@
+ #include "inc/compressor.h"
+ #include "dml/display_mode_lib.h"
+
+-#define DC_VER "3.1.52"
++#define DC_VER "3.1.53"
+
+ #define MAX_SURFACES 3
+ #define MAX_STREAMS 6
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4903-drm-amd-display-Correct-calculation-of-duration-time.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4903-drm-amd-display-Correct-calculation-of-duration-time.patch
new file mode 100644
index 00000000..c8a38fa7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4903-drm-amd-display-Correct-calculation-of-duration-time.patch
@@ -0,0 +1,34 @@
+From 86e03595eea05551ad297b7ed0a92427bc95a6d1 Mon Sep 17 00:00:00 2001
+From: Hugo Hu <hugo.hu@amd.com>
+Date: Mon, 18 Jun 2018 15:27:58 -0400
+Subject: [PATCH 4903/5725] drm/amd/display: Correct calculation of duration
+ time.
+
+Signed-off-by: Hugo Hu <hugo.hu@amd.com>
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index 4059a4c..1634e9d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -864,10 +864,10 @@ void hwss_edp_power_control(
+ if (power_up) {
+ unsigned long long current_ts = dm_get_timestamp(ctx);
+ unsigned long long duration_in_ms =
+- dm_get_elapse_time_in_ns(
++ div64_u64(dm_get_elapse_time_in_ns(
+ ctx,
+ current_ts,
+- div64_u64(link->link_trace.time_stamp.edp_poweroff, 1000000));
++ link->link_trace.time_stamp.edp_poweroff), 1000000);
+ unsigned long long wait_time_ms = 0;
+
+ /* max 500ms from LCDVDD off to on */
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4904-drm-amd-display-Add-Azalia-registers-to-HW-sequencer.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4904-drm-amd-display-Add-Azalia-registers-to-HW-sequencer.patch
new file mode 100644
index 00000000..1db04c98
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4904-drm-amd-display-Add-Azalia-registers-to-HW-sequencer.patch
@@ -0,0 +1,40 @@
+From 59450a5f8f42e140d9e9c4256d443114b1a46de1 Mon Sep 17 00:00:00 2001
+From: Eric Bernstein <eric.bernstein@amd.com>
+Date: Mon, 18 Jun 2018 15:45:07 -0400
+Subject: [PATCH 4904/5725] drm/amd/display: Add Azalia registers to HW
+ sequencer
+
+Signed-off-by: Eric Bernstein <eric.bernstein@amd.com>
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+index 0574078..f091d87 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+@@ -275,6 +275,8 @@ struct dce_hwseq_registers {
+ uint32_t MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB;
+ uint32_t MC_VM_SYSTEM_APERTURE_LOW_ADDR;
+ uint32_t MC_VM_SYSTEM_APERTURE_HIGH_ADDR;
++ uint32_t AZALIA_AUDIO_DTO;
++ uint32_t AZALIA_CONTROLLER_CLOCK_GATING;
+ };
+ /* set field name */
+ #define HWS_SF(blk_name, reg_name, field_name, post_fix)\
+@@ -500,7 +502,8 @@ struct dce_hwseq_registers {
+ type D1VGA_MODE_ENABLE; \
+ type D2VGA_MODE_ENABLE; \
+ type D3VGA_MODE_ENABLE; \
+- type D4VGA_MODE_ENABLE;
++ type D4VGA_MODE_ENABLE; \
++ type AZALIA_AUDIO_DTO_MODULE;
+
+ struct dce_hwseq_shift {
+ HWSEQ_REG_FIELD_LIST(uint8_t)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4905-drm-amd-display-Define-couple-extra-DCN-registers.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4905-drm-amd-display-Define-couple-extra-DCN-registers.patch
new file mode 100644
index 00000000..9f7a5b5a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4905-drm-amd-display-Define-couple-extra-DCN-registers.patch
@@ -0,0 +1,79 @@
+From 58e54ec3bcc1746e888cb622b1f0bebcfe2b3a9d Mon Sep 17 00:00:00 2001
+From: Charlene Liu <charlene.liu@amd.com>
+Date: Mon, 18 Jun 2018 19:50:07 -0400
+Subject: [PATCH 4905/5725] drm/amd/display: Define couple extra DCN registers
+
+Signed-off-by: Charlene Liu <charlene.liu@amd.com>
+Reviewed-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h | 1 +
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h | 9 +++++++--
+ 2 files changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+index f091d87..df3203a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+@@ -147,6 +147,7 @@
+ SR(DCCG_GATE_DISABLE_CNTL2), \
+ SR(DCFCLK_CNTL),\
+ SR(DCFCLK_CNTL), \
++ SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \
+ /* todo: get these from GVM instead of reading registers ourselves */\
+ MMHUB_SR(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32),\
+ MMHUB_SR(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32),\
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
+index 2a97cdb..d8ef30b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
+@@ -42,6 +42,7 @@
+ #define LE_DCN_COMMON_REG_LIST(id) \
+ SRI(DIG_BE_CNTL, DIG, id), \
+ SRI(DIG_BE_EN_CNTL, DIG, id), \
++ SRI(TMDS_CTL_BITS, DIG, id), \
+ SRI(DP_CONFIG, DP, id), \
+ SRI(DP_DPHY_CNTL, DP, id), \
+ SRI(DP_DPHY_PRBS_CNTL, DP, id), \
+@@ -64,6 +65,7 @@
+ SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
+ SRI(DP_DPHY_HBR2_PATTERN_CONTROL, DP, id)
+
++
+ #define LE_DCN10_REG_LIST(id)\
+ LE_DCN_COMMON_REG_LIST(id)
+
+@@ -100,6 +102,7 @@ struct dcn10_link_enc_registers {
+ uint32_t DP_DPHY_BS_SR_SWAP_CNTL;
+ uint32_t DP_DPHY_HBR2_PATTERN_CONTROL;
+ uint32_t DP_SEC_CNTL1;
++ uint32_t TMDS_CTL_BITS;
+ };
+
+ #define LE_SF(reg_name, field_name, post_fix)\
+@@ -110,6 +113,7 @@ struct dcn10_link_enc_registers {
+ LE_SF(DIG0_DIG_BE_CNTL, DIG_HPD_SELECT, mask_sh),\
+ LE_SF(DIG0_DIG_BE_CNTL, DIG_MODE, mask_sh),\
+ LE_SF(DIG0_DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, mask_sh),\
++ LE_SF(DIG0_TMDS_CTL_BITS, TMDS_CTL0, mask_sh), \
+ LE_SF(DP0_DP_DPHY_CNTL, DPHY_BYPASS, mask_sh),\
+ LE_SF(DP0_DP_DPHY_CNTL, DPHY_ATEST_SEL_LANE0, mask_sh),\
+ LE_SF(DP0_DP_DPHY_CNTL, DPHY_ATEST_SEL_LANE1, mask_sh),\
+@@ -198,10 +202,11 @@ struct dcn10_link_enc_registers {
+ type DP_MSE_SAT_SLOT_COUNT3;\
+ type DP_MSE_SAT_UPDATE;\
+ type DP_MSE_16_MTP_KEEPOUT;\
++ type DC_HPD_EN;\
++ type TMDS_CTL0;\
+ type AUX_HPD_SEL;\
+ type AUX_LS_READ_EN;\
+- type AUX_RX_RECEIVE_WINDOW;\
+- type DC_HPD_EN
++ type AUX_RX_RECEIVE_WINDOW
+
+ struct dcn10_link_enc_shift {
+ DCN_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4906-drm-amd-display-Expose-configure_encoder-for-link_en.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4906-drm-amd-display-Expose-configure_encoder-for-link_en.patch
new file mode 100644
index 00000000..8c3577dd
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4906-drm-amd-display-Expose-configure_encoder-for-link_en.patch
@@ -0,0 +1,51 @@
+From f6b86864f3ea93e60a2c3af32a14dff380886de6 Mon Sep 17 00:00:00 2001
+From: Tony Cheng <tony.cheng@amd.com>
+Date: Mon, 18 Jun 2018 18:32:43 -0400
+Subject: [PATCH 4906/5725] drm/amd/display: Expose configure_encoder for
+ link_encoder
+
+Signed-off-by: Tony Cheng <tony.cheng@amd.com>
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c | 3 +--
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h | 4 ++++
+ 2 files changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+index fd9dc70..18a7cac 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+@@ -445,12 +445,11 @@ static uint8_t get_frontend_source(
+ }
+ }
+
+-static void configure_encoder(
++void configure_encoder(
+ struct dcn10_link_encoder *enc10,
+ const struct dc_link_settings *link_settings)
+ {
+ /* set number of lanes */
+-
+ REG_SET(DP_CONFIG, 0,
+ DP_UDI_LANES, link_settings->lane_count - LANE_COUNT_ONE);
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
+index d8ef30b..cd3bb5d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
+@@ -271,6 +271,10 @@ void dcn10_link_encoder_setup(
+ struct link_encoder *enc,
+ enum signal_type signal);
+
++void configure_encoder(
++ struct dcn10_link_encoder *enc10,
++ const struct dc_link_settings *link_settings);
++
+ /* enables TMDS PHY output */
+ /* TODO: still need depth or just pass in adjusted pixel clock? */
+ void dcn10_link_encoder_enable_tmds_output(
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4907-drm-amd-display-Serialize-is_dp_sink_present.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4907-drm-amd-display-Serialize-is_dp_sink_present.patch
new file mode 100644
index 00000000..9ea22e04
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4907-drm-amd-display-Serialize-is_dp_sink_present.patch
@@ -0,0 +1,103 @@
+From 2568a1debc878028f531ab57094dbce565d3771f Mon Sep 17 00:00:00 2001
+From: Harry Wentland <harry.wentland@amd.com>
+Date: Tue, 29 May 2018 13:11:55 -0400
+Subject: [PATCH 4907/5725] drm/amd/display: Serialize is_dp_sink_present
+
+Access to GPIO needs to be serialized. Aux transactions are already
+serialized in DRM but we also need to serialize access to the GPIO pin
+for purposes of DP dongle detection.
+
+Call is_dp_sink_present through DM so we can lock correctly. This
+follows the same pattern used for DPCD transactions.
+
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Reviewed-by: Jun Lei <Jun.Lei@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 16 ++++++++++++++++
+ drivers/gpu/drm/amd/display/dc/core/dc_link.c | 4 ++--
+ drivers/gpu/drm/amd/display/dc/dc_link.h | 2 ++
+ drivers/gpu/drm/amd/display/dc/dm_helpers.h | 3 +++
+ 4 files changed, 23 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+index dea49dc..0193bc5 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+@@ -454,6 +454,22 @@ bool dm_helpers_submit_i2c(
+ return result;
+ }
+
++bool dm_helpers_is_dp_sink_present(struct dc_link *link)
++{
++ bool dp_sink_present;
++ struct amdgpu_dm_connector *aconnector = link->priv;
++
++ if (!aconnector) {
++ BUG_ON("Failed to found connector for link!");
++ return true;
++ }
++
++ mutex_lock(&aconnector->dm_dp_aux.aux.hw_mutex);
++ dp_sink_present = dc_link_is_dp_sink_present(link);
++ mutex_unlock(&aconnector->dm_dp_aux.aux.hw_mutex);
++ return dp_sink_present;
++}
++
+ enum dc_edid_status dm_helpers_read_local_edid(
+ struct dc_context *ctx,
+ struct dc_link *link,
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index 9d5ccb6..f058620 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -313,7 +313,7 @@ static enum signal_type get_basic_signal_type(
+ * @brief
+ * Check whether there is a dongle on DP connector
+ */
+-static bool is_dp_sink_present(struct dc_link *link)
++bool dc_link_is_dp_sink_present(struct dc_link *link)
+ {
+ enum gpio_result gpio_result;
+ uint32_t clock_pin = 0;
+@@ -406,7 +406,7 @@ static enum signal_type link_detect_sink(
+ * we assume signal is DVI; it could be corrected
+ * to HDMI after dongle detection
+ */
+- if (!is_dp_sink_present(link))
++ if (!dm_helpers_is_dp_sink_present(link))
+ result = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ }
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
+index db92387..795a8f0 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
+@@ -210,6 +210,8 @@ bool dc_link_dp_set_test_pattern(
+
+ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable);
+
++bool dc_link_is_dp_sink_present(struct dc_link *link);
++
+ bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type);
+
+ /*
+diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+index 034369f..7e6b9f5 100644
+--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
++++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+@@ -103,6 +103,9 @@ bool dm_helpers_submit_i2c(
+ const struct dc_link *link,
+ struct i2c_command *cmd);
+
++bool dm_helpers_is_dp_sink_present(
++ struct dc_link *link);
++
+ enum dc_edid_status dm_helpers_read_local_edid(
+ struct dc_context *ctx,
+ struct dc_link *link,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4908-drm-amd-display-Break-out-function-to-simply-read-au.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4908-drm-amd-display-Break-out-function-to-simply-read-au.patch
new file mode 100644
index 00000000..73a18a5b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4908-drm-amd-display-Break-out-function-to-simply-read-au.patch
@@ -0,0 +1,200 @@
+From 462e24fa4f1c1af6bd5ee845d2f757fcd1b8dec9 Mon Sep 17 00:00:00 2001
+From: Harry Wentland <harry.wentland@amd.com>
+Date: Tue, 8 May 2018 16:28:31 -0400
+Subject: [PATCH 4908/5725] drm/amd/display: Break out function to simply read
+ aux reply
+
+DRM's DP helpers take care of dealing with the error code for us. In
+order not to step on each other's toes we'll need to be able to simply
+read aux channel replies without further logic based on return values.
+
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Reviewed-by: Sun peng Li <Sunpeng.Li@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h | 6 ++
+ .../display/dc/i2caux/dce110/aux_engine_dce110.c | 119 ++++++++++++---------
+ 2 files changed, 76 insertions(+), 49 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h
+index b01488f..c33a289 100644
+--- a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h
++++ b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h
+@@ -44,6 +44,12 @@ struct aux_engine_funcs {
+ void (*process_channel_reply)(
+ struct aux_engine *engine,
+ struct aux_reply_transaction_data *reply);
++ int (*read_channel_reply)(
++ struct aux_engine *engine,
++ uint32_t size,
++ uint8_t *buffer,
++ uint8_t *reply_result,
++ uint32_t *sw_status);
+ enum aux_channel_operation_result (*get_channel_status)(
+ struct aux_engine *engine,
+ uint8_t *returned_bytes);
+diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
+index 2b927f2..1f39406 100644
+--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
++++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
+@@ -275,61 +275,92 @@ static void submit_channel_request(
+ REG_UPDATE(AUX_SW_CONTROL, AUX_SW_GO, 1);
+ }
+
+-static void process_channel_reply(
+- struct aux_engine *engine,
+- struct aux_reply_transaction_data *reply)
++static int read_channel_reply(struct aux_engine *engine, uint32_t size,
++ uint8_t *buffer, uint8_t *reply_result,
++ uint32_t *sw_status)
+ {
+ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
++ uint32_t bytes_replied;
++ uint32_t reply_result_32;
+
+- /* Need to do a read to get the number of bytes to process
+- * Alternatively, this information can be passed -
+- * but that causes coupling which isn't good either. */
++ *sw_status = REG_GET(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT,
++ &bytes_replied);
+
+- uint32_t bytes_replied;
+- uint32_t value;
++ /* In case HPD is LOW, exit AUX transaction */
++ if ((*sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
++ return -1;
+
+- value = REG_GET(AUX_SW_STATUS,
+- AUX_SW_REPLY_BYTE_COUNT, &bytes_replied);
++ /* Need at least the status byte */
++ if (!bytes_replied)
++ return -1;
+
+- /* in case HPD is LOW, exit AUX transaction */
+- if ((value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) {
+- reply->status = AUX_TRANSACTION_REPLY_HPD_DISCON;
+- return;
+- }
++ REG_UPDATE_1BY1_3(AUX_SW_DATA,
++ AUX_SW_INDEX, 0,
++ AUX_SW_AUTOINCREMENT_DISABLE, 1,
++ AUX_SW_DATA_RW, 1);
++
++ REG_GET(AUX_SW_DATA, AUX_SW_DATA, &reply_result_32);
++ *reply_result = (uint8_t)reply_result_32;
+
+- if (bytes_replied) {
+- uint32_t reply_result;
++ if (reply_result_32 >> 4 == 0) { /* ACK */
++ uint32_t i = 0;
+
+- REG_UPDATE_1BY1_3(AUX_SW_DATA,
+- AUX_SW_INDEX, 0,
+- AUX_SW_AUTOINCREMENT_DISABLE, 1,
+- AUX_SW_DATA_RW, 1);
++ /* First byte was already used to get the command status */
++ --bytes_replied;
+
+- REG_GET(AUX_SW_DATA,
+- AUX_SW_DATA, &reply_result);
++ /* Do not overflow buffer */
++ if (bytes_replied > size)
++ return -1;
+
+- reply_result = reply_result >> 4;
++ while (i < bytes_replied) {
++ uint32_t aux_sw_data_val;
+
+- switch (reply_result) {
+- case 0: /* ACK */ {
+- uint32_t i = 0;
++ REG_GET(AUX_SW_DATA, AUX_SW_DATA, &aux_sw_data_val);
++ buffer[i] = aux_sw_data_val;
++ ++i;
++ }
+
+- /* first byte was already used
+- * to get the command status */
+- --bytes_replied;
++ return i;
++ }
++
++ return 0;
++}
++
++static void process_channel_reply(
++ struct aux_engine *engine,
++ struct aux_reply_transaction_data *reply)
++{
++ int bytes_replied;
++ uint8_t reply_result;
++ uint32_t sw_status;
+
+- while (i < bytes_replied) {
+- uint32_t aux_sw_data_val;
++ bytes_replied = read_channel_reply(engine, reply->length, reply->data,
++ &reply_result, &sw_status);
+
+- REG_GET(AUX_SW_DATA,
+- AUX_SW_DATA, &aux_sw_data_val);
++ /* in case HPD is LOW, exit AUX transaction */
++ if ((sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) {
++ reply->status = AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON;
++ return;
++ }
+
+- reply->data[i] = aux_sw_data_val;
+- ++i;
+- }
++ if (bytes_replied < 0) {
++ /* Need to handle an error case...
++ * Hopefully, upper layer function won't call this function if
++ * the number of bytes in the reply was 0, because there was
++ * surely an error that was asserted that should have been
++ * handled for hot plug case, this could happens
++ */
++ if (!(sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) {
++ reply->status = AUX_TRANSACTION_REPLY_INVALID;
++ ASSERT_CRITICAL(false);
++ return;
++ }
++ } else {
++ reply_result = reply_result >> 4;
+
++ switch (reply_result) {
++ case 0: /* ACK */
+ reply->status = AUX_TRANSACTION_REPLY_AUX_ACK;
+- }
+ break;
+ case 1: /* NACK */
+ reply->status = AUX_TRANSACTION_REPLY_AUX_NACK;
+@@ -346,17 +377,6 @@ static void process_channel_reply(
+ default:
+ reply->status = AUX_TRANSACTION_REPLY_INVALID;
+ }
+- } else {
+- /* Need to handle an error case...
+- * hopefully, upper layer function won't call this function
+- * if the number of bytes in the reply was 0
+- * because there was surely an error that was asserted
+- * that should have been handled
+- * for hot plug case, this could happens*/
+- if (!(value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) {
+- reply->status = AUX_TRANSACTION_REPLY_INVALID;
+- ASSERT_CRITICAL(false);
+- }
+ }
+ }
+
+@@ -427,6 +447,7 @@ static const struct aux_engine_funcs aux_engine_funcs = {
+ .acquire_engine = acquire_engine,
+ .submit_channel_request = submit_channel_request,
+ .process_channel_reply = process_channel_reply,
++ .read_channel_reply = read_channel_reply,
+ .get_channel_status = get_channel_status,
+ .is_engine_available = is_engine_available,
+ };
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4909-drm-amd-display-Return-aux-replies-directly-to-DRM.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4909-drm-amd-display-Return-aux-replies-directly-to-DRM.patch
new file mode 100644
index 00000000..2e84b8e9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4909-drm-amd-display-Return-aux-replies-directly-to-DRM.patch
@@ -0,0 +1,324 @@
+From 723cf443e6a2864784a9be8c3c31d247705eeb0e Mon Sep 17 00:00:00 2001
+From: Harry Wentland <harry.wentland@amd.com>
+Date: Wed, 9 May 2018 16:26:17 -0400
+Subject: [PATCH 4909/5725] drm/amd/display: Return aux replies directly to DRM
+
+Currently we still go through DC code that does error checking, retries,
+etc. There's no need for that since DRM already does that for us. This
+simplifies the code a bit and makes it easier to debug.
+
+This also ensures we correctly tell DRM how many bytes have actually
+been read, as we should. This allows DRM to correctly read the EDID on
+the Chamelium DP port.
+
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 89 +++++++++------
+ drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c | 120 ++++++++++-----------
+ drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h | 22 ++--
+ 3 files changed, 117 insertions(+), 114 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+index d48a37a..26d5e9d 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -79,52 +79,72 @@ static void log_dpcd(uint8_t type,
+ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
+ struct drm_dp_aux_msg *msg)
+ {
+- enum i2c_mot_mode mot = (msg->request & DP_AUX_I2C_MOT) ?
+- I2C_MOT_TRUE : I2C_MOT_FALSE;
+- enum ddc_result res;
++ ssize_t result = 0;
++ enum i2caux_transaction_action action;
++ enum aux_transaction_type type;
+
+ if (WARN_ON(msg->size > 16))
+ return -E2BIG;
+
+ switch (msg->request & ~DP_AUX_I2C_MOT) {
+ case DP_AUX_NATIVE_READ:
+- res = dal_ddc_service_read_dpcd_data(
+- TO_DM_AUX(aux)->ddc_service,
+- false,
+- I2C_MOT_UNDEF,
+- msg->address,
+- msg->buffer,
+- msg->size);
++ type = AUX_TRANSACTION_TYPE_DP;
++ action = I2CAUX_TRANSACTION_ACTION_DP_READ;
++
++ result = dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service,
++ msg->address,
++ &msg->reply,
++ msg->buffer,
++ msg->size,
++ type,
++ action);
+ break;
+ case DP_AUX_NATIVE_WRITE:
+- res = dal_ddc_service_write_dpcd_data(
+- TO_DM_AUX(aux)->ddc_service,
+- false,
+- I2C_MOT_UNDEF,
+- msg->address,
+- msg->buffer,
+- msg->size);
++ type = AUX_TRANSACTION_TYPE_DP;
++ action = I2CAUX_TRANSACTION_ACTION_DP_WRITE;
++
++ dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service,
++ msg->address,
++ &msg->reply,
++ msg->buffer,
++ msg->size,
++ type,
++ action);
++ result = msg->size;
+ break;
+ case DP_AUX_I2C_READ:
+- res = dal_ddc_service_read_dpcd_data(
+- TO_DM_AUX(aux)->ddc_service,
+- true,
+- mot,
+- msg->address,
+- msg->buffer,
+- msg->size);
++ type = AUX_TRANSACTION_TYPE_I2C;
++ if (msg->request & DP_AUX_I2C_MOT)
++ action = I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT;
++ else
++ action = I2CAUX_TRANSACTION_ACTION_I2C_READ;
++
++ result = dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service,
++ msg->address,
++ &msg->reply,
++ msg->buffer,
++ msg->size,
++ type,
++ action);
+ break;
+ case DP_AUX_I2C_WRITE:
+- res = dal_ddc_service_write_dpcd_data(
+- TO_DM_AUX(aux)->ddc_service,
+- true,
+- mot,
+- msg->address,
+- msg->buffer,
+- msg->size);
++ type = AUX_TRANSACTION_TYPE_I2C;
++ if (msg->request & DP_AUX_I2C_MOT)
++ action = I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT;
++ else
++ action = I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
++
++ dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service,
++ msg->address,
++ &msg->reply,
++ msg->buffer,
++ msg->size,
++ type,
++ action);
++ result = msg->size;
+ break;
+ default:
+- return 0;
++ return -EINVAL;
+ }
+
+ #ifdef TRACE_DPCD
+@@ -135,7 +155,10 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
+ r == DDC_RESULT_SUCESSFULL);
+ #endif
+
+- return msg->size;
++ if (result < 0) /* DC doesn't know about kernel error codes */
++ result = -EIO;
++
++ return result;
+ }
+
+ static enum drm_connector_status
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+index d5294798b..d108ccf 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+@@ -33,6 +33,10 @@
+ #include "include/vector.h"
+ #include "core_types.h"
+ #include "dc_link_ddc.h"
++#include "i2caux/engine.h"
++#include "i2caux/i2c_engine.h"
++#include "i2caux/aux_engine.h"
++#include "i2caux/i2caux.h"
+
+ #define AUX_POWER_UP_WA_DELAY 500
+ #define I2C_OVER_AUX_DEFER_WA_DELAY 70
+@@ -629,78 +633,62 @@ bool dal_ddc_service_query_ddc_data(
+ return ret;
+ }
+
+-enum ddc_result dal_ddc_service_read_dpcd_data(
+- struct ddc_service *ddc,
+- bool i2c,
+- enum i2c_mot_mode mot,
+- uint32_t address,
+- uint8_t *data,
+- uint32_t len)
++int dc_link_aux_transfer(struct ddc_service *ddc,
++ unsigned int address,
++ uint8_t *reply,
++ void *buffer,
++ unsigned int size,
++ enum aux_transaction_type type,
++ enum i2caux_transaction_action action)
+ {
+- struct aux_payload read_payload = {
+- .i2c_over_aux = i2c,
+- .write = false,
+- .address = address,
+- .length = len,
+- .data = data,
+- };
+- struct aux_command command = {
+- .payloads = &read_payload,
+- .number_of_payloads = 1,
+- .defer_delay = 0,
+- .max_defer_write_retry = 0,
+- .mot = mot
+- };
+-
+- if (len > DEFAULT_AUX_MAX_DATA_SIZE) {
+- BREAK_TO_DEBUGGER();
+- return DDC_RESULT_FAILED_INVALID_OPERATION;
+- }
++ struct i2caux *i2caux = ddc->ctx->i2caux;
++ struct ddc *ddc_pin = ddc->ddc_pin;
++ struct aux_engine *engine;
++ enum aux_channel_operation_result operation_result;
++ struct aux_request_transaction_data aux_req;
++ struct aux_reply_transaction_data aux_rep;
++ uint8_t returned_bytes = 0;
++ int res = -1;
++ uint32_t status;
+
+- if (dal_i2caux_submit_aux_command(
+- ddc->ctx->i2caux,
+- ddc->ddc_pin,
+- &command))
+- return DDC_RESULT_SUCESSFULL;
++ memset(&aux_req, 0, sizeof(aux_req));
++ memset(&aux_rep, 0, sizeof(aux_rep));
+
+- return DDC_RESULT_FAILED_OPERATION;
+-}
++ engine = i2caux->funcs->acquire_aux_engine(i2caux, ddc_pin);
+
+-enum ddc_result dal_ddc_service_write_dpcd_data(
+- struct ddc_service *ddc,
+- bool i2c,
+- enum i2c_mot_mode mot,
+- uint32_t address,
+- const uint8_t *data,
+- uint32_t len)
+-{
+- struct aux_payload write_payload = {
+- .i2c_over_aux = i2c,
+- .write = true,
+- .address = address,
+- .length = len,
+- .data = (uint8_t *)data,
+- };
+- struct aux_command command = {
+- .payloads = &write_payload,
+- .number_of_payloads = 1,
+- .defer_delay = 0,
+- .max_defer_write_retry = 0,
+- .mot = mot
+- };
+-
+- if (len > DEFAULT_AUX_MAX_DATA_SIZE) {
+- BREAK_TO_DEBUGGER();
+- return DDC_RESULT_FAILED_INVALID_OPERATION;
+- }
++ aux_req.type = type;
++ aux_req.action = action;
++
++ aux_req.address = address;
++ aux_req.delay = 0;
++ aux_req.length = size;
++ aux_req.data = buffer;
++
++ engine->funcs->submit_channel_request(engine, &aux_req);
++ operation_result = engine->funcs->get_channel_status(engine, &returned_bytes);
+
+- if (dal_i2caux_submit_aux_command(
+- ddc->ctx->i2caux,
+- ddc->ddc_pin,
+- &command))
+- return DDC_RESULT_SUCESSFULL;
++ switch (operation_result) {
++ case AUX_CHANNEL_OPERATION_SUCCEEDED:
++ res = returned_bytes;
++
++ if (res <= size && res > 0)
++ res = engine->funcs->read_channel_reply(engine, size,
++ buffer, reply,
++ &status);
++
++ break;
++ case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
++ res = 0;
++ break;
++ case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN:
++ case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
++ case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
++ res = -1;
++ break;
++ }
+
+- return DDC_RESULT_FAILED_OPERATION;
++ i2caux->funcs->release_engine(i2caux, &engine->base);
++ return res;
+ }
+
+ /*test only function*/
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
+index 0bf73b7..538b833 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
+@@ -102,21 +102,13 @@ bool dal_ddc_service_query_ddc_data(
+ uint8_t *read_buf,
+ uint32_t read_size);
+
+-enum ddc_result dal_ddc_service_read_dpcd_data(
+- struct ddc_service *ddc,
+- bool i2c,
+- enum i2c_mot_mode mot,
+- uint32_t address,
+- uint8_t *data,
+- uint32_t len);
+-
+-enum ddc_result dal_ddc_service_write_dpcd_data(
+- struct ddc_service *ddc,
+- bool i2c,
+- enum i2c_mot_mode mot,
+- uint32_t address,
+- const uint8_t *data,
+- uint32_t len);
++int dc_link_aux_transfer(struct ddc_service *ddc,
++ unsigned int address,
++ uint8_t *reply,
++ void *buffer,
++ unsigned int size,
++ enum aux_transaction_type type,
++ enum i2caux_transaction_action action);
+
+ void dal_ddc_service_write_scdc_data(
+ struct ddc_service *ddc_service,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4910-drm-amd-display-Convert-remaining-loggers-off-dc_log.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4910-drm-amd-display-Convert-remaining-loggers-off-dc_log.patch
new file mode 100644
index 00000000..fea0ebe9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4910-drm-amd-display-Convert-remaining-loggers-off-dc_log.patch
@@ -0,0 +1,1148 @@
+From d61f6143e00948ad798ee4f747124addd1988d01 Mon Sep 17 00:00:00 2001
+From: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Date: Tue, 19 Jun 2018 09:58:24 -0400
+Subject: [PATCH 4910/5725] drm/amd/display: Convert remaining loggers off
+ dc_logger
+
+- Removed dal/dm/dc loggers from linux, switched to kernel prints
+- Modified functions that used these directly to use macros
+- dc_logger support is completely dropped from Linux
+
+Signed-off-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/display/TODO | 8 +-
+ drivers/gpu/drm/amd/display/dc/basics/Makefile | 2 +-
+ .../gpu/drm/amd/display/dc/basics/log_helpers.c | 71 +---
+ drivers/gpu/drm/amd/display/dc/basics/logger.c | 406 ---------------------
+ .../gpu/drm/amd/display/dc/calcs/calcs_logger.h | 9 +-
+ drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c | 6 +-
+ drivers/gpu/drm/amd/display/dc/core/dc.c | 15 +-
+ drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 23 +-
+ drivers/gpu/drm/amd/display/dc/dc_stream.h | 5 +-
+ drivers/gpu/drm/amd/display/dc/dc_types.h | 2 -
+ .../drm/amd/display/dc/dce110/dce110_resource.c | 34 +-
+ .../drm/amd/display/dc/dce112/dce112_resource.c | 34 +-
+ .../gpu/drm/amd/display/include/logger_interface.h | 138 +++----
+ drivers/gpu/drm/amd/display/include/logger_types.h | 59 ---
+ 14 files changed, 99 insertions(+), 713 deletions(-)
+ delete mode 100644 drivers/gpu/drm/amd/display/dc/basics/logger.c
+
+diff --git a/drivers/gpu/drm/amd/display/TODO b/drivers/gpu/drm/amd/display/TODO
+index 357d5964..a8a6c10 100644
+--- a/drivers/gpu/drm/amd/display/TODO
++++ b/drivers/gpu/drm/amd/display/TODO
+@@ -97,10 +97,10 @@ share it with drivers. But that's a very long term goal, and by far not just an
+ issue with DC - other drivers, especially around DP sink handling, are equally
+ guilty.
+
+-19. The DC logger is still a rather sore thing, but I know that the DRM_DEBUG
+-stuff just isn't up to the challenges either. We need to figure out something
+-that integrates better with DRM and linux debug printing, while not being
+-useless with filtering output. dynamic debug printing might be an option.
++19. DONE - The DC logger is still a rather sore thing, but I know that the
++DRM_DEBUG stuff just isn't up to the challenges either. We need to figure out
++something that integrates better with DRM and linux debug printing, while not
++being useless with filtering output. dynamic debug printing might be an option.
+
+ 20. Use kernel i2c device to program HDMI retimer. Some boards have an HDMI
+ retimer that we need to program to pass PHY compliance. Currently that's
+diff --git a/drivers/gpu/drm/amd/display/dc/basics/Makefile b/drivers/gpu/drm/amd/display/dc/basics/Makefile
+index fbf4dbf..ccd94a3 100644
+--- a/drivers/gpu/drm/amd/display/dc/basics/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/basics/Makefile
+@@ -4,7 +4,7 @@
+ # subcomponents.
+
+ BASICS = conversion.o fixpt31_32.o \
+- logger.o log_helpers.o vector.o
++ log_helpers.o vector.o
+
+ AMD_DAL_BASICS = $(addprefix $(AMDDALPATH)/dc/basics/,$(BASICS))
+
+diff --git a/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c b/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
+index f6c00a5..26583f3 100644
+--- a/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
++++ b/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
+@@ -28,77 +28,12 @@
+ #include "include/logger_interface.h"
+ #include "dm_helpers.h"
+
+-#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
+-
+-struct dc_signal_type_info {
+- enum signal_type type;
+- char name[MAX_NAME_LEN];
+-};
+-
+-static const struct dc_signal_type_info signal_type_info_tbl[] = {
+- {SIGNAL_TYPE_NONE, "NC"},
+- {SIGNAL_TYPE_DVI_SINGLE_LINK, "DVI"},
+- {SIGNAL_TYPE_DVI_DUAL_LINK, "DDVI"},
+- {SIGNAL_TYPE_HDMI_TYPE_A, "HDMIA"},
+- {SIGNAL_TYPE_LVDS, "LVDS"},
+- {SIGNAL_TYPE_RGB, "VGA"},
+- {SIGNAL_TYPE_DISPLAY_PORT, "DP"},
+- {SIGNAL_TYPE_DISPLAY_PORT_MST, "MST"},
+- {SIGNAL_TYPE_EDP, "eDP"},
+- {SIGNAL_TYPE_VIRTUAL, "Virtual"}
+-};
+-
+-void dc_conn_log(struct dc_context *ctx,
+- const struct dc_link *link,
+- uint8_t *hex_data,
+- int hex_data_count,
+- enum dc_log_type event,
+- const char *msg,
+- ...)
++void dc_conn_log_hex_linux(const uint8_t *hex_data, int hex_data_count)
+ {
+ int i;
+- va_list args;
+- struct log_entry entry = { 0 };
+- enum signal_type signal;
+-
+- if (link->local_sink)
+- signal = link->local_sink->sink_signal;
+- else
+- signal = link->connector_signal;
+-
+- if (link->type == dc_connection_mst_branch)
+- signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
+-
+- dm_logger_open(ctx->logger, &entry, event);
+-
+- for (i = 0; i < NUM_ELEMENTS(signal_type_info_tbl); i++)
+- if (signal == signal_type_info_tbl[i].type)
+- break;
+-
+- if (i == NUM_ELEMENTS(signal_type_info_tbl))
+- goto fail;
+-
+- dm_logger_append_heading(&entry);
+-
+- dm_logger_append(&entry, "[%s][ConnIdx:%d] ",
+- signal_type_info_tbl[i].name,
+- link->link_index);
+-
+- va_start(args, msg);
+- dm_logger_append_va(&entry, msg, args);
+-
+- if (entry.buf_offset > 0 &&
+- entry.buf[entry.buf_offset - 1] == '\n')
+- entry.buf_offset--;
+
+ if (hex_data)
+ for (i = 0; i < hex_data_count; i++)
+- dm_logger_append(&entry, "%2.2X ", hex_data[i]);
+-
+- dm_logger_append(&entry, "^\n");
+-
+-fail:
+- dm_logger_close(&entry);
+-
+- va_end(args);
++ DC_LOG_DEBUG("%2.2X ", hex_data[i]);
+ }
++
+diff --git a/drivers/gpu/drm/amd/display/dc/basics/logger.c b/drivers/gpu/drm/amd/display/dc/basics/logger.c
+deleted file mode 100644
+index 733bc5b..0000000
+--- a/drivers/gpu/drm/amd/display/dc/basics/logger.c
++++ /dev/null
+@@ -1,406 +0,0 @@
+-/*
+- * Copyright 2012-15 Advanced Micro Devices, Inc.
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included in
+- * all copies or substantial portions of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+- * OTHER DEALINGS IN THE SOFTWARE.
+- *
+- * Authors: AMD
+- *
+- */
+-#include "dm_services.h"
+-#include "include/logger_interface.h"
+-#include "logger.h"
+-
+-
+-#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
+-
+-static const struct dc_log_type_info log_type_info_tbl[] = {
+- {LOG_ERROR, "Error"},
+- {LOG_WARNING, "Warning"},
+- {LOG_DEBUG, "Debug"},
+- {LOG_DC, "DC_Interface"},
+- {LOG_DTN, "DTN"},
+- {LOG_SURFACE, "Surface"},
+- {LOG_HW_HOTPLUG, "HW_Hotplug"},
+- {LOG_HW_LINK_TRAINING, "HW_LKTN"},
+- {LOG_HW_SET_MODE, "HW_Mode"},
+- {LOG_HW_RESUME_S3, "HW_Resume"},
+- {LOG_HW_AUDIO, "HW_Audio"},
+- {LOG_HW_HPD_IRQ, "HW_HPDIRQ"},
+- {LOG_MST, "MST"},
+- {LOG_SCALER, "Scaler"},
+- {LOG_BIOS, "BIOS"},
+- {LOG_BANDWIDTH_CALCS, "BWCalcs"},
+- {LOG_BANDWIDTH_VALIDATION, "BWValidation"},
+- {LOG_I2C_AUX, "I2C_AUX"},
+- {LOG_SYNC, "Sync"},
+- {LOG_BACKLIGHT, "Backlight"},
+- {LOG_FEATURE_OVERRIDE, "Override"},
+- {LOG_DETECTION_EDID_PARSER, "Edid"},
+- {LOG_DETECTION_DP_CAPS, "DP_Caps"},
+- {LOG_RESOURCE, "Resource"},
+- {LOG_DML, "DML"},
+- {LOG_EVENT_MODE_SET, "Mode"},
+- {LOG_EVENT_DETECTION, "Detect"},
+- {LOG_EVENT_LINK_TRAINING, "LKTN"},
+- {LOG_EVENT_LINK_LOSS, "LinkLoss"},
+- {LOG_EVENT_UNDERFLOW, "Underflow"},
+- {LOG_IF_TRACE, "InterfaceTrace"},
+- {LOG_PERF_TRACE, "PerfTrace"},
+- {LOG_DISPLAYSTATS, "DisplayStats"}
+-};
+-
+-
+-/* ----------- Object init and destruction ----------- */
+-static bool construct(struct dc_context *ctx, struct dal_logger *logger,
+- uint32_t log_mask)
+-{
+- /* malloc buffer and init offsets */
+- logger->log_buffer_size = DAL_LOGGER_BUFFER_MAX_SIZE;
+- logger->log_buffer = kcalloc(logger->log_buffer_size, sizeof(char),
+- GFP_KERNEL);
+- if (!logger->log_buffer)
+- return false;
+-
+- /* Initialize both offsets to start of buffer (empty) */
+- logger->buffer_read_offset = 0;
+- logger->buffer_write_offset = 0;
+-
+- logger->open_count = 0;
+-
+- logger->flags.bits.ENABLE_CONSOLE = 1;
+- logger->flags.bits.ENABLE_BUFFER = 0;
+-
+- logger->ctx = ctx;
+-
+- logger->mask = log_mask;
+-
+- return true;
+-}
+-
+-static void destruct(struct dal_logger *logger)
+-{
+- if (logger->log_buffer) {
+- kfree(logger->log_buffer);
+- logger->log_buffer = NULL;
+- }
+-}
+-
+-struct dal_logger *dal_logger_create(struct dc_context *ctx, uint32_t log_mask)
+-{
+- /* malloc struct */
+- struct dal_logger *logger = kzalloc(sizeof(struct dal_logger),
+- GFP_KERNEL);
+-
+- if (!logger)
+- return NULL;
+- if (!construct(ctx, logger, log_mask)) {
+- kfree(logger);
+- return NULL;
+- }
+-
+- return logger;
+-}
+-
+-uint32_t dal_logger_destroy(struct dal_logger **logger)
+-{
+- if (logger == NULL || *logger == NULL)
+- return 1;
+- destruct(*logger);
+- kfree(*logger);
+- *logger = NULL;
+-
+- return 0;
+-}
+-
+-/* ------------------------------------------------------------------------ */
+-void dm_logger_append_heading(struct log_entry *entry)
+-{
+- int j;
+-
+- for (j = 0; j < NUM_ELEMENTS(log_type_info_tbl); j++) {
+-
+- const struct dc_log_type_info *info = &log_type_info_tbl[j];
+-
+- if (info->type == entry->type)
+- dm_logger_append(entry, "[%s]\t", info->name);
+- }
+-}
+-
+-
+-/* Print everything unread existing in log_buffer to debug console*/
+-void dm_logger_flush_buffer(struct dal_logger *logger, bool should_warn)
+-{
+- char *string_start = &logger->log_buffer[logger->buffer_read_offset];
+-
+- if (should_warn)
+- dm_output_to_console(
+- "---------------- FLUSHING LOG BUFFER ----------------\n");
+- while (logger->buffer_read_offset < logger->buffer_write_offset) {
+-
+- if (logger->log_buffer[logger->buffer_read_offset] == '\0') {
+- dm_output_to_console("%s", string_start);
+- string_start = logger->log_buffer + logger->buffer_read_offset + 1;
+- }
+- logger->buffer_read_offset++;
+- }
+- if (should_warn)
+- dm_output_to_console(
+- "-------------- END FLUSHING LOG BUFFER --------------\n\n");
+-}
+-/* ------------------------------------------------------------------------ */
+-
+-/* Warning: Be careful that 'msg' is null terminated and the total size is
+- * less than DAL_LOGGER_BUFFER_MAX_LOG_LINE_SIZE (256) including '\0'
+- */
+-static bool dal_logger_should_log(
+- struct dal_logger *logger,
+- enum dc_log_type log_type)
+-{
+- if (logger->mask & (1 << log_type))
+- return true;
+-
+- return false;
+-}
+-
+-static void log_to_debug_console(struct log_entry *entry)
+-{
+- struct dal_logger *logger = entry->logger;
+-
+- if (logger->flags.bits.ENABLE_CONSOLE == 0)
+- return;
+-
+- if (entry->buf_offset) {
+- switch (entry->type) {
+- case LOG_ERROR:
+- dm_error("%s", entry->buf);
+- break;
+- default:
+- dm_output_to_console("%s", entry->buf);
+- break;
+- }
+- }
+-}
+-
+-
+-static void log_to_internal_buffer(struct log_entry *entry)
+-{
+-
+- uint32_t size = entry->buf_offset;
+- struct dal_logger *logger = entry->logger;
+-
+- if (logger->flags.bits.ENABLE_BUFFER == 0)
+- return;
+-
+- if (logger->log_buffer == NULL)
+- return;
+-
+- if (size > 0 && size < logger->log_buffer_size) {
+-
+- int buffer_space = logger->log_buffer_size -
+- logger->buffer_write_offset;
+-
+- if (logger->buffer_write_offset == logger->buffer_read_offset) {
+- /* Buffer is empty, start writing at beginning */
+- buffer_space = logger->log_buffer_size;
+- logger->buffer_write_offset = 0;
+- logger->buffer_read_offset = 0;
+- }
+-
+- if (buffer_space > size) {
+- /* No wrap around, copy 'size' bytes
+- * from 'entry->buf' to 'log_buffer'
+- */
+- memmove(logger->log_buffer +
+- logger->buffer_write_offset,
+- entry->buf, size);
+- logger->buffer_write_offset += size;
+-
+- } else {
+- /* Not enough room remaining, we should flush
+- * existing logs */
+-
+- /* Flush existing unread logs to console */
+- dm_logger_flush_buffer(logger, true);
+-
+- /* Start writing to beginning of buffer */
+- memmove(logger->log_buffer, entry->buf, size);
+- logger->buffer_write_offset = size;
+- logger->buffer_read_offset = 0;
+- }
+-
+- }
+-}
+-
+-static void append_entry(
+- struct log_entry *entry,
+- char *buffer,
+- uint32_t buf_size)
+-{
+- if (!entry->buf ||
+- entry->buf_offset + buf_size > entry->max_buf_bytes
+- ) {
+- BREAK_TO_DEBUGGER();
+- return;
+- }
+-
+- /* Todo: check if off by 1 byte due to \0 anywhere */
+- memmove(entry->buf + entry->buf_offset, buffer, buf_size);
+- entry->buf_offset += buf_size;
+-}
+-
+-
+-void dm_logger_write(
+- struct dal_logger *logger,
+- enum dc_log_type log_type,
+- const char *msg,
+- ...)
+-{
+- if (logger && dal_logger_should_log(logger, log_type)) {
+- uint32_t size;
+- va_list args;
+- char buffer[LOG_MAX_LINE_SIZE];
+- struct log_entry entry;
+-
+- va_start(args, msg);
+-
+- entry.logger = logger;
+-
+- entry.buf = buffer;
+-
+- entry.buf_offset = 0;
+- entry.max_buf_bytes = DAL_LOGGER_BUFFER_MAX_SIZE * sizeof(char);
+-
+- entry.type = log_type;
+-
+- dm_logger_append_heading(&entry);
+-
+- size = dm_log_to_buffer(
+- buffer, LOG_MAX_LINE_SIZE - 1, msg, args);
+-
+- buffer[entry.buf_offset + size] = '\0';
+- entry.buf_offset += size + 1;
+-
+- /* --Flush log_entry buffer-- */
+- /* print to kernel console */
+- log_to_debug_console(&entry);
+- /* log internally for dsat */
+- log_to_internal_buffer(&entry);
+-
+- va_end(args);
+- }
+-}
+-
+-/* Same as dm_logger_write, except without open() and close(), which must
+- * be done separately.
+- */
+-void dm_logger_append(
+- struct log_entry *entry,
+- const char *msg,
+- ...)
+-{
+- va_list args;
+-
+- va_start(args, msg);
+- dm_logger_append_va(entry, msg, args);
+- va_end(args);
+-}
+-
+-void dm_logger_append_va(
+- struct log_entry *entry,
+- const char *msg,
+- va_list args)
+-{
+- struct dal_logger *logger;
+-
+- if (!entry) {
+- BREAK_TO_DEBUGGER();
+- return;
+- }
+-
+- logger = entry->logger;
+-
+- if (logger && logger->open_count > 0 &&
+- dal_logger_should_log(logger, entry->type)) {
+-
+- uint32_t size;
+- char buffer[LOG_MAX_LINE_SIZE];
+-
+- size = dm_log_to_buffer(
+- buffer, LOG_MAX_LINE_SIZE, msg, args);
+-
+- if (size < LOG_MAX_LINE_SIZE - 1) {
+- append_entry(entry, buffer, size);
+- } else {
+- append_entry(entry, "LOG_ERROR, line too long\n", 27);
+- }
+- }
+-}
+-
+-void dm_logger_open(
+- struct dal_logger *logger,
+- struct log_entry *entry, /* out */
+- enum dc_log_type log_type)
+-{
+- if (!entry) {
+- BREAK_TO_DEBUGGER();
+- return;
+- }
+-
+- entry->type = log_type;
+- entry->logger = logger;
+-
+- entry->buf = kzalloc(DAL_LOGGER_BUFFER_MAX_SIZE * sizeof(char),
+- GFP_KERNEL);
+-
+- entry->buf_offset = 0;
+- entry->max_buf_bytes = DAL_LOGGER_BUFFER_MAX_SIZE * sizeof(char);
+-
+- logger->open_count++;
+-
+- dm_logger_append_heading(entry);
+-}
+-
+-void dm_logger_close(struct log_entry *entry)
+-{
+- struct dal_logger *logger = entry->logger;
+-
+- if (logger && logger->open_count > 0) {
+- logger->open_count--;
+- } else {
+- BREAK_TO_DEBUGGER();
+- goto cleanup;
+- }
+-
+- /* --Flush log_entry buffer-- */
+- /* print to kernel console */
+- log_to_debug_console(entry);
+- /* log internally for dsat */
+- log_to_internal_buffer(entry);
+-
+- /* TODO: Write end heading */
+-
+-cleanup:
+- if (entry->buf) {
+- kfree(entry->buf);
+- entry->buf = NULL;
+- entry->buf_offset = 0;
+- entry->max_buf_bytes = 0;
+- }
+-}
+-
+diff --git a/drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h b/drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h
+index fc3f98f..62435bf 100644
+--- a/drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h
++++ b/drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h
+@@ -25,10 +25,9 @@
+
+ #ifndef _CALCS_CALCS_LOGGER_H_
+ #define _CALCS_CALCS_LOGGER_H_
+-#define DC_LOGGER \
+- logger
++#define DC_LOGGER ctx->logger
+
+-static void print_bw_calcs_dceip(struct dal_logger *logger, const struct bw_calcs_dceip *dceip)
++static void print_bw_calcs_dceip(struct dc_context *ctx, const struct bw_calcs_dceip *dceip)
+ {
+
+ DC_LOG_BANDWIDTH_CALCS("#####################################################################");
+@@ -122,7 +121,7 @@ static void print_bw_calcs_dceip(struct dal_logger *logger, const struct bw_calc
+
+ }
+
+-static void print_bw_calcs_vbios(struct dal_logger *logger, const struct bw_calcs_vbios *vbios)
++static void print_bw_calcs_vbios(struct dc_context *ctx, const struct bw_calcs_vbios *vbios)
+ {
+
+ DC_LOG_BANDWIDTH_CALCS("#####################################################################");
+@@ -181,7 +180,7 @@ static void print_bw_calcs_vbios(struct dal_logger *logger, const struct bw_calc
+
+ }
+
+-static void print_bw_calcs_data(struct dal_logger *logger, struct bw_calcs_data *data)
++static void print_bw_calcs_data(struct dc_context *ctx, struct bw_calcs_data *data)
+ {
+
+ int i, j, k;
+diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+index 2c4e8f0..160d11a 100644
+--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
++++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+@@ -3010,9 +3010,9 @@ bool bw_calcs(struct dc_context *ctx,
+ struct bw_fixed low_yclk = vbios->low_yclk;
+
+ if (ctx->dc->debug.bandwidth_calcs_trace) {
+- print_bw_calcs_dceip(ctx->logger, dceip);
+- print_bw_calcs_vbios(ctx->logger, vbios);
+- print_bw_calcs_data(ctx->logger, data);
++ print_bw_calcs_dceip(ctx, dceip);
++ print_bw_calcs_vbios(ctx, vbios);
++ print_bw_calcs_data(ctx, data);
+ }
+ calculate_bandwidth(dceip, vbios, data);
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index b14741d..e9e7953 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -465,9 +465,6 @@ static void destruct(struct dc *dc)
+ if (dc->ctx->created_bios)
+ dal_bios_parser_destroy(&dc->ctx->dc_bios);
+
+- if (dc->ctx->logger)
+- dal_logger_destroy(&dc->ctx->logger);
+-
+ kfree(dc->ctx);
+ dc->ctx = NULL;
+
+@@ -490,7 +487,6 @@ static void destruct(struct dc *dc)
+ static bool construct(struct dc *dc,
+ const struct dc_init_data *init_params)
+ {
+- struct dal_logger *logger;
+ struct dc_context *dc_ctx;
+ struct bw_calcs_dceip *dc_dceip;
+ struct bw_calcs_vbios *dc_vbios;
+@@ -555,14 +551,7 @@ static bool construct(struct dc *dc,
+ }
+
+ /* Create logger */
+- logger = dal_logger_create(dc_ctx, init_params->log_mask);
+
+- if (!logger) {
+- /* can *not* call logger. call base driver 'print error' */
+- dm_error("%s: failed to create Logger!\n", __func__);
+- goto fail;
+- }
+- dc_ctx->logger = logger;
+ dc_ctx->dce_environment = init_params->dce_environment;
+
+ dc_version = resource_parse_asic_id(init_params->asic_id);
+@@ -1007,9 +996,7 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)
+ for (i = 0; i < context->stream_count; i++) {
+ struct dc_stream_state *stream = context->streams[i];
+
+- dc_stream_log(stream,
+- dc->ctx->logger,
+- LOG_DC);
++ dc_stream_log(dc, stream);
+ }
+
+ result = dc_commit_state_no_check(dc, context);
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+index 3732a1d..0223f48 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+@@ -30,6 +30,8 @@
+ #include "ipp.h"
+ #include "timing_generator.h"
+
++#define DC_LOGGER dc->ctx->logger
++
+ /*******************************************************************************
+ * Private functions
+ ******************************************************************************/
+@@ -317,16 +319,10 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
+ return ret;
+ }
+
+-
+-void dc_stream_log(
+- const struct dc_stream_state *stream,
+- struct dal_logger *dm_logger,
+- enum dc_log_type log_type)
++void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream)
+ {
+-
+- dm_logger_write(dm_logger,
+- log_type,
+- "core_stream 0x%x: src: %d, %d, %d, %d; dst: %d, %d, %d, %d, colorSpace:%d\n",
++ DC_LOG_DC(
++ "core_stream 0x%p: src: %d, %d, %d, %d; dst: %d, %d, %d, %d, colorSpace:%d\n",
+ stream,
+ stream->src.x,
+ stream->src.y,
+@@ -337,21 +333,18 @@ void dc_stream_log(
+ stream->dst.width,
+ stream->dst.height,
+ stream->output_color_space);
+- dm_logger_write(dm_logger,
+- log_type,
++ DC_LOG_DC(
+ "\tpix_clk_khz: %d, h_total: %d, v_total: %d, pixelencoder:%d, displaycolorDepth:%d\n",
+ stream->timing.pix_clk_khz,
+ stream->timing.h_total,
+ stream->timing.v_total,
+ stream->timing.pixel_encoding,
+ stream->timing.display_color_depth);
+- dm_logger_write(dm_logger,
+- log_type,
++ DC_LOG_DC(
+ "\tsink name: %s, serial: %d\n",
+ stream->sink->edid_caps.display_name,
+ stream->sink->edid_caps.serial_number);
+- dm_logger_write(dm_logger,
+- log_type,
++ DC_LOG_DC(
+ "\tlink: %d\n",
+ stream->sink->link->link_index);
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+index 597c6bf..e7a05d2 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+@@ -158,10 +158,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
+ /*
+ * Log the current stream state.
+ */
+-void dc_stream_log(
+- const struct dc_stream_state *stream,
+- struct dal_logger *dc_logger,
+- enum dc_log_type log_type);
++void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream);
+
+ uint8_t dc_get_current_stream_count(struct dc *dc);
+ struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i);
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
+index 9467249..59bf0d5 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
+@@ -77,8 +77,6 @@ struct dc_context {
+ struct dc *dc;
+
+ void *driver_context; /* e.g. amdgpu_device */
+-
+- struct dal_logger *logger;
+ void *cgs_device;
+
+ enum dce_environment dce_environment;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+index 3edaa00..1c902e4 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+@@ -794,43 +794,38 @@ static bool dce110_validate_bandwidth(
+
+ if (memcmp(&dc->current_state->bw.dce,
+ &context->bw.dce, sizeof(context->bw.dce))) {
+- struct log_entry log_entry;
+- dm_logger_open(
+- dc->ctx->logger,
+- &log_entry,
+- LOG_BANDWIDTH_CALCS);
+- dm_logger_append(&log_entry, "%s: finish,\n"
++
++ DC_LOG_BANDWIDTH_CALCS(
++ "%s: finish,\n"
++ "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
++ "stutMark_b: %d stutMark_a: %d\n"
+ "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
+- "stutMark_b: %d stutMark_a: %d\n",
++ "stutMark_b: %d stutMark_a: %d\n"
++ "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
++ "stutMark_b: %d stutMark_a: %d stutter_mode_enable: %d\n"
++ "cstate: %d pstate: %d nbpstate: %d sync: %d dispclk: %d\n"
++ "sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n"
++ ,
+ __func__,
+ context->bw.dce.nbp_state_change_wm_ns[0].b_mark,
+ context->bw.dce.nbp_state_change_wm_ns[0].a_mark,
+ context->bw.dce.urgent_wm_ns[0].b_mark,
+ context->bw.dce.urgent_wm_ns[0].a_mark,
+ context->bw.dce.stutter_exit_wm_ns[0].b_mark,
+- context->bw.dce.stutter_exit_wm_ns[0].a_mark);
+- dm_logger_append(&log_entry,
+- "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
+- "stutMark_b: %d stutMark_a: %d\n",
++ context->bw.dce.stutter_exit_wm_ns[0].a_mark,
+ context->bw.dce.nbp_state_change_wm_ns[1].b_mark,
+ context->bw.dce.nbp_state_change_wm_ns[1].a_mark,
+ context->bw.dce.urgent_wm_ns[1].b_mark,
+ context->bw.dce.urgent_wm_ns[1].a_mark,
+ context->bw.dce.stutter_exit_wm_ns[1].b_mark,
+- context->bw.dce.stutter_exit_wm_ns[1].a_mark);
+- dm_logger_append(&log_entry,
+- "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
+- "stutMark_b: %d stutMark_a: %d stutter_mode_enable: %d\n",
++ context->bw.dce.stutter_exit_wm_ns[1].a_mark,
+ context->bw.dce.nbp_state_change_wm_ns[2].b_mark,
+ context->bw.dce.nbp_state_change_wm_ns[2].a_mark,
+ context->bw.dce.urgent_wm_ns[2].b_mark,
+ context->bw.dce.urgent_wm_ns[2].a_mark,
+ context->bw.dce.stutter_exit_wm_ns[2].b_mark,
+ context->bw.dce.stutter_exit_wm_ns[2].a_mark,
+- context->bw.dce.stutter_mode_enable);
+- dm_logger_append(&log_entry,
+- "cstate: %d pstate: %d nbpstate: %d sync: %d dispclk: %d\n"
+- "sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n",
++ context->bw.dce.stutter_mode_enable,
+ context->bw.dce.cpuc_state_change_enable,
+ context->bw.dce.cpup_state_change_enable,
+ context->bw.dce.nbp_state_change_enable,
+@@ -840,7 +835,6 @@ static bool dce110_validate_bandwidth(
+ context->bw.dce.sclk_deep_sleep_khz,
+ context->bw.dce.yclk_khz,
+ context->bw.dce.blackout_recovery_time_us);
+- dm_logger_close(&log_entry);
+ }
+ return result;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+index 9e1afb1..30d5b32 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+@@ -744,43 +744,38 @@ bool dce112_validate_bandwidth(
+
+ if (memcmp(&dc->current_state->bw.dce,
+ &context->bw.dce, sizeof(context->bw.dce))) {
+- struct log_entry log_entry;
+- dm_logger_open(
+- dc->ctx->logger,
+- &log_entry,
+- LOG_BANDWIDTH_CALCS);
+- dm_logger_append(&log_entry, "%s: finish,\n"
++
++ DC_LOG_BANDWIDTH_CALCS(
++ "%s: finish,\n"
++ "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
++ "stutMark_b: %d stutMark_a: %d\n"
+ "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
+- "stutMark_b: %d stutMark_a: %d\n",
++ "stutMark_b: %d stutMark_a: %d\n"
++ "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
++ "stutMark_b: %d stutMark_a: %d stutter_mode_enable: %d\n"
++ "cstate: %d pstate: %d nbpstate: %d sync: %d dispclk: %d\n"
++ "sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n"
++ ,
+ __func__,
+ context->bw.dce.nbp_state_change_wm_ns[0].b_mark,
+ context->bw.dce.nbp_state_change_wm_ns[0].a_mark,
+ context->bw.dce.urgent_wm_ns[0].b_mark,
+ context->bw.dce.urgent_wm_ns[0].a_mark,
+ context->bw.dce.stutter_exit_wm_ns[0].b_mark,
+- context->bw.dce.stutter_exit_wm_ns[0].a_mark);
+- dm_logger_append(&log_entry,
+- "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
+- "stutMark_b: %d stutMark_a: %d\n",
++ context->bw.dce.stutter_exit_wm_ns[0].a_mark,
+ context->bw.dce.nbp_state_change_wm_ns[1].b_mark,
+ context->bw.dce.nbp_state_change_wm_ns[1].a_mark,
+ context->bw.dce.urgent_wm_ns[1].b_mark,
+ context->bw.dce.urgent_wm_ns[1].a_mark,
+ context->bw.dce.stutter_exit_wm_ns[1].b_mark,
+- context->bw.dce.stutter_exit_wm_ns[1].a_mark);
+- dm_logger_append(&log_entry,
+- "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
+- "stutMark_b: %d stutMark_a: %d stutter_mode_enable: %d\n",
++ context->bw.dce.stutter_exit_wm_ns[1].a_mark,
+ context->bw.dce.nbp_state_change_wm_ns[2].b_mark,
+ context->bw.dce.nbp_state_change_wm_ns[2].a_mark,
+ context->bw.dce.urgent_wm_ns[2].b_mark,
+ context->bw.dce.urgent_wm_ns[2].a_mark,
+ context->bw.dce.stutter_exit_wm_ns[2].b_mark,
+ context->bw.dce.stutter_exit_wm_ns[2].a_mark,
+- context->bw.dce.stutter_mode_enable);
+- dm_logger_append(&log_entry,
+- "cstate: %d pstate: %d nbpstate: %d sync: %d dispclk: %d\n"
+- "sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n",
++ context->bw.dce.stutter_mode_enable,
+ context->bw.dce.cpuc_state_change_enable,
+ context->bw.dce.cpup_state_change_enable,
+ context->bw.dce.nbp_state_change_enable,
+@@ -790,7 +785,6 @@ bool dce112_validate_bandwidth(
+ context->bw.dce.sclk_deep_sleep_khz,
+ context->bw.dce.yclk_khz,
+ context->bw.dce.blackout_recovery_time_us);
+- dm_logger_close(&log_entry);
+ }
+ return result;
+ }
+diff --git a/drivers/gpu/drm/amd/display/include/logger_interface.h b/drivers/gpu/drm/amd/display/include/logger_interface.h
+index 0f10ed7..e3c7961 100644
+--- a/drivers/gpu/drm/amd/display/include/logger_interface.h
++++ b/drivers/gpu/drm/amd/display/include/logger_interface.h
+@@ -40,49 +40,7 @@ struct dc_state;
+ *
+ */
+
+-struct dal_logger *dal_logger_create(struct dc_context *ctx, uint32_t log_mask);
+-
+-uint32_t dal_logger_destroy(struct dal_logger **logger);
+-
+-void dm_logger_flush_buffer(struct dal_logger *logger, bool should_warn);
+-
+-void dm_logger_write(
+- struct dal_logger *logger,
+- enum dc_log_type log_type,
+- const char *msg,
+- ...);
+-
+-void dm_logger_append(
+- struct log_entry *entry,
+- const char *msg,
+- ...);
+-
+-void dm_logger_append_va(
+- struct log_entry *entry,
+- const char *msg,
+- va_list args);
+-
+-void dm_logger_append_heading(struct log_entry *entry);
+-
+-void dm_logger_open(
+- struct dal_logger *logger,
+- struct log_entry *entry,
+- enum dc_log_type log_type);
+-
+-void dm_logger_close(struct log_entry *entry);
+-
+-void dc_conn_log(struct dc_context *ctx,
+- const struct dc_link *link,
+- uint8_t *hex_data,
+- int hex_data_count,
+- enum dc_log_type event,
+- const char *msg,
+- ...);
+-
+-void logger_write(struct dal_logger *logger,
+- enum dc_log_type log_type,
+- const char *msg,
+- void *paralist);
++void dc_conn_log_hex_linux(const uint8_t *hex_data, int hex_data_count);
+
+ void pre_surface_trace(
+ struct dc *dc,
+@@ -108,28 +66,31 @@ void context_clock_trace(
+ * marked by this macro.
+ * Note that the message will be printed exactly once for every function
+ * it is used in order to avoid repeating of the same message. */
++
+ #define DAL_LOGGER_NOT_IMPL(fmt, ...) \
+-{ \
+- static bool print_not_impl = true; \
+-\
+- if (print_not_impl == true) { \
+- print_not_impl = false; \
+- dm_logger_write(ctx->logger, LOG_WARNING, \
+- "DAL_NOT_IMPL: " fmt, ##__VA_ARGS__); \
+- } \
+-}
++ do { \
++ static bool print_not_impl = true; \
++ if (print_not_impl == true) { \
++ print_not_impl = false; \
++ DRM_WARN("DAL_NOT_IMPL: " fmt, ##__VA_ARGS__); \
++ } \
++ } while (0)
+
+ /******************************************************************************
+ * Convenience macros to save on typing.
+ *****************************************************************************/
+
+ #define DC_ERROR(...) \
+- dm_logger_write(dc_ctx->logger, LOG_ERROR, \
+- __VA_ARGS__)
++ do { \
++ (void)(dc_ctx); \
++ DC_LOG_ERROR(__VA_ARGS__); \
++ } while (0)
+
+ #define DC_SYNC_INFO(...) \
+- dm_logger_write(dc_ctx->logger, LOG_SYNC, \
+- __VA_ARGS__)
++ do { \
++ (void)(dc_ctx); \
++ DC_LOG_SYNC(__VA_ARGS__); \
++ } while (0)
+
+ /* Connectivity log format:
+ * [time stamp] [drm] [Major_minor] [connector name] message.....
+@@ -139,20 +100,30 @@ void context_clock_trace(
+ */
+
+ #define CONN_DATA_DETECT(link, hex_data, hex_len, ...) \
+- dc_conn_log(link->ctx, link, hex_data, hex_len, \
+- LOG_EVENT_DETECTION, ##__VA_ARGS__)
++ do { \
++ (void)(link); \
++ dc_conn_log_hex_linux(hex_data, hex_len); \
++ DC_LOG_EVENT_DETECTION(__VA_ARGS__); \
++ } while (0)
+
+ #define CONN_DATA_LINK_LOSS(link, hex_data, hex_len, ...) \
+- dc_conn_log(link->ctx, link, hex_data, hex_len, \
+- LOG_EVENT_LINK_LOSS, ##__VA_ARGS__)
++ do { \
++ (void)(link); \
++ dc_conn_log_hex_linux(hex_data, hex_len); \
++ DC_LOG_EVENT_LINK_LOSS(__VA_ARGS__); \
++ } while (0)
+
+ #define CONN_MSG_LT(link, ...) \
+- dc_conn_log(link->ctx, link, NULL, 0, \
+- LOG_EVENT_LINK_TRAINING, ##__VA_ARGS__)
++ do { \
++ (void)(link); \
++ DC_LOG_EVENT_LINK_TRAINING(__VA_ARGS__); \
++ } while (0)
+
+ #define CONN_MSG_MODE(link, ...) \
+- dc_conn_log(link->ctx, link, NULL, 0, \
+- LOG_EVENT_MODE_SET, ##__VA_ARGS__)
++ do { \
++ (void)(link); \
++ DC_LOG_EVENT_MODE_SET(__VA_ARGS__); \
++ } while (0)
+
+ /*
+ * Display Test Next logging
+@@ -167,38 +138,21 @@ void context_clock_trace(
+ dm_dtn_log_end(dc_ctx)
+
+ #define PERFORMANCE_TRACE_START() \
+- unsigned long long perf_trc_start_stmp = dm_get_timestamp(dc->ctx); \
+- unsigned long long perf_trc_start_log_msk = dc->ctx->logger->mask; \
+- unsigned int perf_trc_start_log_flags = dc->ctx->logger->flags.value; \
+- if (dc->debug.performance_trace) {\
+- dm_logger_flush_buffer(dc->ctx->logger, false);\
+- dc->ctx->logger->mask = 1<<LOG_PERF_TRACE;\
+- dc->ctx->logger->flags.bits.ENABLE_CONSOLE = 0;\
+- dc->ctx->logger->flags.bits.ENABLE_BUFFER = 1;\
+- }
+-
+-#define PERFORMANCE_TRACE_END() do {\
+- unsigned long long perf_trc_end_stmp = dm_get_timestamp(dc->ctx);\
+- if (dc->debug.performance_trace) {\
+- dm_logger_write(dc->ctx->logger, \
+- LOG_PERF_TRACE, \
+- "%s duration: %d ticks\n", __func__,\
++ unsigned long long perf_trc_start_stmp = dm_get_timestamp(dc->ctx)
++
++#define PERFORMANCE_TRACE_END() \
++ do { \
++ unsigned long long perf_trc_end_stmp = dm_get_timestamp(dc->ctx); \
++ if (dc->debug.performance_trace) { \
++ DC_LOG_PERF_TRACE("%s duration: %lld ticks\n", __func__, \
+ perf_trc_end_stmp - perf_trc_start_stmp); \
+- if (perf_trc_start_log_msk != 1<<LOG_PERF_TRACE) {\
+- dc->ctx->logger->mask = perf_trc_start_log_msk;\
+- dc->ctx->logger->flags.value = perf_trc_start_log_flags;\
+- dm_logger_flush_buffer(dc->ctx->logger, false);\
+ } \
+- } \
+-} while (0)
++ } while (0)
+
+-#define DISPLAY_STATS_BEGIN(entry) \
+- dm_logger_open(dc->ctx->logger, &entry, LOG_DISPLAYSTATS)
++#define DISPLAY_STATS_BEGIN(entry) (void)(entry)
+
+-#define DISPLAY_STATS(msg, ...) \
+- dm_logger_append(&log_entry, msg, ##__VA_ARGS__)
++#define DISPLAY_STATS(msg, ...) DC_LOG_PERF_TRACE(msg, __VA_ARGS__)
+
+-#define DISPLAY_STATS_END(entry) \
+- dm_logger_close(&entry)
++#define DISPLAY_STATS_END(entry) (void)(entry)
+
+ #endif /* __DAL_LOGGER_INTERFACE_H__ */
+diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
+index 0a540b9..ad3695e 100644
+--- a/drivers/gpu/drm/amd/display/include/logger_types.h
++++ b/drivers/gpu/drm/amd/display/include/logger_types.h
+@@ -138,63 +138,4 @@ enum dc_log_type {
+ (1 << LOG_HW_AUDIO)| \
+ (1 << LOG_BANDWIDTH_CALCS)*/
+
+-union logger_flags {
+- struct {
+- uint32_t ENABLE_CONSOLE:1; /* Print to console */
+- uint32_t ENABLE_BUFFER:1; /* Print to buffer */
+- uint32_t RESERVED:30;
+- } bits;
+- uint32_t value;
+-};
+-
+-struct log_entry {
+- struct dal_logger *logger;
+- enum dc_log_type type;
+-
+- char *buf;
+- uint32_t buf_offset;
+- uint32_t max_buf_bytes;
+-};
+-
+-/**
+-* Structure for enumerating log types
+-*/
+-struct dc_log_type_info {
+- enum dc_log_type type;
+- char name[MAX_NAME_LEN];
+-};
+-
+-/* Structure for keeping track of offsets, buffer, etc */
+-
+-#define DAL_LOGGER_BUFFER_MAX_SIZE 2048
+-
+-/*Connectivity log needs to output EDID, which needs at lease 256x3 bytes,
+- * change log line size to 896 to meet the request.
+- */
+-#define LOG_MAX_LINE_SIZE 896
+-
+-struct dal_logger {
+-
+- /* How far into the circular buffer has been read by dsat
+- * Read offset should never cross write offset. Write \0's to
+- * read data just to be sure?
+- */
+- uint32_t buffer_read_offset;
+-
+- /* How far into the circular buffer we have written
+- * Write offset should never cross read offset
+- */
+- uint32_t buffer_write_offset;
+-
+- uint32_t open_count;
+-
+- char *log_buffer; /* Pointer to malloc'ed buffer */
+- uint32_t log_buffer_size; /* Size of circular buffer */
+-
+- uint32_t mask; /*array of masks for major elements*/
+-
+- union logger_flags flags;
+- struct dc_context *ctx;
+-};
+-
+ #endif /* __DAL_LOGGER_TYPES_H__ */
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4911-drm-amd-display-read-DP-sink-and-DP-branch-hardware-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4911-drm-amd-display-read-DP-sink-and-DP-branch-hardware-.patch
new file mode 100644
index 00000000..03c0bfec
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4911-drm-amd-display-read-DP-sink-and-DP-branch-hardware-.patch
@@ -0,0 +1,104 @@
+From 8f2acb22d5fc4c2bbb5c32e8829ac3a3ce6a8041 Mon Sep 17 00:00:00 2001
+From: Alvin lee <alvin.lee3@amd.com>
+Date: Tue, 19 Jun 2018 15:40:09 -0400
+Subject: [PATCH 4911/5725] drm/amd/display: read DP sink and DP branch
+ hardware and firmware revision from DPCD
+
+- define new dpcd address in drm
+- implement new members in dpcd_caps to store values read from new dpcd address
+
+Signed-off-by: Alvin lee <alvin.lee3@amd.com>
+Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 25 ++++++++++++++++++++++++
+ drivers/gpu/drm/amd/display/dc/dc.h | 5 +++++
+ drivers/gpu/drm/amd/display/include/dpcd_defs.h | 3 +++
+ 3 files changed, 33 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+index 68c1f65..165618f 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+@@ -2260,6 +2260,11 @@ static void get_active_converter_info(
+
+ link->dpcd_caps.branch_hw_revision =
+ dp_hw_fw_revision.ieee_hw_rev;
++
++ memmove(
++ link->dpcd_caps.branch_fw_revision,
++ dp_hw_fw_revision.ieee_fw_rev,
++ sizeof(dp_hw_fw_revision.ieee_fw_rev));
+ }
+ }
+
+@@ -2315,6 +2320,7 @@ static bool retrieve_link_cap(struct dc_link *link)
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+ uint32_t read_dpcd_retry_cnt = 3;
+ int i;
++ struct dp_sink_hw_fw_revision dp_hw_fw_revision;
+
+ memset(dpcd_data, '\0', sizeof(dpcd_data));
+ memset(&down_strm_port_count,
+@@ -2409,6 +2415,25 @@ static bool retrieve_link_cap(struct dc_link *link)
+ (sink_id.ieee_oui[1] << 8) +
+ (sink_id.ieee_oui[2]);
+
++ memmove(
++ link->dpcd_caps.sink_dev_id_str,
++ sink_id.ieee_device_id,
++ sizeof(sink_id.ieee_device_id));
++
++ core_link_read_dpcd(
++ link,
++ DP_SINK_HW_REVISION_START,
++ (uint8_t *)&dp_hw_fw_revision,
++ sizeof(dp_hw_fw_revision));
++
++ link->dpcd_caps.sink_hw_revision =
++ dp_hw_fw_revision.ieee_hw_rev;
++
++ memmove(
++ link->dpcd_caps.sink_fw_revision,
++ dp_hw_fw_revision.ieee_fw_rev,
++ sizeof(dp_hw_fw_revision.ieee_fw_rev));
++
+ /* Connectivity log: detection */
+ CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: ");
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index bb71717..e382582 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -641,9 +641,14 @@ struct dpcd_caps {
+ struct dc_dongle_caps dongle_caps;
+
+ uint32_t sink_dev_id;
++ int8_t sink_dev_id_str[6];
++ int8_t sink_hw_revision;
++ int8_t sink_fw_revision[2];
++
+ uint32_t branch_dev_id;
+ int8_t branch_dev_name[6];
+ int8_t branch_hw_revision;
++ int8_t branch_fw_revision[2];
+
+ bool allow_invalid_MSA_timing_param;
+ bool panel_mode_edp;
+diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
+index d8e52e3..1c66166 100644
+--- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h
++++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
+@@ -27,6 +27,9 @@
+ #define __DAL_DPCD_DEFS_H__
+
+ #include <drm/drm_dp_helper.h>
++#ifndef DP_SINK_HW_REVISION_START // can remove this once the define gets into linux drm_dp_helper.h
++#define DP_SINK_HW_REVISION_START 0x409
++#endif
+
+ enum dpcd_revision {
+ DPCD_REV_10 = 0x10,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4912-drm-amd-display-dcc-always-on-for-bw-calculations-on.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4912-drm-amd-display-dcc-always-on-for-bw-calculations-on.patch
new file mode 100644
index 00000000..3cc8f85b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4912-drm-amd-display-dcc-always-on-for-bw-calculations-on.patch
@@ -0,0 +1,47 @@
+From 54a1fef4af129f946eea051c42e25d67a717fb65 Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Wed, 13 Jun 2018 13:58:14 -0400
+Subject: [PATCH 4912/5725] drm/amd/display: dcc always on for bw calculations
+ on raven
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 19 ++++++++++++++++++-
+ 1 file changed, 18 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+index e44b8d3..080f777 100644
+--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
++++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+@@ -250,7 +250,24 @@ static void pipe_ctx_to_e2e_pipe_params (
+ else if (pipe->bottom_pipe != NULL && pipe->bottom_pipe->plane_state == pipe->plane_state)
+ input->src.is_hsplit = true;
+
+- input->src.dcc = pipe->plane_state->dcc.enable;
++ if (pipe->plane_res.dpp->ctx->dc->debug.optimized_watermark) {
++ /*
++ * this method requires us to always re-calculate watermark when dcc change
++ * between flip.
++ */
++ input->src.dcc = pipe->plane_state->dcc.enable ? 1 : 0;
++ } else {
++ /*
++ * allow us to disable dcc on the fly without re-calculating WM
++ *
++ * extra overhead for DCC is quite small. for 1080p WM without
++ * DCC is only 0.417us lower (urgent goes from 6.979us to 6.562us)
++ */
++ unsigned int bpe;
++
++ input->src.dcc = pipe->plane_res.dpp->ctx->dc->res_pool->hubbub->funcs->
++ dcc_support_pixel_format(pipe->plane_state->format, &bpe) ? 1 : 0;
++ }
+ input->src.dcc_rate = 1;
+ input->src.meta_pitch = pipe->plane_state->dcc.grph.meta_pitch;
+ input->src.source_scan = dm_horz;
+--
+2.7.4
+
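A standalone sketch of the DCC decision the hunk above adds to pipe_ctx_to_e2e_pipe_params(); the DC structures are flattened to plain booleans, and the hubbub dcc_support_pixel_format() callback is represented by a single format_supports_dcc flag, so this is an illustration of the branch logic only:

#include <stdbool.h>
#include <stdio.h>

static int e2e_src_dcc(bool optimized_watermark, bool plane_dcc_enable,
		       bool format_supports_dcc)
{
	if (optimized_watermark) {
		/* watermarks must be recalculated whenever DCC toggles between flips */
		return plane_dcc_enable ? 1 : 0;
	}
	/* allow disabling DCC on the fly without recalculating watermarks */
	return format_supports_dcc ? 1 : 0;
}

int main(void)
{
	printf("%d\n", e2e_src_dcc(false, false, true)); /* 1: bw calc assumes DCC on */
	printf("%d\n", e2e_src_dcc(true, false, true));  /* 0: follows plane state */
	return 0;
}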
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4913-drm-amd-display-hook-dp-test-pattern-through-debugfs.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4913-drm-amd-display-hook-dp-test-pattern-through-debugfs.patch
new file mode 100644
index 00000000..2684bced
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4913-drm-amd-display-hook-dp-test-pattern-through-debugfs.patch
@@ -0,0 +1,325 @@
+From 468893b87ff4edcd3b27dbe2528dbcbeb9d3f682 Mon Sep 17 00:00:00 2001
+From: Hersen Wu <hersenxs.wu@amd.com>
+Date: Tue, 19 Jun 2018 12:14:29 -0400
+Subject: [PATCH 4913/5725] drm/amd/display: hook dp test pattern through
+ debugfs
+
+ set PHY layer or Link layer test pattern
+ PHY test pattern is used for PHY SI check.
+ Link layer test will not affect PHY SI.
+
+ - normal video mode
+ 0 = DP_TEST_PATTERN_VIDEO_MODE
+
+ - PHY test pattern supported
+ 1 = DP_TEST_PATTERN_D102
+ 2 = DP_TEST_PATTERN_SYMBOL_ERROR
+ 3 = DP_TEST_PATTERN_PRBS7
+ 4 = DP_TEST_PATTERN_80BIT_CUSTOM
+ 5 = DP_TEST_PATTERN_CP2520_1
+ 6 = DP_TEST_PATTERN_CP2520_2 = DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE
+ 7 = DP_TEST_PATTERN_CP2520_3
+
+ - DP PHY Link Training Patterns
+ 8 = DP_TEST_PATTERN_TRAINING_PATTERN1
+ 9 = DP_TEST_PATTERN_TRAINING_PATTERN2
+ 0xa = DP_TEST_PATTERN_TRAINING_PATTERN3
+ 0xb = DP_TEST_PATTERN_TRAINING_PATTERN4
+
+ - DP Link Layer Test pattern
+ 0xc = DP_TEST_PATTERN_COLOR_SQUARES
+ 0xd = DP_TEST_PATTERN_COLOR_SQUARES_CEA
+ 0xe = DP_TEST_PATTERN_VERTICAL_BARS
+ 0xf = DP_TEST_PATTERN_HORIZONTAL_BARS
+ 0x10= DP_TEST_PATTERN_COLOR_RAMP
+
+ debugfs phy_test_pattern is located at /sys/kernel/debug/dri/0/DP-x
+
+ --- set test pattern
+ echo <test pattern #> > test_pattern
+
+ - custom test pattern
+ If the test pattern # is not supported, NO HW programming will be done.
+ For DP_TEST_PATTERN_80BIT_CUSTOM, an extra 10 bytes of data are needed
+ for the user pattern. The input 10 bytes of data are separated by spaces.
+
+ echo 0x4 0x11 0x22 0x33 0x44 0x55 0x66 0x77 0x88 0x99 0xaa >
+ test_pattern
+
+ --- reset test pattern
+ echo 0 > test_pattern
+
+ --- HPD detection is disabled when a PHY test pattern is set
+
+ when a PHY test pattern (pattern # within [1,7]) is set, the HPD pin of
+ the HW ASIC is disabled. The user can unplug the DP display from the DP
+ connector and plug in a scope to check the test pattern PHY SI.
+ If there is a need to unplug the scope and plug the DP display back,
+ do the steps below:
+ echo 0 > phy_test_pattern
+ unplug scope
+ plug DP display.
+
+ "echo 0 > phy_test_pattern" will re-enable the HPD pin again so that the
+ video sw driver can detect "unplug scope" and "plug DP display"
+
+Signed-off-by: Hersen Wu <hersenxs.wu@amd.com>
+Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 222 ++++++++++++++++++++-
+ 1 file changed, 211 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+index 9ff8833..8ddbf219 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+@@ -261,18 +261,219 @@ static ssize_t dp_pre_emphasis_debugfs_write(struct file *f, const char __user *
+ return 1;
+ }
+
+-static ssize_t dp_phy_test_pattern_debugfs_read(struct file *f, char __user *buf,
+- size_t size, loff_t *pos)
+-{
+- /* TODO: create method to read PHY test pattern */
+- return 1;
+-}
+-
++/* function description
++ *
++ * set PHY layer or Link layer test pattern
++ * PHY test pattern is used for PHY SI check.
++ * Link layer test will not affect PHY SI.
++ *
++ * Reset Test Pattern:
++ * 0 = DP_TEST_PATTERN_VIDEO_MODE
++ *
++ * PHY test pattern supported:
++ * 1 = DP_TEST_PATTERN_D102
++ * 2 = DP_TEST_PATTERN_SYMBOL_ERROR
++ * 3 = DP_TEST_PATTERN_PRBS7
++ * 4 = DP_TEST_PATTERN_80BIT_CUSTOM
++ * 5 = DP_TEST_PATTERN_CP2520_1
++ * 6 = DP_TEST_PATTERN_CP2520_2 = DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE
++ * 7 = DP_TEST_PATTERN_CP2520_3
++ *
++ * DP PHY Link Training Patterns
++ * 8 = DP_TEST_PATTERN_TRAINING_PATTERN1
++ * 9 = DP_TEST_PATTERN_TRAINING_PATTERN2
++ * a = DP_TEST_PATTERN_TRAINING_PATTERN3
++ * b = DP_TEST_PATTERN_TRAINING_PATTERN4
++ *
++ * DP Link Layer Test pattern
++ * c = DP_TEST_PATTERN_COLOR_SQUARES
++ * d = DP_TEST_PATTERN_COLOR_SQUARES_CEA
++ * e = DP_TEST_PATTERN_VERTICAL_BARS
++ * f = DP_TEST_PATTERN_HORIZONTAL_BARS
++ * 10= DP_TEST_PATTERN_COLOR_RAMP
++ *
++ * debugfs phy_test_pattern is located at /sys/kernel/debug/dri/0/DP-x
++ *
++ * --- set test pattern
++ * echo <test pattern #> > test_pattern
++ *
++ * If test pattern # is not supported, NO HW programming will be done.
++ * for DP_TEST_PATTERN_80BIT_CUSTOM, it needs extra 10 bytes of data
++ * for the user pattern. input 10 bytes data are separated by space
++ *
++ * echo 0x4 0x11 0x22 0x33 0x44 0x55 0x66 0x77 0x88 0x99 0xaa > test_pattern
++ *
++ * --- reset test pattern
++ * echo 0 > test_pattern
++ *
++ * --- HPD detection is disabled when set PHY test pattern
++ *
++ * when PHY test pattern (pattern # within [1,7]) is set, HPD pin of HW ASIC
++ * is disable. User could unplug DP display from DP connected and plug scope to
++ * check test pattern PHY SI.
++ * If there is need unplug scope and plug DP display back, do steps below:
++ * echo 0 > phy_test_pattern
++ * unplug scope
++ * plug DP display.
++ *
++ * "echo 0 > phy_test_pattern" will re-enable HPD pin again so that video sw
++ * driver could detect "unplug scope" and "plug DP display"
++ */
+ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+ {
+- /* TODO: create method to write PHY test pattern */
+- return 1;
++ struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
++ struct dc_link *link = connector->dc_link;
++ char *wr_buf = NULL;
++ char *wr_buf_ptr = NULL;
++ uint32_t wr_buf_size = 100;
++ int r;
++ int bytes_from_user;
++ char *sub_str;
++ uint8_t param_index = 0;
++ long param[11];
++ const char delimiter[3] = {' ', '\n', '\0'};
++ enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
++ bool disable_hpd = false;
++ bool valid_test_pattern = false;
++ uint8_t custom_pattern[10] = {0};
++ struct dc_link_settings prefer_link_settings = {LANE_COUNT_UNKNOWN,
++ LINK_RATE_UNKNOWN, LINK_SPREAD_DISABLED};
++ struct dc_link_settings cur_link_settings = {LANE_COUNT_UNKNOWN,
++ LINK_RATE_UNKNOWN, LINK_SPREAD_DISABLED};
++ struct link_training_settings link_training_settings;
++ int i;
++
++ if (size == 0)
++ return 0;
++
++ wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
++ if (!wr_buf)
++ return 0;
++ wr_buf_ptr = wr_buf;
++
++ r = copy_from_user(wr_buf_ptr, buf, wr_buf_size);
++
++ /* r is bytes not be copied */
++ if (r >= wr_buf_size) {
++ kfree(wr_buf);
++ DRM_DEBUG_DRIVER("user data not be read\n");
++ return 0;
++ }
++
++ bytes_from_user = wr_buf_size - r;
++
++ while (isspace(*wr_buf_ptr))
++ wr_buf_ptr++;
++
++ while ((*wr_buf_ptr != '\0') && (param_index < 1)) {
++ sub_str = strsep(&wr_buf_ptr, delimiter);
++ r = kstrtol(sub_str, 16, &param[param_index]);
++
++ if (r)
++ DRM_DEBUG_DRIVER("string to int convert error code: %d\n", r);
++
++ param_index++;
++ while (isspace(*wr_buf_ptr))
++ wr_buf_ptr++;
++
++ /* DP_TEST_PATTERN_80BIT_CUSTOM need extra 80 bits
++ * whci are 10 bytes separte by space
++ */
++ if (param[0] != 0x4)
++ break;
++ }
++
++ test_pattern = param[0];
++
++ switch (test_pattern) {
++ case DP_TEST_PATTERN_VIDEO_MODE:
++ case DP_TEST_PATTERN_COLOR_SQUARES:
++ case DP_TEST_PATTERN_COLOR_SQUARES_CEA:
++ case DP_TEST_PATTERN_VERTICAL_BARS:
++ case DP_TEST_PATTERN_HORIZONTAL_BARS:
++ case DP_TEST_PATTERN_COLOR_RAMP:
++ valid_test_pattern = true;
++ break;
++
++ case DP_TEST_PATTERN_D102:
++ case DP_TEST_PATTERN_SYMBOL_ERROR:
++ case DP_TEST_PATTERN_PRBS7:
++ case DP_TEST_PATTERN_80BIT_CUSTOM:
++ case DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE:
++ case DP_TEST_PATTERN_TRAINING_PATTERN4:
++ disable_hpd = true;
++ valid_test_pattern = true;
++ break;
++
++ default:
++ valid_test_pattern = false;
++ test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
++ break;
++ }
++
++ if (!valid_test_pattern) {
++ kfree(wr_buf);
++ DRM_DEBUG_DRIVER("Invalid Test Pattern Parameters\n");
++ return bytes_from_user;
++ }
++
++ if (test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM) {
++ for (i = 0; i < 10; i++)
++ custom_pattern[i] = (uint8_t) param[i + 1];
++ }
++
++ /* Usage: set DP physical test pattern using debugfs with normal DP
++ * panel. Then plug out DP panel and connect a scope to measure
++ * For normal video mode and test patterns generated from the CRTC,
++ * they are visible to the user. So do not disable HPD.
++ * Video Mode is also set to clear the test pattern, so enable HPD
++ * because it might have been disabled after a test pattern was set.
++ * AUX depends on HPD; this is sequence dependent, do not move!
++ */
++ if (!disable_hpd)
++ dc_link_enable_hpd(link);
++
++ prefer_link_settings.lane_count = link->verified_link_cap.lane_count;
++ prefer_link_settings.link_rate = link->verified_link_cap.link_rate;
++ prefer_link_settings.link_spread = link->verified_link_cap.link_spread;
++
++ cur_link_settings.lane_count = link->cur_link_settings.lane_count;
++ cur_link_settings.link_rate = link->cur_link_settings.link_rate;
++ cur_link_settings.link_spread = link->cur_link_settings.link_spread;
++
++ link_training_settings.link_settings = cur_link_settings;
++
++
++ if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) {
++ if (prefer_link_settings.lane_count != LANE_COUNT_UNKNOWN &&
++ prefer_link_settings.link_rate != LINK_RATE_UNKNOWN &&
++ (prefer_link_settings.lane_count != cur_link_settings.lane_count ||
++ prefer_link_settings.link_rate != cur_link_settings.link_rate))
++ link_training_settings.link_settings = prefer_link_settings;
++ }
++
++ for (i = 0; i < (unsigned int)(link_training_settings.link_settings.lane_count); i++)
++ link_training_settings.lane_settings[i] = link->cur_lane_setting;
++
++ dc_link_set_test_pattern(
++ link,
++ test_pattern,
++ &link_training_settings,
++ custom_pattern,
++ 10);
++
++ /* Usage: Set DP physical test pattern using AMDDP with normal DP panel
++ * Then plug out DP panel and connect a scope to measure DP PHY signal.
++ * Need disable interrupt to avoid SW driver disable DP output. This is
++ * done after the test pattern is set.
++ */
++ if (valid_test_pattern && disable_hpd)
++ dc_link_disable_hpd(link);
++
++ kfree(wr_buf);
++
++ return bytes_from_user;
+ }
+
+ static const struct file_operations dp_link_settings_debugfs_fops = {
+@@ -298,7 +499,6 @@ static const struct file_operations dp_pre_emphasis_fops = {
+
+ static const struct file_operations dp_phy_test_pattern_fops = {
+ .owner = THIS_MODULE,
+- .read = dp_phy_test_pattern_debugfs_read,
+ .write = dp_phy_test_pattern_debugfs_write,
+ .llseek = default_llseek
+ };
+@@ -310,7 +510,7 @@ static const struct {
+ {"link_settings", &dp_link_settings_debugfs_fops},
+ {"voltage_swing", &dp_voltage_swing_fops},
+ {"pre_emphasis", &dp_pre_emphasis_fops},
+- {"phy_test_pattern", &dp_phy_test_pattern_fops}
++ {"test_pattern", &dp_phy_test_pattern_fops}
+ };
+
+ int connector_debugfs_init(struct amdgpu_dm_connector *connector)
+--
+2.7.4
+
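A hedged userspace sketch of driving the new debugfs file the same way the echo examples in the commit message do; the DP-1 connector directory is an assumption that depends on the running system, and the file only exists on kernels carrying this patch:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical connector directory; check link_settings to find yours */
	const char *path = "/sys/kernel/debug/dri/0/DP-1/test_pattern";
	/* 0x3 = DP_TEST_PATTERN_PRBS7; pattern 0x4 would need ten extra bytes */
	char cmd[128] = "0x3\n";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write");
	close(fd);

	/* write "0\n" afterwards to restore video mode and re-enable HPD */
	return 0;
}

Writing a pattern number within [1,7] disables HPD as described in the commit message, so a final write of 0 is what brings normal detection back.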
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4914-drm-amd-display-remove-dentist_vco_freq-from-resourc.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4914-drm-amd-display-remove-dentist_vco_freq-from-resourc.patch
new file mode 100644
index 00000000..1dc6fc0d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4914-drm-amd-display-remove-dentist_vco_freq-from-resourc.patch
@@ -0,0 +1,29 @@
+From 66351cca2dfef6b80d76ab57a0fa6647a9ed2d1f Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Wed, 13 Jun 2018 13:52:53 -0400
+Subject: [PATCH 4914/5725] drm/amd/display: remove dentist_vco_freq from
+ resource_pool
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/inc/core_types.h | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+index 0107aa2..c42308a 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+@@ -148,7 +148,6 @@ struct resource_pool {
+ unsigned int underlay_pipe_index;
+ unsigned int stream_enc_count;
+ unsigned int ref_clock_inKhz;
+- unsigned int dentist_vco_freq_khz;
+ unsigned int timing_generator_count;
+
+ /*
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4915-drm-amd-display-drop-unused-register-defines.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4915-drm-amd-display-drop-unused-register-defines.patch
new file mode 100644
index 00000000..75dfc8ea
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4915-drm-amd-display-drop-unused-register-defines.patch
@@ -0,0 +1,40 @@
+From 29440e868a59e8bf6024d108bb358fc42c7c1324 Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Wed, 20 Jun 2018 11:40:15 -0400
+Subject: [PATCH 4915/5725] drm/amd/display: drop unused register defines
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h | 8 ++------
+ 1 file changed, 2 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
+index 7ce0a54..8a6b2d3 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
+@@ -44,18 +44,14 @@
+ CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, mask_sh)
+
+ #define CLK_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(mask_sh) \
+- CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_WDIVIDER, mask_sh),\
+ CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, mask_sh),\
+- CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, mask_sh),\
+- CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_CHG_DONE, mask_sh)
++ CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, mask_sh)
+
+ #define CLK_REG_FIELD_LIST(type) \
+ type DPREFCLK_SRC_SEL; \
+ type DENTIST_DPREFCLK_WDIVIDER; \
+ type DENTIST_DISPCLK_WDIVIDER; \
+- type DENTIST_DPPCLK_WDIVIDER; \
+- type DENTIST_DISPCLK_CHG_DONE; \
+- type DENTIST_DPPCLK_CHG_DONE;
++ type DENTIST_DISPCLK_CHG_DONE;
+
+ struct dccg_shift {
+ CLK_REG_FIELD_LIST(uint8_t)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4916-drm-amd-display-add-additional-info-for-cursor-posit.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4916-drm-amd-display-add-additional-info-for-cursor-posit.patch
new file mode 100644
index 00000000..ca25bd92
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4916-drm-amd-display-add-additional-info-for-cursor-posit.patch
@@ -0,0 +1,113 @@
+From 2eac1bbf7dc93fa0b1ae2ec58fe3135ead4a02ba Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Tue, 19 Jun 2018 15:49:02 -0400
+Subject: [PATCH 4916/5725] drm/amd/display: add additional info for cursor
+ position programming
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 5 +++--
+ drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 7 ++++---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | 4 ++--
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 4 ++--
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 7 ++++---
+ 5 files changed, 15 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+index 7117f9f..afda2d4 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+@@ -404,9 +404,10 @@ struct dc_cursor_position {
+ struct dc_cursor_mi_param {
+ unsigned int pixel_clk_khz;
+ unsigned int ref_clk_khz;
+- unsigned int viewport_x_start;
+- unsigned int viewport_width;
++ struct rect viewport;
+ struct fixed31_32 h_scale_ratio;
++ struct fixed31_32 v_scale_ratio;
++ enum dc_rotation_angle rotation;
+ };
+
+ /* IPP related types */
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index 1634e9d..b0bcc9a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -2782,9 +2782,10 @@ void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx)
+ struct dc_cursor_mi_param param = {
+ .pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_khz,
+ .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz,
+- .viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x,
+- .viewport_width = pipe_ctx->plane_res.scl_data.viewport.width,
+- .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz
++ .viewport = pipe_ctx->plane_res.scl_data.viewport,
++ .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
++ .v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
++ .rotation = pipe_ctx->plane_state->rotation
+ };
+
+ if (pipe_ctx->plane_state->address.type
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+index 742fd49..a558efa 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+@@ -445,10 +445,10 @@ void dpp1_set_cursor_position(
+ uint32_t width)
+ {
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+- int src_x_offset = pos->x - pos->x_hotspot - param->viewport_x_start;
++ int src_x_offset = pos->x - pos->x_hotspot - param->viewport.x;
+ uint32_t cur_en = pos->enable ? 1 : 0;
+
+- if (src_x_offset >= (int)param->viewport_width)
++ if (src_x_offset >= (int)param->viewport.width)
+ cur_en = 0; /* not visible beyond right edge*/
+
+ if (src_x_offset + (int)width <= 0)
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+index 9eb60a0..617fd30 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+@@ -1083,7 +1083,7 @@ void hubp1_cursor_set_position(
+ const struct dc_cursor_mi_param *param)
+ {
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+- int src_x_offset = pos->x - pos->x_hotspot - param->viewport_x_start;
++ int src_x_offset = pos->x - pos->x_hotspot - param->viewport.x;
+ uint32_t cur_en = pos->enable ? 1 : 0;
+ uint32_t dst_x_offset = (src_x_offset >= 0) ? src_x_offset : 0;
+
+@@ -1107,7 +1107,7 @@ void hubp1_cursor_set_position(
+ dc_fixpt_from_int(dst_x_offset),
+ param->h_scale_ratio));
+
+- if (src_x_offset >= (int)param->viewport_width)
++ if (src_x_offset >= (int)param->viewport.width)
+ cur_en = 0; /* not visible beyond right edge*/
+
+ if (src_x_offset + (int)hubp->curs_attr.width <= 0)
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 12cb828..80cb7fd 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -2543,9 +2543,10 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
+ struct dc_cursor_mi_param param = {
+ .pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_khz,
+ .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz,
+- .viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x,
+- .viewport_width = pipe_ctx->plane_res.scl_data.viewport.width,
+- .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz
++ .viewport = pipe_ctx->plane_res.scl_data.viewport,
++ .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
++ .v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
++ .rotation = pipe_ctx->plane_state->rotation
+ };
+
+ if (pipe_ctx->plane_state->address.type
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4917-drm-amd-display-Patch-for-extend-time-to-panel-power.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4917-drm-amd-display-Patch-for-extend-time-to-panel-power.patch
new file mode 100644
index 00000000..97603489
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4917-drm-amd-display-Patch-for-extend-time-to-panel-power.patch
@@ -0,0 +1,57 @@
+From d104c169112f1a5d885dc6a8f8a015d058d13f44 Mon Sep 17 00:00:00 2001
+From: Hugo Hu <hugo.hu@amd.com>
+Date: Fri, 15 Jun 2018 15:49:55 -0400
+Subject: [PATCH 4917/5725] drm/amd/display: Patch to extend time to panel
+ poweron.
+
+[WHY]
+In the eDP spec, the min duration of the LCDVDD on-off-on sequence should be
+500ms, but some BOE panels need 700ms to pass.
+[HOW]
+Add a patch to wait more time when eDP powers on.
+
+Signed-off-by: Hugo Hu <hugo.hu@amd.com>
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc_types.h | 1 +
+ drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 9 ++++++---
+ 2 files changed, 7 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
+index 59bf0d5..58a6ef8 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
+@@ -192,6 +192,7 @@ union display_content_support {
+
+ struct dc_panel_patch {
+ unsigned int dppowerup_delay;
++ unsigned int extra_t12_ms;
+ };
+
+ struct dc_edid_caps {
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index b0bcc9a..286b6071 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -871,10 +871,13 @@ void hwss_edp_power_control(
+ unsigned long long wait_time_ms = 0;
+
+ /* max 500ms from LCDVDD off to on */
++ unsigned long long edp_poweroff_time_ms =
++ 500 + link->local_sink->edid_caps.panel_patch.extra_t12_ms;
++
+ if (link->link_trace.time_stamp.edp_poweroff == 0)
+- wait_time_ms = 500;
+- else if (duration_in_ms < 500)
+- wait_time_ms = 500 - duration_in_ms;
++ wait_time_ms = edp_poweroff_time_ms;
++ else if (duration_in_ms < edp_poweroff_time_ms)
++ wait_time_ms = edp_poweroff_time_ms - duration_in_ms;
+
+ if (wait_time_ms) {
+ msleep(wait_time_ms);
+--
+2.7.4
+
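A standalone sketch of the wait-time computation after the change above, with the link trace timestamps reduced to a plain elapsed-milliseconds argument; the 200 ms extra_t12_ms value is only an illustration of a per-panel patch entry:

#include <stdio.h>

static unsigned long long edp_wait_ms(unsigned long long poweroff_timestamp,
				      unsigned long long since_poweroff_ms,
				      unsigned int extra_t12_ms)
{
	/* required LCDVDD off-to-on gap: 500 ms plus the per-panel extra */
	unsigned long long required_ms = 500 + extra_t12_ms;

	if (poweroff_timestamp == 0)
		return required_ms;            /* never powered off: full wait */
	if (since_poweroff_ms < required_ms)
		return required_ms - since_poweroff_ms;
	return 0;                              /* gap already satisfied */
}

int main(void)
{
	/* panel with extra_t12_ms = 200, powered off 300 ms ago */
	printf("%llu ms\n", edp_wait_ms(1, 300, 200)); /* prints 400 ms */
	return 0;
}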
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4918-drm-amd-display-Linux-set-read-lane-settings-through.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4918-drm-amd-display-Linux-set-read-lane-settings-through.patch
new file mode 100644
index 00000000..329bba63
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4918-drm-amd-display-Linux-set-read-lane-settings-through.patch
@@ -0,0 +1,471 @@
+From 45d70ac2a30007c809265fdba76840b936cff95f Mon Sep 17 00:00:00 2001
+From: Hersen Wu <hersenxs.wu@amd.com>
+Date: Fri, 15 Jun 2018 10:32:50 -0400
+Subject: [PATCH 4918/5725] drm/amd/display: Linux set/read lane settings
+ through debugfs
+
+ function: get current DP PHY settings: voltage swing, pre-emphasis,
+ post-cursor2 (defined by VESA DP specification)
+
+ valid values: voltage swing: 0,1,2,3 pre-emphasis : 0,1,2,3
+ post cursor2 : 0,1,2,3
+
+ debugfs file phy_settings is located at /sys/kernel/debug/dri/0/DP-x
+
+ there will be directories, like DP-1, DP-2,DP-3, etc. for DP display
+
+ --- to figure out which DP-x is the DP display to be checked,
+ cd DP-x
+ ls -ll
+ There should be debugfs files, like link_settings and phy_settings.
+ cat link_settings
+ from lane_count and link_rate, figure out which DP-x is the display to
+ be worked on
+
+ --- to get current DP PHY settings,
+ cat phy_settings
+
+ --- to change DP PHY settings,
+ echo <voltage_swing> <pre-emphasis> <post_cursor2> > phy_settings
+
+ for example, to change voltage swing to 2, pre-emphasis to 3,
+ post_cursor2 to 0,
+ echo 2 3 0 > phy_settings
+
+ --- to check if change be applied, get current phy settings by
+ cat phy_settings
+
+ --- in case invalid values are set by user, like
+ echo 1 4 0 > phy_settings
+
+ HW will NOT be programmed by these settings.
+
+cat phy_settings will show the previous valid settings.
+
+Signed-off-by: Hersen Wu <hersenxs.wu@amd.com>
+Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
+Reviewed-by: Hersen Wu <hersenxs.wu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 266 ++++++++++++++++-----
+ 1 file changed, 207 insertions(+), 59 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+index 8ddbf219..f20ba9d 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+@@ -26,7 +26,6 @@
+ #include <linux/debugfs.h>
+
+ #include "dc.h"
+-
+ #include "amdgpu.h"
+ #include "amdgpu_dm.h"
+ #include "amdgpu_dm_debugfs.h"
+@@ -46,7 +45,7 @@
+ *
+ * --- to get dp configuration
+ *
+- * xxd -l 300 phy_settings
++ * cat link_settings
+ *
+ * It will list current, verified, reported, preferred dp configuration.
+ * current -- for current video mode
+@@ -56,7 +55,7 @@
+ *
+ * --- set (or force) dp configuration
+ *
+- * echo <lane_count> <link_rate>
++ * echo <lane_count> <link_rate> > link_settings
+ *
+ * for example, to force to 2 lane, 2.7GHz,
+ * echo 4 0xa > link_settings
+@@ -67,7 +66,7 @@
+ * done. please check link settings after force operation to see if HW get
+ * programming.
+ *
+- * xxd -l 300 link_settings
++ * cat link_settings
+ *
+ * check current and preferred settings.
+ *
+@@ -79,13 +78,13 @@ static ssize_t dp_link_settings_read(struct file *f, char __user *buf,
+ struct dc_link *link = connector->dc_link;
+ char *rd_buf = NULL;
+ char *rd_buf_ptr = NULL;
+- uint32_t rd_buf_size = 320;
+- int bytes_to_user;
++ const uint32_t rd_buf_size = 100;
++ uint32_t result = 0;
+ uint8_t str_len = 0;
+ int r;
+
+- if (size == 0)
+- return 0;
++ if (*pos & 3 || size & 3)
++ return -EINVAL;
+
+ rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
+ if (!rd_buf)
+@@ -98,39 +97,44 @@ static ssize_t dp_link_settings_read(struct file *f, char __user *buf,
+ link->cur_link_settings.lane_count,
+ link->cur_link_settings.link_rate,
+ link->cur_link_settings.link_spread);
+- rd_buf_ptr = rd_buf_ptr + str_len;
++ rd_buf_ptr += str_len;
+
+ str_len = strlen("Verified: %d %d %d ");
+ snprintf(rd_buf_ptr, str_len, "Verified: %d %d %d ",
+ link->verified_link_cap.lane_count,
+ link->verified_link_cap.link_rate,
+ link->verified_link_cap.link_spread);
+- rd_buf_ptr = rd_buf_ptr + str_len;
++ rd_buf_ptr += str_len;
+
+ str_len = strlen("Reported: %d %d %d ");
+ snprintf(rd_buf_ptr, str_len, "Reported: %d %d %d ",
+ link->reported_link_cap.lane_count,
+ link->reported_link_cap.link_rate,
+ link->reported_link_cap.link_spread);
+- rd_buf_ptr = rd_buf_ptr + str_len;
++ rd_buf_ptr += str_len;
+
+ str_len = strlen("Preferred: %d %d %d ");
+- snprintf(rd_buf_ptr, str_len, "Preferred: %d %d %d ",
++ snprintf(rd_buf_ptr, str_len, "Preferred: %d %d %d\n",
+ link->preferred_link_setting.lane_count,
+ link->preferred_link_setting.link_rate,
+ link->preferred_link_setting.link_spread);
+
+- r = copy_to_user(buf, rd_buf, rd_buf_size);
++ while (size) {
++ if (*pos >= rd_buf_size)
++ break;
+
+- bytes_to_user = rd_buf_size - r;
++ r = put_user(*(rd_buf + result), buf);
++ if (r)
++ return r; /* r = -EFAULT */
+
+- if (r > rd_buf_size) {
+- bytes_to_user = 0;
+- DRM_DEBUG_DRIVER("data not copy to user");
++ buf += 1;
++ size -= 1;
++ *pos += 1;
++ result += 1;
+ }
+
+ kfree(rd_buf);
+- return bytes_to_user;
++ return result;
+ }
+
+ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
+@@ -142,7 +146,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
+ struct dc_link_settings prefer_link_settings;
+ char *wr_buf = NULL;
+ char *wr_buf_ptr = NULL;
+- uint32_t wr_buf_size = 40;
++ const uint32_t wr_buf_size = 40;
+ int r;
+ int bytes_from_user;
+ char *sub_str;
+@@ -153,11 +157,11 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
+ bool valid_input = false;
+
+ if (size == 0)
+- return 0;
++ return -EINVAL;
+
+ wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
+ if (!wr_buf)
+- return 0;
++ return -EINVAL;
+ wr_buf_ptr = wr_buf;
+
+ r = copy_from_user(wr_buf_ptr, buf, wr_buf_size);
+@@ -166,7 +170,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
+ if (r >= wr_buf_size) {
+ kfree(wr_buf);
+ DRM_DEBUG_DRIVER("user data not read\n");
+- return 0;
++ return -EINVAL;
+ }
+
+ bytes_from_user = wr_buf_size - r;
+@@ -181,16 +185,13 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
+ r = kstrtol(sub_str, 16, &param[param_index]);
+
+ if (r)
+- DRM_DEBUG_DRIVER(" -EINVAL convert error happens!\n");
++ DRM_DEBUG_DRIVER("string to int convert error code: %d\n", r);
+
+ param_index++;
+ while (isspace(*wr_buf_ptr))
+ wr_buf_ptr++;
+ }
+
+- DRM_DEBUG_DRIVER("Lane_count: %lx\n", param[0]);
+- DRM_DEBUG_DRIVER("link_rate: %lx\n", param[1]);
+-
+ switch (param[0]) {
+ case LANE_COUNT_ONE:
+ case LANE_COUNT_TWO:
+@@ -213,9 +214,10 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
+ break;
+ }
+
+- if (!valid_input) {
++ if (!valid_input || (param[0] > link->reported_link_cap.lane_count) ||
++ (param[1] > link->reported_link_cap.link_rate)) {
+ kfree(wr_buf);
+- DRM_DEBUG_DRIVER("Invalid Input value exceed No HW will be programmed\n");
++ DRM_DEBUG_DRIVER("Invalid Input value No HW will be programmed\n");
+ return bytes_from_user;
+ }
+
+@@ -229,36 +231,190 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
+ dc_link_set_preferred_link_settings(dc, &prefer_link_settings, link);
+
+ kfree(wr_buf);
+-
+ return bytes_from_user;
+ }
+
+-static ssize_t dp_voltage_swing_debugfs_read(struct file *f, char __user *buf,
++/* function: get current DP PHY settings: voltage swing, pre-emphasis,
++ * post-cursor2 (defined by VESA DP specification)
++ *
++ * valid values
++ * voltage swing: 0,1,2,3
++ * pre-emphasis : 0,1,2,3
++ * post cursor2 : 0,1,2,3
++ *
++ *
++ * how to use this debugfs
++ *
++ * debugfs is located at /sys/kernel/debug/dri/0/DP-x
++ *
++ * there will be directories, like DP-1, DP-2,DP-3, etc. for DP display
++ *
++ * To figure out which DP-x is the display for DP to be check,
++ * cd DP-x
++ * ls -ll
++ * There should be debugfs file, like link_settings, phy_settings.
++ * cat link_settings
++ * from lane_count, link_rate to figure which DP-x is for display to be worked
++ * on
++ *
++ * To get current DP PHY settings,
++ * cat phy_settings
++ *
++ * To change DP PHY settings,
++ * echo <voltage_swing> <pre-emphasis> <post_cursor2> > phy_settings
++ * for example, to change voltage swing to 2, pre-emphasis to 3, post_cursor2 to
++ * 0,
++ * echo 2 3 0 > phy_settings
++ *
++ * To check if change be applied, get current phy settings by
++ * cat phy_settings
++ *
++ * In case invalid values are set by user, like
++ * echo 1 4 0 > phy_settings
++ *
++ * HW will NOT be programmed by these settings.
++ * cat phy_settings will show the previous valid settings.
++ */
++static ssize_t dp_phy_settings_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+ {
+- /* TODO: create method to read voltage swing */
+- return 1;
+-}
++ struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
++ struct dc_link *link = connector->dc_link;
++ char *rd_buf = NULL;
++ const uint32_t rd_buf_size = 20;
++ uint32_t result = 0;
++ int r;
+
+-static ssize_t dp_voltage_swing_debugfs_write(struct file *f, const char __user *buf,
+- size_t size, loff_t *pos)
+-{
+- /* TODO: create method to write voltage swing */
+- return 1;
+-}
++ if (*pos & 3 || size & 3)
++ return -EINVAL;
+
+-static ssize_t dp_pre_emphasis_debugfs_read(struct file *f, char __user *buf,
+- size_t size, loff_t *pos)
+-{
+- /* TODO: create method to read pre-emphasis */
+- return 1;
++ rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
++ if (!rd_buf)
++ return -EINVAL;
++
++ snprintf(rd_buf, rd_buf_size, " %d %d %d ",
++ link->cur_lane_setting.VOLTAGE_SWING,
++ link->cur_lane_setting.PRE_EMPHASIS,
++ link->cur_lane_setting.POST_CURSOR2);
++
++ while (size) {
++ if (*pos >= rd_buf_size)
++ break;
++
++ r = put_user((*(rd_buf + result)), buf);
++ if (r)
++ return r; /* r = -EFAULT */
++
++ buf += 1;
++ size -= 1;
++ *pos += 1;
++ result += 1;
++ }
++
++ kfree(rd_buf);
++ return result;
+ }
+
+-static ssize_t dp_pre_emphasis_debugfs_write(struct file *f, const char __user *buf,
++static ssize_t dp_phy_settings_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+ {
+- /* TODO: create method to write pre-emphasis */
+- return 1;
++ struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
++ struct dc_link *link = connector->dc_link;
++ struct dc *dc = (struct dc *)link->dc;
++ char *wr_buf = NULL;
++ char *wr_buf_ptr = NULL;
++ uint32_t wr_buf_size = 40;
++ int r;
++ int bytes_from_user;
++ char *sub_str;
++ uint8_t param_index = 0;
++ long param[3];
++ const char delimiter[3] = {' ', '\n', '\0'};
++ bool use_prefer_link_setting;
++ struct link_training_settings link_lane_settings;
++
++ if (size == 0)
++ return 0;
++
++ wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
++ if (!wr_buf)
++ return 0;
++ wr_buf_ptr = wr_buf;
++
++ r = copy_from_user(wr_buf_ptr, buf, wr_buf_size);
++
++ /* r is bytes not be copied */
++ if (r >= wr_buf_size) {
++ kfree(wr_buf);
++ DRM_DEBUG_DRIVER("user data not be read\n");
++ return 0;
++ }
++
++ bytes_from_user = wr_buf_size - r;
++
++ while (isspace(*wr_buf_ptr))
++ wr_buf_ptr++;
++
++ while ((*wr_buf_ptr != '\0') && (param_index < 3)) {
++
++ sub_str = strsep(&wr_buf_ptr, delimiter);
++
++ r = kstrtol(sub_str, 16, &param[param_index]);
++
++ if (r)
++ DRM_DEBUG_DRIVER("string to int convert error code: %d\n", r);
++
++ param_index++;
++ while (isspace(*wr_buf_ptr))
++ wr_buf_ptr++;
++ }
++
++ if ((param[0] > VOLTAGE_SWING_MAX_LEVEL) ||
++ (param[1] > PRE_EMPHASIS_MAX_LEVEL) ||
++ (param[2] > POST_CURSOR2_MAX_LEVEL)) {
++ kfree(wr_buf);
++ DRM_DEBUG_DRIVER("Invalid Input No HW will be programmed\n");
++ return bytes_from_user;
++ }
++
++ /* get link settings: lane count, link rate */
++ use_prefer_link_setting =
++ ((link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN) &&
++ (link->test_pattern_enabled));
++
++ memset(&link_lane_settings, 0, sizeof(link_lane_settings));
++
++ if (use_prefer_link_setting) {
++ link_lane_settings.link_settings.lane_count =
++ link->preferred_link_setting.lane_count;
++ link_lane_settings.link_settings.link_rate =
++ link->preferred_link_setting.link_rate;
++ link_lane_settings.link_settings.link_spread =
++ link->preferred_link_setting.link_spread;
++ } else {
++ link_lane_settings.link_settings.lane_count =
++ link->cur_link_settings.lane_count;
++ link_lane_settings.link_settings.link_rate =
++ link->cur_link_settings.link_rate;
++ link_lane_settings.link_settings.link_spread =
++ link->cur_link_settings.link_spread;
++ }
++
++ /* apply phy settings from user */
++ for (r = 0; r < link_lane_settings.link_settings.lane_count; r++) {
++ link_lane_settings.lane_settings[r].VOLTAGE_SWING =
++ (enum dc_voltage_swing) (param[0]);
++ link_lane_settings.lane_settings[r].PRE_EMPHASIS =
++ (enum dc_pre_emphasis) (param[1]);
++ link_lane_settings.lane_settings[r].POST_CURSOR2 =
++ (enum dc_post_cursor2) (param[2]);
++ }
++
++ /* program ASIC registers and DPCD registers */
++ dc_link_set_drive_settings(dc, &link_lane_settings, link);
++
++ kfree(wr_buf);
++ return bytes_from_user;
+ }
+
+ /* function description
+@@ -483,17 +639,10 @@ static const struct file_operations dp_link_settings_debugfs_fops = {
+ .llseek = default_llseek
+ };
+
+-static const struct file_operations dp_voltage_swing_fops = {
+- .owner = THIS_MODULE,
+- .read = dp_voltage_swing_debugfs_read,
+- .write = dp_voltage_swing_debugfs_write,
+- .llseek = default_llseek
+-};
+-
+-static const struct file_operations dp_pre_emphasis_fops = {
++static const struct file_operations dp_phy_settings_debugfs_fop = {
+ .owner = THIS_MODULE,
+- .read = dp_pre_emphasis_debugfs_read,
+- .write = dp_pre_emphasis_debugfs_write,
++ .read = dp_phy_settings_read,
++ .write = dp_phy_settings_write,
+ .llseek = default_llseek
+ };
+
+@@ -508,8 +657,7 @@ static const struct {
+ const struct file_operations *fops;
+ } dp_debugfs_entries[] = {
+ {"link_settings", &dp_link_settings_debugfs_fops},
+- {"voltage_swing", &dp_voltage_swing_fops},
+- {"pre_emphasis", &dp_pre_emphasis_fops},
++ {"phy_settings", &dp_phy_settings_debugfs_fop},
+ {"test_pattern", &dp_phy_test_pattern_fops}
+ };
+
+--
+2.7.4
+
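A hedged userspace sketch of the phy_settings file added above: read back the current "<voltage swing> <pre-emphasis> <post-cursor2>" triple, then request 2 3 0 as in the commit message. The DP-1 path is an assumption, and the read size is kept a multiple of four because the read handler rejects other sizes:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical connector directory; check link_settings to find yours */
	const char *path = "/sys/kernel/debug/dri/0/DP-1/phy_settings";
	char cur[40] = {0};
	char cmd[64] = "2 3 0\n";   /* swing 2, pre-emphasis 3, post-cursor2 0 */
	int fd = open(path, O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (read(fd, cur, 32) > 0)  /* read size must be a multiple of 4 */
		printf("current phy settings: %s\n", cur);
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write");
	close(fd);
	return 0;
}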
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4919-drm-amd-display-Fix-compile-error-on-older-GCC-versi.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4919-drm-amd-display-Fix-compile-error-on-older-GCC-versi.patch
new file mode 100644
index 00000000..a117438f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4919-drm-amd-display-Fix-compile-error-on-older-GCC-versi.patch
@@ -0,0 +1,39 @@
+From 2ad3a5dc171e8c5fabcc7130026451255a94ea41 Mon Sep 17 00:00:00 2001
+From: "Leo (Sunpeng) Li" <sunpeng.li@amd.com>
+Date: Mon, 18 Jun 2018 12:23:03 -0400
+Subject: [PATCH 4919/5725] drm/amd/display: Fix compile error on older GCC
+ versions
+
+GCC 4.9 reports a 'missing braces around initializer' error. This is a
+bug, documented here:
+
+https://gcc.gnu.org/bugzilla/show_bug.cgi?id=53119
+
+Fix it by adding another brace.
+
+Signed-off-by: Leo (Sunpeng) Li <sunpeng.li@amd.com>
+Reviewed-by: Mikita Lipski <Mikita.Lipski@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+index 0193bc5..4188dbd 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+@@ -514,8 +514,8 @@ enum dc_edid_status dm_helpers_read_local_edid(
+ edid_status,
+ aconnector->base.name);
+ if (link->aux_mode) {
+- union test_request test_request = {0};
+- union test_response test_response = {0};
++ union test_request test_request = { {0} };
++ union test_response test_response = { {0} };
+
+ dm_helpers_dp_read_dpcd(ctx,
+ link,
+--
+2.7.4
+
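A minimal reproduction, outside the DC code, of the GCC 4.9 -Wmissing-braces false positive the patch above works around: when a union's first member is itself a struct, "= {0}" draws the warning on old GCC even though the initializer is valid C, while the doubled braces keep every compiler quiet:

#include <stdint.h>

union test_request_example {
	struct {
		uint8_t link_training : 1;
		uint8_t pattern       : 1;
		uint8_t reserved      : 6;
	} bits;
	uint8_t raw;
};

int main(void)
{
	union test_request_example a = {0};     /* warns on GCC 4.9 with -Wmissing-braces */
	union test_request_example b = { {0} }; /* extra inner braces: no warning anywhere */

	return a.raw + b.raw;
}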
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4920-drm-amd-display-add-missing-mask-for-dcn.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4920-drm-amd-display-add-missing-mask-for-dcn.patch
new file mode 100644
index 00000000..e2983e88
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4920-drm-amd-display-add-missing-mask-for-dcn.patch
@@ -0,0 +1,31 @@
+From b718ffe058b73c791a32524f29a3588a0cbbd33e Mon Sep 17 00:00:00 2001
+From: Charlene Liu <charlene.liu@amd.com>
+Date: Thu, 21 Jun 2018 17:57:51 -0400
+Subject: [PATCH 4920/5725] drm/amd/display: add missing mask for dcn
+
+Signed-off-by: Charlene Liu <charlene.liu@amd.com>
+Reviewed-by: Wesley Chalmers <Wesley.Chalmers@amd.com>
+Reviewed-by: Duke Du <Duke.Du@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+index df3203a..64dc753 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+@@ -364,7 +364,8 @@ struct dce_hwseq_registers {
+ HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, OTG0_),\
+ HWS_SF1(OTG0_, PHYPLL_PIXEL_RATE_CNTL, PHYPLL_PIXEL_RATE_SOURCE, mask_sh), \
+ HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, mask_sh), \
+- HWS_SF(, DCFCLK_CNTL, DCFCLK_GATE_DIS, mask_sh)
++ HWS_SF(, DCFCLK_CNTL, DCFCLK_GATE_DIS, mask_sh), \
++ HWS_SF(, DC_MEM_GLOBAL_PWR_REQ_CNTL, DC_MEM_GLOBAL_PWR_REQ_DIS, mask_sh)
+
+ #define HWSEQ_DCN1_MASK_SH_LIST(mask_sh)\
+ HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4921-drm-amd-display-set-default-GPIO_ID_HPD.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4921-drm-amd-display-set-default-GPIO_ID_HPD.patch
new file mode 100644
index 00000000..049c8a68
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4921-drm-amd-display-set-default-GPIO_ID_HPD.patch
@@ -0,0 +1,28 @@
+From 87243c523fd40b98506955ca8680e5f0a2477c38 Mon Sep 17 00:00:00 2001
+From: Charlene Liu <charlene.liu@amd.com>
+Date: Thu, 21 Jun 2018 21:32:36 -0400
+Subject: [PATCH 4921/5725] drm/amd/display: set default GPIO_ID_HPD
+
+Signed-off-by: Charlene Liu <charlene.liu@amd.com>
+Reviewed-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
+index 80038e0..f06d05a 100644
+--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
++++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
+@@ -374,6 +374,7 @@ struct gpio *dal_gpio_create_irq(
+ case GPIO_ID_GPIO_PAD:
+ break;
+ default:
++ id = GPIO_ID_HPD;
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4922-drm-amd-display-add-dcn-cursor-hotsport-rotation-and.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4922-drm-amd-display-add-dcn-cursor-hotsport-rotation-and.patch
new file mode 100644
index 00000000..80c846f4
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4922-drm-amd-display-add-dcn-cursor-hotsport-rotation-and.patch
@@ -0,0 +1,106 @@
+From 055620c6d342d92f94cb4b43ca3b505a7c1a8d39 Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Thu, 21 Jun 2018 13:33:41 -0400
+Subject: [PATCH 4922/5725] drm/amd/display: add dcn cursor hotspot rotation
+ and mirror support
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 1 +
+ .../drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 3 ++-
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 20 +++++++++++++++++---
+ .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 3 ++-
+ 4 files changed, 22 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+index afda2d4..e1c0af7 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+@@ -408,6 +408,7 @@ struct dc_cursor_mi_param {
+ struct fixed31_32 h_scale_ratio;
+ struct fixed31_32 v_scale_ratio;
+ enum dc_rotation_angle rotation;
++ bool mirror;
+ };
+
+ /* IPP related types */
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index 286b6071..298d2ca 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -2788,7 +2788,8 @@ void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx)
+ .viewport = pipe_ctx->plane_res.scl_data.viewport,
+ .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
+ .v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
+- .rotation = pipe_ctx->plane_state->rotation
++ .rotation = pipe_ctx->plane_state->rotation,
++ .mirror = pipe_ctx->plane_state->horizontal_mirror
+ };
+
+ if (pipe_ctx->plane_state->address.type
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+index 617fd30..5c4ad8a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+@@ -1084,8 +1084,10 @@ void hubp1_cursor_set_position(
+ {
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+ int src_x_offset = pos->x - pos->x_hotspot - param->viewport.x;
++ int x_hotspot = pos->x_hotspot;
++ int y_hotspot = pos->y_hotspot;
++ uint32_t dst_x_offset;
+ uint32_t cur_en = pos->enable ? 1 : 0;
+- uint32_t dst_x_offset = (src_x_offset >= 0) ? src_x_offset : 0;
+
+ /*
+ * Guard aganst cursor_set_position() from being called with invalid
+@@ -1097,6 +1099,18 @@ void hubp1_cursor_set_position(
+ if (hubp->curs_attr.address.quad_part == 0)
+ return;
+
++ if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) {
++ src_x_offset = pos->y - pos->y_hotspot - param->viewport.x;
++ y_hotspot = pos->x_hotspot;
++ x_hotspot = pos->y_hotspot;
++ }
++
++ if (param->mirror) {
++ x_hotspot = param->viewport.width - x_hotspot;
++ src_x_offset = param->viewport.x + param->viewport.width - src_x_offset;
++ }
++
++ dst_x_offset = (src_x_offset >= 0) ? src_x_offset : 0;
+ dst_x_offset *= param->ref_clk_khz;
+ dst_x_offset /= param->pixel_clk_khz;
+
+@@ -1124,8 +1138,8 @@ void hubp1_cursor_set_position(
+ CURSOR_Y_POSITION, pos->y);
+
+ REG_SET_2(CURSOR_HOT_SPOT, 0,
+- CURSOR_HOT_SPOT_X, pos->x_hotspot,
+- CURSOR_HOT_SPOT_Y, pos->y_hotspot);
++ CURSOR_HOT_SPOT_X, x_hotspot,
++ CURSOR_HOT_SPOT_Y, y_hotspot);
+
+ REG_SET(CURSOR_DST_OFFSET, 0,
+ CURSOR_DST_X_OFFSET, dst_x_offset);
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 80cb7fd..28dba6a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -2546,7 +2546,8 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
+ .viewport = pipe_ctx->plane_res.scl_data.viewport,
+ .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
+ .v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
+- .rotation = pipe_ctx->plane_state->rotation
++ .rotation = pipe_ctx->plane_state->rotation,
++ .mirror = pipe_ctx->plane_state->horizontal_mirror
+ };
+
+ if (pipe_ctx->plane_state->address.type
+--
+2.7.4
+
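A standalone sketch of the hotspot/offset adjustment hubp1_cursor_set_position() gains above, with plain ints standing in for the DC position and viewport types: 90/270 degree rotation swaps the hotspots and derives the source offset from the y position, and mirroring reflects both across the viewport:

#include <stdbool.h>
#include <stdio.h>

struct cursor_example {
	int src_x_offset;
	int x_hotspot;
	int y_hotspot;
};

static struct cursor_example adjust_cursor(int pos_x, int pos_y,
					   int hot_x, int hot_y,
					   int vp_x, int vp_width,
					   bool rotated_90_or_270, bool mirror)
{
	struct cursor_example c = {
		.src_x_offset = pos_x - hot_x - vp_x,
		.x_hotspot = hot_x,
		.y_hotspot = hot_y,
	};

	if (rotated_90_or_270) {
		/* same swap as the hunk: offset comes from y, hotspots exchange */
		c.src_x_offset = pos_y - hot_y - vp_x;
		c.y_hotspot = hot_x;
		c.x_hotspot = hot_y;
	}

	if (mirror) {
		/* reflect across the viewport, as in the mirror branch */
		c.x_hotspot = vp_width - c.x_hotspot;
		c.src_x_offset = vp_x + vp_width - c.src_x_offset;
	}

	return c;
}

int main(void)
{
	struct cursor_example c = adjust_cursor(100, 50, 8, 8, 0, 1920, false, true);

	printf("offset %d, hotspot (%d,%d)\n", c.src_x_offset, c.x_hotspot, c.y_hotspot);
	return 0;
}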
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4923-drm-amd-display-expose-dcn10_aux_initialize-in-heade.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4923-drm-amd-display-expose-dcn10_aux_initialize-in-heade.patch
new file mode 100644
index 00000000..a75f1c47
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4923-drm-amd-display-expose-dcn10_aux_initialize-in-heade.patch
@@ -0,0 +1,64 @@
+From 39916fb37613175bc9df6534c683ef2b25402dce Mon Sep 17 00:00:00 2001
+From: Yongqiang Sun <yongqiang.sun@amd.com>
+Date: Mon, 25 Jun 2018 00:18:54 +0800
+Subject: [PATCH 4923/5725] drm/amd/display: expose dcn10_aux_initialize in
+ header
+
+Signed-off-by: Yongqiang Sun <yongqiang.sun@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c | 10 ++--------
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h | 2 ++
+ 2 files changed, 4 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+index 18a7cac..be78ccb 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+@@ -65,11 +65,6 @@ enum {
+ DP_MST_UPDATE_MAX_RETRY = 50
+ };
+
+-
+-
+-static void aux_initialize(struct dcn10_link_encoder *enc10);
+-
+-
+ static const struct link_encoder_funcs dcn10_lnk_enc_funcs = {
+ .validate_output_with_stream =
+ dcn10_link_encoder_validate_output_with_stream,
+@@ -811,7 +806,7 @@ void dcn10_link_encoder_hw_init(
+ ASSERT(result == BP_RESULT_OK);
+
+ }
+- aux_initialize(enc10);
++ dcn10_aux_initialize(enc10);
+
+ /* reinitialize HPD.
+ * hpd_initialize() will pass DIG_FE id to HW context.
+@@ -1348,8 +1343,7 @@ void dcn10_link_encoder_disable_hpd(struct link_encoder *enc)
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2)
+
+-static void aux_initialize(
+- struct dcn10_link_encoder *enc10)
++void dcn10_aux_initialize(struct dcn10_link_encoder *enc10)
+ {
+ enum hpd_source_id hpd_source = enc10->base.hpd_source;
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
+index cd3bb5d..49ead12 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
+@@ -336,4 +336,6 @@ void dcn10_psr_program_secondary_packet(struct link_encoder *enc,
+
+ bool dcn10_is_dig_enabled(struct link_encoder *enc);
+
++void dcn10_aux_initialize(struct dcn10_link_encoder *enc10);
++
+ #endif /* __DC_LINK_ENCODER__DCN10_H__ */
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4924-drm-amd-display-Linux-hook-test-pattern-through-debu.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4924-drm-amd-display-Linux-hook-test-pattern-through-debu.patch
new file mode 100644
index 00000000..0b56bf0a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4924-drm-amd-display-Linux-hook-test-pattern-through-debu.patch
@@ -0,0 +1,134 @@
+From 7bad2c131c178dd0937d8fd1480c5eb42d4fc8db Mon Sep 17 00:00:00 2001
+From: Hersen Wu <hersenxs.wu@amd.com>
+Date: Fri, 22 Jun 2018 13:06:01 -0400
+Subject: [PATCH 4924/5725] drm/amd/display: Linux hook test pattern through
+ debugfs
+
+Bug fix: the PLTPAT phy test is a special 80-bit test pattern. The 80-bit
+data should be hard coded within the driver so that the user does not
+need to input the data. The previous driver did not have hard coded
+80-bit pattern data for PLTPAT. Other than PLTPAT, the user
+has to input the 80-bit pattern data. In case the user inputs less than
+10 bytes of data, the missing data bytes will be filled with 0x00.
+
+Signed-off-by: Hersen Wu <hersenxs.wu@amd.com>
+Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 70 +++++++++++++++++-----
+ 1 file changed, 55 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+index f20ba9d..0276e09 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+@@ -483,16 +483,22 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
+ char *wr_buf = NULL;
+ char *wr_buf_ptr = NULL;
+ uint32_t wr_buf_size = 100;
++ uint32_t wr_buf_count = 0;
+ int r;
+ int bytes_from_user;
+- char *sub_str;
++ char *sub_str = NULL;
+ uint8_t param_index = 0;
+- long param[11];
++ uint8_t param_nums = 0;
++ long param[11] = {0x0};
+ const char delimiter[3] = {' ', '\n', '\0'};
+ enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
+ bool disable_hpd = false;
+ bool valid_test_pattern = false;
+- uint8_t custom_pattern[10] = {0};
++ /* init with default 80-bit custom pattern */
++ uint8_t custom_pattern[10] = {
++ 0x1f, 0x7c, 0xf0, 0xc1, 0x07,
++ 0x1f, 0x7c, 0xf0, 0xc1, 0x07
++ };
+ struct dc_link_settings prefer_link_settings = {LANE_COUNT_UNKNOWN,
+ LINK_RATE_UNKNOWN, LINK_SPREAD_DISABLED};
+ struct dc_link_settings cur_link_settings = {LANE_COUNT_UNKNOWN,
+@@ -519,25 +525,51 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
+
+ bytes_from_user = wr_buf_size - r;
+
+- while (isspace(*wr_buf_ptr))
++ /* check number of parameters. isspace could not differ space and \n */
++ while ((*wr_buf_ptr != 0xa) && (wr_buf_count < wr_buf_size)) {
++ /* skip space*/
++ while (isspace(*wr_buf_ptr) && (wr_buf_count < wr_buf_size)) {
++ wr_buf_ptr++;
++ wr_buf_count++;
++ }
++
++ if (wr_buf_count == wr_buf_size)
++ break;
++
++ /* skip non-space*/
++ while ((!isspace(*wr_buf_ptr)) && (wr_buf_count < wr_buf_size)) {
++ wr_buf_ptr++;
++ wr_buf_count++;
++ }
++
++ param_nums++;
++
++ if (wr_buf_count == wr_buf_size)
++ break;
++ }
++
++ /* max 11 parameters */
++ if (param_nums > 11)
++ param_nums = 11;
++
++ wr_buf_ptr = wr_buf; /* reset buf pointer */
++ wr_buf_count = 0; /* number of char already checked */
++
++ while (isspace(*wr_buf_ptr) && (wr_buf_count < wr_buf_size)) {
+ wr_buf_ptr++;
++ wr_buf_count++;
++ }
+
+- while ((*wr_buf_ptr != '\0') && (param_index < 1)) {
++ while (param_index < param_nums) {
++ /* after strsep, wr_buf_ptr will be moved to after space */
+ sub_str = strsep(&wr_buf_ptr, delimiter);
++
+ r = kstrtol(sub_str, 16, &param[param_index]);
+
+ if (r)
+ DRM_DEBUG_DRIVER("string to int convert error code: %d\n", r);
+
+ param_index++;
+- while (isspace(*wr_buf_ptr))
+- wr_buf_ptr++;
+-
+- /* DP_TEST_PATTERN_80BIT_CUSTOM need extra 80 bits
+- * whci are 10 bytes separte by space
+- */
+- if (param[0] != 0x4)
+- break;
+ }
+
+ test_pattern = param[0];
+@@ -575,8 +607,16 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
+ }
+
+ if (test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM) {
+- for (i = 0; i < 10; i++)
+- custom_pattern[i] = (uint8_t) param[i + 1];
++ for (i = 0; i < 10; i++) {
++ if ((uint8_t) param[i + 1] != 0x0)
++ break;
++ }
++
++ if (i < 10) {
++ /* not use default value */
++ for (i = 0; i < 10; i++)
++ custom_pattern[i] = (uint8_t) param[i + 1];
++ }
+ }
+
+ /* Usage: set DP physical test pattern using debugfs with normal DP
+--
+2.7.4
+
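A standalone sketch of the parameter-counting pass the patch above adds before any kstrtol() conversion: walk the buffer once, alternating between whitespace runs and token runs, so the number of user-supplied values (capped at 11) is known up front. ctype.h stands in for the kernel's isspace():

#include <ctype.h>
#include <stdio.h>
#include <string.h>

static unsigned int count_params(const char *buf, size_t buf_size)
{
	unsigned int nums = 0;
	size_t i = 0;

	while (i < buf_size && buf[i] != '\n' && buf[i] != '\0') {
		/* skip a run of whitespace */
		while (i < buf_size && buf[i] != '\n' && isspace((unsigned char)buf[i]))
			i++;
		if (i == buf_size || buf[i] == '\n' || buf[i] == '\0')
			break;
		/* skip one token */
		while (i < buf_size && buf[i] != '\0' && !isspace((unsigned char)buf[i]))
			i++;
		nums++;
	}
	return nums > 11 ? 11 : nums;	/* max 11 parameters */
}

int main(void)
{
	const char *cmd = "0x4 0x11 0x22 0x33 0x44 0x55 0x66 0x77 0x88 0x99 0xaa\n";

	printf("%u parameters\n", count_params(cmd, strlen(cmd)));
	return 0;
}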
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4925-drm-amd-display-dal-3.1.54.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4925-drm-amd-display-dal-3.1.54.patch
new file mode 100644
index 00000000..a1235397
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4925-drm-amd-display-dal-3.1.54.patch
@@ -0,0 +1,29 @@
+From fc473cd5f92b5afb5e596378c6cbbe37790df3e4 Mon Sep 17 00:00:00 2001
+From: Tony Cheng <tony.cheng@amd.com>
+Date: Tue, 5 Jun 2018 09:15:15 -0400
+Subject: [PATCH 4925/5725] drm/amd/display: dal 3.1.54
+
+Signed-off-by: Tony Cheng <tony.cheng@amd.com>
+Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index e382582..22265d1 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -38,7 +38,7 @@
+ #include "inc/compressor.h"
+ #include "dml/display_mode_lib.h"
+
+-#define DC_VER "3.1.53"
++#define DC_VER "3.1.54"
+
+ #define MAX_SURFACES 3
+ #define MAX_STREAMS 6
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4926-drm-amd-display-Add-YCbCr420-only-support-for-HDMI-4.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4926-drm-amd-display-Add-YCbCr420-only-support-for-HDMI-4.patch
new file mode 100644
index 00000000..986e7a8c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4926-drm-amd-display-Add-YCbCr420-only-support-for-HDMI-4.patch
@@ -0,0 +1,53 @@
+From 85217e9dc63056667627919b38a28b82b622601c Mon Sep 17 00:00:00 2001
+From: "Jerry (Fangzhi) Zuo" <Jerry.Zuo@amd.com>
+Date: Fri, 22 Jun 2018 17:12:47 -0400
+Subject: [PATCH 4926/5725] drm/amd/display: Add YCbCr420 only support for HDMI
+ 4K@60
+
+[Why]
+Some monitors expose a 4K@60-capable HDMI port with only a 300 MHz TMDS
+maximum, yet the EDID still lists a 4K@60 mode in the CEA extension block.
+
+[How]
+To enable 4K@60, the bandwidth must be limited by allowing YCbCr420-only
+mode. Add YCbCr420-only support for monitors that do not fully support
+HDMI 2.0, e.g. the ASUS PA328. The YCbCr420-only support applies to
+DCN and to DCE112 or higher.
+
+Signed-off-by: Jerry (Fangzhi) Zuo <Jerry.Zuo@amd.com>
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 1d7504a..d52d004 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -3829,7 +3829,6 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
+ aconnector->base.stereo_allowed = false;
+ aconnector->base.dpms = DRM_MODE_DPMS_OFF;
+ aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
+-
+ mutex_init(&aconnector->hpd_lock);
+
+ /* configure support HPD hot plug connector_>polled default value is 0
+@@ -3838,9 +3837,13 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
+ switch (connector_type) {
+ case DRM_MODE_CONNECTOR_HDMIA:
+ aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
++ aconnector->base.ycbcr_420_allowed =
++ link->link_enc->features.ycbcr420_supported ? true : false;
+ break;
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
++ aconnector->base.ycbcr_420_allowed =
++ link->link_enc->features.ycbcr420_supported ? true : false;
+ break;
+ case DRM_MODE_CONNECTOR_DVID:
+ aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+--
+2.7.4
+
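For reference, the numbers behind the [Why] above can be sanity-checked with a small calculation (illustrative only, not part of the patch series): CTA-861 defines 3840x2160@60 with a 594 MHz pixel clock, and carrying it as YCbCr 4:2:0 halves the TMDS character rate, which is what brings 4K@60 back under a ~300-340 MHz HDMI 1.4-class limit.

#include <stdio.h>

int main(void)
{
	const unsigned int pix_clk_khz_4k60 = 594000; /* 3840x2160@60, RGB / YCbCr 4:4:4 */
	const unsigned int tmds_limit_khz = 340000;   /* HDMI 1.4-class TMDS ceiling */
	unsigned int ycbcr420_khz = pix_clk_khz_4k60 / 2; /* 4:2:0 halves the character rate */

	printf("4:4:4 needs %u kHz, 4:2:0 needs %u kHz (limit %u kHz)\n",
	       pix_clk_khz_4k60, ycbcr420_khz, tmds_limit_khz);
	return 0;
}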
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4927-drm-amd-display-Expose-bunch-of-functions-from-dcn10.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4927-drm-amd-display-Expose-bunch-of-functions-from-dcn10.patch
new file mode 100644
index 00000000..4d47ca1a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4927-drm-amd-display-Expose-bunch-of-functions-from-dcn10.patch
@@ -0,0 +1,173 @@
+From 7d608966251253c5424a41d644d10e7b4b81749c Mon Sep 17 00:00:00 2001
+From: Eric Bernstein <eric.bernstein@amd.com>
+Date: Wed, 16 May 2018 16:19:50 -0400
+Subject: [PATCH 4927/5725] drm/amd/display: Expose bunch of functions from
+ dcn10_hw_sequencer
+
+v2: Remove spurious newline changes
+
+Signed-off-by: Eric Bernstein <eric.bernstein@amd.com>
+Reviewed-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 59 +++++++++++++---------
+ .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h | 7 +++
+ drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 8 +++
+ 3 files changed, 49 insertions(+), 25 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 28dba6a..06cf967 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -849,7 +849,7 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc)
+ }
+
+
+-static void dcn10_verify_allow_pstate_change_high(struct dc *dc)
++void dcn10_verify_allow_pstate_change_high(struct dc *dc)
+ {
+ static bool should_log_hw_state; /* prevent hw state log by default */
+
+@@ -1863,8 +1863,7 @@ static void update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
+ dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
+ }
+
+-
+-static void update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
++static void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
+ {
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ struct mpcc_blnd_cfg blnd_cfg;
+@@ -2009,7 +2008,7 @@ static void update_dchubp_dpp(
+
+ if (plane_state->update_flags.bits.full_update ||
+ plane_state->update_flags.bits.per_pixel_alpha_change)
+- update_mpcc(dc, pipe_ctx);
++ dc->hwss.update_mpcc(dc, pipe_ctx);
+
+ if (plane_state->update_flags.bits.full_update ||
+ plane_state->update_flags.bits.per_pixel_alpha_change ||
+@@ -2119,6 +2118,33 @@ static void set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
+ pipe_ctx->plane_res.dpp, hw_mult);
+ }
+
++void dcn10_program_pipe(
++ struct dc *dc,
++ struct pipe_ctx *pipe_ctx,
++ struct dc_state *context)
++{
++ if (pipe_ctx->plane_state->update_flags.bits.full_update)
++ dcn10_enable_plane(dc, pipe_ctx, context);
++
++ update_dchubp_dpp(dc, pipe_ctx, context);
++
++ set_hdr_multiplier(pipe_ctx);
++
++ if (pipe_ctx->plane_state->update_flags.bits.full_update ||
++ pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
++ pipe_ctx->plane_state->update_flags.bits.gamma_change)
++ dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state);
++
++ /* dcn10_translate_regamma_to_hw_format takes 750us to finish
++ * only do gamma programming for full update.
++ * TODO: This can be further optimized/cleaned up
++ * Always call this for now since it does memcmp inside before
++ * doing heavy calculation and programming
++ */
++ if (pipe_ctx->plane_state->update_flags.bits.full_update)
++ dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);
++}
++
+ static void program_all_pipe_in_tree(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+@@ -2140,26 +2166,7 @@ static void program_all_pipe_in_tree(
+ }
+
+ if (pipe_ctx->plane_state != NULL) {
+- if (pipe_ctx->plane_state->update_flags.bits.full_update)
+- dcn10_enable_plane(dc, pipe_ctx, context);
+-
+- update_dchubp_dpp(dc, pipe_ctx, context);
+-
+- set_hdr_multiplier(pipe_ctx);
+-
+- if (pipe_ctx->plane_state->update_flags.bits.full_update ||
+- pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
+- pipe_ctx->plane_state->update_flags.bits.gamma_change)
+- dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state);
+-
+- /* dcn10_translate_regamma_to_hw_format takes 750us to finish
+- * only do gamma programming for full update.
+- * TODO: This can be further optimized/cleaned up
+- * Always call this for now since it does memcmp inside before
+- * doing heavy calculation and programming
+- */
+- if (pipe_ctx->plane_state->update_flags.bits.full_update)
+- dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);
++ dcn10_program_pipe(dc, pipe_ctx, context);
+ }
+
+ if (pipe_ctx->bottom_pipe != NULL && pipe_ctx->bottom_pipe != pipe_ctx) {
+@@ -2284,7 +2291,7 @@ static void dcn10_apply_ctx_for_surface(
+ old_pipe_ctx->plane_state &&
+ old_pipe_ctx->stream_res.tg == tg) {
+
+- hwss1_plane_atomic_disconnect(dc, old_pipe_ctx);
++ dc->hwss.plane_atomic_disconnect(dc, old_pipe_ctx);
+ removed_pipe[i] = true;
+
+ DC_LOG_DC("Reset mpcc for pipe %d\n",
+@@ -2578,7 +2585,9 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
+ .apply_ctx_to_hw = dce110_apply_ctx_to_hw,
+ .apply_ctx_for_surface = dcn10_apply_ctx_for_surface,
+ .update_plane_addr = dcn10_update_plane_addr,
++ .plane_atomic_disconnect = hwss1_plane_atomic_disconnect,
+ .update_dchub = dcn10_update_dchub,
++ .update_mpcc = dcn10_update_mpcc,
+ .update_pending_status = dcn10_update_pending_status,
+ .set_input_transfer_func = dcn10_set_input_transfer_func,
+ .set_output_transfer_func = dcn10_set_output_transfer_func,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+index 44f734b..7139fb7 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+@@ -39,4 +39,11 @@ bool is_rgb_cspace(enum dc_color_space output_color_space);
+
+ void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx);
+
++void dcn10_verify_allow_pstate_change_high(struct dc *dc);
++
++void dcn10_program_pipe(
++ struct dc *dc,
++ struct pipe_ctx *pipe_ctx,
++ struct dc_state *context);
++
+ #endif /* __DC_HWSS_DCN10_H__ */
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+index 2506601..c2277d1 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+@@ -102,10 +102,18 @@ struct hw_sequencer_funcs {
+ const struct dc *dc,
+ struct pipe_ctx *pipe_ctx);
+
++ void (*plane_atomic_disconnect)(
++ struct dc *dc,
++ struct pipe_ctx *pipe_ctx);
++
+ void (*update_dchub)(
+ struct dce_hwseq *hws,
+ struct dchub_init_data *dh_data);
+
++ void (*update_mpcc)(
++ struct dc *dc,
++ struct pipe_ctx *pipe_ctx);
++
+ void (*update_pending_status)(
+ struct pipe_ctx *pipe_ctx);
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4928-drm-amd-display-Right-shift-AUX-reply-value-sooner-t.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4928-drm-amd-display-Right-shift-AUX-reply-value-sooner-t.patch
new file mode 100644
index 00000000..fee1e5f0
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4928-drm-amd-display-Right-shift-AUX-reply-value-sooner-t.patch
@@ -0,0 +1,48 @@
+From 9dc856e00a17501f73d503b29d7fb35c1f73429b Mon Sep 17 00:00:00 2001
+From: "Leo (Sunpeng) Li" <sunpeng.li@amd.com>
+Date: Tue, 26 Jun 2018 10:44:05 -0400
+Subject: [PATCH 4928/5725] drm/amd/display: Right shift AUX reply value sooner
+ than later
+
+[Why]
+There is no point in keeping the AUX reply value in the raw format as
+returned from reading the AUX_SW_DATA register.
+
+[How]
+Shift it within read_channel_reply(), where the register is read, before
+returning it.
+
+Signed-off-by: Leo (Sunpeng) Li <sunpeng.li@amd.com>
+Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
+index 1f39406..ae5caa9 100644
+--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
++++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
+@@ -300,9 +300,10 @@ static int read_channel_reply(struct aux_engine *engine, uint32_t size,
+ AUX_SW_DATA_RW, 1);
+
+ REG_GET(AUX_SW_DATA, AUX_SW_DATA, &reply_result_32);
++ reply_result_32 = reply_result_32 >> 4;
+ *reply_result = (uint8_t)reply_result_32;
+
+- if (reply_result_32 >> 4 == 0) { /* ACK */
++ if (reply_result_32 == 0) { /* ACK */
+ uint32_t i = 0;
+
+ /* First byte was already used to get the command status */
+@@ -356,7 +357,6 @@ static void process_channel_reply(
+ return;
+ }
+ } else {
+- reply_result = reply_result >> 4;
+
+ switch (reply_result) {
+ case 0: /* ACK */
+--
+2.7.4
+
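For reference, the shift can move because, on the wire, the native AUX reply code occupies the upper nibble of the first returned byte (0 meaning ACK). A hedged sketch of the extraction the hunk above performs at the register-read site; the helper name is made up for illustration.

#include <stdint.h>
#include <stdio.h>

/* Upper nibble of the first AUX reply byte carries the native reply code;
 * 0 is ACK. Shifting once here lets callers compare plain codes. */
static uint8_t aux_reply_from_raw(uint32_t aux_sw_data_byte0)
{
	return (uint8_t)((aux_sw_data_byte0 >> 4) & 0x0f);
}

int main(void)
{
	uint32_t raw = 0x00; /* e.g. first byte read back from AUX_SW_DATA */

	printf("reply code = %u (%s)\n", aux_reply_from_raw(raw),
	       aux_reply_from_raw(raw) == 0 ? "ACK" : "not ACK");
	return 0;
}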
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4929-drm-amd-display-Read-AUX-channel-even-if-only-status.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4929-drm-amd-display-Read-AUX-channel-even-if-only-status.patch
new file mode 100644
index 00000000..290ce36f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4929-drm-amd-display-Read-AUX-channel-even-if-only-status.patch
@@ -0,0 +1,47 @@
+From aa030c7b66c1ea84ed86779ca7891b86b1616eb8 Mon Sep 17 00:00:00 2001
+From: "Leo (Sunpeng) Li" <sunpeng.li@amd.com>
+Date: Tue, 26 Jun 2018 10:50:16 -0400
+Subject: [PATCH 4929/5725] drm/amd/display: Read AUX channel even if only
+ status byte is returned
+
+[Why]
+get_channel_status() can return 0 in returned_bytes, and report a
+successful operation result. This is because it prunes the first status
+byte out. This was preventing read_channel_reply() from being called
+(due to the faulty condition), and consequently preventing the AUX
+reply status from being set.
+
+[How]
+Fix the conditional so that it accounts for when get_channel_status()
+returns 0 bytes read.
+
+[Fixes]
+Fixes possible EDID read failures during S3 resume, where we are now
+relying on DRM's DP AUX handling. This was a regression introduced by:
+
+ Author: Harry Wentland <harry.wentland@amd.com>
+ drm/amd/display: Return aux replies directly to DRM
+
+Signed-off-by: Leo (Sunpeng) Li <sunpeng.li@amd.com>
+Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+index d108ccf..08c9d73 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+@@ -671,7 +671,7 @@ int dc_link_aux_transfer(struct ddc_service *ddc,
+ case AUX_CHANNEL_OPERATION_SUCCEEDED:
+ res = returned_bytes;
+
+- if (res <= size && res > 0)
++ if (res <= size && res >= 0)
+ res = engine->funcs->read_channel_reply(engine, size,
+ buffer, reply,
+ &status);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4930-drm-amd-display-introduce-concept-of-send_reset_leng.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4930-drm-amd-display-introduce-concept-of-send_reset_leng.patch
new file mode 100644
index 00000000..f27a1527
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4930-drm-amd-display-introduce-concept-of-send_reset_leng.patch
@@ -0,0 +1,157 @@
+From 0e8d1244652d654803df99450455458521a62df8 Mon Sep 17 00:00:00 2001
+From: Charlene Liu <charlene.liu@amd.com>
+Date: Mon, 25 Jun 2018 19:28:54 -0400
+Subject: [PATCH 4930/5725] drm/amd/display: introduce concept of
+ send_reset_length for i2c engines
+
+Signed-off-by: Charlene Liu <charlene.liu@amd.com>
+Reviewed-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc.h | 1 +
+ .../dc/i2caux/dce110/i2c_hw_engine_dce110.c | 26 +++++++++++++---------
+ .../dc/i2caux/dce110/i2c_hw_engine_dce110.h | 8 +++++++
+ .../amd/display/dc/i2caux/dce110/i2caux_dce110.c | 18 ++++++++++++++-
+ drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h | 2 ++
+ 5 files changed, 43 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 22265d1..d3c15de 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -251,6 +251,7 @@ struct dc_debug {
+ bool p010_mpo_support;
+ bool recovery_enabled;
+ bool avoid_vbios_exec_table;
++ bool scl_reset_length10;
+
+ };
+ struct dc_state;
+diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c
+index b7256f5..9cbe1a7 100644
+--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c
++++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c
+@@ -62,12 +62,7 @@ enum dc_i2c_arbitration {
+ DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_HIGH
+ };
+
+-enum {
+- /* No timeout in HW
+- * (timeout implemented in SW by querying status) */
+- I2C_SETUP_TIME_LIMIT = 255,
+- I2C_HW_BUFFER_SIZE = 538
+-};
++
+
+ /*
+ * @brief
+@@ -152,6 +147,11 @@ static bool setup_engine(
+ struct i2c_engine *i2c_engine)
+ {
+ struct i2c_hw_engine_dce110 *hw_engine = FROM_I2C_ENGINE(i2c_engine);
++ uint32_t i2c_setup_limit = I2C_SETUP_TIME_LIMIT_DCE;
++ uint32_t reset_length = 0;
++
++ if (hw_engine->base.base.setup_limit != 0)
++ i2c_setup_limit = hw_engine->base.base.setup_limit;
+
+ /* Program pin select */
+ REG_UPDATE_6(
+@@ -164,11 +164,15 @@ static bool setup_engine(
+ DC_I2C_DDC_SELECT, hw_engine->engine_id);
+
+ /* Program time limit */
+- REG_UPDATE_N(
+- SETUP, 2,
+- FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT), I2C_SETUP_TIME_LIMIT,
+- FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 1);
+-
++ if (hw_engine->base.base.send_reset_length == 0) {
++ /*pre-dcn*/
++ REG_UPDATE_N(
++ SETUP, 2,
++ FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT), i2c_setup_limit,
++ FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 1);
++ } else {
++ reset_length = hw_engine->base.base.send_reset_length;
++ }
+ /* Program HW priority
+ * set to High - interrupt software I2C at any time
+ * Enable restart of SW I2C that was interrupted by HW
+diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h
+index 5bb0408..fea2946 100644
+--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h
++++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h
+@@ -192,6 +192,7 @@ struct i2c_hw_engine_dce110 {
+ /* number of pending transactions (before GO) */
+ uint32_t transaction_count;
+ uint32_t engine_keep_power_up_count;
++ uint32_t i2_setup_time_limit;
+ };
+
+ struct i2c_hw_engine_dce110_create_arg {
+@@ -207,4 +208,11 @@ struct i2c_hw_engine_dce110_create_arg {
+ struct i2c_engine *dal_i2c_hw_engine_dce110_create(
+ const struct i2c_hw_engine_dce110_create_arg *arg);
+
++enum {
++ I2C_SETUP_TIME_LIMIT_DCE = 255,
++ I2C_SETUP_TIME_LIMIT_DCN = 3,
++ I2C_HW_BUFFER_SIZE = 538,
++ I2C_SEND_RESET_LENGTH_9 = 9,
++ I2C_SEND_RESET_LENGTH_10 = 10,
++};
+ #endif
+diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c
+index e0557d3..1d748ac 100644
+--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c
++++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c
+@@ -43,6 +43,9 @@
+ #include "i2c_sw_engine_dce110.h"
+ #include "i2c_hw_engine_dce110.h"
+ #include "aux_engine_dce110.h"
++#include "../../dc.h"
++#include "dc_types.h"
++
+
+ /*
+ * Post-requisites: headers required by this unit
+@@ -250,7 +253,20 @@ void dal_i2caux_dce110_construct(
+
+ base->i2c_hw_engines[line_id] =
+ dal_i2c_hw_engine_dce110_create(&hw_arg_dce110);
+-
++ if (base->i2c_hw_engines[line_id] != NULL) {
++ switch (ctx->dce_version) {
++ case DCN_VERSION_1_0:
++ base->i2c_hw_engines[line_id]->setup_limit =
++ I2C_SETUP_TIME_LIMIT_DCN;
++ base->i2c_hw_engines[line_id]->send_reset_length = 0;
++ break;
++ default:
++ base->i2c_hw_engines[line_id]->setup_limit =
++ I2C_SETUP_TIME_LIMIT_DCE;
++ base->i2c_hw_engines[line_id]->send_reset_length = 0;
++ break;
++ }
++ }
+ ++i;
+ } while (i < num_i2caux_inst);
+
+diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h
+index 58fc0f2..ded6ea3 100644
+--- a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h
++++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h
+@@ -86,6 +86,8 @@ struct i2c_engine {
+ struct engine base;
+ const struct i2c_engine_funcs *funcs;
+ uint32_t timeout_delay;
++ uint32_t setup_limit;
++ uint32_t send_reset_length;
+ };
+
+ void dal_i2c_engine_construct(
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4931-drm-amd-display-add-DalEnableHDMI20-key-support.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4931-drm-amd-display-add-DalEnableHDMI20-key-support.patch
new file mode 100644
index 00000000..bd76d537
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4931-drm-amd-display-add-DalEnableHDMI20-key-support.patch
@@ -0,0 +1,82 @@
+From 1bdb1450b953467166e6914ede6e94b52c13ada2 Mon Sep 17 00:00:00 2001
+From: Charlene Liu <charlene.liu@amd.com>
+Date: Tue, 26 Jun 2018 18:49:32 -0400
+Subject: [PATCH 4931/5725] drm/amd/display: add DalEnableHDMI20 key support
+
+[why]
+"DalEnableHDMI20" set to 0, disallow HDMI YCbCr420 and pixel clock > 340Mhz
+Default is enabled.
+
+Signed-off-by: Charlene Liu <charlene.liu@amd.com>
+Reviewed-by: Jun Lei <Jun.Lei@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc.h | 1 +
+ drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c | 6 ++++++
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c | 6 ++++++
+ 3 files changed, 13 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index d3c15de..a7c880b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -252,6 +252,7 @@ struct dc_debug {
+ bool recovery_enabled;
+ bool avoid_vbios_exec_table;
+ bool scl_reset_length10;
++ bool hdmi20_disable;
+
+ };
+ struct dc_state;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+index dbe3b26..60e3c6a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+@@ -646,6 +646,9 @@ static bool dce110_link_encoder_validate_hdmi_output(
+ if (!enc110->base.features.flags.bits.HDMI_6GB_EN &&
+ adjusted_pix_clk_khz >= 300000)
+ return false;
++ if (enc110->base.ctx->dc->debug.hdmi20_disable &&
++ crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
++ return false;
+ return true;
+ }
+
+@@ -773,6 +776,9 @@ void dce110_link_encoder_construct(
+ __func__,
+ result);
+ }
++ if (enc110->base.ctx->dc->debug.hdmi20_disable) {
++ enc110->base.features.flags.bits.HDMI_6GB_EN = 0;
++ }
+ }
+
+ bool dce110_link_encoder_validate_output_with_stream(
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+index be78ccb..6f67520 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+@@ -596,6 +596,9 @@ static bool dcn10_link_encoder_validate_hdmi_output(
+ if (!enc10->base.features.flags.bits.HDMI_6GB_EN &&
+ adjusted_pix_clk_khz >= 300000)
+ return false;
++ if (enc10->base.ctx->dc->debug.hdmi20_disable &&
++ crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
++ return false;
+ return true;
+ }
+
+@@ -728,6 +731,9 @@ void dcn10_link_encoder_construct(
+ __func__,
+ result);
+ }
++ if (enc10->base.ctx->dc->debug.hdmi20_disable) {
++ enc10->base.features.flags.bits.HDMI_6GB_EN = 0;
++ }
+ }
+
+ bool dcn10_link_encoder_validate_output_with_stream(
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4932-drm-amd-display-add-pp-to-dc-powerlevel-enum-transla.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4932-drm-amd-display-add-pp-to-dc-powerlevel-enum-transla.patch
new file mode 100644
index 00000000..a84f2486
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4932-drm-amd-display-add-pp-to-dc-powerlevel-enum-transla.patch
@@ -0,0 +1,70 @@
+From 058991fab747d124a939ffe67af8f19f18af6dab Mon Sep 17 00:00:00 2001
+From: Mikita Lipski <mikita.lipski@amd.com>
+Date: Tue, 26 Jun 2018 09:52:29 -0400
+Subject: [PATCH 4932/5725] drm/amd/display: add pp to dc powerlevel enum
+ translator
+
+[why]
+Add a switch statement to translate pp's powerlevel enum
+to the dc powerlevel state enum
+[how]
+Add a translator function
+
+Signed-off-by: Mikita Lipski <mikita.lipski@amd.com>
+Reviewed-by: Sun peng Li <Sunpeng.Li@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | 29 +++++++++++++++++++++-
+ 1 file changed, 28 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+index 50e8630..c69ae78 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+@@ -192,6 +192,33 @@ static enum amd_pp_clock_type dc_to_pp_clock_type(
+ return amd_pp_clk_type;
+ }
+
++static enum dm_pp_clocks_state pp_to_dc_powerlevel_state(
++ enum PP_DAL_POWERLEVEL max_clocks_state)
++{
++ switch (max_clocks_state) {
++ case PP_DAL_POWERLEVEL_0:
++ return DM_PP_CLOCKS_DPM_STATE_LEVEL_0;
++ case PP_DAL_POWERLEVEL_1:
++ return DM_PP_CLOCKS_DPM_STATE_LEVEL_1;
++ case PP_DAL_POWERLEVEL_2:
++ return DM_PP_CLOCKS_DPM_STATE_LEVEL_2;
++ case PP_DAL_POWERLEVEL_3:
++ return DM_PP_CLOCKS_DPM_STATE_LEVEL_3;
++ case PP_DAL_POWERLEVEL_4:
++ return DM_PP_CLOCKS_DPM_STATE_LEVEL_4;
++ case PP_DAL_POWERLEVEL_5:
++ return DM_PP_CLOCKS_DPM_STATE_LEVEL_5;
++ case PP_DAL_POWERLEVEL_6:
++ return DM_PP_CLOCKS_DPM_STATE_LEVEL_6;
++ case PP_DAL_POWERLEVEL_7:
++ return DM_PP_CLOCKS_DPM_STATE_LEVEL_7;
++ default:
++ DRM_ERROR("DM_PPLIB: invalid powerlevel state: %d!\n",
++ max_clocks_state);
++ return DM_PP_CLOCKS_STATE_INVALID;
++ }
++}
++
+ static void pp_to_dc_clock_levels(
+ const struct amd_pp_clocks *pp_clks,
+ struct dm_pp_clock_levels *dc_clks,
+@@ -441,7 +468,7 @@ bool dm_pp_get_static_clocks(
+ if (ret)
+ return false;
+
+- static_clk_info->max_clocks_state = pp_clk_info.max_clocks_state;
++ static_clk_info->max_clocks_state = pp_to_dc_powerlevel_state(pp_clk_info.max_clocks_state);
+ static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock;
+ static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock;
+
+--
+2.7.4
+
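As a design aside (not the patch's implementation, shown only as a hedged sketch), the same translation could be written as a bounds-checked table lookup. This assumes the PP_DAL_POWERLEVEL_* values are contiguous from 0 to 7, which the switch above implies, and it omits the DRM_ERROR print for invalid input.

static enum dm_pp_clocks_state pp_to_dc_powerlevel_state(
		enum PP_DAL_POWERLEVEL max_clocks_state)
{
	/* level 0..7 maps one-to-one onto the dc DPM state enum */
	static const enum dm_pp_clocks_state table[] = {
		DM_PP_CLOCKS_DPM_STATE_LEVEL_0,
		DM_PP_CLOCKS_DPM_STATE_LEVEL_1,
		DM_PP_CLOCKS_DPM_STATE_LEVEL_2,
		DM_PP_CLOCKS_DPM_STATE_LEVEL_3,
		DM_PP_CLOCKS_DPM_STATE_LEVEL_4,
		DM_PP_CLOCKS_DPM_STATE_LEVEL_5,
		DM_PP_CLOCKS_DPM_STATE_LEVEL_6,
		DM_PP_CLOCKS_DPM_STATE_LEVEL_7,
	};

	if (max_clocks_state < PP_DAL_POWERLEVEL_0 ||
	    max_clocks_state > PP_DAL_POWERLEVEL_7)
		return DM_PP_CLOCKS_STATE_INVALID;

	return table[max_clocks_state - PP_DAL_POWERLEVEL_0];
}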
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4933-drm-amd-display-Add-NULL-check-for-local-sink-in-edp.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4933-drm-amd-display-Add-NULL-check-for-local-sink-in-edp.patch
new file mode 100644
index 00000000..98ad6529
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4933-drm-amd-display-Add-NULL-check-for-local-sink-in-edp.patch
@@ -0,0 +1,41 @@
+From f57ab5ff410727061811cd5a64b668b249492209 Mon Sep 17 00:00:00 2001
+From: Yue Hin Lau <Yuehin.Lau@amd.com>
+Date: Wed, 27 Jun 2018 13:49:20 -0400
+Subject: [PATCH 4933/5725] drm/amd/display: Add NULL check for local sink in
+ edp_power_control
+
+[WHY]
+Fix a BSOD regression caused by PnP.
+
+[HOW]
+Add NULL check
+
+Signed-off-by: Yue Hin Lau <Yuehin.Lau@amd.com>
+Reviewed-by: Hugo Hu <Hugo.Hu@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index 298d2ca..8068074 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -871,9 +871,11 @@ void hwss_edp_power_control(
+ unsigned long long wait_time_ms = 0;
+
+ /* max 500ms from LCDVDD off to on */
+- unsigned long long edp_poweroff_time_ms =
+- 500 + link->local_sink->edid_caps.panel_patch.extra_t12_ms;
++ unsigned long long edp_poweroff_time_ms = 500;
+
++ if (link->local_sink != NULL)
++ edp_poweroff_time_ms =
++ 500 + link->local_sink->edid_caps.panel_patch.extra_t12_ms;
+ if (link->link_trace.time_stamp.edp_poweroff == 0)
+ wait_time_ms = edp_poweroff_time_ms;
+ else if (duration_in_ms < edp_poweroff_time_ms)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4934-drm-amd-display-Return-out_link_loss-from-interrupt-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4934-drm-amd-display-Return-out_link_loss-from-interrupt-.patch
new file mode 100644
index 00000000..70a689ea
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4934-drm-amd-display-Return-out_link_loss-from-interrupt-.patch
@@ -0,0 +1,77 @@
+From 43af800ebea195283e0f5f06f83f97093bd9fd6e Mon Sep 17 00:00:00 2001
+From: Fatemeh Darbehani <fatemeh.darbehani@amd.com>
+Date: Tue, 26 Jun 2018 16:40:55 -0400
+Subject: [PATCH 4934/5725] drm/amd/display: Return out_link_loss from
+ interrupt handler
+
+Signed-off-by: Fatemeh Darbehani <fatemeh.darbehani@amd.com>
+Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 +-
+ drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 8 +++++++-
+ drivers/gpu/drm/amd/display/dc/dc_link.h | 2 +-
+ 3 files changed, 9 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index d52d004..8ab7a99 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -1141,7 +1141,7 @@ static void handle_hpd_rx_irq(void *param)
+ if (dc_link->type != dc_connection_mst_branch)
+ mutex_lock(&aconnector->hpd_lock);
+
+- if (dc_link_handle_hpd_rx_irq(dc_link, NULL) &&
++ if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
+ !is_mst_root_connector) {
+ /* Downstream Port status changed. */
+ if (!dc_link_detect_sink(dc_link, &new_connection_type))
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+index 165618f..fd73a9c 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+@@ -1994,12 +1994,16 @@ static void handle_automated_test(struct dc_link *link)
+ sizeof(test_response));
+ }
+
+-bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data)
++bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss)
+ {
+ union hpd_irq_data hpd_irq_dpcd_data = {{{{0}}}};
+ union device_service_irq device_service_clear = { { 0 } };
+ enum dc_status result;
++
+ bool status = false;
++
++ if (out_link_loss)
++ *out_link_loss = false;
+ /* For use cases related to down stream connection status change,
+ * PSR and device auto test, refer to function handle_sst_hpd_irq
+ * in DAL2.1*/
+@@ -2074,6 +2078,8 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
+ true, LINK_TRAINING_ATTEMPTS);
+
+ status = false;
++ if (out_link_loss)
++ *out_link_loss = true;
+ }
+
+ if (link->type == dc_connection_active_dongle &&
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
+index 795a8f0..2351681 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
+@@ -172,7 +172,7 @@ bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);
+ * false - no change in Downstream port status. No further action required
+ * from DM. */
+ bool dc_link_handle_hpd_rx_irq(struct dc_link *dc_link,
+- union hpd_irq_data *hpd_irq_dpcd_data);
++ union hpd_irq_data *hpd_irq_dpcd_data, bool *out_link_loss);
+
+ struct dc_sink_init_data;
+
+--
+2.7.4
+
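Only the plumbing lands in this patch; amdgpu_dm still passes NULL for the new argument. A hypothetical caller (sketch only; handle_link_loss() is a made-up helper) could use it to tell a downstream-port status change apart from a link that dropped and was retrained.

static void example_hpd_rx_handler(struct dc_link *dc_link)
{
	bool link_loss = false;

	/* returns true when the downstream port status changed */
	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, &link_loss)) {
		/* re-run sink detection, as handle_hpd_rx_irq() already does */
	}

	/* set by dc_link_handle_hpd_rx_irq() when the link was lost and retrained */
	if (link_loss)
		handle_link_loss(dc_link);
}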
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4935-drm-amd-display-Add-CRC-support-for-DCN.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4935-drm-amd-display-Add-CRC-support-for-DCN.patch
new file mode 100644
index 00000000..1a756ecb
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4935-drm-amd-display-Add-CRC-support-for-DCN.patch
@@ -0,0 +1,193 @@
+From 1b55edc08e16d146f22ad95deecb2063c60d967f Mon Sep 17 00:00:00 2001
+From: David Francis <David.Francis@amd.com>
+Date: Tue, 26 Jun 2018 14:58:15 -0400
+Subject: [PATCH 4935/5725] drm/amd/display: Add CRC support for DCN
+
+[Why]
+Regamma/CTM tests require CRC support
+
+[How]
+The CRC registers that were used in DCE exist under different
+names in DCN. The code was copied from DCE (in
+dc/dce110/dce110_timing_generator.c) into DCN, and changed to
+use the DCN register access helper functions.
+
+Signed-off-by: David Francis <David.Francis@amd.com>
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 68 +++++++++++++++++++++++
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h | 49 +++++++++++++++-
+ 2 files changed, 114 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+index e6a3ade..411f892 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+@@ -1324,6 +1324,72 @@ bool optc1_is_optc_underflow_occurred(struct timing_generator *optc)
+ return (underflow_occurred == 1);
+ }
+
++bool optc1_configure_crc(struct timing_generator *optc,
++ const struct crc_params *params)
++{
++ struct optc *optc1 = DCN10TG_FROM_TG(optc);
++
++ /* Cannot configure crc on a CRTC that is disabled */
++ if (!optc1_is_tg_enabled(optc))
++ return false;
++
++ REG_WRITE(OTG_CRC_CNTL, 0);
++
++ if (!params->enable)
++ return true;
++
++ /* Program frame boundaries */
++ /* Window A x axis start and end. */
++ REG_UPDATE_2(OTG_CRC0_WINDOWA_X_CONTROL,
++ OTG_CRC0_WINDOWA_X_START, params->windowa_x_start,
++ OTG_CRC0_WINDOWA_X_END, params->windowa_x_end);
++
++ /* Window A y axis start and end. */
++ REG_UPDATE_2(OTG_CRC0_WINDOWA_Y_CONTROL,
++ OTG_CRC0_WINDOWA_Y_START, params->windowa_y_start,
++ OTG_CRC0_WINDOWA_Y_END, params->windowa_y_end);
++
++ /* Window B x axis start and end. */
++ REG_UPDATE_2(OTG_CRC0_WINDOWB_X_CONTROL,
++ OTG_CRC0_WINDOWB_X_START, params->windowb_x_start,
++ OTG_CRC0_WINDOWB_X_END, params->windowb_x_end);
++
++ /* Window B y axis start and end. */
++ REG_UPDATE_2(OTG_CRC0_WINDOWB_Y_CONTROL,
++ OTG_CRC0_WINDOWB_Y_START, params->windowb_y_start,
++ OTG_CRC0_WINDOWB_Y_END, params->windowb_y_end);
++
++	/* Set crc mode and selection, and enable. Only using CRC0 */
++ REG_UPDATE_3(OTG_CRC_CNTL,
++ OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
++ OTG_CRC0_SELECT, params->selection,
++ OTG_CRC_EN, 1);
++
++ return true;
++}
++
++bool optc1_get_crc(struct timing_generator *optc,
++ uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
++{
++ uint32_t field = 0;
++ struct optc *optc1 = DCN10TG_FROM_TG(optc);
++
++ REG_GET(OTG_CRC_CNTL, OTG_CRC_EN, &field);
++
++ /* Early return if CRC is not enabled for this CRTC */
++ if (!field)
++ return false;
++
++ REG_GET_2(OTG_CRC0_DATA_RG,
++ CRC0_R_CR, r_cr,
++ CRC0_G_Y, g_y);
++
++ REG_GET(OTG_CRC0_DATA_B,
++ CRC0_B_CB, b_cb);
++
++ return true;
++}
++
+ static const struct timing_generator_funcs dcn10_tg_funcs = {
+ .validate_timing = optc1_validate_timing,
+ .program_timing = optc1_program_timing,
+@@ -1360,6 +1426,8 @@ static const struct timing_generator_funcs dcn10_tg_funcs = {
+ .is_tg_enabled = optc1_is_tg_enabled,
+ .is_optc_underflow_occurred = optc1_is_optc_underflow_occurred,
+ .clear_optc_underflow = optc1_clear_optc_underflow,
++ .get_crc = optc1_get_crc,
++ .configure_crc = optc1_configure_crc,
+ };
+
+ void dcn10_timing_generator_init(struct optc *optc1)
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+index 59ed272..1df510f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+@@ -75,7 +75,14 @@
+ SRI(CONTROL, VTG, inst),\
+ SRI(OTG_VERT_SYNC_CONTROL, OTG, inst),\
+ SRI(OTG_MASTER_UPDATE_MODE, OTG, inst),\
+- SRI(OTG_GSL_CONTROL, OTG, inst)
++ SRI(OTG_GSL_CONTROL, OTG, inst),\
++ SRI(OTG_CRC_CNTL, OTG, inst),\
++ SRI(OTG_CRC0_DATA_RG, OTG, inst),\
++ SRI(OTG_CRC0_DATA_B, OTG, inst),\
++ SRI(OTG_CRC0_WINDOWA_X_CONTROL, OTG, inst),\
++ SRI(OTG_CRC0_WINDOWA_Y_CONTROL, OTG, inst),\
++ SRI(OTG_CRC0_WINDOWB_X_CONTROL, OTG, inst),\
++ SRI(OTG_CRC0_WINDOWB_Y_CONTROL, OTG, inst)
+
+ #define TG_COMMON_REG_LIST_DCN1_0(inst) \
+ TG_COMMON_REG_LIST_DCN(inst),\
+@@ -138,6 +145,13 @@ struct dcn_optc_registers {
+ uint32_t OTG_GSL_WINDOW_X;
+ uint32_t OTG_GSL_WINDOW_Y;
+ uint32_t OTG_VUPDATE_KEEPOUT;
++ uint32_t OTG_CRC_CNTL;
++ uint32_t OTG_CRC0_DATA_RG;
++ uint32_t OTG_CRC0_DATA_B;
++ uint32_t OTG_CRC0_WINDOWA_X_CONTROL;
++ uint32_t OTG_CRC0_WINDOWA_Y_CONTROL;
++ uint32_t OTG_CRC0_WINDOWB_X_CONTROL;
++ uint32_t OTG_CRC0_WINDOWB_Y_CONTROL;
+ };
+
+ #define TG_COMMON_MASK_SH_LIST_DCN(mask_sh)\
+@@ -232,7 +246,21 @@ struct dcn_optc_registers {
+ SF(OTG0_OTG_GSL_CONTROL, OTG_GSL2_EN, mask_sh),\
+ SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_EN, mask_sh),\
+ SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_FORCE_DELAY, mask_sh),\
+- SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_CHECK_ALL_FIELDS, mask_sh)
++ SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_CHECK_ALL_FIELDS, mask_sh),\
++ SF(OTG0_OTG_CRC_CNTL, OTG_CRC_CONT_EN, mask_sh),\
++ SF(OTG0_OTG_CRC_CNTL, OTG_CRC0_SELECT, mask_sh),\
++ SF(OTG0_OTG_CRC_CNTL, OTG_CRC_EN, mask_sh),\
++ SF(OTG0_OTG_CRC0_DATA_RG, CRC0_R_CR, mask_sh),\
++ SF(OTG0_OTG_CRC0_DATA_RG, CRC0_G_Y, mask_sh),\
++ SF(OTG0_OTG_CRC0_DATA_B, CRC0_B_CB, mask_sh),\
++ SF(OTG0_OTG_CRC0_WINDOWA_X_CONTROL, OTG_CRC0_WINDOWA_X_START, mask_sh),\
++ SF(OTG0_OTG_CRC0_WINDOWA_X_CONTROL, OTG_CRC0_WINDOWA_X_END, mask_sh),\
++ SF(OTG0_OTG_CRC0_WINDOWA_Y_CONTROL, OTG_CRC0_WINDOWA_Y_START, mask_sh),\
++ SF(OTG0_OTG_CRC0_WINDOWA_Y_CONTROL, OTG_CRC0_WINDOWA_Y_END, mask_sh),\
++ SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, OTG_CRC0_WINDOWB_X_START, mask_sh),\
++ SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, OTG_CRC0_WINDOWB_X_END, mask_sh),\
++ SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_START, mask_sh),\
++ SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_END, mask_sh)
+
+
+ #define TG_COMMON_MASK_SH_LIST_DCN1_0(mask_sh)\
+@@ -363,7 +391,22 @@ struct dcn_optc_registers {
+ type OTG_MASTER_UPDATE_LOCK_GSL_EN;\
+ type MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET;\
+ type MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET;\
+- type OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN;
++ type OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN;\
++ type OTG_CRC_CONT_EN;\
++ type OTG_CRC0_SELECT;\
++ type OTG_CRC_EN;\
++ type CRC0_R_CR;\
++ type CRC0_G_Y;\
++ type CRC0_B_CB;\
++ type OTG_CRC0_WINDOWA_X_START;\
++ type OTG_CRC0_WINDOWA_X_END;\
++ type OTG_CRC0_WINDOWA_Y_START;\
++ type OTG_CRC0_WINDOWA_Y_END;\
++ type OTG_CRC0_WINDOWB_X_START;\
++ type OTG_CRC0_WINDOWB_X_END;\
++ type OTG_CRC0_WINDOWB_Y_START;\
++ type OTG_CRC0_WINDOWB_Y_END;
++
+
+ #define TG_REG_FIELD_LIST(type) \
+ TG_REG_FIELD_LIST_DCN1_0(type)
+--
+2.7.4
+
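With configure_crc and get_crc wired into dcn10_tg_funcs, a caller holding a timing_generator can program a CRC window and poll the three colour components. The sketch below is hypothetical and not part of the patch; the CRC_SELECTION_ACTIVE_ONLY value and the full-frame window bounds are assumptions, and window B is simply left at zero.

static bool example_read_otg_crc(struct timing_generator *tg,
				 int width, int height,
				 uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
	struct crc_params params = {0};

	params.enable = true;
	params.continuous_mode = true;
	params.selection = CRC_SELECTION_ACTIVE_ONLY; /* assumed enum value */
	params.windowa_x_start = 0;
	params.windowa_x_end = width;
	params.windowa_y_start = 0;
	params.windowa_y_end = height;

	/* programs the OTG_CRC0_* windows and enables CRC generation */
	if (!tg->funcs->configure_crc(tg, &params))
		return false; /* e.g. the CRTC is disabled */

	/* reads OTG_CRC0_DATA_RG / OTG_CRC0_DATA_B once CRC is enabled */
	return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
}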
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4936-drm-amd-display-Expose-couple-OPTC-functions-through.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4936-drm-amd-display-Expose-couple-OPTC-functions-through.patch
new file mode 100644
index 00000000..44112a6d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4936-drm-amd-display-Expose-couple-OPTC-functions-through.patch
@@ -0,0 +1,37 @@
+From 4e184888955b642935a24833f6b314bcf1db09a4 Mon Sep 17 00:00:00 2001
+From: David Francis <David.Francis@amd.com>
+Date: Wed, 27 Jun 2018 15:55:57 -0400
+Subject: [PATCH 4936/5725] drm/amd/display: Expose couple OPTC functions
+ through header
+
+Signed-off-by: David Francis <David.Francis@amd.com>
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+index 1df510f..c1b1142 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+@@ -554,4 +554,15 @@ bool optc1_get_otg_active_size(struct timing_generator *optc,
+ uint32_t *otg_active_width,
+ uint32_t *otg_active_height);
+
++void optc1_enable_crtc_reset(
++ struct timing_generator *optc,
++ int source_tg_inst,
++ struct crtc_trigger_info *crtc_tp);
++
++bool optc1_configure_crc(struct timing_generator *optc,
++ const struct crc_params *params);
++
++bool optc1_get_crc(struct timing_generator *optc,
++ uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb);
++
+ #endif /* __DC_TIMING_GENERATOR_DCN10_H__ */
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4937-drm-amd-display-dp-debugfs-allow-link-rate-lane-coun.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4937-drm-amd-display-dp-debugfs-allow-link-rate-lane-coun.patch
new file mode 100644
index 00000000..bcdb7f97
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4937-drm-amd-display-dp-debugfs-allow-link-rate-lane-coun.patch
@@ -0,0 +1,41 @@
+From 60577e58760c282196b3803b7cd11016234e867e Mon Sep 17 00:00:00 2001
+From: Hersen Wu <hersenxs.wu@amd.com>
+Date: Wed, 27 Jun 2018 13:03:04 -0400
+Subject: [PATCH 4937/5725] drm/amd/display: dp debugfs allow link rate lane
+ count greater than dp rx reported caps
+
+[Why]
+When the hw team tunes phy parameters, it needs to force a dp link
+rate or lane count greater than the values reported by the dp receiver
+in order to check the dp tx. The current debugfs limits the link rate
+and lane count to no more than the rx caps.
+
+[How] Remove the check that limited forced settings to the rx caps.
+
+v2: Fix typo in title
+
+Signed-off-by: Hersen Wu <hersenxs.wu@amd.com>
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+index 0276e09..0d9e410 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+@@ -214,8 +214,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
+ break;
+ }
+
+- if (!valid_input || (param[0] > link->reported_link_cap.lane_count) ||
+- (param[1] > link->reported_link_cap.link_rate)) {
++ if (!valid_input) {
+ kfree(wr_buf);
+ DRM_DEBUG_DRIVER("Invalid Input value No HW will be programmed\n");
+ return bytes_from_user;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4938-drm-amd-display-Fix-new-stream-count-check-in-dc_add.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4938-drm-amd-display-Fix-new-stream-count-check-in-dc_add.patch
new file mode 100644
index 00000000..a0389e5a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4938-drm-amd-display-Fix-new-stream-count-check-in-dc_add.patch
@@ -0,0 +1,42 @@
+From ece8913e70ed65b76da811c642071c83b1aae99b Mon Sep 17 00:00:00 2001
+From: Ken Chalmers <ken.chalmers@amd.com>
+Date: Wed, 27 Jun 2018 12:48:21 -0400
+Subject: [PATCH 4938/5725] drm/amd/display: Fix new stream count check in
+ dc_add_stream_to_ctx
+
+[Why]
+The previous code could allow through attempts to enable more streams
+than there are timing generators, in designs where the number of pipes
+is greater than the number of timing generators.
+
+[How]
+Compare the new stream count to the resource pool's timing generator
+count, instead of its pipe count. Also correct a typo in the error
+message.
+
+Signed-off-by: Ken Chalmers <ken.chalmers@amd.com>
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index c4c7e00..417d2bf 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -1707,8 +1707,8 @@ enum dc_status dc_add_stream_to_ctx(
+ struct dc_context *dc_ctx = dc->ctx;
+ enum dc_status res;
+
+- if (new_ctx->stream_count >= dc->res_pool->pipe_count) {
+- DC_ERROR("Max streams reached, can add stream %p !\n", stream);
++ if (new_ctx->stream_count >= dc->res_pool->timing_generator_count) {
++ DC_ERROR("Max streams reached, can't add stream %p !\n", stream);
+ return DC_ERROR_UNEXPECTED;
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4939-drm-amd-display-add-max-scl-ratio-to-soc-bounding-bo.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4939-drm-amd-display-add-max-scl-ratio-to-soc-bounding-bo.patch
new file mode 100644
index 00000000..8b3f7e85
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4939-drm-amd-display-add-max-scl-ratio-to-soc-bounding-bo.patch
@@ -0,0 +1,30 @@
+From 589e36ffdcc4f75a8c1884cc8c7adeb0b56a2569 Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Thu, 28 Jun 2018 12:28:00 -0400
+Subject: [PATCH 4939/5725] drm/amd/display: add max scl ratio to soc bounding
+ box
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+index 6943801..c43d68b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
++++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+@@ -111,6 +111,8 @@ struct _vcs_dpi_soc_bounding_box_st {
+ double xfc_bus_transport_time_us;
+ double xfc_xbuf_latency_tolerance_us;
+ int use_urgent_burst_bw;
++ double max_hscl_ratio;
++ double max_vscl_ratio;
+ struct _vcs_dpi_voltage_scaling_st clock_limits[7];
+ };
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4940-drm-amd-display-update-dml-to-match-DV-dml.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4940-drm-amd-display-update-dml-to-match-DV-dml.patch
new file mode 100644
index 00000000..e1861142
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4940-drm-amd-display-update-dml-to-match-DV-dml.patch
@@ -0,0 +1,31 @@
+From 035fdd06aab7f46ed36857bd5327d86e4aa496b9 Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Thu, 28 Jun 2018 11:31:13 -0400
+Subject: [PATCH 4940/5725] drm/amd/display: update dml to match DV dml
+
+DV updated their dml with an option to use max vstartup,
+this updates dc dml with the same option
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+index c43d68b..cbafce6 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
++++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+@@ -305,6 +305,7 @@ struct _vcs_dpi_display_pipe_dest_params_st {
+ unsigned char otg_inst;
+ unsigned char odm_split_cnt;
+ unsigned char odm_combine;
++ unsigned char use_maximum_vstartup;
+ };
+
+ struct _vcs_dpi_display_pipe_params_st {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4941-drm-amd-display-dal-3.1.55.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4941-drm-amd-display-dal-3.1.55.patch
new file mode 100644
index 00000000..175afb43
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4941-drm-amd-display-dal-3.1.55.patch
@@ -0,0 +1,29 @@
+From 434ed0f16cb040292b4bbf7399a438c068946693 Mon Sep 17 00:00:00 2001
+From: Tony Cheng <tony.cheng@amd.com>
+Date: Tue, 5 Jun 2018 09:15:41 -0400
+Subject: [PATCH 4941/5725] drm/amd/display: dal 3.1.55
+
+Signed-off-by: Tony Cheng <tony.cheng@amd.com>
+Reviewed-by: Anthony Koo <Anthony.Koo@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index a7c880b..fc1aa32 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -38,7 +38,7 @@
+ #include "inc/compressor.h"
+ #include "dml/display_mode_lib.h"
+
+-#define DC_VER "3.1.54"
++#define DC_VER "3.1.55"
+
+ #define MAX_SURFACES 3
+ #define MAX_STREAMS 6
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4942-drm-amd-display-Initialize-data-structure-for-DalMpV.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4942-drm-amd-display-Initialize-data-structure-for-DalMpV.patch
new file mode 100644
index 00000000..007d135e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4942-drm-amd-display-Initialize-data-structure-for-DalMpV.patch
@@ -0,0 +1,33 @@
+From 11d048118dbb391295fb2b4b72a855351d9bda63 Mon Sep 17 00:00:00 2001
+From: Hugo Hu <hugo.hu@amd.com>
+Date: Tue, 3 Jul 2018 15:59:15 -0400
+Subject: [PATCH 4942/5725] drm/amd/display: Initialize data structure for
+ DalMpVisualConfirm.
+
+[Why] Prevent unexpected colors from showing when DalMpVisualConfirm is enabled.
+[How] Zero out the color configuration data for DalMpVisualConfirm at initialization.
+
+Signed-off-by: Hugo Hu <hugo.hu@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 06cf967..5b99a83 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -1866,7 +1866,7 @@ static void update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
+ static void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
+ {
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+- struct mpcc_blnd_cfg blnd_cfg;
++ struct mpcc_blnd_cfg blnd_cfg = {0};
+ bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
+ int mpcc_id;
+ struct mpcc *new_mpcc;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4943-drm-amd-display-properly-turn-autocal-off.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4943-drm-amd-display-properly-turn-autocal-off.patch
new file mode 100644
index 00000000..cba005f0
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4943-drm-amd-display-properly-turn-autocal-off.patch
@@ -0,0 +1,54 @@
+From 3fc2bd54b9225f4fc156390700ced367144e3be4 Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Wed, 4 Jul 2018 11:31:40 -0400
+Subject: [PATCH 4943/5725] drm/amd/display: properly turn autocal off
+
+[why]
+Currently we do not turn off autocal when scaling is in bypass.
+If vbios enables auto scale and our first mode set is a non-scaled
+mode, autocal stays on and causes screen corruption.
+
+[how]
+Move turning autocal off to be the first thing done during scaler setup.
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+index f862fd1..f0cc975 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+@@ -655,6 +655,12 @@ void dpp1_dscl_set_scaler_manual_scale(
+
+ dpp->scl_data = *scl_data;
+
++ /* Autocal off */
++ REG_SET_3(DSCL_AUTOCAL, 0,
++ AUTOCAL_MODE, AUTOCAL_MODE_OFF,
++ AUTOCAL_NUM_PIPE, 0,
++ AUTOCAL_PIPE_ID, 0);
++
+ /* Recout */
+ dpp1_dscl_set_recout(dpp, &scl_data->recout);
+
+@@ -678,12 +684,6 @@ void dpp1_dscl_set_scaler_manual_scale(
+ if (dscl_mode == DSCL_MODE_SCALING_444_BYPASS)
+ return;
+
+- /* Autocal off */
+- REG_SET_3(DSCL_AUTOCAL, 0,
+- AUTOCAL_MODE, AUTOCAL_MODE_OFF,
+- AUTOCAL_NUM_PIPE, 0,
+- AUTOCAL_PIPE_ID, 0);
+-
+ /* Black offsets */
+ if (ycbcr)
+ REG_SET_2(SCL_BLACK_OFFSET, 0,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4944-drm-amdgpu-vi-fix-mixed-up-state-in-smu-clockgating-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4944-drm-amdgpu-vi-fix-mixed-up-state-in-smu-clockgating-.patch
new file mode 100644
index 00000000..c8de8604
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4944-drm-amdgpu-vi-fix-mixed-up-state-in-smu-clockgating-.patch
@@ -0,0 +1,64 @@
+From f177c8ae29a3162bd9622a1ff05a5beef43a6b41 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Tue, 10 Jul 2018 16:51:22 -0500
+Subject: [PATCH 4944/5725] drm/amdgpu/vi: fix mixed up state in smu
+ clockgating setup
+
+Use the PP_STATE_SUPPORT_* rather than AMD_CG_SUPPORT_*
+when communicating with the SMU.
+
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/vi.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
+index a791b04..6789cc2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vi.c
++++ b/drivers/gpu/drm/amd/amdgpu/vi.c
+@@ -1367,11 +1367,11 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
+
+ if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
+ if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
+- pp_support_state = AMD_CG_SUPPORT_MC_LS;
++ pp_support_state = PP_STATE_SUPPORT_LS;
+ pp_state = PP_STATE_LS;
+ }
+ if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
+- pp_support_state |= AMD_CG_SUPPORT_MC_MGCG;
++ pp_support_state |= PP_STATE_SUPPORT_CG;
+ pp_state |= PP_STATE_CG;
+ }
+ if (state == AMD_CG_STATE_UNGATE)
+@@ -1386,11 +1386,11 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
+
+ if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
+ if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
+- pp_support_state = AMD_CG_SUPPORT_SDMA_LS;
++ pp_support_state = PP_STATE_SUPPORT_LS;
+ pp_state = PP_STATE_LS;
+ }
+ if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
+- pp_support_state |= AMD_CG_SUPPORT_SDMA_MGCG;
++ pp_support_state |= PP_STATE_SUPPORT_CG;
+ pp_state |= PP_STATE_CG;
+ }
+ if (state == AMD_CG_STATE_UNGATE)
+@@ -1405,11 +1405,11 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
+
+ if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
+ if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
+- pp_support_state = AMD_CG_SUPPORT_HDP_LS;
++ pp_support_state = PP_STATE_SUPPORT_LS;
+ pp_state = PP_STATE_LS;
+ }
+ if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
+- pp_support_state |= AMD_CG_SUPPORT_HDP_MGCG;
++ pp_support_state |= PP_STATE_SUPPORT_CG;
+ pp_state |= PP_STATE_CG;
+ }
+ if (state == AMD_CG_STATE_UNGATE)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4945-drm-amdgpu-pp-smu7-drop-unused-values-in-smu-data-st.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4945-drm-amdgpu-pp-smu7-drop-unused-values-in-smu-data-st.patch
new file mode 100644
index 00000000..4e177125
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4945-drm-amdgpu-pp-smu7-drop-unused-values-in-smu-data-st.patch
@@ -0,0 +1,55 @@
+From c88d20986e05f06ba111e34ee30ea7492a23c0e2 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Wed, 11 Jul 2018 13:24:53 -0500
+Subject: [PATCH 4945/5725] drm/amdgpu/pp/smu7: drop unused values in smu data
+ structure
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Use kaddr directly rather than a secondary variable.
+
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | 3 +--
+ drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h | 2 --
+ 2 files changed, 1 insertion(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+index e131add..e35ea99 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+@@ -424,7 +424,7 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
+ + UCODE_ID_CP_MEC_JT2_MASK;
+ }
+
+- toc = (struct SMU_DRAMData_TOC *)smu_data->header;
++ toc = (struct SMU_DRAMData_TOC *)smu_data->header_buffer.kaddr;
+ toc->structure_version = 1;
+
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+@@ -593,7 +593,6 @@ int smu7_init(struct pp_hwmgr *hwmgr)
+ if (r)
+ return -EINVAL;
+
+- smu_data->header = smu_data->header_buffer.kaddr;
+ smu_data->header_buffer.mc_addr = mc_addr;
+
+ if (!hwmgr->not_vf)
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
+index 39c9bfd..e6def28 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
+@@ -37,8 +37,6 @@ struct smu7_buffer_entry {
+ };
+
+ struct smu7_smumgr {
+- uint8_t *header;
+- uint8_t *mec_image;
+ struct smu7_buffer_entry smu_buffer;
+ struct smu7_buffer_entry header_buffer;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4946-drm-amdgpu-pp-smu7-remove-local-mc_addr-variable.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4946-drm-amdgpu-pp-smu7-remove-local-mc_addr-variable.patch
new file mode 100644
index 00000000..b51783d6
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4946-drm-amdgpu-pp-smu7-remove-local-mc_addr-variable.patch
@@ -0,0 +1,64 @@
+From 38bba920a4685cd26f781f661c7ebdef0a2f6184 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Wed, 11 Jul 2018 13:43:40 -0500
+Subject: [PATCH 4946/5725] drm/amdgpu/pp/smu7: remove local mc_addr variable
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Use the structure member directly.
+
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | 8 ++------
+ 1 file changed, 2 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+index e35ea99..a29ffe4 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+@@ -573,7 +573,6 @@ int smu7_setup_pwr_virus(struct pp_hwmgr *hwmgr)
+ int smu7_init(struct pp_hwmgr *hwmgr)
+ {
+ struct smu7_smumgr *smu_data;
+- uint64_t mc_addr = 0;
+ int r;
+ /* Allocate memory for backend private data */
+ smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
+@@ -587,14 +586,12 @@ int smu7_init(struct pp_hwmgr *hwmgr)
+ PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &smu_data->header_buffer.handle,
+- &mc_addr,
++ &smu_data->header_buffer.mc_addr,
+ &smu_data->header_buffer.kaddr);
+
+ if (r)
+ return -EINVAL;
+
+- smu_data->header_buffer.mc_addr = mc_addr;
+-
+ if (!hwmgr->not_vf)
+ return 0;
+
+@@ -604,7 +601,7 @@ int smu7_init(struct pp_hwmgr *hwmgr)
+ PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &smu_data->smu_buffer.handle,
+- &mc_addr,
++ &smu_data->smu_buffer.mc_addr,
+ &smu_data->smu_buffer.kaddr);
+
+ if (r) {
+@@ -613,7 +610,6 @@ int smu7_init(struct pp_hwmgr *hwmgr)
+ &smu_data->header_buffer.kaddr);
+ return -EINVAL;
+ }
+- smu_data->smu_buffer.mc_addr = mc_addr;
+
+ if (smum_is_hw_avfs_present(hwmgr))
+ hwmgr->avfs_supported = true;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4947-drm-amdgpu-pp-smu7-cache-smu-firmware-toc.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4947-drm-amdgpu-pp-smu7-cache-smu-firmware-toc.patch
new file mode 100644
index 00000000..06599d3a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4947-drm-amdgpu-pp-smu7-cache-smu-firmware-toc.patch
@@ -0,0 +1,158 @@
+From aa74e7ef96a1df80053d8f9447836d834f8ad8c5 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 12 Jul 2018 00:38:23 -0500
+Subject: [PATCH 4947/5725] drm/amdgpu/pp/smu7: cache smu firmware toc
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Rather than calculating it every time we rebuild the TOC
+buffer, calculate it once initially and then just copy
+the cached results to the VRAM buffer.
+
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | 92 +++++++++++++---------
+ drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h | 1 +
+ 2 files changed, 54 insertions(+), 39 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+index a29ffe4..cc56a24 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+@@ -381,9 +381,7 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
+ {
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
+ uint32_t fw_to_load;
+- int result = 0;
+- struct SMU_DRAMData_TOC *toc;
+- uint32_t num_entries = 0;
++ int r = 0;
+
+ if (!hwmgr->reload_fw) {
+ pr_info("skip reloading...\n");
+@@ -424,49 +422,62 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
+ + UCODE_ID_CP_MEC_JT2_MASK;
+ }
+
+- toc = (struct SMU_DRAMData_TOC *)smu_data->header_buffer.kaddr;
+- toc->structure_version = 1;
+-
+- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+- UCODE_ID_RLC_G, &toc->entry[num_entries++]),
+- "Failed to Get Firmware Entry.", return -EINVAL);
+- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+- UCODE_ID_CP_CE, &toc->entry[num_entries++]),
+- "Failed to Get Firmware Entry.", return -EINVAL);
+- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+- UCODE_ID_CP_PFP, &toc->entry[num_entries++]),
+- "Failed to Get Firmware Entry.", return -EINVAL);
+- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+- UCODE_ID_CP_ME, &toc->entry[num_entries++]),
+- "Failed to Get Firmware Entry.", return -EINVAL);
+- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+- UCODE_ID_CP_MEC, &toc->entry[num_entries++]),
+- "Failed to Get Firmware Entry.", return -EINVAL);
+- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+- UCODE_ID_CP_MEC_JT1, &toc->entry[num_entries++]),
+- "Failed to Get Firmware Entry.", return -EINVAL);
+- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+- UCODE_ID_CP_MEC_JT2, &toc->entry[num_entries++]),
+- "Failed to Get Firmware Entry.", return -EINVAL);
+- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+- UCODE_ID_SDMA0, &toc->entry[num_entries++]),
+- "Failed to Get Firmware Entry.", return -EINVAL);
+- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+- UCODE_ID_SDMA1, &toc->entry[num_entries++]),
+- "Failed to Get Firmware Entry.", return -EINVAL);
+- if (!hwmgr->not_vf)
+- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+- UCODE_ID_MEC_STORAGE, &toc->entry[num_entries++]),
+- "Failed to Get Firmware Entry.", return -EINVAL);
++ if (!smu_data->toc) {
++ struct SMU_DRAMData_TOC *toc;
+
+- toc->num_entries = num_entries;
++ smu_data->toc = kzalloc(sizeof(struct SMU_DRAMData_TOC), GFP_KERNEL);
++ if (!smu_data->toc)
++ return -ENOMEM;
++ toc = smu_data->toc;
++ toc->num_entries = 0;
++ toc->structure_version = 1;
++
++ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
++ UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]),
++ "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
++ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
++ UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]),
++ "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
++ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
++ UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]),
++ "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
++ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
++ UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]),
++ "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
++ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
++ UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]),
++ "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
++ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
++ UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]),
++ "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
++ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
++ UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]),
++ "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
++ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
++ UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]),
++ "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
++ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
++ UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
++ "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
++ if (!hwmgr->not_vf)
++ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
++ UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]),
++ "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
++ }
++ memcpy_toio(smu_data->header_buffer.kaddr, smu_data->toc,
++ sizeof(struct SMU_DRAMData_TOC));
+ smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, upper_32_bits(smu_data->header_buffer.mc_addr));
+ smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, lower_32_bits(smu_data->header_buffer.mc_addr));
+
+ if (smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load))
+ pr_err("Fail to Request SMU Load uCode");
+
+- return result;
++ return r;
++
++failed:
++ kfree(smu_data->toc);
++ smu_data->toc = NULL;
++ return r;
+ }
+
+ /* Check if the FW has been loaded, SMU will not return if loading has not finished. */
+@@ -631,6 +642,9 @@ int smu7_smu_fini(struct pp_hwmgr *hwmgr)
+ &smu_data->smu_buffer.mc_addr,
+ &smu_data->smu_buffer.kaddr);
+
++
++ kfree(smu_data->toc);
++ smu_data->toc = NULL;
+ kfree(hwmgr->smu_backend);
+ hwmgr->smu_backend = NULL;
+ return 0;
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
+index e6def28..01f0538f 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
+@@ -39,6 +39,7 @@ struct smu7_buffer_entry {
+ struct smu7_smumgr {
+ struct smu7_buffer_entry smu_buffer;
+ struct smu7_buffer_entry header_buffer;
++ struct SMU_DRAMData_TOC *toc;
+
+ uint32_t soft_regs_start;
+ uint32_t dpm_table_start;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4948-drm-amdgpu-pp-remove-dead-vega12-code.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4948-drm-amdgpu-pp-remove-dead-vega12-code.patch
new file mode 100644
index 00000000..51a1b72f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4948-drm-amdgpu-pp-remove-dead-vega12-code.patch
@@ -0,0 +1,67 @@
+From 0e22eaa796f07e46d6fbb3d4c5a0eb37314ba483 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Wed, 11 Jul 2018 17:30:25 -0500
+Subject: [PATCH 4948/5725] drm/amdgpu/pp: remove dead vega12 code
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The removed code was already commented out.
+
+Reviewed-by: Rex Zhu <rezhu@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c | 29 ----------------------
+ 1 file changed, 29 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
+index 7d9b40e..508a262 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
+@@ -343,29 +343,6 @@ static int vega12_set_tools_address(struct pp_hwmgr *hwmgr)
+ return 0;
+ }
+
+-#if 0 /* tentatively remove */
+-static int vega12_verify_smc_interface(struct pp_hwmgr *hwmgr)
+-{
+- uint32_t smc_driver_if_version;
+-
+- PP_ASSERT_WITH_CODE(!vega12_send_msg_to_smc(hwmgr,
+- PPSMC_MSG_GetDriverIfVersion),
+- "Attempt to get SMC IF Version Number Failed!",
+- return -EINVAL);
+- vega12_read_arg_from_smc(hwmgr, &smc_driver_if_version);
+-
+- if (smc_driver_if_version != SMU9_DRIVER_IF_VERSION) {
+- pr_err("Your firmware(0x%x) doesn't match \
+- SMU9_DRIVER_IF_VERSION(0x%x). \
+- Please update your firmware!\n",
+- smc_driver_if_version, SMU9_DRIVER_IF_VERSION);
+- return -EINVAL;
+- }
+-
+- return 0;
+-}
+-#endif
+-
+ static int vega12_smu_init(struct pp_hwmgr *hwmgr)
+ {
+ struct vega12_smumgr *priv;
+@@ -517,12 +494,6 @@ static int vega12_start_smu(struct pp_hwmgr *hwmgr)
+ "SMC is not running!",
+ return -EINVAL);
+
+-#if 0 /* tentatively remove */
+- PP_ASSERT_WITH_CODE(!vega12_verify_smc_interface(hwmgr),
+- "Failed to verify SMC interface!",
+- return -EINVAL);
+-#endif
+-
+ vega12_set_tools_address(hwmgr);
+
+ return 0;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4949-drm-amdgpu-pp-split-out-common-smumgr-smu9-code.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4949-drm-amdgpu-pp-split-out-common-smumgr-smu9-code.patch
new file mode 100644
index 00000000..6481bdb9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4949-drm-amdgpu-pp-split-out-common-smumgr-smu9-code.patch
@@ -0,0 +1,920 @@
+From 8757277ccbe249636b4b0f0493a9af5c7e37b3d3 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 12 Jul 2018 14:47:30 -0500
+Subject: [PATCH 4949/5725] drm/amdgpu/pp: split out common smumgr smu9 code
+
+Split out the shared smumgr code for vega10 and 12
+so we don't have duplicate code for both.
+
+Reviewed-by: Rex Zhu <rezhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 44 ++---
+ .../gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c | 8 +-
+ drivers/gpu/drm/amd/powerplay/smumgr/Makefile | 2 +-
+ drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c | 150 ++++++++++++++++
+ drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.h | 32 ++++
+ .../gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c | 168 ++----------------
+ .../gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c | 191 +++------------------
+ .../gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h | 1 -
+ 8 files changed, 242 insertions(+), 354 deletions(-)
+ create mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c
+ create mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.h
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+index c0ceb69..912d0d6 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+@@ -477,7 +477,7 @@ static int vega12_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
+ "[GetNumOfDpmLevel] failed to get dpm levels!",
+ return ret);
+
+- vega12_read_arg_from_smc(hwmgr, num_of_levels);
++ *num_of_levels = smum_get_argument(hwmgr);
+ PP_ASSERT_WITH_CODE(*num_of_levels > 0,
+ "[GetNumOfDpmLevel] number of clk levels is invalid!",
+ return -EINVAL);
+@@ -499,11 +499,7 @@ static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
+ "[GetDpmFrequencyByIndex] Failed to get dpm frequency from SMU!",
+ return -EINVAL);
+
+- result = vega12_read_arg_from_smc(hwmgr, clock);
+-
+- PP_ASSERT_WITH_CODE(*clock != 0,
+- "[GetDPMFrequencyByIndex] Failed to get dpm frequency by index.!",
+- return -EINVAL);
++ *clock = smum_get_argument(hwmgr);
+
+ return result;
+ }
+@@ -884,21 +880,21 @@ static int vega12_get_all_clock_ranges_helper(struct pp_hwmgr *hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clkid << 16)) == 0,
+ "[GetClockRanges] Failed to get max ac clock from SMC!",
+ return -EINVAL);
+- vega12_read_arg_from_smc(hwmgr, &(clock->ACMax));
++ clock->ACMax = smum_get_argument(hwmgr);
+
+ /* AC Min */
+ PP_ASSERT_WITH_CODE(
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clkid << 16)) == 0,
+ "[GetClockRanges] Failed to get min ac clock from SMC!",
+ return -EINVAL);
+- vega12_read_arg_from_smc(hwmgr, &(clock->ACMin));
++ clock->ACMin = smum_get_argument(hwmgr);
+
+ /* DC Max */
+ PP_ASSERT_WITH_CODE(
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDcModeMaxDpmFreq, (clkid << 16)) == 0,
+ "[GetClockRanges] Failed to get max dc clock from SMC!",
+ return -EINVAL);
+- vega12_read_arg_from_smc(hwmgr, &(clock->DCMax));
++ clock->DCMax = smum_get_argument(hwmgr);
+
+ return 0;
+ }
+@@ -1219,7 +1215,7 @@ static int vega12_get_gpu_power(struct pp_hwmgr *hwmgr, uint32_t *query)
+ "Failed to get current package power!",
+ return -EINVAL);
+
+- vega12_read_arg_from_smc(hwmgr, &value);
++ value = smum_get_argument(hwmgr);
+ /* power value is an integer */
+ *query = value << 8;
+ #endif
+@@ -1235,11 +1231,8 @@ static int vega12_get_current_gfx_clk_freq(struct pp_hwmgr *hwmgr, uint32_t *gfx
+ PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16)) == 0,
+ "[GetCurrentGfxClkFreq] Attempt to get Current GFXCLK Frequency Failed!",
+- return -1);
+- PP_ASSERT_WITH_CODE(
+- vega12_read_arg_from_smc(hwmgr, &gfx_clk) == 0,
+- "[GetCurrentGfxClkFreq] Attempt to read arg from SMC Failed",
+- return -1);
++ return -EINVAL);
++ gfx_clk = smum_get_argument(hwmgr);
+
+ *gfx_freq = gfx_clk * 100;
+
+@@ -1255,11 +1248,8 @@ static int vega12_get_current_mclk_freq(struct pp_hwmgr *hwmgr, uint32_t *mclk_f
+ PP_ASSERT_WITH_CODE(
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmClockFreq, (PPCLK_UCLK << 16)) == 0,
+ "[GetCurrentMClkFreq] Attempt to get Current MCLK Frequency Failed!",
+- return -1);
+- PP_ASSERT_WITH_CODE(
+- vega12_read_arg_from_smc(hwmgr, &mem_clk) == 0,
+- "[GetCurrentMClkFreq] Attempt to read arg from SMC Failed",
+- return -1);
++ return -EINVAL);
++ mem_clk = smum_get_argument(hwmgr);
+
+ *mclk_freq = mem_clk * 100;
+
+@@ -1276,16 +1266,12 @@ static int vega12_get_current_activity_percent(
+ #if 0
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0);
+ if (!ret) {
+- ret = vega12_read_arg_from_smc(hwmgr, &current_activity);
+- if (!ret) {
+- if (current_activity > 100) {
+- PP_ASSERT(false,
+- "[GetCurrentActivityPercent] Activity Percentage Exceeds 100!");
+- current_activity = 100;
+- }
+- } else
++ current_activity = smum_get_argument(hwmgr);
++ if (current_activity > 100) {
+ PP_ASSERT(false,
+- "[GetCurrentActivityPercent] Attempt To Read Average Graphics Activity from SMU Failed!");
++ "[GetCurrentActivityPercent] Activity Percentage Exceeds 100!");
++ current_activity = 100;
++ }
+ } else
+ PP_ASSERT(false,
+ "[GetCurrentActivityPercent] Attempt To Send Get Average Graphics Activity to SMU Failed!");
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
+index cfd9e6c..904eb2c 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
+@@ -34,11 +34,9 @@ static int vega12_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
+ PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetCurrentRpm),
+ "Attempt to get current RPM from SMC Failed!",
+- return -1);
+- PP_ASSERT_WITH_CODE(!vega12_read_arg_from_smc(hwmgr,
+- current_rpm),
+- "Attempt to read current RPM from SMC Failed!",
+- return -1);
++ return -EINVAL);
++ *current_rpm = smum_get_argument(hwmgr);
++
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
+index 5e9db66..3ef862b 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
+@@ -6,7 +6,7 @@
+ SMU_MGR = smumgr.o smu8_smumgr.o tonga_smumgr.o fiji_smumgr.o \
+ polaris10_smumgr.o iceland_smumgr.o \
+ smu7_smumgr.o vega10_smumgr.o smu10_smumgr.o ci_smumgr.o \
+- vega12_smumgr.o vegam_smumgr.o
++ vega12_smumgr.o vegam_smumgr.o smu9_smumgr.o
+
+ AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR))
+
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c
+new file mode 100644
+index 0000000..aad8f07
+--- /dev/null
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c
+@@ -0,0 +1,150 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "smumgr.h"
++#include "vega10_inc.h"
++#include "soc15_common.h"
++#include "pp_debug.h"
++
++
++/* MP Apertures */
++#define MP0_Public 0x03800000
++#define MP0_SRAM 0x03900000
++#define MP1_Public 0x03b00000
++#define MP1_SRAM 0x03c00004
++
++#define smnMP1_FIRMWARE_FLAGS 0x3010028
++
++bool smu9_is_smc_ram_running(struct pp_hwmgr *hwmgr)
++{
++ struct amdgpu_device *adev = hwmgr->adev;
++ uint32_t mp1_fw_flags;
++
++ WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2,
++ (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
++
++ mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2);
++
++ if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
++ return true;
++
++ return false;
++}
++
++/*
++ * Check if SMC has responded to previous message.
++ *
++ * @param smumgr the address of the powerplay hardware manager.
++ * @return TRUE SMC has responded, FALSE otherwise.
++ */
++static uint32_t smu9_wait_for_response(struct pp_hwmgr *hwmgr)
++{
++ struct amdgpu_device *adev = hwmgr->adev;
++ uint32_t reg;
++ uint32_t ret;
++
++ reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
++
++ ret = phm_wait_for_register_unequal(hwmgr, reg,
++ 0, MP1_C2PMSG_90__CONTENT_MASK);
++
++ if (ret)
++ pr_err("No response from smu\n");
++
++ return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
++}
++
++/*
++ * Send a message to the SMC, and do not wait for its response.
++ * @param smumgr the address of the powerplay hardware manager.
++ * @param msg the message to send.
++ * @return Always return 0.
++ */
++static int smu9_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
++ uint16_t msg)
++{
++ struct amdgpu_device *adev = hwmgr->adev;
++
++ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
++
++ return 0;
++}
++
++/*
++ * Send a message to the SMC, and wait for its response.
++ * @param hwmgr the address of the powerplay hardware manager.
++ * @param msg the message to send.
++ * @return Always return 0.
++ */
++int smu9_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
++{
++ struct amdgpu_device *adev = hwmgr->adev;
++ uint32_t ret;
++
++ smu9_wait_for_response(hwmgr);
++
++ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
++
++ smu9_send_msg_to_smc_without_waiting(hwmgr, msg);
++
++ ret = smu9_wait_for_response(hwmgr);
++ if (ret != 1)
++ pr_err("Failed to send message: 0x%x, ret value: 0x%x\n", msg, ret);
++
++ return 0;
++}
++
++/*
++ * Send a message to the SMC with parameter
++ * @param hwmgr: the address of the powerplay hardware manager.
++ * @param msg: the message to send.
++ * @param parameter: the parameter to send
++ * @return Always return 0.
++ */
++int smu9_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
++ uint16_t msg, uint32_t parameter)
++{
++ struct amdgpu_device *adev = hwmgr->adev;
++ uint32_t ret;
++
++ smu9_wait_for_response(hwmgr);
++
++ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
++
++ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter);
++
++ smu9_send_msg_to_smc_without_waiting(hwmgr, msg);
++
++ ret = smu9_wait_for_response(hwmgr);
++ if (ret != 1)
++ pr_err("Failed message: 0x%x, input parameter: 0x%x, error code: 0x%x\n", msg, parameter, ret);
++
++ return 0;
++}
++
++int smu9_get_argument(struct pp_hwmgr *hwmgr)
++{
++ struct amdgpu_device *adev = hwmgr->adev;
++
++ return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
++}
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.h
+new file mode 100644
+index 0000000..a8da281
+--- /dev/null
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.h
+@@ -0,0 +1,32 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++#ifndef _SMU9_SMUMANAGER_H_
++#define _SMU9_SMUMANAGER_H_
++
++bool smu9_is_smc_ram_running(struct pp_hwmgr *hwmgr);
++int smu9_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg);
++int smu9_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
++ uint16_t msg, uint32_t parameter);
++int smu9_get_argument(struct pp_hwmgr *hwmgr);
++
++#endif
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+index e84669c..5d19115 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+@@ -28,142 +28,11 @@
+ #include "vega10_hwmgr.h"
+ #include "vega10_ppsmc.h"
+ #include "smu9_driver_if.h"
++#include "smu9_smumgr.h"
+ #include "ppatomctrl.h"
+ #include "pp_debug.h"
+
+
+-#define AVFS_EN_MSB 1568
+-#define AVFS_EN_LSB 1568
+-
+-/* Microcode file is stored in this buffer */
+-#define BUFFER_SIZE 80000
+-#define MAX_STRING_SIZE 15
+-#define BUFFER_SIZETWO 131072 /* 128 *1024 */
+-
+-/* MP Apertures */
+-#define MP0_Public 0x03800000
+-#define MP0_SRAM 0x03900000
+-#define MP1_Public 0x03b00000
+-#define MP1_SRAM 0x03c00004
+-
+-#define smnMP1_FIRMWARE_FLAGS 0x3010028
+-#define smnMP0_FW_INTF 0x3010104
+-#define smnMP1_PUB_CTRL 0x3010b14
+-
+-static bool vega10_is_smc_ram_running(struct pp_hwmgr *hwmgr)
+-{
+- struct amdgpu_device *adev = hwmgr->adev;
+- uint32_t mp1_fw_flags;
+-
+- WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2,
+- (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
+-
+- mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2);
+-
+- if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
+- return true;
+-
+- return false;
+-}
+-
+-/*
+- * Check if SMC has responded to previous message.
+- *
+- * @param smumgr the address of the powerplay hardware manager.
+- * @return TRUE SMC has responded, FALSE otherwise.
+- */
+-static uint32_t vega10_wait_for_response(struct pp_hwmgr *hwmgr)
+-{
+- struct amdgpu_device *adev = hwmgr->adev;
+- uint32_t reg;
+- uint32_t ret;
+-
+- reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
+-
+- ret = phm_wait_for_register_unequal(hwmgr, reg,
+- 0, MP1_C2PMSG_90__CONTENT_MASK);
+-
+- if (ret)
+- pr_err("No response from smu\n");
+-
+- return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
+-}
+-
+-/*
+- * Send a message to the SMC, and do not wait for its response.
+- * @param smumgr the address of the powerplay hardware manager.
+- * @param msg the message to send.
+- * @return Always return 0.
+- */
+-static int vega10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
+- uint16_t msg)
+-{
+- struct amdgpu_device *adev = hwmgr->adev;
+-
+- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
+-
+- return 0;
+-}
+-
+-/*
+- * Send a message to the SMC, and wait for its response.
+- * @param hwmgr the address of the powerplay hardware manager.
+- * @param msg the message to send.
+- * @return Always return 0.
+- */
+-static int vega10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
+-{
+- struct amdgpu_device *adev = hwmgr->adev;
+- uint32_t ret;
+-
+- vega10_wait_for_response(hwmgr);
+-
+- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
+-
+- vega10_send_msg_to_smc_without_waiting(hwmgr, msg);
+-
+- ret = vega10_wait_for_response(hwmgr);
+- if (ret != 1)
+- pr_err("Failed to send message: 0x%x, ret value: 0x%x\n", msg, ret);
+-
+- return 0;
+-}
+-
+-/*
+- * Send a message to the SMC with parameter
+- * @param hwmgr: the address of the powerplay hardware manager.
+- * @param msg: the message to send.
+- * @param parameter: the parameter to send
+- * @return Always return 0.
+- */
+-static int vega10_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
+- uint16_t msg, uint32_t parameter)
+-{
+- struct amdgpu_device *adev = hwmgr->adev;
+- uint32_t ret;
+-
+- vega10_wait_for_response(hwmgr);
+-
+- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
+-
+- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter);
+-
+- vega10_send_msg_to_smc_without_waiting(hwmgr, msg);
+-
+- ret = vega10_wait_for_response(hwmgr);
+- if (ret != 1)
+- pr_err("Failed message: 0x%x, input parameter: 0x%x, error code: 0x%x\n", msg, parameter, ret);
+-
+- return 0;
+-}
+-
+-static int vega10_get_argument(struct pp_hwmgr *hwmgr)
+-{
+- struct amdgpu_device *adev = hwmgr->adev;
+-
+- return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
+-}
+-
+ static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
+ uint8_t *table, int16_t table_id)
+ {
+@@ -175,13 +44,13 @@ static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
+ "Invalid SMU Table version!", return -EINVAL);
+ PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
+ "Invalid SMU Table Length!", return -EINVAL);
+- vega10_send_msg_to_smc_with_parameter(hwmgr,
++ smu9_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetDriverDramAddrHigh,
+ upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
+- vega10_send_msg_to_smc_with_parameter(hwmgr,
++ smu9_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetDriverDramAddrLow,
+ lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
+- vega10_send_msg_to_smc_with_parameter(hwmgr,
++ smu9_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_TransferTableSmu2Dram,
+ priv->smu_tables.entry[table_id].table_id);
+
+@@ -206,13 +75,13 @@ static int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
+ memcpy(priv->smu_tables.entry[table_id].table, table,
+ priv->smu_tables.entry[table_id].size);
+
+- vega10_send_msg_to_smc_with_parameter(hwmgr,
++ smu9_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetDriverDramAddrHigh,
+ upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
+- vega10_send_msg_to_smc_with_parameter(hwmgr,
++ smu9_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetDriverDramAddrLow,
+ lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
+- vega10_send_msg_to_smc_with_parameter(hwmgr,
++ smu9_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_TransferTableDram2Smu,
+ priv->smu_tables.entry[table_id].table_id);
+
+@@ -225,8 +94,8 @@ static int vega10_get_smc_features(struct pp_hwmgr *hwmgr,
+ if (features_enabled == NULL)
+ return -EINVAL;
+
+- vega10_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeatures);
+- *features_enabled = vega10_get_argument(hwmgr);
++ smu9_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeatures);
++ *features_enabled = smu9_get_argument(hwmgr);
+
+ return 0;
+ }
+@@ -248,10 +117,10 @@ static int vega10_set_tools_address(struct pp_hwmgr *hwmgr)
+ struct vega10_smumgr *priv = hwmgr->smu_backend;
+
+ if (priv->smu_tables.entry[TOOLSTABLE].mc_addr) {
+- vega10_send_msg_to_smc_with_parameter(hwmgr,
++ smu9_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetToolsDramAddrHigh,
+ upper_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr));
+- vega10_send_msg_to_smc_with_parameter(hwmgr,
++ smu9_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetToolsDramAddrLow,
+ lower_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr));
+ }
+@@ -265,11 +134,11 @@ static int vega10_verify_smc_interface(struct pp_hwmgr *hwmgr)
+ uint32_t dev_id;
+ uint32_t rev_id;
+
+- PP_ASSERT_WITH_CODE(!vega10_send_msg_to_smc(hwmgr,
++ PP_ASSERT_WITH_CODE(!smu9_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetDriverIfVersion),
+ "Attempt to get SMC IF Version Number Failed!",
+ return -EINVAL);
+- smc_driver_if_version = vega10_get_argument(hwmgr);
++ smc_driver_if_version = smu9_get_argument(hwmgr);
+
+ dev_id = adev->pdev->device;
+ rev_id = adev->pdev->revision;
+@@ -441,7 +310,7 @@ static int vega10_smu_fini(struct pp_hwmgr *hwmgr)
+
+ static int vega10_start_smu(struct pp_hwmgr *hwmgr)
+ {
+- if (!vega10_is_smc_ram_running(hwmgr))
++ if (!smu9_is_smc_ram_running(hwmgr))
+ return -EINVAL;
+
+ PP_ASSERT_WITH_CODE(!vega10_verify_smc_interface(hwmgr),
+@@ -453,7 +322,8 @@ static int vega10_start_smu(struct pp_hwmgr *hwmgr)
+ return 0;
+ }
+
+-static int vega10_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw)
++static int vega10_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table,
++ uint16_t table_id, bool rw)
+ {
+ int ret;
+
+@@ -470,11 +340,11 @@ const struct pp_smumgr_func vega10_smu_funcs = {
+ .smu_fini = &vega10_smu_fini,
+ .start_smu = &vega10_start_smu,
+ .request_smu_load_specific_fw = NULL,
+- .send_msg_to_smc = &vega10_send_msg_to_smc,
+- .send_msg_to_smc_with_parameter = &vega10_send_msg_to_smc_with_parameter,
++ .send_msg_to_smc = &smu9_send_msg_to_smc,
++ .send_msg_to_smc_with_parameter = &smu9_send_msg_to_smc_with_parameter,
+ .download_pptable_settings = NULL,
+ .upload_pptable_settings = NULL,
+ .is_dpm_running = vega10_is_dpm_running,
+- .get_argument = vega10_get_argument,
++ .get_argument = smu9_get_argument,
+ .smc_table_manager = vega10_smc_table_manager,
+ };
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
+index 508a262..7f0e210 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
+@@ -24,157 +24,14 @@
+ #include "smumgr.h"
+ #include "vega12_inc.h"
+ #include "soc15_common.h"
++#include "smu9_smumgr.h"
+ #include "vega12_smumgr.h"
+ #include "vega12_ppsmc.h"
+ #include "vega12/smu9_driver_if.h"
+-
+ #include "ppatomctrl.h"
+ #include "pp_debug.h"
+
+
+-/* MP Apertures */
+-#define MP0_Public 0x03800000
+-#define MP0_SRAM 0x03900000
+-#define MP1_Public 0x03b00000
+-#define MP1_SRAM 0x03c00004
+-
+-#define smnMP1_FIRMWARE_FLAGS 0x3010028
+-#define smnMP0_FW_INTF 0x3010104
+-#define smnMP1_PUB_CTRL 0x3010b14
+-
+-static bool vega12_is_smc_ram_running(struct pp_hwmgr *hwmgr)
+-{
+- struct amdgpu_device *adev = hwmgr->adev;
+- uint32_t mp1_fw_flags;
+-
+- WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2,
+- (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
+-
+- mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2);
+-
+- if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
+- MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
+- return true;
+-
+- return false;
+-}
+-
+-/*
+- * Check if SMC has responded to previous message.
+- *
+- * @param smumgr the address of the powerplay hardware manager.
+- * @return TRUE SMC has responded, FALSE otherwise.
+- */
+-static uint32_t vega12_wait_for_response(struct pp_hwmgr *hwmgr)
+-{
+- struct amdgpu_device *adev = hwmgr->adev;
+- uint32_t reg;
+-
+- reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
+-
+- phm_wait_for_register_unequal(hwmgr, reg,
+- 0, MP1_C2PMSG_90__CONTENT_MASK);
+-
+- return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
+-}
+-
+-/*
+- * Send a message to the SMC, and do not wait for its response.
+- * @param smumgr the address of the powerplay hardware manager.
+- * @param msg the message to send.
+- * @return Always return 0.
+- */
+-int vega12_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
+- uint16_t msg)
+-{
+- struct amdgpu_device *adev = hwmgr->adev;
+-
+- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
+-
+- return 0;
+-}
+-
+-/*
+- * Send a message to the SMC, and wait for its response.
+- * @param hwmgr the address of the powerplay hardware manager.
+- * @param msg the message to send.
+- * @return Always return 0.
+- */
+-int vega12_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
+-{
+- struct amdgpu_device *adev = hwmgr->adev;
+-
+- vega12_wait_for_response(hwmgr);
+-
+- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
+-
+- vega12_send_msg_to_smc_without_waiting(hwmgr, msg);
+-
+- if (vega12_wait_for_response(hwmgr) != 1)
+- pr_err("Failed to send message: 0x%x\n", msg);
+-
+- return 0;
+-}
+-
+-/*
+- * Send a message to the SMC with parameter
+- * @param hwmgr: the address of the powerplay hardware manager.
+- * @param msg: the message to send.
+- * @param parameter: the parameter to send
+- * @return Always return 0.
+- */
+-int vega12_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
+- uint16_t msg, uint32_t parameter)
+-{
+- struct amdgpu_device *adev = hwmgr->adev;
+-
+- vega12_wait_for_response(hwmgr);
+-
+- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
+-
+- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter);
+-
+- vega12_send_msg_to_smc_without_waiting(hwmgr, msg);
+-
+- if (vega12_wait_for_response(hwmgr) != 1)
+- pr_err("Failed to send message: 0x%x\n", msg);
+-
+- return 0;
+-}
+-
+-
+-/*
+- * Send a message to the SMC with parameter, do not wait for response
+- * @param hwmgr: the address of the powerplay hardware manager.
+- * @param msg: the message to send.
+- * @param parameter: the parameter to send
+- * @return The response that came from the SMC.
+- */
+-int vega12_send_msg_to_smc_with_parameter_without_waiting(
+- struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter)
+-{
+- struct amdgpu_device *adev = hwmgr->adev;
+-
+- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, parameter);
+-
+- return vega12_send_msg_to_smc_without_waiting(hwmgr, msg);
+-}
+-
+-/*
+- * Retrieve an argument from SMC.
+- * @param hwmgr the address of the powerplay hardware manager.
+- * @param arg pointer to store the argument from SMC.
+- * @return Always return 0.
+- */
+-int vega12_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg)
+-{
+- struct amdgpu_device *adev = hwmgr->adev;
+-
+- *arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
+-
+- return 0;
+-}
+-
+ /*
+ * Copy table from SMC into driver FB
+ * @param hwmgr the address of the HW manager
+@@ -192,16 +49,16 @@ int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
+ "Invalid SMU Table version!", return -EINVAL);
+ PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
+ "Invalid SMU Table Length!", return -EINVAL);
+- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
++ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetDriverDramAddrHigh,
+ upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
+ "[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL);
+- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
++ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetDriverDramAddrLow,
+ lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
+ "[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!",
+ return -EINVAL);
+- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
++ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_TransferTableSmu2Dram,
+ table_id) == 0,
+ "[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
+@@ -234,17 +91,17 @@ int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr,
+ memcpy(priv->smu_tables.entry[table_id].table, table,
+ priv->smu_tables.entry[table_id].size);
+
+- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
++ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetDriverDramAddrHigh,
+ upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
+ "[CopyTableToSMC] Attempt to Set Dram Addr High Failed!",
+ return -EINVAL;);
+- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
++ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetDriverDramAddrLow,
+ lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
+ "[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!",
+ return -EINVAL);
+- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
++ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_TransferTableDram2Smu,
+ table_id) == 0,
+ "[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!",
+@@ -262,20 +119,20 @@ int vega12_enable_smc_features(struct pp_hwmgr *hwmgr,
+ smu_features_high = (uint32_t)((feature_mask & SMU_FEATURES_HIGH_MASK) >> SMU_FEATURES_HIGH_SHIFT);
+
+ if (enable) {
+- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
++ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low) == 0,
+ "[EnableDisableSMCFeatures] Attemp to enable SMU features Low failed!",
+ return -EINVAL);
+- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
++ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high) == 0,
+ "[EnableDisableSMCFeatures] Attemp to enable SMU features High failed!",
+ return -EINVAL);
+ } else {
+- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
++ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low) == 0,
+ "[EnableDisableSMCFeatures] Attemp to disable SMU features Low failed!",
+ return -EINVAL);
+- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
++ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high) == 0,
+ "[EnableDisableSMCFeatures] Attemp to disable SMU features High failed!",
+ return -EINVAL);
+@@ -292,22 +149,17 @@ int vega12_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
+ if (features_enabled == NULL)
+ return -EINVAL;
+
+- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc(hwmgr,
++ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetEnabledSmuFeaturesLow) == 0,
+ "[GetEnabledSMCFeatures] Attemp to get SMU features Low failed!",
+ return -EINVAL);
+- PP_ASSERT_WITH_CODE(vega12_read_arg_from_smc(hwmgr,
+- &smc_features_low) == 0,
+- "[GetEnabledSMCFeatures] Attemp to read SMU features Low argument failed!",
+- return -EINVAL);
+- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc(hwmgr,
++ smc_features_low = smu9_get_argument(hwmgr);
++
++ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetEnabledSmuFeaturesHigh) == 0,
+ "[GetEnabledSMCFeatures] Attemp to get SMU features High failed!",
+ return -EINVAL);
+- PP_ASSERT_WITH_CODE(vega12_read_arg_from_smc(hwmgr,
+- &smc_features_high) == 0,
+- "[GetEnabledSMCFeatures] Attemp to read SMU features High argument failed!",
+- return -EINVAL);
++ smc_features_high = smu9_get_argument(hwmgr);
+
+ *features_enabled = ((((uint64_t)smc_features_low << SMU_FEATURES_LOW_SHIFT) & SMU_FEATURES_LOW_MASK) |
+ (((uint64_t)smc_features_high << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK));
+@@ -333,10 +185,10 @@ static int vega12_set_tools_address(struct pp_hwmgr *hwmgr)
+ (struct vega12_smumgr *)(hwmgr->smu_backend);
+
+ if (priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr) {
+- if (!vega12_send_msg_to_smc_with_parameter(hwmgr,
++ if (!smu9_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetToolsDramAddrHigh,
+ upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr)))
+- vega12_send_msg_to_smc_with_parameter(hwmgr,
++ smu9_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetToolsDramAddrLow,
+ lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr));
+ }
+@@ -490,7 +342,7 @@ static int vega12_smu_fini(struct pp_hwmgr *hwmgr)
+
+ static int vega12_start_smu(struct pp_hwmgr *hwmgr)
+ {
+- PP_ASSERT_WITH_CODE(vega12_is_smc_ram_running(hwmgr),
++ PP_ASSERT_WITH_CODE(smu9_is_smc_ram_running(hwmgr),
+ "SMC is not running!",
+ return -EINVAL);
+
+@@ -504,9 +356,10 @@ const struct pp_smumgr_func vega12_smu_funcs = {
+ .smu_fini = &vega12_smu_fini,
+ .start_smu = &vega12_start_smu,
+ .request_smu_load_specific_fw = NULL,
+- .send_msg_to_smc = &vega12_send_msg_to_smc,
+- .send_msg_to_smc_with_parameter = &vega12_send_msg_to_smc_with_parameter,
++ .send_msg_to_smc = &smu9_send_msg_to_smc,
++ .send_msg_to_smc_with_parameter = &smu9_send_msg_to_smc_with_parameter,
+ .download_pptable_settings = NULL,
+ .upload_pptable_settings = NULL,
+ .is_dpm_running = vega12_is_dpm_running,
++ .get_argument = smu9_get_argument,
+ };
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h
+index 2810d38..b285cbc 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h
+@@ -48,7 +48,6 @@ struct vega12_smumgr {
+ #define SMU_FEATURES_HIGH_MASK 0xFFFFFFFF00000000
+ #define SMU_FEATURES_HIGH_SHIFT 32
+
+-int vega12_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg);
+ int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
+ uint8_t *table, int16_t table_id);
+ int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4950-drm-amdgpu-pp-switch-smu-callback-type-for-get_argum.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4950-drm-amdgpu-pp-switch-smu-callback-type-for-get_argum.patch
new file mode 100644
index 00000000..d34dfa01
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4950-drm-amdgpu-pp-switch-smu-callback-type-for-get_argum.patch
@@ -0,0 +1,118 @@
+From 2b76af6798d25fe3a9249e250b0f3ebc690a2ddf Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 12 Jul 2018 14:59:22 -0500
+Subject: [PATCH 4950/5725] drm/amdgpu/pp: switch smu callback type for
+ get_argument()
+
+Return a uint32_t rather than an int to properly reflect
+what the function actually returns.
+
+Reviewed-by: Rex Zhu <rezhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 2 +-
+ drivers/gpu/drm/amd/powerplay/inc/smumgr.h | 2 +-
+ drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c | 2 +-
+ drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c | 4 ++--
+ drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c | 2 +-
+ drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.h | 2 +-
+ drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | 2 +-
+ 7 files changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+index b3363f2..d3d9626 100644
+--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+@@ -194,7 +194,7 @@ struct pp_smumgr_func {
+ int (*request_smu_load_fw)(struct pp_hwmgr *hwmgr);
+ int (*request_smu_load_specific_fw)(struct pp_hwmgr *hwmgr,
+ uint32_t firmware);
+- int (*get_argument)(struct pp_hwmgr *hwmgr);
++ uint32_t (*get_argument)(struct pp_hwmgr *hwmgr);
+ int (*send_msg_to_smc)(struct pp_hwmgr *hwmgr, uint16_t msg);
+ int (*send_msg_to_smc_with_parameter)(struct pp_hwmgr *hwmgr,
+ uint16_t msg, uint32_t parameter);
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
+index 89dfbf5..82550a8 100644
+--- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
+@@ -80,7 +80,7 @@ enum SMU10_TABLE_ID {
+ SMU10_CLOCKTABLE,
+ };
+
+-extern int smum_get_argument(struct pp_hwmgr *hwmgr);
++extern uint32_t smum_get_argument(struct pp_hwmgr *hwmgr);
+
+ extern int smum_download_powerplay_table(struct pp_hwmgr *hwmgr, void **table);
+
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
+index 0a563f6..bb07d43 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
+@@ -68,7 +68,7 @@ static int smu10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
+ return 0;
+ }
+
+-static int smu10_read_arg_from_smc(struct pp_hwmgr *hwmgr)
++static uint32_t smu10_read_arg_from_smc(struct pp_hwmgr *hwmgr)
+ {
+ struct amdgpu_device *adev = hwmgr->adev;
+
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
+index c861d30..f7e3bc2 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
+@@ -52,10 +52,10 @@ static const enum smu8_scratch_entry firmware_list[] = {
+ SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G,
+ };
+
+-static int smu8_get_argument(struct pp_hwmgr *hwmgr)
++static uint32_t smu8_get_argument(struct pp_hwmgr *hwmgr)
+ {
+ if (hwmgr == NULL || hwmgr->device == NULL)
+- return -EINVAL;
++ return 0;
+
+ return cgs_read_register(hwmgr->device,
+ mmSMU_MP1_SRBM2P_ARG_0);
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c
+index aad8f07..079fc8e 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c
+@@ -142,7 +142,7 @@ int smu9_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
+ return 0;
+ }
+
+-int smu9_get_argument(struct pp_hwmgr *hwmgr)
++uint32_t smu9_get_argument(struct pp_hwmgr *hwmgr)
+ {
+ struct amdgpu_device *adev = hwmgr->adev;
+
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.h
+index a8da281..1462279 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.h
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.h
+@@ -27,6 +27,6 @@ bool smu9_is_smc_ram_running(struct pp_hwmgr *hwmgr);
+ int smu9_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg);
+ int smu9_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
+ uint16_t msg, uint32_t parameter);
+-int smu9_get_argument(struct pp_hwmgr *hwmgr);
++uint32_t smu9_get_argument(struct pp_hwmgr *hwmgr);
+
+ #endif
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+index c983793..99d5e4f 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+@@ -96,7 +96,7 @@ int smum_process_firmware_header(struct pp_hwmgr *hwmgr)
+ return 0;
+ }
+
+-int smum_get_argument(struct pp_hwmgr *hwmgr)
++uint32_t smum_get_argument(struct pp_hwmgr *hwmgr)
+ {
+ if (NULL != hwmgr->smumgr_funcs->get_argument)
+ return hwmgr->smumgr_funcs->get_argument(hwmgr);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4951-Revert-drm-amd-powerplay-fix-performance-drop-on-Veg.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4951-Revert-drm-amd-powerplay-fix-performance-drop-on-Veg.patch
new file mode 100644
index 00000000..a178b8c4
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4951-Revert-drm-amd-powerplay-fix-performance-drop-on-Veg.patch
@@ -0,0 +1,36 @@
+From d0e6deaf7a3300b606f92b7375d17abe7f9355dd Mon Sep 17 00:00:00 2001
+From: Eric Huang <JinhuiEric.Huang@amd.com>
+Date: Fri, 13 Jul 2018 15:05:10 -0400
+Subject: [PATCH 4951/5725] Revert "drm/amd/powerplay: fix performance drop on
+ Vega10"
+
+This reverts commit b87079ec7b4d38efee015367315958ce5495ba93.
+
+The SMU FW team asked to remove this version-specific code.
+
+Signed-off-by: Eric Huang <JinHuiEric.Huang@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 5 -----
+ 1 file changed, 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+index e94bffe..ca4e1d9 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+@@ -2901,11 +2901,6 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+
+ vega10_enable_disable_PCC_limit_feature(hwmgr, true);
+
+- if ((hwmgr->smu_version == 0x001c2c00) ||
+- (hwmgr->smu_version == 0x001c2d00))
+- smum_send_msg_to_smc_with_parameter(hwmgr,
+- PPSMC_MSG_UpdatePkgPwrPidAlpha, 1);
+-
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_ConfigureTelemetry, data->config_telemetry);
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4952-drm-amdgpu-Allow-to-create-BO-lists-in-CS-ioctl-v3.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4952-drm-amdgpu-Allow-to-create-BO-lists-in-CS-ioctl-v3.patch
new file mode 100644
index 00000000..e7273ab7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4952-drm-amdgpu-Allow-to-create-BO-lists-in-CS-ioctl-v3.patch
@@ -0,0 +1,302 @@
+From d0a6b493aa51dc3773a4def2550c655f6452f876 Mon Sep 17 00:00:00 2001
+From: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Date: Fri, 6 Jul 2018 14:16:54 -0400
+Subject: [PATCH 4952/5725] drm/amdgpu: Allow to create BO lists in CS ioctl v3
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This change supports a MESA performance optimization.
+Modify the CS IOCTL so its input can be a command buffer plus an array of
+buffer handles that are used to create a temporary bo list which is then
+destroyed when the IOCTL completes.
+This saves calling the BO_LIST create and destroy IOCTLs from MESA
+and thereby improves performance.
+
+v2: Avoid inserting the temp list into idr struct.
+
+v3:
+Remove idr allocation from amdgpu_bo_list_create.
+Remove useless argument from amdgpu_cs_parser_fini
+Minor cosmetic stuff.
+
+v4: Revert amdgpu_bo_list_destroy back to static
+
+Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 8 +++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 88 +++++++++++++++++------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 48 +++++++++++++++-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 3 +-
+ 4 files changed, 107 insertions(+), 40 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 2fa7976..bb1062e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -752,6 +752,14 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
+ struct list_head *validated);
+ void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
+ void amdgpu_bo_list_free(struct amdgpu_bo_list *list);
++int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
++ struct drm_amdgpu_bo_list_entry **info_param);
++
++int amdgpu_bo_list_create(struct amdgpu_device *adev,
++ struct drm_file *filp,
++ struct drm_amdgpu_bo_list_entry *info,
++ unsigned num_entries,
++ struct amdgpu_bo_list **list);
+
+ /*
+ * GFX stuff
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+index 4742fce..6ad79dd6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+@@ -55,15 +55,15 @@ static void amdgpu_bo_list_release_rcu(struct kref *ref)
+ kfree_rcu(list, rhead);
+ }
+
+-static int amdgpu_bo_list_create(struct amdgpu_device *adev,
++int amdgpu_bo_list_create(struct amdgpu_device *adev,
+ struct drm_file *filp,
+ struct drm_amdgpu_bo_list_entry *info,
+ unsigned num_entries,
+- int *id)
++ struct amdgpu_bo_list **list_out)
+ {
+- int r;
+- struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_bo_list *list;
++ int r;
++
+
+ list = kzalloc(sizeof(struct amdgpu_bo_list), GFP_KERNEL);
+ if (!list)
+@@ -78,16 +78,7 @@ static int amdgpu_bo_list_create(struct amdgpu_device *adev,
+ return r;
+ }
+
+- /* idr alloc should be called only after initialization of bo list. */
+- mutex_lock(&fpriv->bo_list_lock);
+- r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL);
+- mutex_unlock(&fpriv->bo_list_lock);
+- if (r < 0) {
+- amdgpu_bo_list_free(list);
+- return r;
+- }
+- *id = r;
+-
++ *list_out = list;
+ return 0;
+ }
+
+@@ -262,55 +253,79 @@ void amdgpu_bo_list_free(struct amdgpu_bo_list *list)
+ kfree(list);
+ }
+
+-int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
+- struct drm_file *filp)
++int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
++ struct drm_amdgpu_bo_list_entry **info_param)
+ {
++ const void __user *uptr = u64_to_user_ptr(in->bo_info_ptr);
+ const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry);
+-
+- struct amdgpu_device *adev = dev->dev_private;
+- struct amdgpu_fpriv *fpriv = filp->driver_priv;
+- union drm_amdgpu_bo_list *args = data;
+- uint32_t handle = args->in.list_handle;
+- const void __user *uptr = u64_to_user_ptr(args->in.bo_info_ptr);
+-
+ struct drm_amdgpu_bo_list_entry *info;
+- struct amdgpu_bo_list *list;
+-
+ int r;
+
+- info = kvmalloc_array(args->in.bo_number,
+- sizeof(struct drm_amdgpu_bo_list_entry), GFP_KERNEL);
++ info = kvmalloc_array(in->bo_number, info_size, GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ /* copy the handle array from userspace to a kernel buffer */
+ r = -EFAULT;
+- if (likely(info_size == args->in.bo_info_size)) {
+- unsigned long bytes = args->in.bo_number *
+- args->in.bo_info_size;
++ if (likely(info_size == in->bo_info_size)) {
++ unsigned long bytes = in->bo_number *
++ in->bo_info_size;
+
+ if (copy_from_user(info, uptr, bytes))
+ goto error_free;
+
+ } else {
+- unsigned long bytes = min(args->in.bo_info_size, info_size);
++ unsigned long bytes = min(in->bo_info_size, info_size);
+ unsigned i;
+
+- memset(info, 0, args->in.bo_number * info_size);
+- for (i = 0; i < args->in.bo_number; ++i) {
++ memset(info, 0, in->bo_number * info_size);
++ for (i = 0; i < in->bo_number; ++i) {
+ if (copy_from_user(&info[i], uptr, bytes))
+ goto error_free;
+
+- uptr += args->in.bo_info_size;
++ uptr += in->bo_info_size;
+ }
+ }
+
++ *info_param = info;
++ return 0;
++
++error_free:
++ kvfree(info);
++ return r;
++}
++
++int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *filp)
++{
++ struct amdgpu_device *adev = dev->dev_private;
++ struct amdgpu_fpriv *fpriv = filp->driver_priv;
++ union drm_amdgpu_bo_list *args = data;
++ uint32_t handle = args->in.list_handle;
++ struct drm_amdgpu_bo_list_entry *info = NULL;
++ struct amdgpu_bo_list *list;
++ int r;
++
++ r = amdgpu_bo_create_list_entry_array(&args->in, &info);
++ if (r)
++ goto error_free;
++
+ switch (args->in.operation) {
+ case AMDGPU_BO_LIST_OP_CREATE:
+ r = amdgpu_bo_list_create(adev, filp, info, args->in.bo_number,
+- &handle);
++ &list);
+ if (r)
+ goto error_free;
++
++ mutex_lock(&fpriv->bo_list_lock);
++ r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL);
++ mutex_unlock(&fpriv->bo_list_lock);
++ if (r < 0) {
++ amdgpu_bo_list_free(list);
++ return r;
++ }
++
++ handle = r;
+ break;
+
+ case AMDGPU_BO_LIST_OP_DESTROY:
+@@ -344,6 +359,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
+ return 0;
+
+ error_free:
+- kvfree(info);
++ if (info)
++ kvfree(info);
+ return r;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index a4aaf37..56da0ef 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -66,11 +66,35 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
+ return 0;
+ }
+
+-static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
++static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
++ struct drm_amdgpu_bo_list_in *data)
++{
++ int r;
++ struct drm_amdgpu_bo_list_entry *info = NULL;
++
++ r = amdgpu_bo_create_list_entry_array(data, &info);
++ if (r)
++ return r;
++
++ r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
++ &p->bo_list);
++ if (r)
++ goto error_free;
++
++ kvfree(info);
++ return 0;
++
++error_free:
++ if (info)
++ kvfree(info);
++
++ return r;
++}
++
++static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs)
+ {
+ struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+ struct amdgpu_vm *vm = &fpriv->vm;
+- union drm_amdgpu_cs *cs = data;
+ uint64_t *chunk_array_user;
+ uint64_t *chunk_array;
+ unsigned size, num_ibs = 0;
+@@ -164,6 +188,19 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
+
+ break;
+
++ case AMDGPU_CHUNK_ID_BO_HANDLES:
++ size = sizeof(struct drm_amdgpu_bo_list_in);
++ if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
++ ret = -EINVAL;
++ goto free_partial_kdata;
++ }
++
++ ret = amdgpu_cs_bo_handles_chunk(p, p->chunks[i].kdata);
++ if (ret)
++ goto free_partial_kdata;
++
++ break;
++
+ case AMDGPU_CHUNK_ID_DEPENDENCIES:
+ case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
+ case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
+@@ -534,7 +571,12 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
+
+ INIT_LIST_HEAD(&p->validated);
+
+- p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
++ /* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
++ if (!p->bo_list)
++ p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
++ else
++ mutex_lock(&p->bo_list->lock);
++
+ if (p->bo_list) {
+ amdgpu_bo_list_get_list(p->bo_list, &p->validated);
+ if (p->bo_list->first_userptr != p->bo_list->num_entries)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 966e337..d587428 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -69,9 +69,10 @@
+ * - 3.24.0 - Add high priority compute support for gfx9
+ * - 3.25.0 - Add support for sensor query info (stable pstate sclk/mclk).
+ * - 3.26.0 - GFX9: Process AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE.
++ * - 3.27.0 - Add new chunk to to AMDGPU_CS to enable BO_LIST creation.
+ */
+ #define KMS_DRIVER_MAJOR 3
+-#define KMS_DRIVER_MINOR 26
++#define KMS_DRIVER_MINOR 27
+ #define KMS_DRIVER_PATCHLEVEL 0
+
+ #define AMDGPU_VERSION "18.30.2.15"
+--
+2.7.4
+
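For illustration, the hypothetical userspace sketch below shows how the new AMDGPU_CHUNK_ID_BO_HANDLES chunk might be filled in, so the kernel creates and tears down the BO list as part of the CS ioctl itself rather than via separate BO_LIST create/destroy calls. Field and constant names follow the patch and the amdgpu uapi header; the helper name, include path and the surrounding submission code are assumptions and are not part of the patch.

#include <stdint.h>
#include <drm/amdgpu_drm.h>

/* Hypothetical userspace helper: describe the BO handle array inline so the
 * kernel builds a temporary BO list for this submission and frees it when
 * the submission completes. */
static void fill_bo_handles_chunk(struct drm_amdgpu_cs_chunk *chunk,
                                  struct drm_amdgpu_bo_list_in *bo_list_in,
                                  struct drm_amdgpu_bo_list_entry *entries,
                                  uint32_t count)
{
        /* Laid out exactly as amdgpu_bo_create_list_entry_array() reads it. */
        bo_list_in->bo_number    = count;
        bo_list_in->bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
        bo_list_in->bo_info_ptr  = (uintptr_t)entries;

        /* Wrap it in a CS chunk; the chunk is then appended to the chunk
         * array handed to DRM_IOCTL_AMDGPU_CS. */
        chunk->chunk_id   = AMDGPU_CHUNK_ID_BO_HANDLES;
        chunk->length_dw  = sizeof(*bo_list_in) / 4;
        chunk->chunk_data = (uintptr_t)bo_list_in;
}
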
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4953-drm-amd-display-Add-headers-for-hardcoded-1d-luts.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4953-drm-amd-display-Add-headers-for-hardcoded-1d-luts.patch
new file mode 100644
index 00000000..3f313ac6
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4953-drm-amd-display-Add-headers-for-hardcoded-1d-luts.patch
@@ -0,0 +1,77 @@
+From ccf3a35797a76e9a030b0f0a19dc2be9c8da4b7c Mon Sep 17 00:00:00 2001
+From: Vitaly Prosyak <vitaly.prosyak@amd.com>
+Date: Fri, 15 Jun 2018 08:34:10 -0500
+Subject: [PATCH 4953/5725] drm/amd/display: Add headers for hardcoded 1d luts.
+
+Hard-coded LUTs are needed since complex algorithms are used for
+color and tone mapping. Add the headers for future use.
+
+Signed-off-by: Vitaly Prosyak <vitaly.prosyak@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../gpu/drm/amd/display/modules/color/luts_1d.h | 51 ++++++++++++++++++++++
+ 1 file changed, 51 insertions(+)
+ create mode 100644 drivers/gpu/drm/amd/display/modules/color/luts_1d.h
+
+diff --git a/drivers/gpu/drm/amd/display/modules/color/luts_1d.h b/drivers/gpu/drm/amd/display/modules/color/luts_1d.h
+new file mode 100644
+index 0000000..66b1fad
+--- /dev/null
++++ b/drivers/gpu/drm/amd/display/modules/color/luts_1d.h
+@@ -0,0 +1,51 @@
++/*
++ * Copyright 2016 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++#ifndef LUTS_1D_H
++#define LUTS_1D_H
++
++#include "hw_shared.h"
++
++struct point_config {
++ uint32_t custom_float_x;
++ uint32_t custom_float_y;
++ uint32_t custom_float_slope;
++};
++
++struct lut_point {
++ uint32_t red;
++ uint32_t green;
++ uint32_t blue;
++ uint32_t delta_red;
++ uint32_t delta_green;
++ uint32_t delta_blue;
++};
++
++struct pwl_1dlut_parameter {
++ struct gamma_curve arr_curve_points[34];
++ struct point_config arr_points[2];
++ struct lut_point rgb_resulted[256];
++ uint32_t hw_points_num;
++};
++#endif // LUTS_1D_H
+--
+2.7.4
+
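The header above only declares data structures. As a rough usage sketch (hypothetical, not part of the patch), a caller could fill the RGB points of a pwl_1dlut_parameter with a simple ramp as shown below; how the values are encoded for hardware, and the gamma_curve segment layout from hw_shared.h, are deliberately left out.

#include <stdint.h>
#include "luts_1d.h"

/* Hypothetical sketch: populate a pwl_1dlut_parameter with a trivial linear
 * ramp. Only fields declared in luts_1d.h are touched; the values are
 * placeholders, not a hardware-ready encoding. */
static void fill_linear_1dlut(struct pwl_1dlut_parameter *p)
{
        uint32_t i;

        p->hw_points_num = 256;
        for (i = 0; i < p->hw_points_num; i++) {
                p->rgb_resulted[i].red   = i;
                p->rgb_resulted[i].green = i;
                p->rgb_resulted[i].blue  = i;
                /* step to the next point, used by the piecewise-linear fit */
                p->rgb_resulted[i].delta_red   = 1;
                p->rgb_resulted[i].delta_green = 1;
                p->rgb_resulted[i].delta_blue  = 1;
        }
}
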
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4954-drm-amd-display-Refactor-SDR-cursor-boosting-in-HDR-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4954-drm-amd-display-Refactor-SDR-cursor-boosting-in-HDR-.patch
new file mode 100644
index 00000000..198797bf
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4954-drm-amd-display-Refactor-SDR-cursor-boosting-in-HDR-.patch
@@ -0,0 +1,320 @@
+From 97dc7e46308618255a396fe33c1f141f16fc6793 Mon Sep 17 00:00:00 2001
+From: Krunoslav Kovac <Krunoslav.Kovac@amd.com>
+Date: Wed, 27 Jun 2018 18:23:37 -0400
+Subject: [PATCH 4954/5725] drm/amd/display: Refactor SDR cursor boosting in
+ HDR mode
+
+[Why]
+Cursor boosting is done via the CNVC_CUR register, which belongs to DPP, not HUBP.
+The previous commit implemented it in HUBP functions,
+which also broke the diags tests.
+
+[How]
+1. Undo original commit as well as Eric's diags test fix, almost completely
+2. Move programming to DPP and call via new dc_stream function
+3. Also remove cur_rom_en from dpp_cursor_attributes and program it
+as part of the normal cursor attributes, since it depends on the cursor color format
+
+Signed-off-by: Krunoslav Kovac <Krunoslav.Kovac@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 1 +
+ drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 5 ++++
+ drivers/gpu/drm/amd/display/dc/dc_stream.h | 1 +
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | 13 ++++++++++
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h | 14 ++++++++--
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 15 -----------
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 12 +++------
+ .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 30 +++++++++++++++++++++-
+ drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h | 3 +++
+ drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 1 +
+ 10 files changed, 69 insertions(+), 26 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+index 0223f48..4717330 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+@@ -214,6 +214,7 @@ bool dc_stream_set_cursor_attributes(
+ }
+
+ core_dc->hwss.set_cursor_attribute(pipe_ctx);
++ core_dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
+ }
+
+ if (pipe_to_program)
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+index e1c0af7..e9c1ec5 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+@@ -500,6 +500,11 @@ struct dc_cursor_attributes {
+ union dc_cursor_attribute_flags attribute_flags;
+ };
+
++struct dpp_cursor_attributes {
++ int bias;
++ int scale;
++};
++
+ /* OPP */
+
+ enum dc_color_space {
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+index e7a05d2..90ee911 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+@@ -106,6 +106,7 @@ struct dc_stream_state {
+
+ struct dc_cursor_attributes cursor_attributes;
+ struct dc_cursor_position cursor_position;
++ uint32_t sdr_white_level; // for boosting (SDR) cursor in HDR mode
+
+ /* from stream struct */
+ struct kref refcount;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+index a558efa..bf8b68f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+@@ -459,6 +459,18 @@ void dpp1_set_cursor_position(
+
+ }
+
++void dpp1_cnv_set_optional_cursor_attributes(
++ struct dpp *dpp_base,
++ struct dpp_cursor_attributes *attr)
++{
++ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
++
++ if (attr) {
++ REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_BIAS, attr->bias);
++ REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, attr->scale);
++ }
++}
++
+ void dpp1_dppclk_control(
+ struct dpp *dpp_base,
+ bool dppclk_div,
+@@ -499,6 +511,7 @@ static const struct dpp_funcs dcn10_dpp_funcs = {
+ .dpp_full_bypass = dpp1_full_bypass,
+ .set_cursor_attributes = dpp1_set_cursor_attributes,
+ .set_cursor_position = dpp1_set_cursor_position,
++ .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes,
+ .dpp_dppclk_control = dpp1_dppclk_control,
+ .dpp_set_hdr_multiplier = dpp1_set_hdr_multiplier,
+ };
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+index e862caf..e2889e6 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+@@ -119,6 +119,7 @@
+ SRI(CURSOR0_CONTROL, CNVC_CUR, id), \
+ SRI(CURSOR0_COLOR0, CNVC_CUR, id), \
+ SRI(CURSOR0_COLOR1, CNVC_CUR, id), \
++ SRI(CURSOR0_FP_SCALE_BIAS, CNVC_CUR, id), \
+ SRI(DPP_CONTROL, DPP_TOP, id), \
+ SRI(CM_HDR_MULT_COEF, CM, id)
+
+@@ -324,6 +325,8 @@
+ TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ENABLE, mask_sh), \
+ TF_SF(CNVC_CUR0_CURSOR0_COLOR0, CUR0_COLOR0, mask_sh), \
+ TF_SF(CNVC_CUR0_CURSOR0_COLOR1, CUR0_COLOR1, mask_sh), \
++ TF_SF(CNVC_CUR0_CURSOR0_FP_SCALE_BIAS, CUR0_FP_BIAS, mask_sh), \
++ TF_SF(CNVC_CUR0_CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, mask_sh), \
+ TF_SF(DPP_TOP0_DPP_CONTROL, DPP_CLOCK_ENABLE, mask_sh), \
+ TF_SF(CM0_CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, mask_sh)
+
+@@ -1076,7 +1079,9 @@
+ type CUR0_COLOR1; \
+ type DPPCLK_RATE_CONTROL; \
+ type DPP_CLOCK_ENABLE; \
+- type CM_HDR_MULT_COEF;
++ type CM_HDR_MULT_COEF; \
++ type CUR0_FP_BIAS; \
++ type CUR0_FP_SCALE;
+
+ struct dcn_dpp_shift {
+ TF_REG_FIELD_LIST(uint8_t)
+@@ -1329,7 +1334,8 @@ struct dcn_dpp_mask {
+ uint32_t CURSOR0_COLOR0; \
+ uint32_t CURSOR0_COLOR1; \
+ uint32_t DPP_CONTROL; \
+- uint32_t CM_HDR_MULT_COEF;
++ uint32_t CM_HDR_MULT_COEF; \
++ uint32_t CURSOR0_FP_SCALE_BIAS;
+
+ struct dcn_dpp_registers {
+ DPP_COMMON_REG_VARIABLE_LIST
+@@ -1370,6 +1376,10 @@ void dpp1_set_cursor_position(
+ const struct dc_cursor_mi_param *param,
+ uint32_t width);
+
++void dpp1_cnv_set_optional_cursor_attributes(
++ struct dpp *dpp_base,
++ struct dpp_cursor_attributes *attr);
++
+ bool dpp1_dscl_is_lb_conf_valid(
+ int ceil_vratio,
+ int num_partitions,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+index 5c4ad8a..d6dc61e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+@@ -27,7 +27,6 @@
+ #include "reg_helper.h"
+ #include "basics/conversion.h"
+ #include "dcn10_hubp.h"
+-#include "custom_float.h"
+
+ #define REG(reg)\
+ hubp1->hubp_regs->reg
+@@ -1039,18 +1038,6 @@ void hubp1_cursor_set_attributes(
+ enum cursor_pitch hw_pitch = hubp1_get_cursor_pitch(attr->pitch);
+ enum cursor_lines_per_chunk lpc = hubp1_get_lines_per_chunk(
+ attr->width, attr->color_format);
+- struct fixed31_32 multiplier;
+- uint32_t hw_mult = 0x3c00; // 1.0 default multiplier
+- struct custom_float_format fmt;
+-
+- fmt.exponenta_bits = 5;
+- fmt.mantissa_bits = 10;
+- fmt.sign = true;
+-
+- if (attr->sdr_white_level > 80) {
+- multiplier = dc_fixpt_from_fraction(attr->sdr_white_level, 80);
+- convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
+- }
+
+ hubp->curs_attr = *attr;
+
+@@ -1073,8 +1060,6 @@ void hubp1_cursor_set_attributes(
+ CURSOR0_DST_Y_OFFSET, 0,
+ /* used to shift the cursor chunk request deadline */
+ CURSOR0_CHUNK_HDL_ADJUST, 3);
+-
+- REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, hw_mult);
+ }
+
+ void hubp1_cursor_set_position(
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+index 9991da5..f689fea 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+@@ -133,8 +133,7 @@
+ SRI(CURSOR_CONTROL, CURSOR, id), \
+ SRI(CURSOR_POSITION, CURSOR, id), \
+ SRI(CURSOR_HOT_SPOT, CURSOR, id), \
+- SRI(CURSOR_DST_OFFSET, CURSOR, id), \
+- SRI(CURSOR0_FP_SCALE_BIAS, CNVC_CUR, id)
++ SRI(CURSOR_DST_OFFSET, CURSOR, id)
+
+ #define HUBP_COMMON_REG_VARIABLE_LIST \
+ uint32_t DCHUBP_CNTL; \
+@@ -242,8 +241,7 @@
+ uint32_t CURSOR_POSITION; \
+ uint32_t CURSOR_HOT_SPOT; \
+ uint32_t CURSOR_DST_OFFSET; \
+- uint32_t HUBP_CLK_CNTL; \
+- uint32_t CURSOR0_FP_SCALE_BIAS
++ uint32_t HUBP_CLK_CNTL
+
+ #define HUBP_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+@@ -426,8 +424,7 @@
+ HUBP_SF(CURSOR0_CURSOR_POSITION, CURSOR_Y_POSITION, mask_sh), \
+ HUBP_SF(CURSOR0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_X, mask_sh), \
+ HUBP_SF(CURSOR0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_Y, mask_sh), \
+- HUBP_SF(CURSOR0_CURSOR_DST_OFFSET, CURSOR_DST_X_OFFSET, mask_sh), \
+- HUBP_SF(CNVC_CUR0_CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, mask_sh)
++ HUBP_SF(CURSOR0_CURSOR_DST_OFFSET, CURSOR_DST_X_OFFSET, mask_sh)
+
+ #define DCN_HUBP_REG_FIELD_LIST(type) \
+ type HUBP_BLANK_EN;\
+@@ -618,8 +615,7 @@
+ type CURSOR_HOT_SPOT_X; \
+ type CURSOR_HOT_SPOT_Y; \
+ type CURSOR_DST_X_OFFSET; \
+- type OUTPUT_FP; \
+- type CUR0_FP_SCALE
++ type OUTPUT_FP
+
+ struct dcn_mi_registers {
+ HUBP_COMMON_REG_VARIABLE_LIST;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 5b99a83..4795974 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -2578,6 +2578,33 @@ static void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
+ pipe_ctx->plane_res.dpp, attributes->color_format);
+ }
+
++static void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
++{
++ uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
++ struct fixed31_32 multiplier;
++ struct dpp_cursor_attributes opt_attr = { 0 };
++ uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
++ struct custom_float_format fmt;
++
++ if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
++ return;
++
++ fmt.exponenta_bits = 5;
++ fmt.mantissa_bits = 10;
++ fmt.sign = true;
++
++ if (sdr_white_level > 80) {
++ multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
++ convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
++ }
++
++ opt_attr.scale = hw_scale;
++ opt_attr.bias = 0;
++
++ pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
++ pipe_ctx->plane_res.dpp, &opt_attr);
++}
++
+ static const struct hw_sequencer_funcs dcn10_funcs = {
+ .program_gamut_remap = program_gamut_remap,
+ .program_csc_matrix = program_csc_matrix,
+@@ -2625,7 +2652,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
+ .edp_power_control = hwss_edp_power_control,
+ .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
+ .set_cursor_position = dcn10_set_cursor_position,
+- .set_cursor_attribute = dcn10_set_cursor_attribute
++ .set_cursor_attribute = dcn10_set_cursor_attribute,
++ .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level
+ };
+
+
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+index 582458f..74ad94b 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+@@ -151,6 +151,9 @@ struct dpp_funcs {
+ void (*dpp_set_hdr_multiplier)(
+ struct dpp *dpp_base,
+ uint32_t multiplier);
++ void (*set_optional_cursor_attributes)(
++ struct dpp *dpp_base,
++ struct dpp_cursor_attributes *attr);
+
+ void (*dpp_dppclk_control)(
+ struct dpp *dpp_base,
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+index c2277d1..a14ce4d 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+@@ -223,6 +223,7 @@ struct hw_sequencer_funcs {
+
+ void (*set_cursor_position)(struct pipe_ctx *pipe);
+ void (*set_cursor_attribute)(struct pipe_ctx *pipe);
++ void (*set_cursor_sdr_white_level)(struct pipe_ctx *pipe);
+
+ };
+
+--
+2.7.4
+
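For reference, the scale programmed by dcn10_set_cursor_sdr_white_level() is the ratio sdr_white_level / 80 encoded in a 1-sign/5-exponent/10-mantissa custom float (the same bit layout as IEEE half precision, hence the 1.0 default of 0x3C00). The helper below is a hypothetical stand-alone re-implementation of that encoding for positive normal values, with a few worked values; it is not the driver's convert_to_custom_float_format().

#include <math.h>
#include <stdint.h>

/* Hypothetical sketch of the cursor-boost encoding: positive normal values
 * only, 5 exponent bits (bias 15), 10 mantissa bits. */
static uint16_t encode_boost(double v)
{
        int e = (int)floor(log2(v));                         /* unbiased exponent */
        uint16_t mant = (uint16_t)round((v / ldexp(1.0, e) - 1.0) * 1024.0);

        return (uint16_t)(((e + 15) << 10) | mant);
}

/* encode_boost(1.0) == 0x3C00  (sdr_white_level <= 80 nits, no boost)
 * encode_boost(2.0) == 0x4000  (sdr_white_level == 160 nits)
 * encode_boost(3.0) == 0x4200  (sdr_white_level == 240 nits) */
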
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4955-drm-amd-display-add-HDR-visual-confirm.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4955-drm-amd-display-add-HDR-visual-confirm.patch
new file mode 100644
index 00000000..2b016a64
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4955-drm-amd-display-add-HDR-visual-confirm.patch
@@ -0,0 +1,205 @@
+From 11f622f8b38a4bc86615f64edfe93d63631a9964 Mon Sep 17 00:00:00 2001
+From: Gloria Li <geling.li@amd.com>
+Date: Tue, 3 Jul 2018 14:39:22 -0400
+Subject: [PATCH 4955/5725] drm/amd/display: add HDR visual confirm
+
+[Why]
+The testing team wants a way to tell whether HDR is on or not.
+
+[How]
+Program the overscan color to visually indicate the HDR state of the top-most plane.
+
+Signed-off-by: Gloria Li <geling.li@amd.com>
+Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc.h | 11 ++++-
+ drivers/gpu/drm/amd/display/dc/dce/dce_transform.c | 2 +-
+ .../amd/display/dc/dce110/dce110_hw_sequencer.c | 2 +-
+ .../drm/amd/display/dc/dce110/dce110_transform_v.c | 2 +-
+ .../gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c | 7 ++-
+ .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 50 ++++++++++++++++++++--
+ 6 files changed, 63 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index fc1aa32..966e9b9 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -170,6 +170,12 @@ struct dc_config {
+ bool disable_disp_pll_sharing;
+ };
+
++enum visual_confirm {
++ VISUAL_CONFIRM_DISABLE = 0,
++ VISUAL_CONFIRM_SURFACE = 1,
++ VISUAL_CONFIRM_HDR = 2,
++};
++
+ enum dcc_option {
+ DCC_ENABLE = 0,
+ DCC_DISABLE = 1,
+@@ -203,7 +209,7 @@ struct dc_clocks {
+ };
+
+ struct dc_debug {
+- bool surface_visual_confirm;
++ enum visual_confirm visual_confirm;
+ bool sanity_checks;
+ bool max_disp_clk;
+ bool surface_trace;
+@@ -388,7 +394,8 @@ enum dc_transfer_func_predefined {
+ TRANSFER_FUNCTION_LINEAR,
+ TRANSFER_FUNCTION_UNITY,
+ TRANSFER_FUNCTION_HLG,
+- TRANSFER_FUNCTION_HLG12
++ TRANSFER_FUNCTION_HLG12,
++ TRANSFER_FUNCTION_GAMMA22
+ };
+
+ struct dc_transfer_func {
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+index a02e719..ab63d0d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+@@ -155,7 +155,7 @@ static void program_overscan(
+ int overscan_bottom = data->v_active
+ - data->recout.y - data->recout.height;
+
+- if (xfm_dce->base.ctx->dc->debug.surface_visual_confirm) {
++ if (xfm_dce->base.ctx->dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) {
+ overscan_bottom += 2;
+ overscan_right += 2;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index 8068074..8b7606e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -1256,7 +1256,7 @@ static void program_scaler(const struct dc *dc,
+ return;
+ #endif
+
+- if (dc->debug.surface_visual_confirm)
++ if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
+ get_surface_visual_confirm_color(pipe_ctx, &color);
+ else
+ color_space_to_black_color(dc,
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
+index a7dce06..aa8d6b1 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
+@@ -235,7 +235,7 @@ static void program_overscan(
+ int overscan_right = data->h_active - data->recout.x - data->recout.width;
+ int overscan_bottom = data->v_active - data->recout.y - data->recout.height;
+
+- if (xfm_dce->base.ctx->dc->debug.surface_visual_confirm) {
++ if (xfm_dce->base.ctx->dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) {
+ overscan_bottom += 2;
+ overscan_right += 2;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+index f0cc975..4a863a5d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+@@ -621,6 +621,10 @@ static void dpp1_dscl_set_manual_ratio_init(
+ static void dpp1_dscl_set_recout(
+ struct dcn10_dpp *dpp, const struct rect *recout)
+ {
++ int visual_confirm_on = 0;
++ if (dpp->base.ctx->dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE)
++ visual_confirm_on = 1;
++
+ REG_SET_2(RECOUT_START, 0,
+ /* First pixel of RECOUT */
+ RECOUT_START_X, recout->x,
+@@ -632,8 +636,7 @@ static void dpp1_dscl_set_recout(
+ RECOUT_WIDTH, recout->width,
+ /* Number of RECOUT vertical lines */
+ RECOUT_HEIGHT, recout->height
+- - dpp->base.ctx->dc->debug.surface_visual_confirm * 4 *
+- (dpp->base.inst + 1));
++ - visual_confirm_on * 4 * (dpp->base.inst + 1));
+ }
+
+ /* Main function to program scaler and line buffer in manual scaling mode */
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 4795974..689aebf 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -1783,6 +1783,43 @@ static void dcn10_get_surface_visual_confirm_color(
+ }
+ }
+
++static void dcn10_get_hdr_visual_confirm_color(
++ struct pipe_ctx *pipe_ctx,
++ struct tg_color *color)
++{
++ uint32_t color_value = MAX_TG_COLOR_VALUE;
++
++ // Determine the overscan color based on the top-most (desktop) plane's context
++ struct pipe_ctx *top_pipe_ctx = pipe_ctx;
++
++ while (top_pipe_ctx->top_pipe != NULL)
++ top_pipe_ctx = top_pipe_ctx->top_pipe;
++
++ switch (top_pipe_ctx->plane_res.scl_data.format) {
++ case PIXEL_FORMAT_ARGB2101010:
++ if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_UNITY) {
++ /* HDR10, ARGB2101010 - set boarder color to red */
++ color->color_r_cr = color_value;
++ }
++ break;
++ case PIXEL_FORMAT_FP16:
++ if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
++ /* HDR10, FP16 - set boarder color to blue */
++ color->color_b_cb = color_value;
++ } else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
++ /* FreeSync 2 HDR - set boarder color to green */
++ color->color_g_y = color_value;
++ }
++ break;
++ default:
++ /* SDR - set boarder color to Gray */
++ color->color_r_cr = color_value/2;
++ color->color_b_cb = color_value/2;
++ color->color_g_y = color_value/2;
++ break;
++ }
++}
++
+ static uint16_t fixed_point_to_int_frac(
+ struct fixed31_32 arg,
+ uint8_t integer_bits,
+@@ -1877,13 +1914,17 @@ static void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
+
+ /* TODO: proper fix once fpga works */
+
+- if (dc->debug.surface_visual_confirm)
++ if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
++ dcn10_get_hdr_visual_confirm_color(
++ pipe_ctx, &blnd_cfg.black_color);
++ } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
+ dcn10_get_surface_visual_confirm_color(
+ pipe_ctx, &blnd_cfg.black_color);
+- else
++ } else {
+ color_space_to_black_color(
+- dc, pipe_ctx->stream->output_color_space,
+- &blnd_cfg.black_color);
++ dc, pipe_ctx->stream->output_color_space,
++ &blnd_cfg.black_color);
++ }
+
+ if (per_pixel_alpha)
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+@@ -2163,6 +2204,7 @@ static void program_all_pipe_in_tree(
+ pipe_ctx->stream_res.tg);
+
+ dc->hwss.blank_pixel_data(dc, pipe_ctx, blank);
++
+ }
+
+ if (pipe_ctx->plane_state != NULL) {
+--
+2.7.4
+
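Since the patch replaces the boolean dc_debug.surface_visual_confirm with an enum, code that toggled the old flag has to pick one of the visual_confirm values instead. The snippet below is an illustrative migration sketch (the helper name is hypothetical and the dc pointer is assumed to come from the usual DM context); it is not part of the patch.

#include "dc.h"

/* Hypothetical sketch: enable the new HDR visual confirm mode. */
static void enable_hdr_visual_confirm(struct dc *dc)
{
        /* Before this patch one would have set
         *     dc->debug.surface_visual_confirm = true;
         * which now corresponds to VISUAL_CONFIRM_SURFACE. */
        dc->debug.visual_confirm = VISUAL_CONFIRM_HDR;  /* overscan color keyed to the
                                                         * top plane: red = HDR10 on
                                                         * ARGB2101010, blue = HDR10 on
                                                         * FP16 (PQ), green = FreeSync 2
                                                         * HDR, gray = SDR */
}
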
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4956-drm-amd-display-Add-hook-for-MST-root-branch-info.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4956-drm-amd-display-Add-hook-for-MST-root-branch-info.patch
new file mode 100644
index 00000000..dd7c9630
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4956-drm-amd-display-Add-hook-for-MST-root-branch-info.patch
@@ -0,0 +1,70 @@
+From 1d231e8a2e95cb77c102766951a0447a7695a77b Mon Sep 17 00:00:00 2001
+From: Nikola Cornij <nikola.cornij@amd.com>
+Date: Wed, 4 Jul 2018 19:05:16 -0400
+Subject: [PATCH 4956/5725] drm/amd/display: Add hook for MST root branch info
+
+This allows DM to do any necessary updates before MST discovery starts.
+
+Signed-off-by: Nikola Cornij <nikola.cornij@amd.com>
+Reviewed-by: Nikola Cornij <Nikola.Cornij@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 5 +++++
+ drivers/gpu/drm/amd/display/dc/core/dc_link.c | 4 ++++
+ drivers/gpu/drm/amd/display/dc/dm_helpers.h | 8 ++++++++
+ 3 files changed, 17 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+index 4188dbd..d1ce925 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+@@ -169,6 +169,11 @@ static void get_payload_table(
+ mutex_unlock(&mst_mgr->payload_lock);
+ }
+
++void dm_helpers_dp_update_branch_info(
++ struct dc_context *ctx,
++ const struct dc_link *link)
++{}
++
+ /*
+ * Writes payload allocation table in immediate downstream device.
+ */
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index f058620..a621467 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -529,6 +529,10 @@ static bool detect_dp(
+ if (reason == DETECT_REASON_BOOT)
+ boot = true;
+
++ dm_helpers_dp_update_branch_info(
++ link->ctx,
++ link);
++
+ if (!dm_helpers_dp_mst_start_top_mgr(
+ link->ctx,
+ link, boot)) {
+diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+index 7e6b9f5..5d4527d0 100644
+--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
++++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+@@ -40,6 +40,14 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
+ const struct dc_edid *edid,
+ struct dc_edid_caps *edid_caps);
+
++
++/*
++ * Update DP branch info
++ */
++void dm_helpers_dp_update_branch_info(
++ struct dc_context *ctx,
++ const struct dc_link *link);
++
+ /*
+ * Writes payload allocation table in immediate downstream device.
+ */
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4957-drm-amd-display-Move-address-tracking-out-of-HUBP.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4957-drm-amd-display-Move-address-tracking-out-of-HUBP.patch
new file mode 100644
index 00000000..cfce9dec
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4957-drm-amd-display-Move-address-tracking-out-of-HUBP.patch
@@ -0,0 +1,107 @@
+From 1b71396adb76640a85d37daf8346adbf1dcd9408 Mon Sep 17 00:00:00 2001
+From: Tony Cheng <tony.cheng@amd.com>
+Date: Sat, 7 Jul 2018 15:43:07 -0400
+Subject: [PATCH 4957/5725] drm/amd/display: Move address tracking out of HUBP
+
+[Why]
+We sometimes require remapping of the FB address space to UMA.
+
+[How]
+Move address tracking up a layer before we apply address translation
+
+Signed-off-by: Tony Cheng <tony.cheng@amd.com>
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 4 ----
+ .../gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 19 +++++++++++++++----
+ drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h | 1 -
+ 3 files changed, 15 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+index d6dc61e..332354c 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+@@ -457,9 +457,6 @@ bool hubp1_program_surface_flip_and_addr(
+
+ hubp->request_address = *address;
+
+- if (flip_immediate)
+- hubp->current_address = *address;
+-
+ return true;
+ }
+
+@@ -695,7 +692,6 @@ bool hubp1_is_flip_pending(struct hubp *hubp)
+ if (earliest_inuse_address.grph.addr.quad_part != hubp->request_address.grph.addr.quad_part)
+ return true;
+
+- hubp->current_address = hubp->request_address;
+ return false;
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 689aebf..8535d87 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -1172,12 +1172,19 @@ static void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_c
+
+ if (plane_state == NULL)
+ return;
++
+ addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
++
+ pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
+ pipe_ctx->plane_res.hubp,
+ &plane_state->address,
+ plane_state->flip_immediate);
++
+ plane_state->status.requested_address = plane_state->address;
++
++ if (plane_state->flip_immediate)
++ plane_state->status.current_address = plane_state->address;
++
+ if (addr_patched)
+ pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
+ }
+@@ -2556,16 +2563,20 @@ static void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
+ {
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
++ bool flip_pending;
+
+ if (plane_state == NULL)
+ return;
+
+- plane_state->status.is_flip_pending =
+- pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
++ flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
+ pipe_ctx->plane_res.hubp);
+
+- plane_state->status.current_address = pipe_ctx->plane_res.hubp->current_address;
+- if (pipe_ctx->plane_res.hubp->current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
++ plane_state->status.is_flip_pending = flip_pending;
++
++ if (!flip_pending)
++ plane_state->status.current_address = plane_state->status.requested_address;
++
++ if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
+ tg->funcs->is_stereo_left_eye) {
+ plane_state->status.is_right_eye =
+ !tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+index 5b7976f..4f3f9e6 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+@@ -46,7 +46,6 @@ struct hubp {
+ const struct hubp_funcs *funcs;
+ struct dc_context *ctx;
+ struct dc_plane_address request_address;
+- struct dc_plane_address current_address;
+ int inst;
+
+ /* run time states */
+--
+2.7.4
+
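In short, the address bookkeeping that used to live in struct hubp now happens one layer up, on plane_state->status, before any address translation is applied. The function below is a condensed, hypothetical restatement of the new flow for illustration only; in the patch the two halves live in dcn10_update_plane_addr() and dcn10_update_pending_status() respectively.

/* Hypothetical sketch of the new flip-address tracking, using the field and
 * callback names from the patch. */
static void track_flip(struct dc_plane_state *plane_state, struct hubp *hubp)
{
        /* at programming time (dcn10_update_plane_addr) */
        plane_state->status.requested_address = plane_state->address;
        if (plane_state->flip_immediate)
                plane_state->status.current_address = plane_state->address;

        /* later, when polling (dcn10_update_pending_status) */
        if (!hubp->funcs->hubp_is_flip_pending(hubp))
                plane_state->status.current_address =
                        plane_state->status.requested_address;
}
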
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4958-drm-amd-display-add-new-dc-debug-structure-to-track-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4958-drm-amd-display-add-new-dc-debug-structure-to-track-.patch
new file mode 100644
index 00000000..730da2c6
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4958-drm-amd-display-add-new-dc-debug-structure-to-track-.patch
@@ -0,0 +1,54 @@
+From 4471426bb73511848e746ea72a1bfea5709b57e4 Mon Sep 17 00:00:00 2001
+From: Jun Lei <Jun.Lei@amd.com>
+Date: Fri, 22 Jun 2018 16:51:47 -0400
+Subject: [PATCH 4958/5725] drm/amd/display: add new dc debug structure to
+ track debug data
+
+[Why]
+Some DTN tests are still failing at a rate of about 2%; this needs to be reduced.
+
+[How]
+Add instrumentation code to the driver so we can get more information from failed runs.
+
+Signed-off-by: Jun Lei <Jun.Lei@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 3 +++
+ drivers/gpu/drm/amd/display/dc/dc.h | 5 +++++
+ 2 files changed, 8 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+index fd73a9c..3bb75b1 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+@@ -1028,6 +1028,9 @@ enum link_training_result dc_link_dp_perform_link_training(
+ lt_settings.lane_settings[0].VOLTAGE_SWING,
+ lt_settings.lane_settings[0].PRE_EMPHASIS);
+
++ if (status != LINK_TRAINING_SUCCESS)
++ link->ctx->dc->debug.debug_data.ltFailCount++;
++
+ return status;
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 966e9b9..c106d58 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -260,6 +260,11 @@ struct dc_debug {
+ bool scl_reset_length10;
+ bool hdmi20_disable;
+
++ struct {
++ uint32_t ltFailCount;
++ uint32_t i2cErrorCount;
++ uint32_t auxErrorCount;
++ } debug_data;
+ };
+ struct dc_state;
+ struct resource_pool;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4959-drm-amd-display-dal-3.1.56.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4959-drm-amd-display-dal-3.1.56.patch
new file mode 100644
index 00000000..b5f1cada
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4959-drm-amd-display-dal-3.1.56.patch
@@ -0,0 +1,29 @@
+From 827bccff4c16f0af9a4e00772423ea3f9cc372e8 Mon Sep 17 00:00:00 2001
+From: Harry Wentland <harry.wentland@amd.com>
+Date: Mon, 9 Jul 2018 17:25:15 -0400
+Subject: [PATCH 4959/5725] drm/amd/display: dal 3.1.56
+
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Reviewed-by: Steven Chiu <Steven.Chiu@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index c106d58..5030c02 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -38,7 +38,7 @@
+ #include "inc/compressor.h"
+ #include "dml/display_mode_lib.h"
+
+-#define DC_VER "3.1.55"
++#define DC_VER "3.1.56"
+
+ #define MAX_SURFACES 3
+ #define MAX_STREAMS 6
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4960-drm-amd-display-Null-ptr-check-for-set_sdr_white_lev.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4960-drm-amd-display-Null-ptr-check-for-set_sdr_white_lev.patch
new file mode 100644
index 00000000..74b03dea
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4960-drm-amd-display-Null-ptr-check-for-set_sdr_white_lev.patch
@@ -0,0 +1,35 @@
+From 1515250caa2ab33a4d73d33bba4a99af16705413 Mon Sep 17 00:00:00 2001
+From: Krunoslav Kovac <Krunoslav.Kovac@amd.com>
+Date: Tue, 10 Jul 2018 16:04:05 -0400
+Subject: [PATCH 4960/5725] drm/amd/display: Null ptr check for
+ set_sdr_white_level
+
+[Why&How]
+Cursor boosting can only be done on DCN+.
+Check for a NULL pointer since DCE doesn't implement it.
+
+Signed-off-by: Krunoslav Kovac <Krunoslav.Kovac@amd.com>
+Reviewed-by: Sivapiriyan Kumarasamy <Sivapiriyan.Kumarasamy@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+index 4717330..fdcc8ab 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+@@ -214,7 +214,8 @@ bool dc_stream_set_cursor_attributes(
+ }
+
+ core_dc->hwss.set_cursor_attribute(pipe_ctx);
+- core_dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
++ if (core_dc->hwss.set_cursor_sdr_white_level)
++ core_dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
+ }
+
+ if (pipe_to_program)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4961-drm-amd-display-Fix-some-checkpatch.pl-errors-and-wa.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4961-drm-amd-display-Fix-some-checkpatch.pl-errors-and-wa.patch
new file mode 100644
index 00000000..60ece619
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4961-drm-amd-display-Fix-some-checkpatch.pl-errors-and-wa.patch
@@ -0,0 +1,150 @@
+From eb01908bbbaa4939a2cf3fcf075cbdf596babd9b Mon Sep 17 00:00:00 2001
+From: Harry Wentland <harry.wentland@amd.com>
+Date: Fri, 6 Jul 2018 13:40:33 -0400
+Subject: [PATCH 4961/5725] drm/amd/display: Fix some checkpatch.pl errors and
+ warnings in dc_link_dp.c
+
+[Why]
+Any Linux kernel code should pass checkpatch.pl with no errors and
+few, if any, warnings.
+
+[How]
+Fixing some spacing errors and warnings.
+
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 35 ++++++++++++------------
+ 1 file changed, 18 insertions(+), 17 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+index 3bb75b1..b34a694 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+@@ -39,7 +39,7 @@ static bool decide_fallback_link_setting(
+ struct dc_link_settings initial_link_settings,
+ struct dc_link_settings *current_link_setting,
+ enum link_training_result training_result);
+-static struct dc_link_settings get_common_supported_link_settings (
++static struct dc_link_settings get_common_supported_link_settings(
+ struct dc_link_settings link_setting_a,
+ struct dc_link_settings link_setting_b);
+
+@@ -94,8 +94,8 @@ static void dpcd_set_link_settings(
+ uint8_t rate = (uint8_t)
+ (lt_settings->link_settings.link_rate);
+
+- union down_spread_ctrl downspread = {{0}};
+- union lane_count_set lane_count_set = {{0}};
++ union down_spread_ctrl downspread = { {0} };
++ union lane_count_set lane_count_set = { {0} };
+ uint8_t link_set_buffer[2];
+
+ downspread.raw = (uint8_t)
+@@ -165,11 +165,11 @@ static void dpcd_set_lt_pattern_and_lane_settings(
+ const struct link_training_settings *lt_settings,
+ enum hw_dp_training_pattern pattern)
+ {
+- union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = {{{0}}};
++ union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = { { {0} } };
+ const uint32_t dpcd_base_lt_offset =
+ DP_TRAINING_PATTERN_SET;
+ uint8_t dpcd_lt_buffer[5] = {0};
+- union dpcd_training_pattern dpcd_pattern = {{0}};
++ union dpcd_training_pattern dpcd_pattern = { {0} };
+ uint32_t lane;
+ uint32_t size_in_bytes;
+ bool edp_workaround = false; /* TODO link_prop.INTERNAL */
+@@ -233,7 +233,7 @@ static void dpcd_set_lt_pattern_and_lane_settings(
+ link,
+ DP_TRAINING_PATTERN_SET,
+ &dpcd_pattern.raw,
+- sizeof(dpcd_pattern.raw) );
++ sizeof(dpcd_pattern.raw));
+
+ core_link_write_dpcd(
+ link,
+@@ -247,7 +247,7 @@ static void dpcd_set_lt_pattern_and_lane_settings(
+ link,
+ dpcd_base_lt_offset,
+ dpcd_lt_buffer,
+- size_in_bytes + sizeof(dpcd_pattern.raw) );
++ size_in_bytes + sizeof(dpcd_pattern.raw));
+
+ link->cur_lane_setting = lt_settings->lane_settings[0];
+ }
+@@ -429,8 +429,8 @@ static void get_lane_status_and_drive_settings(
+ struct link_training_settings *req_settings)
+ {
+ uint8_t dpcd_buf[6] = {0};
+- union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {{{0}}};
+- struct link_training_settings request_settings = {{0}};
++ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } };
++ struct link_training_settings request_settings = { {0} };
+ uint32_t lane;
+
+ memset(req_settings, '\0', sizeof(struct link_training_settings));
+@@ -652,7 +652,7 @@ static bool perform_post_lt_adj_req_sequence(
+
+ if (req_drv_setting_changed) {
+ update_drive_settings(
+- lt_settings,req_settings);
++ lt_settings, req_settings);
+
+ dc_link_dp_set_drive_settings(link,
+ lt_settings);
+@@ -725,8 +725,8 @@ static enum link_training_result perform_channel_equalization_sequence(
+ enum hw_dp_training_pattern hw_tr_pattern;
+ uint32_t retries_ch_eq;
+ enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
+- union lane_align_status_updated dpcd_lane_status_updated = {{0}};
+- union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {{{0}}};
++ union lane_align_status_updated dpcd_lane_status_updated = { {0} };
++ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } };
+
+ hw_tr_pattern = get_supported_tp(link);
+
+@@ -1186,7 +1186,7 @@ bool dp_hbr_verify_link_cap(
+ return success;
+ }
+
+-static struct dc_link_settings get_common_supported_link_settings (
++static struct dc_link_settings get_common_supported_link_settings(
+ struct dc_link_settings link_setting_a,
+ struct dc_link_settings link_setting_b)
+ {
+@@ -1432,6 +1432,7 @@ static uint32_t bandwidth_in_kbps_from_link_settings(
+
+ uint32_t lane_count = link_setting->lane_count;
+ uint32_t kbps = link_rate_in_kbps;
++
+ kbps *= lane_count;
+ kbps *= 8; /* 8 bits per byte*/
+
+@@ -1449,9 +1450,9 @@ bool dp_validate_mode_timing(
+ const struct dc_link_settings *link_setting;
+
+ /*always DP fail safe mode*/
+- if (timing->pix_clk_khz == (uint32_t)25175 &&
+- timing->h_addressable == (uint32_t)640 &&
+- timing->v_addressable == (uint32_t)480)
++ if (timing->pix_clk_khz == (uint32_t) 25175 &&
++ timing->h_addressable == (uint32_t) 640 &&
++ timing->v_addressable == (uint32_t) 480)
+ return true;
+
+ /* We always use verified link settings */
+@@ -1999,7 +2000,7 @@ static void handle_automated_test(struct dc_link *link)
+
+ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss)
+ {
+- union hpd_irq_data hpd_irq_dpcd_data = {{{{0}}}};
++ union hpd_irq_data hpd_irq_dpcd_data = { { { {0} } } };
+ union device_service_irq device_service_clear = { { 0 } };
+ enum dc_status result;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4962-drm-amdgpu-cleanup-job-header.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4962-drm-amdgpu-cleanup-job-header.patch
new file mode 100644
index 00000000..e5dcda8e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4962-drm-amdgpu-cleanup-job-header.patch
@@ -0,0 +1,175 @@
+From a6eb67ba24df176d009277acfdb0a8ca61343edb Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Fri, 13 Jul 2018 09:50:08 +0200
+Subject: [PATCH 4962/5725] drm/amdgpu: cleanup job header
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Move the job-related defines, structure and function declarations to
+amdgpu_job.h.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Acked-by: Chunming Zhou <david1.zhou@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 46 +-------------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_job.h | 74 +++++++++++++++++++++++++++++++++
+ 2 files changed, 75 insertions(+), 45 deletions(-)
+ create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index bb1062e..d3a2e16 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -74,6 +74,7 @@
+ #include "amdgpu_virt.h"
+ #include "amdgpu_gart.h"
+ #include "amdgpu_debugfs.h"
++#include "amdgpu_job.h"
+
+ /*
+ * Modules parameters.
+@@ -616,17 +617,6 @@ struct amdgpu_ib {
+
+ extern const struct drm_sched_backend_ops amdgpu_sched_ops;
+
+-int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
+- struct amdgpu_job **job, struct amdgpu_vm *vm);
+-int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
+- struct amdgpu_job **job);
+-
+-void amdgpu_job_free_resources(struct amdgpu_job *job);
+-void amdgpu_job_free(struct amdgpu_job *job);
+-int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
+- struct drm_sched_entity *entity, void *owner,
+- struct dma_fence **f);
+-
+ /*
+ * Queue manager
+ */
+@@ -1078,40 +1068,6 @@ struct amdgpu_cs_parser {
+ struct drm_syncobj **post_dep_syncobjs;
+ };
+
+-#define AMDGPU_PREAMBLE_IB_PRESENT (1 << 0) /* bit set means command submit involves a preamble IB */
+-#define AMDGPU_PREAMBLE_IB_PRESENT_FIRST (1 << 1) /* bit set means preamble IB is first presented in belonging context */
+-#define AMDGPU_HAVE_CTX_SWITCH (1 << 2) /* bit set means context switch occured */
+-
+-struct amdgpu_job {
+- struct drm_sched_job base;
+- struct amdgpu_device *adev;
+- struct amdgpu_vm *vm;
+- struct amdgpu_ring *ring;
+- struct amdgpu_sync sync;
+- struct amdgpu_sync sched_sync;
+- struct amdgpu_ib *ibs;
+- struct dma_fence *fence; /* the hw fence */
+- uint32_t preamble_status;
+- uint32_t num_ibs;
+- void *owner;
+- uint64_t fence_ctx; /* the fence_context this job uses */
+- bool vm_needs_flush;
+- uint64_t vm_pd_addr;
+- unsigned vmid;
+- unsigned pasid;
+- uint32_t gds_base, gds_size;
+- uint32_t gws_base, gws_size;
+- uint32_t oa_base, oa_size;
+- uint32_t vram_lost_counter;
+-
+- /* user fence handling */
+- uint64_t uf_addr;
+- uint64_t uf_sequence;
+-
+-};
+-#define to_amdgpu_job(sched_job) \
+- container_of((sched_job), struct amdgpu_job, base)
+-
+ static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p,
+ uint32_t ib_idx, int idx)
+ {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+new file mode 100644
+index 0000000..35bb932
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+@@ -0,0 +1,74 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++#ifndef __AMDGPU_JOB_H__
++#define __AMDGPU_JOB_H__
++
++/* bit set means command submit involves a preamble IB */
++#define AMDGPU_PREAMBLE_IB_PRESENT (1 << 0)
++/* bit set means preamble IB is first presented in belonging context */
++#define AMDGPU_PREAMBLE_IB_PRESENT_FIRST (1 << 1)
++/* bit set means context switch occured */
++#define AMDGPU_HAVE_CTX_SWITCH (1 << 2)
++
++#define to_amdgpu_job(sched_job) \
++ container_of((sched_job), struct amdgpu_job, base)
++
++struct amdgpu_job {
++ struct drm_sched_job base;
++ struct amdgpu_device *adev;
++ struct amdgpu_vm *vm;
++ struct amdgpu_ring *ring;
++ struct amdgpu_sync sync;
++ struct amdgpu_sync sched_sync;
++ struct amdgpu_ib *ibs;
++ struct dma_fence *fence; /* the hw fence */
++ uint32_t preamble_status;
++ uint32_t num_ibs;
++ void *owner;
++ uint64_t fence_ctx; /* the fence_context this job uses */
++ bool vm_needs_flush;
++ uint64_t vm_pd_addr;
++ unsigned vmid;
++ unsigned pasid;
++ uint32_t gds_base, gds_size;
++ uint32_t gws_base, gws_size;
++ uint32_t oa_base, oa_size;
++ uint32_t vram_lost_counter;
++
++ /* user fence handling */
++ uint64_t uf_addr;
++ uint64_t uf_sequence;
++
++};
++
++int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
++ struct amdgpu_job **job, struct amdgpu_vm *vm);
++int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
++ struct amdgpu_job **job);
++
++void amdgpu_job_free_resources(struct amdgpu_job *job);
++void amdgpu_job_free(struct amdgpu_job *job);
++int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
++ struct drm_sched_entity *entity, void *owner,
++ struct dma_fence **f);
++#endif
+--
+2.7.4
+
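Editorial note between patches: the new amdgpu_job.h above only declares the job API, so as orientation here is a hedged sketch of how a driver-internal caller typically uses those entry points at this stage of the series (before later patches rework the submit signature). Error handling is abbreviated; adev, ring and entity are assumed to come from the surrounding driver context, and the size value is purely illustrative. This is not part of any patch.

        struct amdgpu_job *job;
        struct dma_fence *fence;
        int r;

        /* allocate a job carrying a single IB of the requested size */
        r = amdgpu_job_alloc_with_ib(adev, 256, &job);
        if (r)
                return r;

        /* ... fill job->ibs[0] with the packets for the target ring ... */

        /* hand the job to the scheduler; on success the scheduler owns it */
        r = amdgpu_job_submit(job, ring, entity, AMDGPU_FENCE_OWNER_UNDEFINED,
                              &fence);
        if (r) {
                amdgpu_job_free(job);
                return r;
        }

        dma_fence_put(fence);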
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4963-drm-amdgpu-remove-fence-context-from-the-job.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4963-drm-amdgpu-remove-fence-context-from-the-job.patch
new file mode 100644
index 00000000..3c3aeb9c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4963-drm-amdgpu-remove-fence-context-from-the-job.patch
@@ -0,0 +1,73 @@
+From 555152671ff6b29497925245056bfce9105ee972 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Fri, 13 Jul 2018 09:58:49 +0200
+Subject: [PATCH 4963/5725] drm/amdgpu: remove fence context from the job
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Can be obtained directly from the fence as well.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Acked-by: Chunming Zhou <david1.zhou@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 1 -
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 1 -
+ drivers/gpu/drm/amd/amdgpu/amdgpu_job.h | 1 -
+ 4 files changed, 1 insertion(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index 56da0ef..4385446 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -1241,7 +1241,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
+ }
+
+ job->owner = p->filp;
+- job->fence_ctx = entity->fence_context;
+ p->fence = dma_fence_get(&job->base.s_fence->finished);
+
+ r = amdgpu_ctx_add_fence(p->ctx, ring, p->fence, &seq);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+index 5f05d15d..360ec43 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+@@ -140,7 +140,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
+ /* ring tests don't use a job */
+ if (job) {
+ vm = job->vm;
+- fence_ctx = job->fence_ctx;
++ fence_ctx = job->base.s_fence->scheduled.context;
+ } else {
+ vm = NULL;
+ fence_ctx = 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+index dfef4db..bf45b1a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+@@ -132,7 +132,6 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
+ return r;
+
+ job->owner = owner;
+- job->fence_ctx = entity->fence_context;
+ *f = dma_fence_get(&job->base.s_fence->finished);
+ amdgpu_job_free_resources(job);
+ amdgpu_ring_priority_get(job->ring, job->base.s_priority);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+index 35bb932..3151692 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+@@ -45,7 +45,6 @@ struct amdgpu_job {
+ uint32_t preamble_status;
+ uint32_t num_ibs;
+ void *owner;
+- uint64_t fence_ctx; /* the fence_context this job uses */
+ bool vm_needs_flush;
+ uint64_t vm_pd_addr;
+ unsigned vmid;
+--
+2.7.4
+
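Editorial note: the removed job->fence_ctx cache is replaced by deriving the context on demand from the scheduler fence, as the amdgpu_ib.c hunk above shows. A minimal sketch of that access pattern (the helper name is hypothetical; s_fence is only valid once drm_sched_job_init() has run for the job):

        static u64 amdgpu_job_fence_context(struct amdgpu_job *job)
        {
                /* the scheduled fence carries the context the job runs in */
                return job->base.s_fence->scheduled.context;
        }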
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4964-drm-amdgpu-remove-ring-parameter-from-amdgpu_job_sub.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4964-drm-amdgpu-remove-ring-parameter-from-amdgpu_job_sub.patch
new file mode 100644
index 00000000..e2cab3ee
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4964-drm-amdgpu-remove-ring-parameter-from-amdgpu_job_sub.patch
@@ -0,0 +1,206 @@
+From b77ae8d1f37e165da8f7ff71b8877aaff4b6ff9e Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Fri, 13 Jul 2018 13:54:56 +0200
+Subject: [PATCH 4964/5725] drm/amdgpu: remove ring parameter from
+ amdgpu_job_submit
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+We know the ring through the entity anyway.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Acked-by: Chunming Zhou <david1.zhou@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 9 ++++-----
+ drivers/gpu/drm/amd/amdgpu/amdgpu_job.h | 5 ++---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 2 ++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 6 +++---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 11 +++++------
+ drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 2 +-
+ 9 files changed, 20 insertions(+), 21 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+index bf45b1a..4dd5aac 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+@@ -117,21 +117,20 @@ void amdgpu_job_free(struct amdgpu_job *job)
+ kfree(job);
+ }
+
+-int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
+- struct drm_sched_entity *entity, void *owner,
+- struct dma_fence **f)
++int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
++ void *owner, struct dma_fence **f)
+ {
+ int r;
+- job->ring = ring;
+
+ if (!f)
+ return -EINVAL;
+
+- r = drm_sched_job_init(&job->base, &ring->sched, entity, owner);
++ r = drm_sched_job_init(&job->base, entity->sched, entity, owner);
+ if (r)
+ return r;
+
+ job->owner = owner;
++ job->ring = to_amdgpu_ring(entity->sched);
+ *f = dma_fence_get(&job->base.s_fence->finished);
+ amdgpu_job_free_resources(job);
+ amdgpu_ring_priority_get(job->ring, job->base.s_priority);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+index 3151692..39f4230 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+@@ -67,7 +67,6 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
+
+ void amdgpu_job_free_resources(struct amdgpu_job *job);
+ void amdgpu_job_free(struct amdgpu_job *job);
+-int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
+- struct drm_sched_entity *entity, void *owner,
+- struct dma_fence **f);
++int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
++ void *owner, struct dma_fence **f);
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+index 1c7ffc1..380eb2c 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+@@ -43,6 +43,8 @@
+ #define AMDGPU_FENCE_FLAG_INT (1 << 1)
+ #define AMDGPU_FENCE_FLAG_TC_WB_ONLY (1 << 2)
+
++#define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)
++
+ enum amdgpu_ring_type {
+ AMDGPU_RING_TYPE_GFX,
+ AMDGPU_RING_TYPE_COMPUTE,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index cf7f380..8bf244e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -2204,7 +2204,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
+ if (r)
+ goto error_free;
+
+- r = amdgpu_job_submit(job, ring, &adev->mman.entity,
++ r = amdgpu_job_submit(job, &adev->mman.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
+ if (r)
+ goto error_free;
+@@ -2281,7 +2281,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
+ DRM_ERROR("Error scheduling IBs (%d)\n", r);
+ amdgpu_job_free(job);
+ } else {
+- r = amdgpu_job_submit(job, ring, &adev->mman.entity,
++ r = amdgpu_job_submit(job, &adev->mman.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED, fence);
+ if (r)
+ goto error_free;
+@@ -2373,7 +2373,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
+
+ amdgpu_ring_pad_ib(ring, &job->ibs[0]);
+ WARN_ON(job->ibs[0].length_dw > num_dw);
+- r = amdgpu_job_submit(job, ring, &adev->mman.entity,
++ r = amdgpu_job_submit(job, &adev->mman.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED, fence);
+ if (r)
+ goto error_free;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+index 1db0845..5ee3151 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+@@ -1075,7 +1075,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
+ if (r)
+ goto err_free;
+
+- r = amdgpu_job_submit(job, ring, &adev->uvd.inst[ring->me].entity,
++ r = amdgpu_job_submit(job, &adev->uvd.inst[ring->me].entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+ if (r)
+ goto err_free;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+index b2a4cdb..27853b2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+@@ -541,7 +541,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+
+ amdgpu_job_free(job);
+ } else {
+- r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
++ r = amdgpu_job_submit(job, &ring->adev->vce.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+ if (r)
+ goto err;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 8b5dada..b8e9262 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -412,8 +412,8 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
+ if (r)
+ goto error_free;
+
+- r = amdgpu_job_submit(job, ring, &vm->entity,
+- AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
++ r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_UNDEFINED,
++ &fence);
+ if (r)
+ goto error_free;
+
+@@ -1108,8 +1108,8 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
+ amdgpu_sync_resv(adev, &job->sync, root->tbo.resv,
+ AMDGPU_FENCE_OWNER_VM, false);
+ WARN_ON(params.ib->length_dw > ndw);
+- r = amdgpu_job_submit(job, ring, &vm->entity,
+- AMDGPU_FENCE_OWNER_VM, &fence);
++ r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM,
++ &fence);
+ if (r)
+ goto error;
+
+@@ -1473,8 +1473,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
+
+ amdgpu_ring_pad_ib(ring, params.ib);
+ WARN_ON(params.ib->length_dw > ndw);
+- r = amdgpu_job_submit(job, ring, &vm->entity,
+- AMDGPU_FENCE_OWNER_VM, &f);
++ r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM, &f);
+ if (r)
+ goto error_free;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+index bca9c63..068fe13 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+@@ -320,7 +320,7 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
+
+ amdgpu_job_free(job);
+ } else {
+- r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
++ r = amdgpu_job_submit(job, &ring->adev->vce.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+ if (r)
+ goto err;
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+index 07f5ccb..c6365b0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+@@ -321,7 +321,7 @@ int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+
+ amdgpu_job_free(job);
+ } else {
+- r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
++ r = amdgpu_job_submit(job, &ring->adev->vce.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+ if (r)
+ goto err;
+--
+2.7.4
+
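Editorial note: the new to_amdgpu_ring() macro works because the GPU scheduler is embedded directly inside struct amdgpu_ring, so container_of() can walk back from the scheduler pointer stored in an entity to the ring that owns it. A standalone sketch of that pattern with generic stand-in types (not the real driver structures):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct sched { int dummy; };                    /* stands in for drm_gpu_scheduler */
struct ring  { const char *name; struct sched sched; };

#define to_ring(s) container_of((s), struct ring, sched)

int main(void)
{
        struct ring gfx = { .name = "gfx" };
        struct sched *s = &gfx.sched;           /* what an entity stores */

        /* walks back from the embedded member to the enclosing ring */
        printf("ring: %s\n", to_ring(s)->name);
        return 0;
}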
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4965-drm-amdgpu-remove-job-ring.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4965-drm-amdgpu-remove-job-ring.patch
new file mode 100644
index 00000000..f18cfe95
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4965-drm-amdgpu-remove-job-ring.patch
@@ -0,0 +1,278 @@
+From f91c9ea9bbc5f9d97125812d12f24aeaf499fc4c Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Fri, 13 Jul 2018 15:08:44 +0200
+Subject: [PATCH 4965/5725] drm/amdgpu: remove job->ring
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+We can easily get that from the scheduler.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Acked-by: Chunming Zhou <david1.zhou@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 +
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 18 +++++++++---------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 4 ++--
+ drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 23 ++++++++++++-----------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_job.h | 1 -
+ drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 8 ++++----
+ drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 2 +-
+ 7 files changed, 29 insertions(+), 28 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index d3a2e16..d83a206 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -1047,6 +1047,7 @@ struct amdgpu_cs_parser {
+
+ /* scheduler job object */
+ struct amdgpu_job *job;
++ struct amdgpu_ring *ring;
+
+ /* buffer objects */
+ struct ww_acquire_ctx ticket;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index 4385446..0e31215 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -914,11 +914,11 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
+ {
+ struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+ struct amdgpu_vm *vm = &fpriv->vm;
+- struct amdgpu_ring *ring = p->job->ring;
++ struct amdgpu_ring *ring = p->ring;
+ int r;
+
+ /* Only for UVD/VCE VM emulation */
+- if (p->job->ring->funcs->parse_cs) {
++ if (p->ring->funcs->parse_cs) {
+ unsigned i, j;
+
+ for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
+@@ -1032,10 +1032,10 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
+ }
+ }
+
+- if (parser->job->ring && parser->job->ring != ring)
++ if (parser->ring && parser->ring != ring)
+ return -EINVAL;
+
+- parser->job->ring = ring;
++ parser->ring = ring;
+
+ r = amdgpu_ib_get(adev, vm,
+ ring->funcs->parse_cs ? chunk_ib->ib_bytes : 0,
+@@ -1054,11 +1054,11 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
+
+ /* UVD & VCE fw doesn't support user fences */
+ if (parser->job->uf_addr && (
+- parser->job->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
+- parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
++ parser->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
++ parser->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
+ return -EINVAL;
+
+- return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->job->ring->idx);
++ return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->ring->idx);
+ }
+
+ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
+@@ -1209,7 +1209,7 @@ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
+ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
+ union drm_amdgpu_cs *cs)
+ {
+- struct amdgpu_ring *ring = p->job->ring;
++ struct amdgpu_ring *ring = p->ring;
+ struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
+ struct amdgpu_job *job;
+ unsigned i;
+@@ -1258,7 +1258,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
+ job->uf_sequence = seq;
+
+ amdgpu_job_free_resources(job);
+- amdgpu_ring_priority_get(job->ring, job->base.s_priority);
++ amdgpu_ring_priority_get(p->ring, job->base.s_priority);
+
+ trace_amdgpu_cs_ioctl(job);
+ drm_sched_entity_push_job(&job->base, entity);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index ebf370c..ea19758 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -3316,7 +3316,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+
+ kthread_park(ring->sched.thread);
+
+- if (job && job->ring->idx != i)
++ if (job && job->base.sched == &ring->sched)
+ continue;
+
+ drm_sched_hw_job_reset(&ring->sched, job ? &job->base : NULL);
+@@ -3340,7 +3340,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ * or all rings (in the case @job is NULL)
+ * after above amdgpu_reset accomplished
+ */
+- if ((!job || job->ring->idx == i) && !r)
++ if ((!job || job->base.sched == &ring->sched) && !r)
+ drm_sched_job_recovery(&ring->sched);
+
+ kthread_unpark(adev->rings[i]->sched.thread);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+index 4dd5aac..27263f2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+@@ -30,12 +30,12 @@
+
+ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
+ {
+- struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
++ struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
++ struct amdgpu_job *job = to_amdgpu_job(s_job);
+
+ DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n",
+- job->base.sched->name,
+- atomic_read(&job->ring->fence_drv.last_seq),
+- job->ring->fence_drv.sync_seq);
++ job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
++ ring->fence_drv.sync_seq);
+
+ amdgpu_device_gpu_recover(job->adev, job, false);
+ }
+@@ -98,9 +98,10 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
+
+ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
+ {
+- struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
++ struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
++ struct amdgpu_job *job = to_amdgpu_job(s_job);
+
+- amdgpu_ring_priority_put(job->ring, s_job->s_priority);
++ amdgpu_ring_priority_put(ring, s_job->s_priority);
+ dma_fence_put(job->fence);
+ amdgpu_sync_free(&job->sync);
+ amdgpu_sync_free(&job->sched_sync);
+@@ -120,6 +121,7 @@ void amdgpu_job_free(struct amdgpu_job *job)
+ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
+ void *owner, struct dma_fence **f)
+ {
++ struct amdgpu_ring *ring = to_amdgpu_ring(entity->sched);
+ int r;
+
+ if (!f)
+@@ -130,10 +132,9 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
+ return r;
+
+ job->owner = owner;
+- job->ring = to_amdgpu_ring(entity->sched);
+ *f = dma_fence_get(&job->base.s_fence->finished);
+ amdgpu_job_free_resources(job);
+- amdgpu_ring_priority_get(job->ring, job->base.s_priority);
++ amdgpu_ring_priority_get(ring, job->base.s_priority);
+ drm_sched_entity_push_job(&job->base, entity);
+
+ return 0;
+@@ -142,6 +143,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
+ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
+ struct drm_sched_entity *s_entity)
+ {
++ struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->sched);
+ struct amdgpu_job *job = to_amdgpu_job(sched_job);
+ struct amdgpu_vm *vm = job->vm;
+ bool explicit = false;
+@@ -157,8 +159,6 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
+ }
+
+ while (fence == NULL && vm && !job->vmid) {
+- struct amdgpu_ring *ring = job->ring;
+-
+ r = amdgpu_vmid_grab(vm, ring, &job->sync,
+ &job->base.s_fence->finished,
+ job);
+@@ -173,6 +173,7 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
+
+ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
+ {
++ struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
+ struct dma_fence *fence = NULL, *finished;
+ struct amdgpu_device *adev;
+ struct amdgpu_job *job;
+@@ -196,7 +197,7 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
+ if (finished->error < 0) {
+ DRM_INFO("Skip scheduling IBs!\n");
+ } else {
+- r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job,
++ r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
+ &fence);
+ if (r)
+ DRM_ERROR("Error scheduling IBs (%d)\n", r);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+index 39f4230..c663c19 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+@@ -37,7 +37,6 @@ struct amdgpu_job {
+ struct drm_sched_job base;
+ struct amdgpu_device *adev;
+ struct amdgpu_vm *vm;
+- struct amdgpu_ring *ring;
+ struct amdgpu_sync sync;
+ struct amdgpu_sync sched_sync;
+ struct amdgpu_ib *ibs;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+index e96e26d..7692003 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+@@ -150,10 +150,10 @@ TRACE_EVENT(amdgpu_cs,
+
+ TP_fast_assign(
+ __entry->bo_list = p->bo_list;
+- __entry->ring = p->job->ring->idx;
++ __entry->ring = p->ring->idx;
+ __entry->dw = p->job->ibs[i].length_dw;
+ __entry->fences = amdgpu_fence_count_emitted(
+- p->job->ring);
++ p->ring);
+ ),
+ TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u",
+ __entry->bo_list, __entry->ring, __entry->dw,
+@@ -178,7 +178,7 @@ TRACE_EVENT(amdgpu_cs_ioctl,
+ __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
+ __entry->context = job->base.s_fence->finished.context;
+ __entry->seqno = job->base.s_fence->finished.seqno;
+- __entry->ring_name = job->ring->name;
++ __entry->ring_name = to_amdgpu_ring(job->base.sched)->name;
+ __entry->num_ibs = job->num_ibs;
+ ),
+ TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
+@@ -203,7 +203,7 @@ TRACE_EVENT(amdgpu_sched_run_job,
+ __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
+ __entry->context = job->base.s_fence->finished.context;
+ __entry->seqno = job->base.s_fence->finished.seqno;
+- __entry->ring_name = job->ring->name;
++ __entry->ring_name = to_amdgpu_ring(job->base.sched)->name;
+ __entry->num_ibs = job->num_ibs;
+ ),
+ TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+index 5ee3151..7c3b3ac 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+@@ -693,11 +693,11 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
+ struct amdgpu_bo *bo, unsigned offset)
+ {
+ struct amdgpu_device *adev = ctx->parser->adev;
++ uint32_t ip_instance = ctx->parser->ring->me;
+ int32_t *msg, msg_type, handle;
+ void *ptr;
+ long r;
+ int i;
+- uint32_t ip_instance = ctx->parser->job->ring->me;
+
+ if (offset & 0x3F) {
+ DRM_ERROR("UVD(%d) messages must be 64 byte aligned!\n", ip_instance);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4966-drm-amdgpu-add-amdgpu_job_submit_direct-helper.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4966-drm-amdgpu-add-amdgpu_job_submit_direct-helper.patch
new file mode 100644
index 00000000..b2f9ff5d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4966-drm-amdgpu-add-amdgpu_job_submit_direct-helper.patch
@@ -0,0 +1,318 @@
+From 425e2affec4b98d562de8d362016f322c1213394 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Fri, 13 Jul 2018 16:29:10 +0200
+Subject: [PATCH 4966/5725] drm/amdgpu: add amdgpu_job_submit_direct helper
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Make sure that we properly initialize at least the sched member.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Acked-by: Chunming Zhou <david1.zhou@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 15 +++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_job.h | 4 ++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 17 ++++++-----------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 4 +---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 20 ++++++--------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 17 ++++-------------
+ drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 20 ++++++--------------
+ drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 20 ++++++--------------
+ 8 files changed, 48 insertions(+), 69 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+index 27263f2..1be5575 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+@@ -140,6 +140,21 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
+ return 0;
+ }
+
++int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
++ struct dma_fence **fence)
++{
++ int r;
++
++ job->base.sched = &ring->sched;
++ r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence);
++ job->fence = dma_fence_get(*fence);
++ if (r)
++ return r;
++
++ amdgpu_job_free(job);
++ return 0;
++}
++
+ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
+ struct drm_sched_entity *s_entity)
+ {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+index c663c19..d77fd23 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+@@ -33,6 +33,8 @@
+ #define to_amdgpu_job(sched_job) \
+ container_of((sched_job), struct amdgpu_job, base)
+
++struct amdgpu_fence;
++
+ struct amdgpu_job {
+ struct drm_sched_job base;
+ struct amdgpu_device *adev;
+@@ -68,4 +70,6 @@ void amdgpu_job_free_resources(struct amdgpu_job *job);
+ void amdgpu_job_free(struct amdgpu_job *job);
+ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
+ void *owner, struct dma_fence **f);
++int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
++ struct dma_fence **fence);
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index 8bf244e..addc397 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -2273,24 +2273,19 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
+
+ amdgpu_ring_pad_ib(ring, &job->ibs[0]);
+ WARN_ON(job->ibs[0].length_dw > num_dw);
+- if (direct_submit) {
+- r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
+- NULL, fence);
+- job->fence = dma_fence_get(*fence);
+- if (r)
+- DRM_ERROR("Error scheduling IBs (%d)\n", r);
+- amdgpu_job_free(job);
+- } else {
++ if (direct_submit)
++ r = amdgpu_job_submit_direct(job, ring, fence);
++ else
+ r = amdgpu_job_submit(job, &adev->mman.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED, fence);
+- if (r)
+- goto error_free;
+- }
++ if (r)
++ goto error_free;
+
+ return r;
+
+ error_free:
+ amdgpu_job_free(job);
++ DRM_ERROR("Error scheduling IBs (%d)\n", r);
+ return r;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+index 7c3b3ac..2298571 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+@@ -1063,12 +1063,10 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
+ if (r < 0)
+ goto err_free;
+
+- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+- job->fence = dma_fence_get(f);
++ r = amdgpu_job_submit_direct(job, ring, &f);
+ if (r)
+ goto err_free;
+
+- amdgpu_job_free(job);
+ } else {
+ r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
+ AMDGPU_FENCE_OWNER_UNDEFINED, false);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+index 27853b2..7c23719 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+@@ -471,12 +471,10 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+ for (i = ib->length_dw; i < ib_size_dw; ++i)
+ ib->ptr[i] = 0x0;
+
+- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+- job->fence = dma_fence_get(f);
++ r = amdgpu_job_submit_direct(job, ring, &f);
+ if (r)
+ goto err;
+
+- amdgpu_job_free(job);
+ if (fence)
+ *fence = dma_fence_get(f);
+ dma_fence_put(f);
+@@ -533,19 +531,13 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+ for (i = ib->length_dw; i < ib_size_dw; ++i)
+ ib->ptr[i] = 0x0;
+
+- if (direct) {
+- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+- job->fence = dma_fence_get(f);
+- if (r)
+- goto err;
+-
+- amdgpu_job_free(job);
+- } else {
++ if (direct)
++ r = amdgpu_job_submit_direct(job, ring, &f);
++ else
+ r = amdgpu_job_submit(job, &ring->adev->vce.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+- if (r)
+- goto err;
+- }
++ if (r)
++ goto err;
+
+ if (fence)
+ *fence = dma_fence_get(f);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+index e5bcdbb..414a67e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+@@ -311,13 +311,10 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
+ }
+ ib->length_dw = 16;
+
+- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+- job->fence = dma_fence_get(f);
++ r = amdgpu_job_submit_direct(job, ring, &f);
+ if (r)
+ goto err_free;
+
+- amdgpu_job_free(job);
+-
+ amdgpu_bo_fence(bo, f, false);
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
+@@ -504,12 +501,10 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
+ for (i = ib->length_dw; i < ib_size_dw; ++i)
+ ib->ptr[i] = 0x0;
+
+- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+- job->fence = dma_fence_get(f);
++ r = amdgpu_job_submit_direct(job, ring, &f);
+ if (r)
+ goto err;
+
+- amdgpu_job_free(job);
+ if (fence)
+ *fence = dma_fence_get(f);
+ dma_fence_put(f);
+@@ -558,12 +553,10 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
+ for (i = ib->length_dw; i < ib_size_dw; ++i)
+ ib->ptr[i] = 0x0;
+
+- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+- job->fence = dma_fence_get(f);
++ r = amdgpu_job_submit_direct(job, ring, &f);
+ if (r)
+ goto err;
+
+- amdgpu_job_free(job);
+ if (fence)
+ *fence = dma_fence_get(f);
+ dma_fence_put(f);
+@@ -671,12 +664,10 @@ static int amdgpu_vcn_jpeg_set_reg(struct amdgpu_ring *ring, uint32_t handle,
+ }
+ ib->length_dw = 16;
+
+- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+- job->fence = dma_fence_get(f);
++ r = amdgpu_job_submit_direct(job, ring, &f);
+ if (r)
+ goto err;
+
+- amdgpu_job_free(job);
+ if (fence)
+ *fence = dma_fence_get(f);
+ dma_fence_put(f);
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+index 068fe13..f832c5e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+@@ -248,12 +248,10 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
+ for (i = ib->length_dw; i < ib_size_dw; ++i)
+ ib->ptr[i] = 0x0;
+
+- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+- job->fence = dma_fence_get(f);
++ r = amdgpu_job_submit_direct(job, ring, &f);
+ if (r)
+ goto err;
+
+- amdgpu_job_free(job);
+ if (fence)
+ *fence = dma_fence_get(f);
+ dma_fence_put(f);
+@@ -312,19 +310,13 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
+ for (i = ib->length_dw; i < ib_size_dw; ++i)
+ ib->ptr[i] = 0x0;
+
+- if (direct) {
+- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+- job->fence = dma_fence_get(f);
+- if (r)
+- goto err;
+-
+- amdgpu_job_free(job);
+- } else {
++ if (direct)
++ r = amdgpu_job_submit_direct(job, ring, &f);
++ else
+ r = amdgpu_job_submit(job, &ring->adev->vce.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+- if (r)
+- goto err;
+- }
++ if (r)
++ goto err;
+
+ if (fence)
+ *fence = dma_fence_get(f);
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+index c6365b0..423b74e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+@@ -250,12 +250,10 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
+ for (i = ib->length_dw; i < ib_size_dw; ++i)
+ ib->ptr[i] = 0x0;
+
+- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+- job->fence = dma_fence_get(f);
++ r = amdgpu_job_submit_direct(job, ring, &f);
+ if (r)
+ goto err;
+
+- amdgpu_job_free(job);
+ if (fence)
+ *fence = dma_fence_get(f);
+ dma_fence_put(f);
+@@ -313,19 +311,13 @@ int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+ for (i = ib->length_dw; i < ib_size_dw; ++i)
+ ib->ptr[i] = 0x0;
+
+- if (direct) {
+- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+- job->fence = dma_fence_get(f);
+- if (r)
+- goto err;
+-
+- amdgpu_job_free(job);
+- } else {
++ if (direct)
++ r = amdgpu_job_submit_direct(job, ring, &f);
++ else
+ r = amdgpu_job_submit(job, &ring->adev->vce.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+- if (r)
+- goto err;
+- }
++ if (r)
++ goto err;
+
+ if (fence)
+ *fence = dma_fence_get(f);
+--
+2.7.4
+
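Editorial note: with the helper in place, callers only choose between the direct path (used e.g. by the UVD/VCE/VCN message helpers converted above, bypassing the scheduler) and the normal entity-based path; the helper also sets job->base.sched itself, so ring lookups via to_amdgpu_ring() keep working even without a scheduler entity. The resulting caller pattern, condensed from the amdgpu_copy_buffer() hunk above:

        if (direct_submit)
                r = amdgpu_job_submit_direct(job, ring, &fence);
        else
                r = amdgpu_job_submit(job, &adev->mman.entity,
                                      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
        if (r)
                goto error_free;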
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4967-drm-amdgpu-remove-job-adev-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4967-drm-amdgpu-remove-job-adev-v2.patch
new file mode 100644
index 00000000..a248bd0c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4967-drm-amdgpu-remove-job-adev-v2.patch
@@ -0,0 +1,113 @@
+From 27d830371766ff8f5d44673c57de5c91a9ea22cc Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Fri, 13 Jul 2018 17:15:54 +0200
+Subject: [PATCH 4967/5725] drm/amdgpu: remove job->adev (v2)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+We can get that from the ring.
+
+v2: squash in "drm/amdgpu: always initialize job->base.sched" (Alex)
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Acked-by: Chunming Zhou <david1.zhou@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 18 +++++++++++-------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_job.h | 1 -
+ 2 files changed, 11 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+index 1be5575..09efdd1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+@@ -37,7 +37,7 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
+ job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
+ ring->fence_drv.sync_seq);
+
+- amdgpu_device_gpu_recover(job->adev, job, false);
++ amdgpu_device_gpu_recover(ring->adev, job, false);
+ }
+
+ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
+@@ -54,7 +54,11 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
+ if (!*job)
+ return -ENOMEM;
+
+- (*job)->adev = adev;
++ /*
++ * Initialize the scheduler to at least some ring so that we always
++ * have a pointer to adev.
++ */
++ (*job)->base.sched = &adev->rings[0]->sched;
+ (*job)->vm = vm;
+ (*job)->ibs = (void *)&(*job)[1];
+ (*job)->num_ibs = num_ibs;
+@@ -86,6 +90,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
+
+ void amdgpu_job_free_resources(struct amdgpu_job *job)
+ {
++ struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
+ struct dma_fence *f;
+ unsigned i;
+
+@@ -93,7 +98,7 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
+ f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;
+
+ for (i = 0; i < job->num_ibs; ++i)
+- amdgpu_ib_free(job->adev, &job->ibs[i], f);
++ amdgpu_ib_free(ring->adev, &job->ibs[i], f);
+ }
+
+ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
+@@ -167,7 +172,8 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
+
+ if (fence && explicit) {
+ if (drm_sched_dependency_optimized(fence, s_entity)) {
+- r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence, false);
++ r = amdgpu_sync_fence(ring->adev, &job->sched_sync,
++ fence, false);
+ if (r)
+ DRM_ERROR("Error adding fence to sync (%d)\n", r);
+ }
+@@ -190,7 +196,6 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
+ {
+ struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
+ struct dma_fence *fence = NULL, *finished;
+- struct amdgpu_device *adev;
+ struct amdgpu_job *job;
+ int r;
+
+@@ -200,13 +205,12 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
+ }
+ job = to_amdgpu_job(sched_job);
+ finished = &job->base.s_fence->finished;
+- adev = job->adev;
+
+ BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
+
+ trace_amdgpu_sched_run_job(job);
+ /* skip ib schedule when vram is lost */
+- if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
++ if (job->vram_lost_counter != atomic_read(&ring->adev->vram_lost_counter))
+ dma_fence_set_error(finished, -ECANCELED);/* skip IB as well if VRAM lost */
+
+ if (finished->error < 0) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+index d77fd23..57cfe78 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+@@ -37,7 +37,6 @@ struct amdgpu_fence;
+
+ struct amdgpu_job {
+ struct drm_sched_job base;
+- struct amdgpu_device *adev;
+ struct amdgpu_vm *vm;
+ struct amdgpu_sync sync;
+ struct amdgpu_sync sched_sync;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4968-drm-amdgpu-minor-cleanup-in-amdgpu_job.c.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4968-drm-amdgpu-minor-cleanup-in-amdgpu_job.c.patch
new file mode 100644
index 00000000..b5695181
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4968-drm-amdgpu-minor-cleanup-in-amdgpu_job.c.patch
@@ -0,0 +1,66 @@
+From a628d595dd0642548a3fc785ea217a0c3cadc547 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Fri, 13 Jul 2018 14:01:08 +0200
+Subject: [PATCH 4968/5725] drm/amdgpu: minor cleanup in amdgpu_job.c
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Remove superfluous NULL check, fix coding style a bit, shorten error
+messages.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Acked-by: Chunming Zhou <david1.zhou@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 11 ++++-------
+ 1 file changed, 4 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+index 09efdd1..f92437f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+@@ -33,7 +33,7 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
+ struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
+ struct amdgpu_job *job = to_amdgpu_job(s_job);
+
+- DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n",
++ DRM_ERROR("ring %s timeout, signaled seq=%u, emitted seq=%u\n",
+ job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
+ ring->fence_drv.sync_seq);
+
+@@ -166,16 +166,17 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
+ struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->sched);
+ struct amdgpu_job *job = to_amdgpu_job(sched_job);
+ struct amdgpu_vm *vm = job->vm;
++ struct dma_fence *fence;
+ bool explicit = false;
+ int r;
+- struct dma_fence *fence = amdgpu_sync_get_fence(&job->sync, &explicit);
+
++ fence = amdgpu_sync_get_fence(&job->sync, &explicit);
+ if (fence && explicit) {
+ if (drm_sched_dependency_optimized(fence, s_entity)) {
+ r = amdgpu_sync_fence(ring->adev, &job->sched_sync,
+ fence, false);
+ if (r)
+- DRM_ERROR("Error adding fence to sync (%d)\n", r);
++ DRM_ERROR("Error adding fence (%d)\n", r);
+ }
+ }
+
+@@ -199,10 +200,6 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
+ struct amdgpu_job *job;
+ int r;
+
+- if (!sched_job) {
+- DRM_ERROR("job is null\n");
+- return NULL;
+- }
+ job = to_amdgpu_job(sched_job);
+ finished = &job->base.s_fence->finished;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4969-drm-amdgpu-allow-for-more-flexible-priority-handling.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4969-drm-amdgpu-allow-for-more-flexible-priority-handling.patch
new file mode 100644
index 00000000..2abb812d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4969-drm-amdgpu-allow-for-more-flexible-priority-handling.patch
@@ -0,0 +1,36 @@
+From e058320957ab91b7f4fcf566951abad3aa549ee9 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Mon, 16 Jul 2018 13:47:34 +0200
+Subject: [PATCH 4969/5725] drm/amdgpu: allow for more flexible priority
+ handling
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Allow calling amdgpu_ring_priority_get() after pushing the job to the
+scheduler.

+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+index 19e45a3..93794a8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+@@ -211,7 +211,8 @@ void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
+ if (!ring->funcs->set_priority)
+ return;
+
+- atomic_inc(&ring->num_jobs[priority]);
++ if (atomic_inc_return(&ring->num_jobs[priority]) <= 0)
++ return;
+
+ mutex_lock(&ring->priority_mutex);
+ if (priority <= ring->priority)
+--
+2.7.4
+
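Editorial note: the <= 0 guard appears to make the per-priority job counter tolerant of a put() arriving before its matching get(), which the next patch relies on when it moves amdgpu_ring_priority_get() to after drm_sched_entity_push_job(). A standalone sketch of that counter behaviour, with C11 atomics standing in for the kernel's atomic_t (this only models the counting, not the priority mutex handling):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int num_jobs;     /* one counter per priority level in the driver */

static void priority_put(void)
{
        atomic_fetch_sub(&num_jobs, 1);
}

static void priority_get(void)
{
        /* mirrors atomic_inc_return(...) <= 0: a retired job means no raise */
        if (atomic_fetch_add(&num_jobs, 1) + 1 <= 0)
                return;
        puts("raise ring priority");
}

int main(void)
{
        priority_put();         /* free callback won the race with the get */
        priority_get();         /* counter returns to 0, priority untouched */
        priority_get();         /* a live job, priority is raised */
        return 0;
}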
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4970-drm-amdgpu-change-ring-priority-after-pushing-the-jo.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4970-drm-amdgpu-change-ring-priority-after-pushing-the-jo.patch
new file mode 100644
index 00000000..6e5d9eb1
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4970-drm-amdgpu-change-ring-priority-after-pushing-the-jo.patch
@@ -0,0 +1,81 @@
+From cb06803a0142639a485a174eb881ab80713e7872 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Mon, 16 Jul 2018 14:58:48 +0200
+Subject: [PATCH 4970/5725] drm/amdgpu: change ring priority after pushing the
+ job (v2)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Pushing a job can change the ring assignment of an entity.
+
+v2: squash in:
+"drm/amdgpu: fix job priority handling" (Christian)
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 6 +++++-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 8 ++++++--
+ 2 files changed, 11 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index 0e31215..8f2991b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -1211,6 +1211,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
+ {
+ struct amdgpu_ring *ring = p->ring;
+ struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
++ enum drm_sched_priority priority;
+ struct amdgpu_job *job;
+ unsigned i;
+ uint64_t seq;
+@@ -1258,11 +1259,14 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
+ job->uf_sequence = seq;
+
+ amdgpu_job_free_resources(job);
+- amdgpu_ring_priority_get(p->ring, job->base.s_priority);
+
+ trace_amdgpu_cs_ioctl(job);
++ priority = job->base.s_priority;
+ drm_sched_entity_push_job(&job->base, entity);
+
++ ring = to_amdgpu_ring(entity->sched);
++ amdgpu_ring_priority_get(ring, priority);
++
+ ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
+ amdgpu_mn_unlock(p->mn);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+index f92437f..1250aae 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+@@ -126,7 +126,8 @@ void amdgpu_job_free(struct amdgpu_job *job)
+ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
+ void *owner, struct dma_fence **f)
+ {
+- struct amdgpu_ring *ring = to_amdgpu_ring(entity->sched);
++ enum drm_sched_priority priority;
++ struct amdgpu_ring *ring;
+ int r;
+
+ if (!f)
+@@ -139,9 +140,12 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
+ job->owner = owner;
+ *f = dma_fence_get(&job->base.s_fence->finished);
+ amdgpu_job_free_resources(job);
+- amdgpu_ring_priority_get(ring, job->base.s_priority);
++ priority = job->base.s_priority;
+ drm_sched_entity_push_job(&job->base, entity);
+
++ ring = to_amdgpu_ring(entity->sched);
++ amdgpu_ring_priority_get(ring, priority);
++
+ return 0;
+ }
+
+--
+2.7.4
+
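Editorial note: the ordering this patch establishes in both submit paths is to sample the priority while the job is still owned by the caller, push the job (which may rebind the entity to a different ring), and only then resolve the ring from entity->sched to take the priority reference. Condensed from the hunks above:

        priority = job->base.s_priority;
        drm_sched_entity_push_job(&job->base, entity);

        /* the push may have moved the entity; resolve the ring afterwards */
        ring = to_amdgpu_ring(entity->sched);
        amdgpu_ring_priority_get(ring, priority);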
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4971-drm-amdgpu-simplify-the-bo-reference-on-amdgpu_bo_up.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4971-drm-amdgpu-simplify-the-bo-reference-on-amdgpu_bo_up.patch
new file mode 100644
index 00000000..10d46192
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4971-drm-amdgpu-simplify-the-bo-reference-on-amdgpu_bo_up.patch
@@ -0,0 +1,51 @@
+From 674fede53e8ed89c8718435c25127a005c2e07ba Mon Sep 17 00:00:00 2001
+From: Huang Rui <ray.huang@amd.com>
+Date: Wed, 4 Jul 2018 18:08:54 +0800
+Subject: [PATCH 4971/5725] drm/amdgpu: simplify the bo reference on
+ amdgpu_bo_update
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The BO pointer is already initialized at its definition, so we needn't use
+the complicated reference.
+
+v2: fix typo at subject line
+
+Signed-off-by: Huang Rui <ray.huang@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index b8e9262..852956c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -1655,18 +1655,17 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
+ struct amdgpu_device *bo_adev;
+ int r;
+
+- if (clear || !bo_va->base.bo) {
++ if (clear || !bo) {
+ mem = NULL;
+ nodes = NULL;
+ exclusive = NULL;
+ } else {
+ struct ttm_dma_tt *ttm;
+
+- mem = &bo_va->base.bo->tbo.mem;
++ mem = &bo->tbo.mem;
+ nodes = mem->mm_node;
+ if (mem->mem_type == TTM_PL_TT) {
+- ttm = container_of(bo_va->base.bo->tbo.ttm,
+- struct ttm_dma_tt, ttm);
++ ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
+ pages_addr = ttm->dma_address;
+ } else if (mem->mem_type == AMDGPU_PL_DGMA_IMPORT) {
+ pages_addr = (dma_addr_t *)bo_va->base.bo->tbo.mem.bus.addr;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4972-drm-amdgpu-pm-Remove-VLA-usage.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4972-drm-amdgpu-pm-Remove-VLA-usage.patch
new file mode 100644
index 00000000..c74ec7c3
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4972-drm-amdgpu-pm-Remove-VLA-usage.patch
@@ -0,0 +1,174 @@
+From 394cda92a7180bcee0d9ac775d14b653b0e2d01d Mon Sep 17 00:00:00 2001
+From: Kees Cook <keescook@chromium.org>
+Date: Wed, 20 Jun 2018 11:26:47 -0700
+Subject: [PATCH 4972/5725] drm/amdgpu/pm: Remove VLA usage
+
+In the quest to remove all stack VLA usage from the kernel[1], this
+uses the maximum sane buffer size and removes copy/paste code.
+
+[1] https://lkml.kernel.org/r/CA+55aFzCG-zNmZwX4A2FQpadafLfEzK6CC=qPXydAacU1RqZWA@mail.gmail.com
+
+Reviewed-by: Rex Zhu <rezhu@amd.com>
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 100 ++++++++++++++-------------------
+ 1 file changed, 42 insertions(+), 58 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+index f30e03f..a3ab2a2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+@@ -606,40 +606,59 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
+ return snprintf(buf, PAGE_SIZE, "\n");
+ }
+
+-static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
+- struct device_attribute *attr,
+- const char *buf,
+- size_t count)
++/*
++ * Worst case: 32 bits individually specified, in octal at 12 characters
++ * per line (+1 for \n).
++ */
++#define AMDGPU_MASK_BUF_MAX (32 * 13)
++
++static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
+ {
+- struct drm_device *ddev = dev_get_drvdata(dev);
+- struct amdgpu_device *adev = ddev->dev_private;
+ int ret;
+ long level;
+- uint32_t mask = 0;
+ char *sub_str = NULL;
+ char *tmp;
+- char buf_cpy[count];
++ char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
+ const char delimiter[3] = {' ', '\n', '\0'};
++ size_t bytes;
+
+- memcpy(buf_cpy, buf, count+1);
++ *mask = 0;
++
++ bytes = min(count, sizeof(buf_cpy) - 1);
++ memcpy(buf_cpy, buf, bytes);
++ buf_cpy[bytes] = '\0';
+ tmp = buf_cpy;
+ while (tmp[0]) {
+- sub_str = strsep(&tmp, delimiter);
++ sub_str = strsep(&tmp, delimiter);
+ if (strlen(sub_str)) {
+ ret = kstrtol(sub_str, 0, &level);
+-
+- if (ret) {
+- count = -EINVAL;
+- goto fail;
+- }
+- mask |= 1 << level;
++ if (ret)
++ return -EINVAL;
++ *mask |= 1 << level;
+ } else
+ break;
+ }
++
++ return 0;
++}
++
++static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf,
++ size_t count)
++{
++ struct drm_device *ddev = dev_get_drvdata(dev);
++ struct amdgpu_device *adev = ddev->dev_private;
++ int ret;
++ uint32_t mask = 0;
++
++ ret = amdgpu_read_mask(buf, count, &mask);
++ if (ret)
++ return ret;
++
+ if (adev->powerplay.pp_funcs->force_clock_level)
+ amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
+
+-fail:
+ return count;
+ }
+
+@@ -664,32 +683,15 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ int ret;
+- long level;
+ uint32_t mask = 0;
+- char *sub_str = NULL;
+- char *tmp;
+- char buf_cpy[count];
+- const char delimiter[3] = {' ', '\n', '\0'};
+
+- memcpy(buf_cpy, buf, count+1);
+- tmp = buf_cpy;
+- while (tmp[0]) {
+- sub_str = strsep(&tmp, delimiter);
+- if (strlen(sub_str)) {
+- ret = kstrtol(sub_str, 0, &level);
++ ret = amdgpu_read_mask(buf, count, &mask);
++ if (ret)
++ return ret;
+
+- if (ret) {
+- count = -EINVAL;
+- goto fail;
+- }
+- mask |= 1 << level;
+- } else
+- break;
+- }
+ if (adev->powerplay.pp_funcs->force_clock_level)
+ amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
+
+-fail:
+ return count;
+ }
+
+@@ -714,33 +716,15 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ int ret;
+- long level;
+ uint32_t mask = 0;
+- char *sub_str = NULL;
+- char *tmp;
+- char buf_cpy[count];
+- const char delimiter[3] = {' ', '\n', '\0'};
+-
+- memcpy(buf_cpy, buf, count+1);
+- tmp = buf_cpy;
+
+- while (tmp[0]) {
+- sub_str = strsep(&tmp, delimiter);
+- if (strlen(sub_str)) {
+- ret = kstrtol(sub_str, 0, &level);
++ ret = amdgpu_read_mask(buf, count, &mask);
++ if (ret)
++ return ret;
+
+- if (ret) {
+- count = -EINVAL;
+- goto fail;
+- }
+- mask |= 1 << level;
+- } else
+- break;
+- }
+ if (adev->powerplay.pp_funcs->force_clock_level)
+ amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
+
+-fail:
+ return count;
+ }
+
+--
+2.7.4
+
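Editorial note: the heart of this fix is replacing the char buf_cpy[count] VLA with a fixed worst-case buffer and a single shared parser. A standalone userspace sketch of that bounded parsing (strtok and strtol stand in for the kernel's strsep/kstrtol; the buffer bound matches the patch's worst-case estimate):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#define MASK_BUF_MAX (32 * 13)          /* worst case, as in the patch */

static int read_mask(const char *buf, size_t count, uint32_t *mask)
{
        char buf_cpy[MASK_BUF_MAX + 1];
        size_t bytes = count < sizeof(buf_cpy) - 1 ? count : sizeof(buf_cpy) - 1;
        char *sub, *end;
        long level;

        *mask = 0;
        memcpy(buf_cpy, buf, bytes);    /* never more than the fixed buffer holds */
        buf_cpy[bytes] = '\0';

        for (sub = strtok(buf_cpy, " \n"); sub; sub = strtok(NULL, " \n")) {
                level = strtol(sub, &end, 0);
                if (*end || level < 0 || level > 31)
                        return -1;
                *mask |= 1u << level;
        }
        return 0;
}

int main(void)
{
        uint32_t mask;

        if (read_mask("0 2 5\n", 6, &mask) == 0)
                printf("mask = 0x%x\n", mask);  /* prints mask = 0x25 */
        return 0;
}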
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4973-drm-amdgpu-powerplay-use-irq-source-defines-for-smu7.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4973-drm-amdgpu-powerplay-use-irq-source-defines-for-smu7.patch
new file mode 100644
index 00000000..9024ef9b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4973-drm-amdgpu-powerplay-use-irq-source-defines-for-smu7.patch
@@ -0,0 +1,52 @@
+From 7817b94555f77af03a903490c5e75f53f8e8e880 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Wed, 18 Jul 2018 16:07:11 -0500
+Subject: [PATCH 4973/5725] drm/amdgpu/powerplay: use irq source defines for
+ smu7 sources
+
+Use the newly added irq source defines rather than magic numbers
+for smu7 thermal interrupts.
+
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+Reviewed-by: Rex Zhu <rezhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
+index 8eea49e..2aab1b4 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
+@@ -27,6 +27,7 @@
+ #include "atom.h"
+ #include "ivsrcid/thm/irqsrcs_thm_9_0.h"
+ #include "ivsrcid/smuio/irqsrcs_smuio_9_0.h"
++#include "ivsrcid/ivsrcid_vislands30.h"
+
+ uint8_t convert_to_vid(uint16_t vddc)
+ {
+@@ -545,17 +546,17 @@ int phm_irq_process(struct amdgpu_device *adev,
+ uint32_t src_id = entry->src_id;
+
+ if (client_id == AMDGPU_IH_CLIENTID_LEGACY) {
+- if (src_id == 230)
++ if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH)
+ pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
+ PCI_BUS_NUM(adev->pdev->devfn),
+ PCI_SLOT(adev->pdev->devfn),
+ PCI_FUNC(adev->pdev->devfn));
+- else if (src_id == 231)
++ else if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW)
+ pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n",
+ PCI_BUS_NUM(adev->pdev->devfn),
+ PCI_SLOT(adev->pdev->devfn),
+ PCI_FUNC(adev->pdev->devfn));
+- else if (src_id == 83)
++ else if (src_id == VISLANDS30_IV_SRCID_GPIO_19)
+ pr_warn("GPU Critical Temperature Fault detected on PCIe %d:%d.%d!\n",
+ PCI_BUS_NUM(adev->pdev->devfn),
+ PCI_SLOT(adev->pdev->devfn),
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4974-drm-amd-powerplay-fixed-uninitialized-value.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4974-drm-amd-powerplay-fixed-uninitialized-value.patch
new file mode 100644
index 00000000..305dd90c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4974-drm-amd-powerplay-fixed-uninitialized-value.patch
@@ -0,0 +1,32 @@
+From 728782455a0f16da16dd99474f7b3a6999e74d73 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Thu, 19 Jul 2018 13:21:43 +0800
+Subject: [PATCH 4974/5725] drm/amd/powerplay: fixed uninitialized value
+
+The 'result' is not initialized correctly. It causes the API to
+return an error code even on success.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Acked-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+index 912d0d6..4ed218d 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+@@ -488,7 +488,7 @@ static int vega12_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
+ static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
+ PPCLK_e clkID, uint32_t index, uint32_t *clock)
+ {
+- int result;
++ int result = 0;
+
+ /*
+ *SMU expects the Clock ID to be in the top 16 bits.
+--
+2.7.4
+
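The bug class fixed here is easy to reproduce outside the driver. A minimal, self-contained sketch (all names hypothetical) of why an uninitialized 'result' can make a successful call report failure:

    /* Sketch only: 'result' is never assigned on the success path, so the
     * caller may see non-zero garbage; initializing it to 0 fixes that. */
    #include <stdio.h>

    static int send_msg_ok(void) { return 1; }   /* hypothetical: message accepted */

    static int get_dpm_frequency(unsigned int *clock)
    {
            int result = 0;          /* was "int result;" before the fix */

            if (!send_msg_ok())
                    result = -1;      /* only the failure path assigns it */

            *clock = 300;             /* placeholder frequency */
            return result;            /* 0 == success */
    }

    int main(void)
    {
            unsigned int clk;
            printf("ret=%d clk=%u\n", get_dpm_frequency(&clk), clk);
            return 0;
    }
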
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4975-drm-amdgpu-display-Replace-CONFIG_DRM_AMD_DC_DCN1_0-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4975-drm-amdgpu-display-Replace-CONFIG_DRM_AMD_DC_DCN1_0-.patch
new file mode 100644
index 00000000..28598212
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4975-drm-amdgpu-display-Replace-CONFIG_DRM_AMD_DC_DCN1_0-.patch
@@ -0,0 +1,673 @@
+From d83a05473032b27a1e8613a1abed3a65120d43f6 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Michel=20D=C3=A4nzer?= <michel.daenzer@amd.com>
+Date: Tue, 17 Jul 2018 12:37:45 +0200
+Subject: [PATCH 4975/5725] drm/amdgpu/display: Replace
+ CONFIG_DRM_AMD_DC_DCN1_0 with CONFIG_X86
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Allowing CONFIG_DRM_AMD_DC_DCN1_0 to be disabled on X86 made it
+possible for display with Raven Ridge to accidentally not work.
+
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 4 ++--
+ drivers/gpu/drm/amd/display/Kconfig | 8 --------
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 8 ++++----
+ drivers/gpu/drm/amd/display/dc/Makefile | 2 +-
+ .../drm/amd/display/dc/bios/command_table_helper2.c | 2 +-
+ drivers/gpu/drm/amd/display/dc/calcs/Makefile | 2 +-
+ drivers/gpu/drm/amd/display/dc/core/dc.c | 6 +++---
+ drivers/gpu/drm/amd/display/dc/core/dc_debug.c | 2 +-
+ drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 12 ++++++------
+ drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
+ .../gpu/drm/amd/display/dc/dce/dce_clock_source.c | 6 +++---
+ .../gpu/drm/amd/display/dc/dce/dce_clock_source.h | 2 +-
+ drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c | 8 ++++----
+ drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h | 2 +-
+ drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c | 6 +++---
+ .../gpu/drm/amd/display/dc/dce/dce_stream_encoder.c | 20 ++++++++++----------
+ .../drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 2 +-
+ drivers/gpu/drm/amd/display/dc/gpio/Makefile | 2 +-
+ drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c | 4 ++--
+ drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c | 4 ++--
+ drivers/gpu/drm/amd/display/dc/i2caux/Makefile | 2 +-
+ drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c | 4 ++--
+ drivers/gpu/drm/amd/display/dc/inc/core_types.h | 6 +++---
+ drivers/gpu/drm/amd/display/dc/irq/Makefile | 2 +-
+ drivers/gpu/drm/amd/display/dc/irq/irq_service.c | 2 +-
+ drivers/gpu/drm/amd/display/dc/os_types.h | 2 +-
+ 26 files changed, 57 insertions(+), 65 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index ea19758..e93ec54 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -2248,8 +2248,8 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
+ case CHIP_FIJI:
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
+- case CHIP_VEGA20:
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
++ case CHIP_VEGA20:
++#ifdef CONFIG_X86
+ case CHIP_RAVEN:
+ #endif
+ return amdgpu_dc != 0;
+diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
+index b23c89a..6a824ac 100644
+--- a/drivers/gpu/drm/amd/display/Kconfig
++++ b/drivers/gpu/drm/amd/display/Kconfig
+@@ -9,14 +9,6 @@ config DRM_AMD_DC
+ support for AMDGPU.This adds required support for Vega and
+ Raven ASICs.
+
+-config DRM_AMD_DC_DCN1_0
+- bool "DCN 1.0 Raven family"
+- depends on DRM_AMD_DC && X86
+- default y
+- help
+- Choose this option if you want to have
+- RV family for display engine
+-
+ config DEBUG_KERNEL_DC
+ bool "Enable kgdb break in DC"
+ depends on DRM_AMD_DC
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 8ab7a99..1c6caa7 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -58,7 +58,7 @@
+ #include <drm/drm_fb_helper.h>
+ #include <drm/drm_edid.h>
+
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
++#ifdef CONFIG_X86
+ #include "ivsrcid/irqsrcs_dcn_1_0.h"
+
+ #include "dcn/dcn_1_0_offset.h"
+@@ -1309,7 +1309,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
+ return 0;
+ }
+
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
++#ifdef CONFIG_X86
+ /* Register IRQ sources and initialize IRQ callbacks */
+ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
+ {
+@@ -1651,7 +1651,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
+ goto fail;
+ }
+ break;
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
++#ifdef CONFIG_X86
+ case CHIP_RAVEN:
+ if (dcn10_register_irq_handlers(dm->adev)) {
+ DRM_ERROR("DM: Failed to initialize IRQ\n");
+@@ -1914,7 +1914,7 @@ static int dm_early_init(void *handle)
+ adev->mode_info.num_dig = 6;
+ adev->mode_info.plane_type = dm_plane_type_default;
+ break;
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
++#ifdef CONFIG_X86
+ case CHIP_RAVEN:
+ adev->mode_info.num_crtc = 4;
+ adev->mode_info.num_hpd = 4;
+diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
+index 4f83e30..c6d36da 100644
+--- a/drivers/gpu/drm/amd/display/dc/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/Makefile
+@@ -4,7 +4,7 @@
+
+ DC_LIBS = basics bios calcs dce gpio i2caux irq virtual
+
+-ifdef CONFIG_DRM_AMD_DC_DCN1_0
++ifdef CONFIG_X86
+ DC_LIBS += dcn10 dml
+ endif
+
+diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+index bbbcef5..770ff89 100644
+--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
++++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+@@ -55,7 +55,7 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
+ case DCE_VERSION_11_22:
+ *h = dal_cmd_tbl_helper_dce112_get_table2();
+ return true;
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
++#ifdef CONFIG_X86
+ case DCN_VERSION_1_0:
+ *h = dal_cmd_tbl_helper_dce112_get_table2();
+ return true;
+diff --git a/drivers/gpu/drm/amd/display/dc/calcs/Makefile b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
+index 1cb3a1d..5370f92 100644
+--- a/drivers/gpu/drm/amd/display/dc/calcs/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
+@@ -17,7 +17,7 @@ CFLAGS_dcn_calc_math.o := $(calcs_ccflags) -Wno-tautological-compare
+
+ BW_CALCS = dce_calcs.o bw_fixed.o custom_float.o
+
+-ifdef CONFIG_DRM_AMD_DC_DCN1_0
++ifdef CONFIG_X86
+ BW_CALCS += dcn_calcs.o dcn_calc_math.o dcn_calc_auto.o
+ endif
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index e9e7953..8ca5efd 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -474,7 +474,7 @@ static void destruct(struct dc *dc)
+ kfree(dc->bw_dceip);
+ dc->bw_dceip = NULL;
+
+-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
++#ifdef CONFIG_X86
+ kfree(dc->dcn_soc);
+ dc->dcn_soc = NULL;
+
+@@ -490,7 +490,7 @@ static bool construct(struct dc *dc,
+ struct dc_context *dc_ctx;
+ struct bw_calcs_dceip *dc_dceip;
+ struct bw_calcs_vbios *dc_vbios;
+-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
++#ifdef CONFIG_X86
+ struct dcn_soc_bounding_box *dcn_soc;
+ struct dcn_ip_params *dcn_ip;
+ #endif
+@@ -512,7 +512,7 @@ static bool construct(struct dc *dc,
+ }
+
+ dc->bw_vbios = dc_vbios;
+-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
++#ifdef CONFIG_X86
+ dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
+ if (!dcn_soc) {
+ dm_error("%s: failed to create dcn_soc\n", __func__);
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+index e1ebdf7..caece7c1 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+@@ -348,7 +348,7 @@ void context_clock_trace(
+ struct dc *dc,
+ struct dc_state *context)
+ {
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
++#ifdef CONFIG_X86
+ DC_LOGGER_INIT(dc->ctx->logger);
+ CLOCK_TRACE("Current: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
+ "dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n",
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index 417d2bf..f42a465 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -41,7 +41,7 @@
+ #include "dce100/dce100_resource.h"
+ #include "dce110/dce110_resource.h"
+ #include "dce112/dce112_resource.h"
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
++#ifdef CONFIG_X86
+ #include "dcn10/dcn10_resource.h"
+ #endif
+ #include "dce120/dce120_resource.h"
+@@ -85,7 +85,7 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
+ case FAMILY_AI:
+ dc_version = DCE_VERSION_12_0;
+ break;
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
++#ifdef CONFIG_X86
+ case FAMILY_RV:
+ dc_version = DCN_VERSION_1_0;
+ break;
+@@ -136,7 +136,7 @@ struct resource_pool *dc_create_resource_pool(
+ num_virtual_links, dc);
+ break;
+
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
++#ifdef CONFIG_X86
+ case DCN_VERSION_1_0:
+ res_pool = dcn10_create_resource_pool(
+ num_virtual_links, dc);
+@@ -1212,7 +1212,7 @@ static struct pipe_ctx *acquire_free_pipe_for_stream(
+
+ }
+
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
++#ifdef CONFIG_X86
+ static int acquire_first_split_pipe(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+@@ -1283,7 +1283,7 @@ bool dc_add_plane_to_context(
+
+ free_pipe = acquire_free_pipe_for_stream(context, pool, stream);
+
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
++#ifdef CONFIG_X86
+ if (!free_pipe) {
+ int pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream);
+ if (pipe_idx >= 0)
+@@ -1884,7 +1884,7 @@ enum dc_status resource_map_pool_resources(
+ /* acquire new resources */
+ pipe_idx = acquire_first_free_pipe(&context->res_ctx, pool, stream);
+
+-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
++#ifdef CONFIG_X86
+ if (pipe_idx < 0)
+ pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream);
+ #endif
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 5030c02..cddc34f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -290,7 +290,7 @@ struct dc {
+ /* Inputs into BW and WM calculations. */
+ struct bw_calcs_dceip *bw_dceip;
+ struct bw_calcs_vbios *bw_vbios;
+-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
++#ifdef CONFIG_X86
+ struct dcn_soc_bounding_box *dcn_soc;
+ struct dcn_ip_params *dcn_ip;
+ struct display_mode_lib dml;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+index f72f331..c5069a10 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+@@ -592,7 +592,7 @@ static uint32_t dce110_get_pix_clk_dividers(
+ case DCE_VERSION_11_2:
+ case DCE_VERSION_11_22:
+ case DCE_VERSION_12_0:
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
++#ifdef CONFIG_X86
+ case DCN_VERSION_1_0:
+ #endif
+
+@@ -909,7 +909,7 @@ static bool dce110_program_pix_clk(
+ struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source);
+ struct bp_pixel_clock_parameters bp_pc_params = {0};
+
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
++#ifdef CONFIG_X86
+ if (IS_FPGA_MAXIMUS_DC(clock_source->ctx->dce_environment)) {
+ unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0;
+ unsigned dp_dto_ref_kHz = 700000;
+@@ -982,7 +982,7 @@ static bool dce110_program_pix_clk(
+ case DCE_VERSION_11_2:
+ case DCE_VERSION_11_22:
+ case DCE_VERSION_12_0:
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
++#ifdef CONFIG_X86
+ case DCN_VERSION_1_0:
+ #endif
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
+index c45e2f7..801bb65 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
+@@ -55,7 +55,7 @@
+ CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_DCCG_DEEP_COLOR_CNTL, mask_sh),\
+ CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE, mask_sh)
+
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
++#ifdef CONFIG_X86
+
+ #define CS_COMMON_REG_LIST_DCN1_0(index, pllid) \
+ SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+index 6882dc9..8f8a2ab 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+@@ -30,7 +30,7 @@
+ #include "bios_parser_interface.h"
+ #include "dc.h"
+ #include "dmcu.h"
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
++#ifdef CONFIG_X86
+ #include "dcn_calcs.h"
+ #endif
+ #include "core_types.h"
+@@ -478,7 +478,7 @@ static void dce12_update_clocks(struct dccg *dccg,
+ }
+ }
+
+-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
++#ifdef CONFIG_X86
+ static int dcn1_determine_dppclk_threshold(struct dccg *dccg, struct dc_clocks *new_clocks)
+ {
+ bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
+@@ -666,7 +666,7 @@ static void dce_update_clocks(struct dccg *dccg,
+ }
+ }
+
+-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
++#ifdef CONFIG_X86
+ static const struct display_clock_funcs dcn1_funcs = {
+ .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+ .set_dispclk = dce112_set_clock,
+@@ -821,7 +821,7 @@ struct dccg *dce120_dccg_create(struct dc_context *ctx)
+ return &clk_dce->base;
+ }
+
+-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
++#ifdef CONFIG_X86
+ struct dccg *dcn1_dccg_create(struct dc_context *ctx)
+ {
+ struct dc_debug *debug = &ctx->dc->debug;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
+index 8a6b2d3..e5e44ad 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
+@@ -111,7 +111,7 @@ struct dccg *dce112_dccg_create(
+
+ struct dccg *dce120_dccg_create(struct dc_context *ctx);
+
+-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
++#ifdef CONFIG_X86
+ struct dccg *dcn1_dccg_create(struct dc_context *ctx);
+ #endif
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+index a576b8b..062a465 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+@@ -314,7 +314,7 @@ static void dce_get_psr_wait_loop(
+ return;
+ }
+
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
++#ifdef CONFIG_X86
+ static void dcn10_get_dmcu_state(struct dmcu *dmcu)
+ {
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+@@ -735,7 +735,7 @@ static const struct dmcu_funcs dce_funcs = {
+ .is_dmcu_initialized = dce_is_dmcu_initialized
+ };
+
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
++#ifdef CONFIG_X86
+ static const struct dmcu_funcs dcn10_funcs = {
+ .dmcu_init = dcn10_dmcu_init,
+ .load_iram = dcn10_dmcu_load_iram,
+@@ -787,7 +787,7 @@ struct dmcu *dce_dmcu_create(
+ return &dmcu_dce->base;
+ }
+
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
++#ifdef CONFIG_X86
+ struct dmcu *dcn10_dmcu_create(
+ struct dc_context *ctx,
+ const struct dce_dmcu_registers *regs,
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+index 91642e6..b139b40 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+@@ -135,7 +135,7 @@ static void dce110_update_generic_info_packet(
+ AFMT_GENERIC0_UPDATE, (packet_index == 0),
+ AFMT_GENERIC2_UPDATE, (packet_index == 2));
+ }
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
++#ifdef CONFIG_X86
+ if (REG(AFMT_VBI_PACKET_CONTROL1)) {
+ switch (packet_index) {
+ case 0:
+@@ -229,7 +229,7 @@ static void dce110_update_hdmi_info_packet(
+ HDMI_GENERIC1_SEND, send,
+ HDMI_GENERIC1_LINE, line);
+ break;
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
++#ifdef CONFIG_X86
+ case 4:
+ if (REG(HDMI_GENERIC_PACKET_CONTROL2))
+ REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL2,
+@@ -274,7 +274,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
+ struct dc_crtc_timing *crtc_timing,
+ enum dc_color_space output_color_space)
+ {
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
++#ifdef CONFIG_X86
+ uint32_t h_active_start;
+ uint32_t v_active_start;
+ uint32_t misc0 = 0;
+@@ -317,7 +317,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
+ if (enc110->se_mask->DP_VID_M_DOUBLE_VALUE_EN)
+ REG_UPDATE(DP_VID_TIMING, DP_VID_M_DOUBLE_VALUE_EN, 1);
+
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
++#ifdef CONFIG_X86
+ if (enc110->se_mask->DP_VID_N_MUL)
+ REG_UPDATE(DP_VID_TIMING, DP_VID_N_MUL, 1);
+ #endif
+@@ -328,7 +328,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
+ break;
+ }
+
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
++#ifdef CONFIG_X86
+ if (REG(DP_MSA_MISC))
+ misc1 = REG_READ(DP_MSA_MISC);
+ #endif
+@@ -362,7 +362,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
+ /* set dynamic range and YCbCr range */
+
+
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
++#ifdef CONFIG_X86
+ switch (crtc_timing->display_color_depth) {
+ case COLOR_DEPTH_666:
+ colorimetry_bpc = 0;
+@@ -441,7 +441,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
+ DP_DYN_RANGE, dynamic_range_rgb,
+ DP_YCBCR_RANGE, dynamic_range_ycbcr);
+
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
++#ifdef CONFIG_X86
+ if (REG(DP_MSA_COLORIMETRY))
+ REG_SET(DP_MSA_COLORIMETRY, 0, DP_MSA_MISC0, misc0);
+
+@@ -476,7 +476,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
+ crtc_timing->v_front_porch;
+
+
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
++#ifdef CONFIG_X86
+ /* start at begining of left border */
+ if (REG(DP_MSA_TIMING_PARAM2))
+ REG_SET_2(DP_MSA_TIMING_PARAM2, 0,
+@@ -751,7 +751,7 @@ static void dce110_stream_encoder_update_hdmi_info_packets(
+ dce110_update_hdmi_info_packet(enc110, 3, &info_frame->hdrsmd);
+ }
+
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
++#ifdef CONFIG_X86
+ if (enc110->se_mask->HDMI_DB_DISABLE) {
+ /* for bring up, disable dp double TODO */
+ if (REG(HDMI_DB_CONTROL))
+@@ -789,7 +789,7 @@ static void dce110_stream_encoder_stop_hdmi_info_packets(
+ HDMI_GENERIC1_LINE, 0,
+ HDMI_GENERIC1_SEND, 0);
+
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
++#ifdef CONFIG_X86
+ /* stop generic packets 2 & 3 on HDMI */
+ if (REG(HDMI_GENERIC_PACKET_CONTROL2))
+ REG_SET_6(HDMI_GENERIC_PACKET_CONTROL2, 0,
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index 8b7606e..4fff944 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -1250,7 +1250,7 @@ static void program_scaler(const struct dc *dc,
+ {
+ struct tg_color color = {0};
+
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
++#ifdef CONFIG_X86
+ /* TOFPGA */
+ if (pipe_ctx->plane_res.xfm->funcs->transform_set_pixel_storage_depth == NULL)
+ return;
+diff --git a/drivers/gpu/drm/amd/display/dc/gpio/Makefile b/drivers/gpu/drm/amd/display/dc/gpio/Makefile
+index 70d01a9..9d9dffe 100644
+--- a/drivers/gpu/drm/amd/display/dc/gpio/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/gpio/Makefile
+@@ -40,7 +40,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCE120)
+ ###############################################################################
+ # DCN 1x
+ ###############################################################################
+-ifdef CONFIG_DRM_AMD_DC_DCN1_0
++ifdef CONFIG_X86
+ GPIO_DCN10 = hw_translate_dcn10.o hw_factory_dcn10.o
+
+ AMD_DAL_GPIO_DCN10 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn10/,$(GPIO_DCN10))
+diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
+index 0caee35..83df779 100644
+--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
++++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
+@@ -43,7 +43,7 @@
+ #include "dce80/hw_factory_dce80.h"
+ #include "dce110/hw_factory_dce110.h"
+ #include "dce120/hw_factory_dce120.h"
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
++#ifdef CONFIG_X86
+ #include "dcn10/hw_factory_dcn10.h"
+ #endif
+
+@@ -81,7 +81,7 @@ bool dal_hw_factory_init(
+ case DCE_VERSION_12_0:
+ dal_hw_factory_dce120_init(factory);
+ return true;
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
++#ifdef CONFIG_X86
+ case DCN_VERSION_1_0:
+ dal_hw_factory_dcn10_init(factory);
+ return true;
+diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
+index 55c7074..e754131 100644
+--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
++++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
+@@ -43,7 +43,7 @@
+ #include "dce80/hw_translate_dce80.h"
+ #include "dce110/hw_translate_dce110.h"
+ #include "dce120/hw_translate_dce120.h"
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
++#ifdef CONFIG_X86
+ #include "dcn10/hw_translate_dcn10.h"
+ #endif
+
+@@ -78,7 +78,7 @@ bool dal_hw_translate_init(
+ case DCE_VERSION_12_0:
+ dal_hw_translate_dce120_init(translate);
+ return true;
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
++#ifdef CONFIG_X86
+ case DCN_VERSION_1_0:
+ dal_hw_translate_dcn10_init(translate);
+ return true;
+diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/Makefile b/drivers/gpu/drm/amd/display/dc/i2caux/Makefile
+index 5560340..c1870ee 100644
+--- a/drivers/gpu/drm/amd/display/dc/i2caux/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/i2caux/Makefile
+@@ -50,7 +50,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX_DCE112)
+ ###############################################################################
+ # DCN 1.0 family
+ ###############################################################################
+-ifdef CONFIG_DRM_AMD_DC_DCN1_0
++ifdef CONFIG_X86
+ I2CAUX_DCN1 = i2caux_dcn10.o
+
+ AMD_DAL_I2CAUX_DCN1 = $(addprefix $(AMDDALPATH)/dc/i2caux/dcn10/,$(I2CAUX_DCN1))
+diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
+index 9b0bcc6..f7ed355 100644
+--- a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
++++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
+@@ -59,7 +59,7 @@
+
+ #include "dce120/i2caux_dce120.h"
+
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
++#ifdef CONFIG_X86
+ #include "dcn10/i2caux_dcn10.h"
+ #endif
+
+@@ -91,7 +91,7 @@ struct i2caux *dal_i2caux_create(
+ return dal_i2caux_dce100_create(ctx);
+ case DCE_VERSION_12_0:
+ return dal_i2caux_dce120_create(ctx);
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
++#ifdef CONFIG_X86
+ case DCN_VERSION_1_0:
+ return dal_i2caux_dcn10_create(ctx);
+ #endif
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+index c42308a..816da02 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+@@ -33,7 +33,7 @@
+ #include "dc_bios_types.h"
+ #include "mem_input.h"
+ #include "hubp.h"
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
++#ifdef CONFIG_X86
+ #include "mpc.h"
+ #endif
+
+@@ -221,7 +221,7 @@ struct pipe_ctx {
+ struct pipe_ctx *top_pipe;
+ struct pipe_ctx *bottom_pipe;
+
+-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
++#ifdef CONFIG_X86
+ struct _vcs_dpi_display_dlg_regs_st dlg_regs;
+ struct _vcs_dpi_display_ttu_regs_st ttu_regs;
+ struct _vcs_dpi_display_rq_regs_st rq_regs;
+@@ -277,7 +277,7 @@ struct dc_state {
+
+ /* Note: these are big structures, do *not* put on stack! */
+ struct dm_pp_display_configuration pp_display_cfg;
+-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
++#ifdef CONFIG_X86
+ struct dcn_bw_internal_vars dcn_bw_vars;
+ #endif
+
+diff --git a/drivers/gpu/drm/amd/display/dc/irq/Makefile b/drivers/gpu/drm/amd/display/dc/irq/Makefile
+index c7e93f7..b094a5b 100644
+--- a/drivers/gpu/drm/amd/display/dc/irq/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/irq/Makefile
+@@ -39,7 +39,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCE12)
+ ###############################################################################
+ # DCN 1x
+ ###############################################################################
+-ifdef CONFIG_DRM_AMD_DC_DCN1_0
++ifdef CONFIG_X86
+ IRQ_DCN1 = irq_service_dcn10.o
+
+ AMD_DAL_IRQ_DCN1 = $(addprefix $(AMDDALPATH)/dc/irq/dcn10/,$(IRQ_DCN1))
+diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
+index 604bea0..ae3fd0a 100644
+--- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
++++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
+@@ -36,7 +36,7 @@
+ #include "dce120/irq_service_dce120.h"
+
+
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
++#ifdef CONFIG_X86
+ #include "dcn10/irq_service_dcn10.h"
+ #endif
+
+diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h
+index d18fcbb..fc81cae 100644
+--- a/drivers/gpu/drm/amd/display/dc/os_types.h
++++ b/drivers/gpu/drm/amd/display/dc/os_types.h
+@@ -52,7 +52,7 @@
+
+ #define dm_vlog(fmt, args) vprintk(fmt, args)
+
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
++#ifdef CONFIG_X86
+ #include <asm/fpu/api.h>
+ #endif
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4976-drm-amdgpu-remove-superflous-UVD-encode-entity.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4976-drm-amdgpu-remove-superflous-UVD-encode-entity.patch
new file mode 100644
index 00000000..7b2c91e1
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4976-drm-amdgpu-remove-superflous-UVD-encode-entity.patch
@@ -0,0 +1,105 @@
+From 0e1aaf3866482a938136af420ef95298755f389e Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Wed, 18 Jul 2018 20:28:08 +0200
+Subject: [PATCH 4976/5725] drm/amdgpu: remove superflous UVD encode entity
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Not sure what that was ever used for, but now it is completely unused.
+
+Reviewed-by: Leo Liu <leo.liu@amd.com>
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Acked-by: Chunming Zhou <david1.zhou@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h | 1 -
+ drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 12 ------------
+ drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 14 --------------
+ 3 files changed, 27 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+index 8b23a1b..cae3f52 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+@@ -48,7 +48,6 @@ struct amdgpu_uvd_inst {
+ struct amdgpu_ring ring_enc[AMDGPU_MAX_UVD_ENC_RINGS];
+ struct amdgpu_irq_src irq;
+ struct drm_sched_entity entity;
+- struct drm_sched_entity entity_enc;
+ uint32_t srbm_soft_reset;
+ };
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+index f832c5e..f6522bc 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+@@ -418,16 +418,6 @@ static int uvd_v6_0_sw_init(void *handle)
+ adev->uvd.num_enc_rings = 0;
+
+ DRM_INFO("UVD ENC is disabled\n");
+- } else {
+- struct drm_sched_rq *rq;
+- ring = &adev->uvd.inst->ring_enc[0];
+- rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+- r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity_enc,
+- rq, NULL);
+- if (r) {
+- DRM_ERROR("Failed setting up UVD ENC run queue.\n");
+- return r;
+- }
+ }
+
+ r = amdgpu_uvd_resume(adev);
+@@ -463,8 +453,6 @@ static int uvd_v6_0_sw_fini(void *handle)
+ return r;
+
+ if (uvd_v6_0_enc_support(adev)) {
+- drm_sched_entity_fini(&adev->uvd.inst->ring_enc[0].sched, &adev->uvd.inst->entity_enc);
+-
+ for (i = 0; i < adev->uvd.num_enc_rings; ++i)
+ amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]);
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+index 423b74e..1fd09d0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+@@ -389,7 +389,6 @@ static int uvd_v7_0_early_init(void *handle)
+ static int uvd_v7_0_sw_init(void *handle)
+ {
+ struct amdgpu_ring *ring;
+- struct drm_sched_rq *rq;
+ int i, j, r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+@@ -421,17 +420,6 @@ static int uvd_v7_0_sw_init(void *handle)
+ DRM_INFO("PSP loading UVD firmware\n");
+ }
+
+- for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+- ring = &adev->uvd.inst[j].ring_enc[0];
+- rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+- r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity_enc,
+- rq, NULL);
+- if (r) {
+- DRM_ERROR("(%d)Failed setting up UVD ENC run queue.\n", j);
+- return r;
+- }
+- }
+-
+ r = amdgpu_uvd_resume(adev);
+ if (r)
+ return r;
+@@ -484,8 +472,6 @@ static int uvd_v7_0_sw_fini(void *handle)
+ return r;
+
+ for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+- drm_sched_entity_fini(&adev->uvd.inst[j].ring_enc[0].sched, &adev->uvd.inst[j].entity_enc);
+-
+ for (i = 0; i < adev->uvd.num_enc_rings; ++i)
+ amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4977-drm-amdgpu-clean-up-UVD-instance-handling-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4977-drm-amdgpu-clean-up-UVD-instance-handling-v2.patch
new file mode 100644
index 00000000..1a509c41
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4977-drm-amdgpu-clean-up-UVD-instance-handling-v2.patch
@@ -0,0 +1,304 @@
+From f15e912ed9abc580a307ea08e74523ce124bc773 Mon Sep 17 00:00:00 2001
+From: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+Date: Wed, 9 Jan 2019 20:31:14 +0530
+Subject: [PATCH 4977/5725] drm/amdgpu: clean up UVD instance handling v2
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The whole handle, filp and entity handling is superfluous here.
+
+We should have reviewed that more thoughtfully. It looks like somebody
+just made the code instance aware without knowing the background.
+
+v2: fix one more missed case in amdgpu_uvd_suspend
+
+Reviewed-by: Leo Liu <leo.liu@amd.com>
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Acked-by: Chunming Zhou <david1.zhou@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+Signed-off-by: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 121 ++++++++++++++++----------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h | 10 +--
+ 2 files changed, 66 insertions(+), 65 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+index 2298571..9c665d5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+@@ -264,20 +264,21 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
+ return r;
+ }
+
+- ring = &adev->uvd.inst[j].ring;
+- rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+- r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity,
+- rq, NULL);
+- if (r != 0) {
+- DRM_ERROR("Failed setting up UVD(%d) run queue.\n", j);
+- return r;
+- }
++ }
++ ring = &adev->uvd.inst[0].ring;
++ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
++ r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[0].entity,
++ rq, NULL);
++ if (r != 0) {
++ DRM_ERROR("Failed setting up UVD kernel entity.\n");
++ return r;
++ }
+
+- for (i = 0; i < adev->uvd.max_handles; ++i) {
+- atomic_set(&adev->uvd.inst[j].handles[i], 0);
+- adev->uvd.inst[j].filp[i] = NULL;
+- }
++ for (i = 0; i < adev->uvd.max_handles; ++i) {
++ atomic_set(&adev->uvd.handles[i], 0);
++ adev->uvd.filp[i] = NULL;
+ }
++
+ /* from uvd v5.0 HW addressing capacity increased to 64 bits */
+ if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
+ adev->uvd.address_64_bit = true;
+@@ -306,11 +307,12 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
+ {
+ int i, j;
+
++ drm_sched_entity_fini(&adev->uvd.inst->ring.sched,
++ &adev->uvd.entity);
++
+ for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+ kfree(adev->uvd.inst[j].saved_bo);
+
+- drm_sched_entity_fini(&adev->uvd.inst[j].ring.sched, &adev->uvd.inst[j].entity);
+-
+ amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
+ &adev->uvd.inst[j].gpu_addr,
+ (void **)&adev->uvd.inst[j].cpu_addr);
+@@ -333,20 +335,22 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
+
+ cancel_delayed_work_sync(&adev->uvd.idle_work);
+
++ /* only valid for physical mode */
++ if (adev->asic_type < CHIP_POLARIS10) {
++ for (i = 0; i < adev->uvd.max_handles; ++i)
++ if (atomic_read(&adev->uvd.handles[i]))
++ break;
++
++ if (i == adev->uvd.max_handles)
++ return 0;
++ }
++
++
+ for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+ if (adev->uvd.inst[j].vcpu_bo == NULL)
+ continue;
+
+
+- /* only valid for physical mode */
+- if (adev->asic_type < CHIP_POLARIS10) {
+- for (i = 0; i < adev->uvd.max_handles; ++i)
+- if (atomic_read(&adev->uvd.inst[j].handles[i]))
+- break;
+-
+- if (i == adev->uvd.max_handles)
+- continue;
+- }
+
+ size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo);
+ ptr = adev->uvd.inst[j].cpu_addr;
+@@ -399,30 +403,27 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
+
+ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
+ {
+- struct amdgpu_ring *ring;
+- int i, j, r;
+-
+- for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+- ring = &adev->uvd.inst[j].ring;
++ struct amdgpu_ring *ring = &adev->uvd.inst[0].ring;
++ int i, r;
+
+- for (i = 0; i < adev->uvd.max_handles; ++i) {
+- uint32_t handle = atomic_read(&adev->uvd.inst[j].handles[i]);
+- if (handle != 0 && adev->uvd.inst[j].filp[i] == filp) {
+- struct dma_fence *fence;
+-
+- r = amdgpu_uvd_get_destroy_msg(ring, handle,
+- false, &fence);
+- if (r) {
+- DRM_ERROR("Error destroying UVD(%d) %d!\n", j, r);
+- continue;
+- }
++ for (i = 0; i < adev->uvd.max_handles; ++i) {
++ uint32_t handle = atomic_read(&adev->uvd.handles[i]);
+
+- dma_fence_wait(fence, false);
+- dma_fence_put(fence);
++ if (handle != 0 && adev->uvd.filp[i] == filp) {
++ struct dma_fence *fence;
+
+- adev->uvd.inst[j].filp[i] = NULL;
+- atomic_set(&adev->uvd.inst[j].handles[i], 0);
++ r = amdgpu_uvd_get_destroy_msg(ring, handle, false,
++ &fence);
++ if (r) {
++ DRM_ERROR("Error destroying UVD %d!\n", r);
++ continue;
+ }
++
++ dma_fence_wait(fence, false);
++ dma_fence_put(fence);
++
++ adev->uvd.filp[i] = NULL;
++ atomic_set(&adev->uvd.handles[i], 0);
+ }
+ }
+ }
+@@ -693,20 +694,19 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
+ struct amdgpu_bo *bo, unsigned offset)
+ {
+ struct amdgpu_device *adev = ctx->parser->adev;
+- uint32_t ip_instance = ctx->parser->ring->me;
+ int32_t *msg, msg_type, handle;
+ void *ptr;
+ long r;
+ int i;
+
+ if (offset & 0x3F) {
+- DRM_ERROR("UVD(%d) messages must be 64 byte aligned!\n", ip_instance);
++ DRM_ERROR("UVD messages must be 64 byte aligned!\n");
+ return -EINVAL;
+ }
+
+ r = amdgpu_bo_kmap(bo, &ptr);
+ if (r) {
+- DRM_ERROR("Failed mapping the UVD(%d) message (%ld)!\n", ip_instance, r);
++ DRM_ERROR("Failed mapping the UVD) message (%ld)!\n", r);
+ return r;
+ }
+
+@@ -716,7 +716,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
+ handle = msg[2];
+
+ if (handle == 0) {
+- DRM_ERROR("Invalid UVD(%d) handle!\n", ip_instance);
++ DRM_ERROR("Invalid UVD handle!\n");
+ return -EINVAL;
+ }
+
+@@ -727,18 +727,19 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
+
+ /* try to alloc a new handle */
+ for (i = 0; i < adev->uvd.max_handles; ++i) {
+- if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) {
+- DRM_ERROR("(%d)Handle 0x%x already in use!\n", ip_instance, handle);
++ if (atomic_read(&adev->uvd.handles[i]) == handle) {
++ DRM_ERROR(")Handle 0x%x already in use!\n",
++ handle);
+ return -EINVAL;
+ }
+
+- if (!atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], 0, handle)) {
+- adev->uvd.inst[ip_instance].filp[i] = ctx->parser->filp;
++ if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
++ adev->uvd.filp[i] = ctx->parser->filp;
+ return 0;
+ }
+ }
+
+- DRM_ERROR("No more free UVD(%d) handles!\n", ip_instance);
++ DRM_ERROR("No more free UVD handles!\n");
+ return -ENOSPC;
+
+ case 1:
+@@ -750,27 +751,27 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
+
+ /* validate the handle */
+ for (i = 0; i < adev->uvd.max_handles; ++i) {
+- if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) {
+- if (adev->uvd.inst[ip_instance].filp[i] != ctx->parser->filp) {
+- DRM_ERROR("UVD(%d) handle collision detected!\n", ip_instance);
++ if (atomic_read(&adev->uvd.handles[i]) == handle) {
++ if (adev->uvd.filp[i] != ctx->parser->filp) {
++ DRM_ERROR("UVD handle collision detected!\n");
+ return -EINVAL;
+ }
+ return 0;
+ }
+ }
+
+- DRM_ERROR("Invalid UVD(%d) handle 0x%x!\n", ip_instance, handle);
++ DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
+ return -ENOENT;
+
+ case 2:
+ /* it's a destroy msg, free the handle */
+ for (i = 0; i < adev->uvd.max_handles; ++i)
+- atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], handle, 0);
++ atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
+ amdgpu_bo_kunmap(bo);
+ return 0;
+
+ default:
+- DRM_ERROR("Illegal UVD(%d) message type (%d)!\n", ip_instance, msg_type);
++ DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
+ return -EINVAL;
+ }
+ BUG();
+@@ -1073,7 +1074,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
+ if (r)
+ goto err_free;
+
+- r = amdgpu_job_submit(job, &adev->uvd.inst[ring->me].entity,
++ r = amdgpu_job_submit(job, &adev->uvd.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+ if (r)
+ goto err_free;
+@@ -1277,7 +1278,7 @@ uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev)
+ * necessarily linear. So we need to count
+ * all non-zero handles.
+ */
+- if (atomic_read(&adev->uvd.inst->handles[i]))
++ if (atomic_read(&adev->uvd.handles[i]))
+ used_handles++;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+index cae3f52..6687228 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+@@ -42,12 +42,9 @@ struct amdgpu_uvd_inst {
+ void *cpu_addr;
+ uint64_t gpu_addr;
+ void *saved_bo;
+- atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
+- struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
+ struct amdgpu_ring ring;
+ struct amdgpu_ring ring_enc[AMDGPU_MAX_UVD_ENC_RINGS];
+ struct amdgpu_irq_src irq;
+- struct drm_sched_entity entity;
+ uint32_t srbm_soft_reset;
+ };
+
+@@ -56,10 +53,13 @@ struct amdgpu_uvd {
+ unsigned fw_version;
+ unsigned max_handles;
+ unsigned num_enc_rings;
+- uint8_t num_uvd_inst;
++ uint8_t num_uvd_inst;
+ bool address_64_bit;
+ bool use_ctx_buf;
+- struct amdgpu_uvd_inst inst[AMDGPU_MAX_UVD_INSTANCES];
++ struct amdgpu_uvd_inst inst[AMDGPU_MAX_UVD_INSTANCES];
++ struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
++ atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
++ struct drm_sched_entity entity;
+ struct delayed_work idle_work;
+ };
+
+--
+2.7.4
+
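The net effect of the cleanup is that handle/filp bookkeeping and the scheduler entity live once per device rather than once per UVD instance. A rough, self-contained model of that layout change (stub types stand in for the kernel ones; only the field moves mirror the amdgpu_uvd.h hunk above):

    /* Sketch only: simplified model of the struct change, not driver code. */
    #include <stdio.h>

    #define AMDGPU_MAX_UVD_INSTANCES 2
    #define AMDGPU_MAX_UVD_HANDLES   16

    typedef int atomic_t;                      /* stub for the kernel type */
    struct drm_file { int dummy; };            /* stub */
    struct drm_sched_entity { int dummy; };    /* stub */

    struct amdgpu_uvd_inst {
            int ring;                          /* per-instance state only */
    };

    struct amdgpu_uvd {
            struct amdgpu_uvd_inst  inst[AMDGPU_MAX_UVD_INSTANCES];
            /* moved up from struct amdgpu_uvd_inst by the patch: */
            struct drm_file        *filp[AMDGPU_MAX_UVD_HANDLES];
            atomic_t                handles[AMDGPU_MAX_UVD_HANDLES];
            struct drm_sched_entity entity;    /* one entity, on inst[0]'s ring */
    };

    int main(void)
    {
            struct amdgpu_uvd uvd = {0};
            uvd.handles[0] = 42;               /* per-device, not per-instance */
            printf("handle[0]=%d\n", uvd.handles[0]);
            return 0;
    }
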
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4978-drm-amdgpu-fix-spelling-mistake-successed-succeeded.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4978-drm-amdgpu-fix-spelling-mistake-successed-succeeded.patch
new file mode 100644
index 00000000..c9129e00
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4978-drm-amdgpu-fix-spelling-mistake-successed-succeeded.patch
@@ -0,0 +1,58 @@
+From 79d4477259cda66e4ee29f03953def32f84cdf83 Mon Sep 17 00:00:00 2001
+From: Colin Ian King <colin.king@canonical.com>
+Date: Tue, 17 Jul 2018 10:29:29 +0100
+Subject: [PATCH 4978/5725] drm/amdgpu: fix spelling mistake "successed" ->
+ "succeeded"
+
+Trivial fix to spelling mistake in dev_err error message.
+
+Signed-off-by: Colin Ian King <colin.king@canonical.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index e93ec54..75d35b4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -2053,7 +2053,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
+ continue;
+
+ r = block->version->funcs->hw_init(adev);
+- DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
++ DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
+ if (r)
+ return r;
+ }
+@@ -2088,7 +2088,7 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
+ continue;
+
+ r = block->version->funcs->hw_init(adev);
+- DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
++ DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
+ if (r)
+ return r;
+ }
+@@ -3147,7 +3147,7 @@ static int amdgpu_device_handle_vram_lost(struct amdgpu_device *adev)
+ * @adev: amdgpu device pointer
+ *
+ * attempt to do soft-reset or full-reset and reinitialize Asic
+- * return 0 means successed otherwise failed
++ * return 0 means succeeded otherwise failed
+ */
+ static int amdgpu_device_reset(struct amdgpu_device *adev)
+ {
+@@ -3357,7 +3357,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
+ } else {
+- dev_info(adev->dev, "GPU reset(%d) successed!\n",atomic_read(&adev->gpu_reset_counter));
++ dev_info(adev->dev, "GPU reset(%d) succeeded!\n",atomic_read(&adev->gpu_reset_counter));
+ }
+
+ amdgpu_vf_error_trans_all(adev);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4979-drm-amd-display-Drop-unused-backlight-functions-in-D.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4979-drm-amd-display-Drop-unused-backlight-functions-in-D.patch
new file mode 100644
index 00000000..b3508bb5
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4979-drm-amd-display-Drop-unused-backlight-functions-in-D.patch
@@ -0,0 +1,54 @@
+From 6a6ac24c463ff14ab2369c2ce2e376de1143c838 Mon Sep 17 00:00:00 2001
+From: Harry Wentland <harry.wentland@amd.com>
+Date: Thu, 19 Jul 2018 14:17:30 -0400
+Subject: [PATCH 4979/5725] drm/amd/display: Drop unused backlight functions in
+ DM
+
+These are only ever called for non-DC code.
+
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 18 ++----------------
+ 1 file changed, 2 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 1c6caa7..9289083 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -1699,18 +1699,6 @@ static void dm_bandwidth_update(struct amdgpu_device *adev)
+ /* TODO: implement later */
+ }
+
+-static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
+- u8 level)
+-{
+- /* TODO: translate amdgpu_encoder to display_index and call DAL */
+-}
+-
+-static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
+-{
+- /* TODO: translate amdgpu_encoder to display_index and call DAL */
+- return 0;
+-}
+-
+ static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+ {
+@@ -1803,10 +1791,8 @@ static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
+ static const struct amdgpu_display_funcs dm_display_funcs = {
+ .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
+ .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
+- .backlight_set_level =
+- dm_set_backlight_level,/* called unconditionally */
+- .backlight_get_level =
+- dm_get_backlight_level,/* called unconditionally */
++ .backlight_set_level = NULL, /* never called for DC */
++ .backlight_get_level = NULL, /* never called for DC */
+ .hpd_sense = NULL,/* called unconditionally */
+ .hpd_set_polarity = NULL, /* called unconditionally */
+ .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4980-drm-amd-display-Honor-pplib-stutter-mask-for-all-ASI.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4980-drm-amd-display-Honor-pplib-stutter-mask-for-all-ASI.patch
new file mode 100644
index 00000000..cb68251c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4980-drm-amd-display-Honor-pplib-stutter-mask-for-all-ASI.patch
@@ -0,0 +1,58 @@
+From eb170046c283c4d8a5b47e865fcc2977189485a2 Mon Sep 17 00:00:00 2001
+From: Harry Wentland <harry.wentland@amd.com>
+Date: Tue, 17 Jul 2018 10:51:23 -0400
+Subject: [PATCH 4980/5725] drm/amd/display: Honor pplib stutter mask for all
+ ASICs in DC
+
+[Why]
+We were only setting this mask for DCN, but should really use it
+universally for all ASICs.
+
+[How]
+Move the assignment out of the Raven switch statement for all ASICs
+other than Stoney and Carrizo.
+
+v2: Keep stutter always on for Carrizo and Stoney (Alex)
+
+Cc: Rex.Zhu@amd.com
+Cc: Feifei.Xu@amd.com
+Cc: Kenneth.Feng@amd.com
+Cc: Evan.Quan@amd.com
+Cc: Bhawanpreet.Lakha@amd.com
+Cc: Jordan.Lazare@amd.com
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 9289083..c5c59cb 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -1657,10 +1657,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
+ DRM_ERROR("DM: Failed to initialize IRQ\n");
+ goto fail;
+ }
+- /*
+- * Temporary disable until pplib/smu interaction is implemented
+- */
+- dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
+ break;
+ #endif
+ default:
+@@ -1668,6 +1664,9 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
+ goto fail;
+ }
+
++ if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
++ dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
++
+ return 0;
+ fail:
+ kfree(aencoder);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4981-drm-amdgpu-lock-and-unlock-console-only-for-amdgpu_f.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4981-drm-amdgpu-lock-and-unlock-console-only-for-amdgpu_f.patch
new file mode 100644
index 00000000..e183cad9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4981-drm-amdgpu-lock-and-unlock-console-only-for-amdgpu_f.patch
@@ -0,0 +1,104 @@
+From a883ee193b437c4d252864401b533b2d313bacc4 Mon Sep 17 00:00:00 2001
+From: Shirish S <shirish.s@amd.com>
+Date: Wed, 18 Jul 2018 13:36:26 +0530
+Subject: [PATCH 4981/5725] drm/amdgpu: lock and unlock console only for
+ amdgpu_fbdev_set_suspend [V5]
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+[Why]
+While the console_lock is held, console output is buffered and will not
+be emitted until it is unlocked, so it is best to unlock it sooner to allow
+debugging/detecting/fixing of any issue in the remaining sequence of events
+in the resume path.
+The concern here is about consoles other than fbcon on the device,
+e.g. a serial console.
+
+[How]
+This patch restructures the console_lock, console_unlock around
+amdgpu_fbdev_set_suspend() and moves this new block appropriately.
+
+V2: Kept amdgpu_fbdev_set_suspend after pci_set_power_state
+V3: Updated the commit message to clarify the real concern that this patch
+ addresses.
+V4: code clean-up.
+V5: fixed return value
+
+Signed-off-by: Shirish S <shirish.s@amd.com>
+Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 22 +++++++---------------
+ 1 file changed, 7 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 75d35b4..7ac4de2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -2772,15 +2772,12 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+- if (fbcon)
+- console_lock();
+-
+ if (resume) {
+ pci_set_power_state(dev->pdev, PCI_D0);
+ pci_restore_state(dev->pdev);
+ r = pci_enable_device(dev->pdev);
+ if (r)
+- goto unlock;
++ return r;
+ }
+
+ /* post card */
+@@ -2793,7 +2790,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
+ r = amdgpu_device_ip_resume(adev);
+ if (r) {
+ DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
+- goto unlock;
++ return r;
+ }
+
+ amdgpu_fence_driver_resume(adev);
+@@ -2801,7 +2798,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
+
+ r = amdgpu_device_ip_late_init(adev);
+ if (r)
+- goto unlock;
++ return r;
+
+ /* pin cursors */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+@@ -2839,6 +2836,9 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
+ }
+ drm_modeset_unlock_all(dev);
+ }
++ console_lock();
++ amdgpu_fbdev_set_suspend(adev, 0);
++ console_unlock();
+ }
+
+ drm_kms_helper_poll_enable(dev);
+@@ -2862,15 +2862,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
+ #ifdef CONFIG_PM
+ dev->dev->power.disable_depth--;
+ #endif
+-
+- if (fbcon)
+- amdgpu_fbdev_set_suspend(adev, 0);
+-
+-unlock:
+- if (fbcon)
+- console_unlock();
+-
+- return r;
++ return 0;
+ }
+
+ /**
+--
+2.7.4
+
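The restructuring narrows the console lock to the one call that needs it: early failures simply return, and only the fbdev suspend toggle runs under the lock. A simplified, self-contained sketch of the resulting resume flow (signatures and bodies stubbed; not the actual driver code):

    /* Sketch only: early failures return immediately instead of jumping to an
     * unlock label, and only the fbdev suspend toggle holds the console lock. */
    #include <stdio.h>

    static void console_lock(void)   { puts("console locked");   }
    static void console_unlock(void) { puts("console unlocked"); }
    static void amdgpu_fbdev_set_suspend(int state) { printf("fbdev suspend=%d\n", state); }
    static int  resume_hw(void) { return 0; }   /* stub for the real resume steps */

    static int amdgpu_device_resume_sketch(int fbcon)
    {
            int r = resume_hw();
            if (r)
                    return r;        /* no console lock held, nothing to unlock */

            if (fbcon) {
                    console_lock();
                    amdgpu_fbdev_set_suspend(0);
                    console_unlock();
            }
            return 0;
    }

    int main(void) { return amdgpu_device_resume_sketch(1); }
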
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4982-drm-amd-pp-Set-Max-clock-level-to-display-by-default.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4982-drm-amd-pp-Set-Max-clock-level-to-display-by-default.patch
new file mode 100644
index 00000000..b5eadfea
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4982-drm-amd-pp-Set-Max-clock-level-to-display-by-default.patch
@@ -0,0 +1,55 @@
+From ce637a95634a90c4cee56685e2866d337d225e1a Mon Sep 17 00:00:00 2001
+From: Rex Zhu <rex.zhu@amd.com>
+Date: Tue, 17 Jul 2018 18:31:50 +0800
+Subject: [PATCH 4982/5725] drm/amd/pp: Set Max clock level to display by
+ default
+
+avoid the error in dmesg:
+[drm:dm_pp_get_static_clocks]
+*ERROR* DM_PPLIB: invalid powerlevel state: 0!
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+index 2a479fa..6ef06a4 100644
+--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
++++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+@@ -1008,7 +1008,7 @@ static int pp_get_display_power_level(void *handle,
+ static int pp_get_current_clocks(void *handle,
+ struct amd_pp_clock_info *clocks)
+ {
+- struct amd_pp_simple_clock_info simple_clocks;
++ struct amd_pp_simple_clock_info simple_clocks = { 0 };
+ struct pp_clock_info hw_clocks;
+ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+@@ -1044,7 +1044,10 @@ static int pp_get_current_clocks(void *handle,
+ clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
+ clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
+
+- clocks->max_clocks_state = simple_clocks.level;
++ if (simple_clocks.level == 0)
++ clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
++ else
++ clocks->max_clocks_state = simple_clocks.level;
+
+ if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
+ clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
+@@ -1147,6 +1150,8 @@ static int pp_get_display_mode_validation_clocks(void *handle,
+ if (!hwmgr || !hwmgr->pm_en ||!clocks)
+ return -EINVAL;
+
++ clocks->level = PP_DAL_POWERLEVEL_7;
++
+ mutex_lock(&hwmgr->smu_lock);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4983-drm-amd-display-Convert-10kHz-clks-from-PPLib-into-k.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4983-drm-amd-display-Convert-10kHz-clks-from-PPLib-into-k.patch
new file mode 100644
index 00000000..a7007c06
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4983-drm-amd-display-Convert-10kHz-clks-from-PPLib-into-k.patch
@@ -0,0 +1,36 @@
+From 705cff090240cdf66e5c14afa9b17c926f7dcbda Mon Sep 17 00:00:00 2001
+From: Rex Zhu <rex.zhu@amd.com>
+Date: Tue, 17 Jul 2018 20:18:04 +0800
+Subject: [PATCH 4983/5725] drm/amd/display: Convert 10kHz clks from PPLib into
+ kHz
+
+Except for fields explicitly named *_in_khz, the default clock unit in
+powerplay is 10 kHz, so multiply by 10 because display expects clock
+frequencies in kHz.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+index c69ae78..fbe878a 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+@@ -469,8 +469,8 @@ bool dm_pp_get_static_clocks(
+ return false;
+
+ static_clk_info->max_clocks_state = pp_to_dc_powerlevel_state(pp_clk_info.max_clocks_state);
+- static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock;
+- static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock;
++ static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock * 10;
++ static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock * 10;
+
+ return true;
+ }
+--
+2.7.4
+
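A small worked example of the unit fix: powerplay reports these clocks in units of 10 kHz, so a value of 30000 from pplib corresponds to 300000 kHz (300 MHz). A self-contained sketch of the conversion (helper name hypothetical):

    /* Sketch only: converting powerplay's 10 kHz units to the kHz expected
     * by the display code, as done in dm_pp_get_static_clocks() above. */
    #include <stdio.h>
    #include <stdint.h>

    static uint32_t pp_10khz_to_khz(uint32_t clk_10khz)
    {
            return clk_10khz * 10;
    }

    int main(void)
    {
            uint32_t max_engine_clock = 30000;   /* 10 kHz units from powerplay */
            printf("max sclk = %u kHz\n", pp_10khz_to_khz(max_engine_clock));
            /* prints: max sclk = 300000 kHz, i.e. 300 MHz */
            return 0;
    }
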
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4984-300-compilaiton.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4984-300-compilaiton.patch
new file mode 100644
index 00000000..98b16201
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4984-300-compilaiton.patch
@@ -0,0 +1,38 @@
+From f6c0998dacbf05b84c9635434d0a28e92782fb1e Mon Sep 17 00:00:00 2001
+From: Raveendra Talabattula <raveendra.talabattula@amd.com>
+Date: Fri, 2 Nov 2018 15:36:17 +0530
+Subject: [PATCH 4984/5725] 300 compilaiton
+
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2 +-
+ include/uapi/drm/amdgpu_drm.h | 1 +
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index 8f2991b..4d68c37 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -1195,7 +1195,7 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
+ }
+ }
+
+- return amdgpu_sem_add_cs(p->ctx, p->job->ring, &p->job->sync);
++ return amdgpu_sem_add_cs(p->ctx, p->ring, &p->job->sync);
+ }
+
+ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
+diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
+index f12f57a..a0c3286 100644
+--- a/include/uapi/drm/amdgpu_drm.h
++++ b/include/uapi/drm/amdgpu_drm.h
+@@ -560,6 +560,7 @@ struct drm_amdgpu_gem_va {
+ #define AMDGPU_CHUNK_ID_DEPENDENCIES 0x03
+ #define AMDGPU_CHUNK_ID_SYNCOBJ_IN 0x04
+ #define AMDGPU_CHUNK_ID_SYNCOBJ_OUT 0x05
++#define AMDGPU_CHUNK_ID_BO_HANDLES 0x06
+
+ struct drm_amdgpu_cs_chunk {
+ __u32 chunk_id;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4985-patch-correction-amdgpu-clean-up-UVD-instance-handli.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4985-patch-correction-amdgpu-clean-up-UVD-instance-handli.patch
new file mode 100644
index 00000000..92ae0933
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4985-patch-correction-amdgpu-clean-up-UVD-instance-handli.patch
@@ -0,0 +1,26 @@
+From 5a59641d10dc82da8d6e406e09800f3bb6f0a9d2 Mon Sep 17 00:00:00 2001
+From: Raveendra Talabattula <raveendra.talabattula@amd.com>
+Date: Fri, 2 Nov 2018 15:39:08 +0530
+Subject: [PATCH 4985/5725] patch correction
+ amdgpu-clean-up-UVD-instance-handling
+
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+index 9c665d5..3536198 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+@@ -267,7 +267,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
+ }
+ ring = &adev->uvd.inst[0].ring;
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+- r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[0].entity,
++ r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity,
+ rq, NULL);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up UVD kernel entity.\n");
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4986-drm-amdgpu-use-drm_fb-helper-for-console_-un-lock.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4986-drm-amdgpu-use-drm_fb-helper-for-console_-un-lock.patch
new file mode 100644
index 00000000..2824db23
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4986-drm-amdgpu-use-drm_fb-helper-for-console_-un-lock.patch
@@ -0,0 +1,67 @@
+From 9b43f4c2c30e66ca3f6bb3bfd5a6aa9ab3c894cd Mon Sep 17 00:00:00 2001
+From: Shirish S <shirish.s@amd.com>
+Date: Fri, 20 Jul 2018 17:26:50 +0530
+Subject: [PATCH 4986/5725] drm/amdgpu: use drm_fb helper for console_(un)lock
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This patch removes the usage of console_(un)lock
+by replacing drm_fb_helper_set_suspend() with
+drm_fb_helper_set_suspend_unlocked(), which locks and
+unlocks the console instead of us doing it ourselves.
+
+Signed-off-by: Shirish S <shirish.s@amd.com>
+Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 8 ++------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | 4 ++--
+ 2 files changed, 4 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 7ac4de2..8c85387 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -2743,11 +2743,9 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
+ DRM_ERROR("amdgpu asic reset failed\n");
+ }
+
+- if (fbcon) {
+- console_lock();
++ if (fbcon)
+ amdgpu_fbdev_set_suspend(adev, 1);
+- console_unlock();
+- }
++
+ return 0;
+ }
+
+@@ -2836,9 +2834,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
+ }
+ drm_modeset_unlock_all(dev);
+ }
+- console_lock();
+ amdgpu_fbdev_set_suspend(adev, 0);
+- console_unlock();
+ }
+
+ drm_kms_helper_poll_enable(dev);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+index f2c7dbd..7ff11d7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+@@ -379,8 +379,8 @@ void amdgpu_fbdev_fini(struct amdgpu_device *adev)
+ void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state)
+ {
+ if (adev->mode_info.rfbdev)
+- drm_fb_helper_set_suspend(&adev->mode_info.rfbdev->helper,
+- state);
++ drm_fb_helper_set_suspend_unlocked(&adev->mode_info.rfbdev->helper,
++ state);
+ }
+
+ int amdgpu_fbdev_total_size(struct amdgpu_device *adev)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4987-drm-amdgpu-Fix-warning-in-dma_fence_is_later-on-resu.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4987-drm-amdgpu-Fix-warning-in-dma_fence_is_later-on-resu.patch
new file mode 100644
index 00000000..d1392d4a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4987-drm-amdgpu-Fix-warning-in-dma_fence_is_later-on-resu.patch
@@ -0,0 +1,43 @@
+From 820cd8589182530c353f63c8b212c83b6a4fdc82 Mon Sep 17 00:00:00 2001
+From: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Date: Fri, 20 Jul 2018 11:42:24 -0400
+Subject: [PATCH 4987/5725] drm/amdgpu: Fix warning in dma_fence_is_later on
+ resume from S3.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Problem:
+amdgpu_ttm_set_buffer_funcs_status destroys adev->mman.entity on suspend
+without releasing the adev->mman.bdev.man[TTM_PL_VRAM].move fence,
+so on resume the new drm_sched_entity.fence_context triggers
+the warning against the old, different fence context.
+
+Fix:
+When destroying the sched_entity in amdgpu_ttm_set_buffer_funcs_status,
+release man->move and set the pointer to NULL.
+
+Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index addc397..5adaefc 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -2125,6 +2125,8 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
+ } else {
+ drm_sched_entity_fini(adev->mman.entity.sched,
+ &adev->mman.entity);
++ dma_fence_put(man->move);
++ man->move = NULL;
+ }
+
+ /* this just adjusts TTM size idea, which sets lpfn to the correct value */
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4988-drm-amdgpu-apci-don-t-call-sbios-request-function-if.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4988-drm-amdgpu-apci-don-t-call-sbios-request-function-if.patch
new file mode 100644
index 00000000..0a035651
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4988-drm-amdgpu-apci-don-t-call-sbios-request-function-if.patch
@@ -0,0 +1,101 @@
+From b4d19ebc884437b5d3a93b9da434a0f3a60bb316 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 19 Jul 2018 09:17:02 -0500
+Subject: [PATCH 4988/5725] drm/amdgpu/apci: don't call sbios request function
+ if it's not supported
+
+Check the supported functions mask before calling the bios
+requests method.
+
+Reviewed-by: Jim Qu <Jim.Qu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 53 +++++++++++++++++---------------
+ 1 file changed, 28 insertions(+), 25 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+index 0d8c3fc..4556178 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+@@ -364,7 +364,6 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
+ struct acpi_bus_event *event)
+ {
+ struct amdgpu_atif *atif = adev->atif;
+- struct atif_sbios_requests req;
+ int count;
+
+ DRM_DEBUG_DRIVER("event, device_class = %s, type = %#x\n",
+@@ -379,42 +378,46 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
+ /* Not our event */
+ return NOTIFY_DONE;
+
+- /* Check pending SBIOS requests */
+- count = amdgpu_atif_get_sbios_requests(atif, &req);
++ if (atif->functions.sbios_requests) {
++ struct atif_sbios_requests req;
+
+- if (count <= 0)
+- return NOTIFY_DONE;
++ /* Check pending SBIOS requests */
++ count = amdgpu_atif_get_sbios_requests(atif, &req);
++
++ if (count <= 0)
++ return NOTIFY_DONE;
+
+- DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);
++ DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);
+
+- if (req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) {
+- struct amdgpu_encoder *enc = atif->encoder_for_bl;
++ if (req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) {
++ struct amdgpu_encoder *enc = atif->encoder_for_bl;
+
+- if (enc) {
+- struct amdgpu_encoder_atom_dig *dig = enc->enc_priv;
++ if (enc) {
++ struct amdgpu_encoder_atom_dig *dig = enc->enc_priv;
+
+- DRM_DEBUG_DRIVER("Changing brightness to %d\n",
+- req.backlight_level);
++ DRM_DEBUG_DRIVER("Changing brightness to %d\n",
++ req.backlight_level);
+
+- amdgpu_display_backlight_set_level(adev, enc, req.backlight_level);
++ amdgpu_display_backlight_set_level(adev, enc, req.backlight_level);
+
+ #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+- backlight_force_update(dig->bl_dev,
+- BACKLIGHT_UPDATE_HOTKEY);
++ backlight_force_update(dig->bl_dev,
++ BACKLIGHT_UPDATE_HOTKEY);
+ #endif
++ }
+ }
+- }
+- if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
+- if ((adev->flags & AMD_IS_PX) &&
+- amdgpu_atpx_dgpu_req_power_for_displays()) {
+- pm_runtime_get_sync(adev->ddev->dev);
+- /* Just fire off a uevent and let userspace tell us what to do */
+- drm_helper_hpd_irq_event(adev->ddev);
+- pm_runtime_mark_last_busy(adev->ddev->dev);
+- pm_runtime_put_autosuspend(adev->ddev->dev);
++ if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
++ if ((adev->flags & AMD_IS_PX) &&
++ amdgpu_atpx_dgpu_req_power_for_displays()) {
++ pm_runtime_get_sync(adev->ddev->dev);
++ /* Just fire off a uevent and let userspace tell us what to do */
++ drm_helper_hpd_irq_event(adev->ddev);
++ pm_runtime_mark_last_busy(adev->ddev->dev);
++ pm_runtime_put_autosuspend(adev->ddev->dev);
++ }
+ }
++ /* TODO: check other events */
+ }
+- /* TODO: check other events */
+
+ /* We've handled the event, stop the notifier chain. The ACPI interface
+ * overloads ACPI_VIDEO_NOTIFY_PROBE, we don't want to send that to
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4989-drm-amdgpu-acpi-skip-backlight-events-for-DC.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4989-drm-amdgpu-acpi-skip-backlight-events-for-DC.patch
new file mode 100644
index 00000000..82781288
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4989-drm-amdgpu-acpi-skip-backlight-events-for-DC.patch
@@ -0,0 +1,31 @@
+From a2b4e24d7f9bb69d165395e4d367608d6acb765b Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 19 Jul 2018 09:28:23 -0500
+Subject: [PATCH 4989/5725] drm/amdgpu/acpi: skip backlight events for DC
+
+No change in behavior, just bail sooner.
+
+Reviewed-by: Jim Qu <Jim.Qu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+index 4556178..3539932 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+@@ -389,7 +389,9 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
+
+ DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);
+
+- if (req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) {
++ /* todo: add DC handling */
++ if ((req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) &&
++ !amdgpu_device_has_dc_support(adev)) {
+ struct amdgpu_encoder *enc = atif->encoder_for_bl;
+
+ if (enc) {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4990-drm-amdgpu-split-ip-suspend-into-2-phases.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4990-drm-amdgpu-split-ip-suspend-into-2-phases.patch
new file mode 100644
index 00000000..f17967ef
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4990-drm-amdgpu-split-ip-suspend-into-2-phases.patch
@@ -0,0 +1,129 @@
+From d50d606afb8d7e95f44bae0c617fbbc4c5a426ba Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 19 Jul 2018 13:10:07 -0500
+Subject: [PATCH 4990/5725] drm/amdgpu: split ip suspend into 2 phases
+
+We need to suspend some IPs earlier to deal with ordering issues,
+similar to how resume is split into two phases. Do DCE first
+to deal with atomic, then do the rest.
+
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Reviewed-and-tested-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 78 +++++++++++++++++++++++++++++-
+ 1 file changed, 76 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 8c85387..c4a9156 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1975,7 +1975,7 @@ static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
+ }
+
+ /**
+- * amdgpu_device_ip_suspend - run suspend for hardware IPs
++ * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
+ *
+ * @adev: amdgpu_device pointer
+ *
+@@ -1985,7 +1985,55 @@ static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
+ * in each IP into a state suitable for suspend.
+ * Returns 0 on success, negative error code on failure.
+ */
+-int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
++static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
++{
++ int i, r;
++
++ if (amdgpu_sriov_vf(adev))
++ amdgpu_virt_request_full_gpu(adev, false);
++
++ for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
++ if (!adev->ip_blocks[i].status.valid)
++ continue;
++ /* displays are handled separately */
++ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
++ /* ungate blocks so that suspend can properly shut them down */
++ if (adev->ip_blocks[i].version->funcs->set_clockgating_state) {
++ r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
++ AMD_CG_STATE_UNGATE);
++ if (r) {
++ DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
++ adev->ip_blocks[i].version->funcs->name, r);
++ }
++ }
++ /* XXX handle errors */
++ r = adev->ip_blocks[i].version->funcs->suspend(adev);
++ /* XXX handle errors */
++ if (r) {
++ DRM_ERROR("suspend of IP block <%s> failed %d\n",
++ adev->ip_blocks[i].version->funcs->name, r);
++ }
++ }
++ }
++
++ if (amdgpu_sriov_vf(adev))
++ amdgpu_virt_release_full_gpu(adev, false);
++
++ return 0;
++}
++
++/**
++ * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Main suspend function for hardware IPs. The list of all the hardware
++ * IPs that make up the asic is walked, clockgating is disabled and the
++ * suspend callbacks are run. suspend puts the hardware and software state
++ * in each IP into a state suitable for suspend.
++ * Returns 0 on success, negative error code on failure.
++ */
++static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
+ {
+ int i, r;
+
+@@ -2006,6 +2054,9 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
+ for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+ if (!adev->ip_blocks[i].status.valid)
+ continue;
++ /* displays are handled in phase1 */
++ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
++ continue;
+ /* ungate blocks so that suspend can properly shut them down */
+ if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC &&
+ adev->ip_blocks[i].version->funcs->set_clockgating_state) {
+@@ -2031,6 +2082,29 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
+ return 0;
+ }
+
++/**
++ * amdgpu_device_ip_suspend - run suspend for hardware IPs
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Main suspend function for hardware IPs. The list of all the hardware
++ * IPs that make up the asic is walked, clockgating is disabled and the
++ * suspend callbacks are run. suspend puts the hardware and software state
++ * in each IP into a state suitable for suspend.
++ * Returns 0 on success, negative error code on failure.
++ */
++int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
++{
++ int r;
++
++ r = amdgpu_device_ip_suspend_phase1(adev);
++ if (r)
++ return r;
++ r = amdgpu_device_ip_suspend_phase2(adev);
++
++ return r;
++}
++
+ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
+ {
+ int i, r;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4991-drm-amdgpu-rework-suspend-and-resume-to-deal-with-at.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4991-drm-amdgpu-rework-suspend-and-resume-to-deal-with-at.patch
new file mode 100644
index 00000000..f82ad169
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4991-drm-amdgpu-rework-suspend-and-resume-to-deal-with-at.patch
@@ -0,0 +1,140 @@
+From 816b67e0c73c376811c869fe8271ed725be83385 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 19 Jul 2018 13:24:33 -0500
+Subject: [PATCH 4991/5725] drm/amdgpu: rework suspend and resume to deal with
+ atomic changes
+
+Use the newly split ip suspend functions to suspend displays
+first (to deal with atomic so that FBs can be unpinned before
+attempting to evict vram), then evict vram, then suspend the
+other IPs. Also move the non-DC pinning code to only be
+called in the non-DC cases, since atomic should take care of
+DC.
+
+Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=107065
+Fixes: e00fb85 drm: Stop updating plane->crtc/fb/old_fb on atomic drivers
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Reviewed-and-tested-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 86 ++++++++++++++++--------------
+ 1 file changed, 45 insertions(+), 41 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index c4a9156..5c128c5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -2761,44 +2761,46 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ }
+ drm_modeset_unlock_all(dev);
+- }
+-
+- amdgpu_amdkfd_suspend(adev);
+-
+- /* unpin the front buffers and cursors */
+- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+- struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
+- struct amdgpu_bo *robj;
+-
+- if (amdgpu_crtc->cursor_bo) {
+- struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+- r = amdgpu_bo_reserve(aobj, true);
+- if (r == 0) {
+- amdgpu_bo_unpin(aobj);
+- amdgpu_bo_unreserve(aobj);
++ /* unpin the front buffers and cursors */
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
++ struct drm_framebuffer *fb = crtc->primary->fb;
++ struct amdgpu_bo *robj;
++
++ if (amdgpu_crtc->cursor_bo) {
++ struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
++ r = amdgpu_bo_reserve(aobj, true);
++ if (r == 0) {
++ amdgpu_bo_unpin(aobj);
++ amdgpu_bo_unreserve(aobj);
++ }
+ }
+- }
+
+- if (rfb == NULL || rfb->obj == NULL) {
+- continue;
+- }
+- robj = gem_to_amdgpu_bo(rfb->obj);
+- /* don't unpin kernel fb objects */
+- if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
+- r = amdgpu_bo_reserve(robj, true);
+- if (r == 0) {
+- amdgpu_bo_unpin(robj);
+- amdgpu_bo_unreserve(robj);
++ if (fb == NULL || fb->obj[0] == NULL) {
++ continue;
++ }
++ robj = gem_to_amdgpu_bo(fb->obj[0]);
++ /* don't unpin kernel fb objects */
++ if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
++ r = amdgpu_bo_reserve(robj, true);
++ if (r == 0) {
++ amdgpu_bo_unpin(robj);
++ amdgpu_bo_unreserve(robj);
++ }
+ }
+ }
+ }
++
++ amdgpu_amdkfd_suspend(adev);
++
++ r = amdgpu_device_ip_suspend_phase1(adev);
++
+ /* evict vram memory */
+ amdgpu_bo_evict_vram(adev);
+
+ amdgpu_fence_driver_suspend(adev);
+
+- r = amdgpu_device_ip_suspend(adev);
++ r = amdgpu_device_ip_suspend_phase2(adev);
+
+ /* evict remaining vram memory
+ * This second call to evict vram is to evict the gart page table
+@@ -2872,19 +2874,21 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
+ if (r)
+ return r;
+
+- /* pin cursors */
+- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+-
+- if (amdgpu_crtc->cursor_bo) {
+- struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+- r = amdgpu_bo_reserve(aobj, true);
+- if (r == 0) {
+- r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
+- if (r != 0)
+- DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
+- amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
+- amdgpu_bo_unreserve(aobj);
++ if (!amdgpu_device_has_dc_support(adev)) {
++ /* pin cursors */
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
++
++ if (amdgpu_crtc->cursor_bo) {
++ struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
++ r = amdgpu_bo_reserve(aobj, true);
++ if (r == 0) {
++ r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
++ if (r != 0)
++ DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
++ amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
++ amdgpu_bo_unreserve(aobj);
++ }
+ }
+ }
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4992-drm-amdgpu-Fix-RLC-safe-mode-test-in-gfx_v9_0_enter_.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4992-drm-amdgpu-Fix-RLC-safe-mode-test-in-gfx_v9_0_enter_.patch
new file mode 100644
index 00000000..1d3d3649
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4992-drm-amdgpu-Fix-RLC-safe-mode-test-in-gfx_v9_0_enter_.patch
@@ -0,0 +1,42 @@
+From 0e3792809c47d3d8c416045fd11546d3449d1650 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Michel=20D=C3=A4nzer?= <michel.daenzer@amd.com>
+Date: Thu, 19 Jul 2018 18:33:39 +0200
+Subject: [PATCH 4992/5725] drm/amdgpu: Fix RLC safe mode test in
+ gfx_v9_0_enter_rlc_safe_mode
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+We were testing the register offset instead of the value stored in the
+register, and therefore always timing out the loop.
+
+This reduces suspend time of the system in the bug report below by ~600
+ms.
+
+Cc: stable@vger.kernel.org
+Bugzilla: https://bugs.freedesktop.org/107277
+Tested-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 39b229e..670a564 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -3495,7 +3495,7 @@ static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
+
+ /* wait for RLC_SAFE_MODE */
+ for (i = 0; i < adev->usec_timeout; i++) {
+- if (!REG_GET_FIELD(SOC15_REG_OFFSET(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
++ if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
+ break;
+ udelay(1);
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4993-drm-amd-powerplay-slow-UCLK-switch-when-multiple-dis.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4993-drm-amd-powerplay-slow-UCLK-switch-when-multiple-dis.patch
new file mode 100644
index 00000000..a6b6155c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4993-drm-amd-powerplay-slow-UCLK-switch-when-multiple-dis.patch
@@ -0,0 +1,33 @@
+From e5d3dbcfcab546ece75295a90c23e6cd45fd1229 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Fri, 20 Jul 2018 10:13:19 +0800
+Subject: [PATCH 4993/5725] drm/amd/powerplay: slow UCLK switch when multiple
+ displays not in sync
+
+Use the slow UCLK switch when there are multiple displays and they
+are not in sync.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Rex Zhu <rex.zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+index ca4e1d9..1ef9ab4 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+@@ -3799,7 +3799,8 @@ static int vega10_notify_smc_display_config_after_ps_adjustment(
+ uint32_t i;
+ struct pp_display_clock_request clock_req;
+
+- if (hwmgr->display_config->num_display > 1)
++ if ((hwmgr->display_config->num_display > 1) &&
++ !hwmgr->display_config->multi_monitor_in_sync)
+ vega10_notify_smc_display_change(hwmgr, false);
+ else
+ vega10_notify_smc_display_change(hwmgr, true);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4994-drm-amd-powerplay-correct-the-argument-for-PPSMC_MSG.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4994-drm-amd-powerplay-correct-the-argument-for-PPSMC_MSG.patch
new file mode 100644
index 00000000..31509925
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4994-drm-amd-powerplay-correct-the-argument-for-PPSMC_MSG.patch
@@ -0,0 +1,46 @@
+From 48dccd684c2afe6272537e706d5dee3bd7b01d1a Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Mon, 16 Jul 2018 17:23:19 +0800
+Subject: [PATCH 4994/5725] drm/amd/powerplay: correct the argument for
+ PPSMC_MSG_SetUclkFastSwitch
+
+The argument was set incorrectly: a fast/slow switch was requested when
+a slow/fast switch was actually needed.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Rex Zhu <rex.zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 2 +-
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+index 1ef9ab4..9e2281e 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+@@ -3724,7 +3724,7 @@ static void vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr,
+ {
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetUclkFastSwitch,
+- has_disp ? 0 : 1);
++ has_disp ? 1 : 0);
+ }
+
+ int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+index 4ed218d..35f96da 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+@@ -1334,7 +1334,7 @@ static int vega12_notify_smc_display_change(struct pp_hwmgr *hwmgr,
+ if (data->smu_features[GNLD_DPM_UCLK].enabled)
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetUclkFastSwitch,
+- has_disp ? 0 : 1);
++ has_disp ? 1 : 0);
+
+ return 0;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4995-drm-amd-powerplay-allow-slow-switch-only-if-NBPState.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4995-drm-amd-powerplay-allow-slow-switch-only-if-NBPState.patch
new file mode 100644
index 00000000..7e700637
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4995-drm-amd-powerplay-allow-slow-switch-only-if-NBPState.patch
@@ -0,0 +1,49 @@
+From 0420615a56459fd812748ee971ebc1004728084f Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Mon, 16 Jul 2018 17:25:30 +0800
+Subject: [PATCH 4995/5725] drm/amd/powerplay: allow slow switch only if
+ NBPState enabled v2
+
+Otherwise there may be SMU performance issues.
+
+v2: fix commit description and coding style
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Rex Zhu <rex.zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 3 ++-
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 3 ++-
+ 2 files changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+index 9e2281e..384d853 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+@@ -3800,7 +3800,8 @@ static int vega10_notify_smc_display_config_after_ps_adjustment(
+ struct pp_display_clock_request clock_req;
+
+ if ((hwmgr->display_config->num_display > 1) &&
+- !hwmgr->display_config->multi_monitor_in_sync)
++ !hwmgr->display_config->multi_monitor_in_sync &&
++ !hwmgr->display_config->nb_pstate_switch_disable)
+ vega10_notify_smc_display_change(hwmgr, false);
+ else
+ vega10_notify_smc_display_change(hwmgr, true);
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+index 35f96da..0789d64 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+@@ -1389,7 +1389,8 @@ static int vega12_notify_smc_display_config_after_ps_adjustment(
+ struct pp_display_clock_request clock_req;
+
+ if ((hwmgr->display_config->num_display > 1) &&
+- !hwmgr->display_config->multi_monitor_in_sync)
++ !hwmgr->display_config->multi_monitor_in_sync &&
++ !hwmgr->display_config->nb_pstate_switch_disable)
+ vega12_notify_smc_display_change(hwmgr, false);
+ else
+ vega12_notify_smc_display_change(hwmgr, true);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4996-drm-amdgpu-Don-t-warn-on-destroying-a-pinned-BO.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4996-drm-amdgpu-Don-t-warn-on-destroying-a-pinned-BO.patch
new file mode 100644
index 00000000..945b9699
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4996-drm-amdgpu-Don-t-warn-on-destroying-a-pinned-BO.patch
@@ -0,0 +1,39 @@
+From a731e025852ecf95d69e790ed543fd00cde3a4bb Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Michel=20D=C3=A4nzer?= <michel.daenzer@amd.com>
+Date: Thu, 19 Jul 2018 17:38:18 +0200
+Subject: [PATCH 4996/5725] drm/amdgpu: Don't warn on destroying a pinned BO
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The warning turned out to be not very useful, as BO destruction tends to
+be deferred to a workqueue.
+
+Also, we should be preventing any damage from this now, so it is no
+longer really important to fix code doing this.
+
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Tested-by: Mike Lothian <mike@fireburn.co.uk>
+Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index aecee98..352d6b1 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -93,7 +93,7 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
+ if (bo->tbo.mem.mem_type == AMDGPU_PL_DGMA_IMPORT)
+ kfree(tbo->mem.bus.addr);
+
+- if (WARN_ON_ONCE(bo->pin_count > 0))
++ if (bo->pin_count > 0)
+ amdgpu_bo_subtract_pin_size(bo);
+
+ if (bo->kfd_bo)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4997-drm-amdgpu-move-the-amdgpu_fbdev_set_suspend-further.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4997-drm-amdgpu-move-the-amdgpu_fbdev_set_suspend-further.patch
new file mode 100644
index 00000000..7dfd4094
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4997-drm-amdgpu-move-the-amdgpu_fbdev_set_suspend-further.patch
@@ -0,0 +1,49 @@
+From 929154f95f4c19079b4f5d9b768546c0efc5e703 Mon Sep 17 00:00:00 2001
+From: Shirish S <shirish.s@amd.com>
+Date: Mon, 23 Jul 2018 15:11:51 +0530
+Subject: [PATCH 4997/5725] drm/amdgpu: move the amdgpu_fbdev_set_suspend()
+ further up
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This patch moves amdgpu_fbdev_set_suspend() to the beginning
+of the suspend sequence.
+
+This is to ensure fbcon does not write to the VRAM
+after the GPU is powered down.
+
+Signed-off-by: Shirish S <shirish.s@amd.com>
+Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 5c128c5..6dcbe98 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -2754,6 +2754,9 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
+
+ drm_kms_helper_poll_disable(dev);
+
++ if (fbcon)
++ amdgpu_fbdev_set_suspend(adev, 1);
++
+ if (!amdgpu_device_has_dc_support(adev)) {
+ /* turn off display hw */
+ drm_modeset_lock_all(dev);
+@@ -2819,9 +2822,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
+ DRM_ERROR("amdgpu asic reset failed\n");
+ }
+
+- if (fbcon)
+- amdgpu_fbdev_set_suspend(adev, 1);
+-
+ return 0;
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4998-drm-amd-display-Remove-unnecessary-warning.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4998-drm-amd-display-Remove-unnecessary-warning.patch
new file mode 100644
index 00000000..26297431
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4998-drm-amd-display-Remove-unnecessary-warning.patch
@@ -0,0 +1,38 @@
+From 4e70f912cb5af747c25732d1376fc0a1ffd37e10 Mon Sep 17 00:00:00 2001
+From: Mikita Lipski <mikita.lipski@amd.com>
+Date: Wed, 4 Jul 2018 11:19:53 -0400
+Subject: [PATCH 4998/5725] drm/amd/display: Remove unnecessary warning
+
+[why]
+The warning message floods the dmesg log on Tonga even
+though pix_clk is expected to be set to zero
+when the pipe is not active.
+[how]
+remove the assert
+
+Signed-off-by: Mikita Lipski <mikita.lipski@amd.com>
+Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c | 4 ----
+ 1 file changed, 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
+index ec32213..74c05e8 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
+@@ -149,10 +149,6 @@ static uint32_t get_max_pixel_clock_for_all_paths(
+ max_pix_clk =
+ pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
+ }
+-
+- if (max_pix_clk == 0)
+- ASSERT(0);
+-
+ return max_pix_clk;
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4999-drm-amd-display-allow-diags-to-skip-initial-link-tra.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4999-drm-amd-display-allow-diags-to-skip-initial-link-tra.patch
new file mode 100644
index 00000000..c7344913
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4999-drm-amd-display-allow-diags-to-skip-initial-link-tra.patch
@@ -0,0 +1,94 @@
+From 5a3bf4ea4dca71cbba53b68925519a2f6f1f79cb Mon Sep 17 00:00:00 2001
+From: Tony Cheng <tony.cheng@amd.com>
+Date: Wed, 11 Jul 2018 15:31:24 -0400
+Subject: [PATCH 4999/5725] drm/amd/display: allow diags to skip initial link
+ training
+
+[why]
+diag specifies the full config and is only concerned about pass/fail at the end
+
+having inter-op code like verifying we can actually train at the reported link rate
+slows down diag tests and adds complexity we don't need
+
+[how]
+add a dc_debug option to skip capability link training
+
+also remove hbr from the function name as verify is not specific to hbr
+
+Signed-off-by: Tony Cheng <tony.cheng@amd.com>
+Reviewed-by: Ken Chalmers <ken.chalmers@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_link.c | 2 +-
+ drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 7 ++++++-
+ drivers/gpu/drm/amd/display/dc/dc.h | 1 +
+ drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h | 2 +-
+ 4 files changed, 9 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index a621467..838231e 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -760,7 +760,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
+ */
+
+ /* deal with non-mst cases */
+- dp_hbr_verify_link_cap(link, &link->reported_link_cap);
++ dp_verify_link_cap(link, &link->reported_link_cap);
+ }
+
+ /* HDMI-DVI Dongle */
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+index b34a694..bd40831 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+@@ -1086,7 +1086,7 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
+ return max_link_cap;
+ }
+
+-bool dp_hbr_verify_link_cap(
++bool dp_verify_link_cap(
+ struct dc_link *link,
+ struct dc_link_settings *known_limit_link_setting)
+ {
+@@ -1101,6 +1101,11 @@ bool dp_hbr_verify_link_cap(
+ enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL;
+ enum link_training_result status;
+
++ if (link->dc->debug.skip_detection_link_training) {
++ link->verified_link_cap = *known_limit_link_setting;
++ return true;
++ }
++
+ success = false;
+ skip_link_training = false;
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index cddc34f..7503dcf 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -259,6 +259,7 @@ struct dc_debug {
+ bool avoid_vbios_exec_table;
+ bool scl_reset_length10;
+ bool hdmi20_disable;
++ bool skip_detection_link_training;
+
+ struct {
+ uint32_t ltFailCount;
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+index 2f783c6..697b5ee 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+@@ -33,7 +33,7 @@ struct dc_link;
+ struct dc_stream_state;
+ struct dc_link_settings;
+
+-bool dp_hbr_verify_link_cap(
++bool dp_verify_link_cap(
+ struct dc_link *link,
+ struct dc_link_settings *known_limit_link_setting);
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5000-drm-amd-display-DPP-CM-ICSC-AYCRCB8888-format-suppor.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5000-drm-amd-display-DPP-CM-ICSC-AYCRCB8888-format-suppor.patch
new file mode 100644
index 00000000..78db8ed7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5000-drm-amd-display-DPP-CM-ICSC-AYCRCB8888-format-suppor.patch
@@ -0,0 +1,55 @@
+From 277a7c3dd97647f4ef25d704d795ffff4cd986c0 Mon Sep 17 00:00:00 2001
+From: vikrant mhaske <vikrant.mhaske@amd.com>
+Date: Thu, 12 Jul 2018 16:04:43 +0800
+Subject: [PATCH 5000/5725] drm/amd/display: DPP CM ICSC AYCRCB8888 format
+ support
+
+[why]
+Diags has POR to run the video workload using AYCRCB8888 through DCN;
+capture it through DWB and send it to VCN hardware to encode
+
+[how]
+added the code to support this format so that DPP ICSC will be able to
+convert it from YUV444 to internal RGB and DWB OCSC will be able to
+convert from internal RGB to YUV420
+
+Signed-off-by: vikrant mhaske <vikrant.mhaske@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 2 +-
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 4 ++++
+ 2 files changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+index e9c1ec5..bbc384f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+@@ -192,7 +192,7 @@ enum surface_pixel_format {
+ /*swaped & float*/
+ SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F,
+ /*grow graphics here if necessary */
+-
++ SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888,
+ SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
+ SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr =
+ SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+index 332354c..2138cd3 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+@@ -294,6 +294,10 @@ void hubp1_program_pixel_format(
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 66);
+ break;
++ case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888:
++ REG_UPDATE(DCSURF_SURFACE_CONFIG,
++ SURFACE_PIXEL_FORMAT, 12);
++ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5001-drm-amd-display-Decouple-aux-from-i2c.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5001-drm-amd-display-Decouple-aux-from-i2c.patch
new file mode 100644
index 00000000..27ed2499
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5001-drm-amd-display-Decouple-aux-from-i2c.patch
@@ -0,0 +1,1949 @@
+From 2e972f9d807a5d585a1d05bea81e34cf21389ee4 Mon Sep 17 00:00:00 2001
+From: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Date: Tue, 10 Jul 2018 17:20:17 -0400
+Subject: [PATCH 5001/5725] drm/amd/display: Decouple aux from i2c
+
+[Why]
+Aux engine is created from i2caux layer. We want to remove this layer
+and use the engine directly.
+
+[How]
+Decouple the aux engine from i2caux. Move aux engine related code to the dce folder, use
+the dc resource pool to manage the engine, and use the engine functions directly.
+
+Signed-off-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c | 22 +-
+ drivers/gpu/drm/amd/display/dc/dce/Makefile | 2 +-
+ drivers/gpu/drm/amd/display/dc/dce/dce_aux.c | 942 +++++++++++++++++++++
+ drivers/gpu/drm/amd/display/dc/dce/dce_aux.h | 111 +++
+ .../drm/amd/display/dc/dce100/dce100_resource.c | 42 +
+ .../drm/amd/display/dc/dce110/dce110_resource.c | 45 +
+ .../drm/amd/display/dc/dce112/dce112_resource.c | 47 +
+ .../drm/amd/display/dc/dce120/dce120_resource.c | 42 +
+ .../gpu/drm/amd/display/dc/dce80/dce80_resource.c | 44 +
+ .../gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 44 +
+ drivers/gpu/drm/amd/display/dc/i2caux/engine.h | 1 +
+ drivers/gpu/drm/amd/display/dc/inc/core_types.h | 2 +-
+ drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h | 113 +++
+ drivers/gpu/drm/amd/display/dc/inc/hw/engine.h | 106 +++
+ 14 files changed, 1549 insertions(+), 14 deletions(-)
+ create mode 100644 drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+ create mode 100644 drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
+ create mode 100644 drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
+ create mode 100644 drivers/gpu/drm/amd/display/dc/inc/hw/engine.h
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+index 08c9d73..4019fe07 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+@@ -33,10 +33,8 @@
+ #include "include/vector.h"
+ #include "core_types.h"
+ #include "dc_link_ddc.h"
+-#include "i2caux/engine.h"
+-#include "i2caux/i2c_engine.h"
+-#include "i2caux/aux_engine.h"
+-#include "i2caux/i2caux.h"
++#include "engine.h"
++#include "aux_engine.h"
+
+ #define AUX_POWER_UP_WA_DELAY 500
+ #define I2C_OVER_AUX_DEFER_WA_DELAY 70
+@@ -641,9 +639,9 @@ int dc_link_aux_transfer(struct ddc_service *ddc,
+ enum aux_transaction_type type,
+ enum i2caux_transaction_action action)
+ {
+- struct i2caux *i2caux = ddc->ctx->i2caux;
+ struct ddc *ddc_pin = ddc->ddc_pin;
+- struct aux_engine *engine;
++ struct engine *engine;
++ struct aux_engine *aux_engine;
+ enum aux_channel_operation_result operation_result;
+ struct aux_request_transaction_data aux_req;
+ struct aux_reply_transaction_data aux_rep;
+@@ -654,7 +652,8 @@ int dc_link_aux_transfer(struct ddc_service *ddc,
+ memset(&aux_req, 0, sizeof(aux_req));
+ memset(&aux_rep, 0, sizeof(aux_rep));
+
+- engine = i2caux->funcs->acquire_aux_engine(i2caux, ddc_pin);
++ engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
++ aux_engine = engine->funcs->acquire(engine, ddc_pin);
+
+ aux_req.type = type;
+ aux_req.action = action;
+@@ -664,15 +663,15 @@ int dc_link_aux_transfer(struct ddc_service *ddc,
+ aux_req.length = size;
+ aux_req.data = buffer;
+
+- engine->funcs->submit_channel_request(engine, &aux_req);
+- operation_result = engine->funcs->get_channel_status(engine, &returned_bytes);
++ aux_engine->funcs->submit_channel_request(aux_engine, &aux_req);
++ operation_result = aux_engine->funcs->get_channel_status(aux_engine, &returned_bytes);
+
+ switch (operation_result) {
+ case AUX_CHANNEL_OPERATION_SUCCEEDED:
+ res = returned_bytes;
+
+ if (res <= size && res >= 0)
+- res = engine->funcs->read_channel_reply(engine, size,
++ res = aux_engine->funcs->read_channel_reply(aux_engine, size,
+ buffer, reply,
+ &status);
+
+@@ -686,8 +685,7 @@ int dc_link_aux_transfer(struct ddc_service *ddc,
+ res = -1;
+ break;
+ }
+-
+- i2caux->funcs->release_engine(i2caux, &engine->base);
++ aux_engine->base.funcs->release_engine(&aux_engine->base);
+ return res;
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile b/drivers/gpu/drm/amd/display/dc/dce/Makefile
+index 8abec0b..67b0852 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/dce/Makefile
+@@ -7,7 +7,7 @@
+
+ DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \
+ dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \
+-dce_clocks.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o
++dce_clocks.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o
+
+
+ AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE))
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+new file mode 100644
+index 0000000..b28e212
+--- /dev/null
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+@@ -0,0 +1,942 @@
++/*
++ * Copyright 2012-15 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++#include "dm_services.h"
++#include "dce_aux.h"
++#include "dce/dce_11_0_sh_mask.h"
++
++#define CTX \
++ aux110->base.base.ctx
++#define REG(reg_name)\
++ (aux110->regs->reg_name)
++
++#define DC_LOGGER \
++ engine->base.ctx->logger
++
++#include "reg_helper.h"
++
++#define FROM_AUX_ENGINE(ptr) \
++ container_of((ptr), struct aux_engine_dce110, base)
++
++#define FROM_ENGINE(ptr) \
++ FROM_AUX_ENGINE(container_of((ptr), struct aux_engine, base))
++
++#define FROM_AUX_ENGINE_ENGINE(ptr) \
++ container_of((ptr), struct aux_engine, base)
++enum {
++ AUX_INVALID_REPLY_RETRY_COUNTER = 1,
++ AUX_TIMED_OUT_RETRY_COUNTER = 2,
++ AUX_DEFER_RETRY_COUNTER = 6
++};
++static void release_engine(
++ struct engine *engine)
++{
++ struct aux_engine_dce110 *aux110 = FROM_ENGINE(engine);
++
++ dal_ddc_close(engine->ddc);
++
++ engine->ddc = NULL;
++
++ REG_UPDATE(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, 1);
++}
++
++#define SW_CAN_ACCESS_AUX 1
++#define DMCU_CAN_ACCESS_AUX 2
++
++static bool is_engine_available(
++ struct aux_engine *engine)
++{
++ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
++
++ uint32_t value = REG_READ(AUX_ARB_CONTROL);
++ uint32_t field = get_reg_field_value(
++ value,
++ AUX_ARB_CONTROL,
++ AUX_REG_RW_CNTL_STATUS);
++
++ return (field != DMCU_CAN_ACCESS_AUX);
++}
++static bool acquire_engine(
++ struct aux_engine *engine)
++{
++ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
++
++ uint32_t value = REG_READ(AUX_ARB_CONTROL);
++ uint32_t field = get_reg_field_value(
++ value,
++ AUX_ARB_CONTROL,
++ AUX_REG_RW_CNTL_STATUS);
++ if (field == DMCU_CAN_ACCESS_AUX)
++ return false;
++ /* enable AUX before request SW to access AUX */
++ value = REG_READ(AUX_CONTROL);
++ field = get_reg_field_value(value,
++ AUX_CONTROL,
++ AUX_EN);
++
++ if (field == 0) {
++ set_reg_field_value(
++ value,
++ 1,
++ AUX_CONTROL,
++ AUX_EN);
++
++ if (REG(AUX_RESET_MASK)) {
++ /*DP_AUX block as part of the enable sequence*/
++ set_reg_field_value(
++ value,
++ 1,
++ AUX_CONTROL,
++ AUX_RESET);
++ }
++
++ REG_WRITE(AUX_CONTROL, value);
++
++ if (REG(AUX_RESET_MASK)) {
++ /*poll HW to make sure reset is done*/
++
++ REG_WAIT(AUX_CONTROL, AUX_RESET_DONE, 1,
++ 1, 11);
++
++ set_reg_field_value(
++ value,
++ 0,
++ AUX_CONTROL,
++ AUX_RESET);
++
++ REG_WRITE(AUX_CONTROL, value);
++
++ REG_WAIT(AUX_CONTROL, AUX_RESET_DONE, 0,
++ 1, 11);
++ }
++ } /*if (field)*/
++
++ /* request SW to access AUX */
++ REG_UPDATE(AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, 1);
++
++ value = REG_READ(AUX_ARB_CONTROL);
++ field = get_reg_field_value(
++ value,
++ AUX_ARB_CONTROL,
++ AUX_REG_RW_CNTL_STATUS);
++
++ return (field == SW_CAN_ACCESS_AUX);
++}
++
++#define COMPOSE_AUX_SW_DATA_16_20(command, address) \
++ ((command) | ((0xF0000 & (address)) >> 16))
++
++#define COMPOSE_AUX_SW_DATA_8_15(address) \
++ ((0xFF00 & (address)) >> 8)
++
++#define COMPOSE_AUX_SW_DATA_0_7(address) \
++ (0xFF & (address))
++
++static void submit_channel_request(
++ struct aux_engine *engine,
++ struct aux_request_transaction_data *request)
++{
++ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
++ uint32_t value;
++ uint32_t length;
++
++ bool is_write =
++ ((request->type == AUX_TRANSACTION_TYPE_DP) &&
++ (request->action == I2CAUX_TRANSACTION_ACTION_DP_WRITE)) ||
++ ((request->type == AUX_TRANSACTION_TYPE_I2C) &&
++ ((request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE) ||
++ (request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT)));
++ if (REG(AUXN_IMPCAL)) {
++ /* clear_aux_error */
++ REG_UPDATE_SEQ(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK,
++ 1,
++ 0);
++
++ REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK,
++ 1,
++ 0);
++
++ /* force_default_calibrate */
++ REG_UPDATE_1BY1_2(AUXN_IMPCAL,
++ AUXN_IMPCAL_ENABLE, 1,
++ AUXN_IMPCAL_OVERRIDE_ENABLE, 0);
++
++ /* bug? why AUXN update EN and OVERRIDE_EN 1 by 1 while AUX P toggles OVERRIDE? */
++
++ REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE,
++ 1,
++ 0);
++ }
++ /* set the delay and the number of bytes to write */
++
++ /* The length includes
++ * the 4 bit header and the 20 bit address
++ * (that is, 3 bytes).
++ * If the requested length is non zero, this means
++ * an additional byte specifying the length is required.
++ */
++
++ length = request->length ? 4 : 3;
++ if (is_write)
++ length += request->length;
++
++ REG_UPDATE_2(AUX_SW_CONTROL,
++ AUX_SW_START_DELAY, request->delay,
++ AUX_SW_WR_BYTES, length);
++
++ /* program action and address and payload data (if 'is_write') */
++ value = REG_UPDATE_4(AUX_SW_DATA,
++ AUX_SW_INDEX, 0,
++ AUX_SW_DATA_RW, 0,
++ AUX_SW_AUTOINCREMENT_DISABLE, 1,
++ AUX_SW_DATA, COMPOSE_AUX_SW_DATA_16_20(request->action, request->address));
++
++ value = REG_SET_2(AUX_SW_DATA, value,
++ AUX_SW_AUTOINCREMENT_DISABLE, 0,
++ AUX_SW_DATA, COMPOSE_AUX_SW_DATA_8_15(request->address));
++
++ value = REG_SET(AUX_SW_DATA, value,
++ AUX_SW_DATA, COMPOSE_AUX_SW_DATA_0_7(request->address));
++
++ if (request->length) {
++ value = REG_SET(AUX_SW_DATA, value,
++ AUX_SW_DATA, request->length - 1);
++ }
++
++ if (is_write) {
++ /* Load the HW buffer with the data to be sent.
++ * This is relevant for the write operation.
++ * For read, the received data will be
++ * processed in process_channel_reply().
++ */
++ uint32_t i = 0;
++
++ while (i < request->length) {
++ value = REG_SET(AUX_SW_DATA, value,
++ AUX_SW_DATA, request->data[i]);
++
++ ++i;
++ }
++ }
++
++ REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1);
++ REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0,
++ 10, aux110->timeout_period/10);
++ REG_UPDATE(AUX_SW_CONTROL, AUX_SW_GO, 1);
++}
++
++static int read_channel_reply(struct aux_engine *engine, uint32_t size,
++ uint8_t *buffer, uint8_t *reply_result,
++ uint32_t *sw_status)
++{
++ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
++ uint32_t bytes_replied;
++ uint32_t reply_result_32;
++
++ *sw_status = REG_GET(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT,
++ &bytes_replied);
++
++ /* In case HPD is LOW, exit AUX transaction */
++ if ((*sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
++ return -1;
++
++ /* Need at least the status byte */
++ if (!bytes_replied)
++ return -1;
++
++ REG_UPDATE_1BY1_3(AUX_SW_DATA,
++ AUX_SW_INDEX, 0,
++ AUX_SW_AUTOINCREMENT_DISABLE, 1,
++ AUX_SW_DATA_RW, 1);
++
++ REG_GET(AUX_SW_DATA, AUX_SW_DATA, &reply_result_32);
++ reply_result_32 = reply_result_32 >> 4;
++ *reply_result = (uint8_t)reply_result_32;
++
++ if (reply_result_32 == 0) { /* ACK */
++ uint32_t i = 0;
++
++ /* First byte was already used to get the command status */
++ --bytes_replied;
++
++ /* Do not overflow buffer */
++ if (bytes_replied > size)
++ return -1;
++
++ while (i < bytes_replied) {
++ uint32_t aux_sw_data_val;
++
++ REG_GET(AUX_SW_DATA, AUX_SW_DATA, &aux_sw_data_val);
++ buffer[i] = aux_sw_data_val;
++ ++i;
++ }
++
++ return i;
++ }
++
++ return 0;
++}
++
++static void process_channel_reply(
++ struct aux_engine *engine,
++ struct aux_reply_transaction_data *reply)
++{
++ int bytes_replied;
++ uint8_t reply_result;
++ uint32_t sw_status;
++
++ bytes_replied = read_channel_reply(engine, reply->length, reply->data,
++ &reply_result, &sw_status);
++
++ /* in case HPD is LOW, exit AUX transaction */
++ if ((sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) {
++ reply->status = AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON;
++ return;
++ }
++
++ if (bytes_replied < 0) {
++ /* Need to handle an error case...
++ * Hopefully, the upper layer function won't call this function if
++ * the number of bytes in the reply was 0, because there was
++ * surely an error that was asserted and should have been
++ * handled for the hot plug case; this could happen.
++ */
++ if (!(sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) {
++ reply->status = AUX_TRANSACTION_REPLY_INVALID;
++ ASSERT_CRITICAL(false);
++ return;
++ }
++ } else {
++
++ switch (reply_result) {
++ case 0: /* ACK */
++ reply->status = AUX_TRANSACTION_REPLY_AUX_ACK;
++ break;
++ case 1: /* NACK */
++ reply->status = AUX_TRANSACTION_REPLY_AUX_NACK;
++ break;
++ case 2: /* DEFER */
++ reply->status = AUX_TRANSACTION_REPLY_AUX_DEFER;
++ break;
++ case 4: /* AUX ACK / I2C NACK */
++ reply->status = AUX_TRANSACTION_REPLY_I2C_NACK;
++ break;
++ case 8: /* AUX ACK / I2C DEFER */
++ reply->status = AUX_TRANSACTION_REPLY_I2C_DEFER;
++ break;
++ default:
++ reply->status = AUX_TRANSACTION_REPLY_INVALID;
++ }
++ }
++}
++
++static enum aux_channel_operation_result get_channel_status(
++ struct aux_engine *engine,
++ uint8_t *returned_bytes)
++{
++ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
++
++ uint32_t value;
++
++ if (returned_bytes == NULL) {
++ /* caller passed a NULL pointer */
++ ASSERT_CRITICAL(false);
++ return AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN;
++ }
++ *returned_bytes = 0;
++
++ /* poll to make sure that SW_DONE is asserted */
++ value = REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 1,
++ 10, aux110->timeout_period/10);
++
++ /* in case HPD is LOW, exit AUX transaction */
++ if ((value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
++ return AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON;
++
++ /* Note that the following bits are set in 'status.bits'
++ * during CTS 4.2.1.2 (FW 3.3.1):
++ * AUX_SW_RX_MIN_COUNT_VIOL, AUX_SW_RX_INVALID_STOP,
++ * AUX_SW_RX_RECV_NO_DET, AUX_SW_RX_RECV_INVALID_H.
++ *
++ * AUX_SW_RX_MIN_COUNT_VIOL is an internal,
++ * HW debugging bit and should be ignored.
++ */
++ if (value & AUX_SW_STATUS__AUX_SW_DONE_MASK) {
++ if ((value & AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE_MASK) ||
++ (value & AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK))
++ return AUX_CHANNEL_OPERATION_FAILED_TIMEOUT;
++
++ else if ((value & AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP_MASK) ||
++ (value & AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET_MASK) ||
++ (value &
++ AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H_MASK) ||
++ (value & AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L_MASK))
++ return AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY;
++
++ *returned_bytes = get_reg_field_value(value,
++ AUX_SW_STATUS,
++ AUX_SW_REPLY_BYTE_COUNT);
++
++ if (*returned_bytes == 0)
++ return
++ AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY;
++ else {
++ *returned_bytes -= 1;
++ return AUX_CHANNEL_OPERATION_SUCCEEDED;
++ }
++ } else {
++ /* time_elapsed >= aux_engine->timeout_period:
++ * AUX_SW_DONE was never asserted
++ * (AUX_SW_HPD_DISCON was already checked above) */
++ ASSERT_CRITICAL(false);
++ return AUX_CHANNEL_OPERATION_FAILED_TIMEOUT;
++ }
++}
++static void process_read_reply(
++ struct aux_engine *engine,
++ struct read_command_context *ctx)
++{
++ engine->funcs->process_channel_reply(engine, &ctx->reply);
++
++ switch (ctx->reply.status) {
++ case AUX_TRANSACTION_REPLY_AUX_ACK:
++ ctx->defer_retry_aux = 0;
++ if (ctx->returned_byte > ctx->current_read_length) {
++ ctx->status =
++ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
++ ctx->operation_succeeded = false;
++ } else if (ctx->returned_byte < ctx->current_read_length) {
++ ctx->current_read_length -= ctx->returned_byte;
++
++ ctx->offset += ctx->returned_byte;
++
++ ++ctx->invalid_reply_retry_aux_on_ack;
++
++ if (ctx->invalid_reply_retry_aux_on_ack >
++ AUX_INVALID_REPLY_RETRY_COUNTER) {
++ ctx->status =
++ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
++ ctx->operation_succeeded = false;
++ }
++ } else {
++ ctx->status = I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
++ ctx->transaction_complete = true;
++ ctx->operation_succeeded = true;
++ }
++ break;
++ case AUX_TRANSACTION_REPLY_AUX_NACK:
++ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_NACK;
++ ctx->operation_succeeded = false;
++ break;
++ case AUX_TRANSACTION_REPLY_AUX_DEFER:
++ ++ctx->defer_retry_aux;
++
++ if (ctx->defer_retry_aux > AUX_DEFER_RETRY_COUNTER) {
++ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
++ ctx->operation_succeeded = false;
++ }
++ break;
++ case AUX_TRANSACTION_REPLY_I2C_DEFER:
++ ctx->defer_retry_aux = 0;
++
++ ++ctx->defer_retry_i2c;
++
++ if (ctx->defer_retry_i2c > AUX_DEFER_RETRY_COUNTER) {
++ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
++ ctx->operation_succeeded = false;
++ }
++ break;
++ case AUX_TRANSACTION_REPLY_HPD_DISCON:
++ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
++ ctx->operation_succeeded = false;
++ break;
++ default:
++ ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
++ ctx->operation_succeeded = false;
++ }
++}
++static void process_read_request(
++ struct aux_engine *engine,
++ struct read_command_context *ctx)
++{
++ enum aux_channel_operation_result operation_result;
++
++ engine->funcs->submit_channel_request(engine, &ctx->request);
++
++ operation_result = engine->funcs->get_channel_status(
++ engine, &ctx->returned_byte);
++
++ switch (operation_result) {
++ case AUX_CHANNEL_OPERATION_SUCCEEDED:
++ if (ctx->returned_byte > ctx->current_read_length) {
++ ctx->status =
++ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
++ ctx->operation_succeeded = false;
++ } else {
++ ctx->timed_out_retry_aux = 0;
++ ctx->invalid_reply_retry_aux = 0;
++
++ ctx->reply.length = ctx->returned_byte;
++ ctx->reply.data = ctx->buffer;
++
++ process_read_reply(engine, ctx);
++ }
++ break;
++ case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
++ ++ctx->invalid_reply_retry_aux;
++
++ if (ctx->invalid_reply_retry_aux >
++ AUX_INVALID_REPLY_RETRY_COUNTER) {
++ ctx->status =
++ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
++ ctx->operation_succeeded = false;
++ } else
++ udelay(400);
++ break;
++ case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
++ ++ctx->timed_out_retry_aux;
++
++ if (ctx->timed_out_retry_aux > AUX_TIMED_OUT_RETRY_COUNTER) {
++ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
++ ctx->operation_succeeded = false;
++ } else {
++ /* DP 1.2a, table 2-58:
++ * "S3: AUX Request CMD PENDING:
++ * retry 3 times, with 400usec wait on each"
++ * The HW timeout is set to 550usec,
++ * so we should not wait here
++ */
++ }
++ break;
++ case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
++ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
++ ctx->operation_succeeded = false;
++ break;
++ default:
++ ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
++ ctx->operation_succeeded = false;
++ }
++}
++static bool read_command(
++ struct aux_engine *engine,
++ struct i2caux_transaction_request *request,
++ bool middle_of_transaction)
++{
++ struct read_command_context ctx;
++
++ ctx.buffer = request->payload.data;
++ ctx.current_read_length = request->payload.length;
++ ctx.offset = 0;
++ ctx.timed_out_retry_aux = 0;
++ ctx.invalid_reply_retry_aux = 0;
++ ctx.defer_retry_aux = 0;
++ ctx.defer_retry_i2c = 0;
++ ctx.invalid_reply_retry_aux_on_ack = 0;
++ ctx.transaction_complete = false;
++ ctx.operation_succeeded = true;
++
++ if (request->payload.address_space ==
++ I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
++ ctx.request.type = AUX_TRANSACTION_TYPE_DP;
++ ctx.request.action = I2CAUX_TRANSACTION_ACTION_DP_READ;
++ ctx.request.address = request->payload.address;
++ } else if (request->payload.address_space ==
++ I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C) {
++ ctx.request.type = AUX_TRANSACTION_TYPE_I2C;
++ ctx.request.action = middle_of_transaction ?
++ I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT :
++ I2CAUX_TRANSACTION_ACTION_I2C_READ;
++ ctx.request.address = request->payload.address >> 1;
++ } else {
++ /* in DAL2, there was no return in such case */
++ BREAK_TO_DEBUGGER();
++ return false;
++ }
++
++ ctx.request.delay = 0;
++
++ do {
++ memset(ctx.buffer + ctx.offset, 0, ctx.current_read_length);
++
++ ctx.request.data = ctx.buffer + ctx.offset;
++ ctx.request.length = ctx.current_read_length;
++
++ process_read_request(engine, &ctx);
++
++ request->status = ctx.status;
++
++ if (ctx.operation_succeeded && !ctx.transaction_complete)
++ if (ctx.request.type == AUX_TRANSACTION_TYPE_I2C)
++ msleep(engine->delay);
++ } while (ctx.operation_succeeded && !ctx.transaction_complete);
++
++ if (request->payload.address_space ==
++ I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
++ DC_LOG_I2C_AUX("READ: addr:0x%x value:0x%x Result:%d",
++ request->payload.address,
++ request->payload.data[0],
++ ctx.operation_succeeded);
++ }
++
++ return ctx.operation_succeeded;
++}
++
++static void process_write_reply(
++ struct aux_engine *engine,
++ struct write_command_context *ctx)
++{
++ engine->funcs->process_channel_reply(engine, &ctx->reply);
++
++ switch (ctx->reply.status) {
++ case AUX_TRANSACTION_REPLY_AUX_ACK:
++ ctx->operation_succeeded = true;
++
++ if (ctx->returned_byte) {
++ ctx->request.action = ctx->mot ?
++ I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT :
++ I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST;
++
++ ctx->current_write_length = 0;
++
++ ++ctx->ack_m_retry;
++
++ if (ctx->ack_m_retry > AUX_DEFER_RETRY_COUNTER) {
++ ctx->status =
++ I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
++ ctx->operation_succeeded = false;
++ } else
++ udelay(300);
++ } else {
++ ctx->status = I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
++ ctx->defer_retry_aux = 0;
++ ctx->ack_m_retry = 0;
++ ctx->transaction_complete = true;
++ }
++ break;
++ case AUX_TRANSACTION_REPLY_AUX_NACK:
++ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_NACK;
++ ctx->operation_succeeded = false;
++ break;
++ case AUX_TRANSACTION_REPLY_AUX_DEFER:
++ ++ctx->defer_retry_aux;
++
++ if (ctx->defer_retry_aux > ctx->max_defer_retry) {
++ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
++ ctx->operation_succeeded = false;
++ }
++ break;
++ case AUX_TRANSACTION_REPLY_I2C_DEFER:
++ ctx->defer_retry_aux = 0;
++ ctx->current_write_length = 0;
++
++ ctx->request.action = ctx->mot ?
++ I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT :
++ I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST;
++
++ ++ctx->defer_retry_i2c;
++
++ if (ctx->defer_retry_i2c > ctx->max_defer_retry) {
++ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
++ ctx->operation_succeeded = false;
++ }
++ break;
++ case AUX_TRANSACTION_REPLY_HPD_DISCON:
++ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
++ ctx->operation_succeeded = false;
++ break;
++ default:
++ ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
++ ctx->operation_succeeded = false;
++ }
++}
++static void process_write_request(
++ struct aux_engine *engine,
++ struct write_command_context *ctx)
++{
++ enum aux_channel_operation_result operation_result;
++
++ engine->funcs->submit_channel_request(engine, &ctx->request);
++
++ operation_result = engine->funcs->get_channel_status(
++ engine, &ctx->returned_byte);
++
++ switch (operation_result) {
++ case AUX_CHANNEL_OPERATION_SUCCEEDED:
++ ctx->timed_out_retry_aux = 0;
++ ctx->invalid_reply_retry_aux = 0;
++
++ ctx->reply.length = ctx->returned_byte;
++ ctx->reply.data = ctx->reply_data;
++
++ process_write_reply(engine, ctx);
++ break;
++ case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
++ ++ctx->invalid_reply_retry_aux;
++
++ if (ctx->invalid_reply_retry_aux >
++ AUX_INVALID_REPLY_RETRY_COUNTER) {
++ ctx->status =
++ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
++ ctx->operation_succeeded = false;
++ } else
++ udelay(400);
++ break;
++ case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
++ ++ctx->timed_out_retry_aux;
++
++ if (ctx->timed_out_retry_aux > AUX_TIMED_OUT_RETRY_COUNTER) {
++ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
++ ctx->operation_succeeded = false;
++ } else {
++ /* DP 1.2a, table 2-58:
++ * "S3: AUX Request CMD PENDING:
++ * retry 3 times, with 400usec wait on each"
++ * The HW timeout is set to 550usec,
++ * so we should not wait here
++ */
++ }
++ break;
++ case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
++ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
++ ctx->operation_succeeded = false;
++ break;
++ default:
++ ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
++ ctx->operation_succeeded = false;
++ }
++}
++static bool write_command(
++ struct aux_engine *engine,
++ struct i2caux_transaction_request *request,
++ bool middle_of_transaction)
++{
++ struct write_command_context ctx;
++
++ ctx.mot = middle_of_transaction;
++ ctx.buffer = request->payload.data;
++ ctx.current_write_length = request->payload.length;
++ ctx.timed_out_retry_aux = 0;
++ ctx.invalid_reply_retry_aux = 0;
++ ctx.defer_retry_aux = 0;
++ ctx.defer_retry_i2c = 0;
++ ctx.ack_m_retry = 0;
++ ctx.transaction_complete = false;
++ ctx.operation_succeeded = true;
++
++ if (request->payload.address_space ==
++ I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
++ ctx.request.type = AUX_TRANSACTION_TYPE_DP;
++ ctx.request.action = I2CAUX_TRANSACTION_ACTION_DP_WRITE;
++ ctx.request.address = request->payload.address;
++ } else if (request->payload.address_space ==
++ I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C) {
++ ctx.request.type = AUX_TRANSACTION_TYPE_I2C;
++ ctx.request.action = middle_of_transaction ?
++ I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT :
++ I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
++ ctx.request.address = request->payload.address >> 1;
++ } else {
++ /* in DAL2, there was no return in such case */
++ BREAK_TO_DEBUGGER();
++ return false;
++ }
++
++ ctx.request.delay = 0;
++
++ ctx.max_defer_retry =
++ (engine->max_defer_write_retry > AUX_DEFER_RETRY_COUNTER) ?
++ engine->max_defer_write_retry : AUX_DEFER_RETRY_COUNTER;
++
++ do {
++ ctx.request.data = ctx.buffer;
++ ctx.request.length = ctx.current_write_length;
++
++ process_write_request(engine, &ctx);
++
++ request->status = ctx.status;
++
++ if (ctx.operation_succeeded && !ctx.transaction_complete)
++ if (ctx.request.type == AUX_TRANSACTION_TYPE_I2C)
++ msleep(engine->delay);
++ } while (ctx.operation_succeeded && !ctx.transaction_complete);
++
++ if (request->payload.address_space ==
++ I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
++ DC_LOG_I2C_AUX("WRITE: addr:0x%x value:0x%x Result:%d",
++ request->payload.address,
++ request->payload.data[0],
++ ctx.operation_succeeded);
++ }
++
++ return ctx.operation_succeeded;
++}
++static bool end_of_transaction_command(
++ struct aux_engine *engine,
++ struct i2caux_transaction_request *request)
++{
++ struct i2caux_transaction_request dummy_request;
++ uint8_t dummy_data;
++
++ /* [tcheng] We only need to send the stop (read with MOT = 0)
++ * for I2C-over-Aux, not native AUX
++ */
++
++ if (request->payload.address_space !=
++ I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C)
++ return false;
++
++ dummy_request.operation = request->operation;
++ dummy_request.payload.address_space = request->payload.address_space;
++ dummy_request.payload.address = request->payload.address;
++
++ /*
++ * Add a dummy byte due to some receiver quirk
++ * where one byte is sent along with MOT = 0.
++ * Ideally this should be 0.
++ */
++
++ dummy_request.payload.length = 0;
++ dummy_request.payload.data = &dummy_data;
++
++ if (request->operation == I2CAUX_TRANSACTION_READ)
++ return read_command(engine, &dummy_request, false);
++ else
++ return write_command(engine, &dummy_request, false);
++
++ /* according to Syed, DoDummyMOT is not needed now */
++}
++bool submit_request(
++ struct engine *engine,
++ struct i2caux_transaction_request *request,
++ bool middle_of_transaction)
++{
++ struct aux_engine *aux_engine = FROM_AUX_ENGINE_ENGINE(engine);
++
++ bool result;
++ bool mot_used = true;
++
++ switch (request->operation) {
++ case I2CAUX_TRANSACTION_READ:
++ result = read_command(aux_engine, request, mot_used);
++ break;
++ case I2CAUX_TRANSACTION_WRITE:
++ result = write_command(aux_engine, request, mot_used);
++ break;
++ default:
++ result = false;
++ }
++
++ /* [tcheng]
++ * need to send a stop for the last transaction to free up the AUX;
++ * if the above command fails, this would be the last transaction
++ */
++
++ if (!middle_of_transaction || !result)
++ end_of_transaction_command(aux_engine, request);
++
++ /* mask AUX interrupt */
++
++ return result;
++}
++enum i2caux_engine_type get_engine_type(
++ const struct engine *engine)
++{
++ return I2CAUX_ENGINE_TYPE_AUX;
++}
++
++static struct aux_engine *acquire(
++ struct engine *engine,
++ struct ddc *ddc)
++{
++ struct aux_engine *aux_engine = FROM_AUX_ENGINE_ENGINE(engine);
++ enum gpio_result result;
++
++ if (aux_engine->funcs->is_engine_available) {
++ /* check whether SW can use the engine */
++ if (!aux_engine->funcs->is_engine_available(aux_engine))
++ return NULL;
++ }
++
++ result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE,
++ GPIO_DDC_CONFIG_TYPE_MODE_AUX);
++
++ if (result != GPIO_RESULT_OK)
++ return NULL;
++
++ if (!aux_engine->funcs->acquire_engine(aux_engine)) {
++ dal_ddc_close(ddc);
++ return NULL;
++ }
++
++ engine->ddc = ddc;
++
++ return aux_engine;
++}
++
++static const struct aux_engine_funcs aux_engine_funcs = {
++ .acquire_engine = acquire_engine,
++ .submit_channel_request = submit_channel_request,
++ .process_channel_reply = process_channel_reply,
++ .read_channel_reply = read_channel_reply,
++ .get_channel_status = get_channel_status,
++ .is_engine_available = is_engine_available,
++};
++
++static const struct engine_funcs engine_funcs = {
++ .release_engine = release_engine,
++ .destroy_engine = dce110_engine_destroy,
++ .submit_request = submit_request,
++ .get_engine_type = get_engine_type,
++ .acquire = acquire,
++};
++
++void dce110_engine_destroy(struct engine **engine)
++{
++
++ struct aux_engine_dce110 *engine110 = FROM_ENGINE(*engine);
++
++ kfree(engine110);
++ *engine = NULL;
++
++}
++struct aux_engine *dce110_aux_engine_construct(struct aux_engine_dce110 *aux_engine110,
++ struct dc_context *ctx,
++ uint32_t inst,
++ uint32_t timeout_period,
++ const struct dce110_aux_registers *regs)
++{
++ aux_engine110->base.base.ddc = NULL;
++ aux_engine110->base.base.ctx = ctx;
++ aux_engine110->base.delay = 0;
++ aux_engine110->base.max_defer_write_retry = 0;
++ aux_engine110->base.base.funcs = &engine_funcs;
++ aux_engine110->base.funcs = &aux_engine_funcs;
++ aux_engine110->base.base.inst = inst;
++ aux_engine110->timeout_period = timeout_period;
++ aux_engine110->regs = regs;
++
++ return &aux_engine110->base;
++}
++
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
+new file mode 100644
+index 0000000..c6b2aec
+--- /dev/null
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
+@@ -0,0 +1,111 @@
++/*
++ * Copyright 2012-15 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++#ifndef __DAL_AUX_ENGINE_DCE110_H__
++#define __DAL_AUX_ENGINE_DCE110_H__
++#include "aux_engine.h"
++
++#define AUX_COMMON_REG_LIST(id)\
++ SRI(AUX_CONTROL, DP_AUX, id), \
++ SRI(AUX_ARB_CONTROL, DP_AUX, id), \
++ SRI(AUX_SW_DATA, DP_AUX, id), \
++ SRI(AUX_SW_CONTROL, DP_AUX, id), \
++ SRI(AUX_INTERRUPT_CONTROL, DP_AUX, id), \
++ SRI(AUX_SW_STATUS, DP_AUX, id), \
++ SR(AUXN_IMPCAL), \
++ SR(AUXP_IMPCAL)
++
++struct dce110_aux_registers {
++ uint32_t AUX_CONTROL;
++ uint32_t AUX_ARB_CONTROL;
++ uint32_t AUX_SW_DATA;
++ uint32_t AUX_SW_CONTROL;
++ uint32_t AUX_INTERRUPT_CONTROL;
++ uint32_t AUX_SW_STATUS;
++ uint32_t AUXN_IMPCAL;
++ uint32_t AUXP_IMPCAL;
++
++ uint32_t AUX_RESET_MASK;
++};
++
++enum { /* This is the timeout as defined in DP 1.2a,
++ * 2.3.4 "Detailed uPacket TX AUX CH State Description".
++ */
++ AUX_TIMEOUT_PERIOD = 400,
++
++ /* Ideally, the SW timeout should be just above 550usec
++ * which is programmed in HW.
++ * But the SW timeout of 600usec is not reliable,
++ * because on some systems, delay_in_microseconds()
++ * returns faster than it should.
++ * EPR #379763: by trial-and-error on different systems,
++ * 700usec is the minimum reliable SW timeout for polling
++ * the AUX_SW_STATUS.AUX_SW_DONE bit.
++ * This timeout expires *only* when there is
++ * AUX Error or AUX Timeout conditions - not during normal operation.
++ * During normal operation, AUX_SW_STATUS.AUX_SW_DONE bit is set
++ * at most within ~240usec. That means,
++ * increasing this timeout will not affect normal operation,
++ * and we'll timeout after
++ * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD = 1600usec.
++ * This timeout is especially important for
++ * resume from S3 and CTS.
++ */
++ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER = 4
++};
++struct aux_engine_dce110 {
++ struct aux_engine base;
++ const struct dce110_aux_registers *regs;
++ struct {
++ uint32_t aux_control;
++ uint32_t aux_arb_control;
++ uint32_t aux_sw_data;
++ uint32_t aux_sw_control;
++ uint32_t aux_interrupt_control;
++ uint32_t aux_sw_status;
++ } addr;
++ uint32_t timeout_period;
++};
++
++struct aux_engine_dce110_init_data {
++ uint32_t engine_id;
++ uint32_t timeout_period;
++ struct dc_context *ctx;
++ const struct dce110_aux_registers *regs;
++};
++
++struct aux_engine *dce110_aux_engine_construct(
++ struct aux_engine_dce110 *aux_engine110,
++ struct dc_context *ctx,
++ uint32_t inst,
++ uint32_t timeout_period,
++ const struct dce110_aux_registers *regs);
++
++void dce110_engine_destroy(struct engine **engine);
++
++bool dce110_aux_engine_acquire(
++ struct engine *aux_engine,
++ struct ddc *ddc);
++#endif
+diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+index ad8ad4e..c34c953 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+@@ -52,6 +52,7 @@
+ #include "dce/dce_10_0_sh_mask.h"
+
+ #include "dce/dce_dmcu.h"
++#include "dce/dce_aux.h"
+ #include "dce/dce_abm.h"
+
+ #ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
+@@ -279,7 +280,20 @@ static const struct dce_opp_shift opp_shift = {
+ static const struct dce_opp_mask opp_mask = {
+ OPP_COMMON_MASK_SH_LIST_DCE_100(_MASK)
+ };
++#define aux_engine_regs(id)\
++[id] = {\
++ AUX_COMMON_REG_LIST(id), \
++ .AUX_RESET_MASK = 0 \
++}
+
++static const struct dce110_aux_registers aux_engine_regs[] = {
++ aux_engine_regs(0),
++ aux_engine_regs(1),
++ aux_engine_regs(2),
++ aux_engine_regs(3),
++ aux_engine_regs(4),
++ aux_engine_regs(5)
++};
+
+ #define audio_regs(id)\
+ [id] = {\
+@@ -572,6 +586,23 @@ struct output_pixel_processor *dce100_opp_create(
+ return &opp->base;
+ }
+
++struct engine *dce100_aux_engine_create(
++ struct dc_context *ctx,
++ uint32_t inst)
++{
++ struct aux_engine_dce110 *aux_engine =
++ kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
++
++ if (!aux_engine)
++ return NULL;
++
++ dce110_aux_engine_construct(aux_engine, ctx, inst,
++ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
++ &aux_engine_regs[inst]);
++
++ return &aux_engine->base.base;
++}
++
+ struct clock_source *dce100_clock_source_create(
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+@@ -624,6 +655,10 @@ static void destruct(struct dce110_resource_pool *pool)
+ kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
+ pool->base.timing_generators[i] = NULL;
+ }
++
++ if (pool->base.engines[i] != NULL)
++ dce110_engine_destroy(&pool->base.engines[i]);
++
+ }
+
+ for (i = 0; i < pool->base.stream_enc_count; i++) {
+@@ -928,6 +963,13 @@ static bool construct(
+ "DC: failed to create output pixel processor!\n");
+ goto res_create_fail;
+ }
++ pool->base.engines[i] = dce100_aux_engine_create(ctx, i);
++ if (pool->base.engines[i] == NULL) {
++ BREAK_TO_DEBUGGER();
++ dm_error(
++ "DC:failed to create aux engine!!\n");
++ goto res_create_fail;
++ }
+ }
+
+ dc->caps.max_planes = pool->base.pipe_count;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+index 1c902e4..4a665a2 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+@@ -49,6 +49,7 @@
+ #include "dce/dce_clock_source.h"
+ #include "dce/dce_hwseq.h"
+ #include "dce110/dce110_hw_sequencer.h"
++#include "dce/dce_aux.h"
+ #include "dce/dce_abm.h"
+ #include "dce/dce_dmcu.h"
+
+@@ -306,6 +307,21 @@ static const struct dce_opp_mask opp_mask = {
+ OPP_COMMON_MASK_SH_LIST_DCE_110(_MASK)
+ };
+
++#define aux_engine_regs(id)\
++[id] = {\
++ AUX_COMMON_REG_LIST(id), \
++ .AUX_RESET_MASK = 0 \
++}
++
++static const struct dce110_aux_registers aux_engine_regs[] = {
++ aux_engine_regs(0),
++ aux_engine_regs(1),
++ aux_engine_regs(2),
++ aux_engine_regs(3),
++ aux_engine_regs(4),
++ aux_engine_regs(5)
++};
++
+ #define audio_regs(id)\
+ [id] = {\
+ AUD_COMMON_REG_LIST(id)\
+@@ -588,6 +604,23 @@ static struct output_pixel_processor *dce110_opp_create(
+ return &opp->base;
+ }
+
++struct engine *dce110_aux_engine_create(
++ struct dc_context *ctx,
++ uint32_t inst)
++{
++ struct aux_engine_dce110 *aux_engine =
++ kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
++
++ if (!aux_engine)
++ return NULL;
++
++ dce110_aux_engine_construct(aux_engine, ctx, inst,
++ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
++ &aux_engine_regs[inst]);
++
++ return &aux_engine->base.base;
++}
++
+ struct clock_source *dce110_clock_source_create(
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+@@ -651,6 +684,10 @@ static void destruct(struct dce110_resource_pool *pool)
+ kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
+ pool->base.timing_generators[i] = NULL;
+ }
++
++ if (pool->base.engines[i] != NULL)
++ dce110_engine_destroy(&pool->base.engines[i]);
++
+ }
+
+ for (i = 0; i < pool->base.stream_enc_count; i++) {
+@@ -1258,6 +1295,14 @@ static bool construct(
+ "DC: failed to create output pixel processor!\n");
+ goto res_create_fail;
+ }
++
++ pool->base.engines[i] = dce110_aux_engine_create(ctx, i);
++ if (pool->base.engines[i] == NULL) {
++ BREAK_TO_DEBUGGER();
++ dm_error(
++ "DC:failed to create aux engine!!\n");
++ goto res_create_fail;
++ }
+ }
+
+ dc->fbc_compressor = dce110_compressor_create(ctx);
+diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+index 30d5b32..caf90ae 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+@@ -49,6 +49,7 @@
+ #include "dce112/dce112_hw_sequencer.h"
+ #include "dce/dce_abm.h"
+ #include "dce/dce_dmcu.h"
++#include "dce/dce_aux.h"
+
+ #include "reg_helper.h"
+
+@@ -314,6 +315,21 @@ static const struct dce_opp_mask opp_mask = {
+ OPP_COMMON_MASK_SH_LIST_DCE_112(_MASK)
+ };
+
++#define aux_engine_regs(id)\
++[id] = {\
++ AUX_COMMON_REG_LIST(id), \
++ .AUX_RESET_MASK = 0 \
++}
++
++static const struct dce110_aux_registers aux_engine_regs[] = {
++ aux_engine_regs(0),
++ aux_engine_regs(1),
++ aux_engine_regs(2),
++ aux_engine_regs(3),
++ aux_engine_regs(4),
++ aux_engine_regs(5)
++};
++
+ #define audio_regs(id)\
+ [id] = {\
+ AUD_COMMON_REG_LIST(id)\
+@@ -588,6 +604,23 @@ struct output_pixel_processor *dce112_opp_create(
+ return &opp->base;
+ }
+
++struct engine *dce112_aux_engine_create(
++ struct dc_context *ctx,
++ uint32_t inst)
++{
++ struct aux_engine_dce110 *aux_engine =
++ kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
++
++ if (!aux_engine)
++ return NULL;
++
++ dce110_aux_engine_construct(aux_engine, ctx, inst,
++ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
++ &aux_engine_regs[inst]);
++
++ return &aux_engine->base.base;
++}
++
+ struct clock_source *dce112_clock_source_create(
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+@@ -625,6 +658,9 @@ static void destruct(struct dce110_resource_pool *pool)
+ if (pool->base.opps[i] != NULL)
+ dce110_opp_destroy(&pool->base.opps[i]);
+
++ if (pool->base.engines[i] != NULL)
++ dce110_engine_destroy(&pool->base.engines[i]);
++
+ if (pool->base.transforms[i] != NULL)
+ dce112_transform_destroy(&pool->base.transforms[i]);
+
+@@ -640,6 +676,10 @@ static void destruct(struct dce110_resource_pool *pool)
+ kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
+ pool->base.timing_generators[i] = NULL;
+ }
++
++ if (pool->base.engines[i] != NULL)
++ dce110_engine_destroy(&pool->base.engines[i]);
++
+ }
+
+ for (i = 0; i < pool->base.stream_enc_count; i++) {
+@@ -1208,6 +1248,13 @@ static bool construct(
+ "DC:failed to create output pixel processor!\n");
+ goto res_create_fail;
+ }
++ pool->base.engines[i] = dce112_aux_engine_create(ctx, i);
++ if (pool->base.engines[i] == NULL) {
++ BREAK_TO_DEBUGGER();
++ dm_error(
++ "DC:failed to create aux engine!!\n");
++ goto res_create_fail;
++ }
+ }
+
+ if (!resource_construct(num_virtual_links, dc, &pool->base,
+diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+index 8381f27..e389832 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+@@ -53,6 +53,7 @@
+ #include "dce/dce_hwseq.h"
+ #include "dce/dce_abm.h"
+ #include "dce/dce_dmcu.h"
++#include "dce/dce_aux.h"
+
+ #include "dce/dce_12_0_offset.h"
+ #include "dce/dce_12_0_sh_mask.h"
+@@ -297,6 +298,20 @@ static const struct dce_opp_shift opp_shift = {
+ static const struct dce_opp_mask opp_mask = {
+ OPP_COMMON_MASK_SH_LIST_DCE_120(_MASK)
+ };
++ #define aux_engine_regs(id)\
++[id] = {\
++ AUX_COMMON_REG_LIST(id), \
++ .AUX_RESET_MASK = 0 \
++}
++
++static const struct dce110_aux_registers aux_engine_regs[] = {
++ aux_engine_regs(0),
++ aux_engine_regs(1),
++ aux_engine_regs(2),
++ aux_engine_regs(3),
++ aux_engine_regs(4),
++ aux_engine_regs(5)
++};
+
+ #define audio_regs(id)\
+ [id] = {\
+@@ -361,6 +376,22 @@ struct output_pixel_processor *dce120_opp_create(
+ ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask);
+ return &opp->base;
+ }
++struct engine *dce120_aux_engine_create(
++ struct dc_context *ctx,
++ uint32_t inst)
++{
++ struct aux_engine_dce110 *aux_engine =
++ kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
++
++ if (!aux_engine)
++ return NULL;
++
++ dce110_aux_engine_construct(aux_engine, ctx, inst,
++ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
++ &aux_engine_regs[inst]);
++
++ return &aux_engine->base.base;
++}
+
+ static const struct bios_registers bios_regs = {
+ .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 + NBIO_BASE(mmBIOS_SCRATCH_6_BASE_IDX)
+@@ -467,6 +498,10 @@ static void destruct(struct dce110_resource_pool *pool)
+ kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
+ pool->base.timing_generators[i] = NULL;
+ }
++
++ if (pool->base.engines[i] != NULL)
++ dce110_engine_destroy(&pool->base.engines[i]);
++
+ }
+
+ for (i = 0; i < pool->base.audio_count; i++) {
+@@ -984,6 +1019,13 @@ static bool construct(
+ dm_error(
+ "DC: failed to create output pixel processor!\n");
+ }
++ pool->base.engines[i] = dce120_aux_engine_create(ctx, i);
++ if (pool->base.engines[i] == NULL) {
++ BREAK_TO_DEBUGGER();
++ dm_error(
++ "DC:failed to create aux engine!!\n");
++ goto res_create_fail;
++ }
+
+ /* check next valid pipe */
+ j++;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+index 2ac95ec..6fb33ad 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+@@ -54,6 +54,7 @@
+ #include "reg_helper.h"
+
+ #include "dce/dce_dmcu.h"
++#include "dce/dce_aux.h"
+ #include "dce/dce_abm.h"
+ /* TODO remove this include */
+
+@@ -298,6 +299,21 @@ static const struct dce_opp_mask opp_mask = {
+ OPP_COMMON_MASK_SH_LIST_DCE_80(_MASK)
+ };
+
++#define aux_engine_regs(id)\
++[id] = {\
++ AUX_COMMON_REG_LIST(id), \
++ .AUX_RESET_MASK = 0 \
++}
++
++static const struct dce110_aux_registers aux_engine_regs[] = {
++ aux_engine_regs(0),
++ aux_engine_regs(1),
++ aux_engine_regs(2),
++ aux_engine_regs(3),
++ aux_engine_regs(4),
++ aux_engine_regs(5)
++};
++
+ #define audio_regs(id)\
+ [id] = {\
+ AUD_COMMON_REG_LIST(id)\
+@@ -448,6 +464,23 @@ static struct output_pixel_processor *dce80_opp_create(
+ return &opp->base;
+ }
+
++struct engine *dce80_aux_engine_create(
++ struct dc_context *ctx,
++ uint32_t inst)
++{
++ struct aux_engine_dce110 *aux_engine =
++ kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
++
++ if (!aux_engine)
++ return NULL;
++
++ dce110_aux_engine_construct(aux_engine, ctx, inst,
++ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
++ &aux_engine_regs[inst]);
++
++ return &aux_engine->base.base;
++}
++
+ static struct stream_encoder *dce80_stream_encoder_create(
+ enum engine_id eng_id,
+ struct dc_context *ctx)
+@@ -655,6 +688,9 @@ static void destruct(struct dce110_resource_pool *pool)
+ kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
+ pool->base.timing_generators[i] = NULL;
+ }
++
++ if (pool->base.engines[i] != NULL)
++ dce110_engine_destroy(&pool->base.engines[i]);
+ }
+
+ for (i = 0; i < pool->base.stream_enc_count; i++) {
+@@ -899,6 +935,14 @@ static bool dce80_construct(
+ dm_error("DC: failed to create output pixel processor!\n");
+ goto res_create_fail;
+ }
++
++ pool->base.engines[i] = dce80_aux_engine_create(ctx, i);
++ if (pool->base.engines[i] == NULL) {
++ BREAK_TO_DEBUGGER();
++ dm_error(
++ "DC:failed to create aux engine!!\n");
++ goto res_create_fail;
++ }
+ }
+
+ dc->caps.max_planes = pool->base.pipe_count;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+index f9246d4..3f793a3 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+@@ -64,6 +64,7 @@
+ #include "reg_helper.h"
+ #include "dce/dce_abm.h"
+ #include "dce/dce_dmcu.h"
++#include "dce/dce_aux.h"
+
+ const struct _vcs_dpi_ip_params_st dcn1_0_ip = {
+ .rob_buffer_size_kbytes = 64,
+@@ -356,6 +357,21 @@ static const struct dcn10_opp_mask opp_mask = {
+ OPP_MASK_SH_LIST_DCN10(_MASK),
+ };
+
++#define aux_engine_regs(id)\
++[id] = {\
++ AUX_COMMON_REG_LIST(id), \
++ .AUX_RESET_MASK = 0 \
++}
++
++static const struct dce110_aux_registers aux_engine_regs[] = {
++ aux_engine_regs(0),
++ aux_engine_regs(1),
++ aux_engine_regs(2),
++ aux_engine_regs(3),
++ aux_engine_regs(4),
++ aux_engine_regs(5)
++};
++
+ #define tf_regs(id)\
+ [id] = {\
+ TF_REG_LIST_DCN10(id),\
+@@ -578,6 +594,23 @@ static struct output_pixel_processor *dcn10_opp_create(
+ return &opp->base;
+ }
+
++struct engine *dcn10_aux_engine_create(
++ struct dc_context *ctx,
++ uint32_t inst)
++{
++ struct aux_engine_dce110 *aux_engine =
++ kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
++
++ if (!aux_engine)
++ return NULL;
++
++ dce110_aux_engine_construct(aux_engine, ctx, inst,
++ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
++ &aux_engine_regs[inst]);
++
++ return &aux_engine->base.base;
++}
++
+ static struct mpc *dcn10_mpc_create(struct dc_context *ctx)
+ {
+ struct dcn10_mpc *mpc10 = kzalloc(sizeof(struct dcn10_mpc),
+@@ -826,6 +859,9 @@ static void destruct(struct dcn10_resource_pool *pool)
+ kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
+ pool->base.timing_generators[i] = NULL;
+ }
++
++ if (pool->base.engines[i] != NULL)
++ pool->base.engines[i]->funcs->destroy_engine(&pool->base.engines[i]);
+ }
+
+ for (i = 0; i < pool->base.stream_enc_count; i++)
+@@ -1257,6 +1293,14 @@ static bool construct(
+ goto fail;
+ }
+
++ pool->base.engines[i] = dcn10_aux_engine_create(ctx, i);
++ if (pool->base.engines[i] == NULL) {
++ BREAK_TO_DEBUGGER();
++ dm_error(
++ "DC:failed to create aux engine!!\n");
++ goto fail;
++ }
++
+ /* check next valid pipe */
+ j++;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/engine.h
+index 1e8a158..b16fb1f 100644
+--- a/drivers/gpu/drm/amd/display/dc/i2caux/engine.h
++++ b/drivers/gpu/drm/amd/display/dc/i2caux/engine.h
+@@ -96,6 +96,7 @@ struct engine_funcs {
+
+ struct engine {
+ const struct engine_funcs *funcs;
++ uint32_t inst;
+ struct ddc *ddc;
+ struct dc_context *ctx;
+ };
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+index 816da02..3b7e9aa 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+@@ -138,7 +138,7 @@ struct resource_pool {
+ struct output_pixel_processor *opps[MAX_PIPES];
+ struct timing_generator *timing_generators[MAX_PIPES];
+ struct stream_encoder *stream_enc[MAX_PIPES * 2];
+-
++ struct engine *engines[MAX_PIPES];
+ struct hubbub *hubbub;
+ struct mpc *mpc;
+ struct pp_smu_funcs_rv *pp_smu;
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
+new file mode 100644
+index 0000000..06d7e5d
+--- /dev/null
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
+@@ -0,0 +1,113 @@
++/*
++ * Copyright 2012-15 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++#ifndef __DAL_AUX_ENGINE_H__
++#define __DAL_AUX_ENGINE_H__
++
++#include "engine.h"
++#include "include/i2caux_interface.h"
++
++struct aux_engine;
++union aux_config;
++struct aux_engine_funcs {
++ void (*destroy)(
++ struct aux_engine **ptr);
++ bool (*acquire_engine)(
++ struct aux_engine *engine);
++ void (*configure)(
++ struct aux_engine *engine,
++ union aux_config cfg);
++ void (*submit_channel_request)(
++ struct aux_engine *engine,
++ struct aux_request_transaction_data *request);
++ void (*process_channel_reply)(
++ struct aux_engine *engine,
++ struct aux_reply_transaction_data *reply);
++ int (*read_channel_reply)(
++ struct aux_engine *engine,
++ uint32_t size,
++ uint8_t *buffer,
++ uint8_t *reply_result,
++ uint32_t *sw_status);
++ enum aux_channel_operation_result (*get_channel_status)(
++ struct aux_engine *engine,
++ uint8_t *returned_bytes);
++ bool (*is_engine_available)(struct aux_engine *engine);
++};
++struct engine;
++struct aux_engine {
++ struct engine base;
++ const struct aux_engine_funcs *funcs;
++ /* following values are expressed in milliseconds */
++ uint32_t delay;
++ uint32_t max_defer_write_retry;
++
++ bool acquire_reset;
++};
++struct read_command_context {
++ uint8_t *buffer;
++ uint32_t current_read_length;
++ uint32_t offset;
++ enum i2caux_transaction_status status;
++
++ struct aux_request_transaction_data request;
++ struct aux_reply_transaction_data reply;
++
++ uint8_t returned_byte;
++
++ uint32_t timed_out_retry_aux;
++ uint32_t invalid_reply_retry_aux;
++ uint32_t defer_retry_aux;
++ uint32_t defer_retry_i2c;
++ uint32_t invalid_reply_retry_aux_on_ack;
++
++ bool transaction_complete;
++ bool operation_succeeded;
++};
++struct write_command_context {
++ bool mot;
++
++ uint8_t *buffer;
++ uint32_t current_write_length;
++ enum i2caux_transaction_status status;
++
++ struct aux_request_transaction_data request;
++ struct aux_reply_transaction_data reply;
++
++ uint8_t returned_byte;
++
++ uint32_t timed_out_retry_aux;
++ uint32_t invalid_reply_retry_aux;
++ uint32_t defer_retry_aux;
++ uint32_t defer_retry_i2c;
++ uint32_t max_defer_retry;
++ uint32_t ack_m_retry;
++
++ uint8_t reply_data[DEFAULT_AUX_MAX_DATA_SIZE];
++
++ bool transaction_complete;
++ bool operation_succeeded;
++};
++#endif
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/engine.h b/drivers/gpu/drm/amd/display/dc/inc/hw/engine.h
+new file mode 100644
+index 0000000..1f5476f
+--- /dev/null
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/engine.h
+@@ -0,0 +1,106 @@
++/*
++ * Copyright 2012-15 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++#ifndef __DAL_ENGINE_H__
++#define __DAL_ENGINE_H__
++
++#include "dc_ddc_types.h"
++
++enum i2caux_transaction_operation {
++ I2CAUX_TRANSACTION_READ,
++ I2CAUX_TRANSACTION_WRITE
++};
++
++enum i2caux_transaction_address_space {
++ I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C = 1,
++ I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD
++};
++
++struct i2caux_transaction_payload {
++ enum i2caux_transaction_address_space address_space;
++ uint32_t address;
++ uint32_t length;
++ uint8_t *data;
++};
++
++enum i2caux_transaction_status {
++ I2CAUX_TRANSACTION_STATUS_UNKNOWN = (-1L),
++ I2CAUX_TRANSACTION_STATUS_SUCCEEDED,
++ I2CAUX_TRANSACTION_STATUS_FAILED_CHANNEL_BUSY,
++ I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT,
++ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR,
++ I2CAUX_TRANSACTION_STATUS_FAILED_NACK,
++ I2CAUX_TRANSACTION_STATUS_FAILED_INCOMPLETE,
++ I2CAUX_TRANSACTION_STATUS_FAILED_OPERATION,
++ I2CAUX_TRANSACTION_STATUS_FAILED_INVALID_OPERATION,
++ I2CAUX_TRANSACTION_STATUS_FAILED_BUFFER_OVERFLOW,
++ I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON
++};
++
++struct i2caux_transaction_request {
++ enum i2caux_transaction_operation operation;
++ struct i2caux_transaction_payload payload;
++ enum i2caux_transaction_status status;
++};
++
++enum i2caux_engine_type {
++ I2CAUX_ENGINE_TYPE_UNKNOWN = (-1L),
++ I2CAUX_ENGINE_TYPE_AUX,
++ I2CAUX_ENGINE_TYPE_I2C_DDC_HW,
++ I2CAUX_ENGINE_TYPE_I2C_GENERIC_HW,
++ I2CAUX_ENGINE_TYPE_I2C_SW
++};
++
++enum i2c_default_speed {
++ I2CAUX_DEFAULT_I2C_HW_SPEED = 50,
++ I2CAUX_DEFAULT_I2C_SW_SPEED = 50
++};
++
++struct engine;
++
++struct engine_funcs {
++ enum i2caux_engine_type (*get_engine_type)(
++ const struct engine *engine);
++ struct aux_engine* (*acquire)(
++ struct engine *engine,
++ struct ddc *ddc);
++ bool (*submit_request)(
++ struct engine *engine,
++ struct i2caux_transaction_request *request,
++ bool middle_of_transaction);
++ void (*release_engine)(
++ struct engine *engine);
++ void (*destroy_engine)(
++ struct engine **engine);
++};
++
++struct engine {
++ const struct engine_funcs *funcs;
++ uint32_t inst;
++ struct ddc *ddc;
++ struct dc_context *ctx;
++};
++
++#endif
+--
+2.7.4
+
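A hedged sketch of how the pieces added by this patch fit together (illustrative only; the helper name, the include path and the simplified error handling are assumptions, not code from the patch): a caller acquires the AUX engine through the engine vtable, fills an i2caux_transaction_request, submits it, and releases the engine.

/* assumes the headers added by this patch: dc/inc/hw/engine.h and aux_engine.h */
#include "engine.h"

/* hypothetical helper, not part of the patch: one native-AUX DPCD read */
static bool example_dpcd_read(struct engine *eng, struct ddc *ddc,
			      uint32_t address, uint8_t *buf, uint32_t len)
{
	struct aux_engine *aux = eng->funcs->acquire(eng, ddc);
	struct i2caux_transaction_request req = {
		.operation = I2CAUX_TRANSACTION_READ,
		.payload = {
			.address_space = I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD,
			.address = address,
			.length = len,
			.data = buf,
		},
	};

	if (!aux)
		return false;

	/* not in the middle of a larger transaction, so MOT is false */
	if (!eng->funcs->submit_request(eng, &req, false)) {
		eng->funcs->release_engine(eng);
		return false;
	}

	eng->funcs->release_engine(eng);
	return true;
}

Note that submit_request() itself issues the I2C stop (a read with MOT = 0) via end_of_transaction_command() when middle_of_transaction is false, so the caller does not send a separate stop for I2C-over-AUX.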
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5002-drm-amd-display-separate-dc_debug-into-dc_debug_opti.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5002-drm-amd-display-separate-dc_debug-into-dc_debug_opti.patch
new file mode 100644
index 00000000..0fef3094
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5002-drm-amd-display-separate-dc_debug-into-dc_debug_opti.patch
@@ -0,0 +1,161 @@
+From 706ed1f09b2ceca82d44586420aebf2d8cd7f0fb Mon Sep 17 00:00:00 2001
+From: Jun Lei <Jun.Lei@amd.com>
+Date: Thu, 12 Jul 2018 10:35:01 -0400
+Subject: [PATCH 5002/5725] drm/amd/display: separate dc_debug into
+ dc_debug_options and dc_debug data
+
+[why]
+confusing as to which part of debug is informational, and which part causes behavioral change
+
+Signed-off-by: Jun Lei <Jun.Lei@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 2 +-
+ drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 2 +-
+ drivers/gpu/drm/amd/display/dc/dc.h | 19 +++++++++++--------
+ drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c | 4 ++--
+ .../gpu/drm/amd/display/dc/dce120/dce120_resource.c | 2 +-
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 4 ++--
+ 6 files changed, 18 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+index 080f777..bd03932 100644
+--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
++++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+@@ -676,7 +676,7 @@ static void hack_force_pipe_split(struct dcn_bw_internal_vars *v,
+ }
+
+ static void hack_bounding_box(struct dcn_bw_internal_vars *v,
+- struct dc_debug *dbg,
++ struct dc_debug_options *dbg,
+ struct dc_state *context)
+ {
+ if (dbg->pipe_split_policy == MPC_SPLIT_AVOID)
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+index bd40831..b9db6e6 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+@@ -1029,7 +1029,7 @@ enum link_training_result dc_link_dp_perform_link_training(
+ lt_settings.lane_settings[0].PRE_EMPHASIS);
+
+ if (status != LINK_TRAINING_SUCCESS)
+- link->ctx->dc->debug.debug_data.ltFailCount++;
++ link->ctx->dc->debug_data.ltFailCount++;
+
+ return status;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 7503dcf..2a3b327 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -208,7 +208,7 @@ struct dc_clocks {
+ int phyclk_khz;
+ };
+
+-struct dc_debug {
++struct dc_debug_options {
+ enum visual_confirm visual_confirm;
+ bool sanity_checks;
+ bool max_disp_clk;
+@@ -260,13 +260,15 @@ struct dc_debug {
+ bool scl_reset_length10;
+ bool hdmi20_disable;
+ bool skip_detection_link_training;
++};
+
+- struct {
+- uint32_t ltFailCount;
+- uint32_t i2cErrorCount;
+- uint32_t auxErrorCount;
+- } debug_data;
++struct dc_debug_data {
++ uint32_t ltFailCount;
++ uint32_t i2cErrorCount;
++ uint32_t auxErrorCount;
+ };
++
++
+ struct dc_state;
+ struct resource_pool;
+ struct dce_hwseq;
+@@ -275,8 +277,7 @@ struct dc {
+ struct dc_caps caps;
+ struct dc_cap_funcs cap_funcs;
+ struct dc_config config;
+- struct dc_debug debug;
+-
++ struct dc_debug_options debug;
+ struct dc_context *ctx;
+
+ uint8_t link_count;
+@@ -312,6 +313,8 @@ struct dc {
+
+ /* FBC compressor */
+ struct compressor *fbc_compressor;
++
++ struct dc_debug_data debug_data;
+ };
+
+ enum frame_buffer_mode {
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+index 8f8a2ab..0db8d1d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+@@ -337,7 +337,7 @@ static int dce112_set_clock(
+
+ static void dce_clock_read_integrated_info(struct dce_dccg *clk_dce)
+ {
+- struct dc_debug *debug = &clk_dce->base.ctx->dc->debug;
++ struct dc_debug_options *debug = &clk_dce->base.ctx->dc->debug;
+ struct dc_bios *bp = clk_dce->base.ctx->dc_bios;
+ struct integrated_info info = { { { 0 } } };
+ struct dc_firmware_info fw_info = { { 0 } };
+@@ -824,7 +824,7 @@ struct dccg *dce120_dccg_create(struct dc_context *ctx)
+ #ifdef CONFIG_X86
+ struct dccg *dcn1_dccg_create(struct dc_context *ctx)
+ {
+- struct dc_debug *debug = &ctx->dc->debug;
++ struct dc_debug_options *debug = &ctx->dc->debug;
+ struct dc_bios *bp = ctx->dc_bios;
+ struct dc_firmware_info fw_info = { { 0 } };
+ struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
+diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+index e389832..f7d02f2 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+@@ -404,7 +404,7 @@ static const struct resource_caps res_cap = {
+ .num_pll = 6,
+ };
+
+-static const struct dc_debug debug_defaults = {
++static const struct dc_debug_options debug_defaults = {
+ .disable_clock_gate = true,
+ };
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+index 3f793a3..c39934f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+@@ -502,7 +502,7 @@ static const struct resource_caps res_cap = {
+ .num_pll = 4,
+ };
+
+-static const struct dc_debug debug_defaults_drv = {
++static const struct dc_debug_options debug_defaults_drv = {
+ .sanity_checks = true,
+ .disable_dmcu = true,
+ .force_abm_enable = false,
+@@ -530,7 +530,7 @@ static const struct dc_debug debug_defaults_drv = {
+ .max_downscale_src_width = 3840,
+ };
+
+-static const struct dc_debug debug_defaults_diags = {
++static const struct dc_debug_options debug_defaults_diags = {
+ .disable_dmcu = true,
+ .force_abm_enable = false,
+ .timing_trace = true,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5003-drm-amd-display-DC-3.1.58.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5003-drm-amd-display-DC-3.1.58.patch
new file mode 100644
index 00000000..2bdd3884
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5003-drm-amd-display-DC-3.1.58.patch
@@ -0,0 +1,29 @@
+From e0ba6d08616ea4ed89c2412d498d2bdc4f0b1b13 Mon Sep 17 00:00:00 2001
+From: Harry Wentland <harry.wentland@amd.com>
+Date: Mon, 9 Jul 2018 17:25:42 -0400
+Subject: [PATCH 5003/5725] drm/amd/display: DC 3.1.58
+
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 2a3b327..49b4da4 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -38,7 +38,7 @@
+ #include "inc/compressor.h"
+ #include "dml/display_mode_lib.h"
+
+-#define DC_VER "3.1.56"
++#define DC_VER "3.1.58"
+
+ #define MAX_SURFACES 3
+ #define MAX_STREAMS 6
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5004-drm-amdgpu-clean-up-coding-style-a-bit.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5004-drm-amdgpu-clean-up-coding-style-a-bit.patch
new file mode 100644
index 00000000..55f75b4c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5004-drm-amdgpu-clean-up-coding-style-a-bit.patch
@@ -0,0 +1,98 @@
+From 1db179cc9529049fdaa369e34149078a3dc703e8 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Wed, 18 Jul 2018 13:58:16 +0200
+Subject: [PATCH 5004/5725] drm/amdgpu: clean up coding style a bit
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+No need to bitcast a boolean, and even if we did, we should use "!!" instead.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index 4cd3317..e34c1b9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -342,35 +342,35 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+ case AMDGPU_HW_IP_GFX:
+ type = AMD_IP_BLOCK_TYPE_GFX;
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++)
+- ring_mask |= ((adev->gfx.gfx_ring[i].ready ? 1 : 0) << i);
++ ring_mask |= adev->gfx.gfx_ring[i].ready << i;
+ ib_start_alignment = 32;
+ ib_size_alignment = 32;
+ break;
+ case AMDGPU_HW_IP_COMPUTE:
+ type = AMD_IP_BLOCK_TYPE_GFX;
+ for (i = 0; i < adev->gfx.num_compute_rings; i++)
+- ring_mask |= ((adev->gfx.compute_ring[i].ready ? 1 : 0) << i);
++ ring_mask |= adev->gfx.compute_ring[i].ready << i;
+ ib_start_alignment = 32;
+ ib_size_alignment = 32;
+ break;
+ case AMDGPU_HW_IP_DMA:
+ type = AMD_IP_BLOCK_TYPE_SDMA;
+ for (i = 0; i < adev->sdma.num_instances; i++)
+- ring_mask |= ((adev->sdma.instance[i].ring.ready ? 1 : 0) << i);
++ ring_mask |= adev->sdma.instance[i].ring.ready << i;
+ ib_start_alignment = 256;
+ ib_size_alignment = 4;
+ break;
+ case AMDGPU_HW_IP_UVD:
+ type = AMD_IP_BLOCK_TYPE_UVD;
+ for (i = 0; i < adev->uvd.num_uvd_inst; i++)
+- ring_mask |= ((adev->uvd.inst[i].ring.ready ? 1 : 0) << i);
++ ring_mask |= adev->uvd.inst[i].ring.ready << i;
+ ib_start_alignment = 64;
+ ib_size_alignment = 64;
+ break;
+ case AMDGPU_HW_IP_VCE:
+ type = AMD_IP_BLOCK_TYPE_VCE;
+ for (i = 0; i < adev->vce.num_rings; i++)
+- ring_mask |= ((adev->vce.ring[i].ready ? 1 : 0) << i);
++ ring_mask |= adev->vce.ring[i].ready << i;
+ ib_start_alignment = 4;
+ ib_size_alignment = 1;
+ break;
+@@ -379,27 +379,27 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+ for (i = 0; i < adev->uvd.num_uvd_inst; i++)
+ for (j = 0; j < adev->uvd.num_enc_rings; j++)
+ ring_mask |=
+- ((adev->uvd.inst[i].ring_enc[j].ready ? 1 : 0) <<
+- (j + i * adev->uvd.num_enc_rings));
++ adev->uvd.inst[i].ring_enc[j].ready <<
++ (j + i * adev->uvd.num_enc_rings);
+ ib_start_alignment = 64;
+ ib_size_alignment = 64;
+ break;
+ case AMDGPU_HW_IP_VCN_DEC:
+ type = AMD_IP_BLOCK_TYPE_VCN;
+- ring_mask = adev->vcn.ring_dec.ready ? 1 : 0;
++ ring_mask = adev->vcn.ring_dec.ready;
+ ib_start_alignment = 16;
+ ib_size_alignment = 16;
+ break;
+ case AMDGPU_HW_IP_VCN_ENC:
+ type = AMD_IP_BLOCK_TYPE_VCN;
+ for (i = 0; i < adev->vcn.num_enc_rings; i++)
+- ring_mask |= ((adev->vcn.ring_enc[i].ready ? 1 : 0) << i);
++ ring_mask |= adev->vcn.ring_enc[i].ready << i;
+ ib_start_alignment = 64;
+ ib_size_alignment = 1;
+ break;
+ case AMDGPU_HW_IP_VCN_JPEG:
+ type = AMD_IP_BLOCK_TYPE_VCN;
+- ring_mask = adev->vcn.ring_jpeg.ready ? 1 : 0;
++ ring_mask = adev->vcn.ring_jpeg.ready;
+ ib_start_alignment = 16;
+ ib_size_alignment = 16;
+ break;
+--
+2.7.4
+
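The cleanup above works because the ring 'ready' flags are plain booleans; as the commit message notes, a value that is not already 0 or 1 should be normalized with "!!" rather than with a ternary. A small self-contained illustration with hypothetical values (not kernel code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	bool ready = true;          /* a real bool already evaluates to 0 or 1 */
	uint32_t hw_status = 0x80;  /* non-zero means "ready" */
	uint32_t ring_mask = 0;

	ring_mask |= ready << 0;        /* no "(x ? 1 : 0)" needed */
	ring_mask |= !!hw_status << 1;  /* "!!" normalizes a plain integer to 0/1 */

	printf("ring_mask = 0x%x\n", ring_mask); /* prints 0x3 */
	return 0;
}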
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5005-drm-amdgpu-expose-only-the-first-UVD-instance-for-no.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5005-drm-amdgpu-expose-only-the-first-UVD-instance-for-no.patch
new file mode 100644
index 00000000..da3c8e5e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5005-drm-amdgpu-expose-only-the-first-UVD-instance-for-no.patch
@@ -0,0 +1,94 @@
+From 2333a46a57fecafb64f70a149c8fc5b2651decea Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Wed, 18 Jul 2018 14:17:59 +0200
+Subject: [PATCH 5005/5725] drm/amdgpu: expose only the first UVD instance for
+ now
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The context-to-ring mapping is going to be completely reworked with Nayan's
+GSoC work, but for now simply no longer exposing the second UVD instance
+should do it.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 13 +++++--------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c | 9 ++-------
+ 2 files changed, 7 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index e34c1b9..5aab580 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -283,7 +283,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+ struct drm_crtc *crtc;
+ uint32_t ui32 = 0;
+ uint64_t ui64 = 0;
+- int i, j, found;
++ int i, found;
+ int ui32_size = sizeof(ui32);
+
+ if (!info->return_size || !info->return_pointer)
+@@ -362,8 +362,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+ break;
+ case AMDGPU_HW_IP_UVD:
+ type = AMD_IP_BLOCK_TYPE_UVD;
+- for (i = 0; i < adev->uvd.num_uvd_inst; i++)
+- ring_mask |= adev->uvd.inst[i].ring.ready << i;
++ ring_mask |= adev->uvd.inst[0].ring.ready;
+ ib_start_alignment = 64;
+ ib_size_alignment = 64;
+ break;
+@@ -376,11 +375,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+ break;
+ case AMDGPU_HW_IP_UVD_ENC:
+ type = AMD_IP_BLOCK_TYPE_UVD;
+- for (i = 0; i < adev->uvd.num_uvd_inst; i++)
+- for (j = 0; j < adev->uvd.num_enc_rings; j++)
+- ring_mask |=
+- adev->uvd.inst[i].ring_enc[j].ready <<
+- (j + i * adev->uvd.num_enc_rings);
++ for (i = 0; i < adev->uvd.num_enc_rings; i++)
++ ring_mask |=
++ adev->uvd.inst[0].ring_enc[i].ready << i;
+ ib_start_alignment = 64;
+ ib_size_alignment = 64;
+ break;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
+index ea9850c..d835729 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
+@@ -66,8 +66,6 @@ static int amdgpu_identity_map(struct amdgpu_device *adev,
+ u32 ring,
+ struct amdgpu_ring **out_ring)
+ {
+- u32 instance;
+-
+ switch (mapper->hw_ip) {
+ case AMDGPU_HW_IP_GFX:
+ *out_ring = &adev->gfx.gfx_ring[ring];
+@@ -79,16 +77,13 @@ static int amdgpu_identity_map(struct amdgpu_device *adev,
+ *out_ring = &adev->sdma.instance[ring].ring;
+ break;
+ case AMDGPU_HW_IP_UVD:
+- instance = ring;
+- *out_ring = &adev->uvd.inst[instance].ring;
++ *out_ring = &adev->uvd.inst[0].ring;
+ break;
+ case AMDGPU_HW_IP_VCE:
+ *out_ring = &adev->vce.ring[ring];
+ break;
+ case AMDGPU_HW_IP_UVD_ENC:
+- instance = ring / adev->uvd.num_enc_rings;
+- *out_ring =
+- &adev->uvd.inst[instance].ring_enc[ring%adev->uvd.num_enc_rings];
++ *out_ring = &adev->uvd.inst[0].ring_enc[ring];
+ break;
+ case AMDGPU_HW_IP_VCN_DEC:
+ *out_ring = &adev->vcn.ring_dec;
+--
+2.7.4
+
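
Hiding the second instance only changes what the ring mask reports to user space; a client still just counts the set bits to learn how many rings it may submit to. A hedged sketch of that interpretation (available_rings follows the uapi struct drm_amdgpu_info_hw_ip; the mask value is made up):

#include <stdint.h>
#include <stdio.h>

/* Count usable rings from the mask AMDGPU_INFO_HW_IP_INFO returns in
 * available_rings. After this patch the UVD masks only ever carry bits
 * for instance 0. */
static unsigned int rings_available(uint32_t ring_mask)
{
	return (unsigned int)__builtin_popcount(ring_mask);
}

int main(void)
{
	uint32_t uvd_enc_mask = 0x3; /* e.g. two ready encode rings, instance 0 */

	printf("UVD_ENC rings exposed: %u\n", rings_available(uvd_enc_mask));
	return 0;
}
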
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5006-drm-amdgpu-consistenly-name-amdgpu_bo_-functions.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5006-drm-amdgpu-consistenly-name-amdgpu_bo_-functions.patch
new file mode 100644
index 00000000..37d4cec7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5006-drm-amdgpu-consistenly-name-amdgpu_bo_-functions.patch
@@ -0,0 +1,428 @@
+From 6cff8c430599dee4de77c77dc65fc06a244ef451 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Mon, 16 Jul 2018 16:12:24 +0200
+Subject: [PATCH 5006/5725] drm/amdgpu: consistenly name amdgpu_bo_ functions
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Just rename functions, no functional change.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 --
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 8 ++---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 10 +++----
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 34 ++++++++++-----------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 3 ++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 38 ++++++++++++------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 10 +++----
+ drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 4 +--
+ 10 files changed, 57 insertions(+), 56 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index d83a206..cbe4336 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -1868,8 +1868,6 @@ void amdgpu_display_update_priority(struct amdgpu_device *adev);
+
+ void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
+ u64 num_vis_bytes);
+-void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
+-bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
+ void amdgpu_device_vram_location(struct amdgpu_device *adev,
+ struct amdgpu_gmc *mc, u64 base);
+ void amdgpu_device_gart_location(struct amdgpu_device *adev,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index 1c616bd..4c0f2a3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -328,7 +328,7 @@ static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
+ "Called with userptr BO"))
+ return -EINVAL;
+
+- amdgpu_ttm_placement_from_domain(bo, domain);
++ amdgpu_bo_placement_from_domain(bo, domain);
+
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (ret)
+@@ -613,7 +613,7 @@ static int init_user_pages(struct kgd_mem *mem, struct mm_struct *mm,
+ pr_err("%s: Failed to reserve BO\n", __func__);
+ goto release_out;
+ }
+- amdgpu_ttm_placement_from_domain(bo, mem->domain);
++ amdgpu_bo_placement_from_domain(bo, mem->domain);
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (ret)
+ pr_err("%s: failed to validate BO\n", __func__);
+@@ -1977,7 +1977,7 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
+
+ if (amdgpu_bo_reserve(bo, true))
+ return -EAGAIN;
+- amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
++ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ amdgpu_bo_unreserve(bo);
+ if (ret) {
+@@ -2121,7 +2121,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
+ if (mem->user_pages[0]) {
+ amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
+ mem->user_pages);
+- amdgpu_ttm_placement_from_domain(bo, mem->domain);
++ amdgpu_bo_placement_from_domain(bo, mem->domain);
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (ret) {
+ pr_err("%s: failed to validate BO\n", __func__);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index 4d68c37..15fa375 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -419,7 +419,7 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
+ }
+
+ retry:
+- amdgpu_ttm_placement_from_domain(bo, domain);
++ amdgpu_bo_placement_from_domain(bo, domain);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+
+ p->bytes_moved += ctx.bytes_moved;
+@@ -478,7 +478,7 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
+ update_bytes_moved_vis =
+ !amdgpu_gmc_vram_full_visible(&adev->gmc) &&
+ amdgpu_bo_in_cpu_visible_vram(bo);
+- amdgpu_ttm_placement_from_domain(bo, other);
++ amdgpu_bo_placement_from_domain(bo, other);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ p->bytes_moved += ctx.bytes_moved;
+ if (update_bytes_moved_vis)
+@@ -532,8 +532,8 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
+ /* Check if we have user pages and nobody bound the BO already */
+ if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
+ lobj->user_pages) {
+- amdgpu_ttm_placement_from_domain(bo,
+- AMDGPU_GEM_DOMAIN_CPU);
++ amdgpu_bo_placement_from_domain(bo,
++ AMDGPU_GEM_DOMAIN_CPU);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (r)
+ return r;
+@@ -1657,7 +1657,7 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
+
+ if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
+ (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+- amdgpu_ttm_placement_from_domain(*bo, (*bo)->allowed_domains);
++ amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
+ r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
+ if (r)
+ return r;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+index 868832e..6171e03 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+@@ -437,7 +437,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
+ if (r)
+ goto free_pages;
+
+- amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
++ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ amdgpu_bo_unreserve(bo);
+ if (r)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index 352d6b1..e61d122 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -51,7 +51,7 @@
+ *
+ */
+
+-static bool amdgpu_need_backup(struct amdgpu_device *adev)
++static bool amdgpu_bo_need_backup(struct amdgpu_device *adev)
+ {
+ if (adev->flags & AMD_IS_APU)
+ return false;
+@@ -84,7 +84,7 @@ static void amdgpu_bo_subtract_pin_size(struct amdgpu_bo *bo)
+ }
+ }
+
+-static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
++static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
+ {
+ struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
+ struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
+@@ -115,7 +115,7 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
+ }
+
+ /**
+- * amdgpu_ttm_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
++ * amdgpu_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
+ * @bo: buffer object to be checked
+ *
+ * Uses destroy function associated with the object to determine if this is
+@@ -124,22 +124,22 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
+ * Returns:
+ * true if the object belongs to &amdgpu_bo, false if not.
+ */
+-bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
++bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
+ {
+- if (bo->destroy == &amdgpu_ttm_bo_destroy)
++ if (bo->destroy == &amdgpu_bo_destroy)
+ return true;
+ return false;
+ }
+
+ /**
+- * amdgpu_ttm_placement_from_domain - set buffer's placement
++ * amdgpu_bo_placement_from_domain - set buffer's placement
+ * @abo: &amdgpu_bo buffer object whose placement is to be set
+ * @domain: requested domain
+ *
+ * Sets buffer's placement according to requested domain and the buffer's
+ * flags.
+ */
+-void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
++void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
+ {
+ struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
+ struct ttm_placement *placement = &abo->placement;
+@@ -511,13 +511,13 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
+ #endif
+
+ bo->tbo.bdev = &adev->mman.bdev;
+- amdgpu_ttm_placement_from_domain(bo, bp->domain);
++ amdgpu_bo_placement_from_domain(bo, bp->domain);
+ if (bp->type == ttm_bo_type_kernel)
+ bo->tbo.priority = 1;
+
+ r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
+ &bo->placement, page_align, &ctx, acc_size,
+- NULL, bp->resv, &amdgpu_ttm_bo_destroy);
++ NULL, bp->resv, &amdgpu_bo_destroy);
+
+ if (unlikely(r != 0))
+ return r;
+@@ -635,7 +635,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
+ if (r)
+ return r;
+
+- if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_need_backup(adev)) {
++ if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_bo_need_backup(adev)) {
+ if (!bp->resv)
+ WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv,
+ NULL));
+@@ -723,7 +723,7 @@ int amdgpu_bo_validate(struct amdgpu_bo *bo)
+ domain = bo->preferred_domains;
+
+ retry:
+- amdgpu_ttm_placement_from_domain(bo, domain);
++ amdgpu_bo_placement_from_domain(bo, domain);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
+ domain = bo->allowed_domains;
+@@ -956,7 +956,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
+ /* force to pin into visible video ram */
+ if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
+ bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+- amdgpu_ttm_placement_from_domain(bo, domain);
++ amdgpu_bo_placement_from_domain(bo, domain);
+ for (i = 0; i < bo->placement.num_placement; i++) {
+ unsigned fpfn, lpfn;
+
+@@ -1287,7 +1287,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
+ struct amdgpu_bo *abo;
+ struct ttm_mem_reg *old_mem = &bo->mem;
+
+- if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
++ if (!amdgpu_bo_is_amdgpu_bo(bo))
+ return;
+
+ abo = ttm_to_amdgpu_bo(bo);
+@@ -1304,7 +1304,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
+ return;
+
+ /* move_notify is called before move happens */
+- trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
++ trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
+ }
+
+ /**
+@@ -1326,7 +1326,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+ unsigned long offset, size;
+ int r;
+
+- if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
++ if (!amdgpu_bo_is_amdgpu_bo(bo))
+ return 0;
+
+ abo = ttm_to_amdgpu_bo(bo);
+@@ -1348,8 +1348,8 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+
+ /* hurrah the memory is not visible ! */
+ atomic64_inc(&adev->num_vram_cpu_page_faults);
+- amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
+- AMDGPU_GEM_DOMAIN_GTT);
++ amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
++ AMDGPU_GEM_DOMAIN_GTT);
+
+ /* Avoid costly evictions; only set GTT as a busy placement */
+ abo->placement.num_busy_placement = 1;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+index 2c4fca6..93c4325 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+@@ -242,6 +242,9 @@ static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
+ return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
+ }
+
++bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
++void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
++
+ int amdgpu_bo_create(struct amdgpu_device *adev,
+ struct amdgpu_bo_param *bp,
+ struct amdgpu_bo **bo_ptr);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+index 81d3788..e6ef562 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+@@ -309,31 +309,31 @@ struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
+ * 0 on success or negative error code.
+ */
+ static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
+- enum dma_data_direction direction)
++ enum dma_data_direction direction)
+ {
+- struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
+- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+- struct ttm_operation_ctx ctx = { true, false };
++ struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
++ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
++ struct ttm_operation_ctx ctx = { true, false };
+ u32 domain = amdgpu_display_supported_domains(adev);
+- int ret;
+- bool reads = (direction == DMA_BIDIRECTIONAL ||
+- direction == DMA_FROM_DEVICE);
++ int ret;
++ bool reads = (direction == DMA_BIDIRECTIONAL ||
++ direction == DMA_FROM_DEVICE);
+
+- if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
+- return 0;
++ if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
++ return 0;
+
+- /* move to gtt */
+- ret = amdgpu_bo_reserve(bo, false);
+- if (unlikely(ret != 0))
+- return ret;
++ /* move to gtt */
++ ret = amdgpu_bo_reserve(bo, false);
++ if (unlikely(ret != 0))
++ return ret;
+
+- if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
+- amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+- ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+- }
++ if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
++ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
++ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
++ }
+
+- amdgpu_bo_unreserve(bo);
+- return ret;
++ amdgpu_bo_unreserve(bo);
++ return ret;
+ }
+
+ const struct dma_buf_ops amdgpu_dmabuf_ops = {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+index 7692003..11f262f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+@@ -436,7 +436,7 @@ TRACE_EVENT(amdgpu_cs_bo_status,
+ __entry->total_bo, __entry->total_size)
+ );
+
+-TRACE_EVENT(amdgpu_ttm_bo_move,
++TRACE_EVENT(amdgpu_bo_move,
+ TP_PROTO(struct amdgpu_bo* bo, uint32_t new_placement, uint32_t old_placement),
+ TP_ARGS(bo, new_placement, old_placement),
+ TP_STRUCT__entry(
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index 5adaefc..a164c52 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -265,7 +265,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
+ }
+
+ /* Object isn't an AMDGPU object so ignore */
+- if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
++ if (!amdgpu_bo_is_amdgpu_bo(bo)) {
+ placement->placement = &placements;
+ placement->busy_placement = &placements;
+ placement->num_placement = 1;
+@@ -279,7 +279,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
+ case AMDGPU_PL_DGMA:
+ if (!adev->mman.buffer_funcs_enabled) {
+ /* Move to system memory */
+- amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
++ amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
+ } else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
+ !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
+ amdgpu_bo_in_cpu_visible_vram(abo)) {
+@@ -289,7 +289,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
+ * BO will be evicted to GTT rather than causing other
+ * BOs to be evicted from VRAM
+ */
+- amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
++ amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT);
+ abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
+ abo->placements[0].lpfn = 0;
+@@ -297,13 +297,13 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
+ abo->placement.num_busy_placement = 1;
+ } else {
+ /* Move to GTT memory */
+- amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
++ amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
+ }
+ break;
+ case TTM_PL_TT:
+ case AMDGPU_PL_DGMA_IMPORT:
+ default:
+- amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
++ amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
+ }
+ *placement = abo->placement;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+index 3536198..420a533 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+@@ -478,7 +478,7 @@ static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
+ if (cmd == 0x0 || cmd == 0x3) {
+ /* yes, force it into VRAM */
+ uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
+- amdgpu_ttm_placement_from_domain(bo, domain);
++ amdgpu_bo_placement_from_domain(bo, domain);
+ }
+ amdgpu_uvd_force_into_uvd_segment(bo);
+
+@@ -1019,7 +1019,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
+ if (!ring->adev->uvd.address_64_bit) {
+ struct ttm_operation_ctx ctx = { true, false };
+
+- amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
++ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
+ amdgpu_uvd_force_into_uvd_segment(bo);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (r)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5007-drm-amdgpu-reduce-the-number-of-placements-for-a-BO.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5007-drm-amdgpu-reduce-the-number-of-placements-for-a-BO.patch
new file mode 100644
index 00000000..3b6ee3e0
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5007-drm-amdgpu-reduce-the-number-of-placements-for-a-BO.patch
@@ -0,0 +1,57 @@
+From caee5a48d469fe62e96416b75b03d9ed1fa6f695 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Wed, 18 Jul 2018 11:16:35 +0200
+Subject: [PATCH 5007/5725] drm/amdgpu: reduce the number of placements for a
+ BO
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Make struct amdgpu_bo a bit smaller.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 2 ++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 3 ++-
+ 2 files changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index e61d122..e8f43d0 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -240,6 +240,8 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
+ if (flags & AMDGPU_GEM_CREATE_TOP_DOWN)
+ places[i].flags |= TTM_PL_FLAG_TOPDOWN;
+
++ BUG_ON(c >= AMDGPU_BO_MAX_PLACEMENTS);
++
+ placement->num_placement = c;
+ placement->placement = places;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+index 93c4325..ae4d06c 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+@@ -32,6 +32,7 @@
+ #include "amdgpu.h"
+
+ #define AMDGPU_BO_INVALID_OFFSET LONG_MAX
++#define AMDGPU_BO_MAX_PLACEMENTS 3
+
+ struct amdgpu_bo_param {
+ unsigned long size;
+@@ -77,7 +78,7 @@ struct amdgpu_bo {
+ /* Protected by tbo.reserved */
+ u32 preferred_domains;
+ u32 allowed_domains;
+- struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
++ struct ttm_place placements[AMDGPU_BO_MAX_PLACEMENTS];
+ struct ttm_placement placement;
+ struct ttm_buffer_object tbo;
+ struct ttm_bo_kmap_obj kmap;
+--
+2.7.4
+
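
Shrinking the placements array works because amdgpu_bo_placement_from_domain() never produces more than three entries; the BUG_ON documents and enforces that assumption. A simplified standalone model of the bounded list (names and domain values are made up for illustration):

#include <assert.h>
#include <stdint.h>

#define MAX_PLACEMENTS 3

struct placement_list {
	uint32_t places[MAX_PLACEMENTS];
	unsigned int count;
};

/* Each requested domain adds at most one entry; overflowing the fixed
 * array would corrupt memory, so assert() plays the role of the BUG_ON. */
static void add_placement(struct placement_list *pl, uint32_t flags)
{
	assert(pl->count < MAX_PLACEMENTS);
	pl->places[pl->count++] = flags;
}

int main(void)
{
	struct placement_list pl = { .count = 0 };

	add_placement(&pl, 0x4 /* pretend VRAM */);
	add_placement(&pl, 0x2 /* pretend GTT  */);
	return 0;
}
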
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5008-drm-amdgpu-gmc9-clarify-GPUVM-fault-error-message.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5008-drm-amdgpu-gmc9-clarify-GPUVM-fault-error-message.patch
new file mode 100644
index 00000000..989e776b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5008-drm-amdgpu-gmc9-clarify-GPUVM-fault-error-message.patch
@@ -0,0 +1,33 @@
+From 43b325ef0b087e4e910afac353f61760ab24e6d2 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Tue, 24 Jul 2018 11:52:58 -0500
+Subject: [PATCH 5008/5725] drm/amdgpu/gmc9: clarify GPUVM fault error message
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The address printed is the actual address, not the page.
+
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index 5255647..68355b1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -268,7 +268,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
+ entry->src_id, entry->ring_id, entry->vmid,
+ entry->pasid, task_info.process_name, task_info.tgid,
+ task_info.task_name, task_info.pid);
+- dev_err(adev->dev, " at page 0x%016llx from %d\n",
++ dev_err(adev->dev, " at address 0x%016llx from %d\n",
+ addr, entry->client_id);
+ if (!amdgpu_sriov_vf(adev))
+ dev_err(adev->dev,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5009-gpu-drm-amdgpu-Replace-mdelay-with-msleep-in-cik_pci.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5009-gpu-drm-amdgpu-Replace-mdelay-with-msleep-in-cik_pci.patch
new file mode 100644
index 00000000..0031896c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5009-gpu-drm-amdgpu-Replace-mdelay-with-msleep-in-cik_pci.patch
@@ -0,0 +1,36 @@
+From 1bf50d381e7906fba5ee294ef921080425a6d35d Mon Sep 17 00:00:00 2001
+From: Jia-Ju Bai <baijiaju1990@gmail.com>
+Date: Mon, 23 Jul 2018 22:29:56 +0800
+Subject: [PATCH 5009/5725] gpu: drm: amdgpu: Replace mdelay with msleep in
+ cik_pcie_gen3_enable()
+
+cik_pcie_gen3_enable() is only called by cik_common_hw_init(), which is
+never called in atomic context.
+cik_pcie_gen3_enable() calls mdelay() to busy-wait, which is not
+necessary.
+mdelay() can be replaced with msleep().
+
+This was found by a static analysis tool named DCNS that I wrote.
+
+Signed-off-by: Jia-Ju Bai <baijiaju1990@gmail.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/cik.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
+index 702e257..78ab939 100644
+--- a/drivers/gpu/drm/amd/amdgpu/cik.c
++++ b/drivers/gpu/drm/amd/amdgpu/cik.c
+@@ -1476,7 +1476,7 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
+ tmp |= PCIE_LC_CNTL4__LC_REDO_EQ_MASK;
+ WREG32_PCIE(ixPCIE_LC_CNTL4, tmp);
+
+- mdelay(100);
++ msleep(100);
+
+ /* linkctl */
+ pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
+--
+2.7.4
+
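
The rule this patch applies: mdelay() spins the CPU and is only justified where sleeping is forbidden (atomic context), while msleep() schedules the task away and is the right tool in process context such as a .hw_init callback. A kernel-flavoured sketch of the distinction, not a standalone program:

#include <linux/delay.h>

static void pcie_retrain_settle(void)
{
	/*
	 * hw_init runs in process context, so the task may sleep;
	 * msleep(100) lets other work run during the wait.
	 */
	msleep(100);
}

static void settle_with_lock_held(void)
{
	/*
	 * If the same wait were needed with interrupts disabled or a
	 * spinlock held, sleeping would be illegal and mdelay() would
	 * be the only (CPU-burning) option.
	 */
	mdelay(100);
}
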
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5010-drm-amdgpu-add-support-for-inplace-IB-patching-for-M.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5010-drm-amdgpu-add-support-for-inplace-IB-patching-for-M.patch
new file mode 100644
index 00000000..f0bdf355
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5010-drm-amdgpu-add-support-for-inplace-IB-patching-for-M.patch
@@ -0,0 +1,89 @@
+From 1d55956489a2094bd1295cb6125811d9ff5f8ee8 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Mon, 23 Jul 2018 16:01:39 +0200
+Subject: [PATCH 5010/5725] drm/amdgpu: add support for inplace IB patching for
+ MM engines v2
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+We are going to need that for the second UVD instance on Vega20.
+
+v2: rename to patch_cs_in_place
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-and-tested-by: James Zhu <James.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 +
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 22 +++++++++++++++-------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 1 +
+ 3 files changed, 17 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index cbe4336..557c964 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -1814,6 +1814,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
+ #define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
+ #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
+ #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
++#define amdgpu_ring_patch_cs_in_place(r, p, ib) ((r)->funcs->patch_cs_in_place((p), (ib)))
+ #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
+ #define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
+ #define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index 15fa375..653c61f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -918,7 +918,7 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
+ int r;
+
+ /* Only for UVD/VCE VM emulation */
+- if (p->ring->funcs->parse_cs) {
++ if (p->ring->funcs->parse_cs || p->ring->funcs->patch_cs_in_place) {
+ unsigned i, j;
+
+ for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
+@@ -959,12 +959,20 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
+ offset = m->start * AMDGPU_GPU_PAGE_SIZE;
+ kptr += va_start - offset;
+
+- memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
+- amdgpu_bo_kunmap(aobj);
+-
+- r = amdgpu_ring_parse_cs(ring, p, j);
+- if (r)
+- return r;
++ if (p->ring->funcs->parse_cs) {
++ memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
++ amdgpu_bo_kunmap(aobj);
++
++ r = amdgpu_ring_parse_cs(ring, p, j);
++ if (r)
++ return r;
++ } else {
++ ib->ptr = (uint32_t *)kptr;
++ r = amdgpu_ring_patch_cs_in_place(ring, p, j);
++ amdgpu_bo_kunmap(aobj);
++ if (r)
++ return r;
++ }
+
+ j++;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+index 380eb2c..7bec0be 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+@@ -122,6 +122,7 @@ struct amdgpu_ring_funcs {
+ void (*set_wptr)(struct amdgpu_ring *ring);
+ /* validating and patching of IBs */
+ int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
++ int (*patch_cs_in_place)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
+ /* constants to calculate how many DW are needed for an emit */
+ unsigned emit_frame_size;
+ unsigned emit_ib_size;
+--
+2.7.4
+
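
The core of patch 5010 is the new branch in amdgpu_cs_ib_vm_chunk(): rings that provide parse_cs still get a private copy of the IB to validate, while rings that provide the new patch_cs_in_place hook are handed the mapped IB directly and rewrite it where it sits. A simplified user-space model of that dispatch (struct names and callbacks are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ib { uint32_t *ptr; unsigned int num_dw; };

struct ring_funcs {
	int (*parse_cs)(struct ib *copy);            /* validates a private copy */
	int (*patch_cs_in_place)(struct ib *mapped); /* rewrites the mapped IB   */
};

static int process_ib(const struct ring_funcs *funcs,
		      uint32_t *mapped, unsigned int num_dw)
{
	struct ib ib = { mapped, num_dw };

	if (funcs->parse_cs) {
		uint32_t copy[16];

		memcpy(copy, mapped, num_dw * sizeof(*mapped));
		ib.ptr = copy;
		return funcs->parse_cs(&ib);
	}
	if (funcs->patch_cs_in_place)
		return funcs->patch_cs_in_place(&ib);
	return 0;
}

static int demo_patch(struct ib *ib)
{
	printf("patching %u dwords in place\n", ib->num_dw);
	return 0;
}

int main(void)
{
	uint32_t words[4] = { 0 };
	const struct ring_funcs uvd2 = { .patch_cs_in_place = demo_patch };

	return process_ib(&uvd2, words, 4);
}
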
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5011-drm-amdgpu-patch-the-IBs-for-the-second-UVD-instance.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5011-drm-amdgpu-patch-the-IBs-for-the-second-UVD-instance.patch
new file mode 100644
index 00000000..01b073f6
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5011-drm-amdgpu-patch-the-IBs-for-the-second-UVD-instance.patch
@@ -0,0 +1,73 @@
+From 3c693286dae83108ad010eb69e344226d547d2e7 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Wed, 18 Jul 2018 14:52:42 +0200
+Subject: [PATCH 5011/5725] drm/amdgpu: patch the IBs for the second UVD
+ instance v2
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Patch the IBs for the second UVD instance so that userspace doesn't need
+to care about which instance it submits to.
+
+v2: use direct IB patching
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-and-tested-by: James Zhu <James.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 29 +++++++++++++++++++++++++++++
+ 1 file changed, 29 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+index 1fd09d0..87d6a59 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+@@ -1227,6 +1227,34 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
+ }
+
+ /**
++ * uvd_v7_0_ring_patch_cs_in_place - Patch the IB for command submission.
++ *
++ * @p: the CS parser with the IBs
++ * @ib_idx: which IB to patch
++ *
++ */
++static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
++ uint32_t ib_idx)
++{
++ struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
++ unsigned i;
++
++ /* No patching necessary for the first instance */
++ if (!p->ring->me)
++ return 0;
++
++ for (i = 0; i < ib->length_dw; i += 2) {
++ uint32_t reg = amdgpu_get_ib_value(p, ib_idx, i);
++
++ reg -= p->adev->reg_offset[UVD_HWIP][0][1];
++ reg += p->adev->reg_offset[UVD_HWIP][1][1];
++
++ amdgpu_set_ib_value(p, ib_idx, i, reg);
++ }
++ return 0;
++}
++
++/**
+ * uvd_v7_0_ring_emit_ib - execute indirect buffer
+ *
+ * @ring: amdgpu_ring pointer
+@@ -1718,6 +1746,7 @@ static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
+ .get_rptr = uvd_v7_0_ring_get_rptr,
+ .get_wptr = uvd_v7_0_ring_get_wptr,
+ .set_wptr = uvd_v7_0_ring_set_wptr,
++ .patch_cs_in_place = uvd_v7_0_ring_patch_cs_in_place,
+ .emit_frame_size =
+ 2 + /* uvd_v7_0_ring_emit_hdp_flush */
+ 2 + /* uvd_v7_0_ring_emit_hdp_invalidate */
+--
+2.7.4
+
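
The patching itself is just a rebase of every register index in the IB from the instance-0 register window to the instance-1 window, exactly as the loop above does with reg_offset[UVD_HWIP][0][1] and [1][1]. A worked example with made-up offsets:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical register apertures; the real values come from
	 * adev->reg_offset[UVD_HWIP][instance][...]. */
	const uint32_t inst0_base = 0x7800;
	const uint32_t inst1_base = 0x7e00;

	/* UVD IBs are walked as (register, value) pairs written against
	 * instance 0. */
	uint32_t ib[] = { 0x7820, 0xdeadbeef, 0x7844, 0x00000001 };
	unsigned int i;

	for (i = 0; i < sizeof(ib) / sizeof(ib[0]); i += 2)
		ib[i] = ib[i] - inst0_base + inst1_base;

	printf("first register after patching: 0x%x\n", ib[0]); /* 0x7e20 */
	return 0;
}
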
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5012-drm-amd-display-Retry-link-training-again.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5012-drm-amd-display-Retry-link-training-again.patch
new file mode 100644
index 00000000..0a722761
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5012-drm-amd-display-Retry-link-training-again.patch
@@ -0,0 +1,102 @@
+From cd2ca099e3f1063073f5b518d62a691ff784c395 Mon Sep 17 00:00:00 2001
+From: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Date: Fri, 13 Jul 2018 18:00:06 -0400
+Subject: [PATCH 5012/5725] drm/amd/display: Retry link training again
+
+[Why]
+Some receivers seem to fail the first link training but are good on
+subsequent tries. We want to retry link training again. This fixes
+HTC vive pro not lighting up after being disabled.
+
+[How]
+Check if the link training passed without fall back if this is not
+the case then we retry link training.
+
+Signed-off-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_link.c | 20 ++++++++++++++++++--
+ drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 5 ++++-
+ drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h | 3 ++-
+ 3 files changed, 24 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index 838231e..7962141 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -60,7 +60,14 @@
+
+ enum {
+ LINK_RATE_REF_FREQ_IN_MHZ = 27,
+- PEAK_FACTOR_X1000 = 1006
++ PEAK_FACTOR_X1000 = 1006,
++ /*
++ * Some receivers fail to train on first try and are good
++ * on subsequent tries. 2 retries should be plenty. If we
++ * don't have a successful training then we don't expect to
++ * ever get one.
++ */
++ LINK_TRAINING_MAX_VERIFY_RETRY = 2
+ };
+
+ /*******************************************************************************
+@@ -760,7 +767,16 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
+ */
+
+ /* deal with non-mst cases */
+- dp_verify_link_cap(link, &link->reported_link_cap);
++ for (i = 0; i < LINK_TRAINING_MAX_VERIFY_RETRY; i++) {
++ int fail_count = 0;
++
++ dp_verify_link_cap(link,
++ &link->reported_link_cap,
++ &fail_count);
++
++ if (fail_count == 0)
++ break;
++ }
+ }
+
+ /* HDMI-DVI Dongle */
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+index b9db6e6..e8a69d7 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+@@ -1088,7 +1088,8 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
+
+ bool dp_verify_link_cap(
+ struct dc_link *link,
+- struct dc_link_settings *known_limit_link_setting)
++ struct dc_link_settings *known_limit_link_setting,
++ int *fail_count)
+ {
+ struct dc_link_settings max_link_cap = {0};
+ struct dc_link_settings cur_link_setting = {0};
+@@ -1160,6 +1161,8 @@ bool dp_verify_link_cap(
+ skip_video_pattern);
+ if (status == LINK_TRAINING_SUCCESS)
+ success = true;
++ else
++ (*fail_count)++;
+ }
+
+ if (success)
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+index 697b5ee..a37255c 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+@@ -35,7 +35,8 @@ struct dc_link_settings;
+
+ bool dp_verify_link_cap(
+ struct dc_link *link,
+- struct dc_link_settings *known_limit_link_setting);
++ struct dc_link_settings *known_limit_link_setting,
++ int *fail_count);
+
+ bool dp_validate_mode_timing(
+ struct dc_link *link,
+--
+2.7.4
+
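
The new loop in dc_link_detect() is a bounded retry: run verification and stop early as soon as an attempt completes with zero training failures. A minimal model of that policy, where verify_link() stands in for dp_verify_link_cap():

#include <stdio.h>

#define LINK_TRAINING_MAX_VERIFY_RETRY 2

/* Stand-in for dp_verify_link_cap(): pretend the first attempt records one
 * training failure and the second attempt is clean. */
static void verify_link(int attempt, int *fail_count)
{
	*fail_count = (attempt == 0) ? 1 : 0;
}

int main(void)
{
	int i;

	for (i = 0; i < LINK_TRAINING_MAX_VERIFY_RETRY; i++) {
		int fail_count = 0;

		verify_link(i, &fail_count);
		if (fail_count == 0) {
			printf("link verified on attempt %d\n", i + 1);
			break;
		}
	}
	return 0;
}
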
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5013-drm-amd-display-flatten-aux_engine-and-engine.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5013-drm-amd-display-flatten-aux_engine-and-engine.patch
new file mode 100644
index 00000000..2149dd7c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5013-drm-amd-display-flatten-aux_engine-and-engine.patch
@@ -0,0 +1,665 @@
+From 203490d30f7a7fd075207aece8f802e23b462aa6 Mon Sep 17 00:00:00 2001
+From: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Date: Thu, 28 Jun 2018 17:50:05 -0400
+Subject: [PATCH 5013/5725] drm/amd/display: flatten aux_engine and engine
+
+[Why]
+engine and aux_engine are unnecessary layers of indirection; we want to
+remove the extra layer.
+
+[How]
+flatten engine and aux engine structs into one struct called
+aux_engine and remove all references to the engine struct.
+
+Signed-off-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c | 8 +-
+ drivers/gpu/drm/amd/display/dc/dce/dce_aux.c | 55 ++++-----
+ drivers/gpu/drm/amd/display/dc/dce/dce_aux.h | 4 +-
+ .../drm/amd/display/dc/dce100/dce100_resource.c | 4 +-
+ .../drm/amd/display/dc/dce110/dce110_resource.c | 4 +-
+ .../drm/amd/display/dc/dce112/dce112_resource.c | 4 +-
+ .../drm/amd/display/dc/dce120/dce120_resource.c | 4 +-
+ .../gpu/drm/amd/display/dc/dce80/dce80_resource.c | 4 +-
+ .../gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 4 +-
+ drivers/gpu/drm/amd/display/dc/inc/core_types.h | 2 +-
+ drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h | 127 ++++++++++++++++-----
+ drivers/gpu/drm/amd/display/dc/inc/hw/engine.h | 106 -----------------
+ 12 files changed, 140 insertions(+), 186 deletions(-)
+ delete mode 100644 drivers/gpu/drm/amd/display/dc/inc/hw/engine.h
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+index 4019fe07..8def0d9 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+@@ -33,7 +33,6 @@
+ #include "include/vector.h"
+ #include "core_types.h"
+ #include "dc_link_ddc.h"
+-#include "engine.h"
+ #include "aux_engine.h"
+
+ #define AUX_POWER_UP_WA_DELAY 500
+@@ -640,7 +639,6 @@ int dc_link_aux_transfer(struct ddc_service *ddc,
+ enum i2caux_transaction_action action)
+ {
+ struct ddc *ddc_pin = ddc->ddc_pin;
+- struct engine *engine;
+ struct aux_engine *aux_engine;
+ enum aux_channel_operation_result operation_result;
+ struct aux_request_transaction_data aux_req;
+@@ -652,8 +650,8 @@ int dc_link_aux_transfer(struct ddc_service *ddc,
+ memset(&aux_req, 0, sizeof(aux_req));
+ memset(&aux_rep, 0, sizeof(aux_rep));
+
+- engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
+- aux_engine = engine->funcs->acquire(engine, ddc_pin);
++ aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
++ aux_engine->funcs->acquire(aux_engine, ddc_pin);
+
+ aux_req.type = type;
+ aux_req.action = action;
+@@ -685,7 +683,7 @@ int dc_link_aux_transfer(struct ddc_service *ddc,
+ res = -1;
+ break;
+ }
+- aux_engine->base.funcs->release_engine(&aux_engine->base);
++ aux_engine->funcs->release_engine(aux_engine);
+ return res;
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+index b28e212..3f5b2e6 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+@@ -28,12 +28,12 @@
+ #include "dce/dce_11_0_sh_mask.h"
+
+ #define CTX \
+- aux110->base.base.ctx
++ aux110->base.ctx
+ #define REG(reg_name)\
+ (aux110->regs->reg_name)
+
+ #define DC_LOGGER \
+- engine->base.ctx->logger
++ engine->ctx->logger
+
+ #include "reg_helper.h"
+
+@@ -51,9 +51,9 @@ enum {
+ AUX_DEFER_RETRY_COUNTER = 6
+ };
+ static void release_engine(
+- struct engine *engine)
++ struct aux_engine *engine)
+ {
+- struct aux_engine_dce110 *aux110 = FROM_ENGINE(engine);
++ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+
+ dal_ddc_close(engine->ddc);
+
+@@ -827,22 +827,21 @@ static bool end_of_transaction_command(
+
+ /* according Syed, it does not need now DoDummyMOT */
+ }
+-bool submit_request(
+- struct engine *engine,
++static bool submit_request(
++ struct aux_engine *engine,
+ struct i2caux_transaction_request *request,
+ bool middle_of_transaction)
+ {
+- struct aux_engine *aux_engine = FROM_AUX_ENGINE_ENGINE(engine);
+
+ bool result;
+ bool mot_used = true;
+
+ switch (request->operation) {
+ case I2CAUX_TRANSACTION_READ:
+- result = read_command(aux_engine, request, mot_used);
++ result = read_command(engine, request, mot_used);
+ break;
+ case I2CAUX_TRANSACTION_WRITE:
+- result = write_command(aux_engine, request, mot_used);
++ result = write_command(engine, request, mot_used);
+ break;
+ default:
+ result = false;
+@@ -854,45 +853,45 @@ bool submit_request(
+ */
+
+ if (!middle_of_transaction || !result)
+- end_of_transaction_command(aux_engine, request);
++ end_of_transaction_command(engine, request);
+
+ /* mask AUX interrupt */
+
+ return result;
+ }
+ enum i2caux_engine_type get_engine_type(
+- const struct engine *engine)
++ const struct aux_engine *engine)
+ {
+ return I2CAUX_ENGINE_TYPE_AUX;
+ }
+
+-static struct aux_engine *acquire(
+- struct engine *engine,
++static bool acquire(
++ struct aux_engine *engine,
+ struct ddc *ddc)
+ {
+- struct aux_engine *aux_engine = FROM_AUX_ENGINE_ENGINE(engine);
++
+ enum gpio_result result;
+
+- if (aux_engine->funcs->is_engine_available) {
++ if (engine->funcs->is_engine_available) {
+ /*check whether SW could use the engine*/
+- if (!aux_engine->funcs->is_engine_available(aux_engine))
+- return NULL;
++ if (!engine->funcs->is_engine_available(engine))
++ return false;
+ }
+
+ result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE,
+ GPIO_DDC_CONFIG_TYPE_MODE_AUX);
+
+ if (result != GPIO_RESULT_OK)
+- return NULL;
++ return false;
+
+- if (!aux_engine->funcs->acquire_engine(aux_engine)) {
++ if (!engine->funcs->acquire_engine(engine)) {
+ dal_ddc_close(ddc);
+- return NULL;
++ return false;
+ }
+
+ engine->ddc = ddc;
+
+- return aux_engine;
++ return true;
+ }
+
+ static const struct aux_engine_funcs aux_engine_funcs = {
+@@ -902,9 +901,6 @@ static const struct aux_engine_funcs aux_engine_funcs = {
+ .read_channel_reply = read_channel_reply,
+ .get_channel_status = get_channel_status,
+ .is_engine_available = is_engine_available,
+-};
+-
+-static const struct engine_funcs engine_funcs = {
+ .release_engine = release_engine,
+ .destroy_engine = dce110_engine_destroy,
+ .submit_request = submit_request,
+@@ -912,10 +908,10 @@ static const struct engine_funcs engine_funcs = {
+ .acquire = acquire,
+ };
+
+-void dce110_engine_destroy(struct engine **engine)
++void dce110_engine_destroy(struct aux_engine **engine)
+ {
+
+- struct aux_engine_dce110 *engine110 = FROM_ENGINE(*engine);
++ struct aux_engine_dce110 *engine110 = FROM_AUX_ENGINE(*engine);
+
+ kfree(engine110);
+ *engine = NULL;
+@@ -927,13 +923,12 @@ struct aux_engine *dce110_aux_engine_construct(struct aux_engine_dce110 *aux_eng
+ uint32_t timeout_period,
+ const struct dce110_aux_registers *regs)
+ {
+- aux_engine110->base.base.ddc = NULL;
+- aux_engine110->base.base.ctx = ctx;
++ aux_engine110->base.ddc = NULL;
++ aux_engine110->base.ctx = ctx;
+ aux_engine110->base.delay = 0;
+ aux_engine110->base.max_defer_write_retry = 0;
+- aux_engine110->base.base.funcs = &engine_funcs;
+ aux_engine110->base.funcs = &aux_engine_funcs;
+- aux_engine110->base.base.inst = inst;
++ aux_engine110->base.inst = inst;
+ aux_engine110->timeout_period = timeout_period;
+ aux_engine110->regs = regs;
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
+index c6b2aec..f7caab8 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
+@@ -103,9 +103,9 @@ struct aux_engine *dce110_aux_engine_construct(
+ uint32_t timeout_period,
+ const struct dce110_aux_registers *regs);
+
+-void dce110_engine_destroy(struct engine **engine);
++void dce110_engine_destroy(struct aux_engine **engine);
+
+ bool dce110_aux_engine_acquire(
+- struct engine *aux_engine,
++ struct aux_engine *aux_engine,
+ struct ddc *ddc);
+ #endif
+diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+index c34c953..fd2bdae 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+@@ -586,7 +586,7 @@ struct output_pixel_processor *dce100_opp_create(
+ return &opp->base;
+ }
+
+-struct engine *dce100_aux_engine_create(
++struct aux_engine *dce100_aux_engine_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+ {
+@@ -600,7 +600,7 @@ struct engine *dce100_aux_engine_create(
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+ &aux_engine_regs[inst]);
+
+- return &aux_engine->base.base;
++ return &aux_engine->base;
+ }
+
+ struct clock_source *dce100_clock_source_create(
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+index 4a665a2..e5e9e92 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+@@ -604,7 +604,7 @@ static struct output_pixel_processor *dce110_opp_create(
+ return &opp->base;
+ }
+
+-struct engine *dce110_aux_engine_create(
++struct aux_engine *dce110_aux_engine_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+ {
+@@ -618,7 +618,7 @@ struct engine *dce110_aux_engine_create(
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+ &aux_engine_regs[inst]);
+
+- return &aux_engine->base.base;
++ return &aux_engine->base;
+ }
+
+ struct clock_source *dce110_clock_source_create(
+diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+index caf90ae..84a05ff 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+@@ -604,7 +604,7 @@ struct output_pixel_processor *dce112_opp_create(
+ return &opp->base;
+ }
+
+-struct engine *dce112_aux_engine_create(
++struct aux_engine *dce112_aux_engine_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+ {
+@@ -618,7 +618,7 @@ struct engine *dce112_aux_engine_create(
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+ &aux_engine_regs[inst]);
+
+- return &aux_engine->base.base;
++ return &aux_engine->base;
+ }
+
+ struct clock_source *dce112_clock_source_create(
+diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+index f7d02f2..61d8e22 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+@@ -376,7 +376,7 @@ struct output_pixel_processor *dce120_opp_create(
+ ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask);
+ return &opp->base;
+ }
+-struct engine *dce120_aux_engine_create(
++struct aux_engine *dce120_aux_engine_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+ {
+@@ -390,7 +390,7 @@ struct engine *dce120_aux_engine_create(
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+ &aux_engine_regs[inst]);
+
+- return &aux_engine->base.base;
++ return &aux_engine->base;
+ }
+
+ static const struct bios_registers bios_regs = {
+diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+index 6fb33ad..dc9f3e9 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+@@ -464,7 +464,7 @@ static struct output_pixel_processor *dce80_opp_create(
+ return &opp->base;
+ }
+
+-struct engine *dce80_aux_engine_create(
++struct aux_engine *dce80_aux_engine_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+ {
+@@ -478,7 +478,7 @@ struct engine *dce80_aux_engine_create(
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+ &aux_engine_regs[inst]);
+
+- return &aux_engine->base.base;
++ return &aux_engine->base;
+ }
+
+ static struct stream_encoder *dce80_stream_encoder_create(
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+index c39934f..6b44ed3 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+@@ -594,7 +594,7 @@ static struct output_pixel_processor *dcn10_opp_create(
+ return &opp->base;
+ }
+
+-struct engine *dcn10_aux_engine_create(
++struct aux_engine *dcn10_aux_engine_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+ {
+@@ -608,7 +608,7 @@ struct engine *dcn10_aux_engine_create(
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+ &aux_engine_regs[inst]);
+
+- return &aux_engine->base.base;
++ return &aux_engine->base;
+ }
+
+ static struct mpc *dcn10_mpc_create(struct dc_context *ctx)
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+index 3b7e9aa..b4d3300 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+@@ -138,7 +138,7 @@ struct resource_pool {
+ struct output_pixel_processor *opps[MAX_PIPES];
+ struct timing_generator *timing_generators[MAX_PIPES];
+ struct stream_encoder *stream_enc[MAX_PIPES * 2];
+- struct engine *engines[MAX_PIPES];
++ struct aux_engine *engines[MAX_PIPES];
+ struct hubbub *hubbub;
+ struct mpc *mpc;
+ struct pp_smu_funcs_rv *pp_smu;
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
+index 06d7e5d..e79cd4e 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
+@@ -26,46 +26,72 @@
+ #ifndef __DAL_AUX_ENGINE_H__
+ #define __DAL_AUX_ENGINE_H__
+
+-#include "engine.h"
++#include "dc_ddc_types.h"
+ #include "include/i2caux_interface.h"
+
+-struct aux_engine;
+-union aux_config;
+-struct aux_engine_funcs {
+- void (*destroy)(
+- struct aux_engine **ptr);
+- bool (*acquire_engine)(
+- struct aux_engine *engine);
+- void (*configure)(
+- struct aux_engine *engine,
+- union aux_config cfg);
+- void (*submit_channel_request)(
+- struct aux_engine *engine,
+- struct aux_request_transaction_data *request);
+- void (*process_channel_reply)(
+- struct aux_engine *engine,
+- struct aux_reply_transaction_data *reply);
+- int (*read_channel_reply)(
+- struct aux_engine *engine,
+- uint32_t size,
+- uint8_t *buffer,
+- uint8_t *reply_result,
+- uint32_t *sw_status);
+- enum aux_channel_operation_result (*get_channel_status)(
+- struct aux_engine *engine,
+- uint8_t *returned_bytes);
+- bool (*is_engine_available)(struct aux_engine *engine);
++enum i2caux_transaction_operation {
++ I2CAUX_TRANSACTION_READ,
++ I2CAUX_TRANSACTION_WRITE
++};
++
++enum i2caux_transaction_address_space {
++ I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C = 1,
++ I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD
++};
++
++struct i2caux_transaction_payload {
++ enum i2caux_transaction_address_space address_space;
++ uint32_t address;
++ uint32_t length;
++ uint8_t *data;
++};
++
++enum i2caux_transaction_status {
++ I2CAUX_TRANSACTION_STATUS_UNKNOWN = (-1L),
++ I2CAUX_TRANSACTION_STATUS_SUCCEEDED,
++ I2CAUX_TRANSACTION_STATUS_FAILED_CHANNEL_BUSY,
++ I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT,
++ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR,
++ I2CAUX_TRANSACTION_STATUS_FAILED_NACK,
++ I2CAUX_TRANSACTION_STATUS_FAILED_INCOMPLETE,
++ I2CAUX_TRANSACTION_STATUS_FAILED_OPERATION,
++ I2CAUX_TRANSACTION_STATUS_FAILED_INVALID_OPERATION,
++ I2CAUX_TRANSACTION_STATUS_FAILED_BUFFER_OVERFLOW,
++ I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON
++};
++
++struct i2caux_transaction_request {
++ enum i2caux_transaction_operation operation;
++ struct i2caux_transaction_payload payload;
++ enum i2caux_transaction_status status;
++};
++
++enum i2caux_engine_type {
++ I2CAUX_ENGINE_TYPE_UNKNOWN = (-1L),
++ I2CAUX_ENGINE_TYPE_AUX,
++ I2CAUX_ENGINE_TYPE_I2C_DDC_HW,
++ I2CAUX_ENGINE_TYPE_I2C_GENERIC_HW,
++ I2CAUX_ENGINE_TYPE_I2C_SW
++};
++
++enum i2c_default_speed {
++ I2CAUX_DEFAULT_I2C_HW_SPEED = 50,
++ I2CAUX_DEFAULT_I2C_SW_SPEED = 50
+ };
+-struct engine;
++
++union aux_config;
++
+ struct aux_engine {
+- struct engine base;
++ uint32_t inst;
++ struct ddc *ddc;
++ struct dc_context *ctx;
+ const struct aux_engine_funcs *funcs;
+ /* following values are expressed in milliseconds */
+ uint32_t delay;
+ uint32_t max_defer_write_retry;
+-
+ bool acquire_reset;
+ };
++
+ struct read_command_context {
+ uint8_t *buffer;
+ uint32_t current_read_length;
+@@ -86,6 +112,7 @@ struct read_command_context {
+ bool transaction_complete;
+ bool operation_succeeded;
+ };
++
+ struct write_command_context {
+ bool mot;
+
+@@ -110,4 +137,44 @@ struct write_command_context {
+ bool transaction_complete;
+ bool operation_succeeded;
+ };
++
++
++struct aux_engine_funcs {
++ void (*destroy)(
++ struct aux_engine **ptr);
++ bool (*acquire_engine)(
++ struct aux_engine *engine);
++ void (*configure)(
++ struct aux_engine *engine,
++ union aux_config cfg);
++ void (*submit_channel_request)(
++ struct aux_engine *engine,
++ struct aux_request_transaction_data *request);
++ void (*process_channel_reply)(
++ struct aux_engine *engine,
++ struct aux_reply_transaction_data *reply);
++ int (*read_channel_reply)(
++ struct aux_engine *engine,
++ uint32_t size,
++ uint8_t *buffer,
++ uint8_t *reply_result,
++ uint32_t *sw_status);
++ enum aux_channel_operation_result (*get_channel_status)(
++ struct aux_engine *engine,
++ uint8_t *returned_bytes);
++ bool (*is_engine_available)(struct aux_engine *engine);
++ enum i2caux_engine_type (*get_engine_type)(
++ const struct aux_engine *engine);
++ bool (*acquire)(
++ struct aux_engine *engine,
++ struct ddc *ddc);
++ bool (*submit_request)(
++ struct aux_engine *engine,
++ struct i2caux_transaction_request *request,
++ bool middle_of_transaction);
++ void (*release_engine)(
++ struct aux_engine *engine);
++ void (*destroy_engine)(
++ struct aux_engine **engine);
++};
+ #endif
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/engine.h b/drivers/gpu/drm/amd/display/dc/inc/hw/engine.h
+deleted file mode 100644
+index 1f5476f..0000000
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/engine.h
++++ /dev/null
+@@ -1,106 +0,0 @@
+-/*
+- * Copyright 2012-15 Advanced Micro Devices, Inc.
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included in
+- * all copies or substantial portions of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+- * OTHER DEALINGS IN THE SOFTWARE.
+- *
+- * Authors: AMD
+- *
+- */
+-
+-#ifndef __DAL_ENGINE_H__
+-#define __DAL_ENGINE_H__
+-
+-#include "dc_ddc_types.h"
+-
+-enum i2caux_transaction_operation {
+- I2CAUX_TRANSACTION_READ,
+- I2CAUX_TRANSACTION_WRITE
+-};
+-
+-enum i2caux_transaction_address_space {
+- I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C = 1,
+- I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD
+-};
+-
+-struct i2caux_transaction_payload {
+- enum i2caux_transaction_address_space address_space;
+- uint32_t address;
+- uint32_t length;
+- uint8_t *data;
+-};
+-
+-enum i2caux_transaction_status {
+- I2CAUX_TRANSACTION_STATUS_UNKNOWN = (-1L),
+- I2CAUX_TRANSACTION_STATUS_SUCCEEDED,
+- I2CAUX_TRANSACTION_STATUS_FAILED_CHANNEL_BUSY,
+- I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT,
+- I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR,
+- I2CAUX_TRANSACTION_STATUS_FAILED_NACK,
+- I2CAUX_TRANSACTION_STATUS_FAILED_INCOMPLETE,
+- I2CAUX_TRANSACTION_STATUS_FAILED_OPERATION,
+- I2CAUX_TRANSACTION_STATUS_FAILED_INVALID_OPERATION,
+- I2CAUX_TRANSACTION_STATUS_FAILED_BUFFER_OVERFLOW,
+- I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON
+-};
+-
+-struct i2caux_transaction_request {
+- enum i2caux_transaction_operation operation;
+- struct i2caux_transaction_payload payload;
+- enum i2caux_transaction_status status;
+-};
+-
+-enum i2caux_engine_type {
+- I2CAUX_ENGINE_TYPE_UNKNOWN = (-1L),
+- I2CAUX_ENGINE_TYPE_AUX,
+- I2CAUX_ENGINE_TYPE_I2C_DDC_HW,
+- I2CAUX_ENGINE_TYPE_I2C_GENERIC_HW,
+- I2CAUX_ENGINE_TYPE_I2C_SW
+-};
+-
+-enum i2c_default_speed {
+- I2CAUX_DEFAULT_I2C_HW_SPEED = 50,
+- I2CAUX_DEFAULT_I2C_SW_SPEED = 50
+-};
+-
+-struct engine;
+-
+-struct engine_funcs {
+- enum i2caux_engine_type (*get_engine_type)(
+- const struct engine *engine);
+- struct aux_engine* (*acquire)(
+- struct engine *engine,
+- struct ddc *ddc);
+- bool (*submit_request)(
+- struct engine *engine,
+- struct i2caux_transaction_request *request,
+- bool middle_of_transaction);
+- void (*release_engine)(
+- struct engine *engine);
+- void (*destroy_engine)(
+- struct engine **engine);
+-};
+-
+-struct engine {
+- const struct engine_funcs *funcs;
+- uint32_t inst;
+- struct ddc *ddc;
+- struct dc_context *ctx;
+-};
+-
+-#endif
+--
+2.7.4
+
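The hunks above fold struct engine into aux_engine and leave a single aux_engine_funcs vtable behind. As a rough standalone sketch of the resulting pattern (types and names below are illustrative stand-ins, not the DC code):

#include <stdbool.h>
#include <stdio.h>

struct fake_aux_engine;

/* stand-in for aux_engine_funcs: one vtable instead of engine_funcs + aux_engine_funcs */
struct fake_aux_funcs {
	bool (*acquire)(struct fake_aux_engine *eng);
	void (*release_engine)(struct fake_aux_engine *eng);
};

struct fake_aux_engine {
	const struct fake_aux_funcs *funcs;
	unsigned int inst;              /* inst/ddc/ctx now live directly here */
};

static bool my_acquire(struct fake_aux_engine *eng)
{
	printf("acquired aux engine %u\n", eng->inst);
	return true;
}

static void my_release(struct fake_aux_engine *eng)
{
	printf("released aux engine %u\n", eng->inst);
}

static const struct fake_aux_funcs my_funcs = {
	.acquire = my_acquire,
	.release_engine = my_release,
};

int main(void)
{
	struct fake_aux_engine eng = { .funcs = &my_funcs, .inst = 0 };

	if (eng.funcs->acquire(&eng))
		eng.funcs->release_engine(&eng);
	return 0;
}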
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5014-drm-amd-display-Prevent-PSR-from-being-enabled-if-in.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5014-drm-amd-display-Prevent-PSR-from-being-enabled-if-in.patch
new file mode 100644
index 00000000..868a5f56
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5014-drm-amd-display-Prevent-PSR-from-being-enabled-if-in.patch
@@ -0,0 +1,124 @@
+From 8758419d5f7103cc56263e2cc8675f5e18060aeb Mon Sep 17 00:00:00 2001
+From: Anthony Koo <Anthony.Koo@amd.com>
+Date: Tue, 17 Jul 2018 09:43:44 -0400
+Subject: [PATCH 5014/5725] drm/amd/display: Prevent PSR from being enabled if
+ initialization fails
+
+[Why]
+The PSR_SET command is sent to the microcontroller in order to initialize
+parameters needed for the PSR feature, such as telling the microcontroller
+which pipe is driving the PSR-supported panel. When this command is
+skipped or fails, the microcontroller may program the wrong thing if the
+driver tries to enable PSR.
+
+[How]
+If PSR_SET fails, do not set the psr_enable flag, indicating that the
+feature is not yet initialized.
+
+Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
+Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c | 38 ++++++++++++++++-----------
+ drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h | 2 +-
+ 2 files changed, 24 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+index 062a465..ca7989e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+@@ -150,7 +150,7 @@ static void dce_dmcu_set_psr_enable(struct dmcu *dmcu, bool enable, bool wait)
+ }
+ }
+
+-static void dce_dmcu_setup_psr(struct dmcu *dmcu,
++static bool dce_dmcu_setup_psr(struct dmcu *dmcu,
+ struct dc_link *link,
+ struct psr_context *psr_context)
+ {
+@@ -261,6 +261,8 @@ static void dce_dmcu_setup_psr(struct dmcu *dmcu,
+
+ /* notifyDMCUMsg */
+ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
++
++ return true;
+ }
+
+ static bool dce_is_dmcu_initialized(struct dmcu *dmcu)
+@@ -545,24 +547,25 @@ static void dcn10_dmcu_set_psr_enable(struct dmcu *dmcu, bool enable, bool wait)
+ * least a few frames. Should never hit the max retry assert below.
+ */
+ if (wait == true) {
+- for (retryCount = 0; retryCount <= 1000; retryCount++) {
+- dcn10_get_dmcu_psr_state(dmcu, &psr_state);
+- if (enable) {
+- if (psr_state != 0)
+- break;
+- } else {
+- if (psr_state == 0)
+- break;
++ for (retryCount = 0; retryCount <= 1000; retryCount++) {
++ dcn10_get_dmcu_psr_state(dmcu, &psr_state);
++ if (enable) {
++ if (psr_state != 0)
++ break;
++ } else {
++ if (psr_state == 0)
++ break;
++ }
++ udelay(500);
+ }
+- udelay(500);
+- }
+
+- /* assert if max retry hit */
+- ASSERT(retryCount <= 1000);
++ /* assert if max retry hit */
++ if (retryCount >= 1000)
++ ASSERT(0);
+ }
+ }
+
+-static void dcn10_dmcu_setup_psr(struct dmcu *dmcu,
++static bool dcn10_dmcu_setup_psr(struct dmcu *dmcu,
+ struct dc_link *link,
+ struct psr_context *psr_context)
+ {
+@@ -577,7 +580,7 @@ static void dcn10_dmcu_setup_psr(struct dmcu *dmcu,
+
+ /* If microcontroller is not running, do nothing */
+ if (dmcu->dmcu_state != DMCU_RUNNING)
+- return;
++ return false;
+
+ link->link_enc->funcs->psr_program_dp_dphy_fast_training(link->link_enc,
+ psr_context->psrExitLinkTrainingRequired);
+@@ -677,6 +680,11 @@ static void dcn10_dmcu_setup_psr(struct dmcu *dmcu,
+
+ /* notifyDMCUMsg */
+ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
++
++ /* waitDMCUReadyForCmd */
++ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 1, 10000);
++
++ return true;
+ }
+
+ static void dcn10_psr_wait_loop(
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
+index de60f94..4550747 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
+@@ -48,7 +48,7 @@ struct dmcu_funcs {
+ const char *src,
+ unsigned int bytes);
+ void (*set_psr_enable)(struct dmcu *dmcu, bool enable, bool wait);
+- void (*setup_psr)(struct dmcu *dmcu,
++ bool (*setup_psr)(struct dmcu *dmcu,
+ struct dc_link *link,
+ struct psr_context *psr_context);
+ void (*get_psr_state)(struct dmcu *dmcu, uint32_t *psr_state);
+--
+2.7.4
+
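The point of the signature change is that callers can now gate the psr_enable flag on the setup result. A minimal standalone sketch of that caller pattern, with made-up types rather than the real DC structures:

#include <stdbool.h>
#include <stdio.h>

struct fake_dmcu { bool running; bool psr_initialized; };

/* stand-in for dcn10_dmcu_setup_psr(): returns false when the
 * microcontroller is not running, as in the patch */
static bool setup_psr(struct fake_dmcu *dmcu)
{
	if (!dmcu->running)
		return false;
	/* ... program PSR parameters into the microcontroller here ... */
	return true;
}

int main(void)
{
	struct fake_dmcu dmcu = { .running = false, .psr_initialized = false };

	/* before the fix the flag would have been set unconditionally */
	dmcu.psr_initialized = setup_psr(&dmcu);
	printf("psr_initialized = %d\n", dmcu.psr_initialized);
	return 0;
}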
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5015-drm-amd-display-DC-3.1.59.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5015-drm-amd-display-DC-3.1.59.patch
new file mode 100644
index 00000000..74bc5c50
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5015-drm-amd-display-DC-3.1.59.patch
@@ -0,0 +1,29 @@
+From 058aa226b445c1739e57dde0d64b948ca275f2d8 Mon Sep 17 00:00:00 2001
+From: Harry Wentland <harry.wentland@amd.com>
+Date: Mon, 9 Jul 2018 17:26:34 -0400
+Subject: [PATCH 5015/5725] drm/amd/display: DC 3.1.59
+
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 49b4da4..6c7b57d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -38,7 +38,7 @@
+ #include "inc/compressor.h"
+ #include "dml/display_mode_lib.h"
+
+-#define DC_VER "3.1.58"
++#define DC_VER "3.1.59"
+
+ #define MAX_SURFACES 3
+ #define MAX_STREAMS 6
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5016-drm-amd-Add-missing-fields-in-atom_integrated_system.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5016-drm-amd-Add-missing-fields-in-atom_integrated_system.patch
new file mode 100644
index 00000000..a25495f8
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5016-drm-amd-Add-missing-fields-in-atom_integrated_system.patch
@@ -0,0 +1,62 @@
+From 3f8ccc212e45e8901ed1b6aa6f436d108ea38cdc Mon Sep 17 00:00:00 2001
+From: Harry Wentland <harry.wentland@amd.com>
+Date: Wed, 25 Jul 2018 09:45:47 -0400
+Subject: [PATCH 5016/5725] drm/amd: Add missing fields in
+ atom_integrated_system_info_v1_11
+
+This structure needs to align with the structure in the atomfirmware table.
+Update it.
+
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Dmytro Laktyushkin <dmytro.laktyushkin@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/include/atomfirmware.h | 15 +++++++++++----
+ 1 file changed, 11 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
+index 33b4de4..4bc118d 100644
+--- a/drivers/gpu/drm/amd/include/atomfirmware.h
++++ b/drivers/gpu/drm/amd/include/atomfirmware.h
+@@ -1074,7 +1074,7 @@ struct atom_integrated_system_info_v1_11
+ uint16_t dpphy_override; // bit vector, enum of atom_sysinfo_dpphy_override_def
+ uint16_t lvds_misc; // enum of atom_sys_info_lvds_misc_def
+ uint16_t backlight_pwm_hz; // pwm frequency in hz
+- uint8_t memorytype; // enum of atom_sys_mem_type
++ uint8_t memorytype; // enum of atom_dmi_t17_mem_type_def, APU memory type indication.
+ uint8_t umachannelnumber; // number of memory channels
+ uint8_t pwr_on_digon_to_de; /* all pwr sequence numbers below are in uint of 4ms */
+ uint8_t pwr_on_de_to_vary_bl;
+@@ -1084,18 +1084,25 @@ struct atom_integrated_system_info_v1_11
+ uint8_t pwr_on_vary_bl_to_blon;
+ uint8_t pwr_down_bloff_to_vary_bloff;
+ uint8_t min_allowed_bl_level;
++ uint8_t htc_hyst_limit;
++ uint8_t htc_tmp_limit;
++ uint8_t reserved1;
++ uint8_t reserved2;
+ struct atom_external_display_connection_info extdispconninfo;
+ struct atom_14nm_dpphy_dvihdmi_tuningset dvi_tuningset;
+ struct atom_14nm_dpphy_dvihdmi_tuningset hdmi_tuningset;
+ struct atom_14nm_dpphy_dvihdmi_tuningset hdmi6g_tuningset;
+- struct atom_14nm_dpphy_dp_tuningset dp_tuningset;
+- struct atom_14nm_dpphy_dp_tuningset dp_hbr3_tuningset;
++ struct atom_14nm_dpphy_dp_tuningset dp_tuningset; // rbr 1.62G dp tuning set
++ struct atom_14nm_dpphy_dp_tuningset dp_hbr3_tuningset; // HBR3 dp tuning set
+ struct atom_camera_data camera_info;
+ struct atom_hdmi_retimer_redriver_set dp0_retimer_set; //for DP0
+ struct atom_hdmi_retimer_redriver_set dp1_retimer_set; //for DP1
+ struct atom_hdmi_retimer_redriver_set dp2_retimer_set; //for DP2
+ struct atom_hdmi_retimer_redriver_set dp3_retimer_set; //for DP3
+- uint32_t reserved[108];
++ struct atom_14nm_dpphy_dp_tuningset dp_hbr_tuningset; //hbr 2.7G dp tuning set
++ struct atom_14nm_dpphy_dp_tuningset dp_hbr2_tuningset; //hbr2 5.4G dp turnig set
++ struct atom_14nm_dpphy_dp_tuningset edp_tuningset; //edp tuning set
++ uint32_t reserved[66];
+ };
+
+
+--
+2.7.4
+
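Since the driver-side struct must stay in lockstep with the firmware table, a compile-time offset check is one way to catch drift. A purely illustrative sketch, with invented fields and offsets rather than the real atom_integrated_system_info_v1_11 layout:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct fake_sysinfo {
	uint16_t dpphy_override;    /* offset 0 */
	uint16_t lvds_misc;         /* offset 2 */
	uint16_t backlight_pwm_hz;  /* offset 4 */
	uint8_t  memorytype;        /* offset 6 */
	uint8_t  umachannelnumber;  /* offset 7 */
	uint8_t  htc_hyst_limit;    /* offset 8, newly mirrored field */
	uint8_t  htc_tmp_limit;     /* offset 9, newly mirrored field */
	uint8_t  reserved1;
	uint8_t  reserved2;
	uint32_t reserved[4];
};

/* if the firmware spec says htc_tmp_limit lives at byte 9, fail the build
 * as soon as the mirror drifts */
static_assert(offsetof(struct fake_sysinfo, htc_tmp_limit) == 9,
	      "mirror no longer matches firmware table layout");

int main(void) { return 0; }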
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5017-drm-amdgpu-implement-harvesting-support-for-UVD-7.2-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5017-drm-amdgpu-implement-harvesting-support-for-UVD-7.2-.patch
new file mode 100644
index 00000000..69291789
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5017-drm-amdgpu-implement-harvesting-support-for-UVD-7.2-.patch
@@ -0,0 +1,353 @@
+From f916806866884732b15d0516d3146dd48e5e271c Mon Sep 17 00:00:00 2001
+From: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+Date: Wed, 9 Jan 2019 20:47:48 +0530
+Subject: [PATCH 5017/5725] drm/amdgpu: implement harvesting support for UVD
+ 7.2 (v3)
+
+Properly handle cases where one or more instances of the IP
+block may be harvested.
+
+v2: make sure ip_num_rings is initialized in amdgpu_queue_mgr.c
+v3: rebase on Christian's UVD changes, drop unused var
+
+Reviewed-by: James Zhu <James.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+Signed-off-by: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 17 +++++---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c | 13 +++++--
+ drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 11 +++++-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h | 4 ++
+ drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 56 +++++++++++++++++++++++++--
+ 5 files changed, 89 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index 5aab580..be1770d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -283,7 +283,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+ struct drm_crtc *crtc;
+ uint32_t ui32 = 0;
+ uint64_t ui64 = 0;
+- int i, found;
++ int i, j, found;
+ int ui32_size = sizeof(ui32);
+
+ if (!info->return_size || !info->return_pointer)
+@@ -362,7 +362,11 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+ break;
+ case AMDGPU_HW_IP_UVD:
+ type = AMD_IP_BLOCK_TYPE_UVD;
+- ring_mask |= adev->uvd.inst[0].ring.ready;
++ for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
++ if (adev->uvd.harvest_config & (1 << i))
++ continue;
++ ring_mask |= adev->uvd.inst[i].ring.ready;
++ }
+ ib_start_alignment = 64;
+ ib_size_alignment = 64;
+ break;
+@@ -375,9 +379,12 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+ break;
+ case AMDGPU_HW_IP_UVD_ENC:
+ type = AMD_IP_BLOCK_TYPE_UVD;
+- for (i = 0; i < adev->uvd.num_enc_rings; i++)
+- ring_mask |=
+- adev->uvd.inst[0].ring_enc[i].ready << i;
++ for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
++ if (adev->uvd.harvest_config & (1 << i))
++ continue;
++ for (j = 0; j < adev->uvd.num_enc_rings; j++)
++ ring_mask |= adev->uvd.inst[i].ring_enc[j].ready << j;
++ }
+ ib_start_alignment = 64;
+ ib_size_alignment = 64;
+ break;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
+index d835729..a172bba 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
+@@ -214,7 +214,7 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
+ u32 hw_ip, u32 instance, u32 ring,
+ struct amdgpu_ring **out_ring)
+ {
+- int r, ip_num_rings;
++ int i, r, ip_num_rings = 0;
+ struct amdgpu_queue_mapper *mapper = &mgr->mapper[hw_ip];
+
+ if (!adev || !mgr || !out_ring)
+@@ -243,14 +243,21 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
+ ip_num_rings = adev->sdma.num_instances;
+ break;
+ case AMDGPU_HW_IP_UVD:
+- ip_num_rings = adev->uvd.num_uvd_inst;
++ for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
++ if (!(adev->uvd.harvest_config & (1 << i)))
++ ip_num_rings++;
++ }
+ break;
+ case AMDGPU_HW_IP_VCE:
+ ip_num_rings = adev->vce.num_rings;
+ break;
+ case AMDGPU_HW_IP_UVD_ENC:
++ for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
++ if (!(adev->uvd.harvest_config & (1 << i)))
++ ip_num_rings++;
++ }
+ ip_num_rings =
+- adev->uvd.num_enc_rings * adev->uvd.num_uvd_inst;
++ adev->uvd.num_enc_rings * ip_num_rings;
+ break;
+ case AMDGPU_HW_IP_VCN_DEC:
+ ip_num_rings = 1;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+index 420a533..b933d1f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+@@ -255,7 +255,8 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
+ bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
+
+ for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+-
++ if (adev->uvd.harvest_config & (1 << j))
++ continue;
+ r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst[j].vcpu_bo,
+ &adev->uvd.inst[j].gpu_addr, &adev->uvd.inst[j].cpu_addr);
+@@ -311,6 +312,8 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
+ &adev->uvd.entity);
+
+ for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
++ if (adev->uvd.harvest_config & (1 << j))
++ continue;
+ kfree(adev->uvd.inst[j].saved_bo);
+
+ amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
+@@ -347,6 +350,8 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
+
+
+ for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
++ if (adev->uvd.harvest_config & (1 << j))
++ continue;
+ if (adev->uvd.inst[j].vcpu_bo == NULL)
+ continue;
+
+@@ -371,6 +376,8 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
+ int i;
+
+ for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
++ if (adev->uvd.harvest_config & (1 << i))
++ continue;
+ if (adev->uvd.inst[i].vcpu_bo == NULL)
+ return -EINVAL;
+
+@@ -1168,6 +1175,8 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
+ unsigned fences = 0, i, j;
+
+ for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
++ if (adev->uvd.harvest_config & (1 << i))
++ continue;
+ fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring);
+ for (j = 0; j < adev->uvd.num_enc_rings; ++j) {
+ fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+index 6687228..33c5f80 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+@@ -48,6 +48,9 @@ struct amdgpu_uvd_inst {
+ uint32_t srbm_soft_reset;
+ };
+
++#define AMDGPU_UVD_HARVEST_UVD0 (1 << 0)
++#define AMDGPU_UVD_HARVEST_UVD1 (1 << 1)
++
+ struct amdgpu_uvd {
+ const struct firmware *fw; /* UVD firmware */
+ unsigned fw_version;
+@@ -61,6 +64,7 @@ struct amdgpu_uvd {
+ atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
+ struct drm_sched_entity entity;
+ struct delayed_work idle_work;
++ unsigned harvest_config;
+ };
+
+ int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+index 87d6a59..2a583d8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+@@ -41,6 +41,12 @@
+ #include "mmhub/mmhub_1_0_sh_mask.h"
+ #include "ivsrcid/uvd/irqsrcs_uvd_7_0.h"
+
++#define mmUVD_PG0_CC_UVD_HARVESTING 0x00c7
++#define mmUVD_PG0_CC_UVD_HARVESTING_BASE_IDX 1
++//UVD_PG0_CC_UVD_HARVESTING
++#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE__SHIFT 0x1
++#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK 0x00000002L
++
+ #define UVD7_MAX_HW_INSTANCES_VEGA20 2
+
+ static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev);
+@@ -370,10 +376,25 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+ static int uvd_v7_0_early_init(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+- if (adev->asic_type == CHIP_VEGA20)
++
++ if (adev->asic_type == CHIP_VEGA20) {
++ u32 harvest;
++ int i;
++
+ adev->uvd.num_uvd_inst = UVD7_MAX_HW_INSTANCES_VEGA20;
+- else
++ for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
++ harvest = RREG32_SOC15(UVD, i, mmUVD_PG0_CC_UVD_HARVESTING);
++ if (harvest & UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK) {
++ adev->uvd.harvest_config |= 1 << i;
++ }
++ }
++ if (adev->uvd.harvest_config == (AMDGPU_UVD_HARVEST_UVD0 |
++ AMDGPU_UVD_HARVEST_UVD1))
++ /* both instances are harvested, disable the block */
++ return -ENOENT;
++ } else {
+ adev->uvd.num_uvd_inst = 1;
++ }
+
+ if (amdgpu_sriov_vf(adev))
+ adev->uvd.num_enc_rings = 1;
+@@ -393,6 +414,8 @@ static int uvd_v7_0_sw_init(void *handle)
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
++ if (adev->uvd.harvest_config & (1 << j))
++ continue;
+ /* UVD TRAP */
+ r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->uvd.inst[j].irq);
+ if (r)
+@@ -425,6 +448,8 @@ static int uvd_v7_0_sw_init(void *handle)
+ return r;
+
+ for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
++ if (adev->uvd.harvest_config & (1 << j))
++ continue;
+ if (!amdgpu_sriov_vf(adev)) {
+ ring = &adev->uvd.inst[j].ring;
+ sprintf(ring->name, "uvd<%d>", j);
+@@ -472,6 +497,8 @@ static int uvd_v7_0_sw_fini(void *handle)
+ return r;
+
+ for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
++ if (adev->uvd.harvest_config & (1 << j))
++ continue;
+ for (i = 0; i < adev->uvd.num_enc_rings; ++i)
+ amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
+ }
+@@ -500,6 +527,8 @@ static int uvd_v7_0_hw_init(void *handle)
+ goto done;
+
+ for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
++ if (adev->uvd.harvest_config & (1 << j))
++ continue;
+ ring = &adev->uvd.inst[j].ring;
+
+ if (!amdgpu_sriov_vf(adev)) {
+@@ -579,8 +608,11 @@ static int uvd_v7_0_hw_fini(void *handle)
+ DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
+ }
+
+- for (i = 0; i < adev->uvd.num_uvd_inst; ++i)
++ for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
++ if (adev->uvd.harvest_config & (1 << i))
++ continue;
+ adev->uvd.inst[i].ring.ready = false;
++ }
+
+ return 0;
+ }
+@@ -623,6 +655,8 @@ static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
+ int i;
+
+ for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
++ if (adev->uvd.harvest_config & (1 << i))
++ continue;
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+ lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
+@@ -695,6 +729,8 @@ static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
+ WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);
+
+ for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
++ if (adev->uvd.harvest_config & (1 << i))
++ continue;
+ WDOORBELL32(adev->uvd.inst[i].ring_enc[0].doorbell_index, 0);
+ adev->wb.wb[adev->uvd.inst[i].ring_enc[0].wptr_offs] = 0;
+ adev->uvd.inst[i].ring_enc[0].wptr = 0;
+@@ -751,6 +787,8 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
+ init_table += header->uvd_table_offset;
+
+ for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
++ if (adev->uvd.harvest_config & (1 << i))
++ continue;
+ ring = &adev->uvd.inst[i].ring;
+ ring->wptr = 0;
+ size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
+@@ -890,6 +928,8 @@ static int uvd_v7_0_start(struct amdgpu_device *adev)
+ int i, j, k, r;
+
+ for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
++ if (adev->uvd.harvest_config & (1 << k))
++ continue;
+ /* disable DPG */
+ WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_POWER_STATUS), 0,
+ ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+@@ -902,6 +942,8 @@ static int uvd_v7_0_start(struct amdgpu_device *adev)
+ uvd_v7_0_mc_resume(adev);
+
+ for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
++ if (adev->uvd.harvest_config & (1 << k))
++ continue;
+ ring = &adev->uvd.inst[k].ring;
+ /* disable clock gating */
+ WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_CGC_CTRL), 0,
+@@ -1069,6 +1111,8 @@ static void uvd_v7_0_stop(struct amdgpu_device *adev)
+ uint8_t i = 0;
+
+ for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
++ if (adev->uvd.harvest_config & (1 << i))
++ continue;
+ /* force RBC into idle state */
+ WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, 0x11010101);
+
+@@ -1807,6 +1851,8 @@ static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
+ int i;
+
+ for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
++ if (adev->uvd.harvest_config & (1 << i))
++ continue;
+ adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs;
+ adev->uvd.inst[i].ring.me = i;
+ DRM_INFO("UVD(%d) is enabled in VM mode\n", i);
+@@ -1818,6 +1864,8 @@ static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev)
+ int i, j;
+
+ for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
++ if (adev->uvd.harvest_config & (1 << j))
++ continue;
+ for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
+ adev->uvd.inst[j].ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
+ adev->uvd.inst[j].ring_enc[i].me = j;
+@@ -1837,6 +1885,8 @@ static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
+ int i;
+
+ for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
++ if (adev->uvd.harvest_config & (1 << i))
++ continue;
+ adev->uvd.inst[i].irq.num_types = adev->uvd.num_enc_rings + 1;
+ adev->uvd.inst[i].irq.funcs = &uvd_v7_0_irq_funcs;
+ }
+--
+2.7.4
+
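The harvesting support boils down to a bitmask that every per-instance loop consults, plus an early bail-out when all instances are fused off. A standalone sketch of that pattern, using stand-in names rather than the amdgpu structures:

#include <stdio.h>

#define HARVEST_UVD0 (1 << 0)
#define HARVEST_UVD1 (1 << 1)
#define NUM_UVD_INST 2

int main(void)
{
	unsigned int harvest_config = HARVEST_UVD1; /* pretend instance 1 is fused off */
	int i, usable = 0;

	/* all instances harvested: disable the whole IP block, as in early_init */
	if (harvest_config == (HARVEST_UVD0 | HARVEST_UVD1)) {
		printf("all instances harvested, returning -ENOENT\n");
		return 0;
	}

	for (i = 0; i < NUM_UVD_INST; i++) {
		if (harvest_config & (1 << i))
			continue; /* the same skip repeated in every per-instance loop */
		usable++;
		printf("setting up UVD instance %d\n", i);
	}
	printf("%d usable instance(s)\n", usable);
	return 0;
}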
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5018-drm-amdgpu-correct-evict-flag-for-bo-move.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5018-drm-amdgpu-correct-evict-flag-for-bo-move.patch
new file mode 100644
index 00000000..247f42e7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5018-drm-amdgpu-correct-evict-flag-for-bo-move.patch
@@ -0,0 +1,43 @@
+From 6fb15be88fbb87bd3a565f14fba821a54b2fd0e9 Mon Sep 17 00:00:00 2001
+From: Junwei Zhang <Jerry.Zhang@amd.com>
+Date: Thu, 26 Jul 2018 18:00:13 +0800
+Subject: [PATCH 5018/5725] drm/amdgpu: correct evict flag for bo move
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Pass the evict flag instead of hard-coding it.
+
+Signed-off-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index a164c52..38493d4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -600,7 +600,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
+ }
+
+ /* blit VRAM to GTT */
+- r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, &tmp_mem, old_mem);
++ r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, &tmp_mem, old_mem);
+ if (unlikely(r)) {
+ goto out_cleanup;
+ }
+@@ -652,7 +652,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
+ }
+
+ /* copy to VRAM */
+- r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, new_mem, old_mem);
++ r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, new_mem, old_mem);
+ if (unlikely(r)) {
+ goto out_cleanup;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5019-drm-amdgpu-clean-up-the-superfluous-space-and-align-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5019-drm-amdgpu-clean-up-the-superfluous-space-and-align-.patch
new file mode 100644
index 00000000..df5e5b6e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5019-drm-amdgpu-clean-up-the-superfluous-space-and-align-.patch
@@ -0,0 +1,281 @@
+From 470b5d8232b68619786278298d15056a3ab7296f Mon Sep 17 00:00:00 2001
+From: Huang Rui <ray.huang@amd.com>
+Date: Thu, 26 Jul 2018 14:08:03 +0800
+Subject: [PATCH 5019/5725] drm/amdgpu: clean up the superfluous space and
+ align the comment text for amdgpu_ttm
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This patch cleans up spaces and aligns the text to refine the comments for
+amdgpu_ttm.
+
+Signed-off-by: Huang Rui <ray.huang@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 115 +++++++++++++++-----------------
+ 1 file changed, 55 insertions(+), 60 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index 38493d4..4db7bf5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -92,11 +92,9 @@ static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
+ }
+
+ /**
+- * amdgpu_ttm_global_init - Initialize global TTM memory reference
+- * structures.
++ * amdgpu_ttm_global_init - Initialize global TTM memory reference structures.
+ *
+- * @adev: AMDGPU device for which the global structures need to be
+- * registered.
++ * @adev: AMDGPU device for which the global structures need to be registered.
+ *
+ * This is called as part of the AMDGPU ttm init from amdgpu_ttm_init()
+ * during bring up.
+@@ -162,13 +160,12 @@ static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
+ }
+
+ /**
+- * amdgpu_init_mem_type - Initialize a memory manager for a specific
+- * type of memory request.
++ * amdgpu_init_mem_type - Initialize a memory manager for a specific type of
++ * memory request.
+ *
+- * @bdev: The TTM BO device object (contains a reference to
+- * amdgpu_device)
+- * @type: The type of memory requested
+- * @man:
++ * @bdev: The TTM BO device object (contains a reference to amdgpu_device)
++ * @type: The type of memory requested
++ * @man: The memory type manager for each domain
+ *
+ * This is called by ttm_bo_init_mm() when a buffer object is being
+ * initialized.
+@@ -311,8 +308,8 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
+ /**
+ * amdgpu_verify_access - Verify access for a mmap call
+ *
+- * @bo: The buffer object to map
+- * @filp: The file pointer from the process performing the mmap
++ * @bo: The buffer object to map
++ * @filp: The file pointer from the process performing the mmap
+ *
+ * This is called by ttm_bo_mmap() to verify whether a process
+ * has the right to mmap a BO to their process space.
+@@ -338,12 +335,11 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+ /**
+ * amdgpu_move_null - Register memory for a buffer object
+ *
+- * @bo: The bo to assign the memory to
+- * @new_mem: The memory to be assigned.
++ * @bo: The bo to assign the memory to
++ * @new_mem: The memory to be assigned.
+ *
+- * Assign the memory from new_mem to the memory of the buffer object
+- * bo.
+-*/
++ * Assign the memory from new_mem to the memory of the buffer object bo.
++ */
+ static void amdgpu_move_null(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *new_mem)
+ {
+@@ -355,9 +351,13 @@ static void amdgpu_move_null(struct ttm_buffer_object *bo,
+ }
+
+ /**
+- * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT
+- * buffer.
+-*/
++ * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT buffer.
++ *
++ * @bo: The bo to assign the memory to.
++ * @mm_node: Memory manager node for drm allocator.
++ * @mem: The region where the bo resides.
++ *
++ */
+ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
+ struct drm_mm_node *mm_node,
+ struct ttm_mem_reg *mem)
+@@ -372,10 +372,12 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
+ }
+
+ /**
+- * amdgpu_find_mm_node - Helper function finds the drm_mm_node
+- * corresponding to @offset. It also modifies
+- * the offset to be within the drm_mm_node
+- * returned
++ * amdgpu_find_mm_node - Helper function finds the drm_mm_node corresponding to
++ * @offset. It also modifies the offset to be within the drm_mm_node returned
++ *
++ * @mem: The region where the bo resides.
++ * @offset: The offset that drm_mm_node is used for finding.
++ *
+ */
+ static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
+ unsigned long *offset)
+@@ -517,8 +519,8 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
+ /**
+ * amdgpu_move_blit - Copy an entire buffer to another buffer
+ *
+- * This is a helper called by amdgpu_bo_move() and
+- * amdgpu_move_vram_ram() to help move buffers to and from VRAM.
++ * This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to
++ * help move buffers to and from VRAM.
+ */
+ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
+ bool evict, bool no_wait_gpu,
+@@ -834,8 +836,8 @@ struct amdgpu_ttm_tt {
+ };
+
+ /**
+- * amdgpu_ttm_tt_get_user_pages - Pin pages of memory pointed to
+- * by a USERPTR pointer to memory
++ * amdgpu_ttm_tt_get_user_pages - Pin pages of memory pointed to by a USERPTR
++ * pointer to memory
+ *
+ * Called by amdgpu_gem_userptr_ioctl() and amdgpu_cs_parser_bos().
+ * This provides a wrapper around the get_user_pages() call to provide
+@@ -858,8 +860,10 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
+ down_read(&mm->mmap_sem);
+
+ if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
+- /* check that we only use anonymous memory
+- to prevent problems with writeback */
++ /*
++ * check that we only use anonymous memory to prevent problems
++ * with writeback
++ */
+ unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
+ struct vm_area_struct *vma;
+
+@@ -905,10 +909,9 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
+ }
+
+ /**
+- * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages
+- * as necessary.
++ * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary.
+ *
+- * Called by amdgpu_cs_list_validate(). This creates the page list
++ * Called by amdgpu_cs_list_validate(). This creates the page list
+ * that backs user memory and will ultimately be mapped into the device
+ * address space.
+ */
+@@ -950,8 +953,7 @@ void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm)
+ }
+
+ /**
+- * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the
+- * user pages
++ * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
+ *
+ * Called by amdgpu_ttm_backend_bind()
+ **/
+@@ -1317,8 +1319,8 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
+ }
+
+ /**
+- * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt
+- * for the current task
++ * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
++ * task
+ *
+ * @ttm: The ttm_tt object to bind this userptr object to
+ * @addr: The address in the current tasks VM space to use
+@@ -1368,9 +1370,8 @@ struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
+ }
+
+ /**
+- * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lays
+- * inside an address range for the
+- * current task.
++ * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lays inside an
++ * address range for the current task.
+ *
+ */
+ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
+@@ -1408,8 +1409,7 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
+ }
+
+ /**
+- * amdgpu_ttm_tt_userptr_invalidated - Has the ttm_tt object been
+- * invalidated?
++ * amdgpu_ttm_tt_userptr_invalidated - Has the ttm_tt object been invalidated?
+ */
+ bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
+ int *last_invalidated)
+@@ -1422,10 +1422,8 @@ bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
+ }
+
+ /**
+- * amdgpu_ttm_tt_userptr_needs_pages - Have the pages backing this
+- * ttm_tt object been invalidated
+- * since the last time they've
+- * been set?
++ * amdgpu_ttm_tt_userptr_needs_pages - Have the pages backing this ttm_tt object
++ * been invalidated since the last time they've been set?
+ */
+ bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm)
+ {
+@@ -1484,15 +1482,14 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
+ }
+
+ /**
+- * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict
+- * a buffer object.
++ * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer
++ * object.
+ *
+- * Return true if eviction is sensible. Called by
+- * ttm_mem_evict_first() on behalf of ttm_bo_mem_force_space()
+- * which tries to evict buffer objects until it can find space
+- * for a new object and by ttm_bo_force_list_clean() which is
++ * Return true if eviction is sensible. Called by ttm_mem_evict_first() on
++ * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until
++ * it can find space for a new object and by ttm_bo_force_list_clean() which is
+ * used to clean out a memory space.
+-*/
++ */
+ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
+ const struct ttm_place *place)
+ {
+@@ -1540,8 +1537,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
+ }
+
+ /**
+- * amdgpu_ttm_access_memory - Read or Write memory that backs a
+- * buffer object.
++ * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
+ *
+ * @bo: The buffer object to read/write
+ * @offset: Offset into buffer object
+@@ -1892,8 +1888,8 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
+ }
+
+ /**
+- * amdgpu_ttm_init - Init the memory management (ttm) as well as
+- * various gtt/vram related fields.
++ * amdgpu_ttm_init - Init the memory management (ttm) as well as various
++ * gtt/vram related fields.
+ *
+ * This initializes all of the memory space pools that the TTM layer
+ * will need such as the GTT space (system memory mapped to the device),
+@@ -2050,9 +2046,8 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
+ }
+
+ /**
+- * amdgpu_ttm_late_init - Handle any late initialization for
+- * amdgpu_ttm
+-*/
++ * amdgpu_ttm_late_init - Handle any late initialization for amdgpu_ttm
++ */
+ void amdgpu_ttm_late_init(struct amdgpu_device *adev)
+ {
+ /* return the VGA stolen memory (if any) back to VRAM */
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5020-drm-amd-pp-Polaris12-Fix-a-chunk-of-registers-missed.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5020-drm-amd-pp-Polaris12-Fix-a-chunk-of-registers-missed.patch
new file mode 100644
index 00000000..5cc5f898
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5020-drm-amd-pp-Polaris12-Fix-a-chunk-of-registers-missed.patch
@@ -0,0 +1,75 @@
+From 9c878a05c9b7d664a7b1fb14510fb2f03a6544c7 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <rex.zhu@amd.com>
+Date: Fri, 20 Jul 2018 16:26:46 +0800
+Subject: [PATCH 5020/5725] drm/amd/pp/Polaris12: Fix a chunk of registers
+ missed to program
+
+The DIDTConfig_Polaris12[] table was missing a big chunk of data.
+
+Pointed out by aidan.fabius <aidan.fabius@coreavi.com>
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+---
+ .../gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c | 43 ++++++++++++++++++++++
+ 1 file changed, 43 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+index c952845..5e19f59 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+@@ -403,6 +403,49 @@ static const struct gpu_pt_config_reg DIDTConfig_Polaris12[] = {
+ { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
++
++ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++
++ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++
++ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++
++ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++
++ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, GPU_CONFIGREG_DIDT_IND },
++
++ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++
++ { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
++ { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
++
++ { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, GPU_CONFIGREG_DIDT_IND },
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5021-drm-amd-pp-Delete-unused-temp-variables.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5021-drm-amd-pp-Delete-unused-temp-variables.patch
new file mode 100644
index 00000000..75f57b2e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5021-drm-amd-pp-Delete-unused-temp-variables.patch
@@ -0,0 +1,72 @@
+From 873140bf9f9b332403e21ff50b8f018d14b258b1 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <rex.zhu@amd.com>
+Date: Fri, 20 Jul 2018 18:19:00 +0800
+Subject: [PATCH 5021/5725] drm/amd/pp: Delete unused temp variables
+
+Only delete the dead temp variables in Polaris.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../drm/amd/powerplay/smumgr/polaris10_smumgr.c | 22 ++++++----------------
+ 1 file changed, 6 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+index a4ce199..1276f16 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+@@ -1204,7 +1204,6 @@ static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ SMIO_Pattern vol_level;
+ uint32_t mvdd;
+- uint16_t us_mvdd;
+
+ table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+@@ -1255,16 +1254,11 @@ static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
+ "in Clock Dependency Table",
+ );
+
+- us_mvdd = 0;
+- if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
+- (data->mclk_dpm_key_disabled))
+- us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
+- else {
+- if (!polaris10_populate_mvdd_value(hwmgr,
++ if (!((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
++ (data->mclk_dpm_key_disabled)))
++ polaris10_populate_mvdd_value(hwmgr,
+ data->dpm_table.mclk_table.dpm_levels[0].value,
+- &vol_level))
+- us_mvdd = vol_level.Voltage;
+- }
++ &vol_level);
+
+ if (0 == polaris10_populate_mvdd_value(hwmgr, 0, &vol_level))
+ table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage);
+@@ -1517,7 +1511,7 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
+ uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+
+- uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0;
++ uint8_t i, stretch_amount, volt_offset = 0;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+@@ -1568,11 +1562,7 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
+
+ smu_data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6;
+ /* Populate CKS Lookup Table */
+- if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
+- stretch_amount2 = 0;
+- else if (stretch_amount == 3 || stretch_amount == 4)
+- stretch_amount2 = 1;
+- else {
++ if (stretch_amount == 0 || stretch_amount > 5) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ClockStretcher);
+ PP_ASSERT_WITH_CODE(false,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5022-drm-amd-pp-Convert-voltage-unit-in-mV-4-to-mV-on-CZ-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5022-drm-amd-pp-Convert-voltage-unit-in-mV-4-to-mV-on-CZ-.patch
new file mode 100644
index 00000000..1620bfd4
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5022-drm-amd-pp-Convert-voltage-unit-in-mV-4-to-mV-on-CZ-.patch
@@ -0,0 +1,48 @@
+From c6a5fc9ba8c300ecf443a8eb7ad58ae859d2e7c7 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <rex.zhu@amd.com>
+Date: Wed, 25 Jul 2018 11:45:03 +0800
+Subject: [PATCH 5022/5725] drm/amd/pp: Convert voltage unit in mV*4 to mV on
+ CZ/ST
+
+The voltage shown in debugfs and hwmon should be in mV.
+
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
+index 288802f..0adfc53 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
+@@ -244,6 +244,7 @@ static int smu8_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
+ return 0;
+ }
+
++/* convert from 8bit vid to real voltage in mV*4 */
+ static uint32_t smu8_convert_8Bit_index_to_voltage(
+ struct pp_hwmgr *hwmgr, uint16_t voltage)
+ {
+@@ -1702,13 +1703,13 @@ static int smu8_read_sensor(struct pp_hwmgr *hwmgr, int idx,
+ case AMDGPU_PP_SENSOR_VDDNB:
+ tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) &
+ CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
+- vddnb = smu8_convert_8Bit_index_to_voltage(hwmgr, tmp);
++ vddnb = smu8_convert_8Bit_index_to_voltage(hwmgr, tmp) / 4;
+ *((uint32_t *)value) = vddnb;
+ return 0;
+ case AMDGPU_PP_SENSOR_VDDGFX:
+ tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) &
+ CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
+- vddgfx = smu8_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp);
++ vddgfx = smu8_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp) / 4;
+ *((uint32_t *)value) = vddgfx;
+ return 0;
+ case AMDGPU_PP_SENSOR_UVD_VCLK:
+--
+2.7.4
+
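The fix is a unit conversion at the reporting boundary: the helper yields mV*4, so the hwmon/debugfs path divides by 4. A standalone sketch, with an assumed VID formula that is not the real smu8_convert_8Bit_index_to_voltage() math:

#include <stdint.h>
#include <stdio.h>

/* assumed encoding from 8-bit VID to mV*4; the real conversion lives in
 * smu8_convert_8Bit_index_to_voltage() and may differ */
static uint32_t vid_to_mv_x4(uint8_t vid)
{
	return 6200 - (uint32_t)vid * 25;
}

int main(void)
{
	uint8_t vid = 0x40;
	uint32_t raw = vid_to_mv_x4(vid);

	printf("raw value (mV*4): %u\n", raw);
	printf("value reported to hwmon (mV): %u\n", raw / 4); /* the fix */
	return 0;
}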
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5023-drm-amdgpu-fix-a-reversed-condition.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5023-drm-amdgpu-fix-a-reversed-condition.patch
new file mode 100644
index 00000000..42a99523
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5023-drm-amdgpu-fix-a-reversed-condition.patch
@@ -0,0 +1,33 @@
+From fd4c73f45bfb51dd32b3b4698cb4f28f4c77472c Mon Sep 17 00:00:00 2001
+From: Rex Zhu <rex.zhu@amd.com>
+Date: Wed, 25 Jul 2018 11:51:46 +0800
+Subject: [PATCH 5023/5725] drm/amdgpu: fix a reversed condition
+
+This test was reversed, so it ended up preventing the vddnb value from
+being read via hwmon on APUs.
+
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+index a3ab2a2..14bb1b3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+@@ -1185,7 +1185,7 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
+ int r, size = sizeof(vddnb);
+
+ /* only APUs have vddnb */
+- if (adev->flags & AMD_IS_APU)
++ if (!(adev->flags & AMD_IS_APU))
+ return -EINVAL;
+
+ /* Can't get voltage when the card is off */
+--
+2.7.4
+
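The inverted guard meant the early -EINVAL return fired on APUs, the only parts that have the sensor. A tiny standalone illustration of the corrected check, with an assumed flag value rather than the real AMD_IS_APU definition:

#include <stdio.h>

#define FAKE_IS_APU (1UL << 2) /* assumed bit, not the real AMD_IS_APU value */

/* stand-in for amdgpu_hwmon_show_vddnb(): only APUs have vddnb */
static int show_vddnb(unsigned long flags)
{
	/* before the fix the '!' was missing, so APUs (the only parts with
	 * the sensor) were the ones rejected */
	if (!(flags & FAKE_IS_APU))
		return -22; /* -EINVAL */
	return 0;           /* would read and report the sensor here */
}

int main(void)
{
	printf("APU:  %d\n", show_vddnb(FAKE_IS_APU));
	printf("dGPU: %d\n", show_vddnb(0));
	return 0;
}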
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5024-drm-amdgpu-add-proper-error-handling-to-amdgpu_bo_li.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5024-drm-amdgpu-add-proper-error-handling-to-amdgpu_bo_li.patch
new file mode 100644
index 00000000..66cf1f87
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5024-drm-amdgpu-add-proper-error-handling-to-amdgpu_bo_li.patch
@@ -0,0 +1,116 @@
+From c9b633fc502fee8dda4cb23aee4b76269d3d71c2 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Fri, 27 Jul 2018 15:32:04 +0200
+Subject: [PATCH 5024/5725] drm/amdgpu: add proper error handling to
+ amdgpu_bo_list_get
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Otherwise we silently don't use a BO list when the handle is invalid.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 4 ++--
+ drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 28 ++++++++++------------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 11 ++++++++---
+ 3 files changed, 20 insertions(+), 23 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 557c964..09d61b2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -736,8 +736,8 @@ struct amdgpu_bo_list {
+ struct amdgpu_bo_list_entry *array;
+ };
+
+-struct amdgpu_bo_list *
+-amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id);
++int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
++ struct amdgpu_bo_list **result);
+ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
+ struct list_head *validated);
+ void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+index 6ad79dd6..7b4ce1d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+@@ -180,27 +180,20 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
+ return r;
+ }
+
+-struct amdgpu_bo_list *
+-amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id)
++int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
++ struct amdgpu_bo_list **result)
+ {
+- struct amdgpu_bo_list *result;
+-
+ rcu_read_lock();
+- result = idr_find(&fpriv->bo_list_handles, id);
++ *result = idr_find(&fpriv->bo_list_handles, id);
+
+- if (result) {
+- if (kref_get_unless_zero(&result->refcount)) {
+- rcu_read_unlock();
+- mutex_lock(&result->lock);
+- } else {
+- rcu_read_unlock();
+- result = NULL;
+- }
+- } else {
++ if (*result && kref_get_unless_zero(&(*result)->refcount)) {
+ rcu_read_unlock();
++ mutex_lock(&(*result)->lock);
++ return 0;
+ }
+
+- return result;
++ rcu_read_unlock();
++ return -ENOENT;
+ }
+
+ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
+@@ -334,9 +327,8 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
+ break;
+
+ case AMDGPU_BO_LIST_OP_UPDATE:
+- r = -ENOENT;
+- list = amdgpu_bo_list_get(fpriv, handle);
+- if (!list)
++ r = amdgpu_bo_list_get(fpriv, handle, &list);
++ if (r)
+ goto error_free;
+
+ r = amdgpu_bo_list_set(adev, filp, list, info,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index 653c61f..5a466ce 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -572,11 +572,16 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
+ INIT_LIST_HEAD(&p->validated);
+
+ /* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
+- if (!p->bo_list)
+- p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
+- else
++ if (p->bo_list) {
+ mutex_lock(&p->bo_list->lock);
+
++ } else if (cs->in.bo_list_handle) {
++ r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
++ &p->bo_list);
++ if (r)
++ return r;
++ }
++
+ if (p->bo_list) {
+ amdgpu_bo_list_get_list(p->bo_list, &p->validated);
+ if (p->bo_list->first_userptr != p->bo_list->num_entries)
+--
+2.7.4
+
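The interface now reports failure through a return code and hands the list back via an out parameter, so callers cannot silently proceed with a bad handle. A standalone sketch of that calling convention, with stand-in types rather than the amdgpu structures:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct bo_list { int id; };

static struct bo_list lists[2] = { { 1 }, { 2 } };

/* stand-in for amdgpu_bo_list_get(): error code out, object via pointer */
static int bo_list_get(int handle, struct bo_list **result)
{
	if (handle < 1 || handle > 2) {
		*result = NULL;
		return -ENOENT; /* a bad handle is now a hard error */
	}
	*result = &lists[handle - 1];
	return 0;
}

int main(void)
{
	struct bo_list *list;
	int r = bo_list_get(42, &list);

	if (r)
		printf("invalid handle rejected: %d\n", r);
	return 0;
}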
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5025-drm-amdgpu-fix-total-size-calculation.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5025-drm-amdgpu-fix-total-size-calculation.patch
new file mode 100644
index 00000000..09df2cfe
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5025-drm-amdgpu-fix-total-size-calculation.patch
@@ -0,0 +1,37 @@
+From fd4307bf2d02f9594575c5d0806d300d5632165d Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Mon, 30 Jul 2018 15:33:34 +0200
+Subject: [PATCH 5025/5725] drm/amdgpu: fix total size calculation
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+long might only be 32bit in size and we can easily use more than 4GB
+here.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+Acked-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+index 7b4ce1d..2247486 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+@@ -105,9 +105,9 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
+ struct amdgpu_bo *oa_obj = adev->gds.oa_gfx_bo;
+
+ unsigned last_entry = 0, first_userptr = num_entries;
++ uint64_t total_size = 0;
+ unsigned i;
+ int r;
+- unsigned long total_size = 0;
+
+ array = kvmalloc_array(num_entries, sizeof(struct amdgpu_bo_list_entry), GFP_KERNEL);
+ if (!array)
+--
+2.7.4
+
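The fix above matters on 32-bit builds, where 'unsigned long' is only 32 bits wide, so accumulating buffer-object sizes past 4 GiB silently wraps. A small userspace illustration (not amdgpu code) of the difference:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t total32 = 0;	/* models 'unsigned long' on a 32-bit kernel */
	uint64_t total64 = 0;	/* the width the patch switches to */
	uint64_t one_gib = 1024ULL * 1024 * 1024;
	int i;

	/* Accumulate five 1 GiB buffers. */
	for (i = 0; i < 5; i++) {
		total32 += (uint32_t)one_gib;
		total64 += one_gib;
	}

	printf("32-bit sum: %u\n", (unsigned int)total32);	/* wraps back to 1 GiB */
	printf("64-bit sum: %llu\n", (unsigned long long)total64);	/* 5 GiB */
	return 0;
}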
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5026-drm-amdgpu-return-error-if-both-BOs-and-bo_list-hand.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5026-drm-amdgpu-return-error-if-both-BOs-and-bo_list-hand.patch
new file mode 100644
index 00000000..beb1dd50
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5026-drm-amdgpu-return-error-if-both-BOs-and-bo_list-hand.patch
@@ -0,0 +1,48 @@
+From 819563fe631ebcd566e041dd761d3d4e60709e6e Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Mon, 30 Jul 2018 16:18:54 +0200
+Subject: [PATCH 5026/5725] drm/amdgpu: return error if both BOs and bo_list
+ handle is given
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Return -EINVAL when both the BOs and a list handle are provided in
+the IOCTL.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index 5a466ce..fae6b58 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -572,14 +572,17 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
+ INIT_LIST_HEAD(&p->validated);
+
+ /* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
+- if (p->bo_list) {
+- mutex_lock(&p->bo_list->lock);
++ if (cs->in.bo_list_handle) {
++ if (p->bo_list)
++ return -EINVAL;
+
+- } else if (cs->in.bo_list_handle) {
+ r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
+ &p->bo_list);
+ if (r)
+ return r;
++
++ } else if (p->bo_list) {
++ mutex_lock(&p->bo_list->lock);
+ }
+
+ if (p->bo_list) {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5027-drm-amdgpu-add-new-amdgpu_vm_bo_trace_cs-function-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5027-drm-amdgpu-add-new-amdgpu_vm_bo_trace_cs-function-v2.patch
new file mode 100644
index 00000000..b8fb2957
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5027-drm-amdgpu-add-new-amdgpu_vm_bo_trace_cs-function-v2.patch
@@ -0,0 +1,116 @@
+From 976785ec319cfadd17f5ead410d84bec162dbcb4 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Fri, 27 Jul 2018 16:56:34 +0200
+Subject: [PATCH 5027/5725] drm/amdgpu: add new amdgpu_vm_bo_trace_cs()
+ function v2
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This allows us to trace all VM ranges which should be valid inside a CS.
+
+v2: dump mappings without BO as well
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+Reviewed-and-tested-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com> (v1)
+Reviewed-by: Huang Rui <ray.huang@amd.com> (v1)
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2 ++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 5 +++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 29 +++++++++++++++++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 1 +
+ 4 files changed, 37 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index fae6b58..08cb8f5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -1225,6 +1225,7 @@ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
+ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
+ union drm_amdgpu_cs *cs)
+ {
++ struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+ struct amdgpu_ring *ring = p->ring;
+ struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
+ enum drm_sched_priority priority;
+@@ -1277,6 +1278,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
+ amdgpu_job_free_resources(job);
+
+ trace_amdgpu_cs_ioctl(job);
++ amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
+ priority = job->base.s_priority;
+ drm_sched_entity_push_job(&job->base, entity);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+index 11f262f..7206a00 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+@@ -314,6 +314,11 @@ DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_mapping,
+ TP_ARGS(mapping)
+ );
+
++DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_cs,
++ TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
++ TP_ARGS(mapping)
++);
++
+ TRACE_EVENT(amdgpu_vm_set_ptes,
+ TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
+ uint32_t incr, uint64_t flags),
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 852956c..5de844d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -2357,6 +2357,35 @@ struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
+ }
+
+ /**
++ * amdgpu_vm_bo_trace_cs - trace all reserved mappings
++ *
++ * @vm: the requested vm
++ * @ticket: CS ticket
++ *
++ * Trace all mappings of BOs reserved during a command submission.
++ */
++void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
++{
++ struct amdgpu_bo_va_mapping *mapping;
++
++ if (!trace_amdgpu_vm_bo_cs_enabled())
++ return;
++
++ for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
++ mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
++ if (mapping->bo_va && mapping->bo_va->base.bo) {
++ struct amdgpu_bo *bo;
++
++ bo = mapping->bo_va->base.bo;
++ if (READ_ONCE(bo->tbo.resv->lock.ctx) != ticket)
++ continue;
++ }
++
++ trace_amdgpu_vm_bo_cs(mapping);
++ }
++}
++
++/**
+ * amdgpu_vm_bo_rmv - remove a bo to a specific vm
+ *
+ * @adev: amdgpu_device pointer
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+index 9f70db6..a5cf0cc 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+@@ -324,6 +324,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
+ uint64_t saddr, uint64_t size);
+ struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
+ uint64_t addr);
++void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket);
+ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
+ struct amdgpu_bo_va *bo_va);
+ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5028-drm-amdgpu-move-bo_list-defines-to-amdgpu_bo_list.h.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5028-drm-amdgpu-move-bo_list-defines-to-amdgpu_bo_list.h.patch
new file mode 100644
index 00000000..94e029a7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5028-drm-amdgpu-move-bo_list-defines-to-amdgpu_bo_list.h.patch
@@ -0,0 +1,158 @@
+From e14abb0d9e3e5b0fdc4db51021a64b5cb5248c6f Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Mon, 30 Jul 2018 13:27:09 +0200
+Subject: [PATCH 5028/5725] drm/amdgpu: move bo_list defines to
+ amdgpu_bo_list.h
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Further demangle amdgpu.h
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+Acked-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 40 +----------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h | 70 +++++++++++++++++++++++++++++
+ 2 files changed, 71 insertions(+), 39 deletions(-)
+ create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 09d61b2..a6bde13 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -75,6 +75,7 @@
+ #include "amdgpu_gart.h"
+ #include "amdgpu_debugfs.h"
+ #include "amdgpu_job.h"
++#include "amdgpu_bo_list.h"
+
+ /*
+ * Modules parameters.
+@@ -713,45 +714,6 @@ struct amdgpu_fpriv {
+ };
+
+ /*
+- * residency list
+- */
+-struct amdgpu_bo_list_entry {
+- struct amdgpu_bo *robj;
+- struct ttm_validate_buffer tv;
+- struct amdgpu_bo_va *bo_va;
+- uint32_t priority;
+- struct page **user_pages;
+- int user_invalidated;
+-};
+-
+-struct amdgpu_bo_list {
+- struct mutex lock;
+- struct rcu_head rhead;
+- struct kref refcount;
+- struct amdgpu_bo *gds_obj;
+- struct amdgpu_bo *gws_obj;
+- struct amdgpu_bo *oa_obj;
+- unsigned first_userptr;
+- unsigned num_entries;
+- struct amdgpu_bo_list_entry *array;
+-};
+-
+-int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
+- struct amdgpu_bo_list **result);
+-void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
+- struct list_head *validated);
+-void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
+-void amdgpu_bo_list_free(struct amdgpu_bo_list *list);
+-int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
+- struct drm_amdgpu_bo_list_entry **info_param);
+-
+-int amdgpu_bo_list_create(struct amdgpu_device *adev,
+- struct drm_file *filp,
+- struct drm_amdgpu_bo_list_entry *info,
+- unsigned num_entries,
+- struct amdgpu_bo_list **list);
+-
+-/*
+ * GFX stuff
+ */
+ #include "clearstate_defs.h"
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
+new file mode 100644
+index 0000000..833f846
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
+@@ -0,0 +1,70 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++#ifndef __AMDGPU_BO_LIST_H__
++#define __AMDGPU_BO_LIST_H__
++
++#include <drm/ttm/ttm_execbuf_util.h>
++#include <drm/amdgpu_drm.h>
++
++struct amdgpu_device;
++struct amdgpu_bo;
++struct amdgpu_bo_va;
++struct amdgpu_fpriv;
++
++struct amdgpu_bo_list_entry {
++ struct amdgpu_bo *robj;
++ struct ttm_validate_buffer tv;
++ struct amdgpu_bo_va *bo_va;
++ uint32_t priority;
++ struct page **user_pages;
++ int user_invalidated;
++};
++
++struct amdgpu_bo_list {
++ struct mutex lock;
++ struct rcu_head rhead;
++ struct kref refcount;
++ struct amdgpu_bo *gds_obj;
++ struct amdgpu_bo *gws_obj;
++ struct amdgpu_bo *oa_obj;
++ unsigned first_userptr;
++ unsigned num_entries;
++ struct amdgpu_bo_list_entry *array;
++};
++
++int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
++ struct amdgpu_bo_list **result);
++void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
++ struct list_head *validated);
++void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
++void amdgpu_bo_list_free(struct amdgpu_bo_list *list);
++int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
++ struct drm_amdgpu_bo_list_entry **info_param);
++
++int amdgpu_bo_list_create(struct amdgpu_device *adev,
++ struct drm_file *filp,
++ struct drm_amdgpu_bo_list_entry *info,
++ unsigned num_entries,
++ struct amdgpu_bo_list **list);
++
++#endif
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5029-drm-amdgpu-always-recreate-bo_list.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5029-drm-amdgpu-always-recreate-bo_list.patch
new file mode 100644
index 00000000..af6e42fb
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5029-drm-amdgpu-always-recreate-bo_list.patch
@@ -0,0 +1,132 @@
+From bfb0b638ae96ec21b96f5552e96cc9d40d5ff417 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Mon, 30 Jul 2018 13:46:04 +0200
+Subject: [PATCH 5029/5725] drm/amdgpu: always recreate bo_list
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The bo_list handle is allocated by OP_CREATE, so in OP_UPDATE here we just
+re-create the bo_list object and replace the handle. This way we don't
+need locking to protect the bo_list because it's always re-created when
+changed.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 23 ++++++++++++-----------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h | 1 -
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 3 ---
+ 3 files changed, 12 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+index 2247486..f0d85a2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+@@ -50,7 +50,6 @@ static void amdgpu_bo_list_release_rcu(struct kref *ref)
+ for (i = 0; i < list->num_entries; ++i)
+ amdgpu_bo_unref(&list->array[i].robj);
+
+- mutex_destroy(&list->lock);
+ kvfree(list->array);
+ kfree_rcu(list, rhead);
+ }
+@@ -70,7 +69,6 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev,
+ return -ENOMEM;
+
+ /* initialize bo list*/
+- mutex_init(&list->lock);
+ kref_init(&list->refcount);
+ r = amdgpu_bo_list_set(adev, filp, list, info, num_entries);
+ if (r) {
+@@ -188,7 +186,6 @@ int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
+
+ if (*result && kref_get_unless_zero(&(*result)->refcount)) {
+ rcu_read_unlock();
+- mutex_lock(&(*result)->lock);
+ return 0;
+ }
+
+@@ -230,7 +227,6 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
+
+ void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
+ {
+- mutex_unlock(&list->lock);
+ kref_put(&list->refcount, amdgpu_bo_list_release_rcu);
+ }
+
+@@ -241,7 +237,6 @@ void amdgpu_bo_list_free(struct amdgpu_bo_list *list)
+ for (i = 0; i < list->num_entries; ++i)
+ amdgpu_bo_unref(&list->array[i].robj);
+
+- mutex_destroy(&list->lock);
+ kvfree(list->array);
+ kfree(list);
+ }
+@@ -296,7 +291,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
+ union drm_amdgpu_bo_list *args = data;
+ uint32_t handle = args->in.list_handle;
+ struct drm_amdgpu_bo_list_entry *info = NULL;
+- struct amdgpu_bo_list *list;
++ struct amdgpu_bo_list *list, *old;
+ int r;
+
+ r = amdgpu_bo_create_list_entry_array(&args->in, &info);
+@@ -327,16 +322,22 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
+ break;
+
+ case AMDGPU_BO_LIST_OP_UPDATE:
+- r = amdgpu_bo_list_get(fpriv, handle, &list);
++ r = amdgpu_bo_list_create(adev, filp, info, args->in.bo_number,
++ &list);
+ if (r)
+ goto error_free;
+
+- r = amdgpu_bo_list_set(adev, filp, list, info,
+- args->in.bo_number);
+- amdgpu_bo_list_put(list);
+- if (r)
++ mutex_lock(&fpriv->bo_list_lock);
++ old = idr_replace(&fpriv->bo_list_handles, list, handle);
++ mutex_unlock(&fpriv->bo_list_lock);
++
++ if (IS_ERR(old)) {
++ amdgpu_bo_list_put(list);
++ r = PTR_ERR(old);
+ goto error_free;
++ }
+
++ amdgpu_bo_list_put(old);
+ break;
+
+ default:
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
+index 833f846..89195fd 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
+@@ -41,7 +41,6 @@ struct amdgpu_bo_list_entry {
+ };
+
+ struct amdgpu_bo_list {
+- struct mutex lock;
+ struct rcu_head rhead;
+ struct kref refcount;
+ struct amdgpu_bo *gds_obj;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index 08cb8f5..e41ab4f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -580,9 +580,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
+ &p->bo_list);
+ if (r)
+ return r;
+-
+- } else if (p->bo_list) {
+- mutex_lock(&p->bo_list->lock);
+ }
+
+ if (p->bo_list) {
+--
+2.7.4
+
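The update path above replaces the bo_list wholesale: build a new list, swap it into the handle IDR with idr_replace(), and drop the reference on the old one, so readers never need a mutex. A hedged, generic sketch of that swap follows (all names are hypothetical; the real code additionally defers freeing through RCU so concurrent lookups stay safe):

#include <linux/err.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct obj {
	struct kref refcount;
};

struct obj_table {
	struct mutex lock;
	struct idr handles;
};

static void obj_release(struct kref *kref)
{
	kfree(container_of(kref, struct obj, refcount));
}

static void obj_put(struct obj *o)
{
	kref_put(&o->refcount, obj_release);
}

/* "Update" a handle by swapping in a freshly built object. */
static int obj_update(struct obj_table *t, int handle, struct obj *new_obj)
{
	struct obj *old;

	mutex_lock(&t->lock);
	old = idr_replace(&t->handles, new_obj, handle);
	mutex_unlock(&t->lock);

	if (IS_ERR(old)) {		/* the handle was never created */
		obj_put(new_obj);
		return PTR_ERR(old);
	}

	obj_put(old);	/* holders of 'old' keep it alive until they drop it */
	return 0;
}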
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5030-drm-amdgpu-nuke-amdgpu_bo_list_free.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5030-drm-amdgpu-nuke-amdgpu_bo_list_free.patch
new file mode 100644
index 00000000..383d6ca0
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5030-drm-amdgpu-nuke-amdgpu_bo_list_free.patch
@@ -0,0 +1,80 @@
+From 52957e45f0e65c0cc177750b9ba2b1254a47b6a1 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Mon, 30 Jul 2018 14:17:41 +0200
+Subject: [PATCH 5030/5725] drm/amdgpu: nuke amdgpu_bo_list_free
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The RCU grace period is harmless and avoiding it is not worth the effort
+of doubling the implementation.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 13 +------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h | 1 -
+ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 2 +-
+ 3 files changed, 2 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+index f0d85a2..ee09c2d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+@@ -230,17 +230,6 @@ void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
+ kref_put(&list->refcount, amdgpu_bo_list_release_rcu);
+ }
+
+-void amdgpu_bo_list_free(struct amdgpu_bo_list *list)
+-{
+- unsigned i;
+-
+- for (i = 0; i < list->num_entries; ++i)
+- amdgpu_bo_unref(&list->array[i].robj);
+-
+- kvfree(list->array);
+- kfree(list);
+-}
+-
+ int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
+ struct drm_amdgpu_bo_list_entry **info_param)
+ {
+@@ -309,7 +298,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
+ r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL);
+ mutex_unlock(&fpriv->bo_list_lock);
+ if (r < 0) {
+- amdgpu_bo_list_free(list);
++ amdgpu_bo_list_put(list);
+ return r;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
+index 89195fd..0ce5402 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
+@@ -56,7 +56,6 @@ int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
+ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
+ struct list_head *validated);
+ void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
+-void amdgpu_bo_list_free(struct amdgpu_bo_list *list);
+ int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
+ struct drm_amdgpu_bo_list_entry **info_param);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index be1770d..58158b0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -1001,7 +1001,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
+ amdgpu_bo_unref(&pd);
+
+ idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
+- amdgpu_bo_list_free(list);
++ amdgpu_bo_list_put(list);
+
+ idr_destroy(&fpriv->bo_list_handles);
+ mutex_destroy(&fpriv->bo_list_lock);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5031-drm-amdgpu-add-bo_list-iterators.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5031-drm-amdgpu-add-bo_list-iterators.patch
new file mode 100644
index 00000000..f6e37b2d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5031-drm-amdgpu-add-bo_list-iterators.patch
@@ -0,0 +1,251 @@
+From f10a187969235463c0c2384920837b8ba27b5d8d Mon Sep 17 00:00:00 2001
+From: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+Date: Wed, 9 Jan 2019 20:55:49 +0530
+Subject: [PATCH 5031/5725] drm/amdgpu: add bo_list iterators
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Add helpers to iterate over all entries in a bo_list.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+Acked-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+Signed-off-by: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 22 +++++++------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h | 10 ++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 51 +++++++++++++----------------
+ 3 files changed, 44 insertions(+), 39 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+index ee09c2d..3f96f38 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+@@ -43,12 +43,12 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
+
+ static void amdgpu_bo_list_release_rcu(struct kref *ref)
+ {
+- unsigned i;
+ struct amdgpu_bo_list *list = container_of(ref, struct amdgpu_bo_list,
+ refcount);
++ struct amdgpu_bo_list_entry *e;
+
+- for (i = 0; i < list->num_entries; ++i)
+- amdgpu_bo_unref(&list->array[i].robj);
++ amdgpu_bo_list_for_each_entry(e, list)
++ amdgpu_bo_unref(&e->robj);
+
+ kvfree(list->array);
+ kfree_rcu(list, rhead);
+@@ -103,6 +103,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
+ struct amdgpu_bo *oa_obj = adev->gds.oa_gfx_bo;
+
+ unsigned last_entry = 0, first_userptr = num_entries;
++ struct amdgpu_bo_list_entry *e;
+ uint64_t total_size = 0;
+ unsigned i;
+ int r;
+@@ -156,7 +157,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
+ trace_amdgpu_bo_list_set(list, entry->robj);
+ }
+
+- for (i = 0; i < list->num_entries; ++i)
++ amdgpu_bo_list_for_each_entry(e, list)
+ amdgpu_bo_unref(&list->array[i].robj);
+
+ kvfree(list->array);
+@@ -201,6 +202,7 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
+ * concatenated in descending order.
+ */
+ struct list_head bucket[AMDGPU_BO_LIST_NUM_BUCKETS];
++ struct amdgpu_bo_list_entry *e;
+ unsigned i;
+
+ for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
+@@ -211,13 +213,13 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
+ * in the list, the sort mustn't change the ordering of buffers
+ * with the same priority, i.e. it must be stable.
+ */
+- for (i = 0; i < list->num_entries; i++) {
+- unsigned priority = list->array[i].priority;
++ amdgpu_bo_list_for_each_entry(e, list) {
++ unsigned priority = e->priority;
++
++ if (!e->robj->parent)
++ list_add_tail(&e->tv.head, &bucket[priority]);
+
+- if (!list->array[i].robj->parent)
+- list_add_tail(&list->array[i].tv.head,
+- &bucket[priority]);
+- list->array[i].user_pages = NULL;
++ e->user_pages = NULL;
+ }
+
+ /* Connect the sorted buckets in the output list. */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
+index 0ce5402..3d77abf 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
+@@ -65,4 +65,14 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev,
+ unsigned num_entries,
+ struct amdgpu_bo_list **list);
+
++#define amdgpu_bo_list_for_each_entry(e, list) \
++ for (e = &(list)->array[0]; \
++ e != &(list)->array[(list)->num_entries]; \
++ ++e)
++
++#define amdgpu_bo_list_for_each_userptr_entry(e, list) \
++ for (e = &(list)->array[(list)->first_userptr]; \
++ e != &(list)->array[(list)->num_entries]; \
++ ++e)
++
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index e41ab4f..df87259 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -563,10 +563,10 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
+ struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+ struct amdgpu_bo_list_entry *e;
+ struct list_head duplicates;
+- unsigned i, tries = 10;
+ struct amdgpu_bo *gds;
+ struct amdgpu_bo *gws;
+ struct amdgpu_bo *oa;
++ unsigned tries = 10;
+ int r;
+
+ INIT_LIST_HEAD(&p->validated);
+@@ -596,7 +596,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
+
+ while (1) {
+ struct list_head need_pages;
+- unsigned i;
+
+ r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
+ &duplicates);
+@@ -611,12 +610,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
+ break;
+
+ INIT_LIST_HEAD(&need_pages);
+- for (i = p->bo_list->first_userptr;
+- i < p->bo_list->num_entries; ++i) {
+- struct amdgpu_bo *bo;
+-
+- e = &p->bo_list->array[i];
+- bo = e->robj;
++ amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
++ struct amdgpu_bo *bo = e->robj;
+
+ if (amdgpu_ttm_tt_userptr_invalidated(bo->tbo.ttm,
+ &e->user_invalidated) && e->user_pages) {
+@@ -711,16 +706,14 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
+
+ if (p->bo_list) {
+ struct amdgpu_vm *vm = &fpriv->vm;
+- unsigned i;
++ struct amdgpu_bo_list_entry *e;
+
+ gds = p->bo_list->gds_obj;
+ gws = p->bo_list->gws_obj;
+ oa = p->bo_list->oa_obj;
+- for (i = 0; i < p->bo_list->num_entries; i++) {
+- struct amdgpu_bo *bo = p->bo_list->array[i].robj;
+
+- p->bo_list->array[i].bo_va = amdgpu_vm_bo_find(vm, bo);
+- }
++ amdgpu_bo_list_for_each_entry(e, p->bo_list)
++ e->bo_va = amdgpu_vm_bo_find(vm, e->robj);
+ } else {
+ gds = p->adev->gds.gds_gfx_bo;
+ gws = p->adev->gds.gws_gfx_bo;
+@@ -754,10 +747,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
+ error_free_pages:
+
+ if (p->bo_list) {
+- for (i = p->bo_list->first_userptr;
+- i < p->bo_list->num_entries; ++i) {
+- e = &p->bo_list->array[i];
+-
++ amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+ if (!e->user_pages)
+ continue;
+
+@@ -832,7 +822,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
+ struct amdgpu_vm *vm = &fpriv->vm;
+ struct amdgpu_bo_va *bo_va;
+ struct amdgpu_bo *bo;
+- int i, r;
++ int r;
+
+ r = amdgpu_vm_clear_freed(adev, vm, NULL);
+ if (r)
+@@ -863,15 +853,17 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
+ }
+
+ if (p->bo_list) {
+- for (i = 0; i < p->bo_list->num_entries; i++) {
++ struct amdgpu_bo_list_entry *e;
++
++ amdgpu_bo_list_for_each_entry(e, p->bo_list) {
+ struct dma_fence *f;
+
+ /* ignore duplicates */
+- bo = p->bo_list->array[i].robj;
++ bo = e->robj;
+ if (!bo)
+ continue;
+
+- bo_va = p->bo_list->array[i].bo_va;
++ bo_va = e->bo_va;
+ if (bo_va == NULL)
+ continue;
+
+@@ -900,14 +892,15 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
+ return r;
+
+ if (amdgpu_vm_debug && p->bo_list) {
++ struct amdgpu_bo_list_entry *e;
++
+ /* Invalidate all BOs to test for userspace bugs */
+- for (i = 0; i < p->bo_list->num_entries; i++) {
++ amdgpu_bo_list_for_each_entry(e, p->bo_list) {
+ /* ignore duplicates */
+- bo = p->bo_list->array[i].robj;
+- if (!bo)
++ if (!e->robj)
+ continue;
+
+- amdgpu_vm_bo_invalidate(adev, bo, false);
++ amdgpu_vm_bo_invalidate(adev, e->robj, false);
+ }
+ }
+
+@@ -1227,16 +1220,16 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
+ struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
+ enum drm_sched_priority priority;
+ struct amdgpu_job *job;
+- unsigned i;
+ uint64_t seq;
+
+ int r;
+
+ amdgpu_mn_lock(p->mn);
+ if (p->bo_list) {
+- for (i = p->bo_list->first_userptr;
+- i < p->bo_list->num_entries; ++i) {
+- struct amdgpu_bo *bo = p->bo_list->array[i].robj;
++ struct amdgpu_bo_list_entry *e;
++
++ amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
++ struct amdgpu_bo *bo = e->robj;
+
+ if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
+ amdgpu_mn_unlock(p->mn);
+--
+2.7.4
+
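The new macros simply walk the entry array from index 0 (or from first_userptr) up to num_entries. A hypothetical caller, assuming the amdgpu_bo_list.h header added earlier in this series, could use them like this:

#include "amdgpu_bo_list.h"	/* header introduced earlier in this series */

/* Hypothetical helper: count the userptr entries in a bo_list. */
static unsigned int count_userptr_entries(struct amdgpu_bo_list *list)
{
	struct amdgpu_bo_list_entry *e;
	unsigned int n = 0;

	amdgpu_bo_list_for_each_userptr_entry(e, list)
		n++;

	return n;
}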
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5032-drm-amdgpu-allocate-the-bo_list-array-after-the-list.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5032-drm-amdgpu-allocate-the-bo_list-array-after-the-list.patch
new file mode 100644
index 00000000..52e36116
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5032-drm-amdgpu-allocate-the-bo_list-array-after-the-list.patch
@@ -0,0 +1,237 @@
+From a001a34d8c17dcbd12ff418a7e79efe3879fb631 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Mon, 30 Jul 2018 16:16:01 +0200
+Subject: [PATCH 5032/5725] drm/amdgpu: allocate the bo_list array after the
+ list
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This avoids multiple allocations for the head and the array.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 114 +++++++++++-----------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h | 17 +++--
+ 2 files changed, 57 insertions(+), 74 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+index 3f96f38..b603249 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+@@ -35,13 +35,15 @@
+ #define AMDGPU_BO_LIST_MAX_PRIORITY 32u
+ #define AMDGPU_BO_LIST_NUM_BUCKETS (AMDGPU_BO_LIST_MAX_PRIORITY + 1)
+
+-static int amdgpu_bo_list_set(struct amdgpu_device *adev,
+- struct drm_file *filp,
+- struct amdgpu_bo_list *list,
+- struct drm_amdgpu_bo_list_entry *info,
+- unsigned num_entries);
++static void amdgpu_bo_list_free_rcu(struct rcu_head *rcu)
++{
++ struct amdgpu_bo_list *list = container_of(rcu, struct amdgpu_bo_list,
++ rhead);
++
++ kvfree(list);
++}
+
+-static void amdgpu_bo_list_release_rcu(struct kref *ref)
++static void amdgpu_bo_list_free(struct kref *ref)
+ {
+ struct amdgpu_bo_list *list = container_of(ref, struct amdgpu_bo_list,
+ refcount);
+@@ -50,67 +52,36 @@ static void amdgpu_bo_list_release_rcu(struct kref *ref)
+ amdgpu_bo_list_for_each_entry(e, list)
+ amdgpu_bo_unref(&e->robj);
+
+- kvfree(list->array);
+- kfree_rcu(list, rhead);
++ call_rcu(&list->rhead, amdgpu_bo_list_free_rcu);
+ }
+
+-int amdgpu_bo_list_create(struct amdgpu_device *adev,
+- struct drm_file *filp,
+- struct drm_amdgpu_bo_list_entry *info,
+- unsigned num_entries,
+- struct amdgpu_bo_list **list_out)
++int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
++ struct drm_amdgpu_bo_list_entry *info,
++ unsigned num_entries, struct amdgpu_bo_list **result)
+ {
++ unsigned last_entry = 0, first_userptr = num_entries;
++ struct amdgpu_bo_list_entry *array;
+ struct amdgpu_bo_list *list;
++ uint64_t total_size = 0;
++ size_t size;
++ unsigned i;
+ int r;
+
++ if (num_entries > SIZE_MAX / sizeof(struct amdgpu_bo_list_entry))
++ return -EINVAL;
+
+- list = kzalloc(sizeof(struct amdgpu_bo_list), GFP_KERNEL);
++ size = sizeof(struct amdgpu_bo_list);
++ size += num_entries * sizeof(struct amdgpu_bo_list_entry);
++ list = kvmalloc(size, GFP_KERNEL);
+ if (!list)
+ return -ENOMEM;
+
+- /* initialize bo list*/
+ kref_init(&list->refcount);
+- r = amdgpu_bo_list_set(adev, filp, list, info, num_entries);
+- if (r) {
+- kfree(list);
+- return r;
+- }
+-
+- *list_out = list;
+- return 0;
+-}
+-
+-static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
+-{
+- struct amdgpu_bo_list *list;
+-
+- mutex_lock(&fpriv->bo_list_lock);
+- list = idr_remove(&fpriv->bo_list_handles, id);
+- mutex_unlock(&fpriv->bo_list_lock);
+- if (list)
+- kref_put(&list->refcount, amdgpu_bo_list_release_rcu);
+-}
+-
+-static int amdgpu_bo_list_set(struct amdgpu_device *adev,
+- struct drm_file *filp,
+- struct amdgpu_bo_list *list,
+- struct drm_amdgpu_bo_list_entry *info,
+- unsigned num_entries)
+-{
+- struct amdgpu_bo_list_entry *array;
+- struct amdgpu_bo *gds_obj = adev->gds.gds_gfx_bo;
+- struct amdgpu_bo *gws_obj = adev->gds.gws_gfx_bo;
+- struct amdgpu_bo *oa_obj = adev->gds.oa_gfx_bo;
+-
+- unsigned last_entry = 0, first_userptr = num_entries;
+- struct amdgpu_bo_list_entry *e;
+- uint64_t total_size = 0;
+- unsigned i;
+- int r;
++ list->gds_obj = adev->gds.gds_gfx_bo;
++ list->gws_obj = adev->gds.gws_gfx_bo;
++ list->oa_obj = adev->gds.oa_gfx_bo;
+
+- array = kvmalloc_array(num_entries, sizeof(struct amdgpu_bo_list_entry), GFP_KERNEL);
+- if (!array)
+- return -ENOMEM;
++ array = amdgpu_bo_list_array_entry(list, 0);
+ memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));
+
+ for (i = 0; i < num_entries; ++i) {
+@@ -147,36 +118,41 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
+ entry->tv.shared = !entry->robj->prime_shared_count;
+
+ if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GDS)
+- gds_obj = entry->robj;
++ list->gds_obj = entry->robj;
+ if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GWS)
+- gws_obj = entry->robj;
++ list->gws_obj = entry->robj;
+ if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_OA)
+- oa_obj = entry->robj;
++ list->oa_obj = entry->robj;
+
+ total_size += amdgpu_bo_size(entry->robj);
+ trace_amdgpu_bo_list_set(list, entry->robj);
+ }
+
+- amdgpu_bo_list_for_each_entry(e, list)
+- amdgpu_bo_unref(&list->array[i].robj);
+-
+- kvfree(list->array);
+-
+- list->gds_obj = gds_obj;
+- list->gws_obj = gws_obj;
+- list->oa_obj = oa_obj;
+ list->first_userptr = first_userptr;
+- list->array = array;
+ list->num_entries = num_entries;
+
+ trace_amdgpu_cs_bo_status(list->num_entries, total_size);
++
++ *result = list;
+ return 0;
+
+ error_free:
+ while (i--)
+ amdgpu_bo_unref(&array[i].robj);
+- kvfree(array);
++ kvfree(list);
+ return r;
++
++}
++
++static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
++{
++ struct amdgpu_bo_list *list;
++
++ mutex_lock(&fpriv->bo_list_lock);
++ list = idr_remove(&fpriv->bo_list_handles, id);
++ mutex_unlock(&fpriv->bo_list_lock);
++ if (list)
++ kref_put(&list->refcount, amdgpu_bo_list_free);
+ }
+
+ int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
+@@ -229,7 +205,7 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
+
+ void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
+ {
+- kref_put(&list->refcount, amdgpu_bo_list_release_rcu);
++ kref_put(&list->refcount, amdgpu_bo_list_free);
+ }
+
+ int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
+index 3d77abf..61b0897 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
+@@ -48,7 +48,6 @@ struct amdgpu_bo_list {
+ struct amdgpu_bo *oa_obj;
+ unsigned first_userptr;
+ unsigned num_entries;
+- struct amdgpu_bo_list_entry *array;
+ };
+
+ int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
+@@ -65,14 +64,22 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev,
+ unsigned num_entries,
+ struct amdgpu_bo_list **list);
+
++static inline struct amdgpu_bo_list_entry *
++amdgpu_bo_list_array_entry(struct amdgpu_bo_list *list, unsigned index)
++{
++ struct amdgpu_bo_list_entry *array = (void *)&list[1];
++
++ return &array[index];
++}
++
+ #define amdgpu_bo_list_for_each_entry(e, list) \
+- for (e = &(list)->array[0]; \
+- e != &(list)->array[(list)->num_entries]; \
++ for (e = amdgpu_bo_list_array_entry(list, 0); \
++ e != amdgpu_bo_list_array_entry(list, (list)->num_entries); \
+ ++e)
+
+ #define amdgpu_bo_list_for_each_userptr_entry(e, list) \
+- for (e = &(list)->array[(list)->first_userptr]; \
+- e != &(list)->array[(list)->num_entries]; \
++ for (e = amdgpu_bo_list_array_entry(list, (list)->first_userptr); \
++ e != amdgpu_bo_list_array_entry(list, (list)->num_entries); \
+ ++e)
+
+ #endif
+--
+2.7.4
+
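The single-allocation layout above places the entry array immediately after the list header, which is what amdgpu_bo_list_array_entry() computes with '(void *)&list[1]'. A small userspace sketch of the same layout, with hypothetical names:

#include <stdlib.h>

struct entry {
	int payload;
};

struct header {
	unsigned int num_entries;
	/* 'num_entries' struct entry items follow in the same allocation */
};

static struct entry *entry_at(struct header *h, unsigned int index)
{
	struct entry *array = (struct entry *)&h[1];

	return &array[index];
}

static struct header *alloc_with_entries(unsigned int num_entries)
{
	size_t size = sizeof(struct header) + num_entries * sizeof(struct entry);
	struct header *h = calloc(1, size);

	if (!h)
		return NULL;
	h->num_entries = num_entries;
	return h;
}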
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5033-drm-amdgpu-create-an-empty-bo_list-if-no-handle-is-p.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5033-drm-amdgpu-create-an-empty-bo_list-if-no-handle-is-p.patch
new file mode 100644
index 00000000..11321522
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5033-drm-amdgpu-create-an-empty-bo_list-if-no-handle-is-p.patch
@@ -0,0 +1,221 @@
+From 1c2c3a624ddcfb33507ace1e2b099c7ab8b07d0a Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Mon, 30 Jul 2018 16:44:14 +0200
+Subject: [PATCH 5033/5725] drm/amdgpu: create an empty bo_list if no handle is
+ provided
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Instead of having extra handling, just create an empty bo_list when no
+handle is provided.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 113 ++++++++++++++-------------------
+ 1 file changed, 47 insertions(+), 66 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index df87259..bf68877 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -561,6 +561,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
+ union drm_amdgpu_cs *cs)
+ {
+ struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
++ struct amdgpu_vm *vm = &fpriv->vm;
+ struct amdgpu_bo_list_entry *e;
+ struct list_head duplicates;
+ struct amdgpu_bo *gds;
+@@ -580,13 +581,17 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
+ &p->bo_list);
+ if (r)
+ return r;
++ } else if (!p->bo_list) {
++ /* Create a empty bo_list when no handle is provided */
++ r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
++ &p->bo_list);
++ if (r)
++ return r;
+ }
+
+- if (p->bo_list) {
+- amdgpu_bo_list_get_list(p->bo_list, &p->validated);
+- if (p->bo_list->first_userptr != p->bo_list->num_entries)
+- p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX);
+- }
++ amdgpu_bo_list_get_list(p->bo_list, &p->validated);
++ if (p->bo_list->first_userptr != p->bo_list->num_entries)
++ p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX);
+
+ INIT_LIST_HEAD(&duplicates);
+ amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
+@@ -605,10 +610,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
+ goto error_free_pages;
+ }
+
+- /* Without a BO list we don't have userptr BOs */
+- if (!p->bo_list)
+- break;
+-
+ INIT_LIST_HEAD(&need_pages);
+ amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+ struct amdgpu_bo *bo = e->robj;
+@@ -704,21 +705,12 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
+ amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
+ p->bytes_moved_vis);
+
+- if (p->bo_list) {
+- struct amdgpu_vm *vm = &fpriv->vm;
+- struct amdgpu_bo_list_entry *e;
++ gds = p->bo_list->gds_obj;
++ gws = p->bo_list->gws_obj;
++ oa = p->bo_list->oa_obj;
+
+- gds = p->bo_list->gds_obj;
+- gws = p->bo_list->gws_obj;
+- oa = p->bo_list->oa_obj;
+-
+- amdgpu_bo_list_for_each_entry(e, p->bo_list)
+- e->bo_va = amdgpu_vm_bo_find(vm, e->robj);
+- } else {
+- gds = p->adev->gds.gds_gfx_bo;
+- gws = p->adev->gds.gws_gfx_bo;
+- oa = p->adev->gds.oa_gfx_bo;
+- }
++ amdgpu_bo_list_for_each_entry(e, p->bo_list)
++ e->bo_va = amdgpu_vm_bo_find(vm, e->robj);
+
+ if (gds) {
+ p->job->gds_base = amdgpu_bo_gpu_offset(gds);
+@@ -746,16 +738,14 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
+
+ error_free_pages:
+
+- if (p->bo_list) {
+- amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+- if (!e->user_pages)
+- continue;
++ amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
++ if (!e->user_pages)
++ continue;
+
+- release_pages(e->user_pages,
+- e->robj->tbo.ttm->num_pages,
+- false);
+- kvfree(e->user_pages);
+- }
++ release_pages(e->user_pages,
++ e->robj->tbo.ttm->num_pages,
++ false);
++ kvfree(e->user_pages);
+ }
+
+ return r;
+@@ -817,9 +807,10 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
+
+ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
+ {
+- struct amdgpu_device *adev = p->adev;
+ struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
++ struct amdgpu_device *adev = p->adev;
+ struct amdgpu_vm *vm = &fpriv->vm;
++ struct amdgpu_bo_list_entry *e;
+ struct amdgpu_bo_va *bo_va;
+ struct amdgpu_bo *bo;
+ int r;
+@@ -852,31 +843,26 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
+ return r;
+ }
+
+- if (p->bo_list) {
+- struct amdgpu_bo_list_entry *e;
+-
+- amdgpu_bo_list_for_each_entry(e, p->bo_list) {
+- struct dma_fence *f;
+-
+- /* ignore duplicates */
+- bo = e->robj;
+- if (!bo)
+- continue;
++ amdgpu_bo_list_for_each_entry(e, p->bo_list) {
++ struct dma_fence *f;
+
+- bo_va = e->bo_va;
+- if (bo_va == NULL)
+- continue;
++ /* ignore duplicates */
++ bo = e->robj;
++ if (!bo)
++ continue;
+
+- r = amdgpu_vm_bo_update(adev, bo_va, false);
+- if (r)
+- return r;
++ bo_va = e->bo_va;
++ if (bo_va == NULL)
++ continue;
+
+- f = bo_va->last_pt_update;
+- r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
+- if (r)
+- return r;
+- }
++ r = amdgpu_vm_bo_update(adev, bo_va, false);
++ if (r)
++ return r;
+
++ f = bo_va->last_pt_update;
++ r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
++ if (r)
++ return r;
+ }
+
+ r = amdgpu_vm_handle_moved(adev, vm);
+@@ -891,9 +877,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
+ if (r)
+ return r;
+
+- if (amdgpu_vm_debug && p->bo_list) {
+- struct amdgpu_bo_list_entry *e;
+-
++ if (amdgpu_vm_debug) {
+ /* Invalidate all BOs to test for userspace bugs */
+ amdgpu_bo_list_for_each_entry(e, p->bo_list) {
+ /* ignore duplicates */
+@@ -1219,22 +1203,19 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
+ struct amdgpu_ring *ring = p->ring;
+ struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
+ enum drm_sched_priority priority;
++ struct amdgpu_bo_list_entry *e;
+ struct amdgpu_job *job;
+ uint64_t seq;
+
+ int r;
+
+ amdgpu_mn_lock(p->mn);
+- if (p->bo_list) {
+- struct amdgpu_bo_list_entry *e;
++ amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
++ struct amdgpu_bo *bo = e->robj;
+
+- amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+- struct amdgpu_bo *bo = e->robj;
+-
+- if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
+- amdgpu_mn_unlock(p->mn);
+- return -ERESTARTSYS;
+- }
++ if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
++ amdgpu_mn_unlock(p->mn);
++ return -ERESTARTSYS;
+ }
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5034-drm-amdgpu-Replace-ttm_bo_reference-with-ttm_bo_get.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5034-drm-amdgpu-Replace-ttm_bo_reference-with-ttm_bo_get.patch
new file mode 100644
index 00000000..4dfa6037
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5034-drm-amdgpu-Replace-ttm_bo_reference-with-ttm_bo_get.patch
@@ -0,0 +1,62 @@
+From 4d9bf45f0d8213d72aa05799b359da9146d67781 Mon Sep 17 00:00:00 2001
+From: Thomas Zimmermann <tzimmermann@suse.de>
+Date: Tue, 31 Jul 2018 09:12:35 +0200
+Subject: [PATCH 5034/5725] drm/amdgpu: Replace ttm_bo_reference with
+ ttm_bo_get
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The function ttm_bo_get acquires a reference on a TTM buffer object. The
+function's name is more aligned to the Linux kernel convention of naming
+ref-counting functions _get and _put.
+
+v2:
+ * changed prefix to drm/amdgpu
+
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 2 +-
+ include/drm/ttm/ttm_bo_api.h | 10 ++++++++++
+ 2 files changed, 11 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index e8f43d0..961b848 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -866,7 +866,7 @@ struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
+ if (bo == NULL)
+ return NULL;
+
+- ttm_bo_reference(&bo->tbo);
++ ttm_bo_get(&bo->tbo);
+ return bo;
+ }
+
+diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
+index f885bfc..e5a9866 100644
+--- a/include/drm/ttm/ttm_bo_api.h
++++ b/include/drm/ttm/ttm_bo_api.h
+@@ -283,6 +283,16 @@ struct ttm_operation_ctx {
+ #define TTM_OPT_FLAG_FORCE_ALLOC 0x2
+
+ /**
++ * ttm_bo_get - reference a struct ttm_buffer_object
++ *
++ * @bo: The buffer object.
++ */
++static inline void ttm_bo_get(struct ttm_buffer_object *bo)
++{
++ kref_get(&bo->kref);
++}
++
++/**
+ * ttm_bo_reference - reference a struct ttm_buffer_object
+ *
+ * @bo: The buffer object.
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5035-drm-amdgpu-Replace-ttm_bo_unref-with-ttm_bo_put.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5035-drm-amdgpu-Replace-ttm_bo_unref-with-ttm_bo_put.patch
new file mode 100644
index 00000000..3abd9581
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5035-drm-amdgpu-Replace-ttm_bo_unref-with-ttm_bo_put.patch
@@ -0,0 +1,89 @@
+From a76486d145b8de2bcc111f14dc977671ed6b66c3 Mon Sep 17 00:00:00 2001
+From: Thomas Zimmermann <tzimmermann@suse.de>
+Date: Tue, 31 Jul 2018 09:12:36 +0200
+Subject: [PATCH 5035/5725] drm/amdgpu: Replace ttm_bo_unref with ttm_bo_put
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The function ttm_bo_put releases a reference to a TTM buffer object. The
+function's name is more aligned to the Linux kernel convention of naming
+ref-counting functions _get and _put.
+
+A call to ttm_bo_unref takes the address of the TTM BO object's pointer and
+clears the pointer's value to NULL. This is not necessary in most cases and
+sometimes even worked around by the calling code. A call to ttm_bo_put only
+releases the reference without clearing the pointer.
+
+The current behaviour of clearing the pointer is kept in the calling code,
+but should be removed if not required in a later patch.
+
+v2:
+ * set prefix to drm/amdgpu
+
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 5 ++---
+ drivers/gpu/drm/ttm/ttm_bo.c | 6 ++++++
+ include/drm/ttm/ttm_bo_api.h | 9 +++++++++
+ 3 files changed, 17 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index 961b848..4c26b94 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -884,9 +884,8 @@ void amdgpu_bo_unref(struct amdgpu_bo **bo)
+ return;
+
+ tbo = &((*bo)->tbo);
+- ttm_bo_unref(&tbo);
+- if (tbo == NULL)
+- *bo = NULL;
++ ttm_bo_put(tbo);
++ *bo = NULL;
+ }
+
+ /**
+diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
+index d741f18..3e74e63 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo.c
++++ b/drivers/gpu/drm/ttm/ttm_bo.c
+@@ -591,6 +591,12 @@ static void ttm_bo_release(struct kref *kref)
+ kref_put(&bo->list_kref, ttm_bo_release_list);
+ }
+
++void ttm_bo_put(struct ttm_buffer_object *bo)
++{
++ kref_put(&bo->kref, ttm_bo_release);
++}
++EXPORT_SYMBOL(ttm_bo_put);
++
+ void ttm_bo_unref(struct ttm_buffer_object **p_bo)
+ {
+ struct ttm_buffer_object *bo = *p_bo;
+diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
+index e5a9866..a5f7e1c 100644
+--- a/include/drm/ttm/ttm_bo_api.h
++++ b/include/drm/ttm/ttm_bo_api.h
+@@ -357,6 +357,15 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
+ struct ttm_operation_ctx *ctx);
+
+ /**
++ * ttm_bo_put
++ *
++ * @bo: The buffer object.
++ *
++ * Unreference a buffer object.
++ */
++void ttm_bo_put(struct ttm_buffer_object *bo);
++
++/**
+ * ttm_bo_unref
+ *
+ * @bo: The buffer object.
+--
+2.7.4
+
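Both TTM patches are thin wrappers around kref: _get takes a reference, _put drops it and runs the release callback, and neither touches the caller's pointer. A generic sketch of the convention with a hypothetical 'widget' type:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct widget {
	struct kref kref;
};

static void widget_release(struct kref *kref)
{
	kfree(container_of(kref, struct widget, kref));
}

static inline void widget_get(struct widget *w)
{
	kref_get(&w->kref);
}

/* Unlike the old *_unref(&ptr) style, _put does not NULL the caller's pointer. */
static inline void widget_put(struct widget *w)
{
	kref_put(&w->kref, widget_release);
}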
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5036-drm-amd-display-add-missing-void-parameter-to-dc_cre.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5036-drm-amd-display-add-missing-void-parameter-to-dc_cre.patch
new file mode 100644
index 00000000..b60cf9ed
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5036-drm-amd-display-add-missing-void-parameter-to-dc_cre.patch
@@ -0,0 +1,34 @@
+From 2bbf4999eaad08630e498450f6fb353e828c77b8 Mon Sep 17 00:00:00 2001
+From: Colin Ian King <colin.king@canonical.com>
+Date: Tue, 31 Jul 2018 11:42:54 +0100
+Subject: [PATCH 5036/5725] drm/amd/display: add missing void parameter to
+ dc_create_transfer_func
+
+Add a missing void parameter to function dc_create_transfer_func, fixing
+the following sparse warning:
+
+warning: non-ANSI function declaration of function 'dc_create_transfer_func'
+
+Signed-off-by: Colin Ian King <colin.king@canonical.com>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_surface.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+index 815dfb5..8fb3aef 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+@@ -192,7 +192,7 @@ void dc_transfer_func_release(struct dc_transfer_func *tf)
+ kref_put(&tf->refcount, dc_transfer_func_free);
+ }
+
+-struct dc_transfer_func *dc_create_transfer_func()
++struct dc_transfer_func *dc_create_transfer_func(void)
+ {
+ struct dc_transfer_func *tf = kvzalloc(sizeof(*tf), GFP_KERNEL);
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5037-drm-amdgpu-pm-Fix-potential-Spectre-v1.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5037-drm-amdgpu-pm-Fix-potential-Spectre-v1.patch
new file mode 100644
index 00000000..13543bfb
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5037-drm-amdgpu-pm-Fix-potential-Spectre-v1.patch
@@ -0,0 +1,52 @@
+From 363d0459eaf763aa3bf07875bee09866db6f30fb Mon Sep 17 00:00:00 2001
+From: "Gustavo A. R. Silva" <gustavo@embeddedor.com>
+Date: Mon, 23 Jul 2018 11:32:32 -0500
+Subject: [PATCH 5037/5725] drm/amdgpu/pm: Fix potential Spectre v1
+
+idx can be indirectly controlled by user-space, hence leading to a
+potential exploitation of the Spectre variant 1 vulnerability.
+
+This issue was detected with the help of Smatch:
+
+drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c:408 amdgpu_set_pp_force_state()
+warn: potential spectre issue 'data.states'
+
+Fix this by sanitizing idx before using it to index data.states
+
+Notice that given that speculation windows are large, the policy is
+to kill the speculation on the first load and not worry if it can be
+completed with a dependent load/store [1].
+
+[1] https://marc.info/?l=linux-kernel&m=152449131114778&w=2
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Gustavo A. R. Silva <gustavo@embeddedor.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+index 14bb1b3..2a78a3c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+@@ -31,7 +31,7 @@
+ #include <linux/power_supply.h>
+ #include <linux/hwmon.h>
+ #include <linux/hwmon-sysfs.h>
+-
++#include <linux/nospec.h>
+
+ static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
+
+@@ -403,6 +403,7 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
+ count = -EINVAL;
+ goto fail;
+ }
++ idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
+
+ amdgpu_dpm_get_pp_num_states(adev, &data);
+ state = data.states[idx];
+--
+2.7.4
+
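array_index_nospec() clamps an already bounds-checked, user-controlled index so a mispredicted branch cannot speculatively load past the end of the array. A minimal sketch of the pattern (the 'states' and 'nr_states' names are illustrative, not the amdgpu_pm structures):

#include <linux/errno.h>
#include <linux/nospec.h>

static int get_state(const int *states, unsigned int nr_states, unsigned int idx)
{
	if (idx >= nr_states)
		return -EINVAL;

	/* Clamp idx so a mispredicted branch cannot load out of bounds. */
	idx = array_index_nospec(idx, nr_states);

	return states[idx];
}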
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5038-drm-amd-display-Report-non-DP-display-as-disconnecte.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5038-drm-amd-display-Report-non-DP-display-as-disconnecte.patch
new file mode 100644
index 00000000..e934f41e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5038-drm-amd-display-Report-non-DP-display-as-disconnecte.patch
@@ -0,0 +1,54 @@
+From 1770c8348bb00c968234d6c903aa82961efc02b4 Mon Sep 17 00:00:00 2001
+From: Harry Wentland <harry.wentland@amd.com>
+Date: Wed, 1 Aug 2018 10:48:23 -0400
+Subject: [PATCH 5038/5725] drm/amd/display: Report non-DP display as
+ disconnected without EDID
+
+[Why]
+Some boards seem to have a problem where HPD is high on HDMI even though
+no display is connected. We don't want to report these as connected. The
+DP spec still requires us to report DP displays as connected when HPD is
+high, even if we can't read the EDID, so that we can go to fail-safe mode.
+
+[How]
+If connector_signal is not DP, abort detection when we can't retrieve the
+EDID.
+
+v2: Add Bugzilla and stable
+
+Bugzilla: https://bugs.freedesktop.org/107390
+Bugzilla: https://bugs.freedesktop.org/106846
+Cc: stable@vger.kernel.org
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_link.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index 7962141..cbfb1ae 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -744,6 +744,17 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
+ break;
+ case EDID_NO_RESPONSE:
+ DC_LOG_ERROR("No EDID read.\n");
++
++ /*
++ * Abort detection for non-DP connectors if we have
++ * no EDID
++ *
++ * DP needs to report as connected if HDP is high
++ * even if we have no EDID in order to go to
++ * fail-safe mode
++ */
++ if (!dc_is_dp_signal(link->connector_signal))
++ return false;
+ default:
+ break;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5039-drm-amd-display-Only-require-EDID-read-for-HDMI-and-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5039-drm-amd-display-Only-require-EDID-read-for-HDMI-and-.patch
new file mode 100644
index 00000000..9c4d7510
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5039-drm-amd-display-Only-require-EDID-read-for-HDMI-and-.patch
@@ -0,0 +1,43 @@
+From 499b11527b5dc07df496e3533ae217b8992b2275 Mon Sep 17 00:00:00 2001
+From: Harry Wentland <harry.wentland@amd.com>
+Date: Thu, 2 Aug 2018 15:32:01 -0400
+Subject: [PATCH 5039/5725] drm/amd/display: Only require EDID read for HDMI
+ and DVI
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+[Why]
+VGA sometimes has trouble retrieving the EDID on very long cables, KVM
+switches, or old displays.
+
+[How]
+Only require an EDID read for HDMI and DVI and exempt other types (DP,
+VGA). We currently don't support VGA, but if anyone adds support in the
+future this check might otherwise get overlooked.
+
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Suggested-by: Michel Dänzer <michel@daenzer.net>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_link.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index cbfb1ae..048b8c8 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -753,7 +753,8 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
+ * even if we have no EDID in order to go to
+ * fail-safe mode
+ */
+- if (!dc_is_dp_signal(link->connector_signal))
++ if (dc_is_hdmi_signal(link->connector_signal) ||
++ dc_is_dvi_signal(link->connector_signal))
+ return false;
+ default:
+ break;
+--
+2.7.4
+
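Taken together with the previous patch, the detection rule becomes: with HPD high but no readable EDID, only HDMI and DVI are reported as disconnected, DP must stay connected for fail-safe mode, and VGA is exempt because its EDID reads are unreliable. A rough standalone sketch of that decision, with hypothetical signal-type names:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the DC signal types used in dc_link.c. */
enum signal_type { SIGNAL_HDMI, SIGNAL_DVI, SIGNAL_DP, SIGNAL_VGA };

/*
 * Model of the detection decision after these two patches: with HPD high but
 * no readable EDID, only HDMI and DVI report "disconnected"; DP must stay
 * connected so it can enter fail-safe mode, and VGA is exempt because EDID
 * reads are unreliable over long cables and KVM switches.
 */
static bool report_connected(enum signal_type sig, bool hpd_high, bool edid_ok)
{
	if (!hpd_high)
		return false;
	if (!edid_ok && (sig == SIGNAL_HDMI || sig == SIGNAL_DVI))
		return false;
	return true;
}

int main(void)
{
	printf("HDMI, HPD high, no EDID -> %d\n", report_connected(SIGNAL_HDMI, true, false)); /* 0 */
	printf("DP,   HPD high, no EDID -> %d\n", report_connected(SIGNAL_DP, true, false));   /* 1 */
	printf("VGA,  HPD high, no EDID -> %d\n", report_connected(SIGNAL_VGA, true, false));  /* 1 */
	return 0;
}
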
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5040-drm-amd-display-Use-requested-HDMI-aspect-ratio.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5040-drm-amd-display-Use-requested-HDMI-aspect-ratio.patch
new file mode 100644
index 00000000..6a13e345
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5040-drm-amd-display-Use-requested-HDMI-aspect-ratio.patch
@@ -0,0 +1,47 @@
+From b9cd07d32e0714925d670cbf2b8c1f5e8d5f323e Mon Sep 17 00:00:00 2001
+From: "Leo (Sunpeng) Li" <sunpeng.li@amd.com>
+Date: Thu, 19 Jul 2018 08:22:16 -0400
+Subject: [PATCH 5040/5725] drm/amd/display: Use requested HDMI aspect ratio
+
+[Why]
+The DRM mode's HDMI picture aspect ratio field was never saved in
+dc_stream's timing struct. This causes us to mistake a new stream as
+having the same timings as the old, even though the user has requested a
+different aspect ratio.
+
+[How]
+Save DRM's aspect ratio field within dc_stream's timing struct.
+
+Bug: https://bugs.freedesktop.org/show_bug.cgi?id=107153
+Signed-off-by: Leo (Sunpeng) Li <sunpeng.li@amd.com>
+Reviewed-by: Mikita Lipski <Mikita.Lipski@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 9 ++-------
+ 1 file changed, 2 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index c5c59cb..685b2fea 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -2300,13 +2300,8 @@ convert_color_depth_from_display_info(const struct drm_connector *connector)
+ static enum dc_aspect_ratio
+ get_aspect_ratio(const struct drm_display_mode *mode_in)
+ {
+- int32_t width = mode_in->crtc_hdisplay * 9;
+- int32_t height = mode_in->crtc_vdisplay * 16;
+-
+- if ((width - height) < 10 && (width - height) > -10)
+- return ASPECT_RATIO_16_9;
+- else
+- return ASPECT_RATIO_4_3;
++ /* 1-1 mapping, since both enums follow the HDMI spec. */
++ return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
+ }
+
+ static enum dc_color_space
+--
+2.7.4
+
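For reference, the removed heuristic compared crtc_hdisplay*9 against crtc_vdisplay*16 with a ±10 tolerance (for 1920x1080 both products are 17280, so 1080p was classified as 16:9 and everything else fell back to 4:3), which cannot represent the ratio the user actually requested. The sketch below illustrates the new assumption that the DRM and DC aspect-ratio enums are numerically aligned; the enum values shown here are hypothetical stand-ins, not the real definitions.

#include <assert.h>
#include <stdio.h>

/*
 * Hypothetical enum values; the patch relies on the real drm
 * hdmi_picture_aspect and dc_aspect_ratio enums sharing the same numbering
 * because both follow the HDMI AVI InfoFrame encoding.
 */
enum drm_aspect { DRM_ASPECT_NONE = 0, DRM_ASPECT_4_3 = 1, DRM_ASPECT_16_9 = 2 };
enum dc_aspect  { DC_ASPECT_NO_DATA = 0, DC_ASPECT_4_3 = 1, DC_ASPECT_16_9 = 2 };

/* The patch replaces the width/height heuristic with a direct cast. */
static enum dc_aspect get_aspect_ratio(enum drm_aspect requested)
{
	return (enum dc_aspect)requested;
}

int main(void)
{
	/* Sanity check of the 1-1 mapping assumption. */
	assert((int)DRM_ASPECT_16_9 == (int)DC_ASPECT_16_9);

	/* Old heuristic for 1920x1080: 1920*9 == 1080*16 == 17280 -> "16:9". */
	printf("old heuristic delta for 1080p: %d\n", 1920 * 9 - 1080 * 16);
	printf("new mapping: DRM 16:9 -> DC %d\n", (int)get_aspect_ratio(DRM_ASPECT_16_9));
	return 0;
}
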
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5041-drm-amd-display-DP-Compliance-400.1.1-failure.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5041-drm-amd-display-DP-Compliance-400.1.1-failure.patch
new file mode 100644
index 00000000..9f348cad
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5041-drm-amd-display-DP-Compliance-400.1.1-failure.patch
@@ -0,0 +1,97 @@
+From 61a1c2efc2b5d836fcd4fffafdc18a1717c3e6c1 Mon Sep 17 00:00:00 2001
+From: abdoulaye berthe <abdoulaye.berthe@amd.com>
+Date: Thu, 19 Jul 2018 15:39:55 -0400
+Subject: [PATCH 5041/5725] drm/amd/display: DP Compliance 400.1.1 failure
+
+[Why]
+400.1.1 is failing because we are not performing link training when
+we get an HPD pulse for the same display. This is breaking DP
+compliance.
+
+[How]
+Always perform link training after HPD pulse if the detection
+reason is not DETECT_REASON_HPDRX.
+
+Signed-off-by: abdoulaye berthe <abdoulaye.berthe@amd.com>
+Reviewed-by: Wenjing Liu <Wenjing.Liu@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_link.c | 54 ++++++++++++++-------------
+ 1 file changed, 28 insertions(+), 26 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index 048b8c8..5658eb1 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -764,39 +764,41 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
+ if ((prev_sink != NULL) && ((edid_status == EDID_THE_SAME) || (edid_status == EDID_OK)))
+ same_edid = is_same_edid(&prev_sink->dc_edid, &sink->dc_edid);
+
+- // If both edid and dpcd are the same, then discard new sink and revert back to original sink
+- if ((same_edid) && (same_dpcd)) {
+- link_disconnect_remap(prev_sink, link);
+- sink = prev_sink;
+- prev_sink = NULL;
+- } else {
+- if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
+- sink_caps.transaction_type ==
+- DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
+- /*
+- * TODO debug why Dell 2413 doesn't like
+- * two link trainings
+- */
++ if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
++ sink_caps.transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX &&
++ reason != DETECT_REASON_HPDRX) {
++ /*
++ * TODO debug why Dell 2413 doesn't like
++ * two link trainings
++ */
+
+- /* deal with non-mst cases */
+- for (i = 0; i < LINK_TRAINING_MAX_VERIFY_RETRY; i++) {
+- int fail_count = 0;
++ /* deal with non-mst cases */
++ for (i = 0; i < LINK_TRAINING_MAX_VERIFY_RETRY; i++) {
++ int fail_count = 0;
+
+- dp_verify_link_cap(link,
+- &link->reported_link_cap,
+- &fail_count);
++ dp_verify_link_cap(link,
++ &link->reported_link_cap,
++ &fail_count);
+
+- if (fail_count == 0)
+- break;
+- }
++ if (fail_count == 0)
++ break;
+ }
+
+- /* HDMI-DVI Dongle */
+- if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A &&
+- !sink->edid_caps.edid_hdmi)
+- sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
++ } else {
++ // If edid is the same, then discard new sink and revert back to original sink
++ if (same_edid) {
++ link_disconnect_remap(prev_sink, link);
++ sink = prev_sink;
++ prev_sink = NULL;
++
++ }
+ }
+
++ /* HDMI-DVI Dongle */
++ if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A &&
++ !sink->edid_caps.edid_hdmi)
++ sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
++
+ /* Connectivity log: detection */
+ for (i = 0; i < sink->dc_edid.length / EDID_BLOCK_SIZE; i++) {
+ CONN_DATA_DETECT(link,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5042-drm-amd-display-Implement-backlight_ops.get_brightne.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5042-drm-amd-display-Implement-backlight_ops.get_brightne.patch
new file mode 100644
index 00000000..2a3dc5b9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5042-drm-amd-display-Implement-backlight_ops.get_brightne.patch
@@ -0,0 +1,79 @@
+From d8eb60aab13f40734d2e739749fdd653a95151d5 Mon Sep 17 00:00:00 2001
+From: David Francis <David.Francis@amd.com>
+Date: Thu, 19 Jul 2018 11:25:05 -0400
+Subject: [PATCH 5042/5725] drm/amd/display: Implement
+ backlight_ops.get_brightness
+
+[Why]
+This hook, which is supposed to read the actual backlight value,
+is used in a few places throughout the kernel to set up or force
+an update of the backlight.
+
+[How]
+Create a dc function that calls the existing abm function, and
+call that function from amdgpu
+
+Signed-off-by: David Francis <David.Francis@amd.com>
+Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 7 ++++++-
+ drivers/gpu/drm/amd/display/dc/core/dc_link.c | 9 +++++++++
+ drivers/gpu/drm/amd/display/dc/dc_link.h | 2 ++
+ 3 files changed, 17 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 685b2fea..1c8088e 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -1437,7 +1437,12 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
+
+ static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
+ {
+- return bd->props.brightness;
++ struct amdgpu_display_manager *dm = bl_get_data(bd);
++ int ret = dc_link_get_backlight_level(dm->backlight_link);
++
++ if (ret == DC_ERROR_UNEXPECTED)
++ return bd->props.brightness;
++ return ret;
+ }
+
+ static const struct backlight_ops amdgpu_dm_backlight_ops = {
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index 5658eb1..186131b 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -2026,6 +2026,15 @@ enum dc_status dc_link_validate_mode_timing(
+ return DC_OK;
+ }
+
++int dc_link_get_backlight_level(const struct dc_link *link)
++{
++ struct abm *abm = link->ctx->dc->res_pool->abm;
++
++ if (abm == NULL || abm->funcs->get_current_backlight_8_bit == NULL)
++ return DC_ERROR_UNEXPECTED;
++
++ return (int) abm->funcs->get_current_backlight_8_bit(abm);
++}
+
+ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
+ uint32_t frame_ramp, const struct dc_stream_state *stream)
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
+index 2351681..44cd31f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
+@@ -141,6 +141,8 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_
+ bool dc_link_set_backlight_level(const struct dc_link *dc_link, uint32_t level,
+ uint32_t frame_ramp, const struct dc_stream_state *stream);
+
++int dc_link_get_backlight_level(const struct dc_link *dc_link);
++
+ bool dc_link_set_abm_disable(const struct dc_link *dc_link);
+
+ bool dc_link_set_psr_enable(const struct dc_link *dc_link, bool enable, bool wait);
+--
+2.7.4
+
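For context, backlight_ops.get_brightness is what the backlight core calls when it wants the value currently in hardware rather than the cached props.brightness. A minimal user-space model of the fallback logic added above, with hypothetical helper names:

#include <stdio.h>

#define DC_ERROR_UNEXPECTED (-1)

/*
 * Hypothetical stand-in for dc_link_get_backlight_level(): returns the
 * ABM-reported 8-bit level, or DC_ERROR_UNEXPECTED when ABM (or its
 * readback hook) is unavailable.
 */
static int hw_get_backlight_level(int abm_available, int hw_level)
{
	if (!abm_available)
		return DC_ERROR_UNEXPECTED;
	return hw_level;
}

/*
 * Model of amdgpu_dm_backlight_get_brightness() after the patch: prefer the
 * value read back from hardware, fall back to the cached software value.
 */
static int get_brightness(int abm_available, int hw_level, int cached)
{
	int ret = hw_get_backlight_level(abm_available, hw_level);

	if (ret == DC_ERROR_UNEXPECTED)
		return cached;
	return ret;
}

int main(void)
{
	printf("ABM present: %d\n", get_brightness(1, 200, 255));	/* 200 */
	printf("ABM missing: %d\n", get_brightness(0, 200, 255));	/* 255 */
	return 0;
}
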
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5043-drm-amd-display-Read-back-max-backlight-value-at-boo.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5043-drm-amd-display-Read-back-max-backlight-value-at-boo.patch
new file mode 100644
index 00000000..1296b959
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5043-drm-amd-display-Read-back-max-backlight-value-at-boo.patch
@@ -0,0 +1,39 @@
+From 064e819e738f9f9fd1487d5e6d319f36fa1c27f5 Mon Sep 17 00:00:00 2001
+From: David Francis <David.Francis@amd.com>
+Date: Wed, 18 Jul 2018 16:03:30 -0400
+Subject: [PATCH 5043/5725] drm/amd/display: Read back max backlight value at
+ boot
+
+[Why]
+If there is no program explicitly setting the backlight
+brightness (for example, during a minimal install of Linux), the
+hardware defaults to maximum brightness but the backlight_device
+defaults to a value of 0. Thus, the settings UI displays the wrong
+brightness value.
+
+[How]
+When creating the backlight device, set brightness to max
+
+Signed-off-by: David Francis <David.Francis@amd.com>
+Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 1c8088e..0d6475b 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -1457,6 +1457,7 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
+ struct backlight_properties props = { 0 };
+
+ props.max_brightness = AMDGPU_MAX_BL_LEVEL;
++ props.brightness = AMDGPU_MAX_BL_LEVEL;
+ props.type = BACKLIGHT_RAW;
+
+ snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5044-drm-amd-display-Destroy-aux_engines-only-once.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5044-drm-amd-display-Destroy-aux_engines-only-once.patch
new file mode 100644
index 00000000..028a19c2
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5044-drm-amd-display-Destroy-aux_engines-only-once.patch
@@ -0,0 +1,37 @@
+From 40c879456d0f55bb8cccd8f6a53cb7813c0316ec Mon Sep 17 00:00:00 2001
+From: David Francis <David.Francis@amd.com>
+Date: Thu, 19 Jul 2018 15:48:24 -0400
+Subject: [PATCH 5044/5725] drm/amd/display: Destroy aux_engines only once
+
+[Why]
+In the dce112 function that destroys the resource pool, the engines
+array (the aux engines) is destroyed twice. This has no ill effects
+but is a tad redundant.
+
+[How]
+Remove the redundant call
+
+Signed-off-by: David Francis <David.Francis@amd.com>
+Reviewed-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+index 84a05ff..2881293 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+@@ -677,9 +677,6 @@ static void destruct(struct dce110_resource_pool *pool)
+ pool->base.timing_generators[i] = NULL;
+ }
+
+- if (pool->base.engines[i] != NULL)
+- dce110_engine_destroy(&pool->base.engines[i]);
+-
+ }
+
+ for (i = 0; i < pool->base.stream_enc_count; i++) {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5045-drm-amd-display-Implement-custom-degamma-lut-on-dcn.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5045-drm-amd-display-Implement-custom-degamma-lut-on-dcn.patch
new file mode 100644
index 00000000..990db1bd
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5045-drm-amd-display-Implement-custom-degamma-lut-on-dcn.patch
@@ -0,0 +1,170 @@
+From bc975c0b7995414e3d70a9c1ffb4636debe233bd Mon Sep 17 00:00:00 2001
+From: David Francis <David.Francis@amd.com>
+Date: Thu, 12 Jul 2018 15:46:41 -0400
+Subject: [PATCH 5045/5725] drm/amd/display: Implement custom degamma lut on
+ dcn
+
+[Why]
+Custom degamma lut functions are a feature we would
+like to support on compatible hardware
+
+[How]
+In atomic check, convert from the array of drm_color_lut to
+dc_transfer_func. On hardware commit, allow for the possibility
+of custom degamma. Both are based on the equivalent
+regamma pipeline.
+
+Signed-off-by: David Francis <David.Francis@amd.com>
+Reviewed-by: Krunoslav Kovac <Krunoslav.Kovac@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../drm/amd/display/amdgpu_dm/amdgpu_dm_color.c | 42 ++++++++++++++++------
+ drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 2 ++
+ .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 7 ++--
+ .../drm/amd/display/modules/color/color_gamma.c | 10 ++++--
+ 4 files changed, 46 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+index b329393..326f6fb 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+@@ -231,18 +231,21 @@ void amdgpu_dm_set_ctm(struct dm_crtc_state *crtc)
+ * preparation for hardware commit. If no lut is specified by user, we default
+ * to SRGB degamma.
+ *
+- * Currently, we only support degamma bypass, or preprogrammed SRGB degamma.
+- * Programmable degamma is not supported, and an attempt to do so will return
+- * -EINVAL.
++ * We support degamma bypass, predefined SRGB, and custom degamma
+ *
+ * RETURNS:
+- * 0 on success, -EINVAL if custom degamma curve is given.
++ * 0 on success
++ * -EINVAL if crtc_state has a degamma_lut of invalid size
++ * -ENOMEM if gamma allocation fails
+ */
+ int amdgpu_dm_set_degamma_lut(struct drm_crtc_state *crtc_state,
+ struct dc_plane_state *dc_plane_state)
+ {
+ struct drm_property_blob *blob = crtc_state->degamma_lut;
+ struct drm_color_lut *lut;
++ uint32_t lut_size;
++ struct dc_gamma *gamma;
++ bool ret;
+
+ if (!blob) {
+ /* Default to SRGB */
+@@ -258,11 +261,30 @@ int amdgpu_dm_set_degamma_lut(struct drm_crtc_state *crtc_state,
+ return 0;
+ }
+
+- /* Otherwise, assume SRGB, since programmable degamma is not
+- * supported.
+- */
+- dc_plane_state->in_transfer_func->type = TF_TYPE_PREDEFINED;
+- dc_plane_state->in_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
+- return -EINVAL;
++ gamma = dc_create_gamma();
++ if (!gamma)
++ return -ENOMEM;
++
++ lut_size = blob->length / sizeof(struct drm_color_lut);
++ gamma->num_entries = lut_size;
++ if (gamma->num_entries == MAX_COLOR_LUT_ENTRIES)
++ gamma->type = GAMMA_CUSTOM;
++ else {
++ dc_gamma_release(&gamma);
++ return -EINVAL;
++ }
++
++ __drm_lut_to_dc_gamma(lut, gamma, false);
++
++ dc_plane_state->in_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS;
++ ret = mod_color_calculate_degamma_params(dc_plane_state->in_transfer_func, gamma, true);
++ dc_gamma_release(&gamma);
++ if (!ret) {
++ dc_plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
++ DRM_ERROR("Out of memory when calculating degamma params\n");
++ return -ENOMEM;
++ }
++
++ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+index bbc384f..57f57cf 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+@@ -417,6 +417,7 @@ enum {
+ GAMMA_RGB_256_ENTRIES = 256,
+ GAMMA_RGB_FLOAT_1024_ENTRIES = 1024,
+ GAMMA_CS_TFM_1D_ENTRIES = 4096,
++ GAMMA_CUSTOM_ENTRIES = 4096,
+ GAMMA_MAX_ENTRIES = 4096
+ };
+
+@@ -424,6 +425,7 @@ enum dc_gamma_type {
+ GAMMA_RGB_256 = 1,
+ GAMMA_RGB_FLOAT_1024 = 2,
+ GAMMA_CS_TFM_1D = 3,
++ GAMMA_CUSTOM = 4,
+ };
+
+ struct dc_csc_transform {
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 8535d87..9604c13 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -1228,8 +1228,11 @@ static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
+ } else if (tf->type == TF_TYPE_BYPASS) {
+ dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
+ } else {
+- /*TF_TYPE_DISTRIBUTED_POINTS*/
+- result = false;
++ cm_helper_translate_curve_to_degamma_hw_format(tf,
++ &dpp_base->degamma_params);
++ dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
++ &dpp_base->degamma_params);
++ result = true;
+ }
+
+ return result;
+diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+index fa9a199..4c67058 100644
+--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
++++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+@@ -997,7 +997,9 @@ static void scale_user_regamma_ramp(struct pwl_float_data *pwl_rgb,
+ * norm_y = 4095*regamma_y, and index is just truncating to nearest integer
+ * lut1 = lut1D[index], lut2 = lut1D[index+1]
+ *
+- *adjustedY is then linearly interpolating regamma Y between lut1 and lut2
++ * adjustedY is then linearly interpolating regamma Y between lut1 and lut2
++ *
++ * Custom degamma on Linux uses the same interpolation math, so is handled here
+ */
+ static void apply_lut_1d(
+ const struct dc_gamma *ramp,
+@@ -1018,7 +1020,7 @@ static void apply_lut_1d(
+ struct fixed31_32 delta_lut;
+ struct fixed31_32 delta_index;
+
+- if (ramp->type != GAMMA_CS_TFM_1D)
++ if (ramp->type != GAMMA_CS_TFM_1D && ramp->type != GAMMA_CUSTOM)
+ return; // this is not expected
+
+ for (i = 0; i < num_hw_points; i++) {
+@@ -1629,7 +1631,9 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
+ map_regamma_hw_to_x_user(ramp, coeff, rgb_user,
+ coordinates_x, axix_x, curve,
+ MAX_HW_POINTS, tf_pts,
+- mapUserRamp);
++ mapUserRamp && ramp->type != GAMMA_CUSTOM);
++ if (ramp->type == GAMMA_CUSTOM)
++ apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts);
+
+ ret = true;
+
+--
+2.7.4
+
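One detail worth spelling out from the hunk above: the degamma blob is only accepted when blob->length / sizeof(struct drm_color_lut) equals MAX_COLOR_LUT_ENTRIES; anything else is rejected with -EINVAL. A standalone sketch of that sizing rule, using a hypothetical mirror of the LUT entry layout and an assumed 4096-entry limit:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical mirror of struct drm_color_lut: four 16-bit fields, 8 bytes. */
struct color_lut_entry {
	uint16_t red;
	uint16_t green;
	uint16_t blue;
	uint16_t reserved;
};

#define MAX_COLOR_LUT_ENTRIES	4096	/* assumed value in this tree */
#define EINVAL			22

/*
 * Model of the validation in amdgpu_dm_set_degamma_lut(): the blob must hold
 * exactly MAX_COLOR_LUT_ENTRIES entries, otherwise the custom degamma is
 * rejected with -EINVAL.
 */
static int check_degamma_blob(size_t blob_length)
{
	size_t entries = blob_length / sizeof(struct color_lut_entry);

	return entries == MAX_COLOR_LUT_ENTRIES ? 0 : -EINVAL;
}

int main(void)
{
	printf("4096-entry blob: %d\n",
	       check_degamma_blob(4096 * sizeof(struct color_lut_entry)));	/* 0 */
	printf("256-entry blob:  %d\n",
	       check_degamma_blob(256 * sizeof(struct color_lut_entry)));	/* -22 */
	return 0;
}
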
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5046-drm-amd-display-Use-calculated-disp_clk_khz-value-fo.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5046-drm-amd-display-Use-calculated-disp_clk_khz-value-fo.patch
new file mode 100644
index 00000000..75da6292
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5046-drm-amd-display-Use-calculated-disp_clk_khz-value-fo.patch
@@ -0,0 +1,83 @@
+From 4fc39b72e286bea741d0cc881eb97e53267b8fe8 Mon Sep 17 00:00:00 2001
+From: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Date: Mon, 23 Jul 2018 14:13:23 -0400
+Subject: [PATCH 5046/5725] drm/amd/display: Use calculated disp_clk_khz value
+ for dce110
+
+[Why]
+
+The calculated values for actual disp_clk_khz were ignored when
+notifying pplib of the new display requirements. In order to honor DFS
+bypass clocks from the hardware, the calculated value should be used.
+
+[How]
+
+The return value for set_dispclk is now assigned back into new_clocks
+and correctly carried through into dccg->clks.dispclk_khz. When notifying
+pplib of new display requirements, dccg->clks.dispclk_khz is used
+instead of dce.dispclk_khz. The value of dce.dispclk_khz was never
+explicitly set to anything before.
+
+A 15% higher display clock value than calculated is no longer requested
+for dce110 since it now makes use of the calculated value.
+
+Since dce112 makes use of dce110's set_bandwidth but not its
+update_clocks it needs to have the value correctly carried through.
+
+Signed-off-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
+Reviewed-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c | 4 ++--
+ drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 4 ++--
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+index 0db8d1d..f176779 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+@@ -463,7 +463,7 @@ static void dce12_update_clocks(struct dccg *dccg,
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
+ clock_voltage_req.clocks_in_khz = new_clocks->dispclk_khz;
+- dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
++ new_clocks->dispclk_khz = dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
+ dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
+
+ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+@@ -661,7 +661,7 @@ static void dce_update_clocks(struct dccg *dccg,
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
+- dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
++ new_clocks->dispclk_khz = dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
+ dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
+ }
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index 4fff944..afd1743 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -2511,7 +2511,7 @@ static void pplib_apply_display_requirements(
+ /* TODO: dce11.2*/
+ pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
+
+- pp_display_cfg->disp_clk_khz = context->bw.dce.dispclk_khz;
++ pp_display_cfg->disp_clk_khz = dc->res_pool->dccg->clks.dispclk_khz;
+
+ dce110_fill_display_configs(context, pp_display_cfg);
+
+@@ -2540,7 +2540,7 @@ void dce110_set_bandwidth(
+ {
+ struct dc_clocks req_clks;
+
+- req_clks.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
++ req_clks.dispclk_khz = context->bw.dce.dispclk_khz;
+ req_clks.phyclk_khz = get_max_pixel_clock_for_all_paths(dc, context);
+
+ if (decrease_allowed)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5047-drm-amd-display-Don-t-share-clk-source-between-DP-an.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5047-drm-amd-display-Don-t-share-clk-source-between-DP-an.patch
new file mode 100644
index 00000000..93c10a32
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5047-drm-amd-display-Don-t-share-clk-source-between-DP-an.patch
@@ -0,0 +1,130 @@
+From eea8598ecdffe8af7497d2de0c247ade282400c6 Mon Sep 17 00:00:00 2001
+From: Mikita Lipski <mikita.lipski@amd.com>
+Date: Thu, 12 Jul 2018 16:44:05 -0400
+Subject: [PATCH 5047/5725] drm/amd/display: Don't share clk source between DP
+ and HDMI
+
+[why]
+Prevent clock source sharing between HDMI and DP connectors.
+DP shouldn't be sharing its ref clock with the phy clock,
+which caused an issue on older ASICs booting up with multiple
+displays plugged in.
+
+[how]
+Add an extra check that prevents HDMI and DP from sharing a clock source.
+
+Signed-off-by: Mikita Lipski <mikita.lipski@amd.com>
+Reviewed-by: Hersen Wu <hersenxs.wu@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 22 +++++++++++++++++++++-
+ drivers/gpu/drm/amd/display/dc/dc.h | 1 +
+ .../drm/amd/display/dc/dce100/dce100_resource.c | 2 +-
+ .../gpu/drm/amd/display/dc/dce80/dce80_resource.c | 3 +++
+ 4 files changed, 26 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index f42a465..2a4a642 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -329,6 +329,9 @@ bool resource_are_streams_timing_synchronizable(
+ != stream2->timing.pix_clk_khz)
+ return false;
+
++ if (stream1->clamping.c_depth != stream2->clamping.c_depth)
++ return false;
++
+ if (stream1->phy_pix_clk != stream2->phy_pix_clk
+ && (!dc_is_dp_signal(stream1->signal)
+ || !dc_is_dp_signal(stream2->signal)))
+@@ -336,6 +339,20 @@ bool resource_are_streams_timing_synchronizable(
+
+ return true;
+ }
++static bool is_dp_and_hdmi_sharable(
++ struct dc_stream_state *stream1,
++ struct dc_stream_state *stream2)
++{
++ if (stream1->ctx->dc->caps.disable_dp_clk_share)
++ return false;
++
++ if (stream1->clamping.c_depth != COLOR_DEPTH_888 ||
++ stream2->clamping.c_depth != COLOR_DEPTH_888)
++ return false;
++
++ return true;
++
++}
+
+ static bool is_sharable_clk_src(
+ const struct pipe_ctx *pipe_with_clk_src,
+@@ -347,7 +364,10 @@ static bool is_sharable_clk_src(
+ if (pipe_with_clk_src->stream->signal == SIGNAL_TYPE_VIRTUAL)
+ return false;
+
+- if (dc_is_dp_signal(pipe_with_clk_src->stream->signal))
++ if (dc_is_dp_signal(pipe_with_clk_src->stream->signal) ||
++ (dc_is_dp_signal(pipe->stream->signal) &&
++ !is_dp_and_hdmi_sharable(pipe_with_clk_src->stream,
++ pipe->stream)))
+ return false;
+
+ if (dc_is_hdmi_signal(pipe_with_clk_src->stream->signal)
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 6c7b57d..8dfbce9 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -78,6 +78,7 @@ struct dc_caps {
+ bool dual_link_dvi;
+ bool post_blend_color_processing;
+ bool force_dp_tps4_for_cp2520;
++ bool disable_dp_clk_share;
+ };
+
+ struct dc_dcc_surface_param {
+diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+index fd2bdae..3f76e60 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+@@ -919,7 +919,7 @@ static bool construct(
+ dc->caps.i2c_speed_in_khz = 40;
+ dc->caps.max_cursor_size = 128;
+ dc->caps.dual_link_dvi = true;
+-
++ dc->caps.disable_dp_clk_share = true;
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ pool->base.timing_generators[i] =
+ dce100_timing_generator_create(
+diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+index dc9f3e9..604c629 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+@@ -946,6 +946,7 @@ static bool dce80_construct(
+ }
+
+ dc->caps.max_planes = pool->base.pipe_count;
++ dc->caps.disable_dp_clk_share = true;
+
+ if (!resource_construct(num_virtual_links, dc, &pool->base,
+ &res_create_funcs))
+@@ -1131,6 +1132,7 @@ static bool dce81_construct(
+ }
+
+ dc->caps.max_planes = pool->base.pipe_count;
++ dc->caps.disable_dp_clk_share = true;
+
+ if (!resource_construct(num_virtual_links, dc, &pool->base,
+ &res_create_funcs))
+@@ -1312,6 +1314,7 @@ static bool dce83_construct(
+ }
+
+ dc->caps.max_planes = pool->base.pipe_count;
++ dc->caps.disable_dp_clk_share = true;
+
+ if (!resource_construct(num_virtual_links, dc, &pool->base,
+ &res_create_funcs))
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5048-drm-amd-display-add-vbios-table-check-for-enabling-d.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5048-drm-amd-display-add-vbios-table-check-for-enabling-d.patch
new file mode 100644
index 00000000..a78b1d76
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5048-drm-amd-display-add-vbios-table-check-for-enabling-d.patch
@@ -0,0 +1,73 @@
+From af1fe386f732bce046ff4f76c6038c8162250eb1 Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Wed, 18 Jul 2018 15:25:34 -0400
+Subject: [PATCH 5048/5725] drm/amd/display: add vbios table check for enabling
+ dp ss
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Eric Bernstein <Eric.Bernstein@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_link.c | 1 +
+ drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 5 ++++-
+ drivers/gpu/drm/amd/display/dc/dc_link.h | 1 +
+ drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h | 2 ++
+ 4 files changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index 186131b..574c041 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -1038,6 +1038,7 @@ static bool construct(
+ link->link_index = init_params->link_index;
+
+ link->link_id = bios->funcs->get_connector_id(bios, init_params->connector_index);
++ link->dp_ss_off = !!dc_ctx->dc_bios->integrated_info->dp_ss_control;;
+
+ if (link->link_id.type != OBJECT_TYPE_CONNECTOR) {
+ dm_error("%s: Invalid Connector ObjectID from Adapter Service for connector index:%d! type %d expected %d\n",
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+index e8a69d7..d91df5e 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+@@ -953,7 +953,10 @@ enum link_training_result dc_link_dp_perform_link_training(
+ * LINK_SPREAD_05_DOWNSPREAD_30KHZ :
+ * LINK_SPREAD_DISABLED;
+ */
+- lt_settings.link_settings.link_spread = LINK_SPREAD_05_DOWNSPREAD_30KHZ;
++ if (link->dp_ss_off)
++ lt_settings.link_settings.link_spread = LINK_SPREAD_DISABLED;
++ else
++ lt_settings.link_settings.link_spread = LINK_SPREAD_05_DOWNSPREAD_30KHZ;
+
+ /* 1. set link rate, lane count and spread*/
+ dpcd_set_link_settings(link, &lt_settings);
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
+index 44cd31f..1794764 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
+@@ -73,6 +73,7 @@ struct dc_link {
+ enum dc_irq_source irq_source_hpd;
+ enum dc_irq_source irq_source_hpd_rx;/* aka DP Short Pulse */
+ bool is_hpd_filter_disabled;
++ bool dp_ss_off;
+
+ /* caps is the same as reported_link_cap. link_traing use
+ * reported_link_cap. Will clean up. TODO
+diff --git a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
+index 36bbad5..f312834 100644
+--- a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
++++ b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
+@@ -395,6 +395,8 @@ struct integrated_info {
+ struct i2c_reg_info dp3_ext_hdmi_reg_settings[9];
+ unsigned char dp3_ext_hdmi_6g_reg_num;
+ struct i2c_reg_info dp3_ext_hdmi_6g_reg_settings[3];
++ /* V11 */
++ uint32_t dp_ss_control;
+ };
+
+ /**
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5049-drm-amd-display-Add-NULL-check-for-enabling-dp-ss.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5049-drm-amd-display-Add-NULL-check-for-enabling-dp-ss.patch
new file mode 100644
index 00000000..5364d941
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5049-drm-amd-display-Add-NULL-check-for-enabling-dp-ss.patch
@@ -0,0 +1,41 @@
+From ae6eac5a2e26cc7cf02a4e00c7e7917bccd0c8a2 Mon Sep 17 00:00:00 2001
+From: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Date: Mon, 30 Jul 2018 12:27:23 -0400
+Subject: [PATCH 5049/5725] drm/amd/display: Add NULL check for enabling dp ss
+
+[Why]
+
+The pointer for integrated_info can be NULL, which causes the system to
+do a null pointer dereference and hang on boot.
+
+[How]
+
+Add a check to ensure that integrated_info is not null before enabling
+DP ss.
+
+Signed-off-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Reviewed-by: Sun peng Li <Sunpeng.Li@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_link.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index 574c041..c3ca109 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -1038,7 +1038,9 @@ static bool construct(
+ link->link_index = init_params->link_index;
+
+ link->link_id = bios->funcs->get_connector_id(bios, init_params->connector_index);
+- link->dp_ss_off = !!dc_ctx->dc_bios->integrated_info->dp_ss_control;;
++
++ if (dc_ctx->dc_bios->integrated_info)
++ link->dp_ss_off = !!dc_ctx->dc_bios->integrated_info->dp_ss_control;
+
+ if (link->link_id.type != OBJECT_TYPE_CONNECTOR) {
+ dm_error("%s: Invalid Connector ObjectID from Adapter Service for connector index:%d! type %d expected %d\n",
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5050-drm-amd-display-program-display-clock-on-cache-match.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5050-drm-amd-display-program-display-clock-on-cache-match.patch
new file mode 100644
index 00000000..c0bc2e1e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5050-drm-amd-display-program-display-clock-on-cache-match.patch
@@ -0,0 +1,56 @@
+From 5e2af58cba7118b313815c45d574295957565175 Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Thu, 26 Jul 2018 12:17:58 -0400
+Subject: [PATCH 5050/5725] drm/amd/display: program display clock on cache
+ match
+
+[Why]
+We seem to have an issue where a high enough display clock
+will not get set properly during S3 resume if we only
+call the vbios once.
+
+[How]
+Expand the condition for display clock programming so it happens
+even when the cached display clock matches the requested display
+clock.
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c | 4 +++-
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 2 ++
+ 2 files changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+index f176779..684da3d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+@@ -625,7 +625,9 @@ static void dcn1_update_clocks(struct dccg *dccg,
+ }
+
+ /* dcn1 dppclk is tied to dispclk */
+- if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
++ /* program dispclk even when unchanged (==) as a w/a for sleep/resume clock ramping issues */
++ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)
++ || new_clocks->dispclk_khz == dccg->clks.dispclk_khz) {
+ dcn1_ramp_up_dispclk_with_dpp(dccg, new_clocks);
+ dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 9604c13..05014e0 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -1104,6 +1104,8 @@ static void dcn10_init_hw(struct dc *dc)
+ }
+
+ enable_power_gating_plane(dc->hwseq, true);
++
++ memset(&dc->res_pool->dccg->clks, 0, sizeof(dc->res_pool->dccg->clks));
+ }
+
+ static void reset_hw_ctx_wrap(
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5051-drm-amd-display-update-clk-for-various-HDMI-color-de.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5051-drm-amd-display-update-clk-for-various-HDMI-color-de.patch
new file mode 100644
index 00000000..783208a1
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5051-drm-amd-display-update-clk-for-various-HDMI-color-de.patch
@@ -0,0 +1,57 @@
+From 0b90d171fe33b4cd160a8250b279e56eab4abae6 Mon Sep 17 00:00:00 2001
+From: Mikita Lipski <mikita.lipski@amd.com>
+Date: Fri, 13 Jul 2018 09:07:35 -0400
+Subject: [PATCH 5051/5725] drm/amd/display: update clk for various HDMI color
+ depths
+
+[why]
+When programming Tonga's connector backend we didn't take
+into account that HDMI's colour depth might be more than 8bpc;
+therefore we need to add a switch statement that adjusts
+the pixel clock accordingly.
+
+[how]
+Add a switch statement that updates the clock by the appropriate
+coefficient.
+
+Signed-off-by: Mikita Lipski <mikita.lipski@amd.com>
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+---
+ drivers/gpu/drm/amd/display/dc/bios/command_table.c | 18 ++++++++++++++++++
+ 1 file changed, 18 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+index 651e1fd..a558bfa 100644
+--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
++++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+@@ -808,6 +808,24 @@ static enum bp_result transmitter_control_v1_5(
+ * (=1: 8bpp, =1.25: 10bpp, =1.5:12bpp, =2: 16bpp)
+ * LVDS mode: usPixelClock = pixel clock
+ */
++ if (cntl->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
++ switch (cntl->color_depth) {
++ case COLOR_DEPTH_101010:
++ params.usSymClock =
++ cpu_to_le16((le16_to_cpu(params.usSymClock) * 30) / 24);
++ break;
++ case COLOR_DEPTH_121212:
++ params.usSymClock =
++ cpu_to_le16((le16_to_cpu(params.usSymClock) * 36) / 24);
++ break;
++ case COLOR_DEPTH_161616:
++ params.usSymClock =
++ cpu_to_le16((le16_to_cpu(params.usSymClock) * 48) / 24);
++ break;
++ default:
++ break;
++ }
++ }
+
+ if (EXEC_BIOS_CMD_TABLE(UNIPHYTransmitterControl, params))
+ result = BP_RESULT_OK;
+--
+2.7.4
+
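Worked example of the scaling above: the HDMI symbol clock is the pixel clock multiplied by bpc/24, so 10bpc scales by 30/24 (1.25x), 12bpc by 36/24 (1.5x) and 16bpc by 48/24 (2x); a 297000 kHz pixel clock at 12bpc therefore needs a 445500 kHz symbol clock. A tiny sketch of the same arithmetic, kept separate from the command-table code:

#include <stdint.h>
#include <stdio.h>

enum color_depth { DEPTH_888, DEPTH_101010, DEPTH_121212, DEPTH_161616 };

/*
 * Scale the HDMI symbol clock for deep color, mirroring the 30/24, 36/24 and
 * 48/24 factors applied in transmitter_control_v1_5() above.
 */
static uint32_t hdmi_sym_clock_khz(uint32_t pix_clk_khz, enum color_depth depth)
{
	switch (depth) {
	case DEPTH_101010: return pix_clk_khz * 30 / 24;
	case DEPTH_121212: return pix_clk_khz * 36 / 24;
	case DEPTH_161616: return pix_clk_khz * 48 / 24;
	default:           return pix_clk_khz;	/* 8bpc: no scaling */
	}
}

int main(void)
{
	printf("297000 kHz @ 12bpc -> %u kHz\n",
	       hdmi_sym_clock_khz(297000, DEPTH_121212));	/* 445500 */
	return 0;
}
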
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5052-drm-amd-display-display-connected-to-dp-1-does-not-l.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5052-drm-amd-display-display-connected-to-dp-1-does-not-l.patch
new file mode 100644
index 00000000..a652176f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5052-drm-amd-display-display-connected-to-dp-1-does-not-l.patch
@@ -0,0 +1,67 @@
+From ae41a6a1c4d81e75ae7139a210319dba19e040e2 Mon Sep 17 00:00:00 2001
+From: Hersen Wu <hersenxs.wu@amd.com>
+Date: Fri, 27 Jul 2018 14:52:37 -0400
+Subject: [PATCH 5052/5725] drm/amd/display: display connected to dp-1 does not
+ light up
+
+[why]
+For Vega, DP set_panel_mode is
+handled by the PSP firmware. DAL should not program the
+register again.
+
+[how]
+DAL does not program the panel mode.
+
+Signed-off-by: Hersen Wu <hersenxs.wu@amd.com>
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc.h | 1 +
+ drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c | 5 +++++
+ drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c | 1 +
+ 3 files changed, 7 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 8dfbce9..4f9d969 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -79,6 +79,7 @@ struct dc_caps {
+ bool post_blend_color_processing;
+ bool force_dp_tps4_for_cp2520;
+ bool disable_dp_clk_share;
++ bool psp_setup_panel_mode;
+ };
+
+ struct dc_dcc_surface_param {
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+index 60e3c6a..752b3d6 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+@@ -256,6 +256,11 @@ static void setup_panel_mode(
+ enum dp_panel_mode panel_mode)
+ {
+ uint32_t value;
++ struct dc_context *ctx = enc110->base.ctx;
++
++ /* if psp sets the panel mode, dal should not program it */
++ if (ctx->dc->caps.psp_setup_panel_mode)
++ return;
+
+ ASSERT(REG(DP_DPHY_INTERNAL_CTRL));
+ value = REG_READ(DP_DPHY_INTERNAL_CTRL);
+diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+index 61d8e22..d43f37d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+@@ -883,6 +883,7 @@ static bool construct(
+ dc->caps.i2c_speed_in_khz = 100;
+ dc->caps.max_cursor_size = 128;
+ dc->caps.dual_link_dvi = true;
++ dc->caps.psp_setup_panel_mode = true;
+
+ dc->debug = debug_defaults;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5053-drm-amdgpu-sriov-give-8s-for-recover-vram-under-RUNT.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5053-drm-amdgpu-sriov-give-8s-for-recover-vram-under-RUNT.patch
new file mode 100644
index 00000000..6a479700
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5053-drm-amdgpu-sriov-give-8s-for-recover-vram-under-RUNT.patch
@@ -0,0 +1,43 @@
+From fd4366c0c11e7dda3fd5e2e70ee65613cd4dbdd8 Mon Sep 17 00:00:00 2001
+From: Emily Deng <Emily.Deng@amd.com>
+Date: Thu, 9 Aug 2018 10:03:04 +0800
+Subject: [PATCH 5053/5725] drm/amdgpu/sriov: give 8s for recover vram under
+ RUNTIME
+
+Extend the timeout for recovering vram bos from shadows on sr-iov
+to cover the worst case scenario for timeslices and VFs.
+
+Under runtime, the wait fence time can be quite long when
+other VFs are in exclusive mode. For example, with 4 VFs and each
+VF's exclusive timeout set to 3s, the worst case is
+9s. If there are more than 4 VFs, the worst case time will
+be even longer.
+The 8s value comes from testing: with the timeout set to 8s, the TDR
+test passes 1000 times in a row.
+
+SWDEV-161490
+
+Signed-off-by: Monk Liu <Monk.Liu@amd.com>
+Signed-off-by: Emily Deng <Emily.Deng@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 6dcbe98..4bc4c4a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -3161,7 +3161,7 @@ static int amdgpu_device_handle_vram_lost(struct amdgpu_device *adev)
+ long tmo;
+
+ if (amdgpu_sriov_runtime(adev))
+- tmo = msecs_to_jiffies(amdgpu_lockup_timeout);
++ tmo = msecs_to_jiffies(8000);
+ else
+ tmo = msecs_to_jiffies(100);
+
+--
+2.7.4
+
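To make the worst-case arithmetic concrete: with each VF holding a 3s exclusive slice, a VF recovering VRAM may wait roughly 3s for each of the other VFs, i.e. about 9s with 4 VFs, which is why the 100ms bare-metal timeout is far too short under SR-IOV. A toy sketch of that bound (the 3s slice and the 8s choice come from the commit message, not derived from the code):

#include <stdio.h>

#define EXCLUSIVE_SLICE_MS	3000	/* per-VF exclusive time quoted in the commit message */

/*
 * Worst-case wait for one VF while each of the other (num_vf - 1) VFs uses
 * its full exclusive slice.
 */
static unsigned int worst_case_wait_ms(unsigned int num_vf)
{
	return (num_vf - 1) * EXCLUSIVE_SLICE_MS;
}

int main(void)
{
	printf("4 VFs -> worst case %u ms\n", worst_case_wait_ms(4));	/* 9000 */
	printf("8 VFs -> worst case %u ms\n", worst_case_wait_ms(8));	/* 21000 */
	return 0;
}
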
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5054-drm-amd-display-fix-single-link-DVI-has-no-display.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5054-drm-amd-display-fix-single-link-DVI-has-no-display.patch
new file mode 100644
index 00000000..e20e5133
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5054-drm-amd-display-fix-single-link-DVI-has-no-display.patch
@@ -0,0 +1,29 @@
+From 2956ab3a81e8825f03bbefe16f9f2518e5a91108 Mon Sep 17 00:00:00 2001
+From: Charlene Liu <charlene.liu@amd.com>
+Date: Mon, 30 Jul 2018 17:59:20 -0400
+Subject: [PATCH 5054/5725] drm/amd/display: fix single link DVI has no display
+
+Signed-off-by: Charlene Liu <charlene.liu@amd.com>
+Reviewed-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_link.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index c3ca109..3bec439 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -1812,6 +1812,8 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
+ bool is_vga_mode = (stream->timing.h_addressable == 640)
+ && (stream->timing.v_addressable == 480);
+
++ if (stream->phy_pix_clk == 0)
++ stream->phy_pix_clk = stream->timing.pix_clk_khz;
+ if (stream->phy_pix_clk > 340000)
+ is_over_340mhz = true;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5055-drm-amd-display-Allow-clock-sharing-b-w-HDMI-and-DVI.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5055-drm-amd-display-Allow-clock-sharing-b-w-HDMI-and-DVI.patch
new file mode 100644
index 00000000..ad17ebcd
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5055-drm-amd-display-Allow-clock-sharing-b-w-HDMI-and-DVI.patch
@@ -0,0 +1,45 @@
+From 62a45ccc38345dbb9f05f3dd04c44e030f50628f Mon Sep 17 00:00:00 2001
+From: Mikita Lipski <mikita.lipski@amd.com>
+Date: Mon, 16 Jul 2018 09:17:55 -0400
+Subject: [PATCH 5055/5725] drm/amd/display: Allow clock sharing b/w HDMI and
+ DVI
+
+[why]
+HDMI and DVI share the same PHY clock, and single link
+DVI and HDMI both use 4 lanes, so they should be allowed
+to share the same clock source if all other parameters
+are satisfied.
+
+[how]
+Change the check from general DVI to dual-link DVI.
+
+Signed-off-by: Mikita Lipski <mikita.lipski@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index 2a4a642..1f3aa4a 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -371,11 +371,11 @@ static bool is_sharable_clk_src(
+ return false;
+
+ if (dc_is_hdmi_signal(pipe_with_clk_src->stream->signal)
+- && dc_is_dvi_signal(pipe->stream->signal))
++ && dc_is_dual_link_signal(pipe->stream->signal))
+ return false;
+
+ if (dc_is_hdmi_signal(pipe->stream->signal)
+- && dc_is_dvi_signal(pipe_with_clk_src->stream->signal))
++ && dc_is_dual_link_signal(pipe_with_clk_src->stream->signal))
+ return false;
+
+ if (!resource_are_streams_timing_synchronizable(
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5056-drm-amd-display-Pass-connector-id-when-executing-VBI.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5056-drm-amd-display-Pass-connector-id-when-executing-VBI.patch
new file mode 100644
index 00000000..ebb89248
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5056-drm-amd-display-Pass-connector-id-when-executing-VBI.patch
@@ -0,0 +1,48 @@
+From 78c3a0056209da78bee2eaf47d8d29081174bb67 Mon Sep 17 00:00:00 2001
+From: Mikita Lipski <mikita.lipski@amd.com>
+Date: Tue, 17 Jul 2018 10:52:19 -0400
+Subject: [PATCH 5056/5725] drm/amd/display: Pass connector id when executing
+ VBIOS CT
+
+[why]
+Older ASICs require both phys_id and connector_id
+to execute a bios command table. If we are not passing the
+right connector_id, it can lead to a black screen.
+
+[how]
+Set connector_obj_id when executing vbios command table
+
+Signed-off-by: Mikita Lipski <mikita.lipski@amd.com>
+Reviewed-by: Hersen Wu <hersenxs.wu@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+---
+ drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+index 752b3d6..eff7d22 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+@@ -930,7 +930,7 @@ void dce110_link_encoder_enable_tmds_output(
+ enum bp_result result;
+
+ /* Enable the PHY */
+-
++ cntl.connector_obj_id = enc110->base.connector;
+ cntl.action = TRANSMITTER_CONTROL_ENABLE;
+ cntl.engine_id = enc->preferred_engine;
+ cntl.transmitter = enc110->base.transmitter;
+@@ -972,7 +972,7 @@ void dce110_link_encoder_enable_dp_output(
+ * We need to set number of lanes manually.
+ */
+ configure_encoder(enc110, link_settings);
+-
++ cntl.connector_obj_id = enc110->base.connector;
+ cntl.action = TRANSMITTER_CONTROL_ENABLE;
+ cntl.engine_id = enc->preferred_engine;
+ cntl.transmitter = enc110->base.transmitter;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5057-drm-amd-display-Guard-against-null-crtc-in-CRC-IRQ.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5057-drm-amd-display-Guard-against-null-crtc-in-CRC-IRQ.patch
new file mode 100644
index 00000000..21eb8eb2
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5057-drm-amd-display-Guard-against-null-crtc-in-CRC-IRQ.patch
@@ -0,0 +1,50 @@
+From 45f20843c6c83ecfd5329b3e4d66396690a29438 Mon Sep 17 00:00:00 2001
+From: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Date: Fri, 3 Aug 2018 10:40:20 -0400
+Subject: [PATCH 5057/5725] drm/amd/display: Guard against null crtc in CRC IRQ
+
+[Why]
+
+A null pointer dereference can occur if crtc is null in
+amdgpu_dm_crtc_handle_crc_irq. This can happen if get_crtc_by_otg_inst
+returns NULL during dm_crtc_high_irq, leading to a hang in some IGT
+test cases.
+
+[How]
+
+Check that CRTC is non-null before accessing its fields.
+
+Signed-off-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Reviewed-by: Sun peng Li <Sunpeng.Li@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+index ad80991..ce01920 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+@@ -98,10 +98,16 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name,
+ */
+ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc)
+ {
+- struct dm_crtc_state *crtc_state = to_dm_crtc_state(crtc->state);
+- struct dc_stream_state *stream_state = crtc_state->stream;
++ struct dm_crtc_state *crtc_state;
++ struct dc_stream_state *stream_state;
+ uint32_t crcs[3];
+
++ if (crtc == NULL)
++ return;
++
++ crtc_state = to_dm_crtc_state(crtc->state);
++ stream_state = crtc_state->stream;
++
+ /* Early return if CRC capture is not enabled. */
+ if (!crtc_state->crc_enabled)
+ return;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5058-drm-amd-pp-Add-ACP-PG-support-in-SMU.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5058-drm-amd-pp-Add-ACP-PG-support-in-SMU.patch
new file mode 100644
index 00000000..6f9fd70c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5058-drm-amd-pp-Add-ACP-PG-support-in-SMU.patch
@@ -0,0 +1,115 @@
+From 75400fc9632182396de89bc1ce9fad5557dab0d2 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Thu, 19 Jul 2018 13:49:07 +0800
+Subject: [PATCH 5058/5725] drm/amd/pp: Add ACP PG support in SMU
+
+When the ACP block is not enabled, we power off
+the ACP block to save power.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 18 ++++++++++++++++++
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c | 21 ++++++++++++++++++++-
+ drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 1 +
+ 3 files changed, 39 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+index 6ef06a4..2bdef16 100644
+--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
++++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+@@ -1192,6 +1192,21 @@ static int pp_dpm_powergate_gfx(void *handle, bool gate)
+ return hwmgr->hwmgr_func->powergate_gfx(hwmgr, gate);
+ }
+
++static void pp_dpm_powergate_acp(void *handle, bool gate)
++{
++ struct pp_hwmgr *hwmgr = handle;
++
++ if (!hwmgr || !hwmgr->pm_en)
++ return;
++
++ if (hwmgr->hwmgr_func->powergate_acp == NULL) {
++ pr_info("%s was not implemented.\n", __func__);
++ return;
++ }
++
++ hwmgr->hwmgr_func->powergate_acp(hwmgr, gate);
++}
++
+ static int pp_set_powergating_by_smu(void *handle,
+ uint32_t block_type, bool gate)
+ {
+@@ -1211,6 +1226,9 @@ static int pp_set_powergating_by_smu(void *handle,
+ case AMD_IP_BLOCK_TYPE_GFX:
+ ret = pp_dpm_powergate_gfx(handle, gate);
+ break;
++ case AMD_IP_BLOCK_TYPE_ACP:
++ pp_dpm_powergate_acp(handle, gate);
++ break;
+ default:
+ break;
+ }
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
+index 0adfc53..b863704 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
+@@ -664,8 +664,13 @@ static void smu8_init_power_gate_state(struct pp_hwmgr *hwmgr)
+ data->uvd_power_gated = false;
+ data->vce_power_gated = false;
+ data->samu_power_gated = false;
++#ifdef CONFIG_DRM_AMD_ACP
+ data->acp_power_gated = false;
+- data->pgacpinit = true;
++#else
++ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerOFF);
++ data->acp_power_gated = true;
++#endif
++
+ }
+
+ static void smu8_init_sclk_threshold(struct pp_hwmgr *hwmgr)
+@@ -1886,6 +1891,19 @@ static int smu8_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
+ }
+
+
++static void smu8_dpm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate)
++{
++ struct smu8_hwmgr *data = hwmgr->backend;
++
++ if (data->acp_power_gated == bgate)
++ return;
++
++ if (bgate)
++ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerOFF);
++ else
++ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerON);
++}
++
+ static void smu8_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
+ {
+ struct smu8_hwmgr *data = hwmgr->backend;
+@@ -1951,6 +1969,7 @@ static const struct pp_hwmgr_func smu8_hwmgr_funcs = {
+ .powerdown_uvd = smu8_dpm_powerdown_uvd,
+ .powergate_uvd = smu8_dpm_powergate_uvd,
+ .powergate_vce = smu8_dpm_powergate_vce,
++ .powergate_acp = smu8_dpm_powergate_acp,
+ .get_mclk = smu8_dpm_get_mclk,
+ .get_sclk = smu8_dpm_get_sclk,
+ .patch_boot_state = smu8_dpm_patch_boot_state,
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+index d3d9626..7e58a0d 100644
+--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+@@ -247,6 +247,7 @@ struct pp_hwmgr_func {
+ int (*powerdown_uvd)(struct pp_hwmgr *hwmgr);
+ void (*powergate_vce)(struct pp_hwmgr *hwmgr, bool bgate);
+ void (*powergate_uvd)(struct pp_hwmgr *hwmgr, bool bgate);
++ void (*powergate_acp)(struct pp_hwmgr *hwmgr, bool bgate);
+ uint32_t (*get_mclk)(struct pp_hwmgr *hwmgr, bool low);
+ uint32_t (*get_sclk)(struct pp_hwmgr *hwmgr, bool low);
+ int (*power_state_set)(struct pp_hwmgr *hwmgr,
+--
+2.7.4
+
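For orientation, a minimal sketch of the call path a driver-side ACP gate request takes once the hooks above are in place, assuming an smu8-based APU; the helper name and the include set are hypothetical and not part of the patch:

#include "amdgpu.h"
#include "amdgpu_dpm.h"	/* assumed location of the dpm wrapper */

/* Hypothetical helper, for illustration only. A gate request routes through
 * pp_set_powergating_by_smu() -> pp_dpm_powergate_acp() ->
 * hwmgr->hwmgr_func->powergate_acp() -> smu8_dpm_powergate_acp(), which
 * sends PPSMC_MSG_ACPPowerOFF or PPSMC_MSG_ACPPowerON to the SMU. */
static void example_acp_powergate(struct amdgpu_device *adev, bool gate)
{
	amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, gate);
}

The patches that follow wire exactly this wrapper call into the ACP hw_init/hw_fini and suspend/resume paths.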
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5059-drm-amdgpu-Power-down-acp-if-board-uses-AZ-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5059-drm-amdgpu-Power-down-acp-if-board-uses-AZ-v2.patch
new file mode 100644
index 00000000..5c520495
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5059-drm-amdgpu-Power-down-acp-if-board-uses-AZ-v2.patch
@@ -0,0 +1,73 @@
+From f1b5a8092a0aa022b2083cdab25645d700a9e07e Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Thu, 19 Jul 2018 11:48:36 -0500
+Subject: [PATCH 5059/5725] drm/amdgpu: Power down acp if board uses AZ (v2)
+
+If the board uses AZ rather than ACP, we power down ACP
+through the SMU to save power.
+
+v2: handle S3/S4 and hw_fini (Alex)
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c | 20 +++++++++++++++++---
+ 1 file changed, 17 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+index 663634c..b1be634 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+@@ -289,10 +289,12 @@ static int acp_hw_init(void *handle)
+ r = amd_acp_hw_init(adev->acp.cgs_device,
+ ip_block->version->major, ip_block->version->minor);
+ /* -ENODEV means board uses AZ rather than ACP */
+- if (r == -ENODEV)
++ if (r == -ENODEV) {
++ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
+ return 0;
+- else if (r)
++ } else if (r) {
+ return r;
++ }
+
+ if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289)
+ return -EINVAL;
+@@ -498,8 +500,10 @@ static int acp_hw_fini(void *handle)
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ /* return early if no ACP */
+- if (!adev->acp.acp_cell)
++ if (!adev->acp.acp_cell) {
++ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
+ return 0;
++ }
+
+ /* Assert Soft reset of ACP */
+ val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
+@@ -557,11 +561,21 @@ static int acp_hw_fini(void *handle)
+
+ static int acp_suspend(void *handle)
+ {
++ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
++
++ /* power up on suspend */
++ if (!adev->acp.acp_cell)
++ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
+ return 0;
+ }
+
+ static int acp_resume(void *handle)
+ {
++ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
++
++ /* power down again on resume */
++ if (!adev->acp.acp_cell)
++ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
+ return 0;
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5060-drm-amd-amdgpu-Enabling-Power-Gating-for-Stoney-plat.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5060-drm-amd-amdgpu-Enabling-Power-Gating-for-Stoney-plat.patch
new file mode 100644
index 00000000..aed22800
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5060-drm-amd-amdgpu-Enabling-Power-Gating-for-Stoney-plat.patch
@@ -0,0 +1,112 @@
+From 62810ba8992a3cd6d1d8dfc54426fd6534ffcf86 Mon Sep 17 00:00:00 2001
+From: Vijendar Mukunda <vijendar.mukunda@amd.com>
+Date: Sun, 29 Jul 2018 19:08:32 +0800
+Subject: [PATCH 5060/5725] drm/amd/amdgpu: Enabling Power Gating for Stoney
+ platform
+
+Remove the condition checks that skipped the power gating feature on
+the Stoney platform.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Vijendar Mukunda <vijendar.mukunda@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c | 53 ++++++++++++++-------------------
+ 1 file changed, 23 insertions(+), 30 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+index b1be634..38ecca2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+@@ -301,20 +301,17 @@ static int acp_hw_init(void *handle)
+
+ acp_base = adev->rmmio_base;
+
+- if (adev->asic_type != CHIP_STONEY) {
+- adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL);
+- if (adev->acp.acp_genpd == NULL)
+- return -ENOMEM;
+-
+- adev->acp.acp_genpd->gpd.name = "ACP_AUDIO";
+- adev->acp.acp_genpd->gpd.power_off = acp_poweroff;
+- adev->acp.acp_genpd->gpd.power_on = acp_poweron;
+-
++ adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL);
++ if (adev->acp.acp_genpd == NULL)
++ return -ENOMEM;
++
++ adev->acp.acp_genpd->gpd.name = "ACP_AUDIO";
++ adev->acp.acp_genpd->gpd.power_off = acp_poweroff;
++ adev->acp.acp_genpd->gpd.power_on = acp_poweron;
+
+- adev->acp.acp_genpd->cgs_dev = adev->acp.cgs_device;
++ adev->acp.acp_genpd->cgs_dev = adev->acp.cgs_device;
+
+- pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false);
+- }
++ pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false);
+
+ adev->acp.acp_cell = kzalloc(sizeof(struct mfd_cell) * ACP_DEVS,
+ GFP_KERNEL);
+@@ -432,14 +429,12 @@ static int acp_hw_init(void *handle)
+ if (r)
+ return r;
+
+- if (adev->asic_type != CHIP_STONEY) {
+- for (i = 0; i < ACP_DEVS ; i++) {
+- dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
+- r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
+- if (r) {
+- dev_err(dev, "Failed to add dev to genpd\n");
+- return r;
+- }
++ for (i = 0; i < ACP_DEVS ; i++) {
++ dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
++ r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
++ if (r) {
++ dev_err(dev, "Failed to add dev to genpd\n");
++ return r;
+ }
+ }
+
+@@ -500,7 +495,7 @@ static int acp_hw_fini(void *handle)
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ /* return early if no ACP */
+- if (!adev->acp.acp_cell) {
++ if (!adev->acp.acp_genpd) {
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
+ return 0;
+ }
+@@ -541,19 +536,17 @@ static int acp_hw_fini(void *handle)
+ udelay(100);
+ }
+
+- if (adev->acp.acp_genpd) {
+- for (i = 0; i < ACP_DEVS ; i++) {
+- dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
+- ret = pm_genpd_remove_device(&adev->acp.acp_genpd->gpd, dev);
+- /* If removal fails, dont giveup and try rest */
+- if (ret)
+- dev_err(dev, "remove dev from genpd failed\n");
+- }
+- kfree(adev->acp.acp_genpd);
++ for (i = 0; i < ACP_DEVS ; i++) {
++ dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
++ ret = pm_genpd_remove_device(&adev->acp.acp_genpd->gpd, dev);
++ /* If removal fails, dont giveup and try rest */
++ if (ret)
++ dev_err(dev, "remove dev from genpd failed\n");
+ }
+
+ mfd_remove_devices(adev->acp.parent);
+ kfree(adev->acp.acp_res);
++ kfree(adev->acp.acp_genpd);
+ kfree(adev->acp.acp_cell);
+
+ return 0;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5061-drm-amdgpu-acp-Powrgate-acp-via-smu.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5061-drm-amdgpu-acp-Powrgate-acp-via-smu.patch
new file mode 100644
index 00000000..5fe12313
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5061-drm-amdgpu-acp-Powrgate-acp-via-smu.patch
@@ -0,0 +1,195 @@
+From 76d153eb4639747d82c87de111dc805951992109 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Sun, 29 Jul 2018 18:44:06 +0800
+Subject: [PATCH 5061/5725] drm/amdgpu/acp: Powrgate acp via smu
+
+Call the SMU to power gate/ungate ACP instead of only
+powering down the ACP tiles in the ACP block.
+When the SMU power gates ACP:
+the SMU will turn off the clock, power down the ACP tiles, check and
+enter the ULV state.
+When the SMU ungates ACP:
+the SMU will exit ULV, turn on the clocks, and power on the ACP tiles.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c | 133 ++++++--------------------------
+ 1 file changed, 22 insertions(+), 111 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+index 38ecca2..b8228a2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+@@ -116,136 +116,47 @@ static int acp_sw_fini(void *handle)
+ return 0;
+ }
+
+-/* power off a tile/block within ACP */
+-static int acp_suspend_tile(void *cgs_dev, int tile)
+-{
+- u32 val = 0;
+- u32 count = 0;
+-
+- if ((tile < ACP_TILE_P1) || (tile > ACP_TILE_DSP2)) {
+- pr_err("Invalid ACP tile : %d to suspend\n", tile);
+- return -1;
+- }
+-
+- val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0 + tile);
+- val &= ACP_TILE_ON_MASK;
+-
+- if (val == 0x0) {
+- val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG);
+- val = val | (1 << tile);
+- cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val);
+- cgs_write_register(cgs_dev, mmACP_PGFSM_CONFIG_REG,
+- 0x500 + tile);
+-
+- count = ACP_TIMEOUT_LOOP;
+- while (true) {
+- val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0
+- + tile);
+- val = val & ACP_TILE_ON_MASK;
+- if (val == ACP_TILE_OFF_MASK)
+- break;
+- if (--count == 0) {
+- pr_err("Timeout reading ACP PGFSM status\n");
+- return -ETIMEDOUT;
+- }
+- udelay(100);
+- }
+-
+- val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG);
+-
+- val |= ACP_TILE_OFF_RETAIN_REG_MASK;
+- cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val);
+- }
+- return 0;
+-}
+-
+-/* power on a tile/block within ACP */
+-static int acp_resume_tile(void *cgs_dev, int tile)
+-{
+- u32 val = 0;
+- u32 count = 0;
+-
+- if ((tile < ACP_TILE_P1) || (tile > ACP_TILE_DSP2)) {
+- pr_err("Invalid ACP tile to resume\n");
+- return -1;
+- }
+-
+- val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0 + tile);
+- val = val & ACP_TILE_ON_MASK;
+-
+- if (val != 0x0) {
+- cgs_write_register(cgs_dev, mmACP_PGFSM_CONFIG_REG,
+- 0x600 + tile);
+- count = ACP_TIMEOUT_LOOP;
+- while (true) {
+- val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0
+- + tile);
+- val = val & ACP_TILE_ON_MASK;
+- if (val == 0x0)
+- break;
+- if (--count == 0) {
+- pr_err("Timeout reading ACP PGFSM status\n");
+- return -ETIMEDOUT;
+- }
+- udelay(100);
+- }
+- val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG);
+- if (tile == ACP_TILE_P1)
+- val = val & (ACP_TILE_P1_MASK);
+- else if (tile == ACP_TILE_P2)
+- val = val & (ACP_TILE_P2_MASK);
+-
+- cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val);
+- }
+- return 0;
+-}
+-
+ struct acp_pm_domain {
+- void *cgs_dev;
++ void *adev;
+ struct generic_pm_domain gpd;
+ };
+
+ static int acp_poweroff(struct generic_pm_domain *genpd)
+ {
+- int i, ret;
+ struct acp_pm_domain *apd;
++ struct amdgpu_device *adev;
+
+ apd = container_of(genpd, struct acp_pm_domain, gpd);
+ if (apd != NULL) {
+- /* Donot return abruptly if any of power tile fails to suspend.
+- * Log it and continue powering off other tile
+- */
+- for (i = 4; i >= 0 ; i--) {
+- ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_P1 + i);
+- if (ret)
+- pr_err("ACP tile %d tile suspend failed\n", i);
+- }
++ adev = apd->adev;
++ /* call smu to POWER GATE ACP block
++ * smu will
++ * 1. turn off the acp clock
++ * 2. power off the acp tiles
++ * 3. check and enter ulv state
++ */
++ if (adev->powerplay.pp_funcs->set_powergating_by_smu)
++ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
+ }
+ return 0;
+ }
+
+ static int acp_poweron(struct generic_pm_domain *genpd)
+ {
+- int i, ret;
+ struct acp_pm_domain *apd;
++ struct amdgpu_device *adev;
+
+ apd = container_of(genpd, struct acp_pm_domain, gpd);
+ if (apd != NULL) {
+- for (i = 0; i < 2; i++) {
+- ret = acp_resume_tile(apd->cgs_dev, ACP_TILE_P1 + i);
+- if (ret) {
+- pr_err("ACP tile %d resume failed\n", i);
+- break;
+- }
+- }
+-
+- /* Disable DSPs which are not going to be used */
+- for (i = 0; i < 3; i++) {
+- ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_DSP0 + i);
+- /* Continue suspending other DSP, even if one fails */
+- if (ret)
+- pr_err("ACP DSP %d suspend failed\n", i);
+- }
++ adev = apd->adev;
++ /* call smu to UNGATE ACP block
++ * smu will
++ * 1. exit ulv
++ * 2. turn on acp clock
++ * 3. power on acp tiles
++ */
++ if (adev->powerplay.pp_funcs->set_powergating_by_smu)
++ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
+ }
+ return 0;
+ }
+@@ -309,7 +220,7 @@ static int acp_hw_init(void *handle)
+ adev->acp.acp_genpd->gpd.power_off = acp_poweroff;
+ adev->acp.acp_genpd->gpd.power_on = acp_poweron;
+
+- adev->acp.acp_genpd->cgs_dev = adev->acp.cgs_device;
++ adev->acp.acp_genpd->adev = adev;
+
+ pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false);
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5062-drm-amgpu-acp-Implement-set_powergating_state-for-ac.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5062-drm-amgpu-acp-Implement-set_powergating_state-for-ac.patch
new file mode 100644
index 00000000..c21db8ec
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5062-drm-amgpu-acp-Implement-set_powergating_state-for-ac.patch
@@ -0,0 +1,36 @@
+From b5dc477245fe4370865a867d205f964a78400910 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Sun, 29 Jul 2018 18:53:02 +0800
+Subject: [PATCH 5062/5725] drm/amgpu/acp: Implement set_powergating_state for
+ acp
+
+so the driver can powergate the ACP block after the ASIC is
+initialized, to save power.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+index b8228a2..0527bfb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+@@ -512,6 +512,12 @@ static int acp_set_clockgating_state(void *handle,
+ static int acp_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+ {
++ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
++ bool enable = state == AMD_PG_STATE_GATE ? true : false;
++
++ if (adev->powerplay.pp_funcs->set_powergating_by_smu)
++ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, enable);
++
+ return 0;
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5063-drm-amdgpu-Add-job-pipe-sync-dependecy-trace.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5063-drm-amdgpu-Add-job-pipe-sync-dependecy-trace.patch
new file mode 100644
index 00000000..88684898
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5063-drm-amdgpu-Add-job-pipe-sync-dependecy-trace.patch
@@ -0,0 +1,80 @@
+From 88e0afe158ccce5ee07721f00eb5cff2105b2715 Mon Sep 17 00:00:00 2001
+From: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Date: Tue, 31 Jul 2018 10:52:25 -0400
+Subject: [PATCH 5063/5725] drm/amdgpu: Add job pipe sync dependecy trace
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+It's useful to trace any dependency a job has on previous
+jobs.
+
+Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 5 +++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 24 ++++++++++++++++++++++++
+ 2 files changed, 29 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+index 360ec43..96f69eb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+@@ -32,6 +32,7 @@
+ #include <drm/amdgpu_drm.h>
+ #include "amdgpu.h"
+ #include "atom.h"
++#include "amdgpu_trace.h"
+
+ #define AMDGPU_IB_TEST_TIMEOUT msecs_to_jiffies(1000)
+
+@@ -174,6 +175,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
+ ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) ||
+ amdgpu_vm_need_pipeline_sync(ring, job))) {
+ need_pipe_sync = true;
++
++ if (tmp)
++ trace_amdgpu_ib_pipe_sync(job, tmp);
++
+ dma_fence_put(tmp);
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+index 7206a00..8c2dab2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+@@ -462,6 +462,30 @@ TRACE_EVENT(amdgpu_bo_move,
+ __entry->new_placement, __entry->bo_size)
+ );
+
++TRACE_EVENT(amdgpu_ib_pipe_sync,
++ TP_PROTO(struct amdgpu_job *sched_job, struct dma_fence *fence),
++ TP_ARGS(sched_job, fence),
++ TP_STRUCT__entry(
++ __field(const char *,name)
++ __field(uint64_t, id)
++ __field(struct dma_fence *, fence)
++ __field(uint64_t, ctx)
++ __field(unsigned, seqno)
++ ),
++
++ TP_fast_assign(
++ __entry->name = sched_job->base.sched->name;
++ __entry->id = sched_job->base.id;
++ __entry->fence = fence;
++ __entry->ctx = fence->context;
++ __entry->seqno = fence->seqno;
++ ),
++ TP_printk("job ring=%s, id=%llu, need pipe sync to fence=%p, context=%llu, seq=%u",
++ __entry->name, __entry->id,
++ __entry->fence, __entry->ctx,
++ __entry->seqno)
++);
++
+ #undef AMDGPU_JOB_GET_TIMELINE_NAME
+ #endif
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5064-drm-amd-pp-Implement-get_performance_level-for-legac.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5064-drm-amd-pp-Implement-get_performance_level-for-legac.patch
new file mode 100644
index 00000000..66e1cc48
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5064-drm-amd-pp-Implement-get_performance_level-for-legac.patch
@@ -0,0 +1,121 @@
+From e3ca9af0f77c41a0a3e10a9217a37e981799563f Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Thu, 5 Jul 2018 19:22:50 +0800
+Subject: [PATCH 5064/5725] drm/amd/pp: Implement get_performance_level for
+ legacy dgpu
+
+The display can get clock info through this function.
+Implement this function for vega10 and older ASICs.
+From vega12 onward, there is no power state management,
+so a new interface needs to be added to notify the display
+of the clock info.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c | 2 +-
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 24 ++++++++++++++++++++++
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 24 ++++++++++++++++++++++
+ 3 files changed, 49 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+index 53207e7..b05b153 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+@@ -357,7 +357,7 @@ int phm_get_clock_info(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *s
+ PHM_PerformanceLevelDesignation designation)
+ {
+ int result;
+- PHM_PerformanceLevel performance_level;
++ PHM_PerformanceLevel performance_level = {0};
+
+ PHM_FUNC_CHECK(hwmgr);
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+index e3c1eb4..d785b76 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+@@ -5008,6 +5008,29 @@ static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint
+ return 0;
+ }
+
++static int smu7_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
++ PHM_PerformanceLevelDesignation designation, uint32_t index,
++ PHM_PerformanceLevel *level)
++{
++ const struct smu7_power_state *ps;
++ struct smu7_hwmgr *data;
++ uint32_t i;
++
++ if (level == NULL || hwmgr == NULL || state == NULL)
++ return -EINVAL;
++
++ data = hwmgr->backend;
++ ps = cast_const_phw_smu7_power_state(state);
++
++ i = index > ps->performance_level_count - 1 ?
++ ps->performance_level_count - 1 : index;
++
++ level->coreClock = ps->performance_levels[i].engine_clock;
++ level->memory_clock = ps->performance_levels[i].memory_clock;
++
++ return 0;
++}
++
+ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
+ .backend_init = &smu7_hwmgr_backend_init,
+ .backend_fini = &smu7_hwmgr_backend_fini,
+@@ -5064,6 +5087,7 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
+ .set_power_limit = smu7_set_power_limit,
+ .get_power_profile_mode = smu7_get_power_profile_mode,
+ .set_power_profile_mode = smu7_set_power_profile_mode,
++ .get_performance_level = smu7_get_performance_level,
+ };
+
+ uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+index 384d853..61c6be2 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+@@ -4851,6 +4851,29 @@ static int vega10_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
+ return 0;
+ }
+
++static int vega10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
++ PHM_PerformanceLevelDesignation designation, uint32_t index,
++ PHM_PerformanceLevel *level)
++{
++ const struct vega10_power_state *ps;
++ struct vega10_hwmgr *data;
++ uint32_t i;
++
++ if (level == NULL || hwmgr == NULL || state == NULL)
++ return -EINVAL;
++
++ data = hwmgr->backend;
++ ps = cast_const_phw_vega10_power_state(state);
++
++ i = index > ps->performance_level_count - 1 ?
++ ps->performance_level_count - 1 : index;
++
++ level->coreClock = ps->performance_levels[i].gfx_clock;
++ level->memory_clock = ps->performance_levels[i].mem_clock;
++
++ return 0;
++}
++
+ static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
+ .backend_init = vega10_hwmgr_backend_init,
+ .backend_fini = vega10_hwmgr_backend_fini,
+@@ -4910,6 +4933,7 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
+ .set_power_profile_mode = vega10_set_power_profile_mode,
+ .set_power_limit = vega10_set_power_limit,
+ .odn_edit_dpm_table = vega10_odn_edit_dpm_table,
++ .get_performance_level = vega10_get_performance_level,
+ };
+
+ int vega10_enable_smc_features(struct pp_hwmgr *hwmgr,
+--
+2.7.4
+
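As a rough usage sketch (assuming the hwmgr backend in use provides the new hook), display-facing code could fetch the clocks of a given performance level as below; the function name is hypothetical and the designation argument is ignored by the smu7/vega10 implementations above, so 0 is passed:

/* Illustrative sketch only, not part of the patch. */
static int example_get_level0_clocks(struct pp_hwmgr *hwmgr,
				     const struct pp_hw_power_state *state,
				     PHM_PerformanceLevel *level)
{
	if (!hwmgr->hwmgr_func->get_performance_level)
		return -EINVAL;

	/* index is clamped to performance_level_count - 1 inside the hook;
	 * level->coreClock and level->memory_clock are filled on success */
	return hwmgr->hwmgr_func->get_performance_level(hwmgr, state,
			(PHM_PerformanceLevelDesignation)0, 0, level);
}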
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5065-drm-amd-display-pass-compat_level-to-hubp.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5065-drm-amd-display-pass-compat_level-to-hubp.patch
new file mode 100644
index 00000000..30ff0a4e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5065-drm-amd-display-pass-compat_level-to-hubp.patch
@@ -0,0 +1,83 @@
+From 90b59fa09e6ba3ce9065127be2559e08b06efb6a Mon Sep 17 00:00:00 2001
+From: Charlene Liu <charlene.liu@amd.com>
+Date: Mon, 16 Jul 2018 14:05:11 -0400
+Subject: [PATCH 5065/5725] drm/amd/display: pass compat_level to hubp
+
+Signed-off-by: Charlene Liu <charlene.liu@amd.com>
+Reviewed-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 3 ++-
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 3 ++-
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 4 +++-
+ drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h | 3 ++-
+ 4 files changed, 9 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+index 2138cd3..fa1bacd 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+@@ -485,7 +485,8 @@ void hubp1_program_surface_config(
+ union plane_size *plane_size,
+ enum dc_rotation_angle rotation,
+ struct dc_plane_dcc_param *dcc,
+- bool horizontal_mirror)
++ bool horizontal_mirror,
++ unsigned int compat_level)
+ {
+ hubp1_dcc_control(hubp, dcc->enable, dcc->grph.independent_64b_blks);
+ hubp1_program_tiling(hubp, tiling_info, format);
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+index f689fea..48c1907 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+@@ -664,7 +664,8 @@ void hubp1_program_surface_config(
+ union plane_size *plane_size,
+ enum dc_rotation_angle rotation,
+ struct dc_plane_dcc_param *dcc,
+- bool horizontal_mirror);
++ bool horizontal_mirror,
++ unsigned int compat_level);
+
+ void hubp1_program_deadline(
+ struct hubp *hubp,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 05014e0..ba4856f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -2019,6 +2019,7 @@ static void update_dchubp_dpp(
+ struct dpp *dpp = pipe_ctx->plane_res.dpp;
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ union plane_size size = plane_state->plane_size;
++ unsigned int compat_level = 0;
+
+ /* depends on DML calculation, DPP clock value may change dynamically */
+ /* If request max dpp clk is lower than current dispclk, no need to
+@@ -2110,7 +2111,8 @@ static void update_dchubp_dpp(
+ &size,
+ plane_state->rotation,
+ &plane_state->dcc,
+- plane_state->horizontal_mirror);
++ plane_state->horizontal_mirror,
++ compat_level);
+ }
+
+ hubp->power_gated = false;
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+index 4f3f9e6..334c48c 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+@@ -96,7 +96,8 @@ struct hubp_funcs {
+ union plane_size *plane_size,
+ enum dc_rotation_angle rotation,
+ struct dc_plane_dcc_param *dcc,
+- bool horizontal_mirror);
++ bool horizontal_mirror,
++ unsigned int compa_level);
+
+ bool (*hubp_is_flip_pending)(struct hubp *hubp);
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5066-drm-amd-display-Move-PME-to-function-pointer-call-se.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5066-drm-amd-display-Move-PME-to-function-pointer-call-se.patch
new file mode 100644
index 00000000..42f3292f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5066-drm-amd-display-Move-PME-to-function-pointer-call-se.patch
@@ -0,0 +1,227 @@
+From 7e75b72a5f26fb6d262dd18cde1f7186f72a7810 Mon Sep 17 00:00:00 2001
+From: Jun Lei <Jun.Lei@amd.com>
+Date: Mon, 16 Jul 2018 10:40:31 -0400
+Subject: [PATCH 5066/5725] drm/amd/display: Move PME to function pointer call
+ semantics
+
+[Why]
+Legacy IRI style is not Linux friendly.
+
+[How]
+New function pointer call
+semantics will be used for all future PPLIB/DAL interfaces, and also
+some existing ones will be refactored. This change defines how the
+new function pointer structures will look, as well as implements
+
+Signed-off-by: Jun Lei <Jun.Lei@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | 8 +-
+ drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 7 ++
+ drivers/gpu/drm/amd/display/dc/dm_pp_smu.h | 92 +++++++++++++---------
+ 3 files changed, 65 insertions(+), 42 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+index fbe878a..e5c5b0a 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+@@ -478,7 +478,7 @@ bool dm_pp_get_static_clocks(
+ void pp_rv_set_display_requirement(struct pp_smu *pp,
+ struct pp_smu_display_requirement_rv *req)
+ {
+- struct dc_context *ctx = pp->ctx;
++ const struct dc_context *ctx = pp->dm;
+ struct amdgpu_device *adev = ctx->driver_context;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+@@ -491,7 +491,7 @@ void pp_rv_set_display_requirement(struct pp_smu *pp,
+ void pp_rv_set_wm_ranges(struct pp_smu *pp,
+ struct pp_smu_wm_range_sets *ranges)
+ {
+- struct dc_context *ctx = pp->ctx;
++ const struct dc_context *ctx = pp->dm;
+ struct amdgpu_device *adev = ctx->driver_context;
+ void *pp_handle = adev->powerplay.pp_handle;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+@@ -540,7 +540,7 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp,
+
+ void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
+ {
+- struct dc_context *ctx = pp->ctx;
++ const struct dc_context *ctx = pp->dm;
+ struct amdgpu_device *adev = ctx->driver_context;
+ void *pp_handle = adev->powerplay.pp_handle;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+@@ -555,7 +555,7 @@ void dm_pp_get_funcs_rv(
+ struct dc_context *ctx,
+ struct pp_smu_funcs_rv *funcs)
+ {
+- funcs->pp_smu.ctx = ctx;
++ funcs->pp_smu.dm = ctx;
+ funcs->set_display_requirement = pp_rv_set_display_requirement;
+ funcs->set_wm_ranges = pp_rv_set_wm_ranges;
+ funcs->set_pme_wa_enable = pp_rv_set_pme_wa_enable;
+diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+index bd03932..32b3413 100644
+--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
++++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+@@ -37,6 +37,13 @@
+
+ #define DC_LOGGER \
+ dc->ctx->logger
++
++#define WM_SET_COUNT 4
++#define WM_A 0
++#define WM_B 1
++#define WM_C 2
++#define WM_D 3
++
+ /*
+ * NOTE:
+ * This file is gcc-parseable HW gospel, coming straight from HW engineers.
+diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
+index 58ed205..f2ea845 100644
+--- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
++++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
+@@ -30,33 +30,45 @@
+ * interface to PPLIB/SMU to setup clocks and pstate requirements on SoC
+ */
+
++enum pp_smu_ver {
++ /*
++ * PP_SMU_INTERFACE_X should be interpreted as the interface defined
++ * starting from X, where X is some family of ASICs. This is as
++ * opposed to interfaces used only for X. There will be some degree
++ * of interface sharing between families of ASIcs.
++ */
++ PP_SMU_UNSUPPORTED,
++ PP_SMU_VER_RV
++};
+
+ struct pp_smu {
+- struct dc_context *ctx;
+-};
++ enum pp_smu_ver ver;
++ const void *pp;
+
+-enum wm_set_id {
+- WM_A,
+- WM_B,
+- WM_C,
+- WM_D,
+- WM_SET_COUNT,
++ /*
++ * interim extra handle for backwards compatibility
++ * as some existing functionality not yet implemented
++ * by ppsmu
++ */
++ const void *dm;
+ };
+
+ struct pp_smu_wm_set_range {
+- enum wm_set_id wm_inst;
++ unsigned int wm_inst;
+ uint32_t min_fill_clk_khz;
+ uint32_t max_fill_clk_khz;
+ uint32_t min_drain_clk_khz;
+ uint32_t max_drain_clk_khz;
+ };
+
++#define MAX_WATERMARK_SETS 4
++
+ struct pp_smu_wm_range_sets {
+- uint32_t num_reader_wm_sets;
+- struct pp_smu_wm_set_range reader_wm_sets[WM_SET_COUNT];
++ unsigned int num_reader_wm_sets;
++ struct pp_smu_wm_set_range reader_wm_sets[MAX_WATERMARK_SETS];
+
+- uint32_t num_writer_wm_sets;
+- struct pp_smu_wm_set_range writer_wm_sets[WM_SET_COUNT];
++ unsigned int num_writer_wm_sets;
++ struct pp_smu_wm_set_range writer_wm_sets[MAX_WATERMARK_SETS];
+ };
+
+ struct pp_smu_display_requirement_rv {
+@@ -85,48 +97,52 @@ struct pp_smu_display_requirement_rv {
+ struct pp_smu_funcs_rv {
+ struct pp_smu pp_smu;
+
+- void (*set_display_requirement)(struct pp_smu *pp,
+- struct pp_smu_display_requirement_rv *req);
++ /* PPSMC_MSG_SetDisplayCount
++ * 0 triggers S0i2 optimization
++ */
++ void (*set_display_count)(struct pp_smu *pp, int count);
+
+ /* which SMU message? are reader and writer WM separate SMU msg? */
+ void (*set_wm_ranges)(struct pp_smu *pp,
+ struct pp_smu_wm_range_sets *ranges);
+- /* PME w/a */
+- void (*set_pme_wa_enable)(struct pp_smu *pp);
+-};
+
+-#if 0
+-struct pp_smu_funcs_rv {
++ /* PPSMC_MSG_SetHardMinDcfclkByFreq
++ * fixed clock at requested freq, either from FCH bypass or DFS
++ */
++ void (*set_hard_min_dcfclk_by_freq)(struct pp_smu *pp, int khz);
+
+- /* PPSMC_MSG_SetDisplayCount
+- * 0 triggers S0i2 optimization
++ /* PPSMC_MSG_SetMinDeepSleepDcfclk
++ * when DF is in cstate, dcf clock is further divided down
++ * to just above given frequency
+ */
+- void (*set_display_count)(struct pp_smu *pp, int count);
++ void (*set_min_deep_sleep_dcfclk)(struct pp_smu *pp, int mhz);
+
+ /* PPSMC_MSG_SetHardMinFclkByFreq
+- * FCLK will vary with DPM, but never below requested hard min
++ * FCLK will vary with DPM, but never below requested hard min
+ */
+ void (*set_hard_min_fclk_by_freq)(struct pp_smu *pp, int khz);
+
+- /* PPSMC_MSG_SetHardMinDcefclkByFreq
+- * fixed clock at requested freq, either from FCH bypass or DFS
++ /* PPSMC_MSG_SetHardMinSocclkByFreq
++ * Needed for DWB support
+ */
+- void (*set_hard_min_dcefclk_by_freq)(struct pp_smu *pp, int khz);
++ void (*set_hard_min_socclk_by_freq)(struct pp_smu *pp, int khz);
+
+- /* PPSMC_MSG_SetMinDeepSleepDcefclk
+- * when DF is in cstate, dcf clock is further divided down
+- * to just above given frequency
+- */
+- void (*set_min_deep_sleep_dcefclk)(struct pp_smu *pp, int mhz);
++ /* PME w/a */
++ void (*set_pme_wa_enable)(struct pp_smu *pp);
+
+- /* todo: aesthetic
+- * watermark range table
++ /*
++ * Legacy functions. Used for backwards comp. with existing
++ * PPlib code.
+ */
++ void (*set_display_requirement)(struct pp_smu *pp,
++ struct pp_smu_display_requirement_rv *req);
++};
+
+- /* todo: functional/feature
+- * PPSMC_MSG_SetHardMinSocclkByFreq: required to support DWB
+- */
++struct pp_smu_funcs {
++ struct pp_smu ctx;
++ union {
++ struct pp_smu_funcs_rv rv_funcs;
++ };
+ };
+-#endif
+
+ #endif /* DM_PP_SMU_IF__H */
+--
+2.7.4
+
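A hedged sketch of how a consumer might drive the refactored interface, here programming a single reader watermark range; the helper name and clock numbers are placeholders, not part of the patch:

/* Illustrative sketch only. */
static void example_set_wm_ranges(struct pp_smu_funcs_rv *funcs)
{
	struct pp_smu_wm_range_sets ranges = {0};

	ranges.num_reader_wm_sets = 1;
	ranges.reader_wm_sets[0].wm_inst = 0;			/* WM_A, per dcn_calcs.c above */
	ranges.reader_wm_sets[0].min_fill_clk_khz = 300000;	/* placeholder clocks */
	ranges.reader_wm_sets[0].max_fill_clk_khz = 600000;
	ranges.reader_wm_sets[0].min_drain_clk_khz = 300000;
	ranges.reader_wm_sets[0].max_drain_clk_khz = 600000;

	if (funcs->set_wm_ranges)
		funcs->set_wm_ranges(&funcs->pp_smu, &ranges);
}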
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5067-drm-amd-display-dal-3.1.60.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5067-drm-amd-display-dal-3.1.60.patch
new file mode 100644
index 00000000..c7408074
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5067-drm-amd-display-dal-3.1.60.patch
@@ -0,0 +1,29 @@
+From 3c91b52a3c117e5fcf68174c2fdd07b4e57c4a5c Mon Sep 17 00:00:00 2001
+From: Tony Cheng <tony.cheng@amd.com>
+Date: Wed, 18 Jul 2018 20:28:12 -0400
+Subject: [PATCH 5067/5725] drm/amd/display: dal 3.1.60
+
+Signed-off-by: Tony Cheng <tony.cheng@amd.com>
+Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 4f9d969..263d9f3 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -38,7 +38,7 @@
+ #include "inc/compressor.h"
+ #include "dml/display_mode_lib.h"
+
+-#define DC_VER "3.1.59"
++#define DC_VER "3.1.60"
+
+ #define MAX_SURFACES 3
+ #define MAX_STREAMS 6
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5068-drm-amd-display-Set-DFS-bypass-flags-for-dce110.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5068-drm-amd-display-Set-DFS-bypass-flags-for-dce110.patch
new file mode 100644
index 00000000..f444f138
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5068-drm-amd-display-Set-DFS-bypass-flags-for-dce110.patch
@@ -0,0 +1,71 @@
+From 521b4da5ac654875b61091a318b9915366454128 Mon Sep 17 00:00:00 2001
+From: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Date: Tue, 24 Jul 2018 13:19:49 -0400
+Subject: [PATCH 5068/5725] drm/amd/display: Set DFS bypass flags for dce110
+
+[Why]
+
+While there is support for using and querying DFS bypass clocks, the
+hardware is never notified to enter DFS bypass mode for dce110.
+
+[How]
+
+Add a flag that can be set when programming the display engine PLL
+to enable DFS bypass mode. If this flag is set then the hardware is
+notified to enter DFS bypass mode and the correct display engine clock
+frequency can be acquired.
+
+Signed-off-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/bios/command_table.c | 3 +++
+ drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c | 3 +++
+ drivers/gpu/drm/amd/display/include/bios_parser_types.h | 2 ++
+ 3 files changed, 8 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+index a558bfa..2bd7cd9 100644
+--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
++++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+@@ -2201,6 +2201,9 @@ static enum bp_result program_clock_v6(
+ if (bp_params->flags.SET_EXTERNAL_REF_DIV_SRC)
+ params.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC;
+
++ if (bp_params->flags.SET_DISPCLK_DFS_BYPASS)
++ params.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_DPREFCLK_BYPASS;
++
+ if (EXEC_BIOS_CMD_TABLE(SetPixelClock, params)) {
+ /* True display clock is returned by VBIOS if DFS bypass
+ * is enabled. */
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+index 684da3d..10bb8095 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+@@ -249,6 +249,9 @@ static int dce_set_clock(
+ pxl_clk_params.target_pixel_clock = requested_clk_khz;
+ pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
+
++ if (clk_dce->dfs_bypass_enabled)
++ pxl_clk_params.flags.SET_DISPCLK_DFS_BYPASS = true;
++
+ bp->funcs->program_display_engine_pll(bp, &pxl_clk_params);
+
+ if (clk_dce->dfs_bypass_enabled) {
+diff --git a/drivers/gpu/drm/amd/display/include/bios_parser_types.h b/drivers/gpu/drm/amd/display/include/bios_parser_types.h
+index 0840f69..f8dbfa5 100644
+--- a/drivers/gpu/drm/amd/display/include/bios_parser_types.h
++++ b/drivers/gpu/drm/amd/display/include/bios_parser_types.h
+@@ -234,6 +234,8 @@ struct bp_pixel_clock_parameters {
+ uint32_t USE_E_CLOCK_AS_SOURCE_FOR_D_CLOCK:1;
+ /* Use external reference clock (refDivSrc for PLL) */
+ uint32_t SET_EXTERNAL_REF_DIV_SRC:1;
++ /* Use DFS bypass for Display clock. */
++ uint32_t SET_DISPCLK_DFS_BYPASS:1;
+ /* Force program PHY PLL only */
+ uint32_t PROGRAM_PHY_PLL_ONLY:1;
+ /* Support for YUV420 */
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5069-drm-amd-display-Enable-DFS-bypass-support-in-DC-conf.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5069-drm-amd-display-Enable-DFS-bypass-support-in-DC-conf.patch
new file mode 100644
index 00000000..5b317641
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5069-drm-amd-display-Enable-DFS-bypass-support-in-DC-conf.patch
@@ -0,0 +1,44 @@
+From 4a2b4c9fafa90c8c16ef9630b6f723657b4944aa Mon Sep 17 00:00:00 2001
+From: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Date: Tue, 24 Jul 2018 09:42:23 -0400
+Subject: [PATCH 5069/5725] drm/amd/display: Enable DFS bypass support in DC
+ config
+
+[Why]
+
+We explicitly disable DFS bypass support when creating DC. Support
+for this feature should now be in place so it can be left implicitly
+enabled.
+
+[How]
+
+Remove the line that disables DFS bypass support.
+
+Note: This option was actually reset to false anyway for most of
+the hardware I've tested on, making this particular line misleading
+in the first place. This patch also fixes that issue.
+
+Signed-off-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 8ca5efd..e29417c 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -693,8 +693,6 @@ struct dc *dc_create(const struct dc_init_data *init_params)
+ DC_LOG_DC("Display Core initialized\n");
+
+
+- /* TODO: missing feature to be enabled */
+- dc->debug.disable_dfs_bypass = true;
+
+ return dc;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5070-drm-amd-display-Add-support-for-toggling-DFS-bypass.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5070-drm-amd-display-Add-support-for-toggling-DFS-bypass.patch
new file mode 100644
index 00000000..4635e5be
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5070-drm-amd-display-Add-support-for-toggling-DFS-bypass.patch
@@ -0,0 +1,190 @@
+From c9b18e8580a988a80be3ba87c157b8afdc5d3d63 Mon Sep 17 00:00:00 2001
+From: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Date: Thu, 26 Jul 2018 09:32:48 -0400
+Subject: [PATCH 5070/5725] drm/amd/display: Add support for toggling DFS
+ bypass
+
+[Why]
+
+If the hardware supports DFS bypass it will always be enabled after
+creation of the DCCG. DFS bypass should only be enabled when
+the current stream consists of a single embedded panel and the
+minimum display clock is below the DFS bypass threshold.
+
+[How]
+
+Add a function to the DCCG table that updates the DFS bypass state
+when setting the bandwidth. If the DFS bypass state is changed, the
+clock needs to be reprogrammed to reflect this before the DPREFCLK
+is updated for audio endpoints. The existing display clock value
+is used as the target display clock value when reprogramming since the
+resulting change will be equal or larger to the current value.
+
+These changes only specifically target dce110 but do offer a framework
+for support on other applicable targets.
+
+Signed-off-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Reviewed-by: David Francis <David.Francis@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c | 63 ++++++++++++++++++++--
+ drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h | 2 +
+ .../amd/display/dc/dce110/dce110_hw_sequencer.c | 12 ++++-
+ .../gpu/drm/amd/display/dc/inc/hw/display_clock.h | 5 ++
+ 4 files changed, 76 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+index 10bb8095..51ceb99 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+@@ -249,13 +249,12 @@ static int dce_set_clock(
+ pxl_clk_params.target_pixel_clock = requested_clk_khz;
+ pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
+
+- if (clk_dce->dfs_bypass_enabled)
++ if (clk_dce->dfs_bypass_active)
+ pxl_clk_params.flags.SET_DISPCLK_DFS_BYPASS = true;
+
+ bp->funcs->program_display_engine_pll(bp, &pxl_clk_params);
+
+- if (clk_dce->dfs_bypass_enabled) {
+-
++ if (clk_dce->dfs_bypass_active) {
+ /* Cache the fixed display clock*/
+ clk_dce->dfs_bypass_disp_clk =
+ pxl_clk_params.dfs_bypass_display_clock;
+@@ -671,6 +670,61 @@ static void dce_update_clocks(struct dccg *dccg,
+ }
+ }
+
++static bool dce_update_dfs_bypass(
++ struct dccg *dccg,
++ struct dc *dc,
++ struct dc_state *context,
++ int requested_clock_khz)
++{
++ struct dce_dccg *clk_dce = TO_DCE_CLOCKS(dccg);
++ struct resource_context *res_ctx = &context->res_ctx;
++ enum signal_type signal_type = SIGNAL_TYPE_NONE;
++ bool was_active = clk_dce->dfs_bypass_active;
++ int i;
++
++ /* Disable DFS bypass by default. */
++ clk_dce->dfs_bypass_active = false;
++
++ /* Check that DFS bypass is available. */
++ if (!clk_dce->dfs_bypass_enabled)
++ goto update;
++
++ /* Check if the requested display clock is below the threshold. */
++ if (requested_clock_khz >= 400000)
++ goto update;
++
++ /* DFS-bypass should only be enabled on single stream setups */
++ if (context->stream_count != 1)
++ goto update;
++
++ /* Check that the stream's signal type is an embedded panel */
++ for (i = 0; i < dc->res_pool->pipe_count; i++) {
++ if (res_ctx->pipe_ctx[i].stream) {
++ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
++
++ signal_type = pipe_ctx->stream->sink->link->connector_signal;
++ break;
++ }
++ }
++
++ if (signal_type == SIGNAL_TYPE_EDP ||
++ signal_type == SIGNAL_TYPE_LVDS)
++ clk_dce->dfs_bypass_active = true;
++
++update:
++ /* Update the clock state. We don't need to respect safe_to_lower
++ * because DFS bypass should always be greater than the current
++ * display clock frequency.
++ */
++ if (was_active != clk_dce->dfs_bypass_active) {
++ dccg->clks.dispclk_khz =
++ dccg->funcs->set_dispclk(dccg, dccg->clks.dispclk_khz);
++ return true;
++ }
++
++ return false;
++}
++
+ #ifdef CONFIG_X86
+ static const struct display_clock_funcs dcn1_funcs = {
+ .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+@@ -694,7 +748,8 @@ static const struct display_clock_funcs dce112_funcs = {
+ static const struct display_clock_funcs dce110_funcs = {
+ .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
+ .set_dispclk = dce_psr_set_clock,
+- .update_clocks = dce_update_clocks
++ .update_clocks = dce_update_clocks,
++ .update_dfs_bypass = dce_update_dfs_bypass
+ };
+
+ static const struct display_clock_funcs dce_funcs = {
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
+index e5e44ad..8be68eb 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
+@@ -78,6 +78,8 @@ struct dce_dccg {
+
+ /* Cache the status of DFS-bypass feature*/
+ bool dfs_bypass_enabled;
++ /* True if the DFS-bypass feature is enabled and active. */
++ bool dfs_bypass_active;
+ /* Cache the display clock returned by VBIOS if DFS-bypass is enabled.
+ * This is basically "Crystal Frequency In KHz" (XTALIN) frequency */
+ int dfs_bypass_disp_clk;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index afd1743..3b4a9f9 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -2539,6 +2539,7 @@ void dce110_set_bandwidth(
+ bool decrease_allowed)
+ {
+ struct dc_clocks req_clks;
++ struct dccg *dccg = dc->res_pool->dccg;
+
+ req_clks.dispclk_khz = context->bw.dce.dispclk_khz;
+ req_clks.phyclk_khz = get_max_pixel_clock_for_all_paths(dc, context);
+@@ -2548,8 +2549,15 @@ void dce110_set_bandwidth(
+ else
+ dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
+
+- dc->res_pool->dccg->funcs->update_clocks(
+- dc->res_pool->dccg,
++ if (dccg->funcs->update_dfs_bypass)
++ dccg->funcs->update_dfs_bypass(
++ dccg,
++ dc,
++ context,
++ req_clks.dispclk_khz);
++
++ dccg->funcs->update_clocks(
++ dccg,
+ &req_clks,
+ decrease_allowed);
+ pplib_apply_display_requirements(dc, context);
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h b/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
+index 3c7ccb6..689faa1 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
+@@ -53,6 +53,11 @@ struct display_clock_funcs {
+ int requested_clock_khz);
+
+ int (*get_dp_ref_clk_frequency)(struct dccg *dccg);
++
++ bool (*update_dfs_bypass)(struct dccg *dccg,
++ struct dc *dc,
++ struct dc_state *context,
++ int requested_clock_khz);
+ };
+
+ #endif /* __DISPLAY_CLOCK_H__ */
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5071-drm-amdgpu-Add-amdgpu_gfx_off_ctrl-function.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5071-drm-amdgpu-Add-amdgpu_gfx_off_ctrl-function.patch
new file mode 100644
index 00000000..cc4fdec4
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5071-drm-amdgpu-Add-amdgpu_gfx_off_ctrl-function.patch
@@ -0,0 +1,114 @@
+From ae46fd8a5981824155d3ddde3b1457cca60a9af8 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Mon, 30 Jul 2018 16:59:09 +0800
+Subject: [PATCH 5071/5725] drm/amdgpu: Add amdgpu_gfx_off_ctrl function
+
+v2:
+ 1. drop the special handling for the hw IP,
+    as suggested by Hawking and Christian.
+ 2. refine the variable name, as suggested by Flora.
+
+This function is the entry point of the gfx off feature.
+We arbitrate gfx off feature enable/disable in this
+function.
+
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 5 +++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 ++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 36 ++++++++++++++++++++++++++++++
+ 3 files changed, 43 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index a6bde13..d13ddd2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -973,6 +973,10 @@ struct amdgpu_gfx {
+ /* NGG */
+ struct amdgpu_ngg ngg;
+
++ /* gfx off */
++ bool gfx_off_state; /* true: enabled, false: disabled */
++ struct mutex gfx_off_mutex;
++ uint32_t gfx_off_req_count; /* default 1, enable gfx off: dec 1, disable gfx off: add 1 */
+ /* pipe reservation */
+ struct mutex pipe_reserve_mutex;
+ DECLARE_BITMAP (pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
+@@ -1841,6 +1845,7 @@ void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
+ const u32 array_size);
+
+ bool amdgpu_device_is_px(struct drm_device *dev);
++void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable);
+ /* atpx handler */
+ #if defined(CONFIG_VGA_SWITCHEROO)
+ void amdgpu_register_atpx_handler(void);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 4bc4c4a..dd34f4c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -2416,6 +2416,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
+ mutex_init(&adev->gfx.gpu_clock_mutex);
+ mutex_init(&adev->srbm_mutex);
+ mutex_init(&adev->gfx.pipe_reserve_mutex);
++ mutex_init(&adev->gfx.gfx_off_mutex);
+ mutex_init(&adev->grbm_idx_mutex);
+ mutex_init(&adev->mn_lock);
+ mutex_init(&adev->virt.vf_errors.lock);
+@@ -2443,6 +2444,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
+ INIT_DELAYED_WORK(&adev->late_init_work,
+ amdgpu_device_ip_late_init_func_handler);
+
++ adev->gfx.gfx_off_req_count = 1;
+ adev->pm.ac_power = power_supply_is_system_supplied() > 0 ? true : false;
+
+ /* Registers mapping */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+index 3116e4d..fe87ffd 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+@@ -340,3 +340,39 @@ void amdgpu_gfx_compute_mqd_sw_fini(struct amdgpu_device *adev)
+ &ring->mqd_gpu_addr,
+ &ring->mqd_ptr);
+ }
++
++/* amdgpu_gfx_off_ctrl - Handle gfx off feature enable/disable
++ *
++ * @adev: amdgpu_device pointer
++ * @bool enable true: enable gfx off feature, false: disable gfx off feature
++ *
++ * 1. gfx off feature will be enabled by gfx ip after gfx cg gp enabled.
++ * 2. other client can send request to disable gfx off feature, the request should be honored.
++ * 3. other client can cancel their request of disable gfx off feature
++ * 4. other client should not send request to enable gfx off feature before disable gfx off feature.
++ */
++
++void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
++{
++ if (!(adev->powerplay.pp_feature & PP_GFXOFF_MASK))
++ return;
++
++ if (!adev->powerplay.pp_funcs->set_powergating_by_smu)
++ return;
++
++ mutex_lock(&adev->gfx.gfx_off_mutex);
++
++ if (!enable)
++ adev->gfx.gfx_off_req_count++;
++ else if (adev->gfx.gfx_off_req_count > 0)
++ adev->gfx.gfx_off_req_count--;
++
++ if (enable && !adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
++ if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
++ adev->gfx.gfx_off_state = true;
++ } else if (!enable && adev->gfx.gfx_off_state) {
++ if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false))
++ adev->gfx.gfx_off_state = false;
++ }
++ mutex_unlock(&adev->gfx.gfx_off_mutex);
++}
+--
+2.7.4
+
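To make the request-counting contract above concrete, a minimal sketch of the intended caller pattern follows; the function and the work done in between are hypothetical:

/* Illustrative sketch only, not part of the patch. */
static void example_gfx_access(struct amdgpu_device *adev)
{
	/* Request that GFX stay powered: gfx_off_req_count is incremented
	 * and, if gfx off is currently active, it is disabled via the SMU. */
	amdgpu_gfx_off_ctrl(adev, false);

	/* ... access GFX registers or submit work that needs GFX powered ... */

	/* Drop the request again; GFX may power off once the count reaches 0. */
	amdgpu_gfx_off_ctrl(adev, true);
}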
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5072-drm-amdgpu-Put-enable-gfx-off-feature-to-a-delay-thr.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5072-drm-amdgpu-Put-enable-gfx-off-feature-to-a-delay-thr.patch
new file mode 100644
index 00000000..9d25c1bf
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5072-drm-amdgpu-Put-enable-gfx-off-feature-to-a-delay-thr.patch
@@ -0,0 +1,104 @@
+From 1f373ae040b85197c04af1bab3bb0ac496099c63 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Fri, 27 Jul 2018 21:06:30 +0800
+Subject: [PATCH 5072/5725] drm/amdgpu: Put enable gfx off feature to a delay
+ thread
+
+Delay enabling the gfx off feature to avoid toggling gfx on/off too frequently,
+as suggested by Alex and Evan.
+
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 ++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 15 +++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 8 ++++++--
+ 3 files changed, 23 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index d13ddd2..2098e0f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -977,6 +977,8 @@ struct amdgpu_gfx {
+ bool gfx_off_state; /* true: enabled, false: disabled */
+ struct mutex gfx_off_mutex;
+ uint32_t gfx_off_req_count; /* default 1, enable gfx off: dec 1, disable gfx off: add 1 */
++ struct delayed_work gfx_off_delay_work;
++
+ /* pipe reservation */
+ struct mutex pipe_reserve_mutex;
+ DECLARE_BITMAP (pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index dd34f4c..aa9cc45 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1974,6 +1974,19 @@ static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
+ DRM_ERROR("ib ring test failed (%d).\n", r);
+ }
+
++static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
++{
++ struct amdgpu_device *adev =
++ container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
++
++ mutex_lock(&adev->gfx.gfx_off_mutex);
++ if (!adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
++ if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
++ adev->gfx.gfx_off_state = true;
++ }
++ mutex_unlock(&adev->gfx.gfx_off_mutex);
++}
++
+ /**
+ * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
+ *
+@@ -2443,6 +2456,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
+
+ INIT_DELAYED_WORK(&adev->late_init_work,
+ amdgpu_device_ip_late_init_func_handler);
++ INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
++ amdgpu_device_delay_enable_gfx_off);
+
+ adev->gfx.gfx_off_req_count = 1;
+ adev->pm.ac_power = power_supply_is_system_supplied() > 0 ? true : false;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+index fe87ffd..3fe6527 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+@@ -26,6 +26,9 @@
+ #include "amdgpu.h"
+ #include "amdgpu_gfx.h"
+
++/* 0.5 second timeout */
++#define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(500)
++
+ /*
+ * GPU scratch registers helpers function.
+ */
+@@ -360,6 +363,7 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
+ if (!adev->powerplay.pp_funcs->set_powergating_by_smu)
+ return;
+
++
+ mutex_lock(&adev->gfx.gfx_off_mutex);
+
+ if (!enable)
+@@ -368,11 +372,11 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
+ adev->gfx.gfx_off_req_count--;
+
+ if (enable && !adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
+- if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
+- adev->gfx.gfx_off_state = true;
++ schedule_delayed_work(&adev->gfx.gfx_off_delay_work, GFX_OFF_DELAY_ENABLE);
+ } else if (!enable && adev->gfx.gfx_off_state) {
+ if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false))
+ adev->gfx.gfx_off_state = false;
+ }
++
+ mutex_unlock(&adev->gfx.gfx_off_mutex);
+ }
+--
+2.7.4
+
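The delayed-work approach above is the standard kernel pattern for coalescing bursts of a relatively expensive operation: the enable path is deferred by GFX_OFF_DELAY_ENABLE, and re-requesting it while the work is still pending does not queue it a second time. Stripped of the amdgpu specifics, the pattern looks roughly like the sketch below; the workqueue API calls are real, while the example_* names are illustrative.

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct example_ctx {
	struct delayed_work enable_work;
};

static void example_enable_handler(struct work_struct *work)
{
	struct example_ctx *ctx =
		container_of(work, struct example_ctx, enable_work.work);

	/* The deferred, relatively expensive enable happens here. */
	(void)ctx;
}

static void example_init(struct example_ctx *ctx)
{
	INIT_DELAYED_WORK(&ctx->enable_work, example_enable_handler);
}

static void example_request_enable(struct example_ctx *ctx)
{
	/* If the work is already pending, this call is a no-op. */
	schedule_delayed_work(&ctx->enable_work, msecs_to_jiffies(500));
}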
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5073-drm-amdgpu-Ctrl-gfx-off-via-amdgpu_gfx_off_ctrl.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5073-drm-amdgpu-Ctrl-gfx-off-via-amdgpu_gfx_off_ctrl.patch
new file mode 100644
index 00000000..3b5225db
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5073-drm-amdgpu-Ctrl-gfx-off-via-amdgpu_gfx_off_ctrl.patch
@@ -0,0 +1,63 @@
+From 79a48ac1e9e80d834e2c06415e4c17a8ad5bce76 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Fri, 27 Jul 2018 14:55:09 +0800
+Subject: [PATCH 5073/5725] drm/amdgpu: Ctrl gfx off via amdgpu_gfx_off_ctrl
+
+Use the amdgpu_gfx_off_ctrl function so the driver can arbitrate
+whether the gfx ip can be powered off or powered on.
+
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 6 ++----
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 6 ++----
+ 2 files changed, 4 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index aa9cc45..5e47f2a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1878,8 +1878,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
+ adev->ip_blocks[i].version->funcs->name, r);
+ return r;
+ }
+- if (adev->powerplay.pp_funcs->set_powergating_by_smu)
+- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false);
++ amdgpu_gfx_off_ctrl(adev, false);
+ r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
+ /* XXX handle errors */
+ if (r) {
+@@ -2061,8 +2060,7 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
+ }
+
+ /* call smu to disable gfx off feature first when suspend */
+- if (adev->powerplay.pp_funcs->set_powergating_by_smu)
+- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false);
++ amdgpu_gfx_off_ctrl(adev, false);
+
+ for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+ if (!adev->ip_blocks[i].status.valid)
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 670a564..fe751d2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -3788,13 +3788,11 @@ static int gfx_v9_0_set_powergating_state(void *handle,
+ gfx_v9_0_update_gfx_mg_power_gating(adev, enable);
+
+ /* set gfx off through smu */
+- if (enable && adev->powerplay.pp_funcs->set_powergating_by_smu)
+- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true);
++ amdgpu_gfx_off_ctrl(adev, true);
+ break;
+ case CHIP_VEGA12:
+ /* set gfx off through smu */
+- if (enable && adev->powerplay.pp_funcs->set_powergating_by_smu)
+- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true);
++ amdgpu_gfx_off_ctrl(adev, true);
+ break;
+ default:
+ break;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5074-drm-amdgpu-Disable-gfx-off-if-VCN-is-busy.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5074-drm-amdgpu-Disable-gfx-off-if-VCN-is-busy.patch
new file mode 100644
index 00000000..fcede29e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5074-drm-amdgpu-Disable-gfx-off-if-VCN-is-busy.patch
@@ -0,0 +1,38 @@
+From 5fcf5bd2f0a494dbf355ad08126f1124affa7531 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Fri, 27 Jul 2018 17:00:02 +0800
+Subject: [PATCH 5074/5725] drm/amdgpu: Disable gfx off if VCN is busy
+
+This patch is a workaround for the GPU hang seen at video
+begin/end time when gfx off is enabled.
+
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+index 414a67e..9485972 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+@@ -217,6 +217,7 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
+ fences += amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg);
+
+ if (fences == 0) {
++ amdgpu_gfx_off_ctrl(adev, true);
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_uvd(adev, false);
+ else
+@@ -233,6 +234,7 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
+ bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
+
+ if (set_clocks) {
++ amdgpu_gfx_off_ctrl(adev, false);
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_uvd(adev, true);
+ else
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5075-drm-amdgpu-move-gfx-definitions-into-amdgpu_gfx-head.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5075-drm-amdgpu-move-gfx-definitions-into-amdgpu_gfx-head.patch
new file mode 100644
index 00000000..147a4c4f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5075-drm-amdgpu-move-gfx-definitions-into-amdgpu_gfx-head.patch
@@ -0,0 +1,762 @@
+From 95a86797422e6da7da033c727c3051dfb2decdd0 Mon Sep 17 00:00:00 2001
+From: Huang Rui <ray.huang@amd.com>
+Date: Thu, 2 Aug 2018 16:12:39 +0800
+Subject: [PATCH 5075/5725] drm/amdgpu: move gfx definitions into amdgpu_gfx
+ header
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Demangle amdgpu.h
+
+Signed-off-by: Huang Rui <ray.huang@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 282 +-------------------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 34 +++-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 349 ++++++++++++++++++++++++++++----
+ 3 files changed, 342 insertions(+), 323 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 2098e0f..6e7c60f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -69,6 +69,7 @@
+ #include "amdgpu_vce.h"
+ #include "amdgpu_vcn.h"
+ #include "amdgpu_gmc.h"
++#include "amdgpu_gfx.h"
+ #include "amdgpu_dm.h"
+ #include "amdgpu_mn.h"
+ #include "amdgpu_virt.h"
+@@ -176,13 +177,6 @@ extern int amdgpu_cik_support;
+ #define AMDGPU_RESET_VCE (1 << 13)
+ #define AMDGPU_RESET_VCE1 (1 << 14)
+
+-/* GFX current status */
+-#define AMDGPU_GFX_NORMAL_MODE 0x00000000L
+-#define AMDGPU_GFX_SAFE_MODE 0x00000001L
+-#define AMDGPU_GFX_PG_DISABLED_MODE 0x00000002L
+-#define AMDGPU_GFX_CG_DISABLED_MODE 0x00000004L
+-#define AMDGPU_GFX_LBPW_DISABLED_MODE 0x00000008L
+-
+ /* max cursor sizes (in pixels) */
+ #define CIK_CURSOR_WIDTH 128
+ #define CIK_CURSOR_HEIGHT 128
+@@ -713,277 +707,6 @@ struct amdgpu_fpriv {
+ struct idr sem_handles;
+ };
+
+-/*
+- * GFX stuff
+- */
+-#include "clearstate_defs.h"
+-
+-struct amdgpu_rlc_funcs {
+- void (*enter_safe_mode)(struct amdgpu_device *adev);
+- void (*exit_safe_mode)(struct amdgpu_device *adev);
+-};
+-
+-struct amdgpu_rlc {
+- /* for power gating */
+- struct amdgpu_bo *save_restore_obj;
+- uint64_t save_restore_gpu_addr;
+- volatile uint32_t *sr_ptr;
+- const u32 *reg_list;
+- u32 reg_list_size;
+- /* for clear state */
+- struct amdgpu_bo *clear_state_obj;
+- uint64_t clear_state_gpu_addr;
+- volatile uint32_t *cs_ptr;
+- const struct cs_section_def *cs_data;
+- u32 clear_state_size;
+- /* for cp tables */
+- struct amdgpu_bo *cp_table_obj;
+- uint64_t cp_table_gpu_addr;
+- volatile uint32_t *cp_table_ptr;
+- u32 cp_table_size;
+-
+- /* safe mode for updating CG/PG state */
+- bool in_safe_mode;
+- const struct amdgpu_rlc_funcs *funcs;
+-
+- /* for firmware data */
+- u32 save_and_restore_offset;
+- u32 clear_state_descriptor_offset;
+- u32 avail_scratch_ram_locations;
+- u32 reg_restore_list_size;
+- u32 reg_list_format_start;
+- u32 reg_list_format_separate_start;
+- u32 starting_offsets_start;
+- u32 reg_list_format_size_bytes;
+- u32 reg_list_size_bytes;
+- u32 reg_list_format_direct_reg_list_length;
+- u32 save_restore_list_cntl_size_bytes;
+- u32 save_restore_list_gpm_size_bytes;
+- u32 save_restore_list_srm_size_bytes;
+-
+- u32 *register_list_format;
+- u32 *register_restore;
+- u8 *save_restore_list_cntl;
+- u8 *save_restore_list_gpm;
+- u8 *save_restore_list_srm;
+-
+- bool is_rlc_v2_1;
+-};
+-
+-#define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES
+-
+-struct amdgpu_mec {
+- struct amdgpu_bo *hpd_eop_obj;
+- u64 hpd_eop_gpu_addr;
+- struct amdgpu_bo *mec_fw_obj;
+- u64 mec_fw_gpu_addr;
+- u32 num_mec;
+- u32 num_pipe_per_mec;
+- u32 num_queue_per_pipe;
+- void *mqd_backup[AMDGPU_MAX_COMPUTE_RINGS + 1];
+-
+- /* These are the resources for which amdgpu takes ownership */
+- DECLARE_BITMAP(queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
+-};
+-
+-struct amdgpu_kiq {
+- u64 eop_gpu_addr;
+- struct amdgpu_bo *eop_obj;
+- spinlock_t ring_lock;
+- struct amdgpu_ring ring;
+- struct amdgpu_irq_src irq;
+-};
+-
+-/*
+- * GPU scratch registers structures, functions & helpers
+- */
+-struct amdgpu_scratch {
+- unsigned num_reg;
+- uint32_t reg_base;
+- uint32_t free_mask;
+-};
+-
+-/*
+- * GFX configurations
+- */
+-#define AMDGPU_GFX_MAX_SE 4
+-#define AMDGPU_GFX_MAX_SH_PER_SE 2
+-
+-struct amdgpu_rb_config {
+- uint32_t rb_backend_disable;
+- uint32_t user_rb_backend_disable;
+- uint32_t raster_config;
+- uint32_t raster_config_1;
+-};
+-
+-struct gb_addr_config {
+- uint16_t pipe_interleave_size;
+- uint8_t num_pipes;
+- uint8_t max_compress_frags;
+- uint8_t num_banks;
+- uint8_t num_se;
+- uint8_t num_rb_per_se;
+-};
+-
+-struct amdgpu_gfx_config {
+- unsigned max_shader_engines;
+- unsigned max_tile_pipes;
+- unsigned max_cu_per_sh;
+- unsigned max_sh_per_se;
+- unsigned max_backends_per_se;
+- unsigned max_texture_channel_caches;
+- unsigned max_gprs;
+- unsigned max_gs_threads;
+- unsigned max_hw_contexts;
+- unsigned sc_prim_fifo_size_frontend;
+- unsigned sc_prim_fifo_size_backend;
+- unsigned sc_hiz_tile_fifo_size;
+- unsigned sc_earlyz_tile_fifo_size;
+-
+- unsigned num_tile_pipes;
+- unsigned backend_enable_mask;
+- unsigned mem_max_burst_length_bytes;
+- unsigned mem_row_size_in_kb;
+- unsigned shader_engine_tile_size;
+- unsigned num_gpus;
+- unsigned multi_gpu_tile_size;
+- unsigned mc_arb_ramcfg;
+- unsigned gb_addr_config;
+- unsigned num_rbs;
+- unsigned gs_vgt_table_depth;
+- unsigned gs_prim_buffer_depth;
+-
+- uint32_t tile_mode_array[32];
+- uint32_t macrotile_mode_array[16];
+-
+- struct gb_addr_config gb_addr_config_fields;
+- struct amdgpu_rb_config rb_config[AMDGPU_GFX_MAX_SE][AMDGPU_GFX_MAX_SH_PER_SE];
+-
+- /* gfx configure feature */
+- uint32_t double_offchip_lds_buf;
+- /* cached value of DB_DEBUG2 */
+- uint32_t db_debug2;
+-};
+-
+-struct amdgpu_cu_info {
+- uint32_t simd_per_cu;
+- uint32_t max_waves_per_simd;
+- uint32_t wave_front_size;
+- uint32_t max_scratch_slots_per_cu;
+- uint32_t lds_size;
+-
+- /* total active CU number */
+- uint32_t number;
+- uint32_t ao_cu_mask;
+- uint32_t ao_cu_bitmap[4][4];
+- uint32_t bitmap[4][4];
+-};
+-
+-struct amdgpu_gfx_funcs {
+- /* get the gpu clock counter */
+- uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
+- void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
+- void (*read_wave_data)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields);
+- void (*read_wave_vgprs)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t thread, uint32_t start, uint32_t size, uint32_t *dst);
+- void (*read_wave_sgprs)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t start, uint32_t size, uint32_t *dst);
+- void (*select_me_pipe_q)(struct amdgpu_device *adev, u32 me, u32 pipe, u32 queue);
+-};
+-
+-struct amdgpu_ngg_buf {
+- struct amdgpu_bo *bo;
+- uint64_t gpu_addr;
+- uint32_t size;
+- uint32_t bo_size;
+-};
+-
+-enum {
+- NGG_PRIM = 0,
+- NGG_POS,
+- NGG_CNTL,
+- NGG_PARAM,
+- NGG_BUF_MAX
+-};
+-
+-struct amdgpu_ngg {
+- struct amdgpu_ngg_buf buf[NGG_BUF_MAX];
+- uint32_t gds_reserve_addr;
+- uint32_t gds_reserve_size;
+- bool init;
+-};
+-
+-struct sq_work {
+- struct work_struct work;
+- unsigned ih_data;
+-};
+-
+-struct amdgpu_gfx {
+- struct mutex gpu_clock_mutex;
+- struct amdgpu_gfx_config config;
+- struct amdgpu_rlc rlc;
+- struct amdgpu_mec mec;
+- struct amdgpu_kiq kiq;
+- struct amdgpu_scratch scratch;
+- const struct firmware *me_fw; /* ME firmware */
+- uint32_t me_fw_version;
+- const struct firmware *pfp_fw; /* PFP firmware */
+- uint32_t pfp_fw_version;
+- const struct firmware *ce_fw; /* CE firmware */
+- uint32_t ce_fw_version;
+- const struct firmware *rlc_fw; /* RLC firmware */
+- uint32_t rlc_fw_version;
+- const struct firmware *mec_fw; /* MEC firmware */
+- uint32_t mec_fw_version;
+- const struct firmware *mec2_fw; /* MEC2 firmware */
+- uint32_t mec2_fw_version;
+- uint32_t me_feature_version;
+- uint32_t ce_feature_version;
+- uint32_t pfp_feature_version;
+- uint32_t rlc_feature_version;
+- uint32_t rlc_srlc_fw_version;
+- uint32_t rlc_srlc_feature_version;
+- uint32_t rlc_srlg_fw_version;
+- uint32_t rlc_srlg_feature_version;
+- uint32_t rlc_srls_fw_version;
+- uint32_t rlc_srls_feature_version;
+- uint32_t mec_feature_version;
+- uint32_t mec2_feature_version;
+- struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS];
+- unsigned num_gfx_rings;
+- struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
+- unsigned num_compute_rings;
+- struct amdgpu_irq_src eop_irq;
+- struct amdgpu_irq_src priv_reg_irq;
+- struct amdgpu_irq_src priv_inst_irq;
+- struct amdgpu_irq_src cp_ecc_error_irq;
+- struct amdgpu_irq_src sq_irq;
+- struct sq_work sq_work;
+-
+- /* gfx status */
+- uint32_t gfx_current_status;
+- /* ce ram size*/
+- unsigned ce_ram_size;
+- struct amdgpu_cu_info cu_info;
+- const struct amdgpu_gfx_funcs *funcs;
+-
+- /* reset mask */
+- uint32_t grbm_soft_reset;
+- uint32_t srbm_soft_reset;
+- /* s3/s4 mask */
+- bool in_suspend;
+- /* NGG */
+- struct amdgpu_ngg ngg;
+-
+- /* gfx off */
+- bool gfx_off_state; /* true: enabled, false: disabled */
+- struct mutex gfx_off_mutex;
+- uint32_t gfx_off_req_count; /* default 1, enable gfx off: dec 1, disable gfx off: add 1 */
+- struct delayed_work gfx_off_delay_work;
+-
+- /* pipe reservation */
+- struct mutex pipe_reserve_mutex;
+- DECLARE_BITMAP (pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
+-};
+-
+ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ unsigned size, struct amdgpu_ib *ib);
+ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
+@@ -1822,11 +1545,8 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
+ #define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))
+ #define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b))
+ #define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
+-#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
+-#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
+ #define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a))
+ #define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i))
+-#define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q) (adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q))
+
+ /* Common functions */
+ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+index 3fe6527..3577484 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+@@ -30,8 +30,40 @@
+ #define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(500)
+
+ /*
+- * GPU scratch registers helpers function.
++ * GPU GFX IP block helper functions.
+ */
++
++int amdgpu_gfx_queue_to_bit(struct amdgpu_device *adev, int mec,
++ int pipe, int queue)
++{
++ int bit = 0;
++
++ bit += mec * adev->gfx.mec.num_pipe_per_mec
++ * adev->gfx.mec.num_queue_per_pipe;
++ bit += pipe * adev->gfx.mec.num_queue_per_pipe;
++ bit += queue;
++
++ return bit;
++}
++
++void amdgpu_gfx_bit_to_queue(struct amdgpu_device *adev, int bit,
++ int *mec, int *pipe, int *queue)
++{
++ *queue = bit % adev->gfx.mec.num_queue_per_pipe;
++ *pipe = (bit / adev->gfx.mec.num_queue_per_pipe)
++ % adev->gfx.mec.num_pipe_per_mec;
++ *mec = (bit / adev->gfx.mec.num_queue_per_pipe)
++ / adev->gfx.mec.num_pipe_per_mec;
++
++}
++
++bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev,
++ int mec, int pipe, int queue)
++{
++ return test_bit(amdgpu_gfx_queue_to_bit(adev, mec, pipe, queue),
++ adev->gfx.mec.queue_bitmap);
++}
++
+ /**
+ * amdgpu_gfx_scratch_get - Allocate a scratch register
+ *
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+index 1f27905..4e3d147 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+@@ -24,28 +24,296 @@
+ #ifndef __AMDGPU_GFX_H__
+ #define __AMDGPU_GFX_H__
+
+-int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg);
+-void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg);
++/*
++ * GFX stuff
++ */
++#include "clearstate_defs.h"
++#include "amdgpu_ring.h"
+
+-void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se,
+- unsigned max_sh);
++/* GFX current status */
++#define AMDGPU_GFX_NORMAL_MODE 0x00000000L
++#define AMDGPU_GFX_SAFE_MODE 0x00000001L
++#define AMDGPU_GFX_PG_DISABLED_MODE 0x00000002L
++#define AMDGPU_GFX_CG_DISABLED_MODE 0x00000004L
++#define AMDGPU_GFX_LBPW_DISABLED_MODE 0x00000008L
+
+-void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev);
+
+-int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
+- struct amdgpu_ring *ring,
+- struct amdgpu_irq_src *irq);
++struct amdgpu_rlc_funcs {
++ void (*enter_safe_mode)(struct amdgpu_device *adev);
++ void (*exit_safe_mode)(struct amdgpu_device *adev);
++};
+
+-void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring,
+- struct amdgpu_irq_src *irq);
++struct amdgpu_rlc {
++ /* for power gating */
++ struct amdgpu_bo *save_restore_obj;
++ uint64_t save_restore_gpu_addr;
++ volatile uint32_t *sr_ptr;
++ const u32 *reg_list;
++ u32 reg_list_size;
++ /* for clear state */
++ struct amdgpu_bo *clear_state_obj;
++ uint64_t clear_state_gpu_addr;
++ volatile uint32_t *cs_ptr;
++ const struct cs_section_def *cs_data;
++ u32 clear_state_size;
++ /* for cp tables */
++ struct amdgpu_bo *cp_table_obj;
++ uint64_t cp_table_gpu_addr;
++ volatile uint32_t *cp_table_ptr;
++ u32 cp_table_size;
+
+-void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev);
+-int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
+- unsigned hpd_size);
++ /* safe mode for updating CG/PG state */
++ bool in_safe_mode;
++ const struct amdgpu_rlc_funcs *funcs;
+
+-int amdgpu_gfx_compute_mqd_sw_init(struct amdgpu_device *adev,
+- unsigned mqd_size);
+-void amdgpu_gfx_compute_mqd_sw_fini(struct amdgpu_device *adev);
++ /* for firmware data */
++ u32 save_and_restore_offset;
++ u32 clear_state_descriptor_offset;
++ u32 avail_scratch_ram_locations;
++ u32 reg_restore_list_size;
++ u32 reg_list_format_start;
++ u32 reg_list_format_separate_start;
++ u32 starting_offsets_start;
++ u32 reg_list_format_size_bytes;
++ u32 reg_list_size_bytes;
++ u32 reg_list_format_direct_reg_list_length;
++ u32 save_restore_list_cntl_size_bytes;
++ u32 save_restore_list_gpm_size_bytes;
++ u32 save_restore_list_srm_size_bytes;
++
++ u32 *register_list_format;
++ u32 *register_restore;
++ u8 *save_restore_list_cntl;
++ u8 *save_restore_list_gpm;
++ u8 *save_restore_list_srm;
++
++ bool is_rlc_v2_1;
++};
++
++#define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES
++
++struct amdgpu_mec {
++ struct amdgpu_bo *hpd_eop_obj;
++ u64 hpd_eop_gpu_addr;
++ struct amdgpu_bo *mec_fw_obj;
++ u64 mec_fw_gpu_addr;
++ u32 num_mec;
++ u32 num_pipe_per_mec;
++ u32 num_queue_per_pipe;
++ void *mqd_backup[AMDGPU_MAX_COMPUTE_RINGS + 1];
++
++ /* These are the resources for which amdgpu takes ownership */
++ DECLARE_BITMAP(queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
++};
++
++struct amdgpu_kiq {
++ u64 eop_gpu_addr;
++ struct amdgpu_bo *eop_obj;
++ spinlock_t ring_lock;
++ struct amdgpu_ring ring;
++ struct amdgpu_irq_src irq;
++};
++
++/*
++ * GPU scratch registers structures, functions & helpers
++ */
++struct amdgpu_scratch {
++ unsigned num_reg;
++ uint32_t reg_base;
++ uint32_t free_mask;
++};
++
++/*
++ * GFX configurations
++ */
++#define AMDGPU_GFX_MAX_SE 4
++#define AMDGPU_GFX_MAX_SH_PER_SE 2
++
++struct amdgpu_rb_config {
++ uint32_t rb_backend_disable;
++ uint32_t user_rb_backend_disable;
++ uint32_t raster_config;
++ uint32_t raster_config_1;
++};
++
++struct gb_addr_config {
++ uint16_t pipe_interleave_size;
++ uint8_t num_pipes;
++ uint8_t max_compress_frags;
++ uint8_t num_banks;
++ uint8_t num_se;
++ uint8_t num_rb_per_se;
++};
++
++struct amdgpu_gfx_config {
++ unsigned max_shader_engines;
++ unsigned max_tile_pipes;
++ unsigned max_cu_per_sh;
++ unsigned max_sh_per_se;
++ unsigned max_backends_per_se;
++ unsigned max_texture_channel_caches;
++ unsigned max_gprs;
++ unsigned max_gs_threads;
++ unsigned max_hw_contexts;
++ unsigned sc_prim_fifo_size_frontend;
++ unsigned sc_prim_fifo_size_backend;
++ unsigned sc_hiz_tile_fifo_size;
++ unsigned sc_earlyz_tile_fifo_size;
++
++ unsigned num_tile_pipes;
++ unsigned backend_enable_mask;
++ unsigned mem_max_burst_length_bytes;
++ unsigned mem_row_size_in_kb;
++ unsigned shader_engine_tile_size;
++ unsigned num_gpus;
++ unsigned multi_gpu_tile_size;
++ unsigned mc_arb_ramcfg;
++ unsigned gb_addr_config;
++ unsigned num_rbs;
++ unsigned gs_vgt_table_depth;
++ unsigned gs_prim_buffer_depth;
++
++ uint32_t tile_mode_array[32];
++ uint32_t macrotile_mode_array[16];
++
++ struct gb_addr_config gb_addr_config_fields;
++ struct amdgpu_rb_config rb_config[AMDGPU_GFX_MAX_SE][AMDGPU_GFX_MAX_SH_PER_SE];
++
++ /* gfx configure feature */
++ uint32_t double_offchip_lds_buf;
++ /* cached value of DB_DEBUG2 */
++ uint32_t db_debug2;
++};
++
++struct amdgpu_cu_info {
++ uint32_t simd_per_cu;
++ uint32_t max_waves_per_simd;
++ uint32_t wave_front_size;
++ uint32_t max_scratch_slots_per_cu;
++ uint32_t lds_size;
++
++ /* total active CU number */
++ uint32_t number;
++ uint32_t ao_cu_mask;
++ uint32_t ao_cu_bitmap[4][4];
++ uint32_t bitmap[4][4];
++};
++
++struct amdgpu_gfx_funcs {
++ /* get the gpu clock counter */
++ uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
++ void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num,
++ u32 sh_num, u32 instance);
++ void (*read_wave_data)(struct amdgpu_device *adev, uint32_t simd,
++ uint32_t wave, uint32_t *dst, int *no_fields);
++ void (*read_wave_vgprs)(struct amdgpu_device *adev, uint32_t simd,
++ uint32_t wave, uint32_t thread, uint32_t start,
++ uint32_t size, uint32_t *dst);
++ void (*read_wave_sgprs)(struct amdgpu_device *adev, uint32_t simd,
++ uint32_t wave, uint32_t start, uint32_t size,
++ uint32_t *dst);
++ void (*select_me_pipe_q)(struct amdgpu_device *adev, u32 me, u32 pipe,
++ u32 queue);
++};
++
++struct amdgpu_ngg_buf {
++ struct amdgpu_bo *bo;
++ uint64_t gpu_addr;
++ uint32_t size;
++ uint32_t bo_size;
++};
++
++enum {
++ NGG_PRIM = 0,
++ NGG_POS,
++ NGG_CNTL,
++ NGG_PARAM,
++ NGG_BUF_MAX
++};
++
++struct amdgpu_ngg {
++ struct amdgpu_ngg_buf buf[NGG_BUF_MAX];
++ uint32_t gds_reserve_addr;
++ uint32_t gds_reserve_size;
++ bool init;
++};
++
++struct sq_work {
++ struct work_struct work;
++ unsigned ih_data;
++};
++
++struct amdgpu_gfx {
++ struct mutex gpu_clock_mutex;
++ struct amdgpu_gfx_config config;
++ struct amdgpu_rlc rlc;
++ struct amdgpu_mec mec;
++ struct amdgpu_kiq kiq;
++ struct amdgpu_scratch scratch;
++ const struct firmware *me_fw; /* ME firmware */
++ uint32_t me_fw_version;
++ const struct firmware *pfp_fw; /* PFP firmware */
++ uint32_t pfp_fw_version;
++ const struct firmware *ce_fw; /* CE firmware */
++ uint32_t ce_fw_version;
++ const struct firmware *rlc_fw; /* RLC firmware */
++ uint32_t rlc_fw_version;
++ const struct firmware *mec_fw; /* MEC firmware */
++ uint32_t mec_fw_version;
++ const struct firmware *mec2_fw; /* MEC2 firmware */
++ uint32_t mec2_fw_version;
++ uint32_t me_feature_version;
++ uint32_t ce_feature_version;
++ uint32_t pfp_feature_version;
++ uint32_t rlc_feature_version;
++ uint32_t rlc_srlc_fw_version;
++ uint32_t rlc_srlc_feature_version;
++ uint32_t rlc_srlg_fw_version;
++ uint32_t rlc_srlg_feature_version;
++ uint32_t rlc_srls_fw_version;
++ uint32_t rlc_srls_feature_version;
++ uint32_t mec_feature_version;
++ uint32_t mec2_feature_version;
++ struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS];
++ unsigned num_gfx_rings;
++ struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
++ unsigned num_compute_rings;
++ struct amdgpu_irq_src eop_irq;
++ struct amdgpu_irq_src priv_reg_irq;
++ struct amdgpu_irq_src priv_inst_irq;
++ struct amdgpu_irq_src cp_ecc_error_irq;
++ struct amdgpu_irq_src sq_irq;
++ struct sq_work sq_work;
++
++ /* gfx status */
++ uint32_t gfx_current_status;
++ /* ce ram size*/
++ unsigned ce_ram_size;
++ struct amdgpu_cu_info cu_info;
++ const struct amdgpu_gfx_funcs *funcs;
++
++ /* reset mask */
++ uint32_t grbm_soft_reset;
++ uint32_t srbm_soft_reset;
++ /* s3/s4 mask */
++ bool in_suspend;
++ /* NGG */
++ struct amdgpu_ngg ngg;
++
++ /* gfx off */
++ bool gfx_off_state; /* true: enabled, false: disabled */
++ struct mutex gfx_off_mutex;
++ uint32_t gfx_off_req_count; /* default 1, enable gfx off: dec 1, disable gfx off: add 1 */
++ struct delayed_work gfx_off_delay_work;
++
++ /* pipe reservation */
++ struct mutex pipe_reserve_mutex;
++ DECLARE_BITMAP (pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
++};
++
++#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
++#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
++#define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q) (adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q))
+
+ /**
+ * amdgpu_gfx_create_bitmask - create a bitmask
+@@ -60,34 +328,33 @@ static inline u32 amdgpu_gfx_create_bitmask(u32 bit_width)
+ return (u32)((1ULL << bit_width) - 1);
+ }
+
+-static inline int amdgpu_gfx_queue_to_bit(struct amdgpu_device *adev,
+- int mec, int pipe, int queue)
+-{
+- int bit = 0;
++int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg);
++void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg);
+
+- bit += mec * adev->gfx.mec.num_pipe_per_mec
+- * adev->gfx.mec.num_queue_per_pipe;
+- bit += pipe * adev->gfx.mec.num_queue_per_pipe;
+- bit += queue;
++void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se,
++ unsigned max_sh);
+
+- return bit;
+-}
++int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
++ struct amdgpu_ring *ring,
++ struct amdgpu_irq_src *irq);
+
+-static inline void amdgpu_gfx_bit_to_queue(struct amdgpu_device *adev, int bit,
+- int *mec, int *pipe, int *queue)
+-{
+- *queue = bit % adev->gfx.mec.num_queue_per_pipe;
+- *pipe = (bit / adev->gfx.mec.num_queue_per_pipe)
+- % adev->gfx.mec.num_pipe_per_mec;
+- *mec = (bit / adev->gfx.mec.num_queue_per_pipe)
+- / adev->gfx.mec.num_pipe_per_mec;
++void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring,
++ struct amdgpu_irq_src *irq);
+
+-}
+-static inline bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev,
+- int mec, int pipe, int queue)
+-{
+- return test_bit(amdgpu_gfx_queue_to_bit(adev, mec, pipe, queue),
+- adev->gfx.mec.queue_bitmap);
+-}
++void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev);
++int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
++ unsigned hpd_size);
++
++int amdgpu_gfx_compute_mqd_sw_init(struct amdgpu_device *adev,
++ unsigned mqd_size);
++void amdgpu_gfx_compute_mqd_sw_fini(struct amdgpu_device *adev);
++
++void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev);
++int amdgpu_gfx_queue_to_bit(struct amdgpu_device *adev, int mec,
++ int pipe, int queue);
++void amdgpu_gfx_bit_to_queue(struct amdgpu_device *adev, int bit,
++ int *mec, int *pipe, int *queue);
++bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int mec,
++ int pipe, int queue);
+
+ #endif
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5076-drm-amdgpu-move-ih-definitions-into-amdgpu_ih-header.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5076-drm-amdgpu-move-ih-definitions-into-amdgpu_ih-header.patch
new file mode 100644
index 00000000..4538d8b7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5076-drm-amdgpu-move-ih-definitions-into-amdgpu_ih-header.patch
@@ -0,0 +1,80 @@
+From 113759604ab893a9fe22cfff8c579e3a0d30f8a4 Mon Sep 17 00:00:00 2001
+From: Huang Rui <ray.huang@amd.com>
+Date: Thu, 2 Aug 2018 16:24:52 +0800
+Subject: [PATCH 5076/5725] drm/amdgpu: move ih definitions into amdgpu_ih
+ header
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Demangle amdgpu.h
+
+Signed-off-by: Huang Rui <ray.huang@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 14 --------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h | 15 +++++++++++++++
+ 2 files changed, 15 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 6e7c60f..99f3233 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -325,16 +325,6 @@ struct amdgpu_vm_pte_funcs {
+ uint32_t incr, uint64_t flags);
+ };
+
+-/* provided by the ih block */
+-struct amdgpu_ih_funcs {
+- /* ring read/write ptr handling, called from interrupt context */
+- u32 (*get_wptr)(struct amdgpu_device *adev);
+- bool (*prescreen_iv)(struct amdgpu_device *adev);
+- void (*decode_iv)(struct amdgpu_device *adev,
+- struct amdgpu_iv_entry *entry);
+- void (*set_rptr)(struct amdgpu_device *adev);
+-};
+-
+ /*
+ * BIOS.
+ */
+@@ -1528,10 +1518,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
+ #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
+ #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
+ #define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
+-#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
+-#define amdgpu_ih_prescreen_iv(adev) (adev)->irq.ih_funcs->prescreen_iv((adev))
+-#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
+-#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
+ #define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc))
+ #define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l))
+ #define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e))
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+index 0e01f11..a23e1c0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+@@ -76,6 +76,21 @@ struct amdgpu_iv_entry {
+ const uint32_t *iv_entry;
+ };
+
++/* provided by the ih block */
++struct amdgpu_ih_funcs {
++ /* ring read/write ptr handling, called from interrupt context */
++ u32 (*get_wptr)(struct amdgpu_device *adev);
++ bool (*prescreen_iv)(struct amdgpu_device *adev);
++ void (*decode_iv)(struct amdgpu_device *adev,
++ struct amdgpu_iv_entry *entry);
++ void (*set_rptr)(struct amdgpu_device *adev);
++};
++
++#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
++#define amdgpu_ih_prescreen_iv(adev) (adev)->irq.ih_funcs->prescreen_iv((adev))
++#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
++#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
++
+ int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
+ bool use_bus_addr);
+ void amdgpu_ih_ring_fini(struct amdgpu_device *adev);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5077-drm-amdgpu-move-sdma-definitions-into-amdgpu_sdma-he.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5077-drm-amdgpu-move-sdma-definitions-into-amdgpu_sdma-he.patch
new file mode 100644
index 00000000..127557eb
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5077-drm-amdgpu-move-sdma-definitions-into-amdgpu_sdma-he.patch
@@ -0,0 +1,351 @@
+From 1dc7e2981aa896181aa16598101d4fde9d9647ed Mon Sep 17 00:00:00 2001
+From: Huang Rui <ray.huang@amd.com>
+Date: Thu, 2 Aug 2018 17:23:33 +0800
+Subject: [PATCH 5077/5725] drm/amdgpu: move sdma definitions into amdgpu_sdma
+ header
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Demangle amdgpu.h.
+Furthermore, SDMA is used for moving and clearing data buffers, so the header
+also needs to be included in ttm.
+
+Signed-off-by: Huang Rui <ray.huang@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/Makefile | 1 +
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 87 +-------------------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c | 44 ++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h | 101 +++++++++++++++++++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 1 +
+ 5 files changed, 148 insertions(+), 86 deletions(-)
+ create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+ create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
+index b16581d..68a4a06 100644
+--- a/drivers/gpu/drm/amd/amdgpu/Makefile
++++ b/drivers/gpu/drm/amd/amdgpu/Makefile
+@@ -88,6 +88,7 @@ amdgpu-y += \
+
+ # add async DMA block
+ amdgpu-y += \
++ amdgpu_sdma.o \
+ sdma_v2_4.o \
+ sdma_v3_0.o \
+ sdma_v4_0.o
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 99f3233..b2e03fb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -70,6 +70,7 @@
+ #include "amdgpu_vcn.h"
+ #include "amdgpu_gmc.h"
+ #include "amdgpu_gfx.h"
++#include "amdgpu_sdma.h"
+ #include "amdgpu_dm.h"
+ #include "amdgpu_mn.h"
+ #include "amdgpu_virt.h"
+@@ -154,9 +155,6 @@ extern int amdgpu_cik_support;
+ #define AMDGPUFB_CONN_LIMIT 4
+ #define AMDGPU_BIOS_NUM_SCRATCH 16
+
+-/* max number of IP instances */
+-#define AMDGPU_MAX_SDMA_INSTANCES 2
+-
+ /* hard reset data */
+ #define AMDGPU_ASIC_RESET_DATA 0x39d5e86b
+
+@@ -205,13 +203,6 @@ enum amdgpu_cp_irq {
+ AMDGPU_CP_IRQ_LAST
+ };
+
+-enum amdgpu_sdma_irq {
+- AMDGPU_SDMA_IRQ_TRAP0 = 0,
+- AMDGPU_SDMA_IRQ_TRAP1,
+-
+- AMDGPU_SDMA_IRQ_LAST
+-};
+-
+ enum amdgpu_thermal_irq {
+ AMDGPU_THERMAL_IRQ_LOW_TO_HIGH = 0,
+ AMDGPU_THERMAL_IRQ_HIGH_TO_LOW,
+@@ -271,39 +262,6 @@ amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
+ int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
+ const struct amdgpu_ip_block_version *ip_block_version);
+
+-/* provided by hw blocks that can move/clear data. e.g., gfx or sdma */
+-struct amdgpu_buffer_funcs {
+- /* maximum bytes in a single operation */
+- uint32_t copy_max_bytes;
+-
+- /* number of dw to reserve per operation */
+- unsigned copy_num_dw;
+-
+- /* used for buffer migration */
+- void (*emit_copy_buffer)(struct amdgpu_ib *ib,
+- /* src addr in bytes */
+- uint64_t src_offset,
+- /* dst addr in bytes */
+- uint64_t dst_offset,
+- /* number of byte to transfer */
+- uint32_t byte_count);
+-
+- /* maximum bytes in a single operation */
+- uint32_t fill_max_bytes;
+-
+- /* number of dw to reserve per operation */
+- unsigned fill_num_dw;
+-
+- /* used for buffer clearing */
+- void (*emit_fill_buffer)(struct amdgpu_ib *ib,
+- /* value to write to memory */
+- uint32_t src_data,
+- /* dst addr in bytes */
+- uint64_t dst_offset,
+- /* number of byte to fill */
+- uint32_t byte_count);
+-};
+-
+ /* provided by hw blocks that can write ptes, e.g., sdma */
+ struct amdgpu_vm_pte_funcs {
+ /* number of dw to reserve per operation */
+@@ -780,31 +738,6 @@ int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb);
+ void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb);
+
+ /*
+- * SDMA
+- */
+-struct amdgpu_sdma_instance {
+- /* SDMA firmware */
+- const struct firmware *fw;
+- uint32_t fw_version;
+- uint32_t feature_version;
+-
+- struct amdgpu_ring ring;
+- bool burst_nop;
+-};
+-
+-struct amdgpu_sdma {
+- struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
+-#ifdef CONFIG_DRM_AMDGPU_SI
+- //SI DMA has a difference trap irq number for the second engine
+- struct amdgpu_irq_src trap_irq_1;
+-#endif
+- struct amdgpu_irq_src trap_irq;
+- struct amdgpu_irq_src illegal_inst_irq;
+- int num_instances;
+- uint32_t srbm_soft_reset;
+-};
+-
+-/*
+ * Firmware
+ */
+ enum amdgpu_firmware_load_type {
+@@ -1451,22 +1384,6 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
+ #define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
+ #define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))
+
+-static inline struct amdgpu_sdma_instance *
+-amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
+-{
+- struct amdgpu_device *adev = ring->adev;
+- int i;
+-
+- for (i = 0; i < adev->sdma.num_instances; i++)
+- if (&adev->sdma.instance[i].ring == ring)
+- break;
+-
+- if (i < AMDGPU_MAX_SDMA_INSTANCES)
+- return &adev->sdma.instance[i];
+- else
+- return NULL;
+-}
+-
+ /*
+ * ASICs macro.
+ */
+@@ -1529,8 +1446,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
+ #define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos))
+ #define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c))
+ #define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))
+-#define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b))
+-#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
+ #define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a))
+ #define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i))
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+new file mode 100644
+index 0000000..bc9244b
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+@@ -0,0 +1,44 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++#include <drm/drmP.h>
++#include "amdgpu.h"
++#include "amdgpu_sdma.h"
++
++/*
++ * GPU SDMA IP block helper functions.
++ */
++
++struct amdgpu_sdma_instance * amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
++{
++ struct amdgpu_device *adev = ring->adev;
++ int i;
++
++ for (i = 0; i < adev->sdma.num_instances; i++)
++ if (&adev->sdma.instance[i].ring == ring)
++ break;
++
++ if (i < AMDGPU_MAX_SDMA_INSTANCES)
++ return &adev->sdma.instance[i];
++ else
++ return NULL;
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+new file mode 100644
+index 0000000..d17503f
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+@@ -0,0 +1,101 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef __AMDGPU_SDMA_H__
++#define __AMDGPU_SDMA_H__
++
++/* max number of IP instances */
++#define AMDGPU_MAX_SDMA_INSTANCES 2
++
++enum amdgpu_sdma_irq {
++ AMDGPU_SDMA_IRQ_TRAP0 = 0,
++ AMDGPU_SDMA_IRQ_TRAP1,
++
++ AMDGPU_SDMA_IRQ_LAST
++};
++
++struct amdgpu_sdma_instance {
++ /* SDMA firmware */
++ const struct firmware *fw;
++ uint32_t fw_version;
++ uint32_t feature_version;
++
++ struct amdgpu_ring ring;
++ bool burst_nop;
++};
++
++struct amdgpu_sdma {
++ struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
++#ifdef CONFIG_DRM_AMDGPU_SI
++ //SI DMA has a difference trap irq number for the second engine
++ struct amdgpu_irq_src trap_irq_1;
++#endif
++ struct amdgpu_irq_src trap_irq;
++ struct amdgpu_irq_src illegal_inst_irq;
++ int num_instances;
++ uint32_t srbm_soft_reset;
++};
++
++/*
++ * Provided by hw blocks that can move/clear data. e.g., gfx or sdma
++ * But currently, we use sdma to move data.
++ */
++struct amdgpu_buffer_funcs {
++ /* maximum bytes in a single operation */
++ uint32_t copy_max_bytes;
++
++ /* number of dw to reserve per operation */
++ unsigned copy_num_dw;
++
++ /* used for buffer migration */
++ void (*emit_copy_buffer)(struct amdgpu_ib *ib,
++ /* src addr in bytes */
++ uint64_t src_offset,
++ /* dst addr in bytes */
++ uint64_t dst_offset,
++ /* number of byte to transfer */
++ uint32_t byte_count);
++
++ /* maximum bytes in a single operation */
++ uint32_t fill_max_bytes;
++
++ /* number of dw to reserve per operation */
++ unsigned fill_num_dw;
++
++ /* used for buffer clearing */
++ void (*emit_fill_buffer)(struct amdgpu_ib *ib,
++ /* value to write to memory */
++ uint32_t src_data,
++ /* dst addr in bytes */
++ uint64_t dst_offset,
++ /* number of byte to fill */
++ uint32_t byte_count);
++};
++
++#define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b))
++#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
++
++struct amdgpu_sdma_instance *
++amdgpu_get_sdma_instance(struct amdgpu_ring *ring);
++
++#endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index 4db7bf5..b741827 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -48,6 +48,7 @@
+ #include "amdgpu_trace.h"
+ #include "bif/bif_4_1_d.h"
+ #include "amdgpu_amdkfd.h"
++#include "amdgpu_sdma.h"
+
+ #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5078-drm-amdgpu-move-firmware-definitions-into-amdgpu_uco.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5078-drm-amdgpu-move-firmware-definitions-into-amdgpu_uco.patch
new file mode 100644
index 00000000..e7306526
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5078-drm-amdgpu-move-firmware-definitions-into-amdgpu_uco.patch
@@ -0,0 +1,102 @@
+From 0df11ed38e5134ad6e17399dd5e2cb89936c6c6c Mon Sep 17 00:00:00 2001
+From: Huang Rui <ray.huang@amd.com>
+Date: Thu, 2 Aug 2018 17:47:15 +0800
+Subject: [PATCH 5078/5725] drm/amdgpu: move firmware definitions into
+ amdgpu_ucode header
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Demangle amdgpu.h.
+
+Signed-off-by: Huang Rui <ray.huang@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 27 ---------------------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 24 ++++++++++++++++++++++++
+ 2 files changed, 24 insertions(+), 27 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index b2e03fb..5f3250c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -738,33 +738,6 @@ int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb);
+ void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb);
+
+ /*
+- * Firmware
+- */
+-enum amdgpu_firmware_load_type {
+- AMDGPU_FW_LOAD_DIRECT = 0,
+- AMDGPU_FW_LOAD_SMU,
+- AMDGPU_FW_LOAD_PSP,
+-};
+-
+-struct amdgpu_firmware {
+- struct amdgpu_firmware_info ucode[AMDGPU_UCODE_ID_MAXIMUM];
+- enum amdgpu_firmware_load_type load_type;
+- struct amdgpu_bo *fw_buf;
+- unsigned int fw_size;
+- unsigned int max_ucodes;
+- /* firmwares are loaded by psp instead of smu from vega10 */
+- const struct amdgpu_psp_funcs *funcs;
+- struct amdgpu_bo *rbuf;
+- struct mutex mutex;
+-
+- /* gpu info firmware data pointer */
+- const struct firmware *gpu_info_fw;
+-
+- void *fw_buf_ptr;
+- uint64_t fw_buf_mc;
+-};
+-
+-/*
+ * Benchmarking
+ */
+ void amdgpu_benchmark(struct amdgpu_device *adev, int test_number);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+index 2334cb6..1a265e4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+@@ -205,6 +205,12 @@ enum AMDGPU_UCODE_STATUS {
+ AMDGPU_UCODE_STATUS_LOADED,
+ };
+
++enum amdgpu_firmware_load_type {
++ AMDGPU_FW_LOAD_DIRECT = 0,
++ AMDGPU_FW_LOAD_SMU,
++ AMDGPU_FW_LOAD_PSP,
++};
++
+ /* conform to smu_ucode_xfer_cz.h */
+ #define AMDGPU_SDMA0_UCODE_LOADED 0x00000001
+ #define AMDGPU_SDMA1_UCODE_LOADED 0x00000002
+@@ -233,6 +239,24 @@ struct amdgpu_firmware_info {
+ uint32_t tmr_mc_addr_hi;
+ };
+
++struct amdgpu_firmware {
++ struct amdgpu_firmware_info ucode[AMDGPU_UCODE_ID_MAXIMUM];
++ enum amdgpu_firmware_load_type load_type;
++ struct amdgpu_bo *fw_buf;
++ unsigned int fw_size;
++ unsigned int max_ucodes;
++ /* firmwares are loaded by psp instead of smu from vega10 */
++ const struct amdgpu_psp_funcs *funcs;
++ struct amdgpu_bo *rbuf;
++ struct mutex mutex;
++
++ /* gpu info firmware data pointer */
++ const struct firmware *gpu_info_fw;
++
++ void *fw_buf_ptr;
++ uint64_t fw_buf_mc;
++};
++
+ void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr);
+ void amdgpu_ucode_print_smc_hdr(const struct common_firmware_header *hdr);
+ void amdgpu_ucode_print_gfx_hdr(const struct common_firmware_header *hdr);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5079-drm-amdgpu-move-psp-macro-into-amdgpu_psp-header.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5079-drm-amdgpu-move-psp-macro-into-amdgpu_psp-header.patch
new file mode 100644
index 00000000..2ecaf84b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5079-drm-amdgpu-move-psp-macro-into-amdgpu_psp-header.patch
@@ -0,0 +1,110 @@
+From 641efc81bc777c7c31b9f9d95c7b5dea9ef0dc09 Mon Sep 17 00:00:00 2001
+From: Huang Rui <ray.huang@amd.com>
+Date: Thu, 2 Aug 2018 17:54:21 +0800
+Subject: [PATCH 5079/5725] drm/amdgpu: move psp macro into amdgpu_psp header
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Demangle amdgpu.h.
+
+Signed-off-by: Huang Rui <ray.huang@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 -
+ drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 29 +++++++++++++++++------------
+ 2 files changed, 17 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 5f3250c..402e42f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -1420,7 +1420,6 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
+ #define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c))
+ #define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))
+ #define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a))
+-#define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i))
+
+ /* Common functions */
+ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+index 1292096..967712f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+@@ -63,13 +63,16 @@ struct psp_funcs
+ int (*prep_cmd_buf)(struct amdgpu_firmware_info *ucode,
+ struct psp_gfx_cmd_resp *cmd);
+ int (*ring_init)(struct psp_context *psp, enum psp_ring_type ring_type);
+- int (*ring_create)(struct psp_context *psp, enum psp_ring_type ring_type);
++ int (*ring_create)(struct psp_context *psp,
++ enum psp_ring_type ring_type);
+ int (*ring_stop)(struct psp_context *psp,
+ enum psp_ring_type ring_type);
+ int (*ring_destroy)(struct psp_context *psp,
+ enum psp_ring_type ring_type);
+- int (*cmd_submit)(struct psp_context *psp, struct amdgpu_firmware_info *ucode,
+- uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr, int index);
++ int (*cmd_submit)(struct psp_context *psp,
++ struct amdgpu_firmware_info *ucode,
++ uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,
++ int index);
+ bool (*compare_sram_data)(struct psp_context *psp,
+ struct amdgpu_firmware_info *ucode,
+ enum AMDGPU_UCODE_ID ucode_type);
+@@ -83,11 +86,11 @@ struct psp_context
+ struct psp_ring km_ring;
+ struct psp_gfx_cmd_resp *cmd;
+
+- const struct psp_funcs *funcs;
++ const struct psp_funcs *funcs;
+
+ /* fence buffer */
+- struct amdgpu_bo *fw_pri_bo;
+- uint64_t fw_pri_mc_addr;
++ struct amdgpu_bo *fw_pri_bo;
++ uint64_t fw_pri_mc_addr;
+ void *fw_pri_buf;
+
+ /* sos firmware */
+@@ -100,8 +103,8 @@ struct psp_context
+ uint8_t *sos_start_addr;
+
+ /* tmr buffer */
+- struct amdgpu_bo *tmr_bo;
+- uint64_t tmr_mc_addr;
++ struct amdgpu_bo *tmr_bo;
++ uint64_t tmr_mc_addr;
+ void *tmr_buf;
+
+ /* asd firmware and buffer */
+@@ -110,13 +113,13 @@ struct psp_context
+ uint32_t asd_feature_version;
+ uint32_t asd_ucode_size;
+ uint8_t *asd_start_addr;
+- struct amdgpu_bo *asd_shared_bo;
+- uint64_t asd_shared_mc_addr;
++ struct amdgpu_bo *asd_shared_bo;
++ uint64_t asd_shared_mc_addr;
+ void *asd_shared_buf;
+
+ /* fence buffer */
+- struct amdgpu_bo *fence_buf_bo;
+- uint64_t fence_buf_mc_addr;
++ struct amdgpu_bo *fence_buf_bo;
++ uint64_t fence_buf_mc_addr;
+ void *fence_buf;
+
+ /* cmd buffer */
+@@ -150,6 +153,8 @@ struct amdgpu_psp_funcs {
+ #define psp_mode1_reset(psp) \
+ ((psp)->funcs->mode1_reset ? (psp)->funcs->mode1_reset((psp)) : false)
+
++#define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i))
++
+ extern const struct amd_ip_funcs psp_ip_funcs;
+
+ extern const struct amdgpu_ip_block_version psp_v3_1_ip_block;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5080-drm-amdgpu-move-gem-definitions-into-amdgpu_gem-head.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5080-drm-amdgpu-move-gem-definitions-into-amdgpu_gem-head.patch
new file mode 100644
index 00000000..d7f93292
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5080-drm-amdgpu-move-gem-definitions-into-amdgpu_gem-head.patch
@@ -0,0 +1,379 @@
+From 66f4d6a51b2b06f0b7fe4605323b8ca06b37d62b Mon Sep 17 00:00:00 2001
+From: Huang Rui <ray.huang@amd.com>
+Date: Mon, 13 Aug 2018 11:41:35 -0500
+Subject: [PATCH 5080/5725] drm/amdgpu: move gem definitions into amdgpu_gem
+ header
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Demangle amdgpu.h.
+
+Signed-off-by: Huang Rui <ray.huang@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 76 +------------------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 1 +
+ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 1 +
+ drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | 1 +
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h | 92 +++++++++++++++++++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 4 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 6 +-
+ drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 1 +
+ drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 1 +
+ drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 1 +
+ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 1 +
+ 11 files changed, 104 insertions(+), 81 deletions(-)
+ create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 402e42f..048a8ed 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -78,6 +78,7 @@
+ #include "amdgpu_debugfs.h"
+ #include "amdgpu_job.h"
+ #include "amdgpu_bo_list.h"
++#include "amdgpu_gem.h"
+
+ /*
+ * Modules parameters.
+@@ -308,46 +309,6 @@ struct amdgpu_clock {
+ uint32_t max_pixel_clock;
+ };
+
+-/*
+- * GEM.
+- */
+-
+-#define AMDGPU_GEM_DOMAIN_MAX 0x3
+-
+-struct amdgpu_gem_object {
+- struct drm_gem_object base;
+- struct list_head list;
+- struct amdgpu_bo *bo;
+-};
+-
+-struct kgd_mem;
+-
+-#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base)
+-
+-void amdgpu_gem_object_free(struct drm_gem_object *obj);
+-int amdgpu_gem_object_open(struct drm_gem_object *obj,
+- struct drm_file *file_priv);
+-void amdgpu_gem_object_close(struct drm_gem_object *obj,
+- struct drm_file *file_priv);
+-unsigned long amdgpu_gem_timeout(uint64_t timeout_ns);
+-struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
+-struct drm_gem_object *
+-amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
+- struct dma_buf_attachment *attach,
+- struct sg_table *sg);
+-struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
+- struct drm_gem_object *gobj,
+- int flags);
+-struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
+- struct dma_buf *dma_buf);
+-struct drm_gem_object *
+-amdgpu_gem_prime_foreign_bo(struct amdgpu_device *adev, struct amdgpu_bo *bo);
+-struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
+-void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
+-void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+-int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
+-
+-extern const struct dma_buf_ops amdgpu_dmabuf_ops;
+
+ /* sub-allocation manager, it has to be protected by another lock.
+ * By conception this is an helper for other part of the driver
+@@ -398,22 +359,6 @@ struct amdgpu_sa_bo {
+ struct dma_fence *fence;
+ };
+
+-/*
+- * GEM objects.
+- */
+-void amdgpu_gem_force_release(struct amdgpu_device *adev);
+-int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
+- int alignment, u32 initial_domain,
+- u64 flags, enum ttm_bo_type type,
+- struct reservation_object *resv,
+- struct drm_gem_object **obj);
+-
+-int amdgpu_mode_dumb_create(struct drm_file *file_priv,
+- struct drm_device *dev,
+- struct drm_mode_create_dumb *args);
+-int amdgpu_mode_dumb_mmap(struct drm_file *filp,
+- struct drm_device *dev,
+- uint32_t handle, uint64_t *offset_p);
+ int amdgpu_fence_slab_init(void);
+ void amdgpu_fence_slab_fini(void);
+
+@@ -814,25 +759,9 @@ struct amdgpu_asic_funcs {
+ /*
+ * IOCTL.
+ */
+-int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
+- struct drm_file *filp);
+ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+
+-int amdgpu_gem_info_ioctl(struct drm_device *dev, void *data,
+- struct drm_file *filp);
+-int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
+- struct drm_file *filp);
+-int amdgpu_gem_find_bo_by_cpu_mapping_ioctl(struct drm_device *dev, void *data,
+- struct drm_file *filp);
+-int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
+- struct drm_file *filp);
+-int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
+- struct drm_file *filp);
+-int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
+- struct drm_file *filp);
+-int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
+- struct drm_file *filp);
+ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+ int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+@@ -840,9 +769,6 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
+ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+
+-int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
+- struct drm_file *filp);
+-
+ int amdgpu_display_freesync_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index bf68877..24f2489 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -32,6 +32,7 @@
+ #include "amdgpu.h"
+ #include "amdgpu_trace.h"
+ #include "amdgpu_gmc.h"
++#include "amdgpu_gem.h"
+
+ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
+ struct drm_amdgpu_cs_chunk_fence *data,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index d587428..6754cbf 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -36,6 +36,7 @@
+
+ #include "amdgpu.h"
+ #include "amdgpu_irq.h"
++#include "amdgpu_gem.h"
+
+ #include "amdgpu_amdkfd.h"
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+index 7ff11d7..dc7a7d6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+@@ -33,6 +33,7 @@
+ #include <drm/amdgpu_drm.h>
+ #include "amdgpu.h"
+ #include "cikd.h"
++#include "amdgpu_gem.h"
+
+ #include <drm/drm_fb_helper.h>
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
+new file mode 100644
+index 0000000..d63daba
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
+@@ -0,0 +1,92 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++#ifndef __AMDGPU_GEM_H__
++#define __AMDGPU_GEM_H__
++
++#include <drm/amdgpu_drm.h>
++#include <drm/drm_gem.h>
++
++/*
++ * GEM.
++ */
++
++#define AMDGPU_GEM_DOMAIN_MAX 0x3
++#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base)
++
++void amdgpu_gem_object_free(struct drm_gem_object *obj);
++int amdgpu_gem_object_open(struct drm_gem_object *obj,
++ struct drm_file *file_priv);
++void amdgpu_gem_object_close(struct drm_gem_object *obj,
++ struct drm_file *file_priv);
++unsigned long amdgpu_gem_timeout(uint64_t timeout_ns);
++struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
++struct drm_gem_object *
++amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
++ struct dma_buf_attachment *attach,
++ struct sg_table *sg);
++struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
++ struct drm_gem_object *gobj,
++ int flags);
++struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
++ struct dma_buf *dma_buf);
++struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
++void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
++void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
++int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
++
++/*
++ * GEM objects.
++ */
++void amdgpu_gem_force_release(struct amdgpu_device *adev);
++int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
++ int alignment, u32 initial_domain,
++ u64 flags, enum ttm_bo_type type,
++ struct reservation_object *resv,
++ struct drm_gem_object **obj);
++
++int amdgpu_mode_dumb_create(struct drm_file *file_priv,
++ struct drm_device *dev,
++ struct drm_mode_create_dumb *args);
++int amdgpu_mode_dumb_mmap(struct drm_file *filp,
++ struct drm_device *dev,
++ uint32_t handle, uint64_t *offset_p);
++
++int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *filp);
++int amdgpu_gem_info_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *filp);
++int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *filp);
++int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *filp);
++int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *filp);
++int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *filp);
++int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *filp);
++
++int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *filp);
++
++#endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index 58158b0..1d7c70d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -37,6 +37,7 @@
+ #include <linux/slab.h>
+ #include <linux/pm_runtime.h>
+ #include "amdgpu_amdkfd.h"
++#include "amdgpu_gem.h"
+
+ /**
+ * amdgpu_driver_unload_kms - Main unload function for KMS.
+@@ -1138,8 +1139,7 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
+ DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+- DRM_IOCTL_DEF_DRV(AMDGPU_FREESYNC, amdgpu_display_freesync_ioctl, DRM_MASTER),
+- DRM_IOCTL_DEF_DRV(AMDGPU_GEM_FIND_BO, amdgpu_gem_find_bo_by_cpu_mapping_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
++ DRM_IOCTL_DEF_DRV(AMDGPU_FREESYNC, amdgpu_display_freesync_ioctl, DRM_MASTER),
+ DRM_IOCTL_DEF_DRV(AMDGPU_GEM_DGMA, amdgpu_gem_dgma_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_SEM, amdgpu_sem_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ };
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+index e6ef562..8c93c9e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+@@ -35,6 +35,7 @@
+
+ #include "amdgpu.h"
+ #include "amdgpu_display.h"
++#include "amdgpu_gem.h"
+ #include <drm/amdgpu_drm.h>
+ #include <linux/dma-buf.h>
+
+@@ -177,10 +178,7 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
+ bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
+ bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
+
+-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) || !defined(BUILD_AS_DKMS)
+- if (attach->dmabuf->ops != &amdgpu_dmabuf_ops)
+-#endif
+- bo->prime_shared_count = 1;
++ bo->prime_shared_count = 1;
+
+ ww_mutex_unlock(&resv->lock);
+ return &bo->gem_base;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+index b2ad0c2..8fc37d7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+@@ -25,6 +25,7 @@
+ #include "amdgpu.h"
+ #include "gmc_v6_0.h"
+ #include "amdgpu_ucode.h"
++#include "amdgpu_gem.h"
+
+ #include "bif/bif_3_0_d.h"
+ #include "bif/bif_3_0_sh_mask.h"
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+index 3040e8a..5f854e5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+@@ -28,6 +28,7 @@
+ #include "gmc_v7_0.h"
+ #include "amdgpu_ucode.h"
+ #include "amdgpu_amdkfd.h"
++#include "amdgpu_gem.h"
+
+ #include "bif/bif_4_1_d.h"
+ #include "bif/bif_4_1_sh_mask.h"
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+index 5476ddd..82f19e7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+@@ -26,6 +26,7 @@
+ #include "gmc_v8_0.h"
+ #include "amdgpu_ucode.h"
+ #include "amdgpu_amdkfd.h"
++#include "amdgpu_gem.h"
+
+ #include "gmc/gmc_8_1_d.h"
+ #include "gmc/gmc_8_1_sh_mask.h"
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index 68355b1..8a5bccc 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -24,6 +24,7 @@
+ #include "amdgpu.h"
+ #include "gmc_v9_0.h"
+ #include "amdgpu_atomfirmware.h"
++#include "amdgpu_gem.h"
+
+ #include "hdp/hdp_4_0_offset.h"
+ #include "hdp/hdp_4_0_sh_mask.h"
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5081-drm-amd-display-pass-the-right-num-of-modes-added.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5081-drm-amd-display-pass-the-right-num-of-modes-added.patch
new file mode 100644
index 00000000..3876346e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5081-drm-amd-display-pass-the-right-num-of-modes-added.patch
@@ -0,0 +1,39 @@
+From d2d3eb077d5cc6128dc7ea4192cc59cc8f3f088c Mon Sep 17 00:00:00 2001
+From: Mikita Lipski <mikita.lipski@amd.com>
+Date: Thu, 26 Jul 2018 16:27:48 -0400
+Subject: [PATCH 5081/5725] drm/amd/display: pass the right num of modes added
+
+[why]
+If the EDID is null or corrupted, we need to manually add
+a single failsafe mode (640x480). If zero modes are returned,
+DRM adds a different failsafe mode that is not accepted by
+the DP 1.2 compliance test.
+
+[how]
+Return the number of modes manually added
+
+Signed-off-by: Mikita Lipski <mikita.lipski@amd.com>
+Reviewed-by: Sun peng Li <Sunpeng.Li@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 0d6475b..d6d6568 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -3790,7 +3790,8 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
+ encoder = helper->best_encoder(connector);
+
+ if (!edid || !drm_edid_is_valid(edid)) {
+- drm_add_modes_noedid(connector, 640, 480);
++ amdgpu_dm_connector->num_modes =
++ drm_add_modes_noedid(connector, 640, 480);
+ } else {
+ amdgpu_dm_connector_ddc_get_modes(connector, edid);
+ amdgpu_dm_connector_add_common_modes(encoder, connector);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5082-drm-amd-display-correct-image-viewport-calculation.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5082-drm-amd-display-correct-image-viewport-calculation.patch
new file mode 100644
index 00000000..db1a3b2d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5082-drm-amd-display-correct-image-viewport-calculation.patch
@@ -0,0 +1,97 @@
+From 2f15e88819e98ad1188e9104329cb19acb49f55f Mon Sep 17 00:00:00 2001
+From: Martin Tsai <Martin.Tsai@amd.com>
+Date: Fri, 27 Jul 2018 15:39:47 +0800
+Subject: [PATCH 5082/5725] drm/amd/display: correct image viewport calculation
+
+[why]
+The camera/video viewport coordinates were not transferred
+when applying rotation and mirroring.
+
+[how]
+Correct the viewport coordinates in calculate_viewport().
+
+Signed-off-by: Martin Tsai <Martin.Tsai@amd.com>
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 49 +++++++++++++++++++++++
+ 1 file changed, 49 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index 1f3aa4a..bd525e8 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -468,6 +468,18 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
+ pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state;
+ bool sec_split = pipe_ctx->top_pipe &&
+ pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
++ bool flip_vert_scan_dir = false, flip_horz_scan_dir = false;
++
++ /*
++ * Need to calculate the scan direction for viewport to properly determine offset
++ */
++ if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_180) {
++ flip_vert_scan_dir = true;
++ flip_horz_scan_dir = true;
++ } else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90)
++ flip_vert_scan_dir = true;
++ else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
++ flip_horz_scan_dir = true;
+
+ if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE ||
+ stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) {
+@@ -511,6 +523,34 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
+ data->viewport.height = clip.height *
+ surf_src.height / plane_state->dst_rect.height;
+
++ /* To transfer the x, y to correct coordinate on mirror image (camera).
++ * deg 0 : transfer x,
++ * deg 90 : don't need to transfer,
++ * deg180 : transfer y,
++ * deg270 : transfer x and y.
++ * To transfer the x, y to correct coordinate on non-mirror image (video).
++ * deg 0 : don't need to transfer,
++ * deg 90 : transfer y,
++ * deg180 : transfer x and y,
++ * deg270 : transfer x.
++ */
++ if (pipe_ctx->plane_state->horizontal_mirror) {
++ if (flip_horz_scan_dir && !flip_vert_scan_dir) {
++ data->viewport.y = surf_src.height - data->viewport.y - data->viewport.height;
++ data->viewport.x = surf_src.width - data->viewport.x - data->viewport.width;
++ } else if (flip_horz_scan_dir && flip_vert_scan_dir)
++ data->viewport.y = surf_src.height - data->viewport.y - data->viewport.height;
++ else {
++ if (!flip_horz_scan_dir && !flip_vert_scan_dir)
++ data->viewport.x = surf_src.width - data->viewport.x - data->viewport.width;
++ }
++ } else {
++ if (flip_horz_scan_dir)
++ data->viewport.x = surf_src.width - data->viewport.x - data->viewport.width;
++ if (flip_vert_scan_dir)
++ data->viewport.y = surf_src.height - data->viewport.y - data->viewport.height;
++ }
++
+ /* Round down, compensate in init */
+ data->viewport_c.x = data->viewport.x / vpc_div;
+ data->viewport_c.y = data->viewport.y / vpc_div;
+@@ -706,6 +746,15 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct rect *r
+ rect_swap_helper(&src);
+ rect_swap_helper(&data->viewport_c);
+ rect_swap_helper(&data->viewport);
++
++ if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270 &&
++ pipe_ctx->plane_state->horizontal_mirror) {
++ flip_vert_scan_dir = true;
++ }
++ if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 &&
++ pipe_ctx->plane_state->horizontal_mirror) {
++ flip_vert_scan_dir = false;
++ }
+ } else if (pipe_ctx->plane_state->horizontal_mirror)
+ flip_horz_scan_dir = !flip_horz_scan_dir;
+
+--
+2.7.4
+
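The rule table in the patch above reduces to reflecting the clipped viewport offset inside the source rectangle, i.e. new_x = src_width - x - width (and likewise for y). A minimal standalone sketch of that arithmetic, using simplified field names rather than the DC structures, might be:

    #include <stdio.h>

    struct rect { int x, y, width, height; };

    static void mirror_horizontal(struct rect *vp, int src_width)
    {
            vp->x = src_width - vp->x - vp->width;
    }

    static void mirror_vertical(struct rect *vp, int src_height)
    {
            vp->y = src_height - vp->y - vp->height;
    }

    int main(void)
    {
            /* 100x50 viewport at (10, 20) inside a 640x480 source surface */
            struct rect vp = { 10, 20, 100, 50 };

            mirror_horizontal(&vp, 640);   /* x becomes 640 - 10 - 100 = 530 */
            mirror_vertical(&vp, 480);     /* y becomes 480 - 20 - 50  = 410 */

            printf("viewport after flipping both axes: (%d, %d) %dx%d\n",
                   vp.x, vp.y, vp.width, vp.height);
            return 0;
    }

Running it moves a 100x50 viewport at (10, 20) in a 640x480 surface to (530, 410), i.e. the case where both axes are transferred (for example the non-mirrored 180-degree rotation in the comment above).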
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5083-drm-amd-display-Print-DPP-DTN-log-info-only-for-enab.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5083-drm-amd-display-Print-DPP-DTN-log-info-only-for-enab.patch
new file mode 100644
index 00000000..d01257d6
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5083-drm-amd-display-Print-DPP-DTN-log-info-only-for-enab.patch
@@ -0,0 +1,84 @@
+From 42397214d13d16c1767fc01c3bc534ed93bce7d5 Mon Sep 17 00:00:00 2001
+From: Nikola Cornij <nikola.cornij@amd.com>
+Date: Thu, 19 Jul 2018 14:03:14 -0400
+Subject: [PATCH 5083/5725] drm/amd/display: Print DPP DTN log info only for
+ enabled pipes
+
+[why]
+There is currently a dependency on the order in which tests are executed.
+This is because the non-relevant state info is being printed, which results
+in the output based on the state from the previous test.
+
+[how]
+Print DPP DTN log only if the pipe is enabled.
+In addition to the affected per-submission DTN golden logs, this change also
+includes a DTN golden log update for the pre-submission tests.
+The other DTN golden logs affected by this change will be updated upon
+nightly test run (which will generate the updated DTN logs).
+
+Signed-off-by: Nikola Cornij <nikola.cornij@amd.com>
+Reviewed-by: Nikola Cornij <Nikola.Cornij@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | 2 ++
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 7 +++++--
+ drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h | 1 +
+ 3 files changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+index bf8b68f..1d64255 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+@@ -103,6 +103,8 @@ void dpp_read_state(struct dpp *dpp_base,
+ {
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
++ REG_GET(DPP_CONTROL,
++ DPP_CLOCK_ENABLE, &s->is_enabled);
+ REG_GET(CM_IGAM_CONTROL,
+ CM_IGAM_LUT_MODE, &s->igam_lut_mode);
+ REG_GET(CM_IGAM_CONTROL,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index ba4856f..82fb1f9 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -88,7 +88,7 @@ static void log_mpc_crc(struct dc *dc)
+ void dcn10_log_hubbub_state(struct dc *dc)
+ {
+ struct dc_context *dc_ctx = dc->ctx;
+- struct dcn_hubbub_wm wm;
++ struct dcn_hubbub_wm wm = {0};
+ int i;
+
+ hubbub1_wm_read_state(dc->res_pool->hubbub, &wm);
+@@ -244,10 +244,13 @@ void dcn10_log_hw_state(struct dc *dc)
+ "C31 C32 C33 C34\n");
+ for (i = 0; i < pool->pipe_count; i++) {
+ struct dpp *dpp = pool->dpps[i];
+- struct dcn_dpp_state s;
++ struct dcn_dpp_state s = {0};
+
+ dpp->funcs->dpp_read_state(dpp, &s);
+
++ if (!s.is_enabled)
++ continue;
++
+ DTN_INFO("[%2d]: %11xh %-11s %-11s %-11s"
+ "%8x %08xh %08xh %08xh %08xh %08xh %08xh",
+ dpp->inst,
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+index 74ad94b..80a480b 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+@@ -45,6 +45,7 @@ struct dpp_grph_csc_adjustment {
+ };
+
+ struct dcn_dpp_state {
++ uint32_t is_enabled;
+ uint32_t igam_lut_mode;
+ uint32_t igam_input_format;
+ uint32_t dgam_lut_mode;
+--
+2.7.4
+
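Two defensive patterns carry this patch: the state structs are zero-initialized before the hardware readback, and pipes that report themselves disabled are skipped before anything is printed. A small standalone sketch of the same pattern, with made-up types that are not the DCN10 definitions:

    #include <stdio.h>
    #include <stdint.h>

    struct pipe_state {
            uint32_t is_enabled;
            uint32_t mode;
    };

    static void read_pipe_state(int inst, struct pipe_state *s)
    {
            /* Pretend only even-numbered pipes are clocked/enabled. */
            s->is_enabled = (inst % 2 == 0);
            if (s->is_enabled)
                    s->mode = 0x10 + inst;
            /* A disabled pipe leaves the rest of *s untouched, which is why
             * the caller zero-initializes it instead of printing stale data. */
    }

    int main(void)
    {
            for (int i = 0; i < 4; i++) {
                    struct pipe_state s = {0};

                    read_pipe_state(i, &s);
                    if (!s.is_enabled)
                            continue;       /* nothing meaningful to log */

                    printf("pipe %d: mode %#x\n", i, s.mode);
            }
            return 0;
    }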
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5084-drm-amd-display-Use-DGAM-ROM-or-RAM.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5084-drm-amd-display-Use-DGAM-ROM-or-RAM.patch
new file mode 100644
index 00000000..db252670
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5084-drm-amd-display-Use-DGAM-ROM-or-RAM.patch
@@ -0,0 +1,126 @@
+From 27979191d4930f7addbb34ac148a5c7d08955509 Mon Sep 17 00:00:00 2001
+From: Vitaly Prosyak <vitaly.prosyak@amd.com>
+Date: Wed, 18 Jul 2018 15:10:10 -0500
+Subject: [PATCH 5084/5725] drm/amd/display: Use DGAM ROM or RAM
+
+[Why]
+Optimize gamma programming
+
+[How]
+Use ROM for optimization when it is possible.
+Use RAM only when it is necessary.
+
+Signed-off-by: Vitaly Prosyak <vitaly.prosyak@amd.com>
+Reviewed-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c | 6 ++++--
+ drivers/gpu/drm/amd/display/modules/color/color_gamma.c | 10 +++++-----
+ drivers/gpu/drm/amd/display/modules/color/color_gamma.h | 5 +++--
+ 3 files changed, 12 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+index 326f6fb..be19e68 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+@@ -22,7 +22,7 @@
+ * Authors: AMD
+ *
+ */
+-
++#include "amdgpu.h"
+ #include "amdgpu_mode.h"
+ #include "amdgpu_dm.h"
+ #include "dc.h"
+@@ -122,6 +122,8 @@ int amdgpu_dm_set_regamma_lut(struct dm_crtc_state *crtc)
+ {
+ struct drm_property_blob *blob = crtc->base.gamma_lut;
+ struct dc_stream_state *stream = crtc->stream;
++ struct amdgpu_device *adev = (struct amdgpu_device *)
++ crtc->base.state->dev->dev_private;
+ struct drm_color_lut *lut;
+ uint32_t lut_size;
+ struct dc_gamma *gamma;
+@@ -162,7 +164,7 @@ int amdgpu_dm_set_regamma_lut(struct dm_crtc_state *crtc)
+ */
+ stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS;
+ ret = mod_color_calculate_regamma_params(stream->out_transfer_func,
+- gamma, true);
++ gamma, true, adev->asic_type <= CHIP_RAVEN);
+ dc_gamma_release(&gamma);
+ if (!ret) {
+ stream->out_transfer_func->type = old_type;
+diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+index 4c67058..646e60d 100644
+--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
++++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+@@ -1352,7 +1352,7 @@ static bool map_regamma_hw_to_x_user(
+ #define _EXTRA_POINTS 3
+
+ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
+- const struct dc_gamma *ramp, bool mapUserRamp)
++ const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed)
+ {
+ struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts;
+ struct dividers dividers;
+@@ -1368,7 +1368,7 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
+ return false;
+
+ /* we can use hardcoded curve for plain SRGB TF */
+- if (output_tf->type == TF_TYPE_PREDEFINED &&
++ if (output_tf->type == TF_TYPE_PREDEFINED && canRomBeUsed == true &&
+ output_tf->tf == TRANSFER_FUNCTION_SRGB &&
+ (!mapUserRamp && ramp->type == GAMMA_RGB_256))
+ return true;
+@@ -1427,7 +1427,6 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
+ MAX_HW_POINTS,
+ coordinates_x, tf == TRANSFER_FUNCTION_SRGB ? true:false);
+ }
+-
+ map_regamma_hw_to_x_user(ramp, coeff, rgb_user,
+ coordinates_x, axix_x, rgb_regamma,
+ MAX_HW_POINTS, tf_pts,
+@@ -1652,7 +1651,8 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
+
+
+ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
+- struct dc_transfer_func_distributed_points *points)
++ struct dc_transfer_func_distributed_points *points,
++ uint32_t sdr_ref_white_level)
+ {
+ uint32_t i;
+ bool ret = false;
+@@ -1686,7 +1686,7 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
+ build_pq(rgb_regamma,
+ MAX_HW_POINTS,
+ coordinates_x,
+- 80);
++ sdr_ref_white_level);
+ for (i = 0; i <= MAX_HW_POINTS ; i++) {
+ points->red[i] = rgb_regamma[i].r;
+ points->green[i] = rgb_regamma[i].g;
+diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
+index b6404899..63ccb9c 100644
+--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
++++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
+@@ -78,13 +78,14 @@ void precompute_pq(void);
+ void precompute_de_pq(void);
+
+ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
+- const struct dc_gamma *ramp, bool mapUserRamp);
++ const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed);
+
+ bool mod_color_calculate_degamma_params(struct dc_transfer_func *output_tf,
+ const struct dc_gamma *ramp, bool mapUserRamp);
+
+ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
+- struct dc_transfer_func_distributed_points *points);
++ struct dc_transfer_func_distributed_points *points,
++ uint32_t sdr_ref_white_level);
+
+ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
+ struct dc_transfer_func_distributed_points *points);
+--
+2.7.4
+
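The canRomBeUsed flag above gates an early-out: when the output transfer function is the plain predefined sRGB curve and the hardware ROM may be used, the driver skips computing a RAM LUT altogether. A rough standalone sketch of that decision, with placeholder names rather than the DC types:

    #include <stdbool.h>
    #include <stdio.h>

    enum tf_type  { TF_PREDEFINED, TF_DISTRIBUTED_POINTS };
    enum tf_curve { TF_SRGB, TF_PQ };

    static bool program_regamma(enum tf_type type, enum tf_curve curve,
                                bool map_user_ramp, bool can_rom_be_used)
    {
            if (type == TF_PREDEFINED && can_rom_be_used &&
                curve == TF_SRGB && !map_user_ramp) {
                    printf("using hardcoded ROM sRGB curve\n");
                    return true;            /* nothing to compute */
            }

            printf("computing RAM LUT points\n");
            /* ... per-point curve evaluation would go here ... */
            return true;
    }

    int main(void)
    {
            program_regamma(TF_PREDEFINED, TF_SRGB, false, true);   /* ROM path */
            program_regamma(TF_PREDEFINED, TF_SRGB, false, false);  /* RAM path */
            return 0;
    }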
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5085-drm-amd-display-Add-check-for-num-of-entries-in-gamm.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5085-drm-amd-display-Add-check-for-num-of-entries-in-gamm.patch
new file mode 100644
index 00000000..fbc3dd49
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5085-drm-amd-display-Add-check-for-num-of-entries-in-gamm.patch
@@ -0,0 +1,33 @@
+From 356beb979312bc7885fb4f59b2cfefe30b208f0a Mon Sep 17 00:00:00 2001
+From: Vitaly Prosyak <vitaly.prosyak@amd.com>
+Date: Thu, 12 Jul 2018 14:26:47 -0500
+Subject: [PATCH 5085/5725] drm/amd/display: Add check for num of entries in
+ gamma
+
+This check avoids potential bugs related to gamma.
+
+Signed-off-by: Vitaly Prosyak <vitaly.prosyak@amd.com>
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/modules/color/color_gamma.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+index 646e60d..e130aac 100644
+--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
++++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+@@ -1575,7 +1575,8 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
+ /* we can use hardcoded curve for plain SRGB TF */
+ if (input_tf->type == TF_TYPE_PREDEFINED &&
+ input_tf->tf == TRANSFER_FUNCTION_SRGB &&
+- (!mapUserRamp && ramp->type == GAMMA_RGB_256))
++ (!mapUserRamp &&
++ (ramp->type == GAMMA_RGB_256 || ramp->num_entries == 0)))
+ return true;
+
+ input_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5086-drm-amdgpu-Delay-100ms-to-enable-gfx-off-feature.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5086-drm-amdgpu-Delay-100ms-to-enable-gfx-off-feature.patch
new file mode 100644
index 00000000..70aaa39e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5086-drm-amdgpu-Delay-100ms-to-enable-gfx-off-feature.patch
@@ -0,0 +1,32 @@
+From 67a4dbcd79f233ac45206f8ce24a5f19891470d9 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Mon, 6 Aug 2018 19:45:04 +0800
+Subject: [PATCH 5086/5725] drm/amdgpu: Delay 100ms to enable gfx off feature
+
+The original 500 ms delay seems a bit large.
+Change it to 100 ms, as suggested by Christian.
+
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+index 3577484..a750242 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+@@ -26,8 +26,8 @@
+ #include "amdgpu.h"
+ #include "amdgpu_gfx.h"
+
+-/* 0.5 second timeout */
+-#define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(500)
++/* delay 0.1 second to enable gfx off feature */
++#define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100)
+
+ /*
+ * GPU GFX IP block helpers function.
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5087-drm-amdgpu-move-ring-macros-into-amdgpu_ring-header.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5087-drm-amdgpu-move-ring-macros-into-amdgpu_ring-header.patch
new file mode 100644
index 00000000..67461029
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5087-drm-amdgpu-move-ring-macros-into-amdgpu_ring-header.patch
@@ -0,0 +1,107 @@
+From 9fde34aebb330c9953e7c570bd1fef3d76e5151b Mon Sep 17 00:00:00 2001
+From: Huang Rui <ray.huang@amd.com>
+Date: Fri, 3 Aug 2018 18:33:06 +0800
+Subject: [PATCH 5087/5725] drm/amdgpu: move ring macros into amdgpu_ring
+ header
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Demangle amdgpu.h.
+
+Signed-off-by: Huang Rui <ray.huang@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 24 ------------------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 24 ++++++++++++++++++++++++
+ 3 files changed, 25 insertions(+), 25 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 048a8ed..0cc153c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -1310,30 +1310,6 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
+ #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
+ #define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
+ #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
+-#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
+-#define amdgpu_ring_patch_cs_in_place(r, p, ib) ((r)->funcs->patch_cs_in_place((p), (ib)))
+-#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
+-#define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
+-#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
+-#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
+-#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
+-#define amdgpu_ring_emit_ib(r, ib, vmid, c) (r)->funcs->emit_ib((r), (ib), (vmid), (c))
+-#define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
+-#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
+-#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
+-#define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
+-#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
+-#define amdgpu_ring_emit_hdp_invalidate(r) (r)->funcs->emit_hdp_invalidate((r))
+-#define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
+-#define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
+-#define amdgpu_ring_emit_rreg(r, d) (r)->funcs->emit_rreg((r), (d))
+-#define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
+-#define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m))
+-#define amdgpu_ring_emit_reg_write_reg_wait(r, d0, d1, v, m) (r)->funcs->emit_reg_write_reg_wait((r), (d0), (d1), (v), (m))
+-#define amdgpu_ring_emit_tmz(r, b) (r)->funcs->emit_tmz((r), (b))
+-#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
+-#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
+-#define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
+ #define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc))
+ #define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l))
+ #define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e))
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+index 96f69eb..a38daa3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+@@ -238,7 +238,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
+ && !(adev->flags & AMD_IS_APU)
+ #endif
+ )
+- amdgpu_ring_emit_hdp_invalidate(ring);
++ amdgpu_asic_invalidate_hdp(adev, ring);
+
+ if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE)
+ fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+index 7bec0be..1f5fcfd 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+@@ -221,6 +221,30 @@ struct amdgpu_ring {
+ #endif
+ };
+
++#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
++#define amdgpu_ring_patch_cs_in_place(r, p, ib) ((r)->funcs->patch_cs_in_place((p), (ib)))
++#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
++#define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
++#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
++#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
++#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
++#define amdgpu_ring_emit_ib(r, ib, vmid, c) (r)->funcs->emit_ib((r), (ib), (vmid), (c))
++#define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
++#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
++#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
++#define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
++#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
++#define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
++#define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
++#define amdgpu_ring_emit_rreg(r, d) (r)->funcs->emit_rreg((r), (d))
++#define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
++#define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m))
++#define amdgpu_ring_emit_reg_write_reg_wait(r, d0, d1, v, m) (r)->funcs->emit_reg_write_reg_wait((r), (d0), (d1), (v), (m))
++#define amdgpu_ring_emit_tmz(r, b) (r)->funcs->emit_tmz((r), (b))
++#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
++#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
++#define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
++
+ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
+ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
+ void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5088-drm-amdgpu-remove-useless-gds-switch-macro.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5088-drm-amdgpu-remove-useless-gds-switch-macro.patch
new file mode 100644
index 00000000..9d30aa3a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5088-drm-amdgpu-remove-useless-gds-switch-macro.patch
@@ -0,0 +1,32 @@
+From 801072382f838217f4ef10092682b7c0c31c2c3c Mon Sep 17 00:00:00 2001
+From: Huang Rui <ray.huang@amd.com>
+Date: Fri, 3 Aug 2018 18:37:58 +0800
+Subject: [PATCH 5088/5725] drm/amdgpu: remove useless gds switch macro
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Demangle amdgpu.h.
+
+Signed-off-by: Huang Rui <ray.huang@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 0cc153c..f3e06d8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -1321,7 +1321,6 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
+ #define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos))
+ #define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c))
+ #define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))
+-#define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a))
+
+ /* Common functions */
+ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5089-drm-amdgpu-move-display-definitions-into-amdgpu_disp.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5089-drm-amdgpu-move-display-definitions-into-amdgpu_disp.patch
new file mode 100644
index 00000000..b279a4dc
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5089-drm-amdgpu-move-display-definitions-into-amdgpu_disp.patch
@@ -0,0 +1,221 @@
+From 27c3e0af6f78219da54f2ca154084cd31249174e Mon Sep 17 00:00:00 2001
+From: Huang Rui <ray.huang@amd.com>
+Date: Thu, 9 Aug 2018 09:50:12 -0500
+Subject: [PATCH 5089/5725] drm/amdgpu: move display definitions into
+ amdgpu_display header
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Demangle amdgpu.h.
+
+Signed-off-by: Huang Rui <ray.huang@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 12 ------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 1 +
+ drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 1 +
+ drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 1 +
+ drivers/gpu/drm/amd/amdgpu/amdgpu_display.h | 15 +++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c | 1 +
+ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 1 +
+ drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 1 +
+ drivers/gpu/drm/amd/amdgpu/atombios_encoders.c | 1 +
+ drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 1 +
+ drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 1 +
+ drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 1 +
+ drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 1 +
+ 13 files changed, 26 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index f3e06d8..403d2b8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -1310,24 +1310,12 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
+ #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
+ #define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
+ #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
+-#define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc))
+-#define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l))
+-#define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e))
+-#define amdgpu_display_hpd_sense(adev, h) (adev)->mode_info.funcs->hpd_sense((adev), (h))
+-#define amdgpu_display_hpd_set_polarity(adev, h) (adev)->mode_info.funcs->hpd_set_polarity((adev), (h))
+-#define amdgpu_display_hpd_get_gpio_reg(adev) (adev)->mode_info.funcs->hpd_get_gpio_reg((adev))
+-#define amdgpu_display_bandwidth_update(adev) (adev)->mode_info.funcs->bandwidth_update((adev))
+-#define amdgpu_display_page_flip(adev, crtc, base, async) (adev)->mode_info.funcs->page_flip((adev), (crtc), (base), (async))
+-#define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos))
+-#define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c))
+-#define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))
+
+ /* Common functions */
+ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ struct amdgpu_job* job, bool force);
+ void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
+ bool amdgpu_device_need_post(struct amdgpu_device *adev);
+-void amdgpu_display_update_priority(struct amdgpu_device *adev);
+
+ void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
+ u64 num_vis_bytes);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+index 3539932..6488e90 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+@@ -31,6 +31,7 @@
+ #include <drm/drm_crtc_helper.h>
+ #include "amdgpu.h"
+ #include "amdgpu_pm.h"
++#include "amdgpu_display.h"
+ #include "amd_acpi.h"
+ #include "atom.h"
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+index b5773e8..b38275d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+@@ -29,6 +29,7 @@
+ #include "amdgpu_atombios.h"
+ #include "amdgpu_atomfirmware.h"
+ #include "amdgpu_i2c.h"
++#include "amdgpu_display.h"
+
+ #include "atom.h"
+ #include "atom-bits.h"
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+index c0b2ab9..370071b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+@@ -34,6 +34,7 @@
+ #include "atombios_dp.h"
+ #include "amdgpu_connectors.h"
+ #include "amdgpu_i2c.h"
++#include "amdgpu_display.h"
+
+ #include <linux/pm_runtime.h>
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
+index ec11434..8a34ec8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
+@@ -23,6 +23,21 @@
+ #ifndef __AMDGPU_DISPLAY_H__
+ #define __AMDGPU_DISPLAY_H__
+
++#define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc))
++#define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l))
++#define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e))
++#define amdgpu_display_hpd_sense(adev, h) (adev)->mode_info.funcs->hpd_sense((adev), (h))
++#define amdgpu_display_hpd_set_polarity(adev, h) (adev)->mode_info.funcs->hpd_set_polarity((adev), (h))
++#define amdgpu_display_hpd_get_gpio_reg(adev) (adev)->mode_info.funcs->hpd_get_gpio_reg((adev))
++#define amdgpu_display_bandwidth_update(adev) (adev)->mode_info.funcs->bandwidth_update((adev))
++#define amdgpu_display_page_flip(adev, crtc, base, async) (adev)->mode_info.funcs->page_flip((adev), (crtc), (base), (async))
++#define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos))
++#define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c))
++#define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))
++
++int amdgpu_display_freesync_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *filp);
++void amdgpu_display_update_priority(struct amdgpu_device *adev);
+ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev);
+ struct drm_framebuffer *
+ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
+index 94138ab..0f43b19 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
+@@ -28,6 +28,7 @@
+ #include <drm/amdgpu_drm.h>
+ #include "amdgpu.h"
+ #include "amdgpu_connectors.h"
++#include "amdgpu_display.h"
+ #include "atom.h"
+ #include "atombios_encoders.h"
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index 1d7c70d..98c0c38 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -38,6 +38,7 @@
+ #include <linux/pm_runtime.h>
+ #include "amdgpu_amdkfd.h"
+ #include "amdgpu_gem.h"
++#include "amdgpu_display.h"
+
+ /**
+ * amdgpu_driver_unload_kms - Main unload function for KMS.
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+index 2a78a3c..1e9ed55 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+@@ -27,6 +27,7 @@
+ #include "amdgpu_drv.h"
+ #include "amdgpu_pm.h"
+ #include "amdgpu_dpm.h"
++#include "amdgpu_display.h"
+ #include "atom.h"
+ #include <linux/power_supply.h>
+ #include <linux/hwmon.h>
+diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+index d702fb8..60e2447 100644
+--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
++++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+@@ -28,6 +28,7 @@
+ #include <drm/amdgpu_drm.h>
+ #include "amdgpu.h"
+ #include "amdgpu_connectors.h"
++#include "amdgpu_display.h"
+ #include "atom.h"
+ #include "atombios_encoders.h"
+ #include "atombios_dp.h"
+diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+index 85649e9..34cdcc8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+@@ -31,6 +31,7 @@
+ #include "atombios_encoders.h"
+ #include "amdgpu_pll.h"
+ #include "amdgpu_connectors.h"
++#include "amdgpu_display.h"
+ #include "dce_v10_0.h"
+
+ #include "dce/dce_10_0_d.h"
+diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+index 4e4e5fc..fd3441e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+@@ -31,6 +31,7 @@
+ #include "atombios_encoders.h"
+ #include "amdgpu_pll.h"
+ #include "amdgpu_connectors.h"
++#include "amdgpu_display.h"
+ #include "dce_v11_0.h"
+
+ #include "dce/dce_11_0_d.h"
+diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+index 1cbc20c..b185bd7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+@@ -30,6 +30,7 @@
+ #include "atombios_encoders.h"
+ #include "amdgpu_pll.h"
+ #include "amdgpu_connectors.h"
++#include "amdgpu_display.h"
+
+ #include "bif/bif_3_0_d.h"
+ #include "bif/bif_3_0_sh_mask.h"
+diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+index 26fcb39..bff4b94 100644
+--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+@@ -31,6 +31,7 @@
+ #include "atombios_encoders.h"
+ #include "amdgpu_pll.h"
+ #include "amdgpu_connectors.h"
++#include "amdgpu_display.h"
+ #include "dce_v8_0.h"
+
+ #include "dce/dce_8_0_d.h"
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5090-drm-amdgpu-move-gmc-macros-into-amdgpu_gmc-header.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5090-drm-amdgpu-move-gmc-macros-into-amdgpu_gmc-header.patch
new file mode 100644
index 00000000..a6176635
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5090-drm-amdgpu-move-gmc-macros-into-amdgpu_gmc-header.patch
@@ -0,0 +1,56 @@
+From b094dba496ebca7275014895c2c3a15148c6a40e Mon Sep 17 00:00:00 2001
+From: Huang Rui <ray.huang@amd.com>
+Date: Fri, 3 Aug 2018 18:59:25 +0800
+Subject: [PATCH 5090/5725] drm/amdgpu: move gmc macros into amdgpu_gmc header
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Demangle amdgpu.h.
+
+Signed-off-by: Huang Rui <ray.huang@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 6 ------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 7 +++++++
+ 2 files changed, 7 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 403d2b8..59b0980 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -1301,12 +1301,6 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
+ #define amdgpu_asic_flush_hdp(adev, r) (adev)->asic_funcs->flush_hdp((adev), (r))
+ #define amdgpu_asic_invalidate_hdp(adev, r) (adev)->asic_funcs->invalidate_hdp((adev), (r))
+ #define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev))
+-#define amdgpu_gmc_flush_gpu_tlb(adev, vmid) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid))
+-#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
+-#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
+-#define amdgpu_gmc_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gmc.gmc_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
+-#define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
+-#define amdgpu_gmc_get_pte_flags(adev, flags) (adev)->gmc.gmc_funcs->get_vm_pte_flags((adev),(flags))
+ #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
+ #define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
+ #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+index 5f1d59e..5dbbac6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+@@ -112,6 +112,13 @@ struct amdgpu_gmc {
+ const struct amdgpu_gmc_funcs *gmc_funcs;
+ };
+
++#define amdgpu_gmc_flush_gpu_tlb(adev, vmid) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid))
++#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
++#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
++#define amdgpu_gmc_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gmc.gmc_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
++#define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
++#define amdgpu_gmc_get_pte_flags(adev, flags) (adev)->gmc.gmc_funcs->get_vm_pte_flags((adev),(flags))
++
+ /**
+ * amdgpu_gmc_vram_full_visible - Check if full VRAM is visible through the BAR
+ *
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5091-drm-amdgpu-move-vm-definitions-into-amdgpu_vm-header.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5091-drm-amdgpu-move-vm-definitions-into-amdgpu_vm-header.patch
new file mode 100644
index 00000000..fb24597b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5091-drm-amdgpu-move-vm-definitions-into-amdgpu_vm-header.patch
@@ -0,0 +1,108 @@
+From e551c7aab2da28721baa304ad1557713b059a729 Mon Sep 17 00:00:00 2001
+From: Huang Rui <ray.huang@amd.com>
+Date: Fri, 3 Aug 2018 19:06:02 +0800
+Subject: [PATCH 5091/5725] drm/amdgpu: move vm definitions into amdgpu_vm
+ header
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Demangle amdgpu.h.
+
+Signed-off-by: Huang Rui <ray.huang@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 24 ------------------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 25 +++++++++++++++++++++++++
+ 2 files changed, 25 insertions(+), 24 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 59b0980..27042b2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -263,27 +263,6 @@ amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
+ int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
+ const struct amdgpu_ip_block_version *ip_block_version);
+
+-/* provided by hw blocks that can write ptes, e.g., sdma */
+-struct amdgpu_vm_pte_funcs {
+- /* number of dw to reserve per operation */
+- unsigned copy_pte_num_dw;
+-
+- /* copy pte entries from GART */
+- void (*copy_pte)(struct amdgpu_ib *ib,
+- uint64_t pe, uint64_t src,
+- unsigned count);
+-
+- /* write pte one entry at a time with addr mapping */
+- void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
+- uint64_t value, unsigned count,
+- uint32_t incr);
+- /* for linear pte/pde updates without addr mapping */
+- void (*set_pte_pde)(struct amdgpu_ib *ib,
+- uint64_t pe,
+- uint64_t addr, unsigned count,
+- uint32_t incr, uint64_t flags);
+-};
+-
+ /*
+ * BIOS.
+ */
+@@ -1301,9 +1280,6 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
+ #define amdgpu_asic_flush_hdp(adev, r) (adev)->asic_funcs->flush_hdp((adev), (r))
+ #define amdgpu_asic_invalidate_hdp(adev, r) (adev)->asic_funcs->invalidate_hdp((adev), (r))
+ #define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev))
+-#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
+-#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
+-#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
+
+ /* Common functions */
+ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+index a5cf0cc..b96bfed 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+@@ -163,6 +163,27 @@ struct amdgpu_vm_pt {
+ struct amdgpu_vm_pt *entries;
+ };
+
++/* provided by hw blocks that can write ptes, e.g., sdma */
++struct amdgpu_vm_pte_funcs {
++ /* number of dw to reserve per operation */
++ unsigned copy_pte_num_dw;
++
++ /* copy pte entries from GART */
++ void (*copy_pte)(struct amdgpu_ib *ib,
++ uint64_t pe, uint64_t src,
++ unsigned count);
++
++ /* write pte one entry at a time with addr mapping */
++ void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
++ uint64_t value, unsigned count,
++ uint32_t incr);
++ /* for linear pte/pde updates without addr mapping */
++ void (*set_pte_pde)(struct amdgpu_ib *ib,
++ uint64_t pe,
++ uint64_t addr, unsigned count,
++ uint32_t incr, uint64_t flags);
++};
++
+ #define AMDGPU_VM_FAULT(pasid, addr) (((u64)(pasid) << 48) | (addr))
+ #define AMDGPU_VM_FAULT_PASID(fault) ((u64)(fault) >> 48)
+ #define AMDGPU_VM_FAULT_ADDR(fault) ((u64)(fault) & 0xfffffffff000ULL)
+@@ -272,6 +293,10 @@ struct amdgpu_vm_manager {
+ unsigned n_compute_vms;
+ };
+
++#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
++#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
++#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
++
+ void amdgpu_vm_manager_init(struct amdgpu_device *adev);
+ void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
+ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5092-drm-amdgpu-move-missed-gfxoff-entry-into-amdgpu_gfx-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5092-drm-amdgpu-move-missed-gfxoff-entry-into-amdgpu_gfx-.patch
new file mode 100644
index 00000000..f9710a12
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5092-drm-amdgpu-move-missed-gfxoff-entry-into-amdgpu_gfx-.patch
@@ -0,0 +1,45 @@
+From 112a7d8730c140ad4fd8c8f726e22e344806e51f Mon Sep 17 00:00:00 2001
+From: Huang Rui <ray.huang@amd.com>
+Date: Mon, 6 Aug 2018 20:14:51 +0800
+Subject: [PATCH 5092/5725] drm/amdgpu: move missed gfxoff entry into
+ amdgpu_gfx header
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Move missed gfxoff entry to amdgpu_gfx.h.
+
+Signed-off-by: Huang Rui <ray.huang@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 -
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 1 +
+ 2 files changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 27042b2..ece78da 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -1299,7 +1299,6 @@ void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
+ const u32 array_size);
+
+ bool amdgpu_device_is_px(struct drm_device *dev);
+-void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable);
+ /* atpx handler */
+ #if defined(CONFIG_VGA_SWITCHEROO)
+ void amdgpu_register_atpx_handler(void);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+index 4e3d147..53e9e2a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+@@ -356,5 +356,6 @@ void amdgpu_gfx_bit_to_queue(struct amdgpu_device *adev, int bit,
+ int *mec, int *pipe, int *queue);
+ bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int mec,
+ int pipe, int queue);
++void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable);
+
+ #endif
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5093-drm-amdgpu-pp-endian-fixes-for-process_pptables_v1_0.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5093-drm-amdgpu-pp-endian-fixes-for-process_pptables_v1_0.patch
new file mode 100644
index 00000000..d02e152c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5093-drm-amdgpu-pp-endian-fixes-for-process_pptables_v1_0.patch
@@ -0,0 +1,386 @@
+From 58cf0871c004edf550ea357a65786285759eeb07 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Tue, 7 Aug 2018 15:17:09 -0500
+Subject: [PATCH 5093/5725] drm/amdgpu/pp: endian fixes for
+ process_pptables_v1_0.c
+
+Properly swap when reading from the vbios.
+
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../amd/powerplay/hwmgr/process_pptables_v1_0.c | 194 ++++++++++-----------
+ 1 file changed, 97 insertions(+), 97 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+index 4e1fd53..ae64ff7 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+@@ -214,23 +214,23 @@ static int get_platform_power_management_table(
+ ptr->ppm_design
+ = atom_ppm_table->ucPpmDesign;
+ ptr->cpu_core_number
+- = atom_ppm_table->usCpuCoreNumber;
++ = le16_to_cpu(atom_ppm_table->usCpuCoreNumber);
+ ptr->platform_tdp
+- = atom_ppm_table->ulPlatformTDP;
++ = le32_to_cpu(atom_ppm_table->ulPlatformTDP);
+ ptr->small_ac_platform_tdp
+- = atom_ppm_table->ulSmallACPlatformTDP;
++ = le32_to_cpu(atom_ppm_table->ulSmallACPlatformTDP);
+ ptr->platform_tdc
+- = atom_ppm_table->ulPlatformTDC;
++ = le32_to_cpu(atom_ppm_table->ulPlatformTDC);
+ ptr->small_ac_platform_tdc
+- = atom_ppm_table->ulSmallACPlatformTDC;
++ = le32_to_cpu(atom_ppm_table->ulSmallACPlatformTDC);
+ ptr->apu_tdp
+- = atom_ppm_table->ulApuTDP;
++ = le32_to_cpu(atom_ppm_table->ulApuTDP);
+ ptr->dgpu_tdp
+- = atom_ppm_table->ulDGpuTDP;
++ = le32_to_cpu(atom_ppm_table->ulDGpuTDP);
+ ptr->dgpu_ulv_power
+- = atom_ppm_table->ulDGpuUlvPower;
++ = le32_to_cpu(atom_ppm_table->ulDGpuUlvPower);
+ ptr->tj_max
+- = atom_ppm_table->ulTjmax;
++ = le32_to_cpu(atom_ppm_table->ulTjmax);
+
+ pp_table_information->ppm_parameter_table = ptr;
+
+@@ -355,11 +355,11 @@ static int get_hard_limits(
+ PP_ASSERT_WITH_CODE((0 != limitable->ucNumEntries), "Invalid PowerPlay Table!", return -1);
+
+ /* currently we always take entries[0] parameters */
+- limits->sclk = (uint32_t)limitable->entries[0].ulSCLKLimit;
+- limits->mclk = (uint32_t)limitable->entries[0].ulMCLKLimit;
+- limits->vddc = (uint16_t)limitable->entries[0].usVddcLimit;
+- limits->vddci = (uint16_t)limitable->entries[0].usVddciLimit;
+- limits->vddgfx = (uint16_t)limitable->entries[0].usVddgfxLimit;
++ limits->sclk = le32_to_cpu(limitable->entries[0].ulSCLKLimit);
++ limits->mclk = le32_to_cpu(limitable->entries[0].ulMCLKLimit);
++ limits->vddc = le16_to_cpu(limitable->entries[0].usVddcLimit);
++ limits->vddci = le16_to_cpu(limitable->entries[0].usVddciLimit);
++ limits->vddgfx = le16_to_cpu(limitable->entries[0].usVddgfxLimit);
+
+ return 0;
+ }
+@@ -396,10 +396,10 @@ static int get_mclk_voltage_dependency_table(
+ ATOM_Tonga_MCLK_Dependency_Record,
+ entries, mclk_dep_table, i);
+ mclk_table_record->vddInd = mclk_dep_record->ucVddcInd;
+- mclk_table_record->vdd_offset = mclk_dep_record->usVddgfxOffset;
+- mclk_table_record->vddci = mclk_dep_record->usVddci;
+- mclk_table_record->mvdd = mclk_dep_record->usMvdd;
+- mclk_table_record->clk = mclk_dep_record->ulMclk;
++ mclk_table_record->vdd_offset = le16_to_cpu(mclk_dep_record->usVddgfxOffset);
++ mclk_table_record->vddci = le16_to_cpu(mclk_dep_record->usVddci);
++ mclk_table_record->mvdd = le16_to_cpu(mclk_dep_record->usMvdd);
++ mclk_table_record->clk = le32_to_cpu(mclk_dep_record->ulMclk);
+ }
+
+ *pp_tonga_mclk_dep_table = mclk_table;
+@@ -443,8 +443,8 @@ static int get_sclk_voltage_dependency_table(
+ phm_ppt_v1_clock_voltage_dependency_record,
+ entries, sclk_table, i);
+ sclk_table_record->vddInd = sclk_dep_record->ucVddInd;
+- sclk_table_record->vdd_offset = sclk_dep_record->usVddcOffset;
+- sclk_table_record->clk = sclk_dep_record->ulSclk;
++ sclk_table_record->vdd_offset = le16_to_cpu(sclk_dep_record->usVddcOffset);
++ sclk_table_record->clk = le32_to_cpu(sclk_dep_record->ulSclk);
+ sclk_table_record->cks_enable =
+ (((sclk_dep_record->ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
+ sclk_table_record->cks_voffset = (sclk_dep_record->ucCKSVOffsetandDisable & 0x7F);
+@@ -475,12 +475,12 @@ static int get_sclk_voltage_dependency_table(
+ phm_ppt_v1_clock_voltage_dependency_record,
+ entries, sclk_table, i);
+ sclk_table_record->vddInd = sclk_dep_record->ucVddInd;
+- sclk_table_record->vdd_offset = sclk_dep_record->usVddcOffset;
+- sclk_table_record->clk = sclk_dep_record->ulSclk;
++ sclk_table_record->vdd_offset = le16_to_cpu(sclk_dep_record->usVddcOffset);
++ sclk_table_record->clk = le32_to_cpu(sclk_dep_record->ulSclk);
+ sclk_table_record->cks_enable =
+ (((sclk_dep_record->ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
+ sclk_table_record->cks_voffset = (sclk_dep_record->ucCKSVOffsetandDisable & 0x7F);
+- sclk_table_record->sclk_offset = sclk_dep_record->ulSclkOffset;
++ sclk_table_record->sclk_offset = le32_to_cpu(sclk_dep_record->ulSclkOffset);
+ }
+ }
+ *pp_tonga_sclk_dep_table = sclk_table;
+@@ -534,7 +534,7 @@ static int get_pcie_table(
+ ATOM_Tonga_PCIE_Record,
+ entries, atom_pcie_table, i);
+ pcie_record->gen_speed = atom_pcie_record->ucPCIEGenSpeed;
+- pcie_record->lane_width = atom_pcie_record->usPCIELaneWidth;
++ pcie_record->lane_width = le16_to_cpu(atom_pcie_record->usPCIELaneWidth);
+ }
+
+ *pp_tonga_pcie_table = pcie_table;
+@@ -574,8 +574,8 @@ static int get_pcie_table(
+ ATOM_Polaris10_PCIE_Record,
+ entries, atom_pcie_table, i);
+ pcie_record->gen_speed = atom_pcie_record->ucPCIEGenSpeed;
+- pcie_record->lane_width = atom_pcie_record->usPCIELaneWidth;
+- pcie_record->pcie_sclk = atom_pcie_record->ulPCIE_Sclk;
++ pcie_record->lane_width = le16_to_cpu(atom_pcie_record->usPCIELaneWidth);
++ pcie_record->pcie_sclk = le32_to_cpu(atom_pcie_record->ulPCIE_Sclk);
+ }
+
+ *pp_tonga_pcie_table = pcie_table;
+@@ -609,64 +609,64 @@ static int get_cac_tdp_table(
+ if (table->ucRevId < 3) {
+ const ATOM_Tonga_PowerTune_Table *tonga_table =
+ (ATOM_Tonga_PowerTune_Table *)table;
+- tdp_table->usTDP = tonga_table->usTDP;
++ tdp_table->usTDP = le16_to_cpu(tonga_table->usTDP);
+ tdp_table->usConfigurableTDP =
+- tonga_table->usConfigurableTDP;
+- tdp_table->usTDC = tonga_table->usTDC;
++ le16_to_cpu(tonga_table->usConfigurableTDP);
++ tdp_table->usTDC = le16_to_cpu(tonga_table->usTDC);
+ tdp_table->usBatteryPowerLimit =
+- tonga_table->usBatteryPowerLimit;
++ le16_to_cpu(tonga_table->usBatteryPowerLimit);
+ tdp_table->usSmallPowerLimit =
+- tonga_table->usSmallPowerLimit;
++ le16_to_cpu(tonga_table->usSmallPowerLimit);
+ tdp_table->usLowCACLeakage =
+- tonga_table->usLowCACLeakage;
++ le16_to_cpu(tonga_table->usLowCACLeakage);
+ tdp_table->usHighCACLeakage =
+- tonga_table->usHighCACLeakage;
++ le16_to_cpu(tonga_table->usHighCACLeakage);
+ tdp_table->usMaximumPowerDeliveryLimit =
+- tonga_table->usMaximumPowerDeliveryLimit;
++ le16_to_cpu(tonga_table->usMaximumPowerDeliveryLimit);
+ tdp_table->usDefaultTargetOperatingTemp =
+- tonga_table->usTjMax;
++ le16_to_cpu(tonga_table->usTjMax);
+ tdp_table->usTargetOperatingTemp =
+- tonga_table->usTjMax; /*Set the initial temp to the same as default */
++ le16_to_cpu(tonga_table->usTjMax); /*Set the initial temp to the same as default */
+ tdp_table->usPowerTuneDataSetID =
+- tonga_table->usPowerTuneDataSetID;
++ le16_to_cpu(tonga_table->usPowerTuneDataSetID);
+ tdp_table->usSoftwareShutdownTemp =
+- tonga_table->usSoftwareShutdownTemp;
++ le16_to_cpu(tonga_table->usSoftwareShutdownTemp);
+ tdp_table->usClockStretchAmount =
+- tonga_table->usClockStretchAmount;
++ le16_to_cpu(tonga_table->usClockStretchAmount);
+ } else { /* Fiji and newer */
+ const ATOM_Fiji_PowerTune_Table *fijitable =
+ (ATOM_Fiji_PowerTune_Table *)table;
+- tdp_table->usTDP = fijitable->usTDP;
+- tdp_table->usConfigurableTDP = fijitable->usConfigurableTDP;
+- tdp_table->usTDC = fijitable->usTDC;
+- tdp_table->usBatteryPowerLimit = fijitable->usBatteryPowerLimit;
+- tdp_table->usSmallPowerLimit = fijitable->usSmallPowerLimit;
+- tdp_table->usLowCACLeakage = fijitable->usLowCACLeakage;
+- tdp_table->usHighCACLeakage = fijitable->usHighCACLeakage;
++ tdp_table->usTDP = le16_to_cpu(fijitable->usTDP);
++ tdp_table->usConfigurableTDP = le16_to_cpu(fijitable->usConfigurableTDP);
++ tdp_table->usTDC = le16_to_cpu(fijitable->usTDC);
++ tdp_table->usBatteryPowerLimit = le16_to_cpu(fijitable->usBatteryPowerLimit);
++ tdp_table->usSmallPowerLimit = le16_to_cpu(fijitable->usSmallPowerLimit);
++ tdp_table->usLowCACLeakage = le16_to_cpu(fijitable->usLowCACLeakage);
++ tdp_table->usHighCACLeakage = le16_to_cpu(fijitable->usHighCACLeakage);
+ tdp_table->usMaximumPowerDeliveryLimit =
+- fijitable->usMaximumPowerDeliveryLimit;
++ le16_to_cpu(fijitable->usMaximumPowerDeliveryLimit);
+ tdp_table->usDefaultTargetOperatingTemp =
+- fijitable->usTjMax;
++ le16_to_cpu(fijitable->usTjMax);
+ tdp_table->usTargetOperatingTemp =
+- fijitable->usTjMax; /*Set the initial temp to the same as default */
++ le16_to_cpu(fijitable->usTjMax); /*Set the initial temp to the same as default */
+ tdp_table->usPowerTuneDataSetID =
+- fijitable->usPowerTuneDataSetID;
++ le16_to_cpu(fijitable->usPowerTuneDataSetID);
+ tdp_table->usSoftwareShutdownTemp =
+- fijitable->usSoftwareShutdownTemp;
++ le16_to_cpu(fijitable->usSoftwareShutdownTemp);
+ tdp_table->usClockStretchAmount =
+- fijitable->usClockStretchAmount;
++ le16_to_cpu(fijitable->usClockStretchAmount);
+ tdp_table->usTemperatureLimitHotspot =
+- fijitable->usTemperatureLimitHotspot;
++ le16_to_cpu(fijitable->usTemperatureLimitHotspot);
+ tdp_table->usTemperatureLimitLiquid1 =
+- fijitable->usTemperatureLimitLiquid1;
++ le16_to_cpu(fijitable->usTemperatureLimitLiquid1);
+ tdp_table->usTemperatureLimitLiquid2 =
+- fijitable->usTemperatureLimitLiquid2;
++ le16_to_cpu(fijitable->usTemperatureLimitLiquid2);
+ tdp_table->usTemperatureLimitVrVddc =
+- fijitable->usTemperatureLimitVrVddc;
++ le16_to_cpu(fijitable->usTemperatureLimitVrVddc);
+ tdp_table->usTemperatureLimitVrMvdd =
+- fijitable->usTemperatureLimitVrMvdd;
++ le16_to_cpu(fijitable->usTemperatureLimitVrMvdd);
+ tdp_table->usTemperatureLimitPlx =
+- fijitable->usTemperatureLimitPlx;
++ le16_to_cpu(fijitable->usTemperatureLimitPlx);
+ tdp_table->ucLiquid1_I2C_address =
+ fijitable->ucLiquid1_I2C_address;
+ tdp_table->ucLiquid2_I2C_address =
+@@ -715,12 +715,12 @@ static int get_mm_clock_voltage_table(
+ phm_ppt_v1_mm_clock_voltage_dependency_record,
+ entries, mm_table, i);
+ mm_table_record->vddcInd = mm_dependency_record->ucVddcInd;
+- mm_table_record->vddgfx_offset = mm_dependency_record->usVddgfxOffset;
+- mm_table_record->aclk = mm_dependency_record->ulAClk;
+- mm_table_record->samclock = mm_dependency_record->ulSAMUClk;
+- mm_table_record->eclk = mm_dependency_record->ulEClk;
+- mm_table_record->vclk = mm_dependency_record->ulVClk;
+- mm_table_record->dclk = mm_dependency_record->ulDClk;
++ mm_table_record->vddgfx_offset = le16_to_cpu(mm_dependency_record->usVddgfxOffset);
++ mm_table_record->aclk = le32_to_cpu(mm_dependency_record->ulAClk);
++ mm_table_record->samclock = le32_to_cpu(mm_dependency_record->ulSAMUClk);
++ mm_table_record->eclk = le32_to_cpu(mm_dependency_record->ulEClk);
++ mm_table_record->vclk = le32_to_cpu(mm_dependency_record->ulVClk);
++ mm_table_record->dclk = le32_to_cpu(mm_dependency_record->ulDClk);
+ }
+
+ *tonga_mm_table = mm_table;
+@@ -939,33 +939,33 @@ static int init_thermal_controller(
+ hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst
+ = tonga_fan_table->ucTHyst;
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMin
+- = tonga_fan_table->usTMin;
++ = le16_to_cpu(tonga_fan_table->usTMin);
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMed
+- = tonga_fan_table->usTMed;
++ = le16_to_cpu(tonga_fan_table->usTMed);
+ hwmgr->thermal_controller.advanceFanControlParameters.usTHigh
+- = tonga_fan_table->usTHigh;
++ = le16_to_cpu(tonga_fan_table->usTHigh);
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin
+- = tonga_fan_table->usPWMMin;
++ = le16_to_cpu(tonga_fan_table->usPWMMin);
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed
+- = tonga_fan_table->usPWMMed;
++ = le16_to_cpu(tonga_fan_table->usPWMMed);
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh
+- = tonga_fan_table->usPWMHigh;
++ = le16_to_cpu(tonga_fan_table->usPWMHigh);
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMax
+ = 10900; /* hard coded */
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMax
+- = tonga_fan_table->usTMax;
++ = le16_to_cpu(tonga_fan_table->usTMax);
+ hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode
+ = tonga_fan_table->ucFanControlMode;
+ hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM
+- = tonga_fan_table->usFanPWMMax;
++ = le16_to_cpu(tonga_fan_table->usFanPWMMax);
+ hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity
+ = 4836;
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity
+- = tonga_fan_table->usFanOutputSensitivity;
++ = le16_to_cpu(tonga_fan_table->usFanOutputSensitivity);
+ hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM
+- = tonga_fan_table->usFanRPMMax;
++ = le16_to_cpu(tonga_fan_table->usFanRPMMax);
+ hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit
+- = (tonga_fan_table->ulMinFanSCLKAcousticLimit / 100); /* PPTable stores it in 10Khz unit for 2 decimal places. SMC wants MHz. */
++ = (le32_to_cpu(tonga_fan_table->ulMinFanSCLKAcousticLimit) / 100); /* PPTable stores it in 10Khz unit for 2 decimal places. SMC wants MHz. */
+ hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature
+ = tonga_fan_table->ucTargetTemperature;
+ hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit
+@@ -976,50 +976,50 @@ static int init_thermal_controller(
+ hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst
+ = fiji_fan_table->ucTHyst;
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMin
+- = fiji_fan_table->usTMin;
++ = le16_to_cpu(fiji_fan_table->usTMin);
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMed
+- = fiji_fan_table->usTMed;
++ = le16_to_cpu(fiji_fan_table->usTMed);
+ hwmgr->thermal_controller.advanceFanControlParameters.usTHigh
+- = fiji_fan_table->usTHigh;
++ = le16_to_cpu(fiji_fan_table->usTHigh);
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin
+- = fiji_fan_table->usPWMMin;
++ = le16_to_cpu(fiji_fan_table->usPWMMin);
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed
+- = fiji_fan_table->usPWMMed;
++ = le16_to_cpu(fiji_fan_table->usPWMMed);
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh
+- = fiji_fan_table->usPWMHigh;
++ = le16_to_cpu(fiji_fan_table->usPWMHigh);
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMax
+- = fiji_fan_table->usTMax;
++ = le16_to_cpu(fiji_fan_table->usTMax);
+ hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode
+ = fiji_fan_table->ucFanControlMode;
+ hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM
+- = fiji_fan_table->usFanPWMMax;
++ = le16_to_cpu(fiji_fan_table->usFanPWMMax);
+ hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity
+ = 4836;
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity
+- = fiji_fan_table->usFanOutputSensitivity;
++ = le16_to_cpu(fiji_fan_table->usFanOutputSensitivity);
+ hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM
+- = fiji_fan_table->usFanRPMMax;
++ = le16_to_cpu(fiji_fan_table->usFanRPMMax);
+ hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit
+- = (fiji_fan_table->ulMinFanSCLKAcousticLimit / 100); /* PPTable stores it in 10Khz unit for 2 decimal places. SMC wants MHz. */
++ = (le32_to_cpu(fiji_fan_table->ulMinFanSCLKAcousticLimit) / 100); /* PPTable stores it in 10Khz unit for 2 decimal places. SMC wants MHz. */
+ hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature
+ = fiji_fan_table->ucTargetTemperature;
+ hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit
+ = fiji_fan_table->ucMinimumPWMLimit;
+
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainEdge
+- = fiji_fan_table->usFanGainEdge;
++ = le16_to_cpu(fiji_fan_table->usFanGainEdge);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHotspot
+- = fiji_fan_table->usFanGainHotspot;
++ = le16_to_cpu(fiji_fan_table->usFanGainHotspot);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainLiquid
+- = fiji_fan_table->usFanGainLiquid;
++ = le16_to_cpu(fiji_fan_table->usFanGainLiquid);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrVddc
+- = fiji_fan_table->usFanGainVrVddc;
++ = le16_to_cpu(fiji_fan_table->usFanGainVrVddc);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrMvdd
+- = fiji_fan_table->usFanGainVrMvdd;
++ = le16_to_cpu(fiji_fan_table->usFanGainVrMvdd);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainPlx
+- = fiji_fan_table->usFanGainPlx;
++ = le16_to_cpu(fiji_fan_table->usFanGainPlx);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHbm
+- = fiji_fan_table->usFanGainHbm;
++ = le16_to_cpu(fiji_fan_table->usFanGainHbm);
+ }
+
+ return 0;
+@@ -1256,9 +1256,9 @@ static int ppt_get_vce_state_table_entry_v1_0(struct pp_hwmgr *hwmgr, uint32_t i
+ vce_state_record->ucVCEClockIndex);
+ *flag = vce_state_record->ucFlag;
+
+- vce_state->evclk = mm_dep_record->ulEClk;
+- vce_state->ecclk = mm_dep_record->ulEClk;
+- vce_state->sclk = sclk_dep_record->ulSclk;
++ vce_state->evclk = le32_to_cpu(mm_dep_record->ulEClk);
++ vce_state->ecclk = le32_to_cpu(mm_dep_record->ulEClk);
++ vce_state->sclk = le32_to_cpu(sclk_dep_record->ulSclk);
+
+ if (vce_state_record->ucMCLKIndex >= mclk_dep_table->ucNumEntries)
+ mclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+@@ -1271,7 +1271,7 @@ static int ppt_get_vce_state_table_entry_v1_0(struct pp_hwmgr *hwmgr, uint32_t i
+ entries, mclk_dep_table,
+ vce_state_record->ucMCLKIndex);
+
+- vce_state->mclk = mclk_dep_record->ulMclk;
++ vce_state->mclk = le32_to_cpu(mclk_dep_record->ulMclk);
+ return 0;
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5094-drm-amdgpu-pp-endian-fixes-for-processpptables.c.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5094-drm-amdgpu-pp-endian-fixes-for-processpptables.c.patch
new file mode 100644
index 00000000..1ce14aaa
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5094-drm-amdgpu-pp-endian-fixes-for-processpptables.c.patch
@@ -0,0 +1,126 @@
+From 24469ba23f40c668045da0fb62e3462ee0fa1bd9 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Tue, 7 Aug 2018 16:30:50 -0500
+Subject: [PATCH 5094/5725] drm/amdgpu/pp: endian fixes for processpptables.c
+
+Properly swap when reading from the vbios.
+
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../gpu/drm/amd/powerplay/hwmgr/processpptables.c | 30 ++++++++++++----------
+ 1 file changed, 16 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
+index 925e171..77c1467 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
+@@ -757,8 +757,8 @@ static int init_non_clock_fields(struct pp_hwmgr *hwmgr,
+ ps->validation.supportedPowerLevels = pnon_clock_info->ucRequiredPower;
+
+ if (ATOM_PPLIB_NONCLOCKINFO_VER1 < version) {
+- ps->uvd_clocks.VCLK = pnon_clock_info->ulVCLK;
+- ps->uvd_clocks.DCLK = pnon_clock_info->ulDCLK;
++ ps->uvd_clocks.VCLK = le32_to_cpu(pnon_clock_info->ulVCLK);
++ ps->uvd_clocks.DCLK = le32_to_cpu(pnon_clock_info->ulDCLK);
+ } else {
+ ps->uvd_clocks.VCLK = 0;
+ ps->uvd_clocks.DCLK = 0;
+@@ -937,8 +937,9 @@ int pp_tables_get_entry(struct pp_hwmgr *hwmgr,
+ if (entry_index > powerplay_table->ucNumStates)
+ return -1;
+
+- pstate_entry = (ATOM_PPLIB_STATE *)((unsigned long)powerplay_table + powerplay_table->usStateArrayOffset +
+- entry_index * powerplay_table->ucStateEntrySize);
++ pstate_entry = (ATOM_PPLIB_STATE *)((unsigned long)powerplay_table +
++ le16_to_cpu(powerplay_table->usStateArrayOffset) +
++ entry_index * powerplay_table->ucStateEntrySize);
+
+ pnon_clock_info = (ATOM_PPLIB_NONCLOCK_INFO *)((unsigned long)powerplay_table +
+ le16_to_cpu(powerplay_table->usNonClockInfoArrayOffset) +
+@@ -1063,13 +1064,13 @@ static int init_overdrive_limits(struct pp_hwmgr *hwmgr,
+ &size, &frev, &crev);
+
+ if ((fw_info->ucTableFormatRevision == 1)
+- && (fw_info->usStructureSize >= sizeof(ATOM_FIRMWARE_INFO_V1_4)))
++ && (le16_to_cpu(fw_info->usStructureSize) >= sizeof(ATOM_FIRMWARE_INFO_V1_4)))
+ result = init_overdrive_limits_V1_4(hwmgr,
+ powerplay_table,
+ (const ATOM_FIRMWARE_INFO_V1_4 *)fw_info);
+
+ else if ((fw_info->ucTableFormatRevision == 2)
+- && (fw_info->usStructureSize >= sizeof(ATOM_FIRMWARE_INFO_V2_1)))
++ && (le16_to_cpu(fw_info->usStructureSize) >= sizeof(ATOM_FIRMWARE_INFO_V2_1)))
+ result = init_overdrive_limits_V2_1(hwmgr,
+ powerplay_table,
+ (const ATOM_FIRMWARE_INFO_V2_1 *)fw_info);
+@@ -1303,7 +1304,7 @@ static int init_clock_voltage_dependency(struct pp_hwmgr *hwmgr,
+ if (0 != powerplay_table4->usVddcDependencyOnSCLKOffset) {
+ table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
+ (((unsigned long) powerplay_table4) +
+- powerplay_table4->usVddcDependencyOnSCLKOffset);
++ le16_to_cpu(powerplay_table4->usVddcDependencyOnSCLKOffset));
+ result = get_clock_voltage_dependency_table(hwmgr,
+ &hwmgr->dyn_state.vddc_dependency_on_sclk, table);
+ }
+@@ -1311,7 +1312,7 @@ static int init_clock_voltage_dependency(struct pp_hwmgr *hwmgr,
+ if (result == 0 && (0 != powerplay_table4->usVddciDependencyOnMCLKOffset)) {
+ table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
+ (((unsigned long) powerplay_table4) +
+- powerplay_table4->usVddciDependencyOnMCLKOffset);
++ le16_to_cpu(powerplay_table4->usVddciDependencyOnMCLKOffset));
+ result = get_clock_voltage_dependency_table(hwmgr,
+ &hwmgr->dyn_state.vddci_dependency_on_mclk, table);
+ }
+@@ -1319,7 +1320,7 @@ static int init_clock_voltage_dependency(struct pp_hwmgr *hwmgr,
+ if (result == 0 && (0 != powerplay_table4->usVddcDependencyOnMCLKOffset)) {
+ table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
+ (((unsigned long) powerplay_table4) +
+- powerplay_table4->usVddcDependencyOnMCLKOffset);
++ le16_to_cpu(powerplay_table4->usVddcDependencyOnMCLKOffset));
+ result = get_clock_voltage_dependency_table(hwmgr,
+ &hwmgr->dyn_state.vddc_dependency_on_mclk, table);
+ }
+@@ -1327,7 +1328,7 @@ static int init_clock_voltage_dependency(struct pp_hwmgr *hwmgr,
+ if (result == 0 && (0 != powerplay_table4->usMaxClockVoltageOnDCOffset)) {
+ limit_table = (ATOM_PPLIB_Clock_Voltage_Limit_Table *)
+ (((unsigned long) powerplay_table4) +
+- powerplay_table4->usMaxClockVoltageOnDCOffset);
++ le16_to_cpu(powerplay_table4->usMaxClockVoltageOnDCOffset));
+ result = get_clock_voltage_limit(hwmgr,
+ &hwmgr->dyn_state.max_clock_voltage_on_dc, limit_table);
+ }
+@@ -1346,7 +1347,7 @@ static int init_clock_voltage_dependency(struct pp_hwmgr *hwmgr,
+ if (result == 0 && (0 != powerplay_table4->usMvddDependencyOnMCLKOffset)) {
+ table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
+ (((unsigned long) powerplay_table4) +
+- powerplay_table4->usMvddDependencyOnMCLKOffset);
++ le16_to_cpu(powerplay_table4->usMvddDependencyOnMCLKOffset));
+ result = get_clock_voltage_dependency_table(hwmgr,
+ &hwmgr->dyn_state.mvdd_dependency_on_mclk, table);
+ }
+@@ -1569,7 +1570,8 @@ static int get_vce_state_table_entry(struct pp_hwmgr *hwmgr,
+
+ const VCEClockInfoArray *vce_clock_info_array = (const VCEClockInfoArray *)(((unsigned long) powerplay_table) + vce_clock_info_array_offset);
+
+- const ClockInfoArray *clock_arrays = (ClockInfoArray *)(((unsigned long)powerplay_table) + powerplay_table->usClockInfoArrayOffset);
++ const ClockInfoArray *clock_arrays = (ClockInfoArray *)(((unsigned long)powerplay_table) +
++ le16_to_cpu(powerplay_table->usClockInfoArrayOffset));
+
+ const ATOM_PPLIB_VCE_State_Record *record = &vce_state_table->entries[i];
+
+@@ -1579,8 +1581,8 @@ static int get_vce_state_table_entry(struct pp_hwmgr *hwmgr,
+
+ *flag = (record->ucClockInfoIndex >> NUM_BITS_CLOCK_INFO_ARRAY_INDEX);
+
+- vce_state->evclk = ((uint32_t)vce_clock_info->ucEVClkHigh << 16) | vce_clock_info->usEVClkLow;
+- vce_state->ecclk = ((uint32_t)vce_clock_info->ucECClkHigh << 16) | vce_clock_info->usECClkLow;
++ vce_state->evclk = ((uint32_t)vce_clock_info->ucEVClkHigh << 16) | le16_to_cpu(vce_clock_info->usEVClkLow);
++ vce_state->ecclk = ((uint32_t)vce_clock_info->ucECClkHigh << 16) | le16_to_cpu(vce_clock_info->usECClkLow);
+
+ *clock_info = (void *)((unsigned long)(clock_arrays->clockInfo) + (clockInfoIndex * clock_arrays->ucEntrySize));
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5095-drm-amdgpu-add-emit-reg-write-reg-wait-for-vcn-jpeg.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5095-drm-amdgpu-add-emit-reg-write-reg-wait-for-vcn-jpeg.patch
new file mode 100644
index 00000000..e6246f78
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5095-drm-amdgpu-add-emit-reg-write-reg-wait-for-vcn-jpeg.patch
@@ -0,0 +1,31 @@
+From fd8759636d8963b50c77452946d53cd27172a73b Mon Sep 17 00:00:00 2001
+From: Boyuan Zhang <boyuan.zhang@amd.com>
+Date: Wed, 11 Jul 2018 14:40:18 -0400
+Subject: [PATCH 5095/5725] drm/amdgpu: add emit reg write reg wait for vcn
+ jpeg
+
+The emit_reg_write_reg_wait function was not assigned for vcn jpeg.
+This patch adds it back.
+
+Signed-off-by: Boyuan Zhang <boyuan.zhang@amd.com>
+Reviewed-by: Leo Liu <leo.liu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index 90103b0..284b99d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -1770,6 +1770,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_jpeg_ring_vm_funcs = {
+ .end_use = amdgpu_vcn_ring_end_use,
+ .emit_wreg = vcn_v1_0_jpeg_ring_emit_wreg,
+ .emit_reg_wait = vcn_v1_0_jpeg_ring_emit_reg_wait,
++ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ };
+
+ static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5096-drm-amdgpu-add-system-interrupt-register-offset-head.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5096-drm-amdgpu-add-system-interrupt-register-offset-head.patch
new file mode 100644
index 00000000..f97cc8d2
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5096-drm-amdgpu-add-system-interrupt-register-offset-head.patch
@@ -0,0 +1,31 @@
+From 846c6438f6db289a3f7a1580fe49d27cb6acef70 Mon Sep 17 00:00:00 2001
+From: Boyuan Zhang <boyuan.zhang@amd.com>
+Date: Wed, 18 Jul 2018 16:13:29 -0400
+Subject: [PATCH 5096/5725] drm/amdgpu: add system interrupt register offset
+ header
+
+Add new register offset for enabling system interrupt.
+
+Signed-off-by: Boyuan Zhang <boyuan.zhang@amd.com>
+Acked-by: Leo Liu <leo.liu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h
+index fe0cbaa..216a401 100644
+--- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h
++++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h
+@@ -307,6 +307,8 @@
+ #define mmUVD_LMI_CTRL2_BASE_IDX 1
+ #define mmUVD_MASTINT_EN 0x0540
+ #define mmUVD_MASTINT_EN_BASE_IDX 1
++#define mmUVD_SYS_INT_EN 0x0541
++#define mmUVD_SYS_INT_EN_BASE_IDX 1
+ #define mmJPEG_CGC_CTRL 0x0565
+ #define mmJPEG_CGC_CTRL_BASE_IDX 1
+ #define mmUVD_LMI_CTRL 0x0566
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5097-drm-amdgpu-add-system-interrupt-mask-for-jrbc.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5097-drm-amdgpu-add-system-interrupt-mask-for-jrbc.patch
new file mode 100644
index 00000000..25347098
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5097-drm-amdgpu-add-system-interrupt-mask-for-jrbc.patch
@@ -0,0 +1,30 @@
+From b3bd77cd53157ec789042d3ea65baaf6e1092cae Mon Sep 17 00:00:00 2001
+From: Boyuan Zhang <boyuan.zhang@amd.com>
+Date: Wed, 18 Jul 2018 16:24:18 -0400
+Subject: [PATCH 5097/5725] drm/amdgpu: add system interrupt mask for jrbc
+
+Add new mask for enabling system interrupt for jrbc.
+
+Signed-off-by: Boyuan Zhang <boyuan.zhang@amd.com>
+Acked-by: Leo Liu <leo.liu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h
+index d6ba269..124383d 100644
+--- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h
++++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h
+@@ -982,6 +982,8 @@
+ #define UVD_MASTINT_EN__VCPU_EN_MASK 0x00000002L
+ #define UVD_MASTINT_EN__SYS_EN_MASK 0x00000004L
+ #define UVD_MASTINT_EN__INT_OVERRUN_MASK 0x007FFFF0L
++//UVD_SYS_INT_EN
++#define UVD_SYS_INT_EN__UVD_JRBC_EN_MASK 0x00000010L
+ //JPEG_CGC_CTRL
+ #define JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT 0x0
+ #define JPEG_CGC_CTRL__JPEG2_MODE__SHIFT 0x1
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5098-drm-amdgpu-enable-system-interrupt-for-jrbc.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5098-drm-amdgpu-enable-system-interrupt-for-jrbc.patch
new file mode 100644
index 00000000..45fe2594
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5098-drm-amdgpu-enable-system-interrupt-for-jrbc.patch
@@ -0,0 +1,42 @@
+From 90b12e4e2fae259d1954c03628768a7ffcd2c211 Mon Sep 17 00:00:00 2001
+From: Boyuan Zhang <boyuan.zhang@amd.com>
+Date: Wed, 18 Jul 2018 16:25:42 -0400
+Subject: [PATCH 5098/5725] drm/amdgpu: enable system interrupt for jrbc
+
+Enable system interrupt for jrbc during engine starting time.
+
+Signed-off-by: Boyuan Zhang <boyuan.zhang@amd.com>
+Acked-by: Leo Liu <leo.liu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index 284b99d..a45dcd8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -724,6 +724,11 @@ static int vcn_v1_0_start(struct amdgpu_device *adev)
+ (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
+ ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
+
++ /* enable system interrupt for JRBC, TODO: move to set interrupt*/
++ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SYS_INT_EN),
++ UVD_SYS_INT_EN__UVD_JRBC_EN_MASK,
++ ~UVD_SYS_INT_EN__UVD_JRBC_EN_MASK);
++
+ /* clear the bit 4 of VCN_STATUS */
+ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0,
+ ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
+@@ -1802,7 +1807,7 @@ static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
+
+ static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
+ {
+- adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 1;
++ adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 2;
+ adev->vcn.irq.funcs = &vcn_v1_0_irq_funcs;
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5099-drm-amdgpu-add-emit-trap-for-vcn-jpeg.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5099-drm-amdgpu-add-emit-trap-for-vcn-jpeg.patch
new file mode 100644
index 00000000..79db2c5c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5099-drm-amdgpu-add-emit-trap-for-vcn-jpeg.patch
@@ -0,0 +1,32 @@
+From 515913d67a154575286b2454ecdf9727b49c96fe Mon Sep 17 00:00:00 2001
+From: Boyuan Zhang <boyuan.zhang@amd.com>
+Date: Wed, 18 Jul 2018 16:26:28 -0400
+Subject: [PATCH 5099/5725] drm/amdgpu: add emit trap for vcn jpeg
+
+Add emit trap command in jpeg emit fence call.
+
+Signed-off-by: Boyuan Zhang <boyuan.zhang@amd.com>
+Acked-by: Leo Liu <leo.liu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index a45dcd8..4221312 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -1355,6 +1355,10 @@ static void vcn_v1_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u6
+ amdgpu_ring_write(ring,
+ PACKETJ(0, 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x1);
++
++ /* emit trap */
++ amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7));
++ amdgpu_ring_write(ring, 0);
+ }
+
+ /**
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5100-drm-amdgpu-fix-emit-frame-size-and-comments-for-jpeg.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5100-drm-amdgpu-fix-emit-frame-size-and-comments-for-jpeg.patch
new file mode 100644
index 00000000..7188ad12
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5100-drm-amdgpu-fix-emit-frame-size-and-comments-for-jpeg.patch
@@ -0,0 +1,36 @@
+From 0c46ca9688a925f46b5dfdceda88e676cb329564 Mon Sep 17 00:00:00 2001
+From: Boyuan Zhang <boyuan.zhang@amd.com>
+Date: Wed, 18 Jul 2018 16:29:29 -0400
+Subject: [PATCH 5100/5725] drm/amdgpu: fix emit frame size and comments for
+ jpeg
+
+Fix vcn jpeg ring emit fence size in dword, and fix the naming in comments.
+
+Signed-off-by: Boyuan Zhang <boyuan.zhang@amd.com>
+Acked-by: Leo Liu <leo.liu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index 4221312..9108230 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -1762,10 +1762,10 @@ static const struct amdgpu_ring_funcs vcn_v1_0_jpeg_ring_vm_funcs = {
+ 6 + 6 + /* hdp invalidate / flush */
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+- 8 + /* vcn_v1_0_dec_ring_emit_vm_flush */
+- 14 + 14 + /* vcn_v1_0_dec_ring_emit_fence x2 vm fence */
++ 8 + /* vcn_v1_0_jpeg_ring_emit_vm_flush */
++ 26 + 26 + /* vcn_v1_0_jpeg_ring_emit_fence x2 vm fence */
+ 6,
+- .emit_ib_size = 22, /* vcn_v1_0_dec_ring_emit_ib */
++ .emit_ib_size = 22, /* vcn_v1_0_jpeg_ring_emit_ib */
+ .emit_ib = vcn_v1_0_jpeg_ring_emit_ib,
+ .emit_fence = vcn_v1_0_jpeg_ring_emit_fence,
+ .emit_vm_flush = vcn_v1_0_jpeg_ring_emit_vm_flush,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5101-drm-amdgpu-powerplay-check-vrefresh-when-when-changi.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5101-drm-amdgpu-powerplay-check-vrefresh-when-when-changi.patch
new file mode 100644
index 00000000..e456e823
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5101-drm-amdgpu-powerplay-check-vrefresh-when-when-changi.patch
@@ -0,0 +1,129 @@
+From 3e6308fb44e64513771f433769df77098134bf27 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 9 Aug 2018 14:24:08 -0500
+Subject: [PATCH 5101/5725] drm/amdgpu/powerplay: check vrefresh when
+ changing displays
+
+Compare the current vrefresh in addition to the number of displays
+when determining whether or not the smu needs updates when changing
+modes. The SMU needs to be updated if the vbi timeout changes due
+to a different refresh rate. Fixes flickering around mode changes
+in some cases on polaris parts.
+
+Reviewed-by: Rex Zhu <Rex.Zhu@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 3 +++
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h | 1 +
+ drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c | 1 +
+ drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c | 3 ++-
+ drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c | 1 +
+ drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | 1 +
+ drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c | 1 +
+ drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c | 1 +
+ 8 files changed, 11 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+index d785b76..15e110f 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+@@ -4132,6 +4132,9 @@ smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
+ if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
+ is_update_required = true;
+
++ if (data->display_timing.vrefresh != hwmgr->display_config->vrefresh)
++ is_update_required = true;
++
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
+ if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr &&
+ (data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK ||
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
+index 3784ce6..69d361f 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
+@@ -156,6 +156,7 @@ struct smu7_vbios_boot_state {
+ struct smu7_display_timing {
+ uint32_t min_clock_in_sr;
+ uint32_t num_existing_displays;
++ uint32_t vrefresh;
+ };
+
+ struct smu7_dpmlevel_enable_mask {
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
+index fbe3ef4..18643e0 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
+@@ -1231,6 +1231,7 @@ static int ci_populate_single_memory_level(
+ memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ data->display_timing.num_existing_displays = hwmgr->display_config->num_display;
++ data->display_timing.vrefresh = hwmgr->display_config->vrefresh;
+
+ /* stutter mode not support on ci */
+
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+index 18048f8..ec14798 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+@@ -1210,7 +1210,8 @@ static int fiji_populate_single_memory_level(struct pp_hwmgr *hwmgr,
+ * PECI_GetNumberOfActiveDisplays(hwmgr->pPECI,
+ * &(data->DisplayTiming.numExistingDisplays));
+ */
+- data->display_timing.num_existing_displays = 1;
++ data->display_timing.num_existing_displays = hwmgr->display_config->num_display;
++ data->display_timing.vrefresh = hwmgr->display_config->vrefresh;
+
+ if (mclk_stutter_mode_threshold &&
+ (clock <= mclk_stutter_mode_threshold) &&
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
+index 9299b93..73aa368 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
+@@ -1280,6 +1280,7 @@ static int iceland_populate_single_memory_level(
+ memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ data->display_timing.num_existing_displays = hwmgr->display_config->num_display;
++ data->display_timing.vrefresh = hwmgr->display_config->vrefresh;
+
+ /* stutter mode not support on iceland */
+
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+index 1276f16..872d382 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+@@ -1103,6 +1103,7 @@ static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
+ mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ data->display_timing.num_existing_displays = hwmgr->display_config->num_display;
++ data->display_timing.vrefresh = hwmgr->display_config->vrefresh;
+
+ if (mclk_stutter_mode_threshold &&
+ (clock <= mclk_stutter_mode_threshold) &&
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+index 7dabc6c..ae8378e 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+@@ -1004,6 +1004,7 @@ static int tonga_populate_single_memory_level(
+ memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ data->display_timing.num_existing_displays = hwmgr->display_config->num_display;
++ data->display_timing.vrefresh = hwmgr->display_config->vrefresh;
+
+ if ((mclk_stutter_mode_threshold != 0) &&
+ (memory_clock <= mclk_stutter_mode_threshold) &&
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
+index 57420d7..3d415fa 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
+@@ -1009,6 +1009,7 @@ static int vegam_populate_single_memory_level(struct pp_hwmgr *hwmgr,
+ mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ data->display_timing.num_existing_displays = hwmgr->display_config->num_display;
++ data->display_timing.vrefresh = hwmgr->display_config->vrefresh;
+
+ if (mclk_stutter_mode_threshold &&
+ (clock <= mclk_stutter_mode_threshold) &&
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5102-drm-amdgpu-Cancel-gfx-off-delay-work-when-driver-fin.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5102-drm-amdgpu-Cancel-gfx-off-delay-work-when-driver-fin.patch
new file mode 100644
index 00000000..f00e9a7e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5102-drm-amdgpu-Cancel-gfx-off-delay-work-when-driver-fin.patch
@@ -0,0 +1,39 @@
+From efad2802b48caabd163369cf1807e2e0098b82c9 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Thu, 9 Aug 2018 15:26:06 +0800
+Subject: [PATCH 5102/5725] drm/amdgpu: Cancel gfx off delay work when driver
+ fini/suspend
+
+There may be gfx off delay work pending at suspend or driver
+unload time, so it needs to be cancelled first.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 5e47f2a..65cf144 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1879,6 +1879,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
+ return r;
+ }
+ amdgpu_gfx_off_ctrl(adev, false);
++ cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
+ r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
+ /* XXX handle errors */
+ if (r) {
+@@ -2061,6 +2062,7 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
+
+ /* call smu to disable gfx off feature first when suspend */
+ amdgpu_gfx_off_ctrl(adev, false);
++ cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
+
+ for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+ if (!adev->ip_blocks[i].status.valid)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5103-drm-amd-display-dc-3.1.61.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5103-drm-amd-display-dc-3.1.61.patch
new file mode 100644
index 00000000..ca8fc613
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5103-drm-amd-display-dc-3.1.61.patch
@@ -0,0 +1,29 @@
+From e46d029ee62ae7acb6f59cecddad6796d3827e02 Mon Sep 17 00:00:00 2001
+From: Tony Cheng <tony.cheng@amd.com>
+Date: Wed, 18 Jul 2018 20:28:54 -0400
+Subject: [PATCH 5103/5725] drm/amd/display: dc 3.1.61
+
+Signed-off-by: Tony Cheng <tony.cheng@amd.com>
+Reviewed-by: Steven Chiu <Steven.Chiu@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 263d9f3..67203c1 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -38,7 +38,7 @@
+ #include "inc/compressor.h"
+ #include "dml/display_mode_lib.h"
+
+-#define DC_VER "3.1.60"
++#define DC_VER "3.1.61"
+
+ #define MAX_SURFACES 3
+ #define MAX_STREAMS 6
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5104-drm-amd-display-fix-PIP-bugs-on-Dal3.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5104-drm-amd-display-fix-PIP-bugs-on-Dal3.patch
new file mode 100644
index 00000000..5b1f603f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5104-drm-amd-display-fix-PIP-bugs-on-Dal3.patch
@@ -0,0 +1,266 @@
+From 1223567dced802100958531da705c3e95a8436a7 Mon Sep 17 00:00:00 2001
+From: Gloria Li <geling.li@amd.com>
+Date: Thu, 26 Jul 2018 11:32:14 -0400
+Subject: [PATCH 5104/5725] drm/amd/display: fix PIP bugs on Dal3
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+[Why]
+There are outstanding bugs for PIP in Dal3:
+-Crash when toggling PIP visibility
+-Global Alpha is not working, Adjusting global alpha
+ doesn’t have an effect
+-Cursor is not working with pip plane and pipe splits
+-One flash occurs when cursor enters PIP plane from
+ top/bottom
+-Crash when moving PIP plane off the screen
+
+[How]
+Resolve divide by 0 error
+Implement global alpha
+Program cursor on all pipes
+Add dst rects' x and y offsets into cursor position
+Disable cursor when it is beyond bottom/top edge
+
+Signed-off-by: Gloria Li <geling.li@amd.com>
+Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc.c | 3 +++
+ drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 9 ++++++---
+ drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 2 --
+ drivers/gpu/drm/amd/display/dc/dc.h | 5 +++++
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | 10 +++++++++-
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h | 3 ++-
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 7 +++++++
+ .../gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 18 ++++++++++++------
+ drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h | 3 ++-
+ 9 files changed, 46 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index e29417c..3727085 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -1119,6 +1119,9 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa
+ if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha)
+ update_flags->bits.per_pixel_alpha_change = 1;
+
++ if (u->plane_info->global_alpha_value != u->surface->global_alpha_value)
++ update_flags->bits.global_alpha_change = 1;
++
+ if (u->plane_info->dcc.enable != u->surface->dcc.enable
+ || u->plane_info->dcc.grph.independent_64b_blks != u->surface->dcc.grph.independent_64b_blks
+ || u->plane_info->dcc.grph.meta_pitch != u->surface->dcc.grph.meta_pitch)
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index bd525e8..d02dac1 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -570,8 +570,10 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
+ data->viewport.width = (data->viewport.width + 1) / 2;
+ data->viewport_c.width = (data->viewport_c.width + 1) / 2;
+ } else if (pri_split) {
+- data->viewport.width /= 2;
+- data->viewport_c.width /= 2;
++ if (data->viewport.width > 1)
++ data->viewport.width /= 2;
++ if (data->viewport_c.width > 1)
++ data->viewport_c.width /= 2;
+ }
+
+ if (plane_state->rotation == ROTATION_ANGLE_90 ||
+@@ -651,7 +653,8 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx, struct rect *recout_full
+ pipe_ctx->plane_res.scl_data.recout.width =
+ (pipe_ctx->plane_res.scl_data.recout.width + 1) / 2;
+ } else {
+- pipe_ctx->plane_res.scl_data.recout.width /= 2;
++ if (pipe_ctx->plane_res.scl_data.recout.width > 1)
++ pipe_ctx->plane_res.scl_data.recout.width /= 2;
+ }
+ }
+ /* Unclipped recout offset = stream dst offset + ((surf dst offset - stream surf_src offset)
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+index fdcc8ab..2ac848a1 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+@@ -205,8 +205,6 @@ bool dc_stream_set_cursor_attributes(
+
+ if (pipe_ctx->stream != stream)
+ continue;
+- if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
+- continue;
+
+ if (!pipe_to_program) {
+ pipe_to_program = pipe_ctx;
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 67203c1..61d3755 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -442,6 +442,7 @@ union surface_update_flags {
+ uint32_t color_space_change:1;
+ uint32_t horizontal_mirror_change:1;
+ uint32_t per_pixel_alpha_change:1;
++ uint32_t global_alpha_change:1;
+ uint32_t rotation_change:1;
+ uint32_t swizzle_change:1;
+ uint32_t scaling_change:1;
+@@ -496,6 +497,8 @@ struct dc_plane_state {
+
+ bool is_tiling_rotated;
+ bool per_pixel_alpha;
++ bool global_alpha;
++ int global_alpha_value;
+ bool visible;
+ bool flip_immediate;
+ bool horizontal_mirror;
+@@ -522,6 +525,8 @@ struct dc_plane_info {
+ bool horizontal_mirror;
+ bool visible;
+ bool per_pixel_alpha;
++ bool global_alpha;
++ int global_alpha_value;
+ bool input_csc_enabled;
+ };
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+index 1d64255..5f2054a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+@@ -444,10 +444,12 @@ void dpp1_set_cursor_position(
+ struct dpp *dpp_base,
+ const struct dc_cursor_position *pos,
+ const struct dc_cursor_mi_param *param,
+- uint32_t width)
++ uint32_t width,
++ uint32_t height)
+ {
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ int src_x_offset = pos->x - pos->x_hotspot - param->viewport.x;
++ int src_y_offset = pos->y - pos->y_hotspot - param->viewport.y;
+ uint32_t cur_en = pos->enable ? 1 : 0;
+
+ if (src_x_offset >= (int)param->viewport.width)
+@@ -456,6 +458,12 @@ void dpp1_set_cursor_position(
+ if (src_x_offset + (int)width <= 0)
+ cur_en = 0; /* not visible beyond left edge*/
+
++ if (src_y_offset >= (int)param->viewport.height)
++ cur_en = 0; /* not visible beyond bottom edge*/
++
++ if (src_y_offset < 0)
++ cur_en = 0; /* not visible beyond top edge*/
++
+ REG_UPDATE(CURSOR0_CONTROL,
+ CUR0_ENABLE, cur_en);
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+index e2889e6..282e22f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+@@ -1374,7 +1374,8 @@ void dpp1_set_cursor_position(
+ struct dpp *dpp_base,
+ const struct dc_cursor_position *pos,
+ const struct dc_cursor_mi_param *param,
+- uint32_t width);
++ uint32_t width,
++ uint32_t height);
+
+ void dpp1_cnv_set_optional_cursor_attributes(
+ struct dpp *dpp_base,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+index fa1bacd..ec4a5f6 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+@@ -1070,6 +1070,7 @@ void hubp1_cursor_set_position(
+ {
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+ int src_x_offset = pos->x - pos->x_hotspot - param->viewport.x;
++ int src_y_offset = pos->y - pos->y_hotspot - param->viewport.y;
+ int x_hotspot = pos->x_hotspot;
+ int y_hotspot = pos->y_hotspot;
+ uint32_t dst_x_offset;
+@@ -1113,6 +1114,12 @@ void hubp1_cursor_set_position(
+ if (src_x_offset + (int)hubp->curs_attr.width <= 0)
+ cur_en = 0; /* not visible beyond left edge*/
+
++ if (src_y_offset >= (int)param->viewport.height)
++ cur_en = 0; /* not visible beyond bottom edge*/
++
++ if (src_y_offset < 0) //+ (int)hubp->curs_attr.height
++ cur_en = 0; /* not visible beyond top edge*/
++
+ if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
+ hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 82fb1f9..4b8bedb 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -1947,9 +1947,13 @@ static void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
+
+ blnd_cfg.overlap_only = false;
+- blnd_cfg.global_alpha = 0xff;
+ blnd_cfg.global_gain = 0xff;
+
++ if (pipe_ctx->plane_state->global_alpha)
++ blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
++ else
++ blnd_cfg.global_alpha = 0xff;
++
+ /* DCN1.0 has output CM before MPC which seems to screw with
+ * pre-multiplied alpha.
+ */
+@@ -2064,11 +2068,13 @@ static void update_dchubp_dpp(
+ update_dpp(dpp, plane_state);
+
+ if (plane_state->update_flags.bits.full_update ||
+- plane_state->update_flags.bits.per_pixel_alpha_change)
++ plane_state->update_flags.bits.per_pixel_alpha_change ||
++ plane_state->update_flags.bits.global_alpha_change)
+ dc->hwss.update_mpcc(dc, pipe_ctx);
+
+ if (plane_state->update_flags.bits.full_update ||
+ plane_state->update_flags.bits.per_pixel_alpha_change ||
++ plane_state->update_flags.bits.global_alpha_change ||
+ plane_state->update_flags.bits.scaling_change ||
+ plane_state->update_flags.bits.position_change) {
+ update_scaler(pipe_ctx);
+@@ -2620,15 +2626,15 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
+ .mirror = pipe_ctx->plane_state->horizontal_mirror
+ };
+
++ pos_cpy.x -= pipe_ctx->plane_state->dst_rect.x;
++ pos_cpy.y -= pipe_ctx->plane_state->dst_rect.y;
++
+ if (pipe_ctx->plane_state->address.type
+ == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
+ pos_cpy.enable = false;
+
+- if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
+- pos_cpy.enable = false;
+-
+ hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
+- dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width);
++ dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
+ }
+
+ static void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+index 80a480b..e894e64 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+@@ -147,7 +147,8 @@ struct dpp_funcs {
+ struct dpp *dpp_base,
+ const struct dc_cursor_position *pos,
+ const struct dc_cursor_mi_param *param,
+- uint32_t width
++ uint32_t width,
++ uint32_t height
+ );
+ void (*dpp_set_hdr_multiplier)(
+ struct dpp *dpp_base,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5105-drm-amd-display-Add-dprefclk-value-to-dce_dccg.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5105-drm-amd-display-Add-dprefclk-value-to-dce_dccg.patch
new file mode 100644
index 00000000..5ca5a468
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5105-drm-amd-display-Add-dprefclk-value-to-dce_dccg.patch
@@ -0,0 +1,60 @@
+From c8043b0c90e040339473e167a9ae71f6c58583aa Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Mon, 30 Jul 2018 14:41:01 -0400
+Subject: [PATCH 5105/5725] drm/amd/display: Add dprefclk value to dce_dccg
+
+This allows us to avoid any VBIOS bugs when initializing clocks.
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c | 4 +++-
+ drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h | 1 +
+ 2 files changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+index 51ceb99..d52dead 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+@@ -196,7 +196,7 @@ static int dce12_get_dp_ref_freq_khz(struct dccg *clk)
+ {
+ struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
+
+- return dccg_adjust_dp_ref_freq_for_ss(clk_dce, 600000);
++ return dccg_adjust_dp_ref_freq_for_ss(clk_dce, clk_dce->dprefclk_khz);
+ }
+
+ static enum dm_pp_clocks_state dce_get_required_clocks_state(
+@@ -876,6 +876,7 @@ struct dccg *dce120_dccg_create(struct dc_context *ctx)
+ dce_dccg_construct(
+ clk_dce, ctx, NULL, NULL, NULL);
+
++ clk_dce->dprefclk_khz = 600000;
+ clk_dce->base.funcs = &dce120_funcs;
+
+ return &clk_dce->base;
+@@ -903,6 +904,7 @@ struct dccg *dcn1_dccg_create(struct dc_context *ctx)
+ clk_dce->dprefclk_ss_divider = 1000;
+ clk_dce->ss_on_dprefclk = false;
+
++ clk_dce->dprefclk_khz = 600000;
+ if (bp->integrated_info)
+ clk_dce->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
+ if (clk_dce->dentist_vco_freq_khz == 0) {
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
+index 8be68eb..9179173 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
+@@ -90,6 +90,7 @@ struct dce_dccg {
+ int dprefclk_ss_percentage;
+ /* DPREFCLK SS percentage Divider (100 or 1000) */
+ int dprefclk_ss_divider;
++ int dprefclk_khz;
+ };
+
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5106-drm-amd-display-fix-dml-handling-of-mono8-16-pixel-f.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5106-drm-amd-display-fix-dml-handling-of-mono8-16-pixel-f.patch
new file mode 100644
index 00000000..2ada526b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5106-drm-amd-display-fix-dml-handling-of-mono8-16-pixel-f.patch
@@ -0,0 +1,39 @@
+From 15acfcb5620e9b1aba76e8a33fc57cfb0205d7df Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Tue, 17 Jul 2018 17:15:48 -0400
+Subject: [PATCH 5106/5725] drm/amd/display: fix dml handling of mono8/16 pixel
+ formats
+
+Mono formats are treated exactly the same as equivalent-bpp
+444 formats. DML validation, however, lacks a 444 8-bit format,
+while DML perf param calculation lacks mono format support.
+
+This change makes them equivalent as far as the enum is concerned,
+to avoid having to update DML.
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
+index 47c19f8..bea4e61 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
++++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
+@@ -40,8 +40,8 @@ enum source_format_class {
+ dm_422_8 = 5,
+ dm_422_10 = 6,
+ dm_444_8 = 7,
+- dm_mono_8,
+- dm_mono_16
++ dm_mono_8 = dm_444_8,
++ dm_mono_16 = dm_444_16
+ };
+ enum output_bpc_class {
+ dm_out_6 = 0, dm_out_8 = 1, dm_out_10 = 2, dm_out_12 = 3, dm_out_16 = 4
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5107-drm-amd-display-add-retimer-log-for-HWQ-tuning-use.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5107-drm-amd-display-add-retimer-log-for-HWQ-tuning-use.patch
new file mode 100644
index 00000000..34a2bbd1
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5107-drm-amd-display-add-retimer-log-for-HWQ-tuning-use.patch
@@ -0,0 +1,249 @@
+From b96914084a51f9c31e0d170e499c6de6c7200bae Mon Sep 17 00:00:00 2001
+From: Charlene Liu <charlene.liu@amd.com>
+Date: Tue, 31 Jul 2018 20:14:26 -0400
+Subject: [PATCH 5107/5725] drm/amd/display: add retimer log for HWQ tuning
+ use.
+
+Signed-off-by: Charlene Liu <charlene.liu@amd.com>
+Reviewed-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_link.c | 59 ++++++++++++++++++++++
+ drivers/gpu/drm/amd/display/include/logger_types.h | 3 +-
+ 2 files changed, 61 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index 3bec439..7af0f31 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -54,6 +54,9 @@
+ DC_LOG_HW_HOTPLUG( \
+ __VA_ARGS__)
+
++#define RETIMER_REDRIVER_INFO(...) \
++ DC_LOG_RETIMER_REDRIVER( \
++ __VA_ARGS__)
+ /*******************************************************************************
+ * Private structures
+ ******************************************************************************/
+@@ -1547,6 +1550,7 @@ static void write_i2c_retimer_setting(
+ uint8_t value = 0;
+ int i = 0;
+ bool i2c_success = false;
++ DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
+
+ memset(&buffer, 0, sizeof(buffer));
+
+@@ -1560,6 +1564,9 @@ static void write_i2c_retimer_setting(
+ buffer[1] = settings->reg_settings[i].i2c_reg_val;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
++ RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
++ offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n",
++ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+
+ if (!i2c_success)
+ /* Write failure */
+@@ -1590,6 +1597,9 @@ static void write_i2c_retimer_setting(
+ buffer[1] = value | apply_rx_tx_change;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
++ RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
++ offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
++ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+@@ -1607,6 +1617,9 @@ static void write_i2c_retimer_setting(
+ buffer[1] = settings->reg_settings_6g[i].i2c_reg_val;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
++ RETIMER_REDRIVER_INFO("above 340Mhz: retimer write to slave_address = 0x%x,\
++ offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
++ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+
+ if (!i2c_success)
+ /* Write failure */
+@@ -1637,6 +1650,9 @@ static void write_i2c_retimer_setting(
+ buffer[1] = value | apply_rx_tx_change;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
++ RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
++ offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
++ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+@@ -1653,6 +1669,9 @@ static void write_i2c_retimer_setting(
+ buffer[1] = 0x01;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
++ RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
++ offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
++ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+@@ -1662,6 +1681,9 @@ static void write_i2c_retimer_setting(
+ buffer[1] = 0x23;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
++ RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
++ offset = 0x%d, reg_val = 0x%d, i2c_success = %d\n",
++ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+@@ -1671,6 +1693,9 @@ static void write_i2c_retimer_setting(
+ buffer[1] = 0x00;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
++ RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
++ offset = 0x%d, reg_val = 0x%d, i2c_success = %d\n",
++ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+@@ -1686,6 +1711,7 @@ static void write_i2c_default_retimer_setting(
+ uint8_t slave_address = (0xBA >> 1);
+ uint8_t buffer[2];
+ bool i2c_success = false;
++ DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
+
+ memset(&buffer, 0, sizeof(buffer));
+
+@@ -1695,6 +1721,9 @@ static void write_i2c_default_retimer_setting(
+ buffer[1] = 0x13;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
++ RETIMER_REDRIVER_INFO("retimer writes default setting to slave_address = 0x%x,\
++ offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
++ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+@@ -1704,6 +1733,9 @@ static void write_i2c_default_retimer_setting(
+ buffer[1] = 0x17;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
++ RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\
++ offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
++ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+@@ -1713,6 +1745,9 @@ static void write_i2c_default_retimer_setting(
+ buffer[1] = is_over_340mhz ? 0xDA : 0xD8;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
++ RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\
++ offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
++ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+@@ -1722,6 +1757,9 @@ static void write_i2c_default_retimer_setting(
+ buffer[1] = 0x17;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
++ RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\
++ offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n",
++ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+@@ -1731,6 +1769,9 @@ static void write_i2c_default_retimer_setting(
+ buffer[1] = is_over_340mhz ? 0x1D : 0x91;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
++ RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\
++ offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
++ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+@@ -1740,6 +1781,9 @@ static void write_i2c_default_retimer_setting(
+ buffer[1] = 0x17;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
++ RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\
++ offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
++ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+@@ -1753,6 +1797,9 @@ static void write_i2c_default_retimer_setting(
+ buffer[1] = 0x01;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
++ RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\
++ offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
++ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+@@ -1762,6 +1809,9 @@ static void write_i2c_default_retimer_setting(
+ buffer[1] = 0x23;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
++ RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\
++ offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n",
++ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+@@ -1771,6 +1821,9 @@ static void write_i2c_default_retimer_setting(
+ buffer[1] = 0x00;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
++ RETIMER_REDRIVER_INFO("retimer write default setting to slave_addr = 0x%x,\
++ offset = 0x%x, reg_val= 0x%x, i2c_success = %d end here\n",
++ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+@@ -1784,6 +1837,7 @@ static void write_i2c_redriver_setting(
+ uint8_t slave_address = (0xF0 >> 1);
+ uint8_t buffer[16];
+ bool i2c_success = false;
++ DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
+
+ memset(&buffer, 0, sizeof(buffer));
+
+@@ -1795,6 +1849,11 @@ static void write_i2c_redriver_setting(
+
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
++ RETIMER_REDRIVER_INFO("redriver write 0 to all 16 reg offset expect following:\n\
++ \t slave_addr = 0x%x, offset[3] = 0x%x, offset[4] = 0x%x,\
++ offset[5] = 0x%x,offset[6] is_over_340mhz = 0x%x,\
++ i2c_success = %d\n",
++ slave_address, buffer[3], buffer[4], buffer[5], buffer[6], i2c_success?1:0);
+
+ if (!i2c_success)
+ /* Write failure */
+diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
+index ad3695e..bc57326 100644
+--- a/drivers/gpu/drm/amd/display/include/logger_types.h
++++ b/drivers/gpu/drm/amd/display/include/logger_types.h
+@@ -62,6 +62,7 @@
+ #define DC_LOG_EVENT_UNDERFLOW(...) DRM_DEBUG_KMS(__VA_ARGS__)
+ #define DC_LOG_IF_TRACE(...) pr_debug("[IF_TRACE]:"__VA_ARGS__)
+ #define DC_LOG_PERF_TRACE(...) DRM_DEBUG_KMS(__VA_ARGS__)
++#define DC_LOG_RETIMER_REDRIVER(...) DRM_DEBUG_KMS(__VA_ARGS__)
+
+ struct dal_logger;
+
+@@ -99,7 +100,7 @@ enum dc_log_type {
+ LOG_IF_TRACE,
+ LOG_PERF_TRACE,
+ LOG_DISPLAYSTATS,
+-
++ LOG_HDMI_RETIMER_REDRIVER,
+ LOG_SECTION_TOTAL_COUNT
+ };
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5108-drm-amd-display-Remove-redundant-non-zero-and-overfl.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5108-drm-amd-display-Remove-redundant-non-zero-and-overfl.patch
new file mode 100644
index 00000000..5de1c21c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5108-drm-amd-display-Remove-redundant-non-zero-and-overfl.patch
@@ -0,0 +1,46 @@
+From e5fccfc9ac51fa9af33871990438af78e00d8946 Mon Sep 17 00:00:00 2001
+From: "Leo (Sunpeng) Li" <sunpeng.li@amd.com>
+Date: Wed, 1 Aug 2018 10:20:53 -0400
+Subject: [PATCH 5108/5725] drm/amd/display: Remove redundant non-zero and
+ overflow check
+
+[Why]
+Unsigned int is guaranteed to be >= 0, and read_channel_reply checks for
+overflows. read_channel_reply also returns -1 on error, which is what
+dc_link_aux_transfer is expected to return on error.
+
+[How]
+Remove the if-statement. Return result of read_channel_reply directly.
+
+Signed-off-by: Leo (Sunpeng) Li <sunpeng.li@amd.com>
+Reviewed-by: Mikita Lipski <Mikita.Lipski@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c | 10 +++-------
+ 1 file changed, 3 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+index 8def0d9..506a97e 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+@@ -666,13 +666,9 @@ int dc_link_aux_transfer(struct ddc_service *ddc,
+
+ switch (operation_result) {
+ case AUX_CHANNEL_OPERATION_SUCCEEDED:
+- res = returned_bytes;
+-
+- if (res <= size && res >= 0)
+- res = aux_engine->funcs->read_channel_reply(aux_engine, size,
+- buffer, reply,
+- &status);
+-
++ res = aux_engine->funcs->read_channel_reply(aux_engine, size,
++ buffer, reply,
++ &status);
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
+ res = 0;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5109-drm-amd-display-dc-3.1.62.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5109-drm-amd-display-dc-3.1.62.patch
new file mode 100644
index 00000000..e787d8b7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5109-drm-amd-display-dc-3.1.62.patch
@@ -0,0 +1,29 @@
+From 1b5c35ec3a4c3c1684799749633e26798b64d15b Mon Sep 17 00:00:00 2001
+From: Tony Cheng <tony.cheng@amd.com>
+Date: Wed, 18 Jul 2018 20:29:13 -0400
+Subject: [PATCH 5109/5725] drm/amd/display: dc 3.1.62
+
+Signed-off-by: Tony Cheng <tony.cheng@amd.com>
+Reviewed-by: Steven Chiu <Steven.Chiu@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 61d3755..f6069a0 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -38,7 +38,7 @@
+ #include "inc/compressor.h"
+ #include "dml/display_mode_lib.h"
+
+-#define DC_VER "3.1.61"
++#define DC_VER "3.1.62"
+
+ #define MAX_SURFACES 3
+ #define MAX_STREAMS 6
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5110-drm-amdgpu-add-AVFS-control-to-PP_FEATURE_MASK.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5110-drm-amdgpu-add-AVFS-control-to-PP_FEATURE_MASK.patch
new file mode 100644
index 00000000..20b72ea5
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5110-drm-amdgpu-add-AVFS-control-to-PP_FEATURE_MASK.patch
@@ -0,0 +1,29 @@
+From 769917aef42335fcffb66188e8fceff00c18d930 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Fri, 10 Aug 2018 13:09:43 -0500
+Subject: [PATCH 5110/5725] drm/amdgpu: add AVFS control to PP_FEATURE_MASK
+
+Add a ppfeaturemask flag to disable AVFS control.
+
+Reviewed-by: Rex Zhu <Rex.Zhu@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/include/amd_shared.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
+index dc3d856..f7b9453 100644
+--- a/drivers/gpu/drm/amd/include/amd_shared.h
++++ b/drivers/gpu/drm/amd/include/amd_shared.h
+@@ -129,6 +129,7 @@ enum PP_FEATURE_MASK {
+ PP_GFXOFF_MASK = 0x8000,
+ PP_ACG_MASK = 0x10000,
+ PP_STUTTER_MODE = 0x20000,
++ PP_AVFS_MASK = 0x40000,
+ };
+
+ /**
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5111-drm-amdgpu-powerplay-smu7-enable-AVFS-control-via-pp.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5111-drm-amdgpu-powerplay-smu7-enable-AVFS-control-via-pp.patch
new file mode 100644
index 00000000..a952edb9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5111-drm-amdgpu-powerplay-smu7-enable-AVFS-control-via-pp.patch
@@ -0,0 +1,32 @@
+From 213b777d1a6300aeba781cd2687c411468dae377 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Fri, 10 Aug 2018 13:19:26 -0500
+Subject: [PATCH 5111/5725] drm/amdgpu/powerplay/smu7: enable AVFS control via
+ ppfeaturemask
+
+Allow the user to disable AVFS via ppfeaturemask for debugging.
+
+Reviewed-by: Rex Zhu <Rex.Zhu@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+index cc56a24..1234400 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+@@ -622,7 +622,8 @@ int smu7_init(struct pp_hwmgr *hwmgr)
+ return -EINVAL;
+ }
+
+- if (smum_is_hw_avfs_present(hwmgr))
++ if (smum_is_hw_avfs_present(hwmgr) &&
++ (hwmgr->feature_mask & PP_AVFS_MASK))
+ hwmgr->avfs_supported = true;
+
+ return 0;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5112-drm-amdgpu-powerplay-vega10-enable-AVFS-control-via-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5112-drm-amdgpu-powerplay-vega10-enable-AVFS-control-via-.patch
new file mode 100644
index 00000000..23231e6a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5112-drm-amdgpu-powerplay-vega10-enable-AVFS-control-via-.patch
@@ -0,0 +1,32 @@
+From 8187a7e1d15292a0691bea40498356fea4a301ba Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Fri, 10 Aug 2018 13:21:09 -0500
+Subject: [PATCH 5112/5725] drm/amdgpu/powerplay/vega10: enable AVFS control
+ via ppfeaturemask
+
+Allow the user to disable AVFS via ppfeaturemask for debugging.
+
+Reviewed-by: Rex Zhu <Rex.Zhu@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+index 61c6be2..25397a3 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+@@ -129,7 +129,8 @@ static void vega10_set_default_registry_data(struct pp_hwmgr *hwmgr)
+ data->registry_data.thermal_support = 1;
+ data->registry_data.fw_ctf_enabled = 1;
+
+- data->registry_data.avfs_support = 1;
++ data->registry_data.avfs_support =
++ hwmgr->feature_mask & PP_AVFS_MASK ? true : false;
+ data->registry_data.led_dpm_enabled = 1;
+
+ data->registry_data.vr0hot_enabled = 1;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5113-drm-amd-display-enable-ABGR-and-XBGR-formats-v4.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5113-drm-amd-display-enable-ABGR-and-XBGR-formats-v4.patch
new file mode 100644
index 00000000..dce6f031
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5113-drm-amd-display-enable-ABGR-and-XBGR-formats-v4.patch
@@ -0,0 +1,44 @@
+From 2ba3073f9533d303ff52d8cfa60599ead70163b9 Mon Sep 17 00:00:00 2001
+From: Mauro Rossi <issor.oruam@gmail.com>
+Date: Sun, 12 Aug 2018 21:43:01 +0200
+Subject: [PATCH 5113/5725] drm/amd/display: enable ABGR and XBGR formats (v4)
+
+SURFACE_PIXEL_FORMAT_GRPH_ABGR8888 is supported in amd/display/dc/dc_hw_types.h,
+and the necessary crossbar register controls to swap red and blue channels
+are already implemented in drm/amd/display/dc/dce/dce_mem_input.c.
+
+(v4) Logic to handle new formats is added only in amdgpu_dm module.
+
+Signed-off-by: Mauro Rossi <issor.oruam@gmail.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index d6d6568..3305d9d 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -2073,6 +2073,10 @@ static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
+ case DRM_FORMAT_ABGR2101010:
+ plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
+ break;
++ case DRM_FORMAT_XBGR8888:
++ case DRM_FORMAT_ABGR8888:
++ plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
++ break;
+ case DRM_FORMAT_NV21:
+ plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
+ break;
+@@ -3505,6 +3509,8 @@ static const uint32_t rgb_formats[] = {
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
++ DRM_FORMAT_XBGR8888,
++ DRM_FORMAT_ABGR8888,
+ };
+
+ static const uint32_t yuv_formats[] = {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5114-drm-amdgpu-enable-ABGR-and-XBGR-formats-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5114-drm-amdgpu-enable-ABGR-and-XBGR-formats-v2.patch
new file mode 100644
index 00000000..8d8d9326
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5114-drm-amdgpu-enable-ABGR-and-XBGR-formats-v2.patch
@@ -0,0 +1,174 @@
+From f68e8cda32e5e585c1613f025e000caa21b484f7 Mon Sep 17 00:00:00 2001
+From: Mauro Rossi <issor.oruam@gmail.com>
+Date: Sun, 12 Aug 2018 21:43:02 +0200
+Subject: [PATCH 5114/5725] drm/amdgpu: enable ABGR and XBGR formats (v2)
+
+Add support for DRM_FORMAT_{A,X}BGR8888 in amdgpu with amd dc disabled
+
+(v2) Crossbar registers are defined and used to swap red and blue channels,
+ keeping the existing coding style in each of the dce modules.
+ After setting crossbar bits in fb_swap, use bitwise OR for big endian
+ where required in DCE6 and DCE8 which do not rely on REG_SET_FIELD()
+
+Signed-off-by: Mauro Rossi <issor.oruam@gmail.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 11 +++++++++++
+ drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 11 +++++++++++
+ drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 10 ++++++++++
+ drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 10 ++++++++++
+ drivers/gpu/drm/amd/amdgpu/si_enums.h | 20 ++++++++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/sid.h | 20 ++++++++++++++++++++
+ 6 files changed, 82 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+index 34cdcc8..de88444 100644
+--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+@@ -1947,6 +1947,17 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
+ /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
+ bypass_lut = true;
+ break;
++ case DRM_FORMAT_XBGR8888:
++ case DRM_FORMAT_ABGR8888:
++ fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
++ fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
++ fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_RED_CROSSBAR, 2);
++ fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_BLUE_CROSSBAR, 2);
++#ifdef __BIG_ENDIAN
++ fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
++ ENDIAN_8IN32);
++#endif
++ break;
+ default:
+ DRM_ERROR("Unsupported screen format %s\n",
+ drm_get_format_name(target_fb->format->format, &format_name));
+diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+index fd3441e..d45c2d8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+@@ -1989,6 +1989,17 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
+ /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
+ bypass_lut = true;
+ break;
++ case DRM_FORMAT_XBGR8888:
++ case DRM_FORMAT_ABGR8888:
++ fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
++ fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
++ fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_RED_CROSSBAR, 2);
++ fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_BLUE_CROSSBAR, 2);
++#ifdef __BIG_ENDIAN
++ fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
++ ENDIAN_8IN32);
++#endif
++ break;
+ default:
+ DRM_ERROR("Unsupported screen format %s\n",
+ drm_get_format_name(target_fb->format->format, &format_name));
+diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+index b185bd7..3d214bd 100644
+--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+@@ -1892,6 +1892,16 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
+ /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
+ bypass_lut = true;
+ break;
++ case DRM_FORMAT_XBGR8888:
++ case DRM_FORMAT_ABGR8888:
++ fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
++ GRPH_FORMAT(GRPH_FORMAT_ARGB8888));
++ fb_swap = (GRPH_RED_CROSSBAR(GRPH_RED_SEL_B) |
++ GRPH_BLUE_CROSSBAR(GRPH_BLUE_SEL_R));
++#ifdef __BIG_ENDIAN
++ fb_swap |= GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
++#endif
++ break;
+ default:
+ DRM_ERROR("Unsupported screen format %s\n",
+ drm_get_format_name(target_fb->format->format, &format_name));
+diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+index bff4b94..4798f45 100644
+--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+@@ -1869,6 +1869,16 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
+ /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
+ bypass_lut = true;
+ break;
++ case DRM_FORMAT_XBGR8888:
++ case DRM_FORMAT_ABGR8888:
++ fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
++ (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
++ fb_swap = ((GRPH_RED_SEL_B << GRPH_SWAP_CNTL__GRPH_RED_CROSSBAR__SHIFT) |
++ (GRPH_BLUE_SEL_R << GRPH_SWAP_CNTL__GRPH_BLUE_CROSSBAR__SHIFT));
++#ifdef __BIG_ENDIAN
++ fb_swap |= (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
++#endif
++ break;
+ default:
+ DRM_ERROR("Unsupported screen format %s\n",
+ drm_get_format_name(target_fb->format->format, &format_name));
+diff --git a/drivers/gpu/drm/amd/amdgpu/si_enums.h b/drivers/gpu/drm/amd/amdgpu/si_enums.h
+index dc9e0e6..790ba46 100644
+--- a/drivers/gpu/drm/amd/amdgpu/si_enums.h
++++ b/drivers/gpu/drm/amd/amdgpu/si_enums.h
+@@ -46,6 +46,26 @@
+ #define GRPH_ENDIAN_8IN16 1
+ #define GRPH_ENDIAN_8IN32 2
+ #define GRPH_ENDIAN_8IN64 3
++#define GRPH_RED_CROSSBAR(x) (((x) & 0x3) << 4)
++#define GRPH_RED_SEL_R 0
++#define GRPH_RED_SEL_G 1
++#define GRPH_RED_SEL_B 2
++#define GRPH_RED_SEL_A 3
++#define GRPH_GREEN_CROSSBAR(x) (((x) & 0x3) << 6)
++#define GRPH_GREEN_SEL_G 0
++#define GRPH_GREEN_SEL_B 1
++#define GRPH_GREEN_SEL_A 2
++#define GRPH_GREEN_SEL_R 3
++#define GRPH_BLUE_CROSSBAR(x) (((x) & 0x3) << 8)
++#define GRPH_BLUE_SEL_B 0
++#define GRPH_BLUE_SEL_A 1
++#define GRPH_BLUE_SEL_R 2
++#define GRPH_BLUE_SEL_G 3
++#define GRPH_ALPHA_CROSSBAR(x) (((x) & 0x3) << 10)
++#define GRPH_ALPHA_SEL_A 0
++#define GRPH_ALPHA_SEL_R 1
++#define GRPH_ALPHA_SEL_G 2
++#define GRPH_ALPHA_SEL_B 3
+
+ #define GRPH_DEPTH(x) (((x) & 0x3) << 0)
+ #define GRPH_DEPTH_8BPP 0
+diff --git a/drivers/gpu/drm/amd/amdgpu/sid.h b/drivers/gpu/drm/amd/amdgpu/sid.h
+index c57eff1..7cf12ad 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sid.h
++++ b/drivers/gpu/drm/amd/amdgpu/sid.h
+@@ -2201,6 +2201,26 @@
+ # define EVERGREEN_GRPH_ENDIAN_8IN16 1
+ # define EVERGREEN_GRPH_ENDIAN_8IN32 2
+ # define EVERGREEN_GRPH_ENDIAN_8IN64 3
++#define EVERGREEN_GRPH_RED_CROSSBAR(x) (((x) & 0x3) << 4)
++# define EVERGREEN_GRPH_RED_SEL_R 0
++# define EVERGREEN_GRPH_RED_SEL_G 1
++# define EVERGREEN_GRPH_RED_SEL_B 2
++# define EVERGREEN_GRPH_RED_SEL_A 3
++#define EVERGREEN_GRPH_GREEN_CROSSBAR(x) (((x) & 0x3) << 6)
++# define EVERGREEN_GRPH_GREEN_SEL_G 0
++# define EVERGREEN_GRPH_GREEN_SEL_B 1
++# define EVERGREEN_GRPH_GREEN_SEL_A 2
++# define EVERGREEN_GRPH_GREEN_SEL_R 3
++#define EVERGREEN_GRPH_BLUE_CROSSBAR(x) (((x) & 0x3) << 8)
++# define EVERGREEN_GRPH_BLUE_SEL_B 0
++# define EVERGREEN_GRPH_BLUE_SEL_A 1
++# define EVERGREEN_GRPH_BLUE_SEL_R 2
++# define EVERGREEN_GRPH_BLUE_SEL_G 3
++#define EVERGREEN_GRPH_ALPHA_CROSSBAR(x) (((x) & 0x3) << 10)
++# define EVERGREEN_GRPH_ALPHA_SEL_A 0
++# define EVERGREEN_GRPH_ALPHA_SEL_R 1
++# define EVERGREEN_GRPH_ALPHA_SEL_G 2
++# define EVERGREEN_GRPH_ALPHA_SEL_B 3
+
+ #define EVERGREEN_D3VGA_CONTROL 0xf8
+ #define EVERGREEN_D4VGA_CONTROL 0xf9
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5115-drm-amdgpu-include-Add-nbio-7.4-header-files-v4.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5115-drm-amdgpu-include-Add-nbio-7.4-header-files-v4.patch
new file mode 100644
index 00000000..592fd01c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5115-drm-amdgpu-include-Add-nbio-7.4-header-files-v4.patch
@@ -0,0 +1,53097 @@
+From 8d80fcf98d7d4d17c41cc3eeca5e7a81881d2913 Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Wed, 17 Jan 2018 20:05:19 +0800
+Subject: [PATCH 5115/5725] drm/amdgpu/include: Add nbio 7.4 header files (v4)
+
+v2: Cleanups (Alex)
+v3: More updates (Alex)
+v4: more cleanups (Alex)
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Acked-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../amd/include/asic_reg/nbio/nbio_7_4_offset.h | 4627 ++
+ .../amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h | 48436 +++++++++++++++++++
+ 2 files changed, 53063 insertions(+)
+ create mode 100644 drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h
+ create mode 100644 drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h
+
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h
+new file mode 100644
+index 0000000..e932213f
+--- /dev/null
++++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h
+@@ -0,0 +1,4627 @@
++/*
++ * Copyright (C) 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
++ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++#ifndef _nbio_7_4_OFFSET_HEADER
++#define _nbio_7_4_OFFSET_HEADER
++
++
++
++// addressBlock: nbio_pcie0_pswuscfg0_cfgdecp
++// base address: 0x0
++#define cfgPSWUSCFG0_VENDOR_ID 0x0000
++#define cfgPSWUSCFG0_DEVICE_ID 0x0002
++#define cfgPSWUSCFG0_COMMAND 0x0004
++#define cfgPSWUSCFG0_STATUS 0x0006
++#define cfgPSWUSCFG0_REVISION_ID 0x0008
++#define cfgPSWUSCFG0_PROG_INTERFACE 0x0009
++#define cfgPSWUSCFG0_SUB_CLASS 0x000a
++#define cfgPSWUSCFG0_BASE_CLASS 0x000b
++#define cfgPSWUSCFG0_CACHE_LINE 0x000c
++#define cfgPSWUSCFG0_LATENCY 0x000d
++#define cfgPSWUSCFG0_HEADER 0x000e
++#define cfgPSWUSCFG0_BIST 0x000f
++#define cfgPSWUSCFG0_SUB_BUS_NUMBER_LATENCY 0x0018
++#define cfgPSWUSCFG0_IO_BASE_LIMIT 0x001c
++#define cfgPSWUSCFG0_SECONDARY_STATUS 0x001e
++#define cfgPSWUSCFG0_MEM_BASE_LIMIT 0x0020
++#define cfgPSWUSCFG0_PREF_BASE_LIMIT 0x0024
++#define cfgPSWUSCFG0_PREF_BASE_UPPER 0x0028
++#define cfgPSWUSCFG0_PREF_LIMIT_UPPER 0x002c
++#define cfgPSWUSCFG0_IO_BASE_LIMIT_HI 0x0030
++#define cfgPSWUSCFG0_CAP_PTR 0x0034
++#define cfgPSWUSCFG0_INTERRUPT_LINE 0x003c
++#define cfgPSWUSCFG0_INTERRUPT_PIN 0x003d
++#define cfgPSWUSCFG0_IRQ_BRIDGE_CNTL 0x003e
++#define cfgEXT_BRIDGE_CNTL 0x0040
++#define cfgPSWUSCFG0_VENDOR_CAP_LIST 0x0048
++#define cfgPSWUSCFG0_ADAPTER_ID_W 0x004c
++#define cfgPSWUSCFG0_PMI_CAP_LIST 0x0050
++#define cfgPSWUSCFG0_PMI_CAP 0x0052
++#define cfgPSWUSCFG0_PMI_STATUS_CNTL 0x0054
++#define cfgPSWUSCFG0_PCIE_CAP_LIST 0x0058
++#define cfgPSWUSCFG0_PCIE_CAP 0x005a
++#define cfgPSWUSCFG0_DEVICE_CAP 0x005c
++#define cfgPSWUSCFG0_DEVICE_CNTL 0x0060
++#define cfgPSWUSCFG0_DEVICE_STATUS 0x0062
++#define cfgPSWUSCFG0_LINK_CAP 0x0064
++#define cfgPSWUSCFG0_LINK_CNTL 0x0068
++#define cfgPSWUSCFG0_LINK_STATUS 0x006a
++#define cfgPSWUSCFG0_DEVICE_CAP2 0x007c
++#define cfgPSWUSCFG0_DEVICE_CNTL2 0x0080
++#define cfgPSWUSCFG0_DEVICE_STATUS2 0x0082
++#define cfgPSWUSCFG0_LINK_CAP2 0x0084
++#define cfgPSWUSCFG0_LINK_CNTL2 0x0088
++#define cfgPSWUSCFG0_LINK_STATUS2 0x008a
++#define cfgPSWUSCFG0_MSI_CAP_LIST 0x00a0
++#define cfgPSWUSCFG0_MSI_MSG_CNTL 0x00a2
++#define cfgPSWUSCFG0_MSI_MSG_ADDR_LO 0x00a4
++#define cfgPSWUSCFG0_MSI_MSG_ADDR_HI 0x00a8
++#define cfgPSWUSCFG0_MSI_MSG_DATA 0x00a8
++#define cfgPSWUSCFG0_MSI_MSG_DATA_64 0x00ac
++#define cfgPSWUSCFG0_SSID_CAP_LIST 0x00c0
++#define cfgPSWUSCFG0_SSID_CAP 0x00c4
++#define cfgMSI_MAP_CAP_LIST 0x00c8
++#define cfgMSI_MAP_CAP 0x00ca
++#define cfgMSI_MAP_ADDR_LO 0x00cc
++#define cfgMSI_MAP_ADDR_HI 0x00d0
++#define cfgPSWUSCFG0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100
++#define cfgPSWUSCFG0_PCIE_VENDOR_SPECIFIC_HDR 0x0104
++#define cfgPSWUSCFG0_PCIE_VENDOR_SPECIFIC1 0x0108
++#define cfgPSWUSCFG0_PCIE_VENDOR_SPECIFIC2 0x010c
++#define cfgPSWUSCFG0_PCIE_VC_ENH_CAP_LIST 0x0110
++#define cfgPSWUSCFG0_PCIE_PORT_VC_CAP_REG1 0x0114
++#define cfgPSWUSCFG0_PCIE_PORT_VC_CAP_REG2 0x0118
++#define cfgPSWUSCFG0_PCIE_PORT_VC_CNTL 0x011c
++#define cfgPSWUSCFG0_PCIE_PORT_VC_STATUS 0x011e
++#define cfgPSWUSCFG0_PCIE_VC0_RESOURCE_CAP 0x0120
++#define cfgPSWUSCFG0_PCIE_VC0_RESOURCE_CNTL 0x0124
++#define cfgPSWUSCFG0_PCIE_VC0_RESOURCE_STATUS 0x012a
++#define cfgPSWUSCFG0_PCIE_VC1_RESOURCE_CAP 0x012c
++#define cfgPSWUSCFG0_PCIE_VC1_RESOURCE_CNTL 0x0130
++#define cfgPSWUSCFG0_PCIE_VC1_RESOURCE_STATUS 0x0136
++#define cfgPSWUSCFG0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST 0x0140
++#define cfgPSWUSCFG0_PCIE_DEV_SERIAL_NUM_DW1 0x0144
++#define cfgPSWUSCFG0_PCIE_DEV_SERIAL_NUM_DW2 0x0148
++#define cfgPSWUSCFG0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150
++#define cfgPSWUSCFG0_PCIE_UNCORR_ERR_STATUS 0x0154
++#define cfgPSWUSCFG0_PCIE_UNCORR_ERR_MASK 0x0158
++#define cfgPSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY 0x015c
++#define cfgPSWUSCFG0_PCIE_CORR_ERR_STATUS 0x0160
++#define cfgPSWUSCFG0_PCIE_CORR_ERR_MASK 0x0164
++#define cfgPSWUSCFG0_PCIE_ADV_ERR_CAP_CNTL 0x0168
++#define cfgPSWUSCFG0_PCIE_HDR_LOG0 0x016c
++#define cfgPSWUSCFG0_PCIE_HDR_LOG1 0x0170
++#define cfgPSWUSCFG0_PCIE_HDR_LOG2 0x0174
++#define cfgPSWUSCFG0_PCIE_HDR_LOG3 0x0178
++#define cfgPSWUSCFG0_PCIE_TLP_PREFIX_LOG0 0x0188
++#define cfgPSWUSCFG0_PCIE_TLP_PREFIX_LOG1 0x018c
++#define cfgPSWUSCFG0_PCIE_TLP_PREFIX_LOG2 0x0190
++#define cfgPSWUSCFG0_PCIE_TLP_PREFIX_LOG3 0x0194
++#define cfgPSWUSCFG0_PCIE_SECONDARY_ENH_CAP_LIST 0x0270
++#define cfgPSWUSCFG0_PCIE_LINK_CNTL3 0x0274
++#define cfgPSWUSCFG0_PCIE_LANE_ERROR_STATUS 0x0278
++#define cfgPSWUSCFG0_PCIE_LANE_0_EQUALIZATION_CNTL 0x027c
++#define cfgPSWUSCFG0_PCIE_LANE_1_EQUALIZATION_CNTL 0x027e
++#define cfgPSWUSCFG0_PCIE_LANE_2_EQUALIZATION_CNTL 0x0280
++#define cfgPSWUSCFG0_PCIE_LANE_3_EQUALIZATION_CNTL 0x0282
++#define cfgPSWUSCFG0_PCIE_LANE_4_EQUALIZATION_CNTL 0x0284
++#define cfgPSWUSCFG0_PCIE_LANE_5_EQUALIZATION_CNTL 0x0286
++#define cfgPSWUSCFG0_PCIE_LANE_6_EQUALIZATION_CNTL 0x0288
++#define cfgPSWUSCFG0_PCIE_LANE_7_EQUALIZATION_CNTL 0x028a
++#define cfgPSWUSCFG0_PCIE_LANE_8_EQUALIZATION_CNTL 0x028c
++#define cfgPSWUSCFG0_PCIE_LANE_9_EQUALIZATION_CNTL 0x028e
++#define cfgPSWUSCFG0_PCIE_LANE_10_EQUALIZATION_CNTL 0x0290
++#define cfgPSWUSCFG0_PCIE_LANE_11_EQUALIZATION_CNTL 0x0292
++#define cfgPSWUSCFG0_PCIE_LANE_12_EQUALIZATION_CNTL 0x0294
++#define cfgPSWUSCFG0_PCIE_LANE_13_EQUALIZATION_CNTL 0x0296
++#define cfgPSWUSCFG0_PCIE_LANE_14_EQUALIZATION_CNTL 0x0298
++#define cfgPSWUSCFG0_PCIE_LANE_15_EQUALIZATION_CNTL 0x029a
++#define cfgPSWUSCFG0_PCIE_ACS_ENH_CAP_LIST 0x02a0
++#define cfgPSWUSCFG0_PCIE_ACS_CAP 0x02a4
++#define cfgPSWUSCFG0_PCIE_ACS_CNTL 0x02a6
++#define cfgPSWUSCFG0_PCIE_MC_ENH_CAP_LIST 0x02f0
++#define cfgPSWUSCFG0_PCIE_MC_CAP 0x02f4
++#define cfgPSWUSCFG0_PCIE_MC_CNTL 0x02f6
++#define cfgPSWUSCFG0_PCIE_MC_ADDR0 0x02f8
++#define cfgPSWUSCFG0_PCIE_MC_ADDR1 0x02fc
++#define cfgPSWUSCFG0_PCIE_MC_RCV0 0x0300
++#define cfgPSWUSCFG0_PCIE_MC_RCV1 0x0304
++#define cfgPSWUSCFG0_PCIE_MC_BLOCK_ALL0 0x0308
++#define cfgPSWUSCFG0_PCIE_MC_BLOCK_ALL1 0x030c
++#define cfgPSWUSCFG0_PCIE_MC_BLOCK_UNTRANSLATED_0 0x0310
++#define cfgPSWUSCFG0_PCIE_MC_BLOCK_UNTRANSLATED_1 0x0314
++#define cfgPCIE_MC_OVERLAY_BAR0 0x0318
++#define cfgPCIE_MC_OVERLAY_BAR1 0x031c
++#define cfgPSWUSCFG0_PCIE_LTR_ENH_CAP_LIST 0x0320
++#define cfgPSWUSCFG0_PCIE_LTR_CAP 0x0324
++#define cfgPSWUSCFG0_PCIE_ARI_ENH_CAP_LIST 0x0328
++#define cfgPSWUSCFG0_PCIE_ARI_CAP 0x032c
++#define cfgPSWUSCFG0_PCIE_ARI_CNTL 0x032e
++#define cfgPCIE_L1_PM_SUB_CAP_LIST 0x0370
++#define cfgPCIE_L1_PM_SUB_CAP 0x0374
++#define cfgPCIE_L1_PM_SUB_CNTL 0x0378
++#define cfgPCIE_L1_PM_SUB_CNTL2 0x037c
++#define cfgPCIE_ESM_CAP_LIST 0x03c4
++#define cfgPCIE_ESM_HEADER_1 0x03c8
++#define cfgPCIE_ESM_HEADER_2 0x03cc
++#define cfgPCIE_ESM_STATUS 0x03ce
++#define cfgPCIE_ESM_CTRL 0x03d0
++#define cfgPCIE_ESM_CAP_1 0x03d4
++#define cfgPCIE_ESM_CAP_2 0x03d8
++#define cfgPCIE_ESM_CAP_3 0x03dc
++#define cfgPCIE_ESM_CAP_4 0x03e0
++#define cfgPCIE_ESM_CAP_5 0x03e4
++#define cfgPCIE_ESM_CAP_6 0x03e8
++#define cfgPCIE_ESM_CAP_7 0x03ec
++#define cfgPSWUSCFG0_PCIE_DLF_ENH_CAP_LIST 0x0400
++#define cfgPSWUSCFG0_DATA_LINK_FEATURE_CAP 0x0404
++#define cfgPSWUSCFG0_DATA_LINK_FEATURE_STATUS 0x0408
++#define cfgPCIE_PHY_16GT_ENH_CAP_LIST 0x0410
++#define cfgPSWUSCFG0_LINK_CAP_16GT 0x0414
++#define cfgPSWUSCFG0_LINK_CNTL_16GT 0x0418
++#define cfgPSWUSCFG0_LINK_STATUS_16GT 0x041c
++#define cfgPSWUSCFG0_LOCAL_PARITY_MISMATCH_STATUS_16GT 0x0420
++#define cfgPSWUSCFG0_RTM1_PARITY_MISMATCH_STATUS_16GT 0x0424
++#define cfgPSWUSCFG0_RTM2_PARITY_MISMATCH_STATUS_16GT 0x0428
++#define cfgPSWUSCFG0_LANE_0_EQUALIZATION_CNTL_16GT 0x0430
++#define cfgPSWUSCFG0_LANE_1_EQUALIZATION_CNTL_16GT 0x0431
++#define cfgPSWUSCFG0_LANE_2_EQUALIZATION_CNTL_16GT 0x0432
++#define cfgPSWUSCFG0_LANE_3_EQUALIZATION_CNTL_16GT 0x0433
++#define cfgPSWUSCFG0_LANE_4_EQUALIZATION_CNTL_16GT 0x0434
++#define cfgPSWUSCFG0_LANE_5_EQUALIZATION_CNTL_16GT 0x0435
++#define cfgPSWUSCFG0_LANE_6_EQUALIZATION_CNTL_16GT 0x0436
++#define cfgPSWUSCFG0_LANE_7_EQUALIZATION_CNTL_16GT 0x0437
++#define cfgPSWUSCFG0_LANE_8_EQUALIZATION_CNTL_16GT 0x0438
++#define cfgPSWUSCFG0_LANE_9_EQUALIZATION_CNTL_16GT 0x0439
++#define cfgPSWUSCFG0_LANE_10_EQUALIZATION_CNTL_16GT 0x043a
++#define cfgPSWUSCFG0_LANE_11_EQUALIZATION_CNTL_16GT 0x043b
++#define cfgPSWUSCFG0_LANE_12_EQUALIZATION_CNTL_16GT 0x043c
++#define cfgPSWUSCFG0_LANE_13_EQUALIZATION_CNTL_16GT 0x043d
++#define cfgPSWUSCFG0_LANE_14_EQUALIZATION_CNTL_16GT 0x043e
++#define cfgPSWUSCFG0_LANE_15_EQUALIZATION_CNTL_16GT 0x043f
++#define cfgPCIE_MARGINING_ENH_CAP_LIST 0x0440
++#define cfgPSWUSCFG0_MARGINING_PORT_CAP 0x0444
++#define cfgPSWUSCFG0_MARGINING_PORT_STATUS 0x0446
++#define cfgPSWUSCFG0_LANE_0_MARGINING_LANE_CNTL 0x0448
++#define cfgPSWUSCFG0_LANE_0_MARGINING_LANE_STATUS 0x044a
++#define cfgPSWUSCFG0_LANE_1_MARGINING_LANE_CNTL 0x044c
++#define cfgPSWUSCFG0_LANE_1_MARGINING_LANE_STATUS 0x044e
++#define cfgPSWUSCFG0_LANE_2_MARGINING_LANE_CNTL 0x0450
++#define cfgPSWUSCFG0_LANE_2_MARGINING_LANE_STATUS 0x0452
++#define cfgPSWUSCFG0_LANE_3_MARGINING_LANE_CNTL 0x0454
++#define cfgPSWUSCFG0_LANE_3_MARGINING_LANE_STATUS 0x0456
++#define cfgPSWUSCFG0_LANE_4_MARGINING_LANE_CNTL 0x0458
++#define cfgPSWUSCFG0_LANE_4_MARGINING_LANE_STATUS 0x045a
++#define cfgPSWUSCFG0_LANE_5_MARGINING_LANE_CNTL 0x045c
++#define cfgPSWUSCFG0_LANE_5_MARGINING_LANE_STATUS 0x045e
++#define cfgPSWUSCFG0_LANE_6_MARGINING_LANE_CNTL 0x0460
++#define cfgPSWUSCFG0_LANE_6_MARGINING_LANE_STATUS 0x0462
++#define cfgPSWUSCFG0_LANE_7_MARGINING_LANE_CNTL 0x0464
++#define cfgPSWUSCFG0_LANE_7_MARGINING_LANE_STATUS 0x0466
++#define cfgPSWUSCFG0_LANE_8_MARGINING_LANE_CNTL 0x0468
++#define cfgPSWUSCFG0_LANE_8_MARGINING_LANE_STATUS 0x046a
++#define cfgPSWUSCFG0_LANE_9_MARGINING_LANE_CNTL 0x046c
++#define cfgPSWUSCFG0_LANE_9_MARGINING_LANE_STATUS 0x046e
++#define cfgPSWUSCFG0_LANE_10_MARGINING_LANE_CNTL 0x0470
++#define cfgPSWUSCFG0_LANE_10_MARGINING_LANE_STATUS 0x0472
++#define cfgPSWUSCFG0_LANE_11_MARGINING_LANE_CNTL 0x0474
++#define cfgPSWUSCFG0_LANE_11_MARGINING_LANE_STATUS 0x0476
++#define cfgPSWUSCFG0_LANE_12_MARGINING_LANE_CNTL 0x0478
++#define cfgPSWUSCFG0_LANE_12_MARGINING_LANE_STATUS 0x047a
++#define cfgPSWUSCFG0_LANE_13_MARGINING_LANE_CNTL 0x047c
++#define cfgPSWUSCFG0_LANE_13_MARGINING_LANE_STATUS 0x047e
++#define cfgPSWUSCFG0_LANE_14_MARGINING_LANE_CNTL 0x0480
++#define cfgPSWUSCFG0_LANE_14_MARGINING_LANE_STATUS 0x0482
++#define cfgPSWUSCFG0_LANE_15_MARGINING_LANE_CNTL 0x0484
++#define cfgPSWUSCFG0_LANE_15_MARGINING_LANE_STATUS 0x0486
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_bifcfgdecp
++// base address: 0x0
++#define cfgBIF_CFG_DEV0_EPF0_0_VENDOR_ID 0x0000
++#define cfgBIF_CFG_DEV0_EPF0_0_DEVICE_ID 0x0002
++#define cfgBIF_CFG_DEV0_EPF0_0_COMMAND 0x0004
++#define cfgBIF_CFG_DEV0_EPF0_0_STATUS 0x0006
++#define cfgBIF_CFG_DEV0_EPF0_0_REVISION_ID 0x0008
++#define cfgBIF_CFG_DEV0_EPF0_0_PROG_INTERFACE 0x0009
++#define cfgBIF_CFG_DEV0_EPF0_0_SUB_CLASS 0x000a
++#define cfgBIF_CFG_DEV0_EPF0_0_BASE_CLASS 0x000b
++#define cfgBIF_CFG_DEV0_EPF0_0_CACHE_LINE 0x000c
++#define cfgBIF_CFG_DEV0_EPF0_0_LATENCY 0x000d
++#define cfgBIF_CFG_DEV0_EPF0_0_HEADER 0x000e
++#define cfgBIF_CFG_DEV0_EPF0_0_BIST 0x000f
++#define cfgBIF_CFG_DEV0_EPF0_0_BASE_ADDR_1 0x0010
++#define cfgBIF_CFG_DEV0_EPF0_0_BASE_ADDR_2 0x0014
++#define cfgBIF_CFG_DEV0_EPF0_0_BASE_ADDR_3 0x0018
++#define cfgBIF_CFG_DEV0_EPF0_0_BASE_ADDR_4 0x001c
++#define cfgBIF_CFG_DEV0_EPF0_0_BASE_ADDR_5 0x0020
++#define cfgBIF_CFG_DEV0_EPF0_0_BASE_ADDR_6 0x0024
++#define cfgBIF_CFG_DEV0_EPF0_0_ADAPTER_ID 0x002c
++#define cfgBIF_CFG_DEV0_EPF0_0_ROM_BASE_ADDR 0x0030
++#define cfgBIF_CFG_DEV0_EPF0_0_CAP_PTR 0x0034
++#define cfgBIF_CFG_DEV0_EPF0_0_INTERRUPT_LINE 0x003c
++#define cfgBIF_CFG_DEV0_EPF0_0_INTERRUPT_PIN 0x003d
++#define cfgBIF_CFG_DEV0_EPF0_0_MIN_GRANT 0x003e
++#define cfgBIF_CFG_DEV0_EPF0_0_MAX_LATENCY 0x003f
++#define cfgBIF_CFG_DEV0_EPF0_0_VENDOR_CAP_LIST 0x0048
++#define cfgBIF_CFG_DEV0_EPF0_0_ADAPTER_ID_W 0x004c
++#define cfgBIF_CFG_DEV0_EPF0_0_PMI_CAP_LIST 0x0050
++#define cfgBIF_CFG_DEV0_EPF0_0_PMI_CAP 0x0052
++#define cfgBIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL 0x0054
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_CAP_LIST 0x0064
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_CAP 0x0066
++#define cfgBIF_CFG_DEV0_EPF0_0_DEVICE_CAP 0x0068
++#define cfgBIF_CFG_DEV0_EPF0_0_DEVICE_CNTL 0x006c
++#define cfgBIF_CFG_DEV0_EPF0_0_DEVICE_STATUS 0x006e
++#define cfgBIF_CFG_DEV0_EPF0_0_LINK_CAP 0x0070
++#define cfgBIF_CFG_DEV0_EPF0_0_LINK_CNTL 0x0074
++#define cfgBIF_CFG_DEV0_EPF0_0_LINK_STATUS 0x0076
++#define cfgBIF_CFG_DEV0_EPF0_0_DEVICE_CAP2 0x0088
++#define cfgBIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2 0x008c
++#define cfgBIF_CFG_DEV0_EPF0_0_DEVICE_STATUS2 0x008e
++#define cfgBIF_CFG_DEV0_EPF0_0_LINK_CAP2 0x0090
++#define cfgBIF_CFG_DEV0_EPF0_0_LINK_CNTL2 0x0094
++#define cfgBIF_CFG_DEV0_EPF0_0_LINK_STATUS2 0x0096
++#define cfgBIF_CFG_DEV0_EPF0_0_SLOT_CAP2 0x0098
++#define cfgBIF_CFG_DEV0_EPF0_0_SLOT_CNTL2 0x009c
++#define cfgBIF_CFG_DEV0_EPF0_0_SLOT_STATUS2 0x009e
++#define cfgBIF_CFG_DEV0_EPF0_0_MSI_CAP_LIST 0x00a0
++#define cfgBIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL 0x00a2
++#define cfgBIF_CFG_DEV0_EPF0_0_MSI_MSG_ADDR_LO 0x00a4
++#define cfgBIF_CFG_DEV0_EPF0_0_MSI_MSG_ADDR_HI 0x00a8
++#define cfgBIF_CFG_DEV0_EPF0_0_MSI_MSG_DATA 0x00a8
++#define cfgBIF_CFG_DEV0_EPF0_0_MSI_MASK 0x00ac
++#define cfgBIF_CFG_DEV0_EPF0_0_MSI_MSG_DATA_64 0x00ac
++#define cfgBIF_CFG_DEV0_EPF0_0_MSI_MASK_64 0x00b0
++#define cfgBIF_CFG_DEV0_EPF0_0_MSI_PENDING 0x00b0
++#define cfgBIF_CFG_DEV0_EPF0_0_MSI_PENDING_64 0x00b4
++#define cfgBIF_CFG_DEV0_EPF0_0_MSIX_CAP_LIST 0x00c0
++#define cfgBIF_CFG_DEV0_EPF0_0_MSIX_MSG_CNTL 0x00c2
++#define cfgBIF_CFG_DEV0_EPF0_0_MSIX_TABLE 0x00c4
++#define cfgBIF_CFG_DEV0_EPF0_0_MSIX_PBA 0x00c8
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR 0x0104
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC1 0x0108
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC2 0x010c
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VC_ENH_CAP_LIST 0x0110
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1 0x0114
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG2 0x0118
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CNTL 0x011c
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_STATUS 0x011e
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP 0x0120
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL 0x0124
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_STATUS 0x012a
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP 0x012c
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL 0x0130
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_STATUS 0x0136
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST 0x0140
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_DW1 0x0144
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_DW2 0x0148
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS 0x0154
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK 0x0158
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY 0x015c
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS 0x0160
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK 0x0164
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL 0x0168
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG0 0x016c
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG1 0x0170
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG2 0x0174
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG3 0x0178
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG0 0x0188
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG1 0x018c
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG2 0x0190
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG3 0x0194
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_BAR_ENH_CAP_LIST 0x0200
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CAP 0x0204
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CNTL 0x0208
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CAP 0x020c
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CNTL 0x0210
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CAP 0x0214
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CNTL 0x0218
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CAP 0x021c
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CNTL 0x0220
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CAP 0x0224
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CNTL 0x0228
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CAP 0x022c
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CNTL 0x0230
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_ENH_CAP_LIST 0x0240
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA_SELECT 0x0244
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA 0x0248
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_CAP 0x024c
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_DPA_ENH_CAP_LIST 0x0250
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP 0x0254
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_DPA_LATENCY_INDICATOR 0x0258
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_DPA_STATUS 0x025c
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_DPA_CNTL 0x025e
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0 0x0260
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1 0x0261
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2 0x0262
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3 0x0263
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4 0x0264
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5 0x0265
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6 0x0266
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7 0x0267
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_SECONDARY_ENH_CAP_LIST 0x0270
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_LINK_CNTL3 0x0274
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_LANE_ERROR_STATUS 0x0278
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL 0x027c
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL 0x027e
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL 0x0280
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL 0x0282
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL 0x0284
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL 0x0286
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL 0x0288
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL 0x028a
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL 0x028c
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL 0x028e
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL 0x0290
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL 0x0292
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL 0x0294
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL 0x0296
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL 0x0298
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL 0x029a
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_ACS_ENH_CAP_LIST 0x02a0
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP 0x02a4
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL 0x02a6
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_ATS_ENH_CAP_LIST 0x02b0
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_ATS_CAP 0x02b4
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_ATS_CNTL 0x02b6
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_ENH_CAP_LIST 0x02c0
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_CNTL 0x02c4
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS 0x02c6
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_OUTSTAND_PAGE_REQ_CAPACITY 0x02c8
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_OUTSTAND_PAGE_REQ_ALLOC 0x02cc
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_PASID_ENH_CAP_LIST 0x02d0
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_PASID_CAP 0x02d4
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_PASID_CNTL 0x02d6
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_MC_ENH_CAP_LIST 0x02f0
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_MC_CAP 0x02f4
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_MC_CNTL 0x02f6
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR0 0x02f8
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR1 0x02fc
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_MC_RCV0 0x0300
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_MC_RCV1 0x0304
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_ALL0 0x0308
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_ALL1 0x030c
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_UNTRANSLATED_0 0x0310
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_UNTRANSLATED_1 0x0314
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_LTR_ENH_CAP_LIST 0x0320
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP 0x0324
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_ARI_ENH_CAP_LIST 0x0328
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_ARI_CAP 0x032c
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_ARI_CNTL 0x032e
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_ENH_CAP_LIST 0x0330
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP 0x0334
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL 0x0338
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_STATUS 0x033a
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_INITIAL_VFS 0x033c
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_TOTAL_VFS 0x033e
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_NUM_VFS 0x0340
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_FUNC_DEP_LINK 0x0342
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_FIRST_VF_OFFSET 0x0344
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_STRIDE 0x0346
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_DEVICE_ID 0x034a
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_SUPPORTED_PAGE_SIZE 0x034c
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_SYSTEM_PAGE_SIZE 0x0350
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_0 0x0354
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_1 0x0358
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_2 0x035c
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_3 0x0360
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_4 0x0364
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_5 0x0368
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET 0x036c
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_ENH_CAP_LIST 0x0370
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP 0x0374
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CNTL 0x0378
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_DLF_ENH_CAP_LIST 0x0400
++#define cfgBIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_CAP 0x0404
++#define cfgBIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_STATUS 0x0408
++#define cfgBIF_CFG_DEV0_EPF0_0_PHY_16GT_ENH_CAP_LIST 0x0410
++#define cfgBIF_CFG_DEV0_EPF0_0_LINK_CAP_16GT 0x0414
++#define cfgBIF_CFG_DEV0_EPF0_0_LINK_CNTL_16GT 0x0418
++#define cfgBIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT 0x041c
++#define cfgBIF_CFG_DEV0_EPF0_0_LOCAL_PARITY_MISMATCH_STATUS_16GT 0x0420
++#define cfgBIF_CFG_DEV0_EPF0_0_RTM1_PARITY_MISMATCH_STATUS_16GT 0x0424
++#define cfgBIF_CFG_DEV0_EPF0_0_RTM2_PARITY_MISMATCH_STATUS_16GT 0x0428
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_0_EQUALIZATION_CNTL_16GT 0x0430
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_1_EQUALIZATION_CNTL_16GT 0x0431
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_2_EQUALIZATION_CNTL_16GT 0x0432
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_3_EQUALIZATION_CNTL_16GT 0x0433
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_4_EQUALIZATION_CNTL_16GT 0x0434
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_5_EQUALIZATION_CNTL_16GT 0x0435
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_6_EQUALIZATION_CNTL_16GT 0x0436
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_7_EQUALIZATION_CNTL_16GT 0x0437
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_8_EQUALIZATION_CNTL_16GT 0x0438
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_9_EQUALIZATION_CNTL_16GT 0x0439
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_10_EQUALIZATION_CNTL_16GT 0x043a
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_11_EQUALIZATION_CNTL_16GT 0x043b
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_12_EQUALIZATION_CNTL_16GT 0x043c
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_13_EQUALIZATION_CNTL_16GT 0x043d
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_14_EQUALIZATION_CNTL_16GT 0x043e
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_15_EQUALIZATION_CNTL_16GT 0x043f
++#define cfgBIF_CFG_DEV0_EPF0_0_MARGINING_ENH_CAP_LIST 0x0440
++#define cfgBIF_CFG_DEV0_EPF0_0_MARGINING_PORT_CAP 0x0444
++#define cfgBIF_CFG_DEV0_EPF0_0_MARGINING_PORT_STATUS 0x0446
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL 0x0448
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS 0x044a
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL 0x044c
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS 0x044e
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL 0x0450
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS 0x0452
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL 0x0454
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS 0x0456
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL 0x0458
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS 0x045a
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL 0x045c
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS 0x045e
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL 0x0460
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS 0x0462
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL 0x0464
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS 0x0466
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL 0x0468
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS 0x046a
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL 0x046c
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS 0x046e
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL 0x0470
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS 0x0472
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL 0x0474
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS 0x0476
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL 0x0478
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS 0x047a
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL 0x047c
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS 0x047e
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL 0x0480
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS 0x0482
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL 0x0484
++#define cfgBIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS 0x0486
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST 0x04c0
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR1_CAP 0x04c4
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR1_CNTL 0x04c8
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR2_CAP 0x04cc
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR2_CNTL 0x04d0
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR3_CAP 0x04d4
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR3_CNTL 0x04d8
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR4_CAP 0x04dc
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR4_CNTL 0x04e0
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR5_CAP 0x04e4
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR5_CNTL 0x04e8
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR6_CAP 0x04ec
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR6_CNTL 0x04f0
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV 0x0500
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV 0x0504
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW 0x0508
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE 0x050c
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS 0x0510
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_RESET_CONTROL 0x0514
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0 0x0518
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1 0x051c
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2 0x0520
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT 0x0524
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB 0x0528
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS 0x052c
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE 0x0530
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB 0x0534
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB 0x0538
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB 0x053c
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB 0x0540
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB 0x0544
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB 0x0548
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB 0x054c
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB 0x0550
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB 0x0554
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB 0x0558
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB 0x055c
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB 0x0560
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB 0x0564
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB 0x0568
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB 0x056c
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB 0x0570
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB 0x0574
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB 0x0578
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB 0x057c
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB 0x0580
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB 0x0584
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB 0x0588
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB 0x058c
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB 0x0590
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB 0x0594
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB 0x0598
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB 0x059c
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB 0x05a0
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB 0x05a4
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB 0x05a8
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB 0x05ac
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW0 0x05b0
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW1 0x05b4
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW2 0x05b8
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW3 0x05bc
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW4 0x05c0
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW5 0x05c4
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW6 0x05c8
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW7 0x05cc
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW8 0x05d0
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW0 0x05e0
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW1 0x05e4
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW2 0x05e8
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW3 0x05ec
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW4 0x05f0
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW5 0x05f4
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW6 0x05f8
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW7 0x05fc
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW8 0x0600
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW0 0x0610
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW1 0x0614
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW2 0x0618
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW3 0x061c
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW4 0x0620
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW5 0x0624
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW6 0x0628
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW7 0x062c
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW8 0x0630
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW0 0x0640
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW1 0x0644
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW2 0x0648
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW3 0x064c
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW4 0x0650
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW5 0x0654
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW6 0x0658
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW7 0x065c
++#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW8 0x0660
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf1_bifcfgdecp
++// base address: 0x0
++#define cfgBIF_CFG_DEV0_EPF1_0_VENDOR_ID 0x0000
++#define cfgBIF_CFG_DEV0_EPF1_0_DEVICE_ID 0x0002
++#define cfgBIF_CFG_DEV0_EPF1_0_COMMAND 0x0004
++#define cfgBIF_CFG_DEV0_EPF1_0_STATUS 0x0006
++#define cfgBIF_CFG_DEV0_EPF1_0_REVISION_ID 0x0008
++#define cfgBIF_CFG_DEV0_EPF1_0_PROG_INTERFACE 0x0009
++#define cfgBIF_CFG_DEV0_EPF1_0_SUB_CLASS 0x000a
++#define cfgBIF_CFG_DEV0_EPF1_0_BASE_CLASS 0x000b
++#define cfgBIF_CFG_DEV0_EPF1_0_CACHE_LINE 0x000c
++#define cfgBIF_CFG_DEV0_EPF1_0_LATENCY 0x000d
++#define cfgBIF_CFG_DEV0_EPF1_0_HEADER 0x000e
++#define cfgBIF_CFG_DEV0_EPF1_0_BIST 0x000f
++#define cfgBIF_CFG_DEV0_EPF1_0_BASE_ADDR_1 0x0010
++#define cfgBIF_CFG_DEV0_EPF1_0_BASE_ADDR_2 0x0014
++#define cfgBIF_CFG_DEV0_EPF1_0_BASE_ADDR_3 0x0018
++#define cfgBIF_CFG_DEV0_EPF1_0_BASE_ADDR_4 0x001c
++#define cfgBIF_CFG_DEV0_EPF1_0_BASE_ADDR_5 0x0020
++#define cfgBIF_CFG_DEV0_EPF1_0_BASE_ADDR_6 0x0024
++#define cfgBIF_CFG_DEV0_EPF1_0_ADAPTER_ID 0x002c
++#define cfgBIF_CFG_DEV0_EPF1_0_ROM_BASE_ADDR 0x0030
++#define cfgBIF_CFG_DEV0_EPF1_0_CAP_PTR 0x0034
++#define cfgBIF_CFG_DEV0_EPF1_0_INTERRUPT_LINE 0x003c
++#define cfgBIF_CFG_DEV0_EPF1_0_INTERRUPT_PIN 0x003d
++#define cfgBIF_CFG_DEV0_EPF1_0_MIN_GRANT 0x003e
++#define cfgBIF_CFG_DEV0_EPF1_0_MAX_LATENCY 0x003f
++#define cfgBIF_CFG_DEV0_EPF1_0_VENDOR_CAP_LIST 0x0048
++#define cfgBIF_CFG_DEV0_EPF1_0_ADAPTER_ID_W 0x004c
++#define cfgBIF_CFG_DEV0_EPF1_0_PMI_CAP_LIST 0x0050
++#define cfgBIF_CFG_DEV0_EPF1_0_PMI_CAP 0x0052
++#define cfgBIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL 0x0054
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_CAP_LIST 0x0064
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_CAP 0x0066
++#define cfgBIF_CFG_DEV0_EPF1_0_DEVICE_CAP 0x0068
++#define cfgBIF_CFG_DEV0_EPF1_0_DEVICE_CNTL 0x006c
++#define cfgBIF_CFG_DEV0_EPF1_0_DEVICE_STATUS 0x006e
++#define cfgBIF_CFG_DEV0_EPF1_0_LINK_CAP 0x0070
++#define cfgBIF_CFG_DEV0_EPF1_0_LINK_CNTL 0x0074
++#define cfgBIF_CFG_DEV0_EPF1_0_LINK_STATUS 0x0076
++#define cfgBIF_CFG_DEV0_EPF1_0_DEVICE_CAP2 0x0088
++#define cfgBIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2 0x008c
++#define cfgBIF_CFG_DEV0_EPF1_0_DEVICE_STATUS2 0x008e
++#define cfgBIF_CFG_DEV0_EPF1_0_LINK_CAP2 0x0090
++#define cfgBIF_CFG_DEV0_EPF1_0_LINK_CNTL2 0x0094
++#define cfgBIF_CFG_DEV0_EPF1_0_LINK_STATUS2 0x0096
++#define cfgBIF_CFG_DEV0_EPF1_0_SLOT_CAP2 0x0098
++#define cfgBIF_CFG_DEV0_EPF1_0_SLOT_CNTL2 0x009c
++#define cfgBIF_CFG_DEV0_EPF1_0_SLOT_STATUS2 0x009e
++#define cfgBIF_CFG_DEV0_EPF1_0_MSI_CAP_LIST 0x00a0
++#define cfgBIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL 0x00a2
++#define cfgBIF_CFG_DEV0_EPF1_0_MSI_MSG_ADDR_LO 0x00a4
++#define cfgBIF_CFG_DEV0_EPF1_0_MSI_MSG_ADDR_HI 0x00a8
++#define cfgBIF_CFG_DEV0_EPF1_0_MSI_MSG_DATA 0x00a8
++#define cfgBIF_CFG_DEV0_EPF1_0_MSI_MASK 0x00ac
++#define cfgBIF_CFG_DEV0_EPF1_0_MSI_MSG_DATA_64 0x00ac
++#define cfgBIF_CFG_DEV0_EPF1_0_MSI_MASK_64 0x00b0
++#define cfgBIF_CFG_DEV0_EPF1_0_MSI_PENDING 0x00b0
++#define cfgBIF_CFG_DEV0_EPF1_0_MSI_PENDING_64 0x00b4
++#define cfgBIF_CFG_DEV0_EPF1_0_MSIX_CAP_LIST 0x00c0
++#define cfgBIF_CFG_DEV0_EPF1_0_MSIX_MSG_CNTL 0x00c2
++#define cfgBIF_CFG_DEV0_EPF1_0_MSIX_TABLE 0x00c4
++#define cfgBIF_CFG_DEV0_EPF1_0_MSIX_PBA 0x00c8
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR 0x0104
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC1 0x0108
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC2 0x010c
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VC_ENH_CAP_LIST 0x0110
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG1 0x0114
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG2 0x0118
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CNTL 0x011c
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_STATUS 0x011e
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CAP 0x0120
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL 0x0124
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_STATUS 0x012a
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CAP 0x012c
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL 0x0130
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_STATUS 0x0136
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST 0x0140
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_DW1 0x0144
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_DW2 0x0148
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS 0x0154
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK 0x0158
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY 0x015c
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS 0x0160
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK 0x0164
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL 0x0168
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG0 0x016c
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG1 0x0170
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG2 0x0174
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG3 0x0178
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG0 0x0188
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG1 0x018c
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG2 0x0190
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG3 0x0194
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_BAR_ENH_CAP_LIST 0x0200
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CAP 0x0204
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CNTL 0x0208
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CAP 0x020c
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CNTL 0x0210
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CAP 0x0214
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CNTL 0x0218
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CAP 0x021c
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CNTL 0x0220
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CAP 0x0224
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CNTL 0x0228
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CAP 0x022c
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CNTL 0x0230
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_ENH_CAP_LIST 0x0240
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA_SELECT 0x0244
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA 0x0248
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_CAP 0x024c
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_DPA_ENH_CAP_LIST 0x0250
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP 0x0254
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_DPA_LATENCY_INDICATOR 0x0258
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_DPA_STATUS 0x025c
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_DPA_CNTL 0x025e
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0 0x0260
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1 0x0261
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2 0x0262
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3 0x0263
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4 0x0264
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5 0x0265
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6 0x0266
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7 0x0267
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_SECONDARY_ENH_CAP_LIST 0x0270
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_LINK_CNTL3 0x0274
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_LANE_ERROR_STATUS 0x0278
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_LANE_0_EQUALIZATION_CNTL 0x027c
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_LANE_1_EQUALIZATION_CNTL 0x027e
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_LANE_2_EQUALIZATION_CNTL 0x0280
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_LANE_3_EQUALIZATION_CNTL 0x0282
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_LANE_4_EQUALIZATION_CNTL 0x0284
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_LANE_5_EQUALIZATION_CNTL 0x0286
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_LANE_6_EQUALIZATION_CNTL 0x0288
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_LANE_7_EQUALIZATION_CNTL 0x028a
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_LANE_8_EQUALIZATION_CNTL 0x028c
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_LANE_9_EQUALIZATION_CNTL 0x028e
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_LANE_10_EQUALIZATION_CNTL 0x0290
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_LANE_11_EQUALIZATION_CNTL 0x0292
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_LANE_12_EQUALIZATION_CNTL 0x0294
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_LANE_13_EQUALIZATION_CNTL 0x0296
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_LANE_14_EQUALIZATION_CNTL 0x0298
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_LANE_15_EQUALIZATION_CNTL 0x029a
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_ACS_ENH_CAP_LIST 0x02a0
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP 0x02a4
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL 0x02a6
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_ATS_ENH_CAP_LIST 0x02b0
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_ATS_CAP 0x02b4
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_ATS_CNTL 0x02b6
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_ENH_CAP_LIST 0x02c0
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_CNTL 0x02c4
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_STATUS 0x02c6
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_OUTSTAND_PAGE_REQ_CAPACITY 0x02c8
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_OUTSTAND_PAGE_REQ_ALLOC 0x02cc
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_PASID_ENH_CAP_LIST 0x02d0
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_PASID_CAP 0x02d4
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_PASID_CNTL 0x02d6
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_MC_ENH_CAP_LIST 0x02f0
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_MC_CAP 0x02f4
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_MC_CNTL 0x02f6
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_MC_ADDR0 0x02f8
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_MC_ADDR1 0x02fc
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_MC_RCV0 0x0300
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_MC_RCV1 0x0304
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_ALL0 0x0308
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_ALL1 0x030c
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_UNTRANSLATED_0 0x0310
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_UNTRANSLATED_1 0x0314
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_LTR_ENH_CAP_LIST 0x0320
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_LTR_CAP 0x0324
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_ARI_ENH_CAP_LIST 0x0328
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_ARI_CAP 0x032c
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_ARI_CNTL 0x032e
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_ENH_CAP_LIST 0x0330
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CAP 0x0334
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL 0x0338
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_STATUS 0x033a
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_INITIAL_VFS 0x033c
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_TOTAL_VFS 0x033e
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_NUM_VFS 0x0340
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_FUNC_DEP_LINK 0x0342
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_FIRST_VF_OFFSET 0x0344
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_STRIDE 0x0346
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_DEVICE_ID 0x034a
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_SUPPORTED_PAGE_SIZE 0x034c
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_SYSTEM_PAGE_SIZE 0x0350
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_0 0x0354
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_1 0x0358
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_2 0x035c
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_3 0x0360
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_4 0x0364
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_5 0x0368
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET 0x036c
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_ENH_CAP_LIST 0x0370
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP 0x0374
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CNTL 0x0378
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_DLF_ENH_CAP_LIST 0x0400
++#define cfgBIF_CFG_DEV0_EPF1_0_DATA_LINK_FEATURE_CAP 0x0404
++#define cfgBIF_CFG_DEV0_EPF1_0_DATA_LINK_FEATURE_STATUS 0x0408
++#define cfgBIF_CFG_DEV0_EPF1_0_PHY_16GT_ENH_CAP_LIST 0x0410
++#define cfgBIF_CFG_DEV0_EPF1_0_LINK_CAP_16GT 0x0414
++#define cfgBIF_CFG_DEV0_EPF1_0_LINK_CNTL_16GT 0x0418
++#define cfgBIF_CFG_DEV0_EPF1_0_LINK_STATUS_16GT 0x041c
++#define cfgBIF_CFG_DEV0_EPF1_0_LOCAL_PARITY_MISMATCH_STATUS_16GT 0x0420
++#define cfgBIF_CFG_DEV0_EPF1_0_RTM1_PARITY_MISMATCH_STATUS_16GT 0x0424
++#define cfgBIF_CFG_DEV0_EPF1_0_RTM2_PARITY_MISMATCH_STATUS_16GT 0x0428
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_0_EQUALIZATION_CNTL_16GT 0x0430
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_1_EQUALIZATION_CNTL_16GT 0x0431
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_2_EQUALIZATION_CNTL_16GT 0x0432
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_3_EQUALIZATION_CNTL_16GT 0x0433
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_4_EQUALIZATION_CNTL_16GT 0x0434
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_5_EQUALIZATION_CNTL_16GT 0x0435
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_6_EQUALIZATION_CNTL_16GT 0x0436
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_7_EQUALIZATION_CNTL_16GT 0x0437
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_8_EQUALIZATION_CNTL_16GT 0x0438
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_9_EQUALIZATION_CNTL_16GT 0x0439
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_10_EQUALIZATION_CNTL_16GT 0x043a
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_11_EQUALIZATION_CNTL_16GT 0x043b
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_12_EQUALIZATION_CNTL_16GT 0x043c
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_13_EQUALIZATION_CNTL_16GT 0x043d
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_14_EQUALIZATION_CNTL_16GT 0x043e
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_15_EQUALIZATION_CNTL_16GT 0x043f
++#define cfgBIF_CFG_DEV0_EPF1_0_MARGINING_ENH_CAP_LIST 0x0440
++#define cfgBIF_CFG_DEV0_EPF1_0_MARGINING_PORT_CAP 0x0444
++#define cfgBIF_CFG_DEV0_EPF1_0_MARGINING_PORT_STATUS 0x0446
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_CNTL 0x0448
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_STATUS 0x044a
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_CNTL 0x044c
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_STATUS 0x044e
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_CNTL 0x0450
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_STATUS 0x0452
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_CNTL 0x0454
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_STATUS 0x0456
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_CNTL 0x0458
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_STATUS 0x045a
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_CNTL 0x045c
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_STATUS 0x045e
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_CNTL 0x0460
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_STATUS 0x0462
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_CNTL 0x0464
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_STATUS 0x0466
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_CNTL 0x0468
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_STATUS 0x046a
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_CNTL 0x046c
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_STATUS 0x046e
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_CNTL 0x0470
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_STATUS 0x0472
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_CNTL 0x0474
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_STATUS 0x0476
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_CNTL 0x0478
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_STATUS 0x047a
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_CNTL 0x047c
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_STATUS 0x047e
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_CNTL 0x0480
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_STATUS 0x0482
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_CNTL 0x0484
++#define cfgBIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_STATUS 0x0486
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST 0x04c0
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR1_CAP 0x04c4
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR1_CNTL 0x04c8
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR2_CAP 0x04cc
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR2_CNTL 0x04d0
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR3_CAP 0x04d4
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR3_CNTL 0x04d8
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR4_CAP 0x04dc
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR4_CNTL 0x04e0
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR5_CAP 0x04e4
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR5_CNTL 0x04e8
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR6_CAP 0x04ec
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR6_CNTL 0x04f0
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV 0x0500
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV 0x0504
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW 0x0508
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE 0x050c
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS 0x0510
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_RESET_CONTROL 0x0514
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0 0x0518
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1 0x051c
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2 0x0520
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT 0x0524
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB 0x0528
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS 0x052c
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE 0x0530
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB 0x0534
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB 0x0538
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB 0x053c
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB 0x0540
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB 0x0544
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB 0x0548
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB 0x054c
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB 0x0550
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB 0x0554
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB 0x0558
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB 0x055c
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB 0x0560
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB 0x0564
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB 0x0568
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB 0x056c
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB 0x0570
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB 0x0574
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB 0x0578
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB 0x057c
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB 0x0580
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB 0x0584
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB 0x0588
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB 0x058c
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB 0x0590
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB 0x0594
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB 0x0598
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB 0x059c
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB 0x05a0
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB 0x05a4
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB 0x05a8
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB 0x05ac
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW0 0x05b0
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW1 0x05b4
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW2 0x05b8
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW3 0x05bc
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW4 0x05c0
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW5 0x05c4
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW6 0x05c8
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW7 0x05cc
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW8 0x05d0
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW0 0x05e0
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW1 0x05e4
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW2 0x05e8
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW3 0x05ec
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW4 0x05f0
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW5 0x05f4
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW6 0x05f8
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW7 0x05fc
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW8 0x0600
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW0 0x0610
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW1 0x0614
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW2 0x0618
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW3 0x061c
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW4 0x0620
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW5 0x0624
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW6 0x0628
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW7 0x062c
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW8 0x0630
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW0 0x0640
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW1 0x0644
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW2 0x0648
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW3 0x064c
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW4 0x0650
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW5 0x0654
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW6 0x0658
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW7 0x065c
++#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW8 0x0660
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_swds_bifcfgdecp
++// base address: 0x0
++#define cfgBIF_CFG_DEV0_SWDS0_VENDOR_ID 0x0000
++#define cfgBIF_CFG_DEV0_SWDS0_DEVICE_ID 0x0002
++#define cfgBIF_CFG_DEV0_SWDS0_COMMAND 0x0004
++#define cfgBIF_CFG_DEV0_SWDS0_STATUS 0x0006
++#define cfgBIF_CFG_DEV0_SWDS0_REVISION_ID 0x0008
++#define cfgBIF_CFG_DEV0_SWDS0_PROG_INTERFACE 0x0009
++#define cfgBIF_CFG_DEV0_SWDS0_SUB_CLASS 0x000a
++#define cfgBIF_CFG_DEV0_SWDS0_BASE_CLASS 0x000b
++#define cfgBIF_CFG_DEV0_SWDS0_CACHE_LINE 0x000c
++#define cfgBIF_CFG_DEV0_SWDS0_LATENCY 0x000d
++#define cfgBIF_CFG_DEV0_SWDS0_HEADER 0x000e
++#define cfgBIF_CFG_DEV0_SWDS0_BIST 0x000f
++#define cfgBIF_CFG_DEV0_SWDS0_BASE_ADDR_1 0x0010
++#define cfgBIF_CFG_DEV0_SWDS0_SUB_BUS_NUMBER_LATENCY 0x0018
++#define cfgBIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT 0x001c
++#define cfgBIF_CFG_DEV0_SWDS0_SECONDARY_STATUS 0x001e
++#define cfgBIF_CFG_DEV0_SWDS0_MEM_BASE_LIMIT 0x0020
++#define cfgBIF_CFG_DEV0_SWDS0_PREF_BASE_LIMIT 0x0024
++#define cfgBIF_CFG_DEV0_SWDS0_PREF_BASE_UPPER 0x0028
++#define cfgBIF_CFG_DEV0_SWDS0_PREF_LIMIT_UPPER 0x002c
++#define cfgBIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT_HI 0x0030
++#define cfgBIF_CFG_DEV0_SWDS0_CAP_PTR 0x0034
++#define cfgBIF_CFG_DEV0_SWDS0_INTERRUPT_LINE 0x003c
++#define cfgBIF_CFG_DEV0_SWDS0_INTERRUPT_PIN 0x003d
++#define cfgBIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL 0x003e
++#define cfgBIF_CFG_DEV0_SWDS0_PMI_CAP_LIST 0x0050
++#define cfgBIF_CFG_DEV0_SWDS0_PMI_CAP 0x0052
++#define cfgBIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL 0x0054
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_CAP_LIST 0x0058
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_CAP 0x005a
++#define cfgBIF_CFG_DEV0_SWDS0_DEVICE_CAP 0x005c
++#define cfgBIF_CFG_DEV0_SWDS0_DEVICE_CNTL 0x0060
++#define cfgBIF_CFG_DEV0_SWDS0_DEVICE_STATUS 0x0062
++#define cfgBIF_CFG_DEV0_SWDS0_LINK_CAP 0x0064
++#define cfgBIF_CFG_DEV0_SWDS0_LINK_CNTL 0x0068
++#define cfgBIF_CFG_DEV0_SWDS0_LINK_STATUS 0x006a
++#define cfgBIF_CFG_DEV0_SWDS0_SLOT_CAP 0x006c
++#define cfgBIF_CFG_DEV0_SWDS0_SLOT_CNTL 0x0070
++#define cfgBIF_CFG_DEV0_SWDS0_SLOT_STATUS 0x0072
++#define cfgBIF_CFG_DEV0_SWDS0_DEVICE_CAP2 0x007c
++#define cfgBIF_CFG_DEV0_SWDS0_DEVICE_CNTL2 0x0080
++#define cfgBIF_CFG_DEV0_SWDS0_DEVICE_STATUS2 0x0082
++#define cfgBIF_CFG_DEV0_SWDS0_LINK_CAP2 0x0084
++#define cfgBIF_CFG_DEV0_SWDS0_LINK_CNTL2 0x0088
++#define cfgBIF_CFG_DEV0_SWDS0_LINK_STATUS2 0x008a
++#define cfgBIF_CFG_DEV0_SWDS0_SLOT_CAP2 0x008c
++#define cfgBIF_CFG_DEV0_SWDS0_SLOT_CNTL2 0x0090
++#define cfgBIF_CFG_DEV0_SWDS0_SLOT_STATUS2 0x0092
++#define cfgBIF_CFG_DEV0_SWDS0_MSI_CAP_LIST 0x00a0
++#define cfgBIF_CFG_DEV0_SWDS0_MSI_MSG_CNTL 0x00a2
++#define cfgBIF_CFG_DEV0_SWDS0_MSI_MSG_ADDR_LO 0x00a4
++#define cfgBIF_CFG_DEV0_SWDS0_MSI_MSG_ADDR_HI 0x00a8
++#define cfgBIF_CFG_DEV0_SWDS0_MSI_MSG_DATA 0x00a8
++#define cfgBIF_CFG_DEV0_SWDS0_MSI_MSG_DATA_64 0x00ac
++#define cfgBIF_CFG_DEV0_SWDS0_SSID_CAP_LIST 0x00c0
++#define cfgBIF_CFG_DEV0_SWDS0_SSID_CAP 0x00c4
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_HDR 0x0104
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC1 0x0108
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC2 0x010c
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_VC_ENH_CAP_LIST 0x0110
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG1 0x0114
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG2 0x0118
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CNTL 0x011c
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_STATUS 0x011e
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CAP 0x0120
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL 0x0124
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_STATUS 0x012a
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CAP 0x012c
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL 0x0130
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_STATUS 0x0136
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST 0x0140
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_DW1 0x0144
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_DW2 0x0148
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS 0x0154
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK 0x0158
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY 0x015c
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS 0x0160
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK 0x0164
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL 0x0168
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG0 0x016c
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG1 0x0170
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG2 0x0174
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG3 0x0178
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG0 0x0188
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG1 0x018c
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG2 0x0190
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG3 0x0194
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_SECONDARY_ENH_CAP_LIST 0x0270
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_LINK_CNTL3 0x0274
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_LANE_ERROR_STATUS 0x0278
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_LANE_0_EQUALIZATION_CNTL 0x027c
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_LANE_1_EQUALIZATION_CNTL 0x027e
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_LANE_2_EQUALIZATION_CNTL 0x0280
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_LANE_3_EQUALIZATION_CNTL 0x0282
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_LANE_4_EQUALIZATION_CNTL 0x0284
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_LANE_5_EQUALIZATION_CNTL 0x0286
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_LANE_6_EQUALIZATION_CNTL 0x0288
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_LANE_7_EQUALIZATION_CNTL 0x028a
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_LANE_8_EQUALIZATION_CNTL 0x028c
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_LANE_9_EQUALIZATION_CNTL 0x028e
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_LANE_10_EQUALIZATION_CNTL 0x0290
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_LANE_11_EQUALIZATION_CNTL 0x0292
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_LANE_12_EQUALIZATION_CNTL 0x0294
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_LANE_13_EQUALIZATION_CNTL 0x0296
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_LANE_14_EQUALIZATION_CNTL 0x0298
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_LANE_15_EQUALIZATION_CNTL 0x029a
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_ACS_ENH_CAP_LIST 0x02a0
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP 0x02a4
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL 0x02a6
++#define cfgBIF_CFG_DEV0_SWDS0_PCIE_DLF_ENH_CAP_LIST 0x0400
++#define cfgBIF_CFG_DEV0_SWDS0_DATA_LINK_FEATURE_CAP 0x0404
++#define cfgBIF_CFG_DEV0_SWDS0_DATA_LINK_FEATURE_STATUS 0x0408
++#define cfgBIF_CFG_DEV0_SWDS0_PHY_16GT_ENH_CAP_LIST 0x0410
++#define cfgBIF_CFG_DEV0_SWDS0_LINK_CAP_16GT 0x0414
++#define cfgBIF_CFG_DEV0_SWDS0_LINK_CNTL_16GT 0x0418
++#define cfgBIF_CFG_DEV0_SWDS0_LINK_STATUS_16GT 0x041c
++#define cfgBIF_CFG_DEV0_SWDS0_LOCAL_PARITY_MISMATCH_STATUS_16GT 0x0420
++#define cfgBIF_CFG_DEV0_SWDS0_RTM1_PARITY_MISMATCH_STATUS_16GT 0x0424
++#define cfgBIF_CFG_DEV0_SWDS0_RTM2_PARITY_MISMATCH_STATUS_16GT 0x0428
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_0_EQUALIZATION_CNTL_16GT 0x0430
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_1_EQUALIZATION_CNTL_16GT 0x0431
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_2_EQUALIZATION_CNTL_16GT 0x0432
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_3_EQUALIZATION_CNTL_16GT 0x0433
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_4_EQUALIZATION_CNTL_16GT 0x0434
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_5_EQUALIZATION_CNTL_16GT 0x0435
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_6_EQUALIZATION_CNTL_16GT 0x0436
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_7_EQUALIZATION_CNTL_16GT 0x0437
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_8_EQUALIZATION_CNTL_16GT 0x0438
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_9_EQUALIZATION_CNTL_16GT 0x0439
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_10_EQUALIZATION_CNTL_16GT 0x043a
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_11_EQUALIZATION_CNTL_16GT 0x043b
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_12_EQUALIZATION_CNTL_16GT 0x043c
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_13_EQUALIZATION_CNTL_16GT 0x043d
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_14_EQUALIZATION_CNTL_16GT 0x043e
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_15_EQUALIZATION_CNTL_16GT 0x043f
++#define cfgBIF_CFG_DEV0_SWDS0_MARGINING_ENH_CAP_LIST 0x0440
++#define cfgBIF_CFG_DEV0_SWDS0_MARGINING_PORT_CAP 0x0444
++#define cfgBIF_CFG_DEV0_SWDS0_MARGINING_PORT_STATUS 0x0446
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_CNTL 0x0448
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_STATUS 0x044a
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_CNTL 0x044c
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_STATUS 0x044e
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_CNTL 0x0450
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_STATUS 0x0452
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_CNTL 0x0454
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_STATUS 0x0456
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_CNTL 0x0458
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_STATUS 0x045a
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_CNTL 0x045c
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_STATUS 0x045e
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_CNTL 0x0460
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_STATUS 0x0462
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_CNTL 0x0464
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_STATUS 0x0466
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_CNTL 0x0468
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_STATUS 0x046a
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_CNTL 0x046c
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_STATUS 0x046e
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_CNTL 0x0470
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_STATUS 0x0472
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_CNTL 0x0474
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_STATUS 0x0476
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_CNTL 0x0478
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_STATUS 0x047a
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_CNTL 0x047c
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_STATUS 0x047e
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_CNTL 0x0480
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_STATUS 0x0482
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_CNTL 0x0484
++#define cfgBIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_STATUS 0x0486
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf0_bifcfgdecp
++// base address: 0x0
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_VENDOR_ID 0x0000
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_DEVICE_ID 0x0002
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_COMMAND 0x0004
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_STATUS 0x0006
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_REVISION_ID 0x0008
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PROG_INTERFACE 0x0009
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_SUB_CLASS 0x000a
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_BASE_CLASS 0x000b
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_CACHE_LINE 0x000c
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_LATENCY 0x000d
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_HEADER 0x000e
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_BIST 0x000f
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_1 0x0010
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_2 0x0014
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_3 0x0018
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_4 0x001c
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_5 0x0020
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_6 0x0024
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_ADAPTER_ID 0x002c
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_ROM_BASE_ADDR 0x0030
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_CAP_PTR 0x0034
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_INTERRUPT_LINE 0x003c
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_INTERRUPT_PIN 0x003d
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP_LIST 0x0064
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP 0x0066
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP 0x0068
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL 0x006c
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS 0x006e
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP 0x0070
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL 0x0074
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS 0x0076
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2 0x0088
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2 0x008c
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS2 0x008e
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP2 0x0090
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2 0x0094
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2 0x0096
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_SLOT_CAP2 0x0098
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_SLOT_CNTL2 0x009c
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_SLOT_STATUS2 0x009e
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_MSI_CAP_LIST 0x00a0
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_CNTL 0x00a2
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_ADDR_LO 0x00a4
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_ADDR_HI 0x00a8
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_DATA 0x00a8
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_MSI_MASK 0x00ac
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_DATA_64 0x00ac
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_MSI_MASK_64 0x00b0
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_MSI_PENDING 0x00b0
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_MSI_PENDING_64 0x00b4
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_MSIX_CAP_LIST 0x00c0
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_MSIX_MSG_CNTL 0x00c2
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_MSIX_TABLE 0x00c4
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_MSIX_PBA 0x00c8
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_HDR 0x0104
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC1 0x0108
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC2 0x010c
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS 0x0154
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK 0x0158
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY 0x015c
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS 0x0160
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK 0x0164
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL 0x0168
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG0 0x016c
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG1 0x0170
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG2 0x0174
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG3 0x0178
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG0 0x0188
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG1 0x018c
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG2 0x0190
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG3 0x0194
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_ENH_CAP_LIST 0x02b0
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CAP 0x02b4
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CNTL 0x02b6
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_ENH_CAP_LIST 0x0328
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CAP 0x032c
++#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CNTL 0x032e
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf1_bifcfgdecp
++// base address: 0x0
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_VENDOR_ID 0x0000
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_DEVICE_ID 0x0002
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_COMMAND 0x0004
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_STATUS 0x0006
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_REVISION_ID 0x0008
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PROG_INTERFACE 0x0009
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_SUB_CLASS 0x000a
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_BASE_CLASS 0x000b
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_CACHE_LINE 0x000c
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_LATENCY 0x000d
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_HEADER 0x000e
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_BIST 0x000f
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_1 0x0010
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_2 0x0014
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_3 0x0018
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_4 0x001c
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_5 0x0020
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_6 0x0024
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_ADAPTER_ID 0x002c
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_ROM_BASE_ADDR 0x0030
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_CAP_PTR 0x0034
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_INTERRUPT_LINE 0x003c
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_INTERRUPT_PIN 0x003d
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP_LIST 0x0064
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP 0x0066
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP 0x0068
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL 0x006c
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS 0x006e
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP 0x0070
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL 0x0074
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS 0x0076
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2 0x0088
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2 0x008c
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS2 0x008e
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP2 0x0090
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2 0x0094
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2 0x0096
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_SLOT_CAP2 0x0098
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_SLOT_CNTL2 0x009c
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_SLOT_STATUS2 0x009e
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_MSI_CAP_LIST 0x00a0
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_CNTL 0x00a2
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_ADDR_LO 0x00a4
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_ADDR_HI 0x00a8
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_DATA 0x00a8
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_MSI_MASK 0x00ac
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_DATA_64 0x00ac
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_MSI_MASK_64 0x00b0
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_MSI_PENDING 0x00b0
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_MSI_PENDING_64 0x00b4
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_MSIX_CAP_LIST 0x00c0
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_MSIX_MSG_CNTL 0x00c2
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_MSIX_TABLE 0x00c4
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_MSIX_PBA 0x00c8
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_HDR 0x0104
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC1 0x0108
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC2 0x010c
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS 0x0154
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK 0x0158
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY 0x015c
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS 0x0160
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK 0x0164
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL 0x0168
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG0 0x016c
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG1 0x0170
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG2 0x0174
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG3 0x0178
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG0 0x0188
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG1 0x018c
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG2 0x0190
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG3 0x0194
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_ENH_CAP_LIST 0x02b0
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CAP 0x02b4
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CNTL 0x02b6
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_ENH_CAP_LIST 0x0328
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CAP 0x032c
++#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CNTL 0x032e
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf2_bifcfgdecp
++// base address: 0x0
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_VENDOR_ID 0x0000
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_DEVICE_ID 0x0002
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_COMMAND 0x0004
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_STATUS 0x0006
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_REVISION_ID 0x0008
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PROG_INTERFACE 0x0009
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_SUB_CLASS 0x000a
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_BASE_CLASS 0x000b
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_CACHE_LINE 0x000c
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_LATENCY 0x000d
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_HEADER 0x000e
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_BIST 0x000f
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_1 0x0010
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_2 0x0014
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_3 0x0018
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_4 0x001c
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_5 0x0020
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_6 0x0024
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_ADAPTER_ID 0x002c
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_ROM_BASE_ADDR 0x0030
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_CAP_PTR 0x0034
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_INTERRUPT_LINE 0x003c
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_INTERRUPT_PIN 0x003d
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP_LIST 0x0064
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP 0x0066
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP 0x0068
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL 0x006c
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS 0x006e
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP 0x0070
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL 0x0074
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS 0x0076
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2 0x0088
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2 0x008c
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS2 0x008e
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP2 0x0090
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2 0x0094
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2 0x0096
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_SLOT_CAP2 0x0098
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_SLOT_CNTL2 0x009c
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_SLOT_STATUS2 0x009e
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_MSI_CAP_LIST 0x00a0
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_CNTL 0x00a2
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_ADDR_LO 0x00a4
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_ADDR_HI 0x00a8
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_DATA 0x00a8
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_MSI_MASK 0x00ac
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_DATA_64 0x00ac
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_MSI_MASK_64 0x00b0
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_MSI_PENDING 0x00b0
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_MSI_PENDING_64 0x00b4
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_MSIX_CAP_LIST 0x00c0
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_MSIX_MSG_CNTL 0x00c2
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_MSIX_TABLE 0x00c4
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_MSIX_PBA 0x00c8
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_HDR 0x0104
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC1 0x0108
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC2 0x010c
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS 0x0154
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK 0x0158
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY 0x015c
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS 0x0160
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK 0x0164
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL 0x0168
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG0 0x016c
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG1 0x0170
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG2 0x0174
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG3 0x0178
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG0 0x0188
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG1 0x018c
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG2 0x0190
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG3 0x0194
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_ENH_CAP_LIST 0x02b0
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CAP 0x02b4
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CNTL 0x02b6
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_ENH_CAP_LIST 0x0328
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CAP 0x032c
++#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CNTL 0x032e
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf3_bifcfgdecp
++// base address: 0x0
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_VENDOR_ID 0x0000
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_DEVICE_ID 0x0002
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_COMMAND 0x0004
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_STATUS 0x0006
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_REVISION_ID 0x0008
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PROG_INTERFACE 0x0009
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_SUB_CLASS 0x000a
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_BASE_CLASS 0x000b
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_CACHE_LINE 0x000c
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_LATENCY 0x000d
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_HEADER 0x000e
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_BIST 0x000f
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_1 0x0010
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_2 0x0014
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_3 0x0018
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_4 0x001c
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_5 0x0020
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_6 0x0024
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_ADAPTER_ID 0x002c
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_ROM_BASE_ADDR 0x0030
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_CAP_PTR 0x0034
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_INTERRUPT_LINE 0x003c
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_INTERRUPT_PIN 0x003d
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP_LIST 0x0064
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP 0x0066
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP 0x0068
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL 0x006c
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS 0x006e
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP 0x0070
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL 0x0074
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS 0x0076
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2 0x0088
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2 0x008c
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS2 0x008e
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP2 0x0090
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2 0x0094
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2 0x0096
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_SLOT_CAP2 0x0098
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_SLOT_CNTL2 0x009c
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_SLOT_STATUS2 0x009e
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_MSI_CAP_LIST 0x00a0
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_CNTL 0x00a2
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_ADDR_LO 0x00a4
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_ADDR_HI 0x00a8
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_DATA 0x00a8
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_MSI_MASK 0x00ac
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_DATA_64 0x00ac
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_MSI_MASK_64 0x00b0
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_MSI_PENDING 0x00b0
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_MSI_PENDING_64 0x00b4
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_MSIX_CAP_LIST 0x00c0
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_MSIX_MSG_CNTL 0x00c2
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_MSIX_TABLE 0x00c4
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_MSIX_PBA 0x00c8
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_HDR 0x0104
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC1 0x0108
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC2 0x010c
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS 0x0154
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK 0x0158
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY 0x015c
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS 0x0160
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK 0x0164
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL 0x0168
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG0 0x016c
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG1 0x0170
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG2 0x0174
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG3 0x0178
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG0 0x0188
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG1 0x018c
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG2 0x0190
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG3 0x0194
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_ENH_CAP_LIST 0x02b0
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CAP 0x02b4
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CNTL 0x02b6
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_ENH_CAP_LIST 0x0328
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CAP 0x032c
++#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CNTL 0x032e
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf4_bifcfgdecp
++// base address: 0x0
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_VENDOR_ID 0x0000
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_DEVICE_ID 0x0002
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_COMMAND 0x0004
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_STATUS 0x0006
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_REVISION_ID 0x0008
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PROG_INTERFACE 0x0009
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_SUB_CLASS 0x000a
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_BASE_CLASS 0x000b
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_CACHE_LINE 0x000c
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_LATENCY 0x000d
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_HEADER 0x000e
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_BIST 0x000f
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_1 0x0010
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_2 0x0014
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_3 0x0018
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_4 0x001c
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_5 0x0020
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_6 0x0024
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_ADAPTER_ID 0x002c
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_ROM_BASE_ADDR 0x0030
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_CAP_PTR 0x0034
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_INTERRUPT_LINE 0x003c
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_INTERRUPT_PIN 0x003d
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP_LIST 0x0064
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP 0x0066
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP 0x0068
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL 0x006c
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS 0x006e
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP 0x0070
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL 0x0074
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS 0x0076
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2 0x0088
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2 0x008c
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS2 0x008e
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP2 0x0090
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2 0x0094
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2 0x0096
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_SLOT_CAP2 0x0098
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_SLOT_CNTL2 0x009c
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_SLOT_STATUS2 0x009e
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_MSI_CAP_LIST 0x00a0
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_CNTL 0x00a2
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_ADDR_LO 0x00a4
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_ADDR_HI 0x00a8
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_DATA 0x00a8
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_MSI_MASK 0x00ac
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_DATA_64 0x00ac
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_MSI_MASK_64 0x00b0
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_MSI_PENDING 0x00b0
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_MSI_PENDING_64 0x00b4
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_MSIX_CAP_LIST 0x00c0
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_MSIX_MSG_CNTL 0x00c2
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_MSIX_TABLE 0x00c4
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_MSIX_PBA 0x00c8
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_HDR 0x0104
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC1 0x0108
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC2 0x010c
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS 0x0154
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK 0x0158
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY 0x015c
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS 0x0160
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK 0x0164
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL 0x0168
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG0 0x016c
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG1 0x0170
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG2 0x0174
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG3 0x0178
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG0 0x0188
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG1 0x018c
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG2 0x0190
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG3 0x0194
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_ENH_CAP_LIST 0x02b0
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CAP 0x02b4
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CNTL 0x02b6
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_ENH_CAP_LIST 0x0328
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CAP 0x032c
++#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CNTL 0x032e
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf5_bifcfgdecp
++// base address: 0x0
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_VENDOR_ID 0x0000
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_DEVICE_ID 0x0002
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_COMMAND 0x0004
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_STATUS 0x0006
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_REVISION_ID 0x0008
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PROG_INTERFACE 0x0009
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_SUB_CLASS 0x000a
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_BASE_CLASS 0x000b
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_CACHE_LINE 0x000c
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_LATENCY 0x000d
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_HEADER 0x000e
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_BIST 0x000f
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_1 0x0010
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_2 0x0014
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_3 0x0018
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_4 0x001c
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_5 0x0020
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_6 0x0024
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_ADAPTER_ID 0x002c
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_ROM_BASE_ADDR 0x0030
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_CAP_PTR 0x0034
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_INTERRUPT_LINE 0x003c
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_INTERRUPT_PIN 0x003d
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP_LIST 0x0064
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP 0x0066
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP 0x0068
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL 0x006c
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS 0x006e
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP 0x0070
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL 0x0074
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS 0x0076
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2 0x0088
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2 0x008c
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS2 0x008e
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP2 0x0090
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2 0x0094
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2 0x0096
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_SLOT_CAP2 0x0098
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_SLOT_CNTL2 0x009c
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_SLOT_STATUS2 0x009e
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_MSI_CAP_LIST 0x00a0
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_CNTL 0x00a2
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_ADDR_LO 0x00a4
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_ADDR_HI 0x00a8
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_DATA 0x00a8
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_MSI_MASK 0x00ac
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_DATA_64 0x00ac
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_MSI_MASK_64 0x00b0
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_MSI_PENDING 0x00b0
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_MSI_PENDING_64 0x00b4
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_MSIX_CAP_LIST 0x00c0
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_MSIX_MSG_CNTL 0x00c2
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_MSIX_TABLE 0x00c4
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_MSIX_PBA 0x00c8
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_HDR 0x0104
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC1 0x0108
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC2 0x010c
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS 0x0154
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK 0x0158
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY 0x015c
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS 0x0160
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK 0x0164
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL 0x0168
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG0 0x016c
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG1 0x0170
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG2 0x0174
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG3 0x0178
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG0 0x0188
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG1 0x018c
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG2 0x0190
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG3 0x0194
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_ENH_CAP_LIST 0x02b0
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CAP 0x02b4
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CNTL 0x02b6
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_ENH_CAP_LIST 0x0328
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CAP 0x032c
++#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CNTL 0x032e
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf6_bifcfgdecp
++// base address: 0x0
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_VENDOR_ID 0x0000
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_DEVICE_ID 0x0002
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_COMMAND 0x0004
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_STATUS 0x0006
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_REVISION_ID 0x0008
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PROG_INTERFACE 0x0009
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_SUB_CLASS 0x000a
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_BASE_CLASS 0x000b
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_CACHE_LINE 0x000c
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_LATENCY 0x000d
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_HEADER 0x000e
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_BIST 0x000f
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_1 0x0010
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_2 0x0014
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_3 0x0018
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_4 0x001c
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_5 0x0020
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_6 0x0024
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_ADAPTER_ID 0x002c
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_ROM_BASE_ADDR 0x0030
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_CAP_PTR 0x0034
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_INTERRUPT_LINE 0x003c
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_INTERRUPT_PIN 0x003d
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP_LIST 0x0064
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP 0x0066
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP 0x0068
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL 0x006c
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS 0x006e
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP 0x0070
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL 0x0074
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS 0x0076
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2 0x0088
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2 0x008c
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS2 0x008e
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP2 0x0090
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2 0x0094
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2 0x0096
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_SLOT_CAP2 0x0098
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_SLOT_CNTL2 0x009c
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_SLOT_STATUS2 0x009e
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_MSI_CAP_LIST 0x00a0
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_CNTL 0x00a2
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_ADDR_LO 0x00a4
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_ADDR_HI 0x00a8
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_DATA 0x00a8
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_MSI_MASK 0x00ac
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_DATA_64 0x00ac
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_MSI_MASK_64 0x00b0
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_MSI_PENDING 0x00b0
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_MSI_PENDING_64 0x00b4
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_MSIX_CAP_LIST 0x00c0
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_MSIX_MSG_CNTL 0x00c2
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_MSIX_TABLE 0x00c4
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_MSIX_PBA 0x00c8
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_HDR 0x0104
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC1 0x0108
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC2 0x010c
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS 0x0154
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK 0x0158
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY 0x015c
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS 0x0160
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK 0x0164
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL 0x0168
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG0 0x016c
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG1 0x0170
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG2 0x0174
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG3 0x0178
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG0 0x0188
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG1 0x018c
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG2 0x0190
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG3 0x0194
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_ENH_CAP_LIST 0x02b0
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CAP 0x02b4
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CNTL 0x02b6
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_ENH_CAP_LIST 0x0328
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CAP 0x032c
++#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CNTL 0x032e
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf7_bifcfgdecp
++// base address: 0x0
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_VENDOR_ID 0x0000
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_DEVICE_ID 0x0002
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_COMMAND 0x0004
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_STATUS 0x0006
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_REVISION_ID 0x0008
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PROG_INTERFACE 0x0009
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_SUB_CLASS 0x000a
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_BASE_CLASS 0x000b
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_CACHE_LINE 0x000c
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_LATENCY 0x000d
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_HEADER 0x000e
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_BIST 0x000f
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_1 0x0010
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_2 0x0014
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_3 0x0018
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_4 0x001c
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_5 0x0020
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_6 0x0024
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_ADAPTER_ID 0x002c
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_ROM_BASE_ADDR 0x0030
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_CAP_PTR 0x0034
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_INTERRUPT_LINE 0x003c
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_INTERRUPT_PIN 0x003d
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP_LIST 0x0064
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP 0x0066
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP 0x0068
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL 0x006c
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS 0x006e
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP 0x0070
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL 0x0074
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS 0x0076
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2 0x0088
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2 0x008c
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS2 0x008e
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP2 0x0090
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2 0x0094
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2 0x0096
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_SLOT_CAP2 0x0098
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_SLOT_CNTL2 0x009c
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_SLOT_STATUS2 0x009e
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_MSI_CAP_LIST 0x00a0
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_CNTL 0x00a2
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_ADDR_LO 0x00a4
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_ADDR_HI 0x00a8
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_DATA 0x00a8
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_MSI_MASK 0x00ac
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_DATA_64 0x00ac
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_MSI_MASK_64 0x00b0
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_MSI_PENDING 0x00b0
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_MSI_PENDING_64 0x00b4
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_MSIX_CAP_LIST 0x00c0
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_MSIX_MSG_CNTL 0x00c2
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_MSIX_TABLE 0x00c4
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_MSIX_PBA 0x00c8
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_HDR 0x0104
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC1 0x0108
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC2 0x010c
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS 0x0154
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK 0x0158
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY 0x015c
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS 0x0160
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK 0x0164
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL 0x0168
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG0 0x016c
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG1 0x0170
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG2 0x0174
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG3 0x0178
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG0 0x0188
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG1 0x018c
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG2 0x0190
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG3 0x0194
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_ENH_CAP_LIST 0x02b0
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CAP 0x02b4
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CNTL 0x02b6
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_ENH_CAP_LIST 0x0328
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CAP 0x032c
++#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CNTL 0x032e
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf8_bifcfgdecp
++// base address: 0x0
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_VENDOR_ID 0x0000
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_DEVICE_ID 0x0002
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_COMMAND 0x0004
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_STATUS 0x0006
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_REVISION_ID 0x0008
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PROG_INTERFACE 0x0009
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_SUB_CLASS 0x000a
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_BASE_CLASS 0x000b
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_CACHE_LINE 0x000c
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_LATENCY 0x000d
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_HEADER 0x000e
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_BIST 0x000f
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_1 0x0010
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_2 0x0014
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_3 0x0018
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_4 0x001c
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_5 0x0020
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_6 0x0024
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_ADAPTER_ID 0x002c
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_ROM_BASE_ADDR 0x0030
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_CAP_PTR 0x0034
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_INTERRUPT_LINE 0x003c
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_INTERRUPT_PIN 0x003d
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP_LIST 0x0064
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP 0x0066
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP 0x0068
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL 0x006c
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS 0x006e
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP 0x0070
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL 0x0074
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS 0x0076
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2 0x0088
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2 0x008c
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS2 0x008e
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP2 0x0090
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2 0x0094
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2 0x0096
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_SLOT_CAP2 0x0098
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_SLOT_CNTL2 0x009c
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_SLOT_STATUS2 0x009e
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_MSI_CAP_LIST 0x00a0
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_CNTL 0x00a2
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_ADDR_LO 0x00a4
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_ADDR_HI 0x00a8
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_DATA 0x00a8
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_MSI_MASK 0x00ac
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_DATA_64 0x00ac
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_MSI_MASK_64 0x00b0
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_MSI_PENDING 0x00b0
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_MSI_PENDING_64 0x00b4
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_MSIX_CAP_LIST 0x00c0
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_MSIX_MSG_CNTL 0x00c2
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_MSIX_TABLE 0x00c4
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_MSIX_PBA 0x00c8
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_HDR 0x0104
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC1 0x0108
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC2 0x010c
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS 0x0154
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK 0x0158
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY 0x015c
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS 0x0160
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK 0x0164
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL 0x0168
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG0 0x016c
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG1 0x0170
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG2 0x0174
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG3 0x0178
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG0 0x0188
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG1 0x018c
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG2 0x0190
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG3 0x0194
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_ENH_CAP_LIST 0x02b0
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CAP 0x02b4
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CNTL 0x02b6
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_ENH_CAP_LIST 0x0328
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CAP 0x032c
++#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CNTL 0x032e
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf9_bifcfgdecp
++// base address: 0x0
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_VENDOR_ID 0x0000
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_DEVICE_ID 0x0002
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_COMMAND 0x0004
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_STATUS 0x0006
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_REVISION_ID 0x0008
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PROG_INTERFACE 0x0009
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_SUB_CLASS 0x000a
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_BASE_CLASS 0x000b
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_CACHE_LINE 0x000c
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_LATENCY 0x000d
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_HEADER 0x000e
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_BIST 0x000f
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_1 0x0010
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_2 0x0014
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_3 0x0018
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_4 0x001c
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_5 0x0020
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_6 0x0024
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_ADAPTER_ID 0x002c
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_ROM_BASE_ADDR 0x0030
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_CAP_PTR 0x0034
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_INTERRUPT_LINE 0x003c
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_INTERRUPT_PIN 0x003d
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP_LIST 0x0064
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP 0x0066
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP 0x0068
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL 0x006c
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS 0x006e
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP 0x0070
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL 0x0074
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS 0x0076
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2 0x0088
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2 0x008c
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS2 0x008e
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP2 0x0090
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2 0x0094
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2 0x0096
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_SLOT_CAP2 0x0098
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_SLOT_CNTL2 0x009c
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_SLOT_STATUS2 0x009e
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_MSI_CAP_LIST 0x00a0
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_CNTL 0x00a2
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_ADDR_LO 0x00a4
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_ADDR_HI 0x00a8
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_DATA 0x00a8
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_MSI_MASK 0x00ac
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_DATA_64 0x00ac
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_MSI_MASK_64 0x00b0
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_MSI_PENDING 0x00b0
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_MSI_PENDING_64 0x00b4
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_MSIX_CAP_LIST 0x00c0
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_MSIX_MSG_CNTL 0x00c2
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_MSIX_TABLE 0x00c4
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_MSIX_PBA 0x00c8
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_HDR 0x0104
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC1 0x0108
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC2 0x010c
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS 0x0154
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK 0x0158
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY 0x015c
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS 0x0160
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK 0x0164
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL 0x0168
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG0 0x016c
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG1 0x0170
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG2 0x0174
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG3 0x0178
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG0 0x0188
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG1 0x018c
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG2 0x0190
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG3 0x0194
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_ENH_CAP_LIST 0x02b0
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CAP 0x02b4
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CNTL 0x02b6
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_ENH_CAP_LIST 0x0328
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CAP 0x032c
++#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CNTL 0x032e
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf10_bifcfgdecp
++// base address: 0x0
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_VENDOR_ID 0x0000
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_DEVICE_ID 0x0002
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_COMMAND 0x0004
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_STATUS 0x0006
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_REVISION_ID 0x0008
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PROG_INTERFACE 0x0009
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_SUB_CLASS 0x000a
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_BASE_CLASS 0x000b
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_CACHE_LINE 0x000c
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_LATENCY 0x000d
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_HEADER 0x000e
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_BIST 0x000f
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_1 0x0010
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_2 0x0014
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_3 0x0018
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_4 0x001c
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_5 0x0020
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_6 0x0024
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_ADAPTER_ID 0x002c
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_ROM_BASE_ADDR 0x0030
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_CAP_PTR 0x0034
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_INTERRUPT_LINE 0x003c
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_INTERRUPT_PIN 0x003d
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP_LIST 0x0064
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP 0x0066
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP 0x0068
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL 0x006c
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS 0x006e
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP 0x0070
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL 0x0074
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS 0x0076
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2 0x0088
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2 0x008c
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS2 0x008e
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP2 0x0090
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2 0x0094
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2 0x0096
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_SLOT_CAP2 0x0098
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_SLOT_CNTL2 0x009c
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_SLOT_STATUS2 0x009e
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_MSI_CAP_LIST 0x00a0
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_CNTL 0x00a2
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_ADDR_LO 0x00a4
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_ADDR_HI 0x00a8
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_DATA 0x00a8
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_MSI_MASK 0x00ac
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_DATA_64 0x00ac
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_MSI_MASK_64 0x00b0
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_MSI_PENDING 0x00b0
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_MSI_PENDING_64 0x00b4
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_MSIX_CAP_LIST 0x00c0
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_MSIX_MSG_CNTL 0x00c2
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_MSIX_TABLE 0x00c4
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_MSIX_PBA 0x00c8
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_HDR 0x0104
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC1 0x0108
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC2 0x010c
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS 0x0154
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK 0x0158
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY 0x015c
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS 0x0160
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK 0x0164
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL 0x0168
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG0 0x016c
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG1 0x0170
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG2 0x0174
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG3 0x0178
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG0 0x0188
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG1 0x018c
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG2 0x0190
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG3 0x0194
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_ENH_CAP_LIST 0x02b0
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CAP 0x02b4
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CNTL 0x02b6
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_ENH_CAP_LIST 0x0328
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CAP 0x032c
++#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CNTL 0x032e
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf11_bifcfgdecp
++// base address: 0x0
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_VENDOR_ID 0x0000
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_DEVICE_ID 0x0002
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_COMMAND 0x0004
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_STATUS 0x0006
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_REVISION_ID 0x0008
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PROG_INTERFACE 0x0009
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_SUB_CLASS 0x000a
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_BASE_CLASS 0x000b
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_CACHE_LINE 0x000c
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_LATENCY 0x000d
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_HEADER 0x000e
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_BIST 0x000f
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_1 0x0010
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_2 0x0014
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_3 0x0018
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_4 0x001c
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_5 0x0020
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_6 0x0024
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_ADAPTER_ID 0x002c
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_ROM_BASE_ADDR 0x0030
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_CAP_PTR 0x0034
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_INTERRUPT_LINE 0x003c
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_INTERRUPT_PIN 0x003d
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP_LIST 0x0064
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP 0x0066
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP 0x0068
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL 0x006c
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS 0x006e
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP 0x0070
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL 0x0074
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS 0x0076
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2 0x0088
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2 0x008c
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS2 0x008e
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP2 0x0090
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2 0x0094
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2 0x0096
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_SLOT_CAP2 0x0098
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_SLOT_CNTL2 0x009c
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_SLOT_STATUS2 0x009e
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_MSI_CAP_LIST 0x00a0
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_CNTL 0x00a2
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_ADDR_LO 0x00a4
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_ADDR_HI 0x00a8
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_DATA 0x00a8
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_MSI_MASK 0x00ac
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_DATA_64 0x00ac
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_MSI_MASK_64 0x00b0
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_MSI_PENDING 0x00b0
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_MSI_PENDING_64 0x00b4
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_MSIX_CAP_LIST 0x00c0
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_MSIX_MSG_CNTL 0x00c2
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_MSIX_TABLE 0x00c4
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_MSIX_PBA 0x00c8
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_HDR 0x0104
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC1 0x0108
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC2 0x010c
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS 0x0154
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK 0x0158
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY 0x015c
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS 0x0160
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK 0x0164
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL 0x0168
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG0 0x016c
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG1 0x0170
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG2 0x0174
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG3 0x0178
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG0 0x0188
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG1 0x018c
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG2 0x0190
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG3 0x0194
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_ENH_CAP_LIST 0x02b0
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CAP 0x02b4
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CNTL 0x02b6
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_ENH_CAP_LIST 0x0328
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CAP 0x032c
++#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CNTL 0x032e
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf12_bifcfgdecp
++// base address: 0x0
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_VENDOR_ID 0x0000
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_DEVICE_ID 0x0002
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_COMMAND 0x0004
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_STATUS 0x0006
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_REVISION_ID 0x0008
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PROG_INTERFACE 0x0009
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_SUB_CLASS 0x000a
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_BASE_CLASS 0x000b
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_CACHE_LINE 0x000c
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_LATENCY 0x000d
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_HEADER 0x000e
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_BIST 0x000f
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_1 0x0010
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_2 0x0014
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_3 0x0018
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_4 0x001c
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_5 0x0020
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_6 0x0024
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_ADAPTER_ID 0x002c
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_ROM_BASE_ADDR 0x0030
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_CAP_PTR 0x0034
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_INTERRUPT_LINE 0x003c
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_INTERRUPT_PIN 0x003d
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP_LIST 0x0064
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP 0x0066
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP 0x0068
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL 0x006c
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS 0x006e
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP 0x0070
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL 0x0074
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS 0x0076
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2 0x0088
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2 0x008c
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS2 0x008e
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP2 0x0090
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2 0x0094
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2 0x0096
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_SLOT_CAP2 0x0098
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_SLOT_CNTL2 0x009c
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_SLOT_STATUS2 0x009e
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_MSI_CAP_LIST 0x00a0
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_CNTL 0x00a2
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_ADDR_LO 0x00a4
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_ADDR_HI 0x00a8
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_DATA 0x00a8
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_MSI_MASK 0x00ac
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_DATA_64 0x00ac
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_MSI_MASK_64 0x00b0
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_MSI_PENDING 0x00b0
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_MSI_PENDING_64 0x00b4
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_MSIX_CAP_LIST 0x00c0
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_MSIX_MSG_CNTL 0x00c2
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_MSIX_TABLE 0x00c4
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_MSIX_PBA 0x00c8
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_HDR 0x0104
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC1 0x0108
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC2 0x010c
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS 0x0154
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK 0x0158
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY 0x015c
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS 0x0160
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK 0x0164
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL 0x0168
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG0 0x016c
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG1 0x0170
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG2 0x0174
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG3 0x0178
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG0 0x0188
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG1 0x018c
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG2 0x0190
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG3 0x0194
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_ENH_CAP_LIST 0x02b0
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CAP 0x02b4
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CNTL 0x02b6
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_ENH_CAP_LIST 0x0328
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CAP 0x032c
++#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CNTL 0x032e
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf13_bifcfgdecp
++// base address: 0x0
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_VENDOR_ID 0x0000
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_DEVICE_ID 0x0002
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_COMMAND 0x0004
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_STATUS 0x0006
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_REVISION_ID 0x0008
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PROG_INTERFACE 0x0009
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_SUB_CLASS 0x000a
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_BASE_CLASS 0x000b
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_CACHE_LINE 0x000c
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_LATENCY 0x000d
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_HEADER 0x000e
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_BIST 0x000f
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_1 0x0010
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_2 0x0014
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_3 0x0018
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_4 0x001c
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_5 0x0020
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_6 0x0024
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_ADAPTER_ID 0x002c
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_ROM_BASE_ADDR 0x0030
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_CAP_PTR 0x0034
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_INTERRUPT_LINE 0x003c
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_INTERRUPT_PIN 0x003d
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP_LIST 0x0064
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP 0x0066
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP 0x0068
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL 0x006c
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS 0x006e
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP 0x0070
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL 0x0074
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS 0x0076
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2 0x0088
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2 0x008c
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS2 0x008e
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP2 0x0090
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2 0x0094
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2 0x0096
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_SLOT_CAP2 0x0098
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_SLOT_CNTL2 0x009c
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_SLOT_STATUS2 0x009e
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_MSI_CAP_LIST 0x00a0
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_CNTL 0x00a2
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_ADDR_LO 0x00a4
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_ADDR_HI 0x00a8
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_DATA 0x00a8
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_MSI_MASK 0x00ac
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_DATA_64 0x00ac
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_MSI_MASK_64 0x00b0
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_MSI_PENDING 0x00b0
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_MSI_PENDING_64 0x00b4
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_MSIX_CAP_LIST 0x00c0
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_MSIX_MSG_CNTL 0x00c2
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_MSIX_TABLE 0x00c4
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_MSIX_PBA 0x00c8
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_HDR 0x0104
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC1 0x0108
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC2 0x010c
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS 0x0154
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK 0x0158
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY 0x015c
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS 0x0160
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK 0x0164
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL 0x0168
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG0 0x016c
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG1 0x0170
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG2 0x0174
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG3 0x0178
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG0 0x0188
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG1 0x018c
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG2 0x0190
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG3 0x0194
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_ENH_CAP_LIST 0x02b0
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CAP 0x02b4
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CNTL 0x02b6
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_ENH_CAP_LIST 0x0328
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CAP 0x032c
++#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CNTL 0x032e
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf14_bifcfgdecp
++// base address: 0x0
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_VENDOR_ID 0x0000
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_DEVICE_ID 0x0002
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_COMMAND 0x0004
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_STATUS 0x0006
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_REVISION_ID 0x0008
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PROG_INTERFACE 0x0009
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_SUB_CLASS 0x000a
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_BASE_CLASS 0x000b
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_CACHE_LINE 0x000c
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_LATENCY 0x000d
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_HEADER 0x000e
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_BIST 0x000f
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_1 0x0010
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_2 0x0014
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_3 0x0018
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_4 0x001c
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_5 0x0020
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_6 0x0024
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_ADAPTER_ID 0x002c
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_ROM_BASE_ADDR 0x0030
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_CAP_PTR 0x0034
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_INTERRUPT_LINE 0x003c
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_INTERRUPT_PIN 0x003d
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP_LIST 0x0064
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP 0x0066
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP 0x0068
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL 0x006c
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS 0x006e
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP 0x0070
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL 0x0074
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS 0x0076
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2 0x0088
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2 0x008c
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS2 0x008e
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP2 0x0090
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2 0x0094
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2 0x0096
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_SLOT_CAP2 0x0098
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_SLOT_CNTL2 0x009c
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_SLOT_STATUS2 0x009e
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_MSI_CAP_LIST 0x00a0
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_CNTL 0x00a2
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_ADDR_LO 0x00a4
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_ADDR_HI 0x00a8
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_DATA 0x00a8
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_MSI_MASK 0x00ac
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_DATA_64 0x00ac
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_MSI_MASK_64 0x00b0
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_MSI_PENDING 0x00b0
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_MSI_PENDING_64 0x00b4
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_MSIX_CAP_LIST 0x00c0
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_MSIX_MSG_CNTL 0x00c2
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_MSIX_TABLE 0x00c4
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_MSIX_PBA 0x00c8
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_HDR 0x0104
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC1 0x0108
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC2 0x010c
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS 0x0154
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK 0x0158
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY 0x015c
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS 0x0160
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK 0x0164
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL 0x0168
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG0 0x016c
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG1 0x0170
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG2 0x0174
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG3 0x0178
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG0 0x0188
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG1 0x018c
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG2 0x0190
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG3 0x0194
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_ENH_CAP_LIST 0x02b0
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CAP 0x02b4
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CNTL 0x02b6
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_ENH_CAP_LIST 0x0328
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CAP 0x032c
++#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CNTL 0x032e
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf15_bifcfgdecp
++// base address: 0x0
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_VENDOR_ID 0x0000
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_DEVICE_ID 0x0002
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_COMMAND 0x0004
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_STATUS 0x0006
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_REVISION_ID 0x0008
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PROG_INTERFACE 0x0009
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_SUB_CLASS 0x000a
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_BASE_CLASS 0x000b
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_CACHE_LINE 0x000c
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_LATENCY 0x000d
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_HEADER 0x000e
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_BIST 0x000f
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_1 0x0010
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_2 0x0014
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_3 0x0018
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_4 0x001c
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_5 0x0020
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_6 0x0024
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_ADAPTER_ID 0x002c
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_ROM_BASE_ADDR 0x0030
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_CAP_PTR 0x0034
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_INTERRUPT_LINE 0x003c
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_INTERRUPT_PIN 0x003d
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP_LIST 0x0064
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP 0x0066
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP 0x0068
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL 0x006c
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS 0x006e
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP 0x0070
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL 0x0074
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS 0x0076
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2 0x0088
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2 0x008c
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS2 0x008e
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP2 0x0090
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2 0x0094
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2 0x0096
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_SLOT_CAP2 0x0098
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_SLOT_CNTL2 0x009c
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_SLOT_STATUS2 0x009e
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_MSI_CAP_LIST 0x00a0
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_CNTL 0x00a2
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_ADDR_LO 0x00a4
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_ADDR_HI 0x00a8
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_DATA 0x00a8
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_MSI_MASK 0x00ac
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_DATA_64 0x00ac
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_MSI_MASK_64 0x00b0
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_MSI_PENDING 0x00b0
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_MSI_PENDING_64 0x00b4
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_MSIX_CAP_LIST 0x00c0
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_MSIX_MSG_CNTL 0x00c2
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_MSIX_TABLE 0x00c4
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_MSIX_PBA 0x00c8
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_HDR 0x0104
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC1 0x0108
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC2 0x010c
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS 0x0154
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK 0x0158
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY 0x015c
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS 0x0160
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK 0x0164
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL 0x0168
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG0 0x016c
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG1 0x0170
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG2 0x0174
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG3 0x0178
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG0 0x0188
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG1 0x018c
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG2 0x0190
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG3 0x0194
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_ENH_CAP_LIST 0x02b0
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CAP 0x02b4
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CNTL 0x02b6
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_ENH_CAP_LIST 0x0328
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CAP 0x032c
++#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CNTL 0x032e
++
++
++// addressBlock: nbio_nbif0_bif_bx_pf_SYSPFVFDEC
++// base address: 0x0
++#define mmMM_INDEX 0x0000
++#define mmMM_INDEX_BASE_IDX 0
++#define mmMM_DATA 0x0001
++#define mmMM_DATA_BASE_IDX 0
++#define mmMM_INDEX_HI 0x0006
++#define mmMM_INDEX_HI_BASE_IDX 0
++
++
++// addressBlock: nbio_nbif0_bif_bx_SYSDEC
++// base address: 0x0
++#define mmSYSHUB_INDEX_OVLP 0x0008
++#define mmSYSHUB_INDEX_OVLP_BASE_IDX 0
++#define mmSYSHUB_DATA_OVLP 0x0009
++#define mmSYSHUB_DATA_OVLP_BASE_IDX 0
++#define mmPCIE_INDEX 0x000c
++#define mmPCIE_INDEX_BASE_IDX 0
++#define mmPCIE_DATA 0x000d
++#define mmPCIE_DATA_BASE_IDX 0
++#define mmPCIE_INDEX2 0x000e
++#define mmPCIE_INDEX2_BASE_IDX 0
++#define mmPCIE_DATA2 0x000f
++#define mmPCIE_DATA2_BASE_IDX 0
++#define mmSBIOS_SCRATCH_0 0x0034
++#define mmSBIOS_SCRATCH_0_BASE_IDX 1
++#define mmSBIOS_SCRATCH_1 0x0035
++#define mmSBIOS_SCRATCH_1_BASE_IDX 1
++#define mmSBIOS_SCRATCH_2 0x0036
++#define mmSBIOS_SCRATCH_2_BASE_IDX 1
++#define mmSBIOS_SCRATCH_3 0x0037
++#define mmSBIOS_SCRATCH_3_BASE_IDX 1
++#define mmBIOS_SCRATCH_0 0x0038
++#define mmBIOS_SCRATCH_0_BASE_IDX 1
++#define mmBIOS_SCRATCH_1 0x0039
++#define mmBIOS_SCRATCH_1_BASE_IDX 1
++#define mmBIOS_SCRATCH_2 0x003a
++#define mmBIOS_SCRATCH_2_BASE_IDX 1
++#define mmBIOS_SCRATCH_3 0x003b
++#define mmBIOS_SCRATCH_3_BASE_IDX 1
++#define mmBIOS_SCRATCH_4 0x003c
++#define mmBIOS_SCRATCH_4_BASE_IDX 1
++#define mmBIOS_SCRATCH_5 0x003d
++#define mmBIOS_SCRATCH_5_BASE_IDX 1
++#define mmBIOS_SCRATCH_6 0x003e
++#define mmBIOS_SCRATCH_6_BASE_IDX 1
++#define mmBIOS_SCRATCH_7 0x003f
++#define mmBIOS_SCRATCH_7_BASE_IDX 1
++#define mmBIOS_SCRATCH_8 0x0040
++#define mmBIOS_SCRATCH_8_BASE_IDX 1
++#define mmBIOS_SCRATCH_9 0x0041
++#define mmBIOS_SCRATCH_9_BASE_IDX 1
++#define mmBIOS_SCRATCH_10 0x0042
++#define mmBIOS_SCRATCH_10_BASE_IDX 1
++#define mmBIOS_SCRATCH_11 0x0043
++#define mmBIOS_SCRATCH_11_BASE_IDX 1
++#define mmBIOS_SCRATCH_12 0x0044
++#define mmBIOS_SCRATCH_12_BASE_IDX 1
++#define mmBIOS_SCRATCH_13 0x0045
++#define mmBIOS_SCRATCH_13_BASE_IDX 1
++#define mmBIOS_SCRATCH_14 0x0046
++#define mmBIOS_SCRATCH_14_BASE_IDX 1
++#define mmBIOS_SCRATCH_15 0x0047
++#define mmBIOS_SCRATCH_15_BASE_IDX 1
++#define mmBIF_RLC_INTR_CNTL 0x004c
++#define mmBIF_RLC_INTR_CNTL_BASE_IDX 1
++#define mmBIF_VCE_INTR_CNTL 0x004d
++#define mmBIF_VCE_INTR_CNTL_BASE_IDX 1
++#define mmBIF_UVD_INTR_CNTL 0x004e
++#define mmBIF_UVD_INTR_CNTL_BASE_IDX 1
++#define mmGFX_MMIOREG_CAM_ADDR0 0x006c
++#define mmGFX_MMIOREG_CAM_ADDR0_BASE_IDX 1
++#define mmGFX_MMIOREG_CAM_REMAP_ADDR0 0x006d
++#define mmGFX_MMIOREG_CAM_REMAP_ADDR0_BASE_IDX 1
++#define mmGFX_MMIOREG_CAM_ADDR1 0x006e
++#define mmGFX_MMIOREG_CAM_ADDR1_BASE_IDX 1
++#define mmGFX_MMIOREG_CAM_REMAP_ADDR1 0x006f
++#define mmGFX_MMIOREG_CAM_REMAP_ADDR1_BASE_IDX 1
++#define mmGFX_MMIOREG_CAM_ADDR2 0x0070
++#define mmGFX_MMIOREG_CAM_ADDR2_BASE_IDX 1
++#define mmGFX_MMIOREG_CAM_REMAP_ADDR2 0x0071
++#define mmGFX_MMIOREG_CAM_REMAP_ADDR2_BASE_IDX 1
++#define mmGFX_MMIOREG_CAM_ADDR3 0x0072
++#define mmGFX_MMIOREG_CAM_ADDR3_BASE_IDX 1
++#define mmGFX_MMIOREG_CAM_REMAP_ADDR3 0x0073
++#define mmGFX_MMIOREG_CAM_REMAP_ADDR3_BASE_IDX 1
++#define mmGFX_MMIOREG_CAM_ADDR4 0x0074
++#define mmGFX_MMIOREG_CAM_ADDR4_BASE_IDX 1
++#define mmGFX_MMIOREG_CAM_REMAP_ADDR4 0x0075
++#define mmGFX_MMIOREG_CAM_REMAP_ADDR4_BASE_IDX 1
++#define mmGFX_MMIOREG_CAM_ADDR5 0x0076
++#define mmGFX_MMIOREG_CAM_ADDR5_BASE_IDX 1
++#define mmGFX_MMIOREG_CAM_REMAP_ADDR5 0x0077
++#define mmGFX_MMIOREG_CAM_REMAP_ADDR5_BASE_IDX 1
++#define mmGFX_MMIOREG_CAM_ADDR6 0x0078
++#define mmGFX_MMIOREG_CAM_ADDR6_BASE_IDX 1
++#define mmGFX_MMIOREG_CAM_REMAP_ADDR6 0x0079
++#define mmGFX_MMIOREG_CAM_REMAP_ADDR6_BASE_IDX 1
++#define mmGFX_MMIOREG_CAM_ADDR7 0x007a
++#define mmGFX_MMIOREG_CAM_ADDR7_BASE_IDX 1
++#define mmGFX_MMIOREG_CAM_REMAP_ADDR7 0x007b
++#define mmGFX_MMIOREG_CAM_REMAP_ADDR7_BASE_IDX 1
++#define mmGFX_MMIOREG_CAM_CNTL 0x007c
++#define mmGFX_MMIOREG_CAM_CNTL_BASE_IDX 1
++#define mmGFX_MMIOREG_CAM_ZERO_CPL 0x007d
++#define mmGFX_MMIOREG_CAM_ZERO_CPL_BASE_IDX 1
++#define mmGFX_MMIOREG_CAM_ONE_CPL 0x007e
++#define mmGFX_MMIOREG_CAM_ONE_CPL_BASE_IDX 1
++#define mmGFX_MMIOREG_CAM_PROGRAMMABLE_CPL 0x007f
++#define mmGFX_MMIOREG_CAM_PROGRAMMABLE_CPL_BASE_IDX 1
++
++
++// addressBlock: nbio_nbif0_syshub_mmreg_syshubdec
++// base address: 0x0
++#define mmSYSHUB_INDEX 0x0008
++#define mmSYSHUB_INDEX_BASE_IDX 0
++#define mmSYSHUB_DATA 0x0009
++#define mmSYSHUB_DATA_BASE_IDX 0
++
++
++// addressBlock: nbio_nbif0_rcc_strap_BIFDEC1
++// base address: 0x0
++#define mmRCC_DEV0_EPF0_STRAP0 0x0011
++#define mmRCC_DEV0_EPF0_STRAP0_BASE_IDX 2
++
++
++// addressBlock: nbio_nbif0_rcc_ep_dev0_BIFDEC1
++// base address: 0x0
++#define mmEP_PCIE_SCRATCH 0x0025
++#define mmEP_PCIE_SCRATCH_BASE_IDX 2
++#define mmEP_PCIE_CNTL 0x0027
++#define mmEP_PCIE_CNTL_BASE_IDX 2
++#define mmEP_PCIE_INT_CNTL 0x0028
++#define mmEP_PCIE_INT_CNTL_BASE_IDX 2
++#define mmEP_PCIE_INT_STATUS 0x0029
++#define mmEP_PCIE_INT_STATUS_BASE_IDX 2
++#define mmEP_PCIE_RX_CNTL2 0x002a
++#define mmEP_PCIE_RX_CNTL2_BASE_IDX 2
++#define mmEP_PCIE_BUS_CNTL 0x002b
++#define mmEP_PCIE_BUS_CNTL_BASE_IDX 2
++#define mmEP_PCIE_CFG_CNTL 0x002c
++#define mmEP_PCIE_CFG_CNTL_BASE_IDX 2
++#define mmEP_PCIE_TX_LTR_CNTL 0x002e
++#define mmEP_PCIE_TX_LTR_CNTL_BASE_IDX 2
++#define mmPCIE_F1_DPA_SUBSTATE_PWR_ALLOC_0 0x002f
++#define mmPCIE_F1_DPA_SUBSTATE_PWR_ALLOC_0_BASE_IDX 2
++#define mmPCIE_F1_DPA_SUBSTATE_PWR_ALLOC_1 0x002f
++#define mmPCIE_F1_DPA_SUBSTATE_PWR_ALLOC_1_BASE_IDX 2
++#define mmPCIE_F1_DPA_SUBSTATE_PWR_ALLOC_2 0x002f
++#define mmPCIE_F1_DPA_SUBSTATE_PWR_ALLOC_2_BASE_IDX 2
++#define mmPCIE_F1_DPA_SUBSTATE_PWR_ALLOC_3 0x002f
++#define mmPCIE_F1_DPA_SUBSTATE_PWR_ALLOC_3_BASE_IDX 2
++#define mmPCIE_F1_DPA_SUBSTATE_PWR_ALLOC_4 0x0030
++#define mmPCIE_F1_DPA_SUBSTATE_PWR_ALLOC_4_BASE_IDX 2
++#define mmPCIE_F1_DPA_SUBSTATE_PWR_ALLOC_5 0x0030
++#define mmPCIE_F1_DPA_SUBSTATE_PWR_ALLOC_5_BASE_IDX 2
++#define mmPCIE_F1_DPA_SUBSTATE_PWR_ALLOC_6 0x0030
++#define mmPCIE_F1_DPA_SUBSTATE_PWR_ALLOC_6_BASE_IDX 2
++#define mmPCIE_F1_DPA_SUBSTATE_PWR_ALLOC_7 0x0030
++#define mmPCIE_F1_DPA_SUBSTATE_PWR_ALLOC_7_BASE_IDX 2
++#define mmEP_PCIE_F0_DPA_CAP 0x0034
++#define mmEP_PCIE_F0_DPA_CAP_BASE_IDX 2
++#define mmEP_PCIE_F0_DPA_LATENCY_INDICATOR 0x0035
++#define mmEP_PCIE_F0_DPA_LATENCY_INDICATOR_BASE_IDX 2
++#define mmEP_PCIE_F0_DPA_CNTL 0x0035
++#define mmEP_PCIE_F0_DPA_CNTL_BASE_IDX 2
++#define mmPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0 0x0035
++#define mmPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0_BASE_IDX 2
++#define mmPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1 0x0036
++#define mmPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1_BASE_IDX 2
++#define mmPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2 0x0036
++#define mmPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2_BASE_IDX 2
++#define mmPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3 0x0036
++#define mmPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3_BASE_IDX 2
++#define mmPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4 0x0036
++#define mmPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4_BASE_IDX 2
++#define mmPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5 0x0037
++#define mmPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5_BASE_IDX 2
++#define mmPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6 0x0037
++#define mmPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6_BASE_IDX 2
++#define mmPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7 0x0037
++#define mmPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7_BASE_IDX 2
++#define mmEP_PCIE_PME_CONTROL 0x0037
++#define mmEP_PCIE_PME_CONTROL_BASE_IDX 2
++#define mmEP_PCIEP_RESERVED 0x0038
++#define mmEP_PCIEP_RESERVED_BASE_IDX 2
++#define mmEP_PCIE_TX_CNTL 0x003a
++#define mmEP_PCIE_TX_CNTL_BASE_IDX 2
++#define mmEP_PCIE_TX_REQUESTER_ID 0x003b
++#define mmEP_PCIE_TX_REQUESTER_ID_BASE_IDX 2
++#define mmEP_PCIE_ERR_CNTL 0x003c
++#define mmEP_PCIE_ERR_CNTL_BASE_IDX 2
++#define mmEP_PCIE_RX_CNTL 0x003d
++#define mmEP_PCIE_RX_CNTL_BASE_IDX 2
++#define mmEP_PCIE_LC_SPEED_CNTL 0x003e
++#define mmEP_PCIE_LC_SPEED_CNTL_BASE_IDX 2
++
++
++// addressBlock: nbio_nbif0_rcc_dwn_dev0_BIFDEC1
++// base address: 0x0
++#define mmDN_PCIE_RESERVED 0x0040
++#define mmDN_PCIE_RESERVED_BASE_IDX 2
++#define mmDN_PCIE_SCRATCH 0x0041
++#define mmDN_PCIE_SCRATCH_BASE_IDX 2
++#define mmDN_PCIE_CNTL 0x0043
++#define mmDN_PCIE_CNTL_BASE_IDX 2
++#define mmDN_PCIE_CONFIG_CNTL 0x0044
++#define mmDN_PCIE_CONFIG_CNTL_BASE_IDX 2
++#define mmDN_PCIE_RX_CNTL2 0x0045
++#define mmDN_PCIE_RX_CNTL2_BASE_IDX 2
++#define mmDN_PCIE_BUS_CNTL 0x0046
++#define mmDN_PCIE_BUS_CNTL_BASE_IDX 2
++#define mmDN_PCIE_CFG_CNTL 0x0047
++#define mmDN_PCIE_CFG_CNTL_BASE_IDX 2
++
++
++// addressBlock: nbio_nbif0_rcc_dwnp_dev0_BIFDEC1
++// base address: 0x0
++#define mmPCIE_ERR_CNTL 0x004f
++#define mmPCIE_ERR_CNTL_BASE_IDX 2
++#define mmPCIE_RX_CNTL 0x0050
++#define mmPCIE_RX_CNTL_BASE_IDX 2
++#define mmPCIE_LC_SPEED_CNTL 0x0051
++#define mmPCIE_LC_SPEED_CNTL_BASE_IDX 2
++#define mmPCIE_LC_CNTL2 0x0052
++#define mmPCIE_LC_CNTL2_BASE_IDX 2
++#define mmLTR_MSG_INFO_FROM_EP 0x0054
++#define mmLTR_MSG_INFO_FROM_EP_BASE_IDX 2
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_BIFPFVFDEC1[13440..14975]
++// base address: 0x3480
++#define mmRCC_ERR_LOG 0x0085
++#define mmRCC_ERR_LOG_BASE_IDX 2
++#define mmRCC_DOORBELL_APER_EN 0x00c0
++#define mmRCC_DOORBELL_APER_EN_BASE_IDX 2
++#define mmRCC_CONFIG_MEMSIZE 0x00c3
++#define mmRCC_CONFIG_MEMSIZE_BASE_IDX 2
++#define mmRCC_CONFIG_RESERVED 0x00c4
++#define mmRCC_CONFIG_RESERVED_BASE_IDX 2
++#define mmRCC_IOV_FUNC_IDENTIFIER 0x00c5
++#define mmRCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_BIFDEC1
++// base address: 0x0
++#define mmRCC_ERR_INT_CNTL 0x0086
++#define mmRCC_ERR_INT_CNTL_BASE_IDX 2
++#define mmRCC_BACO_CNTL_MISC 0x0087
++#define mmRCC_BACO_CNTL_MISC_BASE_IDX 2
++#define mmRCC_RESET_EN 0x0088
++#define mmRCC_RESET_EN_BASE_IDX 2
++#define mmRCC_VDM_SUPPORT 0x0089
++#define mmRCC_VDM_SUPPORT_BASE_IDX 2
++#define mmRCC_MARGIN_PARAM_CNTL0 0x008a
++#define mmRCC_MARGIN_PARAM_CNTL0_BASE_IDX 2
++#define mmRCC_MARGIN_PARAM_CNTL1 0x008b
++#define mmRCC_MARGIN_PARAM_CNTL1_BASE_IDX 2
++#define mmRCC_PEER_REG_RANGE0 0x00be
++#define mmRCC_PEER_REG_RANGE0_BASE_IDX 2
++#define mmRCC_PEER_REG_RANGE1 0x00bf
++#define mmRCC_PEER_REG_RANGE1_BASE_IDX 2
++#define mmRCC_BUS_CNTL 0x00c1
++#define mmRCC_BUS_CNTL_BASE_IDX 2
++#define mmRCC_CONFIG_CNTL 0x00c2
++#define mmRCC_CONFIG_CNTL_BASE_IDX 2
++#define mmRCC_CONFIG_F0_BASE 0x00c6
++#define mmRCC_CONFIG_F0_BASE_BASE_IDX 2
++#define mmRCC_CONFIG_APER_SIZE 0x00c7
++#define mmRCC_CONFIG_APER_SIZE_BASE_IDX 2
++#define mmRCC_CONFIG_REG_APER_SIZE 0x00c8
++#define mmRCC_CONFIG_REG_APER_SIZE_BASE_IDX 2
++#define mmRCC_XDMA_LO 0x00c9
++#define mmRCC_XDMA_LO_BASE_IDX 2
++#define mmRCC_XDMA_HI 0x00ca
++#define mmRCC_XDMA_HI_BASE_IDX 2
++#define mmRCC_FEATURES_CONTROL_MISC 0x00cb
++#define mmRCC_FEATURES_CONTROL_MISC_BASE_IDX 2
++#define mmRCC_BUSNUM_CNTL1 0x00cc
++#define mmRCC_BUSNUM_CNTL1_BASE_IDX 2
++#define mmRCC_BUSNUM_LIST0 0x00cd
++#define mmRCC_BUSNUM_LIST0_BASE_IDX 2
++#define mmRCC_BUSNUM_LIST1 0x00ce
++#define mmRCC_BUSNUM_LIST1_BASE_IDX 2
++#define mmRCC_BUSNUM_CNTL2 0x00cf
++#define mmRCC_BUSNUM_CNTL2_BASE_IDX 2
++#define mmRCC_CAPTURE_HOST_BUSNUM 0x00d0
++#define mmRCC_CAPTURE_HOST_BUSNUM_BASE_IDX 2
++#define mmRCC_HOST_BUSNUM 0x00d1
++#define mmRCC_HOST_BUSNUM_BASE_IDX 2
++#define mmRCC_PEER0_FB_OFFSET_HI 0x00d2
++#define mmRCC_PEER0_FB_OFFSET_HI_BASE_IDX 2
++#define mmRCC_PEER0_FB_OFFSET_LO 0x00d3
++#define mmRCC_PEER0_FB_OFFSET_LO_BASE_IDX 2
++#define mmRCC_PEER1_FB_OFFSET_HI 0x00d4
++#define mmRCC_PEER1_FB_OFFSET_HI_BASE_IDX 2
++#define mmRCC_PEER1_FB_OFFSET_LO 0x00d5
++#define mmRCC_PEER1_FB_OFFSET_LO_BASE_IDX 2
++#define mmRCC_PEER2_FB_OFFSET_HI 0x00d6
++#define mmRCC_PEER2_FB_OFFSET_HI_BASE_IDX 2
++#define mmRCC_PEER2_FB_OFFSET_LO 0x00d7
++#define mmRCC_PEER2_FB_OFFSET_LO_BASE_IDX 2
++#define mmRCC_PEER3_FB_OFFSET_HI 0x00d8
++#define mmRCC_PEER3_FB_OFFSET_HI_BASE_IDX 2
++#define mmRCC_PEER3_FB_OFFSET_LO 0x00d9
++#define mmRCC_PEER3_FB_OFFSET_LO_BASE_IDX 2
++#define mmRCC_CMN_LINK_CNTL 0x00de
++#define mmRCC_CMN_LINK_CNTL_BASE_IDX 2
++#define mmRCC_EP_REQUESTERID_RESTORE 0x00df
++#define mmRCC_EP_REQUESTERID_RESTORE_BASE_IDX 2
++#define mmRCC_LTR_LSWITCH_CNTL 0x00e0
++#define mmRCC_LTR_LSWITCH_CNTL_BASE_IDX 2
++#define mmRCC_MH_ARB_CNTL 0x00e1
++#define mmRCC_MH_ARB_CNTL_BASE_IDX 2
++
++
++// addressBlock: nbio_nbif0_bif_bx_BIFDEC1
++// base address: 0x0
++#define mmBIF_MM_INDACCESS_CNTL 0x00e6
++#define mmBIF_MM_INDACCESS_CNTL_BASE_IDX 2
++#define mmBUS_CNTL 0x00e7
++#define mmBUS_CNTL_BASE_IDX 2
++#define mmBIF_SCRATCH0 0x00e8
++#define mmBIF_SCRATCH0_BASE_IDX 2
++#define mmBIF_SCRATCH1 0x00e9
++#define mmBIF_SCRATCH1_BASE_IDX 2
++#define mmBX_RESET_EN 0x00ed
++#define mmBX_RESET_EN_BASE_IDX 2
++#define mmMM_CFGREGS_CNTL 0x00ee
++#define mmMM_CFGREGS_CNTL_BASE_IDX 2
++#define mmBX_RESET_CNTL 0x00f0
++#define mmBX_RESET_CNTL_BASE_IDX 2
++#define mmINTERRUPT_CNTL 0x00f1
++#define mmINTERRUPT_CNTL_BASE_IDX 2
++#define mmINTERRUPT_CNTL2 0x00f2
++#define mmINTERRUPT_CNTL2_BASE_IDX 2
++#define mmCLKREQB_PAD_CNTL 0x00f8
++#define mmCLKREQB_PAD_CNTL_BASE_IDX 2
++#define mmBIF_FEATURES_CONTROL_MISC 0x00fb
++#define mmBIF_FEATURES_CONTROL_MISC_BASE_IDX 2
++#define mmBIF_DOORBELL_CNTL 0x00fc
++#define mmBIF_DOORBELL_CNTL_BASE_IDX 2
++#define mmBIF_DOORBELL_INT_CNTL 0x00fd
++#define mmBIF_DOORBELL_INT_CNTL_BASE_IDX 2
++#define mmBIF_FB_EN 0x00ff
++#define mmBIF_FB_EN_BASE_IDX 2
++#define mmBIF_BUSY_DELAY_CNTR 0x0100
++#define mmBIF_BUSY_DELAY_CNTR_BASE_IDX 2
++#define mmBIF_MST_TRANS_PENDING_VF 0x0109
++#define mmBIF_MST_TRANS_PENDING_VF_BASE_IDX 2
++#define mmBIF_SLV_TRANS_PENDING_VF 0x010a
++#define mmBIF_SLV_TRANS_PENDING_VF_BASE_IDX 2
++#define mmBACO_CNTL 0x010b
++#define mmBACO_CNTL_BASE_IDX 2
++#define mmBIF_BACO_EXIT_TIME0 0x010c
++#define mmBIF_BACO_EXIT_TIME0_BASE_IDX 2
++#define mmBIF_BACO_EXIT_TIMER1 0x010d
++#define mmBIF_BACO_EXIT_TIMER1_BASE_IDX 2
++#define mmBIF_BACO_EXIT_TIMER2 0x010e
++#define mmBIF_BACO_EXIT_TIMER2_BASE_IDX 2
++#define mmBIF_BACO_EXIT_TIMER3 0x010f
++#define mmBIF_BACO_EXIT_TIMER3_BASE_IDX 2
++#define mmBIF_BACO_EXIT_TIMER4 0x0110
++#define mmBIF_BACO_EXIT_TIMER4_BASE_IDX 2
++#define mmMEM_TYPE_CNTL 0x0111
++#define mmMEM_TYPE_CNTL_BASE_IDX 2
++#define mmNBIF_GFX_ADDR_LUT_CNTL 0x0113
++#define mmNBIF_GFX_ADDR_LUT_CNTL_BASE_IDX 2
++#define mmNBIF_GFX_ADDR_LUT_0 0x0114
++#define mmNBIF_GFX_ADDR_LUT_0_BASE_IDX 2
++#define mmNBIF_GFX_ADDR_LUT_1 0x0115
++#define mmNBIF_GFX_ADDR_LUT_1_BASE_IDX 2
++#define mmNBIF_GFX_ADDR_LUT_2 0x0116
++#define mmNBIF_GFX_ADDR_LUT_2_BASE_IDX 2
++#define mmNBIF_GFX_ADDR_LUT_3 0x0117
++#define mmNBIF_GFX_ADDR_LUT_3_BASE_IDX 2
++#define mmNBIF_GFX_ADDR_LUT_4 0x0118
++#define mmNBIF_GFX_ADDR_LUT_4_BASE_IDX 2
++#define mmNBIF_GFX_ADDR_LUT_5 0x0119
++#define mmNBIF_GFX_ADDR_LUT_5_BASE_IDX 2
++#define mmNBIF_GFX_ADDR_LUT_6 0x011a
++#define mmNBIF_GFX_ADDR_LUT_6_BASE_IDX 2
++#define mmNBIF_GFX_ADDR_LUT_7 0x011b
++#define mmNBIF_GFX_ADDR_LUT_7_BASE_IDX 2
++#define mmNBIF_GFX_ADDR_LUT_8 0x011c
++#define mmNBIF_GFX_ADDR_LUT_8_BASE_IDX 2
++#define mmNBIF_GFX_ADDR_LUT_9 0x011d
++#define mmNBIF_GFX_ADDR_LUT_9_BASE_IDX 2
++#define mmNBIF_GFX_ADDR_LUT_10 0x011e
++#define mmNBIF_GFX_ADDR_LUT_10_BASE_IDX 2
++#define mmNBIF_GFX_ADDR_LUT_11 0x011f
++#define mmNBIF_GFX_ADDR_LUT_11_BASE_IDX 2
++#define mmNBIF_GFX_ADDR_LUT_12 0x0120
++#define mmNBIF_GFX_ADDR_LUT_12_BASE_IDX 2
++#define mmNBIF_GFX_ADDR_LUT_13 0x0121
++#define mmNBIF_GFX_ADDR_LUT_13_BASE_IDX 2
++#define mmNBIF_GFX_ADDR_LUT_14 0x0122
++#define mmNBIF_GFX_ADDR_LUT_14_BASE_IDX 2
++#define mmNBIF_GFX_ADDR_LUT_15 0x0123
++#define mmNBIF_GFX_ADDR_LUT_15_BASE_IDX 2
++#define mmREMAP_HDP_MEM_FLUSH_CNTL 0x012d
++#define mmREMAP_HDP_MEM_FLUSH_CNTL_BASE_IDX 2
++#define mmREMAP_HDP_REG_FLUSH_CNTL 0x012e
++#define mmREMAP_HDP_REG_FLUSH_CNTL_BASE_IDX 2
++#define mmBIF_RB_CNTL 0x012f
++#define mmBIF_RB_CNTL_BASE_IDX 2
++#define mmBIF_RB_BASE 0x0130
++#define mmBIF_RB_BASE_BASE_IDX 2
++#define mmBIF_RB_RPTR 0x0131
++#define mmBIF_RB_RPTR_BASE_IDX 2
++#define mmBIF_RB_WPTR 0x0132
++#define mmBIF_RB_WPTR_BASE_IDX 2
++#define mmBIF_RB_WPTR_ADDR_HI 0x0133
++#define mmBIF_RB_WPTR_ADDR_HI_BASE_IDX 2
++#define mmBIF_RB_WPTR_ADDR_LO 0x0134
++#define mmBIF_RB_WPTR_ADDR_LO_BASE_IDX 2
++#define mmMAILBOX_INDEX 0x0135
++#define mmMAILBOX_INDEX_BASE_IDX 2
++#define mmBIF_MP1_INTR_CTRL 0x0142
++#define mmBIF_MP1_INTR_CTRL_BASE_IDX 2
++#define mmBIF_UVD_GPUIOV_CFG_SIZE 0x0143
++#define mmBIF_UVD_GPUIOV_CFG_SIZE_BASE_IDX 2
++#define mmBIF_VCE_GPUIOV_CFG_SIZE 0x0144
++#define mmBIF_VCE_GPUIOV_CFG_SIZE_BASE_IDX 2
++#define mmBIF_GFX_SDMA_GPUIOV_CFG_SIZE 0x0145
++#define mmBIF_GFX_SDMA_GPUIOV_CFG_SIZE_BASE_IDX 2
++#define mmBIF_PERSTB_PAD_CNTL 0x0148
++#define mmBIF_PERSTB_PAD_CNTL_BASE_IDX 2
++#define mmBIF_PX_EN_PAD_CNTL 0x0149
++#define mmBIF_PX_EN_PAD_CNTL_BASE_IDX 2
++#define mmBIF_REFPADKIN_PAD_CNTL 0x014a
++#define mmBIF_REFPADKIN_PAD_CNTL_BASE_IDX 2
++#define mmBIF_CLKREQB_PAD_CNTL 0x014b
++#define mmBIF_CLKREQB_PAD_CNTL_BASE_IDX 2
++#define mmBIF_PWRBRK_PAD_CNTL 0x014c
++#define mmBIF_PWRBRK_PAD_CNTL_BASE_IDX 2
++#define mmBIF_WAKEB_PAD_CNTL 0x014d
++#define mmBIF_WAKEB_PAD_CNTL_BASE_IDX 2
++
++
++// addressBlock: nbio_nbif0_bif_bx_pf_BIFPFVFDEC1
++// base address: 0x0
++#define mmBIF_BME_STATUS 0x00eb
++#define mmBIF_BME_STATUS_BASE_IDX 2
++#define mmBIF_ATOMIC_ERR_LOG 0x00ec
++#define mmBIF_ATOMIC_ERR_LOG_BASE_IDX 2
++#define mmDOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
++#define mmDOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
++#define mmDOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
++#define mmDOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
++#define mmDOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
++#define mmDOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
++#define mmHDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
++#define mmHDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
++#define mmHDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
++#define mmHDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
++#define mmGPU_HDP_FLUSH_REQ 0x0106
++#define mmGPU_HDP_FLUSH_REQ_BASE_IDX 2
++#define mmGPU_HDP_FLUSH_DONE 0x0107
++#define mmGPU_HDP_FLUSH_DONE_BASE_IDX 2
++#define mmBIF_TRANS_PENDING 0x0108
++#define mmBIF_TRANS_PENDING_BASE_IDX 2
++#define mmNBIF_GFX_ADDR_LUT_BYPASS 0x0112
++#define mmNBIF_GFX_ADDR_LUT_BYPASS_BASE_IDX 2
++#define mmMAILBOX_MSGBUF_TRN_DW0 0x0136
++#define mmMAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
++#define mmMAILBOX_MSGBUF_TRN_DW1 0x0137
++#define mmMAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
++#define mmMAILBOX_MSGBUF_TRN_DW2 0x0138
++#define mmMAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
++#define mmMAILBOX_MSGBUF_TRN_DW3 0x0139
++#define mmMAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
++#define mmMAILBOX_MSGBUF_RCV_DW0 0x013a
++#define mmMAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
++#define mmMAILBOX_MSGBUF_RCV_DW1 0x013b
++#define mmMAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
++#define mmMAILBOX_MSGBUF_RCV_DW2 0x013c
++#define mmMAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
++#define mmMAILBOX_MSGBUF_RCV_DW3 0x013d
++#define mmMAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
++#define mmMAILBOX_CONTROL 0x013e
++#define mmMAILBOX_CONTROL_BASE_IDX 2
++#define mmMAILBOX_INT_CNTL 0x013f
++#define mmMAILBOX_INT_CNTL_BASE_IDX 2
++#define mmBIF_VMHV_MAILBOX 0x0140
++#define mmBIF_VMHV_MAILBOX_BASE_IDX 2
++
++
++// addressBlock: nbio_nbif0_gdc_GDCDEC
++// base address: 0x0
++#define mmNGDC_SDP_PORT_CTRL 0x01c2
++#define mmNGDC_SDP_PORT_CTRL_BASE_IDX 2
++#define mmSHUB_REGS_IF_CTL 0x01c3
++#define mmSHUB_REGS_IF_CTL_BASE_IDX 2
++#define mmNGDC_MGCG_CTRL 0x01ca
++#define mmNGDC_MGCG_CTRL_BASE_IDX 2
++#define mmNGDC_RESERVED_0 0x01cb
++#define mmNGDC_RESERVED_0_BASE_IDX 2
++#define mmNGDC_RESERVED_1 0x01cc
++#define mmNGDC_RESERVED_1_BASE_IDX 2
++#define mmNGDC_SDP_PORT_CTRL_SOCCLK 0x01cd
++#define mmNGDC_SDP_PORT_CTRL_SOCCLK_BASE_IDX 2
++#define mmBIF_SDMA0_DOORBELL_RANGE 0x01d0
++#define mmBIF_SDMA0_DOORBELL_RANGE_BASE_IDX 2
++#define mmBIF_SDMA1_DOORBELL_RANGE 0x01d1
++#define mmBIF_SDMA1_DOORBELL_RANGE_BASE_IDX 2
++#define mmBIF_IH_DOORBELL_RANGE 0x01d2
++#define mmBIF_IH_DOORBELL_RANGE_BASE_IDX 2
++#define mmBIF_MMSCH0_DOORBELL_RANGE 0x01d3
++#define mmBIF_MMSCH0_DOORBELL_RANGE_BASE_IDX 2
++#define mmBIF_ACV_DOORBELL_RANGE 0x01d4
++#define mmBIF_ACV_DOORBELL_RANGE_BASE_IDX 2
++#define mmBIF_DOORBELL_FENCE_CNTL 0x01de
++#define mmBIF_DOORBELL_FENCE_CNTL_BASE_IDX 2
++#define mmS2A_MISC_CNTL 0x01df
++#define mmS2A_MISC_CNTL_BASE_IDX 2
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_BIFDEC2
++// base address: 0x0
++#define mmGFXMSIX_VECT0_ADDR_LO 0x0400
++#define mmGFXMSIX_VECT0_ADDR_LO_BASE_IDX 3
++#define mmGFXMSIX_VECT0_ADDR_HI 0x0401
++#define mmGFXMSIX_VECT0_ADDR_HI_BASE_IDX 3
++#define mmGFXMSIX_VECT0_MSG_DATA 0x0402
++#define mmGFXMSIX_VECT0_MSG_DATA_BASE_IDX 3
++#define mmGFXMSIX_VECT0_CONTROL 0x0403
++#define mmGFXMSIX_VECT0_CONTROL_BASE_IDX 3
++#define mmGFXMSIX_VECT1_ADDR_LO 0x0404
++#define mmGFXMSIX_VECT1_ADDR_LO_BASE_IDX 3
++#define mmGFXMSIX_VECT1_ADDR_HI 0x0405
++#define mmGFXMSIX_VECT1_ADDR_HI_BASE_IDX 3
++#define mmGFXMSIX_VECT1_MSG_DATA 0x0406
++#define mmGFXMSIX_VECT1_MSG_DATA_BASE_IDX 3
++#define mmGFXMSIX_VECT1_CONTROL 0x0407
++#define mmGFXMSIX_VECT1_CONTROL_BASE_IDX 3
++#define mmGFXMSIX_VECT2_ADDR_LO 0x0408
++#define mmGFXMSIX_VECT2_ADDR_LO_BASE_IDX 3
++#define mmGFXMSIX_VECT2_ADDR_HI 0x0409
++#define mmGFXMSIX_VECT2_ADDR_HI_BASE_IDX 3
++#define mmGFXMSIX_VECT2_MSG_DATA 0x040a
++#define mmGFXMSIX_VECT2_MSG_DATA_BASE_IDX 3
++#define mmGFXMSIX_VECT2_CONTROL 0x040b
++#define mmGFXMSIX_VECT2_CONTROL_BASE_IDX 3
++#define mmGFXMSIX_PBA 0x0800
++#define mmGFXMSIX_PBA_BASE_IDX 3
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf0_SYSPFVFDEC
++// base address: 0x0
++#define mmBIF_BX_DEV0_EPF0_VF0_MM_INDEX 0x0000
++#define mmBIF_BX_DEV0_EPF0_VF0_MM_INDEX_BASE_IDX 0
++#define mmBIF_BX_DEV0_EPF0_VF0_MM_DATA 0x0001
++#define mmBIF_BX_DEV0_EPF0_VF0_MM_DATA_BASE_IDX 0
++#define mmBIF_BX_DEV0_EPF0_VF0_MM_INDEX_HI 0x0006
++#define mmBIF_BX_DEV0_EPF0_VF0_MM_INDEX_HI_BASE_IDX 0
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf0_BIFPFVFDEC1
++// base address: 0x0
++#define mmRCC_DEV0_EPF0_VF0_RCC_ERR_LOG 0x0085
++#define mmRCC_DEV0_EPF0_VF0_RCC_ERR_LOG_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF0_RCC_DOORBELL_APER_EN 0x00c0
++#define mmRCC_DEV0_EPF0_VF0_RCC_DOORBELL_APER_EN_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF0_RCC_CONFIG_MEMSIZE 0x00c3
++#define mmRCC_DEV0_EPF0_VF0_RCC_CONFIG_MEMSIZE_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF0_RCC_CONFIG_RESERVED 0x00c4
++#define mmRCC_DEV0_EPF0_VF0_RCC_CONFIG_RESERVED_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF0_RCC_IOV_FUNC_IDENTIFIER 0x00c5
++#define mmRCC_DEV0_EPF0_VF0_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf0_BIFPFVFDEC1
++// base address: 0x0
++#define mmBIF_BX_DEV0_EPF0_VF0_BIF_BME_STATUS 0x00eb
++#define mmBIF_BX_DEV0_EPF0_VF0_BIF_BME_STATUS_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG 0x00ec
++#define mmBIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
++#define mmBIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
++#define mmBIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
++#define mmBIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF0_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
++#define mmBIF_BX_DEV0_EPF0_VF0_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
++#define mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ 0x0106
++#define mmBIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE 0x0107
++#define mmBIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF0_BIF_TRANS_PENDING 0x0108
++#define mmBIF_BX_DEV0_EPF0_VF0_BIF_TRANS_PENDING_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF0_NBIF_GFX_ADDR_LUT_BYPASS 0x0112
++#define mmBIF_BX_DEV0_EPF0_VF0_NBIF_GFX_ADDR_LUT_BYPASS_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW0 0x0136
++#define mmBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW1 0x0137
++#define mmBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW2 0x0138
++#define mmBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW3 0x0139
++#define mmBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW0 0x013a
++#define mmBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW1 0x013b
++#define mmBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW2 0x013c
++#define mmBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW3 0x013d
++#define mmBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF0_MAILBOX_CONTROL 0x013e
++#define mmBIF_BX_DEV0_EPF0_VF0_MAILBOX_CONTROL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF0_MAILBOX_INT_CNTL 0x013f
++#define mmBIF_BX_DEV0_EPF0_VF0_MAILBOX_INT_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX 0x0140
++#define mmBIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX_BASE_IDX 2
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf0_BIFDEC2
++// base address: 0x0
++#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_ADDR_LO 0x0400
++#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_ADDR_HI 0x0401
++#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_MSG_DATA 0x0402
++#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_CONTROL 0x0403
++#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_ADDR_LO 0x0404
++#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_ADDR_HI 0x0405
++#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_MSG_DATA 0x0406
++#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_CONTROL 0x0407
++#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_ADDR_LO 0x0408
++#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_ADDR_HI 0x0409
++#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_MSG_DATA 0x040a
++#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_CONTROL 0x040b
++#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_PBA 0x0800
++#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_PBA_BASE_IDX 3
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf1_SYSPFVFDEC
++// base address: 0x0
++#define mmBIF_BX_DEV0_EPF0_VF1_MM_INDEX 0x0000
++#define mmBIF_BX_DEV0_EPF0_VF1_MM_INDEX_BASE_IDX 0
++#define mmBIF_BX_DEV0_EPF0_VF1_MM_DATA 0x0001
++#define mmBIF_BX_DEV0_EPF0_VF1_MM_DATA_BASE_IDX 0
++#define mmBIF_BX_DEV0_EPF0_VF1_MM_INDEX_HI 0x0006
++#define mmBIF_BX_DEV0_EPF0_VF1_MM_INDEX_HI_BASE_IDX 0
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf1_BIFPFVFDEC1
++// base address: 0x0
++#define mmRCC_DEV0_EPF0_VF1_RCC_ERR_LOG 0x0085
++#define mmRCC_DEV0_EPF0_VF1_RCC_ERR_LOG_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF1_RCC_DOORBELL_APER_EN 0x00c0
++#define mmRCC_DEV0_EPF0_VF1_RCC_DOORBELL_APER_EN_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF1_RCC_CONFIG_MEMSIZE 0x00c3
++#define mmRCC_DEV0_EPF0_VF1_RCC_CONFIG_MEMSIZE_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF1_RCC_CONFIG_RESERVED 0x00c4
++#define mmRCC_DEV0_EPF0_VF1_RCC_CONFIG_RESERVED_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF1_RCC_IOV_FUNC_IDENTIFIER 0x00c5
++#define mmRCC_DEV0_EPF0_VF1_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf1_BIFPFVFDEC1
++// base address: 0x0
++#define mmBIF_BX_DEV0_EPF0_VF1_BIF_BME_STATUS 0x00eb
++#define mmBIF_BX_DEV0_EPF0_VF1_BIF_BME_STATUS_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG 0x00ec
++#define mmBIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
++#define mmBIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
++#define mmBIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
++#define mmBIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF1_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
++#define mmBIF_BX_DEV0_EPF0_VF1_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF1_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
++#define mmBIF_BX_DEV0_EPF0_VF1_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ 0x0106
++#define mmBIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE 0x0107
++#define mmBIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF1_BIF_TRANS_PENDING 0x0108
++#define mmBIF_BX_DEV0_EPF0_VF1_BIF_TRANS_PENDING_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF1_NBIF_GFX_ADDR_LUT_BYPASS 0x0112
++#define mmBIF_BX_DEV0_EPF0_VF1_NBIF_GFX_ADDR_LUT_BYPASS_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW0 0x0136
++#define mmBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW1 0x0137
++#define mmBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW2 0x0138
++#define mmBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW3 0x0139
++#define mmBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW0 0x013a
++#define mmBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW1 0x013b
++#define mmBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW2 0x013c
++#define mmBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW3 0x013d
++#define mmBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF1_MAILBOX_CONTROL 0x013e
++#define mmBIF_BX_DEV0_EPF0_VF1_MAILBOX_CONTROL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF1_MAILBOX_INT_CNTL 0x013f
++#define mmBIF_BX_DEV0_EPF0_VF1_MAILBOX_INT_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX 0x0140
++#define mmBIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX_BASE_IDX 2
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf1_BIFDEC2
++// base address: 0x0
++#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_ADDR_LO 0x0400
++#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_ADDR_HI 0x0401
++#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_MSG_DATA 0x0402
++#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_CONTROL 0x0403
++#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_ADDR_LO 0x0404
++#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_ADDR_HI 0x0405
++#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_MSG_DATA 0x0406
++#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_CONTROL 0x0407
++#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_ADDR_LO 0x0408
++#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_ADDR_HI 0x0409
++#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_MSG_DATA 0x040a
++#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_CONTROL 0x040b
++#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_PBA 0x0800
++#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_PBA_BASE_IDX 3
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf2_SYSPFVFDEC
++// base address: 0x0
++#define mmBIF_BX_DEV0_EPF0_VF2_MM_INDEX 0x0000
++#define mmBIF_BX_DEV0_EPF0_VF2_MM_INDEX_BASE_IDX 0
++#define mmBIF_BX_DEV0_EPF0_VF2_MM_DATA 0x0001
++#define mmBIF_BX_DEV0_EPF0_VF2_MM_DATA_BASE_IDX 0
++#define mmBIF_BX_DEV0_EPF0_VF2_MM_INDEX_HI 0x0006
++#define mmBIF_BX_DEV0_EPF0_VF2_MM_INDEX_HI_BASE_IDX 0
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf2_BIFPFVFDEC1
++// base address: 0x0
++#define mmRCC_DEV0_EPF0_VF2_RCC_ERR_LOG 0x0085
++#define mmRCC_DEV0_EPF0_VF2_RCC_ERR_LOG_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF2_RCC_DOORBELL_APER_EN 0x00c0
++#define mmRCC_DEV0_EPF0_VF2_RCC_DOORBELL_APER_EN_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF2_RCC_CONFIG_MEMSIZE 0x00c3
++#define mmRCC_DEV0_EPF0_VF2_RCC_CONFIG_MEMSIZE_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF2_RCC_CONFIG_RESERVED 0x00c4
++#define mmRCC_DEV0_EPF0_VF2_RCC_CONFIG_RESERVED_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF2_RCC_IOV_FUNC_IDENTIFIER 0x00c5
++#define mmRCC_DEV0_EPF0_VF2_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf2_BIFPFVFDEC1
++// base address: 0x0
++#define mmBIF_BX_DEV0_EPF0_VF2_BIF_BME_STATUS 0x00eb
++#define mmBIF_BX_DEV0_EPF0_VF2_BIF_BME_STATUS_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG 0x00ec
++#define mmBIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
++#define mmBIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
++#define mmBIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
++#define mmBIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF2_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
++#define mmBIF_BX_DEV0_EPF0_VF2_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF2_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
++#define mmBIF_BX_DEV0_EPF0_VF2_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ 0x0106
++#define mmBIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE 0x0107
++#define mmBIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF2_BIF_TRANS_PENDING 0x0108
++#define mmBIF_BX_DEV0_EPF0_VF2_BIF_TRANS_PENDING_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF2_NBIF_GFX_ADDR_LUT_BYPASS 0x0112
++#define mmBIF_BX_DEV0_EPF0_VF2_NBIF_GFX_ADDR_LUT_BYPASS_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW0 0x0136
++#define mmBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW1 0x0137
++#define mmBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW2 0x0138
++#define mmBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW3 0x0139
++#define mmBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW0 0x013a
++#define mmBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW1 0x013b
++#define mmBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW2 0x013c
++#define mmBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW3 0x013d
++#define mmBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF2_MAILBOX_CONTROL 0x013e
++#define mmBIF_BX_DEV0_EPF0_VF2_MAILBOX_CONTROL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF2_MAILBOX_INT_CNTL 0x013f
++#define mmBIF_BX_DEV0_EPF0_VF2_MAILBOX_INT_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX 0x0140
++#define mmBIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX_BASE_IDX 2
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf2_BIFDEC2
++// base address: 0x0
++#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_ADDR_LO 0x0400
++#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_ADDR_HI 0x0401
++#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_MSG_DATA 0x0402
++#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_CONTROL 0x0403
++#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_ADDR_LO 0x0404
++#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_ADDR_HI 0x0405
++#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_MSG_DATA 0x0406
++#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_CONTROL 0x0407
++#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_ADDR_LO 0x0408
++#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_ADDR_HI 0x0409
++#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_MSG_DATA 0x040a
++#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_CONTROL 0x040b
++#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_PBA 0x0800
++#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_PBA_BASE_IDX 3
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf3_SYSPFVFDEC
++// base address: 0x0
++#define mmBIF_BX_DEV0_EPF0_VF3_MM_INDEX 0x0000
++#define mmBIF_BX_DEV0_EPF0_VF3_MM_INDEX_BASE_IDX 0
++#define mmBIF_BX_DEV0_EPF0_VF3_MM_DATA 0x0001
++#define mmBIF_BX_DEV0_EPF0_VF3_MM_DATA_BASE_IDX 0
++#define mmBIF_BX_DEV0_EPF0_VF3_MM_INDEX_HI 0x0006
++#define mmBIF_BX_DEV0_EPF0_VF3_MM_INDEX_HI_BASE_IDX 0
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf3_BIFPFVFDEC1
++// base address: 0x0
++#define mmRCC_DEV0_EPF0_VF3_RCC_ERR_LOG 0x0085
++#define mmRCC_DEV0_EPF0_VF3_RCC_ERR_LOG_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF3_RCC_DOORBELL_APER_EN 0x00c0
++#define mmRCC_DEV0_EPF0_VF3_RCC_DOORBELL_APER_EN_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF3_RCC_CONFIG_MEMSIZE 0x00c3
++#define mmRCC_DEV0_EPF0_VF3_RCC_CONFIG_MEMSIZE_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF3_RCC_CONFIG_RESERVED 0x00c4
++#define mmRCC_DEV0_EPF0_VF3_RCC_CONFIG_RESERVED_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF3_RCC_IOV_FUNC_IDENTIFIER 0x00c5
++#define mmRCC_DEV0_EPF0_VF3_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf3_BIFPFVFDEC1
++// base address: 0x0
++#define mmBIF_BX_DEV0_EPF0_VF3_BIF_BME_STATUS 0x00eb
++#define mmBIF_BX_DEV0_EPF0_VF3_BIF_BME_STATUS_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG 0x00ec
++#define mmBIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
++#define mmBIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
++#define mmBIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
++#define mmBIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF3_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
++#define mmBIF_BX_DEV0_EPF0_VF3_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF3_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
++#define mmBIF_BX_DEV0_EPF0_VF3_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ 0x0106
++#define mmBIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE 0x0107
++#define mmBIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF3_BIF_TRANS_PENDING 0x0108
++#define mmBIF_BX_DEV0_EPF0_VF3_BIF_TRANS_PENDING_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF3_NBIF_GFX_ADDR_LUT_BYPASS 0x0112
++#define mmBIF_BX_DEV0_EPF0_VF3_NBIF_GFX_ADDR_LUT_BYPASS_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW0 0x0136
++#define mmBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW1 0x0137
++#define mmBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW2 0x0138
++#define mmBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW3 0x0139
++#define mmBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW0 0x013a
++#define mmBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW1 0x013b
++#define mmBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW2 0x013c
++#define mmBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW3 0x013d
++#define mmBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF3_MAILBOX_CONTROL 0x013e
++#define mmBIF_BX_DEV0_EPF0_VF3_MAILBOX_CONTROL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF3_MAILBOX_INT_CNTL 0x013f
++#define mmBIF_BX_DEV0_EPF0_VF3_MAILBOX_INT_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX 0x0140
++#define mmBIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX_BASE_IDX 2
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf3_BIFDEC2
++// base address: 0x0
++#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_ADDR_LO 0x0400
++#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_ADDR_HI 0x0401
++#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_MSG_DATA 0x0402
++#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_CONTROL 0x0403
++#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_ADDR_LO 0x0404
++#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_ADDR_HI 0x0405
++#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_MSG_DATA 0x0406
++#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_CONTROL 0x0407
++#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_ADDR_LO 0x0408
++#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_ADDR_HI 0x0409
++#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_MSG_DATA 0x040a
++#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_CONTROL 0x040b
++#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_PBA 0x0800
++#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_PBA_BASE_IDX 3
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf4_SYSPFVFDEC
++// base address: 0x0
++#define mmBIF_BX_DEV0_EPF0_VF4_MM_INDEX 0x0000
++#define mmBIF_BX_DEV0_EPF0_VF4_MM_INDEX_BASE_IDX 0
++#define mmBIF_BX_DEV0_EPF0_VF4_MM_DATA 0x0001
++#define mmBIF_BX_DEV0_EPF0_VF4_MM_DATA_BASE_IDX 0
++#define mmBIF_BX_DEV0_EPF0_VF4_MM_INDEX_HI 0x0006
++#define mmBIF_BX_DEV0_EPF0_VF4_MM_INDEX_HI_BASE_IDX 0
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf4_BIFPFVFDEC1
++// base address: 0x0
++#define mmRCC_DEV0_EPF0_VF4_RCC_ERR_LOG 0x0085
++#define mmRCC_DEV0_EPF0_VF4_RCC_ERR_LOG_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF4_RCC_DOORBELL_APER_EN 0x00c0
++#define mmRCC_DEV0_EPF0_VF4_RCC_DOORBELL_APER_EN_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF4_RCC_CONFIG_MEMSIZE 0x00c3
++#define mmRCC_DEV0_EPF0_VF4_RCC_CONFIG_MEMSIZE_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF4_RCC_CONFIG_RESERVED 0x00c4
++#define mmRCC_DEV0_EPF0_VF4_RCC_CONFIG_RESERVED_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF4_RCC_IOV_FUNC_IDENTIFIER 0x00c5
++#define mmRCC_DEV0_EPF0_VF4_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf4_BIFPFVFDEC1
++// base address: 0x0
++#define mmBIF_BX_DEV0_EPF0_VF4_BIF_BME_STATUS 0x00eb
++#define mmBIF_BX_DEV0_EPF0_VF4_BIF_BME_STATUS_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG 0x00ec
++#define mmBIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
++#define mmBIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
++#define mmBIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
++#define mmBIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF4_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
++#define mmBIF_BX_DEV0_EPF0_VF4_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF4_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
++#define mmBIF_BX_DEV0_EPF0_VF4_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ 0x0106
++#define mmBIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE 0x0107
++#define mmBIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF4_BIF_TRANS_PENDING 0x0108
++#define mmBIF_BX_DEV0_EPF0_VF4_BIF_TRANS_PENDING_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF4_NBIF_GFX_ADDR_LUT_BYPASS 0x0112
++#define mmBIF_BX_DEV0_EPF0_VF4_NBIF_GFX_ADDR_LUT_BYPASS_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW0 0x0136
++#define mmBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW1 0x0137
++#define mmBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW2 0x0138
++#define mmBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW3 0x0139
++#define mmBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW0 0x013a
++#define mmBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW1 0x013b
++#define mmBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW2 0x013c
++#define mmBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW3 0x013d
++#define mmBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF4_MAILBOX_CONTROL 0x013e
++#define mmBIF_BX_DEV0_EPF0_VF4_MAILBOX_CONTROL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF4_MAILBOX_INT_CNTL 0x013f
++#define mmBIF_BX_DEV0_EPF0_VF4_MAILBOX_INT_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX 0x0140
++#define mmBIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX_BASE_IDX 2
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf4_BIFDEC2
++// base address: 0x0
++#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_ADDR_LO 0x0400
++#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_ADDR_HI 0x0401
++#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_MSG_DATA 0x0402
++#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_CONTROL 0x0403
++#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_ADDR_LO 0x0404
++#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_ADDR_HI 0x0405
++#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_MSG_DATA 0x0406
++#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_CONTROL 0x0407
++#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_ADDR_LO 0x0408
++#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_ADDR_HI 0x0409
++#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_MSG_DATA 0x040a
++#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_CONTROL 0x040b
++#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_PBA 0x0800
++#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_PBA_BASE_IDX 3
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf5_SYSPFVFDEC
++// base address: 0x0
++#define mmBIF_BX_DEV0_EPF0_VF5_MM_INDEX 0x0000
++#define mmBIF_BX_DEV0_EPF0_VF5_MM_INDEX_BASE_IDX 0
++#define mmBIF_BX_DEV0_EPF0_VF5_MM_DATA 0x0001
++#define mmBIF_BX_DEV0_EPF0_VF5_MM_DATA_BASE_IDX 0
++#define mmBIF_BX_DEV0_EPF0_VF5_MM_INDEX_HI 0x0006
++#define mmBIF_BX_DEV0_EPF0_VF5_MM_INDEX_HI_BASE_IDX 0
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf5_BIFPFVFDEC1
++// base address: 0x0
++#define mmRCC_DEV0_EPF0_VF5_RCC_ERR_LOG 0x0085
++#define mmRCC_DEV0_EPF0_VF5_RCC_ERR_LOG_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF5_RCC_DOORBELL_APER_EN 0x00c0
++#define mmRCC_DEV0_EPF0_VF5_RCC_DOORBELL_APER_EN_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF5_RCC_CONFIG_MEMSIZE 0x00c3
++#define mmRCC_DEV0_EPF0_VF5_RCC_CONFIG_MEMSIZE_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF5_RCC_CONFIG_RESERVED 0x00c4
++#define mmRCC_DEV0_EPF0_VF5_RCC_CONFIG_RESERVED_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF5_RCC_IOV_FUNC_IDENTIFIER 0x00c5
++#define mmRCC_DEV0_EPF0_VF5_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf5_BIFPFVFDEC1
++// base address: 0x0
++#define mmBIF_BX_DEV0_EPF0_VF5_BIF_BME_STATUS 0x00eb
++#define mmBIF_BX_DEV0_EPF0_VF5_BIF_BME_STATUS_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG 0x00ec
++#define mmBIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
++#define mmBIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
++#define mmBIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
++#define mmBIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF5_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
++#define mmBIF_BX_DEV0_EPF0_VF5_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF5_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
++#define mmBIF_BX_DEV0_EPF0_VF5_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ 0x0106
++#define mmBIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE 0x0107
++#define mmBIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF5_BIF_TRANS_PENDING 0x0108
++#define mmBIF_BX_DEV0_EPF0_VF5_BIF_TRANS_PENDING_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF5_NBIF_GFX_ADDR_LUT_BYPASS 0x0112
++#define mmBIF_BX_DEV0_EPF0_VF5_NBIF_GFX_ADDR_LUT_BYPASS_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW0 0x0136
++#define mmBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW1 0x0137
++#define mmBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW2 0x0138
++#define mmBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW3 0x0139
++#define mmBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW0 0x013a
++#define mmBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW1 0x013b
++#define mmBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW2 0x013c
++#define mmBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW3 0x013d
++#define mmBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL 0x013e
++#define mmBIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF5_MAILBOX_INT_CNTL 0x013f
++#define mmBIF_BX_DEV0_EPF0_VF5_MAILBOX_INT_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX 0x0140
++#define mmBIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX_BASE_IDX 2
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf5_BIFDEC2
++// base address: 0x0
++#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_ADDR_LO 0x0400
++#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_ADDR_HI 0x0401
++#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_MSG_DATA 0x0402
++#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_CONTROL 0x0403
++#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_ADDR_LO 0x0404
++#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_ADDR_HI 0x0405
++#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_MSG_DATA 0x0406
++#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_CONTROL 0x0407
++#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_ADDR_LO 0x0408
++#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_ADDR_HI 0x0409
++#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_MSG_DATA 0x040a
++#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_CONTROL 0x040b
++#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_PBA 0x0800
++#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_PBA_BASE_IDX 3
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf6_SYSPFVFDEC
++// base address: 0x0
++#define mmBIF_BX_DEV0_EPF0_VF6_MM_INDEX 0x0000
++#define mmBIF_BX_DEV0_EPF0_VF6_MM_INDEX_BASE_IDX 0
++#define mmBIF_BX_DEV0_EPF0_VF6_MM_DATA 0x0001
++#define mmBIF_BX_DEV0_EPF0_VF6_MM_DATA_BASE_IDX 0
++#define mmBIF_BX_DEV0_EPF0_VF6_MM_INDEX_HI 0x0006
++#define mmBIF_BX_DEV0_EPF0_VF6_MM_INDEX_HI_BASE_IDX 0
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf6_BIFPFVFDEC1
++// base address: 0x0
++#define mmRCC_DEV0_EPF0_VF6_RCC_ERR_LOG 0x0085
++#define mmRCC_DEV0_EPF0_VF6_RCC_ERR_LOG_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF6_RCC_DOORBELL_APER_EN 0x00c0
++#define mmRCC_DEV0_EPF0_VF6_RCC_DOORBELL_APER_EN_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF6_RCC_CONFIG_MEMSIZE 0x00c3
++#define mmRCC_DEV0_EPF0_VF6_RCC_CONFIG_MEMSIZE_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF6_RCC_CONFIG_RESERVED 0x00c4
++#define mmRCC_DEV0_EPF0_VF6_RCC_CONFIG_RESERVED_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF6_RCC_IOV_FUNC_IDENTIFIER 0x00c5
++#define mmRCC_DEV0_EPF0_VF6_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf6_BIFPFVFDEC1
++// base address: 0x0
++#define mmBIF_BX_DEV0_EPF0_VF6_BIF_BME_STATUS 0x00eb
++#define mmBIF_BX_DEV0_EPF0_VF6_BIF_BME_STATUS_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG 0x00ec
++#define mmBIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
++#define mmBIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
++#define mmBIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
++#define mmBIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF6_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
++#define mmBIF_BX_DEV0_EPF0_VF6_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF6_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
++#define mmBIF_BX_DEV0_EPF0_VF6_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ 0x0106
++#define mmBIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE 0x0107
++#define mmBIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF6_BIF_TRANS_PENDING 0x0108
++#define mmBIF_BX_DEV0_EPF0_VF6_BIF_TRANS_PENDING_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF6_NBIF_GFX_ADDR_LUT_BYPASS 0x0112
++#define mmBIF_BX_DEV0_EPF0_VF6_NBIF_GFX_ADDR_LUT_BYPASS_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW0 0x0136
++#define mmBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW1 0x0137
++#define mmBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW2 0x0138
++#define mmBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW3 0x0139
++#define mmBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW0 0x013a
++#define mmBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW1 0x013b
++#define mmBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW2 0x013c
++#define mmBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW3 0x013d
++#define mmBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF6_MAILBOX_CONTROL 0x013e
++#define mmBIF_BX_DEV0_EPF0_VF6_MAILBOX_CONTROL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF6_MAILBOX_INT_CNTL 0x013f
++#define mmBIF_BX_DEV0_EPF0_VF6_MAILBOX_INT_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX 0x0140
++#define mmBIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX_BASE_IDX 2
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf6_BIFDEC2
++// base address: 0x0
++#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_ADDR_LO 0x0400
++#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_ADDR_HI 0x0401
++#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_MSG_DATA 0x0402
++#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_CONTROL 0x0403
++#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_ADDR_LO 0x0404
++#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_ADDR_HI 0x0405
++#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_MSG_DATA 0x0406
++#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_CONTROL 0x0407
++#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_ADDR_LO 0x0408
++#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_ADDR_HI 0x0409
++#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_MSG_DATA 0x040a
++#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_CONTROL 0x040b
++#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_PBA 0x0800
++#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_PBA_BASE_IDX 3
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf7_SYSPFVFDEC
++// base address: 0x0
++#define mmBIF_BX_DEV0_EPF0_VF7_MM_INDEX 0x0000
++#define mmBIF_BX_DEV0_EPF0_VF7_MM_INDEX_BASE_IDX 0
++#define mmBIF_BX_DEV0_EPF0_VF7_MM_DATA 0x0001
++#define mmBIF_BX_DEV0_EPF0_VF7_MM_DATA_BASE_IDX 0
++#define mmBIF_BX_DEV0_EPF0_VF7_MM_INDEX_HI 0x0006
++#define mmBIF_BX_DEV0_EPF0_VF7_MM_INDEX_HI_BASE_IDX 0
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf7_BIFPFVFDEC1
++// base address: 0x0
++#define mmRCC_DEV0_EPF0_VF7_RCC_ERR_LOG 0x0085
++#define mmRCC_DEV0_EPF0_VF7_RCC_ERR_LOG_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF7_RCC_DOORBELL_APER_EN 0x00c0
++#define mmRCC_DEV0_EPF0_VF7_RCC_DOORBELL_APER_EN_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF7_RCC_CONFIG_MEMSIZE 0x00c3
++#define mmRCC_DEV0_EPF0_VF7_RCC_CONFIG_MEMSIZE_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF7_RCC_CONFIG_RESERVED 0x00c4
++#define mmRCC_DEV0_EPF0_VF7_RCC_CONFIG_RESERVED_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF7_RCC_IOV_FUNC_IDENTIFIER 0x00c5
++#define mmRCC_DEV0_EPF0_VF7_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf7_BIFPFVFDEC1
++// base address: 0x0
++#define mmBIF_BX_DEV0_EPF0_VF7_BIF_BME_STATUS 0x00eb
++#define mmBIF_BX_DEV0_EPF0_VF7_BIF_BME_STATUS_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG 0x00ec
++#define mmBIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
++#define mmBIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
++#define mmBIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
++#define mmBIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF7_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
++#define mmBIF_BX_DEV0_EPF0_VF7_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF7_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
++#define mmBIF_BX_DEV0_EPF0_VF7_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ 0x0106
++#define mmBIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE 0x0107
++#define mmBIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF7_BIF_TRANS_PENDING 0x0108
++#define mmBIF_BX_DEV0_EPF0_VF7_BIF_TRANS_PENDING_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF7_NBIF_GFX_ADDR_LUT_BYPASS 0x0112
++#define mmBIF_BX_DEV0_EPF0_VF7_NBIF_GFX_ADDR_LUT_BYPASS_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW0 0x0136
++#define mmBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW1 0x0137
++#define mmBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW2 0x0138
++#define mmBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW3 0x0139
++#define mmBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW0 0x013a
++#define mmBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW1 0x013b
++#define mmBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW2 0x013c
++#define mmBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW3 0x013d
++#define mmBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF7_MAILBOX_CONTROL 0x013e
++#define mmBIF_BX_DEV0_EPF0_VF7_MAILBOX_CONTROL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF7_MAILBOX_INT_CNTL 0x013f
++#define mmBIF_BX_DEV0_EPF0_VF7_MAILBOX_INT_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX 0x0140
++#define mmBIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX_BASE_IDX 2
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf7_BIFDEC2
++// base address: 0x0
++#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_ADDR_LO 0x0400
++#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_ADDR_HI 0x0401
++#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_MSG_DATA 0x0402
++#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_CONTROL 0x0403
++#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_ADDR_LO 0x0404
++#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_ADDR_HI 0x0405
++#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_MSG_DATA 0x0406
++#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_CONTROL 0x0407
++#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_ADDR_LO 0x0408
++#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_ADDR_HI 0x0409
++#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_MSG_DATA 0x040a
++#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_CONTROL 0x040b
++#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_PBA 0x0800
++#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_PBA_BASE_IDX 3
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf8_SYSPFVFDEC
++// base address: 0x0
++#define mmBIF_BX_DEV0_EPF0_VF8_MM_INDEX 0x0000
++#define mmBIF_BX_DEV0_EPF0_VF8_MM_INDEX_BASE_IDX 0
++#define mmBIF_BX_DEV0_EPF0_VF8_MM_DATA 0x0001
++#define mmBIF_BX_DEV0_EPF0_VF8_MM_DATA_BASE_IDX 0
++#define mmBIF_BX_DEV0_EPF0_VF8_MM_INDEX_HI 0x0006
++#define mmBIF_BX_DEV0_EPF0_VF8_MM_INDEX_HI_BASE_IDX 0
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf8_BIFPFVFDEC1
++// base address: 0x0
++#define mmRCC_DEV0_EPF0_VF8_RCC_ERR_LOG 0x0085
++#define mmRCC_DEV0_EPF0_VF8_RCC_ERR_LOG_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF8_RCC_DOORBELL_APER_EN 0x00c0
++#define mmRCC_DEV0_EPF0_VF8_RCC_DOORBELL_APER_EN_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF8_RCC_CONFIG_MEMSIZE 0x00c3
++#define mmRCC_DEV0_EPF0_VF8_RCC_CONFIG_MEMSIZE_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF8_RCC_CONFIG_RESERVED 0x00c4
++#define mmRCC_DEV0_EPF0_VF8_RCC_CONFIG_RESERVED_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF8_RCC_IOV_FUNC_IDENTIFIER 0x00c5
++#define mmRCC_DEV0_EPF0_VF8_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf8_BIFPFVFDEC1
++// base address: 0x0
++#define mmBIF_BX_DEV0_EPF0_VF8_BIF_BME_STATUS 0x00eb
++#define mmBIF_BX_DEV0_EPF0_VF8_BIF_BME_STATUS_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG 0x00ec
++#define mmBIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
++#define mmBIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
++#define mmBIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
++#define mmBIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF8_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
++#define mmBIF_BX_DEV0_EPF0_VF8_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF8_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
++#define mmBIF_BX_DEV0_EPF0_VF8_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ 0x0106
++#define mmBIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE 0x0107
++#define mmBIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF8_BIF_TRANS_PENDING 0x0108
++#define mmBIF_BX_DEV0_EPF0_VF8_BIF_TRANS_PENDING_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF8_NBIF_GFX_ADDR_LUT_BYPASS 0x0112
++#define mmBIF_BX_DEV0_EPF0_VF8_NBIF_GFX_ADDR_LUT_BYPASS_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW0 0x0136
++#define mmBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW1 0x0137
++#define mmBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW2 0x0138
++#define mmBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW3 0x0139
++#define mmBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW0 0x013a
++#define mmBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW1 0x013b
++#define mmBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW2 0x013c
++#define mmBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW3 0x013d
++#define mmBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF8_MAILBOX_CONTROL 0x013e
++#define mmBIF_BX_DEV0_EPF0_VF8_MAILBOX_CONTROL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF8_MAILBOX_INT_CNTL 0x013f
++#define mmBIF_BX_DEV0_EPF0_VF8_MAILBOX_INT_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX 0x0140
++#define mmBIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX_BASE_IDX 2
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf8_BIFDEC2
++// base address: 0x0
++#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_ADDR_LO 0x0400
++#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_ADDR_HI 0x0401
++#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_MSG_DATA 0x0402
++#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_CONTROL 0x0403
++#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_ADDR_LO 0x0404
++#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_ADDR_HI 0x0405
++#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_MSG_DATA 0x0406
++#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_CONTROL 0x0407
++#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_ADDR_LO 0x0408
++#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_ADDR_HI 0x0409
++#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_MSG_DATA 0x040a
++#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_CONTROL 0x040b
++#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_PBA 0x0800
++#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_PBA_BASE_IDX 3
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf9_SYSPFVFDEC
++// base address: 0x0
++#define mmBIF_BX_DEV0_EPF0_VF9_MM_INDEX 0x0000
++#define mmBIF_BX_DEV0_EPF0_VF9_MM_INDEX_BASE_IDX 0
++#define mmBIF_BX_DEV0_EPF0_VF9_MM_DATA 0x0001
++#define mmBIF_BX_DEV0_EPF0_VF9_MM_DATA_BASE_IDX 0
++#define mmBIF_BX_DEV0_EPF0_VF9_MM_INDEX_HI 0x0006
++#define mmBIF_BX_DEV0_EPF0_VF9_MM_INDEX_HI_BASE_IDX 0
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf9_BIFPFVFDEC1
++// base address: 0x0
++#define mmRCC_DEV0_EPF0_VF9_RCC_ERR_LOG 0x0085
++#define mmRCC_DEV0_EPF0_VF9_RCC_ERR_LOG_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF9_RCC_DOORBELL_APER_EN 0x00c0
++#define mmRCC_DEV0_EPF0_VF9_RCC_DOORBELL_APER_EN_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF9_RCC_CONFIG_MEMSIZE 0x00c3
++#define mmRCC_DEV0_EPF0_VF9_RCC_CONFIG_MEMSIZE_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF9_RCC_CONFIG_RESERVED 0x00c4
++#define mmRCC_DEV0_EPF0_VF9_RCC_CONFIG_RESERVED_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF9_RCC_IOV_FUNC_IDENTIFIER 0x00c5
++#define mmRCC_DEV0_EPF0_VF9_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf9_BIFPFVFDEC1
++// base address: 0x0
++#define mmBIF_BX_DEV0_EPF0_VF9_BIF_BME_STATUS 0x00eb
++#define mmBIF_BX_DEV0_EPF0_VF9_BIF_BME_STATUS_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG 0x00ec
++#define mmBIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
++#define mmBIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
++#define mmBIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
++#define mmBIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF9_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
++#define mmBIF_BX_DEV0_EPF0_VF9_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF9_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
++#define mmBIF_BX_DEV0_EPF0_VF9_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ 0x0106
++#define mmBIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE 0x0107
++#define mmBIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF9_BIF_TRANS_PENDING 0x0108
++#define mmBIF_BX_DEV0_EPF0_VF9_BIF_TRANS_PENDING_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF9_NBIF_GFX_ADDR_LUT_BYPASS 0x0112
++#define mmBIF_BX_DEV0_EPF0_VF9_NBIF_GFX_ADDR_LUT_BYPASS_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW0 0x0136
++#define mmBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW1 0x0137
++#define mmBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW2 0x0138
++#define mmBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW3 0x0139
++#define mmBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW0 0x013a
++#define mmBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW1 0x013b
++#define mmBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW2 0x013c
++#define mmBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW3 0x013d
++#define mmBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF9_MAILBOX_CONTROL 0x013e
++#define mmBIF_BX_DEV0_EPF0_VF9_MAILBOX_CONTROL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF9_MAILBOX_INT_CNTL 0x013f
++#define mmBIF_BX_DEV0_EPF0_VF9_MAILBOX_INT_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX 0x0140
++#define mmBIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX_BASE_IDX 2
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf9_BIFDEC2
++// base address: 0x0
++#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_ADDR_LO 0x0400
++#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_ADDR_HI 0x0401
++#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_MSG_DATA 0x0402
++#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_CONTROL 0x0403
++#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_ADDR_LO 0x0404
++#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_ADDR_HI 0x0405
++#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_MSG_DATA 0x0406
++#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_CONTROL 0x0407
++#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_ADDR_LO 0x0408
++#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_ADDR_HI 0x0409
++#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_MSG_DATA 0x040a
++#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_CONTROL 0x040b
++#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_PBA 0x0800
++#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_PBA_BASE_IDX 3
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf10_SYSPFVFDEC
++// base address: 0x0
++#define mmBIF_BX_DEV0_EPF0_VF10_MM_INDEX 0x0000
++#define mmBIF_BX_DEV0_EPF0_VF10_MM_INDEX_BASE_IDX 0
++#define mmBIF_BX_DEV0_EPF0_VF10_MM_DATA 0x0001
++#define mmBIF_BX_DEV0_EPF0_VF10_MM_DATA_BASE_IDX 0
++#define mmBIF_BX_DEV0_EPF0_VF10_MM_INDEX_HI 0x0006
++#define mmBIF_BX_DEV0_EPF0_VF10_MM_INDEX_HI_BASE_IDX 0
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf10_BIFPFVFDEC1
++// base address: 0x0
++#define mmRCC_DEV0_EPF0_VF10_RCC_ERR_LOG 0x0085
++#define mmRCC_DEV0_EPF0_VF10_RCC_ERR_LOG_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF10_RCC_DOORBELL_APER_EN 0x00c0
++#define mmRCC_DEV0_EPF0_VF10_RCC_DOORBELL_APER_EN_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF10_RCC_CONFIG_MEMSIZE 0x00c3
++#define mmRCC_DEV0_EPF0_VF10_RCC_CONFIG_MEMSIZE_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF10_RCC_CONFIG_RESERVED 0x00c4
++#define mmRCC_DEV0_EPF0_VF10_RCC_CONFIG_RESERVED_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF10_RCC_IOV_FUNC_IDENTIFIER 0x00c5
++#define mmRCC_DEV0_EPF0_VF10_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf10_BIFPFVFDEC1
++// base address: 0x0
++#define mmBIF_BX_DEV0_EPF0_VF10_BIF_BME_STATUS 0x00eb
++#define mmBIF_BX_DEV0_EPF0_VF10_BIF_BME_STATUS_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG 0x00ec
++#define mmBIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
++#define mmBIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
++#define mmBIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
++#define mmBIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF10_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
++#define mmBIF_BX_DEV0_EPF0_VF10_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF10_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
++#define mmBIF_BX_DEV0_EPF0_VF10_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ 0x0106
++#define mmBIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE 0x0107
++#define mmBIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF10_BIF_TRANS_PENDING 0x0108
++#define mmBIF_BX_DEV0_EPF0_VF10_BIF_TRANS_PENDING_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF10_NBIF_GFX_ADDR_LUT_BYPASS 0x0112
++#define mmBIF_BX_DEV0_EPF0_VF10_NBIF_GFX_ADDR_LUT_BYPASS_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW0 0x0136
++#define mmBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW1 0x0137
++#define mmBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW2 0x0138
++#define mmBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW3 0x0139
++#define mmBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW0 0x013a
++#define mmBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW1 0x013b
++#define mmBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW2 0x013c
++#define mmBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW3 0x013d
++#define mmBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF10_MAILBOX_CONTROL 0x013e
++#define mmBIF_BX_DEV0_EPF0_VF10_MAILBOX_CONTROL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF10_MAILBOX_INT_CNTL 0x013f
++#define mmBIF_BX_DEV0_EPF0_VF10_MAILBOX_INT_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX 0x0140
++#define mmBIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX_BASE_IDX 2
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf10_BIFDEC2
++// base address: 0x0
++#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_ADDR_LO 0x0400
++#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_ADDR_HI 0x0401
++#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_MSG_DATA 0x0402
++#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_CONTROL 0x0403
++#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_ADDR_LO 0x0404
++#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_ADDR_HI 0x0405
++#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_MSG_DATA 0x0406
++#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_CONTROL 0x0407
++#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_ADDR_LO 0x0408
++#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_ADDR_HI 0x0409
++#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_MSG_DATA 0x040a
++#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_CONTROL 0x040b
++#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_PBA 0x0800
++#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_PBA_BASE_IDX 3
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf11_SYSPFVFDEC
++// base address: 0x0
++#define mmBIF_BX_DEV0_EPF0_VF11_MM_INDEX 0x0000
++#define mmBIF_BX_DEV0_EPF0_VF11_MM_INDEX_BASE_IDX 0
++#define mmBIF_BX_DEV0_EPF0_VF11_MM_DATA 0x0001
++#define mmBIF_BX_DEV0_EPF0_VF11_MM_DATA_BASE_IDX 0
++#define mmBIF_BX_DEV0_EPF0_VF11_MM_INDEX_HI 0x0006
++#define mmBIF_BX_DEV0_EPF0_VF11_MM_INDEX_HI_BASE_IDX 0
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf11_BIFPFVFDEC1
++// base address: 0x0
++#define mmRCC_DEV0_EPF0_VF11_RCC_ERR_LOG 0x0085
++#define mmRCC_DEV0_EPF0_VF11_RCC_ERR_LOG_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF11_RCC_DOORBELL_APER_EN 0x00c0
++#define mmRCC_DEV0_EPF0_VF11_RCC_DOORBELL_APER_EN_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF11_RCC_CONFIG_MEMSIZE 0x00c3
++#define mmRCC_DEV0_EPF0_VF11_RCC_CONFIG_MEMSIZE_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF11_RCC_CONFIG_RESERVED 0x00c4
++#define mmRCC_DEV0_EPF0_VF11_RCC_CONFIG_RESERVED_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF11_RCC_IOV_FUNC_IDENTIFIER 0x00c5
++#define mmRCC_DEV0_EPF0_VF11_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf11_BIFPFVFDEC1
++// base address: 0x0
++#define mmBIF_BX_DEV0_EPF0_VF11_BIF_BME_STATUS 0x00eb
++#define mmBIF_BX_DEV0_EPF0_VF11_BIF_BME_STATUS_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG 0x00ec
++#define mmBIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
++#define mmBIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
++#define mmBIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
++#define mmBIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF11_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
++#define mmBIF_BX_DEV0_EPF0_VF11_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF11_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
++#define mmBIF_BX_DEV0_EPF0_VF11_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ 0x0106
++#define mmBIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE 0x0107
++#define mmBIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF11_BIF_TRANS_PENDING 0x0108
++#define mmBIF_BX_DEV0_EPF0_VF11_BIF_TRANS_PENDING_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF11_NBIF_GFX_ADDR_LUT_BYPASS 0x0112
++#define mmBIF_BX_DEV0_EPF0_VF11_NBIF_GFX_ADDR_LUT_BYPASS_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW0 0x0136
++#define mmBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW1 0x0137
++#define mmBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW2 0x0138
++#define mmBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW3 0x0139
++#define mmBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW0 0x013a
++#define mmBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW1 0x013b
++#define mmBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW2 0x013c
++#define mmBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW3 0x013d
++#define mmBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF11_MAILBOX_CONTROL 0x013e
++#define mmBIF_BX_DEV0_EPF0_VF11_MAILBOX_CONTROL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF11_MAILBOX_INT_CNTL 0x013f
++#define mmBIF_BX_DEV0_EPF0_VF11_MAILBOX_INT_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX 0x0140
++#define mmBIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX_BASE_IDX 2
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf11_BIFDEC2
++// base address: 0x0
++#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_ADDR_LO 0x0400
++#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_ADDR_HI 0x0401
++#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_MSG_DATA 0x0402
++#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_CONTROL 0x0403
++#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_ADDR_LO 0x0404
++#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_ADDR_HI 0x0405
++#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_MSG_DATA 0x0406
++#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_CONTROL 0x0407
++#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_ADDR_LO 0x0408
++#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_ADDR_HI 0x0409
++#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_MSG_DATA 0x040a
++#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_CONTROL 0x040b
++#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_PBA 0x0800
++#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_PBA_BASE_IDX 3
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf12_SYSPFVFDEC
++// base address: 0x0
++#define mmBIF_BX_DEV0_EPF0_VF12_MM_INDEX 0x0000
++#define mmBIF_BX_DEV0_EPF0_VF12_MM_INDEX_BASE_IDX 0
++#define mmBIF_BX_DEV0_EPF0_VF12_MM_DATA 0x0001
++#define mmBIF_BX_DEV0_EPF0_VF12_MM_DATA_BASE_IDX 0
++#define mmBIF_BX_DEV0_EPF0_VF12_MM_INDEX_HI 0x0006
++#define mmBIF_BX_DEV0_EPF0_VF12_MM_INDEX_HI_BASE_IDX 0
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf12_BIFPFVFDEC1
++// base address: 0x0
++#define mmRCC_DEV0_EPF0_VF12_RCC_ERR_LOG 0x0085
++#define mmRCC_DEV0_EPF0_VF12_RCC_ERR_LOG_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF12_RCC_DOORBELL_APER_EN 0x00c0
++#define mmRCC_DEV0_EPF0_VF12_RCC_DOORBELL_APER_EN_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF12_RCC_CONFIG_MEMSIZE 0x00c3
++#define mmRCC_DEV0_EPF0_VF12_RCC_CONFIG_MEMSIZE_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF12_RCC_CONFIG_RESERVED 0x00c4
++#define mmRCC_DEV0_EPF0_VF12_RCC_CONFIG_RESERVED_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF12_RCC_IOV_FUNC_IDENTIFIER 0x00c5
++#define mmRCC_DEV0_EPF0_VF12_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf12_BIFPFVFDEC1
++// base address: 0x0
++#define mmBIF_BX_DEV0_EPF0_VF12_BIF_BME_STATUS 0x00eb
++#define mmBIF_BX_DEV0_EPF0_VF12_BIF_BME_STATUS_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG 0x00ec
++#define mmBIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
++#define mmBIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
++#define mmBIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
++#define mmBIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF12_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
++#define mmBIF_BX_DEV0_EPF0_VF12_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF12_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
++#define mmBIF_BX_DEV0_EPF0_VF12_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ 0x0106
++#define mmBIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE 0x0107
++#define mmBIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF12_BIF_TRANS_PENDING 0x0108
++#define mmBIF_BX_DEV0_EPF0_VF12_BIF_TRANS_PENDING_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF12_NBIF_GFX_ADDR_LUT_BYPASS 0x0112
++#define mmBIF_BX_DEV0_EPF0_VF12_NBIF_GFX_ADDR_LUT_BYPASS_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW0 0x0136
++#define mmBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW1 0x0137
++#define mmBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW2 0x0138
++#define mmBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW3 0x0139
++#define mmBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW0 0x013a
++#define mmBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW1 0x013b
++#define mmBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW2 0x013c
++#define mmBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW3 0x013d
++#define mmBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF12_MAILBOX_CONTROL 0x013e
++#define mmBIF_BX_DEV0_EPF0_VF12_MAILBOX_CONTROL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF12_MAILBOX_INT_CNTL 0x013f
++#define mmBIF_BX_DEV0_EPF0_VF12_MAILBOX_INT_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX 0x0140
++#define mmBIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX_BASE_IDX 2
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf12_BIFDEC2
++// base address: 0x0
++#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_ADDR_LO 0x0400
++#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_ADDR_HI 0x0401
++#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_MSG_DATA 0x0402
++#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_CONTROL 0x0403
++#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_ADDR_LO 0x0404
++#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_ADDR_HI 0x0405
++#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_MSG_DATA 0x0406
++#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_CONTROL 0x0407
++#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_ADDR_LO 0x0408
++#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_ADDR_HI 0x0409
++#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_MSG_DATA 0x040a
++#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_CONTROL 0x040b
++#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_PBA 0x0800
++#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_PBA_BASE_IDX 3
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf13_SYSPFVFDEC
++// base address: 0x0
++#define mmBIF_BX_DEV0_EPF0_VF13_MM_INDEX 0x0000
++#define mmBIF_BX_DEV0_EPF0_VF13_MM_INDEX_BASE_IDX 0
++#define mmBIF_BX_DEV0_EPF0_VF13_MM_DATA 0x0001
++#define mmBIF_BX_DEV0_EPF0_VF13_MM_DATA_BASE_IDX 0
++#define mmBIF_BX_DEV0_EPF0_VF13_MM_INDEX_HI 0x0006
++#define mmBIF_BX_DEV0_EPF0_VF13_MM_INDEX_HI_BASE_IDX 0
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf13_BIFPFVFDEC1
++// base address: 0x0
++#define mmRCC_DEV0_EPF0_VF13_RCC_ERR_LOG 0x0085
++#define mmRCC_DEV0_EPF0_VF13_RCC_ERR_LOG_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF13_RCC_DOORBELL_APER_EN 0x00c0
++#define mmRCC_DEV0_EPF0_VF13_RCC_DOORBELL_APER_EN_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF13_RCC_CONFIG_MEMSIZE 0x00c3
++#define mmRCC_DEV0_EPF0_VF13_RCC_CONFIG_MEMSIZE_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF13_RCC_CONFIG_RESERVED 0x00c4
++#define mmRCC_DEV0_EPF0_VF13_RCC_CONFIG_RESERVED_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF13_RCC_IOV_FUNC_IDENTIFIER 0x00c5
++#define mmRCC_DEV0_EPF0_VF13_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf13_BIFPFVFDEC1
++// base address: 0x0
++#define mmBIF_BX_DEV0_EPF0_VF13_BIF_BME_STATUS 0x00eb
++#define mmBIF_BX_DEV0_EPF0_VF13_BIF_BME_STATUS_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG 0x00ec
++#define mmBIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
++#define mmBIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
++#define mmBIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
++#define mmBIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF13_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
++#define mmBIF_BX_DEV0_EPF0_VF13_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF13_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
++#define mmBIF_BX_DEV0_EPF0_VF13_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ 0x0106
++#define mmBIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE 0x0107
++#define mmBIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF13_BIF_TRANS_PENDING 0x0108
++#define mmBIF_BX_DEV0_EPF0_VF13_BIF_TRANS_PENDING_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF13_NBIF_GFX_ADDR_LUT_BYPASS 0x0112
++#define mmBIF_BX_DEV0_EPF0_VF13_NBIF_GFX_ADDR_LUT_BYPASS_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW0 0x0136
++#define mmBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW1 0x0137
++#define mmBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW2 0x0138
++#define mmBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW3 0x0139
++#define mmBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW0 0x013a
++#define mmBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW1 0x013b
++#define mmBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW2 0x013c
++#define mmBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW3 0x013d
++#define mmBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF13_MAILBOX_CONTROL 0x013e
++#define mmBIF_BX_DEV0_EPF0_VF13_MAILBOX_CONTROL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF13_MAILBOX_INT_CNTL 0x013f
++#define mmBIF_BX_DEV0_EPF0_VF13_MAILBOX_INT_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX 0x0140
++#define mmBIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX_BASE_IDX 2
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf13_BIFDEC2
++// base address: 0x0
++#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_ADDR_LO 0x0400
++#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_ADDR_HI 0x0401
++#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_MSG_DATA 0x0402
++#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_CONTROL 0x0403
++#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_ADDR_LO 0x0404
++#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_ADDR_HI 0x0405
++#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_MSG_DATA 0x0406
++#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_CONTROL 0x0407
++#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_ADDR_LO 0x0408
++#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_ADDR_HI 0x0409
++#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_MSG_DATA 0x040a
++#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_CONTROL 0x040b
++#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_PBA 0x0800
++#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_PBA_BASE_IDX 3
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf14_SYSPFVFDEC
++// base address: 0x0
++#define mmBIF_BX_DEV0_EPF0_VF14_MM_INDEX 0x0000
++#define mmBIF_BX_DEV0_EPF0_VF14_MM_INDEX_BASE_IDX 0
++#define mmBIF_BX_DEV0_EPF0_VF14_MM_DATA 0x0001
++#define mmBIF_BX_DEV0_EPF0_VF14_MM_DATA_BASE_IDX 0
++#define mmBIF_BX_DEV0_EPF0_VF14_MM_INDEX_HI 0x0006
++#define mmBIF_BX_DEV0_EPF0_VF14_MM_INDEX_HI_BASE_IDX 0
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf14_BIFPFVFDEC1
++// base address: 0x0
++#define mmRCC_DEV0_EPF0_VF14_RCC_ERR_LOG 0x0085
++#define mmRCC_DEV0_EPF0_VF14_RCC_ERR_LOG_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF14_RCC_DOORBELL_APER_EN 0x00c0
++#define mmRCC_DEV0_EPF0_VF14_RCC_DOORBELL_APER_EN_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF14_RCC_CONFIG_MEMSIZE 0x00c3
++#define mmRCC_DEV0_EPF0_VF14_RCC_CONFIG_MEMSIZE_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF14_RCC_CONFIG_RESERVED 0x00c4
++#define mmRCC_DEV0_EPF0_VF14_RCC_CONFIG_RESERVED_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF14_RCC_IOV_FUNC_IDENTIFIER 0x00c5
++#define mmRCC_DEV0_EPF0_VF14_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf14_BIFPFVFDEC1
++// base address: 0x0
++#define mmBIF_BX_DEV0_EPF0_VF14_BIF_BME_STATUS 0x00eb
++#define mmBIF_BX_DEV0_EPF0_VF14_BIF_BME_STATUS_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG 0x00ec
++#define mmBIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
++#define mmBIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
++#define mmBIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
++#define mmBIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF14_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
++#define mmBIF_BX_DEV0_EPF0_VF14_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF14_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
++#define mmBIF_BX_DEV0_EPF0_VF14_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ 0x0106
++#define mmBIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE 0x0107
++#define mmBIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF14_BIF_TRANS_PENDING 0x0108
++#define mmBIF_BX_DEV0_EPF0_VF14_BIF_TRANS_PENDING_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF14_NBIF_GFX_ADDR_LUT_BYPASS 0x0112
++#define mmBIF_BX_DEV0_EPF0_VF14_NBIF_GFX_ADDR_LUT_BYPASS_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW0 0x0136
++#define mmBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW1 0x0137
++#define mmBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW2 0x0138
++#define mmBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW3 0x0139
++#define mmBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW0 0x013a
++#define mmBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW1 0x013b
++#define mmBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW2 0x013c
++#define mmBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW3 0x013d
++#define mmBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF14_MAILBOX_CONTROL 0x013e
++#define mmBIF_BX_DEV0_EPF0_VF14_MAILBOX_CONTROL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF14_MAILBOX_INT_CNTL 0x013f
++#define mmBIF_BX_DEV0_EPF0_VF14_MAILBOX_INT_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX 0x0140
++#define mmBIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX_BASE_IDX 2
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf14_BIFDEC2
++// base address: 0x0
++#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_ADDR_LO 0x0400
++#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_ADDR_HI 0x0401
++#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_MSG_DATA 0x0402
++#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_CONTROL 0x0403
++#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_ADDR_LO 0x0404
++#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_ADDR_HI 0x0405
++#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_MSG_DATA 0x0406
++#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_CONTROL 0x0407
++#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_ADDR_LO 0x0408
++#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_ADDR_HI 0x0409
++#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_MSG_DATA 0x040a
++#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_CONTROL 0x040b
++#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_PBA 0x0800
++#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_PBA_BASE_IDX 3
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf15_SYSPFVFDEC
++// base address: 0x0
++#define mmBIF_BX_DEV0_EPF0_VF15_MM_INDEX 0x0000
++#define mmBIF_BX_DEV0_EPF0_VF15_MM_INDEX_BASE_IDX 0
++#define mmBIF_BX_DEV0_EPF0_VF15_MM_DATA 0x0001
++#define mmBIF_BX_DEV0_EPF0_VF15_MM_DATA_BASE_IDX 0
++#define mmBIF_BX_DEV0_EPF0_VF15_MM_INDEX_HI 0x0006
++#define mmBIF_BX_DEV0_EPF0_VF15_MM_INDEX_HI_BASE_IDX 0
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf15_BIFPFVFDEC1
++// base address: 0x0
++#define mmRCC_DEV0_EPF0_VF15_RCC_ERR_LOG 0x0085
++#define mmRCC_DEV0_EPF0_VF15_RCC_ERR_LOG_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF15_RCC_DOORBELL_APER_EN 0x00c0
++#define mmRCC_DEV0_EPF0_VF15_RCC_DOORBELL_APER_EN_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF15_RCC_CONFIG_MEMSIZE 0x00c3
++#define mmRCC_DEV0_EPF0_VF15_RCC_CONFIG_MEMSIZE_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF15_RCC_CONFIG_RESERVED 0x00c4
++#define mmRCC_DEV0_EPF0_VF15_RCC_CONFIG_RESERVED_BASE_IDX 2
++#define mmRCC_DEV0_EPF0_VF15_RCC_IOV_FUNC_IDENTIFIER 0x00c5
++#define mmRCC_DEV0_EPF0_VF15_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf15_BIFPFVFDEC1
++// base address: 0x0
++#define mmBIF_BX_DEV0_EPF0_VF15_BIF_BME_STATUS 0x00eb
++#define mmBIF_BX_DEV0_EPF0_VF15_BIF_BME_STATUS_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG 0x00ec
++#define mmBIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
++#define mmBIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
++#define mmBIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
++#define mmBIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF15_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
++#define mmBIF_BX_DEV0_EPF0_VF15_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF15_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
++#define mmBIF_BX_DEV0_EPF0_VF15_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ 0x0106
++#define mmBIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE 0x0107
++#define mmBIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF15_BIF_TRANS_PENDING 0x0108
++#define mmBIF_BX_DEV0_EPF0_VF15_BIF_TRANS_PENDING_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF15_NBIF_GFX_ADDR_LUT_BYPASS 0x0112
++#define mmBIF_BX_DEV0_EPF0_VF15_NBIF_GFX_ADDR_LUT_BYPASS_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW0 0x0136
++#define mmBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW1 0x0137
++#define mmBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW2 0x0138
++#define mmBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW3 0x0139
++#define mmBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW0 0x013a
++#define mmBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW1 0x013b
++#define mmBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW2 0x013c
++#define mmBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW3 0x013d
++#define mmBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF15_MAILBOX_CONTROL 0x013e
++#define mmBIF_BX_DEV0_EPF0_VF15_MAILBOX_CONTROL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF15_MAILBOX_INT_CNTL 0x013f
++#define mmBIF_BX_DEV0_EPF0_VF15_MAILBOX_INT_CNTL_BASE_IDX 2
++#define mmBIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX 0x0140
++#define mmBIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX_BASE_IDX 2
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf15_BIFDEC2
++// base address: 0x0
++#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_ADDR_LO 0x0400
++#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_ADDR_HI 0x0401
++#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_MSG_DATA 0x0402
++#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_CONTROL 0x0403
++#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_ADDR_LO 0x0404
++#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_ADDR_HI 0x0405
++#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_MSG_DATA 0x0406
++#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_CONTROL 0x0407
++#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_ADDR_LO 0x0408
++#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_ADDR_HI 0x0409
++#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_MSG_DATA 0x040a
++#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_CONTROL 0x040b
++#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_CONTROL_BASE_IDX 3
++#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_PBA 0x0800
++#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_PBA_BASE_IDX 3
++
++#endif
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h
+new file mode 100644
+index 0000000..d3704b4
+--- /dev/null
++++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h
+@@ -0,0 +1,48436 @@
++/*
++ * Copyright (C) 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
++ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++#ifndef _nbio_7_4_SH_MASK_HEADER
++#define _nbio_7_4_SH_MASK_HEADER
++
++
++// addressBlock: nbio_pcie0_pswuscfg0_cfgdecp
++//PSWUSCFG0_VENDOR_ID
++#define PSWUSCFG0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
++#define PSWUSCFG0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
++//PSWUSCFG0_DEVICE_ID
++#define PSWUSCFG0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
++#define PSWUSCFG0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
++//PSWUSCFG0_COMMAND
++#define PSWUSCFG0_COMMAND__IO_ACCESS_EN__SHIFT 0x0
++#define PSWUSCFG0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
++#define PSWUSCFG0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
++#define PSWUSCFG0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
++#define PSWUSCFG0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
++#define PSWUSCFG0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
++#define PSWUSCFG0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
++#define PSWUSCFG0_COMMAND__AD_STEPPING__SHIFT 0x7
++#define PSWUSCFG0_COMMAND__SERR_EN__SHIFT 0x8
++#define PSWUSCFG0_COMMAND__FAST_B2B_EN__SHIFT 0x9
++#define PSWUSCFG0_COMMAND__INT_DIS__SHIFT 0xa
++#define PSWUSCFG0_COMMAND__IO_ACCESS_EN_MASK 0x0001L
++#define PSWUSCFG0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
++#define PSWUSCFG0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
++#define PSWUSCFG0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
++#define PSWUSCFG0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
++#define PSWUSCFG0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
++#define PSWUSCFG0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
++#define PSWUSCFG0_COMMAND__AD_STEPPING_MASK 0x0080L
++#define PSWUSCFG0_COMMAND__SERR_EN_MASK 0x0100L
++#define PSWUSCFG0_COMMAND__FAST_B2B_EN_MASK 0x0200L
++#define PSWUSCFG0_COMMAND__INT_DIS_MASK 0x0400L
++//PSWUSCFG0_STATUS
++#define PSWUSCFG0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
++#define PSWUSCFG0_STATUS__INT_STATUS__SHIFT 0x3
++#define PSWUSCFG0_STATUS__CAP_LIST__SHIFT 0x4
++#define PSWUSCFG0_STATUS__PCI_66_CAP__SHIFT 0x5
++#define PSWUSCFG0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
++#define PSWUSCFG0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
++#define PSWUSCFG0_STATUS__DEVSEL_TIMING__SHIFT 0x9
++#define PSWUSCFG0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
++#define PSWUSCFG0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
++#define PSWUSCFG0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
++#define PSWUSCFG0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
++#define PSWUSCFG0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
++#define PSWUSCFG0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
++#define PSWUSCFG0_STATUS__INT_STATUS_MASK 0x0008L
++#define PSWUSCFG0_STATUS__CAP_LIST_MASK 0x0010L
++#define PSWUSCFG0_STATUS__PCI_66_CAP_MASK 0x0020L
++#define PSWUSCFG0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
++#define PSWUSCFG0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
++#define PSWUSCFG0_STATUS__DEVSEL_TIMING_MASK 0x0600L
++#define PSWUSCFG0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
++#define PSWUSCFG0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
++#define PSWUSCFG0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
++#define PSWUSCFG0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
++#define PSWUSCFG0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
++//PSWUSCFG0_REVISION_ID
++#define PSWUSCFG0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
++#define PSWUSCFG0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
++#define PSWUSCFG0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
++#define PSWUSCFG0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
++//PSWUSCFG0_PROG_INTERFACE
++#define PSWUSCFG0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
++#define PSWUSCFG0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
++//PSWUSCFG0_SUB_CLASS
++#define PSWUSCFG0_SUB_CLASS__SUB_CLASS__SHIFT 0x0
++#define PSWUSCFG0_SUB_CLASS__SUB_CLASS_MASK 0xFFL
++//PSWUSCFG0_BASE_CLASS
++#define PSWUSCFG0_BASE_CLASS__BASE_CLASS__SHIFT 0x0
++#define PSWUSCFG0_BASE_CLASS__BASE_CLASS_MASK 0xFFL
++//PSWUSCFG0_CACHE_LINE
++#define PSWUSCFG0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
++#define PSWUSCFG0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
++//PSWUSCFG0_LATENCY
++#define PSWUSCFG0_LATENCY__LATENCY_TIMER__SHIFT 0x0
++#define PSWUSCFG0_LATENCY__LATENCY_TIMER_MASK 0xFFL
++//PSWUSCFG0_HEADER
++#define PSWUSCFG0_HEADER__HEADER_TYPE__SHIFT 0x0
++#define PSWUSCFG0_HEADER__DEVICE_TYPE__SHIFT 0x7
++#define PSWUSCFG0_HEADER__HEADER_TYPE_MASK 0x7FL
++#define PSWUSCFG0_HEADER__DEVICE_TYPE_MASK 0x80L
++//PSWUSCFG0_BIST
++#define PSWUSCFG0_BIST__BIST_COMP__SHIFT 0x0
++#define PSWUSCFG0_BIST__BIST_STRT__SHIFT 0x6
++#define PSWUSCFG0_BIST__BIST_CAP__SHIFT 0x7
++#define PSWUSCFG0_BIST__BIST_COMP_MASK 0x0FL
++#define PSWUSCFG0_BIST__BIST_STRT_MASK 0x40L
++#define PSWUSCFG0_BIST__BIST_CAP_MASK 0x80L
++//PSWUSCFG0_SUB_BUS_NUMBER_LATENCY
++#define PSWUSCFG0_SUB_BUS_NUMBER_LATENCY__PRIMARY_BUS__SHIFT 0x0
++#define PSWUSCFG0_SUB_BUS_NUMBER_LATENCY__SECONDARY_BUS__SHIFT 0x8
++#define PSWUSCFG0_SUB_BUS_NUMBER_LATENCY__SUB_BUS_NUM__SHIFT 0x10
++#define PSWUSCFG0_SUB_BUS_NUMBER_LATENCY__SECONDARY_LATENCY_TIMER__SHIFT 0x18
++#define PSWUSCFG0_SUB_BUS_NUMBER_LATENCY__PRIMARY_BUS_MASK 0x000000FFL
++#define PSWUSCFG0_SUB_BUS_NUMBER_LATENCY__SECONDARY_BUS_MASK 0x0000FF00L
++#define PSWUSCFG0_SUB_BUS_NUMBER_LATENCY__SUB_BUS_NUM_MASK 0x00FF0000L
++#define PSWUSCFG0_SUB_BUS_NUMBER_LATENCY__SECONDARY_LATENCY_TIMER_MASK 0xFF000000L
++//PSWUSCFG0_IO_BASE_LIMIT
++#define PSWUSCFG0_IO_BASE_LIMIT__IO_BASE_TYPE__SHIFT 0x0
++#define PSWUSCFG0_IO_BASE_LIMIT__IO_BASE__SHIFT 0x4
++#define PSWUSCFG0_IO_BASE_LIMIT__IO_LIMIT_TYPE__SHIFT 0x8
++#define PSWUSCFG0_IO_BASE_LIMIT__IO_LIMIT__SHIFT 0xc
++#define PSWUSCFG0_IO_BASE_LIMIT__IO_BASE_TYPE_MASK 0x000FL
++#define PSWUSCFG0_IO_BASE_LIMIT__IO_BASE_MASK 0x00F0L
++#define PSWUSCFG0_IO_BASE_LIMIT__IO_LIMIT_TYPE_MASK 0x0F00L
++#define PSWUSCFG0_IO_BASE_LIMIT__IO_LIMIT_MASK 0xF000L
++//PSWUSCFG0_SECONDARY_STATUS
++#define PSWUSCFG0_SECONDARY_STATUS__PCI_66_CAP__SHIFT 0x5
++#define PSWUSCFG0_SECONDARY_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
++#define PSWUSCFG0_SECONDARY_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
++#define PSWUSCFG0_SECONDARY_STATUS__DEVSEL_TIMING__SHIFT 0x9
++#define PSWUSCFG0_SECONDARY_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
++#define PSWUSCFG0_SECONDARY_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
++#define PSWUSCFG0_SECONDARY_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
++#define PSWUSCFG0_SECONDARY_STATUS__RECEIVED_SYSTEM_ERROR__SHIFT 0xe
++#define PSWUSCFG0_SECONDARY_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
++#define PSWUSCFG0_SECONDARY_STATUS__PCI_66_CAP_MASK 0x0020L
++#define PSWUSCFG0_SECONDARY_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
++#define PSWUSCFG0_SECONDARY_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
++#define PSWUSCFG0_SECONDARY_STATUS__DEVSEL_TIMING_MASK 0x0600L
++#define PSWUSCFG0_SECONDARY_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
++#define PSWUSCFG0_SECONDARY_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
++#define PSWUSCFG0_SECONDARY_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
++#define PSWUSCFG0_SECONDARY_STATUS__RECEIVED_SYSTEM_ERROR_MASK 0x4000L
++#define PSWUSCFG0_SECONDARY_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
++//PSWUSCFG0_MEM_BASE_LIMIT
++#define PSWUSCFG0_MEM_BASE_LIMIT__MEM_BASE_TYPE__SHIFT 0x0
++#define PSWUSCFG0_MEM_BASE_LIMIT__MEM_BASE_31_20__SHIFT 0x4
++#define PSWUSCFG0_MEM_BASE_LIMIT__MEM_LIMIT_TYPE__SHIFT 0x10
++#define PSWUSCFG0_MEM_BASE_LIMIT__MEM_LIMIT_31_20__SHIFT 0x14
++#define PSWUSCFG0_MEM_BASE_LIMIT__MEM_BASE_TYPE_MASK 0x0000000FL
++#define PSWUSCFG0_MEM_BASE_LIMIT__MEM_BASE_31_20_MASK 0x0000FFF0L
++#define PSWUSCFG0_MEM_BASE_LIMIT__MEM_LIMIT_TYPE_MASK 0x000F0000L
++#define PSWUSCFG0_MEM_BASE_LIMIT__MEM_LIMIT_31_20_MASK 0xFFF00000L
++//PSWUSCFG0_PREF_BASE_LIMIT
++#define PSWUSCFG0_PREF_BASE_LIMIT__PREF_MEM_BASE_TYPE__SHIFT 0x0
++#define PSWUSCFG0_PREF_BASE_LIMIT__PREF_MEM_BASE_31_20__SHIFT 0x4
++#define PSWUSCFG0_PREF_BASE_LIMIT__PREF_MEM_LIMIT_TYPE__SHIFT 0x10
++#define PSWUSCFG0_PREF_BASE_LIMIT__PREF_MEM_LIMIT_31_20__SHIFT 0x14
++#define PSWUSCFG0_PREF_BASE_LIMIT__PREF_MEM_BASE_TYPE_MASK 0x0000000FL
++#define PSWUSCFG0_PREF_BASE_LIMIT__PREF_MEM_BASE_31_20_MASK 0x0000FFF0L
++#define PSWUSCFG0_PREF_BASE_LIMIT__PREF_MEM_LIMIT_TYPE_MASK 0x000F0000L
++#define PSWUSCFG0_PREF_BASE_LIMIT__PREF_MEM_LIMIT_31_20_MASK 0xFFF00000L
++//PSWUSCFG0_PREF_BASE_UPPER
++#define PSWUSCFG0_PREF_BASE_UPPER__PREF_BASE_UPPER__SHIFT 0x0
++#define PSWUSCFG0_PREF_BASE_UPPER__PREF_BASE_UPPER_MASK 0xFFFFFFFFL
++//PSWUSCFG0_PREF_LIMIT_UPPER
++#define PSWUSCFG0_PREF_LIMIT_UPPER__PREF_LIMIT_UPPER__SHIFT 0x0
++#define PSWUSCFG0_PREF_LIMIT_UPPER__PREF_LIMIT_UPPER_MASK 0xFFFFFFFFL
++//PSWUSCFG0_IO_BASE_LIMIT_HI
++#define PSWUSCFG0_IO_BASE_LIMIT_HI__IO_BASE_31_16__SHIFT 0x0
++#define PSWUSCFG0_IO_BASE_LIMIT_HI__IO_LIMIT_31_16__SHIFT 0x10
++#define PSWUSCFG0_IO_BASE_LIMIT_HI__IO_BASE_31_16_MASK 0x0000FFFFL
++#define PSWUSCFG0_IO_BASE_LIMIT_HI__IO_LIMIT_31_16_MASK 0xFFFF0000L
++//PSWUSCFG0_CAP_PTR
++#define PSWUSCFG0_CAP_PTR__CAP_PTR__SHIFT 0x0
++#define PSWUSCFG0_CAP_PTR__CAP_PTR_MASK 0x000000FFL
++//PSWUSCFG0_INTERRUPT_LINE
++#define PSWUSCFG0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
++#define PSWUSCFG0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
++//PSWUSCFG0_INTERRUPT_PIN
++#define PSWUSCFG0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
++#define PSWUSCFG0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
++//PSWUSCFG0_IRQ_BRIDGE_CNTL
++#define PSWUSCFG0_IRQ_BRIDGE_CNTL__PARITY_RESPONSE_EN__SHIFT 0x0
++#define PSWUSCFG0_IRQ_BRIDGE_CNTL__SERR_EN__SHIFT 0x1
++#define PSWUSCFG0_IRQ_BRIDGE_CNTL__ISA_EN__SHIFT 0x2
++#define PSWUSCFG0_IRQ_BRIDGE_CNTL__VGA_EN__SHIFT 0x3
++#define PSWUSCFG0_IRQ_BRIDGE_CNTL__VGA_DEC__SHIFT 0x4
++#define PSWUSCFG0_IRQ_BRIDGE_CNTL__MASTER_ABORT_MODE__SHIFT 0x5
++#define PSWUSCFG0_IRQ_BRIDGE_CNTL__SECONDARY_BUS_RESET__SHIFT 0x6
++#define PSWUSCFG0_IRQ_BRIDGE_CNTL__FAST_B2B_EN__SHIFT 0x7
++#define PSWUSCFG0_IRQ_BRIDGE_CNTL__PARITY_RESPONSE_EN_MASK 0x0001L
++#define PSWUSCFG0_IRQ_BRIDGE_CNTL__SERR_EN_MASK 0x0002L
++#define PSWUSCFG0_IRQ_BRIDGE_CNTL__ISA_EN_MASK 0x0004L
++#define PSWUSCFG0_IRQ_BRIDGE_CNTL__VGA_EN_MASK 0x0008L
++#define PSWUSCFG0_IRQ_BRIDGE_CNTL__VGA_DEC_MASK 0x0010L
++#define PSWUSCFG0_IRQ_BRIDGE_CNTL__MASTER_ABORT_MODE_MASK 0x0020L
++#define PSWUSCFG0_IRQ_BRIDGE_CNTL__SECONDARY_BUS_RESET_MASK 0x0040L
++#define PSWUSCFG0_IRQ_BRIDGE_CNTL__FAST_B2B_EN_MASK 0x0080L
++//EXT_BRIDGE_CNTL
++#define EXT_BRIDGE_CNTL__IO_PORT_80_EN__SHIFT 0x0
++#define EXT_BRIDGE_CNTL__IO_PORT_80_EN_MASK 0x01L
++//PSWUSCFG0_VENDOR_CAP_LIST
++#define PSWUSCFG0_VENDOR_CAP_LIST__CAP_ID__SHIFT 0x0
++#define PSWUSCFG0_VENDOR_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define PSWUSCFG0_VENDOR_CAP_LIST__LENGTH__SHIFT 0x10
++#define PSWUSCFG0_VENDOR_CAP_LIST__CAP_ID_MASK 0x000000FFL
++#define PSWUSCFG0_VENDOR_CAP_LIST__NEXT_PTR_MASK 0x0000FF00L
++#define PSWUSCFG0_VENDOR_CAP_LIST__LENGTH_MASK 0x00FF0000L
++//PSWUSCFG0_ADAPTER_ID_W
++#define PSWUSCFG0_ADAPTER_ID_W__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
++#define PSWUSCFG0_ADAPTER_ID_W__SUBSYSTEM_ID__SHIFT 0x10
++#define PSWUSCFG0_ADAPTER_ID_W__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
++#define PSWUSCFG0_ADAPTER_ID_W__SUBSYSTEM_ID_MASK 0xFFFF0000L
++//PSWUSCFG0_PMI_CAP_LIST
++#define PSWUSCFG0_PMI_CAP_LIST__CAP_ID__SHIFT 0x0
++#define PSWUSCFG0_PMI_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define PSWUSCFG0_PMI_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define PSWUSCFG0_PMI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//PSWUSCFG0_PMI_CAP
++#define PSWUSCFG0_PMI_CAP__VERSION__SHIFT 0x0
++#define PSWUSCFG0_PMI_CAP__PME_CLOCK__SHIFT 0x3
++#define PSWUSCFG0_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0__SHIFT 0x4
++#define PSWUSCFG0_PMI_CAP__DEV_SPECIFIC_INIT__SHIFT 0x5
++#define PSWUSCFG0_PMI_CAP__AUX_CURRENT__SHIFT 0x6
++#define PSWUSCFG0_PMI_CAP__D1_SUPPORT__SHIFT 0x9
++#define PSWUSCFG0_PMI_CAP__D2_SUPPORT__SHIFT 0xa
++#define PSWUSCFG0_PMI_CAP__PME_SUPPORT__SHIFT 0xb
++#define PSWUSCFG0_PMI_CAP__VERSION_MASK 0x0007L
++#define PSWUSCFG0_PMI_CAP__PME_CLOCK_MASK 0x0008L
++#define PSWUSCFG0_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0_MASK 0x0010L
++#define PSWUSCFG0_PMI_CAP__DEV_SPECIFIC_INIT_MASK 0x0020L
++#define PSWUSCFG0_PMI_CAP__AUX_CURRENT_MASK 0x01C0L
++#define PSWUSCFG0_PMI_CAP__D1_SUPPORT_MASK 0x0200L
++#define PSWUSCFG0_PMI_CAP__D2_SUPPORT_MASK 0x0400L
++#define PSWUSCFG0_PMI_CAP__PME_SUPPORT_MASK 0xF800L
++//PSWUSCFG0_PMI_STATUS_CNTL
++#define PSWUSCFG0_PMI_STATUS_CNTL__POWER_STATE__SHIFT 0x0
++#define PSWUSCFG0_PMI_STATUS_CNTL__NO_SOFT_RESET__SHIFT 0x3
++#define PSWUSCFG0_PMI_STATUS_CNTL__PME_EN__SHIFT 0x8
++#define PSWUSCFG0_PMI_STATUS_CNTL__DATA_SELECT__SHIFT 0x9
++#define PSWUSCFG0_PMI_STATUS_CNTL__DATA_SCALE__SHIFT 0xd
++#define PSWUSCFG0_PMI_STATUS_CNTL__PME_STATUS__SHIFT 0xf
++#define PSWUSCFG0_PMI_STATUS_CNTL__B2_B3_SUPPORT__SHIFT 0x16
++#define PSWUSCFG0_PMI_STATUS_CNTL__BUS_PWR_EN__SHIFT 0x17
++#define PSWUSCFG0_PMI_STATUS_CNTL__PMI_DATA__SHIFT 0x18
++#define PSWUSCFG0_PMI_STATUS_CNTL__POWER_STATE_MASK 0x00000003L
++#define PSWUSCFG0_PMI_STATUS_CNTL__NO_SOFT_RESET_MASK 0x00000008L
++#define PSWUSCFG0_PMI_STATUS_CNTL__PME_EN_MASK 0x00000100L
++#define PSWUSCFG0_PMI_STATUS_CNTL__DATA_SELECT_MASK 0x00001E00L
++#define PSWUSCFG0_PMI_STATUS_CNTL__DATA_SCALE_MASK 0x00006000L
++#define PSWUSCFG0_PMI_STATUS_CNTL__PME_STATUS_MASK 0x00008000L
++#define PSWUSCFG0_PMI_STATUS_CNTL__B2_B3_SUPPORT_MASK 0x00400000L
++#define PSWUSCFG0_PMI_STATUS_CNTL__BUS_PWR_EN_MASK 0x00800000L
++#define PSWUSCFG0_PMI_STATUS_CNTL__PMI_DATA_MASK 0xFF000000L
++//PSWUSCFG0_PCIE_CAP_LIST
++#define PSWUSCFG0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
++#define PSWUSCFG0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define PSWUSCFG0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define PSWUSCFG0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//PSWUSCFG0_PCIE_CAP
++#define PSWUSCFG0_PCIE_CAP__VERSION__SHIFT 0x0
++#define PSWUSCFG0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
++#define PSWUSCFG0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
++#define PSWUSCFG0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
++#define PSWUSCFG0_PCIE_CAP__VERSION_MASK 0x000FL
++#define PSWUSCFG0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
++#define PSWUSCFG0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
++#define PSWUSCFG0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
++//PSWUSCFG0_DEVICE_CAP
++#define PSWUSCFG0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
++#define PSWUSCFG0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
++#define PSWUSCFG0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
++#define PSWUSCFG0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
++#define PSWUSCFG0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
++#define PSWUSCFG0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
++#define PSWUSCFG0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
++#define PSWUSCFG0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
++#define PSWUSCFG0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
++#define PSWUSCFG0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
++#define PSWUSCFG0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
++#define PSWUSCFG0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
++#define PSWUSCFG0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
++#define PSWUSCFG0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
++#define PSWUSCFG0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
++#define PSWUSCFG0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
++#define PSWUSCFG0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
++#define PSWUSCFG0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
++//PSWUSCFG0_DEVICE_CNTL
++#define PSWUSCFG0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
++#define PSWUSCFG0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
++#define PSWUSCFG0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
++#define PSWUSCFG0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
++#define PSWUSCFG0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
++#define PSWUSCFG0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
++#define PSWUSCFG0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
++#define PSWUSCFG0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
++#define PSWUSCFG0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
++#define PSWUSCFG0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
++#define PSWUSCFG0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
++#define PSWUSCFG0_DEVICE_CNTL__BRIDGE_CFG_RETRY_EN__SHIFT 0xf
++#define PSWUSCFG0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
++#define PSWUSCFG0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
++#define PSWUSCFG0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
++#define PSWUSCFG0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
++#define PSWUSCFG0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
++#define PSWUSCFG0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
++#define PSWUSCFG0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
++#define PSWUSCFG0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
++#define PSWUSCFG0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
++#define PSWUSCFG0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
++#define PSWUSCFG0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
++#define PSWUSCFG0_DEVICE_CNTL__BRIDGE_CFG_RETRY_EN_MASK 0x8000L
++//PSWUSCFG0_DEVICE_STATUS
++#define PSWUSCFG0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
++#define PSWUSCFG0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
++#define PSWUSCFG0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
++#define PSWUSCFG0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
++#define PSWUSCFG0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
++#define PSWUSCFG0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
++#define PSWUSCFG0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
++#define PSWUSCFG0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
++#define PSWUSCFG0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
++#define PSWUSCFG0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
++#define PSWUSCFG0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
++#define PSWUSCFG0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
++//PSWUSCFG0_LINK_CAP
++#define PSWUSCFG0_LINK_CAP__LINK_SPEED__SHIFT 0x0
++#define PSWUSCFG0_LINK_CAP__LINK_WIDTH__SHIFT 0x4
++#define PSWUSCFG0_LINK_CAP__PM_SUPPORT__SHIFT 0xa
++#define PSWUSCFG0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
++#define PSWUSCFG0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
++#define PSWUSCFG0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
++#define PSWUSCFG0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
++#define PSWUSCFG0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
++#define PSWUSCFG0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
++#define PSWUSCFG0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
++#define PSWUSCFG0_LINK_CAP__PORT_NUMBER__SHIFT 0x18
++#define PSWUSCFG0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
++#define PSWUSCFG0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
++#define PSWUSCFG0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
++#define PSWUSCFG0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
++#define PSWUSCFG0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
++#define PSWUSCFG0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
++#define PSWUSCFG0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
++#define PSWUSCFG0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
++#define PSWUSCFG0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
++#define PSWUSCFG0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
++#define PSWUSCFG0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
++//PSWUSCFG0_LINK_CNTL
++#define PSWUSCFG0_LINK_CNTL__PM_CONTROL__SHIFT 0x0
++#define PSWUSCFG0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
++#define PSWUSCFG0_LINK_CNTL__LINK_DIS__SHIFT 0x4
++#define PSWUSCFG0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
++#define PSWUSCFG0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
++#define PSWUSCFG0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
++#define PSWUSCFG0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
++#define PSWUSCFG0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
++#define PSWUSCFG0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
++#define PSWUSCFG0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
++#define PSWUSCFG0_LINK_CNTL__DRS_SIGNALING_CONTROL__SHIFT 0xe
++#define PSWUSCFG0_LINK_CNTL__PM_CONTROL_MASK 0x0003L
++#define PSWUSCFG0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
++#define PSWUSCFG0_LINK_CNTL__LINK_DIS_MASK 0x0010L
++#define PSWUSCFG0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
++#define PSWUSCFG0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
++#define PSWUSCFG0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
++#define PSWUSCFG0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
++#define PSWUSCFG0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
++#define PSWUSCFG0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
++#define PSWUSCFG0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
++#define PSWUSCFG0_LINK_CNTL__DRS_SIGNALING_CONTROL_MASK 0xC000L
++//PSWUSCFG0_LINK_STATUS
++#define PSWUSCFG0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
++#define PSWUSCFG0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
++#define PSWUSCFG0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
++#define PSWUSCFG0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
++#define PSWUSCFG0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
++#define PSWUSCFG0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
++#define PSWUSCFG0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
++#define PSWUSCFG0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
++#define PSWUSCFG0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
++#define PSWUSCFG0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
++#define PSWUSCFG0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
++#define PSWUSCFG0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
++#define PSWUSCFG0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
++#define PSWUSCFG0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
++//PSWUSCFG0_DEVICE_CAP2
++#define PSWUSCFG0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
++#define PSWUSCFG0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
++#define PSWUSCFG0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
++#define PSWUSCFG0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
++#define PSWUSCFG0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
++#define PSWUSCFG0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
++#define PSWUSCFG0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
++#define PSWUSCFG0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
++#define PSWUSCFG0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
++#define PSWUSCFG0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
++#define PSWUSCFG0_DEVICE_CAP2__LN_SYSTEM_CLS__SHIFT 0xe
++#define PSWUSCFG0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
++#define PSWUSCFG0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
++#define PSWUSCFG0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
++#define PSWUSCFG0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
++#define PSWUSCFG0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
++#define PSWUSCFG0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
++#define PSWUSCFG0_DEVICE_CAP2__FRS_SUPPORTED__SHIFT 0x1f
++#define PSWUSCFG0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
++#define PSWUSCFG0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
++#define PSWUSCFG0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
++#define PSWUSCFG0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
++#define PSWUSCFG0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
++#define PSWUSCFG0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
++#define PSWUSCFG0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
++#define PSWUSCFG0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
++#define PSWUSCFG0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
++#define PSWUSCFG0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
++#define PSWUSCFG0_DEVICE_CAP2__LN_SYSTEM_CLS_MASK 0x0000C000L
++#define PSWUSCFG0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
++#define PSWUSCFG0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
++#define PSWUSCFG0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
++#define PSWUSCFG0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
++#define PSWUSCFG0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
++#define PSWUSCFG0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
++#define PSWUSCFG0_DEVICE_CAP2__FRS_SUPPORTED_MASK 0x80000000L
++//PSWUSCFG0_DEVICE_CNTL2
++#define PSWUSCFG0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
++#define PSWUSCFG0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
++#define PSWUSCFG0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
++#define PSWUSCFG0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
++#define PSWUSCFG0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
++#define PSWUSCFG0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
++#define PSWUSCFG0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
++#define PSWUSCFG0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
++#define PSWUSCFG0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
++#define PSWUSCFG0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
++#define PSWUSCFG0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
++#define PSWUSCFG0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
++#define PSWUSCFG0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
++#define PSWUSCFG0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
++#define PSWUSCFG0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
++#define PSWUSCFG0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
++#define PSWUSCFG0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
++#define PSWUSCFG0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
++#define PSWUSCFG0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
++#define PSWUSCFG0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
++#define PSWUSCFG0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
++#define PSWUSCFG0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
++#define PSWUSCFG0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
++#define PSWUSCFG0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
++//PSWUSCFG0_DEVICE_STATUS2
++#define PSWUSCFG0_DEVICE_STATUS2__RESERVED__SHIFT 0x0
++#define PSWUSCFG0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
++//PSWUSCFG0_LINK_CAP2
++#define PSWUSCFG0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
++#define PSWUSCFG0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
++#define PSWUSCFG0_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT__SHIFT 0x9
++#define PSWUSCFG0_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT__SHIFT 0x10
++#define PSWUSCFG0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
++#define PSWUSCFG0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
++#define PSWUSCFG0_LINK_CAP2__DRS_SUPPORTED__SHIFT 0x1f
++#define PSWUSCFG0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
++#define PSWUSCFG0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
++#define PSWUSCFG0_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT_MASK 0x00001E00L
++#define PSWUSCFG0_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT_MASK 0x000F0000L
++#define PSWUSCFG0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
++#define PSWUSCFG0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
++#define PSWUSCFG0_LINK_CAP2__DRS_SUPPORTED_MASK 0x80000000L
++//PSWUSCFG0_LINK_CNTL2
++#define PSWUSCFG0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
++#define PSWUSCFG0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
++#define PSWUSCFG0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
++#define PSWUSCFG0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
++#define PSWUSCFG0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
++#define PSWUSCFG0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
++#define PSWUSCFG0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
++#define PSWUSCFG0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
++#define PSWUSCFG0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
++#define PSWUSCFG0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
++#define PSWUSCFG0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
++#define PSWUSCFG0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
++#define PSWUSCFG0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
++#define PSWUSCFG0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
++#define PSWUSCFG0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
++#define PSWUSCFG0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
++//PSWUSCFG0_LINK_STATUS2
++#define PSWUSCFG0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
++#define PSWUSCFG0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
++#define PSWUSCFG0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
++#define PSWUSCFG0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
++#define PSWUSCFG0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
++#define PSWUSCFG0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
++#define PSWUSCFG0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
++#define PSWUSCFG0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
++#define PSWUSCFG0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
++#define PSWUSCFG0_LINK_STATUS2__DRS_MESSAGE_RECEIVED__SHIFT 0xf
++#define PSWUSCFG0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
++#define PSWUSCFG0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
++#define PSWUSCFG0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
++#define PSWUSCFG0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
++#define PSWUSCFG0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
++#define PSWUSCFG0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
++#define PSWUSCFG0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
++#define PSWUSCFG0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
++#define PSWUSCFG0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
++#define PSWUSCFG0_LINK_STATUS2__DRS_MESSAGE_RECEIVED_MASK 0x8000L
++//PSWUSCFG0_MSI_CAP_LIST
++#define PSWUSCFG0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
++#define PSWUSCFG0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define PSWUSCFG0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define PSWUSCFG0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//PSWUSCFG0_MSI_MSG_CNTL
++#define PSWUSCFG0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
++#define PSWUSCFG0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
++#define PSWUSCFG0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
++#define PSWUSCFG0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
++#define PSWUSCFG0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
++#define PSWUSCFG0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
++#define PSWUSCFG0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
++#define PSWUSCFG0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
++#define PSWUSCFG0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
++#define PSWUSCFG0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
++//PSWUSCFG0_MSI_MSG_ADDR_LO
++#define PSWUSCFG0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
++#define PSWUSCFG0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//PSWUSCFG0_MSI_MSG_ADDR_HI
++#define PSWUSCFG0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
++#define PSWUSCFG0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//PSWUSCFG0_MSI_MSG_DATA
++#define PSWUSCFG0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
++#define PSWUSCFG0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL
++//PSWUSCFG0_MSI_MSG_DATA_64
++#define PSWUSCFG0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
++#define PSWUSCFG0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0xFFFFL
++//PSWUSCFG0_SSID_CAP_LIST
++#define PSWUSCFG0_SSID_CAP_LIST__CAP_ID__SHIFT 0x0
++#define PSWUSCFG0_SSID_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define PSWUSCFG0_SSID_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define PSWUSCFG0_SSID_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//PSWUSCFG0_SSID_CAP
++#define PSWUSCFG0_SSID_CAP__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
++#define PSWUSCFG0_SSID_CAP__SUBSYSTEM_ID__SHIFT 0x10
++#define PSWUSCFG0_SSID_CAP__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
++#define PSWUSCFG0_SSID_CAP__SUBSYSTEM_ID_MASK 0xFFFF0000L
++//MSI_MAP_CAP_LIST
++#define MSI_MAP_CAP_LIST__CAP_ID__SHIFT 0x0
++#define MSI_MAP_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define MSI_MAP_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define MSI_MAP_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//MSI_MAP_CAP
++#define MSI_MAP_CAP__EN__SHIFT 0x0
++#define MSI_MAP_CAP__FIXD__SHIFT 0x1
++#define MSI_MAP_CAP__CAP_TYPE__SHIFT 0xb
++#define MSI_MAP_CAP__EN_MASK 0x0001L
++#define MSI_MAP_CAP__FIXD_MASK 0x0002L
++#define MSI_MAP_CAP__CAP_TYPE_MASK 0xF800L
++//MSI_MAP_ADDR_LO
++#define MSI_MAP_ADDR_LO__MSI_MAP_ADDR_LO__SHIFT 0x14
++#define MSI_MAP_ADDR_LO__MSI_MAP_ADDR_LO_MASK 0xFFF00000L
++//MSI_MAP_ADDR_HI
++#define MSI_MAP_ADDR_HI__MSI_MAP_ADDR_HI__SHIFT 0x0
++#define MSI_MAP_ADDR_HI__MSI_MAP_ADDR_HI_MASK 0xFFFFFFFFL
++//PSWUSCFG0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
++#define PSWUSCFG0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define PSWUSCFG0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define PSWUSCFG0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define PSWUSCFG0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define PSWUSCFG0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define PSWUSCFG0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//PSWUSCFG0_PCIE_VENDOR_SPECIFIC_HDR
++#define PSWUSCFG0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
++#define PSWUSCFG0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
++#define PSWUSCFG0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
++#define PSWUSCFG0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
++#define PSWUSCFG0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
++#define PSWUSCFG0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
++//PSWUSCFG0_PCIE_VENDOR_SPECIFIC1
++#define PSWUSCFG0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
++#define PSWUSCFG0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
++//PSWUSCFG0_PCIE_VENDOR_SPECIFIC2
++#define PSWUSCFG0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
++#define PSWUSCFG0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
++//PSWUSCFG0_PCIE_VC_ENH_CAP_LIST
++#define PSWUSCFG0_PCIE_VC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define PSWUSCFG0_PCIE_VC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define PSWUSCFG0_PCIE_VC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define PSWUSCFG0_PCIE_VC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define PSWUSCFG0_PCIE_VC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define PSWUSCFG0_PCIE_VC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//PSWUSCFG0_PCIE_PORT_VC_CAP_REG1
++#define PSWUSCFG0_PCIE_PORT_VC_CAP_REG1__EXT_VC_COUNT__SHIFT 0x0
++#define PSWUSCFG0_PCIE_PORT_VC_CAP_REG1__LOW_PRIORITY_EXT_VC_COUNT__SHIFT 0x4
++#define PSWUSCFG0_PCIE_PORT_VC_CAP_REG1__REF_CLK__SHIFT 0x8
++#define PSWUSCFG0_PCIE_PORT_VC_CAP_REG1__PORT_ARB_TABLE_ENTRY_SIZE__SHIFT 0xa
++#define PSWUSCFG0_PCIE_PORT_VC_CAP_REG1__EXT_VC_COUNT_MASK 0x00000007L
++#define PSWUSCFG0_PCIE_PORT_VC_CAP_REG1__LOW_PRIORITY_EXT_VC_COUNT_MASK 0x00000070L
++#define PSWUSCFG0_PCIE_PORT_VC_CAP_REG1__REF_CLK_MASK 0x00000300L
++#define PSWUSCFG0_PCIE_PORT_VC_CAP_REG1__PORT_ARB_TABLE_ENTRY_SIZE_MASK 0x00000C00L
++//PSWUSCFG0_PCIE_PORT_VC_CAP_REG2
++#define PSWUSCFG0_PCIE_PORT_VC_CAP_REG2__VC_ARB_CAP__SHIFT 0x0
++#define PSWUSCFG0_PCIE_PORT_VC_CAP_REG2__VC_ARB_TABLE_OFFSET__SHIFT 0x18
++#define PSWUSCFG0_PCIE_PORT_VC_CAP_REG2__VC_ARB_CAP_MASK 0x000000FFL
++#define PSWUSCFG0_PCIE_PORT_VC_CAP_REG2__VC_ARB_TABLE_OFFSET_MASK 0xFF000000L
++//PSWUSCFG0_PCIE_PORT_VC_CNTL
++#define PSWUSCFG0_PCIE_PORT_VC_CNTL__LOAD_VC_ARB_TABLE__SHIFT 0x0
++#define PSWUSCFG0_PCIE_PORT_VC_CNTL__VC_ARB_SELECT__SHIFT 0x1
++#define PSWUSCFG0_PCIE_PORT_VC_CNTL__LOAD_VC_ARB_TABLE_MASK 0x0001L
++#define PSWUSCFG0_PCIE_PORT_VC_CNTL__VC_ARB_SELECT_MASK 0x000EL
++//PSWUSCFG0_PCIE_PORT_VC_STATUS
++#define PSWUSCFG0_PCIE_PORT_VC_STATUS__VC_ARB_TABLE_STATUS__SHIFT 0x0
++#define PSWUSCFG0_PCIE_PORT_VC_STATUS__VC_ARB_TABLE_STATUS_MASK 0x0001L
++//PSWUSCFG0_PCIE_VC0_RESOURCE_CAP
++#define PSWUSCFG0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_CAP__SHIFT 0x0
++#define PSWUSCFG0_PCIE_VC0_RESOURCE_CAP__REJECT_SNOOP_TRANS__SHIFT 0xf
++#define PSWUSCFG0_PCIE_VC0_RESOURCE_CAP__MAX_TIME_SLOTS__SHIFT 0x10
++#define PSWUSCFG0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET__SHIFT 0x18
++#define PSWUSCFG0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_CAP_MASK 0x000000FFL
++#define PSWUSCFG0_PCIE_VC0_RESOURCE_CAP__REJECT_SNOOP_TRANS_MASK 0x00008000L
++#define PSWUSCFG0_PCIE_VC0_RESOURCE_CAP__MAX_TIME_SLOTS_MASK 0x003F0000L
++#define PSWUSCFG0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET_MASK 0xFF000000L
++//PSWUSCFG0_PCIE_VC0_RESOURCE_CNTL
++#define PSWUSCFG0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC0__SHIFT 0x0
++#define PSWUSCFG0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC1_7__SHIFT 0x1
++#define PSWUSCFG0_PCIE_VC0_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE__SHIFT 0x10
++#define PSWUSCFG0_PCIE_VC0_RESOURCE_CNTL__PORT_ARB_SELECT__SHIFT 0x11
++#define PSWUSCFG0_PCIE_VC0_RESOURCE_CNTL__VC_ID__SHIFT 0x18
++#define PSWUSCFG0_PCIE_VC0_RESOURCE_CNTL__VC_ENABLE__SHIFT 0x1f
++#define PSWUSCFG0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC0_MASK 0x00000001L
++#define PSWUSCFG0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC1_7_MASK 0x000000FEL
++#define PSWUSCFG0_PCIE_VC0_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE_MASK 0x00010000L
++#define PSWUSCFG0_PCIE_VC0_RESOURCE_CNTL__PORT_ARB_SELECT_MASK 0x000E0000L
++#define PSWUSCFG0_PCIE_VC0_RESOURCE_CNTL__VC_ID_MASK 0x07000000L
++#define PSWUSCFG0_PCIE_VC0_RESOURCE_CNTL__VC_ENABLE_MASK 0x80000000L
++//PSWUSCFG0_PCIE_VC0_RESOURCE_STATUS
++#define PSWUSCFG0_PCIE_VC0_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS__SHIFT 0x0
++#define PSWUSCFG0_PCIE_VC0_RESOURCE_STATUS__VC_NEGOTIATION_PENDING__SHIFT 0x1
++#define PSWUSCFG0_PCIE_VC0_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS_MASK 0x0001L
++#define PSWUSCFG0_PCIE_VC0_RESOURCE_STATUS__VC_NEGOTIATION_PENDING_MASK 0x0002L
++//PSWUSCFG0_PCIE_VC1_RESOURCE_CAP
++#define PSWUSCFG0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_CAP__SHIFT 0x0
++#define PSWUSCFG0_PCIE_VC1_RESOURCE_CAP__REJECT_SNOOP_TRANS__SHIFT 0xf
++#define PSWUSCFG0_PCIE_VC1_RESOURCE_CAP__MAX_TIME_SLOTS__SHIFT 0x10
++#define PSWUSCFG0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET__SHIFT 0x18
++#define PSWUSCFG0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_CAP_MASK 0x000000FFL
++#define PSWUSCFG0_PCIE_VC1_RESOURCE_CAP__REJECT_SNOOP_TRANS_MASK 0x00008000L
++#define PSWUSCFG0_PCIE_VC1_RESOURCE_CAP__MAX_TIME_SLOTS_MASK 0x003F0000L
++#define PSWUSCFG0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET_MASK 0xFF000000L
++//PSWUSCFG0_PCIE_VC1_RESOURCE_CNTL
++#define PSWUSCFG0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC0__SHIFT 0x0
++#define PSWUSCFG0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC1_7__SHIFT 0x1
++#define PSWUSCFG0_PCIE_VC1_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE__SHIFT 0x10
++#define PSWUSCFG0_PCIE_VC1_RESOURCE_CNTL__PORT_ARB_SELECT__SHIFT 0x11
++#define PSWUSCFG0_PCIE_VC1_RESOURCE_CNTL__VC_ID__SHIFT 0x18
++#define PSWUSCFG0_PCIE_VC1_RESOURCE_CNTL__VC_ENABLE__SHIFT 0x1f
++#define PSWUSCFG0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC0_MASK 0x00000001L
++#define PSWUSCFG0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC1_7_MASK 0x000000FEL
++#define PSWUSCFG0_PCIE_VC1_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE_MASK 0x00010000L
++#define PSWUSCFG0_PCIE_VC1_RESOURCE_CNTL__PORT_ARB_SELECT_MASK 0x000E0000L
++#define PSWUSCFG0_PCIE_VC1_RESOURCE_CNTL__VC_ID_MASK 0x07000000L
++#define PSWUSCFG0_PCIE_VC1_RESOURCE_CNTL__VC_ENABLE_MASK 0x80000000L
++//PSWUSCFG0_PCIE_VC1_RESOURCE_STATUS
++#define PSWUSCFG0_PCIE_VC1_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS__SHIFT 0x0
++#define PSWUSCFG0_PCIE_VC1_RESOURCE_STATUS__VC_NEGOTIATION_PENDING__SHIFT 0x1
++#define PSWUSCFG0_PCIE_VC1_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS_MASK 0x0001L
++#define PSWUSCFG0_PCIE_VC1_RESOURCE_STATUS__VC_NEGOTIATION_PENDING_MASK 0x0002L
++//PSWUSCFG0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST
++#define PSWUSCFG0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define PSWUSCFG0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define PSWUSCFG0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define PSWUSCFG0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define PSWUSCFG0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define PSWUSCFG0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//PSWUSCFG0_PCIE_DEV_SERIAL_NUM_DW1
++#define PSWUSCFG0_PCIE_DEV_SERIAL_NUM_DW1__SERIAL_NUMBER_LO__SHIFT 0x0
++#define PSWUSCFG0_PCIE_DEV_SERIAL_NUM_DW1__SERIAL_NUMBER_LO_MASK 0xFFFFFFFFL
++//PSWUSCFG0_PCIE_DEV_SERIAL_NUM_DW2
++#define PSWUSCFG0_PCIE_DEV_SERIAL_NUM_DW2__SERIAL_NUMBER_HI__SHIFT 0x0
++#define PSWUSCFG0_PCIE_DEV_SERIAL_NUM_DW2__SERIAL_NUMBER_HI_MASK 0xFFFFFFFFL
++//PSWUSCFG0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
++#define PSWUSCFG0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define PSWUSCFG0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define PSWUSCFG0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define PSWUSCFG0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define PSWUSCFG0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define PSWUSCFG0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//PSWUSCFG0_PCIE_UNCORR_ERR_STATUS
++#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
++#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
++#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
++#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
++#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
++#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
++#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
++#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
++#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
++#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
++#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
++#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
++#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
++#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
++#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
++#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
++#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS__SHIFT 0x1a
++#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS_MASK 0x04000000L
++//PSWUSCFG0_PCIE_UNCORR_ERR_MASK
++#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
++#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
++#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
++#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
++#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
++#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
++#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
++#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
++#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
++#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
++#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
++#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
++#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
++#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
++#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
++#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
++#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK__SHIFT 0x1a
++#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK_MASK 0x04000000L
++//PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY
++#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
++#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
++#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
++#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
++#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
++#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
++#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
++#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
++#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
++#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
++#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
++#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
++#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
++#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
++#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
++#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
++#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x1a
++#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
++#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY_MASK 0x04000000L
++//PSWUSCFG0_PCIE_CORR_ERR_STATUS
++#define PSWUSCFG0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
++#define PSWUSCFG0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
++#define PSWUSCFG0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
++#define PSWUSCFG0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
++#define PSWUSCFG0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
++#define PSWUSCFG0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
++#define PSWUSCFG0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
++#define PSWUSCFG0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
++#define PSWUSCFG0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
++#define PSWUSCFG0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
++#define PSWUSCFG0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
++#define PSWUSCFG0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
++#define PSWUSCFG0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
++#define PSWUSCFG0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
++#define PSWUSCFG0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
++#define PSWUSCFG0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
++//PSWUSCFG0_PCIE_CORR_ERR_MASK
++#define PSWUSCFG0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
++#define PSWUSCFG0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
++#define PSWUSCFG0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
++#define PSWUSCFG0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
++#define PSWUSCFG0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
++#define PSWUSCFG0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
++#define PSWUSCFG0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
++#define PSWUSCFG0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
++#define PSWUSCFG0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
++#define PSWUSCFG0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
++#define PSWUSCFG0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
++#define PSWUSCFG0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
++#define PSWUSCFG0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
++#define PSWUSCFG0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
++#define PSWUSCFG0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
++#define PSWUSCFG0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
++//PSWUSCFG0_PCIE_ADV_ERR_CAP_CNTL
++#define PSWUSCFG0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
++#define PSWUSCFG0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
++#define PSWUSCFG0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
++#define PSWUSCFG0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
++#define PSWUSCFG0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
++#define PSWUSCFG0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
++#define PSWUSCFG0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
++#define PSWUSCFG0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
++#define PSWUSCFG0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
++#define PSWUSCFG0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
++#define PSWUSCFG0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
++#define PSWUSCFG0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
++#define PSWUSCFG0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
++#define PSWUSCFG0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
++#define PSWUSCFG0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
++#define PSWUSCFG0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
++#define PSWUSCFG0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
++#define PSWUSCFG0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
++//PSWUSCFG0_PCIE_HDR_LOG0
++#define PSWUSCFG0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
++#define PSWUSCFG0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
++//PSWUSCFG0_PCIE_HDR_LOG1
++#define PSWUSCFG0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
++#define PSWUSCFG0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
++//PSWUSCFG0_PCIE_HDR_LOG2
++#define PSWUSCFG0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
++#define PSWUSCFG0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
++//PSWUSCFG0_PCIE_HDR_LOG3
++#define PSWUSCFG0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
++#define PSWUSCFG0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
++//PSWUSCFG0_PCIE_TLP_PREFIX_LOG0
++#define PSWUSCFG0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
++#define PSWUSCFG0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
++//PSWUSCFG0_PCIE_TLP_PREFIX_LOG1
++#define PSWUSCFG0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
++#define PSWUSCFG0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
++//PSWUSCFG0_PCIE_TLP_PREFIX_LOG2
++#define PSWUSCFG0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
++#define PSWUSCFG0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
++//PSWUSCFG0_PCIE_TLP_PREFIX_LOG3
++#define PSWUSCFG0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
++#define PSWUSCFG0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
++//PSWUSCFG0_PCIE_SECONDARY_ENH_CAP_LIST
++#define PSWUSCFG0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define PSWUSCFG0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define PSWUSCFG0_PCIE_SECONDARY_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define PSWUSCFG0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define PSWUSCFG0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define PSWUSCFG0_PCIE_SECONDARY_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//PSWUSCFG0_PCIE_LINK_CNTL3
++#define PSWUSCFG0_PCIE_LINK_CNTL3__PERFORM_EQUALIZATION__SHIFT 0x0
++#define PSWUSCFG0_PCIE_LINK_CNTL3__LINK_EQUALIZATION_REQ_INT_EN__SHIFT 0x1
++#define PSWUSCFG0_PCIE_LINK_CNTL3__ENABLE_LOWER_SKP_OS_GEN__SHIFT 0x9
++#define PSWUSCFG0_PCIE_LINK_CNTL3__RESERVED__SHIFT 0x10
++#define PSWUSCFG0_PCIE_LINK_CNTL3__PERFORM_EQUALIZATION_MASK 0x00000001L
++#define PSWUSCFG0_PCIE_LINK_CNTL3__LINK_EQUALIZATION_REQ_INT_EN_MASK 0x00000002L
++#define PSWUSCFG0_PCIE_LINK_CNTL3__ENABLE_LOWER_SKP_OS_GEN_MASK 0x0000FE00L
++#define PSWUSCFG0_PCIE_LINK_CNTL3__RESERVED_MASK 0xFFFF0000L
++//PSWUSCFG0_PCIE_LANE_ERROR_STATUS
++#define PSWUSCFG0_PCIE_LANE_ERROR_STATUS__LANE_ERROR_STATUS_BITS__SHIFT 0x0
++#define PSWUSCFG0_PCIE_LANE_ERROR_STATUS__RESERVED__SHIFT 0x10
++#define PSWUSCFG0_PCIE_LANE_ERROR_STATUS__LANE_ERROR_STATUS_BITS_MASK 0x0000FFFFL
++#define PSWUSCFG0_PCIE_LANE_ERROR_STATUS__RESERVED_MASK 0xFFFF0000L
++//PSWUSCFG0_PCIE_LANE_0_EQUALIZATION_CNTL
++#define PSWUSCFG0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define PSWUSCFG0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define PSWUSCFG0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define PSWUSCFG0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define PSWUSCFG0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define PSWUSCFG0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define PSWUSCFG0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define PSWUSCFG0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++//PSWUSCFG0_PCIE_LANE_1_EQUALIZATION_CNTL
++#define PSWUSCFG0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define PSWUSCFG0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define PSWUSCFG0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define PSWUSCFG0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define PSWUSCFG0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define PSWUSCFG0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define PSWUSCFG0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define PSWUSCFG0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++//PSWUSCFG0_PCIE_LANE_2_EQUALIZATION_CNTL
++#define PSWUSCFG0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define PSWUSCFG0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define PSWUSCFG0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define PSWUSCFG0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define PSWUSCFG0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define PSWUSCFG0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define PSWUSCFG0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define PSWUSCFG0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++//PSWUSCFG0_PCIE_LANE_3_EQUALIZATION_CNTL
++#define PSWUSCFG0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define PSWUSCFG0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define PSWUSCFG0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define PSWUSCFG0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define PSWUSCFG0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define PSWUSCFG0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define PSWUSCFG0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define PSWUSCFG0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++//PSWUSCFG0_PCIE_LANE_4_EQUALIZATION_CNTL
++#define PSWUSCFG0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define PSWUSCFG0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define PSWUSCFG0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define PSWUSCFG0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define PSWUSCFG0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define PSWUSCFG0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define PSWUSCFG0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define PSWUSCFG0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++//PSWUSCFG0_PCIE_LANE_5_EQUALIZATION_CNTL
++#define PSWUSCFG0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define PSWUSCFG0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define PSWUSCFG0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define PSWUSCFG0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define PSWUSCFG0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define PSWUSCFG0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define PSWUSCFG0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define PSWUSCFG0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++//PSWUSCFG0_PCIE_LANE_6_EQUALIZATION_CNTL
++#define PSWUSCFG0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define PSWUSCFG0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define PSWUSCFG0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define PSWUSCFG0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define PSWUSCFG0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define PSWUSCFG0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define PSWUSCFG0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define PSWUSCFG0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++//PSWUSCFG0_PCIE_LANE_7_EQUALIZATION_CNTL
++#define PSWUSCFG0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define PSWUSCFG0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define PSWUSCFG0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define PSWUSCFG0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define PSWUSCFG0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define PSWUSCFG0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define PSWUSCFG0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define PSWUSCFG0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++//PSWUSCFG0_PCIE_LANE_8_EQUALIZATION_CNTL
++#define PSWUSCFG0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define PSWUSCFG0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define PSWUSCFG0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define PSWUSCFG0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define PSWUSCFG0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define PSWUSCFG0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define PSWUSCFG0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define PSWUSCFG0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++//PSWUSCFG0_PCIE_LANE_9_EQUALIZATION_CNTL
++#define PSWUSCFG0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define PSWUSCFG0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define PSWUSCFG0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define PSWUSCFG0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define PSWUSCFG0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define PSWUSCFG0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define PSWUSCFG0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define PSWUSCFG0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++//PSWUSCFG0_PCIE_LANE_10_EQUALIZATION_CNTL
++#define PSWUSCFG0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define PSWUSCFG0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define PSWUSCFG0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define PSWUSCFG0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define PSWUSCFG0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define PSWUSCFG0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define PSWUSCFG0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define PSWUSCFG0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++//PSWUSCFG0_PCIE_LANE_11_EQUALIZATION_CNTL
++#define PSWUSCFG0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define PSWUSCFG0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define PSWUSCFG0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define PSWUSCFG0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define PSWUSCFG0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define PSWUSCFG0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define PSWUSCFG0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define PSWUSCFG0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++//PSWUSCFG0_PCIE_LANE_12_EQUALIZATION_CNTL
++#define PSWUSCFG0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define PSWUSCFG0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define PSWUSCFG0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define PSWUSCFG0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define PSWUSCFG0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define PSWUSCFG0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define PSWUSCFG0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define PSWUSCFG0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++//PSWUSCFG0_PCIE_LANE_13_EQUALIZATION_CNTL
++#define PSWUSCFG0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define PSWUSCFG0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define PSWUSCFG0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define PSWUSCFG0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define PSWUSCFG0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define PSWUSCFG0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define PSWUSCFG0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define PSWUSCFG0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++//PSWUSCFG0_PCIE_LANE_14_EQUALIZATION_CNTL
++#define PSWUSCFG0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define PSWUSCFG0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define PSWUSCFG0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define PSWUSCFG0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define PSWUSCFG0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define PSWUSCFG0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define PSWUSCFG0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define PSWUSCFG0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++//PSWUSCFG0_PCIE_LANE_15_EQUALIZATION_CNTL
++#define PSWUSCFG0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define PSWUSCFG0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define PSWUSCFG0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define PSWUSCFG0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define PSWUSCFG0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define PSWUSCFG0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define PSWUSCFG0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define PSWUSCFG0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++//PSWUSCFG0_PCIE_ACS_ENH_CAP_LIST
++#define PSWUSCFG0_PCIE_ACS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define PSWUSCFG0_PCIE_ACS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define PSWUSCFG0_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define PSWUSCFG0_PCIE_ACS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define PSWUSCFG0_PCIE_ACS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define PSWUSCFG0_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//PSWUSCFG0_PCIE_ACS_CAP
++#define PSWUSCFG0_PCIE_ACS_CAP__SOURCE_VALIDATION__SHIFT 0x0
++#define PSWUSCFG0_PCIE_ACS_CAP__TRANSLATION_BLOCKING__SHIFT 0x1
++#define PSWUSCFG0_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT__SHIFT 0x2
++#define PSWUSCFG0_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT__SHIFT 0x3
++#define PSWUSCFG0_PCIE_ACS_CAP__UPSTREAM_FORWARDING__SHIFT 0x4
++#define PSWUSCFG0_PCIE_ACS_CAP__P2P_EGRESS_CONTROL__SHIFT 0x5
++#define PSWUSCFG0_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P__SHIFT 0x6
++#define PSWUSCFG0_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE__SHIFT 0x8
++#define PSWUSCFG0_PCIE_ACS_CAP__SOURCE_VALIDATION_MASK 0x0001L
++#define PSWUSCFG0_PCIE_ACS_CAP__TRANSLATION_BLOCKING_MASK 0x0002L
++#define PSWUSCFG0_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT_MASK 0x0004L
++#define PSWUSCFG0_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT_MASK 0x0008L
++#define PSWUSCFG0_PCIE_ACS_CAP__UPSTREAM_FORWARDING_MASK 0x0010L
++#define PSWUSCFG0_PCIE_ACS_CAP__P2P_EGRESS_CONTROL_MASK 0x0020L
++#define PSWUSCFG0_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P_MASK 0x0040L
++#define PSWUSCFG0_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE_MASK 0xFF00L
++//PSWUSCFG0_PCIE_ACS_CNTL
++#define PSWUSCFG0_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN__SHIFT 0x0
++#define PSWUSCFG0_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN__SHIFT 0x1
++#define PSWUSCFG0_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN__SHIFT 0x2
++#define PSWUSCFG0_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN__SHIFT 0x3
++#define PSWUSCFG0_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN__SHIFT 0x4
++#define PSWUSCFG0_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN__SHIFT 0x5
++#define PSWUSCFG0_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN__SHIFT 0x6
++#define PSWUSCFG0_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN_MASK 0x0001L
++#define PSWUSCFG0_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN_MASK 0x0002L
++#define PSWUSCFG0_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN_MASK 0x0004L
++#define PSWUSCFG0_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN_MASK 0x0008L
++#define PSWUSCFG0_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN_MASK 0x0010L
++#define PSWUSCFG0_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN_MASK 0x0020L
++#define PSWUSCFG0_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN_MASK 0x0040L
++//PSWUSCFG0_PCIE_MC_ENH_CAP_LIST
++#define PSWUSCFG0_PCIE_MC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define PSWUSCFG0_PCIE_MC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define PSWUSCFG0_PCIE_MC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define PSWUSCFG0_PCIE_MC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define PSWUSCFG0_PCIE_MC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define PSWUSCFG0_PCIE_MC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//PSWUSCFG0_PCIE_MC_CAP
++#define PSWUSCFG0_PCIE_MC_CAP__MC_MAX_GROUP__SHIFT 0x0
++#define PSWUSCFG0_PCIE_MC_CAP__MC_ECRC_REGEN_SUPP__SHIFT 0xf
++#define PSWUSCFG0_PCIE_MC_CAP__MC_MAX_GROUP_MASK 0x003FL
++#define PSWUSCFG0_PCIE_MC_CAP__MC_ECRC_REGEN_SUPP_MASK 0x8000L
++//PSWUSCFG0_PCIE_MC_CNTL
++#define PSWUSCFG0_PCIE_MC_CNTL__MC_NUM_GROUP__SHIFT 0x0
++#define PSWUSCFG0_PCIE_MC_CNTL__MC_ENABLE__SHIFT 0xf
++#define PSWUSCFG0_PCIE_MC_CNTL__MC_NUM_GROUP_MASK 0x003FL
++#define PSWUSCFG0_PCIE_MC_CNTL__MC_ENABLE_MASK 0x8000L
++//PSWUSCFG0_PCIE_MC_ADDR0
++#define PSWUSCFG0_PCIE_MC_ADDR0__MC_INDEX_POS__SHIFT 0x0
++#define PSWUSCFG0_PCIE_MC_ADDR0__MC_BASE_ADDR_0__SHIFT 0xc
++#define PSWUSCFG0_PCIE_MC_ADDR0__MC_INDEX_POS_MASK 0x0000003FL
++#define PSWUSCFG0_PCIE_MC_ADDR0__MC_BASE_ADDR_0_MASK 0xFFFFF000L
++//PSWUSCFG0_PCIE_MC_ADDR1
++#define PSWUSCFG0_PCIE_MC_ADDR1__MC_BASE_ADDR_1__SHIFT 0x0
++#define PSWUSCFG0_PCIE_MC_ADDR1__MC_BASE_ADDR_1_MASK 0xFFFFFFFFL
++//PSWUSCFG0_PCIE_MC_RCV0
++#define PSWUSCFG0_PCIE_MC_RCV0__MC_RECEIVE_0__SHIFT 0x0
++#define PSWUSCFG0_PCIE_MC_RCV0__MC_RECEIVE_0_MASK 0xFFFFFFFFL
++//PSWUSCFG0_PCIE_MC_RCV1
++#define PSWUSCFG0_PCIE_MC_RCV1__MC_RECEIVE_1__SHIFT 0x0
++#define PSWUSCFG0_PCIE_MC_RCV1__MC_RECEIVE_1_MASK 0xFFFFFFFFL
++//PSWUSCFG0_PCIE_MC_BLOCK_ALL0
++#define PSWUSCFG0_PCIE_MC_BLOCK_ALL0__MC_BLOCK_ALL_0__SHIFT 0x0
++#define PSWUSCFG0_PCIE_MC_BLOCK_ALL0__MC_BLOCK_ALL_0_MASK 0xFFFFFFFFL
++//PSWUSCFG0_PCIE_MC_BLOCK_ALL1
++#define PSWUSCFG0_PCIE_MC_BLOCK_ALL1__MC_BLOCK_ALL_1__SHIFT 0x0
++#define PSWUSCFG0_PCIE_MC_BLOCK_ALL1__MC_BLOCK_ALL_1_MASK 0xFFFFFFFFL
++//PSWUSCFG0_PCIE_MC_BLOCK_UNTRANSLATED_0
++#define PSWUSCFG0_PCIE_MC_BLOCK_UNTRANSLATED_0__MC_BLOCK_UNTRANSLATED_0__SHIFT 0x0
++#define PSWUSCFG0_PCIE_MC_BLOCK_UNTRANSLATED_0__MC_BLOCK_UNTRANSLATED_0_MASK 0xFFFFFFFFL
++//PSWUSCFG0_PCIE_MC_BLOCK_UNTRANSLATED_1
++#define PSWUSCFG0_PCIE_MC_BLOCK_UNTRANSLATED_1__MC_BLOCK_UNTRANSLATED_1__SHIFT 0x0
++#define PSWUSCFG0_PCIE_MC_BLOCK_UNTRANSLATED_1__MC_BLOCK_UNTRANSLATED_1_MASK 0xFFFFFFFFL
++//PCIE_MC_OVERLAY_BAR0
++#define PCIE_MC_OVERLAY_BAR0__MC_OVERLAY_SIZE__SHIFT 0x0
++#define PCIE_MC_OVERLAY_BAR0__MC_OVERLAY_BAR_0__SHIFT 0x6
++#define PCIE_MC_OVERLAY_BAR0__MC_OVERLAY_SIZE_MASK 0x0000003FL
++#define PCIE_MC_OVERLAY_BAR0__MC_OVERLAY_BAR_0_MASK 0xFFFFFFC0L
++//PCIE_MC_OVERLAY_BAR1
++#define PCIE_MC_OVERLAY_BAR1__MC_OVERLAY_BAR_1__SHIFT 0x0
++#define PCIE_MC_OVERLAY_BAR1__MC_OVERLAY_BAR_1_MASK 0xFFFFFFFFL
++//PSWUSCFG0_PCIE_LTR_ENH_CAP_LIST
++#define PSWUSCFG0_PCIE_LTR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define PSWUSCFG0_PCIE_LTR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define PSWUSCFG0_PCIE_LTR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define PSWUSCFG0_PCIE_LTR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define PSWUSCFG0_PCIE_LTR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define PSWUSCFG0_PCIE_LTR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//PSWUSCFG0_PCIE_LTR_CAP
++#define PSWUSCFG0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_VALUE__SHIFT 0x0
++#define PSWUSCFG0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_SCALE__SHIFT 0xa
++#define PSWUSCFG0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_VALUE__SHIFT 0x10
++#define PSWUSCFG0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_SCALE__SHIFT 0x1a
++#define PSWUSCFG0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_VALUE_MASK 0x000003FFL
++#define PSWUSCFG0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_SCALE_MASK 0x00001C00L
++#define PSWUSCFG0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_VALUE_MASK 0x03FF0000L
++#define PSWUSCFG0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_SCALE_MASK 0x1C000000L
++//PSWUSCFG0_PCIE_ARI_ENH_CAP_LIST
++#define PSWUSCFG0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define PSWUSCFG0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define PSWUSCFG0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define PSWUSCFG0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define PSWUSCFG0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define PSWUSCFG0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//PSWUSCFG0_PCIE_ARI_CAP
++#define PSWUSCFG0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
++#define PSWUSCFG0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
++#define PSWUSCFG0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
++#define PSWUSCFG0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
++#define PSWUSCFG0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
++#define PSWUSCFG0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
++//PSWUSCFG0_PCIE_ARI_CNTL
++#define PSWUSCFG0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
++#define PSWUSCFG0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
++#define PSWUSCFG0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
++#define PSWUSCFG0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
++#define PSWUSCFG0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
++#define PSWUSCFG0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
++//PCIE_L1_PM_SUB_CAP_LIST
++#define PCIE_L1_PM_SUB_CAP_LIST__CAP_ID__SHIFT 0x0
++#define PCIE_L1_PM_SUB_CAP_LIST__CAP_VER__SHIFT 0x10
++#define PCIE_L1_PM_SUB_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define PCIE_L1_PM_SUB_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define PCIE_L1_PM_SUB_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define PCIE_L1_PM_SUB_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//PCIE_L1_PM_SUB_CAP
++#define PCIE_L1_PM_SUB_CAP__PCI_PM_L1_2_SUPPORTED__SHIFT 0x0
++#define PCIE_L1_PM_SUB_CAP__PCI_PM_L1_1_SUPPORTED__SHIFT 0x1
++#define PCIE_L1_PM_SUB_CAP__ASPM_L1_2_SUPPORTED__SHIFT 0x2
++#define PCIE_L1_PM_SUB_CAP__ASPM_L1_1_SUPPORTED__SHIFT 0x3
++#define PCIE_L1_PM_SUB_CAP__L1_PM_SUB_SUPPORTED__SHIFT 0x4
++#define PCIE_L1_PM_SUB_CAP__PORT_CM_RESTORE_TIME__SHIFT 0x8
++#define PCIE_L1_PM_SUB_CAP__PORT_T_POWER_ON_SCALE__SHIFT 0x10
++#define PCIE_L1_PM_SUB_CAP__PORT_T_POWER_ON_VALUE__SHIFT 0x13
++#define PCIE_L1_PM_SUB_CAP__PCI_PM_L1_2_SUPPORTED_MASK 0x00000001L
++#define PCIE_L1_PM_SUB_CAP__PCI_PM_L1_1_SUPPORTED_MASK 0x00000002L
++#define PCIE_L1_PM_SUB_CAP__ASPM_L1_2_SUPPORTED_MASK 0x00000004L
++#define PCIE_L1_PM_SUB_CAP__ASPM_L1_1_SUPPORTED_MASK 0x00000008L
++#define PCIE_L1_PM_SUB_CAP__L1_PM_SUB_SUPPORTED_MASK 0x00000010L
++#define PCIE_L1_PM_SUB_CAP__PORT_CM_RESTORE_TIME_MASK 0x0000FF00L
++#define PCIE_L1_PM_SUB_CAP__PORT_T_POWER_ON_SCALE_MASK 0x00030000L
++#define PCIE_L1_PM_SUB_CAP__PORT_T_POWER_ON_VALUE_MASK 0x00F80000L
++//PCIE_L1_PM_SUB_CNTL
++#define PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_2_EN__SHIFT 0x0
++#define PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_1_EN__SHIFT 0x1
++#define PCIE_L1_PM_SUB_CNTL__ASPM_L1_2_EN__SHIFT 0x2
++#define PCIE_L1_PM_SUB_CNTL__ASPM_L1_1_EN__SHIFT 0x3
++#define PCIE_L1_PM_SUB_CNTL__COMMON_MODE_RESTORE_TIME__SHIFT 0x8
++#define PCIE_L1_PM_SUB_CNTL__LTR_L1_2_THRESHOLD_VALUE__SHIFT 0x10
++#define PCIE_L1_PM_SUB_CNTL__LTR_L1_2_THRESHOLD_SCALE__SHIFT 0x1d
++#define PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_2_EN_MASK 0x00000001L
++#define PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_1_EN_MASK 0x00000002L
++#define PCIE_L1_PM_SUB_CNTL__ASPM_L1_2_EN_MASK 0x00000004L
++#define PCIE_L1_PM_SUB_CNTL__ASPM_L1_1_EN_MASK 0x00000008L
++#define PCIE_L1_PM_SUB_CNTL__COMMON_MODE_RESTORE_TIME_MASK 0x0000FF00L
++#define PCIE_L1_PM_SUB_CNTL__LTR_L1_2_THRESHOLD_VALUE_MASK 0x03FF0000L
++#define PCIE_L1_PM_SUB_CNTL__LTR_L1_2_THRESHOLD_SCALE_MASK 0xE0000000L
++//PCIE_L1_PM_SUB_CNTL2
++#define PCIE_L1_PM_SUB_CNTL2__T_POWER_ON_SCALE__SHIFT 0x0
++#define PCIE_L1_PM_SUB_CNTL2__T_POWER_ON_VALUE__SHIFT 0x3
++#define PCIE_L1_PM_SUB_CNTL2__T_POWER_ON_SCALE_MASK 0x00000003L
++#define PCIE_L1_PM_SUB_CNTL2__T_POWER_ON_VALUE_MASK 0x000000F8L
++//PCIE_ESM_CAP_LIST
++#define PCIE_ESM_CAP_LIST__CAP_ID__SHIFT 0x0
++#define PCIE_ESM_CAP_LIST__CAP_VER__SHIFT 0x10
++#define PCIE_ESM_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define PCIE_ESM_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define PCIE_ESM_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define PCIE_ESM_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//PCIE_ESM_HEADER_1
++#define PCIE_ESM_HEADER_1__ESM_VENDOR_ID__SHIFT 0x0
++#define PCIE_ESM_HEADER_1__ESM_CAP_REV__SHIFT 0x10
++#define PCIE_ESM_HEADER_1__ESM_CAP_LEN__SHIFT 0x14
++#define PCIE_ESM_HEADER_1__ESM_VENDOR_ID_MASK 0x0000FFFFL
++#define PCIE_ESM_HEADER_1__ESM_CAP_REV_MASK 0x000F0000L
++#define PCIE_ESM_HEADER_1__ESM_CAP_LEN_MASK 0xFFF00000L
++//PCIE_ESM_HEADER_2
++#define PCIE_ESM_HEADER_2__CAP_ID__SHIFT 0x0
++#define PCIE_ESM_HEADER_2__CAP_ID_MASK 0xFFFFL
++//PCIE_ESM_STATUS
++#define PCIE_ESM_STATUS__MIN_TIME_IN_EI_VAL__SHIFT 0x0
++#define PCIE_ESM_STATUS__MIN_TIME_IN_EI_SCALE__SHIFT 0x9
++#define PCIE_ESM_STATUS__MIN_TIME_IN_EI_VAL_MASK 0x01FFL
++#define PCIE_ESM_STATUS__MIN_TIME_IN_EI_SCALE_MASK 0x0E00L
++//PCIE_ESM_CTRL
++#define PCIE_ESM_CTRL__ESM_GEN_3_DATA_RATE__SHIFT 0x0
++#define PCIE_ESM_CTRL__ESM_GEN_4_DATA_RATE__SHIFT 0x8
++#define PCIE_ESM_CTRL__ESM_ENABLED__SHIFT 0xf
++#define PCIE_ESM_CTRL__ESM_GEN_3_DATA_RATE_MASK 0x007FL
++#define PCIE_ESM_CTRL__ESM_GEN_4_DATA_RATE_MASK 0x7F00L
++#define PCIE_ESM_CTRL__ESM_ENABLED_MASK 0x8000L
++//PCIE_ESM_CAP_1
++#define PCIE_ESM_CAP_1__ESM_8P0G__SHIFT 0x0
++#define PCIE_ESM_CAP_1__ESM_8P1G__SHIFT 0x1
++#define PCIE_ESM_CAP_1__ESM_8P2G__SHIFT 0x2
++#define PCIE_ESM_CAP_1__ESM_8P3G__SHIFT 0x3
++#define PCIE_ESM_CAP_1__ESM_8P4G__SHIFT 0x4
++#define PCIE_ESM_CAP_1__ESM_8P5G__SHIFT 0x5
++#define PCIE_ESM_CAP_1__ESM_8P6G__SHIFT 0x6
++#define PCIE_ESM_CAP_1__ESM_8P7G__SHIFT 0x7
++#define PCIE_ESM_CAP_1__ESM_8P8G__SHIFT 0x8
++#define PCIE_ESM_CAP_1__ESM_8P9G__SHIFT 0x9
++#define PCIE_ESM_CAP_1__ESM_9P0G__SHIFT 0xa
++#define PCIE_ESM_CAP_1__ESM_9P1G__SHIFT 0xb
++#define PCIE_ESM_CAP_1__ESM_9P2G__SHIFT 0xc
++#define PCIE_ESM_CAP_1__ESM_9P3G__SHIFT 0xd
++#define PCIE_ESM_CAP_1__ESM_9P4G__SHIFT 0xe
++#define PCIE_ESM_CAP_1__ESM_9P5G__SHIFT 0xf
++#define PCIE_ESM_CAP_1__ESM_9P6G__SHIFT 0x10
++#define PCIE_ESM_CAP_1__ESM_9P7G__SHIFT 0x11
++#define PCIE_ESM_CAP_1__ESM_9P8G__SHIFT 0x12
++#define PCIE_ESM_CAP_1__ESM_9P9G__SHIFT 0x13
++#define PCIE_ESM_CAP_1__ESM_10P0G__SHIFT 0x14
++#define PCIE_ESM_CAP_1__ESM_10P1G__SHIFT 0x15
++#define PCIE_ESM_CAP_1__ESM_10P2G__SHIFT 0x16
++#define PCIE_ESM_CAP_1__ESM_10P3G__SHIFT 0x17
++#define PCIE_ESM_CAP_1__ESM_10P4G__SHIFT 0x18
++#define PCIE_ESM_CAP_1__ESM_10P5G__SHIFT 0x19
++#define PCIE_ESM_CAP_1__ESM_10P6G__SHIFT 0x1a
++#define PCIE_ESM_CAP_1__ESM_10P7G__SHIFT 0x1b
++#define PCIE_ESM_CAP_1__ESM_10P8G__SHIFT 0x1c
++#define PCIE_ESM_CAP_1__ESM_10P9G__SHIFT 0x1d
++#define PCIE_ESM_CAP_1__ESM_8P0G_MASK 0x00000001L
++#define PCIE_ESM_CAP_1__ESM_8P1G_MASK 0x00000002L
++#define PCIE_ESM_CAP_1__ESM_8P2G_MASK 0x00000004L
++#define PCIE_ESM_CAP_1__ESM_8P3G_MASK 0x00000008L
++#define PCIE_ESM_CAP_1__ESM_8P4G_MASK 0x00000010L
++#define PCIE_ESM_CAP_1__ESM_8P5G_MASK 0x00000020L
++#define PCIE_ESM_CAP_1__ESM_8P6G_MASK 0x00000040L
++#define PCIE_ESM_CAP_1__ESM_8P7G_MASK 0x00000080L
++#define PCIE_ESM_CAP_1__ESM_8P8G_MASK 0x00000100L
++#define PCIE_ESM_CAP_1__ESM_8P9G_MASK 0x00000200L
++#define PCIE_ESM_CAP_1__ESM_9P0G_MASK 0x00000400L
++#define PCIE_ESM_CAP_1__ESM_9P1G_MASK 0x00000800L
++#define PCIE_ESM_CAP_1__ESM_9P2G_MASK 0x00001000L
++#define PCIE_ESM_CAP_1__ESM_9P3G_MASK 0x00002000L
++#define PCIE_ESM_CAP_1__ESM_9P4G_MASK 0x00004000L
++#define PCIE_ESM_CAP_1__ESM_9P5G_MASK 0x00008000L
++#define PCIE_ESM_CAP_1__ESM_9P6G_MASK 0x00010000L
++#define PCIE_ESM_CAP_1__ESM_9P7G_MASK 0x00020000L
++#define PCIE_ESM_CAP_1__ESM_9P8G_MASK 0x00040000L
++#define PCIE_ESM_CAP_1__ESM_9P9G_MASK 0x00080000L
++#define PCIE_ESM_CAP_1__ESM_10P0G_MASK 0x00100000L
++#define PCIE_ESM_CAP_1__ESM_10P1G_MASK 0x00200000L
++#define PCIE_ESM_CAP_1__ESM_10P2G_MASK 0x00400000L
++#define PCIE_ESM_CAP_1__ESM_10P3G_MASK 0x00800000L
++#define PCIE_ESM_CAP_1__ESM_10P4G_MASK 0x01000000L
++#define PCIE_ESM_CAP_1__ESM_10P5G_MASK 0x02000000L
++#define PCIE_ESM_CAP_1__ESM_10P6G_MASK 0x04000000L
++#define PCIE_ESM_CAP_1__ESM_10P7G_MASK 0x08000000L
++#define PCIE_ESM_CAP_1__ESM_10P8G_MASK 0x10000000L
++#define PCIE_ESM_CAP_1__ESM_10P9G_MASK 0x20000000L
++//PCIE_ESM_CAP_2
++#define PCIE_ESM_CAP_2__ESM_11P0G__SHIFT 0x0
++#define PCIE_ESM_CAP_2__ESM_11P1G__SHIFT 0x1
++#define PCIE_ESM_CAP_2__ESM_11P2G__SHIFT 0x2
++#define PCIE_ESM_CAP_2__ESM_11P3G__SHIFT 0x3
++#define PCIE_ESM_CAP_2__ESM_11P4G__SHIFT 0x4
++#define PCIE_ESM_CAP_2__ESM_11P5G__SHIFT 0x5
++#define PCIE_ESM_CAP_2__ESM_11P6G__SHIFT 0x6
++#define PCIE_ESM_CAP_2__ESM_11P7G__SHIFT 0x7
++#define PCIE_ESM_CAP_2__ESM_11P8G__SHIFT 0x8
++#define PCIE_ESM_CAP_2__ESM_11P9G__SHIFT 0x9
++#define PCIE_ESM_CAP_2__ESM_12P0G__SHIFT 0xa
++#define PCIE_ESM_CAP_2__ESM_12P1G__SHIFT 0xb
++#define PCIE_ESM_CAP_2__ESM_12P2G__SHIFT 0xc
++#define PCIE_ESM_CAP_2__ESM_12P3G__SHIFT 0xd
++#define PCIE_ESM_CAP_2__ESM_12P4G__SHIFT 0xe
++#define PCIE_ESM_CAP_2__ESM_12P5G__SHIFT 0xf
++#define PCIE_ESM_CAP_2__ESM_12P6G__SHIFT 0x10
++#define PCIE_ESM_CAP_2__ESM_12P7G__SHIFT 0x11
++#define PCIE_ESM_CAP_2__ESM_12P8G__SHIFT 0x12
++#define PCIE_ESM_CAP_2__ESM_12P9G__SHIFT 0x13
++#define PCIE_ESM_CAP_2__ESM_13P0G__SHIFT 0x14
++#define PCIE_ESM_CAP_2__ESM_13P1G__SHIFT 0x15
++#define PCIE_ESM_CAP_2__ESM_13P2G__SHIFT 0x16
++#define PCIE_ESM_CAP_2__ESM_13P3G__SHIFT 0x17
++#define PCIE_ESM_CAP_2__ESM_13P4G__SHIFT 0x18
++#define PCIE_ESM_CAP_2__ESM_13P5G__SHIFT 0x19
++#define PCIE_ESM_CAP_2__ESM_13P6G__SHIFT 0x1a
++#define PCIE_ESM_CAP_2__ESM_13P7G__SHIFT 0x1b
++#define PCIE_ESM_CAP_2__ESM_13P8G__SHIFT 0x1c
++#define PCIE_ESM_CAP_2__ESM_13P9G__SHIFT 0x1d
++#define PCIE_ESM_CAP_2__ESM_11P0G_MASK 0x00000001L
++#define PCIE_ESM_CAP_2__ESM_11P1G_MASK 0x00000002L
++#define PCIE_ESM_CAP_2__ESM_11P2G_MASK 0x00000004L
++#define PCIE_ESM_CAP_2__ESM_11P3G_MASK 0x00000008L
++#define PCIE_ESM_CAP_2__ESM_11P4G_MASK 0x00000010L
++#define PCIE_ESM_CAP_2__ESM_11P5G_MASK 0x00000020L
++#define PCIE_ESM_CAP_2__ESM_11P6G_MASK 0x00000040L
++#define PCIE_ESM_CAP_2__ESM_11P7G_MASK 0x00000080L
++#define PCIE_ESM_CAP_2__ESM_11P8G_MASK 0x00000100L
++#define PCIE_ESM_CAP_2__ESM_11P9G_MASK 0x00000200L
++#define PCIE_ESM_CAP_2__ESM_12P0G_MASK 0x00000400L
++#define PCIE_ESM_CAP_2__ESM_12P1G_MASK 0x00000800L
++#define PCIE_ESM_CAP_2__ESM_12P2G_MASK 0x00001000L
++#define PCIE_ESM_CAP_2__ESM_12P3G_MASK 0x00002000L
++#define PCIE_ESM_CAP_2__ESM_12P4G_MASK 0x00004000L
++#define PCIE_ESM_CAP_2__ESM_12P5G_MASK 0x00008000L
++#define PCIE_ESM_CAP_2__ESM_12P6G_MASK 0x00010000L
++#define PCIE_ESM_CAP_2__ESM_12P7G_MASK 0x00020000L
++#define PCIE_ESM_CAP_2__ESM_12P8G_MASK 0x00040000L
++#define PCIE_ESM_CAP_2__ESM_12P9G_MASK 0x00080000L
++#define PCIE_ESM_CAP_2__ESM_13P0G_MASK 0x00100000L
++#define PCIE_ESM_CAP_2__ESM_13P1G_MASK 0x00200000L
++#define PCIE_ESM_CAP_2__ESM_13P2G_MASK 0x00400000L
++#define PCIE_ESM_CAP_2__ESM_13P3G_MASK 0x00800000L
++#define PCIE_ESM_CAP_2__ESM_13P4G_MASK 0x01000000L
++#define PCIE_ESM_CAP_2__ESM_13P5G_MASK 0x02000000L
++#define PCIE_ESM_CAP_2__ESM_13P6G_MASK 0x04000000L
++#define PCIE_ESM_CAP_2__ESM_13P7G_MASK 0x08000000L
++#define PCIE_ESM_CAP_2__ESM_13P8G_MASK 0x10000000L
++#define PCIE_ESM_CAP_2__ESM_13P9G_MASK 0x20000000L
++//PCIE_ESM_CAP_3
++#define PCIE_ESM_CAP_3__ESM_14P0G__SHIFT 0x0
++#define PCIE_ESM_CAP_3__ESM_14P1G__SHIFT 0x1
++#define PCIE_ESM_CAP_3__ESM_14P2G__SHIFT 0x2
++#define PCIE_ESM_CAP_3__ESM_14P3G__SHIFT 0x3
++#define PCIE_ESM_CAP_3__ESM_14P4G__SHIFT 0x4
++#define PCIE_ESM_CAP_3__ESM_14P5G__SHIFT 0x5
++#define PCIE_ESM_CAP_3__ESM_14P6G__SHIFT 0x6
++#define PCIE_ESM_CAP_3__ESM_14P7G__SHIFT 0x7
++#define PCIE_ESM_CAP_3__ESM_14P8G__SHIFT 0x8
++#define PCIE_ESM_CAP_3__ESM_14P9G__SHIFT 0x9
++#define PCIE_ESM_CAP_3__ESM_15P0G__SHIFT 0xa
++#define PCIE_ESM_CAP_3__ESM_15P1G__SHIFT 0xb
++#define PCIE_ESM_CAP_3__ESM_15P2G__SHIFT 0xc
++#define PCIE_ESM_CAP_3__ESM_15P3G__SHIFT 0xd
++#define PCIE_ESM_CAP_3__ESM_15P4G__SHIFT 0xe
++#define PCIE_ESM_CAP_3__ESM_15P5G__SHIFT 0xf
++#define PCIE_ESM_CAP_3__ESM_15P6G__SHIFT 0x10
++#define PCIE_ESM_CAP_3__ESM_15P7G__SHIFT 0x11
++#define PCIE_ESM_CAP_3__ESM_15P8G__SHIFT 0x12
++#define PCIE_ESM_CAP_3__ESM_15P9G__SHIFT 0x13
++#define PCIE_ESM_CAP_3__ESM_14P0G_MASK 0x00000001L
++#define PCIE_ESM_CAP_3__ESM_14P1G_MASK 0x00000002L
++#define PCIE_ESM_CAP_3__ESM_14P2G_MASK 0x00000004L
++#define PCIE_ESM_CAP_3__ESM_14P3G_MASK 0x00000008L
++#define PCIE_ESM_CAP_3__ESM_14P4G_MASK 0x00000010L
++#define PCIE_ESM_CAP_3__ESM_14P5G_MASK 0x00000020L
++#define PCIE_ESM_CAP_3__ESM_14P6G_MASK 0x00000040L
++#define PCIE_ESM_CAP_3__ESM_14P7G_MASK 0x00000080L
++#define PCIE_ESM_CAP_3__ESM_14P8G_MASK 0x00000100L
++#define PCIE_ESM_CAP_3__ESM_14P9G_MASK 0x00000200L
++#define PCIE_ESM_CAP_3__ESM_15P0G_MASK 0x00000400L
++#define PCIE_ESM_CAP_3__ESM_15P1G_MASK 0x00000800L
++#define PCIE_ESM_CAP_3__ESM_15P2G_MASK 0x00001000L
++#define PCIE_ESM_CAP_3__ESM_15P3G_MASK 0x00002000L
++#define PCIE_ESM_CAP_3__ESM_15P4G_MASK 0x00004000L
++#define PCIE_ESM_CAP_3__ESM_15P5G_MASK 0x00008000L
++#define PCIE_ESM_CAP_3__ESM_15P6G_MASK 0x00010000L
++#define PCIE_ESM_CAP_3__ESM_15P7G_MASK 0x00020000L
++#define PCIE_ESM_CAP_3__ESM_15P8G_MASK 0x00040000L
++#define PCIE_ESM_CAP_3__ESM_15P9G_MASK 0x00080000L
++//PCIE_ESM_CAP_4
++#define PCIE_ESM_CAP_4__ESM_16P0G__SHIFT 0x0
++#define PCIE_ESM_CAP_4__ESM_16P1G__SHIFT 0x1
++#define PCIE_ESM_CAP_4__ESM_16P2G__SHIFT 0x2
++#define PCIE_ESM_CAP_4__ESM_16P3G__SHIFT 0x3
++#define PCIE_ESM_CAP_4__ESM_16P4G__SHIFT 0x4
++#define PCIE_ESM_CAP_4__ESM_16P5G__SHIFT 0x5
++#define PCIE_ESM_CAP_4__ESM_16P6G__SHIFT 0x6
++#define PCIE_ESM_CAP_4__ESM_16P7G__SHIFT 0x7
++#define PCIE_ESM_CAP_4__ESM_16P8G__SHIFT 0x8
++#define PCIE_ESM_CAP_4__ESM_16P9G__SHIFT 0x9
++#define PCIE_ESM_CAP_4__ESM_17P0G__SHIFT 0xa
++#define PCIE_ESM_CAP_4__ESM_17P1G__SHIFT 0xb
++#define PCIE_ESM_CAP_4__ESM_17P2G__SHIFT 0xc
++#define PCIE_ESM_CAP_4__ESM_17P3G__SHIFT 0xd
++#define PCIE_ESM_CAP_4__ESM_17P4G__SHIFT 0xe
++#define PCIE_ESM_CAP_4__ESM_17P5G__SHIFT 0xf
++#define PCIE_ESM_CAP_4__ESM_17P6G__SHIFT 0x10
++#define PCIE_ESM_CAP_4__ESM_17P7G__SHIFT 0x11
++#define PCIE_ESM_CAP_4__ESM_17P8G__SHIFT 0x12
++#define PCIE_ESM_CAP_4__ESM_17P9G__SHIFT 0x13
++#define PCIE_ESM_CAP_4__ESM_18P0G__SHIFT 0x14
++#define PCIE_ESM_CAP_4__ESM_18P1G__SHIFT 0x15
++#define PCIE_ESM_CAP_4__ESM_18P2G__SHIFT 0x16
++#define PCIE_ESM_CAP_4__ESM_18P3G__SHIFT 0x17
++#define PCIE_ESM_CAP_4__ESM_18P4G__SHIFT 0x18
++#define PCIE_ESM_CAP_4__ESM_18P5G__SHIFT 0x19
++#define PCIE_ESM_CAP_4__ESM_18P6G__SHIFT 0x1a
++#define PCIE_ESM_CAP_4__ESM_18P7G__SHIFT 0x1b
++#define PCIE_ESM_CAP_4__ESM_18P8G__SHIFT 0x1c
++#define PCIE_ESM_CAP_4__ESM_18P9G__SHIFT 0x1d
++#define PCIE_ESM_CAP_4__ESM_16P0G_MASK 0x00000001L
++#define PCIE_ESM_CAP_4__ESM_16P1G_MASK 0x00000002L
++#define PCIE_ESM_CAP_4__ESM_16P2G_MASK 0x00000004L
++#define PCIE_ESM_CAP_4__ESM_16P3G_MASK 0x00000008L
++#define PCIE_ESM_CAP_4__ESM_16P4G_MASK 0x00000010L
++#define PCIE_ESM_CAP_4__ESM_16P5G_MASK 0x00000020L
++#define PCIE_ESM_CAP_4__ESM_16P6G_MASK 0x00000040L
++#define PCIE_ESM_CAP_4__ESM_16P7G_MASK 0x00000080L
++#define PCIE_ESM_CAP_4__ESM_16P8G_MASK 0x00000100L
++#define PCIE_ESM_CAP_4__ESM_16P9G_MASK 0x00000200L
++#define PCIE_ESM_CAP_4__ESM_17P0G_MASK 0x00000400L
++#define PCIE_ESM_CAP_4__ESM_17P1G_MASK 0x00000800L
++#define PCIE_ESM_CAP_4__ESM_17P2G_MASK 0x00001000L
++#define PCIE_ESM_CAP_4__ESM_17P3G_MASK 0x00002000L
++#define PCIE_ESM_CAP_4__ESM_17P4G_MASK 0x00004000L
++#define PCIE_ESM_CAP_4__ESM_17P5G_MASK 0x00008000L
++#define PCIE_ESM_CAP_4__ESM_17P6G_MASK 0x00010000L
++#define PCIE_ESM_CAP_4__ESM_17P7G_MASK 0x00020000L
++#define PCIE_ESM_CAP_4__ESM_17P8G_MASK 0x00040000L
++#define PCIE_ESM_CAP_4__ESM_17P9G_MASK 0x00080000L
++#define PCIE_ESM_CAP_4__ESM_18P0G_MASK 0x00100000L
++#define PCIE_ESM_CAP_4__ESM_18P1G_MASK 0x00200000L
++#define PCIE_ESM_CAP_4__ESM_18P2G_MASK 0x00400000L
++#define PCIE_ESM_CAP_4__ESM_18P3G_MASK 0x00800000L
++#define PCIE_ESM_CAP_4__ESM_18P4G_MASK 0x01000000L
++#define PCIE_ESM_CAP_4__ESM_18P5G_MASK 0x02000000L
++#define PCIE_ESM_CAP_4__ESM_18P6G_MASK 0x04000000L
++#define PCIE_ESM_CAP_4__ESM_18P7G_MASK 0x08000000L
++#define PCIE_ESM_CAP_4__ESM_18P8G_MASK 0x10000000L
++#define PCIE_ESM_CAP_4__ESM_18P9G_MASK 0x20000000L
++//PCIE_ESM_CAP_5
++#define PCIE_ESM_CAP_5__ESM_19P0G__SHIFT 0x0
++#define PCIE_ESM_CAP_5__ESM_19P1G__SHIFT 0x1
++#define PCIE_ESM_CAP_5__ESM_19P2G__SHIFT 0x2
++#define PCIE_ESM_CAP_5__ESM_19P3G__SHIFT 0x3
++#define PCIE_ESM_CAP_5__ESM_19P4G__SHIFT 0x4
++#define PCIE_ESM_CAP_5__ESM_19P5G__SHIFT 0x5
++#define PCIE_ESM_CAP_5__ESM_19P6G__SHIFT 0x6
++#define PCIE_ESM_CAP_5__ESM_19P7G__SHIFT 0x7
++#define PCIE_ESM_CAP_5__ESM_19P8G__SHIFT 0x8
++#define PCIE_ESM_CAP_5__ESM_19P9G__SHIFT 0x9
++#define PCIE_ESM_CAP_5__ESM_20P0G__SHIFT 0xa
++#define PCIE_ESM_CAP_5__ESM_20P1G__SHIFT 0xb
++#define PCIE_ESM_CAP_5__ESM_20P2G__SHIFT 0xc
++#define PCIE_ESM_CAP_5__ESM_20P3G__SHIFT 0xd
++#define PCIE_ESM_CAP_5__ESM_20P4G__SHIFT 0xe
++#define PCIE_ESM_CAP_5__ESM_20P5G__SHIFT 0xf
++#define PCIE_ESM_CAP_5__ESM_20P6G__SHIFT 0x10
++#define PCIE_ESM_CAP_5__ESM_20P7G__SHIFT 0x11
++#define PCIE_ESM_CAP_5__ESM_20P8G__SHIFT 0x12
++#define PCIE_ESM_CAP_5__ESM_20P9G__SHIFT 0x13
++#define PCIE_ESM_CAP_5__ESM_21P0G__SHIFT 0x14
++#define PCIE_ESM_CAP_5__ESM_21P1G__SHIFT 0x15
++#define PCIE_ESM_CAP_5__ESM_21P2G__SHIFT 0x16
++#define PCIE_ESM_CAP_5__ESM_21P3G__SHIFT 0x17
++#define PCIE_ESM_CAP_5__ESM_21P4G__SHIFT 0x18
++#define PCIE_ESM_CAP_5__ESM_21P5G__SHIFT 0x19
++#define PCIE_ESM_CAP_5__ESM_21P6G__SHIFT 0x1a
++#define PCIE_ESM_CAP_5__ESM_21P7G__SHIFT 0x1b
++#define PCIE_ESM_CAP_5__ESM_21P8G__SHIFT 0x1c
++#define PCIE_ESM_CAP_5__ESM_21P9G__SHIFT 0x1d
++#define PCIE_ESM_CAP_5__ESM_19P0G_MASK 0x00000001L
++#define PCIE_ESM_CAP_5__ESM_19P1G_MASK 0x00000002L
++#define PCIE_ESM_CAP_5__ESM_19P2G_MASK 0x00000004L
++#define PCIE_ESM_CAP_5__ESM_19P3G_MASK 0x00000008L
++#define PCIE_ESM_CAP_5__ESM_19P4G_MASK 0x00000010L
++#define PCIE_ESM_CAP_5__ESM_19P5G_MASK 0x00000020L
++#define PCIE_ESM_CAP_5__ESM_19P6G_MASK 0x00000040L
++#define PCIE_ESM_CAP_5__ESM_19P7G_MASK 0x00000080L
++#define PCIE_ESM_CAP_5__ESM_19P8G_MASK 0x00000100L
++#define PCIE_ESM_CAP_5__ESM_19P9G_MASK 0x00000200L
++#define PCIE_ESM_CAP_5__ESM_20P0G_MASK 0x00000400L
++#define PCIE_ESM_CAP_5__ESM_20P1G_MASK 0x00000800L
++#define PCIE_ESM_CAP_5__ESM_20P2G_MASK 0x00001000L
++#define PCIE_ESM_CAP_5__ESM_20P3G_MASK 0x00002000L
++#define PCIE_ESM_CAP_5__ESM_20P4G_MASK 0x00004000L
++#define PCIE_ESM_CAP_5__ESM_20P5G_MASK 0x00008000L
++#define PCIE_ESM_CAP_5__ESM_20P6G_MASK 0x00010000L
++#define PCIE_ESM_CAP_5__ESM_20P7G_MASK 0x00020000L
++#define PCIE_ESM_CAP_5__ESM_20P8G_MASK 0x00040000L
++#define PCIE_ESM_CAP_5__ESM_20P9G_MASK 0x00080000L
++#define PCIE_ESM_CAP_5__ESM_21P0G_MASK 0x00100000L
++#define PCIE_ESM_CAP_5__ESM_21P1G_MASK 0x00200000L
++#define PCIE_ESM_CAP_5__ESM_21P2G_MASK 0x00400000L
++#define PCIE_ESM_CAP_5__ESM_21P3G_MASK 0x00800000L
++#define PCIE_ESM_CAP_5__ESM_21P4G_MASK 0x01000000L
++#define PCIE_ESM_CAP_5__ESM_21P5G_MASK 0x02000000L
++#define PCIE_ESM_CAP_5__ESM_21P6G_MASK 0x04000000L
++#define PCIE_ESM_CAP_5__ESM_21P7G_MASK 0x08000000L
++#define PCIE_ESM_CAP_5__ESM_21P8G_MASK 0x10000000L
++#define PCIE_ESM_CAP_5__ESM_21P9G_MASK 0x20000000L
++//PCIE_ESM_CAP_6
++#define PCIE_ESM_CAP_6__ESM_22P0G__SHIFT 0x0
++#define PCIE_ESM_CAP_6__ESM_22P1G__SHIFT 0x1
++#define PCIE_ESM_CAP_6__ESM_22P2G__SHIFT 0x2
++#define PCIE_ESM_CAP_6__ESM_22P3G__SHIFT 0x3
++#define PCIE_ESM_CAP_6__ESM_22P4G__SHIFT 0x4
++#define PCIE_ESM_CAP_6__ESM_22P5G__SHIFT 0x5
++#define PCIE_ESM_CAP_6__ESM_22P6G__SHIFT 0x6
++#define PCIE_ESM_CAP_6__ESM_22P7G__SHIFT 0x7
++#define PCIE_ESM_CAP_6__ESM_22P8G__SHIFT 0x8
++#define PCIE_ESM_CAP_6__ESM_22P9G__SHIFT 0x9
++#define PCIE_ESM_CAP_6__ESM_23P0G__SHIFT 0xa
++#define PCIE_ESM_CAP_6__ESM_23P1G__SHIFT 0xb
++#define PCIE_ESM_CAP_6__ESM_23P2G__SHIFT 0xc
++#define PCIE_ESM_CAP_6__ESM_23P3G__SHIFT 0xd
++#define PCIE_ESM_CAP_6__ESM_23P4G__SHIFT 0xe
++#define PCIE_ESM_CAP_6__ESM_23P5G__SHIFT 0xf
++#define PCIE_ESM_CAP_6__ESM_23P6G__SHIFT 0x10
++#define PCIE_ESM_CAP_6__ESM_23P7G__SHIFT 0x11
++#define PCIE_ESM_CAP_6__ESM_23P8G__SHIFT 0x12
++#define PCIE_ESM_CAP_6__ESM_23P9G__SHIFT 0x13
++#define PCIE_ESM_CAP_6__ESM_24P0G__SHIFT 0x14
++#define PCIE_ESM_CAP_6__ESM_24P1G__SHIFT 0x15
++#define PCIE_ESM_CAP_6__ESM_24P2G__SHIFT 0x16
++#define PCIE_ESM_CAP_6__ESM_24P3G__SHIFT 0x17
++#define PCIE_ESM_CAP_6__ESM_24P4G__SHIFT 0x18
++#define PCIE_ESM_CAP_6__ESM_24P5G__SHIFT 0x19
++#define PCIE_ESM_CAP_6__ESM_24P6G__SHIFT 0x1a
++#define PCIE_ESM_CAP_6__ESM_24P7G__SHIFT 0x1b
++#define PCIE_ESM_CAP_6__ESM_24P8G__SHIFT 0x1c
++#define PCIE_ESM_CAP_6__ESM_24P9G__SHIFT 0x1d
++#define PCIE_ESM_CAP_6__ESM_22P0G_MASK 0x00000001L
++#define PCIE_ESM_CAP_6__ESM_22P1G_MASK 0x00000002L
++#define PCIE_ESM_CAP_6__ESM_22P2G_MASK 0x00000004L
++#define PCIE_ESM_CAP_6__ESM_22P3G_MASK 0x00000008L
++#define PCIE_ESM_CAP_6__ESM_22P4G_MASK 0x00000010L
++#define PCIE_ESM_CAP_6__ESM_22P5G_MASK 0x00000020L
++#define PCIE_ESM_CAP_6__ESM_22P6G_MASK 0x00000040L
++#define PCIE_ESM_CAP_6__ESM_22P7G_MASK 0x00000080L
++#define PCIE_ESM_CAP_6__ESM_22P8G_MASK 0x00000100L
++#define PCIE_ESM_CAP_6__ESM_22P9G_MASK 0x00000200L
++#define PCIE_ESM_CAP_6__ESM_23P0G_MASK 0x00000400L
++#define PCIE_ESM_CAP_6__ESM_23P1G_MASK 0x00000800L
++#define PCIE_ESM_CAP_6__ESM_23P2G_MASK 0x00001000L
++#define PCIE_ESM_CAP_6__ESM_23P3G_MASK 0x00002000L
++#define PCIE_ESM_CAP_6__ESM_23P4G_MASK 0x00004000L
++#define PCIE_ESM_CAP_6__ESM_23P5G_MASK 0x00008000L
++#define PCIE_ESM_CAP_6__ESM_23P6G_MASK 0x00010000L
++#define PCIE_ESM_CAP_6__ESM_23P7G_MASK 0x00020000L
++#define PCIE_ESM_CAP_6__ESM_23P8G_MASK 0x00040000L
++#define PCIE_ESM_CAP_6__ESM_23P9G_MASK 0x00080000L
++#define PCIE_ESM_CAP_6__ESM_24P0G_MASK 0x00100000L
++#define PCIE_ESM_CAP_6__ESM_24P1G_MASK 0x00200000L
++#define PCIE_ESM_CAP_6__ESM_24P2G_MASK 0x00400000L
++#define PCIE_ESM_CAP_6__ESM_24P3G_MASK 0x00800000L
++#define PCIE_ESM_CAP_6__ESM_24P4G_MASK 0x01000000L
++#define PCIE_ESM_CAP_6__ESM_24P5G_MASK 0x02000000L
++#define PCIE_ESM_CAP_6__ESM_24P6G_MASK 0x04000000L
++#define PCIE_ESM_CAP_6__ESM_24P7G_MASK 0x08000000L
++#define PCIE_ESM_CAP_6__ESM_24P8G_MASK 0x10000000L
++#define PCIE_ESM_CAP_6__ESM_24P9G_MASK 0x20000000L
++//PCIE_ESM_CAP_7
++#define PCIE_ESM_CAP_7__ESM_25P0G__SHIFT 0x0
++#define PCIE_ESM_CAP_7__ESM_25P1G__SHIFT 0x1
++#define PCIE_ESM_CAP_7__ESM_25P2G__SHIFT 0x2
++#define PCIE_ESM_CAP_7__ESM_25P3G__SHIFT 0x3
++#define PCIE_ESM_CAP_7__ESM_25P4G__SHIFT 0x4
++#define PCIE_ESM_CAP_7__ESM_25P5G__SHIFT 0x5
++#define PCIE_ESM_CAP_7__ESM_25P6G__SHIFT 0x6
++#define PCIE_ESM_CAP_7__ESM_25P7G__SHIFT 0x7
++#define PCIE_ESM_CAP_7__ESM_25P8G__SHIFT 0x8
++#define PCIE_ESM_CAP_7__ESM_25P9G__SHIFT 0x9
++#define PCIE_ESM_CAP_7__ESM_26P0G__SHIFT 0xa
++#define PCIE_ESM_CAP_7__ESM_26P1G__SHIFT 0xb
++#define PCIE_ESM_CAP_7__ESM_26P2G__SHIFT 0xc
++#define PCIE_ESM_CAP_7__ESM_26P3G__SHIFT 0xd
++#define PCIE_ESM_CAP_7__ESM_26P4G__SHIFT 0xe
++#define PCIE_ESM_CAP_7__ESM_26P5G__SHIFT 0xf
++#define PCIE_ESM_CAP_7__ESM_26P6G__SHIFT 0x10
++#define PCIE_ESM_CAP_7__ESM_26P7G__SHIFT 0x11
++#define PCIE_ESM_CAP_7__ESM_26P8G__SHIFT 0x12
++#define PCIE_ESM_CAP_7__ESM_26P9G__SHIFT 0x13
++#define PCIE_ESM_CAP_7__ESM_27P0G__SHIFT 0x14
++#define PCIE_ESM_CAP_7__ESM_27P1G__SHIFT 0x15
++#define PCIE_ESM_CAP_7__ESM_27P2G__SHIFT 0x16
++#define PCIE_ESM_CAP_7__ESM_27P3G__SHIFT 0x17
++#define PCIE_ESM_CAP_7__ESM_27P4G__SHIFT 0x18
++#define PCIE_ESM_CAP_7__ESM_27P5G__SHIFT 0x19
++#define PCIE_ESM_CAP_7__ESM_27P6G__SHIFT 0x1a
++#define PCIE_ESM_CAP_7__ESM_27P7G__SHIFT 0x1b
++#define PCIE_ESM_CAP_7__ESM_27P8G__SHIFT 0x1c
++#define PCIE_ESM_CAP_7__ESM_27P9G__SHIFT 0x1d
++#define PCIE_ESM_CAP_7__ESM_28P0G__SHIFT 0x1e
++#define PCIE_ESM_CAP_7__ESM_25P0G_MASK 0x00000001L
++#define PCIE_ESM_CAP_7__ESM_25P1G_MASK 0x00000002L
++#define PCIE_ESM_CAP_7__ESM_25P2G_MASK 0x00000004L
++#define PCIE_ESM_CAP_7__ESM_25P3G_MASK 0x00000008L
++#define PCIE_ESM_CAP_7__ESM_25P4G_MASK 0x00000010L
++#define PCIE_ESM_CAP_7__ESM_25P5G_MASK 0x00000020L
++#define PCIE_ESM_CAP_7__ESM_25P6G_MASK 0x00000040L
++#define PCIE_ESM_CAP_7__ESM_25P7G_MASK 0x00000080L
++#define PCIE_ESM_CAP_7__ESM_25P8G_MASK 0x00000100L
++#define PCIE_ESM_CAP_7__ESM_25P9G_MASK 0x00000200L
++#define PCIE_ESM_CAP_7__ESM_26P0G_MASK 0x00000400L
++#define PCIE_ESM_CAP_7__ESM_26P1G_MASK 0x00000800L
++#define PCIE_ESM_CAP_7__ESM_26P2G_MASK 0x00001000L
++#define PCIE_ESM_CAP_7__ESM_26P3G_MASK 0x00002000L
++#define PCIE_ESM_CAP_7__ESM_26P4G_MASK 0x00004000L
++#define PCIE_ESM_CAP_7__ESM_26P5G_MASK 0x00008000L
++#define PCIE_ESM_CAP_7__ESM_26P6G_MASK 0x00010000L
++#define PCIE_ESM_CAP_7__ESM_26P7G_MASK 0x00020000L
++#define PCIE_ESM_CAP_7__ESM_26P8G_MASK 0x00040000L
++#define PCIE_ESM_CAP_7__ESM_26P9G_MASK 0x00080000L
++#define PCIE_ESM_CAP_7__ESM_27P0G_MASK 0x00100000L
++#define PCIE_ESM_CAP_7__ESM_27P1G_MASK 0x00200000L
++#define PCIE_ESM_CAP_7__ESM_27P2G_MASK 0x00400000L
++#define PCIE_ESM_CAP_7__ESM_27P3G_MASK 0x00800000L
++#define PCIE_ESM_CAP_7__ESM_27P4G_MASK 0x01000000L
++#define PCIE_ESM_CAP_7__ESM_27P5G_MASK 0x02000000L
++#define PCIE_ESM_CAP_7__ESM_27P6G_MASK 0x04000000L
++#define PCIE_ESM_CAP_7__ESM_27P7G_MASK 0x08000000L
++#define PCIE_ESM_CAP_7__ESM_27P8G_MASK 0x10000000L
++#define PCIE_ESM_CAP_7__ESM_27P9G_MASK 0x20000000L
++#define PCIE_ESM_CAP_7__ESM_28P0G_MASK 0x40000000L
++//PSWUSCFG0_PCIE_DLF_ENH_CAP_LIST
++#define PSWUSCFG0_PCIE_DLF_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define PSWUSCFG0_PCIE_DLF_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define PSWUSCFG0_PCIE_DLF_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define PSWUSCFG0_PCIE_DLF_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define PSWUSCFG0_PCIE_DLF_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define PSWUSCFG0_PCIE_DLF_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//PSWUSCFG0_DATA_LINK_FEATURE_CAP
++#define PSWUSCFG0_DATA_LINK_FEATURE_CAP__LOCAL_DLF_SCALED_FLOW_CONTROL_SUPPORTED__SHIFT 0x0
++#define PSWUSCFG0_DATA_LINK_FEATURE_CAP__LOCAL_DLF_SUPPORTED_22_1__SHIFT 0x1
++#define PSWUSCFG0_DATA_LINK_FEATURE_CAP__DLF_EXCHANGE_ENABLE__SHIFT 0x1f
++#define PSWUSCFG0_DATA_LINK_FEATURE_CAP__LOCAL_DLF_SCALED_FLOW_CONTROL_SUPPORTED_MASK 0x00000001L
++#define PSWUSCFG0_DATA_LINK_FEATURE_CAP__LOCAL_DLF_SUPPORTED_22_1_MASK 0x007FFFFEL
++#define PSWUSCFG0_DATA_LINK_FEATURE_CAP__DLF_EXCHANGE_ENABLE_MASK 0x80000000L
++//PSWUSCFG0_DATA_LINK_FEATURE_STATUS
++#define PSWUSCFG0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED__SHIFT 0x0
++#define PSWUSCFG0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_VALID__SHIFT 0x1f
++#define PSWUSCFG0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_MASK 0x007FFFFFL
++#define PSWUSCFG0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_VALID_MASK 0x80000000L
++//PCIE_PHY_16GT_ENH_CAP_LIST
++#define PCIE_PHY_16GT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define PCIE_PHY_16GT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define PCIE_PHY_16GT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define PCIE_PHY_16GT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define PCIE_PHY_16GT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define PCIE_PHY_16GT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//PSWUSCFG0_LINK_CAP_16GT
++#define PSWUSCFG0_LINK_CAP_16GT__RESERVED__SHIFT 0x0
++#define PSWUSCFG0_LINK_CAP_16GT__RESERVED_MASK 0xFFFFFFFFL
++//PSWUSCFG0_LINK_CNTL_16GT
++#define PSWUSCFG0_LINK_CNTL_16GT__RESERVED__SHIFT 0x0
++#define PSWUSCFG0_LINK_CNTL_16GT__RESERVED_MASK 0xFFFFFFFFL
++//PSWUSCFG0_LINK_STATUS_16GT
++#define PSWUSCFG0_LINK_STATUS_16GT__EQUALIZATION_COMPLETE_16GT__SHIFT 0x0
++#define PSWUSCFG0_LINK_STATUS_16GT__EQUALIZATION_PHASE1_SUCCESS_16GT__SHIFT 0x1
++#define PSWUSCFG0_LINK_STATUS_16GT__EQUALIZATION_PHASE2_SUCCESS_16GT__SHIFT 0x2
++#define PSWUSCFG0_LINK_STATUS_16GT__EQUALIZATION_PHASE3_SUCCESS_16GT__SHIFT 0x3
++#define PSWUSCFG0_LINK_STATUS_16GT__LINK_EQUALIZATION_REQUEST_16GT__SHIFT 0x4
++#define PSWUSCFG0_LINK_STATUS_16GT__EQUALIZATION_COMPLETE_16GT_MASK 0x00000001L
++#define PSWUSCFG0_LINK_STATUS_16GT__EQUALIZATION_PHASE1_SUCCESS_16GT_MASK 0x00000002L
++#define PSWUSCFG0_LINK_STATUS_16GT__EQUALIZATION_PHASE2_SUCCESS_16GT_MASK 0x00000004L
++#define PSWUSCFG0_LINK_STATUS_16GT__EQUALIZATION_PHASE3_SUCCESS_16GT_MASK 0x00000008L
++#define PSWUSCFG0_LINK_STATUS_16GT__LINK_EQUALIZATION_REQUEST_16GT_MASK 0x00000010L
++//PSWUSCFG0_LOCAL_PARITY_MISMATCH_STATUS_16GT
++#define PSWUSCFG0_LOCAL_PARITY_MISMATCH_STATUS_16GT__LOCAL_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0
++#define PSWUSCFG0_LOCAL_PARITY_MISMATCH_STATUS_16GT__LOCAL_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL
++//PSWUSCFG0_RTM1_PARITY_MISMATCH_STATUS_16GT
++#define PSWUSCFG0_RTM1_PARITY_MISMATCH_STATUS_16GT__RTM1_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0
++#define PSWUSCFG0_RTM1_PARITY_MISMATCH_STATUS_16GT__RTM1_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL
++//PSWUSCFG0_RTM2_PARITY_MISMATCH_STATUS_16GT
++#define PSWUSCFG0_RTM2_PARITY_MISMATCH_STATUS_16GT__RTM2_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0
++#define PSWUSCFG0_RTM2_PARITY_MISMATCH_STATUS_16GT__RTM2_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL
++//PSWUSCFG0_LANE_0_EQUALIZATION_CNTL_16GT
++#define PSWUSCFG0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define PSWUSCFG0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_USP_16GT_TX_PRESET__SHIFT 0x4
++#define PSWUSCFG0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define PSWUSCFG0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_USP_16GT_TX_PRESET_MASK 0xF0L
++//PSWUSCFG0_LANE_1_EQUALIZATION_CNTL_16GT
++#define PSWUSCFG0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define PSWUSCFG0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_USP_16GT_TX_PRESET__SHIFT 0x4
++#define PSWUSCFG0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define PSWUSCFG0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_USP_16GT_TX_PRESET_MASK 0xF0L
++//PSWUSCFG0_LANE_2_EQUALIZATION_CNTL_16GT
++#define PSWUSCFG0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define PSWUSCFG0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_USP_16GT_TX_PRESET__SHIFT 0x4
++#define PSWUSCFG0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define PSWUSCFG0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_USP_16GT_TX_PRESET_MASK 0xF0L
++//PSWUSCFG0_LANE_3_EQUALIZATION_CNTL_16GT
++#define PSWUSCFG0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define PSWUSCFG0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_USP_16GT_TX_PRESET__SHIFT 0x4
++#define PSWUSCFG0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define PSWUSCFG0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_USP_16GT_TX_PRESET_MASK 0xF0L
++//PSWUSCFG0_LANE_4_EQUALIZATION_CNTL_16GT
++#define PSWUSCFG0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define PSWUSCFG0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_USP_16GT_TX_PRESET__SHIFT 0x4
++#define PSWUSCFG0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define PSWUSCFG0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_USP_16GT_TX_PRESET_MASK 0xF0L
++//PSWUSCFG0_LANE_5_EQUALIZATION_CNTL_16GT
++#define PSWUSCFG0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define PSWUSCFG0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_USP_16GT_TX_PRESET__SHIFT 0x4
++#define PSWUSCFG0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define PSWUSCFG0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_USP_16GT_TX_PRESET_MASK 0xF0L
++//PSWUSCFG0_LANE_6_EQUALIZATION_CNTL_16GT
++#define PSWUSCFG0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define PSWUSCFG0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_USP_16GT_TX_PRESET__SHIFT 0x4
++#define PSWUSCFG0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define PSWUSCFG0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_USP_16GT_TX_PRESET_MASK 0xF0L
++//PSWUSCFG0_LANE_7_EQUALIZATION_CNTL_16GT
++#define PSWUSCFG0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define PSWUSCFG0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_USP_16GT_TX_PRESET__SHIFT 0x4
++#define PSWUSCFG0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define PSWUSCFG0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_USP_16GT_TX_PRESET_MASK 0xF0L
++//PSWUSCFG0_LANE_8_EQUALIZATION_CNTL_16GT
++#define PSWUSCFG0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define PSWUSCFG0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_USP_16GT_TX_PRESET__SHIFT 0x4
++#define PSWUSCFG0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define PSWUSCFG0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_USP_16GT_TX_PRESET_MASK 0xF0L
++//PSWUSCFG0_LANE_9_EQUALIZATION_CNTL_16GT
++#define PSWUSCFG0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define PSWUSCFG0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_USP_16GT_TX_PRESET__SHIFT 0x4
++#define PSWUSCFG0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define PSWUSCFG0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_USP_16GT_TX_PRESET_MASK 0xF0L
++//PSWUSCFG0_LANE_10_EQUALIZATION_CNTL_16GT
++#define PSWUSCFG0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define PSWUSCFG0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_USP_16GT_TX_PRESET__SHIFT 0x4
++#define PSWUSCFG0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define PSWUSCFG0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_USP_16GT_TX_PRESET_MASK 0xF0L
++//PSWUSCFG0_LANE_11_EQUALIZATION_CNTL_16GT
++#define PSWUSCFG0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define PSWUSCFG0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_USP_16GT_TX_PRESET__SHIFT 0x4
++#define PSWUSCFG0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define PSWUSCFG0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_USP_16GT_TX_PRESET_MASK 0xF0L
++//PSWUSCFG0_LANE_12_EQUALIZATION_CNTL_16GT
++#define PSWUSCFG0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define PSWUSCFG0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_USP_16GT_TX_PRESET__SHIFT 0x4
++#define PSWUSCFG0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define PSWUSCFG0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_USP_16GT_TX_PRESET_MASK 0xF0L
++//PSWUSCFG0_LANE_13_EQUALIZATION_CNTL_16GT
++#define PSWUSCFG0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define PSWUSCFG0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_USP_16GT_TX_PRESET__SHIFT 0x4
++#define PSWUSCFG0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define PSWUSCFG0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_USP_16GT_TX_PRESET_MASK 0xF0L
++//PSWUSCFG0_LANE_14_EQUALIZATION_CNTL_16GT
++#define PSWUSCFG0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define PSWUSCFG0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_USP_16GT_TX_PRESET__SHIFT 0x4
++#define PSWUSCFG0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define PSWUSCFG0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_USP_16GT_TX_PRESET_MASK 0xF0L
++//PSWUSCFG0_LANE_15_EQUALIZATION_CNTL_16GT
++#define PSWUSCFG0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define PSWUSCFG0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_USP_16GT_TX_PRESET__SHIFT 0x4
++#define PSWUSCFG0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define PSWUSCFG0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_USP_16GT_TX_PRESET_MASK 0xF0L
++//PCIE_MARGINING_ENH_CAP_LIST
++#define PCIE_MARGINING_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define PCIE_MARGINING_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define PCIE_MARGINING_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define PCIE_MARGINING_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define PCIE_MARGINING_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define PCIE_MARGINING_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//PSWUSCFG0_MARGINING_PORT_CAP
++#define PSWUSCFG0_MARGINING_PORT_CAP__MARGINING_USES_SOFTWARE__SHIFT 0x0
++#define PSWUSCFG0_MARGINING_PORT_CAP__MARGINING_USES_SOFTWARE_MASK 0x0001L
++//PSWUSCFG0_MARGINING_PORT_STATUS
++#define PSWUSCFG0_MARGINING_PORT_STATUS__MARGINING_READY__SHIFT 0x0
++#define PSWUSCFG0_MARGINING_PORT_STATUS__MARGINING_SOFTWARE_READY__SHIFT 0x1
++#define PSWUSCFG0_MARGINING_PORT_STATUS__MARGINING_READY_MASK 0x0001L
++#define PSWUSCFG0_MARGINING_PORT_STATUS__MARGINING_SOFTWARE_READY_MASK 0x0002L
++//PSWUSCFG0_LANE_0_MARGINING_LANE_CNTL
++#define PSWUSCFG0_LANE_0_MARGINING_LANE_CNTL__LANE_0_RECEIVER_NUMBER__SHIFT 0x0
++#define PSWUSCFG0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_TYPE__SHIFT 0x3
++#define PSWUSCFG0_LANE_0_MARGINING_LANE_CNTL__LANE_0_USAGE_MODEL__SHIFT 0x6
++#define PSWUSCFG0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_PAYLOAD__SHIFT 0x8
++#define PSWUSCFG0_LANE_0_MARGINING_LANE_CNTL__LANE_0_RECEIVER_NUMBER_MASK 0x0007L
++#define PSWUSCFG0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_TYPE_MASK 0x0038L
++#define PSWUSCFG0_LANE_0_MARGINING_LANE_CNTL__LANE_0_USAGE_MODEL_MASK 0x0040L
++#define PSWUSCFG0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_PAYLOAD_MASK 0xFF00L
++//PSWUSCFG0_LANE_0_MARGINING_LANE_STATUS
++#define PSWUSCFG0_LANE_0_MARGINING_LANE_STATUS__LANE_0_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define PSWUSCFG0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define PSWUSCFG0_LANE_0_MARGINING_LANE_STATUS__LANE_0_USAGE_MODEL_STATUS__SHIFT 0x6
++#define PSWUSCFG0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define PSWUSCFG0_LANE_0_MARGINING_LANE_STATUS__LANE_0_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define PSWUSCFG0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define PSWUSCFG0_LANE_0_MARGINING_LANE_STATUS__LANE_0_USAGE_MODEL_STATUS_MASK 0x0040L
++#define PSWUSCFG0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//PSWUSCFG0_LANE_1_MARGINING_LANE_CNTL
++#define PSWUSCFG0_LANE_1_MARGINING_LANE_CNTL__LANE_1_RECEIVER_NUMBER__SHIFT 0x0
++#define PSWUSCFG0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_TYPE__SHIFT 0x3
++#define PSWUSCFG0_LANE_1_MARGINING_LANE_CNTL__LANE_1_USAGE_MODEL__SHIFT 0x6
++#define PSWUSCFG0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_PAYLOAD__SHIFT 0x8
++#define PSWUSCFG0_LANE_1_MARGINING_LANE_CNTL__LANE_1_RECEIVER_NUMBER_MASK 0x0007L
++#define PSWUSCFG0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_TYPE_MASK 0x0038L
++#define PSWUSCFG0_LANE_1_MARGINING_LANE_CNTL__LANE_1_USAGE_MODEL_MASK 0x0040L
++#define PSWUSCFG0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_PAYLOAD_MASK 0xFF00L
++//PSWUSCFG0_LANE_1_MARGINING_LANE_STATUS
++#define PSWUSCFG0_LANE_1_MARGINING_LANE_STATUS__LANE_1_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define PSWUSCFG0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define PSWUSCFG0_LANE_1_MARGINING_LANE_STATUS__LANE_1_USAGE_MODEL_STATUS__SHIFT 0x6
++#define PSWUSCFG0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define PSWUSCFG0_LANE_1_MARGINING_LANE_STATUS__LANE_1_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define PSWUSCFG0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define PSWUSCFG0_LANE_1_MARGINING_LANE_STATUS__LANE_1_USAGE_MODEL_STATUS_MASK 0x0040L
++#define PSWUSCFG0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//PSWUSCFG0_LANE_2_MARGINING_LANE_CNTL
++#define PSWUSCFG0_LANE_2_MARGINING_LANE_CNTL__LANE_2_RECEIVER_NUMBER__SHIFT 0x0
++#define PSWUSCFG0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_TYPE__SHIFT 0x3
++#define PSWUSCFG0_LANE_2_MARGINING_LANE_CNTL__LANE_2_USAGE_MODEL__SHIFT 0x6
++#define PSWUSCFG0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_PAYLOAD__SHIFT 0x8
++#define PSWUSCFG0_LANE_2_MARGINING_LANE_CNTL__LANE_2_RECEIVER_NUMBER_MASK 0x0007L
++#define PSWUSCFG0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_TYPE_MASK 0x0038L
++#define PSWUSCFG0_LANE_2_MARGINING_LANE_CNTL__LANE_2_USAGE_MODEL_MASK 0x0040L
++#define PSWUSCFG0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_PAYLOAD_MASK 0xFF00L
++//PSWUSCFG0_LANE_2_MARGINING_LANE_STATUS
++#define PSWUSCFG0_LANE_2_MARGINING_LANE_STATUS__LANE_2_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define PSWUSCFG0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define PSWUSCFG0_LANE_2_MARGINING_LANE_STATUS__LANE_2_USAGE_MODEL_STATUS__SHIFT 0x6
++#define PSWUSCFG0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define PSWUSCFG0_LANE_2_MARGINING_LANE_STATUS__LANE_2_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define PSWUSCFG0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define PSWUSCFG0_LANE_2_MARGINING_LANE_STATUS__LANE_2_USAGE_MODEL_STATUS_MASK 0x0040L
++#define PSWUSCFG0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//PSWUSCFG0_LANE_3_MARGINING_LANE_CNTL
++#define PSWUSCFG0_LANE_3_MARGINING_LANE_CNTL__LANE_3_RECEIVER_NUMBER__SHIFT 0x0
++#define PSWUSCFG0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_TYPE__SHIFT 0x3
++#define PSWUSCFG0_LANE_3_MARGINING_LANE_CNTL__LANE_3_USAGE_MODEL__SHIFT 0x6
++#define PSWUSCFG0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_PAYLOAD__SHIFT 0x8
++#define PSWUSCFG0_LANE_3_MARGINING_LANE_CNTL__LANE_3_RECEIVER_NUMBER_MASK 0x0007L
++#define PSWUSCFG0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_TYPE_MASK 0x0038L
++#define PSWUSCFG0_LANE_3_MARGINING_LANE_CNTL__LANE_3_USAGE_MODEL_MASK 0x0040L
++#define PSWUSCFG0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_PAYLOAD_MASK 0xFF00L
++//PSWUSCFG0_LANE_3_MARGINING_LANE_STATUS
++#define PSWUSCFG0_LANE_3_MARGINING_LANE_STATUS__LANE_3_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define PSWUSCFG0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define PSWUSCFG0_LANE_3_MARGINING_LANE_STATUS__LANE_3_USAGE_MODEL_STATUS__SHIFT 0x6
++#define PSWUSCFG0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define PSWUSCFG0_LANE_3_MARGINING_LANE_STATUS__LANE_3_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define PSWUSCFG0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define PSWUSCFG0_LANE_3_MARGINING_LANE_STATUS__LANE_3_USAGE_MODEL_STATUS_MASK 0x0040L
++#define PSWUSCFG0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//PSWUSCFG0_LANE_4_MARGINING_LANE_CNTL
++#define PSWUSCFG0_LANE_4_MARGINING_LANE_CNTL__LANE_4_RECEIVER_NUMBER__SHIFT 0x0
++#define PSWUSCFG0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_TYPE__SHIFT 0x3
++#define PSWUSCFG0_LANE_4_MARGINING_LANE_CNTL__LANE_4_USAGE_MODEL__SHIFT 0x6
++#define PSWUSCFG0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_PAYLOAD__SHIFT 0x8
++#define PSWUSCFG0_LANE_4_MARGINING_LANE_CNTL__LANE_4_RECEIVER_NUMBER_MASK 0x0007L
++#define PSWUSCFG0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_TYPE_MASK 0x0038L
++#define PSWUSCFG0_LANE_4_MARGINING_LANE_CNTL__LANE_4_USAGE_MODEL_MASK 0x0040L
++#define PSWUSCFG0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_PAYLOAD_MASK 0xFF00L
++//PSWUSCFG0_LANE_4_MARGINING_LANE_STATUS
++#define PSWUSCFG0_LANE_4_MARGINING_LANE_STATUS__LANE_4_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define PSWUSCFG0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define PSWUSCFG0_LANE_4_MARGINING_LANE_STATUS__LANE_4_USAGE_MODEL_STATUS__SHIFT 0x6
++#define PSWUSCFG0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define PSWUSCFG0_LANE_4_MARGINING_LANE_STATUS__LANE_4_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define PSWUSCFG0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define PSWUSCFG0_LANE_4_MARGINING_LANE_STATUS__LANE_4_USAGE_MODEL_STATUS_MASK 0x0040L
++#define PSWUSCFG0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//PSWUSCFG0_LANE_5_MARGINING_LANE_CNTL
++#define PSWUSCFG0_LANE_5_MARGINING_LANE_CNTL__LANE_5_RECEIVER_NUMBER__SHIFT 0x0
++#define PSWUSCFG0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_TYPE__SHIFT 0x3
++#define PSWUSCFG0_LANE_5_MARGINING_LANE_CNTL__LANE_5_USAGE_MODEL__SHIFT 0x6
++#define PSWUSCFG0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_PAYLOAD__SHIFT 0x8
++#define PSWUSCFG0_LANE_5_MARGINING_LANE_CNTL__LANE_5_RECEIVER_NUMBER_MASK 0x0007L
++#define PSWUSCFG0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_TYPE_MASK 0x0038L
++#define PSWUSCFG0_LANE_5_MARGINING_LANE_CNTL__LANE_5_USAGE_MODEL_MASK 0x0040L
++#define PSWUSCFG0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_PAYLOAD_MASK 0xFF00L
++//PSWUSCFG0_LANE_5_MARGINING_LANE_STATUS
++#define PSWUSCFG0_LANE_5_MARGINING_LANE_STATUS__LANE_5_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define PSWUSCFG0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define PSWUSCFG0_LANE_5_MARGINING_LANE_STATUS__LANE_5_USAGE_MODEL_STATUS__SHIFT 0x6
++#define PSWUSCFG0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define PSWUSCFG0_LANE_5_MARGINING_LANE_STATUS__LANE_5_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define PSWUSCFG0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define PSWUSCFG0_LANE_5_MARGINING_LANE_STATUS__LANE_5_USAGE_MODEL_STATUS_MASK 0x0040L
++#define PSWUSCFG0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//PSWUSCFG0_LANE_6_MARGINING_LANE_CNTL
++#define PSWUSCFG0_LANE_6_MARGINING_LANE_CNTL__LANE_6_RECEIVER_NUMBER__SHIFT 0x0
++#define PSWUSCFG0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_TYPE__SHIFT 0x3
++#define PSWUSCFG0_LANE_6_MARGINING_LANE_CNTL__LANE_6_USAGE_MODEL__SHIFT 0x6
++#define PSWUSCFG0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_PAYLOAD__SHIFT 0x8
++#define PSWUSCFG0_LANE_6_MARGINING_LANE_CNTL__LANE_6_RECEIVER_NUMBER_MASK 0x0007L
++#define PSWUSCFG0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_TYPE_MASK 0x0038L
++#define PSWUSCFG0_LANE_6_MARGINING_LANE_CNTL__LANE_6_USAGE_MODEL_MASK 0x0040L
++#define PSWUSCFG0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_PAYLOAD_MASK 0xFF00L
++//PSWUSCFG0_LANE_6_MARGINING_LANE_STATUS
++#define PSWUSCFG0_LANE_6_MARGINING_LANE_STATUS__LANE_6_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define PSWUSCFG0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define PSWUSCFG0_LANE_6_MARGINING_LANE_STATUS__LANE_6_USAGE_MODEL_STATUS__SHIFT 0x6
++#define PSWUSCFG0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define PSWUSCFG0_LANE_6_MARGINING_LANE_STATUS__LANE_6_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define PSWUSCFG0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define PSWUSCFG0_LANE_6_MARGINING_LANE_STATUS__LANE_6_USAGE_MODEL_STATUS_MASK 0x0040L
++#define PSWUSCFG0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//PSWUSCFG0_LANE_7_MARGINING_LANE_CNTL
++#define PSWUSCFG0_LANE_7_MARGINING_LANE_CNTL__LANE_7_RECEIVER_NUMBER__SHIFT 0x0
++#define PSWUSCFG0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_TYPE__SHIFT 0x3
++#define PSWUSCFG0_LANE_7_MARGINING_LANE_CNTL__LANE_7_USAGE_MODEL__SHIFT 0x6
++#define PSWUSCFG0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_PAYLOAD__SHIFT 0x8
++#define PSWUSCFG0_LANE_7_MARGINING_LANE_CNTL__LANE_7_RECEIVER_NUMBER_MASK 0x0007L
++#define PSWUSCFG0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_TYPE_MASK 0x0038L
++#define PSWUSCFG0_LANE_7_MARGINING_LANE_CNTL__LANE_7_USAGE_MODEL_MASK 0x0040L
++#define PSWUSCFG0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_PAYLOAD_MASK 0xFF00L
++//PSWUSCFG0_LANE_7_MARGINING_LANE_STATUS
++#define PSWUSCFG0_LANE_7_MARGINING_LANE_STATUS__LANE_7_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define PSWUSCFG0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define PSWUSCFG0_LANE_7_MARGINING_LANE_STATUS__LANE_7_USAGE_MODEL_STATUS__SHIFT 0x6
++#define PSWUSCFG0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define PSWUSCFG0_LANE_7_MARGINING_LANE_STATUS__LANE_7_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define PSWUSCFG0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define PSWUSCFG0_LANE_7_MARGINING_LANE_STATUS__LANE_7_USAGE_MODEL_STATUS_MASK 0x0040L
++#define PSWUSCFG0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//PSWUSCFG0_LANE_8_MARGINING_LANE_CNTL
++#define PSWUSCFG0_LANE_8_MARGINING_LANE_CNTL__LANE_8_RECEIVER_NUMBER__SHIFT 0x0
++#define PSWUSCFG0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_TYPE__SHIFT 0x3
++#define PSWUSCFG0_LANE_8_MARGINING_LANE_CNTL__LANE_8_USAGE_MODEL__SHIFT 0x6
++#define PSWUSCFG0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_PAYLOAD__SHIFT 0x8
++#define PSWUSCFG0_LANE_8_MARGINING_LANE_CNTL__LANE_8_RECEIVER_NUMBER_MASK 0x0007L
++#define PSWUSCFG0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_TYPE_MASK 0x0038L
++#define PSWUSCFG0_LANE_8_MARGINING_LANE_CNTL__LANE_8_USAGE_MODEL_MASK 0x0040L
++#define PSWUSCFG0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_PAYLOAD_MASK 0xFF00L
++//PSWUSCFG0_LANE_8_MARGINING_LANE_STATUS
++#define PSWUSCFG0_LANE_8_MARGINING_LANE_STATUS__LANE_8_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define PSWUSCFG0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define PSWUSCFG0_LANE_8_MARGINING_LANE_STATUS__LANE_8_USAGE_MODEL_STATUS__SHIFT 0x6
++#define PSWUSCFG0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define PSWUSCFG0_LANE_8_MARGINING_LANE_STATUS__LANE_8_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define PSWUSCFG0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define PSWUSCFG0_LANE_8_MARGINING_LANE_STATUS__LANE_8_USAGE_MODEL_STATUS_MASK 0x0040L
++#define PSWUSCFG0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//PSWUSCFG0_LANE_9_MARGINING_LANE_CNTL
++#define PSWUSCFG0_LANE_9_MARGINING_LANE_CNTL__LANE_9_RECEIVER_NUMBER__SHIFT 0x0
++#define PSWUSCFG0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_TYPE__SHIFT 0x3
++#define PSWUSCFG0_LANE_9_MARGINING_LANE_CNTL__LANE_9_USAGE_MODEL__SHIFT 0x6
++#define PSWUSCFG0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_PAYLOAD__SHIFT 0x8
++#define PSWUSCFG0_LANE_9_MARGINING_LANE_CNTL__LANE_9_RECEIVER_NUMBER_MASK 0x0007L
++#define PSWUSCFG0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_TYPE_MASK 0x0038L
++#define PSWUSCFG0_LANE_9_MARGINING_LANE_CNTL__LANE_9_USAGE_MODEL_MASK 0x0040L
++#define PSWUSCFG0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_PAYLOAD_MASK 0xFF00L
++//PSWUSCFG0_LANE_9_MARGINING_LANE_STATUS
++#define PSWUSCFG0_LANE_9_MARGINING_LANE_STATUS__LANE_9_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define PSWUSCFG0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define PSWUSCFG0_LANE_9_MARGINING_LANE_STATUS__LANE_9_USAGE_MODEL_STATUS__SHIFT 0x6
++#define PSWUSCFG0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define PSWUSCFG0_LANE_9_MARGINING_LANE_STATUS__LANE_9_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define PSWUSCFG0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define PSWUSCFG0_LANE_9_MARGINING_LANE_STATUS__LANE_9_USAGE_MODEL_STATUS_MASK 0x0040L
++#define PSWUSCFG0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//PSWUSCFG0_LANE_10_MARGINING_LANE_CNTL
++#define PSWUSCFG0_LANE_10_MARGINING_LANE_CNTL__LANE_10_RECEIVER_NUMBER__SHIFT 0x0
++#define PSWUSCFG0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_TYPE__SHIFT 0x3
++#define PSWUSCFG0_LANE_10_MARGINING_LANE_CNTL__LANE_10_USAGE_MODEL__SHIFT 0x6
++#define PSWUSCFG0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_PAYLOAD__SHIFT 0x8
++#define PSWUSCFG0_LANE_10_MARGINING_LANE_CNTL__LANE_10_RECEIVER_NUMBER_MASK 0x0007L
++#define PSWUSCFG0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_TYPE_MASK 0x0038L
++#define PSWUSCFG0_LANE_10_MARGINING_LANE_CNTL__LANE_10_USAGE_MODEL_MASK 0x0040L
++#define PSWUSCFG0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_PAYLOAD_MASK 0xFF00L
++//PSWUSCFG0_LANE_10_MARGINING_LANE_STATUS
++#define PSWUSCFG0_LANE_10_MARGINING_LANE_STATUS__LANE_10_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define PSWUSCFG0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define PSWUSCFG0_LANE_10_MARGINING_LANE_STATUS__LANE_10_USAGE_MODEL_STATUS__SHIFT 0x6
++#define PSWUSCFG0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define PSWUSCFG0_LANE_10_MARGINING_LANE_STATUS__LANE_10_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define PSWUSCFG0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define PSWUSCFG0_LANE_10_MARGINING_LANE_STATUS__LANE_10_USAGE_MODEL_STATUS_MASK 0x0040L
++#define PSWUSCFG0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//PSWUSCFG0_LANE_11_MARGINING_LANE_CNTL
++#define PSWUSCFG0_LANE_11_MARGINING_LANE_CNTL__LANE_11_RECEIVER_NUMBER__SHIFT 0x0
++#define PSWUSCFG0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_TYPE__SHIFT 0x3
++#define PSWUSCFG0_LANE_11_MARGINING_LANE_CNTL__LANE_11_USAGE_MODEL__SHIFT 0x6
++#define PSWUSCFG0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_PAYLOAD__SHIFT 0x8
++#define PSWUSCFG0_LANE_11_MARGINING_LANE_CNTL__LANE_11_RECEIVER_NUMBER_MASK 0x0007L
++#define PSWUSCFG0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_TYPE_MASK 0x0038L
++#define PSWUSCFG0_LANE_11_MARGINING_LANE_CNTL__LANE_11_USAGE_MODEL_MASK 0x0040L
++#define PSWUSCFG0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_PAYLOAD_MASK 0xFF00L
++//PSWUSCFG0_LANE_11_MARGINING_LANE_STATUS
++#define PSWUSCFG0_LANE_11_MARGINING_LANE_STATUS__LANE_11_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define PSWUSCFG0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define PSWUSCFG0_LANE_11_MARGINING_LANE_STATUS__LANE_11_USAGE_MODEL_STATUS__SHIFT 0x6
++#define PSWUSCFG0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define PSWUSCFG0_LANE_11_MARGINING_LANE_STATUS__LANE_11_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define PSWUSCFG0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define PSWUSCFG0_LANE_11_MARGINING_LANE_STATUS__LANE_11_USAGE_MODEL_STATUS_MASK 0x0040L
++#define PSWUSCFG0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//PSWUSCFG0_LANE_12_MARGINING_LANE_CNTL
++#define PSWUSCFG0_LANE_12_MARGINING_LANE_CNTL__LANE_12_RECEIVER_NUMBER__SHIFT 0x0
++#define PSWUSCFG0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_TYPE__SHIFT 0x3
++#define PSWUSCFG0_LANE_12_MARGINING_LANE_CNTL__LANE_12_USAGE_MODEL__SHIFT 0x6
++#define PSWUSCFG0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_PAYLOAD__SHIFT 0x8
++#define PSWUSCFG0_LANE_12_MARGINING_LANE_CNTL__LANE_12_RECEIVER_NUMBER_MASK 0x0007L
++#define PSWUSCFG0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_TYPE_MASK 0x0038L
++#define PSWUSCFG0_LANE_12_MARGINING_LANE_CNTL__LANE_12_USAGE_MODEL_MASK 0x0040L
++#define PSWUSCFG0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_PAYLOAD_MASK 0xFF00L
++//PSWUSCFG0_LANE_12_MARGINING_LANE_STATUS
++#define PSWUSCFG0_LANE_12_MARGINING_LANE_STATUS__LANE_12_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define PSWUSCFG0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define PSWUSCFG0_LANE_12_MARGINING_LANE_STATUS__LANE_12_USAGE_MODEL_STATUS__SHIFT 0x6
++#define PSWUSCFG0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define PSWUSCFG0_LANE_12_MARGINING_LANE_STATUS__LANE_12_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define PSWUSCFG0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define PSWUSCFG0_LANE_12_MARGINING_LANE_STATUS__LANE_12_USAGE_MODEL_STATUS_MASK 0x0040L
++#define PSWUSCFG0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//PSWUSCFG0_LANE_13_MARGINING_LANE_CNTL
++#define PSWUSCFG0_LANE_13_MARGINING_LANE_CNTL__LANE_13_RECEIVER_NUMBER__SHIFT 0x0
++#define PSWUSCFG0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_TYPE__SHIFT 0x3
++#define PSWUSCFG0_LANE_13_MARGINING_LANE_CNTL__LANE_13_USAGE_MODEL__SHIFT 0x6
++#define PSWUSCFG0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_PAYLOAD__SHIFT 0x8
++#define PSWUSCFG0_LANE_13_MARGINING_LANE_CNTL__LANE_13_RECEIVER_NUMBER_MASK 0x0007L
++#define PSWUSCFG0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_TYPE_MASK 0x0038L
++#define PSWUSCFG0_LANE_13_MARGINING_LANE_CNTL__LANE_13_USAGE_MODEL_MASK 0x0040L
++#define PSWUSCFG0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_PAYLOAD_MASK 0xFF00L
++//PSWUSCFG0_LANE_13_MARGINING_LANE_STATUS
++#define PSWUSCFG0_LANE_13_MARGINING_LANE_STATUS__LANE_13_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define PSWUSCFG0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define PSWUSCFG0_LANE_13_MARGINING_LANE_STATUS__LANE_13_USAGE_MODEL_STATUS__SHIFT 0x6
++#define PSWUSCFG0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define PSWUSCFG0_LANE_13_MARGINING_LANE_STATUS__LANE_13_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define PSWUSCFG0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define PSWUSCFG0_LANE_13_MARGINING_LANE_STATUS__LANE_13_USAGE_MODEL_STATUS_MASK 0x0040L
++#define PSWUSCFG0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//PSWUSCFG0_LANE_14_MARGINING_LANE_CNTL
++#define PSWUSCFG0_LANE_14_MARGINING_LANE_CNTL__LANE_14_RECEIVER_NUMBER__SHIFT 0x0
++#define PSWUSCFG0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_TYPE__SHIFT 0x3
++#define PSWUSCFG0_LANE_14_MARGINING_LANE_CNTL__LANE_14_USAGE_MODEL__SHIFT 0x6
++#define PSWUSCFG0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_PAYLOAD__SHIFT 0x8
++#define PSWUSCFG0_LANE_14_MARGINING_LANE_CNTL__LANE_14_RECEIVER_NUMBER_MASK 0x0007L
++#define PSWUSCFG0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_TYPE_MASK 0x0038L
++#define PSWUSCFG0_LANE_14_MARGINING_LANE_CNTL__LANE_14_USAGE_MODEL_MASK 0x0040L
++#define PSWUSCFG0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_PAYLOAD_MASK 0xFF00L
++//PSWUSCFG0_LANE_14_MARGINING_LANE_STATUS
++#define PSWUSCFG0_LANE_14_MARGINING_LANE_STATUS__LANE_14_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define PSWUSCFG0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define PSWUSCFG0_LANE_14_MARGINING_LANE_STATUS__LANE_14_USAGE_MODEL_STATUS__SHIFT 0x6
++#define PSWUSCFG0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define PSWUSCFG0_LANE_14_MARGINING_LANE_STATUS__LANE_14_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define PSWUSCFG0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define PSWUSCFG0_LANE_14_MARGINING_LANE_STATUS__LANE_14_USAGE_MODEL_STATUS_MASK 0x0040L
++#define PSWUSCFG0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//PSWUSCFG0_LANE_15_MARGINING_LANE_CNTL
++#define PSWUSCFG0_LANE_15_MARGINING_LANE_CNTL__LANE_15_RECEIVER_NUMBER__SHIFT 0x0
++#define PSWUSCFG0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_TYPE__SHIFT 0x3
++#define PSWUSCFG0_LANE_15_MARGINING_LANE_CNTL__LANE_15_USAGE_MODEL__SHIFT 0x6
++#define PSWUSCFG0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_PAYLOAD__SHIFT 0x8
++#define PSWUSCFG0_LANE_15_MARGINING_LANE_CNTL__LANE_15_RECEIVER_NUMBER_MASK 0x0007L
++#define PSWUSCFG0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_TYPE_MASK 0x0038L
++#define PSWUSCFG0_LANE_15_MARGINING_LANE_CNTL__LANE_15_USAGE_MODEL_MASK 0x0040L
++#define PSWUSCFG0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_PAYLOAD_MASK 0xFF00L
++//PSWUSCFG0_LANE_15_MARGINING_LANE_STATUS
++#define PSWUSCFG0_LANE_15_MARGINING_LANE_STATUS__LANE_15_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define PSWUSCFG0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define PSWUSCFG0_LANE_15_MARGINING_LANE_STATUS__LANE_15_USAGE_MODEL_STATUS__SHIFT 0x6
++#define PSWUSCFG0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define PSWUSCFG0_LANE_15_MARGINING_LANE_STATUS__LANE_15_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define PSWUSCFG0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define PSWUSCFG0_LANE_15_MARGINING_LANE_STATUS__LANE_15_USAGE_MODEL_STATUS_MASK 0x0040L
++#define PSWUSCFG0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_bifcfgdecp
++//BIF_CFG_DEV0_EPF0_0_VENDOR_ID
++#define BIF_CFG_DEV0_EPF0_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_0_DEVICE_ID
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_0_COMMAND
++#define BIF_CFG_DEV0_EPF0_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_COMMAND__AD_STEPPING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_0_COMMAND__SERR_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_COMMAND__FAST_B2B_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_0_COMMAND__INT_DIS__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_COMMAND__AD_STEPPING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_0_COMMAND__SERR_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_0_COMMAND__FAST_B2B_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_0_COMMAND__INT_DIS_MASK 0x0400L
++//BIF_CFG_DEV0_EPF0_0_STATUS
++#define BIF_CFG_DEV0_EPF0_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_STATUS__INT_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_STATUS__CAP_LIST__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_STATUS__PCI_66_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_STATUS__DEVSEL_TIMING__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_0_STATUS__INT_STATUS_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_0_STATUS__CAP_LIST_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_0_STATUS__PCI_66_CAP_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_0_STATUS__DEVSEL_TIMING_MASK 0x0600L
++#define BIF_CFG_DEV0_EPF0_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_REVISION_ID
++#define BIF_CFG_DEV0_EPF0_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_0_PROG_INTERFACE
++#define BIF_CFG_DEV0_EPF0_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_0_SUB_CLASS
++#define BIF_CFG_DEV0_EPF0_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_0_BASE_CLASS
++#define BIF_CFG_DEV0_EPF0_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_0_CACHE_LINE
++#define BIF_CFG_DEV0_EPF0_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_0_LATENCY
++#define BIF_CFG_DEV0_EPF0_0_LATENCY__LATENCY_TIMER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LATENCY__LATENCY_TIMER_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_0_HEADER
++#define BIF_CFG_DEV0_EPF0_0_HEADER__HEADER_TYPE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_HEADER__DEVICE_TYPE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_0_HEADER__HEADER_TYPE_MASK 0x7FL
++#define BIF_CFG_DEV0_EPF0_0_HEADER__DEVICE_TYPE_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_0_BIST
++#define BIF_CFG_DEV0_EPF0_0_BIST__BIST_COMP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_BIST__BIST_STRT__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_BIST__BIST_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_0_BIST__BIST_COMP_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_0_BIST__BIST_STRT_MASK 0x40L
++#define BIF_CFG_DEV0_EPF0_0_BIST__BIST_CAP_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_0_BASE_ADDR_1
++#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_BASE_ADDR_2
++#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_BASE_ADDR_3
++#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_BASE_ADDR_4
++#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_BASE_ADDR_5
++#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_BASE_ADDR_6
++#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_ADAPTER_ID
++#define BIF_CFG_DEV0_EPF0_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_ROM_BASE_ADDR
++#define BIF_CFG_DEV0_EPF0_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_CAP_PTR
++#define BIF_CFG_DEV0_EPF0_0_CAP_PTR__CAP_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL
++//BIF_CFG_DEV0_EPF0_0_INTERRUPT_LINE
++#define BIF_CFG_DEV0_EPF0_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_0_INTERRUPT_PIN
++#define BIF_CFG_DEV0_EPF0_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_0_MIN_GRANT
++#define BIF_CFG_DEV0_EPF0_0_MIN_GRANT__MIN_GNT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_MIN_GRANT__MIN_GNT_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_0_MAX_LATENCY
++#define BIF_CFG_DEV0_EPF0_0_MAX_LATENCY__MAX_LAT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_MAX_LATENCY__MAX_LAT_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_0_VENDOR_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_VENDOR_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_VENDOR_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_VENDOR_CAP_LIST__LENGTH__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_VENDOR_CAP_LIST__CAP_ID_MASK 0x000000FFL
++#define BIF_CFG_DEV0_EPF0_0_VENDOR_CAP_LIST__NEXT_PTR_MASK 0x0000FF00L
++#define BIF_CFG_DEV0_EPF0_0_VENDOR_CAP_LIST__LENGTH_MASK 0x00FF0000L
++//BIF_CFG_DEV0_EPF0_0_ADAPTER_ID_W
++#define BIF_CFG_DEV0_EPF0_0_ADAPTER_ID_W__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_ADAPTER_ID_W__SUBSYSTEM_ID__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_ADAPTER_ID_W__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_ADAPTER_ID_W__SUBSYSTEM_ID_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PMI_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_PMI_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PMI_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PMI_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_0_PMI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_PMI_CAP
++#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__VERSION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__PME_CLOCK__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__DEV_SPECIFIC_INIT__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__AUX_CURRENT__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__D1_SUPPORT__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__D2_SUPPORT__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__PME_SUPPORT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__VERSION_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__PME_CLOCK_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__DEV_SPECIFIC_INIT_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__AUX_CURRENT_MASK 0x01C0L
++#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__D1_SUPPORT_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__D2_SUPPORT_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__PME_SUPPORT_MASK 0xF800L
++//BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__POWER_STATE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__NO_SOFT_RESET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__PME_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__DATA_SELECT__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__DATA_SCALE__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__PME_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__B2_B3_SUPPORT__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__BUS_PWR_EN__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__PMI_DATA__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__POWER_STATE_MASK 0x00000003L
++#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__NO_SOFT_RESET_MASK 0x00000008L
++#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__PME_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__DATA_SELECT_MASK 0x00001E00L
++#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__DATA_SCALE_MASK 0x00006000L
++#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__PME_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__B2_B3_SUPPORT_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__BUS_PWR_EN_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__PMI_DATA_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_PCIE_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP__VERSION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP__VERSION_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
++//BIF_CFG_DEV0_EPF0_0_DEVICE_CAP
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
++//BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_0_LINK_CAP
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF0_0_LINK_CNTL
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__LINK_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__LINK_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
++//BIF_CFG_DEV0_EPF0_0_LINK_STATUS
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
++//BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS2
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_0_LINK_CAP2
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__RESERVED__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__RESERVED_MASK 0xFE000000L
++//BIF_CFG_DEV0_EPF0_0_LINK_CNTL2
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
++//BIF_CFG_DEV0_EPF0_0_LINK_STATUS2
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
++//BIF_CFG_DEV0_EPF0_0_SLOT_CAP2
++#define BIF_CFG_DEV0_EPF0_0_SLOT_CAP2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_SLOT_CNTL2
++#define BIF_CFG_DEV0_EPF0_0_SLOT_CNTL2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_0_SLOT_STATUS2
++#define BIF_CFG_DEV0_EPF0_0_SLOT_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_0_MSI_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
++#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
++//BIF_CFG_DEV0_EPF0_0_MSI_MSG_ADDR_LO
++#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//BIF_CFG_DEV0_EPF0_0_MSI_MSG_ADDR_HI
++#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_MSI_MSG_DATA
++#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_0_MSI_MASK
++#define BIF_CFG_DEV0_EPF0_0_MSI_MASK__MSI_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_MSI_MSG_DATA_64
++#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_0_MSI_MASK_64
++#define BIF_CFG_DEV0_EPF0_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_MSI_PENDING
++#define BIF_CFG_DEV0_EPF0_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_MSI_PENDING_64
++#define BIF_CFG_DEV0_EPF0_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_MSIX_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_MSIX_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
++#define BIF_CFG_DEV0_EPF0_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_MSIX_TABLE
++#define BIF_CFG_DEV0_EPF0_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_0_MSIX_PBA
++#define BIF_CFG_DEV0_EPF0_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1__EXT_VC_COUNT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1__LOW_PRIORITY_EXT_VC_COUNT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1__REF_CLK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1__PORT_ARB_TABLE_ENTRY_SIZE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1__EXT_VC_COUNT_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1__LOW_PRIORITY_EXT_VC_COUNT_MASK 0x00000070L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1__REF_CLK_MASK 0x00000300L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1__PORT_ARB_TABLE_ENTRY_SIZE_MASK 0x00000C00L
++//BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG2__VC_ARB_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG2__VC_ARB_TABLE_OFFSET__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG2__VC_ARB_CAP_MASK 0x000000FFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG2__VC_ARB_TABLE_OFFSET_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CNTL__LOAD_VC_ARB_TABLE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CNTL__VC_ARB_SELECT__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CNTL__LOAD_VC_ARB_TABLE_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CNTL__VC_ARB_SELECT_MASK 0x000EL
++//BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_STATUS
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_STATUS__VC_ARB_TABLE_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_STATUS__VC_ARB_TABLE_STATUS_MASK 0x0001L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP__REJECT_SNOOP_TRANS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP__MAX_TIME_SLOTS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_CAP_MASK 0x000000FFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP__REJECT_SNOOP_TRANS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP__MAX_TIME_SLOTS_MASK 0x003F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC0__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC1_7__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__PORT_ARB_SELECT__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__VC_ID__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__VC_ENABLE__SHIFT 0x1f
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC0_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC1_7_MASK 0x000000FEL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__PORT_ARB_SELECT_MASK 0x000E0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__VC_ID_MASK 0x07000000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__VC_ENABLE_MASK 0x80000000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_STATUS
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_STATUS__VC_NEGOTIATION_PENDING__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_STATUS__VC_NEGOTIATION_PENDING_MASK 0x0002L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP__REJECT_SNOOP_TRANS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP__MAX_TIME_SLOTS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_CAP_MASK 0x000000FFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP__REJECT_SNOOP_TRANS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP__MAX_TIME_SLOTS_MASK 0x003F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC0__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC1_7__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__PORT_ARB_SELECT__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__VC_ID__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__VC_ENABLE__SHIFT 0x1f
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC0_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC1_7_MASK 0x000000FEL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__PORT_ARB_SELECT_MASK 0x000E0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__VC_ID_MASK 0x07000000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__VC_ENABLE_MASK 0x80000000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_STATUS
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_STATUS__VC_NEGOTIATION_PENDING__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_STATUS__VC_NEGOTIATION_PENDING_MASK 0x0002L
++//BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_DW1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_DW1__SERIAL_NUMBER_LO__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_DW1__SERIAL_NUMBER_LO_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_DW2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_DW2__SERIAL_NUMBER_HI__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_DW2__SERIAL_NUMBER_HI_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG3
++#define BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG3
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_BAR_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CNTL__BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CNTL__BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CNTL__BAR_INDEX_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CNTL__BAR_SIZE_MASK 0x3F00L
++//BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CNTL__BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CNTL__BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CNTL__BAR_INDEX_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CNTL__BAR_SIZE_MASK 0x3F00L
++//BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CNTL__BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CNTL__BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CNTL__BAR_INDEX_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CNTL__BAR_SIZE_MASK 0x3F00L
++//BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CNTL__BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CNTL__BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CNTL__BAR_INDEX_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CNTL__BAR_SIZE_MASK 0x3F00L
++//BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CNTL__BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CNTL__BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CNTL__BAR_INDEX_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CNTL__BAR_SIZE_MASK 0x3F00L
++//BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CNTL__BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CNTL__BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CNTL__BAR_INDEX_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CNTL__BAR_SIZE_MASK 0x3F00L
++//BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA_SELECT
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA_SELECT__DATA_SELECT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA_SELECT__DATA_SELECT_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__BASE_POWER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__DATA_SCALE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__PM_SUB_STATE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__PM_STATE__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__TYPE__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__POWER_RAIL__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__BASE_POWER_MASK 0x000000FFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__DATA_SCALE_MASK 0x00000300L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__PM_SUB_STATE_MASK 0x00001C00L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__PM_STATE_MASK 0x00006000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__TYPE_MASK 0x00038000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__POWER_RAIL_MASK 0x001C0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_CAP__SYSTEM_ALLOCATED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_CAP__SYSTEM_ALLOCATED_MASK 0x01L
++//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__SUBSTATE_MAX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__TRANS_LAT_UNIT__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__PWR_ALLOC_SCALE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__TRANS_LAT_VAL_0__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__TRANS_LAT_VAL_1__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__SUBSTATE_MAX_MASK 0x0000001FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__TRANS_LAT_UNIT_MASK 0x00000300L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__PWR_ALLOC_SCALE_MASK 0x00003000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__TRANS_LAT_VAL_0_MASK 0x00FF0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__TRANS_LAT_VAL_1_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_LATENCY_INDICATOR
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_STATUS
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_STATUS__SUBSTATE_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_STATUS__SUBSTATE_CNTL_ENABLED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_STATUS__SUBSTATE_STATUS_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_STATUS__SUBSTATE_CNTL_ENABLED_MASK 0x0100L
++//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CNTL__SUBSTATE_CNTL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CNTL__SUBSTATE_CNTL_MASK 0x1FL
++//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_SECONDARY_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SECONDARY_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SECONDARY_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_LINK_CNTL3
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LINK_CNTL3__PERFORM_EQUALIZATION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LINK_CNTL3__LINK_EQUALIZATION_REQ_INT_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LINK_CNTL3__RESERVED__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LINK_CNTL3__PERFORM_EQUALIZATION_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LINK_CNTL3__LINK_EQUALIZATION_REQ_INT_EN_MASK 0x00000002L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LINK_CNTL3__RESERVED_MASK 0xFFFFFFFCL
++//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_ERROR_STATUS
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_ERROR_STATUS__LANE_ERROR_STATUS_BITS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_ERROR_STATUS__RESERVED__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_ERROR_STATUS__LANE_ERROR_STATUS_BITS_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_ERROR_STATUS__RESERVED_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_ACS_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__SOURCE_VALIDATION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__TRANSLATION_BLOCKING__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__UPSTREAM_FORWARDING__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__P2P_EGRESS_CONTROL__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__SOURCE_VALIDATION_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__TRANSLATION_BLOCKING_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__UPSTREAM_FORWARDING_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__P2P_EGRESS_CONTROL_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_0_PCIE_ATS_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CNTL__STU__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CNTL__STU_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_CNTL__PRI_ENABLE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_CNTL__PRI_RESET__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_CNTL__PRI_ENABLE_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_CNTL__PRI_RESET_MASK 0x0002L
++//BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS__RESPONSE_FAILURE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS__UNEXPECTED_PAGE_REQ_GRP_INDEX__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS__STOPPED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS__PRG_RESPONSE_PASID_REQUIRED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS__RESPONSE_FAILURE_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS__UNEXPECTED_PAGE_REQ_GRP_INDEX_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS__STOPPED_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS__PRG_RESPONSE_PASID_REQUIRED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_OUTSTAND_PAGE_REQ_CAPACITY
++#define BIF_CFG_DEV0_EPF0_0_PCIE_OUTSTAND_PAGE_REQ_CAPACITY__OUTSTAND_PAGE_REQ_CAPACITY__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_OUTSTAND_PAGE_REQ_CAPACITY__OUTSTAND_PAGE_REQ_CAPACITY_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_OUTSTAND_PAGE_REQ_ALLOC
++#define BIF_CFG_DEV0_EPF0_0_PCIE_OUTSTAND_PAGE_REQ_ALLOC__OUTSTAND_PAGE_REQ_ALLOC__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_OUTSTAND_PAGE_REQ_ALLOC__OUTSTAND_PAGE_REQ_ALLOC_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_PASID_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CAP__PASID_EXE_PERMISSION_SUPPORTED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CAP__PASID_PRIV_MODE_SUPPORTED__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CAP__MAX_PASID_WIDTH__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CAP__PASID_EXE_PERMISSION_SUPPORTED_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CAP__PASID_PRIV_MODE_SUPPORTED_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CAP__MAX_PASID_WIDTH_MASK 0x1F00L
++//BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CNTL__PASID_ENABLE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CNTL__PASID_EXE_PERMISSION_ENABLE__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CNTL__PASID_PRIV_MODE_SUPPORTED_ENABLE__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CNTL__PASID_ENABLE_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CNTL__PASID_EXE_PERMISSION_ENABLE_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CNTL__PASID_PRIV_MODE_SUPPORTED_ENABLE_MASK 0x0004L
++//BIF_CFG_DEV0_EPF0_0_PCIE_MC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_MC_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CAP__MC_MAX_GROUP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CAP__MC_WIN_SIZE_REQ__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CAP__MC_ECRC_REGEN_SUPP__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CAP__MC_MAX_GROUP_MASK 0x003FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CAP__MC_WIN_SIZE_REQ_MASK 0x3F00L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CAP__MC_ECRC_REGEN_SUPP_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_MC_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CNTL__MC_NUM_GROUP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CNTL__MC_ENABLE__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CNTL__MC_NUM_GROUP_MASK 0x003FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CNTL__MC_ENABLE_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR0__MC_INDEX_POS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR0__MC_BASE_ADDR_0__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR0__MC_INDEX_POS_MASK 0x0000003FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR0__MC_BASE_ADDR_0_MASK 0xFFFFF000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR1__MC_BASE_ADDR_1__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR1__MC_BASE_ADDR_1_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_MC_RCV0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_RCV0__MC_RECEIVE_0__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_RCV0__MC_RECEIVE_0_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_MC_RCV1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_RCV1__MC_RECEIVE_1__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_RCV1__MC_RECEIVE_1_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_ALL0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_ALL0__MC_BLOCK_ALL_0__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_ALL0__MC_BLOCK_ALL_0_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_ALL1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_ALL1__MC_BLOCK_ALL_1__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_ALL1__MC_BLOCK_ALL_1_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_UNTRANSLATED_0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_UNTRANSLATED_0__MC_BLOCK_UNTRANSLATED_0__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_UNTRANSLATED_0__MC_BLOCK_UNTRANSLATED_0_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_UNTRANSLATED_1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_UNTRANSLATED_1__MC_BLOCK_UNTRANSLATED_1__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_UNTRANSLATED_1__MC_BLOCK_UNTRANSLATED_1_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_LTR_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_VALUE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_SCALE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_VALUE__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_SCALE__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_VALUE_MASK 0x000003FFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_SCALE_MASK 0x00001C00L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_VALUE_MASK 0x03FF0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_SCALE_MASK 0x1C000000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_ARI_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
++//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP__SRIOV_ARI_CAP_HIERARCHY_PRESERVED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP__SRIOV_VF_TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_INTR_MSG_NUM__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_CAP_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP__SRIOV_ARI_CAP_HIERARCHY_PRESERVED_MASK 0x00000002L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP__SRIOV_VF_TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00000004L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_INTR_MSG_NUM_MASK 0xFFE00000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_ENABLE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_ENABLE__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_INTR_ENABLE__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MSE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_ARI_CAP_HIERARCHY__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_ENABLE_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_ENABLE_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_INTR_ENABLE_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MSE_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_ARI_CAP_HIERARCHY_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x0020L
++//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_STATUS
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_STATUS__SRIOV_VF_MIGRATION_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_STATUS__SRIOV_VF_MIGRATION_STATUS_MASK 0x0001L
++//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_INITIAL_VFS
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_INITIAL_VFS__SRIOV_INITIAL_VFS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_INITIAL_VFS__SRIOV_INITIAL_VFS_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_TOTAL_VFS
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_TOTAL_VFS__SRIOV_TOTAL_VFS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_TOTAL_VFS__SRIOV_TOTAL_VFS_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_NUM_VFS
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_NUM_VFS__SRIOV_NUM_VFS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_NUM_VFS__SRIOV_NUM_VFS_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_FUNC_DEP_LINK
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_FUNC_DEP_LINK__SRIOV_FUNC_DEP_LINK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_FUNC_DEP_LINK__SRIOV_FUNC_DEP_LINK_MASK 0x00FFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_FIRST_VF_OFFSET
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_FIRST_VF_OFFSET__SRIOV_FIRST_VF_OFFSET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_FIRST_VF_OFFSET__SRIOV_FIRST_VF_OFFSET_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_STRIDE
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_STRIDE__SRIOV_VF_STRIDE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_STRIDE__SRIOV_VF_STRIDE_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_DEVICE_ID
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_DEVICE_ID__SRIOV_VF_DEVICE_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_DEVICE_ID__SRIOV_VF_DEVICE_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_SUPPORTED_PAGE_SIZE
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_SUPPORTED_PAGE_SIZE__SRIOV_SUPPORTED_PAGE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_SUPPORTED_PAGE_SIZE__SRIOV_SUPPORTED_PAGE_SIZE_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_SYSTEM_PAGE_SIZE
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_SYSTEM_PAGE_SIZE__SRIOV_SYSTEM_PAGE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_SYSTEM_PAGE_SIZE__SRIOV_SYSTEM_PAGE_SIZE_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_0__VF_BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_0__VF_BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_1__VF_BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_1__VF_BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_2__VF_BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_2__VF_BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_3
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_3__VF_BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_3__VF_BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_4__VF_BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_4__VF_BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_5__VF_BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_5__VF_BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_BIF__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_BIF_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_NO_ST_MODE_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_INT_VEC_MODE_SUPPORTED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_DEV_SPC_MODE_SUPPORTED__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_EXTND_TPH_REQR_SUPPORED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_ST_TABLE_LOCATION__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_ST_TABLE_SIZE__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_NO_ST_MODE_SUPPORTED_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_INT_VEC_MODE_SUPPORTED_MASK 0x00000002L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_DEV_SPC_MODE_SUPPORTED_MASK 0x00000004L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_EXTND_TPH_REQR_SUPPORED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_ST_TABLE_LOCATION_MASK 0x00000600L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_ST_TABLE_SIZE_MASK 0x07FF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CNTL__TPH_REQR_ST_MODE_SEL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CNTL__TPH_REQR_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CNTL__TPH_REQR_ST_MODE_SEL_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CNTL__TPH_REQR_EN_MASK 0x00000300L
++//BIF_CFG_DEV0_EPF0_0_PCIE_DLF_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DLF_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DLF_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DLF_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DLF_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DLF_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DLF_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_CAP
++#define BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_CAP__LOCAL_DLF_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_CAP__DLF_EXCHANGE_ENABLE__SHIFT 0x1f
++#define BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_CAP__LOCAL_DLF_SUPPORTED_MASK 0x007FFFFFL
++#define BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_CAP__DLF_EXCHANGE_ENABLE_MASK 0x80000000L
++//BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_STATUS
++#define BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_VALID__SHIFT 0x1f
++#define BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_MASK 0x007FFFFFL
++#define BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_VALID_MASK 0x80000000L
++//BIF_CFG_DEV0_EPF0_0_PHY_16GT_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_PHY_16GT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PHY_16GT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PHY_16GT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PHY_16GT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PHY_16GT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PHY_16GT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_LINK_CAP_16GT
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP_16GT__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP_16GT__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_LINK_CNTL_16GT
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL_16GT__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL_16GT__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__EQUALIZATION_COMPLETE_16GT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__EQUALIZATION_PHASE1_SUCCESS_16GT__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__EQUALIZATION_PHASE2_SUCCESS_16GT__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__EQUALIZATION_PHASE3_SUCCESS_16GT__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__LINK_EQUALIZATION_REQUEST_16GT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__EQUALIZATION_COMPLETE_16GT_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__EQUALIZATION_PHASE1_SUCCESS_16GT_MASK 0x00000002L
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__EQUALIZATION_PHASE2_SUCCESS_16GT_MASK 0x00000004L
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__EQUALIZATION_PHASE3_SUCCESS_16GT_MASK 0x00000008L
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__LINK_EQUALIZATION_REQUEST_16GT_MASK 0x00000010L
++//BIF_CFG_DEV0_EPF0_0_LOCAL_PARITY_MISMATCH_STATUS_16GT
++#define BIF_CFG_DEV0_EPF0_0_LOCAL_PARITY_MISMATCH_STATUS_16GT__LOCAL_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LOCAL_PARITY_MISMATCH_STATUS_16GT__LOCAL_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_0_RTM1_PARITY_MISMATCH_STATUS_16GT
++#define BIF_CFG_DEV0_EPF0_0_RTM1_PARITY_MISMATCH_STATUS_16GT__RTM1_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_RTM1_PARITY_MISMATCH_STATUS_16GT__RTM1_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_0_RTM2_PARITY_MISMATCH_STATUS_16GT
++#define BIF_CFG_DEV0_EPF0_0_RTM2_PARITY_MISMATCH_STATUS_16GT__RTM2_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_RTM2_PARITY_MISMATCH_STATUS_16GT__RTM2_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_0_LANE_0_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF0_0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_0_LANE_1_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF0_0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_0_LANE_2_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF0_0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_0_LANE_3_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF0_0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_0_LANE_4_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF0_0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_0_LANE_5_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF0_0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_0_LANE_6_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF0_0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_0_LANE_7_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF0_0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_0_LANE_8_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF0_0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_0_LANE_9_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF0_0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_0_LANE_10_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF0_0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_0_LANE_11_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF0_0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_0_LANE_12_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF0_0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_0_LANE_13_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF0_0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_0_LANE_14_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF0_0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_0_LANE_15_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF0_0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_0_MARGINING_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_MARGINING_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_MARGINING_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_MARGINING_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_MARGINING_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_MARGINING_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_MARGINING_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_MARGINING_PORT_CAP
++#define BIF_CFG_DEV0_EPF0_0_MARGINING_PORT_CAP__MARGINING_USES_SOFTWARE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_MARGINING_PORT_CAP__MARGINING_USES_SOFTWARE_MASK 0x0001L
++//BIF_CFG_DEV0_EPF0_0_MARGINING_PORT_STATUS
++#define BIF_CFG_DEV0_EPF0_0_MARGINING_PORT_STATUS__MARGINING_READY__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_MARGINING_PORT_STATUS__MARGINING_SOFTWARE_READY__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_MARGINING_PORT_STATUS__MARGINING_READY_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_0_MARGINING_PORT_STATUS__MARGINING_SOFTWARE_READY_MASK 0x0002L
++//BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR1_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR1_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR1_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR1_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_INDEX_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_SIZE_MASK 0x00003F00L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR2_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR2_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR2_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR2_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_INDEX_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_SIZE_MASK 0x00003F00L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR3_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR3_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR3_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR3_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_INDEX_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_SIZE_MASK 0x00003F00L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR4_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR4_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR4_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR4_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_INDEX_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_SIZE_MASK 0x00003F00L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR5_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR5_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR5_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR5_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_INDEX_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_SIZE_MASK 0x00003F00L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR6_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR6_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR6_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR6_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_INDEX_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_SIZE_MASK 0x00003F00L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_REV__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_LENGTH__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_REV_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_LENGTH_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW__VF_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW__VF_NUM__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW__VF_EN_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW__VF_NUM_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_CMD_COMPLETE_INTR_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_HANG_NEED_FLR_INTR_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_CMD_COMPLETE_INTR_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_HANG_NEED_FLR_INTR_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_CMD_COMPLETE_INTR_EN__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_HANG_NEED_FLR_INTR_EN__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_CMD_COMPLETE_INTR_EN__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_HANG_NEED_FLR_INTR_EN__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__HVVM_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__HVVM_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_CMD_COMPLETE_INTR_EN_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00000002L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_HANG_NEED_FLR_INTR_EN_MASK 0x00000004L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00000008L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_CMD_COMPLETE_INTR_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_HANG_NEED_FLR_INTR_EN_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_CMD_COMPLETE_INTR_EN_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_HANG_NEED_FLR_INTR_EN_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_CMD_COMPLETE_INTR_EN_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_HANG_NEED_FLR_INTR_EN_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__HVVM_MAILBOX_TRN_ACK_INTR_EN_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__HVVM_MAILBOX_RCV_VALID_INTR_EN_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_CMD_COMPLETE_INTR_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_HANG_NEED_FLR_INTR_STATUS__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_CMD_COMPLETE_INTR_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_HANG_NEED_FLR_INTR_STATUS__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_CMD_COMPLETE_INTR_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_HANG_NEED_FLR_INTR_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_CMD_COMPLETE_INTR_STATUS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_HANG_NEED_FLR_INTR_STATUS__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__HVVM_MAILBOX_TRN_ACK_INTR_STATUS__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__HVVM_MAILBOX_RCV_VALID_INTR_STATUS__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_CMD_COMPLETE_INTR_STATUS_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00000002L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_HANG_NEED_FLR_INTR_STATUS_MASK 0x00000004L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00000008L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_CMD_COMPLETE_INTR_STATUS_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_HANG_NEED_FLR_INTR_STATUS_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_CMD_COMPLETE_INTR_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_HANG_NEED_FLR_INTR_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_CMD_COMPLETE_INTR_STATUS_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_HANG_NEED_FLR_INTR_STATUS_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__HVVM_MAILBOX_TRN_ACK_INTR_STATUS_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__HVVM_MAILBOX_RCV_VALID_INTR_STATUS_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_RESET_CONTROL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_RESET_CONTROL__SOFT_PF_FLR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_RESET_CONTROL__SOFT_PF_FLR_MASK 0x0001L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__VF_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__TRN_MSG_DATA__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__TRN_MSG_VALID__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__RCV_MSG_DATA__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__RCV_MSG_ACK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__VF_INDEX_MASK 0x000000FFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__TRN_MSG_DATA_MASK 0x00000F00L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__TRN_MSG_VALID_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__RCV_MSG_DATA_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__RCV_MSG_ACK_MASK 0x01000000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF0_TRN_ACK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF0_RCV_VALID__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF1_TRN_ACK__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF1_RCV_VALID__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF2_TRN_ACK__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF2_RCV_VALID__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF3_TRN_ACK__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF3_RCV_VALID__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF4_TRN_ACK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF4_RCV_VALID__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF5_TRN_ACK__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF5_RCV_VALID__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF6_TRN_ACK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF6_RCV_VALID__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF7_TRN_ACK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF7_RCV_VALID__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF8_TRN_ACK__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF8_RCV_VALID__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF9_TRN_ACK__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF9_RCV_VALID__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF10_TRN_ACK__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF10_RCV_VALID__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF11_TRN_ACK__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF11_RCV_VALID__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF12_TRN_ACK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF12_RCV_VALID__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF13_TRN_ACK__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF13_RCV_VALID__SHIFT 0x1b
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF14_TRN_ACK__SHIFT 0x1c
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF14_RCV_VALID__SHIFT 0x1d
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF15_TRN_ACK__SHIFT 0x1e
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF15_RCV_VALID__SHIFT 0x1f
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF0_TRN_ACK_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF0_RCV_VALID_MASK 0x00000002L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF1_TRN_ACK_MASK 0x00000004L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF1_RCV_VALID_MASK 0x00000008L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF2_TRN_ACK_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF2_RCV_VALID_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF3_TRN_ACK_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF3_RCV_VALID_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF4_TRN_ACK_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF4_RCV_VALID_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF5_TRN_ACK_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF5_RCV_VALID_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF6_TRN_ACK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF6_RCV_VALID_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF7_TRN_ACK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF7_RCV_VALID_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF8_TRN_ACK_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF8_RCV_VALID_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF9_TRN_ACK_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF9_RCV_VALID_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF10_TRN_ACK_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF10_RCV_VALID_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF11_TRN_ACK_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF11_RCV_VALID_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF12_TRN_ACK_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF12_RCV_VALID_MASK 0x02000000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF13_TRN_ACK_MASK 0x04000000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF13_RCV_VALID_MASK 0x08000000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF14_TRN_ACK_MASK 0x10000000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF14_RCV_VALID_MASK 0x20000000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF15_TRN_ACK_MASK 0x40000000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF15_RCV_VALID_MASK 0x80000000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF16_TRN_ACK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF16_RCV_VALID__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF17_TRN_ACK__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF17_RCV_VALID__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF18_TRN_ACK__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF18_RCV_VALID__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF19_TRN_ACK__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF19_RCV_VALID__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF20_TRN_ACK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF20_RCV_VALID__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF21_TRN_ACK__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF21_RCV_VALID__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF22_TRN_ACK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF22_RCV_VALID__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF23_TRN_ACK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF23_RCV_VALID__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF24_TRN_ACK__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF24_RCV_VALID__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF25_TRN_ACK__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF25_RCV_VALID__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF26_TRN_ACK__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF26_RCV_VALID__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF27_TRN_ACK__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF27_RCV_VALID__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF28_TRN_ACK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF28_RCV_VALID__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF29_TRN_ACK__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF29_RCV_VALID__SHIFT 0x1b
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF30_TRN_ACK__SHIFT 0x1c
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF30_RCV_VALID__SHIFT 0x1d
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__PF_TRN_ACK__SHIFT 0x1e
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__PF_RCV_VALID__SHIFT 0x1f
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF16_TRN_ACK_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF16_RCV_VALID_MASK 0x00000002L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF17_TRN_ACK_MASK 0x00000004L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF17_RCV_VALID_MASK 0x00000008L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF18_TRN_ACK_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF18_RCV_VALID_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF19_TRN_ACK_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF19_RCV_VALID_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF20_TRN_ACK_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF20_RCV_VALID_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF21_TRN_ACK_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF21_RCV_VALID_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF22_TRN_ACK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF22_RCV_VALID_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF23_TRN_ACK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF23_RCV_VALID_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF24_TRN_ACK_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF24_RCV_VALID_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF25_TRN_ACK_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF25_RCV_VALID_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF26_TRN_ACK_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF26_RCV_VALID_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF27_TRN_ACK_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF27_RCV_VALID_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF28_TRN_ACK_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF28_RCV_VALID_MASK 0x02000000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF29_TRN_ACK_MASK 0x04000000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF29_RCV_VALID_MASK 0x08000000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF30_TRN_ACK_MASK 0x10000000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF30_RCV_VALID_MASK 0x20000000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__PF_TRN_ACK_MASK 0x40000000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__PF_RCV_VALID_MASK 0x80000000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__CONTEXT_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__LOC__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__CONTEXT_OFFSET__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__CONTEXT_SIZE_MASK 0x0000007FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__LOC_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__CONTEXT_OFFSET_MASK 0xFFFFFC00L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB__TOTAL_FB_AVAILABLE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB__TOTAL_FB_CONSUMED__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB__TOTAL_FB_AVAILABLE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB__TOTAL_FB_CONSUMED_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__UVDSCH_OFFSET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__VCESCH_OFFSET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__GFXSCH_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__UVD1SCH_OFFSET__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__UVDSCH_OFFSET_MASK 0x000000FFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__VCESCH_OFFSET_MASK 0x0000FF00L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__GFXSCH_OFFSET_MASK 0x00FF0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__UVD1SCH_OFFSET_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE__P2P_OVER_XGMI_ENABLE_VF__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE__P2P_OVER_XGMI_ENABLE_PF__SHIFT 0x1f
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE__P2P_OVER_XGMI_ENABLE_VF_MASK 0x7FFFFFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE__P2P_OVER_XGMI_ENABLE_PF_MASK 0x80000000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB__VF0_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB__VF0_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB__VF0_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB__VF0_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB__VF1_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB__VF1_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB__VF1_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB__VF1_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB__VF2_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB__VF2_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB__VF2_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB__VF2_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB__VF3_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB__VF3_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB__VF3_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB__VF3_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB__VF4_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB__VF4_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB__VF4_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB__VF4_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB__VF5_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB__VF5_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB__VF5_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB__VF5_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB__VF6_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB__VF6_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB__VF6_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB__VF6_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB__VF7_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB__VF7_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB__VF7_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB__VF7_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB__VF8_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB__VF8_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB__VF8_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB__VF8_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB__VF9_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB__VF9_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB__VF9_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB__VF9_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB__VF10_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB__VF10_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB__VF10_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB__VF10_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB__VF11_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB__VF11_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB__VF11_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB__VF11_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB__VF12_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB__VF12_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB__VF12_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB__VF12_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB__VF13_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB__VF13_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB__VF13_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB__VF13_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB__VF14_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB__VF14_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB__VF14_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB__VF14_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB__VF15_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB__VF15_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB__VF15_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB__VF15_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB__VF16_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB__VF16_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB__VF16_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB__VF16_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB__VF17_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB__VF17_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB__VF17_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB__VF17_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB__VF18_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB__VF18_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB__VF18_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB__VF18_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB__VF19_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB__VF19_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB__VF19_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB__VF19_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB__VF20_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB__VF20_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB__VF20_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB__VF20_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB__VF21_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB__VF21_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB__VF21_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB__VF21_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB__VF22_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB__VF22_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB__VF22_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB__VF22_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB__VF23_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB__VF23_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB__VF23_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB__VF23_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB__VF24_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB__VF24_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB__VF24_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB__VF24_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB__VF25_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB__VF25_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB__VF25_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB__VF25_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB__VF26_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB__VF26_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB__VF26_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB__VF26_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB__VF27_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB__VF27_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB__VF27_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB__VF27_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB__VF28_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB__VF28_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB__VF28_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB__VF28_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB__VF29_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB__VF29_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB__VF29_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB__VF29_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB__VF30_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB__VF30_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB__VF30_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB__VF30_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW0__DW0__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW0__DW0_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW1__DW1__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW1__DW1_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW2__DW2__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW2__DW2_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW3
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW3__DW3__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW3__DW3_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW4__DW4__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW4__DW4_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW5__DW5__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW5__DW5_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW6
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW6__DW6__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW6__DW6_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW7
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW7__DW7__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW7__DW7_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW8__DW8__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW8__DW8_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW0__DW0__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW0__DW0_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW1__DW1__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW1__DW1_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW2__DW2__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW2__DW2_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW3
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW3__DW3__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW3__DW3_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW4__DW4__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW4__DW4_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW5__DW5__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW5__DW5_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW6
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW6__DW6__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW6__DW6_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW7
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW7__DW7__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW7__DW7_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW8__DW8__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW8__DW8_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW0__DW0__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW0__DW0_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW1__DW1__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW1__DW1_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW2__DW2__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW2__DW2_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW3
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW3__DW3__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW3__DW3_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW4__DW4__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW4__DW4_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW5__DW5__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW5__DW5_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW6
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW6__DW6__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW6__DW6_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW7
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW7__DW7__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW7__DW7_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW8__DW8__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW8__DW8_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW0__DW0__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW0__DW0_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW1__DW1__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW1__DW1_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW2__DW2__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW2__DW2_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW3
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW3__DW3__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW3__DW3_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW4__DW4__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW4__DW4_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW5__DW5__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW5__DW5_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW6
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW6__DW6__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW6__DW6_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW7
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW7__DW7__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW7__DW7_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW8__DW8__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW8__DW8_MASK 0xFFFFFFFFL
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf1_bifcfgdecp
++//BIF_CFG_DEV0_EPF1_0_VENDOR_ID
++#define BIF_CFG_DEV0_EPF1_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF1_0_DEVICE_ID
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF1_0_COMMAND
++#define BIF_CFG_DEV0_EPF1_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF1_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_COMMAND__AD_STEPPING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF1_0_COMMAND__SERR_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_COMMAND__FAST_B2B_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF1_0_COMMAND__INT_DIS__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF1_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF1_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF1_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF1_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF1_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF1_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF1_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_COMMAND__AD_STEPPING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF1_0_COMMAND__SERR_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF1_0_COMMAND__FAST_B2B_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF1_0_COMMAND__INT_DIS_MASK 0x0400L
++//BIF_CFG_DEV0_EPF1_0_STATUS
++#define BIF_CFG_DEV0_EPF1_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_STATUS__INT_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_STATUS__CAP_LIST__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_STATUS__PCI_66_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF1_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_STATUS__DEVSEL_TIMING__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF1_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF1_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF1_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF1_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF1_0_STATUS__INT_STATUS_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF1_0_STATUS__CAP_LIST_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF1_0_STATUS__PCI_66_CAP_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF1_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF1_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF1_0_STATUS__DEVSEL_TIMING_MASK 0x0600L
++#define BIF_CFG_DEV0_EPF1_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF1_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF1_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF1_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF1_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_REVISION_ID
++#define BIF_CFG_DEV0_EPF1_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF1_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
++//BIF_CFG_DEV0_EPF1_0_PROG_INTERFACE
++#define BIF_CFG_DEV0_EPF1_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF1_0_SUB_CLASS
++#define BIF_CFG_DEV0_EPF1_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF1_0_BASE_CLASS
++#define BIF_CFG_DEV0_EPF1_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF1_0_CACHE_LINE
++#define BIF_CFG_DEV0_EPF1_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF1_0_LATENCY
++#define BIF_CFG_DEV0_EPF1_0_LATENCY__LATENCY_TIMER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LATENCY__LATENCY_TIMER_MASK 0xFFL
++//BIF_CFG_DEV0_EPF1_0_HEADER
++#define BIF_CFG_DEV0_EPF1_0_HEADER__HEADER_TYPE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_HEADER__DEVICE_TYPE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF1_0_HEADER__HEADER_TYPE_MASK 0x7FL
++#define BIF_CFG_DEV0_EPF1_0_HEADER__DEVICE_TYPE_MASK 0x80L
++//BIF_CFG_DEV0_EPF1_0_BIST
++#define BIF_CFG_DEV0_EPF1_0_BIST__BIST_COMP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_BIST__BIST_STRT__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_BIST__BIST_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF1_0_BIST__BIST_COMP_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF1_0_BIST__BIST_STRT_MASK 0x40L
++#define BIF_CFG_DEV0_EPF1_0_BIST__BIST_CAP_MASK 0x80L
++//BIF_CFG_DEV0_EPF1_0_BASE_ADDR_1
++#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_BASE_ADDR_2
++#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_BASE_ADDR_3
++#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_BASE_ADDR_4
++#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_BASE_ADDR_5
++#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_BASE_ADDR_6
++#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_ADAPTER_ID
++#define BIF_CFG_DEV0_EPF1_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_ROM_BASE_ADDR
++#define BIF_CFG_DEV0_EPF1_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_CAP_PTR
++#define BIF_CFG_DEV0_EPF1_0_CAP_PTR__CAP_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL
++//BIF_CFG_DEV0_EPF1_0_INTERRUPT_LINE
++#define BIF_CFG_DEV0_EPF1_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF1_0_INTERRUPT_PIN
++#define BIF_CFG_DEV0_EPF1_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
++//BIF_CFG_DEV0_EPF1_0_MIN_GRANT
++#define BIF_CFG_DEV0_EPF1_0_MIN_GRANT__MIN_GNT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_MIN_GRANT__MIN_GNT_MASK 0xFFL
++//BIF_CFG_DEV0_EPF1_0_MAX_LATENCY
++#define BIF_CFG_DEV0_EPF1_0_MAX_LATENCY__MAX_LAT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_MAX_LATENCY__MAX_LAT_MASK 0xFFL
++//BIF_CFG_DEV0_EPF1_0_VENDOR_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_VENDOR_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_VENDOR_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_VENDOR_CAP_LIST__LENGTH__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_VENDOR_CAP_LIST__CAP_ID_MASK 0x000000FFL
++#define BIF_CFG_DEV0_EPF1_0_VENDOR_CAP_LIST__NEXT_PTR_MASK 0x0000FF00L
++#define BIF_CFG_DEV0_EPF1_0_VENDOR_CAP_LIST__LENGTH_MASK 0x00FF0000L
++//BIF_CFG_DEV0_EPF1_0_ADAPTER_ID_W
++#define BIF_CFG_DEV0_EPF1_0_ADAPTER_ID_W__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_ADAPTER_ID_W__SUBSYSTEM_ID__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_ADAPTER_ID_W__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_ADAPTER_ID_W__SUBSYSTEM_ID_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PMI_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_PMI_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PMI_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PMI_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF1_0_PMI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_PMI_CAP
++#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__VERSION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__PME_CLOCK__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__DEV_SPECIFIC_INIT__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__AUX_CURRENT__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__D1_SUPPORT__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__D2_SUPPORT__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__PME_SUPPORT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__VERSION_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__PME_CLOCK_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__DEV_SPECIFIC_INIT_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__AUX_CURRENT_MASK 0x01C0L
++#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__D1_SUPPORT_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__D2_SUPPORT_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__PME_SUPPORT_MASK 0xF800L
++//BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__POWER_STATE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__NO_SOFT_RESET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__PME_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__DATA_SELECT__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__DATA_SCALE__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__PME_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__B2_B3_SUPPORT__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__BUS_PWR_EN__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__PMI_DATA__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__POWER_STATE_MASK 0x00000003L
++#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__NO_SOFT_RESET_MASK 0x00000008L
++#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__PME_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__DATA_SELECT_MASK 0x00001E00L
++#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__DATA_SCALE_MASK 0x00006000L
++#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__PME_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__B2_B3_SUPPORT_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__BUS_PWR_EN_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__PMI_DATA_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_PCIE_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP__VERSION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP__VERSION_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
++//BIF_CFG_DEV0_EPF1_0_DEVICE_CAP
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
++//BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF1_0_LINK_CAP
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF1_0_LINK_CNTL
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__LINK_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__LINK_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
++//BIF_CFG_DEV0_EPF1_0_LINK_STATUS
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
++//BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS2
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF1_0_LINK_CAP2
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__RESERVED__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__RESERVED_MASK 0xFE000000L
++//BIF_CFG_DEV0_EPF1_0_LINK_CNTL2
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
++//BIF_CFG_DEV0_EPF1_0_LINK_STATUS2
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
++//BIF_CFG_DEV0_EPF1_0_SLOT_CAP2
++#define BIF_CFG_DEV0_EPF1_0_SLOT_CAP2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_SLOT_CNTL2
++#define BIF_CFG_DEV0_EPF1_0_SLOT_CNTL2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF1_0_SLOT_STATUS2
++#define BIF_CFG_DEV0_EPF1_0_SLOT_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF1_0_MSI_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF1_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL
++#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
++#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
++//BIF_CFG_DEV0_EPF1_0_MSI_MSG_ADDR_LO
++#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//BIF_CFG_DEV0_EPF1_0_MSI_MSG_ADDR_HI
++#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_MSI_MSG_DATA
++#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF1_0_MSI_MASK
++#define BIF_CFG_DEV0_EPF1_0_MSI_MASK__MSI_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_MSI_MSG_DATA_64
++#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF1_0_MSI_MASK_64
++#define BIF_CFG_DEV0_EPF1_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_MSI_PENDING
++#define BIF_CFG_DEV0_EPF1_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_MSI_PENDING_64
++#define BIF_CFG_DEV0_EPF1_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_MSIX_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF1_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_MSIX_MSG_CNTL
++#define BIF_CFG_DEV0_EPF1_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF1_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
++#define BIF_CFG_DEV0_EPF1_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF1_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_MSIX_TABLE
++#define BIF_CFG_DEV0_EPF1_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF1_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF1_0_MSIX_PBA
++#define BIF_CFG_DEV0_EPF1_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF1_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG1__EXT_VC_COUNT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG1__LOW_PRIORITY_EXT_VC_COUNT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG1__REF_CLK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG1__PORT_ARB_TABLE_ENTRY_SIZE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG1__EXT_VC_COUNT_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG1__LOW_PRIORITY_EXT_VC_COUNT_MASK 0x00000070L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG1__REF_CLK_MASK 0x00000300L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG1__PORT_ARB_TABLE_ENTRY_SIZE_MASK 0x00000C00L
++//BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG2__VC_ARB_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG2__VC_ARB_TABLE_OFFSET__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG2__VC_ARB_CAP_MASK 0x000000FFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG2__VC_ARB_TABLE_OFFSET_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CNTL__LOAD_VC_ARB_TABLE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CNTL__VC_ARB_SELECT__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CNTL__LOAD_VC_ARB_TABLE_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CNTL__VC_ARB_SELECT_MASK 0x000EL
++//BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_STATUS
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_STATUS__VC_ARB_TABLE_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_STATUS__VC_ARB_TABLE_STATUS_MASK 0x0001L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CAP__REJECT_SNOOP_TRANS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CAP__MAX_TIME_SLOTS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_CAP_MASK 0x000000FFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CAP__REJECT_SNOOP_TRANS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CAP__MAX_TIME_SLOTS_MASK 0x003F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC0__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC1_7__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__PORT_ARB_SELECT__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__VC_ID__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__VC_ENABLE__SHIFT 0x1f
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC0_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC1_7_MASK 0x000000FEL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__PORT_ARB_SELECT_MASK 0x000E0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__VC_ID_MASK 0x07000000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__VC_ENABLE_MASK 0x80000000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_STATUS
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_STATUS__VC_NEGOTIATION_PENDING__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_STATUS__VC_NEGOTIATION_PENDING_MASK 0x0002L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CAP__REJECT_SNOOP_TRANS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CAP__MAX_TIME_SLOTS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_CAP_MASK 0x000000FFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CAP__REJECT_SNOOP_TRANS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CAP__MAX_TIME_SLOTS_MASK 0x003F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC0__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC1_7__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__PORT_ARB_SELECT__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__VC_ID__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__VC_ENABLE__SHIFT 0x1f
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC0_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC1_7_MASK 0x000000FEL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__PORT_ARB_SELECT_MASK 0x000E0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__VC_ID_MASK 0x07000000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__VC_ENABLE_MASK 0x80000000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_STATUS
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_STATUS__VC_NEGOTIATION_PENDING__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_STATUS__VC_NEGOTIATION_PENDING_MASK 0x0002L
++//BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_DW1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_DW1__SERIAL_NUMBER_LO__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_DW1__SERIAL_NUMBER_LO_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_DW2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_DW2__SERIAL_NUMBER_HI__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_DW2__SERIAL_NUMBER_HI_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG3
++#define BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG3
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_BAR_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CNTL__BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CNTL__BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CNTL__BAR_INDEX_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CNTL__BAR_SIZE_MASK 0x3F00L
++//BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CNTL__BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CNTL__BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CNTL__BAR_INDEX_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CNTL__BAR_SIZE_MASK 0x3F00L
++//BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CNTL__BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CNTL__BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CNTL__BAR_INDEX_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CNTL__BAR_SIZE_MASK 0x3F00L
++//BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CNTL__BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CNTL__BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CNTL__BAR_INDEX_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CNTL__BAR_SIZE_MASK 0x3F00L
++//BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CNTL__BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CNTL__BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CNTL__BAR_INDEX_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CNTL__BAR_SIZE_MASK 0x3F00L
++//BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CNTL__BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CNTL__BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CNTL__BAR_INDEX_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CNTL__BAR_SIZE_MASK 0x3F00L
++//BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA_SELECT
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA_SELECT__DATA_SELECT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA_SELECT__DATA_SELECT_MASK 0xFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__BASE_POWER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__DATA_SCALE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__PM_SUB_STATE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__PM_STATE__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__TYPE__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__POWER_RAIL__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__BASE_POWER_MASK 0x000000FFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__DATA_SCALE_MASK 0x00000300L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__PM_SUB_STATE_MASK 0x00001C00L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__PM_STATE_MASK 0x00006000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__TYPE_MASK 0x00038000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__POWER_RAIL_MASK 0x001C0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_CAP__SYSTEM_ALLOCATED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_CAP__SYSTEM_ALLOCATED_MASK 0x01L
++//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__SUBSTATE_MAX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__TRANS_LAT_UNIT__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__PWR_ALLOC_SCALE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__TRANS_LAT_VAL_0__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__TRANS_LAT_VAL_1__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__SUBSTATE_MAX_MASK 0x0000001FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__TRANS_LAT_UNIT_MASK 0x00000300L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__PWR_ALLOC_SCALE_MASK 0x00003000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__TRANS_LAT_VAL_0_MASK 0x00FF0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__TRANS_LAT_VAL_1_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_LATENCY_INDICATOR
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_STATUS
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_STATUS__SUBSTATE_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_STATUS__SUBSTATE_CNTL_ENABLED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_STATUS__SUBSTATE_STATUS_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_STATUS__SUBSTATE_CNTL_ENABLED_MASK 0x0100L
++//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CNTL__SUBSTATE_CNTL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CNTL__SUBSTATE_CNTL_MASK 0x1FL
++//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_SECONDARY_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SECONDARY_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SECONDARY_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_LINK_CNTL3
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LINK_CNTL3__PERFORM_EQUALIZATION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LINK_CNTL3__LINK_EQUALIZATION_REQ_INT_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LINK_CNTL3__RESERVED__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LINK_CNTL3__PERFORM_EQUALIZATION_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LINK_CNTL3__LINK_EQUALIZATION_REQ_INT_EN_MASK 0x00000002L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LINK_CNTL3__RESERVED_MASK 0xFFFFFFFCL
++//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_ERROR_STATUS
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_ERROR_STATUS__LANE_ERROR_STATUS_BITS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_ERROR_STATUS__RESERVED__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_ERROR_STATUS__LANE_ERROR_STATUS_BITS_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_ERROR_STATUS__RESERVED_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_0_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_0_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_0_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_1_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_1_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_1_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_2_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_2_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_2_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_3_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_3_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_3_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_4_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_4_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_4_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_5_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_5_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_5_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_6_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_6_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_6_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_7_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_7_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_7_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_8_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_8_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_8_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_9_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_9_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_9_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_10_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_10_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_10_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_11_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_11_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_11_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_12_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_12_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_12_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_13_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_13_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_13_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_14_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_14_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_14_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_15_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_15_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_15_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_ACS_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__SOURCE_VALIDATION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__TRANSLATION_BLOCKING__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__UPSTREAM_FORWARDING__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__P2P_EGRESS_CONTROL__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__SOURCE_VALIDATION_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__TRANSLATION_BLOCKING_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__UPSTREAM_FORWARDING_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__P2P_EGRESS_CONTROL_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN_MASK 0x0040L
++//BIF_CFG_DEV0_EPF1_0_PCIE_ATS_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CNTL__STU__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CNTL__STU_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_CNTL__PRI_ENABLE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_CNTL__PRI_RESET__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_CNTL__PRI_ENABLE_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_CNTL__PRI_RESET_MASK 0x0002L
++//BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_STATUS
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_STATUS__RESPONSE_FAILURE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_STATUS__UNEXPECTED_PAGE_REQ_GRP_INDEX__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_STATUS__STOPPED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_STATUS__PRG_RESPONSE_PASID_REQUIRED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_STATUS__RESPONSE_FAILURE_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_STATUS__UNEXPECTED_PAGE_REQ_GRP_INDEX_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_STATUS__STOPPED_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_STATUS__PRG_RESPONSE_PASID_REQUIRED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_OUTSTAND_PAGE_REQ_CAPACITY
++#define BIF_CFG_DEV0_EPF1_0_PCIE_OUTSTAND_PAGE_REQ_CAPACITY__OUTSTAND_PAGE_REQ_CAPACITY__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_OUTSTAND_PAGE_REQ_CAPACITY__OUTSTAND_PAGE_REQ_CAPACITY_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_OUTSTAND_PAGE_REQ_ALLOC
++#define BIF_CFG_DEV0_EPF1_0_PCIE_OUTSTAND_PAGE_REQ_ALLOC__OUTSTAND_PAGE_REQ_ALLOC__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_OUTSTAND_PAGE_REQ_ALLOC__OUTSTAND_PAGE_REQ_ALLOC_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_PASID_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CAP__PASID_EXE_PERMISSION_SUPPORTED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CAP__PASID_PRIV_MODE_SUPPORTED__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CAP__MAX_PASID_WIDTH__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CAP__PASID_EXE_PERMISSION_SUPPORTED_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CAP__PASID_PRIV_MODE_SUPPORTED_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CAP__MAX_PASID_WIDTH_MASK 0x1F00L
++//BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CNTL__PASID_ENABLE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CNTL__PASID_EXE_PERMISSION_ENABLE__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CNTL__PASID_PRIV_MODE_SUPPORTED_ENABLE__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CNTL__PASID_ENABLE_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CNTL__PASID_EXE_PERMISSION_ENABLE_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CNTL__PASID_PRIV_MODE_SUPPORTED_ENABLE_MASK 0x0004L
++//BIF_CFG_DEV0_EPF1_0_PCIE_MC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_MC_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_CAP__MC_MAX_GROUP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_CAP__MC_WIN_SIZE_REQ__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_CAP__MC_ECRC_REGEN_SUPP__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_CAP__MC_MAX_GROUP_MASK 0x003FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_CAP__MC_WIN_SIZE_REQ_MASK 0x3F00L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_CAP__MC_ECRC_REGEN_SUPP_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_MC_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_CNTL__MC_NUM_GROUP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_CNTL__MC_ENABLE__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_CNTL__MC_NUM_GROUP_MASK 0x003FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_CNTL__MC_ENABLE_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_MC_ADDR0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ADDR0__MC_INDEX_POS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ADDR0__MC_BASE_ADDR_0__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ADDR0__MC_INDEX_POS_MASK 0x0000003FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ADDR0__MC_BASE_ADDR_0_MASK 0xFFFFF000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_MC_ADDR1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ADDR1__MC_BASE_ADDR_1__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ADDR1__MC_BASE_ADDR_1_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_MC_RCV0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_RCV0__MC_RECEIVE_0__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_RCV0__MC_RECEIVE_0_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_MC_RCV1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_RCV1__MC_RECEIVE_1__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_RCV1__MC_RECEIVE_1_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_ALL0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_ALL0__MC_BLOCK_ALL_0__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_ALL0__MC_BLOCK_ALL_0_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_ALL1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_ALL1__MC_BLOCK_ALL_1__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_ALL1__MC_BLOCK_ALL_1_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_UNTRANSLATED_0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_UNTRANSLATED_0__MC_BLOCK_UNTRANSLATED_0__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_UNTRANSLATED_0__MC_BLOCK_UNTRANSLATED_0_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_UNTRANSLATED_1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_UNTRANSLATED_1__MC_BLOCK_UNTRANSLATED_1__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_UNTRANSLATED_1__MC_BLOCK_UNTRANSLATED_1_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_LTR_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_LTR_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_VALUE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_SCALE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_VALUE__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_SCALE__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_VALUE_MASK 0x000003FFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_SCALE_MASK 0x00001C00L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_VALUE_MASK 0x03FF0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_SCALE_MASK 0x1C000000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_ARI_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
++//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CAP__SRIOV_ARI_CAP_HIERARCHY_PRESERVED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CAP__SRIOV_VF_TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_INTR_MSG_NUM__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_CAP_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CAP__SRIOV_ARI_CAP_HIERARCHY_PRESERVED_MASK 0x00000002L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CAP__SRIOV_VF_TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00000004L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_INTR_MSG_NUM_MASK 0xFFE00000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_VF_ENABLE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_ENABLE__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_INTR_ENABLE__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MSE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_ARI_CAP_HIERARCHY__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_VF_TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_VF_ENABLE_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_ENABLE_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_INTR_ENABLE_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MSE_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_ARI_CAP_HIERARCHY_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_VF_TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x0020L
++//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_STATUS
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_STATUS__SRIOV_VF_MIGRATION_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_STATUS__SRIOV_VF_MIGRATION_STATUS_MASK 0x0001L
++//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_INITIAL_VFS
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_INITIAL_VFS__SRIOV_INITIAL_VFS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_INITIAL_VFS__SRIOV_INITIAL_VFS_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_TOTAL_VFS
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_TOTAL_VFS__SRIOV_TOTAL_VFS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_TOTAL_VFS__SRIOV_TOTAL_VFS_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_NUM_VFS
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_NUM_VFS__SRIOV_NUM_VFS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_NUM_VFS__SRIOV_NUM_VFS_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_FUNC_DEP_LINK
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_FUNC_DEP_LINK__SRIOV_FUNC_DEP_LINK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_FUNC_DEP_LINK__SRIOV_FUNC_DEP_LINK_MASK 0x00FFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_FIRST_VF_OFFSET
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_FIRST_VF_OFFSET__SRIOV_FIRST_VF_OFFSET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_FIRST_VF_OFFSET__SRIOV_FIRST_VF_OFFSET_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_STRIDE
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_STRIDE__SRIOV_VF_STRIDE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_STRIDE__SRIOV_VF_STRIDE_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_DEVICE_ID
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_DEVICE_ID__SRIOV_VF_DEVICE_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_DEVICE_ID__SRIOV_VF_DEVICE_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_SUPPORTED_PAGE_SIZE
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_SUPPORTED_PAGE_SIZE__SRIOV_SUPPORTED_PAGE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_SUPPORTED_PAGE_SIZE__SRIOV_SUPPORTED_PAGE_SIZE_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_SYSTEM_PAGE_SIZE
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_SYSTEM_PAGE_SIZE__SRIOV_SYSTEM_PAGE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_SYSTEM_PAGE_SIZE__SRIOV_SYSTEM_PAGE_SIZE_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_0__VF_BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_0__VF_BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_1__VF_BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_1__VF_BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_2__VF_BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_2__VF_BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_3
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_3__VF_BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_3__VF_BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_4__VF_BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_4__VF_BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_5__VF_BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_5__VF_BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_BIF__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_BIF_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_NO_ST_MODE_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_INT_VEC_MODE_SUPPORTED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_DEV_SPC_MODE_SUPPORTED__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_EXTND_TPH_REQR_SUPPORED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_ST_TABLE_LOCATION__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_ST_TABLE_SIZE__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_NO_ST_MODE_SUPPORTED_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_INT_VEC_MODE_SUPPORTED_MASK 0x00000002L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_DEV_SPC_MODE_SUPPORTED_MASK 0x00000004L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_EXTND_TPH_REQR_SUPPORED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_ST_TABLE_LOCATION_MASK 0x00000600L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_ST_TABLE_SIZE_MASK 0x07FF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CNTL__TPH_REQR_ST_MODE_SEL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CNTL__TPH_REQR_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CNTL__TPH_REQR_ST_MODE_SEL_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CNTL__TPH_REQR_EN_MASK 0x00000300L
++//BIF_CFG_DEV0_EPF1_0_PCIE_DLF_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DLF_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DLF_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DLF_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DLF_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DLF_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DLF_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_DATA_LINK_FEATURE_CAP
++#define BIF_CFG_DEV0_EPF1_0_DATA_LINK_FEATURE_CAP__LOCAL_DLF_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_DATA_LINK_FEATURE_CAP__DLF_EXCHANGE_ENABLE__SHIFT 0x1f
++#define BIF_CFG_DEV0_EPF1_0_DATA_LINK_FEATURE_CAP__LOCAL_DLF_SUPPORTED_MASK 0x007FFFFFL
++#define BIF_CFG_DEV0_EPF1_0_DATA_LINK_FEATURE_CAP__DLF_EXCHANGE_ENABLE_MASK 0x80000000L
++//BIF_CFG_DEV0_EPF1_0_DATA_LINK_FEATURE_STATUS
++#define BIF_CFG_DEV0_EPF1_0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_VALID__SHIFT 0x1f
++#define BIF_CFG_DEV0_EPF1_0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_MASK 0x007FFFFFL
++#define BIF_CFG_DEV0_EPF1_0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_VALID_MASK 0x80000000L
++//BIF_CFG_DEV0_EPF1_0_PHY_16GT_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_PHY_16GT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PHY_16GT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PHY_16GT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PHY_16GT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PHY_16GT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PHY_16GT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_LINK_CAP_16GT
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP_16GT__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP_16GT__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_LINK_CNTL_16GT
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL_16GT__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL_16GT__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_LINK_STATUS_16GT
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS_16GT__EQUALIZATION_COMPLETE_16GT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS_16GT__EQUALIZATION_PHASE1_SUCCESS_16GT__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS_16GT__EQUALIZATION_PHASE2_SUCCESS_16GT__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS_16GT__EQUALIZATION_PHASE3_SUCCESS_16GT__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS_16GT__LINK_EQUALIZATION_REQUEST_16GT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS_16GT__EQUALIZATION_COMPLETE_16GT_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS_16GT__EQUALIZATION_PHASE1_SUCCESS_16GT_MASK 0x00000002L
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS_16GT__EQUALIZATION_PHASE2_SUCCESS_16GT_MASK 0x00000004L
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS_16GT__EQUALIZATION_PHASE3_SUCCESS_16GT_MASK 0x00000008L
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS_16GT__LINK_EQUALIZATION_REQUEST_16GT_MASK 0x00000010L
++//BIF_CFG_DEV0_EPF1_0_LOCAL_PARITY_MISMATCH_STATUS_16GT
++#define BIF_CFG_DEV0_EPF1_0_LOCAL_PARITY_MISMATCH_STATUS_16GT__LOCAL_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LOCAL_PARITY_MISMATCH_STATUS_16GT__LOCAL_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF1_0_RTM1_PARITY_MISMATCH_STATUS_16GT
++#define BIF_CFG_DEV0_EPF1_0_RTM1_PARITY_MISMATCH_STATUS_16GT__RTM1_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_RTM1_PARITY_MISMATCH_STATUS_16GT__RTM1_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF1_0_RTM2_PARITY_MISMATCH_STATUS_16GT
++#define BIF_CFG_DEV0_EPF1_0_RTM2_PARITY_MISMATCH_STATUS_16GT__RTM2_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_RTM2_PARITY_MISMATCH_STATUS_16GT__RTM2_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF1_0_LANE_0_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF1_0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF1_0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF1_0_LANE_1_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF1_0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF1_0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF1_0_LANE_2_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF1_0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF1_0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF1_0_LANE_3_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF1_0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF1_0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF1_0_LANE_4_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF1_0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF1_0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF1_0_LANE_5_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF1_0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF1_0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF1_0_LANE_6_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF1_0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF1_0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF1_0_LANE_7_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF1_0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF1_0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF1_0_LANE_8_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF1_0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF1_0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF1_0_LANE_9_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF1_0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF1_0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF1_0_LANE_10_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF1_0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF1_0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF1_0_LANE_11_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF1_0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF1_0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF1_0_LANE_12_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF1_0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF1_0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF1_0_LANE_13_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF1_0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF1_0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF1_0_LANE_14_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF1_0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF1_0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF1_0_LANE_15_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF1_0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF1_0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF1_0_MARGINING_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_MARGINING_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_MARGINING_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_MARGINING_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_MARGINING_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_MARGINING_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_MARGINING_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_MARGINING_PORT_CAP
++#define BIF_CFG_DEV0_EPF1_0_MARGINING_PORT_CAP__MARGINING_USES_SOFTWARE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_MARGINING_PORT_CAP__MARGINING_USES_SOFTWARE_MASK 0x0001L
++//BIF_CFG_DEV0_EPF1_0_MARGINING_PORT_STATUS
++#define BIF_CFG_DEV0_EPF1_0_MARGINING_PORT_STATUS__MARGINING_READY__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_MARGINING_PORT_STATUS__MARGINING_SOFTWARE_READY__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_MARGINING_PORT_STATUS__MARGINING_READY_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF1_0_MARGINING_PORT_STATUS__MARGINING_SOFTWARE_READY_MASK 0x0002L
++//BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR1_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR1_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR1_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR1_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_INDEX_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_SIZE_MASK 0x00003F00L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR2_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR2_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR2_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR2_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_INDEX_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_SIZE_MASK 0x00003F00L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR3_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR3_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR3_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR3_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_INDEX_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_SIZE_MASK 0x00003F00L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR4_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR4_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR4_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR4_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_INDEX_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_SIZE_MASK 0x00003F00L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR5_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR5_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR5_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR5_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_INDEX_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_SIZE_MASK 0x00003F00L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR6_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR6_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR6_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR6_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_INDEX_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_SIZE_MASK 0x00003F00L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_REV__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_LENGTH__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_REV_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_LENGTH_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW__VF_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW__VF_NUM__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW__VF_EN_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW__VF_NUM_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_CMD_COMPLETE_INTR_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_HANG_NEED_FLR_INTR_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_CMD_COMPLETE_INTR_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_HANG_NEED_FLR_INTR_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_CMD_COMPLETE_INTR_EN__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_HANG_NEED_FLR_INTR_EN__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_CMD_COMPLETE_INTR_EN__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_HANG_NEED_FLR_INTR_EN__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__HVVM_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__HVVM_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_CMD_COMPLETE_INTR_EN_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00000002L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_HANG_NEED_FLR_INTR_EN_MASK 0x00000004L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00000008L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_CMD_COMPLETE_INTR_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_HANG_NEED_FLR_INTR_EN_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_CMD_COMPLETE_INTR_EN_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_HANG_NEED_FLR_INTR_EN_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_CMD_COMPLETE_INTR_EN_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_HANG_NEED_FLR_INTR_EN_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__HVVM_MAILBOX_TRN_ACK_INTR_EN_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__HVVM_MAILBOX_RCV_VALID_INTR_EN_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_CMD_COMPLETE_INTR_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_HANG_NEED_FLR_INTR_STATUS__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_CMD_COMPLETE_INTR_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_HANG_NEED_FLR_INTR_STATUS__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_CMD_COMPLETE_INTR_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_HANG_NEED_FLR_INTR_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_CMD_COMPLETE_INTR_STATUS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_HANG_NEED_FLR_INTR_STATUS__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__HVVM_MAILBOX_TRN_ACK_INTR_STATUS__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__HVVM_MAILBOX_RCV_VALID_INTR_STATUS__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_CMD_COMPLETE_INTR_STATUS_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00000002L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_HANG_NEED_FLR_INTR_STATUS_MASK 0x00000004L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00000008L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_CMD_COMPLETE_INTR_STATUS_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_HANG_NEED_FLR_INTR_STATUS_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_CMD_COMPLETE_INTR_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_HANG_NEED_FLR_INTR_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_CMD_COMPLETE_INTR_STATUS_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_HANG_NEED_FLR_INTR_STATUS_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__HVVM_MAILBOX_TRN_ACK_INTR_STATUS_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__HVVM_MAILBOX_RCV_VALID_INTR_STATUS_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_RESET_CONTROL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_RESET_CONTROL__SOFT_PF_FLR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_RESET_CONTROL__SOFT_PF_FLR_MASK 0x0001L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__VF_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__TRN_MSG_DATA__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__TRN_MSG_VALID__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__RCV_MSG_DATA__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__RCV_MSG_ACK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__VF_INDEX_MASK 0x000000FFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__TRN_MSG_DATA_MASK 0x00000F00L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__TRN_MSG_VALID_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__RCV_MSG_DATA_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__RCV_MSG_ACK_MASK 0x01000000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF0_TRN_ACK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF0_RCV_VALID__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF1_TRN_ACK__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF1_RCV_VALID__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF2_TRN_ACK__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF2_RCV_VALID__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF3_TRN_ACK__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF3_RCV_VALID__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF4_TRN_ACK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF4_RCV_VALID__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF5_TRN_ACK__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF5_RCV_VALID__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF6_TRN_ACK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF6_RCV_VALID__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF7_TRN_ACK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF7_RCV_VALID__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF8_TRN_ACK__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF8_RCV_VALID__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF9_TRN_ACK__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF9_RCV_VALID__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF10_TRN_ACK__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF10_RCV_VALID__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF11_TRN_ACK__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF11_RCV_VALID__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF12_TRN_ACK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF12_RCV_VALID__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF13_TRN_ACK__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF13_RCV_VALID__SHIFT 0x1b
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF14_TRN_ACK__SHIFT 0x1c
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF14_RCV_VALID__SHIFT 0x1d
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF15_TRN_ACK__SHIFT 0x1e
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF15_RCV_VALID__SHIFT 0x1f
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF0_TRN_ACK_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF0_RCV_VALID_MASK 0x00000002L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF1_TRN_ACK_MASK 0x00000004L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF1_RCV_VALID_MASK 0x00000008L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF2_TRN_ACK_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF2_RCV_VALID_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF3_TRN_ACK_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF3_RCV_VALID_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF4_TRN_ACK_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF4_RCV_VALID_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF5_TRN_ACK_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF5_RCV_VALID_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF6_TRN_ACK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF6_RCV_VALID_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF7_TRN_ACK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF7_RCV_VALID_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF8_TRN_ACK_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF8_RCV_VALID_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF9_TRN_ACK_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF9_RCV_VALID_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF10_TRN_ACK_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF10_RCV_VALID_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF11_TRN_ACK_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF11_RCV_VALID_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF12_TRN_ACK_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF12_RCV_VALID_MASK 0x02000000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF13_TRN_ACK_MASK 0x04000000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF13_RCV_VALID_MASK 0x08000000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF14_TRN_ACK_MASK 0x10000000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF14_RCV_VALID_MASK 0x20000000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF15_TRN_ACK_MASK 0x40000000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF15_RCV_VALID_MASK 0x80000000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF16_TRN_ACK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF16_RCV_VALID__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF17_TRN_ACK__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF17_RCV_VALID__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF18_TRN_ACK__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF18_RCV_VALID__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF19_TRN_ACK__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF19_RCV_VALID__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF20_TRN_ACK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF20_RCV_VALID__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF21_TRN_ACK__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF21_RCV_VALID__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF22_TRN_ACK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF22_RCV_VALID__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF23_TRN_ACK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF23_RCV_VALID__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF24_TRN_ACK__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF24_RCV_VALID__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF25_TRN_ACK__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF25_RCV_VALID__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF26_TRN_ACK__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF26_RCV_VALID__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF27_TRN_ACK__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF27_RCV_VALID__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF28_TRN_ACK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF28_RCV_VALID__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF29_TRN_ACK__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF29_RCV_VALID__SHIFT 0x1b
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF30_TRN_ACK__SHIFT 0x1c
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF30_RCV_VALID__SHIFT 0x1d
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__PF_TRN_ACK__SHIFT 0x1e
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__PF_RCV_VALID__SHIFT 0x1f
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF16_TRN_ACK_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF16_RCV_VALID_MASK 0x00000002L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF17_TRN_ACK_MASK 0x00000004L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF17_RCV_VALID_MASK 0x00000008L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF18_TRN_ACK_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF18_RCV_VALID_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF19_TRN_ACK_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF19_RCV_VALID_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF20_TRN_ACK_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF20_RCV_VALID_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF21_TRN_ACK_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF21_RCV_VALID_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF22_TRN_ACK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF22_RCV_VALID_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF23_TRN_ACK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF23_RCV_VALID_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF24_TRN_ACK_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF24_RCV_VALID_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF25_TRN_ACK_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF25_RCV_VALID_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF26_TRN_ACK_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF26_RCV_VALID_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF27_TRN_ACK_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF27_RCV_VALID_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF28_TRN_ACK_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF28_RCV_VALID_MASK 0x02000000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF29_TRN_ACK_MASK 0x04000000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF29_RCV_VALID_MASK 0x08000000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF30_TRN_ACK_MASK 0x10000000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF30_RCV_VALID_MASK 0x20000000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__PF_TRN_ACK_MASK 0x40000000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__PF_RCV_VALID_MASK 0x80000000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__CONTEXT_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__LOC__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__CONTEXT_OFFSET__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__CONTEXT_SIZE_MASK 0x0000007FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__LOC_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__CONTEXT_OFFSET_MASK 0xFFFFFC00L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB__TOTAL_FB_AVAILABLE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB__TOTAL_FB_CONSUMED__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB__TOTAL_FB_AVAILABLE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB__TOTAL_FB_CONSUMED_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__UVDSCH_OFFSET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__VCESCH_OFFSET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__GFXSCH_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__UVD1SCH_OFFSET__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__UVDSCH_OFFSET_MASK 0x000000FFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__VCESCH_OFFSET_MASK 0x0000FF00L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__GFXSCH_OFFSET_MASK 0x00FF0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__UVD1SCH_OFFSET_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE__P2P_OVER_XGMI_ENABLE_VF__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE__P2P_OVER_XGMI_ENABLE_PF__SHIFT 0x1f
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE__P2P_OVER_XGMI_ENABLE_VF_MASK 0x7FFFFFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE__P2P_OVER_XGMI_ENABLE_PF_MASK 0x80000000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB__VF0_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB__VF0_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB__VF0_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB__VF0_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB__VF1_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB__VF1_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB__VF1_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB__VF1_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB__VF2_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB__VF2_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB__VF2_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB__VF2_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB__VF3_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB__VF3_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB__VF3_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB__VF3_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB__VF4_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB__VF4_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB__VF4_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB__VF4_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB__VF5_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB__VF5_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB__VF5_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB__VF5_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB__VF6_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB__VF6_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB__VF6_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB__VF6_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB__VF7_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB__VF7_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB__VF7_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB__VF7_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB__VF8_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB__VF8_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB__VF8_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB__VF8_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB__VF9_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB__VF9_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB__VF9_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB__VF9_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB__VF10_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB__VF10_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB__VF10_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB__VF10_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB__VF11_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB__VF11_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB__VF11_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB__VF11_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB__VF12_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB__VF12_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB__VF12_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB__VF12_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB__VF13_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB__VF13_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB__VF13_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB__VF13_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB__VF14_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB__VF14_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB__VF14_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB__VF14_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB__VF15_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB__VF15_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB__VF15_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB__VF15_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB__VF16_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB__VF16_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB__VF16_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB__VF16_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB__VF17_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB__VF17_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB__VF17_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB__VF17_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB__VF18_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB__VF18_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB__VF18_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB__VF18_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB__VF19_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB__VF19_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB__VF19_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB__VF19_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB__VF20_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB__VF20_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB__VF20_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB__VF20_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB__VF21_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB__VF21_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB__VF21_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB__VF21_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB__VF22_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB__VF22_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB__VF22_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB__VF22_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB__VF23_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB__VF23_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB__VF23_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB__VF23_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB__VF24_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB__VF24_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB__VF24_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB__VF24_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB__VF25_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB__VF25_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB__VF25_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB__VF25_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB__VF26_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB__VF26_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB__VF26_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB__VF26_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB__VF27_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB__VF27_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB__VF27_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB__VF27_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB__VF28_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB__VF28_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB__VF28_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB__VF28_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB__VF29_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB__VF29_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB__VF29_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB__VF29_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB__VF30_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB__VF30_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB__VF30_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB__VF30_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW0__DW0__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW0__DW0_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW1__DW1__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW1__DW1_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW2__DW2__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW2__DW2_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW3
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW3__DW3__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW3__DW3_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW4__DW4__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW4__DW4_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW5__DW5__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW5__DW5_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW6
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW6__DW6__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW6__DW6_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW7
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW7__DW7__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW7__DW7_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW8__DW8__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW8__DW8_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW0__DW0__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW0__DW0_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW1__DW1__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW1__DW1_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW2__DW2__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW2__DW2_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW3
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW3__DW3__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW3__DW3_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW4__DW4__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW4__DW4_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW5__DW5__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW5__DW5_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW6
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW6__DW6__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW6__DW6_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW7
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW7__DW7__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW7__DW7_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW8__DW8__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW8__DW8_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW0__DW0__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW0__DW0_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW1__DW1__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW1__DW1_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW2__DW2__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW2__DW2_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW3
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW3__DW3__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW3__DW3_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW4__DW4__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW4__DW4_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW5__DW5__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW5__DW5_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW6
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW6__DW6__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW6__DW6_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW7
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW7__DW7__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW7__DW7_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW8__DW8__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW8__DW8_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW0__DW0__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW0__DW0_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW1__DW1__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW1__DW1_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW2__DW2__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW2__DW2_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW3
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW3__DW3__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW3__DW3_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW4__DW4__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW4__DW4_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW5__DW5__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW5__DW5_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW6
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW6__DW6__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW6__DW6_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW7
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW7__DW7__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW7__DW7_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW8__DW8__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW8__DW8_MASK 0xFFFFFFFFL
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_swds_bifcfgdecp
++//BIF_CFG_DEV0_SWDS0_VENDOR_ID
++#define BIF_CFG_DEV0_SWDS0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_SWDS0_DEVICE_ID
++#define BIF_CFG_DEV0_SWDS0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_SWDS0_COMMAND
++#define BIF_CFG_DEV0_SWDS0_COMMAND__IOEN_DN__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_COMMAND__MEMEN_DN__SHIFT 0x1
++#define BIF_CFG_DEV0_SWDS0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_SWDS0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_SWDS0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_COMMAND__AD_STEPPING__SHIFT 0x7
++#define BIF_CFG_DEV0_SWDS0_COMMAND__SERR_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_COMMAND__FAST_B2B_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_SWDS0_COMMAND__INT_DIS__SHIFT 0xa
++#define BIF_CFG_DEV0_SWDS0_COMMAND__IOEN_DN_MASK 0x0001L
++#define BIF_CFG_DEV0_SWDS0_COMMAND__MEMEN_DN_MASK 0x0002L
++#define BIF_CFG_DEV0_SWDS0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_SWDS0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_SWDS0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_SWDS0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_SWDS0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_COMMAND__AD_STEPPING_MASK 0x0080L
++#define BIF_CFG_DEV0_SWDS0_COMMAND__SERR_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_SWDS0_COMMAND__FAST_B2B_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_SWDS0_COMMAND__INT_DIS_MASK 0x0400L
++//BIF_CFG_DEV0_SWDS0_STATUS
++#define BIF_CFG_DEV0_SWDS0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_STATUS__INT_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_STATUS__CAP_LIST__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_STATUS__PCI_66_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_SWDS0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
++#define BIF_CFG_DEV0_SWDS0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_STATUS__DEVSEL_TIMING__SHIFT 0x9
++#define BIF_CFG_DEV0_SWDS0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
++#define BIF_CFG_DEV0_SWDS0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
++#define BIF_CFG_DEV0_SWDS0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
++#define BIF_CFG_DEV0_SWDS0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
++#define BIF_CFG_DEV0_SWDS0_STATUS__INT_STATUS_MASK 0x0008L
++#define BIF_CFG_DEV0_SWDS0_STATUS__CAP_LIST_MASK 0x0010L
++#define BIF_CFG_DEV0_SWDS0_STATUS__PCI_66_CAP_MASK 0x0020L
++#define BIF_CFG_DEV0_SWDS0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
++#define BIF_CFG_DEV0_SWDS0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
++#define BIF_CFG_DEV0_SWDS0_STATUS__DEVSEL_TIMING_MASK 0x0600L
++#define BIF_CFG_DEV0_SWDS0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
++#define BIF_CFG_DEV0_SWDS0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
++#define BIF_CFG_DEV0_SWDS0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
++#define BIF_CFG_DEV0_SWDS0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
++#define BIF_CFG_DEV0_SWDS0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
++//BIF_CFG_DEV0_SWDS0_REVISION_ID
++#define BIF_CFG_DEV0_SWDS0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
++#define BIF_CFG_DEV0_SWDS0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
++//BIF_CFG_DEV0_SWDS0_PROG_INTERFACE
++#define BIF_CFG_DEV0_SWDS0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
++//BIF_CFG_DEV0_SWDS0_SUB_CLASS
++#define BIF_CFG_DEV0_SWDS0_SUB_CLASS__SUB_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_SUB_CLASS__SUB_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_SWDS0_BASE_CLASS
++#define BIF_CFG_DEV0_SWDS0_BASE_CLASS__BASE_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_BASE_CLASS__BASE_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_SWDS0_CACHE_LINE
++#define BIF_CFG_DEV0_SWDS0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
++//BIF_CFG_DEV0_SWDS0_LATENCY
++#define BIF_CFG_DEV0_SWDS0_LATENCY__LATENCY_TIMER__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LATENCY__LATENCY_TIMER_MASK 0xFFL
++//BIF_CFG_DEV0_SWDS0_HEADER
++#define BIF_CFG_DEV0_SWDS0_HEADER__HEADER_TYPE__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_HEADER__DEVICE_TYPE__SHIFT 0x7
++#define BIF_CFG_DEV0_SWDS0_HEADER__HEADER_TYPE_MASK 0x7FL
++#define BIF_CFG_DEV0_SWDS0_HEADER__DEVICE_TYPE_MASK 0x80L
++//BIF_CFG_DEV0_SWDS0_BIST
++#define BIF_CFG_DEV0_SWDS0_BIST__BIST_COMP__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_BIST__BIST_STRT__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_BIST__BIST_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_SWDS0_BIST__BIST_COMP_MASK 0x0FL
++#define BIF_CFG_DEV0_SWDS0_BIST__BIST_STRT_MASK 0x40L
++#define BIF_CFG_DEV0_SWDS0_BIST__BIST_CAP_MASK 0x80L
++//BIF_CFG_DEV0_SWDS0_BASE_ADDR_1
++#define BIF_CFG_DEV0_SWDS0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_SWDS0_SUB_BUS_NUMBER_LATENCY
++#define BIF_CFG_DEV0_SWDS0_SUB_BUS_NUMBER_LATENCY__PRIMARY_BUS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_SUB_BUS_NUMBER_LATENCY__SECONDARY_BUS__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_SUB_BUS_NUMBER_LATENCY__SUB_BUS_NUM__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_SUB_BUS_NUMBER_LATENCY__SECONDARY_LATENCY_TIMER__SHIFT 0x18
++#define BIF_CFG_DEV0_SWDS0_SUB_BUS_NUMBER_LATENCY__PRIMARY_BUS_MASK 0x000000FFL
++#define BIF_CFG_DEV0_SWDS0_SUB_BUS_NUMBER_LATENCY__SECONDARY_BUS_MASK 0x0000FF00L
++#define BIF_CFG_DEV0_SWDS0_SUB_BUS_NUMBER_LATENCY__SUB_BUS_NUM_MASK 0x00FF0000L
++#define BIF_CFG_DEV0_SWDS0_SUB_BUS_NUMBER_LATENCY__SECONDARY_LATENCY_TIMER_MASK 0xFF000000L
++//BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT
++#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT__IO_BASE_TYPE__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT__IO_BASE__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT__IO_LIMIT_TYPE__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT__IO_LIMIT__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT__IO_BASE_TYPE_MASK 0x000FL
++#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT__IO_BASE_MASK 0x00F0L
++#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT__IO_LIMIT_TYPE_MASK 0x0F00L
++#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT__IO_LIMIT_MASK 0xF000L
++//BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS
++#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__PCI_66_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
++#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__DEVSEL_TIMING__SHIFT 0x9
++#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
++#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
++#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__RECEIVED_SYSTEM_ERROR__SHIFT 0xe
++#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__PCI_66_CAP_MASK 0x0020L
++#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
++#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
++#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__DEVSEL_TIMING_MASK 0x0600L
++#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
++#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
++#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
++#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__RECEIVED_SYSTEM_ERROR_MASK 0x4000L
++#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
++//BIF_CFG_DEV0_SWDS0_MEM_BASE_LIMIT
++#define BIF_CFG_DEV0_SWDS0_MEM_BASE_LIMIT__MEM_BASE_TYPE__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_MEM_BASE_LIMIT__MEM_BASE_31_20__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_MEM_BASE_LIMIT__MEM_LIMIT_TYPE__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_MEM_BASE_LIMIT__MEM_LIMIT_31_20__SHIFT 0x14
++#define BIF_CFG_DEV0_SWDS0_MEM_BASE_LIMIT__MEM_BASE_TYPE_MASK 0x0000000FL
++#define BIF_CFG_DEV0_SWDS0_MEM_BASE_LIMIT__MEM_BASE_31_20_MASK 0x0000FFF0L
++#define BIF_CFG_DEV0_SWDS0_MEM_BASE_LIMIT__MEM_LIMIT_TYPE_MASK 0x000F0000L
++#define BIF_CFG_DEV0_SWDS0_MEM_BASE_LIMIT__MEM_LIMIT_31_20_MASK 0xFFF00000L
++//BIF_CFG_DEV0_SWDS0_PREF_BASE_LIMIT
++#define BIF_CFG_DEV0_SWDS0_PREF_BASE_LIMIT__PREF_MEM_BASE_TYPE__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PREF_BASE_LIMIT__PREF_MEM_BASE_31_20__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PREF_BASE_LIMIT__PREF_MEM_LIMIT_TYPE__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_PREF_BASE_LIMIT__PREF_MEM_LIMIT_31_20__SHIFT 0x14
++#define BIF_CFG_DEV0_SWDS0_PREF_BASE_LIMIT__PREF_MEM_BASE_TYPE_MASK 0x0000000FL
++#define BIF_CFG_DEV0_SWDS0_PREF_BASE_LIMIT__PREF_MEM_BASE_31_20_MASK 0x0000FFF0L
++#define BIF_CFG_DEV0_SWDS0_PREF_BASE_LIMIT__PREF_MEM_LIMIT_TYPE_MASK 0x000F0000L
++#define BIF_CFG_DEV0_SWDS0_PREF_BASE_LIMIT__PREF_MEM_LIMIT_31_20_MASK 0xFFF00000L
++//BIF_CFG_DEV0_SWDS0_PREF_BASE_UPPER
++#define BIF_CFG_DEV0_SWDS0_PREF_BASE_UPPER__PREF_BASE_UPPER__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PREF_BASE_UPPER__PREF_BASE_UPPER_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_SWDS0_PREF_LIMIT_UPPER
++#define BIF_CFG_DEV0_SWDS0_PREF_LIMIT_UPPER__PREF_LIMIT_UPPER__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PREF_LIMIT_UPPER__PREF_LIMIT_UPPER_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT_HI
++#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT_HI__IO_BASE_31_16__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT_HI__IO_LIMIT_31_16__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT_HI__IO_BASE_31_16_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT_HI__IO_LIMIT_31_16_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_SWDS0_CAP_PTR
++#define BIF_CFG_DEV0_SWDS0_CAP_PTR__CAP_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_CAP_PTR__CAP_PTR_MASK 0x000000FFL
++//BIF_CFG_DEV0_SWDS0_INTERRUPT_LINE
++#define BIF_CFG_DEV0_SWDS0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
++//BIF_CFG_DEV0_SWDS0_INTERRUPT_PIN
++#define BIF_CFG_DEV0_SWDS0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
++//BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL
++#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__PARITY_RESPONSE_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__SERR_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__ISA_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__VGA_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__VGA_DEC__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__MASTER_ABORT_MODE__SHIFT 0x5
++#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__SECONDARY_BUS_RESET__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__FAST_B2B_EN__SHIFT 0x7
++#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__PARITY_RESPONSE_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__SERR_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__ISA_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__VGA_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__VGA_DEC_MASK 0x0010L
++#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__MASTER_ABORT_MODE_MASK 0x0020L
++#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__SECONDARY_BUS_RESET_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__FAST_B2B_EN_MASK 0x0080L
++//BIF_CFG_DEV0_SWDS0_PMI_CAP_LIST
++#define BIF_CFG_DEV0_SWDS0_PMI_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PMI_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PMI_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_SWDS0_PMI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_PMI_CAP
++#define BIF_CFG_DEV0_SWDS0_PMI_CAP__VERSION__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PMI_CAP__PME_CLOCK__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PMI_CAP__DEV_SPECIFIC_INIT__SHIFT 0x5
++#define BIF_CFG_DEV0_SWDS0_PMI_CAP__AUX_CURRENT__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_PMI_CAP__D1_SUPPORT__SHIFT 0x9
++#define BIF_CFG_DEV0_SWDS0_PMI_CAP__D2_SUPPORT__SHIFT 0xa
++#define BIF_CFG_DEV0_SWDS0_PMI_CAP__PME_SUPPORT__SHIFT 0xb
++#define BIF_CFG_DEV0_SWDS0_PMI_CAP__VERSION_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_PMI_CAP__PME_CLOCK_MASK 0x0008L
++#define BIF_CFG_DEV0_SWDS0_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0_MASK 0x0010L
++#define BIF_CFG_DEV0_SWDS0_PMI_CAP__DEV_SPECIFIC_INIT_MASK 0x0020L
++#define BIF_CFG_DEV0_SWDS0_PMI_CAP__AUX_CURRENT_MASK 0x01C0L
++#define BIF_CFG_DEV0_SWDS0_PMI_CAP__D1_SUPPORT_MASK 0x0200L
++#define BIF_CFG_DEV0_SWDS0_PMI_CAP__D2_SUPPORT_MASK 0x0400L
++#define BIF_CFG_DEV0_SWDS0_PMI_CAP__PME_SUPPORT_MASK 0xF800L
++//BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL
++#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__POWER_STATE__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__NO_SOFT_RESET__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__PME_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__DATA_SELECT__SHIFT 0x9
++#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__DATA_SCALE__SHIFT 0xd
++#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__PME_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__B2_B3_SUPPORT__SHIFT 0x16
++#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__BUS_PWR_EN__SHIFT 0x17
++#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__PMI_DATA__SHIFT 0x18
++#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__POWER_STATE_MASK 0x00000003L
++#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__NO_SOFT_RESET_MASK 0x00000008L
++#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__PME_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__DATA_SELECT_MASK 0x00001E00L
++#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__DATA_SCALE_MASK 0x00006000L
++#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__PME_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__B2_B3_SUPPORT_MASK 0x00400000L
++#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__BUS_PWR_EN_MASK 0x00800000L
++#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__PMI_DATA_MASK 0xFF000000L
++//BIF_CFG_DEV0_SWDS0_PCIE_CAP_LIST
++#define BIF_CFG_DEV0_SWDS0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_SWDS0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_PCIE_CAP
++#define BIF_CFG_DEV0_SWDS0_PCIE_CAP__VERSION__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
++#define BIF_CFG_DEV0_SWDS0_PCIE_CAP__VERSION_MASK 0x000FL
++#define BIF_CFG_DEV0_SWDS0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
++#define BIF_CFG_DEV0_SWDS0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
++#define BIF_CFG_DEV0_SWDS0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
++//BIF_CFG_DEV0_SWDS0_DEVICE_CAP
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
++//BIF_CFG_DEV0_SWDS0_DEVICE_CNTL
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__BRIDGE_CFG_RETRY_EN__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__BRIDGE_CFG_RETRY_EN_MASK 0x8000L
++//BIF_CFG_DEV0_SWDS0_DEVICE_STATUS
++#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
++#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
++#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
++#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
++//BIF_CFG_DEV0_SWDS0_LINK_CAP
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP__LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP__LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP__PM_SUPPORT__SHIFT 0xa
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP__PORT_NUMBER__SHIFT 0x18
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
++//BIF_CFG_DEV0_SWDS0_LINK_CNTL
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__PM_CONTROL__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__LINK_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__PM_CONTROL_MASK 0x0003L
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__LINK_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
++//BIF_CFG_DEV0_SWDS0_LINK_STATUS
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
++//BIF_CFG_DEV0_SWDS0_SLOT_CAP
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__ATTN_BUTTON_PRESENT__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__PWR_CONTROLLER_PRESENT__SHIFT 0x1
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__MRL_SENSOR_PRESENT__SHIFT 0x2
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__ATTN_INDICATOR_PRESENT__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__PWR_INDICATOR_PRESENT__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__HOTPLUG_SURPRISE__SHIFT 0x5
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__HOTPLUG_CAPABLE__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__SLOT_PWR_LIMIT_VALUE__SHIFT 0x7
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__SLOT_PWR_LIMIT_SCALE__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__ELECTROMECH_INTERLOCK_PRESENT__SHIFT 0x11
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__NO_COMMAND_COMPLETED_SUPPORTED__SHIFT 0x12
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__PHYSICAL_SLOT_NUM__SHIFT 0x13
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__ATTN_BUTTON_PRESENT_MASK 0x00000001L
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__PWR_CONTROLLER_PRESENT_MASK 0x00000002L
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__MRL_SENSOR_PRESENT_MASK 0x00000004L
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__ATTN_INDICATOR_PRESENT_MASK 0x00000008L
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__PWR_INDICATOR_PRESENT_MASK 0x00000010L
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__HOTPLUG_SURPRISE_MASK 0x00000020L
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__HOTPLUG_CAPABLE_MASK 0x00000040L
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__SLOT_PWR_LIMIT_VALUE_MASK 0x00007F80L
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__SLOT_PWR_LIMIT_SCALE_MASK 0x00018000L
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__ELECTROMECH_INTERLOCK_PRESENT_MASK 0x00020000L
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__NO_COMMAND_COMPLETED_SUPPORTED_MASK 0x00040000L
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__PHYSICAL_SLOT_NUM_MASK 0xFFF80000L
++//BIF_CFG_DEV0_SWDS0_SLOT_CNTL
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__ATTN_BUTTON_PRESSED_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__PWR_FAULT_DETECTED_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__MRL_SENSOR_CHANGED_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__PRESENCE_DETECT_CHANGED_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__COMMAND_COMPLETED_INTR_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__HOTPLUG_INTR_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__ATTN_INDICATOR_CNTL__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__PWR_INDICATOR_CNTL__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__PWR_CONTROLLER_CNTL__SHIFT 0xa
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__ELECTROMECH_INTERLOCK_CNTL__SHIFT 0xb
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__DL_STATE_CHANGED_EN__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__ATTN_BUTTON_PRESSED_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__PWR_FAULT_DETECTED_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__MRL_SENSOR_CHANGED_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__PRESENCE_DETECT_CHANGED_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__COMMAND_COMPLETED_INTR_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__HOTPLUG_INTR_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__ATTN_INDICATOR_CNTL_MASK 0x00C0L
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__PWR_INDICATOR_CNTL_MASK 0x0300L
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__PWR_CONTROLLER_CNTL_MASK 0x0400L
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__ELECTROMECH_INTERLOCK_CNTL_MASK 0x0800L
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__DL_STATE_CHANGED_EN_MASK 0x1000L
++//BIF_CFG_DEV0_SWDS0_SLOT_STATUS
++#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__ATTN_BUTTON_PRESSED__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__PWR_FAULT_DETECTED__SHIFT 0x1
++#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__MRL_SENSOR_CHANGED__SHIFT 0x2
++#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__PRESENCE_DETECT_CHANGED__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__COMMAND_COMPLETED__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__MRL_SENSOR_STATE__SHIFT 0x5
++#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__PRESENCE_DETECT_STATE__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__ELECTROMECH_INTERLOCK_STATUS__SHIFT 0x7
++#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__DL_STATE_CHANGED__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__ATTN_BUTTON_PRESSED_MASK 0x0001L
++#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__PWR_FAULT_DETECTED_MASK 0x0002L
++#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__MRL_SENSOR_CHANGED_MASK 0x0004L
++#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__PRESENCE_DETECT_CHANGED_MASK 0x0008L
++#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__COMMAND_COMPLETED_MASK 0x0010L
++#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__MRL_SENSOR_STATE_MASK 0x0020L
++#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__PRESENCE_DETECT_STATE_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__ELECTROMECH_INTERLOCK_STATUS_MASK 0x0080L
++#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__DL_STATE_CHANGED_MASK 0x0100L
++//BIF_CFG_DEV0_SWDS0_DEVICE_CAP2
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
++//BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
++//BIF_CFG_DEV0_SWDS0_DEVICE_STATUS2
++#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_SWDS0_LINK_CAP2
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP2__RESERVED__SHIFT 0x19
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP2__RESERVED_MASK 0xFE000000L
++//BIF_CFG_DEV0_SWDS0_LINK_CNTL2
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
++//BIF_CFG_DEV0_SWDS0_LINK_STATUS2
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
++//BIF_CFG_DEV0_SWDS0_SLOT_CAP2
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_SWDS0_SLOT_CNTL2
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_SWDS0_SLOT_STATUS2
++#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_SWDS0_MSI_CAP_LIST
++#define BIF_CFG_DEV0_SWDS0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_SWDS0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_MSI_MSG_CNTL
++#define BIF_CFG_DEV0_SWDS0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_SWDS0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
++#define BIF_CFG_DEV0_SWDS0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_SWDS0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
++#define BIF_CFG_DEV0_SWDS0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
++#define BIF_CFG_DEV0_SWDS0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
++#define BIF_CFG_DEV0_SWDS0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
++//BIF_CFG_DEV0_SWDS0_MSI_MSG_ADDR_LO
++#define BIF_CFG_DEV0_SWDS0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
++#define BIF_CFG_DEV0_SWDS0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//BIF_CFG_DEV0_SWDS0_MSI_MSG_ADDR_HI
++#define BIF_CFG_DEV0_SWDS0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_SWDS0_MSI_MSG_DATA
++#define BIF_CFG_DEV0_SWDS0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_SWDS0_MSI_MSG_DATA_64
++#define BIF_CFG_DEV0_SWDS0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_SWDS0_SSID_CAP_LIST
++#define BIF_CFG_DEV0_SWDS0_SSID_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_SSID_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_SSID_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_SWDS0_SSID_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_SSID_CAP
++#define BIF_CFG_DEV0_SWDS0_SSID_CAP__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_SSID_CAP__SUBSYSTEM_ID__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_SSID_CAP__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_SWDS0_SSID_CAP__SUBSYSTEM_ID_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_HDR
++#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
++#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
++//BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC1
++#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC2
++#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_SWDS0_PCIE_VC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG1
++#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG1__EXT_VC_COUNT__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG1__LOW_PRIORITY_EXT_VC_COUNT__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG1__REF_CLK__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG1__PORT_ARB_TABLE_ENTRY_SIZE__SHIFT 0xa
++#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG1__EXT_VC_COUNT_MASK 0x00000007L
++#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG1__LOW_PRIORITY_EXT_VC_COUNT_MASK 0x00000070L
++#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG1__REF_CLK_MASK 0x00000300L
++#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG1__PORT_ARB_TABLE_ENTRY_SIZE_MASK 0x00000C00L
++//BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG2
++#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG2__VC_ARB_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG2__VC_ARB_TABLE_OFFSET__SHIFT 0x18
++#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG2__VC_ARB_CAP_MASK 0x000000FFL
++#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG2__VC_ARB_TABLE_OFFSET_MASK 0xFF000000L
++//BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CNTL
++#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CNTL__LOAD_VC_ARB_TABLE__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CNTL__VC_ARB_SELECT__SHIFT 0x1
++#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CNTL__LOAD_VC_ARB_TABLE_MASK 0x0001L
++#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CNTL__VC_ARB_SELECT_MASK 0x000EL
++//BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_STATUS
++#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_STATUS__VC_ARB_TABLE_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_STATUS__VC_ARB_TABLE_STATUS_MASK 0x0001L
++//BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CAP
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CAP__REJECT_SNOOP_TRANS__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CAP__MAX_TIME_SLOTS__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET__SHIFT 0x18
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_CAP_MASK 0x000000FFL
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CAP__REJECT_SNOOP_TRANS_MASK 0x00008000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CAP__MAX_TIME_SLOTS_MASK 0x003F0000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET_MASK 0xFF000000L
++//BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC0__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC1_7__SHIFT 0x1
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__PORT_ARB_SELECT__SHIFT 0x11
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__VC_ID__SHIFT 0x18
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__VC_ENABLE__SHIFT 0x1f
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC0_MASK 0x00000001L
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC1_7_MASK 0x000000FEL
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE_MASK 0x00010000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__PORT_ARB_SELECT_MASK 0x000E0000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__VC_ID_MASK 0x07000000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__VC_ENABLE_MASK 0x80000000L
++//BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_STATUS
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_STATUS__VC_NEGOTIATION_PENDING__SHIFT 0x1
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS_MASK 0x0001L
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_STATUS__VC_NEGOTIATION_PENDING_MASK 0x0002L
++//BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CAP
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CAP__REJECT_SNOOP_TRANS__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CAP__MAX_TIME_SLOTS__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET__SHIFT 0x18
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_CAP_MASK 0x000000FFL
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CAP__REJECT_SNOOP_TRANS_MASK 0x00008000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CAP__MAX_TIME_SLOTS_MASK 0x003F0000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET_MASK 0xFF000000L
++//BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC0__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC1_7__SHIFT 0x1
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__PORT_ARB_SELECT__SHIFT 0x11
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__VC_ID__SHIFT 0x18
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__VC_ENABLE__SHIFT 0x1f
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC0_MASK 0x00000001L
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC1_7_MASK 0x000000FEL
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE_MASK 0x00010000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__PORT_ARB_SELECT_MASK 0x000E0000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__VC_ID_MASK 0x07000000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__VC_ENABLE_MASK 0x80000000L
++//BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_STATUS
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_STATUS__VC_NEGOTIATION_PENDING__SHIFT 0x1
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS_MASK 0x0001L
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_STATUS__VC_NEGOTIATION_PENDING_MASK 0x0002L
++//BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST
++#define BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_DW1
++#define BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_DW1__SERIAL_NUMBER_LO__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_DW1__SERIAL_NUMBER_LO_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_DW2
++#define BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_DW2__SERIAL_NUMBER_HI__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_DW2__SERIAL_NUMBER_HI_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
++//BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
++//BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
++//BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
++//BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
++//BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
++//BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG0
++#define BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG1
++#define BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG2
++#define BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG3
++#define BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG0
++#define BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG1
++#define BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG2
++#define BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG3
++#define BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_SWDS0_PCIE_SECONDARY_ENH_CAP_LIST
++#define BIF_CFG_DEV0_SWDS0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_PCIE_SECONDARY_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_SWDS0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_SWDS0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_SECONDARY_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_SWDS0_PCIE_LINK_CNTL3
++#define BIF_CFG_DEV0_SWDS0_PCIE_LINK_CNTL3__PERFORM_EQUALIZATION__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_LINK_CNTL3__LINK_EQUALIZATION_REQ_INT_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_SWDS0_PCIE_LINK_CNTL3__RESERVED__SHIFT 0x2
++#define BIF_CFG_DEV0_SWDS0_PCIE_LINK_CNTL3__PERFORM_EQUALIZATION_MASK 0x00000001L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LINK_CNTL3__LINK_EQUALIZATION_REQ_INT_EN_MASK 0x00000002L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LINK_CNTL3__RESERVED_MASK 0xFFFFFFFCL
++//BIF_CFG_DEV0_SWDS0_PCIE_LANE_ERROR_STATUS
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_ERROR_STATUS__LANE_ERROR_STATUS_BITS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_ERROR_STATUS__RESERVED__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_ERROR_STATUS__LANE_ERROR_STATUS_BITS_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_ERROR_STATUS__RESERVED_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_SWDS0_PCIE_LANE_0_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_0_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_0_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_SWDS0_PCIE_LANE_1_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_1_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_1_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_SWDS0_PCIE_LANE_2_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_2_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_2_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_SWDS0_PCIE_LANE_3_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_3_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_3_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_SWDS0_PCIE_LANE_4_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_4_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_4_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_SWDS0_PCIE_LANE_5_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_5_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_5_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_SWDS0_PCIE_LANE_6_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_6_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_6_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_SWDS0_PCIE_LANE_7_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_7_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_7_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_SWDS0_PCIE_LANE_8_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_8_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_8_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_SWDS0_PCIE_LANE_9_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_9_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_9_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_SWDS0_PCIE_LANE_10_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_10_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_10_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_SWDS0_PCIE_LANE_11_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_11_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_11_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_SWDS0_PCIE_LANE_12_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_12_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_12_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_SWDS0_PCIE_LANE_13_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_13_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_13_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_SWDS0_PCIE_LANE_14_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_14_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_14_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_SWDS0_PCIE_LANE_15_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_15_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_15_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_SWDS0_PCIE_ACS_ENH_CAP_LIST
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__SOURCE_VALIDATION__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__TRANSLATION_BLOCKING__SHIFT 0x1
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT__SHIFT 0x2
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__UPSTREAM_FORWARDING__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__P2P_EGRESS_CONTROL__SHIFT 0x5
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__SOURCE_VALIDATION_MASK 0x0001L
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__TRANSLATION_BLOCKING_MASK 0x0002L
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT_MASK 0x0004L
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT_MASK 0x0008L
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__UPSTREAM_FORWARDING_MASK 0x0010L
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__P2P_EGRESS_CONTROL_MASK 0x0020L
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN_MASK 0x0040L
++//BIF_CFG_DEV0_SWDS0_PCIE_DLF_ENH_CAP_LIST
++#define BIF_CFG_DEV0_SWDS0_PCIE_DLF_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_DLF_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_PCIE_DLF_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_SWDS0_PCIE_DLF_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_SWDS0_PCIE_DLF_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_DLF_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_SWDS0_DATA_LINK_FEATURE_CAP
++#define BIF_CFG_DEV0_SWDS0_DATA_LINK_FEATURE_CAP__LOCAL_DLF_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_DATA_LINK_FEATURE_CAP__DLF_EXCHANGE_ENABLE__SHIFT 0x1f
++#define BIF_CFG_DEV0_SWDS0_DATA_LINK_FEATURE_CAP__LOCAL_DLF_SUPPORTED_MASK 0x007FFFFFL
++#define BIF_CFG_DEV0_SWDS0_DATA_LINK_FEATURE_CAP__DLF_EXCHANGE_ENABLE_MASK 0x80000000L
++//BIF_CFG_DEV0_SWDS0_DATA_LINK_FEATURE_STATUS
++#define BIF_CFG_DEV0_SWDS0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_VALID__SHIFT 0x1f
++#define BIF_CFG_DEV0_SWDS0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_MASK 0x007FFFFFL
++#define BIF_CFG_DEV0_SWDS0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_VALID_MASK 0x80000000L
++//BIF_CFG_DEV0_SWDS0_PHY_16GT_ENH_CAP_LIST
++#define BIF_CFG_DEV0_SWDS0_PHY_16GT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PHY_16GT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_PHY_16GT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_SWDS0_PHY_16GT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_SWDS0_PHY_16GT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_SWDS0_PHY_16GT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_SWDS0_LINK_CAP_16GT
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP_16GT__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP_16GT__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_SWDS0_LINK_CNTL_16GT
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL_16GT__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL_16GT__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_SWDS0_LINK_STATUS_16GT
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS_16GT__EQUALIZATION_COMPLETE_16GT__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS_16GT__EQUALIZATION_PHASE1_SUCCESS_16GT__SHIFT 0x1
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS_16GT__EQUALIZATION_PHASE2_SUCCESS_16GT__SHIFT 0x2
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS_16GT__EQUALIZATION_PHASE3_SUCCESS_16GT__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS_16GT__LINK_EQUALIZATION_REQUEST_16GT__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS_16GT__EQUALIZATION_COMPLETE_16GT_MASK 0x00000001L
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS_16GT__EQUALIZATION_PHASE1_SUCCESS_16GT_MASK 0x00000002L
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS_16GT__EQUALIZATION_PHASE2_SUCCESS_16GT_MASK 0x00000004L
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS_16GT__EQUALIZATION_PHASE3_SUCCESS_16GT_MASK 0x00000008L
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS_16GT__LINK_EQUALIZATION_REQUEST_16GT_MASK 0x00000010L
++//BIF_CFG_DEV0_SWDS0_LOCAL_PARITY_MISMATCH_STATUS_16GT
++#define BIF_CFG_DEV0_SWDS0_LOCAL_PARITY_MISMATCH_STATUS_16GT__LOCAL_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LOCAL_PARITY_MISMATCH_STATUS_16GT__LOCAL_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_SWDS0_RTM1_PARITY_MISMATCH_STATUS_16GT
++#define BIF_CFG_DEV0_SWDS0_RTM1_PARITY_MISMATCH_STATUS_16GT__RTM1_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_RTM1_PARITY_MISMATCH_STATUS_16GT__RTM1_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_SWDS0_RTM2_PARITY_MISMATCH_STATUS_16GT
++#define BIF_CFG_DEV0_SWDS0_RTM2_PARITY_MISMATCH_STATUS_16GT__RTM2_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_RTM2_PARITY_MISMATCH_STATUS_16GT__RTM2_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_SWDS0_LANE_0_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_SWDS0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_SWDS0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_SWDS0_LANE_1_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_SWDS0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_SWDS0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_SWDS0_LANE_2_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_SWDS0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_SWDS0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_SWDS0_LANE_3_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_SWDS0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_SWDS0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_SWDS0_LANE_4_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_SWDS0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_SWDS0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_SWDS0_LANE_5_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_SWDS0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_SWDS0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_SWDS0_LANE_6_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_SWDS0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_SWDS0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_SWDS0_LANE_7_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_SWDS0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_SWDS0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_SWDS0_LANE_8_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_SWDS0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_SWDS0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_SWDS0_LANE_9_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_SWDS0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_SWDS0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_SWDS0_LANE_10_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_SWDS0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_SWDS0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_SWDS0_LANE_11_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_SWDS0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_SWDS0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_SWDS0_LANE_12_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_SWDS0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_SWDS0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_SWDS0_LANE_13_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_SWDS0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_SWDS0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_SWDS0_LANE_14_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_SWDS0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_SWDS0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_SWDS0_LANE_15_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_SWDS0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_SWDS0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_SWDS0_MARGINING_ENH_CAP_LIST
++#define BIF_CFG_DEV0_SWDS0_MARGINING_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_MARGINING_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_MARGINING_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_SWDS0_MARGINING_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_SWDS0_MARGINING_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_SWDS0_MARGINING_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_SWDS0_MARGINING_PORT_CAP
++#define BIF_CFG_DEV0_SWDS0_MARGINING_PORT_CAP__MARGINING_USES_SOFTWARE__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_MARGINING_PORT_CAP__MARGINING_USES_SOFTWARE_MASK 0x0001L
++//BIF_CFG_DEV0_SWDS0_MARGINING_PORT_STATUS
++#define BIF_CFG_DEV0_SWDS0_MARGINING_PORT_STATUS__MARGINING_READY__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_MARGINING_PORT_STATUS__MARGINING_SOFTWARE_READY__SHIFT 0x1
++#define BIF_CFG_DEV0_SWDS0_MARGINING_PORT_STATUS__MARGINING_READY_MASK 0x0001L
++#define BIF_CFG_DEV0_SWDS0_MARGINING_PORT_STATUS__MARGINING_SOFTWARE_READY_MASK 0x0002L
++//BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_CNTL__LANE_0_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_CNTL__LANE_0_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_CNTL__LANE_0_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_CNTL__LANE_0_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_STATUS__LANE_0_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_STATUS__LANE_0_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_STATUS__LANE_0_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_STATUS__LANE_0_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_CNTL__LANE_1_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_CNTL__LANE_1_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_CNTL__LANE_1_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_CNTL__LANE_1_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_STATUS__LANE_1_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_STATUS__LANE_1_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_STATUS__LANE_1_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_STATUS__LANE_1_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_CNTL__LANE_2_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_CNTL__LANE_2_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_CNTL__LANE_2_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_CNTL__LANE_2_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_STATUS__LANE_2_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_STATUS__LANE_2_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_STATUS__LANE_2_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_STATUS__LANE_2_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_CNTL__LANE_3_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_CNTL__LANE_3_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_CNTL__LANE_3_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_CNTL__LANE_3_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_STATUS__LANE_3_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_STATUS__LANE_3_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_STATUS__LANE_3_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_STATUS__LANE_3_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_CNTL__LANE_4_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_CNTL__LANE_4_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_CNTL__LANE_4_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_CNTL__LANE_4_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_STATUS__LANE_4_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_STATUS__LANE_4_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_STATUS__LANE_4_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_STATUS__LANE_4_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_CNTL__LANE_5_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_CNTL__LANE_5_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_CNTL__LANE_5_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_CNTL__LANE_5_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_STATUS__LANE_5_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_STATUS__LANE_5_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_STATUS__LANE_5_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_STATUS__LANE_5_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_CNTL__LANE_6_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_CNTL__LANE_6_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_CNTL__LANE_6_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_CNTL__LANE_6_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_STATUS__LANE_6_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_STATUS__LANE_6_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_STATUS__LANE_6_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_STATUS__LANE_6_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_CNTL__LANE_7_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_CNTL__LANE_7_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_CNTL__LANE_7_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_CNTL__LANE_7_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_STATUS__LANE_7_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_STATUS__LANE_7_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_STATUS__LANE_7_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_STATUS__LANE_7_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_CNTL__LANE_8_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_CNTL__LANE_8_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_CNTL__LANE_8_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_CNTL__LANE_8_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_STATUS__LANE_8_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_STATUS__LANE_8_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_STATUS__LANE_8_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_STATUS__LANE_8_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_CNTL__LANE_9_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_CNTL__LANE_9_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_CNTL__LANE_9_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_CNTL__LANE_9_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_STATUS__LANE_9_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_STATUS__LANE_9_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_STATUS__LANE_9_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_STATUS__LANE_9_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_CNTL__LANE_10_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_CNTL__LANE_10_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_CNTL__LANE_10_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_CNTL__LANE_10_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_STATUS__LANE_10_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_STATUS__LANE_10_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_STATUS__LANE_10_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_STATUS__LANE_10_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_CNTL__LANE_11_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_CNTL__LANE_11_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_CNTL__LANE_11_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_CNTL__LANE_11_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_STATUS__LANE_11_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_STATUS__LANE_11_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_STATUS__LANE_11_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_STATUS__LANE_11_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_CNTL__LANE_12_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_CNTL__LANE_12_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_CNTL__LANE_12_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_CNTL__LANE_12_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_STATUS__LANE_12_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_STATUS__LANE_12_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_STATUS__LANE_12_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_STATUS__LANE_12_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_CNTL__LANE_13_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_CNTL__LANE_13_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_CNTL__LANE_13_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_CNTL__LANE_13_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_STATUS__LANE_13_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_STATUS__LANE_13_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_STATUS__LANE_13_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_STATUS__LANE_13_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_CNTL__LANE_14_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_CNTL__LANE_14_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_CNTL__LANE_14_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_CNTL__LANE_14_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_STATUS__LANE_14_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_STATUS__LANE_14_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_STATUS__LANE_14_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_STATUS__LANE_14_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_CNTL__LANE_15_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_CNTL__LANE_15_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_CNTL__LANE_15_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_CNTL__LANE_15_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_STATUS__LANE_15_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_STATUS__LANE_15_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_STATUS__LANE_15_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_STATUS__LANE_15_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf0_bifcfgdecp
++//BIF_CFG_DEV0_EPF0_VF0_0_VENDOR_ID
++#define BIF_CFG_DEV0_EPF0_VF0_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_ID
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_COMMAND
++#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__AD_STEPPING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__SERR_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__FAST_B2B_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__INT_DIS__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__AD_STEPPING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__SERR_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__FAST_B2B_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__INT_DIS_MASK 0x0400L
++//BIF_CFG_DEV0_EPF0_VF0_0_STATUS
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__INT_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__CAP_LIST__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__PCI_66_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__DEVSEL_TIMING__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__INT_STATUS_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__CAP_LIST_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__PCI_66_CAP_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__DEVSEL_TIMING_MASK 0x0600L
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF0_0_REVISION_ID
++#define BIF_CFG_DEV0_EPF0_VF0_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF0_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF0_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_VF0_0_PROG_INTERFACE
++#define BIF_CFG_DEV0_EPF0_VF0_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_SUB_CLASS
++#define BIF_CFG_DEV0_EPF0_VF0_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_BASE_CLASS
++#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_CACHE_LINE
++#define BIF_CFG_DEV0_EPF0_VF0_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_LATENCY
++#define BIF_CFG_DEV0_EPF0_VF0_0_LATENCY__LATENCY_TIMER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_LATENCY__LATENCY_TIMER_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_HEADER
++#define BIF_CFG_DEV0_EPF0_VF0_0_HEADER__HEADER_TYPE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_HEADER__DEVICE_TYPE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF0_0_HEADER__HEADER_TYPE_MASK 0x7FL
++#define BIF_CFG_DEV0_EPF0_VF0_0_HEADER__DEVICE_TYPE_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF0_0_BIST
++#define BIF_CFG_DEV0_EPF0_VF0_0_BIST__BIST_COMP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_BIST__BIST_STRT__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF0_0_BIST__BIST_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF0_0_BIST__BIST_COMP_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF0_0_BIST__BIST_STRT_MASK 0x40L
++#define BIF_CFG_DEV0_EPF0_VF0_0_BIST__BIST_CAP_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_1
++#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_2
++#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_3
++#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_4
++#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_5
++#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_6
++#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_ADAPTER_ID
++#define BIF_CFG_DEV0_EPF0_VF0_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF0_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF0_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_VF0_0_ROM_BASE_ADDR
++#define BIF_CFG_DEV0_EPF0_VF0_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_CAP_PTR
++#define BIF_CFG_DEV0_EPF0_VF0_0_CAP_PTR__CAP_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL
++//BIF_CFG_DEV0_EPF0_VF0_0_INTERRUPT_LINE
++#define BIF_CFG_DEV0_EPF0_VF0_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_INTERRUPT_PIN
++#define BIF_CFG_DEV0_EPF0_VF0_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP__VERSION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP__VERSION_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
++//BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
++//BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__LINK_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__LINK_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
++//BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
++//BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP2
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP2__RESERVED__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP2__RESERVED_MASK 0xFE000000L
++//BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
++//BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
++//BIF_CFG_DEV0_EPF0_VF0_0_SLOT_CAP2
++#define BIF_CFG_DEV0_EPF0_VF0_0_SLOT_CAP2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_SLOT_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF0_0_SLOT_CNTL2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_SLOT_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF0_0_SLOT_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_MSI_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
++//BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_ADDR_LO
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_ADDR_HI
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_DATA
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_MSI_MASK
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MASK__MSI_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_DATA_64
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_MSI_MASK_64
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_MSI_PENDING
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_MSI_PENDING_64
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_MSIX_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF0_0_MSIX_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF0_0_MSIX_TABLE
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF0_0_MSIX_PBA
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_HDR
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC1
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC2
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG1
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG2
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG3
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG1
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG2
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG3
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CAP
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CNTL
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CNTL__STU__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CNTL__STU_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CAP
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CNTL
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf1_bifcfgdecp
++//BIF_CFG_DEV0_EPF0_VF1_0_VENDOR_ID
++#define BIF_CFG_DEV0_EPF0_VF1_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_ID
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_COMMAND
++#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__AD_STEPPING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__SERR_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__FAST_B2B_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__INT_DIS__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__AD_STEPPING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__SERR_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__FAST_B2B_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__INT_DIS_MASK 0x0400L
++//BIF_CFG_DEV0_EPF0_VF1_0_STATUS
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__INT_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__CAP_LIST__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__PCI_66_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__DEVSEL_TIMING__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__INT_STATUS_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__CAP_LIST_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__PCI_66_CAP_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__DEVSEL_TIMING_MASK 0x0600L
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF1_0_REVISION_ID
++#define BIF_CFG_DEV0_EPF0_VF1_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF1_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF1_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_VF1_0_PROG_INTERFACE
++#define BIF_CFG_DEV0_EPF0_VF1_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_SUB_CLASS
++#define BIF_CFG_DEV0_EPF0_VF1_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_BASE_CLASS
++#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_CACHE_LINE
++#define BIF_CFG_DEV0_EPF0_VF1_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_LATENCY
++#define BIF_CFG_DEV0_EPF0_VF1_0_LATENCY__LATENCY_TIMER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_LATENCY__LATENCY_TIMER_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_HEADER
++#define BIF_CFG_DEV0_EPF0_VF1_0_HEADER__HEADER_TYPE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_HEADER__DEVICE_TYPE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF1_0_HEADER__HEADER_TYPE_MASK 0x7FL
++#define BIF_CFG_DEV0_EPF0_VF1_0_HEADER__DEVICE_TYPE_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF1_0_BIST
++#define BIF_CFG_DEV0_EPF0_VF1_0_BIST__BIST_COMP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_BIST__BIST_STRT__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF1_0_BIST__BIST_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF1_0_BIST__BIST_COMP_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF1_0_BIST__BIST_STRT_MASK 0x40L
++#define BIF_CFG_DEV0_EPF0_VF1_0_BIST__BIST_CAP_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_1
++#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_2
++#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_3
++#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_4
++#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_5
++#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_6
++#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_ADAPTER_ID
++#define BIF_CFG_DEV0_EPF0_VF1_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF1_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF1_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_VF1_0_ROM_BASE_ADDR
++#define BIF_CFG_DEV0_EPF0_VF1_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_CAP_PTR
++#define BIF_CFG_DEV0_EPF0_VF1_0_CAP_PTR__CAP_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL
++//BIF_CFG_DEV0_EPF0_VF1_0_INTERRUPT_LINE
++#define BIF_CFG_DEV0_EPF0_VF1_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_INTERRUPT_PIN
++#define BIF_CFG_DEV0_EPF0_VF1_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP__VERSION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP__VERSION_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
++//BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
++//BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__LINK_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__LINK_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
++//BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
++//BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP2
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP2__RESERVED__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP2__RESERVED_MASK 0xFE000000L
++//BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
++//BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
++//BIF_CFG_DEV0_EPF0_VF1_0_SLOT_CAP2
++#define BIF_CFG_DEV0_EPF0_VF1_0_SLOT_CAP2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_SLOT_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF1_0_SLOT_CNTL2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_SLOT_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF1_0_SLOT_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_MSI_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
++//BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_ADDR_LO
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_ADDR_HI
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_DATA
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_MSI_MASK
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MASK__MSI_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_DATA_64
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_MSI_MASK_64
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_MSI_PENDING
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_MSI_PENDING_64
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_MSIX_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF1_0_MSIX_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF1_0_MSIX_TABLE
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF1_0_MSIX_PBA
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_HDR
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC1
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC2
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG1
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG2
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG3
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG1
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG2
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG3
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CAP
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CNTL
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CNTL__STU__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CNTL__STU_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CAP
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CNTL
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf2_bifcfgdecp
++//BIF_CFG_DEV0_EPF0_VF2_0_VENDOR_ID
++#define BIF_CFG_DEV0_EPF0_VF2_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_ID
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_COMMAND
++#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__AD_STEPPING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__SERR_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__FAST_B2B_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__INT_DIS__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__AD_STEPPING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__SERR_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__FAST_B2B_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__INT_DIS_MASK 0x0400L
++//BIF_CFG_DEV0_EPF0_VF2_0_STATUS
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__INT_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__CAP_LIST__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__PCI_66_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__DEVSEL_TIMING__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__INT_STATUS_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__CAP_LIST_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__PCI_66_CAP_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__DEVSEL_TIMING_MASK 0x0600L
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF2_0_REVISION_ID
++#define BIF_CFG_DEV0_EPF0_VF2_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF2_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF2_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_VF2_0_PROG_INTERFACE
++#define BIF_CFG_DEV0_EPF0_VF2_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_SUB_CLASS
++#define BIF_CFG_DEV0_EPF0_VF2_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_BASE_CLASS
++#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_CACHE_LINE
++#define BIF_CFG_DEV0_EPF0_VF2_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_LATENCY
++#define BIF_CFG_DEV0_EPF0_VF2_0_LATENCY__LATENCY_TIMER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_LATENCY__LATENCY_TIMER_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_HEADER
++#define BIF_CFG_DEV0_EPF0_VF2_0_HEADER__HEADER_TYPE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_HEADER__DEVICE_TYPE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF2_0_HEADER__HEADER_TYPE_MASK 0x7FL
++#define BIF_CFG_DEV0_EPF0_VF2_0_HEADER__DEVICE_TYPE_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF2_0_BIST
++#define BIF_CFG_DEV0_EPF0_VF2_0_BIST__BIST_COMP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_BIST__BIST_STRT__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF2_0_BIST__BIST_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF2_0_BIST__BIST_COMP_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF2_0_BIST__BIST_STRT_MASK 0x40L
++#define BIF_CFG_DEV0_EPF0_VF2_0_BIST__BIST_CAP_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_1
++#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_2
++#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_3
++#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_4
++#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_5
++#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_6
++#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_ADAPTER_ID
++#define BIF_CFG_DEV0_EPF0_VF2_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF2_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF2_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_VF2_0_ROM_BASE_ADDR
++#define BIF_CFG_DEV0_EPF0_VF2_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_CAP_PTR
++#define BIF_CFG_DEV0_EPF0_VF2_0_CAP_PTR__CAP_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL
++//BIF_CFG_DEV0_EPF0_VF2_0_INTERRUPT_LINE
++#define BIF_CFG_DEV0_EPF0_VF2_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_INTERRUPT_PIN
++#define BIF_CFG_DEV0_EPF0_VF2_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP__VERSION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP__VERSION_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
++//BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
++//BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__LINK_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__LINK_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
++//BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
++//BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP2
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP2__RESERVED__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP2__RESERVED_MASK 0xFE000000L
++//BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
++//BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
++//BIF_CFG_DEV0_EPF0_VF2_0_SLOT_CAP2
++#define BIF_CFG_DEV0_EPF0_VF2_0_SLOT_CAP2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_SLOT_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF2_0_SLOT_CNTL2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_SLOT_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF2_0_SLOT_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_MSI_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
++//BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_ADDR_LO
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_ADDR_HI
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_DATA
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_MSI_MASK
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MASK__MSI_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_DATA_64
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_MSI_MASK_64
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_MSI_PENDING
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_MSI_PENDING_64
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_MSIX_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF2_0_MSIX_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF2_0_MSIX_TABLE
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF2_0_MSIX_PBA
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_HDR
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC1
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC2
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG1
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG2
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG3
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG1
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG2
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG3
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CAP
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CNTL
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CNTL__STU__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CNTL__STU_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CAP
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CNTL
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf3_bifcfgdecp
++//BIF_CFG_DEV0_EPF0_VF3_0_VENDOR_ID
++#define BIF_CFG_DEV0_EPF0_VF3_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_ID
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_COMMAND
++#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__AD_STEPPING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__SERR_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__FAST_B2B_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__INT_DIS__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__AD_STEPPING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__SERR_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__FAST_B2B_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__INT_DIS_MASK 0x0400L
++//BIF_CFG_DEV0_EPF0_VF3_0_STATUS
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__INT_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__CAP_LIST__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__PCI_66_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__DEVSEL_TIMING__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__INT_STATUS_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__CAP_LIST_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__PCI_66_CAP_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__DEVSEL_TIMING_MASK 0x0600L
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF3_0_REVISION_ID
++#define BIF_CFG_DEV0_EPF0_VF3_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF3_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF3_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_VF3_0_PROG_INTERFACE
++#define BIF_CFG_DEV0_EPF0_VF3_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_SUB_CLASS
++#define BIF_CFG_DEV0_EPF0_VF3_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_BASE_CLASS
++#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_CACHE_LINE
++#define BIF_CFG_DEV0_EPF0_VF3_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_LATENCY
++#define BIF_CFG_DEV0_EPF0_VF3_0_LATENCY__LATENCY_TIMER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_LATENCY__LATENCY_TIMER_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_HEADER
++#define BIF_CFG_DEV0_EPF0_VF3_0_HEADER__HEADER_TYPE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_HEADER__DEVICE_TYPE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF3_0_HEADER__HEADER_TYPE_MASK 0x7FL
++#define BIF_CFG_DEV0_EPF0_VF3_0_HEADER__DEVICE_TYPE_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF3_0_BIST
++#define BIF_CFG_DEV0_EPF0_VF3_0_BIST__BIST_COMP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_BIST__BIST_STRT__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF3_0_BIST__BIST_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF3_0_BIST__BIST_COMP_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF3_0_BIST__BIST_STRT_MASK 0x40L
++#define BIF_CFG_DEV0_EPF0_VF3_0_BIST__BIST_CAP_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_1
++#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_2
++#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_3
++#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_4
++#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_5
++#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_6
++#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_ADAPTER_ID
++#define BIF_CFG_DEV0_EPF0_VF3_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF3_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF3_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_VF3_0_ROM_BASE_ADDR
++#define BIF_CFG_DEV0_EPF0_VF3_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_CAP_PTR
++#define BIF_CFG_DEV0_EPF0_VF3_0_CAP_PTR__CAP_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL
++//BIF_CFG_DEV0_EPF0_VF3_0_INTERRUPT_LINE
++#define BIF_CFG_DEV0_EPF0_VF3_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_INTERRUPT_PIN
++#define BIF_CFG_DEV0_EPF0_VF3_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP__VERSION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP__VERSION_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
++//BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
++//BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__LINK_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__LINK_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
++//BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
++//BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP2
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP2__RESERVED__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP2__RESERVED_MASK 0xFE000000L
++//BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
++//BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
++//BIF_CFG_DEV0_EPF0_VF3_0_SLOT_CAP2
++#define BIF_CFG_DEV0_EPF0_VF3_0_SLOT_CAP2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_SLOT_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF3_0_SLOT_CNTL2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_SLOT_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF3_0_SLOT_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_MSI_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
++//BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_ADDR_LO
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_ADDR_HI
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_DATA
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_MSI_MASK
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MASK__MSI_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_DATA_64
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_MSI_MASK_64
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_MSI_PENDING
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_MSI_PENDING_64
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_MSIX_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF3_0_MSIX_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF3_0_MSIX_TABLE
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF3_0_MSIX_PBA
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_HDR
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC1
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC2
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG1
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG2
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG3
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG1
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG2
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG3
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CAP
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CNTL
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CNTL__STU__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CNTL__STU_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CAP
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CNTL
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf4_bifcfgdecp
++//BIF_CFG_DEV0_EPF0_VF4_0_VENDOR_ID
++#define BIF_CFG_DEV0_EPF0_VF4_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_ID
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_COMMAND
++#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__AD_STEPPING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__SERR_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__FAST_B2B_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__INT_DIS__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__AD_STEPPING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__SERR_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__FAST_B2B_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__INT_DIS_MASK 0x0400L
++//BIF_CFG_DEV0_EPF0_VF4_0_STATUS
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__INT_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__CAP_LIST__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__PCI_66_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__DEVSEL_TIMING__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__INT_STATUS_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__CAP_LIST_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__PCI_66_CAP_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__DEVSEL_TIMING_MASK 0x0600L
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF4_0_REVISION_ID
++#define BIF_CFG_DEV0_EPF0_VF4_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF4_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF4_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_VF4_0_PROG_INTERFACE
++#define BIF_CFG_DEV0_EPF0_VF4_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_SUB_CLASS
++#define BIF_CFG_DEV0_EPF0_VF4_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_BASE_CLASS
++#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_CACHE_LINE
++#define BIF_CFG_DEV0_EPF0_VF4_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_LATENCY
++#define BIF_CFG_DEV0_EPF0_VF4_0_LATENCY__LATENCY_TIMER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_LATENCY__LATENCY_TIMER_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_HEADER
++#define BIF_CFG_DEV0_EPF0_VF4_0_HEADER__HEADER_TYPE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_HEADER__DEVICE_TYPE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF4_0_HEADER__HEADER_TYPE_MASK 0x7FL
++#define BIF_CFG_DEV0_EPF0_VF4_0_HEADER__DEVICE_TYPE_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF4_0_BIST
++#define BIF_CFG_DEV0_EPF0_VF4_0_BIST__BIST_COMP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_BIST__BIST_STRT__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF4_0_BIST__BIST_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF4_0_BIST__BIST_COMP_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF4_0_BIST__BIST_STRT_MASK 0x40L
++#define BIF_CFG_DEV0_EPF0_VF4_0_BIST__BIST_CAP_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_1
++#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_2
++#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_3
++#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_4
++#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_5
++#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_6
++#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_ADAPTER_ID
++#define BIF_CFG_DEV0_EPF0_VF4_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF4_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF4_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_VF4_0_ROM_BASE_ADDR
++#define BIF_CFG_DEV0_EPF0_VF4_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_CAP_PTR
++#define BIF_CFG_DEV0_EPF0_VF4_0_CAP_PTR__CAP_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL
++//BIF_CFG_DEV0_EPF0_VF4_0_INTERRUPT_LINE
++#define BIF_CFG_DEV0_EPF0_VF4_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_INTERRUPT_PIN
++#define BIF_CFG_DEV0_EPF0_VF4_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP__VERSION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP__VERSION_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
++//BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
++//BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__LINK_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__LINK_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
++//BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
++//BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP2
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP2__RESERVED__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP2__RESERVED_MASK 0xFE000000L
++//BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
++//BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
++//BIF_CFG_DEV0_EPF0_VF4_0_SLOT_CAP2
++#define BIF_CFG_DEV0_EPF0_VF4_0_SLOT_CAP2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_SLOT_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF4_0_SLOT_CNTL2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_SLOT_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF4_0_SLOT_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_MSI_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
++//BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_ADDR_LO
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_ADDR_HI
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_DATA
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_MSI_MASK
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MASK__MSI_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_DATA_64
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_MSI_MASK_64
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_MSI_PENDING
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_MSI_PENDING_64
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_MSIX_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF4_0_MSIX_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF4_0_MSIX_TABLE
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF4_0_MSIX_PBA
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_HDR
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC1
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC2
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG1
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG2
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG3
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG1
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG2
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG3
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CAP
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CNTL
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CNTL__STU__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CNTL__STU_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CAP
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CNTL
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf5_bifcfgdecp
++//BIF_CFG_DEV0_EPF0_VF5_0_VENDOR_ID
++#define BIF_CFG_DEV0_EPF0_VF5_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_ID
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_COMMAND
++#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__AD_STEPPING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__SERR_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__FAST_B2B_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__INT_DIS__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__AD_STEPPING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__SERR_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__FAST_B2B_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__INT_DIS_MASK 0x0400L
++//BIF_CFG_DEV0_EPF0_VF5_0_STATUS
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__INT_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__CAP_LIST__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__PCI_66_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__DEVSEL_TIMING__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__INT_STATUS_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__CAP_LIST_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__PCI_66_CAP_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__DEVSEL_TIMING_MASK 0x0600L
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF5_0_REVISION_ID
++#define BIF_CFG_DEV0_EPF0_VF5_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF5_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF5_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_VF5_0_PROG_INTERFACE
++#define BIF_CFG_DEV0_EPF0_VF5_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_SUB_CLASS
++#define BIF_CFG_DEV0_EPF0_VF5_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_BASE_CLASS
++#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_CACHE_LINE
++#define BIF_CFG_DEV0_EPF0_VF5_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_LATENCY
++#define BIF_CFG_DEV0_EPF0_VF5_0_LATENCY__LATENCY_TIMER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_LATENCY__LATENCY_TIMER_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_HEADER
++#define BIF_CFG_DEV0_EPF0_VF5_0_HEADER__HEADER_TYPE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_HEADER__DEVICE_TYPE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF5_0_HEADER__HEADER_TYPE_MASK 0x7FL
++#define BIF_CFG_DEV0_EPF0_VF5_0_HEADER__DEVICE_TYPE_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF5_0_BIST
++#define BIF_CFG_DEV0_EPF0_VF5_0_BIST__BIST_COMP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_BIST__BIST_STRT__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF5_0_BIST__BIST_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF5_0_BIST__BIST_COMP_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF5_0_BIST__BIST_STRT_MASK 0x40L
++#define BIF_CFG_DEV0_EPF0_VF5_0_BIST__BIST_CAP_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_1
++#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_2
++#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_3
++#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_4
++#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_5
++#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_6
++#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_ADAPTER_ID
++#define BIF_CFG_DEV0_EPF0_VF5_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF5_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF5_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_VF5_0_ROM_BASE_ADDR
++#define BIF_CFG_DEV0_EPF0_VF5_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_CAP_PTR
++#define BIF_CFG_DEV0_EPF0_VF5_0_CAP_PTR__CAP_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL
++//BIF_CFG_DEV0_EPF0_VF5_0_INTERRUPT_LINE
++#define BIF_CFG_DEV0_EPF0_VF5_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_INTERRUPT_PIN
++#define BIF_CFG_DEV0_EPF0_VF5_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP__VERSION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP__VERSION_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
++//BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
++//BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__LINK_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__LINK_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
++//BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
++//BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP2
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP2__RESERVED__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP2__RESERVED_MASK 0xFE000000L
++//BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
++//BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
++//BIF_CFG_DEV0_EPF0_VF5_0_SLOT_CAP2
++#define BIF_CFG_DEV0_EPF0_VF5_0_SLOT_CAP2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_SLOT_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF5_0_SLOT_CNTL2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_SLOT_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF5_0_SLOT_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_MSI_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
++//BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_ADDR_LO
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_ADDR_HI
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_DATA
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_MSI_MASK
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MASK__MSI_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_DATA_64
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_MSI_MASK_64
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_MSI_PENDING
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_MSI_PENDING_64
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_MSIX_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF5_0_MSIX_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF5_0_MSIX_TABLE
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF5_0_MSIX_PBA
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_HDR
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC1
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC2
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG1
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG2
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG3
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG1
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG2
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG3
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CAP
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CNTL
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CNTL__STU__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CNTL__STU_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CAP
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CNTL
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf6_bifcfgdecp
++//BIF_CFG_DEV0_EPF0_VF6_0_VENDOR_ID
++#define BIF_CFG_DEV0_EPF0_VF6_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_ID
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_COMMAND
++#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__AD_STEPPING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__SERR_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__FAST_B2B_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__INT_DIS__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__AD_STEPPING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__SERR_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__FAST_B2B_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__INT_DIS_MASK 0x0400L
++//BIF_CFG_DEV0_EPF0_VF6_0_STATUS
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__INT_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__CAP_LIST__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__PCI_66_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__DEVSEL_TIMING__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__INT_STATUS_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__CAP_LIST_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__PCI_66_CAP_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__DEVSEL_TIMING_MASK 0x0600L
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF6_0_REVISION_ID
++#define BIF_CFG_DEV0_EPF0_VF6_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF6_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF6_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_VF6_0_PROG_INTERFACE
++#define BIF_CFG_DEV0_EPF0_VF6_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_SUB_CLASS
++#define BIF_CFG_DEV0_EPF0_VF6_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_BASE_CLASS
++#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_CACHE_LINE
++#define BIF_CFG_DEV0_EPF0_VF6_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_LATENCY
++#define BIF_CFG_DEV0_EPF0_VF6_0_LATENCY__LATENCY_TIMER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_LATENCY__LATENCY_TIMER_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_HEADER
++#define BIF_CFG_DEV0_EPF0_VF6_0_HEADER__HEADER_TYPE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_HEADER__DEVICE_TYPE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF6_0_HEADER__HEADER_TYPE_MASK 0x7FL
++#define BIF_CFG_DEV0_EPF0_VF6_0_HEADER__DEVICE_TYPE_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF6_0_BIST
++#define BIF_CFG_DEV0_EPF0_VF6_0_BIST__BIST_COMP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_BIST__BIST_STRT__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF6_0_BIST__BIST_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF6_0_BIST__BIST_COMP_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF6_0_BIST__BIST_STRT_MASK 0x40L
++#define BIF_CFG_DEV0_EPF0_VF6_0_BIST__BIST_CAP_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_1
++#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_2
++#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_3
++#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_4
++#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_5
++#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_6
++#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_ADAPTER_ID
++#define BIF_CFG_DEV0_EPF0_VF6_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF6_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF6_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_VF6_0_ROM_BASE_ADDR
++#define BIF_CFG_DEV0_EPF0_VF6_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_CAP_PTR
++#define BIF_CFG_DEV0_EPF0_VF6_0_CAP_PTR__CAP_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL
++//BIF_CFG_DEV0_EPF0_VF6_0_INTERRUPT_LINE
++#define BIF_CFG_DEV0_EPF0_VF6_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_INTERRUPT_PIN
++#define BIF_CFG_DEV0_EPF0_VF6_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP__VERSION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP__VERSION_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
++//BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
++//BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__LINK_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__LINK_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
++//BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
++//BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP2
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP2__RESERVED__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP2__RESERVED_MASK 0xFE000000L
++//BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
++//BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
++//BIF_CFG_DEV0_EPF0_VF6_0_SLOT_CAP2
++#define BIF_CFG_DEV0_EPF0_VF6_0_SLOT_CAP2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_SLOT_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF6_0_SLOT_CNTL2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_SLOT_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF6_0_SLOT_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_MSI_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
++//BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_ADDR_LO
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_ADDR_HI
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_DATA
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_MSI_MASK
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MASK__MSI_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_DATA_64
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_MSI_MASK_64
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_MSI_PENDING
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_MSI_PENDING_64
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_MSIX_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF6_0_MSIX_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF6_0_MSIX_TABLE
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF6_0_MSIX_PBA
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_HDR
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC1
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC2
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG1
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG2
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG3
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG1
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG2
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG3
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CAP
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CNTL
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CNTL__STU__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CNTL__STU_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CAP
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CNTL
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
++
++
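++// Note: each field below is described by a __SHIFT/_MASK macro pair; a field is
++// read as (reg & FIELD_MASK) >> FIELD__SHIFT and written back by clearing the
++// masked bits and OR-ing in the shifted value. A minimal sketch of that pattern
++// (the helper names are illustrative only and are not defined in this header):
++//   static inline uint32_t bifcfg_get_field(uint32_t reg, uint32_t mask, uint32_t shift)
++//   { return (reg & mask) >> shift; }
++//   static inline uint32_t bifcfg_set_field(uint32_t reg, uint32_t mask, uint32_t shift, uint32_t val)
++//   { return (reg & ~mask) | ((val << shift) & mask); }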
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf7_bifcfgdecp
++//BIF_CFG_DEV0_EPF0_VF7_0_VENDOR_ID
++#define BIF_CFG_DEV0_EPF0_VF7_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_ID
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_COMMAND
++#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__AD_STEPPING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__SERR_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__FAST_B2B_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__INT_DIS__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__AD_STEPPING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__SERR_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__FAST_B2B_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__INT_DIS_MASK 0x0400L
++//BIF_CFG_DEV0_EPF0_VF7_0_STATUS
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__INT_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__CAP_LIST__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__PCI_66_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__DEVSEL_TIMING__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__INT_STATUS_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__CAP_LIST_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__PCI_66_CAP_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__DEVSEL_TIMING_MASK 0x0600L
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF7_0_REVISION_ID
++#define BIF_CFG_DEV0_EPF0_VF7_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF7_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF7_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_VF7_0_PROG_INTERFACE
++#define BIF_CFG_DEV0_EPF0_VF7_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_SUB_CLASS
++#define BIF_CFG_DEV0_EPF0_VF7_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_BASE_CLASS
++#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_CACHE_LINE
++#define BIF_CFG_DEV0_EPF0_VF7_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_LATENCY
++#define BIF_CFG_DEV0_EPF0_VF7_0_LATENCY__LATENCY_TIMER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_LATENCY__LATENCY_TIMER_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_HEADER
++#define BIF_CFG_DEV0_EPF0_VF7_0_HEADER__HEADER_TYPE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_HEADER__DEVICE_TYPE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF7_0_HEADER__HEADER_TYPE_MASK 0x7FL
++#define BIF_CFG_DEV0_EPF0_VF7_0_HEADER__DEVICE_TYPE_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF7_0_BIST
++#define BIF_CFG_DEV0_EPF0_VF7_0_BIST__BIST_COMP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_BIST__BIST_STRT__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF7_0_BIST__BIST_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF7_0_BIST__BIST_COMP_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF7_0_BIST__BIST_STRT_MASK 0x40L
++#define BIF_CFG_DEV0_EPF0_VF7_0_BIST__BIST_CAP_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_1
++#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_2
++#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_3
++#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_4
++#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_5
++#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_6
++#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_ADAPTER_ID
++#define BIF_CFG_DEV0_EPF0_VF7_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF7_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF7_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_VF7_0_ROM_BASE_ADDR
++#define BIF_CFG_DEV0_EPF0_VF7_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_CAP_PTR
++#define BIF_CFG_DEV0_EPF0_VF7_0_CAP_PTR__CAP_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL
++//BIF_CFG_DEV0_EPF0_VF7_0_INTERRUPT_LINE
++#define BIF_CFG_DEV0_EPF0_VF7_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_INTERRUPT_PIN
++#define BIF_CFG_DEV0_EPF0_VF7_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP__VERSION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP__VERSION_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
++//BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
++//BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__LINK_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__LINK_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
++//BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
++//BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP2
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP2__RESERVED__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP2__RESERVED_MASK 0xFE000000L
++//BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
++//BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
++//BIF_CFG_DEV0_EPF0_VF7_0_SLOT_CAP2
++#define BIF_CFG_DEV0_EPF0_VF7_0_SLOT_CAP2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_SLOT_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF7_0_SLOT_CNTL2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_SLOT_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF7_0_SLOT_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_MSI_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
++//BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_ADDR_LO
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_ADDR_HI
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_DATA
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_MSI_MASK
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MASK__MSI_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_DATA_64
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_MSI_MASK_64
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_MSI_PENDING
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_MSI_PENDING_64
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_MSIX_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF7_0_MSIX_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF7_0_MSIX_TABLE
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF7_0_MSIX_PBA
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_HDR
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC1
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC2
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG1
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG2
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG3
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG1
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG2
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG3
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CAP
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CNTL
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CNTL__STU__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CNTL__STU_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CAP
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CNTL
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf8_bifcfgdecp
++//BIF_CFG_DEV0_EPF0_VF8_0_VENDOR_ID
++#define BIF_CFG_DEV0_EPF0_VF8_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_ID
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_COMMAND
++#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__AD_STEPPING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__SERR_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__FAST_B2B_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__INT_DIS__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__AD_STEPPING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__SERR_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__FAST_B2B_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__INT_DIS_MASK 0x0400L
++//BIF_CFG_DEV0_EPF0_VF8_0_STATUS
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__INT_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__CAP_LIST__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__PCI_66_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__DEVSEL_TIMING__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__INT_STATUS_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__CAP_LIST_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__PCI_66_CAP_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__DEVSEL_TIMING_MASK 0x0600L
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF8_0_REVISION_ID
++#define BIF_CFG_DEV0_EPF0_VF8_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF8_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF8_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_VF8_0_PROG_INTERFACE
++#define BIF_CFG_DEV0_EPF0_VF8_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_SUB_CLASS
++#define BIF_CFG_DEV0_EPF0_VF8_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_BASE_CLASS
++#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_CACHE_LINE
++#define BIF_CFG_DEV0_EPF0_VF8_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_LATENCY
++#define BIF_CFG_DEV0_EPF0_VF8_0_LATENCY__LATENCY_TIMER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_LATENCY__LATENCY_TIMER_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_HEADER
++#define BIF_CFG_DEV0_EPF0_VF8_0_HEADER__HEADER_TYPE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_HEADER__DEVICE_TYPE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF8_0_HEADER__HEADER_TYPE_MASK 0x7FL
++#define BIF_CFG_DEV0_EPF0_VF8_0_HEADER__DEVICE_TYPE_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF8_0_BIST
++#define BIF_CFG_DEV0_EPF0_VF8_0_BIST__BIST_COMP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_BIST__BIST_STRT__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF8_0_BIST__BIST_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF8_0_BIST__BIST_COMP_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF8_0_BIST__BIST_STRT_MASK 0x40L
++#define BIF_CFG_DEV0_EPF0_VF8_0_BIST__BIST_CAP_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_1
++#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_2
++#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_3
++#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_4
++#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_5
++#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_6
++#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_ADAPTER_ID
++#define BIF_CFG_DEV0_EPF0_VF8_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF8_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF8_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_VF8_0_ROM_BASE_ADDR
++#define BIF_CFG_DEV0_EPF0_VF8_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_CAP_PTR
++#define BIF_CFG_DEV0_EPF0_VF8_0_CAP_PTR__CAP_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL
++//BIF_CFG_DEV0_EPF0_VF8_0_INTERRUPT_LINE
++#define BIF_CFG_DEV0_EPF0_VF8_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_INTERRUPT_PIN
++#define BIF_CFG_DEV0_EPF0_VF8_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP__VERSION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP__VERSION_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
++//BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
++//BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__LINK_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__LINK_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
++//BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
++//BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP2
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP2__RESERVED__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP2__RESERVED_MASK 0xFE000000L
++//BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
++//BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
++//BIF_CFG_DEV0_EPF0_VF8_0_SLOT_CAP2
++#define BIF_CFG_DEV0_EPF0_VF8_0_SLOT_CAP2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_SLOT_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF8_0_SLOT_CNTL2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_SLOT_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF8_0_SLOT_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_MSI_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
++//BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_ADDR_LO
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_ADDR_HI
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_DATA
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_MSI_MASK
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MASK__MSI_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_DATA_64
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_MSI_MASK_64
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_MSI_PENDING
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_MSI_PENDING_64
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_MSIX_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF8_0_MSIX_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF8_0_MSIX_TABLE
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF8_0_MSIX_PBA
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_HDR
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC1
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC2
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG1
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG2
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG3
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG1
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG2
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG3
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CAP
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CNTL
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CNTL__STU__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CNTL__STU_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CAP
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CNTL
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf9_bifcfgdecp
++//BIF_CFG_DEV0_EPF0_VF9_0_VENDOR_ID
++#define BIF_CFG_DEV0_EPF0_VF9_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_ID
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_COMMAND
++#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__AD_STEPPING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__SERR_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__FAST_B2B_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__INT_DIS__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__AD_STEPPING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__SERR_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__FAST_B2B_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__INT_DIS_MASK 0x0400L
++//BIF_CFG_DEV0_EPF0_VF9_0_STATUS
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__INT_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__CAP_LIST__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__PCI_66_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__DEVSEL_TIMING__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__INT_STATUS_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__CAP_LIST_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__PCI_66_CAP_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__DEVSEL_TIMING_MASK 0x0600L
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF9_0_REVISION_ID
++#define BIF_CFG_DEV0_EPF0_VF9_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF9_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF9_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_VF9_0_PROG_INTERFACE
++#define BIF_CFG_DEV0_EPF0_VF9_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_SUB_CLASS
++#define BIF_CFG_DEV0_EPF0_VF9_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_BASE_CLASS
++#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_CACHE_LINE
++#define BIF_CFG_DEV0_EPF0_VF9_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_LATENCY
++#define BIF_CFG_DEV0_EPF0_VF9_0_LATENCY__LATENCY_TIMER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_LATENCY__LATENCY_TIMER_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_HEADER
++#define BIF_CFG_DEV0_EPF0_VF9_0_HEADER__HEADER_TYPE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_HEADER__DEVICE_TYPE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF9_0_HEADER__HEADER_TYPE_MASK 0x7FL
++#define BIF_CFG_DEV0_EPF0_VF9_0_HEADER__DEVICE_TYPE_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF9_0_BIST
++#define BIF_CFG_DEV0_EPF0_VF9_0_BIST__BIST_COMP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_BIST__BIST_STRT__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF9_0_BIST__BIST_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF9_0_BIST__BIST_COMP_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF9_0_BIST__BIST_STRT_MASK 0x40L
++#define BIF_CFG_DEV0_EPF0_VF9_0_BIST__BIST_CAP_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_1
++#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_2
++#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_3
++#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_4
++#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_5
++#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_6
++#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_ADAPTER_ID
++#define BIF_CFG_DEV0_EPF0_VF9_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF9_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF9_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_VF9_0_ROM_BASE_ADDR
++#define BIF_CFG_DEV0_EPF0_VF9_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_CAP_PTR
++#define BIF_CFG_DEV0_EPF0_VF9_0_CAP_PTR__CAP_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL
++//BIF_CFG_DEV0_EPF0_VF9_0_INTERRUPT_LINE
++#define BIF_CFG_DEV0_EPF0_VF9_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_INTERRUPT_PIN
++#define BIF_CFG_DEV0_EPF0_VF9_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP__VERSION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP__VERSION_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
++//BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
++//BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__LINK_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__LINK_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
++//BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
++//BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP2
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP2__RESERVED__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP2__RESERVED_MASK 0xFE000000L
++//BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
++//BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
++//BIF_CFG_DEV0_EPF0_VF9_0_SLOT_CAP2
++#define BIF_CFG_DEV0_EPF0_VF9_0_SLOT_CAP2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_SLOT_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF9_0_SLOT_CNTL2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_SLOT_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF9_0_SLOT_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_MSI_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
++//BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_ADDR_LO
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_ADDR_HI
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_DATA
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_MSI_MASK
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MASK__MSI_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_DATA_64
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_MSI_MASK_64
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_MSI_PENDING
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_MSI_PENDING_64
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_MSIX_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF9_0_MSIX_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF9_0_MSIX_TABLE
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF9_0_MSIX_PBA
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_HDR
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC1
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC2
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG1
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG2
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG3
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG1
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG2
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG3
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CAP
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CNTL
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CNTL__STU__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CNTL__STU_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CAP
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CNTL
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf10_bifcfgdecp
++//BIF_CFG_DEV0_EPF0_VF10_0_VENDOR_ID
++#define BIF_CFG_DEV0_EPF0_VF10_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_ID
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_COMMAND
++#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__AD_STEPPING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__SERR_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__FAST_B2B_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__INT_DIS__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__AD_STEPPING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__SERR_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__FAST_B2B_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__INT_DIS_MASK 0x0400L
++//BIF_CFG_DEV0_EPF0_VF10_0_STATUS
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__INT_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__CAP_LIST__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__PCI_66_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__DEVSEL_TIMING__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__INT_STATUS_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__CAP_LIST_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__PCI_66_CAP_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__DEVSEL_TIMING_MASK 0x0600L
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF10_0_REVISION_ID
++#define BIF_CFG_DEV0_EPF0_VF10_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF10_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF10_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_VF10_0_PROG_INTERFACE
++#define BIF_CFG_DEV0_EPF0_VF10_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_SUB_CLASS
++#define BIF_CFG_DEV0_EPF0_VF10_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_BASE_CLASS
++#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_CACHE_LINE
++#define BIF_CFG_DEV0_EPF0_VF10_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_LATENCY
++#define BIF_CFG_DEV0_EPF0_VF10_0_LATENCY__LATENCY_TIMER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_LATENCY__LATENCY_TIMER_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_HEADER
++#define BIF_CFG_DEV0_EPF0_VF10_0_HEADER__HEADER_TYPE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_HEADER__DEVICE_TYPE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF10_0_HEADER__HEADER_TYPE_MASK 0x7FL
++#define BIF_CFG_DEV0_EPF0_VF10_0_HEADER__DEVICE_TYPE_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF10_0_BIST
++#define BIF_CFG_DEV0_EPF0_VF10_0_BIST__BIST_COMP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_BIST__BIST_STRT__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF10_0_BIST__BIST_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF10_0_BIST__BIST_COMP_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF10_0_BIST__BIST_STRT_MASK 0x40L
++#define BIF_CFG_DEV0_EPF0_VF10_0_BIST__BIST_CAP_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_1
++#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_2
++#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_3
++#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_4
++#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_5
++#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_6
++#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_ADAPTER_ID
++#define BIF_CFG_DEV0_EPF0_VF10_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF10_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF10_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_VF10_0_ROM_BASE_ADDR
++#define BIF_CFG_DEV0_EPF0_VF10_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_CAP_PTR
++#define BIF_CFG_DEV0_EPF0_VF10_0_CAP_PTR__CAP_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL
++//BIF_CFG_DEV0_EPF0_VF10_0_INTERRUPT_LINE
++#define BIF_CFG_DEV0_EPF0_VF10_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_INTERRUPT_PIN
++#define BIF_CFG_DEV0_EPF0_VF10_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP__VERSION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP__VERSION_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
++//BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
++//BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__LINK_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__LINK_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
++//BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
++//BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP2
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP2__RESERVED__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP2__RESERVED_MASK 0xFE000000L
++//BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
++//BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
++//BIF_CFG_DEV0_EPF0_VF10_0_SLOT_CAP2
++#define BIF_CFG_DEV0_EPF0_VF10_0_SLOT_CAP2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_SLOT_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF10_0_SLOT_CNTL2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_SLOT_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF10_0_SLOT_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_MSI_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
++//BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_ADDR_LO
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_ADDR_HI
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_DATA
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_MSI_MASK
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MASK__MSI_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_DATA_64
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_MSI_MASK_64
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_MSI_PENDING
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_MSI_PENDING_64
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_MSIX_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF10_0_MSIX_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF10_0_MSIX_TABLE
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF10_0_MSIX_PBA
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_HDR
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC1
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC2
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG1
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG2
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG3
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG1
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG2
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG3
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CAP
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CNTL
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CNTL__STU__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CNTL__STU_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CAP
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CNTL
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf11_bifcfgdecp
++//BIF_CFG_DEV0_EPF0_VF11_0_VENDOR_ID
++#define BIF_CFG_DEV0_EPF0_VF11_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_ID
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_COMMAND
++#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__AD_STEPPING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__SERR_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__FAST_B2B_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__INT_DIS__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__AD_STEPPING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__SERR_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__FAST_B2B_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__INT_DIS_MASK 0x0400L
++//BIF_CFG_DEV0_EPF0_VF11_0_STATUS
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__INT_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__CAP_LIST__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__PCI_66_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__DEVSEL_TIMING__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__INT_STATUS_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__CAP_LIST_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__PCI_66_CAP_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__DEVSEL_TIMING_MASK 0x0600L
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF11_0_REVISION_ID
++#define BIF_CFG_DEV0_EPF0_VF11_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF11_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF11_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_VF11_0_PROG_INTERFACE
++#define BIF_CFG_DEV0_EPF0_VF11_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_SUB_CLASS
++#define BIF_CFG_DEV0_EPF0_VF11_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_BASE_CLASS
++#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_CACHE_LINE
++#define BIF_CFG_DEV0_EPF0_VF11_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_LATENCY
++#define BIF_CFG_DEV0_EPF0_VF11_0_LATENCY__LATENCY_TIMER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_LATENCY__LATENCY_TIMER_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_HEADER
++#define BIF_CFG_DEV0_EPF0_VF11_0_HEADER__HEADER_TYPE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_HEADER__DEVICE_TYPE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF11_0_HEADER__HEADER_TYPE_MASK 0x7FL
++#define BIF_CFG_DEV0_EPF0_VF11_0_HEADER__DEVICE_TYPE_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF11_0_BIST
++#define BIF_CFG_DEV0_EPF0_VF11_0_BIST__BIST_COMP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_BIST__BIST_STRT__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF11_0_BIST__BIST_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF11_0_BIST__BIST_COMP_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF11_0_BIST__BIST_STRT_MASK 0x40L
++#define BIF_CFG_DEV0_EPF0_VF11_0_BIST__BIST_CAP_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_1
++#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_2
++#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_3
++#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_4
++#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_5
++#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_6
++#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_ADAPTER_ID
++#define BIF_CFG_DEV0_EPF0_VF11_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF11_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF11_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_VF11_0_ROM_BASE_ADDR
++#define BIF_CFG_DEV0_EPF0_VF11_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_CAP_PTR
++#define BIF_CFG_DEV0_EPF0_VF11_0_CAP_PTR__CAP_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL
++//BIF_CFG_DEV0_EPF0_VF11_0_INTERRUPT_LINE
++#define BIF_CFG_DEV0_EPF0_VF11_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_INTERRUPT_PIN
++#define BIF_CFG_DEV0_EPF0_VF11_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP__VERSION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP__VERSION_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
++//BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
++//BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__LINK_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__LINK_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
++//BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
++//BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP2
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP2__RESERVED__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP2__RESERVED_MASK 0xFE000000L
++//BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
++//BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
++//BIF_CFG_DEV0_EPF0_VF11_0_SLOT_CAP2
++#define BIF_CFG_DEV0_EPF0_VF11_0_SLOT_CAP2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_SLOT_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF11_0_SLOT_CNTL2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_SLOT_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF11_0_SLOT_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_MSI_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
++//BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_ADDR_LO
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_ADDR_HI
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_DATA
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_MSI_MASK
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MASK__MSI_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_DATA_64
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_MSI_MASK_64
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_MSI_PENDING
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_MSI_PENDING_64
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_MSIX_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF11_0_MSIX_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF11_0_MSIX_TABLE
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF11_0_MSIX_PBA
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_HDR
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC1
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC2
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG1
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG2
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG3
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG1
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG2
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG3
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CAP
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CNTL
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CNTL__STU__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CNTL__STU_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CAP
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CNTL
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf12_bifcfgdecp
++//BIF_CFG_DEV0_EPF0_VF12_0_VENDOR_ID
++#define BIF_CFG_DEV0_EPF0_VF12_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_ID
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_COMMAND
++#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__AD_STEPPING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__SERR_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__FAST_B2B_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__INT_DIS__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__AD_STEPPING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__SERR_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__FAST_B2B_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__INT_DIS_MASK 0x0400L
++//BIF_CFG_DEV0_EPF0_VF12_0_STATUS
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__INT_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__CAP_LIST__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__PCI_66_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__DEVSEL_TIMING__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__INT_STATUS_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__CAP_LIST_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__PCI_66_CAP_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__DEVSEL_TIMING_MASK 0x0600L
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF12_0_REVISION_ID
++#define BIF_CFG_DEV0_EPF0_VF12_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF12_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF12_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_VF12_0_PROG_INTERFACE
++#define BIF_CFG_DEV0_EPF0_VF12_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_SUB_CLASS
++#define BIF_CFG_DEV0_EPF0_VF12_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_BASE_CLASS
++#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_CACHE_LINE
++#define BIF_CFG_DEV0_EPF0_VF12_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_LATENCY
++#define BIF_CFG_DEV0_EPF0_VF12_0_LATENCY__LATENCY_TIMER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_LATENCY__LATENCY_TIMER_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_HEADER
++#define BIF_CFG_DEV0_EPF0_VF12_0_HEADER__HEADER_TYPE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_HEADER__DEVICE_TYPE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF12_0_HEADER__HEADER_TYPE_MASK 0x7FL
++#define BIF_CFG_DEV0_EPF0_VF12_0_HEADER__DEVICE_TYPE_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF12_0_BIST
++#define BIF_CFG_DEV0_EPF0_VF12_0_BIST__BIST_COMP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_BIST__BIST_STRT__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF12_0_BIST__BIST_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF12_0_BIST__BIST_COMP_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF12_0_BIST__BIST_STRT_MASK 0x40L
++#define BIF_CFG_DEV0_EPF0_VF12_0_BIST__BIST_CAP_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_1
++#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_2
++#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_3
++#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_4
++#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_5
++#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_6
++#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_ADAPTER_ID
++#define BIF_CFG_DEV0_EPF0_VF12_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF12_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF12_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_VF12_0_ROM_BASE_ADDR
++#define BIF_CFG_DEV0_EPF0_VF12_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_CAP_PTR
++#define BIF_CFG_DEV0_EPF0_VF12_0_CAP_PTR__CAP_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL
++//BIF_CFG_DEV0_EPF0_VF12_0_INTERRUPT_LINE
++#define BIF_CFG_DEV0_EPF0_VF12_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_INTERRUPT_PIN
++#define BIF_CFG_DEV0_EPF0_VF12_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP__VERSION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP__VERSION_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
++//BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
++//BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__LINK_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__LINK_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
++//BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
++//BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP2
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP2__RESERVED__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP2__RESERVED_MASK 0xFE000000L
++//BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
++//BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
++//BIF_CFG_DEV0_EPF0_VF12_0_SLOT_CAP2
++#define BIF_CFG_DEV0_EPF0_VF12_0_SLOT_CAP2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_SLOT_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF12_0_SLOT_CNTL2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_SLOT_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF12_0_SLOT_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_MSI_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
++//BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_ADDR_LO
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_ADDR_HI
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_DATA
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_MSI_MASK
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MASK__MSI_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_DATA_64
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_MSI_MASK_64
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_MSI_PENDING
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_MSI_PENDING_64
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_MSIX_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF12_0_MSIX_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF12_0_MSIX_TABLE
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF12_0_MSIX_PBA
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_HDR
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC1
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC2
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG1
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG2
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG3
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG1
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG2
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG3
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CAP
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CNTL
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CNTL__STU__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CNTL__STU_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CAP
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CNTL
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf13_bifcfgdecp
++//BIF_CFG_DEV0_EPF0_VF13_0_VENDOR_ID
++#define BIF_CFG_DEV0_EPF0_VF13_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_ID
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_COMMAND
++#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__AD_STEPPING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__SERR_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__FAST_B2B_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__INT_DIS__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__AD_STEPPING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__SERR_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__FAST_B2B_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__INT_DIS_MASK 0x0400L
++//BIF_CFG_DEV0_EPF0_VF13_0_STATUS
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__INT_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__CAP_LIST__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__PCI_66_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__DEVSEL_TIMING__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__INT_STATUS_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__CAP_LIST_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__PCI_66_CAP_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__DEVSEL_TIMING_MASK 0x0600L
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF13_0_REVISION_ID
++#define BIF_CFG_DEV0_EPF0_VF13_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF13_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF13_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_VF13_0_PROG_INTERFACE
++#define BIF_CFG_DEV0_EPF0_VF13_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_SUB_CLASS
++#define BIF_CFG_DEV0_EPF0_VF13_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_BASE_CLASS
++#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_CACHE_LINE
++#define BIF_CFG_DEV0_EPF0_VF13_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_LATENCY
++#define BIF_CFG_DEV0_EPF0_VF13_0_LATENCY__LATENCY_TIMER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_LATENCY__LATENCY_TIMER_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_HEADER
++#define BIF_CFG_DEV0_EPF0_VF13_0_HEADER__HEADER_TYPE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_HEADER__DEVICE_TYPE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF13_0_HEADER__HEADER_TYPE_MASK 0x7FL
++#define BIF_CFG_DEV0_EPF0_VF13_0_HEADER__DEVICE_TYPE_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF13_0_BIST
++#define BIF_CFG_DEV0_EPF0_VF13_0_BIST__BIST_COMP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_BIST__BIST_STRT__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF13_0_BIST__BIST_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF13_0_BIST__BIST_COMP_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF13_0_BIST__BIST_STRT_MASK 0x40L
++#define BIF_CFG_DEV0_EPF0_VF13_0_BIST__BIST_CAP_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_1
++#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_2
++#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_3
++#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_4
++#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_5
++#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_6
++#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_ADAPTER_ID
++#define BIF_CFG_DEV0_EPF0_VF13_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF13_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF13_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_VF13_0_ROM_BASE_ADDR
++#define BIF_CFG_DEV0_EPF0_VF13_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_CAP_PTR
++#define BIF_CFG_DEV0_EPF0_VF13_0_CAP_PTR__CAP_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL
++//BIF_CFG_DEV0_EPF0_VF13_0_INTERRUPT_LINE
++#define BIF_CFG_DEV0_EPF0_VF13_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_INTERRUPT_PIN
++#define BIF_CFG_DEV0_EPF0_VF13_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP__VERSION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP__VERSION_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
++//BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
++//BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__LINK_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__LINK_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
++//BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
++//BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP2
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP2__RESERVED__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP2__RESERVED_MASK 0xFE000000L
++//BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
++//BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
++//BIF_CFG_DEV0_EPF0_VF13_0_SLOT_CAP2
++#define BIF_CFG_DEV0_EPF0_VF13_0_SLOT_CAP2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_SLOT_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF13_0_SLOT_CNTL2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_SLOT_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF13_0_SLOT_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_MSI_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
++//BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_ADDR_LO
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_ADDR_HI
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_DATA
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_MSI_MASK
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MASK__MSI_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_DATA_64
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_MSI_MASK_64
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_MSI_PENDING
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_MSI_PENDING_64
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_MSIX_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF13_0_MSIX_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF13_0_MSIX_TABLE
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF13_0_MSIX_PBA
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_HDR
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC1
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC2
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG1
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG2
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG3
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG1
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG2
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG3
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CAP
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CNTL
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CNTL__STU__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CNTL__STU_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CAP
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CNTL
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf14_bifcfgdecp
++//BIF_CFG_DEV0_EPF0_VF14_0_VENDOR_ID
++#define BIF_CFG_DEV0_EPF0_VF14_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_ID
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_COMMAND
++#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__AD_STEPPING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__SERR_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__FAST_B2B_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__INT_DIS__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__AD_STEPPING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__SERR_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__FAST_B2B_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__INT_DIS_MASK 0x0400L
++//BIF_CFG_DEV0_EPF0_VF14_0_STATUS
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__INT_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__CAP_LIST__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__PCI_66_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__DEVSEL_TIMING__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__INT_STATUS_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__CAP_LIST_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__PCI_66_CAP_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__DEVSEL_TIMING_MASK 0x0600L
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF14_0_REVISION_ID
++#define BIF_CFG_DEV0_EPF0_VF14_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF14_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF14_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_VF14_0_PROG_INTERFACE
++#define BIF_CFG_DEV0_EPF0_VF14_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_SUB_CLASS
++#define BIF_CFG_DEV0_EPF0_VF14_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_BASE_CLASS
++#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_CACHE_LINE
++#define BIF_CFG_DEV0_EPF0_VF14_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_LATENCY
++#define BIF_CFG_DEV0_EPF0_VF14_0_LATENCY__LATENCY_TIMER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_LATENCY__LATENCY_TIMER_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_HEADER
++#define BIF_CFG_DEV0_EPF0_VF14_0_HEADER__HEADER_TYPE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_HEADER__DEVICE_TYPE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF14_0_HEADER__HEADER_TYPE_MASK 0x7FL
++#define BIF_CFG_DEV0_EPF0_VF14_0_HEADER__DEVICE_TYPE_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF14_0_BIST
++#define BIF_CFG_DEV0_EPF0_VF14_0_BIST__BIST_COMP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_BIST__BIST_STRT__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF14_0_BIST__BIST_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF14_0_BIST__BIST_COMP_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF14_0_BIST__BIST_STRT_MASK 0x40L
++#define BIF_CFG_DEV0_EPF0_VF14_0_BIST__BIST_CAP_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_1
++#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_2
++#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_3
++#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_4
++#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_5
++#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_6
++#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_ADAPTER_ID
++#define BIF_CFG_DEV0_EPF0_VF14_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF14_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF14_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_VF14_0_ROM_BASE_ADDR
++#define BIF_CFG_DEV0_EPF0_VF14_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_CAP_PTR
++#define BIF_CFG_DEV0_EPF0_VF14_0_CAP_PTR__CAP_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL
++//BIF_CFG_DEV0_EPF0_VF14_0_INTERRUPT_LINE
++#define BIF_CFG_DEV0_EPF0_VF14_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_INTERRUPT_PIN
++#define BIF_CFG_DEV0_EPF0_VF14_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP__VERSION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP__VERSION_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
++//BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
++//BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__LINK_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__LINK_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
++//BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
++//BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP2
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP2__RESERVED__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP2__RESERVED_MASK 0xFE000000L
++//BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
++//BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
++//BIF_CFG_DEV0_EPF0_VF14_0_SLOT_CAP2
++#define BIF_CFG_DEV0_EPF0_VF14_0_SLOT_CAP2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_SLOT_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF14_0_SLOT_CNTL2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_SLOT_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF14_0_SLOT_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_MSI_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
++//BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_ADDR_LO
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_ADDR_HI
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_DATA
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_MSI_MASK
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MASK__MSI_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_DATA_64
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_MSI_MASK_64
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_MSI_PENDING
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_MSI_PENDING_64
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_MSIX_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF14_0_MSIX_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF14_0_MSIX_TABLE
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF14_0_MSIX_PBA
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_HDR
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC1
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC2
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG1
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG2
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG3
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG1
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG2
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG3
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CAP
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CNTL
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CNTL__STU__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CNTL__STU_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CAP
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CNTL
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf15_bifcfgdecp
++//BIF_CFG_DEV0_EPF0_VF15_0_VENDOR_ID
++#define BIF_CFG_DEV0_EPF0_VF15_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_ID
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_COMMAND
++#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__AD_STEPPING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__SERR_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__FAST_B2B_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__INT_DIS__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__AD_STEPPING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__SERR_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__FAST_B2B_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__INT_DIS_MASK 0x0400L
++//BIF_CFG_DEV0_EPF0_VF15_0_STATUS
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__INT_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__CAP_LIST__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__PCI_66_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__DEVSEL_TIMING__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__INT_STATUS_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__CAP_LIST_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__PCI_66_CAP_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__DEVSEL_TIMING_MASK 0x0600L
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF15_0_REVISION_ID
++#define BIF_CFG_DEV0_EPF0_VF15_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF15_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF15_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_VF15_0_PROG_INTERFACE
++#define BIF_CFG_DEV0_EPF0_VF15_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_SUB_CLASS
++#define BIF_CFG_DEV0_EPF0_VF15_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_BASE_CLASS
++#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_CACHE_LINE
++#define BIF_CFG_DEV0_EPF0_VF15_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_LATENCY
++#define BIF_CFG_DEV0_EPF0_VF15_0_LATENCY__LATENCY_TIMER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_LATENCY__LATENCY_TIMER_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_HEADER
++#define BIF_CFG_DEV0_EPF0_VF15_0_HEADER__HEADER_TYPE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_HEADER__DEVICE_TYPE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF15_0_HEADER__HEADER_TYPE_MASK 0x7FL
++#define BIF_CFG_DEV0_EPF0_VF15_0_HEADER__DEVICE_TYPE_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF15_0_BIST
++#define BIF_CFG_DEV0_EPF0_VF15_0_BIST__BIST_COMP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_BIST__BIST_STRT__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF15_0_BIST__BIST_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF15_0_BIST__BIST_COMP_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF15_0_BIST__BIST_STRT_MASK 0x40L
++#define BIF_CFG_DEV0_EPF0_VF15_0_BIST__BIST_CAP_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_1
++#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_2
++#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_3
++#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_4
++#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_5
++#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_6
++#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_ADAPTER_ID
++#define BIF_CFG_DEV0_EPF0_VF15_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF15_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF15_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_VF15_0_ROM_BASE_ADDR
++#define BIF_CFG_DEV0_EPF0_VF15_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_CAP_PTR
++#define BIF_CFG_DEV0_EPF0_VF15_0_CAP_PTR__CAP_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL
++//BIF_CFG_DEV0_EPF0_VF15_0_INTERRUPT_LINE
++#define BIF_CFG_DEV0_EPF0_VF15_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_INTERRUPT_PIN
++#define BIF_CFG_DEV0_EPF0_VF15_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP__VERSION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP__VERSION_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
++//BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
++//BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__LINK_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__LINK_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
++//BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
++//BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP2
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP2__RESERVED__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP2__RESERVED_MASK 0xFE000000L
++//BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
++//BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
++//BIF_CFG_DEV0_EPF0_VF15_0_SLOT_CAP2
++#define BIF_CFG_DEV0_EPF0_VF15_0_SLOT_CAP2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_SLOT_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF15_0_SLOT_CNTL2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_SLOT_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF15_0_SLOT_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_MSI_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
++//BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_ADDR_LO
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_ADDR_HI
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_DATA
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_MSI_MASK
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MASK__MSI_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_DATA_64
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_MSI_MASK_64
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_MSI_PENDING
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_MSI_PENDING_64
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_MSIX_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF15_0_MSIX_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF15_0_MSIX_TABLE
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF15_0_MSIX_PBA
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_HDR
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC1
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC2
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG1
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG2
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG3
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG1
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG2
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG3
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CAP
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CNTL
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CNTL__STU__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CNTL__STU_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CAP
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CNTL
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
++
++
++// addressBlock: nbio_nbif0_bif_bx_pf_SYSPFVFDEC
++//MM_INDEX
++#define MM_INDEX__MM_OFFSET__SHIFT 0x0
++#define MM_INDEX__MM_APER__SHIFT 0x1f
++#define MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
++#define MM_INDEX__MM_APER_MASK 0x80000000L
++//MM_DATA
++#define MM_DATA__MM_DATA__SHIFT 0x0
++#define MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
++//MM_INDEX_HI
++#define MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
++#define MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
++
++
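MM_INDEX, MM_DATA and MM_INDEX_HI above are the classic index/data pair used for indirect register access: the target register's offset is programmed into MM_INDEX (with MM_INDEX_HI carrying any upper bits), and the value is then read or written through MM_DATA. A minimal sketch of that access pattern follows, assuming a mapped MMIO base and hypothetical byte offsets for the pair; the real offsets come from the matching *_offset.h header, not from this sketch.

#include <stdint.h>

/* Hypothetical byte offsets for the index/data pair; the real values
 * live in the corresponding *_offset.h header, not in this sketch. */
#define MM_INDEX_OFFSET 0x00
#define MM_DATA_OFFSET  0x04

static inline uint32_t mmio_read32(volatile uint8_t *mmio, uint32_t off)
{
	return *(volatile uint32_t *)(mmio + off);
}

static inline void mmio_write32(volatile uint8_t *mmio, uint32_t off, uint32_t val)
{
	*(volatile uint32_t *)(mmio + off) = val;
}

/* Read a register through the index/data pair: program the byte offset of
 * the target register into the MM_OFFSET field of MM_INDEX, then read the
 * value back through MM_DATA. Offsets wider than MM_OFFSET would also need
 * MM_INDEX_HI. Uses the MM_INDEX__MM_OFFSET_MASK definition from this header. */
static uint32_t indirect_reg_read(volatile uint8_t *mmio, uint32_t reg_byte_offset)
{
	mmio_write32(mmio, MM_INDEX_OFFSET,
		     reg_byte_offset & MM_INDEX__MM_OFFSET_MASK);
	return mmio_read32(mmio, MM_DATA_OFFSET);
}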
++// addressBlock: nbio_nbif0_bif_bx_SYSDEC
++//SYSHUB_INDEX_OVLP
++#define SYSHUB_INDEX_OVLP__SYSHUB_OFFSET__SHIFT 0x0
++#define SYSHUB_INDEX_OVLP__SYSHUB_OFFSET_MASK 0x003FFFFFL
++//SYSHUB_DATA_OVLP
++#define SYSHUB_DATA_OVLP__SYSHUB_DATA__SHIFT 0x0
++#define SYSHUB_DATA_OVLP__SYSHUB_DATA_MASK 0xFFFFFFFFL
++//PCIE_INDEX
++#define PCIE_INDEX__PCIE_INDEX__SHIFT 0x0
++#define PCIE_INDEX__PCIE_INDEX_MASK 0xFFFFFFFFL
++//PCIE_DATA
++#define PCIE_DATA__PCIE_DATA__SHIFT 0x0
++#define PCIE_DATA__PCIE_DATA_MASK 0xFFFFFFFFL
++//PCIE_INDEX2
++#define PCIE_INDEX2__PCIE_INDEX2__SHIFT 0x0
++#define PCIE_INDEX2__PCIE_INDEX2_MASK 0xFFFFFFFFL
++//PCIE_DATA2
++#define PCIE_DATA2__PCIE_DATA2__SHIFT 0x0
++#define PCIE_DATA2__PCIE_DATA2_MASK 0xFFFFFFFFL
++//SBIOS_SCRATCH_0
++#define SBIOS_SCRATCH_0__SBIOS_SCRATCH_DW__SHIFT 0x0
++#define SBIOS_SCRATCH_0__SBIOS_SCRATCH_DW_MASK 0xFFFFFFFFL
++//SBIOS_SCRATCH_1
++#define SBIOS_SCRATCH_1__SBIOS_SCRATCH_DW__SHIFT 0x0
++#define SBIOS_SCRATCH_1__SBIOS_SCRATCH_DW_MASK 0xFFFFFFFFL
++//SBIOS_SCRATCH_2
++#define SBIOS_SCRATCH_2__SBIOS_SCRATCH_DW__SHIFT 0x0
++#define SBIOS_SCRATCH_2__SBIOS_SCRATCH_DW_MASK 0xFFFFFFFFL
++//SBIOS_SCRATCH_3
++#define SBIOS_SCRATCH_3__SBIOS_SCRATCH_DW__SHIFT 0x0
++#define SBIOS_SCRATCH_3__SBIOS_SCRATCH_DW_MASK 0xFFFFFFFFL
++//BIOS_SCRATCH_0
++#define BIOS_SCRATCH_0__BIOS_SCRATCH_0__SHIFT 0x0
++#define BIOS_SCRATCH_0__BIOS_SCRATCH_0_MASK 0xFFFFFFFFL
++//BIOS_SCRATCH_1
++#define BIOS_SCRATCH_1__BIOS_SCRATCH_1__SHIFT 0x0
++#define BIOS_SCRATCH_1__BIOS_SCRATCH_1_MASK 0xFFFFFFFFL
++//BIOS_SCRATCH_2
++#define BIOS_SCRATCH_2__BIOS_SCRATCH_2__SHIFT 0x0
++#define BIOS_SCRATCH_2__BIOS_SCRATCH_2_MASK 0xFFFFFFFFL
++//BIOS_SCRATCH_3
++#define BIOS_SCRATCH_3__BIOS_SCRATCH_3__SHIFT 0x0
++#define BIOS_SCRATCH_3__BIOS_SCRATCH_3_MASK 0xFFFFFFFFL
++//BIOS_SCRATCH_4
++#define BIOS_SCRATCH_4__BIOS_SCRATCH_4__SHIFT 0x0
++#define BIOS_SCRATCH_4__BIOS_SCRATCH_4_MASK 0xFFFFFFFFL
++//BIOS_SCRATCH_5
++#define BIOS_SCRATCH_5__BIOS_SCRATCH_5__SHIFT 0x0
++#define BIOS_SCRATCH_5__BIOS_SCRATCH_5_MASK 0xFFFFFFFFL
++//BIOS_SCRATCH_6
++#define BIOS_SCRATCH_6__BIOS_SCRATCH_6__SHIFT 0x0
++#define BIOS_SCRATCH_6__BIOS_SCRATCH_6_MASK 0xFFFFFFFFL
++//BIOS_SCRATCH_7
++#define BIOS_SCRATCH_7__BIOS_SCRATCH_7__SHIFT 0x0
++#define BIOS_SCRATCH_7__BIOS_SCRATCH_7_MASK 0xFFFFFFFFL
++//BIOS_SCRATCH_8
++#define BIOS_SCRATCH_8__BIOS_SCRATCH_8__SHIFT 0x0
++#define BIOS_SCRATCH_8__BIOS_SCRATCH_8_MASK 0xFFFFFFFFL
++//BIOS_SCRATCH_9
++#define BIOS_SCRATCH_9__BIOS_SCRATCH_9__SHIFT 0x0
++#define BIOS_SCRATCH_9__BIOS_SCRATCH_9_MASK 0xFFFFFFFFL
++//BIOS_SCRATCH_10
++#define BIOS_SCRATCH_10__BIOS_SCRATCH_10__SHIFT 0x0
++#define BIOS_SCRATCH_10__BIOS_SCRATCH_10_MASK 0xFFFFFFFFL
++//BIOS_SCRATCH_11
++#define BIOS_SCRATCH_11__BIOS_SCRATCH_11__SHIFT 0x0
++#define BIOS_SCRATCH_11__BIOS_SCRATCH_11_MASK 0xFFFFFFFFL
++//BIOS_SCRATCH_12
++#define BIOS_SCRATCH_12__BIOS_SCRATCH_12__SHIFT 0x0
++#define BIOS_SCRATCH_12__BIOS_SCRATCH_12_MASK 0xFFFFFFFFL
++//BIOS_SCRATCH_13
++#define BIOS_SCRATCH_13__BIOS_SCRATCH_13__SHIFT 0x0
++#define BIOS_SCRATCH_13__BIOS_SCRATCH_13_MASK 0xFFFFFFFFL
++//BIOS_SCRATCH_14
++#define BIOS_SCRATCH_14__BIOS_SCRATCH_14__SHIFT 0x0
++#define BIOS_SCRATCH_14__BIOS_SCRATCH_14_MASK 0xFFFFFFFFL
++//BIOS_SCRATCH_15
++#define BIOS_SCRATCH_15__BIOS_SCRATCH_15__SHIFT 0x0
++#define BIOS_SCRATCH_15__BIOS_SCRATCH_15_MASK 0xFFFFFFFFL
++//BIF_RLC_INTR_CNTL
++#define BIF_RLC_INTR_CNTL__RLC_CMD_COMPLETE__SHIFT 0x0
++#define BIF_RLC_INTR_CNTL__RLC_HANG_SELF_RECOVERED__SHIFT 0x1
++#define BIF_RLC_INTR_CNTL__RLC_HANG_NEED_FLR__SHIFT 0x2
++#define BIF_RLC_INTR_CNTL__RLC_VM_BUSY_TRANSITION__SHIFT 0x3
++#define BIF_RLC_INTR_CNTL__RLC_CMD_COMPLETE_MASK 0x00000001L
++#define BIF_RLC_INTR_CNTL__RLC_HANG_SELF_RECOVERED_MASK 0x00000002L
++#define BIF_RLC_INTR_CNTL__RLC_HANG_NEED_FLR_MASK 0x00000004L
++#define BIF_RLC_INTR_CNTL__RLC_VM_BUSY_TRANSITION_MASK 0x00000008L
++//BIF_VCE_INTR_CNTL
++#define BIF_VCE_INTR_CNTL__VCE_CMD_COMPLETE__SHIFT 0x0
++#define BIF_VCE_INTR_CNTL__VCE_HANG_SELF_RECOVERED__SHIFT 0x1
++#define BIF_VCE_INTR_CNTL__VCE_HANG_NEED_FLR__SHIFT 0x2
++#define BIF_VCE_INTR_CNTL__VCE_VM_BUSY_TRANSITION__SHIFT 0x3
++#define BIF_VCE_INTR_CNTL__VCE_CMD_COMPLETE_MASK 0x00000001L
++#define BIF_VCE_INTR_CNTL__VCE_HANG_SELF_RECOVERED_MASK 0x00000002L
++#define BIF_VCE_INTR_CNTL__VCE_HANG_NEED_FLR_MASK 0x00000004L
++#define BIF_VCE_INTR_CNTL__VCE_VM_BUSY_TRANSITION_MASK 0x00000008L
++//BIF_UVD_INTR_CNTL
++#define BIF_UVD_INTR_CNTL__UVD_CMD_COMPLETE__SHIFT 0x0
++#define BIF_UVD_INTR_CNTL__UVD_HANG_SELF_RECOVERED__SHIFT 0x1
++#define BIF_UVD_INTR_CNTL__UVD_HANG_NEED_FLR__SHIFT 0x2
++#define BIF_UVD_INTR_CNTL__UVD_VM_BUSY_TRANSITION__SHIFT 0x3
++#define BIF_UVD_INTR_CNTL__UVD_INST_SEL__SHIFT 0x1c
++#define BIF_UVD_INTR_CNTL__UVD_CMD_COMPLETE_MASK 0x00000001L
++#define BIF_UVD_INTR_CNTL__UVD_HANG_SELF_RECOVERED_MASK 0x00000002L
++#define BIF_UVD_INTR_CNTL__UVD_HANG_NEED_FLR_MASK 0x00000004L
++#define BIF_UVD_INTR_CNTL__UVD_VM_BUSY_TRANSITION_MASK 0x00000008L
++#define BIF_UVD_INTR_CNTL__UVD_INST_SEL_MASK 0xF0000000L
++//GFX_MMIOREG_CAM_ADDR0
++#define GFX_MMIOREG_CAM_ADDR0__CAM_ADDR0__SHIFT 0x0
++#define GFX_MMIOREG_CAM_ADDR0__CAM_ADDR0_MASK 0x000FFFFFL
++//GFX_MMIOREG_CAM_REMAP_ADDR0
++#define GFX_MMIOREG_CAM_REMAP_ADDR0__CAM_REMAP_ADDR0__SHIFT 0x0
++#define GFX_MMIOREG_CAM_REMAP_ADDR0__CAM_REMAP_ADDR0_MASK 0x000FFFFFL
++//GFX_MMIOREG_CAM_ADDR1
++#define GFX_MMIOREG_CAM_ADDR1__CAM_ADDR1__SHIFT 0x0
++#define GFX_MMIOREG_CAM_ADDR1__CAM_ADDR1_MASK 0x000FFFFFL
++//GFX_MMIOREG_CAM_REMAP_ADDR1
++#define GFX_MMIOREG_CAM_REMAP_ADDR1__CAM_REMAP_ADDR1__SHIFT 0x0
++#define GFX_MMIOREG_CAM_REMAP_ADDR1__CAM_REMAP_ADDR1_MASK 0x000FFFFFL
++//GFX_MMIOREG_CAM_ADDR2
++#define GFX_MMIOREG_CAM_ADDR2__CAM_ADDR2__SHIFT 0x0
++#define GFX_MMIOREG_CAM_ADDR2__CAM_ADDR2_MASK 0x000FFFFFL
++//GFX_MMIOREG_CAM_REMAP_ADDR2
++#define GFX_MMIOREG_CAM_REMAP_ADDR2__CAM_REMAP_ADDR2__SHIFT 0x0
++#define GFX_MMIOREG_CAM_REMAP_ADDR2__CAM_REMAP_ADDR2_MASK 0x000FFFFFL
++//GFX_MMIOREG_CAM_ADDR3
++#define GFX_MMIOREG_CAM_ADDR3__CAM_ADDR3__SHIFT 0x0
++#define GFX_MMIOREG_CAM_ADDR3__CAM_ADDR3_MASK 0x000FFFFFL
++//GFX_MMIOREG_CAM_REMAP_ADDR3
++#define GFX_MMIOREG_CAM_REMAP_ADDR3__CAM_REMAP_ADDR3__SHIFT 0x0
++#define GFX_MMIOREG_CAM_REMAP_ADDR3__CAM_REMAP_ADDR3_MASK 0x000FFFFFL
++//GFX_MMIOREG_CAM_ADDR4
++#define GFX_MMIOREG_CAM_ADDR4__CAM_ADDR4__SHIFT 0x0
++#define GFX_MMIOREG_CAM_ADDR4__CAM_ADDR4_MASK 0x000FFFFFL
++//GFX_MMIOREG_CAM_REMAP_ADDR4
++#define GFX_MMIOREG_CAM_REMAP_ADDR4__CAM_REMAP_ADDR4__SHIFT 0x0
++#define GFX_MMIOREG_CAM_REMAP_ADDR4__CAM_REMAP_ADDR4_MASK 0x000FFFFFL
++//GFX_MMIOREG_CAM_ADDR5
++#define GFX_MMIOREG_CAM_ADDR5__CAM_ADDR5__SHIFT 0x0
++#define GFX_MMIOREG_CAM_ADDR5__CAM_ADDR5_MASK 0x000FFFFFL
++//GFX_MMIOREG_CAM_REMAP_ADDR5
++#define GFX_MMIOREG_CAM_REMAP_ADDR5__CAM_REMAP_ADDR5__SHIFT 0x0
++#define GFX_MMIOREG_CAM_REMAP_ADDR5__CAM_REMAP_ADDR5_MASK 0x000FFFFFL
++//GFX_MMIOREG_CAM_ADDR6
++#define GFX_MMIOREG_CAM_ADDR6__CAM_ADDR6__SHIFT 0x0
++#define GFX_MMIOREG_CAM_ADDR6__CAM_ADDR6_MASK 0x000FFFFFL
++//GFX_MMIOREG_CAM_REMAP_ADDR6
++#define GFX_MMIOREG_CAM_REMAP_ADDR6__CAM_REMAP_ADDR6__SHIFT 0x0
++#define GFX_MMIOREG_CAM_REMAP_ADDR6__CAM_REMAP_ADDR6_MASK 0x000FFFFFL
++//GFX_MMIOREG_CAM_ADDR7
++#define GFX_MMIOREG_CAM_ADDR7__CAM_ADDR7__SHIFT 0x0
++#define GFX_MMIOREG_CAM_ADDR7__CAM_ADDR7_MASK 0x000FFFFFL
++//GFX_MMIOREG_CAM_REMAP_ADDR7
++#define GFX_MMIOREG_CAM_REMAP_ADDR7__CAM_REMAP_ADDR7__SHIFT 0x0
++#define GFX_MMIOREG_CAM_REMAP_ADDR7__CAM_REMAP_ADDR7_MASK 0x000FFFFFL
++//GFX_MMIOREG_CAM_CNTL
++#define GFX_MMIOREG_CAM_CNTL__CAM_ENABLE__SHIFT 0x0
++#define GFX_MMIOREG_CAM_CNTL__CAM_ENABLE_MASK 0x000000FFL
++//GFX_MMIOREG_CAM_ZERO_CPL
++#define GFX_MMIOREG_CAM_ZERO_CPL__CAM_ZERO_CPL__SHIFT 0x0
++#define GFX_MMIOREG_CAM_ZERO_CPL__CAM_ZERO_CPL_MASK 0xFFFFFFFFL
++//GFX_MMIOREG_CAM_ONE_CPL
++#define GFX_MMIOREG_CAM_ONE_CPL__CAM_ONE_CPL__SHIFT 0x0
++#define GFX_MMIOREG_CAM_ONE_CPL__CAM_ONE_CPL_MASK 0xFFFFFFFFL
++//GFX_MMIOREG_CAM_PROGRAMMABLE_CPL
++#define GFX_MMIOREG_CAM_PROGRAMMABLE_CPL__CAM_PROGRAMMABLE_CPL__SHIFT 0x0
++#define GFX_MMIOREG_CAM_PROGRAMMABLE_CPL__CAM_PROGRAMMABLE_CPL_MASK 0xFFFFFFFFL
++
++
++// addressBlock: nbio_nbif0_syshub_mmreg_syshubdec
++//SYSHUB_INDEX
++#define SYSHUB_INDEX__INDEX__SHIFT 0x0
++#define SYSHUB_INDEX__INDEX_MASK 0xFFFFFFFFL
++//SYSHUB_DATA
++#define SYSHUB_DATA__DATA__SHIFT 0x0
++#define SYSHUB_DATA__DATA_MASK 0xFFFFFFFFL
++
++
++// addressBlock: nbio_nbif0_rcc_strap_BIFDEC1
++//RCC_DEV0_EPF0_STRAP0
++#define RCC_DEV0_EPF0_STRAP0__STRAP_DEVICE_ID_DEV0_F0__SHIFT 0x0
++#define RCC_DEV0_EPF0_STRAP0__STRAP_MAJOR_REV_ID_DEV0_F0__SHIFT 0x10
++#define RCC_DEV0_EPF0_STRAP0__STRAP_MINOR_REV_ID_DEV0_F0__SHIFT 0x14
++#define RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT 0x18
++#define RCC_DEV0_EPF0_STRAP0__STRAP_FUNC_EN_DEV0_F0__SHIFT 0x1c
++#define RCC_DEV0_EPF0_STRAP0__STRAP_LEGACY_DEVICE_TYPE_EN_DEV0_F0__SHIFT 0x1d
++#define RCC_DEV0_EPF0_STRAP0__STRAP_D1_SUPPORT_DEV0_F0__SHIFT 0x1e
++#define RCC_DEV0_EPF0_STRAP0__STRAP_D2_SUPPORT_DEV0_F0__SHIFT 0x1f
++#define RCC_DEV0_EPF0_STRAP0__STRAP_DEVICE_ID_DEV0_F0_MASK 0x0000FFFFL
++#define RCC_DEV0_EPF0_STRAP0__STRAP_MAJOR_REV_ID_DEV0_F0_MASK 0x000F0000L
++#define RCC_DEV0_EPF0_STRAP0__STRAP_MINOR_REV_ID_DEV0_F0_MASK 0x00F00000L
++#define RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK 0x0F000000L
++#define RCC_DEV0_EPF0_STRAP0__STRAP_FUNC_EN_DEV0_F0_MASK 0x10000000L
++#define RCC_DEV0_EPF0_STRAP0__STRAP_LEGACY_DEVICE_TYPE_EN_DEV0_F0_MASK 0x20000000L
++#define RCC_DEV0_EPF0_STRAP0__STRAP_D1_SUPPORT_DEV0_F0_MASK 0x40000000L
++#define RCC_DEV0_EPF0_STRAP0__STRAP_D2_SUPPORT_DEV0_F0_MASK 0x80000000L
++
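Every register in these blocks follows the same convention: a REG__FIELD__SHIFT value gives the bit position and a REG__FIELD_MASK value covers the field, so decoding or updating a field is a plain mask-and-shift. A small sketch using the RCC_DEV0_EPF0_STRAP0 definitions above; the helper macros and the raw strap value are illustrative only, mirroring the get/set-field pattern driver code typically layers on top of these headers.

#include <stdint.h>
#include <stdio.h>

/* Generic field helpers built on the __SHIFT/_MASK naming convention above. */
#define REG_GET_FIELD(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)
#define REG_SET_FIELD(val, reg, field, fv) \
	(((val) & ~reg##__##field##_MASK) | \
	 (((uint32_t)(fv) << reg##__##field##__SHIFT) & reg##__##field##_MASK))

int main(void)
{
	uint32_t strap0 = 0x687F73BFu;	/* example raw value, not real hardware data */

	printf("device id: 0x%04x\n",
	       (unsigned)REG_GET_FIELD(strap0, RCC_DEV0_EPF0_STRAP0,
					STRAP_DEVICE_ID_DEV0_F0));
	printf("major rev: %u\n",
	       (unsigned)REG_GET_FIELD(strap0, RCC_DEV0_EPF0_STRAP0,
					STRAP_MAJOR_REV_ID_DEV0_F0));

	/* Update a single-bit field without disturbing the rest of the register. */
	strap0 = REG_SET_FIELD(strap0, RCC_DEV0_EPF0_STRAP0, STRAP_FUNC_EN_DEV0_F0, 1);
	return 0;
}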
++
++// addressBlock: nbio_nbif0_rcc_ep_dev0_BIFDEC1
++//EP_PCIE_SCRATCH
++#define EP_PCIE_SCRATCH__PCIE_SCRATCH__SHIFT 0x0
++#define EP_PCIE_SCRATCH__PCIE_SCRATCH_MASK 0xFFFFFFFFL
++//EP_PCIE_CNTL
++#define EP_PCIE_CNTL__UR_ERR_REPORT_DIS__SHIFT 0x7
++#define EP_PCIE_CNTL__PCIE_MALFORM_ATOMIC_OPS__SHIFT 0x8
++#define EP_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR__SHIFT 0x1e
++#define EP_PCIE_CNTL__UR_ERR_REPORT_DIS_MASK 0x00000080L
++#define EP_PCIE_CNTL__PCIE_MALFORM_ATOMIC_OPS_MASK 0x00000100L
++#define EP_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR_MASK 0x40000000L
++//EP_PCIE_INT_CNTL
++#define EP_PCIE_INT_CNTL__CORR_ERR_INT_EN__SHIFT 0x0
++#define EP_PCIE_INT_CNTL__NON_FATAL_ERR_INT_EN__SHIFT 0x1
++#define EP_PCIE_INT_CNTL__FATAL_ERR_INT_EN__SHIFT 0x2
++#define EP_PCIE_INT_CNTL__USR_DETECTED_INT_EN__SHIFT 0x3
++#define EP_PCIE_INT_CNTL__MISC_ERR_INT_EN__SHIFT 0x4
++#define EP_PCIE_INT_CNTL__POWER_STATE_CHG_INT_EN__SHIFT 0x6
++#define EP_PCIE_INT_CNTL__CORR_ERR_INT_EN_MASK 0x00000001L
++#define EP_PCIE_INT_CNTL__NON_FATAL_ERR_INT_EN_MASK 0x00000002L
++#define EP_PCIE_INT_CNTL__FATAL_ERR_INT_EN_MASK 0x00000004L
++#define EP_PCIE_INT_CNTL__USR_DETECTED_INT_EN_MASK 0x00000008L
++#define EP_PCIE_INT_CNTL__MISC_ERR_INT_EN_MASK 0x00000010L
++#define EP_PCIE_INT_CNTL__POWER_STATE_CHG_INT_EN_MASK 0x00000040L
++//EP_PCIE_INT_STATUS
++#define EP_PCIE_INT_STATUS__CORR_ERR_INT_STATUS__SHIFT 0x0
++#define EP_PCIE_INT_STATUS__NON_FATAL_ERR_INT_STATUS__SHIFT 0x1
++#define EP_PCIE_INT_STATUS__FATAL_ERR_INT_STATUS__SHIFT 0x2
++#define EP_PCIE_INT_STATUS__USR_DETECTED_INT_STATUS__SHIFT 0x3
++#define EP_PCIE_INT_STATUS__MISC_ERR_INT_STATUS__SHIFT 0x4
++#define EP_PCIE_INT_STATUS__POWER_STATE_CHG_INT_STATUS__SHIFT 0x6
++#define EP_PCIE_INT_STATUS__CORR_ERR_INT_STATUS_MASK 0x00000001L
++#define EP_PCIE_INT_STATUS__NON_FATAL_ERR_INT_STATUS_MASK 0x00000002L
++#define EP_PCIE_INT_STATUS__FATAL_ERR_INT_STATUS_MASK 0x00000004L
++#define EP_PCIE_INT_STATUS__USR_DETECTED_INT_STATUS_MASK 0x00000008L
++#define EP_PCIE_INT_STATUS__MISC_ERR_INT_STATUS_MASK 0x00000010L
++#define EP_PCIE_INT_STATUS__POWER_STATE_CHG_INT_STATUS_MASK 0x00000040L
++//EP_PCIE_RX_CNTL2
++#define EP_PCIE_RX_CNTL2__RX_IGNORE_EP_INVALIDPASID_UR__SHIFT 0x0
++#define EP_PCIE_RX_CNTL2__RX_IGNORE_EP_INVALIDPASID_UR_MASK 0x00000001L
++//EP_PCIE_BUS_CNTL
++#define EP_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS__SHIFT 0x7
++#define EP_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS_MASK 0x00000080L
++//EP_PCIE_CFG_CNTL
++#define EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG__SHIFT 0x0
++#define EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG__SHIFT 0x1
++#define EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG__SHIFT 0x2
++#define EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG__SHIFT 0x3
++#define EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG_MASK 0x00000001L
++#define EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG_MASK 0x00000002L
++#define EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG_MASK 0x00000004L
++#define EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG_MASK 0x00000008L
++//EP_PCIE_TX_LTR_CNTL
++#define EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_SHORT_VALUE__SHIFT 0x0
++#define EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_LONG_VALUE__SHIFT 0x3
++#define EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_REQUIREMENT__SHIFT 0x6
++#define EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_SHORT_VALUE__SHIFT 0x7
++#define EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_LONG_VALUE__SHIFT 0xa
++#define EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_REQUIREMENT__SHIFT 0xd
++#define EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0__SHIFT 0xe
++#define EP_PCIE_TX_LTR_CNTL__LTR_PRIV_RST_LTR_IN_DL_DOWN__SHIFT 0xf
++#define EP_PCIE_TX_LTR_CNTL__TX_CHK_FC_FOR_L1__SHIFT 0x10
++#define EP_PCIE_TX_LTR_CNTL__LTR_DSTATE_USING_WDATA_EN__SHIFT 0x11
++#define EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_SHORT_VALUE_MASK 0x00000007L
++#define EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_LONG_VALUE_MASK 0x00000038L
++#define EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_REQUIREMENT_MASK 0x00000040L
++#define EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_SHORT_VALUE_MASK 0x00000380L
++#define EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_LONG_VALUE_MASK 0x00001C00L
++#define EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_REQUIREMENT_MASK 0x00002000L
++#define EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0_MASK 0x00004000L
++#define EP_PCIE_TX_LTR_CNTL__LTR_PRIV_RST_LTR_IN_DL_DOWN_MASK 0x00008000L
++#define EP_PCIE_TX_LTR_CNTL__TX_CHK_FC_FOR_L1_MASK 0x00010000L
++#define EP_PCIE_TX_LTR_CNTL__LTR_DSTATE_USING_WDATA_EN_MASK 0x00020000L
++//PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_0
++#define PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_1
++#define PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_2
++#define PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_3
++#define PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_4
++#define PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_5
++#define PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_6
++#define PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_7
++#define PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//EP_PCIE_F0_DPA_CAP
++#define EP_PCIE_F0_DPA_CAP__TRANS_LAT_UNIT__SHIFT 0x8
++#define EP_PCIE_F0_DPA_CAP__PWR_ALLOC_SCALE__SHIFT 0xc
++#define EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_0__SHIFT 0x10
++#define EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_1__SHIFT 0x18
++#define EP_PCIE_F0_DPA_CAP__TRANS_LAT_UNIT_MASK 0x00000300L
++#define EP_PCIE_F0_DPA_CAP__PWR_ALLOC_SCALE_MASK 0x00003000L
++#define EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_0_MASK 0x00FF0000L
++#define EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_1_MASK 0xFF000000L
++//EP_PCIE_F0_DPA_LATENCY_INDICATOR
++#define EP_PCIE_F0_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS__SHIFT 0x0
++#define EP_PCIE_F0_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS_MASK 0xFFL
++//EP_PCIE_F0_DPA_CNTL
++#define EP_PCIE_F0_DPA_CNTL__SUBSTATE_STATUS__SHIFT 0x0
++#define EP_PCIE_F0_DPA_CNTL__DPA_COMPLIANCE_MODE__SHIFT 0x8
++#define EP_PCIE_F0_DPA_CNTL__SUBSTATE_STATUS_MASK 0x001FL
++#define EP_PCIE_F0_DPA_CNTL__DPA_COMPLIANCE_MODE_MASK 0x0100L
++//PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0
++#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1
++#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2
++#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3
++#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4
++#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5
++#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6
++#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7
++#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//EP_PCIE_PME_CONTROL
++#define EP_PCIE_PME_CONTROL__PME_SERVICE_TIMER__SHIFT 0x0
++#define EP_PCIE_PME_CONTROL__PME_SERVICE_TIMER_MASK 0x1FL
++//EP_PCIEP_RESERVED
++#define EP_PCIEP_RESERVED__PCIEP_RESERVED__SHIFT 0x0
++#define EP_PCIEP_RESERVED__PCIEP_RESERVED_MASK 0xFFFFFFFFL
++//EP_PCIE_TX_CNTL
++#define EP_PCIE_TX_CNTL__TX_SNR_OVERRIDE__SHIFT 0xa
++#define EP_PCIE_TX_CNTL__TX_RO_OVERRIDE__SHIFT 0xc
++#define EP_PCIE_TX_CNTL__TX_F0_TPH_DIS__SHIFT 0x18
++#define EP_PCIE_TX_CNTL__TX_F1_TPH_DIS__SHIFT 0x19
++#define EP_PCIE_TX_CNTL__TX_F2_TPH_DIS__SHIFT 0x1a
++#define EP_PCIE_TX_CNTL__TX_SNR_OVERRIDE_MASK 0x00000C00L
++#define EP_PCIE_TX_CNTL__TX_RO_OVERRIDE_MASK 0x00003000L
++#define EP_PCIE_TX_CNTL__TX_F0_TPH_DIS_MASK 0x01000000L
++#define EP_PCIE_TX_CNTL__TX_F1_TPH_DIS_MASK 0x02000000L
++#define EP_PCIE_TX_CNTL__TX_F2_TPH_DIS_MASK 0x04000000L
++//EP_PCIE_TX_REQUESTER_ID
++#define EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_FUNCTION__SHIFT 0x0
++#define EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_DEVICE__SHIFT 0x3
++#define EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_BUS__SHIFT 0x8
++#define EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_FUNCTION_MASK 0x00000007L
++#define EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_DEVICE_MASK 0x000000F8L
++#define EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_BUS_MASK 0x0000FF00L
++//EP_PCIE_ERR_CNTL
++#define EP_PCIE_ERR_CNTL__ERR_REPORTING_DIS__SHIFT 0x0
++#define EP_PCIE_ERR_CNTL__AER_HDR_LOG_TIMEOUT__SHIFT 0x8
++#define EP_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY__SHIFT 0x11
++#define EP_PCIE_ERR_CNTL__STRAP_POISONED_ADVISORY_NONFATAL__SHIFT 0x12
++#define EP_PCIE_ERR_CNTL__AER_HDR_LOG_F0_TIMER_EXPIRED__SHIFT 0x18
++#define EP_PCIE_ERR_CNTL__AER_HDR_LOG_F1_TIMER_EXPIRED__SHIFT 0x19
++#define EP_PCIE_ERR_CNTL__AER_HDR_LOG_F2_TIMER_EXPIRED__SHIFT 0x1a
++#define EP_PCIE_ERR_CNTL__AER_HDR_LOG_F3_TIMER_EXPIRED__SHIFT 0x1b
++#define EP_PCIE_ERR_CNTL__AER_HDR_LOG_F4_TIMER_EXPIRED__SHIFT 0x1c
++#define EP_PCIE_ERR_CNTL__AER_HDR_LOG_F5_TIMER_EXPIRED__SHIFT 0x1d
++#define EP_PCIE_ERR_CNTL__AER_HDR_LOG_F6_TIMER_EXPIRED__SHIFT 0x1e
++#define EP_PCIE_ERR_CNTL__AER_HDR_LOG_F7_TIMER_EXPIRED__SHIFT 0x1f
++#define EP_PCIE_ERR_CNTL__ERR_REPORTING_DIS_MASK 0x00000001L
++#define EP_PCIE_ERR_CNTL__AER_HDR_LOG_TIMEOUT_MASK 0x00000700L
++#define EP_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY_MASK 0x00020000L
++#define EP_PCIE_ERR_CNTL__STRAP_POISONED_ADVISORY_NONFATAL_MASK 0x00040000L
++#define EP_PCIE_ERR_CNTL__AER_HDR_LOG_F0_TIMER_EXPIRED_MASK 0x01000000L
++#define EP_PCIE_ERR_CNTL__AER_HDR_LOG_F1_TIMER_EXPIRED_MASK 0x02000000L
++#define EP_PCIE_ERR_CNTL__AER_HDR_LOG_F2_TIMER_EXPIRED_MASK 0x04000000L
++#define EP_PCIE_ERR_CNTL__AER_HDR_LOG_F3_TIMER_EXPIRED_MASK 0x08000000L
++#define EP_PCIE_ERR_CNTL__AER_HDR_LOG_F4_TIMER_EXPIRED_MASK 0x10000000L
++#define EP_PCIE_ERR_CNTL__AER_HDR_LOG_F5_TIMER_EXPIRED_MASK 0x20000000L
++#define EP_PCIE_ERR_CNTL__AER_HDR_LOG_F6_TIMER_EXPIRED_MASK 0x40000000L
++#define EP_PCIE_ERR_CNTL__AER_HDR_LOG_F7_TIMER_EXPIRED_MASK 0x80000000L
++//EP_PCIE_RX_CNTL
++#define EP_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR__SHIFT 0x8
++#define EP_PCIE_RX_CNTL__RX_IGNORE_TC_ERR__SHIFT 0x9
++#define EP_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS__SHIFT 0x14
++#define EP_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR__SHIFT 0x15
++#define EP_PCIE_RX_CNTL__RX_IGNORE_MAXPREFIX_ERR__SHIFT 0x16
++#define EP_PCIE_RX_CNTL__RX_IGNORE_INVALIDPASID_ERR__SHIFT 0x18
++#define EP_PCIE_RX_CNTL__RX_IGNORE_NOT_PASID_UR__SHIFT 0x19
++#define EP_PCIE_RX_CNTL__RX_TPH_DIS__SHIFT 0x1a
++#define EP_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR_MASK 0x00000100L
++#define EP_PCIE_RX_CNTL__RX_IGNORE_TC_ERR_MASK 0x00000200L
++#define EP_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS_MASK 0x00100000L
++#define EP_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR_MASK 0x00200000L
++#define EP_PCIE_RX_CNTL__RX_IGNORE_MAXPREFIX_ERR_MASK 0x00400000L
++#define EP_PCIE_RX_CNTL__RX_IGNORE_INVALIDPASID_ERR_MASK 0x01000000L
++#define EP_PCIE_RX_CNTL__RX_IGNORE_NOT_PASID_UR_MASK 0x02000000L
++#define EP_PCIE_RX_CNTL__RX_TPH_DIS_MASK 0x04000000L
++//EP_PCIE_LC_SPEED_CNTL
++#define EP_PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP__SHIFT 0x0
++#define EP_PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP__SHIFT 0x1
++#define EP_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP__SHIFT 0x2
++#define EP_PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP_MASK 0x00000001L
++#define EP_PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP_MASK 0x00000002L
++#define EP_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP_MASK 0x00000004L
++
++
++// addressBlock: nbio_nbif0_rcc_dwn_dev0_BIFDEC1
++//DN_PCIE_RESERVED
++#define DN_PCIE_RESERVED__PCIE_RESERVED__SHIFT 0x0
++#define DN_PCIE_RESERVED__PCIE_RESERVED_MASK 0xFFFFFFFFL
++//DN_PCIE_SCRATCH
++#define DN_PCIE_SCRATCH__PCIE_SCRATCH__SHIFT 0x0
++#define DN_PCIE_SCRATCH__PCIE_SCRATCH_MASK 0xFFFFFFFFL
++//DN_PCIE_CNTL
++#define DN_PCIE_CNTL__HWINIT_WR_LOCK__SHIFT 0x0
++#define DN_PCIE_CNTL__UR_ERR_REPORT_DIS_DN__SHIFT 0x7
++#define DN_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR__SHIFT 0x1e
++#define DN_PCIE_CNTL__HWINIT_WR_LOCK_MASK 0x00000001L
++#define DN_PCIE_CNTL__UR_ERR_REPORT_DIS_DN_MASK 0x00000080L
++#define DN_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR_MASK 0x40000000L
++//DN_PCIE_CONFIG_CNTL
++#define DN_PCIE_CONFIG_CNTL__CI_EXTENDED_TAG_EN_OVERRIDE__SHIFT 0x19
++#define DN_PCIE_CONFIG_CNTL__CI_EXTENDED_TAG_EN_OVERRIDE_MASK 0x06000000L
++//DN_PCIE_RX_CNTL2
++#define DN_PCIE_RX_CNTL2__FLR_EXTEND_MODE__SHIFT 0x1c
++#define DN_PCIE_RX_CNTL2__FLR_EXTEND_MODE_MASK 0x70000000L
++//DN_PCIE_BUS_CNTL
++#define DN_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS__SHIFT 0x7
++#define DN_PCIE_BUS_CNTL__AER_CPL_TIMEOUT_RO_DIS_SWDN__SHIFT 0x8
++#define DN_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS_MASK 0x00000080L
++#define DN_PCIE_BUS_CNTL__AER_CPL_TIMEOUT_RO_DIS_SWDN_MASK 0x00000100L
++//DN_PCIE_CFG_CNTL
++#define DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG__SHIFT 0x0
++#define DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG__SHIFT 0x1
++#define DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG__SHIFT 0x2
++#define DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG__SHIFT 0x3
++#define DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG_MASK 0x00000001L
++#define DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG_MASK 0x00000002L
++#define DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG_MASK 0x00000004L
++#define DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG_MASK 0x00000008L
++
++
++// addressBlock: nbio_nbif0_rcc_dwnp_dev0_BIFDEC1
++//PCIE_ERR_CNTL
++#define PCIE_ERR_CNTL__ERR_REPORTING_DIS__SHIFT 0x0
++#define PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY__SHIFT 0x11
++#define PCIE_ERR_CNTL__ERR_REPORTING_DIS_MASK 0x00000001L
++#define PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY_MASK 0x00020000L
++//PCIE_RX_CNTL
++#define PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR__SHIFT 0x8
++#define PCIE_RX_CNTL__RX_IGNORE_TC_ERR_DN__SHIFT 0x9
++#define PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS__SHIFT 0x14
++#define PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR_DN__SHIFT 0x15
++#define PCIE_RX_CNTL__RX_RCB_FLR_TIMEOUT_DIS__SHIFT 0x1b
++#define PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR_MASK 0x00000100L
++#define PCIE_RX_CNTL__RX_IGNORE_TC_ERR_DN_MASK 0x00000200L
++#define PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS_MASK 0x00100000L
++#define PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR_DN_MASK 0x00200000L
++#define PCIE_RX_CNTL__RX_RCB_FLR_TIMEOUT_DIS_MASK 0x08000000L
++//PCIE_LC_SPEED_CNTL
++#define PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP__SHIFT 0x0
++#define PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP__SHIFT 0x1
++#define PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP__SHIFT 0x2
++#define PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP_MASK 0x00000001L
++#define PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP_MASK 0x00000002L
++#define PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP_MASK 0x00000004L
++//PCIE_LC_CNTL2
++#define PCIE_LC_CNTL2__LC_LINK_BW_NOTIFICATION_DIS__SHIFT 0x1b
++#define PCIE_LC_CNTL2__LC_LINK_BW_NOTIFICATION_DIS_MASK 0x08000000L
++//PCIEP_STRAP_MISC
++#define PCIEP_STRAP_MISC__STRAP_MULTI_FUNC_EN__SHIFT 0xa
++#define PCIEP_STRAP_MISC__STRAP_MULTI_FUNC_EN_MASK 0x00000400L
++//LTR_MSG_INFO_FROM_EP
++#define LTR_MSG_INFO_FROM_EP__LTR_MSG_INFO_FROM_EP__SHIFT 0x0
++#define LTR_MSG_INFO_FROM_EP__LTR_MSG_INFO_FROM_EP_MASK 0xFFFFFFFFL
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_BIFPFVFDEC1[13440..14975]
++//RCC_ERR_LOG
++#define RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
++#define RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
++#define RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
++#define RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
++//RCC_DOORBELL_APER_EN
++#define RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
++#define RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
++//RCC_CONFIG_MEMSIZE
++#define RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
++#define RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
++//RCC_CONFIG_RESERVED
++#define RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
++#define RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
++//RCC_IOV_FUNC_IDENTIFIER
++#define RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
++#define RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
++#define RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
++#define RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_BIFDEC1
++//RCC_ERR_INT_CNTL
++#define RCC_ERR_INT_CNTL__INVALID_REG_ACCESS_IN_SRIOV_INT_EN__SHIFT 0x0
++#define RCC_ERR_INT_CNTL__INVALID_REG_ACCESS_IN_SRIOV_INT_EN_MASK 0x00000001L
++//RCC_BACO_CNTL_MISC
++#define RCC_BACO_CNTL_MISC__BIF_ROM_REQ_DIS__SHIFT 0x0
++#define RCC_BACO_CNTL_MISC__BIF_AZ_REQ_DIS__SHIFT 0x1
++#define RCC_BACO_CNTL_MISC__BIF_ROM_REQ_DIS_MASK 0x00000001L
++#define RCC_BACO_CNTL_MISC__BIF_AZ_REQ_DIS_MASK 0x00000002L
++//RCC_RESET_EN
++#define RCC_RESET_EN__DB_APER_RESET_EN__SHIFT 0xf
++#define RCC_RESET_EN__DB_APER_RESET_EN_MASK 0x00008000L
++//RCC_VDM_SUPPORT
++#define RCC_VDM_SUPPORT__MCTP_SUPPORT__SHIFT 0x0
++#define RCC_VDM_SUPPORT__AMPTP_SUPPORT__SHIFT 0x1
++#define RCC_VDM_SUPPORT__OTHER_VDM_SUPPORT__SHIFT 0x2
++#define RCC_VDM_SUPPORT__ROUTE_TO_RC_CHECK_IN_RCMODE__SHIFT 0x3
++#define RCC_VDM_SUPPORT__ROUTE_BROADCAST_CHECK_IN_RCMODE__SHIFT 0x4
++#define RCC_VDM_SUPPORT__MCTP_SUPPORT_MASK 0x00000001L
++#define RCC_VDM_SUPPORT__AMPTP_SUPPORT_MASK 0x00000002L
++#define RCC_VDM_SUPPORT__OTHER_VDM_SUPPORT_MASK 0x00000004L
++#define RCC_VDM_SUPPORT__ROUTE_TO_RC_CHECK_IN_RCMODE_MASK 0x00000008L
++#define RCC_VDM_SUPPORT__ROUTE_BROADCAST_CHECK_IN_RCMODE_MASK 0x00000010L
++//RCC_MARGIN_PARAM_CNTL0
++#define RCC_MARGIN_PARAM_CNTL0__MARGINING_VOLTAGE_SUPPORTED__SHIFT 0x0
++#define RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_LEFTRIGHT_TIMING__SHIFT 0x1
++#define RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_UPDOWN_VOLTAGE__SHIFT 0x2
++#define RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_ERROR_SAMPLER__SHIFT 0x3
++#define RCC_MARGIN_PARAM_CNTL0__MARGINING_SAMPLE_REPORTING_METHOD__SHIFT 0x4
++#define RCC_MARGIN_PARAM_CNTL0__MARGINING_NUM_TIMING_STEPS__SHIFT 0x5
++#define RCC_MARGIN_PARAM_CNTL0__MARGINING_MAX_TIMING_OFFSET__SHIFT 0xb
++#define RCC_MARGIN_PARAM_CNTL0__MARGINING_NUM_VOLTAGE_STEPS__SHIFT 0x12
++#define RCC_MARGIN_PARAM_CNTL0__MARGINING_MAX_VOLTAGE_OFFSET__SHIFT 0x19
++#define RCC_MARGIN_PARAM_CNTL0__MARGINING_VOLTAGE_SUPPORTED_MASK 0x00000001L
++#define RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_LEFTRIGHT_TIMING_MASK 0x00000002L
++#define RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_UPDOWN_VOLTAGE_MASK 0x00000004L
++#define RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_ERROR_SAMPLER_MASK 0x00000008L
++#define RCC_MARGIN_PARAM_CNTL0__MARGINING_SAMPLE_REPORTING_METHOD_MASK 0x00000010L
++#define RCC_MARGIN_PARAM_CNTL0__MARGINING_NUM_TIMING_STEPS_MASK 0x000007E0L
++#define RCC_MARGIN_PARAM_CNTL0__MARGINING_MAX_TIMING_OFFSET_MASK 0x0003F800L
++#define RCC_MARGIN_PARAM_CNTL0__MARGINING_NUM_VOLTAGE_STEPS_MASK 0x01FC0000L
++#define RCC_MARGIN_PARAM_CNTL0__MARGINING_MAX_VOLTAGE_OFFSET_MASK 0xFE000000L
++//RCC_MARGIN_PARAM_CNTL1
++#define RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLING_RATE_VOLTAGE__SHIFT 0x0
++#define RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLING_RATE_TIMING__SHIFT 0x6
++#define RCC_MARGIN_PARAM_CNTL1__MARGINING_MAX_LANES__SHIFT 0xc
++#define RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLE_COUNT__SHIFT 0x11
++#define RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLING_RATE_VOLTAGE_MASK 0x0000003FL
++#define RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLING_RATE_TIMING_MASK 0x00000FC0L
++#define RCC_MARGIN_PARAM_CNTL1__MARGINING_MAX_LANES_MASK 0x0001F000L
++#define RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLE_COUNT_MASK 0x00FE0000L
++//RCC_PEER_REG_RANGE0
++#define RCC_PEER_REG_RANGE0__START_ADDR__SHIFT 0x0
++#define RCC_PEER_REG_RANGE0__END_ADDR__SHIFT 0x10
++#define RCC_PEER_REG_RANGE0__START_ADDR_MASK 0x0000FFFFL
++#define RCC_PEER_REG_RANGE0__END_ADDR_MASK 0xFFFF0000L
++//RCC_PEER_REG_RANGE1
++#define RCC_PEER_REG_RANGE1__START_ADDR__SHIFT 0x0
++#define RCC_PEER_REG_RANGE1__END_ADDR__SHIFT 0x10
++#define RCC_PEER_REG_RANGE1__START_ADDR_MASK 0x0000FFFFL
++#define RCC_PEER_REG_RANGE1__END_ADDR_MASK 0xFFFF0000L
++//RCC_BUS_CNTL
++#define RCC_BUS_CNTL__PMI_IO_DIS__SHIFT 0x2
++#define RCC_BUS_CNTL__PMI_MEM_DIS__SHIFT 0x3
++#define RCC_BUS_CNTL__PMI_BM_DIS__SHIFT 0x4
++#define RCC_BUS_CNTL__PMI_IO_DIS_DN__SHIFT 0x5
++#define RCC_BUS_CNTL__PMI_MEM_DIS_DN__SHIFT 0x6
++#define RCC_BUS_CNTL__PMI_IO_DIS_UP__SHIFT 0x7
++#define RCC_BUS_CNTL__PMI_MEM_DIS_UP__SHIFT 0x8
++#define RCC_BUS_CNTL__DN_SEC_SIG_CPLCA_WITH_EP_ERR__SHIFT 0x10
++#define RCC_BUS_CNTL__DN_SEC_RCV_CPLCA_WITH_EP_ERR__SHIFT 0x11
++#define RCC_BUS_CNTL__DN_SEC_RCV_CPLUR_WITH_EP_ERR__SHIFT 0x12
++#define RCC_BUS_CNTL__DN_PRI_SIG_CPLCA_WITH_EP_ERR__SHIFT 0x13
++#define RCC_BUS_CNTL__DN_PRI_RCV_CPLCA_WITH_EP_ERR__SHIFT 0x14
++#define RCC_BUS_CNTL__DN_PRI_RCV_CPLUR_WITH_EP_ERR__SHIFT 0x15
++#define RCC_BUS_CNTL__MAX_PAYLOAD_SIZE_MODE__SHIFT 0x18
++#define RCC_BUS_CNTL__PRIV_MAX_PAYLOAD_SIZE__SHIFT 0x19
++#define RCC_BUS_CNTL__MAX_READ_REQUEST_SIZE_MODE__SHIFT 0x1c
++#define RCC_BUS_CNTL__PRIV_MAX_READ_REQUEST_SIZE__SHIFT 0x1d
++#define RCC_BUS_CNTL__PMI_IO_DIS_MASK 0x00000004L
++#define RCC_BUS_CNTL__PMI_MEM_DIS_MASK 0x00000008L
++#define RCC_BUS_CNTL__PMI_BM_DIS_MASK 0x00000010L
++#define RCC_BUS_CNTL__PMI_IO_DIS_DN_MASK 0x00000020L
++#define RCC_BUS_CNTL__PMI_MEM_DIS_DN_MASK 0x00000040L
++#define RCC_BUS_CNTL__PMI_IO_DIS_UP_MASK 0x00000080L
++#define RCC_BUS_CNTL__PMI_MEM_DIS_UP_MASK 0x00000100L
++#define RCC_BUS_CNTL__DN_SEC_SIG_CPLCA_WITH_EP_ERR_MASK 0x00010000L
++#define RCC_BUS_CNTL__DN_SEC_RCV_CPLCA_WITH_EP_ERR_MASK 0x00020000L
++#define RCC_BUS_CNTL__DN_SEC_RCV_CPLUR_WITH_EP_ERR_MASK 0x00040000L
++#define RCC_BUS_CNTL__DN_PRI_SIG_CPLCA_WITH_EP_ERR_MASK 0x00080000L
++#define RCC_BUS_CNTL__DN_PRI_RCV_CPLCA_WITH_EP_ERR_MASK 0x00100000L
++#define RCC_BUS_CNTL__DN_PRI_RCV_CPLUR_WITH_EP_ERR_MASK 0x00200000L
++#define RCC_BUS_CNTL__MAX_PAYLOAD_SIZE_MODE_MASK 0x01000000L
++#define RCC_BUS_CNTL__PRIV_MAX_PAYLOAD_SIZE_MASK 0x0E000000L
++#define RCC_BUS_CNTL__MAX_READ_REQUEST_SIZE_MODE_MASK 0x10000000L
++#define RCC_BUS_CNTL__PRIV_MAX_READ_REQUEST_SIZE_MASK 0xE0000000L
++//RCC_CONFIG_CNTL
++#define RCC_CONFIG_CNTL__CFG_VGA_RAM_EN__SHIFT 0x0
++#define RCC_CONFIG_CNTL__GENMO_MONO_ADDRESS_B__SHIFT 0x2
++#define RCC_CONFIG_CNTL__GRPH_ADRSEL__SHIFT 0x3
++#define RCC_CONFIG_CNTL__CFG_VGA_RAM_EN_MASK 0x00000001L
++#define RCC_CONFIG_CNTL__GENMO_MONO_ADDRESS_B_MASK 0x00000004L
++#define RCC_CONFIG_CNTL__GRPH_ADRSEL_MASK 0x00000018L
++//RCC_CONFIG_F0_BASE
++#define RCC_CONFIG_F0_BASE__F0_BASE__SHIFT 0x0
++#define RCC_CONFIG_F0_BASE__F0_BASE_MASK 0xFFFFFFFFL
++//RCC_CONFIG_APER_SIZE
++#define RCC_CONFIG_APER_SIZE__APER_SIZE__SHIFT 0x0
++#define RCC_CONFIG_APER_SIZE__APER_SIZE_MASK 0xFFFFFFFFL
++//RCC_CONFIG_REG_APER_SIZE
++#define RCC_CONFIG_REG_APER_SIZE__REG_APER_SIZE__SHIFT 0x0
++#define RCC_CONFIG_REG_APER_SIZE__REG_APER_SIZE_MASK 0x000FFFFFL
++//RCC_XDMA_LO
++#define RCC_XDMA_LO__BIF_XDMA_LOWER_BOUND__SHIFT 0x0
++#define RCC_XDMA_LO__BIF_XDMA_APER_EN__SHIFT 0x1f
++#define RCC_XDMA_LO__BIF_XDMA_LOWER_BOUND_MASK 0x7FFFFFFFL
++#define RCC_XDMA_LO__BIF_XDMA_APER_EN_MASK 0x80000000L
++//RCC_XDMA_HI
++#define RCC_XDMA_HI__BIF_XDMA_UPPER_BOUND__SHIFT 0x0
++#define RCC_XDMA_HI__BIF_XDMA_UPPER_BOUND_MASK 0x7FFFFFFFL
++//RCC_FEATURES_CONTROL_MISC
++#define RCC_FEATURES_CONTROL_MISC__UR_PSN_PKT_REPORT_POISON_DIS__SHIFT 0x4
++#define RCC_FEATURES_CONTROL_MISC__POST_PSN_ONLY_PKT_REPORT_UR_ALL_DIS__SHIFT 0x5
++#define RCC_FEATURES_CONTROL_MISC__POST_PSN_ONLY_PKT_REPORT_UR_PART_DIS__SHIFT 0x6
++#define RCC_FEATURES_CONTROL_MISC__INIT_PFFLR_CRS_RET_DIS__SHIFT 0x7
++#define RCC_FEATURES_CONTROL_MISC__ATC_PRG_RESP_PASID_UR_EN__SHIFT 0x8
++#define RCC_FEATURES_CONTROL_MISC__RX_IGNORE_TRANSMRD_UR__SHIFT 0x9
++#define RCC_FEATURES_CONTROL_MISC__RX_IGNORE_TRANSMWR_UR__SHIFT 0xa
++#define RCC_FEATURES_CONTROL_MISC__RX_IGNORE_ATSTRANSREQ_UR__SHIFT 0xb
++#define RCC_FEATURES_CONTROL_MISC__RX_IGNORE_PAGEREQMSG_UR__SHIFT 0xc
++#define RCC_FEATURES_CONTROL_MISC__RX_IGNORE_INVCPL_UR__SHIFT 0xd
++#define RCC_FEATURES_CONTROL_MISC__CLR_MSI_X_PENDING_WHEN_DISABLED_DIS__SHIFT 0xe
++#define RCC_FEATURES_CONTROL_MISC__CHECK_BME_ON_PENDING_PKT_GEN_DIS__SHIFT 0xf
++#define RCC_FEATURES_CONTROL_MISC__PSN_CHECK_ON_PAYLOAD_DIS__SHIFT 0x10
++#define RCC_FEATURES_CONTROL_MISC__CLR_MSI_PENDING_ON_MULTIEN_DIS__SHIFT 0x11
++#define RCC_FEATURES_CONTROL_MISC__SET_DEVICE_ERR_FOR_ECRC_EN__SHIFT 0x12
++#define RCC_FEATURES_CONTROL_MISC__HOST_POISON_FLAG_CHECK_FOR_CHAIN_DIS__SHIFT 0x13
++#define RCC_FEATURES_CONTROL_MISC__UR_PSN_PKT_REPORT_POISON_DIS_MASK 0x00000010L
++#define RCC_FEATURES_CONTROL_MISC__POST_PSN_ONLY_PKT_REPORT_UR_ALL_DIS_MASK 0x00000020L
++#define RCC_FEATURES_CONTROL_MISC__POST_PSN_ONLY_PKT_REPORT_UR_PART_DIS_MASK 0x00000040L
++#define RCC_FEATURES_CONTROL_MISC__INIT_PFFLR_CRS_RET_DIS_MASK 0x00000080L
++#define RCC_FEATURES_CONTROL_MISC__ATC_PRG_RESP_PASID_UR_EN_MASK 0x00000100L
++#define RCC_FEATURES_CONTROL_MISC__RX_IGNORE_TRANSMRD_UR_MASK 0x00000200L
++#define RCC_FEATURES_CONTROL_MISC__RX_IGNORE_TRANSMWR_UR_MASK 0x00000400L
++#define RCC_FEATURES_CONTROL_MISC__RX_IGNORE_ATSTRANSREQ_UR_MASK 0x00000800L
++#define RCC_FEATURES_CONTROL_MISC__RX_IGNORE_PAGEREQMSG_UR_MASK 0x00001000L
++#define RCC_FEATURES_CONTROL_MISC__RX_IGNORE_INVCPL_UR_MASK 0x00002000L
++#define RCC_FEATURES_CONTROL_MISC__CLR_MSI_X_PENDING_WHEN_DISABLED_DIS_MASK 0x00004000L
++#define RCC_FEATURES_CONTROL_MISC__CHECK_BME_ON_PENDING_PKT_GEN_DIS_MASK 0x00008000L
++#define RCC_FEATURES_CONTROL_MISC__PSN_CHECK_ON_PAYLOAD_DIS_MASK 0x00010000L
++#define RCC_FEATURES_CONTROL_MISC__CLR_MSI_PENDING_ON_MULTIEN_DIS_MASK 0x00020000L
++#define RCC_FEATURES_CONTROL_MISC__SET_DEVICE_ERR_FOR_ECRC_EN_MASK 0x00040000L
++#define RCC_FEATURES_CONTROL_MISC__HOST_POISON_FLAG_CHECK_FOR_CHAIN_DIS_MASK 0x00080000L
++//RCC_BUSNUM_CNTL1
++#define RCC_BUSNUM_CNTL1__ID_MASK__SHIFT 0x0
++#define RCC_BUSNUM_CNTL1__ID_MASK_MASK 0x000000FFL
++//RCC_BUSNUM_LIST0
++#define RCC_BUSNUM_LIST0__ID0__SHIFT 0x0
++#define RCC_BUSNUM_LIST0__ID1__SHIFT 0x8
++#define RCC_BUSNUM_LIST0__ID2__SHIFT 0x10
++#define RCC_BUSNUM_LIST0__ID3__SHIFT 0x18
++#define RCC_BUSNUM_LIST0__ID0_MASK 0x000000FFL
++#define RCC_BUSNUM_LIST0__ID1_MASK 0x0000FF00L
++#define RCC_BUSNUM_LIST0__ID2_MASK 0x00FF0000L
++#define RCC_BUSNUM_LIST0__ID3_MASK 0xFF000000L
++//RCC_BUSNUM_LIST1
++#define RCC_BUSNUM_LIST1__ID4__SHIFT 0x0
++#define RCC_BUSNUM_LIST1__ID5__SHIFT 0x8
++#define RCC_BUSNUM_LIST1__ID6__SHIFT 0x10
++#define RCC_BUSNUM_LIST1__ID7__SHIFT 0x18
++#define RCC_BUSNUM_LIST1__ID4_MASK 0x000000FFL
++#define RCC_BUSNUM_LIST1__ID5_MASK 0x0000FF00L
++#define RCC_BUSNUM_LIST1__ID6_MASK 0x00FF0000L
++#define RCC_BUSNUM_LIST1__ID7_MASK 0xFF000000L
++//RCC_BUSNUM_CNTL2
++#define RCC_BUSNUM_CNTL2__AUTOUPDATE_SEL__SHIFT 0x0
++#define RCC_BUSNUM_CNTL2__AUTOUPDATE_EN__SHIFT 0x8
++#define RCC_BUSNUM_CNTL2__HDPREG_CNTL__SHIFT 0x10
++#define RCC_BUSNUM_CNTL2__ERROR_MULTIPLE_ID_MATCH__SHIFT 0x11
++#define RCC_BUSNUM_CNTL2__AUTOUPDATE_SEL_MASK 0x000000FFL
++#define RCC_BUSNUM_CNTL2__AUTOUPDATE_EN_MASK 0x00000100L
++#define RCC_BUSNUM_CNTL2__HDPREG_CNTL_MASK 0x00010000L
++#define RCC_BUSNUM_CNTL2__ERROR_MULTIPLE_ID_MATCH_MASK 0x00020000L
++//RCC_CAPTURE_HOST_BUSNUM
++#define RCC_CAPTURE_HOST_BUSNUM__CHECK_EN__SHIFT 0x0
++#define RCC_CAPTURE_HOST_BUSNUM__CHECK_EN_MASK 0x00000001L
++//RCC_HOST_BUSNUM
++#define RCC_HOST_BUSNUM__HOST_ID__SHIFT 0x0
++#define RCC_HOST_BUSNUM__HOST_ID_MASK 0x0000FFFFL
++//RCC_PEER0_FB_OFFSET_HI
++#define RCC_PEER0_FB_OFFSET_HI__PEER0_FB_OFFSET_HI__SHIFT 0x0
++#define RCC_PEER0_FB_OFFSET_HI__PEER0_FB_OFFSET_HI_MASK 0x000FFFFFL
++//RCC_PEER0_FB_OFFSET_LO
++#define RCC_PEER0_FB_OFFSET_LO__PEER0_FB_OFFSET_LO__SHIFT 0x0
++#define RCC_PEER0_FB_OFFSET_LO__PEER0_FB_EN__SHIFT 0x1f
++#define RCC_PEER0_FB_OFFSET_LO__PEER0_FB_OFFSET_LO_MASK 0x000FFFFFL
++#define RCC_PEER0_FB_OFFSET_LO__PEER0_FB_EN_MASK 0x80000000L
++//RCC_PEER1_FB_OFFSET_HI
++#define RCC_PEER1_FB_OFFSET_HI__PEER1_FB_OFFSET_HI__SHIFT 0x0
++#define RCC_PEER1_FB_OFFSET_HI__PEER1_FB_OFFSET_HI_MASK 0x000FFFFFL
++//RCC_PEER1_FB_OFFSET_LO
++#define RCC_PEER1_FB_OFFSET_LO__PEER1_FB_OFFSET_LO__SHIFT 0x0
++#define RCC_PEER1_FB_OFFSET_LO__PEER1_FB_EN__SHIFT 0x1f
++#define RCC_PEER1_FB_OFFSET_LO__PEER1_FB_OFFSET_LO_MASK 0x000FFFFFL
++#define RCC_PEER1_FB_OFFSET_LO__PEER1_FB_EN_MASK 0x80000000L
++//RCC_PEER2_FB_OFFSET_HI
++#define RCC_PEER2_FB_OFFSET_HI__PEER2_FB_OFFSET_HI__SHIFT 0x0
++#define RCC_PEER2_FB_OFFSET_HI__PEER2_FB_OFFSET_HI_MASK 0x000FFFFFL
++//RCC_PEER2_FB_OFFSET_LO
++#define RCC_PEER2_FB_OFFSET_LO__PEER2_FB_OFFSET_LO__SHIFT 0x0
++#define RCC_PEER2_FB_OFFSET_LO__PEER2_FB_EN__SHIFT 0x1f
++#define RCC_PEER2_FB_OFFSET_LO__PEER2_FB_OFFSET_LO_MASK 0x000FFFFFL
++#define RCC_PEER2_FB_OFFSET_LO__PEER2_FB_EN_MASK 0x80000000L
++//RCC_PEER3_FB_OFFSET_HI
++#define RCC_PEER3_FB_OFFSET_HI__PEER3_FB_OFFSET_HI__SHIFT 0x0
++#define RCC_PEER3_FB_OFFSET_HI__PEER3_FB_OFFSET_HI_MASK 0x000FFFFFL
++//RCC_PEER3_FB_OFFSET_LO
++#define RCC_PEER3_FB_OFFSET_LO__PEER3_FB_OFFSET_LO__SHIFT 0x0
++#define RCC_PEER3_FB_OFFSET_LO__PEER3_FB_EN__SHIFT 0x1f
++#define RCC_PEER3_FB_OFFSET_LO__PEER3_FB_OFFSET_LO_MASK 0x000FFFFFL
++#define RCC_PEER3_FB_OFFSET_LO__PEER3_FB_EN_MASK 0x80000000L
++//RCC_CMN_LINK_CNTL
++#define RCC_CMN_LINK_CNTL__BLOCK_PME_ON_L0S_DIS__SHIFT 0x0
++#define RCC_CMN_LINK_CNTL__BLOCK_PME_ON_L1_DIS__SHIFT 0x1
++#define RCC_CMN_LINK_CNTL__BLOCK_PME_ON_LDN_DIS__SHIFT 0x2
++#define RCC_CMN_LINK_CNTL__PM_L1_IDLE_CHECK_DMA_EN__SHIFT 0x3
++#define RCC_CMN_LINK_CNTL__VLINK_IN_L1LTR_TIMER__SHIFT 0x10
++#define RCC_CMN_LINK_CNTL__BLOCK_PME_ON_L0S_DIS_MASK 0x00000001L
++#define RCC_CMN_LINK_CNTL__BLOCK_PME_ON_L1_DIS_MASK 0x00000002L
++#define RCC_CMN_LINK_CNTL__BLOCK_PME_ON_LDN_DIS_MASK 0x00000004L
++#define RCC_CMN_LINK_CNTL__PM_L1_IDLE_CHECK_DMA_EN_MASK 0x00000008L
++#define RCC_CMN_LINK_CNTL__VLINK_IN_L1LTR_TIMER_MASK 0xFFFF0000L
++//RCC_EP_REQUESTERID_RESTORE
++#define RCC_EP_REQUESTERID_RESTORE__EP_REQID_BUS__SHIFT 0x0
++#define RCC_EP_REQUESTERID_RESTORE__EP_REQID_DEV__SHIFT 0x8
++#define RCC_EP_REQUESTERID_RESTORE__EP_REQID_BUS_MASK 0x000000FFL
++#define RCC_EP_REQUESTERID_RESTORE__EP_REQID_DEV_MASK 0x00001F00L
++//RCC_LTR_LSWITCH_CNTL
++#define RCC_LTR_LSWITCH_CNTL__LSWITCH_LATENCY_VALUE__SHIFT 0x0
++#define RCC_LTR_LSWITCH_CNTL__LSWITCH_LATENCY_VALUE_MASK 0x000003FFL
++//RCC_MH_ARB_CNTL
++#define RCC_MH_ARB_CNTL__MH_ARB_MODE__SHIFT 0x0
++#define RCC_MH_ARB_CNTL__MH_ARB_FIX_PRIORITY__SHIFT 0x1
++#define RCC_MH_ARB_CNTL__MH_ARB_MODE_MASK 0x00000001L
++#define RCC_MH_ARB_CNTL__MH_ARB_FIX_PRIORITY_MASK 0x00007FFEL
++
++
++// addressBlock: nbio_nbif0_bif_bx_BIFDEC1
++//BIF_MM_INDACCESS_CNTL
++#define BIF_MM_INDACCESS_CNTL__MM_INDACCESS_DIS__SHIFT 0x1
++#define BIF_MM_INDACCESS_CNTL__MM_INDACCESS_DIS_MASK 0x00000002L
++//BUS_CNTL
++#define BUS_CNTL__VGA_REG_COHERENCY_DIS__SHIFT 0x6
++#define BUS_CNTL__VGA_MEM_COHERENCY_DIS__SHIFT 0x7
++#define BUS_CNTL__SET_AZ_TC__SHIFT 0xa
++#define BUS_CNTL__SET_MC_TC__SHIFT 0xd
++#define BUS_CNTL__ZERO_BE_WR_EN__SHIFT 0x10
++#define BUS_CNTL__ZERO_BE_RD_EN__SHIFT 0x11
++#define BUS_CNTL__RD_STALL_IO_WR__SHIFT 0x12
++#define BUS_CNTL__PRECEEDINGWR_STALL_VGA_FB_FLUSH_DIS__SHIFT 0x19
++#define BUS_CNTL__PRECEEDINGWR_STALL_VGA_REG_FLUSH_DIS__SHIFT 0x1a
++#define BUS_CNTL__HDP_REG_FLUSH_VF_MASK_EN__SHIFT 0x1d
++#define BUS_CNTL__VGAFB_ZERO_BE_WR_EN__SHIFT 0x1e
++#define BUS_CNTL__VGAFB_ZERO_BE_RD_EN__SHIFT 0x1f
++#define BUS_CNTL__VGA_REG_COHERENCY_DIS_MASK 0x00000040L
++#define BUS_CNTL__VGA_MEM_COHERENCY_DIS_MASK 0x00000080L
++#define BUS_CNTL__SET_AZ_TC_MASK 0x00001C00L
++#define BUS_CNTL__SET_MC_TC_MASK 0x0000E000L
++#define BUS_CNTL__ZERO_BE_WR_EN_MASK 0x00010000L
++#define BUS_CNTL__ZERO_BE_RD_EN_MASK 0x00020000L
++#define BUS_CNTL__RD_STALL_IO_WR_MASK 0x00040000L
++#define BUS_CNTL__PRECEEDINGWR_STALL_VGA_FB_FLUSH_DIS_MASK 0x02000000L
++#define BUS_CNTL__PRECEEDINGWR_STALL_VGA_REG_FLUSH_DIS_MASK 0x04000000L
++#define BUS_CNTL__HDP_REG_FLUSH_VF_MASK_EN_MASK 0x20000000L
++#define BUS_CNTL__VGAFB_ZERO_BE_WR_EN_MASK 0x40000000L
++#define BUS_CNTL__VGAFB_ZERO_BE_RD_EN_MASK 0x80000000L
++//BIF_SCRATCH0
++#define BIF_SCRATCH0__BIF_SCRATCH0__SHIFT 0x0
++#define BIF_SCRATCH0__BIF_SCRATCH0_MASK 0xFFFFFFFFL
++//BIF_SCRATCH1
++#define BIF_SCRATCH1__BIF_SCRATCH1__SHIFT 0x0
++#define BIF_SCRATCH1__BIF_SCRATCH1_MASK 0xFFFFFFFFL
++//BX_RESET_EN
++#define BX_RESET_EN__RESET_ON_VFENABLE_LOW_EN__SHIFT 0x10
++#define BX_RESET_EN__RESET_ON_VFENABLE_LOW_EN_MASK 0x00010000L
++//MM_CFGREGS_CNTL
++#define MM_CFGREGS_CNTL__MM_CFG_FUNC_SEL__SHIFT 0x0
++#define MM_CFGREGS_CNTL__MM_CFG_DEV_SEL__SHIFT 0x6
++#define MM_CFGREGS_CNTL__MM_WR_TO_CFG_EN__SHIFT 0x1f
++#define MM_CFGREGS_CNTL__MM_CFG_FUNC_SEL_MASK 0x00000007L
++#define MM_CFGREGS_CNTL__MM_CFG_DEV_SEL_MASK 0x000000C0L
++#define MM_CFGREGS_CNTL__MM_WR_TO_CFG_EN_MASK 0x80000000L
++//BX_RESET_CNTL
++#define BX_RESET_CNTL__LINK_TRAIN_EN__SHIFT 0x0
++#define BX_RESET_CNTL__LINK_TRAIN_EN_MASK 0x00000001L
++//INTERRUPT_CNTL
++#define INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE__SHIFT 0x0
++#define INTERRUPT_CNTL__IH_DUMMY_RD_EN__SHIFT 0x1
++#define INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN__SHIFT 0x3
++#define INTERRUPT_CNTL__IH_INTR_DLY_CNTR__SHIFT 0x4
++#define INTERRUPT_CNTL__GEN_IH_INT_EN__SHIFT 0x8
++#define INTERRUPT_CNTL__BIF_RB_REQ_NONSNOOP_EN__SHIFT 0xf
++#define INTERRUPT_CNTL__DUMMYRD_BYPASS_IN_MSI_EN__SHIFT 0x10
++#define INTERRUPT_CNTL__ALWAYS_SEND_INTPKT_AFTER_DUMMYRD_DIS__SHIFT 0x11
++#define INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK 0x00000001L
++#define INTERRUPT_CNTL__IH_DUMMY_RD_EN_MASK 0x00000002L
++#define INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK 0x00000008L
++#define INTERRUPT_CNTL__IH_INTR_DLY_CNTR_MASK 0x000000F0L
++#define INTERRUPT_CNTL__GEN_IH_INT_EN_MASK 0x00000100L
++#define INTERRUPT_CNTL__BIF_RB_REQ_NONSNOOP_EN_MASK 0x00008000L
++#define INTERRUPT_CNTL__DUMMYRD_BYPASS_IN_MSI_EN_MASK 0x00010000L
++#define INTERRUPT_CNTL__ALWAYS_SEND_INTPKT_AFTER_DUMMYRD_DIS_MASK 0x00020000L
++//INTERRUPT_CNTL2
++#define INTERRUPT_CNTL2__IH_DUMMY_RD_ADDR__SHIFT 0x0
++#define INTERRUPT_CNTL2__IH_DUMMY_RD_ADDR_MASK 0xFFFFFFFFL
++//CLKREQB_PAD_CNTL
++#define CLKREQB_PAD_CNTL__CLKREQB_PAD_A__SHIFT 0x0
++#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SEL__SHIFT 0x1
++#define CLKREQB_PAD_CNTL__CLKREQB_PAD_MODE__SHIFT 0x2
++#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SPARE__SHIFT 0x3
++#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SN0__SHIFT 0x5
++#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SN1__SHIFT 0x6
++#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SN2__SHIFT 0x7
++#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SN3__SHIFT 0x8
++#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SLEWN__SHIFT 0x9
++#define CLKREQB_PAD_CNTL__CLKREQB_PAD_WAKE__SHIFT 0xa
++#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SCHMEN__SHIFT 0xb
++#define CLKREQB_PAD_CNTL__CLKREQB_PAD_CNTL_EN__SHIFT 0xc
++#define CLKREQB_PAD_CNTL__CLKREQB_PAD_Y__SHIFT 0xd
++#define CLKREQB_PAD_CNTL__CLKREQB_PAD_A_MASK 0x00000001L
++#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SEL_MASK 0x00000002L
++#define CLKREQB_PAD_CNTL__CLKREQB_PAD_MODE_MASK 0x00000004L
++#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SPARE_MASK 0x00000018L
++#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SN0_MASK 0x00000020L
++#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SN1_MASK 0x00000040L
++#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SN2_MASK 0x00000080L
++#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SN3_MASK 0x00000100L
++#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SLEWN_MASK 0x00000200L
++#define CLKREQB_PAD_CNTL__CLKREQB_PAD_WAKE_MASK 0x00000400L
++#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SCHMEN_MASK 0x00000800L
++#define CLKREQB_PAD_CNTL__CLKREQB_PAD_CNTL_EN_MASK 0x00001000L
++#define CLKREQB_PAD_CNTL__CLKREQB_PAD_Y_MASK 0x00002000L
++//BIF_FEATURES_CONTROL_MISC
++#define BIF_FEATURES_CONTROL_MISC__MST_BIF_REQ_EP_DIS__SHIFT 0x0
++#define BIF_FEATURES_CONTROL_MISC__SLV_BIF_CPL_EP_DIS__SHIFT 0x1
++#define BIF_FEATURES_CONTROL_MISC__BIF_SLV_REQ_EP_DIS__SHIFT 0x2
++#define BIF_FEATURES_CONTROL_MISC__BIF_MST_CPL_EP_DIS__SHIFT 0x3
++#define BIF_FEATURES_CONTROL_MISC__BIF_RB_SET_OVERFLOW_EN__SHIFT 0xc
++#define BIF_FEATURES_CONTROL_MISC__ATOMIC_ERR_INT_DIS__SHIFT 0xd
++#define BIF_FEATURES_CONTROL_MISC__BME_HDL_NONVIR_EN__SHIFT 0xf
++#define BIF_FEATURES_CONTROL_MISC__DOORBELL_SELFRING_GPA_APER_CHK_48BIT_ADDR__SHIFT 0x18
++#define BIF_FEATURES_CONTROL_MISC__MST_BIF_REQ_EP_DIS_MASK 0x00000001L
++#define BIF_FEATURES_CONTROL_MISC__SLV_BIF_CPL_EP_DIS_MASK 0x00000002L
++#define BIF_FEATURES_CONTROL_MISC__BIF_SLV_REQ_EP_DIS_MASK 0x00000004L
++#define BIF_FEATURES_CONTROL_MISC__BIF_MST_CPL_EP_DIS_MASK 0x00000008L
++#define BIF_FEATURES_CONTROL_MISC__BIF_RB_SET_OVERFLOW_EN_MASK 0x00001000L
++#define BIF_FEATURES_CONTROL_MISC__ATOMIC_ERR_INT_DIS_MASK 0x00002000L
++#define BIF_FEATURES_CONTROL_MISC__BME_HDL_NONVIR_EN_MASK 0x00008000L
++#define BIF_FEATURES_CONTROL_MISC__DOORBELL_SELFRING_GPA_APER_CHK_48BIT_ADDR_MASK 0x01000000L
++//BIF_DOORBELL_CNTL
++#define BIF_DOORBELL_CNTL__SELF_RING_DIS__SHIFT 0x0
++#define BIF_DOORBELL_CNTL__TRANS_CHECK_DIS__SHIFT 0x1
++#define BIF_DOORBELL_CNTL__UNTRANS_LBACK_EN__SHIFT 0x2
++#define BIF_DOORBELL_CNTL__NON_CONSECUTIVE_BE_ZERO_DIS__SHIFT 0x3
++#define BIF_DOORBELL_CNTL__DOORBELL_MONITOR_EN__SHIFT 0x4
++#define BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_DIS__SHIFT 0x18
++#define BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_MODE_0__SHIFT 0x19
++#define BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_MODE_1__SHIFT 0x1a
++#define BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_MODE_2__SHIFT 0x1b
++#define BIF_DOORBELL_CNTL__SELF_RING_DIS_MASK 0x00000001L
++#define BIF_DOORBELL_CNTL__TRANS_CHECK_DIS_MASK 0x00000002L
++#define BIF_DOORBELL_CNTL__UNTRANS_LBACK_EN_MASK 0x00000004L
++#define BIF_DOORBELL_CNTL__NON_CONSECUTIVE_BE_ZERO_DIS_MASK 0x00000008L
++#define BIF_DOORBELL_CNTL__DOORBELL_MONITOR_EN_MASK 0x00000010L
++#define BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_DIS_MASK 0x01000000L
++#define BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_MODE_0_MASK 0x02000000L
++#define BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_MODE_1_MASK 0x04000000L
++#define BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_MODE_2_MASK 0x08000000L
++//BIF_DOORBELL_INT_CNTL
++#define BIF_DOORBELL_INT_CNTL__DOORBELL_INTERRUPT_STATUS__SHIFT 0x0
++#define BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_STATUS__SHIFT 0x1
++#define BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS__SHIFT 0x2
++#define BIF_DOORBELL_INT_CNTL__DOORBELL_INTERRUPT_CLEAR__SHIFT 0x10
++#define BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_CLEAR__SHIFT 0x11
++#define BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR__SHIFT 0x12
++#define BIF_DOORBELL_INT_CNTL__DOORBELL_INTERRUPT_DISABLE__SHIFT 0x18
++#define BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_DISABLE__SHIFT 0x19
++#define BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_DISABLE__SHIFT 0x1a
++#define BIF_DOORBELL_INT_CNTL__DOORBELL_INTERRUPT_STATUS_MASK 0x00000001L
++#define BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_STATUS_MASK 0x00000002L
++#define BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS_MASK 0x00000004L
++#define BIF_DOORBELL_INT_CNTL__DOORBELL_INTERRUPT_CLEAR_MASK 0x00010000L
++#define BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_CLEAR_MASK 0x00020000L
++#define BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR_MASK 0x00040000L
++#define BIF_DOORBELL_INT_CNTL__DOORBELL_INTERRUPT_DISABLE_MASK 0x01000000L
++#define BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_DISABLE_MASK 0x02000000L
++#define BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_DISABLE_MASK 0x04000000L
++//BIF_FB_EN
++#define BIF_FB_EN__FB_READ_EN__SHIFT 0x0
++#define BIF_FB_EN__FB_WRITE_EN__SHIFT 0x1
++#define BIF_FB_EN__FB_READ_EN_MASK 0x00000001L
++#define BIF_FB_EN__FB_WRITE_EN_MASK 0x00000002L
++//BIF_BUSY_DELAY_CNTR
++#define BIF_BUSY_DELAY_CNTR__DELAY_CNT__SHIFT 0x0
++#define BIF_BUSY_DELAY_CNTR__DELAY_CNT_MASK 0x0000003FL
++//BIF_MST_TRANS_PENDING_VF
++#define BIF_MST_TRANS_PENDING_VF__BIF_MST_TRANS_PENDING__SHIFT 0x0
++#define BIF_MST_TRANS_PENDING_VF__BIF_MST_TRANS_PENDING_MASK 0x7FFFFFFFL
++//BIF_SLV_TRANS_PENDING_VF
++#define BIF_SLV_TRANS_PENDING_VF__BIF_SLV_TRANS_PENDING__SHIFT 0x0
++#define BIF_SLV_TRANS_PENDING_VF__BIF_SLV_TRANS_PENDING_MASK 0x7FFFFFFFL
++//BACO_CNTL
++#define BACO_CNTL__BACO_EN__SHIFT 0x0
++#define BACO_CNTL__BACO_DUMMY_EN__SHIFT 0x2
++#define BACO_CNTL__BACO_POWER_OFF__SHIFT 0x3
++#define BACO_CNTL__BACO_DSTATE_BYPASS__SHIFT 0x5
++#define BACO_CNTL__BACO_RST_INTR_MASK__SHIFT 0x6
++#define BACO_CNTL__BACO_MODE__SHIFT 0x8
++#define BACO_CNTL__RCU_BIF_CONFIG_DONE__SHIFT 0x9
++#define BACO_CNTL__BACO_AUTO_EXIT__SHIFT 0x1f
++#define BACO_CNTL__BACO_EN_MASK 0x00000001L
++#define BACO_CNTL__BACO_DUMMY_EN_MASK 0x00000004L
++#define BACO_CNTL__BACO_POWER_OFF_MASK 0x00000008L
++#define BACO_CNTL__BACO_DSTATE_BYPASS_MASK 0x00000020L
++#define BACO_CNTL__BACO_RST_INTR_MASK_MASK 0x00000040L
++#define BACO_CNTL__BACO_MODE_MASK 0x00000100L
++#define BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK 0x00000200L
++#define BACO_CNTL__BACO_AUTO_EXIT_MASK 0x80000000L
++//BIF_BACO_EXIT_TIME0
++#define BIF_BACO_EXIT_TIME0__BACO_EXIT_PXEN_CLR_TIMER__SHIFT 0x0
++#define BIF_BACO_EXIT_TIME0__BACO_EXIT_PXEN_CLR_TIMER_MASK 0x000FFFFFL
++//BIF_BACO_EXIT_TIMER1
++#define BIF_BACO_EXIT_TIMER1__BACO_EXIT_SIDEBAND_TIMER__SHIFT 0x0
++#define BIF_BACO_EXIT_TIMER1__BACO_HW_AUTO_FLUSH_EN__SHIFT 0x18
++#define BIF_BACO_EXIT_TIMER1__BACO_HW_EXIT_ENDING_AUTO_BY_RSMU_INTR_CLR__SHIFT 0x19
++#define BIF_BACO_EXIT_TIMER1__BACO_HW_EXIT_DIS__SHIFT 0x1a
++#define BIF_BACO_EXIT_TIMER1__PX_EN_OE_IN_PX_EN_HIGH__SHIFT 0x1b
++#define BIF_BACO_EXIT_TIMER1__PX_EN_OE_IN_PX_EN_LOW__SHIFT 0x1c
++#define BIF_BACO_EXIT_TIMER1__BACO_MODE_SEL__SHIFT 0x1d
++#define BIF_BACO_EXIT_TIMER1__AUTO_BACO_EXIT_CLR_BY_HW_DIS__SHIFT 0x1f
++#define BIF_BACO_EXIT_TIMER1__BACO_EXIT_SIDEBAND_TIMER_MASK 0x000FFFFFL
++#define BIF_BACO_EXIT_TIMER1__BACO_HW_AUTO_FLUSH_EN_MASK 0x01000000L
++#define BIF_BACO_EXIT_TIMER1__BACO_HW_EXIT_ENDING_AUTO_BY_RSMU_INTR_CLR_MASK 0x02000000L
++#define BIF_BACO_EXIT_TIMER1__BACO_HW_EXIT_DIS_MASK 0x04000000L
++#define BIF_BACO_EXIT_TIMER1__PX_EN_OE_IN_PX_EN_HIGH_MASK 0x08000000L
++#define BIF_BACO_EXIT_TIMER1__PX_EN_OE_IN_PX_EN_LOW_MASK 0x10000000L
++#define BIF_BACO_EXIT_TIMER1__BACO_MODE_SEL_MASK 0x60000000L
++#define BIF_BACO_EXIT_TIMER1__AUTO_BACO_EXIT_CLR_BY_HW_DIS_MASK 0x80000000L
++//BIF_BACO_EXIT_TIMER2
++#define BIF_BACO_EXIT_TIMER2__BACO_EXIT_LCLK_BAK_TIMER__SHIFT 0x0
++#define BIF_BACO_EXIT_TIMER2__BACO_EXIT_LCLK_BAK_TIMER_MASK 0x000FFFFFL
++//BIF_BACO_EXIT_TIMER3
++#define BIF_BACO_EXIT_TIMER3__BACO_EXIT_DUMMY_EN_CLR_TIMER__SHIFT 0x0
++#define BIF_BACO_EXIT_TIMER3__BACO_EXIT_DUMMY_EN_CLR_TIMER_MASK 0x000FFFFFL
++//BIF_BACO_EXIT_TIMER4
++#define BIF_BACO_EXIT_TIMER4__BACO_EXIT_BACO_EN_CLR_TIMER__SHIFT 0x0
++#define BIF_BACO_EXIT_TIMER4__BACO_EXIT_BACO_EN_CLR_TIMER_MASK 0x000FFFFFL
++//MEM_TYPE_CNTL
++#define MEM_TYPE_CNTL__BF_MEM_PHY_G5_G3__SHIFT 0x0
++#define MEM_TYPE_CNTL__BF_MEM_PHY_G5_G3_MASK 0x00000001L
++//NBIF_GFX_ADDR_LUT_CNTL
++#define NBIF_GFX_ADDR_LUT_CNTL__LUT_ENABLE__SHIFT 0x0
++#define NBIF_GFX_ADDR_LUT_CNTL__MSI_ADDR_MODE__SHIFT 0x1
++#define NBIF_GFX_ADDR_LUT_CNTL__LUT_ENABLE_MASK 0x00000001L
++#define NBIF_GFX_ADDR_LUT_CNTL__MSI_ADDR_MODE_MASK 0x00000002L
++//NBIF_GFX_ADDR_LUT_0
++#define NBIF_GFX_ADDR_LUT_0__ADDR__SHIFT 0x0
++#define NBIF_GFX_ADDR_LUT_0__ADDR_MASK 0x00FFFFFFL
++//NBIF_GFX_ADDR_LUT_1
++#define NBIF_GFX_ADDR_LUT_1__ADDR__SHIFT 0x0
++#define NBIF_GFX_ADDR_LUT_1__ADDR_MASK 0x00FFFFFFL
++//NBIF_GFX_ADDR_LUT_2
++#define NBIF_GFX_ADDR_LUT_2__ADDR__SHIFT 0x0
++#define NBIF_GFX_ADDR_LUT_2__ADDR_MASK 0x00FFFFFFL
++//NBIF_GFX_ADDR_LUT_3
++#define NBIF_GFX_ADDR_LUT_3__ADDR__SHIFT 0x0
++#define NBIF_GFX_ADDR_LUT_3__ADDR_MASK 0x00FFFFFFL
++//NBIF_GFX_ADDR_LUT_4
++#define NBIF_GFX_ADDR_LUT_4__ADDR__SHIFT 0x0
++#define NBIF_GFX_ADDR_LUT_4__ADDR_MASK 0x00FFFFFFL
++//NBIF_GFX_ADDR_LUT_5
++#define NBIF_GFX_ADDR_LUT_5__ADDR__SHIFT 0x0
++#define NBIF_GFX_ADDR_LUT_5__ADDR_MASK 0x00FFFFFFL
++//NBIF_GFX_ADDR_LUT_6
++#define NBIF_GFX_ADDR_LUT_6__ADDR__SHIFT 0x0
++#define NBIF_GFX_ADDR_LUT_6__ADDR_MASK 0x00FFFFFFL
++//NBIF_GFX_ADDR_LUT_7
++#define NBIF_GFX_ADDR_LUT_7__ADDR__SHIFT 0x0
++#define NBIF_GFX_ADDR_LUT_7__ADDR_MASK 0x00FFFFFFL
++//NBIF_GFX_ADDR_LUT_8
++#define NBIF_GFX_ADDR_LUT_8__ADDR__SHIFT 0x0
++#define NBIF_GFX_ADDR_LUT_8__ADDR_MASK 0x00FFFFFFL
++//NBIF_GFX_ADDR_LUT_9
++#define NBIF_GFX_ADDR_LUT_9__ADDR__SHIFT 0x0
++#define NBIF_GFX_ADDR_LUT_9__ADDR_MASK 0x00FFFFFFL
++//NBIF_GFX_ADDR_LUT_10
++#define NBIF_GFX_ADDR_LUT_10__ADDR__SHIFT 0x0
++#define NBIF_GFX_ADDR_LUT_10__ADDR_MASK 0x00FFFFFFL
++//NBIF_GFX_ADDR_LUT_11
++#define NBIF_GFX_ADDR_LUT_11__ADDR__SHIFT 0x0
++#define NBIF_GFX_ADDR_LUT_11__ADDR_MASK 0x00FFFFFFL
++//NBIF_GFX_ADDR_LUT_12
++#define NBIF_GFX_ADDR_LUT_12__ADDR__SHIFT 0x0
++#define NBIF_GFX_ADDR_LUT_12__ADDR_MASK 0x00FFFFFFL
++//NBIF_GFX_ADDR_LUT_13
++#define NBIF_GFX_ADDR_LUT_13__ADDR__SHIFT 0x0
++#define NBIF_GFX_ADDR_LUT_13__ADDR_MASK 0x00FFFFFFL
++//NBIF_GFX_ADDR_LUT_14
++#define NBIF_GFX_ADDR_LUT_14__ADDR__SHIFT 0x0
++#define NBIF_GFX_ADDR_LUT_14__ADDR_MASK 0x00FFFFFFL
++//NBIF_GFX_ADDR_LUT_15
++#define NBIF_GFX_ADDR_LUT_15__ADDR__SHIFT 0x0
++#define NBIF_GFX_ADDR_LUT_15__ADDR_MASK 0x00FFFFFFL
++//REMAP_HDP_MEM_FLUSH_CNTL
++#define REMAP_HDP_MEM_FLUSH_CNTL__ADDRESS__SHIFT 0x2
++#define REMAP_HDP_MEM_FLUSH_CNTL__ADDRESS_MASK 0x0007FFFCL
++//REMAP_HDP_REG_FLUSH_CNTL
++#define REMAP_HDP_REG_FLUSH_CNTL__ADDRESS__SHIFT 0x2
++#define REMAP_HDP_REG_FLUSH_CNTL__ADDRESS_MASK 0x0007FFFCL
++//BIF_RB_CNTL
++#define BIF_RB_CNTL__RB_ENABLE__SHIFT 0x0
++#define BIF_RB_CNTL__RB_SIZE__SHIFT 0x1
++#define BIF_RB_CNTL__WPTR_WRITEBACK_ENABLE__SHIFT 0x8
++#define BIF_RB_CNTL__WPTR_WRITEBACK_TIMER__SHIFT 0x9
++#define BIF_RB_CNTL__BIF_RB_TRAN__SHIFT 0x11
++#define BIF_RB_CNTL__RB_INTR_FIX_PRIORITY__SHIFT 0x1a
++#define BIF_RB_CNTL__RB_INTR_ARB_MODE__SHIFT 0x1d
++#define BIF_RB_CNTL__RB_RST_BY_FLR_DISABLE__SHIFT 0x1e
++#define BIF_RB_CNTL__WPTR_OVERFLOW_CLEAR__SHIFT 0x1f
++#define BIF_RB_CNTL__RB_ENABLE_MASK 0x00000001L
++#define BIF_RB_CNTL__RB_SIZE_MASK 0x0000003EL
++#define BIF_RB_CNTL__WPTR_WRITEBACK_ENABLE_MASK 0x00000100L
++#define BIF_RB_CNTL__WPTR_WRITEBACK_TIMER_MASK 0x00003E00L
++#define BIF_RB_CNTL__BIF_RB_TRAN_MASK 0x00020000L
++#define BIF_RB_CNTL__RB_INTR_FIX_PRIORITY_MASK 0x1C000000L
++#define BIF_RB_CNTL__RB_INTR_ARB_MODE_MASK 0x20000000L
++#define BIF_RB_CNTL__RB_RST_BY_FLR_DISABLE_MASK 0x40000000L
++#define BIF_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK 0x80000000L
++//BIF_RB_BASE
++#define BIF_RB_BASE__ADDR__SHIFT 0x0
++#define BIF_RB_BASE__ADDR_MASK 0xFFFFFFFFL
++//BIF_RB_RPTR
++#define BIF_RB_RPTR__OFFSET__SHIFT 0x2
++#define BIF_RB_RPTR__OFFSET_MASK 0x0003FFFCL
++//BIF_RB_WPTR
++#define BIF_RB_WPTR__BIF_RB_OVERFLOW__SHIFT 0x0
++#define BIF_RB_WPTR__OFFSET__SHIFT 0x2
++#define BIF_RB_WPTR__BIF_RB_OVERFLOW_MASK 0x00000001L
++#define BIF_RB_WPTR__OFFSET_MASK 0x0003FFFCL
++//BIF_RB_WPTR_ADDR_HI
++#define BIF_RB_WPTR_ADDR_HI__ADDR__SHIFT 0x0
++#define BIF_RB_WPTR_ADDR_HI__ADDR_MASK 0x000000FFL
++//BIF_RB_WPTR_ADDR_LO
++#define BIF_RB_WPTR_ADDR_LO__ADDR__SHIFT 0x2
++#define BIF_RB_WPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//MAILBOX_INDEX
++#define MAILBOX_INDEX__MAILBOX_INDEX__SHIFT 0x0
++#define MAILBOX_INDEX__MAILBOX_INDEX_MASK 0x0000001FL
++//BIF_MP1_INTR_CTRL
++#define BIF_MP1_INTR_CTRL__BACO_EXIT_DONE__SHIFT 0x0
++#define BIF_MP1_INTR_CTRL__BACO_EXIT_DONE_MASK 0x00000001L
++//BIF_UVD_GPUIOV_CFG_SIZE
++#define BIF_UVD_GPUIOV_CFG_SIZE__UVD_GPUIOV_CFG_SIZE__SHIFT 0x0
++#define BIF_UVD_GPUIOV_CFG_SIZE__UVD_GPUIOV_CFG_SIZE_MASK 0x0000000FL
++//BIF_VCE_GPUIOV_CFG_SIZE
++#define BIF_VCE_GPUIOV_CFG_SIZE__VCE_GPUIOV_CFG_SIZE__SHIFT 0x0
++#define BIF_VCE_GPUIOV_CFG_SIZE__VCE_GPUIOV_CFG_SIZE_MASK 0x0000000FL
++//BIF_GFX_SDMA_GPUIOV_CFG_SIZE
++#define BIF_GFX_SDMA_GPUIOV_CFG_SIZE__GFX_SDMA_GPUIOV_CFG_SIZE__SHIFT 0x0
++#define BIF_GFX_SDMA_GPUIOV_CFG_SIZE__GFX_SDMA_GPUIOV_CFG_SIZE_MASK 0x0000000FL
++//BIF_PERSTB_PAD_CNTL
++#define BIF_PERSTB_PAD_CNTL__PERSTB_PAD_CNTL__SHIFT 0x0
++#define BIF_PERSTB_PAD_CNTL__PERSTB_PAD_CNTL_MASK 0x0000FFFFL
++//BIF_PX_EN_PAD_CNTL
++#define BIF_PX_EN_PAD_CNTL__PX_EN_PAD_CNTL__SHIFT 0x0
++#define BIF_PX_EN_PAD_CNTL__PX_EN_PAD_CNTL_MASK 0x000000FFL
++//BIF_REFPADKIN_PAD_CNTL
++#define BIF_REFPADKIN_PAD_CNTL__REFPADKIN_PAD_CNTL__SHIFT 0x0
++#define BIF_REFPADKIN_PAD_CNTL__REFPADKIN_PAD_CNTL_MASK 0x000000FFL
++//BIF_CLKREQB_PAD_CNTL
++#define BIF_CLKREQB_PAD_CNTL__CLKREQB_PAD_CNTL__SHIFT 0x0
++#define BIF_CLKREQB_PAD_CNTL__CLKREQB_PAD_CNTL_MASK 0x00FFFFFFL
++//BIF_PWRBRK_PAD_CNTL
++#define BIF_PWRBRK_PAD_CNTL__PWRBRK_PAD_CNTL__SHIFT 0x0
++#define BIF_PWRBRK_PAD_CNTL__PWRBRK_PAD_CNTL_MASK 0x000000FFL
++//BIF_WAKEB_PAD_CNTL
++#define BIF_WAKEB_PAD_CNTL__GPIO33_ITXIMPSEL__SHIFT 0x0
++#define BIF_WAKEB_PAD_CNTL__GPIO33_ICTFEN__SHIFT 0x1
++#define BIF_WAKEB_PAD_CNTL__GPIO33_IPD__SHIFT 0x2
++#define BIF_WAKEB_PAD_CNTL__GPIO33_IPU__SHIFT 0x3
++#define BIF_WAKEB_PAD_CNTL__GPIO33_IRXEN__SHIFT 0x4
++#define BIF_WAKEB_PAD_CNTL__GPIO33_IRXSEL0__SHIFT 0x5
++#define BIF_WAKEB_PAD_CNTL__GPIO33_IRXSEL1__SHIFT 0x6
++#define BIF_WAKEB_PAD_CNTL__GPIO33_RESERVED__SHIFT 0x7
++#define BIF_WAKEB_PAD_CNTL__GPIO33_ITXIMPSEL_MASK 0x00000001L
++#define BIF_WAKEB_PAD_CNTL__GPIO33_ICTFEN_MASK 0x00000002L
++#define BIF_WAKEB_PAD_CNTL__GPIO33_IPD_MASK 0x00000004L
++#define BIF_WAKEB_PAD_CNTL__GPIO33_IPU_MASK 0x00000008L
++#define BIF_WAKEB_PAD_CNTL__GPIO33_IRXEN_MASK 0x00000010L
++#define BIF_WAKEB_PAD_CNTL__GPIO33_IRXSEL0_MASK 0x00000020L
++#define BIF_WAKEB_PAD_CNTL__GPIO33_IRXSEL1_MASK 0x00000040L
++#define BIF_WAKEB_PAD_CNTL__GPIO33_RESERVED_MASK 0x00000080L
++
++
++// addressBlock: nbio_nbif0_bif_bx_pf_BIFPFVFDEC1
++//BIF_BME_STATUS
++#define BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
++#define BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
++#define BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
++#define BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
++//BIF_ATOMIC_ERR_LOG
++//DOORBELL_SELFRING_GPA_APER_BASE_HIGH
++#define DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
++#define DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
++//DOORBELL_SELFRING_GPA_APER_BASE_LOW
++#define DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
++#define DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
++//DOORBELL_SELFRING_GPA_APER_CNTL
++#define DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
++#define DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
++#define DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
++#define DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
++#define DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
++#define DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
++//HDP_REG_COHERENCY_FLUSH_CNTL
++#define HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
++#define HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
++//HDP_MEM_COHERENCY_FLUSH_CNTL
++#define HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
++#define HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
++//GPU_HDP_FLUSH_REQ
++#define GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
++#define GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
++#define GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
++#define GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
++#define GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
++#define GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
++#define GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
++#define GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
++#define GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
++#define GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
++#define GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
++#define GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
++#define GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
++#define GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
++#define GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
++#define GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
++#define GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
++#define GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
++#define GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
++#define GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
++#define GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
++#define GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
++#define GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
++#define GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
++//GPU_HDP_FLUSH_DONE
++#define GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
++#define GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
++#define GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
++#define GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
++#define GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
++#define GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
++#define GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
++#define GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
++#define GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
++#define GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
++#define GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
++#define GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
++#define GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
++#define GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
++#define GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
++#define GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
++#define GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
++#define GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
++#define GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
++#define GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
++#define GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
++#define GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
++#define GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
++#define GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
++//BIF_TRANS_PENDING
++#define BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
++#define BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
++#define BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
++#define BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
++//NBIF_GFX_ADDR_LUT_BYPASS
++#define NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS__SHIFT 0x0
++#define NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS_MASK 0x00000001L
++//MAILBOX_MSGBUF_TRN_DW0
++#define MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
++#define MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//MAILBOX_MSGBUF_TRN_DW1
++#define MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
++#define MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//MAILBOX_MSGBUF_TRN_DW2
++#define MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
++#define MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//MAILBOX_MSGBUF_TRN_DW3
++#define MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
++#define MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//MAILBOX_MSGBUF_RCV_DW0
++#define MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
++#define MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//MAILBOX_MSGBUF_RCV_DW1
++#define MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
++#define MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//MAILBOX_MSGBUF_RCV_DW2
++#define MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
++#define MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//MAILBOX_MSGBUF_RCV_DW3
++#define MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
++#define MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//MAILBOX_CONTROL
++#define MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
++#define MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
++#define MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
++#define MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
++#define MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
++#define MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
++#define MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
++#define MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
++//MAILBOX_INT_CNTL
++#define MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
++#define MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
++#define MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
++#define MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
++//BIF_VMHV_MAILBOX
++#define BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
++#define BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
++#define BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
++#define BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
++#define BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
++#define BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
++#define BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
++#define BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
++#define BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
++#define BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
++#define BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
++#define BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
++#define BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
++#define BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
++#define BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
++#define BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
++
++
++// addressBlock: nbio_nbif0_gdc_GDCDEC
++//NGDC_SDP_PORT_CTRL
++#define NGDC_SDP_PORT_CTRL__SDP_DISCON_HYSTERESIS__SHIFT 0x0
++#define NGDC_SDP_PORT_CTRL__SDP_DISCON_HYSTERESIS_MASK 0x000000FFL
++//SHUB_REGS_IF_CTL
++#define SHUB_REGS_IF_CTL__SHUB_REGS_DROP_NONPF_MMREGREQ_SETERR_DIS__SHIFT 0x0
++#define SHUB_REGS_IF_CTL__SHUB_REGS_DROP_NONPF_MMREGREQ_SETERR_DIS_MASK 0x00000001L
++//NGDC_MGCG_CTRL
++#define NGDC_MGCG_CTRL__NGDC_MGCG_EN__SHIFT 0x0
++#define NGDC_MGCG_CTRL__NGDC_MGCG_MODE__SHIFT 0x1
++#define NGDC_MGCG_CTRL__NGDC_MGCG_HYSTERESIS__SHIFT 0x2
++#define NGDC_MGCG_CTRL__NGDC_MGCG_HST_DIS__SHIFT 0xa
++#define NGDC_MGCG_CTRL__NGDC_MGCG_DMA_DIS__SHIFT 0xb
++#define NGDC_MGCG_CTRL__NGDC_MGCG_REG_DIS__SHIFT 0xc
++#define NGDC_MGCG_CTRL__NGDC_MGCG_AER_DIS__SHIFT 0xd
++#define NGDC_MGCG_CTRL__NGDC_MGCG_EN_MASK 0x00000001L
++#define NGDC_MGCG_CTRL__NGDC_MGCG_MODE_MASK 0x00000002L
++#define NGDC_MGCG_CTRL__NGDC_MGCG_HYSTERESIS_MASK 0x000003FCL
++#define NGDC_MGCG_CTRL__NGDC_MGCG_HST_DIS_MASK 0x00000400L
++#define NGDC_MGCG_CTRL__NGDC_MGCG_DMA_DIS_MASK 0x00000800L
++#define NGDC_MGCG_CTRL__NGDC_MGCG_REG_DIS_MASK 0x00001000L
++#define NGDC_MGCG_CTRL__NGDC_MGCG_AER_DIS_MASK 0x00002000L
++//NGDC_RESERVED_0
++#define NGDC_RESERVED_0__RESERVED__SHIFT 0x0
++#define NGDC_RESERVED_0__RESERVED_MASK 0xFFFFFFFFL
++//NGDC_RESERVED_1
++#define NGDC_RESERVED_1__RESERVED__SHIFT 0x0
++#define NGDC_RESERVED_1__RESERVED_MASK 0xFFFFFFFFL
++//NGDC_SDP_PORT_CTRL_SOCCLK
++#define NGDC_SDP_PORT_CTRL_SOCCLK__SDP_DISCON_HYSTERESIS_SOCCLK__SHIFT 0x0
++#define NGDC_SDP_PORT_CTRL_SOCCLK__SDP_DISCON_HYSTERESIS_SOCCLK_MASK 0x000000FFL
++//BIF_SDMA0_DOORBELL_RANGE
++#define BIF_SDMA0_DOORBELL_RANGE__OFFSET__SHIFT 0x2
++#define BIF_SDMA0_DOORBELL_RANGE__SIZE__SHIFT 0x10
++#define BIF_SDMA0_DOORBELL_RANGE__OFFSET_MASK 0x00000FFCL
++#define BIF_SDMA0_DOORBELL_RANGE__SIZE_MASK 0x001F0000L
++//BIF_SDMA1_DOORBELL_RANGE
++#define BIF_SDMA1_DOORBELL_RANGE__OFFSET__SHIFT 0x2
++#define BIF_SDMA1_DOORBELL_RANGE__SIZE__SHIFT 0x10
++#define BIF_SDMA1_DOORBELL_RANGE__OFFSET_MASK 0x00000FFCL
++#define BIF_SDMA1_DOORBELL_RANGE__SIZE_MASK 0x001F0000L
++//BIF_IH_DOORBELL_RANGE
++#define BIF_IH_DOORBELL_RANGE__OFFSET__SHIFT 0x2
++#define BIF_IH_DOORBELL_RANGE__SIZE__SHIFT 0x10
++#define BIF_IH_DOORBELL_RANGE__OFFSET_MASK 0x00000FFCL
++#define BIF_IH_DOORBELL_RANGE__SIZE_MASK 0x001F0000L
++//BIF_MMSCH0_DOORBELL_RANGE
++#define BIF_MMSCH0_DOORBELL_RANGE__OFFSET__SHIFT 0x2
++#define BIF_MMSCH0_DOORBELL_RANGE__SIZE__SHIFT 0x10
++#define BIF_MMSCH0_DOORBELL_RANGE__OFFSET_MASK 0x00000FFCL
++#define BIF_MMSCH0_DOORBELL_RANGE__SIZE_MASK 0x001F0000L
++//BIF_ACV_DOORBELL_RANGE
++#define BIF_ACV_DOORBELL_RANGE__OFFSET__SHIFT 0x2
++#define BIF_ACV_DOORBELL_RANGE__SIZE__SHIFT 0x10
++#define BIF_ACV_DOORBELL_RANGE__OFFSET_MASK 0x00000FFCL
++#define BIF_ACV_DOORBELL_RANGE__SIZE_MASK 0x001F0000L
++//BIF_DOORBELL_FENCE_CNTL
++#define BIF_DOORBELL_FENCE_CNTL__DOORBELL_FENCE_CP_ENABLE__SHIFT 0x0
++#define BIF_DOORBELL_FENCE_CNTL__DOORBELL_FENCE_SDMA0_ENABLE__SHIFT 0x1
++#define BIF_DOORBELL_FENCE_CNTL__DOORBELL_FENCE_SDMA1_ENABLE__SHIFT 0x2
++#define BIF_DOORBELL_FENCE_CNTL__DOORBELL_FENCE_ACV_ENABLE__SHIFT 0x3
++#define BIF_DOORBELL_FENCE_CNTL__DOORBELL_FENCE_ONCE_TRIGGER_DIS__SHIFT 0x10
++#define BIF_DOORBELL_FENCE_CNTL__DOORBELL_FENCE_CP_ENABLE_MASK 0x00000001L
++#define BIF_DOORBELL_FENCE_CNTL__DOORBELL_FENCE_SDMA0_ENABLE_MASK 0x00000002L
++#define BIF_DOORBELL_FENCE_CNTL__DOORBELL_FENCE_SDMA1_ENABLE_MASK 0x00000004L
++#define BIF_DOORBELL_FENCE_CNTL__DOORBELL_FENCE_ACV_ENABLE_MASK 0x00000008L
++#define BIF_DOORBELL_FENCE_CNTL__DOORBELL_FENCE_ONCE_TRIGGER_DIS_MASK 0x00010000L
++//S2A_MISC_CNTL
++#define S2A_MISC_CNTL__DOORBELL_64BIT_SUPPORT_SDMA0_DIS__SHIFT 0x0
++#define S2A_MISC_CNTL__DOORBELL_64BIT_SUPPORT_SDMA1_DIS__SHIFT 0x1
++#define S2A_MISC_CNTL__DOORBELL_64BIT_SUPPORT_CP_DIS__SHIFT 0x2
++#define S2A_MISC_CNTL__AXI_HST_CPL_EP_DIS__SHIFT 0x3
++#define S2A_MISC_CNTL__DOORBELL_64BIT_SUPPORT_ACV_DIS__SHIFT 0x4
++#define S2A_MISC_CNTL__ATM_ARB_MODE__SHIFT 0x8
++#define S2A_MISC_CNTL__RB_ARB_MODE__SHIFT 0xa
++#define S2A_MISC_CNTL__HSTR_ARB_MODE__SHIFT 0xc
++#define S2A_MISC_CNTL__WRSP_ARB_MODE__SHIFT 0x10
++#define S2A_MISC_CNTL__DOORBELL_64BIT_SUPPORT_SDMA0_DIS_MASK 0x00000001L
++#define S2A_MISC_CNTL__DOORBELL_64BIT_SUPPORT_SDMA1_DIS_MASK 0x00000002L
++#define S2A_MISC_CNTL__DOORBELL_64BIT_SUPPORT_CP_DIS_MASK 0x00000004L
++#define S2A_MISC_CNTL__AXI_HST_CPL_EP_DIS_MASK 0x00000008L
++#define S2A_MISC_CNTL__DOORBELL_64BIT_SUPPORT_ACV_DIS_MASK 0x00000010L
++#define S2A_MISC_CNTL__ATM_ARB_MODE_MASK 0x00000300L
++#define S2A_MISC_CNTL__RB_ARB_MODE_MASK 0x00000C00L
++#define S2A_MISC_CNTL__HSTR_ARB_MODE_MASK 0x00003000L
++#define S2A_MISC_CNTL__WRSP_ARB_MODE_MASK 0x000F0000L
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_BIFDEC2
++//GFXMSIX_VECT0_ADDR_LO
++#define GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//GFXMSIX_VECT0_ADDR_HI
++#define GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//GFXMSIX_VECT0_MSG_DATA
++#define GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//GFXMSIX_VECT0_CONTROL
++#define GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
++#define GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
++//GFXMSIX_VECT1_ADDR_LO
++#define GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//GFXMSIX_VECT1_ADDR_HI
++#define GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//GFXMSIX_VECT1_MSG_DATA
++#define GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//GFXMSIX_VECT1_CONTROL
++#define GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
++#define GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
++//GFXMSIX_VECT2_ADDR_LO
++#define GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//GFXMSIX_VECT2_ADDR_HI
++#define GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//GFXMSIX_VECT2_MSG_DATA
++#define GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//GFXMSIX_VECT2_CONTROL
++#define GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
++#define GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
++//GFXMSIX_PBA
++#define GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
++#define GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
++#define GFXMSIX_PBA__MSIX_PENDING_BITS_2__SHIFT 0x2
++#define GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
++#define GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
++#define GFXMSIX_PBA__MSIX_PENDING_BITS_2_MASK 0x00000004L
++
++
++// addressBlock: nbio_nbif0_gdc_GDCDEC
++//GDC0_NGDC_SDP_PORT_CTRL
++#define GDC0_NGDC_SDP_PORT_CTRL__SDP_DISCON_HYSTERESIS__SHIFT 0x0
++#define GDC0_NGDC_SDP_PORT_CTRL__SDP_DISCON_HYSTERESIS_MASK 0x000000FFL
++//GDC0_SHUB_REGS_IF_CTL
++#define GDC0_SHUB_REGS_IF_CTL__SHUB_REGS_DROP_NONPF_MMREGREQ_SETERR_DIS__SHIFT 0x0
++#define GDC0_SHUB_REGS_IF_CTL__SHUB_REGS_DROP_NONPF_MMREGREQ_SETERR_DIS_MASK 0x00000001L
++//GDC0_NGDC_MGCG_CTRL
++#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_EN__SHIFT 0x0
++#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_MODE__SHIFT 0x1
++#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_HYSTERESIS__SHIFT 0x2
++#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_HST_DIS__SHIFT 0xa
++#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_DMA_DIS__SHIFT 0xb
++#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_REG_DIS__SHIFT 0xc
++#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_AER_DIS__SHIFT 0xd
++#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_EN_MASK 0x00000001L
++#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_MODE_MASK 0x00000002L
++#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_HYSTERESIS_MASK 0x000003FCL
++#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_HST_DIS_MASK 0x00000400L
++#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_DMA_DIS_MASK 0x00000800L
++#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_REG_DIS_MASK 0x00001000L
++#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_AER_DIS_MASK 0x00002000L
++//GDC0_NGDC_RESERVED_0
++#define GDC0_NGDC_RESERVED_0__RESERVED__SHIFT 0x0
++#define GDC0_NGDC_RESERVED_0__RESERVED_MASK 0xFFFFFFFFL
++//GDC0_NGDC_RESERVED_1
++#define GDC0_NGDC_RESERVED_1__RESERVED__SHIFT 0x0
++#define GDC0_NGDC_RESERVED_1__RESERVED_MASK 0xFFFFFFFFL
++//GDC0_NGDC_SDP_PORT_CTRL_SOCCLK
++#define GDC0_NGDC_SDP_PORT_CTRL_SOCCLK__SDP_DISCON_HYSTERESIS_SOCCLK__SHIFT 0x0
++#define GDC0_NGDC_SDP_PORT_CTRL_SOCCLK__SDP_DISCON_HYSTERESIS_SOCCLK_MASK 0x000000FFL
++//GDC0_BIF_SDMA0_DOORBELL_RANGE
++#define GDC0_BIF_SDMA0_DOORBELL_RANGE__OFFSET__SHIFT 0x2
++#define GDC0_BIF_SDMA0_DOORBELL_RANGE__SIZE__SHIFT 0x10
++#define GDC0_BIF_SDMA0_DOORBELL_RANGE__OFFSET_MASK 0x00000FFCL
++#define GDC0_BIF_SDMA0_DOORBELL_RANGE__SIZE_MASK 0x001F0000L
++//GDC0_BIF_SDMA1_DOORBELL_RANGE
++#define GDC0_BIF_SDMA1_DOORBELL_RANGE__OFFSET__SHIFT 0x2
++#define GDC0_BIF_SDMA1_DOORBELL_RANGE__SIZE__SHIFT 0x10
++#define GDC0_BIF_SDMA1_DOORBELL_RANGE__OFFSET_MASK 0x00000FFCL
++#define GDC0_BIF_SDMA1_DOORBELL_RANGE__SIZE_MASK 0x001F0000L
++//GDC0_BIF_IH_DOORBELL_RANGE
++#define GDC0_BIF_IH_DOORBELL_RANGE__OFFSET__SHIFT 0x2
++#define GDC0_BIF_IH_DOORBELL_RANGE__SIZE__SHIFT 0x10
++#define GDC0_BIF_IH_DOORBELL_RANGE__OFFSET_MASK 0x00000FFCL
++#define GDC0_BIF_IH_DOORBELL_RANGE__SIZE_MASK 0x001F0000L
++//GDC0_BIF_MMSCH0_DOORBELL_RANGE
++#define GDC0_BIF_MMSCH0_DOORBELL_RANGE__OFFSET__SHIFT 0x2
++#define GDC0_BIF_MMSCH0_DOORBELL_RANGE__SIZE__SHIFT 0x10
++#define GDC0_BIF_MMSCH0_DOORBELL_RANGE__OFFSET_MASK 0x00000FFCL
++#define GDC0_BIF_MMSCH0_DOORBELL_RANGE__SIZE_MASK 0x001F0000L
++//GDC0_BIF_ACV_DOORBELL_RANGE
++#define GDC0_BIF_ACV_DOORBELL_RANGE__OFFSET__SHIFT 0x2
++#define GDC0_BIF_ACV_DOORBELL_RANGE__SIZE__SHIFT 0x10
++#define GDC0_BIF_ACV_DOORBELL_RANGE__OFFSET_MASK 0x00000FFCL
++#define GDC0_BIF_ACV_DOORBELL_RANGE__SIZE_MASK 0x001F0000L
++//GDC0_BIF_DOORBELL_FENCE_CNTL
++#define GDC0_BIF_DOORBELL_FENCE_CNTL__DOORBELL_FENCE_CP_ENABLE__SHIFT 0x0
++#define GDC0_BIF_DOORBELL_FENCE_CNTL__DOORBELL_FENCE_SDMA0_ENABLE__SHIFT 0x1
++#define GDC0_BIF_DOORBELL_FENCE_CNTL__DOORBELL_FENCE_SDMA1_ENABLE__SHIFT 0x2
++#define GDC0_BIF_DOORBELL_FENCE_CNTL__DOORBELL_FENCE_ACV_ENABLE__SHIFT 0x3
++#define GDC0_BIF_DOORBELL_FENCE_CNTL__DOORBELL_FENCE_ONCE_TRIGGER_DIS__SHIFT 0x10
++#define GDC0_BIF_DOORBELL_FENCE_CNTL__DOORBELL_FENCE_CP_ENABLE_MASK 0x00000001L
++#define GDC0_BIF_DOORBELL_FENCE_CNTL__DOORBELL_FENCE_SDMA0_ENABLE_MASK 0x00000002L
++#define GDC0_BIF_DOORBELL_FENCE_CNTL__DOORBELL_FENCE_SDMA1_ENABLE_MASK 0x00000004L
++#define GDC0_BIF_DOORBELL_FENCE_CNTL__DOORBELL_FENCE_ACV_ENABLE_MASK 0x00000008L
++#define GDC0_BIF_DOORBELL_FENCE_CNTL__DOORBELL_FENCE_ONCE_TRIGGER_DIS_MASK 0x00010000L
++//GDC0_S2A_MISC_CNTL
++#define GDC0_S2A_MISC_CNTL__DOORBELL_64BIT_SUPPORT_SDMA0_DIS__SHIFT 0x0
++#define GDC0_S2A_MISC_CNTL__DOORBELL_64BIT_SUPPORT_SDMA1_DIS__SHIFT 0x1
++#define GDC0_S2A_MISC_CNTL__DOORBELL_64BIT_SUPPORT_CP_DIS__SHIFT 0x2
++#define GDC0_S2A_MISC_CNTL__AXI_HST_CPL_EP_DIS__SHIFT 0x3
++#define GDC0_S2A_MISC_CNTL__DOORBELL_64BIT_SUPPORT_ACV_DIS__SHIFT 0x4
++#define GDC0_S2A_MISC_CNTL__ATM_ARB_MODE__SHIFT 0x8
++#define GDC0_S2A_MISC_CNTL__RB_ARB_MODE__SHIFT 0xa
++#define GDC0_S2A_MISC_CNTL__HSTR_ARB_MODE__SHIFT 0xc
++#define GDC0_S2A_MISC_CNTL__WRSP_ARB_MODE__SHIFT 0x10
++#define GDC0_S2A_MISC_CNTL__DOORBELL_64BIT_SUPPORT_SDMA0_DIS_MASK 0x00000001L
++#define GDC0_S2A_MISC_CNTL__DOORBELL_64BIT_SUPPORT_SDMA1_DIS_MASK 0x00000002L
++#define GDC0_S2A_MISC_CNTL__DOORBELL_64BIT_SUPPORT_CP_DIS_MASK 0x00000004L
++#define GDC0_S2A_MISC_CNTL__AXI_HST_CPL_EP_DIS_MASK 0x00000008L
++#define GDC0_S2A_MISC_CNTL__DOORBELL_64BIT_SUPPORT_ACV_DIS_MASK 0x00000010L
++#define GDC0_S2A_MISC_CNTL__ATM_ARB_MODE_MASK 0x00000300L
++#define GDC0_S2A_MISC_CNTL__RB_ARB_MODE_MASK 0x00000C00L
++#define GDC0_S2A_MISC_CNTL__HSTR_ARB_MODE_MASK 0x00003000L
++#define GDC0_S2A_MISC_CNTL__WRSP_ARB_MODE_MASK 0x000F0000L
++
++
++// addressBlock: nbio_nbif0_syshub_mmreg_syshubdirect
++//SYSHUB_DS_CTRL_SOCCLK
++#define SYSHUB_DS_CTRL_SOCCLK__HST_CL0_SOCCLK_DEEPSLEEP_ALLOW_ENABLE__SHIFT 0x0
++#define SYSHUB_DS_CTRL_SOCCLK__HST_CL1_SOCCLK_DEEPSLEEP_ALLOW_ENABLE__SHIFT 0x1
++#define SYSHUB_DS_CTRL_SOCCLK__HST_CL2_SOCCLK_DEEPSLEEP_ALLOW_ENABLE__SHIFT 0x2
++#define SYSHUB_DS_CTRL_SOCCLK__HST_CL3_SOCCLK_DEEPSLEEP_ALLOW_ENABLE__SHIFT 0x3
++#define SYSHUB_DS_CTRL_SOCCLK__HST_CL4_SOCCLK_DEEPSLEEP_ALLOW_ENABLE__SHIFT 0x4
++#define SYSHUB_DS_CTRL_SOCCLK__HST_CL5_SOCCLK_DEEPSLEEP_ALLOW_ENABLE__SHIFT 0x5
++#define SYSHUB_DS_CTRL_SOCCLK__HST_CL6_SOCCLK_DEEPSLEEP_ALLOW_ENABLE__SHIFT 0x6
++#define SYSHUB_DS_CTRL_SOCCLK__HST_CL7_SOCCLK_DEEPSLEEP_ALLOW_ENABLE__SHIFT 0x7
++#define SYSHUB_DS_CTRL_SOCCLK__DMA_CL0_SOCCLK_DEEPSLEEP_ALLOW_ENABLE__SHIFT 0x8
++#define SYSHUB_DS_CTRL_SOCCLK__DMA_CL1_SOCCLK_DEEPSLEEP_ALLOW_ENABLE__SHIFT 0x9
++#define SYSHUB_DS_CTRL_SOCCLK__SYSHUB_SOCCLK_DEEPSLEEP_ALLOW_ENABLE__SHIFT 0x1c
++#define SYSHUB_DS_CTRL_SOCCLK__SYSHUB_SOCCLK_DS_EN__SHIFT 0x1f
++#define SYSHUB_DS_CTRL_SOCCLK__HST_CL0_SOCCLK_DEEPSLEEP_ALLOW_ENABLE_MASK 0x00000001L
++#define SYSHUB_DS_CTRL_SOCCLK__HST_CL1_SOCCLK_DEEPSLEEP_ALLOW_ENABLE_MASK 0x00000002L
++#define SYSHUB_DS_CTRL_SOCCLK__HST_CL2_SOCCLK_DEEPSLEEP_ALLOW_ENABLE_MASK 0x00000004L
++#define SYSHUB_DS_CTRL_SOCCLK__HST_CL3_SOCCLK_DEEPSLEEP_ALLOW_ENABLE_MASK 0x00000008L
++#define SYSHUB_DS_CTRL_SOCCLK__HST_CL4_SOCCLK_DEEPSLEEP_ALLOW_ENABLE_MASK 0x00000010L
++#define SYSHUB_DS_CTRL_SOCCLK__HST_CL5_SOCCLK_DEEPSLEEP_ALLOW_ENABLE_MASK 0x00000020L
++#define SYSHUB_DS_CTRL_SOCCLK__HST_CL6_SOCCLK_DEEPSLEEP_ALLOW_ENABLE_MASK 0x00000040L
++#define SYSHUB_DS_CTRL_SOCCLK__HST_CL7_SOCCLK_DEEPSLEEP_ALLOW_ENABLE_MASK 0x00000080L
++#define SYSHUB_DS_CTRL_SOCCLK__DMA_CL0_SOCCLK_DEEPSLEEP_ALLOW_ENABLE_MASK 0x00000100L
++#define SYSHUB_DS_CTRL_SOCCLK__DMA_CL1_SOCCLK_DEEPSLEEP_ALLOW_ENABLE_MASK 0x00000200L
++#define SYSHUB_DS_CTRL_SOCCLK__SYSHUB_SOCCLK_DEEPSLEEP_ALLOW_ENABLE_MASK 0x10000000L
++#define SYSHUB_DS_CTRL_SOCCLK__SYSHUB_SOCCLK_DS_EN_MASK 0x80000000L
++//SYSHUB_DS_CTRL2_SOCCLK
++#define SYSHUB_DS_CTRL2_SOCCLK__SYSHUB_SOCCLK_DS_TIMER__SHIFT 0x0
++#define SYSHUB_DS_CTRL2_SOCCLK__SYSHUB_SOCCLK_DS_TIMER_MASK 0x0000FFFFL
++//SYSHUB_BGEN_ENHANCEMENT_BYPASS_EN_SOCCLK
++#define SYSHUB_BGEN_ENHANCEMENT_BYPASS_EN_SOCCLK__SYSHUB_bgen_socclk_HST_SW0_bypass_en__SHIFT 0x0
++#define SYSHUB_BGEN_ENHANCEMENT_BYPASS_EN_SOCCLK__SYSHUB_bgen_socclk_HST_SW1_bypass_en__SHIFT 0x1
++#define SYSHUB_BGEN_ENHANCEMENT_BYPASS_EN_SOCCLK__SYSHUB_bgen_socclk_HST_SW2_bypass_en__SHIFT 0x2
++#define SYSHUB_BGEN_ENHANCEMENT_BYPASS_EN_SOCCLK__SYSHUB_bgen_socclk_DMA_SW0_bypass_en__SHIFT 0x10
++#define SYSHUB_BGEN_ENHANCEMENT_BYPASS_EN_SOCCLK__SYSHUB_bgen_socclk_HST_SW0_bypass_en_MASK 0x00000001L
++#define SYSHUB_BGEN_ENHANCEMENT_BYPASS_EN_SOCCLK__SYSHUB_bgen_socclk_HST_SW1_bypass_en_MASK 0x00000002L
++#define SYSHUB_BGEN_ENHANCEMENT_BYPASS_EN_SOCCLK__SYSHUB_bgen_socclk_HST_SW2_bypass_en_MASK 0x00000004L
++#define SYSHUB_BGEN_ENHANCEMENT_BYPASS_EN_SOCCLK__SYSHUB_bgen_socclk_DMA_SW0_bypass_en_MASK 0x00010000L
++//SYSHUB_BGEN_ENHANCEMENT_IMM_EN_SOCCLK
++#define SYSHUB_BGEN_ENHANCEMENT_IMM_EN_SOCCLK__SYSHUB_bgen_socclk_HST_SW0_imm_en__SHIFT 0x0
++#define SYSHUB_BGEN_ENHANCEMENT_IMM_EN_SOCCLK__SYSHUB_bgen_socclk_HST_SW1_imm_en__SHIFT 0x1
++#define SYSHUB_BGEN_ENHANCEMENT_IMM_EN_SOCCLK__SYSHUB_bgen_socclk_HST_SW2_imm_en__SHIFT 0x2
++#define SYSHUB_BGEN_ENHANCEMENT_IMM_EN_SOCCLK__SYSHUB_bgen_socclk_DMA_SW0_imm_en__SHIFT 0x10
++#define SYSHUB_BGEN_ENHANCEMENT_IMM_EN_SOCCLK__SYSHUB_bgen_socclk_HST_SW0_imm_en_MASK 0x00000001L
++#define SYSHUB_BGEN_ENHANCEMENT_IMM_EN_SOCCLK__SYSHUB_bgen_socclk_HST_SW1_imm_en_MASK 0x00000002L
++#define SYSHUB_BGEN_ENHANCEMENT_IMM_EN_SOCCLK__SYSHUB_bgen_socclk_HST_SW2_imm_en_MASK 0x00000004L
++#define SYSHUB_BGEN_ENHANCEMENT_IMM_EN_SOCCLK__SYSHUB_bgen_socclk_DMA_SW0_imm_en_MASK 0x00010000L
++//SYSHUB_TRANS_IDLE_SOCCLK
++#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF0_SOCCLK__SHIFT 0x0
++#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF1_SOCCLK__SHIFT 0x1
++#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF2_SOCCLK__SHIFT 0x2
++#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF3_SOCCLK__SHIFT 0x3
++#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF4_SOCCLK__SHIFT 0x4
++#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF5_SOCCLK__SHIFT 0x5
++#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF6_SOCCLK__SHIFT 0x6
++#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF7_SOCCLK__SHIFT 0x7
++#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF8_SOCCLK__SHIFT 0x8
++#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF9_SOCCLK__SHIFT 0x9
++#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF10_SOCCLK__SHIFT 0xa
++#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF11_SOCCLK__SHIFT 0xb
++#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF12_SOCCLK__SHIFT 0xc
++#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF13_SOCCLK__SHIFT 0xd
++#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF14_SOCCLK__SHIFT 0xe
++#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF15_SOCCLK__SHIFT 0xf
++#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_PF_SOCCLK__SHIFT 0x10
++#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF0_SOCCLK_MASK 0x00000001L
++#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF1_SOCCLK_MASK 0x00000002L
++#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF2_SOCCLK_MASK 0x00000004L
++#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF3_SOCCLK_MASK 0x00000008L
++#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF4_SOCCLK_MASK 0x00000010L
++#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF5_SOCCLK_MASK 0x00000020L
++#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF6_SOCCLK_MASK 0x00000040L
++#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF7_SOCCLK_MASK 0x00000080L
++#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF8_SOCCLK_MASK 0x00000100L
++#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF9_SOCCLK_MASK 0x00000200L
++#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF10_SOCCLK_MASK 0x00000400L
++#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF11_SOCCLK_MASK 0x00000800L
++#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF12_SOCCLK_MASK 0x00001000L
++#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF13_SOCCLK_MASK 0x00002000L
++#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF14_SOCCLK_MASK 0x00004000L
++#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF15_SOCCLK_MASK 0x00008000L
++#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_PF_SOCCLK_MASK 0x00010000L
++//SYSHUB_HP_TIMER_SOCCLK
++#define SYSHUB_HP_TIMER_SOCCLK__SYSHUB_HP_TIMER_SOCCLK__SHIFT 0x0
++#define SYSHUB_HP_TIMER_SOCCLK__SYSHUB_HP_TIMER_SOCCLK_MASK 0xFFFFFFFFL
++//SYSHUB_MGCG_CTRL_SOCCLK
++#define SYSHUB_MGCG_CTRL_SOCCLK__SYSHUB_MGCG_EN_SOCCLK__SHIFT 0x0
++#define SYSHUB_MGCG_CTRL_SOCCLK__SYSHUB_MGCG_MODE_SOCCLK__SHIFT 0x1
++#define SYSHUB_MGCG_CTRL_SOCCLK__SYSHUB_MGCG_HYSTERESIS_SOCCLK__SHIFT 0x2
++#define SYSHUB_MGCG_CTRL_SOCCLK__SYSHUB_MGCG_HST_DIS_SOCCLK__SHIFT 0xa
++#define SYSHUB_MGCG_CTRL_SOCCLK__SYSHUB_MGCG_DMA_DIS_SOCCLK__SHIFT 0xb
++#define SYSHUB_MGCG_CTRL_SOCCLK__SYSHUB_MGCG_REG_DIS_SOCCLK__SHIFT 0xc
++#define SYSHUB_MGCG_CTRL_SOCCLK__SYSHUB_MGCG_AER_DIS_SOCCLK__SHIFT 0xd
++#define SYSHUB_MGCG_CTRL_SOCCLK__SYSHUB_MGCG_EN_SOCCLK_MASK 0x00000001L
++#define SYSHUB_MGCG_CTRL_SOCCLK__SYSHUB_MGCG_MODE_SOCCLK_MASK 0x00000002L
++#define SYSHUB_MGCG_CTRL_SOCCLK__SYSHUB_MGCG_HYSTERESIS_SOCCLK_MASK 0x000003FCL
++#define SYSHUB_MGCG_CTRL_SOCCLK__SYSHUB_MGCG_HST_DIS_SOCCLK_MASK 0x00000400L
++#define SYSHUB_MGCG_CTRL_SOCCLK__SYSHUB_MGCG_DMA_DIS_SOCCLK_MASK 0x00000800L
++#define SYSHUB_MGCG_CTRL_SOCCLK__SYSHUB_MGCG_REG_DIS_SOCCLK_MASK 0x00001000L
++#define SYSHUB_MGCG_CTRL_SOCCLK__SYSHUB_MGCG_AER_DIS_SOCCLK_MASK 0x00002000L
++//SYSHUB_CPF_DOORBELL_RS_RESET_SOCCLK
++#define SYSHUB_CPF_DOORBELL_RS_RESET_SOCCLK__SYSHUB_CPF_DOORBELL_RS_RESET_SOCCLK__SHIFT 0x0
++#define SYSHUB_CPF_DOORBELL_RS_RESET_SOCCLK__SYSHUB_CPF_DOORBELL_RS_RESET_SOCCLK_MASK 0x00000001L
++//SYSHUB_SCRATCH_SOCCLK
++#define SYSHUB_SCRATCH_SOCCLK__SCRATCH_SOCCLK__SHIFT 0x0
++#define SYSHUB_SCRATCH_SOCCLK__SCRATCH_SOCCLK_MASK 0xFFFFFFFFL
++//SYSHUB_CL_MASK_SOCCLK
++#define SYSHUB_CL_MASK_SOCCLK__MP1DRAM_MASK_DIS_SOCCLK__SHIFT 0x1
++#define SYSHUB_CL_MASK_SOCCLK__MP1_MASK_DIS_SOCCLK__SHIFT 0x2
++#define SYSHUB_CL_MASK_SOCCLK__MP1DRAM_MASK_DIS_SOCCLK_MASK 0x00000002L
++#define SYSHUB_CL_MASK_SOCCLK__MP1_MASK_DIS_SOCCLK_MASK 0x00000004L
++//SYSHUB_HANG_CNTL_SOCCLK
++#define SYSHUB_HANG_CNTL_SOCCLK__DROP_UNEXPECTED_RESP_DIS_SOCCLK_SW0_CL0__SHIFT 0x0
++#define SYSHUB_HANG_CNTL_SOCCLK__DROP_UNEXPECTED_RESP_DIS_SOCCLK_SW0_CL1__SHIFT 0x1
++#define SYSHUB_HANG_CNTL_SOCCLK__DROP_UNEXPECTED_RESP_DIS_SOCCLK_SW1_CL0__SHIFT 0x2
++#define SYSHUB_HANG_CNTL_SOCCLK__DROP_UNEXPECTED_RESP_DIS_SOCCLK_SW2_CL0__SHIFT 0x3
++#define SYSHUB_HANG_CNTL_SOCCLK__DROP_UNEXPECTED_RESP_DIS_SOCCLK_SW2_CL1__SHIFT 0x4
++#define SYSHUB_HANG_CNTL_SOCCLK__DROP_UNEXPECTED_RESP_DIS_SOCCLK_SW2_CL2__SHIFT 0x5
++#define SYSHUB_HANG_CNTL_SOCCLK__DROP_UNEXPECTED_RESP_DIS_SOCCLK_SW2_CL3__SHIFT 0x6
++#define SYSHUB_HANG_CNTL_SOCCLK__DROP_UNEXPECTED_RESP_DIS_SOCCLK_SW2_CL4__SHIFT 0x7
++#define SYSHUB_HANG_CNTL_SOCCLK__DROP_UNEXPECTED_RESP_DIS_SOCCLK_SW0_CL0_MASK 0x00000001L
++#define SYSHUB_HANG_CNTL_SOCCLK__DROP_UNEXPECTED_RESP_DIS_SOCCLK_SW0_CL1_MASK 0x00000002L
++#define SYSHUB_HANG_CNTL_SOCCLK__DROP_UNEXPECTED_RESP_DIS_SOCCLK_SW1_CL0_MASK 0x00000004L
++#define SYSHUB_HANG_CNTL_SOCCLK__DROP_UNEXPECTED_RESP_DIS_SOCCLK_SW2_CL0_MASK 0x00000008L
++#define SYSHUB_HANG_CNTL_SOCCLK__DROP_UNEXPECTED_RESP_DIS_SOCCLK_SW2_CL1_MASK 0x00000010L
++#define SYSHUB_HANG_CNTL_SOCCLK__DROP_UNEXPECTED_RESP_DIS_SOCCLK_SW2_CL2_MASK 0x00000020L
++#define SYSHUB_HANG_CNTL_SOCCLK__DROP_UNEXPECTED_RESP_DIS_SOCCLK_SW2_CL3_MASK 0x00000040L
++#define SYSHUB_HANG_CNTL_SOCCLK__DROP_UNEXPECTED_RESP_DIS_SOCCLK_SW2_CL4_MASK 0x00000080L
++//HST_CLK0_SW0_CL0_CNTL
++#define HST_CLK0_SW0_CL0_CNTL__FLR_ON_RS_RESET_EN__SHIFT 0x0
++#define HST_CLK0_SW0_CL0_CNTL__LKRST_ON_RS_RESET_EN__SHIFT 0x1
++#define HST_CLK0_SW0_CL0_CNTL__FLR_ON_RS_RESET_EN_MASK 0x00000001L
++#define HST_CLK0_SW0_CL0_CNTL__LKRST_ON_RS_RESET_EN_MASK 0x00000002L
++//HST_CLK0_SW0_CL1_CNTL
++#define HST_CLK0_SW0_CL1_CNTL__FLR_ON_RS_RESET_EN__SHIFT 0x0
++#define HST_CLK0_SW0_CL1_CNTL__LKRST_ON_RS_RESET_EN__SHIFT 0x1
++#define HST_CLK0_SW0_CL1_CNTL__FLR_ON_RS_RESET_EN_MASK 0x00000001L
++#define HST_CLK0_SW0_CL1_CNTL__LKRST_ON_RS_RESET_EN_MASK 0x00000002L
++//HST_CLK0_SW1_CL0_CNTL
++#define HST_CLK0_SW1_CL0_CNTL__FLR_ON_RS_RESET_EN__SHIFT 0x0
++#define HST_CLK0_SW1_CL0_CNTL__LKRST_ON_RS_RESET_EN__SHIFT 0x1
++#define HST_CLK0_SW1_CL0_CNTL__FLR_ON_RS_RESET_EN_MASK 0x00000001L
++#define HST_CLK0_SW1_CL0_CNTL__LKRST_ON_RS_RESET_EN_MASK 0x00000002L
++//HST_CLK0_SW2_CL0_CNTL
++#define HST_CLK0_SW2_CL0_CNTL__FLR_ON_RS_RESET_EN__SHIFT 0x0
++#define HST_CLK0_SW2_CL0_CNTL__LKRST_ON_RS_RESET_EN__SHIFT 0x1
++#define HST_CLK0_SW2_CL0_CNTL__FLR_ON_RS_RESET_EN_MASK 0x00000001L
++#define HST_CLK0_SW2_CL0_CNTL__LKRST_ON_RS_RESET_EN_MASK 0x00000002L
++//HST_CLK0_SW2_CL1_CNTL
++#define HST_CLK0_SW2_CL1_CNTL__FLR_ON_RS_RESET_EN__SHIFT 0x0
++#define HST_CLK0_SW2_CL1_CNTL__LKRST_ON_RS_RESET_EN__SHIFT 0x1
++#define HST_CLK0_SW2_CL1_CNTL__FLR_ON_RS_RESET_EN_MASK 0x00000001L
++#define HST_CLK0_SW2_CL1_CNTL__LKRST_ON_RS_RESET_EN_MASK 0x00000002L
++//HST_CLK0_SW2_CL2_CNTL
++#define HST_CLK0_SW2_CL2_CNTL__FLR_ON_RS_RESET_EN__SHIFT 0x0
++#define HST_CLK0_SW2_CL2_CNTL__LKRST_ON_RS_RESET_EN__SHIFT 0x1
++#define HST_CLK0_SW2_CL2_CNTL__FLR_ON_RS_RESET_EN_MASK 0x00000001L
++#define HST_CLK0_SW2_CL2_CNTL__LKRST_ON_RS_RESET_EN_MASK 0x00000002L
++//HST_CLK0_SW2_CL3_CNTL
++#define HST_CLK0_SW2_CL3_CNTL__FLR_ON_RS_RESET_EN__SHIFT 0x0
++#define HST_CLK0_SW2_CL3_CNTL__LKRST_ON_RS_RESET_EN__SHIFT 0x1
++#define HST_CLK0_SW2_CL3_CNTL__FLR_ON_RS_RESET_EN_MASK 0x00000001L
++#define HST_CLK0_SW2_CL3_CNTL__LKRST_ON_RS_RESET_EN_MASK 0x00000002L
++//HST_CLK0_SW2_CL4_CNTL
++#define HST_CLK0_SW2_CL4_CNTL__FLR_ON_RS_RESET_EN__SHIFT 0x0
++#define HST_CLK0_SW2_CL4_CNTL__LKRST_ON_RS_RESET_EN__SHIFT 0x1
++#define HST_CLK0_SW2_CL4_CNTL__FLR_ON_RS_RESET_EN_MASK 0x00000001L
++#define HST_CLK0_SW2_CL4_CNTL__LKRST_ON_RS_RESET_EN_MASK 0x00000002L
++//DMA_CLK0_SW0_SYSHUB_QOS_CNTL
++#define DMA_CLK0_SW0_SYSHUB_QOS_CNTL__QOS_CNTL_MODE__SHIFT 0x0
++#define DMA_CLK0_SW0_SYSHUB_QOS_CNTL__QOS_MAX_VALUE__SHIFT 0x1
++#define DMA_CLK0_SW0_SYSHUB_QOS_CNTL__QOS_MIN_VALUE__SHIFT 0x5
++#define DMA_CLK0_SW0_SYSHUB_QOS_CNTL__QOS_CNTL_MODE_MASK 0x00000001L
++#define DMA_CLK0_SW0_SYSHUB_QOS_CNTL__QOS_MAX_VALUE_MASK 0x0000001EL
++#define DMA_CLK0_SW0_SYSHUB_QOS_CNTL__QOS_MIN_VALUE_MASK 0x000001E0L
++//DMA_CLK0_SW0_CL0_CNTL
++#define DMA_CLK0_SW0_CL0_CNTL__FLR_ON_RS_RESET_EN__SHIFT 0x0
++#define DMA_CLK0_SW0_CL0_CNTL__LKRST_ON_RS_RESET_EN__SHIFT 0x1
++#define DMA_CLK0_SW0_CL0_CNTL__QOS_STATIC_OVERRIDE_EN__SHIFT 0x8
++#define DMA_CLK0_SW0_CL0_CNTL__QOS_STATIC_OVERRIDE_VALUE__SHIFT 0x9
++#define DMA_CLK0_SW0_CL0_CNTL__READ_WRR_WEIGHT__SHIFT 0x10
++#define DMA_CLK0_SW0_CL0_CNTL__WRITE_WRR_WEIGHT__SHIFT 0x18
++#define DMA_CLK0_SW0_CL0_CNTL__FLR_ON_RS_RESET_EN_MASK 0x00000001L
++#define DMA_CLK0_SW0_CL0_CNTL__LKRST_ON_RS_RESET_EN_MASK 0x00000002L
++#define DMA_CLK0_SW0_CL0_CNTL__QOS_STATIC_OVERRIDE_EN_MASK 0x00000100L
++#define DMA_CLK0_SW0_CL0_CNTL__QOS_STATIC_OVERRIDE_VALUE_MASK 0x00001E00L
++#define DMA_CLK0_SW0_CL0_CNTL__READ_WRR_WEIGHT_MASK 0x00FF0000L
++#define DMA_CLK0_SW0_CL0_CNTL__WRITE_WRR_WEIGHT_MASK 0xFF000000L
++//DMA_CLK0_SW0_CL1_CNTL
++#define DMA_CLK0_SW0_CL1_CNTL__FLR_ON_RS_RESET_EN__SHIFT 0x0
++#define DMA_CLK0_SW0_CL1_CNTL__LKRST_ON_RS_RESET_EN__SHIFT 0x1
++#define DMA_CLK0_SW0_CL1_CNTL__QOS_STATIC_OVERRIDE_EN__SHIFT 0x8
++#define DMA_CLK0_SW0_CL1_CNTL__QOS_STATIC_OVERRIDE_VALUE__SHIFT 0x9
++#define DMA_CLK0_SW0_CL1_CNTL__READ_WRR_WEIGHT__SHIFT 0x10
++#define DMA_CLK0_SW0_CL1_CNTL__WRITE_WRR_WEIGHT__SHIFT 0x18
++#define DMA_CLK0_SW0_CL1_CNTL__FLR_ON_RS_RESET_EN_MASK 0x00000001L
++#define DMA_CLK0_SW0_CL1_CNTL__LKRST_ON_RS_RESET_EN_MASK 0x00000002L
++#define DMA_CLK0_SW0_CL1_CNTL__QOS_STATIC_OVERRIDE_EN_MASK 0x00000100L
++#define DMA_CLK0_SW0_CL1_CNTL__QOS_STATIC_OVERRIDE_VALUE_MASK 0x00001E00L
++#define DMA_CLK0_SW0_CL1_CNTL__READ_WRR_WEIGHT_MASK 0x00FF0000L
++#define DMA_CLK0_SW0_CL1_CNTL__WRITE_WRR_WEIGHT_MASK 0xFF000000L
++//SYSHUB_DS_CTRL_SHUBCLK
++#define SYSHUB_DS_CTRL_SHUBCLK__SYSHUB_SHUBCLK_DEEPSLEEP_ALLOW_ENABLE__SHIFT 0x1c
++#define SYSHUB_DS_CTRL_SHUBCLK__SYSHUB_SHUBCLK_DS_EN__SHIFT 0x1f
++#define SYSHUB_DS_CTRL_SHUBCLK__SYSHUB_SHUBCLK_DEEPSLEEP_ALLOW_ENABLE_MASK 0x10000000L
++#define SYSHUB_DS_CTRL_SHUBCLK__SYSHUB_SHUBCLK_DS_EN_MASK 0x80000000L
++//SYSHUB_DS_CTRL2_SHUBCLK
++#define SYSHUB_DS_CTRL2_SHUBCLK__SYSHUB_SHUBCLK_DS_TIMER__SHIFT 0x0
++#define SYSHUB_DS_CTRL2_SHUBCLK__SYSHUB_SHUBCLK_DS_TIMER_MASK 0x0000FFFFL
++//SYSHUB_BGEN_ENHANCEMENT_BYPASS_EN_SHUBCLK
++//SYSHUB_BGEN_ENHANCEMENT_IMM_EN_SHUBCLK
++//SYSHUB_MGCG_CTRL_SHUBCLK
++#define SYSHUB_MGCG_CTRL_SHUBCLK__SYSHUB_MGCG_EN_SHUBCLK__SHIFT 0x0
++#define SYSHUB_MGCG_CTRL_SHUBCLK__SYSHUB_MGCG_MODE_SHUBCLK__SHIFT 0x1
++#define SYSHUB_MGCG_CTRL_SHUBCLK__SYSHUB_MGCG_HYSTERESIS_SHUBCLK__SHIFT 0x2
++#define SYSHUB_MGCG_CTRL_SHUBCLK__SYSHUB_MGCG_HST_DIS_SHUBCLK__SHIFT 0xa
++#define SYSHUB_MGCG_CTRL_SHUBCLK__SYSHUB_MGCG_DMA_DIS_SHUBCLK__SHIFT 0xb
++#define SYSHUB_MGCG_CTRL_SHUBCLK__SYSHUB_MGCG_REG_DIS_SHUBCLK__SHIFT 0xc
++#define SYSHUB_MGCG_CTRL_SHUBCLK__SYSHUB_MGCG_AER_DIS_SHUBCLK__SHIFT 0xd
++#define SYSHUB_MGCG_CTRL_SHUBCLK__SYSHUB_MGCG_EN_SHUBCLK_MASK 0x00000001L
++#define SYSHUB_MGCG_CTRL_SHUBCLK__SYSHUB_MGCG_MODE_SHUBCLK_MASK 0x00000002L
++#define SYSHUB_MGCG_CTRL_SHUBCLK__SYSHUB_MGCG_HYSTERESIS_SHUBCLK_MASK 0x000003FCL
++#define SYSHUB_MGCG_CTRL_SHUBCLK__SYSHUB_MGCG_HST_DIS_SHUBCLK_MASK 0x00000400L
++#define SYSHUB_MGCG_CTRL_SHUBCLK__SYSHUB_MGCG_DMA_DIS_SHUBCLK_MASK 0x00000800L
++#define SYSHUB_MGCG_CTRL_SHUBCLK__SYSHUB_MGCG_REG_DIS_SHUBCLK_MASK 0x00001000L
++#define SYSHUB_MGCG_CTRL_SHUBCLK__SYSHUB_MGCG_AER_DIS_SHUBCLK_MASK 0x00002000L
++//SYSHUB_SCRATCH_SHUBCLK
++#define SYSHUB_SCRATCH_SHUBCLK__SCRATCH_SHUBCLK__SHIFT 0x0
++#define SYSHUB_SCRATCH_SHUBCLK__SCRATCH_SHUBCLK_MASK 0xFFFFFFFFL
++//SYSHUB_SELECT_SHUBCLK
++#define SYSHUB_SELECT_SHUBCLK__SELECT_USB0__SHIFT 0x0
++#define SYSHUB_SELECT_SHUBCLK__SELECT_USB1__SHIFT 0x1
++#define SYSHUB_SELECT_SHUBCLK__SELECT_USB0_MASK 0x00000001L
++#define SYSHUB_SELECT_SHUBCLK__SELECT_USB1_MASK 0x00000002L
++//SYSHUB_SCRATCH_LCLK
++#define SYSHUB_SCRATCH_LCLK__SCRATCH_LCLK__SHIFT 0x0
++#define SYSHUB_SCRATCH_LCLK__SCRATCH_LCLK_MASK 0xFFFFFFFFL
++//NIC400_0_ASIB_0_FN_MOD
++#define NIC400_0_ASIB_0_FN_MOD__read_iss_override__SHIFT 0x0
++#define NIC400_0_ASIB_0_FN_MOD__write_iss_override__SHIFT 0x1
++#define NIC400_0_ASIB_0_FN_MOD__read_iss_override_MASK 0x00000001L
++#define NIC400_0_ASIB_0_FN_MOD__write_iss_override_MASK 0x00000002L
++//NIC400_0_AMIB_0_FN_MOD_BM_ISS
++#define NIC400_0_AMIB_0_FN_MOD_BM_ISS__read_iss_override__SHIFT 0x0
++#define NIC400_0_AMIB_0_FN_MOD_BM_ISS__write_iss_override__SHIFT 0x1
++#define NIC400_0_AMIB_0_FN_MOD_BM_ISS__read_iss_override_MASK 0x00000001L
++#define NIC400_0_AMIB_0_FN_MOD_BM_ISS__write_iss_override_MASK 0x00000002L
++//NIC400_0_AMIB_1_FN_MOD_BM_ISS
++#define NIC400_0_AMIB_1_FN_MOD_BM_ISS__read_iss_override__SHIFT 0x0
++#define NIC400_0_AMIB_1_FN_MOD_BM_ISS__write_iss_override__SHIFT 0x1
++#define NIC400_0_AMIB_1_FN_MOD_BM_ISS__read_iss_override_MASK 0x00000001L
++#define NIC400_0_AMIB_1_FN_MOD_BM_ISS__write_iss_override_MASK 0x00000002L
++//NIC400_2_ASIB_0_FN_MOD
++#define NIC400_2_ASIB_0_FN_MOD__read_iss_override__SHIFT 0x0
++#define NIC400_2_ASIB_0_FN_MOD__write_iss_override__SHIFT 0x1
++#define NIC400_2_ASIB_0_FN_MOD__read_iss_override_MASK 0x00000001L
++#define NIC400_2_ASIB_0_FN_MOD__write_iss_override_MASK 0x00000002L
++//NIC400_2_AMIB_0_FN_MOD_BM_ISS
++#define NIC400_2_AMIB_0_FN_MOD_BM_ISS__read_iss_override__SHIFT 0x0
++#define NIC400_2_AMIB_0_FN_MOD_BM_ISS__write_iss_override__SHIFT 0x1
++#define NIC400_2_AMIB_0_FN_MOD_BM_ISS__read_iss_override_MASK 0x00000001L
++#define NIC400_2_AMIB_0_FN_MOD_BM_ISS__write_iss_override_MASK 0x00000002L
++//NIC400_2_AMIB_1_FN_MOD_BM_ISS
++#define NIC400_2_AMIB_1_FN_MOD_BM_ISS__read_iss_override__SHIFT 0x0
++#define NIC400_2_AMIB_1_FN_MOD_BM_ISS__write_iss_override__SHIFT 0x1
++#define NIC400_2_AMIB_1_FN_MOD_BM_ISS__read_iss_override_MASK 0x00000001L
++#define NIC400_2_AMIB_1_FN_MOD_BM_ISS__write_iss_override_MASK 0x00000002L
++//NIC400_2_AMIB_2_FN_MOD_BM_ISS
++#define NIC400_2_AMIB_2_FN_MOD_BM_ISS__read_iss_override__SHIFT 0x0
++#define NIC400_2_AMIB_2_FN_MOD_BM_ISS__write_iss_override__SHIFT 0x1
++#define NIC400_2_AMIB_2_FN_MOD_BM_ISS__read_iss_override_MASK 0x00000001L
++#define NIC400_2_AMIB_2_FN_MOD_BM_ISS__write_iss_override_MASK 0x00000002L
++//NIC400_2_AMIB_3_FN_MOD_BM_ISS
++#define NIC400_2_AMIB_3_FN_MOD_BM_ISS__read_iss_override__SHIFT 0x0
++#define NIC400_2_AMIB_3_FN_MOD_BM_ISS__write_iss_override__SHIFT 0x1
++#define NIC400_2_AMIB_3_FN_MOD_BM_ISS__read_iss_override_MASK 0x00000001L
++#define NIC400_2_AMIB_3_FN_MOD_BM_ISS__write_iss_override_MASK 0x00000002L
++//NIC400_2_AMIB_4_FN_MOD_BM_ISS
++#define NIC400_2_AMIB_4_FN_MOD_BM_ISS__read_iss_override__SHIFT 0x0
++#define NIC400_2_AMIB_4_FN_MOD_BM_ISS__write_iss_override__SHIFT 0x1
++#define NIC400_2_AMIB_4_FN_MOD_BM_ISS__read_iss_override_MASK 0x00000001L
++#define NIC400_2_AMIB_4_FN_MOD_BM_ISS__write_iss_override_MASK 0x00000002L
++//NIC400_3_AMIB_0_FN_MOD_BM_ISS
++#define NIC400_3_AMIB_0_FN_MOD_BM_ISS__read_iss_override__SHIFT 0x0
++#define NIC400_3_AMIB_0_FN_MOD_BM_ISS__write_iss_override__SHIFT 0x1
++#define NIC400_3_AMIB_0_FN_MOD_BM_ISS__read_iss_override_MASK 0x00000001L
++#define NIC400_3_AMIB_0_FN_MOD_BM_ISS__write_iss_override_MASK 0x00000002L
++//NIC400_3_ASIB_0_FN_MOD
++#define NIC400_3_ASIB_0_FN_MOD__read_iss_override__SHIFT 0x0
++#define NIC400_3_ASIB_0_FN_MOD__write_iss_override__SHIFT 0x1
++#define NIC400_3_ASIB_0_FN_MOD__read_iss_override_MASK 0x00000001L
++#define NIC400_3_ASIB_0_FN_MOD__write_iss_override_MASK 0x00000002L
++//NIC400_3_ASIB_0_QOS_CNTL
++#define NIC400_3_ASIB_0_QOS_CNTL__en_aw_rate__SHIFT 0x0
++#define NIC400_3_ASIB_0_QOS_CNTL__en_ar_rate__SHIFT 0x1
++#define NIC400_3_ASIB_0_QOS_CNTL__en_awar_rate__SHIFT 0x2
++#define NIC400_3_ASIB_0_QOS_CNTL__en_aw_fc__SHIFT 0x3
++#define NIC400_3_ASIB_0_QOS_CNTL__en_ar_fc__SHIFT 0x4
++#define NIC400_3_ASIB_0_QOS_CNTL__en_aw_ot__SHIFT 0x5
++#define NIC400_3_ASIB_0_QOS_CNTL__en_ar_ot__SHIFT 0x6
++#define NIC400_3_ASIB_0_QOS_CNTL__en_awar_ot__SHIFT 0x7
++#define NIC400_3_ASIB_0_QOS_CNTL__mode_aw_fc__SHIFT 0x10
++#define NIC400_3_ASIB_0_QOS_CNTL__mode_ar_fc__SHIFT 0x14
++#define NIC400_3_ASIB_0_QOS_CNTL__en_aw_rate_MASK 0x00000001L
++#define NIC400_3_ASIB_0_QOS_CNTL__en_ar_rate_MASK 0x00000002L
++#define NIC400_3_ASIB_0_QOS_CNTL__en_awar_rate_MASK 0x00000004L
++#define NIC400_3_ASIB_0_QOS_CNTL__en_aw_fc_MASK 0x00000008L
++#define NIC400_3_ASIB_0_QOS_CNTL__en_ar_fc_MASK 0x00000010L
++#define NIC400_3_ASIB_0_QOS_CNTL__en_aw_ot_MASK 0x00000020L
++#define NIC400_3_ASIB_0_QOS_CNTL__en_ar_ot_MASK 0x00000040L
++#define NIC400_3_ASIB_0_QOS_CNTL__en_awar_ot_MASK 0x00000080L
++#define NIC400_3_ASIB_0_QOS_CNTL__mode_aw_fc_MASK 0x00010000L
++#define NIC400_3_ASIB_0_QOS_CNTL__mode_ar_fc_MASK 0x00100000L
++//NIC400_3_ASIB_0_MAX_OT
++#define NIC400_3_ASIB_0_MAX_OT__aw_max_otf__SHIFT 0x0
++#define NIC400_3_ASIB_0_MAX_OT__aw_max_oti__SHIFT 0x8
++#define NIC400_3_ASIB_0_MAX_OT__ar_max_otf__SHIFT 0x10
++#define NIC400_3_ASIB_0_MAX_OT__ar_max_oti__SHIFT 0x18
++#define NIC400_3_ASIB_0_MAX_OT__aw_max_otf_MASK 0x000000FFL
++#define NIC400_3_ASIB_0_MAX_OT__aw_max_oti_MASK 0x00003F00L
++#define NIC400_3_ASIB_0_MAX_OT__ar_max_otf_MASK 0x00FF0000L
++#define NIC400_3_ASIB_0_MAX_OT__ar_max_oti_MASK 0x3F000000L
++//NIC400_3_ASIB_0_MAX_COMB_OT
++#define NIC400_3_ASIB_0_MAX_COMB_OT__awar_max_otf__SHIFT 0x0
++#define NIC400_3_ASIB_0_MAX_COMB_OT__awar_max_oti__SHIFT 0x8
++#define NIC400_3_ASIB_0_MAX_COMB_OT__awar_max_otf_MASK 0x000000FFL
++#define NIC400_3_ASIB_0_MAX_COMB_OT__awar_max_oti_MASK 0x00007F00L
++//NIC400_3_ASIB_0_AW_P
++#define NIC400_3_ASIB_0_AW_P__aw_p__SHIFT 0x18
++#define NIC400_3_ASIB_0_AW_P__aw_p_MASK 0xFF000000L
++//NIC400_3_ASIB_0_AW_B
++#define NIC400_3_ASIB_0_AW_B__aw_b__SHIFT 0x0
++#define NIC400_3_ASIB_0_AW_B__aw_b_MASK 0x0000FFFFL
++//NIC400_3_ASIB_0_AW_R
++#define NIC400_3_ASIB_0_AW_R__aw_r__SHIFT 0x14
++#define NIC400_3_ASIB_0_AW_R__aw_r_MASK 0xFFF00000L
++//NIC400_3_ASIB_0_AR_P
++#define NIC400_3_ASIB_0_AR_P__ar_p__SHIFT 0x18
++#define NIC400_3_ASIB_0_AR_P__ar_p_MASK 0xFF000000L
++//NIC400_3_ASIB_0_AR_B
++#define NIC400_3_ASIB_0_AR_B__ar_b__SHIFT 0x0
++#define NIC400_3_ASIB_0_AR_B__ar_b_MASK 0x0000FFFFL
++//NIC400_3_ASIB_0_AR_R
++#define NIC400_3_ASIB_0_AR_R__ar_r__SHIFT 0x14
++#define NIC400_3_ASIB_0_AR_R__ar_r_MASK 0xFFF00000L
++//NIC400_3_ASIB_0_TARGET_FC
++#define NIC400_3_ASIB_0_TARGET_FC__aw_tgt_latency__SHIFT 0x0
++#define NIC400_3_ASIB_0_TARGET_FC__ar_tgt_latency__SHIFT 0x10
++#define NIC400_3_ASIB_0_TARGET_FC__aw_tgt_latency_MASK 0x00000FFFL
++#define NIC400_3_ASIB_0_TARGET_FC__ar_tgt_latency_MASK 0x0FFF0000L
++//NIC400_3_ASIB_0_KI_FC
++#define NIC400_3_ASIB_0_KI_FC__aw_tgt_latency__SHIFT 0x0
++#define NIC400_3_ASIB_0_KI_FC__ar_tgt_latency__SHIFT 0x8
++#define NIC400_3_ASIB_0_KI_FC__aw_tgt_latency_MASK 0x00000007L
++#define NIC400_3_ASIB_0_KI_FC__ar_tgt_latency_MASK 0x00000700L
++//NIC400_3_ASIB_0_QOS_RANGE
++#define NIC400_3_ASIB_0_QOS_RANGE__aw_min_qos__SHIFT 0x0
++#define NIC400_3_ASIB_0_QOS_RANGE__aw_max_qos__SHIFT 0x8
++#define NIC400_3_ASIB_0_QOS_RANGE__ar_min_qos__SHIFT 0x10
++#define NIC400_3_ASIB_0_QOS_RANGE__ar_max_qos__SHIFT 0x18
++#define NIC400_3_ASIB_0_QOS_RANGE__aw_min_qos_MASK 0x0000000FL
++#define NIC400_3_ASIB_0_QOS_RANGE__aw_max_qos_MASK 0x00000F00L
++#define NIC400_3_ASIB_0_QOS_RANGE__ar_min_qos_MASK 0x000F0000L
++#define NIC400_3_ASIB_0_QOS_RANGE__ar_max_qos_MASK 0x0F000000L
++//NIC400_3_ASIB_1_FN_MOD
++#define NIC400_3_ASIB_1_FN_MOD__read_iss_override__SHIFT 0x0
++#define NIC400_3_ASIB_1_FN_MOD__write_iss_override__SHIFT 0x1
++#define NIC400_3_ASIB_1_FN_MOD__read_iss_override_MASK 0x00000001L
++#define NIC400_3_ASIB_1_FN_MOD__write_iss_override_MASK 0x00000002L
++//NIC400_3_ASIB_1_QOS_CNTL
++#define NIC400_3_ASIB_1_QOS_CNTL__en_aw_rate__SHIFT 0x0
++#define NIC400_3_ASIB_1_QOS_CNTL__en_ar_rate__SHIFT 0x1
++#define NIC400_3_ASIB_1_QOS_CNTL__en_awar_rate__SHIFT 0x2
++#define NIC400_3_ASIB_1_QOS_CNTL__en_aw_fc__SHIFT 0x3
++#define NIC400_3_ASIB_1_QOS_CNTL__en_ar_fc__SHIFT 0x4
++#define NIC400_3_ASIB_1_QOS_CNTL__en_aw_ot__SHIFT 0x5
++#define NIC400_3_ASIB_1_QOS_CNTL__en_ar_ot__SHIFT 0x6
++#define NIC400_3_ASIB_1_QOS_CNTL__en_awar_ot__SHIFT 0x7
++#define NIC400_3_ASIB_1_QOS_CNTL__mode_aw_fc__SHIFT 0x10
++#define NIC400_3_ASIB_1_QOS_CNTL__mode_ar_fc__SHIFT 0x14
++#define NIC400_3_ASIB_1_QOS_CNTL__en_aw_rate_MASK 0x00000001L
++#define NIC400_3_ASIB_1_QOS_CNTL__en_ar_rate_MASK 0x00000002L
++#define NIC400_3_ASIB_1_QOS_CNTL__en_awar_rate_MASK 0x00000004L
++#define NIC400_3_ASIB_1_QOS_CNTL__en_aw_fc_MASK 0x00000008L
++#define NIC400_3_ASIB_1_QOS_CNTL__en_ar_fc_MASK 0x00000010L
++#define NIC400_3_ASIB_1_QOS_CNTL__en_aw_ot_MASK 0x00000020L
++#define NIC400_3_ASIB_1_QOS_CNTL__en_ar_ot_MASK 0x00000040L
++#define NIC400_3_ASIB_1_QOS_CNTL__en_awar_ot_MASK 0x00000080L
++#define NIC400_3_ASIB_1_QOS_CNTL__mode_aw_fc_MASK 0x00010000L
++#define NIC400_3_ASIB_1_QOS_CNTL__mode_ar_fc_MASK 0x00100000L
++//NIC400_3_ASIB_1_MAX_OT
++#define NIC400_3_ASIB_1_MAX_OT__aw_max_otf__SHIFT 0x0
++#define NIC400_3_ASIB_1_MAX_OT__aw_max_oti__SHIFT 0x8
++#define NIC400_3_ASIB_1_MAX_OT__ar_max_otf__SHIFT 0x10
++#define NIC400_3_ASIB_1_MAX_OT__ar_max_oti__SHIFT 0x18
++#define NIC400_3_ASIB_1_MAX_OT__aw_max_otf_MASK 0x000000FFL
++#define NIC400_3_ASIB_1_MAX_OT__aw_max_oti_MASK 0x00003F00L
++#define NIC400_3_ASIB_1_MAX_OT__ar_max_otf_MASK 0x00FF0000L
++#define NIC400_3_ASIB_1_MAX_OT__ar_max_oti_MASK 0x3F000000L
++//NIC400_3_ASIB_1_MAX_COMB_OT
++#define NIC400_3_ASIB_1_MAX_COMB_OT__awar_max_otf__SHIFT 0x0
++#define NIC400_3_ASIB_1_MAX_COMB_OT__awar_max_oti__SHIFT 0x8
++#define NIC400_3_ASIB_1_MAX_COMB_OT__awar_max_otf_MASK 0x000000FFL
++#define NIC400_3_ASIB_1_MAX_COMB_OT__awar_max_oti_MASK 0x00007F00L
++//NIC400_3_ASIB_1_AW_P
++#define NIC400_3_ASIB_1_AW_P__aw_p__SHIFT 0x18
++#define NIC400_3_ASIB_1_AW_P__aw_p_MASK 0xFF000000L
++//NIC400_3_ASIB_1_AW_B
++#define NIC400_3_ASIB_1_AW_B__aw_b__SHIFT 0x0
++#define NIC400_3_ASIB_1_AW_B__aw_b_MASK 0x0000FFFFL
++//NIC400_3_ASIB_1_AW_R
++#define NIC400_3_ASIB_1_AW_R__aw_r__SHIFT 0x14
++#define NIC400_3_ASIB_1_AW_R__aw_r_MASK 0xFFF00000L
++//NIC400_3_ASIB_1_AR_P
++#define NIC400_3_ASIB_1_AR_P__ar_p__SHIFT 0x18
++#define NIC400_3_ASIB_1_AR_P__ar_p_MASK 0xFF000000L
++//NIC400_3_ASIB_1_AR_B
++#define NIC400_3_ASIB_1_AR_B__ar_b__SHIFT 0x0
++#define NIC400_3_ASIB_1_AR_B__ar_b_MASK 0x0000FFFFL
++//NIC400_3_ASIB_1_AR_R
++#define NIC400_3_ASIB_1_AR_R__ar_r__SHIFT 0x14
++#define NIC400_3_ASIB_1_AR_R__ar_r_MASK 0xFFF00000L
++//NIC400_3_ASIB_1_TARGET_FC
++#define NIC400_3_ASIB_1_TARGET_FC__aw_tgt_latency__SHIFT 0x0
++#define NIC400_3_ASIB_1_TARGET_FC__ar_tgt_latency__SHIFT 0x10
++#define NIC400_3_ASIB_1_TARGET_FC__aw_tgt_latency_MASK 0x00000FFFL
++#define NIC400_3_ASIB_1_TARGET_FC__ar_tgt_latency_MASK 0x0FFF0000L
++//NIC400_3_ASIB_1_KI_FC
++#define NIC400_3_ASIB_1_KI_FC__aw_tgt_latency__SHIFT 0x0
++#define NIC400_3_ASIB_1_KI_FC__ar_tgt_latency__SHIFT 0x8
++#define NIC400_3_ASIB_1_KI_FC__aw_tgt_latency_MASK 0x00000007L
++#define NIC400_3_ASIB_1_KI_FC__ar_tgt_latency_MASK 0x00000700L
++//NIC400_3_ASIB_1_QOS_RANGE
++#define NIC400_3_ASIB_1_QOS_RANGE__aw_min_qos__SHIFT 0x0
++#define NIC400_3_ASIB_1_QOS_RANGE__aw_max_qos__SHIFT 0x8
++#define NIC400_3_ASIB_1_QOS_RANGE__ar_min_qos__SHIFT 0x10
++#define NIC400_3_ASIB_1_QOS_RANGE__ar_max_qos__SHIFT 0x18
++#define NIC400_3_ASIB_1_QOS_RANGE__aw_min_qos_MASK 0x0000000FL
++#define NIC400_3_ASIB_1_QOS_RANGE__aw_max_qos_MASK 0x00000F00L
++#define NIC400_3_ASIB_1_QOS_RANGE__ar_min_qos_MASK 0x000F0000L
++#define NIC400_3_ASIB_1_QOS_RANGE__ar_max_qos_MASK 0x0F000000L
++
++
++// addressBlock: nbio_nbif0_nbif_sion_SIONDEC
++//SION_CL0_RdRsp_BurstTarget_REG0
++#define SION_CL0_RdRsp_BurstTarget_REG0__RdRsp_BurstTarget_31_0__SHIFT 0x0
++#define SION_CL0_RdRsp_BurstTarget_REG0__RdRsp_BurstTarget_31_0_MASK 0xFFFFFFFFL
++//SION_CL0_RdRsp_BurstTarget_REG1
++#define SION_CL0_RdRsp_BurstTarget_REG1__RdRsp_BurstTarget_63_32__SHIFT 0x0
++#define SION_CL0_RdRsp_BurstTarget_REG1__RdRsp_BurstTarget_63_32_MASK 0xFFFFFFFFL
++//SION_CL0_RdRsp_TimeSlot_REG0
++#define SION_CL0_RdRsp_TimeSlot_REG0__RdRsp_TimeSlot_31_0__SHIFT 0x0
++#define SION_CL0_RdRsp_TimeSlot_REG0__RdRsp_TimeSlot_31_0_MASK 0xFFFFFFFFL
++//SION_CL0_RdRsp_TimeSlot_REG1
++#define SION_CL0_RdRsp_TimeSlot_REG1__RdRsp_TimeSlot_63_32__SHIFT 0x0
++#define SION_CL0_RdRsp_TimeSlot_REG1__RdRsp_TimeSlot_63_32_MASK 0xFFFFFFFFL
++//SION_CL0_WrRsp_BurstTarget_REG0
++#define SION_CL0_WrRsp_BurstTarget_REG0__WrRsp_BurstTarget_31_0__SHIFT 0x0
++#define SION_CL0_WrRsp_BurstTarget_REG0__WrRsp_BurstTarget_31_0_MASK 0xFFFFFFFFL
++//SION_CL0_WrRsp_BurstTarget_REG1
++#define SION_CL0_WrRsp_BurstTarget_REG1__WrRsp_BurstTarget_63_32__SHIFT 0x0
++#define SION_CL0_WrRsp_BurstTarget_REG1__WrRsp_BurstTarget_63_32_MASK 0xFFFFFFFFL
++//SION_CL0_WrRsp_TimeSlot_REG0
++#define SION_CL0_WrRsp_TimeSlot_REG0__WrRsp_TimeSlot_31_0__SHIFT 0x0
++#define SION_CL0_WrRsp_TimeSlot_REG0__WrRsp_TimeSlot_31_0_MASK 0xFFFFFFFFL
++//SION_CL0_WrRsp_TimeSlot_REG1
++#define SION_CL0_WrRsp_TimeSlot_REG1__WrRsp_TimeSlot_63_32__SHIFT 0x0
++#define SION_CL0_WrRsp_TimeSlot_REG1__WrRsp_TimeSlot_63_32_MASK 0xFFFFFFFFL
++//SION_CL0_Req_BurstTarget_REG0
++#define SION_CL0_Req_BurstTarget_REG0__Req_BurstTarget_31_0__SHIFT 0x0
++#define SION_CL0_Req_BurstTarget_REG0__Req_BurstTarget_31_0_MASK 0xFFFFFFFFL
++//SION_CL0_Req_BurstTarget_REG1
++#define SION_CL0_Req_BurstTarget_REG1__Req_BurstTarget_63_32__SHIFT 0x0
++#define SION_CL0_Req_BurstTarget_REG1__Req_BurstTarget_63_32_MASK 0xFFFFFFFFL
++//SION_CL0_Req_TimeSlot_REG0
++#define SION_CL0_Req_TimeSlot_REG0__Req_TimeSlot_31_0__SHIFT 0x0
++#define SION_CL0_Req_TimeSlot_REG0__Req_TimeSlot_31_0_MASK 0xFFFFFFFFL
++//SION_CL0_Req_TimeSlot_REG1
++#define SION_CL0_Req_TimeSlot_REG1__Req_TimeSlot_63_32__SHIFT 0x0
++#define SION_CL0_Req_TimeSlot_REG1__Req_TimeSlot_63_32_MASK 0xFFFFFFFFL
++//SION_CL0_ReqPoolCredit_Alloc_REG0
++#define SION_CL0_ReqPoolCredit_Alloc_REG0__ReqPoolCredit_Alloc_31_0__SHIFT 0x0
++#define SION_CL0_ReqPoolCredit_Alloc_REG0__ReqPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL
++//SION_CL0_ReqPoolCredit_Alloc_REG1
++#define SION_CL0_ReqPoolCredit_Alloc_REG1__ReqPoolCredit_Alloc_63_32__SHIFT 0x0
++#define SION_CL0_ReqPoolCredit_Alloc_REG1__ReqPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL
++//SION_CL0_DataPoolCredit_Alloc_REG0
++#define SION_CL0_DataPoolCredit_Alloc_REG0__DataPoolCredit_Alloc_31_0__SHIFT 0x0
++#define SION_CL0_DataPoolCredit_Alloc_REG0__DataPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL
++//SION_CL0_DataPoolCredit_Alloc_REG1
++#define SION_CL0_DataPoolCredit_Alloc_REG1__DataPoolCredit_Alloc_63_32__SHIFT 0x0
++#define SION_CL0_DataPoolCredit_Alloc_REG1__DataPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL
++//SION_CL0_RdRspPoolCredit_Alloc_REG0
++#define SION_CL0_RdRspPoolCredit_Alloc_REG0__RdRspPoolCredit_Alloc_31_0__SHIFT 0x0
++#define SION_CL0_RdRspPoolCredit_Alloc_REG0__RdRspPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL
++//SION_CL0_RdRspPoolCredit_Alloc_REG1
++#define SION_CL0_RdRspPoolCredit_Alloc_REG1__RdRspPoolCredit_Alloc_63_32__SHIFT 0x0
++#define SION_CL0_RdRspPoolCredit_Alloc_REG1__RdRspPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL
++//SION_CL0_WrRspPoolCredit_Alloc_REG0
++#define SION_CL0_WrRspPoolCredit_Alloc_REG0__WrRspPoolCredit_Alloc_31_0__SHIFT 0x0
++#define SION_CL0_WrRspPoolCredit_Alloc_REG0__WrRspPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL
++//SION_CL0_WrRspPoolCredit_Alloc_REG1
++#define SION_CL0_WrRspPoolCredit_Alloc_REG1__WrRspPoolCredit_Alloc_63_32__SHIFT 0x0
++#define SION_CL0_WrRspPoolCredit_Alloc_REG1__WrRspPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL
++//SION_CL1_RdRsp_BurstTarget_REG0
++#define SION_CL1_RdRsp_BurstTarget_REG0__RdRsp_BurstTarget_31_0__SHIFT 0x0
++#define SION_CL1_RdRsp_BurstTarget_REG0__RdRsp_BurstTarget_31_0_MASK 0xFFFFFFFFL
++//SION_CL1_RdRsp_BurstTarget_REG1
++#define SION_CL1_RdRsp_BurstTarget_REG1__RdRsp_BurstTarget_63_32__SHIFT 0x0
++#define SION_CL1_RdRsp_BurstTarget_REG1__RdRsp_BurstTarget_63_32_MASK 0xFFFFFFFFL
++//SION_CL1_RdRsp_TimeSlot_REG0
++#define SION_CL1_RdRsp_TimeSlot_REG0__RdRsp_TimeSlot_31_0__SHIFT 0x0
++#define SION_CL1_RdRsp_TimeSlot_REG0__RdRsp_TimeSlot_31_0_MASK 0xFFFFFFFFL
++//SION_CL1_RdRsp_TimeSlot_REG1
++#define SION_CL1_RdRsp_TimeSlot_REG1__RdRsp_TimeSlot_63_32__SHIFT 0x0
++#define SION_CL1_RdRsp_TimeSlot_REG1__RdRsp_TimeSlot_63_32_MASK 0xFFFFFFFFL
++//SION_CL1_WrRsp_BurstTarget_REG0
++#define SION_CL1_WrRsp_BurstTarget_REG0__WrRsp_BurstTarget_31_0__SHIFT 0x0
++#define SION_CL1_WrRsp_BurstTarget_REG0__WrRsp_BurstTarget_31_0_MASK 0xFFFFFFFFL
++//SION_CL1_WrRsp_BurstTarget_REG1
++#define SION_CL1_WrRsp_BurstTarget_REG1__WrRsp_BurstTarget_63_32__SHIFT 0x0
++#define SION_CL1_WrRsp_BurstTarget_REG1__WrRsp_BurstTarget_63_32_MASK 0xFFFFFFFFL
++//SION_CL1_WrRsp_TimeSlot_REG0
++#define SION_CL1_WrRsp_TimeSlot_REG0__WrRsp_TimeSlot_31_0__SHIFT 0x0
++#define SION_CL1_WrRsp_TimeSlot_REG0__WrRsp_TimeSlot_31_0_MASK 0xFFFFFFFFL
++//SION_CL1_WrRsp_TimeSlot_REG1
++#define SION_CL1_WrRsp_TimeSlot_REG1__WrRsp_TimeSlot_63_32__SHIFT 0x0
++#define SION_CL1_WrRsp_TimeSlot_REG1__WrRsp_TimeSlot_63_32_MASK 0xFFFFFFFFL
++//SION_CL1_Req_BurstTarget_REG0
++#define SION_CL1_Req_BurstTarget_REG0__Req_BurstTarget_31_0__SHIFT 0x0
++#define SION_CL1_Req_BurstTarget_REG0__Req_BurstTarget_31_0_MASK 0xFFFFFFFFL
++//SION_CL1_Req_BurstTarget_REG1
++#define SION_CL1_Req_BurstTarget_REG1__Req_BurstTarget_63_32__SHIFT 0x0
++#define SION_CL1_Req_BurstTarget_REG1__Req_BurstTarget_63_32_MASK 0xFFFFFFFFL
++//SION_CL1_Req_TimeSlot_REG0
++#define SION_CL1_Req_TimeSlot_REG0__Req_TimeSlot_31_0__SHIFT 0x0
++#define SION_CL1_Req_TimeSlot_REG0__Req_TimeSlot_31_0_MASK 0xFFFFFFFFL
++//SION_CL1_Req_TimeSlot_REG1
++#define SION_CL1_Req_TimeSlot_REG1__Req_TimeSlot_63_32__SHIFT 0x0
++#define SION_CL1_Req_TimeSlot_REG1__Req_TimeSlot_63_32_MASK 0xFFFFFFFFL
++//SION_CL1_ReqPoolCredit_Alloc_REG0
++#define SION_CL1_ReqPoolCredit_Alloc_REG0__ReqPoolCredit_Alloc_31_0__SHIFT 0x0
++#define SION_CL1_ReqPoolCredit_Alloc_REG0__ReqPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL
++//SION_CL1_ReqPoolCredit_Alloc_REG1
++#define SION_CL1_ReqPoolCredit_Alloc_REG1__ReqPoolCredit_Alloc_63_32__SHIFT 0x0
++#define SION_CL1_ReqPoolCredit_Alloc_REG1__ReqPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL
++//SION_CL1_DataPoolCredit_Alloc_REG0
++#define SION_CL1_DataPoolCredit_Alloc_REG0__DataPoolCredit_Alloc_31_0__SHIFT 0x0
++#define SION_CL1_DataPoolCredit_Alloc_REG0__DataPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL
++//SION_CL1_DataPoolCredit_Alloc_REG1
++#define SION_CL1_DataPoolCredit_Alloc_REG1__DataPoolCredit_Alloc_63_32__SHIFT 0x0
++#define SION_CL1_DataPoolCredit_Alloc_REG1__DataPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL
++//SION_CL1_RdRspPoolCredit_Alloc_REG0
++#define SION_CL1_RdRspPoolCredit_Alloc_REG0__RdRspPoolCredit_Alloc_31_0__SHIFT 0x0
++#define SION_CL1_RdRspPoolCredit_Alloc_REG0__RdRspPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL
++//SION_CL1_RdRspPoolCredit_Alloc_REG1
++#define SION_CL1_RdRspPoolCredit_Alloc_REG1__RdRspPoolCredit_Alloc_63_32__SHIFT 0x0
++#define SION_CL1_RdRspPoolCredit_Alloc_REG1__RdRspPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL
++//SION_CL1_WrRspPoolCredit_Alloc_REG0
++#define SION_CL1_WrRspPoolCredit_Alloc_REG0__WrRspPoolCredit_Alloc_31_0__SHIFT 0x0
++#define SION_CL1_WrRspPoolCredit_Alloc_REG0__WrRspPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL
++//SION_CL1_WrRspPoolCredit_Alloc_REG1
++#define SION_CL1_WrRspPoolCredit_Alloc_REG1__WrRspPoolCredit_Alloc_63_32__SHIFT 0x0
++#define SION_CL1_WrRspPoolCredit_Alloc_REG1__WrRspPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL
++//SION_CL2_RdRsp_BurstTarget_REG0
++#define SION_CL2_RdRsp_BurstTarget_REG0__RdRsp_BurstTarget_31_0__SHIFT 0x0
++#define SION_CL2_RdRsp_BurstTarget_REG0__RdRsp_BurstTarget_31_0_MASK 0xFFFFFFFFL
++//SION_CL2_RdRsp_BurstTarget_REG1
++#define SION_CL2_RdRsp_BurstTarget_REG1__RdRsp_BurstTarget_63_32__SHIFT 0x0
++#define SION_CL2_RdRsp_BurstTarget_REG1__RdRsp_BurstTarget_63_32_MASK 0xFFFFFFFFL
++//SION_CL2_RdRsp_TimeSlot_REG0
++#define SION_CL2_RdRsp_TimeSlot_REG0__RdRsp_TimeSlot_31_0__SHIFT 0x0
++#define SION_CL2_RdRsp_TimeSlot_REG0__RdRsp_TimeSlot_31_0_MASK 0xFFFFFFFFL
++//SION_CL2_RdRsp_TimeSlot_REG1
++#define SION_CL2_RdRsp_TimeSlot_REG1__RdRsp_TimeSlot_63_32__SHIFT 0x0
++#define SION_CL2_RdRsp_TimeSlot_REG1__RdRsp_TimeSlot_63_32_MASK 0xFFFFFFFFL
++//SION_CL2_WrRsp_BurstTarget_REG0
++#define SION_CL2_WrRsp_BurstTarget_REG0__WrRsp_BurstTarget_31_0__SHIFT 0x0
++#define SION_CL2_WrRsp_BurstTarget_REG0__WrRsp_BurstTarget_31_0_MASK 0xFFFFFFFFL
++//SION_CL2_WrRsp_BurstTarget_REG1
++#define SION_CL2_WrRsp_BurstTarget_REG1__WrRsp_BurstTarget_63_32__SHIFT 0x0
++#define SION_CL2_WrRsp_BurstTarget_REG1__WrRsp_BurstTarget_63_32_MASK 0xFFFFFFFFL
++//SION_CL2_WrRsp_TimeSlot_REG0
++#define SION_CL2_WrRsp_TimeSlot_REG0__WrRsp_TimeSlot_31_0__SHIFT 0x0
++#define SION_CL2_WrRsp_TimeSlot_REG0__WrRsp_TimeSlot_31_0_MASK 0xFFFFFFFFL
++//SION_CL2_WrRsp_TimeSlot_REG1
++#define SION_CL2_WrRsp_TimeSlot_REG1__WrRsp_TimeSlot_63_32__SHIFT 0x0
++#define SION_CL2_WrRsp_TimeSlot_REG1__WrRsp_TimeSlot_63_32_MASK 0xFFFFFFFFL
++//SION_CL2_Req_BurstTarget_REG0
++#define SION_CL2_Req_BurstTarget_REG0__Req_BurstTarget_31_0__SHIFT 0x0
++#define SION_CL2_Req_BurstTarget_REG0__Req_BurstTarget_31_0_MASK 0xFFFFFFFFL
++//SION_CL2_Req_BurstTarget_REG1
++#define SION_CL2_Req_BurstTarget_REG1__Req_BurstTarget_63_32__SHIFT 0x0
++#define SION_CL2_Req_BurstTarget_REG1__Req_BurstTarget_63_32_MASK 0xFFFFFFFFL
++//SION_CL2_Req_TimeSlot_REG0
++#define SION_CL2_Req_TimeSlot_REG0__Req_TimeSlot_31_0__SHIFT 0x0
++#define SION_CL2_Req_TimeSlot_REG0__Req_TimeSlot_31_0_MASK 0xFFFFFFFFL
++//SION_CL2_Req_TimeSlot_REG1
++#define SION_CL2_Req_TimeSlot_REG1__Req_TimeSlot_63_32__SHIFT 0x0
++#define SION_CL2_Req_TimeSlot_REG1__Req_TimeSlot_63_32_MASK 0xFFFFFFFFL
++//SION_CL2_ReqPoolCredit_Alloc_REG0
++#define SION_CL2_ReqPoolCredit_Alloc_REG0__ReqPoolCredit_Alloc_31_0__SHIFT 0x0
++#define SION_CL2_ReqPoolCredit_Alloc_REG0__ReqPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL
++//SION_CL2_ReqPoolCredit_Alloc_REG1
++#define SION_CL2_ReqPoolCredit_Alloc_REG1__ReqPoolCredit_Alloc_63_32__SHIFT 0x0
++#define SION_CL2_ReqPoolCredit_Alloc_REG1__ReqPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL
++//SION_CL2_DataPoolCredit_Alloc_REG0
++#define SION_CL2_DataPoolCredit_Alloc_REG0__DataPoolCredit_Alloc_31_0__SHIFT 0x0
++#define SION_CL2_DataPoolCredit_Alloc_REG0__DataPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL
++//SION_CL2_DataPoolCredit_Alloc_REG1
++#define SION_CL2_DataPoolCredit_Alloc_REG1__DataPoolCredit_Alloc_63_32__SHIFT 0x0
++#define SION_CL2_DataPoolCredit_Alloc_REG1__DataPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL
++//SION_CL2_RdRspPoolCredit_Alloc_REG0
++#define SION_CL2_RdRspPoolCredit_Alloc_REG0__RdRspPoolCredit_Alloc_31_0__SHIFT 0x0
++#define SION_CL2_RdRspPoolCredit_Alloc_REG0__RdRspPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL
++//SION_CL2_RdRspPoolCredit_Alloc_REG1
++#define SION_CL2_RdRspPoolCredit_Alloc_REG1__RdRspPoolCredit_Alloc_63_32__SHIFT 0x0
++#define SION_CL2_RdRspPoolCredit_Alloc_REG1__RdRspPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL
++//SION_CL2_WrRspPoolCredit_Alloc_REG0
++#define SION_CL2_WrRspPoolCredit_Alloc_REG0__WrRspPoolCredit_Alloc_31_0__SHIFT 0x0
++#define SION_CL2_WrRspPoolCredit_Alloc_REG0__WrRspPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL
++//SION_CL2_WrRspPoolCredit_Alloc_REG1
++#define SION_CL2_WrRspPoolCredit_Alloc_REG1__WrRspPoolCredit_Alloc_63_32__SHIFT 0x0
++#define SION_CL2_WrRspPoolCredit_Alloc_REG1__WrRspPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL
++//SION_CL3_RdRsp_BurstTarget_REG0
++#define SION_CL3_RdRsp_BurstTarget_REG0__RdRsp_BurstTarget_31_0__SHIFT 0x0
++#define SION_CL3_RdRsp_BurstTarget_REG0__RdRsp_BurstTarget_31_0_MASK 0xFFFFFFFFL
++//SION_CL3_RdRsp_BurstTarget_REG1
++#define SION_CL3_RdRsp_BurstTarget_REG1__RdRsp_BurstTarget_63_32__SHIFT 0x0
++#define SION_CL3_RdRsp_BurstTarget_REG1__RdRsp_BurstTarget_63_32_MASK 0xFFFFFFFFL
++//SION_CL3_RdRsp_TimeSlot_REG0
++#define SION_CL3_RdRsp_TimeSlot_REG0__RdRsp_TimeSlot_31_0__SHIFT 0x0
++#define SION_CL3_RdRsp_TimeSlot_REG0__RdRsp_TimeSlot_31_0_MASK 0xFFFFFFFFL
++//SION_CL3_RdRsp_TimeSlot_REG1
++#define SION_CL3_RdRsp_TimeSlot_REG1__RdRsp_TimeSlot_63_32__SHIFT 0x0
++#define SION_CL3_RdRsp_TimeSlot_REG1__RdRsp_TimeSlot_63_32_MASK 0xFFFFFFFFL
++//SION_CL3_WrRsp_BurstTarget_REG0
++#define SION_CL3_WrRsp_BurstTarget_REG0__WrRsp_BurstTarget_31_0__SHIFT 0x0
++#define SION_CL3_WrRsp_BurstTarget_REG0__WrRsp_BurstTarget_31_0_MASK 0xFFFFFFFFL
++//SION_CL3_WrRsp_BurstTarget_REG1
++#define SION_CL3_WrRsp_BurstTarget_REG1__WrRsp_BurstTarget_63_32__SHIFT 0x0
++#define SION_CL3_WrRsp_BurstTarget_REG1__WrRsp_BurstTarget_63_32_MASK 0xFFFFFFFFL
++//SION_CL3_WrRsp_TimeSlot_REG0
++#define SION_CL3_WrRsp_TimeSlot_REG0__WrRsp_TimeSlot_31_0__SHIFT 0x0
++#define SION_CL3_WrRsp_TimeSlot_REG0__WrRsp_TimeSlot_31_0_MASK 0xFFFFFFFFL
++//SION_CL3_WrRsp_TimeSlot_REG1
++#define SION_CL3_WrRsp_TimeSlot_REG1__WrRsp_TimeSlot_63_32__SHIFT 0x0
++#define SION_CL3_WrRsp_TimeSlot_REG1__WrRsp_TimeSlot_63_32_MASK 0xFFFFFFFFL
++//SION_CL3_Req_BurstTarget_REG0
++#define SION_CL3_Req_BurstTarget_REG0__Req_BurstTarget_31_0__SHIFT 0x0
++#define SION_CL3_Req_BurstTarget_REG0__Req_BurstTarget_31_0_MASK 0xFFFFFFFFL
++//SION_CL3_Req_BurstTarget_REG1
++#define SION_CL3_Req_BurstTarget_REG1__Req_BurstTarget_63_32__SHIFT 0x0
++#define SION_CL3_Req_BurstTarget_REG1__Req_BurstTarget_63_32_MASK 0xFFFFFFFFL
++//SION_CL3_Req_TimeSlot_REG0
++#define SION_CL3_Req_TimeSlot_REG0__Req_TimeSlot_31_0__SHIFT 0x0
++#define SION_CL3_Req_TimeSlot_REG0__Req_TimeSlot_31_0_MASK 0xFFFFFFFFL
++//SION_CL3_Req_TimeSlot_REG1
++#define SION_CL3_Req_TimeSlot_REG1__Req_TimeSlot_63_32__SHIFT 0x0
++#define SION_CL3_Req_TimeSlot_REG1__Req_TimeSlot_63_32_MASK 0xFFFFFFFFL
++//SION_CL3_ReqPoolCredit_Alloc_REG0
++#define SION_CL3_ReqPoolCredit_Alloc_REG0__ReqPoolCredit_Alloc_31_0__SHIFT 0x0
++#define SION_CL3_ReqPoolCredit_Alloc_REG0__ReqPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL
++//SION_CL3_ReqPoolCredit_Alloc_REG1
++#define SION_CL3_ReqPoolCredit_Alloc_REG1__ReqPoolCredit_Alloc_63_32__SHIFT 0x0
++#define SION_CL3_ReqPoolCredit_Alloc_REG1__ReqPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL
++//SION_CL3_DataPoolCredit_Alloc_REG0
++#define SION_CL3_DataPoolCredit_Alloc_REG0__DataPoolCredit_Alloc_31_0__SHIFT 0x0
++#define SION_CL3_DataPoolCredit_Alloc_REG0__DataPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL
++//SION_CL3_DataPoolCredit_Alloc_REG1
++#define SION_CL3_DataPoolCredit_Alloc_REG1__DataPoolCredit_Alloc_63_32__SHIFT 0x0
++#define SION_CL3_DataPoolCredit_Alloc_REG1__DataPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL
++//SION_CL3_RdRspPoolCredit_Alloc_REG0
++#define SION_CL3_RdRspPoolCredit_Alloc_REG0__RdRspPoolCredit_Alloc_31_0__SHIFT 0x0
++#define SION_CL3_RdRspPoolCredit_Alloc_REG0__RdRspPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL
++//SION_CL3_RdRspPoolCredit_Alloc_REG1
++#define SION_CL3_RdRspPoolCredit_Alloc_REG1__RdRspPoolCredit_Alloc_63_32__SHIFT 0x0
++#define SION_CL3_RdRspPoolCredit_Alloc_REG1__RdRspPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL
++//SION_CL3_WrRspPoolCredit_Alloc_REG0
++#define SION_CL3_WrRspPoolCredit_Alloc_REG0__WrRspPoolCredit_Alloc_31_0__SHIFT 0x0
++#define SION_CL3_WrRspPoolCredit_Alloc_REG0__WrRspPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL
++//SION_CL3_WrRspPoolCredit_Alloc_REG1
++#define SION_CL3_WrRspPoolCredit_Alloc_REG1__WrRspPoolCredit_Alloc_63_32__SHIFT 0x0
++#define SION_CL3_WrRspPoolCredit_Alloc_REG1__WrRspPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL
++//SION_CNTL_REG0
++#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK0__SHIFT 0x0
++#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK1__SHIFT 0x1
++#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK2__SHIFT 0x2
++#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK3__SHIFT 0x3
++#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK4__SHIFT 0x4
++#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK5__SHIFT 0x5
++#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK6__SHIFT 0x6
++#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK7__SHIFT 0x7
++#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK8__SHIFT 0x8
++#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK9__SHIFT 0x9
++#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK0__SHIFT 0xa
++#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK1__SHIFT 0xb
++#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK2__SHIFT 0xc
++#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK3__SHIFT 0xd
++#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK4__SHIFT 0xe
++#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK5__SHIFT 0xf
++#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK6__SHIFT 0x10
++#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK7__SHIFT 0x11
++#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK8__SHIFT 0x12
++#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK9__SHIFT 0x13
++#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK0_MASK 0x00000001L
++#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK1_MASK 0x00000002L
++#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK2_MASK 0x00000004L
++#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK3_MASK 0x00000008L
++#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK4_MASK 0x00000010L
++#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK5_MASK 0x00000020L
++#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK6_MASK 0x00000040L
++#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK7_MASK 0x00000080L
++#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK8_MASK 0x00000100L
++#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK9_MASK 0x00000200L
++#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK0_MASK 0x00000400L
++#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK1_MASK 0x00000800L
++#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK2_MASK 0x00001000L
++#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK3_MASK 0x00002000L
++#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK4_MASK 0x00004000L
++#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK5_MASK 0x00008000L
++#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK6_MASK 0x00010000L
++#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK7_MASK 0x00020000L
++#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK8_MASK 0x00040000L
++#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK9_MASK 0x00080000L
++//SION_CNTL_REG1
++#define SION_CNTL_REG1__LIVELOCK_WATCHDOG_THRESHOLD__SHIFT 0x0
++#define SION_CNTL_REG1__CG_OFF_HYSTERESIS__SHIFT 0x8
++#define SION_CNTL_REG1__LIVELOCK_WATCHDOG_THRESHOLD_MASK 0x000000FFL
++#define SION_CNTL_REG1__CG_OFF_HYSTERESIS_MASK 0x0000FF00L
++
++
++// addressBlock: nbio_nbif0_gdc_rst_GDCRST_DEC
++//SHUB_PF_FLR_RST
++#define SHUB_PF_FLR_RST__DEV0_PF0_FLR_RST__SHIFT 0x0
++#define SHUB_PF_FLR_RST__DEV0_PF1_FLR_RST__SHIFT 0x1
++#define SHUB_PF_FLR_RST__DEV0_PF0_FLR_RST_MASK 0x00000001L
++#define SHUB_PF_FLR_RST__DEV0_PF1_FLR_RST_MASK 0x00000002L
++//SHUB_GFX_DRV_VPU_RST
++#define SHUB_GFX_DRV_VPU_RST__GFX_DRV_MODE1_RST__SHIFT 0x0
++#define SHUB_GFX_DRV_VPU_RST__GFX_DRV_MODE1_RST_MASK 0x00000001L
++//SHUB_LINK_RESET
++#define SHUB_LINK_RESET__LINK_P0_RESET__SHIFT 0x0
++#define SHUB_LINK_RESET__LINK_P1_RESET__SHIFT 0x1
++#define SHUB_LINK_RESET__LINK_P2_RESET__SHIFT 0x2
++#define SHUB_LINK_RESET__LINK_P0_RESET_MASK 0x00000001L
++#define SHUB_LINK_RESET__LINK_P1_RESET_MASK 0x00000002L
++#define SHUB_LINK_RESET__LINK_P2_RESET_MASK 0x00000004L
++//SHUB_PF0_VF_FLR_RST
++#define SHUB_PF0_VF_FLR_RST__PF0_VF0_FLR_RST__SHIFT 0x0
++#define SHUB_PF0_VF_FLR_RST__PF0_VF1_FLR_RST__SHIFT 0x1
++#define SHUB_PF0_VF_FLR_RST__PF0_VF2_FLR_RST__SHIFT 0x2
++#define SHUB_PF0_VF_FLR_RST__PF0_VF3_FLR_RST__SHIFT 0x3
++#define SHUB_PF0_VF_FLR_RST__PF0_VF4_FLR_RST__SHIFT 0x4
++#define SHUB_PF0_VF_FLR_RST__PF0_VF5_FLR_RST__SHIFT 0x5
++#define SHUB_PF0_VF_FLR_RST__PF0_VF6_FLR_RST__SHIFT 0x6
++#define SHUB_PF0_VF_FLR_RST__PF0_VF7_FLR_RST__SHIFT 0x7
++#define SHUB_PF0_VF_FLR_RST__PF0_VF8_FLR_RST__SHIFT 0x8
++#define SHUB_PF0_VF_FLR_RST__PF0_VF9_FLR_RST__SHIFT 0x9
++#define SHUB_PF0_VF_FLR_RST__PF0_VF10_FLR_RST__SHIFT 0xa
++#define SHUB_PF0_VF_FLR_RST__PF0_VF11_FLR_RST__SHIFT 0xb
++#define SHUB_PF0_VF_FLR_RST__PF0_VF12_FLR_RST__SHIFT 0xc
++#define SHUB_PF0_VF_FLR_RST__PF0_VF13_FLR_RST__SHIFT 0xd
++#define SHUB_PF0_VF_FLR_RST__PF0_VF14_FLR_RST__SHIFT 0xe
++#define SHUB_PF0_VF_FLR_RST__PF0_VF15_FLR_RST__SHIFT 0xf
++#define SHUB_PF0_VF_FLR_RST__PF0_SOFTPF_FLR_RST__SHIFT 0x1f
++#define SHUB_PF0_VF_FLR_RST__PF0_VF0_FLR_RST_MASK 0x00000001L
++#define SHUB_PF0_VF_FLR_RST__PF0_VF1_FLR_RST_MASK 0x00000002L
++#define SHUB_PF0_VF_FLR_RST__PF0_VF2_FLR_RST_MASK 0x00000004L
++#define SHUB_PF0_VF_FLR_RST__PF0_VF3_FLR_RST_MASK 0x00000008L
++#define SHUB_PF0_VF_FLR_RST__PF0_VF4_FLR_RST_MASK 0x00000010L
++#define SHUB_PF0_VF_FLR_RST__PF0_VF5_FLR_RST_MASK 0x00000020L
++#define SHUB_PF0_VF_FLR_RST__PF0_VF6_FLR_RST_MASK 0x00000040L
++#define SHUB_PF0_VF_FLR_RST__PF0_VF7_FLR_RST_MASK 0x00000080L
++#define SHUB_PF0_VF_FLR_RST__PF0_VF8_FLR_RST_MASK 0x00000100L
++#define SHUB_PF0_VF_FLR_RST__PF0_VF9_FLR_RST_MASK 0x00000200L
++#define SHUB_PF0_VF_FLR_RST__PF0_VF10_FLR_RST_MASK 0x00000400L
++#define SHUB_PF0_VF_FLR_RST__PF0_VF11_FLR_RST_MASK 0x00000800L
++#define SHUB_PF0_VF_FLR_RST__PF0_VF12_FLR_RST_MASK 0x00001000L
++#define SHUB_PF0_VF_FLR_RST__PF0_VF13_FLR_RST_MASK 0x00002000L
++#define SHUB_PF0_VF_FLR_RST__PF0_VF14_FLR_RST_MASK 0x00004000L
++#define SHUB_PF0_VF_FLR_RST__PF0_VF15_FLR_RST_MASK 0x00008000L
++#define SHUB_PF0_VF_FLR_RST__PF0_SOFTPF_FLR_RST_MASK 0x80000000L
++//SHUB_HARD_RST_CTRL
++#define SHUB_HARD_RST_CTRL__COR_RESET_EN__SHIFT 0x0
++#define SHUB_HARD_RST_CTRL__REG_RESET_EN__SHIFT 0x1
++#define SHUB_HARD_RST_CTRL__STY_RESET_EN__SHIFT 0x2
++#define SHUB_HARD_RST_CTRL__NIC400_RESET_EN__SHIFT 0x3
++#define SHUB_HARD_RST_CTRL__SDP_PORT_RESET_EN__SHIFT 0x4
++#define SHUB_HARD_RST_CTRL__SION_AON_RESET_EN__SHIFT 0x5
++#define SHUB_HARD_RST_CTRL__COR_RESET_EN_MASK 0x00000001L
++#define SHUB_HARD_RST_CTRL__REG_RESET_EN_MASK 0x00000002L
++#define SHUB_HARD_RST_CTRL__STY_RESET_EN_MASK 0x00000004L
++#define SHUB_HARD_RST_CTRL__NIC400_RESET_EN_MASK 0x00000008L
++#define SHUB_HARD_RST_CTRL__SDP_PORT_RESET_EN_MASK 0x00000010L
++#define SHUB_HARD_RST_CTRL__SION_AON_RESET_EN_MASK 0x00000020L
++//SHUB_SOFT_RST_CTRL
++#define SHUB_SOFT_RST_CTRL__COR_RESET_EN__SHIFT 0x0
++#define SHUB_SOFT_RST_CTRL__REG_RESET_EN__SHIFT 0x1
++#define SHUB_SOFT_RST_CTRL__STY_RESET_EN__SHIFT 0x2
++#define SHUB_SOFT_RST_CTRL__NIC400_RESET_EN__SHIFT 0x3
++#define SHUB_SOFT_RST_CTRL__SDP_PORT_RESET_EN__SHIFT 0x4
++#define SHUB_SOFT_RST_CTRL__SION_AON_RESET_EN__SHIFT 0x5
++#define SHUB_SOFT_RST_CTRL__COR_RESET_EN_MASK 0x00000001L
++#define SHUB_SOFT_RST_CTRL__REG_RESET_EN_MASK 0x00000002L
++#define SHUB_SOFT_RST_CTRL__STY_RESET_EN_MASK 0x00000004L
++#define SHUB_SOFT_RST_CTRL__NIC400_RESET_EN_MASK 0x00000008L
++#define SHUB_SOFT_RST_CTRL__SDP_PORT_RESET_EN_MASK 0x00000010L
++#define SHUB_SOFT_RST_CTRL__SION_AON_RESET_EN_MASK 0x00000020L
++//SHUB_SDP_PORT_RST
++#define SHUB_SDP_PORT_RST__A2S_SDP_PORT_RST__SHIFT 0x0
++#define SHUB_SDP_PORT_RST__NBIFSION_BIF_SDP_PORT_RST__SHIFT 0x1
++#define SHUB_SDP_PORT_RST__ATHUB_HST_SDP_PORT_RST__SHIFT 0x2
++#define SHUB_SDP_PORT_RST__ATHUB_DMA_SDP_PORT_RST__SHIFT 0x3
++#define SHUB_SDP_PORT_RST__ATDMA_NBIFSOIN_SDP_PORT_RST__SHIFT 0x4
++#define SHUB_SDP_PORT_RST__INT_NBIFSION_SDP_PORT_RST__SHIFT 0x5
++#define SHUB_SDP_PORT_RST__MP4SDP_SDP_PORT_RST__SHIFT 0x6
++#define SHUB_SDP_PORT_RST__GDC_HST_SDP_PORT_RST__SHIFT 0x7
++#define SHUB_SDP_PORT_RST__NTB_HST_SDP_PORT_RST__SHIFT 0x8
++#define SHUB_SDP_PORT_RST__NTB_DMA_SDP_PORT_RST__SHIFT 0x9
++#define SHUB_SDP_PORT_RST__SION_AON_RST__SHIFT 0x18
++#define SHUB_SDP_PORT_RST__A2S_SDP_PORT_RST_MASK 0x00000001L
++#define SHUB_SDP_PORT_RST__NBIFSION_BIF_SDP_PORT_RST_MASK 0x00000002L
++#define SHUB_SDP_PORT_RST__ATHUB_HST_SDP_PORT_RST_MASK 0x00000004L
++#define SHUB_SDP_PORT_RST__ATHUB_DMA_SDP_PORT_RST_MASK 0x00000008L
++#define SHUB_SDP_PORT_RST__ATDMA_NBIFSOIN_SDP_PORT_RST_MASK 0x00000010L
++#define SHUB_SDP_PORT_RST__INT_NBIFSION_SDP_PORT_RST_MASK 0x00000020L
++#define SHUB_SDP_PORT_RST__MP4SDP_SDP_PORT_RST_MASK 0x00000040L
++#define SHUB_SDP_PORT_RST__GDC_HST_SDP_PORT_RST_MASK 0x00000080L
++#define SHUB_SDP_PORT_RST__NTB_HST_SDP_PORT_RST_MASK 0x00000100L
++#define SHUB_SDP_PORT_RST__NTB_DMA_SDP_PORT_RST_MASK 0x00000200L
++#define SHUB_SDP_PORT_RST__SION_AON_RST_MASK 0x01000000L
++//SHUB_RST_MISC_TRL
++#define SHUB_RST_MISC_TRL__RSMU_SOFT_RST_ATOMIC__SHIFT 0x0
++#define SHUB_RST_MISC_TRL__RSMU_SOFT_RST_CYCLE__SHIFT 0x10
++#define SHUB_RST_MISC_TRL__RSMU_SOFT_RST_ATOMIC_MASK 0x00000001L
++#define SHUB_RST_MISC_TRL__RSMU_SOFT_RST_CYCLE_MASK 0x00FF0000L
++
++
++// addressBlock: nbio_nbif0_gdc_ras_gdc_ras_regblk
++//GDCL_RAS_CENTRAL_STATUS
++#define GDCL_RAS_CENTRAL_STATUS__GDCL_L2C_EgStall_det__SHIFT 0x0
++#define GDCL_RAS_CENTRAL_STATUS__GDCL_L2C_ErrEvent_det__SHIFT 0x1
++#define GDCL_RAS_CENTRAL_STATUS__GDCL_C2L_EgStall_det__SHIFT 0x2
++#define GDCL_RAS_CENTRAL_STATUS__GDCL_C2L_ErrEvent_det__SHIFT 0x3
++#define GDCL_RAS_CENTRAL_STATUS__GDCL_L2C_EgStall_det_MASK 0x00000001L
++#define GDCL_RAS_CENTRAL_STATUS__GDCL_L2C_ErrEvent_det_MASK 0x00000002L
++#define GDCL_RAS_CENTRAL_STATUS__GDCL_C2L_EgStall_det_MASK 0x00000004L
++#define GDCL_RAS_CENTRAL_STATUS__GDCL_C2L_ErrEvent_det_MASK 0x00000008L
++//GDCSOC_RAS_CENTRAL_STATUS
++#define GDCSOC_RAS_CENTRAL_STATUS__GDCSOC_L2C_EgStall_det__SHIFT 0x0
++#define GDCSOC_RAS_CENTRAL_STATUS__GDCSOC_L2C_ErrEvent_det__SHIFT 0x1
++#define GDCSOC_RAS_CENTRAL_STATUS__GDCSOC_C2L_EgStall_det__SHIFT 0x2
++#define GDCSOC_RAS_CENTRAL_STATUS__GDCSOC_C2L_ErrEvent_det__SHIFT 0x3
++#define GDCSOC_RAS_CENTRAL_STATUS__GDCSOC_L2C_EgStall_det_MASK 0x00000001L
++#define GDCSOC_RAS_CENTRAL_STATUS__GDCSOC_L2C_ErrEvent_det_MASK 0x00000002L
++#define GDCSOC_RAS_CENTRAL_STATUS__GDCSOC_C2L_EgStall_det_MASK 0x00000004L
++#define GDCSOC_RAS_CENTRAL_STATUS__GDCSOC_C2L_ErrEvent_det_MASK 0x00000008L
++//GDCSOC_RAS_LEAF0_CTRL
++#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_ERR_EVENT_DET_EN__SHIFT 0x0
++#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_POISON_ERREVENT_EN__SHIFT 0x1
++#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_POISON_STALL_EN__SHIFT 0x2
++#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_PARITY_ERREVENT_EN__SHIFT 0x3
++#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_PARITY_STALL_EN__SHIFT 0x4
++#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_ERR_EVENT_GEN_EN__SHIFT 0x8
++#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_EGRESS_STALL_GEN_EN__SHIFT 0x9
++#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_ERR_EVENT_PROP_EN__SHIFT 0xa
++#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_EGRESS_STALL_PROP_EN__SHIFT 0xb
++#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_ERR_EVENT_DET_EN_MASK 0x00000001L
++#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_POISON_ERREVENT_EN_MASK 0x00000002L
++#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_POISON_STALL_EN_MASK 0x00000004L
++#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_PARITY_ERREVENT_EN_MASK 0x00000008L
++#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_PARITY_STALL_EN_MASK 0x00000010L
++#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_ERR_EVENT_GEN_EN_MASK 0x00000100L
++#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_EGRESS_STALL_GEN_EN_MASK 0x00000200L
++#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_ERR_EVENT_PROP_EN_MASK 0x00000400L
++#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_EGRESS_STALL_PROP_EN_MASK 0x00000800L
++//GDCSOC_RAS_LEAF1_CTRL
++#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_ERR_EVENT_DET_EN__SHIFT 0x0
++#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_POISON_ERREVENT_EN__SHIFT 0x1
++#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_POISON_STALL_EN__SHIFT 0x2
++#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_PARITY_ERREVENT_EN__SHIFT 0x3
++#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_PARITY_STALL_EN__SHIFT 0x4
++#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_ERR_EVENT_GEN_EN__SHIFT 0x8
++#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_EGRESS_STALL_GEN_EN__SHIFT 0x9
++#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_ERR_EVENT_PROP_EN__SHIFT 0xa
++#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_EGRESS_STALL_PROP_EN__SHIFT 0xb
++#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_ERR_EVENT_DET_EN_MASK 0x00000001L
++#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_POISON_ERREVENT_EN_MASK 0x00000002L
++#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_POISON_STALL_EN_MASK 0x00000004L
++#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_PARITY_ERREVENT_EN_MASK 0x00000008L
++#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_PARITY_STALL_EN_MASK 0x00000010L
++#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_ERR_EVENT_GEN_EN_MASK 0x00000100L
++#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_EGRESS_STALL_GEN_EN_MASK 0x00000200L
++#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_ERR_EVENT_PROP_EN_MASK 0x00000400L
++#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_EGRESS_STALL_PROP_EN_MASK 0x00000800L
++//GDCSOC_RAS_LEAF2_CTRL
++#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_ERR_EVENT_DET_EN__SHIFT 0x0
++#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_POISON_ERREVENT_EN__SHIFT 0x1
++#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_POISON_STALL_EN__SHIFT 0x2
++#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_PARITY_ERREVENT_EN__SHIFT 0x3
++#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_PARITY_STALL_EN__SHIFT 0x4
++#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_ERR_EVENT_GEN_EN__SHIFT 0x8
++#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_EGRESS_STALL_GEN_EN__SHIFT 0x9
++#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_ERR_EVENT_PROP_EN__SHIFT 0xa
++#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_EGRESS_STALL_PROP_EN__SHIFT 0xb
++#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_ERR_EVENT_RAS_INTR_EN__SHIFT 0x10
++#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_ERR_EVENT_RAS_HSTRSP_SHUB_STALL_EN__SHIFT 0x18
++#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_ERR_EVENT_RAS_HSTRSP_CDC_STALL_EN__SHIFT 0x19
++#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_ERR_EVENT_RAS_IHINTR_PORT_MASK_DIS__SHIFT 0x1a
++#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_ERR_EVENT_RAS_IHINTR_TRANS_MASK_DIS__SHIFT 0x1b
++#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_ERR_EVENT_DET_EN_MASK 0x00000001L
++#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_POISON_ERREVENT_EN_MASK 0x00000002L
++#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_POISON_STALL_EN_MASK 0x00000004L
++#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_PARITY_ERREVENT_EN_MASK 0x00000008L
++#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_PARITY_STALL_EN_MASK 0x00000010L
++#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_ERR_EVENT_GEN_EN_MASK 0x00000100L
++#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_EGRESS_STALL_GEN_EN_MASK 0x00000200L
++#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_ERR_EVENT_PROP_EN_MASK 0x00000400L
++#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_EGRESS_STALL_PROP_EN_MASK 0x00000800L
++#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_ERR_EVENT_RAS_INTR_EN_MASK 0x00010000L
++#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_ERR_EVENT_RAS_HSTRSP_SHUB_STALL_EN_MASK 0x01000000L
++#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_ERR_EVENT_RAS_HSTRSP_CDC_STALL_EN_MASK 0x02000000L
++#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_ERR_EVENT_RAS_IHINTR_PORT_MASK_DIS_MASK 0x04000000L
++#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_ERR_EVENT_RAS_IHINTR_TRANS_MASK_DIS_MASK 0x08000000L
++//GDCSOC_RAS_LEAF3_CTRL
++#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_ERR_EVENT_DET_EN__SHIFT 0x0
++#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_POISON_ERREVENT_EN__SHIFT 0x1
++#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_POISON_STALL_EN__SHIFT 0x2
++#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_PARITY_ERREVENT_EN__SHIFT 0x3
++#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_PARITY_STALL_EN__SHIFT 0x4
++#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_ERR_EVENT_GEN_EN__SHIFT 0x8
++#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_EGRESS_STALL_GEN_EN__SHIFT 0x9
++#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_ERR_EVENT_PROP_EN__SHIFT 0xa
++#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_EGRESS_STALL_PROP_EN__SHIFT 0xb
++#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_ERR_EVENT_DET_EN_MASK 0x00000001L
++#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_POISON_ERREVENT_EN_MASK 0x00000002L
++#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_POISON_STALL_EN_MASK 0x00000004L
++#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_PARITY_ERREVENT_EN_MASK 0x00000008L
++#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_PARITY_STALL_EN_MASK 0x00000010L
++#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_ERR_EVENT_GEN_EN_MASK 0x00000100L
++#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_EGRESS_STALL_GEN_EN_MASK 0x00000200L
++#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_ERR_EVENT_PROP_EN_MASK 0x00000400L
++#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_EGRESS_STALL_PROP_EN_MASK 0x00000800L
++//GDCSOC_RAS_LEAF4_CTRL
++#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_ERR_EVENT_DET_EN__SHIFT 0x0
++#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_POISON_ERREVENT_EN__SHIFT 0x1
++#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_POISON_STALL_EN__SHIFT 0x2
++#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_PARITY_ERREVENT_EN__SHIFT 0x3
++#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_PARITY_STALL_EN__SHIFT 0x4
++#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_ERR_EVENT_GEN_EN__SHIFT 0x8
++#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_EGRESS_STALL_GEN_EN__SHIFT 0x9
++#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_ERR_EVENT_PROP_EN__SHIFT 0xa
++#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_EGRESS_STALL_PROP_EN__SHIFT 0xb
++#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_ERR_EVENT_DET_EN_MASK 0x00000001L
++#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_POISON_ERREVENT_EN_MASK 0x00000002L
++#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_POISON_STALL_EN_MASK 0x00000004L
++#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_PARITY_ERREVENT_EN_MASK 0x00000008L
++#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_PARITY_STALL_EN_MASK 0x00000010L
++#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_ERR_EVENT_GEN_EN_MASK 0x00000100L
++#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_EGRESS_STALL_GEN_EN_MASK 0x00000200L
++#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_ERR_EVENT_PROP_EN_MASK 0x00000400L
++#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_EGRESS_STALL_PROP_EN_MASK 0x00000800L
++//GDCSOC_RAS_LEAF5_CTRL
++#define GDCSOC_RAS_LEAF5_CTRL__GDCSOC_RAS_LEAF5_CTRL_ERR_EVENT_DET_EN__SHIFT 0x0
++#define GDCSOC_RAS_LEAF5_CTRL__GDCSOC_RAS_LEAF5_CTRL_POISON_ERREVENT_EN__SHIFT 0x1
++#define GDCSOC_RAS_LEAF5_CTRL__GDCSOC_RAS_LEAF5_CTRL_POISON_STALL_EN__SHIFT 0x2
++#define GDCSOC_RAS_LEAF5_CTRL__GDCSOC_RAS_LEAF5_CTRL_PARITY_ERREVENT_EN__SHIFT 0x3
++#define GDCSOC_RAS_LEAF5_CTRL__GDCSOC_RAS_LEAF5_CTRL_PARITY_STALL_EN__SHIFT 0x4
++#define GDCSOC_RAS_LEAF5_CTRL__GDCSOC_RAS_LEAF5_CTRL_ERR_EVENT_GEN_EN__SHIFT 0x8
++#define GDCSOC_RAS_LEAF5_CTRL__GDCSOC_RAS_LEAF5_CTRL_EGRESS_STALL_GEN_EN__SHIFT 0x9
++#define GDCSOC_RAS_LEAF5_CTRL__GDCSOC_RAS_LEAF5_CTRL_ERR_EVENT_PROP_EN__SHIFT 0xa
++#define GDCSOC_RAS_LEAF5_CTRL__GDCSOC_RAS_LEAF5_CTRL_EGRESS_STALL_PROP_EN__SHIFT 0xb
++#define GDCSOC_RAS_LEAF5_CTRL__GDCSOC_RAS_LEAF5_CTRL_ERR_EVENT_DET_EN_MASK 0x00000001L
++#define GDCSOC_RAS_LEAF5_CTRL__GDCSOC_RAS_LEAF5_CTRL_POISON_ERREVENT_EN_MASK 0x00000002L
++#define GDCSOC_RAS_LEAF5_CTRL__GDCSOC_RAS_LEAF5_CTRL_POISON_STALL_EN_MASK 0x00000004L
++#define GDCSOC_RAS_LEAF5_CTRL__GDCSOC_RAS_LEAF5_CTRL_PARITY_ERREVENT_EN_MASK 0x00000008L
++#define GDCSOC_RAS_LEAF5_CTRL__GDCSOC_RAS_LEAF5_CTRL_PARITY_STALL_EN_MASK 0x00000010L
++#define GDCSOC_RAS_LEAF5_CTRL__GDCSOC_RAS_LEAF5_CTRL_ERR_EVENT_GEN_EN_MASK 0x00000100L
++#define GDCSOC_RAS_LEAF5_CTRL__GDCSOC_RAS_LEAF5_CTRL_EGRESS_STALL_GEN_EN_MASK 0x00000200L
++#define GDCSOC_RAS_LEAF5_CTRL__GDCSOC_RAS_LEAF5_CTRL_ERR_EVENT_PROP_EN_MASK 0x00000400L
++#define GDCSOC_RAS_LEAF5_CTRL__GDCSOC_RAS_LEAF5_CTRL_EGRESS_STALL_PROP_EN_MASK 0x00000800L
++//GDCSOC_RAS_LEAF6_CTRL
++#define GDCSOC_RAS_LEAF6_CTRL__GDCSOC_RAS_LEAF6_CTRL_ERR_EVENT_DET_EN__SHIFT 0x0
++#define GDCSOC_RAS_LEAF6_CTRL__GDCSOC_RAS_LEAF6_CTRL_POISON_ERREVENT_EN__SHIFT 0x1
++#define GDCSOC_RAS_LEAF6_CTRL__GDCSOC_RAS_LEAF6_CTRL_POISON_STALL_EN__SHIFT 0x2
++#define GDCSOC_RAS_LEAF6_CTRL__GDCSOC_RAS_LEAF6_CTRL_PARITY_ERREVENT_EN__SHIFT 0x3
++#define GDCSOC_RAS_LEAF6_CTRL__GDCSOC_RAS_LEAF6_CTRL_PARITY_STALL_EN__SHIFT 0x4
++#define GDCSOC_RAS_LEAF6_CTRL__GDCSOC_RAS_LEAF6_CTRL_ERR_EVENT_GEN_EN__SHIFT 0x8
++#define GDCSOC_RAS_LEAF6_CTRL__GDCSOC_RAS_LEAF6_CTRL_EGRESS_STALL_GEN_EN__SHIFT 0x9
++#define GDCSOC_RAS_LEAF6_CTRL__GDCSOC_RAS_LEAF6_CTRL_ERR_EVENT_PROP_EN__SHIFT 0xa
++#define GDCSOC_RAS_LEAF6_CTRL__GDCSOC_RAS_LEAF6_CTRL_EGRESS_STALL_PROP_EN__SHIFT 0xb
++#define GDCSOC_RAS_LEAF6_CTRL__GDCSOC_RAS_LEAF6_CTRL_ERR_EVENT_DET_EN_MASK 0x00000001L
++#define GDCSOC_RAS_LEAF6_CTRL__GDCSOC_RAS_LEAF6_CTRL_POISON_ERREVENT_EN_MASK 0x00000002L
++#define GDCSOC_RAS_LEAF6_CTRL__GDCSOC_RAS_LEAF6_CTRL_POISON_STALL_EN_MASK 0x00000004L
++#define GDCSOC_RAS_LEAF6_CTRL__GDCSOC_RAS_LEAF6_CTRL_PARITY_ERREVENT_EN_MASK 0x00000008L
++#define GDCSOC_RAS_LEAF6_CTRL__GDCSOC_RAS_LEAF6_CTRL_PARITY_STALL_EN_MASK 0x00000010L
++#define GDCSOC_RAS_LEAF6_CTRL__GDCSOC_RAS_LEAF6_CTRL_ERR_EVENT_GEN_EN_MASK 0x00000100L
++#define GDCSOC_RAS_LEAF6_CTRL__GDCSOC_RAS_LEAF6_CTRL_EGRESS_STALL_GEN_EN_MASK 0x00000200L
++#define GDCSOC_RAS_LEAF6_CTRL__GDCSOC_RAS_LEAF6_CTRL_ERR_EVENT_PROP_EN_MASK 0x00000400L
++#define GDCSOC_RAS_LEAF6_CTRL__GDCSOC_RAS_LEAF6_CTRL_EGRESS_STALL_PROP_EN_MASK 0x00000800L
++//GDCSOC_RAS_LEAF2_MISC_CTRL
++#define GDCSOC_RAS_LEAF2_MISC_CTRL__GDCSOC_RAS_LEAF2_MISC_CTRL_ERR_EVENT_RAS_HSTRSP_SHUB_DROP_EN__SHIFT 0x0
++#define GDCSOC_RAS_LEAF2_MISC_CTRL__GDCSOC_RAS_LEAF2_MISC_CTRL_ERR_EVENT_RAS_HSTRSP_CDC_DROP_EN__SHIFT 0x1
++#define GDCSOC_RAS_LEAF2_MISC_CTRL__GDCSOC_RAS_LEAF2_MISC_CTRL_ERR_EVENT_RAS_IHINTR_PORT_MASK_DIS__SHIFT 0x8
++#define GDCSOC_RAS_LEAF2_MISC_CTRL__GDCSOC_RAS_LEAF2_MISC_CTRL_ERR_EVENT_RAS_IHINTR_TRANS_MASK_DIS__SHIFT 0x9
++#define GDCSOC_RAS_LEAF2_MISC_CTRL__GDCSOC_RAS_LEAF2_MISC_CTRL_ERR_EVENT_RAS_HSTRSP_SHUB_DROP_EN_MASK 0x00000001L
++#define GDCSOC_RAS_LEAF2_MISC_CTRL__GDCSOC_RAS_LEAF2_MISC_CTRL_ERR_EVENT_RAS_HSTRSP_CDC_DROP_EN_MASK 0x00000002L
++#define GDCSOC_RAS_LEAF2_MISC_CTRL__GDCSOC_RAS_LEAF2_MISC_CTRL_ERR_EVENT_RAS_IHINTR_PORT_MASK_DIS_MASK 0x00000100L
++#define GDCSOC_RAS_LEAF2_MISC_CTRL__GDCSOC_RAS_LEAF2_MISC_CTRL_ERR_EVENT_RAS_IHINTR_TRANS_MASK_DIS_MASK 0x00000200L
++//GDCSOC_RAS_LEAF0_STATUS
++#define GDCSOC_RAS_LEAF0_STATUS__GDCSOC_RAS_LEAF0_STATUS_ERR_EVENT_RECV__SHIFT 0x0
++#define GDCSOC_RAS_LEAF0_STATUS__GDCSOC_RAS_LEAF0_STATUS_POISON_ERR_DET__SHIFT 0x1
++#define GDCSOC_RAS_LEAF0_STATUS__GDCSOC_RAS_LEAF0_STATUS_PARITY_ERR_DET__SHIFT 0x2
++#define GDCSOC_RAS_LEAF0_STATUS__GDCSOC_RAS_LEAF0_STATUS_ERR_EVENT_GENN_STAT__SHIFT 0x8
++#define GDCSOC_RAS_LEAF0_STATUS__GDCSOC_RAS_LEAF0_STATUS_EGRESS_STALLED_GENN_STAT__SHIFT 0x9
++#define GDCSOC_RAS_LEAF0_STATUS__GDCSOC_RAS_LEAF0_STATUS_ERR_EVENT_PROP_STAT__SHIFT 0xa
++#define GDCSOC_RAS_LEAF0_STATUS__GDCSOC_RAS_LEAF0_STATUS_EGRESS_STALLED_PROP_STAT__SHIFT 0xb
++#define GDCSOC_RAS_LEAF0_STATUS__GDCSOC_RAS_LEAF0_STATUS_ERR_EVENT_RECV_MASK 0x00000001L
++#define GDCSOC_RAS_LEAF0_STATUS__GDCSOC_RAS_LEAF0_STATUS_POISON_ERR_DET_MASK 0x00000002L
++#define GDCSOC_RAS_LEAF0_STATUS__GDCSOC_RAS_LEAF0_STATUS_PARITY_ERR_DET_MASK 0x00000004L
++#define GDCSOC_RAS_LEAF0_STATUS__GDCSOC_RAS_LEAF0_STATUS_ERR_EVENT_GENN_STAT_MASK 0x00000100L
++#define GDCSOC_RAS_LEAF0_STATUS__GDCSOC_RAS_LEAF0_STATUS_EGRESS_STALLED_GENN_STAT_MASK 0x00000200L
++#define GDCSOC_RAS_LEAF0_STATUS__GDCSOC_RAS_LEAF0_STATUS_ERR_EVENT_PROP_STAT_MASK 0x00000400L
++#define GDCSOC_RAS_LEAF0_STATUS__GDCSOC_RAS_LEAF0_STATUS_EGRESS_STALLED_PROP_STAT_MASK 0x00000800L
++//GDCSOC_RAS_LEAF1_STATUS
++#define GDCSOC_RAS_LEAF1_STATUS__GDCSOC_RAS_LEAF1_STATUS_ERR_EVENT_RECV__SHIFT 0x0
++#define GDCSOC_RAS_LEAF1_STATUS__GDCSOC_RAS_LEAF1_STATUS_POISON_ERR_DET__SHIFT 0x1
++#define GDCSOC_RAS_LEAF1_STATUS__GDCSOC_RAS_LEAF1_STATUS_PARITY_ERR_DET__SHIFT 0x2
++#define GDCSOC_RAS_LEAF1_STATUS__GDCSOC_RAS_LEAF1_STATUS_ERR_EVENT_GENN_STAT__SHIFT 0x8
++#define GDCSOC_RAS_LEAF1_STATUS__GDCSOC_RAS_LEAF1_STATUS_EGRESS_STALLED_GENN_STAT__SHIFT 0x9
++#define GDCSOC_RAS_LEAF1_STATUS__GDCSOC_RAS_LEAF1_STATUS_ERR_EVENT_PROP_STAT__SHIFT 0xa
++#define GDCSOC_RAS_LEAF1_STATUS__GDCSOC_RAS_LEAF1_STATUS_EGRESS_STALLED_PROP_STAT__SHIFT 0xb
++#define GDCSOC_RAS_LEAF1_STATUS__GDCSOC_RAS_LEAF1_STATUS_ERR_EVENT_RECV_MASK 0x00000001L
++#define GDCSOC_RAS_LEAF1_STATUS__GDCSOC_RAS_LEAF1_STATUS_POISON_ERR_DET_MASK 0x00000002L
++#define GDCSOC_RAS_LEAF1_STATUS__GDCSOC_RAS_LEAF1_STATUS_PARITY_ERR_DET_MASK 0x00000004L
++#define GDCSOC_RAS_LEAF1_STATUS__GDCSOC_RAS_LEAF1_STATUS_ERR_EVENT_GENN_STAT_MASK 0x00000100L
++#define GDCSOC_RAS_LEAF1_STATUS__GDCSOC_RAS_LEAF1_STATUS_EGRESS_STALLED_GENN_STAT_MASK 0x00000200L
++#define GDCSOC_RAS_LEAF1_STATUS__GDCSOC_RAS_LEAF1_STATUS_ERR_EVENT_PROP_STAT_MASK 0x00000400L
++#define GDCSOC_RAS_LEAF1_STATUS__GDCSOC_RAS_LEAF1_STATUS_EGRESS_STALLED_PROP_STAT_MASK 0x00000800L
++//GDCSOC_RAS_LEAF2_STATUS
++#define GDCSOC_RAS_LEAF2_STATUS__GDCSOC_RAS_LEAF2_STATUS_ERR_EVENT_RECV__SHIFT 0x0
++#define GDCSOC_RAS_LEAF2_STATUS__GDCSOC_RAS_LEAF2_STATUS_POISON_ERR_DET__SHIFT 0x1
++#define GDCSOC_RAS_LEAF2_STATUS__GDCSOC_RAS_LEAF2_STATUS_PARITY_ERR_DET__SHIFT 0x2
++#define GDCSOC_RAS_LEAF2_STATUS__GDCSOC_RAS_LEAF2_STATUS_ERR_EVENT_GENN_STAT__SHIFT 0x8
++#define GDCSOC_RAS_LEAF2_STATUS__GDCSOC_RAS_LEAF2_STATUS_EGRESS_STALLED_GENN_STAT__SHIFT 0x9
++#define GDCSOC_RAS_LEAF2_STATUS__GDCSOC_RAS_LEAF2_STATUS_ERR_EVENT_PROP_STAT__SHIFT 0xa
++#define GDCSOC_RAS_LEAF2_STATUS__GDCSOC_RAS_LEAF2_STATUS_EGRESS_STALLED_PROP_STAT__SHIFT 0xb
++#define GDCSOC_RAS_LEAF2_STATUS__GDCSOC_RAS_LEAF2_STATUS_ERR_EVENT_RECV_MASK 0x00000001L
++#define GDCSOC_RAS_LEAF2_STATUS__GDCSOC_RAS_LEAF2_STATUS_POISON_ERR_DET_MASK 0x00000002L
++#define GDCSOC_RAS_LEAF2_STATUS__GDCSOC_RAS_LEAF2_STATUS_PARITY_ERR_DET_MASK 0x00000004L
++#define GDCSOC_RAS_LEAF2_STATUS__GDCSOC_RAS_LEAF2_STATUS_ERR_EVENT_GENN_STAT_MASK 0x00000100L
++#define GDCSOC_RAS_LEAF2_STATUS__GDCSOC_RAS_LEAF2_STATUS_EGRESS_STALLED_GENN_STAT_MASK 0x00000200L
++#define GDCSOC_RAS_LEAF2_STATUS__GDCSOC_RAS_LEAF2_STATUS_ERR_EVENT_PROP_STAT_MASK 0x00000400L
++#define GDCSOC_RAS_LEAF2_STATUS__GDCSOC_RAS_LEAF2_STATUS_EGRESS_STALLED_PROP_STAT_MASK 0x00000800L
++//GDCSOC_RAS_LEAF3_STATUS
++#define GDCSOC_RAS_LEAF3_STATUS__GDCSOC_RAS_LEAF3_STATUS_ERR_EVENT_RECV__SHIFT 0x0
++#define GDCSOC_RAS_LEAF3_STATUS__GDCSOC_RAS_LEAF3_STATUS_POISON_ERR_DET__SHIFT 0x1
++#define GDCSOC_RAS_LEAF3_STATUS__GDCSOC_RAS_LEAF3_STATUS_PARITY_ERR_DET__SHIFT 0x2
++#define GDCSOC_RAS_LEAF3_STATUS__GDCSOC_RAS_LEAF3_STATUS_ERR_EVENT_GENN_STAT__SHIFT 0x8
++#define GDCSOC_RAS_LEAF3_STATUS__GDCSOC_RAS_LEAF3_STATUS_EGRESS_STALLED_GENN_STAT__SHIFT 0x9
++#define GDCSOC_RAS_LEAF3_STATUS__GDCSOC_RAS_LEAF3_STATUS_ERR_EVENT_PROP_STAT__SHIFT 0xa
++#define GDCSOC_RAS_LEAF3_STATUS__GDCSOC_RAS_LEAF3_STATUS_EGRESS_STALLED_PROP_STAT__SHIFT 0xb
++#define GDCSOC_RAS_LEAF3_STATUS__GDCSOC_RAS_LEAF3_STATUS_ERR_EVENT_RECV_MASK 0x00000001L
++#define GDCSOC_RAS_LEAF3_STATUS__GDCSOC_RAS_LEAF3_STATUS_POISON_ERR_DET_MASK 0x00000002L
++#define GDCSOC_RAS_LEAF3_STATUS__GDCSOC_RAS_LEAF3_STATUS_PARITY_ERR_DET_MASK 0x00000004L
++#define GDCSOC_RAS_LEAF3_STATUS__GDCSOC_RAS_LEAF3_STATUS_ERR_EVENT_GENN_STAT_MASK 0x00000100L
++#define GDCSOC_RAS_LEAF3_STATUS__GDCSOC_RAS_LEAF3_STATUS_EGRESS_STALLED_GENN_STAT_MASK 0x00000200L
++#define GDCSOC_RAS_LEAF3_STATUS__GDCSOC_RAS_LEAF3_STATUS_ERR_EVENT_PROP_STAT_MASK 0x00000400L
++#define GDCSOC_RAS_LEAF3_STATUS__GDCSOC_RAS_LEAF3_STATUS_EGRESS_STALLED_PROP_STAT_MASK 0x00000800L
++//GDCSOC_RAS_LEAF4_STATUS
++#define GDCSOC_RAS_LEAF4_STATUS__GDCSOC_RAS_LEAF4_STATUS_ERR_EVENT_RECV__SHIFT 0x0
++#define GDCSOC_RAS_LEAF4_STATUS__GDCSOC_RAS_LEAF4_STATUS_POISON_ERR_DET__SHIFT 0x1
++#define GDCSOC_RAS_LEAF4_STATUS__GDCSOC_RAS_LEAF4_STATUS_PARITY_ERR_DET__SHIFT 0x2
++#define GDCSOC_RAS_LEAF4_STATUS__GDCSOC_RAS_LEAF4_STATUS_ERR_EVENT_GENN_STAT__SHIFT 0x8
++#define GDCSOC_RAS_LEAF4_STATUS__GDCSOC_RAS_LEAF4_STATUS_EGRESS_STALLED_GENN_STAT__SHIFT 0x9
++#define GDCSOC_RAS_LEAF4_STATUS__GDCSOC_RAS_LEAF4_STATUS_ERR_EVENT_PROP_STAT__SHIFT 0xa
++#define GDCSOC_RAS_LEAF4_STATUS__GDCSOC_RAS_LEAF4_STATUS_EGRESS_STALLED_PROP_STAT__SHIFT 0xb
++#define GDCSOC_RAS_LEAF4_STATUS__GDCSOC_RAS_LEAF4_STATUS_ERR_EVENT_RECV_MASK 0x00000001L
++#define GDCSOC_RAS_LEAF4_STATUS__GDCSOC_RAS_LEAF4_STATUS_POISON_ERR_DET_MASK 0x00000002L
++#define GDCSOC_RAS_LEAF4_STATUS__GDCSOC_RAS_LEAF4_STATUS_PARITY_ERR_DET_MASK 0x00000004L
++#define GDCSOC_RAS_LEAF4_STATUS__GDCSOC_RAS_LEAF4_STATUS_ERR_EVENT_GENN_STAT_MASK 0x00000100L
++#define GDCSOC_RAS_LEAF4_STATUS__GDCSOC_RAS_LEAF4_STATUS_EGRESS_STALLED_GENN_STAT_MASK 0x00000200L
++#define GDCSOC_RAS_LEAF4_STATUS__GDCSOC_RAS_LEAF4_STATUS_ERR_EVENT_PROP_STAT_MASK 0x00000400L
++#define GDCSOC_RAS_LEAF4_STATUS__GDCSOC_RAS_LEAF4_STATUS_EGRESS_STALLED_PROP_STAT_MASK 0x00000800L
++//GDCSOC_RAS_LEAF5_STATUS
++#define GDCSOC_RAS_LEAF5_STATUS__GDCSOC_RAS_LEAF5_STATUS_ERR_EVENT_RECV__SHIFT 0x0
++#define GDCSOC_RAS_LEAF5_STATUS__GDCSOC_RAS_LEAF5_STATUS_POISON_ERR_DET__SHIFT 0x1
++#define GDCSOC_RAS_LEAF5_STATUS__GDCSOC_RAS_LEAF5_STATUS_PARITY_ERR_DET__SHIFT 0x2
++#define GDCSOC_RAS_LEAF5_STATUS__GDCSOC_RAS_LEAF5_STATUS_ERR_EVENT_GENN_STAT__SHIFT 0x8
++#define GDCSOC_RAS_LEAF5_STATUS__GDCSOC_RAS_LEAF5_STATUS_EGRESS_STALLED_GENN_STAT__SHIFT 0x9
++#define GDCSOC_RAS_LEAF5_STATUS__GDCSOC_RAS_LEAF5_STATUS_ERR_EVENT_PROP_STAT__SHIFT 0xa
++#define GDCSOC_RAS_LEAF5_STATUS__GDCSOC_RAS_LEAF5_STATUS_EGRESS_STALLED_PROP_STAT__SHIFT 0xb
++#define GDCSOC_RAS_LEAF5_STATUS__GDCSOC_RAS_LEAF5_STATUS_ERR_EVENT_RECV_MASK 0x00000001L
++#define GDCSOC_RAS_LEAF5_STATUS__GDCSOC_RAS_LEAF5_STATUS_POISON_ERR_DET_MASK 0x00000002L
++#define GDCSOC_RAS_LEAF5_STATUS__GDCSOC_RAS_LEAF5_STATUS_PARITY_ERR_DET_MASK 0x00000004L
++#define GDCSOC_RAS_LEAF5_STATUS__GDCSOC_RAS_LEAF5_STATUS_ERR_EVENT_GENN_STAT_MASK 0x00000100L
++#define GDCSOC_RAS_LEAF5_STATUS__GDCSOC_RAS_LEAF5_STATUS_EGRESS_STALLED_GENN_STAT_MASK 0x00000200L
++#define GDCSOC_RAS_LEAF5_STATUS__GDCSOC_RAS_LEAF5_STATUS_ERR_EVENT_PROP_STAT_MASK 0x00000400L
++#define GDCSOC_RAS_LEAF5_STATUS__GDCSOC_RAS_LEAF5_STATUS_EGRESS_STALLED_PROP_STAT_MASK 0x00000800L
++//GDCSOC_RAS_LEAF6_STATUS
++#define GDCSOC_RAS_LEAF6_STATUS__GDCSOC_RAS_LEAF6_STATUS_ERR_EVENT_RECV__SHIFT 0x0
++#define GDCSOC_RAS_LEAF6_STATUS__GDCSOC_RAS_LEAF6_STATUS_POISON_ERR_DET__SHIFT 0x1
++#define GDCSOC_RAS_LEAF6_STATUS__GDCSOC_RAS_LEAF6_STATUS_PARITY_ERR_DET__SHIFT 0x2
++#define GDCSOC_RAS_LEAF6_STATUS__GDCSOC_RAS_LEAF6_STATUS_ERR_EVENT_GENN_STAT__SHIFT 0x8
++#define GDCSOC_RAS_LEAF6_STATUS__GDCSOC_RAS_LEAF6_STATUS_EGRESS_STALLED_GENN_STAT__SHIFT 0x9
++#define GDCSOC_RAS_LEAF6_STATUS__GDCSOC_RAS_LEAF6_STATUS_ERR_EVENT_PROP_STAT__SHIFT 0xa
++#define GDCSOC_RAS_LEAF6_STATUS__GDCSOC_RAS_LEAF6_STATUS_EGRESS_STALLED_PROP_STAT__SHIFT 0xb
++#define GDCSOC_RAS_LEAF6_STATUS__GDCSOC_RAS_LEAF6_STATUS_ERR_EVENT_RECV_MASK 0x00000001L
++#define GDCSOC_RAS_LEAF6_STATUS__GDCSOC_RAS_LEAF6_STATUS_POISON_ERR_DET_MASK 0x00000002L
++#define GDCSOC_RAS_LEAF6_STATUS__GDCSOC_RAS_LEAF6_STATUS_PARITY_ERR_DET_MASK 0x00000004L
++#define GDCSOC_RAS_LEAF6_STATUS__GDCSOC_RAS_LEAF6_STATUS_ERR_EVENT_GENN_STAT_MASK 0x00000100L
++#define GDCSOC_RAS_LEAF6_STATUS__GDCSOC_RAS_LEAF6_STATUS_EGRESS_STALLED_GENN_STAT_MASK 0x00000200L
++#define GDCSOC_RAS_LEAF6_STATUS__GDCSOC_RAS_LEAF6_STATUS_ERR_EVENT_PROP_STAT_MASK 0x00000400L
++#define GDCSOC_RAS_LEAF6_STATUS__GDCSOC_RAS_LEAF6_STATUS_EGRESS_STALLED_PROP_STAT_MASK 0x00000800L
++//GDCSHUB_RAS_CENTRAL_STATUS
++#define GDCSHUB_RAS_CENTRAL_STATUS__GDCSHUB_L2C_EgStall_det__SHIFT 0x0
++#define GDCSHUB_RAS_CENTRAL_STATUS__GDCSHUB_L2C_ErrEvent_det__SHIFT 0x1
++#define GDCSHUB_RAS_CENTRAL_STATUS__GDCSHUB_C2L_EgStall_det__SHIFT 0x2
++#define GDCSHUB_RAS_CENTRAL_STATUS__GDCSHUB_C2L_ErrEvent_det__SHIFT 0x3
++#define GDCSHUB_RAS_CENTRAL_STATUS__GDCSHUB_L2C_EgStall_det_MASK 0x00000001L
++#define GDCSHUB_RAS_CENTRAL_STATUS__GDCSHUB_L2C_ErrEvent_det_MASK 0x00000002L
++#define GDCSHUB_RAS_CENTRAL_STATUS__GDCSHUB_C2L_EgStall_det_MASK 0x00000004L
++#define GDCSHUB_RAS_CENTRAL_STATUS__GDCSHUB_C2L_ErrEvent_det_MASK 0x00000008L
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_swds_bifcfgdecp
++//BIF_CFG_DEV0_SWDS0_VENDOR_ID
++#define BIF_CFG_DEV0_SWDS0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_SWDS0_DEVICE_ID
++#define BIF_CFG_DEV0_SWDS0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_SWDS0_COMMAND
++#define BIF_CFG_DEV0_SWDS0_COMMAND__IOEN_DN__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_COMMAND__MEMEN_DN__SHIFT 0x1
++#define BIF_CFG_DEV0_SWDS0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_SWDS0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_SWDS0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_COMMAND__AD_STEPPING__SHIFT 0x7
++#define BIF_CFG_DEV0_SWDS0_COMMAND__SERR_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_COMMAND__FAST_B2B_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_SWDS0_COMMAND__INT_DIS__SHIFT 0xa
++#define BIF_CFG_DEV0_SWDS0_COMMAND__IOEN_DN_MASK 0x0001L
++#define BIF_CFG_DEV0_SWDS0_COMMAND__MEMEN_DN_MASK 0x0002L
++#define BIF_CFG_DEV0_SWDS0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_SWDS0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_SWDS0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_SWDS0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_SWDS0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_COMMAND__AD_STEPPING_MASK 0x0080L
++#define BIF_CFG_DEV0_SWDS0_COMMAND__SERR_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_SWDS0_COMMAND__FAST_B2B_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_SWDS0_COMMAND__INT_DIS_MASK 0x0400L
++//BIF_CFG_DEV0_SWDS0_STATUS
++#define BIF_CFG_DEV0_SWDS0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_STATUS__INT_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_STATUS__CAP_LIST__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_STATUS__PCI_66_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_SWDS0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
++#define BIF_CFG_DEV0_SWDS0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_STATUS__DEVSEL_TIMING__SHIFT 0x9
++#define BIF_CFG_DEV0_SWDS0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
++#define BIF_CFG_DEV0_SWDS0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
++#define BIF_CFG_DEV0_SWDS0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
++#define BIF_CFG_DEV0_SWDS0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
++#define BIF_CFG_DEV0_SWDS0_STATUS__INT_STATUS_MASK 0x0008L
++#define BIF_CFG_DEV0_SWDS0_STATUS__CAP_LIST_MASK 0x0010L
++#define BIF_CFG_DEV0_SWDS0_STATUS__PCI_66_CAP_MASK 0x0020L
++#define BIF_CFG_DEV0_SWDS0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
++#define BIF_CFG_DEV0_SWDS0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
++#define BIF_CFG_DEV0_SWDS0_STATUS__DEVSEL_TIMING_MASK 0x0600L
++#define BIF_CFG_DEV0_SWDS0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
++#define BIF_CFG_DEV0_SWDS0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
++#define BIF_CFG_DEV0_SWDS0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
++#define BIF_CFG_DEV0_SWDS0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
++#define BIF_CFG_DEV0_SWDS0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
++//BIF_CFG_DEV0_SWDS0_REVISION_ID
++#define BIF_CFG_DEV0_SWDS0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
++#define BIF_CFG_DEV0_SWDS0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
++//BIF_CFG_DEV0_SWDS0_PROG_INTERFACE
++#define BIF_CFG_DEV0_SWDS0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
++//BIF_CFG_DEV0_SWDS0_SUB_CLASS
++#define BIF_CFG_DEV0_SWDS0_SUB_CLASS__SUB_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_SUB_CLASS__SUB_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_SWDS0_BASE_CLASS
++#define BIF_CFG_DEV0_SWDS0_BASE_CLASS__BASE_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_BASE_CLASS__BASE_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_SWDS0_CACHE_LINE
++#define BIF_CFG_DEV0_SWDS0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
++//BIF_CFG_DEV0_SWDS0_LATENCY
++#define BIF_CFG_DEV0_SWDS0_LATENCY__LATENCY_TIMER__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LATENCY__LATENCY_TIMER_MASK 0xFFL
++//BIF_CFG_DEV0_SWDS0_HEADER
++#define BIF_CFG_DEV0_SWDS0_HEADER__HEADER_TYPE__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_HEADER__DEVICE_TYPE__SHIFT 0x7
++#define BIF_CFG_DEV0_SWDS0_HEADER__HEADER_TYPE_MASK 0x7FL
++#define BIF_CFG_DEV0_SWDS0_HEADER__DEVICE_TYPE_MASK 0x80L
++//BIF_CFG_DEV0_SWDS0_BIST
++#define BIF_CFG_DEV0_SWDS0_BIST__BIST_COMP__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_BIST__BIST_STRT__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_BIST__BIST_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_SWDS0_BIST__BIST_COMP_MASK 0x0FL
++#define BIF_CFG_DEV0_SWDS0_BIST__BIST_STRT_MASK 0x40L
++#define BIF_CFG_DEV0_SWDS0_BIST__BIST_CAP_MASK 0x80L
++//BIF_CFG_DEV0_SWDS0_BASE_ADDR_1
++#define BIF_CFG_DEV0_SWDS0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_SWDS0_SUB_BUS_NUMBER_LATENCY
++#define BIF_CFG_DEV0_SWDS0_SUB_BUS_NUMBER_LATENCY__PRIMARY_BUS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_SUB_BUS_NUMBER_LATENCY__SECONDARY_BUS__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_SUB_BUS_NUMBER_LATENCY__SUB_BUS_NUM__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_SUB_BUS_NUMBER_LATENCY__SECONDARY_LATENCY_TIMER__SHIFT 0x18
++#define BIF_CFG_DEV0_SWDS0_SUB_BUS_NUMBER_LATENCY__PRIMARY_BUS_MASK 0x000000FFL
++#define BIF_CFG_DEV0_SWDS0_SUB_BUS_NUMBER_LATENCY__SECONDARY_BUS_MASK 0x0000FF00L
++#define BIF_CFG_DEV0_SWDS0_SUB_BUS_NUMBER_LATENCY__SUB_BUS_NUM_MASK 0x00FF0000L
++#define BIF_CFG_DEV0_SWDS0_SUB_BUS_NUMBER_LATENCY__SECONDARY_LATENCY_TIMER_MASK 0xFF000000L
++//BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT
++#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT__IO_BASE_TYPE__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT__IO_BASE__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT__IO_LIMIT_TYPE__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT__IO_LIMIT__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT__IO_BASE_TYPE_MASK 0x000FL
++#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT__IO_BASE_MASK 0x00F0L
++#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT__IO_LIMIT_TYPE_MASK 0x0F00L
++#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT__IO_LIMIT_MASK 0xF000L
++//BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS
++#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__PCI_66_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
++#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__DEVSEL_TIMING__SHIFT 0x9
++#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
++#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
++#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__RECEIVED_SYSTEM_ERROR__SHIFT 0xe
++#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__PCI_66_CAP_MASK 0x0020L
++#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
++#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
++#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__DEVSEL_TIMING_MASK 0x0600L
++#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
++#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
++#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
++#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__RECEIVED_SYSTEM_ERROR_MASK 0x4000L
++#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
++//BIF_CFG_DEV0_SWDS0_MEM_BASE_LIMIT
++#define BIF_CFG_DEV0_SWDS0_MEM_BASE_LIMIT__MEM_BASE_TYPE__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_MEM_BASE_LIMIT__MEM_BASE_31_20__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_MEM_BASE_LIMIT__MEM_LIMIT_TYPE__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_MEM_BASE_LIMIT__MEM_LIMIT_31_20__SHIFT 0x14
++#define BIF_CFG_DEV0_SWDS0_MEM_BASE_LIMIT__MEM_BASE_TYPE_MASK 0x0000000FL
++#define BIF_CFG_DEV0_SWDS0_MEM_BASE_LIMIT__MEM_BASE_31_20_MASK 0x0000FFF0L
++#define BIF_CFG_DEV0_SWDS0_MEM_BASE_LIMIT__MEM_LIMIT_TYPE_MASK 0x000F0000L
++#define BIF_CFG_DEV0_SWDS0_MEM_BASE_LIMIT__MEM_LIMIT_31_20_MASK 0xFFF00000L
++//BIF_CFG_DEV0_SWDS0_PREF_BASE_LIMIT
++#define BIF_CFG_DEV0_SWDS0_PREF_BASE_LIMIT__PREF_MEM_BASE_TYPE__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PREF_BASE_LIMIT__PREF_MEM_BASE_31_20__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PREF_BASE_LIMIT__PREF_MEM_LIMIT_TYPE__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_PREF_BASE_LIMIT__PREF_MEM_LIMIT_31_20__SHIFT 0x14
++#define BIF_CFG_DEV0_SWDS0_PREF_BASE_LIMIT__PREF_MEM_BASE_TYPE_MASK 0x0000000FL
++#define BIF_CFG_DEV0_SWDS0_PREF_BASE_LIMIT__PREF_MEM_BASE_31_20_MASK 0x0000FFF0L
++#define BIF_CFG_DEV0_SWDS0_PREF_BASE_LIMIT__PREF_MEM_LIMIT_TYPE_MASK 0x000F0000L
++#define BIF_CFG_DEV0_SWDS0_PREF_BASE_LIMIT__PREF_MEM_LIMIT_31_20_MASK 0xFFF00000L
++//BIF_CFG_DEV0_SWDS0_PREF_BASE_UPPER
++#define BIF_CFG_DEV0_SWDS0_PREF_BASE_UPPER__PREF_BASE_UPPER__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PREF_BASE_UPPER__PREF_BASE_UPPER_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_SWDS0_PREF_LIMIT_UPPER
++#define BIF_CFG_DEV0_SWDS0_PREF_LIMIT_UPPER__PREF_LIMIT_UPPER__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PREF_LIMIT_UPPER__PREF_LIMIT_UPPER_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT_HI
++#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT_HI__IO_BASE_31_16__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT_HI__IO_LIMIT_31_16__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT_HI__IO_BASE_31_16_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT_HI__IO_LIMIT_31_16_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_SWDS0_CAP_PTR
++#define BIF_CFG_DEV0_SWDS0_CAP_PTR__CAP_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_CAP_PTR__CAP_PTR_MASK 0x000000FFL
++//BIF_CFG_DEV0_SWDS0_INTERRUPT_LINE
++#define BIF_CFG_DEV0_SWDS0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
++//BIF_CFG_DEV0_SWDS0_INTERRUPT_PIN
++#define BIF_CFG_DEV0_SWDS0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
++//BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL
++#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__PARITY_RESPONSE_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__SERR_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__ISA_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__VGA_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__VGA_DEC__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__MASTER_ABORT_MODE__SHIFT 0x5
++#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__SECONDARY_BUS_RESET__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__FAST_B2B_EN__SHIFT 0x7
++#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__PARITY_RESPONSE_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__SERR_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__ISA_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__VGA_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__VGA_DEC_MASK 0x0010L
++#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__MASTER_ABORT_MODE_MASK 0x0020L
++#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__SECONDARY_BUS_RESET_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__FAST_B2B_EN_MASK 0x0080L
++//BIF_CFG_DEV0_SWDS0_PMI_CAP_LIST
++#define BIF_CFG_DEV0_SWDS0_PMI_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PMI_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PMI_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_SWDS0_PMI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_PMI_CAP
++#define BIF_CFG_DEV0_SWDS0_PMI_CAP__VERSION__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PMI_CAP__PME_CLOCK__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PMI_CAP__DEV_SPECIFIC_INIT__SHIFT 0x5
++#define BIF_CFG_DEV0_SWDS0_PMI_CAP__AUX_CURRENT__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_PMI_CAP__D1_SUPPORT__SHIFT 0x9
++#define BIF_CFG_DEV0_SWDS0_PMI_CAP__D2_SUPPORT__SHIFT 0xa
++#define BIF_CFG_DEV0_SWDS0_PMI_CAP__PME_SUPPORT__SHIFT 0xb
++#define BIF_CFG_DEV0_SWDS0_PMI_CAP__VERSION_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_PMI_CAP__PME_CLOCK_MASK 0x0008L
++#define BIF_CFG_DEV0_SWDS0_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0_MASK 0x0010L
++#define BIF_CFG_DEV0_SWDS0_PMI_CAP__DEV_SPECIFIC_INIT_MASK 0x0020L
++#define BIF_CFG_DEV0_SWDS0_PMI_CAP__AUX_CURRENT_MASK 0x01C0L
++#define BIF_CFG_DEV0_SWDS0_PMI_CAP__D1_SUPPORT_MASK 0x0200L
++#define BIF_CFG_DEV0_SWDS0_PMI_CAP__D2_SUPPORT_MASK 0x0400L
++#define BIF_CFG_DEV0_SWDS0_PMI_CAP__PME_SUPPORT_MASK 0xF800L
++//BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL
++#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__POWER_STATE__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__NO_SOFT_RESET__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__PME_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__DATA_SELECT__SHIFT 0x9
++#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__DATA_SCALE__SHIFT 0xd
++#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__PME_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__B2_B3_SUPPORT__SHIFT 0x16
++#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__BUS_PWR_EN__SHIFT 0x17
++#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__PMI_DATA__SHIFT 0x18
++#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__POWER_STATE_MASK 0x00000003L
++#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__NO_SOFT_RESET_MASK 0x00000008L
++#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__PME_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__DATA_SELECT_MASK 0x00001E00L
++#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__DATA_SCALE_MASK 0x00006000L
++#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__PME_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__B2_B3_SUPPORT_MASK 0x00400000L
++#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__BUS_PWR_EN_MASK 0x00800000L
++#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__PMI_DATA_MASK 0xFF000000L
++//BIF_CFG_DEV0_SWDS0_PCIE_CAP_LIST
++#define BIF_CFG_DEV0_SWDS0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_SWDS0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_PCIE_CAP
++#define BIF_CFG_DEV0_SWDS0_PCIE_CAP__VERSION__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
++#define BIF_CFG_DEV0_SWDS0_PCIE_CAP__VERSION_MASK 0x000FL
++#define BIF_CFG_DEV0_SWDS0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
++#define BIF_CFG_DEV0_SWDS0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
++#define BIF_CFG_DEV0_SWDS0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
++//BIF_CFG_DEV0_SWDS0_DEVICE_CAP
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
++//BIF_CFG_DEV0_SWDS0_DEVICE_CNTL
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__BRIDGE_CFG_RETRY_EN__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__BRIDGE_CFG_RETRY_EN_MASK 0x8000L
++//BIF_CFG_DEV0_SWDS0_DEVICE_STATUS
++#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
++#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
++#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
++#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
++//BIF_CFG_DEV0_SWDS0_LINK_CAP
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP__LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP__LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP__PM_SUPPORT__SHIFT 0xa
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP__PORT_NUMBER__SHIFT 0x18
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
++//BIF_CFG_DEV0_SWDS0_LINK_CNTL
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__PM_CONTROL__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__LINK_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__PM_CONTROL_MASK 0x0003L
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__LINK_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
++//BIF_CFG_DEV0_SWDS0_LINK_STATUS
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
++//BIF_CFG_DEV0_SWDS0_SLOT_CAP
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__ATTN_BUTTON_PRESENT__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__PWR_CONTROLLER_PRESENT__SHIFT 0x1
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__MRL_SENSOR_PRESENT__SHIFT 0x2
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__ATTN_INDICATOR_PRESENT__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__PWR_INDICATOR_PRESENT__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__HOTPLUG_SURPRISE__SHIFT 0x5
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__HOTPLUG_CAPABLE__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__SLOT_PWR_LIMIT_VALUE__SHIFT 0x7
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__SLOT_PWR_LIMIT_SCALE__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__ELECTROMECH_INTERLOCK_PRESENT__SHIFT 0x11
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__NO_COMMAND_COMPLETED_SUPPORTED__SHIFT 0x12
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__PHYSICAL_SLOT_NUM__SHIFT 0x13
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__ATTN_BUTTON_PRESENT_MASK 0x00000001L
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__PWR_CONTROLLER_PRESENT_MASK 0x00000002L
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__MRL_SENSOR_PRESENT_MASK 0x00000004L
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__ATTN_INDICATOR_PRESENT_MASK 0x00000008L
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__PWR_INDICATOR_PRESENT_MASK 0x00000010L
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__HOTPLUG_SURPRISE_MASK 0x00000020L
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__HOTPLUG_CAPABLE_MASK 0x00000040L
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__SLOT_PWR_LIMIT_VALUE_MASK 0x00007F80L
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__SLOT_PWR_LIMIT_SCALE_MASK 0x00018000L
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__ELECTROMECH_INTERLOCK_PRESENT_MASK 0x00020000L
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__NO_COMMAND_COMPLETED_SUPPORTED_MASK 0x00040000L
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__PHYSICAL_SLOT_NUM_MASK 0xFFF80000L
++//BIF_CFG_DEV0_SWDS0_SLOT_CNTL
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__ATTN_BUTTON_PRESSED_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__PWR_FAULT_DETECTED_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__MRL_SENSOR_CHANGED_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__PRESENCE_DETECT_CHANGED_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__COMMAND_COMPLETED_INTR_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__HOTPLUG_INTR_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__ATTN_INDICATOR_CNTL__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__PWR_INDICATOR_CNTL__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__PWR_CONTROLLER_CNTL__SHIFT 0xa
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__ELECTROMECH_INTERLOCK_CNTL__SHIFT 0xb
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__DL_STATE_CHANGED_EN__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__ATTN_BUTTON_PRESSED_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__PWR_FAULT_DETECTED_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__MRL_SENSOR_CHANGED_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__PRESENCE_DETECT_CHANGED_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__COMMAND_COMPLETED_INTR_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__HOTPLUG_INTR_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__ATTN_INDICATOR_CNTL_MASK 0x00C0L
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__PWR_INDICATOR_CNTL_MASK 0x0300L
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__PWR_CONTROLLER_CNTL_MASK 0x0400L
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__ELECTROMECH_INTERLOCK_CNTL_MASK 0x0800L
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__DL_STATE_CHANGED_EN_MASK 0x1000L
++//BIF_CFG_DEV0_SWDS0_SLOT_STATUS
++#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__ATTN_BUTTON_PRESSED__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__PWR_FAULT_DETECTED__SHIFT 0x1
++#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__MRL_SENSOR_CHANGED__SHIFT 0x2
++#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__PRESENCE_DETECT_CHANGED__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__COMMAND_COMPLETED__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__MRL_SENSOR_STATE__SHIFT 0x5
++#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__PRESENCE_DETECT_STATE__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__ELECTROMECH_INTERLOCK_STATUS__SHIFT 0x7
++#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__DL_STATE_CHANGED__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__ATTN_BUTTON_PRESSED_MASK 0x0001L
++#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__PWR_FAULT_DETECTED_MASK 0x0002L
++#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__MRL_SENSOR_CHANGED_MASK 0x0004L
++#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__PRESENCE_DETECT_CHANGED_MASK 0x0008L
++#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__COMMAND_COMPLETED_MASK 0x0010L
++#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__MRL_SENSOR_STATE_MASK 0x0020L
++#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__PRESENCE_DETECT_STATE_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__ELECTROMECH_INTERLOCK_STATUS_MASK 0x0080L
++#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__DL_STATE_CHANGED_MASK 0x0100L
++//BIF_CFG_DEV0_SWDS0_DEVICE_CAP2
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
++//BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
++#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
++//BIF_CFG_DEV0_SWDS0_DEVICE_STATUS2
++#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_SWDS0_LINK_CAP2
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP2__RESERVED__SHIFT 0x19
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP2__RESERVED_MASK 0xFE000000L
++//BIF_CFG_DEV0_SWDS0_LINK_CNTL2
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
++//BIF_CFG_DEV0_SWDS0_LINK_STATUS2
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
++//BIF_CFG_DEV0_SWDS0_SLOT_CAP2
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_SWDS0_SLOT_CNTL2
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_SWDS0_SLOT_STATUS2
++#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_SWDS0_MSI_CAP_LIST
++#define BIF_CFG_DEV0_SWDS0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_SWDS0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_MSI_MSG_CNTL
++#define BIF_CFG_DEV0_SWDS0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_SWDS0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
++#define BIF_CFG_DEV0_SWDS0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_SWDS0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
++#define BIF_CFG_DEV0_SWDS0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
++#define BIF_CFG_DEV0_SWDS0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
++#define BIF_CFG_DEV0_SWDS0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
++//BIF_CFG_DEV0_SWDS0_MSI_MSG_ADDR_LO
++#define BIF_CFG_DEV0_SWDS0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
++#define BIF_CFG_DEV0_SWDS0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//BIF_CFG_DEV0_SWDS0_MSI_MSG_ADDR_HI
++#define BIF_CFG_DEV0_SWDS0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_SWDS0_MSI_MSG_DATA
++#define BIF_CFG_DEV0_SWDS0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_SWDS0_MSI_MSG_DATA_64
++#define BIF_CFG_DEV0_SWDS0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_SWDS0_SSID_CAP_LIST
++#define BIF_CFG_DEV0_SWDS0_SSID_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_SSID_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_SSID_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_SWDS0_SSID_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_SSID_CAP
++#define BIF_CFG_DEV0_SWDS0_SSID_CAP__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_SSID_CAP__SUBSYSTEM_ID__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_SSID_CAP__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_SWDS0_SSID_CAP__SUBSYSTEM_ID_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_HDR
++#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
++#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
++//BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC1
++#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC2
++#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_SWDS0_PCIE_VC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG1
++#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG1__EXT_VC_COUNT__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG1__LOW_PRIORITY_EXT_VC_COUNT__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG1__REF_CLK__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG1__PORT_ARB_TABLE_ENTRY_SIZE__SHIFT 0xa
++#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG1__EXT_VC_COUNT_MASK 0x00000007L
++#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG1__LOW_PRIORITY_EXT_VC_COUNT_MASK 0x00000070L
++#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG1__REF_CLK_MASK 0x00000300L
++#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG1__PORT_ARB_TABLE_ENTRY_SIZE_MASK 0x00000C00L
++//BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG2
++#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG2__VC_ARB_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG2__VC_ARB_TABLE_OFFSET__SHIFT 0x18
++#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG2__VC_ARB_CAP_MASK 0x000000FFL
++#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG2__VC_ARB_TABLE_OFFSET_MASK 0xFF000000L
++//BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CNTL
++#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CNTL__LOAD_VC_ARB_TABLE__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CNTL__VC_ARB_SELECT__SHIFT 0x1
++#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CNTL__LOAD_VC_ARB_TABLE_MASK 0x0001L
++#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CNTL__VC_ARB_SELECT_MASK 0x000EL
++//BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_STATUS
++#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_STATUS__VC_ARB_TABLE_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_STATUS__VC_ARB_TABLE_STATUS_MASK 0x0001L
++//BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CAP
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CAP__REJECT_SNOOP_TRANS__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CAP__MAX_TIME_SLOTS__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET__SHIFT 0x18
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_CAP_MASK 0x000000FFL
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CAP__REJECT_SNOOP_TRANS_MASK 0x00008000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CAP__MAX_TIME_SLOTS_MASK 0x003F0000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET_MASK 0xFF000000L
++//BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC0__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC1_7__SHIFT 0x1
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__PORT_ARB_SELECT__SHIFT 0x11
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__VC_ID__SHIFT 0x18
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__VC_ENABLE__SHIFT 0x1f
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC0_MASK 0x00000001L
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC1_7_MASK 0x000000FEL
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE_MASK 0x00010000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__PORT_ARB_SELECT_MASK 0x000E0000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__VC_ID_MASK 0x07000000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__VC_ENABLE_MASK 0x80000000L
++//BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_STATUS
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_STATUS__VC_NEGOTIATION_PENDING__SHIFT 0x1
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS_MASK 0x0001L
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_STATUS__VC_NEGOTIATION_PENDING_MASK 0x0002L
++//BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CAP
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CAP__REJECT_SNOOP_TRANS__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CAP__MAX_TIME_SLOTS__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET__SHIFT 0x18
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_CAP_MASK 0x000000FFL
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CAP__REJECT_SNOOP_TRANS_MASK 0x00008000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CAP__MAX_TIME_SLOTS_MASK 0x003F0000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET_MASK 0xFF000000L
++//BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC0__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC1_7__SHIFT 0x1
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__PORT_ARB_SELECT__SHIFT 0x11
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__VC_ID__SHIFT 0x18
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__VC_ENABLE__SHIFT 0x1f
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC0_MASK 0x00000001L
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC1_7_MASK 0x000000FEL
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE_MASK 0x00010000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__PORT_ARB_SELECT_MASK 0x000E0000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__VC_ID_MASK 0x07000000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__VC_ENABLE_MASK 0x80000000L
++//BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_STATUS
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_STATUS__VC_NEGOTIATION_PENDING__SHIFT 0x1
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS_MASK 0x0001L
++#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_STATUS__VC_NEGOTIATION_PENDING_MASK 0x0002L
++//BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST
++#define BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_DW1
++#define BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_DW1__SERIAL_NUMBER_LO__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_DW1__SERIAL_NUMBER_LO_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_DW2
++#define BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_DW2__SERIAL_NUMBER_HI__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_DW2__SERIAL_NUMBER_HI_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
++//BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
++//BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
++//BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
++//BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
++//BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
++#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
++//BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG0
++#define BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG1
++#define BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG2
++#define BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG3
++#define BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG0
++#define BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG1
++#define BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG2
++#define BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG3
++#define BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_SWDS0_PCIE_SECONDARY_ENH_CAP_LIST
++#define BIF_CFG_DEV0_SWDS0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_PCIE_SECONDARY_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_SWDS0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_SWDS0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_SECONDARY_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_SWDS0_PCIE_LINK_CNTL3
++#define BIF_CFG_DEV0_SWDS0_PCIE_LINK_CNTL3__PERFORM_EQUALIZATION__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_LINK_CNTL3__LINK_EQUALIZATION_REQ_INT_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_SWDS0_PCIE_LINK_CNTL3__RESERVED__SHIFT 0x2
++#define BIF_CFG_DEV0_SWDS0_PCIE_LINK_CNTL3__PERFORM_EQUALIZATION_MASK 0x00000001L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LINK_CNTL3__LINK_EQUALIZATION_REQ_INT_EN_MASK 0x00000002L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LINK_CNTL3__RESERVED_MASK 0xFFFFFFFCL
++//BIF_CFG_DEV0_SWDS0_PCIE_LANE_ERROR_STATUS
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_ERROR_STATUS__LANE_ERROR_STATUS_BITS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_ERROR_STATUS__RESERVED__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_ERROR_STATUS__LANE_ERROR_STATUS_BITS_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_ERROR_STATUS__RESERVED_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_SWDS0_PCIE_LANE_0_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_0_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_0_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_SWDS0_PCIE_LANE_1_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_1_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_1_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_SWDS0_PCIE_LANE_2_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_2_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_2_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_SWDS0_PCIE_LANE_3_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_3_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_3_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_SWDS0_PCIE_LANE_4_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_4_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_4_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_SWDS0_PCIE_LANE_5_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_5_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_5_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_SWDS0_PCIE_LANE_6_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_6_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_6_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_SWDS0_PCIE_LANE_7_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_7_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_7_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_SWDS0_PCIE_LANE_8_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_8_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_8_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_SWDS0_PCIE_LANE_9_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_9_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_9_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_SWDS0_PCIE_LANE_10_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_10_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_10_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_SWDS0_PCIE_LANE_11_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_11_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_11_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_SWDS0_PCIE_LANE_12_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_12_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_12_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_SWDS0_PCIE_LANE_13_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_13_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_13_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_SWDS0_PCIE_LANE_14_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_14_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_14_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_SWDS0_PCIE_LANE_15_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_15_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_15_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_SWDS0_PCIE_ACS_ENH_CAP_LIST
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__SOURCE_VALIDATION__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__TRANSLATION_BLOCKING__SHIFT 0x1
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT__SHIFT 0x2
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__UPSTREAM_FORWARDING__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__P2P_EGRESS_CONTROL__SHIFT 0x5
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__SOURCE_VALIDATION_MASK 0x0001L
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__TRANSLATION_BLOCKING_MASK 0x0002L
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT_MASK 0x0004L
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT_MASK 0x0008L
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__UPSTREAM_FORWARDING_MASK 0x0010L
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__P2P_EGRESS_CONTROL_MASK 0x0020L
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN_MASK 0x0040L
++//BIF_CFG_DEV0_SWDS0_PCIE_DLF_ENH_CAP_LIST
++#define BIF_CFG_DEV0_SWDS0_PCIE_DLF_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PCIE_DLF_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_PCIE_DLF_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_SWDS0_PCIE_DLF_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_SWDS0_PCIE_DLF_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_SWDS0_PCIE_DLF_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_SWDS0_DATA_LINK_FEATURE_CAP
++#define BIF_CFG_DEV0_SWDS0_DATA_LINK_FEATURE_CAP__LOCAL_DLF_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_DATA_LINK_FEATURE_CAP__DLF_EXCHANGE_ENABLE__SHIFT 0x1f
++#define BIF_CFG_DEV0_SWDS0_DATA_LINK_FEATURE_CAP__LOCAL_DLF_SUPPORTED_MASK 0x007FFFFFL
++#define BIF_CFG_DEV0_SWDS0_DATA_LINK_FEATURE_CAP__DLF_EXCHANGE_ENABLE_MASK 0x80000000L
++//BIF_CFG_DEV0_SWDS0_DATA_LINK_FEATURE_STATUS
++#define BIF_CFG_DEV0_SWDS0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_VALID__SHIFT 0x1f
++#define BIF_CFG_DEV0_SWDS0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_MASK 0x007FFFFFL
++#define BIF_CFG_DEV0_SWDS0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_VALID_MASK 0x80000000L
++//BIF_CFG_DEV0_SWDS0_PHY_16GT_ENH_CAP_LIST
++#define BIF_CFG_DEV0_SWDS0_PHY_16GT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_PHY_16GT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_PHY_16GT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_SWDS0_PHY_16GT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_SWDS0_PHY_16GT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_SWDS0_PHY_16GT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_SWDS0_LINK_CAP_16GT
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP_16GT__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LINK_CAP_16GT__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_SWDS0_LINK_CNTL_16GT
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL_16GT__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LINK_CNTL_16GT__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_SWDS0_LINK_STATUS_16GT
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS_16GT__EQUALIZATION_COMPLETE_16GT__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS_16GT__EQUALIZATION_PHASE1_SUCCESS_16GT__SHIFT 0x1
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS_16GT__EQUALIZATION_PHASE2_SUCCESS_16GT__SHIFT 0x2
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS_16GT__EQUALIZATION_PHASE3_SUCCESS_16GT__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS_16GT__LINK_EQUALIZATION_REQUEST_16GT__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS_16GT__EQUALIZATION_COMPLETE_16GT_MASK 0x00000001L
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS_16GT__EQUALIZATION_PHASE1_SUCCESS_16GT_MASK 0x00000002L
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS_16GT__EQUALIZATION_PHASE2_SUCCESS_16GT_MASK 0x00000004L
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS_16GT__EQUALIZATION_PHASE3_SUCCESS_16GT_MASK 0x00000008L
++#define BIF_CFG_DEV0_SWDS0_LINK_STATUS_16GT__LINK_EQUALIZATION_REQUEST_16GT_MASK 0x00000010L
++//BIF_CFG_DEV0_SWDS0_LOCAL_PARITY_MISMATCH_STATUS_16GT
++#define BIF_CFG_DEV0_SWDS0_LOCAL_PARITY_MISMATCH_STATUS_16GT__LOCAL_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LOCAL_PARITY_MISMATCH_STATUS_16GT__LOCAL_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_SWDS0_RTM1_PARITY_MISMATCH_STATUS_16GT
++#define BIF_CFG_DEV0_SWDS0_RTM1_PARITY_MISMATCH_STATUS_16GT__RTM1_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_RTM1_PARITY_MISMATCH_STATUS_16GT__RTM1_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_SWDS0_RTM2_PARITY_MISMATCH_STATUS_16GT
++#define BIF_CFG_DEV0_SWDS0_RTM2_PARITY_MISMATCH_STATUS_16GT__RTM2_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_RTM2_PARITY_MISMATCH_STATUS_16GT__RTM2_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_SWDS0_LANE_0_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_SWDS0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_SWDS0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_SWDS0_LANE_1_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_SWDS0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_SWDS0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_SWDS0_LANE_2_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_SWDS0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_SWDS0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_SWDS0_LANE_3_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_SWDS0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_SWDS0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_SWDS0_LANE_4_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_SWDS0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_SWDS0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_SWDS0_LANE_5_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_SWDS0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_SWDS0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_SWDS0_LANE_6_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_SWDS0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_SWDS0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_SWDS0_LANE_7_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_SWDS0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_SWDS0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_SWDS0_LANE_8_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_SWDS0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_SWDS0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_SWDS0_LANE_9_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_SWDS0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_SWDS0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_SWDS0_LANE_10_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_SWDS0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_SWDS0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_SWDS0_LANE_11_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_SWDS0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_SWDS0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_SWDS0_LANE_12_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_SWDS0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_SWDS0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_SWDS0_LANE_13_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_SWDS0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_SWDS0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_SWDS0_LANE_14_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_SWDS0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_SWDS0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_SWDS0_LANE_15_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_SWDS0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_SWDS0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_SWDS0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_SWDS0_MARGINING_ENH_CAP_LIST
++#define BIF_CFG_DEV0_SWDS0_MARGINING_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_MARGINING_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_SWDS0_MARGINING_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_SWDS0_MARGINING_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_SWDS0_MARGINING_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_SWDS0_MARGINING_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_SWDS0_MARGINING_PORT_CAP
++#define BIF_CFG_DEV0_SWDS0_MARGINING_PORT_CAP__MARGINING_USES_SOFTWARE__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_MARGINING_PORT_CAP__MARGINING_USES_SOFTWARE_MASK 0x0001L
++//BIF_CFG_DEV0_SWDS0_MARGINING_PORT_STATUS
++#define BIF_CFG_DEV0_SWDS0_MARGINING_PORT_STATUS__MARGINING_READY__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_MARGINING_PORT_STATUS__MARGINING_SOFTWARE_READY__SHIFT 0x1
++#define BIF_CFG_DEV0_SWDS0_MARGINING_PORT_STATUS__MARGINING_READY_MASK 0x0001L
++#define BIF_CFG_DEV0_SWDS0_MARGINING_PORT_STATUS__MARGINING_SOFTWARE_READY_MASK 0x0002L
++//BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_CNTL__LANE_0_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_CNTL__LANE_0_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_CNTL__LANE_0_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_CNTL__LANE_0_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_STATUS__LANE_0_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_STATUS__LANE_0_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_STATUS__LANE_0_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_STATUS__LANE_0_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_CNTL__LANE_1_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_CNTL__LANE_1_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_CNTL__LANE_1_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_CNTL__LANE_1_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_STATUS__LANE_1_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_STATUS__LANE_1_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_STATUS__LANE_1_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_STATUS__LANE_1_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_CNTL__LANE_2_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_CNTL__LANE_2_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_CNTL__LANE_2_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_CNTL__LANE_2_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_STATUS__LANE_2_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_STATUS__LANE_2_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_STATUS__LANE_2_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_STATUS__LANE_2_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_CNTL__LANE_3_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_CNTL__LANE_3_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_CNTL__LANE_3_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_CNTL__LANE_3_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_STATUS__LANE_3_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_STATUS__LANE_3_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_STATUS__LANE_3_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_STATUS__LANE_3_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_CNTL__LANE_4_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_CNTL__LANE_4_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_CNTL__LANE_4_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_CNTL__LANE_4_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_STATUS__LANE_4_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_STATUS__LANE_4_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_STATUS__LANE_4_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_STATUS__LANE_4_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_CNTL__LANE_5_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_CNTL__LANE_5_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_CNTL__LANE_5_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_CNTL__LANE_5_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_STATUS__LANE_5_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_STATUS__LANE_5_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_STATUS__LANE_5_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_STATUS__LANE_5_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_CNTL__LANE_6_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_CNTL__LANE_6_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_CNTL__LANE_6_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_CNTL__LANE_6_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_STATUS__LANE_6_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_STATUS__LANE_6_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_STATUS__LANE_6_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_STATUS__LANE_6_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_CNTL__LANE_7_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_CNTL__LANE_7_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_CNTL__LANE_7_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_CNTL__LANE_7_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_STATUS__LANE_7_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_STATUS__LANE_7_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_STATUS__LANE_7_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_STATUS__LANE_7_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_CNTL__LANE_8_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_CNTL__LANE_8_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_CNTL__LANE_8_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_CNTL__LANE_8_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_STATUS__LANE_8_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_STATUS__LANE_8_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_STATUS__LANE_8_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_STATUS__LANE_8_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_CNTL__LANE_9_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_CNTL__LANE_9_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_CNTL__LANE_9_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_CNTL__LANE_9_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_STATUS__LANE_9_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_STATUS__LANE_9_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_STATUS__LANE_9_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_STATUS__LANE_9_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_CNTL__LANE_10_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_CNTL__LANE_10_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_CNTL__LANE_10_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_CNTL__LANE_10_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_STATUS__LANE_10_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_STATUS__LANE_10_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_STATUS__LANE_10_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_STATUS__LANE_10_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_CNTL__LANE_11_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_CNTL__LANE_11_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_CNTL__LANE_11_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_CNTL__LANE_11_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_STATUS__LANE_11_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_STATUS__LANE_11_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_STATUS__LANE_11_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_STATUS__LANE_11_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_CNTL__LANE_12_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_CNTL__LANE_12_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_CNTL__LANE_12_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_CNTL__LANE_12_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_STATUS__LANE_12_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_STATUS__LANE_12_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_STATUS__LANE_12_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_STATUS__LANE_12_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_CNTL__LANE_13_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_CNTL__LANE_13_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_CNTL__LANE_13_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_CNTL__LANE_13_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_STATUS__LANE_13_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_STATUS__LANE_13_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_STATUS__LANE_13_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_STATUS__LANE_13_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_CNTL__LANE_14_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_CNTL__LANE_14_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_CNTL__LANE_14_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_CNTL__LANE_14_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_STATUS__LANE_14_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_STATUS__LANE_14_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_STATUS__LANE_14_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_STATUS__LANE_14_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_CNTL__LANE_15_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_CNTL__LANE_15_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_CNTL__LANE_15_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_CNTL__LANE_15_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_STATUS__LANE_15_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_STATUS__LANE_15_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_STATUS__LANE_15_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_STATUS__LANE_15_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++
++
++// addressBlock: nbio_nbif0_rcc_strap_BIFDEC1
++//RCC_STRAP0_RCC_DEV0_EPF0_STRAP0
++#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_DEVICE_ID_DEV0_F0__SHIFT 0x0
++#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_MAJOR_REV_ID_DEV0_F0__SHIFT 0x10
++#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_MINOR_REV_ID_DEV0_F0__SHIFT 0x14
++#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT 0x18
++#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_FUNC_EN_DEV0_F0__SHIFT 0x1c
++#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_LEGACY_DEVICE_TYPE_EN_DEV0_F0__SHIFT 0x1d
++#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_D1_SUPPORT_DEV0_F0__SHIFT 0x1e
++#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_D2_SUPPORT_DEV0_F0__SHIFT 0x1f
++#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_DEVICE_ID_DEV0_F0_MASK 0x0000FFFFL
++#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_MAJOR_REV_ID_DEV0_F0_MASK 0x000F0000L
++#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_MINOR_REV_ID_DEV0_F0_MASK 0x00F00000L
++#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK 0x0F000000L
++#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_FUNC_EN_DEV0_F0_MASK 0x10000000L
++#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_LEGACY_DEVICE_TYPE_EN_DEV0_F0_MASK 0x20000000L
++#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_D1_SUPPORT_DEV0_F0_MASK 0x40000000L
++#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_D2_SUPPORT_DEV0_F0_MASK 0x80000000L
++
++
++// addressBlock: nbio_nbif0_rcc_ep_dev0_BIFDEC1
++//RCC_EP_DEV0_0_EP_PCIE_SCRATCH
++#define RCC_EP_DEV0_0_EP_PCIE_SCRATCH__PCIE_SCRATCH__SHIFT 0x0
++#define RCC_EP_DEV0_0_EP_PCIE_SCRATCH__PCIE_SCRATCH_MASK 0xFFFFFFFFL
++//RCC_EP_DEV0_0_EP_PCIE_CNTL
++#define RCC_EP_DEV0_0_EP_PCIE_CNTL__UR_ERR_REPORT_DIS__SHIFT 0x7
++#define RCC_EP_DEV0_0_EP_PCIE_CNTL__PCIE_MALFORM_ATOMIC_OPS__SHIFT 0x8
++#define RCC_EP_DEV0_0_EP_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR__SHIFT 0x1e
++#define RCC_EP_DEV0_0_EP_PCIE_CNTL__UR_ERR_REPORT_DIS_MASK 0x00000080L
++#define RCC_EP_DEV0_0_EP_PCIE_CNTL__PCIE_MALFORM_ATOMIC_OPS_MASK 0x00000100L
++#define RCC_EP_DEV0_0_EP_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR_MASK 0x40000000L
++//RCC_EP_DEV0_0_EP_PCIE_INT_CNTL
++#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__CORR_ERR_INT_EN__SHIFT 0x0
++#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__NON_FATAL_ERR_INT_EN__SHIFT 0x1
++#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__FATAL_ERR_INT_EN__SHIFT 0x2
++#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__USR_DETECTED_INT_EN__SHIFT 0x3
++#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__MISC_ERR_INT_EN__SHIFT 0x4
++#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__POWER_STATE_CHG_INT_EN__SHIFT 0x6
++#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__CORR_ERR_INT_EN_MASK 0x00000001L
++#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__NON_FATAL_ERR_INT_EN_MASK 0x00000002L
++#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__FATAL_ERR_INT_EN_MASK 0x00000004L
++#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__USR_DETECTED_INT_EN_MASK 0x00000008L
++#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__MISC_ERR_INT_EN_MASK 0x00000010L
++#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__POWER_STATE_CHG_INT_EN_MASK 0x00000040L
++//RCC_EP_DEV0_0_EP_PCIE_INT_STATUS
++#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__CORR_ERR_INT_STATUS__SHIFT 0x0
++#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__NON_FATAL_ERR_INT_STATUS__SHIFT 0x1
++#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__FATAL_ERR_INT_STATUS__SHIFT 0x2
++#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__USR_DETECTED_INT_STATUS__SHIFT 0x3
++#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__MISC_ERR_INT_STATUS__SHIFT 0x4
++#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__POWER_STATE_CHG_INT_STATUS__SHIFT 0x6
++#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__CORR_ERR_INT_STATUS_MASK 0x00000001L
++#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__NON_FATAL_ERR_INT_STATUS_MASK 0x00000002L
++#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__FATAL_ERR_INT_STATUS_MASK 0x00000004L
++#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__USR_DETECTED_INT_STATUS_MASK 0x00000008L
++#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__MISC_ERR_INT_STATUS_MASK 0x00000010L
++#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__POWER_STATE_CHG_INT_STATUS_MASK 0x00000040L
++//RCC_EP_DEV0_0_EP_PCIE_RX_CNTL2
++#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL2__RX_IGNORE_EP_INVALIDPASID_UR__SHIFT 0x0
++#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL2__RX_IGNORE_EP_INVALIDPASID_UR_MASK 0x00000001L
++//RCC_EP_DEV0_0_EP_PCIE_BUS_CNTL
++#define RCC_EP_DEV0_0_EP_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS__SHIFT 0x7
++#define RCC_EP_DEV0_0_EP_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS_MASK 0x00000080L
++//RCC_EP_DEV0_0_EP_PCIE_CFG_CNTL
++#define RCC_EP_DEV0_0_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG__SHIFT 0x0
++#define RCC_EP_DEV0_0_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG__SHIFT 0x1
++#define RCC_EP_DEV0_0_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG__SHIFT 0x2
++#define RCC_EP_DEV0_0_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG__SHIFT 0x3
++#define RCC_EP_DEV0_0_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG_MASK 0x00000001L
++#define RCC_EP_DEV0_0_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG_MASK 0x00000002L
++#define RCC_EP_DEV0_0_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG_MASK 0x00000004L
++#define RCC_EP_DEV0_0_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG_MASK 0x00000008L
++//RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL
++#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_SHORT_VALUE__SHIFT 0x0
++#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_LONG_VALUE__SHIFT 0x3
++#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_REQUIREMENT__SHIFT 0x6
++#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_SHORT_VALUE__SHIFT 0x7
++#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_LONG_VALUE__SHIFT 0xa
++#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_REQUIREMENT__SHIFT 0xd
++#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0__SHIFT 0xe
++#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_RST_LTR_IN_DL_DOWN__SHIFT 0xf
++#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__TX_CHK_FC_FOR_L1__SHIFT 0x10
++#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_DSTATE_USING_WDATA_EN__SHIFT 0x11
++#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_SHORT_VALUE_MASK 0x00000007L
++#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_LONG_VALUE_MASK 0x00000038L
++#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_REQUIREMENT_MASK 0x00000040L
++#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_SHORT_VALUE_MASK 0x00000380L
++#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_LONG_VALUE_MASK 0x00001C00L
++#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_REQUIREMENT_MASK 0x00002000L
++#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0_MASK 0x00004000L
++#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_RST_LTR_IN_DL_DOWN_MASK 0x00008000L
++#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__TX_CHK_FC_FOR_L1_MASK 0x00010000L
++#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_DSTATE_USING_WDATA_EN_MASK 0x00020000L
++//RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_0
++#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_1
++#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_2
++#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_3
++#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_4
++#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_5
++#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_6
++#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_7
++#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CAP
++#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CAP__TRANS_LAT_UNIT__SHIFT 0x8
++#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CAP__PWR_ALLOC_SCALE__SHIFT 0xc
++#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_0__SHIFT 0x10
++#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_1__SHIFT 0x18
++#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CAP__TRANS_LAT_UNIT_MASK 0x00000300L
++#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CAP__PWR_ALLOC_SCALE_MASK 0x00003000L
++#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_0_MASK 0x00FF0000L
++#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_1_MASK 0xFF000000L
++//RCC_EP_DEV0_0_EP_PCIE_F0_DPA_LATENCY_INDICATOR
++#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS__SHIFT 0x0
++#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS_MASK 0xFFL
++//RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CNTL
++#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CNTL__SUBSTATE_STATUS__SHIFT 0x0
++#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CNTL__DPA_COMPLIANCE_MODE__SHIFT 0x8
++#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CNTL__SUBSTATE_STATUS_MASK 0x001FL
++#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CNTL__DPA_COMPLIANCE_MODE_MASK 0x0100L
++//RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0
++#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1
++#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2
++#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3
++#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4
++#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5
++#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6
++#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7
++#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//RCC_EP_DEV0_0_EP_PCIE_PME_CONTROL
++#define RCC_EP_DEV0_0_EP_PCIE_PME_CONTROL__PME_SERVICE_TIMER__SHIFT 0x0
++#define RCC_EP_DEV0_0_EP_PCIE_PME_CONTROL__PME_SERVICE_TIMER_MASK 0x1FL
++//RCC_EP_DEV0_0_EP_PCIEP_RESERVED
++#define RCC_EP_DEV0_0_EP_PCIEP_RESERVED__PCIEP_RESERVED__SHIFT 0x0
++#define RCC_EP_DEV0_0_EP_PCIEP_RESERVED__PCIEP_RESERVED_MASK 0xFFFFFFFFL
++//RCC_EP_DEV0_0_EP_PCIE_TX_CNTL
++#define RCC_EP_DEV0_0_EP_PCIE_TX_CNTL__TX_SNR_OVERRIDE__SHIFT 0xa
++#define RCC_EP_DEV0_0_EP_PCIE_TX_CNTL__TX_RO_OVERRIDE__SHIFT 0xc
++#define RCC_EP_DEV0_0_EP_PCIE_TX_CNTL__TX_F0_TPH_DIS__SHIFT 0x18
++#define RCC_EP_DEV0_0_EP_PCIE_TX_CNTL__TX_F1_TPH_DIS__SHIFT 0x19
++#define RCC_EP_DEV0_0_EP_PCIE_TX_CNTL__TX_F2_TPH_DIS__SHIFT 0x1a
++#define RCC_EP_DEV0_0_EP_PCIE_TX_CNTL__TX_SNR_OVERRIDE_MASK 0x00000C00L
++#define RCC_EP_DEV0_0_EP_PCIE_TX_CNTL__TX_RO_OVERRIDE_MASK 0x00003000L
++#define RCC_EP_DEV0_0_EP_PCIE_TX_CNTL__TX_F0_TPH_DIS_MASK 0x01000000L
++#define RCC_EP_DEV0_0_EP_PCIE_TX_CNTL__TX_F1_TPH_DIS_MASK 0x02000000L
++#define RCC_EP_DEV0_0_EP_PCIE_TX_CNTL__TX_F2_TPH_DIS_MASK 0x04000000L
++//RCC_EP_DEV0_0_EP_PCIE_TX_REQUESTER_ID
++#define RCC_EP_DEV0_0_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_FUNCTION__SHIFT 0x0
++#define RCC_EP_DEV0_0_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_DEVICE__SHIFT 0x3
++#define RCC_EP_DEV0_0_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_BUS__SHIFT 0x8
++#define RCC_EP_DEV0_0_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_FUNCTION_MASK 0x00000007L
++#define RCC_EP_DEV0_0_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_DEVICE_MASK 0x000000F8L
++#define RCC_EP_DEV0_0_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_BUS_MASK 0x0000FF00L
++//RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL
++#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__ERR_REPORTING_DIS__SHIFT 0x0
++#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_TIMEOUT__SHIFT 0x8
++#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY__SHIFT 0x11
++#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__STRAP_POISONED_ADVISORY_NONFATAL__SHIFT 0x12
++#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F0_TIMER_EXPIRED__SHIFT 0x18
++#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F1_TIMER_EXPIRED__SHIFT 0x19
++#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F2_TIMER_EXPIRED__SHIFT 0x1a
++#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F3_TIMER_EXPIRED__SHIFT 0x1b
++#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F4_TIMER_EXPIRED__SHIFT 0x1c
++#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F5_TIMER_EXPIRED__SHIFT 0x1d
++#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F6_TIMER_EXPIRED__SHIFT 0x1e
++#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F7_TIMER_EXPIRED__SHIFT 0x1f
++#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__ERR_REPORTING_DIS_MASK 0x00000001L
++#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_TIMEOUT_MASK 0x00000700L
++#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY_MASK 0x00020000L
++#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__STRAP_POISONED_ADVISORY_NONFATAL_MASK 0x00040000L
++#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F0_TIMER_EXPIRED_MASK 0x01000000L
++#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F1_TIMER_EXPIRED_MASK 0x02000000L
++#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F2_TIMER_EXPIRED_MASK 0x04000000L
++#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F3_TIMER_EXPIRED_MASK 0x08000000L
++#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F4_TIMER_EXPIRED_MASK 0x10000000L
++#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F5_TIMER_EXPIRED_MASK 0x20000000L
++#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F6_TIMER_EXPIRED_MASK 0x40000000L
++#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F7_TIMER_EXPIRED_MASK 0x80000000L
++//RCC_EP_DEV0_0_EP_PCIE_RX_CNTL
++#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR__SHIFT 0x8
++#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_TC_ERR__SHIFT 0x9
++#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS__SHIFT 0x14
++#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR__SHIFT 0x15
++#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_MAXPREFIX_ERR__SHIFT 0x16
++#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_INVALIDPASID_ERR__SHIFT 0x18
++#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_NOT_PASID_UR__SHIFT 0x19
++#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_TPH_DIS__SHIFT 0x1a
++#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR_MASK 0x00000100L
++#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_TC_ERR_MASK 0x00000200L
++#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS_MASK 0x00100000L
++#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR_MASK 0x00200000L
++#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_MAXPREFIX_ERR_MASK 0x00400000L
++#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_INVALIDPASID_ERR_MASK 0x01000000L
++#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_NOT_PASID_UR_MASK 0x02000000L
++#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_TPH_DIS_MASK 0x04000000L
++//RCC_EP_DEV0_0_EP_PCIE_LC_SPEED_CNTL
++#define RCC_EP_DEV0_0_EP_PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP__SHIFT 0x0
++#define RCC_EP_DEV0_0_EP_PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP__SHIFT 0x1
++#define RCC_EP_DEV0_0_EP_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP__SHIFT 0x2
++#define RCC_EP_DEV0_0_EP_PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP_MASK 0x00000001L
++#define RCC_EP_DEV0_0_EP_PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP_MASK 0x00000002L
++#define RCC_EP_DEV0_0_EP_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP_MASK 0x00000004L
++
++
++// addressBlock: nbio_nbif0_rcc_dwn_dev0_BIFDEC1
++//RCC_DWN_DEV0_0_DN_PCIE_RESERVED
++#define RCC_DWN_DEV0_0_DN_PCIE_RESERVED__PCIE_RESERVED__SHIFT 0x0
++#define RCC_DWN_DEV0_0_DN_PCIE_RESERVED__PCIE_RESERVED_MASK 0xFFFFFFFFL
++//RCC_DWN_DEV0_0_DN_PCIE_SCRATCH
++#define RCC_DWN_DEV0_0_DN_PCIE_SCRATCH__PCIE_SCRATCH__SHIFT 0x0
++#define RCC_DWN_DEV0_0_DN_PCIE_SCRATCH__PCIE_SCRATCH_MASK 0xFFFFFFFFL
++//RCC_DWN_DEV0_0_DN_PCIE_CNTL
++#define RCC_DWN_DEV0_0_DN_PCIE_CNTL__HWINIT_WR_LOCK__SHIFT 0x0
++#define RCC_DWN_DEV0_0_DN_PCIE_CNTL__UR_ERR_REPORT_DIS_DN__SHIFT 0x7
++#define RCC_DWN_DEV0_0_DN_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR__SHIFT 0x1e
++#define RCC_DWN_DEV0_0_DN_PCIE_CNTL__HWINIT_WR_LOCK_MASK 0x00000001L
++#define RCC_DWN_DEV0_0_DN_PCIE_CNTL__UR_ERR_REPORT_DIS_DN_MASK 0x00000080L
++#define RCC_DWN_DEV0_0_DN_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR_MASK 0x40000000L
++//RCC_DWN_DEV0_0_DN_PCIE_CONFIG_CNTL
++#define RCC_DWN_DEV0_0_DN_PCIE_CONFIG_CNTL__CI_EXTENDED_TAG_EN_OVERRIDE__SHIFT 0x19
++#define RCC_DWN_DEV0_0_DN_PCIE_CONFIG_CNTL__CI_EXTENDED_TAG_EN_OVERRIDE_MASK 0x06000000L
++//RCC_DWN_DEV0_0_DN_PCIE_RX_CNTL2
++#define RCC_DWN_DEV0_0_DN_PCIE_RX_CNTL2__FLR_EXTEND_MODE__SHIFT 0x1c
++#define RCC_DWN_DEV0_0_DN_PCIE_RX_CNTL2__FLR_EXTEND_MODE_MASK 0x70000000L
++//RCC_DWN_DEV0_0_DN_PCIE_BUS_CNTL
++#define RCC_DWN_DEV0_0_DN_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS__SHIFT 0x7
++#define RCC_DWN_DEV0_0_DN_PCIE_BUS_CNTL__AER_CPL_TIMEOUT_RO_DIS_SWDN__SHIFT 0x8
++#define RCC_DWN_DEV0_0_DN_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS_MASK 0x00000080L
++#define RCC_DWN_DEV0_0_DN_PCIE_BUS_CNTL__AER_CPL_TIMEOUT_RO_DIS_SWDN_MASK 0x00000100L
++//RCC_DWN_DEV0_0_DN_PCIE_CFG_CNTL
++#define RCC_DWN_DEV0_0_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG__SHIFT 0x0
++#define RCC_DWN_DEV0_0_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG__SHIFT 0x1
++#define RCC_DWN_DEV0_0_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG__SHIFT 0x2
++#define RCC_DWN_DEV0_0_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG__SHIFT 0x3
++#define RCC_DWN_DEV0_0_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG_MASK 0x00000001L
++#define RCC_DWN_DEV0_0_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG_MASK 0x00000002L
++#define RCC_DWN_DEV0_0_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG_MASK 0x00000004L
++#define RCC_DWN_DEV0_0_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG_MASK 0x00000008L
++
++
++// addressBlock: nbio_nbif0_rcc_dwnp_dev0_BIFDEC1
++//RCC_DWNP_DEV0_0_PCIE_ERR_CNTL
++#define RCC_DWNP_DEV0_0_PCIE_ERR_CNTL__ERR_REPORTING_DIS__SHIFT 0x0
++#define RCC_DWNP_DEV0_0_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY__SHIFT 0x11
++#define RCC_DWNP_DEV0_0_PCIE_ERR_CNTL__ERR_REPORTING_DIS_MASK 0x00000001L
++#define RCC_DWNP_DEV0_0_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY_MASK 0x00020000L
++//RCC_DWNP_DEV0_0_PCIE_RX_CNTL
++#define RCC_DWNP_DEV0_0_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR__SHIFT 0x8
++#define RCC_DWNP_DEV0_0_PCIE_RX_CNTL__RX_IGNORE_TC_ERR_DN__SHIFT 0x9
++#define RCC_DWNP_DEV0_0_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS__SHIFT 0x14
++#define RCC_DWNP_DEV0_0_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR_DN__SHIFT 0x15
++#define RCC_DWNP_DEV0_0_PCIE_RX_CNTL__RX_RCB_FLR_TIMEOUT_DIS__SHIFT 0x1b
++#define RCC_DWNP_DEV0_0_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR_MASK 0x00000100L
++#define RCC_DWNP_DEV0_0_PCIE_RX_CNTL__RX_IGNORE_TC_ERR_DN_MASK 0x00000200L
++#define RCC_DWNP_DEV0_0_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS_MASK 0x00100000L
++#define RCC_DWNP_DEV0_0_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR_DN_MASK 0x00200000L
++#define RCC_DWNP_DEV0_0_PCIE_RX_CNTL__RX_RCB_FLR_TIMEOUT_DIS_MASK 0x08000000L
++//RCC_DWNP_DEV0_0_PCIE_LC_SPEED_CNTL
++//RCC_DWNP_DEV0_0_PCIE_LC_CNTL2
++#define RCC_DWNP_DEV0_0_PCIE_LC_CNTL2__LC_LINK_BW_NOTIFICATION_DIS__SHIFT 0x1b
++#define RCC_DWNP_DEV0_0_PCIE_LC_CNTL2__LC_LINK_BW_NOTIFICATION_DIS_MASK 0x08000000L
++//RCC_DWNP_DEV0_0_LTR_MSG_INFO_FROM_EP
++#define RCC_DWNP_DEV0_0_LTR_MSG_INFO_FROM_EP__LTR_MSG_INFO_FROM_EP__SHIFT 0x0
++#define RCC_DWNP_DEV0_0_LTR_MSG_INFO_FROM_EP__LTR_MSG_INFO_FROM_EP_MASK 0xFFFFFFFFL
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_BIFDEC1
++
++
++// addressBlock: nbio_nbif0_rcc_shadow_reg_shadowdec
++//SHADOW_COMMAND
++#define SHADOW_COMMAND__IOEN_UP__SHIFT 0x0
++#define SHADOW_COMMAND__MEMEN_UP__SHIFT 0x1
++#define SHADOW_COMMAND__IOEN_UP_MASK 0x0001L
++#define SHADOW_COMMAND__MEMEN_UP_MASK 0x0002L
++//SHADOW_BASE_ADDR_1
++#define SHADOW_BASE_ADDR_1__BAR1_UP__SHIFT 0x0
++#define SHADOW_BASE_ADDR_1__BAR1_UP_MASK 0xFFFFFFFFL
++//SHADOW_BASE_ADDR_2
++#define SHADOW_BASE_ADDR_2__BAR2_UP__SHIFT 0x0
++#define SHADOW_BASE_ADDR_2__BAR2_UP_MASK 0xFFFFFFFFL
++//SHADOW_SUB_BUS_NUMBER_LATENCY
++#define SHADOW_SUB_BUS_NUMBER_LATENCY__SECONDARY_BUS_UP__SHIFT 0x8
++#define SHADOW_SUB_BUS_NUMBER_LATENCY__SUB_BUS_NUM_UP__SHIFT 0x10
++#define SHADOW_SUB_BUS_NUMBER_LATENCY__SECONDARY_BUS_UP_MASK 0x0000FF00L
++#define SHADOW_SUB_BUS_NUMBER_LATENCY__SUB_BUS_NUM_UP_MASK 0x00FF0000L
++//SHADOW_IO_BASE_LIMIT
++#define SHADOW_IO_BASE_LIMIT__IO_BASE_UP__SHIFT 0x4
++#define SHADOW_IO_BASE_LIMIT__IO_LIMIT_UP__SHIFT 0xc
++#define SHADOW_IO_BASE_LIMIT__IO_BASE_UP_MASK 0x00F0L
++#define SHADOW_IO_BASE_LIMIT__IO_LIMIT_UP_MASK 0xF000L
++//SHADOW_MEM_BASE_LIMIT
++#define SHADOW_MEM_BASE_LIMIT__MEM_BASE_TYPE__SHIFT 0x0
++#define SHADOW_MEM_BASE_LIMIT__MEM_BASE_31_20_UP__SHIFT 0x4
++#define SHADOW_MEM_BASE_LIMIT__MEM_LIMIT_TYPE__SHIFT 0x10
++#define SHADOW_MEM_BASE_LIMIT__MEM_LIMIT_31_20_UP__SHIFT 0x14
++#define SHADOW_MEM_BASE_LIMIT__MEM_BASE_TYPE_MASK 0x0000000FL
++#define SHADOW_MEM_BASE_LIMIT__MEM_BASE_31_20_UP_MASK 0x0000FFF0L
++#define SHADOW_MEM_BASE_LIMIT__MEM_LIMIT_TYPE_MASK 0x000F0000L
++#define SHADOW_MEM_BASE_LIMIT__MEM_LIMIT_31_20_UP_MASK 0xFFF00000L
++//SHADOW_PREF_BASE_LIMIT
++#define SHADOW_PREF_BASE_LIMIT__PREF_MEM_BASE_TYPE__SHIFT 0x0
++#define SHADOW_PREF_BASE_LIMIT__PREF_MEM_BASE_31_20_UP__SHIFT 0x4
++#define SHADOW_PREF_BASE_LIMIT__PREF_MEM_LIMIT_TYPE__SHIFT 0x10
++#define SHADOW_PREF_BASE_LIMIT__PREF_MEM_LIMIT_31_20_UP__SHIFT 0x14
++#define SHADOW_PREF_BASE_LIMIT__PREF_MEM_BASE_TYPE_MASK 0x0000000FL
++#define SHADOW_PREF_BASE_LIMIT__PREF_MEM_BASE_31_20_UP_MASK 0x0000FFF0L
++#define SHADOW_PREF_BASE_LIMIT__PREF_MEM_LIMIT_TYPE_MASK 0x000F0000L
++#define SHADOW_PREF_BASE_LIMIT__PREF_MEM_LIMIT_31_20_UP_MASK 0xFFF00000L
++//SHADOW_PREF_BASE_UPPER
++#define SHADOW_PREF_BASE_UPPER__PREF_BASE_UPPER_UP__SHIFT 0x0
++#define SHADOW_PREF_BASE_UPPER__PREF_BASE_UPPER_UP_MASK 0xFFFFFFFFL
++//SHADOW_PREF_LIMIT_UPPER
++#define SHADOW_PREF_LIMIT_UPPER__PREF_LIMIT_UPPER_UP__SHIFT 0x0
++#define SHADOW_PREF_LIMIT_UPPER__PREF_LIMIT_UPPER_UP_MASK 0xFFFFFFFFL
++//SHADOW_IO_BASE_LIMIT_HI
++#define SHADOW_IO_BASE_LIMIT_HI__IO_BASE_31_16_UP__SHIFT 0x0
++#define SHADOW_IO_BASE_LIMIT_HI__IO_LIMIT_31_16_UP__SHIFT 0x10
++#define SHADOW_IO_BASE_LIMIT_HI__IO_BASE_31_16_UP_MASK 0x0000FFFFL
++#define SHADOW_IO_BASE_LIMIT_HI__IO_LIMIT_31_16_UP_MASK 0xFFFF0000L
++//SHADOW_IRQ_BRIDGE_CNTL
++#define SHADOW_IRQ_BRIDGE_CNTL__ISA_EN_UP__SHIFT 0x2
++#define SHADOW_IRQ_BRIDGE_CNTL__VGA_EN_UP__SHIFT 0x3
++#define SHADOW_IRQ_BRIDGE_CNTL__VGA_DEC_UP__SHIFT 0x4
++#define SHADOW_IRQ_BRIDGE_CNTL__SECONDARY_BUS_RESET_UP__SHIFT 0x6
++#define SHADOW_IRQ_BRIDGE_CNTL__ISA_EN_UP_MASK 0x0004L
++#define SHADOW_IRQ_BRIDGE_CNTL__VGA_EN_UP_MASK 0x0008L
++#define SHADOW_IRQ_BRIDGE_CNTL__VGA_DEC_UP_MASK 0x0010L
++#define SHADOW_IRQ_BRIDGE_CNTL__SECONDARY_BUS_RESET_UP_MASK 0x0040L
++//SUC_INDEX
++#define SUC_INDEX__SUC_INDEX__SHIFT 0x0
++#define SUC_INDEX__SUC_INDEX_MASK 0xFFFFFFFFL
++//SUC_DATA
++#define SUC_DATA__SUC_DATA__SHIFT 0x0
++#define SUC_DATA__SUC_DATA_MASK 0xFFFFFFFFL
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_RCCPORTDEC
++
++
++// addressBlock: nbio_nbif0_rcc_ep_dev0_RCCPORTDEC
++//RCC_EP_DEV0_1_EP_PCIE_SCRATCH
++#define RCC_EP_DEV0_1_EP_PCIE_SCRATCH__PCIE_SCRATCH__SHIFT 0x0
++#define RCC_EP_DEV0_1_EP_PCIE_SCRATCH__PCIE_SCRATCH_MASK 0xFFFFFFFFL
++//RCC_EP_DEV0_1_EP_PCIE_CNTL
++#define RCC_EP_DEV0_1_EP_PCIE_CNTL__UR_ERR_REPORT_DIS__SHIFT 0x7
++#define RCC_EP_DEV0_1_EP_PCIE_CNTL__PCIE_MALFORM_ATOMIC_OPS__SHIFT 0x8
++#define RCC_EP_DEV0_1_EP_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR__SHIFT 0x1e
++#define RCC_EP_DEV0_1_EP_PCIE_CNTL__UR_ERR_REPORT_DIS_MASK 0x00000080L
++#define RCC_EP_DEV0_1_EP_PCIE_CNTL__PCIE_MALFORM_ATOMIC_OPS_MASK 0x00000100L
++#define RCC_EP_DEV0_1_EP_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR_MASK 0x40000000L
++//RCC_EP_DEV0_1_EP_PCIE_INT_CNTL
++#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__CORR_ERR_INT_EN__SHIFT 0x0
++#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__NON_FATAL_ERR_INT_EN__SHIFT 0x1
++#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__FATAL_ERR_INT_EN__SHIFT 0x2
++#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__USR_DETECTED_INT_EN__SHIFT 0x3
++#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__MISC_ERR_INT_EN__SHIFT 0x4
++#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__POWER_STATE_CHG_INT_EN__SHIFT 0x6
++#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__CORR_ERR_INT_EN_MASK 0x00000001L
++#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__NON_FATAL_ERR_INT_EN_MASK 0x00000002L
++#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__FATAL_ERR_INT_EN_MASK 0x00000004L
++#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__USR_DETECTED_INT_EN_MASK 0x00000008L
++#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__MISC_ERR_INT_EN_MASK 0x00000010L
++#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__POWER_STATE_CHG_INT_EN_MASK 0x00000040L
++//RCC_EP_DEV0_1_EP_PCIE_INT_STATUS
++#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__CORR_ERR_INT_STATUS__SHIFT 0x0
++#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__NON_FATAL_ERR_INT_STATUS__SHIFT 0x1
++#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__FATAL_ERR_INT_STATUS__SHIFT 0x2
++#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__USR_DETECTED_INT_STATUS__SHIFT 0x3
++#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__MISC_ERR_INT_STATUS__SHIFT 0x4
++#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__POWER_STATE_CHG_INT_STATUS__SHIFT 0x6
++#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__CORR_ERR_INT_STATUS_MASK 0x00000001L
++#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__NON_FATAL_ERR_INT_STATUS_MASK 0x00000002L
++#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__FATAL_ERR_INT_STATUS_MASK 0x00000004L
++#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__USR_DETECTED_INT_STATUS_MASK 0x00000008L
++#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__MISC_ERR_INT_STATUS_MASK 0x00000010L
++#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__POWER_STATE_CHG_INT_STATUS_MASK 0x00000040L
++//RCC_EP_DEV0_1_EP_PCIE_RX_CNTL2
++#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL2__RX_IGNORE_EP_INVALIDPASID_UR__SHIFT 0x0
++#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL2__RX_IGNORE_EP_INVALIDPASID_UR_MASK 0x00000001L
++//RCC_EP_DEV0_1_EP_PCIE_BUS_CNTL
++#define RCC_EP_DEV0_1_EP_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS__SHIFT 0x7
++#define RCC_EP_DEV0_1_EP_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS_MASK 0x00000080L
++//RCC_EP_DEV0_1_EP_PCIE_CFG_CNTL
++#define RCC_EP_DEV0_1_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG__SHIFT 0x0
++#define RCC_EP_DEV0_1_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG__SHIFT 0x1
++#define RCC_EP_DEV0_1_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG__SHIFT 0x2
++#define RCC_EP_DEV0_1_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG__SHIFT 0x3
++#define RCC_EP_DEV0_1_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG_MASK 0x00000001L
++#define RCC_EP_DEV0_1_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG_MASK 0x00000002L
++#define RCC_EP_DEV0_1_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG_MASK 0x00000004L
++#define RCC_EP_DEV0_1_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG_MASK 0x00000008L
++//RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL
++#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_SHORT_VALUE__SHIFT 0x0
++#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_LONG_VALUE__SHIFT 0x3
++#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_REQUIREMENT__SHIFT 0x6
++#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_SHORT_VALUE__SHIFT 0x7
++#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_LONG_VALUE__SHIFT 0xa
++#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_REQUIREMENT__SHIFT 0xd
++#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0__SHIFT 0xe
++#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_RST_LTR_IN_DL_DOWN__SHIFT 0xf
++#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__TX_CHK_FC_FOR_L1__SHIFT 0x10
++#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_DSTATE_USING_WDATA_EN__SHIFT 0x11
++#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_SHORT_VALUE_MASK 0x00000007L
++#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_LONG_VALUE_MASK 0x00000038L
++#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_REQUIREMENT_MASK 0x00000040L
++#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_SHORT_VALUE_MASK 0x00000380L
++#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_LONG_VALUE_MASK 0x00001C00L
++#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_REQUIREMENT_MASK 0x00002000L
++#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0_MASK 0x00004000L
++#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_RST_LTR_IN_DL_DOWN_MASK 0x00008000L
++#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__TX_CHK_FC_FOR_L1_MASK 0x00010000L
++#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_DSTATE_USING_WDATA_EN_MASK 0x00020000L
++//RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CAP
++#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CAP__TRANS_LAT_UNIT__SHIFT 0x8
++#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CAP__PWR_ALLOC_SCALE__SHIFT 0xc
++#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_0__SHIFT 0x10
++#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_1__SHIFT 0x18
++#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CAP__TRANS_LAT_UNIT_MASK 0x00000300L
++#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CAP__PWR_ALLOC_SCALE_MASK 0x00003000L
++#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_0_MASK 0x00FF0000L
++#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_1_MASK 0xFF000000L
++//RCC_EP_DEV0_1_EP_PCIE_F0_DPA_LATENCY_INDICATOR
++#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS__SHIFT 0x0
++#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS_MASK 0xFFL
++//RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CNTL
++#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CNTL__SUBSTATE_STATUS__SHIFT 0x0
++#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CNTL__DPA_COMPLIANCE_MODE__SHIFT 0x8
++#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CNTL__SUBSTATE_STATUS_MASK 0x001FL
++#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CNTL__DPA_COMPLIANCE_MODE_MASK 0x0100L
++//RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0
++#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1
++#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2
++#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3
++#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4
++#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5
++#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6
++#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7
++#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//RCC_EP_DEV0_1_EP_PCIE_PME_CONTROL
++#define RCC_EP_DEV0_1_EP_PCIE_PME_CONTROL__PME_SERVICE_TIMER__SHIFT 0x0
++#define RCC_EP_DEV0_1_EP_PCIE_PME_CONTROL__PME_SERVICE_TIMER_MASK 0x1FL
++//RCC_EP_DEV0_1_EP_PCIEP_RESERVED
++#define RCC_EP_DEV0_1_EP_PCIEP_RESERVED__PCIEP_RESERVED__SHIFT 0x0
++#define RCC_EP_DEV0_1_EP_PCIEP_RESERVED__PCIEP_RESERVED_MASK 0xFFFFFFFFL
++//RCC_EP_DEV0_1_EP_PCIE_TX_CNTL
++#define RCC_EP_DEV0_1_EP_PCIE_TX_CNTL__TX_SNR_OVERRIDE__SHIFT 0xa
++#define RCC_EP_DEV0_1_EP_PCIE_TX_CNTL__TX_RO_OVERRIDE__SHIFT 0xc
++#define RCC_EP_DEV0_1_EP_PCIE_TX_CNTL__TX_F0_TPH_DIS__SHIFT 0x18
++#define RCC_EP_DEV0_1_EP_PCIE_TX_CNTL__TX_F1_TPH_DIS__SHIFT 0x19
++#define RCC_EP_DEV0_1_EP_PCIE_TX_CNTL__TX_F2_TPH_DIS__SHIFT 0x1a
++#define RCC_EP_DEV0_1_EP_PCIE_TX_CNTL__TX_SNR_OVERRIDE_MASK 0x00000C00L
++#define RCC_EP_DEV0_1_EP_PCIE_TX_CNTL__TX_RO_OVERRIDE_MASK 0x00003000L
++#define RCC_EP_DEV0_1_EP_PCIE_TX_CNTL__TX_F0_TPH_DIS_MASK 0x01000000L
++#define RCC_EP_DEV0_1_EP_PCIE_TX_CNTL__TX_F1_TPH_DIS_MASK 0x02000000L
++#define RCC_EP_DEV0_1_EP_PCIE_TX_CNTL__TX_F2_TPH_DIS_MASK 0x04000000L
++//RCC_EP_DEV0_1_EP_PCIE_TX_REQUESTER_ID
++#define RCC_EP_DEV0_1_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_FUNCTION__SHIFT 0x0
++#define RCC_EP_DEV0_1_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_DEVICE__SHIFT 0x3
++#define RCC_EP_DEV0_1_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_BUS__SHIFT 0x8
++#define RCC_EP_DEV0_1_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_FUNCTION_MASK 0x00000007L
++#define RCC_EP_DEV0_1_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_DEVICE_MASK 0x000000F8L
++#define RCC_EP_DEV0_1_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_BUS_MASK 0x0000FF00L
++//RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL
++#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__ERR_REPORTING_DIS__SHIFT 0x0
++#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_TIMEOUT__SHIFT 0x8
++#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY__SHIFT 0x11
++#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__STRAP_POISONED_ADVISORY_NONFATAL__SHIFT 0x12
++#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F0_TIMER_EXPIRED__SHIFT 0x18
++#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F1_TIMER_EXPIRED__SHIFT 0x19
++#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F2_TIMER_EXPIRED__SHIFT 0x1a
++#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F3_TIMER_EXPIRED__SHIFT 0x1b
++#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F4_TIMER_EXPIRED__SHIFT 0x1c
++#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F5_TIMER_EXPIRED__SHIFT 0x1d
++#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F6_TIMER_EXPIRED__SHIFT 0x1e
++#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F7_TIMER_EXPIRED__SHIFT 0x1f
++#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__ERR_REPORTING_DIS_MASK 0x00000001L
++#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_TIMEOUT_MASK 0x00000700L
++#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY_MASK 0x00020000L
++#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__STRAP_POISONED_ADVISORY_NONFATAL_MASK 0x00040000L
++#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F0_TIMER_EXPIRED_MASK 0x01000000L
++#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F1_TIMER_EXPIRED_MASK 0x02000000L
++#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F2_TIMER_EXPIRED_MASK 0x04000000L
++#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F3_TIMER_EXPIRED_MASK 0x08000000L
++#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F4_TIMER_EXPIRED_MASK 0x10000000L
++#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F5_TIMER_EXPIRED_MASK 0x20000000L
++#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F6_TIMER_EXPIRED_MASK 0x40000000L
++#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F7_TIMER_EXPIRED_MASK 0x80000000L
++//RCC_EP_DEV0_1_EP_PCIE_RX_CNTL
++#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR__SHIFT 0x8
++#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_TC_ERR__SHIFT 0x9
++#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS__SHIFT 0x14
++#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR__SHIFT 0x15
++#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_MAXPREFIX_ERR__SHIFT 0x16
++#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_INVALIDPASID_ERR__SHIFT 0x18
++#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_NOT_PASID_UR__SHIFT 0x19
++#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_TPH_DIS__SHIFT 0x1a
++#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR_MASK 0x00000100L
++#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_TC_ERR_MASK 0x00000200L
++#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS_MASK 0x00100000L
++#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR_MASK 0x00200000L
++#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_MAXPREFIX_ERR_MASK 0x00400000L
++#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_INVALIDPASID_ERR_MASK 0x01000000L
++#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_NOT_PASID_UR_MASK 0x02000000L
++#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_TPH_DIS_MASK 0x04000000L
++//RCC_EP_DEV0_1_EP_PCIE_LC_SPEED_CNTL
++#define RCC_EP_DEV0_1_EP_PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP__SHIFT 0x0
++#define RCC_EP_DEV0_1_EP_PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP__SHIFT 0x1
++#define RCC_EP_DEV0_1_EP_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP__SHIFT 0x2
++#define RCC_EP_DEV0_1_EP_PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP_MASK 0x00000001L
++#define RCC_EP_DEV0_1_EP_PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP_MASK 0x00000002L
++#define RCC_EP_DEV0_1_EP_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP_MASK 0x00000004L
++
++
++// addressBlock: nbio_nbif0_rcc_dwn_dev0_RCCPORTDEC
++//RCC_DWN_DEV0_1_DN_PCIE_RESERVED
++#define RCC_DWN_DEV0_1_DN_PCIE_RESERVED__PCIE_RESERVED__SHIFT 0x0
++#define RCC_DWN_DEV0_1_DN_PCIE_RESERVED__PCIE_RESERVED_MASK 0xFFFFFFFFL
++//RCC_DWN_DEV0_1_DN_PCIE_SCRATCH
++#define RCC_DWN_DEV0_1_DN_PCIE_SCRATCH__PCIE_SCRATCH__SHIFT 0x0
++#define RCC_DWN_DEV0_1_DN_PCIE_SCRATCH__PCIE_SCRATCH_MASK 0xFFFFFFFFL
++//RCC_DWN_DEV0_1_DN_PCIE_CNTL
++#define RCC_DWN_DEV0_1_DN_PCIE_CNTL__HWINIT_WR_LOCK__SHIFT 0x0
++#define RCC_DWN_DEV0_1_DN_PCIE_CNTL__UR_ERR_REPORT_DIS_DN__SHIFT 0x7
++#define RCC_DWN_DEV0_1_DN_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR__SHIFT 0x1e
++#define RCC_DWN_DEV0_1_DN_PCIE_CNTL__HWINIT_WR_LOCK_MASK 0x00000001L
++#define RCC_DWN_DEV0_1_DN_PCIE_CNTL__UR_ERR_REPORT_DIS_DN_MASK 0x00000080L
++#define RCC_DWN_DEV0_1_DN_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR_MASK 0x40000000L
++//RCC_DWN_DEV0_1_DN_PCIE_CONFIG_CNTL
++#define RCC_DWN_DEV0_1_DN_PCIE_CONFIG_CNTL__CI_EXTENDED_TAG_EN_OVERRIDE__SHIFT 0x19
++#define RCC_DWN_DEV0_1_DN_PCIE_CONFIG_CNTL__CI_EXTENDED_TAG_EN_OVERRIDE_MASK 0x06000000L
++//RCC_DWN_DEV0_1_DN_PCIE_RX_CNTL2
++#define RCC_DWN_DEV0_1_DN_PCIE_RX_CNTL2__FLR_EXTEND_MODE__SHIFT 0x1c
++#define RCC_DWN_DEV0_1_DN_PCIE_RX_CNTL2__FLR_EXTEND_MODE_MASK 0x70000000L
++//RCC_DWN_DEV0_1_DN_PCIE_BUS_CNTL
++#define RCC_DWN_DEV0_1_DN_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS__SHIFT 0x7
++#define RCC_DWN_DEV0_1_DN_PCIE_BUS_CNTL__AER_CPL_TIMEOUT_RO_DIS_SWDN__SHIFT 0x8
++#define RCC_DWN_DEV0_1_DN_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS_MASK 0x00000080L
++#define RCC_DWN_DEV0_1_DN_PCIE_BUS_CNTL__AER_CPL_TIMEOUT_RO_DIS_SWDN_MASK 0x00000100L
++//RCC_DWN_DEV0_1_DN_PCIE_CFG_CNTL
++#define RCC_DWN_DEV0_1_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG__SHIFT 0x0
++#define RCC_DWN_DEV0_1_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG__SHIFT 0x1
++#define RCC_DWN_DEV0_1_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG__SHIFT 0x2
++#define RCC_DWN_DEV0_1_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG__SHIFT 0x3
++#define RCC_DWN_DEV0_1_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG_MASK 0x00000001L
++#define RCC_DWN_DEV0_1_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG_MASK 0x00000002L
++#define RCC_DWN_DEV0_1_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG_MASK 0x00000004L
++#define RCC_DWN_DEV0_1_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG_MASK 0x00000008L
++
++
++// addressBlock: nbio_nbif0_rcc_dwnp_dev0_RCCPORTDEC
++//RCC_DWNP_DEV0_1_PCIE_ERR_CNTL
++#define RCC_DWNP_DEV0_1_PCIE_ERR_CNTL__ERR_REPORTING_DIS__SHIFT 0x0
++#define RCC_DWNP_DEV0_1_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY__SHIFT 0x11
++#define RCC_DWNP_DEV0_1_PCIE_ERR_CNTL__ERR_REPORTING_DIS_MASK 0x00000001L
++#define RCC_DWNP_DEV0_1_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY_MASK 0x00020000L
++//RCC_DWNP_DEV0_1_PCIE_RX_CNTL
++#define RCC_DWNP_DEV0_1_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR__SHIFT 0x8
++#define RCC_DWNP_DEV0_1_PCIE_RX_CNTL__RX_IGNORE_TC_ERR_DN__SHIFT 0x9
++#define RCC_DWNP_DEV0_1_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS__SHIFT 0x14
++#define RCC_DWNP_DEV0_1_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR_DN__SHIFT 0x15
++#define RCC_DWNP_DEV0_1_PCIE_RX_CNTL__RX_RCB_FLR_TIMEOUT_DIS__SHIFT 0x1b
++#define RCC_DWNP_DEV0_1_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR_MASK 0x00000100L
++#define RCC_DWNP_DEV0_1_PCIE_RX_CNTL__RX_IGNORE_TC_ERR_DN_MASK 0x00000200L
++#define RCC_DWNP_DEV0_1_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS_MASK 0x00100000L
++#define RCC_DWNP_DEV0_1_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR_DN_MASK 0x00200000L
++#define RCC_DWNP_DEV0_1_PCIE_RX_CNTL__RX_RCB_FLR_TIMEOUT_DIS_MASK 0x08000000L
++//RCC_DWNP_DEV0_1_PCIE_LC_SPEED_CNTL
++#define RCC_DWNP_DEV0_1_PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP__SHIFT 0x0
++#define RCC_DWNP_DEV0_1_PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP__SHIFT 0x1
++#define RCC_DWNP_DEV0_1_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP__SHIFT 0x2
++#define RCC_DWNP_DEV0_1_PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP_MASK 0x00000001L
++#define RCC_DWNP_DEV0_1_PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP_MASK 0x00000002L
++#define RCC_DWNP_DEV0_1_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP_MASK 0x00000004L
++//RCC_DWNP_DEV0_1_PCIE_LC_CNTL2
++#define RCC_DWNP_DEV0_1_PCIE_LC_CNTL2__LC_LINK_BW_NOTIFICATION_DIS__SHIFT 0x1b
++#define RCC_DWNP_DEV0_1_PCIE_LC_CNTL2__LC_LINK_BW_NOTIFICATION_DIS_MASK 0x08000000L
++//RCC_DWNP_DEV0_1_LTR_MSG_INFO_FROM_EP
++#define RCC_DWNP_DEV0_1_LTR_MSG_INFO_FROM_EP__LTR_MSG_INFO_FROM_EP__SHIFT 0x0
++#define RCC_DWNP_DEV0_1_LTR_MSG_INFO_FROM_EP__LTR_MSG_INFO_FROM_EP_MASK 0xFFFFFFFFL
++
++
++// addressBlock: nbio_nbif0_rcc_strap_rcc_strap_internal
++//RCC_STRAP1_RCC_DEV0_EPF0_STRAP0
++#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_DEVICE_ID_DEV0_F0__SHIFT 0x0
++#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_MAJOR_REV_ID_DEV0_F0__SHIFT 0x10
++#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_MINOR_REV_ID_DEV0_F0__SHIFT 0x14
++#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT 0x18
++#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_FUNC_EN_DEV0_F0__SHIFT 0x1c
++#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_LEGACY_DEVICE_TYPE_EN_DEV0_F0__SHIFT 0x1d
++#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_D1_SUPPORT_DEV0_F0__SHIFT 0x1e
++#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_D2_SUPPORT_DEV0_F0__SHIFT 0x1f
++#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_DEVICE_ID_DEV0_F0_MASK 0x0000FFFFL
++#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_MAJOR_REV_ID_DEV0_F0_MASK 0x000F0000L
++#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_MINOR_REV_ID_DEV0_F0_MASK 0x00F00000L
++#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK 0x0F000000L
++#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_FUNC_EN_DEV0_F0_MASK 0x10000000L
++#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_LEGACY_DEVICE_TYPE_EN_DEV0_F0_MASK 0x20000000L
++#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_D1_SUPPORT_DEV0_F0_MASK 0x40000000L
++#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_D2_SUPPORT_DEV0_F0_MASK 0x80000000L
++
++
++// addressBlock: nbio_nbif0_bif_misc_bif_misc_regblk
++//MISC_SCRATCH
++#define MISC_SCRATCH__MISC_SCRATCH0__SHIFT 0x0
++#define MISC_SCRATCH__MISC_SCRATCH0_MASK 0xFFFFFFFFL
++//INTR_LINE_POLARITY
++#define INTR_LINE_POLARITY__INTR_LINE_POLARITY_DEV0__SHIFT 0x0
++#define INTR_LINE_POLARITY__INTR_LINE_POLARITY_DEV0_MASK 0x000000FFL
++//INTR_LINE_ENABLE
++#define INTR_LINE_ENABLE__INTR_LINE_ENABLE_DEV0__SHIFT 0x0
++#define INTR_LINE_ENABLE__INTR_LINE_ENABLE_DEV0_MASK 0x000000FFL
++//OUTSTANDING_VC_ALLOC
++#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC0_ALLOC__SHIFT 0x0
++#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC1_ALLOC__SHIFT 0x2
++#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC2_ALLOC__SHIFT 0x4
++#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC3_ALLOC__SHIFT 0x6
++#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC4_ALLOC__SHIFT 0x8
++#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC5_ALLOC__SHIFT 0xa
++#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC6_ALLOC__SHIFT 0xc
++#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC7_ALLOC__SHIFT 0xe
++#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_THRD__SHIFT 0x10
++#define OUTSTANDING_VC_ALLOC__HST_OUTSTANDING_VC0_ALLOC__SHIFT 0x18
++#define OUTSTANDING_VC_ALLOC__HST_OUTSTANDING_VC1_ALLOC__SHIFT 0x1a
++#define OUTSTANDING_VC_ALLOC__HST_OUTSTANDING_THRD__SHIFT 0x1c
++#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC0_ALLOC_MASK 0x00000003L
++#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC1_ALLOC_MASK 0x0000000CL
++#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC2_ALLOC_MASK 0x00000030L
++#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC3_ALLOC_MASK 0x000000C0L
++#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC4_ALLOC_MASK 0x00000300L
++#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC5_ALLOC_MASK 0x00000C00L
++#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC6_ALLOC_MASK 0x00003000L
++#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC7_ALLOC_MASK 0x0000C000L
++#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_THRD_MASK 0x000F0000L
++#define OUTSTANDING_VC_ALLOC__HST_OUTSTANDING_VC0_ALLOC_MASK 0x03000000L
++#define OUTSTANDING_VC_ALLOC__HST_OUTSTANDING_VC1_ALLOC_MASK 0x0C000000L
++#define OUTSTANDING_VC_ALLOC__HST_OUTSTANDING_THRD_MASK 0xF0000000L
++//BIFC_MISC_CTRL0
++#define BIFC_MISC_CTRL0__VWIRE_TARG_UNITID_CHECK_EN__SHIFT 0x0
++#define BIFC_MISC_CTRL0__VWIRE_SRC_UNITID_CHECK_EN__SHIFT 0x1
++#define BIFC_MISC_CTRL0__DMA_VC4_NON_DVM_STS__SHIFT 0x4
++#define BIFC_MISC_CTRL0__DMA_CHAIN_BREAK_IN_RCMODE__SHIFT 0x8
++#define BIFC_MISC_CTRL0__HST_ARB_CHAIN_LOCK__SHIFT 0x9
++#define BIFC_MISC_CTRL0__GSI_SST_ARB_CHAIN_LOCK__SHIFT 0xa
++#define BIFC_MISC_CTRL0__GSI_RD_SPLIT_STALL_FLUSH_EN__SHIFT 0xb
++#define BIFC_MISC_CTRL0__GSI_RD_SPLIT_STALL_NPWR_DIS__SHIFT 0xc
++#define BIFC_MISC_CTRL0__GSI_SET_PRECEEDINGWR_DIS__SHIFT 0xd
++#define BIFC_MISC_CTRL0__DMA_ATOMIC_LENGTH_CHK_DIS__SHIFT 0x10
++#define BIFC_MISC_CTRL0__DMA_ATOMIC_FAILED_STS_SEL__SHIFT 0x11
++#define BIFC_MISC_CTRL0__DMA_FORCE_VF_AS_PF_SRIOIVEN_LOW__SHIFT 0x12
++#define BIFC_MISC_CTRL0__DMA_ADDR_KEEP_PH__SHIFT 0x13
++#define BIFC_MISC_CTRL0__RCC_GMI_TD_FORCE_ZERO__SHIFT 0x14
++#define BIFC_MISC_CTRL0__PCIE_CAPABILITY_PROT_DIS__SHIFT 0x18
++#define BIFC_MISC_CTRL0__VC7_DMA_IOCFG_DIS__SHIFT 0x19
++#define BIFC_MISC_CTRL0__DMA_2ND_REQ_DIS__SHIFT 0x1a
++#define BIFC_MISC_CTRL0__PORT_DSTATE_BYPASS_MODE__SHIFT 0x1b
++#define BIFC_MISC_CTRL0__PME_TURNOFF_MODE__SHIFT 0x1c
++#define BIFC_MISC_CTRL0__PCIESWUS_SELECTION__SHIFT 0x1f
++#define BIFC_MISC_CTRL0__VWIRE_TARG_UNITID_CHECK_EN_MASK 0x00000001L
++#define BIFC_MISC_CTRL0__VWIRE_SRC_UNITID_CHECK_EN_MASK 0x00000006L
++#define BIFC_MISC_CTRL0__DMA_VC4_NON_DVM_STS_MASK 0x000000F0L
++#define BIFC_MISC_CTRL0__DMA_CHAIN_BREAK_IN_RCMODE_MASK 0x00000100L
++#define BIFC_MISC_CTRL0__HST_ARB_CHAIN_LOCK_MASK 0x00000200L
++#define BIFC_MISC_CTRL0__GSI_SST_ARB_CHAIN_LOCK_MASK 0x00000400L
++#define BIFC_MISC_CTRL0__GSI_RD_SPLIT_STALL_FLUSH_EN_MASK 0x00000800L
++#define BIFC_MISC_CTRL0__GSI_RD_SPLIT_STALL_NPWR_DIS_MASK 0x00001000L
++#define BIFC_MISC_CTRL0__GSI_SET_PRECEEDINGWR_DIS_MASK 0x00002000L
++#define BIFC_MISC_CTRL0__DMA_ATOMIC_LENGTH_CHK_DIS_MASK 0x00010000L
++#define BIFC_MISC_CTRL0__DMA_ATOMIC_FAILED_STS_SEL_MASK 0x00020000L
++#define BIFC_MISC_CTRL0__DMA_FORCE_VF_AS_PF_SRIOIVEN_LOW_MASK 0x00040000L
++#define BIFC_MISC_CTRL0__DMA_ADDR_KEEP_PH_MASK 0x00080000L
++#define BIFC_MISC_CTRL0__RCC_GMI_TD_FORCE_ZERO_MASK 0x00100000L
++#define BIFC_MISC_CTRL0__PCIE_CAPABILITY_PROT_DIS_MASK 0x01000000L
++#define BIFC_MISC_CTRL0__VC7_DMA_IOCFG_DIS_MASK 0x02000000L
++#define BIFC_MISC_CTRL0__DMA_2ND_REQ_DIS_MASK 0x04000000L
++#define BIFC_MISC_CTRL0__PORT_DSTATE_BYPASS_MODE_MASK 0x08000000L
++#define BIFC_MISC_CTRL0__PME_TURNOFF_MODE_MASK 0x10000000L
++#define BIFC_MISC_CTRL0__PCIESWUS_SELECTION_MASK 0x80000000L
++//BIFC_MISC_CTRL1
++#define BIFC_MISC_CTRL1__THT_HST_CPLD_POISON_REPORT__SHIFT 0x0
++#define BIFC_MISC_CTRL1__DMA_REQ_POISON_REPORT__SHIFT 0x1
++#define BIFC_MISC_CTRL1__DMA_REQ_ACSVIO_REPORT__SHIFT 0x2
++#define BIFC_MISC_CTRL1__DMA_RSP_POISON_CPLD_REPORT__SHIFT 0x3
++#define BIFC_MISC_CTRL1__GSI_SMN_WORST_ERR_STSTUS__SHIFT 0x4
++#define BIFC_MISC_CTRL1__GSI_SDP_RDRSP_DATA_FORCE1_FOR_ERROR__SHIFT 0x5
++#define BIFC_MISC_CTRL1__GSI_RDWR_BALANCE_DIS__SHIFT 0x6
++#define BIFC_MISC_CTRL1__GMI_ATOMIC_POISON_DROP__SHIFT 0x7
++#define BIFC_MISC_CTRL1__HST_UNSUPPORT_SDPCMD_STS__SHIFT 0x8
++#define BIFC_MISC_CTRL1__HST_UNSUPPORT_SDPCMD_DATASTS__SHIFT 0xa
++#define BIFC_MISC_CTRL1__DROP_OTHER_HT_ADDR_REQ__SHIFT 0xc
++#define BIFC_MISC_CTRL1__DMAWRREQ_HSTRDRSP_ORDER_FORCE__SHIFT 0xd
++#define BIFC_MISC_CTRL1__DMAWRREQ_HSTRDRSP_ORDER_FORCE_VALUE__SHIFT 0xe
++#define BIFC_MISC_CTRL1__UPS_SDP_RDY_TIE1__SHIFT 0xf
++#define BIFC_MISC_CTRL1__GMI_RCC_DN_BME_DROP_DIS__SHIFT 0x10
++#define BIFC_MISC_CTRL1__GMI_RCC_EP_BME_DROP_DIS__SHIFT 0x11
++#define BIFC_MISC_CTRL1__GMI_BIH_DN_BME_DROP_DIS__SHIFT 0x12
++#define BIFC_MISC_CTRL1__GMI_BIH_EP_BME_DROP_DIS__SHIFT 0x13
++#define BIFC_MISC_CTRL1__GSI_SDP_RDRSP_DATA_FORCE0_FOR_ERROR__SHIFT 0x14
++#define BIFC_MISC_CTRL1__GSI_SMN_POSTWR_MULTI_EN__SHIFT 0x15
++#define BIFC_MISC_CTRL1__GMI_RDSIZED_REQATTR_MASK__SHIFT 0x18
++#define BIFC_MISC_CTRL1__GMI_RDSIZEDDW_REQATTR_MASK__SHIFT 0x19
++#define BIFC_MISC_CTRL1__GMI_WRSIZED_REQATTR_MASK__SHIFT 0x1a
++#define BIFC_MISC_CTRL1__GMI_WRSIZEDFL_REQATTR_MASK__SHIFT 0x1b
++#define BIFC_MISC_CTRL1__GMI_FORCE_NOT_SEND_NON_BASEVC_RSPCREDIT__SHIFT 0x1c
++#define BIFC_MISC_CTRL1__GMI_CPLBUF_EN__SHIFT 0x1d
++#define BIFC_MISC_CTRL1__GMI_MSG_BLOCKLVL_SEL__SHIFT 0x1e
++#define BIFC_MISC_CTRL1__THT_HST_CPLD_POISON_REPORT_MASK 0x00000001L
++#define BIFC_MISC_CTRL1__DMA_REQ_POISON_REPORT_MASK 0x00000002L
++#define BIFC_MISC_CTRL1__DMA_REQ_ACSVIO_REPORT_MASK 0x00000004L
++#define BIFC_MISC_CTRL1__DMA_RSP_POISON_CPLD_REPORT_MASK 0x00000008L
++#define BIFC_MISC_CTRL1__GSI_SMN_WORST_ERR_STSTUS_MASK 0x00000010L
++#define BIFC_MISC_CTRL1__GSI_SDP_RDRSP_DATA_FORCE1_FOR_ERROR_MASK 0x00000020L
++#define BIFC_MISC_CTRL1__GSI_RDWR_BALANCE_DIS_MASK 0x00000040L
++#define BIFC_MISC_CTRL1__GMI_ATOMIC_POISON_DROP_MASK 0x00000080L
++#define BIFC_MISC_CTRL1__HST_UNSUPPORT_SDPCMD_STS_MASK 0x00000300L
++#define BIFC_MISC_CTRL1__HST_UNSUPPORT_SDPCMD_DATASTS_MASK 0x00000C00L
++#define BIFC_MISC_CTRL1__DROP_OTHER_HT_ADDR_REQ_MASK 0x00001000L
++#define BIFC_MISC_CTRL1__DMAWRREQ_HSTRDRSP_ORDER_FORCE_MASK 0x00002000L
++#define BIFC_MISC_CTRL1__DMAWRREQ_HSTRDRSP_ORDER_FORCE_VALUE_MASK 0x00004000L
++#define BIFC_MISC_CTRL1__UPS_SDP_RDY_TIE1_MASK 0x00008000L
++#define BIFC_MISC_CTRL1__GMI_RCC_DN_BME_DROP_DIS_MASK 0x00010000L
++#define BIFC_MISC_CTRL1__GMI_RCC_EP_BME_DROP_DIS_MASK 0x00020000L
++#define BIFC_MISC_CTRL1__GMI_BIH_DN_BME_DROP_DIS_MASK 0x00040000L
++#define BIFC_MISC_CTRL1__GMI_BIH_EP_BME_DROP_DIS_MASK 0x00080000L
++#define BIFC_MISC_CTRL1__GSI_SDP_RDRSP_DATA_FORCE0_FOR_ERROR_MASK 0x00100000L
++#define BIFC_MISC_CTRL1__GSI_SMN_POSTWR_MULTI_EN_MASK 0x00200000L
++#define BIFC_MISC_CTRL1__GMI_RDSIZED_REQATTR_MASK_MASK 0x01000000L
++#define BIFC_MISC_CTRL1__GMI_RDSIZEDDW_REQATTR_MASK_MASK 0x02000000L
++#define BIFC_MISC_CTRL1__GMI_WRSIZED_REQATTR_MASK_MASK 0x04000000L
++#define BIFC_MISC_CTRL1__GMI_WRSIZEDFL_REQATTR_MASK_MASK 0x08000000L
++#define BIFC_MISC_CTRL1__GMI_FORCE_NOT_SEND_NON_BASEVC_RSPCREDIT_MASK 0x10000000L
++#define BIFC_MISC_CTRL1__GMI_CPLBUF_EN_MASK 0x20000000L
++#define BIFC_MISC_CTRL1__GMI_MSG_BLOCKLVL_SEL_MASK 0xC0000000L
++//BIFC_BME_ERR_LOG
++#define BIFC_BME_ERR_LOG__DMA_ON_BME_LOW_DEV0_F0__SHIFT 0x0
++#define BIFC_BME_ERR_LOG__DMA_ON_BME_LOW_DEV0_F1__SHIFT 0x1
++#define BIFC_BME_ERR_LOG__DMA_ON_BME_LOW_DEV0_F2__SHIFT 0x2
++#define BIFC_BME_ERR_LOG__DMA_ON_BME_LOW_DEV0_F3__SHIFT 0x3
++#define BIFC_BME_ERR_LOG__DMA_ON_BME_LOW_DEV0_F4__SHIFT 0x4
++#define BIFC_BME_ERR_LOG__DMA_ON_BME_LOW_DEV0_F5__SHIFT 0x5
++#define BIFC_BME_ERR_LOG__DMA_ON_BME_LOW_DEV0_F6__SHIFT 0x6
++#define BIFC_BME_ERR_LOG__DMA_ON_BME_LOW_DEV0_F7__SHIFT 0x7
++#define BIFC_BME_ERR_LOG__CLEAR_DMA_ON_BME_LOW_DEV0_F0__SHIFT 0x10
++#define BIFC_BME_ERR_LOG__CLEAR_DMA_ON_BME_LOW_DEV0_F1__SHIFT 0x11
++#define BIFC_BME_ERR_LOG__CLEAR_DMA_ON_BME_LOW_DEV0_F2__SHIFT 0x12
++#define BIFC_BME_ERR_LOG__CLEAR_DMA_ON_BME_LOW_DEV0_F3__SHIFT 0x13
++#define BIFC_BME_ERR_LOG__CLEAR_DMA_ON_BME_LOW_DEV0_F4__SHIFT 0x14
++#define BIFC_BME_ERR_LOG__CLEAR_DMA_ON_BME_LOW_DEV0_F5__SHIFT 0x15
++#define BIFC_BME_ERR_LOG__CLEAR_DMA_ON_BME_LOW_DEV0_F6__SHIFT 0x16
++#define BIFC_BME_ERR_LOG__CLEAR_DMA_ON_BME_LOW_DEV0_F7__SHIFT 0x17
++#define BIFC_BME_ERR_LOG__DMA_ON_BME_LOW_DEV0_F0_MASK 0x00000001L
++#define BIFC_BME_ERR_LOG__DMA_ON_BME_LOW_DEV0_F1_MASK 0x00000002L
++#define BIFC_BME_ERR_LOG__DMA_ON_BME_LOW_DEV0_F2_MASK 0x00000004L
++#define BIFC_BME_ERR_LOG__DMA_ON_BME_LOW_DEV0_F3_MASK 0x00000008L
++#define BIFC_BME_ERR_LOG__DMA_ON_BME_LOW_DEV0_F4_MASK 0x00000010L
++#define BIFC_BME_ERR_LOG__DMA_ON_BME_LOW_DEV0_F5_MASK 0x00000020L
++#define BIFC_BME_ERR_LOG__DMA_ON_BME_LOW_DEV0_F6_MASK 0x00000040L
++#define BIFC_BME_ERR_LOG__DMA_ON_BME_LOW_DEV0_F7_MASK 0x00000080L
++#define BIFC_BME_ERR_LOG__CLEAR_DMA_ON_BME_LOW_DEV0_F0_MASK 0x00010000L
++#define BIFC_BME_ERR_LOG__CLEAR_DMA_ON_BME_LOW_DEV0_F1_MASK 0x00020000L
++#define BIFC_BME_ERR_LOG__CLEAR_DMA_ON_BME_LOW_DEV0_F2_MASK 0x00040000L
++#define BIFC_BME_ERR_LOG__CLEAR_DMA_ON_BME_LOW_DEV0_F3_MASK 0x00080000L
++#define BIFC_BME_ERR_LOG__CLEAR_DMA_ON_BME_LOW_DEV0_F4_MASK 0x00100000L
++#define BIFC_BME_ERR_LOG__CLEAR_DMA_ON_BME_LOW_DEV0_F5_MASK 0x00200000L
++#define BIFC_BME_ERR_LOG__CLEAR_DMA_ON_BME_LOW_DEV0_F6_MASK 0x00400000L
++#define BIFC_BME_ERR_LOG__CLEAR_DMA_ON_BME_LOW_DEV0_F7_MASK 0x00800000L
++//BIFC_RCCBIH_BME_ERR_LOG0
++#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F0__SHIFT 0x0
++#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F1__SHIFT 0x1
++#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F2__SHIFT 0x2
++#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F3__SHIFT 0x3
++#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F4__SHIFT 0x4
++#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F5__SHIFT 0x5
++#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F6__SHIFT 0x6
++#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F7__SHIFT 0x7
++#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F0__SHIFT 0x10
++#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F1__SHIFT 0x11
++#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F2__SHIFT 0x12
++#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F3__SHIFT 0x13
++#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F4__SHIFT 0x14
++#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F5__SHIFT 0x15
++#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F6__SHIFT 0x16
++#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F7__SHIFT 0x17
++#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F0_MASK 0x00000001L
++#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F1_MASK 0x00000002L
++#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F2_MASK 0x00000004L
++#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F3_MASK 0x00000008L
++#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F4_MASK 0x00000010L
++#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F5_MASK 0x00000020L
++#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F6_MASK 0x00000040L
++#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F7_MASK 0x00000080L
++#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F0_MASK 0x00010000L
++#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F1_MASK 0x00020000L
++#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F2_MASK 0x00040000L
++#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F3_MASK 0x00080000L
++#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F4_MASK 0x00100000L
++#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F5_MASK 0x00200000L
++#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F6_MASK 0x00400000L
++#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F7_MASK 0x00800000L
++//BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_IDO_OVERIDE_P_DEV0_F0__SHIFT 0x0
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_IDO_OVERIDE_NP_DEV0_F0__SHIFT 0x2
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__BLKLVL_FOR_IDO_DEV0_F0__SHIFT 0x4
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_RO_OVERIDE_P_DEV0_F0__SHIFT 0x6
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_RO_OVERIDE_NP_DEV0_F0__SHIFT 0x8
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_SNR_OVERIDE_P_DEV0_F0__SHIFT 0xa
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_SNR_OVERIDE_NP_DEV0_F0__SHIFT 0xc
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__BLKLVL_FOR_NONIDO_DEV0_F0__SHIFT 0xe
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_IDO_OVERIDE_P_DEV0_F1__SHIFT 0x10
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_IDO_OVERIDE_NP_DEV0_F1__SHIFT 0x12
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__BLKLVL_FOR_IDO_DEV0_F1__SHIFT 0x14
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_RO_OVERIDE_P_DEV0_F1__SHIFT 0x16
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_RO_OVERIDE_NP_DEV0_F1__SHIFT 0x18
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_SNR_OVERIDE_P_DEV0_F1__SHIFT 0x1a
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_SNR_OVERIDE_NP_DEV0_F1__SHIFT 0x1c
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__BLKLVL_FOR_NONIDO_DEV0_F1__SHIFT 0x1e
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_IDO_OVERIDE_P_DEV0_F0_MASK 0x00000003L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_IDO_OVERIDE_NP_DEV0_F0_MASK 0x0000000CL
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__BLKLVL_FOR_IDO_DEV0_F0_MASK 0x00000030L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_RO_OVERIDE_P_DEV0_F0_MASK 0x000000C0L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_RO_OVERIDE_NP_DEV0_F0_MASK 0x00000300L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_SNR_OVERIDE_P_DEV0_F0_MASK 0x00000C00L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_SNR_OVERIDE_NP_DEV0_F0_MASK 0x00003000L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__BLKLVL_FOR_NONIDO_DEV0_F0_MASK 0x0000C000L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_IDO_OVERIDE_P_DEV0_F1_MASK 0x00030000L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_IDO_OVERIDE_NP_DEV0_F1_MASK 0x000C0000L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__BLKLVL_FOR_IDO_DEV0_F1_MASK 0x00300000L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_RO_OVERIDE_P_DEV0_F1_MASK 0x00C00000L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_RO_OVERIDE_NP_DEV0_F1_MASK 0x03000000L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_SNR_OVERIDE_P_DEV0_F1_MASK 0x0C000000L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_SNR_OVERIDE_NP_DEV0_F1_MASK 0x30000000L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__BLKLVL_FOR_NONIDO_DEV0_F1_MASK 0xC0000000L
++//BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_IDO_OVERIDE_P_DEV0_F2__SHIFT 0x0
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_IDO_OVERIDE_NP_DEV0_F2__SHIFT 0x2
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__BLKLVL_FOR_IDO_DEV0_F2__SHIFT 0x4
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_RO_OVERIDE_P_DEV0_F2__SHIFT 0x6
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_RO_OVERIDE_NP_DEV0_F2__SHIFT 0x8
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_SNR_OVERIDE_P_DEV0_F2__SHIFT 0xa
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_SNR_OVERIDE_NP_DEV0_F2__SHIFT 0xc
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__BLKLVL_FOR_NONIDO_DEV0_F2__SHIFT 0xe
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_IDO_OVERIDE_P_DEV0_F3__SHIFT 0x10
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_IDO_OVERIDE_NP_DEV0_F3__SHIFT 0x12
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__BLKLVL_FOR_IDO_DEV0_F3__SHIFT 0x14
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_RO_OVERIDE_P_DEV0_F3__SHIFT 0x16
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_RO_OVERIDE_NP_DEV0_F3__SHIFT 0x18
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_SNR_OVERIDE_P_DEV0_F3__SHIFT 0x1a
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_SNR_OVERIDE_NP_DEV0_F3__SHIFT 0x1c
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__BLKLVL_FOR_NONIDO_DEV0_F3__SHIFT 0x1e
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_IDO_OVERIDE_P_DEV0_F2_MASK 0x00000003L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_IDO_OVERIDE_NP_DEV0_F2_MASK 0x0000000CL
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__BLKLVL_FOR_IDO_DEV0_F2_MASK 0x00000030L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_RO_OVERIDE_P_DEV0_F2_MASK 0x000000C0L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_RO_OVERIDE_NP_DEV0_F2_MASK 0x00000300L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_SNR_OVERIDE_P_DEV0_F2_MASK 0x00000C00L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_SNR_OVERIDE_NP_DEV0_F2_MASK 0x00003000L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__BLKLVL_FOR_NONIDO_DEV0_F2_MASK 0x0000C000L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_IDO_OVERIDE_P_DEV0_F3_MASK 0x00030000L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_IDO_OVERIDE_NP_DEV0_F3_MASK 0x000C0000L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__BLKLVL_FOR_IDO_DEV0_F3_MASK 0x00300000L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_RO_OVERIDE_P_DEV0_F3_MASK 0x00C00000L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_RO_OVERIDE_NP_DEV0_F3_MASK 0x03000000L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_SNR_OVERIDE_P_DEV0_F3_MASK 0x0C000000L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_SNR_OVERIDE_NP_DEV0_F3_MASK 0x30000000L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__BLKLVL_FOR_NONIDO_DEV0_F3_MASK 0xC0000000L
++//BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_IDO_OVERIDE_P_DEV0_F4__SHIFT 0x0
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_IDO_OVERIDE_NP_DEV0_F4__SHIFT 0x2
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__BLKLVL_FOR_IDO_DEV0_F4__SHIFT 0x4
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_RO_OVERIDE_P_DEV0_F4__SHIFT 0x6
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_RO_OVERIDE_NP_DEV0_F4__SHIFT 0x8
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_SNR_OVERIDE_P_DEV0_F4__SHIFT 0xa
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_SNR_OVERIDE_NP_DEV0_F4__SHIFT 0xc
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__BLKLVL_FOR_NONIDO_DEV0_F4__SHIFT 0xe
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_IDO_OVERIDE_P_DEV0_F5__SHIFT 0x10
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_IDO_OVERIDE_NP_DEV0_F5__SHIFT 0x12
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__BLKLVL_FOR_IDO_DEV0_F5__SHIFT 0x14
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_RO_OVERIDE_P_DEV0_F5__SHIFT 0x16
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_RO_OVERIDE_NP_DEV0_F5__SHIFT 0x18
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_SNR_OVERIDE_P_DEV0_F5__SHIFT 0x1a
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_SNR_OVERIDE_NP_DEV0_F5__SHIFT 0x1c
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__BLKLVL_FOR_NONIDO_DEV0_F5__SHIFT 0x1e
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_IDO_OVERIDE_P_DEV0_F4_MASK 0x00000003L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_IDO_OVERIDE_NP_DEV0_F4_MASK 0x0000000CL
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__BLKLVL_FOR_IDO_DEV0_F4_MASK 0x00000030L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_RO_OVERIDE_P_DEV0_F4_MASK 0x000000C0L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_RO_OVERIDE_NP_DEV0_F4_MASK 0x00000300L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_SNR_OVERIDE_P_DEV0_F4_MASK 0x00000C00L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_SNR_OVERIDE_NP_DEV0_F4_MASK 0x00003000L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__BLKLVL_FOR_NONIDO_DEV0_F4_MASK 0x0000C000L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_IDO_OVERIDE_P_DEV0_F5_MASK 0x00030000L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_IDO_OVERIDE_NP_DEV0_F5_MASK 0x000C0000L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__BLKLVL_FOR_IDO_DEV0_F5_MASK 0x00300000L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_RO_OVERIDE_P_DEV0_F5_MASK 0x00C00000L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_RO_OVERIDE_NP_DEV0_F5_MASK 0x03000000L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_SNR_OVERIDE_P_DEV0_F5_MASK 0x0C000000L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_SNR_OVERIDE_NP_DEV0_F5_MASK 0x30000000L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__BLKLVL_FOR_NONIDO_DEV0_F5_MASK 0xC0000000L
++//BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_IDO_OVERIDE_P_DEV0_F6__SHIFT 0x0
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_IDO_OVERIDE_NP_DEV0_F6__SHIFT 0x2
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__BLKLVL_FOR_IDO_DEV0_F6__SHIFT 0x4
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_RO_OVERIDE_P_DEV0_F6__SHIFT 0x6
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_RO_OVERIDE_NP_DEV0_F6__SHIFT 0x8
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_SNR_OVERIDE_P_DEV0_F6__SHIFT 0xa
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_SNR_OVERIDE_NP_DEV0_F6__SHIFT 0xc
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__BLKLVL_FOR_NONIDO_DEV0_F6__SHIFT 0xe
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_IDO_OVERIDE_P_DEV0_F7__SHIFT 0x10
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_IDO_OVERIDE_NP_DEV0_F7__SHIFT 0x12
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__BLKLVL_FOR_IDO_DEV0_F7__SHIFT 0x14
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_RO_OVERIDE_P_DEV0_F7__SHIFT 0x16
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_RO_OVERIDE_NP_DEV0_F7__SHIFT 0x18
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_SNR_OVERIDE_P_DEV0_F7__SHIFT 0x1a
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_SNR_OVERIDE_NP_DEV0_F7__SHIFT 0x1c
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__BLKLVL_FOR_NONIDO_DEV0_F7__SHIFT 0x1e
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_IDO_OVERIDE_P_DEV0_F6_MASK 0x00000003L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_IDO_OVERIDE_NP_DEV0_F6_MASK 0x0000000CL
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__BLKLVL_FOR_IDO_DEV0_F6_MASK 0x00000030L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_RO_OVERIDE_P_DEV0_F6_MASK 0x000000C0L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_RO_OVERIDE_NP_DEV0_F6_MASK 0x00000300L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_SNR_OVERIDE_P_DEV0_F6_MASK 0x00000C00L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_SNR_OVERIDE_NP_DEV0_F6_MASK 0x00003000L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__BLKLVL_FOR_NONIDO_DEV0_F6_MASK 0x0000C000L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_IDO_OVERIDE_P_DEV0_F7_MASK 0x00030000L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_IDO_OVERIDE_NP_DEV0_F7_MASK 0x000C0000L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__BLKLVL_FOR_IDO_DEV0_F7_MASK 0x00300000L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_RO_OVERIDE_P_DEV0_F7_MASK 0x00C00000L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_RO_OVERIDE_NP_DEV0_F7_MASK 0x03000000L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_SNR_OVERIDE_P_DEV0_F7_MASK 0x0C000000L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_SNR_OVERIDE_NP_DEV0_F7_MASK 0x30000000L
++#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__BLKLVL_FOR_NONIDO_DEV0_F7_MASK 0xC0000000L
++//BIFC_DMA_ATTR_CNTL2_DEV0
++#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F0__SHIFT 0x0
++#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F1__SHIFT 0x4
++#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F2__SHIFT 0x8
++#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F3__SHIFT 0xc
++#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F4__SHIFT 0x10
++#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F5__SHIFT 0x14
++#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F6__SHIFT 0x18
++#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F7__SHIFT 0x1c
++#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F0_MASK 0x00000001L
++#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F1_MASK 0x00000010L
++#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F2_MASK 0x00000100L
++#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F3_MASK 0x00001000L
++#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F4_MASK 0x00010000L
++#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F5_MASK 0x00100000L
++#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F6_MASK 0x01000000L
++#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F7_MASK 0x10000000L
++//BME_DUMMY_CNTL_0
++#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F0__SHIFT 0x0
++#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F1__SHIFT 0x2
++#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F2__SHIFT 0x4
++#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F3__SHIFT 0x6
++#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F4__SHIFT 0x8
++#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F5__SHIFT 0xa
++#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F6__SHIFT 0xc
++#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F7__SHIFT 0xe
++#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F0_MASK 0x00000003L
++#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F1_MASK 0x0000000CL
++#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F2_MASK 0x00000030L
++#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F3_MASK 0x000000C0L
++#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F4_MASK 0x00000300L
++#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F5_MASK 0x00000C00L
++#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F6_MASK 0x00003000L
++#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F7_MASK 0x0000C000L
++//BIFC_THT_CNTL
++#define BIFC_THT_CNTL__CREDIT_ALLOC_THT_RD_VC0__SHIFT 0x0
++#define BIFC_THT_CNTL__CREDIT_ALLOC_THT_WR_VC0__SHIFT 0x4
++#define BIFC_THT_CNTL__CREDIT_ALLOC_THT_WR_VC1__SHIFT 0x8
++#define BIFC_THT_CNTL__UR_OVRD_FOR_ECRC_EN__SHIFT 0x10
++#define BIFC_THT_CNTL__CREDIT_ALLOC_THT_RD_VC0_MASK 0x0000000FL
++#define BIFC_THT_CNTL__CREDIT_ALLOC_THT_WR_VC0_MASK 0x000000F0L
++#define BIFC_THT_CNTL__CREDIT_ALLOC_THT_WR_VC1_MASK 0x00000F00L
++#define BIFC_THT_CNTL__UR_OVRD_FOR_ECRC_EN_MASK 0x00010000L
++//BIFC_HSTARB_CNTL
++#define BIFC_HSTARB_CNTL__SLVARB_MODE__SHIFT 0x0
++#define BIFC_HSTARB_CNTL__SLVARB_MODE_MASK 0x00000003L
++//BIFC_GSI_CNTL
++#define BIFC_GSI_CNTL__GSI_SDP_RSP_ARB_MODE__SHIFT 0x0
++#define BIFC_GSI_CNTL__GSI_CPL_RSP_ARB_MODE__SHIFT 0x2
++#define BIFC_GSI_CNTL__GSI_CPL_INTERLEAVING_EN__SHIFT 0x5
++#define BIFC_GSI_CNTL__GSI_CPL_PCR_EP_CAUSE_UR_EN__SHIFT 0x6
++#define BIFC_GSI_CNTL__GSI_CPL_SMN_P_EP_CAUSE_UR_EN__SHIFT 0x7
++#define BIFC_GSI_CNTL__GSI_CPL_SMN_NP_EP_CAUSE_UR_EN__SHIFT 0x8
++#define BIFC_GSI_CNTL__GSI_CPL_SST_EP_CAUSE_UR_EN__SHIFT 0x9
++#define BIFC_GSI_CNTL__GSI_SDP_REQ_ARB_MODE__SHIFT 0xa
++#define BIFC_GSI_CNTL__GSI_SMN_REQ_ARB_MODE__SHIFT 0xc
++#define BIFC_GSI_CNTL__GSI_SDP_RSP_ARB_MODE_MASK 0x00000003L
++#define BIFC_GSI_CNTL__GSI_CPL_RSP_ARB_MODE_MASK 0x0000001CL
++#define BIFC_GSI_CNTL__GSI_CPL_INTERLEAVING_EN_MASK 0x00000020L
++#define BIFC_GSI_CNTL__GSI_CPL_PCR_EP_CAUSE_UR_EN_MASK 0x00000040L
++#define BIFC_GSI_CNTL__GSI_CPL_SMN_P_EP_CAUSE_UR_EN_MASK 0x00000080L
++#define BIFC_GSI_CNTL__GSI_CPL_SMN_NP_EP_CAUSE_UR_EN_MASK 0x00000100L
++#define BIFC_GSI_CNTL__GSI_CPL_SST_EP_CAUSE_UR_EN_MASK 0x00000200L
++#define BIFC_GSI_CNTL__GSI_SDP_REQ_ARB_MODE_MASK 0x00000C00L
++#define BIFC_GSI_CNTL__GSI_SMN_REQ_ARB_MODE_MASK 0x00003000L
++//BIFC_PCIEFUNC_CNTL
++#define BIFC_PCIEFUNC_CNTL__DMA_NON_PCIEFUNC_BUSDEVFUNC__SHIFT 0x0
++#define BIFC_PCIEFUNC_CNTL__MP1SYSHUBDATA_DRAM_IS_PCIEFUNC__SHIFT 0x10
++#define BIFC_PCIEFUNC_CNTL__DMA_NON_PCIEFUNC_BUSDEVFUNC_MASK 0x0000FFFFL
++#define BIFC_PCIEFUNC_CNTL__MP1SYSHUBDATA_DRAM_IS_PCIEFUNC_MASK 0x00010000L
++//BIFC_PASID_CHECK_DIS
++#define BIFC_PASID_CHECK_DIS__PASID_CHECK_DIS_DEV0_F0__SHIFT 0x0
++#define BIFC_PASID_CHECK_DIS__PASID_CHECK_DIS_DEV0_F1__SHIFT 0x1
++#define BIFC_PASID_CHECK_DIS__PASID_CHECK_DIS_DEV0_F0_MASK 0x00000001L
++#define BIFC_PASID_CHECK_DIS__PASID_CHECK_DIS_DEV0_F1_MASK 0x00000002L
++//BIFC_SDP_CNTL_0
++#define BIFC_SDP_CNTL_0__HRP_SDP_DISCON_HYSTERESIS__SHIFT 0x0
++#define BIFC_SDP_CNTL_0__GSI_SDP_DISCON_HYSTERESIS__SHIFT 0x8
++#define BIFC_SDP_CNTL_0__GMI_DNS_SDP_DISCON_HYSTERESIS__SHIFT 0x10
++#define BIFC_SDP_CNTL_0__GMI_UPS_SDP_DISCON_HYSTERESIS__SHIFT 0x18
++#define BIFC_SDP_CNTL_0__HRP_SDP_DISCON_HYSTERESIS_MASK 0x000000FFL
++#define BIFC_SDP_CNTL_0__GSI_SDP_DISCON_HYSTERESIS_MASK 0x0000FF00L
++#define BIFC_SDP_CNTL_0__GMI_DNS_SDP_DISCON_HYSTERESIS_MASK 0x00FF0000L
++#define BIFC_SDP_CNTL_0__GMI_UPS_SDP_DISCON_HYSTERESIS_MASK 0xFF000000L
++//BIFC_SDP_CNTL_1
++#define BIFC_SDP_CNTL_1__HRP_SDP_DISCON_DIS__SHIFT 0x0
++#define BIFC_SDP_CNTL_1__GSI_SDP_DISCON_DIS__SHIFT 0x1
++#define BIFC_SDP_CNTL_1__GMI_DNS_SDP_DISCON_DIS__SHIFT 0x2
++#define BIFC_SDP_CNTL_1__GMI_UPS_SDP_DISCON_DIS__SHIFT 0x3
++#define BIFC_SDP_CNTL_1__HRP_SDP_DISCON_VLINK_NONL0_ONLY__SHIFT 0x4
++#define BIFC_SDP_CNTL_1__GMI_UPS_SDP_DISCON_VLINK_NONL0_ONLY__SHIFT 0x7
++#define BIFC_SDP_CNTL_1__HRP_SDP_DISCON_DIS_MASK 0x00000001L
++#define BIFC_SDP_CNTL_1__GSI_SDP_DISCON_DIS_MASK 0x00000002L
++#define BIFC_SDP_CNTL_1__GMI_DNS_SDP_DISCON_DIS_MASK 0x00000004L
++#define BIFC_SDP_CNTL_1__GMI_UPS_SDP_DISCON_DIS_MASK 0x00000008L
++#define BIFC_SDP_CNTL_1__HRP_SDP_DISCON_VLINK_NONL0_ONLY_MASK 0x00000010L
++#define BIFC_SDP_CNTL_1__GMI_UPS_SDP_DISCON_VLINK_NONL0_ONLY_MASK 0x00000080L
++//BIFC_PASID_STS
++#define BIFC_PASID_STS__PASID_STS__SHIFT 0x0
++#define BIFC_PASID_STS__PASID_STS_MASK 0x0000000FL
++//BIFC_ATHUB_ACT_CNTL
++#define BIFC_ATHUB_ACT_CNTL__ATHUB_ACT_GSI_RSP_STS_TYPE__SHIFT 0x0
++#define BIFC_ATHUB_ACT_CNTL__ATHUB_ACT_GSI_RSP_STS_TYPE_MASK 0x00000007L
++//BIFC_PERF_CNTL_0
++#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_RD_EN__SHIFT 0x0
++#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_WR_EN__SHIFT 0x1
++#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_RD_RESET__SHIFT 0x8
++#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_WR_RESET__SHIFT 0x9
++#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_RD_SEL__SHIFT 0x10
++#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_WR_SEL__SHIFT 0x18
++#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_RD_EN_MASK 0x00000001L
++#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_WR_EN_MASK 0x00000002L
++#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_RD_RESET_MASK 0x00000100L
++#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_WR_RESET_MASK 0x00000200L
++#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_RD_SEL_MASK 0x003F0000L
++#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_WR_SEL_MASK 0x3F000000L
++//BIFC_PERF_CNTL_1
++#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_RD_EN__SHIFT 0x0
++#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_WR_EN__SHIFT 0x1
++#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_RD_RESET__SHIFT 0x8
++#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_WR_RESET__SHIFT 0x9
++#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_RD_SEL__SHIFT 0x10
++#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_WR_SEL__SHIFT 0x18
++#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_RD_EN_MASK 0x00000001L
++#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_WR_EN_MASK 0x00000002L
++#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_RD_RESET_MASK 0x00000100L
++#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_WR_RESET_MASK 0x00000200L
++#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_RD_SEL_MASK 0x003F0000L
++#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_WR_SEL_MASK 0x7F000000L
++//BIFC_PERF_CNT_MMIO_RD
++#define BIFC_PERF_CNT_MMIO_RD__PERF_CNT_MMIO_RD_VALUE__SHIFT 0x0
++#define BIFC_PERF_CNT_MMIO_RD__PERF_CNT_MMIO_RD_VALUE_MASK 0xFFFFFFFFL
++//BIFC_PERF_CNT_MMIO_WR
++#define BIFC_PERF_CNT_MMIO_WR__PERF_CNT_MMIO_WR_VALUE__SHIFT 0x0
++#define BIFC_PERF_CNT_MMIO_WR__PERF_CNT_MMIO_WR_VALUE_MASK 0xFFFFFFFFL
++//BIFC_PERF_CNT_DMA_RD
++#define BIFC_PERF_CNT_DMA_RD__PERF_CNT_DMA_RD_VALUE__SHIFT 0x0
++#define BIFC_PERF_CNT_DMA_RD__PERF_CNT_DMA_RD_VALUE_MASK 0xFFFFFFFFL
++//BIFC_PERF_CNT_DMA_WR
++#define BIFC_PERF_CNT_DMA_WR__PERF_CNT_DMA_WR_VALUE__SHIFT 0x0
++#define BIFC_PERF_CNT_DMA_WR__PERF_CNT_DMA_WR_VALUE_MASK 0xFFFFFFFFL
++//NBIF_REGIF_ERRSET_CTRL
++#define NBIF_REGIF_ERRSET_CTRL__DROP_NONPF_MMREGREQ_SETERR_DIS__SHIFT 0x0
++#define NBIF_REGIF_ERRSET_CTRL__DROP_NONPF_MMREGREQ_SETERR_DIS_MASK 0x00000001L
++//SMN_MST_EP_CNTL3
++#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF0__SHIFT 0x0
++#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF1__SHIFT 0x1
++#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF2__SHIFT 0x2
++#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF3__SHIFT 0x3
++#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF4__SHIFT 0x4
++#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF5__SHIFT 0x5
++#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF6__SHIFT 0x6
++#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF7__SHIFT 0x7
++#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF0_MASK 0x00000001L
++#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF1_MASK 0x00000002L
++#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF2_MASK 0x00000004L
++#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF3_MASK 0x00000008L
++#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF4_MASK 0x00000010L
++#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF5_MASK 0x00000020L
++#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF6_MASK 0x00000040L
++#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF7_MASK 0x00000080L
++//SMN_MST_EP_CNTL4
++#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF0__SHIFT 0x0
++#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF1__SHIFT 0x1
++#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF2__SHIFT 0x2
++#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF3__SHIFT 0x3
++#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF4__SHIFT 0x4
++#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF5__SHIFT 0x5
++#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF6__SHIFT 0x6
++#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF7__SHIFT 0x7
++#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF0_MASK 0x00000001L
++#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF1_MASK 0x00000002L
++#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF2_MASK 0x00000004L
++#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF3_MASK 0x00000008L
++#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF4_MASK 0x00000010L
++#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF5_MASK 0x00000020L
++#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF6_MASK 0x00000040L
++#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF7_MASK 0x00000080L
++//SMN_MST_CNTL1
++#define SMN_MST_CNTL1__SMN_ERRRSP_DATA_ALLF_DIS_UPS__SHIFT 0x0
++#define SMN_MST_CNTL1__SMN_ERRRSP_DATA_ALLF_DIS_DNS_DEV0__SHIFT 0x10
++#define SMN_MST_CNTL1__SMN_ERRRSP_DATA_ALLF_DIS_UPS_MASK 0x00000001L
++#define SMN_MST_CNTL1__SMN_ERRRSP_DATA_ALLF_DIS_DNS_DEV0_MASK 0x00010000L
++//SMN_MST_EP_CNTL5
++#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF0__SHIFT 0x0
++#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF1__SHIFT 0x1
++#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF2__SHIFT 0x2
++#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF3__SHIFT 0x3
++#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF4__SHIFT 0x4
++#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF5__SHIFT 0x5
++#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF6__SHIFT 0x6
++#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF7__SHIFT 0x7
++#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF0_MASK 0x00000001L
++#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF1_MASK 0x00000002L
++#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF2_MASK 0x00000004L
++#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF3_MASK 0x00000008L
++#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF4_MASK 0x00000010L
++#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF5_MASK 0x00000020L
++#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF6_MASK 0x00000040L
++#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF7_MASK 0x00000080L
++//BIF_SELFRING_BUFFER_VID
++#define BIF_SELFRING_BUFFER_VID__DOORBELL_MONITOR_CID__SHIFT 0x0
++#define BIF_SELFRING_BUFFER_VID__RAS_CNTLR_INTR_CID__SHIFT 0x8
++#define BIF_SELFRING_BUFFER_VID__RAS_ATHUB_ERR_EVENT_INTR_CID__SHIFT 0x10
++#define BIF_SELFRING_BUFFER_VID__DOORBELL_MONITOR_CID_MASK 0x000000FFL
++#define BIF_SELFRING_BUFFER_VID__RAS_CNTLR_INTR_CID_MASK 0x0000FF00L
++#define BIF_SELFRING_BUFFER_VID__RAS_ATHUB_ERR_EVENT_INTR_CID_MASK 0x00FF0000L
++//BIF_SELFRING_VECTOR_CNTL
++#define BIF_SELFRING_VECTOR_CNTL__MISC_DB_MNTR_INTR_DIS__SHIFT 0x0
++#define BIF_SELFRING_VECTOR_CNTL__DB_MNTR_TS_FROM__SHIFT 0x1
++#define BIF_SELFRING_VECTOR_CNTL__MISC_DB_MNTR_INTR_DIS_MASK 0x00000001L
++#define BIF_SELFRING_VECTOR_CNTL__DB_MNTR_TS_FROM_MASK 0x00000002L
++//NBIF_INTX_DSTATE_MISC_CNTL
++#define NBIF_INTX_DSTATE_MISC_CNTL__DEASRT_INTX_DSTATE_CHK_DIS_EP__SHIFT 0x0
++#define NBIF_INTX_DSTATE_MISC_CNTL__DEASRT_INTX_DSTATE_CHK_DIS_DN__SHIFT 0x1
++#define NBIF_INTX_DSTATE_MISC_CNTL__DEASRT_INTX_DSTATE_CHK_DIS_SWUS__SHIFT 0x2
++#define NBIF_INTX_DSTATE_MISC_CNTL__DEASRT_INTX_IN_NOND0_EN_EP__SHIFT 0x3
++#define NBIF_INTX_DSTATE_MISC_CNTL__DEASRT_INTX_IN_NOND0_EN_DN__SHIFT 0x4
++#define NBIF_INTX_DSTATE_MISC_CNTL__PMI_INT_DIS_EP__SHIFT 0x5
++#define NBIF_INTX_DSTATE_MISC_CNTL__PMI_INT_DIS_DN__SHIFT 0x6
++#define NBIF_INTX_DSTATE_MISC_CNTL__PMI_INT_DIS_SWUS__SHIFT 0x7
++#define NBIF_INTX_DSTATE_MISC_CNTL__DEASRT_INTX_DSTATE_CHK_DIS_EP_MASK 0x00000001L
++#define NBIF_INTX_DSTATE_MISC_CNTL__DEASRT_INTX_DSTATE_CHK_DIS_DN_MASK 0x00000002L
++#define NBIF_INTX_DSTATE_MISC_CNTL__DEASRT_INTX_DSTATE_CHK_DIS_SWUS_MASK 0x00000004L
++#define NBIF_INTX_DSTATE_MISC_CNTL__DEASRT_INTX_IN_NOND0_EN_EP_MASK 0x00000008L
++#define NBIF_INTX_DSTATE_MISC_CNTL__DEASRT_INTX_IN_NOND0_EN_DN_MASK 0x00000010L
++#define NBIF_INTX_DSTATE_MISC_CNTL__PMI_INT_DIS_EP_MASK 0x00000020L
++#define NBIF_INTX_DSTATE_MISC_CNTL__PMI_INT_DIS_DN_MASK 0x00000040L
++#define NBIF_INTX_DSTATE_MISC_CNTL__PMI_INT_DIS_SWUS_MASK 0x00000080L
++//NBIF_PENDING_MISC_CNTL
++#define NBIF_PENDING_MISC_CNTL__FLR_MST_PEND_CHK_DIS__SHIFT 0x0
++#define NBIF_PENDING_MISC_CNTL__FLR_SLV_PEND_CHK_DIS__SHIFT 0x1
++#define NBIF_PENDING_MISC_CNTL__FLR_MST_PEND_CHK_DIS_MASK 0x00000001L
++#define NBIF_PENDING_MISC_CNTL__FLR_SLV_PEND_CHK_DIS_MASK 0x00000002L
++//BIF_GMI_WRR_WEIGHT
++#define BIF_GMI_WRR_WEIGHT__GMI_REQ_WRR_MODE__SHIFT 0x1f
++#define BIF_GMI_WRR_WEIGHT__GMI_REQ_WRR_MODE_MASK 0x80000000L
++//BIF_GMI_WRR_WEIGHT2
++#define BIF_GMI_WRR_WEIGHT2__GMI_REQ_ENTRY0_WEIGHT__SHIFT 0x0
++#define BIF_GMI_WRR_WEIGHT2__GMI_REQ_ENTRY1_WEIGHT__SHIFT 0x8
++#define BIF_GMI_WRR_WEIGHT2__GMI_REQ_ENTRY2_WEIGHT__SHIFT 0x10
++#define BIF_GMI_WRR_WEIGHT2__GMI_REQ_ENTRY3_WEIGHT__SHIFT 0x18
++#define BIF_GMI_WRR_WEIGHT2__GMI_REQ_ENTRY0_WEIGHT_MASK 0x000000FFL
++#define BIF_GMI_WRR_WEIGHT2__GMI_REQ_ENTRY1_WEIGHT_MASK 0x0000FF00L
++#define BIF_GMI_WRR_WEIGHT2__GMI_REQ_ENTRY2_WEIGHT_MASK 0x00FF0000L
++#define BIF_GMI_WRR_WEIGHT2__GMI_REQ_ENTRY3_WEIGHT_MASK 0xFF000000L
++//BIF_GMI_WRR_WEIGHT3
++#define BIF_GMI_WRR_WEIGHT3__GMI_REQ_ENTRY4_WEIGHT__SHIFT 0x0
++#define BIF_GMI_WRR_WEIGHT3__GMI_REQ_ENTRY5_WEIGHT__SHIFT 0x8
++#define BIF_GMI_WRR_WEIGHT3__GMI_REQ_ENTRY6_WEIGHT__SHIFT 0x10
++#define BIF_GMI_WRR_WEIGHT3__GMI_REQ_ENTRY7_WEIGHT__SHIFT 0x18
++#define BIF_GMI_WRR_WEIGHT3__GMI_REQ_ENTRY4_WEIGHT_MASK 0x000000FFL
++#define BIF_GMI_WRR_WEIGHT3__GMI_REQ_ENTRY5_WEIGHT_MASK 0x0000FF00L
++#define BIF_GMI_WRR_WEIGHT3__GMI_REQ_ENTRY6_WEIGHT_MASK 0x00FF0000L
++#define BIF_GMI_WRR_WEIGHT3__GMI_REQ_ENTRY7_WEIGHT_MASK 0xFF000000L
++//NBIF_PWRBRK_REQUEST
++#define NBIF_PWRBRK_REQUEST__NBIF_PWRBRK_REQUEST__SHIFT 0x0
++#define NBIF_PWRBRK_REQUEST__NBIF_PWRBRK_REQUEST_MASK 0x00000001L
++//BIF_ATOMIC_ERR_LOG_DEV0_F0
++#define BIF_ATOMIC_ERR_LOG_DEV0_F0__UR_ATOMIC_OPCODE_DEV0_F0__SHIFT 0x0
++#define BIF_ATOMIC_ERR_LOG_DEV0_F0__UR_ATOMIC_REQEN_LOW_DEV0_F0__SHIFT 0x1
++#define BIF_ATOMIC_ERR_LOG_DEV0_F0__UR_ATOMIC_LENGTH_DEV0_F0__SHIFT 0x2
++#define BIF_ATOMIC_ERR_LOG_DEV0_F0__UR_ATOMIC_NR_DEV0_F0__SHIFT 0x3
++#define BIF_ATOMIC_ERR_LOG_DEV0_F0__CLEAR_UR_ATOMIC_OPCODE_DEV0_F0__SHIFT 0x10
++#define BIF_ATOMIC_ERR_LOG_DEV0_F0__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F0__SHIFT 0x11
++#define BIF_ATOMIC_ERR_LOG_DEV0_F0__CLEAR_UR_ATOMIC_LENGTH_DEV0_F0__SHIFT 0x12
++#define BIF_ATOMIC_ERR_LOG_DEV0_F0__CLEAR_UR_ATOMIC_NR_DEV0_F0__SHIFT 0x13
++#define BIF_ATOMIC_ERR_LOG_DEV0_F0__UR_ATOMIC_OPCODE_DEV0_F0_MASK 0x00000001L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F0__UR_ATOMIC_REQEN_LOW_DEV0_F0_MASK 0x00000002L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F0__UR_ATOMIC_LENGTH_DEV0_F0_MASK 0x00000004L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F0__UR_ATOMIC_NR_DEV0_F0_MASK 0x00000008L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F0__CLEAR_UR_ATOMIC_OPCODE_DEV0_F0_MASK 0x00010000L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F0__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F0_MASK 0x00020000L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F0__CLEAR_UR_ATOMIC_LENGTH_DEV0_F0_MASK 0x00040000L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F0__CLEAR_UR_ATOMIC_NR_DEV0_F0_MASK 0x00080000L
++//BIF_ATOMIC_ERR_LOG_DEV0_F1
++#define BIF_ATOMIC_ERR_LOG_DEV0_F1__UR_ATOMIC_OPCODE_DEV0_F1__SHIFT 0x0
++#define BIF_ATOMIC_ERR_LOG_DEV0_F1__UR_ATOMIC_REQEN_LOW_DEV0_F1__SHIFT 0x1
++#define BIF_ATOMIC_ERR_LOG_DEV0_F1__UR_ATOMIC_LENGTH_DEV0_F1__SHIFT 0x2
++#define BIF_ATOMIC_ERR_LOG_DEV0_F1__UR_ATOMIC_NR_DEV0_F1__SHIFT 0x3
++#define BIF_ATOMIC_ERR_LOG_DEV0_F1__CLEAR_UR_ATOMIC_OPCODE_DEV0_F1__SHIFT 0x10
++#define BIF_ATOMIC_ERR_LOG_DEV0_F1__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F1__SHIFT 0x11
++#define BIF_ATOMIC_ERR_LOG_DEV0_F1__CLEAR_UR_ATOMIC_LENGTH_DEV0_F1__SHIFT 0x12
++#define BIF_ATOMIC_ERR_LOG_DEV0_F1__CLEAR_UR_ATOMIC_NR_DEV0_F1__SHIFT 0x13
++#define BIF_ATOMIC_ERR_LOG_DEV0_F1__UR_ATOMIC_OPCODE_DEV0_F1_MASK 0x00000001L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F1__UR_ATOMIC_REQEN_LOW_DEV0_F1_MASK 0x00000002L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F1__UR_ATOMIC_LENGTH_DEV0_F1_MASK 0x00000004L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F1__UR_ATOMIC_NR_DEV0_F1_MASK 0x00000008L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F1__CLEAR_UR_ATOMIC_OPCODE_DEV0_F1_MASK 0x00010000L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F1__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F1_MASK 0x00020000L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F1__CLEAR_UR_ATOMIC_LENGTH_DEV0_F1_MASK 0x00040000L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F1__CLEAR_UR_ATOMIC_NR_DEV0_F1_MASK 0x00080000L
++//BIF_ATOMIC_ERR_LOG_DEV0_F2
++#define BIF_ATOMIC_ERR_LOG_DEV0_F2__UR_ATOMIC_OPCODE_DEV0_F2__SHIFT 0x0
++#define BIF_ATOMIC_ERR_LOG_DEV0_F2__UR_ATOMIC_REQEN_LOW_DEV0_F2__SHIFT 0x1
++#define BIF_ATOMIC_ERR_LOG_DEV0_F2__UR_ATOMIC_LENGTH_DEV0_F2__SHIFT 0x2
++#define BIF_ATOMIC_ERR_LOG_DEV0_F2__UR_ATOMIC_NR_DEV0_F2__SHIFT 0x3
++#define BIF_ATOMIC_ERR_LOG_DEV0_F2__CLEAR_UR_ATOMIC_OPCODE_DEV0_F2__SHIFT 0x10
++#define BIF_ATOMIC_ERR_LOG_DEV0_F2__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F2__SHIFT 0x11
++#define BIF_ATOMIC_ERR_LOG_DEV0_F2__CLEAR_UR_ATOMIC_LENGTH_DEV0_F2__SHIFT 0x12
++#define BIF_ATOMIC_ERR_LOG_DEV0_F2__CLEAR_UR_ATOMIC_NR_DEV0_F2__SHIFT 0x13
++#define BIF_ATOMIC_ERR_LOG_DEV0_F2__UR_ATOMIC_OPCODE_DEV0_F2_MASK 0x00000001L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F2__UR_ATOMIC_REQEN_LOW_DEV0_F2_MASK 0x00000002L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F2__UR_ATOMIC_LENGTH_DEV0_F2_MASK 0x00000004L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F2__UR_ATOMIC_NR_DEV0_F2_MASK 0x00000008L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F2__CLEAR_UR_ATOMIC_OPCODE_DEV0_F2_MASK 0x00010000L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F2__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F2_MASK 0x00020000L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F2__CLEAR_UR_ATOMIC_LENGTH_DEV0_F2_MASK 0x00040000L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F2__CLEAR_UR_ATOMIC_NR_DEV0_F2_MASK 0x00080000L
++//BIF_ATOMIC_ERR_LOG_DEV0_F3
++#define BIF_ATOMIC_ERR_LOG_DEV0_F3__UR_ATOMIC_OPCODE_DEV0_F3__SHIFT 0x0
++#define BIF_ATOMIC_ERR_LOG_DEV0_F3__UR_ATOMIC_REQEN_LOW_DEV0_F3__SHIFT 0x1
++#define BIF_ATOMIC_ERR_LOG_DEV0_F3__UR_ATOMIC_LENGTH_DEV0_F3__SHIFT 0x2
++#define BIF_ATOMIC_ERR_LOG_DEV0_F3__UR_ATOMIC_NR_DEV0_F3__SHIFT 0x3
++#define BIF_ATOMIC_ERR_LOG_DEV0_F3__CLEAR_UR_ATOMIC_OPCODE_DEV0_F3__SHIFT 0x10
++#define BIF_ATOMIC_ERR_LOG_DEV0_F3__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F3__SHIFT 0x11
++#define BIF_ATOMIC_ERR_LOG_DEV0_F3__CLEAR_UR_ATOMIC_LENGTH_DEV0_F3__SHIFT 0x12
++#define BIF_ATOMIC_ERR_LOG_DEV0_F3__CLEAR_UR_ATOMIC_NR_DEV0_F3__SHIFT 0x13
++#define BIF_ATOMIC_ERR_LOG_DEV0_F3__UR_ATOMIC_OPCODE_DEV0_F3_MASK 0x00000001L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F3__UR_ATOMIC_REQEN_LOW_DEV0_F3_MASK 0x00000002L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F3__UR_ATOMIC_LENGTH_DEV0_F3_MASK 0x00000004L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F3__UR_ATOMIC_NR_DEV0_F3_MASK 0x00000008L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F3__CLEAR_UR_ATOMIC_OPCODE_DEV0_F3_MASK 0x00010000L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F3__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F3_MASK 0x00020000L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F3__CLEAR_UR_ATOMIC_LENGTH_DEV0_F3_MASK 0x00040000L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F3__CLEAR_UR_ATOMIC_NR_DEV0_F3_MASK 0x00080000L
++//BIF_ATOMIC_ERR_LOG_DEV0_F4
++#define BIF_ATOMIC_ERR_LOG_DEV0_F4__UR_ATOMIC_OPCODE_DEV0_F4__SHIFT 0x0
++#define BIF_ATOMIC_ERR_LOG_DEV0_F4__UR_ATOMIC_REQEN_LOW_DEV0_F4__SHIFT 0x1
++#define BIF_ATOMIC_ERR_LOG_DEV0_F4__UR_ATOMIC_LENGTH_DEV0_F4__SHIFT 0x2
++#define BIF_ATOMIC_ERR_LOG_DEV0_F4__UR_ATOMIC_NR_DEV0_F4__SHIFT 0x3
++#define BIF_ATOMIC_ERR_LOG_DEV0_F4__CLEAR_UR_ATOMIC_OPCODE_DEV0_F4__SHIFT 0x10
++#define BIF_ATOMIC_ERR_LOG_DEV0_F4__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F4__SHIFT 0x11
++#define BIF_ATOMIC_ERR_LOG_DEV0_F4__CLEAR_UR_ATOMIC_LENGTH_DEV0_F4__SHIFT 0x12
++#define BIF_ATOMIC_ERR_LOG_DEV0_F4__CLEAR_UR_ATOMIC_NR_DEV0_F4__SHIFT 0x13
++#define BIF_ATOMIC_ERR_LOG_DEV0_F4__UR_ATOMIC_OPCODE_DEV0_F4_MASK 0x00000001L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F4__UR_ATOMIC_REQEN_LOW_DEV0_F4_MASK 0x00000002L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F4__UR_ATOMIC_LENGTH_DEV0_F4_MASK 0x00000004L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F4__UR_ATOMIC_NR_DEV0_F4_MASK 0x00000008L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F4__CLEAR_UR_ATOMIC_OPCODE_DEV0_F4_MASK 0x00010000L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F4__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F4_MASK 0x00020000L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F4__CLEAR_UR_ATOMIC_LENGTH_DEV0_F4_MASK 0x00040000L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F4__CLEAR_UR_ATOMIC_NR_DEV0_F4_MASK 0x00080000L
++//BIF_ATOMIC_ERR_LOG_DEV0_F5
++#define BIF_ATOMIC_ERR_LOG_DEV0_F5__UR_ATOMIC_OPCODE_DEV0_F5__SHIFT 0x0
++#define BIF_ATOMIC_ERR_LOG_DEV0_F5__UR_ATOMIC_REQEN_LOW_DEV0_F5__SHIFT 0x1
++#define BIF_ATOMIC_ERR_LOG_DEV0_F5__UR_ATOMIC_LENGTH_DEV0_F5__SHIFT 0x2
++#define BIF_ATOMIC_ERR_LOG_DEV0_F5__UR_ATOMIC_NR_DEV0_F5__SHIFT 0x3
++#define BIF_ATOMIC_ERR_LOG_DEV0_F5__CLEAR_UR_ATOMIC_OPCODE_DEV0_F5__SHIFT 0x10
++#define BIF_ATOMIC_ERR_LOG_DEV0_F5__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F5__SHIFT 0x11
++#define BIF_ATOMIC_ERR_LOG_DEV0_F5__CLEAR_UR_ATOMIC_LENGTH_DEV0_F5__SHIFT 0x12
++#define BIF_ATOMIC_ERR_LOG_DEV0_F5__CLEAR_UR_ATOMIC_NR_DEV0_F5__SHIFT 0x13
++#define BIF_ATOMIC_ERR_LOG_DEV0_F5__UR_ATOMIC_OPCODE_DEV0_F5_MASK 0x00000001L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F5__UR_ATOMIC_REQEN_LOW_DEV0_F5_MASK 0x00000002L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F5__UR_ATOMIC_LENGTH_DEV0_F5_MASK 0x00000004L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F5__UR_ATOMIC_NR_DEV0_F5_MASK 0x00000008L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F5__CLEAR_UR_ATOMIC_OPCODE_DEV0_F5_MASK 0x00010000L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F5__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F5_MASK 0x00020000L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F5__CLEAR_UR_ATOMIC_LENGTH_DEV0_F5_MASK 0x00040000L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F5__CLEAR_UR_ATOMIC_NR_DEV0_F5_MASK 0x00080000L
++//BIF_ATOMIC_ERR_LOG_DEV0_F6
++#define BIF_ATOMIC_ERR_LOG_DEV0_F6__UR_ATOMIC_OPCODE_DEV0_F6__SHIFT 0x0
++#define BIF_ATOMIC_ERR_LOG_DEV0_F6__UR_ATOMIC_REQEN_LOW_DEV0_F6__SHIFT 0x1
++#define BIF_ATOMIC_ERR_LOG_DEV0_F6__UR_ATOMIC_LENGTH_DEV0_F6__SHIFT 0x2
++#define BIF_ATOMIC_ERR_LOG_DEV0_F6__UR_ATOMIC_NR_DEV0_F6__SHIFT 0x3
++#define BIF_ATOMIC_ERR_LOG_DEV0_F6__CLEAR_UR_ATOMIC_OPCODE_DEV0_F6__SHIFT 0x10
++#define BIF_ATOMIC_ERR_LOG_DEV0_F6__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F6__SHIFT 0x11
++#define BIF_ATOMIC_ERR_LOG_DEV0_F6__CLEAR_UR_ATOMIC_LENGTH_DEV0_F6__SHIFT 0x12
++#define BIF_ATOMIC_ERR_LOG_DEV0_F6__CLEAR_UR_ATOMIC_NR_DEV0_F6__SHIFT 0x13
++#define BIF_ATOMIC_ERR_LOG_DEV0_F6__UR_ATOMIC_OPCODE_DEV0_F6_MASK 0x00000001L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F6__UR_ATOMIC_REQEN_LOW_DEV0_F6_MASK 0x00000002L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F6__UR_ATOMIC_LENGTH_DEV0_F6_MASK 0x00000004L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F6__UR_ATOMIC_NR_DEV0_F6_MASK 0x00000008L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F6__CLEAR_UR_ATOMIC_OPCODE_DEV0_F6_MASK 0x00010000L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F6__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F6_MASK 0x00020000L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F6__CLEAR_UR_ATOMIC_LENGTH_DEV0_F6_MASK 0x00040000L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F6__CLEAR_UR_ATOMIC_NR_DEV0_F6_MASK 0x00080000L
++//BIF_ATOMIC_ERR_LOG_DEV0_F7
++#define BIF_ATOMIC_ERR_LOG_DEV0_F7__UR_ATOMIC_OPCODE_DEV0_F7__SHIFT 0x0
++#define BIF_ATOMIC_ERR_LOG_DEV0_F7__UR_ATOMIC_REQEN_LOW_DEV0_F7__SHIFT 0x1
++#define BIF_ATOMIC_ERR_LOG_DEV0_F7__UR_ATOMIC_LENGTH_DEV0_F7__SHIFT 0x2
++#define BIF_ATOMIC_ERR_LOG_DEV0_F7__UR_ATOMIC_NR_DEV0_F7__SHIFT 0x3
++#define BIF_ATOMIC_ERR_LOG_DEV0_F7__CLEAR_UR_ATOMIC_OPCODE_DEV0_F7__SHIFT 0x10
++#define BIF_ATOMIC_ERR_LOG_DEV0_F7__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F7__SHIFT 0x11
++#define BIF_ATOMIC_ERR_LOG_DEV0_F7__CLEAR_UR_ATOMIC_LENGTH_DEV0_F7__SHIFT 0x12
++#define BIF_ATOMIC_ERR_LOG_DEV0_F7__CLEAR_UR_ATOMIC_NR_DEV0_F7__SHIFT 0x13
++#define BIF_ATOMIC_ERR_LOG_DEV0_F7__UR_ATOMIC_OPCODE_DEV0_F7_MASK 0x00000001L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F7__UR_ATOMIC_REQEN_LOW_DEV0_F7_MASK 0x00000002L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F7__UR_ATOMIC_LENGTH_DEV0_F7_MASK 0x00000004L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F7__UR_ATOMIC_NR_DEV0_F7_MASK 0x00000008L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F7__CLEAR_UR_ATOMIC_OPCODE_DEV0_F7_MASK 0x00010000L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F7__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F7_MASK 0x00020000L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F7__CLEAR_UR_ATOMIC_LENGTH_DEV0_F7_MASK 0x00040000L
++#define BIF_ATOMIC_ERR_LOG_DEV0_F7__CLEAR_UR_ATOMIC_NR_DEV0_F7_MASK 0x00080000L
++//BIF_DMA_MP4_ERR_LOG
++#define BIF_DMA_MP4_ERR_LOG__MP4SDP_VC4_NON_DVM_ERR__SHIFT 0x0
++#define BIF_DMA_MP4_ERR_LOG__MP4SDP_ATOMIC_REQEN_LOW_ERR__SHIFT 0x1
++#define BIF_DMA_MP4_ERR_LOG__CLEAR_MP4SDP_VC4_NON_DVM_ERR__SHIFT 0x10
++#define BIF_DMA_MP4_ERR_LOG__CLEAR_MP4SDP_ATOMIC_REQEN_LOW_ERR__SHIFT 0x11
++#define BIF_DMA_MP4_ERR_LOG__MP4SDP_VC4_NON_DVM_ERR_MASK 0x00000001L
++#define BIF_DMA_MP4_ERR_LOG__MP4SDP_ATOMIC_REQEN_LOW_ERR_MASK 0x00000002L
++#define BIF_DMA_MP4_ERR_LOG__CLEAR_MP4SDP_VC4_NON_DVM_ERR_MASK 0x00010000L
++#define BIF_DMA_MP4_ERR_LOG__CLEAR_MP4SDP_ATOMIC_REQEN_LOW_ERR_MASK 0x00020000L
++//BIF_PASID_ERR_LOG
++#define BIF_PASID_ERR_LOG__PASID_ERR_DEV0_F0__SHIFT 0x0
++#define BIF_PASID_ERR_LOG__PASID_ERR_DEV0_F1__SHIFT 0x1
++#define BIF_PASID_ERR_LOG__PASID_ERR_DEV0_F0_MASK 0x00000001L
++#define BIF_PASID_ERR_LOG__PASID_ERR_DEV0_F1_MASK 0x00000002L
++//BIF_PASID_ERR_CLR
++#define BIF_PASID_ERR_CLR__PASID_ERR_CLR_DEV0_F0__SHIFT 0x0
++#define BIF_PASID_ERR_CLR__PASID_ERR_CLR_DEV0_F1__SHIFT 0x1
++#define BIF_PASID_ERR_CLR__PASID_ERR_CLR_DEV0_F0_MASK 0x00000001L
++#define BIF_PASID_ERR_CLR__PASID_ERR_CLR_DEV0_F1_MASK 0x00000002L
++//NBIF_VWIRE_CTRL
++#define NBIF_VWIRE_CTRL__NBIF_SMN_VWR_DIS__SHIFT 0x0
++#define NBIF_VWIRE_CTRL__SMN_VWR_RESET_DELAY_CNT__SHIFT 0x4
++#define NBIF_VWIRE_CTRL__SMN_VWR_POSTED__SHIFT 0x8
++#define NBIF_VWIRE_CTRL__NBIF_SDP_UPS_VWR_DIS__SHIFT 0x10
++#define NBIF_VWIRE_CTRL__SDP_VWR_RESET_DELAY_CNT__SHIFT 0x14
++#define NBIF_VWIRE_CTRL__SDP_VWR_BLOCKLVL__SHIFT 0x1a
++#define NBIF_VWIRE_CTRL__NBIF_SMN_VWR_DIS_MASK 0x00000001L
++#define NBIF_VWIRE_CTRL__SMN_VWR_RESET_DELAY_CNT_MASK 0x000000F0L
++#define NBIF_VWIRE_CTRL__SMN_VWR_POSTED_MASK 0x00000100L
++#define NBIF_VWIRE_CTRL__NBIF_SDP_UPS_VWR_DIS_MASK 0x00010000L
++#define NBIF_VWIRE_CTRL__SDP_VWR_RESET_DELAY_CNT_MASK 0x00F00000L
++#define NBIF_VWIRE_CTRL__SDP_VWR_BLOCKLVL_MASK 0x0C000000L
++//NBIF_SMN_VWR_VCHG_DIS_CTRL
++#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET0_DIS__SHIFT 0x0
++#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET1_DIS__SHIFT 0x1
++#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET2_DIS__SHIFT 0x2
++#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET3_DIS__SHIFT 0x3
++#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET4_DIS__SHIFT 0x4
++#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET5_DIS__SHIFT 0x5
++#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET6_DIS__SHIFT 0x6
++#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET0_DIS_MASK 0x00000001L
++#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET1_DIS_MASK 0x00000002L
++#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET2_DIS_MASK 0x00000004L
++#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET3_DIS_MASK 0x00000008L
++#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET4_DIS_MASK 0x00000010L
++#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET5_DIS_MASK 0x00000020L
++#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET6_DIS_MASK 0x00000040L
++//NBIF_SMN_VWR_VCHG_RST_CTRL0
++#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET0_RST_DEF_REV__SHIFT 0x0
++#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET1_RST_DEF_REV__SHIFT 0x1
++#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET2_RST_DEF_REV__SHIFT 0x2
++#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET3_RST_DEF_REV__SHIFT 0x3
++#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET4_RST_DEF_REV__SHIFT 0x4
++#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET5_RST_DEF_REV__SHIFT 0x5
++#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET6_RST_DEF_REV__SHIFT 0x6
++#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET0_RST_DEF_REV_MASK 0x00000001L
++#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET1_RST_DEF_REV_MASK 0x00000002L
++#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET2_RST_DEF_REV_MASK 0x00000004L
++#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET3_RST_DEF_REV_MASK 0x00000008L
++#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET4_RST_DEF_REV_MASK 0x00000010L
++#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET5_RST_DEF_REV_MASK 0x00000020L
++#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET6_RST_DEF_REV_MASK 0x00000040L
++//NBIF_SMN_VWR_VCHG_TRIG
++#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET0_TRIG__SHIFT 0x0
++#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET1_TRIG__SHIFT 0x1
++#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET2_TRIG__SHIFT 0x2
++#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET3_TRIG__SHIFT 0x3
++#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET4_TRIG__SHIFT 0x4
++#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET5_TRIG__SHIFT 0x5
++#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET6_TRIG__SHIFT 0x6
++#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET0_TRIG_MASK 0x00000001L
++#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET1_TRIG_MASK 0x00000002L
++#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET2_TRIG_MASK 0x00000004L
++#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET3_TRIG_MASK 0x00000008L
++#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET4_TRIG_MASK 0x00000010L
++#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET5_TRIG_MASK 0x00000020L
++#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET6_TRIG_MASK 0x00000040L
++//NBIF_SMN_VWR_WTRIG_CNTL
++#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET0_DIS__SHIFT 0x0
++#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET1_DIS__SHIFT 0x1
++#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET2_DIS__SHIFT 0x2
++#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET3_DIS__SHIFT 0x3
++#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET4_DIS__SHIFT 0x4
++#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET5_DIS__SHIFT 0x5
++#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET6_DIS__SHIFT 0x6
++#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET0_DIS_MASK 0x00000001L
++#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET1_DIS_MASK 0x00000002L
++#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET2_DIS_MASK 0x00000004L
++#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET3_DIS_MASK 0x00000008L
++#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET4_DIS_MASK 0x00000010L
++#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET5_DIS_MASK 0x00000020L
++#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET6_DIS_MASK 0x00000040L
++//NBIF_SMN_VWR_VCHG_DIS_CTRL_1
++#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET0_DIFFDET_DEF_REV__SHIFT 0x0
++#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET1_DIFFDET_DEF_REV__SHIFT 0x1
++#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET2_DIFFDET_DEF_REV__SHIFT 0x2
++#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET3_DIFFDET_DEF_REV__SHIFT 0x3
++#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET4_DIFFDET_DEF_REV__SHIFT 0x4
++#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET5_DIFFDET_DEF_REV__SHIFT 0x5
++#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET6_DIFFDET_DEF_REV__SHIFT 0x6
++#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET0_DIFFDET_DEF_REV_MASK 0x00000001L
++#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET1_DIFFDET_DEF_REV_MASK 0x00000002L
++#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET2_DIFFDET_DEF_REV_MASK 0x00000004L
++#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET3_DIFFDET_DEF_REV_MASK 0x00000008L
++#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET4_DIFFDET_DEF_REV_MASK 0x00000010L
++#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET5_DIFFDET_DEF_REV_MASK 0x00000020L
++#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET6_DIFFDET_DEF_REV_MASK 0x00000040L
++//NBIF_MGCG_CTRL_LCLK
++#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_EN_LCLK__SHIFT 0x0
++#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_MODE_LCLK__SHIFT 0x1
++#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_HYSTERESIS_LCLK__SHIFT 0x2
++#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_HST_DIS_LCLK__SHIFT 0xa
++#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_DMA_DIS_LCLK__SHIFT 0xb
++#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_REG_DIS_LCLK__SHIFT 0xc
++#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_AER_DIS_LCLK__SHIFT 0xd
++#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_EN_LCLK_MASK 0x00000001L
++#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_MODE_LCLK_MASK 0x00000002L
++#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_HYSTERESIS_LCLK_MASK 0x000003FCL
++#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_HST_DIS_LCLK_MASK 0x00000400L
++#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_DMA_DIS_LCLK_MASK 0x00000800L
++#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_REG_DIS_LCLK_MASK 0x00001000L
++#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_AER_DIS_LCLK_MASK 0x00002000L
++//NBIF_DS_CTRL_LCLK
++#define NBIF_DS_CTRL_LCLK__NBIF_LCLK_DS_EN__SHIFT 0x0
++#define NBIF_DS_CTRL_LCLK__NBIF_LCLK_DS_TIMER__SHIFT 0x10
++#define NBIF_DS_CTRL_LCLK__NBIF_LCLK_DS_EN_MASK 0x00000001L
++#define NBIF_DS_CTRL_LCLK__NBIF_LCLK_DS_TIMER_MASK 0xFFFF0000L
++//SMN_MST_CNTL0
++#define SMN_MST_CNTL0__SMN_ARB_MODE__SHIFT 0x0
++#define SMN_MST_CNTL0__SMN_ZERO_BE_WR_EN_UPS__SHIFT 0x8
++#define SMN_MST_CNTL0__SMN_ZERO_BE_RD_EN_UPS__SHIFT 0x9
++#define SMN_MST_CNTL0__SMN_POST_MASK_EN_UPS__SHIFT 0xa
++#define SMN_MST_CNTL0__MULTI_SMN_TRANS_ID_DIS_UPS__SHIFT 0xb
++#define SMN_MST_CNTL0__SMN_ZERO_BE_WR_EN_DNS_DEV0__SHIFT 0x10
++#define SMN_MST_CNTL0__SMN_ZERO_BE_RD_EN_DNS_DEV0__SHIFT 0x14
++#define SMN_MST_CNTL0__SMN_POST_MASK_EN_DNS_DEV0__SHIFT 0x18
++#define SMN_MST_CNTL0__MULTI_SMN_TRANS_ID_DIS_DNS_DEV0__SHIFT 0x1c
++#define SMN_MST_CNTL0__SMN_ARB_MODE_MASK 0x00000003L
++#define SMN_MST_CNTL0__SMN_ZERO_BE_WR_EN_UPS_MASK 0x00000100L
++#define SMN_MST_CNTL0__SMN_ZERO_BE_RD_EN_UPS_MASK 0x00000200L
++#define SMN_MST_CNTL0__SMN_POST_MASK_EN_UPS_MASK 0x00000400L
++#define SMN_MST_CNTL0__MULTI_SMN_TRANS_ID_DIS_UPS_MASK 0x00000800L
++#define SMN_MST_CNTL0__SMN_ZERO_BE_WR_EN_DNS_DEV0_MASK 0x00010000L
++#define SMN_MST_CNTL0__SMN_ZERO_BE_RD_EN_DNS_DEV0_MASK 0x00100000L
++#define SMN_MST_CNTL0__SMN_POST_MASK_EN_DNS_DEV0_MASK 0x01000000L
++#define SMN_MST_CNTL0__MULTI_SMN_TRANS_ID_DIS_DNS_DEV0_MASK 0x10000000L
++//SMN_MST_EP_CNTL1
++#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF0__SHIFT 0x0
++#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF1__SHIFT 0x1
++#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF2__SHIFT 0x2
++#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF3__SHIFT 0x3
++#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF4__SHIFT 0x4
++#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF5__SHIFT 0x5
++#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF6__SHIFT 0x6
++#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF7__SHIFT 0x7
++#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF0_MASK 0x00000001L
++#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF1_MASK 0x00000002L
++#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF2_MASK 0x00000004L
++#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF3_MASK 0x00000008L
++#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF4_MASK 0x00000010L
++#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF5_MASK 0x00000020L
++#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF6_MASK 0x00000040L
++#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF7_MASK 0x00000080L
++//SMN_MST_EP_CNTL2
++#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF0__SHIFT 0x0
++#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF1__SHIFT 0x1
++#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF2__SHIFT 0x2
++#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF3__SHIFT 0x3
++#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF4__SHIFT 0x4
++#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF5__SHIFT 0x5
++#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF6__SHIFT 0x6
++#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF7__SHIFT 0x7
++#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF0_MASK 0x00000001L
++#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF1_MASK 0x00000002L
++#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF2_MASK 0x00000004L
++#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF3_MASK 0x00000008L
++#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF4_MASK 0x00000010L
++#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF5_MASK 0x00000020L
++#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF6_MASK 0x00000040L
++#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF7_MASK 0x00000080L
++//NBIF_SDP_VWR_VCHG_DIS_CTRL
++#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F0_DIS__SHIFT 0x0
++#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F1_DIS__SHIFT 0x1
++#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F2_DIS__SHIFT 0x2
++#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F3_DIS__SHIFT 0x3
++#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F4_DIS__SHIFT 0x4
++#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F5_DIS__SHIFT 0x5
++#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F6_DIS__SHIFT 0x6
++#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F7_DIS__SHIFT 0x7
++#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_SWDS_P0_DIS__SHIFT 0x18
++#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F0_DIS_MASK 0x00000001L
++#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F1_DIS_MASK 0x00000002L
++#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F2_DIS_MASK 0x00000004L
++#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F3_DIS_MASK 0x00000008L
++#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F4_DIS_MASK 0x00000010L
++#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F5_DIS_MASK 0x00000020L
++#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F6_DIS_MASK 0x00000040L
++#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F7_DIS_MASK 0x00000080L
++#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_SWDS_P0_DIS_MASK 0x01000000L
++//NBIF_SDP_VWR_VCHG_RST_CTRL0
++#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F0_RST_OVRD_EN__SHIFT 0x0
++#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F1_RST_OVRD_EN__SHIFT 0x1
++#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F2_RST_OVRD_EN__SHIFT 0x2
++#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F3_RST_OVRD_EN__SHIFT 0x3
++#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F4_RST_OVRD_EN__SHIFT 0x4
++#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F5_RST_OVRD_EN__SHIFT 0x5
++#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F6_RST_OVRD_EN__SHIFT 0x6
++#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F7_RST_OVRD_EN__SHIFT 0x7
++#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_SWDS_P0_RST_OVRD_EN__SHIFT 0x18
++#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F0_RST_OVRD_EN_MASK 0x00000001L
++#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F1_RST_OVRD_EN_MASK 0x00000002L
++#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F2_RST_OVRD_EN_MASK 0x00000004L
++#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F3_RST_OVRD_EN_MASK 0x00000008L
++#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F4_RST_OVRD_EN_MASK 0x00000010L
++#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F5_RST_OVRD_EN_MASK 0x00000020L
++#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F6_RST_OVRD_EN_MASK 0x00000040L
++#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F7_RST_OVRD_EN_MASK 0x00000080L
++#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_SWDS_P0_RST_OVRD_EN_MASK 0x01000000L
++//NBIF_SDP_VWR_VCHG_RST_CTRL1
++#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F0_RST_OVRD_VAL__SHIFT 0x0
++#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F1_RST_OVRD_VAL__SHIFT 0x1
++#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F2_RST_OVRD_VAL__SHIFT 0x2
++#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F3_RST_OVRD_VAL__SHIFT 0x3
++#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F4_RST_OVRD_VAL__SHIFT 0x4
++#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F5_RST_OVRD_VAL__SHIFT 0x5
++#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F6_RST_OVRD_VAL__SHIFT 0x6
++#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F7_RST_OVRD_VAL__SHIFT 0x7
++#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_SWDS_P0_RST_OVRD_VAL__SHIFT 0x18
++#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F0_RST_OVRD_VAL_MASK 0x00000001L
++#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F1_RST_OVRD_VAL_MASK 0x00000002L
++#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F2_RST_OVRD_VAL_MASK 0x00000004L
++#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F3_RST_OVRD_VAL_MASK 0x00000008L
++#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F4_RST_OVRD_VAL_MASK 0x00000010L
++#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F5_RST_OVRD_VAL_MASK 0x00000020L
++#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F6_RST_OVRD_VAL_MASK 0x00000040L
++#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F7_RST_OVRD_VAL_MASK 0x00000080L
++#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_SWDS_P0_RST_OVRD_VAL_MASK 0x01000000L
++//NBIF_SDP_VWR_VCHG_TRIG
++#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F0_TRIG__SHIFT 0x0
++#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F1_TRIG__SHIFT 0x1
++#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F2_TRIG__SHIFT 0x2
++#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F3_TRIG__SHIFT 0x3
++#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F4_TRIG__SHIFT 0x4
++#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F5_TRIG__SHIFT 0x5
++#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F6_TRIG__SHIFT 0x6
++#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F7_TRIG__SHIFT 0x7
++#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_SWDS_P0_TRIG__SHIFT 0x18
++#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F0_TRIG_MASK 0x00000001L
++#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F1_TRIG_MASK 0x00000002L
++#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F2_TRIG_MASK 0x00000004L
++#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F3_TRIG_MASK 0x00000008L
++#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F4_TRIG_MASK 0x00000010L
++#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F5_TRIG_MASK 0x00000020L
++#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F6_TRIG_MASK 0x00000040L
++#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F7_TRIG_MASK 0x00000080L
++#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_SWDS_P0_TRIG_MASK 0x01000000L
++
++
++// addressBlock: nbio_nbif0_rcc_pfc_amdgfx_RCCPFCDEC
++//RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL
++#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__SNOOP_LATENCY_VALUE__SHIFT 0x0
++#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__SNOOP_LATENCY_SCALE__SHIFT 0xa
++#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__SNOOP_REQUIREMENT__SHIFT 0xf
++#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__NONSNOOP_LATENCY_VALUE__SHIFT 0x10
++#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__NONSNOOP_LATENCY_SCALE__SHIFT 0x1a
++#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__NONSNOOP_REQUIREMENT__SHIFT 0x1f
++#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__SNOOP_LATENCY_VALUE_MASK 0x000003FFL
++#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__SNOOP_LATENCY_SCALE_MASK 0x00001C00L
++#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__SNOOP_REQUIREMENT_MASK 0x00008000L
++#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__NONSNOOP_LATENCY_VALUE_MASK 0x03FF0000L
++#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__NONSNOOP_LATENCY_SCALE_MASK 0x1C000000L
++#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__NONSNOOP_REQUIREMENT_MASK 0x80000000L
++//RCC_PFC_AMDGFX_RCC_PFC_PME_RESTORE
++#define RCC_PFC_AMDGFX_RCC_PFC_PME_RESTORE__PME_RESTORE_PME_EN__SHIFT 0x0
++#define RCC_PFC_AMDGFX_RCC_PFC_PME_RESTORE__PME_RESTORE_PME_STATUS__SHIFT 0x8
++#define RCC_PFC_AMDGFX_RCC_PFC_PME_RESTORE__PME_RESTORE_PME_EN_MASK 0x00000001L
++#define RCC_PFC_AMDGFX_RCC_PFC_PME_RESTORE__PME_RESTORE_PME_STATUS_MASK 0x00000100L
++//RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0
++#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_PSN_ERR_STATUS__SHIFT 0x0
++#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_CPL_TIMEOUT_STATUS__SHIFT 0x1
++#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_CPL_ABORT_ERR_STATUS__SHIFT 0x2
++#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_UNEXP_CPL_STATUS__SHIFT 0x3
++#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_MAL_TLP_STATUS__SHIFT 0x4
++#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_ECRC_ERR_STATUS__SHIFT 0x5
++#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_UNSUPP_REQ_ERR_STATUS__SHIFT 0x6
++#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0x7
++#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_PSN_ERR_STATUS_MASK 0x00000001L
++#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_CPL_TIMEOUT_STATUS_MASK 0x00000002L
++#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_CPL_ABORT_ERR_STATUS_MASK 0x00000004L
++#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_UNEXP_CPL_STATUS_MASK 0x00000008L
++#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_MAL_TLP_STATUS_MASK 0x00000010L
++#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_ECRC_ERR_STATUS_MASK 0x00000020L
++#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_UNSUPP_REQ_ERR_STATUS_MASK 0x00000040L
++#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00000080L
++//RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_1
++#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_1__RESTORE_TLP_HDR_0__SHIFT 0x0
++#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_1__RESTORE_TLP_HDR_0_MASK 0xFFFFFFFFL
++//RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_2
++#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_2__RESTORE_TLP_HDR_1__SHIFT 0x0
++#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_2__RESTORE_TLP_HDR_1_MASK 0xFFFFFFFFL
++//RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_3
++#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_3__RESTORE_TLP_HDR_2__SHIFT 0x0
++#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_3__RESTORE_TLP_HDR_2_MASK 0xFFFFFFFFL
++//RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_4
++#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_4__RESTORE_TLP_HDR_3__SHIFT 0x0
++#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_4__RESTORE_TLP_HDR_3_MASK 0xFFFFFFFFL
++//RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_5
++#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_5__RESTORE_TLP_PREFIX__SHIFT 0x0
++#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_5__RESTORE_TLP_PREFIX_MASK 0xFFFFFFFFL
++//RCC_PFC_AMDGFX_RCC_PFC_AUXPWR_CNTL
++#define RCC_PFC_AMDGFX_RCC_PFC_AUXPWR_CNTL__AUX_CURRENT_OVERRIDE__SHIFT 0x0
++#define RCC_PFC_AMDGFX_RCC_PFC_AUXPWR_CNTL__AUX_POWER_DETECTED_OVERRIDE__SHIFT 0x3
++#define RCC_PFC_AMDGFX_RCC_PFC_AUXPWR_CNTL__AUX_CURRENT_OVERRIDE_MASK 0x00000007L
++#define RCC_PFC_AMDGFX_RCC_PFC_AUXPWR_CNTL__AUX_POWER_DETECTED_OVERRIDE_MASK 0x00000008L
++
++
++// addressBlock: nbio_nbif0_rcc_pfc_amdgfxaz_RCCPFCDEC
++//RCC_PFC_AMDGFXAZ_RCC_PFC_LTR_CNTL
++#define RCC_PFC_AMDGFXAZ_RCC_PFC_LTR_CNTL__SNOOP_LATENCY_VALUE__SHIFT 0x0
++#define RCC_PFC_AMDGFXAZ_RCC_PFC_LTR_CNTL__SNOOP_LATENCY_SCALE__SHIFT 0xa
++#define RCC_PFC_AMDGFXAZ_RCC_PFC_LTR_CNTL__SNOOP_REQUIREMENT__SHIFT 0xf
++#define RCC_PFC_AMDGFXAZ_RCC_PFC_LTR_CNTL__NONSNOOP_LATENCY_VALUE__SHIFT 0x10
++#define RCC_PFC_AMDGFXAZ_RCC_PFC_LTR_CNTL__NONSNOOP_LATENCY_SCALE__SHIFT 0x1a
++#define RCC_PFC_AMDGFXAZ_RCC_PFC_LTR_CNTL__NONSNOOP_REQUIREMENT__SHIFT 0x1f
++#define RCC_PFC_AMDGFXAZ_RCC_PFC_LTR_CNTL__SNOOP_LATENCY_VALUE_MASK 0x000003FFL
++#define RCC_PFC_AMDGFXAZ_RCC_PFC_LTR_CNTL__SNOOP_LATENCY_SCALE_MASK 0x00001C00L
++#define RCC_PFC_AMDGFXAZ_RCC_PFC_LTR_CNTL__SNOOP_REQUIREMENT_MASK 0x00008000L
++#define RCC_PFC_AMDGFXAZ_RCC_PFC_LTR_CNTL__NONSNOOP_LATENCY_VALUE_MASK 0x03FF0000L
++#define RCC_PFC_AMDGFXAZ_RCC_PFC_LTR_CNTL__NONSNOOP_LATENCY_SCALE_MASK 0x1C000000L
++#define RCC_PFC_AMDGFXAZ_RCC_PFC_LTR_CNTL__NONSNOOP_REQUIREMENT_MASK 0x80000000L
++//RCC_PFC_AMDGFXAZ_RCC_PFC_PME_RESTORE
++#define RCC_PFC_AMDGFXAZ_RCC_PFC_PME_RESTORE__PME_RESTORE_PME_EN__SHIFT 0x0
++#define RCC_PFC_AMDGFXAZ_RCC_PFC_PME_RESTORE__PME_RESTORE_PME_STATUS__SHIFT 0x8
++#define RCC_PFC_AMDGFXAZ_RCC_PFC_PME_RESTORE__PME_RESTORE_PME_EN_MASK 0x00000001L
++#define RCC_PFC_AMDGFXAZ_RCC_PFC_PME_RESTORE__PME_RESTORE_PME_STATUS_MASK 0x00000100L
++//RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0
++#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_PSN_ERR_STATUS__SHIFT 0x0
++#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_CPL_TIMEOUT_STATUS__SHIFT 0x1
++#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_CPL_ABORT_ERR_STATUS__SHIFT 0x2
++#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_UNEXP_CPL_STATUS__SHIFT 0x3
++#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_MAL_TLP_STATUS__SHIFT 0x4
++#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_ECRC_ERR_STATUS__SHIFT 0x5
++#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_UNSUPP_REQ_ERR_STATUS__SHIFT 0x6
++#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0x7
++#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_PSN_ERR_STATUS_MASK 0x00000001L
++#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_CPL_TIMEOUT_STATUS_MASK 0x00000002L
++#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_CPL_ABORT_ERR_STATUS_MASK 0x00000004L
++#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_UNEXP_CPL_STATUS_MASK 0x00000008L
++#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_MAL_TLP_STATUS_MASK 0x00000010L
++#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_ECRC_ERR_STATUS_MASK 0x00000020L
++#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_UNSUPP_REQ_ERR_STATUS_MASK 0x00000040L
++#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00000080L
++//RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_1
++#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_1__RESTORE_TLP_HDR_0__SHIFT 0x0
++#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_1__RESTORE_TLP_HDR_0_MASK 0xFFFFFFFFL
++//RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_2
++#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_2__RESTORE_TLP_HDR_1__SHIFT 0x0
++#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_2__RESTORE_TLP_HDR_1_MASK 0xFFFFFFFFL
++//RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_3
++#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_3__RESTORE_TLP_HDR_2__SHIFT 0x0
++#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_3__RESTORE_TLP_HDR_2_MASK 0xFFFFFFFFL
++//RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_4
++#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_4__RESTORE_TLP_HDR_3__SHIFT 0x0
++#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_4__RESTORE_TLP_HDR_3_MASK 0xFFFFFFFFL
++//RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_5
++#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_5__RESTORE_TLP_PREFIX__SHIFT 0x0
++#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_5__RESTORE_TLP_PREFIX_MASK 0xFFFFFFFFL
++//RCC_PFC_AMDGFXAZ_RCC_PFC_AUXPWR_CNTL
++#define RCC_PFC_AMDGFXAZ_RCC_PFC_AUXPWR_CNTL__AUX_CURRENT_OVERRIDE__SHIFT 0x0
++#define RCC_PFC_AMDGFXAZ_RCC_PFC_AUXPWR_CNTL__AUX_POWER_DETECTED_OVERRIDE__SHIFT 0x3
++#define RCC_PFC_AMDGFXAZ_RCC_PFC_AUXPWR_CNTL__AUX_CURRENT_OVERRIDE_MASK 0x00000007L
++#define RCC_PFC_AMDGFXAZ_RCC_PFC_AUXPWR_CNTL__AUX_POWER_DETECTED_OVERRIDE_MASK 0x00000008L
++
++
++// addressBlock: nbio_nbif0_bif_rst_bif_rst_regblk
++//HARD_RST_CTRL
++#define HARD_RST_CTRL__DSPT_CFG_RST_EN__SHIFT 0x0
++#define HARD_RST_CTRL__DSPT_CFG_STICKY_RST_EN__SHIFT 0x1
++#define HARD_RST_CTRL__DSPT_PRV_RST_EN__SHIFT 0x2
++#define HARD_RST_CTRL__DSPT_PRV_STICKY_RST_EN__SHIFT 0x3
++#define HARD_RST_CTRL__EP_CFG_RST_EN__SHIFT 0x4
++#define HARD_RST_CTRL__EP_CFG_STICKY_RST_EN__SHIFT 0x5
++#define HARD_RST_CTRL__EP_PRV_RST_EN__SHIFT 0x6
++#define HARD_RST_CTRL__EP_PRV_STICKY_RST_EN__SHIFT 0x7
++#define HARD_RST_CTRL__SWUS_SHADOW_RST_EN__SHIFT 0x1c
++#define HARD_RST_CTRL__CORE_STICKY_RST_EN__SHIFT 0x1d
++#define HARD_RST_CTRL__RELOAD_STRAP_EN__SHIFT 0x1e
++#define HARD_RST_CTRL__CORE_RST_EN__SHIFT 0x1f
++#define HARD_RST_CTRL__DSPT_CFG_RST_EN_MASK 0x00000001L
++#define HARD_RST_CTRL__DSPT_CFG_STICKY_RST_EN_MASK 0x00000002L
++#define HARD_RST_CTRL__DSPT_PRV_RST_EN_MASK 0x00000004L
++#define HARD_RST_CTRL__DSPT_PRV_STICKY_RST_EN_MASK 0x00000008L
++#define HARD_RST_CTRL__EP_CFG_RST_EN_MASK 0x00000010L
++#define HARD_RST_CTRL__EP_CFG_STICKY_RST_EN_MASK 0x00000020L
++#define HARD_RST_CTRL__EP_PRV_RST_EN_MASK 0x00000040L
++#define HARD_RST_CTRL__EP_PRV_STICKY_RST_EN_MASK 0x00000080L
++#define HARD_RST_CTRL__SWUS_SHADOW_RST_EN_MASK 0x10000000L
++#define HARD_RST_CTRL__CORE_STICKY_RST_EN_MASK 0x20000000L
++#define HARD_RST_CTRL__RELOAD_STRAP_EN_MASK 0x40000000L
++#define HARD_RST_CTRL__CORE_RST_EN_MASK 0x80000000L
++//RSMU_SOFT_RST_CTRL
++#define RSMU_SOFT_RST_CTRL__DSPT_CFG_RST_EN__SHIFT 0x0
++#define RSMU_SOFT_RST_CTRL__DSPT_CFG_STICKY_RST_EN__SHIFT 0x1
++#define RSMU_SOFT_RST_CTRL__DSPT_PRV_RST_EN__SHIFT 0x2
++#define RSMU_SOFT_RST_CTRL__DSPT_PRV_STICKY_RST_EN__SHIFT 0x3
++#define RSMU_SOFT_RST_CTRL__EP_CFG_RST_EN__SHIFT 0x4
++#define RSMU_SOFT_RST_CTRL__EP_CFG_STICKY_RST_EN__SHIFT 0x5
++#define RSMU_SOFT_RST_CTRL__EP_PRV_RST_EN__SHIFT 0x6
++#define RSMU_SOFT_RST_CTRL__EP_PRV_STICKY_RST_EN__SHIFT 0x7
++#define RSMU_SOFT_RST_CTRL__SWUS_SHADOW_RST_EN__SHIFT 0x1c
++#define RSMU_SOFT_RST_CTRL__CORE_STICKY_RST_EN__SHIFT 0x1d
++#define RSMU_SOFT_RST_CTRL__RELOAD_STRAP_EN__SHIFT 0x1e
++#define RSMU_SOFT_RST_CTRL__CORE_RST_EN__SHIFT 0x1f
++#define RSMU_SOFT_RST_CTRL__DSPT_CFG_RST_EN_MASK 0x00000001L
++#define RSMU_SOFT_RST_CTRL__DSPT_CFG_STICKY_RST_EN_MASK 0x00000002L
++#define RSMU_SOFT_RST_CTRL__DSPT_PRV_RST_EN_MASK 0x00000004L
++#define RSMU_SOFT_RST_CTRL__DSPT_PRV_STICKY_RST_EN_MASK 0x00000008L
++#define RSMU_SOFT_RST_CTRL__EP_CFG_RST_EN_MASK 0x00000010L
++#define RSMU_SOFT_RST_CTRL__EP_CFG_STICKY_RST_EN_MASK 0x00000020L
++#define RSMU_SOFT_RST_CTRL__EP_PRV_RST_EN_MASK 0x00000040L
++#define RSMU_SOFT_RST_CTRL__EP_PRV_STICKY_RST_EN_MASK 0x00000080L
++#define RSMU_SOFT_RST_CTRL__SWUS_SHADOW_RST_EN_MASK 0x10000000L
++#define RSMU_SOFT_RST_CTRL__CORE_STICKY_RST_EN_MASK 0x20000000L
++#define RSMU_SOFT_RST_CTRL__RELOAD_STRAP_EN_MASK 0x40000000L
++#define RSMU_SOFT_RST_CTRL__CORE_RST_EN_MASK 0x80000000L
++//SELF_SOFT_RST
++#define SELF_SOFT_RST__DSPT0_CFG_RST__SHIFT 0x0
++#define SELF_SOFT_RST__DSPT0_CFG_STICKY_RST__SHIFT 0x1
++#define SELF_SOFT_RST__DSPT0_PRV_RST__SHIFT 0x2
++#define SELF_SOFT_RST__DSPT0_PRV_STICKY_RST__SHIFT 0x3
++#define SELF_SOFT_RST__EP0_CFG_RST__SHIFT 0x4
++#define SELF_SOFT_RST__EP0_CFG_STICKY_RST__SHIFT 0x5
++#define SELF_SOFT_RST__EP0_PRV_RST__SHIFT 0x6
++#define SELF_SOFT_RST__EP0_PRV_STICKY_RST__SHIFT 0x7
++#define SELF_SOFT_RST__HRPU_SDP_PORT_RST__SHIFT 0x18
++#define SELF_SOFT_RST__GSID_SDP_PORT_RST__SHIFT 0x19
++#define SELF_SOFT_RST__GMIU_SDP_PORT_RST__SHIFT 0x1a
++#define SELF_SOFT_RST__GMID_SDP_PORT_RST__SHIFT 0x1b
++#define SELF_SOFT_RST__SWUS_SHADOW_RST__SHIFT 0x1c
++#define SELF_SOFT_RST__CORE_STICKY_RST__SHIFT 0x1d
++#define SELF_SOFT_RST__RELOAD_STRAP__SHIFT 0x1e
++#define SELF_SOFT_RST__CORE_RST__SHIFT 0x1f
++#define SELF_SOFT_RST__DSPT0_CFG_RST_MASK 0x00000001L
++#define SELF_SOFT_RST__DSPT0_CFG_STICKY_RST_MASK 0x00000002L
++#define SELF_SOFT_RST__DSPT0_PRV_RST_MASK 0x00000004L
++#define SELF_SOFT_RST__DSPT0_PRV_STICKY_RST_MASK 0x00000008L
++#define SELF_SOFT_RST__EP0_CFG_RST_MASK 0x00000010L
++#define SELF_SOFT_RST__EP0_CFG_STICKY_RST_MASK 0x00000020L
++#define SELF_SOFT_RST__EP0_PRV_RST_MASK 0x00000040L
++#define SELF_SOFT_RST__EP0_PRV_STICKY_RST_MASK 0x00000080L
++#define SELF_SOFT_RST__HRPU_SDP_PORT_RST_MASK 0x01000000L
++#define SELF_SOFT_RST__GSID_SDP_PORT_RST_MASK 0x02000000L
++#define SELF_SOFT_RST__GMIU_SDP_PORT_RST_MASK 0x04000000L
++#define SELF_SOFT_RST__GMID_SDP_PORT_RST_MASK 0x08000000L
++#define SELF_SOFT_RST__SWUS_SHADOW_RST_MASK 0x10000000L
++#define SELF_SOFT_RST__CORE_STICKY_RST_MASK 0x20000000L
++#define SELF_SOFT_RST__RELOAD_STRAP_MASK 0x40000000L
++#define SELF_SOFT_RST__CORE_RST_MASK 0x80000000L
++//BIF_GFX_DRV_VPU_RST
++#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_PF_CFG_RST__SHIFT 0x0
++#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_PF_CFG_FLR_EXC_RST__SHIFT 0x1
++#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_PF_CFG_STICKY_RST__SHIFT 0x2
++#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_PF_PRV_RST__SHIFT 0x3
++#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_PF_PRV_STICKY_RST__SHIFT 0x4
++#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_VF_CFG_RST__SHIFT 0x5
++#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_VF_CFG_STICKY_RST__SHIFT 0x6
++#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_VF_PRV_RST__SHIFT 0x7
++#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_PF_CFG_RST_MASK 0x00000001L
++#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_PF_CFG_FLR_EXC_RST_MASK 0x00000002L
++#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_PF_CFG_STICKY_RST_MASK 0x00000004L
++#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_PF_PRV_RST_MASK 0x00000008L
++#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_PF_PRV_STICKY_RST_MASK 0x00000010L
++#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_VF_CFG_RST_MASK 0x00000020L
++#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_VF_CFG_STICKY_RST_MASK 0x00000040L
++#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_VF_PRV_RST_MASK 0x00000080L
++//BIF_RST_MISC_CTRL
++#define BIF_RST_MISC_CTRL__ERRSTATUS_KEPT_IN_PERSTB__SHIFT 0x0
++#define BIF_RST_MISC_CTRL__DRV_RST_MODE__SHIFT 0x2
++#define BIF_RST_MISC_CTRL__DRV_RST_CFG_MASK__SHIFT 0x4
++#define BIF_RST_MISC_CTRL__DRV_RST_BITS_AUTO_CLEAR__SHIFT 0x5
++#define BIF_RST_MISC_CTRL__FLR_RST_BIT_AUTO_CLEAR__SHIFT 0x6
++#define BIF_RST_MISC_CTRL__STRAP_EP_LNK_RST_IOV_EN__SHIFT 0x8
++#define BIF_RST_MISC_CTRL__LNK_RST_GRACE_MODE__SHIFT 0x9
++#define BIF_RST_MISC_CTRL__LNK_RST_GRACE_TIMEOUT__SHIFT 0xa
++#define BIF_RST_MISC_CTRL__LNK_RST_TIMER_SEL__SHIFT 0xd
++#define BIF_RST_MISC_CTRL__LNK_RST_TIMER2_SEL__SHIFT 0xf
++#define BIF_RST_MISC_CTRL__SRIOV_SAVE_VFS_ON_VFENABLE_CLR__SHIFT 0x11
++#define BIF_RST_MISC_CTRL__LNK_RST_DMA_DUMMY_DIS__SHIFT 0x17
++#define BIF_RST_MISC_CTRL__LNK_RST_DMA_DUMMY_RSPSTS__SHIFT 0x18
++#define BIF_RST_MISC_CTRL__ERRSTATUS_KEPT_IN_PERSTB_MASK 0x00000001L
++#define BIF_RST_MISC_CTRL__DRV_RST_MODE_MASK 0x0000000CL
++#define BIF_RST_MISC_CTRL__DRV_RST_CFG_MASK_MASK 0x00000010L
++#define BIF_RST_MISC_CTRL__DRV_RST_BITS_AUTO_CLEAR_MASK 0x00000020L
++#define BIF_RST_MISC_CTRL__FLR_RST_BIT_AUTO_CLEAR_MASK 0x00000040L
++#define BIF_RST_MISC_CTRL__STRAP_EP_LNK_RST_IOV_EN_MASK 0x00000100L
++#define BIF_RST_MISC_CTRL__LNK_RST_GRACE_MODE_MASK 0x00000200L
++#define BIF_RST_MISC_CTRL__LNK_RST_GRACE_TIMEOUT_MASK 0x00001C00L
++#define BIF_RST_MISC_CTRL__LNK_RST_TIMER_SEL_MASK 0x00006000L
++#define BIF_RST_MISC_CTRL__LNK_RST_TIMER2_SEL_MASK 0x00018000L
++#define BIF_RST_MISC_CTRL__SRIOV_SAVE_VFS_ON_VFENABLE_CLR_MASK 0x000E0000L
++#define BIF_RST_MISC_CTRL__LNK_RST_DMA_DUMMY_DIS_MASK 0x00800000L
++#define BIF_RST_MISC_CTRL__LNK_RST_DMA_DUMMY_RSPSTS_MASK 0x03000000L
++//BIF_RST_MISC_CTRL2
++#define BIF_RST_MISC_CTRL2__SWUS_LNK_RST_TRANS_IDLE__SHIFT 0x10
++#define BIF_RST_MISC_CTRL2__SWDS_LNK_RST_TRANS_IDLE__SHIFT 0x11
++#define BIF_RST_MISC_CTRL2__ENDP0_LNK_RST_TRANS_IDLE__SHIFT 0x12
++#define BIF_RST_MISC_CTRL2__ALL_RST_TRANS_IDLE__SHIFT 0x1f
++#define BIF_RST_MISC_CTRL2__SWUS_LNK_RST_TRANS_IDLE_MASK 0x00010000L
++#define BIF_RST_MISC_CTRL2__SWDS_LNK_RST_TRANS_IDLE_MASK 0x00020000L
++#define BIF_RST_MISC_CTRL2__ENDP0_LNK_RST_TRANS_IDLE_MASK 0x00040000L
++#define BIF_RST_MISC_CTRL2__ALL_RST_TRANS_IDLE_MASK 0x80000000L
++//BIF_RST_MISC_CTRL3
++#define BIF_RST_MISC_CTRL3__TIMER_SCALE__SHIFT 0x0
++#define BIF_RST_MISC_CTRL3__PME_TURNOFF_TIMEOUT__SHIFT 0x4
++#define BIF_RST_MISC_CTRL3__PME_TURNOFF_MODE__SHIFT 0x6
++#define BIF_RST_MISC_CTRL3__RSMU_SOFT_RST_CYCLE__SHIFT 0x10
++#define BIF_RST_MISC_CTRL3__TIMER_SCALE_MASK 0x0000000FL
++#define BIF_RST_MISC_CTRL3__PME_TURNOFF_TIMEOUT_MASK 0x00000030L
++#define BIF_RST_MISC_CTRL3__PME_TURNOFF_MODE_MASK 0x00000040L
++#define BIF_RST_MISC_CTRL3__RSMU_SOFT_RST_CYCLE_MASK 0x00FF0000L
++//BIF_RST_GFXVF_FLR_IDLE
++#define BIF_RST_GFXVF_FLR_IDLE__VF0_TRANS_IDLE__SHIFT 0x0
++#define BIF_RST_GFXVF_FLR_IDLE__VF1_TRANS_IDLE__SHIFT 0x1
++#define BIF_RST_GFXVF_FLR_IDLE__VF2_TRANS_IDLE__SHIFT 0x2
++#define BIF_RST_GFXVF_FLR_IDLE__VF3_TRANS_IDLE__SHIFT 0x3
++#define BIF_RST_GFXVF_FLR_IDLE__VF4_TRANS_IDLE__SHIFT 0x4
++#define BIF_RST_GFXVF_FLR_IDLE__VF5_TRANS_IDLE__SHIFT 0x5
++#define BIF_RST_GFXVF_FLR_IDLE__VF6_TRANS_IDLE__SHIFT 0x6
++#define BIF_RST_GFXVF_FLR_IDLE__VF7_TRANS_IDLE__SHIFT 0x7
++#define BIF_RST_GFXVF_FLR_IDLE__VF8_TRANS_IDLE__SHIFT 0x8
++#define BIF_RST_GFXVF_FLR_IDLE__VF9_TRANS_IDLE__SHIFT 0x9
++#define BIF_RST_GFXVF_FLR_IDLE__VF10_TRANS_IDLE__SHIFT 0xa
++#define BIF_RST_GFXVF_FLR_IDLE__VF11_TRANS_IDLE__SHIFT 0xb
++#define BIF_RST_GFXVF_FLR_IDLE__VF12_TRANS_IDLE__SHIFT 0xc
++#define BIF_RST_GFXVF_FLR_IDLE__VF13_TRANS_IDLE__SHIFT 0xd
++#define BIF_RST_GFXVF_FLR_IDLE__VF14_TRANS_IDLE__SHIFT 0xe
++#define BIF_RST_GFXVF_FLR_IDLE__VF15_TRANS_IDLE__SHIFT 0xf
++#define BIF_RST_GFXVF_FLR_IDLE__SOFTPF_TRANS_IDLE__SHIFT 0x1f
++#define BIF_RST_GFXVF_FLR_IDLE__VF0_TRANS_IDLE_MASK 0x00000001L
++#define BIF_RST_GFXVF_FLR_IDLE__VF1_TRANS_IDLE_MASK 0x00000002L
++#define BIF_RST_GFXVF_FLR_IDLE__VF2_TRANS_IDLE_MASK 0x00000004L
++#define BIF_RST_GFXVF_FLR_IDLE__VF3_TRANS_IDLE_MASK 0x00000008L
++#define BIF_RST_GFXVF_FLR_IDLE__VF4_TRANS_IDLE_MASK 0x00000010L
++#define BIF_RST_GFXVF_FLR_IDLE__VF5_TRANS_IDLE_MASK 0x00000020L
++#define BIF_RST_GFXVF_FLR_IDLE__VF6_TRANS_IDLE_MASK 0x00000040L
++#define BIF_RST_GFXVF_FLR_IDLE__VF7_TRANS_IDLE_MASK 0x00000080L
++#define BIF_RST_GFXVF_FLR_IDLE__VF8_TRANS_IDLE_MASK 0x00000100L
++#define BIF_RST_GFXVF_FLR_IDLE__VF9_TRANS_IDLE_MASK 0x00000200L
++#define BIF_RST_GFXVF_FLR_IDLE__VF10_TRANS_IDLE_MASK 0x00000400L
++#define BIF_RST_GFXVF_FLR_IDLE__VF11_TRANS_IDLE_MASK 0x00000800L
++#define BIF_RST_GFXVF_FLR_IDLE__VF12_TRANS_IDLE_MASK 0x00001000L
++#define BIF_RST_GFXVF_FLR_IDLE__VF13_TRANS_IDLE_MASK 0x00002000L
++#define BIF_RST_GFXVF_FLR_IDLE__VF14_TRANS_IDLE_MASK 0x00004000L
++#define BIF_RST_GFXVF_FLR_IDLE__VF15_TRANS_IDLE_MASK 0x00008000L
++#define BIF_RST_GFXVF_FLR_IDLE__SOFTPF_TRANS_IDLE_MASK 0x80000000L
++//DEV0_PF0_FLR_RST_CTRL
++#define DEV0_PF0_FLR_RST_CTRL__PF_CFG_EN__SHIFT 0x0
++#define DEV0_PF0_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1
++#define DEV0_PF0_FLR_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2
++#define DEV0_PF0_FLR_RST_CTRL__PF_PRV_EN__SHIFT 0x3
++#define DEV0_PF0_FLR_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4
++#define DEV0_PF0_FLR_RST_CTRL__VF_CFG_EN__SHIFT 0x5
++#define DEV0_PF0_FLR_RST_CTRL__VF_CFG_STICKY_EN__SHIFT 0x6
++#define DEV0_PF0_FLR_RST_CTRL__VF_PRV_EN__SHIFT 0x7
++#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_CFG_EN__SHIFT 0x8
++#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_CFG_FLR_EXC_EN__SHIFT 0x9
++#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_CFG_STICKY_EN__SHIFT 0xa
++#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_PRV_EN__SHIFT 0xb
++#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_PRV_STICKY_EN__SHIFT 0xc
++#define DEV0_PF0_FLR_RST_CTRL__VF_VF_CFG_EN__SHIFT 0xd
++#define DEV0_PF0_FLR_RST_CTRL__VF_VF_CFG_STICKY_EN__SHIFT 0xe
++#define DEV0_PF0_FLR_RST_CTRL__VF_VF_PRV_EN__SHIFT 0xf
++#define DEV0_PF0_FLR_RST_CTRL__FLR_TWICE_EN__SHIFT 0x10
++#define DEV0_PF0_FLR_RST_CTRL__FLR_GRACE_MODE__SHIFT 0x11
++#define DEV0_PF0_FLR_RST_CTRL__FLR_GRACE_TIMEOUT__SHIFT 0x12
++#define DEV0_PF0_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS__SHIFT 0x17
++#define DEV0_PF0_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS__SHIFT 0x19
++#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_PFCOPY_PRV_EN__SHIFT 0x1f
++#define DEV0_PF0_FLR_RST_CTRL__PF_CFG_EN_MASK 0x00000001L
++#define DEV0_PF0_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L
++#define DEV0_PF0_FLR_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L
++#define DEV0_PF0_FLR_RST_CTRL__PF_PRV_EN_MASK 0x00000008L
++#define DEV0_PF0_FLR_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L
++#define DEV0_PF0_FLR_RST_CTRL__VF_CFG_EN_MASK 0x00000020L
++#define DEV0_PF0_FLR_RST_CTRL__VF_CFG_STICKY_EN_MASK 0x00000040L
++#define DEV0_PF0_FLR_RST_CTRL__VF_PRV_EN_MASK 0x00000080L
++#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_CFG_EN_MASK 0x00000100L
++#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_CFG_FLR_EXC_EN_MASK 0x00000200L
++#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_CFG_STICKY_EN_MASK 0x00000400L
++#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_PRV_EN_MASK 0x00000800L
++#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_PRV_STICKY_EN_MASK 0x00001000L
++#define DEV0_PF0_FLR_RST_CTRL__VF_VF_CFG_EN_MASK 0x00002000L
++#define DEV0_PF0_FLR_RST_CTRL__VF_VF_CFG_STICKY_EN_MASK 0x00004000L
++#define DEV0_PF0_FLR_RST_CTRL__VF_VF_PRV_EN_MASK 0x00008000L
++#define DEV0_PF0_FLR_RST_CTRL__FLR_TWICE_EN_MASK 0x00010000L
++#define DEV0_PF0_FLR_RST_CTRL__FLR_GRACE_MODE_MASK 0x00020000L
++#define DEV0_PF0_FLR_RST_CTRL__FLR_GRACE_TIMEOUT_MASK 0x001C0000L
++#define DEV0_PF0_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS_MASK 0x01800000L
++#define DEV0_PF0_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS_MASK 0x06000000L
++#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_PFCOPY_PRV_EN_MASK 0x80000000L
++//DEV0_PF1_FLR_RST_CTRL
++#define DEV0_PF1_FLR_RST_CTRL__PF_CFG_EN__SHIFT 0x0
++#define DEV0_PF1_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1
++#define DEV0_PF1_FLR_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2
++#define DEV0_PF1_FLR_RST_CTRL__PF_PRV_EN__SHIFT 0x3
++#define DEV0_PF1_FLR_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4
++#define DEV0_PF1_FLR_RST_CTRL__FLR_GRACE_MODE__SHIFT 0x11
++#define DEV0_PF1_FLR_RST_CTRL__FLR_GRACE_TIMEOUT__SHIFT 0x12
++#define DEV0_PF1_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS__SHIFT 0x17
++#define DEV0_PF1_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS__SHIFT 0x19
++#define DEV0_PF1_FLR_RST_CTRL__PF_CFG_EN_MASK 0x00000001L
++#define DEV0_PF1_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L
++#define DEV0_PF1_FLR_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L
++#define DEV0_PF1_FLR_RST_CTRL__PF_PRV_EN_MASK 0x00000008L
++#define DEV0_PF1_FLR_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L
++#define DEV0_PF1_FLR_RST_CTRL__FLR_GRACE_MODE_MASK 0x00020000L
++#define DEV0_PF1_FLR_RST_CTRL__FLR_GRACE_TIMEOUT_MASK 0x001C0000L
++#define DEV0_PF1_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS_MASK 0x01800000L
++#define DEV0_PF1_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS_MASK 0x06000000L
++//DEV0_PF2_FLR_RST_CTRL
++#define DEV0_PF2_FLR_RST_CTRL__PF_CFG_EN__SHIFT 0x0
++#define DEV0_PF2_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1
++#define DEV0_PF2_FLR_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2
++#define DEV0_PF2_FLR_RST_CTRL__PF_PRV_EN__SHIFT 0x3
++#define DEV0_PF2_FLR_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4
++#define DEV0_PF2_FLR_RST_CTRL__FLR_GRACE_MODE__SHIFT 0x11
++#define DEV0_PF2_FLR_RST_CTRL__FLR_GRACE_TIMEOUT__SHIFT 0x12
++#define DEV0_PF2_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS__SHIFT 0x17
++#define DEV0_PF2_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS__SHIFT 0x19
++#define DEV0_PF2_FLR_RST_CTRL__PF_CFG_EN_MASK 0x00000001L
++#define DEV0_PF2_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L
++#define DEV0_PF2_FLR_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L
++#define DEV0_PF2_FLR_RST_CTRL__PF_PRV_EN_MASK 0x00000008L
++#define DEV0_PF2_FLR_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L
++#define DEV0_PF2_FLR_RST_CTRL__FLR_GRACE_MODE_MASK 0x00020000L
++#define DEV0_PF2_FLR_RST_CTRL__FLR_GRACE_TIMEOUT_MASK 0x001C0000L
++#define DEV0_PF2_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS_MASK 0x01800000L
++#define DEV0_PF2_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS_MASK 0x06000000L
++//DEV0_PF3_FLR_RST_CTRL
++#define DEV0_PF3_FLR_RST_CTRL__PF_CFG_EN__SHIFT 0x0
++#define DEV0_PF3_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1
++#define DEV0_PF3_FLR_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2
++#define DEV0_PF3_FLR_RST_CTRL__PF_PRV_EN__SHIFT 0x3
++#define DEV0_PF3_FLR_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4
++#define DEV0_PF3_FLR_RST_CTRL__FLR_GRACE_MODE__SHIFT 0x11
++#define DEV0_PF3_FLR_RST_CTRL__FLR_GRACE_TIMEOUT__SHIFT 0x12
++#define DEV0_PF3_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS__SHIFT 0x17
++#define DEV0_PF3_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS__SHIFT 0x19
++#define DEV0_PF3_FLR_RST_CTRL__PF_CFG_EN_MASK 0x00000001L
++#define DEV0_PF3_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L
++#define DEV0_PF3_FLR_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L
++#define DEV0_PF3_FLR_RST_CTRL__PF_PRV_EN_MASK 0x00000008L
++#define DEV0_PF3_FLR_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L
++#define DEV0_PF3_FLR_RST_CTRL__FLR_GRACE_MODE_MASK 0x00020000L
++#define DEV0_PF3_FLR_RST_CTRL__FLR_GRACE_TIMEOUT_MASK 0x001C0000L
++#define DEV0_PF3_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS_MASK 0x01800000L
++#define DEV0_PF3_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS_MASK 0x06000000L
++//DEV0_PF4_FLR_RST_CTRL
++#define DEV0_PF4_FLR_RST_CTRL__PF_CFG_EN__SHIFT 0x0
++#define DEV0_PF4_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1
++#define DEV0_PF4_FLR_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2
++#define DEV0_PF4_FLR_RST_CTRL__PF_PRV_EN__SHIFT 0x3
++#define DEV0_PF4_FLR_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4
++#define DEV0_PF4_FLR_RST_CTRL__FLR_GRACE_MODE__SHIFT 0x11
++#define DEV0_PF4_FLR_RST_CTRL__FLR_GRACE_TIMEOUT__SHIFT 0x12
++#define DEV0_PF4_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS__SHIFT 0x17
++#define DEV0_PF4_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS__SHIFT 0x19
++#define DEV0_PF4_FLR_RST_CTRL__PF_CFG_EN_MASK 0x00000001L
++#define DEV0_PF4_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L
++#define DEV0_PF4_FLR_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L
++#define DEV0_PF4_FLR_RST_CTRL__PF_PRV_EN_MASK 0x00000008L
++#define DEV0_PF4_FLR_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L
++#define DEV0_PF4_FLR_RST_CTRL__FLR_GRACE_MODE_MASK 0x00020000L
++#define DEV0_PF4_FLR_RST_CTRL__FLR_GRACE_TIMEOUT_MASK 0x001C0000L
++#define DEV0_PF4_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS_MASK 0x01800000L
++#define DEV0_PF4_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS_MASK 0x06000000L
++//DEV0_PF5_FLR_RST_CTRL
++#define DEV0_PF5_FLR_RST_CTRL__PF_CFG_EN__SHIFT 0x0
++#define DEV0_PF5_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1
++#define DEV0_PF5_FLR_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2
++#define DEV0_PF5_FLR_RST_CTRL__PF_PRV_EN__SHIFT 0x3
++#define DEV0_PF5_FLR_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4
++#define DEV0_PF5_FLR_RST_CTRL__FLR_GRACE_MODE__SHIFT 0x11
++#define DEV0_PF5_FLR_RST_CTRL__FLR_GRACE_TIMEOUT__SHIFT 0x12
++#define DEV0_PF5_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS__SHIFT 0x17
++#define DEV0_PF5_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS__SHIFT 0x19
++#define DEV0_PF5_FLR_RST_CTRL__PF_CFG_EN_MASK 0x00000001L
++#define DEV0_PF5_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L
++#define DEV0_PF5_FLR_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L
++#define DEV0_PF5_FLR_RST_CTRL__PF_PRV_EN_MASK 0x00000008L
++#define DEV0_PF5_FLR_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L
++#define DEV0_PF5_FLR_RST_CTRL__FLR_GRACE_MODE_MASK 0x00020000L
++#define DEV0_PF5_FLR_RST_CTRL__FLR_GRACE_TIMEOUT_MASK 0x001C0000L
++#define DEV0_PF5_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS_MASK 0x01800000L
++#define DEV0_PF5_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS_MASK 0x06000000L
++//DEV0_PF6_FLR_RST_CTRL
++#define DEV0_PF6_FLR_RST_CTRL__PF_CFG_EN__SHIFT 0x0
++#define DEV0_PF6_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1
++#define DEV0_PF6_FLR_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2
++#define DEV0_PF6_FLR_RST_CTRL__PF_PRV_EN__SHIFT 0x3
++#define DEV0_PF6_FLR_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4
++#define DEV0_PF6_FLR_RST_CTRL__FLR_GRACE_MODE__SHIFT 0x11
++#define DEV0_PF6_FLR_RST_CTRL__FLR_GRACE_TIMEOUT__SHIFT 0x12
++#define DEV0_PF6_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS__SHIFT 0x17
++#define DEV0_PF6_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS__SHIFT 0x19
++#define DEV0_PF6_FLR_RST_CTRL__PF_CFG_EN_MASK 0x00000001L
++#define DEV0_PF6_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L
++#define DEV0_PF6_FLR_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L
++#define DEV0_PF6_FLR_RST_CTRL__PF_PRV_EN_MASK 0x00000008L
++#define DEV0_PF6_FLR_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L
++#define DEV0_PF6_FLR_RST_CTRL__FLR_GRACE_MODE_MASK 0x00020000L
++#define DEV0_PF6_FLR_RST_CTRL__FLR_GRACE_TIMEOUT_MASK 0x001C0000L
++#define DEV0_PF6_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS_MASK 0x01800000L
++#define DEV0_PF6_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS_MASK 0x06000000L
++//DEV0_PF7_FLR_RST_CTRL
++#define DEV0_PF7_FLR_RST_CTRL__PF_CFG_EN__SHIFT 0x0
++#define DEV0_PF7_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1
++#define DEV0_PF7_FLR_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2
++#define DEV0_PF7_FLR_RST_CTRL__PF_PRV_EN__SHIFT 0x3
++#define DEV0_PF7_FLR_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4
++#define DEV0_PF7_FLR_RST_CTRL__FLR_GRACE_MODE__SHIFT 0x11
++#define DEV0_PF7_FLR_RST_CTRL__FLR_GRACE_TIMEOUT__SHIFT 0x12
++#define DEV0_PF7_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS__SHIFT 0x17
++#define DEV0_PF7_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS__SHIFT 0x19
++#define DEV0_PF7_FLR_RST_CTRL__PF_CFG_EN_MASK 0x00000001L
++#define DEV0_PF7_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L
++#define DEV0_PF7_FLR_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L
++#define DEV0_PF7_FLR_RST_CTRL__PF_PRV_EN_MASK 0x00000008L
++#define DEV0_PF7_FLR_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L
++#define DEV0_PF7_FLR_RST_CTRL__FLR_GRACE_MODE_MASK 0x00020000L
++#define DEV0_PF7_FLR_RST_CTRL__FLR_GRACE_TIMEOUT_MASK 0x001C0000L
++#define DEV0_PF7_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS_MASK 0x01800000L
++#define DEV0_PF7_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS_MASK 0x06000000L
++//BIF_INST_RESET_INTR_STS
++#define BIF_INST_RESET_INTR_STS__EP0_LINK_RESET_INTR_STS__SHIFT 0x0
++#define BIF_INST_RESET_INTR_STS__EP0_LINK_RESET_CFG_ONLY_INTR_STS__SHIFT 0x1
++#define BIF_INST_RESET_INTR_STS__DRV_RESET_M0_INTR_STS__SHIFT 0x2
++#define BIF_INST_RESET_INTR_STS__DRV_RESET_M1_INTR_STS__SHIFT 0x3
++#define BIF_INST_RESET_INTR_STS__DRV_RESET_M2_INTR_STS__SHIFT 0x4
++#define BIF_INST_RESET_INTR_STS__EP0_LINK_RESET_INTR_STS_MASK 0x00000001L
++#define BIF_INST_RESET_INTR_STS__EP0_LINK_RESET_CFG_ONLY_INTR_STS_MASK 0x00000002L
++#define BIF_INST_RESET_INTR_STS__DRV_RESET_M0_INTR_STS_MASK 0x00000004L
++#define BIF_INST_RESET_INTR_STS__DRV_RESET_M1_INTR_STS_MASK 0x00000008L
++#define BIF_INST_RESET_INTR_STS__DRV_RESET_M2_INTR_STS_MASK 0x00000010L
++//BIF_PF_FLR_INTR_STS
++#define BIF_PF_FLR_INTR_STS__DEV0_PF0_FLR_INTR_STS__SHIFT 0x0
++#define BIF_PF_FLR_INTR_STS__DEV0_PF1_FLR_INTR_STS__SHIFT 0x1
++#define BIF_PF_FLR_INTR_STS__DEV0_PF2_FLR_INTR_STS__SHIFT 0x2
++#define BIF_PF_FLR_INTR_STS__DEV0_PF3_FLR_INTR_STS__SHIFT 0x3
++#define BIF_PF_FLR_INTR_STS__DEV0_PF4_FLR_INTR_STS__SHIFT 0x4
++#define BIF_PF_FLR_INTR_STS__DEV0_PF5_FLR_INTR_STS__SHIFT 0x5
++#define BIF_PF_FLR_INTR_STS__DEV0_PF6_FLR_INTR_STS__SHIFT 0x6
++#define BIF_PF_FLR_INTR_STS__DEV0_PF7_FLR_INTR_STS__SHIFT 0x7
++#define BIF_PF_FLR_INTR_STS__DEV0_PF0_FLR_INTR_STS_MASK 0x00000001L
++#define BIF_PF_FLR_INTR_STS__DEV0_PF1_FLR_INTR_STS_MASK 0x00000002L
++#define BIF_PF_FLR_INTR_STS__DEV0_PF2_FLR_INTR_STS_MASK 0x00000004L
++#define BIF_PF_FLR_INTR_STS__DEV0_PF3_FLR_INTR_STS_MASK 0x00000008L
++#define BIF_PF_FLR_INTR_STS__DEV0_PF4_FLR_INTR_STS_MASK 0x00000010L
++#define BIF_PF_FLR_INTR_STS__DEV0_PF5_FLR_INTR_STS_MASK 0x00000020L
++#define BIF_PF_FLR_INTR_STS__DEV0_PF6_FLR_INTR_STS_MASK 0x00000040L
++#define BIF_PF_FLR_INTR_STS__DEV0_PF7_FLR_INTR_STS_MASK 0x00000080L
++//BIF_D3HOTD0_INTR_STS
++#define BIF_D3HOTD0_INTR_STS__DEV0_PF0_D3HOTD0_INTR_STS__SHIFT 0x0
++#define BIF_D3HOTD0_INTR_STS__DEV0_PF1_D3HOTD0_INTR_STS__SHIFT 0x1
++#define BIF_D3HOTD0_INTR_STS__DEV0_PF2_D3HOTD0_INTR_STS__SHIFT 0x2
++#define BIF_D3HOTD0_INTR_STS__DEV0_PF3_D3HOTD0_INTR_STS__SHIFT 0x3
++#define BIF_D3HOTD0_INTR_STS__DEV0_PF4_D3HOTD0_INTR_STS__SHIFT 0x4
++#define BIF_D3HOTD0_INTR_STS__DEV0_PF5_D3HOTD0_INTR_STS__SHIFT 0x5
++#define BIF_D3HOTD0_INTR_STS__DEV0_PF6_D3HOTD0_INTR_STS__SHIFT 0x6
++#define BIF_D3HOTD0_INTR_STS__DEV0_PF7_D3HOTD0_INTR_STS__SHIFT 0x7
++#define BIF_D3HOTD0_INTR_STS__DEV0_PF0_D3HOTD0_INTR_STS_MASK 0x00000001L
++#define BIF_D3HOTD0_INTR_STS__DEV0_PF1_D3HOTD0_INTR_STS_MASK 0x00000002L
++#define BIF_D3HOTD0_INTR_STS__DEV0_PF2_D3HOTD0_INTR_STS_MASK 0x00000004L
++#define BIF_D3HOTD0_INTR_STS__DEV0_PF3_D3HOTD0_INTR_STS_MASK 0x00000008L
++#define BIF_D3HOTD0_INTR_STS__DEV0_PF4_D3HOTD0_INTR_STS_MASK 0x00000010L
++#define BIF_D3HOTD0_INTR_STS__DEV0_PF5_D3HOTD0_INTR_STS_MASK 0x00000020L
++#define BIF_D3HOTD0_INTR_STS__DEV0_PF6_D3HOTD0_INTR_STS_MASK 0x00000040L
++#define BIF_D3HOTD0_INTR_STS__DEV0_PF7_D3HOTD0_INTR_STS_MASK 0x00000080L
++//BIF_POWER_INTR_STS
++#define BIF_POWER_INTR_STS__DEV0_PME_TURN_OFF_INTR_STS__SHIFT 0x0
++#define BIF_POWER_INTR_STS__PORT0_DSTATE_INTR_STS__SHIFT 0x10
++#define BIF_POWER_INTR_STS__DEV0_PME_TURN_OFF_INTR_STS_MASK 0x00000001L
++#define BIF_POWER_INTR_STS__PORT0_DSTATE_INTR_STS_MASK 0x00010000L
++//BIF_PF_DSTATE_INTR_STS
++#define BIF_PF_DSTATE_INTR_STS__DEV0_PF0_DSTATE_INTR_STS__SHIFT 0x0
++#define BIF_PF_DSTATE_INTR_STS__DEV0_PF1_DSTATE_INTR_STS__SHIFT 0x1
++#define BIF_PF_DSTATE_INTR_STS__DEV0_PF2_DSTATE_INTR_STS__SHIFT 0x2
++#define BIF_PF_DSTATE_INTR_STS__DEV0_PF3_DSTATE_INTR_STS__SHIFT 0x3
++#define BIF_PF_DSTATE_INTR_STS__DEV0_PF4_DSTATE_INTR_STS__SHIFT 0x4
++#define BIF_PF_DSTATE_INTR_STS__DEV0_PF5_DSTATE_INTR_STS__SHIFT 0x5
++#define BIF_PF_DSTATE_INTR_STS__DEV0_PF6_DSTATE_INTR_STS__SHIFT 0x6
++#define BIF_PF_DSTATE_INTR_STS__DEV0_PF7_DSTATE_INTR_STS__SHIFT 0x7
++#define BIF_PF_DSTATE_INTR_STS__DEV0_PF0_DSTATE_INTR_STS_MASK 0x00000001L
++#define BIF_PF_DSTATE_INTR_STS__DEV0_PF1_DSTATE_INTR_STS_MASK 0x00000002L
++#define BIF_PF_DSTATE_INTR_STS__DEV0_PF2_DSTATE_INTR_STS_MASK 0x00000004L
++#define BIF_PF_DSTATE_INTR_STS__DEV0_PF3_DSTATE_INTR_STS_MASK 0x00000008L
++#define BIF_PF_DSTATE_INTR_STS__DEV0_PF4_DSTATE_INTR_STS_MASK 0x00000010L
++#define BIF_PF_DSTATE_INTR_STS__DEV0_PF5_DSTATE_INTR_STS_MASK 0x00000020L
++#define BIF_PF_DSTATE_INTR_STS__DEV0_PF6_DSTATE_INTR_STS_MASK 0x00000040L
++#define BIF_PF_DSTATE_INTR_STS__DEV0_PF7_DSTATE_INTR_STS_MASK 0x00000080L
++//BIF_PF0_VF_FLR_INTR_STS
++#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF0_FLR_INTR_STS__SHIFT 0x0
++#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF1_FLR_INTR_STS__SHIFT 0x1
++#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF2_FLR_INTR_STS__SHIFT 0x2
++#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF3_FLR_INTR_STS__SHIFT 0x3
++#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF4_FLR_INTR_STS__SHIFT 0x4
++#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF5_FLR_INTR_STS__SHIFT 0x5
++#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF6_FLR_INTR_STS__SHIFT 0x6
++#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF7_FLR_INTR_STS__SHIFT 0x7
++#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF8_FLR_INTR_STS__SHIFT 0x8
++#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF9_FLR_INTR_STS__SHIFT 0x9
++#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF10_FLR_INTR_STS__SHIFT 0xa
++#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF11_FLR_INTR_STS__SHIFT 0xb
++#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF12_FLR_INTR_STS__SHIFT 0xc
++#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF13_FLR_INTR_STS__SHIFT 0xd
++#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF14_FLR_INTR_STS__SHIFT 0xe
++#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF15_FLR_INTR_STS__SHIFT 0xf
++#define BIF_PF0_VF_FLR_INTR_STS__PF0_SOFTPF_FLR_INTR_STS__SHIFT 0x1f
++#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF0_FLR_INTR_STS_MASK 0x00000001L
++#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF1_FLR_INTR_STS_MASK 0x00000002L
++#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF2_FLR_INTR_STS_MASK 0x00000004L
++#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF3_FLR_INTR_STS_MASK 0x00000008L
++#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF4_FLR_INTR_STS_MASK 0x00000010L
++#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF5_FLR_INTR_STS_MASK 0x00000020L
++#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF6_FLR_INTR_STS_MASK 0x00000040L
++#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF7_FLR_INTR_STS_MASK 0x00000080L
++#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF8_FLR_INTR_STS_MASK 0x00000100L
++#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF9_FLR_INTR_STS_MASK 0x00000200L
++#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF10_FLR_INTR_STS_MASK 0x00000400L
++#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF11_FLR_INTR_STS_MASK 0x00000800L
++#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF12_FLR_INTR_STS_MASK 0x00001000L
++#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF13_FLR_INTR_STS_MASK 0x00002000L
++#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF14_FLR_INTR_STS_MASK 0x00004000L
++#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF15_FLR_INTR_STS_MASK 0x00008000L
++#define BIF_PF0_VF_FLR_INTR_STS__PF0_SOFTPF_FLR_INTR_STS_MASK 0x80000000L
++//BIF_INST_RESET_INTR_MASK
++#define BIF_INST_RESET_INTR_MASK__EP0_LINK_RESET_INTR_MASK__SHIFT 0x0
++#define BIF_INST_RESET_INTR_MASK__EP0_LINK_RESET_CFG_ONLY_INTR_MASK__SHIFT 0x1
++#define BIF_INST_RESET_INTR_MASK__DRV_RESET_M0_INTR_MASK__SHIFT 0x2
++#define BIF_INST_RESET_INTR_MASK__DRV_RESET_M1_INTR_MASK__SHIFT 0x3
++#define BIF_INST_RESET_INTR_MASK__DRV_RESET_M2_INTR_MASK__SHIFT 0x4
++#define BIF_INST_RESET_INTR_MASK__EP0_LINK_RESET_INTR_MASK_MASK 0x00000001L
++#define BIF_INST_RESET_INTR_MASK__EP0_LINK_RESET_CFG_ONLY_INTR_MASK_MASK 0x00000002L
++#define BIF_INST_RESET_INTR_MASK__DRV_RESET_M0_INTR_MASK_MASK 0x00000004L
++#define BIF_INST_RESET_INTR_MASK__DRV_RESET_M1_INTR_MASK_MASK 0x00000008L
++#define BIF_INST_RESET_INTR_MASK__DRV_RESET_M2_INTR_MASK_MASK 0x00000010L
++//BIF_PF_FLR_INTR_MASK
++#define BIF_PF_FLR_INTR_MASK__DEV0_PF0_FLR_INTR_MASK__SHIFT 0x0
++#define BIF_PF_FLR_INTR_MASK__DEV0_PF1_FLR_INTR_MASK__SHIFT 0x1
++#define BIF_PF_FLR_INTR_MASK__DEV0_PF2_FLR_INTR_MASK__SHIFT 0x2
++#define BIF_PF_FLR_INTR_MASK__DEV0_PF3_FLR_INTR_MASK__SHIFT 0x3
++#define BIF_PF_FLR_INTR_MASK__DEV0_PF4_FLR_INTR_MASK__SHIFT 0x4
++#define BIF_PF_FLR_INTR_MASK__DEV0_PF5_FLR_INTR_MASK__SHIFT 0x5
++#define BIF_PF_FLR_INTR_MASK__DEV0_PF6_FLR_INTR_MASK__SHIFT 0x6
++#define BIF_PF_FLR_INTR_MASK__DEV0_PF7_FLR_INTR_MASK__SHIFT 0x7
++#define BIF_PF_FLR_INTR_MASK__DEV0_PF0_FLR_INTR_MASK_MASK 0x00000001L
++#define BIF_PF_FLR_INTR_MASK__DEV0_PF1_FLR_INTR_MASK_MASK 0x00000002L
++#define BIF_PF_FLR_INTR_MASK__DEV0_PF2_FLR_INTR_MASK_MASK 0x00000004L
++#define BIF_PF_FLR_INTR_MASK__DEV0_PF3_FLR_INTR_MASK_MASK 0x00000008L
++#define BIF_PF_FLR_INTR_MASK__DEV0_PF4_FLR_INTR_MASK_MASK 0x00000010L
++#define BIF_PF_FLR_INTR_MASK__DEV0_PF5_FLR_INTR_MASK_MASK 0x00000020L
++#define BIF_PF_FLR_INTR_MASK__DEV0_PF6_FLR_INTR_MASK_MASK 0x00000040L
++#define BIF_PF_FLR_INTR_MASK__DEV0_PF7_FLR_INTR_MASK_MASK 0x00000080L
++//BIF_D3HOTD0_INTR_MASK
++#define BIF_D3HOTD0_INTR_MASK__DEV0_PF0_D3HOTD0_INTR_MASK__SHIFT 0x0
++#define BIF_D3HOTD0_INTR_MASK__DEV0_PF1_D3HOTD0_INTR_MASK__SHIFT 0x1
++#define BIF_D3HOTD0_INTR_MASK__DEV0_PF2_D3HOTD0_INTR_MASK__SHIFT 0x2
++#define BIF_D3HOTD0_INTR_MASK__DEV0_PF3_D3HOTD0_INTR_MASK__SHIFT 0x3
++#define BIF_D3HOTD0_INTR_MASK__DEV0_PF4_D3HOTD0_INTR_MASK__SHIFT 0x4
++#define BIF_D3HOTD0_INTR_MASK__DEV0_PF5_D3HOTD0_INTR_MASK__SHIFT 0x5
++#define BIF_D3HOTD0_INTR_MASK__DEV0_PF6_D3HOTD0_INTR_MASK__SHIFT 0x6
++#define BIF_D3HOTD0_INTR_MASK__DEV0_PF7_D3HOTD0_INTR_MASK__SHIFT 0x7
++#define BIF_D3HOTD0_INTR_MASK__DEV0_PF0_D3HOTD0_INTR_MASK_MASK 0x00000001L
++#define BIF_D3HOTD0_INTR_MASK__DEV0_PF1_D3HOTD0_INTR_MASK_MASK 0x00000002L
++#define BIF_D3HOTD0_INTR_MASK__DEV0_PF2_D3HOTD0_INTR_MASK_MASK 0x00000004L
++#define BIF_D3HOTD0_INTR_MASK__DEV0_PF3_D3HOTD0_INTR_MASK_MASK 0x00000008L
++#define BIF_D3HOTD0_INTR_MASK__DEV0_PF4_D3HOTD0_INTR_MASK_MASK 0x00000010L
++#define BIF_D3HOTD0_INTR_MASK__DEV0_PF5_D3HOTD0_INTR_MASK_MASK 0x00000020L
++#define BIF_D3HOTD0_INTR_MASK__DEV0_PF6_D3HOTD0_INTR_MASK_MASK 0x00000040L
++#define BIF_D3HOTD0_INTR_MASK__DEV0_PF7_D3HOTD0_INTR_MASK_MASK 0x00000080L
++//BIF_POWER_INTR_MASK
++#define BIF_POWER_INTR_MASK__DEV0_PME_TURN_OFF_INTR_MASK__SHIFT 0x0
++#define BIF_POWER_INTR_MASK__PORT0_DSTATE_INTR_MASK__SHIFT 0x10
++#define BIF_POWER_INTR_MASK__DEV0_PME_TURN_OFF_INTR_MASK_MASK 0x00000001L
++#define BIF_POWER_INTR_MASK__PORT0_DSTATE_INTR_MASK_MASK 0x00010000L
++//BIF_PF_DSTATE_INTR_MASK
++#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF0_DSTATE_INTR_MASK__SHIFT 0x0
++#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF1_DSTATE_INTR_MASK__SHIFT 0x1
++#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF2_DSTATE_INTR_MASK__SHIFT 0x2
++#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF3_DSTATE_INTR_MASK__SHIFT 0x3
++#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF4_DSTATE_INTR_MASK__SHIFT 0x4
++#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF5_DSTATE_INTR_MASK__SHIFT 0x5
++#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF6_DSTATE_INTR_MASK__SHIFT 0x6
++#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF7_DSTATE_INTR_MASK__SHIFT 0x7
++#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF0_DSTATE_INTR_MASK_MASK 0x00000001L
++#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF1_DSTATE_INTR_MASK_MASK 0x00000002L
++#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF2_DSTATE_INTR_MASK_MASK 0x00000004L
++#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF3_DSTATE_INTR_MASK_MASK 0x00000008L
++#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF4_DSTATE_INTR_MASK_MASK 0x00000010L
++#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF5_DSTATE_INTR_MASK_MASK 0x00000020L
++#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF6_DSTATE_INTR_MASK_MASK 0x00000040L
++#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF7_DSTATE_INTR_MASK_MASK 0x00000080L
++//BIF_PF0_VF_FLR_INTR_MASK
++#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF0_FLR_INTR_MASK__SHIFT 0x0
++#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF1_FLR_INTR_MASK__SHIFT 0x1
++#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF2_FLR_INTR_MASK__SHIFT 0x2
++#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF3_FLR_INTR_MASK__SHIFT 0x3
++#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF4_FLR_INTR_MASK__SHIFT 0x4
++#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF5_FLR_INTR_MASK__SHIFT 0x5
++#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF6_FLR_INTR_MASK__SHIFT 0x6
++#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF7_FLR_INTR_MASK__SHIFT 0x7
++#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF8_FLR_INTR_MASK__SHIFT 0x8
++#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF9_FLR_INTR_MASK__SHIFT 0x9
++#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF10_FLR_INTR_MASK__SHIFT 0xa
++#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF11_FLR_INTR_MASK__SHIFT 0xb
++#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF12_FLR_INTR_MASK__SHIFT 0xc
++#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF13_FLR_INTR_MASK__SHIFT 0xd
++#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF14_FLR_INTR_MASK__SHIFT 0xe
++#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF15_FLR_INTR_MASK__SHIFT 0xf
++#define BIF_PF0_VF_FLR_INTR_MASK__PF0_SOFTPF_FLR_INTR_MASK__SHIFT 0x1f
++#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF0_FLR_INTR_MASK_MASK 0x00000001L
++#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF1_FLR_INTR_MASK_MASK 0x00000002L
++#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF2_FLR_INTR_MASK_MASK 0x00000004L
++#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF3_FLR_INTR_MASK_MASK 0x00000008L
++#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF4_FLR_INTR_MASK_MASK 0x00000010L
++#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF5_FLR_INTR_MASK_MASK 0x00000020L
++#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF6_FLR_INTR_MASK_MASK 0x00000040L
++#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF7_FLR_INTR_MASK_MASK 0x00000080L
++#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF8_FLR_INTR_MASK_MASK 0x00000100L
++#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF9_FLR_INTR_MASK_MASK 0x00000200L
++#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF10_FLR_INTR_MASK_MASK 0x00000400L
++#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF11_FLR_INTR_MASK_MASK 0x00000800L
++#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF12_FLR_INTR_MASK_MASK 0x00001000L
++#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF13_FLR_INTR_MASK_MASK 0x00002000L
++#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF14_FLR_INTR_MASK_MASK 0x00004000L
++#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF15_FLR_INTR_MASK_MASK 0x00008000L
++#define BIF_PF0_VF_FLR_INTR_MASK__PF0_SOFTPF_FLR_INTR_MASK_MASK 0x80000000L
++//BIF_PF_FLR_RST
++#define BIF_PF_FLR_RST__DEV0_PF0_FLR_RST__SHIFT 0x0
++#define BIF_PF_FLR_RST__DEV0_PF1_FLR_RST__SHIFT 0x1
++#define BIF_PF_FLR_RST__DEV0_PF2_FLR_RST__SHIFT 0x2
++#define BIF_PF_FLR_RST__DEV0_PF3_FLR_RST__SHIFT 0x3
++#define BIF_PF_FLR_RST__DEV0_PF4_FLR_RST__SHIFT 0x4
++#define BIF_PF_FLR_RST__DEV0_PF5_FLR_RST__SHIFT 0x5
++#define BIF_PF_FLR_RST__DEV0_PF6_FLR_RST__SHIFT 0x6
++#define BIF_PF_FLR_RST__DEV0_PF7_FLR_RST__SHIFT 0x7
++#define BIF_PF_FLR_RST__DEV0_PF0_FLR_RST_MASK 0x00000001L
++#define BIF_PF_FLR_RST__DEV0_PF1_FLR_RST_MASK 0x00000002L
++#define BIF_PF_FLR_RST__DEV0_PF2_FLR_RST_MASK 0x00000004L
++#define BIF_PF_FLR_RST__DEV0_PF3_FLR_RST_MASK 0x00000008L
++#define BIF_PF_FLR_RST__DEV0_PF4_FLR_RST_MASK 0x00000010L
++#define BIF_PF_FLR_RST__DEV0_PF5_FLR_RST_MASK 0x00000020L
++#define BIF_PF_FLR_RST__DEV0_PF6_FLR_RST_MASK 0x00000040L
++#define BIF_PF_FLR_RST__DEV0_PF7_FLR_RST_MASK 0x00000080L
++//BIF_PF0_VF_FLR_RST
++#define BIF_PF0_VF_FLR_RST__PF0_VF0_FLR_RST__SHIFT 0x0
++#define BIF_PF0_VF_FLR_RST__PF0_VF1_FLR_RST__SHIFT 0x1
++#define BIF_PF0_VF_FLR_RST__PF0_VF2_FLR_RST__SHIFT 0x2
++#define BIF_PF0_VF_FLR_RST__PF0_VF3_FLR_RST__SHIFT 0x3
++#define BIF_PF0_VF_FLR_RST__PF0_VF4_FLR_RST__SHIFT 0x4
++#define BIF_PF0_VF_FLR_RST__PF0_VF5_FLR_RST__SHIFT 0x5
++#define BIF_PF0_VF_FLR_RST__PF0_VF6_FLR_RST__SHIFT 0x6
++#define BIF_PF0_VF_FLR_RST__PF0_VF7_FLR_RST__SHIFT 0x7
++#define BIF_PF0_VF_FLR_RST__PF0_VF8_FLR_RST__SHIFT 0x8
++#define BIF_PF0_VF_FLR_RST__PF0_VF9_FLR_RST__SHIFT 0x9
++#define BIF_PF0_VF_FLR_RST__PF0_VF10_FLR_RST__SHIFT 0xa
++#define BIF_PF0_VF_FLR_RST__PF0_VF11_FLR_RST__SHIFT 0xb
++#define BIF_PF0_VF_FLR_RST__PF0_VF12_FLR_RST__SHIFT 0xc
++#define BIF_PF0_VF_FLR_RST__PF0_VF13_FLR_RST__SHIFT 0xd
++#define BIF_PF0_VF_FLR_RST__PF0_VF14_FLR_RST__SHIFT 0xe
++#define BIF_PF0_VF_FLR_RST__PF0_VF15_FLR_RST__SHIFT 0xf
++#define BIF_PF0_VF_FLR_RST__PF0_SOFTPF_FLR_RST__SHIFT 0x1f
++#define BIF_PF0_VF_FLR_RST__PF0_VF0_FLR_RST_MASK 0x00000001L
++#define BIF_PF0_VF_FLR_RST__PF0_VF1_FLR_RST_MASK 0x00000002L
++#define BIF_PF0_VF_FLR_RST__PF0_VF2_FLR_RST_MASK 0x00000004L
++#define BIF_PF0_VF_FLR_RST__PF0_VF3_FLR_RST_MASK 0x00000008L
++#define BIF_PF0_VF_FLR_RST__PF0_VF4_FLR_RST_MASK 0x00000010L
++#define BIF_PF0_VF_FLR_RST__PF0_VF5_FLR_RST_MASK 0x00000020L
++#define BIF_PF0_VF_FLR_RST__PF0_VF6_FLR_RST_MASK 0x00000040L
++#define BIF_PF0_VF_FLR_RST__PF0_VF7_FLR_RST_MASK 0x00000080L
++#define BIF_PF0_VF_FLR_RST__PF0_VF8_FLR_RST_MASK 0x00000100L
++#define BIF_PF0_VF_FLR_RST__PF0_VF9_FLR_RST_MASK 0x00000200L
++#define BIF_PF0_VF_FLR_RST__PF0_VF10_FLR_RST_MASK 0x00000400L
++#define BIF_PF0_VF_FLR_RST__PF0_VF11_FLR_RST_MASK 0x00000800L
++#define BIF_PF0_VF_FLR_RST__PF0_VF12_FLR_RST_MASK 0x00001000L
++#define BIF_PF0_VF_FLR_RST__PF0_VF13_FLR_RST_MASK 0x00002000L
++#define BIF_PF0_VF_FLR_RST__PF0_VF14_FLR_RST_MASK 0x00004000L
++#define BIF_PF0_VF_FLR_RST__PF0_VF15_FLR_RST_MASK 0x00008000L
++#define BIF_PF0_VF_FLR_RST__PF0_SOFTPF_FLR_RST_MASK 0x80000000L
++//BIF_DEV0_PF0_DSTATE_VALUE
++#define BIF_DEV0_PF0_DSTATE_VALUE__DEV0_PF0_DSTATE_TGT_VALUE__SHIFT 0x0
++#define BIF_DEV0_PF0_DSTATE_VALUE__DEV0_PF0_DSTATE_NEED_D3TOD0_RESET__SHIFT 0x2
++#define BIF_DEV0_PF0_DSTATE_VALUE__DEV0_PF0_DSTATE_ACK_VALUE__SHIFT 0x10
++#define BIF_DEV0_PF0_DSTATE_VALUE__DEV0_PF0_DSTATE_TGT_VALUE_MASK 0x00000003L
++#define BIF_DEV0_PF0_DSTATE_VALUE__DEV0_PF0_DSTATE_NEED_D3TOD0_RESET_MASK 0x00000004L
++#define BIF_DEV0_PF0_DSTATE_VALUE__DEV0_PF0_DSTATE_ACK_VALUE_MASK 0x00030000L
++//BIF_DEV0_PF1_DSTATE_VALUE
++#define BIF_DEV0_PF1_DSTATE_VALUE__DEV0_PF1_DSTATE_TGT_VALUE__SHIFT 0x0
++#define BIF_DEV0_PF1_DSTATE_VALUE__DEV0_PF1_DSTATE_NEED_D3TOD0_RESET__SHIFT 0x2
++#define BIF_DEV0_PF1_DSTATE_VALUE__DEV0_PF1_DSTATE_ACK_VALUE__SHIFT 0x10
++#define BIF_DEV0_PF1_DSTATE_VALUE__DEV0_PF1_DSTATE_TGT_VALUE_MASK 0x00000003L
++#define BIF_DEV0_PF1_DSTATE_VALUE__DEV0_PF1_DSTATE_NEED_D3TOD0_RESET_MASK 0x00000004L
++#define BIF_DEV0_PF1_DSTATE_VALUE__DEV0_PF1_DSTATE_ACK_VALUE_MASK 0x00030000L
++//BIF_DEV0_PF2_DSTATE_VALUE
++#define BIF_DEV0_PF2_DSTATE_VALUE__DEV0_PF2_DSTATE_TGT_VALUE__SHIFT 0x0
++#define BIF_DEV0_PF2_DSTATE_VALUE__DEV0_PF2_DSTATE_NEED_D3TOD0_RESET__SHIFT 0x2
++#define BIF_DEV0_PF2_DSTATE_VALUE__DEV0_PF2_DSTATE_ACK_VALUE__SHIFT 0x10
++#define BIF_DEV0_PF2_DSTATE_VALUE__DEV0_PF2_DSTATE_TGT_VALUE_MASK 0x00000003L
++#define BIF_DEV0_PF2_DSTATE_VALUE__DEV0_PF2_DSTATE_NEED_D3TOD0_RESET_MASK 0x00000004L
++#define BIF_DEV0_PF2_DSTATE_VALUE__DEV0_PF2_DSTATE_ACK_VALUE_MASK 0x00030000L
++//BIF_DEV0_PF3_DSTATE_VALUE
++#define BIF_DEV0_PF3_DSTATE_VALUE__DEV0_PF3_DSTATE_TGT_VALUE__SHIFT 0x0
++#define BIF_DEV0_PF3_DSTATE_VALUE__DEV0_PF3_DSTATE_NEED_D3TOD0_RESET__SHIFT 0x2
++#define BIF_DEV0_PF3_DSTATE_VALUE__DEV0_PF3_DSTATE_ACK_VALUE__SHIFT 0x10
++#define BIF_DEV0_PF3_DSTATE_VALUE__DEV0_PF3_DSTATE_TGT_VALUE_MASK 0x00000003L
++#define BIF_DEV0_PF3_DSTATE_VALUE__DEV0_PF3_DSTATE_NEED_D3TOD0_RESET_MASK 0x00000004L
++#define BIF_DEV0_PF3_DSTATE_VALUE__DEV0_PF3_DSTATE_ACK_VALUE_MASK 0x00030000L
++//BIF_DEV0_PF4_DSTATE_VALUE
++#define BIF_DEV0_PF4_DSTATE_VALUE__DEV0_PF4_DSTATE_TGT_VALUE__SHIFT 0x0
++#define BIF_DEV0_PF4_DSTATE_VALUE__DEV0_PF4_DSTATE_NEED_D3TOD0_RESET__SHIFT 0x2
++#define BIF_DEV0_PF4_DSTATE_VALUE__DEV0_PF4_DSTATE_ACK_VALUE__SHIFT 0x10
++#define BIF_DEV0_PF4_DSTATE_VALUE__DEV0_PF4_DSTATE_TGT_VALUE_MASK 0x00000003L
++#define BIF_DEV0_PF4_DSTATE_VALUE__DEV0_PF4_DSTATE_NEED_D3TOD0_RESET_MASK 0x00000004L
++#define BIF_DEV0_PF4_DSTATE_VALUE__DEV0_PF4_DSTATE_ACK_VALUE_MASK 0x00030000L
++//BIF_DEV0_PF5_DSTATE_VALUE
++#define BIF_DEV0_PF5_DSTATE_VALUE__DEV0_PF5_DSTATE_TGT_VALUE__SHIFT 0x0
++#define BIF_DEV0_PF5_DSTATE_VALUE__DEV0_PF5_DSTATE_NEED_D3TOD0_RESET__SHIFT 0x2
++#define BIF_DEV0_PF5_DSTATE_VALUE__DEV0_PF5_DSTATE_ACK_VALUE__SHIFT 0x10
++#define BIF_DEV0_PF5_DSTATE_VALUE__DEV0_PF5_DSTATE_TGT_VALUE_MASK 0x00000003L
++#define BIF_DEV0_PF5_DSTATE_VALUE__DEV0_PF5_DSTATE_NEED_D3TOD0_RESET_MASK 0x00000004L
++#define BIF_DEV0_PF5_DSTATE_VALUE__DEV0_PF5_DSTATE_ACK_VALUE_MASK 0x00030000L
++//BIF_DEV0_PF6_DSTATE_VALUE
++#define BIF_DEV0_PF6_DSTATE_VALUE__DEV0_PF6_DSTATE_TGT_VALUE__SHIFT 0x0
++#define BIF_DEV0_PF6_DSTATE_VALUE__DEV0_PF6_DSTATE_NEED_D3TOD0_RESET__SHIFT 0x2
++#define BIF_DEV0_PF6_DSTATE_VALUE__DEV0_PF6_DSTATE_ACK_VALUE__SHIFT 0x10
++#define BIF_DEV0_PF6_DSTATE_VALUE__DEV0_PF6_DSTATE_TGT_VALUE_MASK 0x00000003L
++#define BIF_DEV0_PF6_DSTATE_VALUE__DEV0_PF6_DSTATE_NEED_D3TOD0_RESET_MASK 0x00000004L
++#define BIF_DEV0_PF6_DSTATE_VALUE__DEV0_PF6_DSTATE_ACK_VALUE_MASK 0x00030000L
++//BIF_DEV0_PF7_DSTATE_VALUE
++#define BIF_DEV0_PF7_DSTATE_VALUE__DEV0_PF7_DSTATE_TGT_VALUE__SHIFT 0x0
++#define BIF_DEV0_PF7_DSTATE_VALUE__DEV0_PF7_DSTATE_NEED_D3TOD0_RESET__SHIFT 0x2
++#define BIF_DEV0_PF7_DSTATE_VALUE__DEV0_PF7_DSTATE_ACK_VALUE__SHIFT 0x10
++#define BIF_DEV0_PF7_DSTATE_VALUE__DEV0_PF7_DSTATE_TGT_VALUE_MASK 0x00000003L
++#define BIF_DEV0_PF7_DSTATE_VALUE__DEV0_PF7_DSTATE_NEED_D3TOD0_RESET_MASK 0x00000004L
++#define BIF_DEV0_PF7_DSTATE_VALUE__DEV0_PF7_DSTATE_ACK_VALUE_MASK 0x00030000L
++//DEV0_PF0_D3HOTD0_RST_CTRL
++#define DEV0_PF0_D3HOTD0_RST_CTRL__PF_CFG_EN__SHIFT 0x0
++#define DEV0_PF0_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1
++#define DEV0_PF0_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2
++#define DEV0_PF0_D3HOTD0_RST_CTRL__PF_PRV_EN__SHIFT 0x3
++#define DEV0_PF0_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4
++#define DEV0_PF0_D3HOTD0_RST_CTRL__PF_CFG_EN_MASK 0x00000001L
++#define DEV0_PF0_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L
++#define DEV0_PF0_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L
++#define DEV0_PF0_D3HOTD0_RST_CTRL__PF_PRV_EN_MASK 0x00000008L
++#define DEV0_PF0_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L
++//DEV0_PF1_D3HOTD0_RST_CTRL
++#define DEV0_PF1_D3HOTD0_RST_CTRL__PF_CFG_EN__SHIFT 0x0
++#define DEV0_PF1_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1
++#define DEV0_PF1_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2
++#define DEV0_PF1_D3HOTD0_RST_CTRL__PF_PRV_EN__SHIFT 0x3
++#define DEV0_PF1_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4
++#define DEV0_PF1_D3HOTD0_RST_CTRL__PF_CFG_EN_MASK 0x00000001L
++#define DEV0_PF1_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L
++#define DEV0_PF1_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L
++#define DEV0_PF1_D3HOTD0_RST_CTRL__PF_PRV_EN_MASK 0x00000008L
++#define DEV0_PF1_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L
++//DEV0_PF2_D3HOTD0_RST_CTRL
++#define DEV0_PF2_D3HOTD0_RST_CTRL__PF_CFG_EN__SHIFT 0x0
++#define DEV0_PF2_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1
++#define DEV0_PF2_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2
++#define DEV0_PF2_D3HOTD0_RST_CTRL__PF_PRV_EN__SHIFT 0x3
++#define DEV0_PF2_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4
++#define DEV0_PF2_D3HOTD0_RST_CTRL__PF_CFG_EN_MASK 0x00000001L
++#define DEV0_PF2_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L
++#define DEV0_PF2_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L
++#define DEV0_PF2_D3HOTD0_RST_CTRL__PF_PRV_EN_MASK 0x00000008L
++#define DEV0_PF2_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L
++//DEV0_PF3_D3HOTD0_RST_CTRL
++#define DEV0_PF3_D3HOTD0_RST_CTRL__PF_CFG_EN__SHIFT 0x0
++#define DEV0_PF3_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1
++#define DEV0_PF3_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2
++#define DEV0_PF3_D3HOTD0_RST_CTRL__PF_PRV_EN__SHIFT 0x3
++#define DEV0_PF3_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4
++#define DEV0_PF3_D3HOTD0_RST_CTRL__PF_CFG_EN_MASK 0x00000001L
++#define DEV0_PF3_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L
++#define DEV0_PF3_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L
++#define DEV0_PF3_D3HOTD0_RST_CTRL__PF_PRV_EN_MASK 0x00000008L
++#define DEV0_PF3_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L
++//DEV0_PF4_D3HOTD0_RST_CTRL
++#define DEV0_PF4_D3HOTD0_RST_CTRL__PF_CFG_EN__SHIFT 0x0
++#define DEV0_PF4_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1
++#define DEV0_PF4_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2
++#define DEV0_PF4_D3HOTD0_RST_CTRL__PF_PRV_EN__SHIFT 0x3
++#define DEV0_PF4_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4
++#define DEV0_PF4_D3HOTD0_RST_CTRL__PF_CFG_EN_MASK 0x00000001L
++#define DEV0_PF4_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L
++#define DEV0_PF4_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L
++#define DEV0_PF4_D3HOTD0_RST_CTRL__PF_PRV_EN_MASK 0x00000008L
++#define DEV0_PF4_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L
++//DEV0_PF5_D3HOTD0_RST_CTRL
++#define DEV0_PF5_D3HOTD0_RST_CTRL__PF_CFG_EN__SHIFT 0x0
++#define DEV0_PF5_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1
++#define DEV0_PF5_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2
++#define DEV0_PF5_D3HOTD0_RST_CTRL__PF_PRV_EN__SHIFT 0x3
++#define DEV0_PF5_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4
++#define DEV0_PF5_D3HOTD0_RST_CTRL__PF_CFG_EN_MASK 0x00000001L
++#define DEV0_PF5_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L
++#define DEV0_PF5_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L
++#define DEV0_PF5_D3HOTD0_RST_CTRL__PF_PRV_EN_MASK 0x00000008L
++#define DEV0_PF5_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L
++//DEV0_PF6_D3HOTD0_RST_CTRL
++#define DEV0_PF6_D3HOTD0_RST_CTRL__PF_CFG_EN__SHIFT 0x0
++#define DEV0_PF6_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1
++#define DEV0_PF6_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2
++#define DEV0_PF6_D3HOTD0_RST_CTRL__PF_PRV_EN__SHIFT 0x3
++#define DEV0_PF6_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4
++#define DEV0_PF6_D3HOTD0_RST_CTRL__PF_CFG_EN_MASK 0x00000001L
++#define DEV0_PF6_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L
++#define DEV0_PF6_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L
++#define DEV0_PF6_D3HOTD0_RST_CTRL__PF_PRV_EN_MASK 0x00000008L
++#define DEV0_PF6_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L
++//DEV0_PF7_D3HOTD0_RST_CTRL
++#define DEV0_PF7_D3HOTD0_RST_CTRL__PF_CFG_EN__SHIFT 0x0
++#define DEV0_PF7_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1
++#define DEV0_PF7_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2
++#define DEV0_PF7_D3HOTD0_RST_CTRL__PF_PRV_EN__SHIFT 0x3
++#define DEV0_PF7_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4
++#define DEV0_PF7_D3HOTD0_RST_CTRL__PF_CFG_EN_MASK 0x00000001L
++#define DEV0_PF7_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L
++#define DEV0_PF7_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L
++#define DEV0_PF7_D3HOTD0_RST_CTRL__PF_PRV_EN_MASK 0x00000008L
++#define DEV0_PF7_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L
++//BIF_PORT0_DSTATE_VALUE
++#define BIF_PORT0_DSTATE_VALUE__PORT0_DSTATE_TGT_VALUE__SHIFT 0x0
++#define BIF_PORT0_DSTATE_VALUE__PORT0_DSTATE_ACK_VALUE__SHIFT 0x10
++#define BIF_PORT0_DSTATE_VALUE__PORT0_DSTATE_TGT_VALUE_MASK 0x00000003L
++#define BIF_PORT0_DSTATE_VALUE__PORT0_DSTATE_ACK_VALUE_MASK 0x00030000L
++
++
++// addressBlock: nbio_nbif0_bif_ras_bif_ras_regblk
++//BIFL_RAS_CENTRAL_CNTL
++#define BIFL_RAS_CENTRAL_CNTL__BIFL_RAS_CONTL_ERREVENT_DIS__SHIFT 0x1d
++#define BIFL_RAS_CENTRAL_CNTL__BIFL_RAS_CONTL_INTR_DIS__SHIFT 0x1e
++#define BIFL_RAS_CENTRAL_CNTL__BIFL_LINKDIS_TRIG_EGRESS_STALL_DIS__SHIFT 0x1f
++#define BIFL_RAS_CENTRAL_CNTL__BIFL_RAS_CONTL_ERREVENT_DIS_MASK 0x20000000L
++#define BIFL_RAS_CENTRAL_CNTL__BIFL_RAS_CONTL_INTR_DIS_MASK 0x40000000L
++#define BIFL_RAS_CENTRAL_CNTL__BIFL_LINKDIS_TRIG_EGRESS_STALL_DIS_MASK 0x80000000L
++//BIFL_RAS_CENTRAL_STATUS
++#define BIFL_RAS_CENTRAL_STATUS__BIFL_L2C_EgStall_det__SHIFT 0x0
++#define BIFL_RAS_CENTRAL_STATUS__BIFL_L2C_ErrEvent_det__SHIFT 0x1
++#define BIFL_RAS_CENTRAL_STATUS__BIFL_C2L_EgStall_det__SHIFT 0x2
++#define BIFL_RAS_CENTRAL_STATUS__BIFL_C2L_ErrEvent_det__SHIFT 0x3
++#define BIFL_RAS_CENTRAL_STATUS__BIFL_RasContller_ErrEvent_Recv__SHIFT 0x1d
++#define BIFL_RAS_CENTRAL_STATUS__BIFL_RasContller_Intr_Recv__SHIFT 0x1e
++#define BIFL_RAS_CENTRAL_STATUS__BIFL_LinkDis_Recv__SHIFT 0x1f
++#define BIFL_RAS_CENTRAL_STATUS__BIFL_L2C_EgStall_det_MASK 0x00000001L
++#define BIFL_RAS_CENTRAL_STATUS__BIFL_L2C_ErrEvent_det_MASK 0x00000002L
++#define BIFL_RAS_CENTRAL_STATUS__BIFL_C2L_EgStall_det_MASK 0x00000004L
++#define BIFL_RAS_CENTRAL_STATUS__BIFL_C2L_ErrEvent_det_MASK 0x00000008L
++#define BIFL_RAS_CENTRAL_STATUS__BIFL_RasContller_ErrEvent_Recv_MASK 0x20000000L
++#define BIFL_RAS_CENTRAL_STATUS__BIFL_RasContller_Intr_Recv_MASK 0x40000000L
++#define BIFL_RAS_CENTRAL_STATUS__BIFL_LinkDis_Recv_MASK 0x80000000L
++//BIFL_RAS_LEAF0_CTRL
++#define BIFL_RAS_LEAF0_CTRL__ERR_EVENT_DET_EN__SHIFT 0x0
++#define BIFL_RAS_LEAF0_CTRL__POISON_ERREVENT_EN__SHIFT 0x1
++#define BIFL_RAS_LEAF0_CTRL__POISON_STALL_EN__SHIFT 0x2
++#define BIFL_RAS_LEAF0_CTRL__PARITY_ERREVENT_EN__SHIFT 0x3
++#define BIFL_RAS_LEAF0_CTRL__PARITY_STALL_EN__SHIFT 0x4
++#define BIFL_RAS_LEAF0_CTRL__ERR_EVENT_GEN_EN__SHIFT 0x8
++#define BIFL_RAS_LEAF0_CTRL__EGRESS_STALL_GEN_EN__SHIFT 0x9
++#define BIFL_RAS_LEAF0_CTRL__ERR_EVENT_PROP_EN__SHIFT 0xa
++#define BIFL_RAS_LEAF0_CTRL__EGRESS_STALL_PROP_EN__SHIFT 0xb
++#define BIFL_RAS_LEAF0_CTRL__ERR_EVENT_RAS_INTR_EN__SHIFT 0x10
++#define BIFL_RAS_LEAF0_CTRL__ERR_EVENT_DET_EN_MASK 0x00000001L
++#define BIFL_RAS_LEAF0_CTRL__POISON_ERREVENT_EN_MASK 0x00000002L
++#define BIFL_RAS_LEAF0_CTRL__POISON_STALL_EN_MASK 0x00000004L
++#define BIFL_RAS_LEAF0_CTRL__PARITY_ERREVENT_EN_MASK 0x00000008L
++#define BIFL_RAS_LEAF0_CTRL__PARITY_STALL_EN_MASK 0x00000010L
++#define BIFL_RAS_LEAF0_CTRL__ERR_EVENT_GEN_EN_MASK 0x00000100L
++#define BIFL_RAS_LEAF0_CTRL__EGRESS_STALL_GEN_EN_MASK 0x00000200L
++#define BIFL_RAS_LEAF0_CTRL__ERR_EVENT_PROP_EN_MASK 0x00000400L
++#define BIFL_RAS_LEAF0_CTRL__EGRESS_STALL_PROP_EN_MASK 0x00000800L
++#define BIFL_RAS_LEAF0_CTRL__ERR_EVENT_RAS_INTR_EN_MASK 0x00010000L
++//BIFL_RAS_LEAF1_CTRL
++#define BIFL_RAS_LEAF1_CTRL__ERR_EVENT_DET_EN__SHIFT 0x0
++#define BIFL_RAS_LEAF1_CTRL__POISON_ERREVENT_EN__SHIFT 0x1
++#define BIFL_RAS_LEAF1_CTRL__POISON_STALL_EN__SHIFT 0x2
++#define BIFL_RAS_LEAF1_CTRL__PARITY_ERREVENT_EN__SHIFT 0x3
++#define BIFL_RAS_LEAF1_CTRL__PARITY_STALL_EN__SHIFT 0x4
++#define BIFL_RAS_LEAF1_CTRL__ERR_EVENT_GEN_EN__SHIFT 0x8
++#define BIFL_RAS_LEAF1_CTRL__EGRESS_STALL_GEN_EN__SHIFT 0x9
++#define BIFL_RAS_LEAF1_CTRL__ERR_EVENT_PROP_EN__SHIFT 0xa
++#define BIFL_RAS_LEAF1_CTRL__EGRESS_STALL_PROP_EN__SHIFT 0xb
++#define BIFL_RAS_LEAF1_CTRL__ERR_EVENT_RAS_INTR_EN__SHIFT 0x10
++#define BIFL_RAS_LEAF1_CTRL__ERR_EVENT_DET_EN_MASK 0x00000001L
++#define BIFL_RAS_LEAF1_CTRL__POISON_ERREVENT_EN_MASK 0x00000002L
++#define BIFL_RAS_LEAF1_CTRL__POISON_STALL_EN_MASK 0x00000004L
++#define BIFL_RAS_LEAF1_CTRL__PARITY_ERREVENT_EN_MASK 0x00000008L
++#define BIFL_RAS_LEAF1_CTRL__PARITY_STALL_EN_MASK 0x00000010L
++#define BIFL_RAS_LEAF1_CTRL__ERR_EVENT_GEN_EN_MASK 0x00000100L
++#define BIFL_RAS_LEAF1_CTRL__EGRESS_STALL_GEN_EN_MASK 0x00000200L
++#define BIFL_RAS_LEAF1_CTRL__ERR_EVENT_PROP_EN_MASK 0x00000400L
++#define BIFL_RAS_LEAF1_CTRL__EGRESS_STALL_PROP_EN_MASK 0x00000800L
++#define BIFL_RAS_LEAF1_CTRL__ERR_EVENT_RAS_INTR_EN_MASK 0x00010000L
++//BIFL_RAS_LEAF2_CTRL
++#define BIFL_RAS_LEAF2_CTRL__ERR_EVENT_DET_EN__SHIFT 0x0
++#define BIFL_RAS_LEAF2_CTRL__POISON_ERREVENT_EN__SHIFT 0x1
++#define BIFL_RAS_LEAF2_CTRL__POISON_STALL_EN__SHIFT 0x2
++#define BIFL_RAS_LEAF2_CTRL__PARITY_ERREVENT_EN__SHIFT 0x3
++#define BIFL_RAS_LEAF2_CTRL__PARITY_STALL_EN__SHIFT 0x4
++#define BIFL_RAS_LEAF2_CTRL__ERR_EVENT_GEN_EN__SHIFT 0x8
++#define BIFL_RAS_LEAF2_CTRL__EGRESS_STALL_GEN_EN__SHIFT 0x9
++#define BIFL_RAS_LEAF2_CTRL__ERR_EVENT_PROP_EN__SHIFT 0xa
++#define BIFL_RAS_LEAF2_CTRL__EGRESS_STALL_PROP_EN__SHIFT 0xb
++#define BIFL_RAS_LEAF2_CTRL__ERR_EVENT_RAS_INTR_EN__SHIFT 0x10
++#define BIFL_RAS_LEAF2_CTRL__ERR_EVENT_DET_EN_MASK 0x00000001L
++#define BIFL_RAS_LEAF2_CTRL__POISON_ERREVENT_EN_MASK 0x00000002L
++#define BIFL_RAS_LEAF2_CTRL__POISON_STALL_EN_MASK 0x00000004L
++#define BIFL_RAS_LEAF2_CTRL__PARITY_ERREVENT_EN_MASK 0x00000008L
++#define BIFL_RAS_LEAF2_CTRL__PARITY_STALL_EN_MASK 0x00000010L
++#define BIFL_RAS_LEAF2_CTRL__ERR_EVENT_GEN_EN_MASK 0x00000100L
++#define BIFL_RAS_LEAF2_CTRL__EGRESS_STALL_GEN_EN_MASK 0x00000200L
++#define BIFL_RAS_LEAF2_CTRL__ERR_EVENT_PROP_EN_MASK 0x00000400L
++#define BIFL_RAS_LEAF2_CTRL__EGRESS_STALL_PROP_EN_MASK 0x00000800L
++#define BIFL_RAS_LEAF2_CTRL__ERR_EVENT_RAS_INTR_EN_MASK 0x00010000L
++//BIFL_RAS_LEAF0_STATUS
++#define BIFL_RAS_LEAF0_STATUS__ERR_EVENT_RECV__SHIFT 0x0
++#define BIFL_RAS_LEAF0_STATUS__POISON_ERR_DET__SHIFT 0x1
++#define BIFL_RAS_LEAF0_STATUS__PARITY_ERR_DET__SHIFT 0x2
++#define BIFL_RAS_LEAF0_STATUS__ERR_EVENT_GENN_STAT__SHIFT 0x8
++#define BIFL_RAS_LEAF0_STATUS__EGRESS_STALLED_GENN_STAT__SHIFT 0x9
++#define BIFL_RAS_LEAF0_STATUS__ERR_EVENT_PROP_STAT__SHIFT 0xa
++#define BIFL_RAS_LEAF0_STATUS__EGRESS_STALLED_PROP_STAT__SHIFT 0xb
++#define BIFL_RAS_LEAF0_STATUS__ERR_EVENT_RECV_MASK 0x00000001L
++#define BIFL_RAS_LEAF0_STATUS__POISON_ERR_DET_MASK 0x00000002L
++#define BIFL_RAS_LEAF0_STATUS__PARITY_ERR_DET_MASK 0x00000004L
++#define BIFL_RAS_LEAF0_STATUS__ERR_EVENT_GENN_STAT_MASK 0x00000100L
++#define BIFL_RAS_LEAF0_STATUS__EGRESS_STALLED_GENN_STAT_MASK 0x00000200L
++#define BIFL_RAS_LEAF0_STATUS__ERR_EVENT_PROP_STAT_MASK 0x00000400L
++#define BIFL_RAS_LEAF0_STATUS__EGRESS_STALLED_PROP_STAT_MASK 0x00000800L
++//BIFL_RAS_LEAF1_STATUS
++#define BIFL_RAS_LEAF1_STATUS__ERR_EVENT_RECV__SHIFT 0x0
++#define BIFL_RAS_LEAF1_STATUS__POISON_ERR_DET__SHIFT 0x1
++#define BIFL_RAS_LEAF1_STATUS__PARITY_ERR_DET__SHIFT 0x2
++#define BIFL_RAS_LEAF1_STATUS__ERR_EVENT_GENN_STAT__SHIFT 0x8
++#define BIFL_RAS_LEAF1_STATUS__EGRESS_STALLED_GENN_STAT__SHIFT 0x9
++#define BIFL_RAS_LEAF1_STATUS__ERR_EVENT_PROP_STAT__SHIFT 0xa
++#define BIFL_RAS_LEAF1_STATUS__EGRESS_STALLED_PROP_STAT__SHIFT 0xb
++#define BIFL_RAS_LEAF1_STATUS__ERR_EVENT_RECV_MASK 0x00000001L
++#define BIFL_RAS_LEAF1_STATUS__POISON_ERR_DET_MASK 0x00000002L
++#define BIFL_RAS_LEAF1_STATUS__PARITY_ERR_DET_MASK 0x00000004L
++#define BIFL_RAS_LEAF1_STATUS__ERR_EVENT_GENN_STAT_MASK 0x00000100L
++#define BIFL_RAS_LEAF1_STATUS__EGRESS_STALLED_GENN_STAT_MASK 0x00000200L
++#define BIFL_RAS_LEAF1_STATUS__ERR_EVENT_PROP_STAT_MASK 0x00000400L
++#define BIFL_RAS_LEAF1_STATUS__EGRESS_STALLED_PROP_STAT_MASK 0x00000800L
++//BIFL_RAS_LEAF2_STATUS
++#define BIFL_RAS_LEAF2_STATUS__ERR_EVENT_RECV__SHIFT 0x0
++#define BIFL_RAS_LEAF2_STATUS__POISON_ERR_DET__SHIFT 0x1
++#define BIFL_RAS_LEAF2_STATUS__PARITY_ERR_DET__SHIFT 0x2
++#define BIFL_RAS_LEAF2_STATUS__ERR_EVENT_GENN_STAT__SHIFT 0x8
++#define BIFL_RAS_LEAF2_STATUS__EGRESS_STALLED_GENN_STAT__SHIFT 0x9
++#define BIFL_RAS_LEAF2_STATUS__ERR_EVENT_PROP_STAT__SHIFT 0xa
++#define BIFL_RAS_LEAF2_STATUS__EGRESS_STALLED_PROP_STAT__SHIFT 0xb
++#define BIFL_RAS_LEAF2_STATUS__ERR_EVENT_RECV_MASK 0x00000001L
++#define BIFL_RAS_LEAF2_STATUS__POISON_ERR_DET_MASK 0x00000002L
++#define BIFL_RAS_LEAF2_STATUS__PARITY_ERR_DET_MASK 0x00000004L
++#define BIFL_RAS_LEAF2_STATUS__ERR_EVENT_GENN_STAT_MASK 0x00000100L
++#define BIFL_RAS_LEAF2_STATUS__EGRESS_STALLED_GENN_STAT_MASK 0x00000200L
++#define BIFL_RAS_LEAF2_STATUS__ERR_EVENT_PROP_STAT_MASK 0x00000400L
++#define BIFL_RAS_LEAF2_STATUS__EGRESS_STALLED_PROP_STAT_MASK 0x00000800L
++//BIFL_IOHUB_RAS_IH_CNTL
++#define BIFL_IOHUB_RAS_IH_CNTL__BIFL_RAS_IH_INTR_EN__SHIFT 0x0
++#define BIFL_IOHUB_RAS_IH_CNTL__BIFL_RAS_IH_INTR_EN_MASK 0x00000001L
++//BIFL_RAS_VWR_FROM_IOHUB
++#define BIFL_RAS_VWR_FROM_IOHUB__BIFL_RAS_IH_INTR_TRIG__SHIFT 0x0
++#define BIFL_RAS_VWR_FROM_IOHUB__BIFL_RAS_IH_INTR_TRIG_MASK 0x00000001L
++
++
++// addressBlock: nbio_nbif0_bif_swus_SUMDEC
++//SUM_INDEX
++#define SUM_INDEX__SUM_INDEX__SHIFT 0x0
++#define SUM_INDEX__SUM_INDEX_MASK 0xFFFFFFFFL
++//SUM_DATA
++#define SUM_DATA__SUM_DATA__SHIFT 0x0
++#define SUM_DATA__SUM_DATA_MASK 0xFFFFFFFFL
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_bifcfgdecp
++//BIF_CFG_DEV0_EPF0_0_VENDOR_ID
++#define BIF_CFG_DEV0_EPF0_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_0_DEVICE_ID
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_0_COMMAND
++#define BIF_CFG_DEV0_EPF0_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_COMMAND__AD_STEPPING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_0_COMMAND__SERR_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_COMMAND__FAST_B2B_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_0_COMMAND__INT_DIS__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_COMMAND__AD_STEPPING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_0_COMMAND__SERR_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_0_COMMAND__FAST_B2B_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_0_COMMAND__INT_DIS_MASK 0x0400L
++//BIF_CFG_DEV0_EPF0_0_STATUS
++#define BIF_CFG_DEV0_EPF0_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_STATUS__INT_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_STATUS__CAP_LIST__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_STATUS__PCI_66_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_STATUS__DEVSEL_TIMING__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_0_STATUS__INT_STATUS_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_0_STATUS__CAP_LIST_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_0_STATUS__PCI_66_CAP_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_0_STATUS__DEVSEL_TIMING_MASK 0x0600L
++#define BIF_CFG_DEV0_EPF0_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_REVISION_ID
++#define BIF_CFG_DEV0_EPF0_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_0_PROG_INTERFACE
++#define BIF_CFG_DEV0_EPF0_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_0_SUB_CLASS
++#define BIF_CFG_DEV0_EPF0_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_0_BASE_CLASS
++#define BIF_CFG_DEV0_EPF0_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_0_CACHE_LINE
++#define BIF_CFG_DEV0_EPF0_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_0_LATENCY
++#define BIF_CFG_DEV0_EPF0_0_LATENCY__LATENCY_TIMER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LATENCY__LATENCY_TIMER_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_0_HEADER
++#define BIF_CFG_DEV0_EPF0_0_HEADER__HEADER_TYPE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_HEADER__DEVICE_TYPE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_0_HEADER__HEADER_TYPE_MASK 0x7FL
++#define BIF_CFG_DEV0_EPF0_0_HEADER__DEVICE_TYPE_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_0_BIST
++#define BIF_CFG_DEV0_EPF0_0_BIST__BIST_COMP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_BIST__BIST_STRT__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_BIST__BIST_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_0_BIST__BIST_COMP_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_0_BIST__BIST_STRT_MASK 0x40L
++#define BIF_CFG_DEV0_EPF0_0_BIST__BIST_CAP_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_0_BASE_ADDR_1
++#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_BASE_ADDR_2
++#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_BASE_ADDR_3
++#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_BASE_ADDR_4
++#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_BASE_ADDR_5
++#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_BASE_ADDR_6
++#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_ADAPTER_ID
++#define BIF_CFG_DEV0_EPF0_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_ROM_BASE_ADDR
++#define BIF_CFG_DEV0_EPF0_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_CAP_PTR
++#define BIF_CFG_DEV0_EPF0_0_CAP_PTR__CAP_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL
++//BIF_CFG_DEV0_EPF0_0_INTERRUPT_LINE
++#define BIF_CFG_DEV0_EPF0_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_0_INTERRUPT_PIN
++#define BIF_CFG_DEV0_EPF0_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_0_MIN_GRANT
++#define BIF_CFG_DEV0_EPF0_0_MIN_GRANT__MIN_GNT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_MIN_GRANT__MIN_GNT_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_0_MAX_LATENCY
++#define BIF_CFG_DEV0_EPF0_0_MAX_LATENCY__MAX_LAT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_MAX_LATENCY__MAX_LAT_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_0_VENDOR_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_VENDOR_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_VENDOR_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_VENDOR_CAP_LIST__LENGTH__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_VENDOR_CAP_LIST__CAP_ID_MASK 0x000000FFL
++#define BIF_CFG_DEV0_EPF0_0_VENDOR_CAP_LIST__NEXT_PTR_MASK 0x0000FF00L
++#define BIF_CFG_DEV0_EPF0_0_VENDOR_CAP_LIST__LENGTH_MASK 0x00FF0000L
++//BIF_CFG_DEV0_EPF0_0_ADAPTER_ID_W
++#define BIF_CFG_DEV0_EPF0_0_ADAPTER_ID_W__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_ADAPTER_ID_W__SUBSYSTEM_ID__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_ADAPTER_ID_W__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_ADAPTER_ID_W__SUBSYSTEM_ID_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PMI_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_PMI_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PMI_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PMI_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_0_PMI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_PMI_CAP
++#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__VERSION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__PME_CLOCK__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__DEV_SPECIFIC_INIT__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__AUX_CURRENT__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__D1_SUPPORT__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__D2_SUPPORT__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__PME_SUPPORT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__VERSION_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__PME_CLOCK_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__DEV_SPECIFIC_INIT_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__AUX_CURRENT_MASK 0x01C0L
++#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__D1_SUPPORT_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__D2_SUPPORT_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__PME_SUPPORT_MASK 0xF800L
++//BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__POWER_STATE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__NO_SOFT_RESET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__PME_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__DATA_SELECT__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__DATA_SCALE__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__PME_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__B2_B3_SUPPORT__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__BUS_PWR_EN__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__PMI_DATA__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__POWER_STATE_MASK 0x00000003L
++#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__NO_SOFT_RESET_MASK 0x00000008L
++#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__PME_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__DATA_SELECT_MASK 0x00001E00L
++#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__DATA_SCALE_MASK 0x00006000L
++#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__PME_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__B2_B3_SUPPORT_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__BUS_PWR_EN_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__PMI_DATA_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_PCIE_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP__VERSION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP__VERSION_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
++//BIF_CFG_DEV0_EPF0_0_DEVICE_CAP
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
++//BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_0_LINK_CAP
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF0_0_LINK_CNTL
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__LINK_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__LINK_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
++//BIF_CFG_DEV0_EPF0_0_LINK_STATUS
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
++//BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS2
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_0_LINK_CAP2
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__RESERVED__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__RESERVED_MASK 0xFE000000L
++//BIF_CFG_DEV0_EPF0_0_LINK_CNTL2
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
++//BIF_CFG_DEV0_EPF0_0_LINK_STATUS2
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
++//BIF_CFG_DEV0_EPF0_0_SLOT_CAP2
++#define BIF_CFG_DEV0_EPF0_0_SLOT_CAP2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_SLOT_CNTL2
++#define BIF_CFG_DEV0_EPF0_0_SLOT_CNTL2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_0_SLOT_STATUS2
++#define BIF_CFG_DEV0_EPF0_0_SLOT_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_0_MSI_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
++#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
++//BIF_CFG_DEV0_EPF0_0_MSI_MSG_ADDR_LO
++#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//BIF_CFG_DEV0_EPF0_0_MSI_MSG_ADDR_HI
++#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_MSI_MSG_DATA
++#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_0_MSI_MASK
++#define BIF_CFG_DEV0_EPF0_0_MSI_MASK__MSI_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_MSI_MSG_DATA_64
++#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_0_MSI_MASK_64
++#define BIF_CFG_DEV0_EPF0_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_MSI_PENDING
++#define BIF_CFG_DEV0_EPF0_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_MSI_PENDING_64
++#define BIF_CFG_DEV0_EPF0_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_MSIX_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_MSIX_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
++#define BIF_CFG_DEV0_EPF0_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_MSIX_TABLE
++#define BIF_CFG_DEV0_EPF0_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_0_MSIX_PBA
++#define BIF_CFG_DEV0_EPF0_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1__EXT_VC_COUNT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1__LOW_PRIORITY_EXT_VC_COUNT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1__REF_CLK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1__PORT_ARB_TABLE_ENTRY_SIZE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1__EXT_VC_COUNT_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1__LOW_PRIORITY_EXT_VC_COUNT_MASK 0x00000070L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1__REF_CLK_MASK 0x00000300L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1__PORT_ARB_TABLE_ENTRY_SIZE_MASK 0x00000C00L
++//BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG2__VC_ARB_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG2__VC_ARB_TABLE_OFFSET__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG2__VC_ARB_CAP_MASK 0x000000FFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG2__VC_ARB_TABLE_OFFSET_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CNTL__LOAD_VC_ARB_TABLE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CNTL__VC_ARB_SELECT__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CNTL__LOAD_VC_ARB_TABLE_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CNTL__VC_ARB_SELECT_MASK 0x000EL
++//BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_STATUS
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_STATUS__VC_ARB_TABLE_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_STATUS__VC_ARB_TABLE_STATUS_MASK 0x0001L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP__REJECT_SNOOP_TRANS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP__MAX_TIME_SLOTS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_CAP_MASK 0x000000FFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP__REJECT_SNOOP_TRANS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP__MAX_TIME_SLOTS_MASK 0x003F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC0__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC1_7__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__PORT_ARB_SELECT__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__VC_ID__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__VC_ENABLE__SHIFT 0x1f
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC0_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC1_7_MASK 0x000000FEL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__PORT_ARB_SELECT_MASK 0x000E0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__VC_ID_MASK 0x07000000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__VC_ENABLE_MASK 0x80000000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_STATUS
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_STATUS__VC_NEGOTIATION_PENDING__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_STATUS__VC_NEGOTIATION_PENDING_MASK 0x0002L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP__REJECT_SNOOP_TRANS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP__MAX_TIME_SLOTS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_CAP_MASK 0x000000FFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP__REJECT_SNOOP_TRANS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP__MAX_TIME_SLOTS_MASK 0x003F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC0__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC1_7__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__PORT_ARB_SELECT__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__VC_ID__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__VC_ENABLE__SHIFT 0x1f
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC0_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC1_7_MASK 0x000000FEL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__PORT_ARB_SELECT_MASK 0x000E0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__VC_ID_MASK 0x07000000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__VC_ENABLE_MASK 0x80000000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_STATUS
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_STATUS__VC_NEGOTIATION_PENDING__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_STATUS__VC_NEGOTIATION_PENDING_MASK 0x0002L
++//BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_DW1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_DW1__SERIAL_NUMBER_LO__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_DW1__SERIAL_NUMBER_LO_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_DW2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_DW2__SERIAL_NUMBER_HI__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_DW2__SERIAL_NUMBER_HI_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG3
++#define BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG3
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_BAR_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CNTL__BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CNTL__BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CNTL__BAR_INDEX_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CNTL__BAR_SIZE_MASK 0x3F00L
++//BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CNTL__BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CNTL__BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CNTL__BAR_INDEX_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CNTL__BAR_SIZE_MASK 0x3F00L
++//BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CNTL__BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CNTL__BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CNTL__BAR_INDEX_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CNTL__BAR_SIZE_MASK 0x3F00L
++//BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CNTL__BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CNTL__BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CNTL__BAR_INDEX_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CNTL__BAR_SIZE_MASK 0x3F00L
++//BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CNTL__BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CNTL__BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CNTL__BAR_INDEX_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CNTL__BAR_SIZE_MASK 0x3F00L
++//BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CNTL__BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CNTL__BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CNTL__BAR_INDEX_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CNTL__BAR_SIZE_MASK 0x3F00L
++//BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA_SELECT
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA_SELECT__DATA_SELECT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA_SELECT__DATA_SELECT_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__BASE_POWER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__DATA_SCALE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__PM_SUB_STATE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__PM_STATE__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__TYPE__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__POWER_RAIL__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__BASE_POWER_MASK 0x000000FFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__DATA_SCALE_MASK 0x00000300L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__PM_SUB_STATE_MASK 0x00001C00L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__PM_STATE_MASK 0x00006000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__TYPE_MASK 0x00038000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__POWER_RAIL_MASK 0x001C0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_CAP__SYSTEM_ALLOCATED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_CAP__SYSTEM_ALLOCATED_MASK 0x01L
++//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__SUBSTATE_MAX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__TRANS_LAT_UNIT__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__PWR_ALLOC_SCALE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__TRANS_LAT_VAL_0__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__TRANS_LAT_VAL_1__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__SUBSTATE_MAX_MASK 0x0000001FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__TRANS_LAT_UNIT_MASK 0x00000300L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__PWR_ALLOC_SCALE_MASK 0x00003000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__TRANS_LAT_VAL_0_MASK 0x00FF0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__TRANS_LAT_VAL_1_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_LATENCY_INDICATOR
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_STATUS
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_STATUS__SUBSTATE_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_STATUS__SUBSTATE_CNTL_ENABLED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_STATUS__SUBSTATE_STATUS_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_STATUS__SUBSTATE_CNTL_ENABLED_MASK 0x0100L
++//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CNTL__SUBSTATE_CNTL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CNTL__SUBSTATE_CNTL_MASK 0x1FL
++//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_SECONDARY_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SECONDARY_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SECONDARY_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_LINK_CNTL3
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LINK_CNTL3__PERFORM_EQUALIZATION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LINK_CNTL3__LINK_EQUALIZATION_REQ_INT_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LINK_CNTL3__RESERVED__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LINK_CNTL3__PERFORM_EQUALIZATION_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LINK_CNTL3__LINK_EQUALIZATION_REQ_INT_EN_MASK 0x00000002L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LINK_CNTL3__RESERVED_MASK 0xFFFFFFFCL
++//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_ERROR_STATUS
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_ERROR_STATUS__LANE_ERROR_STATUS_BITS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_ERROR_STATUS__RESERVED__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_ERROR_STATUS__LANE_ERROR_STATUS_BITS_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_ERROR_STATUS__RESERVED_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_ACS_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__SOURCE_VALIDATION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__TRANSLATION_BLOCKING__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__UPSTREAM_FORWARDING__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__P2P_EGRESS_CONTROL__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__SOURCE_VALIDATION_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__TRANSLATION_BLOCKING_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__UPSTREAM_FORWARDING_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__P2P_EGRESS_CONTROL_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_0_PCIE_ATS_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CNTL__STU__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CNTL__STU_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_CNTL__PRI_ENABLE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_CNTL__PRI_RESET__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_CNTL__PRI_ENABLE_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_CNTL__PRI_RESET_MASK 0x0002L
++//BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS__RESPONSE_FAILURE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS__UNEXPECTED_PAGE_REQ_GRP_INDEX__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS__STOPPED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS__PRG_RESPONSE_PASID_REQUIRED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS__RESPONSE_FAILURE_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS__UNEXPECTED_PAGE_REQ_GRP_INDEX_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS__STOPPED_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS__PRG_RESPONSE_PASID_REQUIRED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_OUTSTAND_PAGE_REQ_CAPACITY
++#define BIF_CFG_DEV0_EPF0_0_PCIE_OUTSTAND_PAGE_REQ_CAPACITY__OUTSTAND_PAGE_REQ_CAPACITY__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_OUTSTAND_PAGE_REQ_CAPACITY__OUTSTAND_PAGE_REQ_CAPACITY_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_OUTSTAND_PAGE_REQ_ALLOC
++#define BIF_CFG_DEV0_EPF0_0_PCIE_OUTSTAND_PAGE_REQ_ALLOC__OUTSTAND_PAGE_REQ_ALLOC__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_OUTSTAND_PAGE_REQ_ALLOC__OUTSTAND_PAGE_REQ_ALLOC_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_PASID_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CAP__PASID_EXE_PERMISSION_SUPPORTED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CAP__PASID_PRIV_MODE_SUPPORTED__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CAP__MAX_PASID_WIDTH__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CAP__PASID_EXE_PERMISSION_SUPPORTED_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CAP__PASID_PRIV_MODE_SUPPORTED_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CAP__MAX_PASID_WIDTH_MASK 0x1F00L
++//BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CNTL__PASID_ENABLE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CNTL__PASID_EXE_PERMISSION_ENABLE__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CNTL__PASID_PRIV_MODE_SUPPORTED_ENABLE__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CNTL__PASID_ENABLE_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CNTL__PASID_EXE_PERMISSION_ENABLE_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CNTL__PASID_PRIV_MODE_SUPPORTED_ENABLE_MASK 0x0004L
++//BIF_CFG_DEV0_EPF0_0_PCIE_MC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_MC_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CAP__MC_MAX_GROUP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CAP__MC_WIN_SIZE_REQ__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CAP__MC_ECRC_REGEN_SUPP__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CAP__MC_MAX_GROUP_MASK 0x003FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CAP__MC_WIN_SIZE_REQ_MASK 0x3F00L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CAP__MC_ECRC_REGEN_SUPP_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_MC_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CNTL__MC_NUM_GROUP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CNTL__MC_ENABLE__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CNTL__MC_NUM_GROUP_MASK 0x003FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CNTL__MC_ENABLE_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR0__MC_INDEX_POS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR0__MC_BASE_ADDR_0__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR0__MC_INDEX_POS_MASK 0x0000003FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR0__MC_BASE_ADDR_0_MASK 0xFFFFF000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR1__MC_BASE_ADDR_1__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR1__MC_BASE_ADDR_1_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_MC_RCV0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_RCV0__MC_RECEIVE_0__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_RCV0__MC_RECEIVE_0_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_MC_RCV1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_RCV1__MC_RECEIVE_1__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_RCV1__MC_RECEIVE_1_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_ALL0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_ALL0__MC_BLOCK_ALL_0__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_ALL0__MC_BLOCK_ALL_0_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_ALL1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_ALL1__MC_BLOCK_ALL_1__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_ALL1__MC_BLOCK_ALL_1_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_UNTRANSLATED_0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_UNTRANSLATED_0__MC_BLOCK_UNTRANSLATED_0__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_UNTRANSLATED_0__MC_BLOCK_UNTRANSLATED_0_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_UNTRANSLATED_1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_UNTRANSLATED_1__MC_BLOCK_UNTRANSLATED_1__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_UNTRANSLATED_1__MC_BLOCK_UNTRANSLATED_1_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_LTR_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_VALUE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_SCALE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_VALUE__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_SCALE__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_VALUE_MASK 0x000003FFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_SCALE_MASK 0x00001C00L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_VALUE_MASK 0x03FF0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_SCALE_MASK 0x1C000000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_ARI_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
++//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP__SRIOV_ARI_CAP_HIERARCHY_PRESERVED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP__SRIOV_VF_TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_INTR_MSG_NUM__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_CAP_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP__SRIOV_ARI_CAP_HIERARCHY_PRESERVED_MASK 0x00000002L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP__SRIOV_VF_TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00000004L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_INTR_MSG_NUM_MASK 0xFFE00000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_ENABLE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_ENABLE__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_INTR_ENABLE__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MSE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_ARI_CAP_HIERARCHY__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_ENABLE_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_ENABLE_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_INTR_ENABLE_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MSE_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_ARI_CAP_HIERARCHY_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x0020L
++//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_STATUS
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_STATUS__SRIOV_VF_MIGRATION_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_STATUS__SRIOV_VF_MIGRATION_STATUS_MASK 0x0001L
++//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_INITIAL_VFS
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_INITIAL_VFS__SRIOV_INITIAL_VFS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_INITIAL_VFS__SRIOV_INITIAL_VFS_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_TOTAL_VFS
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_TOTAL_VFS__SRIOV_TOTAL_VFS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_TOTAL_VFS__SRIOV_TOTAL_VFS_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_NUM_VFS
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_NUM_VFS__SRIOV_NUM_VFS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_NUM_VFS__SRIOV_NUM_VFS_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_FUNC_DEP_LINK
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_FUNC_DEP_LINK__SRIOV_FUNC_DEP_LINK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_FUNC_DEP_LINK__SRIOV_FUNC_DEP_LINK_MASK 0x00FFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_FIRST_VF_OFFSET
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_FIRST_VF_OFFSET__SRIOV_FIRST_VF_OFFSET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_FIRST_VF_OFFSET__SRIOV_FIRST_VF_OFFSET_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_STRIDE
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_STRIDE__SRIOV_VF_STRIDE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_STRIDE__SRIOV_VF_STRIDE_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_DEVICE_ID
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_DEVICE_ID__SRIOV_VF_DEVICE_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_DEVICE_ID__SRIOV_VF_DEVICE_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_SUPPORTED_PAGE_SIZE
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_SUPPORTED_PAGE_SIZE__SRIOV_SUPPORTED_PAGE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_SUPPORTED_PAGE_SIZE__SRIOV_SUPPORTED_PAGE_SIZE_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_SYSTEM_PAGE_SIZE
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_SYSTEM_PAGE_SIZE__SRIOV_SYSTEM_PAGE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_SYSTEM_PAGE_SIZE__SRIOV_SYSTEM_PAGE_SIZE_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_0__VF_BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_0__VF_BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_1__VF_BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_1__VF_BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_2__VF_BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_2__VF_BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_3
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_3__VF_BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_3__VF_BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_4__VF_BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_4__VF_BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_5__VF_BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_5__VF_BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_BIF__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_BIF_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_NO_ST_MODE_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_INT_VEC_MODE_SUPPORTED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_DEV_SPC_MODE_SUPPORTED__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_EXTND_TPH_REQR_SUPPORED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_ST_TABLE_LOCATION__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_ST_TABLE_SIZE__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_NO_ST_MODE_SUPPORTED_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_INT_VEC_MODE_SUPPORTED_MASK 0x00000002L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_DEV_SPC_MODE_SUPPORTED_MASK 0x00000004L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_EXTND_TPH_REQR_SUPPORED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_ST_TABLE_LOCATION_MASK 0x00000600L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_ST_TABLE_SIZE_MASK 0x07FF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CNTL__TPH_REQR_ST_MODE_SEL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CNTL__TPH_REQR_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CNTL__TPH_REQR_ST_MODE_SEL_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CNTL__TPH_REQR_EN_MASK 0x00000300L
++//BIF_CFG_DEV0_EPF0_0_PCIE_DLF_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DLF_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DLF_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DLF_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DLF_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DLF_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_DLF_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_CAP
++#define BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_CAP__LOCAL_DLF_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_CAP__DLF_EXCHANGE_ENABLE__SHIFT 0x1f
++#define BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_CAP__LOCAL_DLF_SUPPORTED_MASK 0x007FFFFFL
++#define BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_CAP__DLF_EXCHANGE_ENABLE_MASK 0x80000000L
++//BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_STATUS
++#define BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_VALID__SHIFT 0x1f
++#define BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_MASK 0x007FFFFFL
++#define BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_VALID_MASK 0x80000000L
++//BIF_CFG_DEV0_EPF0_0_PHY_16GT_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_PHY_16GT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PHY_16GT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PHY_16GT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PHY_16GT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PHY_16GT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PHY_16GT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_LINK_CAP_16GT
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP_16GT__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LINK_CAP_16GT__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_LINK_CNTL_16GT
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL_16GT__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL_16GT__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__EQUALIZATION_COMPLETE_16GT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__EQUALIZATION_PHASE1_SUCCESS_16GT__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__EQUALIZATION_PHASE2_SUCCESS_16GT__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__EQUALIZATION_PHASE3_SUCCESS_16GT__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__LINK_EQUALIZATION_REQUEST_16GT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__EQUALIZATION_COMPLETE_16GT_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__EQUALIZATION_PHASE1_SUCCESS_16GT_MASK 0x00000002L
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__EQUALIZATION_PHASE2_SUCCESS_16GT_MASK 0x00000004L
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__EQUALIZATION_PHASE3_SUCCESS_16GT_MASK 0x00000008L
++#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__LINK_EQUALIZATION_REQUEST_16GT_MASK 0x00000010L
++//BIF_CFG_DEV0_EPF0_0_LOCAL_PARITY_MISMATCH_STATUS_16GT
++#define BIF_CFG_DEV0_EPF0_0_LOCAL_PARITY_MISMATCH_STATUS_16GT__LOCAL_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LOCAL_PARITY_MISMATCH_STATUS_16GT__LOCAL_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_0_RTM1_PARITY_MISMATCH_STATUS_16GT
++#define BIF_CFG_DEV0_EPF0_0_RTM1_PARITY_MISMATCH_STATUS_16GT__RTM1_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_RTM1_PARITY_MISMATCH_STATUS_16GT__RTM1_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_0_RTM2_PARITY_MISMATCH_STATUS_16GT
++#define BIF_CFG_DEV0_EPF0_0_RTM2_PARITY_MISMATCH_STATUS_16GT__RTM2_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_RTM2_PARITY_MISMATCH_STATUS_16GT__RTM2_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_0_LANE_0_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF0_0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_0_LANE_1_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF0_0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_0_LANE_2_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF0_0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_0_LANE_3_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF0_0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_0_LANE_4_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF0_0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_0_LANE_5_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF0_0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_0_LANE_6_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF0_0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_0_LANE_7_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF0_0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_0_LANE_8_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF0_0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_0_LANE_9_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF0_0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_0_LANE_10_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF0_0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_0_LANE_11_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF0_0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_0_LANE_12_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF0_0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_0_LANE_13_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF0_0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_0_LANE_14_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF0_0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_0_LANE_15_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF0_0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_0_MARGINING_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_MARGINING_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_MARGINING_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_MARGINING_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_MARGINING_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_MARGINING_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_MARGINING_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_MARGINING_PORT_CAP
++#define BIF_CFG_DEV0_EPF0_0_MARGINING_PORT_CAP__MARGINING_USES_SOFTWARE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_MARGINING_PORT_CAP__MARGINING_USES_SOFTWARE_MASK 0x0001L
++//BIF_CFG_DEV0_EPF0_0_MARGINING_PORT_STATUS
++#define BIF_CFG_DEV0_EPF0_0_MARGINING_PORT_STATUS__MARGINING_READY__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_MARGINING_PORT_STATUS__MARGINING_SOFTWARE_READY__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_MARGINING_PORT_STATUS__MARGINING_READY_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_0_MARGINING_PORT_STATUS__MARGINING_SOFTWARE_READY_MASK 0x0002L
++//BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR1_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR1_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR1_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR1_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_INDEX_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_SIZE_MASK 0x00003F00L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR2_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR2_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR2_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR2_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_INDEX_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_SIZE_MASK 0x00003F00L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR3_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR3_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR3_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR3_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_INDEX_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_SIZE_MASK 0x00003F00L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR4_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR4_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR4_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR4_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_INDEX_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_SIZE_MASK 0x00003F00L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR5_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR5_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR5_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR5_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_INDEX_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_SIZE_MASK 0x00003F00L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR6_CAP
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR6_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR6_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR6_CNTL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_INDEX_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_SIZE_MASK 0x00003F00L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_REV__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_LENGTH__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_REV_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_LENGTH_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW__VF_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW__VF_NUM__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW__VF_EN_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW__VF_NUM_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_CMD_COMPLETE_INTR_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_HANG_NEED_FLR_INTR_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_CMD_COMPLETE_INTR_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_HANG_NEED_FLR_INTR_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_CMD_COMPLETE_INTR_EN__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_HANG_NEED_FLR_INTR_EN__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_CMD_COMPLETE_INTR_EN__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_HANG_NEED_FLR_INTR_EN__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__HVVM_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__HVVM_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_CMD_COMPLETE_INTR_EN_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00000002L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_HANG_NEED_FLR_INTR_EN_MASK 0x00000004L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00000008L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_CMD_COMPLETE_INTR_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_HANG_NEED_FLR_INTR_EN_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_CMD_COMPLETE_INTR_EN_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_HANG_NEED_FLR_INTR_EN_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_CMD_COMPLETE_INTR_EN_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_HANG_NEED_FLR_INTR_EN_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__HVVM_MAILBOX_TRN_ACK_INTR_EN_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__HVVM_MAILBOX_RCV_VALID_INTR_EN_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_CMD_COMPLETE_INTR_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_HANG_NEED_FLR_INTR_STATUS__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_CMD_COMPLETE_INTR_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_HANG_NEED_FLR_INTR_STATUS__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_CMD_COMPLETE_INTR_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_HANG_NEED_FLR_INTR_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_CMD_COMPLETE_INTR_STATUS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_HANG_NEED_FLR_INTR_STATUS__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__HVVM_MAILBOX_TRN_ACK_INTR_STATUS__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__HVVM_MAILBOX_RCV_VALID_INTR_STATUS__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_CMD_COMPLETE_INTR_STATUS_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00000002L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_HANG_NEED_FLR_INTR_STATUS_MASK 0x00000004L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00000008L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_CMD_COMPLETE_INTR_STATUS_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_HANG_NEED_FLR_INTR_STATUS_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_CMD_COMPLETE_INTR_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_HANG_NEED_FLR_INTR_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_CMD_COMPLETE_INTR_STATUS_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_HANG_NEED_FLR_INTR_STATUS_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__HVVM_MAILBOX_TRN_ACK_INTR_STATUS_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__HVVM_MAILBOX_RCV_VALID_INTR_STATUS_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_RESET_CONTROL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_RESET_CONTROL__SOFT_PF_FLR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_RESET_CONTROL__SOFT_PF_FLR_MASK 0x0001L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__VF_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__TRN_MSG_DATA__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__TRN_MSG_VALID__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__RCV_MSG_DATA__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__RCV_MSG_ACK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__VF_INDEX_MASK 0x000000FFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__TRN_MSG_DATA_MASK 0x00000F00L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__TRN_MSG_VALID_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__RCV_MSG_DATA_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__RCV_MSG_ACK_MASK 0x01000000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF0_TRN_ACK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF0_RCV_VALID__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF1_TRN_ACK__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF1_RCV_VALID__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF2_TRN_ACK__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF2_RCV_VALID__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF3_TRN_ACK__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF3_RCV_VALID__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF4_TRN_ACK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF4_RCV_VALID__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF5_TRN_ACK__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF5_RCV_VALID__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF6_TRN_ACK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF6_RCV_VALID__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF7_TRN_ACK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF7_RCV_VALID__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF8_TRN_ACK__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF8_RCV_VALID__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF9_TRN_ACK__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF9_RCV_VALID__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF10_TRN_ACK__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF10_RCV_VALID__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF11_TRN_ACK__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF11_RCV_VALID__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF12_TRN_ACK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF12_RCV_VALID__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF13_TRN_ACK__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF13_RCV_VALID__SHIFT 0x1b
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF14_TRN_ACK__SHIFT 0x1c
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF14_RCV_VALID__SHIFT 0x1d
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF15_TRN_ACK__SHIFT 0x1e
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF15_RCV_VALID__SHIFT 0x1f
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF0_TRN_ACK_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF0_RCV_VALID_MASK 0x00000002L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF1_TRN_ACK_MASK 0x00000004L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF1_RCV_VALID_MASK 0x00000008L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF2_TRN_ACK_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF2_RCV_VALID_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF3_TRN_ACK_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF3_RCV_VALID_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF4_TRN_ACK_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF4_RCV_VALID_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF5_TRN_ACK_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF5_RCV_VALID_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF6_TRN_ACK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF6_RCV_VALID_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF7_TRN_ACK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF7_RCV_VALID_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF8_TRN_ACK_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF8_RCV_VALID_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF9_TRN_ACK_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF9_RCV_VALID_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF10_TRN_ACK_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF10_RCV_VALID_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF11_TRN_ACK_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF11_RCV_VALID_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF12_TRN_ACK_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF12_RCV_VALID_MASK 0x02000000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF13_TRN_ACK_MASK 0x04000000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF13_RCV_VALID_MASK 0x08000000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF14_TRN_ACK_MASK 0x10000000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF14_RCV_VALID_MASK 0x20000000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF15_TRN_ACK_MASK 0x40000000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF15_RCV_VALID_MASK 0x80000000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF16_TRN_ACK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF16_RCV_VALID__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF17_TRN_ACK__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF17_RCV_VALID__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF18_TRN_ACK__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF18_RCV_VALID__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF19_TRN_ACK__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF19_RCV_VALID__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF20_TRN_ACK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF20_RCV_VALID__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF21_TRN_ACK__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF21_RCV_VALID__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF22_TRN_ACK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF22_RCV_VALID__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF23_TRN_ACK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF23_RCV_VALID__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF24_TRN_ACK__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF24_RCV_VALID__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF25_TRN_ACK__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF25_RCV_VALID__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF26_TRN_ACK__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF26_RCV_VALID__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF27_TRN_ACK__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF27_RCV_VALID__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF28_TRN_ACK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF28_RCV_VALID__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF29_TRN_ACK__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF29_RCV_VALID__SHIFT 0x1b
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF30_TRN_ACK__SHIFT 0x1c
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF30_RCV_VALID__SHIFT 0x1d
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__PF_TRN_ACK__SHIFT 0x1e
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__PF_RCV_VALID__SHIFT 0x1f
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF16_TRN_ACK_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF16_RCV_VALID_MASK 0x00000002L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF17_TRN_ACK_MASK 0x00000004L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF17_RCV_VALID_MASK 0x00000008L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF18_TRN_ACK_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF18_RCV_VALID_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF19_TRN_ACK_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF19_RCV_VALID_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF20_TRN_ACK_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF20_RCV_VALID_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF21_TRN_ACK_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF21_RCV_VALID_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF22_TRN_ACK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF22_RCV_VALID_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF23_TRN_ACK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF23_RCV_VALID_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF24_TRN_ACK_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF24_RCV_VALID_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF25_TRN_ACK_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF25_RCV_VALID_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF26_TRN_ACK_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF26_RCV_VALID_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF27_TRN_ACK_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF27_RCV_VALID_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF28_TRN_ACK_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF28_RCV_VALID_MASK 0x02000000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF29_TRN_ACK_MASK 0x04000000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF29_RCV_VALID_MASK 0x08000000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF30_TRN_ACK_MASK 0x10000000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF30_RCV_VALID_MASK 0x20000000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__PF_TRN_ACK_MASK 0x40000000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__PF_RCV_VALID_MASK 0x80000000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__CONTEXT_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__LOC__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__CONTEXT_OFFSET__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__CONTEXT_SIZE_MASK 0x0000007FL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__LOC_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__CONTEXT_OFFSET_MASK 0xFFFFFC00L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB__TOTAL_FB_AVAILABLE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB__TOTAL_FB_CONSUMED__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB__TOTAL_FB_AVAILABLE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB__TOTAL_FB_CONSUMED_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__UVDSCH_OFFSET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__VCESCH_OFFSET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__GFXSCH_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__UVD1SCH_OFFSET__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__UVDSCH_OFFSET_MASK 0x000000FFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__VCESCH_OFFSET_MASK 0x0000FF00L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__GFXSCH_OFFSET_MASK 0x00FF0000L
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__UVD1SCH_OFFSET_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE__P2P_OVER_XGMI_ENABLE_VF__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE__P2P_OVER_XGMI_ENABLE_PF__SHIFT 0x1f
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE__P2P_OVER_XGMI_ENABLE_VF_MASK 0x7FFFFFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE__P2P_OVER_XGMI_ENABLE_PF_MASK 0x80000000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB__VF0_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB__VF0_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB__VF0_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB__VF0_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB__VF1_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB__VF1_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB__VF1_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB__VF1_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB__VF2_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB__VF2_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB__VF2_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB__VF2_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB__VF3_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB__VF3_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB__VF3_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB__VF3_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB__VF4_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB__VF4_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB__VF4_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB__VF4_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB__VF5_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB__VF5_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB__VF5_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB__VF5_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB__VF6_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB__VF6_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB__VF6_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB__VF6_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB__VF7_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB__VF7_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB__VF7_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB__VF7_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB__VF8_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB__VF8_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB__VF8_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB__VF8_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB__VF9_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB__VF9_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB__VF9_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB__VF9_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB__VF10_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB__VF10_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB__VF10_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB__VF10_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB__VF11_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB__VF11_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB__VF11_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB__VF11_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB__VF12_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB__VF12_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB__VF12_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB__VF12_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB__VF13_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB__VF13_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB__VF13_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB__VF13_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB__VF14_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB__VF14_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB__VF14_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB__VF14_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB__VF15_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB__VF15_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB__VF15_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB__VF15_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB__VF16_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB__VF16_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB__VF16_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB__VF16_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB__VF17_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB__VF17_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB__VF17_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB__VF17_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB__VF18_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB__VF18_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB__VF18_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB__VF18_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB__VF19_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB__VF19_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB__VF19_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB__VF19_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB__VF20_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB__VF20_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB__VF20_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB__VF20_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB__VF21_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB__VF21_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB__VF21_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB__VF21_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB__VF22_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB__VF22_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB__VF22_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB__VF22_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB__VF23_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB__VF23_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB__VF23_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB__VF23_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB__VF24_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB__VF24_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB__VF24_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB__VF24_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB__VF25_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB__VF25_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB__VF25_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB__VF25_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB__VF26_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB__VF26_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB__VF26_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB__VF26_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB__VF27_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB__VF27_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB__VF27_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB__VF27_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB__VF28_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB__VF28_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB__VF28_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB__VF28_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB__VF29_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB__VF29_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB__VF29_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB__VF29_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB__VF30_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB__VF30_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB__VF30_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB__VF30_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW0__DW0__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW0__DW0_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW1__DW1__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW1__DW1_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW2__DW2__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW2__DW2_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW3
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW3__DW3__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW3__DW3_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW4__DW4__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW4__DW4_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW5__DW5__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW5__DW5_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW6
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW6__DW6__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW6__DW6_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW7
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW7__DW7__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW7__DW7_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW8__DW8__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW8__DW8_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW0__DW0__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW0__DW0_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW1__DW1__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW1__DW1_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW2__DW2__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW2__DW2_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW3
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW3__DW3__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW3__DW3_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW4__DW4__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW4__DW4_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW5__DW5__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW5__DW5_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW6
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW6__DW6__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW6__DW6_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW7
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW7__DW7__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW7__DW7_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW8__DW8__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW8__DW8_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW0__DW0__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW0__DW0_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW1__DW1__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW1__DW1_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW2__DW2__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW2__DW2_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW3
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW3__DW3__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW3__DW3_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW4__DW4__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW4__DW4_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW5__DW5__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW5__DW5_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW6
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW6__DW6__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW6__DW6_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW7
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW7__DW7__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW7__DW7_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW8__DW8__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW8__DW8_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW0__DW0__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW0__DW0_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW1
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW1__DW1__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW1__DW1_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW2
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW2__DW2__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW2__DW2_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW3
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW3__DW3__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW3__DW3_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW4
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW4__DW4__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW4__DW4_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW5
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW5__DW5__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW5__DW5_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW6
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW6__DW6__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW6__DW6_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW7
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW7__DW7__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW7__DW7_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW8
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW8__DW8__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW8__DW8_MASK 0xFFFFFFFFL
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf1_bifcfgdecp
++//BIF_CFG_DEV0_EPF1_0_VENDOR_ID
++#define BIF_CFG_DEV0_EPF1_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF1_0_DEVICE_ID
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF1_0_COMMAND
++#define BIF_CFG_DEV0_EPF1_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF1_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_COMMAND__AD_STEPPING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF1_0_COMMAND__SERR_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_COMMAND__FAST_B2B_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF1_0_COMMAND__INT_DIS__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF1_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF1_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF1_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF1_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF1_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF1_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF1_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_COMMAND__AD_STEPPING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF1_0_COMMAND__SERR_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF1_0_COMMAND__FAST_B2B_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF1_0_COMMAND__INT_DIS_MASK 0x0400L
++//BIF_CFG_DEV0_EPF1_0_STATUS
++#define BIF_CFG_DEV0_EPF1_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_STATUS__INT_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_STATUS__CAP_LIST__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_STATUS__PCI_66_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF1_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_STATUS__DEVSEL_TIMING__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF1_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF1_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF1_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF1_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF1_0_STATUS__INT_STATUS_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF1_0_STATUS__CAP_LIST_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF1_0_STATUS__PCI_66_CAP_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF1_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF1_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF1_0_STATUS__DEVSEL_TIMING_MASK 0x0600L
++#define BIF_CFG_DEV0_EPF1_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF1_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF1_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF1_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF1_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_REVISION_ID
++#define BIF_CFG_DEV0_EPF1_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF1_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
++//BIF_CFG_DEV0_EPF1_0_PROG_INTERFACE
++#define BIF_CFG_DEV0_EPF1_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF1_0_SUB_CLASS
++#define BIF_CFG_DEV0_EPF1_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF1_0_BASE_CLASS
++#define BIF_CFG_DEV0_EPF1_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF1_0_CACHE_LINE
++#define BIF_CFG_DEV0_EPF1_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF1_0_LATENCY
++#define BIF_CFG_DEV0_EPF1_0_LATENCY__LATENCY_TIMER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LATENCY__LATENCY_TIMER_MASK 0xFFL
++//BIF_CFG_DEV0_EPF1_0_HEADER
++#define BIF_CFG_DEV0_EPF1_0_HEADER__HEADER_TYPE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_HEADER__DEVICE_TYPE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF1_0_HEADER__HEADER_TYPE_MASK 0x7FL
++#define BIF_CFG_DEV0_EPF1_0_HEADER__DEVICE_TYPE_MASK 0x80L
++//BIF_CFG_DEV0_EPF1_0_BIST
++#define BIF_CFG_DEV0_EPF1_0_BIST__BIST_COMP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_BIST__BIST_STRT__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_BIST__BIST_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF1_0_BIST__BIST_COMP_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF1_0_BIST__BIST_STRT_MASK 0x40L
++#define BIF_CFG_DEV0_EPF1_0_BIST__BIST_CAP_MASK 0x80L
++//BIF_CFG_DEV0_EPF1_0_BASE_ADDR_1
++#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_BASE_ADDR_2
++#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_BASE_ADDR_3
++#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_BASE_ADDR_4
++#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_BASE_ADDR_5
++#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_BASE_ADDR_6
++#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_ADAPTER_ID
++#define BIF_CFG_DEV0_EPF1_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_ROM_BASE_ADDR
++#define BIF_CFG_DEV0_EPF1_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_CAP_PTR
++#define BIF_CFG_DEV0_EPF1_0_CAP_PTR__CAP_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL
++//BIF_CFG_DEV0_EPF1_0_INTERRUPT_LINE
++#define BIF_CFG_DEV0_EPF1_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF1_0_INTERRUPT_PIN
++#define BIF_CFG_DEV0_EPF1_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
++//BIF_CFG_DEV0_EPF1_0_MIN_GRANT
++#define BIF_CFG_DEV0_EPF1_0_MIN_GRANT__MIN_GNT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_MIN_GRANT__MIN_GNT_MASK 0xFFL
++//BIF_CFG_DEV0_EPF1_0_MAX_LATENCY
++#define BIF_CFG_DEV0_EPF1_0_MAX_LATENCY__MAX_LAT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_MAX_LATENCY__MAX_LAT_MASK 0xFFL
++//BIF_CFG_DEV0_EPF1_0_VENDOR_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_VENDOR_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_VENDOR_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_VENDOR_CAP_LIST__LENGTH__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_VENDOR_CAP_LIST__CAP_ID_MASK 0x000000FFL
++#define BIF_CFG_DEV0_EPF1_0_VENDOR_CAP_LIST__NEXT_PTR_MASK 0x0000FF00L
++#define BIF_CFG_DEV0_EPF1_0_VENDOR_CAP_LIST__LENGTH_MASK 0x00FF0000L
++//BIF_CFG_DEV0_EPF1_0_ADAPTER_ID_W
++#define BIF_CFG_DEV0_EPF1_0_ADAPTER_ID_W__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_ADAPTER_ID_W__SUBSYSTEM_ID__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_ADAPTER_ID_W__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_ADAPTER_ID_W__SUBSYSTEM_ID_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PMI_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_PMI_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PMI_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PMI_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF1_0_PMI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_PMI_CAP
++#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__VERSION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__PME_CLOCK__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__DEV_SPECIFIC_INIT__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__AUX_CURRENT__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__D1_SUPPORT__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__D2_SUPPORT__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__PME_SUPPORT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__VERSION_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__PME_CLOCK_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__DEV_SPECIFIC_INIT_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__AUX_CURRENT_MASK 0x01C0L
++#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__D1_SUPPORT_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__D2_SUPPORT_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__PME_SUPPORT_MASK 0xF800L
++//BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__POWER_STATE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__NO_SOFT_RESET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__PME_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__DATA_SELECT__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__DATA_SCALE__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__PME_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__B2_B3_SUPPORT__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__BUS_PWR_EN__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__PMI_DATA__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__POWER_STATE_MASK 0x00000003L
++#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__NO_SOFT_RESET_MASK 0x00000008L
++#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__PME_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__DATA_SELECT_MASK 0x00001E00L
++#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__DATA_SCALE_MASK 0x00006000L
++#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__PME_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__B2_B3_SUPPORT_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__BUS_PWR_EN_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__PMI_DATA_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_PCIE_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP__VERSION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP__VERSION_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
++//BIF_CFG_DEV0_EPF1_0_DEVICE_CAP
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
++//BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF1_0_LINK_CAP
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF1_0_LINK_CNTL
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__LINK_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__LINK_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
++//BIF_CFG_DEV0_EPF1_0_LINK_STATUS
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
++//BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS2
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF1_0_LINK_CAP2
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__RESERVED__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__RESERVED_MASK 0xFE000000L
++//BIF_CFG_DEV0_EPF1_0_LINK_CNTL2
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
++//BIF_CFG_DEV0_EPF1_0_LINK_STATUS2
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
++//BIF_CFG_DEV0_EPF1_0_SLOT_CAP2
++#define BIF_CFG_DEV0_EPF1_0_SLOT_CAP2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_SLOT_CNTL2
++#define BIF_CFG_DEV0_EPF1_0_SLOT_CNTL2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF1_0_SLOT_STATUS2
++#define BIF_CFG_DEV0_EPF1_0_SLOT_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF1_0_MSI_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF1_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL
++#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
++#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
++//BIF_CFG_DEV0_EPF1_0_MSI_MSG_ADDR_LO
++#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//BIF_CFG_DEV0_EPF1_0_MSI_MSG_ADDR_HI
++#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_MSI_MSG_DATA
++#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF1_0_MSI_MASK
++#define BIF_CFG_DEV0_EPF1_0_MSI_MASK__MSI_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_MSI_MSG_DATA_64
++#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF1_0_MSI_MASK_64
++#define BIF_CFG_DEV0_EPF1_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_MSI_PENDING
++#define BIF_CFG_DEV0_EPF1_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_MSI_PENDING_64
++#define BIF_CFG_DEV0_EPF1_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_MSIX_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF1_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_MSIX_MSG_CNTL
++#define BIF_CFG_DEV0_EPF1_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF1_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
++#define BIF_CFG_DEV0_EPF1_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF1_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_MSIX_TABLE
++#define BIF_CFG_DEV0_EPF1_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF1_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF1_0_MSIX_PBA
++#define BIF_CFG_DEV0_EPF1_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF1_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG1__EXT_VC_COUNT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG1__LOW_PRIORITY_EXT_VC_COUNT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG1__REF_CLK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG1__PORT_ARB_TABLE_ENTRY_SIZE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG1__EXT_VC_COUNT_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG1__LOW_PRIORITY_EXT_VC_COUNT_MASK 0x00000070L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG1__REF_CLK_MASK 0x00000300L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG1__PORT_ARB_TABLE_ENTRY_SIZE_MASK 0x00000C00L
++//BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG2__VC_ARB_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG2__VC_ARB_TABLE_OFFSET__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG2__VC_ARB_CAP_MASK 0x000000FFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG2__VC_ARB_TABLE_OFFSET_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CNTL__LOAD_VC_ARB_TABLE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CNTL__VC_ARB_SELECT__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CNTL__LOAD_VC_ARB_TABLE_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CNTL__VC_ARB_SELECT_MASK 0x000EL
++//BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_STATUS
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_STATUS__VC_ARB_TABLE_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_STATUS__VC_ARB_TABLE_STATUS_MASK 0x0001L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CAP__REJECT_SNOOP_TRANS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CAP__MAX_TIME_SLOTS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_CAP_MASK 0x000000FFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CAP__REJECT_SNOOP_TRANS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CAP__MAX_TIME_SLOTS_MASK 0x003F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC0__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC1_7__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__PORT_ARB_SELECT__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__VC_ID__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__VC_ENABLE__SHIFT 0x1f
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC0_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC1_7_MASK 0x000000FEL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__PORT_ARB_SELECT_MASK 0x000E0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__VC_ID_MASK 0x07000000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__VC_ENABLE_MASK 0x80000000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_STATUS
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_STATUS__VC_NEGOTIATION_PENDING__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_STATUS__VC_NEGOTIATION_PENDING_MASK 0x0002L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CAP__REJECT_SNOOP_TRANS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CAP__MAX_TIME_SLOTS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_CAP_MASK 0x000000FFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CAP__REJECT_SNOOP_TRANS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CAP__MAX_TIME_SLOTS_MASK 0x003F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC0__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC1_7__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__PORT_ARB_SELECT__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__VC_ID__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__VC_ENABLE__SHIFT 0x1f
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC0_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC1_7_MASK 0x000000FEL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__PORT_ARB_SELECT_MASK 0x000E0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__VC_ID_MASK 0x07000000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__VC_ENABLE_MASK 0x80000000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_STATUS
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_STATUS__VC_NEGOTIATION_PENDING__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_STATUS__VC_NEGOTIATION_PENDING_MASK 0x0002L
++//BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_DW1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_DW1__SERIAL_NUMBER_LO__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_DW1__SERIAL_NUMBER_LO_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_DW2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_DW2__SERIAL_NUMBER_HI__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_DW2__SERIAL_NUMBER_HI_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG3
++#define BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG3
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_BAR_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CNTL__BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CNTL__BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CNTL__BAR_INDEX_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CNTL__BAR_SIZE_MASK 0x3F00L
++//BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CNTL__BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CNTL__BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CNTL__BAR_INDEX_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CNTL__BAR_SIZE_MASK 0x3F00L
++//BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CNTL__BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CNTL__BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CNTL__BAR_INDEX_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CNTL__BAR_SIZE_MASK 0x3F00L
++//BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CNTL__BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CNTL__BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CNTL__BAR_INDEX_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CNTL__BAR_SIZE_MASK 0x3F00L
++//BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CNTL__BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CNTL__BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CNTL__BAR_INDEX_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CNTL__BAR_SIZE_MASK 0x3F00L
++//BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CNTL__BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CNTL__BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CNTL__BAR_INDEX_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CNTL__BAR_SIZE_MASK 0x3F00L
++//BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA_SELECT
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA_SELECT__DATA_SELECT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA_SELECT__DATA_SELECT_MASK 0xFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__BASE_POWER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__DATA_SCALE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__PM_SUB_STATE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__PM_STATE__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__TYPE__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__POWER_RAIL__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__BASE_POWER_MASK 0x000000FFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__DATA_SCALE_MASK 0x00000300L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__PM_SUB_STATE_MASK 0x00001C00L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__PM_STATE_MASK 0x00006000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__TYPE_MASK 0x00038000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__POWER_RAIL_MASK 0x001C0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_CAP__SYSTEM_ALLOCATED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_CAP__SYSTEM_ALLOCATED_MASK 0x01L
++//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__SUBSTATE_MAX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__TRANS_LAT_UNIT__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__PWR_ALLOC_SCALE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__TRANS_LAT_VAL_0__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__TRANS_LAT_VAL_1__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__SUBSTATE_MAX_MASK 0x0000001FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__TRANS_LAT_UNIT_MASK 0x00000300L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__PWR_ALLOC_SCALE_MASK 0x00003000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__TRANS_LAT_VAL_0_MASK 0x00FF0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__TRANS_LAT_VAL_1_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_LATENCY_INDICATOR
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_STATUS
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_STATUS__SUBSTATE_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_STATUS__SUBSTATE_CNTL_ENABLED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_STATUS__SUBSTATE_STATUS_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_STATUS__SUBSTATE_CNTL_ENABLED_MASK 0x0100L
++//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CNTL__SUBSTATE_CNTL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CNTL__SUBSTATE_CNTL_MASK 0x1FL
++//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC_MASK 0xFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_SECONDARY_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SECONDARY_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SECONDARY_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_LINK_CNTL3
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LINK_CNTL3__PERFORM_EQUALIZATION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LINK_CNTL3__LINK_EQUALIZATION_REQ_INT_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LINK_CNTL3__RESERVED__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LINK_CNTL3__PERFORM_EQUALIZATION_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LINK_CNTL3__LINK_EQUALIZATION_REQ_INT_EN_MASK 0x00000002L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LINK_CNTL3__RESERVED_MASK 0xFFFFFFFCL
++//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_ERROR_STATUS
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_ERROR_STATUS__LANE_ERROR_STATUS_BITS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_ERROR_STATUS__RESERVED__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_ERROR_STATUS__LANE_ERROR_STATUS_BITS_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_ERROR_STATUS__RESERVED_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_0_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_0_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_0_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_1_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_1_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_1_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_2_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_2_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_2_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_3_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_3_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_3_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_4_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_4_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_4_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_5_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_5_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_5_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_6_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_6_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_6_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_7_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_7_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_7_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_8_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_8_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_8_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_9_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_9_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_9_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_10_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_10_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_10_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_11_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_11_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_11_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_12_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_12_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_12_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_13_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_13_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_13_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_14_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_14_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_14_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_15_EQUALIZATION_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_15_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_15_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_ACS_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__SOURCE_VALIDATION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__TRANSLATION_BLOCKING__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__UPSTREAM_FORWARDING__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__P2P_EGRESS_CONTROL__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__SOURCE_VALIDATION_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__TRANSLATION_BLOCKING_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__UPSTREAM_FORWARDING_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__P2P_EGRESS_CONTROL_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN_MASK 0x0040L
++//BIF_CFG_DEV0_EPF1_0_PCIE_ATS_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CNTL__STU__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CNTL__STU_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_CNTL__PRI_ENABLE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_CNTL__PRI_RESET__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_CNTL__PRI_ENABLE_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_CNTL__PRI_RESET_MASK 0x0002L
++//BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_STATUS
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_STATUS__RESPONSE_FAILURE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_STATUS__UNEXPECTED_PAGE_REQ_GRP_INDEX__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_STATUS__STOPPED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_STATUS__PRG_RESPONSE_PASID_REQUIRED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_STATUS__RESPONSE_FAILURE_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_STATUS__UNEXPECTED_PAGE_REQ_GRP_INDEX_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_STATUS__STOPPED_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_STATUS__PRG_RESPONSE_PASID_REQUIRED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_OUTSTAND_PAGE_REQ_CAPACITY
++#define BIF_CFG_DEV0_EPF1_0_PCIE_OUTSTAND_PAGE_REQ_CAPACITY__OUTSTAND_PAGE_REQ_CAPACITY__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_OUTSTAND_PAGE_REQ_CAPACITY__OUTSTAND_PAGE_REQ_CAPACITY_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_OUTSTAND_PAGE_REQ_ALLOC
++#define BIF_CFG_DEV0_EPF1_0_PCIE_OUTSTAND_PAGE_REQ_ALLOC__OUTSTAND_PAGE_REQ_ALLOC__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_OUTSTAND_PAGE_REQ_ALLOC__OUTSTAND_PAGE_REQ_ALLOC_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_PASID_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CAP__PASID_EXE_PERMISSION_SUPPORTED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CAP__PASID_PRIV_MODE_SUPPORTED__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CAP__MAX_PASID_WIDTH__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CAP__PASID_EXE_PERMISSION_SUPPORTED_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CAP__PASID_PRIV_MODE_SUPPORTED_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CAP__MAX_PASID_WIDTH_MASK 0x1F00L
++//BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CNTL__PASID_ENABLE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CNTL__PASID_EXE_PERMISSION_ENABLE__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CNTL__PASID_PRIV_MODE_SUPPORTED_ENABLE__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CNTL__PASID_ENABLE_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CNTL__PASID_EXE_PERMISSION_ENABLE_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CNTL__PASID_PRIV_MODE_SUPPORTED_ENABLE_MASK 0x0004L
++//BIF_CFG_DEV0_EPF1_0_PCIE_MC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_MC_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_CAP__MC_MAX_GROUP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_CAP__MC_WIN_SIZE_REQ__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_CAP__MC_ECRC_REGEN_SUPP__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_CAP__MC_MAX_GROUP_MASK 0x003FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_CAP__MC_WIN_SIZE_REQ_MASK 0x3F00L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_CAP__MC_ECRC_REGEN_SUPP_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_MC_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_CNTL__MC_NUM_GROUP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_CNTL__MC_ENABLE__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_CNTL__MC_NUM_GROUP_MASK 0x003FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_CNTL__MC_ENABLE_MASK 0x8000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_MC_ADDR0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ADDR0__MC_INDEX_POS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ADDR0__MC_BASE_ADDR_0__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ADDR0__MC_INDEX_POS_MASK 0x0000003FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ADDR0__MC_BASE_ADDR_0_MASK 0xFFFFF000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_MC_ADDR1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ADDR1__MC_BASE_ADDR_1__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ADDR1__MC_BASE_ADDR_1_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_MC_RCV0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_RCV0__MC_RECEIVE_0__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_RCV0__MC_RECEIVE_0_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_MC_RCV1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_RCV1__MC_RECEIVE_1__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_RCV1__MC_RECEIVE_1_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_ALL0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_ALL0__MC_BLOCK_ALL_0__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_ALL0__MC_BLOCK_ALL_0_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_ALL1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_ALL1__MC_BLOCK_ALL_1__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_ALL1__MC_BLOCK_ALL_1_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_UNTRANSLATED_0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_UNTRANSLATED_0__MC_BLOCK_UNTRANSLATED_0__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_UNTRANSLATED_0__MC_BLOCK_UNTRANSLATED_0_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_UNTRANSLATED_1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_UNTRANSLATED_1__MC_BLOCK_UNTRANSLATED_1__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_UNTRANSLATED_1__MC_BLOCK_UNTRANSLATED_1_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_LTR_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_LTR_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_VALUE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_SCALE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_VALUE__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_SCALE__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_VALUE_MASK 0x000003FFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_SCALE_MASK 0x00001C00L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_VALUE_MASK 0x03FF0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_SCALE_MASK 0x1C000000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_ARI_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
++//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CAP__SRIOV_ARI_CAP_HIERARCHY_PRESERVED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CAP__SRIOV_VF_TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_INTR_MSG_NUM__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_CAP_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CAP__SRIOV_ARI_CAP_HIERARCHY_PRESERVED_MASK 0x00000002L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CAP__SRIOV_VF_TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00000004L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_INTR_MSG_NUM_MASK 0xFFE00000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_VF_ENABLE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_ENABLE__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_INTR_ENABLE__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MSE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_ARI_CAP_HIERARCHY__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_VF_TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_VF_ENABLE_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_ENABLE_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_INTR_ENABLE_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MSE_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_ARI_CAP_HIERARCHY_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_VF_TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x0020L
++//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_STATUS
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_STATUS__SRIOV_VF_MIGRATION_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_STATUS__SRIOV_VF_MIGRATION_STATUS_MASK 0x0001L
++//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_INITIAL_VFS
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_INITIAL_VFS__SRIOV_INITIAL_VFS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_INITIAL_VFS__SRIOV_INITIAL_VFS_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_TOTAL_VFS
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_TOTAL_VFS__SRIOV_TOTAL_VFS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_TOTAL_VFS__SRIOV_TOTAL_VFS_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_NUM_VFS
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_NUM_VFS__SRIOV_NUM_VFS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_NUM_VFS__SRIOV_NUM_VFS_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_FUNC_DEP_LINK
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_FUNC_DEP_LINK__SRIOV_FUNC_DEP_LINK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_FUNC_DEP_LINK__SRIOV_FUNC_DEP_LINK_MASK 0x00FFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_FIRST_VF_OFFSET
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_FIRST_VF_OFFSET__SRIOV_FIRST_VF_OFFSET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_FIRST_VF_OFFSET__SRIOV_FIRST_VF_OFFSET_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_STRIDE
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_STRIDE__SRIOV_VF_STRIDE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_STRIDE__SRIOV_VF_STRIDE_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_DEVICE_ID
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_DEVICE_ID__SRIOV_VF_DEVICE_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_DEVICE_ID__SRIOV_VF_DEVICE_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_SUPPORTED_PAGE_SIZE
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_SUPPORTED_PAGE_SIZE__SRIOV_SUPPORTED_PAGE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_SUPPORTED_PAGE_SIZE__SRIOV_SUPPORTED_PAGE_SIZE_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_SYSTEM_PAGE_SIZE
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_SYSTEM_PAGE_SIZE__SRIOV_SYSTEM_PAGE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_SYSTEM_PAGE_SIZE__SRIOV_SYSTEM_PAGE_SIZE_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_0__VF_BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_0__VF_BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_1__VF_BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_1__VF_BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_2__VF_BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_2__VF_BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_3
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_3__VF_BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_3__VF_BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_4__VF_BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_4__VF_BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_5__VF_BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_5__VF_BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_BIF__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_BIF_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_NO_ST_MODE_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_INT_VEC_MODE_SUPPORTED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_DEV_SPC_MODE_SUPPORTED__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_EXTND_TPH_REQR_SUPPORED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_ST_TABLE_LOCATION__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_ST_TABLE_SIZE__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_NO_ST_MODE_SUPPORTED_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_INT_VEC_MODE_SUPPORTED_MASK 0x00000002L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_DEV_SPC_MODE_SUPPORTED_MASK 0x00000004L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_EXTND_TPH_REQR_SUPPORED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_ST_TABLE_LOCATION_MASK 0x00000600L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_ST_TABLE_SIZE_MASK 0x07FF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CNTL__TPH_REQR_ST_MODE_SEL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CNTL__TPH_REQR_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CNTL__TPH_REQR_ST_MODE_SEL_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CNTL__TPH_REQR_EN_MASK 0x00000300L
++//BIF_CFG_DEV0_EPF1_0_PCIE_DLF_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DLF_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DLF_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DLF_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DLF_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DLF_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_DLF_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_DATA_LINK_FEATURE_CAP
++#define BIF_CFG_DEV0_EPF1_0_DATA_LINK_FEATURE_CAP__LOCAL_DLF_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_DATA_LINK_FEATURE_CAP__DLF_EXCHANGE_ENABLE__SHIFT 0x1f
++#define BIF_CFG_DEV0_EPF1_0_DATA_LINK_FEATURE_CAP__LOCAL_DLF_SUPPORTED_MASK 0x007FFFFFL
++#define BIF_CFG_DEV0_EPF1_0_DATA_LINK_FEATURE_CAP__DLF_EXCHANGE_ENABLE_MASK 0x80000000L
++//BIF_CFG_DEV0_EPF1_0_DATA_LINK_FEATURE_STATUS
++#define BIF_CFG_DEV0_EPF1_0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_VALID__SHIFT 0x1f
++#define BIF_CFG_DEV0_EPF1_0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_MASK 0x007FFFFFL
++#define BIF_CFG_DEV0_EPF1_0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_VALID_MASK 0x80000000L
++//BIF_CFG_DEV0_EPF1_0_PHY_16GT_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_PHY_16GT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PHY_16GT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PHY_16GT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PHY_16GT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PHY_16GT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PHY_16GT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_LINK_CAP_16GT
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP_16GT__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LINK_CAP_16GT__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_LINK_CNTL_16GT
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL_16GT__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL_16GT__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_LINK_STATUS_16GT
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS_16GT__EQUALIZATION_COMPLETE_16GT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS_16GT__EQUALIZATION_PHASE1_SUCCESS_16GT__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS_16GT__EQUALIZATION_PHASE2_SUCCESS_16GT__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS_16GT__EQUALIZATION_PHASE3_SUCCESS_16GT__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS_16GT__LINK_EQUALIZATION_REQUEST_16GT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS_16GT__EQUALIZATION_COMPLETE_16GT_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS_16GT__EQUALIZATION_PHASE1_SUCCESS_16GT_MASK 0x00000002L
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS_16GT__EQUALIZATION_PHASE2_SUCCESS_16GT_MASK 0x00000004L
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS_16GT__EQUALIZATION_PHASE3_SUCCESS_16GT_MASK 0x00000008L
++#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS_16GT__LINK_EQUALIZATION_REQUEST_16GT_MASK 0x00000010L
++//BIF_CFG_DEV0_EPF1_0_LOCAL_PARITY_MISMATCH_STATUS_16GT
++#define BIF_CFG_DEV0_EPF1_0_LOCAL_PARITY_MISMATCH_STATUS_16GT__LOCAL_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LOCAL_PARITY_MISMATCH_STATUS_16GT__LOCAL_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF1_0_RTM1_PARITY_MISMATCH_STATUS_16GT
++#define BIF_CFG_DEV0_EPF1_0_RTM1_PARITY_MISMATCH_STATUS_16GT__RTM1_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_RTM1_PARITY_MISMATCH_STATUS_16GT__RTM1_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF1_0_RTM2_PARITY_MISMATCH_STATUS_16GT
++#define BIF_CFG_DEV0_EPF1_0_RTM2_PARITY_MISMATCH_STATUS_16GT__RTM2_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_RTM2_PARITY_MISMATCH_STATUS_16GT__RTM2_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF1_0_LANE_0_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF1_0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF1_0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF1_0_LANE_1_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF1_0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF1_0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF1_0_LANE_2_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF1_0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF1_0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF1_0_LANE_3_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF1_0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF1_0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF1_0_LANE_4_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF1_0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF1_0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF1_0_LANE_5_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF1_0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF1_0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF1_0_LANE_6_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF1_0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF1_0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF1_0_LANE_7_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF1_0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF1_0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF1_0_LANE_8_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF1_0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF1_0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF1_0_LANE_9_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF1_0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF1_0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF1_0_LANE_10_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF1_0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF1_0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF1_0_LANE_11_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF1_0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF1_0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF1_0_LANE_12_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF1_0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF1_0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF1_0_LANE_13_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF1_0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF1_0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF1_0_LANE_14_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF1_0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF1_0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF1_0_LANE_15_EQUALIZATION_CNTL_16GT
++#define BIF_CFG_DEV0_EPF1_0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_DSP_16GT_TX_PRESET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_USP_16GT_TX_PRESET__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_DSP_16GT_TX_PRESET_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF1_0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_USP_16GT_TX_PRESET_MASK 0xF0L
++//BIF_CFG_DEV0_EPF1_0_MARGINING_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_MARGINING_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_MARGINING_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_MARGINING_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_MARGINING_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_MARGINING_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_MARGINING_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_MARGINING_PORT_CAP
++#define BIF_CFG_DEV0_EPF1_0_MARGINING_PORT_CAP__MARGINING_USES_SOFTWARE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_MARGINING_PORT_CAP__MARGINING_USES_SOFTWARE_MASK 0x0001L
++//BIF_CFG_DEV0_EPF1_0_MARGINING_PORT_STATUS
++#define BIF_CFG_DEV0_EPF1_0_MARGINING_PORT_STATUS__MARGINING_READY__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_MARGINING_PORT_STATUS__MARGINING_SOFTWARE_READY__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_MARGINING_PORT_STATUS__MARGINING_READY_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF1_0_MARGINING_PORT_STATUS__MARGINING_SOFTWARE_READY_MASK 0x0002L
++//BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_CNTL
++#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_RECEIVER_NUMBER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_TYPE__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_USAGE_MODEL__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_PAYLOAD__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_RECEIVER_NUMBER_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_TYPE_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_USAGE_MODEL_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_PAYLOAD_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_STATUS
++#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_RECEIVER_NUMBER_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_TYPE_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_USAGE_MODEL_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_RECEIVER_NUMBER_STATUS_MASK 0x0007L
++#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_TYPE_STATUS_MASK 0x0038L
++#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_USAGE_MODEL_STATUS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR1_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR1_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR1_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR1_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_INDEX_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_SIZE_MASK 0x00003F00L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR2_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR2_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR2_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR2_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_INDEX_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_SIZE_MASK 0x00003F00L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR3_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR3_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR3_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR3_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_INDEX_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_SIZE_MASK 0x00003F00L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR4_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR4_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR4_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR4_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_INDEX_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_SIZE_MASK 0x00003F00L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR5_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR5_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR5_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR5_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_INDEX_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_SIZE_MASK 0x00003F00L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR6_CAP
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR6_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR6_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR6_CNTL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_SIZE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_INDEX_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_SIZE_MASK 0x00003F00L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_REV__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_LENGTH__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_REV_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_LENGTH_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW__VF_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW__VF_NUM__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW__VF_EN_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW__VF_NUM_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_CMD_COMPLETE_INTR_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_HANG_NEED_FLR_INTR_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_CMD_COMPLETE_INTR_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_HANG_NEED_FLR_INTR_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_CMD_COMPLETE_INTR_EN__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_HANG_NEED_FLR_INTR_EN__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_CMD_COMPLETE_INTR_EN__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_HANG_NEED_FLR_INTR_EN__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__HVVM_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__HVVM_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_CMD_COMPLETE_INTR_EN_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00000002L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_HANG_NEED_FLR_INTR_EN_MASK 0x00000004L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00000008L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_CMD_COMPLETE_INTR_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_HANG_NEED_FLR_INTR_EN_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_CMD_COMPLETE_INTR_EN_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_HANG_NEED_FLR_INTR_EN_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_CMD_COMPLETE_INTR_EN_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_HANG_NEED_FLR_INTR_EN_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__HVVM_MAILBOX_TRN_ACK_INTR_EN_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__HVVM_MAILBOX_RCV_VALID_INTR_EN_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_CMD_COMPLETE_INTR_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_HANG_NEED_FLR_INTR_STATUS__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_CMD_COMPLETE_INTR_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_HANG_NEED_FLR_INTR_STATUS__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_CMD_COMPLETE_INTR_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_HANG_NEED_FLR_INTR_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_CMD_COMPLETE_INTR_STATUS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_HANG_NEED_FLR_INTR_STATUS__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__HVVM_MAILBOX_TRN_ACK_INTR_STATUS__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__HVVM_MAILBOX_RCV_VALID_INTR_STATUS__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_CMD_COMPLETE_INTR_STATUS_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00000002L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_HANG_NEED_FLR_INTR_STATUS_MASK 0x00000004L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00000008L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_CMD_COMPLETE_INTR_STATUS_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_HANG_NEED_FLR_INTR_STATUS_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_CMD_COMPLETE_INTR_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_HANG_NEED_FLR_INTR_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_CMD_COMPLETE_INTR_STATUS_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_HANG_NEED_FLR_INTR_STATUS_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__HVVM_MAILBOX_TRN_ACK_INTR_STATUS_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__HVVM_MAILBOX_RCV_VALID_INTR_STATUS_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_RESET_CONTROL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_RESET_CONTROL__SOFT_PF_FLR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_RESET_CONTROL__SOFT_PF_FLR_MASK 0x0001L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__VF_INDEX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__TRN_MSG_DATA__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__TRN_MSG_VALID__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__RCV_MSG_DATA__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__RCV_MSG_ACK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__VF_INDEX_MASK 0x000000FFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__TRN_MSG_DATA_MASK 0x00000F00L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__TRN_MSG_VALID_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__RCV_MSG_DATA_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__RCV_MSG_ACK_MASK 0x01000000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF0_TRN_ACK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF0_RCV_VALID__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF1_TRN_ACK__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF1_RCV_VALID__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF2_TRN_ACK__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF2_RCV_VALID__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF3_TRN_ACK__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF3_RCV_VALID__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF4_TRN_ACK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF4_RCV_VALID__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF5_TRN_ACK__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF5_RCV_VALID__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF6_TRN_ACK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF6_RCV_VALID__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF7_TRN_ACK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF7_RCV_VALID__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF8_TRN_ACK__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF8_RCV_VALID__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF9_TRN_ACK__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF9_RCV_VALID__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF10_TRN_ACK__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF10_RCV_VALID__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF11_TRN_ACK__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF11_RCV_VALID__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF12_TRN_ACK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF12_RCV_VALID__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF13_TRN_ACK__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF13_RCV_VALID__SHIFT 0x1b
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF14_TRN_ACK__SHIFT 0x1c
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF14_RCV_VALID__SHIFT 0x1d
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF15_TRN_ACK__SHIFT 0x1e
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF15_RCV_VALID__SHIFT 0x1f
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF0_TRN_ACK_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF0_RCV_VALID_MASK 0x00000002L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF1_TRN_ACK_MASK 0x00000004L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF1_RCV_VALID_MASK 0x00000008L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF2_TRN_ACK_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF2_RCV_VALID_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF3_TRN_ACK_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF3_RCV_VALID_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF4_TRN_ACK_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF4_RCV_VALID_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF5_TRN_ACK_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF5_RCV_VALID_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF6_TRN_ACK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF6_RCV_VALID_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF7_TRN_ACK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF7_RCV_VALID_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF8_TRN_ACK_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF8_RCV_VALID_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF9_TRN_ACK_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF9_RCV_VALID_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF10_TRN_ACK_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF10_RCV_VALID_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF11_TRN_ACK_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF11_RCV_VALID_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF12_TRN_ACK_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF12_RCV_VALID_MASK 0x02000000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF13_TRN_ACK_MASK 0x04000000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF13_RCV_VALID_MASK 0x08000000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF14_TRN_ACK_MASK 0x10000000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF14_RCV_VALID_MASK 0x20000000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF15_TRN_ACK_MASK 0x40000000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF15_RCV_VALID_MASK 0x80000000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF16_TRN_ACK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF16_RCV_VALID__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF17_TRN_ACK__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF17_RCV_VALID__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF18_TRN_ACK__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF18_RCV_VALID__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF19_TRN_ACK__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF19_RCV_VALID__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF20_TRN_ACK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF20_RCV_VALID__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF21_TRN_ACK__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF21_RCV_VALID__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF22_TRN_ACK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF22_RCV_VALID__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF23_TRN_ACK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF23_RCV_VALID__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF24_TRN_ACK__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF24_RCV_VALID__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF25_TRN_ACK__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF25_RCV_VALID__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF26_TRN_ACK__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF26_RCV_VALID__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF27_TRN_ACK__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF27_RCV_VALID__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF28_TRN_ACK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF28_RCV_VALID__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF29_TRN_ACK__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF29_RCV_VALID__SHIFT 0x1b
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF30_TRN_ACK__SHIFT 0x1c
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF30_RCV_VALID__SHIFT 0x1d
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__PF_TRN_ACK__SHIFT 0x1e
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__PF_RCV_VALID__SHIFT 0x1f
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF16_TRN_ACK_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF16_RCV_VALID_MASK 0x00000002L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF17_TRN_ACK_MASK 0x00000004L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF17_RCV_VALID_MASK 0x00000008L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF18_TRN_ACK_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF18_RCV_VALID_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF19_TRN_ACK_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF19_RCV_VALID_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF20_TRN_ACK_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF20_RCV_VALID_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF21_TRN_ACK_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF21_RCV_VALID_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF22_TRN_ACK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF22_RCV_VALID_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF23_TRN_ACK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF23_RCV_VALID_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF24_TRN_ACK_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF24_RCV_VALID_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF25_TRN_ACK_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF25_RCV_VALID_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF26_TRN_ACK_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF26_RCV_VALID_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF27_TRN_ACK_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF27_RCV_VALID_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF28_TRN_ACK_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF28_RCV_VALID_MASK 0x02000000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF29_TRN_ACK_MASK 0x04000000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF29_RCV_VALID_MASK 0x08000000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF30_TRN_ACK_MASK 0x10000000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF30_RCV_VALID_MASK 0x20000000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__PF_TRN_ACK_MASK 0x40000000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__PF_RCV_VALID_MASK 0x80000000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__CONTEXT_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__LOC__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__CONTEXT_OFFSET__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__CONTEXT_SIZE_MASK 0x0000007FL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__LOC_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__CONTEXT_OFFSET_MASK 0xFFFFFC00L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB__TOTAL_FB_AVAILABLE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB__TOTAL_FB_CONSUMED__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB__TOTAL_FB_AVAILABLE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB__TOTAL_FB_CONSUMED_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__UVDSCH_OFFSET__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__VCESCH_OFFSET__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__GFXSCH_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__UVD1SCH_OFFSET__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__UVDSCH_OFFSET_MASK 0x000000FFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__VCESCH_OFFSET_MASK 0x0000FF00L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__GFXSCH_OFFSET_MASK 0x00FF0000L
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__UVD1SCH_OFFSET_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE__P2P_OVER_XGMI_ENABLE_VF__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE__P2P_OVER_XGMI_ENABLE_PF__SHIFT 0x1f
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE__P2P_OVER_XGMI_ENABLE_VF_MASK 0x7FFFFFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE__P2P_OVER_XGMI_ENABLE_PF_MASK 0x80000000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB__VF0_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB__VF0_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB__VF0_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB__VF0_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB__VF1_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB__VF1_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB__VF1_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB__VF1_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB__VF2_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB__VF2_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB__VF2_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB__VF2_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB__VF3_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB__VF3_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB__VF3_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB__VF3_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB__VF4_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB__VF4_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB__VF4_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB__VF4_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB__VF5_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB__VF5_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB__VF5_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB__VF5_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB__VF6_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB__VF6_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB__VF6_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB__VF6_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB__VF7_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB__VF7_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB__VF7_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB__VF7_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB__VF8_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB__VF8_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB__VF8_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB__VF8_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB__VF9_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB__VF9_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB__VF9_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB__VF9_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB__VF10_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB__VF10_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB__VF10_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB__VF10_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB__VF11_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB__VF11_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB__VF11_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB__VF11_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB__VF12_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB__VF12_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB__VF12_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB__VF12_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB__VF13_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB__VF13_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB__VF13_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB__VF13_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB__VF14_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB__VF14_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB__VF14_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB__VF14_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB__VF15_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB__VF15_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB__VF15_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB__VF15_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB__VF16_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB__VF16_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB__VF16_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB__VF16_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB__VF17_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB__VF17_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB__VF17_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB__VF17_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB__VF18_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB__VF18_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB__VF18_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB__VF18_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB__VF19_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB__VF19_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB__VF19_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB__VF19_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB__VF20_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB__VF20_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB__VF20_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB__VF20_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB__VF21_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB__VF21_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB__VF21_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB__VF21_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB__VF22_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB__VF22_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB__VF22_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB__VF22_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB__VF23_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB__VF23_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB__VF23_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB__VF23_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB__VF24_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB__VF24_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB__VF24_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB__VF24_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB__VF25_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB__VF25_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB__VF25_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB__VF25_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB__VF26_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB__VF26_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB__VF26_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB__VF26_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB__VF27_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB__VF27_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB__VF27_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB__VF27_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB__VF28_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB__VF28_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB__VF28_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB__VF28_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB__VF29_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB__VF29_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB__VF29_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB__VF29_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB__VF30_FB_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB__VF30_FB_OFFSET__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB__VF30_FB_SIZE_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB__VF30_FB_OFFSET_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW0__DW0__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW0__DW0_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW1__DW1__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW1__DW1_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW2__DW2__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW2__DW2_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW3
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW3__DW3__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW3__DW3_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW4__DW4__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW4__DW4_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW5__DW5__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW5__DW5_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW6
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW6__DW6__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW6__DW6_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW7
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW7__DW7__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW7__DW7_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW8__DW8__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW8__DW8_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW0__DW0__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW0__DW0_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW1__DW1__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW1__DW1_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW2__DW2__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW2__DW2_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW3
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW3__DW3__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW3__DW3_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW4__DW4__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW4__DW4_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW5__DW5__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW5__DW5_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW6
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW6__DW6__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW6__DW6_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW7
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW7__DW7__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW7__DW7_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW8__DW8__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW8__DW8_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW0__DW0__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW0__DW0_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW1__DW1__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW1__DW1_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW2__DW2__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW2__DW2_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW3
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW3__DW3__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW3__DW3_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW4__DW4__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW4__DW4_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW5__DW5__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW5__DW5_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW6
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW6__DW6__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW6__DW6_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW7
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW7__DW7__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW7__DW7_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW8__DW8__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW8__DW8_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW0__DW0__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW0__DW0_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW1
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW1__DW1__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW1__DW1_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW2
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW2__DW2__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW2__DW2_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW3
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW3__DW3__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW3__DW3_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW4
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW4__DW4__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW4__DW4_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW5
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW5__DW5__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW5__DW5_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW6
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW6__DW6__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW6__DW6_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW7
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW7__DW7__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW7__DW7_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW8
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW8__DW8__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW8__DW8_MASK 0xFFFFFFFFL
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf0_bifcfgdecp
++//BIF_CFG_DEV0_EPF0_VF0_0_VENDOR_ID
++#define BIF_CFG_DEV0_EPF0_VF0_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_ID
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_COMMAND
++#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__AD_STEPPING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__SERR_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__FAST_B2B_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__INT_DIS__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__AD_STEPPING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__SERR_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__FAST_B2B_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__INT_DIS_MASK 0x0400L
++//BIF_CFG_DEV0_EPF0_VF0_0_STATUS
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__INT_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__CAP_LIST__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__PCI_66_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__DEVSEL_TIMING__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__INT_STATUS_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__CAP_LIST_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__PCI_66_CAP_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__DEVSEL_TIMING_MASK 0x0600L
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF0_0_REVISION_ID
++#define BIF_CFG_DEV0_EPF0_VF0_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF0_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF0_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_VF0_0_PROG_INTERFACE
++#define BIF_CFG_DEV0_EPF0_VF0_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_SUB_CLASS
++#define BIF_CFG_DEV0_EPF0_VF0_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_BASE_CLASS
++#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_CACHE_LINE
++#define BIF_CFG_DEV0_EPF0_VF0_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_LATENCY
++#define BIF_CFG_DEV0_EPF0_VF0_0_LATENCY__LATENCY_TIMER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_LATENCY__LATENCY_TIMER_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_HEADER
++#define BIF_CFG_DEV0_EPF0_VF0_0_HEADER__HEADER_TYPE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_HEADER__DEVICE_TYPE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF0_0_HEADER__HEADER_TYPE_MASK 0x7FL
++#define BIF_CFG_DEV0_EPF0_VF0_0_HEADER__DEVICE_TYPE_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF0_0_BIST
++#define BIF_CFG_DEV0_EPF0_VF0_0_BIST__BIST_COMP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_BIST__BIST_STRT__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF0_0_BIST__BIST_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF0_0_BIST__BIST_COMP_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF0_0_BIST__BIST_STRT_MASK 0x40L
++#define BIF_CFG_DEV0_EPF0_VF0_0_BIST__BIST_CAP_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_1
++#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_2
++#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_3
++#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_4
++#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_5
++#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_6
++#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_ADAPTER_ID
++#define BIF_CFG_DEV0_EPF0_VF0_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF0_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF0_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_VF0_0_ROM_BASE_ADDR
++#define BIF_CFG_DEV0_EPF0_VF0_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_CAP_PTR
++#define BIF_CFG_DEV0_EPF0_VF0_0_CAP_PTR__CAP_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL
++//BIF_CFG_DEV0_EPF0_VF0_0_INTERRUPT_LINE
++#define BIF_CFG_DEV0_EPF0_VF0_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_INTERRUPT_PIN
++#define BIF_CFG_DEV0_EPF0_VF0_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP__VERSION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP__VERSION_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
++//BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
++//BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__LINK_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__LINK_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
++//BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
++//BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP2
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP2__RESERVED__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP2__RESERVED_MASK 0xFE000000L
++//BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
++//BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
++#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
++//BIF_CFG_DEV0_EPF0_VF0_0_SLOT_CAP2
++#define BIF_CFG_DEV0_EPF0_VF0_0_SLOT_CAP2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_SLOT_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF0_0_SLOT_CNTL2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_SLOT_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF0_0_SLOT_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_MSI_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
++//BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_ADDR_LO
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_ADDR_HI
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_DATA
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_MSI_MASK
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MASK__MSI_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_DATA_64
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_MSI_MASK_64
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_MSI_PENDING
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_MSI_PENDING_64
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_MSIX_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF0_0_MSIX_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF0_0_MSIX_TABLE
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF0_0_MSIX_PBA
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_HDR
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC1
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC2
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG1
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG2
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG3
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG1
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG2
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG3
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CAP
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CNTL
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CNTL__STU__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CNTL__STU_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CAP
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CNTL
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf1_bifcfgdecp
++//BIF_CFG_DEV0_EPF0_VF1_0_VENDOR_ID
++#define BIF_CFG_DEV0_EPF0_VF1_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_ID
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_COMMAND
++#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__AD_STEPPING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__SERR_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__FAST_B2B_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__INT_DIS__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__AD_STEPPING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__SERR_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__FAST_B2B_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__INT_DIS_MASK 0x0400L
++//BIF_CFG_DEV0_EPF0_VF1_0_STATUS
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__INT_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__CAP_LIST__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__PCI_66_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__DEVSEL_TIMING__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__INT_STATUS_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__CAP_LIST_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__PCI_66_CAP_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__DEVSEL_TIMING_MASK 0x0600L
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF1_0_REVISION_ID
++#define BIF_CFG_DEV0_EPF0_VF1_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF1_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF1_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_VF1_0_PROG_INTERFACE
++#define BIF_CFG_DEV0_EPF0_VF1_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_SUB_CLASS
++#define BIF_CFG_DEV0_EPF0_VF1_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_BASE_CLASS
++#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_CACHE_LINE
++#define BIF_CFG_DEV0_EPF0_VF1_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_LATENCY
++#define BIF_CFG_DEV0_EPF0_VF1_0_LATENCY__LATENCY_TIMER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_LATENCY__LATENCY_TIMER_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_HEADER
++#define BIF_CFG_DEV0_EPF0_VF1_0_HEADER__HEADER_TYPE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_HEADER__DEVICE_TYPE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF1_0_HEADER__HEADER_TYPE_MASK 0x7FL
++#define BIF_CFG_DEV0_EPF0_VF1_0_HEADER__DEVICE_TYPE_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF1_0_BIST
++#define BIF_CFG_DEV0_EPF0_VF1_0_BIST__BIST_COMP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_BIST__BIST_STRT__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF1_0_BIST__BIST_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF1_0_BIST__BIST_COMP_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF1_0_BIST__BIST_STRT_MASK 0x40L
++#define BIF_CFG_DEV0_EPF0_VF1_0_BIST__BIST_CAP_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_1
++#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_2
++#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_3
++#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_4
++#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_5
++#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_6
++#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_ADAPTER_ID
++#define BIF_CFG_DEV0_EPF0_VF1_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF1_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF1_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_VF1_0_ROM_BASE_ADDR
++#define BIF_CFG_DEV0_EPF0_VF1_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_CAP_PTR
++#define BIF_CFG_DEV0_EPF0_VF1_0_CAP_PTR__CAP_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL
++//BIF_CFG_DEV0_EPF0_VF1_0_INTERRUPT_LINE
++#define BIF_CFG_DEV0_EPF0_VF1_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_INTERRUPT_PIN
++#define BIF_CFG_DEV0_EPF0_VF1_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP__VERSION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP__VERSION_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
++//BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
++//BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__LINK_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__LINK_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
++//BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
++//BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP2
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP2__RESERVED__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP2__RESERVED_MASK 0xFE000000L
++//BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
++//BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
++#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
++//BIF_CFG_DEV0_EPF0_VF1_0_SLOT_CAP2
++#define BIF_CFG_DEV0_EPF0_VF1_0_SLOT_CAP2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_SLOT_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF1_0_SLOT_CNTL2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_SLOT_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF1_0_SLOT_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_MSI_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
++//BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_ADDR_LO
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_ADDR_HI
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_DATA
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_MSI_MASK
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MASK__MSI_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_DATA_64
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_MSI_MASK_64
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_MSI_PENDING
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_MSI_PENDING_64
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_MSIX_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF1_0_MSIX_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF1_0_MSIX_TABLE
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF1_0_MSIX_PBA
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_HDR
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC1
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC2
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG1
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG2
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG3
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG1
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG2
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG3
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CAP
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CNTL
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CNTL__STU__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CNTL__STU_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CAP
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CNTL
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf2_bifcfgdecp
++//BIF_CFG_DEV0_EPF0_VF2_0_VENDOR_ID
++#define BIF_CFG_DEV0_EPF0_VF2_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_ID
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_COMMAND
++#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__AD_STEPPING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__SERR_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__FAST_B2B_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__INT_DIS__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__AD_STEPPING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__SERR_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__FAST_B2B_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__INT_DIS_MASK 0x0400L
++//BIF_CFG_DEV0_EPF0_VF2_0_STATUS
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__INT_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__CAP_LIST__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__PCI_66_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__DEVSEL_TIMING__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__INT_STATUS_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__CAP_LIST_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__PCI_66_CAP_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__DEVSEL_TIMING_MASK 0x0600L
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF2_0_REVISION_ID
++#define BIF_CFG_DEV0_EPF0_VF2_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF2_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF2_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_VF2_0_PROG_INTERFACE
++#define BIF_CFG_DEV0_EPF0_VF2_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_SUB_CLASS
++#define BIF_CFG_DEV0_EPF0_VF2_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_BASE_CLASS
++#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_CACHE_LINE
++#define BIF_CFG_DEV0_EPF0_VF2_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_LATENCY
++#define BIF_CFG_DEV0_EPF0_VF2_0_LATENCY__LATENCY_TIMER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_LATENCY__LATENCY_TIMER_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_HEADER
++#define BIF_CFG_DEV0_EPF0_VF2_0_HEADER__HEADER_TYPE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_HEADER__DEVICE_TYPE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF2_0_HEADER__HEADER_TYPE_MASK 0x7FL
++#define BIF_CFG_DEV0_EPF0_VF2_0_HEADER__DEVICE_TYPE_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF2_0_BIST
++#define BIF_CFG_DEV0_EPF0_VF2_0_BIST__BIST_COMP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_BIST__BIST_STRT__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF2_0_BIST__BIST_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF2_0_BIST__BIST_COMP_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF2_0_BIST__BIST_STRT_MASK 0x40L
++#define BIF_CFG_DEV0_EPF0_VF2_0_BIST__BIST_CAP_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_1
++#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_2
++#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_3
++#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_4
++#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_5
++#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_6
++#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_ADAPTER_ID
++#define BIF_CFG_DEV0_EPF0_VF2_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF2_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF2_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_VF2_0_ROM_BASE_ADDR
++#define BIF_CFG_DEV0_EPF0_VF2_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_CAP_PTR
++#define BIF_CFG_DEV0_EPF0_VF2_0_CAP_PTR__CAP_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL
++//BIF_CFG_DEV0_EPF0_VF2_0_INTERRUPT_LINE
++#define BIF_CFG_DEV0_EPF0_VF2_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_INTERRUPT_PIN
++#define BIF_CFG_DEV0_EPF0_VF2_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP__VERSION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP__VERSION_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
++//BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
++//BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__LINK_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__LINK_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
++//BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
++//BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP2
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP2__RESERVED__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP2__RESERVED_MASK 0xFE000000L
++//BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
++//BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
++#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
++//BIF_CFG_DEV0_EPF0_VF2_0_SLOT_CAP2
++#define BIF_CFG_DEV0_EPF0_VF2_0_SLOT_CAP2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_SLOT_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF2_0_SLOT_CNTL2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_SLOT_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF2_0_SLOT_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_MSI_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
++//BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_ADDR_LO
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_ADDR_HI
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_DATA
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_MSI_MASK
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MASK__MSI_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_DATA_64
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_MSI_MASK_64
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_MSI_PENDING
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_MSI_PENDING_64
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_MSIX_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF2_0_MSIX_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF2_0_MSIX_TABLE
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF2_0_MSIX_PBA
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_HDR
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC1
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC2
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG1
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG2
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG3
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG1
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG2
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG3
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CAP
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CNTL
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CNTL__STU__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CNTL__STU_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CAP
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CNTL
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf3_bifcfgdecp
++//BIF_CFG_DEV0_EPF0_VF3_0_VENDOR_ID
++#define BIF_CFG_DEV0_EPF0_VF3_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_ID
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_COMMAND
++#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__AD_STEPPING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__SERR_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__FAST_B2B_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__INT_DIS__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__AD_STEPPING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__SERR_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__FAST_B2B_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__INT_DIS_MASK 0x0400L
++//BIF_CFG_DEV0_EPF0_VF3_0_STATUS
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__INT_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__CAP_LIST__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__PCI_66_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__DEVSEL_TIMING__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__INT_STATUS_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__CAP_LIST_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__PCI_66_CAP_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__DEVSEL_TIMING_MASK 0x0600L
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF3_0_REVISION_ID
++#define BIF_CFG_DEV0_EPF0_VF3_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF3_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF3_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_VF3_0_PROG_INTERFACE
++#define BIF_CFG_DEV0_EPF0_VF3_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_SUB_CLASS
++#define BIF_CFG_DEV0_EPF0_VF3_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_BASE_CLASS
++#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_CACHE_LINE
++#define BIF_CFG_DEV0_EPF0_VF3_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_LATENCY
++#define BIF_CFG_DEV0_EPF0_VF3_0_LATENCY__LATENCY_TIMER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_LATENCY__LATENCY_TIMER_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_HEADER
++#define BIF_CFG_DEV0_EPF0_VF3_0_HEADER__HEADER_TYPE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_HEADER__DEVICE_TYPE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF3_0_HEADER__HEADER_TYPE_MASK 0x7FL
++#define BIF_CFG_DEV0_EPF0_VF3_0_HEADER__DEVICE_TYPE_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF3_0_BIST
++#define BIF_CFG_DEV0_EPF0_VF3_0_BIST__BIST_COMP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_BIST__BIST_STRT__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF3_0_BIST__BIST_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF3_0_BIST__BIST_COMP_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF3_0_BIST__BIST_STRT_MASK 0x40L
++#define BIF_CFG_DEV0_EPF0_VF3_0_BIST__BIST_CAP_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_1
++#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_2
++#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_3
++#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_4
++#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_5
++#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_6
++#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_ADAPTER_ID
++#define BIF_CFG_DEV0_EPF0_VF3_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF3_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF3_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_VF3_0_ROM_BASE_ADDR
++#define BIF_CFG_DEV0_EPF0_VF3_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_CAP_PTR
++#define BIF_CFG_DEV0_EPF0_VF3_0_CAP_PTR__CAP_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL
++//BIF_CFG_DEV0_EPF0_VF3_0_INTERRUPT_LINE
++#define BIF_CFG_DEV0_EPF0_VF3_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_INTERRUPT_PIN
++#define BIF_CFG_DEV0_EPF0_VF3_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP__VERSION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP__VERSION_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
++//BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
++//BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__LINK_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__LINK_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
++//BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
++//BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP2
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP2__RESERVED__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP2__RESERVED_MASK 0xFE000000L
++//BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
++//BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
++#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
++//BIF_CFG_DEV0_EPF0_VF3_0_SLOT_CAP2
++#define BIF_CFG_DEV0_EPF0_VF3_0_SLOT_CAP2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_SLOT_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF3_0_SLOT_CNTL2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_SLOT_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF3_0_SLOT_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_MSI_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
++//BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_ADDR_LO
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_ADDR_HI
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_DATA
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_MSI_MASK
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MASK__MSI_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_DATA_64
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_MSI_MASK_64
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_MSI_PENDING
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_MSI_PENDING_64
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_MSIX_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF3_0_MSIX_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF3_0_MSIX_TABLE
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF3_0_MSIX_PBA
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_HDR
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC1
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC2
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG1
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG2
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG3
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG1
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG2
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG3
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CAP
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CNTL
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CNTL__STU__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CNTL__STU_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CAP
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CNTL
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf4_bifcfgdecp
++//BIF_CFG_DEV0_EPF0_VF4_0_VENDOR_ID
++#define BIF_CFG_DEV0_EPF0_VF4_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_ID
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_COMMAND
++#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__AD_STEPPING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__SERR_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__FAST_B2B_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__INT_DIS__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__AD_STEPPING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__SERR_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__FAST_B2B_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__INT_DIS_MASK 0x0400L
++//BIF_CFG_DEV0_EPF0_VF4_0_STATUS
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__INT_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__CAP_LIST__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__PCI_66_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__DEVSEL_TIMING__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__INT_STATUS_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__CAP_LIST_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__PCI_66_CAP_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__DEVSEL_TIMING_MASK 0x0600L
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF4_0_REVISION_ID
++#define BIF_CFG_DEV0_EPF0_VF4_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF4_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF4_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_VF4_0_PROG_INTERFACE
++#define BIF_CFG_DEV0_EPF0_VF4_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_SUB_CLASS
++#define BIF_CFG_DEV0_EPF0_VF4_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_BASE_CLASS
++#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_CACHE_LINE
++#define BIF_CFG_DEV0_EPF0_VF4_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_LATENCY
++#define BIF_CFG_DEV0_EPF0_VF4_0_LATENCY__LATENCY_TIMER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_LATENCY__LATENCY_TIMER_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_HEADER
++#define BIF_CFG_DEV0_EPF0_VF4_0_HEADER__HEADER_TYPE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_HEADER__DEVICE_TYPE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF4_0_HEADER__HEADER_TYPE_MASK 0x7FL
++#define BIF_CFG_DEV0_EPF0_VF4_0_HEADER__DEVICE_TYPE_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF4_0_BIST
++#define BIF_CFG_DEV0_EPF0_VF4_0_BIST__BIST_COMP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_BIST__BIST_STRT__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF4_0_BIST__BIST_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF4_0_BIST__BIST_COMP_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF4_0_BIST__BIST_STRT_MASK 0x40L
++#define BIF_CFG_DEV0_EPF0_VF4_0_BIST__BIST_CAP_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_1
++#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_2
++#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_3
++#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_4
++#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_5
++#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_6
++#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_ADAPTER_ID
++#define BIF_CFG_DEV0_EPF0_VF4_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF4_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF4_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_VF4_0_ROM_BASE_ADDR
++#define BIF_CFG_DEV0_EPF0_VF4_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_CAP_PTR
++#define BIF_CFG_DEV0_EPF0_VF4_0_CAP_PTR__CAP_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL
++//BIF_CFG_DEV0_EPF0_VF4_0_INTERRUPT_LINE
++#define BIF_CFG_DEV0_EPF0_VF4_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_INTERRUPT_PIN
++#define BIF_CFG_DEV0_EPF0_VF4_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP__VERSION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP__VERSION_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
++//BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
++//BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__LINK_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__LINK_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
++//BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
++//BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP2
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP2__RESERVED__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP2__RESERVED_MASK 0xFE000000L
++//BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
++//BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
++#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
++//BIF_CFG_DEV0_EPF0_VF4_0_SLOT_CAP2
++#define BIF_CFG_DEV0_EPF0_VF4_0_SLOT_CAP2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_SLOT_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF4_0_SLOT_CNTL2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_SLOT_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF4_0_SLOT_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_MSI_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
++//BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_ADDR_LO
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_ADDR_HI
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_DATA
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_MSI_MASK
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MASK__MSI_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_DATA_64
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_MSI_MASK_64
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_MSI_PENDING
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_MSI_PENDING_64
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_MSIX_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF4_0_MSIX_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF4_0_MSIX_TABLE
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF4_0_MSIX_PBA
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_HDR
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC1
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC2
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG1
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG2
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG3
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG1
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG2
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG3
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CAP
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CNTL
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CNTL__STU__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CNTL__STU_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CAP
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CNTL
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf5_bifcfgdecp
++//BIF_CFG_DEV0_EPF0_VF5_0_VENDOR_ID
++#define BIF_CFG_DEV0_EPF0_VF5_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_ID
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_COMMAND
++#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__AD_STEPPING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__SERR_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__FAST_B2B_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__INT_DIS__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__AD_STEPPING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__SERR_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__FAST_B2B_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__INT_DIS_MASK 0x0400L
++//BIF_CFG_DEV0_EPF0_VF5_0_STATUS
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__INT_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__CAP_LIST__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__PCI_66_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__DEVSEL_TIMING__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__INT_STATUS_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__CAP_LIST_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__PCI_66_CAP_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__DEVSEL_TIMING_MASK 0x0600L
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF5_0_REVISION_ID
++#define BIF_CFG_DEV0_EPF0_VF5_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF5_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF5_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_VF5_0_PROG_INTERFACE
++#define BIF_CFG_DEV0_EPF0_VF5_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_SUB_CLASS
++#define BIF_CFG_DEV0_EPF0_VF5_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_BASE_CLASS
++#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_CACHE_LINE
++#define BIF_CFG_DEV0_EPF0_VF5_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_LATENCY
++#define BIF_CFG_DEV0_EPF0_VF5_0_LATENCY__LATENCY_TIMER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_LATENCY__LATENCY_TIMER_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_HEADER
++#define BIF_CFG_DEV0_EPF0_VF5_0_HEADER__HEADER_TYPE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_HEADER__DEVICE_TYPE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF5_0_HEADER__HEADER_TYPE_MASK 0x7FL
++#define BIF_CFG_DEV0_EPF0_VF5_0_HEADER__DEVICE_TYPE_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF5_0_BIST
++#define BIF_CFG_DEV0_EPF0_VF5_0_BIST__BIST_COMP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_BIST__BIST_STRT__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF5_0_BIST__BIST_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF5_0_BIST__BIST_COMP_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF5_0_BIST__BIST_STRT_MASK 0x40L
++#define BIF_CFG_DEV0_EPF0_VF5_0_BIST__BIST_CAP_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_1
++#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_2
++#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_3
++#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_4
++#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_5
++#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_6
++#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_ADAPTER_ID
++#define BIF_CFG_DEV0_EPF0_VF5_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF5_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF5_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_VF5_0_ROM_BASE_ADDR
++#define BIF_CFG_DEV0_EPF0_VF5_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_CAP_PTR
++#define BIF_CFG_DEV0_EPF0_VF5_0_CAP_PTR__CAP_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL
++//BIF_CFG_DEV0_EPF0_VF5_0_INTERRUPT_LINE
++#define BIF_CFG_DEV0_EPF0_VF5_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_INTERRUPT_PIN
++#define BIF_CFG_DEV0_EPF0_VF5_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP__VERSION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP__VERSION_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
++//BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
++//BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__LINK_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__LINK_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
++//BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
++//BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP2
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP2__RESERVED__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP2__RESERVED_MASK 0xFE000000L
++//BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
++//BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
++#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
++//BIF_CFG_DEV0_EPF0_VF5_0_SLOT_CAP2
++#define BIF_CFG_DEV0_EPF0_VF5_0_SLOT_CAP2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_SLOT_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF5_0_SLOT_CNTL2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_SLOT_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF5_0_SLOT_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_MSI_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
++//BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_ADDR_LO
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_ADDR_HI
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_DATA
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_MSI_MASK
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MASK__MSI_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_DATA_64
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_MSI_MASK_64
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_MSI_PENDING
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_MSI_PENDING_64
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_MSIX_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF5_0_MSIX_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF5_0_MSIX_TABLE
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF5_0_MSIX_PBA
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_HDR
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC1
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC2
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG1
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG2
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG3
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG1
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG2
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG3
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CAP
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CNTL
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CNTL__STU__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CNTL__STU_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CAP
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CNTL
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf6_bifcfgdecp
++//BIF_CFG_DEV0_EPF0_VF6_0_VENDOR_ID
++#define BIF_CFG_DEV0_EPF0_VF6_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_ID
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_COMMAND
++#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__AD_STEPPING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__SERR_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__FAST_B2B_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__INT_DIS__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__AD_STEPPING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__SERR_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__FAST_B2B_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__INT_DIS_MASK 0x0400L
++//BIF_CFG_DEV0_EPF0_VF6_0_STATUS
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__INT_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__CAP_LIST__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__PCI_66_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__DEVSEL_TIMING__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__INT_STATUS_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__CAP_LIST_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__PCI_66_CAP_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__DEVSEL_TIMING_MASK 0x0600L
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF6_0_REVISION_ID
++#define BIF_CFG_DEV0_EPF0_VF6_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF6_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF6_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_VF6_0_PROG_INTERFACE
++#define BIF_CFG_DEV0_EPF0_VF6_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_SUB_CLASS
++#define BIF_CFG_DEV0_EPF0_VF6_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_BASE_CLASS
++#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_CACHE_LINE
++#define BIF_CFG_DEV0_EPF0_VF6_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_LATENCY
++#define BIF_CFG_DEV0_EPF0_VF6_0_LATENCY__LATENCY_TIMER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_LATENCY__LATENCY_TIMER_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_HEADER
++#define BIF_CFG_DEV0_EPF0_VF6_0_HEADER__HEADER_TYPE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_HEADER__DEVICE_TYPE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF6_0_HEADER__HEADER_TYPE_MASK 0x7FL
++#define BIF_CFG_DEV0_EPF0_VF6_0_HEADER__DEVICE_TYPE_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF6_0_BIST
++#define BIF_CFG_DEV0_EPF0_VF6_0_BIST__BIST_COMP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_BIST__BIST_STRT__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF6_0_BIST__BIST_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF6_0_BIST__BIST_COMP_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF6_0_BIST__BIST_STRT_MASK 0x40L
++#define BIF_CFG_DEV0_EPF0_VF6_0_BIST__BIST_CAP_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_1
++#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_2
++#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_3
++#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_4
++#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_5
++#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_6
++#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_ADAPTER_ID
++#define BIF_CFG_DEV0_EPF0_VF6_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF6_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF6_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_VF6_0_ROM_BASE_ADDR
++#define BIF_CFG_DEV0_EPF0_VF6_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_CAP_PTR
++#define BIF_CFG_DEV0_EPF0_VF6_0_CAP_PTR__CAP_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL
++//BIF_CFG_DEV0_EPF0_VF6_0_INTERRUPT_LINE
++#define BIF_CFG_DEV0_EPF0_VF6_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_INTERRUPT_PIN
++#define BIF_CFG_DEV0_EPF0_VF6_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP__VERSION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP__VERSION_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
++//BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
++//BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__LINK_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__LINK_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
++//BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
++//BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP2
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP2__RESERVED__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP2__RESERVED_MASK 0xFE000000L
++//BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
++//BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
++#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
++//BIF_CFG_DEV0_EPF0_VF6_0_SLOT_CAP2
++#define BIF_CFG_DEV0_EPF0_VF6_0_SLOT_CAP2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_SLOT_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF6_0_SLOT_CNTL2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_SLOT_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF6_0_SLOT_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_MSI_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
++//BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_ADDR_LO
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_ADDR_HI
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_DATA
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_MSI_MASK
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MASK__MSI_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_DATA_64
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_MSI_MASK_64
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_MSI_PENDING
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_MSI_PENDING_64
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_MSIX_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF6_0_MSIX_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF6_0_MSIX_TABLE
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF6_0_MSIX_PBA
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_HDR
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC1
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC2
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG1
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG2
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG3
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG1
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG2
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG3
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CAP
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CNTL
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CNTL__STU__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CNTL__STU_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CAP
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CNTL
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf7_bifcfgdecp
++//BIF_CFG_DEV0_EPF0_VF7_0_VENDOR_ID
++#define BIF_CFG_DEV0_EPF0_VF7_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_ID
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_COMMAND
++#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__AD_STEPPING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__SERR_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__FAST_B2B_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__INT_DIS__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__AD_STEPPING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__SERR_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__FAST_B2B_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__INT_DIS_MASK 0x0400L
++//BIF_CFG_DEV0_EPF0_VF7_0_STATUS
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__INT_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__CAP_LIST__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__PCI_66_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__DEVSEL_TIMING__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__INT_STATUS_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__CAP_LIST_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__PCI_66_CAP_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__DEVSEL_TIMING_MASK 0x0600L
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF7_0_REVISION_ID
++#define BIF_CFG_DEV0_EPF0_VF7_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF7_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF7_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_VF7_0_PROG_INTERFACE
++#define BIF_CFG_DEV0_EPF0_VF7_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_SUB_CLASS
++#define BIF_CFG_DEV0_EPF0_VF7_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_BASE_CLASS
++#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_CACHE_LINE
++#define BIF_CFG_DEV0_EPF0_VF7_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_LATENCY
++#define BIF_CFG_DEV0_EPF0_VF7_0_LATENCY__LATENCY_TIMER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_LATENCY__LATENCY_TIMER_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_HEADER
++#define BIF_CFG_DEV0_EPF0_VF7_0_HEADER__HEADER_TYPE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_HEADER__DEVICE_TYPE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF7_0_HEADER__HEADER_TYPE_MASK 0x7FL
++#define BIF_CFG_DEV0_EPF0_VF7_0_HEADER__DEVICE_TYPE_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF7_0_BIST
++#define BIF_CFG_DEV0_EPF0_VF7_0_BIST__BIST_COMP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_BIST__BIST_STRT__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF7_0_BIST__BIST_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF7_0_BIST__BIST_COMP_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF7_0_BIST__BIST_STRT_MASK 0x40L
++#define BIF_CFG_DEV0_EPF0_VF7_0_BIST__BIST_CAP_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_1
++#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_2
++#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_3
++#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_4
++#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_5
++#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_6
++#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_ADAPTER_ID
++#define BIF_CFG_DEV0_EPF0_VF7_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF7_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF7_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_VF7_0_ROM_BASE_ADDR
++#define BIF_CFG_DEV0_EPF0_VF7_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_CAP_PTR
++#define BIF_CFG_DEV0_EPF0_VF7_0_CAP_PTR__CAP_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL
++//BIF_CFG_DEV0_EPF0_VF7_0_INTERRUPT_LINE
++#define BIF_CFG_DEV0_EPF0_VF7_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_INTERRUPT_PIN
++#define BIF_CFG_DEV0_EPF0_VF7_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP__VERSION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP__VERSION_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
++//BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
++//BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__LINK_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__LINK_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
++//BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
++//BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP2
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP2__RESERVED__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP2__RESERVED_MASK 0xFE000000L
++//BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
++//BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
++#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
++//BIF_CFG_DEV0_EPF0_VF7_0_SLOT_CAP2
++#define BIF_CFG_DEV0_EPF0_VF7_0_SLOT_CAP2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_SLOT_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF7_0_SLOT_CNTL2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_SLOT_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF7_0_SLOT_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_MSI_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
++//BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_ADDR_LO
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_ADDR_HI
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_DATA
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_MSI_MASK
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MASK__MSI_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_DATA_64
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_MSI_MASK_64
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_MSI_PENDING
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_MSI_PENDING_64
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_MSIX_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF7_0_MSIX_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF7_0_MSIX_TABLE
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF7_0_MSIX_PBA
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_HDR
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC1
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC2
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG1
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG2
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG3
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG1
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG2
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG3
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CAP
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CNTL
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CNTL__STU__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CNTL__STU_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CAP
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CNTL
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf8_bifcfgdecp
++//BIF_CFG_DEV0_EPF0_VF8_0_VENDOR_ID
++#define BIF_CFG_DEV0_EPF0_VF8_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_ID
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_COMMAND
++#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__AD_STEPPING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__SERR_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__FAST_B2B_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__INT_DIS__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__AD_STEPPING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__SERR_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__FAST_B2B_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__INT_DIS_MASK 0x0400L
++//BIF_CFG_DEV0_EPF0_VF8_0_STATUS
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__INT_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__CAP_LIST__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__PCI_66_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__DEVSEL_TIMING__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__INT_STATUS_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__CAP_LIST_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__PCI_66_CAP_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__DEVSEL_TIMING_MASK 0x0600L
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF8_0_REVISION_ID
++#define BIF_CFG_DEV0_EPF0_VF8_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF8_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF8_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_VF8_0_PROG_INTERFACE
++#define BIF_CFG_DEV0_EPF0_VF8_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_SUB_CLASS
++#define BIF_CFG_DEV0_EPF0_VF8_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_BASE_CLASS
++#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_CACHE_LINE
++#define BIF_CFG_DEV0_EPF0_VF8_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_LATENCY
++#define BIF_CFG_DEV0_EPF0_VF8_0_LATENCY__LATENCY_TIMER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_LATENCY__LATENCY_TIMER_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_HEADER
++#define BIF_CFG_DEV0_EPF0_VF8_0_HEADER__HEADER_TYPE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_HEADER__DEVICE_TYPE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF8_0_HEADER__HEADER_TYPE_MASK 0x7FL
++#define BIF_CFG_DEV0_EPF0_VF8_0_HEADER__DEVICE_TYPE_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF8_0_BIST
++#define BIF_CFG_DEV0_EPF0_VF8_0_BIST__BIST_COMP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_BIST__BIST_STRT__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF8_0_BIST__BIST_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF8_0_BIST__BIST_COMP_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF8_0_BIST__BIST_STRT_MASK 0x40L
++#define BIF_CFG_DEV0_EPF0_VF8_0_BIST__BIST_CAP_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_1
++#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_2
++#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_3
++#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_4
++#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_5
++#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_6
++#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_ADAPTER_ID
++#define BIF_CFG_DEV0_EPF0_VF8_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF8_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF8_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_VF8_0_ROM_BASE_ADDR
++#define BIF_CFG_DEV0_EPF0_VF8_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_CAP_PTR
++#define BIF_CFG_DEV0_EPF0_VF8_0_CAP_PTR__CAP_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL
++//BIF_CFG_DEV0_EPF0_VF8_0_INTERRUPT_LINE
++#define BIF_CFG_DEV0_EPF0_VF8_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_INTERRUPT_PIN
++#define BIF_CFG_DEV0_EPF0_VF8_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP__VERSION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP__VERSION_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
++//BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
++//BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__LINK_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__LINK_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
++//BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
++//BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP2
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP2__RESERVED__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP2__RESERVED_MASK 0xFE000000L
++//BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
++//BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
++#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
++//BIF_CFG_DEV0_EPF0_VF8_0_SLOT_CAP2
++#define BIF_CFG_DEV0_EPF0_VF8_0_SLOT_CAP2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_SLOT_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF8_0_SLOT_CNTL2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_SLOT_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF8_0_SLOT_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_MSI_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
++//BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_ADDR_LO
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_ADDR_HI
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_DATA
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_MSI_MASK
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MASK__MSI_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_DATA_64
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_MSI_MASK_64
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_MSI_PENDING
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_MSI_PENDING_64
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_MSIX_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF8_0_MSIX_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF8_0_MSIX_TABLE
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF8_0_MSIX_PBA
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_HDR
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC1
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC2
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG1
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG2
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG3
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG1
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG2
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG3
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CAP
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CNTL
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CNTL__STU__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CNTL__STU_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CAP
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CNTL
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf9_bifcfgdecp
++//BIF_CFG_DEV0_EPF0_VF9_0_VENDOR_ID
++#define BIF_CFG_DEV0_EPF0_VF9_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_ID
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_COMMAND
++#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__AD_STEPPING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__SERR_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__FAST_B2B_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__INT_DIS__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__AD_STEPPING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__SERR_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__FAST_B2B_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__INT_DIS_MASK 0x0400L
++//BIF_CFG_DEV0_EPF0_VF9_0_STATUS
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__INT_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__CAP_LIST__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__PCI_66_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__DEVSEL_TIMING__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__INT_STATUS_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__CAP_LIST_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__PCI_66_CAP_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__DEVSEL_TIMING_MASK 0x0600L
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF9_0_REVISION_ID
++#define BIF_CFG_DEV0_EPF0_VF9_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF9_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF9_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_VF9_0_PROG_INTERFACE
++#define BIF_CFG_DEV0_EPF0_VF9_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_SUB_CLASS
++#define BIF_CFG_DEV0_EPF0_VF9_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_BASE_CLASS
++#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_CACHE_LINE
++#define BIF_CFG_DEV0_EPF0_VF9_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_LATENCY
++#define BIF_CFG_DEV0_EPF0_VF9_0_LATENCY__LATENCY_TIMER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_LATENCY__LATENCY_TIMER_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_HEADER
++#define BIF_CFG_DEV0_EPF0_VF9_0_HEADER__HEADER_TYPE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_HEADER__DEVICE_TYPE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF9_0_HEADER__HEADER_TYPE_MASK 0x7FL
++#define BIF_CFG_DEV0_EPF0_VF9_0_HEADER__DEVICE_TYPE_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF9_0_BIST
++#define BIF_CFG_DEV0_EPF0_VF9_0_BIST__BIST_COMP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_BIST__BIST_STRT__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF9_0_BIST__BIST_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF9_0_BIST__BIST_COMP_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF9_0_BIST__BIST_STRT_MASK 0x40L
++#define BIF_CFG_DEV0_EPF0_VF9_0_BIST__BIST_CAP_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_1
++#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_2
++#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_3
++#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_4
++#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_5
++#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_6
++#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_ADAPTER_ID
++#define BIF_CFG_DEV0_EPF0_VF9_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF9_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF9_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_VF9_0_ROM_BASE_ADDR
++#define BIF_CFG_DEV0_EPF0_VF9_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_CAP_PTR
++#define BIF_CFG_DEV0_EPF0_VF9_0_CAP_PTR__CAP_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL
++//BIF_CFG_DEV0_EPF0_VF9_0_INTERRUPT_LINE
++#define BIF_CFG_DEV0_EPF0_VF9_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_INTERRUPT_PIN
++#define BIF_CFG_DEV0_EPF0_VF9_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP__VERSION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP__VERSION_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
++//BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
++//BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__LINK_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__LINK_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
++//BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
++//BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP2
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP2__RESERVED__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP2__RESERVED_MASK 0xFE000000L
++//BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
++//BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
++#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
++//BIF_CFG_DEV0_EPF0_VF9_0_SLOT_CAP2
++#define BIF_CFG_DEV0_EPF0_VF9_0_SLOT_CAP2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_SLOT_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF9_0_SLOT_CNTL2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_SLOT_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF9_0_SLOT_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_MSI_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
++//BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_ADDR_LO
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_ADDR_HI
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_DATA
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_MSI_MASK
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MASK__MSI_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_DATA_64
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_MSI_MASK_64
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_MSI_PENDING
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_MSI_PENDING_64
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_MSIX_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF9_0_MSIX_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF9_0_MSIX_TABLE
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF9_0_MSIX_PBA
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_HDR
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC1
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC2
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG1
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG2
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG3
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG1
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG2
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG3
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CAP
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CNTL
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CNTL__STU__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CNTL__STU_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CAP
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CNTL
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf10_bifcfgdecp
++//BIF_CFG_DEV0_EPF0_VF10_0_VENDOR_ID
++#define BIF_CFG_DEV0_EPF0_VF10_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_ID
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_COMMAND
++#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__AD_STEPPING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__SERR_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__FAST_B2B_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__INT_DIS__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__AD_STEPPING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__SERR_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__FAST_B2B_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__INT_DIS_MASK 0x0400L
++//BIF_CFG_DEV0_EPF0_VF10_0_STATUS
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__INT_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__CAP_LIST__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__PCI_66_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__DEVSEL_TIMING__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__INT_STATUS_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__CAP_LIST_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__PCI_66_CAP_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__DEVSEL_TIMING_MASK 0x0600L
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF10_0_REVISION_ID
++#define BIF_CFG_DEV0_EPF0_VF10_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF10_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF10_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_VF10_0_PROG_INTERFACE
++#define BIF_CFG_DEV0_EPF0_VF10_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_SUB_CLASS
++#define BIF_CFG_DEV0_EPF0_VF10_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_BASE_CLASS
++#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_CACHE_LINE
++#define BIF_CFG_DEV0_EPF0_VF10_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_LATENCY
++#define BIF_CFG_DEV0_EPF0_VF10_0_LATENCY__LATENCY_TIMER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_LATENCY__LATENCY_TIMER_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_HEADER
++#define BIF_CFG_DEV0_EPF0_VF10_0_HEADER__HEADER_TYPE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_HEADER__DEVICE_TYPE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF10_0_HEADER__HEADER_TYPE_MASK 0x7FL
++#define BIF_CFG_DEV0_EPF0_VF10_0_HEADER__DEVICE_TYPE_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF10_0_BIST
++#define BIF_CFG_DEV0_EPF0_VF10_0_BIST__BIST_COMP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_BIST__BIST_STRT__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF10_0_BIST__BIST_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF10_0_BIST__BIST_COMP_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF10_0_BIST__BIST_STRT_MASK 0x40L
++#define BIF_CFG_DEV0_EPF0_VF10_0_BIST__BIST_CAP_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_1
++#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_2
++#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_3
++#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_4
++#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_5
++#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_6
++#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_ADAPTER_ID
++#define BIF_CFG_DEV0_EPF0_VF10_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF10_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF10_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_VF10_0_ROM_BASE_ADDR
++#define BIF_CFG_DEV0_EPF0_VF10_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_CAP_PTR
++#define BIF_CFG_DEV0_EPF0_VF10_0_CAP_PTR__CAP_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL
++//BIF_CFG_DEV0_EPF0_VF10_0_INTERRUPT_LINE
++#define BIF_CFG_DEV0_EPF0_VF10_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_INTERRUPT_PIN
++#define BIF_CFG_DEV0_EPF0_VF10_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP__VERSION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP__VERSION_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
++//BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
++//BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__LINK_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__LINK_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
++//BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
++//BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP2
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP2__RESERVED__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP2__RESERVED_MASK 0xFE000000L
++//BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
++//BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
++#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
++//BIF_CFG_DEV0_EPF0_VF10_0_SLOT_CAP2
++#define BIF_CFG_DEV0_EPF0_VF10_0_SLOT_CAP2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_SLOT_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF10_0_SLOT_CNTL2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_SLOT_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF10_0_SLOT_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_MSI_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
++//BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_ADDR_LO
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_ADDR_HI
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_DATA
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_MSI_MASK
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MASK__MSI_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_DATA_64
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_MSI_MASK_64
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_MSI_PENDING
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_MSI_PENDING_64
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_MSIX_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF10_0_MSIX_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF10_0_MSIX_TABLE
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF10_0_MSIX_PBA
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_HDR
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC1
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC2
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG1
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG2
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG3
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG1
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG2
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG3
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CAP
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CNTL
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CNTL__STU__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CNTL__STU_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CAP
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CNTL
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf11_bifcfgdecp
++//BIF_CFG_DEV0_EPF0_VF11_0_VENDOR_ID
++#define BIF_CFG_DEV0_EPF0_VF11_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_ID
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_COMMAND
++#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__AD_STEPPING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__SERR_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__FAST_B2B_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__INT_DIS__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__AD_STEPPING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__SERR_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__FAST_B2B_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__INT_DIS_MASK 0x0400L
++//BIF_CFG_DEV0_EPF0_VF11_0_STATUS
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__INT_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__CAP_LIST__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__PCI_66_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__DEVSEL_TIMING__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__INT_STATUS_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__CAP_LIST_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__PCI_66_CAP_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__DEVSEL_TIMING_MASK 0x0600L
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF11_0_REVISION_ID
++#define BIF_CFG_DEV0_EPF0_VF11_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF11_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF11_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_VF11_0_PROG_INTERFACE
++#define BIF_CFG_DEV0_EPF0_VF11_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_SUB_CLASS
++#define BIF_CFG_DEV0_EPF0_VF11_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_BASE_CLASS
++#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_CACHE_LINE
++#define BIF_CFG_DEV0_EPF0_VF11_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_LATENCY
++#define BIF_CFG_DEV0_EPF0_VF11_0_LATENCY__LATENCY_TIMER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_LATENCY__LATENCY_TIMER_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_HEADER
++#define BIF_CFG_DEV0_EPF0_VF11_0_HEADER__HEADER_TYPE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_HEADER__DEVICE_TYPE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF11_0_HEADER__HEADER_TYPE_MASK 0x7FL
++#define BIF_CFG_DEV0_EPF0_VF11_0_HEADER__DEVICE_TYPE_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF11_0_BIST
++#define BIF_CFG_DEV0_EPF0_VF11_0_BIST__BIST_COMP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_BIST__BIST_STRT__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF11_0_BIST__BIST_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF11_0_BIST__BIST_COMP_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF11_0_BIST__BIST_STRT_MASK 0x40L
++#define BIF_CFG_DEV0_EPF0_VF11_0_BIST__BIST_CAP_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_1
++#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_2
++#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_3
++#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_4
++#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_5
++#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_6
++#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_ADAPTER_ID
++#define BIF_CFG_DEV0_EPF0_VF11_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF11_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF11_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_VF11_0_ROM_BASE_ADDR
++#define BIF_CFG_DEV0_EPF0_VF11_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_CAP_PTR
++#define BIF_CFG_DEV0_EPF0_VF11_0_CAP_PTR__CAP_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL
++//BIF_CFG_DEV0_EPF0_VF11_0_INTERRUPT_LINE
++#define BIF_CFG_DEV0_EPF0_VF11_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_INTERRUPT_PIN
++#define BIF_CFG_DEV0_EPF0_VF11_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP__VERSION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP__VERSION_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
++//BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
++//BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__LINK_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__LINK_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
++//BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
++//BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP2
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP2__RESERVED__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP2__RESERVED_MASK 0xFE000000L
++//BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
++//BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
++#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
++//BIF_CFG_DEV0_EPF0_VF11_0_SLOT_CAP2
++#define BIF_CFG_DEV0_EPF0_VF11_0_SLOT_CAP2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_SLOT_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF11_0_SLOT_CNTL2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_SLOT_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF11_0_SLOT_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_MSI_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
++//BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_ADDR_LO
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_ADDR_HI
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_DATA
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_MSI_MASK
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MASK__MSI_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_DATA_64
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_MSI_MASK_64
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_MSI_PENDING
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_MSI_PENDING_64
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_MSIX_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF11_0_MSIX_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF11_0_MSIX_TABLE
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF11_0_MSIX_PBA
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_HDR
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC1
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC2
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG1
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG2
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG3
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG1
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG2
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG3
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CAP
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CNTL
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CNTL__STU__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CNTL__STU_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CAP
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CNTL
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf12_bifcfgdecp
++//BIF_CFG_DEV0_EPF0_VF12_0_VENDOR_ID
++#define BIF_CFG_DEV0_EPF0_VF12_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_ID
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_COMMAND
++#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__AD_STEPPING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__SERR_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__FAST_B2B_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__INT_DIS__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__AD_STEPPING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__SERR_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__FAST_B2B_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__INT_DIS_MASK 0x0400L
++//BIF_CFG_DEV0_EPF0_VF12_0_STATUS
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__INT_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__CAP_LIST__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__PCI_66_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__DEVSEL_TIMING__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__INT_STATUS_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__CAP_LIST_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__PCI_66_CAP_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__DEVSEL_TIMING_MASK 0x0600L
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF12_0_REVISION_ID
++#define BIF_CFG_DEV0_EPF0_VF12_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF12_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF12_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_VF12_0_PROG_INTERFACE
++#define BIF_CFG_DEV0_EPF0_VF12_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_SUB_CLASS
++#define BIF_CFG_DEV0_EPF0_VF12_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_BASE_CLASS
++#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_CACHE_LINE
++#define BIF_CFG_DEV0_EPF0_VF12_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_LATENCY
++#define BIF_CFG_DEV0_EPF0_VF12_0_LATENCY__LATENCY_TIMER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_LATENCY__LATENCY_TIMER_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_HEADER
++#define BIF_CFG_DEV0_EPF0_VF12_0_HEADER__HEADER_TYPE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_HEADER__DEVICE_TYPE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF12_0_HEADER__HEADER_TYPE_MASK 0x7FL
++#define BIF_CFG_DEV0_EPF0_VF12_0_HEADER__DEVICE_TYPE_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF12_0_BIST
++#define BIF_CFG_DEV0_EPF0_VF12_0_BIST__BIST_COMP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_BIST__BIST_STRT__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF12_0_BIST__BIST_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF12_0_BIST__BIST_COMP_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF12_0_BIST__BIST_STRT_MASK 0x40L
++#define BIF_CFG_DEV0_EPF0_VF12_0_BIST__BIST_CAP_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_1
++#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_2
++#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_3
++#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_4
++#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_5
++#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_6
++#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_ADAPTER_ID
++#define BIF_CFG_DEV0_EPF0_VF12_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF12_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF12_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_VF12_0_ROM_BASE_ADDR
++#define BIF_CFG_DEV0_EPF0_VF12_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_CAP_PTR
++#define BIF_CFG_DEV0_EPF0_VF12_0_CAP_PTR__CAP_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL
++//BIF_CFG_DEV0_EPF0_VF12_0_INTERRUPT_LINE
++#define BIF_CFG_DEV0_EPF0_VF12_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_INTERRUPT_PIN
++#define BIF_CFG_DEV0_EPF0_VF12_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP__VERSION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP__VERSION_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
++//BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
++//BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__LINK_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__LINK_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
++//BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
++//BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP2
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP2__RESERVED__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP2__RESERVED_MASK 0xFE000000L
++//BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
++//BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
++#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
++//BIF_CFG_DEV0_EPF0_VF12_0_SLOT_CAP2
++#define BIF_CFG_DEV0_EPF0_VF12_0_SLOT_CAP2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_SLOT_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF12_0_SLOT_CNTL2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_SLOT_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF12_0_SLOT_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_MSI_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
++//BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_ADDR_LO
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_ADDR_HI
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_DATA
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_MSI_MASK
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MASK__MSI_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_DATA_64
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_MSI_MASK_64
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_MSI_PENDING
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_MSI_PENDING_64
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_MSIX_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF12_0_MSIX_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF12_0_MSIX_TABLE
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF12_0_MSIX_PBA
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_HDR
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC1
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC2
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG1
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG2
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG3
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG1
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG2
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG3
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CAP
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CNTL
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CNTL__STU__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CNTL__STU_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CAP
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CNTL
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf13_bifcfgdecp
++//BIF_CFG_DEV0_EPF0_VF13_0_VENDOR_ID
++#define BIF_CFG_DEV0_EPF0_VF13_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_ID
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_COMMAND
++#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__AD_STEPPING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__SERR_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__FAST_B2B_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__INT_DIS__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__AD_STEPPING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__SERR_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__FAST_B2B_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__INT_DIS_MASK 0x0400L
++//BIF_CFG_DEV0_EPF0_VF13_0_STATUS
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__INT_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__CAP_LIST__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__PCI_66_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__DEVSEL_TIMING__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__INT_STATUS_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__CAP_LIST_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__PCI_66_CAP_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__DEVSEL_TIMING_MASK 0x0600L
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF13_0_REVISION_ID
++#define BIF_CFG_DEV0_EPF0_VF13_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF13_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF13_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_VF13_0_PROG_INTERFACE
++#define BIF_CFG_DEV0_EPF0_VF13_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_SUB_CLASS
++#define BIF_CFG_DEV0_EPF0_VF13_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_BASE_CLASS
++#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_CACHE_LINE
++#define BIF_CFG_DEV0_EPF0_VF13_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_LATENCY
++#define BIF_CFG_DEV0_EPF0_VF13_0_LATENCY__LATENCY_TIMER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_LATENCY__LATENCY_TIMER_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_HEADER
++#define BIF_CFG_DEV0_EPF0_VF13_0_HEADER__HEADER_TYPE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_HEADER__DEVICE_TYPE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF13_0_HEADER__HEADER_TYPE_MASK 0x7FL
++#define BIF_CFG_DEV0_EPF0_VF13_0_HEADER__DEVICE_TYPE_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF13_0_BIST
++#define BIF_CFG_DEV0_EPF0_VF13_0_BIST__BIST_COMP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_BIST__BIST_STRT__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF13_0_BIST__BIST_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF13_0_BIST__BIST_COMP_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF13_0_BIST__BIST_STRT_MASK 0x40L
++#define BIF_CFG_DEV0_EPF0_VF13_0_BIST__BIST_CAP_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_1
++#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_2
++#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_3
++#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_4
++#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_5
++#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_6
++#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_ADAPTER_ID
++#define BIF_CFG_DEV0_EPF0_VF13_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF13_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF13_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_VF13_0_ROM_BASE_ADDR
++#define BIF_CFG_DEV0_EPF0_VF13_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_CAP_PTR
++#define BIF_CFG_DEV0_EPF0_VF13_0_CAP_PTR__CAP_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL
++//BIF_CFG_DEV0_EPF0_VF13_0_INTERRUPT_LINE
++#define BIF_CFG_DEV0_EPF0_VF13_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_INTERRUPT_PIN
++#define BIF_CFG_DEV0_EPF0_VF13_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP__VERSION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP__VERSION_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
++//BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
++//BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__LINK_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__LINK_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
++//BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
++//BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP2
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP2__RESERVED__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP2__RESERVED_MASK 0xFE000000L
++//BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
++//BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
++#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
++//BIF_CFG_DEV0_EPF0_VF13_0_SLOT_CAP2
++#define BIF_CFG_DEV0_EPF0_VF13_0_SLOT_CAP2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_SLOT_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF13_0_SLOT_CNTL2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_SLOT_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF13_0_SLOT_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_MSI_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
++//BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_ADDR_LO
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_ADDR_HI
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_DATA
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_MSI_MASK
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MASK__MSI_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_DATA_64
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_MSI_MASK_64
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_MSI_PENDING
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_MSI_PENDING_64
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_MSIX_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF13_0_MSIX_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF13_0_MSIX_TABLE
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF13_0_MSIX_PBA
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_HDR
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC1
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC2
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG1
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG2
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG3
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG1
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG2
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG3
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CAP
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CNTL
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CNTL__STU__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CNTL__STU_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CAP
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CNTL
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf14_bifcfgdecp
++//BIF_CFG_DEV0_EPF0_VF14_0_VENDOR_ID
++#define BIF_CFG_DEV0_EPF0_VF14_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_ID
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_COMMAND
++#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__AD_STEPPING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__SERR_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__FAST_B2B_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__INT_DIS__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__AD_STEPPING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__SERR_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__FAST_B2B_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__INT_DIS_MASK 0x0400L
++//BIF_CFG_DEV0_EPF0_VF14_0_STATUS
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__INT_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__CAP_LIST__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__PCI_66_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__DEVSEL_TIMING__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__INT_STATUS_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__CAP_LIST_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__PCI_66_CAP_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__DEVSEL_TIMING_MASK 0x0600L
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF14_0_REVISION_ID
++#define BIF_CFG_DEV0_EPF0_VF14_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF14_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF14_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_VF14_0_PROG_INTERFACE
++#define BIF_CFG_DEV0_EPF0_VF14_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_SUB_CLASS
++#define BIF_CFG_DEV0_EPF0_VF14_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_BASE_CLASS
++#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_CACHE_LINE
++#define BIF_CFG_DEV0_EPF0_VF14_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_LATENCY
++#define BIF_CFG_DEV0_EPF0_VF14_0_LATENCY__LATENCY_TIMER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_LATENCY__LATENCY_TIMER_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_HEADER
++#define BIF_CFG_DEV0_EPF0_VF14_0_HEADER__HEADER_TYPE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_HEADER__DEVICE_TYPE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF14_0_HEADER__HEADER_TYPE_MASK 0x7FL
++#define BIF_CFG_DEV0_EPF0_VF14_0_HEADER__DEVICE_TYPE_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF14_0_BIST
++#define BIF_CFG_DEV0_EPF0_VF14_0_BIST__BIST_COMP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_BIST__BIST_STRT__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF14_0_BIST__BIST_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF14_0_BIST__BIST_COMP_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF14_0_BIST__BIST_STRT_MASK 0x40L
++#define BIF_CFG_DEV0_EPF0_VF14_0_BIST__BIST_CAP_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_1
++#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_2
++#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_3
++#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_4
++#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_5
++#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_6
++#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_ADAPTER_ID
++#define BIF_CFG_DEV0_EPF0_VF14_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF14_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF14_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_VF14_0_ROM_BASE_ADDR
++#define BIF_CFG_DEV0_EPF0_VF14_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_CAP_PTR
++#define BIF_CFG_DEV0_EPF0_VF14_0_CAP_PTR__CAP_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL
++//BIF_CFG_DEV0_EPF0_VF14_0_INTERRUPT_LINE
++#define BIF_CFG_DEV0_EPF0_VF14_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_INTERRUPT_PIN
++#define BIF_CFG_DEV0_EPF0_VF14_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP__VERSION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP__VERSION_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
++//BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
++//BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__LINK_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__LINK_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
++//BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
++//BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP2
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP2__RESERVED__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP2__RESERVED_MASK 0xFE000000L
++//BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
++//BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
++#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
++//BIF_CFG_DEV0_EPF0_VF14_0_SLOT_CAP2
++#define BIF_CFG_DEV0_EPF0_VF14_0_SLOT_CAP2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_SLOT_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF14_0_SLOT_CNTL2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_SLOT_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF14_0_SLOT_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_MSI_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
++//BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_ADDR_LO
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_ADDR_HI
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_DATA
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_MSI_MASK
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MASK__MSI_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_DATA_64
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_MSI_MASK_64
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_MSI_PENDING
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_MSI_PENDING_64
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_MSIX_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF14_0_MSIX_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF14_0_MSIX_TABLE
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF14_0_MSIX_PBA
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_HDR
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC1
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC2
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG1
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG2
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG3
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG1
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG2
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG3
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CAP
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CNTL
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CNTL__STU__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CNTL__STU_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CAP
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CNTL
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
++
++
++// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf15_bifcfgdecp
++//BIF_CFG_DEV0_EPF0_VF15_0_VENDOR_ID
++#define BIF_CFG_DEV0_EPF0_VF15_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_ID
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_COMMAND
++#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__AD_STEPPING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__SERR_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__FAST_B2B_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__INT_DIS__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__AD_STEPPING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__SERR_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__FAST_B2B_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__INT_DIS_MASK 0x0400L
++//BIF_CFG_DEV0_EPF0_VF15_0_STATUS
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__INT_STATUS__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__CAP_LIST__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__PCI_66_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__DEVSEL_TIMING__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__INT_STATUS_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__CAP_LIST_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__PCI_66_CAP_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__DEVSEL_TIMING_MASK 0x0600L
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF15_0_REVISION_ID
++#define BIF_CFG_DEV0_EPF0_VF15_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF15_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF15_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
++//BIF_CFG_DEV0_EPF0_VF15_0_PROG_INTERFACE
++#define BIF_CFG_DEV0_EPF0_VF15_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_SUB_CLASS
++#define BIF_CFG_DEV0_EPF0_VF15_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_BASE_CLASS
++#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_CACHE_LINE
++#define BIF_CFG_DEV0_EPF0_VF15_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_LATENCY
++#define BIF_CFG_DEV0_EPF0_VF15_0_LATENCY__LATENCY_TIMER__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_LATENCY__LATENCY_TIMER_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_HEADER
++#define BIF_CFG_DEV0_EPF0_VF15_0_HEADER__HEADER_TYPE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_HEADER__DEVICE_TYPE__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF15_0_HEADER__HEADER_TYPE_MASK 0x7FL
++#define BIF_CFG_DEV0_EPF0_VF15_0_HEADER__DEVICE_TYPE_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF15_0_BIST
++#define BIF_CFG_DEV0_EPF0_VF15_0_BIST__BIST_COMP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_BIST__BIST_STRT__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF15_0_BIST__BIST_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF15_0_BIST__BIST_COMP_MASK 0x0FL
++#define BIF_CFG_DEV0_EPF0_VF15_0_BIST__BIST_STRT_MASK 0x40L
++#define BIF_CFG_DEV0_EPF0_VF15_0_BIST__BIST_CAP_MASK 0x80L
++//BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_1
++#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_2
++#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_3
++#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_4
++#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_5
++#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_6
++#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_ADAPTER_ID
++#define BIF_CFG_DEV0_EPF0_VF15_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF15_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF15_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
++//BIF_CFG_DEV0_EPF0_VF15_0_ROM_BASE_ADDR
++#define BIF_CFG_DEV0_EPF0_VF15_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_CAP_PTR
++#define BIF_CFG_DEV0_EPF0_VF15_0_CAP_PTR__CAP_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL
++//BIF_CFG_DEV0_EPF0_VF15_0_INTERRUPT_LINE
++#define BIF_CFG_DEV0_EPF0_VF15_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_INTERRUPT_PIN
++#define BIF_CFG_DEV0_EPF0_VF15_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP__VERSION__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP__VERSION_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
++//BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
++//BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
++//BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__LINK_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__LINK_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
++//BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
++//BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP2
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP2__RESERVED__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP2__RESERVED_MASK 0xFE000000L
++//BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
++//BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
++#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
++//BIF_CFG_DEV0_EPF0_VF15_0_SLOT_CAP2
++#define BIF_CFG_DEV0_EPF0_VF15_0_SLOT_CAP2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_SLOT_CNTL2
++#define BIF_CFG_DEV0_EPF0_VF15_0_SLOT_CNTL2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_SLOT_STATUS2
++#define BIF_CFG_DEV0_EPF0_VF15_0_SLOT_STATUS2__RESERVED__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_MSI_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
++//BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_ADDR_LO
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_ADDR_HI
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_DATA
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_MSI_MASK
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MASK__MSI_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_DATA_64
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_MSI_MASK_64
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_MSI_PENDING
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_MSI_PENDING_64
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_MSIX_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF15_0_MSIX_MSG_CNTL
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF15_0_MSIX_TABLE
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF15_0_MSIX_PBA
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
++#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_HDR
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC1
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC2
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG1
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG2
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG3
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG1
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG2
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG3
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CAP
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CNTL
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CNTL__STU__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CNTL__STU_MASK 0x001FL
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_ENH_CAP_LIST
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CAP
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
++//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CNTL
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
++#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
++
++
++// addressBlock: nbio_pcie0_pswusp0_pciedir_p
++//PCIEP_RESERVED
++#define PCIEP_RESERVED__RESERVED__SHIFT 0x0
++#define PCIEP_RESERVED__RESERVED_MASK 0xFFFFFFFFL
++//PCIEP_SCRATCH
++#define PCIEP_SCRATCH__PCIEP_SCRATCH__SHIFT 0x0
++#define PCIEP_SCRATCH__PCIEP_SCRATCH_MASK 0xFFFFFFFFL
++//PCIEP_PORT_CNTL
++#define PCIEP_PORT_CNTL__SLV_PORT_REQ_EN__SHIFT 0x0
++#define PCIEP_PORT_CNTL__CI_SNOOP_OVERRIDE__SHIFT 0x1
++#define PCIEP_PORT_CNTL__HOTPLUG_MSG_EN__SHIFT 0x2
++#define PCIEP_PORT_CNTL__NATIVE_PME_EN__SHIFT 0x3
++#define PCIEP_PORT_CNTL__PWR_FAULT_EN__SHIFT 0x4
++#define PCIEP_PORT_CNTL__PMI_BM_DIS__SHIFT 0x5
++#define PCIEP_PORT_CNTL__CI_SLV_CPL_STATIC_ALLOC_LIMIT_S__SHIFT 0x8
++#define PCIEP_PORT_CNTL__CI_PRIV_MAX_CPL_PAYLOAD_SIZE__SHIFT 0x12
++#define PCIEP_PORT_CNTL__CI_SLV_RSP_POISONED_UR_MODE__SHIFT 0x18
++#define PCIEP_PORT_CNTL__CI_MAX_CPL_PAYLOAD_SIZE_MODE__SHIFT 0x1a
++#define PCIEP_PORT_CNTL__SLV_PORT_REQ_EN_MASK 0x00000001L
++#define PCIEP_PORT_CNTL__CI_SNOOP_OVERRIDE_MASK 0x00000002L
++#define PCIEP_PORT_CNTL__HOTPLUG_MSG_EN_MASK 0x00000004L
++#define PCIEP_PORT_CNTL__NATIVE_PME_EN_MASK 0x00000008L
++#define PCIEP_PORT_CNTL__PWR_FAULT_EN_MASK 0x00000010L
++#define PCIEP_PORT_CNTL__PMI_BM_DIS_MASK 0x00000020L
++#define PCIEP_PORT_CNTL__CI_SLV_CPL_STATIC_ALLOC_LIMIT_S_MASK 0x0003FF00L
++#define PCIEP_PORT_CNTL__CI_PRIV_MAX_CPL_PAYLOAD_SIZE_MASK 0x001C0000L
++#define PCIEP_PORT_CNTL__CI_SLV_RSP_POISONED_UR_MODE_MASK 0x03000000L
++#define PCIEP_PORT_CNTL__CI_MAX_CPL_PAYLOAD_SIZE_MODE_MASK 0x0C000000L
++//PCIE_TX_CNTL
++#define PCIE_TX_CNTL__TX_SNR_OVERRIDE__SHIFT 0xa
++#define PCIE_TX_CNTL__TX_RO_OVERRIDE__SHIFT 0xc
++#define PCIE_TX_CNTL__TX_PACK_PACKET_DIS__SHIFT 0xe
++#define PCIE_TX_CNTL__TX_FLUSH_TLP_DIS__SHIFT 0xf
++#define PCIE_TX_CNTL__TX_CPL_PASS_P__SHIFT 0x14
++#define PCIE_TX_CNTL__TX_NP_PASS_P__SHIFT 0x15
++#define PCIE_TX_CNTL__TX_CLEAR_EXTRA_PM_REQS__SHIFT 0x16
++#define PCIE_TX_CNTL__TX_FC_UPDATE_TIMEOUT_DIS__SHIFT 0x17
++#define PCIE_TX_CNTL__TX_F0_TPH_DIS__SHIFT 0x18
++#define PCIE_TX_CNTL__TX_F1_TPH_DIS__SHIFT 0x19
++#define PCIE_TX_CNTL__TX_F2_TPH_DIS__SHIFT 0x1a
++#define PCIE_TX_CNTL__TX_SNR_OVERRIDE_MASK 0x00000C00L
++#define PCIE_TX_CNTL__TX_RO_OVERRIDE_MASK 0x00003000L
++#define PCIE_TX_CNTL__TX_PACK_PACKET_DIS_MASK 0x00004000L
++#define PCIE_TX_CNTL__TX_FLUSH_TLP_DIS_MASK 0x00008000L
++#define PCIE_TX_CNTL__TX_CPL_PASS_P_MASK 0x00100000L
++#define PCIE_TX_CNTL__TX_NP_PASS_P_MASK 0x00200000L
++#define PCIE_TX_CNTL__TX_CLEAR_EXTRA_PM_REQS_MASK 0x00400000L
++#define PCIE_TX_CNTL__TX_FC_UPDATE_TIMEOUT_DIS_MASK 0x00800000L
++#define PCIE_TX_CNTL__TX_F0_TPH_DIS_MASK 0x01000000L
++#define PCIE_TX_CNTL__TX_F1_TPH_DIS_MASK 0x02000000L
++#define PCIE_TX_CNTL__TX_F2_TPH_DIS_MASK 0x04000000L
++//PCIE_TX_REQUESTER_ID
++#define PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_FUNCTION__SHIFT 0x0
++#define PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_DEVICE__SHIFT 0x3
++#define PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_BUS__SHIFT 0x8
++#define PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_FUNCTION_MASK 0x00000007L
++#define PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_DEVICE_MASK 0x000000F8L
++#define PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_BUS_MASK 0x0000FF00L
++//PCIE_TX_VENDOR_SPECIFIC
++#define PCIE_TX_VENDOR_SPECIFIC__TX_VENDOR_DATA__SHIFT 0x0
++#define PCIE_TX_VENDOR_SPECIFIC__TX_VENDOR_SEND__SHIFT 0x18
++#define PCIE_TX_VENDOR_SPECIFIC__TX_VENDOR_DATA_MASK 0x00FFFFFFL
++#define PCIE_TX_VENDOR_SPECIFIC__TX_VENDOR_SEND_MASK 0x01000000L
++//PCIE_TX_REQUEST_NUM_CNTL
++#define PCIE_TX_REQUEST_NUM_CNTL__TX_NUM_OUTSTANDING_NP__SHIFT 0x18
++#define PCIE_TX_REQUEST_NUM_CNTL__TX_NUM_OUTSTANDING_NP_VC1_EN__SHIFT 0x1e
++#define PCIE_TX_REQUEST_NUM_CNTL__TX_NUM_OUTSTANDING_NP_EN__SHIFT 0x1f
++#define PCIE_TX_REQUEST_NUM_CNTL__TX_NUM_OUTSTANDING_NP_MASK 0x3F000000L
++#define PCIE_TX_REQUEST_NUM_CNTL__TX_NUM_OUTSTANDING_NP_VC1_EN_MASK 0x40000000L
++#define PCIE_TX_REQUEST_NUM_CNTL__TX_NUM_OUTSTANDING_NP_EN_MASK 0x80000000L
++//PCIE_TX_SEQ
++#define PCIE_TX_SEQ__TX_NEXT_TRANSMIT_SEQ__SHIFT 0x0
++#define PCIE_TX_SEQ__TX_ACKD_SEQ__SHIFT 0x10
++#define PCIE_TX_SEQ__TX_NEXT_TRANSMIT_SEQ_MASK 0x00000FFFL
++#define PCIE_TX_SEQ__TX_ACKD_SEQ_MASK 0x0FFF0000L
++//PCIE_TX_REPLAY
++#define PCIE_TX_REPLAY__TX_REPLAY_NUM__SHIFT 0x0
++#define PCIE_TX_REPLAY__TX_REPLAY_TIMER_OVERWRITE__SHIFT 0xf
++#define PCIE_TX_REPLAY__TX_REPLAY_TIMER__SHIFT 0x10
++#define PCIE_TX_REPLAY__TX_REPLAY_NUM_MASK 0x00000007L
++#define PCIE_TX_REPLAY__TX_REPLAY_TIMER_OVERWRITE_MASK 0x00008000L
++#define PCIE_TX_REPLAY__TX_REPLAY_TIMER_MASK 0xFFFF0000L
++//PCIE_TX_ACK_LATENCY_LIMIT
++#define PCIE_TX_ACK_LATENCY_LIMIT__TX_ACK_LATENCY_LIMIT__SHIFT 0x0
++#define PCIE_TX_ACK_LATENCY_LIMIT__TX_ACK_LATENCY_LIMIT_OVERWRITE__SHIFT 0xc
++#define PCIE_TX_ACK_LATENCY_LIMIT__TX_ACK_LATENCY_LIMIT_MASK 0x00000FFFL
++#define PCIE_TX_ACK_LATENCY_LIMIT__TX_ACK_LATENCY_LIMIT_OVERWRITE_MASK 0x00001000L
++//PCIE_TX_NOP_DLLP
++#define PCIE_TX_NOP_DLLP__TX_NOP_DATA__SHIFT 0x0
++#define PCIE_TX_NOP_DLLP__TX_NOP_SEND__SHIFT 0x18
++#define PCIE_TX_NOP_DLLP__TX_NOP_DATA_MASK 0x00FFFFFFL
++#define PCIE_TX_NOP_DLLP__TX_NOP_SEND_MASK 0x01000000L
++//PCIE_TX_CREDITS_ADVT_P
++#define PCIE_TX_CREDITS_ADVT_P__TX_CREDITS_ADVT_PD__SHIFT 0x0
++#define PCIE_TX_CREDITS_ADVT_P__TX_CREDITS_ADVT_PH__SHIFT 0x10
++#define PCIE_TX_CREDITS_ADVT_P__TX_CREDITS_ADVT_PD_MASK 0x00000FFFL
++#define PCIE_TX_CREDITS_ADVT_P__TX_CREDITS_ADVT_PH_MASK 0x00FF0000L
++//PCIE_TX_CREDITS_ADVT_NP
++#define PCIE_TX_CREDITS_ADVT_NP__TX_CREDITS_ADVT_NPD__SHIFT 0x0
++#define PCIE_TX_CREDITS_ADVT_NP__TX_CREDITS_ADVT_NPH__SHIFT 0x10
++#define PCIE_TX_CREDITS_ADVT_NP__TX_CREDITS_ADVT_NPD_MASK 0x00000FFFL
++#define PCIE_TX_CREDITS_ADVT_NP__TX_CREDITS_ADVT_NPH_MASK 0x00FF0000L
++//PCIE_TX_CREDITS_ADVT_CPL
++#define PCIE_TX_CREDITS_ADVT_CPL__TX_CREDITS_ADVT_CPLD__SHIFT 0x0
++#define PCIE_TX_CREDITS_ADVT_CPL__TX_CREDITS_ADVT_CPLH__SHIFT 0x10
++#define PCIE_TX_CREDITS_ADVT_CPL__TX_CREDITS_ADVT_CPLD_MASK 0x00000FFFL
++#define PCIE_TX_CREDITS_ADVT_CPL__TX_CREDITS_ADVT_CPLH_MASK 0x00FF0000L
++//PCIE_TX_CREDITS_INIT_P
++#define PCIE_TX_CREDITS_INIT_P__TX_CREDITS_INIT_PD__SHIFT 0x0
++#define PCIE_TX_CREDITS_INIT_P__TX_CREDITS_INIT_PH__SHIFT 0x10
++#define PCIE_TX_CREDITS_INIT_P__TX_CREDITS_INIT_PD_MASK 0x00000FFFL
++#define PCIE_TX_CREDITS_INIT_P__TX_CREDITS_INIT_PH_MASK 0x00FF0000L
++//PCIE_TX_CREDITS_INIT_NP
++#define PCIE_TX_CREDITS_INIT_NP__TX_CREDITS_INIT_NPD__SHIFT 0x0
++#define PCIE_TX_CREDITS_INIT_NP__TX_CREDITS_INIT_NPH__SHIFT 0x10
++#define PCIE_TX_CREDITS_INIT_NP__TX_CREDITS_INIT_NPD_MASK 0x00000FFFL
++#define PCIE_TX_CREDITS_INIT_NP__TX_CREDITS_INIT_NPH_MASK 0x00FF0000L
++//PCIE_TX_CREDITS_INIT_CPL
++#define PCIE_TX_CREDITS_INIT_CPL__TX_CREDITS_INIT_CPLD__SHIFT 0x0
++#define PCIE_TX_CREDITS_INIT_CPL__TX_CREDITS_INIT_CPLH__SHIFT 0x10
++#define PCIE_TX_CREDITS_INIT_CPL__TX_CREDITS_INIT_CPLD_MASK 0x00000FFFL
++#define PCIE_TX_CREDITS_INIT_CPL__TX_CREDITS_INIT_CPLH_MASK 0x00FF0000L
++//PCIE_TX_CREDITS_STATUS
++#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_PD__SHIFT 0x0
++#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_PH__SHIFT 0x1
++#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_NPD__SHIFT 0x2
++#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_NPH__SHIFT 0x3
++#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_CPLD__SHIFT 0x4
++#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_CPLH__SHIFT 0x5
++#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_PD__SHIFT 0x10
++#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_PH__SHIFT 0x11
++#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_NPD__SHIFT 0x12
++#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_NPH__SHIFT 0x13
++#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_CPLD__SHIFT 0x14
++#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_CPLH__SHIFT 0x15
++#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_PD_MASK 0x00000001L
++#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_PH_MASK 0x00000002L
++#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_NPD_MASK 0x00000004L
++#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_NPH_MASK 0x00000008L
++#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_CPLD_MASK 0x00000010L
++#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_CPLH_MASK 0x00000020L
++#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_PD_MASK 0x00010000L
++#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_PH_MASK 0x00020000L
++#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_NPD_MASK 0x00040000L
++#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_NPH_MASK 0x00080000L
++#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_CPLD_MASK 0x00100000L
++#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_CPLH_MASK 0x00200000L
++//PCIE_TX_CREDITS_FCU_THRESHOLD
++#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_P_VC0__SHIFT 0x0
++#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_NP_VC0__SHIFT 0x4
++#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_CPL_VC0__SHIFT 0x8
++#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_P_VC1__SHIFT 0x10
++#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_NP_VC1__SHIFT 0x14
++#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_CPL_VC1__SHIFT 0x18
++#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_P_VC0_MASK 0x00000007L
++#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_NP_VC0_MASK 0x00000070L
++#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_CPL_VC0_MASK 0x00000700L
++#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_P_VC1_MASK 0x00070000L
++#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_NP_VC1_MASK 0x00700000L
++#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_CPL_VC1_MASK 0x07000000L
++//PCIE_P_PORT_LANE_STATUS
++#define PCIE_P_PORT_LANE_STATUS__PORT_LANE_REVERSAL__SHIFT 0x0
++#define PCIE_P_PORT_LANE_STATUS__PHY_LINK_WIDTH__SHIFT 0x1
++#define PCIE_P_PORT_LANE_STATUS__PORT_LANE_REVERSAL_MASK 0x00000001L
++#define PCIE_P_PORT_LANE_STATUS__PHY_LINK_WIDTH_MASK 0x0000007EL
++//PCIE_FC_P
++#define PCIE_FC_P__PD_CREDITS__SHIFT 0x0
++#define PCIE_FC_P__PH_CREDITS__SHIFT 0x10
++#define PCIE_FC_P__PD_CREDITS_MASK 0x0000FFFFL
++#define PCIE_FC_P__PH_CREDITS_MASK 0x0FFF0000L
++//PCIE_FC_NP
++#define PCIE_FC_NP__NPD_CREDITS__SHIFT 0x0
++#define PCIE_FC_NP__NPH_CREDITS__SHIFT 0x10
++#define PCIE_FC_NP__NPD_CREDITS_MASK 0x0000FFFFL
++#define PCIE_FC_NP__NPH_CREDITS_MASK 0x0FFF0000L
++//PCIE_FC_CPL
++#define PCIE_FC_CPL__CPLD_CREDITS__SHIFT 0x0
++#define PCIE_FC_CPL__CPLH_CREDITS__SHIFT 0x10
++#define PCIE_FC_CPL__CPLD_CREDITS_MASK 0x0000FFFFL
++#define PCIE_FC_CPL__CPLH_CREDITS_MASK 0x0FFF0000L
++//PCIE_FC_P_VC1
++#define PCIE_FC_P_VC1__ADVT_FC_VC1_PD_CREDITS__SHIFT 0x0
++#define PCIE_FC_P_VC1__ADVT_FC_VC1_PH_CREDITS__SHIFT 0x10
++#define PCIE_FC_P_VC1__ADVT_FC_VC1_PD_CREDITS_MASK 0x0000FFFFL
++#define PCIE_FC_P_VC1__ADVT_FC_VC1_PH_CREDITS_MASK 0x0FFF0000L
++//PCIE_FC_NP_VC1
++#define PCIE_FC_NP_VC1__ADVT_FC_VC1_NPD_CREDITS__SHIFT 0x0
++#define PCIE_FC_NP_VC1__ADVT_FC_VC1_NPH_CREDITS__SHIFT 0x10
++#define PCIE_FC_NP_VC1__ADVT_FC_VC1_NPD_CREDITS_MASK 0x0000FFFFL
++#define PCIE_FC_NP_VC1__ADVT_FC_VC1_NPH_CREDITS_MASK 0x0FFF0000L
++//PCIE_FC_CPL_VC1
++#define PCIE_FC_CPL_VC1__ADVT_FC_VC1_CPLD_CREDITS__SHIFT 0x0
++#define PCIE_FC_CPL_VC1__ADVT_FC_VC1_CPLH_CREDITS__SHIFT 0x10
++#define PCIE_FC_CPL_VC1__ADVT_FC_VC1_CPLD_CREDITS_MASK 0x0000FFFFL
++#define PCIE_FC_CPL_VC1__ADVT_FC_VC1_CPLH_CREDITS_MASK 0x0FFF0000L
++//PSWUSP0_PCIE_ERR_CNTL
++#define PSWUSP0_PCIE_ERR_CNTL__ERR_REPORTING_DIS__SHIFT 0x0
++#define PSWUSP0_PCIE_ERR_CNTL__STRAP_FIRST_RCVD_ERR_LOG__SHIFT 0x1
++#define PSWUSP0_PCIE_ERR_CNTL__RX_DROP_ECRC_FAILURES__SHIFT 0x2
++#define PSWUSP0_PCIE_ERR_CNTL__TX_GENERATE_LCRC_ERR__SHIFT 0x4
++#define PSWUSP0_PCIE_ERR_CNTL__RX_GENERATE_LCRC_ERR__SHIFT 0x5
++#define PSWUSP0_PCIE_ERR_CNTL__TX_GENERATE_ECRC_ERR__SHIFT 0x6
++#define PSWUSP0_PCIE_ERR_CNTL__RX_GENERATE_ECRC_ERR__SHIFT 0x7
++#define PSWUSP0_PCIE_ERR_CNTL__AER_HDR_LOG_TIMEOUT__SHIFT 0x8
++#define PSWUSP0_PCIE_ERR_CNTL__AER_HDR_LOG_F0_TIMER_EXPIRED__SHIFT 0xb
++#define PSWUSP0_PCIE_ERR_CNTL__AER_HDR_LOG_F1_TIMER_EXPIRED__SHIFT 0xc
++#define PSWUSP0_PCIE_ERR_CNTL__AER_HDR_LOG_F2_TIMER_EXPIRED__SHIFT 0xd
++#define PSWUSP0_PCIE_ERR_CNTL__CI_P_SLV_BUF_RD_HALT_STATUS__SHIFT 0xe
++#define PSWUSP0_PCIE_ERR_CNTL__CI_NP_SLV_BUF_RD_HALT_STATUS__SHIFT 0xf
++#define PSWUSP0_PCIE_ERR_CNTL__CI_SLV_BUF_HALT_RESET__SHIFT 0x10
++#define PSWUSP0_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY__SHIFT 0x11
++#define PSWUSP0_PCIE_ERR_CNTL__STRAP_POISONED_ADVISORY_NONFATAL__SHIFT 0x12
++#define PSWUSP0_PCIE_ERR_CNTL__ERR_REPORTING_DIS_MASK 0x00000001L
++#define PSWUSP0_PCIE_ERR_CNTL__STRAP_FIRST_RCVD_ERR_LOG_MASK 0x00000002L
++#define PSWUSP0_PCIE_ERR_CNTL__RX_DROP_ECRC_FAILURES_MASK 0x00000004L
++#define PSWUSP0_PCIE_ERR_CNTL__TX_GENERATE_LCRC_ERR_MASK 0x00000010L
++#define PSWUSP0_PCIE_ERR_CNTL__RX_GENERATE_LCRC_ERR_MASK 0x00000020L
++#define PSWUSP0_PCIE_ERR_CNTL__TX_GENERATE_ECRC_ERR_MASK 0x00000040L
++#define PSWUSP0_PCIE_ERR_CNTL__RX_GENERATE_ECRC_ERR_MASK 0x00000080L
++#define PSWUSP0_PCIE_ERR_CNTL__AER_HDR_LOG_TIMEOUT_MASK 0x00000700L
++#define PSWUSP0_PCIE_ERR_CNTL__AER_HDR_LOG_F0_TIMER_EXPIRED_MASK 0x00000800L
++#define PSWUSP0_PCIE_ERR_CNTL__AER_HDR_LOG_F1_TIMER_EXPIRED_MASK 0x00001000L
++#define PSWUSP0_PCIE_ERR_CNTL__AER_HDR_LOG_F2_TIMER_EXPIRED_MASK 0x00002000L
++#define PSWUSP0_PCIE_ERR_CNTL__CI_P_SLV_BUF_RD_HALT_STATUS_MASK 0x00004000L
++#define PSWUSP0_PCIE_ERR_CNTL__CI_NP_SLV_BUF_RD_HALT_STATUS_MASK 0x00008000L
++#define PSWUSP0_PCIE_ERR_CNTL__CI_SLV_BUF_HALT_RESET_MASK 0x00010000L
++#define PSWUSP0_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY_MASK 0x00020000L
++#define PSWUSP0_PCIE_ERR_CNTL__STRAP_POISONED_ADVISORY_NONFATAL_MASK 0x00040000L
++//PSWUSP0_PCIE_RX_CNTL
++#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_IO_ERR__SHIFT 0x0
++#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_BE_ERR__SHIFT 0x1
++#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_MSG_ERR__SHIFT 0x2
++#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_CRC_ERR__SHIFT 0x3
++#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_CFG_ERR__SHIFT 0x4
++#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_CPL_ERR__SHIFT 0x5
++#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_EP_ERR__SHIFT 0x6
++#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_LEN_MISMATCH_ERR__SHIFT 0x7
++#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR__SHIFT 0x8
++#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_TC_ERR__SHIFT 0x9
++#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_CFG_UR__SHIFT 0xa
++#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_IO_UR__SHIFT 0xb
++#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_AT_ERR__SHIFT 0xc
++#define PSWUSP0_PCIE_RX_CNTL__RX_NAK_IF_FIFO_FULL__SHIFT 0xd
++#define PSWUSP0_PCIE_RX_CNTL__RX_GEN_ONE_NAK__SHIFT 0xe
++#define PSWUSP0_PCIE_RX_CNTL__RX_FC_INIT_FROM_REG__SHIFT 0xf
++#define PSWUSP0_PCIE_RX_CNTL__RX_RCB_CPL_TIMEOUT__SHIFT 0x10
++#define PSWUSP0_PCIE_RX_CNTL__RX_RCB_CPL_TIMEOUT_MODE__SHIFT 0x13
++#define PSWUSP0_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS__SHIFT 0x14
++#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR__SHIFT 0x15
++#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_MAXPREFIX_ERR__SHIFT 0x16
++#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_CPLPREFIX_ERR__SHIFT 0x17
++#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_INVALIDPASID_ERR__SHIFT 0x18
++#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_NOT_PASID_UR__SHIFT 0x19
++#define PSWUSP0_PCIE_RX_CNTL__RX_TPH_DIS__SHIFT 0x1a
++#define PSWUSP0_PCIE_RX_CNTL__RX_RCB_FLR_TIMEOUT_DIS__SHIFT 0x1b
++#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_IO_ERR_MASK 0x00000001L
++#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_BE_ERR_MASK 0x00000002L
++#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_MSG_ERR_MASK 0x00000004L
++#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_CRC_ERR_MASK 0x00000008L
++#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_CFG_ERR_MASK 0x00000010L
++#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_CPL_ERR_MASK 0x00000020L
++#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_EP_ERR_MASK 0x00000040L
++#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_LEN_MISMATCH_ERR_MASK 0x00000080L
++#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR_MASK 0x00000100L
++#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_TC_ERR_MASK 0x00000200L
++#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_CFG_UR_MASK 0x00000400L
++#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_IO_UR_MASK 0x00000800L
++#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_AT_ERR_MASK 0x00001000L
++#define PSWUSP0_PCIE_RX_CNTL__RX_NAK_IF_FIFO_FULL_MASK 0x00002000L
++#define PSWUSP0_PCIE_RX_CNTL__RX_GEN_ONE_NAK_MASK 0x00004000L
++#define PSWUSP0_PCIE_RX_CNTL__RX_FC_INIT_FROM_REG_MASK 0x00008000L
++#define PSWUSP0_PCIE_RX_CNTL__RX_RCB_CPL_TIMEOUT_MASK 0x00070000L
++#define PSWUSP0_PCIE_RX_CNTL__RX_RCB_CPL_TIMEOUT_MODE_MASK 0x00080000L
++#define PSWUSP0_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS_MASK 0x00100000L
++#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR_MASK 0x00200000L
++#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_MAXPREFIX_ERR_MASK 0x00400000L
++#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_CPLPREFIX_ERR_MASK 0x00800000L
++#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_INVALIDPASID_ERR_MASK 0x01000000L
++#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_NOT_PASID_UR_MASK 0x02000000L
++#define PSWUSP0_PCIE_RX_CNTL__RX_TPH_DIS_MASK 0x04000000L
++#define PSWUSP0_PCIE_RX_CNTL__RX_RCB_FLR_TIMEOUT_DIS_MASK 0x08000000L
++//PCIE_RX_EXPECTED_SEQNUM
++#define PCIE_RX_EXPECTED_SEQNUM__RX_EXPECTED_SEQNUM__SHIFT 0x0
++#define PCIE_RX_EXPECTED_SEQNUM__RX_EXPECTED_SEQNUM_MASK 0x00000FFFL
++//PCIE_RX_VENDOR_SPECIFIC
++#define PCIE_RX_VENDOR_SPECIFIC__RX_VENDOR_DATA__SHIFT 0x0
++#define PCIE_RX_VENDOR_SPECIFIC__RX_VENDOR_STATUS__SHIFT 0x18
++#define PCIE_RX_VENDOR_SPECIFIC__RX_VENDOR_DATA_MASK 0x00FFFFFFL
++#define PCIE_RX_VENDOR_SPECIFIC__RX_VENDOR_STATUS_MASK 0x01000000L
++//PCIE_RX_CNTL3
++#define PCIE_RX_CNTL3__RX_IGNORE_RC_TRANSMRDPASID_UR__SHIFT 0x0
++#define PCIE_RX_CNTL3__RX_IGNORE_RC_TRANSMWRPASID_UR__SHIFT 0x1
++#define PCIE_RX_CNTL3__RX_IGNORE_RC_PRGRESPMSG_UR__SHIFT 0x2
++#define PCIE_RX_CNTL3__RX_IGNORE_RC_INVREQ_UR__SHIFT 0x3
++#define PCIE_RX_CNTL3__RX_IGNORE_RC_INVCPLPASID_UR__SHIFT 0x4
++#define PCIE_RX_CNTL3__RX_IGNORE_RC_TRANSMRDPASID_UR_MASK 0x00000001L
++#define PCIE_RX_CNTL3__RX_IGNORE_RC_TRANSMWRPASID_UR_MASK 0x00000002L
++#define PCIE_RX_CNTL3__RX_IGNORE_RC_PRGRESPMSG_UR_MASK 0x00000004L
++#define PCIE_RX_CNTL3__RX_IGNORE_RC_INVREQ_UR_MASK 0x00000008L
++#define PCIE_RX_CNTL3__RX_IGNORE_RC_INVCPLPASID_UR_MASK 0x00000010L
++//PCIE_RX_CREDITS_ALLOCATED_P
++#define PCIE_RX_CREDITS_ALLOCATED_P__RX_CREDITS_ALLOCATED_PD__SHIFT 0x0
++#define PCIE_RX_CREDITS_ALLOCATED_P__RX_CREDITS_ALLOCATED_PH__SHIFT 0x10
++#define PCIE_RX_CREDITS_ALLOCATED_P__RX_CREDITS_ALLOCATED_PD_MASK 0x00000FFFL
++#define PCIE_RX_CREDITS_ALLOCATED_P__RX_CREDITS_ALLOCATED_PH_MASK 0x00FF0000L
++//PCIE_RX_CREDITS_ALLOCATED_NP
++#define PCIE_RX_CREDITS_ALLOCATED_NP__RX_CREDITS_ALLOCATED_NPD__SHIFT 0x0
++#define PCIE_RX_CREDITS_ALLOCATED_NP__RX_CREDITS_ALLOCATED_NPH__SHIFT 0x10
++#define PCIE_RX_CREDITS_ALLOCATED_NP__RX_CREDITS_ALLOCATED_NPD_MASK 0x00000FFFL
++#define PCIE_RX_CREDITS_ALLOCATED_NP__RX_CREDITS_ALLOCATED_NPH_MASK 0x00FF0000L
++//PCIE_RX_CREDITS_ALLOCATED_CPL
++#define PCIE_RX_CREDITS_ALLOCATED_CPL__RX_CREDITS_ALLOCATED_CPLD__SHIFT 0x0
++#define PCIE_RX_CREDITS_ALLOCATED_CPL__RX_CREDITS_ALLOCATED_CPLH__SHIFT 0x10
++#define PCIE_RX_CREDITS_ALLOCATED_CPL__RX_CREDITS_ALLOCATED_CPLD_MASK 0x00000FFFL
++#define PCIE_RX_CREDITS_ALLOCATED_CPL__RX_CREDITS_ALLOCATED_CPLH_MASK 0x00FF0000L
++//PCIEP_ERROR_INJECT_PHYSICAL
++#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_LANE_ERR__SHIFT 0x0
++#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_FRAMING_ERR__SHIFT 0x2
++#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_BAD_PARITY_IN_SKP__SHIFT 0x4
++#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_BAD_LFSR_IN_SKP__SHIFT 0x6
++#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_LOOPBACK_UFLOW__SHIFT 0x8
++#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_LOOPBACK_OFLOW__SHIFT 0xa
++#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_DESKEW_ERR__SHIFT 0xc
++#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_8B10B_DISPARITY_ERR__SHIFT 0xe
++#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_8B10B_DECODE_ERR__SHIFT 0x10
++#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_SKP_OS_ERROR__SHIFT 0x12
++#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_INV_OS_IDENTIFIER__SHIFT 0x14
++#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_BAD_SYNC_HEADER__SHIFT 0x16
++#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_LANE_ERR_MASK 0x00000003L
++#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_FRAMING_ERR_MASK 0x0000000CL
++#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_BAD_PARITY_IN_SKP_MASK 0x00000030L
++#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_BAD_LFSR_IN_SKP_MASK 0x000000C0L
++#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_LOOPBACK_UFLOW_MASK 0x00000300L
++#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_LOOPBACK_OFLOW_MASK 0x00000C00L
++#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_DESKEW_ERR_MASK 0x00003000L
++#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_8B10B_DISPARITY_ERR_MASK 0x0000C000L
++#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_8B10B_DECODE_ERR_MASK 0x00030000L
++#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_SKP_OS_ERROR_MASK 0x000C0000L
++#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_INV_OS_IDENTIFIER_MASK 0x00300000L
++#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_BAD_SYNC_HEADER_MASK 0x00C00000L
++//PCIEP_ERROR_INJECT_TRANSACTION
++#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_FLOW_CTL_ERR__SHIFT 0x0
++#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_REPLAY_NUM_ROLLOVER__SHIFT 0x2
++#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_BAD_DLLP__SHIFT 0x4
++#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_BAD_TLP__SHIFT 0x6
++#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_UNSUPPORTED_REQ__SHIFT 0x8
++#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_ECRC_ERROR__SHIFT 0xa
++#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_MALFORMED_TLP__SHIFT 0xc
++#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_UNEXPECTED_CMPLT__SHIFT 0xe
++#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_COMPLETER_ABORT__SHIFT 0x10
++#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_COMPLETION_TIMEOUT__SHIFT 0x12
++#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_FLOW_CTL_ERR_MASK 0x00000003L
++#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_REPLAY_NUM_ROLLOVER_MASK 0x0000000CL
++#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_BAD_DLLP_MASK 0x00000030L
++#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_BAD_TLP_MASK 0x000000C0L
++#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_UNSUPPORTED_REQ_MASK 0x00000300L
++#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_ECRC_ERROR_MASK 0x00000C00L
++#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_MALFORMED_TLP_MASK 0x00003000L
++#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_UNEXPECTED_CMPLT_MASK 0x0000C000L
++#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_COMPLETER_ABORT_MASK 0x00030000L
++#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_COMPLETION_TIMEOUT_MASK 0x000C0000L
++//PCIEP_SRIOV_PRIV_CTRL
++#define PCIEP_SRIOV_PRIV_CTRL__RX_SRIOV_VF_MAPPING_MODE__SHIFT 0x0
++#define PCIEP_SRIOV_PRIV_CTRL__SRIOV_SAVE_VFS_ON_VFENABLE_CLR__SHIFT 0x2
++#define PCIEP_SRIOV_PRIV_CTRL__RX_SRIOV_VF_MAPPING_MODE_MASK 0x00000003L
++#define PCIEP_SRIOV_PRIV_CTRL__SRIOV_SAVE_VFS_ON_VFENABLE_CLR_MASK 0x0000000CL
++//PCIEP_NAK_COUNTER
++#define PCIEP_NAK_COUNTER__RX_NUM_NAK_RECEIVED_PORT__SHIFT 0x0
++#define PCIEP_NAK_COUNTER__RX_NUM_NAK_GENERATED_PORT__SHIFT 0x10
++#define PCIEP_NAK_COUNTER__RX_NUM_NAK_RECEIVED_PORT_MASK 0x0000FFFFL
++#define PCIEP_NAK_COUNTER__RX_NUM_NAK_GENERATED_PORT_MASK 0xFFFF0000L
++//PCIE_LC_CNTL
++#define PCIE_LC_CNTL__LC_DONT_ENTER_L23_IN_D0__SHIFT 0x1
++#define PCIE_LC_CNTL__LC_RESET_L_IDLE_COUNT_EN__SHIFT 0x2
++#define PCIE_LC_CNTL__LC_RESET_LINK__SHIFT 0x3
++#define PCIE_LC_CNTL__LC_16X_CLEAR_TX_PIPE__SHIFT 0x4
++#define PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT 0x8
++#define PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT 0xc
++#define PCIE_LC_CNTL__LC_PMI_TO_L1_DIS__SHIFT 0x10
++#define PCIE_LC_CNTL__LC_INC_N_FTS_EN__SHIFT 0x11
++#define PCIE_LC_CNTL__LC_LOOK_FOR_IDLE_IN_L1L23__SHIFT 0x12
++#define PCIE_LC_CNTL__LC_FACTOR_IN_EXT_SYNC__SHIFT 0x14
++#define PCIE_LC_CNTL__LC_WAIT_FOR_PM_ACK_DIS__SHIFT 0x15
++#define PCIE_LC_CNTL__LC_WAKE_FROM_L23__SHIFT 0x16
++#define PCIE_LC_CNTL__LC_L1_IMMEDIATE_ACK__SHIFT 0x17
++#define PCIE_LC_CNTL__LC_ASPM_TO_L1_DIS__SHIFT 0x18
++#define PCIE_LC_CNTL__LC_DELAY_COUNT__SHIFT 0x19
++#define PCIE_LC_CNTL__LC_DELAY_L0S_EXIT__SHIFT 0x1b
++#define PCIE_LC_CNTL__LC_DELAY_L1_EXIT__SHIFT 0x1c
++#define PCIE_LC_CNTL__LC_EXTEND_WAIT_FOR_EL_IDLE__SHIFT 0x1d
++#define PCIE_LC_CNTL__LC_ESCAPE_L1L23_EN__SHIFT 0x1e
++#define PCIE_LC_CNTL__LC_GATE_RCVR_IDLE__SHIFT 0x1f
++#define PCIE_LC_CNTL__LC_DONT_ENTER_L23_IN_D0_MASK 0x00000002L
++#define PCIE_LC_CNTL__LC_RESET_L_IDLE_COUNT_EN_MASK 0x00000004L
++#define PCIE_LC_CNTL__LC_RESET_LINK_MASK 0x00000008L
++#define PCIE_LC_CNTL__LC_16X_CLEAR_TX_PIPE_MASK 0x000000F0L
++#define PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK 0x00000F00L
++#define PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK 0x0000F000L
++#define PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK 0x00010000L
++#define PCIE_LC_CNTL__LC_INC_N_FTS_EN_MASK 0x00020000L
++#define PCIE_LC_CNTL__LC_LOOK_FOR_IDLE_IN_L1L23_MASK 0x000C0000L
++#define PCIE_LC_CNTL__LC_FACTOR_IN_EXT_SYNC_MASK 0x00100000L
++#define PCIE_LC_CNTL__LC_WAIT_FOR_PM_ACK_DIS_MASK 0x00200000L
++#define PCIE_LC_CNTL__LC_WAKE_FROM_L23_MASK 0x00400000L
++#define PCIE_LC_CNTL__LC_L1_IMMEDIATE_ACK_MASK 0x00800000L
++#define PCIE_LC_CNTL__LC_ASPM_TO_L1_DIS_MASK 0x01000000L
++#define PCIE_LC_CNTL__LC_DELAY_COUNT_MASK 0x06000000L
++#define PCIE_LC_CNTL__LC_DELAY_L0S_EXIT_MASK 0x08000000L
++#define PCIE_LC_CNTL__LC_DELAY_L1_EXIT_MASK 0x10000000L
++#define PCIE_LC_CNTL__LC_EXTEND_WAIT_FOR_EL_IDLE_MASK 0x20000000L
++#define PCIE_LC_CNTL__LC_ESCAPE_L1L23_EN_MASK 0x40000000L
++#define PCIE_LC_CNTL__LC_GATE_RCVR_IDLE_MASK 0x80000000L
++//PCIE_LC_TRAINING_CNTL
++#define PCIE_LC_TRAINING_CNTL__LC_TRAINING_CNTL__SHIFT 0x0
++#define PCIE_LC_TRAINING_CNTL__LC_COMPLIANCE_RECEIVE__SHIFT 0x4
++#define PCIE_LC_TRAINING_CNTL__LC_LOOK_FOR_MORE_NON_MATCHING_TS1__SHIFT 0x5
++#define PCIE_LC_TRAINING_CNTL__LC_L0S_L1_TRAINING_CNTL_EN__SHIFT 0x6
++#define PCIE_LC_TRAINING_CNTL__LC_L1_LONG_WAKE_FIX_EN__SHIFT 0x7
++#define PCIE_LC_TRAINING_CNTL__LC_POWER_STATE__SHIFT 0x8
++#define PCIE_LC_TRAINING_CNTL__LC_DONT_GO_TO_L0S_IF_L1_ARMED__SHIFT 0xb
++#define PCIE_LC_TRAINING_CNTL__LC_INIT_SPD_CHG_WITH_CSR_EN__SHIFT 0xc
++#define PCIE_LC_TRAINING_CNTL__LC_DISABLE_TRAINING_BIT_ARCH__SHIFT 0xd
++#define PCIE_LC_TRAINING_CNTL__LC_WAIT_FOR_SETS_IN_RCFG__SHIFT 0xe
++#define PCIE_LC_TRAINING_CNTL__LC_HOT_RESET_QUICK_EXIT_EN__SHIFT 0xf
++#define PCIE_LC_TRAINING_CNTL__LC_EXTEND_WAIT_FOR_SKP__SHIFT 0x10
++#define PCIE_LC_TRAINING_CNTL__LC_AUTONOMOUS_CHANGE_OFF__SHIFT 0x11
++#define PCIE_LC_TRAINING_CNTL__LC_UPCONFIGURE_CAP_OFF__SHIFT 0x12
++#define PCIE_LC_TRAINING_CNTL__LC_HW_LINK_DIS_EN__SHIFT 0x13
++#define PCIE_LC_TRAINING_CNTL__LC_LINK_DIS_BY_HW__SHIFT 0x14
++#define PCIE_LC_TRAINING_CNTL__LC_STATIC_TX_PIPE_COUNT_EN__SHIFT 0x15
++#define PCIE_LC_TRAINING_CNTL__LC_ASPM_L1_NAK_TIMER_SEL__SHIFT 0x16
++#define PCIE_LC_TRAINING_CNTL__LC_DONT_DEASSERT_RX_EN_IN_R_SPEED__SHIFT 0x18
++#define PCIE_LC_TRAINING_CNTL__LC_DONT_DEASSERT_RX_EN_IN_TEST__SHIFT 0x19
++#define PCIE_LC_TRAINING_CNTL__LC_RESET_ASPM_L1_NAK_TIMER__SHIFT 0x1a
++#define PCIE_LC_TRAINING_CNTL__LC_SHORT_RCFG_TIMEOUT__SHIFT 0x1b
++#define PCIE_LC_TRAINING_CNTL__LC_ALLOW_TX_L1_CONTROL__SHIFT 0x1c
++#define PCIE_LC_TRAINING_CNTL__LC_WAIT_FOR_FOM_VALID_AFTER_TRACK__SHIFT 0x1d
++#define PCIE_LC_TRAINING_CNTL__LC_EXTEND_EQ_REQ_TIME__SHIFT 0x1e
++#define PCIE_LC_TRAINING_CNTL__LC_TRAINING_CNTL_MASK 0x0000000FL
++#define PCIE_LC_TRAINING_CNTL__LC_COMPLIANCE_RECEIVE_MASK 0x00000010L
++#define PCIE_LC_TRAINING_CNTL__LC_LOOK_FOR_MORE_NON_MATCHING_TS1_MASK 0x00000020L
++#define PCIE_LC_TRAINING_CNTL__LC_L0S_L1_TRAINING_CNTL_EN_MASK 0x00000040L
++#define PCIE_LC_TRAINING_CNTL__LC_L1_LONG_WAKE_FIX_EN_MASK 0x00000080L
++#define PCIE_LC_TRAINING_CNTL__LC_POWER_STATE_MASK 0x00000700L
++#define PCIE_LC_TRAINING_CNTL__LC_DONT_GO_TO_L0S_IF_L1_ARMED_MASK 0x00000800L
++#define PCIE_LC_TRAINING_CNTL__LC_INIT_SPD_CHG_WITH_CSR_EN_MASK 0x00001000L
++#define PCIE_LC_TRAINING_CNTL__LC_DISABLE_TRAINING_BIT_ARCH_MASK 0x00002000L
++#define PCIE_LC_TRAINING_CNTL__LC_WAIT_FOR_SETS_IN_RCFG_MASK 0x00004000L
++#define PCIE_LC_TRAINING_CNTL__LC_HOT_RESET_QUICK_EXIT_EN_MASK 0x00008000L
++#define PCIE_LC_TRAINING_CNTL__LC_EXTEND_WAIT_FOR_SKP_MASK 0x00010000L
++#define PCIE_LC_TRAINING_CNTL__LC_AUTONOMOUS_CHANGE_OFF_MASK 0x00020000L
++#define PCIE_LC_TRAINING_CNTL__LC_UPCONFIGURE_CAP_OFF_MASK 0x00040000L
++#define PCIE_LC_TRAINING_CNTL__LC_HW_LINK_DIS_EN_MASK 0x00080000L
++#define PCIE_LC_TRAINING_CNTL__LC_LINK_DIS_BY_HW_MASK 0x00100000L
++#define PCIE_LC_TRAINING_CNTL__LC_STATIC_TX_PIPE_COUNT_EN_MASK 0x00200000L
++#define PCIE_LC_TRAINING_CNTL__LC_ASPM_L1_NAK_TIMER_SEL_MASK 0x00C00000L
++#define PCIE_LC_TRAINING_CNTL__LC_DONT_DEASSERT_RX_EN_IN_R_SPEED_MASK 0x01000000L
++#define PCIE_LC_TRAINING_CNTL__LC_DONT_DEASSERT_RX_EN_IN_TEST_MASK 0x02000000L
++#define PCIE_LC_TRAINING_CNTL__LC_RESET_ASPM_L1_NAK_TIMER_MASK 0x04000000L
++#define PCIE_LC_TRAINING_CNTL__LC_SHORT_RCFG_TIMEOUT_MASK 0x08000000L
++#define PCIE_LC_TRAINING_CNTL__LC_ALLOW_TX_L1_CONTROL_MASK 0x10000000L
++#define PCIE_LC_TRAINING_CNTL__LC_WAIT_FOR_FOM_VALID_AFTER_TRACK_MASK 0x20000000L
++#define PCIE_LC_TRAINING_CNTL__LC_EXTEND_EQ_REQ_TIME_MASK 0xC0000000L
++//PCIE_LC_LINK_WIDTH_CNTL
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH__SHIFT 0x0
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT 0x4
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_RECONFIG_ARC_MISSING_ESCAPE__SHIFT 0x7
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_RECONFIG_NOW__SHIFT 0x8
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_RENEGOTIATION_SUPPORT__SHIFT 0x9
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_RENEGOTIATE_EN__SHIFT 0xa
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_SHORT_RECONFIG_EN__SHIFT 0xb
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_SUPPORT__SHIFT 0xc
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_DIS__SHIFT 0xd
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCFG_WAIT_FOR_RCVR_DIS__SHIFT 0xe
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCFG_TIMER_SEL__SHIFT 0xf
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_DEASSERT_TX_PDNB__SHIFT 0x10
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_L1_RECONFIG_EN__SHIFT 0x11
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_DYNLINK_MST_EN__SHIFT 0x12
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_DUAL_END_RECONFIG_EN__SHIFT 0x13
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_CAPABLE__SHIFT 0x14
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE__SHIFT 0x15
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_EQ_REVERSAL_LOGIC_EN__SHIFT 0x17
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_MULT_REVERSE_ATTEMP_EN__SHIFT 0x18
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_RESET_TSX_CNT_IN_RCONFIG_EN__SHIFT 0x19
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_WAIT_FOR_L_IDLE_IN_R_IDLE__SHIFT 0x1a
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_WAIT_FOR_NON_EI_ON_RXL0S_EXIT__SHIFT 0x1b
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_HOLD_EI_FOR_RSPEED_CMD_CHANGE__SHIFT 0x1c
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_BYPASS_RXL0S_ON_SHORT_EI__SHIFT 0x1d
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_TURN_OFF_UNUSED_LANES__SHIFT 0x1e
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_BYPASS_RXSTANDBY_STATUS__SHIFT 0x1f
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_MASK 0x00000007L
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK 0x00000070L
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_RECONFIG_ARC_MISSING_ESCAPE_MASK 0x00000080L
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_RECONFIG_NOW_MASK 0x00000100L
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_RENEGOTIATION_SUPPORT_MASK 0x00000200L
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_RENEGOTIATE_EN_MASK 0x00000400L
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_SHORT_RECONFIG_EN_MASK 0x00000800L
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_SUPPORT_MASK 0x00001000L
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_DIS_MASK 0x00002000L
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCFG_WAIT_FOR_RCVR_DIS_MASK 0x00004000L
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCFG_TIMER_SEL_MASK 0x00008000L
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_DEASSERT_TX_PDNB_MASK 0x00010000L
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_L1_RECONFIG_EN_MASK 0x00020000L
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_DYNLINK_MST_EN_MASK 0x00040000L
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_DUAL_END_RECONFIG_EN_MASK 0x00080000L
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_CAPABLE_MASK 0x00100000L
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE_MASK 0x00600000L
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_EQ_REVERSAL_LOGIC_EN_MASK 0x00800000L
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_MULT_REVERSE_ATTEMP_EN_MASK 0x01000000L
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_RESET_TSX_CNT_IN_RCONFIG_EN_MASK 0x02000000L
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_WAIT_FOR_L_IDLE_IN_R_IDLE_MASK 0x04000000L
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_WAIT_FOR_NON_EI_ON_RXL0S_EXIT_MASK 0x08000000L
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_HOLD_EI_FOR_RSPEED_CMD_CHANGE_MASK 0x10000000L
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_BYPASS_RXL0S_ON_SHORT_EI_MASK 0x20000000L
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_TURN_OFF_UNUSED_LANES_MASK 0x40000000L
++#define PCIE_LC_LINK_WIDTH_CNTL__LC_BYPASS_RXSTANDBY_STATUS_MASK 0x80000000L
++//PCIE_LC_N_FTS_CNTL
++#define PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS__SHIFT 0x0
++#define PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_OVERRIDE_EN__SHIFT 0x8
++#define PCIE_LC_N_FTS_CNTL__LC_XMIT_FTS_BEFORE_RECOVERY__SHIFT 0x9
++#define PCIE_LC_N_FTS_CNTL__LC_N_EIE_SEL__SHIFT 0xa
++#define PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_8GT_CNTL__SHIFT 0xe
++#define PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_16GT_CNTL__SHIFT 0xf
++#define PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_LIMIT__SHIFT 0x10
++#define PCIE_LC_N_FTS_CNTL__LC_N_FTS__SHIFT 0x18
++#define PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_MASK 0x000000FFL
++#define PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_OVERRIDE_EN_MASK 0x00000100L
++#define PCIE_LC_N_FTS_CNTL__LC_XMIT_FTS_BEFORE_RECOVERY_MASK 0x00000200L
++#define PCIE_LC_N_FTS_CNTL__LC_N_EIE_SEL_MASK 0x00000400L
++#define PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_8GT_CNTL_MASK 0x00004000L
++#define PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_16GT_CNTL_MASK 0x00008000L
++#define PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_LIMIT_MASK 0x00FF0000L
++#define PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK 0xFF000000L
++//PSWUSP0_PCIE_LC_SPEED_CNTL
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP__SHIFT 0x0
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP__SHIFT 0x1
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP__SHIFT 0x2
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_TARGET_LINK_SPEED_OVERRIDE_EN__SHIFT 0x3
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_TARGET_LINK_SPEED_OVERRIDE__SHIFT 0x4
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_FORCE_EN_SW_SPEED_CHANGE__SHIFT 0x6
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_SW_SPEED_CHANGE__SHIFT 0x7
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_FORCE_EN_HW_SPEED_CHANGE__SHIFT 0x8
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_HW_SPEED_CHANGE__SHIFT 0x9
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_INITIATE_LINK_SPEED_CHANGE__SHIFT 0xa
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_SPEED_CHANGE_ATTEMPTS_ALLOWED__SHIFT 0xb
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_SPEED_CHANGE_ATTEMPT_FAILED__SHIFT 0xd
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0xe
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_DONT_CLR_TARGET_SPD_CHANGE_STATUS__SHIFT 0x10
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CLR_FAILED_SPD_CHANGE_CNT__SHIFT 0x11
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_1_OR_MORE_TS2_SPEED_ARC_EN__SHIFT 0x12
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_EVER_SENT_GEN2__SHIFT 0x13
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_SUPPORTS_GEN2__SHIFT 0x14
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_EVER_SENT_GEN3__SHIFT 0x15
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_SUPPORTS_GEN3__SHIFT 0x16
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_EVER_SENT_GEN4__SHIFT 0x17
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_SUPPORTS_GEN4__SHIFT 0x18
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_SPEED_CHANGE_STATUS__SHIFT 0x19
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_DATA_RATE_ADVERTISED__SHIFT 0x1a
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CHECK_DATA_RATE__SHIFT 0x1c
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_MULT_UPSTREAM_AUTO_SPD_CHNG_EN__SHIFT 0x1d
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_INIT_SPEED_NEG_IN_L0s_EN__SHIFT 0x1e
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_INIT_SPEED_NEG_IN_L1_EN__SHIFT 0x1f
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP_MASK 0x00000001L
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP_MASK 0x00000002L
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP_MASK 0x00000004L
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_TARGET_LINK_SPEED_OVERRIDE_EN_MASK 0x00000008L
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_TARGET_LINK_SPEED_OVERRIDE_MASK 0x00000030L
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_FORCE_EN_SW_SPEED_CHANGE_MASK 0x00000040L
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_SW_SPEED_CHANGE_MASK 0x00000080L
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_FORCE_EN_HW_SPEED_CHANGE_MASK 0x00000100L
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_HW_SPEED_CHANGE_MASK 0x00000200L
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_INITIATE_LINK_SPEED_CHANGE_MASK 0x00000400L
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK 0x00001800L
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_SPEED_CHANGE_ATTEMPT_FAILED_MASK 0x00002000L
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0x0000C000L
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_DONT_CLR_TARGET_SPD_CHANGE_STATUS_MASK 0x00010000L
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CLR_FAILED_SPD_CHANGE_CNT_MASK 0x00020000L
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_1_OR_MORE_TS2_SPEED_ARC_EN_MASK 0x00040000L
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_EVER_SENT_GEN2_MASK 0x00080000L
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_SUPPORTS_GEN2_MASK 0x00100000L
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_EVER_SENT_GEN3_MASK 0x00200000L
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_SUPPORTS_GEN3_MASK 0x00400000L
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_EVER_SENT_GEN4_MASK 0x00800000L
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_SUPPORTS_GEN4_MASK 0x01000000L
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_SPEED_CHANGE_STATUS_MASK 0x02000000L
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_DATA_RATE_ADVERTISED_MASK 0x0C000000L
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CHECK_DATA_RATE_MASK 0x10000000L
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_MULT_UPSTREAM_AUTO_SPD_CHNG_EN_MASK 0x20000000L
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_INIT_SPEED_NEG_IN_L0s_EN_MASK 0x40000000L
++#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_INIT_SPEED_NEG_IN_L1_EN_MASK 0x80000000L
++//PCIE_LC_STATE0
++#define PCIE_LC_STATE0__LC_CURRENT_STATE__SHIFT 0x0
++#define PCIE_LC_STATE0__LC_PREV_STATE1__SHIFT 0x8
++#define PCIE_LC_STATE0__LC_PREV_STATE2__SHIFT 0x10
++#define PCIE_LC_STATE0__LC_PREV_STATE3__SHIFT 0x18
++#define PCIE_LC_STATE0__LC_CURRENT_STATE_MASK 0x0000003FL
++#define PCIE_LC_STATE0__LC_PREV_STATE1_MASK 0x00003F00L
++#define PCIE_LC_STATE0__LC_PREV_STATE2_MASK 0x003F0000L
++#define PCIE_LC_STATE0__LC_PREV_STATE3_MASK 0x3F000000L
++//PCIE_LC_STATE1
++#define PCIE_LC_STATE1__LC_PREV_STATE4__SHIFT 0x0
++#define PCIE_LC_STATE1__LC_PREV_STATE5__SHIFT 0x8
++#define PCIE_LC_STATE1__LC_PREV_STATE6__SHIFT 0x10
++#define PCIE_LC_STATE1__LC_PREV_STATE7__SHIFT 0x18
++#define PCIE_LC_STATE1__LC_PREV_STATE4_MASK 0x0000003FL
++#define PCIE_LC_STATE1__LC_PREV_STATE5_MASK 0x00003F00L
++#define PCIE_LC_STATE1__LC_PREV_STATE6_MASK 0x003F0000L
++#define PCIE_LC_STATE1__LC_PREV_STATE7_MASK 0x3F000000L
++//PCIE_LC_STATE2
++#define PCIE_LC_STATE2__LC_PREV_STATE8__SHIFT 0x0
++#define PCIE_LC_STATE2__LC_PREV_STATE9__SHIFT 0x8
++#define PCIE_LC_STATE2__LC_PREV_STATE10__SHIFT 0x10
++#define PCIE_LC_STATE2__LC_PREV_STATE11__SHIFT 0x18
++#define PCIE_LC_STATE2__LC_PREV_STATE8_MASK 0x0000003FL
++#define PCIE_LC_STATE2__LC_PREV_STATE9_MASK 0x00003F00L
++#define PCIE_LC_STATE2__LC_PREV_STATE10_MASK 0x003F0000L
++#define PCIE_LC_STATE2__LC_PREV_STATE11_MASK 0x3F000000L
++//PCIE_LC_STATE3
++#define PCIE_LC_STATE3__LC_PREV_STATE12__SHIFT 0x0
++#define PCIE_LC_STATE3__LC_PREV_STATE13__SHIFT 0x8
++#define PCIE_LC_STATE3__LC_PREV_STATE14__SHIFT 0x10
++#define PCIE_LC_STATE3__LC_PREV_STATE15__SHIFT 0x18
++#define PCIE_LC_STATE3__LC_PREV_STATE12_MASK 0x0000003FL
++#define PCIE_LC_STATE3__LC_PREV_STATE13_MASK 0x00003F00L
++#define PCIE_LC_STATE3__LC_PREV_STATE14_MASK 0x003F0000L
++#define PCIE_LC_STATE3__LC_PREV_STATE15_MASK 0x3F000000L
++//PCIE_LC_STATE4
++#define PCIE_LC_STATE4__LC_PREV_STATE16__SHIFT 0x0
++#define PCIE_LC_STATE4__LC_PREV_STATE17__SHIFT 0x8
++#define PCIE_LC_STATE4__LC_PREV_STATE18__SHIFT 0x10
++#define PCIE_LC_STATE4__LC_PREV_STATE19__SHIFT 0x18
++#define PCIE_LC_STATE4__LC_PREV_STATE16_MASK 0x0000003FL
++#define PCIE_LC_STATE4__LC_PREV_STATE17_MASK 0x00003F00L
++#define PCIE_LC_STATE4__LC_PREV_STATE18_MASK 0x003F0000L
++#define PCIE_LC_STATE4__LC_PREV_STATE19_MASK 0x3F000000L
++//PCIE_LC_STATE5
++#define PCIE_LC_STATE5__LC_PREV_STATE20__SHIFT 0x0
++#define PCIE_LC_STATE5__LC_PREV_STATE21__SHIFT 0x8
++#define PCIE_LC_STATE5__LC_PREV_STATE22__SHIFT 0x10
++#define PCIE_LC_STATE5__LC_PREV_STATE23__SHIFT 0x18
++#define PCIE_LC_STATE5__LC_PREV_STATE20_MASK 0x0000003FL
++#define PCIE_LC_STATE5__LC_PREV_STATE21_MASK 0x00003F00L
++#define PCIE_LC_STATE5__LC_PREV_STATE22_MASK 0x003F0000L
++#define PCIE_LC_STATE5__LC_PREV_STATE23_MASK 0x3F000000L
++//PCIE_LINK_MANAGEMENT_CNTL2
++#define PCIE_LINK_MANAGEMENT_CNTL2__QUIESCE_RCVD__SHIFT 0x0
++#define PCIE_LINK_MANAGEMENT_CNTL2__QUIESCE_SENT__SHIFT 0x1
++#define PCIE_LINK_MANAGEMENT_CNTL2__REQ_EQ_RCVD__SHIFT 0x2
++#define PCIE_LINK_MANAGEMENT_CNTL2__REQ_EQ_SENT__SHIFT 0x3
++#define PCIE_LINK_MANAGEMENT_CNTL2__BW_HINT_MODE__SHIFT 0x4
++#define PCIE_LINK_MANAGEMENT_CNTL2__LOW_BW_THRESHOLD_G2__SHIFT 0x7
++#define PCIE_LINK_MANAGEMENT_CNTL2__HIGH_BW_THRESHOLD_G2__SHIFT 0xb
++#define PCIE_LINK_MANAGEMENT_CNTL2__LOW_BW_THRESHOLD_G3__SHIFT 0xf
++#define PCIE_LINK_MANAGEMENT_CNTL2__HIGH_BW_THRESHOLD_G3__SHIFT 0x13
++#define PCIE_LINK_MANAGEMENT_CNTL2__LOW_BW_THRESHOLD_G4__SHIFT 0x17
++#define PCIE_LINK_MANAGEMENT_CNTL2__HIGH_BW_THRESHOLD_G4__SHIFT 0x1b
++#define PCIE_LINK_MANAGEMENT_CNTL2__QUIESCE_RCVD_MASK 0x00000001L
++#define PCIE_LINK_MANAGEMENT_CNTL2__QUIESCE_SENT_MASK 0x00000002L
++#define PCIE_LINK_MANAGEMENT_CNTL2__REQ_EQ_RCVD_MASK 0x00000004L
++#define PCIE_LINK_MANAGEMENT_CNTL2__REQ_EQ_SENT_MASK 0x00000008L
++#define PCIE_LINK_MANAGEMENT_CNTL2__BW_HINT_MODE_MASK 0x00000070L
++#define PCIE_LINK_MANAGEMENT_CNTL2__LOW_BW_THRESHOLD_G2_MASK 0x00000780L
++#define PCIE_LINK_MANAGEMENT_CNTL2__HIGH_BW_THRESHOLD_G2_MASK 0x00007800L
++#define PCIE_LINK_MANAGEMENT_CNTL2__LOW_BW_THRESHOLD_G3_MASK 0x00078000L
++#define PCIE_LINK_MANAGEMENT_CNTL2__HIGH_BW_THRESHOLD_G3_MASK 0x00780000L
++#define PCIE_LINK_MANAGEMENT_CNTL2__LOW_BW_THRESHOLD_G4_MASK 0x07800000L
++#define PCIE_LINK_MANAGEMENT_CNTL2__HIGH_BW_THRESHOLD_G4_MASK 0x78000000L
++//PSWUSP0_PCIE_LC_CNTL2
++#define PSWUSP0_PCIE_LC_CNTL2__LC_TIMED_OUT_STATE__SHIFT 0x0
++#define PSWUSP0_PCIE_LC_CNTL2__LC_STATE_TIMED_OUT__SHIFT 0x6
++#define PSWUSP0_PCIE_LC_CNTL2__LC_LOOK_FOR_BW_REDUCTION__SHIFT 0x7
++#define PSWUSP0_PCIE_LC_CNTL2__LC_MORE_TS2_EN__SHIFT 0x8
++#define PSWUSP0_PCIE_LC_CNTL2__LC_X12_NEGOTIATION_DIS__SHIFT 0x9
++#define PSWUSP0_PCIE_LC_CNTL2__LC_LINK_UP_REVERSAL_EN__SHIFT 0xa
++#define PSWUSP0_PCIE_LC_CNTL2__LC_ILLEGAL_STATE__SHIFT 0xb
++#define PSWUSP0_PCIE_LC_CNTL2__LC_ILLEGAL_STATE_RESTART_EN__SHIFT 0xc
++#define PSWUSP0_PCIE_LC_CNTL2__LC_WAIT_FOR_OTHER_LANES_MODE__SHIFT 0xd
++#define PSWUSP0_PCIE_LC_CNTL2__LC_ELEC_IDLE_MODE__SHIFT 0xe
++#define PSWUSP0_PCIE_LC_CNTL2__LC_DISABLE_INFERRED_ELEC_IDLE_DET__SHIFT 0x10
++#define PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1__SHIFT 0x11
++#define PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23__SHIFT 0x12
++#define PSWUSP0_PCIE_LC_CNTL2__LC_BLOCK_EL_IDLE_IN_L0__SHIFT 0x14
++#define PSWUSP0_PCIE_LC_CNTL2__LC_RCV_L0_TO_RCV_L0S_DIS__SHIFT 0x15
++#define PSWUSP0_PCIE_LC_CNTL2__LC_ASSERT_INACTIVE_DURING_HOLD__SHIFT 0x16
++#define PSWUSP0_PCIE_LC_CNTL2__LC_WAIT_FOR_LANES_IN_LW_NEG__SHIFT 0x17
++#define PSWUSP0_PCIE_LC_CNTL2__LC_PWR_DOWN_NEG_OFF_LANES__SHIFT 0x19
++#define PSWUSP0_PCIE_LC_CNTL2__LC_DISABLE_LOST_SYM_LOCK_ARCS__SHIFT 0x1a
++#define PSWUSP0_PCIE_LC_CNTL2__LC_LINK_BW_NOTIFICATION_DIS__SHIFT 0x1b
++#define PSWUSP0_PCIE_LC_CNTL2__LC_PMI_L1_WAIT_FOR_SLV_IDLE__SHIFT 0x1c
++#define PSWUSP0_PCIE_LC_CNTL2__LC_TEST_TIMER_SEL__SHIFT 0x1d
++#define PSWUSP0_PCIE_LC_CNTL2__LC_ENABLE_INFERRED_ELEC_IDLE_FOR_PI__SHIFT 0x1f
++#define PSWUSP0_PCIE_LC_CNTL2__LC_TIMED_OUT_STATE_MASK 0x0000003FL
++#define PSWUSP0_PCIE_LC_CNTL2__LC_STATE_TIMED_OUT_MASK 0x00000040L
++#define PSWUSP0_PCIE_LC_CNTL2__LC_LOOK_FOR_BW_REDUCTION_MASK 0x00000080L
++#define PSWUSP0_PCIE_LC_CNTL2__LC_MORE_TS2_EN_MASK 0x00000100L
++#define PSWUSP0_PCIE_LC_CNTL2__LC_X12_NEGOTIATION_DIS_MASK 0x00000200L
++#define PSWUSP0_PCIE_LC_CNTL2__LC_LINK_UP_REVERSAL_EN_MASK 0x00000400L
++#define PSWUSP0_PCIE_LC_CNTL2__LC_ILLEGAL_STATE_MASK 0x00000800L
++#define PSWUSP0_PCIE_LC_CNTL2__LC_ILLEGAL_STATE_RESTART_EN_MASK 0x00001000L
++#define PSWUSP0_PCIE_LC_CNTL2__LC_WAIT_FOR_OTHER_LANES_MODE_MASK 0x00002000L
++#define PSWUSP0_PCIE_LC_CNTL2__LC_ELEC_IDLE_MODE_MASK 0x0000C000L
++#define PSWUSP0_PCIE_LC_CNTL2__LC_DISABLE_INFERRED_ELEC_IDLE_DET_MASK 0x00010000L
++#define PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK 0x00020000L
++#define PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK 0x00040000L
++#define PSWUSP0_PCIE_LC_CNTL2__LC_BLOCK_EL_IDLE_IN_L0_MASK 0x00100000L
++#define PSWUSP0_PCIE_LC_CNTL2__LC_RCV_L0_TO_RCV_L0S_DIS_MASK 0x00200000L
++#define PSWUSP0_PCIE_LC_CNTL2__LC_ASSERT_INACTIVE_DURING_HOLD_MASK 0x00400000L
++#define PSWUSP0_PCIE_LC_CNTL2__LC_WAIT_FOR_LANES_IN_LW_NEG_MASK 0x01800000L
++#define PSWUSP0_PCIE_LC_CNTL2__LC_PWR_DOWN_NEG_OFF_LANES_MASK 0x02000000L
++#define PSWUSP0_PCIE_LC_CNTL2__LC_DISABLE_LOST_SYM_LOCK_ARCS_MASK 0x04000000L
++#define PSWUSP0_PCIE_LC_CNTL2__LC_LINK_BW_NOTIFICATION_DIS_MASK 0x08000000L
++#define PSWUSP0_PCIE_LC_CNTL2__LC_PMI_L1_WAIT_FOR_SLV_IDLE_MASK 0x10000000L
++#define PSWUSP0_PCIE_LC_CNTL2__LC_TEST_TIMER_SEL_MASK 0x60000000L
++#define PSWUSP0_PCIE_LC_CNTL2__LC_ENABLE_INFERRED_ELEC_IDLE_FOR_PI_MASK 0x80000000L
++//PCIE_LC_BW_CHANGE_CNTL
++#define PCIE_LC_BW_CHANGE_CNTL__LC_BW_CHANGE_INT_EN__SHIFT 0x0
++#define PCIE_LC_BW_CHANGE_CNTL__LC_HW_INIT_SPEED_CHANGE__SHIFT 0x1
++#define PCIE_LC_BW_CHANGE_CNTL__LC_SW_INIT_SPEED_CHANGE__SHIFT 0x2
++#define PCIE_LC_BW_CHANGE_CNTL__LC_OTHER_INIT_SPEED_CHANGE__SHIFT 0x3
++#define PCIE_LC_BW_CHANGE_CNTL__LC_RELIABILITY_SPEED_CHANGE__SHIFT 0x4
++#define PCIE_LC_BW_CHANGE_CNTL__LC_FAILED_SPEED_NEG__SHIFT 0x5
++#define PCIE_LC_BW_CHANGE_CNTL__LC_LONG_LW_CHANGE__SHIFT 0x6
++#define PCIE_LC_BW_CHANGE_CNTL__LC_SHORT_LW_CHANGE__SHIFT 0x7
++#define PCIE_LC_BW_CHANGE_CNTL__LC_LW_CHANGE_OTHER__SHIFT 0x8
++#define PCIE_LC_BW_CHANGE_CNTL__LC_LW_CHANGE_FAILED__SHIFT 0x9
++#define PCIE_LC_BW_CHANGE_CNTL__LC_LINK_BW_NOTIFICATION_DETECT_MODE__SHIFT 0xa
++#define PCIE_LC_BW_CHANGE_CNTL__LC_SPEED_NEG_UNSUCCESSFUL__SHIFT 0xb
++#define PCIE_LC_BW_CHANGE_CNTL__LC_BW_CHANGE_INT_EN_MASK 0x00000001L
++#define PCIE_LC_BW_CHANGE_CNTL__LC_HW_INIT_SPEED_CHANGE_MASK 0x00000002L
++#define PCIE_LC_BW_CHANGE_CNTL__LC_SW_INIT_SPEED_CHANGE_MASK 0x00000004L
++#define PCIE_LC_BW_CHANGE_CNTL__LC_OTHER_INIT_SPEED_CHANGE_MASK 0x00000008L
++#define PCIE_LC_BW_CHANGE_CNTL__LC_RELIABILITY_SPEED_CHANGE_MASK 0x00000010L
++#define PCIE_LC_BW_CHANGE_CNTL__LC_FAILED_SPEED_NEG_MASK 0x00000020L
++#define PCIE_LC_BW_CHANGE_CNTL__LC_LONG_LW_CHANGE_MASK 0x00000040L
++#define PCIE_LC_BW_CHANGE_CNTL__LC_SHORT_LW_CHANGE_MASK 0x00000080L
++#define PCIE_LC_BW_CHANGE_CNTL__LC_LW_CHANGE_OTHER_MASK 0x00000100L
++#define PCIE_LC_BW_CHANGE_CNTL__LC_LW_CHANGE_FAILED_MASK 0x00000200L
++#define PCIE_LC_BW_CHANGE_CNTL__LC_LINK_BW_NOTIFICATION_DETECT_MODE_MASK 0x00000400L
++#define PCIE_LC_BW_CHANGE_CNTL__LC_SPEED_NEG_UNSUCCESSFUL_MASK 0x00000800L
++//PCIE_LC_CDR_CNTL
++#define PCIE_LC_CDR_CNTL__LC_CDR_TEST_OFF__SHIFT 0x0
++#define PCIE_LC_CDR_CNTL__LC_CDR_TEST_SETS__SHIFT 0xc
++#define PCIE_LC_CDR_CNTL__LC_CDR_SET_TYPE__SHIFT 0x18
++#define PCIE_LC_CDR_CNTL__LC_CDR_TEST_OFF_MASK 0x00000FFFL
++#define PCIE_LC_CDR_CNTL__LC_CDR_TEST_SETS_MASK 0x00FFF000L
++#define PCIE_LC_CDR_CNTL__LC_CDR_SET_TYPE_MASK 0x03000000L
++//PCIE_LC_LANE_CNTL
++#define PCIE_LC_LANE_CNTL__LC_CORRUPTED_LANES__SHIFT 0x0
++#define PCIE_LC_LANE_CNTL__LC_LANE_DIS__SHIFT 0x10
++#define PCIE_LC_LANE_CNTL__LC_CORRUPTED_LANES_MASK 0x0000FFFFL
++#define PCIE_LC_LANE_CNTL__LC_LANE_DIS_MASK 0xFFFF0000L
++//PCIE_LC_CNTL3
++#define PCIE_LC_CNTL3__LC_SELECT_DEEMPHASIS__SHIFT 0x0
++#define PCIE_LC_CNTL3__LC_SELECT_DEEMPHASIS_CNTL__SHIFT 0x1
++#define PCIE_LC_CNTL3__LC_RCVD_DEEMPHASIS__SHIFT 0x3
++#define PCIE_LC_CNTL3__LC_COMP_TO_DETECT__SHIFT 0x4
++#define PCIE_LC_CNTL3__LC_RESET_TSX_CNT_IN_RLOCK_EN__SHIFT 0x5
++#define PCIE_LC_CNTL3__LC_AUTO_SPEED_CHANGE_ATTEMPTS_ALLOWED__SHIFT 0x6
++#define PCIE_LC_CNTL3__LC_AUTO_SPEED_CHANGE_ATTEMPT_FAILED__SHIFT 0x8
++#define PCIE_LC_CNTL3__LC_CLR_FAILED_AUTO_SPD_CHANGE_CNT__SHIFT 0x9
++#define PCIE_LC_CNTL3__LC_ENHANCED_HOT_PLUG_EN__SHIFT 0xa
++#define PCIE_LC_CNTL3__LC_RCVR_DET_EN_OVERRIDE__SHIFT 0xb
++#define PCIE_LC_CNTL3__LC_CHIP_BIF_USB_IDLE_EN__SHIFT 0x10
++#define PCIE_LC_CNTL3__LC_L1_BLOCK_RECONFIG_EN__SHIFT 0x11
++#define PCIE_LC_CNTL3__LC_AUTO_DISABLE_SPEED_SUPPORT_EN__SHIFT 0x12
++#define PCIE_LC_CNTL3__LC_AUTO_DISABLE_SPEED_SUPPORT_MAX_FAIL_SEL__SHIFT 0x13
++#define PCIE_LC_CNTL3__LC_FAST_L1_ENTRY_EXIT_EN__SHIFT 0x15
++#define PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK__SHIFT 0x17
++#define PCIE_LC_CNTL3__LC_HW_VOLTAGE_IF_CONTROL__SHIFT 0x18
++#define PCIE_LC_CNTL3__LC_VOLTAGE_TIMER_SEL__SHIFT 0x1a
++#define PCIE_LC_CNTL3__LC_GO_TO_RECOVERY__SHIFT 0x1e
++#define PCIE_LC_CNTL3__LC_AUTO_RECOVERY_DIS__SHIFT 0x1f
++#define PCIE_LC_CNTL3__LC_SELECT_DEEMPHASIS_MASK 0x00000001L
++#define PCIE_LC_CNTL3__LC_SELECT_DEEMPHASIS_CNTL_MASK 0x00000006L
++#define PCIE_LC_CNTL3__LC_RCVD_DEEMPHASIS_MASK 0x00000008L
++#define PCIE_LC_CNTL3__LC_COMP_TO_DETECT_MASK 0x00000010L
++#define PCIE_LC_CNTL3__LC_RESET_TSX_CNT_IN_RLOCK_EN_MASK 0x00000020L
++#define PCIE_LC_CNTL3__LC_AUTO_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK 0x000000C0L
++#define PCIE_LC_CNTL3__LC_AUTO_SPEED_CHANGE_ATTEMPT_FAILED_MASK 0x00000100L
++#define PCIE_LC_CNTL3__LC_CLR_FAILED_AUTO_SPD_CHANGE_CNT_MASK 0x00000200L
++#define PCIE_LC_CNTL3__LC_ENHANCED_HOT_PLUG_EN_MASK 0x00000400L
++#define PCIE_LC_CNTL3__LC_RCVR_DET_EN_OVERRIDE_MASK 0x00000800L
++#define PCIE_LC_CNTL3__LC_CHIP_BIF_USB_IDLE_EN_MASK 0x00010000L
++#define PCIE_LC_CNTL3__LC_L1_BLOCK_RECONFIG_EN_MASK 0x00020000L
++#define PCIE_LC_CNTL3__LC_AUTO_DISABLE_SPEED_SUPPORT_EN_MASK 0x00040000L
++#define PCIE_LC_CNTL3__LC_AUTO_DISABLE_SPEED_SUPPORT_MAX_FAIL_SEL_MASK 0x00180000L
++#define PCIE_LC_CNTL3__LC_FAST_L1_ENTRY_EXIT_EN_MASK 0x00200000L
++#define PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK 0x00800000L
++#define PCIE_LC_CNTL3__LC_HW_VOLTAGE_IF_CONTROL_MASK 0x03000000L
++#define PCIE_LC_CNTL3__LC_VOLTAGE_TIMER_SEL_MASK 0x3C000000L
++#define PCIE_LC_CNTL3__LC_GO_TO_RECOVERY_MASK 0x40000000L
++#define PCIE_LC_CNTL3__LC_AUTO_RECOVERY_DIS_MASK 0x80000000L
++//PCIE_LC_CNTL4
++#define PCIE_LC_CNTL4__LC_TX_ENABLE_BEHAVIOUR__SHIFT 0x0
++#define PCIE_LC_CNTL4__LC_DIS_CONTIG_END_SET_CHECK__SHIFT 0x2
++#define PCIE_LC_CNTL4__LC_DIS_ASPM_L1_IN_SPEED_CHANGE__SHIFT 0x3
++#define PCIE_LC_CNTL4__LC_BYPASS_EQ_8GT__SHIFT 0x4
++#define PCIE_LC_CNTL4__LC_REDO_EQ_8GT__SHIFT 0x5
++#define PCIE_LC_CNTL4__LC_EXTEND_EIEOS__SHIFT 0x6
++#define PCIE_LC_CNTL4__LC_IGNORE_PARITY__SHIFT 0x7
++#define PCIE_LC_CNTL4__LC_EQ_SEARCH_MODE_8GT__SHIFT 0x8
++#define PCIE_LC_CNTL4__LC_DSC_CHECK_COEFFS_IN_RLOCK__SHIFT 0xa
++#define PCIE_LC_CNTL4__LC_USC_EQ_NOT_REQD_8GT__SHIFT 0xb
++#define PCIE_LC_CNTL4__LC_USC_GO_TO_EQ_8GT__SHIFT 0xc
++#define PCIE_LC_CNTL4__LC_SET_QUIESCE__SHIFT 0xd
++#define PCIE_LC_CNTL4__LC_QUIESCE_RCVD__SHIFT 0xe
++#define PCIE_LC_CNTL4__LC_UNEXPECTED_COEFFS_RCVD_8GT__SHIFT 0xf
++#define PCIE_LC_CNTL4__LC_BYPASS_EQ_REQ_PHASE_8GT__SHIFT 0x10
++#define PCIE_LC_CNTL4__LC_FORCE_PRESET_IN_EQ_REQ_PHASE_8GT__SHIFT 0x11
++#define PCIE_LC_CNTL4__LC_FORCE_PRESET_VALUE_8GT__SHIFT 0x12
++#define PCIE_LC_CNTL4__LC_USC_DELAY_DLLPS__SHIFT 0x16
++#define PCIE_LC_CNTL4__LC_TX_SWING__SHIFT 0x17
++#define PCIE_LC_CNTL4__LC_EQ_WAIT_FOR_EVAL_DONE__SHIFT 0x18
++#define PCIE_LC_CNTL4__LC_8GT_SKIP_ORDER_EN__SHIFT 0x19
++#define PCIE_LC_CNTL4__LC_WAIT_FOR_MORE_TS_IN_RLOCK__SHIFT 0x1a
++#define PCIE_LC_CNTL4__LC_TX_ENABLE_BEHAVIOUR_MASK 0x00000003L
++#define PCIE_LC_CNTL4__LC_DIS_CONTIG_END_SET_CHECK_MASK 0x00000004L
++#define PCIE_LC_CNTL4__LC_DIS_ASPM_L1_IN_SPEED_CHANGE_MASK 0x00000008L
++#define PCIE_LC_CNTL4__LC_BYPASS_EQ_8GT_MASK 0x00000010L
++#define PCIE_LC_CNTL4__LC_REDO_EQ_8GT_MASK 0x00000020L
++#define PCIE_LC_CNTL4__LC_EXTEND_EIEOS_MASK 0x00000040L
++#define PCIE_LC_CNTL4__LC_IGNORE_PARITY_MASK 0x00000080L
++#define PCIE_LC_CNTL4__LC_EQ_SEARCH_MODE_8GT_MASK 0x00000300L
++#define PCIE_LC_CNTL4__LC_DSC_CHECK_COEFFS_IN_RLOCK_MASK 0x00000400L
++#define PCIE_LC_CNTL4__LC_USC_EQ_NOT_REQD_8GT_MASK 0x00000800L
++#define PCIE_LC_CNTL4__LC_USC_GO_TO_EQ_8GT_MASK 0x00001000L
++#define PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK 0x00002000L
++#define PCIE_LC_CNTL4__LC_QUIESCE_RCVD_MASK 0x00004000L
++#define PCIE_LC_CNTL4__LC_UNEXPECTED_COEFFS_RCVD_8GT_MASK 0x00008000L
++#define PCIE_LC_CNTL4__LC_BYPASS_EQ_REQ_PHASE_8GT_MASK 0x00010000L
++#define PCIE_LC_CNTL4__LC_FORCE_PRESET_IN_EQ_REQ_PHASE_8GT_MASK 0x00020000L
++#define PCIE_LC_CNTL4__LC_FORCE_PRESET_VALUE_8GT_MASK 0x003C0000L
++#define PCIE_LC_CNTL4__LC_USC_DELAY_DLLPS_MASK 0x00400000L
++#define PCIE_LC_CNTL4__LC_TX_SWING_MASK 0x00800000L
++#define PCIE_LC_CNTL4__LC_EQ_WAIT_FOR_EVAL_DONE_MASK 0x01000000L
++#define PCIE_LC_CNTL4__LC_8GT_SKIP_ORDER_EN_MASK 0x02000000L
++#define PCIE_LC_CNTL4__LC_WAIT_FOR_MORE_TS_IN_RLOCK_MASK 0xFC000000L
++//PCIE_LC_CNTL5
++#define PCIE_LC_CNTL5__LC_DSC_EQ_FS_LF_INVALID_TO_PRESETS__SHIFT 0x18
++#define PCIE_LC_CNTL5__LC_TX_SWING_OVERRIDE__SHIFT 0x19
++#define PCIE_LC_CNTL5__LC_ACCEPT_ALL_PRESETS__SHIFT 0x1a
++#define PCIE_LC_CNTL5__LC_ACCEPT_ALL_PRESETS_TEST__SHIFT 0x1b
++#define PCIE_LC_CNTL5__LC_WAIT_IN_DETECT__SHIFT 0x1c
++#define PCIE_LC_CNTL5__LC_HOLD_TRAINING_MODE__SHIFT 0x1d
++#define PCIE_LC_CNTL5__LC_DSC_EQ_FS_LF_INVALID_TO_PRESETS_MASK 0x01000000L
++#define PCIE_LC_CNTL5__LC_TX_SWING_OVERRIDE_MASK 0x02000000L
++#define PCIE_LC_CNTL5__LC_ACCEPT_ALL_PRESETS_MASK 0x04000000L
++#define PCIE_LC_CNTL5__LC_ACCEPT_ALL_PRESETS_TEST_MASK 0x08000000L
++#define PCIE_LC_CNTL5__LC_WAIT_IN_DETECT_MASK 0x10000000L
++#define PCIE_LC_CNTL5__LC_HOLD_TRAINING_MODE_MASK 0xE0000000L
++//PCIE_LC_FORCE_COEFF
++#define PCIE_LC_FORCE_COEFF__LC_FORCE_COEFF_8GT__SHIFT 0x0
++#define PCIE_LC_FORCE_COEFF__LC_FORCE_PRE_CURSOR_8GT__SHIFT 0x1
++#define PCIE_LC_FORCE_COEFF__LC_FORCE_CURSOR_8GT__SHIFT 0x7
++#define PCIE_LC_FORCE_COEFF__LC_FORCE_POST_CURSOR_8GT__SHIFT 0xd
++#define PCIE_LC_FORCE_COEFF__LC_3X3_COEFF_SEARCH_EN_8GT__SHIFT 0x13
++#define PCIE_LC_FORCE_COEFF__LC_PRESET_10_EN__SHIFT 0x14
++#define PCIE_LC_FORCE_COEFF__LC_FORCE_COEFF_8GT_MASK 0x00000001L
++#define PCIE_LC_FORCE_COEFF__LC_FORCE_PRE_CURSOR_8GT_MASK 0x0000007EL
++#define PCIE_LC_FORCE_COEFF__LC_FORCE_CURSOR_8GT_MASK 0x00001F80L
++#define PCIE_LC_FORCE_COEFF__LC_FORCE_POST_CURSOR_8GT_MASK 0x0007E000L
++#define PCIE_LC_FORCE_COEFF__LC_3X3_COEFF_SEARCH_EN_8GT_MASK 0x00080000L
++#define PCIE_LC_FORCE_COEFF__LC_PRESET_10_EN_MASK 0x00100000L
++//PCIE_LC_BEST_EQ_SETTINGS
++#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_PRESET__SHIFT 0x0
++#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_PRECURSOR__SHIFT 0x4
++#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_CURSOR__SHIFT 0xa
++#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_POSTCURSOR__SHIFT 0x10
++#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_FOM__SHIFT 0x16
++#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_SETTINGS_RATE__SHIFT 0x1e
++#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_PRESET_MASK 0x0000000FL
++#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_PRECURSOR_MASK 0x000003F0L
++#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_CURSOR_MASK 0x0000FC00L
++#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_POSTCURSOR_MASK 0x003F0000L
++#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_FOM_MASK 0x3FC00000L
++#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_SETTINGS_RATE_MASK 0x40000000L
++//PCIE_LC_FORCE_EQ_REQ_COEFF
++#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FORCE_COEFF_IN_EQ_REQ_PHASE_8GT__SHIFT 0x0
++#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FORCE_PRE_CURSOR_REQ_8GT__SHIFT 0x1
++#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FORCE_CURSOR_REQ_8GT__SHIFT 0x7
++#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FORCE_POST_CURSOR_REQ_8GT__SHIFT 0xd
++#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FS_OTHER_END_8GT__SHIFT 0x13
++#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_LF_OTHER_END_8GT__SHIFT 0x19
++#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FORCE_COEFF_IN_EQ_REQ_PHASE_8GT_MASK 0x00000001L
++#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FORCE_PRE_CURSOR_REQ_8GT_MASK 0x0000007EL
++#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FORCE_CURSOR_REQ_8GT_MASK 0x00001F80L
++#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FORCE_POST_CURSOR_REQ_8GT_MASK 0x0007E000L
++#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FS_OTHER_END_8GT_MASK 0x01F80000L
++#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_LF_OTHER_END_8GT_MASK 0x7E000000L
++//PCIE_LC_CNTL6
++#define PCIE_LC_CNTL6__LC_SPC_MODE_2P5GT__SHIFT 0x0
++#define PCIE_LC_CNTL6__LC_SPC_MODE_5GT__SHIFT 0x2
++#define PCIE_LC_CNTL6__LC_SPC_MODE_8GT__SHIFT 0x4
++#define PCIE_LC_CNTL6__LC_SPC_MODE_16GT__SHIFT 0x6
++#define PCIE_LC_CNTL6__LC_SRIS_EN__SHIFT 0x8
++#define PCIE_LC_CNTL6__LC_SRNS_SKIP_IN_SRIS__SHIFT 0x9
++#define PCIE_LC_CNTL6__LC_SRIS_AUTODETECT_EN__SHIFT 0xd
++#define PCIE_LC_CNTL6__LC_SRIS_AUTODETECT_FACTOR__SHIFT 0xe
++#define PCIE_LC_CNTL6__LC_SRIS_AUTODETECT_MODE__SHIFT 0x10
++#define PCIE_LC_CNTL6__LC_SRIS_AUTODETECT_OUT_OF_RANGE__SHIFT 0x12
++#define PCIE_LC_CNTL6__LC_DEFER_SKIP_FOR_EIEOS_EN__SHIFT 0x13
++#define PCIE_LC_CNTL6__LC_SEND_EIEOS_IN_RCFG__SHIFT 0x14
++#define PCIE_LC_CNTL6__LC_L1_POWERDOWN__SHIFT 0x15
++#define PCIE_LC_CNTL6__LC_P2_ENTRY__SHIFT 0x16
++#define PCIE_LC_CNTL6__LC_RXRECOVER_EN__SHIFT 0x17
++#define PCIE_LC_CNTL6__LC_RXRECOVER_TIMEOUT__SHIFT 0x18
++#define PCIE_LC_CNTL6__LC_RX_L0S_STANDBY_EN__SHIFT 0x1f
++#define PCIE_LC_CNTL6__LC_SPC_MODE_2P5GT_MASK 0x00000003L
++#define PCIE_LC_CNTL6__LC_SPC_MODE_5GT_MASK 0x0000000CL
++#define PCIE_LC_CNTL6__LC_SPC_MODE_8GT_MASK 0x00000030L
++#define PCIE_LC_CNTL6__LC_SPC_MODE_16GT_MASK 0x000000C0L
++#define PCIE_LC_CNTL6__LC_SRIS_EN_MASK 0x00000100L
++#define PCIE_LC_CNTL6__LC_SRNS_SKIP_IN_SRIS_MASK 0x00001E00L
++#define PCIE_LC_CNTL6__LC_SRIS_AUTODETECT_EN_MASK 0x00002000L
++#define PCIE_LC_CNTL6__LC_SRIS_AUTODETECT_FACTOR_MASK 0x0000C000L
++#define PCIE_LC_CNTL6__LC_SRIS_AUTODETECT_MODE_MASK 0x00030000L
++#define PCIE_LC_CNTL6__LC_SRIS_AUTODETECT_OUT_OF_RANGE_MASK 0x00040000L
++#define PCIE_LC_CNTL6__LC_DEFER_SKIP_FOR_EIEOS_EN_MASK 0x00080000L
++#define PCIE_LC_CNTL6__LC_SEND_EIEOS_IN_RCFG_MASK 0x00100000L
++#define PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK 0x00200000L
++#define PCIE_LC_CNTL6__LC_P2_ENTRY_MASK 0x00400000L
++#define PCIE_LC_CNTL6__LC_RXRECOVER_EN_MASK 0x00800000L
++#define PCIE_LC_CNTL6__LC_RXRECOVER_TIMEOUT_MASK 0x7F000000L
++#define PCIE_LC_CNTL6__LC_RX_L0S_STANDBY_EN_MASK 0x80000000L
++//PCIE_LC_CNTL7
++#define PCIE_LC_CNTL7__LC_EXPECTED_TS2_CFG_COMPLETE__SHIFT 0x0
++#define PCIE_LC_CNTL7__LC_IGNORE_NON_CONTIG_SETS_IN_RCFG__SHIFT 0x1
++#define PCIE_LC_CNTL7__LC_ROBUST_TRAINING_BIT_CHK_EN__SHIFT 0x2
++#define PCIE_LC_CNTL7__LC_RESET_TS_COUNT_ON_EI__SHIFT 0x3
++#define PCIE_LC_CNTL7__LC_NBIF_ASPM_INPUT_EN__SHIFT 0x4
++#define PCIE_LC_CNTL7__LC_CLEAR_REVERSE_ATTEMPT_IN_L0__SHIFT 0x5
++#define PCIE_LC_CNTL7__LC_LOCK_REVERSAL__SHIFT 0x6
++#define PCIE_LC_CNTL7__LC_FORCE_RX_EQ_IN_PROGRESS__SHIFT 0x7
++#define PCIE_LC_CNTL7__LC_EVER_IDLE_TO_RLOCK__SHIFT 0x8
++#define PCIE_LC_CNTL7__LC_RXEQEVAL_AFTER_TIMEOUT_EN__SHIFT 0x9
++#define PCIE_LC_CNTL7__LC_WAIT_FOR_LANES_IN_CONFIG__SHIFT 0xa
++#define PCIE_LC_CNTL7__LC_REQ_COEFFS_FOR_TXMARGIN_EN__SHIFT 0xb
++#define PCIE_LC_CNTL7__LC_ESM_WAIT_FOR_PLL_INIT_DONE_L1__SHIFT 0xc
++#define PCIE_LC_CNTL7__LC_SCHEDULED_RXEQEVAL_INTERVAL__SHIFT 0xd
++#define PCIE_LC_CNTL7__LC_SCHEDULED_RXEQEVAL_MODE__SHIFT 0x15
++#define PCIE_LC_CNTL7__LC_SCHEDULED_RXEQEVAL_UPCONFIG_EN__SHIFT 0x16
++#define PCIE_LC_CNTL7__LC_LINK_MANAGEMENT_EN__SHIFT 0x17
++#define PCIE_LC_CNTL7__LC_ESM_PLL_INIT_STATE__SHIFT 0x1b
++#define PCIE_LC_CNTL7__LC_ESM_PLL_INIT_DONE__SHIFT 0x1c
++#define PCIE_LC_CNTL7__LC_ESM_REDO_INIT__SHIFT 0x1d
++#define PCIE_LC_CNTL7__LC_MULTIPORT_ESM__SHIFT 0x1e
++#define PCIE_LC_CNTL7__LC_CONSECUTIVE_EIOS_RESET_EN__SHIFT 0x1f
++#define PCIE_LC_CNTL7__LC_EXPECTED_TS2_CFG_COMPLETE_MASK 0x00000001L
++#define PCIE_LC_CNTL7__LC_IGNORE_NON_CONTIG_SETS_IN_RCFG_MASK 0x00000002L
++#define PCIE_LC_CNTL7__LC_ROBUST_TRAINING_BIT_CHK_EN_MASK 0x00000004L
++#define PCIE_LC_CNTL7__LC_RESET_TS_COUNT_ON_EI_MASK 0x00000008L
++#define PCIE_LC_CNTL7__LC_NBIF_ASPM_INPUT_EN_MASK 0x00000010L
++#define PCIE_LC_CNTL7__LC_CLEAR_REVERSE_ATTEMPT_IN_L0_MASK 0x00000020L
++#define PCIE_LC_CNTL7__LC_LOCK_REVERSAL_MASK 0x00000040L
++#define PCIE_LC_CNTL7__LC_FORCE_RX_EQ_IN_PROGRESS_MASK 0x00000080L
++#define PCIE_LC_CNTL7__LC_EVER_IDLE_TO_RLOCK_MASK 0x00000100L
++#define PCIE_LC_CNTL7__LC_RXEQEVAL_AFTER_TIMEOUT_EN_MASK 0x00000200L
++#define PCIE_LC_CNTL7__LC_WAIT_FOR_LANES_IN_CONFIG_MASK 0x00000400L
++#define PCIE_LC_CNTL7__LC_REQ_COEFFS_FOR_TXMARGIN_EN_MASK 0x00000800L
++#define PCIE_LC_CNTL7__LC_ESM_WAIT_FOR_PLL_INIT_DONE_L1_MASK 0x00001000L
++#define PCIE_LC_CNTL7__LC_SCHEDULED_RXEQEVAL_INTERVAL_MASK 0x001FE000L
++#define PCIE_LC_CNTL7__LC_SCHEDULED_RXEQEVAL_MODE_MASK 0x00200000L
++#define PCIE_LC_CNTL7__LC_SCHEDULED_RXEQEVAL_UPCONFIG_EN_MASK 0x00400000L
++#define PCIE_LC_CNTL7__LC_LINK_MANAGEMENT_EN_MASK 0x00800000L
++#define PCIE_LC_CNTL7__LC_ESM_PLL_INIT_STATE_MASK 0x08000000L
++#define PCIE_LC_CNTL7__LC_ESM_PLL_INIT_DONE_MASK 0x10000000L
++#define PCIE_LC_CNTL7__LC_ESM_REDO_INIT_MASK 0x20000000L
++#define PCIE_LC_CNTL7__LC_MULTIPORT_ESM_MASK 0x40000000L
++#define PCIE_LC_CNTL7__LC_CONSECUTIVE_EIOS_RESET_EN_MASK 0x80000000L
++//PCIE_LINK_MANAGEMENT_STATUS
++#define PCIE_LINK_MANAGEMENT_STATUS__LINK_SPEED_UPDATE__SHIFT 0x0
++#define PCIE_LINK_MANAGEMENT_STATUS__LINK_SPEED_CHANGE_ATTEMPT_FAILED__SHIFT 0x1
++#define PCIE_LINK_MANAGEMENT_STATUS__LINK_PARTNER_SPEED_SUPPORT_UPDATE__SHIFT 0x2
++#define PCIE_LINK_MANAGEMENT_STATUS__LINK_WIDTH_UPDATE__SHIFT 0x3
++#define PCIE_LINK_MANAGEMENT_STATUS__LINK_WIDTH_CHANGE_ATTEMPT_FAILED__SHIFT 0x4
++#define PCIE_LINK_MANAGEMENT_STATUS__LINK_PARTNER_WIDTH_SUPPORT_UPDATE__SHIFT 0x5
++#define PCIE_LINK_MANAGEMENT_STATUS__POWER_DOWN_COMMAND_COMPLETE__SHIFT 0x6
++#define PCIE_LINK_MANAGEMENT_STATUS__BANDWIDTH_UPDATE__SHIFT 0x7
++#define PCIE_LINK_MANAGEMENT_STATUS__LINK_POWER_STATE_CHANGE__SHIFT 0x8
++#define PCIE_LINK_MANAGEMENT_STATUS__BW_REQUIREMENT_HINT__SHIFT 0x9
++#define PCIE_LINK_MANAGEMENT_STATUS__EQUALIZATION_REQUEST__SHIFT 0xa
++#define PCIE_LINK_MANAGEMENT_STATUS__LINK_PARTNER_ESM_REQUEST__SHIFT 0xb
++#define PCIE_LINK_MANAGEMENT_STATUS__LOW_SPEED_REQD_IMMEDIATE__SHIFT 0xc
++#define PCIE_LINK_MANAGEMENT_STATUS__ESTABLISH_ESM_PLL_SETTINGS__SHIFT 0xd
++#define PCIE_LINK_MANAGEMENT_STATUS__LINK_SPEED_UPDATE_MASK 0x00000001L
++#define PCIE_LINK_MANAGEMENT_STATUS__LINK_SPEED_CHANGE_ATTEMPT_FAILED_MASK 0x00000002L
++#define PCIE_LINK_MANAGEMENT_STATUS__LINK_PARTNER_SPEED_SUPPORT_UPDATE_MASK 0x00000004L
++#define PCIE_LINK_MANAGEMENT_STATUS__LINK_WIDTH_UPDATE_MASK 0x00000008L
++#define PCIE_LINK_MANAGEMENT_STATUS__LINK_WIDTH_CHANGE_ATTEMPT_FAILED_MASK 0x00000010L
++#define PCIE_LINK_MANAGEMENT_STATUS__LINK_PARTNER_WIDTH_SUPPORT_UPDATE_MASK 0x00000020L
++#define PCIE_LINK_MANAGEMENT_STATUS__POWER_DOWN_COMMAND_COMPLETE_MASK 0x00000040L
++#define PCIE_LINK_MANAGEMENT_STATUS__BANDWIDTH_UPDATE_MASK 0x00000080L
++#define PCIE_LINK_MANAGEMENT_STATUS__LINK_POWER_STATE_CHANGE_MASK 0x00000100L
++#define PCIE_LINK_MANAGEMENT_STATUS__BW_REQUIREMENT_HINT_MASK 0x00000200L
++#define PCIE_LINK_MANAGEMENT_STATUS__EQUALIZATION_REQUEST_MASK 0x00000400L
++#define PCIE_LINK_MANAGEMENT_STATUS__LINK_PARTNER_ESM_REQUEST_MASK 0x00000800L
++#define PCIE_LINK_MANAGEMENT_STATUS__LOW_SPEED_REQD_IMMEDIATE_MASK 0x00001000L
++#define PCIE_LINK_MANAGEMENT_STATUS__ESTABLISH_ESM_PLL_SETTINGS_MASK 0x00002000L
++//PCIE_LINK_MANAGEMENT_MASK
++#define PCIE_LINK_MANAGEMENT_MASK__LINK_SPEED_UPDATE_MASK__SHIFT 0x0
++#define PCIE_LINK_MANAGEMENT_MASK__LINK_SPEED_CHANGE_ATTEMPT_FAILED_MASK__SHIFT 0x1
++#define PCIE_LINK_MANAGEMENT_MASK__LINK_PARTNER_SPEED_SUPPORT_UPDATE_MASK__SHIFT 0x2
++#define PCIE_LINK_MANAGEMENT_MASK__LINK_WIDTH_UPDATE_MASK__SHIFT 0x3
++#define PCIE_LINK_MANAGEMENT_MASK__LINK_WIDTH_CHANGE_ATTEMPT_FAILED_MASK__SHIFT 0x4
++#define PCIE_LINK_MANAGEMENT_MASK__LINK_PARTNER_WIDTH_SUPPORT_UPDATE_MASK__SHIFT 0x5
++#define PCIE_LINK_MANAGEMENT_MASK__POWER_DOWN_COMMAND_COMPLETE_MASK__SHIFT 0x6
++#define PCIE_LINK_MANAGEMENT_MASK__BANDWIDTH_UPDATE_MASK__SHIFT 0x7
++#define PCIE_LINK_MANAGEMENT_MASK__LINK_POWER_STATE_CHANGE_MASK__SHIFT 0x8
++#define PCIE_LINK_MANAGEMENT_MASK__BW_REQUIREMENT_HINT_MASK__SHIFT 0x9
++#define PCIE_LINK_MANAGEMENT_MASK__EQUALIZATION_REQUEST_MASK__SHIFT 0xa
++#define PCIE_LINK_MANAGEMENT_MASK__LINK_PARTNER_ESM_REQUEST_MASK__SHIFT 0xb
++#define PCIE_LINK_MANAGEMENT_MASK__LOW_SPEED_REQD_IMMEDIATE_MASK__SHIFT 0xc
++#define PCIE_LINK_MANAGEMENT_MASK__ESTABLISH_ESM_PLL_SETTINGS_MASK__SHIFT 0xd
++#define PCIE_LINK_MANAGEMENT_MASK__LINK_SPEED_UPDATE_MASK_MASK 0x00000001L
++#define PCIE_LINK_MANAGEMENT_MASK__LINK_SPEED_CHANGE_ATTEMPT_FAILED_MASK_MASK 0x00000002L
++#define PCIE_LINK_MANAGEMENT_MASK__LINK_PARTNER_SPEED_SUPPORT_UPDATE_MASK_MASK 0x00000004L
++#define PCIE_LINK_MANAGEMENT_MASK__LINK_WIDTH_UPDATE_MASK_MASK 0x00000008L
++#define PCIE_LINK_MANAGEMENT_MASK__LINK_WIDTH_CHANGE_ATTEMPT_FAILED_MASK_MASK 0x00000010L
++#define PCIE_LINK_MANAGEMENT_MASK__LINK_PARTNER_WIDTH_SUPPORT_UPDATE_MASK_MASK 0x00000020L
++#define PCIE_LINK_MANAGEMENT_MASK__POWER_DOWN_COMMAND_COMPLETE_MASK_MASK 0x00000040L
++#define PCIE_LINK_MANAGEMENT_MASK__BANDWIDTH_UPDATE_MASK_MASK 0x00000080L
++#define PCIE_LINK_MANAGEMENT_MASK__LINK_POWER_STATE_CHANGE_MASK_MASK 0x00000100L
++#define PCIE_LINK_MANAGEMENT_MASK__BW_REQUIREMENT_HINT_MASK_MASK 0x00000200L
++#define PCIE_LINK_MANAGEMENT_MASK__EQUALIZATION_REQUEST_MASK_MASK 0x00000400L
++#define PCIE_LINK_MANAGEMENT_MASK__LINK_PARTNER_ESM_REQUEST_MASK_MASK 0x00000800L
++#define PCIE_LINK_MANAGEMENT_MASK__LOW_SPEED_REQD_IMMEDIATE_MASK_MASK 0x00001000L
++#define PCIE_LINK_MANAGEMENT_MASK__ESTABLISH_ESM_PLL_SETTINGS_MASK_MASK 0x00002000L
++//PCIE_LINK_MANAGEMENT_CNTL
++#define PCIE_LINK_MANAGEMENT_CNTL__FAR_END_WIDTH_SUPPORT__SHIFT 0x0
++#define PCIE_LINK_MANAGEMENT_CNTL__LINK_POWER_STATE__SHIFT 0x3
++#define PCIE_LINK_MANAGEMENT_CNTL__LINK_POWER_STATE_MASK__SHIFT 0x7
++#define PCIE_LINK_MANAGEMENT_CNTL__LINK_UP__SHIFT 0xb
++#define PCIE_LINK_MANAGEMENT_CNTL__PORT_POWERED_DOWN__SHIFT 0xc
++#define PCIE_LINK_MANAGEMENT_CNTL__SPC_MODE__SHIFT 0xd
++#define PCIE_LINK_MANAGEMENT_CNTL__CLOCK_RATE__SHIFT 0xf
++#define PCIE_LINK_MANAGEMENT_CNTL__LOW_BW_HINT__SHIFT 0x11
++#define PCIE_LINK_MANAGEMENT_CNTL__HIGH_BW_HINT__SHIFT 0x12
++#define PCIE_LINK_MANAGEMENT_CNTL__LOW_BW_THRESHOLD__SHIFT 0x13
++#define PCIE_LINK_MANAGEMENT_CNTL__HIGH_BW_THRESHOLD__SHIFT 0x17
++#define PCIE_LINK_MANAGEMENT_CNTL__BW_HINT_COUNT__SHIFT 0x1b
++#define PCIE_LINK_MANAGEMENT_CNTL__EQ_REQ_RCVD_8GT__SHIFT 0x1e
++#define PCIE_LINK_MANAGEMENT_CNTL__EQ_REQ_RCVD_16GT__SHIFT 0x1f
++#define PCIE_LINK_MANAGEMENT_CNTL__FAR_END_WIDTH_SUPPORT_MASK 0x00000007L
++#define PCIE_LINK_MANAGEMENT_CNTL__LINK_POWER_STATE_MASK 0x00000078L
++#define PCIE_LINK_MANAGEMENT_CNTL__LINK_POWER_STATE_MASK_MASK 0x00000780L
++#define PCIE_LINK_MANAGEMENT_CNTL__LINK_UP_MASK 0x00000800L
++#define PCIE_LINK_MANAGEMENT_CNTL__PORT_POWERED_DOWN_MASK 0x00001000L
++#define PCIE_LINK_MANAGEMENT_CNTL__SPC_MODE_MASK 0x00006000L
++#define PCIE_LINK_MANAGEMENT_CNTL__CLOCK_RATE_MASK 0x00018000L
++#define PCIE_LINK_MANAGEMENT_CNTL__LOW_BW_HINT_MASK 0x00020000L
++#define PCIE_LINK_MANAGEMENT_CNTL__HIGH_BW_HINT_MASK 0x00040000L
++#define PCIE_LINK_MANAGEMENT_CNTL__LOW_BW_THRESHOLD_MASK 0x00780000L
++#define PCIE_LINK_MANAGEMENT_CNTL__HIGH_BW_THRESHOLD_MASK 0x07800000L
++#define PCIE_LINK_MANAGEMENT_CNTL__BW_HINT_COUNT_MASK 0x38000000L
++#define PCIE_LINK_MANAGEMENT_CNTL__EQ_REQ_RCVD_8GT_MASK 0x40000000L
++#define PCIE_LINK_MANAGEMENT_CNTL__EQ_REQ_RCVD_16GT_MASK 0x80000000L
++//PCIE_LC_L1_PM_SUBSTATE
++#define PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN__SHIFT 0x0
++#define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE__SHIFT 0x1
++#define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_1_OVERRIDE__SHIFT 0x2
++#define PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_2_OVERRIDE__SHIFT 0x3
++#define PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_1_OVERRIDE__SHIFT 0x4
++#define PCIE_LC_L1_PM_SUBSTATE__LC_CLKREQ_FILTER_EN__SHIFT 0x5
++#define PCIE_LC_L1_PM_SUBSTATE__LC_T_POWER_ON_SCALE__SHIFT 0x6
++#define PCIE_LC_L1_PM_SUBSTATE__LC_T_POWER_ON_VALUE__SHIFT 0x8
++#define PCIE_LC_L1_PM_SUBSTATE__LC_L1_1_POWERDOWN__SHIFT 0x10
++#define PCIE_LC_L1_PM_SUBSTATE__LC_L1_2_POWERDOWN__SHIFT 0x14
++#define PCIE_LC_L1_PM_SUBSTATE__LC_DEFER_L1_2_EXIT__SHIFT 0x17
++#define PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN_MASK 0x00000001L
++#define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE_MASK 0x00000002L
++#define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_1_OVERRIDE_MASK 0x00000004L
++#define PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_2_OVERRIDE_MASK 0x00000008L
++#define PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_1_OVERRIDE_MASK 0x00000010L
++#define PCIE_LC_L1_PM_SUBSTATE__LC_CLKREQ_FILTER_EN_MASK 0x00000020L
++#define PCIE_LC_L1_PM_SUBSTATE__LC_T_POWER_ON_SCALE_MASK 0x000000C0L
++#define PCIE_LC_L1_PM_SUBSTATE__LC_T_POWER_ON_VALUE_MASK 0x00001F00L
++#define PCIE_LC_L1_PM_SUBSTATE__LC_L1_1_POWERDOWN_MASK 0x00070000L
++#define PCIE_LC_L1_PM_SUBSTATE__LC_L1_2_POWERDOWN_MASK 0x00700000L
++#define PCIE_LC_L1_PM_SUBSTATE__LC_DEFER_L1_2_EXIT_MASK 0x03800000L
++//PCIE_LC_L1_PM_SUBSTATE2
++#define PCIE_LC_L1_PM_SUBSTATE2__LC_CM_RESTORE_TIME__SHIFT 0x0
++#define PCIE_LC_L1_PM_SUBSTATE2__LC_LTR_THRESHOLD_SCALE__SHIFT 0x8
++#define PCIE_LC_L1_PM_SUBSTATE2__LC_LTR_THRESHOLD_VALUE__SHIFT 0x10
++#define PCIE_LC_L1_PM_SUBSTATE2__LC_CM_RESTORE_TIME_MASK 0x000000FFL
++#define PCIE_LC_L1_PM_SUBSTATE2__LC_LTR_THRESHOLD_SCALE_MASK 0x00000700L
++#define PCIE_LC_L1_PM_SUBSTATE2__LC_LTR_THRESHOLD_VALUE_MASK 0x03FF0000L
++//PCIE_LC_PORT_ORDER
++#define PCIE_LC_PORT_ORDER__LC_PORT_OFFSET__SHIFT 0x0
++#define PCIE_LC_PORT_ORDER__LC_PORT_OFFSET_MASK 0x0000000FL
++//PCIEP_BCH_ECC_CNTL
++#define PCIEP_BCH_ECC_CNTL__STRAP_BCH_ECC_EN__SHIFT 0x0
++#define PCIEP_BCH_ECC_CNTL__BCH_ECC_ERROR_THRESHOLD__SHIFT 0x8
++#define PCIEP_BCH_ECC_CNTL__BCH_ECC_ERROR_STATUS__SHIFT 0x10
++#define PCIEP_BCH_ECC_CNTL__STRAP_BCH_ECC_EN_MASK 0x00000001L
++#define PCIEP_BCH_ECC_CNTL__BCH_ECC_ERROR_THRESHOLD_MASK 0x0000FF00L
++#define PCIEP_BCH_ECC_CNTL__BCH_ECC_ERROR_STATUS_MASK 0xFFFF0000L
++//PCIE_LC_CNTL8
++#define PCIE_LC_CNTL8__LC_EQ_SEARCH_MODE_16GT__SHIFT 0x0
++#define PCIE_LC_CNTL8__LC_BYPASS_EQ_16GT__SHIFT 0x2
++#define PCIE_LC_CNTL8__LC_BYPASS_EQ_PRESET_16GT__SHIFT 0x3
++#define PCIE_LC_CNTL8__LC_REDO_EQ_16GT__SHIFT 0x7
++#define PCIE_LC_CNTL8__LC_USC_EQ_NOT_REQD_16GT__SHIFT 0x8
++#define PCIE_LC_CNTL8__LC_USC_GO_TO_EQ_16GT__SHIFT 0x9
++#define PCIE_LC_CNTL8__LC_UNEXPECTED_COEFFS_RCVD_16GT__SHIFT 0xa
++#define PCIE_LC_CNTL8__LC_BYPASS_EQ_REQ_PHASE_16GT__SHIFT 0xb
++#define PCIE_LC_CNTL8__LC_FORCE_PRESET_IN_EQ_REQ_PHASE_16GT__SHIFT 0xc
++#define PCIE_LC_CNTL8__LC_FORCE_PRESET_VALUE_16GT__SHIFT 0xd
++#define PCIE_LC_CNTL8__LC_EQTS2_PRESET_EN__SHIFT 0x11
++#define PCIE_LC_CNTL8__LC_EQTS2_PRESET__SHIFT 0x12
++#define PCIE_LC_CNTL8__LC_USE_EQTS2_PRESET__SHIFT 0x16
++#define PCIE_LC_CNTL8__LC_FOM_TIME__SHIFT 0x17
++#define PCIE_LC_CNTL8__LC_SAFE_EQ_SEARCH__SHIFT 0x19
++#define PCIE_LC_CNTL8__LC_DONT_CHECK_EQTS_IN_RCFG__SHIFT 0x1a
++#define PCIE_LC_CNTL8__LC_DELAY_COEFF_UPDATE_DIS__SHIFT 0x1b
++#define PCIE_LC_CNTL8__LC_8GT_EQ_REDO_EN__SHIFT 0x1c
++#define PCIE_LC_CNTL8__LC_WAIT_FOR_EIEOS_IN_RLOCK__SHIFT 0x1d
++#define PCIE_LC_CNTL8__LC_DYNAMIC_INACTIVE_TS_SELECT__SHIFT 0x1e
++#define PCIE_LC_CNTL8__LC_EQ_SEARCH_MODE_16GT_MASK 0x00000003L
++#define PCIE_LC_CNTL8__LC_BYPASS_EQ_16GT_MASK 0x00000004L
++#define PCIE_LC_CNTL8__LC_BYPASS_EQ_PRESET_16GT_MASK 0x00000078L
++#define PCIE_LC_CNTL8__LC_REDO_EQ_16GT_MASK 0x00000080L
++#define PCIE_LC_CNTL8__LC_USC_EQ_NOT_REQD_16GT_MASK 0x00000100L
++#define PCIE_LC_CNTL8__LC_USC_GO_TO_EQ_16GT_MASK 0x00000200L
++#define PCIE_LC_CNTL8__LC_UNEXPECTED_COEFFS_RCVD_16GT_MASK 0x00000400L
++#define PCIE_LC_CNTL8__LC_BYPASS_EQ_REQ_PHASE_16GT_MASK 0x00000800L
++#define PCIE_LC_CNTL8__LC_FORCE_PRESET_IN_EQ_REQ_PHASE_16GT_MASK 0x00001000L
++#define PCIE_LC_CNTL8__LC_FORCE_PRESET_VALUE_16GT_MASK 0x0001E000L
++#define PCIE_LC_CNTL8__LC_EQTS2_PRESET_EN_MASK 0x00020000L
++#define PCIE_LC_CNTL8__LC_EQTS2_PRESET_MASK 0x003C0000L
++#define PCIE_LC_CNTL8__LC_USE_EQTS2_PRESET_MASK 0x00400000L
++#define PCIE_LC_CNTL8__LC_FOM_TIME_MASK 0x01800000L
++#define PCIE_LC_CNTL8__LC_SAFE_EQ_SEARCH_MASK 0x02000000L
++#define PCIE_LC_CNTL8__LC_DONT_CHECK_EQTS_IN_RCFG_MASK 0x04000000L
++#define PCIE_LC_CNTL8__LC_DELAY_COEFF_UPDATE_DIS_MASK 0x08000000L
++#define PCIE_LC_CNTL8__LC_8GT_EQ_REDO_EN_MASK 0x10000000L
++#define PCIE_LC_CNTL8__LC_WAIT_FOR_EIEOS_IN_RLOCK_MASK 0x20000000L
++#define PCIE_LC_CNTL8__LC_DYNAMIC_INACTIVE_TS_SELECT_MASK 0xC0000000L
++//PCIE_LC_CNTL9
++#define PCIE_LC_CNTL9__LC_OVERRIDE_RETIMER_PRESENCE_EN__SHIFT 0x0
++#define PCIE_LC_CNTL9__LC_OVERRIDE_RETIMER_PRESENCE__SHIFT 0x1
++#define PCIE_LC_CNTL9__LC_IGNORE_RETIMER_PRESENCE__SHIFT 0x3
++#define PCIE_LC_CNTL9__LC_RETIMER_PRESENCE__SHIFT 0x4
++#define PCIE_LC_CNTL9__LC_LOCK_IN_EQ_RESPONSE__SHIFT 0xd
++#define PCIE_LC_CNTL9__LC_USC_ACCEPTABLE_PRESETS__SHIFT 0xe
++#define PCIE_LC_CNTL9__LC_DSC_ACCEPT_8GT_EQ_REDO__SHIFT 0x18
++#define PCIE_LC_CNTL9__LC_DSC_ACCEPT_16GT_EQ_REDO__SHIFT 0x19
++#define PCIE_LC_CNTL9__LC_USC_HW_8GT_EQ_REDO_EN__SHIFT 0x1a
++#define PCIE_LC_CNTL9__LC_USC_HW_16GT_EQ_REDO_EN__SHIFT 0x1b
++#define PCIE_LC_CNTL9__LC_DELAY_DETECTED_TSX_RCV_EN__SHIFT 0x1c
++#define PCIE_LC_CNTL9__LC_OVERRIDE_RETIMER_PRESENCE_EN_MASK 0x00000001L
++#define PCIE_LC_CNTL9__LC_OVERRIDE_RETIMER_PRESENCE_MASK 0x00000006L
++#define PCIE_LC_CNTL9__LC_IGNORE_RETIMER_PRESENCE_MASK 0x00000008L
++#define PCIE_LC_CNTL9__LC_RETIMER_PRESENCE_MASK 0x00000030L
++#define PCIE_LC_CNTL9__LC_LOCK_IN_EQ_RESPONSE_MASK 0x00002000L
++#define PCIE_LC_CNTL9__LC_USC_ACCEPTABLE_PRESETS_MASK 0x00FFC000L
++#define PCIE_LC_CNTL9__LC_DSC_ACCEPT_8GT_EQ_REDO_MASK 0x01000000L
++#define PCIE_LC_CNTL9__LC_DSC_ACCEPT_16GT_EQ_REDO_MASK 0x02000000L
++#define PCIE_LC_CNTL9__LC_USC_HW_8GT_EQ_REDO_EN_MASK 0x04000000L
++#define PCIE_LC_CNTL9__LC_USC_HW_16GT_EQ_REDO_EN_MASK 0x08000000L
++#define PCIE_LC_CNTL9__LC_DELAY_DETECTED_TSX_RCV_EN_MASK 0x10000000L
++//PCIE_LC_FORCE_COEFF2
++#define PCIE_LC_FORCE_COEFF2__LC_FORCE_COEFF_16GT__SHIFT 0x0
++#define PCIE_LC_FORCE_COEFF2__LC_FORCE_PRE_CURSOR_16GT__SHIFT 0x1
++#define PCIE_LC_FORCE_COEFF2__LC_FORCE_CURSOR_16GT__SHIFT 0x7
++#define PCIE_LC_FORCE_COEFF2__LC_FORCE_POST_CURSOR_16GT__SHIFT 0xd
++#define PCIE_LC_FORCE_COEFF2__LC_3X3_COEFF_SEARCH_EN_16GT__SHIFT 0x13
++#define PCIE_LC_FORCE_COEFF2__LC_FORCE_COEFF_16GT_MASK 0x00000001L
++#define PCIE_LC_FORCE_COEFF2__LC_FORCE_PRE_CURSOR_16GT_MASK 0x0000007EL
++#define PCIE_LC_FORCE_COEFF2__LC_FORCE_CURSOR_16GT_MASK 0x00001F80L
++#define PCIE_LC_FORCE_COEFF2__LC_FORCE_POST_CURSOR_16GT_MASK 0x0007E000L
++#define PCIE_LC_FORCE_COEFF2__LC_3X3_COEFF_SEARCH_EN_16GT_MASK 0x00080000L
++//PCIE_LC_FORCE_EQ_REQ_COEFF2
++#define PCIE_LC_FORCE_EQ_REQ_COEFF2__LC_FORCE_COEFF_IN_EQ_REQ_PHASE_16GT__SHIFT 0x0
++#define PCIE_LC_FORCE_EQ_REQ_COEFF2__LC_FORCE_PRE_CURSOR_REQ_16GT__SHIFT 0x1
++#define PCIE_LC_FORCE_EQ_REQ_COEFF2__LC_FORCE_CURSOR_REQ_16GT__SHIFT 0x7
++#define PCIE_LC_FORCE_EQ_REQ_COEFF2__LC_FORCE_POST_CURSOR_REQ_16GT__SHIFT 0xd
++#define PCIE_LC_FORCE_EQ_REQ_COEFF2__LC_FS_OTHER_END_16GT__SHIFT 0x13
++#define PCIE_LC_FORCE_EQ_REQ_COEFF2__LC_LF_OTHER_END_16GT__SHIFT 0x19
++#define PCIE_LC_FORCE_EQ_REQ_COEFF2__LC_FORCE_COEFF_IN_EQ_REQ_PHASE_16GT_MASK 0x00000001L
++#define PCIE_LC_FORCE_EQ_REQ_COEFF2__LC_FORCE_PRE_CURSOR_REQ_16GT_MASK 0x0000007EL
++#define PCIE_LC_FORCE_EQ_REQ_COEFF2__LC_FORCE_CURSOR_REQ_16GT_MASK 0x00001F80L
++#define PCIE_LC_FORCE_EQ_REQ_COEFF2__LC_FORCE_POST_CURSOR_REQ_16GT_MASK 0x0007E000L
++#define PCIE_LC_FORCE_EQ_REQ_COEFF2__LC_FS_OTHER_END_16GT_MASK 0x01F80000L
++#define PCIE_LC_FORCE_EQ_REQ_COEFF2__LC_LF_OTHER_END_16GT_MASK 0x7E000000L
++
++
++// addressBlock: nbio_pcie0_pciedir
++//PCIE_RESERVED
++#define PCIE_RESERVED__RESERVED__SHIFT 0x0
++#define PCIE_RESERVED__RESERVED_MASK 0xFFFFFFFFL
++//PCIE_SCRATCH
++#define PCIE_SCRATCH__PCIE_SCRATCH__SHIFT 0x0
++#define PCIE_SCRATCH__PCIE_SCRATCH_MASK 0xFFFFFFFFL
++//PCIE_RX_NUM_NAK
++#define PCIE_RX_NUM_NAK__RX_NUM_NAK__SHIFT 0x0
++#define PCIE_RX_NUM_NAK__RX_NUM_NAK_MASK 0xFFFFFFFFL
++//PCIE_RX_NUM_NAK_GENERATED
++#define PCIE_RX_NUM_NAK_GENERATED__RX_NUM_NAK_GENERATED__SHIFT 0x0
++#define PCIE_RX_NUM_NAK_GENERATED__RX_NUM_NAK_GENERATED_MASK 0xFFFFFFFFL
++//PCIE_CNTL
++#define PCIE_CNTL__HWINIT_WR_LOCK__SHIFT 0x0
++#define PCIE_CNTL__LC_HOT_PLUG_DELAY_SEL__SHIFT 0x1
++#define PCIE_CNTL__UR_ERR_REPORT_DIS__SHIFT 0x7
++#define PCIE_CNTL__PCIE_MALFORM_ATOMIC_OPS__SHIFT 0x8
++#define PCIE_CNTL__PCIE_HT_NP_MEM_WRITE__SHIFT 0x9
++#define PCIE_CNTL__RX_SB_ADJ_PAYLOAD_SIZE__SHIFT 0xa
++#define PCIE_CNTL__RX_RCB_ATS_UC_DIS__SHIFT 0xf
++#define PCIE_CNTL__RX_RCB_REORDER_EN__SHIFT 0x10
++#define PCIE_CNTL__RX_RCB_INVALID_SIZE_DIS__SHIFT 0x11
++#define PCIE_CNTL__RX_RCB_UNEXP_CPL_DIS__SHIFT 0x12
++#define PCIE_CNTL__RX_RCB_CPL_TIMEOUT_TEST_MODE__SHIFT 0x13
++#define PCIE_CNTL__RX_RCB_WRONG_PREFIX_DIS__SHIFT 0x14
++#define PCIE_CNTL__RX_RCB_WRONG_ATTR_DIS__SHIFT 0x15
++#define PCIE_CNTL__RX_RCB_WRONG_FUNCNUM_DIS__SHIFT 0x16
++#define PCIE_CNTL__RX_ATS_TRAN_CPL_SPLIT_DIS__SHIFT 0x17
++#define PCIE_CNTL__RX_IGNORE_LTR_MSG_UR__SHIFT 0x1e
++#define PCIE_CNTL__RX_CPL_POSTED_REQ_ORD_EN__SHIFT 0x1f
++#define PCIE_CNTL__HWINIT_WR_LOCK_MASK 0x00000001L
++#define PCIE_CNTL__LC_HOT_PLUG_DELAY_SEL_MASK 0x0000000EL
++#define PCIE_CNTL__UR_ERR_REPORT_DIS_MASK 0x00000080L
++#define PCIE_CNTL__PCIE_MALFORM_ATOMIC_OPS_MASK 0x00000100L
++#define PCIE_CNTL__PCIE_HT_NP_MEM_WRITE_MASK 0x00000200L
++#define PCIE_CNTL__RX_SB_ADJ_PAYLOAD_SIZE_MASK 0x00001C00L
++#define PCIE_CNTL__RX_RCB_ATS_UC_DIS_MASK 0x00008000L
++#define PCIE_CNTL__RX_RCB_REORDER_EN_MASK 0x00010000L
++#define PCIE_CNTL__RX_RCB_INVALID_SIZE_DIS_MASK 0x00020000L
++#define PCIE_CNTL__RX_RCB_UNEXP_CPL_DIS_MASK 0x00040000L
++#define PCIE_CNTL__RX_RCB_CPL_TIMEOUT_TEST_MODE_MASK 0x00080000L
++#define PCIE_CNTL__RX_RCB_WRONG_PREFIX_DIS_MASK 0x00100000L
++#define PCIE_CNTL__RX_RCB_WRONG_ATTR_DIS_MASK 0x00200000L
++#define PCIE_CNTL__RX_RCB_WRONG_FUNCNUM_DIS_MASK 0x00400000L
++#define PCIE_CNTL__RX_ATS_TRAN_CPL_SPLIT_DIS_MASK 0x00800000L
++#define PCIE_CNTL__RX_IGNORE_LTR_MSG_UR_MASK 0x40000000L
++#define PCIE_CNTL__RX_CPL_POSTED_REQ_ORD_EN_MASK 0x80000000L
++//PCIE_CONFIG_CNTL
++#define PCIE_CONFIG_CNTL__DYN_CLK_LATENCY__SHIFT 0x0
++#define PCIE_CONFIG_CNTL__CI_SWUS_MAX_PAYLOAD_SIZE_MODE__SHIFT 0x8
++#define PCIE_CONFIG_CNTL__CI_SWUS_PRIV_MAX_PAYLOAD_SIZE__SHIFT 0x9
++#define PCIE_CONFIG_CNTL__CI_10BIT_TAG_EN_OVERRIDE__SHIFT 0xb
++#define PCIE_CONFIG_CNTL__CI_SWUS_10BIT_TAG_EN_OVERRIDE__SHIFT 0xd
++#define PCIE_CONFIG_CNTL__CI_MAX_PAYLOAD_SIZE_MODE__SHIFT 0x10
++#define PCIE_CONFIG_CNTL__CI_PRIV_MAX_PAYLOAD_SIZE__SHIFT 0x11
++#define PCIE_CONFIG_CNTL__CI_MAX_READ_REQUEST_SIZE_MODE__SHIFT 0x14
++#define PCIE_CONFIG_CNTL__CI_PRIV_MAX_READ_REQUEST_SIZE__SHIFT 0x15
++#define PCIE_CONFIG_CNTL__CI_MAX_READ_SAFE_MODE__SHIFT 0x18
++#define PCIE_CONFIG_CNTL__CI_EXTENDED_TAG_EN_OVERRIDE__SHIFT 0x19
++#define PCIE_CONFIG_CNTL__CI_SWUS_MAX_READ_REQUEST_SIZE_MODE__SHIFT 0x1b
++#define PCIE_CONFIG_CNTL__CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV__SHIFT 0x1c
++#define PCIE_CONFIG_CNTL__CI_SWUS_EXTENDED_TAG_EN_OVERRIDE__SHIFT 0x1e
++#define PCIE_CONFIG_CNTL__DYN_CLK_LATENCY_MASK 0x0000000FL
++#define PCIE_CONFIG_CNTL__CI_SWUS_MAX_PAYLOAD_SIZE_MODE_MASK 0x00000100L
++#define PCIE_CONFIG_CNTL__CI_SWUS_PRIV_MAX_PAYLOAD_SIZE_MASK 0x00000600L
++#define PCIE_CONFIG_CNTL__CI_10BIT_TAG_EN_OVERRIDE_MASK 0x00001800L
++#define PCIE_CONFIG_CNTL__CI_SWUS_10BIT_TAG_EN_OVERRIDE_MASK 0x00006000L
++#define PCIE_CONFIG_CNTL__CI_MAX_PAYLOAD_SIZE_MODE_MASK 0x00010000L
++#define PCIE_CONFIG_CNTL__CI_PRIV_MAX_PAYLOAD_SIZE_MASK 0x000E0000L
++#define PCIE_CONFIG_CNTL__CI_MAX_READ_REQUEST_SIZE_MODE_MASK 0x00100000L
++#define PCIE_CONFIG_CNTL__CI_PRIV_MAX_READ_REQUEST_SIZE_MASK 0x00E00000L
++#define PCIE_CONFIG_CNTL__CI_MAX_READ_SAFE_MODE_MASK 0x01000000L
++#define PCIE_CONFIG_CNTL__CI_EXTENDED_TAG_EN_OVERRIDE_MASK 0x06000000L
++#define PCIE_CONFIG_CNTL__CI_SWUS_MAX_READ_REQUEST_SIZE_MODE_MASK 0x08000000L
++#define PCIE_CONFIG_CNTL__CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV_MASK 0x30000000L
++#define PCIE_CONFIG_CNTL__CI_SWUS_EXTENDED_TAG_EN_OVERRIDE_MASK 0xC0000000L
++//PCIE_TX_TRACKING_ADDR_LO
++#define PCIE_TX_TRACKING_ADDR_LO__TX_TRACKING_ADDR_LO__SHIFT 0x2
++#define PCIE_TX_TRACKING_ADDR_LO__TX_TRACKING_ADDR_LO_MASK 0xFFFFFFFCL
++//PCIE_TX_TRACKING_ADDR_HI
++#define PCIE_TX_TRACKING_ADDR_HI__TX_TRACKING_ADDR_HI__SHIFT 0x0
++#define PCIE_TX_TRACKING_ADDR_HI__TX_TRACKING_ADDR_HI_MASK 0xFFFFFFFFL
++//PCIE_TX_TRACKING_CTRL_STATUS
++#define PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_ENABLE__SHIFT 0x0
++#define PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_PORT__SHIFT 0x1
++#define PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_UNIT_ID__SHIFT 0x8
++#define PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_STATUS_VALID__SHIFT 0xf
++#define PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_ENABLE_MASK 0x00000001L
++#define PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_PORT_MASK 0x0000000EL
++#define PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_UNIT_ID_MASK 0x00007F00L
++#define PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_STATUS_VALID_MASK 0x00008000L
++//PCIE_BW_BY_UNITID
++#define PCIE_BW_BY_UNITID__CI_MST_PERF_UNITID_EN__SHIFT 0x0
++#define PCIE_BW_BY_UNITID__CI_MST_PERF_UNITID__SHIFT 0x8
++#define PCIE_BW_BY_UNITID__CI_MST_PERF_UNITID_EN_MASK 0x00000001L
++#define PCIE_BW_BY_UNITID__CI_MST_PERF_UNITID_MASK 0x00007F00L
++//PCIE_CNTL2
++#define PCIE_CNTL2__TX_ARB_ROUND_ROBIN_EN__SHIFT 0x0
++#define PCIE_CNTL2__TX_ARB_SLV_LIMIT__SHIFT 0x1
++#define PCIE_CNTL2__TX_ARB_MST_LIMIT__SHIFT 0x6
++#define PCIE_CNTL2__TX_BLOCK_TLP_ON_PM_DIS__SHIFT 0xb
++#define PCIE_CNTL2__TX_NP_MEM_WRITE_SWP_ENCODING__SHIFT 0xc
++#define PCIE_CNTL2__TX_ATOMIC_OPS_DISABLE__SHIFT 0xd
++#define PCIE_CNTL2__TX_ATOMIC_ORDERING_DIS__SHIFT 0xe
++#define PCIE_CNTL2__SLV_MEM_LS_EN__SHIFT 0x10
++#define PCIE_CNTL2__SLV_MEM_AGGRESSIVE_LS_EN__SHIFT 0x11
++#define PCIE_CNTL2__MST_MEM_LS_EN__SHIFT 0x12
++#define PCIE_CNTL2__REPLAY_MEM_LS_EN__SHIFT 0x13
++#define PCIE_CNTL2__SLV_MEM_SD_EN__SHIFT 0x14
++#define PCIE_CNTL2__SLV_MEM_AGGRESSIVE_SD_EN__SHIFT 0x15
++#define PCIE_CNTL2__MST_MEM_SD_EN__SHIFT 0x16
++#define PCIE_CNTL2__REPLAY_MEM_SD_EN__SHIFT 0x17
++#define PCIE_CNTL2__RX_NP_MEM_WRITE_ENCODING__SHIFT 0x18
++#define PCIE_CNTL2__SLV_MEM_DS_EN__SHIFT 0x1d
++#define PCIE_CNTL2__MST_MEM_DS_EN__SHIFT 0x1e
++#define PCIE_CNTL2__REPLAY_MEM_DS_EN__SHIFT 0x1f
++#define PCIE_CNTL2__TX_ARB_ROUND_ROBIN_EN_MASK 0x00000001L
++#define PCIE_CNTL2__TX_ARB_SLV_LIMIT_MASK 0x0000003EL
++#define PCIE_CNTL2__TX_ARB_MST_LIMIT_MASK 0x000007C0L
++#define PCIE_CNTL2__TX_BLOCK_TLP_ON_PM_DIS_MASK 0x00000800L
++#define PCIE_CNTL2__TX_NP_MEM_WRITE_SWP_ENCODING_MASK 0x00001000L
++#define PCIE_CNTL2__TX_ATOMIC_OPS_DISABLE_MASK 0x00002000L
++#define PCIE_CNTL2__TX_ATOMIC_ORDERING_DIS_MASK 0x00004000L
++#define PCIE_CNTL2__SLV_MEM_LS_EN_MASK 0x00010000L
++#define PCIE_CNTL2__SLV_MEM_AGGRESSIVE_LS_EN_MASK 0x00020000L
++#define PCIE_CNTL2__MST_MEM_LS_EN_MASK 0x00040000L
++#define PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK 0x00080000L
++#define PCIE_CNTL2__SLV_MEM_SD_EN_MASK 0x00100000L
++#define PCIE_CNTL2__SLV_MEM_AGGRESSIVE_SD_EN_MASK 0x00200000L
++#define PCIE_CNTL2__MST_MEM_SD_EN_MASK 0x00400000L
++#define PCIE_CNTL2__REPLAY_MEM_SD_EN_MASK 0x00800000L
++#define PCIE_CNTL2__RX_NP_MEM_WRITE_ENCODING_MASK 0x1F000000L
++#define PCIE_CNTL2__SLV_MEM_DS_EN_MASK 0x20000000L
++#define PCIE_CNTL2__MST_MEM_DS_EN_MASK 0x40000000L
++#define PCIE_CNTL2__REPLAY_MEM_DS_EN_MASK 0x80000000L
++//PCIE_RX_CNTL2
++#define PCIE_RX_CNTL2__RX_IGNORE_EP_INVALIDPASID_UR__SHIFT 0x0
++#define PCIE_RX_CNTL2__RX_IGNORE_EP_TRANSMRD_UR__SHIFT 0x1
++#define PCIE_RX_CNTL2__RX_IGNORE_EP_TRANSMWR_UR__SHIFT 0x2
++#define PCIE_RX_CNTL2__RX_IGNORE_EP_ATSTRANSREQ_UR__SHIFT 0x3
++#define PCIE_RX_CNTL2__RX_IGNORE_EP_PAGEREQMSG_UR__SHIFT 0x4
++#define PCIE_RX_CNTL2__RX_IGNORE_EP_INVCPL_UR__SHIFT 0x5
++#define PCIE_RX_CNTL2__RX_RCB_LATENCY_EN__SHIFT 0x8
++#define PCIE_RX_CNTL2__RX_RCB_LATENCY_SCALE__SHIFT 0x9
++#define PCIE_RX_CNTL2__SLVCPL_MEM_LS_EN__SHIFT 0xc
++#define PCIE_RX_CNTL2__SLVCPL_MEM_SD_EN__SHIFT 0xd
++#define PCIE_RX_CNTL2__SLVCPL_MEM_DS_EN__SHIFT 0xe
++#define PCIE_RX_CNTL2__RX_RCB_LATENCY_MAX_COUNT__SHIFT 0x10
++#define PCIE_RX_CNTL2__FLR_EXTEND_MODE__SHIFT 0x1c
++#define PCIE_RX_CNTL2__RX_IGNORE_EP_INVALIDPASID_UR_MASK 0x00000001L
++#define PCIE_RX_CNTL2__RX_IGNORE_EP_TRANSMRD_UR_MASK 0x00000002L
++#define PCIE_RX_CNTL2__RX_IGNORE_EP_TRANSMWR_UR_MASK 0x00000004L
++#define PCIE_RX_CNTL2__RX_IGNORE_EP_ATSTRANSREQ_UR_MASK 0x00000008L
++#define PCIE_RX_CNTL2__RX_IGNORE_EP_PAGEREQMSG_UR_MASK 0x00000010L
++#define PCIE_RX_CNTL2__RX_IGNORE_EP_INVCPL_UR_MASK 0x00000020L
++#define PCIE_RX_CNTL2__RX_RCB_LATENCY_EN_MASK 0x00000100L
++#define PCIE_RX_CNTL2__RX_RCB_LATENCY_SCALE_MASK 0x00000E00L
++#define PCIE_RX_CNTL2__SLVCPL_MEM_LS_EN_MASK 0x00001000L
++#define PCIE_RX_CNTL2__SLVCPL_MEM_SD_EN_MASK 0x00002000L
++#define PCIE_RX_CNTL2__SLVCPL_MEM_DS_EN_MASK 0x00004000L
++#define PCIE_RX_CNTL2__RX_RCB_LATENCY_MAX_COUNT_MASK 0x03FF0000L
++#define PCIE_RX_CNTL2__FLR_EXTEND_MODE_MASK 0x70000000L
++//PCIE_TX_F0_ATTR_CNTL
++#define PCIE_TX_F0_ATTR_CNTL__TX_F0_IDO_OVERRIDE_P__SHIFT 0x0
++#define PCIE_TX_F0_ATTR_CNTL__TX_F0_IDO_OVERRIDE_NP__SHIFT 0x2
++#define PCIE_TX_F0_ATTR_CNTL__TX_F0_IDO_OVERRIDE_CPL__SHIFT 0x4
++#define PCIE_TX_F0_ATTR_CNTL__TX_F0_RO_OVERRIDE_P__SHIFT 0x6
++#define PCIE_TX_F0_ATTR_CNTL__TX_F0_RO_OVERRIDE_NP__SHIFT 0x8
++#define PCIE_TX_F0_ATTR_CNTL__TX_F0_SNR_OVERRIDE_P__SHIFT 0xa
++#define PCIE_TX_F0_ATTR_CNTL__TX_F0_SNR_OVERRIDE_NP__SHIFT 0xc
++#define PCIE_TX_F0_ATTR_CNTL__TX_F0_IDO_OVERRIDE_P_MASK 0x00000003L
++#define PCIE_TX_F0_ATTR_CNTL__TX_F0_IDO_OVERRIDE_NP_MASK 0x0000000CL
++#define PCIE_TX_F0_ATTR_CNTL__TX_F0_IDO_OVERRIDE_CPL_MASK 0x00000030L
++#define PCIE_TX_F0_ATTR_CNTL__TX_F0_RO_OVERRIDE_P_MASK 0x000000C0L
++#define PCIE_TX_F0_ATTR_CNTL__TX_F0_RO_OVERRIDE_NP_MASK 0x00000300L
++#define PCIE_TX_F0_ATTR_CNTL__TX_F0_SNR_OVERRIDE_P_MASK 0x00000C00L
++#define PCIE_TX_F0_ATTR_CNTL__TX_F0_SNR_OVERRIDE_NP_MASK 0x00003000L
++//PCIE_TX_SWUS_ATTR_CNTL
++#define PCIE_TX_SWUS_ATTR_CNTL__TX_SWUS_IDO_OVERRIDE_P__SHIFT 0x0
++#define PCIE_TX_SWUS_ATTR_CNTL__TX_SWUS_IDO_OVERRIDE_NP__SHIFT 0x2
++#define PCIE_TX_SWUS_ATTR_CNTL__TX_SWUS_IDO_OVERRIDE_CPL__SHIFT 0x4
++#define PCIE_TX_SWUS_ATTR_CNTL__TX_SWUS_RO_OVERRIDE_P__SHIFT 0x6
++#define PCIE_TX_SWUS_ATTR_CNTL__TX_SWUS_RO_OVERRIDE_NP__SHIFT 0x8
++#define PCIE_TX_SWUS_ATTR_CNTL__TX_SWUS_SNR_OVERRIDE_P__SHIFT 0xa
++#define PCIE_TX_SWUS_ATTR_CNTL__TX_SWUS_SNR_OVERRIDE_NP__SHIFT 0xc
++#define PCIE_TX_SWUS_ATTR_CNTL__TX_SWUS_IDO_OVERRIDE_P_MASK 0x00000003L
++#define PCIE_TX_SWUS_ATTR_CNTL__TX_SWUS_IDO_OVERRIDE_NP_MASK 0x0000000CL
++#define PCIE_TX_SWUS_ATTR_CNTL__TX_SWUS_IDO_OVERRIDE_CPL_MASK 0x00000030L
++#define PCIE_TX_SWUS_ATTR_CNTL__TX_SWUS_RO_OVERRIDE_P_MASK 0x000000C0L
++#define PCIE_TX_SWUS_ATTR_CNTL__TX_SWUS_RO_OVERRIDE_NP_MASK 0x00000300L
++#define PCIE_TX_SWUS_ATTR_CNTL__TX_SWUS_SNR_OVERRIDE_P_MASK 0x00000C00L
++#define PCIE_TX_SWUS_ATTR_CNTL__TX_SWUS_SNR_OVERRIDE_NP_MASK 0x00003000L
++//PCIE_CI_CNTL
++#define PCIE_CI_CNTL__CI_SLAVE_SPLIT_MODE__SHIFT 0x2
++#define PCIE_CI_CNTL__CI_SLAVE_GEN_USR_DIS__SHIFT 0x3
++#define PCIE_CI_CNTL__CI_MST_CMPL_DUMMY_DATA__SHIFT 0x4
++#define PCIE_CI_CNTL__CI_SLV_RC_RD_REQ_SIZE__SHIFT 0x6
++#define PCIE_CI_CNTL__CI_SLV_ORDERING_DIS__SHIFT 0x8
++#define PCIE_CI_CNTL__CI_RC_ORDERING_DIS__SHIFT 0x9
++#define PCIE_CI_CNTL__CI_SLV_CPL_ALLOC_DIS__SHIFT 0xa
++#define PCIE_CI_CNTL__CI_SLV_CPL_ALLOC_MODE__SHIFT 0xb
++#define PCIE_CI_CNTL__CI_SLV_CPL_ALLOC_SOR__SHIFT 0xc
++#define PCIE_CI_CNTL__CI_SLV_SDP_ERR_DATA_ON_POISONED_DIS__SHIFT 0x10
++#define PCIE_CI_CNTL__TX_PRIV_TLP_PREFIX_BLOCKING_DIS__SHIFT 0x11
++#define PCIE_CI_CNTL__TX_PRIV_POISONED_TLP_EGRESS_BLOCKING_DIS__SHIFT 0x12
++#define PCIE_CI_CNTL__TX_PRIV_ATOMICOP_EGRESS_BLOCKING_DIS__SHIFT 0x13
++#define PCIE_CI_CNTL__PRIV_AUTO_SLOT_PWR_LIMIT_DIS__SHIFT 0x14
++#define PCIE_CI_CNTL__TX_DISABLE_SLOT_PWR_LIMIT_MSG__SHIFT 0x15
++#define PCIE_CI_CNTL__RX_RCB_RC_CTO_TO_UR_EN__SHIFT 0x16
++#define PCIE_CI_CNTL__RX_RCB_RC_DPC_EXCEPTION_EN__SHIFT 0x17
++#define PCIE_CI_CNTL__RX_RCB_RC_DPC_CPL_CTL_EN__SHIFT 0x18
++#define PCIE_CI_CNTL__CI_MSTSPLIT_DIS__SHIFT 0x19
++#define PCIE_CI_CNTL__CI_MSTSPLIT_REQ_CHAIN_DIS__SHIFT 0x1a
++#define PCIE_CI_CNTL__TX_MWR_SPLIT_QW_PKT_SAFE_MODE__SHIFT 0x1b
++#define PCIE_CI_CNTL__CI_MST_TAG_BORROWING_DIS__SHIFT 0x1c
++#define PCIE_CI_CNTL__RX_RCB_RC_CTO_TO_SC_IN_LINK_DOWN_EN__SHIFT 0x1d
++#define PCIE_CI_CNTL__CI_SLAVE_SPLIT_MODE_MASK 0x00000004L
++#define PCIE_CI_CNTL__CI_SLAVE_GEN_USR_DIS_MASK 0x00000008L
++#define PCIE_CI_CNTL__CI_MST_CMPL_DUMMY_DATA_MASK 0x00000010L
++#define PCIE_CI_CNTL__CI_SLV_RC_RD_REQ_SIZE_MASK 0x000000C0L
++#define PCIE_CI_CNTL__CI_SLV_ORDERING_DIS_MASK 0x00000100L
++#define PCIE_CI_CNTL__CI_RC_ORDERING_DIS_MASK 0x00000200L
++#define PCIE_CI_CNTL__CI_SLV_CPL_ALLOC_DIS_MASK 0x00000400L
++#define PCIE_CI_CNTL__CI_SLV_CPL_ALLOC_MODE_MASK 0x00000800L
++#define PCIE_CI_CNTL__CI_SLV_CPL_ALLOC_SOR_MASK 0x00001000L
++#define PCIE_CI_CNTL__CI_SLV_SDP_ERR_DATA_ON_POISONED_DIS_MASK 0x00010000L
++#define PCIE_CI_CNTL__TX_PRIV_TLP_PREFIX_BLOCKING_DIS_MASK 0x00020000L
++#define PCIE_CI_CNTL__TX_PRIV_POISONED_TLP_EGRESS_BLOCKING_DIS_MASK 0x00040000L
++#define PCIE_CI_CNTL__TX_PRIV_ATOMICOP_EGRESS_BLOCKING_DIS_MASK 0x00080000L
++#define PCIE_CI_CNTL__PRIV_AUTO_SLOT_PWR_LIMIT_DIS_MASK 0x00100000L
++#define PCIE_CI_CNTL__TX_DISABLE_SLOT_PWR_LIMIT_MSG_MASK 0x00200000L
++#define PCIE_CI_CNTL__RX_RCB_RC_CTO_TO_UR_EN_MASK 0x00400000L
++#define PCIE_CI_CNTL__RX_RCB_RC_DPC_EXCEPTION_EN_MASK 0x00800000L
++#define PCIE_CI_CNTL__RX_RCB_RC_DPC_CPL_CTL_EN_MASK 0x01000000L
++#define PCIE_CI_CNTL__CI_MSTSPLIT_DIS_MASK 0x02000000L
++#define PCIE_CI_CNTL__CI_MSTSPLIT_REQ_CHAIN_DIS_MASK 0x04000000L
++#define PCIE_CI_CNTL__TX_MWR_SPLIT_QW_PKT_SAFE_MODE_MASK 0x08000000L
++#define PCIE_CI_CNTL__CI_MST_TAG_BORROWING_DIS_MASK 0x10000000L
++#define PCIE_CI_CNTL__RX_RCB_RC_CTO_TO_SC_IN_LINK_DOWN_EN_MASK 0x20000000L
++//PCIE_BUS_CNTL
++#define PCIE_BUS_CNTL__PMI_INT_DIS__SHIFT 0x6
++#define PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS__SHIFT 0x7
++#define PCIE_BUS_CNTL__TRUE_PM_STATUS_EN__SHIFT 0xc
++#define PCIE_BUS_CNTL__PMI_INT_DIS_MASK 0x00000040L
++#define PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS_MASK 0x00000080L
++#define PCIE_BUS_CNTL__TRUE_PM_STATUS_EN_MASK 0x00001000L
++//PCIE_LC_STATE6
++#define PCIE_LC_STATE6__LC_PREV_STATE24__SHIFT 0x0
++#define PCIE_LC_STATE6__LC_PREV_STATE25__SHIFT 0x8
++#define PCIE_LC_STATE6__LC_PREV_STATE26__SHIFT 0x10
++#define PCIE_LC_STATE6__LC_PREV_STATE27__SHIFT 0x18
++#define PCIE_LC_STATE6__LC_PREV_STATE24_MASK 0x0000003FL
++#define PCIE_LC_STATE6__LC_PREV_STATE25_MASK 0x00003F00L
++#define PCIE_LC_STATE6__LC_PREV_STATE26_MASK 0x003F0000L
++#define PCIE_LC_STATE6__LC_PREV_STATE27_MASK 0x3F000000L
++//PCIE_LC_STATE7
++#define PCIE_LC_STATE7__LC_PREV_STATE28__SHIFT 0x0
++#define PCIE_LC_STATE7__LC_PREV_STATE29__SHIFT 0x8
++#define PCIE_LC_STATE7__LC_PREV_STATE30__SHIFT 0x10
++#define PCIE_LC_STATE7__LC_PREV_STATE31__SHIFT 0x18
++#define PCIE_LC_STATE7__LC_PREV_STATE28_MASK 0x0000003FL
++#define PCIE_LC_STATE7__LC_PREV_STATE29_MASK 0x00003F00L
++#define PCIE_LC_STATE7__LC_PREV_STATE30_MASK 0x003F0000L
++#define PCIE_LC_STATE7__LC_PREV_STATE31_MASK 0x3F000000L
++//PCIE_LC_STATE8
++#define PCIE_LC_STATE8__LC_PREV_STATE32__SHIFT 0x0
++#define PCIE_LC_STATE8__LC_PREV_STATE33__SHIFT 0x8
++#define PCIE_LC_STATE8__LC_PREV_STATE34__SHIFT 0x10
++#define PCIE_LC_STATE8__LC_PREV_STATE35__SHIFT 0x18
++#define PCIE_LC_STATE8__LC_PREV_STATE32_MASK 0x0000003FL
++#define PCIE_LC_STATE8__LC_PREV_STATE33_MASK 0x00003F00L
++#define PCIE_LC_STATE8__LC_PREV_STATE34_MASK 0x003F0000L
++#define PCIE_LC_STATE8__LC_PREV_STATE35_MASK 0x3F000000L
++//PCIE_LC_STATE9
++#define PCIE_LC_STATE9__LC_PREV_STATE36__SHIFT 0x0
++#define PCIE_LC_STATE9__LC_PREV_STATE37__SHIFT 0x8
++#define PCIE_LC_STATE9__LC_PREV_STATE38__SHIFT 0x10
++#define PCIE_LC_STATE9__LC_PREV_STATE39__SHIFT 0x18
++#define PCIE_LC_STATE9__LC_PREV_STATE36_MASK 0x0000003FL
++#define PCIE_LC_STATE9__LC_PREV_STATE37_MASK 0x00003F00L
++#define PCIE_LC_STATE9__LC_PREV_STATE38_MASK 0x003F0000L
++#define PCIE_LC_STATE9__LC_PREV_STATE39_MASK 0x3F000000L
++//PCIE_LC_STATE10
++#define PCIE_LC_STATE10__LC_PREV_STATE40__SHIFT 0x0
++#define PCIE_LC_STATE10__LC_PREV_STATE41__SHIFT 0x8
++#define PCIE_LC_STATE10__LC_PREV_STATE42__SHIFT 0x10
++#define PCIE_LC_STATE10__LC_PREV_STATE43__SHIFT 0x18
++#define PCIE_LC_STATE10__LC_PREV_STATE40_MASK 0x0000003FL
++#define PCIE_LC_STATE10__LC_PREV_STATE41_MASK 0x00003F00L
++#define PCIE_LC_STATE10__LC_PREV_STATE42_MASK 0x003F0000L
++#define PCIE_LC_STATE10__LC_PREV_STATE43_MASK 0x3F000000L
++//PCIE_LC_STATE11
++#define PCIE_LC_STATE11__LC_PREV_STATE44__SHIFT 0x0
++#define PCIE_LC_STATE11__LC_PREV_STATE45__SHIFT 0x8
++#define PCIE_LC_STATE11__LC_PREV_STATE46__SHIFT 0x10
++#define PCIE_LC_STATE11__LC_PREV_STATE47__SHIFT 0x18
++#define PCIE_LC_STATE11__LC_PREV_STATE44_MASK 0x0000003FL
++#define PCIE_LC_STATE11__LC_PREV_STATE45_MASK 0x00003F00L
++#define PCIE_LC_STATE11__LC_PREV_STATE46_MASK 0x003F0000L
++#define PCIE_LC_STATE11__LC_PREV_STATE47_MASK 0x3F000000L
++//PCIE_LC_STATUS1
++#define PCIE_LC_STATUS1__LC_REVERSE_RCVR__SHIFT 0x0
++#define PCIE_LC_STATUS1__LC_REVERSE_XMIT__SHIFT 0x1
++#define PCIE_LC_STATUS1__LC_OPERATING_LINK_WIDTH__SHIFT 0x2
++#define PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH__SHIFT 0x5
++#define PCIE_LC_STATUS1__LC_REVERSE_RCVR_MASK 0x00000001L
++#define PCIE_LC_STATUS1__LC_REVERSE_XMIT_MASK 0x00000002L
++#define PCIE_LC_STATUS1__LC_OPERATING_LINK_WIDTH_MASK 0x0000001CL
++#define PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH_MASK 0x000000E0L
++//PCIE_LC_STATUS2
++#define PCIE_LC_STATUS2__LC_TOTAL_INACTIVE_LANES__SHIFT 0x0
++#define PCIE_LC_STATUS2__LC_TURN_ON_LANE__SHIFT 0x10
++#define PCIE_LC_STATUS2__LC_TOTAL_INACTIVE_LANES_MASK 0x0000FFFFL
++#define PCIE_LC_STATUS2__LC_TURN_ON_LANE_MASK 0xFFFF0000L
++//PCIE_TX_CNTL3
++#define PCIE_TX_CNTL3__TX_REGNUM_FROM_ADDR_CFGWR_IOWR_DIS__SHIFT 0x0
++#define PCIE_TX_CNTL3__CI_SLV_CPL_ALLOC_OVERSUBSCRIBE_MODE__SHIFT 0x1
++#define PCIE_TX_CNTL3__TX_REGNUM_FROM_ADDR_CFGWR_IOWR_DIS_MASK 0x00000001L
++#define PCIE_TX_CNTL3__CI_SLV_CPL_ALLOC_OVERSUBSCRIBE_MODE_MASK 0x0000000EL
++//PCIE_WPR_CNTL
++#define PCIE_WPR_CNTL__WPR_RESET_HOT_RST_EN__SHIFT 0x0
++#define PCIE_WPR_CNTL__WPR_RESET_LNK_DWN_EN__SHIFT 0x1
++#define PCIE_WPR_CNTL__WPR_RESET_LNK_DIS_EN__SHIFT 0x2
++#define PCIE_WPR_CNTL__WPR_RESET_COR_EN__SHIFT 0x3
++#define PCIE_WPR_CNTL__WPR_RESET_REG_EN__SHIFT 0x4
++#define PCIE_WPR_CNTL__WPR_RESET_STY_EN__SHIFT 0x5
++#define PCIE_WPR_CNTL__WPR_RESET_PHY_EN__SHIFT 0x6
++#define PCIE_WPR_CNTL__WPR_RESET_HOT_RST_EN_MASK 0x00000001L
++#define PCIE_WPR_CNTL__WPR_RESET_LNK_DWN_EN_MASK 0x00000002L
++#define PCIE_WPR_CNTL__WPR_RESET_LNK_DIS_EN_MASK 0x00000004L
++#define PCIE_WPR_CNTL__WPR_RESET_COR_EN_MASK 0x00000008L
++#define PCIE_WPR_CNTL__WPR_RESET_REG_EN_MASK 0x00000010L
++#define PCIE_WPR_CNTL__WPR_RESET_STY_EN_MASK 0x00000020L
++#define PCIE_WPR_CNTL__WPR_RESET_PHY_EN_MASK 0x00000040L
++//PCIE_RX_LAST_TLP0
++#define PCIE_RX_LAST_TLP0__RX_LAST_TLP0__SHIFT 0x0
++#define PCIE_RX_LAST_TLP0__RX_LAST_TLP0_MASK 0xFFFFFFFFL
++//PCIE_RX_LAST_TLP1
++#define PCIE_RX_LAST_TLP1__RX_LAST_TLP1__SHIFT 0x0
++#define PCIE_RX_LAST_TLP1__RX_LAST_TLP1_MASK 0xFFFFFFFFL
++//PCIE_RX_LAST_TLP2
++#define PCIE_RX_LAST_TLP2__RX_LAST_TLP2__SHIFT 0x0
++#define PCIE_RX_LAST_TLP2__RX_LAST_TLP2_MASK 0xFFFFFFFFL
++//PCIE_RX_LAST_TLP3
++#define PCIE_RX_LAST_TLP3__RX_LAST_TLP3__SHIFT 0x0
++#define PCIE_RX_LAST_TLP3__RX_LAST_TLP3_MASK 0xFFFFFFFFL
++//PCIE_TX_LAST_TLP0
++#define PCIE_TX_LAST_TLP0__TX_LAST_TLP0__SHIFT 0x0
++#define PCIE_TX_LAST_TLP0__TX_LAST_TLP0_MASK 0xFFFFFFFFL
++//PCIE_TX_LAST_TLP1
++#define PCIE_TX_LAST_TLP1__TX_LAST_TLP1__SHIFT 0x0
++#define PCIE_TX_LAST_TLP1__TX_LAST_TLP1_MASK 0xFFFFFFFFL
++//PCIE_TX_LAST_TLP2
++#define PCIE_TX_LAST_TLP2__TX_LAST_TLP2__SHIFT 0x0
++#define PCIE_TX_LAST_TLP2__TX_LAST_TLP2_MASK 0xFFFFFFFFL
++//PCIE_TX_LAST_TLP3
++#define PCIE_TX_LAST_TLP3__TX_LAST_TLP3__SHIFT 0x0
++#define PCIE_TX_LAST_TLP3__TX_LAST_TLP3_MASK 0xFFFFFFFFL
++//PCIE_I2C_REG_ADDR_EXPAND
++#define PCIE_I2C_REG_ADDR_EXPAND__I2C_REG_ADDR__SHIFT 0x0
++#define PCIE_I2C_REG_ADDR_EXPAND__I2C_REG_ADDR_MASK 0x0001FFFFL
++//PCIE_I2C_REG_DATA
++#define PCIE_I2C_REG_DATA__I2C_REG_DATA__SHIFT 0x0
++#define PCIE_I2C_REG_DATA__I2C_REG_DATA_MASK 0xFFFFFFFFL
++//PCIE_CFG_CNTL
++#define PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG__SHIFT 0x0
++#define PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG__SHIFT 0x1
++#define PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG__SHIFT 0x2
++#define PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG_MASK 0x00000001L
++#define PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG_MASK 0x00000002L
++#define PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG_MASK 0x00000004L
++//PCIE_LC_PM_CNTL
++#define PCIE_LC_PM_CNTL__LC_PORT_0_CLKREQB_MAP__SHIFT 0x0
++#define PCIE_LC_PM_CNTL__LC_PORT_1_CLKREQB_MAP__SHIFT 0x4
++#define PCIE_LC_PM_CNTL__LC_PORT_2_CLKREQB_MAP__SHIFT 0x8
++#define PCIE_LC_PM_CNTL__LC_PORT_3_CLKREQB_MAP__SHIFT 0xc
++#define PCIE_LC_PM_CNTL__LC_PORT_4_CLKREQB_MAP__SHIFT 0x10
++#define PCIE_LC_PM_CNTL__LC_PORT_5_CLKREQB_MAP__SHIFT 0x14
++#define PCIE_LC_PM_CNTL__LC_PORT_6_CLKREQB_MAP__SHIFT 0x18
++#define PCIE_LC_PM_CNTL__LC_PORT_7_CLKREQB_MAP__SHIFT 0x1c
++#define PCIE_LC_PM_CNTL__LC_PORT_0_CLKREQB_MAP_MASK 0x0000000FL
++#define PCIE_LC_PM_CNTL__LC_PORT_1_CLKREQB_MAP_MASK 0x000000F0L
++#define PCIE_LC_PM_CNTL__LC_PORT_2_CLKREQB_MAP_MASK 0x00000F00L
++#define PCIE_LC_PM_CNTL__LC_PORT_3_CLKREQB_MAP_MASK 0x0000F000L
++#define PCIE_LC_PM_CNTL__LC_PORT_4_CLKREQB_MAP_MASK 0x000F0000L
++#define PCIE_LC_PM_CNTL__LC_PORT_5_CLKREQB_MAP_MASK 0x00F00000L
++#define PCIE_LC_PM_CNTL__LC_PORT_6_CLKREQB_MAP_MASK 0x0F000000L
++#define PCIE_LC_PM_CNTL__LC_PORT_7_CLKREQB_MAP_MASK 0xF0000000L
++//PCIE_LC_PORT_ORDER_CNTL
++#define PCIE_LC_PORT_ORDER_CNTL__LC_PORT_ORDER_EN__SHIFT 0x0
++#define PCIE_LC_PORT_ORDER_CNTL__LC_PORT_ORDER_EN_MASK 0x00000001L
++//PCIE_P_CNTL
++#define PCIE_P_CNTL__P_PWRDN_EN__SHIFT 0x0
++#define PCIE_P_CNTL__P_SYMALIGN_MODE__SHIFT 0x1
++#define PCIE_P_CNTL__P_IGNORE_CRC_ERR__SHIFT 0x4
++#define PCIE_P_CNTL__P_IGNORE_LEN_ERR__SHIFT 0x5
++#define PCIE_P_CNTL__P_IGNORE_EDB_ERR__SHIFT 0x6
++#define PCIE_P_CNTL__P_IGNORE_IDL_ERR__SHIFT 0x7
++#define PCIE_P_CNTL__P_IGNORE_TOK_ERR__SHIFT 0x8
++#define PCIE_P_CNTL__P_BLK_LOCK_MODE__SHIFT 0xc
++#define PCIE_P_CNTL__P_ALWAYS_USE_FAST_TXCLK__SHIFT 0xd
++#define PCIE_P_CNTL__P_ELEC_IDLE_MODE__SHIFT 0xe
++#define PCIE_P_CNTL__DLP_IGNORE_IN_L1_EN__SHIFT 0x10
++#define PCIE_P_CNTL__ASSERT_DVALID_ON_EI_TRANS__SHIFT 0x11
++#define PCIE_P_CNTL__P_PWRDN_EN_MASK 0x00000001L
++#define PCIE_P_CNTL__P_SYMALIGN_MODE_MASK 0x00000002L
++#define PCIE_P_CNTL__P_IGNORE_CRC_ERR_MASK 0x00000010L
++#define PCIE_P_CNTL__P_IGNORE_LEN_ERR_MASK 0x00000020L
++#define PCIE_P_CNTL__P_IGNORE_EDB_ERR_MASK 0x00000040L
++#define PCIE_P_CNTL__P_IGNORE_IDL_ERR_MASK 0x00000080L
++#define PCIE_P_CNTL__P_IGNORE_TOK_ERR_MASK 0x00000100L
++#define PCIE_P_CNTL__P_BLK_LOCK_MODE_MASK 0x00001000L
++#define PCIE_P_CNTL__P_ALWAYS_USE_FAST_TXCLK_MASK 0x00002000L
++#define PCIE_P_CNTL__P_ELEC_IDLE_MODE_MASK 0x0000C000L
++#define PCIE_P_CNTL__DLP_IGNORE_IN_L1_EN_MASK 0x00010000L
++#define PCIE_P_CNTL__ASSERT_DVALID_ON_EI_TRANS_MASK 0x00020000L
++//PCIE_P_BUF_STATUS
++#define PCIE_P_BUF_STATUS__P_OVERFLOW_ERR__SHIFT 0x0
++#define PCIE_P_BUF_STATUS__P_UNDERFLOW_ERR__SHIFT 0x10
++#define PCIE_P_BUF_STATUS__P_OVERFLOW_ERR_MASK 0x0000FFFFL
++#define PCIE_P_BUF_STATUS__P_UNDERFLOW_ERR_MASK 0xFFFF0000L
++//PCIE_P_DECODER_STATUS
++#define PCIE_P_DECODER_STATUS__P_DECODE_ERR__SHIFT 0x0
++#define PCIE_P_DECODER_STATUS__P_DECODE_ERR_MASK 0x0000FFFFL
++//PCIE_P_MISC_STATUS
++#define PCIE_P_MISC_STATUS__P_DESKEW_ERR__SHIFT 0x0
++#define PCIE_P_MISC_STATUS__P_SYMUNLOCK_ERR__SHIFT 0x10
++#define PCIE_P_MISC_STATUS__P_DESKEW_ERR_MASK 0x000000FFL
++#define PCIE_P_MISC_STATUS__P_SYMUNLOCK_ERR_MASK 0xFFFF0000L
++//PCIE_P_RCV_L0S_FTS_DET
++#define PCIE_P_RCV_L0S_FTS_DET__P_RCV_L0S_FTS_DET_MIN__SHIFT 0x0
++#define PCIE_P_RCV_L0S_FTS_DET__P_RCV_L0S_FTS_DET_MAX__SHIFT 0x8
++#define PCIE_P_RCV_L0S_FTS_DET__P_RCV_L0S_FTS_DET_MIN_MASK 0x000000FFL
++#define PCIE_P_RCV_L0S_FTS_DET__P_RCV_L0S_FTS_DET_MAX_MASK 0x0000FF00L
++//PCIE_RX_AD
++#define PCIE_RX_AD__RX_SWUS_DROP_PME_TO__SHIFT 0x0
++#define PCIE_RX_AD__RX_SWUS_DROP_UNLOCK__SHIFT 0x1
++#define PCIE_RX_AD__RX_SWUS_UR_VDM0__SHIFT 0x2
++#define PCIE_RX_AD__RX_SWUS_DROP_VDM0__SHIFT 0x3
++#define PCIE_RX_AD__RX_SWUS_DROP_VDM1__SHIFT 0x4
++#define PCIE_RX_AD__RX_SWUS_UR_MSG_PREFIX_DIS__SHIFT 0x5
++#define PCIE_RX_AD__RX_RC_DROP_VDM0__SHIFT 0x8
++#define PCIE_RX_AD__RX_RC_UR_VDM0__SHIFT 0x9
++#define PCIE_RX_AD__RX_RC_DROP_VDM1__SHIFT 0xa
++#define PCIE_RX_AD__RX_RC_UR_SSPL_MSG__SHIFT 0xb
++#define PCIE_RX_AD__RX_RC_UR_BFRC_MSG__SHIFT 0xc
++#define PCIE_RX_AD__RX_RC_DROP_PME_TO_ACK__SHIFT 0xd
++#define PCIE_RX_AD__RX_RC_UR_ECRC_DIS__SHIFT 0xe
++#define PCIE_RX_AD__RX_RC_DROP_CPL_ECRC_FAILURE__SHIFT 0xf
++#define PCIE_RX_AD__RX_SB_DROP_LTAR_VDM_EN__SHIFT 0x10
++#define PCIE_RX_AD__RX_SWUS_DROP_PME_TO_MASK 0x00000001L
++#define PCIE_RX_AD__RX_SWUS_DROP_UNLOCK_MASK 0x00000002L
++#define PCIE_RX_AD__RX_SWUS_UR_VDM0_MASK 0x00000004L
++#define PCIE_RX_AD__RX_SWUS_DROP_VDM0_MASK 0x00000008L
++#define PCIE_RX_AD__RX_SWUS_DROP_VDM1_MASK 0x00000010L
++#define PCIE_RX_AD__RX_SWUS_UR_MSG_PREFIX_DIS_MASK 0x00000020L
++#define PCIE_RX_AD__RX_RC_DROP_VDM0_MASK 0x00000100L
++#define PCIE_RX_AD__RX_RC_UR_VDM0_MASK 0x00000200L
++#define PCIE_RX_AD__RX_RC_DROP_VDM1_MASK 0x00000400L
++#define PCIE_RX_AD__RX_RC_UR_SSPL_MSG_MASK 0x00000800L
++#define PCIE_RX_AD__RX_RC_UR_BFRC_MSG_MASK 0x00001000L
++#define PCIE_RX_AD__RX_RC_DROP_PME_TO_ACK_MASK 0x00002000L
++#define PCIE_RX_AD__RX_RC_UR_ECRC_DIS_MASK 0x00004000L
++#define PCIE_RX_AD__RX_RC_DROP_CPL_ECRC_FAILURE_MASK 0x00008000L
++#define PCIE_RX_AD__RX_SB_DROP_LTAR_VDM_EN_MASK 0x00010000L
++//PCIE_SDP_CTRL
++#define PCIE_SDP_CTRL__SDP_UNIT_ID__SHIFT 0x0
++#define PCIE_SDP_CTRL__CI_SLV_REQR_FULL_DISCONNECT_EN__SHIFT 0x4
++#define PCIE_SDP_CTRL__CI_SLV_REQR_PART_DISCONNECT_EN__SHIFT 0x5
++#define PCIE_SDP_CTRL__CI_MSTSDP_CLKGATE_ONESIDED_ENABLE__SHIFT 0x6
++#define PCIE_SDP_CTRL__TX_RC_TPH_PRIV_DIS__SHIFT 0x7
++#define PCIE_SDP_CTRL__TX_SWUS_TPH_PRIV_DIS__SHIFT 0x8
++#define PCIE_SDP_CTRL__CI_SLAVE_TAG_STEALING_DIS__SHIFT 0x9
++#define PCIE_SDP_CTRL__SLAVE_PREFIX_PRELOAD_DIS__SHIFT 0xa
++#define PCIE_SDP_CTRL__CI_DISABLE_LTR_DROPPING__SHIFT 0xb
++#define PCIE_SDP_CTRL__RX_SWUS_SIDEBAND_CPLHDR_DIS__SHIFT 0xc
++#define PCIE_SDP_CTRL__CI_MST_MEMR_RD_NONCONT_BE_EN__SHIFT 0xd
++#define PCIE_SDP_CTRL__CI_MSTSDP_DISCONNECT_RSP_ON_PARTIAL__SHIFT 0xe
++#define PCIE_SDP_CTRL__CI_SWUS_RCVD_ERR_HANDLING_DIS__SHIFT 0xf
++#define PCIE_SDP_CTRL__EARLY_HW_WAKE_UP_EN__SHIFT 0x10
++#define PCIE_SDP_CTRL__SLV_SDP_DISCONNECT_WHEN_IN_L1_EN__SHIFT 0x11
++#define PCIE_SDP_CTRL__BLOCK_SLV_SDP_DISCONNECT_WHEN_EARLY_HW_WAKE_UP_EN__SHIFT 0x12
++#define PCIE_SDP_CTRL__TX_ENCMSG_USE_SDP_EP_DIS__SHIFT 0x13
++#define PCIE_SDP_CTRL__TX_IGNORE_POISON_BIT_EN__SHIFT 0x14
++#define PCIE_SDP_CTRL__TX_RBUF_WRITE_2HDR_DIS__SHIFT 0x15
++#define PCIE_SDP_CTRL__TX_RBUF_READ_2HDR_DIS__SHIFT 0x16
++#define PCIE_SDP_CTRL__SDP_UNIT_ID_MASK 0x0000000FL
++#define PCIE_SDP_CTRL__CI_SLV_REQR_FULL_DISCONNECT_EN_MASK 0x00000010L
++#define PCIE_SDP_CTRL__CI_SLV_REQR_PART_DISCONNECT_EN_MASK 0x00000020L
++#define PCIE_SDP_CTRL__CI_MSTSDP_CLKGATE_ONESIDED_ENABLE_MASK 0x00000040L
++#define PCIE_SDP_CTRL__TX_RC_TPH_PRIV_DIS_MASK 0x00000080L
++#define PCIE_SDP_CTRL__TX_SWUS_TPH_PRIV_DIS_MASK 0x00000100L
++#define PCIE_SDP_CTRL__CI_SLAVE_TAG_STEALING_DIS_MASK 0x00000200L
++#define PCIE_SDP_CTRL__SLAVE_PREFIX_PRELOAD_DIS_MASK 0x00000400L
++#define PCIE_SDP_CTRL__CI_DISABLE_LTR_DROPPING_MASK 0x00000800L
++#define PCIE_SDP_CTRL__RX_SWUS_SIDEBAND_CPLHDR_DIS_MASK 0x00001000L
++#define PCIE_SDP_CTRL__CI_MST_MEMR_RD_NONCONT_BE_EN_MASK 0x00002000L
++#define PCIE_SDP_CTRL__CI_MSTSDP_DISCONNECT_RSP_ON_PARTIAL_MASK 0x00004000L
++#define PCIE_SDP_CTRL__CI_SWUS_RCVD_ERR_HANDLING_DIS_MASK 0x00008000L
++#define PCIE_SDP_CTRL__EARLY_HW_WAKE_UP_EN_MASK 0x00010000L
++#define PCIE_SDP_CTRL__SLV_SDP_DISCONNECT_WHEN_IN_L1_EN_MASK 0x00020000L
++#define PCIE_SDP_CTRL__BLOCK_SLV_SDP_DISCONNECT_WHEN_EARLY_HW_WAKE_UP_EN_MASK 0x00040000L
++#define PCIE_SDP_CTRL__TX_ENCMSG_USE_SDP_EP_DIS_MASK 0x00080000L
++#define PCIE_SDP_CTRL__TX_IGNORE_POISON_BIT_EN_MASK 0x00100000L
++#define PCIE_SDP_CTRL__TX_RBUF_WRITE_2HDR_DIS_MASK 0x00200000L
++#define PCIE_SDP_CTRL__TX_RBUF_READ_2HDR_DIS_MASK 0x00400000L
++//PCIE_SDP_SWUS_SLV_ATTR_CTRL
++#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_RO_OVERRIDE_MEMWR__SHIFT 0x0
++#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_RO_OVERRIDE_MEMRD__SHIFT 0x2
++#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_RO_OVERRIDE_ATOMIC__SHIFT 0x4
++#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_SNR_OVERRIDE_MEMWR__SHIFT 0x6
++#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_SNR_OVERRIDE_MEMRD__SHIFT 0x8
++#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_SNR_OVERRIDE_ATOMIC__SHIFT 0xa
++#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_IDO_OVERRIDE_MEMWR__SHIFT 0xc
++#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_IDO_OVERRIDE_MEMRD__SHIFT 0xe
++#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_IDO_OVERRIDE_ATOMIC__SHIFT 0x10
++#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_RO_OVERRIDE_MEMWR_MASK 0x00000003L
++#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_RO_OVERRIDE_MEMRD_MASK 0x0000000CL
++#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_RO_OVERRIDE_ATOMIC_MASK 0x00000030L
++#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_SNR_OVERRIDE_MEMWR_MASK 0x000000C0L
++#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_SNR_OVERRIDE_MEMRD_MASK 0x00000300L
++#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_SNR_OVERRIDE_ATOMIC_MASK 0x00000C00L
++#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_IDO_OVERRIDE_MEMWR_MASK 0x00003000L
++#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_IDO_OVERRIDE_MEMRD_MASK 0x0000C000L
++#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_IDO_OVERRIDE_ATOMIC_MASK 0x00030000L
++//PCIE_PERF_COUNT_CNTL
++#define PCIE_PERF_COUNT_CNTL__GLOBAL_COUNT_EN__SHIFT 0x0
++#define PCIE_PERF_COUNT_CNTL__GLOBAL_SHADOW_WR__SHIFT 0x1
++#define PCIE_PERF_COUNT_CNTL__GLOBAL_COUNT_RESET__SHIFT 0x2
++#define PCIE_PERF_COUNT_CNTL__GLOBAL_COUNT_EN_MASK 0x00000001L
++#define PCIE_PERF_COUNT_CNTL__GLOBAL_SHADOW_WR_MASK 0x00000002L
++#define PCIE_PERF_COUNT_CNTL__GLOBAL_COUNT_RESET_MASK 0x00000004L
++//PCIE_PERF_CNTL_TXCLK
++#define PCIE_PERF_CNTL_TXCLK__EVENT0_SEL__SHIFT 0x0
++#define PCIE_PERF_CNTL_TXCLK__EVENT1_SEL__SHIFT 0x8
++#define PCIE_PERF_CNTL_TXCLK__COUNTER0_UPPER__SHIFT 0x10
++#define PCIE_PERF_CNTL_TXCLK__COUNTER1_UPPER__SHIFT 0x18
++#define PCIE_PERF_CNTL_TXCLK__EVENT0_SEL_MASK 0x000000FFL
++#define PCIE_PERF_CNTL_TXCLK__EVENT1_SEL_MASK 0x0000FF00L
++#define PCIE_PERF_CNTL_TXCLK__COUNTER0_UPPER_MASK 0x00FF0000L
++#define PCIE_PERF_CNTL_TXCLK__COUNTER1_UPPER_MASK 0xFF000000L
++//PCIE_PERF_COUNT0_TXCLK
++#define PCIE_PERF_COUNT0_TXCLK__COUNTER0__SHIFT 0x0
++#define PCIE_PERF_COUNT0_TXCLK__COUNTER0_MASK 0xFFFFFFFFL
++//PCIE_PERF_COUNT1_TXCLK
++#define PCIE_PERF_COUNT1_TXCLK__COUNTER1__SHIFT 0x0
++#define PCIE_PERF_COUNT1_TXCLK__COUNTER1_MASK 0xFFFFFFFFL
++//PCIE_PERF_CNTL_MST_R_CLK
++#define PCIE_PERF_CNTL_MST_R_CLK__EVENT0_SEL__SHIFT 0x0
++#define PCIE_PERF_CNTL_MST_R_CLK__EVENT1_SEL__SHIFT 0x8
++#define PCIE_PERF_CNTL_MST_R_CLK__COUNTER0_UPPER__SHIFT 0x10
++#define PCIE_PERF_CNTL_MST_R_CLK__COUNTER1_UPPER__SHIFT 0x18
++#define PCIE_PERF_CNTL_MST_R_CLK__EVENT0_SEL_MASK 0x000000FFL
++#define PCIE_PERF_CNTL_MST_R_CLK__EVENT1_SEL_MASK 0x0000FF00L
++#define PCIE_PERF_CNTL_MST_R_CLK__COUNTER0_UPPER_MASK 0x00FF0000L
++#define PCIE_PERF_CNTL_MST_R_CLK__COUNTER1_UPPER_MASK 0xFF000000L
++//PCIE_PERF_COUNT0_MST_R_CLK
++#define PCIE_PERF_COUNT0_MST_R_CLK__COUNTER0__SHIFT 0x0
++#define PCIE_PERF_COUNT0_MST_R_CLK__COUNTER0_MASK 0xFFFFFFFFL
++//PCIE_PERF_COUNT1_MST_R_CLK
++#define PCIE_PERF_COUNT1_MST_R_CLK__COUNTER1__SHIFT 0x0
++#define PCIE_PERF_COUNT1_MST_R_CLK__COUNTER1_MASK 0xFFFFFFFFL
++//PCIE_PERF_CNTL_MST_C_CLK
++#define PCIE_PERF_CNTL_MST_C_CLK__EVENT0_SEL__SHIFT 0x0
++#define PCIE_PERF_CNTL_MST_C_CLK__EVENT1_SEL__SHIFT 0x8
++#define PCIE_PERF_CNTL_MST_C_CLK__COUNTER0_UPPER__SHIFT 0x10
++#define PCIE_PERF_CNTL_MST_C_CLK__COUNTER1_UPPER__SHIFT 0x18
++#define PCIE_PERF_CNTL_MST_C_CLK__EVENT0_SEL_MASK 0x000000FFL
++#define PCIE_PERF_CNTL_MST_C_CLK__EVENT1_SEL_MASK 0x0000FF00L
++#define PCIE_PERF_CNTL_MST_C_CLK__COUNTER0_UPPER_MASK 0x00FF0000L
++#define PCIE_PERF_CNTL_MST_C_CLK__COUNTER1_UPPER_MASK 0xFF000000L
++//PCIE_PERF_COUNT0_MST_C_CLK
++#define PCIE_PERF_COUNT0_MST_C_CLK__COUNTER0__SHIFT 0x0
++#define PCIE_PERF_COUNT0_MST_C_CLK__COUNTER0_MASK 0xFFFFFFFFL
++//PCIE_PERF_COUNT1_MST_C_CLK
++#define PCIE_PERF_COUNT1_MST_C_CLK__COUNTER1__SHIFT 0x0
++#define PCIE_PERF_COUNT1_MST_C_CLK__COUNTER1_MASK 0xFFFFFFFFL
++//PCIE_PERF_CNTL_SLV_R_CLK
++#define PCIE_PERF_CNTL_SLV_R_CLK__EVENT0_SEL__SHIFT 0x0
++#define PCIE_PERF_CNTL_SLV_R_CLK__EVENT1_SEL__SHIFT 0x8
++#define PCIE_PERF_CNTL_SLV_R_CLK__COUNTER0_UPPER__SHIFT 0x10
++#define PCIE_PERF_CNTL_SLV_R_CLK__COUNTER1_UPPER__SHIFT 0x18
++#define PCIE_PERF_CNTL_SLV_R_CLK__EVENT0_SEL_MASK 0x000000FFL
++#define PCIE_PERF_CNTL_SLV_R_CLK__EVENT1_SEL_MASK 0x0000FF00L
++#define PCIE_PERF_CNTL_SLV_R_CLK__COUNTER0_UPPER_MASK 0x00FF0000L
++#define PCIE_PERF_CNTL_SLV_R_CLK__COUNTER1_UPPER_MASK 0xFF000000L
++//PCIE_PERF_COUNT0_SLV_R_CLK
++#define PCIE_PERF_COUNT0_SLV_R_CLK__COUNTER0__SHIFT 0x0
++#define PCIE_PERF_COUNT0_SLV_R_CLK__COUNTER0_MASK 0xFFFFFFFFL
++//PCIE_PERF_COUNT1_SLV_R_CLK
++#define PCIE_PERF_COUNT1_SLV_R_CLK__COUNTER1__SHIFT 0x0
++#define PCIE_PERF_COUNT1_SLV_R_CLK__COUNTER1_MASK 0xFFFFFFFFL
++//PCIE_PERF_CNTL_SLV_S_C_CLK
++#define PCIE_PERF_CNTL_SLV_S_C_CLK__EVENT0_SEL__SHIFT 0x0
++#define PCIE_PERF_CNTL_SLV_S_C_CLK__EVENT1_SEL__SHIFT 0x8
++#define PCIE_PERF_CNTL_SLV_S_C_CLK__COUNTER0_UPPER__SHIFT 0x10
++#define PCIE_PERF_CNTL_SLV_S_C_CLK__COUNTER1_UPPER__SHIFT 0x18
++#define PCIE_PERF_CNTL_SLV_S_C_CLK__EVENT0_SEL_MASK 0x000000FFL
++#define PCIE_PERF_CNTL_SLV_S_C_CLK__EVENT1_SEL_MASK 0x0000FF00L
++#define PCIE_PERF_CNTL_SLV_S_C_CLK__COUNTER0_UPPER_MASK 0x00FF0000L
++#define PCIE_PERF_CNTL_SLV_S_C_CLK__COUNTER1_UPPER_MASK 0xFF000000L
++//PCIE_PERF_COUNT0_SLV_S_C_CLK
++#define PCIE_PERF_COUNT0_SLV_S_C_CLK__COUNTER0__SHIFT 0x0
++#define PCIE_PERF_COUNT0_SLV_S_C_CLK__COUNTER0_MASK 0xFFFFFFFFL
++//PCIE_PERF_COUNT1_SLV_S_C_CLK
++#define PCIE_PERF_COUNT1_SLV_S_C_CLK__COUNTER1__SHIFT 0x0
++#define PCIE_PERF_COUNT1_SLV_S_C_CLK__COUNTER1_MASK 0xFFFFFFFFL
++//PCIE_PERF_CNTL_SLV_NS_C_CLK
++#define PCIE_PERF_CNTL_SLV_NS_C_CLK__EVENT0_SEL__SHIFT 0x0
++#define PCIE_PERF_CNTL_SLV_NS_C_CLK__EVENT1_SEL__SHIFT 0x8
++#define PCIE_PERF_CNTL_SLV_NS_C_CLK__COUNTER0_UPPER__SHIFT 0x10
++#define PCIE_PERF_CNTL_SLV_NS_C_CLK__COUNTER1_UPPER__SHIFT 0x18
++#define PCIE_PERF_CNTL_SLV_NS_C_CLK__EVENT0_SEL_MASK 0x000000FFL
++#define PCIE_PERF_CNTL_SLV_NS_C_CLK__EVENT1_SEL_MASK 0x0000FF00L
++#define PCIE_PERF_CNTL_SLV_NS_C_CLK__COUNTER0_UPPER_MASK 0x00FF0000L
++#define PCIE_PERF_CNTL_SLV_NS_C_CLK__COUNTER1_UPPER_MASK 0xFF000000L
++//PCIE_PERF_COUNT0_SLV_NS_C_CLK
++#define PCIE_PERF_COUNT0_SLV_NS_C_CLK__COUNTER0__SHIFT 0x0
++#define PCIE_PERF_COUNT0_SLV_NS_C_CLK__COUNTER0_MASK 0xFFFFFFFFL
++//PCIE_PERF_COUNT1_SLV_NS_C_CLK
++#define PCIE_PERF_COUNT1_SLV_NS_C_CLK__COUNTER1__SHIFT 0x0
++#define PCIE_PERF_COUNT1_SLV_NS_C_CLK__COUNTER1_MASK 0xFFFFFFFFL
++//PCIE_PERF_CNTL_EVENT0_PORT_SEL
++#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_TXCLK__SHIFT 0x0
++#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_MST_R_CLK__SHIFT 0x4
++#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_MST_C_CLK__SHIFT 0x8
++#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_SLV_R_CLK__SHIFT 0xc
++#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_SLV_S_C_CLK__SHIFT 0x10
++#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_SLV_NS_C_CLK__SHIFT 0x14
++#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_TXCLK2__SHIFT 0x18
++#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_TXCLK_MASK 0x0000000FL
++#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_MST_R_CLK_MASK 0x000000F0L
++#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_MST_C_CLK_MASK 0x00000F00L
++#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_SLV_R_CLK_MASK 0x0000F000L
++#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_SLV_S_C_CLK_MASK 0x000F0000L
++#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_SLV_NS_C_CLK_MASK 0x00F00000L
++#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_TXCLK2_MASK 0x0F000000L
++//PCIE_PERF_CNTL_EVENT1_PORT_SEL
++#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_TXCLK__SHIFT 0x0
++#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_MST_R_CLK__SHIFT 0x4
++#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_MST_C_CLK__SHIFT 0x8
++#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_SLV_R_CLK__SHIFT 0xc
++#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_SLV_S_C_CLK__SHIFT 0x10
++#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_SLV_NS_C_CLK__SHIFT 0x14
++#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_TXCLK2__SHIFT 0x18
++#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_TXCLK_MASK 0x0000000FL
++#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_MST_R_CLK_MASK 0x000000F0L
++#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_MST_C_CLK_MASK 0x00000F00L
++#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_SLV_R_CLK_MASK 0x0000F000L
++#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_SLV_S_C_CLK_MASK 0x000F0000L
++#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_SLV_NS_C_CLK_MASK 0x00F00000L
++#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_TXCLK2_MASK 0x0F000000L
++//PCIE_PERF_CNTL_TXCLK2
++#define PCIE_PERF_CNTL_TXCLK2__EVENT0_SEL__SHIFT 0x0
++#define PCIE_PERF_CNTL_TXCLK2__EVENT1_SEL__SHIFT 0x8
++#define PCIE_PERF_CNTL_TXCLK2__COUNTER0_UPPER__SHIFT 0x10
++#define PCIE_PERF_CNTL_TXCLK2__COUNTER1_UPPER__SHIFT 0x18
++#define PCIE_PERF_CNTL_TXCLK2__EVENT0_SEL_MASK 0x000000FFL
++#define PCIE_PERF_CNTL_TXCLK2__EVENT1_SEL_MASK 0x0000FF00L
++#define PCIE_PERF_CNTL_TXCLK2__COUNTER0_UPPER_MASK 0x00FF0000L
++#define PCIE_PERF_CNTL_TXCLK2__COUNTER1_UPPER_MASK 0xFF000000L
++//PCIE_PERF_COUNT0_TXCLK2
++#define PCIE_PERF_COUNT0_TXCLK2__COUNTER0__SHIFT 0x0
++#define PCIE_PERF_COUNT0_TXCLK2__COUNTER0_MASK 0xFFFFFFFFL
++//PCIE_PERF_COUNT1_TXCLK2
++#define PCIE_PERF_COUNT1_TXCLK2__COUNTER1__SHIFT 0x0
++#define PCIE_PERF_COUNT1_TXCLK2__COUNTER1_MASK 0xFFFFFFFFL
++//PCIE_HIP_REG0
++#define PCIE_HIP_REG0__CI_HIP_APT0_BASE_HI__SHIFT 0x0
++#define PCIE_HIP_REG0__CI_HIP_APT0_ENABLE__SHIFT 0x18
++#define PCIE_HIP_REG0__CI_HIP_APT0_PASID_MODE__SHIFT 0x19
++#define PCIE_HIP_REG0__CI_HIP_APT0_REQAT_MODE__SHIFT 0x1a
++#define PCIE_HIP_REG0__CI_HIP_APT0_REQIO_MODE__SHIFT 0x1d
++#define PCIE_HIP_REG0__CI_HIP_APT0_BASE_HI_MASK 0x000FFFFFL
++#define PCIE_HIP_REG0__CI_HIP_APT0_ENABLE_MASK 0x01000000L
++#define PCIE_HIP_REG0__CI_HIP_APT0_PASID_MODE_MASK 0x02000000L
++#define PCIE_HIP_REG0__CI_HIP_APT0_REQAT_MODE_MASK 0x1C000000L
++#define PCIE_HIP_REG0__CI_HIP_APT0_REQIO_MODE_MASK 0x60000000L
++//PCIE_HIP_REG1
++#define PCIE_HIP_REG1__CI_HIP_APT0_BASE_LO__SHIFT 0x0
++#define PCIE_HIP_REG1__CI_HIP_APT0_BASE_LO_MASK 0xFFFFFFFFL
++//PCIE_HIP_REG2
++#define PCIE_HIP_REG2__CI_HIP_APT0_LIMIT_HI__SHIFT 0x0
++#define PCIE_HIP_REG2__CI_HIP_APT0_LIMIT_HI_MASK 0x000FFFFFL
++//PCIE_HIP_REG3
++#define PCIE_HIP_REG3__CI_HIP_APT0_LIMIT_LO__SHIFT 0x0
++#define PCIE_HIP_REG3__CI_HIP_APT0_LIMIT_LO_MASK 0xFFFFFFFFL
++//PCIE_HIP_REG4
++#define PCIE_HIP_REG4__CI_HIP_APT1_BASE_HI__SHIFT 0x0
++#define PCIE_HIP_REG4__CI_HIP_APT1_ENABLE__SHIFT 0x18
++#define PCIE_HIP_REG4__CI_HIP_APT1_PASID_MODE__SHIFT 0x19
++#define PCIE_HIP_REG4__CI_HIP_APT1_REQAT_MODE__SHIFT 0x1a
++#define PCIE_HIP_REG4__CI_HIP_APT1_REQIO_MODE__SHIFT 0x1d
++#define PCIE_HIP_REG4__CI_HIP_APT1_BASE_HI_MASK 0x000FFFFFL
++#define PCIE_HIP_REG4__CI_HIP_APT1_ENABLE_MASK 0x01000000L
++#define PCIE_HIP_REG4__CI_HIP_APT1_PASID_MODE_MASK 0x02000000L
++#define PCIE_HIP_REG4__CI_HIP_APT1_REQAT_MODE_MASK 0x1C000000L
++#define PCIE_HIP_REG4__CI_HIP_APT1_REQIO_MODE_MASK 0x60000000L
++//PCIE_HIP_REG5
++#define PCIE_HIP_REG5__CI_HIP_APT1_BASE_LO__SHIFT 0x0
++#define PCIE_HIP_REG5__CI_HIP_APT1_BASE_LO_MASK 0xFFFFFFFFL
++//PCIE_HIP_REG6
++#define PCIE_HIP_REG6__CI_HIP_APT1_LIMIT_HI__SHIFT 0x0
++#define PCIE_HIP_REG6__CI_HIP_APT1_LIMIT_HI_MASK 0x000FFFFFL
++//PCIE_HIP_REG7
++#define PCIE_HIP_REG7__CI_HIP_APT1_LIMIT_LO__SHIFT 0x0
++#define PCIE_HIP_REG7__CI_HIP_APT1_LIMIT_LO_MASK 0xFFFFFFFFL
++//PCIE_HIP_REG8
++#define PCIE_HIP_REG8__CI_HIP_MASK__SHIFT 0x0
++#define PCIE_HIP_REG8__CI_HIP_MASK_MASK 0x000FFFFFL
++//PCIE_PRBS_CLR
++#define PCIE_PRBS_CLR__PRBS_CLR__SHIFT 0x0
++#define PCIE_PRBS_CLR__PRBS_POLARITY_EN__SHIFT 0x18
++#define PCIE_PRBS_CLR__PRBS_CLR_MASK 0x0000FFFFL
++#define PCIE_PRBS_CLR__PRBS_POLARITY_EN_MASK 0x01000000L
++//PCIE_PRBS_STATUS1
++#define PCIE_PRBS_STATUS1__PRBS_ERRSTAT__SHIFT 0x0
++#define PCIE_PRBS_STATUS1__PRBS_LOCKED__SHIFT 0x10
++#define PCIE_PRBS_STATUS1__PRBS_ERRSTAT_MASK 0x0000FFFFL
++#define PCIE_PRBS_STATUS1__PRBS_LOCKED_MASK 0xFFFF0000L
++//PCIE_PRBS_STATUS2
++#define PCIE_PRBS_STATUS2__PRBS_BITCNT_DONE__SHIFT 0x0
++#define PCIE_PRBS_STATUS2__PRBS_BITCNT_DONE_MASK 0x0000FFFFL
++//PCIE_PRBS_FREERUN
++#define PCIE_PRBS_FREERUN__PRBS_FREERUN__SHIFT 0x0
++#define PCIE_PRBS_FREERUN__PRBS_FREERUN_MASK 0x0000FFFFL
++//PCIE_PRBS_MISC
++#define PCIE_PRBS_MISC__PRBS_EN__SHIFT 0x0
++#define PCIE_PRBS_MISC__PRBS_TEST_MODE__SHIFT 0x1
++#define PCIE_PRBS_MISC__PRBS_USER_PATTERN_TOGGLE__SHIFT 0x4
++#define PCIE_PRBS_MISC__PRBS_8BIT_SEL__SHIFT 0x5
++#define PCIE_PRBS_MISC__PRBS_COMMA_NUM__SHIFT 0x6
++#define PCIE_PRBS_MISC__PRBS_LOCK_CNT__SHIFT 0x8
++#define PCIE_PRBS_MISC__PRBS_DATA_RATE__SHIFT 0xe
++#define PCIE_PRBS_MISC__PRBS_CHK_ERR_MASK__SHIFT 0x10
++#define PCIE_PRBS_MISC__PRBS_EN_MASK 0x00000001L
++#define PCIE_PRBS_MISC__PRBS_TEST_MODE_MASK 0x0000000EL
++#define PCIE_PRBS_MISC__PRBS_USER_PATTERN_TOGGLE_MASK 0x00000010L
++#define PCIE_PRBS_MISC__PRBS_8BIT_SEL_MASK 0x00000020L
++#define PCIE_PRBS_MISC__PRBS_COMMA_NUM_MASK 0x000000C0L
++#define PCIE_PRBS_MISC__PRBS_LOCK_CNT_MASK 0x00001F00L
++#define PCIE_PRBS_MISC__PRBS_DATA_RATE_MASK 0x0000C000L
++#define PCIE_PRBS_MISC__PRBS_CHK_ERR_MASK_MASK 0xFFFF0000L
++//PCIE_PRBS_USER_PATTERN
++#define PCIE_PRBS_USER_PATTERN__PRBS_USER_PATTERN__SHIFT 0x0
++#define PCIE_PRBS_USER_PATTERN__PRBS_USER_PATTERN_MASK 0x3FFFFFFFL
++//PCIE_PRBS_LO_BITCNT
++#define PCIE_PRBS_LO_BITCNT__PRBS_LO_BITCNT__SHIFT 0x0
++#define PCIE_PRBS_LO_BITCNT__PRBS_LO_BITCNT_MASK 0xFFFFFFFFL
++//PCIE_PRBS_HI_BITCNT
++#define PCIE_PRBS_HI_BITCNT__PRBS_HI_BITCNT__SHIFT 0x0
++#define PCIE_PRBS_HI_BITCNT__PRBS_HI_BITCNT_MASK 0x000000FFL
++//PCIE_PRBS_ERRCNT_0
++#define PCIE_PRBS_ERRCNT_0__PRBS_ERRCNT_0__SHIFT 0x0
++#define PCIE_PRBS_ERRCNT_0__PRBS_ERRCNT_0_MASK 0xFFFFFFFFL
++//PCIE_PRBS_ERRCNT_1
++#define PCIE_PRBS_ERRCNT_1__PRBS_ERRCNT_1__SHIFT 0x0
++#define PCIE_PRBS_ERRCNT_1__PRBS_ERRCNT_1_MASK 0xFFFFFFFFL
++//PCIE_PRBS_ERRCNT_2
++#define PCIE_PRBS_ERRCNT_2__PRBS_ERRCNT_2__SHIFT 0x0
++#define PCIE_PRBS_ERRCNT_2__PRBS_ERRCNT_2_MASK 0xFFFFFFFFL
++//PCIE_PRBS_ERRCNT_3
++#define PCIE_PRBS_ERRCNT_3__PRBS_ERRCNT_3__SHIFT 0x0
++#define PCIE_PRBS_ERRCNT_3__PRBS_ERRCNT_3_MASK 0xFFFFFFFFL
++//PCIE_PRBS_ERRCNT_4
++#define PCIE_PRBS_ERRCNT_4__PRBS_ERRCNT_4__SHIFT 0x0
++#define PCIE_PRBS_ERRCNT_4__PRBS_ERRCNT_4_MASK 0xFFFFFFFFL
++//PCIE_PRBS_ERRCNT_5
++#define PCIE_PRBS_ERRCNT_5__PRBS_ERRCNT_5__SHIFT 0x0
++#define PCIE_PRBS_ERRCNT_5__PRBS_ERRCNT_5_MASK 0xFFFFFFFFL
++//PCIE_PRBS_ERRCNT_6
++#define PCIE_PRBS_ERRCNT_6__PRBS_ERRCNT_6__SHIFT 0x0
++#define PCIE_PRBS_ERRCNT_6__PRBS_ERRCNT_6_MASK 0xFFFFFFFFL
++//PCIE_PRBS_ERRCNT_7
++#define PCIE_PRBS_ERRCNT_7__PRBS_ERRCNT_7__SHIFT 0x0
++#define PCIE_PRBS_ERRCNT_7__PRBS_ERRCNT_7_MASK 0xFFFFFFFFL
++//PCIE_PRBS_ERRCNT_8
++#define PCIE_PRBS_ERRCNT_8__PRBS_ERRCNT_8__SHIFT 0x0
++#define PCIE_PRBS_ERRCNT_8__PRBS_ERRCNT_8_MASK 0xFFFFFFFFL
++//PCIE_PRBS_ERRCNT_9
++#define PCIE_PRBS_ERRCNT_9__PRBS_ERRCNT_9__SHIFT 0x0
++#define PCIE_PRBS_ERRCNT_9__PRBS_ERRCNT_9_MASK 0xFFFFFFFFL
++//PCIE_PRBS_ERRCNT_10
++#define PCIE_PRBS_ERRCNT_10__PRBS_ERRCNT_10__SHIFT 0x0
++#define PCIE_PRBS_ERRCNT_10__PRBS_ERRCNT_10_MASK 0xFFFFFFFFL
++//PCIE_PRBS_ERRCNT_11
++#define PCIE_PRBS_ERRCNT_11__PRBS_ERRCNT_11__SHIFT 0x0
++#define PCIE_PRBS_ERRCNT_11__PRBS_ERRCNT_11_MASK 0xFFFFFFFFL
++//PCIE_PRBS_ERRCNT_12
++#define PCIE_PRBS_ERRCNT_12__PRBS_ERRCNT_12__SHIFT 0x0
++#define PCIE_PRBS_ERRCNT_12__PRBS_ERRCNT_12_MASK 0xFFFFFFFFL
++//PCIE_PRBS_ERRCNT_13
++#define PCIE_PRBS_ERRCNT_13__PRBS_ERRCNT_13__SHIFT 0x0
++#define PCIE_PRBS_ERRCNT_13__PRBS_ERRCNT_13_MASK 0xFFFFFFFFL
++//PCIE_PRBS_ERRCNT_14
++#define PCIE_PRBS_ERRCNT_14__PRBS_ERRCNT_14__SHIFT 0x0
++#define PCIE_PRBS_ERRCNT_14__PRBS_ERRCNT_14_MASK 0xFFFFFFFFL
++//PCIE_PRBS_ERRCNT_15
++#define PCIE_PRBS_ERRCNT_15__PRBS_ERRCNT_15__SHIFT 0x0
++#define PCIE_PRBS_ERRCNT_15__PRBS_ERRCNT_15_MASK 0xFFFFFFFFL
++//SWRST_COMMAND_STATUS
++#define SWRST_COMMAND_STATUS__RECONFIGURE__SHIFT 0x0
++#define SWRST_COMMAND_STATUS__ATOMIC_RESET__SHIFT 0x1
++#define SWRST_COMMAND_STATUS__RESET_COMPLETE__SHIFT 0x10
++#define SWRST_COMMAND_STATUS__WAIT_STATE__SHIFT 0x11
++#define SWRST_COMMAND_STATUS__SWUS_LINK_RESET__SHIFT 0x18
++#define SWRST_COMMAND_STATUS__SWUS_LINK_RESET_CFG_ONLY__SHIFT 0x19
++#define SWRST_COMMAND_STATUS__SWUS_LINK_RESET_PHY_CALIB__SHIFT 0x1a
++#define SWRST_COMMAND_STATUS__SWDS_LINK_RESET__SHIFT 0x1b
++#define SWRST_COMMAND_STATUS__SWDS_LINK_RESET_CFG_ONLY__SHIFT 0x1c
++#define SWRST_COMMAND_STATUS__LINK_RESET_TYPE_HOT_RESET__SHIFT 0x1d
++#define SWRST_COMMAND_STATUS__LINK_RESET_TYPE_LINK_DISABLE__SHIFT 0x1e
++#define SWRST_COMMAND_STATUS__LINK_RESET_TYPE_LINK_DOWN__SHIFT 0x1f
++#define SWRST_COMMAND_STATUS__RECONFIGURE_MASK 0x00000001L
++#define SWRST_COMMAND_STATUS__ATOMIC_RESET_MASK 0x00000002L
++#define SWRST_COMMAND_STATUS__RESET_COMPLETE_MASK 0x00010000L
++#define SWRST_COMMAND_STATUS__WAIT_STATE_MASK 0x00020000L
++#define SWRST_COMMAND_STATUS__SWUS_LINK_RESET_MASK 0x01000000L
++#define SWRST_COMMAND_STATUS__SWUS_LINK_RESET_CFG_ONLY_MASK 0x02000000L
++#define SWRST_COMMAND_STATUS__SWUS_LINK_RESET_PHY_CALIB_MASK 0x04000000L
++#define SWRST_COMMAND_STATUS__SWDS_LINK_RESET_MASK 0x08000000L
++#define SWRST_COMMAND_STATUS__SWDS_LINK_RESET_CFG_ONLY_MASK 0x10000000L
++#define SWRST_COMMAND_STATUS__LINK_RESET_TYPE_HOT_RESET_MASK 0x20000000L
++#define SWRST_COMMAND_STATUS__LINK_RESET_TYPE_LINK_DISABLE_MASK 0x40000000L
++#define SWRST_COMMAND_STATUS__LINK_RESET_TYPE_LINK_DOWN_MASK 0x80000000L
++//SWRST_GENERAL_CONTROL
++#define SWRST_GENERAL_CONTROL__RECONFIGURE_EN__SHIFT 0x0
++#define SWRST_GENERAL_CONTROL__ATOMIC_RESET_EN__SHIFT 0x1
++#define SWRST_GENERAL_CONTROL__RESET_PERIOD__SHIFT 0x2
++#define SWRST_GENERAL_CONTROL__WAIT_LINKUP__SHIFT 0x8
++#define SWRST_GENERAL_CONTROL__FORCE_REGIDLE__SHIFT 0x9
++#define SWRST_GENERAL_CONTROL__BLOCK_ON_IDLE__SHIFT 0xa
++#define SWRST_GENERAL_CONTROL__CONFIG_XFER_MODE__SHIFT 0xc
++#define SWRST_GENERAL_CONTROL__MP1_PCIE_CROSSFIRE_LOCKDOWN_EN__SHIFT 0x18
++#define SWRST_GENERAL_CONTROL__IGNORE_SDP_RESET__SHIFT 0x19
++#define SWRST_GENERAL_CONTROL__RECONFIGURE_EN_MASK 0x00000001L
++#define SWRST_GENERAL_CONTROL__ATOMIC_RESET_EN_MASK 0x00000002L
++#define SWRST_GENERAL_CONTROL__RESET_PERIOD_MASK 0x0000001CL
++#define SWRST_GENERAL_CONTROL__WAIT_LINKUP_MASK 0x00000100L
++#define SWRST_GENERAL_CONTROL__FORCE_REGIDLE_MASK 0x00000200L
++#define SWRST_GENERAL_CONTROL__BLOCK_ON_IDLE_MASK 0x00000400L
++#define SWRST_GENERAL_CONTROL__CONFIG_XFER_MODE_MASK 0x00001000L
++#define SWRST_GENERAL_CONTROL__MP1_PCIE_CROSSFIRE_LOCKDOWN_EN_MASK 0x01000000L
++#define SWRST_GENERAL_CONTROL__IGNORE_SDP_RESET_MASK 0x02000000L
++//SWRST_COMMAND_0
++#define SWRST_COMMAND_0__PORT0_COR_RESET__SHIFT 0x0
++#define SWRST_COMMAND_0__PORT0_CFG_RESET__SHIFT 0x8
++#define SWRST_COMMAND_0__PORT1_CFG_RESET__SHIFT 0x9
++#define SWRST_COMMAND_0__PORT2_CFG_RESET__SHIFT 0xa
++#define SWRST_COMMAND_0__PORT3_CFG_RESET__SHIFT 0xb
++#define SWRST_COMMAND_0__PORT4_CFG_RESET__SHIFT 0xc
++#define SWRST_COMMAND_0__PORT5_CFG_RESET__SHIFT 0xd
++#define SWRST_COMMAND_0__PORT6_CFG_RESET__SHIFT 0xe
++#define SWRST_COMMAND_0__PORT7_CFG_RESET__SHIFT 0xf
++#define SWRST_COMMAND_0__BIF0_GLOBAL_RESET__SHIFT 0x18
++#define SWRST_COMMAND_0__BIF0_CALIB_RESET__SHIFT 0x19
++#define SWRST_COMMAND_0__BIF0_CORE_RESET__SHIFT 0x1a
++#define SWRST_COMMAND_0__BIF0_REGISTER_RESET__SHIFT 0x1b
++#define SWRST_COMMAND_0__BIF0_PHY_RESET__SHIFT 0x1c
++#define SWRST_COMMAND_0__BIF0_STICKY_RESET__SHIFT 0x1d
++#define SWRST_COMMAND_0__BIF0_CONFIG_RESET__SHIFT 0x1e
++#define SWRST_COMMAND_0__PORT0_COR_RESET_MASK 0x00000001L
++#define SWRST_COMMAND_0__PORT0_CFG_RESET_MASK 0x00000100L
++#define SWRST_COMMAND_0__PORT1_CFG_RESET_MASK 0x00000200L
++#define SWRST_COMMAND_0__PORT2_CFG_RESET_MASK 0x00000400L
++#define SWRST_COMMAND_0__PORT3_CFG_RESET_MASK 0x00000800L
++#define SWRST_COMMAND_0__PORT4_CFG_RESET_MASK 0x00001000L
++#define SWRST_COMMAND_0__PORT5_CFG_RESET_MASK 0x00002000L
++#define SWRST_COMMAND_0__PORT6_CFG_RESET_MASK 0x00004000L
++#define SWRST_COMMAND_0__PORT7_CFG_RESET_MASK 0x00008000L
++#define SWRST_COMMAND_0__BIF0_GLOBAL_RESET_MASK 0x01000000L
++#define SWRST_COMMAND_0__BIF0_CALIB_RESET_MASK 0x02000000L
++#define SWRST_COMMAND_0__BIF0_CORE_RESET_MASK 0x04000000L
++#define SWRST_COMMAND_0__BIF0_REGISTER_RESET_MASK 0x08000000L
++#define SWRST_COMMAND_0__BIF0_PHY_RESET_MASK 0x10000000L
++#define SWRST_COMMAND_0__BIF0_STICKY_RESET_MASK 0x20000000L
++#define SWRST_COMMAND_0__BIF0_CONFIG_RESET_MASK 0x40000000L
++//SWRST_COMMAND_1
++#define SWRST_COMMAND_1__SWITCHCLK__SHIFT 0x15
++#define SWRST_COMMAND_1__RESETAXIMST__SHIFT 0x16
++#define SWRST_COMMAND_1__RESETAXISLV__SHIFT 0x17
++#define SWRST_COMMAND_1__RESETAXIINT__SHIFT 0x18
++#define SWRST_COMMAND_1__RESETPCFG__SHIFT 0x19
++#define SWRST_COMMAND_1__RESETLNCT__SHIFT 0x1a
++#define SWRST_COMMAND_1__RESETMNTR__SHIFT 0x1b
++#define SWRST_COMMAND_1__RESETHLTR__SHIFT 0x1c
++#define SWRST_COMMAND_1__RESETCPM__SHIFT 0x1d
++#define SWRST_COMMAND_1__RESETPHY0__SHIFT 0x1e
++#define SWRST_COMMAND_1__SWITCHCLK_MASK 0x00200000L
++#define SWRST_COMMAND_1__RESETAXIMST_MASK 0x00400000L
++#define SWRST_COMMAND_1__RESETAXISLV_MASK 0x00800000L
++#define SWRST_COMMAND_1__RESETAXIINT_MASK 0x01000000L
++#define SWRST_COMMAND_1__RESETPCFG_MASK 0x02000000L
++#define SWRST_COMMAND_1__RESETLNCT_MASK 0x04000000L
++#define SWRST_COMMAND_1__RESETMNTR_MASK 0x08000000L
++#define SWRST_COMMAND_1__RESETHLTR_MASK 0x10000000L
++#define SWRST_COMMAND_1__RESETCPM_MASK 0x20000000L
++#define SWRST_COMMAND_1__RESETPHY0_MASK 0x40000000L
++//SWRST_CONTROL_0
++#define SWRST_CONTROL_0__PORT0_COR_RCEN__SHIFT 0x0
++#define SWRST_CONTROL_0__PORT0_CFG_RCEN__SHIFT 0x8
++#define SWRST_CONTROL_0__PORT1_CFG_RCEN__SHIFT 0x9
++#define SWRST_CONTROL_0__PORT2_CFG_RCEN__SHIFT 0xa
++#define SWRST_CONTROL_0__PORT3_CFG_RCEN__SHIFT 0xb
++#define SWRST_CONTROL_0__PORT4_CFG_RCEN__SHIFT 0xc
++#define SWRST_CONTROL_0__PORT5_CFG_RCEN__SHIFT 0xd
++#define SWRST_CONTROL_0__PORT6_CFG_RCEN__SHIFT 0xe
++#define SWRST_CONTROL_0__PORT7_CFG_RCEN__SHIFT 0xf
++#define SWRST_CONTROL_0__BIF0_GLOBAL_RESETRCEN__SHIFT 0x18
++#define SWRST_CONTROL_0__BIF0_CALIB_RESETRCEN__SHIFT 0x19
++#define SWRST_CONTROL_0__BIF0_CORE_RESETRCEN__SHIFT 0x1a
++#define SWRST_CONTROL_0__BIF0_REGISTER_RESETRCEN__SHIFT 0x1b
++#define SWRST_CONTROL_0__BIF0_PHY_RESETRCEN__SHIFT 0x1c
++#define SWRST_CONTROL_0__BIF0_STICKY_RESETRCEN__SHIFT 0x1d
++#define SWRST_CONTROL_0__BIF0_CONFIG_RESETRCEN__SHIFT 0x1e
++#define SWRST_CONTROL_0__PORT0_COR_RCEN_MASK 0x00000001L
++#define SWRST_CONTROL_0__PORT0_CFG_RCEN_MASK 0x00000100L
++#define SWRST_CONTROL_0__PORT1_CFG_RCEN_MASK 0x00000200L
++#define SWRST_CONTROL_0__PORT2_CFG_RCEN_MASK 0x00000400L
++#define SWRST_CONTROL_0__PORT3_CFG_RCEN_MASK 0x00000800L
++#define SWRST_CONTROL_0__PORT4_CFG_RCEN_MASK 0x00001000L
++#define SWRST_CONTROL_0__PORT5_CFG_RCEN_MASK 0x00002000L
++#define SWRST_CONTROL_0__PORT6_CFG_RCEN_MASK 0x00004000L
++#define SWRST_CONTROL_0__PORT7_CFG_RCEN_MASK 0x00008000L
++#define SWRST_CONTROL_0__BIF0_GLOBAL_RESETRCEN_MASK 0x01000000L
++#define SWRST_CONTROL_0__BIF0_CALIB_RESETRCEN_MASK 0x02000000L
++#define SWRST_CONTROL_0__BIF0_CORE_RESETRCEN_MASK 0x04000000L
++#define SWRST_CONTROL_0__BIF0_REGISTER_RESETRCEN_MASK 0x08000000L
++#define SWRST_CONTROL_0__BIF0_PHY_RESETRCEN_MASK 0x10000000L
++#define SWRST_CONTROL_0__BIF0_STICKY_RESETRCEN_MASK 0x20000000L
++#define SWRST_CONTROL_0__BIF0_CONFIG_RESETRCEN_MASK 0x40000000L
++//SWRST_CONTROL_1
++#define SWRST_CONTROL_1__SWITCHCLK_RCEN__SHIFT 0x15
++#define SWRST_CONTROL_1__RESETAXIMST_RCEN__SHIFT 0x16
++#define SWRST_CONTROL_1__RESETAXISLV_RCEN__SHIFT 0x17
++#define SWRST_CONTROL_1__RESETAXIINT_RCEN__SHIFT 0x18
++#define SWRST_CONTROL_1__RESETPCFG_RCEN__SHIFT 0x19
++#define SWRST_CONTROL_1__RESETLNCT_RCEN__SHIFT 0x1a
++#define SWRST_CONTROL_1__RESETMNTR_RCEN__SHIFT 0x1b
++#define SWRST_CONTROL_1__RESETHLTR_RCEN__SHIFT 0x1c
++#define SWRST_CONTROL_1__RESETCPM_RCEN__SHIFT 0x1d
++#define SWRST_CONTROL_1__RESETPHY0_RCEN__SHIFT 0x1e
++#define SWRST_CONTROL_1__SWITCHCLK_RCEN_MASK 0x00200000L
++#define SWRST_CONTROL_1__RESETAXIMST_RCEN_MASK 0x00400000L
++#define SWRST_CONTROL_1__RESETAXISLV_RCEN_MASK 0x00800000L
++#define SWRST_CONTROL_1__RESETAXIINT_RCEN_MASK 0x01000000L
++#define SWRST_CONTROL_1__RESETPCFG_RCEN_MASK 0x02000000L
++#define SWRST_CONTROL_1__RESETLNCT_RCEN_MASK 0x04000000L
++#define SWRST_CONTROL_1__RESETMNTR_RCEN_MASK 0x08000000L
++#define SWRST_CONTROL_1__RESETHLTR_RCEN_MASK 0x10000000L
++#define SWRST_CONTROL_1__RESETCPM_RCEN_MASK 0x20000000L
++#define SWRST_CONTROL_1__RESETPHY0_RCEN_MASK 0x40000000L
++//SWRST_CONTROL_2
++#define SWRST_CONTROL_2__PORT0_COR_ATEN__SHIFT 0x0
++#define SWRST_CONTROL_2__PORT0_CFG_ATEN__SHIFT 0x8
++#define SWRST_CONTROL_2__PORT1_CFG_ATEN__SHIFT 0x9
++#define SWRST_CONTROL_2__PORT2_CFG_ATEN__SHIFT 0xa
++#define SWRST_CONTROL_2__PORT3_CFG_ATEN__SHIFT 0xb
++#define SWRST_CONTROL_2__PORT4_CFG_ATEN__SHIFT 0xc
++#define SWRST_CONTROL_2__PORT5_CFG_ATEN__SHIFT 0xd
++#define SWRST_CONTROL_2__PORT6_CFG_ATEN__SHIFT 0xe
++#define SWRST_CONTROL_2__PORT7_CFG_ATEN__SHIFT 0xf
++#define SWRST_CONTROL_2__BIF0_GLOBAL_RESETATEN__SHIFT 0x18
++#define SWRST_CONTROL_2__BIF0_CALIB_RESETATEN__SHIFT 0x19
++#define SWRST_CONTROL_2__BIF0_CORE_RESETATEN__SHIFT 0x1a
++#define SWRST_CONTROL_2__BIF0_REGISTER_RESETATEN__SHIFT 0x1b
++#define SWRST_CONTROL_2__BIF0_PHY_RESETATEN__SHIFT 0x1c
++#define SWRST_CONTROL_2__BIF0_STICKY_RESETATEN__SHIFT 0x1d
++#define SWRST_CONTROL_2__BIF0_CONFIG_RESETATEN__SHIFT 0x1e
++#define SWRST_CONTROL_2__PORT0_COR_ATEN_MASK 0x00000001L
++#define SWRST_CONTROL_2__PORT0_CFG_ATEN_MASK 0x00000100L
++#define SWRST_CONTROL_2__PORT1_CFG_ATEN_MASK 0x00000200L
++#define SWRST_CONTROL_2__PORT2_CFG_ATEN_MASK 0x00000400L
++#define SWRST_CONTROL_2__PORT3_CFG_ATEN_MASK 0x00000800L
++#define SWRST_CONTROL_2__PORT4_CFG_ATEN_MASK 0x00001000L
++#define SWRST_CONTROL_2__PORT5_CFG_ATEN_MASK 0x00002000L
++#define SWRST_CONTROL_2__PORT6_CFG_ATEN_MASK 0x00004000L
++#define SWRST_CONTROL_2__PORT7_CFG_ATEN_MASK 0x00008000L
++#define SWRST_CONTROL_2__BIF0_GLOBAL_RESETATEN_MASK 0x01000000L
++#define SWRST_CONTROL_2__BIF0_CALIB_RESETATEN_MASK 0x02000000L
++#define SWRST_CONTROL_2__BIF0_CORE_RESETATEN_MASK 0x04000000L
++#define SWRST_CONTROL_2__BIF0_REGISTER_RESETATEN_MASK 0x08000000L
++#define SWRST_CONTROL_2__BIF0_PHY_RESETATEN_MASK 0x10000000L
++#define SWRST_CONTROL_2__BIF0_STICKY_RESETATEN_MASK 0x20000000L
++#define SWRST_CONTROL_2__BIF0_CONFIG_RESETATEN_MASK 0x40000000L
++//SWRST_CONTROL_3
++#define SWRST_CONTROL_3__SWITCHCLK_ATEN__SHIFT 0x15
++#define SWRST_CONTROL_3__RESETAXIMST_ATEN__SHIFT 0x16
++#define SWRST_CONTROL_3__RESETAXISLV_ATEN__SHIFT 0x17
++#define SWRST_CONTROL_3__RESETAXIINT_ATEN__SHIFT 0x18
++#define SWRST_CONTROL_3__RESETPCFG_ATEN__SHIFT 0x19
++#define SWRST_CONTROL_3__RESETLNCT_ATEN__SHIFT 0x1a
++#define SWRST_CONTROL_3__RESETMNTR_ATEN__SHIFT 0x1b
++#define SWRST_CONTROL_3__RESETHLTR_ATEN__SHIFT 0x1c
++#define SWRST_CONTROL_3__RESETCPM_ATEN__SHIFT 0x1d
++#define SWRST_CONTROL_3__RESETPHY0_ATEN__SHIFT 0x1e
++#define SWRST_CONTROL_3__SWITCHCLK_ATEN_MASK 0x00200000L
++#define SWRST_CONTROL_3__RESETAXIMST_ATEN_MASK 0x00400000L
++#define SWRST_CONTROL_3__RESETAXISLV_ATEN_MASK 0x00800000L
++#define SWRST_CONTROL_3__RESETAXIINT_ATEN_MASK 0x01000000L
++#define SWRST_CONTROL_3__RESETPCFG_ATEN_MASK 0x02000000L
++#define SWRST_CONTROL_3__RESETLNCT_ATEN_MASK 0x04000000L
++#define SWRST_CONTROL_3__RESETMNTR_ATEN_MASK 0x08000000L
++#define SWRST_CONTROL_3__RESETHLTR_ATEN_MASK 0x10000000L
++#define SWRST_CONTROL_3__RESETCPM_ATEN_MASK 0x20000000L
++#define SWRST_CONTROL_3__RESETPHY0_ATEN_MASK 0x40000000L
++//SWRST_CONTROL_4
++#define SWRST_CONTROL_4__PORT0_COR_WREN__SHIFT 0x0
++#define SWRST_CONTROL_4__PORT0_CFG_WREN__SHIFT 0x8
++#define SWRST_CONTROL_4__PORT1_CFG_WREN__SHIFT 0x9
++#define SWRST_CONTROL_4__PORT2_CFG_WREN__SHIFT 0xa
++#define SWRST_CONTROL_4__PORT3_CFG_WREN__SHIFT 0xb
++#define SWRST_CONTROL_4__PORT4_CFG_WREN__SHIFT 0xc
++#define SWRST_CONTROL_4__PORT5_CFG_WREN__SHIFT 0xd
++#define SWRST_CONTROL_4__PORT6_CFG_WREN__SHIFT 0xe
++#define SWRST_CONTROL_4__PORT7_CFG_WREN__SHIFT 0xf
++#define SWRST_CONTROL_4__BIF0_GLOBAL_WRRESETEN__SHIFT 0x18
++#define SWRST_CONTROL_4__BIF0_CALIB_WRRESETEN__SHIFT 0x19
++#define SWRST_CONTROL_4__BIF0_CORE_WRRESETEN__SHIFT 0x1a
++#define SWRST_CONTROL_4__BIF0_REGISTER_WRRESETEN__SHIFT 0x1b
++#define SWRST_CONTROL_4__BIF0_PHY_WRRESETEN__SHIFT 0x1c
++#define SWRST_CONTROL_4__BIF0_STICKY_WRRESETEN__SHIFT 0x1d
++#define SWRST_CONTROL_4__BIF0_CONFIG_WRRESETEN__SHIFT 0x1e
++#define SWRST_CONTROL_4__PORT0_COR_WREN_MASK 0x00000001L
++#define SWRST_CONTROL_4__PORT0_CFG_WREN_MASK 0x00000100L
++#define SWRST_CONTROL_4__PORT1_CFG_WREN_MASK 0x00000200L
++#define SWRST_CONTROL_4__PORT2_CFG_WREN_MASK 0x00000400L
++#define SWRST_CONTROL_4__PORT3_CFG_WREN_MASK 0x00000800L
++#define SWRST_CONTROL_4__PORT4_CFG_WREN_MASK 0x00001000L
++#define SWRST_CONTROL_4__PORT5_CFG_WREN_MASK 0x00002000L
++#define SWRST_CONTROL_4__PORT6_CFG_WREN_MASK 0x00004000L
++#define SWRST_CONTROL_4__PORT7_CFG_WREN_MASK 0x00008000L
++#define SWRST_CONTROL_4__BIF0_GLOBAL_WRRESETEN_MASK 0x01000000L
++#define SWRST_CONTROL_4__BIF0_CALIB_WRRESETEN_MASK 0x02000000L
++#define SWRST_CONTROL_4__BIF0_CORE_WRRESETEN_MASK 0x04000000L
++#define SWRST_CONTROL_4__BIF0_REGISTER_WRRESETEN_MASK 0x08000000L
++#define SWRST_CONTROL_4__BIF0_PHY_WRRESETEN_MASK 0x10000000L
++#define SWRST_CONTROL_4__BIF0_STICKY_WRRESETEN_MASK 0x20000000L
++#define SWRST_CONTROL_4__BIF0_CONFIG_WRRESETEN_MASK 0x40000000L
++//SWRST_CONTROL_5
++#define SWRST_CONTROL_5__WRSWITCHCLK_EN__SHIFT 0x15
++#define SWRST_CONTROL_5__WRRESETAXIMST_EN__SHIFT 0x16
++#define SWRST_CONTROL_5__WRRESETAXISLV_EN__SHIFT 0x17
++#define SWRST_CONTROL_5__WRRESETAXIINT_EN__SHIFT 0x18
++#define SWRST_CONTROL_5__WRRESETPCFG_EN__SHIFT 0x19
++#define SWRST_CONTROL_5__WRRESETLNCT_EN__SHIFT 0x1a
++#define SWRST_CONTROL_5__WRRESETMNTR_EN__SHIFT 0x1b
++#define SWRST_CONTROL_5__WRRESETHLTR_EN__SHIFT 0x1c
++#define SWRST_CONTROL_5__WRRESETCPM_EN__SHIFT 0x1d
++#define SWRST_CONTROL_5__WRRESETPHY0_EN__SHIFT 0x1e
++#define SWRST_CONTROL_5__WRSWITCHCLK_EN_MASK 0x00200000L
++#define SWRST_CONTROL_5__WRRESETAXIMST_EN_MASK 0x00400000L
++#define SWRST_CONTROL_5__WRRESETAXISLV_EN_MASK 0x00800000L
++#define SWRST_CONTROL_5__WRRESETAXIINT_EN_MASK 0x01000000L
++#define SWRST_CONTROL_5__WRRESETPCFG_EN_MASK 0x02000000L
++#define SWRST_CONTROL_5__WRRESETLNCT_EN_MASK 0x04000000L
++#define SWRST_CONTROL_5__WRRESETMNTR_EN_MASK 0x08000000L
++#define SWRST_CONTROL_5__WRRESETHLTR_EN_MASK 0x10000000L
++#define SWRST_CONTROL_5__WRRESETCPM_EN_MASK 0x20000000L
++#define SWRST_CONTROL_5__WRRESETPHY0_EN_MASK 0x40000000L
++//SWRST_CONTROL_6
++#define SWRST_CONTROL_6__HOLD_TRAINING_A__SHIFT 0x0
++#define SWRST_CONTROL_6__HOLD_TRAINING_B__SHIFT 0x1
++#define SWRST_CONTROL_6__HOLD_TRAINING_C__SHIFT 0x2
++#define SWRST_CONTROL_6__HOLD_TRAINING_D__SHIFT 0x3
++#define SWRST_CONTROL_6__HOLD_TRAINING_E__SHIFT 0x4
++#define SWRST_CONTROL_6__HOLD_TRAINING_F__SHIFT 0x5
++#define SWRST_CONTROL_6__HOLD_TRAINING_G__SHIFT 0x6
++#define SWRST_CONTROL_6__HOLD_TRAINING_H__SHIFT 0x7
++#define SWRST_CONTROL_6__HOLD_TRAINING_I__SHIFT 0x8
++#define SWRST_CONTROL_6__HOLD_TRAINING_J__SHIFT 0x9
++#define SWRST_CONTROL_6__HOLD_TRAINING_K__SHIFT 0xa
++#define SWRST_CONTROL_6__HOLD_TRAINING_A_MASK 0x00000001L
++#define SWRST_CONTROL_6__HOLD_TRAINING_B_MASK 0x00000002L
++#define SWRST_CONTROL_6__HOLD_TRAINING_C_MASK 0x00000004L
++#define SWRST_CONTROL_6__HOLD_TRAINING_D_MASK 0x00000008L
++#define SWRST_CONTROL_6__HOLD_TRAINING_E_MASK 0x00000010L
++#define SWRST_CONTROL_6__HOLD_TRAINING_F_MASK 0x00000020L
++#define SWRST_CONTROL_6__HOLD_TRAINING_G_MASK 0x00000040L
++#define SWRST_CONTROL_6__HOLD_TRAINING_H_MASK 0x00000080L
++#define SWRST_CONTROL_6__HOLD_TRAINING_I_MASK 0x00000100L
++#define SWRST_CONTROL_6__HOLD_TRAINING_J_MASK 0x00000200L
++#define SWRST_CONTROL_6__HOLD_TRAINING_K_MASK 0x00000400L
++//SWRST_EP_COMMAND_0
++#define SWRST_EP_COMMAND_0__EP_CFG_RESET_ONLY__SHIFT 0x0
++#define SWRST_EP_COMMAND_0__EP_HOT_RESET__SHIFT 0x8
++#define SWRST_EP_COMMAND_0__EP_LNKDWN_RESET__SHIFT 0x9
++#define SWRST_EP_COMMAND_0__EP_LNKDIS_RESET__SHIFT 0xa
++#define SWRST_EP_COMMAND_0__EP_CFG_RESET_ONLY_MASK 0x00000001L
++#define SWRST_EP_COMMAND_0__EP_HOT_RESET_MASK 0x00000100L
++#define SWRST_EP_COMMAND_0__EP_LNKDWN_RESET_MASK 0x00000200L
++#define SWRST_EP_COMMAND_0__EP_LNKDIS_RESET_MASK 0x00000400L
++//SWRST_EP_CONTROL_0
++#define SWRST_EP_CONTROL_0__EP_CFG_RESET_ONLY_EN__SHIFT 0x0
++#define SWRST_EP_CONTROL_0__EP_HOT_RESET_EN__SHIFT 0x8
++#define SWRST_EP_CONTROL_0__EP_LNKDWN_RESET_EN__SHIFT 0x9
++#define SWRST_EP_CONTROL_0__EP_LNKDIS_RESET_EN__SHIFT 0xa
++#define SWRST_EP_CONTROL_0__EP_CFG_RESET_ONLY_EN_MASK 0x00000001L
++#define SWRST_EP_CONTROL_0__EP_HOT_RESET_EN_MASK 0x00000100L
++#define SWRST_EP_CONTROL_0__EP_LNKDWN_RESET_EN_MASK 0x00000200L
++#define SWRST_EP_CONTROL_0__EP_LNKDIS_RESET_EN_MASK 0x00000400L
++//CPM_CONTROL
++#define CPM_CONTROL__LCLK_DYN_GATE_ENABLE__SHIFT 0x0
++#define CPM_CONTROL__TXCLK_DYN_GATE_ENABLE__SHIFT 0x1
++#define CPM_CONTROL__L1_PWR_GATE_ENABLE__SHIFT 0x2
++#define CPM_CONTROL__L1_1_PWR_GATE_ENABLE__SHIFT 0x3
++#define CPM_CONTROL__L1_2_PWR_GATE_ENABLE__SHIFT 0x4
++#define CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE__SHIFT 0x5
++#define CPM_CONTROL__TXCLK_REGS_GATE_ENABLE__SHIFT 0x6
++#define CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE__SHIFT 0x7
++#define CPM_CONTROL__REFCLK_REGS_GATE_ENABLE__SHIFT 0x8
++#define CPM_CONTROL__LCLK_DYN_GATE_LATENCY__SHIFT 0x9
++#define CPM_CONTROL__TXCLK_DYN_GATE_LATENCY__SHIFT 0xb
++#define CPM_CONTROL__REFCLKREQ_REFCLKACK_LOOPBACK_ENABLE__SHIFT 0xd
++#define CPM_CONTROL__TXCLK_REGS_GATE_LATENCY__SHIFT 0xe
++#define CPM_CONTROL__REFCLK_REGS_GATE_LATENCY__SHIFT 0xf
++#define CPM_CONTROL__LCLK_GATE_TXCLK_FREE__SHIFT 0x10
++#define CPM_CONTROL__RCVR_DET_CLK_ENABLE__SHIFT 0x11
++#define CPM_CONTROL__FAST_TXCLK_LATENCY__SHIFT 0x12
++#define CPM_CONTROL__REGS_IDLE_TO_PG_LATENCY__SHIFT 0x15
++#define CPM_CONTROL__REFCLK_XSTCLK_ENABLE__SHIFT 0x16
++#define CPM_CONTROL__REFCLK_XSTCLK_LATENCY__SHIFT 0x17
++#define CPM_CONTROL__CLKREQb_UNGATE_TXCLK_ENABLE__SHIFT 0x18
++#define CPM_CONTROL__LCLK_GATE_ALLOW_IN_L1__SHIFT 0x19
++#define CPM_CONTROL__PG_EARLY_WAKE_ENABLE__SHIFT 0x1a
++#define CPM_CONTROL__PCIE_CORE_IDLE__SHIFT 0x1b
++#define CPM_CONTROL__PCIE_LINK_IDLE__SHIFT 0x1c
++#define CPM_CONTROL__PCIE_BUFFER_EMPTY__SHIFT 0x1d
++#define CPM_CONTROL__SPARE_REGS0__SHIFT 0x1e
++#define CPM_CONTROL__IGNORE_REGS_IDLE_IN_PG__SHIFT 0x1f
++#define CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK 0x00000001L
++#define CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK 0x00000002L
++#define CPM_CONTROL__L1_PWR_GATE_ENABLE_MASK 0x00000004L
++#define CPM_CONTROL__L1_1_PWR_GATE_ENABLE_MASK 0x00000008L
++#define CPM_CONTROL__L1_2_PWR_GATE_ENABLE_MASK 0x00000010L
++#define CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK 0x00000020L
++#define CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK 0x00000040L
++#define CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK 0x00000080L
++#define CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK 0x00000100L
++#define CPM_CONTROL__LCLK_DYN_GATE_LATENCY_MASK 0x00000600L
++#define CPM_CONTROL__TXCLK_DYN_GATE_LATENCY_MASK 0x00001800L
++#define CPM_CONTROL__REFCLKREQ_REFCLKACK_LOOPBACK_ENABLE_MASK 0x00002000L
++#define CPM_CONTROL__TXCLK_REGS_GATE_LATENCY_MASK 0x00004000L
++#define CPM_CONTROL__REFCLK_REGS_GATE_LATENCY_MASK 0x00008000L
++#define CPM_CONTROL__LCLK_GATE_TXCLK_FREE_MASK 0x00010000L
++#define CPM_CONTROL__RCVR_DET_CLK_ENABLE_MASK 0x00020000L
++#define CPM_CONTROL__FAST_TXCLK_LATENCY_MASK 0x001C0000L
++#define CPM_CONTROL__REGS_IDLE_TO_PG_LATENCY_MASK 0x00200000L
++#define CPM_CONTROL__REFCLK_XSTCLK_ENABLE_MASK 0x00400000L
++#define CPM_CONTROL__REFCLK_XSTCLK_LATENCY_MASK 0x00800000L
++#define CPM_CONTROL__CLKREQb_UNGATE_TXCLK_ENABLE_MASK 0x01000000L
++#define CPM_CONTROL__LCLK_GATE_ALLOW_IN_L1_MASK 0x02000000L
++#define CPM_CONTROL__PG_EARLY_WAKE_ENABLE_MASK 0x04000000L
++#define CPM_CONTROL__PCIE_CORE_IDLE_MASK 0x08000000L
++#define CPM_CONTROL__PCIE_LINK_IDLE_MASK 0x10000000L
++#define CPM_CONTROL__PCIE_BUFFER_EMPTY_MASK 0x20000000L
++#define CPM_CONTROL__SPARE_REGS0_MASK 0x40000000L
++#define CPM_CONTROL__IGNORE_REGS_IDLE_IN_PG_MASK 0x80000000L
++//SMN_APERTURE_ID_A
++#define SMN_APERTURE_ID_A__SMU_APERTURE_ID__SHIFT 0x0
++#define SMN_APERTURE_ID_A__SMU_APERTURE_ID_MASK 0x00000FFFL
++//SMN_APERTURE_ID_B
++#define SMN_APERTURE_ID_B__IOHUB_APERTURE_ID__SHIFT 0x0
++#define SMN_APERTURE_ID_B__NBIF_APERTURE_ID__SHIFT 0xc
++#define SMN_APERTURE_ID_B__IOHUB_APERTURE_ID_MASK 0x00000FFFL
++#define SMN_APERTURE_ID_B__NBIF_APERTURE_ID_MASK 0x00FFF000L
++//RSMU_MASTER_CONTROL
++#define RSMU_MASTER_CONTROL__RSMU_MASTER_MESSAGE_SEND_ENABLE__SHIFT 0x0
++#define RSMU_MASTER_CONTROL__RSMU_MASTER_MESSAGE_SEND_ENABLE_MASK 0x00000001L
++//RSMU_SLAVE_CONTROL
++#define RSMU_SLAVE_CONTROL__RSMU_SLAVE_INVALID_READ_RETURN_ZERO__SHIFT 0x0
++#define RSMU_SLAVE_CONTROL__RSMU_SLAVE_IGNORE_INVALID_CONFIG_WRITE__SHIFT 0x2
++#define RSMU_SLAVE_CONTROL__RSMU_SLAVE_INVALID_READ_RETURN_ZERO_MASK 0x00000001L
++#define RSMU_SLAVE_CONTROL__RSMU_SLAVE_IGNORE_INVALID_CONFIG_WRITE_MASK 0x00000004L
++//RSMU_POWER_GATING_CONTROL
++#define RSMU_POWER_GATING_CONTROL__PWR_GATE_MAC_ONLY__SHIFT 0x0
++#define RSMU_POWER_GATING_CONTROL__PWR_GATE_PHY_ONLY__SHIFT 0x1
++#define RSMU_POWER_GATING_CONTROL__PWR_GATE_MAC_ONLY_MASK 0x00000001L
++#define RSMU_POWER_GATING_CONTROL__PWR_GATE_PHY_ONLY_MASK 0x00000002L
++//RSMU_BIOS_TIMER_CMD
++#define RSMU_BIOS_TIMER_CMD__CFG_TMR_MICROSECONDS__SHIFT 0x0
++#define RSMU_BIOS_TIMER_CMD__CFG_TMR_MICROSECONDS_MASK 0xFFFFFFFFL
++//RSMU_BIOS_TIMER_CNTL
++#define RSMU_BIOS_TIMER_CNTL__CFG_TMR_CLOCKRATE__SHIFT 0x0
++#define RSMU_BIOS_TIMER_CNTL__CFG_TMR_CLOCKRATE_MASK 0x000000FFL
++//LNCNT_CONTROL
++#define LNCNT_CONTROL__CFG_LNC_WINDOW_EN__SHIFT 0x0
++#define LNCNT_CONTROL__CFG_LNC_BW_CNT_EN__SHIFT 0x1
++#define LNCNT_CONTROL__CFG_LNC_CMN_CNT_EN__SHIFT 0x2
++#define LNCNT_CONTROL__CFG_LNC_OVRD_EN__SHIFT 0x3
++#define LNCNT_CONTROL__CFG_LNC_OVRD_VAL__SHIFT 0x4
++#define LNCNT_CONTROL__CFG_LNC_WINDOW_EN_MASK 0x00000001L
++#define LNCNT_CONTROL__CFG_LNC_BW_CNT_EN_MASK 0x00000002L
++#define LNCNT_CONTROL__CFG_LNC_CMN_CNT_EN_MASK 0x00000004L
++#define LNCNT_CONTROL__CFG_LNC_OVRD_EN_MASK 0x00000008L
++#define LNCNT_CONTROL__CFG_LNC_OVRD_VAL_MASK 0x00000010L
++//CFG_LNC_WINDOW_REGISTER
++#define CFG_LNC_WINDOW_REGISTER__CFG_LNC_WINDOW__SHIFT 0x0
++#define CFG_LNC_WINDOW_REGISTER__CFG_LNC_WINDOW_MASK 0x00FFFFFFL
++//LNCNT_QUAN_THRD
++#define LNCNT_QUAN_THRD__CFG_LNC_BW_QUAN_THRD__SHIFT 0x0
++#define LNCNT_QUAN_THRD__CFG_LNC_CMN_QUAN_THRD__SHIFT 0x4
++#define LNCNT_QUAN_THRD__CFG_LNC_BW_QUAN_THRD_MASK 0x00000007L
++#define LNCNT_QUAN_THRD__CFG_LNC_CMN_QUAN_THRD_MASK 0x00000070L
++//LNCNT_WEIGHT
++#define LNCNT_WEIGHT__CFG_LNC_BW_WEIGHT__SHIFT 0x0
++#define LNCNT_WEIGHT__CFG_LNC_CMN_WEIGHT__SHIFT 0x10
++#define LNCNT_WEIGHT__CFG_LNC_BW_WEIGHT_MASK 0x0000FFFFL
++#define LNCNT_WEIGHT__CFG_LNC_CMN_WEIGHT_MASK 0xFFFF0000L
++//LNC_TOTAL_WACC_REGISTER
++#define LNC_TOTAL_WACC_REGISTER__LNC_TOTAL_WACC__SHIFT 0x0
++#define LNC_TOTAL_WACC_REGISTER__LNC_TOTAL_WACC_MASK 0xFFFFFFFFL
++//LNC_BW_WACC_REGISTER
++#define LNC_BW_WACC_REGISTER__LNC_BW_WACC__SHIFT 0x0
++#define LNC_BW_WACC_REGISTER__LNC_BW_WACC_MASK 0xFFFFFFFFL
++//LNC_CMN_WACC_REGISTER
++#define LNC_CMN_WACC_REGISTER__LNC_CMN_WACC__SHIFT 0x0
++#define LNC_CMN_WACC_REGISTER__LNC_CMN_WACC_MASK 0xFFFFFFFFL
++//SMU_INT_PIN_SHARING_PORT_INDICATOR
++#define SMU_INT_PIN_SHARING_PORT_INDICATOR__LINK_MANAGEMENT_INT_STATUS__SHIFT 0x0
++#define SMU_INT_PIN_SHARING_PORT_INDICATOR__LTR_INT_STATUS__SHIFT 0x8
++#define SMU_INT_PIN_SHARING_PORT_INDICATOR__DPC_INT_STATUS__SHIFT 0x10
++#define SMU_INT_PIN_SHARING_PORT_INDICATOR__LINK_MANAGEMENT_INT_STATUS_MASK 0x000000FFL
++#define SMU_INT_PIN_SHARING_PORT_INDICATOR__LTR_INT_STATUS_MASK 0x0000FF00L
++#define SMU_INT_PIN_SHARING_PORT_INDICATOR__DPC_INT_STATUS_MASK 0x00FF0000L
++//PCIE_PGMST_CNTL
++#define PCIE_PGMST_CNTL__CFG_PG_HYSTERESIS__SHIFT 0x0
++#define PCIE_PGMST_CNTL__CFG_PG_EN__SHIFT 0x8
++#define PCIE_PGMST_CNTL__CFG_IDLENESS_COUNT_EN__SHIFT 0xa
++#define PCIE_PGMST_CNTL__CFG_FW_PG_EXIT_CNTL__SHIFT 0xe
++#define PCIE_PGMST_CNTL__CFG_PG_HYSTERESIS_MASK 0x000000FFL
++#define PCIE_PGMST_CNTL__CFG_PG_EN_MASK 0x00000100L
++#define PCIE_PGMST_CNTL__CFG_IDLENESS_COUNT_EN_MASK 0x00003C00L
++#define PCIE_PGMST_CNTL__CFG_FW_PG_EXIT_CNTL_MASK 0x0000C000L
++//PCIE_PGSLV_CNTL
++#define PCIE_PGSLV_CNTL__CFG_IDLE_HYSTERESIS__SHIFT 0x0
++#define PCIE_PGSLV_CNTL__CFG_IDLE_HYSTERESIS_MASK 0x0000001FL
++//SMU_PCIE_DF_Address
++#define SMU_PCIE_DF_Address__RAS_INTR_CTL_addr__SHIFT 0x0
++#define SMU_PCIE_DF_Address__RAS_INTR_CTL_addr_MASK 0x000FFFFFL
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf0_SYSPFVFDEC
++//BIF_BX_DEV0_EPF0_VF0_MM_INDEX
++#define BIF_BX_DEV0_EPF0_VF0_MM_INDEX__MM_OFFSET__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF0_MM_INDEX__MM_APER__SHIFT 0x1f
++#define BIF_BX_DEV0_EPF0_VF0_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
++#define BIF_BX_DEV0_EPF0_VF0_MM_INDEX__MM_APER_MASK 0x80000000L
++//BIF_BX_DEV0_EPF0_VF0_MM_DATA
++#define BIF_BX_DEV0_EPF0_VF0_MM_DATA__MM_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF0_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF0_MM_INDEX_HI
++#define BIF_BX_DEV0_EPF0_VF0_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF0_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf0_BIFPFVFDEC1
++//RCC_DEV0_EPF0_VF0_RCC_ERR_LOG
++#define RCC_DEV0_EPF0_VF0_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF0_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
++#define RCC_DEV0_EPF0_VF0_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF0_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
++//RCC_DEV0_EPF0_VF0_RCC_DOORBELL_APER_EN
++#define RCC_DEV0_EPF0_VF0_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF0_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF0_RCC_CONFIG_MEMSIZE
++#define RCC_DEV0_EPF0_VF0_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF0_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF0_RCC_CONFIG_RESERVED
++#define RCC_DEV0_EPF0_VF0_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF0_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF0_RCC_IOV_FUNC_IDENTIFIER
++#define RCC_DEV0_EPF0_VF0_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF0_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
++#define RCC_DEV0_EPF0_VF0_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF0_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf0_BIFPFVFDEC1
++//BIF_BX_DEV0_EPF0_VF0_BIF_BME_STATUS
++#define BIF_BX_DEV0_EPF0_VF0_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF0_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF0_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF0_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
++//BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG
++#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
++#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
++#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
++#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
++#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
++#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
++#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
++//BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
++#define BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW
++#define BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_CNTL
++#define BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
++//BIF_BX_DEV0_EPF0_VF0_HDP_REG_COHERENCY_FLUSH_CNTL
++#define BIF_BX_DEV0_EPF0_VF0_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF0_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL
++#define BIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
++//BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
++#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
++//BIF_BX_DEV0_EPF0_VF0_BIF_TRANS_PENDING
++#define BIF_BX_DEV0_EPF0_VF0_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF0_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF0_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF0_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
++//BIF_BX_DEV0_EPF0_VF0_NBIF_GFX_ADDR_LUT_BYPASS
++#define BIF_BX_DEV0_EPF0_VF0_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF0_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW0
++#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW1
++#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW2
++#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW3
++#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW0
++#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW1
++#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW2
++#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW3
++#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF0_MAILBOX_CONTROL
++#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
++//BIF_BX_DEV0_EPF0_VF0_MAILBOX_INT_CNTL
++#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
++//BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX
++#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
++#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
++#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
++#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
++#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
++#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
++#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
++#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
++#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
++#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf0_BIFDEC2
++//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_ADDR_LO
++#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_ADDR_HI
++#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_MSG_DATA
++#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_CONTROL
++#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_ADDR_LO
++#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_ADDR_HI
++#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_MSG_DATA
++#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_CONTROL
++#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_ADDR_LO
++#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_ADDR_HI
++#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_MSG_DATA
++#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_CONTROL
++#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF0_GFXMSIX_PBA
++#define RCC_DEV0_EPF0_VF0_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF0_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
++#define RCC_DEV0_EPF0_VF0_GFXMSIX_PBA__MSIX_PENDING_BITS_2__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF0_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF0_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
++#define RCC_DEV0_EPF0_VF0_GFXMSIX_PBA__MSIX_PENDING_BITS_2_MASK 0x00000004L
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf1_SYSPFVFDEC
++//BIF_BX_DEV0_EPF0_VF1_MM_INDEX
++#define BIF_BX_DEV0_EPF0_VF1_MM_INDEX__MM_OFFSET__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF1_MM_INDEX__MM_APER__SHIFT 0x1f
++#define BIF_BX_DEV0_EPF0_VF1_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
++#define BIF_BX_DEV0_EPF0_VF1_MM_INDEX__MM_APER_MASK 0x80000000L
++//BIF_BX_DEV0_EPF0_VF1_MM_DATA
++#define BIF_BX_DEV0_EPF0_VF1_MM_DATA__MM_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF1_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF1_MM_INDEX_HI
++#define BIF_BX_DEV0_EPF0_VF1_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF1_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf1_BIFPFVFDEC1
++//RCC_DEV0_EPF0_VF1_RCC_ERR_LOG
++#define RCC_DEV0_EPF0_VF1_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF1_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
++#define RCC_DEV0_EPF0_VF1_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF1_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
++//RCC_DEV0_EPF0_VF1_RCC_DOORBELL_APER_EN
++#define RCC_DEV0_EPF0_VF1_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF1_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF1_RCC_CONFIG_MEMSIZE
++#define RCC_DEV0_EPF0_VF1_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF1_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF1_RCC_CONFIG_RESERVED
++#define RCC_DEV0_EPF0_VF1_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF1_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF1_RCC_IOV_FUNC_IDENTIFIER
++#define RCC_DEV0_EPF0_VF1_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF1_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
++#define RCC_DEV0_EPF0_VF1_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF1_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf1_BIFPFVFDEC1
++//BIF_BX_DEV0_EPF0_VF1_BIF_BME_STATUS
++#define BIF_BX_DEV0_EPF0_VF1_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF1_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF1_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF1_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
++//BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG
++#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
++#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
++#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
++#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
++#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
++#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
++#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
++//BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
++#define BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_BASE_LOW
++#define BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_CNTL
++#define BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
++//BIF_BX_DEV0_EPF0_VF1_HDP_REG_COHERENCY_FLUSH_CNTL
++#define BIF_BX_DEV0_EPF0_VF1_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF1_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF1_HDP_MEM_COHERENCY_FLUSH_CNTL
++#define BIF_BX_DEV0_EPF0_VF1_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF1_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
++//BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
++#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
++//BIF_BX_DEV0_EPF0_VF1_BIF_TRANS_PENDING
++#define BIF_BX_DEV0_EPF0_VF1_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF1_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF1_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF1_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
++//BIF_BX_DEV0_EPF0_VF1_NBIF_GFX_ADDR_LUT_BYPASS
++#define BIF_BX_DEV0_EPF0_VF1_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF1_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW0
++#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW1
++#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW2
++#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW3
++#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW0
++#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW1
++#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW2
++#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW3
++#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF1_MAILBOX_CONTROL
++#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
++//BIF_BX_DEV0_EPF0_VF1_MAILBOX_INT_CNTL
++#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
++//BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX
++#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
++#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
++#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
++#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
++#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
++#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
++#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
++#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
++#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
++#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf1_BIFDEC2
++//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_ADDR_LO
++#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_ADDR_HI
++#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_MSG_DATA
++#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_CONTROL
++#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_ADDR_LO
++#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_ADDR_HI
++#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_MSG_DATA
++#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_CONTROL
++#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_ADDR_LO
++#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_ADDR_HI
++#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_MSG_DATA
++#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_CONTROL
++#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF1_GFXMSIX_PBA
++#define RCC_DEV0_EPF0_VF1_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF1_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
++#define RCC_DEV0_EPF0_VF1_GFXMSIX_PBA__MSIX_PENDING_BITS_2__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF1_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF1_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
++#define RCC_DEV0_EPF0_VF1_GFXMSIX_PBA__MSIX_PENDING_BITS_2_MASK 0x00000004L
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf2_SYSPFVFDEC
++//BIF_BX_DEV0_EPF0_VF2_MM_INDEX
++#define BIF_BX_DEV0_EPF0_VF2_MM_INDEX__MM_OFFSET__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF2_MM_INDEX__MM_APER__SHIFT 0x1f
++#define BIF_BX_DEV0_EPF0_VF2_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
++#define BIF_BX_DEV0_EPF0_VF2_MM_INDEX__MM_APER_MASK 0x80000000L
++//BIF_BX_DEV0_EPF0_VF2_MM_DATA
++#define BIF_BX_DEV0_EPF0_VF2_MM_DATA__MM_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF2_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF2_MM_INDEX_HI
++#define BIF_BX_DEV0_EPF0_VF2_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF2_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf2_BIFPFVFDEC1
++//RCC_DEV0_EPF0_VF2_RCC_ERR_LOG
++#define RCC_DEV0_EPF0_VF2_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF2_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
++#define RCC_DEV0_EPF0_VF2_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF2_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
++//RCC_DEV0_EPF0_VF2_RCC_DOORBELL_APER_EN
++#define RCC_DEV0_EPF0_VF2_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF2_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF2_RCC_CONFIG_MEMSIZE
++#define RCC_DEV0_EPF0_VF2_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF2_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF2_RCC_CONFIG_RESERVED
++#define RCC_DEV0_EPF0_VF2_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF2_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF2_RCC_IOV_FUNC_IDENTIFIER
++#define RCC_DEV0_EPF0_VF2_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF2_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
++#define RCC_DEV0_EPF0_VF2_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF2_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf2_BIFPFVFDEC1
++//BIF_BX_DEV0_EPF0_VF2_BIF_BME_STATUS
++#define BIF_BX_DEV0_EPF0_VF2_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF2_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF2_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF2_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
++//BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG
++#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
++#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
++#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
++#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
++#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
++#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
++#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
++//BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
++#define BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_BASE_LOW
++#define BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_CNTL
++#define BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
++//BIF_BX_DEV0_EPF0_VF2_HDP_REG_COHERENCY_FLUSH_CNTL
++#define BIF_BX_DEV0_EPF0_VF2_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF2_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF2_HDP_MEM_COHERENCY_FLUSH_CNTL
++#define BIF_BX_DEV0_EPF0_VF2_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF2_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
++//BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
++#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
++//BIF_BX_DEV0_EPF0_VF2_BIF_TRANS_PENDING
++#define BIF_BX_DEV0_EPF0_VF2_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF2_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF2_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF2_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
++//BIF_BX_DEV0_EPF0_VF2_NBIF_GFX_ADDR_LUT_BYPASS
++#define BIF_BX_DEV0_EPF0_VF2_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF2_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW0
++#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW1
++#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW2
++#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW3
++#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW0
++#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW1
++#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW2
++#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW3
++#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF2_MAILBOX_CONTROL
++#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
++//BIF_BX_DEV0_EPF0_VF2_MAILBOX_INT_CNTL
++#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
++//BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX
++#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
++#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
++#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
++#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
++#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
++#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
++#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
++#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
++#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
++#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf2_BIFDEC2
++//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_ADDR_LO
++#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_ADDR_HI
++#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_MSG_DATA
++#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_CONTROL
++#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_ADDR_LO
++#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_ADDR_HI
++#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_MSG_DATA
++#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_CONTROL
++#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_ADDR_LO
++#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_ADDR_HI
++#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_MSG_DATA
++#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_CONTROL
++#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF2_GFXMSIX_PBA
++#define RCC_DEV0_EPF0_VF2_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF2_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
++#define RCC_DEV0_EPF0_VF2_GFXMSIX_PBA__MSIX_PENDING_BITS_2__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF2_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF2_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
++#define RCC_DEV0_EPF0_VF2_GFXMSIX_PBA__MSIX_PENDING_BITS_2_MASK 0x00000004L
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf3_SYSPFVFDEC
++//BIF_BX_DEV0_EPF0_VF3_MM_INDEX
++#define BIF_BX_DEV0_EPF0_VF3_MM_INDEX__MM_OFFSET__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF3_MM_INDEX__MM_APER__SHIFT 0x1f
++#define BIF_BX_DEV0_EPF0_VF3_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
++#define BIF_BX_DEV0_EPF0_VF3_MM_INDEX__MM_APER_MASK 0x80000000L
++//BIF_BX_DEV0_EPF0_VF3_MM_DATA
++#define BIF_BX_DEV0_EPF0_VF3_MM_DATA__MM_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF3_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF3_MM_INDEX_HI
++#define BIF_BX_DEV0_EPF0_VF3_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF3_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf3_BIFPFVFDEC1
++//RCC_DEV0_EPF0_VF3_RCC_ERR_LOG
++#define RCC_DEV0_EPF0_VF3_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF3_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
++#define RCC_DEV0_EPF0_VF3_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF3_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
++//RCC_DEV0_EPF0_VF3_RCC_DOORBELL_APER_EN
++#define RCC_DEV0_EPF0_VF3_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF3_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF3_RCC_CONFIG_MEMSIZE
++#define RCC_DEV0_EPF0_VF3_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF3_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF3_RCC_CONFIG_RESERVED
++#define RCC_DEV0_EPF0_VF3_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF3_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF3_RCC_IOV_FUNC_IDENTIFIER
++#define RCC_DEV0_EPF0_VF3_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF3_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
++#define RCC_DEV0_EPF0_VF3_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF3_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf3_BIFPFVFDEC1
++//BIF_BX_DEV0_EPF0_VF3_BIF_BME_STATUS
++#define BIF_BX_DEV0_EPF0_VF3_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF3_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF3_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF3_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
++//BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG
++#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
++#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
++#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
++#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
++#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
++#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
++#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
++//BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
++#define BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_BASE_LOW
++#define BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_CNTL
++#define BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
++//BIF_BX_DEV0_EPF0_VF3_HDP_REG_COHERENCY_FLUSH_CNTL
++#define BIF_BX_DEV0_EPF0_VF3_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF3_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF3_HDP_MEM_COHERENCY_FLUSH_CNTL
++#define BIF_BX_DEV0_EPF0_VF3_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF3_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
++//BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
++#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
++//BIF_BX_DEV0_EPF0_VF3_BIF_TRANS_PENDING
++#define BIF_BX_DEV0_EPF0_VF3_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF3_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF3_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF3_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
++//BIF_BX_DEV0_EPF0_VF3_NBIF_GFX_ADDR_LUT_BYPASS
++#define BIF_BX_DEV0_EPF0_VF3_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF3_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW0
++#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW1
++#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW2
++#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW3
++#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW0
++#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW1
++#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW2
++#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW3
++#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF3_MAILBOX_CONTROL
++#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
++//BIF_BX_DEV0_EPF0_VF3_MAILBOX_INT_CNTL
++#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
++//BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX
++#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
++#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
++#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
++#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
++#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
++#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
++#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
++#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
++#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
++#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf3_BIFDEC2
++//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_ADDR_LO
++#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_ADDR_HI
++#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_MSG_DATA
++#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_CONTROL
++#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_ADDR_LO
++#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_ADDR_HI
++#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_MSG_DATA
++#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_CONTROL
++#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_ADDR_LO
++#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_ADDR_HI
++#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_MSG_DATA
++#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_CONTROL
++#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF3_GFXMSIX_PBA
++#define RCC_DEV0_EPF0_VF3_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF3_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
++#define RCC_DEV0_EPF0_VF3_GFXMSIX_PBA__MSIX_PENDING_BITS_2__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF3_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF3_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
++#define RCC_DEV0_EPF0_VF3_GFXMSIX_PBA__MSIX_PENDING_BITS_2_MASK 0x00000004L
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf4_SYSPFVFDEC
++//BIF_BX_DEV0_EPF0_VF4_MM_INDEX
++#define BIF_BX_DEV0_EPF0_VF4_MM_INDEX__MM_OFFSET__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF4_MM_INDEX__MM_APER__SHIFT 0x1f
++#define BIF_BX_DEV0_EPF0_VF4_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
++#define BIF_BX_DEV0_EPF0_VF4_MM_INDEX__MM_APER_MASK 0x80000000L
++//BIF_BX_DEV0_EPF0_VF4_MM_DATA
++#define BIF_BX_DEV0_EPF0_VF4_MM_DATA__MM_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF4_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF4_MM_INDEX_HI
++#define BIF_BX_DEV0_EPF0_VF4_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF4_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf4_BIFPFVFDEC1
++//RCC_DEV0_EPF0_VF4_RCC_ERR_LOG
++#define RCC_DEV0_EPF0_VF4_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF4_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
++#define RCC_DEV0_EPF0_VF4_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF4_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
++//RCC_DEV0_EPF0_VF4_RCC_DOORBELL_APER_EN
++#define RCC_DEV0_EPF0_VF4_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF4_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF4_RCC_CONFIG_MEMSIZE
++#define RCC_DEV0_EPF0_VF4_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF4_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF4_RCC_CONFIG_RESERVED
++#define RCC_DEV0_EPF0_VF4_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF4_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF4_RCC_IOV_FUNC_IDENTIFIER
++#define RCC_DEV0_EPF0_VF4_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF4_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
++#define RCC_DEV0_EPF0_VF4_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF4_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf4_BIFPFVFDEC1
++//BIF_BX_DEV0_EPF0_VF4_BIF_BME_STATUS
++#define BIF_BX_DEV0_EPF0_VF4_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF4_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF4_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF4_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
++//BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG
++#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
++#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
++#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
++#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
++#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
++#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
++#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
++//BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
++#define BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_BASE_LOW
++#define BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_CNTL
++#define BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
++//BIF_BX_DEV0_EPF0_VF4_HDP_REG_COHERENCY_FLUSH_CNTL
++#define BIF_BX_DEV0_EPF0_VF4_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF4_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF4_HDP_MEM_COHERENCY_FLUSH_CNTL
++#define BIF_BX_DEV0_EPF0_VF4_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF4_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
++//BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
++#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
++//BIF_BX_DEV0_EPF0_VF4_BIF_TRANS_PENDING
++#define BIF_BX_DEV0_EPF0_VF4_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF4_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF4_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF4_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
++//BIF_BX_DEV0_EPF0_VF4_NBIF_GFX_ADDR_LUT_BYPASS
++#define BIF_BX_DEV0_EPF0_VF4_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF4_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW0
++#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW1
++#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW2
++#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW3
++#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW0
++#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW1
++#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW2
++#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW3
++#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF4_MAILBOX_CONTROL
++#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
++//BIF_BX_DEV0_EPF0_VF4_MAILBOX_INT_CNTL
++#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
++//BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX
++#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
++#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
++#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
++#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
++#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
++#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
++#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
++#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
++#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
++#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf4_BIFDEC2
++//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_ADDR_LO
++#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_ADDR_HI
++#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_MSG_DATA
++#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_CONTROL
++#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_ADDR_LO
++#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_ADDR_HI
++#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_MSG_DATA
++#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_CONTROL
++#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_ADDR_LO
++#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_ADDR_HI
++#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_MSG_DATA
++#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_CONTROL
++#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF4_GFXMSIX_PBA
++#define RCC_DEV0_EPF0_VF4_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF4_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
++#define RCC_DEV0_EPF0_VF4_GFXMSIX_PBA__MSIX_PENDING_BITS_2__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF4_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF4_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
++#define RCC_DEV0_EPF0_VF4_GFXMSIX_PBA__MSIX_PENDING_BITS_2_MASK 0x00000004L
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf5_SYSPFVFDEC
++//BIF_BX_DEV0_EPF0_VF5_MM_INDEX
++#define BIF_BX_DEV0_EPF0_VF5_MM_INDEX__MM_OFFSET__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF5_MM_INDEX__MM_APER__SHIFT 0x1f
++#define BIF_BX_DEV0_EPF0_VF5_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
++#define BIF_BX_DEV0_EPF0_VF5_MM_INDEX__MM_APER_MASK 0x80000000L
++//BIF_BX_DEV0_EPF0_VF5_MM_DATA
++#define BIF_BX_DEV0_EPF0_VF5_MM_DATA__MM_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF5_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF5_MM_INDEX_HI
++#define BIF_BX_DEV0_EPF0_VF5_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF5_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf5_BIFPFVFDEC1
++//RCC_DEV0_EPF0_VF5_RCC_ERR_LOG
++#define RCC_DEV0_EPF0_VF5_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF5_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
++#define RCC_DEV0_EPF0_VF5_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF5_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
++//RCC_DEV0_EPF0_VF5_RCC_DOORBELL_APER_EN
++#define RCC_DEV0_EPF0_VF5_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF5_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF5_RCC_CONFIG_MEMSIZE
++#define RCC_DEV0_EPF0_VF5_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF5_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF5_RCC_CONFIG_RESERVED
++#define RCC_DEV0_EPF0_VF5_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF5_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF5_RCC_IOV_FUNC_IDENTIFIER
++#define RCC_DEV0_EPF0_VF5_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF5_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
++#define RCC_DEV0_EPF0_VF5_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF5_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf5_BIFPFVFDEC1
++//BIF_BX_DEV0_EPF0_VF5_BIF_BME_STATUS
++#define BIF_BX_DEV0_EPF0_VF5_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF5_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF5_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF5_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
++//BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG
++#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
++#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
++#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
++#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
++#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
++#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
++#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
++//BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
++#define BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_BASE_LOW
++#define BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_CNTL
++#define BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
++//BIF_BX_DEV0_EPF0_VF5_HDP_REG_COHERENCY_FLUSH_CNTL
++#define BIF_BX_DEV0_EPF0_VF5_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF5_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF5_HDP_MEM_COHERENCY_FLUSH_CNTL
++#define BIF_BX_DEV0_EPF0_VF5_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF5_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
++//BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
++#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
++//BIF_BX_DEV0_EPF0_VF5_BIF_TRANS_PENDING
++#define BIF_BX_DEV0_EPF0_VF5_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF5_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF5_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF5_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
++//BIF_BX_DEV0_EPF0_VF5_NBIF_GFX_ADDR_LUT_BYPASS
++#define BIF_BX_DEV0_EPF0_VF5_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF5_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW0
++#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW1
++#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW2
++#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW3
++#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW0
++#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW1
++#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW2
++#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW3
++#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL
++#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
++//BIF_BX_DEV0_EPF0_VF5_MAILBOX_INT_CNTL
++#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
++//BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX
++#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
++#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
++#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
++#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
++#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
++#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
++#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
++#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
++#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
++#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf5_BIFDEC2
++//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_ADDR_LO
++#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_ADDR_HI
++#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_MSG_DATA
++#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_CONTROL
++#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_ADDR_LO
++#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_ADDR_HI
++#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_MSG_DATA
++#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_CONTROL
++#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_ADDR_LO
++#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_ADDR_HI
++#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_MSG_DATA
++#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_CONTROL
++#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF5_GFXMSIX_PBA
++#define RCC_DEV0_EPF0_VF5_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF5_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
++#define RCC_DEV0_EPF0_VF5_GFXMSIX_PBA__MSIX_PENDING_BITS_2__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF5_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF5_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
++#define RCC_DEV0_EPF0_VF5_GFXMSIX_PBA__MSIX_PENDING_BITS_2_MASK 0x00000004L
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf6_SYSPFVFDEC
++//BIF_BX_DEV0_EPF0_VF6_MM_INDEX
++#define BIF_BX_DEV0_EPF0_VF6_MM_INDEX__MM_OFFSET__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF6_MM_INDEX__MM_APER__SHIFT 0x1f
++#define BIF_BX_DEV0_EPF0_VF6_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
++#define BIF_BX_DEV0_EPF0_VF6_MM_INDEX__MM_APER_MASK 0x80000000L
++//BIF_BX_DEV0_EPF0_VF6_MM_DATA
++#define BIF_BX_DEV0_EPF0_VF6_MM_DATA__MM_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF6_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF6_MM_INDEX_HI
++#define BIF_BX_DEV0_EPF0_VF6_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF6_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf6_BIFPFVFDEC1
++//RCC_DEV0_EPF0_VF6_RCC_ERR_LOG
++#define RCC_DEV0_EPF0_VF6_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF6_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
++#define RCC_DEV0_EPF0_VF6_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF6_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
++//RCC_DEV0_EPF0_VF6_RCC_DOORBELL_APER_EN
++#define RCC_DEV0_EPF0_VF6_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF6_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF6_RCC_CONFIG_MEMSIZE
++#define RCC_DEV0_EPF0_VF6_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF6_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF6_RCC_CONFIG_RESERVED
++#define RCC_DEV0_EPF0_VF6_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF6_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF6_RCC_IOV_FUNC_IDENTIFIER
++#define RCC_DEV0_EPF0_VF6_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF6_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
++#define RCC_DEV0_EPF0_VF6_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF6_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf6_BIFPFVFDEC1
++//BIF_BX_DEV0_EPF0_VF6_BIF_BME_STATUS
++#define BIF_BX_DEV0_EPF0_VF6_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF6_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF6_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF6_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
++//BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG
++#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
++#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
++#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
++#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
++#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
++#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
++#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
++//BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
++#define BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_BASE_LOW
++#define BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_CNTL
++#define BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
++//BIF_BX_DEV0_EPF0_VF6_HDP_REG_COHERENCY_FLUSH_CNTL
++#define BIF_BX_DEV0_EPF0_VF6_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF6_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF6_HDP_MEM_COHERENCY_FLUSH_CNTL
++#define BIF_BX_DEV0_EPF0_VF6_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF6_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
++//BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
++#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
++//BIF_BX_DEV0_EPF0_VF6_BIF_TRANS_PENDING
++#define BIF_BX_DEV0_EPF0_VF6_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF6_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF6_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF6_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
++//BIF_BX_DEV0_EPF0_VF6_NBIF_GFX_ADDR_LUT_BYPASS
++#define BIF_BX_DEV0_EPF0_VF6_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF6_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW0
++#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW1
++#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW2
++#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW3
++#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW0
++#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW1
++#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW2
++#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW3
++#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF6_MAILBOX_CONTROL
++#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
++//BIF_BX_DEV0_EPF0_VF6_MAILBOX_INT_CNTL
++#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
++//BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX
++#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
++#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
++#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
++#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
++#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
++#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
++#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
++#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
++#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
++#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf6_BIFDEC2
++//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_ADDR_LO
++#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_ADDR_HI
++#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_MSG_DATA
++#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_CONTROL
++#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_ADDR_LO
++#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_ADDR_HI
++#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_MSG_DATA
++#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_CONTROL
++#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_ADDR_LO
++#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_ADDR_HI
++#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_MSG_DATA
++#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_CONTROL
++#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF6_GFXMSIX_PBA
++#define RCC_DEV0_EPF0_VF6_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF6_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
++#define RCC_DEV0_EPF0_VF6_GFXMSIX_PBA__MSIX_PENDING_BITS_2__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF6_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF6_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
++#define RCC_DEV0_EPF0_VF6_GFXMSIX_PBA__MSIX_PENDING_BITS_2_MASK 0x00000004L
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf7_SYSPFVFDEC
++//BIF_BX_DEV0_EPF0_VF7_MM_INDEX
++#define BIF_BX_DEV0_EPF0_VF7_MM_INDEX__MM_OFFSET__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF7_MM_INDEX__MM_APER__SHIFT 0x1f
++#define BIF_BX_DEV0_EPF0_VF7_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
++#define BIF_BX_DEV0_EPF0_VF7_MM_INDEX__MM_APER_MASK 0x80000000L
++//BIF_BX_DEV0_EPF0_VF7_MM_DATA
++#define BIF_BX_DEV0_EPF0_VF7_MM_DATA__MM_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF7_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF7_MM_INDEX_HI
++#define BIF_BX_DEV0_EPF0_VF7_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF7_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf7_BIFPFVFDEC1
++//RCC_DEV0_EPF0_VF7_RCC_ERR_LOG
++#define RCC_DEV0_EPF0_VF7_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF7_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
++#define RCC_DEV0_EPF0_VF7_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF7_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
++//RCC_DEV0_EPF0_VF7_RCC_DOORBELL_APER_EN
++#define RCC_DEV0_EPF0_VF7_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF7_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF7_RCC_CONFIG_MEMSIZE
++#define RCC_DEV0_EPF0_VF7_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF7_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF7_RCC_CONFIG_RESERVED
++#define RCC_DEV0_EPF0_VF7_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF7_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF7_RCC_IOV_FUNC_IDENTIFIER
++#define RCC_DEV0_EPF0_VF7_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF7_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
++#define RCC_DEV0_EPF0_VF7_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF7_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf7_BIFPFVFDEC1
++//BIF_BX_DEV0_EPF0_VF7_BIF_BME_STATUS
++#define BIF_BX_DEV0_EPF0_VF7_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF7_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF7_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF7_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
++//BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG
++#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
++#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
++#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
++#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
++#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
++#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
++#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
++//BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
++#define BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_BASE_LOW
++#define BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_CNTL
++#define BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
++//BIF_BX_DEV0_EPF0_VF7_HDP_REG_COHERENCY_FLUSH_CNTL
++#define BIF_BX_DEV0_EPF0_VF7_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF7_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF7_HDP_MEM_COHERENCY_FLUSH_CNTL
++#define BIF_BX_DEV0_EPF0_VF7_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF7_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
++//BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
++#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
++//BIF_BX_DEV0_EPF0_VF7_BIF_TRANS_PENDING
++#define BIF_BX_DEV0_EPF0_VF7_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF7_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF7_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF7_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
++//BIF_BX_DEV0_EPF0_VF7_NBIF_GFX_ADDR_LUT_BYPASS
++#define BIF_BX_DEV0_EPF0_VF7_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF7_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW0
++#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW1
++#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW2
++#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW3
++#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW0
++#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW1
++#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW2
++#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW3
++#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF7_MAILBOX_CONTROL
++#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
++//BIF_BX_DEV0_EPF0_VF7_MAILBOX_INT_CNTL
++#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
++//BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX
++#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
++#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
++#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
++#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
++#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
++#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
++#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
++#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
++#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
++#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf7_BIFDEC2
++//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_ADDR_LO
++#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_ADDR_HI
++#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_MSG_DATA
++#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_CONTROL
++#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_ADDR_LO
++#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_ADDR_HI
++#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_MSG_DATA
++#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_CONTROL
++#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_ADDR_LO
++#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_ADDR_HI
++#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_MSG_DATA
++#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_CONTROL
++#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF7_GFXMSIX_PBA
++#define RCC_DEV0_EPF0_VF7_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF7_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
++#define RCC_DEV0_EPF0_VF7_GFXMSIX_PBA__MSIX_PENDING_BITS_2__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF7_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF7_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
++#define RCC_DEV0_EPF0_VF7_GFXMSIX_PBA__MSIX_PENDING_BITS_2_MASK 0x00000004L
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf8_SYSPFVFDEC
++//BIF_BX_DEV0_EPF0_VF8_MM_INDEX
++#define BIF_BX_DEV0_EPF0_VF8_MM_INDEX__MM_OFFSET__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF8_MM_INDEX__MM_APER__SHIFT 0x1f
++#define BIF_BX_DEV0_EPF0_VF8_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
++#define BIF_BX_DEV0_EPF0_VF8_MM_INDEX__MM_APER_MASK 0x80000000L
++//BIF_BX_DEV0_EPF0_VF8_MM_DATA
++#define BIF_BX_DEV0_EPF0_VF8_MM_DATA__MM_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF8_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF8_MM_INDEX_HI
++#define BIF_BX_DEV0_EPF0_VF8_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF8_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf8_BIFPFVFDEC1
++//RCC_DEV0_EPF0_VF8_RCC_ERR_LOG
++#define RCC_DEV0_EPF0_VF8_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF8_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
++#define RCC_DEV0_EPF0_VF8_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF8_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
++//RCC_DEV0_EPF0_VF8_RCC_DOORBELL_APER_EN
++#define RCC_DEV0_EPF0_VF8_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF8_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF8_RCC_CONFIG_MEMSIZE
++#define RCC_DEV0_EPF0_VF8_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF8_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF8_RCC_CONFIG_RESERVED
++#define RCC_DEV0_EPF0_VF8_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF8_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF8_RCC_IOV_FUNC_IDENTIFIER
++#define RCC_DEV0_EPF0_VF8_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF8_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
++#define RCC_DEV0_EPF0_VF8_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF8_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf8_BIFPFVFDEC1
++//BIF_BX_DEV0_EPF0_VF8_BIF_BME_STATUS
++#define BIF_BX_DEV0_EPF0_VF8_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF8_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF8_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF8_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
++//BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG
++#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
++#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
++#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
++#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
++#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
++#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
++#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
++//BIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
++#define BIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_BASE_LOW
++#define BIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_CNTL
++#define BIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
++//BIF_BX_DEV0_EPF0_VF8_HDP_REG_COHERENCY_FLUSH_CNTL
++#define BIF_BX_DEV0_EPF0_VF8_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF8_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF8_HDP_MEM_COHERENCY_FLUSH_CNTL
++#define BIF_BX_DEV0_EPF0_VF8_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF8_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
++//BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
++#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
++//BIF_BX_DEV0_EPF0_VF8_BIF_TRANS_PENDING
++#define BIF_BX_DEV0_EPF0_VF8_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF8_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF8_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF8_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
++//BIF_BX_DEV0_EPF0_VF8_NBIF_GFX_ADDR_LUT_BYPASS
++#define BIF_BX_DEV0_EPF0_VF8_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF8_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW0
++#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW1
++#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW2
++#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW3
++#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW0
++#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW1
++#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW2
++#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW3
++#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF8_MAILBOX_CONTROL
++#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
++//BIF_BX_DEV0_EPF0_VF8_MAILBOX_INT_CNTL
++#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
++//BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX
++#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
++#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
++#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
++#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
++#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
++#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
++#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
++#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
++#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
++#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf8_BIFDEC2
++//RCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_ADDR_LO
++#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_ADDR_HI
++#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_MSG_DATA
++#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_CONTROL
++#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_ADDR_LO
++#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_ADDR_HI
++#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_MSG_DATA
++#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_CONTROL
++#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_ADDR_LO
++#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_ADDR_HI
++#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_MSG_DATA
++#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_CONTROL
++#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF8_GFXMSIX_PBA
++#define RCC_DEV0_EPF0_VF8_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF8_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
++#define RCC_DEV0_EPF0_VF8_GFXMSIX_PBA__MSIX_PENDING_BITS_2__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF8_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF8_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
++#define RCC_DEV0_EPF0_VF8_GFXMSIX_PBA__MSIX_PENDING_BITS_2_MASK 0x00000004L
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf9_SYSPFVFDEC
++//BIF_BX_DEV0_EPF0_VF9_MM_INDEX
++#define BIF_BX_DEV0_EPF0_VF9_MM_INDEX__MM_OFFSET__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF9_MM_INDEX__MM_APER__SHIFT 0x1f
++#define BIF_BX_DEV0_EPF0_VF9_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
++#define BIF_BX_DEV0_EPF0_VF9_MM_INDEX__MM_APER_MASK 0x80000000L
++//BIF_BX_DEV0_EPF0_VF9_MM_DATA
++#define BIF_BX_DEV0_EPF0_VF9_MM_DATA__MM_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF9_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF9_MM_INDEX_HI
++#define BIF_BX_DEV0_EPF0_VF9_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF9_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf9_BIFPFVFDEC1
++//RCC_DEV0_EPF0_VF9_RCC_ERR_LOG
++#define RCC_DEV0_EPF0_VF9_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF9_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
++#define RCC_DEV0_EPF0_VF9_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF9_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
++//RCC_DEV0_EPF0_VF9_RCC_DOORBELL_APER_EN
++#define RCC_DEV0_EPF0_VF9_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF9_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF9_RCC_CONFIG_MEMSIZE
++#define RCC_DEV0_EPF0_VF9_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF9_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF9_RCC_CONFIG_RESERVED
++#define RCC_DEV0_EPF0_VF9_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF9_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF9_RCC_IOV_FUNC_IDENTIFIER
++#define RCC_DEV0_EPF0_VF9_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF9_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
++#define RCC_DEV0_EPF0_VF9_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF9_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf9_BIFPFVFDEC1
++//BIF_BX_DEV0_EPF0_VF9_BIF_BME_STATUS
++#define BIF_BX_DEV0_EPF0_VF9_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF9_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF9_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF9_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
++//BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG
++#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
++#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
++#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
++#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
++#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
++#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
++#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
++//BIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
++#define BIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_BASE_LOW
++#define BIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_CNTL
++#define BIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
++//BIF_BX_DEV0_EPF0_VF9_HDP_REG_COHERENCY_FLUSH_CNTL
++#define BIF_BX_DEV0_EPF0_VF9_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF9_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF9_HDP_MEM_COHERENCY_FLUSH_CNTL
++#define BIF_BX_DEV0_EPF0_VF9_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF9_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
++//BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
++#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
++//BIF_BX_DEV0_EPF0_VF9_BIF_TRANS_PENDING
++#define BIF_BX_DEV0_EPF0_VF9_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF9_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF9_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF9_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
++//BIF_BX_DEV0_EPF0_VF9_NBIF_GFX_ADDR_LUT_BYPASS
++#define BIF_BX_DEV0_EPF0_VF9_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF9_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW0
++#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW1
++#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW2
++#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW3
++#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW0
++#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW1
++#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW2
++#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW3
++#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF9_MAILBOX_CONTROL
++#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
++//BIF_BX_DEV0_EPF0_VF9_MAILBOX_INT_CNTL
++#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
++//BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX
++#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
++#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
++#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
++#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
++#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
++#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
++#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
++#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
++#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
++#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf9_BIFDEC2
++//RCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_ADDR_LO
++#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_ADDR_HI
++#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_MSG_DATA
++#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_CONTROL
++#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_ADDR_LO
++#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_ADDR_HI
++#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_MSG_DATA
++#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_CONTROL
++#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_ADDR_LO
++#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_ADDR_HI
++#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_MSG_DATA
++#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_CONTROL
++#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF9_GFXMSIX_PBA
++#define RCC_DEV0_EPF0_VF9_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF9_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
++#define RCC_DEV0_EPF0_VF9_GFXMSIX_PBA__MSIX_PENDING_BITS_2__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF9_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF9_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
++#define RCC_DEV0_EPF0_VF9_GFXMSIX_PBA__MSIX_PENDING_BITS_2_MASK 0x00000004L
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf10_SYSPFVFDEC
++//BIF_BX_DEV0_EPF0_VF10_MM_INDEX
++#define BIF_BX_DEV0_EPF0_VF10_MM_INDEX__MM_OFFSET__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF10_MM_INDEX__MM_APER__SHIFT 0x1f
++#define BIF_BX_DEV0_EPF0_VF10_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
++#define BIF_BX_DEV0_EPF0_VF10_MM_INDEX__MM_APER_MASK 0x80000000L
++//BIF_BX_DEV0_EPF0_VF10_MM_DATA
++#define BIF_BX_DEV0_EPF0_VF10_MM_DATA__MM_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF10_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF10_MM_INDEX_HI
++#define BIF_BX_DEV0_EPF0_VF10_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF10_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf10_BIFPFVFDEC1
++//RCC_DEV0_EPF0_VF10_RCC_ERR_LOG
++#define RCC_DEV0_EPF0_VF10_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF10_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
++#define RCC_DEV0_EPF0_VF10_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF10_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
++//RCC_DEV0_EPF0_VF10_RCC_DOORBELL_APER_EN
++#define RCC_DEV0_EPF0_VF10_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF10_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF10_RCC_CONFIG_MEMSIZE
++#define RCC_DEV0_EPF0_VF10_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF10_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF10_RCC_CONFIG_RESERVED
++#define RCC_DEV0_EPF0_VF10_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF10_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF10_RCC_IOV_FUNC_IDENTIFIER
++#define RCC_DEV0_EPF0_VF10_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF10_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
++#define RCC_DEV0_EPF0_VF10_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF10_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf10_BIFPFVFDEC1
++//BIF_BX_DEV0_EPF0_VF10_BIF_BME_STATUS
++#define BIF_BX_DEV0_EPF0_VF10_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF10_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF10_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF10_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
++//BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG
++#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
++#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
++#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
++#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
++#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
++#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
++#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
++//BIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
++#define BIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_BASE_LOW
++#define BIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_CNTL
++#define BIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
++//BIF_BX_DEV0_EPF0_VF10_HDP_REG_COHERENCY_FLUSH_CNTL
++#define BIF_BX_DEV0_EPF0_VF10_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF10_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF10_HDP_MEM_COHERENCY_FLUSH_CNTL
++#define BIF_BX_DEV0_EPF0_VF10_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF10_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
++//BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
++#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
++//BIF_BX_DEV0_EPF0_VF10_BIF_TRANS_PENDING
++#define BIF_BX_DEV0_EPF0_VF10_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF10_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF10_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF10_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
++//BIF_BX_DEV0_EPF0_VF10_NBIF_GFX_ADDR_LUT_BYPASS
++#define BIF_BX_DEV0_EPF0_VF10_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF10_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW0
++#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW1
++#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW2
++#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW3
++#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW0
++#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW1
++#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW2
++#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW3
++#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF10_MAILBOX_CONTROL
++#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
++//BIF_BX_DEV0_EPF0_VF10_MAILBOX_INT_CNTL
++#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
++//BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX
++#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
++#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
++#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
++#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
++#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
++#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
++#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
++#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
++#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
++#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf10_BIFDEC2
++//RCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_ADDR_LO
++#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_ADDR_HI
++#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_MSG_DATA
++#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_CONTROL
++#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_ADDR_LO
++#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_ADDR_HI
++#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_MSG_DATA
++#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_CONTROL
++#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_ADDR_LO
++#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_ADDR_HI
++#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_MSG_DATA
++#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_CONTROL
++#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF10_GFXMSIX_PBA
++#define RCC_DEV0_EPF0_VF10_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF10_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
++#define RCC_DEV0_EPF0_VF10_GFXMSIX_PBA__MSIX_PENDING_BITS_2__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF10_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF10_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
++#define RCC_DEV0_EPF0_VF10_GFXMSIX_PBA__MSIX_PENDING_BITS_2_MASK 0x00000004L
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf11_SYSPFVFDEC
++//BIF_BX_DEV0_EPF0_VF11_MM_INDEX
++#define BIF_BX_DEV0_EPF0_VF11_MM_INDEX__MM_OFFSET__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF11_MM_INDEX__MM_APER__SHIFT 0x1f
++#define BIF_BX_DEV0_EPF0_VF11_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
++#define BIF_BX_DEV0_EPF0_VF11_MM_INDEX__MM_APER_MASK 0x80000000L
++//BIF_BX_DEV0_EPF0_VF11_MM_DATA
++#define BIF_BX_DEV0_EPF0_VF11_MM_DATA__MM_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF11_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF11_MM_INDEX_HI
++#define BIF_BX_DEV0_EPF0_VF11_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF11_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf11_BIFPFVFDEC1
++//RCC_DEV0_EPF0_VF11_RCC_ERR_LOG
++#define RCC_DEV0_EPF0_VF11_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF11_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
++#define RCC_DEV0_EPF0_VF11_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF11_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
++//RCC_DEV0_EPF0_VF11_RCC_DOORBELL_APER_EN
++#define RCC_DEV0_EPF0_VF11_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF11_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF11_RCC_CONFIG_MEMSIZE
++#define RCC_DEV0_EPF0_VF11_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF11_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF11_RCC_CONFIG_RESERVED
++#define RCC_DEV0_EPF0_VF11_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF11_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF11_RCC_IOV_FUNC_IDENTIFIER
++#define RCC_DEV0_EPF0_VF11_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF11_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
++#define RCC_DEV0_EPF0_VF11_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF11_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf11_BIFPFVFDEC1
++//BIF_BX_DEV0_EPF0_VF11_BIF_BME_STATUS
++#define BIF_BX_DEV0_EPF0_VF11_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF11_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF11_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF11_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
++//BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG
++#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
++#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
++#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
++#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
++#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
++#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
++#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
++//BIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
++#define BIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_BASE_LOW
++#define BIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_CNTL
++#define BIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
++//BIF_BX_DEV0_EPF0_VF11_HDP_REG_COHERENCY_FLUSH_CNTL
++#define BIF_BX_DEV0_EPF0_VF11_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF11_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF11_HDP_MEM_COHERENCY_FLUSH_CNTL
++#define BIF_BX_DEV0_EPF0_VF11_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF11_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
++//BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
++#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
++//BIF_BX_DEV0_EPF0_VF11_BIF_TRANS_PENDING
++#define BIF_BX_DEV0_EPF0_VF11_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF11_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF11_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF11_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
++//BIF_BX_DEV0_EPF0_VF11_NBIF_GFX_ADDR_LUT_BYPASS
++#define BIF_BX_DEV0_EPF0_VF11_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF11_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW0
++#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW1
++#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW2
++#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW3
++#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW0
++#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW1
++#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW2
++#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW3
++#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF11_MAILBOX_CONTROL
++#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
++//BIF_BX_DEV0_EPF0_VF11_MAILBOX_INT_CNTL
++#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
++//BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX
++#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
++#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
++#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
++#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
++#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
++#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
++#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
++#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
++#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
++#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf11_BIFDEC2
++//RCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_ADDR_LO
++#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_ADDR_HI
++#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_MSG_DATA
++#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_CONTROL
++#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_ADDR_LO
++#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_ADDR_HI
++#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_MSG_DATA
++#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_CONTROL
++#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_ADDR_LO
++#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_ADDR_HI
++#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_MSG_DATA
++#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_CONTROL
++#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF11_GFXMSIX_PBA
++#define RCC_DEV0_EPF0_VF11_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF11_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
++#define RCC_DEV0_EPF0_VF11_GFXMSIX_PBA__MSIX_PENDING_BITS_2__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF11_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF11_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
++#define RCC_DEV0_EPF0_VF11_GFXMSIX_PBA__MSIX_PENDING_BITS_2_MASK 0x00000004L
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf12_SYSPFVFDEC
++//BIF_BX_DEV0_EPF0_VF12_MM_INDEX
++#define BIF_BX_DEV0_EPF0_VF12_MM_INDEX__MM_OFFSET__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF12_MM_INDEX__MM_APER__SHIFT 0x1f
++#define BIF_BX_DEV0_EPF0_VF12_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
++#define BIF_BX_DEV0_EPF0_VF12_MM_INDEX__MM_APER_MASK 0x80000000L
++//BIF_BX_DEV0_EPF0_VF12_MM_DATA
++#define BIF_BX_DEV0_EPF0_VF12_MM_DATA__MM_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF12_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF12_MM_INDEX_HI
++#define BIF_BX_DEV0_EPF0_VF12_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF12_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf12_BIFPFVFDEC1
++//RCC_DEV0_EPF0_VF12_RCC_ERR_LOG
++#define RCC_DEV0_EPF0_VF12_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF12_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
++#define RCC_DEV0_EPF0_VF12_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF12_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
++//RCC_DEV0_EPF0_VF12_RCC_DOORBELL_APER_EN
++#define RCC_DEV0_EPF0_VF12_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF12_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF12_RCC_CONFIG_MEMSIZE
++#define RCC_DEV0_EPF0_VF12_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF12_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF12_RCC_CONFIG_RESERVED
++#define RCC_DEV0_EPF0_VF12_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF12_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF12_RCC_IOV_FUNC_IDENTIFIER
++#define RCC_DEV0_EPF0_VF12_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF12_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
++#define RCC_DEV0_EPF0_VF12_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF12_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf12_BIFPFVFDEC1
++//BIF_BX_DEV0_EPF0_VF12_BIF_BME_STATUS
++#define BIF_BX_DEV0_EPF0_VF12_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF12_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF12_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF12_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
++//BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG
++#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
++#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
++#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
++#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
++#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
++#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
++#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
++//BIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
++#define BIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_BASE_LOW
++#define BIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_CNTL
++#define BIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
++//BIF_BX_DEV0_EPF0_VF12_HDP_REG_COHERENCY_FLUSH_CNTL
++#define BIF_BX_DEV0_EPF0_VF12_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF12_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF12_HDP_MEM_COHERENCY_FLUSH_CNTL
++#define BIF_BX_DEV0_EPF0_VF12_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF12_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
++//BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
++#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
++//BIF_BX_DEV0_EPF0_VF12_BIF_TRANS_PENDING
++#define BIF_BX_DEV0_EPF0_VF12_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF12_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF12_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF12_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
++//BIF_BX_DEV0_EPF0_VF12_NBIF_GFX_ADDR_LUT_BYPASS
++#define BIF_BX_DEV0_EPF0_VF12_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF12_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW0
++#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW1
++#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW2
++#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW3
++#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW0
++#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW1
++#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW2
++#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW3
++#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF12_MAILBOX_CONTROL
++#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
++//BIF_BX_DEV0_EPF0_VF12_MAILBOX_INT_CNTL
++#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
++//BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX
++#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
++#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
++#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
++#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
++#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
++#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
++#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
++#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
++#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
++#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf12_BIFDEC2
++//RCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_ADDR_LO
++#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_ADDR_HI
++#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_MSG_DATA
++#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_CONTROL
++#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_ADDR_LO
++#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_ADDR_HI
++#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_MSG_DATA
++#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_CONTROL
++#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_ADDR_LO
++#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_ADDR_HI
++#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_MSG_DATA
++#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_CONTROL
++#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF12_GFXMSIX_PBA
++#define RCC_DEV0_EPF0_VF12_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF12_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
++#define RCC_DEV0_EPF0_VF12_GFXMSIX_PBA__MSIX_PENDING_BITS_2__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF12_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF12_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
++#define RCC_DEV0_EPF0_VF12_GFXMSIX_PBA__MSIX_PENDING_BITS_2_MASK 0x00000004L
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf13_SYSPFVFDEC
++//BIF_BX_DEV0_EPF0_VF13_MM_INDEX
++#define BIF_BX_DEV0_EPF0_VF13_MM_INDEX__MM_OFFSET__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF13_MM_INDEX__MM_APER__SHIFT 0x1f
++#define BIF_BX_DEV0_EPF0_VF13_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
++#define BIF_BX_DEV0_EPF0_VF13_MM_INDEX__MM_APER_MASK 0x80000000L
++//BIF_BX_DEV0_EPF0_VF13_MM_DATA
++#define BIF_BX_DEV0_EPF0_VF13_MM_DATA__MM_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF13_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF13_MM_INDEX_HI
++#define BIF_BX_DEV0_EPF0_VF13_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF13_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf13_BIFPFVFDEC1
++//RCC_DEV0_EPF0_VF13_RCC_ERR_LOG
++#define RCC_DEV0_EPF0_VF13_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF13_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
++#define RCC_DEV0_EPF0_VF13_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF13_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
++//RCC_DEV0_EPF0_VF13_RCC_DOORBELL_APER_EN
++#define RCC_DEV0_EPF0_VF13_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF13_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF13_RCC_CONFIG_MEMSIZE
++#define RCC_DEV0_EPF0_VF13_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF13_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF13_RCC_CONFIG_RESERVED
++#define RCC_DEV0_EPF0_VF13_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF13_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF13_RCC_IOV_FUNC_IDENTIFIER
++#define RCC_DEV0_EPF0_VF13_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF13_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
++#define RCC_DEV0_EPF0_VF13_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF13_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf13_BIFPFVFDEC1
++//BIF_BX_DEV0_EPF0_VF13_BIF_BME_STATUS
++#define BIF_BX_DEV0_EPF0_VF13_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF13_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF13_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF13_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
++//BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG
++#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
++#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
++#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
++#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
++#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
++#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
++#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
++//BIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
++#define BIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_BASE_LOW
++#define BIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_CNTL
++#define BIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
++//BIF_BX_DEV0_EPF0_VF13_HDP_REG_COHERENCY_FLUSH_CNTL
++#define BIF_BX_DEV0_EPF0_VF13_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF13_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF13_HDP_MEM_COHERENCY_FLUSH_CNTL
++#define BIF_BX_DEV0_EPF0_VF13_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF13_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
++//BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
++#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
++//BIF_BX_DEV0_EPF0_VF13_BIF_TRANS_PENDING
++#define BIF_BX_DEV0_EPF0_VF13_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF13_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF13_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF13_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
++//BIF_BX_DEV0_EPF0_VF13_NBIF_GFX_ADDR_LUT_BYPASS
++#define BIF_BX_DEV0_EPF0_VF13_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF13_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW0
++#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW1
++#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW2
++#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW3
++#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW0
++#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW1
++#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW2
++#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW3
++#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF13_MAILBOX_CONTROL
++#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
++//BIF_BX_DEV0_EPF0_VF13_MAILBOX_INT_CNTL
++#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
++//BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX
++#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
++#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
++#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
++#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
++#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
++#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
++#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
++#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
++#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
++#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf13_BIFDEC2
++//RCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_ADDR_LO
++#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_ADDR_HI
++#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_MSG_DATA
++#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_CONTROL
++#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_ADDR_LO
++#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_ADDR_HI
++#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_MSG_DATA
++#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_CONTROL
++#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_ADDR_LO
++#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_ADDR_HI
++#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_MSG_DATA
++#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_CONTROL
++#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF13_GFXMSIX_PBA
++#define RCC_DEV0_EPF0_VF13_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF13_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
++#define RCC_DEV0_EPF0_VF13_GFXMSIX_PBA__MSIX_PENDING_BITS_2__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF13_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF13_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
++#define RCC_DEV0_EPF0_VF13_GFXMSIX_PBA__MSIX_PENDING_BITS_2_MASK 0x00000004L
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf14_SYSPFVFDEC
++//BIF_BX_DEV0_EPF0_VF14_MM_INDEX
++#define BIF_BX_DEV0_EPF0_VF14_MM_INDEX__MM_OFFSET__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF14_MM_INDEX__MM_APER__SHIFT 0x1f
++#define BIF_BX_DEV0_EPF0_VF14_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
++#define BIF_BX_DEV0_EPF0_VF14_MM_INDEX__MM_APER_MASK 0x80000000L
++//BIF_BX_DEV0_EPF0_VF14_MM_DATA
++#define BIF_BX_DEV0_EPF0_VF14_MM_DATA__MM_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF14_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF14_MM_INDEX_HI
++#define BIF_BX_DEV0_EPF0_VF14_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF14_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf14_BIFPFVFDEC1
++//RCC_DEV0_EPF0_VF14_RCC_ERR_LOG
++#define RCC_DEV0_EPF0_VF14_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF14_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
++#define RCC_DEV0_EPF0_VF14_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF14_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
++//RCC_DEV0_EPF0_VF14_RCC_DOORBELL_APER_EN
++#define RCC_DEV0_EPF0_VF14_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF14_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF14_RCC_CONFIG_MEMSIZE
++#define RCC_DEV0_EPF0_VF14_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF14_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF14_RCC_CONFIG_RESERVED
++#define RCC_DEV0_EPF0_VF14_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF14_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF14_RCC_IOV_FUNC_IDENTIFIER
++#define RCC_DEV0_EPF0_VF14_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF14_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
++#define RCC_DEV0_EPF0_VF14_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF14_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf14_BIFPFVFDEC1
++//BIF_BX_DEV0_EPF0_VF14_BIF_BME_STATUS
++#define BIF_BX_DEV0_EPF0_VF14_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF14_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF14_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF14_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
++//BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG
++#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
++#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
++#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
++#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
++#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
++#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
++#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
++//BIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
++#define BIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_BASE_LOW
++#define BIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_CNTL
++#define BIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
++//BIF_BX_DEV0_EPF0_VF14_HDP_REG_COHERENCY_FLUSH_CNTL
++#define BIF_BX_DEV0_EPF0_VF14_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF14_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF14_HDP_MEM_COHERENCY_FLUSH_CNTL
++#define BIF_BX_DEV0_EPF0_VF14_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF14_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
++//BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
++#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
++//BIF_BX_DEV0_EPF0_VF14_BIF_TRANS_PENDING
++#define BIF_BX_DEV0_EPF0_VF14_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF14_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF14_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF14_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
++//BIF_BX_DEV0_EPF0_VF14_NBIF_GFX_ADDR_LUT_BYPASS
++#define BIF_BX_DEV0_EPF0_VF14_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF14_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW0
++#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW1
++#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW2
++#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW3
++#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW0
++#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW1
++#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW2
++#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW3
++#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF14_MAILBOX_CONTROL
++#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
++//BIF_BX_DEV0_EPF0_VF14_MAILBOX_INT_CNTL
++#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
++//BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX
++#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
++#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
++#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
++#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
++#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
++#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
++#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
++#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
++#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
++#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf14_BIFDEC2
++//RCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_ADDR_LO
++#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_ADDR_HI
++#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_MSG_DATA
++#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_CONTROL
++#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_ADDR_LO
++#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_ADDR_HI
++#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_MSG_DATA
++#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_CONTROL
++#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_ADDR_LO
++#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_ADDR_HI
++#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_MSG_DATA
++#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_CONTROL
++#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF14_GFXMSIX_PBA
++#define RCC_DEV0_EPF0_VF14_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF14_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
++#define RCC_DEV0_EPF0_VF14_GFXMSIX_PBA__MSIX_PENDING_BITS_2__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF14_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF14_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
++#define RCC_DEV0_EPF0_VF14_GFXMSIX_PBA__MSIX_PENDING_BITS_2_MASK 0x00000004L
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf15_SYSPFVFDEC
++//BIF_BX_DEV0_EPF0_VF15_MM_INDEX
++#define BIF_BX_DEV0_EPF0_VF15_MM_INDEX__MM_OFFSET__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF15_MM_INDEX__MM_APER__SHIFT 0x1f
++#define BIF_BX_DEV0_EPF0_VF15_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
++#define BIF_BX_DEV0_EPF0_VF15_MM_INDEX__MM_APER_MASK 0x80000000L
++//BIF_BX_DEV0_EPF0_VF15_MM_DATA
++#define BIF_BX_DEV0_EPF0_VF15_MM_DATA__MM_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF15_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF15_MM_INDEX_HI
++#define BIF_BX_DEV0_EPF0_VF15_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF15_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf15_BIFPFVFDEC1
++//RCC_DEV0_EPF0_VF15_RCC_ERR_LOG
++#define RCC_DEV0_EPF0_VF15_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF15_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
++#define RCC_DEV0_EPF0_VF15_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF15_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
++//RCC_DEV0_EPF0_VF15_RCC_DOORBELL_APER_EN
++#define RCC_DEV0_EPF0_VF15_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF15_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF15_RCC_CONFIG_MEMSIZE
++#define RCC_DEV0_EPF0_VF15_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF15_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF15_RCC_CONFIG_RESERVED
++#define RCC_DEV0_EPF0_VF15_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF15_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF15_RCC_IOV_FUNC_IDENTIFIER
++#define RCC_DEV0_EPF0_VF15_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF15_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
++#define RCC_DEV0_EPF0_VF15_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF15_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
++
++
++// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf15_BIFPFVFDEC1
++//BIF_BX_DEV0_EPF0_VF15_BIF_BME_STATUS
++#define BIF_BX_DEV0_EPF0_VF15_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF15_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF15_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF15_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
++//BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG
++#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
++#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
++#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
++#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
++#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
++#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
++#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
++//BIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
++#define BIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_BASE_LOW
++#define BIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_CNTL
++#define BIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
++//BIF_BX_DEV0_EPF0_VF15_HDP_REG_COHERENCY_FLUSH_CNTL
++#define BIF_BX_DEV0_EPF0_VF15_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF15_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF15_HDP_MEM_COHERENCY_FLUSH_CNTL
++#define BIF_BX_DEV0_EPF0_VF15_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF15_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
++//BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
++#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
++//BIF_BX_DEV0_EPF0_VF15_BIF_TRANS_PENDING
++#define BIF_BX_DEV0_EPF0_VF15_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF15_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF15_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF15_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
++//BIF_BX_DEV0_EPF0_VF15_NBIF_GFX_ADDR_LUT_BYPASS
++#define BIF_BX_DEV0_EPF0_VF15_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF15_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS_MASK 0x00000001L
++//BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW0
++#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW1
++#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW2
++#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW3
++#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW0
++#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW1
++#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW2
++#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW3
++#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
++//BIF_BX_DEV0_EPF0_VF15_MAILBOX_CONTROL
++#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
++#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
++#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
++//BIF_BX_DEV0_EPF0_VF15_MAILBOX_INT_CNTL
++#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
++//BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX
++#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
++#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
++#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
++#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
++#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
++#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
++#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
++#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
++#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
++#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
++#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
++#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
++#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
++#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
++#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
++#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
++
++
++// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf15_BIFDEC2
++//RCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_ADDR_LO
++#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_ADDR_HI
++#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_MSG_DATA
++#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_CONTROL
++#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_ADDR_LO
++#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_ADDR_HI
++#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_MSG_DATA
++#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_CONTROL
++#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_ADDR_LO
++#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
++//RCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_ADDR_HI
++#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_MSG_DATA
++#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
++//RCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_CONTROL
++#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
++//RCC_DEV0_EPF0_VF15_GFXMSIX_PBA
++#define RCC_DEV0_EPF0_VF15_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
++#define RCC_DEV0_EPF0_VF15_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
++#define RCC_DEV0_EPF0_VF15_GFXMSIX_PBA__MSIX_PENDING_BITS_2__SHIFT 0x2
++#define RCC_DEV0_EPF0_VF15_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
++#define RCC_DEV0_EPF0_VF15_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
++#define RCC_DEV0_EPF0_VF15_GFXMSIX_PBA__MSIX_PENDING_BITS_2_MASK 0x00000004L
++
++#endif
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5116-drm-amdgpu-include-Add-sdma0-1-4.2-register-headerfi.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5116-drm-amdgpu-include-Add-sdma0-1-4.2-register-headerfi.patch
new file mode 100644
index 00000000..9cca2964
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5116-drm-amdgpu-include-Add-sdma0-1-4.2-register-headerfi.patch
@@ -0,0 +1,8078 @@
+From 681e0ce870a47e3a461204f49b07cfc0c13e4a8e Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Wed, 17 Jan 2018 19:42:33 +0800
+Subject: [PATCH 5116/5725] drm/amdgpu/include: Add sdma0/1 4.2 register
+ headerfiles. (v3)
+
+These are the System DMA register headers for vega20.
+
+v2: cleanups (Alex)
+v3: add missing licenses (Alex)
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Acked-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../amd/include/asic_reg/sdma0/sdma0_4_2_offset.h | 1047 +++++++
+ .../amd/include/asic_reg/sdma0/sdma0_4_2_sh_mask.h | 2992 ++++++++++++++++++++
+ .../amd/include/asic_reg/sdma1/sdma1_4_2_offset.h | 1039 +++++++
+ .../amd/include/asic_reg/sdma1/sdma1_4_2_sh_mask.h | 2948 +++++++++++++++++++
+ 4 files changed, 8026 insertions(+)
+ create mode 100644 drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_2_offset.h
+ create mode 100644 drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_2_sh_mask.h
+ create mode 100644 drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_2_offset.h
+ create mode 100644 drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_2_sh_mask.h
+
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_2_offset.h
+new file mode 100644
+index 0000000..30b2f5d
+--- /dev/null
++++ b/drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_2_offset.h
+@@ -0,0 +1,1047 @@
++/*
++ * Copyright (C) 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
++ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++#ifndef _sdma0_4_2_0_OFFSET_HEADER
++#define _sdma0_4_2_0_OFFSET_HEADER
++
++
++
++// addressBlock: sdma0_sdma0dec
++// base address: 0x4980
++#define mmSDMA0_UCODE_ADDR 0x0000
++#define mmSDMA0_UCODE_ADDR_BASE_IDX 0
++#define mmSDMA0_UCODE_DATA 0x0001
++#define mmSDMA0_UCODE_DATA_BASE_IDX 0
++#define mmSDMA0_VM_CNTL 0x0004
++#define mmSDMA0_VM_CNTL_BASE_IDX 0
++#define mmSDMA0_VM_CTX_LO 0x0005
++#define mmSDMA0_VM_CTX_LO_BASE_IDX 0
++#define mmSDMA0_VM_CTX_HI 0x0006
++#define mmSDMA0_VM_CTX_HI_BASE_IDX 0
++#define mmSDMA0_ACTIVE_FCN_ID 0x0007
++#define mmSDMA0_ACTIVE_FCN_ID_BASE_IDX 0
++#define mmSDMA0_VM_CTX_CNTL 0x0008
++#define mmSDMA0_VM_CTX_CNTL_BASE_IDX 0
++#define mmSDMA0_VIRT_RESET_REQ 0x0009
++#define mmSDMA0_VIRT_RESET_REQ_BASE_IDX 0
++#define mmSDMA0_VF_ENABLE 0x000a
++#define mmSDMA0_VF_ENABLE_BASE_IDX 0
++#define mmSDMA0_CONTEXT_REG_TYPE0 0x000b
++#define mmSDMA0_CONTEXT_REG_TYPE0_BASE_IDX 0
++#define mmSDMA0_CONTEXT_REG_TYPE1 0x000c
++#define mmSDMA0_CONTEXT_REG_TYPE1_BASE_IDX 0
++#define mmSDMA0_CONTEXT_REG_TYPE2 0x000d
++#define mmSDMA0_CONTEXT_REG_TYPE2_BASE_IDX 0
++#define mmSDMA0_CONTEXT_REG_TYPE3 0x000e
++#define mmSDMA0_CONTEXT_REG_TYPE3_BASE_IDX 0
++#define mmSDMA0_PUB_REG_TYPE0 0x000f
++#define mmSDMA0_PUB_REG_TYPE0_BASE_IDX 0
++#define mmSDMA0_PUB_REG_TYPE1 0x0010
++#define mmSDMA0_PUB_REG_TYPE1_BASE_IDX 0
++#define mmSDMA0_PUB_REG_TYPE2 0x0011
++#define mmSDMA0_PUB_REG_TYPE2_BASE_IDX 0
++#define mmSDMA0_PUB_REG_TYPE3 0x0012
++#define mmSDMA0_PUB_REG_TYPE3_BASE_IDX 0
++#define mmSDMA0_MMHUB_CNTL 0x0013
++#define mmSDMA0_MMHUB_CNTL_BASE_IDX 0
++#define mmSDMA0_CONTEXT_GROUP_BOUNDARY 0x0019
++#define mmSDMA0_CONTEXT_GROUP_BOUNDARY_BASE_IDX 0
++#define mmSDMA0_POWER_CNTL 0x001a
++#define mmSDMA0_POWER_CNTL_BASE_IDX 0
++#define mmSDMA0_CLK_CTRL 0x001b
++#define mmSDMA0_CLK_CTRL_BASE_IDX 0
++#define mmSDMA0_CNTL 0x001c
++#define mmSDMA0_CNTL_BASE_IDX 0
++#define mmSDMA0_CHICKEN_BITS 0x001d
++#define mmSDMA0_CHICKEN_BITS_BASE_IDX 0
++#define mmSDMA0_GB_ADDR_CONFIG 0x001e
++#define mmSDMA0_GB_ADDR_CONFIG_BASE_IDX 0
++#define mmSDMA0_GB_ADDR_CONFIG_READ 0x001f
++#define mmSDMA0_GB_ADDR_CONFIG_READ_BASE_IDX 0
++#define mmSDMA0_RB_RPTR_FETCH_HI 0x0020
++#define mmSDMA0_RB_RPTR_FETCH_HI_BASE_IDX 0
++#define mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL 0x0021
++#define mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL_BASE_IDX 0
++#define mmSDMA0_RB_RPTR_FETCH 0x0022
++#define mmSDMA0_RB_RPTR_FETCH_BASE_IDX 0
++#define mmSDMA0_IB_OFFSET_FETCH 0x0023
++#define mmSDMA0_IB_OFFSET_FETCH_BASE_IDX 0
++#define mmSDMA0_PROGRAM 0x0024
++#define mmSDMA0_PROGRAM_BASE_IDX 0
++#define mmSDMA0_STATUS_REG 0x0025
++#define mmSDMA0_STATUS_REG_BASE_IDX 0
++#define mmSDMA0_STATUS1_REG 0x0026
++#define mmSDMA0_STATUS1_REG_BASE_IDX 0
++#define mmSDMA0_RD_BURST_CNTL 0x0027
++#define mmSDMA0_RD_BURST_CNTL_BASE_IDX 0
++#define mmSDMA0_HBM_PAGE_CONFIG 0x0028
++#define mmSDMA0_HBM_PAGE_CONFIG_BASE_IDX 0
++#define mmSDMA0_UCODE_CHECKSUM 0x0029
++#define mmSDMA0_UCODE_CHECKSUM_BASE_IDX 0
++#define mmSDMA0_F32_CNTL 0x002a
++#define mmSDMA0_F32_CNTL_BASE_IDX 0
++#define mmSDMA0_FREEZE 0x002b
++#define mmSDMA0_FREEZE_BASE_IDX 0
++#define mmSDMA0_PHASE0_QUANTUM 0x002c
++#define mmSDMA0_PHASE0_QUANTUM_BASE_IDX 0
++#define mmSDMA0_PHASE1_QUANTUM 0x002d
++#define mmSDMA0_PHASE1_QUANTUM_BASE_IDX 0
++#define mmSDMA_POWER_GATING 0x002e
++#define mmSDMA_POWER_GATING_BASE_IDX 0
++#define mmSDMA_PGFSM_CONFIG 0x002f
++#define mmSDMA_PGFSM_CONFIG_BASE_IDX 0
++#define mmSDMA_PGFSM_WRITE 0x0030
++#define mmSDMA_PGFSM_WRITE_BASE_IDX 0
++#define mmSDMA_PGFSM_READ 0x0031
++#define mmSDMA_PGFSM_READ_BASE_IDX 0
++#define mmSDMA0_EDC_CONFIG 0x0032
++#define mmSDMA0_EDC_CONFIG_BASE_IDX 0
++#define mmSDMA0_BA_THRESHOLD 0x0033
++#define mmSDMA0_BA_THRESHOLD_BASE_IDX 0
++#define mmSDMA0_ID 0x0034
++#define mmSDMA0_ID_BASE_IDX 0
++#define mmSDMA0_VERSION 0x0035
++#define mmSDMA0_VERSION_BASE_IDX 0
++#define mmSDMA0_EDC_COUNTER 0x0036
++#define mmSDMA0_EDC_COUNTER_BASE_IDX 0
++#define mmSDMA0_EDC_COUNTER_CLEAR 0x0037
++#define mmSDMA0_EDC_COUNTER_CLEAR_BASE_IDX 0
++#define mmSDMA0_STATUS2_REG 0x0038
++#define mmSDMA0_STATUS2_REG_BASE_IDX 0
++#define mmSDMA0_ATOMIC_CNTL 0x0039
++#define mmSDMA0_ATOMIC_CNTL_BASE_IDX 0
++#define mmSDMA0_ATOMIC_PREOP_LO 0x003a
++#define mmSDMA0_ATOMIC_PREOP_LO_BASE_IDX 0
++#define mmSDMA0_ATOMIC_PREOP_HI 0x003b
++#define mmSDMA0_ATOMIC_PREOP_HI_BASE_IDX 0
++#define mmSDMA0_UTCL1_CNTL 0x003c
++#define mmSDMA0_UTCL1_CNTL_BASE_IDX 0
++#define mmSDMA0_UTCL1_WATERMK 0x003d
++#define mmSDMA0_UTCL1_WATERMK_BASE_IDX 0
++#define mmSDMA0_UTCL1_RD_STATUS 0x003e
++#define mmSDMA0_UTCL1_RD_STATUS_BASE_IDX 0
++#define mmSDMA0_UTCL1_WR_STATUS 0x003f
++#define mmSDMA0_UTCL1_WR_STATUS_BASE_IDX 0
++#define mmSDMA0_UTCL1_INV0 0x0040
++#define mmSDMA0_UTCL1_INV0_BASE_IDX 0
++#define mmSDMA0_UTCL1_INV1 0x0041
++#define mmSDMA0_UTCL1_INV1_BASE_IDX 0
++#define mmSDMA0_UTCL1_INV2 0x0042
++#define mmSDMA0_UTCL1_INV2_BASE_IDX 0
++#define mmSDMA0_UTCL1_RD_XNACK0 0x0043
++#define mmSDMA0_UTCL1_RD_XNACK0_BASE_IDX 0
++#define mmSDMA0_UTCL1_RD_XNACK1 0x0044
++#define mmSDMA0_UTCL1_RD_XNACK1_BASE_IDX 0
++#define mmSDMA0_UTCL1_WR_XNACK0 0x0045
++#define mmSDMA0_UTCL1_WR_XNACK0_BASE_IDX 0
++#define mmSDMA0_UTCL1_WR_XNACK1 0x0046
++#define mmSDMA0_UTCL1_WR_XNACK1_BASE_IDX 0
++#define mmSDMA0_UTCL1_TIMEOUT 0x0047
++#define mmSDMA0_UTCL1_TIMEOUT_BASE_IDX 0
++#define mmSDMA0_UTCL1_PAGE 0x0048
++#define mmSDMA0_UTCL1_PAGE_BASE_IDX 0
++#define mmSDMA0_POWER_CNTL_IDLE 0x0049
++#define mmSDMA0_POWER_CNTL_IDLE_BASE_IDX 0
++#define mmSDMA0_RELAX_ORDERING_LUT 0x004a
++#define mmSDMA0_RELAX_ORDERING_LUT_BASE_IDX 0
++#define mmSDMA0_CHICKEN_BITS_2 0x004b
++#define mmSDMA0_CHICKEN_BITS_2_BASE_IDX 0
++#define mmSDMA0_STATUS3_REG 0x004c
++#define mmSDMA0_STATUS3_REG_BASE_IDX 0
++#define mmSDMA0_PHYSICAL_ADDR_LO 0x004d
++#define mmSDMA0_PHYSICAL_ADDR_LO_BASE_IDX 0
++#define mmSDMA0_PHYSICAL_ADDR_HI 0x004e
++#define mmSDMA0_PHYSICAL_ADDR_HI_BASE_IDX 0
++#define mmSDMA0_PHASE2_QUANTUM 0x004f
++#define mmSDMA0_PHASE2_QUANTUM_BASE_IDX 0
++#define mmSDMA0_ERROR_LOG 0x0050
++#define mmSDMA0_ERROR_LOG_BASE_IDX 0
++#define mmSDMA0_PUB_DUMMY_REG0 0x0051
++#define mmSDMA0_PUB_DUMMY_REG0_BASE_IDX 0
++#define mmSDMA0_PUB_DUMMY_REG1 0x0052
++#define mmSDMA0_PUB_DUMMY_REG1_BASE_IDX 0
++#define mmSDMA0_PUB_DUMMY_REG2 0x0053
++#define mmSDMA0_PUB_DUMMY_REG2_BASE_IDX 0
++#define mmSDMA0_PUB_DUMMY_REG3 0x0054
++#define mmSDMA0_PUB_DUMMY_REG3_BASE_IDX 0
++#define mmSDMA0_F32_COUNTER 0x0055
++#define mmSDMA0_F32_COUNTER_BASE_IDX 0
++#define mmSDMA0_PERFMON_CNTL 0x0057
++#define mmSDMA0_PERFMON_CNTL_BASE_IDX 0
++#define mmSDMA0_PERFCOUNTER0_RESULT 0x0058
++#define mmSDMA0_PERFCOUNTER0_RESULT_BASE_IDX 0
++#define mmSDMA0_PERFCOUNTER1_RESULT 0x0059
++#define mmSDMA0_PERFCOUNTER1_RESULT_BASE_IDX 0
++#define mmSDMA0_PERFCOUNTER_TAG_DELAY_RANGE 0x005a
++#define mmSDMA0_PERFCOUNTER_TAG_DELAY_RANGE_BASE_IDX 0
++#define mmSDMA0_CRD_CNTL 0x005b
++#define mmSDMA0_CRD_CNTL_BASE_IDX 0
++#define mmSDMA0_GPU_IOV_VIOLATION_LOG 0x005d
++#define mmSDMA0_GPU_IOV_VIOLATION_LOG_BASE_IDX 0
++#define mmSDMA0_ULV_CNTL 0x005e
++#define mmSDMA0_ULV_CNTL_BASE_IDX 0
++#define mmSDMA0_EA_DBIT_ADDR_DATA 0x0060
++#define mmSDMA0_EA_DBIT_ADDR_DATA_BASE_IDX 0
++#define mmSDMA0_EA_DBIT_ADDR_INDEX 0x0061
++#define mmSDMA0_EA_DBIT_ADDR_INDEX_BASE_IDX 0
++#define mmSDMA0_GFX_RB_CNTL 0x0080
++#define mmSDMA0_GFX_RB_CNTL_BASE_IDX 0
++#define mmSDMA0_GFX_RB_BASE 0x0081
++#define mmSDMA0_GFX_RB_BASE_BASE_IDX 0
++#define mmSDMA0_GFX_RB_BASE_HI 0x0082
++#define mmSDMA0_GFX_RB_BASE_HI_BASE_IDX 0
++#define mmSDMA0_GFX_RB_RPTR 0x0083
++#define mmSDMA0_GFX_RB_RPTR_BASE_IDX 0
++#define mmSDMA0_GFX_RB_RPTR_HI 0x0084
++#define mmSDMA0_GFX_RB_RPTR_HI_BASE_IDX 0
++#define mmSDMA0_GFX_RB_WPTR 0x0085
++#define mmSDMA0_GFX_RB_WPTR_BASE_IDX 0
++#define mmSDMA0_GFX_RB_WPTR_HI 0x0086
++#define mmSDMA0_GFX_RB_WPTR_HI_BASE_IDX 0
++#define mmSDMA0_GFX_RB_WPTR_POLL_CNTL 0x0087
++#define mmSDMA0_GFX_RB_WPTR_POLL_CNTL_BASE_IDX 0
++#define mmSDMA0_GFX_RB_RPTR_ADDR_HI 0x0088
++#define mmSDMA0_GFX_RB_RPTR_ADDR_HI_BASE_IDX 0
++#define mmSDMA0_GFX_RB_RPTR_ADDR_LO 0x0089
++#define mmSDMA0_GFX_RB_RPTR_ADDR_LO_BASE_IDX 0
++#define mmSDMA0_GFX_IB_CNTL 0x008a
++#define mmSDMA0_GFX_IB_CNTL_BASE_IDX 0
++#define mmSDMA0_GFX_IB_RPTR 0x008b
++#define mmSDMA0_GFX_IB_RPTR_BASE_IDX 0
++#define mmSDMA0_GFX_IB_OFFSET 0x008c
++#define mmSDMA0_GFX_IB_OFFSET_BASE_IDX 0
++#define mmSDMA0_GFX_IB_BASE_LO 0x008d
++#define mmSDMA0_GFX_IB_BASE_LO_BASE_IDX 0
++#define mmSDMA0_GFX_IB_BASE_HI 0x008e
++#define mmSDMA0_GFX_IB_BASE_HI_BASE_IDX 0
++#define mmSDMA0_GFX_IB_SIZE 0x008f
++#define mmSDMA0_GFX_IB_SIZE_BASE_IDX 0
++#define mmSDMA0_GFX_SKIP_CNTL 0x0090
++#define mmSDMA0_GFX_SKIP_CNTL_BASE_IDX 0
++#define mmSDMA0_GFX_CONTEXT_STATUS 0x0091
++#define mmSDMA0_GFX_CONTEXT_STATUS_BASE_IDX 0
++#define mmSDMA0_GFX_DOORBELL 0x0092
++#define mmSDMA0_GFX_DOORBELL_BASE_IDX 0
++#define mmSDMA0_GFX_CONTEXT_CNTL 0x0093
++#define mmSDMA0_GFX_CONTEXT_CNTL_BASE_IDX 0
++#define mmSDMA0_GFX_STATUS 0x00a8
++#define mmSDMA0_GFX_STATUS_BASE_IDX 0
++#define mmSDMA0_GFX_DOORBELL_LOG 0x00a9
++#define mmSDMA0_GFX_DOORBELL_LOG_BASE_IDX 0
++#define mmSDMA0_GFX_WATERMARK 0x00aa
++#define mmSDMA0_GFX_WATERMARK_BASE_IDX 0
++#define mmSDMA0_GFX_DOORBELL_OFFSET 0x00ab
++#define mmSDMA0_GFX_DOORBELL_OFFSET_BASE_IDX 0
++#define mmSDMA0_GFX_CSA_ADDR_LO 0x00ac
++#define mmSDMA0_GFX_CSA_ADDR_LO_BASE_IDX 0
++#define mmSDMA0_GFX_CSA_ADDR_HI 0x00ad
++#define mmSDMA0_GFX_CSA_ADDR_HI_BASE_IDX 0
++#define mmSDMA0_GFX_IB_SUB_REMAIN 0x00af
++#define mmSDMA0_GFX_IB_SUB_REMAIN_BASE_IDX 0
++#define mmSDMA0_GFX_PREEMPT 0x00b0
++#define mmSDMA0_GFX_PREEMPT_BASE_IDX 0
++#define mmSDMA0_GFX_DUMMY_REG 0x00b1
++#define mmSDMA0_GFX_DUMMY_REG_BASE_IDX 0
++#define mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI 0x00b2
++#define mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
++#define mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO 0x00b3
++#define mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
++#define mmSDMA0_GFX_RB_AQL_CNTL 0x00b4
++#define mmSDMA0_GFX_RB_AQL_CNTL_BASE_IDX 0
++#define mmSDMA0_GFX_MINOR_PTR_UPDATE 0x00b5
++#define mmSDMA0_GFX_MINOR_PTR_UPDATE_BASE_IDX 0
++#define mmSDMA0_GFX_MIDCMD_DATA0 0x00c0
++#define mmSDMA0_GFX_MIDCMD_DATA0_BASE_IDX 0
++#define mmSDMA0_GFX_MIDCMD_DATA1 0x00c1
++#define mmSDMA0_GFX_MIDCMD_DATA1_BASE_IDX 0
++#define mmSDMA0_GFX_MIDCMD_DATA2 0x00c2
++#define mmSDMA0_GFX_MIDCMD_DATA2_BASE_IDX 0
++#define mmSDMA0_GFX_MIDCMD_DATA3 0x00c3
++#define mmSDMA0_GFX_MIDCMD_DATA3_BASE_IDX 0
++#define mmSDMA0_GFX_MIDCMD_DATA4 0x00c4
++#define mmSDMA0_GFX_MIDCMD_DATA4_BASE_IDX 0
++#define mmSDMA0_GFX_MIDCMD_DATA5 0x00c5
++#define mmSDMA0_GFX_MIDCMD_DATA5_BASE_IDX 0
++#define mmSDMA0_GFX_MIDCMD_DATA6 0x00c6
++#define mmSDMA0_GFX_MIDCMD_DATA6_BASE_IDX 0
++#define mmSDMA0_GFX_MIDCMD_DATA7 0x00c7
++#define mmSDMA0_GFX_MIDCMD_DATA7_BASE_IDX 0
++#define mmSDMA0_GFX_MIDCMD_DATA8 0x00c8
++#define mmSDMA0_GFX_MIDCMD_DATA8_BASE_IDX 0
++#define mmSDMA0_GFX_MIDCMD_CNTL 0x00c9
++#define mmSDMA0_GFX_MIDCMD_CNTL_BASE_IDX 0
++#define mmSDMA0_PAGE_RB_CNTL 0x00e0
++#define mmSDMA0_PAGE_RB_CNTL_BASE_IDX 0
++#define mmSDMA0_PAGE_RB_BASE 0x00e1
++#define mmSDMA0_PAGE_RB_BASE_BASE_IDX 0
++#define mmSDMA0_PAGE_RB_BASE_HI 0x00e2
++#define mmSDMA0_PAGE_RB_BASE_HI_BASE_IDX 0
++#define mmSDMA0_PAGE_RB_RPTR 0x00e3
++#define mmSDMA0_PAGE_RB_RPTR_BASE_IDX 0
++#define mmSDMA0_PAGE_RB_RPTR_HI 0x00e4
++#define mmSDMA0_PAGE_RB_RPTR_HI_BASE_IDX 0
++#define mmSDMA0_PAGE_RB_WPTR 0x00e5
++#define mmSDMA0_PAGE_RB_WPTR_BASE_IDX 0
++#define mmSDMA0_PAGE_RB_WPTR_HI 0x00e6
++#define mmSDMA0_PAGE_RB_WPTR_HI_BASE_IDX 0
++#define mmSDMA0_PAGE_RB_WPTR_POLL_CNTL 0x00e7
++#define mmSDMA0_PAGE_RB_WPTR_POLL_CNTL_BASE_IDX 0
++#define mmSDMA0_PAGE_RB_RPTR_ADDR_HI 0x00e8
++#define mmSDMA0_PAGE_RB_RPTR_ADDR_HI_BASE_IDX 0
++#define mmSDMA0_PAGE_RB_RPTR_ADDR_LO 0x00e9
++#define mmSDMA0_PAGE_RB_RPTR_ADDR_LO_BASE_IDX 0
++#define mmSDMA0_PAGE_IB_CNTL 0x00ea
++#define mmSDMA0_PAGE_IB_CNTL_BASE_IDX 0
++#define mmSDMA0_PAGE_IB_RPTR 0x00eb
++#define mmSDMA0_PAGE_IB_RPTR_BASE_IDX 0
++#define mmSDMA0_PAGE_IB_OFFSET 0x00ec
++#define mmSDMA0_PAGE_IB_OFFSET_BASE_IDX 0
++#define mmSDMA0_PAGE_IB_BASE_LO 0x00ed
++#define mmSDMA0_PAGE_IB_BASE_LO_BASE_IDX 0
++#define mmSDMA0_PAGE_IB_BASE_HI 0x00ee
++#define mmSDMA0_PAGE_IB_BASE_HI_BASE_IDX 0
++#define mmSDMA0_PAGE_IB_SIZE 0x00ef
++#define mmSDMA0_PAGE_IB_SIZE_BASE_IDX 0
++#define mmSDMA0_PAGE_SKIP_CNTL 0x00f0
++#define mmSDMA0_PAGE_SKIP_CNTL_BASE_IDX 0
++#define mmSDMA0_PAGE_CONTEXT_STATUS 0x00f1
++#define mmSDMA0_PAGE_CONTEXT_STATUS_BASE_IDX 0
++#define mmSDMA0_PAGE_DOORBELL 0x00f2
++#define mmSDMA0_PAGE_DOORBELL_BASE_IDX 0
++#define mmSDMA0_PAGE_STATUS 0x0108
++#define mmSDMA0_PAGE_STATUS_BASE_IDX 0
++#define mmSDMA0_PAGE_DOORBELL_LOG 0x0109
++#define mmSDMA0_PAGE_DOORBELL_LOG_BASE_IDX 0
++#define mmSDMA0_PAGE_WATERMARK 0x010a
++#define mmSDMA0_PAGE_WATERMARK_BASE_IDX 0
++#define mmSDMA0_PAGE_DOORBELL_OFFSET 0x010b
++#define mmSDMA0_PAGE_DOORBELL_OFFSET_BASE_IDX 0
++#define mmSDMA0_PAGE_CSA_ADDR_LO 0x010c
++#define mmSDMA0_PAGE_CSA_ADDR_LO_BASE_IDX 0
++#define mmSDMA0_PAGE_CSA_ADDR_HI 0x010d
++#define mmSDMA0_PAGE_CSA_ADDR_HI_BASE_IDX 0
++#define mmSDMA0_PAGE_IB_SUB_REMAIN 0x010f
++#define mmSDMA0_PAGE_IB_SUB_REMAIN_BASE_IDX 0
++#define mmSDMA0_PAGE_PREEMPT 0x0110
++#define mmSDMA0_PAGE_PREEMPT_BASE_IDX 0
++#define mmSDMA0_PAGE_DUMMY_REG 0x0111
++#define mmSDMA0_PAGE_DUMMY_REG_BASE_IDX 0
++#define mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_HI 0x0112
++#define mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
++#define mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_LO 0x0113
++#define mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
++#define mmSDMA0_PAGE_RB_AQL_CNTL 0x0114
++#define mmSDMA0_PAGE_RB_AQL_CNTL_BASE_IDX 0
++#define mmSDMA0_PAGE_MINOR_PTR_UPDATE 0x0115
++#define mmSDMA0_PAGE_MINOR_PTR_UPDATE_BASE_IDX 0
++#define mmSDMA0_PAGE_MIDCMD_DATA0 0x0120
++#define mmSDMA0_PAGE_MIDCMD_DATA0_BASE_IDX 0
++#define mmSDMA0_PAGE_MIDCMD_DATA1 0x0121
++#define mmSDMA0_PAGE_MIDCMD_DATA1_BASE_IDX 0
++#define mmSDMA0_PAGE_MIDCMD_DATA2 0x0122
++#define mmSDMA0_PAGE_MIDCMD_DATA2_BASE_IDX 0
++#define mmSDMA0_PAGE_MIDCMD_DATA3 0x0123
++#define mmSDMA0_PAGE_MIDCMD_DATA3_BASE_IDX 0
++#define mmSDMA0_PAGE_MIDCMD_DATA4 0x0124
++#define mmSDMA0_PAGE_MIDCMD_DATA4_BASE_IDX 0
++#define mmSDMA0_PAGE_MIDCMD_DATA5 0x0125
++#define mmSDMA0_PAGE_MIDCMD_DATA5_BASE_IDX 0
++#define mmSDMA0_PAGE_MIDCMD_DATA6 0x0126
++#define mmSDMA0_PAGE_MIDCMD_DATA6_BASE_IDX 0
++#define mmSDMA0_PAGE_MIDCMD_DATA7 0x0127
++#define mmSDMA0_PAGE_MIDCMD_DATA7_BASE_IDX 0
++#define mmSDMA0_PAGE_MIDCMD_DATA8 0x0128
++#define mmSDMA0_PAGE_MIDCMD_DATA8_BASE_IDX 0
++#define mmSDMA0_PAGE_MIDCMD_CNTL 0x0129
++#define mmSDMA0_PAGE_MIDCMD_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC0_RB_CNTL 0x0140
++#define mmSDMA0_RLC0_RB_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC0_RB_BASE 0x0141
++#define mmSDMA0_RLC0_RB_BASE_BASE_IDX 0
++#define mmSDMA0_RLC0_RB_BASE_HI 0x0142
++#define mmSDMA0_RLC0_RB_BASE_HI_BASE_IDX 0
++#define mmSDMA0_RLC0_RB_RPTR 0x0143
++#define mmSDMA0_RLC0_RB_RPTR_BASE_IDX 0
++#define mmSDMA0_RLC0_RB_RPTR_HI 0x0144
++#define mmSDMA0_RLC0_RB_RPTR_HI_BASE_IDX 0
++#define mmSDMA0_RLC0_RB_WPTR 0x0145
++#define mmSDMA0_RLC0_RB_WPTR_BASE_IDX 0
++#define mmSDMA0_RLC0_RB_WPTR_HI 0x0146
++#define mmSDMA0_RLC0_RB_WPTR_HI_BASE_IDX 0
++#define mmSDMA0_RLC0_RB_WPTR_POLL_CNTL 0x0147
++#define mmSDMA0_RLC0_RB_WPTR_POLL_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC0_RB_RPTR_ADDR_HI 0x0148
++#define mmSDMA0_RLC0_RB_RPTR_ADDR_HI_BASE_IDX 0
++#define mmSDMA0_RLC0_RB_RPTR_ADDR_LO 0x0149
++#define mmSDMA0_RLC0_RB_RPTR_ADDR_LO_BASE_IDX 0
++#define mmSDMA0_RLC0_IB_CNTL 0x014a
++#define mmSDMA0_RLC0_IB_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC0_IB_RPTR 0x014b
++#define mmSDMA0_RLC0_IB_RPTR_BASE_IDX 0
++#define mmSDMA0_RLC0_IB_OFFSET 0x014c
++#define mmSDMA0_RLC0_IB_OFFSET_BASE_IDX 0
++#define mmSDMA0_RLC0_IB_BASE_LO 0x014d
++#define mmSDMA0_RLC0_IB_BASE_LO_BASE_IDX 0
++#define mmSDMA0_RLC0_IB_BASE_HI 0x014e
++#define mmSDMA0_RLC0_IB_BASE_HI_BASE_IDX 0
++#define mmSDMA0_RLC0_IB_SIZE 0x014f
++#define mmSDMA0_RLC0_IB_SIZE_BASE_IDX 0
++#define mmSDMA0_RLC0_SKIP_CNTL 0x0150
++#define mmSDMA0_RLC0_SKIP_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC0_CONTEXT_STATUS 0x0151
++#define mmSDMA0_RLC0_CONTEXT_STATUS_BASE_IDX 0
++#define mmSDMA0_RLC0_DOORBELL 0x0152
++#define mmSDMA0_RLC0_DOORBELL_BASE_IDX 0
++#define mmSDMA0_RLC0_STATUS 0x0168
++#define mmSDMA0_RLC0_STATUS_BASE_IDX 0
++#define mmSDMA0_RLC0_DOORBELL_LOG 0x0169
++#define mmSDMA0_RLC0_DOORBELL_LOG_BASE_IDX 0
++#define mmSDMA0_RLC0_WATERMARK 0x016a
++#define mmSDMA0_RLC0_WATERMARK_BASE_IDX 0
++#define mmSDMA0_RLC0_DOORBELL_OFFSET 0x016b
++#define mmSDMA0_RLC0_DOORBELL_OFFSET_BASE_IDX 0
++#define mmSDMA0_RLC0_CSA_ADDR_LO 0x016c
++#define mmSDMA0_RLC0_CSA_ADDR_LO_BASE_IDX 0
++#define mmSDMA0_RLC0_CSA_ADDR_HI 0x016d
++#define mmSDMA0_RLC0_CSA_ADDR_HI_BASE_IDX 0
++#define mmSDMA0_RLC0_IB_SUB_REMAIN 0x016f
++#define mmSDMA0_RLC0_IB_SUB_REMAIN_BASE_IDX 0
++#define mmSDMA0_RLC0_PREEMPT 0x0170
++#define mmSDMA0_RLC0_PREEMPT_BASE_IDX 0
++#define mmSDMA0_RLC0_DUMMY_REG 0x0171
++#define mmSDMA0_RLC0_DUMMY_REG_BASE_IDX 0
++#define mmSDMA0_RLC0_RB_WPTR_POLL_ADDR_HI 0x0172
++#define mmSDMA0_RLC0_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
++#define mmSDMA0_RLC0_RB_WPTR_POLL_ADDR_LO 0x0173
++#define mmSDMA0_RLC0_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
++#define mmSDMA0_RLC0_RB_AQL_CNTL 0x0174
++#define mmSDMA0_RLC0_RB_AQL_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC0_MINOR_PTR_UPDATE 0x0175
++#define mmSDMA0_RLC0_MINOR_PTR_UPDATE_BASE_IDX 0
++#define mmSDMA0_RLC0_MIDCMD_DATA0 0x0180
++#define mmSDMA0_RLC0_MIDCMD_DATA0_BASE_IDX 0
++#define mmSDMA0_RLC0_MIDCMD_DATA1 0x0181
++#define mmSDMA0_RLC0_MIDCMD_DATA1_BASE_IDX 0
++#define mmSDMA0_RLC0_MIDCMD_DATA2 0x0182
++#define mmSDMA0_RLC0_MIDCMD_DATA2_BASE_IDX 0
++#define mmSDMA0_RLC0_MIDCMD_DATA3 0x0183
++#define mmSDMA0_RLC0_MIDCMD_DATA3_BASE_IDX 0
++#define mmSDMA0_RLC0_MIDCMD_DATA4 0x0184
++#define mmSDMA0_RLC0_MIDCMD_DATA4_BASE_IDX 0
++#define mmSDMA0_RLC0_MIDCMD_DATA5 0x0185
++#define mmSDMA0_RLC0_MIDCMD_DATA5_BASE_IDX 0
++#define mmSDMA0_RLC0_MIDCMD_DATA6 0x0186
++#define mmSDMA0_RLC0_MIDCMD_DATA6_BASE_IDX 0
++#define mmSDMA0_RLC0_MIDCMD_DATA7 0x0187
++#define mmSDMA0_RLC0_MIDCMD_DATA7_BASE_IDX 0
++#define mmSDMA0_RLC0_MIDCMD_DATA8 0x0188
++#define mmSDMA0_RLC0_MIDCMD_DATA8_BASE_IDX 0
++#define mmSDMA0_RLC0_MIDCMD_CNTL 0x0189
++#define mmSDMA0_RLC0_MIDCMD_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC1_RB_CNTL 0x01a0
++#define mmSDMA0_RLC1_RB_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC1_RB_BASE 0x01a1
++#define mmSDMA0_RLC1_RB_BASE_BASE_IDX 0
++#define mmSDMA0_RLC1_RB_BASE_HI 0x01a2
++#define mmSDMA0_RLC1_RB_BASE_HI_BASE_IDX 0
++#define mmSDMA0_RLC1_RB_RPTR 0x01a3
++#define mmSDMA0_RLC1_RB_RPTR_BASE_IDX 0
++#define mmSDMA0_RLC1_RB_RPTR_HI 0x01a4
++#define mmSDMA0_RLC1_RB_RPTR_HI_BASE_IDX 0
++#define mmSDMA0_RLC1_RB_WPTR 0x01a5
++#define mmSDMA0_RLC1_RB_WPTR_BASE_IDX 0
++#define mmSDMA0_RLC1_RB_WPTR_HI 0x01a6
++#define mmSDMA0_RLC1_RB_WPTR_HI_BASE_IDX 0
++#define mmSDMA0_RLC1_RB_WPTR_POLL_CNTL 0x01a7
++#define mmSDMA0_RLC1_RB_WPTR_POLL_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC1_RB_RPTR_ADDR_HI 0x01a8
++#define mmSDMA0_RLC1_RB_RPTR_ADDR_HI_BASE_IDX 0
++#define mmSDMA0_RLC1_RB_RPTR_ADDR_LO 0x01a9
++#define mmSDMA0_RLC1_RB_RPTR_ADDR_LO_BASE_IDX 0
++#define mmSDMA0_RLC1_IB_CNTL 0x01aa
++#define mmSDMA0_RLC1_IB_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC1_IB_RPTR 0x01ab
++#define mmSDMA0_RLC1_IB_RPTR_BASE_IDX 0
++#define mmSDMA0_RLC1_IB_OFFSET 0x01ac
++#define mmSDMA0_RLC1_IB_OFFSET_BASE_IDX 0
++#define mmSDMA0_RLC1_IB_BASE_LO 0x01ad
++#define mmSDMA0_RLC1_IB_BASE_LO_BASE_IDX 0
++#define mmSDMA0_RLC1_IB_BASE_HI 0x01ae
++#define mmSDMA0_RLC1_IB_BASE_HI_BASE_IDX 0
++#define mmSDMA0_RLC1_IB_SIZE 0x01af
++#define mmSDMA0_RLC1_IB_SIZE_BASE_IDX 0
++#define mmSDMA0_RLC1_SKIP_CNTL 0x01b0
++#define mmSDMA0_RLC1_SKIP_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC1_CONTEXT_STATUS 0x01b1
++#define mmSDMA0_RLC1_CONTEXT_STATUS_BASE_IDX 0
++#define mmSDMA0_RLC1_DOORBELL 0x01b2
++#define mmSDMA0_RLC1_DOORBELL_BASE_IDX 0
++#define mmSDMA0_RLC1_STATUS 0x01c8
++#define mmSDMA0_RLC1_STATUS_BASE_IDX 0
++#define mmSDMA0_RLC1_DOORBELL_LOG 0x01c9
++#define mmSDMA0_RLC1_DOORBELL_LOG_BASE_IDX 0
++#define mmSDMA0_RLC1_WATERMARK 0x01ca
++#define mmSDMA0_RLC1_WATERMARK_BASE_IDX 0
++#define mmSDMA0_RLC1_DOORBELL_OFFSET 0x01cb
++#define mmSDMA0_RLC1_DOORBELL_OFFSET_BASE_IDX 0
++#define mmSDMA0_RLC1_CSA_ADDR_LO 0x01cc
++#define mmSDMA0_RLC1_CSA_ADDR_LO_BASE_IDX 0
++#define mmSDMA0_RLC1_CSA_ADDR_HI 0x01cd
++#define mmSDMA0_RLC1_CSA_ADDR_HI_BASE_IDX 0
++#define mmSDMA0_RLC1_IB_SUB_REMAIN 0x01cf
++#define mmSDMA0_RLC1_IB_SUB_REMAIN_BASE_IDX 0
++#define mmSDMA0_RLC1_PREEMPT 0x01d0
++#define mmSDMA0_RLC1_PREEMPT_BASE_IDX 0
++#define mmSDMA0_RLC1_DUMMY_REG 0x01d1
++#define mmSDMA0_RLC1_DUMMY_REG_BASE_IDX 0
++#define mmSDMA0_RLC1_RB_WPTR_POLL_ADDR_HI 0x01d2
++#define mmSDMA0_RLC1_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
++#define mmSDMA0_RLC1_RB_WPTR_POLL_ADDR_LO 0x01d3
++#define mmSDMA0_RLC1_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
++#define mmSDMA0_RLC1_RB_AQL_CNTL 0x01d4
++#define mmSDMA0_RLC1_RB_AQL_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC1_MINOR_PTR_UPDATE 0x01d5
++#define mmSDMA0_RLC1_MINOR_PTR_UPDATE_BASE_IDX 0
++#define mmSDMA0_RLC1_MIDCMD_DATA0 0x01e0
++#define mmSDMA0_RLC1_MIDCMD_DATA0_BASE_IDX 0
++#define mmSDMA0_RLC1_MIDCMD_DATA1 0x01e1
++#define mmSDMA0_RLC1_MIDCMD_DATA1_BASE_IDX 0
++#define mmSDMA0_RLC1_MIDCMD_DATA2 0x01e2
++#define mmSDMA0_RLC1_MIDCMD_DATA2_BASE_IDX 0
++#define mmSDMA0_RLC1_MIDCMD_DATA3 0x01e3
++#define mmSDMA0_RLC1_MIDCMD_DATA3_BASE_IDX 0
++#define mmSDMA0_RLC1_MIDCMD_DATA4 0x01e4
++#define mmSDMA0_RLC1_MIDCMD_DATA4_BASE_IDX 0
++#define mmSDMA0_RLC1_MIDCMD_DATA5 0x01e5
++#define mmSDMA0_RLC1_MIDCMD_DATA5_BASE_IDX 0
++#define mmSDMA0_RLC1_MIDCMD_DATA6 0x01e6
++#define mmSDMA0_RLC1_MIDCMD_DATA6_BASE_IDX 0
++#define mmSDMA0_RLC1_MIDCMD_DATA7 0x01e7
++#define mmSDMA0_RLC1_MIDCMD_DATA7_BASE_IDX 0
++#define mmSDMA0_RLC1_MIDCMD_DATA8 0x01e8
++#define mmSDMA0_RLC1_MIDCMD_DATA8_BASE_IDX 0
++#define mmSDMA0_RLC1_MIDCMD_CNTL 0x01e9
++#define mmSDMA0_RLC1_MIDCMD_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC2_RB_CNTL 0x0200
++#define mmSDMA0_RLC2_RB_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC2_RB_BASE 0x0201
++#define mmSDMA0_RLC2_RB_BASE_BASE_IDX 0
++#define mmSDMA0_RLC2_RB_BASE_HI 0x0202
++#define mmSDMA0_RLC2_RB_BASE_HI_BASE_IDX 0
++#define mmSDMA0_RLC2_RB_RPTR 0x0203
++#define mmSDMA0_RLC2_RB_RPTR_BASE_IDX 0
++#define mmSDMA0_RLC2_RB_RPTR_HI 0x0204
++#define mmSDMA0_RLC2_RB_RPTR_HI_BASE_IDX 0
++#define mmSDMA0_RLC2_RB_WPTR 0x0205
++#define mmSDMA0_RLC2_RB_WPTR_BASE_IDX 0
++#define mmSDMA0_RLC2_RB_WPTR_HI 0x0206
++#define mmSDMA0_RLC2_RB_WPTR_HI_BASE_IDX 0
++#define mmSDMA0_RLC2_RB_WPTR_POLL_CNTL 0x0207
++#define mmSDMA0_RLC2_RB_WPTR_POLL_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC2_RB_RPTR_ADDR_HI 0x0208
++#define mmSDMA0_RLC2_RB_RPTR_ADDR_HI_BASE_IDX 0
++#define mmSDMA0_RLC2_RB_RPTR_ADDR_LO 0x0209
++#define mmSDMA0_RLC2_RB_RPTR_ADDR_LO_BASE_IDX 0
++#define mmSDMA0_RLC2_IB_CNTL 0x020a
++#define mmSDMA0_RLC2_IB_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC2_IB_RPTR 0x020b
++#define mmSDMA0_RLC2_IB_RPTR_BASE_IDX 0
++#define mmSDMA0_RLC2_IB_OFFSET 0x020c
++#define mmSDMA0_RLC2_IB_OFFSET_BASE_IDX 0
++#define mmSDMA0_RLC2_IB_BASE_LO 0x020d
++#define mmSDMA0_RLC2_IB_BASE_LO_BASE_IDX 0
++#define mmSDMA0_RLC2_IB_BASE_HI 0x020e
++#define mmSDMA0_RLC2_IB_BASE_HI_BASE_IDX 0
++#define mmSDMA0_RLC2_IB_SIZE 0x020f
++#define mmSDMA0_RLC2_IB_SIZE_BASE_IDX 0
++#define mmSDMA0_RLC2_SKIP_CNTL 0x0210
++#define mmSDMA0_RLC2_SKIP_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC2_CONTEXT_STATUS 0x0211
++#define mmSDMA0_RLC2_CONTEXT_STATUS_BASE_IDX 0
++#define mmSDMA0_RLC2_DOORBELL 0x0212
++#define mmSDMA0_RLC2_DOORBELL_BASE_IDX 0
++#define mmSDMA0_RLC2_STATUS 0x0228
++#define mmSDMA0_RLC2_STATUS_BASE_IDX 0
++#define mmSDMA0_RLC2_DOORBELL_LOG 0x0229
++#define mmSDMA0_RLC2_DOORBELL_LOG_BASE_IDX 0
++#define mmSDMA0_RLC2_WATERMARK 0x022a
++#define mmSDMA0_RLC2_WATERMARK_BASE_IDX 0
++#define mmSDMA0_RLC2_DOORBELL_OFFSET 0x022b
++#define mmSDMA0_RLC2_DOORBELL_OFFSET_BASE_IDX 0
++#define mmSDMA0_RLC2_CSA_ADDR_LO 0x022c
++#define mmSDMA0_RLC2_CSA_ADDR_LO_BASE_IDX 0
++#define mmSDMA0_RLC2_CSA_ADDR_HI 0x022d
++#define mmSDMA0_RLC2_CSA_ADDR_HI_BASE_IDX 0
++#define mmSDMA0_RLC2_IB_SUB_REMAIN 0x022f
++#define mmSDMA0_RLC2_IB_SUB_REMAIN_BASE_IDX 0
++#define mmSDMA0_RLC2_PREEMPT 0x0230
++#define mmSDMA0_RLC2_PREEMPT_BASE_IDX 0
++#define mmSDMA0_RLC2_DUMMY_REG 0x0231
++#define mmSDMA0_RLC2_DUMMY_REG_BASE_IDX 0
++#define mmSDMA0_RLC2_RB_WPTR_POLL_ADDR_HI 0x0232
++#define mmSDMA0_RLC2_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
++#define mmSDMA0_RLC2_RB_WPTR_POLL_ADDR_LO 0x0233
++#define mmSDMA0_RLC2_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
++#define mmSDMA0_RLC2_RB_AQL_CNTL 0x0234
++#define mmSDMA0_RLC2_RB_AQL_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC2_MINOR_PTR_UPDATE 0x0235
++#define mmSDMA0_RLC2_MINOR_PTR_UPDATE_BASE_IDX 0
++#define mmSDMA0_RLC2_MIDCMD_DATA0 0x0240
++#define mmSDMA0_RLC2_MIDCMD_DATA0_BASE_IDX 0
++#define mmSDMA0_RLC2_MIDCMD_DATA1 0x0241
++#define mmSDMA0_RLC2_MIDCMD_DATA1_BASE_IDX 0
++#define mmSDMA0_RLC2_MIDCMD_DATA2 0x0242
++#define mmSDMA0_RLC2_MIDCMD_DATA2_BASE_IDX 0
++#define mmSDMA0_RLC2_MIDCMD_DATA3 0x0243
++#define mmSDMA0_RLC2_MIDCMD_DATA3_BASE_IDX 0
++#define mmSDMA0_RLC2_MIDCMD_DATA4 0x0244
++#define mmSDMA0_RLC2_MIDCMD_DATA4_BASE_IDX 0
++#define mmSDMA0_RLC2_MIDCMD_DATA5 0x0245
++#define mmSDMA0_RLC2_MIDCMD_DATA5_BASE_IDX 0
++#define mmSDMA0_RLC2_MIDCMD_DATA6 0x0246
++#define mmSDMA0_RLC2_MIDCMD_DATA6_BASE_IDX 0
++#define mmSDMA0_RLC2_MIDCMD_DATA7 0x0247
++#define mmSDMA0_RLC2_MIDCMD_DATA7_BASE_IDX 0
++#define mmSDMA0_RLC2_MIDCMD_DATA8 0x0248
++#define mmSDMA0_RLC2_MIDCMD_DATA8_BASE_IDX 0
++#define mmSDMA0_RLC2_MIDCMD_CNTL 0x0249
++#define mmSDMA0_RLC2_MIDCMD_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC3_RB_CNTL 0x0260
++#define mmSDMA0_RLC3_RB_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC3_RB_BASE 0x0261
++#define mmSDMA0_RLC3_RB_BASE_BASE_IDX 0
++#define mmSDMA0_RLC3_RB_BASE_HI 0x0262
++#define mmSDMA0_RLC3_RB_BASE_HI_BASE_IDX 0
++#define mmSDMA0_RLC3_RB_RPTR 0x0263
++#define mmSDMA0_RLC3_RB_RPTR_BASE_IDX 0
++#define mmSDMA0_RLC3_RB_RPTR_HI 0x0264
++#define mmSDMA0_RLC3_RB_RPTR_HI_BASE_IDX 0
++#define mmSDMA0_RLC3_RB_WPTR 0x0265
++#define mmSDMA0_RLC3_RB_WPTR_BASE_IDX 0
++#define mmSDMA0_RLC3_RB_WPTR_HI 0x0266
++#define mmSDMA0_RLC3_RB_WPTR_HI_BASE_IDX 0
++#define mmSDMA0_RLC3_RB_WPTR_POLL_CNTL 0x0267
++#define mmSDMA0_RLC3_RB_WPTR_POLL_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC3_RB_RPTR_ADDR_HI 0x0268
++#define mmSDMA0_RLC3_RB_RPTR_ADDR_HI_BASE_IDX 0
++#define mmSDMA0_RLC3_RB_RPTR_ADDR_LO 0x0269
++#define mmSDMA0_RLC3_RB_RPTR_ADDR_LO_BASE_IDX 0
++#define mmSDMA0_RLC3_IB_CNTL 0x026a
++#define mmSDMA0_RLC3_IB_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC3_IB_RPTR 0x026b
++#define mmSDMA0_RLC3_IB_RPTR_BASE_IDX 0
++#define mmSDMA0_RLC3_IB_OFFSET 0x026c
++#define mmSDMA0_RLC3_IB_OFFSET_BASE_IDX 0
++#define mmSDMA0_RLC3_IB_BASE_LO 0x026d
++#define mmSDMA0_RLC3_IB_BASE_LO_BASE_IDX 0
++#define mmSDMA0_RLC3_IB_BASE_HI 0x026e
++#define mmSDMA0_RLC3_IB_BASE_HI_BASE_IDX 0
++#define mmSDMA0_RLC3_IB_SIZE 0x026f
++#define mmSDMA0_RLC3_IB_SIZE_BASE_IDX 0
++#define mmSDMA0_RLC3_SKIP_CNTL 0x0270
++#define mmSDMA0_RLC3_SKIP_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC3_CONTEXT_STATUS 0x0271
++#define mmSDMA0_RLC3_CONTEXT_STATUS_BASE_IDX 0
++#define mmSDMA0_RLC3_DOORBELL 0x0272
++#define mmSDMA0_RLC3_DOORBELL_BASE_IDX 0
++#define mmSDMA0_RLC3_STATUS 0x0288
++#define mmSDMA0_RLC3_STATUS_BASE_IDX 0
++#define mmSDMA0_RLC3_DOORBELL_LOG 0x0289
++#define mmSDMA0_RLC3_DOORBELL_LOG_BASE_IDX 0
++#define mmSDMA0_RLC3_WATERMARK 0x028a
++#define mmSDMA0_RLC3_WATERMARK_BASE_IDX 0
++#define mmSDMA0_RLC3_DOORBELL_OFFSET 0x028b
++#define mmSDMA0_RLC3_DOORBELL_OFFSET_BASE_IDX 0
++#define mmSDMA0_RLC3_CSA_ADDR_LO 0x028c
++#define mmSDMA0_RLC3_CSA_ADDR_LO_BASE_IDX 0
++#define mmSDMA0_RLC3_CSA_ADDR_HI 0x028d
++#define mmSDMA0_RLC3_CSA_ADDR_HI_BASE_IDX 0
++#define mmSDMA0_RLC3_IB_SUB_REMAIN 0x028f
++#define mmSDMA0_RLC3_IB_SUB_REMAIN_BASE_IDX 0
++#define mmSDMA0_RLC3_PREEMPT 0x0290
++#define mmSDMA0_RLC3_PREEMPT_BASE_IDX 0
++#define mmSDMA0_RLC3_DUMMY_REG 0x0291
++#define mmSDMA0_RLC3_DUMMY_REG_BASE_IDX 0
++#define mmSDMA0_RLC3_RB_WPTR_POLL_ADDR_HI 0x0292
++#define mmSDMA0_RLC3_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
++#define mmSDMA0_RLC3_RB_WPTR_POLL_ADDR_LO 0x0293
++#define mmSDMA0_RLC3_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
++#define mmSDMA0_RLC3_RB_AQL_CNTL 0x0294
++#define mmSDMA0_RLC3_RB_AQL_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC3_MINOR_PTR_UPDATE 0x0295
++#define mmSDMA0_RLC3_MINOR_PTR_UPDATE_BASE_IDX 0
++#define mmSDMA0_RLC3_MIDCMD_DATA0 0x02a0
++#define mmSDMA0_RLC3_MIDCMD_DATA0_BASE_IDX 0
++#define mmSDMA0_RLC3_MIDCMD_DATA1 0x02a1
++#define mmSDMA0_RLC3_MIDCMD_DATA1_BASE_IDX 0
++#define mmSDMA0_RLC3_MIDCMD_DATA2 0x02a2
++#define mmSDMA0_RLC3_MIDCMD_DATA2_BASE_IDX 0
++#define mmSDMA0_RLC3_MIDCMD_DATA3 0x02a3
++#define mmSDMA0_RLC3_MIDCMD_DATA3_BASE_IDX 0
++#define mmSDMA0_RLC3_MIDCMD_DATA4 0x02a4
++#define mmSDMA0_RLC3_MIDCMD_DATA4_BASE_IDX 0
++#define mmSDMA0_RLC3_MIDCMD_DATA5 0x02a5
++#define mmSDMA0_RLC3_MIDCMD_DATA5_BASE_IDX 0
++#define mmSDMA0_RLC3_MIDCMD_DATA6 0x02a6
++#define mmSDMA0_RLC3_MIDCMD_DATA6_BASE_IDX 0
++#define mmSDMA0_RLC3_MIDCMD_DATA7 0x02a7
++#define mmSDMA0_RLC3_MIDCMD_DATA7_BASE_IDX 0
++#define mmSDMA0_RLC3_MIDCMD_DATA8 0x02a8
++#define mmSDMA0_RLC3_MIDCMD_DATA8_BASE_IDX 0
++#define mmSDMA0_RLC3_MIDCMD_CNTL 0x02a9
++#define mmSDMA0_RLC3_MIDCMD_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC4_RB_CNTL 0x02c0
++#define mmSDMA0_RLC4_RB_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC4_RB_BASE 0x02c1
++#define mmSDMA0_RLC4_RB_BASE_BASE_IDX 0
++#define mmSDMA0_RLC4_RB_BASE_HI 0x02c2
++#define mmSDMA0_RLC4_RB_BASE_HI_BASE_IDX 0
++#define mmSDMA0_RLC4_RB_RPTR 0x02c3
++#define mmSDMA0_RLC4_RB_RPTR_BASE_IDX 0
++#define mmSDMA0_RLC4_RB_RPTR_HI 0x02c4
++#define mmSDMA0_RLC4_RB_RPTR_HI_BASE_IDX 0
++#define mmSDMA0_RLC4_RB_WPTR 0x02c5
++#define mmSDMA0_RLC4_RB_WPTR_BASE_IDX 0
++#define mmSDMA0_RLC4_RB_WPTR_HI 0x02c6
++#define mmSDMA0_RLC4_RB_WPTR_HI_BASE_IDX 0
++#define mmSDMA0_RLC4_RB_WPTR_POLL_CNTL 0x02c7
++#define mmSDMA0_RLC4_RB_WPTR_POLL_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC4_RB_RPTR_ADDR_HI 0x02c8
++#define mmSDMA0_RLC4_RB_RPTR_ADDR_HI_BASE_IDX 0
++#define mmSDMA0_RLC4_RB_RPTR_ADDR_LO 0x02c9
++#define mmSDMA0_RLC4_RB_RPTR_ADDR_LO_BASE_IDX 0
++#define mmSDMA0_RLC4_IB_CNTL 0x02ca
++#define mmSDMA0_RLC4_IB_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC4_IB_RPTR 0x02cb
++#define mmSDMA0_RLC4_IB_RPTR_BASE_IDX 0
++#define mmSDMA0_RLC4_IB_OFFSET 0x02cc
++#define mmSDMA0_RLC4_IB_OFFSET_BASE_IDX 0
++#define mmSDMA0_RLC4_IB_BASE_LO 0x02cd
++#define mmSDMA0_RLC4_IB_BASE_LO_BASE_IDX 0
++#define mmSDMA0_RLC4_IB_BASE_HI 0x02ce
++#define mmSDMA0_RLC4_IB_BASE_HI_BASE_IDX 0
++#define mmSDMA0_RLC4_IB_SIZE 0x02cf
++#define mmSDMA0_RLC4_IB_SIZE_BASE_IDX 0
++#define mmSDMA0_RLC4_SKIP_CNTL 0x02d0
++#define mmSDMA0_RLC4_SKIP_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC4_CONTEXT_STATUS 0x02d1
++#define mmSDMA0_RLC4_CONTEXT_STATUS_BASE_IDX 0
++#define mmSDMA0_RLC4_DOORBELL 0x02d2
++#define mmSDMA0_RLC4_DOORBELL_BASE_IDX 0
++#define mmSDMA0_RLC4_STATUS 0x02e8
++#define mmSDMA0_RLC4_STATUS_BASE_IDX 0
++#define mmSDMA0_RLC4_DOORBELL_LOG 0x02e9
++#define mmSDMA0_RLC4_DOORBELL_LOG_BASE_IDX 0
++#define mmSDMA0_RLC4_WATERMARK 0x02ea
++#define mmSDMA0_RLC4_WATERMARK_BASE_IDX 0
++#define mmSDMA0_RLC4_DOORBELL_OFFSET 0x02eb
++#define mmSDMA0_RLC4_DOORBELL_OFFSET_BASE_IDX 0
++#define mmSDMA0_RLC4_CSA_ADDR_LO 0x02ec
++#define mmSDMA0_RLC4_CSA_ADDR_LO_BASE_IDX 0
++#define mmSDMA0_RLC4_CSA_ADDR_HI 0x02ed
++#define mmSDMA0_RLC4_CSA_ADDR_HI_BASE_IDX 0
++#define mmSDMA0_RLC4_IB_SUB_REMAIN 0x02ef
++#define mmSDMA0_RLC4_IB_SUB_REMAIN_BASE_IDX 0
++#define mmSDMA0_RLC4_PREEMPT 0x02f0
++#define mmSDMA0_RLC4_PREEMPT_BASE_IDX 0
++#define mmSDMA0_RLC4_DUMMY_REG 0x02f1
++#define mmSDMA0_RLC4_DUMMY_REG_BASE_IDX 0
++#define mmSDMA0_RLC4_RB_WPTR_POLL_ADDR_HI 0x02f2
++#define mmSDMA0_RLC4_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
++#define mmSDMA0_RLC4_RB_WPTR_POLL_ADDR_LO 0x02f3
++#define mmSDMA0_RLC4_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
++#define mmSDMA0_RLC4_RB_AQL_CNTL 0x02f4
++#define mmSDMA0_RLC4_RB_AQL_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC4_MINOR_PTR_UPDATE 0x02f5
++#define mmSDMA0_RLC4_MINOR_PTR_UPDATE_BASE_IDX 0
++#define mmSDMA0_RLC4_MIDCMD_DATA0 0x0300
++#define mmSDMA0_RLC4_MIDCMD_DATA0_BASE_IDX 0
++#define mmSDMA0_RLC4_MIDCMD_DATA1 0x0301
++#define mmSDMA0_RLC4_MIDCMD_DATA1_BASE_IDX 0
++#define mmSDMA0_RLC4_MIDCMD_DATA2 0x0302
++#define mmSDMA0_RLC4_MIDCMD_DATA2_BASE_IDX 0
++#define mmSDMA0_RLC4_MIDCMD_DATA3 0x0303
++#define mmSDMA0_RLC4_MIDCMD_DATA3_BASE_IDX 0
++#define mmSDMA0_RLC4_MIDCMD_DATA4 0x0304
++#define mmSDMA0_RLC4_MIDCMD_DATA4_BASE_IDX 0
++#define mmSDMA0_RLC4_MIDCMD_DATA5 0x0305
++#define mmSDMA0_RLC4_MIDCMD_DATA5_BASE_IDX 0
++#define mmSDMA0_RLC4_MIDCMD_DATA6 0x0306
++#define mmSDMA0_RLC4_MIDCMD_DATA6_BASE_IDX 0
++#define mmSDMA0_RLC4_MIDCMD_DATA7 0x0307
++#define mmSDMA0_RLC4_MIDCMD_DATA7_BASE_IDX 0
++#define mmSDMA0_RLC4_MIDCMD_DATA8 0x0308
++#define mmSDMA0_RLC4_MIDCMD_DATA8_BASE_IDX 0
++#define mmSDMA0_RLC4_MIDCMD_CNTL 0x0309
++#define mmSDMA0_RLC4_MIDCMD_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC5_RB_CNTL 0x0320
++#define mmSDMA0_RLC5_RB_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC5_RB_BASE 0x0321
++#define mmSDMA0_RLC5_RB_BASE_BASE_IDX 0
++#define mmSDMA0_RLC5_RB_BASE_HI 0x0322
++#define mmSDMA0_RLC5_RB_BASE_HI_BASE_IDX 0
++#define mmSDMA0_RLC5_RB_RPTR 0x0323
++#define mmSDMA0_RLC5_RB_RPTR_BASE_IDX 0
++#define mmSDMA0_RLC5_RB_RPTR_HI 0x0324
++#define mmSDMA0_RLC5_RB_RPTR_HI_BASE_IDX 0
++#define mmSDMA0_RLC5_RB_WPTR 0x0325
++#define mmSDMA0_RLC5_RB_WPTR_BASE_IDX 0
++#define mmSDMA0_RLC5_RB_WPTR_HI 0x0326
++#define mmSDMA0_RLC5_RB_WPTR_HI_BASE_IDX 0
++#define mmSDMA0_RLC5_RB_WPTR_POLL_CNTL 0x0327
++#define mmSDMA0_RLC5_RB_WPTR_POLL_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC5_RB_RPTR_ADDR_HI 0x0328
++#define mmSDMA0_RLC5_RB_RPTR_ADDR_HI_BASE_IDX 0
++#define mmSDMA0_RLC5_RB_RPTR_ADDR_LO 0x0329
++#define mmSDMA0_RLC5_RB_RPTR_ADDR_LO_BASE_IDX 0
++#define mmSDMA0_RLC5_IB_CNTL 0x032a
++#define mmSDMA0_RLC5_IB_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC5_IB_RPTR 0x032b
++#define mmSDMA0_RLC5_IB_RPTR_BASE_IDX 0
++#define mmSDMA0_RLC5_IB_OFFSET 0x032c
++#define mmSDMA0_RLC5_IB_OFFSET_BASE_IDX 0
++#define mmSDMA0_RLC5_IB_BASE_LO 0x032d
++#define mmSDMA0_RLC5_IB_BASE_LO_BASE_IDX 0
++#define mmSDMA0_RLC5_IB_BASE_HI 0x032e
++#define mmSDMA0_RLC5_IB_BASE_HI_BASE_IDX 0
++#define mmSDMA0_RLC5_IB_SIZE 0x032f
++#define mmSDMA0_RLC5_IB_SIZE_BASE_IDX 0
++#define mmSDMA0_RLC5_SKIP_CNTL 0x0330
++#define mmSDMA0_RLC5_SKIP_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC5_CONTEXT_STATUS 0x0331
++#define mmSDMA0_RLC5_CONTEXT_STATUS_BASE_IDX 0
++#define mmSDMA0_RLC5_DOORBELL 0x0332
++#define mmSDMA0_RLC5_DOORBELL_BASE_IDX 0
++#define mmSDMA0_RLC5_STATUS 0x0348
++#define mmSDMA0_RLC5_STATUS_BASE_IDX 0
++#define mmSDMA0_RLC5_DOORBELL_LOG 0x0349
++#define mmSDMA0_RLC5_DOORBELL_LOG_BASE_IDX 0
++#define mmSDMA0_RLC5_WATERMARK 0x034a
++#define mmSDMA0_RLC5_WATERMARK_BASE_IDX 0
++#define mmSDMA0_RLC5_DOORBELL_OFFSET 0x034b
++#define mmSDMA0_RLC5_DOORBELL_OFFSET_BASE_IDX 0
++#define mmSDMA0_RLC5_CSA_ADDR_LO 0x034c
++#define mmSDMA0_RLC5_CSA_ADDR_LO_BASE_IDX 0
++#define mmSDMA0_RLC5_CSA_ADDR_HI 0x034d
++#define mmSDMA0_RLC5_CSA_ADDR_HI_BASE_IDX 0
++#define mmSDMA0_RLC5_IB_SUB_REMAIN 0x034f
++#define mmSDMA0_RLC5_IB_SUB_REMAIN_BASE_IDX 0
++#define mmSDMA0_RLC5_PREEMPT 0x0350
++#define mmSDMA0_RLC5_PREEMPT_BASE_IDX 0
++#define mmSDMA0_RLC5_DUMMY_REG 0x0351
++#define mmSDMA0_RLC5_DUMMY_REG_BASE_IDX 0
++#define mmSDMA0_RLC5_RB_WPTR_POLL_ADDR_HI 0x0352
++#define mmSDMA0_RLC5_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
++#define mmSDMA0_RLC5_RB_WPTR_POLL_ADDR_LO 0x0353
++#define mmSDMA0_RLC5_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
++#define mmSDMA0_RLC5_RB_AQL_CNTL 0x0354
++#define mmSDMA0_RLC5_RB_AQL_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC5_MINOR_PTR_UPDATE 0x0355
++#define mmSDMA0_RLC5_MINOR_PTR_UPDATE_BASE_IDX 0
++#define mmSDMA0_RLC5_MIDCMD_DATA0 0x0360
++#define mmSDMA0_RLC5_MIDCMD_DATA0_BASE_IDX 0
++#define mmSDMA0_RLC5_MIDCMD_DATA1 0x0361
++#define mmSDMA0_RLC5_MIDCMD_DATA1_BASE_IDX 0
++#define mmSDMA0_RLC5_MIDCMD_DATA2 0x0362
++#define mmSDMA0_RLC5_MIDCMD_DATA2_BASE_IDX 0
++#define mmSDMA0_RLC5_MIDCMD_DATA3 0x0363
++#define mmSDMA0_RLC5_MIDCMD_DATA3_BASE_IDX 0
++#define mmSDMA0_RLC5_MIDCMD_DATA4 0x0364
++#define mmSDMA0_RLC5_MIDCMD_DATA4_BASE_IDX 0
++#define mmSDMA0_RLC5_MIDCMD_DATA5 0x0365
++#define mmSDMA0_RLC5_MIDCMD_DATA5_BASE_IDX 0
++#define mmSDMA0_RLC5_MIDCMD_DATA6 0x0366
++#define mmSDMA0_RLC5_MIDCMD_DATA6_BASE_IDX 0
++#define mmSDMA0_RLC5_MIDCMD_DATA7 0x0367
++#define mmSDMA0_RLC5_MIDCMD_DATA7_BASE_IDX 0
++#define mmSDMA0_RLC5_MIDCMD_DATA8 0x0368
++#define mmSDMA0_RLC5_MIDCMD_DATA8_BASE_IDX 0
++#define mmSDMA0_RLC5_MIDCMD_CNTL 0x0369
++#define mmSDMA0_RLC5_MIDCMD_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC6_RB_CNTL 0x0380
++#define mmSDMA0_RLC6_RB_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC6_RB_BASE 0x0381
++#define mmSDMA0_RLC6_RB_BASE_BASE_IDX 0
++#define mmSDMA0_RLC6_RB_BASE_HI 0x0382
++#define mmSDMA0_RLC6_RB_BASE_HI_BASE_IDX 0
++#define mmSDMA0_RLC6_RB_RPTR 0x0383
++#define mmSDMA0_RLC6_RB_RPTR_BASE_IDX 0
++#define mmSDMA0_RLC6_RB_RPTR_HI 0x0384
++#define mmSDMA0_RLC6_RB_RPTR_HI_BASE_IDX 0
++#define mmSDMA0_RLC6_RB_WPTR 0x0385
++#define mmSDMA0_RLC6_RB_WPTR_BASE_IDX 0
++#define mmSDMA0_RLC6_RB_WPTR_HI 0x0386
++#define mmSDMA0_RLC6_RB_WPTR_HI_BASE_IDX 0
++#define mmSDMA0_RLC6_RB_WPTR_POLL_CNTL 0x0387
++#define mmSDMA0_RLC6_RB_WPTR_POLL_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC6_RB_RPTR_ADDR_HI 0x0388
++#define mmSDMA0_RLC6_RB_RPTR_ADDR_HI_BASE_IDX 0
++#define mmSDMA0_RLC6_RB_RPTR_ADDR_LO 0x0389
++#define mmSDMA0_RLC6_RB_RPTR_ADDR_LO_BASE_IDX 0
++#define mmSDMA0_RLC6_IB_CNTL 0x038a
++#define mmSDMA0_RLC6_IB_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC6_IB_RPTR 0x038b
++#define mmSDMA0_RLC6_IB_RPTR_BASE_IDX 0
++#define mmSDMA0_RLC6_IB_OFFSET 0x038c
++#define mmSDMA0_RLC6_IB_OFFSET_BASE_IDX 0
++#define mmSDMA0_RLC6_IB_BASE_LO 0x038d
++#define mmSDMA0_RLC6_IB_BASE_LO_BASE_IDX 0
++#define mmSDMA0_RLC6_IB_BASE_HI 0x038e
++#define mmSDMA0_RLC6_IB_BASE_HI_BASE_IDX 0
++#define mmSDMA0_RLC6_IB_SIZE 0x038f
++#define mmSDMA0_RLC6_IB_SIZE_BASE_IDX 0
++#define mmSDMA0_RLC6_SKIP_CNTL 0x0390
++#define mmSDMA0_RLC6_SKIP_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC6_CONTEXT_STATUS 0x0391
++#define mmSDMA0_RLC6_CONTEXT_STATUS_BASE_IDX 0
++#define mmSDMA0_RLC6_DOORBELL 0x0392
++#define mmSDMA0_RLC6_DOORBELL_BASE_IDX 0
++#define mmSDMA0_RLC6_STATUS 0x03a8
++#define mmSDMA0_RLC6_STATUS_BASE_IDX 0
++#define mmSDMA0_RLC6_DOORBELL_LOG 0x03a9
++#define mmSDMA0_RLC6_DOORBELL_LOG_BASE_IDX 0
++#define mmSDMA0_RLC6_WATERMARK 0x03aa
++#define mmSDMA0_RLC6_WATERMARK_BASE_IDX 0
++#define mmSDMA0_RLC6_DOORBELL_OFFSET 0x03ab
++#define mmSDMA0_RLC6_DOORBELL_OFFSET_BASE_IDX 0
++#define mmSDMA0_RLC6_CSA_ADDR_LO 0x03ac
++#define mmSDMA0_RLC6_CSA_ADDR_LO_BASE_IDX 0
++#define mmSDMA0_RLC6_CSA_ADDR_HI 0x03ad
++#define mmSDMA0_RLC6_CSA_ADDR_HI_BASE_IDX 0
++#define mmSDMA0_RLC6_IB_SUB_REMAIN 0x03af
++#define mmSDMA0_RLC6_IB_SUB_REMAIN_BASE_IDX 0
++#define mmSDMA0_RLC6_PREEMPT 0x03b0
++#define mmSDMA0_RLC6_PREEMPT_BASE_IDX 0
++#define mmSDMA0_RLC6_DUMMY_REG 0x03b1
++#define mmSDMA0_RLC6_DUMMY_REG_BASE_IDX 0
++#define mmSDMA0_RLC6_RB_WPTR_POLL_ADDR_HI 0x03b2
++#define mmSDMA0_RLC6_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
++#define mmSDMA0_RLC6_RB_WPTR_POLL_ADDR_LO 0x03b3
++#define mmSDMA0_RLC6_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
++#define mmSDMA0_RLC6_RB_AQL_CNTL 0x03b4
++#define mmSDMA0_RLC6_RB_AQL_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC6_MINOR_PTR_UPDATE 0x03b5
++#define mmSDMA0_RLC6_MINOR_PTR_UPDATE_BASE_IDX 0
++#define mmSDMA0_RLC6_MIDCMD_DATA0 0x03c0
++#define mmSDMA0_RLC6_MIDCMD_DATA0_BASE_IDX 0
++#define mmSDMA0_RLC6_MIDCMD_DATA1 0x03c1
++#define mmSDMA0_RLC6_MIDCMD_DATA1_BASE_IDX 0
++#define mmSDMA0_RLC6_MIDCMD_DATA2 0x03c2
++#define mmSDMA0_RLC6_MIDCMD_DATA2_BASE_IDX 0
++#define mmSDMA0_RLC6_MIDCMD_DATA3 0x03c3
++#define mmSDMA0_RLC6_MIDCMD_DATA3_BASE_IDX 0
++#define mmSDMA0_RLC6_MIDCMD_DATA4 0x03c4
++#define mmSDMA0_RLC6_MIDCMD_DATA4_BASE_IDX 0
++#define mmSDMA0_RLC6_MIDCMD_DATA5 0x03c5
++#define mmSDMA0_RLC6_MIDCMD_DATA5_BASE_IDX 0
++#define mmSDMA0_RLC6_MIDCMD_DATA6 0x03c6
++#define mmSDMA0_RLC6_MIDCMD_DATA6_BASE_IDX 0
++#define mmSDMA0_RLC6_MIDCMD_DATA7 0x03c7
++#define mmSDMA0_RLC6_MIDCMD_DATA7_BASE_IDX 0
++#define mmSDMA0_RLC6_MIDCMD_DATA8 0x03c8
++#define mmSDMA0_RLC6_MIDCMD_DATA8_BASE_IDX 0
++#define mmSDMA0_RLC6_MIDCMD_CNTL 0x03c9
++#define mmSDMA0_RLC6_MIDCMD_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC7_RB_CNTL 0x03e0
++#define mmSDMA0_RLC7_RB_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC7_RB_BASE 0x03e1
++#define mmSDMA0_RLC7_RB_BASE_BASE_IDX 0
++#define mmSDMA0_RLC7_RB_BASE_HI 0x03e2
++#define mmSDMA0_RLC7_RB_BASE_HI_BASE_IDX 0
++#define mmSDMA0_RLC7_RB_RPTR 0x03e3
++#define mmSDMA0_RLC7_RB_RPTR_BASE_IDX 0
++#define mmSDMA0_RLC7_RB_RPTR_HI 0x03e4
++#define mmSDMA0_RLC7_RB_RPTR_HI_BASE_IDX 0
++#define mmSDMA0_RLC7_RB_WPTR 0x03e5
++#define mmSDMA0_RLC7_RB_WPTR_BASE_IDX 0
++#define mmSDMA0_RLC7_RB_WPTR_HI 0x03e6
++#define mmSDMA0_RLC7_RB_WPTR_HI_BASE_IDX 0
++#define mmSDMA0_RLC7_RB_WPTR_POLL_CNTL 0x03e7
++#define mmSDMA0_RLC7_RB_WPTR_POLL_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC7_RB_RPTR_ADDR_HI 0x03e8
++#define mmSDMA0_RLC7_RB_RPTR_ADDR_HI_BASE_IDX 0
++#define mmSDMA0_RLC7_RB_RPTR_ADDR_LO 0x03e9
++#define mmSDMA0_RLC7_RB_RPTR_ADDR_LO_BASE_IDX 0
++#define mmSDMA0_RLC7_IB_CNTL 0x03ea
++#define mmSDMA0_RLC7_IB_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC7_IB_RPTR 0x03eb
++#define mmSDMA0_RLC7_IB_RPTR_BASE_IDX 0
++#define mmSDMA0_RLC7_IB_OFFSET 0x03ec
++#define mmSDMA0_RLC7_IB_OFFSET_BASE_IDX 0
++#define mmSDMA0_RLC7_IB_BASE_LO 0x03ed
++#define mmSDMA0_RLC7_IB_BASE_LO_BASE_IDX 0
++#define mmSDMA0_RLC7_IB_BASE_HI 0x03ee
++#define mmSDMA0_RLC7_IB_BASE_HI_BASE_IDX 0
++#define mmSDMA0_RLC7_IB_SIZE 0x03ef
++#define mmSDMA0_RLC7_IB_SIZE_BASE_IDX 0
++#define mmSDMA0_RLC7_SKIP_CNTL 0x03f0
++#define mmSDMA0_RLC7_SKIP_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC7_CONTEXT_STATUS 0x03f1
++#define mmSDMA0_RLC7_CONTEXT_STATUS_BASE_IDX 0
++#define mmSDMA0_RLC7_DOORBELL 0x03f2
++#define mmSDMA0_RLC7_DOORBELL_BASE_IDX 0
++#define mmSDMA0_RLC7_STATUS 0x0408
++#define mmSDMA0_RLC7_STATUS_BASE_IDX 0
++#define mmSDMA0_RLC7_DOORBELL_LOG 0x0409
++#define mmSDMA0_RLC7_DOORBELL_LOG_BASE_IDX 0
++#define mmSDMA0_RLC7_WATERMARK 0x040a
++#define mmSDMA0_RLC7_WATERMARK_BASE_IDX 0
++#define mmSDMA0_RLC7_DOORBELL_OFFSET 0x040b
++#define mmSDMA0_RLC7_DOORBELL_OFFSET_BASE_IDX 0
++#define mmSDMA0_RLC7_CSA_ADDR_LO 0x040c
++#define mmSDMA0_RLC7_CSA_ADDR_LO_BASE_IDX 0
++#define mmSDMA0_RLC7_CSA_ADDR_HI 0x040d
++#define mmSDMA0_RLC7_CSA_ADDR_HI_BASE_IDX 0
++#define mmSDMA0_RLC7_IB_SUB_REMAIN 0x040f
++#define mmSDMA0_RLC7_IB_SUB_REMAIN_BASE_IDX 0
++#define mmSDMA0_RLC7_PREEMPT 0x0410
++#define mmSDMA0_RLC7_PREEMPT_BASE_IDX 0
++#define mmSDMA0_RLC7_DUMMY_REG 0x0411
++#define mmSDMA0_RLC7_DUMMY_REG_BASE_IDX 0
++#define mmSDMA0_RLC7_RB_WPTR_POLL_ADDR_HI 0x0412
++#define mmSDMA0_RLC7_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
++#define mmSDMA0_RLC7_RB_WPTR_POLL_ADDR_LO 0x0413
++#define mmSDMA0_RLC7_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
++#define mmSDMA0_RLC7_RB_AQL_CNTL 0x0414
++#define mmSDMA0_RLC7_RB_AQL_CNTL_BASE_IDX 0
++#define mmSDMA0_RLC7_MINOR_PTR_UPDATE 0x0415
++#define mmSDMA0_RLC7_MINOR_PTR_UPDATE_BASE_IDX 0
++#define mmSDMA0_RLC7_MIDCMD_DATA0 0x0420
++#define mmSDMA0_RLC7_MIDCMD_DATA0_BASE_IDX 0
++#define mmSDMA0_RLC7_MIDCMD_DATA1 0x0421
++#define mmSDMA0_RLC7_MIDCMD_DATA1_BASE_IDX 0
++#define mmSDMA0_RLC7_MIDCMD_DATA2 0x0422
++#define mmSDMA0_RLC7_MIDCMD_DATA2_BASE_IDX 0
++#define mmSDMA0_RLC7_MIDCMD_DATA3 0x0423
++#define mmSDMA0_RLC7_MIDCMD_DATA3_BASE_IDX 0
++#define mmSDMA0_RLC7_MIDCMD_DATA4 0x0424
++#define mmSDMA0_RLC7_MIDCMD_DATA4_BASE_IDX 0
++#define mmSDMA0_RLC7_MIDCMD_DATA5 0x0425
++#define mmSDMA0_RLC7_MIDCMD_DATA5_BASE_IDX 0
++#define mmSDMA0_RLC7_MIDCMD_DATA6 0x0426
++#define mmSDMA0_RLC7_MIDCMD_DATA6_BASE_IDX 0
++#define mmSDMA0_RLC7_MIDCMD_DATA7 0x0427
++#define mmSDMA0_RLC7_MIDCMD_DATA7_BASE_IDX 0
++#define mmSDMA0_RLC7_MIDCMD_DATA8 0x0428
++#define mmSDMA0_RLC7_MIDCMD_DATA8_BASE_IDX 0
++#define mmSDMA0_RLC7_MIDCMD_CNTL 0x0429
++#define mmSDMA0_RLC7_MIDCMD_CNTL_BASE_IDX 0
++
++#endif
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_2_sh_mask.h
+new file mode 100644
+index 0000000..11bfb43
+--- /dev/null
++++ b/drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_2_sh_mask.h
+@@ -0,0 +1,2992 @@
++/*
++ * Copyright (C) 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
++ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++#ifndef _sdma0_4_2_0_SH_MASK_HEADER
++#define _sdma0_4_2_0_SH_MASK_HEADER
++
++
++// addressBlock: sdma0_sdma0dec
++//SDMA0_UCODE_ADDR
++#define SDMA0_UCODE_ADDR__VALUE__SHIFT 0x0
++#define SDMA0_UCODE_ADDR__VALUE_MASK 0x00001FFFL
++//SDMA0_UCODE_DATA
++#define SDMA0_UCODE_DATA__VALUE__SHIFT 0x0
++#define SDMA0_UCODE_DATA__VALUE_MASK 0xFFFFFFFFL
++//SDMA0_VM_CNTL
++#define SDMA0_VM_CNTL__CMD__SHIFT 0x0
++#define SDMA0_VM_CNTL__CMD_MASK 0x0000000FL
++//SDMA0_VM_CTX_LO
++#define SDMA0_VM_CTX_LO__ADDR__SHIFT 0x2
++#define SDMA0_VM_CTX_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA0_VM_CTX_HI
++#define SDMA0_VM_CTX_HI__ADDR__SHIFT 0x0
++#define SDMA0_VM_CTX_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_ACTIVE_FCN_ID
++#define SDMA0_ACTIVE_FCN_ID__VFID__SHIFT 0x0
++#define SDMA0_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4
++#define SDMA0_ACTIVE_FCN_ID__VF__SHIFT 0x1f
++#define SDMA0_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
++#define SDMA0_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L
++#define SDMA0_ACTIVE_FCN_ID__VF_MASK 0x80000000L
++//SDMA0_VM_CTX_CNTL
++#define SDMA0_VM_CTX_CNTL__PRIV__SHIFT 0x0
++#define SDMA0_VM_CTX_CNTL__VMID__SHIFT 0x4
++#define SDMA0_VM_CTX_CNTL__PRIV_MASK 0x00000001L
++#define SDMA0_VM_CTX_CNTL__VMID_MASK 0x000000F0L
++//SDMA0_VIRT_RESET_REQ
++#define SDMA0_VIRT_RESET_REQ__VF__SHIFT 0x0
++#define SDMA0_VIRT_RESET_REQ__PF__SHIFT 0x1f
++#define SDMA0_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
++#define SDMA0_VIRT_RESET_REQ__PF_MASK 0x80000000L
++//SDMA0_VF_ENABLE
++#define SDMA0_VF_ENABLE__VF_ENABLE__SHIFT 0x0
++#define SDMA0_VF_ENABLE__VF_ENABLE_MASK 0x00000001L
++//SDMA0_CONTEXT_REG_TYPE0
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_CNTL__SHIFT 0x0
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_BASE__SHIFT 0x1
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_BASE_HI__SHIFT 0x2
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR__SHIFT 0x3
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_HI__SHIFT 0x4
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR__SHIFT 0x5
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR_HI__SHIFT 0x6
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR_POLL_CNTL__SHIFT 0x7
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_ADDR_HI__SHIFT 0x8
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_ADDR_LO__SHIFT 0x9
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_CNTL__SHIFT 0xa
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_RPTR__SHIFT 0xb
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_OFFSET__SHIFT 0xc
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_BASE_LO__SHIFT 0xd
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_BASE_HI__SHIFT 0xe
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_SIZE__SHIFT 0xf
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_SKIP_CNTL__SHIFT 0x10
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_CONTEXT_STATUS__SHIFT 0x11
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_DOORBELL__SHIFT 0x12
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_CONTEXT_CNTL__SHIFT 0x13
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_CNTL_MASK 0x00000001L
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_BASE_MASK 0x00000002L
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_BASE_HI_MASK 0x00000004L
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_MASK 0x00000008L
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_HI_MASK 0x00000010L
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR_MASK 0x00000020L
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR_HI_MASK 0x00000040L
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR_POLL_CNTL_MASK 0x00000080L
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_ADDR_HI_MASK 0x00000100L
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_ADDR_LO_MASK 0x00000200L
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_CNTL_MASK 0x00000400L
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_RPTR_MASK 0x00000800L
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_OFFSET_MASK 0x00001000L
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_BASE_LO_MASK 0x00002000L
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_BASE_HI_MASK 0x00004000L
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_SIZE_MASK 0x00008000L
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_SKIP_CNTL_MASK 0x00010000L
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_CONTEXT_STATUS_MASK 0x00020000L
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_DOORBELL_MASK 0x00040000L
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_CONTEXT_CNTL_MASK 0x00080000L
++//SDMA0_CONTEXT_REG_TYPE1
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_STATUS__SHIFT 0x8
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DOORBELL_LOG__SHIFT 0x9
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_WATERMARK__SHIFT 0xa
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DOORBELL_OFFSET__SHIFT 0xb
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_CSA_ADDR_LO__SHIFT 0xc
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_CSA_ADDR_HI__SHIFT 0xd
++#define SDMA0_CONTEXT_REG_TYPE1__VOID_REG2__SHIFT 0xe
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_IB_SUB_REMAIN__SHIFT 0xf
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_PREEMPT__SHIFT 0x10
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DUMMY_REG__SHIFT 0x11
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_WPTR_POLL_ADDR_HI__SHIFT 0x12
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_WPTR_POLL_ADDR_LO__SHIFT 0x13
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_AQL_CNTL__SHIFT 0x14
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_MINOR_PTR_UPDATE__SHIFT 0x15
++#define SDMA0_CONTEXT_REG_TYPE1__RESERVED__SHIFT 0x16
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_STATUS_MASK 0x00000100L
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DOORBELL_LOG_MASK 0x00000200L
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_WATERMARK_MASK 0x00000400L
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DOORBELL_OFFSET_MASK 0x00000800L
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_CSA_ADDR_LO_MASK 0x00001000L
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_CSA_ADDR_HI_MASK 0x00002000L
++#define SDMA0_CONTEXT_REG_TYPE1__VOID_REG2_MASK 0x00004000L
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_IB_SUB_REMAIN_MASK 0x00008000L
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_PREEMPT_MASK 0x00010000L
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DUMMY_REG_MASK 0x00020000L
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_WPTR_POLL_ADDR_HI_MASK 0x00040000L
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_WPTR_POLL_ADDR_LO_MASK 0x00080000L
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_AQL_CNTL_MASK 0x00100000L
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_MINOR_PTR_UPDATE_MASK 0x00200000L
++#define SDMA0_CONTEXT_REG_TYPE1__RESERVED_MASK 0xFFC00000L
++//SDMA0_CONTEXT_REG_TYPE2
++#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA0__SHIFT 0x0
++#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA1__SHIFT 0x1
++#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA2__SHIFT 0x2
++#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA3__SHIFT 0x3
++#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA4__SHIFT 0x4
++#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA5__SHIFT 0x5
++#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA6__SHIFT 0x6
++#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA7__SHIFT 0x7
++#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA8__SHIFT 0x8
++#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_CNTL__SHIFT 0x9
++#define SDMA0_CONTEXT_REG_TYPE2__RESERVED__SHIFT 0xa
++#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA0_MASK 0x00000001L
++#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA1_MASK 0x00000002L
++#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA2_MASK 0x00000004L
++#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA3_MASK 0x00000008L
++#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA4_MASK 0x00000010L
++#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA5_MASK 0x00000020L
++#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA6_MASK 0x00000040L
++#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA7_MASK 0x00000080L
++#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA8_MASK 0x00000100L
++#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_CNTL_MASK 0x00000200L
++#define SDMA0_CONTEXT_REG_TYPE2__RESERVED_MASK 0xFFFFFC00L
++//SDMA0_CONTEXT_REG_TYPE3
++#define SDMA0_CONTEXT_REG_TYPE3__RESERVED__SHIFT 0x0
++#define SDMA0_CONTEXT_REG_TYPE3__RESERVED_MASK 0xFFFFFFFFL
++//SDMA0_PUB_REG_TYPE0
++#define SDMA0_PUB_REG_TYPE0__SDMA0_UCODE_ADDR__SHIFT 0x0
++#define SDMA0_PUB_REG_TYPE0__SDMA0_UCODE_DATA__SHIFT 0x1
++#define SDMA0_PUB_REG_TYPE0__RESERVED3__SHIFT 0x3
++#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CNTL__SHIFT 0x4
++#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_LO__SHIFT 0x5
++#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_HI__SHIFT 0x6
++#define SDMA0_PUB_REG_TYPE0__SDMA0_ACTIVE_FCN_ID__SHIFT 0x7
++#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_CNTL__SHIFT 0x8
++#define SDMA0_PUB_REG_TYPE0__SDMA0_VIRT_RESET_REQ__SHIFT 0x9
++#define SDMA0_PUB_REG_TYPE0__RESERVED10__SHIFT 0xa
++#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE0__SHIFT 0xb
++#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE1__SHIFT 0xc
++#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE2__SHIFT 0xd
++#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE3__SHIFT 0xe
++#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE0__SHIFT 0xf
++#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE1__SHIFT 0x10
++#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE2__SHIFT 0x11
++#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE3__SHIFT 0x12
++#define SDMA0_PUB_REG_TYPE0__SDMA0_MMHUB_CNTL__SHIFT 0x13
++#define SDMA0_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY__SHIFT 0x14
++#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_GROUP_BOUNDARY__SHIFT 0x19
++#define SDMA0_PUB_REG_TYPE0__SDMA0_POWER_CNTL__SHIFT 0x1a
++#define SDMA0_PUB_REG_TYPE0__SDMA0_CLK_CTRL__SHIFT 0x1b
++#define SDMA0_PUB_REG_TYPE0__SDMA0_CNTL__SHIFT 0x1c
++#define SDMA0_PUB_REG_TYPE0__SDMA0_CHICKEN_BITS__SHIFT 0x1d
++#define SDMA0_PUB_REG_TYPE0__SDMA0_GB_ADDR_CONFIG__SHIFT 0x1e
++#define SDMA0_PUB_REG_TYPE0__SDMA0_GB_ADDR_CONFIG_READ__SHIFT 0x1f
++#define SDMA0_PUB_REG_TYPE0__SDMA0_UCODE_ADDR_MASK 0x00000001L
++#define SDMA0_PUB_REG_TYPE0__SDMA0_UCODE_DATA_MASK 0x00000002L
++#define SDMA0_PUB_REG_TYPE0__RESERVED3_MASK 0x00000008L
++#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CNTL_MASK 0x00000010L
++#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_LO_MASK 0x00000020L
++#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_HI_MASK 0x00000040L
++#define SDMA0_PUB_REG_TYPE0__SDMA0_ACTIVE_FCN_ID_MASK 0x00000080L
++#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_CNTL_MASK 0x00000100L
++#define SDMA0_PUB_REG_TYPE0__SDMA0_VIRT_RESET_REQ_MASK 0x00000200L
++#define SDMA0_PUB_REG_TYPE0__RESERVED10_MASK 0x00000400L
++#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE0_MASK 0x00000800L
++#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE1_MASK 0x00001000L
++#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE2_MASK 0x00002000L
++#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE3_MASK 0x00004000L
++#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE0_MASK 0x00008000L
++#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE1_MASK 0x00010000L
++#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE2_MASK 0x00020000L
++#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE3_MASK 0x00040000L
++#define SDMA0_PUB_REG_TYPE0__SDMA0_MMHUB_CNTL_MASK 0x00080000L
++#define SDMA0_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY_MASK 0x01F00000L
++#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_GROUP_BOUNDARY_MASK 0x02000000L
++#define SDMA0_PUB_REG_TYPE0__SDMA0_POWER_CNTL_MASK 0x04000000L
++#define SDMA0_PUB_REG_TYPE0__SDMA0_CLK_CTRL_MASK 0x08000000L
++#define SDMA0_PUB_REG_TYPE0__SDMA0_CNTL_MASK 0x10000000L
++#define SDMA0_PUB_REG_TYPE0__SDMA0_CHICKEN_BITS_MASK 0x20000000L
++#define SDMA0_PUB_REG_TYPE0__SDMA0_GB_ADDR_CONFIG_MASK 0x40000000L
++#define SDMA0_PUB_REG_TYPE0__SDMA0_GB_ADDR_CONFIG_READ_MASK 0x80000000L
++//SDMA0_PUB_REG_TYPE1
++#define SDMA0_PUB_REG_TYPE1__SDMA0_RB_RPTR_FETCH_HI__SHIFT 0x0
++#define SDMA0_PUB_REG_TYPE1__SDMA0_SEM_WAIT_FAIL_TIMER_CNTL__SHIFT 0x1
++#define SDMA0_PUB_REG_TYPE1__SDMA0_RB_RPTR_FETCH__SHIFT 0x2
++#define SDMA0_PUB_REG_TYPE1__SDMA0_IB_OFFSET_FETCH__SHIFT 0x3
++#define SDMA0_PUB_REG_TYPE1__SDMA0_PROGRAM__SHIFT 0x4
++#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS_REG__SHIFT 0x5
++#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS1_REG__SHIFT 0x6
++#define SDMA0_PUB_REG_TYPE1__SDMA0_RD_BURST_CNTL__SHIFT 0x7
++#define SDMA0_PUB_REG_TYPE1__SDMA0_HBM_PAGE_CONFIG__SHIFT 0x8
++#define SDMA0_PUB_REG_TYPE1__SDMA0_UCODE_CHECKSUM__SHIFT 0x9
++#define SDMA0_PUB_REG_TYPE1__SDMA0_F32_CNTL__SHIFT 0xa
++#define SDMA0_PUB_REG_TYPE1__SDMA0_FREEZE__SHIFT 0xb
++#define SDMA0_PUB_REG_TYPE1__SDMA0_PHASE0_QUANTUM__SHIFT 0xc
++#define SDMA0_PUB_REG_TYPE1__SDMA0_PHASE1_QUANTUM__SHIFT 0xd
++#define SDMA0_PUB_REG_TYPE1__SDMA_POWER_GATING__SHIFT 0xe
++#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG__SHIFT 0xf
++#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_WRITE__SHIFT 0x10
++#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_READ__SHIFT 0x11
++#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_CONFIG__SHIFT 0x12
++#define SDMA0_PUB_REG_TYPE1__SDMA0_BA_THRESHOLD__SHIFT 0x13
++#define SDMA0_PUB_REG_TYPE1__SDMA0_ID__SHIFT 0x14
++#define SDMA0_PUB_REG_TYPE1__SDMA0_VERSION__SHIFT 0x15
++#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_COUNTER__SHIFT 0x16
++#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_COUNTER_CLEAR__SHIFT 0x17
++#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS2_REG__SHIFT 0x18
++#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_CNTL__SHIFT 0x19
++#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_PREOP_LO__SHIFT 0x1a
++#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_PREOP_HI__SHIFT 0x1b
++#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_CNTL__SHIFT 0x1c
++#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_WATERMK__SHIFT 0x1d
++#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_RD_STATUS__SHIFT 0x1e
++#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_WR_STATUS__SHIFT 0x1f
++#define SDMA0_PUB_REG_TYPE1__SDMA0_RB_RPTR_FETCH_HI_MASK 0x00000001L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_SEM_WAIT_FAIL_TIMER_CNTL_MASK 0x00000002L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_RB_RPTR_FETCH_MASK 0x00000004L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_IB_OFFSET_FETCH_MASK 0x00000008L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_PROGRAM_MASK 0x00000010L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS_REG_MASK 0x00000020L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS1_REG_MASK 0x00000040L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_RD_BURST_CNTL_MASK 0x00000080L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_HBM_PAGE_CONFIG_MASK 0x00000100L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_UCODE_CHECKSUM_MASK 0x00000200L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_F32_CNTL_MASK 0x00000400L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_FREEZE_MASK 0x00000800L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_PHASE0_QUANTUM_MASK 0x00001000L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_PHASE1_QUANTUM_MASK 0x00002000L
++#define SDMA0_PUB_REG_TYPE1__SDMA_POWER_GATING_MASK 0x00004000L
++#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG_MASK 0x00008000L
++#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_WRITE_MASK 0x00010000L
++#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_READ_MASK 0x00020000L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_CONFIG_MASK 0x00040000L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_BA_THRESHOLD_MASK 0x00080000L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_ID_MASK 0x00100000L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_VERSION_MASK 0x00200000L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_COUNTER_MASK 0x00400000L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_COUNTER_CLEAR_MASK 0x00800000L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS2_REG_MASK 0x01000000L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_CNTL_MASK 0x02000000L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_PREOP_LO_MASK 0x04000000L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_PREOP_HI_MASK 0x08000000L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_CNTL_MASK 0x10000000L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_WATERMK_MASK 0x20000000L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_RD_STATUS_MASK 0x40000000L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_WR_STATUS_MASK 0x80000000L
++//SDMA0_PUB_REG_TYPE2
++#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV0__SHIFT 0x0
++#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV1__SHIFT 0x1
++#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV2__SHIFT 0x2
++#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_XNACK0__SHIFT 0x3
++#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_XNACK1__SHIFT 0x4
++#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_XNACK0__SHIFT 0x5
++#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_XNACK1__SHIFT 0x6
++#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_TIMEOUT__SHIFT 0x7
++#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_PAGE__SHIFT 0x8
++#define SDMA0_PUB_REG_TYPE2__SDMA0_POWER_CNTL_IDLE__SHIFT 0x9
++#define SDMA0_PUB_REG_TYPE2__SDMA0_RELAX_ORDERING_LUT__SHIFT 0xa
++#define SDMA0_PUB_REG_TYPE2__SDMA0_CHICKEN_BITS_2__SHIFT 0xb
++#define SDMA0_PUB_REG_TYPE2__SDMA0_STATUS3_REG__SHIFT 0xc
++#define SDMA0_PUB_REG_TYPE2__SDMA0_PHYSICAL_ADDR_LO__SHIFT 0xd
++#define SDMA0_PUB_REG_TYPE2__SDMA0_PHYSICAL_ADDR_HI__SHIFT 0xe
++#define SDMA0_PUB_REG_TYPE2__SDMA0_PHASE2_QUANTUM__SHIFT 0xf
++#define SDMA0_PUB_REG_TYPE2__SDMA0_ERROR_LOG__SHIFT 0x10
++#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG0__SHIFT 0x11
++#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG1__SHIFT 0x12
++#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG2__SHIFT 0x13
++#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG3__SHIFT 0x14
++#define SDMA0_PUB_REG_TYPE2__SDMA0_F32_COUNTER__SHIFT 0x15
++#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFMON_CNTL__SHIFT 0x17
++#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER0_RESULT__SHIFT 0x18
++#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER1_RESULT__SHIFT 0x19
++#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__SHIFT 0x1a
++#define SDMA0_PUB_REG_TYPE2__SDMA0_CRD_CNTL__SHIFT 0x1b
++#define SDMA0_PUB_REG_TYPE2__SDMA0_GPU_IOV_VIOLATION_LOG__SHIFT 0x1d
++#define SDMA0_PUB_REG_TYPE2__SDMA0_ULV_CNTL__SHIFT 0x1e
++#define SDMA0_PUB_REG_TYPE2__RESERVED__SHIFT 0x1f
++#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV0_MASK 0x00000001L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV1_MASK 0x00000002L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV2_MASK 0x00000004L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_XNACK0_MASK 0x00000008L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_XNACK1_MASK 0x00000010L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_XNACK0_MASK 0x00000020L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_XNACK1_MASK 0x00000040L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_TIMEOUT_MASK 0x00000080L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_PAGE_MASK 0x00000100L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_POWER_CNTL_IDLE_MASK 0x00000200L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_RELAX_ORDERING_LUT_MASK 0x00000400L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_CHICKEN_BITS_2_MASK 0x00000800L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_STATUS3_REG_MASK 0x00001000L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_PHYSICAL_ADDR_LO_MASK 0x00002000L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_PHYSICAL_ADDR_HI_MASK 0x00004000L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_PHASE2_QUANTUM_MASK 0x00008000L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_ERROR_LOG_MASK 0x00010000L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG0_MASK 0x00020000L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG1_MASK 0x00040000L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG2_MASK 0x00080000L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG3_MASK 0x00100000L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_F32_COUNTER_MASK 0x00200000L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFMON_CNTL_MASK 0x00800000L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER0_RESULT_MASK 0x01000000L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER1_RESULT_MASK 0x02000000L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER_TAG_DELAY_RANGE_MASK 0x04000000L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_CRD_CNTL_MASK 0x08000000L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_GPU_IOV_VIOLATION_LOG_MASK 0x20000000L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_ULV_CNTL_MASK 0x40000000L
++#define SDMA0_PUB_REG_TYPE2__RESERVED_MASK 0x80000000L
++//SDMA0_PUB_REG_TYPE3
++#define SDMA0_PUB_REG_TYPE3__SDMA0_EA_DBIT_ADDR_DATA__SHIFT 0x0
++#define SDMA0_PUB_REG_TYPE3__SDMA0_EA_DBIT_ADDR_INDEX__SHIFT 0x1
++#define SDMA0_PUB_REG_TYPE3__RESERVED__SHIFT 0x2
++#define SDMA0_PUB_REG_TYPE3__SDMA0_EA_DBIT_ADDR_DATA_MASK 0x00000001L
++#define SDMA0_PUB_REG_TYPE3__SDMA0_EA_DBIT_ADDR_INDEX_MASK 0x00000002L
++#define SDMA0_PUB_REG_TYPE3__RESERVED_MASK 0xFFFFFFFCL
++//SDMA0_MMHUB_CNTL
++#define SDMA0_MMHUB_CNTL__UNIT_ID__SHIFT 0x0
++#define SDMA0_MMHUB_CNTL__UNIT_ID_MASK 0x0000003FL
++//SDMA0_CONTEXT_GROUP_BOUNDARY
++#define SDMA0_CONTEXT_GROUP_BOUNDARY__RESERVED__SHIFT 0x0
++#define SDMA0_CONTEXT_GROUP_BOUNDARY__RESERVED_MASK 0xFFFFFFFFL
++//SDMA0_POWER_CNTL
++#define SDMA0_POWER_CNTL__PG_CNTL_ENABLE__SHIFT 0x0
++#define SDMA0_POWER_CNTL__EXT_PG_POWER_ON_REQ__SHIFT 0x1
++#define SDMA0_POWER_CNTL__EXT_PG_POWER_OFF_REQ__SHIFT 0x2
++#define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME__SHIFT 0x3
++#define SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE__SHIFT 0x8
++#define SDMA0_POWER_CNTL__MEM_POWER_LS_EN__SHIFT 0x9
++#define SDMA0_POWER_CNTL__MEM_POWER_DS_EN__SHIFT 0xa
++#define SDMA0_POWER_CNTL__MEM_POWER_SD_EN__SHIFT 0xb
++#define SDMA0_POWER_CNTL__MEM_POWER_DELAY__SHIFT 0xc
++#define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME__SHIFT 0x1a
++#define SDMA0_POWER_CNTL__PG_CNTL_ENABLE_MASK 0x00000001L
++#define SDMA0_POWER_CNTL__EXT_PG_POWER_ON_REQ_MASK 0x00000002L
++#define SDMA0_POWER_CNTL__EXT_PG_POWER_OFF_REQ_MASK 0x00000004L
++#define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK 0x000000F8L
++#define SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK 0x00000100L
++#define SDMA0_POWER_CNTL__MEM_POWER_LS_EN_MASK 0x00000200L
++#define SDMA0_POWER_CNTL__MEM_POWER_DS_EN_MASK 0x00000400L
++#define SDMA0_POWER_CNTL__MEM_POWER_SD_EN_MASK 0x00000800L
++#define SDMA0_POWER_CNTL__MEM_POWER_DELAY_MASK 0x003FF000L
++#define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK 0xFC000000L
++//SDMA0_CLK_CTRL
++#define SDMA0_CLK_CTRL__ON_DELAY__SHIFT 0x0
++#define SDMA0_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
++#define SDMA0_CLK_CTRL__RESERVED__SHIFT 0xc
++#define SDMA0_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
++#define SDMA0_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
++#define SDMA0_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
++#define SDMA0_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
++#define SDMA0_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
++#define SDMA0_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
++#define SDMA0_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
++#define SDMA0_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
++#define SDMA0_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
++#define SDMA0_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
++#define SDMA0_CLK_CTRL__RESERVED_MASK 0x00FFF000L
++#define SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
++#define SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
++#define SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
++#define SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
++#define SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
++#define SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
++#define SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
++#define SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
++//SDMA0_CNTL
++#define SDMA0_CNTL__TRAP_ENABLE__SHIFT 0x0
++#define SDMA0_CNTL__UTC_L1_ENABLE__SHIFT 0x1
++#define SDMA0_CNTL__SEM_WAIT_INT_ENABLE__SHIFT 0x2
++#define SDMA0_CNTL__DATA_SWAP_ENABLE__SHIFT 0x3
++#define SDMA0_CNTL__FENCE_SWAP_ENABLE__SHIFT 0x4
++#define SDMA0_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x5
++#define SDMA0_CNTL__MIDCMD_WORLDSWITCH_ENABLE__SHIFT 0x11
++#define SDMA0_CNTL__AUTO_CTXSW_ENABLE__SHIFT 0x12
++#define SDMA0_CNTL__CTXEMPTY_INT_ENABLE__SHIFT 0x1c
++#define SDMA0_CNTL__FROZEN_INT_ENABLE__SHIFT 0x1d
++#define SDMA0_CNTL__IB_PREEMPT_INT_ENABLE__SHIFT 0x1e
++#define SDMA0_CNTL__TRAP_ENABLE_MASK 0x00000001L
++#define SDMA0_CNTL__UTC_L1_ENABLE_MASK 0x00000002L
++#define SDMA0_CNTL__SEM_WAIT_INT_ENABLE_MASK 0x00000004L
++#define SDMA0_CNTL__DATA_SWAP_ENABLE_MASK 0x00000008L
++#define SDMA0_CNTL__FENCE_SWAP_ENABLE_MASK 0x00000010L
++#define SDMA0_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00000020L
++#define SDMA0_CNTL__MIDCMD_WORLDSWITCH_ENABLE_MASK 0x00020000L
++#define SDMA0_CNTL__AUTO_CTXSW_ENABLE_MASK 0x00040000L
++#define SDMA0_CNTL__CTXEMPTY_INT_ENABLE_MASK 0x10000000L
++#define SDMA0_CNTL__FROZEN_INT_ENABLE_MASK 0x20000000L
++#define SDMA0_CNTL__IB_PREEMPT_INT_ENABLE_MASK 0x40000000L
++//SDMA0_CHICKEN_BITS
++#define SDMA0_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE__SHIFT 0x0
++#define SDMA0_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE__SHIFT 0x1
++#define SDMA0_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE__SHIFT 0x2
++#define SDMA0_CHICKEN_BITS__WRITE_BURST_LENGTH__SHIFT 0x8
++#define SDMA0_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE__SHIFT 0xa
++#define SDMA0_CHICKEN_BITS__COPY_OVERLAP_ENABLE__SHIFT 0x10
++#define SDMA0_CHICKEN_BITS__RAW_CHECK_ENABLE__SHIFT 0x11
++#define SDMA0_CHICKEN_BITS__SRBM_POLL_RETRYING__SHIFT 0x14
++#define SDMA0_CHICKEN_BITS__CG_STATUS_OUTPUT__SHIFT 0x17
++#define SDMA0_CHICKEN_BITS__TIME_BASED_QOS__SHIFT 0x19
++#define SDMA0_CHICKEN_BITS__CE_AFIFO_WATERMARK__SHIFT 0x1a
++#define SDMA0_CHICKEN_BITS__CE_DFIFO_WATERMARK__SHIFT 0x1c
++#define SDMA0_CHICKEN_BITS__CE_LFIFO_WATERMARK__SHIFT 0x1e
++#define SDMA0_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE_MASK 0x00000001L
++#define SDMA0_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE_MASK 0x00000002L
++#define SDMA0_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE_MASK 0x00000004L
++#define SDMA0_CHICKEN_BITS__WRITE_BURST_LENGTH_MASK 0x00000300L
++#define SDMA0_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE_MASK 0x00001C00L
++#define SDMA0_CHICKEN_BITS__COPY_OVERLAP_ENABLE_MASK 0x00010000L
++#define SDMA0_CHICKEN_BITS__RAW_CHECK_ENABLE_MASK 0x00020000L
++#define SDMA0_CHICKEN_BITS__SRBM_POLL_RETRYING_MASK 0x00100000L
++#define SDMA0_CHICKEN_BITS__CG_STATUS_OUTPUT_MASK 0x00800000L
++#define SDMA0_CHICKEN_BITS__TIME_BASED_QOS_MASK 0x02000000L
++#define SDMA0_CHICKEN_BITS__CE_AFIFO_WATERMARK_MASK 0x0C000000L
++#define SDMA0_CHICKEN_BITS__CE_DFIFO_WATERMARK_MASK 0x30000000L
++#define SDMA0_CHICKEN_BITS__CE_LFIFO_WATERMARK_MASK 0xC0000000L
++//SDMA0_GB_ADDR_CONFIG
++#define SDMA0_GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
++#define SDMA0_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
++#define SDMA0_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x8
++#define SDMA0_GB_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc
++#define SDMA0_GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13
++#define SDMA0_GB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
++#define SDMA0_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
++#define SDMA0_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
++#define SDMA0_GB_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L
++#define SDMA0_GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L
++//SDMA0_GB_ADDR_CONFIG_READ
++#define SDMA0_GB_ADDR_CONFIG_READ__NUM_PIPES__SHIFT 0x0
++#define SDMA0_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
++#define SDMA0_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE__SHIFT 0x8
++#define SDMA0_GB_ADDR_CONFIG_READ__NUM_BANKS__SHIFT 0xc
++#define SDMA0_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES__SHIFT 0x13
++#define SDMA0_GB_ADDR_CONFIG_READ__NUM_PIPES_MASK 0x00000007L
++#define SDMA0_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
++#define SDMA0_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
++#define SDMA0_GB_ADDR_CONFIG_READ__NUM_BANKS_MASK 0x00007000L
++#define SDMA0_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES_MASK 0x00180000L
++//SDMA0_RB_RPTR_FETCH_HI
++#define SDMA0_RB_RPTR_FETCH_HI__OFFSET__SHIFT 0x0
++#define SDMA0_RB_RPTR_FETCH_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_SEM_WAIT_FAIL_TIMER_CNTL
++#define SDMA0_SEM_WAIT_FAIL_TIMER_CNTL__TIMER__SHIFT 0x0
++#define SDMA0_SEM_WAIT_FAIL_TIMER_CNTL__TIMER_MASK 0xFFFFFFFFL
++//SDMA0_RB_RPTR_FETCH
++#define SDMA0_RB_RPTR_FETCH__OFFSET__SHIFT 0x2
++#define SDMA0_RB_RPTR_FETCH__OFFSET_MASK 0xFFFFFFFCL
++//SDMA0_IB_OFFSET_FETCH
++#define SDMA0_IB_OFFSET_FETCH__OFFSET__SHIFT 0x2
++#define SDMA0_IB_OFFSET_FETCH__OFFSET_MASK 0x003FFFFCL
++//SDMA0_PROGRAM
++#define SDMA0_PROGRAM__STREAM__SHIFT 0x0
++#define SDMA0_PROGRAM__STREAM_MASK 0xFFFFFFFFL
++//SDMA0_STATUS_REG
++#define SDMA0_STATUS_REG__IDLE__SHIFT 0x0
++#define SDMA0_STATUS_REG__REG_IDLE__SHIFT 0x1
++#define SDMA0_STATUS_REG__RB_EMPTY__SHIFT 0x2
++#define SDMA0_STATUS_REG__RB_FULL__SHIFT 0x3
++#define SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT 0x4
++#define SDMA0_STATUS_REG__RB_CMD_FULL__SHIFT 0x5
++#define SDMA0_STATUS_REG__IB_CMD_IDLE__SHIFT 0x6
++#define SDMA0_STATUS_REG__IB_CMD_FULL__SHIFT 0x7
++#define SDMA0_STATUS_REG__BLOCK_IDLE__SHIFT 0x8
++#define SDMA0_STATUS_REG__INSIDE_IB__SHIFT 0x9
++#define SDMA0_STATUS_REG__EX_IDLE__SHIFT 0xa
++#define SDMA0_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE__SHIFT 0xb
++#define SDMA0_STATUS_REG__PACKET_READY__SHIFT 0xc
++#define SDMA0_STATUS_REG__MC_WR_IDLE__SHIFT 0xd
++#define SDMA0_STATUS_REG__SRBM_IDLE__SHIFT 0xe
++#define SDMA0_STATUS_REG__CONTEXT_EMPTY__SHIFT 0xf
++#define SDMA0_STATUS_REG__DELTA_RPTR_FULL__SHIFT 0x10
++#define SDMA0_STATUS_REG__RB_MC_RREQ_IDLE__SHIFT 0x11
++#define SDMA0_STATUS_REG__IB_MC_RREQ_IDLE__SHIFT 0x12
++#define SDMA0_STATUS_REG__MC_RD_IDLE__SHIFT 0x13
++#define SDMA0_STATUS_REG__DELTA_RPTR_EMPTY__SHIFT 0x14
++#define SDMA0_STATUS_REG__MC_RD_RET_STALL__SHIFT 0x15
++#define SDMA0_STATUS_REG__MC_RD_NO_POLL_IDLE__SHIFT 0x16
++#define SDMA0_STATUS_REG__PREV_CMD_IDLE__SHIFT 0x19
++#define SDMA0_STATUS_REG__SEM_IDLE__SHIFT 0x1a
++#define SDMA0_STATUS_REG__SEM_REQ_STALL__SHIFT 0x1b
++#define SDMA0_STATUS_REG__SEM_RESP_STATE__SHIFT 0x1c
++#define SDMA0_STATUS_REG__INT_IDLE__SHIFT 0x1e
++#define SDMA0_STATUS_REG__INT_REQ_STALL__SHIFT 0x1f
++#define SDMA0_STATUS_REG__IDLE_MASK 0x00000001L
++#define SDMA0_STATUS_REG__REG_IDLE_MASK 0x00000002L
++#define SDMA0_STATUS_REG__RB_EMPTY_MASK 0x00000004L
++#define SDMA0_STATUS_REG__RB_FULL_MASK 0x00000008L
++#define SDMA0_STATUS_REG__RB_CMD_IDLE_MASK 0x00000010L
++#define SDMA0_STATUS_REG__RB_CMD_FULL_MASK 0x00000020L
++#define SDMA0_STATUS_REG__IB_CMD_IDLE_MASK 0x00000040L
++#define SDMA0_STATUS_REG__IB_CMD_FULL_MASK 0x00000080L
++#define SDMA0_STATUS_REG__BLOCK_IDLE_MASK 0x00000100L
++#define SDMA0_STATUS_REG__INSIDE_IB_MASK 0x00000200L
++#define SDMA0_STATUS_REG__EX_IDLE_MASK 0x00000400L
++#define SDMA0_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE_MASK 0x00000800L
++#define SDMA0_STATUS_REG__PACKET_READY_MASK 0x00001000L
++#define SDMA0_STATUS_REG__MC_WR_IDLE_MASK 0x00002000L
++#define SDMA0_STATUS_REG__SRBM_IDLE_MASK 0x00004000L
++#define SDMA0_STATUS_REG__CONTEXT_EMPTY_MASK 0x00008000L
++#define SDMA0_STATUS_REG__DELTA_RPTR_FULL_MASK 0x00010000L
++#define SDMA0_STATUS_REG__RB_MC_RREQ_IDLE_MASK 0x00020000L
++#define SDMA0_STATUS_REG__IB_MC_RREQ_IDLE_MASK 0x00040000L
++#define SDMA0_STATUS_REG__MC_RD_IDLE_MASK 0x00080000L
++#define SDMA0_STATUS_REG__DELTA_RPTR_EMPTY_MASK 0x00100000L
++#define SDMA0_STATUS_REG__MC_RD_RET_STALL_MASK 0x00200000L
++#define SDMA0_STATUS_REG__MC_RD_NO_POLL_IDLE_MASK 0x00400000L
++#define SDMA0_STATUS_REG__PREV_CMD_IDLE_MASK 0x02000000L
++#define SDMA0_STATUS_REG__SEM_IDLE_MASK 0x04000000L
++#define SDMA0_STATUS_REG__SEM_REQ_STALL_MASK 0x08000000L
++#define SDMA0_STATUS_REG__SEM_RESP_STATE_MASK 0x30000000L
++#define SDMA0_STATUS_REG__INT_IDLE_MASK 0x40000000L
++#define SDMA0_STATUS_REG__INT_REQ_STALL_MASK 0x80000000L
++//SDMA0_STATUS1_REG
++#define SDMA0_STATUS1_REG__CE_WREQ_IDLE__SHIFT 0x0
++#define SDMA0_STATUS1_REG__CE_WR_IDLE__SHIFT 0x1
++#define SDMA0_STATUS1_REG__CE_SPLIT_IDLE__SHIFT 0x2
++#define SDMA0_STATUS1_REG__CE_RREQ_IDLE__SHIFT 0x3
++#define SDMA0_STATUS1_REG__CE_OUT_IDLE__SHIFT 0x4
++#define SDMA0_STATUS1_REG__CE_IN_IDLE__SHIFT 0x5
++#define SDMA0_STATUS1_REG__CE_DST_IDLE__SHIFT 0x6
++#define SDMA0_STATUS1_REG__CE_CMD_IDLE__SHIFT 0x9
++#define SDMA0_STATUS1_REG__CE_AFIFO_FULL__SHIFT 0xa
++#define SDMA0_STATUS1_REG__CE_INFO_FULL__SHIFT 0xd
++#define SDMA0_STATUS1_REG__CE_INFO1_FULL__SHIFT 0xe
++#define SDMA0_STATUS1_REG__EX_START__SHIFT 0xf
++#define SDMA0_STATUS1_REG__CE_RD_STALL__SHIFT 0x11
++#define SDMA0_STATUS1_REG__CE_WR_STALL__SHIFT 0x12
++#define SDMA0_STATUS1_REG__CE_WREQ_IDLE_MASK 0x00000001L
++#define SDMA0_STATUS1_REG__CE_WR_IDLE_MASK 0x00000002L
++#define SDMA0_STATUS1_REG__CE_SPLIT_IDLE_MASK 0x00000004L
++#define SDMA0_STATUS1_REG__CE_RREQ_IDLE_MASK 0x00000008L
++#define SDMA0_STATUS1_REG__CE_OUT_IDLE_MASK 0x00000010L
++#define SDMA0_STATUS1_REG__CE_IN_IDLE_MASK 0x00000020L
++#define SDMA0_STATUS1_REG__CE_DST_IDLE_MASK 0x00000040L
++#define SDMA0_STATUS1_REG__CE_CMD_IDLE_MASK 0x00000200L
++#define SDMA0_STATUS1_REG__CE_AFIFO_FULL_MASK 0x00000400L
++#define SDMA0_STATUS1_REG__CE_INFO_FULL_MASK 0x00002000L
++#define SDMA0_STATUS1_REG__CE_INFO1_FULL_MASK 0x00004000L
++#define SDMA0_STATUS1_REG__EX_START_MASK 0x00008000L
++#define SDMA0_STATUS1_REG__CE_RD_STALL_MASK 0x00020000L
++#define SDMA0_STATUS1_REG__CE_WR_STALL_MASK 0x00040000L
++//SDMA0_RD_BURST_CNTL
++#define SDMA0_RD_BURST_CNTL__RD_BURST__SHIFT 0x0
++#define SDMA0_RD_BURST_CNTL__CMD_BUFFER_RD_BURST__SHIFT 0x2
++#define SDMA0_RD_BURST_CNTL__RD_BURST_MASK 0x00000003L
++#define SDMA0_RD_BURST_CNTL__CMD_BUFFER_RD_BURST_MASK 0x0000000CL
++//SDMA0_HBM_PAGE_CONFIG
++#define SDMA0_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT__SHIFT 0x0
++#define SDMA0_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT_MASK 0x00000003L
++//SDMA0_UCODE_CHECKSUM
++#define SDMA0_UCODE_CHECKSUM__DATA__SHIFT 0x0
++#define SDMA0_UCODE_CHECKSUM__DATA_MASK 0xFFFFFFFFL
++//SDMA0_F32_CNTL
++#define SDMA0_F32_CNTL__HALT__SHIFT 0x0
++#define SDMA0_F32_CNTL__STEP__SHIFT 0x1
++#define SDMA0_F32_CNTL__HALT_MASK 0x00000001L
++#define SDMA0_F32_CNTL__STEP_MASK 0x00000002L
++//SDMA0_FREEZE
++#define SDMA0_FREEZE__PREEMPT__SHIFT 0x0
++#define SDMA0_FREEZE__FREEZE__SHIFT 0x4
++#define SDMA0_FREEZE__FROZEN__SHIFT 0x5
++#define SDMA0_FREEZE__F32_FREEZE__SHIFT 0x6
++#define SDMA0_FREEZE__PREEMPT_MASK 0x00000001L
++#define SDMA0_FREEZE__FREEZE_MASK 0x00000010L
++#define SDMA0_FREEZE__FROZEN_MASK 0x00000020L
++#define SDMA0_FREEZE__F32_FREEZE_MASK 0x00000040L
++//SDMA0_PHASE0_QUANTUM
++#define SDMA0_PHASE0_QUANTUM__UNIT__SHIFT 0x0
++#define SDMA0_PHASE0_QUANTUM__VALUE__SHIFT 0x8
++#define SDMA0_PHASE0_QUANTUM__PREFER__SHIFT 0x1e
++#define SDMA0_PHASE0_QUANTUM__UNIT_MASK 0x0000000FL
++#define SDMA0_PHASE0_QUANTUM__VALUE_MASK 0x00FFFF00L
++#define SDMA0_PHASE0_QUANTUM__PREFER_MASK 0x40000000L
++//SDMA0_PHASE1_QUANTUM
++#define SDMA0_PHASE1_QUANTUM__UNIT__SHIFT 0x0
++#define SDMA0_PHASE1_QUANTUM__VALUE__SHIFT 0x8
++#define SDMA0_PHASE1_QUANTUM__PREFER__SHIFT 0x1e
++#define SDMA0_PHASE1_QUANTUM__UNIT_MASK 0x0000000FL
++#define SDMA0_PHASE1_QUANTUM__VALUE_MASK 0x00FFFF00L
++#define SDMA0_PHASE1_QUANTUM__PREFER_MASK 0x40000000L
++//SDMA_POWER_GATING
++#define SDMA_POWER_GATING__SDMA0_POWER_OFF_CONDITION__SHIFT 0x0
++#define SDMA_POWER_GATING__SDMA0_POWER_ON_CONDITION__SHIFT 0x1
++#define SDMA_POWER_GATING__SDMA0_POWER_OFF_REQ__SHIFT 0x2
++#define SDMA_POWER_GATING__SDMA0_POWER_ON_REQ__SHIFT 0x3
++#define SDMA_POWER_GATING__PG_CNTL_STATUS__SHIFT 0x4
++#define SDMA_POWER_GATING__SDMA0_POWER_OFF_CONDITION_MASK 0x00000001L
++#define SDMA_POWER_GATING__SDMA0_POWER_ON_CONDITION_MASK 0x00000002L
++#define SDMA_POWER_GATING__SDMA0_POWER_OFF_REQ_MASK 0x00000004L
++#define SDMA_POWER_GATING__SDMA0_POWER_ON_REQ_MASK 0x00000008L
++#define SDMA_POWER_GATING__PG_CNTL_STATUS_MASK 0x00000030L
++//SDMA_PGFSM_CONFIG
++#define SDMA_PGFSM_CONFIG__FSM_ADDR__SHIFT 0x0
++#define SDMA_PGFSM_CONFIG__POWER_DOWN__SHIFT 0x8
++#define SDMA_PGFSM_CONFIG__POWER_UP__SHIFT 0x9
++#define SDMA_PGFSM_CONFIG__P1_SELECT__SHIFT 0xa
++#define SDMA_PGFSM_CONFIG__P2_SELECT__SHIFT 0xb
++#define SDMA_PGFSM_CONFIG__WRITE__SHIFT 0xc
++#define SDMA_PGFSM_CONFIG__READ__SHIFT 0xd
++#define SDMA_PGFSM_CONFIG__SRBM_OVERRIDE__SHIFT 0x1b
++#define SDMA_PGFSM_CONFIG__REG_ADDR__SHIFT 0x1c
++#define SDMA_PGFSM_CONFIG__FSM_ADDR_MASK 0x000000FFL
++#define SDMA_PGFSM_CONFIG__POWER_DOWN_MASK 0x00000100L
++#define SDMA_PGFSM_CONFIG__POWER_UP_MASK 0x00000200L
++#define SDMA_PGFSM_CONFIG__P1_SELECT_MASK 0x00000400L
++#define SDMA_PGFSM_CONFIG__P2_SELECT_MASK 0x00000800L
++#define SDMA_PGFSM_CONFIG__WRITE_MASK 0x00001000L
++#define SDMA_PGFSM_CONFIG__READ_MASK 0x00002000L
++#define SDMA_PGFSM_CONFIG__SRBM_OVERRIDE_MASK 0x08000000L
++#define SDMA_PGFSM_CONFIG__REG_ADDR_MASK 0xF0000000L
++//SDMA_PGFSM_WRITE
++#define SDMA_PGFSM_WRITE__VALUE__SHIFT 0x0
++#define SDMA_PGFSM_WRITE__VALUE_MASK 0xFFFFFFFFL
++//SDMA_PGFSM_READ
++#define SDMA_PGFSM_READ__VALUE__SHIFT 0x0
++#define SDMA_PGFSM_READ__VALUE_MASK 0x00FFFFFFL
++//SDMA0_EDC_CONFIG
++#define SDMA0_EDC_CONFIG__DIS_EDC__SHIFT 0x1
++#define SDMA0_EDC_CONFIG__ECC_INT_ENABLE__SHIFT 0x2
++#define SDMA0_EDC_CONFIG__DIS_EDC_MASK 0x00000002L
++#define SDMA0_EDC_CONFIG__ECC_INT_ENABLE_MASK 0x00000004L
++//SDMA0_BA_THRESHOLD
++#define SDMA0_BA_THRESHOLD__READ_THRES__SHIFT 0x0
++#define SDMA0_BA_THRESHOLD__WRITE_THRES__SHIFT 0x10
++#define SDMA0_BA_THRESHOLD__READ_THRES_MASK 0x000003FFL
++#define SDMA0_BA_THRESHOLD__WRITE_THRES_MASK 0x03FF0000L
++//SDMA0_ID
++#define SDMA0_ID__DEVICE_ID__SHIFT 0x0
++#define SDMA0_ID__DEVICE_ID_MASK 0x000000FFL
++//SDMA0_VERSION
++#define SDMA0_VERSION__MINVER__SHIFT 0x0
++#define SDMA0_VERSION__MAJVER__SHIFT 0x8
++#define SDMA0_VERSION__REV__SHIFT 0x10
++#define SDMA0_VERSION__MINVER_MASK 0x0000007FL
++#define SDMA0_VERSION__MAJVER_MASK 0x00007F00L
++#define SDMA0_VERSION__REV_MASK 0x003F0000L
++//SDMA0_EDC_COUNTER
++#define SDMA0_EDC_COUNTER__SDMA_UCODE_BUF_SED__SHIFT 0x0
++#define SDMA0_EDC_COUNTER__SDMA_RB_CMD_BUF_SED__SHIFT 0x2
++#define SDMA0_EDC_COUNTER__SDMA_IB_CMD_BUF_SED__SHIFT 0x3
++#define SDMA0_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED__SHIFT 0x4
++#define SDMA0_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED__SHIFT 0x5
++#define SDMA0_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED__SHIFT 0x6
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED__SHIFT 0x7
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED__SHIFT 0x8
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED__SHIFT 0x9
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED__SHIFT 0xa
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED__SHIFT 0xb
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED__SHIFT 0xc
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED__SHIFT 0xd
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED__SHIFT 0xe
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED__SHIFT 0xf
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED__SHIFT 0x10
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED__SHIFT 0x11
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED__SHIFT 0x12
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED__SHIFT 0x13
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED__SHIFT 0x14
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED__SHIFT 0x15
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED__SHIFT 0x16
++#define SDMA0_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED__SHIFT 0x17
++#define SDMA0_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED__SHIFT 0x18
++#define SDMA0_EDC_COUNTER__SDMA_UCODE_BUF_SED_MASK 0x00000001L
++#define SDMA0_EDC_COUNTER__SDMA_RB_CMD_BUF_SED_MASK 0x00000004L
++#define SDMA0_EDC_COUNTER__SDMA_IB_CMD_BUF_SED_MASK 0x00000008L
++#define SDMA0_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED_MASK 0x00000010L
++#define SDMA0_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED_MASK 0x00000020L
++#define SDMA0_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED_MASK 0x00000040L
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED_MASK 0x00000080L
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED_MASK 0x00000100L
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED_MASK 0x00000200L
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED_MASK 0x00000400L
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED_MASK 0x00000800L
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED_MASK 0x00001000L
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED_MASK 0x00002000L
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED_MASK 0x00004000L
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED_MASK 0x00008000L
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED_MASK 0x00010000L
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED_MASK 0x00020000L
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED_MASK 0x00040000L
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED_MASK 0x00080000L
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED_MASK 0x00100000L
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED_MASK 0x00200000L
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED_MASK 0x00400000L
++#define SDMA0_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED_MASK 0x00800000L
++#define SDMA0_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED_MASK 0x01000000L
++//SDMA0_EDC_COUNTER_CLEAR
++#define SDMA0_EDC_COUNTER_CLEAR__DUMMY__SHIFT 0x0
++#define SDMA0_EDC_COUNTER_CLEAR__DUMMY_MASK 0x00000001L
++//SDMA0_STATUS2_REG
++#define SDMA0_STATUS2_REG__ID__SHIFT 0x0
++#define SDMA0_STATUS2_REG__F32_INSTR_PTR__SHIFT 0x2
++#define SDMA0_STATUS2_REG__CMD_OP__SHIFT 0x10
++#define SDMA0_STATUS2_REG__ID_MASK 0x00000003L
++#define SDMA0_STATUS2_REG__F32_INSTR_PTR_MASK 0x00000FFCL
++#define SDMA0_STATUS2_REG__CMD_OP_MASK 0xFFFF0000L
++//SDMA0_ATOMIC_CNTL
++#define SDMA0_ATOMIC_CNTL__LOOP_TIMER__SHIFT 0x0
++#define SDMA0_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE__SHIFT 0x1f
++#define SDMA0_ATOMIC_CNTL__LOOP_TIMER_MASK 0x7FFFFFFFL
++#define SDMA0_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE_MASK 0x80000000L
++//SDMA0_ATOMIC_PREOP_LO
++#define SDMA0_ATOMIC_PREOP_LO__DATA__SHIFT 0x0
++#define SDMA0_ATOMIC_PREOP_LO__DATA_MASK 0xFFFFFFFFL
++//SDMA0_ATOMIC_PREOP_HI
++#define SDMA0_ATOMIC_PREOP_HI__DATA__SHIFT 0x0
++#define SDMA0_ATOMIC_PREOP_HI__DATA_MASK 0xFFFFFFFFL
++//SDMA0_UTCL1_CNTL
++#define SDMA0_UTCL1_CNTL__REDO_ENABLE__SHIFT 0x0
++#define SDMA0_UTCL1_CNTL__REDO_DELAY__SHIFT 0x1
++#define SDMA0_UTCL1_CNTL__REDO_WATERMK__SHIFT 0xb
++#define SDMA0_UTCL1_CNTL__INVACK_DELAY__SHIFT 0xe
++#define SDMA0_UTCL1_CNTL__REQL2_CREDIT__SHIFT 0x18
++#define SDMA0_UTCL1_CNTL__VADDR_WATERMK__SHIFT 0x1d
++#define SDMA0_UTCL1_CNTL__REDO_ENABLE_MASK 0x00000001L
++#define SDMA0_UTCL1_CNTL__REDO_DELAY_MASK 0x000007FEL
++#define SDMA0_UTCL1_CNTL__REDO_WATERMK_MASK 0x00003800L
++#define SDMA0_UTCL1_CNTL__INVACK_DELAY_MASK 0x00FFC000L
++#define SDMA0_UTCL1_CNTL__REQL2_CREDIT_MASK 0x1F000000L
++#define SDMA0_UTCL1_CNTL__VADDR_WATERMK_MASK 0xE0000000L
++//SDMA0_UTCL1_WATERMK
++#define SDMA0_UTCL1_WATERMK__REQMC_WATERMK__SHIFT 0x0
++#define SDMA0_UTCL1_WATERMK__REQPG_WATERMK__SHIFT 0x9
++#define SDMA0_UTCL1_WATERMK__INVREQ_WATERMK__SHIFT 0x11
++#define SDMA0_UTCL1_WATERMK__XNACK_WATERMK__SHIFT 0x19
++#define SDMA0_UTCL1_WATERMK__REQMC_WATERMK_MASK 0x000001FFL
++#define SDMA0_UTCL1_WATERMK__REQPG_WATERMK_MASK 0x0001FE00L
++#define SDMA0_UTCL1_WATERMK__INVREQ_WATERMK_MASK 0x01FE0000L
++#define SDMA0_UTCL1_WATERMK__XNACK_WATERMK_MASK 0xFE000000L
++//SDMA0_UTCL1_RD_STATUS
++#define SDMA0_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0
++#define SDMA0_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1
++#define SDMA0_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2
++#define SDMA0_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3
++#define SDMA0_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4
++#define SDMA0_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5
++#define SDMA0_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6
++#define SDMA0_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7
++#define SDMA0_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8
++#define SDMA0_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9
++#define SDMA0_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa
++#define SDMA0_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb
++#define SDMA0_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc
++#define SDMA0_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd
++#define SDMA0_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe
++#define SDMA0_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf
++#define SDMA0_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10
++#define SDMA0_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11
++#define SDMA0_UTCL1_RD_STATUS__PAGE_FAULT__SHIFT 0x12
++#define SDMA0_UTCL1_RD_STATUS__PAGE_NULL__SHIFT 0x13
++#define SDMA0_UTCL1_RD_STATUS__REQL2_IDLE__SHIFT 0x14
++#define SDMA0_UTCL1_RD_STATUS__CE_L1_STALL__SHIFT 0x15
++#define SDMA0_UTCL1_RD_STATUS__NEXT_RD_VECTOR__SHIFT 0x16
++#define SDMA0_UTCL1_RD_STATUS__MERGE_STATE__SHIFT 0x1a
++#define SDMA0_UTCL1_RD_STATUS__ADDR_RD_RTR__SHIFT 0x1d
++#define SDMA0_UTCL1_RD_STATUS__WPTR_POLLING__SHIFT 0x1e
++#define SDMA0_UTCL1_RD_STATUS__INVREQ_SIZE__SHIFT 0x1f
++#define SDMA0_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L
++#define SDMA0_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L
++#define SDMA0_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L
++#define SDMA0_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L
++#define SDMA0_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L
++#define SDMA0_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L
++#define SDMA0_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L
++#define SDMA0_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L
++#define SDMA0_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L
++#define SDMA0_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L
++#define SDMA0_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L
++#define SDMA0_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L
++#define SDMA0_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L
++#define SDMA0_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L
++#define SDMA0_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L
++#define SDMA0_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L
++#define SDMA0_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L
++#define SDMA0_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L
++#define SDMA0_UTCL1_RD_STATUS__PAGE_FAULT_MASK 0x00040000L
++#define SDMA0_UTCL1_RD_STATUS__PAGE_NULL_MASK 0x00080000L
++#define SDMA0_UTCL1_RD_STATUS__REQL2_IDLE_MASK 0x00100000L
++#define SDMA0_UTCL1_RD_STATUS__CE_L1_STALL_MASK 0x00200000L
++#define SDMA0_UTCL1_RD_STATUS__NEXT_RD_VECTOR_MASK 0x03C00000L
++#define SDMA0_UTCL1_RD_STATUS__MERGE_STATE_MASK 0x1C000000L
++#define SDMA0_UTCL1_RD_STATUS__ADDR_RD_RTR_MASK 0x20000000L
++#define SDMA0_UTCL1_RD_STATUS__WPTR_POLLING_MASK 0x40000000L
++#define SDMA0_UTCL1_RD_STATUS__INVREQ_SIZE_MASK 0x80000000L
++//SDMA0_UTCL1_WR_STATUS
++#define SDMA0_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0
++#define SDMA0_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1
++#define SDMA0_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2
++#define SDMA0_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3
++#define SDMA0_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4
++#define SDMA0_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5
++#define SDMA0_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6
++#define SDMA0_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7
++#define SDMA0_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8
++#define SDMA0_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9
++#define SDMA0_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa
++#define SDMA0_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb
++#define SDMA0_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc
++#define SDMA0_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd
++#define SDMA0_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe
++#define SDMA0_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf
++#define SDMA0_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10
++#define SDMA0_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11
++#define SDMA0_UTCL1_WR_STATUS__PAGE_FAULT__SHIFT 0x12
++#define SDMA0_UTCL1_WR_STATUS__PAGE_NULL__SHIFT 0x13
++#define SDMA0_UTCL1_WR_STATUS__REQL2_IDLE__SHIFT 0x14
++#define SDMA0_UTCL1_WR_STATUS__F32_WR_RTR__SHIFT 0x15
++#define SDMA0_UTCL1_WR_STATUS__NEXT_WR_VECTOR__SHIFT 0x16
++#define SDMA0_UTCL1_WR_STATUS__MERGE_STATE__SHIFT 0x19
++#define SDMA0_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY__SHIFT 0x1c
++#define SDMA0_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL__SHIFT 0x1d
++#define SDMA0_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY__SHIFT 0x1e
++#define SDMA0_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL__SHIFT 0x1f
++#define SDMA0_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L
++#define SDMA0_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L
++#define SDMA0_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L
++#define SDMA0_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L
++#define SDMA0_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L
++#define SDMA0_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L
++#define SDMA0_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L
++#define SDMA0_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L
++#define SDMA0_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L
++#define SDMA0_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L
++#define SDMA0_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L
++#define SDMA0_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L
++#define SDMA0_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L
++#define SDMA0_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L
++#define SDMA0_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L
++#define SDMA0_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L
++#define SDMA0_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L
++#define SDMA0_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L
++#define SDMA0_UTCL1_WR_STATUS__PAGE_FAULT_MASK 0x00040000L
++#define SDMA0_UTCL1_WR_STATUS__PAGE_NULL_MASK 0x00080000L
++#define SDMA0_UTCL1_WR_STATUS__REQL2_IDLE_MASK 0x00100000L
++#define SDMA0_UTCL1_WR_STATUS__F32_WR_RTR_MASK 0x00200000L
++#define SDMA0_UTCL1_WR_STATUS__NEXT_WR_VECTOR_MASK 0x01C00000L
++#define SDMA0_UTCL1_WR_STATUS__MERGE_STATE_MASK 0x0E000000L
++#define SDMA0_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY_MASK 0x10000000L
++#define SDMA0_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL_MASK 0x20000000L
++#define SDMA0_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY_MASK 0x40000000L
++#define SDMA0_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL_MASK 0x80000000L
++//SDMA0_UTCL1_INV0
++#define SDMA0_UTCL1_INV0__INV_MIDDLE__SHIFT 0x0
++#define SDMA0_UTCL1_INV0__RD_TIMEOUT__SHIFT 0x1
++#define SDMA0_UTCL1_INV0__WR_TIMEOUT__SHIFT 0x2
++#define SDMA0_UTCL1_INV0__RD_IN_INVADR__SHIFT 0x3
++#define SDMA0_UTCL1_INV0__WR_IN_INVADR__SHIFT 0x4
++#define SDMA0_UTCL1_INV0__PAGE_NULL_SW__SHIFT 0x5
++#define SDMA0_UTCL1_INV0__XNACK_IS_INVADR__SHIFT 0x6
++#define SDMA0_UTCL1_INV0__INVREQ_ENABLE__SHIFT 0x7
++#define SDMA0_UTCL1_INV0__NACK_TIMEOUT_SW__SHIFT 0x8
++#define SDMA0_UTCL1_INV0__NFLUSH_INV_IDLE__SHIFT 0x9
++#define SDMA0_UTCL1_INV0__FLUSH_INV_IDLE__SHIFT 0xa
++#define SDMA0_UTCL1_INV0__INV_FLUSHTYPE__SHIFT 0xb
++#define SDMA0_UTCL1_INV0__INV_VMID_VEC__SHIFT 0xc
++#define SDMA0_UTCL1_INV0__INV_ADDR_HI__SHIFT 0x1c
++#define SDMA0_UTCL1_INV0__INV_MIDDLE_MASK 0x00000001L
++#define SDMA0_UTCL1_INV0__RD_TIMEOUT_MASK 0x00000002L
++#define SDMA0_UTCL1_INV0__WR_TIMEOUT_MASK 0x00000004L
++#define SDMA0_UTCL1_INV0__RD_IN_INVADR_MASK 0x00000008L
++#define SDMA0_UTCL1_INV0__WR_IN_INVADR_MASK 0x00000010L
++#define SDMA0_UTCL1_INV0__PAGE_NULL_SW_MASK 0x00000020L
++#define SDMA0_UTCL1_INV0__XNACK_IS_INVADR_MASK 0x00000040L
++#define SDMA0_UTCL1_INV0__INVREQ_ENABLE_MASK 0x00000080L
++#define SDMA0_UTCL1_INV0__NACK_TIMEOUT_SW_MASK 0x00000100L
++#define SDMA0_UTCL1_INV0__NFLUSH_INV_IDLE_MASK 0x00000200L
++#define SDMA0_UTCL1_INV0__FLUSH_INV_IDLE_MASK 0x00000400L
++#define SDMA0_UTCL1_INV0__INV_FLUSHTYPE_MASK 0x00000800L
++#define SDMA0_UTCL1_INV0__INV_VMID_VEC_MASK 0x0FFFF000L
++#define SDMA0_UTCL1_INV0__INV_ADDR_HI_MASK 0xF0000000L
++//SDMA0_UTCL1_INV1
++#define SDMA0_UTCL1_INV1__INV_ADDR_LO__SHIFT 0x0
++#define SDMA0_UTCL1_INV1__INV_ADDR_LO_MASK 0xFFFFFFFFL
++//SDMA0_UTCL1_INV2
++#define SDMA0_UTCL1_INV2__INV_NFLUSH_VMID_VEC__SHIFT 0x0
++#define SDMA0_UTCL1_INV2__INV_NFLUSH_VMID_VEC_MASK 0xFFFFFFFFL
++//SDMA0_UTCL1_RD_XNACK0
++#define SDMA0_UTCL1_RD_XNACK0__XNACK_ADDR_LO__SHIFT 0x0
++#define SDMA0_UTCL1_RD_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL
++//SDMA0_UTCL1_RD_XNACK1
++#define SDMA0_UTCL1_RD_XNACK1__XNACK_ADDR_HI__SHIFT 0x0
++#define SDMA0_UTCL1_RD_XNACK1__XNACK_VMID__SHIFT 0x4
++#define SDMA0_UTCL1_RD_XNACK1__XNACK_VECTOR__SHIFT 0x8
++#define SDMA0_UTCL1_RD_XNACK1__IS_XNACK__SHIFT 0x1a
++#define SDMA0_UTCL1_RD_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL
++#define SDMA0_UTCL1_RD_XNACK1__XNACK_VMID_MASK 0x000000F0L
++#define SDMA0_UTCL1_RD_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L
++#define SDMA0_UTCL1_RD_XNACK1__IS_XNACK_MASK 0x0C000000L
++//SDMA0_UTCL1_WR_XNACK0
++#define SDMA0_UTCL1_WR_XNACK0__XNACK_ADDR_LO__SHIFT 0x0
++#define SDMA0_UTCL1_WR_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL
++//SDMA0_UTCL1_WR_XNACK1
++#define SDMA0_UTCL1_WR_XNACK1__XNACK_ADDR_HI__SHIFT 0x0
++#define SDMA0_UTCL1_WR_XNACK1__XNACK_VMID__SHIFT 0x4
++#define SDMA0_UTCL1_WR_XNACK1__XNACK_VECTOR__SHIFT 0x8
++#define SDMA0_UTCL1_WR_XNACK1__IS_XNACK__SHIFT 0x1a
++#define SDMA0_UTCL1_WR_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL
++#define SDMA0_UTCL1_WR_XNACK1__XNACK_VMID_MASK 0x000000F0L
++#define SDMA0_UTCL1_WR_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L
++#define SDMA0_UTCL1_WR_XNACK1__IS_XNACK_MASK 0x0C000000L
++//SDMA0_UTCL1_TIMEOUT
++#define SDMA0_UTCL1_TIMEOUT__RD_XNACK_LIMIT__SHIFT 0x0
++#define SDMA0_UTCL1_TIMEOUT__WR_XNACK_LIMIT__SHIFT 0x10
++#define SDMA0_UTCL1_TIMEOUT__RD_XNACK_LIMIT_MASK 0x0000FFFFL
++#define SDMA0_UTCL1_TIMEOUT__WR_XNACK_LIMIT_MASK 0xFFFF0000L
++//SDMA0_UTCL1_PAGE
++#define SDMA0_UTCL1_PAGE__VM_HOLE__SHIFT 0x0
++#define SDMA0_UTCL1_PAGE__REQ_TYPE__SHIFT 0x1
++#define SDMA0_UTCL1_PAGE__USE_MTYPE__SHIFT 0x6
++#define SDMA0_UTCL1_PAGE__USE_PT_SNOOP__SHIFT 0x9
++#define SDMA0_UTCL1_PAGE__VM_HOLE_MASK 0x00000001L
++#define SDMA0_UTCL1_PAGE__REQ_TYPE_MASK 0x0000001EL
++#define SDMA0_UTCL1_PAGE__USE_MTYPE_MASK 0x000001C0L
++#define SDMA0_UTCL1_PAGE__USE_PT_SNOOP_MASK 0x00000200L
++//SDMA0_POWER_CNTL_IDLE
++#define SDMA0_POWER_CNTL_IDLE__DELAY0__SHIFT 0x0
++#define SDMA0_POWER_CNTL_IDLE__DELAY1__SHIFT 0x10
++#define SDMA0_POWER_CNTL_IDLE__DELAY2__SHIFT 0x18
++#define SDMA0_POWER_CNTL_IDLE__DELAY0_MASK 0x0000FFFFL
++#define SDMA0_POWER_CNTL_IDLE__DELAY1_MASK 0x00FF0000L
++#define SDMA0_POWER_CNTL_IDLE__DELAY2_MASK 0xFF000000L
++//SDMA0_RELAX_ORDERING_LUT
++#define SDMA0_RELAX_ORDERING_LUT__RESERVED0__SHIFT 0x0
++#define SDMA0_RELAX_ORDERING_LUT__COPY__SHIFT 0x1
++#define SDMA0_RELAX_ORDERING_LUT__WRITE__SHIFT 0x2
++#define SDMA0_RELAX_ORDERING_LUT__RESERVED3__SHIFT 0x3
++#define SDMA0_RELAX_ORDERING_LUT__RESERVED4__SHIFT 0x4
++#define SDMA0_RELAX_ORDERING_LUT__FENCE__SHIFT 0x5
++#define SDMA0_RELAX_ORDERING_LUT__RESERVED76__SHIFT 0x6
++#define SDMA0_RELAX_ORDERING_LUT__POLL_MEM__SHIFT 0x8
++#define SDMA0_RELAX_ORDERING_LUT__COND_EXE__SHIFT 0x9
++#define SDMA0_RELAX_ORDERING_LUT__ATOMIC__SHIFT 0xa
++#define SDMA0_RELAX_ORDERING_LUT__CONST_FILL__SHIFT 0xb
++#define SDMA0_RELAX_ORDERING_LUT__PTEPDE__SHIFT 0xc
++#define SDMA0_RELAX_ORDERING_LUT__TIMESTAMP__SHIFT 0xd
++#define SDMA0_RELAX_ORDERING_LUT__RESERVED__SHIFT 0xe
++#define SDMA0_RELAX_ORDERING_LUT__WORLD_SWITCH__SHIFT 0x1b
++#define SDMA0_RELAX_ORDERING_LUT__RPTR_WRB__SHIFT 0x1c
++#define SDMA0_RELAX_ORDERING_LUT__WPTR_POLL__SHIFT 0x1d
++#define SDMA0_RELAX_ORDERING_LUT__IB_FETCH__SHIFT 0x1e
++#define SDMA0_RELAX_ORDERING_LUT__RB_FETCH__SHIFT 0x1f
++#define SDMA0_RELAX_ORDERING_LUT__RESERVED0_MASK 0x00000001L
++#define SDMA0_RELAX_ORDERING_LUT__COPY_MASK 0x00000002L
++#define SDMA0_RELAX_ORDERING_LUT__WRITE_MASK 0x00000004L
++#define SDMA0_RELAX_ORDERING_LUT__RESERVED3_MASK 0x00000008L
++#define SDMA0_RELAX_ORDERING_LUT__RESERVED4_MASK 0x00000010L
++#define SDMA0_RELAX_ORDERING_LUT__FENCE_MASK 0x00000020L
++#define SDMA0_RELAX_ORDERING_LUT__RESERVED76_MASK 0x000000C0L
++#define SDMA0_RELAX_ORDERING_LUT__POLL_MEM_MASK 0x00000100L
++#define SDMA0_RELAX_ORDERING_LUT__COND_EXE_MASK 0x00000200L
++#define SDMA0_RELAX_ORDERING_LUT__ATOMIC_MASK 0x00000400L
++#define SDMA0_RELAX_ORDERING_LUT__CONST_FILL_MASK 0x00000800L
++#define SDMA0_RELAX_ORDERING_LUT__PTEPDE_MASK 0x00001000L
++#define SDMA0_RELAX_ORDERING_LUT__TIMESTAMP_MASK 0x00002000L
++#define SDMA0_RELAX_ORDERING_LUT__RESERVED_MASK 0x07FFC000L
++#define SDMA0_RELAX_ORDERING_LUT__WORLD_SWITCH_MASK 0x08000000L
++#define SDMA0_RELAX_ORDERING_LUT__RPTR_WRB_MASK 0x10000000L
++#define SDMA0_RELAX_ORDERING_LUT__WPTR_POLL_MASK 0x20000000L
++#define SDMA0_RELAX_ORDERING_LUT__IB_FETCH_MASK 0x40000000L
++#define SDMA0_RELAX_ORDERING_LUT__RB_FETCH_MASK 0x80000000L
++//SDMA0_CHICKEN_BITS_2
++#define SDMA0_CHICKEN_BITS_2__F32_CMD_PROC_DELAY__SHIFT 0x0
++#define SDMA0_CHICKEN_BITS_2__F32_CMD_PROC_DELAY_MASK 0x0000000FL
++//SDMA0_STATUS3_REG
++#define SDMA0_STATUS3_REG__CMD_OP_STATUS__SHIFT 0x0
++#define SDMA0_STATUS3_REG__PREV_VM_CMD__SHIFT 0x10
++#define SDMA0_STATUS3_REG__EXCEPTION_IDLE__SHIFT 0x14
++#define SDMA0_STATUS3_REG__QUEUE_ID_MATCH__SHIFT 0x15
++#define SDMA0_STATUS3_REG__INT_QUEUE_ID__SHIFT 0x16
++#define SDMA0_STATUS3_REG__CMD_OP_STATUS_MASK 0x0000FFFFL
++#define SDMA0_STATUS3_REG__PREV_VM_CMD_MASK 0x000F0000L
++#define SDMA0_STATUS3_REG__EXCEPTION_IDLE_MASK 0x00100000L
++#define SDMA0_STATUS3_REG__QUEUE_ID_MATCH_MASK 0x00200000L
++#define SDMA0_STATUS3_REG__INT_QUEUE_ID_MASK 0x03C00000L
++//SDMA0_PHYSICAL_ADDR_LO
++#define SDMA0_PHYSICAL_ADDR_LO__D_VALID__SHIFT 0x0
++#define SDMA0_PHYSICAL_ADDR_LO__DIRTY__SHIFT 0x1
++#define SDMA0_PHYSICAL_ADDR_LO__PHY_VALID__SHIFT 0x2
++#define SDMA0_PHYSICAL_ADDR_LO__ADDR__SHIFT 0xc
++#define SDMA0_PHYSICAL_ADDR_LO__D_VALID_MASK 0x00000001L
++#define SDMA0_PHYSICAL_ADDR_LO__DIRTY_MASK 0x00000002L
++#define SDMA0_PHYSICAL_ADDR_LO__PHY_VALID_MASK 0x00000004L
++#define SDMA0_PHYSICAL_ADDR_LO__ADDR_MASK 0xFFFFF000L
++//SDMA0_PHYSICAL_ADDR_HI
++#define SDMA0_PHYSICAL_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA0_PHYSICAL_ADDR_HI__ADDR_MASK 0x0000FFFFL
++//SDMA0_PHASE2_QUANTUM
++#define SDMA0_PHASE2_QUANTUM__UNIT__SHIFT 0x0
++#define SDMA0_PHASE2_QUANTUM__VALUE__SHIFT 0x8
++#define SDMA0_PHASE2_QUANTUM__PREFER__SHIFT 0x1e
++#define SDMA0_PHASE2_QUANTUM__UNIT_MASK 0x0000000FL
++#define SDMA0_PHASE2_QUANTUM__VALUE_MASK 0x00FFFF00L
++#define SDMA0_PHASE2_QUANTUM__PREFER_MASK 0x40000000L
++//SDMA0_ERROR_LOG
++#define SDMA0_ERROR_LOG__OVERRIDE__SHIFT 0x0
++#define SDMA0_ERROR_LOG__STATUS__SHIFT 0x10
++#define SDMA0_ERROR_LOG__OVERRIDE_MASK 0x0000FFFFL
++#define SDMA0_ERROR_LOG__STATUS_MASK 0xFFFF0000L
++//SDMA0_PUB_DUMMY_REG0
++#define SDMA0_PUB_DUMMY_REG0__VALUE__SHIFT 0x0
++#define SDMA0_PUB_DUMMY_REG0__VALUE_MASK 0xFFFFFFFFL
++//SDMA0_PUB_DUMMY_REG1
++#define SDMA0_PUB_DUMMY_REG1__VALUE__SHIFT 0x0
++#define SDMA0_PUB_DUMMY_REG1__VALUE_MASK 0xFFFFFFFFL
++//SDMA0_PUB_DUMMY_REG2
++#define SDMA0_PUB_DUMMY_REG2__VALUE__SHIFT 0x0
++#define SDMA0_PUB_DUMMY_REG2__VALUE_MASK 0xFFFFFFFFL
++//SDMA0_PUB_DUMMY_REG3
++#define SDMA0_PUB_DUMMY_REG3__VALUE__SHIFT 0x0
++#define SDMA0_PUB_DUMMY_REG3__VALUE_MASK 0xFFFFFFFFL
++//SDMA0_F32_COUNTER
++#define SDMA0_F32_COUNTER__VALUE__SHIFT 0x0
++#define SDMA0_F32_COUNTER__VALUE_MASK 0xFFFFFFFFL
++//SDMA0_PERFMON_CNTL
++#define SDMA0_PERFMON_CNTL__PERF_ENABLE0__SHIFT 0x0
++#define SDMA0_PERFMON_CNTL__PERF_CLEAR0__SHIFT 0x1
++#define SDMA0_PERFMON_CNTL__PERF_SEL0__SHIFT 0x2
++#define SDMA0_PERFMON_CNTL__PERF_ENABLE1__SHIFT 0xa
++#define SDMA0_PERFMON_CNTL__PERF_CLEAR1__SHIFT 0xb
++#define SDMA0_PERFMON_CNTL__PERF_SEL1__SHIFT 0xc
++#define SDMA0_PERFMON_CNTL__PERF_ENABLE0_MASK 0x00000001L
++#define SDMA0_PERFMON_CNTL__PERF_CLEAR0_MASK 0x00000002L
++#define SDMA0_PERFMON_CNTL__PERF_SEL0_MASK 0x000003FCL
++#define SDMA0_PERFMON_CNTL__PERF_ENABLE1_MASK 0x00000400L
++#define SDMA0_PERFMON_CNTL__PERF_CLEAR1_MASK 0x00000800L
++#define SDMA0_PERFMON_CNTL__PERF_SEL1_MASK 0x000FF000L
++//SDMA0_PERFCOUNTER0_RESULT
++#define SDMA0_PERFCOUNTER0_RESULT__PERF_COUNT__SHIFT 0x0
++#define SDMA0_PERFCOUNTER0_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
++//SDMA0_PERFCOUNTER1_RESULT
++#define SDMA0_PERFCOUNTER1_RESULT__PERF_COUNT__SHIFT 0x0
++#define SDMA0_PERFCOUNTER1_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
++//SDMA0_PERFCOUNTER_TAG_DELAY_RANGE
++#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW__SHIFT 0x0
++#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH__SHIFT 0xe
++#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW__SHIFT 0x1c
++#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW_MASK 0x00003FFFL
++#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH_MASK 0x0FFFC000L
++#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW_MASK 0x10000000L
++//SDMA0_CRD_CNTL
++#define SDMA0_CRD_CNTL__MC_WRREQ_CREDIT__SHIFT 0x7
++#define SDMA0_CRD_CNTL__MC_RDREQ_CREDIT__SHIFT 0xd
++#define SDMA0_CRD_CNTL__MC_WRREQ_CREDIT_MASK 0x00001F80L
++#define SDMA0_CRD_CNTL__MC_RDREQ_CREDIT_MASK 0x0007E000L
++//SDMA0_GPU_IOV_VIOLATION_LOG
++#define SDMA0_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
++#define SDMA0_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1
++#define SDMA0_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2
++#define SDMA0_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION__SHIFT 0x12
++#define SDMA0_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x13
++#define SDMA0_GPU_IOV_VIOLATION_LOG__VFID__SHIFT 0x14
++#define SDMA0_GPU_IOV_VIOLATION_LOG__INITIATOR_ID__SHIFT 0x18
++#define SDMA0_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
++#define SDMA0_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L
++#define SDMA0_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x0003FFFCL
++#define SDMA0_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION_MASK 0x00040000L
++#define SDMA0_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00080000L
++#define SDMA0_GPU_IOV_VIOLATION_LOG__VFID_MASK 0x00F00000L
++#define SDMA0_GPU_IOV_VIOLATION_LOG__INITIATOR_ID_MASK 0xFF000000L
++//SDMA0_ULV_CNTL
++#define SDMA0_ULV_CNTL__HYSTERESIS__SHIFT 0x0
++#define SDMA0_ULV_CNTL__ENTER_ULV_INT_CLR__SHIFT 0x1b
++#define SDMA0_ULV_CNTL__EXIT_ULV_INT_CLR__SHIFT 0x1c
++#define SDMA0_ULV_CNTL__ENTER_ULV_INT__SHIFT 0x1d
++#define SDMA0_ULV_CNTL__EXIT_ULV_INT__SHIFT 0x1e
++#define SDMA0_ULV_CNTL__ULV_STATUS__SHIFT 0x1f
++#define SDMA0_ULV_CNTL__HYSTERESIS_MASK 0x0000001FL
++#define SDMA0_ULV_CNTL__ENTER_ULV_INT_CLR_MASK 0x08000000L
++#define SDMA0_ULV_CNTL__EXIT_ULV_INT_CLR_MASK 0x10000000L
++#define SDMA0_ULV_CNTL__ENTER_ULV_INT_MASK 0x20000000L
++#define SDMA0_ULV_CNTL__EXIT_ULV_INT_MASK 0x40000000L
++#define SDMA0_ULV_CNTL__ULV_STATUS_MASK 0x80000000L
++//SDMA0_EA_DBIT_ADDR_DATA
++#define SDMA0_EA_DBIT_ADDR_DATA__VALUE__SHIFT 0x0
++#define SDMA0_EA_DBIT_ADDR_DATA__VALUE_MASK 0xFFFFFFFFL
++//SDMA0_EA_DBIT_ADDR_INDEX
++#define SDMA0_EA_DBIT_ADDR_INDEX__VALUE__SHIFT 0x0
++#define SDMA0_EA_DBIT_ADDR_INDEX__VALUE_MASK 0x00000007L
++//SDMA0_GFX_RB_CNTL
++#define SDMA0_GFX_RB_CNTL__RB_ENABLE__SHIFT 0x0
++#define SDMA0_GFX_RB_CNTL__RB_SIZE__SHIFT 0x1
++#define SDMA0_GFX_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
++#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
++#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
++#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
++#define SDMA0_GFX_RB_CNTL__RB_PRIV__SHIFT 0x17
++#define SDMA0_GFX_RB_CNTL__RB_VMID__SHIFT 0x18
++#define SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK 0x00000001L
++#define SDMA0_GFX_RB_CNTL__RB_SIZE_MASK 0x0000003EL
++#define SDMA0_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
++#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
++#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
++#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
++#define SDMA0_GFX_RB_CNTL__RB_PRIV_MASK 0x00800000L
++#define SDMA0_GFX_RB_CNTL__RB_VMID_MASK 0x0F000000L
++//SDMA0_GFX_RB_BASE
++#define SDMA0_GFX_RB_BASE__ADDR__SHIFT 0x0
++#define SDMA0_GFX_RB_BASE__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_GFX_RB_BASE_HI
++#define SDMA0_GFX_RB_BASE_HI__ADDR__SHIFT 0x0
++#define SDMA0_GFX_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
++//SDMA0_GFX_RB_RPTR
++#define SDMA0_GFX_RB_RPTR__OFFSET__SHIFT 0x0
++#define SDMA0_GFX_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_GFX_RB_RPTR_HI
++#define SDMA0_GFX_RB_RPTR_HI__OFFSET__SHIFT 0x0
++#define SDMA0_GFX_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_GFX_RB_WPTR
++#define SDMA0_GFX_RB_WPTR__OFFSET__SHIFT 0x0
++#define SDMA0_GFX_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_GFX_RB_WPTR_HI
++#define SDMA0_GFX_RB_WPTR_HI__OFFSET__SHIFT 0x0
++#define SDMA0_GFX_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_GFX_RB_WPTR_POLL_CNTL
++#define SDMA0_GFX_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
++#define SDMA0_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
++#define SDMA0_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
++#define SDMA0_GFX_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
++#define SDMA0_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
++#define SDMA0_GFX_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
++#define SDMA0_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
++#define SDMA0_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
++#define SDMA0_GFX_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
++#define SDMA0_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
++//SDMA0_GFX_RB_RPTR_ADDR_HI
++#define SDMA0_GFX_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA0_GFX_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_GFX_RB_RPTR_ADDR_LO
++#define SDMA0_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
++#define SDMA0_GFX_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA0_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
++#define SDMA0_GFX_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA0_GFX_IB_CNTL
++#define SDMA0_GFX_IB_CNTL__IB_ENABLE__SHIFT 0x0
++#define SDMA0_GFX_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
++#define SDMA0_GFX_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
++#define SDMA0_GFX_IB_CNTL__CMD_VMID__SHIFT 0x10
++#define SDMA0_GFX_IB_CNTL__IB_ENABLE_MASK 0x00000001L
++#define SDMA0_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
++#define SDMA0_GFX_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
++#define SDMA0_GFX_IB_CNTL__CMD_VMID_MASK 0x000F0000L
++//SDMA0_GFX_IB_RPTR
++#define SDMA0_GFX_IB_RPTR__OFFSET__SHIFT 0x2
++#define SDMA0_GFX_IB_RPTR__OFFSET_MASK 0x003FFFFCL
++//SDMA0_GFX_IB_OFFSET
++#define SDMA0_GFX_IB_OFFSET__OFFSET__SHIFT 0x2
++#define SDMA0_GFX_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
++//SDMA0_GFX_IB_BASE_LO
++#define SDMA0_GFX_IB_BASE_LO__ADDR__SHIFT 0x5
++#define SDMA0_GFX_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
++//SDMA0_GFX_IB_BASE_HI
++#define SDMA0_GFX_IB_BASE_HI__ADDR__SHIFT 0x0
++#define SDMA0_GFX_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_GFX_IB_SIZE
++#define SDMA0_GFX_IB_SIZE__SIZE__SHIFT 0x0
++#define SDMA0_GFX_IB_SIZE__SIZE_MASK 0x000FFFFFL
++//SDMA0_GFX_SKIP_CNTL
++#define SDMA0_GFX_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
++#define SDMA0_GFX_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
++//SDMA0_GFX_CONTEXT_STATUS
++#define SDMA0_GFX_CONTEXT_STATUS__SELECTED__SHIFT 0x0
++#define SDMA0_GFX_CONTEXT_STATUS__IDLE__SHIFT 0x2
++#define SDMA0_GFX_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
++#define SDMA0_GFX_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
++#define SDMA0_GFX_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
++#define SDMA0_GFX_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
++#define SDMA0_GFX_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
++#define SDMA0_GFX_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
++#define SDMA0_GFX_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
++#define SDMA0_GFX_CONTEXT_STATUS__IDLE_MASK 0x00000004L
++#define SDMA0_GFX_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
++#define SDMA0_GFX_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
++#define SDMA0_GFX_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
++#define SDMA0_GFX_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
++#define SDMA0_GFX_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
++#define SDMA0_GFX_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
++//SDMA0_GFX_DOORBELL
++#define SDMA0_GFX_DOORBELL__ENABLE__SHIFT 0x1c
++#define SDMA0_GFX_DOORBELL__CAPTURED__SHIFT 0x1e
++#define SDMA0_GFX_DOORBELL__ENABLE_MASK 0x10000000L
++#define SDMA0_GFX_DOORBELL__CAPTURED_MASK 0x40000000L
++//SDMA0_GFX_CONTEXT_CNTL
++#define SDMA0_GFX_CONTEXT_CNTL__RESUME_CTX__SHIFT 0x10
++#define SDMA0_GFX_CONTEXT_CNTL__RESUME_CTX_MASK 0x00010000L
++//SDMA0_GFX_STATUS
++#define SDMA0_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
++#define SDMA0_GFX_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
++#define SDMA0_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
++#define SDMA0_GFX_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
++//SDMA0_GFX_DOORBELL_LOG
++#define SDMA0_GFX_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
++#define SDMA0_GFX_DOORBELL_LOG__DATA__SHIFT 0x2
++#define SDMA0_GFX_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
++#define SDMA0_GFX_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
++//SDMA0_GFX_WATERMARK
++#define SDMA0_GFX_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
++#define SDMA0_GFX_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
++#define SDMA0_GFX_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
++#define SDMA0_GFX_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
++//SDMA0_GFX_DOORBELL_OFFSET
++#define SDMA0_GFX_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
++#define SDMA0_GFX_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
++//SDMA0_GFX_CSA_ADDR_LO
++#define SDMA0_GFX_CSA_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA0_GFX_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA0_GFX_CSA_ADDR_HI
++#define SDMA0_GFX_CSA_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA0_GFX_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_GFX_IB_SUB_REMAIN
++#define SDMA0_GFX_IB_SUB_REMAIN__SIZE__SHIFT 0x0
++#define SDMA0_GFX_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
++//SDMA0_GFX_PREEMPT
++#define SDMA0_GFX_PREEMPT__IB_PREEMPT__SHIFT 0x0
++#define SDMA0_GFX_PREEMPT__IB_PREEMPT_MASK 0x00000001L
++//SDMA0_GFX_DUMMY_REG
++#define SDMA0_GFX_DUMMY_REG__DUMMY__SHIFT 0x0
++#define SDMA0_GFX_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
++//SDMA0_GFX_RB_WPTR_POLL_ADDR_HI
++#define SDMA0_GFX_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA0_GFX_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_GFX_RB_WPTR_POLL_ADDR_LO
++#define SDMA0_GFX_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA0_GFX_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA0_GFX_RB_AQL_CNTL
++#define SDMA0_GFX_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
++#define SDMA0_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
++#define SDMA0_GFX_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
++#define SDMA0_GFX_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
++#define SDMA0_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
++#define SDMA0_GFX_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
++//SDMA0_GFX_MINOR_PTR_UPDATE
++#define SDMA0_GFX_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
++#define SDMA0_GFX_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
++//SDMA0_GFX_MIDCMD_DATA0
++#define SDMA0_GFX_MIDCMD_DATA0__DATA0__SHIFT 0x0
++#define SDMA0_GFX_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
++//SDMA0_GFX_MIDCMD_DATA1
++#define SDMA0_GFX_MIDCMD_DATA1__DATA1__SHIFT 0x0
++#define SDMA0_GFX_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
++//SDMA0_GFX_MIDCMD_DATA2
++#define SDMA0_GFX_MIDCMD_DATA2__DATA2__SHIFT 0x0
++#define SDMA0_GFX_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
++//SDMA0_GFX_MIDCMD_DATA3
++#define SDMA0_GFX_MIDCMD_DATA3__DATA3__SHIFT 0x0
++#define SDMA0_GFX_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
++//SDMA0_GFX_MIDCMD_DATA4
++#define SDMA0_GFX_MIDCMD_DATA4__DATA4__SHIFT 0x0
++#define SDMA0_GFX_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
++//SDMA0_GFX_MIDCMD_DATA5
++#define SDMA0_GFX_MIDCMD_DATA5__DATA5__SHIFT 0x0
++#define SDMA0_GFX_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
++//SDMA0_GFX_MIDCMD_DATA6
++#define SDMA0_GFX_MIDCMD_DATA6__DATA6__SHIFT 0x0
++#define SDMA0_GFX_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
++//SDMA0_GFX_MIDCMD_DATA7
++#define SDMA0_GFX_MIDCMD_DATA7__DATA7__SHIFT 0x0
++#define SDMA0_GFX_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
++//SDMA0_GFX_MIDCMD_DATA8
++#define SDMA0_GFX_MIDCMD_DATA8__DATA8__SHIFT 0x0
++#define SDMA0_GFX_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
++//SDMA0_GFX_MIDCMD_CNTL
++#define SDMA0_GFX_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
++#define SDMA0_GFX_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
++#define SDMA0_GFX_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
++#define SDMA0_GFX_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
++#define SDMA0_GFX_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
++#define SDMA0_GFX_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
++#define SDMA0_GFX_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
++#define SDMA0_GFX_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
++//SDMA0_PAGE_RB_CNTL
++#define SDMA0_PAGE_RB_CNTL__RB_ENABLE__SHIFT 0x0
++#define SDMA0_PAGE_RB_CNTL__RB_SIZE__SHIFT 0x1
++#define SDMA0_PAGE_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
++#define SDMA0_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
++#define SDMA0_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
++#define SDMA0_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
++#define SDMA0_PAGE_RB_CNTL__RB_PRIV__SHIFT 0x17
++#define SDMA0_PAGE_RB_CNTL__RB_VMID__SHIFT 0x18
++#define SDMA0_PAGE_RB_CNTL__RB_ENABLE_MASK 0x00000001L
++#define SDMA0_PAGE_RB_CNTL__RB_SIZE_MASK 0x0000003EL
++#define SDMA0_PAGE_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
++#define SDMA0_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
++#define SDMA0_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
++#define SDMA0_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
++#define SDMA0_PAGE_RB_CNTL__RB_PRIV_MASK 0x00800000L
++#define SDMA0_PAGE_RB_CNTL__RB_VMID_MASK 0x0F000000L
++//SDMA0_PAGE_RB_BASE
++#define SDMA0_PAGE_RB_BASE__ADDR__SHIFT 0x0
++#define SDMA0_PAGE_RB_BASE__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_PAGE_RB_BASE_HI
++#define SDMA0_PAGE_RB_BASE_HI__ADDR__SHIFT 0x0
++#define SDMA0_PAGE_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
++//SDMA0_PAGE_RB_RPTR
++#define SDMA0_PAGE_RB_RPTR__OFFSET__SHIFT 0x0
++#define SDMA0_PAGE_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_PAGE_RB_RPTR_HI
++#define SDMA0_PAGE_RB_RPTR_HI__OFFSET__SHIFT 0x0
++#define SDMA0_PAGE_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_PAGE_RB_WPTR
++#define SDMA0_PAGE_RB_WPTR__OFFSET__SHIFT 0x0
++#define SDMA0_PAGE_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_PAGE_RB_WPTR_HI
++#define SDMA0_PAGE_RB_WPTR_HI__OFFSET__SHIFT 0x0
++#define SDMA0_PAGE_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_PAGE_RB_WPTR_POLL_CNTL
++#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
++#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
++#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
++#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
++#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
++#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
++#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
++#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
++#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
++#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
++//SDMA0_PAGE_RB_RPTR_ADDR_HI
++#define SDMA0_PAGE_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA0_PAGE_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_PAGE_RB_RPTR_ADDR_LO
++#define SDMA0_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
++#define SDMA0_PAGE_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA0_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
++#define SDMA0_PAGE_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA0_PAGE_IB_CNTL
++#define SDMA0_PAGE_IB_CNTL__IB_ENABLE__SHIFT 0x0
++#define SDMA0_PAGE_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
++#define SDMA0_PAGE_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
++#define SDMA0_PAGE_IB_CNTL__CMD_VMID__SHIFT 0x10
++#define SDMA0_PAGE_IB_CNTL__IB_ENABLE_MASK 0x00000001L
++#define SDMA0_PAGE_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
++#define SDMA0_PAGE_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
++#define SDMA0_PAGE_IB_CNTL__CMD_VMID_MASK 0x000F0000L
++//SDMA0_PAGE_IB_RPTR
++#define SDMA0_PAGE_IB_RPTR__OFFSET__SHIFT 0x2
++#define SDMA0_PAGE_IB_RPTR__OFFSET_MASK 0x003FFFFCL
++//SDMA0_PAGE_IB_OFFSET
++#define SDMA0_PAGE_IB_OFFSET__OFFSET__SHIFT 0x2
++#define SDMA0_PAGE_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
++//SDMA0_PAGE_IB_BASE_LO
++#define SDMA0_PAGE_IB_BASE_LO__ADDR__SHIFT 0x5
++#define SDMA0_PAGE_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
++//SDMA0_PAGE_IB_BASE_HI
++#define SDMA0_PAGE_IB_BASE_HI__ADDR__SHIFT 0x0
++#define SDMA0_PAGE_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_PAGE_IB_SIZE
++#define SDMA0_PAGE_IB_SIZE__SIZE__SHIFT 0x0
++#define SDMA0_PAGE_IB_SIZE__SIZE_MASK 0x000FFFFFL
++//SDMA0_PAGE_SKIP_CNTL
++#define SDMA0_PAGE_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
++#define SDMA0_PAGE_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
++//SDMA0_PAGE_CONTEXT_STATUS
++#define SDMA0_PAGE_CONTEXT_STATUS__SELECTED__SHIFT 0x0
++#define SDMA0_PAGE_CONTEXT_STATUS__IDLE__SHIFT 0x2
++#define SDMA0_PAGE_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
++#define SDMA0_PAGE_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
++#define SDMA0_PAGE_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
++#define SDMA0_PAGE_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
++#define SDMA0_PAGE_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
++#define SDMA0_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
++#define SDMA0_PAGE_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
++#define SDMA0_PAGE_CONTEXT_STATUS__IDLE_MASK 0x00000004L
++#define SDMA0_PAGE_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
++#define SDMA0_PAGE_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
++#define SDMA0_PAGE_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
++#define SDMA0_PAGE_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
++#define SDMA0_PAGE_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
++#define SDMA0_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
++//SDMA0_PAGE_DOORBELL
++#define SDMA0_PAGE_DOORBELL__ENABLE__SHIFT 0x1c
++#define SDMA0_PAGE_DOORBELL__CAPTURED__SHIFT 0x1e
++#define SDMA0_PAGE_DOORBELL__ENABLE_MASK 0x10000000L
++#define SDMA0_PAGE_DOORBELL__CAPTURED_MASK 0x40000000L
++//SDMA0_PAGE_STATUS
++#define SDMA0_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
++#define SDMA0_PAGE_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
++#define SDMA0_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
++#define SDMA0_PAGE_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
++//SDMA0_PAGE_DOORBELL_LOG
++#define SDMA0_PAGE_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
++#define SDMA0_PAGE_DOORBELL_LOG__DATA__SHIFT 0x2
++#define SDMA0_PAGE_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
++#define SDMA0_PAGE_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
++//SDMA0_PAGE_WATERMARK
++#define SDMA0_PAGE_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
++#define SDMA0_PAGE_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
++#define SDMA0_PAGE_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
++#define SDMA0_PAGE_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
++//SDMA0_PAGE_DOORBELL_OFFSET
++#define SDMA0_PAGE_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
++#define SDMA0_PAGE_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
++//SDMA0_PAGE_CSA_ADDR_LO
++#define SDMA0_PAGE_CSA_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA0_PAGE_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA0_PAGE_CSA_ADDR_HI
++#define SDMA0_PAGE_CSA_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA0_PAGE_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_PAGE_IB_SUB_REMAIN
++#define SDMA0_PAGE_IB_SUB_REMAIN__SIZE__SHIFT 0x0
++#define SDMA0_PAGE_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
++//SDMA0_PAGE_PREEMPT
++#define SDMA0_PAGE_PREEMPT__IB_PREEMPT__SHIFT 0x0
++#define SDMA0_PAGE_PREEMPT__IB_PREEMPT_MASK 0x00000001L
++//SDMA0_PAGE_DUMMY_REG
++#define SDMA0_PAGE_DUMMY_REG__DUMMY__SHIFT 0x0
++#define SDMA0_PAGE_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
++//SDMA0_PAGE_RB_WPTR_POLL_ADDR_HI
++#define SDMA0_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA0_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_PAGE_RB_WPTR_POLL_ADDR_LO
++#define SDMA0_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA0_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA0_PAGE_RB_AQL_CNTL
++#define SDMA0_PAGE_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
++#define SDMA0_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
++#define SDMA0_PAGE_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
++#define SDMA0_PAGE_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
++#define SDMA0_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
++#define SDMA0_PAGE_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
++//SDMA0_PAGE_MINOR_PTR_UPDATE
++#define SDMA0_PAGE_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
++#define SDMA0_PAGE_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
++//SDMA0_PAGE_MIDCMD_DATA0
++#define SDMA0_PAGE_MIDCMD_DATA0__DATA0__SHIFT 0x0
++#define SDMA0_PAGE_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
++//SDMA0_PAGE_MIDCMD_DATA1
++#define SDMA0_PAGE_MIDCMD_DATA1__DATA1__SHIFT 0x0
++#define SDMA0_PAGE_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
++//SDMA0_PAGE_MIDCMD_DATA2
++#define SDMA0_PAGE_MIDCMD_DATA2__DATA2__SHIFT 0x0
++#define SDMA0_PAGE_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
++//SDMA0_PAGE_MIDCMD_DATA3
++#define SDMA0_PAGE_MIDCMD_DATA3__DATA3__SHIFT 0x0
++#define SDMA0_PAGE_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
++//SDMA0_PAGE_MIDCMD_DATA4
++#define SDMA0_PAGE_MIDCMD_DATA4__DATA4__SHIFT 0x0
++#define SDMA0_PAGE_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
++//SDMA0_PAGE_MIDCMD_DATA5
++#define SDMA0_PAGE_MIDCMD_DATA5__DATA5__SHIFT 0x0
++#define SDMA0_PAGE_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
++//SDMA0_PAGE_MIDCMD_DATA6
++#define SDMA0_PAGE_MIDCMD_DATA6__DATA6__SHIFT 0x0
++#define SDMA0_PAGE_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
++//SDMA0_PAGE_MIDCMD_DATA7
++#define SDMA0_PAGE_MIDCMD_DATA7__DATA7__SHIFT 0x0
++#define SDMA0_PAGE_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
++//SDMA0_PAGE_MIDCMD_DATA8
++#define SDMA0_PAGE_MIDCMD_DATA8__DATA8__SHIFT 0x0
++#define SDMA0_PAGE_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
++//SDMA0_PAGE_MIDCMD_CNTL
++#define SDMA0_PAGE_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
++#define SDMA0_PAGE_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
++#define SDMA0_PAGE_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
++#define SDMA0_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
++#define SDMA0_PAGE_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
++#define SDMA0_PAGE_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
++#define SDMA0_PAGE_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
++#define SDMA0_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
++//SDMA0_RLC0_RB_CNTL
++#define SDMA0_RLC0_RB_CNTL__RB_ENABLE__SHIFT 0x0
++#define SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT 0x1
++#define SDMA0_RLC0_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
++#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
++#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
++#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
++#define SDMA0_RLC0_RB_CNTL__RB_PRIV__SHIFT 0x17
++#define SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT 0x18
++#define SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK 0x00000001L
++#define SDMA0_RLC0_RB_CNTL__RB_SIZE_MASK 0x0000003EL
++#define SDMA0_RLC0_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
++#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
++#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
++#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
++#define SDMA0_RLC0_RB_CNTL__RB_PRIV_MASK 0x00800000L
++#define SDMA0_RLC0_RB_CNTL__RB_VMID_MASK 0x0F000000L
++//SDMA0_RLC0_RB_BASE
++#define SDMA0_RLC0_RB_BASE__ADDR__SHIFT 0x0
++#define SDMA0_RLC0_RB_BASE__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC0_RB_BASE_HI
++#define SDMA0_RLC0_RB_BASE_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC0_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
++//SDMA0_RLC0_RB_RPTR
++#define SDMA0_RLC0_RB_RPTR__OFFSET__SHIFT 0x0
++#define SDMA0_RLC0_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_RLC0_RB_RPTR_HI
++#define SDMA0_RLC0_RB_RPTR_HI__OFFSET__SHIFT 0x0
++#define SDMA0_RLC0_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_RLC0_RB_WPTR
++#define SDMA0_RLC0_RB_WPTR__OFFSET__SHIFT 0x0
++#define SDMA0_RLC0_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_RLC0_RB_WPTR_HI
++#define SDMA0_RLC0_RB_WPTR_HI__OFFSET__SHIFT 0x0
++#define SDMA0_RLC0_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_RLC0_RB_WPTR_POLL_CNTL
++#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
++#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
++#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
++#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
++#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
++#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
++#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
++#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
++#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
++#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
++//SDMA0_RLC0_RB_RPTR_ADDR_HI
++#define SDMA0_RLC0_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC0_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC0_RB_RPTR_ADDR_LO
++#define SDMA0_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
++#define SDMA0_RLC0_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA0_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
++#define SDMA0_RLC0_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA0_RLC0_IB_CNTL
++#define SDMA0_RLC0_IB_CNTL__IB_ENABLE__SHIFT 0x0
++#define SDMA0_RLC0_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
++#define SDMA0_RLC0_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
++#define SDMA0_RLC0_IB_CNTL__CMD_VMID__SHIFT 0x10
++#define SDMA0_RLC0_IB_CNTL__IB_ENABLE_MASK 0x00000001L
++#define SDMA0_RLC0_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
++#define SDMA0_RLC0_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
++#define SDMA0_RLC0_IB_CNTL__CMD_VMID_MASK 0x000F0000L
++//SDMA0_RLC0_IB_RPTR
++#define SDMA0_RLC0_IB_RPTR__OFFSET__SHIFT 0x2
++#define SDMA0_RLC0_IB_RPTR__OFFSET_MASK 0x003FFFFCL
++//SDMA0_RLC0_IB_OFFSET
++#define SDMA0_RLC0_IB_OFFSET__OFFSET__SHIFT 0x2
++#define SDMA0_RLC0_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
++//SDMA0_RLC0_IB_BASE_LO
++#define SDMA0_RLC0_IB_BASE_LO__ADDR__SHIFT 0x5
++#define SDMA0_RLC0_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
++//SDMA0_RLC0_IB_BASE_HI
++#define SDMA0_RLC0_IB_BASE_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC0_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC0_IB_SIZE
++#define SDMA0_RLC0_IB_SIZE__SIZE__SHIFT 0x0
++#define SDMA0_RLC0_IB_SIZE__SIZE_MASK 0x000FFFFFL
++//SDMA0_RLC0_SKIP_CNTL
++#define SDMA0_RLC0_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
++#define SDMA0_RLC0_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
++//SDMA0_RLC0_CONTEXT_STATUS
++#define SDMA0_RLC0_CONTEXT_STATUS__SELECTED__SHIFT 0x0
++#define SDMA0_RLC0_CONTEXT_STATUS__IDLE__SHIFT 0x2
++#define SDMA0_RLC0_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
++#define SDMA0_RLC0_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
++#define SDMA0_RLC0_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
++#define SDMA0_RLC0_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
++#define SDMA0_RLC0_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
++#define SDMA0_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
++#define SDMA0_RLC0_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
++#define SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK 0x00000004L
++#define SDMA0_RLC0_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
++#define SDMA0_RLC0_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
++#define SDMA0_RLC0_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
++#define SDMA0_RLC0_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
++#define SDMA0_RLC0_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
++#define SDMA0_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
++//SDMA0_RLC0_DOORBELL
++#define SDMA0_RLC0_DOORBELL__ENABLE__SHIFT 0x1c
++#define SDMA0_RLC0_DOORBELL__CAPTURED__SHIFT 0x1e
++#define SDMA0_RLC0_DOORBELL__ENABLE_MASK 0x10000000L
++#define SDMA0_RLC0_DOORBELL__CAPTURED_MASK 0x40000000L
++//SDMA0_RLC0_STATUS
++#define SDMA0_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
++#define SDMA0_RLC0_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
++#define SDMA0_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
++#define SDMA0_RLC0_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
++//SDMA0_RLC0_DOORBELL_LOG
++#define SDMA0_RLC0_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
++#define SDMA0_RLC0_DOORBELL_LOG__DATA__SHIFT 0x2
++#define SDMA0_RLC0_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
++#define SDMA0_RLC0_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
++//SDMA0_RLC0_WATERMARK
++#define SDMA0_RLC0_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
++#define SDMA0_RLC0_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
++#define SDMA0_RLC0_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
++#define SDMA0_RLC0_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
++//SDMA0_RLC0_DOORBELL_OFFSET
++#define SDMA0_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
++#define SDMA0_RLC0_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
++//SDMA0_RLC0_CSA_ADDR_LO
++#define SDMA0_RLC0_CSA_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA0_RLC0_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA0_RLC0_CSA_ADDR_HI
++#define SDMA0_RLC0_CSA_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC0_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC0_IB_SUB_REMAIN
++#define SDMA0_RLC0_IB_SUB_REMAIN__SIZE__SHIFT 0x0
++#define SDMA0_RLC0_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
++//SDMA0_RLC0_PREEMPT
++#define SDMA0_RLC0_PREEMPT__IB_PREEMPT__SHIFT 0x0
++#define SDMA0_RLC0_PREEMPT__IB_PREEMPT_MASK 0x00000001L
++//SDMA0_RLC0_DUMMY_REG
++#define SDMA0_RLC0_DUMMY_REG__DUMMY__SHIFT 0x0
++#define SDMA0_RLC0_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
++//SDMA0_RLC0_RB_WPTR_POLL_ADDR_HI
++#define SDMA0_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC0_RB_WPTR_POLL_ADDR_LO
++#define SDMA0_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA0_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA0_RLC0_RB_AQL_CNTL
++#define SDMA0_RLC0_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
++#define SDMA0_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
++#define SDMA0_RLC0_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
++#define SDMA0_RLC0_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
++#define SDMA0_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
++#define SDMA0_RLC0_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
++//SDMA0_RLC0_MINOR_PTR_UPDATE
++#define SDMA0_RLC0_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
++#define SDMA0_RLC0_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
++//SDMA0_RLC0_MIDCMD_DATA0
++#define SDMA0_RLC0_MIDCMD_DATA0__DATA0__SHIFT 0x0
++#define SDMA0_RLC0_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
++//SDMA0_RLC0_MIDCMD_DATA1
++#define SDMA0_RLC0_MIDCMD_DATA1__DATA1__SHIFT 0x0
++#define SDMA0_RLC0_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
++//SDMA0_RLC0_MIDCMD_DATA2
++#define SDMA0_RLC0_MIDCMD_DATA2__DATA2__SHIFT 0x0
++#define SDMA0_RLC0_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
++//SDMA0_RLC0_MIDCMD_DATA3
++#define SDMA0_RLC0_MIDCMD_DATA3__DATA3__SHIFT 0x0
++#define SDMA0_RLC0_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
++//SDMA0_RLC0_MIDCMD_DATA4
++#define SDMA0_RLC0_MIDCMD_DATA4__DATA4__SHIFT 0x0
++#define SDMA0_RLC0_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
++//SDMA0_RLC0_MIDCMD_DATA5
++#define SDMA0_RLC0_MIDCMD_DATA5__DATA5__SHIFT 0x0
++#define SDMA0_RLC0_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
++//SDMA0_RLC0_MIDCMD_DATA6
++#define SDMA0_RLC0_MIDCMD_DATA6__DATA6__SHIFT 0x0
++#define SDMA0_RLC0_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
++//SDMA0_RLC0_MIDCMD_DATA7
++#define SDMA0_RLC0_MIDCMD_DATA7__DATA7__SHIFT 0x0
++#define SDMA0_RLC0_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
++//SDMA0_RLC0_MIDCMD_DATA8
++#define SDMA0_RLC0_MIDCMD_DATA8__DATA8__SHIFT 0x0
++#define SDMA0_RLC0_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
++//SDMA0_RLC0_MIDCMD_CNTL
++#define SDMA0_RLC0_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
++#define SDMA0_RLC0_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
++#define SDMA0_RLC0_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
++#define SDMA0_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
++#define SDMA0_RLC0_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
++#define SDMA0_RLC0_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
++#define SDMA0_RLC0_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
++#define SDMA0_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
++//SDMA0_RLC1_RB_CNTL
++#define SDMA0_RLC1_RB_CNTL__RB_ENABLE__SHIFT 0x0
++#define SDMA0_RLC1_RB_CNTL__RB_SIZE__SHIFT 0x1
++#define SDMA0_RLC1_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
++#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
++#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
++#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
++#define SDMA0_RLC1_RB_CNTL__RB_PRIV__SHIFT 0x17
++#define SDMA0_RLC1_RB_CNTL__RB_VMID__SHIFT 0x18
++#define SDMA0_RLC1_RB_CNTL__RB_ENABLE_MASK 0x00000001L
++#define SDMA0_RLC1_RB_CNTL__RB_SIZE_MASK 0x0000003EL
++#define SDMA0_RLC1_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
++#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
++#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
++#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
++#define SDMA0_RLC1_RB_CNTL__RB_PRIV_MASK 0x00800000L
++#define SDMA0_RLC1_RB_CNTL__RB_VMID_MASK 0x0F000000L
++//SDMA0_RLC1_RB_BASE
++#define SDMA0_RLC1_RB_BASE__ADDR__SHIFT 0x0
++#define SDMA0_RLC1_RB_BASE__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC1_RB_BASE_HI
++#define SDMA0_RLC1_RB_BASE_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC1_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
++//SDMA0_RLC1_RB_RPTR
++#define SDMA0_RLC1_RB_RPTR__OFFSET__SHIFT 0x0
++#define SDMA0_RLC1_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_RLC1_RB_RPTR_HI
++#define SDMA0_RLC1_RB_RPTR_HI__OFFSET__SHIFT 0x0
++#define SDMA0_RLC1_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_RLC1_RB_WPTR
++#define SDMA0_RLC1_RB_WPTR__OFFSET__SHIFT 0x0
++#define SDMA0_RLC1_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_RLC1_RB_WPTR_HI
++#define SDMA0_RLC1_RB_WPTR_HI__OFFSET__SHIFT 0x0
++#define SDMA0_RLC1_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_RLC1_RB_WPTR_POLL_CNTL
++#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
++#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
++#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
++#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
++#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
++#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
++#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
++#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
++#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
++#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
++//SDMA0_RLC1_RB_RPTR_ADDR_HI
++#define SDMA0_RLC1_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC1_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC1_RB_RPTR_ADDR_LO
++#define SDMA0_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
++#define SDMA0_RLC1_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA0_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
++#define SDMA0_RLC1_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA0_RLC1_IB_CNTL
++#define SDMA0_RLC1_IB_CNTL__IB_ENABLE__SHIFT 0x0
++#define SDMA0_RLC1_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
++#define SDMA0_RLC1_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
++#define SDMA0_RLC1_IB_CNTL__CMD_VMID__SHIFT 0x10
++#define SDMA0_RLC1_IB_CNTL__IB_ENABLE_MASK 0x00000001L
++#define SDMA0_RLC1_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
++#define SDMA0_RLC1_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
++#define SDMA0_RLC1_IB_CNTL__CMD_VMID_MASK 0x000F0000L
++//SDMA0_RLC1_IB_RPTR
++#define SDMA0_RLC1_IB_RPTR__OFFSET__SHIFT 0x2
++#define SDMA0_RLC1_IB_RPTR__OFFSET_MASK 0x003FFFFCL
++//SDMA0_RLC1_IB_OFFSET
++#define SDMA0_RLC1_IB_OFFSET__OFFSET__SHIFT 0x2
++#define SDMA0_RLC1_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
++//SDMA0_RLC1_IB_BASE_LO
++#define SDMA0_RLC1_IB_BASE_LO__ADDR__SHIFT 0x5
++#define SDMA0_RLC1_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
++//SDMA0_RLC1_IB_BASE_HI
++#define SDMA0_RLC1_IB_BASE_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC1_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC1_IB_SIZE
++#define SDMA0_RLC1_IB_SIZE__SIZE__SHIFT 0x0
++#define SDMA0_RLC1_IB_SIZE__SIZE_MASK 0x000FFFFFL
++//SDMA0_RLC1_SKIP_CNTL
++#define SDMA0_RLC1_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
++#define SDMA0_RLC1_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
++//SDMA0_RLC1_CONTEXT_STATUS
++#define SDMA0_RLC1_CONTEXT_STATUS__SELECTED__SHIFT 0x0
++#define SDMA0_RLC1_CONTEXT_STATUS__IDLE__SHIFT 0x2
++#define SDMA0_RLC1_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
++#define SDMA0_RLC1_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
++#define SDMA0_RLC1_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
++#define SDMA0_RLC1_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
++#define SDMA0_RLC1_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
++#define SDMA0_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
++#define SDMA0_RLC1_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
++#define SDMA0_RLC1_CONTEXT_STATUS__IDLE_MASK 0x00000004L
++#define SDMA0_RLC1_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
++#define SDMA0_RLC1_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
++#define SDMA0_RLC1_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
++#define SDMA0_RLC1_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
++#define SDMA0_RLC1_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
++#define SDMA0_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
++//SDMA0_RLC1_DOORBELL
++#define SDMA0_RLC1_DOORBELL__ENABLE__SHIFT 0x1c
++#define SDMA0_RLC1_DOORBELL__CAPTURED__SHIFT 0x1e
++#define SDMA0_RLC1_DOORBELL__ENABLE_MASK 0x10000000L
++#define SDMA0_RLC1_DOORBELL__CAPTURED_MASK 0x40000000L
++//SDMA0_RLC1_STATUS
++#define SDMA0_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
++#define SDMA0_RLC1_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
++#define SDMA0_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
++#define SDMA0_RLC1_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
++//SDMA0_RLC1_DOORBELL_LOG
++#define SDMA0_RLC1_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
++#define SDMA0_RLC1_DOORBELL_LOG__DATA__SHIFT 0x2
++#define SDMA0_RLC1_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
++#define SDMA0_RLC1_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
++//SDMA0_RLC1_WATERMARK
++#define SDMA0_RLC1_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
++#define SDMA0_RLC1_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
++#define SDMA0_RLC1_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
++#define SDMA0_RLC1_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
++//SDMA0_RLC1_DOORBELL_OFFSET
++#define SDMA0_RLC1_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
++#define SDMA0_RLC1_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
++//SDMA0_RLC1_CSA_ADDR_LO
++#define SDMA0_RLC1_CSA_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA0_RLC1_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA0_RLC1_CSA_ADDR_HI
++#define SDMA0_RLC1_CSA_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC1_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC1_IB_SUB_REMAIN
++#define SDMA0_RLC1_IB_SUB_REMAIN__SIZE__SHIFT 0x0
++#define SDMA0_RLC1_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
++//SDMA0_RLC1_PREEMPT
++#define SDMA0_RLC1_PREEMPT__IB_PREEMPT__SHIFT 0x0
++#define SDMA0_RLC1_PREEMPT__IB_PREEMPT_MASK 0x00000001L
++//SDMA0_RLC1_DUMMY_REG
++#define SDMA0_RLC1_DUMMY_REG__DUMMY__SHIFT 0x0
++#define SDMA0_RLC1_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
++//SDMA0_RLC1_RB_WPTR_POLL_ADDR_HI
++#define SDMA0_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC1_RB_WPTR_POLL_ADDR_LO
++#define SDMA0_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA0_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA0_RLC1_RB_AQL_CNTL
++#define SDMA0_RLC1_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
++#define SDMA0_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
++#define SDMA0_RLC1_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
++#define SDMA0_RLC1_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
++#define SDMA0_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
++#define SDMA0_RLC1_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
++//SDMA0_RLC1_MINOR_PTR_UPDATE
++#define SDMA0_RLC1_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
++#define SDMA0_RLC1_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
++//SDMA0_RLC1_MIDCMD_DATA0
++#define SDMA0_RLC1_MIDCMD_DATA0__DATA0__SHIFT 0x0
++#define SDMA0_RLC1_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
++//SDMA0_RLC1_MIDCMD_DATA1
++#define SDMA0_RLC1_MIDCMD_DATA1__DATA1__SHIFT 0x0
++#define SDMA0_RLC1_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
++//SDMA0_RLC1_MIDCMD_DATA2
++#define SDMA0_RLC1_MIDCMD_DATA2__DATA2__SHIFT 0x0
++#define SDMA0_RLC1_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
++//SDMA0_RLC1_MIDCMD_DATA3
++#define SDMA0_RLC1_MIDCMD_DATA3__DATA3__SHIFT 0x0
++#define SDMA0_RLC1_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
++//SDMA0_RLC1_MIDCMD_DATA4
++#define SDMA0_RLC1_MIDCMD_DATA4__DATA4__SHIFT 0x0
++#define SDMA0_RLC1_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
++//SDMA0_RLC1_MIDCMD_DATA5
++#define SDMA0_RLC1_MIDCMD_DATA5__DATA5__SHIFT 0x0
++#define SDMA0_RLC1_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
++//SDMA0_RLC1_MIDCMD_DATA6
++#define SDMA0_RLC1_MIDCMD_DATA6__DATA6__SHIFT 0x0
++#define SDMA0_RLC1_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
++//SDMA0_RLC1_MIDCMD_DATA7
++#define SDMA0_RLC1_MIDCMD_DATA7__DATA7__SHIFT 0x0
++#define SDMA0_RLC1_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
++//SDMA0_RLC1_MIDCMD_DATA8
++#define SDMA0_RLC1_MIDCMD_DATA8__DATA8__SHIFT 0x0
++#define SDMA0_RLC1_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
++//SDMA0_RLC1_MIDCMD_CNTL
++#define SDMA0_RLC1_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
++#define SDMA0_RLC1_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
++#define SDMA0_RLC1_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
++#define SDMA0_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
++#define SDMA0_RLC1_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
++#define SDMA0_RLC1_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
++#define SDMA0_RLC1_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
++#define SDMA0_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
++//SDMA0_RLC2_RB_CNTL
++#define SDMA0_RLC2_RB_CNTL__RB_ENABLE__SHIFT 0x0
++#define SDMA0_RLC2_RB_CNTL__RB_SIZE__SHIFT 0x1
++#define SDMA0_RLC2_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
++#define SDMA0_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
++#define SDMA0_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
++#define SDMA0_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
++#define SDMA0_RLC2_RB_CNTL__RB_PRIV__SHIFT 0x17
++#define SDMA0_RLC2_RB_CNTL__RB_VMID__SHIFT 0x18
++#define SDMA0_RLC2_RB_CNTL__RB_ENABLE_MASK 0x00000001L
++#define SDMA0_RLC2_RB_CNTL__RB_SIZE_MASK 0x0000003EL
++#define SDMA0_RLC2_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
++#define SDMA0_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
++#define SDMA0_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
++#define SDMA0_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
++#define SDMA0_RLC2_RB_CNTL__RB_PRIV_MASK 0x00800000L
++#define SDMA0_RLC2_RB_CNTL__RB_VMID_MASK 0x0F000000L
++//SDMA0_RLC2_RB_BASE
++#define SDMA0_RLC2_RB_BASE__ADDR__SHIFT 0x0
++#define SDMA0_RLC2_RB_BASE__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC2_RB_BASE_HI
++#define SDMA0_RLC2_RB_BASE_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC2_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
++//SDMA0_RLC2_RB_RPTR
++#define SDMA0_RLC2_RB_RPTR__OFFSET__SHIFT 0x0
++#define SDMA0_RLC2_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_RLC2_RB_RPTR_HI
++#define SDMA0_RLC2_RB_RPTR_HI__OFFSET__SHIFT 0x0
++#define SDMA0_RLC2_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_RLC2_RB_WPTR
++#define SDMA0_RLC2_RB_WPTR__OFFSET__SHIFT 0x0
++#define SDMA0_RLC2_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_RLC2_RB_WPTR_HI
++#define SDMA0_RLC2_RB_WPTR_HI__OFFSET__SHIFT 0x0
++#define SDMA0_RLC2_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_RLC2_RB_WPTR_POLL_CNTL
++#define SDMA0_RLC2_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
++#define SDMA0_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
++#define SDMA0_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
++#define SDMA0_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
++#define SDMA0_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
++#define SDMA0_RLC2_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
++#define SDMA0_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
++#define SDMA0_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
++#define SDMA0_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
++#define SDMA0_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
++//SDMA0_RLC2_RB_RPTR_ADDR_HI
++#define SDMA0_RLC2_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC2_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC2_RB_RPTR_ADDR_LO
++#define SDMA0_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
++#define SDMA0_RLC2_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA0_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
++#define SDMA0_RLC2_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA0_RLC2_IB_CNTL
++#define SDMA0_RLC2_IB_CNTL__IB_ENABLE__SHIFT 0x0
++#define SDMA0_RLC2_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
++#define SDMA0_RLC2_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
++#define SDMA0_RLC2_IB_CNTL__CMD_VMID__SHIFT 0x10
++#define SDMA0_RLC2_IB_CNTL__IB_ENABLE_MASK 0x00000001L
++#define SDMA0_RLC2_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
++#define SDMA0_RLC2_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
++#define SDMA0_RLC2_IB_CNTL__CMD_VMID_MASK 0x000F0000L
++//SDMA0_RLC2_IB_RPTR
++#define SDMA0_RLC2_IB_RPTR__OFFSET__SHIFT 0x2
++#define SDMA0_RLC2_IB_RPTR__OFFSET_MASK 0x003FFFFCL
++//SDMA0_RLC2_IB_OFFSET
++#define SDMA0_RLC2_IB_OFFSET__OFFSET__SHIFT 0x2
++#define SDMA0_RLC2_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
++//SDMA0_RLC2_IB_BASE_LO
++#define SDMA0_RLC2_IB_BASE_LO__ADDR__SHIFT 0x5
++#define SDMA0_RLC2_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
++//SDMA0_RLC2_IB_BASE_HI
++#define SDMA0_RLC2_IB_BASE_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC2_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC2_IB_SIZE
++#define SDMA0_RLC2_IB_SIZE__SIZE__SHIFT 0x0
++#define SDMA0_RLC2_IB_SIZE__SIZE_MASK 0x000FFFFFL
++//SDMA0_RLC2_SKIP_CNTL
++#define SDMA0_RLC2_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
++#define SDMA0_RLC2_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
++//SDMA0_RLC2_CONTEXT_STATUS
++#define SDMA0_RLC2_CONTEXT_STATUS__SELECTED__SHIFT 0x0
++#define SDMA0_RLC2_CONTEXT_STATUS__IDLE__SHIFT 0x2
++#define SDMA0_RLC2_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
++#define SDMA0_RLC2_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
++#define SDMA0_RLC2_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
++#define SDMA0_RLC2_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
++#define SDMA0_RLC2_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
++#define SDMA0_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
++#define SDMA0_RLC2_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
++#define SDMA0_RLC2_CONTEXT_STATUS__IDLE_MASK 0x00000004L
++#define SDMA0_RLC2_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
++#define SDMA0_RLC2_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
++#define SDMA0_RLC2_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
++#define SDMA0_RLC2_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
++#define SDMA0_RLC2_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
++#define SDMA0_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
++//SDMA0_RLC2_DOORBELL
++#define SDMA0_RLC2_DOORBELL__ENABLE__SHIFT 0x1c
++#define SDMA0_RLC2_DOORBELL__CAPTURED__SHIFT 0x1e
++#define SDMA0_RLC2_DOORBELL__ENABLE_MASK 0x10000000L
++#define SDMA0_RLC2_DOORBELL__CAPTURED_MASK 0x40000000L
++//SDMA0_RLC2_STATUS
++#define SDMA0_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
++#define SDMA0_RLC2_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
++#define SDMA0_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
++#define SDMA0_RLC2_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
++//SDMA0_RLC2_DOORBELL_LOG
++#define SDMA0_RLC2_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
++#define SDMA0_RLC2_DOORBELL_LOG__DATA__SHIFT 0x2
++#define SDMA0_RLC2_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
++#define SDMA0_RLC2_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
++//SDMA0_RLC2_WATERMARK
++#define SDMA0_RLC2_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
++#define SDMA0_RLC2_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
++#define SDMA0_RLC2_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
++#define SDMA0_RLC2_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
++//SDMA0_RLC2_DOORBELL_OFFSET
++#define SDMA0_RLC2_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
++#define SDMA0_RLC2_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
++//SDMA0_RLC2_CSA_ADDR_LO
++#define SDMA0_RLC2_CSA_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA0_RLC2_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA0_RLC2_CSA_ADDR_HI
++#define SDMA0_RLC2_CSA_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC2_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC2_IB_SUB_REMAIN
++#define SDMA0_RLC2_IB_SUB_REMAIN__SIZE__SHIFT 0x0
++#define SDMA0_RLC2_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
++//SDMA0_RLC2_PREEMPT
++#define SDMA0_RLC2_PREEMPT__IB_PREEMPT__SHIFT 0x0
++#define SDMA0_RLC2_PREEMPT__IB_PREEMPT_MASK 0x00000001L
++//SDMA0_RLC2_DUMMY_REG
++#define SDMA0_RLC2_DUMMY_REG__DUMMY__SHIFT 0x0
++#define SDMA0_RLC2_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
++//SDMA0_RLC2_RB_WPTR_POLL_ADDR_HI
++#define SDMA0_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC2_RB_WPTR_POLL_ADDR_LO
++#define SDMA0_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA0_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA0_RLC2_RB_AQL_CNTL
++#define SDMA0_RLC2_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
++#define SDMA0_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
++#define SDMA0_RLC2_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
++#define SDMA0_RLC2_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
++#define SDMA0_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
++#define SDMA0_RLC2_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
++//SDMA0_RLC2_MINOR_PTR_UPDATE
++#define SDMA0_RLC2_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
++#define SDMA0_RLC2_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
++//SDMA0_RLC2_MIDCMD_DATA0
++#define SDMA0_RLC2_MIDCMD_DATA0__DATA0__SHIFT 0x0
++#define SDMA0_RLC2_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
++//SDMA0_RLC2_MIDCMD_DATA1
++#define SDMA0_RLC2_MIDCMD_DATA1__DATA1__SHIFT 0x0
++#define SDMA0_RLC2_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
++//SDMA0_RLC2_MIDCMD_DATA2
++#define SDMA0_RLC2_MIDCMD_DATA2__DATA2__SHIFT 0x0
++#define SDMA0_RLC2_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
++//SDMA0_RLC2_MIDCMD_DATA3
++#define SDMA0_RLC2_MIDCMD_DATA3__DATA3__SHIFT 0x0
++#define SDMA0_RLC2_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
++//SDMA0_RLC2_MIDCMD_DATA4
++#define SDMA0_RLC2_MIDCMD_DATA4__DATA4__SHIFT 0x0
++#define SDMA0_RLC2_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
++//SDMA0_RLC2_MIDCMD_DATA5
++#define SDMA0_RLC2_MIDCMD_DATA5__DATA5__SHIFT 0x0
++#define SDMA0_RLC2_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
++//SDMA0_RLC2_MIDCMD_DATA6
++#define SDMA0_RLC2_MIDCMD_DATA6__DATA6__SHIFT 0x0
++#define SDMA0_RLC2_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
++//SDMA0_RLC2_MIDCMD_DATA7
++#define SDMA0_RLC2_MIDCMD_DATA7__DATA7__SHIFT 0x0
++#define SDMA0_RLC2_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
++//SDMA0_RLC2_MIDCMD_DATA8
++#define SDMA0_RLC2_MIDCMD_DATA8__DATA8__SHIFT 0x0
++#define SDMA0_RLC2_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
++//SDMA0_RLC2_MIDCMD_CNTL
++#define SDMA0_RLC2_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
++#define SDMA0_RLC2_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
++#define SDMA0_RLC2_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
++#define SDMA0_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
++#define SDMA0_RLC2_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
++#define SDMA0_RLC2_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
++#define SDMA0_RLC2_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
++#define SDMA0_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
++//SDMA0_RLC3_RB_CNTL
++#define SDMA0_RLC3_RB_CNTL__RB_ENABLE__SHIFT 0x0
++#define SDMA0_RLC3_RB_CNTL__RB_SIZE__SHIFT 0x1
++#define SDMA0_RLC3_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
++#define SDMA0_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
++#define SDMA0_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
++#define SDMA0_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
++#define SDMA0_RLC3_RB_CNTL__RB_PRIV__SHIFT 0x17
++#define SDMA0_RLC3_RB_CNTL__RB_VMID__SHIFT 0x18
++#define SDMA0_RLC3_RB_CNTL__RB_ENABLE_MASK 0x00000001L
++#define SDMA0_RLC3_RB_CNTL__RB_SIZE_MASK 0x0000003EL
++#define SDMA0_RLC3_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
++#define SDMA0_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
++#define SDMA0_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
++#define SDMA0_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
++#define SDMA0_RLC3_RB_CNTL__RB_PRIV_MASK 0x00800000L
++#define SDMA0_RLC3_RB_CNTL__RB_VMID_MASK 0x0F000000L
++//SDMA0_RLC3_RB_BASE
++#define SDMA0_RLC3_RB_BASE__ADDR__SHIFT 0x0
++#define SDMA0_RLC3_RB_BASE__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC3_RB_BASE_HI
++#define SDMA0_RLC3_RB_BASE_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC3_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
++//SDMA0_RLC3_RB_RPTR
++#define SDMA0_RLC3_RB_RPTR__OFFSET__SHIFT 0x0
++#define SDMA0_RLC3_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_RLC3_RB_RPTR_HI
++#define SDMA0_RLC3_RB_RPTR_HI__OFFSET__SHIFT 0x0
++#define SDMA0_RLC3_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_RLC3_RB_WPTR
++#define SDMA0_RLC3_RB_WPTR__OFFSET__SHIFT 0x0
++#define SDMA0_RLC3_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_RLC3_RB_WPTR_HI
++#define SDMA0_RLC3_RB_WPTR_HI__OFFSET__SHIFT 0x0
++#define SDMA0_RLC3_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_RLC3_RB_WPTR_POLL_CNTL
++#define SDMA0_RLC3_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
++#define SDMA0_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
++#define SDMA0_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
++#define SDMA0_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
++#define SDMA0_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
++#define SDMA0_RLC3_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
++#define SDMA0_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
++#define SDMA0_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
++#define SDMA0_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
++#define SDMA0_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
++//SDMA0_RLC3_RB_RPTR_ADDR_HI
++#define SDMA0_RLC3_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC3_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC3_RB_RPTR_ADDR_LO
++#define SDMA0_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
++#define SDMA0_RLC3_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA0_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
++#define SDMA0_RLC3_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA0_RLC3_IB_CNTL
++#define SDMA0_RLC3_IB_CNTL__IB_ENABLE__SHIFT 0x0
++#define SDMA0_RLC3_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
++#define SDMA0_RLC3_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
++#define SDMA0_RLC3_IB_CNTL__CMD_VMID__SHIFT 0x10
++#define SDMA0_RLC3_IB_CNTL__IB_ENABLE_MASK 0x00000001L
++#define SDMA0_RLC3_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
++#define SDMA0_RLC3_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
++#define SDMA0_RLC3_IB_CNTL__CMD_VMID_MASK 0x000F0000L
++//SDMA0_RLC3_IB_RPTR
++#define SDMA0_RLC3_IB_RPTR__OFFSET__SHIFT 0x2
++#define SDMA0_RLC3_IB_RPTR__OFFSET_MASK 0x003FFFFCL
++//SDMA0_RLC3_IB_OFFSET
++#define SDMA0_RLC3_IB_OFFSET__OFFSET__SHIFT 0x2
++#define SDMA0_RLC3_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
++//SDMA0_RLC3_IB_BASE_LO
++#define SDMA0_RLC3_IB_BASE_LO__ADDR__SHIFT 0x5
++#define SDMA0_RLC3_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
++//SDMA0_RLC3_IB_BASE_HI
++#define SDMA0_RLC3_IB_BASE_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC3_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC3_IB_SIZE
++#define SDMA0_RLC3_IB_SIZE__SIZE__SHIFT 0x0
++#define SDMA0_RLC3_IB_SIZE__SIZE_MASK 0x000FFFFFL
++//SDMA0_RLC3_SKIP_CNTL
++#define SDMA0_RLC3_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
++#define SDMA0_RLC3_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
++//SDMA0_RLC3_CONTEXT_STATUS
++#define SDMA0_RLC3_CONTEXT_STATUS__SELECTED__SHIFT 0x0
++#define SDMA0_RLC3_CONTEXT_STATUS__IDLE__SHIFT 0x2
++#define SDMA0_RLC3_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
++#define SDMA0_RLC3_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
++#define SDMA0_RLC3_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
++#define SDMA0_RLC3_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
++#define SDMA0_RLC3_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
++#define SDMA0_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
++#define SDMA0_RLC3_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
++#define SDMA0_RLC3_CONTEXT_STATUS__IDLE_MASK 0x00000004L
++#define SDMA0_RLC3_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
++#define SDMA0_RLC3_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
++#define SDMA0_RLC3_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
++#define SDMA0_RLC3_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
++#define SDMA0_RLC3_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
++#define SDMA0_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
++//SDMA0_RLC3_DOORBELL
++#define SDMA0_RLC3_DOORBELL__ENABLE__SHIFT 0x1c
++#define SDMA0_RLC3_DOORBELL__CAPTURED__SHIFT 0x1e
++#define SDMA0_RLC3_DOORBELL__ENABLE_MASK 0x10000000L
++#define SDMA0_RLC3_DOORBELL__CAPTURED_MASK 0x40000000L
++//SDMA0_RLC3_STATUS
++#define SDMA0_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
++#define SDMA0_RLC3_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
++#define SDMA0_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
++#define SDMA0_RLC3_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
++//SDMA0_RLC3_DOORBELL_LOG
++#define SDMA0_RLC3_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
++#define SDMA0_RLC3_DOORBELL_LOG__DATA__SHIFT 0x2
++#define SDMA0_RLC3_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
++#define SDMA0_RLC3_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
++//SDMA0_RLC3_WATERMARK
++#define SDMA0_RLC3_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
++#define SDMA0_RLC3_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
++#define SDMA0_RLC3_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
++#define SDMA0_RLC3_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
++//SDMA0_RLC3_DOORBELL_OFFSET
++#define SDMA0_RLC3_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
++#define SDMA0_RLC3_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
++//SDMA0_RLC3_CSA_ADDR_LO
++#define SDMA0_RLC3_CSA_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA0_RLC3_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA0_RLC3_CSA_ADDR_HI
++#define SDMA0_RLC3_CSA_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC3_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC3_IB_SUB_REMAIN
++#define SDMA0_RLC3_IB_SUB_REMAIN__SIZE__SHIFT 0x0
++#define SDMA0_RLC3_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
++//SDMA0_RLC3_PREEMPT
++#define SDMA0_RLC3_PREEMPT__IB_PREEMPT__SHIFT 0x0
++#define SDMA0_RLC3_PREEMPT__IB_PREEMPT_MASK 0x00000001L
++//SDMA0_RLC3_DUMMY_REG
++#define SDMA0_RLC3_DUMMY_REG__DUMMY__SHIFT 0x0
++#define SDMA0_RLC3_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
++//SDMA0_RLC3_RB_WPTR_POLL_ADDR_HI
++#define SDMA0_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC3_RB_WPTR_POLL_ADDR_LO
++#define SDMA0_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA0_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA0_RLC3_RB_AQL_CNTL
++#define SDMA0_RLC3_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
++#define SDMA0_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
++#define SDMA0_RLC3_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
++#define SDMA0_RLC3_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
++#define SDMA0_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
++#define SDMA0_RLC3_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
++//SDMA0_RLC3_MINOR_PTR_UPDATE
++#define SDMA0_RLC3_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
++#define SDMA0_RLC3_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
++//SDMA0_RLC3_MIDCMD_DATA0
++#define SDMA0_RLC3_MIDCMD_DATA0__DATA0__SHIFT 0x0
++#define SDMA0_RLC3_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
++//SDMA0_RLC3_MIDCMD_DATA1
++#define SDMA0_RLC3_MIDCMD_DATA1__DATA1__SHIFT 0x0
++#define SDMA0_RLC3_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
++//SDMA0_RLC3_MIDCMD_DATA2
++#define SDMA0_RLC3_MIDCMD_DATA2__DATA2__SHIFT 0x0
++#define SDMA0_RLC3_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
++//SDMA0_RLC3_MIDCMD_DATA3
++#define SDMA0_RLC3_MIDCMD_DATA3__DATA3__SHIFT 0x0
++#define SDMA0_RLC3_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
++//SDMA0_RLC3_MIDCMD_DATA4
++#define SDMA0_RLC3_MIDCMD_DATA4__DATA4__SHIFT 0x0
++#define SDMA0_RLC3_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
++//SDMA0_RLC3_MIDCMD_DATA5
++#define SDMA0_RLC3_MIDCMD_DATA5__DATA5__SHIFT 0x0
++#define SDMA0_RLC3_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
++//SDMA0_RLC3_MIDCMD_DATA6
++#define SDMA0_RLC3_MIDCMD_DATA6__DATA6__SHIFT 0x0
++#define SDMA0_RLC3_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
++//SDMA0_RLC3_MIDCMD_DATA7
++#define SDMA0_RLC3_MIDCMD_DATA7__DATA7__SHIFT 0x0
++#define SDMA0_RLC3_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
++//SDMA0_RLC3_MIDCMD_DATA8
++#define SDMA0_RLC3_MIDCMD_DATA8__DATA8__SHIFT 0x0
++#define SDMA0_RLC3_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
++//SDMA0_RLC3_MIDCMD_CNTL
++#define SDMA0_RLC3_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
++#define SDMA0_RLC3_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
++#define SDMA0_RLC3_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
++#define SDMA0_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
++#define SDMA0_RLC3_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
++#define SDMA0_RLC3_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
++#define SDMA0_RLC3_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
++#define SDMA0_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
++//SDMA0_RLC4_RB_CNTL
++#define SDMA0_RLC4_RB_CNTL__RB_ENABLE__SHIFT 0x0
++#define SDMA0_RLC4_RB_CNTL__RB_SIZE__SHIFT 0x1
++#define SDMA0_RLC4_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
++#define SDMA0_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
++#define SDMA0_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
++#define SDMA0_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
++#define SDMA0_RLC4_RB_CNTL__RB_PRIV__SHIFT 0x17
++#define SDMA0_RLC4_RB_CNTL__RB_VMID__SHIFT 0x18
++#define SDMA0_RLC4_RB_CNTL__RB_ENABLE_MASK 0x00000001L
++#define SDMA0_RLC4_RB_CNTL__RB_SIZE_MASK 0x0000003EL
++#define SDMA0_RLC4_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
++#define SDMA0_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
++#define SDMA0_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
++#define SDMA0_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
++#define SDMA0_RLC4_RB_CNTL__RB_PRIV_MASK 0x00800000L
++#define SDMA0_RLC4_RB_CNTL__RB_VMID_MASK 0x0F000000L
++//SDMA0_RLC4_RB_BASE
++#define SDMA0_RLC4_RB_BASE__ADDR__SHIFT 0x0
++#define SDMA0_RLC4_RB_BASE__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC4_RB_BASE_HI
++#define SDMA0_RLC4_RB_BASE_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC4_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
++//SDMA0_RLC4_RB_RPTR
++#define SDMA0_RLC4_RB_RPTR__OFFSET__SHIFT 0x0
++#define SDMA0_RLC4_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_RLC4_RB_RPTR_HI
++#define SDMA0_RLC4_RB_RPTR_HI__OFFSET__SHIFT 0x0
++#define SDMA0_RLC4_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_RLC4_RB_WPTR
++#define SDMA0_RLC4_RB_WPTR__OFFSET__SHIFT 0x0
++#define SDMA0_RLC4_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_RLC4_RB_WPTR_HI
++#define SDMA0_RLC4_RB_WPTR_HI__OFFSET__SHIFT 0x0
++#define SDMA0_RLC4_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_RLC4_RB_WPTR_POLL_CNTL
++#define SDMA0_RLC4_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
++#define SDMA0_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
++#define SDMA0_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
++#define SDMA0_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
++#define SDMA0_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
++#define SDMA0_RLC4_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
++#define SDMA0_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
++#define SDMA0_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
++#define SDMA0_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
++#define SDMA0_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
++//SDMA0_RLC4_RB_RPTR_ADDR_HI
++#define SDMA0_RLC4_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC4_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC4_RB_RPTR_ADDR_LO
++#define SDMA0_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
++#define SDMA0_RLC4_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA0_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
++#define SDMA0_RLC4_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA0_RLC4_IB_CNTL
++#define SDMA0_RLC4_IB_CNTL__IB_ENABLE__SHIFT 0x0
++#define SDMA0_RLC4_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
++#define SDMA0_RLC4_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
++#define SDMA0_RLC4_IB_CNTL__CMD_VMID__SHIFT 0x10
++#define SDMA0_RLC4_IB_CNTL__IB_ENABLE_MASK 0x00000001L
++#define SDMA0_RLC4_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
++#define SDMA0_RLC4_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
++#define SDMA0_RLC4_IB_CNTL__CMD_VMID_MASK 0x000F0000L
++//SDMA0_RLC4_IB_RPTR
++#define SDMA0_RLC4_IB_RPTR__OFFSET__SHIFT 0x2
++#define SDMA0_RLC4_IB_RPTR__OFFSET_MASK 0x003FFFFCL
++//SDMA0_RLC4_IB_OFFSET
++#define SDMA0_RLC4_IB_OFFSET__OFFSET__SHIFT 0x2
++#define SDMA0_RLC4_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
++//SDMA0_RLC4_IB_BASE_LO
++#define SDMA0_RLC4_IB_BASE_LO__ADDR__SHIFT 0x5
++#define SDMA0_RLC4_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
++//SDMA0_RLC4_IB_BASE_HI
++#define SDMA0_RLC4_IB_BASE_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC4_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC4_IB_SIZE
++#define SDMA0_RLC4_IB_SIZE__SIZE__SHIFT 0x0
++#define SDMA0_RLC4_IB_SIZE__SIZE_MASK 0x000FFFFFL
++//SDMA0_RLC4_SKIP_CNTL
++#define SDMA0_RLC4_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
++#define SDMA0_RLC4_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
++//SDMA0_RLC4_CONTEXT_STATUS
++#define SDMA0_RLC4_CONTEXT_STATUS__SELECTED__SHIFT 0x0
++#define SDMA0_RLC4_CONTEXT_STATUS__IDLE__SHIFT 0x2
++#define SDMA0_RLC4_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
++#define SDMA0_RLC4_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
++#define SDMA0_RLC4_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
++#define SDMA0_RLC4_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
++#define SDMA0_RLC4_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
++#define SDMA0_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
++#define SDMA0_RLC4_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
++#define SDMA0_RLC4_CONTEXT_STATUS__IDLE_MASK 0x00000004L
++#define SDMA0_RLC4_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
++#define SDMA0_RLC4_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
++#define SDMA0_RLC4_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
++#define SDMA0_RLC4_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
++#define SDMA0_RLC4_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
++#define SDMA0_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
++//SDMA0_RLC4_DOORBELL
++#define SDMA0_RLC4_DOORBELL__ENABLE__SHIFT 0x1c
++#define SDMA0_RLC4_DOORBELL__CAPTURED__SHIFT 0x1e
++#define SDMA0_RLC4_DOORBELL__ENABLE_MASK 0x10000000L
++#define SDMA0_RLC4_DOORBELL__CAPTURED_MASK 0x40000000L
++//SDMA0_RLC4_STATUS
++#define SDMA0_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
++#define SDMA0_RLC4_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
++#define SDMA0_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
++#define SDMA0_RLC4_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
++//SDMA0_RLC4_DOORBELL_LOG
++#define SDMA0_RLC4_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
++#define SDMA0_RLC4_DOORBELL_LOG__DATA__SHIFT 0x2
++#define SDMA0_RLC4_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
++#define SDMA0_RLC4_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
++//SDMA0_RLC4_WATERMARK
++#define SDMA0_RLC4_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
++#define SDMA0_RLC4_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
++#define SDMA0_RLC4_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
++#define SDMA0_RLC4_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
++//SDMA0_RLC4_DOORBELL_OFFSET
++#define SDMA0_RLC4_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
++#define SDMA0_RLC4_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
++//SDMA0_RLC4_CSA_ADDR_LO
++#define SDMA0_RLC4_CSA_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA0_RLC4_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA0_RLC4_CSA_ADDR_HI
++#define SDMA0_RLC4_CSA_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC4_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC4_IB_SUB_REMAIN
++#define SDMA0_RLC4_IB_SUB_REMAIN__SIZE__SHIFT 0x0
++#define SDMA0_RLC4_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
++//SDMA0_RLC4_PREEMPT
++#define SDMA0_RLC4_PREEMPT__IB_PREEMPT__SHIFT 0x0
++#define SDMA0_RLC4_PREEMPT__IB_PREEMPT_MASK 0x00000001L
++//SDMA0_RLC4_DUMMY_REG
++#define SDMA0_RLC4_DUMMY_REG__DUMMY__SHIFT 0x0
++#define SDMA0_RLC4_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
++//SDMA0_RLC4_RB_WPTR_POLL_ADDR_HI
++#define SDMA0_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC4_RB_WPTR_POLL_ADDR_LO
++#define SDMA0_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA0_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA0_RLC4_RB_AQL_CNTL
++#define SDMA0_RLC4_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
++#define SDMA0_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
++#define SDMA0_RLC4_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
++#define SDMA0_RLC4_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
++#define SDMA0_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
++#define SDMA0_RLC4_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
++//SDMA0_RLC4_MINOR_PTR_UPDATE
++#define SDMA0_RLC4_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
++#define SDMA0_RLC4_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
++//SDMA0_RLC4_MIDCMD_DATA0
++#define SDMA0_RLC4_MIDCMD_DATA0__DATA0__SHIFT 0x0
++#define SDMA0_RLC4_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
++//SDMA0_RLC4_MIDCMD_DATA1
++#define SDMA0_RLC4_MIDCMD_DATA1__DATA1__SHIFT 0x0
++#define SDMA0_RLC4_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
++//SDMA0_RLC4_MIDCMD_DATA2
++#define SDMA0_RLC4_MIDCMD_DATA2__DATA2__SHIFT 0x0
++#define SDMA0_RLC4_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
++//SDMA0_RLC4_MIDCMD_DATA3
++#define SDMA0_RLC4_MIDCMD_DATA3__DATA3__SHIFT 0x0
++#define SDMA0_RLC4_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
++//SDMA0_RLC4_MIDCMD_DATA4
++#define SDMA0_RLC4_MIDCMD_DATA4__DATA4__SHIFT 0x0
++#define SDMA0_RLC4_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
++//SDMA0_RLC4_MIDCMD_DATA5
++#define SDMA0_RLC4_MIDCMD_DATA5__DATA5__SHIFT 0x0
++#define SDMA0_RLC4_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
++//SDMA0_RLC4_MIDCMD_DATA6
++#define SDMA0_RLC4_MIDCMD_DATA6__DATA6__SHIFT 0x0
++#define SDMA0_RLC4_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
++//SDMA0_RLC4_MIDCMD_DATA7
++#define SDMA0_RLC4_MIDCMD_DATA7__DATA7__SHIFT 0x0
++#define SDMA0_RLC4_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
++//SDMA0_RLC4_MIDCMD_DATA8
++#define SDMA0_RLC4_MIDCMD_DATA8__DATA8__SHIFT 0x0
++#define SDMA0_RLC4_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
++//SDMA0_RLC4_MIDCMD_CNTL
++#define SDMA0_RLC4_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
++#define SDMA0_RLC4_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
++#define SDMA0_RLC4_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
++#define SDMA0_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
++#define SDMA0_RLC4_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
++#define SDMA0_RLC4_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
++#define SDMA0_RLC4_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
++#define SDMA0_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
++//SDMA0_RLC5_RB_CNTL
++#define SDMA0_RLC5_RB_CNTL__RB_ENABLE__SHIFT 0x0
++#define SDMA0_RLC5_RB_CNTL__RB_SIZE__SHIFT 0x1
++#define SDMA0_RLC5_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
++#define SDMA0_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
++#define SDMA0_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
++#define SDMA0_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
++#define SDMA0_RLC5_RB_CNTL__RB_PRIV__SHIFT 0x17
++#define SDMA0_RLC5_RB_CNTL__RB_VMID__SHIFT 0x18
++#define SDMA0_RLC5_RB_CNTL__RB_ENABLE_MASK 0x00000001L
++#define SDMA0_RLC5_RB_CNTL__RB_SIZE_MASK 0x0000003EL
++#define SDMA0_RLC5_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
++#define SDMA0_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
++#define SDMA0_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
++#define SDMA0_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
++#define SDMA0_RLC5_RB_CNTL__RB_PRIV_MASK 0x00800000L
++#define SDMA0_RLC5_RB_CNTL__RB_VMID_MASK 0x0F000000L
++//SDMA0_RLC5_RB_BASE
++#define SDMA0_RLC5_RB_BASE__ADDR__SHIFT 0x0
++#define SDMA0_RLC5_RB_BASE__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC5_RB_BASE_HI
++#define SDMA0_RLC5_RB_BASE_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC5_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
++//SDMA0_RLC5_RB_RPTR
++#define SDMA0_RLC5_RB_RPTR__OFFSET__SHIFT 0x0
++#define SDMA0_RLC5_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_RLC5_RB_RPTR_HI
++#define SDMA0_RLC5_RB_RPTR_HI__OFFSET__SHIFT 0x0
++#define SDMA0_RLC5_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_RLC5_RB_WPTR
++#define SDMA0_RLC5_RB_WPTR__OFFSET__SHIFT 0x0
++#define SDMA0_RLC5_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_RLC5_RB_WPTR_HI
++#define SDMA0_RLC5_RB_WPTR_HI__OFFSET__SHIFT 0x0
++#define SDMA0_RLC5_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_RLC5_RB_WPTR_POLL_CNTL
++#define SDMA0_RLC5_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
++#define SDMA0_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
++#define SDMA0_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
++#define SDMA0_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
++#define SDMA0_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
++#define SDMA0_RLC5_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
++#define SDMA0_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
++#define SDMA0_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
++#define SDMA0_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
++#define SDMA0_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
++//SDMA0_RLC5_RB_RPTR_ADDR_HI
++#define SDMA0_RLC5_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC5_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC5_RB_RPTR_ADDR_LO
++#define SDMA0_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
++#define SDMA0_RLC5_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA0_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
++#define SDMA0_RLC5_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA0_RLC5_IB_CNTL
++#define SDMA0_RLC5_IB_CNTL__IB_ENABLE__SHIFT 0x0
++#define SDMA0_RLC5_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
++#define SDMA0_RLC5_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
++#define SDMA0_RLC5_IB_CNTL__CMD_VMID__SHIFT 0x10
++#define SDMA0_RLC5_IB_CNTL__IB_ENABLE_MASK 0x00000001L
++#define SDMA0_RLC5_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
++#define SDMA0_RLC5_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
++#define SDMA0_RLC5_IB_CNTL__CMD_VMID_MASK 0x000F0000L
++//SDMA0_RLC5_IB_RPTR
++#define SDMA0_RLC5_IB_RPTR__OFFSET__SHIFT 0x2
++#define SDMA0_RLC5_IB_RPTR__OFFSET_MASK 0x003FFFFCL
++//SDMA0_RLC5_IB_OFFSET
++#define SDMA0_RLC5_IB_OFFSET__OFFSET__SHIFT 0x2
++#define SDMA0_RLC5_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
++//SDMA0_RLC5_IB_BASE_LO
++#define SDMA0_RLC5_IB_BASE_LO__ADDR__SHIFT 0x5
++#define SDMA0_RLC5_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
++//SDMA0_RLC5_IB_BASE_HI
++#define SDMA0_RLC5_IB_BASE_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC5_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC5_IB_SIZE
++#define SDMA0_RLC5_IB_SIZE__SIZE__SHIFT 0x0
++#define SDMA0_RLC5_IB_SIZE__SIZE_MASK 0x000FFFFFL
++//SDMA0_RLC5_SKIP_CNTL
++#define SDMA0_RLC5_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
++#define SDMA0_RLC5_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
++//SDMA0_RLC5_CONTEXT_STATUS
++#define SDMA0_RLC5_CONTEXT_STATUS__SELECTED__SHIFT 0x0
++#define SDMA0_RLC5_CONTEXT_STATUS__IDLE__SHIFT 0x2
++#define SDMA0_RLC5_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
++#define SDMA0_RLC5_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
++#define SDMA0_RLC5_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
++#define SDMA0_RLC5_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
++#define SDMA0_RLC5_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
++#define SDMA0_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
++#define SDMA0_RLC5_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
++#define SDMA0_RLC5_CONTEXT_STATUS__IDLE_MASK 0x00000004L
++#define SDMA0_RLC5_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
++#define SDMA0_RLC5_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
++#define SDMA0_RLC5_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
++#define SDMA0_RLC5_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
++#define SDMA0_RLC5_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
++#define SDMA0_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
++//SDMA0_RLC5_DOORBELL
++#define SDMA0_RLC5_DOORBELL__ENABLE__SHIFT 0x1c
++#define SDMA0_RLC5_DOORBELL__CAPTURED__SHIFT 0x1e
++#define SDMA0_RLC5_DOORBELL__ENABLE_MASK 0x10000000L
++#define SDMA0_RLC5_DOORBELL__CAPTURED_MASK 0x40000000L
++//SDMA0_RLC5_STATUS
++#define SDMA0_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
++#define SDMA0_RLC5_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
++#define SDMA0_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
++#define SDMA0_RLC5_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
++//SDMA0_RLC5_DOORBELL_LOG
++#define SDMA0_RLC5_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
++#define SDMA0_RLC5_DOORBELL_LOG__DATA__SHIFT 0x2
++#define SDMA0_RLC5_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
++#define SDMA0_RLC5_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
++//SDMA0_RLC5_WATERMARK
++#define SDMA0_RLC5_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
++#define SDMA0_RLC5_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
++#define SDMA0_RLC5_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
++#define SDMA0_RLC5_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
++//SDMA0_RLC5_DOORBELL_OFFSET
++#define SDMA0_RLC5_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
++#define SDMA0_RLC5_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
++//SDMA0_RLC5_CSA_ADDR_LO
++#define SDMA0_RLC5_CSA_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA0_RLC5_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA0_RLC5_CSA_ADDR_HI
++#define SDMA0_RLC5_CSA_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC5_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC5_IB_SUB_REMAIN
++#define SDMA0_RLC5_IB_SUB_REMAIN__SIZE__SHIFT 0x0
++#define SDMA0_RLC5_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
++//SDMA0_RLC5_PREEMPT
++#define SDMA0_RLC5_PREEMPT__IB_PREEMPT__SHIFT 0x0
++#define SDMA0_RLC5_PREEMPT__IB_PREEMPT_MASK 0x00000001L
++//SDMA0_RLC5_DUMMY_REG
++#define SDMA0_RLC5_DUMMY_REG__DUMMY__SHIFT 0x0
++#define SDMA0_RLC5_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
++//SDMA0_RLC5_RB_WPTR_POLL_ADDR_HI
++#define SDMA0_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC5_RB_WPTR_POLL_ADDR_LO
++#define SDMA0_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA0_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA0_RLC5_RB_AQL_CNTL
++#define SDMA0_RLC5_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
++#define SDMA0_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
++#define SDMA0_RLC5_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
++#define SDMA0_RLC5_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
++#define SDMA0_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
++#define SDMA0_RLC5_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
++//SDMA0_RLC5_MINOR_PTR_UPDATE
++#define SDMA0_RLC5_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
++#define SDMA0_RLC5_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
++//SDMA0_RLC5_MIDCMD_DATA0
++#define SDMA0_RLC5_MIDCMD_DATA0__DATA0__SHIFT 0x0
++#define SDMA0_RLC5_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
++//SDMA0_RLC5_MIDCMD_DATA1
++#define SDMA0_RLC5_MIDCMD_DATA1__DATA1__SHIFT 0x0
++#define SDMA0_RLC5_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
++//SDMA0_RLC5_MIDCMD_DATA2
++#define SDMA0_RLC5_MIDCMD_DATA2__DATA2__SHIFT 0x0
++#define SDMA0_RLC5_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
++//SDMA0_RLC5_MIDCMD_DATA3
++#define SDMA0_RLC5_MIDCMD_DATA3__DATA3__SHIFT 0x0
++#define SDMA0_RLC5_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
++//SDMA0_RLC5_MIDCMD_DATA4
++#define SDMA0_RLC5_MIDCMD_DATA4__DATA4__SHIFT 0x0
++#define SDMA0_RLC5_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
++//SDMA0_RLC5_MIDCMD_DATA5
++#define SDMA0_RLC5_MIDCMD_DATA5__DATA5__SHIFT 0x0
++#define SDMA0_RLC5_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
++//SDMA0_RLC5_MIDCMD_DATA6
++#define SDMA0_RLC5_MIDCMD_DATA6__DATA6__SHIFT 0x0
++#define SDMA0_RLC5_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
++//SDMA0_RLC5_MIDCMD_DATA7
++#define SDMA0_RLC5_MIDCMD_DATA7__DATA7__SHIFT 0x0
++#define SDMA0_RLC5_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
++//SDMA0_RLC5_MIDCMD_DATA8
++#define SDMA0_RLC5_MIDCMD_DATA8__DATA8__SHIFT 0x0
++#define SDMA0_RLC5_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
++//SDMA0_RLC5_MIDCMD_CNTL
++#define SDMA0_RLC5_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
++#define SDMA0_RLC5_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
++#define SDMA0_RLC5_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
++#define SDMA0_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
++#define SDMA0_RLC5_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
++#define SDMA0_RLC5_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
++#define SDMA0_RLC5_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
++#define SDMA0_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
++//SDMA0_RLC6_RB_CNTL
++#define SDMA0_RLC6_RB_CNTL__RB_ENABLE__SHIFT 0x0
++#define SDMA0_RLC6_RB_CNTL__RB_SIZE__SHIFT 0x1
++#define SDMA0_RLC6_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
++#define SDMA0_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
++#define SDMA0_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
++#define SDMA0_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
++#define SDMA0_RLC6_RB_CNTL__RB_PRIV__SHIFT 0x17
++#define SDMA0_RLC6_RB_CNTL__RB_VMID__SHIFT 0x18
++#define SDMA0_RLC6_RB_CNTL__RB_ENABLE_MASK 0x00000001L
++#define SDMA0_RLC6_RB_CNTL__RB_SIZE_MASK 0x0000003EL
++#define SDMA0_RLC6_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
++#define SDMA0_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
++#define SDMA0_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
++#define SDMA0_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
++#define SDMA0_RLC6_RB_CNTL__RB_PRIV_MASK 0x00800000L
++#define SDMA0_RLC6_RB_CNTL__RB_VMID_MASK 0x0F000000L
++//SDMA0_RLC6_RB_BASE
++#define SDMA0_RLC6_RB_BASE__ADDR__SHIFT 0x0
++#define SDMA0_RLC6_RB_BASE__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC6_RB_BASE_HI
++#define SDMA0_RLC6_RB_BASE_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC6_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
++//SDMA0_RLC6_RB_RPTR
++#define SDMA0_RLC6_RB_RPTR__OFFSET__SHIFT 0x0
++#define SDMA0_RLC6_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_RLC6_RB_RPTR_HI
++#define SDMA0_RLC6_RB_RPTR_HI__OFFSET__SHIFT 0x0
++#define SDMA0_RLC6_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_RLC6_RB_WPTR
++#define SDMA0_RLC6_RB_WPTR__OFFSET__SHIFT 0x0
++#define SDMA0_RLC6_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_RLC6_RB_WPTR_HI
++#define SDMA0_RLC6_RB_WPTR_HI__OFFSET__SHIFT 0x0
++#define SDMA0_RLC6_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_RLC6_RB_WPTR_POLL_CNTL
++#define SDMA0_RLC6_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
++#define SDMA0_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
++#define SDMA0_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
++#define SDMA0_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
++#define SDMA0_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
++#define SDMA0_RLC6_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
++#define SDMA0_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
++#define SDMA0_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
++#define SDMA0_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
++#define SDMA0_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
++//SDMA0_RLC6_RB_RPTR_ADDR_HI
++#define SDMA0_RLC6_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC6_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC6_RB_RPTR_ADDR_LO
++#define SDMA0_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
++#define SDMA0_RLC6_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA0_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
++#define SDMA0_RLC6_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA0_RLC6_IB_CNTL
++#define SDMA0_RLC6_IB_CNTL__IB_ENABLE__SHIFT 0x0
++#define SDMA0_RLC6_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
++#define SDMA0_RLC6_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
++#define SDMA0_RLC6_IB_CNTL__CMD_VMID__SHIFT 0x10
++#define SDMA0_RLC6_IB_CNTL__IB_ENABLE_MASK 0x00000001L
++#define SDMA0_RLC6_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
++#define SDMA0_RLC6_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
++#define SDMA0_RLC6_IB_CNTL__CMD_VMID_MASK 0x000F0000L
++//SDMA0_RLC6_IB_RPTR
++#define SDMA0_RLC6_IB_RPTR__OFFSET__SHIFT 0x2
++#define SDMA0_RLC6_IB_RPTR__OFFSET_MASK 0x003FFFFCL
++//SDMA0_RLC6_IB_OFFSET
++#define SDMA0_RLC6_IB_OFFSET__OFFSET__SHIFT 0x2
++#define SDMA0_RLC6_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
++//SDMA0_RLC6_IB_BASE_LO
++#define SDMA0_RLC6_IB_BASE_LO__ADDR__SHIFT 0x5
++#define SDMA0_RLC6_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
++//SDMA0_RLC6_IB_BASE_HI
++#define SDMA0_RLC6_IB_BASE_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC6_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC6_IB_SIZE
++#define SDMA0_RLC6_IB_SIZE__SIZE__SHIFT 0x0
++#define SDMA0_RLC6_IB_SIZE__SIZE_MASK 0x000FFFFFL
++//SDMA0_RLC6_SKIP_CNTL
++#define SDMA0_RLC6_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
++#define SDMA0_RLC6_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
++//SDMA0_RLC6_CONTEXT_STATUS
++#define SDMA0_RLC6_CONTEXT_STATUS__SELECTED__SHIFT 0x0
++#define SDMA0_RLC6_CONTEXT_STATUS__IDLE__SHIFT 0x2
++#define SDMA0_RLC6_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
++#define SDMA0_RLC6_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
++#define SDMA0_RLC6_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
++#define SDMA0_RLC6_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
++#define SDMA0_RLC6_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
++#define SDMA0_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
++#define SDMA0_RLC6_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
++#define SDMA0_RLC6_CONTEXT_STATUS__IDLE_MASK 0x00000004L
++#define SDMA0_RLC6_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
++#define SDMA0_RLC6_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
++#define SDMA0_RLC6_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
++#define SDMA0_RLC6_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
++#define SDMA0_RLC6_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
++#define SDMA0_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
++//SDMA0_RLC6_DOORBELL
++#define SDMA0_RLC6_DOORBELL__ENABLE__SHIFT 0x1c
++#define SDMA0_RLC6_DOORBELL__CAPTURED__SHIFT 0x1e
++#define SDMA0_RLC6_DOORBELL__ENABLE_MASK 0x10000000L
++#define SDMA0_RLC6_DOORBELL__CAPTURED_MASK 0x40000000L
++//SDMA0_RLC6_STATUS
++#define SDMA0_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
++#define SDMA0_RLC6_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
++#define SDMA0_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
++#define SDMA0_RLC6_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
++//SDMA0_RLC6_DOORBELL_LOG
++#define SDMA0_RLC6_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
++#define SDMA0_RLC6_DOORBELL_LOG__DATA__SHIFT 0x2
++#define SDMA0_RLC6_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
++#define SDMA0_RLC6_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
++//SDMA0_RLC6_WATERMARK
++#define SDMA0_RLC6_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
++#define SDMA0_RLC6_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
++#define SDMA0_RLC6_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
++#define SDMA0_RLC6_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
++//SDMA0_RLC6_DOORBELL_OFFSET
++#define SDMA0_RLC6_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
++#define SDMA0_RLC6_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
++//SDMA0_RLC6_CSA_ADDR_LO
++#define SDMA0_RLC6_CSA_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA0_RLC6_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA0_RLC6_CSA_ADDR_HI
++#define SDMA0_RLC6_CSA_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC6_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC6_IB_SUB_REMAIN
++#define SDMA0_RLC6_IB_SUB_REMAIN__SIZE__SHIFT 0x0
++#define SDMA0_RLC6_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
++//SDMA0_RLC6_PREEMPT
++#define SDMA0_RLC6_PREEMPT__IB_PREEMPT__SHIFT 0x0
++#define SDMA0_RLC6_PREEMPT__IB_PREEMPT_MASK 0x00000001L
++//SDMA0_RLC6_DUMMY_REG
++#define SDMA0_RLC6_DUMMY_REG__DUMMY__SHIFT 0x0
++#define SDMA0_RLC6_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
++//SDMA0_RLC6_RB_WPTR_POLL_ADDR_HI
++#define SDMA0_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC6_RB_WPTR_POLL_ADDR_LO
++#define SDMA0_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA0_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA0_RLC6_RB_AQL_CNTL
++#define SDMA0_RLC6_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
++#define SDMA0_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
++#define SDMA0_RLC6_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
++#define SDMA0_RLC6_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
++#define SDMA0_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
++#define SDMA0_RLC6_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
++//SDMA0_RLC6_MINOR_PTR_UPDATE
++#define SDMA0_RLC6_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
++#define SDMA0_RLC6_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
++//SDMA0_RLC6_MIDCMD_DATA0
++#define SDMA0_RLC6_MIDCMD_DATA0__DATA0__SHIFT 0x0
++#define SDMA0_RLC6_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
++//SDMA0_RLC6_MIDCMD_DATA1
++#define SDMA0_RLC6_MIDCMD_DATA1__DATA1__SHIFT 0x0
++#define SDMA0_RLC6_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
++//SDMA0_RLC6_MIDCMD_DATA2
++#define SDMA0_RLC6_MIDCMD_DATA2__DATA2__SHIFT 0x0
++#define SDMA0_RLC6_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
++//SDMA0_RLC6_MIDCMD_DATA3
++#define SDMA0_RLC6_MIDCMD_DATA3__DATA3__SHIFT 0x0
++#define SDMA0_RLC6_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
++//SDMA0_RLC6_MIDCMD_DATA4
++#define SDMA0_RLC6_MIDCMD_DATA4__DATA4__SHIFT 0x0
++#define SDMA0_RLC6_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
++//SDMA0_RLC6_MIDCMD_DATA5
++#define SDMA0_RLC6_MIDCMD_DATA5__DATA5__SHIFT 0x0
++#define SDMA0_RLC6_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
++//SDMA0_RLC6_MIDCMD_DATA6
++#define SDMA0_RLC6_MIDCMD_DATA6__DATA6__SHIFT 0x0
++#define SDMA0_RLC6_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
++//SDMA0_RLC6_MIDCMD_DATA7
++#define SDMA0_RLC6_MIDCMD_DATA7__DATA7__SHIFT 0x0
++#define SDMA0_RLC6_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
++//SDMA0_RLC6_MIDCMD_DATA8
++#define SDMA0_RLC6_MIDCMD_DATA8__DATA8__SHIFT 0x0
++#define SDMA0_RLC6_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
++//SDMA0_RLC6_MIDCMD_CNTL
++#define SDMA0_RLC6_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
++#define SDMA0_RLC6_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
++#define SDMA0_RLC6_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
++#define SDMA0_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
++#define SDMA0_RLC6_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
++#define SDMA0_RLC6_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
++#define SDMA0_RLC6_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
++#define SDMA0_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
++//SDMA0_RLC7_RB_CNTL
++#define SDMA0_RLC7_RB_CNTL__RB_ENABLE__SHIFT 0x0
++#define SDMA0_RLC7_RB_CNTL__RB_SIZE__SHIFT 0x1
++#define SDMA0_RLC7_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
++#define SDMA0_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
++#define SDMA0_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
++#define SDMA0_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
++#define SDMA0_RLC7_RB_CNTL__RB_PRIV__SHIFT 0x17
++#define SDMA0_RLC7_RB_CNTL__RB_VMID__SHIFT 0x18
++#define SDMA0_RLC7_RB_CNTL__RB_ENABLE_MASK 0x00000001L
++#define SDMA0_RLC7_RB_CNTL__RB_SIZE_MASK 0x0000003EL
++#define SDMA0_RLC7_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
++#define SDMA0_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
++#define SDMA0_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
++#define SDMA0_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
++#define SDMA0_RLC7_RB_CNTL__RB_PRIV_MASK 0x00800000L
++#define SDMA0_RLC7_RB_CNTL__RB_VMID_MASK 0x0F000000L
++//SDMA0_RLC7_RB_BASE
++#define SDMA0_RLC7_RB_BASE__ADDR__SHIFT 0x0
++#define SDMA0_RLC7_RB_BASE__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC7_RB_BASE_HI
++#define SDMA0_RLC7_RB_BASE_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC7_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
++//SDMA0_RLC7_RB_RPTR
++#define SDMA0_RLC7_RB_RPTR__OFFSET__SHIFT 0x0
++#define SDMA0_RLC7_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_RLC7_RB_RPTR_HI
++#define SDMA0_RLC7_RB_RPTR_HI__OFFSET__SHIFT 0x0
++#define SDMA0_RLC7_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_RLC7_RB_WPTR
++#define SDMA0_RLC7_RB_WPTR__OFFSET__SHIFT 0x0
++#define SDMA0_RLC7_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_RLC7_RB_WPTR_HI
++#define SDMA0_RLC7_RB_WPTR_HI__OFFSET__SHIFT 0x0
++#define SDMA0_RLC7_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_RLC7_RB_WPTR_POLL_CNTL
++#define SDMA0_RLC7_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
++#define SDMA0_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
++#define SDMA0_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
++#define SDMA0_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
++#define SDMA0_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
++#define SDMA0_RLC7_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
++#define SDMA0_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
++#define SDMA0_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
++#define SDMA0_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
++#define SDMA0_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
++//SDMA0_RLC7_RB_RPTR_ADDR_HI
++#define SDMA0_RLC7_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC7_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC7_RB_RPTR_ADDR_LO
++#define SDMA0_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
++#define SDMA0_RLC7_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA0_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
++#define SDMA0_RLC7_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA0_RLC7_IB_CNTL
++#define SDMA0_RLC7_IB_CNTL__IB_ENABLE__SHIFT 0x0
++#define SDMA0_RLC7_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
++#define SDMA0_RLC7_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
++#define SDMA0_RLC7_IB_CNTL__CMD_VMID__SHIFT 0x10
++#define SDMA0_RLC7_IB_CNTL__IB_ENABLE_MASK 0x00000001L
++#define SDMA0_RLC7_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
++#define SDMA0_RLC7_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
++#define SDMA0_RLC7_IB_CNTL__CMD_VMID_MASK 0x000F0000L
++//SDMA0_RLC7_IB_RPTR
++#define SDMA0_RLC7_IB_RPTR__OFFSET__SHIFT 0x2
++#define SDMA0_RLC7_IB_RPTR__OFFSET_MASK 0x003FFFFCL
++//SDMA0_RLC7_IB_OFFSET
++#define SDMA0_RLC7_IB_OFFSET__OFFSET__SHIFT 0x2
++#define SDMA0_RLC7_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
++//SDMA0_RLC7_IB_BASE_LO
++#define SDMA0_RLC7_IB_BASE_LO__ADDR__SHIFT 0x5
++#define SDMA0_RLC7_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
++//SDMA0_RLC7_IB_BASE_HI
++#define SDMA0_RLC7_IB_BASE_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC7_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC7_IB_SIZE
++#define SDMA0_RLC7_IB_SIZE__SIZE__SHIFT 0x0
++#define SDMA0_RLC7_IB_SIZE__SIZE_MASK 0x000FFFFFL
++//SDMA0_RLC7_SKIP_CNTL
++#define SDMA0_RLC7_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
++#define SDMA0_RLC7_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
++//SDMA0_RLC7_CONTEXT_STATUS
++#define SDMA0_RLC7_CONTEXT_STATUS__SELECTED__SHIFT 0x0
++#define SDMA0_RLC7_CONTEXT_STATUS__IDLE__SHIFT 0x2
++#define SDMA0_RLC7_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
++#define SDMA0_RLC7_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
++#define SDMA0_RLC7_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
++#define SDMA0_RLC7_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
++#define SDMA0_RLC7_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
++#define SDMA0_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
++#define SDMA0_RLC7_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
++#define SDMA0_RLC7_CONTEXT_STATUS__IDLE_MASK 0x00000004L
++#define SDMA0_RLC7_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
++#define SDMA0_RLC7_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
++#define SDMA0_RLC7_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
++#define SDMA0_RLC7_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
++#define SDMA0_RLC7_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
++#define SDMA0_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
++//SDMA0_RLC7_DOORBELL
++#define SDMA0_RLC7_DOORBELL__ENABLE__SHIFT 0x1c
++#define SDMA0_RLC7_DOORBELL__CAPTURED__SHIFT 0x1e
++#define SDMA0_RLC7_DOORBELL__ENABLE_MASK 0x10000000L
++#define SDMA0_RLC7_DOORBELL__CAPTURED_MASK 0x40000000L
++//SDMA0_RLC7_STATUS
++#define SDMA0_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
++#define SDMA0_RLC7_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
++#define SDMA0_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
++#define SDMA0_RLC7_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
++//SDMA0_RLC7_DOORBELL_LOG
++#define SDMA0_RLC7_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
++#define SDMA0_RLC7_DOORBELL_LOG__DATA__SHIFT 0x2
++#define SDMA0_RLC7_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
++#define SDMA0_RLC7_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
++//SDMA0_RLC7_WATERMARK
++#define SDMA0_RLC7_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
++#define SDMA0_RLC7_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
++#define SDMA0_RLC7_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
++#define SDMA0_RLC7_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
++//SDMA0_RLC7_DOORBELL_OFFSET
++#define SDMA0_RLC7_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
++#define SDMA0_RLC7_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
++//SDMA0_RLC7_CSA_ADDR_LO
++#define SDMA0_RLC7_CSA_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA0_RLC7_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA0_RLC7_CSA_ADDR_HI
++#define SDMA0_RLC7_CSA_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC7_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC7_IB_SUB_REMAIN
++#define SDMA0_RLC7_IB_SUB_REMAIN__SIZE__SHIFT 0x0
++#define SDMA0_RLC7_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
++//SDMA0_RLC7_PREEMPT
++#define SDMA0_RLC7_PREEMPT__IB_PREEMPT__SHIFT 0x0
++#define SDMA0_RLC7_PREEMPT__IB_PREEMPT_MASK 0x00000001L
++//SDMA0_RLC7_DUMMY_REG
++#define SDMA0_RLC7_DUMMY_REG__DUMMY__SHIFT 0x0
++#define SDMA0_RLC7_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
++//SDMA0_RLC7_RB_WPTR_POLL_ADDR_HI
++#define SDMA0_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC7_RB_WPTR_POLL_ADDR_LO
++#define SDMA0_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA0_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA0_RLC7_RB_AQL_CNTL
++#define SDMA0_RLC7_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
++#define SDMA0_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
++#define SDMA0_RLC7_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
++#define SDMA0_RLC7_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
++#define SDMA0_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
++#define SDMA0_RLC7_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
++//SDMA0_RLC7_MINOR_PTR_UPDATE
++#define SDMA0_RLC7_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
++#define SDMA0_RLC7_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
++//SDMA0_RLC7_MIDCMD_DATA0
++#define SDMA0_RLC7_MIDCMD_DATA0__DATA0__SHIFT 0x0
++#define SDMA0_RLC7_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
++//SDMA0_RLC7_MIDCMD_DATA1
++#define SDMA0_RLC7_MIDCMD_DATA1__DATA1__SHIFT 0x0
++#define SDMA0_RLC7_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
++//SDMA0_RLC7_MIDCMD_DATA2
++#define SDMA0_RLC7_MIDCMD_DATA2__DATA2__SHIFT 0x0
++#define SDMA0_RLC7_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
++//SDMA0_RLC7_MIDCMD_DATA3
++#define SDMA0_RLC7_MIDCMD_DATA3__DATA3__SHIFT 0x0
++#define SDMA0_RLC7_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
++//SDMA0_RLC7_MIDCMD_DATA4
++#define SDMA0_RLC7_MIDCMD_DATA4__DATA4__SHIFT 0x0
++#define SDMA0_RLC7_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
++//SDMA0_RLC7_MIDCMD_DATA5
++#define SDMA0_RLC7_MIDCMD_DATA5__DATA5__SHIFT 0x0
++#define SDMA0_RLC7_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
++//SDMA0_RLC7_MIDCMD_DATA6
++#define SDMA0_RLC7_MIDCMD_DATA6__DATA6__SHIFT 0x0
++#define SDMA0_RLC7_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
++//SDMA0_RLC7_MIDCMD_DATA7
++#define SDMA0_RLC7_MIDCMD_DATA7__DATA7__SHIFT 0x0
++#define SDMA0_RLC7_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
++//SDMA0_RLC7_MIDCMD_DATA8
++#define SDMA0_RLC7_MIDCMD_DATA8__DATA8__SHIFT 0x0
++#define SDMA0_RLC7_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
++//SDMA0_RLC7_MIDCMD_CNTL
++#define SDMA0_RLC7_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
++#define SDMA0_RLC7_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
++#define SDMA0_RLC7_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
++#define SDMA0_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
++#define SDMA0_RLC7_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
++#define SDMA0_RLC7_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
++#define SDMA0_RLC7_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
++#define SDMA0_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
++
++#endif
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_2_offset.h
+new file mode 100644
+index 0000000..db24d5e
+--- /dev/null
++++ b/drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_2_offset.h
+@@ -0,0 +1,1039 @@
++/*
++ * Copyright (C) 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
++ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++#ifndef _sdma1_4_2_0_OFFSET_HEADER
++#define _sdma1_4_2_0_OFFSET_HEADER
++
++
++
++// addressBlock: sdma1_sdma1dec
++// base address: 0x6180
++#define mmSDMA1_UCODE_ADDR 0x0000
++#define mmSDMA1_UCODE_ADDR_BASE_IDX 0
++#define mmSDMA1_UCODE_DATA 0x0001
++#define mmSDMA1_UCODE_DATA_BASE_IDX 0
++#define mmSDMA1_VM_CNTL 0x0004
++#define mmSDMA1_VM_CNTL_BASE_IDX 0
++#define mmSDMA1_VM_CTX_LO 0x0005
++#define mmSDMA1_VM_CTX_LO_BASE_IDX 0
++#define mmSDMA1_VM_CTX_HI 0x0006
++#define mmSDMA1_VM_CTX_HI_BASE_IDX 0
++#define mmSDMA1_ACTIVE_FCN_ID 0x0007
++#define mmSDMA1_ACTIVE_FCN_ID_BASE_IDX 0
++#define mmSDMA1_VM_CTX_CNTL 0x0008
++#define mmSDMA1_VM_CTX_CNTL_BASE_IDX 0
++#define mmSDMA1_VIRT_RESET_REQ 0x0009
++#define mmSDMA1_VIRT_RESET_REQ_BASE_IDX 0
++#define mmSDMA1_VF_ENABLE 0x000a
++#define mmSDMA1_VF_ENABLE_BASE_IDX 0
++#define mmSDMA1_CONTEXT_REG_TYPE0 0x000b
++#define mmSDMA1_CONTEXT_REG_TYPE0_BASE_IDX 0
++#define mmSDMA1_CONTEXT_REG_TYPE1 0x000c
++#define mmSDMA1_CONTEXT_REG_TYPE1_BASE_IDX 0
++#define mmSDMA1_CONTEXT_REG_TYPE2 0x000d
++#define mmSDMA1_CONTEXT_REG_TYPE2_BASE_IDX 0
++#define mmSDMA1_CONTEXT_REG_TYPE3 0x000e
++#define mmSDMA1_CONTEXT_REG_TYPE3_BASE_IDX 0
++#define mmSDMA1_PUB_REG_TYPE0 0x000f
++#define mmSDMA1_PUB_REG_TYPE0_BASE_IDX 0
++#define mmSDMA1_PUB_REG_TYPE1 0x0010
++#define mmSDMA1_PUB_REG_TYPE1_BASE_IDX 0
++#define mmSDMA1_PUB_REG_TYPE2 0x0011
++#define mmSDMA1_PUB_REG_TYPE2_BASE_IDX 0
++#define mmSDMA1_PUB_REG_TYPE3 0x0012
++#define mmSDMA1_PUB_REG_TYPE3_BASE_IDX 0
++#define mmSDMA1_MMHUB_CNTL 0x0013
++#define mmSDMA1_MMHUB_CNTL_BASE_IDX 0
++#define mmSDMA1_CONTEXT_GROUP_BOUNDARY 0x0019
++#define mmSDMA1_CONTEXT_GROUP_BOUNDARY_BASE_IDX 0
++#define mmSDMA1_POWER_CNTL 0x001a
++#define mmSDMA1_POWER_CNTL_BASE_IDX 0
++#define mmSDMA1_CLK_CTRL 0x001b
++#define mmSDMA1_CLK_CTRL_BASE_IDX 0
++#define mmSDMA1_CNTL 0x001c
++#define mmSDMA1_CNTL_BASE_IDX 0
++#define mmSDMA1_CHICKEN_BITS 0x001d
++#define mmSDMA1_CHICKEN_BITS_BASE_IDX 0
++#define mmSDMA1_GB_ADDR_CONFIG 0x001e
++#define mmSDMA1_GB_ADDR_CONFIG_BASE_IDX 0
++#define mmSDMA1_GB_ADDR_CONFIG_READ 0x001f
++#define mmSDMA1_GB_ADDR_CONFIG_READ_BASE_IDX 0
++#define mmSDMA1_RB_RPTR_FETCH_HI 0x0020
++#define mmSDMA1_RB_RPTR_FETCH_HI_BASE_IDX 0
++#define mmSDMA1_SEM_WAIT_FAIL_TIMER_CNTL 0x0021
++#define mmSDMA1_SEM_WAIT_FAIL_TIMER_CNTL_BASE_IDX 0
++#define mmSDMA1_RB_RPTR_FETCH 0x0022
++#define mmSDMA1_RB_RPTR_FETCH_BASE_IDX 0
++#define mmSDMA1_IB_OFFSET_FETCH 0x0023
++#define mmSDMA1_IB_OFFSET_FETCH_BASE_IDX 0
++#define mmSDMA1_PROGRAM 0x0024
++#define mmSDMA1_PROGRAM_BASE_IDX 0
++#define mmSDMA1_STATUS_REG 0x0025
++#define mmSDMA1_STATUS_REG_BASE_IDX 0
++#define mmSDMA1_STATUS1_REG 0x0026
++#define mmSDMA1_STATUS1_REG_BASE_IDX 0
++#define mmSDMA1_RD_BURST_CNTL 0x0027
++#define mmSDMA1_RD_BURST_CNTL_BASE_IDX 0
++#define mmSDMA1_HBM_PAGE_CONFIG 0x0028
++#define mmSDMA1_HBM_PAGE_CONFIG_BASE_IDX 0
++#define mmSDMA1_UCODE_CHECKSUM 0x0029
++#define mmSDMA1_UCODE_CHECKSUM_BASE_IDX 0
++#define mmSDMA1_F32_CNTL 0x002a
++#define mmSDMA1_F32_CNTL_BASE_IDX 0
++#define mmSDMA1_FREEZE 0x002b
++#define mmSDMA1_FREEZE_BASE_IDX 0
++#define mmSDMA1_PHASE0_QUANTUM 0x002c
++#define mmSDMA1_PHASE0_QUANTUM_BASE_IDX 0
++#define mmSDMA1_PHASE1_QUANTUM 0x002d
++#define mmSDMA1_PHASE1_QUANTUM_BASE_IDX 0
++#define mmSDMA1_EDC_CONFIG 0x0032
++#define mmSDMA1_EDC_CONFIG_BASE_IDX 0
++#define mmSDMA1_BA_THRESHOLD 0x0033
++#define mmSDMA1_BA_THRESHOLD_BASE_IDX 0
++#define mmSDMA1_ID 0x0034
++#define mmSDMA1_ID_BASE_IDX 0
++#define mmSDMA1_VERSION 0x0035
++#define mmSDMA1_VERSION_BASE_IDX 0
++#define mmSDMA1_EDC_COUNTER 0x0036
++#define mmSDMA1_EDC_COUNTER_BASE_IDX 0
++#define mmSDMA1_EDC_COUNTER_CLEAR 0x0037
++#define mmSDMA1_EDC_COUNTER_CLEAR_BASE_IDX 0
++#define mmSDMA1_STATUS2_REG 0x0038
++#define mmSDMA1_STATUS2_REG_BASE_IDX 0
++#define mmSDMA1_ATOMIC_CNTL 0x0039
++#define mmSDMA1_ATOMIC_CNTL_BASE_IDX 0
++#define mmSDMA1_ATOMIC_PREOP_LO 0x003a
++#define mmSDMA1_ATOMIC_PREOP_LO_BASE_IDX 0
++#define mmSDMA1_ATOMIC_PREOP_HI 0x003b
++#define mmSDMA1_ATOMIC_PREOP_HI_BASE_IDX 0
++#define mmSDMA1_UTCL1_CNTL 0x003c
++#define mmSDMA1_UTCL1_CNTL_BASE_IDX 0
++#define mmSDMA1_UTCL1_WATERMK 0x003d
++#define mmSDMA1_UTCL1_WATERMK_BASE_IDX 0
++#define mmSDMA1_UTCL1_RD_STATUS 0x003e
++#define mmSDMA1_UTCL1_RD_STATUS_BASE_IDX 0
++#define mmSDMA1_UTCL1_WR_STATUS 0x003f
++#define mmSDMA1_UTCL1_WR_STATUS_BASE_IDX 0
++#define mmSDMA1_UTCL1_INV0 0x0040
++#define mmSDMA1_UTCL1_INV0_BASE_IDX 0
++#define mmSDMA1_UTCL1_INV1 0x0041
++#define mmSDMA1_UTCL1_INV1_BASE_IDX 0
++#define mmSDMA1_UTCL1_INV2 0x0042
++#define mmSDMA1_UTCL1_INV2_BASE_IDX 0
++#define mmSDMA1_UTCL1_RD_XNACK0 0x0043
++#define mmSDMA1_UTCL1_RD_XNACK0_BASE_IDX 0
++#define mmSDMA1_UTCL1_RD_XNACK1 0x0044
++#define mmSDMA1_UTCL1_RD_XNACK1_BASE_IDX 0
++#define mmSDMA1_UTCL1_WR_XNACK0 0x0045
++#define mmSDMA1_UTCL1_WR_XNACK0_BASE_IDX 0
++#define mmSDMA1_UTCL1_WR_XNACK1 0x0046
++#define mmSDMA1_UTCL1_WR_XNACK1_BASE_IDX 0
++#define mmSDMA1_UTCL1_TIMEOUT 0x0047
++#define mmSDMA1_UTCL1_TIMEOUT_BASE_IDX 0
++#define mmSDMA1_UTCL1_PAGE 0x0048
++#define mmSDMA1_UTCL1_PAGE_BASE_IDX 0
++#define mmSDMA1_POWER_CNTL_IDLE 0x0049
++#define mmSDMA1_POWER_CNTL_IDLE_BASE_IDX 0
++#define mmSDMA1_RELAX_ORDERING_LUT 0x004a
++#define mmSDMA1_RELAX_ORDERING_LUT_BASE_IDX 0
++#define mmSDMA1_CHICKEN_BITS_2 0x004b
++#define mmSDMA1_CHICKEN_BITS_2_BASE_IDX 0
++#define mmSDMA1_STATUS3_REG 0x004c
++#define mmSDMA1_STATUS3_REG_BASE_IDX 0
++#define mmSDMA1_PHYSICAL_ADDR_LO 0x004d
++#define mmSDMA1_PHYSICAL_ADDR_LO_BASE_IDX 0
++#define mmSDMA1_PHYSICAL_ADDR_HI 0x004e
++#define mmSDMA1_PHYSICAL_ADDR_HI_BASE_IDX 0
++#define mmSDMA1_PHASE2_QUANTUM 0x004f
++#define mmSDMA1_PHASE2_QUANTUM_BASE_IDX 0
++#define mmSDMA1_ERROR_LOG 0x0050
++#define mmSDMA1_ERROR_LOG_BASE_IDX 0
++#define mmSDMA1_PUB_DUMMY_REG0 0x0051
++#define mmSDMA1_PUB_DUMMY_REG0_BASE_IDX 0
++#define mmSDMA1_PUB_DUMMY_REG1 0x0052
++#define mmSDMA1_PUB_DUMMY_REG1_BASE_IDX 0
++#define mmSDMA1_PUB_DUMMY_REG2 0x0053
++#define mmSDMA1_PUB_DUMMY_REG2_BASE_IDX 0
++#define mmSDMA1_PUB_DUMMY_REG3 0x0054
++#define mmSDMA1_PUB_DUMMY_REG3_BASE_IDX 0
++#define mmSDMA1_F32_COUNTER 0x0055
++#define mmSDMA1_F32_COUNTER_BASE_IDX 0
++#define mmSDMA1_PERFMON_CNTL 0x0057
++#define mmSDMA1_PERFMON_CNTL_BASE_IDX 0
++#define mmSDMA1_PERFCOUNTER0_RESULT 0x0058
++#define mmSDMA1_PERFCOUNTER0_RESULT_BASE_IDX 0
++#define mmSDMA1_PERFCOUNTER1_RESULT 0x0059
++#define mmSDMA1_PERFCOUNTER1_RESULT_BASE_IDX 0
++#define mmSDMA1_PERFCOUNTER_TAG_DELAY_RANGE 0x005a
++#define mmSDMA1_PERFCOUNTER_TAG_DELAY_RANGE_BASE_IDX 0
++#define mmSDMA1_CRD_CNTL 0x005b
++#define mmSDMA1_CRD_CNTL_BASE_IDX 0
++#define mmSDMA1_GPU_IOV_VIOLATION_LOG 0x005d
++#define mmSDMA1_GPU_IOV_VIOLATION_LOG_BASE_IDX 0
++#define mmSDMA1_ULV_CNTL 0x005e
++#define mmSDMA1_ULV_CNTL_BASE_IDX 0
++#define mmSDMA1_EA_DBIT_ADDR_DATA 0x0060
++#define mmSDMA1_EA_DBIT_ADDR_DATA_BASE_IDX 0
++#define mmSDMA1_EA_DBIT_ADDR_INDEX 0x0061
++#define mmSDMA1_EA_DBIT_ADDR_INDEX_BASE_IDX 0
++#define mmSDMA1_GFX_RB_CNTL 0x0080
++#define mmSDMA1_GFX_RB_CNTL_BASE_IDX 0
++#define mmSDMA1_GFX_RB_BASE 0x0081
++#define mmSDMA1_GFX_RB_BASE_BASE_IDX 0
++#define mmSDMA1_GFX_RB_BASE_HI 0x0082
++#define mmSDMA1_GFX_RB_BASE_HI_BASE_IDX 0
++#define mmSDMA1_GFX_RB_RPTR 0x0083
++#define mmSDMA1_GFX_RB_RPTR_BASE_IDX 0
++#define mmSDMA1_GFX_RB_RPTR_HI 0x0084
++#define mmSDMA1_GFX_RB_RPTR_HI_BASE_IDX 0
++#define mmSDMA1_GFX_RB_WPTR 0x0085
++#define mmSDMA1_GFX_RB_WPTR_BASE_IDX 0
++#define mmSDMA1_GFX_RB_WPTR_HI 0x0086
++#define mmSDMA1_GFX_RB_WPTR_HI_BASE_IDX 0
++#define mmSDMA1_GFX_RB_WPTR_POLL_CNTL 0x0087
++#define mmSDMA1_GFX_RB_WPTR_POLL_CNTL_BASE_IDX 0
++#define mmSDMA1_GFX_RB_RPTR_ADDR_HI 0x0088
++#define mmSDMA1_GFX_RB_RPTR_ADDR_HI_BASE_IDX 0
++#define mmSDMA1_GFX_RB_RPTR_ADDR_LO 0x0089
++#define mmSDMA1_GFX_RB_RPTR_ADDR_LO_BASE_IDX 0
++#define mmSDMA1_GFX_IB_CNTL 0x008a
++#define mmSDMA1_GFX_IB_CNTL_BASE_IDX 0
++#define mmSDMA1_GFX_IB_RPTR 0x008b
++#define mmSDMA1_GFX_IB_RPTR_BASE_IDX 0
++#define mmSDMA1_GFX_IB_OFFSET 0x008c
++#define mmSDMA1_GFX_IB_OFFSET_BASE_IDX 0
++#define mmSDMA1_GFX_IB_BASE_LO 0x008d
++#define mmSDMA1_GFX_IB_BASE_LO_BASE_IDX 0
++#define mmSDMA1_GFX_IB_BASE_HI 0x008e
++#define mmSDMA1_GFX_IB_BASE_HI_BASE_IDX 0
++#define mmSDMA1_GFX_IB_SIZE 0x008f
++#define mmSDMA1_GFX_IB_SIZE_BASE_IDX 0
++#define mmSDMA1_GFX_SKIP_CNTL 0x0090
++#define mmSDMA1_GFX_SKIP_CNTL_BASE_IDX 0
++#define mmSDMA1_GFX_CONTEXT_STATUS 0x0091
++#define mmSDMA1_GFX_CONTEXT_STATUS_BASE_IDX 0
++#define mmSDMA1_GFX_DOORBELL 0x0092
++#define mmSDMA1_GFX_DOORBELL_BASE_IDX 0
++#define mmSDMA1_GFX_CONTEXT_CNTL 0x0093
++#define mmSDMA1_GFX_CONTEXT_CNTL_BASE_IDX 0
++#define mmSDMA1_GFX_STATUS 0x00a8
++#define mmSDMA1_GFX_STATUS_BASE_IDX 0
++#define mmSDMA1_GFX_DOORBELL_LOG 0x00a9
++#define mmSDMA1_GFX_DOORBELL_LOG_BASE_IDX 0
++#define mmSDMA1_GFX_WATERMARK 0x00aa
++#define mmSDMA1_GFX_WATERMARK_BASE_IDX 0
++#define mmSDMA1_GFX_DOORBELL_OFFSET 0x00ab
++#define mmSDMA1_GFX_DOORBELL_OFFSET_BASE_IDX 0
++#define mmSDMA1_GFX_CSA_ADDR_LO 0x00ac
++#define mmSDMA1_GFX_CSA_ADDR_LO_BASE_IDX 0
++#define mmSDMA1_GFX_CSA_ADDR_HI 0x00ad
++#define mmSDMA1_GFX_CSA_ADDR_HI_BASE_IDX 0
++#define mmSDMA1_GFX_IB_SUB_REMAIN 0x00af
++#define mmSDMA1_GFX_IB_SUB_REMAIN_BASE_IDX 0
++#define mmSDMA1_GFX_PREEMPT 0x00b0
++#define mmSDMA1_GFX_PREEMPT_BASE_IDX 0
++#define mmSDMA1_GFX_DUMMY_REG 0x00b1
++#define mmSDMA1_GFX_DUMMY_REG_BASE_IDX 0
++#define mmSDMA1_GFX_RB_WPTR_POLL_ADDR_HI 0x00b2
++#define mmSDMA1_GFX_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
++#define mmSDMA1_GFX_RB_WPTR_POLL_ADDR_LO 0x00b3
++#define mmSDMA1_GFX_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
++#define mmSDMA1_GFX_RB_AQL_CNTL 0x00b4
++#define mmSDMA1_GFX_RB_AQL_CNTL_BASE_IDX 0
++#define mmSDMA1_GFX_MINOR_PTR_UPDATE 0x00b5
++#define mmSDMA1_GFX_MINOR_PTR_UPDATE_BASE_IDX 0
++#define mmSDMA1_GFX_MIDCMD_DATA0 0x00c0
++#define mmSDMA1_GFX_MIDCMD_DATA0_BASE_IDX 0
++#define mmSDMA1_GFX_MIDCMD_DATA1 0x00c1
++#define mmSDMA1_GFX_MIDCMD_DATA1_BASE_IDX 0
++#define mmSDMA1_GFX_MIDCMD_DATA2 0x00c2
++#define mmSDMA1_GFX_MIDCMD_DATA2_BASE_IDX 0
++#define mmSDMA1_GFX_MIDCMD_DATA3 0x00c3
++#define mmSDMA1_GFX_MIDCMD_DATA3_BASE_IDX 0
++#define mmSDMA1_GFX_MIDCMD_DATA4 0x00c4
++#define mmSDMA1_GFX_MIDCMD_DATA4_BASE_IDX 0
++#define mmSDMA1_GFX_MIDCMD_DATA5 0x00c5
++#define mmSDMA1_GFX_MIDCMD_DATA5_BASE_IDX 0
++#define mmSDMA1_GFX_MIDCMD_DATA6 0x00c6
++#define mmSDMA1_GFX_MIDCMD_DATA6_BASE_IDX 0
++#define mmSDMA1_GFX_MIDCMD_DATA7 0x00c7
++#define mmSDMA1_GFX_MIDCMD_DATA7_BASE_IDX 0
++#define mmSDMA1_GFX_MIDCMD_DATA8 0x00c8
++#define mmSDMA1_GFX_MIDCMD_DATA8_BASE_IDX 0
++#define mmSDMA1_GFX_MIDCMD_CNTL 0x00c9
++#define mmSDMA1_GFX_MIDCMD_CNTL_BASE_IDX 0
++#define mmSDMA1_PAGE_RB_CNTL 0x00e0
++#define mmSDMA1_PAGE_RB_CNTL_BASE_IDX 0
++#define mmSDMA1_PAGE_RB_BASE 0x00e1
++#define mmSDMA1_PAGE_RB_BASE_BASE_IDX 0
++#define mmSDMA1_PAGE_RB_BASE_HI 0x00e2
++#define mmSDMA1_PAGE_RB_BASE_HI_BASE_IDX 0
++#define mmSDMA1_PAGE_RB_RPTR 0x00e3
++#define mmSDMA1_PAGE_RB_RPTR_BASE_IDX 0
++#define mmSDMA1_PAGE_RB_RPTR_HI 0x00e4
++#define mmSDMA1_PAGE_RB_RPTR_HI_BASE_IDX 0
++#define mmSDMA1_PAGE_RB_WPTR 0x00e5
++#define mmSDMA1_PAGE_RB_WPTR_BASE_IDX 0
++#define mmSDMA1_PAGE_RB_WPTR_HI 0x00e6
++#define mmSDMA1_PAGE_RB_WPTR_HI_BASE_IDX 0
++#define mmSDMA1_PAGE_RB_WPTR_POLL_CNTL 0x00e7
++#define mmSDMA1_PAGE_RB_WPTR_POLL_CNTL_BASE_IDX 0
++#define mmSDMA1_PAGE_RB_RPTR_ADDR_HI 0x00e8
++#define mmSDMA1_PAGE_RB_RPTR_ADDR_HI_BASE_IDX 0
++#define mmSDMA1_PAGE_RB_RPTR_ADDR_LO 0x00e9
++#define mmSDMA1_PAGE_RB_RPTR_ADDR_LO_BASE_IDX 0
++#define mmSDMA1_PAGE_IB_CNTL 0x00ea
++#define mmSDMA1_PAGE_IB_CNTL_BASE_IDX 0
++#define mmSDMA1_PAGE_IB_RPTR 0x00eb
++#define mmSDMA1_PAGE_IB_RPTR_BASE_IDX 0
++#define mmSDMA1_PAGE_IB_OFFSET 0x00ec
++#define mmSDMA1_PAGE_IB_OFFSET_BASE_IDX 0
++#define mmSDMA1_PAGE_IB_BASE_LO 0x00ed
++#define mmSDMA1_PAGE_IB_BASE_LO_BASE_IDX 0
++#define mmSDMA1_PAGE_IB_BASE_HI 0x00ee
++#define mmSDMA1_PAGE_IB_BASE_HI_BASE_IDX 0
++#define mmSDMA1_PAGE_IB_SIZE 0x00ef
++#define mmSDMA1_PAGE_IB_SIZE_BASE_IDX 0
++#define mmSDMA1_PAGE_SKIP_CNTL 0x00f0
++#define mmSDMA1_PAGE_SKIP_CNTL_BASE_IDX 0
++#define mmSDMA1_PAGE_CONTEXT_STATUS 0x00f1
++#define mmSDMA1_PAGE_CONTEXT_STATUS_BASE_IDX 0
++#define mmSDMA1_PAGE_DOORBELL 0x00f2
++#define mmSDMA1_PAGE_DOORBELL_BASE_IDX 0
++#define mmSDMA1_PAGE_STATUS 0x0108
++#define mmSDMA1_PAGE_STATUS_BASE_IDX 0
++#define mmSDMA1_PAGE_DOORBELL_LOG 0x0109
++#define mmSDMA1_PAGE_DOORBELL_LOG_BASE_IDX 0
++#define mmSDMA1_PAGE_WATERMARK 0x010a
++#define mmSDMA1_PAGE_WATERMARK_BASE_IDX 0
++#define mmSDMA1_PAGE_DOORBELL_OFFSET 0x010b
++#define mmSDMA1_PAGE_DOORBELL_OFFSET_BASE_IDX 0
++#define mmSDMA1_PAGE_CSA_ADDR_LO 0x010c
++#define mmSDMA1_PAGE_CSA_ADDR_LO_BASE_IDX 0
++#define mmSDMA1_PAGE_CSA_ADDR_HI 0x010d
++#define mmSDMA1_PAGE_CSA_ADDR_HI_BASE_IDX 0
++#define mmSDMA1_PAGE_IB_SUB_REMAIN 0x010f
++#define mmSDMA1_PAGE_IB_SUB_REMAIN_BASE_IDX 0
++#define mmSDMA1_PAGE_PREEMPT 0x0110
++#define mmSDMA1_PAGE_PREEMPT_BASE_IDX 0
++#define mmSDMA1_PAGE_DUMMY_REG 0x0111
++#define mmSDMA1_PAGE_DUMMY_REG_BASE_IDX 0
++#define mmSDMA1_PAGE_RB_WPTR_POLL_ADDR_HI 0x0112
++#define mmSDMA1_PAGE_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
++#define mmSDMA1_PAGE_RB_WPTR_POLL_ADDR_LO 0x0113
++#define mmSDMA1_PAGE_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
++#define mmSDMA1_PAGE_RB_AQL_CNTL 0x0114
++#define mmSDMA1_PAGE_RB_AQL_CNTL_BASE_IDX 0
++#define mmSDMA1_PAGE_MINOR_PTR_UPDATE 0x0115
++#define mmSDMA1_PAGE_MINOR_PTR_UPDATE_BASE_IDX 0
++#define mmSDMA1_PAGE_MIDCMD_DATA0 0x0120
++#define mmSDMA1_PAGE_MIDCMD_DATA0_BASE_IDX 0
++#define mmSDMA1_PAGE_MIDCMD_DATA1 0x0121
++#define mmSDMA1_PAGE_MIDCMD_DATA1_BASE_IDX 0
++#define mmSDMA1_PAGE_MIDCMD_DATA2 0x0122
++#define mmSDMA1_PAGE_MIDCMD_DATA2_BASE_IDX 0
++#define mmSDMA1_PAGE_MIDCMD_DATA3 0x0123
++#define mmSDMA1_PAGE_MIDCMD_DATA3_BASE_IDX 0
++#define mmSDMA1_PAGE_MIDCMD_DATA4 0x0124
++#define mmSDMA1_PAGE_MIDCMD_DATA4_BASE_IDX 0
++#define mmSDMA1_PAGE_MIDCMD_DATA5 0x0125
++#define mmSDMA1_PAGE_MIDCMD_DATA5_BASE_IDX 0
++#define mmSDMA1_PAGE_MIDCMD_DATA6 0x0126
++#define mmSDMA1_PAGE_MIDCMD_DATA6_BASE_IDX 0
++#define mmSDMA1_PAGE_MIDCMD_DATA7 0x0127
++#define mmSDMA1_PAGE_MIDCMD_DATA7_BASE_IDX 0
++#define mmSDMA1_PAGE_MIDCMD_DATA8 0x0128
++#define mmSDMA1_PAGE_MIDCMD_DATA8_BASE_IDX 0
++#define mmSDMA1_PAGE_MIDCMD_CNTL 0x0129
++#define mmSDMA1_PAGE_MIDCMD_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC0_RB_CNTL 0x0140
++#define mmSDMA1_RLC0_RB_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC0_RB_BASE 0x0141
++#define mmSDMA1_RLC0_RB_BASE_BASE_IDX 0
++#define mmSDMA1_RLC0_RB_BASE_HI 0x0142
++#define mmSDMA1_RLC0_RB_BASE_HI_BASE_IDX 0
++#define mmSDMA1_RLC0_RB_RPTR 0x0143
++#define mmSDMA1_RLC0_RB_RPTR_BASE_IDX 0
++#define mmSDMA1_RLC0_RB_RPTR_HI 0x0144
++#define mmSDMA1_RLC0_RB_RPTR_HI_BASE_IDX 0
++#define mmSDMA1_RLC0_RB_WPTR 0x0145
++#define mmSDMA1_RLC0_RB_WPTR_BASE_IDX 0
++#define mmSDMA1_RLC0_RB_WPTR_HI 0x0146
++#define mmSDMA1_RLC0_RB_WPTR_HI_BASE_IDX 0
++#define mmSDMA1_RLC0_RB_WPTR_POLL_CNTL 0x0147
++#define mmSDMA1_RLC0_RB_WPTR_POLL_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC0_RB_RPTR_ADDR_HI 0x0148
++#define mmSDMA1_RLC0_RB_RPTR_ADDR_HI_BASE_IDX 0
++#define mmSDMA1_RLC0_RB_RPTR_ADDR_LO 0x0149
++#define mmSDMA1_RLC0_RB_RPTR_ADDR_LO_BASE_IDX 0
++#define mmSDMA1_RLC0_IB_CNTL 0x014a
++#define mmSDMA1_RLC0_IB_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC0_IB_RPTR 0x014b
++#define mmSDMA1_RLC0_IB_RPTR_BASE_IDX 0
++#define mmSDMA1_RLC0_IB_OFFSET 0x014c
++#define mmSDMA1_RLC0_IB_OFFSET_BASE_IDX 0
++#define mmSDMA1_RLC0_IB_BASE_LO 0x014d
++#define mmSDMA1_RLC0_IB_BASE_LO_BASE_IDX 0
++#define mmSDMA1_RLC0_IB_BASE_HI 0x014e
++#define mmSDMA1_RLC0_IB_BASE_HI_BASE_IDX 0
++#define mmSDMA1_RLC0_IB_SIZE 0x014f
++#define mmSDMA1_RLC0_IB_SIZE_BASE_IDX 0
++#define mmSDMA1_RLC0_SKIP_CNTL 0x0150
++#define mmSDMA1_RLC0_SKIP_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC0_CONTEXT_STATUS 0x0151
++#define mmSDMA1_RLC0_CONTEXT_STATUS_BASE_IDX 0
++#define mmSDMA1_RLC0_DOORBELL 0x0152
++#define mmSDMA1_RLC0_DOORBELL_BASE_IDX 0
++#define mmSDMA1_RLC0_STATUS 0x0168
++#define mmSDMA1_RLC0_STATUS_BASE_IDX 0
++#define mmSDMA1_RLC0_DOORBELL_LOG 0x0169
++#define mmSDMA1_RLC0_DOORBELL_LOG_BASE_IDX 0
++#define mmSDMA1_RLC0_WATERMARK 0x016a
++#define mmSDMA1_RLC0_WATERMARK_BASE_IDX 0
++#define mmSDMA1_RLC0_DOORBELL_OFFSET 0x016b
++#define mmSDMA1_RLC0_DOORBELL_OFFSET_BASE_IDX 0
++#define mmSDMA1_RLC0_CSA_ADDR_LO 0x016c
++#define mmSDMA1_RLC0_CSA_ADDR_LO_BASE_IDX 0
++#define mmSDMA1_RLC0_CSA_ADDR_HI 0x016d
++#define mmSDMA1_RLC0_CSA_ADDR_HI_BASE_IDX 0
++#define mmSDMA1_RLC0_IB_SUB_REMAIN 0x016f
++#define mmSDMA1_RLC0_IB_SUB_REMAIN_BASE_IDX 0
++#define mmSDMA1_RLC0_PREEMPT 0x0170
++#define mmSDMA1_RLC0_PREEMPT_BASE_IDX 0
++#define mmSDMA1_RLC0_DUMMY_REG 0x0171
++#define mmSDMA1_RLC0_DUMMY_REG_BASE_IDX 0
++#define mmSDMA1_RLC0_RB_WPTR_POLL_ADDR_HI 0x0172
++#define mmSDMA1_RLC0_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
++#define mmSDMA1_RLC0_RB_WPTR_POLL_ADDR_LO 0x0173
++#define mmSDMA1_RLC0_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
++#define mmSDMA1_RLC0_RB_AQL_CNTL 0x0174
++#define mmSDMA1_RLC0_RB_AQL_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC0_MINOR_PTR_UPDATE 0x0175
++#define mmSDMA1_RLC0_MINOR_PTR_UPDATE_BASE_IDX 0
++#define mmSDMA1_RLC0_MIDCMD_DATA0 0x0180
++#define mmSDMA1_RLC0_MIDCMD_DATA0_BASE_IDX 0
++#define mmSDMA1_RLC0_MIDCMD_DATA1 0x0181
++#define mmSDMA1_RLC0_MIDCMD_DATA1_BASE_IDX 0
++#define mmSDMA1_RLC0_MIDCMD_DATA2 0x0182
++#define mmSDMA1_RLC0_MIDCMD_DATA2_BASE_IDX 0
++#define mmSDMA1_RLC0_MIDCMD_DATA3 0x0183
++#define mmSDMA1_RLC0_MIDCMD_DATA3_BASE_IDX 0
++#define mmSDMA1_RLC0_MIDCMD_DATA4 0x0184
++#define mmSDMA1_RLC0_MIDCMD_DATA4_BASE_IDX 0
++#define mmSDMA1_RLC0_MIDCMD_DATA5 0x0185
++#define mmSDMA1_RLC0_MIDCMD_DATA5_BASE_IDX 0
++#define mmSDMA1_RLC0_MIDCMD_DATA6 0x0186
++#define mmSDMA1_RLC0_MIDCMD_DATA6_BASE_IDX 0
++#define mmSDMA1_RLC0_MIDCMD_DATA7 0x0187
++#define mmSDMA1_RLC0_MIDCMD_DATA7_BASE_IDX 0
++#define mmSDMA1_RLC0_MIDCMD_DATA8 0x0188
++#define mmSDMA1_RLC0_MIDCMD_DATA8_BASE_IDX 0
++#define mmSDMA1_RLC0_MIDCMD_CNTL 0x0189
++#define mmSDMA1_RLC0_MIDCMD_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC1_RB_CNTL 0x01a0
++#define mmSDMA1_RLC1_RB_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC1_RB_BASE 0x01a1
++#define mmSDMA1_RLC1_RB_BASE_BASE_IDX 0
++#define mmSDMA1_RLC1_RB_BASE_HI 0x01a2
++#define mmSDMA1_RLC1_RB_BASE_HI_BASE_IDX 0
++#define mmSDMA1_RLC1_RB_RPTR 0x01a3
++#define mmSDMA1_RLC1_RB_RPTR_BASE_IDX 0
++#define mmSDMA1_RLC1_RB_RPTR_HI 0x01a4
++#define mmSDMA1_RLC1_RB_RPTR_HI_BASE_IDX 0
++#define mmSDMA1_RLC1_RB_WPTR 0x01a5
++#define mmSDMA1_RLC1_RB_WPTR_BASE_IDX 0
++#define mmSDMA1_RLC1_RB_WPTR_HI 0x01a6
++#define mmSDMA1_RLC1_RB_WPTR_HI_BASE_IDX 0
++#define mmSDMA1_RLC1_RB_WPTR_POLL_CNTL 0x01a7
++#define mmSDMA1_RLC1_RB_WPTR_POLL_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC1_RB_RPTR_ADDR_HI 0x01a8
++#define mmSDMA1_RLC1_RB_RPTR_ADDR_HI_BASE_IDX 0
++#define mmSDMA1_RLC1_RB_RPTR_ADDR_LO 0x01a9
++#define mmSDMA1_RLC1_RB_RPTR_ADDR_LO_BASE_IDX 0
++#define mmSDMA1_RLC1_IB_CNTL 0x01aa
++#define mmSDMA1_RLC1_IB_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC1_IB_RPTR 0x01ab
++#define mmSDMA1_RLC1_IB_RPTR_BASE_IDX 0
++#define mmSDMA1_RLC1_IB_OFFSET 0x01ac
++#define mmSDMA1_RLC1_IB_OFFSET_BASE_IDX 0
++#define mmSDMA1_RLC1_IB_BASE_LO 0x01ad
++#define mmSDMA1_RLC1_IB_BASE_LO_BASE_IDX 0
++#define mmSDMA1_RLC1_IB_BASE_HI 0x01ae
++#define mmSDMA1_RLC1_IB_BASE_HI_BASE_IDX 0
++#define mmSDMA1_RLC1_IB_SIZE 0x01af
++#define mmSDMA1_RLC1_IB_SIZE_BASE_IDX 0
++#define mmSDMA1_RLC1_SKIP_CNTL 0x01b0
++#define mmSDMA1_RLC1_SKIP_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC1_CONTEXT_STATUS 0x01b1
++#define mmSDMA1_RLC1_CONTEXT_STATUS_BASE_IDX 0
++#define mmSDMA1_RLC1_DOORBELL 0x01b2
++#define mmSDMA1_RLC1_DOORBELL_BASE_IDX 0
++#define mmSDMA1_RLC1_STATUS 0x01c8
++#define mmSDMA1_RLC1_STATUS_BASE_IDX 0
++#define mmSDMA1_RLC1_DOORBELL_LOG 0x01c9
++#define mmSDMA1_RLC1_DOORBELL_LOG_BASE_IDX 0
++#define mmSDMA1_RLC1_WATERMARK 0x01ca
++#define mmSDMA1_RLC1_WATERMARK_BASE_IDX 0
++#define mmSDMA1_RLC1_DOORBELL_OFFSET 0x01cb
++#define mmSDMA1_RLC1_DOORBELL_OFFSET_BASE_IDX 0
++#define mmSDMA1_RLC1_CSA_ADDR_LO 0x01cc
++#define mmSDMA1_RLC1_CSA_ADDR_LO_BASE_IDX 0
++#define mmSDMA1_RLC1_CSA_ADDR_HI 0x01cd
++#define mmSDMA1_RLC1_CSA_ADDR_HI_BASE_IDX 0
++#define mmSDMA1_RLC1_IB_SUB_REMAIN 0x01cf
++#define mmSDMA1_RLC1_IB_SUB_REMAIN_BASE_IDX 0
++#define mmSDMA1_RLC1_PREEMPT 0x01d0
++#define mmSDMA1_RLC1_PREEMPT_BASE_IDX 0
++#define mmSDMA1_RLC1_DUMMY_REG 0x01d1
++#define mmSDMA1_RLC1_DUMMY_REG_BASE_IDX 0
++#define mmSDMA1_RLC1_RB_WPTR_POLL_ADDR_HI 0x01d2
++#define mmSDMA1_RLC1_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
++#define mmSDMA1_RLC1_RB_WPTR_POLL_ADDR_LO 0x01d3
++#define mmSDMA1_RLC1_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
++#define mmSDMA1_RLC1_RB_AQL_CNTL 0x01d4
++#define mmSDMA1_RLC1_RB_AQL_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC1_MINOR_PTR_UPDATE 0x01d5
++#define mmSDMA1_RLC1_MINOR_PTR_UPDATE_BASE_IDX 0
++#define mmSDMA1_RLC1_MIDCMD_DATA0 0x01e0
++#define mmSDMA1_RLC1_MIDCMD_DATA0_BASE_IDX 0
++#define mmSDMA1_RLC1_MIDCMD_DATA1 0x01e1
++#define mmSDMA1_RLC1_MIDCMD_DATA1_BASE_IDX 0
++#define mmSDMA1_RLC1_MIDCMD_DATA2 0x01e2
++#define mmSDMA1_RLC1_MIDCMD_DATA2_BASE_IDX 0
++#define mmSDMA1_RLC1_MIDCMD_DATA3 0x01e3
++#define mmSDMA1_RLC1_MIDCMD_DATA3_BASE_IDX 0
++#define mmSDMA1_RLC1_MIDCMD_DATA4 0x01e4
++#define mmSDMA1_RLC1_MIDCMD_DATA4_BASE_IDX 0
++#define mmSDMA1_RLC1_MIDCMD_DATA5 0x01e5
++#define mmSDMA1_RLC1_MIDCMD_DATA5_BASE_IDX 0
++#define mmSDMA1_RLC1_MIDCMD_DATA6 0x01e6
++#define mmSDMA1_RLC1_MIDCMD_DATA6_BASE_IDX 0
++#define mmSDMA1_RLC1_MIDCMD_DATA7 0x01e7
++#define mmSDMA1_RLC1_MIDCMD_DATA7_BASE_IDX 0
++#define mmSDMA1_RLC1_MIDCMD_DATA8 0x01e8
++#define mmSDMA1_RLC1_MIDCMD_DATA8_BASE_IDX 0
++#define mmSDMA1_RLC1_MIDCMD_CNTL 0x01e9
++#define mmSDMA1_RLC1_MIDCMD_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC2_RB_CNTL 0x0200
++#define mmSDMA1_RLC2_RB_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC2_RB_BASE 0x0201
++#define mmSDMA1_RLC2_RB_BASE_BASE_IDX 0
++#define mmSDMA1_RLC2_RB_BASE_HI 0x0202
++#define mmSDMA1_RLC2_RB_BASE_HI_BASE_IDX 0
++#define mmSDMA1_RLC2_RB_RPTR 0x0203
++#define mmSDMA1_RLC2_RB_RPTR_BASE_IDX 0
++#define mmSDMA1_RLC2_RB_RPTR_HI 0x0204
++#define mmSDMA1_RLC2_RB_RPTR_HI_BASE_IDX 0
++#define mmSDMA1_RLC2_RB_WPTR 0x0205
++#define mmSDMA1_RLC2_RB_WPTR_BASE_IDX 0
++#define mmSDMA1_RLC2_RB_WPTR_HI 0x0206
++#define mmSDMA1_RLC2_RB_WPTR_HI_BASE_IDX 0
++#define mmSDMA1_RLC2_RB_WPTR_POLL_CNTL 0x0207
++#define mmSDMA1_RLC2_RB_WPTR_POLL_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC2_RB_RPTR_ADDR_HI 0x0208
++#define mmSDMA1_RLC2_RB_RPTR_ADDR_HI_BASE_IDX 0
++#define mmSDMA1_RLC2_RB_RPTR_ADDR_LO 0x0209
++#define mmSDMA1_RLC2_RB_RPTR_ADDR_LO_BASE_IDX 0
++#define mmSDMA1_RLC2_IB_CNTL 0x020a
++#define mmSDMA1_RLC2_IB_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC2_IB_RPTR 0x020b
++#define mmSDMA1_RLC2_IB_RPTR_BASE_IDX 0
++#define mmSDMA1_RLC2_IB_OFFSET 0x020c
++#define mmSDMA1_RLC2_IB_OFFSET_BASE_IDX 0
++#define mmSDMA1_RLC2_IB_BASE_LO 0x020d
++#define mmSDMA1_RLC2_IB_BASE_LO_BASE_IDX 0
++#define mmSDMA1_RLC2_IB_BASE_HI 0x020e
++#define mmSDMA1_RLC2_IB_BASE_HI_BASE_IDX 0
++#define mmSDMA1_RLC2_IB_SIZE 0x020f
++#define mmSDMA1_RLC2_IB_SIZE_BASE_IDX 0
++#define mmSDMA1_RLC2_SKIP_CNTL 0x0210
++#define mmSDMA1_RLC2_SKIP_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC2_CONTEXT_STATUS 0x0211
++#define mmSDMA1_RLC2_CONTEXT_STATUS_BASE_IDX 0
++#define mmSDMA1_RLC2_DOORBELL 0x0212
++#define mmSDMA1_RLC2_DOORBELL_BASE_IDX 0
++#define mmSDMA1_RLC2_STATUS 0x0228
++#define mmSDMA1_RLC2_STATUS_BASE_IDX 0
++#define mmSDMA1_RLC2_DOORBELL_LOG 0x0229
++#define mmSDMA1_RLC2_DOORBELL_LOG_BASE_IDX 0
++#define mmSDMA1_RLC2_WATERMARK 0x022a
++#define mmSDMA1_RLC2_WATERMARK_BASE_IDX 0
++#define mmSDMA1_RLC2_DOORBELL_OFFSET 0x022b
++#define mmSDMA1_RLC2_DOORBELL_OFFSET_BASE_IDX 0
++#define mmSDMA1_RLC2_CSA_ADDR_LO 0x022c
++#define mmSDMA1_RLC2_CSA_ADDR_LO_BASE_IDX 0
++#define mmSDMA1_RLC2_CSA_ADDR_HI 0x022d
++#define mmSDMA1_RLC2_CSA_ADDR_HI_BASE_IDX 0
++#define mmSDMA1_RLC2_IB_SUB_REMAIN 0x022f
++#define mmSDMA1_RLC2_IB_SUB_REMAIN_BASE_IDX 0
++#define mmSDMA1_RLC2_PREEMPT 0x0230
++#define mmSDMA1_RLC2_PREEMPT_BASE_IDX 0
++#define mmSDMA1_RLC2_DUMMY_REG 0x0231
++#define mmSDMA1_RLC2_DUMMY_REG_BASE_IDX 0
++#define mmSDMA1_RLC2_RB_WPTR_POLL_ADDR_HI 0x0232
++#define mmSDMA1_RLC2_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
++#define mmSDMA1_RLC2_RB_WPTR_POLL_ADDR_LO 0x0233
++#define mmSDMA1_RLC2_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
++#define mmSDMA1_RLC2_RB_AQL_CNTL 0x0234
++#define mmSDMA1_RLC2_RB_AQL_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC2_MINOR_PTR_UPDATE 0x0235
++#define mmSDMA1_RLC2_MINOR_PTR_UPDATE_BASE_IDX 0
++#define mmSDMA1_RLC2_MIDCMD_DATA0 0x0240
++#define mmSDMA1_RLC2_MIDCMD_DATA0_BASE_IDX 0
++#define mmSDMA1_RLC2_MIDCMD_DATA1 0x0241
++#define mmSDMA1_RLC2_MIDCMD_DATA1_BASE_IDX 0
++#define mmSDMA1_RLC2_MIDCMD_DATA2 0x0242
++#define mmSDMA1_RLC2_MIDCMD_DATA2_BASE_IDX 0
++#define mmSDMA1_RLC2_MIDCMD_DATA3 0x0243
++#define mmSDMA1_RLC2_MIDCMD_DATA3_BASE_IDX 0
++#define mmSDMA1_RLC2_MIDCMD_DATA4 0x0244
++#define mmSDMA1_RLC2_MIDCMD_DATA4_BASE_IDX 0
++#define mmSDMA1_RLC2_MIDCMD_DATA5 0x0245
++#define mmSDMA1_RLC2_MIDCMD_DATA5_BASE_IDX 0
++#define mmSDMA1_RLC2_MIDCMD_DATA6 0x0246
++#define mmSDMA1_RLC2_MIDCMD_DATA6_BASE_IDX 0
++#define mmSDMA1_RLC2_MIDCMD_DATA7 0x0247
++#define mmSDMA1_RLC2_MIDCMD_DATA7_BASE_IDX 0
++#define mmSDMA1_RLC2_MIDCMD_DATA8 0x0248
++#define mmSDMA1_RLC2_MIDCMD_DATA8_BASE_IDX 0
++#define mmSDMA1_RLC2_MIDCMD_CNTL 0x0249
++#define mmSDMA1_RLC2_MIDCMD_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC3_RB_CNTL 0x0260
++#define mmSDMA1_RLC3_RB_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC3_RB_BASE 0x0261
++#define mmSDMA1_RLC3_RB_BASE_BASE_IDX 0
++#define mmSDMA1_RLC3_RB_BASE_HI 0x0262
++#define mmSDMA1_RLC3_RB_BASE_HI_BASE_IDX 0
++#define mmSDMA1_RLC3_RB_RPTR 0x0263
++#define mmSDMA1_RLC3_RB_RPTR_BASE_IDX 0
++#define mmSDMA1_RLC3_RB_RPTR_HI 0x0264
++#define mmSDMA1_RLC3_RB_RPTR_HI_BASE_IDX 0
++#define mmSDMA1_RLC3_RB_WPTR 0x0265
++#define mmSDMA1_RLC3_RB_WPTR_BASE_IDX 0
++#define mmSDMA1_RLC3_RB_WPTR_HI 0x0266
++#define mmSDMA1_RLC3_RB_WPTR_HI_BASE_IDX 0
++#define mmSDMA1_RLC3_RB_WPTR_POLL_CNTL 0x0267
++#define mmSDMA1_RLC3_RB_WPTR_POLL_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC3_RB_RPTR_ADDR_HI 0x0268
++#define mmSDMA1_RLC3_RB_RPTR_ADDR_HI_BASE_IDX 0
++#define mmSDMA1_RLC3_RB_RPTR_ADDR_LO 0x0269
++#define mmSDMA1_RLC3_RB_RPTR_ADDR_LO_BASE_IDX 0
++#define mmSDMA1_RLC3_IB_CNTL 0x026a
++#define mmSDMA1_RLC3_IB_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC3_IB_RPTR 0x026b
++#define mmSDMA1_RLC3_IB_RPTR_BASE_IDX 0
++#define mmSDMA1_RLC3_IB_OFFSET 0x026c
++#define mmSDMA1_RLC3_IB_OFFSET_BASE_IDX 0
++#define mmSDMA1_RLC3_IB_BASE_LO 0x026d
++#define mmSDMA1_RLC3_IB_BASE_LO_BASE_IDX 0
++#define mmSDMA1_RLC3_IB_BASE_HI 0x026e
++#define mmSDMA1_RLC3_IB_BASE_HI_BASE_IDX 0
++#define mmSDMA1_RLC3_IB_SIZE 0x026f
++#define mmSDMA1_RLC3_IB_SIZE_BASE_IDX 0
++#define mmSDMA1_RLC3_SKIP_CNTL 0x0270
++#define mmSDMA1_RLC3_SKIP_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC3_CONTEXT_STATUS 0x0271
++#define mmSDMA1_RLC3_CONTEXT_STATUS_BASE_IDX 0
++#define mmSDMA1_RLC3_DOORBELL 0x0272
++#define mmSDMA1_RLC3_DOORBELL_BASE_IDX 0
++#define mmSDMA1_RLC3_STATUS 0x0288
++#define mmSDMA1_RLC3_STATUS_BASE_IDX 0
++#define mmSDMA1_RLC3_DOORBELL_LOG 0x0289
++#define mmSDMA1_RLC3_DOORBELL_LOG_BASE_IDX 0
++#define mmSDMA1_RLC3_WATERMARK 0x028a
++#define mmSDMA1_RLC3_WATERMARK_BASE_IDX 0
++#define mmSDMA1_RLC3_DOORBELL_OFFSET 0x028b
++#define mmSDMA1_RLC3_DOORBELL_OFFSET_BASE_IDX 0
++#define mmSDMA1_RLC3_CSA_ADDR_LO 0x028c
++#define mmSDMA1_RLC3_CSA_ADDR_LO_BASE_IDX 0
++#define mmSDMA1_RLC3_CSA_ADDR_HI 0x028d
++#define mmSDMA1_RLC3_CSA_ADDR_HI_BASE_IDX 0
++#define mmSDMA1_RLC3_IB_SUB_REMAIN 0x028f
++#define mmSDMA1_RLC3_IB_SUB_REMAIN_BASE_IDX 0
++#define mmSDMA1_RLC3_PREEMPT 0x0290
++#define mmSDMA1_RLC3_PREEMPT_BASE_IDX 0
++#define mmSDMA1_RLC3_DUMMY_REG 0x0291
++#define mmSDMA1_RLC3_DUMMY_REG_BASE_IDX 0
++#define mmSDMA1_RLC3_RB_WPTR_POLL_ADDR_HI 0x0292
++#define mmSDMA1_RLC3_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
++#define mmSDMA1_RLC3_RB_WPTR_POLL_ADDR_LO 0x0293
++#define mmSDMA1_RLC3_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
++#define mmSDMA1_RLC3_RB_AQL_CNTL 0x0294
++#define mmSDMA1_RLC3_RB_AQL_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC3_MINOR_PTR_UPDATE 0x0295
++#define mmSDMA1_RLC3_MINOR_PTR_UPDATE_BASE_IDX 0
++#define mmSDMA1_RLC3_MIDCMD_DATA0 0x02a0
++#define mmSDMA1_RLC3_MIDCMD_DATA0_BASE_IDX 0
++#define mmSDMA1_RLC3_MIDCMD_DATA1 0x02a1
++#define mmSDMA1_RLC3_MIDCMD_DATA1_BASE_IDX 0
++#define mmSDMA1_RLC3_MIDCMD_DATA2 0x02a2
++#define mmSDMA1_RLC3_MIDCMD_DATA2_BASE_IDX 0
++#define mmSDMA1_RLC3_MIDCMD_DATA3 0x02a3
++#define mmSDMA1_RLC3_MIDCMD_DATA3_BASE_IDX 0
++#define mmSDMA1_RLC3_MIDCMD_DATA4 0x02a4
++#define mmSDMA1_RLC3_MIDCMD_DATA4_BASE_IDX 0
++#define mmSDMA1_RLC3_MIDCMD_DATA5 0x02a5
++#define mmSDMA1_RLC3_MIDCMD_DATA5_BASE_IDX 0
++#define mmSDMA1_RLC3_MIDCMD_DATA6 0x02a6
++#define mmSDMA1_RLC3_MIDCMD_DATA6_BASE_IDX 0
++#define mmSDMA1_RLC3_MIDCMD_DATA7 0x02a7
++#define mmSDMA1_RLC3_MIDCMD_DATA7_BASE_IDX 0
++#define mmSDMA1_RLC3_MIDCMD_DATA8 0x02a8
++#define mmSDMA1_RLC3_MIDCMD_DATA8_BASE_IDX 0
++#define mmSDMA1_RLC3_MIDCMD_CNTL 0x02a9
++#define mmSDMA1_RLC3_MIDCMD_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC4_RB_CNTL 0x02c0
++#define mmSDMA1_RLC4_RB_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC4_RB_BASE 0x02c1
++#define mmSDMA1_RLC4_RB_BASE_BASE_IDX 0
++#define mmSDMA1_RLC4_RB_BASE_HI 0x02c2
++#define mmSDMA1_RLC4_RB_BASE_HI_BASE_IDX 0
++#define mmSDMA1_RLC4_RB_RPTR 0x02c3
++#define mmSDMA1_RLC4_RB_RPTR_BASE_IDX 0
++#define mmSDMA1_RLC4_RB_RPTR_HI 0x02c4
++#define mmSDMA1_RLC4_RB_RPTR_HI_BASE_IDX 0
++#define mmSDMA1_RLC4_RB_WPTR 0x02c5
++#define mmSDMA1_RLC4_RB_WPTR_BASE_IDX 0
++#define mmSDMA1_RLC4_RB_WPTR_HI 0x02c6
++#define mmSDMA1_RLC4_RB_WPTR_HI_BASE_IDX 0
++#define mmSDMA1_RLC4_RB_WPTR_POLL_CNTL 0x02c7
++#define mmSDMA1_RLC4_RB_WPTR_POLL_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC4_RB_RPTR_ADDR_HI 0x02c8
++#define mmSDMA1_RLC4_RB_RPTR_ADDR_HI_BASE_IDX 0
++#define mmSDMA1_RLC4_RB_RPTR_ADDR_LO 0x02c9
++#define mmSDMA1_RLC4_RB_RPTR_ADDR_LO_BASE_IDX 0
++#define mmSDMA1_RLC4_IB_CNTL 0x02ca
++#define mmSDMA1_RLC4_IB_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC4_IB_RPTR 0x02cb
++#define mmSDMA1_RLC4_IB_RPTR_BASE_IDX 0
++#define mmSDMA1_RLC4_IB_OFFSET 0x02cc
++#define mmSDMA1_RLC4_IB_OFFSET_BASE_IDX 0
++#define mmSDMA1_RLC4_IB_BASE_LO 0x02cd
++#define mmSDMA1_RLC4_IB_BASE_LO_BASE_IDX 0
++#define mmSDMA1_RLC4_IB_BASE_HI 0x02ce
++#define mmSDMA1_RLC4_IB_BASE_HI_BASE_IDX 0
++#define mmSDMA1_RLC4_IB_SIZE 0x02cf
++#define mmSDMA1_RLC4_IB_SIZE_BASE_IDX 0
++#define mmSDMA1_RLC4_SKIP_CNTL 0x02d0
++#define mmSDMA1_RLC4_SKIP_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC4_CONTEXT_STATUS 0x02d1
++#define mmSDMA1_RLC4_CONTEXT_STATUS_BASE_IDX 0
++#define mmSDMA1_RLC4_DOORBELL 0x02d2
++#define mmSDMA1_RLC4_DOORBELL_BASE_IDX 0
++#define mmSDMA1_RLC4_STATUS 0x02e8
++#define mmSDMA1_RLC4_STATUS_BASE_IDX 0
++#define mmSDMA1_RLC4_DOORBELL_LOG 0x02e9
++#define mmSDMA1_RLC4_DOORBELL_LOG_BASE_IDX 0
++#define mmSDMA1_RLC4_WATERMARK 0x02ea
++#define mmSDMA1_RLC4_WATERMARK_BASE_IDX 0
++#define mmSDMA1_RLC4_DOORBELL_OFFSET 0x02eb
++#define mmSDMA1_RLC4_DOORBELL_OFFSET_BASE_IDX 0
++#define mmSDMA1_RLC4_CSA_ADDR_LO 0x02ec
++#define mmSDMA1_RLC4_CSA_ADDR_LO_BASE_IDX 0
++#define mmSDMA1_RLC4_CSA_ADDR_HI 0x02ed
++#define mmSDMA1_RLC4_CSA_ADDR_HI_BASE_IDX 0
++#define mmSDMA1_RLC4_IB_SUB_REMAIN 0x02ef
++#define mmSDMA1_RLC4_IB_SUB_REMAIN_BASE_IDX 0
++#define mmSDMA1_RLC4_PREEMPT 0x02f0
++#define mmSDMA1_RLC4_PREEMPT_BASE_IDX 0
++#define mmSDMA1_RLC4_DUMMY_REG 0x02f1
++#define mmSDMA1_RLC4_DUMMY_REG_BASE_IDX 0
++#define mmSDMA1_RLC4_RB_WPTR_POLL_ADDR_HI 0x02f2
++#define mmSDMA1_RLC4_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
++#define mmSDMA1_RLC4_RB_WPTR_POLL_ADDR_LO 0x02f3
++#define mmSDMA1_RLC4_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
++#define mmSDMA1_RLC4_RB_AQL_CNTL 0x02f4
++#define mmSDMA1_RLC4_RB_AQL_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC4_MINOR_PTR_UPDATE 0x02f5
++#define mmSDMA1_RLC4_MINOR_PTR_UPDATE_BASE_IDX 0
++#define mmSDMA1_RLC4_MIDCMD_DATA0 0x0300
++#define mmSDMA1_RLC4_MIDCMD_DATA0_BASE_IDX 0
++#define mmSDMA1_RLC4_MIDCMD_DATA1 0x0301
++#define mmSDMA1_RLC4_MIDCMD_DATA1_BASE_IDX 0
++#define mmSDMA1_RLC4_MIDCMD_DATA2 0x0302
++#define mmSDMA1_RLC4_MIDCMD_DATA2_BASE_IDX 0
++#define mmSDMA1_RLC4_MIDCMD_DATA3 0x0303
++#define mmSDMA1_RLC4_MIDCMD_DATA3_BASE_IDX 0
++#define mmSDMA1_RLC4_MIDCMD_DATA4 0x0304
++#define mmSDMA1_RLC4_MIDCMD_DATA4_BASE_IDX 0
++#define mmSDMA1_RLC4_MIDCMD_DATA5 0x0305
++#define mmSDMA1_RLC4_MIDCMD_DATA5_BASE_IDX 0
++#define mmSDMA1_RLC4_MIDCMD_DATA6 0x0306
++#define mmSDMA1_RLC4_MIDCMD_DATA6_BASE_IDX 0
++#define mmSDMA1_RLC4_MIDCMD_DATA7 0x0307
++#define mmSDMA1_RLC4_MIDCMD_DATA7_BASE_IDX 0
++#define mmSDMA1_RLC4_MIDCMD_DATA8 0x0308
++#define mmSDMA1_RLC4_MIDCMD_DATA8_BASE_IDX 0
++#define mmSDMA1_RLC4_MIDCMD_CNTL 0x0309
++#define mmSDMA1_RLC4_MIDCMD_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC5_RB_CNTL 0x0320
++#define mmSDMA1_RLC5_RB_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC5_RB_BASE 0x0321
++#define mmSDMA1_RLC5_RB_BASE_BASE_IDX 0
++#define mmSDMA1_RLC5_RB_BASE_HI 0x0322
++#define mmSDMA1_RLC5_RB_BASE_HI_BASE_IDX 0
++#define mmSDMA1_RLC5_RB_RPTR 0x0323
++#define mmSDMA1_RLC5_RB_RPTR_BASE_IDX 0
++#define mmSDMA1_RLC5_RB_RPTR_HI 0x0324
++#define mmSDMA1_RLC5_RB_RPTR_HI_BASE_IDX 0
++#define mmSDMA1_RLC5_RB_WPTR 0x0325
++#define mmSDMA1_RLC5_RB_WPTR_BASE_IDX 0
++#define mmSDMA1_RLC5_RB_WPTR_HI 0x0326
++#define mmSDMA1_RLC5_RB_WPTR_HI_BASE_IDX 0
++#define mmSDMA1_RLC5_RB_WPTR_POLL_CNTL 0x0327
++#define mmSDMA1_RLC5_RB_WPTR_POLL_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC5_RB_RPTR_ADDR_HI 0x0328
++#define mmSDMA1_RLC5_RB_RPTR_ADDR_HI_BASE_IDX 0
++#define mmSDMA1_RLC5_RB_RPTR_ADDR_LO 0x0329
++#define mmSDMA1_RLC5_RB_RPTR_ADDR_LO_BASE_IDX 0
++#define mmSDMA1_RLC5_IB_CNTL 0x032a
++#define mmSDMA1_RLC5_IB_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC5_IB_RPTR 0x032b
++#define mmSDMA1_RLC5_IB_RPTR_BASE_IDX 0
++#define mmSDMA1_RLC5_IB_OFFSET 0x032c
++#define mmSDMA1_RLC5_IB_OFFSET_BASE_IDX 0
++#define mmSDMA1_RLC5_IB_BASE_LO 0x032d
++#define mmSDMA1_RLC5_IB_BASE_LO_BASE_IDX 0
++#define mmSDMA1_RLC5_IB_BASE_HI 0x032e
++#define mmSDMA1_RLC5_IB_BASE_HI_BASE_IDX 0
++#define mmSDMA1_RLC5_IB_SIZE 0x032f
++#define mmSDMA1_RLC5_IB_SIZE_BASE_IDX 0
++#define mmSDMA1_RLC5_SKIP_CNTL 0x0330
++#define mmSDMA1_RLC5_SKIP_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC5_CONTEXT_STATUS 0x0331
++#define mmSDMA1_RLC5_CONTEXT_STATUS_BASE_IDX 0
++#define mmSDMA1_RLC5_DOORBELL 0x0332
++#define mmSDMA1_RLC5_DOORBELL_BASE_IDX 0
++#define mmSDMA1_RLC5_STATUS 0x0348
++#define mmSDMA1_RLC5_STATUS_BASE_IDX 0
++#define mmSDMA1_RLC5_DOORBELL_LOG 0x0349
++#define mmSDMA1_RLC5_DOORBELL_LOG_BASE_IDX 0
++#define mmSDMA1_RLC5_WATERMARK 0x034a
++#define mmSDMA1_RLC5_WATERMARK_BASE_IDX 0
++#define mmSDMA1_RLC5_DOORBELL_OFFSET 0x034b
++#define mmSDMA1_RLC5_DOORBELL_OFFSET_BASE_IDX 0
++#define mmSDMA1_RLC5_CSA_ADDR_LO 0x034c
++#define mmSDMA1_RLC5_CSA_ADDR_LO_BASE_IDX 0
++#define mmSDMA1_RLC5_CSA_ADDR_HI 0x034d
++#define mmSDMA1_RLC5_CSA_ADDR_HI_BASE_IDX 0
++#define mmSDMA1_RLC5_IB_SUB_REMAIN 0x034f
++#define mmSDMA1_RLC5_IB_SUB_REMAIN_BASE_IDX 0
++#define mmSDMA1_RLC5_PREEMPT 0x0350
++#define mmSDMA1_RLC5_PREEMPT_BASE_IDX 0
++#define mmSDMA1_RLC5_DUMMY_REG 0x0351
++#define mmSDMA1_RLC5_DUMMY_REG_BASE_IDX 0
++#define mmSDMA1_RLC5_RB_WPTR_POLL_ADDR_HI 0x0352
++#define mmSDMA1_RLC5_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
++#define mmSDMA1_RLC5_RB_WPTR_POLL_ADDR_LO 0x0353
++#define mmSDMA1_RLC5_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
++#define mmSDMA1_RLC5_RB_AQL_CNTL 0x0354
++#define mmSDMA1_RLC5_RB_AQL_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC5_MINOR_PTR_UPDATE 0x0355
++#define mmSDMA1_RLC5_MINOR_PTR_UPDATE_BASE_IDX 0
++#define mmSDMA1_RLC5_MIDCMD_DATA0 0x0360
++#define mmSDMA1_RLC5_MIDCMD_DATA0_BASE_IDX 0
++#define mmSDMA1_RLC5_MIDCMD_DATA1 0x0361
++#define mmSDMA1_RLC5_MIDCMD_DATA1_BASE_IDX 0
++#define mmSDMA1_RLC5_MIDCMD_DATA2 0x0362
++#define mmSDMA1_RLC5_MIDCMD_DATA2_BASE_IDX 0
++#define mmSDMA1_RLC5_MIDCMD_DATA3 0x0363
++#define mmSDMA1_RLC5_MIDCMD_DATA3_BASE_IDX 0
++#define mmSDMA1_RLC5_MIDCMD_DATA4 0x0364
++#define mmSDMA1_RLC5_MIDCMD_DATA4_BASE_IDX 0
++#define mmSDMA1_RLC5_MIDCMD_DATA5 0x0365
++#define mmSDMA1_RLC5_MIDCMD_DATA5_BASE_IDX 0
++#define mmSDMA1_RLC5_MIDCMD_DATA6 0x0366
++#define mmSDMA1_RLC5_MIDCMD_DATA6_BASE_IDX 0
++#define mmSDMA1_RLC5_MIDCMD_DATA7 0x0367
++#define mmSDMA1_RLC5_MIDCMD_DATA7_BASE_IDX 0
++#define mmSDMA1_RLC5_MIDCMD_DATA8 0x0368
++#define mmSDMA1_RLC5_MIDCMD_DATA8_BASE_IDX 0
++#define mmSDMA1_RLC5_MIDCMD_CNTL 0x0369
++#define mmSDMA1_RLC5_MIDCMD_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC6_RB_CNTL 0x0380
++#define mmSDMA1_RLC6_RB_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC6_RB_BASE 0x0381
++#define mmSDMA1_RLC6_RB_BASE_BASE_IDX 0
++#define mmSDMA1_RLC6_RB_BASE_HI 0x0382
++#define mmSDMA1_RLC6_RB_BASE_HI_BASE_IDX 0
++#define mmSDMA1_RLC6_RB_RPTR 0x0383
++#define mmSDMA1_RLC6_RB_RPTR_BASE_IDX 0
++#define mmSDMA1_RLC6_RB_RPTR_HI 0x0384
++#define mmSDMA1_RLC6_RB_RPTR_HI_BASE_IDX 0
++#define mmSDMA1_RLC6_RB_WPTR 0x0385
++#define mmSDMA1_RLC6_RB_WPTR_BASE_IDX 0
++#define mmSDMA1_RLC6_RB_WPTR_HI 0x0386
++#define mmSDMA1_RLC6_RB_WPTR_HI_BASE_IDX 0
++#define mmSDMA1_RLC6_RB_WPTR_POLL_CNTL 0x0387
++#define mmSDMA1_RLC6_RB_WPTR_POLL_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC6_RB_RPTR_ADDR_HI 0x0388
++#define mmSDMA1_RLC6_RB_RPTR_ADDR_HI_BASE_IDX 0
++#define mmSDMA1_RLC6_RB_RPTR_ADDR_LO 0x0389
++#define mmSDMA1_RLC6_RB_RPTR_ADDR_LO_BASE_IDX 0
++#define mmSDMA1_RLC6_IB_CNTL 0x038a
++#define mmSDMA1_RLC6_IB_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC6_IB_RPTR 0x038b
++#define mmSDMA1_RLC6_IB_RPTR_BASE_IDX 0
++#define mmSDMA1_RLC6_IB_OFFSET 0x038c
++#define mmSDMA1_RLC6_IB_OFFSET_BASE_IDX 0
++#define mmSDMA1_RLC6_IB_BASE_LO 0x038d
++#define mmSDMA1_RLC6_IB_BASE_LO_BASE_IDX 0
++#define mmSDMA1_RLC6_IB_BASE_HI 0x038e
++#define mmSDMA1_RLC6_IB_BASE_HI_BASE_IDX 0
++#define mmSDMA1_RLC6_IB_SIZE 0x038f
++#define mmSDMA1_RLC6_IB_SIZE_BASE_IDX 0
++#define mmSDMA1_RLC6_SKIP_CNTL 0x0390
++#define mmSDMA1_RLC6_SKIP_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC6_CONTEXT_STATUS 0x0391
++#define mmSDMA1_RLC6_CONTEXT_STATUS_BASE_IDX 0
++#define mmSDMA1_RLC6_DOORBELL 0x0392
++#define mmSDMA1_RLC6_DOORBELL_BASE_IDX 0
++#define mmSDMA1_RLC6_STATUS 0x03a8
++#define mmSDMA1_RLC6_STATUS_BASE_IDX 0
++#define mmSDMA1_RLC6_DOORBELL_LOG 0x03a9
++#define mmSDMA1_RLC6_DOORBELL_LOG_BASE_IDX 0
++#define mmSDMA1_RLC6_WATERMARK 0x03aa
++#define mmSDMA1_RLC6_WATERMARK_BASE_IDX 0
++#define mmSDMA1_RLC6_DOORBELL_OFFSET 0x03ab
++#define mmSDMA1_RLC6_DOORBELL_OFFSET_BASE_IDX 0
++#define mmSDMA1_RLC6_CSA_ADDR_LO 0x03ac
++#define mmSDMA1_RLC6_CSA_ADDR_LO_BASE_IDX 0
++#define mmSDMA1_RLC6_CSA_ADDR_HI 0x03ad
++#define mmSDMA1_RLC6_CSA_ADDR_HI_BASE_IDX 0
++#define mmSDMA1_RLC6_IB_SUB_REMAIN 0x03af
++#define mmSDMA1_RLC6_IB_SUB_REMAIN_BASE_IDX 0
++#define mmSDMA1_RLC6_PREEMPT 0x03b0
++#define mmSDMA1_RLC6_PREEMPT_BASE_IDX 0
++#define mmSDMA1_RLC6_DUMMY_REG 0x03b1
++#define mmSDMA1_RLC6_DUMMY_REG_BASE_IDX 0
++#define mmSDMA1_RLC6_RB_WPTR_POLL_ADDR_HI 0x03b2
++#define mmSDMA1_RLC6_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
++#define mmSDMA1_RLC6_RB_WPTR_POLL_ADDR_LO 0x03b3
++#define mmSDMA1_RLC6_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
++#define mmSDMA1_RLC6_RB_AQL_CNTL 0x03b4
++#define mmSDMA1_RLC6_RB_AQL_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC6_MINOR_PTR_UPDATE 0x03b5
++#define mmSDMA1_RLC6_MINOR_PTR_UPDATE_BASE_IDX 0
++#define mmSDMA1_RLC6_MIDCMD_DATA0 0x03c0
++#define mmSDMA1_RLC6_MIDCMD_DATA0_BASE_IDX 0
++#define mmSDMA1_RLC6_MIDCMD_DATA1 0x03c1
++#define mmSDMA1_RLC6_MIDCMD_DATA1_BASE_IDX 0
++#define mmSDMA1_RLC6_MIDCMD_DATA2 0x03c2
++#define mmSDMA1_RLC6_MIDCMD_DATA2_BASE_IDX 0
++#define mmSDMA1_RLC6_MIDCMD_DATA3 0x03c3
++#define mmSDMA1_RLC6_MIDCMD_DATA3_BASE_IDX 0
++#define mmSDMA1_RLC6_MIDCMD_DATA4 0x03c4
++#define mmSDMA1_RLC6_MIDCMD_DATA4_BASE_IDX 0
++#define mmSDMA1_RLC6_MIDCMD_DATA5 0x03c5
++#define mmSDMA1_RLC6_MIDCMD_DATA5_BASE_IDX 0
++#define mmSDMA1_RLC6_MIDCMD_DATA6 0x03c6
++#define mmSDMA1_RLC6_MIDCMD_DATA6_BASE_IDX 0
++#define mmSDMA1_RLC6_MIDCMD_DATA7 0x03c7
++#define mmSDMA1_RLC6_MIDCMD_DATA7_BASE_IDX 0
++#define mmSDMA1_RLC6_MIDCMD_DATA8 0x03c8
++#define mmSDMA1_RLC6_MIDCMD_DATA8_BASE_IDX 0
++#define mmSDMA1_RLC6_MIDCMD_CNTL 0x03c9
++#define mmSDMA1_RLC6_MIDCMD_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC7_RB_CNTL 0x03e0
++#define mmSDMA1_RLC7_RB_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC7_RB_BASE 0x03e1
++#define mmSDMA1_RLC7_RB_BASE_BASE_IDX 0
++#define mmSDMA1_RLC7_RB_BASE_HI 0x03e2
++#define mmSDMA1_RLC7_RB_BASE_HI_BASE_IDX 0
++#define mmSDMA1_RLC7_RB_RPTR 0x03e3
++#define mmSDMA1_RLC7_RB_RPTR_BASE_IDX 0
++#define mmSDMA1_RLC7_RB_RPTR_HI 0x03e4
++#define mmSDMA1_RLC7_RB_RPTR_HI_BASE_IDX 0
++#define mmSDMA1_RLC7_RB_WPTR 0x03e5
++#define mmSDMA1_RLC7_RB_WPTR_BASE_IDX 0
++#define mmSDMA1_RLC7_RB_WPTR_HI 0x03e6
++#define mmSDMA1_RLC7_RB_WPTR_HI_BASE_IDX 0
++#define mmSDMA1_RLC7_RB_WPTR_POLL_CNTL 0x03e7
++#define mmSDMA1_RLC7_RB_WPTR_POLL_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC7_RB_RPTR_ADDR_HI 0x03e8
++#define mmSDMA1_RLC7_RB_RPTR_ADDR_HI_BASE_IDX 0
++#define mmSDMA1_RLC7_RB_RPTR_ADDR_LO 0x03e9
++#define mmSDMA1_RLC7_RB_RPTR_ADDR_LO_BASE_IDX 0
++#define mmSDMA1_RLC7_IB_CNTL 0x03ea
++#define mmSDMA1_RLC7_IB_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC7_IB_RPTR 0x03eb
++#define mmSDMA1_RLC7_IB_RPTR_BASE_IDX 0
++#define mmSDMA1_RLC7_IB_OFFSET 0x03ec
++#define mmSDMA1_RLC7_IB_OFFSET_BASE_IDX 0
++#define mmSDMA1_RLC7_IB_BASE_LO 0x03ed
++#define mmSDMA1_RLC7_IB_BASE_LO_BASE_IDX 0
++#define mmSDMA1_RLC7_IB_BASE_HI 0x03ee
++#define mmSDMA1_RLC7_IB_BASE_HI_BASE_IDX 0
++#define mmSDMA1_RLC7_IB_SIZE 0x03ef
++#define mmSDMA1_RLC7_IB_SIZE_BASE_IDX 0
++#define mmSDMA1_RLC7_SKIP_CNTL 0x03f0
++#define mmSDMA1_RLC7_SKIP_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC7_CONTEXT_STATUS 0x03f1
++#define mmSDMA1_RLC7_CONTEXT_STATUS_BASE_IDX 0
++#define mmSDMA1_RLC7_DOORBELL 0x03f2
++#define mmSDMA1_RLC7_DOORBELL_BASE_IDX 0
++#define mmSDMA1_RLC7_STATUS 0x0408
++#define mmSDMA1_RLC7_STATUS_BASE_IDX 0
++#define mmSDMA1_RLC7_DOORBELL_LOG 0x0409
++#define mmSDMA1_RLC7_DOORBELL_LOG_BASE_IDX 0
++#define mmSDMA1_RLC7_WATERMARK 0x040a
++#define mmSDMA1_RLC7_WATERMARK_BASE_IDX 0
++#define mmSDMA1_RLC7_DOORBELL_OFFSET 0x040b
++#define mmSDMA1_RLC7_DOORBELL_OFFSET_BASE_IDX 0
++#define mmSDMA1_RLC7_CSA_ADDR_LO 0x040c
++#define mmSDMA1_RLC7_CSA_ADDR_LO_BASE_IDX 0
++#define mmSDMA1_RLC7_CSA_ADDR_HI 0x040d
++#define mmSDMA1_RLC7_CSA_ADDR_HI_BASE_IDX 0
++#define mmSDMA1_RLC7_IB_SUB_REMAIN 0x040f
++#define mmSDMA1_RLC7_IB_SUB_REMAIN_BASE_IDX 0
++#define mmSDMA1_RLC7_PREEMPT 0x0410
++#define mmSDMA1_RLC7_PREEMPT_BASE_IDX 0
++#define mmSDMA1_RLC7_DUMMY_REG 0x0411
++#define mmSDMA1_RLC7_DUMMY_REG_BASE_IDX 0
++#define mmSDMA1_RLC7_RB_WPTR_POLL_ADDR_HI 0x0412
++#define mmSDMA1_RLC7_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
++#define mmSDMA1_RLC7_RB_WPTR_POLL_ADDR_LO 0x0413
++#define mmSDMA1_RLC7_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
++#define mmSDMA1_RLC7_RB_AQL_CNTL 0x0414
++#define mmSDMA1_RLC7_RB_AQL_CNTL_BASE_IDX 0
++#define mmSDMA1_RLC7_MINOR_PTR_UPDATE 0x0415
++#define mmSDMA1_RLC7_MINOR_PTR_UPDATE_BASE_IDX 0
++#define mmSDMA1_RLC7_MIDCMD_DATA0 0x0420
++#define mmSDMA1_RLC7_MIDCMD_DATA0_BASE_IDX 0
++#define mmSDMA1_RLC7_MIDCMD_DATA1 0x0421
++#define mmSDMA1_RLC7_MIDCMD_DATA1_BASE_IDX 0
++#define mmSDMA1_RLC7_MIDCMD_DATA2 0x0422
++#define mmSDMA1_RLC7_MIDCMD_DATA2_BASE_IDX 0
++#define mmSDMA1_RLC7_MIDCMD_DATA3 0x0423
++#define mmSDMA1_RLC7_MIDCMD_DATA3_BASE_IDX 0
++#define mmSDMA1_RLC7_MIDCMD_DATA4 0x0424
++#define mmSDMA1_RLC7_MIDCMD_DATA4_BASE_IDX 0
++#define mmSDMA1_RLC7_MIDCMD_DATA5 0x0425
++#define mmSDMA1_RLC7_MIDCMD_DATA5_BASE_IDX 0
++#define mmSDMA1_RLC7_MIDCMD_DATA6 0x0426
++#define mmSDMA1_RLC7_MIDCMD_DATA6_BASE_IDX 0
++#define mmSDMA1_RLC7_MIDCMD_DATA7 0x0427
++#define mmSDMA1_RLC7_MIDCMD_DATA7_BASE_IDX 0
++#define mmSDMA1_RLC7_MIDCMD_DATA8 0x0428
++#define mmSDMA1_RLC7_MIDCMD_DATA8_BASE_IDX 0
++#define mmSDMA1_RLC7_MIDCMD_CNTL 0x0429
++#define mmSDMA1_RLC7_MIDCMD_CNTL_BASE_IDX 0
++
++#endif
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_2_sh_mask.h
+new file mode 100644
+index 0000000..0420ca5
+--- /dev/null
++++ b/drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_2_sh_mask.h
+@@ -0,0 +1,2948 @@
++/*
++ * Copyright (C) 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
++ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++#ifndef _sdma1_4_2_0_SH_MASK_HEADER
++#define _sdma1_4_2_0_SH_MASK_HEADER
++
++
++// addressBlock: sdma1_sdma1dec
++//SDMA1_UCODE_ADDR
++#define SDMA1_UCODE_ADDR__VALUE__SHIFT 0x0
++#define SDMA1_UCODE_ADDR__VALUE_MASK 0x00001FFFL
++//SDMA1_UCODE_DATA
++#define SDMA1_UCODE_DATA__VALUE__SHIFT 0x0
++#define SDMA1_UCODE_DATA__VALUE_MASK 0xFFFFFFFFL
++//SDMA1_VM_CNTL
++#define SDMA1_VM_CNTL__CMD__SHIFT 0x0
++#define SDMA1_VM_CNTL__CMD_MASK 0x0000000FL
++//SDMA1_VM_CTX_LO
++#define SDMA1_VM_CTX_LO__ADDR__SHIFT 0x2
++#define SDMA1_VM_CTX_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA1_VM_CTX_HI
++#define SDMA1_VM_CTX_HI__ADDR__SHIFT 0x0
++#define SDMA1_VM_CTX_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_ACTIVE_FCN_ID
++#define SDMA1_ACTIVE_FCN_ID__VFID__SHIFT 0x0
++#define SDMA1_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4
++#define SDMA1_ACTIVE_FCN_ID__VF__SHIFT 0x1f
++#define SDMA1_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
++#define SDMA1_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L
++#define SDMA1_ACTIVE_FCN_ID__VF_MASK 0x80000000L
++//SDMA1_VM_CTX_CNTL
++#define SDMA1_VM_CTX_CNTL__PRIV__SHIFT 0x0
++#define SDMA1_VM_CTX_CNTL__VMID__SHIFT 0x4
++#define SDMA1_VM_CTX_CNTL__PRIV_MASK 0x00000001L
++#define SDMA1_VM_CTX_CNTL__VMID_MASK 0x000000F0L
++//SDMA1_VIRT_RESET_REQ
++#define SDMA1_VIRT_RESET_REQ__VF__SHIFT 0x0
++#define SDMA1_VIRT_RESET_REQ__PF__SHIFT 0x1f
++#define SDMA1_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
++#define SDMA1_VIRT_RESET_REQ__PF_MASK 0x80000000L
++//SDMA1_VF_ENABLE
++#define SDMA1_VF_ENABLE__VF_ENABLE__SHIFT 0x0
++#define SDMA1_VF_ENABLE__VF_ENABLE_MASK 0x00000001L
++//SDMA1_CONTEXT_REG_TYPE0
++#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_CNTL__SHIFT 0x0
++#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_BASE__SHIFT 0x1
++#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_BASE_HI__SHIFT 0x2
++#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR__SHIFT 0x3
++#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_HI__SHIFT 0x4
++#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR__SHIFT 0x5
++#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR_HI__SHIFT 0x6
++#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR_POLL_CNTL__SHIFT 0x7
++#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_ADDR_HI__SHIFT 0x8
++#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_ADDR_LO__SHIFT 0x9
++#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_CNTL__SHIFT 0xa
++#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_RPTR__SHIFT 0xb
++#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_OFFSET__SHIFT 0xc
++#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_BASE_LO__SHIFT 0xd
++#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_BASE_HI__SHIFT 0xe
++#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_SIZE__SHIFT 0xf
++#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_SKIP_CNTL__SHIFT 0x10
++#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_CONTEXT_STATUS__SHIFT 0x11
++#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_DOORBELL__SHIFT 0x12
++#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_CONTEXT_CNTL__SHIFT 0x13
++#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_CNTL_MASK 0x00000001L
++#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_BASE_MASK 0x00000002L
++#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_BASE_HI_MASK 0x00000004L
++#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_MASK 0x00000008L
++#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_HI_MASK 0x00000010L
++#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR_MASK 0x00000020L
++#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR_HI_MASK 0x00000040L
++#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR_POLL_CNTL_MASK 0x00000080L
++#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_ADDR_HI_MASK 0x00000100L
++#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_ADDR_LO_MASK 0x00000200L
++#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_CNTL_MASK 0x00000400L
++#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_RPTR_MASK 0x00000800L
++#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_OFFSET_MASK 0x00001000L
++#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_BASE_LO_MASK 0x00002000L
++#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_BASE_HI_MASK 0x00004000L
++#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_SIZE_MASK 0x00008000L
++#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_SKIP_CNTL_MASK 0x00010000L
++#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_CONTEXT_STATUS_MASK 0x00020000L
++#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_DOORBELL_MASK 0x00040000L
++#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_CONTEXT_CNTL_MASK 0x00080000L
++//SDMA1_CONTEXT_REG_TYPE1
++#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_STATUS__SHIFT 0x8
++#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DOORBELL_LOG__SHIFT 0x9
++#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_WATERMARK__SHIFT 0xa
++#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DOORBELL_OFFSET__SHIFT 0xb
++#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_CSA_ADDR_LO__SHIFT 0xc
++#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_CSA_ADDR_HI__SHIFT 0xd
++#define SDMA1_CONTEXT_REG_TYPE1__VOID_REG2__SHIFT 0xe
++#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_IB_SUB_REMAIN__SHIFT 0xf
++#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_PREEMPT__SHIFT 0x10
++#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DUMMY_REG__SHIFT 0x11
++#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_WPTR_POLL_ADDR_HI__SHIFT 0x12
++#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_WPTR_POLL_ADDR_LO__SHIFT 0x13
++#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_AQL_CNTL__SHIFT 0x14
++#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_MINOR_PTR_UPDATE__SHIFT 0x15
++#define SDMA1_CONTEXT_REG_TYPE1__RESERVED__SHIFT 0x16
++#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_STATUS_MASK 0x00000100L
++#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DOORBELL_LOG_MASK 0x00000200L
++#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_WATERMARK_MASK 0x00000400L
++#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DOORBELL_OFFSET_MASK 0x00000800L
++#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_CSA_ADDR_LO_MASK 0x00001000L
++#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_CSA_ADDR_HI_MASK 0x00002000L
++#define SDMA1_CONTEXT_REG_TYPE1__VOID_REG2_MASK 0x00004000L
++#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_IB_SUB_REMAIN_MASK 0x00008000L
++#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_PREEMPT_MASK 0x00010000L
++#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DUMMY_REG_MASK 0x00020000L
++#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_WPTR_POLL_ADDR_HI_MASK 0x00040000L
++#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_WPTR_POLL_ADDR_LO_MASK 0x00080000L
++#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_AQL_CNTL_MASK 0x00100000L
++#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_MINOR_PTR_UPDATE_MASK 0x00200000L
++#define SDMA1_CONTEXT_REG_TYPE1__RESERVED_MASK 0xFFC00000L
++//SDMA1_CONTEXT_REG_TYPE2
++#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA0__SHIFT 0x0
++#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA1__SHIFT 0x1
++#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA2__SHIFT 0x2
++#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA3__SHIFT 0x3
++#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA4__SHIFT 0x4
++#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA5__SHIFT 0x5
++#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA6__SHIFT 0x6
++#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA7__SHIFT 0x7
++#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA8__SHIFT 0x8
++#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_CNTL__SHIFT 0x9
++#define SDMA1_CONTEXT_REG_TYPE2__RESERVED__SHIFT 0xa
++#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA0_MASK 0x00000001L
++#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA1_MASK 0x00000002L
++#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA2_MASK 0x00000004L
++#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA3_MASK 0x00000008L
++#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA4_MASK 0x00000010L
++#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA5_MASK 0x00000020L
++#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA6_MASK 0x00000040L
++#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA7_MASK 0x00000080L
++#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA8_MASK 0x00000100L
++#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_CNTL_MASK 0x00000200L
++#define SDMA1_CONTEXT_REG_TYPE2__RESERVED_MASK 0xFFFFFC00L
++//SDMA1_CONTEXT_REG_TYPE3
++#define SDMA1_CONTEXT_REG_TYPE3__RESERVED__SHIFT 0x0
++#define SDMA1_CONTEXT_REG_TYPE3__RESERVED_MASK 0xFFFFFFFFL
++//SDMA1_PUB_REG_TYPE0
++#define SDMA1_PUB_REG_TYPE0__SDMA1_UCODE_ADDR__SHIFT 0x0
++#define SDMA1_PUB_REG_TYPE0__SDMA1_UCODE_DATA__SHIFT 0x1
++#define SDMA1_PUB_REG_TYPE0__SDMA1_REGISTER_SECURITY_CNTL__SHIFT 0x2
++#define SDMA1_PUB_REG_TYPE0__RESERVED3__SHIFT 0x3
++#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CNTL__SHIFT 0x4
++#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_LO__SHIFT 0x5
++#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_HI__SHIFT 0x6
++#define SDMA1_PUB_REG_TYPE0__SDMA1_ACTIVE_FCN_ID__SHIFT 0x7
++#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_CNTL__SHIFT 0x8
++#define SDMA1_PUB_REG_TYPE0__SDMA1_VIRT_RESET_REQ__SHIFT 0x9
++#define SDMA1_PUB_REG_TYPE0__RESERVED10__SHIFT 0xa
++#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE0__SHIFT 0xb
++#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE1__SHIFT 0xc
++#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE2__SHIFT 0xd
++#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE3__SHIFT 0xe
++#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE0__SHIFT 0xf
++#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE1__SHIFT 0x10
++#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE2__SHIFT 0x11
++#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE3__SHIFT 0x12
++#define SDMA1_PUB_REG_TYPE0__SDMA1_MMHUB_CNTL__SHIFT 0x13
++#define SDMA1_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY__SHIFT 0x14
++#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_GROUP_BOUNDARY__SHIFT 0x19
++#define SDMA1_PUB_REG_TYPE0__SDMA1_POWER_CNTL__SHIFT 0x1a
++#define SDMA1_PUB_REG_TYPE0__SDMA1_CLK_CTRL__SHIFT 0x1b
++#define SDMA1_PUB_REG_TYPE0__SDMA1_CNTL__SHIFT 0x1c
++#define SDMA1_PUB_REG_TYPE0__SDMA1_CHICKEN_BITS__SHIFT 0x1d
++#define SDMA1_PUB_REG_TYPE0__SDMA1_GB_ADDR_CONFIG__SHIFT 0x1e
++#define SDMA1_PUB_REG_TYPE0__SDMA1_GB_ADDR_CONFIG_READ__SHIFT 0x1f
++#define SDMA1_PUB_REG_TYPE0__SDMA1_UCODE_ADDR_MASK 0x00000001L
++#define SDMA1_PUB_REG_TYPE0__SDMA1_UCODE_DATA_MASK 0x00000002L
++#define SDMA1_PUB_REG_TYPE0__SDMA1_REGISTER_SECURITY_CNTL_MASK 0x00000004L
++#define SDMA1_PUB_REG_TYPE0__RESERVED3_MASK 0x00000008L
++#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CNTL_MASK 0x00000010L
++#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_LO_MASK 0x00000020L
++#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_HI_MASK 0x00000040L
++#define SDMA1_PUB_REG_TYPE0__SDMA1_ACTIVE_FCN_ID_MASK 0x00000080L
++#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_CNTL_MASK 0x00000100L
++#define SDMA1_PUB_REG_TYPE0__SDMA1_VIRT_RESET_REQ_MASK 0x00000200L
++#define SDMA1_PUB_REG_TYPE0__RESERVED10_MASK 0x00000400L
++#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE0_MASK 0x00000800L
++#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE1_MASK 0x00001000L
++#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE2_MASK 0x00002000L
++#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE3_MASK 0x00004000L
++#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE0_MASK 0x00008000L
++#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE1_MASK 0x00010000L
++#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE2_MASK 0x00020000L
++#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE3_MASK 0x00040000L
++#define SDMA1_PUB_REG_TYPE0__SDMA1_MMHUB_CNTL_MASK 0x00080000L
++#define SDMA1_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY_MASK 0x01F00000L
++#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_GROUP_BOUNDARY_MASK 0x02000000L
++#define SDMA1_PUB_REG_TYPE0__SDMA1_POWER_CNTL_MASK 0x04000000L
++#define SDMA1_PUB_REG_TYPE0__SDMA1_CLK_CTRL_MASK 0x08000000L
++#define SDMA1_PUB_REG_TYPE0__SDMA1_CNTL_MASK 0x10000000L
++#define SDMA1_PUB_REG_TYPE0__SDMA1_CHICKEN_BITS_MASK 0x20000000L
++#define SDMA1_PUB_REG_TYPE0__SDMA1_GB_ADDR_CONFIG_MASK 0x40000000L
++#define SDMA1_PUB_REG_TYPE0__SDMA1_GB_ADDR_CONFIG_READ_MASK 0x80000000L
++//SDMA1_PUB_REG_TYPE1
++#define SDMA1_PUB_REG_TYPE1__SDMA1_RB_RPTR_FETCH_HI__SHIFT 0x0
++#define SDMA1_PUB_REG_TYPE1__SDMA1_SEM_WAIT_FAIL_TIMER_CNTL__SHIFT 0x1
++#define SDMA1_PUB_REG_TYPE1__SDMA1_RB_RPTR_FETCH__SHIFT 0x2
++#define SDMA1_PUB_REG_TYPE1__SDMA1_IB_OFFSET_FETCH__SHIFT 0x3
++#define SDMA1_PUB_REG_TYPE1__SDMA1_PROGRAM__SHIFT 0x4
++#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS_REG__SHIFT 0x5
++#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS1_REG__SHIFT 0x6
++#define SDMA1_PUB_REG_TYPE1__SDMA1_RD_BURST_CNTL__SHIFT 0x7
++#define SDMA1_PUB_REG_TYPE1__SDMA1_HBM_PAGE_CONFIG__SHIFT 0x8
++#define SDMA1_PUB_REG_TYPE1__SDMA1_UCODE_CHECKSUM__SHIFT 0x9
++#define SDMA1_PUB_REG_TYPE1__SDMA1_F32_CNTL__SHIFT 0xa
++#define SDMA1_PUB_REG_TYPE1__SDMA1_FREEZE__SHIFT 0xb
++#define SDMA1_PUB_REG_TYPE1__SDMA1_PHASE0_QUANTUM__SHIFT 0xc
++#define SDMA1_PUB_REG_TYPE1__SDMA1_PHASE1_QUANTUM__SHIFT 0xd
++#define SDMA1_PUB_REG_TYPE1__SDMA_POWER_GATING__SHIFT 0xe
++#define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG__SHIFT 0xf
++#define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_WRITE__SHIFT 0x10
++#define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_READ__SHIFT 0x11
++#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_CONFIG__SHIFT 0x12
++#define SDMA1_PUB_REG_TYPE1__SDMA1_BA_THRESHOLD__SHIFT 0x13
++#define SDMA1_PUB_REG_TYPE1__SDMA1_ID__SHIFT 0x14
++#define SDMA1_PUB_REG_TYPE1__SDMA1_VERSION__SHIFT 0x15
++#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_COUNTER__SHIFT 0x16
++#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_COUNTER_CLEAR__SHIFT 0x17
++#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS2_REG__SHIFT 0x18
++#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_CNTL__SHIFT 0x19
++#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_PREOP_LO__SHIFT 0x1a
++#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_PREOP_HI__SHIFT 0x1b
++#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_CNTL__SHIFT 0x1c
++#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_WATERMK__SHIFT 0x1d
++#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_RD_STATUS__SHIFT 0x1e
++#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_WR_STATUS__SHIFT 0x1f
++#define SDMA1_PUB_REG_TYPE1__SDMA1_RB_RPTR_FETCH_HI_MASK 0x00000001L
++#define SDMA1_PUB_REG_TYPE1__SDMA1_SEM_WAIT_FAIL_TIMER_CNTL_MASK 0x00000002L
++#define SDMA1_PUB_REG_TYPE1__SDMA1_RB_RPTR_FETCH_MASK 0x00000004L
++#define SDMA1_PUB_REG_TYPE1__SDMA1_IB_OFFSET_FETCH_MASK 0x00000008L
++#define SDMA1_PUB_REG_TYPE1__SDMA1_PROGRAM_MASK 0x00000010L
++#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS_REG_MASK 0x00000020L
++#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS1_REG_MASK 0x00000040L
++#define SDMA1_PUB_REG_TYPE1__SDMA1_RD_BURST_CNTL_MASK 0x00000080L
++#define SDMA1_PUB_REG_TYPE1__SDMA1_HBM_PAGE_CONFIG_MASK 0x00000100L
++#define SDMA1_PUB_REG_TYPE1__SDMA1_UCODE_CHECKSUM_MASK 0x00000200L
++#define SDMA1_PUB_REG_TYPE1__SDMA1_F32_CNTL_MASK 0x00000400L
++#define SDMA1_PUB_REG_TYPE1__SDMA1_FREEZE_MASK 0x00000800L
++#define SDMA1_PUB_REG_TYPE1__SDMA1_PHASE0_QUANTUM_MASK 0x00001000L
++#define SDMA1_PUB_REG_TYPE1__SDMA1_PHASE1_QUANTUM_MASK 0x00002000L
++#define SDMA1_PUB_REG_TYPE1__SDMA_POWER_GATING_MASK 0x00004000L
++#define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG_MASK 0x00008000L
++#define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_WRITE_MASK 0x00010000L
++#define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_READ_MASK 0x00020000L
++#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_CONFIG_MASK 0x00040000L
++#define SDMA1_PUB_REG_TYPE1__SDMA1_BA_THRESHOLD_MASK 0x00080000L
++#define SDMA1_PUB_REG_TYPE1__SDMA1_ID_MASK 0x00100000L
++#define SDMA1_PUB_REG_TYPE1__SDMA1_VERSION_MASK 0x00200000L
++#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_COUNTER_MASK 0x00400000L
++#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_COUNTER_CLEAR_MASK 0x00800000L
++#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS2_REG_MASK 0x01000000L
++#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_CNTL_MASK 0x02000000L
++#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_PREOP_LO_MASK 0x04000000L
++#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_PREOP_HI_MASK 0x08000000L
++#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_CNTL_MASK 0x10000000L
++#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_WATERMK_MASK 0x20000000L
++#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_RD_STATUS_MASK 0x40000000L
++#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_WR_STATUS_MASK 0x80000000L
++//SDMA1_PUB_REG_TYPE2
++#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV0__SHIFT 0x0
++#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV1__SHIFT 0x1
++#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV2__SHIFT 0x2
++#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_XNACK0__SHIFT 0x3
++#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_XNACK1__SHIFT 0x4
++#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_XNACK0__SHIFT 0x5
++#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_XNACK1__SHIFT 0x6
++#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_TIMEOUT__SHIFT 0x7
++#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_PAGE__SHIFT 0x8
++#define SDMA1_PUB_REG_TYPE2__SDMA1_POWER_CNTL_IDLE__SHIFT 0x9
++#define SDMA1_PUB_REG_TYPE2__SDMA1_RELAX_ORDERING_LUT__SHIFT 0xa
++#define SDMA1_PUB_REG_TYPE2__SDMA1_CHICKEN_BITS_2__SHIFT 0xb
++#define SDMA1_PUB_REG_TYPE2__SDMA1_STATUS3_REG__SHIFT 0xc
++#define SDMA1_PUB_REG_TYPE2__SDMA1_PHYSICAL_ADDR_LO__SHIFT 0xd
++#define SDMA1_PUB_REG_TYPE2__SDMA1_PHYSICAL_ADDR_HI__SHIFT 0xe
++#define SDMA1_PUB_REG_TYPE2__SDMA1_PHASE2_QUANTUM__SHIFT 0xf
++#define SDMA1_PUB_REG_TYPE2__SDMA1_ERROR_LOG__SHIFT 0x10
++#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG0__SHIFT 0x11
++#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG1__SHIFT 0x12
++#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG2__SHIFT 0x13
++#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG3__SHIFT 0x14
++#define SDMA1_PUB_REG_TYPE2__SDMA1_F32_COUNTER__SHIFT 0x15
++#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFMON_CNTL__SHIFT 0x17
++#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER0_RESULT__SHIFT 0x18
++#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER1_RESULT__SHIFT 0x19
++#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__SHIFT 0x1a
++#define SDMA1_PUB_REG_TYPE2__SDMA1_CRD_CNTL__SHIFT 0x1b
++#define SDMA1_PUB_REG_TYPE2__SDMA1_GPU_IOV_VIOLATION_LOG__SHIFT 0x1d
++#define SDMA1_PUB_REG_TYPE2__SDMA1_ULV_CNTL__SHIFT 0x1e
++#define SDMA1_PUB_REG_TYPE2__RESERVED__SHIFT 0x1f
++#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV0_MASK 0x00000001L
++#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV1_MASK 0x00000002L
++#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV2_MASK 0x00000004L
++#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_XNACK0_MASK 0x00000008L
++#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_XNACK1_MASK 0x00000010L
++#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_XNACK0_MASK 0x00000020L
++#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_XNACK1_MASK 0x00000040L
++#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_TIMEOUT_MASK 0x00000080L
++#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_PAGE_MASK 0x00000100L
++#define SDMA1_PUB_REG_TYPE2__SDMA1_POWER_CNTL_IDLE_MASK 0x00000200L
++#define SDMA1_PUB_REG_TYPE2__SDMA1_RELAX_ORDERING_LUT_MASK 0x00000400L
++#define SDMA1_PUB_REG_TYPE2__SDMA1_CHICKEN_BITS_2_MASK 0x00000800L
++#define SDMA1_PUB_REG_TYPE2__SDMA1_STATUS3_REG_MASK 0x00001000L
++#define SDMA1_PUB_REG_TYPE2__SDMA1_PHYSICAL_ADDR_LO_MASK 0x00002000L
++#define SDMA1_PUB_REG_TYPE2__SDMA1_PHYSICAL_ADDR_HI_MASK 0x00004000L
++#define SDMA1_PUB_REG_TYPE2__SDMA1_PHASE2_QUANTUM_MASK 0x00008000L
++#define SDMA1_PUB_REG_TYPE2__SDMA1_ERROR_LOG_MASK 0x00010000L
++#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG0_MASK 0x00020000L
++#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG1_MASK 0x00040000L
++#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG2_MASK 0x00080000L
++#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG3_MASK 0x00100000L
++#define SDMA1_PUB_REG_TYPE2__SDMA1_F32_COUNTER_MASK 0x00200000L
++#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFMON_CNTL_MASK 0x00800000L
++#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER0_RESULT_MASK 0x01000000L
++#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER1_RESULT_MASK 0x02000000L
++#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER_TAG_DELAY_RANGE_MASK 0x04000000L
++#define SDMA1_PUB_REG_TYPE2__SDMA1_CRD_CNTL_MASK 0x08000000L
++#define SDMA1_PUB_REG_TYPE2__SDMA1_GPU_IOV_VIOLATION_LOG_MASK 0x20000000L
++#define SDMA1_PUB_REG_TYPE2__SDMA1_ULV_CNTL_MASK 0x40000000L
++#define SDMA1_PUB_REG_TYPE2__RESERVED_MASK 0x80000000L
++//SDMA1_PUB_REG_TYPE3
++#define SDMA1_PUB_REG_TYPE3__SDMA1_EA_DBIT_ADDR_DATA__SHIFT 0x0
++#define SDMA1_PUB_REG_TYPE3__SDMA1_EA_DBIT_ADDR_INDEX__SHIFT 0x1
++#define SDMA1_PUB_REG_TYPE3__RESERVED__SHIFT 0x2
++#define SDMA1_PUB_REG_TYPE3__SDMA1_EA_DBIT_ADDR_DATA_MASK 0x00000001L
++#define SDMA1_PUB_REG_TYPE3__SDMA1_EA_DBIT_ADDR_INDEX_MASK 0x00000002L
++#define SDMA1_PUB_REG_TYPE3__RESERVED_MASK 0xFFFFFFFCL
++//SDMA1_MMHUB_CNTL
++#define SDMA1_MMHUB_CNTL__UNIT_ID__SHIFT 0x0
++#define SDMA1_MMHUB_CNTL__UNIT_ID_MASK 0x0000003FL
++//SDMA1_CONTEXT_GROUP_BOUNDARY
++#define SDMA1_CONTEXT_GROUP_BOUNDARY__RESERVED__SHIFT 0x0
++#define SDMA1_CONTEXT_GROUP_BOUNDARY__RESERVED_MASK 0xFFFFFFFFL
++//SDMA1_POWER_CNTL
++#define SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE__SHIFT 0x8
++#define SDMA1_POWER_CNTL__MEM_POWER_LS_EN__SHIFT 0x9
++#define SDMA1_POWER_CNTL__MEM_POWER_DS_EN__SHIFT 0xa
++#define SDMA1_POWER_CNTL__MEM_POWER_SD_EN__SHIFT 0xb
++#define SDMA1_POWER_CNTL__MEM_POWER_DELAY__SHIFT 0xc
++#define SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK 0x00000100L
++#define SDMA1_POWER_CNTL__MEM_POWER_LS_EN_MASK 0x00000200L
++#define SDMA1_POWER_CNTL__MEM_POWER_DS_EN_MASK 0x00000400L
++#define SDMA1_POWER_CNTL__MEM_POWER_SD_EN_MASK 0x00000800L
++#define SDMA1_POWER_CNTL__MEM_POWER_DELAY_MASK 0x003FF000L
++//SDMA1_CLK_CTRL
++#define SDMA1_CLK_CTRL__ON_DELAY__SHIFT 0x0
++#define SDMA1_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
++#define SDMA1_CLK_CTRL__RESERVED__SHIFT 0xc
++#define SDMA1_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
++#define SDMA1_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
++#define SDMA1_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
++#define SDMA1_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
++#define SDMA1_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
++#define SDMA1_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
++#define SDMA1_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
++#define SDMA1_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
++#define SDMA1_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
++#define SDMA1_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
++#define SDMA1_CLK_CTRL__RESERVED_MASK 0x00FFF000L
++#define SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
++#define SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
++#define SDMA1_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
++#define SDMA1_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
++#define SDMA1_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
++#define SDMA1_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
++#define SDMA1_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
++#define SDMA1_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
++//SDMA1_CNTL
++#define SDMA1_CNTL__TRAP_ENABLE__SHIFT 0x0
++#define SDMA1_CNTL__UTC_L1_ENABLE__SHIFT 0x1
++#define SDMA1_CNTL__SEM_WAIT_INT_ENABLE__SHIFT 0x2
++#define SDMA1_CNTL__DATA_SWAP_ENABLE__SHIFT 0x3
++#define SDMA1_CNTL__FENCE_SWAP_ENABLE__SHIFT 0x4
++#define SDMA1_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x5
++#define SDMA1_CNTL__MIDCMD_WORLDSWITCH_ENABLE__SHIFT 0x11
++#define SDMA1_CNTL__AUTO_CTXSW_ENABLE__SHIFT 0x12
++#define SDMA1_CNTL__CTXEMPTY_INT_ENABLE__SHIFT 0x1c
++#define SDMA1_CNTL__FROZEN_INT_ENABLE__SHIFT 0x1d
++#define SDMA1_CNTL__IB_PREEMPT_INT_ENABLE__SHIFT 0x1e
++#define SDMA1_CNTL__TRAP_ENABLE_MASK 0x00000001L
++#define SDMA1_CNTL__UTC_L1_ENABLE_MASK 0x00000002L
++#define SDMA1_CNTL__SEM_WAIT_INT_ENABLE_MASK 0x00000004L
++#define SDMA1_CNTL__DATA_SWAP_ENABLE_MASK 0x00000008L
++#define SDMA1_CNTL__FENCE_SWAP_ENABLE_MASK 0x00000010L
++#define SDMA1_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00000020L
++#define SDMA1_CNTL__MIDCMD_WORLDSWITCH_ENABLE_MASK 0x00020000L
++#define SDMA1_CNTL__AUTO_CTXSW_ENABLE_MASK 0x00040000L
++#define SDMA1_CNTL__CTXEMPTY_INT_ENABLE_MASK 0x10000000L
++#define SDMA1_CNTL__FROZEN_INT_ENABLE_MASK 0x20000000L
++#define SDMA1_CNTL__IB_PREEMPT_INT_ENABLE_MASK 0x40000000L
++//SDMA1_CHICKEN_BITS
++#define SDMA1_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE__SHIFT 0x0
++#define SDMA1_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE__SHIFT 0x1
++#define SDMA1_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE__SHIFT 0x2
++#define SDMA1_CHICKEN_BITS__WRITE_BURST_LENGTH__SHIFT 0x8
++#define SDMA1_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE__SHIFT 0xa
++#define SDMA1_CHICKEN_BITS__COPY_OVERLAP_ENABLE__SHIFT 0x10
++#define SDMA1_CHICKEN_BITS__RAW_CHECK_ENABLE__SHIFT 0x11
++#define SDMA1_CHICKEN_BITS__SRBM_POLL_RETRYING__SHIFT 0x14
++#define SDMA1_CHICKEN_BITS__CG_STATUS_OUTPUT__SHIFT 0x17
++#define SDMA1_CHICKEN_BITS__TIME_BASED_QOS__SHIFT 0x19
++#define SDMA1_CHICKEN_BITS__CE_AFIFO_WATERMARK__SHIFT 0x1a
++#define SDMA1_CHICKEN_BITS__CE_DFIFO_WATERMARK__SHIFT 0x1c
++#define SDMA1_CHICKEN_BITS__CE_LFIFO_WATERMARK__SHIFT 0x1e
++#define SDMA1_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE_MASK 0x00000001L
++#define SDMA1_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE_MASK 0x00000002L
++#define SDMA1_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE_MASK 0x00000004L
++#define SDMA1_CHICKEN_BITS__WRITE_BURST_LENGTH_MASK 0x00000300L
++#define SDMA1_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE_MASK 0x00001C00L
++#define SDMA1_CHICKEN_BITS__COPY_OVERLAP_ENABLE_MASK 0x00010000L
++#define SDMA1_CHICKEN_BITS__RAW_CHECK_ENABLE_MASK 0x00020000L
++#define SDMA1_CHICKEN_BITS__SRBM_POLL_RETRYING_MASK 0x00100000L
++#define SDMA1_CHICKEN_BITS__CG_STATUS_OUTPUT_MASK 0x00800000L
++#define SDMA1_CHICKEN_BITS__TIME_BASED_QOS_MASK 0x02000000L
++#define SDMA1_CHICKEN_BITS__CE_AFIFO_WATERMARK_MASK 0x0C000000L
++#define SDMA1_CHICKEN_BITS__CE_DFIFO_WATERMARK_MASK 0x30000000L
++#define SDMA1_CHICKEN_BITS__CE_LFIFO_WATERMARK_MASK 0xC0000000L
++//SDMA1_GB_ADDR_CONFIG
++#define SDMA1_GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
++#define SDMA1_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
++#define SDMA1_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x8
++#define SDMA1_GB_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc
++#define SDMA1_GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13
++#define SDMA1_GB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
++#define SDMA1_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
++#define SDMA1_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
++#define SDMA1_GB_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L
++#define SDMA1_GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L
++//SDMA1_GB_ADDR_CONFIG_READ
++#define SDMA1_GB_ADDR_CONFIG_READ__NUM_PIPES__SHIFT 0x0
++#define SDMA1_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
++#define SDMA1_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE__SHIFT 0x8
++#define SDMA1_GB_ADDR_CONFIG_READ__NUM_BANKS__SHIFT 0xc
++#define SDMA1_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES__SHIFT 0x13
++#define SDMA1_GB_ADDR_CONFIG_READ__NUM_PIPES_MASK 0x00000007L
++#define SDMA1_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
++#define SDMA1_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
++#define SDMA1_GB_ADDR_CONFIG_READ__NUM_BANKS_MASK 0x00007000L
++#define SDMA1_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES_MASK 0x00180000L
++//SDMA1_RB_RPTR_FETCH_HI
++#define SDMA1_RB_RPTR_FETCH_HI__OFFSET__SHIFT 0x0
++#define SDMA1_RB_RPTR_FETCH_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA1_SEM_WAIT_FAIL_TIMER_CNTL
++#define SDMA1_SEM_WAIT_FAIL_TIMER_CNTL__TIMER__SHIFT 0x0
++#define SDMA1_SEM_WAIT_FAIL_TIMER_CNTL__TIMER_MASK 0xFFFFFFFFL
++//SDMA1_RB_RPTR_FETCH
++#define SDMA1_RB_RPTR_FETCH__OFFSET__SHIFT 0x2
++#define SDMA1_RB_RPTR_FETCH__OFFSET_MASK 0xFFFFFFFCL
++//SDMA1_IB_OFFSET_FETCH
++#define SDMA1_IB_OFFSET_FETCH__OFFSET__SHIFT 0x2
++#define SDMA1_IB_OFFSET_FETCH__OFFSET_MASK 0x003FFFFCL
++//SDMA1_PROGRAM
++#define SDMA1_PROGRAM__STREAM__SHIFT 0x0
++#define SDMA1_PROGRAM__STREAM_MASK 0xFFFFFFFFL
++//SDMA1_STATUS_REG
++#define SDMA1_STATUS_REG__IDLE__SHIFT 0x0
++#define SDMA1_STATUS_REG__REG_IDLE__SHIFT 0x1
++#define SDMA1_STATUS_REG__RB_EMPTY__SHIFT 0x2
++#define SDMA1_STATUS_REG__RB_FULL__SHIFT 0x3
++#define SDMA1_STATUS_REG__RB_CMD_IDLE__SHIFT 0x4
++#define SDMA1_STATUS_REG__RB_CMD_FULL__SHIFT 0x5
++#define SDMA1_STATUS_REG__IB_CMD_IDLE__SHIFT 0x6
++#define SDMA1_STATUS_REG__IB_CMD_FULL__SHIFT 0x7
++#define SDMA1_STATUS_REG__BLOCK_IDLE__SHIFT 0x8
++#define SDMA1_STATUS_REG__INSIDE_IB__SHIFT 0x9
++#define SDMA1_STATUS_REG__EX_IDLE__SHIFT 0xa
++#define SDMA1_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE__SHIFT 0xb
++#define SDMA1_STATUS_REG__PACKET_READY__SHIFT 0xc
++#define SDMA1_STATUS_REG__MC_WR_IDLE__SHIFT 0xd
++#define SDMA1_STATUS_REG__SRBM_IDLE__SHIFT 0xe
++#define SDMA1_STATUS_REG__CONTEXT_EMPTY__SHIFT 0xf
++#define SDMA1_STATUS_REG__DELTA_RPTR_FULL__SHIFT 0x10
++#define SDMA1_STATUS_REG__RB_MC_RREQ_IDLE__SHIFT 0x11
++#define SDMA1_STATUS_REG__IB_MC_RREQ_IDLE__SHIFT 0x12
++#define SDMA1_STATUS_REG__MC_RD_IDLE__SHIFT 0x13
++#define SDMA1_STATUS_REG__DELTA_RPTR_EMPTY__SHIFT 0x14
++#define SDMA1_STATUS_REG__MC_RD_RET_STALL__SHIFT 0x15
++#define SDMA1_STATUS_REG__MC_RD_NO_POLL_IDLE__SHIFT 0x16
++#define SDMA1_STATUS_REG__PREV_CMD_IDLE__SHIFT 0x19
++#define SDMA1_STATUS_REG__SEM_IDLE__SHIFT 0x1a
++#define SDMA1_STATUS_REG__SEM_REQ_STALL__SHIFT 0x1b
++#define SDMA1_STATUS_REG__SEM_RESP_STATE__SHIFT 0x1c
++#define SDMA1_STATUS_REG__INT_IDLE__SHIFT 0x1e
++#define SDMA1_STATUS_REG__INT_REQ_STALL__SHIFT 0x1f
++#define SDMA1_STATUS_REG__IDLE_MASK 0x00000001L
++#define SDMA1_STATUS_REG__REG_IDLE_MASK 0x00000002L
++#define SDMA1_STATUS_REG__RB_EMPTY_MASK 0x00000004L
++#define SDMA1_STATUS_REG__RB_FULL_MASK 0x00000008L
++#define SDMA1_STATUS_REG__RB_CMD_IDLE_MASK 0x00000010L
++#define SDMA1_STATUS_REG__RB_CMD_FULL_MASK 0x00000020L
++#define SDMA1_STATUS_REG__IB_CMD_IDLE_MASK 0x00000040L
++#define SDMA1_STATUS_REG__IB_CMD_FULL_MASK 0x00000080L
++#define SDMA1_STATUS_REG__BLOCK_IDLE_MASK 0x00000100L
++#define SDMA1_STATUS_REG__INSIDE_IB_MASK 0x00000200L
++#define SDMA1_STATUS_REG__EX_IDLE_MASK 0x00000400L
++#define SDMA1_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE_MASK 0x00000800L
++#define SDMA1_STATUS_REG__PACKET_READY_MASK 0x00001000L
++#define SDMA1_STATUS_REG__MC_WR_IDLE_MASK 0x00002000L
++#define SDMA1_STATUS_REG__SRBM_IDLE_MASK 0x00004000L
++#define SDMA1_STATUS_REG__CONTEXT_EMPTY_MASK 0x00008000L
++#define SDMA1_STATUS_REG__DELTA_RPTR_FULL_MASK 0x00010000L
++#define SDMA1_STATUS_REG__RB_MC_RREQ_IDLE_MASK 0x00020000L
++#define SDMA1_STATUS_REG__IB_MC_RREQ_IDLE_MASK 0x00040000L
++#define SDMA1_STATUS_REG__MC_RD_IDLE_MASK 0x00080000L
++#define SDMA1_STATUS_REG__DELTA_RPTR_EMPTY_MASK 0x00100000L
++#define SDMA1_STATUS_REG__MC_RD_RET_STALL_MASK 0x00200000L
++#define SDMA1_STATUS_REG__MC_RD_NO_POLL_IDLE_MASK 0x00400000L
++#define SDMA1_STATUS_REG__PREV_CMD_IDLE_MASK 0x02000000L
++#define SDMA1_STATUS_REG__SEM_IDLE_MASK 0x04000000L
++#define SDMA1_STATUS_REG__SEM_REQ_STALL_MASK 0x08000000L
++#define SDMA1_STATUS_REG__SEM_RESP_STATE_MASK 0x30000000L
++#define SDMA1_STATUS_REG__INT_IDLE_MASK 0x40000000L
++#define SDMA1_STATUS_REG__INT_REQ_STALL_MASK 0x80000000L
++//SDMA1_STATUS1_REG
++#define SDMA1_STATUS1_REG__CE_WREQ_IDLE__SHIFT 0x0
++#define SDMA1_STATUS1_REG__CE_WR_IDLE__SHIFT 0x1
++#define SDMA1_STATUS1_REG__CE_SPLIT_IDLE__SHIFT 0x2
++#define SDMA1_STATUS1_REG__CE_RREQ_IDLE__SHIFT 0x3
++#define SDMA1_STATUS1_REG__CE_OUT_IDLE__SHIFT 0x4
++#define SDMA1_STATUS1_REG__CE_IN_IDLE__SHIFT 0x5
++#define SDMA1_STATUS1_REG__CE_DST_IDLE__SHIFT 0x6
++#define SDMA1_STATUS1_REG__CE_CMD_IDLE__SHIFT 0x9
++#define SDMA1_STATUS1_REG__CE_AFIFO_FULL__SHIFT 0xa
++#define SDMA1_STATUS1_REG__CE_INFO_FULL__SHIFT 0xd
++#define SDMA1_STATUS1_REG__CE_INFO1_FULL__SHIFT 0xe
++#define SDMA1_STATUS1_REG__EX_START__SHIFT 0xf
++#define SDMA1_STATUS1_REG__CE_RD_STALL__SHIFT 0x11
++#define SDMA1_STATUS1_REG__CE_WR_STALL__SHIFT 0x12
++#define SDMA1_STATUS1_REG__CE_WREQ_IDLE_MASK 0x00000001L
++#define SDMA1_STATUS1_REG__CE_WR_IDLE_MASK 0x00000002L
++#define SDMA1_STATUS1_REG__CE_SPLIT_IDLE_MASK 0x00000004L
++#define SDMA1_STATUS1_REG__CE_RREQ_IDLE_MASK 0x00000008L
++#define SDMA1_STATUS1_REG__CE_OUT_IDLE_MASK 0x00000010L
++#define SDMA1_STATUS1_REG__CE_IN_IDLE_MASK 0x00000020L
++#define SDMA1_STATUS1_REG__CE_DST_IDLE_MASK 0x00000040L
++#define SDMA1_STATUS1_REG__CE_CMD_IDLE_MASK 0x00000200L
++#define SDMA1_STATUS1_REG__CE_AFIFO_FULL_MASK 0x00000400L
++#define SDMA1_STATUS1_REG__CE_INFO_FULL_MASK 0x00002000L
++#define SDMA1_STATUS1_REG__CE_INFO1_FULL_MASK 0x00004000L
++#define SDMA1_STATUS1_REG__EX_START_MASK 0x00008000L
++#define SDMA1_STATUS1_REG__CE_RD_STALL_MASK 0x00020000L
++#define SDMA1_STATUS1_REG__CE_WR_STALL_MASK 0x00040000L
++//SDMA1_RD_BURST_CNTL
++#define SDMA1_RD_BURST_CNTL__RD_BURST__SHIFT 0x0
++#define SDMA1_RD_BURST_CNTL__CMD_BUFFER_RD_BURST__SHIFT 0x2
++#define SDMA1_RD_BURST_CNTL__RD_BURST_MASK 0x00000003L
++#define SDMA1_RD_BURST_CNTL__CMD_BUFFER_RD_BURST_MASK 0x0000000CL
++//SDMA1_HBM_PAGE_CONFIG
++#define SDMA1_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT__SHIFT 0x0
++#define SDMA1_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT_MASK 0x00000001L
++//SDMA1_UCODE_CHECKSUM
++#define SDMA1_UCODE_CHECKSUM__DATA__SHIFT 0x0
++#define SDMA1_UCODE_CHECKSUM__DATA_MASK 0xFFFFFFFFL
++//SDMA1_F32_CNTL
++#define SDMA1_F32_CNTL__HALT__SHIFT 0x0
++#define SDMA1_F32_CNTL__STEP__SHIFT 0x1
++#define SDMA1_F32_CNTL__HALT_MASK 0x00000001L
++#define SDMA1_F32_CNTL__STEP_MASK 0x00000002L
++//SDMA1_FREEZE
++#define SDMA1_FREEZE__PREEMPT__SHIFT 0x0
++#define SDMA1_FREEZE__FREEZE__SHIFT 0x4
++#define SDMA1_FREEZE__FROZEN__SHIFT 0x5
++#define SDMA1_FREEZE__F32_FREEZE__SHIFT 0x6
++#define SDMA1_FREEZE__PREEMPT_MASK 0x00000001L
++#define SDMA1_FREEZE__FREEZE_MASK 0x00000010L
++#define SDMA1_FREEZE__FROZEN_MASK 0x00000020L
++#define SDMA1_FREEZE__F32_FREEZE_MASK 0x00000040L
++//SDMA1_PHASE0_QUANTUM
++#define SDMA1_PHASE0_QUANTUM__UNIT__SHIFT 0x0
++#define SDMA1_PHASE0_QUANTUM__VALUE__SHIFT 0x8
++#define SDMA1_PHASE0_QUANTUM__PREFER__SHIFT 0x1e
++#define SDMA1_PHASE0_QUANTUM__UNIT_MASK 0x0000000FL
++#define SDMA1_PHASE0_QUANTUM__VALUE_MASK 0x00FFFF00L
++#define SDMA1_PHASE0_QUANTUM__PREFER_MASK 0x40000000L
++//SDMA1_PHASE1_QUANTUM
++#define SDMA1_PHASE1_QUANTUM__UNIT__SHIFT 0x0
++#define SDMA1_PHASE1_QUANTUM__VALUE__SHIFT 0x8
++#define SDMA1_PHASE1_QUANTUM__PREFER__SHIFT 0x1e
++#define SDMA1_PHASE1_QUANTUM__UNIT_MASK 0x0000000FL
++#define SDMA1_PHASE1_QUANTUM__VALUE_MASK 0x00FFFF00L
++#define SDMA1_PHASE1_QUANTUM__PREFER_MASK 0x40000000L
++//SDMA1_EDC_CONFIG
++#define SDMA1_EDC_CONFIG__DIS_EDC__SHIFT 0x1
++#define SDMA1_EDC_CONFIG__ECC_INT_ENABLE__SHIFT 0x2
++#define SDMA1_EDC_CONFIG__DIS_EDC_MASK 0x00000002L
++#define SDMA1_EDC_CONFIG__ECC_INT_ENABLE_MASK 0x00000004L
++//SDMA1_BA_THRESHOLD
++#define SDMA1_BA_THRESHOLD__READ_THRES__SHIFT 0x0
++#define SDMA1_BA_THRESHOLD__WRITE_THRES__SHIFT 0x10
++#define SDMA1_BA_THRESHOLD__READ_THRES_MASK 0x000003FFL
++#define SDMA1_BA_THRESHOLD__WRITE_THRES_MASK 0x03FF0000L
++//SDMA1_ID
++#define SDMA1_ID__DEVICE_ID__SHIFT 0x0
++#define SDMA1_ID__DEVICE_ID_MASK 0x000000FFL
++//SDMA1_VERSION
++#define SDMA1_VERSION__MINVER__SHIFT 0x0
++#define SDMA1_VERSION__MAJVER__SHIFT 0x8
++#define SDMA1_VERSION__REV__SHIFT 0x10
++#define SDMA1_VERSION__MINVER_MASK 0x0000007FL
++#define SDMA1_VERSION__MAJVER_MASK 0x00007F00L
++#define SDMA1_VERSION__REV_MASK 0x003F0000L
++//SDMA1_EDC_COUNTER
++#define SDMA1_EDC_COUNTER__SDMA_UCODE_BUF_SED__SHIFT 0x0
++#define SDMA1_EDC_COUNTER__SDMA_RB_CMD_BUF_SED__SHIFT 0x2
++#define SDMA1_EDC_COUNTER__SDMA_IB_CMD_BUF_SED__SHIFT 0x3
++#define SDMA1_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED__SHIFT 0x4
++#define SDMA1_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED__SHIFT 0x5
++#define SDMA1_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED__SHIFT 0x6
++#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED__SHIFT 0x7
++#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED__SHIFT 0x8
++#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED__SHIFT 0x9
++#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED__SHIFT 0xa
++#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED__SHIFT 0xb
++#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED__SHIFT 0xc
++#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED__SHIFT 0xd
++#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED__SHIFT 0xe
++#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED__SHIFT 0xf
++#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED__SHIFT 0x10
++#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED__SHIFT 0x11
++#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED__SHIFT 0x12
++#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED__SHIFT 0x13
++#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED__SHIFT 0x14
++#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED__SHIFT 0x15
++#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED__SHIFT 0x16
++#define SDMA1_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED__SHIFT 0x17
++#define SDMA1_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED__SHIFT 0x18
++#define SDMA1_EDC_COUNTER__SDMA_UCODE_BUF_SED_MASK 0x00000001L
++#define SDMA1_EDC_COUNTER__SDMA_RB_CMD_BUF_SED_MASK 0x00000004L
++#define SDMA1_EDC_COUNTER__SDMA_IB_CMD_BUF_SED_MASK 0x00000008L
++#define SDMA1_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED_MASK 0x00000010L
++#define SDMA1_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED_MASK 0x00000020L
++#define SDMA1_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED_MASK 0x00000040L
++#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED_MASK 0x00000080L
++#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED_MASK 0x00000100L
++#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED_MASK 0x00000200L
++#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED_MASK 0x00000400L
++#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED_MASK 0x00000800L
++#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED_MASK 0x00001000L
++#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED_MASK 0x00002000L
++#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED_MASK 0x00004000L
++#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED_MASK 0x00008000L
++#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED_MASK 0x00010000L
++#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED_MASK 0x00020000L
++#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED_MASK 0x00040000L
++#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED_MASK 0x00080000L
++#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED_MASK 0x00100000L
++#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED_MASK 0x00200000L
++#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED_MASK 0x00400000L
++#define SDMA1_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED_MASK 0x00800000L
++#define SDMA1_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED_MASK 0x01000000L
++//SDMA1_EDC_COUNTER_CLEAR
++#define SDMA1_EDC_COUNTER_CLEAR__DUMMY__SHIFT 0x0
++#define SDMA1_EDC_COUNTER_CLEAR__DUMMY_MASK 0x00000001L
++//SDMA1_STATUS2_REG
++#define SDMA1_STATUS2_REG__ID__SHIFT 0x0
++#define SDMA1_STATUS2_REG__F32_INSTR_PTR__SHIFT 0x2
++#define SDMA1_STATUS2_REG__CMD_OP__SHIFT 0x10
++#define SDMA1_STATUS2_REG__ID_MASK 0x00000003L
++#define SDMA1_STATUS2_REG__F32_INSTR_PTR_MASK 0x00000FFCL
++#define SDMA1_STATUS2_REG__CMD_OP_MASK 0xFFFF0000L
++//SDMA1_ATOMIC_CNTL
++#define SDMA1_ATOMIC_CNTL__LOOP_TIMER__SHIFT 0x0
++#define SDMA1_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE__SHIFT 0x1f
++#define SDMA1_ATOMIC_CNTL__LOOP_TIMER_MASK 0x7FFFFFFFL
++#define SDMA1_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE_MASK 0x80000000L
++//SDMA1_ATOMIC_PREOP_LO
++#define SDMA1_ATOMIC_PREOP_LO__DATA__SHIFT 0x0
++#define SDMA1_ATOMIC_PREOP_LO__DATA_MASK 0xFFFFFFFFL
++//SDMA1_ATOMIC_PREOP_HI
++#define SDMA1_ATOMIC_PREOP_HI__DATA__SHIFT 0x0
++#define SDMA1_ATOMIC_PREOP_HI__DATA_MASK 0xFFFFFFFFL
++//SDMA1_UTCL1_CNTL
++#define SDMA1_UTCL1_CNTL__REDO_ENABLE__SHIFT 0x0
++#define SDMA1_UTCL1_CNTL__REDO_DELAY__SHIFT 0x1
++#define SDMA1_UTCL1_CNTL__REDO_WATERMK__SHIFT 0xb
++#define SDMA1_UTCL1_CNTL__INVACK_DELAY__SHIFT 0xe
++#define SDMA1_UTCL1_CNTL__REQL2_CREDIT__SHIFT 0x18
++#define SDMA1_UTCL1_CNTL__VADDR_WATERMK__SHIFT 0x1d
++#define SDMA1_UTCL1_CNTL__REDO_ENABLE_MASK 0x00000001L
++#define SDMA1_UTCL1_CNTL__REDO_DELAY_MASK 0x000007FEL
++#define SDMA1_UTCL1_CNTL__REDO_WATERMK_MASK 0x00003800L
++#define SDMA1_UTCL1_CNTL__INVACK_DELAY_MASK 0x00FFC000L
++#define SDMA1_UTCL1_CNTL__REQL2_CREDIT_MASK 0x1F000000L
++#define SDMA1_UTCL1_CNTL__VADDR_WATERMK_MASK 0xE0000000L
++//SDMA1_UTCL1_WATERMK
++#define SDMA1_UTCL1_WATERMK__REQMC_WATERMK__SHIFT 0x0
++#define SDMA1_UTCL1_WATERMK__REQPG_WATERMK__SHIFT 0x9
++#define SDMA1_UTCL1_WATERMK__INVREQ_WATERMK__SHIFT 0x11
++#define SDMA1_UTCL1_WATERMK__XNACK_WATERMK__SHIFT 0x19
++#define SDMA1_UTCL1_WATERMK__REQMC_WATERMK_MASK 0x000001FFL
++#define SDMA1_UTCL1_WATERMK__REQPG_WATERMK_MASK 0x0001FE00L
++#define SDMA1_UTCL1_WATERMK__INVREQ_WATERMK_MASK 0x01FE0000L
++#define SDMA1_UTCL1_WATERMK__XNACK_WATERMK_MASK 0xFE000000L
++//SDMA1_UTCL1_RD_STATUS
++#define SDMA1_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0
++#define SDMA1_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1
++#define SDMA1_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2
++#define SDMA1_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3
++#define SDMA1_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4
++#define SDMA1_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5
++#define SDMA1_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6
++#define SDMA1_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7
++#define SDMA1_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8
++#define SDMA1_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9
++#define SDMA1_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa
++#define SDMA1_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb
++#define SDMA1_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc
++#define SDMA1_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd
++#define SDMA1_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe
++#define SDMA1_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf
++#define SDMA1_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10
++#define SDMA1_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11
++#define SDMA1_UTCL1_RD_STATUS__PAGE_FAULT__SHIFT 0x12
++#define SDMA1_UTCL1_RD_STATUS__PAGE_NULL__SHIFT 0x13
++#define SDMA1_UTCL1_RD_STATUS__REQL2_IDLE__SHIFT 0x14
++#define SDMA1_UTCL1_RD_STATUS__CE_L1_STALL__SHIFT 0x15
++#define SDMA1_UTCL1_RD_STATUS__NEXT_RD_VECTOR__SHIFT 0x16
++#define SDMA1_UTCL1_RD_STATUS__MERGE_STATE__SHIFT 0x1a
++#define SDMA1_UTCL1_RD_STATUS__ADDR_RD_RTR__SHIFT 0x1d
++#define SDMA1_UTCL1_RD_STATUS__WPTR_POLLING__SHIFT 0x1e
++#define SDMA1_UTCL1_RD_STATUS__INVREQ_SIZE__SHIFT 0x1f
++#define SDMA1_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L
++#define SDMA1_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L
++#define SDMA1_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L
++#define SDMA1_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L
++#define SDMA1_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L
++#define SDMA1_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L
++#define SDMA1_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L
++#define SDMA1_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L
++#define SDMA1_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L
++#define SDMA1_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L
++#define SDMA1_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L
++#define SDMA1_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L
++#define SDMA1_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L
++#define SDMA1_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L
++#define SDMA1_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L
++#define SDMA1_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L
++#define SDMA1_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L
++#define SDMA1_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L
++#define SDMA1_UTCL1_RD_STATUS__PAGE_FAULT_MASK 0x00040000L
++#define SDMA1_UTCL1_RD_STATUS__PAGE_NULL_MASK 0x00080000L
++#define SDMA1_UTCL1_RD_STATUS__REQL2_IDLE_MASK 0x00100000L
++#define SDMA1_UTCL1_RD_STATUS__CE_L1_STALL_MASK 0x00200000L
++#define SDMA1_UTCL1_RD_STATUS__NEXT_RD_VECTOR_MASK 0x03C00000L
++#define SDMA1_UTCL1_RD_STATUS__MERGE_STATE_MASK 0x1C000000L
++#define SDMA1_UTCL1_RD_STATUS__ADDR_RD_RTR_MASK 0x20000000L
++#define SDMA1_UTCL1_RD_STATUS__WPTR_POLLING_MASK 0x40000000L
++#define SDMA1_UTCL1_RD_STATUS__INVREQ_SIZE_MASK 0x80000000L
++//SDMA1_UTCL1_WR_STATUS
++#define SDMA1_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0
++#define SDMA1_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1
++#define SDMA1_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2
++#define SDMA1_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3
++#define SDMA1_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4
++#define SDMA1_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5
++#define SDMA1_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6
++#define SDMA1_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7
++#define SDMA1_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8
++#define SDMA1_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9
++#define SDMA1_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa
++#define SDMA1_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb
++#define SDMA1_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc
++#define SDMA1_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd
++#define SDMA1_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe
++#define SDMA1_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf
++#define SDMA1_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10
++#define SDMA1_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11
++#define SDMA1_UTCL1_WR_STATUS__PAGE_FAULT__SHIFT 0x12
++#define SDMA1_UTCL1_WR_STATUS__PAGE_NULL__SHIFT 0x13
++#define SDMA1_UTCL1_WR_STATUS__REQL2_IDLE__SHIFT 0x14
++#define SDMA1_UTCL1_WR_STATUS__F32_WR_RTR__SHIFT 0x15
++#define SDMA1_UTCL1_WR_STATUS__NEXT_WR_VECTOR__SHIFT 0x16
++#define SDMA1_UTCL1_WR_STATUS__MERGE_STATE__SHIFT 0x19
++#define SDMA1_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY__SHIFT 0x1c
++#define SDMA1_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL__SHIFT 0x1d
++#define SDMA1_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY__SHIFT 0x1e
++#define SDMA1_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL__SHIFT 0x1f
++#define SDMA1_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L
++#define SDMA1_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L
++#define SDMA1_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L
++#define SDMA1_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L
++#define SDMA1_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L
++#define SDMA1_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L
++#define SDMA1_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L
++#define SDMA1_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L
++#define SDMA1_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L
++#define SDMA1_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L
++#define SDMA1_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L
++#define SDMA1_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L
++#define SDMA1_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L
++#define SDMA1_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L
++#define SDMA1_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L
++#define SDMA1_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L
++#define SDMA1_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L
++#define SDMA1_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L
++#define SDMA1_UTCL1_WR_STATUS__PAGE_FAULT_MASK 0x00040000L
++#define SDMA1_UTCL1_WR_STATUS__PAGE_NULL_MASK 0x00080000L
++#define SDMA1_UTCL1_WR_STATUS__REQL2_IDLE_MASK 0x00100000L
++#define SDMA1_UTCL1_WR_STATUS__F32_WR_RTR_MASK 0x00200000L
++#define SDMA1_UTCL1_WR_STATUS__NEXT_WR_VECTOR_MASK 0x01C00000L
++#define SDMA1_UTCL1_WR_STATUS__MERGE_STATE_MASK 0x0E000000L
++#define SDMA1_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY_MASK 0x10000000L
++#define SDMA1_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL_MASK 0x20000000L
++#define SDMA1_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY_MASK 0x40000000L
++#define SDMA1_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL_MASK 0x80000000L
++//SDMA1_UTCL1_INV0
++#define SDMA1_UTCL1_INV0__INV_MIDDLE__SHIFT 0x0
++#define SDMA1_UTCL1_INV0__RD_TIMEOUT__SHIFT 0x1
++#define SDMA1_UTCL1_INV0__WR_TIMEOUT__SHIFT 0x2
++#define SDMA1_UTCL1_INV0__RD_IN_INVADR__SHIFT 0x3
++#define SDMA1_UTCL1_INV0__WR_IN_INVADR__SHIFT 0x4
++#define SDMA1_UTCL1_INV0__PAGE_NULL_SW__SHIFT 0x5
++#define SDMA1_UTCL1_INV0__XNACK_IS_INVADR__SHIFT 0x6
++#define SDMA1_UTCL1_INV0__INVREQ_ENABLE__SHIFT 0x7
++#define SDMA1_UTCL1_INV0__NACK_TIMEOUT_SW__SHIFT 0x8
++#define SDMA1_UTCL1_INV0__NFLUSH_INV_IDLE__SHIFT 0x9
++#define SDMA1_UTCL1_INV0__FLUSH_INV_IDLE__SHIFT 0xa
++#define SDMA1_UTCL1_INV0__INV_FLUSHTYPE__SHIFT 0xb
++#define SDMA1_UTCL1_INV0__INV_VMID_VEC__SHIFT 0xc
++#define SDMA1_UTCL1_INV0__INV_ADDR_HI__SHIFT 0x1c
++#define SDMA1_UTCL1_INV0__INV_MIDDLE_MASK 0x00000001L
++#define SDMA1_UTCL1_INV0__RD_TIMEOUT_MASK 0x00000002L
++#define SDMA1_UTCL1_INV0__WR_TIMEOUT_MASK 0x00000004L
++#define SDMA1_UTCL1_INV0__RD_IN_INVADR_MASK 0x00000008L
++#define SDMA1_UTCL1_INV0__WR_IN_INVADR_MASK 0x00000010L
++#define SDMA1_UTCL1_INV0__PAGE_NULL_SW_MASK 0x00000020L
++#define SDMA1_UTCL1_INV0__XNACK_IS_INVADR_MASK 0x00000040L
++#define SDMA1_UTCL1_INV0__INVREQ_ENABLE_MASK 0x00000080L
++#define SDMA1_UTCL1_INV0__NACK_TIMEOUT_SW_MASK 0x00000100L
++#define SDMA1_UTCL1_INV0__NFLUSH_INV_IDLE_MASK 0x00000200L
++#define SDMA1_UTCL1_INV0__FLUSH_INV_IDLE_MASK 0x00000400L
++#define SDMA1_UTCL1_INV0__INV_FLUSHTYPE_MASK 0x00000800L
++#define SDMA1_UTCL1_INV0__INV_VMID_VEC_MASK 0x0FFFF000L
++#define SDMA1_UTCL1_INV0__INV_ADDR_HI_MASK 0xF0000000L
++//SDMA1_UTCL1_INV1
++#define SDMA1_UTCL1_INV1__INV_ADDR_LO__SHIFT 0x0
++#define SDMA1_UTCL1_INV1__INV_ADDR_LO_MASK 0xFFFFFFFFL
++//SDMA1_UTCL1_INV2
++#define SDMA1_UTCL1_INV2__INV_NFLUSH_VMID_VEC__SHIFT 0x0
++#define SDMA1_UTCL1_INV2__INV_NFLUSH_VMID_VEC_MASK 0xFFFFFFFFL
++//SDMA1_UTCL1_RD_XNACK0
++#define SDMA1_UTCL1_RD_XNACK0__XNACK_ADDR_LO__SHIFT 0x0
++#define SDMA1_UTCL1_RD_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL
++//SDMA1_UTCL1_RD_XNACK1
++#define SDMA1_UTCL1_RD_XNACK1__XNACK_ADDR_HI__SHIFT 0x0
++#define SDMA1_UTCL1_RD_XNACK1__XNACK_VMID__SHIFT 0x4
++#define SDMA1_UTCL1_RD_XNACK1__XNACK_VECTOR__SHIFT 0x8
++#define SDMA1_UTCL1_RD_XNACK1__IS_XNACK__SHIFT 0x1a
++#define SDMA1_UTCL1_RD_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL
++#define SDMA1_UTCL1_RD_XNACK1__XNACK_VMID_MASK 0x000000F0L
++#define SDMA1_UTCL1_RD_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L
++#define SDMA1_UTCL1_RD_XNACK1__IS_XNACK_MASK 0x0C000000L
++//SDMA1_UTCL1_WR_XNACK0
++#define SDMA1_UTCL1_WR_XNACK0__XNACK_ADDR_LO__SHIFT 0x0
++#define SDMA1_UTCL1_WR_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL
++//SDMA1_UTCL1_WR_XNACK1
++#define SDMA1_UTCL1_WR_XNACK1__XNACK_ADDR_HI__SHIFT 0x0
++#define SDMA1_UTCL1_WR_XNACK1__XNACK_VMID__SHIFT 0x4
++#define SDMA1_UTCL1_WR_XNACK1__XNACK_VECTOR__SHIFT 0x8
++#define SDMA1_UTCL1_WR_XNACK1__IS_XNACK__SHIFT 0x1a
++#define SDMA1_UTCL1_WR_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL
++#define SDMA1_UTCL1_WR_XNACK1__XNACK_VMID_MASK 0x000000F0L
++#define SDMA1_UTCL1_WR_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L
++#define SDMA1_UTCL1_WR_XNACK1__IS_XNACK_MASK 0x0C000000L
++//SDMA1_UTCL1_TIMEOUT
++#define SDMA1_UTCL1_TIMEOUT__RD_XNACK_LIMIT__SHIFT 0x0
++#define SDMA1_UTCL1_TIMEOUT__WR_XNACK_LIMIT__SHIFT 0x10
++#define SDMA1_UTCL1_TIMEOUT__RD_XNACK_LIMIT_MASK 0x0000FFFFL
++#define SDMA1_UTCL1_TIMEOUT__WR_XNACK_LIMIT_MASK 0xFFFF0000L
++//SDMA1_UTCL1_PAGE
++#define SDMA1_UTCL1_PAGE__VM_HOLE__SHIFT 0x0
++#define SDMA1_UTCL1_PAGE__REQ_TYPE__SHIFT 0x1
++#define SDMA1_UTCL1_PAGE__USE_MTYPE__SHIFT 0x6
++#define SDMA1_UTCL1_PAGE__USE_PT_SNOOP__SHIFT 0x9
++#define SDMA1_UTCL1_PAGE__VM_HOLE_MASK 0x00000001L
++#define SDMA1_UTCL1_PAGE__REQ_TYPE_MASK 0x0000001EL
++#define SDMA1_UTCL1_PAGE__USE_MTYPE_MASK 0x000001C0L
++#define SDMA1_UTCL1_PAGE__USE_PT_SNOOP_MASK 0x00000200L
++//SDMA1_POWER_CNTL_IDLE
++#define SDMA1_POWER_CNTL_IDLE__DELAY0__SHIFT 0x0
++#define SDMA1_POWER_CNTL_IDLE__DELAY1__SHIFT 0x10
++#define SDMA1_POWER_CNTL_IDLE__DELAY2__SHIFT 0x18
++#define SDMA1_POWER_CNTL_IDLE__DELAY0_MASK 0x0000FFFFL
++#define SDMA1_POWER_CNTL_IDLE__DELAY1_MASK 0x00FF0000L
++#define SDMA1_POWER_CNTL_IDLE__DELAY2_MASK 0xFF000000L
++//SDMA1_RELAX_ORDERING_LUT
++#define SDMA1_RELAX_ORDERING_LUT__RESERVED0__SHIFT 0x0
++#define SDMA1_RELAX_ORDERING_LUT__COPY__SHIFT 0x1
++#define SDMA1_RELAX_ORDERING_LUT__WRITE__SHIFT 0x2
++#define SDMA1_RELAX_ORDERING_LUT__RESERVED3__SHIFT 0x3
++#define SDMA1_RELAX_ORDERING_LUT__RESERVED4__SHIFT 0x4
++#define SDMA1_RELAX_ORDERING_LUT__FENCE__SHIFT 0x5
++#define SDMA1_RELAX_ORDERING_LUT__RESERVED76__SHIFT 0x6
++#define SDMA1_RELAX_ORDERING_LUT__POLL_MEM__SHIFT 0x8
++#define SDMA1_RELAX_ORDERING_LUT__COND_EXE__SHIFT 0x9
++#define SDMA1_RELAX_ORDERING_LUT__ATOMIC__SHIFT 0xa
++#define SDMA1_RELAX_ORDERING_LUT__CONST_FILL__SHIFT 0xb
++#define SDMA1_RELAX_ORDERING_LUT__PTEPDE__SHIFT 0xc
++#define SDMA1_RELAX_ORDERING_LUT__TIMESTAMP__SHIFT 0xd
++#define SDMA1_RELAX_ORDERING_LUT__RESERVED__SHIFT 0xe
++#define SDMA1_RELAX_ORDERING_LUT__WORLD_SWITCH__SHIFT 0x1b
++#define SDMA1_RELAX_ORDERING_LUT__RPTR_WRB__SHIFT 0x1c
++#define SDMA1_RELAX_ORDERING_LUT__WPTR_POLL__SHIFT 0x1d
++#define SDMA1_RELAX_ORDERING_LUT__IB_FETCH__SHIFT 0x1e
++#define SDMA1_RELAX_ORDERING_LUT__RB_FETCH__SHIFT 0x1f
++#define SDMA1_RELAX_ORDERING_LUT__RESERVED0_MASK 0x00000001L
++#define SDMA1_RELAX_ORDERING_LUT__COPY_MASK 0x00000002L
++#define SDMA1_RELAX_ORDERING_LUT__WRITE_MASK 0x00000004L
++#define SDMA1_RELAX_ORDERING_LUT__RESERVED3_MASK 0x00000008L
++#define SDMA1_RELAX_ORDERING_LUT__RESERVED4_MASK 0x00000010L
++#define SDMA1_RELAX_ORDERING_LUT__FENCE_MASK 0x00000020L
++#define SDMA1_RELAX_ORDERING_LUT__RESERVED76_MASK 0x000000C0L
++#define SDMA1_RELAX_ORDERING_LUT__POLL_MEM_MASK 0x00000100L
++#define SDMA1_RELAX_ORDERING_LUT__COND_EXE_MASK 0x00000200L
++#define SDMA1_RELAX_ORDERING_LUT__ATOMIC_MASK 0x00000400L
++#define SDMA1_RELAX_ORDERING_LUT__CONST_FILL_MASK 0x00000800L
++#define SDMA1_RELAX_ORDERING_LUT__PTEPDE_MASK 0x00001000L
++#define SDMA1_RELAX_ORDERING_LUT__TIMESTAMP_MASK 0x00002000L
++#define SDMA1_RELAX_ORDERING_LUT__RESERVED_MASK 0x07FFC000L
++#define SDMA1_RELAX_ORDERING_LUT__WORLD_SWITCH_MASK 0x08000000L
++#define SDMA1_RELAX_ORDERING_LUT__RPTR_WRB_MASK 0x10000000L
++#define SDMA1_RELAX_ORDERING_LUT__WPTR_POLL_MASK 0x20000000L
++#define SDMA1_RELAX_ORDERING_LUT__IB_FETCH_MASK 0x40000000L
++#define SDMA1_RELAX_ORDERING_LUT__RB_FETCH_MASK 0x80000000L
++//SDMA1_CHICKEN_BITS_2
++#define SDMA1_CHICKEN_BITS_2__F32_CMD_PROC_DELAY__SHIFT 0x0
++#define SDMA1_CHICKEN_BITS_2__F32_CMD_PROC_DELAY_MASK 0x0000000FL
++//SDMA1_STATUS3_REG
++#define SDMA1_STATUS3_REG__CMD_OP_STATUS__SHIFT 0x0
++#define SDMA1_STATUS3_REG__PREV_VM_CMD__SHIFT 0x10
++#define SDMA1_STATUS3_REG__EXCEPTION_IDLE__SHIFT 0x14
++#define SDMA1_STATUS3_REG__QUEUE_ID_MATCH__SHIFT 0x15
++#define SDMA1_STATUS3_REG__INT_QUEUE_ID__SHIFT 0x16
++#define SDMA1_STATUS3_REG__CMD_OP_STATUS_MASK 0x0000FFFFL
++#define SDMA1_STATUS3_REG__PREV_VM_CMD_MASK 0x000F0000L
++#define SDMA1_STATUS3_REG__EXCEPTION_IDLE_MASK 0x00100000L
++#define SDMA1_STATUS3_REG__QUEUE_ID_MATCH_MASK 0x00200000L
++#define SDMA1_STATUS3_REG__INT_QUEUE_ID_MASK 0x03C00000L
++//SDMA1_PHYSICAL_ADDR_LO
++#define SDMA1_PHYSICAL_ADDR_LO__D_VALID__SHIFT 0x0
++#define SDMA1_PHYSICAL_ADDR_LO__DIRTY__SHIFT 0x1
++#define SDMA1_PHYSICAL_ADDR_LO__PHY_VALID__SHIFT 0x2
++#define SDMA1_PHYSICAL_ADDR_LO__ADDR__SHIFT 0xc
++#define SDMA1_PHYSICAL_ADDR_LO__D_VALID_MASK 0x00000001L
++#define SDMA1_PHYSICAL_ADDR_LO__DIRTY_MASK 0x00000002L
++#define SDMA1_PHYSICAL_ADDR_LO__PHY_VALID_MASK 0x00000004L
++#define SDMA1_PHYSICAL_ADDR_LO__ADDR_MASK 0xFFFFF000L
++//SDMA1_PHYSICAL_ADDR_HI
++#define SDMA1_PHYSICAL_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA1_PHYSICAL_ADDR_HI__ADDR_MASK 0x0000FFFFL
++//SDMA1_PHASE2_QUANTUM
++#define SDMA1_PHASE2_QUANTUM__UNIT__SHIFT 0x0
++#define SDMA1_PHASE2_QUANTUM__VALUE__SHIFT 0x8
++#define SDMA1_PHASE2_QUANTUM__PREFER__SHIFT 0x1e
++#define SDMA1_PHASE2_QUANTUM__UNIT_MASK 0x0000000FL
++#define SDMA1_PHASE2_QUANTUM__VALUE_MASK 0x00FFFF00L
++#define SDMA1_PHASE2_QUANTUM__PREFER_MASK 0x40000000L
++//SDMA1_ERROR_LOG
++#define SDMA1_ERROR_LOG__OVERRIDE__SHIFT 0x0
++#define SDMA1_ERROR_LOG__STATUS__SHIFT 0x10
++#define SDMA1_ERROR_LOG__OVERRIDE_MASK 0x0000FFFFL
++#define SDMA1_ERROR_LOG__STATUS_MASK 0xFFFF0000L
++//SDMA1_PUB_DUMMY_REG0
++#define SDMA1_PUB_DUMMY_REG0__VALUE__SHIFT 0x0
++#define SDMA1_PUB_DUMMY_REG0__VALUE_MASK 0xFFFFFFFFL
++//SDMA1_PUB_DUMMY_REG1
++#define SDMA1_PUB_DUMMY_REG1__VALUE__SHIFT 0x0
++#define SDMA1_PUB_DUMMY_REG1__VALUE_MASK 0xFFFFFFFFL
++//SDMA1_PUB_DUMMY_REG2
++#define SDMA1_PUB_DUMMY_REG2__VALUE__SHIFT 0x0
++#define SDMA1_PUB_DUMMY_REG2__VALUE_MASK 0xFFFFFFFFL
++//SDMA1_PUB_DUMMY_REG3
++#define SDMA1_PUB_DUMMY_REG3__VALUE__SHIFT 0x0
++#define SDMA1_PUB_DUMMY_REG3__VALUE_MASK 0xFFFFFFFFL
++//SDMA1_F32_COUNTER
++#define SDMA1_F32_COUNTER__VALUE__SHIFT 0x0
++#define SDMA1_F32_COUNTER__VALUE_MASK 0xFFFFFFFFL
++//SDMA1_PERFMON_CNTL
++#define SDMA1_PERFMON_CNTL__PERF_ENABLE0__SHIFT 0x0
++#define SDMA1_PERFMON_CNTL__PERF_CLEAR0__SHIFT 0x1
++#define SDMA1_PERFMON_CNTL__PERF_SEL0__SHIFT 0x2
++#define SDMA1_PERFMON_CNTL__PERF_ENABLE1__SHIFT 0xa
++#define SDMA1_PERFMON_CNTL__PERF_CLEAR1__SHIFT 0xb
++#define SDMA1_PERFMON_CNTL__PERF_SEL1__SHIFT 0xc
++#define SDMA1_PERFMON_CNTL__PERF_ENABLE0_MASK 0x00000001L
++#define SDMA1_PERFMON_CNTL__PERF_CLEAR0_MASK 0x00000002L
++#define SDMA1_PERFMON_CNTL__PERF_SEL0_MASK 0x000003FCL
++#define SDMA1_PERFMON_CNTL__PERF_ENABLE1_MASK 0x00000400L
++#define SDMA1_PERFMON_CNTL__PERF_CLEAR1_MASK 0x00000800L
++#define SDMA1_PERFMON_CNTL__PERF_SEL1_MASK 0x000FF000L
++//SDMA1_PERFCOUNTER0_RESULT
++#define SDMA1_PERFCOUNTER0_RESULT__PERF_COUNT__SHIFT 0x0
++#define SDMA1_PERFCOUNTER0_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
++//SDMA1_PERFCOUNTER1_RESULT
++#define SDMA1_PERFCOUNTER1_RESULT__PERF_COUNT__SHIFT 0x0
++#define SDMA1_PERFCOUNTER1_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
++//SDMA1_PERFCOUNTER_TAG_DELAY_RANGE
++#define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW__SHIFT 0x0
++#define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH__SHIFT 0xe
++#define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW__SHIFT 0x1c
++#define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW_MASK 0x00003FFFL
++#define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH_MASK 0x0FFFC000L
++#define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW_MASK 0x10000000L
++//SDMA1_CRD_CNTL
++#define SDMA1_CRD_CNTL__MC_WRREQ_CREDIT__SHIFT 0x7
++#define SDMA1_CRD_CNTL__MC_RDREQ_CREDIT__SHIFT 0xd
++#define SDMA1_CRD_CNTL__MC_WRREQ_CREDIT_MASK 0x00001F80L
++#define SDMA1_CRD_CNTL__MC_RDREQ_CREDIT_MASK 0x0007E000L
++//SDMA1_GPU_IOV_VIOLATION_LOG
++#define SDMA1_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
++#define SDMA1_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1
++#define SDMA1_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2
++#define SDMA1_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION__SHIFT 0x12
++#define SDMA1_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x13
++#define SDMA1_GPU_IOV_VIOLATION_LOG__VFID__SHIFT 0x14
++#define SDMA1_GPU_IOV_VIOLATION_LOG__INITIATOR_ID__SHIFT 0x18
++#define SDMA1_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
++#define SDMA1_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L
++#define SDMA1_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x0003FFFCL
++#define SDMA1_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION_MASK 0x00040000L
++#define SDMA1_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00080000L
++#define SDMA1_GPU_IOV_VIOLATION_LOG__VFID_MASK 0x00F00000L
++#define SDMA1_GPU_IOV_VIOLATION_LOG__INITIATOR_ID_MASK 0xFF000000L
++//SDMA1_ULV_CNTL
++#define SDMA1_ULV_CNTL__HYSTERESIS__SHIFT 0x0
++#define SDMA1_ULV_CNTL__ENTER_ULV_INT_CLR__SHIFT 0x1b
++#define SDMA1_ULV_CNTL__EXIT_ULV_INT_CLR__SHIFT 0x1c
++#define SDMA1_ULV_CNTL__ENTER_ULV_INT__SHIFT 0x1d
++#define SDMA1_ULV_CNTL__EXIT_ULV_INT__SHIFT 0x1e
++#define SDMA1_ULV_CNTL__ULV_STATUS__SHIFT 0x1f
++#define SDMA1_ULV_CNTL__HYSTERESIS_MASK 0x0000001FL
++#define SDMA1_ULV_CNTL__ENTER_ULV_INT_CLR_MASK 0x08000000L
++#define SDMA1_ULV_CNTL__EXIT_ULV_INT_CLR_MASK 0x10000000L
++#define SDMA1_ULV_CNTL__ENTER_ULV_INT_MASK 0x20000000L
++#define SDMA1_ULV_CNTL__EXIT_ULV_INT_MASK 0x40000000L
++#define SDMA1_ULV_CNTL__ULV_STATUS_MASK 0x80000000L
++//SDMA1_EA_DBIT_ADDR_DATA
++#define SDMA1_EA_DBIT_ADDR_DATA__VALUE__SHIFT 0x0
++#define SDMA1_EA_DBIT_ADDR_DATA__VALUE_MASK 0xFFFFFFFFL
++//SDMA1_EA_DBIT_ADDR_INDEX
++#define SDMA1_EA_DBIT_ADDR_INDEX__VALUE__SHIFT 0x0
++#define SDMA1_EA_DBIT_ADDR_INDEX__VALUE_MASK 0x00000007L
++//SDMA1_GFX_RB_CNTL
++#define SDMA1_GFX_RB_CNTL__RB_ENABLE__SHIFT 0x0
++#define SDMA1_GFX_RB_CNTL__RB_SIZE__SHIFT 0x1
++#define SDMA1_GFX_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
++#define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
++#define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
++#define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
++#define SDMA1_GFX_RB_CNTL__RB_PRIV__SHIFT 0x17
++#define SDMA1_GFX_RB_CNTL__RB_VMID__SHIFT 0x18
++#define SDMA1_GFX_RB_CNTL__RB_ENABLE_MASK 0x00000001L
++#define SDMA1_GFX_RB_CNTL__RB_SIZE_MASK 0x0000003EL
++#define SDMA1_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
++#define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
++#define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
++#define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
++#define SDMA1_GFX_RB_CNTL__RB_PRIV_MASK 0x00800000L
++#define SDMA1_GFX_RB_CNTL__RB_VMID_MASK 0x0F000000L
++//SDMA1_GFX_RB_BASE
++#define SDMA1_GFX_RB_BASE__ADDR__SHIFT 0x0
++#define SDMA1_GFX_RB_BASE__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_GFX_RB_BASE_HI
++#define SDMA1_GFX_RB_BASE_HI__ADDR__SHIFT 0x0
++#define SDMA1_GFX_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
++//SDMA1_GFX_RB_RPTR
++#define SDMA1_GFX_RB_RPTR__OFFSET__SHIFT 0x0
++#define SDMA1_GFX_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
++//SDMA1_GFX_RB_RPTR_HI
++#define SDMA1_GFX_RB_RPTR_HI__OFFSET__SHIFT 0x0
++#define SDMA1_GFX_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA1_GFX_RB_WPTR
++#define SDMA1_GFX_RB_WPTR__OFFSET__SHIFT 0x0
++#define SDMA1_GFX_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
++//SDMA1_GFX_RB_WPTR_HI
++#define SDMA1_GFX_RB_WPTR_HI__OFFSET__SHIFT 0x0
++#define SDMA1_GFX_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA1_GFX_RB_WPTR_POLL_CNTL
++#define SDMA1_GFX_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
++#define SDMA1_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
++#define SDMA1_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
++#define SDMA1_GFX_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
++#define SDMA1_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
++#define SDMA1_GFX_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
++#define SDMA1_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
++#define SDMA1_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
++#define SDMA1_GFX_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
++#define SDMA1_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
++//SDMA1_GFX_RB_RPTR_ADDR_HI
++#define SDMA1_GFX_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA1_GFX_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_GFX_RB_RPTR_ADDR_LO
++#define SDMA1_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
++#define SDMA1_GFX_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA1_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
++#define SDMA1_GFX_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA1_GFX_IB_CNTL
++#define SDMA1_GFX_IB_CNTL__IB_ENABLE__SHIFT 0x0
++#define SDMA1_GFX_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
++#define SDMA1_GFX_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
++#define SDMA1_GFX_IB_CNTL__CMD_VMID__SHIFT 0x10
++#define SDMA1_GFX_IB_CNTL__IB_ENABLE_MASK 0x00000001L
++#define SDMA1_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
++#define SDMA1_GFX_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
++#define SDMA1_GFX_IB_CNTL__CMD_VMID_MASK 0x000F0000L
++//SDMA1_GFX_IB_RPTR
++#define SDMA1_GFX_IB_RPTR__OFFSET__SHIFT 0x2
++#define SDMA1_GFX_IB_RPTR__OFFSET_MASK 0x003FFFFCL
++//SDMA1_GFX_IB_OFFSET
++#define SDMA1_GFX_IB_OFFSET__OFFSET__SHIFT 0x2
++#define SDMA1_GFX_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
++//SDMA1_GFX_IB_BASE_LO
++#define SDMA1_GFX_IB_BASE_LO__ADDR__SHIFT 0x5
++#define SDMA1_GFX_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
++//SDMA1_GFX_IB_BASE_HI
++#define SDMA1_GFX_IB_BASE_HI__ADDR__SHIFT 0x0
++#define SDMA1_GFX_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_GFX_IB_SIZE
++#define SDMA1_GFX_IB_SIZE__SIZE__SHIFT 0x0
++#define SDMA1_GFX_IB_SIZE__SIZE_MASK 0x000FFFFFL
++//SDMA1_GFX_SKIP_CNTL
++#define SDMA1_GFX_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
++#define SDMA1_GFX_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
++//SDMA1_GFX_CONTEXT_STATUS
++#define SDMA1_GFX_CONTEXT_STATUS__SELECTED__SHIFT 0x0
++#define SDMA1_GFX_CONTEXT_STATUS__IDLE__SHIFT 0x2
++#define SDMA1_GFX_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
++#define SDMA1_GFX_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
++#define SDMA1_GFX_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
++#define SDMA1_GFX_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
++#define SDMA1_GFX_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
++#define SDMA1_GFX_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
++#define SDMA1_GFX_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
++#define SDMA1_GFX_CONTEXT_STATUS__IDLE_MASK 0x00000004L
++#define SDMA1_GFX_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
++#define SDMA1_GFX_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
++#define SDMA1_GFX_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
++#define SDMA1_GFX_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
++#define SDMA1_GFX_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
++#define SDMA1_GFX_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
++//SDMA1_GFX_DOORBELL
++#define SDMA1_GFX_DOORBELL__ENABLE__SHIFT 0x1c
++#define SDMA1_GFX_DOORBELL__CAPTURED__SHIFT 0x1e
++#define SDMA1_GFX_DOORBELL__ENABLE_MASK 0x10000000L
++#define SDMA1_GFX_DOORBELL__CAPTURED_MASK 0x40000000L
++//SDMA1_GFX_CONTEXT_CNTL
++#define SDMA1_GFX_CONTEXT_CNTL__RESUME_CTX__SHIFT 0x10
++#define SDMA1_GFX_CONTEXT_CNTL__RESUME_CTX_MASK 0x00010000L
++//SDMA1_GFX_STATUS
++#define SDMA1_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
++#define SDMA1_GFX_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
++#define SDMA1_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
++#define SDMA1_GFX_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
++//SDMA1_GFX_DOORBELL_LOG
++#define SDMA1_GFX_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
++#define SDMA1_GFX_DOORBELL_LOG__DATA__SHIFT 0x2
++#define SDMA1_GFX_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
++#define SDMA1_GFX_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
++//SDMA1_GFX_WATERMARK
++#define SDMA1_GFX_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
++#define SDMA1_GFX_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
++#define SDMA1_GFX_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
++#define SDMA1_GFX_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
++//SDMA1_GFX_DOORBELL_OFFSET
++#define SDMA1_GFX_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
++#define SDMA1_GFX_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
++//SDMA1_GFX_CSA_ADDR_LO
++#define SDMA1_GFX_CSA_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA1_GFX_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA1_GFX_CSA_ADDR_HI
++#define SDMA1_GFX_CSA_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA1_GFX_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_GFX_IB_SUB_REMAIN
++#define SDMA1_GFX_IB_SUB_REMAIN__SIZE__SHIFT 0x0
++#define SDMA1_GFX_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
++//SDMA1_GFX_PREEMPT
++#define SDMA1_GFX_PREEMPT__IB_PREEMPT__SHIFT 0x0
++#define SDMA1_GFX_PREEMPT__IB_PREEMPT_MASK 0x00000001L
++//SDMA1_GFX_DUMMY_REG
++#define SDMA1_GFX_DUMMY_REG__DUMMY__SHIFT 0x0
++#define SDMA1_GFX_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
++//SDMA1_GFX_RB_WPTR_POLL_ADDR_HI
++#define SDMA1_GFX_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA1_GFX_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_GFX_RB_WPTR_POLL_ADDR_LO
++#define SDMA1_GFX_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA1_GFX_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA1_GFX_RB_AQL_CNTL
++#define SDMA1_GFX_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
++#define SDMA1_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
++#define SDMA1_GFX_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
++#define SDMA1_GFX_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
++#define SDMA1_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
++#define SDMA1_GFX_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
++//SDMA1_GFX_MINOR_PTR_UPDATE
++#define SDMA1_GFX_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
++#define SDMA1_GFX_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
++//SDMA1_GFX_MIDCMD_DATA0
++#define SDMA1_GFX_MIDCMD_DATA0__DATA0__SHIFT 0x0
++#define SDMA1_GFX_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
++//SDMA1_GFX_MIDCMD_DATA1
++#define SDMA1_GFX_MIDCMD_DATA1__DATA1__SHIFT 0x0
++#define SDMA1_GFX_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
++//SDMA1_GFX_MIDCMD_DATA2
++#define SDMA1_GFX_MIDCMD_DATA2__DATA2__SHIFT 0x0
++#define SDMA1_GFX_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
++//SDMA1_GFX_MIDCMD_DATA3
++#define SDMA1_GFX_MIDCMD_DATA3__DATA3__SHIFT 0x0
++#define SDMA1_GFX_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
++//SDMA1_GFX_MIDCMD_DATA4
++#define SDMA1_GFX_MIDCMD_DATA4__DATA4__SHIFT 0x0
++#define SDMA1_GFX_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
++//SDMA1_GFX_MIDCMD_DATA5
++#define SDMA1_GFX_MIDCMD_DATA5__DATA5__SHIFT 0x0
++#define SDMA1_GFX_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
++//SDMA1_GFX_MIDCMD_DATA6
++#define SDMA1_GFX_MIDCMD_DATA6__DATA6__SHIFT 0x0
++#define SDMA1_GFX_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
++//SDMA1_GFX_MIDCMD_DATA7
++#define SDMA1_GFX_MIDCMD_DATA7__DATA7__SHIFT 0x0
++#define SDMA1_GFX_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
++//SDMA1_GFX_MIDCMD_DATA8
++#define SDMA1_GFX_MIDCMD_DATA8__DATA8__SHIFT 0x0
++#define SDMA1_GFX_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
++//SDMA1_GFX_MIDCMD_CNTL
++#define SDMA1_GFX_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
++#define SDMA1_GFX_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
++#define SDMA1_GFX_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
++#define SDMA1_GFX_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
++#define SDMA1_GFX_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
++#define SDMA1_GFX_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
++#define SDMA1_GFX_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
++#define SDMA1_GFX_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
++//SDMA1_PAGE_RB_CNTL
++#define SDMA1_PAGE_RB_CNTL__RB_ENABLE__SHIFT 0x0
++#define SDMA1_PAGE_RB_CNTL__RB_SIZE__SHIFT 0x1
++#define SDMA1_PAGE_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
++#define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
++#define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
++#define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
++#define SDMA1_PAGE_RB_CNTL__RB_PRIV__SHIFT 0x17
++#define SDMA1_PAGE_RB_CNTL__RB_VMID__SHIFT 0x18
++#define SDMA1_PAGE_RB_CNTL__RB_ENABLE_MASK 0x00000001L
++#define SDMA1_PAGE_RB_CNTL__RB_SIZE_MASK 0x0000003EL
++#define SDMA1_PAGE_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
++#define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
++#define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
++#define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
++#define SDMA1_PAGE_RB_CNTL__RB_PRIV_MASK 0x00800000L
++#define SDMA1_PAGE_RB_CNTL__RB_VMID_MASK 0x0F000000L
++//SDMA1_PAGE_RB_BASE
++#define SDMA1_PAGE_RB_BASE__ADDR__SHIFT 0x0
++#define SDMA1_PAGE_RB_BASE__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_PAGE_RB_BASE_HI
++#define SDMA1_PAGE_RB_BASE_HI__ADDR__SHIFT 0x0
++#define SDMA1_PAGE_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
++//SDMA1_PAGE_RB_RPTR
++#define SDMA1_PAGE_RB_RPTR__OFFSET__SHIFT 0x0
++#define SDMA1_PAGE_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
++//SDMA1_PAGE_RB_RPTR_HI
++#define SDMA1_PAGE_RB_RPTR_HI__OFFSET__SHIFT 0x0
++#define SDMA1_PAGE_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA1_PAGE_RB_WPTR
++#define SDMA1_PAGE_RB_WPTR__OFFSET__SHIFT 0x0
++#define SDMA1_PAGE_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
++//SDMA1_PAGE_RB_WPTR_HI
++#define SDMA1_PAGE_RB_WPTR_HI__OFFSET__SHIFT 0x0
++#define SDMA1_PAGE_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA1_PAGE_RB_WPTR_POLL_CNTL
++#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
++#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
++#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
++#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
++#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
++#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
++#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
++#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
++#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
++#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
++//SDMA1_PAGE_RB_RPTR_ADDR_HI
++#define SDMA1_PAGE_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA1_PAGE_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_PAGE_RB_RPTR_ADDR_LO
++#define SDMA1_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
++#define SDMA1_PAGE_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA1_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
++#define SDMA1_PAGE_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA1_PAGE_IB_CNTL
++#define SDMA1_PAGE_IB_CNTL__IB_ENABLE__SHIFT 0x0
++#define SDMA1_PAGE_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
++#define SDMA1_PAGE_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
++#define SDMA1_PAGE_IB_CNTL__CMD_VMID__SHIFT 0x10
++#define SDMA1_PAGE_IB_CNTL__IB_ENABLE_MASK 0x00000001L
++#define SDMA1_PAGE_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
++#define SDMA1_PAGE_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
++#define SDMA1_PAGE_IB_CNTL__CMD_VMID_MASK 0x000F0000L
++//SDMA1_PAGE_IB_RPTR
++#define SDMA1_PAGE_IB_RPTR__OFFSET__SHIFT 0x2
++#define SDMA1_PAGE_IB_RPTR__OFFSET_MASK 0x003FFFFCL
++//SDMA1_PAGE_IB_OFFSET
++#define SDMA1_PAGE_IB_OFFSET__OFFSET__SHIFT 0x2
++#define SDMA1_PAGE_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
++//SDMA1_PAGE_IB_BASE_LO
++#define SDMA1_PAGE_IB_BASE_LO__ADDR__SHIFT 0x5
++#define SDMA1_PAGE_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
++//SDMA1_PAGE_IB_BASE_HI
++#define SDMA1_PAGE_IB_BASE_HI__ADDR__SHIFT 0x0
++#define SDMA1_PAGE_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_PAGE_IB_SIZE
++#define SDMA1_PAGE_IB_SIZE__SIZE__SHIFT 0x0
++#define SDMA1_PAGE_IB_SIZE__SIZE_MASK 0x000FFFFFL
++//SDMA1_PAGE_SKIP_CNTL
++#define SDMA1_PAGE_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
++#define SDMA1_PAGE_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
++//SDMA1_PAGE_CONTEXT_STATUS
++#define SDMA1_PAGE_CONTEXT_STATUS__SELECTED__SHIFT 0x0
++#define SDMA1_PAGE_CONTEXT_STATUS__IDLE__SHIFT 0x2
++#define SDMA1_PAGE_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
++#define SDMA1_PAGE_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
++#define SDMA1_PAGE_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
++#define SDMA1_PAGE_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
++#define SDMA1_PAGE_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
++#define SDMA1_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
++#define SDMA1_PAGE_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
++#define SDMA1_PAGE_CONTEXT_STATUS__IDLE_MASK 0x00000004L
++#define SDMA1_PAGE_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
++#define SDMA1_PAGE_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
++#define SDMA1_PAGE_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
++#define SDMA1_PAGE_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
++#define SDMA1_PAGE_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
++#define SDMA1_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
++//SDMA1_PAGE_DOORBELL
++#define SDMA1_PAGE_DOORBELL__ENABLE__SHIFT 0x1c
++#define SDMA1_PAGE_DOORBELL__CAPTURED__SHIFT 0x1e
++#define SDMA1_PAGE_DOORBELL__ENABLE_MASK 0x10000000L
++#define SDMA1_PAGE_DOORBELL__CAPTURED_MASK 0x40000000L
++//SDMA1_PAGE_STATUS
++#define SDMA1_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
++#define SDMA1_PAGE_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
++#define SDMA1_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
++#define SDMA1_PAGE_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
++//SDMA1_PAGE_DOORBELL_LOG
++#define SDMA1_PAGE_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
++#define SDMA1_PAGE_DOORBELL_LOG__DATA__SHIFT 0x2
++#define SDMA1_PAGE_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
++#define SDMA1_PAGE_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
++//SDMA1_PAGE_WATERMARK
++#define SDMA1_PAGE_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
++#define SDMA1_PAGE_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
++#define SDMA1_PAGE_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
++#define SDMA1_PAGE_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
++//SDMA1_PAGE_DOORBELL_OFFSET
++#define SDMA1_PAGE_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
++#define SDMA1_PAGE_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
++//SDMA1_PAGE_CSA_ADDR_LO
++#define SDMA1_PAGE_CSA_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA1_PAGE_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA1_PAGE_CSA_ADDR_HI
++#define SDMA1_PAGE_CSA_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA1_PAGE_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_PAGE_IB_SUB_REMAIN
++#define SDMA1_PAGE_IB_SUB_REMAIN__SIZE__SHIFT 0x0
++#define SDMA1_PAGE_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
++//SDMA1_PAGE_PREEMPT
++#define SDMA1_PAGE_PREEMPT__IB_PREEMPT__SHIFT 0x0
++#define SDMA1_PAGE_PREEMPT__IB_PREEMPT_MASK 0x00000001L
++//SDMA1_PAGE_DUMMY_REG
++#define SDMA1_PAGE_DUMMY_REG__DUMMY__SHIFT 0x0
++#define SDMA1_PAGE_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
++//SDMA1_PAGE_RB_WPTR_POLL_ADDR_HI
++#define SDMA1_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA1_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_PAGE_RB_WPTR_POLL_ADDR_LO
++#define SDMA1_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA1_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA1_PAGE_RB_AQL_CNTL
++#define SDMA1_PAGE_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
++#define SDMA1_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
++#define SDMA1_PAGE_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
++#define SDMA1_PAGE_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
++#define SDMA1_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
++#define SDMA1_PAGE_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
++//SDMA1_PAGE_MINOR_PTR_UPDATE
++#define SDMA1_PAGE_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
++#define SDMA1_PAGE_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
++//SDMA1_PAGE_MIDCMD_DATA0
++#define SDMA1_PAGE_MIDCMD_DATA0__DATA0__SHIFT 0x0
++#define SDMA1_PAGE_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
++//SDMA1_PAGE_MIDCMD_DATA1
++#define SDMA1_PAGE_MIDCMD_DATA1__DATA1__SHIFT 0x0
++#define SDMA1_PAGE_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
++//SDMA1_PAGE_MIDCMD_DATA2
++#define SDMA1_PAGE_MIDCMD_DATA2__DATA2__SHIFT 0x0
++#define SDMA1_PAGE_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
++//SDMA1_PAGE_MIDCMD_DATA3
++#define SDMA1_PAGE_MIDCMD_DATA3__DATA3__SHIFT 0x0
++#define SDMA1_PAGE_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
++//SDMA1_PAGE_MIDCMD_DATA4
++#define SDMA1_PAGE_MIDCMD_DATA4__DATA4__SHIFT 0x0
++#define SDMA1_PAGE_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
++//SDMA1_PAGE_MIDCMD_DATA5
++#define SDMA1_PAGE_MIDCMD_DATA5__DATA5__SHIFT 0x0
++#define SDMA1_PAGE_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
++//SDMA1_PAGE_MIDCMD_DATA6
++#define SDMA1_PAGE_MIDCMD_DATA6__DATA6__SHIFT 0x0
++#define SDMA1_PAGE_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
++//SDMA1_PAGE_MIDCMD_DATA7
++#define SDMA1_PAGE_MIDCMD_DATA7__DATA7__SHIFT 0x0
++#define SDMA1_PAGE_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
++//SDMA1_PAGE_MIDCMD_DATA8
++#define SDMA1_PAGE_MIDCMD_DATA8__DATA8__SHIFT 0x0
++#define SDMA1_PAGE_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
++//SDMA1_PAGE_MIDCMD_CNTL
++#define SDMA1_PAGE_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
++#define SDMA1_PAGE_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
++#define SDMA1_PAGE_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
++#define SDMA1_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
++#define SDMA1_PAGE_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
++#define SDMA1_PAGE_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
++#define SDMA1_PAGE_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
++#define SDMA1_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
++//SDMA1_RLC0_RB_CNTL
++#define SDMA1_RLC0_RB_CNTL__RB_ENABLE__SHIFT 0x0
++#define SDMA1_RLC0_RB_CNTL__RB_SIZE__SHIFT 0x1
++#define SDMA1_RLC0_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
++#define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
++#define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
++#define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
++#define SDMA1_RLC0_RB_CNTL__RB_PRIV__SHIFT 0x17
++#define SDMA1_RLC0_RB_CNTL__RB_VMID__SHIFT 0x18
++#define SDMA1_RLC0_RB_CNTL__RB_ENABLE_MASK 0x00000001L
++#define SDMA1_RLC0_RB_CNTL__RB_SIZE_MASK 0x0000003EL
++#define SDMA1_RLC0_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
++#define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
++#define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
++#define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
++#define SDMA1_RLC0_RB_CNTL__RB_PRIV_MASK 0x00800000L
++#define SDMA1_RLC0_RB_CNTL__RB_VMID_MASK 0x0F000000L
++//SDMA1_RLC0_RB_BASE
++#define SDMA1_RLC0_RB_BASE__ADDR__SHIFT 0x0
++#define SDMA1_RLC0_RB_BASE__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_RLC0_RB_BASE_HI
++#define SDMA1_RLC0_RB_BASE_HI__ADDR__SHIFT 0x0
++#define SDMA1_RLC0_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
++//SDMA1_RLC0_RB_RPTR
++#define SDMA1_RLC0_RB_RPTR__OFFSET__SHIFT 0x0
++#define SDMA1_RLC0_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
++//SDMA1_RLC0_RB_RPTR_HI
++#define SDMA1_RLC0_RB_RPTR_HI__OFFSET__SHIFT 0x0
++#define SDMA1_RLC0_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA1_RLC0_RB_WPTR
++#define SDMA1_RLC0_RB_WPTR__OFFSET__SHIFT 0x0
++#define SDMA1_RLC0_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
++//SDMA1_RLC0_RB_WPTR_HI
++#define SDMA1_RLC0_RB_WPTR_HI__OFFSET__SHIFT 0x0
++#define SDMA1_RLC0_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA1_RLC0_RB_WPTR_POLL_CNTL
++#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
++#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
++#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
++#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
++#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
++#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
++#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
++#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
++#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
++#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
++//SDMA1_RLC0_RB_RPTR_ADDR_HI
++#define SDMA1_RLC0_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA1_RLC0_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_RLC0_RB_RPTR_ADDR_LO
++#define SDMA1_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
++#define SDMA1_RLC0_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA1_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
++#define SDMA1_RLC0_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA1_RLC0_IB_CNTL
++#define SDMA1_RLC0_IB_CNTL__IB_ENABLE__SHIFT 0x0
++#define SDMA1_RLC0_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
++#define SDMA1_RLC0_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
++#define SDMA1_RLC0_IB_CNTL__CMD_VMID__SHIFT 0x10
++#define SDMA1_RLC0_IB_CNTL__IB_ENABLE_MASK 0x00000001L
++#define SDMA1_RLC0_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
++#define SDMA1_RLC0_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
++#define SDMA1_RLC0_IB_CNTL__CMD_VMID_MASK 0x000F0000L
++//SDMA1_RLC0_IB_RPTR
++#define SDMA1_RLC0_IB_RPTR__OFFSET__SHIFT 0x2
++#define SDMA1_RLC0_IB_RPTR__OFFSET_MASK 0x003FFFFCL
++//SDMA1_RLC0_IB_OFFSET
++#define SDMA1_RLC0_IB_OFFSET__OFFSET__SHIFT 0x2
++#define SDMA1_RLC0_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
++//SDMA1_RLC0_IB_BASE_LO
++#define SDMA1_RLC0_IB_BASE_LO__ADDR__SHIFT 0x5
++#define SDMA1_RLC0_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
++//SDMA1_RLC0_IB_BASE_HI
++#define SDMA1_RLC0_IB_BASE_HI__ADDR__SHIFT 0x0
++#define SDMA1_RLC0_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_RLC0_IB_SIZE
++#define SDMA1_RLC0_IB_SIZE__SIZE__SHIFT 0x0
++#define SDMA1_RLC0_IB_SIZE__SIZE_MASK 0x000FFFFFL
++//SDMA1_RLC0_SKIP_CNTL
++#define SDMA1_RLC0_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
++#define SDMA1_RLC0_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
++//SDMA1_RLC0_CONTEXT_STATUS
++#define SDMA1_RLC0_CONTEXT_STATUS__SELECTED__SHIFT 0x0
++#define SDMA1_RLC0_CONTEXT_STATUS__IDLE__SHIFT 0x2
++#define SDMA1_RLC0_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
++#define SDMA1_RLC0_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
++#define SDMA1_RLC0_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
++#define SDMA1_RLC0_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
++#define SDMA1_RLC0_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
++#define SDMA1_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
++#define SDMA1_RLC0_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
++#define SDMA1_RLC0_CONTEXT_STATUS__IDLE_MASK 0x00000004L
++#define SDMA1_RLC0_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
++#define SDMA1_RLC0_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
++#define SDMA1_RLC0_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
++#define SDMA1_RLC0_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
++#define SDMA1_RLC0_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
++#define SDMA1_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
++//SDMA1_RLC0_DOORBELL
++#define SDMA1_RLC0_DOORBELL__ENABLE__SHIFT 0x1c
++#define SDMA1_RLC0_DOORBELL__CAPTURED__SHIFT 0x1e
++#define SDMA1_RLC0_DOORBELL__ENABLE_MASK 0x10000000L
++#define SDMA1_RLC0_DOORBELL__CAPTURED_MASK 0x40000000L
++//SDMA1_RLC0_STATUS
++#define SDMA1_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
++#define SDMA1_RLC0_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
++#define SDMA1_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
++#define SDMA1_RLC0_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
++//SDMA1_RLC0_DOORBELL_LOG
++#define SDMA1_RLC0_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
++#define SDMA1_RLC0_DOORBELL_LOG__DATA__SHIFT 0x2
++#define SDMA1_RLC0_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
++#define SDMA1_RLC0_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
++//SDMA1_RLC0_WATERMARK
++#define SDMA1_RLC0_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
++#define SDMA1_RLC0_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
++#define SDMA1_RLC0_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
++#define SDMA1_RLC0_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
++//SDMA1_RLC0_DOORBELL_OFFSET
++#define SDMA1_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
++#define SDMA1_RLC0_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
++//SDMA1_RLC0_CSA_ADDR_LO
++#define SDMA1_RLC0_CSA_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA1_RLC0_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA1_RLC0_CSA_ADDR_HI
++#define SDMA1_RLC0_CSA_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA1_RLC0_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_RLC0_IB_SUB_REMAIN
++#define SDMA1_RLC0_IB_SUB_REMAIN__SIZE__SHIFT 0x0
++#define SDMA1_RLC0_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
++//SDMA1_RLC0_PREEMPT
++#define SDMA1_RLC0_PREEMPT__IB_PREEMPT__SHIFT 0x0
++#define SDMA1_RLC0_PREEMPT__IB_PREEMPT_MASK 0x00000001L
++//SDMA1_RLC0_DUMMY_REG
++#define SDMA1_RLC0_DUMMY_REG__DUMMY__SHIFT 0x0
++#define SDMA1_RLC0_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
++//SDMA1_RLC0_RB_WPTR_POLL_ADDR_HI
++#define SDMA1_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA1_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_RLC0_RB_WPTR_POLL_ADDR_LO
++#define SDMA1_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA1_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA1_RLC0_RB_AQL_CNTL
++#define SDMA1_RLC0_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
++#define SDMA1_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
++#define SDMA1_RLC0_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
++#define SDMA1_RLC0_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
++#define SDMA1_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
++#define SDMA1_RLC0_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
++//SDMA1_RLC0_MINOR_PTR_UPDATE
++#define SDMA1_RLC0_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
++#define SDMA1_RLC0_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
++//SDMA1_RLC0_MIDCMD_DATA0
++#define SDMA1_RLC0_MIDCMD_DATA0__DATA0__SHIFT 0x0
++#define SDMA1_RLC0_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
++//SDMA1_RLC0_MIDCMD_DATA1
++#define SDMA1_RLC0_MIDCMD_DATA1__DATA1__SHIFT 0x0
++#define SDMA1_RLC0_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
++//SDMA1_RLC0_MIDCMD_DATA2
++#define SDMA1_RLC0_MIDCMD_DATA2__DATA2__SHIFT 0x0
++#define SDMA1_RLC0_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
++//SDMA1_RLC0_MIDCMD_DATA3
++#define SDMA1_RLC0_MIDCMD_DATA3__DATA3__SHIFT 0x0
++#define SDMA1_RLC0_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
++//SDMA1_RLC0_MIDCMD_DATA4
++#define SDMA1_RLC0_MIDCMD_DATA4__DATA4__SHIFT 0x0
++#define SDMA1_RLC0_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
++//SDMA1_RLC0_MIDCMD_DATA5
++#define SDMA1_RLC0_MIDCMD_DATA5__DATA5__SHIFT 0x0
++#define SDMA1_RLC0_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
++//SDMA1_RLC0_MIDCMD_DATA6
++#define SDMA1_RLC0_MIDCMD_DATA6__DATA6__SHIFT 0x0
++#define SDMA1_RLC0_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
++//SDMA1_RLC0_MIDCMD_DATA7
++#define SDMA1_RLC0_MIDCMD_DATA7__DATA7__SHIFT 0x0
++#define SDMA1_RLC0_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
++//SDMA1_RLC0_MIDCMD_DATA8
++#define SDMA1_RLC0_MIDCMD_DATA8__DATA8__SHIFT 0x0
++#define SDMA1_RLC0_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
++//SDMA1_RLC0_MIDCMD_CNTL
++#define SDMA1_RLC0_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
++#define SDMA1_RLC0_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
++#define SDMA1_RLC0_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
++#define SDMA1_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
++#define SDMA1_RLC0_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
++#define SDMA1_RLC0_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
++#define SDMA1_RLC0_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
++#define SDMA1_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
++//SDMA1_RLC1_RB_CNTL
++#define SDMA1_RLC1_RB_CNTL__RB_ENABLE__SHIFT 0x0
++#define SDMA1_RLC1_RB_CNTL__RB_SIZE__SHIFT 0x1
++#define SDMA1_RLC1_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
++#define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
++#define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
++#define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
++#define SDMA1_RLC1_RB_CNTL__RB_PRIV__SHIFT 0x17
++#define SDMA1_RLC1_RB_CNTL__RB_VMID__SHIFT 0x18
++#define SDMA1_RLC1_RB_CNTL__RB_ENABLE_MASK 0x00000001L
++#define SDMA1_RLC1_RB_CNTL__RB_SIZE_MASK 0x0000003EL
++#define SDMA1_RLC1_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
++#define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
++#define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
++#define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
++#define SDMA1_RLC1_RB_CNTL__RB_PRIV_MASK 0x00800000L
++#define SDMA1_RLC1_RB_CNTL__RB_VMID_MASK 0x0F000000L
++//SDMA1_RLC1_RB_BASE
++#define SDMA1_RLC1_RB_BASE__ADDR__SHIFT 0x0
++#define SDMA1_RLC1_RB_BASE__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_RLC1_RB_BASE_HI
++#define SDMA1_RLC1_RB_BASE_HI__ADDR__SHIFT 0x0
++#define SDMA1_RLC1_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
++//SDMA1_RLC1_RB_RPTR
++#define SDMA1_RLC1_RB_RPTR__OFFSET__SHIFT 0x0
++#define SDMA1_RLC1_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
++//SDMA1_RLC1_RB_RPTR_HI
++#define SDMA1_RLC1_RB_RPTR_HI__OFFSET__SHIFT 0x0
++#define SDMA1_RLC1_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA1_RLC1_RB_WPTR
++#define SDMA1_RLC1_RB_WPTR__OFFSET__SHIFT 0x0
++#define SDMA1_RLC1_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
++//SDMA1_RLC1_RB_WPTR_HI
++#define SDMA1_RLC1_RB_WPTR_HI__OFFSET__SHIFT 0x0
++#define SDMA1_RLC1_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA1_RLC1_RB_WPTR_POLL_CNTL
++#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
++#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
++#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
++#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
++#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
++#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
++#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
++#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
++#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
++#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
++//SDMA1_RLC1_RB_RPTR_ADDR_HI
++#define SDMA1_RLC1_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA1_RLC1_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_RLC1_RB_RPTR_ADDR_LO
++#define SDMA1_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
++#define SDMA1_RLC1_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA1_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
++#define SDMA1_RLC1_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA1_RLC1_IB_CNTL
++#define SDMA1_RLC1_IB_CNTL__IB_ENABLE__SHIFT 0x0
++#define SDMA1_RLC1_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
++#define SDMA1_RLC1_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
++#define SDMA1_RLC1_IB_CNTL__CMD_VMID__SHIFT 0x10
++#define SDMA1_RLC1_IB_CNTL__IB_ENABLE_MASK 0x00000001L
++#define SDMA1_RLC1_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
++#define SDMA1_RLC1_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
++#define SDMA1_RLC1_IB_CNTL__CMD_VMID_MASK 0x000F0000L
++//SDMA1_RLC1_IB_RPTR
++#define SDMA1_RLC1_IB_RPTR__OFFSET__SHIFT 0x2
++#define SDMA1_RLC1_IB_RPTR__OFFSET_MASK 0x003FFFFCL
++//SDMA1_RLC1_IB_OFFSET
++#define SDMA1_RLC1_IB_OFFSET__OFFSET__SHIFT 0x2
++#define SDMA1_RLC1_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
++//SDMA1_RLC1_IB_BASE_LO
++#define SDMA1_RLC1_IB_BASE_LO__ADDR__SHIFT 0x5
++#define SDMA1_RLC1_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
++//SDMA1_RLC1_IB_BASE_HI
++#define SDMA1_RLC1_IB_BASE_HI__ADDR__SHIFT 0x0
++#define SDMA1_RLC1_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_RLC1_IB_SIZE
++#define SDMA1_RLC1_IB_SIZE__SIZE__SHIFT 0x0
++#define SDMA1_RLC1_IB_SIZE__SIZE_MASK 0x000FFFFFL
++//SDMA1_RLC1_SKIP_CNTL
++#define SDMA1_RLC1_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
++#define SDMA1_RLC1_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
++//SDMA1_RLC1_CONTEXT_STATUS
++#define SDMA1_RLC1_CONTEXT_STATUS__SELECTED__SHIFT 0x0
++#define SDMA1_RLC1_CONTEXT_STATUS__IDLE__SHIFT 0x2
++#define SDMA1_RLC1_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
++#define SDMA1_RLC1_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
++#define SDMA1_RLC1_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
++#define SDMA1_RLC1_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
++#define SDMA1_RLC1_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
++#define SDMA1_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
++#define SDMA1_RLC1_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
++#define SDMA1_RLC1_CONTEXT_STATUS__IDLE_MASK 0x00000004L
++#define SDMA1_RLC1_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
++#define SDMA1_RLC1_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
++#define SDMA1_RLC1_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
++#define SDMA1_RLC1_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
++#define SDMA1_RLC1_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
++#define SDMA1_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
++//SDMA1_RLC1_DOORBELL
++#define SDMA1_RLC1_DOORBELL__ENABLE__SHIFT 0x1c
++#define SDMA1_RLC1_DOORBELL__CAPTURED__SHIFT 0x1e
++#define SDMA1_RLC1_DOORBELL__ENABLE_MASK 0x10000000L
++#define SDMA1_RLC1_DOORBELL__CAPTURED_MASK 0x40000000L
++//SDMA1_RLC1_STATUS
++#define SDMA1_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
++#define SDMA1_RLC1_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
++#define SDMA1_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
++#define SDMA1_RLC1_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
++//SDMA1_RLC1_DOORBELL_LOG
++#define SDMA1_RLC1_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
++#define SDMA1_RLC1_DOORBELL_LOG__DATA__SHIFT 0x2
++#define SDMA1_RLC1_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
++#define SDMA1_RLC1_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
++//SDMA1_RLC1_WATERMARK
++#define SDMA1_RLC1_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
++#define SDMA1_RLC1_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
++#define SDMA1_RLC1_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
++#define SDMA1_RLC1_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
++//SDMA1_RLC1_DOORBELL_OFFSET
++#define SDMA1_RLC1_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
++#define SDMA1_RLC1_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
++//SDMA1_RLC1_CSA_ADDR_LO
++#define SDMA1_RLC1_CSA_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA1_RLC1_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA1_RLC1_CSA_ADDR_HI
++#define SDMA1_RLC1_CSA_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA1_RLC1_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_RLC1_IB_SUB_REMAIN
++#define SDMA1_RLC1_IB_SUB_REMAIN__SIZE__SHIFT 0x0
++#define SDMA1_RLC1_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
++//SDMA1_RLC1_PREEMPT
++#define SDMA1_RLC1_PREEMPT__IB_PREEMPT__SHIFT 0x0
++#define SDMA1_RLC1_PREEMPT__IB_PREEMPT_MASK 0x00000001L
++//SDMA1_RLC1_DUMMY_REG
++#define SDMA1_RLC1_DUMMY_REG__DUMMY__SHIFT 0x0
++#define SDMA1_RLC1_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
++//SDMA1_RLC1_RB_WPTR_POLL_ADDR_HI
++#define SDMA1_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA1_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_RLC1_RB_WPTR_POLL_ADDR_LO
++#define SDMA1_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA1_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA1_RLC1_RB_AQL_CNTL
++#define SDMA1_RLC1_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
++#define SDMA1_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
++#define SDMA1_RLC1_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
++#define SDMA1_RLC1_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
++#define SDMA1_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
++#define SDMA1_RLC1_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
++//SDMA1_RLC1_MINOR_PTR_UPDATE
++#define SDMA1_RLC1_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
++#define SDMA1_RLC1_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
++//SDMA1_RLC1_MIDCMD_DATA0
++#define SDMA1_RLC1_MIDCMD_DATA0__DATA0__SHIFT 0x0
++#define SDMA1_RLC1_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
++//SDMA1_RLC1_MIDCMD_DATA1
++#define SDMA1_RLC1_MIDCMD_DATA1__DATA1__SHIFT 0x0
++#define SDMA1_RLC1_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
++//SDMA1_RLC1_MIDCMD_DATA2
++#define SDMA1_RLC1_MIDCMD_DATA2__DATA2__SHIFT 0x0
++#define SDMA1_RLC1_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
++//SDMA1_RLC1_MIDCMD_DATA3
++#define SDMA1_RLC1_MIDCMD_DATA3__DATA3__SHIFT 0x0
++#define SDMA1_RLC1_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
++//SDMA1_RLC1_MIDCMD_DATA4
++#define SDMA1_RLC1_MIDCMD_DATA4__DATA4__SHIFT 0x0
++#define SDMA1_RLC1_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
++//SDMA1_RLC1_MIDCMD_DATA5
++#define SDMA1_RLC1_MIDCMD_DATA5__DATA5__SHIFT 0x0
++#define SDMA1_RLC1_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
++//SDMA1_RLC1_MIDCMD_DATA6
++#define SDMA1_RLC1_MIDCMD_DATA6__DATA6__SHIFT 0x0
++#define SDMA1_RLC1_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
++//SDMA1_RLC1_MIDCMD_DATA7
++#define SDMA1_RLC1_MIDCMD_DATA7__DATA7__SHIFT 0x0
++#define SDMA1_RLC1_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
++//SDMA1_RLC1_MIDCMD_DATA8
++#define SDMA1_RLC1_MIDCMD_DATA8__DATA8__SHIFT 0x0
++#define SDMA1_RLC1_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
++//SDMA1_RLC1_MIDCMD_CNTL
++#define SDMA1_RLC1_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
++#define SDMA1_RLC1_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
++#define SDMA1_RLC1_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
++#define SDMA1_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
++#define SDMA1_RLC1_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
++#define SDMA1_RLC1_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
++#define SDMA1_RLC1_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
++#define SDMA1_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
++//SDMA1_RLC2_RB_CNTL
++#define SDMA1_RLC2_RB_CNTL__RB_ENABLE__SHIFT 0x0
++#define SDMA1_RLC2_RB_CNTL__RB_SIZE__SHIFT 0x1
++#define SDMA1_RLC2_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
++#define SDMA1_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
++#define SDMA1_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
++#define SDMA1_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
++#define SDMA1_RLC2_RB_CNTL__RB_PRIV__SHIFT 0x17
++#define SDMA1_RLC2_RB_CNTL__RB_VMID__SHIFT 0x18
++#define SDMA1_RLC2_RB_CNTL__RB_ENABLE_MASK 0x00000001L
++#define SDMA1_RLC2_RB_CNTL__RB_SIZE_MASK 0x0000003EL
++#define SDMA1_RLC2_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
++#define SDMA1_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
++#define SDMA1_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
++#define SDMA1_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
++#define SDMA1_RLC2_RB_CNTL__RB_PRIV_MASK 0x00800000L
++#define SDMA1_RLC2_RB_CNTL__RB_VMID_MASK 0x0F000000L
++//SDMA1_RLC2_RB_BASE
++#define SDMA1_RLC2_RB_BASE__ADDR__SHIFT 0x0
++#define SDMA1_RLC2_RB_BASE__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_RLC2_RB_BASE_HI
++#define SDMA1_RLC2_RB_BASE_HI__ADDR__SHIFT 0x0
++#define SDMA1_RLC2_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
++//SDMA1_RLC2_RB_RPTR
++#define SDMA1_RLC2_RB_RPTR__OFFSET__SHIFT 0x0
++#define SDMA1_RLC2_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
++//SDMA1_RLC2_RB_RPTR_HI
++#define SDMA1_RLC2_RB_RPTR_HI__OFFSET__SHIFT 0x0
++#define SDMA1_RLC2_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA1_RLC2_RB_WPTR
++#define SDMA1_RLC2_RB_WPTR__OFFSET__SHIFT 0x0
++#define SDMA1_RLC2_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
++//SDMA1_RLC2_RB_WPTR_HI
++#define SDMA1_RLC2_RB_WPTR_HI__OFFSET__SHIFT 0x0
++#define SDMA1_RLC2_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA1_RLC2_RB_WPTR_POLL_CNTL
++#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
++#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
++#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
++#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
++#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
++#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
++#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
++#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
++#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
++#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
++//SDMA1_RLC2_RB_RPTR_ADDR_HI
++#define SDMA1_RLC2_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA1_RLC2_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_RLC2_RB_RPTR_ADDR_LO
++#define SDMA1_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
++#define SDMA1_RLC2_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA1_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
++#define SDMA1_RLC2_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA1_RLC2_IB_CNTL
++#define SDMA1_RLC2_IB_CNTL__IB_ENABLE__SHIFT 0x0
++#define SDMA1_RLC2_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
++#define SDMA1_RLC2_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
++#define SDMA1_RLC2_IB_CNTL__CMD_VMID__SHIFT 0x10
++#define SDMA1_RLC2_IB_CNTL__IB_ENABLE_MASK 0x00000001L
++#define SDMA1_RLC2_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
++#define SDMA1_RLC2_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
++#define SDMA1_RLC2_IB_CNTL__CMD_VMID_MASK 0x000F0000L
++//SDMA1_RLC2_IB_RPTR
++#define SDMA1_RLC2_IB_RPTR__OFFSET__SHIFT 0x2
++#define SDMA1_RLC2_IB_RPTR__OFFSET_MASK 0x003FFFFCL
++//SDMA1_RLC2_IB_OFFSET
++#define SDMA1_RLC2_IB_OFFSET__OFFSET__SHIFT 0x2
++#define SDMA1_RLC2_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
++//SDMA1_RLC2_IB_BASE_LO
++#define SDMA1_RLC2_IB_BASE_LO__ADDR__SHIFT 0x5
++#define SDMA1_RLC2_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
++//SDMA1_RLC2_IB_BASE_HI
++#define SDMA1_RLC2_IB_BASE_HI__ADDR__SHIFT 0x0
++#define SDMA1_RLC2_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_RLC2_IB_SIZE
++#define SDMA1_RLC2_IB_SIZE__SIZE__SHIFT 0x0
++#define SDMA1_RLC2_IB_SIZE__SIZE_MASK 0x000FFFFFL
++//SDMA1_RLC2_SKIP_CNTL
++#define SDMA1_RLC2_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
++#define SDMA1_RLC2_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
++//SDMA1_RLC2_CONTEXT_STATUS
++#define SDMA1_RLC2_CONTEXT_STATUS__SELECTED__SHIFT 0x0
++#define SDMA1_RLC2_CONTEXT_STATUS__IDLE__SHIFT 0x2
++#define SDMA1_RLC2_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
++#define SDMA1_RLC2_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
++#define SDMA1_RLC2_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
++#define SDMA1_RLC2_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
++#define SDMA1_RLC2_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
++#define SDMA1_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
++#define SDMA1_RLC2_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
++#define SDMA1_RLC2_CONTEXT_STATUS__IDLE_MASK 0x00000004L
++#define SDMA1_RLC2_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
++#define SDMA1_RLC2_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
++#define SDMA1_RLC2_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
++#define SDMA1_RLC2_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
++#define SDMA1_RLC2_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
++#define SDMA1_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
++//SDMA1_RLC2_DOORBELL
++#define SDMA1_RLC2_DOORBELL__ENABLE__SHIFT 0x1c
++#define SDMA1_RLC2_DOORBELL__CAPTURED__SHIFT 0x1e
++#define SDMA1_RLC2_DOORBELL__ENABLE_MASK 0x10000000L
++#define SDMA1_RLC2_DOORBELL__CAPTURED_MASK 0x40000000L
++//SDMA1_RLC2_STATUS
++#define SDMA1_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
++#define SDMA1_RLC2_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
++#define SDMA1_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
++#define SDMA1_RLC2_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
++//SDMA1_RLC2_DOORBELL_LOG
++#define SDMA1_RLC2_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
++#define SDMA1_RLC2_DOORBELL_LOG__DATA__SHIFT 0x2
++#define SDMA1_RLC2_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
++#define SDMA1_RLC2_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
++//SDMA1_RLC2_WATERMARK
++#define SDMA1_RLC2_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
++#define SDMA1_RLC2_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
++#define SDMA1_RLC2_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
++#define SDMA1_RLC2_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
++//SDMA1_RLC2_DOORBELL_OFFSET
++#define SDMA1_RLC2_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
++#define SDMA1_RLC2_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
++//SDMA1_RLC2_CSA_ADDR_LO
++#define SDMA1_RLC2_CSA_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA1_RLC2_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA1_RLC2_CSA_ADDR_HI
++#define SDMA1_RLC2_CSA_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA1_RLC2_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_RLC2_IB_SUB_REMAIN
++#define SDMA1_RLC2_IB_SUB_REMAIN__SIZE__SHIFT 0x0
++#define SDMA1_RLC2_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
++//SDMA1_RLC2_PREEMPT
++#define SDMA1_RLC2_PREEMPT__IB_PREEMPT__SHIFT 0x0
++#define SDMA1_RLC2_PREEMPT__IB_PREEMPT_MASK 0x00000001L
++//SDMA1_RLC2_DUMMY_REG
++#define SDMA1_RLC2_DUMMY_REG__DUMMY__SHIFT 0x0
++#define SDMA1_RLC2_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
++//SDMA1_RLC2_RB_WPTR_POLL_ADDR_HI
++#define SDMA1_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA1_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_RLC2_RB_WPTR_POLL_ADDR_LO
++#define SDMA1_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA1_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA1_RLC2_RB_AQL_CNTL
++#define SDMA1_RLC2_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
++#define SDMA1_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
++#define SDMA1_RLC2_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
++#define SDMA1_RLC2_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
++#define SDMA1_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
++#define SDMA1_RLC2_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
++//SDMA1_RLC2_MINOR_PTR_UPDATE
++#define SDMA1_RLC2_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
++#define SDMA1_RLC2_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
++//SDMA1_RLC2_MIDCMD_DATA0
++#define SDMA1_RLC2_MIDCMD_DATA0__DATA0__SHIFT 0x0
++#define SDMA1_RLC2_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
++//SDMA1_RLC2_MIDCMD_DATA1
++#define SDMA1_RLC2_MIDCMD_DATA1__DATA1__SHIFT 0x0
++#define SDMA1_RLC2_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
++//SDMA1_RLC2_MIDCMD_DATA2
++#define SDMA1_RLC2_MIDCMD_DATA2__DATA2__SHIFT 0x0
++#define SDMA1_RLC2_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
++//SDMA1_RLC2_MIDCMD_DATA3
++#define SDMA1_RLC2_MIDCMD_DATA3__DATA3__SHIFT 0x0
++#define SDMA1_RLC2_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
++//SDMA1_RLC2_MIDCMD_DATA4
++#define SDMA1_RLC2_MIDCMD_DATA4__DATA4__SHIFT 0x0
++#define SDMA1_RLC2_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
++//SDMA1_RLC2_MIDCMD_DATA5
++#define SDMA1_RLC2_MIDCMD_DATA5__DATA5__SHIFT 0x0
++#define SDMA1_RLC2_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
++//SDMA1_RLC2_MIDCMD_DATA6
++#define SDMA1_RLC2_MIDCMD_DATA6__DATA6__SHIFT 0x0
++#define SDMA1_RLC2_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
++//SDMA1_RLC2_MIDCMD_DATA7
++#define SDMA1_RLC2_MIDCMD_DATA7__DATA7__SHIFT 0x0
++#define SDMA1_RLC2_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
++//SDMA1_RLC2_MIDCMD_DATA8
++#define SDMA1_RLC2_MIDCMD_DATA8__DATA8__SHIFT 0x0
++#define SDMA1_RLC2_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
++//SDMA1_RLC2_MIDCMD_CNTL
++#define SDMA1_RLC2_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
++#define SDMA1_RLC2_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
++#define SDMA1_RLC2_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
++#define SDMA1_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
++#define SDMA1_RLC2_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
++#define SDMA1_RLC2_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
++#define SDMA1_RLC2_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
++#define SDMA1_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
++//SDMA1_RLC3_RB_CNTL
++#define SDMA1_RLC3_RB_CNTL__RB_ENABLE__SHIFT 0x0
++#define SDMA1_RLC3_RB_CNTL__RB_SIZE__SHIFT 0x1
++#define SDMA1_RLC3_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
++#define SDMA1_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
++#define SDMA1_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
++#define SDMA1_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
++#define SDMA1_RLC3_RB_CNTL__RB_PRIV__SHIFT 0x17
++#define SDMA1_RLC3_RB_CNTL__RB_VMID__SHIFT 0x18
++#define SDMA1_RLC3_RB_CNTL__RB_ENABLE_MASK 0x00000001L
++#define SDMA1_RLC3_RB_CNTL__RB_SIZE_MASK 0x0000003EL
++#define SDMA1_RLC3_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
++#define SDMA1_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
++#define SDMA1_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
++#define SDMA1_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
++#define SDMA1_RLC3_RB_CNTL__RB_PRIV_MASK 0x00800000L
++#define SDMA1_RLC3_RB_CNTL__RB_VMID_MASK 0x0F000000L
++//SDMA1_RLC3_RB_BASE
++#define SDMA1_RLC3_RB_BASE__ADDR__SHIFT 0x0
++#define SDMA1_RLC3_RB_BASE__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_RLC3_RB_BASE_HI
++#define SDMA1_RLC3_RB_BASE_HI__ADDR__SHIFT 0x0
++#define SDMA1_RLC3_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
++//SDMA1_RLC3_RB_RPTR
++#define SDMA1_RLC3_RB_RPTR__OFFSET__SHIFT 0x0
++#define SDMA1_RLC3_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
++//SDMA1_RLC3_RB_RPTR_HI
++#define SDMA1_RLC3_RB_RPTR_HI__OFFSET__SHIFT 0x0
++#define SDMA1_RLC3_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA1_RLC3_RB_WPTR
++#define SDMA1_RLC3_RB_WPTR__OFFSET__SHIFT 0x0
++#define SDMA1_RLC3_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
++//SDMA1_RLC3_RB_WPTR_HI
++#define SDMA1_RLC3_RB_WPTR_HI__OFFSET__SHIFT 0x0
++#define SDMA1_RLC3_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA1_RLC3_RB_WPTR_POLL_CNTL
++#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
++#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
++#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
++#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
++#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
++#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
++#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
++#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
++#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
++#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
++//SDMA1_RLC3_RB_RPTR_ADDR_HI
++#define SDMA1_RLC3_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA1_RLC3_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_RLC3_RB_RPTR_ADDR_LO
++#define SDMA1_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
++#define SDMA1_RLC3_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA1_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
++#define SDMA1_RLC3_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA1_RLC3_IB_CNTL
++#define SDMA1_RLC3_IB_CNTL__IB_ENABLE__SHIFT 0x0
++#define SDMA1_RLC3_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
++#define SDMA1_RLC3_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
++#define SDMA1_RLC3_IB_CNTL__CMD_VMID__SHIFT 0x10
++#define SDMA1_RLC3_IB_CNTL__IB_ENABLE_MASK 0x00000001L
++#define SDMA1_RLC3_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
++#define SDMA1_RLC3_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
++#define SDMA1_RLC3_IB_CNTL__CMD_VMID_MASK 0x000F0000L
++//SDMA1_RLC3_IB_RPTR
++#define SDMA1_RLC3_IB_RPTR__OFFSET__SHIFT 0x2
++#define SDMA1_RLC3_IB_RPTR__OFFSET_MASK 0x003FFFFCL
++//SDMA1_RLC3_IB_OFFSET
++#define SDMA1_RLC3_IB_OFFSET__OFFSET__SHIFT 0x2
++#define SDMA1_RLC3_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
++//SDMA1_RLC3_IB_BASE_LO
++#define SDMA1_RLC3_IB_BASE_LO__ADDR__SHIFT 0x5
++#define SDMA1_RLC3_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
++//SDMA1_RLC3_IB_BASE_HI
++#define SDMA1_RLC3_IB_BASE_HI__ADDR__SHIFT 0x0
++#define SDMA1_RLC3_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_RLC3_IB_SIZE
++#define SDMA1_RLC3_IB_SIZE__SIZE__SHIFT 0x0
++#define SDMA1_RLC3_IB_SIZE__SIZE_MASK 0x000FFFFFL
++//SDMA1_RLC3_SKIP_CNTL
++#define SDMA1_RLC3_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
++#define SDMA1_RLC3_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
++//SDMA1_RLC3_CONTEXT_STATUS
++#define SDMA1_RLC3_CONTEXT_STATUS__SELECTED__SHIFT 0x0
++#define SDMA1_RLC3_CONTEXT_STATUS__IDLE__SHIFT 0x2
++#define SDMA1_RLC3_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
++#define SDMA1_RLC3_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
++#define SDMA1_RLC3_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
++#define SDMA1_RLC3_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
++#define SDMA1_RLC3_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
++#define SDMA1_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
++#define SDMA1_RLC3_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
++#define SDMA1_RLC3_CONTEXT_STATUS__IDLE_MASK 0x00000004L
++#define SDMA1_RLC3_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
++#define SDMA1_RLC3_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
++#define SDMA1_RLC3_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
++#define SDMA1_RLC3_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
++#define SDMA1_RLC3_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
++#define SDMA1_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
++//SDMA1_RLC3_DOORBELL
++#define SDMA1_RLC3_DOORBELL__ENABLE__SHIFT 0x1c
++#define SDMA1_RLC3_DOORBELL__CAPTURED__SHIFT 0x1e
++#define SDMA1_RLC3_DOORBELL__ENABLE_MASK 0x10000000L
++#define SDMA1_RLC3_DOORBELL__CAPTURED_MASK 0x40000000L
++//SDMA1_RLC3_STATUS
++#define SDMA1_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
++#define SDMA1_RLC3_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
++#define SDMA1_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
++#define SDMA1_RLC3_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
++//SDMA1_RLC3_DOORBELL_LOG
++#define SDMA1_RLC3_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
++#define SDMA1_RLC3_DOORBELL_LOG__DATA__SHIFT 0x2
++#define SDMA1_RLC3_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
++#define SDMA1_RLC3_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
++//SDMA1_RLC3_WATERMARK
++#define SDMA1_RLC3_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
++#define SDMA1_RLC3_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
++#define SDMA1_RLC3_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
++#define SDMA1_RLC3_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
++//SDMA1_RLC3_DOORBELL_OFFSET
++#define SDMA1_RLC3_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
++#define SDMA1_RLC3_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
++//SDMA1_RLC3_CSA_ADDR_LO
++#define SDMA1_RLC3_CSA_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA1_RLC3_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA1_RLC3_CSA_ADDR_HI
++#define SDMA1_RLC3_CSA_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA1_RLC3_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_RLC3_IB_SUB_REMAIN
++#define SDMA1_RLC3_IB_SUB_REMAIN__SIZE__SHIFT 0x0
++#define SDMA1_RLC3_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
++//SDMA1_RLC3_PREEMPT
++#define SDMA1_RLC3_PREEMPT__IB_PREEMPT__SHIFT 0x0
++#define SDMA1_RLC3_PREEMPT__IB_PREEMPT_MASK 0x00000001L
++//SDMA1_RLC3_DUMMY_REG
++#define SDMA1_RLC3_DUMMY_REG__DUMMY__SHIFT 0x0
++#define SDMA1_RLC3_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
++//SDMA1_RLC3_RB_WPTR_POLL_ADDR_HI
++#define SDMA1_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA1_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_RLC3_RB_WPTR_POLL_ADDR_LO
++#define SDMA1_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA1_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA1_RLC3_RB_AQL_CNTL
++#define SDMA1_RLC3_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
++#define SDMA1_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
++#define SDMA1_RLC3_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
++#define SDMA1_RLC3_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
++#define SDMA1_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
++#define SDMA1_RLC3_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
++//SDMA1_RLC3_MINOR_PTR_UPDATE
++#define SDMA1_RLC3_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
++#define SDMA1_RLC3_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
++//SDMA1_RLC3_MIDCMD_DATA0
++#define SDMA1_RLC3_MIDCMD_DATA0__DATA0__SHIFT 0x0
++#define SDMA1_RLC3_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
++//SDMA1_RLC3_MIDCMD_DATA1
++#define SDMA1_RLC3_MIDCMD_DATA1__DATA1__SHIFT 0x0
++#define SDMA1_RLC3_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
++//SDMA1_RLC3_MIDCMD_DATA2
++#define SDMA1_RLC3_MIDCMD_DATA2__DATA2__SHIFT 0x0
++#define SDMA1_RLC3_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
++//SDMA1_RLC3_MIDCMD_DATA3
++#define SDMA1_RLC3_MIDCMD_DATA3__DATA3__SHIFT 0x0
++#define SDMA1_RLC3_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
++//SDMA1_RLC3_MIDCMD_DATA4
++#define SDMA1_RLC3_MIDCMD_DATA4__DATA4__SHIFT 0x0
++#define SDMA1_RLC3_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
++//SDMA1_RLC3_MIDCMD_DATA5
++#define SDMA1_RLC3_MIDCMD_DATA5__DATA5__SHIFT 0x0
++#define SDMA1_RLC3_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
++//SDMA1_RLC3_MIDCMD_DATA6
++#define SDMA1_RLC3_MIDCMD_DATA6__DATA6__SHIFT 0x0
++#define SDMA1_RLC3_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
++//SDMA1_RLC3_MIDCMD_DATA7
++#define SDMA1_RLC3_MIDCMD_DATA7__DATA7__SHIFT 0x0
++#define SDMA1_RLC3_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
++//SDMA1_RLC3_MIDCMD_DATA8
++#define SDMA1_RLC3_MIDCMD_DATA8__DATA8__SHIFT 0x0
++#define SDMA1_RLC3_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
++//SDMA1_RLC3_MIDCMD_CNTL
++#define SDMA1_RLC3_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
++#define SDMA1_RLC3_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
++#define SDMA1_RLC3_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
++#define SDMA1_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
++#define SDMA1_RLC3_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
++#define SDMA1_RLC3_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
++#define SDMA1_RLC3_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
++#define SDMA1_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
++//SDMA1_RLC4_RB_CNTL
++#define SDMA1_RLC4_RB_CNTL__RB_ENABLE__SHIFT 0x0
++#define SDMA1_RLC4_RB_CNTL__RB_SIZE__SHIFT 0x1
++#define SDMA1_RLC4_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
++#define SDMA1_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
++#define SDMA1_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
++#define SDMA1_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
++#define SDMA1_RLC4_RB_CNTL__RB_PRIV__SHIFT 0x17
++#define SDMA1_RLC4_RB_CNTL__RB_VMID__SHIFT 0x18
++#define SDMA1_RLC4_RB_CNTL__RB_ENABLE_MASK 0x00000001L
++#define SDMA1_RLC4_RB_CNTL__RB_SIZE_MASK 0x0000003EL
++#define SDMA1_RLC4_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
++#define SDMA1_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
++#define SDMA1_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
++#define SDMA1_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
++#define SDMA1_RLC4_RB_CNTL__RB_PRIV_MASK 0x00800000L
++#define SDMA1_RLC4_RB_CNTL__RB_VMID_MASK 0x0F000000L
++//SDMA1_RLC4_RB_BASE
++#define SDMA1_RLC4_RB_BASE__ADDR__SHIFT 0x0
++#define SDMA1_RLC4_RB_BASE__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_RLC4_RB_BASE_HI
++#define SDMA1_RLC4_RB_BASE_HI__ADDR__SHIFT 0x0
++#define SDMA1_RLC4_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
++//SDMA1_RLC4_RB_RPTR
++#define SDMA1_RLC4_RB_RPTR__OFFSET__SHIFT 0x0
++#define SDMA1_RLC4_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
++//SDMA1_RLC4_RB_RPTR_HI
++#define SDMA1_RLC4_RB_RPTR_HI__OFFSET__SHIFT 0x0
++#define SDMA1_RLC4_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA1_RLC4_RB_WPTR
++#define SDMA1_RLC4_RB_WPTR__OFFSET__SHIFT 0x0
++#define SDMA1_RLC4_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
++//SDMA1_RLC4_RB_WPTR_HI
++#define SDMA1_RLC4_RB_WPTR_HI__OFFSET__SHIFT 0x0
++#define SDMA1_RLC4_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA1_RLC4_RB_WPTR_POLL_CNTL
++#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
++#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
++#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
++#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
++#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
++#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
++#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
++#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
++#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
++#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
++//SDMA1_RLC4_RB_RPTR_ADDR_HI
++#define SDMA1_RLC4_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA1_RLC4_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_RLC4_RB_RPTR_ADDR_LO
++#define SDMA1_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
++#define SDMA1_RLC4_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA1_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
++#define SDMA1_RLC4_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA1_RLC4_IB_CNTL
++#define SDMA1_RLC4_IB_CNTL__IB_ENABLE__SHIFT 0x0
++#define SDMA1_RLC4_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
++#define SDMA1_RLC4_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
++#define SDMA1_RLC4_IB_CNTL__CMD_VMID__SHIFT 0x10
++#define SDMA1_RLC4_IB_CNTL__IB_ENABLE_MASK 0x00000001L
++#define SDMA1_RLC4_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
++#define SDMA1_RLC4_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
++#define SDMA1_RLC4_IB_CNTL__CMD_VMID_MASK 0x000F0000L
++//SDMA1_RLC4_IB_RPTR
++#define SDMA1_RLC4_IB_RPTR__OFFSET__SHIFT 0x2
++#define SDMA1_RLC4_IB_RPTR__OFFSET_MASK 0x003FFFFCL
++//SDMA1_RLC4_IB_OFFSET
++#define SDMA1_RLC4_IB_OFFSET__OFFSET__SHIFT 0x2
++#define SDMA1_RLC4_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
++//SDMA1_RLC4_IB_BASE_LO
++#define SDMA1_RLC4_IB_BASE_LO__ADDR__SHIFT 0x5
++#define SDMA1_RLC4_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
++//SDMA1_RLC4_IB_BASE_HI
++#define SDMA1_RLC4_IB_BASE_HI__ADDR__SHIFT 0x0
++#define SDMA1_RLC4_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_RLC4_IB_SIZE
++#define SDMA1_RLC4_IB_SIZE__SIZE__SHIFT 0x0
++#define SDMA1_RLC4_IB_SIZE__SIZE_MASK 0x000FFFFFL
++//SDMA1_RLC4_SKIP_CNTL
++#define SDMA1_RLC4_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
++#define SDMA1_RLC4_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
++//SDMA1_RLC4_CONTEXT_STATUS
++#define SDMA1_RLC4_CONTEXT_STATUS__SELECTED__SHIFT 0x0
++#define SDMA1_RLC4_CONTEXT_STATUS__IDLE__SHIFT 0x2
++#define SDMA1_RLC4_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
++#define SDMA1_RLC4_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
++#define SDMA1_RLC4_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
++#define SDMA1_RLC4_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
++#define SDMA1_RLC4_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
++#define SDMA1_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
++#define SDMA1_RLC4_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
++#define SDMA1_RLC4_CONTEXT_STATUS__IDLE_MASK 0x00000004L
++#define SDMA1_RLC4_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
++#define SDMA1_RLC4_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
++#define SDMA1_RLC4_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
++#define SDMA1_RLC4_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
++#define SDMA1_RLC4_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
++#define SDMA1_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
++//SDMA1_RLC4_DOORBELL
++#define SDMA1_RLC4_DOORBELL__ENABLE__SHIFT 0x1c
++#define SDMA1_RLC4_DOORBELL__CAPTURED__SHIFT 0x1e
++#define SDMA1_RLC4_DOORBELL__ENABLE_MASK 0x10000000L
++#define SDMA1_RLC4_DOORBELL__CAPTURED_MASK 0x40000000L
++//SDMA1_RLC4_STATUS
++#define SDMA1_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
++#define SDMA1_RLC4_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
++#define SDMA1_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
++#define SDMA1_RLC4_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
++//SDMA1_RLC4_DOORBELL_LOG
++#define SDMA1_RLC4_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
++#define SDMA1_RLC4_DOORBELL_LOG__DATA__SHIFT 0x2
++#define SDMA1_RLC4_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
++#define SDMA1_RLC4_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
++//SDMA1_RLC4_WATERMARK
++#define SDMA1_RLC4_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
++#define SDMA1_RLC4_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
++#define SDMA1_RLC4_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
++#define SDMA1_RLC4_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
++//SDMA1_RLC4_DOORBELL_OFFSET
++#define SDMA1_RLC4_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
++#define SDMA1_RLC4_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
++//SDMA1_RLC4_CSA_ADDR_LO
++#define SDMA1_RLC4_CSA_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA1_RLC4_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA1_RLC4_CSA_ADDR_HI
++#define SDMA1_RLC4_CSA_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA1_RLC4_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_RLC4_IB_SUB_REMAIN
++#define SDMA1_RLC4_IB_SUB_REMAIN__SIZE__SHIFT 0x0
++#define SDMA1_RLC4_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
++//SDMA1_RLC4_PREEMPT
++#define SDMA1_RLC4_PREEMPT__IB_PREEMPT__SHIFT 0x0
++#define SDMA1_RLC4_PREEMPT__IB_PREEMPT_MASK 0x00000001L
++//SDMA1_RLC4_DUMMY_REG
++#define SDMA1_RLC4_DUMMY_REG__DUMMY__SHIFT 0x0
++#define SDMA1_RLC4_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
++//SDMA1_RLC4_RB_WPTR_POLL_ADDR_HI
++#define SDMA1_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA1_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_RLC4_RB_WPTR_POLL_ADDR_LO
++#define SDMA1_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA1_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA1_RLC4_RB_AQL_CNTL
++#define SDMA1_RLC4_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
++#define SDMA1_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
++#define SDMA1_RLC4_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
++#define SDMA1_RLC4_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
++#define SDMA1_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
++#define SDMA1_RLC4_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
++//SDMA1_RLC4_MINOR_PTR_UPDATE
++#define SDMA1_RLC4_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
++#define SDMA1_RLC4_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
++//SDMA1_RLC4_MIDCMD_DATA0
++#define SDMA1_RLC4_MIDCMD_DATA0__DATA0__SHIFT 0x0
++#define SDMA1_RLC4_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
++//SDMA1_RLC4_MIDCMD_DATA1
++#define SDMA1_RLC4_MIDCMD_DATA1__DATA1__SHIFT 0x0
++#define SDMA1_RLC4_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
++//SDMA1_RLC4_MIDCMD_DATA2
++#define SDMA1_RLC4_MIDCMD_DATA2__DATA2__SHIFT 0x0
++#define SDMA1_RLC4_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
++//SDMA1_RLC4_MIDCMD_DATA3
++#define SDMA1_RLC4_MIDCMD_DATA3__DATA3__SHIFT 0x0
++#define SDMA1_RLC4_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
++//SDMA1_RLC4_MIDCMD_DATA4
++#define SDMA1_RLC4_MIDCMD_DATA4__DATA4__SHIFT 0x0
++#define SDMA1_RLC4_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
++//SDMA1_RLC4_MIDCMD_DATA5
++#define SDMA1_RLC4_MIDCMD_DATA5__DATA5__SHIFT 0x0
++#define SDMA1_RLC4_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
++//SDMA1_RLC4_MIDCMD_DATA6
++#define SDMA1_RLC4_MIDCMD_DATA6__DATA6__SHIFT 0x0
++#define SDMA1_RLC4_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
++//SDMA1_RLC4_MIDCMD_DATA7
++#define SDMA1_RLC4_MIDCMD_DATA7__DATA7__SHIFT 0x0
++#define SDMA1_RLC4_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
++//SDMA1_RLC4_MIDCMD_DATA8
++#define SDMA1_RLC4_MIDCMD_DATA8__DATA8__SHIFT 0x0
++#define SDMA1_RLC4_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
++//SDMA1_RLC4_MIDCMD_CNTL
++#define SDMA1_RLC4_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
++#define SDMA1_RLC4_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
++#define SDMA1_RLC4_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
++#define SDMA1_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
++#define SDMA1_RLC4_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
++#define SDMA1_RLC4_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
++#define SDMA1_RLC4_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
++#define SDMA1_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
++//SDMA1_RLC5_RB_CNTL
++#define SDMA1_RLC5_RB_CNTL__RB_ENABLE__SHIFT 0x0
++#define SDMA1_RLC5_RB_CNTL__RB_SIZE__SHIFT 0x1
++#define SDMA1_RLC5_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
++#define SDMA1_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
++#define SDMA1_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
++#define SDMA1_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
++#define SDMA1_RLC5_RB_CNTL__RB_PRIV__SHIFT 0x17
++#define SDMA1_RLC5_RB_CNTL__RB_VMID__SHIFT 0x18
++#define SDMA1_RLC5_RB_CNTL__RB_ENABLE_MASK 0x00000001L
++#define SDMA1_RLC5_RB_CNTL__RB_SIZE_MASK 0x0000003EL
++#define SDMA1_RLC5_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
++#define SDMA1_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
++#define SDMA1_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
++#define SDMA1_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
++#define SDMA1_RLC5_RB_CNTL__RB_PRIV_MASK 0x00800000L
++#define SDMA1_RLC5_RB_CNTL__RB_VMID_MASK 0x0F000000L
++//SDMA1_RLC5_RB_BASE
++#define SDMA1_RLC5_RB_BASE__ADDR__SHIFT 0x0
++#define SDMA1_RLC5_RB_BASE__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_RLC5_RB_BASE_HI
++#define SDMA1_RLC5_RB_BASE_HI__ADDR__SHIFT 0x0
++#define SDMA1_RLC5_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
++//SDMA1_RLC5_RB_RPTR
++#define SDMA1_RLC5_RB_RPTR__OFFSET__SHIFT 0x0
++#define SDMA1_RLC5_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
++//SDMA1_RLC5_RB_RPTR_HI
++#define SDMA1_RLC5_RB_RPTR_HI__OFFSET__SHIFT 0x0
++#define SDMA1_RLC5_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA1_RLC5_RB_WPTR
++#define SDMA1_RLC5_RB_WPTR__OFFSET__SHIFT 0x0
++#define SDMA1_RLC5_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
++//SDMA1_RLC5_RB_WPTR_HI
++#define SDMA1_RLC5_RB_WPTR_HI__OFFSET__SHIFT 0x0
++#define SDMA1_RLC5_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA1_RLC5_RB_WPTR_POLL_CNTL
++#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
++#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
++#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
++#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
++#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
++#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
++#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
++#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
++#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
++#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
++//SDMA1_RLC5_RB_RPTR_ADDR_HI
++#define SDMA1_RLC5_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA1_RLC5_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_RLC5_RB_RPTR_ADDR_LO
++#define SDMA1_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
++#define SDMA1_RLC5_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA1_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
++#define SDMA1_RLC5_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA1_RLC5_IB_CNTL
++#define SDMA1_RLC5_IB_CNTL__IB_ENABLE__SHIFT 0x0
++#define SDMA1_RLC5_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
++#define SDMA1_RLC5_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
++#define SDMA1_RLC5_IB_CNTL__CMD_VMID__SHIFT 0x10
++#define SDMA1_RLC5_IB_CNTL__IB_ENABLE_MASK 0x00000001L
++#define SDMA1_RLC5_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
++#define SDMA1_RLC5_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
++#define SDMA1_RLC5_IB_CNTL__CMD_VMID_MASK 0x000F0000L
++//SDMA1_RLC5_IB_RPTR
++#define SDMA1_RLC5_IB_RPTR__OFFSET__SHIFT 0x2
++#define SDMA1_RLC5_IB_RPTR__OFFSET_MASK 0x003FFFFCL
++//SDMA1_RLC5_IB_OFFSET
++#define SDMA1_RLC5_IB_OFFSET__OFFSET__SHIFT 0x2
++#define SDMA1_RLC5_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
++//SDMA1_RLC5_IB_BASE_LO
++#define SDMA1_RLC5_IB_BASE_LO__ADDR__SHIFT 0x5
++#define SDMA1_RLC5_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
++//SDMA1_RLC5_IB_BASE_HI
++#define SDMA1_RLC5_IB_BASE_HI__ADDR__SHIFT 0x0
++#define SDMA1_RLC5_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_RLC5_IB_SIZE
++#define SDMA1_RLC5_IB_SIZE__SIZE__SHIFT 0x0
++#define SDMA1_RLC5_IB_SIZE__SIZE_MASK 0x000FFFFFL
++//SDMA1_RLC5_SKIP_CNTL
++#define SDMA1_RLC5_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
++#define SDMA1_RLC5_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
++//SDMA1_RLC5_CONTEXT_STATUS
++#define SDMA1_RLC5_CONTEXT_STATUS__SELECTED__SHIFT 0x0
++#define SDMA1_RLC5_CONTEXT_STATUS__IDLE__SHIFT 0x2
++#define SDMA1_RLC5_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
++#define SDMA1_RLC5_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
++#define SDMA1_RLC5_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
++#define SDMA1_RLC5_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
++#define SDMA1_RLC5_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
++#define SDMA1_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
++#define SDMA1_RLC5_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
++#define SDMA1_RLC5_CONTEXT_STATUS__IDLE_MASK 0x00000004L
++#define SDMA1_RLC5_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
++#define SDMA1_RLC5_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
++#define SDMA1_RLC5_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
++#define SDMA1_RLC5_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
++#define SDMA1_RLC5_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
++#define SDMA1_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
++//SDMA1_RLC5_DOORBELL
++#define SDMA1_RLC5_DOORBELL__ENABLE__SHIFT 0x1c
++#define SDMA1_RLC5_DOORBELL__CAPTURED__SHIFT 0x1e
++#define SDMA1_RLC5_DOORBELL__ENABLE_MASK 0x10000000L
++#define SDMA1_RLC5_DOORBELL__CAPTURED_MASK 0x40000000L
++//SDMA1_RLC5_STATUS
++#define SDMA1_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
++#define SDMA1_RLC5_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
++#define SDMA1_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
++#define SDMA1_RLC5_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
++//SDMA1_RLC5_DOORBELL_LOG
++#define SDMA1_RLC5_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
++#define SDMA1_RLC5_DOORBELL_LOG__DATA__SHIFT 0x2
++#define SDMA1_RLC5_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
++#define SDMA1_RLC5_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
++//SDMA1_RLC5_WATERMARK
++#define SDMA1_RLC5_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
++#define SDMA1_RLC5_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
++#define SDMA1_RLC5_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
++#define SDMA1_RLC5_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
++//SDMA1_RLC5_DOORBELL_OFFSET
++#define SDMA1_RLC5_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
++#define SDMA1_RLC5_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
++//SDMA1_RLC5_CSA_ADDR_LO
++#define SDMA1_RLC5_CSA_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA1_RLC5_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA1_RLC5_CSA_ADDR_HI
++#define SDMA1_RLC5_CSA_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA1_RLC5_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_RLC5_IB_SUB_REMAIN
++#define SDMA1_RLC5_IB_SUB_REMAIN__SIZE__SHIFT 0x0
++#define SDMA1_RLC5_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
++//SDMA1_RLC5_PREEMPT
++#define SDMA1_RLC5_PREEMPT__IB_PREEMPT__SHIFT 0x0
++#define SDMA1_RLC5_PREEMPT__IB_PREEMPT_MASK 0x00000001L
++//SDMA1_RLC5_DUMMY_REG
++#define SDMA1_RLC5_DUMMY_REG__DUMMY__SHIFT 0x0
++#define SDMA1_RLC5_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
++//SDMA1_RLC5_RB_WPTR_POLL_ADDR_HI
++#define SDMA1_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA1_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_RLC5_RB_WPTR_POLL_ADDR_LO
++#define SDMA1_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA1_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA1_RLC5_RB_AQL_CNTL
++#define SDMA1_RLC5_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
++#define SDMA1_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
++#define SDMA1_RLC5_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
++#define SDMA1_RLC5_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
++#define SDMA1_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
++#define SDMA1_RLC5_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
++//SDMA1_RLC5_MINOR_PTR_UPDATE
++#define SDMA1_RLC5_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
++#define SDMA1_RLC5_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
++//SDMA1_RLC5_MIDCMD_DATA0
++#define SDMA1_RLC5_MIDCMD_DATA0__DATA0__SHIFT 0x0
++#define SDMA1_RLC5_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
++//SDMA1_RLC5_MIDCMD_DATA1
++#define SDMA1_RLC5_MIDCMD_DATA1__DATA1__SHIFT 0x0
++#define SDMA1_RLC5_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
++//SDMA1_RLC5_MIDCMD_DATA2
++#define SDMA1_RLC5_MIDCMD_DATA2__DATA2__SHIFT 0x0
++#define SDMA1_RLC5_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
++//SDMA1_RLC5_MIDCMD_DATA3
++#define SDMA1_RLC5_MIDCMD_DATA3__DATA3__SHIFT 0x0
++#define SDMA1_RLC5_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
++//SDMA1_RLC5_MIDCMD_DATA4
++#define SDMA1_RLC5_MIDCMD_DATA4__DATA4__SHIFT 0x0
++#define SDMA1_RLC5_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
++//SDMA1_RLC5_MIDCMD_DATA5
++#define SDMA1_RLC5_MIDCMD_DATA5__DATA5__SHIFT 0x0
++#define SDMA1_RLC5_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
++//SDMA1_RLC5_MIDCMD_DATA6
++#define SDMA1_RLC5_MIDCMD_DATA6__DATA6__SHIFT 0x0
++#define SDMA1_RLC5_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
++//SDMA1_RLC5_MIDCMD_DATA7
++#define SDMA1_RLC5_MIDCMD_DATA7__DATA7__SHIFT 0x0
++#define SDMA1_RLC5_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
++//SDMA1_RLC5_MIDCMD_DATA8
++#define SDMA1_RLC5_MIDCMD_DATA8__DATA8__SHIFT 0x0
++#define SDMA1_RLC5_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
++//SDMA1_RLC5_MIDCMD_CNTL
++#define SDMA1_RLC5_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
++#define SDMA1_RLC5_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
++#define SDMA1_RLC5_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
++#define SDMA1_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
++#define SDMA1_RLC5_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
++#define SDMA1_RLC5_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
++#define SDMA1_RLC5_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
++#define SDMA1_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
++//SDMA1_RLC6_RB_CNTL
++#define SDMA1_RLC6_RB_CNTL__RB_ENABLE__SHIFT 0x0
++#define SDMA1_RLC6_RB_CNTL__RB_SIZE__SHIFT 0x1
++#define SDMA1_RLC6_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
++#define SDMA1_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
++#define SDMA1_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
++#define SDMA1_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
++#define SDMA1_RLC6_RB_CNTL__RB_PRIV__SHIFT 0x17
++#define SDMA1_RLC6_RB_CNTL__RB_VMID__SHIFT 0x18
++#define SDMA1_RLC6_RB_CNTL__RB_ENABLE_MASK 0x00000001L
++#define SDMA1_RLC6_RB_CNTL__RB_SIZE_MASK 0x0000003EL
++#define SDMA1_RLC6_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
++#define SDMA1_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
++#define SDMA1_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
++#define SDMA1_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
++#define SDMA1_RLC6_RB_CNTL__RB_PRIV_MASK 0x00800000L
++#define SDMA1_RLC6_RB_CNTL__RB_VMID_MASK 0x0F000000L
++//SDMA1_RLC6_RB_BASE
++#define SDMA1_RLC6_RB_BASE__ADDR__SHIFT 0x0
++#define SDMA1_RLC6_RB_BASE__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_RLC6_RB_BASE_HI
++#define SDMA1_RLC6_RB_BASE_HI__ADDR__SHIFT 0x0
++#define SDMA1_RLC6_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
++//SDMA1_RLC6_RB_RPTR
++#define SDMA1_RLC6_RB_RPTR__OFFSET__SHIFT 0x0
++#define SDMA1_RLC6_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
++//SDMA1_RLC6_RB_RPTR_HI
++#define SDMA1_RLC6_RB_RPTR_HI__OFFSET__SHIFT 0x0
++#define SDMA1_RLC6_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA1_RLC6_RB_WPTR
++#define SDMA1_RLC6_RB_WPTR__OFFSET__SHIFT 0x0
++#define SDMA1_RLC6_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
++//SDMA1_RLC6_RB_WPTR_HI
++#define SDMA1_RLC6_RB_WPTR_HI__OFFSET__SHIFT 0x0
++#define SDMA1_RLC6_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA1_RLC6_RB_WPTR_POLL_CNTL
++#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
++#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
++#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
++#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
++#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
++#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
++#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
++#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
++#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
++#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
++//SDMA1_RLC6_RB_RPTR_ADDR_HI
++#define SDMA1_RLC6_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA1_RLC6_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_RLC6_RB_RPTR_ADDR_LO
++#define SDMA1_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
++#define SDMA1_RLC6_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA1_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
++#define SDMA1_RLC6_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA1_RLC6_IB_CNTL
++#define SDMA1_RLC6_IB_CNTL__IB_ENABLE__SHIFT 0x0
++#define SDMA1_RLC6_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
++#define SDMA1_RLC6_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
++#define SDMA1_RLC6_IB_CNTL__CMD_VMID__SHIFT 0x10
++#define SDMA1_RLC6_IB_CNTL__IB_ENABLE_MASK 0x00000001L
++#define SDMA1_RLC6_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
++#define SDMA1_RLC6_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
++#define SDMA1_RLC6_IB_CNTL__CMD_VMID_MASK 0x000F0000L
++//SDMA1_RLC6_IB_RPTR
++#define SDMA1_RLC6_IB_RPTR__OFFSET__SHIFT 0x2
++#define SDMA1_RLC6_IB_RPTR__OFFSET_MASK 0x003FFFFCL
++//SDMA1_RLC6_IB_OFFSET
++#define SDMA1_RLC6_IB_OFFSET__OFFSET__SHIFT 0x2
++#define SDMA1_RLC6_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
++//SDMA1_RLC6_IB_BASE_LO
++#define SDMA1_RLC6_IB_BASE_LO__ADDR__SHIFT 0x5
++#define SDMA1_RLC6_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
++//SDMA1_RLC6_IB_BASE_HI
++#define SDMA1_RLC6_IB_BASE_HI__ADDR__SHIFT 0x0
++#define SDMA1_RLC6_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_RLC6_IB_SIZE
++#define SDMA1_RLC6_IB_SIZE__SIZE__SHIFT 0x0
++#define SDMA1_RLC6_IB_SIZE__SIZE_MASK 0x000FFFFFL
++//SDMA1_RLC6_SKIP_CNTL
++#define SDMA1_RLC6_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
++#define SDMA1_RLC6_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
++//SDMA1_RLC6_CONTEXT_STATUS
++#define SDMA1_RLC6_CONTEXT_STATUS__SELECTED__SHIFT 0x0
++#define SDMA1_RLC6_CONTEXT_STATUS__IDLE__SHIFT 0x2
++#define SDMA1_RLC6_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
++#define SDMA1_RLC6_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
++#define SDMA1_RLC6_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
++#define SDMA1_RLC6_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
++#define SDMA1_RLC6_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
++#define SDMA1_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
++#define SDMA1_RLC6_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
++#define SDMA1_RLC6_CONTEXT_STATUS__IDLE_MASK 0x00000004L
++#define SDMA1_RLC6_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
++#define SDMA1_RLC6_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
++#define SDMA1_RLC6_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
++#define SDMA1_RLC6_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
++#define SDMA1_RLC6_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
++#define SDMA1_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
++//SDMA1_RLC6_DOORBELL
++#define SDMA1_RLC6_DOORBELL__ENABLE__SHIFT 0x1c
++#define SDMA1_RLC6_DOORBELL__CAPTURED__SHIFT 0x1e
++#define SDMA1_RLC6_DOORBELL__ENABLE_MASK 0x10000000L
++#define SDMA1_RLC6_DOORBELL__CAPTURED_MASK 0x40000000L
++//SDMA1_RLC6_STATUS
++#define SDMA1_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
++#define SDMA1_RLC6_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
++#define SDMA1_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
++#define SDMA1_RLC6_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
++//SDMA1_RLC6_DOORBELL_LOG
++#define SDMA1_RLC6_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
++#define SDMA1_RLC6_DOORBELL_LOG__DATA__SHIFT 0x2
++#define SDMA1_RLC6_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
++#define SDMA1_RLC6_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
++//SDMA1_RLC6_WATERMARK
++#define SDMA1_RLC6_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
++#define SDMA1_RLC6_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
++#define SDMA1_RLC6_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
++#define SDMA1_RLC6_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
++//SDMA1_RLC6_DOORBELL_OFFSET
++#define SDMA1_RLC6_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
++#define SDMA1_RLC6_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
++//SDMA1_RLC6_CSA_ADDR_LO
++#define SDMA1_RLC6_CSA_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA1_RLC6_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA1_RLC6_CSA_ADDR_HI
++#define SDMA1_RLC6_CSA_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA1_RLC6_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_RLC6_IB_SUB_REMAIN
++#define SDMA1_RLC6_IB_SUB_REMAIN__SIZE__SHIFT 0x0
++#define SDMA1_RLC6_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
++//SDMA1_RLC6_PREEMPT
++#define SDMA1_RLC6_PREEMPT__IB_PREEMPT__SHIFT 0x0
++#define SDMA1_RLC6_PREEMPT__IB_PREEMPT_MASK 0x00000001L
++//SDMA1_RLC6_DUMMY_REG
++#define SDMA1_RLC6_DUMMY_REG__DUMMY__SHIFT 0x0
++#define SDMA1_RLC6_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
++//SDMA1_RLC6_RB_WPTR_POLL_ADDR_HI
++#define SDMA1_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA1_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_RLC6_RB_WPTR_POLL_ADDR_LO
++#define SDMA1_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA1_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA1_RLC6_RB_AQL_CNTL
++#define SDMA1_RLC6_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
++#define SDMA1_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
++#define SDMA1_RLC6_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
++#define SDMA1_RLC6_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
++#define SDMA1_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
++#define SDMA1_RLC6_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
++//SDMA1_RLC6_MINOR_PTR_UPDATE
++#define SDMA1_RLC6_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
++#define SDMA1_RLC6_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
++//SDMA1_RLC6_MIDCMD_DATA0
++#define SDMA1_RLC6_MIDCMD_DATA0__DATA0__SHIFT 0x0
++#define SDMA1_RLC6_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
++//SDMA1_RLC6_MIDCMD_DATA1
++#define SDMA1_RLC6_MIDCMD_DATA1__DATA1__SHIFT 0x0
++#define SDMA1_RLC6_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
++//SDMA1_RLC6_MIDCMD_DATA2
++#define SDMA1_RLC6_MIDCMD_DATA2__DATA2__SHIFT 0x0
++#define SDMA1_RLC6_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
++//SDMA1_RLC6_MIDCMD_DATA3
++#define SDMA1_RLC6_MIDCMD_DATA3__DATA3__SHIFT 0x0
++#define SDMA1_RLC6_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
++//SDMA1_RLC6_MIDCMD_DATA4
++#define SDMA1_RLC6_MIDCMD_DATA4__DATA4__SHIFT 0x0
++#define SDMA1_RLC6_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
++//SDMA1_RLC6_MIDCMD_DATA5
++#define SDMA1_RLC6_MIDCMD_DATA5__DATA5__SHIFT 0x0
++#define SDMA1_RLC6_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
++//SDMA1_RLC6_MIDCMD_DATA6
++#define SDMA1_RLC6_MIDCMD_DATA6__DATA6__SHIFT 0x0
++#define SDMA1_RLC6_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
++//SDMA1_RLC6_MIDCMD_DATA7
++#define SDMA1_RLC6_MIDCMD_DATA7__DATA7__SHIFT 0x0
++#define SDMA1_RLC6_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
++//SDMA1_RLC6_MIDCMD_DATA8
++#define SDMA1_RLC6_MIDCMD_DATA8__DATA8__SHIFT 0x0
++#define SDMA1_RLC6_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
++//SDMA1_RLC6_MIDCMD_CNTL
++#define SDMA1_RLC6_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
++#define SDMA1_RLC6_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
++#define SDMA1_RLC6_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
++#define SDMA1_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
++#define SDMA1_RLC6_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
++#define SDMA1_RLC6_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
++#define SDMA1_RLC6_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
++#define SDMA1_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
++//SDMA1_RLC7_RB_CNTL
++#define SDMA1_RLC7_RB_CNTL__RB_ENABLE__SHIFT 0x0
++#define SDMA1_RLC7_RB_CNTL__RB_SIZE__SHIFT 0x1
++#define SDMA1_RLC7_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
++#define SDMA1_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
++#define SDMA1_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
++#define SDMA1_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
++#define SDMA1_RLC7_RB_CNTL__RB_PRIV__SHIFT 0x17
++#define SDMA1_RLC7_RB_CNTL__RB_VMID__SHIFT 0x18
++#define SDMA1_RLC7_RB_CNTL__RB_ENABLE_MASK 0x00000001L
++#define SDMA1_RLC7_RB_CNTL__RB_SIZE_MASK 0x0000003EL
++#define SDMA1_RLC7_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
++#define SDMA1_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
++#define SDMA1_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
++#define SDMA1_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
++#define SDMA1_RLC7_RB_CNTL__RB_PRIV_MASK 0x00800000L
++#define SDMA1_RLC7_RB_CNTL__RB_VMID_MASK 0x0F000000L
++//SDMA1_RLC7_RB_BASE
++#define SDMA1_RLC7_RB_BASE__ADDR__SHIFT 0x0
++#define SDMA1_RLC7_RB_BASE__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_RLC7_RB_BASE_HI
++#define SDMA1_RLC7_RB_BASE_HI__ADDR__SHIFT 0x0
++#define SDMA1_RLC7_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
++//SDMA1_RLC7_RB_RPTR
++#define SDMA1_RLC7_RB_RPTR__OFFSET__SHIFT 0x0
++#define SDMA1_RLC7_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
++//SDMA1_RLC7_RB_RPTR_HI
++#define SDMA1_RLC7_RB_RPTR_HI__OFFSET__SHIFT 0x0
++#define SDMA1_RLC7_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA1_RLC7_RB_WPTR
++#define SDMA1_RLC7_RB_WPTR__OFFSET__SHIFT 0x0
++#define SDMA1_RLC7_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
++//SDMA1_RLC7_RB_WPTR_HI
++#define SDMA1_RLC7_RB_WPTR_HI__OFFSET__SHIFT 0x0
++#define SDMA1_RLC7_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA1_RLC7_RB_WPTR_POLL_CNTL
++#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
++#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
++#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
++#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
++#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
++#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
++#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
++#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
++#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
++#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
++//SDMA1_RLC7_RB_RPTR_ADDR_HI
++#define SDMA1_RLC7_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA1_RLC7_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_RLC7_RB_RPTR_ADDR_LO
++#define SDMA1_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
++#define SDMA1_RLC7_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA1_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
++#define SDMA1_RLC7_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA1_RLC7_IB_CNTL
++#define SDMA1_RLC7_IB_CNTL__IB_ENABLE__SHIFT 0x0
++#define SDMA1_RLC7_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
++#define SDMA1_RLC7_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
++#define SDMA1_RLC7_IB_CNTL__CMD_VMID__SHIFT 0x10
++#define SDMA1_RLC7_IB_CNTL__IB_ENABLE_MASK 0x00000001L
++#define SDMA1_RLC7_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
++#define SDMA1_RLC7_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
++#define SDMA1_RLC7_IB_CNTL__CMD_VMID_MASK 0x000F0000L
++//SDMA1_RLC7_IB_RPTR
++#define SDMA1_RLC7_IB_RPTR__OFFSET__SHIFT 0x2
++#define SDMA1_RLC7_IB_RPTR__OFFSET_MASK 0x003FFFFCL
++//SDMA1_RLC7_IB_OFFSET
++#define SDMA1_RLC7_IB_OFFSET__OFFSET__SHIFT 0x2
++#define SDMA1_RLC7_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
++//SDMA1_RLC7_IB_BASE_LO
++#define SDMA1_RLC7_IB_BASE_LO__ADDR__SHIFT 0x5
++#define SDMA1_RLC7_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
++//SDMA1_RLC7_IB_BASE_HI
++#define SDMA1_RLC7_IB_BASE_HI__ADDR__SHIFT 0x0
++#define SDMA1_RLC7_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_RLC7_IB_SIZE
++#define SDMA1_RLC7_IB_SIZE__SIZE__SHIFT 0x0
++#define SDMA1_RLC7_IB_SIZE__SIZE_MASK 0x000FFFFFL
++//SDMA1_RLC7_SKIP_CNTL
++#define SDMA1_RLC7_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
++#define SDMA1_RLC7_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
++//SDMA1_RLC7_CONTEXT_STATUS
++#define SDMA1_RLC7_CONTEXT_STATUS__SELECTED__SHIFT 0x0
++#define SDMA1_RLC7_CONTEXT_STATUS__IDLE__SHIFT 0x2
++#define SDMA1_RLC7_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
++#define SDMA1_RLC7_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
++#define SDMA1_RLC7_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
++#define SDMA1_RLC7_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
++#define SDMA1_RLC7_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
++#define SDMA1_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
++#define SDMA1_RLC7_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
++#define SDMA1_RLC7_CONTEXT_STATUS__IDLE_MASK 0x00000004L
++#define SDMA1_RLC7_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
++#define SDMA1_RLC7_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
++#define SDMA1_RLC7_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
++#define SDMA1_RLC7_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
++#define SDMA1_RLC7_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
++#define SDMA1_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
++//SDMA1_RLC7_DOORBELL
++#define SDMA1_RLC7_DOORBELL__ENABLE__SHIFT 0x1c
++#define SDMA1_RLC7_DOORBELL__CAPTURED__SHIFT 0x1e
++#define SDMA1_RLC7_DOORBELL__ENABLE_MASK 0x10000000L
++#define SDMA1_RLC7_DOORBELL__CAPTURED_MASK 0x40000000L
++//SDMA1_RLC7_STATUS
++#define SDMA1_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
++#define SDMA1_RLC7_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
++#define SDMA1_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
++#define SDMA1_RLC7_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
++//SDMA1_RLC7_DOORBELL_LOG
++#define SDMA1_RLC7_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
++#define SDMA1_RLC7_DOORBELL_LOG__DATA__SHIFT 0x2
++#define SDMA1_RLC7_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
++#define SDMA1_RLC7_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
++//SDMA1_RLC7_WATERMARK
++#define SDMA1_RLC7_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
++#define SDMA1_RLC7_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
++#define SDMA1_RLC7_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
++#define SDMA1_RLC7_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
++//SDMA1_RLC7_DOORBELL_OFFSET
++#define SDMA1_RLC7_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
++#define SDMA1_RLC7_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
++//SDMA1_RLC7_CSA_ADDR_LO
++#define SDMA1_RLC7_CSA_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA1_RLC7_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA1_RLC7_CSA_ADDR_HI
++#define SDMA1_RLC7_CSA_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA1_RLC7_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_RLC7_IB_SUB_REMAIN
++#define SDMA1_RLC7_IB_SUB_REMAIN__SIZE__SHIFT 0x0
++#define SDMA1_RLC7_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
++//SDMA1_RLC7_PREEMPT
++#define SDMA1_RLC7_PREEMPT__IB_PREEMPT__SHIFT 0x0
++#define SDMA1_RLC7_PREEMPT__IB_PREEMPT_MASK 0x00000001L
++//SDMA1_RLC7_DUMMY_REG
++#define SDMA1_RLC7_DUMMY_REG__DUMMY__SHIFT 0x0
++#define SDMA1_RLC7_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
++//SDMA1_RLC7_RB_WPTR_POLL_ADDR_HI
++#define SDMA1_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA1_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA1_RLC7_RB_WPTR_POLL_ADDR_LO
++#define SDMA1_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA1_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA1_RLC7_RB_AQL_CNTL
++#define SDMA1_RLC7_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
++#define SDMA1_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
++#define SDMA1_RLC7_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
++#define SDMA1_RLC7_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
++#define SDMA1_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
++#define SDMA1_RLC7_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
++//SDMA1_RLC7_MINOR_PTR_UPDATE
++#define SDMA1_RLC7_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
++#define SDMA1_RLC7_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
++//SDMA1_RLC7_MIDCMD_DATA0
++#define SDMA1_RLC7_MIDCMD_DATA0__DATA0__SHIFT 0x0
++#define SDMA1_RLC7_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
++//SDMA1_RLC7_MIDCMD_DATA1
++#define SDMA1_RLC7_MIDCMD_DATA1__DATA1__SHIFT 0x0
++#define SDMA1_RLC7_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
++//SDMA1_RLC7_MIDCMD_DATA2
++#define SDMA1_RLC7_MIDCMD_DATA2__DATA2__SHIFT 0x0
++#define SDMA1_RLC7_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
++//SDMA1_RLC7_MIDCMD_DATA3
++#define SDMA1_RLC7_MIDCMD_DATA3__DATA3__SHIFT 0x0
++#define SDMA1_RLC7_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
++//SDMA1_RLC7_MIDCMD_DATA4
++#define SDMA1_RLC7_MIDCMD_DATA4__DATA4__SHIFT 0x0
++#define SDMA1_RLC7_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
++//SDMA1_RLC7_MIDCMD_DATA5
++#define SDMA1_RLC7_MIDCMD_DATA5__DATA5__SHIFT 0x0
++#define SDMA1_RLC7_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
++//SDMA1_RLC7_MIDCMD_DATA6
++#define SDMA1_RLC7_MIDCMD_DATA6__DATA6__SHIFT 0x0
++#define SDMA1_RLC7_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
++//SDMA1_RLC7_MIDCMD_DATA7
++#define SDMA1_RLC7_MIDCMD_DATA7__DATA7__SHIFT 0x0
++#define SDMA1_RLC7_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
++//SDMA1_RLC7_MIDCMD_DATA8
++#define SDMA1_RLC7_MIDCMD_DATA8__DATA8__SHIFT 0x0
++#define SDMA1_RLC7_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
++//SDMA1_RLC7_MIDCMD_CNTL
++#define SDMA1_RLC7_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
++#define SDMA1_RLC7_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
++#define SDMA1_RLC7_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
++#define SDMA1_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
++#define SDMA1_RLC7_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
++#define SDMA1_RLC7_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
++#define SDMA1_RLC7_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
++#define SDMA1_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
++
++#endif
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5117-drm-amdgpu-include-add-thm-11.0.2-headers.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5117-drm-amdgpu-include-add-thm-11.0.2-headers.patch
new file mode 100644
index 00000000..a8885a59
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5117-drm-amdgpu-include-add-thm-11.0.2-headers.patch
@@ -0,0 +1,155 @@
+From 9805f7de5e06c3364c5992063a2da1aa51aa0402 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Mon, 26 Mar 2018 15:29:48 +0800
+Subject: [PATCH 5117/5725] drm/amdgpu/include: add thm 11.0.2 headers
+
+Headers for thermal controller.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../amd/include/asic_reg/thm/thm_11_0_2_offset.h | 37 ++++++++++
+ .../amd/include/asic_reg/thm/thm_11_0_2_sh_mask.h | 86 ++++++++++++++++++++++
+ 2 files changed, 123 insertions(+)
+ create mode 100644 drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_offset.h
+ create mode 100644 drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_sh_mask.h
+
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_offset.h
+new file mode 100644
+index 0000000..510ec3c
+--- /dev/null
++++ b/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_offset.h
+@@ -0,0 +1,37 @@
++/*
++ * Copyright (C) 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
++ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef _thm_11_0_2_OFFSET_HEADER
++#define _thm_11_0_2_OFFSET_HEADER
++
++
++#define mmCG_MULT_THERMAL_STATUS 0x005f
++#define mmCG_MULT_THERMAL_STATUS_BASE_IDX 0
++
++#define mmTHM_THERMAL_INT_ENA 0x000a
++#define mmTHM_THERMAL_INT_ENA_BASE_IDX 0
++#define mmTHM_THERMAL_INT_CTRL 0x000b
++#define mmTHM_THERMAL_INT_CTRL_BASE_IDX 0
++
++#define mmTHM_TCON_THERM_TRIP 0x0002
++#define mmTHM_TCON_THERM_TRIP_BASE_IDX 0
++
++#endif
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_sh_mask.h
+new file mode 100644
+index 0000000..f69533f
+--- /dev/null
++++ b/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_sh_mask.h
+@@ -0,0 +1,86 @@
++/*
++ * Copyright (C) 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
++ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef _thm_11_0_2_SH_MASK_HEADER
++#define _thm_11_0_2_SH_MASK_HEADER
++
++
++//CG_MULT_THERMAL_STATUS
++#define CG_MULT_THERMAL_STATUS__ASIC_MAX_TEMP__SHIFT 0x0
++#define CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT 0x9
++#define CG_MULT_THERMAL_STATUS__ASIC_MAX_TEMP_MASK 0x000001FFL
++#define CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK 0x0003FE00L
++
++//THM_THERMAL_INT_ENA
++#define THM_THERMAL_INT_ENA__THERM_INTH_SET__SHIFT 0x0
++#define THM_THERMAL_INT_ENA__THERM_INTL_SET__SHIFT 0x1
++#define THM_THERMAL_INT_ENA__THERM_TRIGGER_SET__SHIFT 0x2
++#define THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT 0x3
++#define THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT 0x4
++#define THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT 0x5
++#define THM_THERMAL_INT_ENA__THERM_INTH_SET_MASK 0x00000001L
++#define THM_THERMAL_INT_ENA__THERM_INTL_SET_MASK 0x00000002L
++#define THM_THERMAL_INT_ENA__THERM_TRIGGER_SET_MASK 0x00000004L
++#define THM_THERMAL_INT_ENA__THERM_INTH_CLR_MASK 0x00000008L
++#define THM_THERMAL_INT_ENA__THERM_INTL_CLR_MASK 0x00000010L
++#define THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR_MASK 0x00000020L
++//THM_THERMAL_INT_CTRL
++#define THM_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT 0x0
++#define THM_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT 0x8
++#define THM_THERMAL_INT_CTRL__TEMP_THRESHOLD__SHIFT 0x10
++#define THM_THERMAL_INT_CTRL__THERM_INTH_MASK__SHIFT 0x18
++#define THM_THERMAL_INT_CTRL__THERM_INTL_MASK__SHIFT 0x19
++#define THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK__SHIFT 0x1a
++#define THM_THERMAL_INT_CTRL__THERM_PROCHOT_MASK__SHIFT 0x1b
++#define THM_THERMAL_INT_CTRL__THERM_IH_HW_ENA__SHIFT 0x1c
++#define THM_THERMAL_INT_CTRL__MAX_IH_CREDIT__SHIFT 0x1d
++#define THM_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK 0x000000FFL
++#define THM_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK 0x0000FF00L
++#define THM_THERMAL_INT_CTRL__TEMP_THRESHOLD_MASK 0x00FF0000L
++#define THM_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK 0x01000000L
++#define THM_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK 0x02000000L
++#define THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK 0x04000000L
++#define THM_THERMAL_INT_CTRL__THERM_PROCHOT_MASK_MASK 0x08000000L
++#define THM_THERMAL_INT_CTRL__THERM_IH_HW_ENA_MASK 0x10000000L
++#define THM_THERMAL_INT_CTRL__MAX_IH_CREDIT_MASK 0xE0000000L
++
++//THM_TCON_THERM_TRIP
++#define THM_TCON_THERM_TRIP__CTF_PAD_POLARITY__SHIFT 0x0
++#define THM_TCON_THERM_TRIP__THERM_TP__SHIFT 0x1
++#define THM_TCON_THERM_TRIP__CTF_THRESHOLD_EXCEEDED__SHIFT 0x2
++#define THM_TCON_THERM_TRIP__THERM_TP_SENSE__SHIFT 0x3
++#define THM_TCON_THERM_TRIP__RSVD2__SHIFT 0x4
++#define THM_TCON_THERM_TRIP__THERM_TP_EN__SHIFT 0x5
++#define THM_TCON_THERM_TRIP__THERM_TP_LMT__SHIFT 0x6
++#define THM_TCON_THERM_TRIP__RSVD3__SHIFT 0xe
++#define THM_TCON_THERM_TRIP__SW_THERM_TP__SHIFT 0x1f
++#define THM_TCON_THERM_TRIP__CTF_PAD_POLARITY_MASK 0x00000001L
++#define THM_TCON_THERM_TRIP__THERM_TP_MASK 0x00000002L
++#define THM_TCON_THERM_TRIP__CTF_THRESHOLD_EXCEEDED_MASK 0x00000004L
++#define THM_TCON_THERM_TRIP__THERM_TP_SENSE_MASK 0x00000008L
++#define THM_TCON_THERM_TRIP__RSVD2_MASK 0x00000010L
++#define THM_TCON_THERM_TRIP__THERM_TP_EN_MASK 0x00000020L
++#define THM_TCON_THERM_TRIP__THERM_TP_LMT_MASK 0x00003FC0L
++#define THM_TCON_THERM_TRIP__RSVD3_MASK 0x7FFFC000L
++#define THM_TCON_THERM_TRIP__SW_THERM_TP_MASK 0x80000000L
++
++#endif
++
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5118-drm-amdgpu-include-Add-mp-11.0-header-files.-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5118-drm-amdgpu-include-Add-mp-11.0-header-files.-v2.patch
new file mode 100644
index 00000000..7f5b64f6
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5118-drm-amdgpu-include-Add-mp-11.0-header-files.-v2.patch
@@ -0,0 +1,926 @@
+From 4f49ca002fce763d7189b7257fa1a28262500bda Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Thu, 10 May 2018 21:23:58 +0800
+Subject: [PATCH 5118/5725] drm/amdgpu/include: Add mp 11.0 header files. (v2)
+
+Add the system management controller v11.0 header files.
+
+v2: cleanup
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../drm/amd/include/asic_reg/mp/mp_11_0_offset.h | 358 ++++++++++++++
+ .../drm/amd/include/asic_reg/mp/mp_11_0_sh_mask.h | 534 +++++++++++++++++++++
+ 2 files changed, 892 insertions(+)
+ create mode 100644 drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_offset.h
+ create mode 100644 drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_sh_mask.h
+
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_offset.h
+new file mode 100644
+index 0000000..6d0052c
+--- /dev/null
++++ b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_offset.h
+@@ -0,0 +1,358 @@
++/*
++ * Copyright (C) 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
++ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef _mp_11_0_2_OFFSET_HEADER
++#define _mp_11_0_2_OFFSET_HEADER
++
++
++// addressBlock: mp_SmuMp0_SmnDec
++// base address: 0x0
++#define mmMP0_SMN_C2PMSG_32 0x0060
++#define mmMP0_SMN_C2PMSG_32_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_33 0x0061
++#define mmMP0_SMN_C2PMSG_33_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_34 0x0062
++#define mmMP0_SMN_C2PMSG_34_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_35 0x0063
++#define mmMP0_SMN_C2PMSG_35_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_36 0x0064
++#define mmMP0_SMN_C2PMSG_36_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_37 0x0065
++#define mmMP0_SMN_C2PMSG_37_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_38 0x0066
++#define mmMP0_SMN_C2PMSG_38_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_39 0x0067
++#define mmMP0_SMN_C2PMSG_39_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_40 0x0068
++#define mmMP0_SMN_C2PMSG_40_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_41 0x0069
++#define mmMP0_SMN_C2PMSG_41_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_42 0x006a
++#define mmMP0_SMN_C2PMSG_42_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_43 0x006b
++#define mmMP0_SMN_C2PMSG_43_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_44 0x006c
++#define mmMP0_SMN_C2PMSG_44_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_45 0x006d
++#define mmMP0_SMN_C2PMSG_45_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_46 0x006e
++#define mmMP0_SMN_C2PMSG_46_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_47 0x006f
++#define mmMP0_SMN_C2PMSG_47_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_48 0x0070
++#define mmMP0_SMN_C2PMSG_48_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_49 0x0071
++#define mmMP0_SMN_C2PMSG_49_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_50 0x0072
++#define mmMP0_SMN_C2PMSG_50_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_51 0x0073
++#define mmMP0_SMN_C2PMSG_51_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_52 0x0074
++#define mmMP0_SMN_C2PMSG_52_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_53 0x0075
++#define mmMP0_SMN_C2PMSG_53_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_54 0x0076
++#define mmMP0_SMN_C2PMSG_54_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_55 0x0077
++#define mmMP0_SMN_C2PMSG_55_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_56 0x0078
++#define mmMP0_SMN_C2PMSG_56_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_57 0x0079
++#define mmMP0_SMN_C2PMSG_57_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_58 0x007a
++#define mmMP0_SMN_C2PMSG_58_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_59 0x007b
++#define mmMP0_SMN_C2PMSG_59_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_60 0x007c
++#define mmMP0_SMN_C2PMSG_60_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_61 0x007d
++#define mmMP0_SMN_C2PMSG_61_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_62 0x007e
++#define mmMP0_SMN_C2PMSG_62_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_63 0x007f
++#define mmMP0_SMN_C2PMSG_63_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_64 0x0080
++#define mmMP0_SMN_C2PMSG_64_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_65 0x0081
++#define mmMP0_SMN_C2PMSG_65_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_66 0x0082
++#define mmMP0_SMN_C2PMSG_66_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_67 0x0083
++#define mmMP0_SMN_C2PMSG_67_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_68 0x0084
++#define mmMP0_SMN_C2PMSG_68_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_69 0x0085
++#define mmMP0_SMN_C2PMSG_69_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_70 0x0086
++#define mmMP0_SMN_C2PMSG_70_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_71 0x0087
++#define mmMP0_SMN_C2PMSG_71_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_72 0x0088
++#define mmMP0_SMN_C2PMSG_72_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_73 0x0089
++#define mmMP0_SMN_C2PMSG_73_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_74 0x008a
++#define mmMP0_SMN_C2PMSG_74_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_75 0x008b
++#define mmMP0_SMN_C2PMSG_75_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_76 0x008c
++#define mmMP0_SMN_C2PMSG_76_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_77 0x008d
++#define mmMP0_SMN_C2PMSG_77_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_78 0x008e
++#define mmMP0_SMN_C2PMSG_78_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_79 0x008f
++#define mmMP0_SMN_C2PMSG_79_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_80 0x0090
++#define mmMP0_SMN_C2PMSG_80_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_81 0x0091
++#define mmMP0_SMN_C2PMSG_81_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_82 0x0092
++#define mmMP0_SMN_C2PMSG_82_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_83 0x0093
++#define mmMP0_SMN_C2PMSG_83_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_84 0x0094
++#define mmMP0_SMN_C2PMSG_84_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_85 0x0095
++#define mmMP0_SMN_C2PMSG_85_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_86 0x0096
++#define mmMP0_SMN_C2PMSG_86_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_87 0x0097
++#define mmMP0_SMN_C2PMSG_87_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_88 0x0098
++#define mmMP0_SMN_C2PMSG_88_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_89 0x0099
++#define mmMP0_SMN_C2PMSG_89_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_90 0x009a
++#define mmMP0_SMN_C2PMSG_90_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_91 0x009b
++#define mmMP0_SMN_C2PMSG_91_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_92 0x009c
++#define mmMP0_SMN_C2PMSG_92_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_93 0x009d
++#define mmMP0_SMN_C2PMSG_93_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_94 0x009e
++#define mmMP0_SMN_C2PMSG_94_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_95 0x009f
++#define mmMP0_SMN_C2PMSG_95_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_96 0x00a0
++#define mmMP0_SMN_C2PMSG_96_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_97 0x00a1
++#define mmMP0_SMN_C2PMSG_97_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_98 0x00a2
++#define mmMP0_SMN_C2PMSG_98_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_99 0x00a3
++#define mmMP0_SMN_C2PMSG_99_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_100 0x00a4
++#define mmMP0_SMN_C2PMSG_100_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_101 0x00a5
++#define mmMP0_SMN_C2PMSG_101_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_102 0x00a6
++#define mmMP0_SMN_C2PMSG_102_BASE_IDX 0
++#define mmMP0_SMN_C2PMSG_103 0x00a7
++#define mmMP0_SMN_C2PMSG_103_BASE_IDX 0
++#define mmMP0_SMN_ACTIVE_FCN_ID 0x00c0
++#define mmMP0_SMN_ACTIVE_FCN_ID_BASE_IDX 0
++#define mmMP0_SMN_IH_CREDIT 0x00c1
++#define mmMP0_SMN_IH_CREDIT_BASE_IDX 0
++#define mmMP0_SMN_IH_SW_INT 0x00c2
++#define mmMP0_SMN_IH_SW_INT_BASE_IDX 0
++#define mmMP0_SMN_IH_SW_INT_CTRL 0x00c3
++#define mmMP0_SMN_IH_SW_INT_CTRL_BASE_IDX 0
++
++
++// addressBlock: mp_SmuMp1_SmnDec
++// base address: 0x0
++#define mmMP1_SMN_C2PMSG_32 0x0260
++#define mmMP1_SMN_C2PMSG_32_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_33 0x0261
++#define mmMP1_SMN_C2PMSG_33_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_34 0x0262
++#define mmMP1_SMN_C2PMSG_34_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_35 0x0263
++#define mmMP1_SMN_C2PMSG_35_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_36 0x0264
++#define mmMP1_SMN_C2PMSG_36_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_37 0x0265
++#define mmMP1_SMN_C2PMSG_37_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_38 0x0266
++#define mmMP1_SMN_C2PMSG_38_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_39 0x0267
++#define mmMP1_SMN_C2PMSG_39_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_40 0x0268
++#define mmMP1_SMN_C2PMSG_40_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_41 0x0269
++#define mmMP1_SMN_C2PMSG_41_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_42 0x026a
++#define mmMP1_SMN_C2PMSG_42_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_43 0x026b
++#define mmMP1_SMN_C2PMSG_43_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_44 0x026c
++#define mmMP1_SMN_C2PMSG_44_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_45 0x026d
++#define mmMP1_SMN_C2PMSG_45_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_46 0x026e
++#define mmMP1_SMN_C2PMSG_46_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_47 0x026f
++#define mmMP1_SMN_C2PMSG_47_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_48 0x0270
++#define mmMP1_SMN_C2PMSG_48_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_49 0x0271
++#define mmMP1_SMN_C2PMSG_49_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_50 0x0272
++#define mmMP1_SMN_C2PMSG_50_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_51 0x0273
++#define mmMP1_SMN_C2PMSG_51_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_52 0x0274
++#define mmMP1_SMN_C2PMSG_52_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_53 0x0275
++#define mmMP1_SMN_C2PMSG_53_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_54 0x0276
++#define mmMP1_SMN_C2PMSG_54_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_55 0x0277
++#define mmMP1_SMN_C2PMSG_55_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_56 0x0278
++#define mmMP1_SMN_C2PMSG_56_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_57 0x0279
++#define mmMP1_SMN_C2PMSG_57_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_58 0x027a
++#define mmMP1_SMN_C2PMSG_58_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_59 0x027b
++#define mmMP1_SMN_C2PMSG_59_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_60 0x027c
++#define mmMP1_SMN_C2PMSG_60_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_61 0x027d
++#define mmMP1_SMN_C2PMSG_61_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_62 0x027e
++#define mmMP1_SMN_C2PMSG_62_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_63 0x027f
++#define mmMP1_SMN_C2PMSG_63_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_64 0x0280
++#define mmMP1_SMN_C2PMSG_64_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_65 0x0281
++#define mmMP1_SMN_C2PMSG_65_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_66 0x0282
++#define mmMP1_SMN_C2PMSG_66_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_67 0x0283
++#define mmMP1_SMN_C2PMSG_67_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_68 0x0284
++#define mmMP1_SMN_C2PMSG_68_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_69 0x0285
++#define mmMP1_SMN_C2PMSG_69_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_70 0x0286
++#define mmMP1_SMN_C2PMSG_70_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_71 0x0287
++#define mmMP1_SMN_C2PMSG_71_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_72 0x0288
++#define mmMP1_SMN_C2PMSG_72_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_73 0x0289
++#define mmMP1_SMN_C2PMSG_73_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_74 0x028a
++#define mmMP1_SMN_C2PMSG_74_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_75 0x028b
++#define mmMP1_SMN_C2PMSG_75_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_76 0x028c
++#define mmMP1_SMN_C2PMSG_76_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_77 0x028d
++#define mmMP1_SMN_C2PMSG_77_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_78 0x028e
++#define mmMP1_SMN_C2PMSG_78_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_79 0x028f
++#define mmMP1_SMN_C2PMSG_79_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_80 0x0290
++#define mmMP1_SMN_C2PMSG_80_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_81 0x0291
++#define mmMP1_SMN_C2PMSG_81_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_82 0x0292
++#define mmMP1_SMN_C2PMSG_82_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_83 0x0293
++#define mmMP1_SMN_C2PMSG_83_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_84 0x0294
++#define mmMP1_SMN_C2PMSG_84_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_85 0x0295
++#define mmMP1_SMN_C2PMSG_85_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_86 0x0296
++#define mmMP1_SMN_C2PMSG_86_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_87 0x0297
++#define mmMP1_SMN_C2PMSG_87_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_88 0x0298
++#define mmMP1_SMN_C2PMSG_88_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_89 0x0299
++#define mmMP1_SMN_C2PMSG_89_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_90 0x029a
++#define mmMP1_SMN_C2PMSG_90_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_91 0x029b
++#define mmMP1_SMN_C2PMSG_91_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_92 0x029c
++#define mmMP1_SMN_C2PMSG_92_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_93 0x029d
++#define mmMP1_SMN_C2PMSG_93_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_94 0x029e
++#define mmMP1_SMN_C2PMSG_94_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_95 0x029f
++#define mmMP1_SMN_C2PMSG_95_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_96 0x02a0
++#define mmMP1_SMN_C2PMSG_96_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_97 0x02a1
++#define mmMP1_SMN_C2PMSG_97_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_98 0x02a2
++#define mmMP1_SMN_C2PMSG_98_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_99 0x02a3
++#define mmMP1_SMN_C2PMSG_99_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_100 0x02a4
++#define mmMP1_SMN_C2PMSG_100_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_101 0x02a5
++#define mmMP1_SMN_C2PMSG_101_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_102 0x02a6
++#define mmMP1_SMN_C2PMSG_102_BASE_IDX 0
++#define mmMP1_SMN_C2PMSG_103 0x02a7
++#define mmMP1_SMN_C2PMSG_103_BASE_IDX 0
++#define mmMP1_SMN_ACTIVE_FCN_ID 0x02c0
++#define mmMP1_SMN_ACTIVE_FCN_ID_BASE_IDX 0
++#define mmMP1_SMN_IH_CREDIT 0x02c1
++#define mmMP1_SMN_IH_CREDIT_BASE_IDX 0
++#define mmMP1_SMN_IH_SW_INT 0x02c2
++#define mmMP1_SMN_IH_SW_INT_BASE_IDX 0
++#define mmMP1_SMN_IH_SW_INT_CTRL 0x02c3
++#define mmMP1_SMN_IH_SW_INT_CTRL_BASE_IDX 0
++#define mmMP1_SMN_FPS_CNT 0x02c4
++#define mmMP1_SMN_FPS_CNT_BASE_IDX 0
++#define mmMP1_SMN_PUB_CTRL 0x02c5
++#define mmMP1_SMN_PUB_CTRL_BASE_IDX 0
++#define mmMP1_SMN_EXT_SCRATCH0 0x03c0
++#define mmMP1_SMN_EXT_SCRATCH0_BASE_IDX 0
++#define mmMP1_SMN_EXT_SCRATCH1 0x03c1
++#define mmMP1_SMN_EXT_SCRATCH1_BASE_IDX 0
++#define mmMP1_SMN_EXT_SCRATCH2 0x03c2
++#define mmMP1_SMN_EXT_SCRATCH2_BASE_IDX 0
++#define mmMP1_SMN_EXT_SCRATCH3 0x03c3
++#define mmMP1_SMN_EXT_SCRATCH3_BASE_IDX 0
++#define mmMP1_SMN_EXT_SCRATCH4 0x03c4
++#define mmMP1_SMN_EXT_SCRATCH4_BASE_IDX 0
++#define mmMP1_SMN_EXT_SCRATCH5 0x03c5
++#define mmMP1_SMN_EXT_SCRATCH5_BASE_IDX 0
++#define mmMP1_SMN_EXT_SCRATCH6 0x03c6
++#define mmMP1_SMN_EXT_SCRATCH6_BASE_IDX 0
++#define mmMP1_SMN_EXT_SCRATCH7 0x03c7
++#define mmMP1_SMN_EXT_SCRATCH7_BASE_IDX 0
++
++
++#endif
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_sh_mask.h
+new file mode 100644
+index 0000000..1ac8895
+--- /dev/null
++++ b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_sh_mask.h
+@@ -0,0 +1,534 @@
++/*
++ * Copyright (C) 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
++ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef _mp_11_0_2_SH_MASK_HEADER
++#define _mp_11_0_2_SH_MASK_HEADER
++
++
++// addressBlock: mp_SmuMp0_SmnDec
++//MP0_SMN_C2PMSG_32
++#define MP0_SMN_C2PMSG_32__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_33
++#define MP0_SMN_C2PMSG_33__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_34
++#define MP0_SMN_C2PMSG_34__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_35
++#define MP0_SMN_C2PMSG_35__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_36
++#define MP0_SMN_C2PMSG_36__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_37
++#define MP0_SMN_C2PMSG_37__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_38
++#define MP0_SMN_C2PMSG_38__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_39
++#define MP0_SMN_C2PMSG_39__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_40
++#define MP0_SMN_C2PMSG_40__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_41
++#define MP0_SMN_C2PMSG_41__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_42
++#define MP0_SMN_C2PMSG_42__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_43
++#define MP0_SMN_C2PMSG_43__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_44
++#define MP0_SMN_C2PMSG_44__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_45
++#define MP0_SMN_C2PMSG_45__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_46
++#define MP0_SMN_C2PMSG_46__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_47
++#define MP0_SMN_C2PMSG_47__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_48
++#define MP0_SMN_C2PMSG_48__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_49
++#define MP0_SMN_C2PMSG_49__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_50
++#define MP0_SMN_C2PMSG_50__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_51
++#define MP0_SMN_C2PMSG_51__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_52
++#define MP0_SMN_C2PMSG_52__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_53
++#define MP0_SMN_C2PMSG_53__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_54
++#define MP0_SMN_C2PMSG_54__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_55
++#define MP0_SMN_C2PMSG_55__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_56
++#define MP0_SMN_C2PMSG_56__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_57
++#define MP0_SMN_C2PMSG_57__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_58
++#define MP0_SMN_C2PMSG_58__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_59
++#define MP0_SMN_C2PMSG_59__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_60
++#define MP0_SMN_C2PMSG_60__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_61
++#define MP0_SMN_C2PMSG_61__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_62
++#define MP0_SMN_C2PMSG_62__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_63
++#define MP0_SMN_C2PMSG_63__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_64
++#define MP0_SMN_C2PMSG_64__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_65
++#define MP0_SMN_C2PMSG_65__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_66
++#define MP0_SMN_C2PMSG_66__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_67
++#define MP0_SMN_C2PMSG_67__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_68
++#define MP0_SMN_C2PMSG_68__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_69
++#define MP0_SMN_C2PMSG_69__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_70
++#define MP0_SMN_C2PMSG_70__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_71
++#define MP0_SMN_C2PMSG_71__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_72
++#define MP0_SMN_C2PMSG_72__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_73
++#define MP0_SMN_C2PMSG_73__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_74
++#define MP0_SMN_C2PMSG_74__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_75
++#define MP0_SMN_C2PMSG_75__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_76
++#define MP0_SMN_C2PMSG_76__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_77
++#define MP0_SMN_C2PMSG_77__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_78
++#define MP0_SMN_C2PMSG_78__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_79
++#define MP0_SMN_C2PMSG_79__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_80
++#define MP0_SMN_C2PMSG_80__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_81
++#define MP0_SMN_C2PMSG_81__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_82
++#define MP0_SMN_C2PMSG_82__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_83
++#define MP0_SMN_C2PMSG_83__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_84
++#define MP0_SMN_C2PMSG_84__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_85
++#define MP0_SMN_C2PMSG_85__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_86
++#define MP0_SMN_C2PMSG_86__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_87
++#define MP0_SMN_C2PMSG_87__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_88
++#define MP0_SMN_C2PMSG_88__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_89
++#define MP0_SMN_C2PMSG_89__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_90
++#define MP0_SMN_C2PMSG_90__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_91
++#define MP0_SMN_C2PMSG_91__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_92
++#define MP0_SMN_C2PMSG_92__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_93
++#define MP0_SMN_C2PMSG_93__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_94
++#define MP0_SMN_C2PMSG_94__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_95
++#define MP0_SMN_C2PMSG_95__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_96
++#define MP0_SMN_C2PMSG_96__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_97
++#define MP0_SMN_C2PMSG_97__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_98
++#define MP0_SMN_C2PMSG_98__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_99
++#define MP0_SMN_C2PMSG_99__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_100
++#define MP0_SMN_C2PMSG_100__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_101
++#define MP0_SMN_C2PMSG_101__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_102
++#define MP0_SMN_C2PMSG_102__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_C2PMSG_103
++#define MP0_SMN_C2PMSG_103__CONTENT__SHIFT 0x0
++#define MP0_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL
++//MP0_SMN_ACTIVE_FCN_ID
++#define MP0_SMN_ACTIVE_FCN_ID__VFID__SHIFT 0x0
++#define MP0_SMN_ACTIVE_FCN_ID__VF__SHIFT 0x1f
++#define MP0_SMN_ACTIVE_FCN_ID__VFID_MASK 0x0000001FL
++#define MP0_SMN_ACTIVE_FCN_ID__VF_MASK 0x80000000L
++//MP0_SMN_IH_CREDIT
++#define MP0_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
++#define MP0_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10
++#define MP0_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
++#define MP0_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L
++//MP0_SMN_IH_SW_INT
++#define MP0_SMN_IH_SW_INT__ID__SHIFT 0x0
++#define MP0_SMN_IH_SW_INT__VALID__SHIFT 0x8
++#define MP0_SMN_IH_SW_INT__ID_MASK 0x000000FFL
++#define MP0_SMN_IH_SW_INT__VALID_MASK 0x00000100L
++//MP0_SMN_IH_SW_INT_CTRL
++#define MP0_SMN_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0
++#define MP0_SMN_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8
++#define MP0_SMN_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L
++#define MP0_SMN_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L
++
++
++// addressBlock: mp_SmuMp1_SmnDec
++//MP1_SMN_C2PMSG_32
++#define MP1_SMN_C2PMSG_32__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_33
++#define MP1_SMN_C2PMSG_33__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_34
++#define MP1_SMN_C2PMSG_34__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_35
++#define MP1_SMN_C2PMSG_35__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_36
++#define MP1_SMN_C2PMSG_36__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_37
++#define MP1_SMN_C2PMSG_37__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_38
++#define MP1_SMN_C2PMSG_38__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_39
++#define MP1_SMN_C2PMSG_39__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_40
++#define MP1_SMN_C2PMSG_40__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_41
++#define MP1_SMN_C2PMSG_41__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_42
++#define MP1_SMN_C2PMSG_42__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_43
++#define MP1_SMN_C2PMSG_43__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_44
++#define MP1_SMN_C2PMSG_44__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_45
++#define MP1_SMN_C2PMSG_45__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_46
++#define MP1_SMN_C2PMSG_46__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_47
++#define MP1_SMN_C2PMSG_47__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_48
++#define MP1_SMN_C2PMSG_48__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_49
++#define MP1_SMN_C2PMSG_49__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_50
++#define MP1_SMN_C2PMSG_50__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_51
++#define MP1_SMN_C2PMSG_51__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_52
++#define MP1_SMN_C2PMSG_52__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_53
++#define MP1_SMN_C2PMSG_53__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_54
++#define MP1_SMN_C2PMSG_54__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_55
++#define MP1_SMN_C2PMSG_55__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_56
++#define MP1_SMN_C2PMSG_56__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_57
++#define MP1_SMN_C2PMSG_57__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_58
++#define MP1_SMN_C2PMSG_58__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_59
++#define MP1_SMN_C2PMSG_59__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_60
++#define MP1_SMN_C2PMSG_60__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_61
++#define MP1_SMN_C2PMSG_61__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_62
++#define MP1_SMN_C2PMSG_62__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_63
++#define MP1_SMN_C2PMSG_63__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_64
++#define MP1_SMN_C2PMSG_64__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_65
++#define MP1_SMN_C2PMSG_65__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_66
++#define MP1_SMN_C2PMSG_66__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_67
++#define MP1_SMN_C2PMSG_67__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_68
++#define MP1_SMN_C2PMSG_68__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_69
++#define MP1_SMN_C2PMSG_69__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_70
++#define MP1_SMN_C2PMSG_70__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_71
++#define MP1_SMN_C2PMSG_71__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_72
++#define MP1_SMN_C2PMSG_72__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_73
++#define MP1_SMN_C2PMSG_73__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_74
++#define MP1_SMN_C2PMSG_74__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_75
++#define MP1_SMN_C2PMSG_75__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_76
++#define MP1_SMN_C2PMSG_76__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_77
++#define MP1_SMN_C2PMSG_77__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_78
++#define MP1_SMN_C2PMSG_78__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_79
++#define MP1_SMN_C2PMSG_79__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_80
++#define MP1_SMN_C2PMSG_80__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_81
++#define MP1_SMN_C2PMSG_81__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_82
++#define MP1_SMN_C2PMSG_82__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_83
++#define MP1_SMN_C2PMSG_83__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_84
++#define MP1_SMN_C2PMSG_84__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_85
++#define MP1_SMN_C2PMSG_85__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_86
++#define MP1_SMN_C2PMSG_86__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_87
++#define MP1_SMN_C2PMSG_87__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_88
++#define MP1_SMN_C2PMSG_88__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_89
++#define MP1_SMN_C2PMSG_89__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_90
++#define MP1_SMN_C2PMSG_90__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_91
++#define MP1_SMN_C2PMSG_91__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_92
++#define MP1_SMN_C2PMSG_92__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_93
++#define MP1_SMN_C2PMSG_93__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_94
++#define MP1_SMN_C2PMSG_94__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_95
++#define MP1_SMN_C2PMSG_95__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_96
++#define MP1_SMN_C2PMSG_96__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_97
++#define MP1_SMN_C2PMSG_97__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_98
++#define MP1_SMN_C2PMSG_98__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_99
++#define MP1_SMN_C2PMSG_99__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_100
++#define MP1_SMN_C2PMSG_100__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_101
++#define MP1_SMN_C2PMSG_101__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_102
++#define MP1_SMN_C2PMSG_102__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_C2PMSG_103
++#define MP1_SMN_C2PMSG_103__CONTENT__SHIFT 0x0
++#define MP1_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL
++//MP1_SMN_ACTIVE_FCN_ID
++#define MP1_SMN_ACTIVE_FCN_ID__VFID__SHIFT 0x0
++#define MP1_SMN_ACTIVE_FCN_ID__VF__SHIFT 0x1f
++#define MP1_SMN_ACTIVE_FCN_ID__VFID_MASK 0x0000001FL
++#define MP1_SMN_ACTIVE_FCN_ID__VF_MASK 0x80000000L
++//MP1_SMN_IH_CREDIT
++#define MP1_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
++#define MP1_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10
++#define MP1_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
++#define MP1_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L
++//MP1_SMN_IH_SW_INT
++#define MP1_SMN_IH_SW_INT__ID__SHIFT 0x0
++#define MP1_SMN_IH_SW_INT__VALID__SHIFT 0x8
++#define MP1_SMN_IH_SW_INT__ID_MASK 0x000000FFL
++#define MP1_SMN_IH_SW_INT__VALID_MASK 0x00000100L
++//MP1_SMN_IH_SW_INT_CTRL
++#define MP1_SMN_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0
++#define MP1_SMN_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8
++#define MP1_SMN_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L
++#define MP1_SMN_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L
++//MP1_SMN_FPS_CNT
++#define MP1_SMN_FPS_CNT__COUNT__SHIFT 0x0
++#define MP1_SMN_FPS_CNT__COUNT_MASK 0xFFFFFFFFL
++//MP1_SMN_PUB_CTRL
++#define MP1_SMN_PUB_CTRL__RESET__SHIFT 0x0
++#define MP1_SMN_PUB_CTRL__RESET_MASK 0x00000001L
++//MP1_SMN_EXT_SCRATCH0
++#define MP1_SMN_EXT_SCRATCH0__DATA__SHIFT 0x0
++#define MP1_SMN_EXT_SCRATCH0__DATA_MASK 0xFFFFFFFFL
++//MP1_SMN_EXT_SCRATCH1
++#define MP1_SMN_EXT_SCRATCH1__DATA__SHIFT 0x0
++#define MP1_SMN_EXT_SCRATCH1__DATA_MASK 0xFFFFFFFFL
++//MP1_SMN_EXT_SCRATCH2
++#define MP1_SMN_EXT_SCRATCH2__DATA__SHIFT 0x0
++#define MP1_SMN_EXT_SCRATCH2__DATA_MASK 0xFFFFFFFFL
++//MP1_SMN_EXT_SCRATCH3
++#define MP1_SMN_EXT_SCRATCH3__DATA__SHIFT 0x0
++#define MP1_SMN_EXT_SCRATCH3__DATA_MASK 0xFFFFFFFFL
++//MP1_SMN_EXT_SCRATCH4
++#define MP1_SMN_EXT_SCRATCH4__DATA__SHIFT 0x0
++#define MP1_SMN_EXT_SCRATCH4__DATA_MASK 0xFFFFFFFFL
++//MP1_SMN_EXT_SCRATCH5
++#define MP1_SMN_EXT_SCRATCH5__DATA__SHIFT 0x0
++#define MP1_SMN_EXT_SCRATCH5__DATA_MASK 0xFFFFFFFFL
++//MP1_SMN_EXT_SCRATCH6
++#define MP1_SMN_EXT_SCRATCH6__DATA__SHIFT 0x0
++#define MP1_SMN_EXT_SCRATCH6__DATA_MASK 0xFFFFFFFFL
++//MP1_SMN_EXT_SCRATCH7
++#define MP1_SMN_EXT_SCRATCH7__DATA__SHIFT 0x0
++#define MP1_SMN_EXT_SCRATCH7__DATA_MASK 0xFFFFFFFFL
++
++
++#endif
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5119-Revert-drm-amdgpu-Add-nbio-support-for-vega20-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5119-Revert-drm-amdgpu-Add-nbio-support-for-vega20-v2.patch
new file mode 100644
index 00000000..de199212
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5119-Revert-drm-amdgpu-Add-nbio-support-for-vega20-v2.patch
@@ -0,0 +1,82 @@
+From 993c12106676ce2561324143319f4b55e4ffbcb7 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Tue, 3 Apr 2018 15:49:56 -0500
+Subject: [PATCH 5119/5725] Revert "drm/amdgpu: Add nbio support for vega20
+ (v2)"
+
+Revert this to add proper nbio 7.4 support.
+
+This reverts commit f5b2e1fa321eff20a9418ebd497d8a466f024a85.
+
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c | 18 +-----------------
+ drivers/gpu/drm/amd/amdgpu/soc15.c | 2 --
+ 2 files changed, 1 insertion(+), 19 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
+index 365517c..df34dc7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
+@@ -34,19 +34,10 @@
+ #define smnCPM_CONTROL 0x11180460
+ #define smnPCIE_CNTL2 0x11180070
+
+-/* vega20 */
+-#define mmRCC_DEV0_EPF0_STRAP0_VG20 0x0011
+-#define mmRCC_DEV0_EPF0_STRAP0_VG20_BASE_IDX 2
+-
+ static u32 nbio_v7_0_get_rev_id(struct amdgpu_device *adev)
+ {
+ u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
+
+- if (adev->asic_type == CHIP_VEGA20)
+- tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0_VG20);
+- else
+- tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
+-
+ tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
+ tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;
+
+@@ -84,14 +75,10 @@ static void nbio_v7_0_sdma_doorbell_range(struct amdgpu_device *adev, int instan
+ SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE);
+
+ u32 doorbell_range = RREG32(reg);
+- u32 range = 2;
+-
+- if (adev->asic_type == CHIP_VEGA20)
+- range = 8;
+
+ if (use_doorbell) {
+ doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
+- doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, range);
++ doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 2);
+ } else
+ doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);
+
+@@ -146,9 +133,6 @@ static void nbio_v7_0_update_medium_grain_clock_gating(struct amdgpu_device *ade
+ {
+ uint32_t def, data;
+
+- if (adev->asic_type == CHIP_VEGA20)
+- return;
+-
+ /* NBIF_MGCG_CTRL_LCLK */
+ def = data = RREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index 83f2717..6bd8036 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -497,8 +497,6 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
+
+ if (adev->flags & AMD_IS_APU)
+ adev->nbio_funcs = &nbio_v7_0_funcs;
+- else if (adev->asic_type == CHIP_VEGA20)
+- adev->nbio_funcs = &nbio_v7_0_funcs;
+ else
+ adev->nbio_funcs = &nbio_v6_1_funcs;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5120-drm-amdgpu-Add-nbio-7.4-support-for-vega20-v3.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5120-drm-amdgpu-Add-nbio-7.4-support-for-vega20-v3.patch
new file mode 100644
index 00000000..e6cf094c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5120-drm-amdgpu-Add-nbio-7.4-support-for-vega20-v3.patch
@@ -0,0 +1,346 @@
+From a518593c0a6f23944146c33c50b7816ad0635491 Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Fri, 23 Mar 2018 14:44:28 -0500
+Subject: [PATCH 5120/5725] drm/amdgpu: Add nbio 7.4 support for vega20 (v3)
+
+Some register offsets in nbio v7.4 are different from v7.0.
+We need a separate nbio_v7_4.c for vega20.
+
+v2: fix doorbell range for sdma (Alex)
+v3: squash in static fix (kbuild test robot)
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/Makefile | 2 +-
+ drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | 237 +++++++++++++++++++++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h | 31 +++++
+ drivers/gpu/drm/amd/amdgpu/soc15.c | 2 +
+ drivers/gpu/drm/amd/amdgpu/soc15.h | 1 +
+ 5 files changed, 272 insertions(+), 1 deletion(-)
+ create mode 100644 drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+ create mode 100644 drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
+index 68a4a06..6f0a496 100644
+--- a/drivers/gpu/drm/amd/amdgpu/Makefile
++++ b/drivers/gpu/drm/amd/amdgpu/Makefile
+@@ -42,7 +42,7 @@ amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce
+
+ amdgpu-y += \
+ vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \
+- vega20_reg_init.o
++ vega20_reg_init.o nbio_v7_4.o
+
+ # add DF block
+ amdgpu-y += \
+diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+new file mode 100644
+index 0000000..89ea920
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+@@ -0,0 +1,237 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++#include "amdgpu.h"
++#include "amdgpu_atombios.h"
++#include "nbio_v7_4.h"
++
++#include "nbio/nbio_7_4_offset.h"
++#include "nbio/nbio_7_4_sh_mask.h"
++
++#define smnNBIF_MGCG_CTRL_LCLK 0x1013a21c
++
++#define smnCPM_CONTROL 0x11180460
++#define smnPCIE_CNTL2 0x11180070
++
++static u32 nbio_v7_4_get_rev_id(struct amdgpu_device *adev)
++{
++ u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
++
++ tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
++ tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;
++
++ return tmp;
++}
++
++static void nbio_v7_4_mc_access_enable(struct amdgpu_device *adev, bool enable)
++{
++ if (enable)
++ WREG32_SOC15(NBIO, 0, mmBIF_FB_EN,
++ BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
++ else
++ WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
++}
++
++static void nbio_v7_4_hdp_flush(struct amdgpu_device *adev,
++ struct amdgpu_ring *ring)
++{
++ if (!ring || !ring->funcs->emit_wreg)
++ WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
++ else
++ amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
++ NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL), 0);
++}
++
++static u32 nbio_v7_4_get_memsize(struct amdgpu_device *adev)
++{
++ return RREG32_SOC15(NBIO, 0, mmRCC_CONFIG_MEMSIZE);
++}
++
++static void nbio_v7_4_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
++ bool use_doorbell, int doorbell_index)
++{
++ u32 reg = instance == 0 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE) :
++ SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE);
++
++ u32 doorbell_range = RREG32(reg);
++
++ if (use_doorbell) {
++ doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
++ doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 8);
++ } else
++ doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);
++
++ WREG32(reg, doorbell_range);
++}
++
++static void nbio_v7_4_enable_doorbell_aperture(struct amdgpu_device *adev,
++ bool enable)
++{
++ WREG32_FIELD15(NBIO, 0, RCC_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, enable ? 1 : 0);
++}
++
++static void nbio_v7_4_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
++ bool enable)
++{
++
++}
++
++static void nbio_v7_4_ih_doorbell_range(struct amdgpu_device *adev,
++ bool use_doorbell, int doorbell_index)
++{
++ u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0 , mmBIF_IH_DOORBELL_RANGE);
++
++ if (use_doorbell) {
++ ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, OFFSET, doorbell_index);
++ ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 2);
++ } else
++ ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 0);
++
++ WREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE, ih_doorbell_range);
++}
++
++
++static void nbio_v7_4_update_medium_grain_clock_gating(struct amdgpu_device *adev,
++ bool enable)
++{
++ //TODO: Add support for v7.4
++}
++
++static void nbio_v7_4_update_medium_grain_light_sleep(struct amdgpu_device *adev,
++ bool enable)
++{
++ uint32_t def, data;
++
++ def = data = RREG32_PCIE(smnPCIE_CNTL2);
++ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
++ data |= (PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
++ PCIE_CNTL2__MST_MEM_LS_EN_MASK |
++ PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
++ } else {
++ data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
++ PCIE_CNTL2__MST_MEM_LS_EN_MASK |
++ PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
++ }
++
++ if (def != data)
++ WREG32_PCIE(smnPCIE_CNTL2, data);
++}
++
++static void nbio_v7_4_get_clockgating_state(struct amdgpu_device *adev,
++ u32 *flags)
++{
++ int data;
++
++ /* AMD_CG_SUPPORT_BIF_MGCG */
++ data = RREG32_PCIE(smnCPM_CONTROL);
++ if (data & CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK)
++ *flags |= AMD_CG_SUPPORT_BIF_MGCG;
++
++ /* AMD_CG_SUPPORT_BIF_LS */
++ data = RREG32_PCIE(smnPCIE_CNTL2);
++ if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
++ *flags |= AMD_CG_SUPPORT_BIF_LS;
++}
++
++static void nbio_v7_4_ih_control(struct amdgpu_device *adev)
++{
++ u32 interrupt_cntl;
++
++ /* setup interrupt control */
++ WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
++ interrupt_cntl = RREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL);
++ /* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
++ * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
++ */
++ interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_DUMMY_RD_OVERRIDE, 0);
++ /* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
++ interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_REQ_NONSNOOP_EN, 0);
++ WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL, interrupt_cntl);
++}
++
++static u32 nbio_v7_4_get_hdp_flush_req_offset(struct amdgpu_device *adev)
++{
++ return SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_REQ);
++}
++
++static u32 nbio_v7_4_get_hdp_flush_done_offset(struct amdgpu_device *adev)
++{
++ return SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_DONE);
++}
++
++static u32 nbio_v7_4_get_pcie_index_offset(struct amdgpu_device *adev)
++{
++ return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2);
++}
++
++static u32 nbio_v7_4_get_pcie_data_offset(struct amdgpu_device *adev)
++{
++ return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
++}
++
++static const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = {
++ .ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK,
++ .ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK,
++ .ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK,
++ .ref_and_mask_cp3 = GPU_HDP_FLUSH_DONE__CP3_MASK,
++ .ref_and_mask_cp4 = GPU_HDP_FLUSH_DONE__CP4_MASK,
++ .ref_and_mask_cp5 = GPU_HDP_FLUSH_DONE__CP5_MASK,
++ .ref_and_mask_cp6 = GPU_HDP_FLUSH_DONE__CP6_MASK,
++ .ref_and_mask_cp7 = GPU_HDP_FLUSH_DONE__CP7_MASK,
++ .ref_and_mask_cp8 = GPU_HDP_FLUSH_DONE__CP8_MASK,
++ .ref_and_mask_cp9 = GPU_HDP_FLUSH_DONE__CP9_MASK,
++ .ref_and_mask_sdma0 = GPU_HDP_FLUSH_DONE__SDMA0_MASK,
++ .ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK,
++};
++
++static void nbio_v7_4_detect_hw_virt(struct amdgpu_device *adev)
++{
++ if (is_virtual_machine()) /* passthrough mode exclus sriov mod */
++ adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
++}
++
++static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
++{
++
++}
++
++const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
++ .hdp_flush_reg = &nbio_v7_4_hdp_flush_reg,
++ .get_hdp_flush_req_offset = nbio_v7_4_get_hdp_flush_req_offset,
++ .get_hdp_flush_done_offset = nbio_v7_4_get_hdp_flush_done_offset,
++ .get_pcie_index_offset = nbio_v7_4_get_pcie_index_offset,
++ .get_pcie_data_offset = nbio_v7_4_get_pcie_data_offset,
++ .get_rev_id = nbio_v7_4_get_rev_id,
++ .mc_access_enable = nbio_v7_4_mc_access_enable,
++ .hdp_flush = nbio_v7_4_hdp_flush,
++ .get_memsize = nbio_v7_4_get_memsize,
++ .sdma_doorbell_range = nbio_v7_4_sdma_doorbell_range,
++ .enable_doorbell_aperture = nbio_v7_4_enable_doorbell_aperture,
++ .enable_doorbell_selfring_aperture = nbio_v7_4_enable_doorbell_selfring_aperture,
++ .ih_doorbell_range = nbio_v7_4_ih_doorbell_range,
++ .update_medium_grain_clock_gating = nbio_v7_4_update_medium_grain_clock_gating,
++ .update_medium_grain_light_sleep = nbio_v7_4_update_medium_grain_light_sleep,
++ .get_clockgating_state = nbio_v7_4_get_clockgating_state,
++ .ih_control = nbio_v7_4_ih_control,
++ .init_registers = nbio_v7_4_init_registers,
++ .detect_hw_virt = nbio_v7_4_detect_hw_virt,
++};
+diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h
+new file mode 100644
+index 0000000..c442865
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h
+@@ -0,0 +1,31 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef __NBIO_V7_4_H__
++#define __NBIO_V7_4_H__
++
++#include "soc15_common.h"
++
++extern const struct amdgpu_nbio_funcs nbio_v7_4_funcs;
++
++#endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index 6bd8036..73c85a0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -497,6 +497,8 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
+
+ if (adev->flags & AMD_IS_APU)
+ adev->nbio_funcs = &nbio_v7_0_funcs;
++ else if (adev->asic_type == CHIP_VEGA20)
++ adev->nbio_funcs = &nbio_v7_4_funcs;
+ else
+ adev->nbio_funcs = &nbio_v6_1_funcs;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
+index 1f714b7..f8ad780 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
+@@ -26,6 +26,7 @@
+
+ #include "nbio_v6_1.h"
+ #include "nbio_v7_0.h"
++#include "nbio_v7_4.h"
+
+ #define SOC15_FLUSH_GPU_TLB_NUM_WREG 4
+ #define SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT 1
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5121-drm-amdgpu-update-atomfirmware.h.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5121-drm-amdgpu-update-atomfirmware.h.patch
new file mode 100644
index 00000000..6ac79857
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5121-drm-amdgpu-update-atomfirmware.h.patch
@@ -0,0 +1,114 @@
+From cd8149ccf7070e4690f3d3b8b12928b9990e5072 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Wed, 2 May 2018 15:50:10 +0800
+Subject: [PATCH 5121/5725] drm/amdgpu: update atomfirmware.h
+
+Add struct atom_smc_dpm_info_v4_3
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/include/atomfirmware.h | 86 ++++++++++++++++++++++++++++++
+ 1 file changed, 86 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
+index 4bc118d..6109a45 100644
+--- a/drivers/gpu/drm/amd/include/atomfirmware.h
++++ b/drivers/gpu/drm/amd/include/atomfirmware.h
+@@ -1446,6 +1446,92 @@ struct atom_smc_dpm_info_v4_1
+ uint32_t boardreserved[9];
+ };
+
++/*
++ ***************************************************************************
++ Data Table smc_dpm_info structure
++ ***************************************************************************
++ */
++struct atom_smc_dpm_info_v4_3
++{
++ struct atom_common_table_header table_header;
++ uint8_t liquid1_i2c_address;
++ uint8_t liquid2_i2c_address;
++ uint8_t vr_i2c_address;
++ uint8_t plx_i2c_address;
++
++ uint8_t liquid_i2c_linescl;
++ uint8_t liquid_i2c_linesda;
++ uint8_t vr_i2c_linescl;
++ uint8_t vr_i2c_linesda;
++
++ uint8_t plx_i2c_linescl;
++ uint8_t plx_i2c_linesda;
++ uint8_t vrsensorpresent;
++ uint8_t liquidsensorpresent;
++
++ uint16_t maxvoltagestepgfx;
++ uint16_t maxvoltagestepsoc;
++
++ uint8_t vddgfxvrmapping;
++ uint8_t vddsocvrmapping;
++ uint8_t vddmem0vrmapping;
++ uint8_t vddmem1vrmapping;
++
++ uint8_t gfxulvphasesheddingmask;
++ uint8_t soculvphasesheddingmask;
++ uint8_t externalsensorpresent;
++ uint8_t padding8_v;
++
++ uint16_t gfxmaxcurrent;
++ uint8_t gfxoffset;
++ uint8_t padding_telemetrygfx;
++
++ uint16_t socmaxcurrent;
++ uint8_t socoffset;
++ uint8_t padding_telemetrysoc;
++
++ uint16_t mem0maxcurrent;
++ uint8_t mem0offset;
++ uint8_t padding_telemetrymem0;
++
++ uint16_t mem1maxcurrent;
++ uint8_t mem1offset;
++ uint8_t padding_telemetrymem1;
++
++ uint8_t acdcgpio;
++ uint8_t acdcpolarity;
++ uint8_t vr0hotgpio;
++ uint8_t vr0hotpolarity;
++
++ uint8_t vr1hotgpio;
++ uint8_t vr1hotpolarity;
++ uint8_t padding1;
++ uint8_t padding2;
++
++ uint8_t ledpin0;
++ uint8_t ledpin1;
++ uint8_t ledpin2;
++ uint8_t padding8_4;
++
++ uint8_t pllgfxclkspreadenabled;
++ uint8_t pllgfxclkspreadpercent;
++ uint16_t pllgfxclkspreadfreq;
++
++ uint8_t uclkspreadenabled;
++ uint8_t uclkspreadpercent;
++ uint16_t uclkspreadfreq;
++
++ uint8_t fclkspreadenabled;
++ uint8_t fclkspreadpercent;
++ uint16_t fclkspreadfreq;
++
++ uint8_t fllgfxclkspreadenabled;
++ uint8_t fllgfxclkspreadpercent;
++ uint16_t fllgfxclkspreadfreq;
++
++ uint32_t boardreserved[10];
++};
++
+ /*
+ ***************************************************************************
+ Data Table asic_profiling_info structure
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5122-drm-amd-powerplay-add-vega20_inc.h-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5122-drm-amd-powerplay-add-vega20_inc.h-v2.patch
new file mode 100644
index 00000000..8cd7d89f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5122-drm-amd-powerplay-add-vega20_inc.h-v2.patch
@@ -0,0 +1,59 @@
+From 3876bf5b2025c43884c4189f8b47ebced9d542cf Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Wed, 21 Mar 2018 14:10:21 +0800
+Subject: [PATCH 5122/5725] drm/amd/powerplay: add vega20_inc.h (v2)
+
+v2: use thm 11.0.2 headers
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega20_inc.h | 35 ++++++++++++++++++++++++
+ 1 file changed, 35 insertions(+)
+ create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_inc.h
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_inc.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_inc.h
+new file mode 100644
+index 0000000..6738bad
+--- /dev/null
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_inc.h
+@@ -0,0 +1,35 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef VEGA20_INC_H
++#define VEGA20_INC_H
++
++#include "asic_reg/thm/thm_11_0_2_offset.h"
++#include "asic_reg/thm/thm_11_0_2_sh_mask.h"
++
++#include "asic_reg/mp/mp_9_0_offset.h"
++#include "asic_reg/mp/mp_9_0_sh_mask.h"
++
++#include "asic_reg/nbio/nbio_7_4_offset.h"
++
++#endif
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5123-drm-amd-powerplay-add-smu11_driver_if.h-v4.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5123-drm-amd-powerplay-add-smu11_driver_if.h-v4.patch
new file mode 100644
index 00000000..5a6eb137
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5123-drm-amd-powerplay-add-smu11_driver_if.h-v4.patch
@@ -0,0 +1,857 @@
+From 4b5a671926e4580d7a86c9621581fa955c789a95 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Wed, 21 Mar 2018 16:16:41 +0800
+Subject: [PATCH 5123/5725] drm/amd/powerplay: add smu11_driver_if.h (v4)
+
+v2: cleanup
+v3: fit the latest 40.6 smc fw
+v4: update to latest.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../gpu/drm/amd/powerplay/inc/smu11_driver_if.h | 831 +++++++++++++++++++++
+ 1 file changed, 831 insertions(+)
+ create mode 100644 drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
+
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
+new file mode 100644
+index 0000000..0a39a4c
+--- /dev/null
++++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
+@@ -0,0 +1,831 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef SMU11_DRIVER_IF_H
++#define SMU11_DRIVER_IF_H
++
++// *** IMPORTANT ***
++// SMU TEAM: Always increment the interface version if
++// any structure is changed in this file
++#define SMU11_DRIVER_IF_VERSION 0x11
++
++#define PPTABLE_V20_SMU_VERSION 2
++
++#define NUM_GFXCLK_DPM_LEVELS 16
++#define NUM_VCLK_DPM_LEVELS 8
++#define NUM_DCLK_DPM_LEVELS 8
++#define NUM_ECLK_DPM_LEVELS 8
++#define NUM_MP0CLK_DPM_LEVELS 2
++#define NUM_SOCCLK_DPM_LEVELS 8
++#define NUM_UCLK_DPM_LEVELS 4
++#define NUM_FCLK_DPM_LEVELS 8
++#define NUM_DCEFCLK_DPM_LEVELS 8
++#define NUM_DISPCLK_DPM_LEVELS 8
++#define NUM_PIXCLK_DPM_LEVELS 8
++#define NUM_PHYCLK_DPM_LEVELS 8
++#define NUM_LINK_LEVELS 2
++#define NUM_XGMI_LEVELS 2
++
++#define MAX_GFXCLK_DPM_LEVEL (NUM_GFXCLK_DPM_LEVELS - 1)
++#define MAX_VCLK_DPM_LEVEL (NUM_VCLK_DPM_LEVELS - 1)
++#define MAX_DCLK_DPM_LEVEL (NUM_DCLK_DPM_LEVELS - 1)
++#define MAX_ECLK_DPM_LEVEL (NUM_ECLK_DPM_LEVELS - 1)
++#define MAX_MP0CLK_DPM_LEVEL (NUM_MP0CLK_DPM_LEVELS - 1)
++#define MAX_SOCCLK_DPM_LEVEL (NUM_SOCCLK_DPM_LEVELS - 1)
++#define MAX_UCLK_DPM_LEVEL (NUM_UCLK_DPM_LEVELS - 1)
++#define MAX_FCLK_DPM_LEVEL (NUM_FCLK_DPM_LEVELS - 1)
++#define MAX_DCEFCLK_DPM_LEVEL (NUM_DCEFCLK_DPM_LEVELS - 1)
++#define MAX_DISPCLK_DPM_LEVEL (NUM_DISPCLK_DPM_LEVELS - 1)
++#define MAX_PIXCLK_DPM_LEVEL (NUM_PIXCLK_DPM_LEVELS - 1)
++#define MAX_PHYCLK_DPM_LEVEL (NUM_PHYCLK_DPM_LEVELS - 1)
++#define MAX_LINK_LEVEL (NUM_LINK_LEVELS - 1)
++#define MAX_XGMI_LEVEL (NUM_XGMI_LEVELS - 1)
++
++#define PPSMC_GeminiModeNone 0
++#define PPSMC_GeminiModeMaster 1
++#define PPSMC_GeminiModeSlave 2
++
++
++#define FEATURE_DPM_PREFETCHER_BIT 0
++#define FEATURE_DPM_GFXCLK_BIT 1
++#define FEATURE_DPM_UCLK_BIT 2
++#define FEATURE_DPM_SOCCLK_BIT 3
++#define FEATURE_DPM_UVD_BIT 4
++#define FEATURE_DPM_VCE_BIT 5
++#define FEATURE_ULV_BIT 6
++#define FEATURE_DPM_MP0CLK_BIT 7
++#define FEATURE_DPM_LINK_BIT 8
++#define FEATURE_DPM_DCEFCLK_BIT 9
++#define FEATURE_DS_GFXCLK_BIT 10
++#define FEATURE_DS_SOCCLK_BIT 11
++#define FEATURE_DS_LCLK_BIT 12
++#define FEATURE_PPT_BIT 13
++#define FEATURE_TDC_BIT 14
++#define FEATURE_THERMAL_BIT 15
++#define FEATURE_GFX_PER_CU_CG_BIT 16
++#define FEATURE_RM_BIT 17
++#define FEATURE_DS_DCEFCLK_BIT 18
++#define FEATURE_ACDC_BIT 19
++#define FEATURE_VR0HOT_BIT 20
++#define FEATURE_VR1HOT_BIT 21
++#define FEATURE_FW_CTF_BIT 22
++#define FEATURE_LED_DISPLAY_BIT 23
++#define FEATURE_FAN_CONTROL_BIT 24
++#define FEATURE_GFX_EDC_BIT 25
++#define FEATURE_GFXOFF_BIT 26
++#define FEATURE_CG_BIT 27
++#define FEATURE_DPM_FCLK_BIT 28
++#define FEATURE_DS_FCLK_BIT 29
++#define FEATURE_DS_MP1CLK_BIT 30
++#define FEATURE_DS_MP0CLK_BIT 31
++#define FEATURE_XGMI_BIT 32
++#define FEATURE_SPARE_33_BIT 33
++#define FEATURE_SPARE_34_BIT 34
++#define FEATURE_SPARE_35_BIT 35
++#define FEATURE_SPARE_36_BIT 36
++#define FEATURE_SPARE_37_BIT 37
++#define FEATURE_SPARE_38_BIT 38
++#define FEATURE_SPARE_39_BIT 39
++#define FEATURE_SPARE_40_BIT 40
++#define FEATURE_SPARE_41_BIT 41
++#define FEATURE_SPARE_42_BIT 42
++#define FEATURE_SPARE_43_BIT 43
++#define FEATURE_SPARE_44_BIT 44
++#define FEATURE_SPARE_45_BIT 45
++#define FEATURE_SPARE_46_BIT 46
++#define FEATURE_SPARE_47_BIT 47
++#define FEATURE_SPARE_48_BIT 48
++#define FEATURE_SPARE_49_BIT 49
++#define FEATURE_SPARE_50_BIT 50
++#define FEATURE_SPARE_51_BIT 51
++#define FEATURE_SPARE_52_BIT 52
++#define FEATURE_SPARE_53_BIT 53
++#define FEATURE_SPARE_54_BIT 54
++#define FEATURE_SPARE_55_BIT 55
++#define FEATURE_SPARE_56_BIT 56
++#define FEATURE_SPARE_57_BIT 57
++#define FEATURE_SPARE_58_BIT 58
++#define FEATURE_SPARE_59_BIT 59
++#define FEATURE_SPARE_60_BIT 60
++#define FEATURE_SPARE_61_BIT 61
++#define FEATURE_SPARE_62_BIT 62
++#define FEATURE_SPARE_63_BIT 63
++
++#define NUM_FEATURES 64
++
++#define FEATURE_DPM_PREFETCHER_MASK (1 << FEATURE_DPM_PREFETCHER_BIT )
++#define FEATURE_DPM_GFXCLK_MASK (1 << FEATURE_DPM_GFXCLK_BIT )
++#define FEATURE_DPM_UCLK_MASK (1 << FEATURE_DPM_UCLK_BIT )
++#define FEATURE_DPM_SOCCLK_MASK (1 << FEATURE_DPM_SOCCLK_BIT )
++#define FEATURE_DPM_UVD_MASK (1 << FEATURE_DPM_UVD_BIT )
++#define FEATURE_DPM_VCE_MASK (1 << FEATURE_DPM_VCE_BIT )
++#define FEATURE_ULV_MASK (1 << FEATURE_ULV_BIT )
++#define FEATURE_DPM_MP0CLK_MASK (1 << FEATURE_DPM_MP0CLK_BIT )
++#define FEATURE_DPM_LINK_MASK (1 << FEATURE_DPM_LINK_BIT )
++#define FEATURE_DPM_DCEFCLK_MASK (1 << FEATURE_DPM_DCEFCLK_BIT )
++#define FEATURE_DS_GFXCLK_MASK (1 << FEATURE_DS_GFXCLK_BIT )
++#define FEATURE_DS_SOCCLK_MASK (1 << FEATURE_DS_SOCCLK_BIT )
++#define FEATURE_DS_LCLK_MASK (1 << FEATURE_DS_LCLK_BIT )
++#define FEATURE_PPT_MASK (1 << FEATURE_PPT_BIT )
++#define FEATURE_TDC_MASK (1 << FEATURE_TDC_BIT )
++#define FEATURE_THERMAL_MASK (1 << FEATURE_THERMAL_BIT )
++#define FEATURE_GFX_PER_CU_CG_MASK (1 << FEATURE_GFX_PER_CU_CG_BIT )
++#define FEATURE_RM_MASK (1 << FEATURE_RM_BIT )
++#define FEATURE_DS_DCEFCLK_MASK (1 << FEATURE_DS_DCEFCLK_BIT )
++#define FEATURE_ACDC_MASK (1 << FEATURE_ACDC_BIT )
++#define FEATURE_VR0HOT_MASK (1 << FEATURE_VR0HOT_BIT )
++#define FEATURE_VR1HOT_MASK (1 << FEATURE_VR1HOT_BIT )
++#define FEATURE_FW_CTF_MASK (1 << FEATURE_FW_CTF_BIT )
++#define FEATURE_LED_DISPLAY_MASK (1 << FEATURE_LED_DISPLAY_BIT )
++#define FEATURE_FAN_CONTROL_MASK (1 << FEATURE_FAN_CONTROL_BIT )
++#define FEATURE_GFX_EDC_MASK (1 << FEATURE_GFX_EDC_BIT )
++#define FEATURE_GFXOFF_MASK (1 << FEATURE_GFXOFF_BIT )
++#define FEATURE_CG_MASK (1 << FEATURE_CG_BIT )
++#define FEATURE_DPM_FCLK_MASK (1 << FEATURE_DPM_FCLK_BIT )
++#define FEATURE_DS_FCLK_MASK (1 << FEATURE_DS_FCLK_BIT )
++#define FEATURE_DS_MP1CLK_MASK (1 << FEATURE_DS_MP1CLK_BIT )
++#define FEATURE_DS_MP0CLK_MASK (1 << FEATURE_DS_MP0CLK_BIT )
++
++
++#define DPM_OVERRIDE_DISABLE_SOCCLK_PID 0x00000001
++#define DPM_OVERRIDE_DISABLE_UCLK_PID 0x00000002
++#define DPM_OVERRIDE_ENABLE_VOLT_LINK_UVD_SOCCLK 0x00000004
++#define DPM_OVERRIDE_ENABLE_VOLT_LINK_UVD_UCLK 0x00000008
++#define DPM_OVERRIDE_ENABLE_FREQ_LINK_VCLK_SOCCLK 0x00000010
++#define DPM_OVERRIDE_ENABLE_FREQ_LINK_VCLK_UCLK 0x00000020
++#define DPM_OVERRIDE_ENABLE_FREQ_LINK_DCLK_SOCCLK 0x00000040
++#define DPM_OVERRIDE_ENABLE_FREQ_LINK_DCLK_UCLK 0x00000080
++#define DPM_OVERRIDE_ENABLE_VOLT_LINK_VCE_SOCCLK 0x00000100
++#define DPM_OVERRIDE_ENABLE_VOLT_LINK_VCE_UCLK 0x00000200
++#define DPM_OVERRIDE_ENABLE_FREQ_LINK_ECLK_SOCCLK 0x00000400
++#define DPM_OVERRIDE_ENABLE_FREQ_LINK_ECLK_UCLK 0x00000800
++#define DPM_OVERRIDE_ENABLE_FREQ_LINK_GFXCLK_SOCCLK 0x00001000
++#define DPM_OVERRIDE_ENABLE_FREQ_LINK_GFXCLK_UCLK 0x00002000
++#define DPM_OVERRIDE_ENABLE_GFXOFF_GFXCLK_SWITCH 0x00004000
++#define DPM_OVERRIDE_ENABLE_GFXOFF_SOCCLK_SWITCH 0x00008000
++#define DPM_OVERRIDE_ENABLE_GFXOFF_UCLK_SWITCH 0x00010000
++#define DPM_OVERRIDE_ENABLE_GFXOFF_FCLK_SWITCH 0x00020000
++
++#define VR_MAPPING_VR_SELECT_MASK 0x01
++#define VR_MAPPING_VR_SELECT_SHIFT 0x00
++
++#define VR_MAPPING_PLANE_SELECT_MASK 0x02
++#define VR_MAPPING_PLANE_SELECT_SHIFT 0x01
++
++
++#define PSI_SEL_VR0_PLANE0_PSI0 0x01
++#define PSI_SEL_VR0_PLANE0_PSI1 0x02
++#define PSI_SEL_VR0_PLANE1_PSI0 0x04
++#define PSI_SEL_VR0_PLANE1_PSI1 0x08
++#define PSI_SEL_VR1_PLANE0_PSI0 0x10
++#define PSI_SEL_VR1_PLANE0_PSI1 0x20
++#define PSI_SEL_VR1_PLANE1_PSI0 0x40
++#define PSI_SEL_VR1_PLANE1_PSI1 0x80
++
++
++#define THROTTLER_STATUS_PADDING_BIT 0
++#define THROTTLER_STATUS_TEMP_EDGE_BIT 1
++#define THROTTLER_STATUS_TEMP_HOTSPOT_BIT 2
++#define THROTTLER_STATUS_TEMP_HBM_BIT 3
++#define THROTTLER_STATUS_TEMP_VR_GFX_BIT 4
++#define THROTTLER_STATUS_TEMP_VR_MEM_BIT 5
++#define THROTTLER_STATUS_TEMP_LIQUID_BIT 6
++#define THROTTLER_STATUS_TEMP_PLX_BIT 7
++#define THROTTLER_STATUS_TEMP_SKIN_BIT 8
++#define THROTTLER_STATUS_TDC_GFX_BIT 9
++#define THROTTLER_STATUS_TDC_SOC_BIT 10
++#define THROTTLER_STATUS_PPT_BIT 11
++#define THROTTLER_STATUS_FIT_BIT 12
++#define THROTTLER_STATUS_PPM_BIT 13
++
++
++#define TABLE_TRANSFER_OK 0x0
++#define TABLE_TRANSFER_FAILED 0xFF
++
++
++#define WORKLOAD_DEFAULT_BIT 0
++#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 1
++#define WORKLOAD_PPLIB_POWER_SAVING_BIT 2
++#define WORKLOAD_PPLIB_VIDEO_BIT 3
++#define WORKLOAD_PPLIB_VR_BIT 4
++#define WORKLOAD_PPLIB_COMPUTE_BIT 5
++#define WORKLOAD_PPLIB_CUSTOM_BIT 6
++#define WORKLOAD_PPLIB_COUNT 7
++
++
++#define XGMI_STATE_D0 1
++#define XGMI_STATE_D3 0
++
++typedef struct {
++ uint32_t a;
++ uint32_t b;
++ uint32_t c;
++} QuadraticInt_t;
++
++typedef struct {
++ uint32_t m;
++ uint32_t b;
++} LinearInt_t;
++
++typedef struct {
++ uint32_t a;
++ uint32_t b;
++ uint32_t c;
++} DroopInt_t;
++
++typedef enum {
++ PPCLK_GFXCLK,
++ PPCLK_VCLK,
++ PPCLK_DCLK,
++ PPCLK_ECLK,
++ PPCLK_SOCCLK,
++ PPCLK_UCLK,
++ PPCLK_DCEFCLK,
++ PPCLK_DISPCLK,
++ PPCLK_PIXCLK,
++ PPCLK_PHYCLK,
++ PPCLK_FCLK,
++ PPCLK_COUNT,
++} PPCLK_e;
++
++typedef enum {
++ VOLTAGE_MODE_AVFS = 0,
++ VOLTAGE_MODE_AVFS_SS,
++ VOLTAGE_MODE_SS,
++ VOLTAGE_MODE_COUNT,
++} VOLTAGE_MODE_e;
++
++
++typedef enum {
++ AVFS_VOLTAGE_GFX = 0,
++ AVFS_VOLTAGE_SOC,
++ AVFS_VOLTAGE_COUNT,
++} AVFS_VOLTAGE_TYPE_e;
++
++
++typedef struct {
++ uint8_t VoltageMode;
++ uint8_t SnapToDiscrete;
++ uint8_t NumDiscreteLevels;
++ uint8_t padding;
++ LinearInt_t ConversionToAvfsClk;
++ QuadraticInt_t SsCurve;
++} DpmDescriptor_t;
++
++typedef struct {
++ uint32_t Version;
++
++
++ uint32_t FeaturesToRun[2];
++
++
++ uint16_t SocketPowerLimitAc0;
++ uint16_t SocketPowerLimitAc0Tau;
++ uint16_t SocketPowerLimitAc1;
++ uint16_t SocketPowerLimitAc1Tau;
++ uint16_t SocketPowerLimitAc2;
++ uint16_t SocketPowerLimitAc2Tau;
++ uint16_t SocketPowerLimitAc3;
++ uint16_t SocketPowerLimitAc3Tau;
++ uint16_t SocketPowerLimitDc;
++ uint16_t SocketPowerLimitDcTau;
++ uint16_t TdcLimitSoc;
++ uint16_t TdcLimitSocTau;
++ uint16_t TdcLimitGfx;
++ uint16_t TdcLimitGfxTau;
++
++ uint16_t TedgeLimit;
++ uint16_t ThotspotLimit;
++ uint16_t ThbmLimit;
++ uint16_t Tvr_gfxLimit;
++ uint16_t Tvr_memLimit;
++ uint16_t Tliquid1Limit;
++ uint16_t Tliquid2Limit;
++ uint16_t TplxLimit;
++ uint32_t FitLimit;
++
++ uint16_t PpmPowerLimit;
++ uint16_t PpmTemperatureThreshold;
++
++ uint8_t MemoryOnPackage;
++ uint8_t padding8_limits[3];
++
++
++ uint16_t UlvVoltageOffsetSoc;
++ uint16_t UlvVoltageOffsetGfx;
++
++ uint8_t UlvSmnclkDid;
++ uint8_t UlvMp1clkDid;
++ uint8_t UlvGfxclkBypass;
++ uint8_t Padding234;
++
++
++ uint16_t MinVoltageGfx;
++ uint16_t MinVoltageSoc;
++ uint16_t MaxVoltageGfx;
++ uint16_t MaxVoltageSoc;
++
++ uint16_t LoadLineResistanceGfx;
++ uint16_t LoadLineResistanceSoc;
++
++ DpmDescriptor_t DpmDescriptor[PPCLK_COUNT];
++
++ uint16_t FreqTableGfx [NUM_GFXCLK_DPM_LEVELS ];
++ uint16_t FreqTableVclk [NUM_VCLK_DPM_LEVELS ];
++ uint16_t FreqTableDclk [NUM_DCLK_DPM_LEVELS ];
++ uint16_t FreqTableEclk [NUM_ECLK_DPM_LEVELS ];
++ uint16_t FreqTableSocclk [NUM_SOCCLK_DPM_LEVELS ];
++ uint16_t FreqTableUclk [NUM_UCLK_DPM_LEVELS ];
++ uint16_t FreqTableFclk [NUM_FCLK_DPM_LEVELS ];
++ uint16_t FreqTableDcefclk [NUM_DCEFCLK_DPM_LEVELS ];
++ uint16_t FreqTableDispclk [NUM_DISPCLK_DPM_LEVELS ];
++ uint16_t FreqTablePixclk [NUM_PIXCLK_DPM_LEVELS ];
++ uint16_t FreqTablePhyclk [NUM_PHYCLK_DPM_LEVELS ];
++
++ uint16_t DcModeMaxFreq [PPCLK_COUNT ];
++ uint16_t Padding8_Clks;
++
++ uint16_t Mp0clkFreq [NUM_MP0CLK_DPM_LEVELS];
++ uint16_t Mp0DpmVoltage [NUM_MP0CLK_DPM_LEVELS];
++
++
++ uint16_t GfxclkFidle;
++ uint16_t GfxclkSlewRate;
++ uint16_t CksEnableFreq;
++ uint16_t Padding789;
++ QuadraticInt_t CksVoltageOffset;
++ uint8_t Padding567[4];
++ uint16_t GfxclkDsMaxFreq;
++ uint8_t GfxclkSource;
++ uint8_t Padding456;
++
++ uint8_t LowestUclkReservedForUlv;
++ uint8_t Padding8_Uclk[3];
++
++
++ uint8_t PcieGenSpeed[NUM_LINK_LEVELS];
++ uint8_t PcieLaneCount[NUM_LINK_LEVELS];
++ uint16_t LclkFreq[NUM_LINK_LEVELS];
++
++
++ uint16_t EnableTdpm;
++ uint16_t TdpmHighHystTemperature;
++ uint16_t TdpmLowHystTemperature;
++ uint16_t GfxclkFreqHighTempLimit;
++
++
++ uint16_t FanStopTemp;
++ uint16_t FanStartTemp;
++
++ uint16_t FanGainEdge;
++ uint16_t FanGainHotspot;
++ uint16_t FanGainLiquid;
++ uint16_t FanGainVrVddc;
++ uint16_t FanGainVrMvdd;
++ uint16_t FanGainPlx;
++ uint16_t FanGainHbm;
++ uint16_t FanPwmMin;
++ uint16_t FanAcousticLimitRpm;
++ uint16_t FanThrottlingRpm;
++ uint16_t FanMaximumRpm;
++ uint16_t FanTargetTemperature;
++ uint16_t FanTargetGfxclk;
++ uint8_t FanZeroRpmEnable;
++ uint8_t FanTachEdgePerRev;
++
++
++
++ int16_t FuzzyFan_ErrorSetDelta;
++ int16_t FuzzyFan_ErrorRateSetDelta;
++ int16_t FuzzyFan_PwmSetDelta;
++ uint16_t FuzzyFan_Reserved;
++
++
++ uint8_t OverrideAvfsGb[AVFS_VOLTAGE_COUNT];
++ uint8_t Padding8_Avfs[2];
++
++ QuadraticInt_t qAvfsGb[AVFS_VOLTAGE_COUNT];
++ DroopInt_t dBtcGbGfxCksOn;
++ DroopInt_t dBtcGbGfxCksOff;
++ DroopInt_t dBtcGbGfxAfll;
++ DroopInt_t dBtcGbSoc;
++ LinearInt_t qAgingGb[AVFS_VOLTAGE_COUNT];
++
++ QuadraticInt_t qStaticVoltageOffset[AVFS_VOLTAGE_COUNT];
++
++ uint16_t DcTol[AVFS_VOLTAGE_COUNT];
++
++ uint8_t DcBtcEnabled[AVFS_VOLTAGE_COUNT];
++ uint8_t Padding8_GfxBtc[2];
++
++ uint16_t DcBtcMin[AVFS_VOLTAGE_COUNT];
++ uint16_t DcBtcMax[AVFS_VOLTAGE_COUNT];
++
++
++ uint8_t XgmiLinkSpeed [NUM_XGMI_LEVELS];
++ uint8_t XgmiLinkWidth [NUM_XGMI_LEVELS];
++ uint16_t XgmiFclkFreq [NUM_XGMI_LEVELS];
++ uint16_t XgmiUclkFreq [NUM_XGMI_LEVELS];
++ uint16_t XgmiSocclkFreq [NUM_XGMI_LEVELS];
++ uint16_t XgmiSocVoltage [NUM_XGMI_LEVELS];
++
++ uint32_t DebugOverrides;
++ QuadraticInt_t ReservedEquation0;
++ QuadraticInt_t ReservedEquation1;
++ QuadraticInt_t ReservedEquation2;
++ QuadraticInt_t ReservedEquation3;
++
++ uint16_t MinVoltageUlvGfx;
++ uint16_t MinVoltageUlvSoc;
++
++ uint16_t MGpuFanBoostLimitRpm;
++ uint16_t padding16_Fan;
++
++ uint32_t Reserved[13];
++
++
++
++ uint8_t Liquid1_I2C_address;
++ uint8_t Liquid2_I2C_address;
++ uint8_t Vr_I2C_address;
++ uint8_t Plx_I2C_address;
++
++ uint8_t Liquid_I2C_LineSCL;
++ uint8_t Liquid_I2C_LineSDA;
++ uint8_t Vr_I2C_LineSCL;
++ uint8_t Vr_I2C_LineSDA;
++
++ uint8_t Plx_I2C_LineSCL;
++ uint8_t Plx_I2C_LineSDA;
++ uint8_t VrSensorPresent;
++ uint8_t LiquidSensorPresent;
++
++ uint16_t MaxVoltageStepGfx;
++ uint16_t MaxVoltageStepSoc;
++
++ uint8_t VddGfxVrMapping;
++ uint8_t VddSocVrMapping;
++ uint8_t VddMem0VrMapping;
++ uint8_t VddMem1VrMapping;
++
++ uint8_t GfxUlvPhaseSheddingMask;
++ uint8_t SocUlvPhaseSheddingMask;
++ uint8_t ExternalSensorPresent;
++ uint8_t Padding8_V;
++
++
++ uint16_t GfxMaxCurrent;
++ int8_t GfxOffset;
++ uint8_t Padding_TelemetryGfx;
++
++ uint16_t SocMaxCurrent;
++ int8_t SocOffset;
++ uint8_t Padding_TelemetrySoc;
++
++ uint16_t Mem0MaxCurrent;
++ int8_t Mem0Offset;
++ uint8_t Padding_TelemetryMem0;
++
++ uint16_t Mem1MaxCurrent;
++ int8_t Mem1Offset;
++ uint8_t Padding_TelemetryMem1;
++
++
++ uint8_t AcDcGpio;
++ uint8_t AcDcPolarity;
++ uint8_t VR0HotGpio;
++ uint8_t VR0HotPolarity;
++
++ uint8_t VR1HotGpio;
++ uint8_t VR1HotPolarity;
++ uint8_t Padding1;
++ uint8_t Padding2;
++
++
++
++ uint8_t LedPin0;
++ uint8_t LedPin1;
++ uint8_t LedPin2;
++ uint8_t padding8_4;
++
++
++ uint8_t PllGfxclkSpreadEnabled;
++ uint8_t PllGfxclkSpreadPercent;
++ uint16_t PllGfxclkSpreadFreq;
++
++ uint8_t UclkSpreadEnabled;
++ uint8_t UclkSpreadPercent;
++ uint16_t UclkSpreadFreq;
++
++ uint8_t FclkSpreadEnabled;
++ uint8_t FclkSpreadPercent;
++ uint16_t FclkSpreadFreq;
++
++ uint8_t FllGfxclkSpreadEnabled;
++ uint8_t FllGfxclkSpreadPercent;
++ uint16_t FllGfxclkSpreadFreq;
++
++ uint32_t BoardReserved[10];
++
++
++ uint32_t MmHubPadding[8];
++
++} PPTable_t;
++
++typedef struct {
++
++ uint16_t GfxclkAverageLpfTau;
++ uint16_t SocclkAverageLpfTau;
++ uint16_t UclkAverageLpfTau;
++ uint16_t GfxActivityLpfTau;
++ uint16_t UclkActivityLpfTau;
++
++
++ uint32_t MmHubPadding[8];
++} DriverSmuConfig_t;
++
++typedef struct {
++
++ uint16_t GfxclkFmin;
++ uint16_t GfxclkFmax;
++ uint16_t GfxclkFreq1;
++ uint16_t GfxclkOffsetVolt1;
++ uint16_t GfxclkFreq2;
++ uint16_t GfxclkOffsetVolt2;
++ uint16_t GfxclkFreq3;
++ uint16_t GfxclkOffsetVolt3;
++ uint16_t UclkFmax;
++ int16_t OverDrivePct;
++ uint16_t FanMaximumRpm;
++ uint16_t FanMinimumPwm;
++ uint16_t FanTargetTemperature;
++ uint16_t MaxOpTemp;
++ uint16_t FanZeroRpmEnable;
++ uint16_t Padding;
++
++} OverDriveTable_t;
++
++typedef struct {
++ uint16_t CurrClock[PPCLK_COUNT];
++ uint16_t AverageGfxclkFrequency;
++ uint16_t AverageSocclkFrequency;
++ uint16_t AverageUclkFrequency ;
++ uint16_t AverageGfxActivity ;
++ uint16_t AverageUclkActivity ;
++ uint8_t CurrSocVoltageOffset ;
++ uint8_t CurrGfxVoltageOffset ;
++ uint8_t CurrMemVidOffset ;
++ uint8_t Padding8 ;
++ uint16_t CurrSocketPower ;
++ uint16_t TemperatureEdge ;
++ uint16_t TemperatureHotspot ;
++ uint16_t TemperatureHBM ;
++ uint16_t TemperatureVrGfx ;
++ uint16_t TemperatureVrMem ;
++ uint16_t TemperatureLiquid ;
++ uint16_t TemperaturePlx ;
++ uint32_t ThrottlerStatus ;
++
++ uint8_t LinkDpmLevel;
++ uint8_t Padding[3];
++
++
++ uint32_t MmHubPadding[7];
++} SmuMetrics_t;
++
++typedef struct {
++ uint16_t MinClock;
++ uint16_t MaxClock;
++ uint16_t MinUclk;
++ uint16_t MaxUclk;
++
++ uint8_t WmSetting;
++ uint8_t Padding[3];
++} WatermarkRowGeneric_t;
++
++#define NUM_WM_RANGES 4
++
++typedef enum {
++ WM_SOCCLK = 0,
++ WM_DCEFCLK,
++ WM_COUNT_PP,
++} WM_CLOCK_e;
++
++typedef struct {
++
++ WatermarkRowGeneric_t WatermarkRow[WM_COUNT_PP][NUM_WM_RANGES];
++
++ uint32_t MmHubPadding[7];
++} Watermarks_t;
++
++typedef struct {
++ uint16_t avgPsmCount[45];
++ uint16_t minPsmCount[45];
++ float avgPsmVoltage[45];
++ float minPsmVoltage[45];
++
++ uint16_t avgScsPsmCount;
++ uint16_t minScsPsmCount;
++ float avgScsPsmVoltage;
++ float minScsPsmVoltage;
++
++
++ uint32_t MmHubPadding[6];
++} AvfsDebugTable_t;
++
++typedef struct {
++ uint8_t AvfsVersion;
++ uint8_t AvfsEn[AVFS_VOLTAGE_COUNT];
++
++ uint8_t OverrideVFT[AVFS_VOLTAGE_COUNT];
++ uint8_t OverrideAvfsGb[AVFS_VOLTAGE_COUNT];
++
++ uint8_t OverrideTemperatures[AVFS_VOLTAGE_COUNT];
++ uint8_t OverrideVInversion[AVFS_VOLTAGE_COUNT];
++ uint8_t OverrideP2V[AVFS_VOLTAGE_COUNT];
++ uint8_t OverrideP2VCharzFreq[AVFS_VOLTAGE_COUNT];
++
++ int32_t VFT0_m1[AVFS_VOLTAGE_COUNT];
++ int32_t VFT0_m2[AVFS_VOLTAGE_COUNT];
++ int32_t VFT0_b[AVFS_VOLTAGE_COUNT];
++
++ int32_t VFT1_m1[AVFS_VOLTAGE_COUNT];
++ int32_t VFT1_m2[AVFS_VOLTAGE_COUNT];
++ int32_t VFT1_b[AVFS_VOLTAGE_COUNT];
++
++ int32_t VFT2_m1[AVFS_VOLTAGE_COUNT];
++ int32_t VFT2_m2[AVFS_VOLTAGE_COUNT];
++ int32_t VFT2_b[AVFS_VOLTAGE_COUNT];
++
++ int32_t AvfsGb0_m1[AVFS_VOLTAGE_COUNT];
++ int32_t AvfsGb0_m2[AVFS_VOLTAGE_COUNT];
++ int32_t AvfsGb0_b[AVFS_VOLTAGE_COUNT];
++
++ int32_t AcBtcGb_m1[AVFS_VOLTAGE_COUNT];
++ int32_t AcBtcGb_m2[AVFS_VOLTAGE_COUNT];
++ int32_t AcBtcGb_b[AVFS_VOLTAGE_COUNT];
++
++ uint32_t AvfsTempCold[AVFS_VOLTAGE_COUNT];
++ uint32_t AvfsTempMid[AVFS_VOLTAGE_COUNT];
++ uint32_t AvfsTempHot[AVFS_VOLTAGE_COUNT];
++
++ uint32_t VInversion[AVFS_VOLTAGE_COUNT];
++
++
++ int32_t P2V_m1[AVFS_VOLTAGE_COUNT];
++ int32_t P2V_m2[AVFS_VOLTAGE_COUNT];
++ int32_t P2V_b[AVFS_VOLTAGE_COUNT];
++
++ uint32_t P2VCharzFreq[AVFS_VOLTAGE_COUNT];
++
++ uint32_t EnabledAvfsModules;
++
++ uint32_t MmHubPadding[7];
++} AvfsFuseOverride_t;
++
++typedef struct {
++
++ uint8_t Gfx_ActiveHystLimit;
++ uint8_t Gfx_IdleHystLimit;
++ uint8_t Gfx_FPS;
++ uint8_t Gfx_MinActiveFreqType;
++ uint8_t Gfx_BoosterFreqType;
++ uint8_t Gfx_UseRlcBusy;
++ uint16_t Gfx_MinActiveFreq;
++ uint16_t Gfx_BoosterFreq;
++ uint16_t Gfx_PD_Data_time_constant;
++ uint32_t Gfx_PD_Data_limit_a;
++ uint32_t Gfx_PD_Data_limit_b;
++ uint32_t Gfx_PD_Data_limit_c;
++ uint32_t Gfx_PD_Data_error_coeff;
++ uint32_t Gfx_PD_Data_error_rate_coeff;
++
++ uint8_t Soc_ActiveHystLimit;
++ uint8_t Soc_IdleHystLimit;
++ uint8_t Soc_FPS;
++ uint8_t Soc_MinActiveFreqType;
++ uint8_t Soc_BoosterFreqType;
++ uint8_t Soc_UseRlcBusy;
++ uint16_t Soc_MinActiveFreq;
++ uint16_t Soc_BoosterFreq;
++ uint16_t Soc_PD_Data_time_constant;
++ uint32_t Soc_PD_Data_limit_a;
++ uint32_t Soc_PD_Data_limit_b;
++ uint32_t Soc_PD_Data_limit_c;
++ uint32_t Soc_PD_Data_error_coeff;
++ uint32_t Soc_PD_Data_error_rate_coeff;
++
++ uint8_t Mem_ActiveHystLimit;
++ uint8_t Mem_IdleHystLimit;
++ uint8_t Mem_FPS;
++ uint8_t Mem_MinActiveFreqType;
++ uint8_t Mem_BoosterFreqType;
++ uint8_t Mem_UseRlcBusy;
++ uint16_t Mem_MinActiveFreq;
++ uint16_t Mem_BoosterFreq;
++ uint16_t Mem_PD_Data_time_constant;
++ uint32_t Mem_PD_Data_limit_a;
++ uint32_t Mem_PD_Data_limit_b;
++ uint32_t Mem_PD_Data_limit_c;
++ uint32_t Mem_PD_Data_error_coeff;
++ uint32_t Mem_PD_Data_error_rate_coeff;
++
++ uint8_t Fclk_ActiveHystLimit;
++ uint8_t Fclk_IdleHystLimit;
++ uint8_t Fclk_FPS;
++ uint8_t Fclk_MinActiveFreqType;
++ uint8_t Fclk_BoosterFreqType;
++ uint8_t Fclk_UseRlcBusy;
++ uint16_t Fclk_MinActiveFreq;
++ uint16_t Fclk_BoosterFreq;
++ uint16_t Fclk_PD_Data_time_constant;
++ uint32_t Fclk_PD_Data_limit_a;
++ uint32_t Fclk_PD_Data_limit_b;
++ uint32_t Fclk_PD_Data_limit_c;
++ uint32_t Fclk_PD_Data_error_coeff;
++ uint32_t Fclk_PD_Data_error_rate_coeff;
++
++} DpmActivityMonitorCoeffInt_t;
++
++#define TABLE_PPTABLE 0
++#define TABLE_WATERMARKS 1
++#define TABLE_AVFS 2
++#define TABLE_AVFS_PSM_DEBUG 3
++#define TABLE_AVFS_FUSE_OVERRIDE 4
++#define TABLE_PMSTATUSLOG 5
++#define TABLE_SMU_METRICS 6
++#define TABLE_DRIVER_SMU_CONFIG 7
++#define TABLE_ACTIVITY_MONITOR_COEFF 8
++#define TABLE_OVERDRIVE 9
++#define TABLE_COUNT 10
++
++
++#define UCLK_SWITCH_SLOW 0
++#define UCLK_SWITCH_FAST 1
++
++
++#define SQ_Enable_MASK 0x1
++#define SQ_IR_MASK 0x2
++#define SQ_PCC_MASK 0x4
++#define SQ_EDC_MASK 0x8
++
++#define TCP_Enable_MASK 0x100
++#define TCP_IR_MASK 0x200
++#define TCP_PCC_MASK 0x400
++#define TCP_EDC_MASK 0x800
++
++#define TD_Enable_MASK 0x10000
++#define TD_IR_MASK 0x20000
++#define TD_PCC_MASK 0x40000
++#define TD_EDC_MASK 0x80000
++
++#define DB_Enable_MASK 0x1000000
++#define DB_IR_MASK 0x2000000
++#define DB_PCC_MASK 0x4000000
++#define DB_EDC_MASK 0x8000000
++
++#define SQ_Enable_SHIFT 0
++#define SQ_IR_SHIFT 1
++#define SQ_PCC_SHIFT 2
++#define SQ_EDC_SHIFT 3
++
++#define TCP_Enable_SHIFT 8
++#define TCP_IR_SHIFT 9
++#define TCP_PCC_SHIFT 10
++#define TCP_EDC_SHIFT 11
++
++#define TD_Enable_SHIFT 16
++#define TD_IR_SHIFT 17
++#define TD_PCC_SHIFT 18
++#define TD_EDC_SHIFT 19
++
++#define DB_Enable_SHIFT 24
++#define DB_IR_SHIFT 25
++#define DB_PCC_SHIFT 26
++#define DB_EDC_SHIFT 27
++
++#define REMOVE_FMAX_MARGIN_BIT 0x0
++#define REMOVE_DCTOL_MARGIN_BIT 0x1
++#define REMOVE_PLATFORM_MARGIN_BIT 0x2
++
++#endif
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5124-drm-amd-powerplay-add-vega20_ppsmc.h-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5124-drm-amd-powerplay-add-vega20_ppsmc.h-v2.patch
new file mode 100644
index 00000000..767702d3
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5124-drm-amd-powerplay-add-vega20_ppsmc.h-v2.patch
@@ -0,0 +1,151 @@
+From adc0d33c55115252077f9416103720f995ab3d80 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Wed, 21 Mar 2018 16:21:51 +0800
+Subject: [PATCH 5124/5725] drm/amd/powerplay: add vega20_ppsmc.h (v2)
+
+v2: update to latest.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h | 127 +++++++++++++++++++++++
+ 1 file changed, 127 insertions(+)
+ create mode 100644 drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
+
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
+new file mode 100644
+index 0000000..165429f
+--- /dev/null
++++ b/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
+@@ -0,0 +1,127 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef VEGA20_PP_SMC_H
++#define VEGA20_PP_SMC_H
++
++#pragma pack(push, 1)
++
++// SMU Response Codes:
++#define PPSMC_Result_OK 0x1
++#define PPSMC_Result_Failed 0xFF
++#define PPSMC_Result_UnknownCmd 0xFE
++#define PPSMC_Result_CmdRejectedPrereq 0xFD
++#define PPSMC_Result_CmdRejectedBusy 0xFC
++
++// Message Definitions:
++#define PPSMC_MSG_TestMessage 0x1
++#define PPSMC_MSG_GetSmuVersion 0x2
++#define PPSMC_MSG_GetDriverIfVersion 0x3
++#define PPSMC_MSG_SetAllowedFeaturesMaskLow 0x4
++#define PPSMC_MSG_SetAllowedFeaturesMaskHigh 0x5
++#define PPSMC_MSG_EnableAllSmuFeatures 0x6
++#define PPSMC_MSG_DisableAllSmuFeatures 0x7
++#define PPSMC_MSG_EnableSmuFeaturesLow 0x8
++#define PPSMC_MSG_EnableSmuFeaturesHigh 0x9
++#define PPSMC_MSG_DisableSmuFeaturesLow 0xA
++#define PPSMC_MSG_DisableSmuFeaturesHigh 0xB
++#define PPSMC_MSG_GetEnabledSmuFeaturesLow 0xC
++#define PPSMC_MSG_GetEnabledSmuFeaturesHigh 0xD
++#define PPSMC_MSG_SetWorkloadMask 0xE
++#define PPSMC_MSG_SetPptLimit 0xF
++#define PPSMC_MSG_SetDriverDramAddrHigh 0x10
++#define PPSMC_MSG_SetDriverDramAddrLow 0x11
++#define PPSMC_MSG_SetToolsDramAddrHigh 0x12
++#define PPSMC_MSG_SetToolsDramAddrLow 0x13
++#define PPSMC_MSG_TransferTableSmu2Dram 0x14
++#define PPSMC_MSG_TransferTableDram2Smu 0x15
++#define PPSMC_MSG_UseDefaultPPTable 0x16
++#define PPSMC_MSG_UseBackupPPTable 0x17
++#define PPSMC_MSG_RunBtc 0x18
++#define PPSMC_MSG_RequestI2CBus 0x19
++#define PPSMC_MSG_ReleaseI2CBus 0x1A
++#define PPSMC_MSG_SetFloorSocVoltage 0x21
++#define PPSMC_MSG_SoftReset 0x22
++#define PPSMC_MSG_StartBacoMonitor 0x23
++#define PPSMC_MSG_CancelBacoMonitor 0x24
++#define PPSMC_MSG_EnterBaco 0x25
++#define PPSMC_MSG_SetSoftMinByFreq 0x26
++#define PPSMC_MSG_SetSoftMaxByFreq 0x27
++#define PPSMC_MSG_SetHardMinByFreq 0x28
++#define PPSMC_MSG_SetHardMaxByFreq 0x29
++#define PPSMC_MSG_GetMinDpmFreq 0x2A
++#define PPSMC_MSG_GetMaxDpmFreq 0x2B
++#define PPSMC_MSG_GetDpmFreqByIndex 0x2C
++#define PPSMC_MSG_GetDpmClockFreq 0x2D
++#define PPSMC_MSG_GetSsVoltageByDpm 0x2E
++#define PPSMC_MSG_SetMemoryChannelConfig 0x2F
++#define PPSMC_MSG_SetGeminiMode 0x30
++#define PPSMC_MSG_SetGeminiApertureHigh 0x31
++#define PPSMC_MSG_SetGeminiApertureLow 0x32
++#define PPSMC_MSG_SetMinLinkDpmByIndex 0x33
++#define PPSMC_MSG_OverridePcieParameters 0x34
++#define PPSMC_MSG_OverDriveSetPercentage 0x35
++#define PPSMC_MSG_SetMinDeepSleepDcefclk 0x36
++#define PPSMC_MSG_ReenableAcDcInterrupt 0x37
++#define PPSMC_MSG_NotifyPowerSource 0x38
++#define PPSMC_MSG_SetUclkFastSwitch 0x39
++#define PPSMC_MSG_SetUclkDownHyst 0x3A
++//#define PPSMC_MSG_GfxDeviceDriverReset 0x3B
++#define PPSMC_MSG_GetCurrentRpm 0x3C
++#define PPSMC_MSG_SetVideoFps 0x3D
++#define PPSMC_MSG_SetTjMax 0x3E
++#define PPSMC_MSG_SetFanTemperatureTarget 0x3F
++#define PPSMC_MSG_PrepareMp1ForUnload 0x40
++#define PPSMC_MSG_DramLogSetDramAddrHigh 0x41
++#define PPSMC_MSG_DramLogSetDramAddrLow 0x42
++#define PPSMC_MSG_DramLogSetDramSize 0x43
++#define PPSMC_MSG_SetFanMaxRpm 0x44
++#define PPSMC_MSG_SetFanMinPwm 0x45
++#define PPSMC_MSG_ConfigureGfxDidt 0x46
++#define PPSMC_MSG_NumOfDisplays 0x47
++#define PPSMC_MSG_RemoveMargins 0x48
++#define PPSMC_MSG_ReadSerialNumTop32 0x49
++#define PPSMC_MSG_ReadSerialNumBottom32 0x4A
++#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x4B
++#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x4C
++#define PPSMC_MSG_WaflTest 0x4D
++// Unused ID 0x4E to 0x50
++#define PPSMC_MSG_AllowGfxOff 0x51
++#define PPSMC_MSG_DisallowGfxOff 0x52
++#define PPSMC_MSG_GetPptLimit 0x53
++#define PPSMC_MSG_GetDcModeMaxDpmFreq 0x54
++#define PPSMC_MSG_GetDebugData 0x55
++#define PPSMC_MSG_SetXgmiMode 0x56
++#define PPSMC_MSG_RunAfllBtc 0x57
++#define PPSMC_MSG_ExitBaco 0x58
++#define PPSMC_MSG_PrepareMp1ForReset 0x59
++#define PPSMC_MSG_PrepareMp1ForShutdown 0x5A
++#define PPSMC_MSG_SetMGpuFanBoostLimitRpm 0x5D
++#define PPSMC_Message_Count 0x5E
++
++typedef uint32_t PPSMC_Result;
++typedef uint32_t PPSMC_Msg;
++
++#pragma pack(pop)
++
++#endif
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5125-drm-amd-powerplay-add-vega20_pptable.h-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5125-drm-amd-powerplay-add-vega20_pptable.h-v2.patch
new file mode 100644
index 00000000..6d3caa74
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5125-drm-amd-powerplay-add-vega20_pptable.h-v2.patch
@@ -0,0 +1,164 @@
+From 5ca66a7fdc247bf56cadd836211d480f6a3f9447 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Wed, 21 Mar 2018 16:36:08 +0800
+Subject: [PATCH 5125/5725] drm/amd/powerplay: add vega20_pptable.h (v2)
+
+v2: squash in table size fixes
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../gpu/drm/amd/powerplay/hwmgr/vega20_pptable.h | 140 +++++++++++++++++++++
+ 1 file changed, 140 insertions(+)
+ create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_pptable.h
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_pptable.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_pptable.h
+new file mode 100644
+index 0000000..b104f6a
+--- /dev/null
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_pptable.h
+@@ -0,0 +1,140 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++#ifndef _VEGA20_PPTABLE_H_
++#define _VEGA20_PPTABLE_H_
++
++#pragma pack(push, 1)
++
++#define ATOM_VEGA20_PP_THERMALCONTROLLER_NONE 0
++#define ATOM_VEGA20_PP_THERMALCONTROLLER_VEGA20 26
++
++#define ATOM_VEGA20_PP_PLATFORM_CAP_POWERPLAY 0x1
++#define ATOM_VEGA20_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 0x2
++#define ATOM_VEGA20_PP_PLATFORM_CAP_HARDWAREDC 0x4
++#define ATOM_VEGA20_PP_PLATFORM_CAP_BACO 0x8
++#define ATOM_VEGA20_PP_PLATFORM_CAP_BAMACO 0x10
++#define ATOM_VEGA20_PP_PLATFORM_CAP_ENABLESHADOWPSTATE 0x20
++
++#define ATOM_VEGA20_TABLE_REVISION_VEGA20 11
++#define ATOM_VEGA20_ODFEATURE_MAX_COUNT 32
++#define ATOM_VEGA20_ODSETTING_MAX_COUNT 32
++#define ATOM_VEGA20_PPCLOCK_MAX_COUNT 16
++
++enum ATOM_VEGA20_ODFEATURE_ID {
++ ATOM_VEGA20_ODFEATURE_GFXCLK_LIMITS = 0,
++ ATOM_VEGA20_ODFEATURE_GFXCLK_CURVE,
++ ATOM_VEGA20_ODFEATURE_UCLK_MAX,
++ ATOM_VEGA20_ODFEATURE_POWER_LIMIT,
++ ATOM_VEGA20_ODFEATURE_FAN_ACOUSTIC_LIMIT, //FanMaximumRpm
++ ATOM_VEGA20_ODFEATURE_FAN_SPEED_MIN, //FanMinimumPwm
++ ATOM_VEGA20_ODFEATURE_TEMPERATURE_FAN, //FanTargetTemperature
++ ATOM_VEGA20_ODFEATURE_TEMPERATURE_SYSTEM, //MaxOpTemp
++ ATOM_VEGA20_ODFEATURE_COUNT,
++};
++
++enum ATOM_VEGA20_ODSETTING_ID {
++ ATOM_VEGA20_ODSETTING_GFXCLKFMAX = 0,
++ ATOM_VEGA20_ODSETTING_GFXCLKFMIN,
++ ATOM_VEGA20_ODSETTING_VDDGFXCURVEFREQ_P1,
++ ATOM_VEGA20_ODSETTING_VDDGFXCURVEVOLTAGEOFFSET_P1,
++ ATOM_VEGA20_ODSETTING_VDDGFXCURVEFREQ_P2,
++ ATOM_VEGA20_ODSETTING_VDDGFXCURVEVOLTAGEOFFSET_P2,
++ ATOM_VEGA20_ODSETTING_VDDGFXCURVEFREQ_P3,
++ ATOM_VEGA20_ODSETTING_VDDGFXCURVEVOLTAGEOFFSET_P3,
++ ATOM_VEGA20_ODSETTING_UCLKFMAX,
++ ATOM_VEGA20_ODSETTING_POWERPERCENTAGE,
++ ATOM_VEGA20_ODSETTING_FANRPMMIN,
++ ATOM_VEGA20_ODSETTING_FANRPMACOUSTICLIMIT,
++ ATOM_VEGA20_ODSETTING_FANTARGETTEMPERATURE,
++ ATOM_VEGA20_ODSETTING_OPERATINGTEMPMAX,
++ ATOM_VEGA20_ODSETTING_COUNT,
++};
++typedef enum ATOM_VEGA20_ODSETTING_ID ATOM_VEGA20_ODSETTING_ID;
++
++typedef struct _ATOM_VEGA20_OVERDRIVE8_RECORD
++{
++ UCHAR ucODTableRevision;
++ ULONG ODFeatureCount;
++ UCHAR ODFeatureCapabilities [ATOM_VEGA20_ODFEATURE_MAX_COUNT]; //OD feature support flags
++ ULONG ODSettingCount;
++ ULONG ODSettingsMax [ATOM_VEGA20_ODSETTING_MAX_COUNT]; //Upper Limit for each OD Setting
++ ULONG ODSettingsMin [ATOM_VEGA20_ODSETTING_MAX_COUNT]; //Lower Limit for each OD Setting
++} ATOM_VEGA20_OVERDRIVE8_RECORD;
++
++enum ATOM_VEGA20_PPCLOCK_ID {
++ ATOM_VEGA20_PPCLOCK_GFXCLK = 0,
++ ATOM_VEGA20_PPCLOCK_VCLK,
++ ATOM_VEGA20_PPCLOCK_DCLK,
++ ATOM_VEGA20_PPCLOCK_ECLK,
++ ATOM_VEGA20_PPCLOCK_SOCCLK,
++ ATOM_VEGA20_PPCLOCK_UCLK,
++ ATOM_VEGA20_PPCLOCK_FCLK,
++ ATOM_VEGA20_PPCLOCK_DCEFCLK,
++ ATOM_VEGA20_PPCLOCK_DISPCLK,
++ ATOM_VEGA20_PPCLOCK_PIXCLK,
++ ATOM_VEGA20_PPCLOCK_PHYCLK,
++ ATOM_VEGA20_PPCLOCK_COUNT,
++};
++typedef enum ATOM_VEGA20_PPCLOCK_ID ATOM_VEGA20_PPCLOCK_ID;
++
++typedef struct _ATOM_VEGA20_POWER_SAVING_CLOCK_RECORD
++{
++ UCHAR ucTableRevision;
++ ULONG PowerSavingClockCount; // Count of PowerSavingClock Mode
++ ULONG PowerSavingClockMax [ATOM_VEGA20_PPCLOCK_MAX_COUNT]; // PowerSavingClock Mode Clock Maximum array In MHz
++ ULONG PowerSavingClockMin [ATOM_VEGA20_PPCLOCK_MAX_COUNT]; // PowerSavingClock Mode Clock Minimum array In MHz
++} ATOM_VEGA20_POWER_SAVING_CLOCK_RECORD;
++
++typedef struct _ATOM_VEGA20_POWERPLAYTABLE
++{
++ struct atom_common_table_header sHeader;
++ UCHAR ucTableRevision;
++ USHORT usTableSize;
++ ULONG ulGoldenPPID;
++ ULONG ulGoldenRevision;
++ USHORT usFormatID;
++
++ ULONG ulPlatformCaps;
++
++ UCHAR ucThermalControllerType;
++
++ USHORT usSmallPowerLimit1;
++ USHORT usSmallPowerLimit2;
++ USHORT usBoostPowerLimit;
++ USHORT usODTurboPowerLimit;
++ USHORT usODPowerSavePowerLimit;
++ USHORT usSoftwareShutdownTemp;
++
++ ATOM_VEGA20_POWER_SAVING_CLOCK_RECORD PowerSavingClockTable; //PowerSavingClock Mode Clock Min/Max array
++
++ ATOM_VEGA20_OVERDRIVE8_RECORD OverDrive8Table; //OverDrive8 Feature capabilities and Settings Range (Max and Min)
++
++ USHORT usReserve[5];
++
++ PPTable_t smcPPTable;
++
++} ATOM_Vega20_POWERPLAYTABLE;
++
++#pragma pack(pop)
++
++#endif
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5126-drm-amd-powerplay-add-the-smu-manager-for-vega20-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5126-drm-amd-powerplay-add-the-smu-manager-for-vega20-v2.patch
new file mode 100644
index 00000000..925c8bfb
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5126-drm-amd-powerplay-add-the-smu-manager-for-vega20-v2.patch
@@ -0,0 +1,644 @@
+From 03fa4a9aee49d6cd40da5daeb7c715b89b9a9445 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Thu, 2 Aug 2018 15:52:41 -0500
+Subject: [PATCH 5126/5725] drm/amd/powerplay: add the smu manager for vega20
+ (v2)
+
+The SMU manager handles the driver interaction with the SMU
+which handles clock and voltage controls.
+
+v2: switch to SOC15 register access macros
+ reserve space for ActivityMonitor table
+ enable SMU fw loading
+ Drop dead code from bringup
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/smumgr/Makefile | 2 +-
+ .../gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c | 530 +++++++++++++++++++++
+ .../gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h | 61 +++
+ 3 files changed, 592 insertions(+), 1 deletion(-)
+ create mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
+ create mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h
+
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
+index 3ef862b..e9d8ad7 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
+@@ -6,7 +6,7 @@
+ SMU_MGR = smumgr.o smu8_smumgr.o tonga_smumgr.o fiji_smumgr.o \
+ polaris10_smumgr.o iceland_smumgr.o \
+ smu7_smumgr.o vega10_smumgr.o smu10_smumgr.o ci_smumgr.o \
+- vega12_smumgr.o vegam_smumgr.o smu9_smumgr.o
++ vega12_smumgr.o vegam_smumgr.o smu9_smumgr.o vega20_smumgr.o
+
+ AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR))
+
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
+new file mode 100644
+index 0000000..41a2a5d
+--- /dev/null
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
+@@ -0,0 +1,530 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "smumgr.h"
++#include "vega20_inc.h"
++#include "soc15_common.h"
++#include "vega20_smumgr.h"
++#include "vega20_ppsmc.h"
++#include "smu11_driver_if.h"
++#include "ppatomctrl.h"
++#include "pp_debug.h"
++#include "smu_ucode_xfer_vi.h"
++#include "smu7_smumgr.h"
++#include "vega20_hwmgr.h"
++
++/* MP Apertures */
++#define MP0_Public 0x03800000
++#define MP0_SRAM 0x03900000
++#define MP1_Public 0x03b00000
++#define MP1_SRAM 0x03c00004
++
++/* address block */
++#define smnMP1_FIRMWARE_FLAGS 0x3010024
++#define smnMP0_FW_INTF 0x30101c0
++#define smnMP1_PUB_CTRL 0x3010b14
++
++static bool vega20_is_smc_ram_running(struct pp_hwmgr *hwmgr)
++{
++ struct amdgpu_device *adev = hwmgr->adev;
++ uint32_t mp1_fw_flags;
++
++ WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2,
++ (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
++
++ mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2);
++
++ if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
++ MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
++ return true;
++
++ return false;
++}
++
++/*
++ * Check if SMC has responded to previous message.
++ *
++ * @param hwmgr the address of the powerplay hardware manager.
++ * @return the response value read back from the SMC.
++ */
++static uint32_t vega20_wait_for_response(struct pp_hwmgr *hwmgr)
++{
++ struct amdgpu_device *adev = hwmgr->adev;
++ uint32_t reg;
++
++ reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
++
++ phm_wait_for_register_unequal(hwmgr, reg,
++ 0, MP1_C2PMSG_90__CONTENT_MASK);
++
++ return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
++}
++
++/*
++ * Send a message to the SMC, and do not wait for its response.
++ * @param hwmgr the address of the powerplay hardware manager.
++ * @param msg the message to send.
++ * @return Always return 0.
++ */
++static int vega20_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
++ uint16_t msg)
++{
++ struct amdgpu_device *adev = hwmgr->adev;
++
++ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
++
++ return 0;
++}
++
++/*
++ * Send a message to the SMC, and wait for its response.
++ * @param hwmgr the address of the powerplay hardware manager.
++ * @param msg the message to send.
++ * @return 0 on success, -EIO if the SMC response indicates failure.
++ */
++static int vega20_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
++{
++ struct amdgpu_device *adev = hwmgr->adev;
++ int ret = 0;
++
++ vega20_wait_for_response(hwmgr);
++
++ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
++
++ vega20_send_msg_to_smc_without_waiting(hwmgr, msg);
++
++ ret = vega20_wait_for_response(hwmgr);
++ if (ret != PPSMC_Result_OK)
++ pr_err("Failed to send message 0x%x, response 0x%x\n", msg, ret);
++
++ return (ret == PPSMC_Result_OK) ? 0 : -EIO;
++}
++
++/*
++ * Send a message to the SMC with parameter
++ * @param hwmgr: the address of the powerplay hardware manager.
++ * @param msg: the message to send.
++ * @param parameter: the parameter to send
++ * @return 0 on success, -EIO if the SMC response indicates failure.
++ */
++static int vega20_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
++ uint16_t msg, uint32_t parameter)
++{
++ struct amdgpu_device *adev = hwmgr->adev;
++ int ret = 0;
++
++ vega20_wait_for_response(hwmgr);
++
++ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
++
++ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter);
++
++ vega20_send_msg_to_smc_without_waiting(hwmgr, msg);
++
++ ret = vega20_wait_for_response(hwmgr);
++ if (ret != PPSMC_Result_OK)
++ pr_err("Failed to send message 0x%x, response 0x%x\n", msg, ret);
++
++ return (ret == PPSMC_Result_OK) ? 0 : -EIO;
++}
++
++/*
++ * Retrieve an argument from SMC.
++ * @param hwmgr the address of the powerplay hardware manager.
++ * @param arg pointer to store the argument from SMC.
++ * @return Always return 0.
++ */
++int vega20_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg)
++{
++ struct amdgpu_device *adev = hwmgr->adev;
++
++ *arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
++
++ return 0;
++}
++
++/*
++ * Copy table from SMC into driver FB
++ * @param hwmgr the address of the HW manager
++ * @param table_id the driver's table ID to copy from
++ */
++int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
++ uint8_t *table, int16_t table_id)
++{
++ struct vega20_smumgr *priv =
++ (struct vega20_smumgr *)(hwmgr->smu_backend);
++ int ret = 0;
++
++ PP_ASSERT_WITH_CODE(table_id < TABLE_COUNT,
++ "Invalid SMU Table ID!", return -EINVAL);
++ PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
++ "Invalid SMU Table version!", return -EINVAL);
++ PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
++ "Invalid SMU Table Length!", return -EINVAL);
++
++ PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_SetDriverDramAddrHigh,
++ upper_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
++ "[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!",
++ return ret);
++ PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_SetDriverDramAddrLow,
++ lower_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
++ "[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!",
++ return ret);
++ PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_TransferTableSmu2Dram, table_id)) == 0,
++ "[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
++ return ret);
++
++ memcpy(table, priv->smu_tables.entry[table_id].table,
++ priv->smu_tables.entry[table_id].size);
++
++ return 0;
++}
++
++/*
++ * Copy table from Driver FB into SMC
++ * @param hwmgr the address of the HW manager
++ * @param table_id the table to copy from
++ */
++int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr,
++ uint8_t *table, int16_t table_id)
++{
++ struct vega20_smumgr *priv =
++ (struct vega20_smumgr *)(hwmgr->smu_backend);
++ int ret = 0;
++
++ PP_ASSERT_WITH_CODE(table_id < TABLE_COUNT,
++ "Invalid SMU Table ID!", return -EINVAL);
++ PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
++ "Invalid SMU Table version!", return -EINVAL);
++ PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
++ "Invalid SMU Table Length!", return -EINVAL);
++
++ memcpy(priv->smu_tables.entry[table_id].table, table,
++ priv->smu_tables.entry[table_id].size);
++
++ PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_SetDriverDramAddrHigh,
++ upper_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
++ "[CopyTableToSMC] Attempt to Set Dram Addr High Failed!",
++ return ret);
++ PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_SetDriverDramAddrLow,
++ lower_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
++ "[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!",
++ return ret);
++ PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_TransferTableDram2Smu, table_id)) == 0,
++ "[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!",
++ return ret);
++
++ return 0;
++}
++
++int vega20_enable_smc_features(struct pp_hwmgr *hwmgr,
++ bool enable, uint64_t feature_mask)
++{
++ uint32_t smu_features_low, smu_features_high;
++ int ret = 0;
++
++ smu_features_low = (uint32_t)((feature_mask & SMU_FEATURES_LOW_MASK) >> SMU_FEATURES_LOW_SHIFT);
++ smu_features_high = (uint32_t)((feature_mask & SMU_FEATURES_HIGH_MASK) >> SMU_FEATURES_HIGH_SHIFT);
++
++ if (enable) {
++ PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low)) == 0,
++			"[EnableDisableSMCFeatures] Attempt to enable SMU features Low failed!",
++ return ret);
++ PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high)) == 0,
++			"[EnableDisableSMCFeatures] Attempt to enable SMU features High failed!",
++ return ret);
++ } else {
++ PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low)) == 0,
++			"[EnableDisableSMCFeatures] Attempt to disable SMU features Low failed!",
++ return ret);
++ PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high)) == 0,
++			"[EnableDisableSMCFeatures] Attempt to disable SMU features High failed!",
++ return ret);
++ }
++
++ return 0;
++}
++
++int vega20_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
++ uint64_t *features_enabled)
++{
++ uint32_t smc_features_low, smc_features_high;
++ int ret = 0;
++
++ if (features_enabled == NULL)
++ return -EINVAL;
++
++ PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc(hwmgr,
++ PPSMC_MSG_GetEnabledSmuFeaturesLow)) == 0,
++			"[GetEnabledSMCFeatures] Attempt to get SMU features Low failed!",
++ return ret);
++ PP_ASSERT_WITH_CODE((ret = vega20_read_arg_from_smc(hwmgr,
++ &smc_features_low)) == 0,
++			"[GetEnabledSMCFeatures] Attempt to read SMU features Low argument failed!",
++ return ret);
++ PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc(hwmgr,
++ PPSMC_MSG_GetEnabledSmuFeaturesHigh)) == 0,
++			"[GetEnabledSMCFeatures] Attempt to get SMU features High failed!",
++ return ret);
++ PP_ASSERT_WITH_CODE((ret = vega20_read_arg_from_smc(hwmgr,
++ &smc_features_high)) == 0,
++			"[GetEnabledSMCFeatures] Attempt to read SMU features High argument failed!",
++ return ret);
++
++ *features_enabled = ((((uint64_t)smc_features_low << SMU_FEATURES_LOW_SHIFT) & SMU_FEATURES_LOW_MASK) |
++ (((uint64_t)smc_features_high << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK));
++
++ return 0;
++}
++
++static int vega20_set_tools_address(struct pp_hwmgr *hwmgr)
++{
++ struct vega20_smumgr *priv =
++ (struct vega20_smumgr *)(hwmgr->smu_backend);
++ int ret = 0;
++
++ if (priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr) {
++ ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_SetToolsDramAddrHigh,
++ upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr));
++ if (!ret)
++ ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_SetToolsDramAddrLow,
++ lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr));
++ }
++
++ return ret;
++}
++
++static int vega20_smu_init(struct pp_hwmgr *hwmgr)
++{
++ struct vega20_smumgr *priv;
++ unsigned long tools_size = 0x19000;
++ int ret = 0;
++
++ struct cgs_firmware_info info = {0};
++
++ ret = cgs_get_firmware_info(hwmgr->device,
++ smu7_convert_fw_type_to_cgs(UCODE_ID_SMU),
++ &info);
++ if (ret || !info.kptr)
++ return -EINVAL;
++
++ priv = kzalloc(sizeof(struct vega20_smumgr), GFP_KERNEL);
++ if (!priv)
++ return -ENOMEM;
++
++ hwmgr->smu_backend = priv;
++
++ /* allocate space for pptable */
++ ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
++ sizeof(PPTable_t),
++ PAGE_SIZE,
++ AMDGPU_GEM_DOMAIN_VRAM,
++ &priv->smu_tables.entry[TABLE_PPTABLE].handle,
++ &priv->smu_tables.entry[TABLE_PPTABLE].mc_addr,
++ &priv->smu_tables.entry[TABLE_PPTABLE].table);
++ if (ret)
++ goto free_backend;
++
++ priv->smu_tables.entry[TABLE_PPTABLE].version = 0x01;
++ priv->smu_tables.entry[TABLE_PPTABLE].size = sizeof(PPTable_t);
++
++ /* allocate space for watermarks table */
++ ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
++ sizeof(Watermarks_t),
++ PAGE_SIZE,
++ AMDGPU_GEM_DOMAIN_VRAM,
++ &priv->smu_tables.entry[TABLE_WATERMARKS].handle,
++ &priv->smu_tables.entry[TABLE_WATERMARKS].mc_addr,
++ &priv->smu_tables.entry[TABLE_WATERMARKS].table);
++ if (ret)
++ goto err0;
++
++ priv->smu_tables.entry[TABLE_WATERMARKS].version = 0x01;
++ priv->smu_tables.entry[TABLE_WATERMARKS].size = sizeof(Watermarks_t);
++
++ /* allocate space for pmstatuslog table */
++ ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
++ tools_size,
++ PAGE_SIZE,
++ AMDGPU_GEM_DOMAIN_VRAM,
++ &priv->smu_tables.entry[TABLE_PMSTATUSLOG].handle,
++ &priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr,
++ &priv->smu_tables.entry[TABLE_PMSTATUSLOG].table);
++ if (ret)
++ goto err1;
++
++ priv->smu_tables.entry[TABLE_PMSTATUSLOG].version = 0x01;
++ priv->smu_tables.entry[TABLE_PMSTATUSLOG].size = tools_size;
++
++ /* allocate space for OverDrive table */
++ ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
++ sizeof(OverDriveTable_t),
++ PAGE_SIZE,
++ AMDGPU_GEM_DOMAIN_VRAM,
++ &priv->smu_tables.entry[TABLE_OVERDRIVE].handle,
++ &priv->smu_tables.entry[TABLE_OVERDRIVE].mc_addr,
++ &priv->smu_tables.entry[TABLE_OVERDRIVE].table);
++ if (ret)
++ goto err2;
++
++ priv->smu_tables.entry[TABLE_OVERDRIVE].version = 0x01;
++ priv->smu_tables.entry[TABLE_OVERDRIVE].size = sizeof(OverDriveTable_t);
++
++ /* allocate space for SmuMetrics table */
++ ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
++ sizeof(SmuMetrics_t),
++ PAGE_SIZE,
++ AMDGPU_GEM_DOMAIN_VRAM,
++ &priv->smu_tables.entry[TABLE_SMU_METRICS].handle,
++ &priv->smu_tables.entry[TABLE_SMU_METRICS].mc_addr,
++ &priv->smu_tables.entry[TABLE_SMU_METRICS].table);
++ if (ret)
++ goto err3;
++
++ priv->smu_tables.entry[TABLE_SMU_METRICS].version = 0x01;
++ priv->smu_tables.entry[TABLE_SMU_METRICS].size = sizeof(SmuMetrics_t);
++
++ /* allocate space for ActivityMonitor table */
++ ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
++ sizeof(DpmActivityMonitorCoeffInt_t),
++ PAGE_SIZE,
++ AMDGPU_GEM_DOMAIN_VRAM,
++ &priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].handle,
++ &priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr,
++ &priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table);
++ if (ret)
++ goto err4;
++
++ priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].version = 0x01;
++ priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size = sizeof(DpmActivityMonitorCoeffInt_t);
++
++ return 0;
++
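++/* unwind in reverse allocation order: each label frees only the buffer
++ * objects that were successfully created before the failure */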
++err4:
++ amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_SMU_METRICS].handle,
++ &priv->smu_tables.entry[TABLE_SMU_METRICS].mc_addr,
++ &priv->smu_tables.entry[TABLE_SMU_METRICS].table);
++err3:
++ amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_OVERDRIVE].handle,
++ &priv->smu_tables.entry[TABLE_OVERDRIVE].mc_addr,
++ &priv->smu_tables.entry[TABLE_OVERDRIVE].table);
++err2:
++ amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PMSTATUSLOG].handle,
++ &priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr,
++ &priv->smu_tables.entry[TABLE_PMSTATUSLOG].table);
++err1:
++ amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_WATERMARKS].handle,
++ &priv->smu_tables.entry[TABLE_WATERMARKS].mc_addr,
++ &priv->smu_tables.entry[TABLE_WATERMARKS].table);
++err0:
++ amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PPTABLE].handle,
++ &priv->smu_tables.entry[TABLE_PPTABLE].mc_addr,
++ &priv->smu_tables.entry[TABLE_PPTABLE].table);
++free_backend:
++ kfree(hwmgr->smu_backend);
++ hwmgr->smu_backend = NULL;
++
++ return -EINVAL;
++}
++
++static int vega20_smu_fini(struct pp_hwmgr *hwmgr)
++{
++ struct vega20_smumgr *priv =
++ (struct vega20_smumgr *)(hwmgr->smu_backend);
++
++ if (priv) {
++ amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PPTABLE].handle,
++ &priv->smu_tables.entry[TABLE_PPTABLE].mc_addr,
++ &priv->smu_tables.entry[TABLE_PPTABLE].table);
++ amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_WATERMARKS].handle,
++ &priv->smu_tables.entry[TABLE_WATERMARKS].mc_addr,
++ &priv->smu_tables.entry[TABLE_WATERMARKS].table);
++ amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PMSTATUSLOG].handle,
++ &priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr,
++ &priv->smu_tables.entry[TABLE_PMSTATUSLOG].table);
++ amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_OVERDRIVE].handle,
++ &priv->smu_tables.entry[TABLE_OVERDRIVE].mc_addr,
++ &priv->smu_tables.entry[TABLE_OVERDRIVE].table);
++ amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_SMU_METRICS].handle,
++ &priv->smu_tables.entry[TABLE_SMU_METRICS].mc_addr,
++ &priv->smu_tables.entry[TABLE_SMU_METRICS].table);
++ amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].handle,
++ &priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr,
++ &priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table);
++ kfree(hwmgr->smu_backend);
++ hwmgr->smu_backend = NULL;
++ }
++ return 0;
++}
++
++static int vega20_start_smu(struct pp_hwmgr *hwmgr)
++{
++ int ret;
++
++ ret = vega20_is_smc_ram_running(hwmgr);
++ PP_ASSERT_WITH_CODE(ret,
++ "[Vega20StartSmu] SMC is not running!",
++ return -EINVAL);
++
++ ret = vega20_set_tools_address(hwmgr);
++ PP_ASSERT_WITH_CODE(!ret,
++ "[Vega20StartSmu] Failed to set tools address!",
++ return ret);
++
++ return 0;
++}
++
++static bool vega20_is_dpm_running(struct pp_hwmgr *hwmgr)
++{
++ uint64_t features_enabled = 0;
++
++ vega20_get_enabled_smc_features(hwmgr, &features_enabled);
++
++ if (features_enabled & SMC_DPM_FEATURES)
++ return true;
++ else
++ return false;
++}
++
++const struct pp_smumgr_func vega20_smu_funcs = {
++ .smu_init = &vega20_smu_init,
++ .smu_fini = &vega20_smu_fini,
++ .start_smu = &vega20_start_smu,
++ .request_smu_load_specific_fw = NULL,
++ .send_msg_to_smc = &vega20_send_msg_to_smc,
++ .send_msg_to_smc_with_parameter = &vega20_send_msg_to_smc_with_parameter,
++ .download_pptable_settings = NULL,
++ .upload_pptable_settings = NULL,
++ .is_dpm_running = vega20_is_dpm_running,
++};
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h
+new file mode 100644
+index 0000000..71da822
+--- /dev/null
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h
+@@ -0,0 +1,61 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++#ifndef _VEGA20_SMUMANAGER_H_
++#define _VEGA20_SMUMANAGER_H_
++
++#include "hwmgr.h"
++#include "smu11_driver_if.h"
++
++struct smu_table_entry {
++ uint32_t version;
++ uint32_t size;
++ uint64_t mc_addr;
++ void *table;
++ struct amdgpu_bo *handle;
++};
++
++struct smu_table_array {
++ struct smu_table_entry entry[TABLE_COUNT];
++};
++
++struct vega20_smumgr {
++ struct smu_table_array smu_tables;
++};
++
++#define SMU_FEATURES_LOW_MASK 0x00000000FFFFFFFF
++#define SMU_FEATURES_LOW_SHIFT 0
++#define SMU_FEATURES_HIGH_MASK 0xFFFFFFFF00000000
++#define SMU_FEATURES_HIGH_SHIFT 32
++
++int vega20_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg);
++int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
++ uint8_t *table, int16_t table_id);
++int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr,
++ uint8_t *table, int16_t table_id);
++int vega20_enable_smc_features(struct pp_hwmgr *hwmgr,
++ bool enable, uint64_t feature_mask);
++int vega20_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
++ uint64_t *features_enabled);
++
++#endif
++
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5127-drm-amd-powerplay-new-interfaces-for-ActivityMonitor.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5127-drm-amd-powerplay-new-interfaces-for-ActivityMonitor.patch
new file mode 100644
index 00000000..41278ad1
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5127-drm-amd-powerplay-new-interfaces-for-ActivityMonitor.patch
@@ -0,0 +1,103 @@
+From 31da9c5e0c43840d95dd9121155a7f12ac3fb523 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Fri, 11 May 2018 10:56:25 +0800
+Subject: [PATCH 5127/5725] drm/amd/powerplay: new interfaces for
+ ActivityMonitor table with SMU
+
+Vega20 has a new activity monitor table that is stored in memory. Add
+API to get and set the new table.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c | 57 ++++++++++++++++++++++
+ .../gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h | 4 ++
+ 2 files changed, 61 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
+index 41a2a5d..fe7f710 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
+@@ -243,6 +243,63 @@ int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr,
+ return 0;
+ }
+
++int vega20_set_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
++ uint8_t *table, uint16_t workload_type)
++{
++ struct vega20_smumgr *priv =
++ (struct vega20_smumgr *)(hwmgr->smu_backend);
++ int ret = 0;
++
++ memcpy(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table, table,
++ priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);
++
++ PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_SetDriverDramAddrHigh,
++ upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0,
++ "[SetActivityMonitor] Attempt to Set Dram Addr High Failed!",
++ return ret);
++ PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_SetDriverDramAddrLow,
++ lower_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0,
++ "[SetActivityMonitor] Attempt to Set Dram Addr Low Failed!",
++ return ret);
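++ /* table id in the low 16 bits, workload type in the high 16 bits */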
++ PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_TransferTableDram2Smu, TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16))) == 0,
++ "[SetActivityMonitor] Attempt to Transfer Table To SMU Failed!",
++ return ret);
++
++ return 0;
++}
++
++int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
++ uint8_t *table, uint16_t workload_type)
++{
++ struct vega20_smumgr *priv =
++ (struct vega20_smumgr *)(hwmgr->smu_backend);
++ int ret = 0;
++
++ PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_SetDriverDramAddrHigh,
++ upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0,
++ "[GetActivityMonitor] Attempt to Set Dram Addr High Failed!",
++ return ret);
++ PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_SetDriverDramAddrLow,
++ lower_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0,
++ "[GetActivityMonitor] Attempt to Set Dram Addr Low Failed!",
++ return ret);
++ PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_TransferTableSmu2Dram,
++ TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16))) == 0,
++ "[GetActivityMonitor] Attempt to Transfer Table From SMU Failed!",
++ return ret);
++
++ memcpy(table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table,
++ priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);
++
++ return 0;
++}
++
+ int vega20_enable_smc_features(struct pp_hwmgr *hwmgr,
+ bool enable, uint64_t feature_mask)
+ {
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h
+index 71da822..505eb0d 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h
+@@ -56,6 +56,10 @@ int vega20_enable_smc_features(struct pp_hwmgr *hwmgr,
+ bool enable, uint64_t feature_mask);
+ int vega20_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
+ uint64_t *features_enabled);
++int vega20_set_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
++ uint8_t *table, uint16_t workload_type);
++int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
++ uint8_t *table, uint16_t workload_type);
+
+ #endif
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5128-drm-amd-powerplay-add-the-hw-manager-for-vega20-v3.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5128-drm-amd-powerplay-add-the-hw-manager-for-vega20-v3.patch
new file mode 100644
index 00000000..96e34009
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5128-drm-amd-powerplay-add-the-hw-manager-for-vega20-v3.patch
@@ -0,0 +1,4098 @@
+From d0e5327e3e959cc98085660776742acb3a1ede91 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Thu, 2 Aug 2018 15:55:33 -0500
+Subject: [PATCH 5128/5725] drm/amd/powerplay: add the hw manager for vega20
+ (v3)
+
+hwmgr is the interface for the driver to set up state
+structures which are used by the smu for managing the
+power state.
+
+v2: squash in fixes:
+- update set_watermarks_for_clocks_ranges to use common code
+- drop unsupported apis
+- correct MAX_REGULAR_DPM_NUMBER value
+- multimonitor fixes
+- add check for vbios pptable version
+- revise dpm table setup
+- init fclk dpm state
+- Remove unused definition in vega20_hwmgr
+- support power limit setup
+- enable vega20 to honour DAL clock limits
+- comment out dump_table debugging
+v3: switch to SOC15 register access macros
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/Makefile | 4 +-
+ drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 8 +-
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 2099 ++++++++++++++++++++
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h | 519 +++++
+ .../gpu/drm/amd/powerplay/hwmgr/vega20_powertune.c | 70 +
+ .../gpu/drm/amd/powerplay/hwmgr/vega20_powertune.h | 32 +
+ .../amd/powerplay/hwmgr/vega20_processpptables.c | 919 +++++++++
+ .../amd/powerplay/hwmgr/vega20_processpptables.h | 31 +
+ .../gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c | 212 ++
+ .../gpu/drm/amd/powerplay/hwmgr/vega20_thermal.h | 64 +
+ 10 files changed, 3956 insertions(+), 2 deletions(-)
+ create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+ create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
+ create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.c
+ create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.h
+ create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
+ create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.h
+ create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c
+ create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.h
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
+index 789d577..95621c1 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
+@@ -13,7 +13,9 @@ HARDWARE_MGR = hwmgr.o processpptables.o \
+ vega10_thermal.o smu10_hwmgr.o pp_psm.o\
+ vega12_processpptables.o vega12_hwmgr.o \
+ vega12_thermal.o \
+- pp_overdriver.o smu_helper.o
++ pp_overdriver.o smu_helper.o \
++ vega20_processpptables.o vega20_hwmgr.o vega20_powertune.o \
++ vega20_thermal.o
+
+ AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR))
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+index 8994aa5..7500a3e 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+@@ -44,11 +44,13 @@ extern const struct pp_smumgr_func vegam_smu_funcs;
+ extern const struct pp_smumgr_func vega10_smu_funcs;
+ extern const struct pp_smumgr_func vega12_smu_funcs;
+ extern const struct pp_smumgr_func smu10_smu_funcs;
++extern const struct pp_smumgr_func vega20_smu_funcs;
+
+ extern int smu7_init_function_pointers(struct pp_hwmgr *hwmgr);
+ extern int smu8_init_function_pointers(struct pp_hwmgr *hwmgr);
+ extern int vega10_hwmgr_init(struct pp_hwmgr *hwmgr);
+ extern int vega12_hwmgr_init(struct pp_hwmgr *hwmgr);
++extern int vega20_hwmgr_init(struct pp_hwmgr *hwmgr);
+ extern int smu10_init_function_pointers(struct pp_hwmgr *hwmgr);
+
+ static int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr);
+@@ -149,7 +151,6 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
+ case AMDGPU_FAMILY_AI:
+ switch (hwmgr->chip_id) {
+ case CHIP_VEGA10:
+- case CHIP_VEGA20:
+ hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
+ hwmgr->smumgr_funcs = &vega10_smu_funcs;
+ vega10_hwmgr_init(hwmgr);
+@@ -158,6 +159,11 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
+ hwmgr->smumgr_funcs = &vega12_smu_funcs;
+ vega12_hwmgr_init(hwmgr);
+ break;
++ case CHIP_VEGA20:
++ hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
++ hwmgr->smumgr_funcs = &vega20_smu_funcs;
++ vega20_hwmgr_init(hwmgr);
++ break;
+ default:
+ return -EINVAL;
+ }
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+new file mode 100644
+index 0000000..40f0717
+--- /dev/null
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+@@ -0,0 +1,2099 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include <linux/delay.h>
++#include <linux/fb.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++
++#include "hwmgr.h"
++#include "amd_powerplay.h"
++#include "vega20_smumgr.h"
++#include "hardwaremanager.h"
++#include "ppatomfwctrl.h"
++#include "atomfirmware.h"
++#include "cgs_common.h"
++#include "vega20_powertune.h"
++#include "vega20_inc.h"
++#include "pppcielanes.h"
++#include "vega20_hwmgr.h"
++#include "vega20_processpptables.h"
++#include "vega20_pptable.h"
++#include "vega20_thermal.h"
++#include "vega20_ppsmc.h"
++#include "pp_debug.h"
++#include "amd_pcie_helpers.h"
++#include "ppinterrupt.h"
++#include "pp_overdriver.h"
++#include "pp_thermal.h"
++
++static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
++{
++ struct vega20_hwmgr *data =
++ (struct vega20_hwmgr *)(hwmgr->backend);
++
++ data->gfxclk_average_alpha = PPVEGA20_VEGA20GFXCLKAVERAGEALPHA_DFLT;
++ data->socclk_average_alpha = PPVEGA20_VEGA20SOCCLKAVERAGEALPHA_DFLT;
++ data->uclk_average_alpha = PPVEGA20_VEGA20UCLKCLKAVERAGEALPHA_DFLT;
++ data->gfx_activity_average_alpha = PPVEGA20_VEGA20GFXACTIVITYAVERAGEALPHA_DFLT;
++ data->lowest_uclk_reserved_for_ulv = PPVEGA20_VEGA20LOWESTUCLKRESERVEDFORULV_DFLT;
++
++ data->display_voltage_mode = PPVEGA20_VEGA20DISPLAYVOLTAGEMODE_DFLT;
++ data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
++ data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
++ data->dcef_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
++ data->disp_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
++ data->disp_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
++ data->disp_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
++ data->pixel_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
++ data->pixel_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
++ data->pixel_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
++ data->phy_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
++ data->phy_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
++ data->phy_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
++
++ data->registry_data.disallowed_features = 0x0;
++ data->registry_data.od_state_in_dc_support = 0;
++ data->registry_data.thermal_support = 1;
++ data->registry_data.skip_baco_hardware = 0;
++
++ data->registry_data.log_avfs_param = 0;
++ data->registry_data.sclk_throttle_low_notification = 1;
++ data->registry_data.force_dpm_high = 0;
++ data->registry_data.stable_pstate_sclk_dpm_percentage = 75;
++
++ data->registry_data.didt_support = 0;
++ if (data->registry_data.didt_support) {
++ data->registry_data.didt_mode = 6;
++ data->registry_data.sq_ramping_support = 1;
++ data->registry_data.db_ramping_support = 0;
++ data->registry_data.td_ramping_support = 0;
++ data->registry_data.tcp_ramping_support = 0;
++ data->registry_data.dbr_ramping_support = 0;
++ data->registry_data.edc_didt_support = 1;
++ data->registry_data.gc_didt_support = 0;
++ data->registry_data.psm_didt_support = 0;
++ }
++
++ data->registry_data.pcie_lane_override = 0xff;
++ data->registry_data.pcie_speed_override = 0xff;
++ data->registry_data.pcie_clock_override = 0xffffffff;
++ data->registry_data.regulator_hot_gpio_support = 1;
++ data->registry_data.ac_dc_switch_gpio_support = 0;
++ data->registry_data.quick_transition_support = 0;
++ data->registry_data.zrpm_start_temp = 0xffff;
++ data->registry_data.zrpm_stop_temp = 0xffff;
++ data->registry_data.odn_feature_enable = 1;
++ data->registry_data.disable_water_mark = 0;
++ data->registry_data.disable_pp_tuning = 0;
++ data->registry_data.disable_xlpp_tuning = 0;
++ data->registry_data.disable_workload_policy = 0;
++ data->registry_data.perf_ui_tuning_profile_turbo = 0x19190F0F;
++ data->registry_data.perf_ui_tuning_profile_powerSave = 0x19191919;
++ data->registry_data.perf_ui_tuning_profile_xl = 0x00000F0A;
++ data->registry_data.force_workload_policy_mask = 0;
++ data->registry_data.disable_3d_fs_detection = 0;
++ data->registry_data.fps_support = 1;
++ data->registry_data.disable_auto_wattman = 1;
++ data->registry_data.auto_wattman_debug = 0;
++ data->registry_data.auto_wattman_sample_period = 100;
++ data->registry_data.auto_wattman_threshold = 50;
++ data->registry_data.gfxoff_controlled_by_driver = 1;
++ data->gfxoff_allowed = false;
++ data->counter_gfxoff = 0;
++}
++
++static int vega20_set_features_platform_caps(struct pp_hwmgr *hwmgr)
++{
++ struct vega20_hwmgr *data =
++ (struct vega20_hwmgr *)(hwmgr->backend);
++ struct amdgpu_device *adev = hwmgr->adev;
++
++ if (data->vddci_control == VEGA20_VOLTAGE_CONTROL_NONE)
++ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_ControlVDDCI);
++
++ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_TablelessHardwareInterface);
++
++ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_EnableSMU7ThermalManagement);
++
++ if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
++ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_UVDPowerGating);
++
++ if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
++ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_VCEPowerGating);
++
++ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_UnTabledHardwareInterface);
++
++ if (data->registry_data.odn_feature_enable)
++ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_ODNinACSupport);
++ else {
++ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_OD6inACSupport);
++ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_OD6PlusinACSupport);
++ }
++
++ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_ActivityReporting);
++ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_FanSpeedInTableIsRPM);
++
++ if (data->registry_data.od_state_in_dc_support) {
++ if (data->registry_data.odn_feature_enable)
++ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_ODNinDCSupport);
++ else {
++ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_OD6inDCSupport);
++ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_OD6PlusinDCSupport);
++ }
++ }
++
++ if (data->registry_data.thermal_support &&
++ data->registry_data.fuzzy_fan_control_support &&
++ hwmgr->thermal_controller.advanceFanControlParameters.usTMax)
++ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_ODFuzzyFanControlSupport);
++
++ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_DynamicPowerManagement);
++ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_SMC);
++ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_ThermalPolicyDelay);
++
++ if (data->registry_data.force_dpm_high)
++ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_ExclusiveModeAlwaysHigh);
++
++ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_DynamicUVDState);
++
++ if (data->registry_data.sclk_throttle_low_notification)
++ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_SclkThrottleLowNotification);
++
++ /* power tune caps */
++ /* assume disabled */
++ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_PowerContainment);
++ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_DiDtSupport);
++ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_SQRamping);
++ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_DBRamping);
++ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_TDRamping);
++ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_TCPRamping);
++ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_DBRRamping);
++ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_DiDtEDCEnable);
++ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_GCEDC);
++ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_PSM);
++
++ if (data->registry_data.didt_support) {
++ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_DiDtSupport);
++ if (data->registry_data.sq_ramping_support)
++ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_SQRamping);
++ if (data->registry_data.db_ramping_support)
++ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_DBRamping);
++ if (data->registry_data.td_ramping_support)
++ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_TDRamping);
++ if (data->registry_data.tcp_ramping_support)
++ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_TCPRamping);
++ if (data->registry_data.dbr_ramping_support)
++ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_DBRRamping);
++ if (data->registry_data.edc_didt_support)
++ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_DiDtEDCEnable);
++ if (data->registry_data.gc_didt_support)
++ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_GCEDC);
++ if (data->registry_data.psm_didt_support)
++ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_PSM);
++ }
++
++ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_RegulatorHot);
++
++ if (data->registry_data.ac_dc_switch_gpio_support) {
++ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_AutomaticDCTransition);
++ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
++ }
++
++ if (data->registry_data.quick_transition_support) {
++ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_AutomaticDCTransition);
++ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
++ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_Falcon_QuickTransition);
++ }
++
++ if (data->lowest_uclk_reserved_for_ulv != PPVEGA20_VEGA20LOWESTUCLKRESERVEDFORULV_DFLT) {
++ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_LowestUclkReservedForUlv);
++ if (data->lowest_uclk_reserved_for_ulv == 1)
++ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_LowestUclkReservedForUlv);
++ }
++
++ if (data->registry_data.custom_fan_support)
++ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_CustomFanControlSupport);
++
++ return 0;
++}
++
++static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr)
++{
++ struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
++ int i;
++
++ data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
++ FEATURE_DPM_PREFETCHER_BIT;
++ data->smu_features[GNLD_DPM_GFXCLK].smu_feature_id =
++ FEATURE_DPM_GFXCLK_BIT;
++ data->smu_features[GNLD_DPM_UCLK].smu_feature_id =
++ FEATURE_DPM_UCLK_BIT;
++ data->smu_features[GNLD_DPM_SOCCLK].smu_feature_id =
++ FEATURE_DPM_SOCCLK_BIT;
++ data->smu_features[GNLD_DPM_UVD].smu_feature_id =
++ FEATURE_DPM_UVD_BIT;
++ data->smu_features[GNLD_DPM_VCE].smu_feature_id =
++ FEATURE_DPM_VCE_BIT;
++ data->smu_features[GNLD_ULV].smu_feature_id =
++ FEATURE_ULV_BIT;
++ data->smu_features[GNLD_DPM_MP0CLK].smu_feature_id =
++ FEATURE_DPM_MP0CLK_BIT;
++ data->smu_features[GNLD_DPM_LINK].smu_feature_id =
++ FEATURE_DPM_LINK_BIT;
++ data->smu_features[GNLD_DPM_DCEFCLK].smu_feature_id =
++ FEATURE_DPM_DCEFCLK_BIT;
++ data->smu_features[GNLD_DS_GFXCLK].smu_feature_id =
++ FEATURE_DS_GFXCLK_BIT;
++ data->smu_features[GNLD_DS_SOCCLK].smu_feature_id =
++ FEATURE_DS_SOCCLK_BIT;
++ data->smu_features[GNLD_DS_LCLK].smu_feature_id =
++ FEATURE_DS_LCLK_BIT;
++ data->smu_features[GNLD_PPT].smu_feature_id =
++ FEATURE_PPT_BIT;
++ data->smu_features[GNLD_TDC].smu_feature_id =
++ FEATURE_TDC_BIT;
++ data->smu_features[GNLD_THERMAL].smu_feature_id =
++ FEATURE_THERMAL_BIT;
++ data->smu_features[GNLD_GFX_PER_CU_CG].smu_feature_id =
++ FEATURE_GFX_PER_CU_CG_BIT;
++ data->smu_features[GNLD_RM].smu_feature_id =
++ FEATURE_RM_BIT;
++ data->smu_features[GNLD_DS_DCEFCLK].smu_feature_id =
++ FEATURE_DS_DCEFCLK_BIT;
++ data->smu_features[GNLD_ACDC].smu_feature_id =
++ FEATURE_ACDC_BIT;
++ data->smu_features[GNLD_VR0HOT].smu_feature_id =
++ FEATURE_VR0HOT_BIT;
++ data->smu_features[GNLD_VR1HOT].smu_feature_id =
++ FEATURE_VR1HOT_BIT;
++ data->smu_features[GNLD_FW_CTF].smu_feature_id =
++ FEATURE_FW_CTF_BIT;
++ data->smu_features[GNLD_LED_DISPLAY].smu_feature_id =
++ FEATURE_LED_DISPLAY_BIT;
++ data->smu_features[GNLD_FAN_CONTROL].smu_feature_id =
++ FEATURE_FAN_CONTROL_BIT;
++ data->smu_features[GNLD_DIDT].smu_feature_id = FEATURE_GFX_EDC_BIT;
++ data->smu_features[GNLD_GFXOFF].smu_feature_id = FEATURE_GFXOFF_BIT;
++ data->smu_features[GNLD_CG].smu_feature_id = FEATURE_CG_BIT;
++ data->smu_features[GNLD_DPM_FCLK].smu_feature_id = FEATURE_DPM_FCLK_BIT;
++ data->smu_features[GNLD_DS_FCLK].smu_feature_id = FEATURE_DS_FCLK_BIT;
++ data->smu_features[GNLD_DS_MP1CLK].smu_feature_id = FEATURE_DS_MP1CLK_BIT;
++ data->smu_features[GNLD_DS_MP0CLK].smu_feature_id = FEATURE_DS_MP0CLK_BIT;
++ data->smu_features[GNLD_XGMI].smu_feature_id = FEATURE_XGMI_BIT;
++
++ for (i = 0; i < GNLD_FEATURES_MAX; i++) {
++ data->smu_features[i].smu_feature_bitmap =
++ (uint64_t)(1ULL << data->smu_features[i].smu_feature_id);
++ data->smu_features[i].allowed =
++ ((data->registry_data.disallowed_features >> i) & 1) ?
++ false : true;
++ }
++}
++
++static int vega20_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
++{
++ return 0;
++}
++
++static int vega20_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
++{
++ kfree(hwmgr->backend);
++ hwmgr->backend = NULL;
++
++ return 0;
++}
++
++static int vega20_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
++{
++ struct vega20_hwmgr *data;
++ struct amdgpu_device *adev = hwmgr->adev;
++
++ data = kzalloc(sizeof(struct vega20_hwmgr), GFP_KERNEL);
++ if (data == NULL)
++ return -ENOMEM;
++
++ hwmgr->backend = data;
++
++ vega20_set_default_registry_data(hwmgr);
++
++ data->disable_dpm_mask = 0xff;
++ data->workload_mask = 0xff;
++
++ /* need to set voltage control types before EVV patching */
++ data->vddc_control = VEGA20_VOLTAGE_CONTROL_NONE;
++ data->mvdd_control = VEGA20_VOLTAGE_CONTROL_NONE;
++ data->vddci_control = VEGA20_VOLTAGE_CONTROL_NONE;
++
++ data->water_marks_bitmap = 0;
++ data->avfs_exist = false;
++
++ vega20_set_features_platform_caps(hwmgr);
++
++ vega20_init_dpm_defaults(hwmgr);
++
++ /* Parse pptable data read from VBIOS */
++ vega20_set_private_data_based_on_pptable(hwmgr);
++
++ data->is_tlu_enabled = false;
++
++ hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
++ VEGA20_MAX_HARDWARE_POWERLEVELS;
++ hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
++ hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
++
++ hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
++ /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
++ hwmgr->platform_descriptor.clockStep.engineClock = 500;
++ hwmgr->platform_descriptor.clockStep.memoryClock = 500;
++
++ data->total_active_cus = adev->gfx.cu_info.number;
++
++ return 0;
++}
++
++static int vega20_init_sclk_threshold(struct pp_hwmgr *hwmgr)
++{
++ struct vega20_hwmgr *data =
++ (struct vega20_hwmgr *)(hwmgr->backend);
++
++ data->low_sclk_interrupt_threshold = 0;
++
++ return 0;
++}
++
++static int vega20_setup_asic_task(struct pp_hwmgr *hwmgr)
++{
++ int ret = 0;
++
++ ret = vega20_init_sclk_threshold(hwmgr);
++ PP_ASSERT_WITH_CODE(!ret,
++ "Failed to init sclk threshold!",
++ return ret);
++
++ return 0;
++}
++
++/*
++ * @fn vega20_init_dpm_state
++ * @brief Function to initialize the Soft/Hard Min levels to 0 and the Soft/Hard Max levels to 0xffff.
++ *
++ * @param dpm_state - the address of the DPM state to initialize.
++ * @return None.
++ */
++static void vega20_init_dpm_state(struct vega20_dpm_state *dpm_state)
++{
++ dpm_state->soft_min_level = 0x0;
++ dpm_state->soft_max_level = 0xffff;
++ dpm_state->hard_min_level = 0x0;
++ dpm_state->hard_max_level = 0xffff;
++}
++
++static int vega20_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
++ PPCLK_e clk_id, uint32_t *num_of_levels)
++{
++ int ret = 0;
++
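++ /* index 0xFF requests the number of dpm levels for the clock id
++ * packed into the upper 16 bits of the parameter */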
++ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_GetDpmFreqByIndex,
++ (clk_id << 16 | 0xFF));
++ PP_ASSERT_WITH_CODE(!ret,
++ "[GetNumOfDpmLevel] failed to get dpm levels!",
++ return ret);
++
++ vega20_read_arg_from_smc(hwmgr, num_of_levels);
++ PP_ASSERT_WITH_CODE(*num_of_levels > 0,
++ "[GetNumOfDpmLevel] number of clk levels is invalid!",
++ return -EINVAL);
++
++ return ret;
++}
++
++static int vega20_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
++ PPCLK_e clk_id, uint32_t index, uint32_t *clk)
++{
++ int ret = 0;
++
++ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_GetDpmFreqByIndex,
++ (clk_id << 16 | index));
++ PP_ASSERT_WITH_CODE(!ret,
++ "[GetDpmFreqByIndex] failed to get dpm freq by index!",
++ return ret);
++
++ vega20_read_arg_from_smc(hwmgr, clk);
++ PP_ASSERT_WITH_CODE(*clk,
++ "[GetDpmFreqByIndex] clk value is invalid!",
++ return -EINVAL);
++
++ return ret;
++}
++
++static int vega20_setup_single_dpm_table(struct pp_hwmgr *hwmgr,
++ struct vega20_single_dpm_table *dpm_table, PPCLK_e clk_id)
++{
++ int ret = 0;
++ uint32_t i, num_of_levels, clk;
++
++ ret = vega20_get_number_of_dpm_level(hwmgr, clk_id, &num_of_levels);
++ PP_ASSERT_WITH_CODE(!ret,
++ "[SetupSingleDpmTable] failed to get clk levels!",
++ return ret);
++
++ dpm_table->count = num_of_levels;
++
++ for (i = 0; i < num_of_levels; i++) {
++ ret = vega20_get_dpm_frequency_by_index(hwmgr, clk_id, i, &clk);
++ PP_ASSERT_WITH_CODE(!ret,
++ "[SetupSingleDpmTable] failed to get clk of specific level!",
++ return ret);
++ dpm_table->dpm_levels[i].value = clk;
++ dpm_table->dpm_levels[i].enabled = true;
++ }
++
++ return ret;
++}
++
++
++/*
++ * This function is to initialize all DPM state tables
++ * for SMU based on the dependency table.
++ * Dynamic state patching function will then trim these
++ * state tables to the allowed range based
++ * on the power policy or external client requests,
++ * such as UVD request, etc.
++ */
++static int vega20_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
++{
++ struct vega20_hwmgr *data =
++ (struct vega20_hwmgr *)(hwmgr->backend);
++ struct vega20_single_dpm_table *dpm_table;
++ int ret = 0;
++
++ memset(&data->dpm_table, 0, sizeof(data->dpm_table));
++
++ /* socclk */
++ dpm_table = &(data->dpm_table.soc_table);
++ if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
++ ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_SOCCLK);
++ PP_ASSERT_WITH_CODE(!ret,
++ "[SetupDefaultDpmTable] failed to get socclk dpm levels!",
++ return ret);
++ } else {
++ dpm_table->count = 1;
++ dpm_table->dpm_levels[0].value = data->vbios_boot_state.soc_clock / 100;
++ }
++ vega20_init_dpm_state(&(dpm_table->dpm_state));
++
++ /* gfxclk */
++ dpm_table = &(data->dpm_table.gfx_table);
++ if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
++ ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_GFXCLK);
++ PP_ASSERT_WITH_CODE(!ret,
++ "[SetupDefaultDpmTable] failed to get gfxclk dpm levels!",
++ return ret);
++ } else {
++ dpm_table->count = 1;
++ dpm_table->dpm_levels[0].value = data->vbios_boot_state.gfx_clock / 100;
++ }
++ vega20_init_dpm_state(&(dpm_table->dpm_state));
++
++ /* memclk */
++ dpm_table = &(data->dpm_table.mem_table);
++ if (data->smu_features[GNLD_DPM_UCLK].enabled) {
++ ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_UCLK);
++ PP_ASSERT_WITH_CODE(!ret,
++ "[SetupDefaultDpmTable] failed to get memclk dpm levels!",
++ return ret);
++ } else {
++ dpm_table->count = 1;
++ dpm_table->dpm_levels[0].value = data->vbios_boot_state.mem_clock / 100;
++ }
++ vega20_init_dpm_state(&(dpm_table->dpm_state));
++
++ /* eclk */
++ dpm_table = &(data->dpm_table.eclk_table);
++ if (data->smu_features[GNLD_DPM_VCE].enabled) {
++ ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_ECLK);
++ PP_ASSERT_WITH_CODE(!ret,
++ "[SetupDefaultDpmTable] failed to get eclk dpm levels!",
++ return ret);
++ } else {
++ dpm_table->count = 1;
++ dpm_table->dpm_levels[0].value = data->vbios_boot_state.eclock / 100;
++ }
++ vega20_init_dpm_state(&(dpm_table->dpm_state));
++
++ /* vclk */
++ dpm_table = &(data->dpm_table.vclk_table);
++ if (data->smu_features[GNLD_DPM_UVD].enabled) {
++ ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_VCLK);
++ PP_ASSERT_WITH_CODE(!ret,
++ "[SetupDefaultDpmTable] failed to get vclk dpm levels!",
++ return ret);
++ } else {
++ dpm_table->count = 1;
++ dpm_table->dpm_levels[0].value = data->vbios_boot_state.vclock / 100;
++ }
++ vega20_init_dpm_state(&(dpm_table->dpm_state));
++
++ /* dclk */
++ dpm_table = &(data->dpm_table.dclk_table);
++ if (data->smu_features[GNLD_DPM_UVD].enabled) {
++ ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCLK);
++ PP_ASSERT_WITH_CODE(!ret,
++ "[SetupDefaultDpmTable] failed to get dclk dpm levels!",
++ return ret);
++ } else {
++ dpm_table->count = 1;
++ dpm_table->dpm_levels[0].value = data->vbios_boot_state.dclock / 100;
++ }
++ vega20_init_dpm_state(&(dpm_table->dpm_state));
++
++ /* dcefclk */
++ dpm_table = &(data->dpm_table.dcef_table);
++ if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
++ ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCEFCLK);
++ PP_ASSERT_WITH_CODE(!ret,
++ "[SetupDefaultDpmTable] failed to get dcefclk dpm levels!",
++ return ret);
++ } else {
++ dpm_table->count = 1;
++ dpm_table->dpm_levels[0].value = data->vbios_boot_state.dcef_clock / 100;
++ }
++ vega20_init_dpm_state(&(dpm_table->dpm_state));
++
++ /* pixclk */
++ dpm_table = &(data->dpm_table.pixel_table);
++ if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
++ ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PIXCLK);
++ PP_ASSERT_WITH_CODE(!ret,
++ "[SetupDefaultDpmTable] failed to get pixclk dpm levels!",
++ return ret);
++ } else
++ dpm_table->count = 0;
++ vega20_init_dpm_state(&(dpm_table->dpm_state));
++
++ /* dispclk */
++ dpm_table = &(data->dpm_table.display_table);
++ if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
++ ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DISPCLK);
++ PP_ASSERT_WITH_CODE(!ret,
++ "[SetupDefaultDpmTable] failed to get dispclk dpm levels!",
++ return ret);
++ } else
++ dpm_table->count = 0;
++ vega20_init_dpm_state(&(dpm_table->dpm_state));
++
++ /* phyclk */
++ dpm_table = &(data->dpm_table.phy_table);
++ if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
++ ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PHYCLK);
++ PP_ASSERT_WITH_CODE(!ret,
++ "[SetupDefaultDpmTable] failed to get phyclk dpm levels!",
++ return ret);
++ } else
++ dpm_table->count = 0;
++ vega20_init_dpm_state(&(dpm_table->dpm_state));
++
++ /* fclk */
++ dpm_table = &(data->dpm_table.fclk_table);
++ if (data->smu_features[GNLD_DPM_FCLK].enabled) {
++ ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_FCLK);
++ PP_ASSERT_WITH_CODE(!ret,
++ "[SetupDefaultDpmTable] failed to get fclk dpm levels!",
++ return ret);
++ } else
++ dpm_table->count = 0;
++ vega20_init_dpm_state(&(dpm_table->dpm_state));
++
++ /* save a copy of the default DPM table */
++ memcpy(&(data->golden_dpm_table), &(data->dpm_table),
++ sizeof(struct vega20_dpm_table));
++
++ return 0;
++}
++
++/**
++* Initializes the SMC table and uploads it
++*
++* @param hwmgr the address of the powerplay hardware manager.
++* @return 0 on success, error code on failure
++*/
++static int vega20_init_smc_table(struct pp_hwmgr *hwmgr)
++{
++ int result;
++ struct vega20_hwmgr *data =
++ (struct vega20_hwmgr *)(hwmgr->backend);
++ PPTable_t *pp_table = &(data->smc_state_table.pp_table);
++ struct pp_atomfwctrl_bios_boot_up_values boot_up_values;
++ struct phm_ppt_v3_information *pptable_information =
++ (struct phm_ppt_v3_information *)hwmgr->pptable;
++
++ result = pp_atomfwctrl_get_vbios_bootup_values(hwmgr, &boot_up_values);
++ PP_ASSERT_WITH_CODE(!result,
++ "[InitSMCTable] Failed to get vbios bootup values!",
++ return result);
++
++ data->vbios_boot_state.vddc = boot_up_values.usVddc;
++ data->vbios_boot_state.vddci = boot_up_values.usVddci;
++ data->vbios_boot_state.mvddc = boot_up_values.usMvddc;
++ data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk;
++ data->vbios_boot_state.mem_clock = boot_up_values.ulUClk;
++ data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
++ data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
++ data->vbios_boot_state.eclock = boot_up_values.ulEClk;
++ data->vbios_boot_state.vclock = boot_up_values.ulVClk;
++ data->vbios_boot_state.dclock = boot_up_values.ulDClk;
++ data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID;
++ if (0 != boot_up_values.usVddc) {
++ smum_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_SetFloorSocVoltage,
++ (boot_up_values.usVddc * 4));
++ data->vbios_boot_state.bsoc_vddc_lock = true;
++ } else {
++ data->vbios_boot_state.bsoc_vddc_lock = false;
++ }
++ smum_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_SetMinDeepSleepDcefclk,
++ (uint32_t)(data->vbios_boot_state.dcef_clock / 100));
++
++ memcpy(pp_table, pptable_information->smc_pptable, sizeof(PPTable_t));
++
++ result = vega20_copy_table_to_smc(hwmgr,
++ (uint8_t *)pp_table, TABLE_PPTABLE);
++ PP_ASSERT_WITH_CODE(!result,
++ "[InitSMCTable] Failed to upload PPtable!",
++ return result);
++
++ return 0;
++}
++
++static int vega20_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
++{
++ struct vega20_hwmgr *data =
++ (struct vega20_hwmgr *)(hwmgr->backend);
++ uint32_t allowed_features_low = 0, allowed_features_high = 0;
++ int i;
++ int ret = 0;
++
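++ /* route each allowed feature bit into the low or high 32-bit word to
++ * match the SetAllowedFeaturesMaskLow/High message split */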
++ for (i = 0; i < GNLD_FEATURES_MAX; i++)
++ if (data->smu_features[i].allowed)
++ data->smu_features[i].smu_feature_id > 31 ?
++ (allowed_features_high |=
++ ((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_HIGH_SHIFT)
++ & 0xFFFFFFFF)) :
++ (allowed_features_low |=
++ ((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_LOW_SHIFT)
++ & 0xFFFFFFFF));
++
++ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high);
++ PP_ASSERT_WITH_CODE(!ret,
++ "[SetAllowedFeaturesMask] Attempt to set allowed features mask(high) failed!",
++ return ret);
++
++ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low);
++ PP_ASSERT_WITH_CODE(!ret,
++ "[SetAllowedFeaturesMask] Attempt to set allowed features mask (low) failed!",
++ return ret);
++
++ return 0;
++}
++
++static int vega20_enable_all_smu_features(struct pp_hwmgr *hwmgr)
++{
++ struct vega20_hwmgr *data =
++ (struct vega20_hwmgr *)(hwmgr->backend);
++ uint64_t features_enabled;
++ int i;
++ bool enabled;
++ int ret = 0;
++
++ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
++ PPSMC_MSG_EnableAllSmuFeatures)) == 0,
++ "[EnableAllSMUFeatures] Failed to enable all smu features!",
++ return ret);
++
++ ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
++ PP_ASSERT_WITH_CODE(!ret,
++ "[EnableAllSmuFeatures] Failed to get enabled smc features!",
++ return ret);
++
++ for (i = 0; i < GNLD_FEATURES_MAX; i++) {
++ enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ?
++ true : false;
++ data->smu_features[i].enabled = enabled;
++ data->smu_features[i].supported = enabled;
++
++#if 0
++ if (data->smu_features[i].allowed && !enabled)
++ pr_info("[EnableAllSMUFeatures] feature %d is expected enabled!", i);
++ else if (!data->smu_features[i].allowed && enabled)
++ pr_info("[EnableAllSMUFeatures] feature %d is expected disabled!", i);
++#endif
++ }
++
++ return 0;
++}
++
++static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr)
++{
++ struct vega20_hwmgr *data =
++ (struct vega20_hwmgr *)(hwmgr->backend);
++ uint64_t features_enabled;
++ int i;
++ bool enabled;
++ int ret = 0;
++
++ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
++ PPSMC_MSG_DisableAllSmuFeatures)) == 0,
++ "[DisableAllSMUFeatures] Failed to disable all smu features!",
++ return ret);
++
++ ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
++ PP_ASSERT_WITH_CODE(!ret,
++ "[DisableAllSMUFeatures] Failed to get enabled smc features!",
++ return ret);
++
++ for (i = 0; i < GNLD_FEATURES_MAX; i++) {
++ enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ?
++ true : false;
++ data->smu_features[i].enabled = enabled;
++ data->smu_features[i].supported = enabled;
++ }
++
++ return 0;
++}
++
++static int vega20_odn_initialize_default_settings(
++ struct pp_hwmgr *hwmgr)
++{
++ return 0;
++}
++
++static int vega20_get_max_sustainable_clock(struct pp_hwmgr *hwmgr,
++ PP_Clock *clock, PPCLK_e clock_select)
++{
++ int ret = 0;
++
++ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_GetDcModeMaxDpmFreq,
++ (clock_select << 16))) == 0,
++ "[GetMaxSustainableClock] Failed to get max DC clock from SMC!",
++ return ret);
++ vega20_read_arg_from_smc(hwmgr, clock);
++
++ /* if DC limit is zero, return AC limit */
++ if (*clock == 0) {
++ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_GetMaxDpmFreq,
++ (clock_select << 16))) == 0,
++ "[GetMaxSustainableClock] failed to get max AC clock from SMC!",
++ return ret);
++ vega20_read_arg_from_smc(hwmgr, clock);
++ }
++
++ return 0;
++}
++
++static int vega20_init_max_sustainable_clocks(struct pp_hwmgr *hwmgr)
++{
++ struct vega20_hwmgr *data =
++ (struct vega20_hwmgr *)(hwmgr->backend);
++ struct vega20_max_sustainable_clocks *max_sustainable_clocks =
++ &(data->max_sustainable_clocks);
++ int ret = 0;
++
++ max_sustainable_clocks->uclock = data->vbios_boot_state.mem_clock / 100;
++ max_sustainable_clocks->soc_clock = data->vbios_boot_state.soc_clock / 100;
++ max_sustainable_clocks->dcef_clock = data->vbios_boot_state.dcef_clock / 100;
++ max_sustainable_clocks->display_clock = 0xFFFFFFFF;
++ max_sustainable_clocks->phy_clock = 0xFFFFFFFF;
++ max_sustainable_clocks->pixel_clock = 0xFFFFFFFF;
++
++ if (data->smu_features[GNLD_DPM_UCLK].enabled)
++ PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
++ &(max_sustainable_clocks->uclock),
++ PPCLK_UCLK)) == 0,
++ "[InitMaxSustainableClocks] failed to get max UCLK from SMC!",
++ return ret);
++
++ if (data->smu_features[GNLD_DPM_SOCCLK].enabled)
++ PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
++ &(max_sustainable_clocks->soc_clock),
++ PPCLK_SOCCLK)) == 0,
++ "[InitMaxSustainableClocks] failed to get max SOCCLK from SMC!",
++ return ret);
++
++ if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
++ PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
++ &(max_sustainable_clocks->dcef_clock),
++ PPCLK_DCEFCLK)) == 0,
++ "[InitMaxSustainableClocks] failed to get max DCEFCLK from SMC!",
++ return ret);
++ PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
++ &(max_sustainable_clocks->display_clock),
++ PPCLK_DISPCLK)) == 0,
++ "[InitMaxSustainableClocks] failed to get max DISPCLK from SMC!",
++ return ret);
++ PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
++ &(max_sustainable_clocks->phy_clock),
++ PPCLK_PHYCLK)) == 0,
++ "[InitMaxSustainableClocks] failed to get max PHYCLK from SMC!",
++ return ret);
++ PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
++ &(max_sustainable_clocks->pixel_clock),
++ PPCLK_PIXCLK)) == 0,
++ "[InitMaxSustainableClocks] failed to get max PIXCLK from SMC!",
++ return ret);
++ }
++
++ if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock)
++ max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock;
++
++ if (max_sustainable_clocks->uclock < max_sustainable_clocks->dcef_clock)
++ max_sustainable_clocks->dcef_clock = max_sustainable_clocks->uclock;
++
++ return 0;
++}
++
++static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
++{
++ int result = 0;
++
++ smum_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_NumOfDisplays, 0);
++
++ result = vega20_set_allowed_featuresmask(hwmgr);
++ PP_ASSERT_WITH_CODE(!result,
++ "[EnableDPMTasks] Failed to set allowed featuresmask!\n",
++ return result);
++
++ result = vega20_init_smc_table(hwmgr);
++ PP_ASSERT_WITH_CODE(!result,
++ "[EnableDPMTasks] Failed to initialize SMC table!",
++ return result);
++
++ result = vega20_enable_all_smu_features(hwmgr);
++ PP_ASSERT_WITH_CODE(!result,
++ "[EnableDPMTasks] Failed to enable all smu features!",
++ return result);
++
++ result = vega20_setup_default_dpm_tables(hwmgr);
++ PP_ASSERT_WITH_CODE(!result,
++ "[EnableDPMTasks] Failed to setup default DPM tables!",
++ return result);
++
++ result = vega20_init_max_sustainable_clocks(hwmgr);
++ PP_ASSERT_WITH_CODE(!result,
++ "[EnableDPMTasks] Failed to get maximum sustainable clocks!",
++ return result);
++
++ result = vega20_power_control_set_level(hwmgr);
++ PP_ASSERT_WITH_CODE(!result,
++ "[EnableDPMTasks] Failed to power control set level!",
++ return result);
++
++ result = vega20_odn_initialize_default_settings(hwmgr);
++ PP_ASSERT_WITH_CODE(!result,
++ "[EnableDPMTasks] Failed to initialize odn settings!",
++ return result);
++
++ return result;
++}
++
++static uint32_t vega20_find_lowest_dpm_level(
++ struct vega20_single_dpm_table *table)
++{
++ uint32_t i;
++
++ for (i = 0; i < table->count; i++) {
++ if (table->dpm_levels[i].enabled)
++ break;
++ }
++ if (i >= table->count) {
++ i = 0;
++ table->dpm_levels[i].enabled = true;
++ }
++
++ return i;
++}
++
++static uint32_t vega20_find_highest_dpm_level(
++ struct vega20_single_dpm_table *table)
++{
++ int i = 0;  /* signed so the "i >= 0" loop bound and "i < 0" check below can work */
++
++ PP_ASSERT_WITH_CODE(table != NULL,
++ "[FindHighestDPMLevel] DPM Table does not exist!",
++ return 0);
++ PP_ASSERT_WITH_CODE(table->count > 0,
++ "[FindHighestDPMLevel] DPM Table has no entry!",
++ return 0);
++ PP_ASSERT_WITH_CODE(table->count <= MAX_REGULAR_DPM_NUMBER,
++ "[FindHighestDPMLevel] DPM Table has too many entries!",
++ return MAX_REGULAR_DPM_NUMBER - 1);
++
++ for (i = table->count - 1; i >= 0; i--) {
++ if (table->dpm_levels[i].enabled)
++ break;
++ }
++ if (i < 0) {
++ i = 0;
++ table->dpm_levels[i].enabled = true;
++ }
++
++ return i;
++}
++
++static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
++{
++ struct vega20_hwmgr *data =
++ (struct vega20_hwmgr *)(hwmgr->backend);
++ int ret = 0;
++
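++ /* the message parameter packs the clock id into the upper 16 bits and
++ * the requested min value into the lower 16 bits */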
++ if (data->smu_features[GNLD_DPM_GFXCLK].enabled)
++ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
++ hwmgr, PPSMC_MSG_SetSoftMinByFreq,
++ PPCLK_GFXCLK << 16 |
++ data->dpm_table.gfx_table.dpm_state.soft_min_level)),
++ "Failed to set soft min gfxclk !",
++ return ret);
++
++ if (data->smu_features[GNLD_DPM_UCLK].enabled) {
++ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
++ hwmgr, PPSMC_MSG_SetSoftMinByFreq,
++ PPCLK_UCLK << 16 |
++ data->dpm_table.mem_table.dpm_state.soft_min_level)),
++ "Failed to set soft min memclk !",
++ return ret);
++ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
++ hwmgr, PPSMC_MSG_SetHardMinByFreq,
++ PPCLK_UCLK << 16 |
++ data->dpm_table.mem_table.dpm_state.hard_min_level)),
++ "Failed to set hard min memclk !",
++ return ret);
++ }
++
++ return ret;
++}
++
++static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
++{
++ struct vega20_hwmgr *data =
++ (struct vega20_hwmgr *)(hwmgr->backend);
++ int ret = 0;
++
++ if (data->smu_features[GNLD_DPM_GFXCLK].enabled)
++ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
++ hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
++ PPCLK_GFXCLK << 16 |
++ data->dpm_table.gfx_table.dpm_state.soft_max_level)),
++ "Failed to set soft max gfxclk!",
++ return ret);
++
++ if (data->smu_features[GNLD_DPM_UCLK].enabled)
++ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
++ hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
++ PPCLK_UCLK << 16 |
++ data->dpm_table.mem_table.dpm_state.soft_max_level)),
++ "Failed to set soft max memclk!",
++ return ret);
++
++ return ret;
++}
++
++int vega20_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
++{
++ struct vega20_hwmgr *data =
++ (struct vega20_hwmgr *)(hwmgr->backend);
++ int ret = 0;
++
++ if (data->smu_features[GNLD_DPM_VCE].supported) {
++ if (data->smu_features[GNLD_DPM_VCE].enabled == enable) {
++ if (enable)
++ PP_DBG_LOG("[EnableDisableVCEDPM] feature VCE DPM already enabled!\n");
++ else
++ PP_DBG_LOG("[EnableDisableVCEDPM] feature VCE DPM already disabled!\n");
++ }
++
++ ret = vega20_enable_smc_features(hwmgr,
++ enable,
++ data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap);
++ PP_ASSERT_WITH_CODE(!ret,
++ "Attempt to Enable/Disable DPM VCE Failed!",
++ return ret);
++ data->smu_features[GNLD_DPM_VCE].enabled = enable;
++ }
++
++ return 0;
++}
++
++static int vega20_get_clock_ranges(struct pp_hwmgr *hwmgr,
++ uint32_t *clock,
++ PPCLK_e clock_select,
++ bool max)
++{
++ int ret;
++ *clock = 0;
++
++ if (max) {
++ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_GetMaxDpmFreq, (clock_select << 16))) == 0,
++ "[GetClockRanges] Failed to get max clock from SMC!",
++ return ret);
++ vega20_read_arg_from_smc(hwmgr, clock);
++ } else {
++ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_GetMinDpmFreq,
++ (clock_select << 16))) == 0,
++ "[GetClockRanges] Failed to get min clock from SMC!",
++ return ret);
++ vega20_read_arg_from_smc(hwmgr, clock);
++ }
++
++ return 0;
++}
++
++static uint32_t vega20_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
++{
++ struct vega20_hwmgr *data =
++ (struct vega20_hwmgr *)(hwmgr->backend);
++ uint32_t gfx_clk;
++ int ret = 0;
++
++ PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_GFXCLK].enabled,
++ "[GetSclks]: gfxclk dpm not enabled!\n",
++ return -EPERM);
++
++ if (low) {
++ ret = vega20_get_clock_ranges(hwmgr, &gfx_clk, PPCLK_GFXCLK, false);
++ PP_ASSERT_WITH_CODE(!ret,
++ "[GetSclks]: fail to get min PPCLK_GFXCLK\n",
++ return ret);
++ } else {
++ ret = vega20_get_clock_ranges(hwmgr, &gfx_clk, PPCLK_GFXCLK, true);
++ PP_ASSERT_WITH_CODE(!ret,
++ "[GetSclks]: fail to get max PPCLK_GFXCLK\n",
++ return ret);
++ }
++
++ return (gfx_clk * 100);
++}
++
++static uint32_t vega20_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
++{
++ struct vega20_hwmgr *data =
++ (struct vega20_hwmgr *)(hwmgr->backend);
++ uint32_t mem_clk;
++ int ret = 0;
++
++ PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_UCLK].enabled,
++ "[MemMclks]: memclk dpm not enabled!\n",
++ return -EPERM);
++
++ if (low) {
++ ret = vega20_get_clock_ranges(hwmgr, &mem_clk, PPCLK_UCLK, false);
++ PP_ASSERT_WITH_CODE(!ret,
++ "[GetMclks]: fail to get min PPCLK_UCLK\n",
++ return ret);
++ } else {
++ ret = vega20_get_clock_ranges(hwmgr, &mem_clk, PPCLK_UCLK, true);
++ PP_ASSERT_WITH_CODE(!ret,
++ "[GetMclks]: fail to get max PPCLK_UCLK\n",
++ return ret);
++ }
++
++ return (mem_clk * 100);
++}
++
++static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr,
++ uint32_t *query)
++{
++ int ret = 0;
++ SmuMetrics_t metrics_table;
++
++ ret = vega20_copy_table_from_smc(hwmgr, (uint8_t *)&metrics_table, TABLE_SMU_METRICS);
++ PP_ASSERT_WITH_CODE(!ret,
++ "Failed to export SMU METRICS table!",
++ return ret);
++
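++ /* CurrSocketPower is reported in watts; the left shift by 8 appears to
++ * convert it to the 1/256 W fixed-point units the GPU power sensor
++ * interface expects (inferred from the scaling used here).
++ */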
++ *query = metrics_table.CurrSocketPower << 8;
++
++ return ret;
++}
++
++static int vega20_get_current_gfx_clk_freq(struct pp_hwmgr *hwmgr, uint32_t *gfx_freq)
++{
++ uint32_t gfx_clk = 0;
++ int ret = 0;
++
++ *gfx_freq = 0;
++
++ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16))) == 0,
++ "[GetCurrentGfxClkFreq] Attempt to get Current GFXCLK Frequency Failed!",
++ return ret);
++ vega20_read_arg_from_smc(hwmgr, &gfx_clk);
++
++ *gfx_freq = gfx_clk * 100;
++
++ return 0;
++}
++
++static int vega20_get_current_mclk_freq(struct pp_hwmgr *hwmgr, uint32_t *mclk_freq)
++{
++ uint32_t mem_clk = 0;
++ int ret = 0;
++
++ *mclk_freq = 0;
++
++ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_GetDpmClockFreq, (PPCLK_UCLK << 16))) == 0,
++ "[GetCurrentMClkFreq] Attempt to get Current MCLK Frequency Failed!",
++ return ret);
++ vega20_read_arg_from_smc(hwmgr, &mem_clk);
++
++ *mclk_freq = mem_clk * 100;
++
++ return 0;
++}
++
++static int vega20_get_current_activity_percent(struct pp_hwmgr *hwmgr,
++ uint32_t *activity_percent)
++{
++ int ret = 0;
++ SmuMetrics_t metrics_table;
++
++ ret = vega20_copy_table_from_smc(hwmgr, (uint8_t *)&metrics_table, TABLE_SMU_METRICS);
++ PP_ASSERT_WITH_CODE(!ret,
++ "Failed to export SMU METRICS table!",
++ return ret);
++
++ *activity_percent = metrics_table.AverageGfxActivity;
++
++ return ret;
++}
++
++static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx,
++ void *value, int *size)
++{
++ struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
++ int ret = 0;
++
++ switch (idx) {
++ case AMDGPU_PP_SENSOR_GFX_SCLK:
++ ret = vega20_get_current_gfx_clk_freq(hwmgr, (uint32_t *)value);
++ if (!ret)
++ *size = 4;
++ break;
++ case AMDGPU_PP_SENSOR_GFX_MCLK:
++ ret = vega20_get_current_mclk_freq(hwmgr, (uint32_t *)value);
++ if (!ret)
++ *size = 4;
++ break;
++ case AMDGPU_PP_SENSOR_GPU_LOAD:
++ ret = vega20_get_current_activity_percent(hwmgr, (uint32_t *)value);
++ if (!ret)
++ *size = 4;
++ break;
++ case AMDGPU_PP_SENSOR_GPU_TEMP:
++ *((uint32_t *)value) = vega20_thermal_get_temperature(hwmgr);
++ *size = 4;
++ break;
++ case AMDGPU_PP_SENSOR_UVD_POWER:
++ *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
++ *size = 4;
++ break;
++ case AMDGPU_PP_SENSOR_VCE_POWER:
++ *((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
++ *size = 4;
++ break;
++ case AMDGPU_PP_SENSOR_GPU_POWER:
++ *size = 16;
++ ret = vega20_get_gpu_power(hwmgr, (uint32_t *)value);
++ break;
++ default:
++ ret = -EINVAL;
++ break;
++ }
++ return ret;
++}
++
++static int vega20_notify_smc_display_change(struct pp_hwmgr *hwmgr,
++ bool has_disp)
++{
++ struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
++
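++ /* PPSMC_MSG_SetUclkFastSwitch takes 1 when no display is active and
++ * 0 otherwise.
++ */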
++ if (data->smu_features[GNLD_DPM_UCLK].enabled)
++ return smum_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_SetUclkFastSwitch,
++ has_disp ? 0 : 1);
++
++ return 0;
++}
++
++int vega20_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
++ struct pp_display_clock_request *clock_req)
++{
++ int result = 0;
++ struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
++ enum amd_pp_clock_type clk_type = clock_req->clock_type;
++ uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
++ PPCLK_e clk_select = 0;
++ uint32_t clk_request = 0;
++
++ if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
++ switch (clk_type) {
++ case amd_pp_dcef_clock:
++ clk_freq = clock_req->clock_freq_in_khz / 100;
++ clk_select = PPCLK_DCEFCLK;
++ break;
++ case amd_pp_disp_clock:
++ clk_select = PPCLK_DISPCLK;
++ break;
++ case amd_pp_pixel_clock:
++ clk_select = PPCLK_PIXCLK;
++ break;
++ case amd_pp_phy_clock:
++ clk_select = PPCLK_PHYCLK;
++ break;
++ default:
++ pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
++ result = -EINVAL;
++ break;
++ }
++
++ if (!result) {
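++ /* SMC frequency requests pack the clock ID into the upper 16 bits
++ * and the target frequency into the lower 16 bits.
++ */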
++ clk_request = (clk_select << 16) | clk_freq;
++ result = smum_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_SetHardMinByFreq,
++ clk_request);
++ }
++ }
++
++ return result;
++}
++
++static int vega20_notify_smc_display_config_after_ps_adjustment(
++ struct pp_hwmgr *hwmgr)
++{
++ struct vega20_hwmgr *data =
++ (struct vega20_hwmgr *)(hwmgr->backend);
++ struct PP_Clocks min_clocks = {0};
++ struct pp_display_clock_request clock_req;
++ int ret = 0;
++
++ if ((hwmgr->display_config->num_display > 1) &&
++ !hwmgr->display_config->multi_monitor_in_sync)
++ vega20_notify_smc_display_change(hwmgr, false);
++ else
++ vega20_notify_smc_display_change(hwmgr, true);
++
++ min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
++ min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk;
++ min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
++
++ if (data->smu_features[GNLD_DPM_DCEFCLK].supported) {
++ clock_req.clock_type = amd_pp_dcef_clock;
++ clock_req.clock_freq_in_khz = min_clocks.dcefClock;
++ if (!vega20_display_clock_voltage_request(hwmgr, &clock_req)) {
++ if (data->smu_features[GNLD_DS_DCEFCLK].supported)
++ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(
++ hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
++ min_clocks.dcefClockInSR / 100)) == 0,
++ "Attempt to set divider for DCEFCLK Failed!",
++ return ret);
++ } else {
++ pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
++ }
++ }
++
++ return 0;
++}
++
++static int vega20_force_dpm_highest(struct pp_hwmgr *hwmgr)
++{
++ struct vega20_hwmgr *data =
++ (struct vega20_hwmgr *)(hwmgr->backend);
++ int ret = 0;
++
++ data->smc_state_table.gfx_boot_level =
++ data->smc_state_table.gfx_max_level =
++ vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table));
++ data->smc_state_table.mem_boot_level =
++ data->smc_state_table.mem_max_level =
++ vega20_find_highest_dpm_level(&(data->dpm_table.mem_table));
++
++ ret = vega20_upload_dpm_min_level(hwmgr);
++ PP_ASSERT_WITH_CODE(!ret,
++ "Failed to upload boot level to highest!",
++ return ret);
++
++ ret = vega20_upload_dpm_max_level(hwmgr);
++ PP_ASSERT_WITH_CODE(!ret,
++ "Failed to upload dpm max level to highest!",
++ return ret);
++
++ return 0;
++}
++
++static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr)
++{
++ struct vega20_hwmgr *data =
++ (struct vega20_hwmgr *)(hwmgr->backend);
++ int ret = 0;
++
++ data->smc_state_table.gfx_boot_level =
++ data->smc_state_table.gfx_max_level =
++ vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
++ data->smc_state_table.mem_boot_level =
++ data->smc_state_table.mem_max_level =
++ vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table));
++
++ ret = vega20_upload_dpm_min_level(hwmgr);
++ PP_ASSERT_WITH_CODE(!ret,
++ "Failed to upload boot level to highest!",
++ return ret);
++
++ ret = vega20_upload_dpm_max_level(hwmgr);
++ PP_ASSERT_WITH_CODE(!ret,
++ "Failed to upload dpm max level to highest!",
++ return ret);
++
++ return 0;
++}
++
++static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
++{
++ int ret = 0;
++
++ ret = vega20_upload_dpm_min_level(hwmgr);
++ PP_ASSERT_WITH_CODE(!ret,
++ "Failed to upload DPM Bootup Levels!",
++ return ret);
++
++ ret = vega20_upload_dpm_max_level(hwmgr);
++ PP_ASSERT_WITH_CODE(!ret,
++ "Failed to upload DPM Max Levels!",
++ return ret);
++
++ return 0;
++}
++
++#if 0
++static int vega20_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
++ uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask)
++{
++ struct phm_ppt_v2_information *table_info =
++ (struct phm_ppt_v2_information *)(hwmgr->pptable);
++
++ if (table_info->vdd_dep_on_sclk->count > VEGA20_UMD_PSTATE_GFXCLK_LEVEL &&
++ table_info->vdd_dep_on_socclk->count > VEGA20_UMD_PSTATE_SOCCLK_LEVEL &&
++ table_info->vdd_dep_on_mclk->count > VEGA20_UMD_PSTATE_MCLK_LEVEL) {
++ *sclk_mask = VEGA20_UMD_PSTATE_GFXCLK_LEVEL;
++ *soc_mask = VEGA20_UMD_PSTATE_SOCCLK_LEVEL;
++ *mclk_mask = VEGA20_UMD_PSTATE_MCLK_LEVEL;
++ }
++
++ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
++ *sclk_mask = 0;
++ } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
++ *mclk_mask = 0;
++ } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
++ *sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
++ *soc_mask = table_info->vdd_dep_on_socclk->count - 1;
++ *mclk_mask = table_info->vdd_dep_on_mclk->count - 1;
++ }
++ return 0;
++}
++#endif
++
++static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
++ enum pp_clock_type type, uint32_t mask)
++{
++ struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
++ int ret = 0;
++
++ switch (type) {
++ case PP_SCLK:
++ data->smc_state_table.gfx_boot_level = mask ? (ffs(mask) - 1) : 0;
++ data->smc_state_table.gfx_max_level = mask ? (fls(mask) - 1) : 0;
++
++ ret = vega20_upload_dpm_min_level(hwmgr);
++ PP_ASSERT_WITH_CODE(!ret,
++ "Failed to upload boot level to lowest!",
++ return ret);
++
++ ret = vega20_upload_dpm_max_level(hwmgr);
++ PP_ASSERT_WITH_CODE(!ret,
++ "Failed to upload dpm max level to highest!",
++ return ret);
++ break;
++
++ case PP_MCLK:
++ data->smc_state_table.mem_boot_level = mask ? (ffs(mask) - 1) : 0;
++ data->smc_state_table.mem_max_level = mask ? (fls(mask) - 1) : 0;
++
++ ret = vega20_upload_dpm_min_level(hwmgr);
++ PP_ASSERT_WITH_CODE(!ret,
++ "Failed to upload boot level to lowest!",
++ return ret);
++
++ ret = vega20_upload_dpm_max_level(hwmgr);
++ PP_ASSERT_WITH_CODE(!ret,
++ "Failed to upload dpm max level to highest!",
++ return ret);
++
++ break;
++
++ case PP_PCIE:
++ break;
++
++ default:
++ break;
++ }
++
++ return 0;
++}
++
++static int vega20_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
++ enum amd_dpm_forced_level level)
++{
++ int ret = 0;
++#if 0
++ uint32_t sclk_mask = 0;
++ uint32_t mclk_mask = 0;
++ uint32_t soc_mask = 0;
++#endif
++
++ switch (level) {
++ case AMD_DPM_FORCED_LEVEL_HIGH:
++ ret = vega20_force_dpm_highest(hwmgr);
++ break;
++ case AMD_DPM_FORCED_LEVEL_LOW:
++ ret = vega20_force_dpm_lowest(hwmgr);
++ break;
++ case AMD_DPM_FORCED_LEVEL_AUTO:
++ ret = vega20_unforce_dpm_levels(hwmgr);
++ break;
++ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
++ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
++ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
++ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
++#if 0
++ ret = vega20_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
++ if (ret)
++ return ret;
++ vega20_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
++ vega20_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
++#endif
++ break;
++ case AMD_DPM_FORCED_LEVEL_MANUAL:
++ case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
++ default:
++ break;
++ }
++#if 0
++ if (!ret) {
++ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
++ vega20_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE);
++ else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
++ vega20_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO);
++ }
++#endif
++ return ret;
++}
++
++static uint32_t vega20_get_fan_control_mode(struct pp_hwmgr *hwmgr)
++{
++ struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
++
++ if (!data->smu_features[GNLD_FAN_CONTROL].enabled)
++ return AMD_FAN_CTRL_MANUAL;
++ else
++ return AMD_FAN_CTRL_AUTO;
++}
++
++static int vega20_get_dal_power_level(struct pp_hwmgr *hwmgr,
++ struct amd_pp_simple_clock_info *info)
++{
++#if 0
++ struct phm_ppt_v2_information *table_info =
++ (struct phm_ppt_v2_information *)hwmgr->pptable;
++ struct phm_clock_and_voltage_limits *max_limits =
++ &table_info->max_clock_voltage_on_ac;
++
++ info->engine_max_clock = max_limits->sclk;
++ info->memory_max_clock = max_limits->mclk;
++#endif
++ return 0;
++}
++
++static int vega20_get_sclks(struct pp_hwmgr *hwmgr,
++ struct pp_clock_levels_with_latency *clocks)
++{
++ struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
++ struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
++ int i, count;
++
++ PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_GFXCLK].enabled,
++ "[GetSclks]: gfxclk dpm not enabled!\n",
++ return -EPERM);
++
++ count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
++ clocks->num_levels = count;
++
++ for (i = 0; i < count; i++) {
++ clocks->data[i].clocks_in_khz =
++ dpm_table->dpm_levels[i].value * 100;
++ clocks->data[i].latency_in_us = 0;
++ }
++
++ return 0;
++}
++
++static uint32_t vega20_get_mem_latency(struct pp_hwmgr *hwmgr,
++ uint32_t clock)
++{
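++ /* A flat 25 us switch latency is assumed for every memory clock level. */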
++ return 25;
++}
++
++static int vega20_get_memclocks(struct pp_hwmgr *hwmgr,
++ struct pp_clock_levels_with_latency *clocks)
++{
++ struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
++ struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.mem_table);
++ int i, count;
++
++ PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_UCLK].enabled,
++ "[GetMclks]: uclk dpm not enabled!\n",
++ return -EPERM);
++
++ count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
++ clocks->num_levels = data->mclk_latency_table.count = count;
++
++ for (i = 0; i < count; i++) {
++ clocks->data[i].clocks_in_khz =
++ data->mclk_latency_table.entries[i].frequency =
++ dpm_table->dpm_levels[i].value * 100;
++ clocks->data[i].latency_in_us =
++ data->mclk_latency_table.entries[i].latency =
++ vega20_get_mem_latency(hwmgr, dpm_table->dpm_levels[i].value);
++ }
++
++ return 0;
++}
++
++static int vega20_get_dcefclocks(struct pp_hwmgr *hwmgr,
++ struct pp_clock_levels_with_latency *clocks)
++{
++ struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
++ struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.dcef_table);
++ int i, count;
++
++ PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_DCEFCLK].enabled,
++ "[GetDcfclocks]: dcefclk dpm not enabled!\n",
++ return -EPERM);
++
++ count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
++ clocks->num_levels = count;
++
++ for (i = 0; i < count; i++) {
++ clocks->data[i].clocks_in_khz =
++ dpm_table->dpm_levels[i].value * 100;
++ clocks->data[i].latency_in_us = 0;
++ }
++
++ return 0;
++}
++
++static int vega20_get_socclocks(struct pp_hwmgr *hwmgr,
++ struct pp_clock_levels_with_latency *clocks)
++{
++ struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
++ struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.soc_table);
++ int i, count;
++
++ PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_SOCCLK].enabled,
++ "[GetSocclks]: socclk dpm not enabled!\n",
++ return -EPERM);
++
++ count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
++ clocks->num_levels = count;
++
++ for (i = 0; i < count; i++) {
++ clocks->data[i].clocks_in_khz =
++ dpm_table->dpm_levels[i].value * 100;
++ clocks->data[i].latency_in_us = 0;
++ }
++
++ return 0;
++}
++
++static int vega20_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
++ enum amd_pp_clock_type type,
++ struct pp_clock_levels_with_latency *clocks)
++{
++ int ret;
++
++ switch (type) {
++ case amd_pp_sys_clock:
++ ret = vega20_get_sclks(hwmgr, clocks);
++ break;
++ case amd_pp_mem_clock:
++ ret = vega20_get_memclocks(hwmgr, clocks);
++ break;
++ case amd_pp_dcef_clock:
++ ret = vega20_get_dcefclocks(hwmgr, clocks);
++ break;
++ case amd_pp_soc_clock:
++ ret = vega20_get_socclocks(hwmgr, clocks);
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return ret;
++}
++
++static int vega20_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
++ enum amd_pp_clock_type type,
++ struct pp_clock_levels_with_voltage *clocks)
++{
++ clocks->num_levels = 0;
++
++ return 0;
++}
++
++static int vega20_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
++ void *clock_ranges)
++{
++ struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
++ Watermarks_t *table = &(data->smc_state_table.water_marks_table);
++ struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges;
++
++ if (!data->registry_data.disable_water_mark &&
++ data->smu_features[GNLD_DPM_DCEFCLK].supported &&
++ data->smu_features[GNLD_DPM_SOCCLK].supported) {
++ smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
++ data->water_marks_bitmap |= WaterMarksExist;
++ data->water_marks_bitmap &= ~WaterMarksLoaded;
++ }
++
++ return 0;
++}
++
++static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
++ enum pp_clock_type type, char *buf)
++{
++ int i, size = 0;
++ uint32_t now;
++ struct pp_clock_levels_with_latency clocks;
++ int ret = 0;
++
++ switch (type) {
++ case PP_SCLK:
++ ret = vega20_get_current_gfx_clk_freq(hwmgr, &now);
++ PP_ASSERT_WITH_CODE(!ret,
++ "Attempt to get current gfx clk Failed!",
++ return ret);
++
++ ret = vega20_get_sclks(hwmgr, &clocks);
++ PP_ASSERT_WITH_CODE(!ret,
++ "Attempt to get gfx clk levels Failed!",
++ return ret);
++
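++ /* Levels are stored as MHz * 100 (see vega20_get_sclks), so dividing
++ * by 100 prints MHz.
++ */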
++ for (i = 0; i < clocks.num_levels; i++)
++ size += sprintf(buf + size, "%d: %uMhz %s\n",
++ i, clocks.data[i].clocks_in_khz / 100,
++ (clocks.data[i].clocks_in_khz == now) ? "*" : "");
++ break;
++
++ case PP_MCLK:
++ ret = vega20_get_current_mclk_freq(hwmgr, &now);
++ PP_ASSERT_WITH_CODE(!ret,
++ "Attempt to get current mclk freq Failed!",
++ return ret);
++
++ ret = vega20_get_memclocks(hwmgr, &clocks);
++ PP_ASSERT_WITH_CODE(!ret,
++ "Attempt to get memory clk levels Failed!",
++ return ret);
++
++ for (i = 0; i < clocks.num_levels; i++)
++ size += sprintf(buf + size, "%d: %uMhz %s\n",
++ i, clocks.data[i].clocks_in_khz / 100,
++ (clocks.data[i].clocks_in_khz == now) ? "*" : "");
++ break;
++
++ case PP_PCIE:
++ break;
++
++ default:
++ break;
++ }
++ return size;
++}
++
++static int vega20_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
++{
++ struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
++ int result = 0;
++ Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table);
++
++ if ((data->water_marks_bitmap & WaterMarksExist) &&
++ !(data->water_marks_bitmap & WaterMarksLoaded)) {
++ result = vega20_copy_table_to_smc(hwmgr,
++ (uint8_t *)wm_table, TABLE_WATERMARKS);
++ PP_ASSERT_WITH_CODE(!result,
++ "Failed to update WMTABLE!",
++ return result);
++ data->water_marks_bitmap |= WaterMarksLoaded;
++ }
++
++ if ((data->water_marks_bitmap & WaterMarksExist) &&
++ data->smu_features[GNLD_DPM_DCEFCLK].supported &&
++ data->smu_features[GNLD_DPM_SOCCLK].supported) {
++ result = smum_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_NumOfDisplays,
++ hwmgr->display_config->num_display);
++ }
++
++ return result;
++}
++
++int vega20_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
++{
++ struct vega20_hwmgr *data =
++ (struct vega20_hwmgr *)(hwmgr->backend);
++ int ret = 0;
++
++ if (data->smu_features[GNLD_DPM_UVD].supported) {
++ if (data->smu_features[GNLD_DPM_UVD].enabled == enable) {
++ if (enable)
++ PP_DBG_LOG("[EnableDisableUVDDPM] feature DPM UVD already enabled!\n");
++ else
++ PP_DBG_LOG("[EnableDisableUVDDPM] feature DPM UVD already disabled!\n");
++ }
++
++ ret = vega20_enable_smc_features(hwmgr,
++ enable,
++ data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap);
++ PP_ASSERT_WITH_CODE(!ret,
++ "[EnableDisableUVDDPM] Attempt to Enable/Disable DPM UVD Failed!",
++ return ret);
++ data->smu_features[GNLD_DPM_UVD].enabled = enable;
++ }
++
++ return 0;
++}
++
++static void vega20_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
++{
++ struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
++
++ data->vce_power_gated = bgate;
++ vega20_enable_disable_vce_dpm(hwmgr, !bgate);
++}
++
++static void vega20_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
++{
++ struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
++
++ data->uvd_power_gated = bgate;
++ vega20_enable_disable_uvd_dpm(hwmgr, !bgate);
++}
++
++static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
++{
++ struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
++ struct vega20_single_dpm_table *dpm_table;
++ bool vblank_too_short = false;
++ bool disable_mclk_switching;
++ uint32_t i, latency;
++
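++ /* MCLK switching is disallowed when multiple unsynchronized displays
++ * are active; vblank_too_short is reserved here and currently always
++ * false.
++ */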
++ disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
++ !hwmgr->display_config->multi_monitor_in_sync) ||
++ vblank_too_short;
++ latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
++
++ /* gfxclk */
++ dpm_table = &(data->dpm_table.gfx_table);
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
++ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++
++ /* memclk */
++ dpm_table = &(data->dpm_table.mem_table);
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
++ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++
++ if (dpm_table->dpm_state.hard_min_level < (hwmgr->display_config->min_mem_set_clock / 100))
++ dpm_table->dpm_state.hard_min_level = hwmgr->display_config->min_mem_set_clock / 100;
++
++ if (disable_mclk_switching) {
++ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ for (i = 0; i < data->mclk_latency_table.count - 1; i++) {
++ if (data->mclk_latency_table.entries[i].latency <= latency) {
++ if (dpm_table->dpm_levels[i].value >= (hwmgr->display_config->min_mem_set_clock / 100)) {
++ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[i].value;
++ break;
++ }
++ }
++ }
++ }
++
++ if (hwmgr->display_config->nb_pstate_switch_disable)
++ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++
++ return 0;
++}
++
++static bool
++vega20_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
++{
++ struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
++ bool is_update_required = false;
++
++ if (data->display_timing.num_existing_displays !=
++ hwmgr->display_config->num_display)
++ is_update_required = true;
++
++ if (data->registry_data.gfx_clk_deep_sleep_support &&
++ (data->display_timing.min_clock_in_sr !=
++ hwmgr->display_config->min_core_set_clock_in_sr))
++ is_update_required = true;
++
++ return is_update_required;
++}
++
++static int vega20_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
++{
++ int ret = 0;
++
++ ret = vega20_disable_all_smu_features(hwmgr);
++ PP_ASSERT_WITH_CODE(!ret,
++ "[DisableDpmTasks] Failed to disable all smu features!",
++ return ret);
++
++ return 0;
++}
++
++static int vega20_power_off_asic(struct pp_hwmgr *hwmgr)
++{
++ struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
++ int result;
++
++ result = vega20_disable_dpm_tasks(hwmgr);
++ PP_ASSERT_WITH_CODE((0 == result),
++ "[PowerOffAsic] Failed to disable DPM!",
++ );
++ data->water_marks_bitmap &= ~(WaterMarksLoaded);
++
++ return result;
++}
++
++static int vega20_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
++ uint32_t virtual_addr_low,
++ uint32_t virtual_addr_hi,
++ uint32_t mc_addr_low,
++ uint32_t mc_addr_hi,
++ uint32_t size)
++{
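++ /* Hand the SMU both the system virtual address and the MC (GPU)
++ * address of the shared log buffer, along with its size.
++ */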
++ smum_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_SetSystemVirtualDramAddrHigh,
++ virtual_addr_hi);
++ smum_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_SetSystemVirtualDramAddrLow,
++ virtual_addr_low);
++ smum_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_DramLogSetDramAddrHigh,
++ mc_addr_hi);
++ smum_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_DramLogSetDramAddrLow,
++ mc_addr_low);
++ smum_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_DramLogSetDramSize,
++ size);
++ return 0;
++}
++
++static int vega20_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
++ struct PP_TemperatureRange *thermal_data)
++{
++ struct phm_ppt_v3_information *pptable_information =
++ (struct phm_ppt_v3_information *)hwmgr->pptable;
++
++ memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));
++
++ thermal_data->max = pptable_information->us_software_shutdown_temp *
++ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
++
++ return 0;
++}
++
++static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
++ /* init/fini related */
++ .backend_init =
++ vega20_hwmgr_backend_init,
++ .backend_fini =
++ vega20_hwmgr_backend_fini,
++ .asic_setup =
++ vega20_setup_asic_task,
++ .power_off_asic =
++ vega20_power_off_asic,
++ .dynamic_state_management_enable =
++ vega20_enable_dpm_tasks,
++ .dynamic_state_management_disable =
++ vega20_disable_dpm_tasks,
++ /* power state related */
++ .apply_clocks_adjust_rules =
++ vega20_apply_clocks_adjust_rules,
++ .display_config_changed =
++ vega20_display_configuration_changed_task,
++ .check_smc_update_required_for_display_configuration =
++ vega20_check_smc_update_required_for_display_configuration,
++ .notify_smc_display_config_after_ps_adjustment =
++ vega20_notify_smc_display_config_after_ps_adjustment,
++ /* export to DAL */
++ .get_sclk =
++ vega20_dpm_get_sclk,
++ .get_mclk =
++ vega20_dpm_get_mclk,
++ .get_dal_power_level =
++ vega20_get_dal_power_level,
++ .get_clock_by_type_with_latency =
++ vega20_get_clock_by_type_with_latency,
++ .get_clock_by_type_with_voltage =
++ vega20_get_clock_by_type_with_voltage,
++ .set_watermarks_for_clocks_ranges =
++ vega20_set_watermarks_for_clocks_ranges,
++ .display_clock_voltage_request =
++ vega20_display_clock_voltage_request,
++ /* UMD pstate, profile related */
++ .force_dpm_level =
++ vega20_dpm_force_dpm_level,
++ .set_power_limit =
++ vega20_set_power_limit,
++ /* for sysfs to retrieve/set gfxclk/memclk */
++ .force_clock_level =
++ vega20_force_clock_level,
++ .print_clock_levels =
++ vega20_print_clock_levels,
++ .read_sensor =
++ vega20_read_sensor,
++ /* powergate related */
++ .powergate_uvd =
++ vega20_power_gate_uvd,
++ .powergate_vce =
++ vega20_power_gate_vce,
++ /* thermal related */
++ .start_thermal_controller =
++ vega20_start_thermal_controller,
++ .stop_thermal_controller =
++ vega20_thermal_stop_thermal_controller,
++ .get_thermal_temperature_range =
++ vega20_get_thermal_temperature_range,
++ .register_irq_handlers =
++ smu9_register_irq_handlers,
++ .disable_smc_firmware_ctf =
++ vega20_thermal_disable_alert,
++ /* fan control related */
++ .get_fan_speed_info =
++ vega20_fan_ctrl_get_fan_speed_info,
++ .get_fan_speed_rpm =
++ vega20_fan_ctrl_get_fan_speed_rpm,
++ .get_fan_control_mode =
++ vega20_get_fan_control_mode,
++ /* smu memory related */
++ .notify_cac_buffer_info =
++ vega20_notify_cac_buffer_info,
++};
++
++int vega20_hwmgr_init(struct pp_hwmgr *hwmgr)
++{
++ hwmgr->hwmgr_func = &vega20_hwmgr_funcs;
++ hwmgr->pptable_func = &vega20_pptable_funcs;
++
++ return 0;
++}
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
+new file mode 100644
+index 0000000..59a59bc
+--- /dev/null
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
+@@ -0,0 +1,519 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef _VEGA20_HWMGR_H_
++#define _VEGA20_HWMGR_H_
++
++#include "hwmgr.h"
++#include "smu11_driver_if.h"
++#include "ppatomfwctrl.h"
++
++#define VEGA20_MAX_HARDWARE_POWERLEVELS 2
++
++#define WaterMarksExist 1
++#define WaterMarksLoaded 2
++
++#define VG20_PSUEDO_NUM_GFXCLK_DPM_LEVELS 8
++#define VG20_PSUEDO_NUM_SOCCLK_DPM_LEVELS 8
++#define VG20_PSUEDO_NUM_DCEFCLK_DPM_LEVELS 8
++#define VG20_PSUEDO_NUM_UCLK_DPM_LEVELS 4
++
++typedef uint32_t PP_Clock;
++
++enum {
++ GNLD_DPM_PREFETCHER = 0,
++ GNLD_DPM_GFXCLK,
++ GNLD_DPM_UCLK,
++ GNLD_DPM_SOCCLK,
++ GNLD_DPM_UVD,
++ GNLD_DPM_VCE,
++ GNLD_ULV,
++ GNLD_DPM_MP0CLK,
++ GNLD_DPM_LINK,
++ GNLD_DPM_DCEFCLK,
++ GNLD_DS_GFXCLK,
++ GNLD_DS_SOCCLK,
++ GNLD_DS_LCLK,
++ GNLD_PPT,
++ GNLD_TDC,
++ GNLD_THERMAL,
++ GNLD_GFX_PER_CU_CG,
++ GNLD_RM,
++ GNLD_DS_DCEFCLK,
++ GNLD_ACDC,
++ GNLD_VR0HOT,
++ GNLD_VR1HOT,
++ GNLD_FW_CTF,
++ GNLD_LED_DISPLAY,
++ GNLD_FAN_CONTROL,
++ GNLD_DIDT,
++ GNLD_GFXOFF,
++ GNLD_CG,
++ GNLD_DPM_FCLK,
++ GNLD_DS_FCLK,
++ GNLD_DS_MP1CLK,
++ GNLD_DS_MP0CLK,
++ GNLD_XGMI,
++
++ GNLD_FEATURES_MAX
++};
++
++#define GNLD_DPM_MAX (GNLD_DPM_DCEFCLK + 1)
++
++#define SMC_DPM_FEATURES 0x30F
++
++struct smu_features {
++ bool supported;
++ bool enabled;
++ bool allowed;
++ uint32_t smu_feature_id;
++ uint64_t smu_feature_bitmap;
++};
++
++struct vega20_performance_level {
++ uint32_t soc_clock;
++ uint32_t gfx_clock;
++ uint32_t mem_clock;
++};
++
++struct vega20_bacos {
++ uint32_t baco_flags;
++ /* struct vega20_performance_level performance_level; */
++};
++
++struct vega20_uvd_clocks {
++ uint32_t vclk;
++ uint32_t dclk;
++};
++
++struct vega20_vce_clocks {
++ uint32_t evclk;
++ uint32_t ecclk;
++};
++
++struct vega20_power_state {
++ uint32_t magic;
++ struct vega20_uvd_clocks uvd_clks;
++ struct vega20_vce_clocks vce_clks;
++ uint16_t performance_level_count;
++ bool dc_compatible;
++ uint32_t sclk_threshold;
++ struct vega20_performance_level performance_levels[VEGA20_MAX_HARDWARE_POWERLEVELS];
++};
++
++struct vega20_dpm_level {
++ bool enabled;
++ uint32_t value;
++ uint32_t param1;
++};
++
++#define VEGA20_MAX_DEEPSLEEP_DIVIDER_ID 5
++#define MAX_REGULAR_DPM_NUMBER 16
++#define MAX_PCIE_CONF 2
++#define VEGA20_MINIMUM_ENGINE_CLOCK 2500
++
++struct vega20_max_sustainable_clocks {
++ PP_Clock display_clock;
++ PP_Clock phy_clock;
++ PP_Clock pixel_clock;
++ PP_Clock uclock;
++ PP_Clock dcef_clock;
++ PP_Clock soc_clock;
++};
++
++struct vega20_dpm_state {
++ uint32_t soft_min_level;
++ uint32_t soft_max_level;
++ uint32_t hard_min_level;
++ uint32_t hard_max_level;
++};
++
++struct vega20_single_dpm_table {
++ uint32_t count;
++ struct vega20_dpm_state dpm_state;
++ struct vega20_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER];
++};
++
++struct vega20_odn_dpm_control {
++ uint32_t count;
++ uint32_t entries[MAX_REGULAR_DPM_NUMBER];
++};
++
++struct vega20_pcie_table {
++ uint16_t count;
++ uint8_t pcie_gen[MAX_PCIE_CONF];
++ uint8_t pcie_lane[MAX_PCIE_CONF];
++ uint32_t lclk[MAX_PCIE_CONF];
++};
++
++struct vega20_dpm_table {
++ struct vega20_single_dpm_table soc_table;
++ struct vega20_single_dpm_table gfx_table;
++ struct vega20_single_dpm_table mem_table;
++ struct vega20_single_dpm_table eclk_table;
++ struct vega20_single_dpm_table vclk_table;
++ struct vega20_single_dpm_table dclk_table;
++ struct vega20_single_dpm_table dcef_table;
++ struct vega20_single_dpm_table pixel_table;
++ struct vega20_single_dpm_table display_table;
++ struct vega20_single_dpm_table phy_table;
++ struct vega20_single_dpm_table fclk_table;
++ struct vega20_pcie_table pcie_table;
++};
++
++#define VEGA20_MAX_LEAKAGE_COUNT 8
++struct vega20_leakage_voltage {
++ uint16_t count;
++ uint16_t leakage_id[VEGA20_MAX_LEAKAGE_COUNT];
++ uint16_t actual_voltage[VEGA20_MAX_LEAKAGE_COUNT];
++};
++
++struct vega20_display_timing {
++ uint32_t min_clock_in_sr;
++ uint32_t num_existing_displays;
++};
++
++struct vega20_dpmlevel_enable_mask {
++ uint32_t uvd_dpm_enable_mask;
++ uint32_t vce_dpm_enable_mask;
++ uint32_t samu_dpm_enable_mask;
++ uint32_t sclk_dpm_enable_mask;
++ uint32_t mclk_dpm_enable_mask;
++};
++
++struct vega20_vbios_boot_state {
++ bool bsoc_vddc_lock;
++ uint8_t uc_cooling_id;
++ uint16_t vddc;
++ uint16_t vddci;
++ uint16_t mvddc;
++ uint16_t vdd_gfx;
++ uint32_t gfx_clock;
++ uint32_t mem_clock;
++ uint32_t soc_clock;
++ uint32_t dcef_clock;
++ uint32_t eclock;
++ uint32_t dclock;
++ uint32_t vclock;
++};
++
++#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
++#define DPMTABLE_OD_UPDATE_MCLK 0x00000002
++#define DPMTABLE_UPDATE_SCLK 0x00000004
++#define DPMTABLE_UPDATE_MCLK 0x00000008
++#define DPMTABLE_OD_UPDATE_VDDC 0x00000010
++#define DPMTABLE_OD_UPDATE_SCLK_MASK 0x00000020
++#define DPMTABLE_OD_UPDATE_MCLK_MASK 0x00000040
++
++/* To determine if sclk and mclk are in overdrive state */
++#define SCLK_MASK_OVERDRIVE_ENABLED 0x00000008
++#define MCLK_MASK_OVERDRIVE_ENABLED 0x00000010
++#define SOCCLK_OVERDRIVE_ENABLED 0x00000020
++
++struct vega20_smc_state_table {
++ uint32_t soc_boot_level;
++ uint32_t gfx_boot_level;
++ uint32_t dcef_boot_level;
++ uint32_t mem_boot_level;
++ uint32_t uvd_boot_level;
++ uint32_t vce_boot_level;
++ uint32_t gfx_max_level;
++ uint32_t mem_max_level;
++ uint8_t vr_hot_gpio;
++ uint8_t ac_dc_gpio;
++ uint8_t therm_out_gpio;
++ uint8_t therm_out_polarity;
++ uint8_t therm_out_mode;
++ PPTable_t pp_table;
++ Watermarks_t water_marks_table;
++ AvfsDebugTable_t avfs_debug_table;
++ AvfsFuseOverride_t avfs_fuse_override_table;
++ SmuMetrics_t smu_metrics;
++ DriverSmuConfig_t driver_smu_config;
++ DpmActivityMonitorCoeffInt_t dpm_activity_monitor_coeffint;
++ OverDriveTable_t overdrive_table;
++};
++
++struct vega20_mclk_latency_entries {
++ uint32_t frequency;
++ uint32_t latency;
++};
++
++struct vega20_mclk_latency_table {
++ uint32_t count;
++ struct vega20_mclk_latency_entries entries[MAX_REGULAR_DPM_NUMBER];
++};
++
++struct vega20_registry_data {
++ uint64_t disallowed_features;
++ uint8_t ac_dc_switch_gpio_support;
++ uint8_t acg_loop_support;
++ uint8_t clock_stretcher_support;
++ uint8_t db_ramping_support;
++ uint8_t didt_mode;
++ uint8_t didt_support;
++ uint8_t edc_didt_support;
++ uint8_t force_dpm_high;
++ uint8_t fuzzy_fan_control_support;
++ uint8_t mclk_dpm_key_disabled;
++ uint8_t od_state_in_dc_support;
++ uint8_t pcie_lane_override;
++ uint8_t pcie_speed_override;
++ uint32_t pcie_clock_override;
++ uint8_t pcie_dpm_key_disabled;
++ uint8_t dcefclk_dpm_key_disabled;
++ uint8_t prefetcher_dpm_key_disabled;
++ uint8_t quick_transition_support;
++ uint8_t regulator_hot_gpio_support;
++ uint8_t master_deep_sleep_support;
++ uint8_t gfx_clk_deep_sleep_support;
++ uint8_t sclk_deep_sleep_support;
++ uint8_t lclk_deep_sleep_support;
++ uint8_t dce_fclk_deep_sleep_support;
++ uint8_t sclk_dpm_key_disabled;
++ uint8_t sclk_throttle_low_notification;
++ uint8_t skip_baco_hardware;
++ uint8_t socclk_dpm_key_disabled;
++ uint8_t sq_ramping_support;
++ uint8_t tcp_ramping_support;
++ uint8_t td_ramping_support;
++ uint8_t dbr_ramping_support;
++ uint8_t gc_didt_support;
++ uint8_t psm_didt_support;
++ uint8_t thermal_support;
++ uint8_t fw_ctf_enabled;
++ uint8_t led_dpm_enabled;
++ uint8_t fan_control_support;
++ uint8_t ulv_support;
++ uint8_t odn_feature_enable;
++ uint8_t disable_water_mark;
++ uint8_t disable_workload_policy;
++ uint32_t force_workload_policy_mask;
++ uint8_t disable_3d_fs_detection;
++ uint8_t disable_pp_tuning;
++ uint8_t disable_xlpp_tuning;
++ uint32_t perf_ui_tuning_profile_turbo;
++ uint32_t perf_ui_tuning_profile_powerSave;
++ uint32_t perf_ui_tuning_profile_xl;
++ uint16_t zrpm_stop_temp;
++ uint16_t zrpm_start_temp;
++ uint32_t stable_pstate_sclk_dpm_percentage;
++ uint8_t fps_support;
++ uint8_t vr0hot;
++ uint8_t vr1hot;
++ uint8_t disable_auto_wattman;
++ uint32_t auto_wattman_debug;
++ uint32_t auto_wattman_sample_period;
++ uint8_t auto_wattman_threshold;
++ uint8_t log_avfs_param;
++ uint8_t enable_enginess;
++ uint8_t custom_fan_support;
++ uint8_t disable_pcc_limit_control;
++ uint8_t gfxoff_controlled_by_driver;
++};
++
++struct vega20_odn_clock_voltage_dependency_table {
++ uint32_t count;
++ struct phm_ppt_v1_clock_voltage_dependency_record
++ entries[MAX_REGULAR_DPM_NUMBER];
++};
++
++struct vega20_odn_dpm_table {
++ struct vega20_odn_dpm_control control_gfxclk_state;
++ struct vega20_odn_dpm_control control_memclk_state;
++ struct phm_odn_clock_levels odn_core_clock_dpm_levels;
++ struct phm_odn_clock_levels odn_memory_clock_dpm_levels;
++ struct vega20_odn_clock_voltage_dependency_table vdd_dependency_on_sclk;
++ struct vega20_odn_clock_voltage_dependency_table vdd_dependency_on_mclk;
++ struct vega20_odn_clock_voltage_dependency_table vdd_dependency_on_socclk;
++ uint32_t odn_mclk_min_limit;
++};
++
++struct vega20_odn_fan_table {
++ uint32_t target_fan_speed;
++ uint32_t target_temperature;
++ uint32_t min_performance_clock;
++ uint32_t min_fan_limit;
++ bool force_fan_pwm;
++};
++
++struct vega20_odn_temp_table {
++ uint16_t target_operating_temp;
++ uint16_t default_target_operating_temp;
++ uint16_t operating_temp_min_limit;
++ uint16_t operating_temp_max_limit;
++ uint16_t operating_temp_step;
++};
++
++struct vega20_odn_data {
++ uint32_t apply_overdrive_next_settings_mask;
++ uint32_t overdrive_next_state;
++ uint32_t overdrive_next_capabilities;
++ uint32_t odn_sclk_dpm_enable_mask;
++ uint32_t odn_mclk_dpm_enable_mask;
++ struct vega20_odn_dpm_table odn_dpm_table;
++ struct vega20_odn_fan_table odn_fan_table;
++ struct vega20_odn_temp_table odn_temp_table;
++};
++
++struct vega20_hwmgr {
++ struct vega20_dpm_table dpm_table;
++ struct vega20_dpm_table golden_dpm_table;
++ struct vega20_registry_data registry_data;
++ struct vega20_vbios_boot_state vbios_boot_state;
++ struct vega20_mclk_latency_table mclk_latency_table;
++
++ struct vega20_max_sustainable_clocks max_sustainable_clocks;
++
++ struct vega20_leakage_voltage vddc_leakage;
++
++ uint32_t vddc_control;
++ struct pp_atomfwctrl_voltage_table vddc_voltage_table;
++ uint32_t mvdd_control;
++ struct pp_atomfwctrl_voltage_table mvdd_voltage_table;
++ uint32_t vddci_control;
++ struct pp_atomfwctrl_voltage_table vddci_voltage_table;
++
++ uint32_t active_auto_throttle_sources;
++ struct vega20_bacos bacos;
++
++ /* ---- General data ---- */
++ uint8_t need_update_dpm_table;
++
++ bool cac_enabled;
++ bool battery_state;
++ bool is_tlu_enabled;
++ bool avfs_exist;
++
++ uint32_t low_sclk_interrupt_threshold;
++
++ uint32_t total_active_cus;
++
++ uint32_t water_marks_bitmap;
++
++ struct vega20_display_timing display_timing;
++
++ /* ---- Vega20 Dyn Register Settings ---- */
++
++ uint32_t debug_settings;
++ uint32_t lowest_uclk_reserved_for_ulv;
++ uint32_t gfxclk_average_alpha;
++ uint32_t socclk_average_alpha;
++ uint32_t uclk_average_alpha;
++ uint32_t gfx_activity_average_alpha;
++ uint32_t display_voltage_mode;
++ uint32_t dcef_clk_quad_eqn_a;
++ uint32_t dcef_clk_quad_eqn_b;
++ uint32_t dcef_clk_quad_eqn_c;
++ uint32_t disp_clk_quad_eqn_a;
++ uint32_t disp_clk_quad_eqn_b;
++ uint32_t disp_clk_quad_eqn_c;
++ uint32_t pixel_clk_quad_eqn_a;
++ uint32_t pixel_clk_quad_eqn_b;
++ uint32_t pixel_clk_quad_eqn_c;
++ uint32_t phy_clk_quad_eqn_a;
++ uint32_t phy_clk_quad_eqn_b;
++ uint32_t phy_clk_quad_eqn_c;
++
++ /* ---- Thermal Temperature Setting ---- */
++ struct vega20_dpmlevel_enable_mask dpm_level_enable_mask;
++
++ /* ---- Power Gating States ---- */
++ bool uvd_power_gated;
++ bool vce_power_gated;
++ bool samu_power_gated;
++ bool need_long_memory_training;
++
++ /* Internal settings to apply the application power optimization parameters */
++ bool apply_optimized_settings;
++ uint32_t disable_dpm_mask;
++
++ /* ---- Overdrive next setting ---- */
++ struct vega20_odn_data odn_data;
++
++ /* ---- Workload Mask ---- */
++ uint32_t workload_mask;
++
++ /* ---- SMU9 ---- */
++ uint32_t smu_version;
++ struct smu_features smu_features[GNLD_FEATURES_MAX];
++ struct vega20_smc_state_table smc_state_table;
++
++ /* ---- Gfxoff ---- */
++ bool gfxoff_allowed;
++ uint32_t counter_gfxoff;
++};
++
++#define VEGA20_DPM2_NEAR_TDP_DEC 10
++#define VEGA20_DPM2_ABOVE_SAFE_INC 5
++#define VEGA20_DPM2_BELOW_SAFE_INC 20
++
++#define VEGA20_DPM2_LTA_WINDOW_SIZE 7
++
++#define VEGA20_DPM2_LTS_TRUNCATE 0
++
++#define VEGA20_DPM2_TDP_SAFE_LIMIT_PERCENT 80
++
++#define VEGA20_DPM2_MAXPS_PERCENT_M 90
++#define VEGA20_DPM2_MAXPS_PERCENT_H 90
++
++#define VEGA20_DPM2_PWREFFICIENCYRATIO_MARGIN 50
++
++#define VEGA20_DPM2_SQ_RAMP_MAX_POWER 0x3FFF
++#define VEGA20_DPM2_SQ_RAMP_MIN_POWER 0x12
++#define VEGA20_DPM2_SQ_RAMP_MAX_POWER_DELTA 0x15
++#define VEGA20_DPM2_SQ_RAMP_SHORT_TERM_INTERVAL_SIZE 0x1E
++#define VEGA20_DPM2_SQ_RAMP_LONG_TERM_INTERVAL_RATIO 0xF
++
++#define VEGA20_VOLTAGE_CONTROL_NONE 0x0
++#define VEGA20_VOLTAGE_CONTROL_BY_GPIO 0x1
++#define VEGA20_VOLTAGE_CONTROL_BY_SVID2 0x2
++#define VEGA20_VOLTAGE_CONTROL_MERGED 0x3
++/* To convert to Q8.8 format for firmware */
++#define VEGA20_Q88_FORMAT_CONVERSION_UNIT 256
++
++#define VEGA20_UNUSED_GPIO_PIN 0x7F
++
++#define VEGA20_THERM_OUT_MODE_DISABLE 0x0
++#define VEGA20_THERM_OUT_MODE_THERM_ONLY 0x1
++#define VEGA20_THERM_OUT_MODE_THERM_VRHOT 0x2
++
++#define PPVEGA20_VEGA20DISPLAYVOLTAGEMODE_DFLT 0xffffffff
++#define PPREGKEY_VEGA20QUADRATICEQUATION_DFLT 0xffffffff
++
++#define PPVEGA20_VEGA20GFXCLKAVERAGEALPHA_DFLT 25 /* 10% * 255 = 25 */
++#define PPVEGA20_VEGA20SOCCLKAVERAGEALPHA_DFLT 25 /* 10% * 255 = 25 */
++#define PPVEGA20_VEGA20UCLKCLKAVERAGEALPHA_DFLT 25 /* 10% * 255 = 25 */
++#define PPVEGA20_VEGA20GFXACTIVITYAVERAGEALPHA_DFLT 25 /* 10% * 255 = 25 */
++#define PPVEGA20_VEGA20LOWESTUCLKRESERVEDFORULV_DFLT 0xffffffff
++
++#define VEGA20_UMD_PSTATE_GFXCLK_LEVEL 0x3
++#define VEGA20_UMD_PSTATE_SOCCLK_LEVEL 0x3
++#define VEGA20_UMD_PSTATE_MCLK_LEVEL 0x2
++#define VEGA20_UMD_PSTATE_UVDCLK_LEVEL 0x3
++#define VEGA20_UMD_PSTATE_VCEMCLK_LEVEL 0x3
++
++#endif /* _VEGA20_HWMGR_H_ */
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.c
+new file mode 100644
+index 0000000..a0bfb65
+--- /dev/null
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.c
+@@ -0,0 +1,70 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "hwmgr.h"
++#include "vega20_hwmgr.h"
++#include "vega20_powertune.h"
++#include "vega20_smumgr.h"
++#include "vega20_ppsmc.h"
++#include "vega20_inc.h"
++#include "pp_debug.h"
++
++int vega20_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
++{
++ struct vega20_hwmgr *data =
++ (struct vega20_hwmgr *)(hwmgr->backend);
++
++ if (data->smu_features[GNLD_PPT].enabled)
++ return smum_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_SetPptLimit, n);
++
++ return 0;
++}
++
++int vega20_validate_power_level_request(struct pp_hwmgr *hwmgr,
++ uint32_t tdp_percentage_adjustment, uint32_t tdp_absolute_value_adjustment)
++{
++ return (tdp_percentage_adjustment > hwmgr->platform_descriptor.TDPLimit) ? -1 : 0;
++}
++
++static int vega20_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr,
++ uint32_t adjust_percent)
++{
++ return smum_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_OverDriveSetPercentage, adjust_percent);
++}
++
++int vega20_power_control_set_level(struct pp_hwmgr *hwmgr)
++{
++ int adjust_percent, result = 0;
++
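++ /* Apply the platform TDP adjustment as a signed overdrive percentage;
++ * the polarity flag selects whether the limit is raised or lowered.
++ */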
++ if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
++ adjust_percent =
++ hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
++ hwmgr->platform_descriptor.TDPAdjustment :
++ (-1 * hwmgr->platform_descriptor.TDPAdjustment);
++ result = vega20_set_overdrive_target_percentage(hwmgr,
++ (uint32_t)adjust_percent);
++ }
++ return result;
++}
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.h
+new file mode 100644
+index 0000000..d68c734
+--- /dev/null
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.h
+@@ -0,0 +1,32 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++#ifndef _VEGA20_POWERTUNE_H_
++#define _VEGA20_POWERTUNE_H_
++
++int vega20_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
++int vega20_power_control_set_level(struct pp_hwmgr *hwmgr);
++int vega20_validate_power_level_request(struct pp_hwmgr *hwmgr,
++ uint32_t tdp_percentage_adjustment,
++ uint32_t tdp_absolute_value_adjustment);
++#endif /* _VEGA20_POWERTUNE_H_ */
++
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
+new file mode 100644
+index 0000000..379ac3d
+--- /dev/null
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
+@@ -0,0 +1,919 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/fb.h>
++
++#include "smu11_driver_if.h"
++#include "vega20_processpptables.h"
++#include "ppatomfwctrl.h"
++#include "atomfirmware.h"
++#include "pp_debug.h"
++#include "cgs_common.h"
++#include "vega20_pptable.h"
++
++static void set_hw_cap(struct pp_hwmgr *hwmgr, bool enable,
++ enum phm_platform_caps cap)
++{
++ if (enable)
++ phm_cap_set(hwmgr->platform_descriptor.platformCaps, cap);
++ else
++ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, cap);
++}
++
++static const void *get_powerplay_table(struct pp_hwmgr *hwmgr)
++{
++ int index = GetIndexIntoMasterDataTable(powerplayinfo);
++
++ u16 size;
++ u8 frev, crev;
++ const void *table_address = hwmgr->soft_pp_table;
++
++ if (!table_address) {
++ table_address = (ATOM_Vega20_POWERPLAYTABLE *)
++ smu_atom_get_data_table(hwmgr->adev, index,
++ &size, &frev, &crev);
++
++ hwmgr->soft_pp_table = table_address;
++ hwmgr->soft_pp_table_size = size;
++ }
++
++ return table_address;
++}
++
++#if 0
++static void dump_pptable(PPTable_t *pptable)
++{
++ int i;
++
++ pr_info("Version = 0x%08x\n", pptable->Version);
++
++ pr_info("FeaturesToRun[0] = 0x%08x\n", pptable->FeaturesToRun[0]);
++ pr_info("FeaturesToRun[1] = 0x%08x\n", pptable->FeaturesToRun[1]);
++
++ pr_info("SocketPowerLimitAc0 = %d\n", pptable->SocketPowerLimitAc0);
++ pr_info("SocketPowerLimitAc0Tau = %d\n", pptable->SocketPowerLimitAc0Tau);
++ pr_info("SocketPowerLimitAc1 = %d\n", pptable->SocketPowerLimitAc1);
++ pr_info("SocketPowerLimitAc1Tau = %d\n", pptable->SocketPowerLimitAc1Tau);
++ pr_info("SocketPowerLimitAc2 = %d\n", pptable->SocketPowerLimitAc2);
++ pr_info("SocketPowerLimitAc2Tau = %d\n", pptable->SocketPowerLimitAc2Tau);
++ pr_info("SocketPowerLimitAc3 = %d\n", pptable->SocketPowerLimitAc3);
++ pr_info("SocketPowerLimitAc3Tau = %d\n", pptable->SocketPowerLimitAc3Tau);
++ pr_info("SocketPowerLimitDc = %d\n", pptable->SocketPowerLimitDc);
++ pr_info("SocketPowerLimitDcTau = %d\n", pptable->SocketPowerLimitDcTau);
++ pr_info("TdcLimitSoc = %d\n", pptable->TdcLimitSoc);
++ pr_info("TdcLimitSocTau = %d\n", pptable->TdcLimitSocTau);
++ pr_info("TdcLimitGfx = %d\n", pptable->TdcLimitGfx);
++ pr_info("TdcLimitGfxTau = %d\n", pptable->TdcLimitGfxTau);
++
++ pr_info("TedgeLimit = %d\n", pptable->TedgeLimit);
++ pr_info("ThotspotLimit = %d\n", pptable->ThotspotLimit);
++ pr_info("ThbmLimit = %d\n", pptable->ThbmLimit);
++ pr_info("Tvr_gfxLimit = %d\n", pptable->Tvr_gfxLimit);
++ pr_info("Tvr_memLimit = %d\n", pptable->Tvr_memLimit);
++ pr_info("Tliquid1Limit = %d\n", pptable->Tliquid1Limit);
++ pr_info("Tliquid2Limit = %d\n", pptable->Tliquid2Limit);
++ pr_info("TplxLimit = %d\n", pptable->TplxLimit);
++ pr_info("FitLimit = %d\n", pptable->FitLimit);
++
++ pr_info("PpmPowerLimit = %d\n", pptable->PpmPowerLimit);
++ pr_info("PpmTemperatureThreshold = %d\n", pptable->PpmTemperatureThreshold);
++
++ pr_info("MemoryOnPackage = 0x%02x\n", pptable->MemoryOnPackage);
++ pr_info("padding8_limits[0] = 0x%02x\n", pptable->padding8_limits[0]);
++ pr_info("padding8_limits[1] = 0x%02x\n", pptable->padding8_limits[1]);
++ pr_info("padding8_limits[2] = 0x%02x\n", pptable->padding8_limits[2]);
++
++ pr_info("UlvVoltageOffsetSoc = %d\n", pptable->UlvVoltageOffsetSoc);
++ pr_info("UlvVoltageOffsetGfx = %d\n", pptable->UlvVoltageOffsetGfx);
++
++ pr_info("UlvSmnclkDid = %d\n", pptable->UlvSmnclkDid);
++ pr_info("UlvMp1clkDid = %d\n", pptable->UlvMp1clkDid);
++ pr_info("UlvGfxclkBypass = %d\n", pptable->UlvGfxclkBypass);
++ pr_info("Padding234 = 0x%02x\n", pptable->Padding234);
++
++ pr_info("MinVoltageGfx = %d\n", pptable->MinVoltageGfx);
++ pr_info("MinVoltageSoc = %d\n", pptable->MinVoltageSoc);
++ pr_info("MaxVoltageGfx = %d\n", pptable->MaxVoltageGfx);
++ pr_info("MaxVoltageSoc = %d\n", pptable->MaxVoltageSoc);
++
++ pr_info("LoadLineResistanceGfx = %d\n", pptable->LoadLineResistanceGfx);
++ pr_info("LoadLineResistanceSoc = %d\n", pptable->LoadLineResistanceSoc);
++
++ pr_info("[PPCLK_GFXCLK]\n"
++ " .VoltageMode = 0x%02x\n"
++ " .SnapToDiscrete = 0x%02x\n"
++ " .NumDiscreteLevels = 0x%02x\n"
++ " .padding = 0x%02x\n"
++ " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
++ " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n",
++ pptable->DpmDescriptor[PPCLK_GFXCLK].VoltageMode,
++ pptable->DpmDescriptor[PPCLK_GFXCLK].SnapToDiscrete,
++ pptable->DpmDescriptor[PPCLK_GFXCLK].NumDiscreteLevels,
++ pptable->DpmDescriptor[PPCLK_GFXCLK].padding,
++ pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.m,
++ pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.b,
++ pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.a,
++ pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.b,
++ pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.c);
++
++ pr_info("[PPCLK_VCLK]\n"
++ " .VoltageMode = 0x%02x\n"
++ " .SnapToDiscrete = 0x%02x\n"
++ " .NumDiscreteLevels = 0x%02x\n"
++ " .padding = 0x%02x\n"
++ " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
++ " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n",
++ pptable->DpmDescriptor[PPCLK_VCLK].VoltageMode,
++ pptable->DpmDescriptor[PPCLK_VCLK].SnapToDiscrete,
++ pptable->DpmDescriptor[PPCLK_VCLK].NumDiscreteLevels,
++ pptable->DpmDescriptor[PPCLK_VCLK].padding,
++ pptable->DpmDescriptor[PPCLK_VCLK].ConversionToAvfsClk.m,
++ pptable->DpmDescriptor[PPCLK_VCLK].ConversionToAvfsClk.b,
++ pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.a,
++ pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.b,
++ pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.c);
++
++ pr_info("[PPCLK_DCLK]\n"
++ " .VoltageMode = 0x%02x\n"
++ " .SnapToDiscrete = 0x%02x\n"
++ " .NumDiscreteLevels = 0x%02x\n"
++ " .padding = 0x%02x\n"
++ " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
++ " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n",
++ pptable->DpmDescriptor[PPCLK_DCLK].VoltageMode,
++ pptable->DpmDescriptor[PPCLK_DCLK].SnapToDiscrete,
++ pptable->DpmDescriptor[PPCLK_DCLK].NumDiscreteLevels,
++ pptable->DpmDescriptor[PPCLK_DCLK].padding,
++ pptable->DpmDescriptor[PPCLK_DCLK].ConversionToAvfsClk.m,
++ pptable->DpmDescriptor[PPCLK_DCLK].ConversionToAvfsClk.b,
++ pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.a,
++ pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.b,
++ pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.c);
++
++ pr_info("[PPCLK_ECLK]\n"
++ " .VoltageMode = 0x%02x\n"
++ " .SnapToDiscrete = 0x%02x\n"
++ " .NumDiscreteLevels = 0x%02x\n"
++ " .padding = 0x%02x\n"
++ " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
++ " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n",
++ pptable->DpmDescriptor[PPCLK_ECLK].VoltageMode,
++ pptable->DpmDescriptor[PPCLK_ECLK].SnapToDiscrete,
++ pptable->DpmDescriptor[PPCLK_ECLK].NumDiscreteLevels,
++ pptable->DpmDescriptor[PPCLK_ECLK].padding,
++ pptable->DpmDescriptor[PPCLK_ECLK].ConversionToAvfsClk.m,
++ pptable->DpmDescriptor[PPCLK_ECLK].ConversionToAvfsClk.b,
++ pptable->DpmDescriptor[PPCLK_ECLK].SsCurve.a,
++ pptable->DpmDescriptor[PPCLK_ECLK].SsCurve.b,
++ pptable->DpmDescriptor[PPCLK_ECLK].SsCurve.c);
++
++ pr_info("[PPCLK_SOCCLK]\n"
++ " .VoltageMode = 0x%02x\n"
++ " .SnapToDiscrete = 0x%02x\n"
++ " .NumDiscreteLevels = 0x%02x\n"
++ " .padding = 0x%02x\n"
++ " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
++ " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n",
++ pptable->DpmDescriptor[PPCLK_SOCCLK].VoltageMode,
++ pptable->DpmDescriptor[PPCLK_SOCCLK].SnapToDiscrete,
++ pptable->DpmDescriptor[PPCLK_SOCCLK].NumDiscreteLevels,
++ pptable->DpmDescriptor[PPCLK_SOCCLK].padding,
++ pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.m,
++ pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.b,
++ pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.a,
++ pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.b,
++ pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.c);
++
++ pr_info("[PPCLK_UCLK]\n"
++ " .VoltageMode = 0x%02x\n"
++ " .SnapToDiscrete = 0x%02x\n"
++ " .NumDiscreteLevels = 0x%02x\n"
++ " .padding = 0x%02x\n"
++ " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
++ " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n",
++ pptable->DpmDescriptor[PPCLK_UCLK].VoltageMode,
++ pptable->DpmDescriptor[PPCLK_UCLK].SnapToDiscrete,
++ pptable->DpmDescriptor[PPCLK_UCLK].NumDiscreteLevels,
++ pptable->DpmDescriptor[PPCLK_UCLK].padding,
++ pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.m,
++ pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.b,
++ pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.a,
++ pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.b,
++ pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.c);
++
++ pr_info("[PPCLK_DCEFCLK]\n"
++ " .VoltageMode = 0x%02x\n"
++ " .SnapToDiscrete = 0x%02x\n"
++ " .NumDiscreteLevels = 0x%02x\n"
++ " .padding = 0x%02x\n"
++ " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
++ " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n",
++ pptable->DpmDescriptor[PPCLK_DCEFCLK].VoltageMode,
++ pptable->DpmDescriptor[PPCLK_DCEFCLK].SnapToDiscrete,
++ pptable->DpmDescriptor[PPCLK_DCEFCLK].NumDiscreteLevels,
++ pptable->DpmDescriptor[PPCLK_DCEFCLK].padding,
++ pptable->DpmDescriptor[PPCLK_DCEFCLK].ConversionToAvfsClk.m,
++ pptable->DpmDescriptor[PPCLK_DCEFCLK].ConversionToAvfsClk.b,
++ pptable->DpmDescriptor[PPCLK_DCEFCLK].SsCurve.a,
++ pptable->DpmDescriptor[PPCLK_DCEFCLK].SsCurve.b,
++ pptable->DpmDescriptor[PPCLK_DCEFCLK].SsCurve.c);
++
++ pr_info("[PPCLK_DISPCLK]\n"
++ " .VoltageMode = 0x%02x\n"
++ " .SnapToDiscrete = 0x%02x\n"
++ " .NumDiscreteLevels = 0x%02x\n"
++ " .padding = 0x%02x\n"
++ " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
++ " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n",
++ pptable->DpmDescriptor[PPCLK_DISPCLK].VoltageMode,
++ pptable->DpmDescriptor[PPCLK_DISPCLK].SnapToDiscrete,
++ pptable->DpmDescriptor[PPCLK_DISPCLK].NumDiscreteLevels,
++ pptable->DpmDescriptor[PPCLK_DISPCLK].padding,
++ pptable->DpmDescriptor[PPCLK_DISPCLK].ConversionToAvfsClk.m,
++ pptable->DpmDescriptor[PPCLK_DISPCLK].ConversionToAvfsClk.b,
++ pptable->DpmDescriptor[PPCLK_DISPCLK].SsCurve.a,
++ pptable->DpmDescriptor[PPCLK_DISPCLK].SsCurve.b,
++ pptable->DpmDescriptor[PPCLK_DISPCLK].SsCurve.c);
++
++ pr_info("[PPCLK_PIXCLK]\n"
++ " .VoltageMode = 0x%02x\n"
++ " .SnapToDiscrete = 0x%02x\n"
++ " .NumDiscreteLevels = 0x%02x\n"
++ " .padding = 0x%02x\n"
++ " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
++ " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n",
++ pptable->DpmDescriptor[PPCLK_PIXCLK].VoltageMode,
++ pptable->DpmDescriptor[PPCLK_PIXCLK].SnapToDiscrete,
++ pptable->DpmDescriptor[PPCLK_PIXCLK].NumDiscreteLevels,
++ pptable->DpmDescriptor[PPCLK_PIXCLK].padding,
++ pptable->DpmDescriptor[PPCLK_PIXCLK].ConversionToAvfsClk.m,
++ pptable->DpmDescriptor[PPCLK_PIXCLK].ConversionToAvfsClk.b,
++ pptable->DpmDescriptor[PPCLK_PIXCLK].SsCurve.a,
++ pptable->DpmDescriptor[PPCLK_PIXCLK].SsCurve.b,
++ pptable->DpmDescriptor[PPCLK_PIXCLK].SsCurve.c);
++
++ pr_info("[PPCLK_PHYCLK]\n"
++ " .VoltageMode = 0x%02x\n"
++ " .SnapToDiscrete = 0x%02x\n"
++ " .NumDiscreteLevels = 0x%02x\n"
++ " .padding = 0x%02x\n"
++ " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
++ " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n",
++ pptable->DpmDescriptor[PPCLK_PHYCLK].VoltageMode,
++ pptable->DpmDescriptor[PPCLK_PHYCLK].SnapToDiscrete,
++ pptable->DpmDescriptor[PPCLK_PHYCLK].NumDiscreteLevels,
++ pptable->DpmDescriptor[PPCLK_PHYCLK].padding,
++ pptable->DpmDescriptor[PPCLK_PHYCLK].ConversionToAvfsClk.m,
++ pptable->DpmDescriptor[PPCLK_PHYCLK].ConversionToAvfsClk.b,
++ pptable->DpmDescriptor[PPCLK_PHYCLK].SsCurve.a,
++ pptable->DpmDescriptor[PPCLK_PHYCLK].SsCurve.b,
++ pptable->DpmDescriptor[PPCLK_PHYCLK].SsCurve.c);
++
++ pr_info("[PPCLK_FCLK]\n"
++ " .VoltageMode = 0x%02x\n"
++ " .SnapToDiscrete = 0x%02x\n"
++ " .NumDiscreteLevels = 0x%02x\n"
++ " .padding = 0x%02x\n"
++ " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
++ " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n",
++ pptable->DpmDescriptor[PPCLK_FCLK].VoltageMode,
++ pptable->DpmDescriptor[PPCLK_FCLK].SnapToDiscrete,
++ pptable->DpmDescriptor[PPCLK_FCLK].NumDiscreteLevels,
++ pptable->DpmDescriptor[PPCLK_FCLK].padding,
++ pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.m,
++ pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.b,
++ pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.a,
++ pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.b,
++ pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.c);
++
++
++ pr_info("FreqTableGfx\n");
++ for (i = 0; i < NUM_GFXCLK_DPM_LEVELS; i++)
++ pr_info(" .[%02d] = %d\n", i, pptable->FreqTableGfx[i]);
++
++ pr_info("FreqTableVclk\n");
++ for (i = 0; i < NUM_VCLK_DPM_LEVELS; i++)
++ pr_info(" .[%02d] = %d\n", i, pptable->FreqTableVclk[i]);
++
++ pr_info("FreqTableDclk\n");
++ for (i = 0; i < NUM_DCLK_DPM_LEVELS; i++)
++ pr_info(" .[%02d] = %d\n", i, pptable->FreqTableDclk[i]);
++
++ pr_info("FreqTableEclk\n");
++ for (i = 0; i < NUM_ECLK_DPM_LEVELS; i++)
++ pr_info(" .[%02d] = %d\n", i, pptable->FreqTableEclk[i]);
++
++ pr_info("FreqTableSocclk\n");
++ for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++)
++ pr_info(" .[%02d] = %d\n", i, pptable->FreqTableSocclk[i]);
++
++ pr_info("FreqTableUclk\n");
++ for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++)
++ pr_info(" .[%02d] = %d\n", i, pptable->FreqTableUclk[i]);
++
++ pr_info("FreqTableFclk\n");
++ for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++)
++ pr_info(" .[%02d] = %d\n", i, pptable->FreqTableFclk[i]);
++
++ pr_info("FreqTableDcefclk\n");
++ for (i = 0; i < NUM_DCEFCLK_DPM_LEVELS; i++)
++ pr_info(" .[%02d] = %d\n", i, pptable->FreqTableDcefclk[i]);
++
++ pr_info("FreqTableDispclk\n");
++ for (i = 0; i < NUM_DISPCLK_DPM_LEVELS; i++)
++ pr_info(" .[%02d] = %d\n", i, pptable->FreqTableDispclk[i]);
++
++ pr_info("FreqTablePixclk\n");
++ for (i = 0; i < NUM_PIXCLK_DPM_LEVELS; i++)
++ pr_info(" .[%02d] = %d\n", i, pptable->FreqTablePixclk[i]);
++
++ pr_info("FreqTablePhyclk\n");
++ for (i = 0; i < NUM_PHYCLK_DPM_LEVELS; i++)
++ pr_info(" .[%02d] = %d\n", i, pptable->FreqTablePhyclk[i]);
++
++ pr_info("DcModeMaxFreq[PPCLK_GFXCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_GFXCLK]);
++ pr_info("DcModeMaxFreq[PPCLK_VCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_VCLK]);
++ pr_info("DcModeMaxFreq[PPCLK_DCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_DCLK]);
++ pr_info("DcModeMaxFreq[PPCLK_ECLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_ECLK]);
++ pr_info("DcModeMaxFreq[PPCLK_SOCCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_SOCCLK]);
++ pr_info("DcModeMaxFreq[PPCLK_UCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_UCLK]);
++ pr_info("DcModeMaxFreq[PPCLK_DCEFCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_DCEFCLK]);
++ pr_info("DcModeMaxFreq[PPCLK_DISPCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_DISPCLK]);
++ pr_info("DcModeMaxFreq[PPCLK_PIXCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_PIXCLK]);
++ pr_info("DcModeMaxFreq[PPCLK_PHYCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_PHYCLK]);
++ pr_info("DcModeMaxFreq[PPCLK_FCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_FCLK]);
++ pr_info("Padding8_Clks = %d\n", pptable->Padding8_Clks);
++
++ pr_info("Mp0clkFreq\n");
++ for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++)
++ pr_info(" .[%d] = %d\n", i, pptable->Mp0clkFreq[i]);
++
++ pr_info("Mp0DpmVoltage\n");
++ for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++)
++ pr_info(" .[%d] = %d\n", i, pptable->Mp0DpmVoltage[i]);
++
++ pr_info("GfxclkFidle = 0x%x\n", pptable->GfxclkFidle);
++ pr_info("GfxclkSlewRate = 0x%x\n", pptable->GfxclkSlewRate);
++ pr_info("CksEnableFreq = 0x%x\n", pptable->CksEnableFreq);
++ pr_info("Padding789 = 0x%x\n", pptable->Padding789);
++ pr_info("CksVoltageOffset[a = 0x%08x b = 0x%08x c = 0x%08x]\n",
++ pptable->CksVoltageOffset.a,
++ pptable->CksVoltageOffset.b,
++ pptable->CksVoltageOffset.c);
++ pr_info("Padding567[0] = 0x%x\n", pptable->Padding567[0]);
++ pr_info("Padding567[1] = 0x%x\n", pptable->Padding567[1]);
++ pr_info("Padding567[2] = 0x%x\n", pptable->Padding567[2]);
++ pr_info("Padding567[3] = 0x%x\n", pptable->Padding567[3]);
++ pr_info("GfxclkDsMaxFreq = %d\n", pptable->GfxclkDsMaxFreq);
++ pr_info("GfxclkSource = 0x%x\n", pptable->GfxclkSource);
++ pr_info("Padding456 = 0x%x\n", pptable->Padding456);
++
++ pr_info("LowestUclkReservedForUlv = %d\n", pptable->LowestUclkReservedForUlv);
++ pr_info("Padding8_Uclk[0] = 0x%x\n", pptable->Padding8_Uclk[0]);
++ pr_info("Padding8_Uclk[1] = 0x%x\n", pptable->Padding8_Uclk[1]);
++ pr_info("Padding8_Uclk[2] = 0x%x\n", pptable->Padding8_Uclk[2]);
++
++ pr_info("PcieGenSpeed\n");
++ for (i = 0; i < NUM_LINK_LEVELS; i++)
++ pr_info(" .[%d] = %d\n", i, pptable->PcieGenSpeed[i]);
++
++ pr_info("PcieLaneCount\n");
++ for (i = 0; i < NUM_LINK_LEVELS; i++)
++ pr_info(" .[%d] = %d\n", i, pptable->PcieLaneCount[i]);
++
++ pr_info("LclkFreq\n");
++ for (i = 0; i < NUM_LINK_LEVELS; i++)
++ pr_info(" .[%d] = %d\n", i, pptable->LclkFreq[i]);
++
++ pr_info("EnableTdpm = %d\n", pptable->EnableTdpm);
++ pr_info("TdpmHighHystTemperature = %d\n", pptable->TdpmHighHystTemperature);
++ pr_info("TdpmLowHystTemperature = %d\n", pptable->TdpmLowHystTemperature);
++ pr_info("GfxclkFreqHighTempLimit = %d\n", pptable->GfxclkFreqHighTempLimit);
++
++ pr_info("FanStopTemp = %d\n", pptable->FanStopTemp);
++ pr_info("FanStartTemp = %d\n", pptable->FanStartTemp);
++
++ pr_info("FanGainEdge = %d\n", pptable->FanGainEdge);
++ pr_info("FanGainHotspot = %d\n", pptable->FanGainHotspot);
++ pr_info("FanGainLiquid = %d\n", pptable->FanGainLiquid);
++ pr_info("FanGainVrVddc = %d\n", pptable->FanGainVrVddc);
++ pr_info("FanGainVrMvdd = %d\n", pptable->FanGainVrMvdd);
++ pr_info("FanGainPlx = %d\n", pptable->FanGainPlx);
++ pr_info("FanGainHbm = %d\n", pptable->FanGainHbm);
++ pr_info("FanPwmMin = %d\n", pptable->FanPwmMin);
++ pr_info("FanAcousticLimitRpm = %d\n", pptable->FanAcousticLimitRpm);
++ pr_info("FanThrottlingRpm = %d\n", pptable->FanThrottlingRpm);
++ pr_info("FanMaximumRpm = %d\n", pptable->FanMaximumRpm);
++ pr_info("FanTargetTemperature = %d\n", pptable->FanTargetTemperature);
++ pr_info("FanTargetGfxclk = %d\n", pptable->FanTargetGfxclk);
++ pr_info("FanZeroRpmEnable = %d\n", pptable->FanZeroRpmEnable);
++ pr_info("FanTachEdgePerRev = %d\n", pptable->FanTachEdgePerRev);
++
++ pr_info("FuzzyFan_ErrorSetDelta = %d\n", pptable->FuzzyFan_ErrorSetDelta);
++ pr_info("FuzzyFan_ErrorRateSetDelta = %d\n", pptable->FuzzyFan_ErrorRateSetDelta);
++ pr_info("FuzzyFan_PwmSetDelta = %d\n", pptable->FuzzyFan_PwmSetDelta);
++ pr_info("FuzzyFan_Reserved = %d\n", pptable->FuzzyFan_Reserved);
++
++ pr_info("OverrideAvfsGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_GFX]);
++ pr_info("OverrideAvfsGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_SOC]);
++ pr_info("Padding8_Avfs[0] = %d\n", pptable->Padding8_Avfs[0]);
++ pr_info("Padding8_Avfs[1] = %d\n", pptable->Padding8_Avfs[1]);
++
++ pr_info("qAvfsGb[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n",
++ pptable->qAvfsGb[AVFS_VOLTAGE_GFX].a,
++ pptable->qAvfsGb[AVFS_VOLTAGE_GFX].b,
++ pptable->qAvfsGb[AVFS_VOLTAGE_GFX].c);
++ pr_info("qAvfsGb[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n",
++ pptable->qAvfsGb[AVFS_VOLTAGE_SOC].a,
++ pptable->qAvfsGb[AVFS_VOLTAGE_SOC].b,
++ pptable->qAvfsGb[AVFS_VOLTAGE_SOC].c);
++ pr_info("dBtcGbGfxCksOn{a = 0x%x b = 0x%x c = 0x%x}\n",
++ pptable->dBtcGbGfxCksOn.a,
++ pptable->dBtcGbGfxCksOn.b,
++ pptable->dBtcGbGfxCksOn.c);
++ pr_info("dBtcGbGfxCksOff{a = 0x%x b = 0x%x c = 0x%x}\n",
++ pptable->dBtcGbGfxCksOff.a,
++ pptable->dBtcGbGfxCksOff.b,
++ pptable->dBtcGbGfxCksOff.c);
++ pr_info("dBtcGbGfxAfll{a = 0x%x b = 0x%x c = 0x%x}\n",
++ pptable->dBtcGbGfxAfll.a,
++ pptable->dBtcGbGfxAfll.b,
++ pptable->dBtcGbGfxAfll.c);
++ pr_info("dBtcGbSoc{a = 0x%x b = 0x%x c = 0x%x}\n",
++ pptable->dBtcGbSoc.a,
++ pptable->dBtcGbSoc.b,
++ pptable->dBtcGbSoc.c);
++ pr_info("qAgingGb[AVFS_VOLTAGE_GFX]{m = 0x%x b = 0x%x}\n",
++ pptable->qAgingGb[AVFS_VOLTAGE_GFX].m,
++ pptable->qAgingGb[AVFS_VOLTAGE_GFX].b);
++ pr_info("qAgingGb[AVFS_VOLTAGE_SOC]{m = 0x%x b = 0x%x}\n",
++ pptable->qAgingGb[AVFS_VOLTAGE_SOC].m,
++ pptable->qAgingGb[AVFS_VOLTAGE_SOC].b);
++
++ pr_info("qStaticVoltageOffset[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n",
++ pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].a,
++ pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].b,
++ pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].c);
++ pr_info("qStaticVoltageOffset[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n",
++ pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].a,
++ pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].b,
++ pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].c);
++
++ pr_info("DcTol[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_GFX]);
++ pr_info("DcTol[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_SOC]);
++
++ pr_info("DcBtcEnabled[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_GFX]);
++ pr_info("DcBtcEnabled[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_SOC]);
++ pr_info("Padding8_GfxBtc[0] = 0x%x\n", pptable->Padding8_GfxBtc[0]);
++ pr_info("Padding8_GfxBtc[1] = 0x%x\n", pptable->Padding8_GfxBtc[1]);
++
++ pr_info("DcBtcMin[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_GFX]);
++ pr_info("DcBtcMin[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_SOC]);
++ pr_info("DcBtcMax[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_GFX]);
++ pr_info("DcBtcMax[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_SOC]);
++
++ pr_info("XgmiLinkSpeed\n");
++ for (i = 0; i < NUM_XGMI_LEVELS; i++)
++ pr_info(" .[%d] = %d\n", i, pptable->XgmiLinkSpeed[i]);
++ pr_info("XgmiLinkWidth\n");
++ for (i = 0; i < NUM_XGMI_LEVELS; i++)
++ pr_info(" .[%d] = %d\n", i, pptable->XgmiLinkWidth[i]);
++ pr_info("XgmiFclkFreq\n");
++ for (i = 0; i < NUM_XGMI_LEVELS; i++)
++ pr_info(" .[%d] = %d\n", i, pptable->XgmiFclkFreq[i]);
++ pr_info("XgmiUclkFreq\n");
++ for (i = 0; i < NUM_XGMI_LEVELS; i++)
++ pr_info(" .[%d] = %d\n", i, pptable->XgmiUclkFreq[i]);
++ pr_info("XgmiSocclkFreq\n");
++ for (i = 0; i < NUM_XGMI_LEVELS; i++)
++ pr_info(" .[%d] = %d\n", i, pptable->XgmiSocclkFreq[i]);
++ pr_info("XgmiSocVoltage\n");
++ for (i = 0; i < NUM_XGMI_LEVELS; i++)
++ pr_info(" .[%d] = %d\n", i, pptable->XgmiSocVoltage[i]);
++
++ pr_info("DebugOverrides = 0x%x\n", pptable->DebugOverrides);
++ pr_info("ReservedEquation0{a = 0x%x b = 0x%x c = 0x%x}\n",
++ pptable->ReservedEquation0.a,
++ pptable->ReservedEquation0.b,
++ pptable->ReservedEquation0.c);
++ pr_info("ReservedEquation1{a = 0x%x b = 0x%x c = 0x%x}\n",
++ pptable->ReservedEquation1.a,
++ pptable->ReservedEquation1.b,
++ pptable->ReservedEquation1.c);
++ pr_info("ReservedEquation2{a = 0x%x b = 0x%x c = 0x%x}\n",
++ pptable->ReservedEquation2.a,
++ pptable->ReservedEquation2.b,
++ pptable->ReservedEquation2.c);
++ pr_info("ReservedEquation3{a = 0x%x b = 0x%x c = 0x%x}\n",
++ pptable->ReservedEquation3.a,
++ pptable->ReservedEquation3.b,
++ pptable->ReservedEquation3.c);
++
++ pr_info("MinVoltageUlvGfx = %d\n", pptable->MinVoltageUlvGfx);
++ pr_info("MinVoltageUlvSoc = %d\n", pptable->MinVoltageUlvSoc);
++
++ for (i = 0; i < 14; i++)
++ pr_info("Reserved[%d] = 0x%x\n", i, pptable->Reserved[i]);
++
++ pr_info("Liquid1_I2C_address = 0x%x\n", pptable->Liquid1_I2C_address);
++ pr_info("Liquid2_I2C_address = 0x%x\n", pptable->Liquid2_I2C_address);
++ pr_info("Vr_I2C_address = 0x%x\n", pptable->Vr_I2C_address);
++ pr_info("Plx_I2C_address = 0x%x\n", pptable->Plx_I2C_address);
++
++ pr_info("Liquid_I2C_LineSCL = 0x%x\n", pptable->Liquid_I2C_LineSCL);
++ pr_info("Liquid_I2C_LineSDA = 0x%x\n", pptable->Liquid_I2C_LineSDA);
++ pr_info("Vr_I2C_LineSCL = 0x%x\n", pptable->Vr_I2C_LineSCL);
++ pr_info("Vr_I2C_LineSDA = 0x%x\n", pptable->Vr_I2C_LineSDA);
++
++ pr_info("Plx_I2C_LineSCL = 0x%x\n", pptable->Plx_I2C_LineSCL);
++ pr_info("Plx_I2C_LineSDA = 0x%x\n", pptable->Plx_I2C_LineSDA);
++ pr_info("VrSensorPresent = 0x%x\n", pptable->VrSensorPresent);
++ pr_info("LiquidSensorPresent = 0x%x\n", pptable->LiquidSensorPresent);
++
++ pr_info("MaxVoltageStepGfx = 0x%x\n", pptable->MaxVoltageStepGfx);
++ pr_info("MaxVoltageStepSoc = 0x%x\n", pptable->MaxVoltageStepSoc);
++
++ pr_info("VddGfxVrMapping = 0x%x\n", pptable->VddGfxVrMapping);
++ pr_info("VddSocVrMapping = 0x%x\n", pptable->VddSocVrMapping);
++ pr_info("VddMem0VrMapping = 0x%x\n", pptable->VddMem0VrMapping);
++ pr_info("VddMem1VrMapping = 0x%x\n", pptable->VddMem1VrMapping);
++
++ pr_info("GfxUlvPhaseSheddingMask = 0x%x\n", pptable->GfxUlvPhaseSheddingMask);
++ pr_info("SocUlvPhaseSheddingMask = 0x%x\n", pptable->SocUlvPhaseSheddingMask);
++ pr_info("ExternalSensorPresent = 0x%x\n", pptable->ExternalSensorPresent);
++ pr_info("Padding8_V = 0x%x\n", pptable->Padding8_V);
++
++ pr_info("GfxMaxCurrent = 0x%x\n", pptable->GfxMaxCurrent);
++ pr_info("GfxOffset = 0x%x\n", pptable->GfxOffset);
++ pr_info("Padding_TelemetryGfx = 0x%x\n", pptable->Padding_TelemetryGfx);
++
++ pr_info("SocMaxCurrent = 0x%x\n", pptable->SocMaxCurrent);
++ pr_info("SocOffset = 0x%x\n", pptable->SocOffset);
++ pr_info("Padding_TelemetrySoc = 0x%x\n", pptable->Padding_TelemetrySoc);
++
++ pr_info("Mem0MaxCurrent = 0x%x\n", pptable->Mem0MaxCurrent);
++ pr_info("Mem0Offset = 0x%x\n", pptable->Mem0Offset);
++ pr_info("Padding_TelemetryMem0 = 0x%x\n", pptable->Padding_TelemetryMem0);
++
++ pr_info("Mem1MaxCurrent = 0x%x\n", pptable->Mem1MaxCurrent);
++ pr_info("Mem1Offset = 0x%x\n", pptable->Mem1Offset);
++ pr_info("Padding_TelemetryMem1 = 0x%x\n", pptable->Padding_TelemetryMem1);
++
++ pr_info("AcDcGpio = %d\n", pptable->AcDcGpio);
++ pr_info("AcDcPolarity = %d\n", pptable->AcDcPolarity);
++ pr_info("VR0HotGpio = %d\n", pptable->VR0HotGpio);
++ pr_info("VR0HotPolarity = %d\n", pptable->VR0HotPolarity);
++
++ pr_info("VR1HotGpio = %d\n", pptable->VR1HotGpio);
++ pr_info("VR1HotPolarity = %d\n", pptable->VR1HotPolarity);
++ pr_info("Padding1 = 0x%x\n", pptable->Padding1);
++ pr_info("Padding2 = 0x%x\n", pptable->Padding2);
++
++ pr_info("LedPin0 = %d\n", pptable->LedPin0);
++ pr_info("LedPin1 = %d\n", pptable->LedPin1);
++ pr_info("LedPin2 = %d\n", pptable->LedPin2);
++ pr_info("padding8_4 = 0x%x\n", pptable->padding8_4);
++
++ pr_info("PllGfxclkSpreadEnabled = %d\n", pptable->PllGfxclkSpreadEnabled);
++ pr_info("PllGfxclkSpreadPercent = %d\n", pptable->PllGfxclkSpreadPercent);
++ pr_info("PllGfxclkSpreadFreq = %d\n", pptable->PllGfxclkSpreadFreq);
++
++ pr_info("UclkSpreadEnabled = %d\n", pptable->UclkSpreadEnabled);
++ pr_info("UclkSpreadPercent = %d\n", pptable->UclkSpreadPercent);
++ pr_info("UclkSpreadFreq = %d\n", pptable->UclkSpreadFreq);
++
++ pr_info("FclkSpreadEnabled = %d\n", pptable->FclkSpreadEnabled);
++ pr_info("FclkSpreadPercent = %d\n", pptable->FclkSpreadPercent);
++ pr_info("FclkSpreadFreq = %d\n", pptable->FclkSpreadFreq);
++
++ pr_info("FllGfxclkSpreadEnabled = %d\n", pptable->FllGfxclkSpreadEnabled);
++ pr_info("FllGfxclkSpreadPercent = %d\n", pptable->FllGfxclkSpreadPercent);
++ pr_info("FllGfxclkSpreadFreq = %d\n", pptable->FllGfxclkSpreadFreq);
++
++ for (i = 0; i < 10; i++)
++ pr_info("BoardReserved[%d] = 0x%x\n", i, pptable->BoardReserved[i]);
++
++ for (i = 0; i < 8; i++)
++ pr_info("MmHubPadding[%d] = 0x%x\n", i, pptable->MmHubPadding[i]);
++}
++#endif
++
++static int check_powerplay_tables(
++ struct pp_hwmgr *hwmgr,
++ const ATOM_Vega20_POWERPLAYTABLE *powerplay_table)
++{
++ PP_ASSERT_WITH_CODE((powerplay_table->sHeader.format_revision >=
++ ATOM_VEGA20_TABLE_REVISION_VEGA20),
++ "Unsupported PPTable format!", return -1);
++ PP_ASSERT_WITH_CODE(powerplay_table->sHeader.structuresize > 0,
++ "Invalid PowerPlay Table!", return -1);
++ PP_ASSERT_WITH_CODE(powerplay_table->smcPPTable.Version == PPTABLE_V20_SMU_VERSION,
++ "Unmatch PPTable version, vbios update may be needed!", return -1);
++
++ //dump_pptable(&powerplay_table->smcPPTable);
++
++ return 0;
++}
++
++static int set_platform_caps(struct pp_hwmgr *hwmgr, uint32_t powerplay_caps)
++{
++ set_hw_cap(
++ hwmgr,
++ 0 != (powerplay_caps & ATOM_VEGA20_PP_PLATFORM_CAP_POWERPLAY),
++ PHM_PlatformCaps_PowerPlaySupport);
++
++ set_hw_cap(
++ hwmgr,
++ 0 != (powerplay_caps & ATOM_VEGA20_PP_PLATFORM_CAP_SBIOSPOWERSOURCE),
++ PHM_PlatformCaps_BiosPowerSourceControl);
++
++ set_hw_cap(
++ hwmgr,
++ 0 != (powerplay_caps & ATOM_VEGA20_PP_PLATFORM_CAP_BACO),
++ PHM_PlatformCaps_BACO);
++
++ set_hw_cap(
++ hwmgr,
++ 0 != (powerplay_caps & ATOM_VEGA20_PP_PLATFORM_CAP_BAMACO),
++ PHM_PlatformCaps_BAMACO);
++
++ return 0;
++}
++
++static int copy_clock_limits_array(
++ struct pp_hwmgr *hwmgr,
++ uint32_t **pptable_info_array,
++ const uint32_t *pptable_array)
++{
++ uint32_t array_size, i;
++ uint32_t *table;
++
++ array_size = sizeof(uint32_t) * ATOM_VEGA20_PPCLOCK_COUNT;
++
++ table = kzalloc(array_size, GFP_KERNEL);
++ if (NULL == table)
++ return -ENOMEM;
++
++ for (i = 0; i < ATOM_VEGA20_PPCLOCK_COUNT; i++)
++ table[i] = pptable_array[i];
++
++ *pptable_info_array = table;
++
++ return 0;
++}
++
++static int copy_overdrive_settings_limits_array(
++ struct pp_hwmgr *hwmgr,
++ uint32_t **pptable_info_array,
++ const uint32_t *pptable_array)
++{
++ uint32_t array_size, i;
++ uint32_t *table;
++
++ array_size = sizeof(uint32_t) * ATOM_VEGA20_ODSETTING_COUNT;
++
++ table = kzalloc(array_size, GFP_KERNEL);
++ if (NULL == table)
++ return -ENOMEM;
++
++ for (i = 0; i < ATOM_VEGA20_ODSETTING_COUNT; i++)
++ table[i] = pptable_array[i];
++
++ *pptable_info_array = table;
++
++ return 0;
++}
++
++static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable)
++{
++ struct atom_smc_dpm_info_v4_3 *smc_dpm_table;
++ int index = GetIndexIntoMasterDataTable(smc_dpm_info);
++
++ PP_ASSERT_WITH_CODE(
++ smc_dpm_table = smu_atom_get_data_table(hwmgr->adev, index, NULL, NULL, NULL),
++ "[appendVbiosPPTable] Failed to retrieve Smc Dpm Table from VBIOS!",
++ return -1);
++
++ ppsmc_pptable->Liquid1_I2C_address = smc_dpm_table->liquid1_i2c_address;
++ ppsmc_pptable->Liquid2_I2C_address = smc_dpm_table->liquid2_i2c_address;
++ ppsmc_pptable->Vr_I2C_address = smc_dpm_table->vr_i2c_address;
++ ppsmc_pptable->Plx_I2C_address = smc_dpm_table->plx_i2c_address;
++
++ ppsmc_pptable->Liquid_I2C_LineSCL = smc_dpm_table->liquid_i2c_linescl;
++ ppsmc_pptable->Liquid_I2C_LineSDA = smc_dpm_table->liquid_i2c_linesda;
++ ppsmc_pptable->Vr_I2C_LineSCL = smc_dpm_table->vr_i2c_linescl;
++ ppsmc_pptable->Vr_I2C_LineSDA = smc_dpm_table->vr_i2c_linesda;
++
++ ppsmc_pptable->Plx_I2C_LineSCL = smc_dpm_table->plx_i2c_linescl;
++ ppsmc_pptable->Plx_I2C_LineSDA = smc_dpm_table->plx_i2c_linesda;
++ ppsmc_pptable->VrSensorPresent = smc_dpm_table->vrsensorpresent;
++ ppsmc_pptable->LiquidSensorPresent = smc_dpm_table->liquidsensorpresent;
++
++ ppsmc_pptable->MaxVoltageStepGfx = smc_dpm_table->maxvoltagestepgfx;
++ ppsmc_pptable->MaxVoltageStepSoc = smc_dpm_table->maxvoltagestepsoc;
++
++ ppsmc_pptable->VddGfxVrMapping = smc_dpm_table->vddgfxvrmapping;
++ ppsmc_pptable->VddSocVrMapping = smc_dpm_table->vddsocvrmapping;
++ ppsmc_pptable->VddMem0VrMapping = smc_dpm_table->vddmem0vrmapping;
++ ppsmc_pptable->VddMem1VrMapping = smc_dpm_table->vddmem1vrmapping;
++
++ ppsmc_pptable->GfxUlvPhaseSheddingMask = smc_dpm_table->gfxulvphasesheddingmask;
++ ppsmc_pptable->SocUlvPhaseSheddingMask = smc_dpm_table->soculvphasesheddingmask;
++ ppsmc_pptable->ExternalSensorPresent = smc_dpm_table->externalsensorpresent;
++
++ ppsmc_pptable->GfxMaxCurrent = smc_dpm_table->gfxmaxcurrent;
++ ppsmc_pptable->GfxOffset = smc_dpm_table->gfxoffset;
++ ppsmc_pptable->Padding_TelemetryGfx = smc_dpm_table->padding_telemetrygfx;
++
++ ppsmc_pptable->SocMaxCurrent = smc_dpm_table->socmaxcurrent;
++ ppsmc_pptable->SocOffset = smc_dpm_table->socoffset;
++ ppsmc_pptable->Padding_TelemetrySoc = smc_dpm_table->padding_telemetrysoc;
++
++ ppsmc_pptable->Mem0MaxCurrent = smc_dpm_table->mem0maxcurrent;
++ ppsmc_pptable->Mem0Offset = smc_dpm_table->mem0offset;
++ ppsmc_pptable->Padding_TelemetryMem0 = smc_dpm_table->padding_telemetrymem0;
++
++ ppsmc_pptable->Mem1MaxCurrent = smc_dpm_table->mem1maxcurrent;
++ ppsmc_pptable->Mem1Offset = smc_dpm_table->mem1offset;
++ ppsmc_pptable->Padding_TelemetryMem1 = smc_dpm_table->padding_telemetrymem1;
++
++ ppsmc_pptable->AcDcGpio = smc_dpm_table->acdcgpio;
++ ppsmc_pptable->AcDcPolarity = smc_dpm_table->acdcpolarity;
++ ppsmc_pptable->VR0HotGpio = smc_dpm_table->vr0hotgpio;
++ ppsmc_pptable->VR0HotPolarity = smc_dpm_table->vr0hotpolarity;
++
++ ppsmc_pptable->VR1HotGpio = smc_dpm_table->vr1hotgpio;
++ ppsmc_pptable->VR1HotPolarity = smc_dpm_table->vr1hotpolarity;
++ ppsmc_pptable->Padding1 = smc_dpm_table->padding1;
++ ppsmc_pptable->Padding2 = smc_dpm_table->padding2;
++
++ ppsmc_pptable->LedPin0 = smc_dpm_table->ledpin0;
++ ppsmc_pptable->LedPin1 = smc_dpm_table->ledpin1;
++ ppsmc_pptable->LedPin2 = smc_dpm_table->ledpin2;
++
++ ppsmc_pptable->PllGfxclkSpreadEnabled = smc_dpm_table->pllgfxclkspreadenabled;
++ ppsmc_pptable->PllGfxclkSpreadPercent = smc_dpm_table->pllgfxclkspreadpercent;
++ ppsmc_pptable->PllGfxclkSpreadFreq = smc_dpm_table->pllgfxclkspreadfreq;
++
++ ppsmc_pptable->UclkSpreadEnabled = 0;
++ ppsmc_pptable->UclkSpreadPercent = smc_dpm_table->uclkspreadpercent;
++ ppsmc_pptable->UclkSpreadFreq = smc_dpm_table->uclkspreadfreq;
++
++ ppsmc_pptable->FclkSpreadEnabled = 0;
++ ppsmc_pptable->FclkSpreadPercent = smc_dpm_table->fclkspreadpercent;
++ ppsmc_pptable->FclkSpreadFreq = smc_dpm_table->fclkspreadfreq;
++
++ ppsmc_pptable->FllGfxclkSpreadEnabled = smc_dpm_table->fllgfxclkspreadenabled;
++ ppsmc_pptable->FllGfxclkSpreadPercent = smc_dpm_table->fllgfxclkspreadpercent;
++ ppsmc_pptable->FllGfxclkSpreadFreq = smc_dpm_table->fllgfxclkspreadfreq;
++
++ return 0;
++}
++
++#define VEGA20_ENGINECLOCK_HARDMAX 198000
++static int init_powerplay_table_information(
++ struct pp_hwmgr *hwmgr,
++ const ATOM_Vega20_POWERPLAYTABLE *powerplay_table)
++{
++ struct phm_ppt_v3_information *pptable_information =
++ (struct phm_ppt_v3_information *)hwmgr->pptable;
++ uint32_t disable_power_control = 0;
++ int result;
++
++ hwmgr->thermal_controller.ucType = powerplay_table->ucThermalControllerType;
++ pptable_information->uc_thermal_controller_type = powerplay_table->ucThermalControllerType;
++
++ set_hw_cap(hwmgr,
++ ATOM_VEGA20_PP_THERMALCONTROLLER_NONE != hwmgr->thermal_controller.ucType,
++ PHM_PlatformCaps_ThermalController);
++
++ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
++
++ if (powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_GFXCLKFMAX] > VEGA20_ENGINECLOCK_HARDMAX)
++ hwmgr->platform_descriptor.overdriveLimit.engineClock = VEGA20_ENGINECLOCK_HARDMAX;
++ else
++ hwmgr->platform_descriptor.overdriveLimit.engineClock = powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_GFXCLKFMAX];
++ hwmgr->platform_descriptor.overdriveLimit.memoryClock = powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_UCLKFMAX];
++
++ copy_overdrive_settings_limits_array(hwmgr, &pptable_information->od_settings_max, powerplay_table->OverDrive8Table.ODSettingsMax);
++ copy_overdrive_settings_limits_array(hwmgr, &pptable_information->od_settings_min, powerplay_table->OverDrive8Table.ODSettingsMin);
++
++ /* hwmgr->platformDescriptor.minOverdriveVDDC = 0;
++ hwmgr->platformDescriptor.maxOverdriveVDDC = 0;
++ hwmgr->platformDescriptor.overdriveVDDCStep = 0; */
++
++ if (hwmgr->platform_descriptor.overdriveLimit.engineClock > 0
++ && hwmgr->platform_descriptor.overdriveLimit.memoryClock > 0)
++ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ACOverdriveSupport);
++
++ pptable_information->us_small_power_limit1 = powerplay_table->usSmallPowerLimit1;
++ pptable_information->us_small_power_limit2 = powerplay_table->usSmallPowerLimit2;
++ pptable_information->us_boost_power_limit = powerplay_table->usBoostPowerLimit;
++ pptable_information->us_od_turbo_power_limit = powerplay_table->usODTurboPowerLimit;
++ pptable_information->us_od_powersave_power_limit = powerplay_table->usODPowerSavePowerLimit;
++
++ pptable_information->us_software_shutdown_temp = powerplay_table->usSoftwareShutdownTemp;
++
++ hwmgr->platform_descriptor.TDPODLimit = (uint16_t)powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE];
++
++ disable_power_control = 0;
++ if (!disable_power_control && hwmgr->platform_descriptor.TDPODLimit) {
++ /* enable TDP overdrive (PowerControl) feature as well if supported */
++ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_PowerControl);
++ }
++
++ copy_clock_limits_array(hwmgr, &pptable_information->power_saving_clock_max, powerplay_table->PowerSavingClockTable.PowerSavingClockMax);
++ copy_clock_limits_array(hwmgr, &pptable_information->power_saving_clock_min, powerplay_table->PowerSavingClockTable.PowerSavingClockMin);
++
++ pptable_information->smc_pptable = (PPTable_t *)kmalloc(sizeof(PPTable_t), GFP_KERNEL);
++ if (pptable_information->smc_pptable == NULL)
++ return -ENOMEM;
++
++ memcpy(pptable_information->smc_pptable, &(powerplay_table->smcPPTable), sizeof(PPTable_t));
++
++ result = append_vbios_pptable(hwmgr, (pptable_information->smc_pptable));
++
++ return result;
++}
++
++static int vega20_pp_tables_initialize(struct pp_hwmgr *hwmgr)
++{
++ int result = 0;
++ const ATOM_Vega20_POWERPLAYTABLE *powerplay_table;
++
++ hwmgr->pptable = kzalloc(sizeof(struct phm_ppt_v3_information), GFP_KERNEL);
++ PP_ASSERT_WITH_CODE((hwmgr->pptable != NULL),
++ "Failed to allocate hwmgr->pptable!", return -ENOMEM);
++
++ powerplay_table = get_powerplay_table(hwmgr);
++ PP_ASSERT_WITH_CODE((powerplay_table != NULL),
++ "Missing PowerPlay Table!", return -1);
++
++ result = check_powerplay_tables(hwmgr, powerplay_table);
++ PP_ASSERT_WITH_CODE((result == 0),
++ "check_powerplay_tables failed", return result);
++
++ result = set_platform_caps(hwmgr,
++ le32_to_cpu(powerplay_table->ulPlatformCaps));
++ PP_ASSERT_WITH_CODE((result == 0),
++ "set_platform_caps failed", return result);
++
++ result = init_powerplay_table_information(hwmgr, powerplay_table);
++ PP_ASSERT_WITH_CODE((result == 0),
++ "init_powerplay_table_information failed", return result);
++
++ return result;
++}
++
++static int vega20_pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
++{
++ struct phm_ppt_v3_information *pp_table_info =
++ (struct phm_ppt_v3_information *)(hwmgr->pptable);
++
++ kfree(pp_table_info->power_saving_clock_max);
++ pp_table_info->power_saving_clock_max = NULL;
++
++ kfree(pp_table_info->power_saving_clock_min);
++ pp_table_info->power_saving_clock_min = NULL;
++
++ kfree(pp_table_info->od_settings_max);
++ pp_table_info->od_settings_max = NULL;
++
++ kfree(pp_table_info->od_settings_min);
++ pp_table_info->od_settings_min = NULL;
++
++ kfree(pp_table_info->smc_pptable);
++ pp_table_info->smc_pptable = NULL;
++
++ kfree(hwmgr->pptable);
++ hwmgr->pptable = NULL;
++
++ return 0;
++}
++
++const struct pp_table_func vega20_pptable_funcs = {
++ .pptable_init = vega20_pp_tables_initialize,
++ .pptable_fini = vega20_pp_tables_uninitialize,
++};
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.h
+new file mode 100644
+index 0000000..846c2cb
+--- /dev/null
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.h
+@@ -0,0 +1,31 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef VEGA20_PROCESSPPTABLES_H
++#define VEGA20_PROCESSPPTABLES_H
++
++#include "hwmgr.h"
++
++extern const struct pp_table_func vega20_pptable_funcs;
++
++#endif
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c
+new file mode 100644
+index 0000000..2984ddd5
+--- /dev/null
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c
+@@ -0,0 +1,212 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "vega20_thermal.h"
++#include "vega20_hwmgr.h"
++#include "vega20_smumgr.h"
++#include "vega20_ppsmc.h"
++#include "vega20_inc.h"
++#include "soc15_common.h"
++#include "pp_debug.h"
++
++static int vega20_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
++{
++ int ret = 0;
++
++ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
++ PPSMC_MSG_GetCurrentRpm)) == 0,
++ "Attempt to get current RPM from SMC Failed!",
++ return ret);
++ PP_ASSERT_WITH_CODE((ret = vega20_read_arg_from_smc(hwmgr,
++ current_rpm)) == 0,
++ "Attempt to read current RPM from SMC Failed!",
++ return ret);
++
++ return 0;
++}
++
++int vega20_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
++ struct phm_fan_speed_info *fan_speed_info)
++{
++ memset(fan_speed_info, 0, sizeof(*fan_speed_info));
++ fan_speed_info->supports_percent_read = false;
++ fan_speed_info->supports_percent_write = false;
++ fan_speed_info->supports_rpm_read = true;
++ fan_speed_info->supports_rpm_write = true;
++
++ return 0;
++}
++
++int vega20_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
++{
++ *speed = 0;
++
++ return vega20_get_current_rpm(hwmgr, speed);
++}
++
++/**
++* Reads the current temperature from the Vega20 thermal controller.
++*
++* @param hwmgr The address of the hardware manager.
++*/
++int vega20_thermal_get_temperature(struct pp_hwmgr *hwmgr)
++{
++ struct amdgpu_device *adev = hwmgr->adev;
++ int temp = 0;
++
++ temp = RREG32_SOC15(THM, 0, mmCG_MULT_THERMAL_STATUS);
++
++ temp = (temp & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
++ CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
++
++ temp = temp & 0x1ff;
++
++ temp *= PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
++ return temp;
++}
++
++/**
++* Set the requested temperature range for high and low alert signals
++*
++* @param hwmgr The address of the hardware manager.
++* @param range Temperature range to be programmed for
++* high and low alert signals
++* @exception PP_Result_BadInput if the input data is not valid.
++*/
++static int vega20_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
++ struct PP_TemperatureRange *range)
++{
++ struct amdgpu_device *adev = hwmgr->adev;
++ int low = VEGA20_THERMAL_MINIMUM_ALERT_TEMP *
++ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
++ int high = VEGA20_THERMAL_MAXIMUM_ALERT_TEMP *
++ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
++ uint32_t val;
++
++ if (low < range->min)
++ low = range->min;
++ if (high > range->max)
++ high = range->max;
++
++ if (low > high)
++ return -EINVAL;
++
++ val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
++
++ val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
++ val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
++ val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
++ val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
++ val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
++
++ WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
++
++ return 0;
++}
++
++/**
++* Enable thermal alerts on the Vega20 thermal controller.
++*
++* @param hwmgr The address of the hardware manager.
++*/
++static int vega20_thermal_enable_alert(struct pp_hwmgr *hwmgr)
++{
++ struct amdgpu_device *adev = hwmgr->adev;
++ uint32_t val = 0;
++
++ val |= (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT);
++ val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
++ val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);
++
++ WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, val);
++
++ return 0;
++}
++
++/**
++* Disable thermal alerts on the Vega20 thermal controller.
++* @param hwmgr The address of the hardware manager.
++*/
++int vega20_thermal_disable_alert(struct pp_hwmgr *hwmgr)
++{
++ struct amdgpu_device *adev = hwmgr->adev;
++
++ WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, 0);
++
++ return 0;
++}
++
++/**
++* Uninitialize the thermal controller.
++* Currently just disables alerts.
++* @param hwmgr The address of the hardware manager.
++*/
++int vega20_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
++{
++ int result = vega20_thermal_disable_alert(hwmgr);
++
++ return result;
++}
++
++/**
++* Set up the fan table to control the fan using the SMC.
++* @param hwmgr the address of the powerplay hardware manager.
++* @param pInput the pointer to input data
++* @param pOutput the pointer to output data
++* @param pStorage the pointer to temporary storage
++* @param Result the last failure code
++* @return result from set temperature range routine
++*/
++static int vega20_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
++{
++ int ret;
++ struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
++ PPTable_t *table = &(data->smc_state_table.pp_table);
++
++ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_SetFanTemperatureTarget,
++ (uint32_t)table->FanTargetTemperature);
++
++ return ret;
++}
++
++int vega20_start_thermal_controller(struct pp_hwmgr *hwmgr,
++ struct PP_TemperatureRange *range)
++{
++ int ret = 0;
++
++ if (range == NULL)
++ return -EINVAL;
++
++ ret = vega20_thermal_set_temperature_range(hwmgr, range);
++ if (ret)
++ return ret;
++
++ ret = vega20_thermal_enable_alert(hwmgr);
++ if (ret)
++ return ret;
++
++ ret = vega20_thermal_setup_fan_table(hwmgr);
++
++ return ret;
++};
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.h
+new file mode 100644
+index 0000000..2a6d49f
+--- /dev/null
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.h
+@@ -0,0 +1,64 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef VEGA20_THERMAL_H
++#define VEGA20_THERMAL_H
++
++#include "hwmgr.h"
++
++struct vega20_temperature {
++ uint16_t edge_temp;
++ uint16_t hot_spot_temp;
++ uint16_t hbm_temp;
++ uint16_t vr_soc_temp;
++ uint16_t vr_mem_temp;
++ uint16_t liquid1_temp;
++ uint16_t liquid2_temp;
++ uint16_t plx_temp;
++};
++
++#define VEGA20_THERMAL_HIGH_ALERT_MASK 0x1
++#define VEGA20_THERMAL_LOW_ALERT_MASK 0x2
++
++#define VEGA20_THERMAL_MINIMUM_TEMP_READING -256
++#define VEGA20_THERMAL_MAXIMUM_TEMP_READING 255
++
++#define VEGA20_THERMAL_MINIMUM_ALERT_TEMP 0
++#define VEGA20_THERMAL_MAXIMUM_ALERT_TEMP 255
++
++#define FDO_PWM_MODE_STATIC 1
++#define FDO_PWM_MODE_STATIC_RPM 5
++
++extern int vega20_thermal_get_temperature(struct pp_hwmgr *hwmgr);
++extern int vega20_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
++extern int vega20_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
++ struct phm_fan_speed_info *fan_speed_info);
++extern int vega20_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
++extern int vega20_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr,
++ uint32_t *speed);
++extern int vega20_thermal_disable_alert(struct pp_hwmgr *hwmgr);
++extern int vega20_start_thermal_controller(struct pp_hwmgr *hwmgr,
++ struct PP_TemperatureRange *range);
++
++#endif
++
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5129-drm-amd-powerplay-support-workload-profile-query-and.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5129-drm-amd-powerplay-support-workload-profile-query-and.patch
new file mode 100644
index 00000000..62e896d9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5129-drm-amd-powerplay-support-workload-profile-query-and.patch
@@ -0,0 +1,239 @@
+From 06bd7dfa5edda4b5a608a5ba6a13307c1e7e8b5d Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Fri, 6 Jul 2018 14:00:37 -0500
+Subject: [PATCH 5129/5725] drm/amd/powerplay: support workload profile query
+ and setup for vega20
+
+Support the power profile API.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 193 ++++++++++++++++++++-
+ 1 file changed, 192 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+index 40f0717..06471d1 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+@@ -384,10 +384,13 @@ static int vega20_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+
+ hwmgr->backend = data;
+
++ hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO];
++ hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO;
++ hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO;
++
+ vega20_set_default_registry_data(hwmgr);
+
+ data->disable_dpm_mask = 0xff;
+- data->workload_mask = 0xff;
+
+ /* need to set voltage control types before EVV patching */
+ data->vddc_control = VEGA20_VOLTAGE_CONTROL_NONE;
+@@ -1971,6 +1974,190 @@ static int vega20_power_off_asic(struct pp_hwmgr *hwmgr)
+ return result;
+ }
+
++static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
++{
++ DpmActivityMonitorCoeffInt_t activity_monitor;
++ uint32_t i, size = 0;
++ uint16_t workload_type = 0;
++ static const char *profile_name[] = {
++ "3D_FULL_SCREEN",
++ "POWER_SAVING",
++ "VIDEO",
++ "VR",
++ "COMPUTE",
++ "CUSTOM"};
++ static const char *title[] = {
++ "PROFILE_INDEX(NAME)",
++ "CLOCK_TYPE(NAME)",
++ "FPS",
++ "UseRlcBusy",
++ "MinActiveFreqType",
++ "MinActiveFreq",
++ "BoosterFreqType",
++ "BoosterFreq",
++ "PD_Data_limit_c",
++ "PD_Data_error_coeff",
++ "PD_Data_error_rate_coeff"};
++ int result = 0;
++
++ if (!buf)
++ return -EINVAL;
++
++ size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
++ title[0], title[1], title[2], title[3], title[4], title[5],
++ title[6], title[7], title[8], title[9], title[10]);
++
++ for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
++ /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
++ workload_type = i + 1;
++ result = vega20_get_activity_monitor_coeff(hwmgr,
++ (uint8_t *)(&activity_monitor), workload_type);
++ PP_ASSERT_WITH_CODE(!result,
++ "[GetPowerProfile] Failed to get activity monitor!",
++ return result);
++
++ size += sprintf(buf + size, "%2d(%14s%s)\n",
++ i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ");
++
++ size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
++ " ",
++ 0,
++ "GFXCLK",
++ activity_monitor.Gfx_FPS,
++ activity_monitor.Gfx_UseRlcBusy,
++ activity_monitor.Gfx_MinActiveFreqType,
++ activity_monitor.Gfx_MinActiveFreq,
++ activity_monitor.Gfx_BoosterFreqType,
++ activity_monitor.Gfx_BoosterFreq,
++ activity_monitor.Gfx_PD_Data_limit_c,
++ activity_monitor.Gfx_PD_Data_error_coeff,
++ activity_monitor.Gfx_PD_Data_error_rate_coeff);
++
++ size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
++ " ",
++ 1,
++ "SOCCLK",
++ activity_monitor.Soc_FPS,
++ activity_monitor.Soc_UseRlcBusy,
++ activity_monitor.Soc_MinActiveFreqType,
++ activity_monitor.Soc_MinActiveFreq,
++ activity_monitor.Soc_BoosterFreqType,
++ activity_monitor.Soc_BoosterFreq,
++ activity_monitor.Soc_PD_Data_limit_c,
++ activity_monitor.Soc_PD_Data_error_coeff,
++ activity_monitor.Soc_PD_Data_error_rate_coeff);
++
++ size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
++ " ",
++ 2,
++ "UCLK",
++ activity_monitor.Mem_FPS,
++ activity_monitor.Mem_UseRlcBusy,
++ activity_monitor.Mem_MinActiveFreqType,
++ activity_monitor.Mem_MinActiveFreq,
++ activity_monitor.Mem_BoosterFreqType,
++ activity_monitor.Mem_BoosterFreq,
++ activity_monitor.Mem_PD_Data_limit_c,
++ activity_monitor.Mem_PD_Data_error_coeff,
++ activity_monitor.Mem_PD_Data_error_rate_coeff);
++
++ size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
++ " ",
++ 3,
++ "FCLK",
++ activity_monitor.Fclk_FPS,
++ activity_monitor.Fclk_UseRlcBusy,
++ activity_monitor.Fclk_MinActiveFreqType,
++ activity_monitor.Fclk_MinActiveFreq,
++ activity_monitor.Fclk_BoosterFreqType,
++ activity_monitor.Fclk_BoosterFreq,
++ activity_monitor.Fclk_PD_Data_limit_c,
++ activity_monitor.Fclk_PD_Data_error_coeff,
++ activity_monitor.Fclk_PD_Data_error_rate_coeff);
++ }
++
++ return size;
++}
++
++static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
++{
++ DpmActivityMonitorCoeffInt_t activity_monitor;
++ int result = 0;
++
++ hwmgr->power_profile_mode = input[size];
++
++ if (hwmgr->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
++ if (size < 10)
++ return -EINVAL;
++
++ result = vega20_get_activity_monitor_coeff(hwmgr,
++ (uint8_t *)(&activity_monitor),
++ WORKLOAD_PPLIB_CUSTOM_BIT);
++ PP_ASSERT_WITH_CODE(!result,
++ "[SetPowerProfile] Failed to get activity monitor!",
++ return result);
++
++ switch (input[0]) {
++ case 0: /* Gfxclk */
++ activity_monitor.Gfx_FPS = input[1];
++ activity_monitor.Gfx_UseRlcBusy = input[2];
++ activity_monitor.Gfx_MinActiveFreqType = input[3];
++ activity_monitor.Gfx_MinActiveFreq = input[4];
++ activity_monitor.Gfx_BoosterFreqType = input[5];
++ activity_monitor.Gfx_BoosterFreq = input[6];
++ activity_monitor.Gfx_PD_Data_limit_c = input[7];
++ activity_monitor.Gfx_PD_Data_error_coeff = input[8];
++ activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
++ break;
++ case 1: /* Socclk */
++ activity_monitor.Soc_FPS = input[1];
++ activity_monitor.Soc_UseRlcBusy = input[2];
++ activity_monitor.Soc_MinActiveFreqType = input[3];
++ activity_monitor.Soc_MinActiveFreq = input[4];
++ activity_monitor.Soc_BoosterFreqType = input[5];
++ activity_monitor.Soc_BoosterFreq = input[6];
++ activity_monitor.Soc_PD_Data_limit_c = input[7];
++ activity_monitor.Soc_PD_Data_error_coeff = input[8];
++ activity_monitor.Soc_PD_Data_error_rate_coeff = input[9];
++ break;
++ case 2: /* Uclk */
++ activity_monitor.Mem_FPS = input[1];
++ activity_monitor.Mem_UseRlcBusy = input[2];
++ activity_monitor.Mem_MinActiveFreqType = input[3];
++ activity_monitor.Mem_MinActiveFreq = input[4];
++ activity_monitor.Mem_BoosterFreqType = input[5];
++ activity_monitor.Mem_BoosterFreq = input[6];
++ activity_monitor.Mem_PD_Data_limit_c = input[7];
++ activity_monitor.Mem_PD_Data_error_coeff = input[8];
++ activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
++ break;
++ case 3: /* Fclk */
++ activity_monitor.Fclk_FPS = input[1];
++ activity_monitor.Fclk_UseRlcBusy = input[2];
++ activity_monitor.Fclk_MinActiveFreqType = input[3];
++ activity_monitor.Fclk_MinActiveFreq = input[4];
++ activity_monitor.Fclk_BoosterFreqType = input[5];
++ activity_monitor.Fclk_BoosterFreq = input[6];
++ activity_monitor.Fclk_PD_Data_limit_c = input[7];
++ activity_monitor.Fclk_PD_Data_error_coeff = input[8];
++ activity_monitor.Fclk_PD_Data_error_rate_coeff = input[9];
++ break;
++ }
++
++ result = vega20_set_activity_monitor_coeff(hwmgr,
++ (uint8_t *)(&activity_monitor),
++ WORKLOAD_PPLIB_CUSTOM_BIT);
++ PP_ASSERT_WITH_CODE(!result,
++ "[SetPowerProfile] Failed to set activity monitor!",
++ return result);
++ }
++
++ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
++ 1 << hwmgr->power_profile_mode);
++
++ return 0;
++}
++
+ static int vega20_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
+ uint32_t virtual_addr_low,
+ uint32_t virtual_addr_hi,
+@@ -2053,6 +2240,10 @@ static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
+ /* UMD pstate, profile related */
+ .force_dpm_level =
+ vega20_dpm_force_dpm_level,
++ .get_power_profile_mode =
++ vega20_get_power_profile_mode,
++ .set_power_profile_mode =
++ vega20_set_power_profile_mode,
+ .set_power_limit =
+ vega20_set_power_limit,
+ /* for sysfs to retrive/set gfxclk/memclk */
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5130-drm-amd-powerplay-init-vega20-uvd-vce-powergate-stat.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5130-drm-amd-powerplay-init-vega20-uvd-vce-powergate-stat.patch
new file mode 100644
index 00000000..c776755b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5130-drm-amd-powerplay-init-vega20-uvd-vce-powergate-stat.patch
@@ -0,0 +1,54 @@
+From 26ffc5f64ceecc1c00079e626a03917726f54fd3 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Fri, 4 May 2018 15:20:15 +0800
+Subject: [PATCH 5130/5725] drm/amd/powerplay: init vega20 uvd/vce powergate
+ status on dpm setup
+
+This is especially necessary when uvd/vce dpm is not enabled yet.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 18 ++++++++++++++++++
+ 1 file changed, 18 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+index 06471d1..a82a3df 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+@@ -932,6 +932,21 @@ static int vega20_init_max_sustainable_clocks(struct pp_hwmgr *hwmgr)
+ return 0;
+ }
+
++static void vega20_init_powergate_state(struct pp_hwmgr *hwmgr)
++{
++ struct vega20_hwmgr *data =
++ (struct vega20_hwmgr *)(hwmgr->backend);
++
++ data->uvd_power_gated = true;
++ data->vce_power_gated = true;
++
++ if (data->smu_features[GNLD_DPM_UVD].enabled)
++ data->uvd_power_gated = false;
++
++ if (data->smu_features[GNLD_DPM_VCE].enabled)
++ data->vce_power_gated = false;
++}
++
+ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+ {
+ int result = 0;
+@@ -954,6 +969,9 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+ "[EnableDPMTasks] Failed to enable all smu features!",
+ return result);
+
++ /* Initialize UVD/VCE powergating state */
++ vega20_init_powergate_state(hwmgr);
++
+ result = vega20_setup_default_dpm_tables(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "[EnableDPMTasks] Failed to setup default DPM tables!",
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5131-drm-amd-powerplay-correct-force-clock-level-related-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5131-drm-amd-powerplay-correct-force-clock-level-related-.patch
new file mode 100644
index 00000000..18bc7163
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5131-drm-amd-powerplay-correct-force-clock-level-related-.patch
@@ -0,0 +1,234 @@
+From efa23959765f0234527bb15bed34241aeb353800 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Tue, 8 May 2018 18:27:03 +0800
+Subject: [PATCH 5131/5725] drm/amd/powerplay: correct force clock level
+ related settings for vega20 (v2)
+
+1. The min/max level is determined by soft_min_level/soft_max_level.
+2. Vega20 comes with pptable v3, which has no vdd-related tables
+(vdd_dep_on_socclk, vdd_dep_on_mclk).
+3. Vega20 does not support separate fan feature control (enable or
+disable).
+
+v2: squash in fixes:
+- bug fix for force dpm level settings
+- fix wrong data type
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 109 ++++++++++++---------
+ 1 file changed, 64 insertions(+), 45 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+index a82a3df..289e3ee 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+@@ -1015,7 +1015,7 @@ static uint32_t vega20_find_lowest_dpm_level(
+ static uint32_t vega20_find_highest_dpm_level(
+ struct vega20_single_dpm_table *table)
+ {
+- uint32_t i = 0;
++ int i = 0;
+
+ PP_ASSERT_WITH_CODE(table != NULL,
+ "[FindHighestDPMLevel] DPM Table does not exist!",
+@@ -1409,14 +1409,20 @@ static int vega20_force_dpm_highest(struct pp_hwmgr *hwmgr)
+ {
+ struct vega20_hwmgr *data =
+ (struct vega20_hwmgr *)(hwmgr->backend);
++ uint32_t soft_level;
+ int ret = 0;
+
+- data->smc_state_table.gfx_boot_level =
+- data->smc_state_table.gfx_max_level =
+- vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table));
+- data->smc_state_table.mem_boot_level =
+- data->smc_state_table.mem_max_level =
+- vega20_find_highest_dpm_level(&(data->dpm_table.mem_table));
++ soft_level = vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table));
++
++ data->dpm_table.gfx_table.dpm_state.soft_min_level =
++ data->dpm_table.gfx_table.dpm_state.soft_max_level =
++ data->dpm_table.gfx_table.dpm_levels[soft_level].value;
++
++ soft_level = vega20_find_highest_dpm_level(&(data->dpm_table.mem_table));
++
++ data->dpm_table.mem_table.dpm_state.soft_min_level =
++ data->dpm_table.mem_table.dpm_state.soft_max_level =
++ data->dpm_table.mem_table.dpm_levels[soft_level].value;
+
+ ret = vega20_upload_dpm_min_level(hwmgr);
+ PP_ASSERT_WITH_CODE(!ret,
+@@ -1435,14 +1441,20 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr)
+ {
+ struct vega20_hwmgr *data =
+ (struct vega20_hwmgr *)(hwmgr->backend);
++ uint32_t soft_level;
+ int ret = 0;
+
+- data->smc_state_table.gfx_boot_level =
+- data->smc_state_table.gfx_max_level =
+- vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
+- data->smc_state_table.mem_boot_level =
+- data->smc_state_table.mem_max_level =
+- vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table));
++ soft_level = vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
++
++ data->dpm_table.gfx_table.dpm_state.soft_min_level =
++ data->dpm_table.gfx_table.dpm_state.soft_max_level =
++ data->dpm_table.gfx_table.dpm_levels[soft_level].value;
++
++ soft_level = vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table));
++
++ data->dpm_table.mem_table.dpm_state.soft_min_level =
++ data->dpm_table.mem_table.dpm_state.soft_max_level =
++ data->dpm_table.mem_table.dpm_levels[soft_level].value;
+
+ ret = vega20_upload_dpm_min_level(hwmgr);
+ PP_ASSERT_WITH_CODE(!ret,
+@@ -1475,19 +1487,24 @@ static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
+ return 0;
+ }
+
+-#if 0
+ static int vega20_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
+ uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask)
+ {
+- struct phm_ppt_v2_information *table_info =
+- (struct phm_ppt_v2_information *)(hwmgr->pptable);
++ struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
++ struct vega20_single_dpm_table *gfx_dpm_table = &(data->dpm_table.gfx_table);
++ struct vega20_single_dpm_table *mem_dpm_table = &(data->dpm_table.mem_table);
++ struct vega20_single_dpm_table *soc_dpm_table = &(data->dpm_table.soc_table);
++
++ *sclk_mask = 0;
++ *mclk_mask = 0;
++ *soc_mask = 0;
+
+- if (table_info->vdd_dep_on_sclk->count > VEGA20_UMD_PSTATE_GFXCLK_LEVEL &&
+- table_info->vdd_dep_on_socclk->count > VEGA20_UMD_PSTATE_SOCCLK_LEVEL &&
+- table_info->vdd_dep_on_mclk->count > VEGA20_UMD_PSTATE_MCLK_LEVEL) {
++ if (gfx_dpm_table->count > VEGA20_UMD_PSTATE_GFXCLK_LEVEL &&
++ mem_dpm_table->count > VEGA20_UMD_PSTATE_MCLK_LEVEL &&
++ soc_dpm_table->count > VEGA20_UMD_PSTATE_SOCCLK_LEVEL) {
+ *sclk_mask = VEGA20_UMD_PSTATE_GFXCLK_LEVEL;
+- *soc_mask = VEGA20_UMD_PSTATE_SOCCLK_LEVEL;
+ *mclk_mask = VEGA20_UMD_PSTATE_MCLK_LEVEL;
++ *soc_mask = VEGA20_UMD_PSTATE_SOCCLK_LEVEL;
+ }
+
+ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
+@@ -1495,24 +1512,30 @@ static int vega20_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_fo
+ } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
+ *mclk_mask = 0;
+ } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+- *sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
+- *soc_mask = table_info->vdd_dep_on_socclk->count - 1;
+- *mclk_mask = table_info->vdd_dep_on_mclk->count - 1;
++ *sclk_mask = gfx_dpm_table->count - 1;
++ *mclk_mask = mem_dpm_table->count - 1;
++ *soc_mask = soc_dpm_table->count - 1;
+ }
++
+ return 0;
+ }
+-#endif
+
+ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
+ enum pp_clock_type type, uint32_t mask)
+ {
+ struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
++ uint32_t soft_min_level, soft_max_level;
+ int ret = 0;
+
+ switch (type) {
+ case PP_SCLK:
+- data->smc_state_table.gfx_boot_level = mask ? (ffs(mask) - 1) : 0;
+- data->smc_state_table.gfx_max_level = mask ? (fls(mask) - 1) : 0;
++ soft_min_level = mask ? (ffs(mask) - 1) : 0;
++ soft_max_level = mask ? (fls(mask) - 1) : 0;
++
++ data->dpm_table.gfx_table.dpm_state.soft_min_level =
++ data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
++ data->dpm_table.gfx_table.dpm_state.soft_max_level =
++ data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
+
+ ret = vega20_upload_dpm_min_level(hwmgr);
+ PP_ASSERT_WITH_CODE(!ret,
+@@ -1526,8 +1549,13 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
+ break;
+
+ case PP_MCLK:
+- data->smc_state_table.mem_boot_level = mask ? (ffs(mask) - 1) : 0;
+- data->smc_state_table.mem_max_level = mask ? (fls(mask) - 1) : 0;
++ soft_min_level = mask ? (ffs(mask) - 1) : 0;
++ soft_max_level = mask ? (fls(mask) - 1) : 0;
++
++ data->dpm_table.mem_table.dpm_state.soft_min_level =
++ data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
++ data->dpm_table.mem_table.dpm_state.soft_max_level =
++ data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
+
+ ret = vega20_upload_dpm_min_level(hwmgr);
+ PP_ASSERT_WITH_CODE(!ret,
+@@ -1555,47 +1583,38 @@ static int vega20_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
+ enum amd_dpm_forced_level level)
+ {
+ int ret = 0;
+-#if 0
+- uint32_t sclk_mask = 0;
+- uint32_t mclk_mask = 0;
+- uint32_t soc_mask = 0;
+-#endif
++ uint32_t sclk_mask, mclk_mask, soc_mask;
+
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_HIGH:
+ ret = vega20_force_dpm_highest(hwmgr);
+ break;
++
+ case AMD_DPM_FORCED_LEVEL_LOW:
+ ret = vega20_force_dpm_lowest(hwmgr);
+ break;
++
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ ret = vega20_unforce_dpm_levels(hwmgr);
+ break;
++
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+-#if 0
+ ret = vega20_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
+ if (ret)
+ return ret;
+- vega20_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
+- vega20_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
+-#endif
++ vega20_force_clock_level(hwmgr, PP_SCLK, 1 << sclk_mask);
++ vega20_force_clock_level(hwmgr, PP_MCLK, 1 << mclk_mask);
+ break;
++
+ case AMD_DPM_FORCED_LEVEL_MANUAL:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
+ default:
+ break;
+ }
+-#if 0
+- if (!ret) {
+- if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+- vega20_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE);
+- else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+- vega20_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO);
+- }
+-#endif
++
+ return ret;
+ }
+
+--
+2.7.4
+
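The force_clock_level path in the patch above turns the sysfs level bitmask into a pair of DPM table indices with ffs()/fls(). A user-space sketch of the same arithmetic, with GCC builtins standing in for the kernel helpers (assumed to behave like ffs/fls here):

    #include <stdint.h>
    #include <stdio.h>

    /* Lowest set bit, 1-based, like the kernel's ffs(); 0 for an empty mask. */
    static int my_ffs(uint32_t mask) { return mask ? __builtin_ctz(mask) + 1 : 0; }
    /* Highest set bit, 1-based, like the kernel's fls(); 0 for an empty mask. */
    static int my_fls(uint32_t mask) { return mask ? 32 - __builtin_clz(mask) : 0; }

    int main(void)
    {
        uint32_t mask = 0x06;   /* user asked for DPM levels 1 and 2 */
        uint32_t soft_min_level = mask ? my_ffs(mask) - 1 : 0;
        uint32_t soft_max_level = mask ? my_fls(mask) - 1 : 0;
        printf("min level %u, max level %u\n", soft_min_level, soft_max_level); /* 1, 2 */
        return 0;
    }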
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5132-drm-amd-powerplay-export-vega20-stable-pstate-clocks.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5132-drm-amd-powerplay-export-vega20-stable-pstate-clocks.patch
new file mode 100644
index 00000000..2d8b0db7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5132-drm-amd-powerplay-export-vega20-stable-pstate-clocks.patch
@@ -0,0 +1,62 @@
+From e290a846503b9a28d5cd012c487badb8ea631073 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Tue, 8 May 2018 18:23:16 +0800
+Subject: [PATCH 5132/5725] drm/amd/powerplay: export vega20 stable pstate
+ clocks
+
+Needed for querying the stable pstate clocks.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 26 +++++++++++++++++++++-
+ 1 file changed, 25 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+index 289e3ee..7b6e48a 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+@@ -846,6 +846,25 @@ static int vega20_odn_initialize_default_settings(
+ return 0;
+ }
+
++static int vega20_populate_umdpstate_clocks(
++ struct pp_hwmgr *hwmgr)
++{
++ struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
++ struct vega20_single_dpm_table *gfx_table = &(data->dpm_table.gfx_table);
++ struct vega20_single_dpm_table *mem_table = &(data->dpm_table.mem_table);
++
++ hwmgr->pstate_sclk = gfx_table->dpm_levels[0].value;
++ hwmgr->pstate_mclk = mem_table->dpm_levels[0].value;
++
++ if (gfx_table->count > VEGA20_UMD_PSTATE_GFXCLK_LEVEL &&
++ mem_table->count > VEGA20_UMD_PSTATE_MCLK_LEVEL) {
++ hwmgr->pstate_sclk = gfx_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value;
++ hwmgr->pstate_mclk = mem_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value;
++ }
++
++ return 0;
++}
++
+ static int vega20_get_max_sustainable_clock(struct pp_hwmgr *hwmgr,
+ PP_Clock *clock, PPCLK_e clock_select)
+ {
+@@ -992,7 +1011,12 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+ "[EnableDPMTasks] Failed to initialize odn settings!",
+ return result);
+
+- return result;
++ result = vega20_populate_umdpstate_clocks(hwmgr);
++ PP_ASSERT_WITH_CODE(!result,
++ "[EnableDPMTasks] Failed to populate umdpstate clocks!",
++ return result);
++
++ return 0;
+ }
+
+ static uint32_t vega20_find_lowest_dpm_level(
+--
+2.7.4
+
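The clock selection above boils down to: default to DPM level 0, and move up to the designated profiling level only when the table actually has that many entries. A compact sketch of the rule (the level index and the table contents are made up for illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define UMD_PSTATE_GFXCLK_LEVEL 3   /* assumed profiling level, mirrors the patch's macro */

    struct dpm_table { uint32_t count; uint32_t levels[8]; };

    static uint32_t pick_pstate_clock(const struct dpm_table *t)
    {
        uint32_t level = 0;                        /* safe default: lowest level */
        if (t->count > UMD_PSTATE_GFXCLK_LEVEL)    /* only if the table is deep enough */
            level = UMD_PSTATE_GFXCLK_LEVEL;
        return t->levels[level];
    }

    int main(void)
    {
        struct dpm_table gfx = {
            .count = 8,
            .levels = { 852, 991, 1084, 1138, 1200, 1401, 1536, 1630 }
        };
        printf("pstate gfxclk: %u MHz\n", pick_pstate_clock(&gfx));
        return 0;
    }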
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5133-drm-amd-powerplay-add-vega20-pre_display_config_chan.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5133-drm-amd-powerplay-add-vega20-pre_display_config_chan.patch
new file mode 100644
index 00000000..df45e4f6
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5133-drm-amd-powerplay-add-vega20-pre_display_config_chan.patch
@@ -0,0 +1,83 @@
+From a14d285dd750ef6d28da806486d31454e089c14e Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Wed, 9 May 2018 11:14:06 +0800
+Subject: [PATCH 5133/5725] drm/amd/powerplay: add vega20
+ pre_display_config_changed callback
+
+Fix a possible handshake hang and a video playback crash.
+
+Corner cases:
+ - Handshake between SMU and DCE causes hangs when CRTC is not
+ enabled
+ - System crash occurs when starting 4K playback with Movies and TV
+ in an SLS configuration
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 41 ++++++++++++++++++++++
+ 1 file changed, 41 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+index 7b6e48a..5b0c654 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+@@ -1874,6 +1874,45 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
+ return size;
+ }
+
++static int vega20_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr,
++ struct vega20_single_dpm_table *dpm_table)
++{
++ struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
++ int ret = 0;
++
++ if (data->smu_features[GNLD_DPM_UCLK].enabled) {
++ PP_ASSERT_WITH_CODE(dpm_table->count > 0,
++ "[SetUclkToHightestDpmLevel] Dpm table has no entry!",
++ return -EINVAL);
++ PP_ASSERT_WITH_CODE(dpm_table->count <= NUM_UCLK_DPM_LEVELS,
++ "[SetUclkToHightestDpmLevel] Dpm table has too many entries!",
++ return -EINVAL);
++
++ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_SetHardMinByFreq,
++ (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level)),
++ "[SetUclkToHightestDpmLevel] Set hard min uclk failed!",
++ return ret);
++ }
++
++ return ret;
++}
++
++static int vega20_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
++{
++ struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
++ int ret = 0;
++
++ smum_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_NumOfDisplays, 0);
++
++ ret = vega20_set_uclk_to_highest_dpm_level(hwmgr,
++ &data->dpm_table.mem_table);
++
++ return ret;
++}
++
+ static int vega20_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
+ {
+ struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
+@@ -2277,6 +2316,8 @@ static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
+ /* power state related */
+ .apply_clocks_adjust_rules =
+ vega20_apply_clocks_adjust_rules,
++ .pre_display_config_changed =
++ vega20_pre_display_configuration_changed_task,
+ .display_config_changed =
+ vega20_display_configuration_changed_task,
+ .check_smc_update_required_for_display_configuration =
+--
+2.7.4
+
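The uclk helper above guards against an empty or oversized DPM table before pinning memory clock to its highest level ahead of a display configuration change. The same guard, outside the driver (the table bound and contents are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_UCLK_DPM_LEVELS 4   /* assumed SMU-side limit */

    struct dpm_table { uint32_t count; uint32_t levels[NUM_UCLK_DPM_LEVELS]; };

    /* Returns the frequency to pin as the hard minimum, or 0 on a malformed table. */
    static uint32_t uclk_hard_min_for_display_change(const struct dpm_table *t)
    {
        if (t->count == 0 || t->count > NUM_UCLK_DPM_LEVELS)
            return 0;                      /* mirrors the patch's -EINVAL paths */
        return t->levels[t->count - 1];    /* highest populated level */
    }

    int main(void)
    {
        struct dpm_table mem = { .count = 3, .levels = { 167, 500, 800 } };
        printf("hard min uclk: %u MHz\n", uclk_hard_min_for_display_change(&mem));
        return 0;
    }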
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5134-drm-amd-powerplay-conv-the-vega20-pstate-sclk-mclk-i.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5134-drm-amd-powerplay-conv-the-vega20-pstate-sclk-mclk-i.patch
new file mode 100644
index 00000000..6ba975cf
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5134-drm-amd-powerplay-conv-the-vega20-pstate-sclk-mclk-i.patch
@@ -0,0 +1,32 @@
+From ef8e712bbda26e9239167f16d210ccbbaa3e82bf Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Fri, 11 May 2018 16:10:51 +0800
+Subject: [PATCH 5134/5725] drm/amd/powerplay: conv the vega20 pstate sclk/mclk
+ into necessary 10KHz unit
+
+Powerplay uses 10KHz units.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+index 5b0c654..182f25c 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+@@ -862,6 +862,9 @@ static int vega20_populate_umdpstate_clocks(
+ hwmgr->pstate_mclk = mem_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value;
+ }
+
++ hwmgr->pstate_sclk = hwmgr->pstate_sclk * 100;
++ hwmgr->pstate_mclk = hwmgr->pstate_mclk * 100;
++
+ return 0;
+ }
+
+--
+2.7.4
+
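The one-liner above is just a unit conversion: the DPM tables hold MHz, while the powerplay pstate/sysfs interface reports clocks in 10 kHz steps, so 1 MHz = 100 units. A trivial check:

    #include <stdint.h>
    #include <stdio.h>

    /* 1 MHz = 1000 kHz = 100 * (10 kHz), hence the "* 100" in the patch. */
    static uint32_t mhz_to_10khz(uint32_t mhz) { return mhz * 100; }

    int main(void)
    {
        printf("1138 MHz -> %u (10 kHz units)\n", mhz_to_10khz(1138)); /* 113800 */
        return 0;
    }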
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5135-drm-amd-powerplay-initialize-vega20-overdrive-settin.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5135-drm-amd-powerplay-initialize-vega20-overdrive-settin.patch
new file mode 100644
index 00000000..1c6a8e8d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5135-drm-amd-powerplay-initialize-vega20-overdrive-settin.patch
@@ -0,0 +1,635 @@
+From 00267b5af20fbf9d573ebac9f6d9943f86909487 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Mon, 21 May 2018 10:16:41 +0800
+Subject: [PATCH 5135/5725] drm/amd/powerplay: initialize vega20 overdrive
+ settings
+
+The initialized overdrive settings are taken from the VBIOS and from the
+SMU (via PPSMC_MSG_TransferTableSmu2Dram).
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 293 +++++++++++++++++++--
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h | 53 +++-
+ .../amd/powerplay/hwmgr/vega20_processpptables.c | 103 ++++++--
+ .../gpu/drm/amd/powerplay/inc/hardwaremanager.h | 2 +
+ drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 1 +
+ 5 files changed, 403 insertions(+), 49 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+index 182f25c..51bc05d 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+@@ -103,7 +103,7 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
+ data->registry_data.quick_transition_support = 0;
+ data->registry_data.zrpm_start_temp = 0xffff;
+ data->registry_data.zrpm_stop_temp = 0xffff;
+- data->registry_data.odn_feature_enable = 1;
++ data->registry_data.od8_feature_enable = 1;
+ data->registry_data.disable_water_mark = 0;
+ data->registry_data.disable_pp_tuning = 0;
+ data->registry_data.disable_xlpp_tuning = 0;
+@@ -150,15 +150,9 @@ static int vega20_set_features_platform_caps(struct pp_hwmgr *hwmgr)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_UnTabledHardwareInterface);
+
+- if (data->registry_data.odn_feature_enable)
++ if (data->registry_data.od8_feature_enable)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+- PHM_PlatformCaps_ODNinACSupport);
+- else {
+- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+- PHM_PlatformCaps_OD6inACSupport);
+- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+- PHM_PlatformCaps_OD6PlusinACSupport);
+- }
++ PHM_PlatformCaps_OD8inACSupport);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ActivityReporting);
+@@ -166,15 +160,9 @@ static int vega20_set_features_platform_caps(struct pp_hwmgr *hwmgr)
+ PHM_PlatformCaps_FanSpeedInTableIsRPM);
+
+ if (data->registry_data.od_state_in_dc_support) {
+- if (data->registry_data.odn_feature_enable)
++ if (data->registry_data.od8_feature_enable)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+- PHM_PlatformCaps_ODNinDCSupport);
+- else {
+- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+- PHM_PlatformCaps_OD6inDCSupport);
+- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+- PHM_PlatformCaps_OD6PlusinDCSupport);
+- }
++ PHM_PlatformCaps_OD8inDCSupport);
+ }
+
+ if (data->registry_data.thermal_support &&
+@@ -840,9 +828,276 @@ static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr)
+ return 0;
+ }
+
+-static int vega20_odn_initialize_default_settings(
++static int vega20_od8_set_feature_capabilities(
++ struct pp_hwmgr *hwmgr)
++{
++ struct phm_ppt_v3_information *pptable_information =
++ (struct phm_ppt_v3_information *)hwmgr->pptable;
++ struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
++ struct vega20_od8_settings *od_settings = &(data->od8_settings);
++
++ od_settings->overdrive8_capabilities = 0;
++
++ if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
++ if (pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_GFXCLKFMAX] > 0 &&
++ pptable_information->od_settings_min[ATOM_VEGA20_ODSETTING_GFXCLKFMAX] > 0 &&
++ pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_GFXCLKFMIN] > 0 &&
++ pptable_information->od_settings_min[ATOM_VEGA20_ODSETTING_GFXCLKFMIN] > 0)
++ od_settings->overdrive8_capabilities |= OD8_GFXCLK_LIMITS;
++
++ if (pptable_information->od_settings_min[ATOM_VEGA20_ODSETTING_VDDGFXCURVEFREQ_P1] > 0 &&
++ pptable_information->od_settings_min[ATOM_VEGA20_ODSETTING_VDDGFXCURVEFREQ_P2] > 0 &&
++ pptable_information->od_settings_min[ATOM_VEGA20_ODSETTING_VDDGFXCURVEFREQ_P3] > 0 &&
++ pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_VDDGFXCURVEFREQ_P1] > 0 &&
++ pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_VDDGFXCURVEFREQ_P2] > 0 &&
++ pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_VDDGFXCURVEFREQ_P3] > 0 &&
++ pptable_information->od_settings_min[ATOM_VEGA20_ODSETTING_VDDGFXCURVEVOLTAGEOFFSET_P1] > 0 &&
++ pptable_information->od_settings_min[ATOM_VEGA20_ODSETTING_VDDGFXCURVEVOLTAGEOFFSET_P2] > 0 &&
++ pptable_information->od_settings_min[ATOM_VEGA20_ODSETTING_VDDGFXCURVEVOLTAGEOFFSET_P3] > 0 &&
++ pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_VDDGFXCURVEVOLTAGEOFFSET_P1] > 0 &&
++ pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_VDDGFXCURVEVOLTAGEOFFSET_P2] > 0 &&
++ pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_VDDGFXCURVEVOLTAGEOFFSET_P3] > 0)
++ od_settings->overdrive8_capabilities |= OD8_GFXCLK_CURVE;
++ }
++
++ if (data->smu_features[GNLD_DPM_UCLK].enabled) {
++ if (pptable_information->od_settings_min[ATOM_VEGA20_ODSETTING_UCLKFMAX] > 0 &&
++ pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_UCLKFMAX] > 0)
++ od_settings->overdrive8_capabilities |= OD8_UCLK_MAX;
++ }
++
++ if (pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE] > 0 &&
++ pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE] <= 100)
++ od_settings->overdrive8_capabilities |= OD8_POWER_LIMIT;
++
++ if (data->smu_features[GNLD_FAN_CONTROL].enabled) {
++ if (pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_FANRPMMIN] > 0)
++ od_settings->overdrive8_capabilities |= OD8_FAN_SPEED_MIN;
++
++ if (pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_FANRPMACOUSTICLIMIT] > 0)
++ od_settings->overdrive8_capabilities |= OD8_ACOUSTIC_LIMIT_SCLK;
++ }
++
++ if (data->smu_features[GNLD_THERMAL].enabled) {
++ if (pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_FANTARGETTEMPERATURE] > 0)
++ od_settings->overdrive8_capabilities |= OD8_TEMPERATURE_FAN;
++
++ if (pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_OPERATINGTEMPMAX] > 0)
++ od_settings->overdrive8_capabilities |= OD8_TEMPERATURE_SYSTEM;
++ }
++
++ return 0;
++}
++
++static int vega20_od8_set_feature_id(
++ struct pp_hwmgr *hwmgr)
++{
++ struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
++ struct vega20_od8_settings *od_settings = &(data->od8_settings);
++
++ if (od_settings->overdrive8_capabilities & OD8_GFXCLK_LIMITS) {
++ od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].feature_id =
++ OD8_GFXCLK_LIMITS;
++ od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].feature_id =
++ OD8_GFXCLK_LIMITS;
++ } else {
++ od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].feature_id =
++ 0;
++ od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].feature_id =
++ 0;
++ }
++
++ if (od_settings->overdrive8_capabilities & OD8_GFXCLK_CURVE) {
++ od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].feature_id =
++ OD8_GFXCLK_CURVE;
++ od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id =
++ OD8_GFXCLK_CURVE;
++ od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].feature_id =
++ OD8_GFXCLK_CURVE;
++ od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id =
++ OD8_GFXCLK_CURVE;
++ od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].feature_id =
++ OD8_GFXCLK_CURVE;
++ od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id =
++ OD8_GFXCLK_CURVE;
++ } else {
++ od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].feature_id =
++ 0;
++ od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id =
++ 0;
++ od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].feature_id =
++ 0;
++ od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id =
++ 0;
++ od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].feature_id =
++ 0;
++ od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id =
++ 0;
++ }
++
++ if (od_settings->overdrive8_capabilities & OD8_UCLK_MAX)
++ od_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].feature_id = OD8_UCLK_MAX;
++ else
++ od_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].feature_id = 0;
++
++ if (od_settings->overdrive8_capabilities & OD8_POWER_LIMIT)
++ od_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].feature_id = OD8_POWER_LIMIT;
++ else
++ od_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].feature_id = 0;
++
++ if (od_settings->overdrive8_capabilities & OD8_ACOUSTIC_LIMIT_SCLK)
++ od_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].feature_id =
++ OD8_ACOUSTIC_LIMIT_SCLK;
++ else
++ od_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].feature_id =
++ 0;
++
++ if (od_settings->overdrive8_capabilities & OD8_FAN_SPEED_MIN)
++ od_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].feature_id =
++ OD8_FAN_SPEED_MIN;
++ else
++ od_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].feature_id =
++ 0;
++
++ if (od_settings->overdrive8_capabilities & OD8_TEMPERATURE_FAN)
++ od_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].feature_id =
++ OD8_TEMPERATURE_FAN;
++ else
++ od_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].feature_id =
++ 0;
++
++ if (od_settings->overdrive8_capabilities & OD8_TEMPERATURE_SYSTEM)
++ od_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].feature_id =
++ OD8_TEMPERATURE_SYSTEM;
++ else
++ od_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].feature_id =
++ 0;
++
++ return 0;
++}
++
++static int vega20_od8_initialize_default_settings(
+ struct pp_hwmgr *hwmgr)
+ {
++ struct phm_ppt_v3_information *pptable_information =
++ (struct phm_ppt_v3_information *)hwmgr->pptable;
++ struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
++ struct vega20_od8_settings *od8_settings = &(data->od8_settings);
++ OverDriveTable_t *od_table = &(data->smc_state_table.overdrive_table);
++ int i, ret = 0;
++
++ /* Set Feature Capabilities */
++ vega20_od8_set_feature_capabilities(hwmgr);
++
++ /* Map FeatureID to individual settings */
++ vega20_od8_set_feature_id(hwmgr);
++
++ /* Set default values */
++ ret = vega20_copy_table_from_smc(hwmgr, (uint8_t *)od_table, TABLE_OVERDRIVE);
++ PP_ASSERT_WITH_CODE(!ret,
++ "Failed to export over drive table!",
++ return ret);
++
++ if (od8_settings->overdrive8_capabilities & OD8_GFXCLK_LIMITS) {
++ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].default_value =
++ od_table->GfxclkFmin;
++ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].default_value =
++ od_table->GfxclkFmax;
++ } else {
++ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].default_value =
++ 0;
++ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].default_value =
++ 0;
++ }
++
++ if (od8_settings->overdrive8_capabilities & OD8_GFXCLK_CURVE) {
++ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].default_value =
++ od_table->GfxclkFreq1;
++ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value =
++ od_table->GfxclkOffsetVolt1;
++ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].default_value =
++ od_table->GfxclkFreq2;
++ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value =
++ od_table->GfxclkOffsetVolt2;
++ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].default_value =
++ od_table->GfxclkFreq3;
++ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value =
++ od_table->GfxclkOffsetVolt3;
++ } else {
++ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].default_value =
++ 0;
++ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value =
++ 0;
++ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].default_value =
++ 0;
++ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value =
++ 0;
++ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].default_value =
++ 0;
++ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value =
++ 0;
++ }
++
++ if (od8_settings->overdrive8_capabilities & OD8_UCLK_MAX)
++ od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].default_value =
++ od_table->UclkFmax;
++ else
++ od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].default_value =
++ 0;
++
++ if (od8_settings->overdrive8_capabilities & OD8_POWER_LIMIT)
++ od8_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].default_value =
++ od_table->OverDrivePct;
++ else
++ od8_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].default_value =
++ 0;
++
++ if (od8_settings->overdrive8_capabilities & OD8_ACOUSTIC_LIMIT_SCLK)
++ od8_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].default_value =
++ od_table->FanMaximumRpm;
++ else
++ od8_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].default_value =
++ 0;
++
++ if (od8_settings->overdrive8_capabilities & OD8_FAN_SPEED_MIN)
++ od8_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].default_value =
++ od_table->FanMinimumPwm;
++ else
++ od8_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].default_value =
++ 0;
++
++ if (od8_settings->overdrive8_capabilities & OD8_TEMPERATURE_FAN)
++ od8_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].default_value =
++ od_table->FanTargetTemperature;
++ else
++ od8_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].default_value =
++ 0;
++
++ if (od8_settings->overdrive8_capabilities & OD8_TEMPERATURE_SYSTEM)
++ od8_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].default_value =
++ od_table->MaxOpTemp;
++ else
++ od8_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].default_value =
++ 0;
++
++ for (i = 0; i < OD8_SETTING_COUNT; i++) {
++ if (od8_settings->od8_settings_array[i].feature_id) {
++ od8_settings->od8_settings_array[i].min_value =
++ pptable_information->od_settings_min[i];
++ od8_settings->od8_settings_array[i].max_value =
++ pptable_information->od_settings_max[i];
++ od8_settings->od8_settings_array[i].current_value =
++ od8_settings->od8_settings_array[i].default_value;
++ } else {
++ od8_settings->od8_settings_array[i].min_value =
++ 0;
++ od8_settings->od8_settings_array[i].max_value =
++ 0;
++ od8_settings->od8_settings_array[i].current_value =
++ 0;
++ }
++ }
++
+ return 0;
+ }
+
+@@ -1009,7 +1264,7 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+ "[EnableDPMTasks] Failed to power control set level!",
+ return result);
+
+- result = vega20_odn_initialize_default_settings(hwmgr);
++ result = vega20_od8_initialize_default_settings(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "[EnableDPMTasks] Failed to initialize odn settings!",
+ return result);
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
+index 59a59bc..130052a 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
+@@ -306,7 +306,7 @@ struct vega20_registry_data {
+ uint8_t led_dpm_enabled;
+ uint8_t fan_control_support;
+ uint8_t ulv_support;
+- uint8_t odn_feature_enable;
++ uint8_t od8_feature_enable;
+ uint8_t disable_water_mark;
+ uint8_t disable_workload_policy;
+ uint32_t force_workload_policy_mask;
+@@ -377,6 +377,54 @@ struct vega20_odn_data {
+ struct vega20_odn_temp_table odn_temp_table;
+ };
+
++enum OD8_FEATURE_ID
++{
++ OD8_GFXCLK_LIMITS = 1 << 0,
++ OD8_GFXCLK_CURVE = 1 << 1,
++ OD8_UCLK_MAX = 1 << 2,
++ OD8_POWER_LIMIT = 1 << 3,
++ OD8_ACOUSTIC_LIMIT_SCLK = 1 << 4, //FanMaximumRpm
++ OD8_FAN_SPEED_MIN = 1 << 5, //FanMinimumPwm
++ OD8_TEMPERATURE_FAN = 1 << 6, //FanTargetTemperature
++ OD8_TEMPERATURE_SYSTEM = 1 << 7, //MaxOpTemp
++ OD8_MEMORY_TIMING_TUNE = 1 << 8,
++ OD8_FAN_ZERO_RPM_CONTROL = 1 << 9
++};
++
++enum OD8_SETTING_ID
++{
++ OD8_SETTING_GFXCLK_FMIN = 0,
++ OD8_SETTING_GFXCLK_FMAX,
++ OD8_SETTING_GFXCLK_FREQ1,
++ OD8_SETTING_GFXCLK_VOLTAGE1,
++ OD8_SETTING_GFXCLK_FREQ2,
++ OD8_SETTING_GFXCLK_VOLTAGE2,
++ OD8_SETTING_GFXCLK_FREQ3,
++ OD8_SETTING_GFXCLK_VOLTAGE3,
++ OD8_SETTING_UCLK_FMAX,
++ OD8_SETTING_POWER_PERCENTAGE,
++ OD8_SETTING_FAN_ACOUSTIC_LIMIT,
++ OD8_SETTING_FAN_MIN_SPEED,
++ OD8_SETTING_FAN_TARGET_TEMP,
++ OD8_SETTING_OPERATING_TEMP_MAX,
++ OD8_SETTING_AC_TIMING,
++ OD8_SETTING_FAN_ZERO_RPM_CONTROL,
++ OD8_SETTING_COUNT
++};
++
++struct vega20_od8_single_setting {
++ uint32_t feature_id;
++ int32_t min_value;
++ int32_t max_value;
++ int32_t current_value;
++ int32_t default_value;
++};
++
++struct vega20_od8_settings {
++ uint32_t overdrive8_capabilities;
++ struct vega20_od8_single_setting od8_settings_array[OD8_SETTING_COUNT];
++};
++
+ struct vega20_hwmgr {
+ struct vega20_dpm_table dpm_table;
+ struct vega20_dpm_table golden_dpm_table;
+@@ -452,6 +500,9 @@ struct vega20_hwmgr {
+ /* ---- Overdrive next setting ---- */
+ struct vega20_odn_data odn_data;
+
++ /* ---- Overdrive8 Setting ---- */
++ struct vega20_od8_settings od8_settings;
++
+ /* ---- Workload Mask ---- */
+ uint32_t workload_mask;
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
+index 379ac3d..32d24a4 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
+@@ -664,18 +664,18 @@ static int set_platform_caps(struct pp_hwmgr *hwmgr, uint32_t powerplay_caps)
+ static int copy_clock_limits_array(
+ struct pp_hwmgr *hwmgr,
+ uint32_t **pptable_info_array,
+- const uint32_t *pptable_array)
++ const uint32_t *pptable_array,
++ uint32_t power_saving_clock_count)
+ {
+ uint32_t array_size, i;
+ uint32_t *table;
+
+- array_size = sizeof(uint32_t) * ATOM_VEGA20_PPCLOCK_COUNT;
+-
++ array_size = sizeof(uint32_t) * power_saving_clock_count;
+ table = kzalloc(array_size, GFP_KERNEL);
+ if (NULL == table)
+ return -ENOMEM;
+
+- for (i = 0; i < ATOM_VEGA20_PPCLOCK_COUNT; i++)
++ for (i = 0; i < power_saving_clock_count; i++)
+ table[i] = pptable_array[i];
+
+ *pptable_info_array = table;
+@@ -686,22 +686,52 @@ static int copy_clock_limits_array(
+ static int copy_overdrive_settings_limits_array(
+ struct pp_hwmgr *hwmgr,
+ uint32_t **pptable_info_array,
+- const uint32_t *pptable_array)
++ const uint32_t *pptable_array,
++ uint32_t od_setting_count)
+ {
+ uint32_t array_size, i;
+ uint32_t *table;
+
+- array_size = sizeof(uint32_t) * ATOM_VEGA20_ODSETTING_COUNT;
++ array_size = sizeof(uint32_t) * od_setting_count;
++ table = kzalloc(array_size, GFP_KERNEL);
++ if (NULL == table)
++ return -ENOMEM;
++
++ for (i = 0; i < od_setting_count; i++)
++ table[i] = pptable_array[i];
++
++ *pptable_info_array = table;
++
++ return 0;
++}
++
++static int copy_overdrive_feature_capabilities_array(
++ struct pp_hwmgr *hwmgr,
++ uint8_t **pptable_info_array,
++ const uint8_t *pptable_array,
++ uint8_t od_feature_count)
++{
++ uint32_t array_size, i;
++ uint8_t *table;
++ bool od_supported = false;
+
++ array_size = sizeof(uint8_t) * od_feature_count;
+ table = kzalloc(array_size, GFP_KERNEL);
+ if (NULL == table)
+ return -ENOMEM;
+
+- for (i = 0; i < ATOM_VEGA20_ODSETTING_COUNT; i++)
++ for (i = 0; i < od_feature_count; i++) {
+ table[i] = pptable_array[i];
++ if (table[i])
++ od_supported = true;
++ }
+
+ *pptable_info_array = table;
+
++ if (od_supported)
++ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
++ PHM_PlatformCaps_ACOverdriveSupport);
++
+ return 0;
+ }
+
+@@ -799,6 +829,7 @@ static int init_powerplay_table_information(
+ struct phm_ppt_v3_information *pptable_information =
+ (struct phm_ppt_v3_information *)hwmgr->pptable;
+ uint32_t disable_power_control = 0;
++ uint32_t od_feature_count, od_setting_count, power_saving_clock_count;
+ int result;
+
+ hwmgr->thermal_controller.ucType = powerplay_table->ucThermalControllerType;
+@@ -810,22 +841,25 @@ static int init_powerplay_table_information(
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
+
+- if (powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_GFXCLKFMAX] > VEGA20_ENGINECLOCK_HARDMAX)
+- hwmgr->platform_descriptor.overdriveLimit.engineClock = VEGA20_ENGINECLOCK_HARDMAX;
+- else
+- hwmgr->platform_descriptor.overdriveLimit.engineClock = powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_GFXCLKFMAX];
+- hwmgr->platform_descriptor.overdriveLimit.memoryClock = powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_UCLKFMAX];
+-
+- copy_overdrive_settings_limits_array(hwmgr, &pptable_information->od_settings_max, powerplay_table->OverDrive8Table.ODSettingsMax);
+- copy_overdrive_settings_limits_array(hwmgr, &pptable_information->od_settings_min, powerplay_table->OverDrive8Table.ODSettingsMin);
+-
+- /* hwmgr->platformDescriptor.minOverdriveVDDC = 0;
+- hwmgr->platformDescriptor.maxOverdriveVDDC = 0;
+- hwmgr->platformDescriptor.overdriveVDDCStep = 0; */
+-
+- if (hwmgr->platform_descriptor.overdriveLimit.engineClock > 0
+- && hwmgr->platform_descriptor.overdriveLimit.memoryClock > 0)
+- phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ACOverdriveSupport);
++ if (powerplay_table->OverDrive8Table.ucODTableRevision == 1) {
++ od_feature_count = (powerplay_table->OverDrive8Table.ODFeatureCount > ATOM_VEGA20_ODFEATURE_COUNT) ?
++ ATOM_VEGA20_ODFEATURE_COUNT : powerplay_table->OverDrive8Table.ODFeatureCount;
++ od_setting_count = (powerplay_table->OverDrive8Table.ODSettingCount > ATOM_VEGA20_ODSETTING_COUNT) ?
++ ATOM_VEGA20_ODSETTING_COUNT : powerplay_table->OverDrive8Table.ODSettingCount;
++
++ copy_overdrive_feature_capabilities_array(hwmgr,
++ &pptable_information->od_feature_capabilities,
++ powerplay_table->OverDrive8Table.ODFeatureCapabilities,
++ od_feature_count);
++ copy_overdrive_settings_limits_array(hwmgr,
++ &pptable_information->od_settings_max,
++ powerplay_table->OverDrive8Table.ODSettingsMax,
++ od_setting_count);
++ copy_overdrive_settings_limits_array(hwmgr,
++ &pptable_information->od_settings_min,
++ powerplay_table->OverDrive8Table.ODSettingsMin,
++ od_setting_count);
++ }
+
+ pptable_information->us_small_power_limit1 = powerplay_table->usSmallPowerLimit1;
+ pptable_information->us_small_power_limit2 = powerplay_table->usSmallPowerLimit2;
+@@ -838,15 +872,23 @@ static int init_powerplay_table_information(
+ hwmgr->platform_descriptor.TDPODLimit = (uint16_t)powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE];
+
+ disable_power_control = 0;
+- if (!disable_power_control && hwmgr->platform_descriptor.TDPODLimit) {
++ if (!disable_power_control && hwmgr->platform_descriptor.TDPODLimit)
+ /* enable TDP overdrive (PowerControl) feature as well if supported */
+- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+- PHM_PlatformCaps_PowerControl);
++ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PowerControl);
++
++ if (powerplay_table->PowerSavingClockTable.ucTableRevision == 1) {
++ power_saving_clock_count = (powerplay_table->PowerSavingClockTable.PowerSavingClockCount >= ATOM_VEGA20_PPCLOCK_COUNT) ?
++ ATOM_VEGA20_PPCLOCK_COUNT : powerplay_table->PowerSavingClockTable.PowerSavingClockCount;
++ copy_clock_limits_array(hwmgr,
++ &pptable_information->power_saving_clock_max,
++ powerplay_table->PowerSavingClockTable.PowerSavingClockMax,
++ power_saving_clock_count);
++ copy_clock_limits_array(hwmgr,
++ &pptable_information->power_saving_clock_min,
++ powerplay_table->PowerSavingClockTable.PowerSavingClockMin,
++ power_saving_clock_count);
+ }
+
+- copy_clock_limits_array(hwmgr, &pptable_information->power_saving_clock_max, powerplay_table->PowerSavingClockTable.PowerSavingClockMax);
+- copy_clock_limits_array(hwmgr, &pptable_information->power_saving_clock_min, powerplay_table->PowerSavingClockTable.PowerSavingClockMin);
+-
+ pptable_information->smc_pptable = (PPTable_t *)kmalloc(sizeof(PPTable_t), GFP_KERNEL);
+ if (pptable_information->smc_pptable == NULL)
+ return -ENOMEM;
+@@ -898,6 +940,9 @@ static int vega20_pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
+ kfree(pp_table_info->power_saving_clock_min);
+ pp_table_info->power_saving_clock_min = NULL;
+
++ kfree(pp_table_info->od_feature_capabilities);
++ pp_table_info->od_feature_capabilities = NULL;
++
+ kfree(pp_table_info->od_settings_max);
+ pp_table_info->od_settings_max = NULL;
+
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+index 429c9c4..54fd012 100644
+--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+@@ -232,6 +232,8 @@ enum phm_platform_caps {
+ PHM_PlatformCaps_UVDClientMCTuning,
+ PHM_PlatformCaps_ODNinACSupport,
+ PHM_PlatformCaps_ODNinDCSupport,
++ PHM_PlatformCaps_OD8inACSupport,
++ PHM_PlatformCaps_OD8inDCSupport,
+ PHM_PlatformCaps_UMDPState,
+ PHM_PlatformCaps_AutoWattmanSupport,
+ PHM_PlatformCaps_AutoWattmanEnable_CCCState,
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+index 7e58a0d..e0cb7d0 100644
+--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+@@ -584,6 +584,7 @@ struct phm_ppt_v3_information
+ uint32_t *power_saving_clock_max;
+ uint32_t *power_saving_clock_min;
+
++ uint8_t *od_feature_capabilities;
+ uint32_t *od_settings_max;
+ uint32_t *od_settings_min;
+
+--
+2.7.4
+
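The capability discovery in the patch above reduces to a simple rule: an OD8 feature bit is set only when every pptable min/max entry it depends on is non-zero (and, for the power limit, at most 100%). A compact illustration with made-up limits and only a subset of the feature bits:

    #include <stdint.h>
    #include <stdio.h>

    enum { OD8_GFXCLK_LIMITS = 1 << 0, OD8_UCLK_MAX = 1 << 2, OD8_POWER_LIMIT = 1 << 3 };

    struct od_limits {
        uint32_t gfx_fmin_min, gfx_fmin_max, gfx_fmax_min, gfx_fmax_max;
        uint32_t uclk_fmax_min, uclk_fmax_max;
        uint32_t power_pct_max;
    };

    static uint32_t od8_capabilities(const struct od_limits *l, int gfx_dpm_on, int uclk_dpm_on)
    {
        uint32_t caps = 0;
        /* gfxclk limits: need valid min and max bounds for both Fmin and Fmax */
        if (gfx_dpm_on && l->gfx_fmin_min && l->gfx_fmin_max && l->gfx_fmax_min && l->gfx_fmax_max)
            caps |= OD8_GFXCLK_LIMITS;
        if (uclk_dpm_on && l->uclk_fmax_min && l->uclk_fmax_max)
            caps |= OD8_UCLK_MAX;
        if (l->power_pct_max > 0 && l->power_pct_max <= 100)
            caps |= OD8_POWER_LIMIT;
        return caps;
    }

    int main(void)
    {
        struct od_limits l = { 800, 1000, 1000, 2000, 800, 1200, 20 };
        printf("od8 caps: 0x%x\n", od8_capabilities(&l, 1, 1));
        return 0;
    }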
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5136-drm-amd-powerplay-new-interfaces-for-overdrive-vega2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5136-drm-amd-powerplay-new-interfaces-for-overdrive-vega2.patch
new file mode 100644
index 00000000..69be14a9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5136-drm-amd-powerplay-new-interfaces-for-overdrive-vega2.patch
@@ -0,0 +1,207 @@
+From 688a7fc74cf16e031991d912e7e2b329dc6832d5 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Mon, 21 May 2018 10:19:06 +0800
+Subject: [PATCH 5136/5725] drm/amd/powerplay: new interfaces for overdrive
+ vega20 sclk and mclk
+
+Add support for the new SMU firmware interface for clock adjustment.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 169 +++++++++++++++++++++
+ 1 file changed, 169 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+index 51bc05d..1e9426f 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+@@ -1101,6 +1101,166 @@ static int vega20_od8_initialize_default_settings(
+ return 0;
+ }
+
++static int vega20_od8_set_settings(
++ struct pp_hwmgr *hwmgr,
++ uint32_t index,
++ uint32_t value)
++{
++ OverDriveTable_t od_table;
++ int ret = 0;
++
++ ret = vega20_copy_table_from_smc(hwmgr, (uint8_t *)(&od_table), TABLE_OVERDRIVE);
++ PP_ASSERT_WITH_CODE(!ret,
++ "Failed to export over drive table!",
++ return ret);
++
++ switch(index) {
++ case OD8_SETTING_GFXCLK_FMIN:
++ od_table.GfxclkFmin = (uint16_t)value;
++ break;
++ case OD8_SETTING_GFXCLK_FMAX:
++ od_table.GfxclkFmax = (uint16_t)value;
++ break;
++ case OD8_SETTING_GFXCLK_FREQ1:
++ od_table.GfxclkFreq1 = (uint16_t)value;
++ break;
++ case OD8_SETTING_GFXCLK_VOLTAGE1:
++ od_table.GfxclkOffsetVolt1 = (uint16_t)value;
++ break;
++ case OD8_SETTING_GFXCLK_FREQ2:
++ od_table.GfxclkFreq2 = (uint16_t)value;
++ break;
++ case OD8_SETTING_GFXCLK_VOLTAGE2:
++ od_table.GfxclkOffsetVolt2 = (uint16_t)value;
++ break;
++ case OD8_SETTING_GFXCLK_FREQ3:
++ od_table.GfxclkFreq3 = (uint16_t)value;
++ break;
++ case OD8_SETTING_GFXCLK_VOLTAGE3:
++ od_table.GfxclkOffsetVolt3 = (uint16_t)value;
++ break;
++ case OD8_SETTING_UCLK_FMAX:
++ od_table.UclkFmax = (uint16_t)value;
++ break;
++ case OD8_SETTING_POWER_PERCENTAGE:
++ od_table.OverDrivePct = (int16_t)value;
++ break;
++ case OD8_SETTING_FAN_ACOUSTIC_LIMIT:
++ od_table.FanMaximumRpm = (uint16_t)value;
++ break;
++ case OD8_SETTING_FAN_MIN_SPEED:
++ od_table.FanMinimumPwm = (uint16_t)value;
++ break;
++ case OD8_SETTING_FAN_TARGET_TEMP:
++ od_table.FanTargetTemperature = (uint16_t)value;
++ break;
++ case OD8_SETTING_OPERATING_TEMP_MAX:
++ od_table.MaxOpTemp = (uint16_t)value;
++ break;
++ }
++
++ ret = vega20_copy_table_to_smc(hwmgr, (uint8_t *)(&od_table), TABLE_OVERDRIVE);
++ PP_ASSERT_WITH_CODE(!ret,
++ "Failed to import over drive table!",
++ return ret);
++
++ return 0;
++}
++
++static int vega20_get_sclk_od(
++ struct pp_hwmgr *hwmgr)
++{
++ struct vega20_hwmgr *data = hwmgr->backend;
++ struct vega20_single_dpm_table *sclk_table =
++ &(data->dpm_table.gfx_table);
++ struct vega20_single_dpm_table *golden_sclk_table =
++ &(data->golden_dpm_table.gfx_table);
++ int value;
++
++ /* od percentage */
++ value = DIV_ROUND_UP((sclk_table->dpm_levels[sclk_table->count - 1].value -
++ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) * 100,
++ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value);
++
++ return value;
++}
++
++static int vega20_set_sclk_od(
++ struct pp_hwmgr *hwmgr, uint32_t value)
++{
++ struct vega20_hwmgr *data = hwmgr->backend;
++ struct vega20_single_dpm_table *sclk_table =
++ &(data->dpm_table.gfx_table);
++ struct vega20_single_dpm_table *golden_sclk_table =
++ &(data->golden_dpm_table.gfx_table);
++ uint32_t od_sclk;
++ int ret = 0;
++
++ od_sclk = golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value * value;
++ do_div(od_sclk, 100);
++ od_sclk += golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
++
++ ret = vega20_od8_set_settings(hwmgr, OD8_SETTING_GFXCLK_FMAX, od_sclk);
++ PP_ASSERT_WITH_CODE(!ret,
++ "[SetSclkOD] failed to set od gfxclk!",
++ return ret);
++
++ /* refresh gfxclk table */
++ ret = vega20_setup_single_dpm_table(hwmgr, sclk_table, PPCLK_GFXCLK);
++ PP_ASSERT_WITH_CODE(!ret,
++ "[SetSclkOD] failed to refresh gfxclk table!",
++ return ret);
++
++ return 0;
++}
++
++static int vega20_get_mclk_od(
++ struct pp_hwmgr *hwmgr)
++{
++ struct vega20_hwmgr *data = hwmgr->backend;
++ struct vega20_single_dpm_table *mclk_table =
++ &(data->dpm_table.mem_table);
++ struct vega20_single_dpm_table *golden_mclk_table =
++ &(data->golden_dpm_table.mem_table);
++ int value;
++
++ /* od percentage */
++ value = DIV_ROUND_UP((mclk_table->dpm_levels[mclk_table->count - 1].value -
++ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) * 100,
++ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value);
++
++ return value;
++}
++
++static int vega20_set_mclk_od(
++ struct pp_hwmgr *hwmgr, uint32_t value)
++{
++ struct vega20_hwmgr *data = hwmgr->backend;
++ struct vega20_single_dpm_table *mclk_table =
++ &(data->dpm_table.mem_table);
++ struct vega20_single_dpm_table *golden_mclk_table =
++ &(data->golden_dpm_table.mem_table);
++ uint32_t od_mclk;
++ int ret = 0;
++
++ od_mclk = golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value * value;
++ do_div(od_mclk, 100);
++ od_mclk += golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
++
++ ret = vega20_od8_set_settings(hwmgr, OD8_SETTING_UCLK_FMAX, od_mclk);
++ PP_ASSERT_WITH_CODE(!ret,
++ "[SetMclkOD] failed to set od memclk!",
++ return ret);
++
++ /* refresh memclk table */
++ ret = vega20_setup_single_dpm_table(hwmgr, mclk_table, PPCLK_UCLK);
++ PP_ASSERT_WITH_CODE(!ret,
++ "[SetMclkOD] failed to refresh memclk table!",
++ return ret);
++
++ return 0;
++}
++
+ static int vega20_populate_umdpstate_clocks(
+ struct pp_hwmgr *hwmgr)
+ {
+@@ -2604,8 +2764,17 @@ static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
+ vega20_get_power_profile_mode,
+ .set_power_profile_mode =
+ vega20_set_power_profile_mode,
++ /* od related */
+ .set_power_limit =
+ vega20_set_power_limit,
++ .get_sclk_od =
++ vega20_get_sclk_od,
++ .set_sclk_od =
++ vega20_set_sclk_od,
++ .get_mclk_od =
++ vega20_get_mclk_od,
++ .set_mclk_od =
++ vega20_set_mclk_od,
+ /* for sysfs to retrive/set gfxclk/memclk */
+ .force_clock_level =
+ vega20_force_clock_level,
+--
+2.7.4
+
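The get/set pair above works in percent relative to the "golden" (default) top DPM level: get reports how far the current top level sits above the golden one, set maps a requested percentage back to an absolute frequency. The arithmetic in isolation, with DIV_ROUND_UP spelled out and invented frequencies:

    #include <stdint.h>
    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* Overclock percentage of the current top level over the golden top level. */
    static int get_od_percent(uint32_t cur_top, uint32_t golden_top)
    {
        return DIV_ROUND_UP((cur_top - golden_top) * 100, golden_top);
    }

    /* Absolute frequency for a requested overclock percentage. */
    static uint32_t set_od_freq(uint32_t golden_top, uint32_t percent)
    {
        return golden_top + golden_top * percent / 100;
    }

    int main(void)
    {
        uint32_t golden = 1630;
        uint32_t boosted = set_od_freq(golden, 5);   /* request +5 % */
        printf("boosted top level: %u MHz, reads back as %d %%\n",
               boosted, get_od_percent(boosted, golden));
        return 0;
    }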
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5137-drm-amd-powerplay-revise-vega20-PPSMC_MSG_SetSoftMin.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5137-drm-amd-powerplay-revise-vega20-PPSMC_MSG_SetSoftMin.patch
new file mode 100644
index 00000000..ccfc66ff
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5137-drm-amd-powerplay-revise-vega20-PPSMC_MSG_SetSoftMin.patch
@@ -0,0 +1,175 @@
+From 36156ef0d0f81b5b822351c1b9cd5db79e73c1e3 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Mon, 21 May 2018 10:24:57 +0800
+Subject: [PATCH 5137/5725] drm/amd/powerplay: revise vega20
+ PPSMC_MSG_SetSoftMin/[Max]ByFreq settings
+
+UVD, VCE and SOCCLK also need to be taken into consideration when
+sending PPSMC_MSG_SetSoftMinByFreq and PPSMC_MSG_SetSoftMaxByFreq.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 109 ++++++++++++++++++---
+ 1 file changed, 96 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+index 1e9426f..3f769f3 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+@@ -1485,31 +1485,72 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
+ {
+ struct vega20_hwmgr *data =
+ (struct vega20_hwmgr *)(hwmgr->backend);
++ uint32_t min_freq;
+ int ret = 0;
+
+- if (data->smu_features[GNLD_DPM_GFXCLK].enabled)
++ if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
++ min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level;
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMinByFreq,
+- PPCLK_GFXCLK << 16 |
+- data->dpm_table.gfx_table.dpm_state.soft_min_level)),
++ (PPCLK_GFXCLK << 16) | (min_freq & 0xffff))),
+ "Failed to set soft min gfxclk !",
+ return ret);
++ }
+
+ if (data->smu_features[GNLD_DPM_UCLK].enabled) {
++ min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level;
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMinByFreq,
+- PPCLK_UCLK << 16 |
+- data->dpm_table.mem_table.dpm_state.soft_min_level)),
++ (PPCLK_UCLK << 16) | (min_freq & 0xffff))),
+ "Failed to set soft min memclk !",
+ return ret);
++
++ min_freq = data->dpm_table.mem_table.dpm_state.hard_min_level;
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetHardMinByFreq,
+- PPCLK_UCLK << 16 |
+- data->dpm_table.mem_table.dpm_state.hard_min_level)),
++ (PPCLK_UCLK << 16) | (min_freq & 0xffff))),
+ "Failed to set hard min memclk !",
+ return ret);
+ }
+
++ if (data->smu_features[GNLD_DPM_UVD].enabled) {
++ min_freq = data->dpm_table.vclk_table.dpm_state.soft_min_level;
++
++ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
++ hwmgr, PPSMC_MSG_SetSoftMinByFreq,
++ (PPCLK_VCLK << 16) | (min_freq & 0xffff))),
++ "Failed to set soft min vclk!",
++ return ret);
++
++ min_freq = data->dpm_table.dclk_table.dpm_state.soft_min_level;
++
++ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
++ hwmgr, PPSMC_MSG_SetSoftMinByFreq,
++ (PPCLK_DCLK << 16) | (min_freq & 0xffff))),
++ "Failed to set soft min dclk!",
++ return ret);
++ }
++
++ if (data->smu_features[GNLD_DPM_VCE].enabled) {
++ min_freq = data->dpm_table.eclk_table.dpm_state.soft_min_level;
++
++ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
++ hwmgr, PPSMC_MSG_SetSoftMinByFreq,
++ (PPCLK_ECLK << 16) | (min_freq & 0xffff))),
++ "Failed to set soft min eclk!",
++ return ret);
++ }
++
++ if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
++ min_freq = data->dpm_table.soc_table.dpm_state.soft_min_level;
++
++ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
++ hwmgr, PPSMC_MSG_SetSoftMinByFreq,
++ (PPCLK_SOCCLK << 16) | (min_freq & 0xffff))),
++ "Failed to set soft min socclk!",
++ return ret);
++ }
++
+ return ret;
+ }
+
+@@ -1517,23 +1558,65 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
+ {
+ struct vega20_hwmgr *data =
+ (struct vega20_hwmgr *)(hwmgr->backend);
++ uint32_t max_freq;
+ int ret = 0;
+
+- if (data->smu_features[GNLD_DPM_GFXCLK].enabled)
++ if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
++ max_freq = data->dpm_table.gfx_table.dpm_state.soft_max_level;
++
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
+- PPCLK_GFXCLK << 16 |
+- data->dpm_table.gfx_table.dpm_state.soft_max_level)),
++ (PPCLK_GFXCLK << 16) | (max_freq & 0xffff))),
+ "Failed to set soft max gfxclk!",
+ return ret);
++ }
++
++ if (data->smu_features[GNLD_DPM_UCLK].enabled) {
++ max_freq = data->dpm_table.mem_table.dpm_state.soft_max_level;
+
+- if (data->smu_features[GNLD_DPM_UCLK].enabled)
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
+- PPCLK_UCLK << 16 |
+- data->dpm_table.mem_table.dpm_state.soft_max_level)),
++ (PPCLK_UCLK << 16) | (max_freq & 0xffff))),
+ "Failed to set soft max memclk!",
+ return ret);
++ }
++
++ if (data->smu_features[GNLD_DPM_UVD].enabled) {
++ max_freq = data->dpm_table.vclk_table.dpm_state.soft_max_level;
++
++ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
++ hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
++ (PPCLK_VCLK << 16) | (max_freq & 0xffff))),
++ "Failed to set soft max vclk!",
++ return ret);
++
++ max_freq = data->dpm_table.dclk_table.dpm_state.soft_max_level;
++ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
++ hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
++ (PPCLK_DCLK << 16) | (max_freq & 0xffff))),
++ "Failed to set soft max dclk!",
++ return ret);
++ }
++
++ if (data->smu_features[GNLD_DPM_VCE].enabled) {
++ max_freq = data->dpm_table.eclk_table.dpm_state.soft_max_level;
++
++ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
++ hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
++ (PPCLK_ECLK << 16) | (max_freq & 0xffff))),
++ "Failed to set soft max eclk!",
++ return ret);
++ }
++
++ if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
++ max_freq = data->dpm_table.soc_table.dpm_state.soft_max_level;
++
++ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
++ hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
++ (PPCLK_SOCCLK << 16) | (max_freq & 0xffff))),
++ "Failed to set soft max socclk!",
++ return ret);
++ }
+
+ return ret;
+ }
+--
+2.7.4
+
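All of the SetSoftMin/MaxByFreq calls in the patch above share one parameter layout: the clock domain ID in the upper 16 bits and the frequency in the lower 16. A stand-alone sketch of that packing (the domain IDs below are placeholders, not the real PPCLK_e values):

    #include <stdint.h>
    #include <stdio.h>

    enum ppclk { PPCLK_GFXCLK = 0, PPCLK_UCLK, PPCLK_VCLK, PPCLK_DCLK, PPCLK_ECLK, PPCLK_SOCCLK };

    static uint32_t pack_clk_freq(enum ppclk clk, uint32_t freq_mhz)
    {
        /* Domain in the high half-word, frequency masked into the low half-word. */
        return ((uint32_t)clk << 16) | (freq_mhz & 0xffff);
    }

    int main(void)
    {
        printf("soft min gfxclk arg: 0x%08x\n", pack_clk_freq(PPCLK_GFXCLK, 852));
        printf("soft max uclk arg:   0x%08x\n", pack_clk_freq(PPCLK_UCLK, 1000));
        return 0;
    }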
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5138-drm-amd-powerplay-update-vega20-clocks-threshold-set.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5138-drm-amd-powerplay-update-vega20-clocks-threshold-set.patch
new file mode 100644
index 00000000..78c4442e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5138-drm-amd-powerplay-update-vega20-clocks-threshold-set.patch
@@ -0,0 +1,159 @@
+From a153848ba79cc69ea6ff454678f8f7337c338def Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Mon, 21 May 2018 10:43:31 +0800
+Subject: [PATCH 5138/5725] drm/amd/powerplay: update vega20 clocks threshold
+ settings on power state adjust
+
+UVD, VCE and SOC clocks need to be taken into consideration. Also, the
+thresholds need to be updated accordingly when a stable power state is selected.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 112 +++++++++++++++++++++
+ 1 file changed, 112 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+index 3f769f3..ed928c5 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+@@ -2503,6 +2503,23 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
++ if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
++ if (VEGA20_UMD_PSTATE_GFXCLK_LEVEL < dpm_table->count) {
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value;
++ }
++
++ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
++ }
++
++ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ }
++ }
++
+ /* memclk */
+ dpm_table = &(data->dpm_table.mem_table);
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+@@ -2510,9 +2527,28 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
++ if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
++ if (VEGA20_UMD_PSTATE_MCLK_LEVEL < dpm_table->count) {
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value;
++ }
++
++ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
++ }
++
++ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ }
++ }
++
++ /* honour DAL's UCLK Hardmin */
+ if (dpm_table->dpm_state.hard_min_level < (hwmgr->display_config->min_mem_set_clock / 100))
+ dpm_table->dpm_state.hard_min_level = hwmgr->display_config->min_mem_set_clock / 100;
+
++ /* Hardmin is dependent on displayconfig */
+ if (disable_mclk_switching) {
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ for (i = 0; i < data->mclk_latency_table.count - 1; i++) {
+@@ -2528,6 +2564,82 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
+ if (hwmgr->display_config->nb_pstate_switch_disable)
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
++ /* vclk */
++ dpm_table = &(data->dpm_table.vclk_table);
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
++ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++
++ if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
++ if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
++ }
++
++ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ }
++ }
++
++ /* dclk */
++ dpm_table = &(data->dpm_table.dclk_table);
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
++ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++
++ if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
++ if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
++ }
++
++ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ }
++ }
++
++ /* socclk */
++ dpm_table = &(data->dpm_table.soc_table);
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
++ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++
++ if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
++ if (VEGA20_UMD_PSTATE_SOCCLK_LEVEL < dpm_table->count) {
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_SOCCLK_LEVEL].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_SOCCLK_LEVEL].value;
++ }
++
++ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ }
++ }
++
++ /* eclk */
++ dpm_table = &(data->dpm_table.eclk_table);
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
++ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++
++ if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
++ if (VEGA20_UMD_PSTATE_VCEMCLK_LEVEL < dpm_table->count) {
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_VCEMCLK_LEVEL].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_VCEMCLK_LEVEL].value;
++ }
++
++ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ }
++ }
++
+ return 0;
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5139-drm-amdgpu-enable-vega20-powerplay-support.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5139-drm-amdgpu-enable-vega20-powerplay-support.patch
new file mode 100644
index 00000000..b45ac682
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5139-drm-amdgpu-enable-vega20-powerplay-support.patch
@@ -0,0 +1,31 @@
+From 5a9699d0d0b0e6a6e1a48d8ef0862a9956811e24 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Wed, 2 May 2018 15:45:54 +0800
+Subject: [PATCH 5139/5725] drm/amdgpu: enable vega20 powerplay support
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/soc15.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index 73c85a0..dd44b50 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -520,9 +520,9 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
+ amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
+ if (adev->asic_type != CHIP_VEGA20) {
+ amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
+- if (!amdgpu_sriov_vf(adev))
+- amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+ }
++ if (!amdgpu_sriov_vf(adev))
++ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+ if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+ #if defined(CONFIG_DRM_AMD_DC)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5140-drm-amdgpu-Add-psp-11.0-support-for-vega20.-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5140-drm-amdgpu-Add-psp-11.0-support-for-vega20.-v2.patch
new file mode 100644
index 00000000..5d0903a2
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5140-drm-amdgpu-Add-psp-11.0-support-for-vega20.-v2.patch
@@ -0,0 +1,733 @@
+From 862b99ce17c4fba9d2001a0e55542e08a9968d40 Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Fri, 11 May 2018 14:54:50 +0800
+Subject: [PATCH 5140/5725] drm/amdgpu: Add psp 11.0 support for vega20. (v2)
+
+Add psp 11.0 code for vega20 and enable it. PSP is the
+security processor for the GPU. It handles firmware
+loading and GPU resets, among other things.
+
+v2: whitespace fix, enable support, adjust reg includes (Alex)
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/Makefile | 3 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 14 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 1 +
+ drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 565 ++++++++++++++++++++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/psp_v11_0.h | 30 ++
+ drivers/gpu/drm/amd/amdgpu/psp_v3_1.c | 2 -
+ drivers/gpu/drm/amd/amdgpu/soc15.c | 5 +-
+ 7 files changed, 614 insertions(+), 6 deletions(-)
+ create mode 100644 drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+ create mode 100644 drivers/gpu/drm/amd/amdgpu/psp_v11_0.h
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
+index 6f0a496..e0854bb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/Makefile
++++ b/drivers/gpu/drm/amd/amdgpu/Makefile
+@@ -68,7 +68,8 @@ amdgpu-y += \
+ amdgpu-y += \
+ amdgpu_psp.o \
+ psp_v3_1.o \
+- psp_v10_0.o
++ psp_v10_0.o \
++ psp_v11_0.o
+
+ # add SMC block
+ amdgpu-y += \
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+index a13cd21..70b7d42 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+@@ -31,6 +31,7 @@
+ #include "soc15_common.h"
+ #include "psp_v3_1.h"
+ #include "psp_v10_0.h"
++#include "psp_v11_0.h"
+
+ static void psp_set_funcs(struct amdgpu_device *adev);
+
+@@ -52,12 +53,14 @@ static int psp_sw_init(void *handle)
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
+- case CHIP_VEGA20:
+ psp_v3_1_set_psp_funcs(psp);
+ break;
+ case CHIP_RAVEN:
+ psp_v10_0_set_psp_funcs(psp);
+ break;
++ case CHIP_VEGA20:
++ psp_v11_0_set_psp_funcs(psp);
++ break;
+ default:
+ return -EINVAL;
+ }
+@@ -593,3 +596,12 @@ const struct amdgpu_ip_block_version psp_v10_0_ip_block =
+ .rev = 0,
+ .funcs = &psp_ip_funcs,
+ };
++
++const struct amdgpu_ip_block_version psp_v11_0_ip_block =
++{
++ .type = AMD_IP_BLOCK_TYPE_PSP,
++ .major = 11,
++ .minor = 0,
++ .rev = 0,
++ .funcs = &psp_ip_funcs,
++};
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+index 967712f..d772545 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+@@ -164,5 +164,6 @@ extern int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
+ extern const struct amdgpu_ip_block_version psp_v10_0_ip_block;
+
+ int psp_gpu_reset(struct amdgpu_device *adev);
++extern const struct amdgpu_ip_block_version psp_v11_0_ip_block;
+
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+new file mode 100644
+index 0000000..9c58a23
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+@@ -0,0 +1,565 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include <linux/firmware.h>
++#include "amdgpu.h"
++#include "amdgpu_psp.h"
++#include "amdgpu_ucode.h"
++#include "soc15_common.h"
++#include "psp_v11_0.h"
++
++#include "mp/mp_11_0_offset.h"
++#include "mp/mp_11_0_sh_mask.h"
++#include "gc/gc_9_0_offset.h"
++#include "sdma0/sdma0_4_0_offset.h"
++#include "nbio/nbio_7_4_offset.h"
++
++MODULE_FIRMWARE("amdgpu/vega20_sos.bin");
++
++/* address block */
++#define smnMP1_FIRMWARE_FLAGS 0x3010024
++
++static int
++psp_v11_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *type)
++{
++ switch (ucode->ucode_id) {
++ case AMDGPU_UCODE_ID_SDMA0:
++ *type = GFX_FW_TYPE_SDMA0;
++ break;
++ case AMDGPU_UCODE_ID_SDMA1:
++ *type = GFX_FW_TYPE_SDMA1;
++ break;
++ case AMDGPU_UCODE_ID_CP_CE:
++ *type = GFX_FW_TYPE_CP_CE;
++ break;
++ case AMDGPU_UCODE_ID_CP_PFP:
++ *type = GFX_FW_TYPE_CP_PFP;
++ break;
++ case AMDGPU_UCODE_ID_CP_ME:
++ *type = GFX_FW_TYPE_CP_ME;
++ break;
++ case AMDGPU_UCODE_ID_CP_MEC1:
++ *type = GFX_FW_TYPE_CP_MEC;
++ break;
++ case AMDGPU_UCODE_ID_CP_MEC1_JT:
++ *type = GFX_FW_TYPE_CP_MEC_ME1;
++ break;
++ case AMDGPU_UCODE_ID_CP_MEC2:
++ *type = GFX_FW_TYPE_CP_MEC;
++ break;
++ case AMDGPU_UCODE_ID_CP_MEC2_JT:
++ *type = GFX_FW_TYPE_CP_MEC_ME2;
++ break;
++ case AMDGPU_UCODE_ID_RLC_G:
++ *type = GFX_FW_TYPE_RLC_G;
++ break;
++ case AMDGPU_UCODE_ID_SMC:
++ *type = GFX_FW_TYPE_SMU;
++ break;
++ case AMDGPU_UCODE_ID_UVD:
++ *type = GFX_FW_TYPE_UVD;
++ break;
++ case AMDGPU_UCODE_ID_VCE:
++ *type = GFX_FW_TYPE_VCE;
++ break;
++ case AMDGPU_UCODE_ID_MAXIMUM:
++ default:
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static int psp_v11_0_init_microcode(struct psp_context *psp)
++{
++ struct amdgpu_device *adev = psp->adev;
++ const char *chip_name;
++ char fw_name[30];
++ int err = 0;
++ const struct psp_firmware_header_v1_0 *hdr;
++
++ DRM_DEBUG("\n");
++
++ switch (adev->asic_type) {
++ case CHIP_VEGA20:
++ chip_name = "vega20";
++ break;
++ default:
++ BUG();
++ }
++
++ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name);
++ err = request_firmware(&adev->psp.sos_fw, fw_name, adev->dev);
++ if (err)
++ goto out;
++
++ err = amdgpu_ucode_validate(adev->psp.sos_fw);
++ if (err)
++ goto out;
++
++ hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
++ adev->psp.sos_fw_version = le32_to_cpu(hdr->header.ucode_version);
++ adev->psp.sos_feature_version = le32_to_cpu(hdr->ucode_feature_version);
++ adev->psp.sos_bin_size = le32_to_cpu(hdr->sos_size_bytes);
++ adev->psp.sys_bin_size = le32_to_cpu(hdr->header.ucode_size_bytes) -
++ le32_to_cpu(hdr->sos_size_bytes);
++ adev->psp.sys_start_addr = (uint8_t *)hdr +
++ le32_to_cpu(hdr->header.ucode_array_offset_bytes);
++ adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr +
++ le32_to_cpu(hdr->sos_offset_bytes);
++ return 0;
++out:
++ if (err) {
++ dev_err(adev->dev,
++ "psp v11.0: Failed to load firmware \"%s\"\n",
++ fw_name);
++ release_firmware(adev->psp.sos_fw);
++ adev->psp.sos_fw = NULL;
++ }
++
++ return err;
++}
++
++static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
++{
++ int ret;
++ uint32_t psp_gfxdrv_command_reg = 0;
++ struct amdgpu_device *adev = psp->adev;
++ uint32_t sol_reg;
++
++ /* Check sOS sign of life register to confirm sys driver and sOS
++ * are already been loaded.
++ */
++ sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
++ if (sol_reg)
++ return 0;
++
++ /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
++ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
++ 0x80000000, 0x80000000, false);
++ if (ret)
++ return ret;
++
++ memset(psp->fw_pri_buf, 0, PSP_1_MEG);
++
++ /* Copy PSP System Driver binary to memory */
++ memcpy(psp->fw_pri_buf, psp->sys_start_addr, psp->sys_bin_size);
++
++ /* Provide the sys driver to bootrom */
++ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
++ (uint32_t)(psp->fw_pri_mc_addr >> 20));
++ psp_gfxdrv_command_reg = 1 << 16;
++ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35,
++ psp_gfxdrv_command_reg);
++
++ /* there might be handshake issue with hardware which needs delay */
++ mdelay(20);
++
++ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
++ 0x80000000, 0x80000000, false);
++
++ return ret;
++}
++
++static int psp_v11_0_bootloader_load_sos(struct psp_context *psp)
++{
++ int ret;
++ unsigned int psp_gfxdrv_command_reg = 0;
++ struct amdgpu_device *adev = psp->adev;
++ uint32_t sol_reg;
++
++ /* Check sOS sign of life register to confirm sys driver and sOS
++ * are already been loaded.
++ */
++ sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
++ if (sol_reg)
++ return 0;
++
++ /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
++ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
++ 0x80000000, 0x80000000, false);
++ if (ret)
++ return ret;
++
++ memset(psp->fw_pri_buf, 0, PSP_1_MEG);
++
++ /* Copy Secure OS binary to PSP memory */
++ memcpy(psp->fw_pri_buf, psp->sos_start_addr, psp->sos_bin_size);
++
++ /* Provide the PSP secure OS to bootrom */
++ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
++ (uint32_t)(psp->fw_pri_mc_addr >> 20));
++ psp_gfxdrv_command_reg = 2 << 16;
++ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35,
++ psp_gfxdrv_command_reg);
++
++ /* there might be handshake issue with hardware which needs delay */
++ mdelay(20);
++ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81),
++ RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81),
++ 0, true);
++
++ return ret;
++}
++
++static int psp_v11_0_prep_cmd_buf(struct amdgpu_firmware_info *ucode,
++ struct psp_gfx_cmd_resp *cmd)
++{
++ int ret;
++ uint64_t fw_mem_mc_addr = ucode->mc_addr;
++
++ memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
++
++ cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
++ cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
++ cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
++ cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;
++
++ ret = psp_v11_0_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
++ if (ret)
++ DRM_ERROR("Unknown firmware type\n");
++
++ return ret;
++}
++
++static int psp_v11_0_ring_init(struct psp_context *psp,
++ enum psp_ring_type ring_type)
++{
++ int ret = 0;
++ struct psp_ring *ring;
++ struct amdgpu_device *adev = psp->adev;
++
++ ring = &psp->km_ring;
++
++ ring->ring_type = ring_type;
++
++ /* allocate 4k Page of Local Frame Buffer memory for ring */
++ ring->ring_size = 0x1000;
++ ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
++ AMDGPU_GEM_DOMAIN_VRAM,
++ &adev->firmware.rbuf,
++ &ring->ring_mem_mc_addr,
++ (void **)&ring->ring_mem);
++ if (ret) {
++ ring->ring_size = 0;
++ return ret;
++ }
++
++ return 0;
++}
++
++static int psp_v11_0_ring_create(struct psp_context *psp,
++ enum psp_ring_type ring_type)
++{
++ int ret = 0;
++ unsigned int psp_ring_reg = 0;
++ struct psp_ring *ring = &psp->km_ring;
++ struct amdgpu_device *adev = psp->adev;
++
++ /* Write low address of the ring to C2PMSG_69 */
++ psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
++ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
++ /* Write high address of the ring to C2PMSG_70 */
++ psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
++ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, psp_ring_reg);
++ /* Write size of ring to C2PMSG_71 */
++ psp_ring_reg = ring->ring_size;
++ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_71, psp_ring_reg);
++ /* Write the ring initialization command to C2PMSG_64 */
++ psp_ring_reg = ring_type;
++ psp_ring_reg = psp_ring_reg << 16;
++ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
++
++ /* there might be handshake issue with hardware which needs delay */
++ mdelay(20);
++
++ /* Wait for response flag (bit 31) in C2PMSG_64 */
++ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
++ 0x80000000, 0x8000FFFF, false);
++
++ return ret;
++}
++
++static int psp_v11_0_ring_stop(struct psp_context *psp,
++ enum psp_ring_type ring_type)
++{
++ int ret = 0;
++ struct psp_ring *ring;
++ struct amdgpu_device *adev = psp->adev;
++
++ ring = &psp->km_ring;
++
++ /* Write the ring destroy command to C2PMSG_64 */
++ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_DESTROY_RINGS);
++
++ /* there might be handshake issue with hardware which needs delay */
++ mdelay(20);
++
++ /* Wait for response flag (bit 31) in C2PMSG_64 */
++ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
++ 0x80000000, 0x80000000, false);
++
++ return ret;
++}
++
++static int psp_v11_0_ring_destroy(struct psp_context *psp,
++ enum psp_ring_type ring_type)
++{
++ int ret = 0;
++ struct psp_ring *ring = &psp->km_ring;
++ struct amdgpu_device *adev = psp->adev;
++
++ ret = psp_v11_0_ring_stop(psp, ring_type);
++ if (ret)
++ DRM_ERROR("Fail to stop psp ring\n");
++
++ amdgpu_bo_free_kernel(&adev->firmware.rbuf,
++ &ring->ring_mem_mc_addr,
++ (void **)&ring->ring_mem);
++
++ return ret;
++}
++
++static int psp_v11_0_cmd_submit(struct psp_context *psp,
++ struct amdgpu_firmware_info *ucode,
++ uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,
++ int index)
++{
++ unsigned int psp_write_ptr_reg = 0;
++ struct psp_gfx_rb_frame *write_frame = psp->km_ring.ring_mem;
++ struct psp_ring *ring = &psp->km_ring;
++ struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
++ struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
++ ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
++ struct amdgpu_device *adev = psp->adev;
++ uint32_t ring_size_dw = ring->ring_size / 4;
++ uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
++
++ /* KM (GPCOM) prepare write pointer */
++ psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
++
++ /* Update KM RB frame pointer to new frame */
++ /* write_frame ptr increments by size of rb_frame in bytes */
++ /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
++ if ((psp_write_ptr_reg % ring_size_dw) == 0)
++ write_frame = ring_buffer_start;
++ else
++ write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
++ /* Check invalid write_frame ptr address */
++ if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
++ DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
++ ring_buffer_start, ring_buffer_end, write_frame);
++ DRM_ERROR("write_frame is pointing to address out of bounds\n");
++ return -EINVAL;
++ }
++
++ /* Initialize KM RB frame */
++ memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
++
++ /* Update KM RB frame */
++ write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
++ write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
++ write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
++ write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
++ write_frame->fence_value = index;
++
++ /* Update the write Pointer in DWORDs */
++ psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
++ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg);
++
++ return 0;
++}
++
++static int
++psp_v11_0_sram_map(struct amdgpu_device *adev,
++ unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
++ unsigned int *sram_data_reg_offset,
++ enum AMDGPU_UCODE_ID ucode_id)
++{
++ int ret = 0;
++
++ switch (ucode_id) {
++/* TODO: needs to confirm */
++#if 0
++ case AMDGPU_UCODE_ID_SMC:
++ *sram_offset = 0;
++ *sram_addr_reg_offset = 0;
++ *sram_data_reg_offset = 0;
++ break;
++#endif
++
++ case AMDGPU_UCODE_ID_CP_CE:
++ *sram_offset = 0x0;
++ *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_ADDR);
++ *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_DATA);
++ break;
++
++ case AMDGPU_UCODE_ID_CP_PFP:
++ *sram_offset = 0x0;
++ *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_ADDR);
++ *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_DATA);
++ break;
++
++ case AMDGPU_UCODE_ID_CP_ME:
++ *sram_offset = 0x0;
++ *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_ADDR);
++ *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_DATA);
++ break;
++
++ case AMDGPU_UCODE_ID_CP_MEC1:
++ *sram_offset = 0x10000;
++ *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_ADDR);
++ *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_DATA);
++ break;
++
++ case AMDGPU_UCODE_ID_CP_MEC2:
++ *sram_offset = 0x10000;
++ *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_ADDR);
++ *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_DATA);
++ break;
++
++ case AMDGPU_UCODE_ID_RLC_G:
++ *sram_offset = 0x2000;
++ *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_ADDR);
++ *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_DATA);
++ break;
++
++ case AMDGPU_UCODE_ID_SDMA0:
++ *sram_offset = 0x0;
++ *sram_addr_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_ADDR);
++ *sram_data_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_DATA);
++ break;
++
++/* TODO: needs to confirm */
++#if 0
++ case AMDGPU_UCODE_ID_SDMA1:
++ *sram_offset = ;
++ *sram_addr_reg_offset = ;
++ break;
++
++ case AMDGPU_UCODE_ID_UVD:
++ *sram_offset = ;
++ *sram_addr_reg_offset = ;
++ break;
++
++ case AMDGPU_UCODE_ID_VCE:
++ *sram_offset = ;
++ *sram_addr_reg_offset = ;
++ break;
++#endif
++
++ case AMDGPU_UCODE_ID_MAXIMUM:
++ default:
++ ret = -EINVAL;
++ break;
++ }
++
++ return ret;
++}
++
++static bool psp_v11_0_compare_sram_data(struct psp_context *psp,
++ struct amdgpu_firmware_info *ucode,
++ enum AMDGPU_UCODE_ID ucode_type)
++{
++ int err = 0;
++ unsigned int fw_sram_reg_val = 0;
++ unsigned int fw_sram_addr_reg_offset = 0;
++ unsigned int fw_sram_data_reg_offset = 0;
++ unsigned int ucode_size;
++ uint32_t *ucode_mem = NULL;
++ struct amdgpu_device *adev = psp->adev;
++
++ err = psp_v11_0_sram_map(adev, &fw_sram_reg_val, &fw_sram_addr_reg_offset,
++ &fw_sram_data_reg_offset, ucode_type);
++ if (err)
++ return false;
++
++ WREG32(fw_sram_addr_reg_offset, fw_sram_reg_val);
++
++ ucode_size = ucode->ucode_size;
++ ucode_mem = (uint32_t *)ucode->kaddr;
++ while (ucode_size) {
++ fw_sram_reg_val = RREG32(fw_sram_data_reg_offset);
++
++ if (*ucode_mem != fw_sram_reg_val)
++ return false;
++
++ ucode_mem++;
++ /* 4 bytes */
++ ucode_size -= 4;
++ }
++
++ return true;
++}
++
++static int psp_v11_0_mode1_reset(struct psp_context *psp)
++{
++ int ret;
++ uint32_t offset;
++ struct amdgpu_device *adev = psp->adev;
++
++ offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64);
++
++ ret = psp_wait_for(psp, offset, 0x80000000, 0x8000FFFF, false);
++
++ if (ret) {
++ DRM_INFO("psp is not working correctly before mode1 reset!\n");
++ return -EINVAL;
++ }
++
++ /*send the mode 1 reset command*/
++ WREG32(offset, GFX_CTRL_CMD_ID_MODE1_RST);
++
++ mdelay(1000);
++
++ offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33);
++
++ ret = psp_wait_for(psp, offset, 0x80000000, 0x80000000, false);
++
++ if (ret) {
++ DRM_INFO("psp mode 1 reset failed!\n");
++ return -EINVAL;
++ }
++
++ DRM_INFO("psp mode1 reset succeed \n");
++
++ return 0;
++}
++
++static const struct psp_funcs psp_v11_0_funcs = {
++ .init_microcode = psp_v11_0_init_microcode,
++ .bootloader_load_sysdrv = psp_v11_0_bootloader_load_sysdrv,
++ .bootloader_load_sos = psp_v11_0_bootloader_load_sos,
++ .prep_cmd_buf = psp_v11_0_prep_cmd_buf,
++ .ring_init = psp_v11_0_ring_init,
++ .ring_create = psp_v11_0_ring_create,
++ .ring_stop = psp_v11_0_ring_stop,
++ .ring_destroy = psp_v11_0_ring_destroy,
++ .cmd_submit = psp_v11_0_cmd_submit,
++ .compare_sram_data = psp_v11_0_compare_sram_data,
++ .mode1_reset = psp_v11_0_mode1_reset,
++};
++
++void psp_v11_0_set_psp_funcs(struct psp_context *psp)
++{
++ psp->funcs = &psp_v11_0_funcs;
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.h b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.h
+new file mode 100644
+index 0000000..082c16c
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.h
+@@ -0,0 +1,30 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++#ifndef __PSP_V11_0_H__
++#define __PSP_V11_0_H__
++
++#include "amdgpu_psp.h"
++
++void psp_v11_0_set_psp_funcs(struct psp_context *psp);
++
++#endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+index 727071f..e1ebf77 100644
+--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
++++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+@@ -41,8 +41,6 @@ MODULE_FIRMWARE("amdgpu/vega10_sos.bin");
+ MODULE_FIRMWARE("amdgpu/vega10_asd.bin");
+ MODULE_FIRMWARE("amdgpu/vega12_sos.bin");
+ MODULE_FIRMWARE("amdgpu/vega12_asd.bin");
+-MODULE_FIRMWARE("amdgpu/vega20_sos.bin");
+-MODULE_FIRMWARE("amdgpu/vega20_asd.bin");
+
+
+ #define smnMP1_FIRMWARE_FLAGS 0x3010028
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index dd44b50..fc0cb7d3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -518,9 +518,10 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
+ amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
+- if (adev->asic_type != CHIP_VEGA20) {
++ if (adev->asic_type == CHIP_VEGA20)
++ amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
++ else
+ amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
+- }
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+ if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5141-drm-amdgpu-vg20-Change-the-load-type-of-vega20-to-ps.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5141-drm-amdgpu-vg20-Change-the-load-type-of-vega20-to-ps.patch
new file mode 100644
index 00000000..7682f432
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5141-drm-amdgpu-vg20-Change-the-load-type-of-vega20-to-ps.patch
@@ -0,0 +1,42 @@
+From 7ff5622fedf766f24b0e41ed90479a52eea317a4 Mon Sep 17 00:00:00 2001
+From: Likun Gao <Likun.Gao@amd.com>
+Date: Mon, 9 Jul 2018 13:47:04 -0500
+Subject: [PATCH 5141/5725] drm/amdgpu/vg20: Change the load type of vega20 to
+ psp (v2)
+
+Modified the vega20 load type to psp now that psp
+support is implemented.
+
+v2: squash in fixes history (Alex)
+
+Signed-off-by: Likun Gao <Likun.Gao@amd.com>
+Reviewed-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+index abcc163..63e2996 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+@@ -303,12 +303,11 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
+ case CHIP_VEGA10:
+ case CHIP_RAVEN:
+ case CHIP_VEGA12:
++ case CHIP_VEGA20:
+ if (!load_type)
+ return AMDGPU_FW_LOAD_DIRECT;
+ else
+ return AMDGPU_FW_LOAD_PSP;
+- case CHIP_VEGA20:
+- return AMDGPU_FW_LOAD_DIRECT;
+ default:
+ DRM_ERROR("Unknow firmware load type\n");
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5142-drm-amd-powerplay-enable-fclk-ss-by-default.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5142-drm-amd-powerplay-enable-fclk-ss-by-default.patch
new file mode 100644
index 00000000..68e0319b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5142-drm-amd-powerplay-enable-fclk-ss-by-default.patch
@@ -0,0 +1,30 @@
+From 2b5c414b21dec4e30cdce34d5cf4db6125dce77b Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Tue, 17 Jul 2018 10:22:37 +0800
+Subject: [PATCH 5142/5725] drm/amd/powerplay: enable fclk ss by default
+
+Set fclk ss (spread spectrum) as enabled by default.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Rex Zhu <rezhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
+index 32d24a4..5f1f7a3 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
+@@ -810,7 +810,7 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable
+ ppsmc_pptable->UclkSpreadPercent = smc_dpm_table->uclkspreadpercent;
+ ppsmc_pptable->UclkSpreadFreq = smc_dpm_table->uclkspreadfreq;
+
+- ppsmc_pptable->FclkSpreadEnabled = 0;
++ ppsmc_pptable->FclkSpreadEnabled = smc_dpm_table->fclkspreadenabled;
+ ppsmc_pptable->FclkSpreadPercent = smc_dpm_table->fclkspreadpercent;
+ ppsmc_pptable->FclkSpreadFreq = smc_dpm_table->fclkspreadfreq;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5143-drm-amd-powerplay-remove-setting-soc-floor-voltage-b.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5143-drm-amd-powerplay-remove-setting-soc-floor-voltage-b.patch
new file mode 100644
index 00000000..8fce8b08
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5143-drm-amd-powerplay-remove-setting-soc-floor-voltage-b.patch
@@ -0,0 +1,52 @@
+From 82de7bf123e36b48ab08a847511703161671000d Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Wed, 18 Jul 2018 10:59:02 +0800
+Subject: [PATCH 5143/5725] drm/amd/powerplay: remove setting soc floor voltage
+ before sending pptable
+
+SOC voltage is not able to switch and is forced to a low 0.8V when running HEVC,
+which causes the test to fail.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Rex Zhu <rezhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 9 +--------
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h | 1 -
+ 2 files changed, 1 insertion(+), 9 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+index ed928c5..ad6ce14 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+@@ -707,14 +707,7 @@ static int vega20_init_smc_table(struct pp_hwmgr *hwmgr)
+ data->vbios_boot_state.vclock = boot_up_values.ulVClk;
+ data->vbios_boot_state.dclock = boot_up_values.ulDClk;
+ data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID;
+- if (0 != boot_up_values.usVddc) {
+- smum_send_msg_to_smc_with_parameter(hwmgr,
+- PPSMC_MSG_SetFloorSocVoltage,
+- (boot_up_values.usVddc * 4));
+- data->vbios_boot_state.bsoc_vddc_lock = true;
+- } else {
+- data->vbios_boot_state.bsoc_vddc_lock = false;
+- }
++
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetMinDeepSleepDcefclk,
+ (uint32_t)(data->vbios_boot_state.dcef_clock / 100));
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
+index 130052a..72e4f2a 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
+@@ -203,7 +203,6 @@ struct vega20_dpmlevel_enable_mask {
+ };
+
+ struct vega20_vbios_boot_state {
+- bool bsoc_vddc_lock;
+ uint8_t uc_cooling_id;
+ uint16_t vddc;
+ uint16_t vddci;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5144-drm-amd-powerplay-avoid-enabling-disabling-uvd-vce-d.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5144-drm-amd-powerplay-avoid-enabling-disabling-uvd-vce-d.patch
new file mode 100644
index 00000000..680a5ecb
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5144-drm-amd-powerplay-avoid-enabling-disabling-uvd-vce-d.patch
@@ -0,0 +1,44 @@
+From aafb5e0746d9b3ef99f0b986743661009ccf4bd7 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Thu, 19 Jul 2018 18:40:25 +0800
+Subject: [PATCH 5144/5725] drm/amd/powerplay: avoid enabling/disabling uvd/vce
+ dpm twice
+
+For vega20, there are two UVD rings which share one powerplay instance.
+In some cases (two rings used in parallel), the uvd dpm is disabled twice,
+which causes the SMC to hang.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Rex Zhu <rezhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+index ad6ce14..c4302bc 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+@@ -2464,6 +2464,9 @@ static void vega20_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
+ {
+ struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
+
++ if (data->vce_power_gated == bgate)
++ return ;
++
+ data->vce_power_gated = bgate;
+ vega20_enable_disable_vce_dpm(hwmgr, !bgate);
+ }
+@@ -2472,6 +2475,9 @@ static void vega20_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
+ {
+ struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
+
++ if (data->uvd_power_gated == bgate)
++ return ;
++
+ data->uvd_power_gated = bgate;
+ vega20_enable_disable_uvd_dpm(hwmgr, !bgate);
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5145-drm-amd-powerplay-correct-the-argument-for-PPSMC_MSG.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5145-drm-amd-powerplay-correct-the-argument-for-PPSMC_MSG.patch
new file mode 100644
index 00000000..9bef8516
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5145-drm-amd-powerplay-correct-the-argument-for-PPSMC_MSG.patch
@@ -0,0 +1,32 @@
+From 35f226e8cbcf159e5830d72d8fb0f2abad47a76f Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Fri, 20 Jul 2018 10:53:31 +0800
+Subject: [PATCH 5145/5725] drm/amd/powerplay: correct the argument for
+ PPSMC_MSG_SetUclkFastSwitch
+
+The argument was set incorrectly: a fast/slow switch was requested when
+a slow/fast switch was actually needed.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+index c4302bc..c5bdb2b 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+@@ -1839,7 +1839,7 @@ static int vega20_notify_smc_display_change(struct pp_hwmgr *hwmgr,
+ if (data->smu_features[GNLD_DPM_UCLK].enabled)
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetUclkFastSwitch,
+- has_disp ? 0 : 1);
++ has_disp ? 1 : 0);
+
+ return 0;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5146-drm-amd-powerplay-allow-slow-switch-only-if-NBPState.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5146-drm-amd-powerplay-allow-slow-switch-only-if-NBPState.patch
new file mode 100644
index 00000000..09f23b42
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5146-drm-amd-powerplay-allow-slow-switch-only-if-NBPState.patch
@@ -0,0 +1,32 @@
+From 7fc868fa07ad1d1683f161cc7be09f9e0cc3083a Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Fri, 20 Jul 2018 10:56:21 +0800
+Subject: [PATCH 5146/5725] drm/amd/powerplay: allow slow switch only if
+ NBPState enabled
+
+Otherwise there may be SMU performance issues.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+index c5bdb2b..1170f23 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+@@ -1896,7 +1896,8 @@ static int vega20_notify_smc_display_config_after_ps_adjustment(
+ int ret = 0;
+
+ if ((hwmgr->display_config->num_display > 1) &&
+- !hwmgr->display_config->multi_monitor_in_sync)
++ !hwmgr->display_config->multi_monitor_in_sync &&
++ !hwmgr->display_config->nb_pstate_switch_disable)
+ vega20_notify_smc_display_change(hwmgr, false);
+ else
+ vega20_notify_smc_display_change(hwmgr, true);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5147-drm-amd-powerplay-remove-max-DCEFCLK-limitation.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5147-drm-amd-powerplay-remove-max-DCEFCLK-limitation.patch
new file mode 100644
index 00000000..293f63c4
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5147-drm-amd-powerplay-remove-max-DCEFCLK-limitation.patch
@@ -0,0 +1,32 @@
+From ac2225cf56167652605623a2f6992fef7060a678 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Mon, 30 Jul 2018 14:01:00 +0800
+Subject: [PATCH 5147/5725] drm/amd/powerplay: remove max DCEFCLK limitation
+
+The latest SMU fw removes the limitation that required
+UCLK >= DCEFCLK.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+index 1170f23..d7c4334 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+@@ -1356,9 +1356,6 @@ static int vega20_init_max_sustainable_clocks(struct pp_hwmgr *hwmgr)
+ if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock)
+ max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock;
+
+- if (max_sustainable_clocks->uclock < max_sustainable_clocks->dcef_clock)
+- max_sustainable_clocks->dcef_clock = max_sustainable_clocks->uclock;
+-
+ return 0;
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5148-drm-amd-powerplay-added-voltage-boot-time-calibratio.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5148-drm-amd-powerplay-added-voltage-boot-time-calibratio.patch
new file mode 100644
index 00000000..5270d2a6
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5148-drm-amd-powerplay-added-voltage-boot-time-calibratio.patch
@@ -0,0 +1,47 @@
+From eb099c1dc4267f8ec5e8d194b5f8d87ea717ff1c Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Fri, 10 Aug 2018 14:27:56 +0800
+Subject: [PATCH 5148/5725] drm/amd/powerplay: added voltage boot time
+ calibration
+
+Run AFLL BTC after uploading the pptable and before enabling
+all smu features.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+index d7c4334..fb32b28 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+@@ -756,6 +756,11 @@ static int vega20_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
+ return 0;
+ }
+
++static int vega20_run_btc_afll(struct pp_hwmgr *hwmgr)
++{
++ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAfllBtc);
++}
++
+ static int vega20_enable_all_smu_features(struct pp_hwmgr *hwmgr)
+ {
+ struct vega20_hwmgr *data =
+@@ -1391,6 +1396,11 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+ "[EnableDPMTasks] Failed to initialize SMC table!",
+ return result);
+
++ result = vega20_run_btc_afll(hwmgr);
++ PP_ASSERT_WITH_CODE(!result,
++ "[EnableDPMTasks] Failed to run btc afll!",
++ return result);
++
+ result = vega20_enable_all_smu_features(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "[EnableDPMTasks] Failed to enable all smu features!",
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5149-drm-amdgpu-gfx9-Update-gfx9-golden-settings.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5149-drm-amdgpu-gfx9-Update-gfx9-golden-settings.patch
new file mode 100644
index 00000000..b3d4003d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5149-drm-amdgpu-gfx9-Update-gfx9-golden-settings.patch
@@ -0,0 +1,32 @@
+From 408180c90b82c36a3c9dd14747ce148d77c30f2a Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Thu, 26 Jul 2018 12:31:34 +0800
+Subject: [PATCH 5149/5725] drm/amdgpu/gfx9: Update gfx9 golden settings.
+
+Update the golden settings for vega20.
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index fe751d2..6131db4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -82,7 +82,7 @@ MODULE_FIRMWARE("amdgpu/raven_rlc.bin");
+
+ static const struct soc15_reg_golden golden_settings_gc_9_0[] =
+ {
+- SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5150-drm-amdgpu-update-vega20-sdma-golden-settings.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5150-drm-amdgpu-update-vega20-sdma-golden-settings.patch
new file mode 100644
index 00000000..ed496306
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5150-drm-amdgpu-update-vega20-sdma-golden-settings.patch
@@ -0,0 +1,136 @@
+From 65a2c0c01012d4cb3a16a611e1a0d7b63e2ab33d Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Wed, 18 Jul 2018 16:00:03 +0800
+Subject: [PATCH 5150/5725] drm/amdgpu: update vega20 sdma golden settings
+
+Updated vega20 SDMA0 and SDMA1 golden settings.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 68 ++++++++++++++++++++++++++++------
+ 1 file changed, 57 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+index d102712..94cb277 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+@@ -27,10 +27,10 @@
+ #include "amdgpu_ucode.h"
+ #include "amdgpu_trace.h"
+
+-#include "sdma0/sdma0_4_0_offset.h"
+-#include "sdma0/sdma0_4_0_sh_mask.h"
+-#include "sdma1/sdma1_4_0_offset.h"
+-#include "sdma1/sdma1_4_0_sh_mask.h"
++#include "sdma0/sdma0_4_2_offset.h"
++#include "sdma0/sdma0_4_2_sh_mask.h"
++#include "sdma1/sdma1_4_2_offset.h"
++#include "sdma1/sdma1_4_2_sh_mask.h"
+ #include "hdp/hdp_4_0_offset.h"
+ #include "sdma0/sdma0_4_1_default.h"
+
+@@ -98,8 +98,7 @@ static const struct soc15_reg_golden golden_settings_sdma_vg12[] = {
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001)
+ };
+
+-static const struct soc15_reg_golden golden_settings_sdma_4_1[] =
+-{
++static const struct soc15_reg_golden golden_settings_sdma_4_1[] = {
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100),
+@@ -112,26 +111,67 @@ static const struct soc15_reg_golden golden_settings_sdma_4_1[] =
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0)
+ };
+
+-static const struct soc15_reg_golden golden_settings_sdma_4_2[] =
++static const struct soc15_reg_golden golden_settings_sdma0_4_2_init[] = {
++ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff0, 0x00403000),
++};
++
++static const struct soc15_reg_golden golden_settings_sdma0_4_2[] =
+ {
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
++ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
++ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_PAGE_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
++ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RD_BURST_CNTL, 0x0000000f, 0x00000003),
++ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff0, 0x00403000),
++ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
++ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC2_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
++ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
++ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC3_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
++ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
++ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC4_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
++ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
++ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC5_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
++ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
++ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC6_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
++ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
++ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
++ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
++};
++
++static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = {
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
++ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
++ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_PAGE_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+- SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
++ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RD_BURST_CNTL, 0x0000000f, 0x00000003),
++ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
++ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff0, 0x00403000),
++ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+- SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0)
++ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC2_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
++ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
++ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC3_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
++ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
++ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC4_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
++ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
++ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC5_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
++ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
++ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC6_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
++ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
++ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
++ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
++ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+ };
+
+ static const struct soc15_reg_golden golden_settings_sdma_rv1[] =
+@@ -168,8 +208,14 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
+ break;
+ case CHIP_VEGA20:
+ soc15_program_register_sequence(adev,
+- golden_settings_sdma_4_2,
+- ARRAY_SIZE(golden_settings_sdma_4_2));
++ golden_settings_sdma0_4_2_init,
++ ARRAY_SIZE(golden_settings_sdma0_4_2_init));
++ soc15_program_register_sequence(adev,
++ golden_settings_sdma0_4_2,
++ ARRAY_SIZE(golden_settings_sdma0_4_2));
++ soc15_program_register_sequence(adev,
++ golden_settings_sdma1_4_2,
++ ARRAY_SIZE(golden_settings_sdma1_4_2));
+ break;
+ case CHIP_RAVEN:
+ soc15_program_register_sequence(adev,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5151-drm-amdgpu-psp-Enlarge-PSP-TMR-SIZE-from-3M-to-4M.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5151-drm-amdgpu-psp-Enlarge-PSP-TMR-SIZE-from-3M-to-4M.patch
new file mode 100644
index 00000000..9c194b35
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5151-drm-amdgpu-psp-Enlarge-PSP-TMR-SIZE-from-3M-to-4M.patch
@@ -0,0 +1,54 @@
+From da5f24c016704e073d38ee23f06159f25a05fbbb Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Tue, 14 Aug 2018 10:33:25 +0800
+Subject: [PATCH 5151/5725] drm/amdgpu/psp: Enlarge PSP TMR SIZE from 3M to 4M.
+
+Enlarge the PSP TMR SIZE to 4M for dual UVD fw front-door loading.
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 6 ++++--
+ drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 1 +
+ 2 files changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+index 70b7d42..f74f155 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+@@ -162,7 +162,7 @@ static int psp_tmr_init(struct psp_context *psp)
+ * Note: this memory need be reserved till the driver
+ * uninitializes.
+ */
+- ret = amdgpu_bo_create_kernel(psp->adev, 0x300000, 0x100000,
++ ret = amdgpu_bo_create_kernel(psp->adev, PSP_TMR_SIZE, 0x100000,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
+
+@@ -178,7 +178,9 @@ static int psp_tmr_load(struct psp_context *psp)
+ if (!cmd)
+ return -ENOMEM;
+
+- psp_prep_tmr_cmd_buf(cmd, psp->tmr_mc_addr, 0x300000);
++ psp_prep_tmr_cmd_buf(cmd, psp->tmr_mc_addr, PSP_TMR_SIZE);
++ DRM_INFO("reserve 0x%x from 0x%llx for PSP TMR SIZE\n",
++ PSP_TMR_SIZE, psp->tmr_mc_addr);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd,
+ psp->fence_buf_mc_addr, 1);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+index d772545..981887c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+@@ -32,6 +32,7 @@
+ #define PSP_CMD_BUFFER_SIZE 0x1000
+ #define PSP_ASD_SHARED_MEM_SIZE 0x4000
+ #define PSP_1_MEG 0x100000
++#define PSP_TMR_SIZE 0x400000
+
+ struct psp_context;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5152-drm-amdgpu-remove-experimental-flag-for-vega20.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5152-drm-amdgpu-remove-experimental-flag-for-vega20.patch
new file mode 100644
index 00000000..0fe2d0b6
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5152-drm-amdgpu-remove-experimental-flag-for-vega20.patch
@@ -0,0 +1,38 @@
+From 6b43adb45edfdcdd53922ae4f7ec16380c79c7c0 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Tue, 14 Aug 2018 11:44:44 -0500
+Subject: [PATCH 5152/5725] drm/amdgpu: remove experimental flag for vega20
+
+Now that PSP and SMU support is in place.
+
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 6754cbf..549782d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -786,12 +786,12 @@ static const struct pci_device_id pciidlist[] = {
+ {0x1002, 0x69A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
+ {0x1002, 0x69AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
+ /* Vega 20 */
+- {0x1002, 0x66A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT},
+- {0x1002, 0x66A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT},
+- {0x1002, 0x66A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT},
+- {0x1002, 0x66A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT},
+- {0x1002, 0x66A7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT},
+- {0x1002, 0x66AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT},
++ {0x1002, 0x66A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
++ {0x1002, 0x66A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
++ {0x1002, 0x66A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
++ {0x1002, 0x66A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
++ {0x1002, 0x66A7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
++ {0x1002, 0x66AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
+ /* Raven */
+ {0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU},
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5153-drm-amdgpu-Cancel-the-delay-work-when-suspend.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5153-drm-amdgpu-Cancel-the-delay-work-when-suspend.patch
new file mode 100644
index 00000000..411bcfe1
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5153-drm-amdgpu-Cancel-the-delay-work-when-suspend.patch
@@ -0,0 +1,34 @@
+From 0e7c6f6f6ef972f75c3a4192d1f88e4dcb22f223 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Tue, 14 Aug 2018 17:31:09 +0800
+Subject: [PATCH 5153/5725] drm/amdgpu: Cancel the delay work when suspend
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Cancel the delayed work to avoid the corner case where the
+IB test has not yet run when the device is suspended.
+
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 65cf144..b798801 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -2774,6 +2774,8 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
+ if (fbcon)
+ amdgpu_fbdev_set_suspend(adev, 1);
+
++ cancel_delayed_work_sync(&adev->late_init_work);
++
+ if (!amdgpu_device_has_dc_support(adev)) {
+ /* turn off display hw */
+ drm_modeset_lock_all(dev);
+--
+2.7.4
+
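
The fix above follows a common kernel pattern: any delayed work armed during
initialization must be cancelled synchronously before the device suspends, or
it can fire against powered-down hardware. A minimal sketch of that pattern,
with hypothetical names (my_dev, my_dev_suspend, late_init_work) standing in
for the driver's real ones:

#include <linux/workqueue.h>

struct my_dev {
	struct delayed_work late_init_work;
};

static int my_dev_suspend(struct my_dev *dev)
{
	/* Waits for a queued or already-running instance of the work item,
	 * so the late-init IB test cannot race with the suspend sequence. */
	cancel_delayed_work_sync(&dev->late_init_work);

	/* ... continue with the normal suspend sequence ... */
	return 0;
}
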
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5154-drm-amd-pp-OverDrive-gfx-domain-voltage-on-Tonga.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5154-drm-amd-pp-OverDrive-gfx-domain-voltage-on-Tonga.patch
new file mode 100644
index 00000000..8d9da9f8
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5154-drm-amd-pp-OverDrive-gfx-domain-voltage-on-Tonga.patch
@@ -0,0 +1,36 @@
+From 9c3b3fc4f5041996fbb02fc29779e667182d3db7 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Mon, 13 Aug 2018 18:37:39 +0800
+Subject: [PATCH 5154/5725] drm/amd/pp: OverDrive gfx domain voltage on Tonga
+
+Also adjust the gfx domain voltage on Tonga when the user
+overdrives the voltage.
+
+For Tonga, the driver does not write the user's setting into the
+voltage table in the SMU; it only picks the minimum value from the
+voltage table that is not less than the user's setting.
+
+v2: fix a typo
+
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+index 15e110f..263a781 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+@@ -4857,6 +4857,7 @@ static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
+ podn_vdd_dep_in_backend->entries[input_level].clk = input_clk;
+ podn_dpm_table_in_backend->entries[input_level].vddc = input_vol;
+ podn_vdd_dep_in_backend->entries[input_level].vddc = input_vol;
++ podn_vdd_dep_in_backend->entries[input_level].vddgfx = input_vol;
+ } else {
+ return -EINVAL;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5155-drm-amdgpu-fix-integer-overflow-test-in-amdgpu_bo_li.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5155-drm-amdgpu-fix-integer-overflow-test-in-amdgpu_bo_li.patch
new file mode 100644
index 00000000..1228b712
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5155-drm-amdgpu-fix-integer-overflow-test-in-amdgpu_bo_li.patch
@@ -0,0 +1,38 @@
+From ad340cfac93205d30681cf83c9ebf7128767efb1 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Fri, 10 Aug 2018 18:50:32 +0800
+Subject: [PATCH 5155/5725] drm/amdgpu: fix integer overflow test in
+ amdgpu_bo_list_create()
+
+We accidentally left out the size of the amdgpu_bo_list struct. It
+could lead to memory corruption on 32-bit systems.  You'd have to
+pick the absolute maximum and set "num_entries == 59652323", and then
+size would wrap to 16 bytes.
+
+Fixes: 920990cb080a ("drm/amdgpu: allocate the bo_list array after the list")
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Reviewed-by: Bas Nieuwenhuizen <basni@chromium.org>
+Signed-off-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+index b603249..06fa232 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+@@ -67,7 +67,8 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
+ unsigned i;
+ int r;
+
+- if (num_entries > SIZE_MAX / sizeof(struct amdgpu_bo_list_entry))
++ if (num_entries > (SIZE_MAX - sizeof(struct amdgpu_bo_list))
++ / sizeof(struct amdgpu_bo_list_entry))
+ return -EINVAL;
+
+ size = sizeof(struct amdgpu_bo_list);
+--
+2.7.4
+
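
The reasoning behind the fix generalizes to any "header plus N entries"
allocation: dividing SIZE_MAX by the entry size alone still lets
header + N * entry wrap on 32-bit systems, so the header size has to be
subtracted first. A standalone sketch of the guard, using illustrative
struct names rather than the driver's real types:

#include <stdint.h>
#include <stdlib.h>

struct header { size_t num_entries; };
struct entry  { void *bo; unsigned int priority; };

static void *alloc_list(size_t num_entries)
{
	size_t size;

	/* Reject counts where header + num_entries * entry would overflow. */
	if (num_entries > (SIZE_MAX - sizeof(struct header)) / sizeof(struct entry))
		return NULL;

	size = sizeof(struct header) + num_entries * sizeof(struct entry);
	return malloc(size);
}
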
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5156-drm-amdgpu-Change-VCE-booting-with-firmware-loaded-b.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5156-drm-amdgpu-Change-VCE-booting-with-firmware-loaded-b.patch
new file mode 100644
index 00000000..9ae4df1c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5156-drm-amdgpu-Change-VCE-booting-with-firmware-loaded-b.patch
@@ -0,0 +1,62 @@
+From dc1a269d5bbac245710695d802859b88e875dccc Mon Sep 17 00:00:00 2001
+From: James Zhu <jzhums@gmail.com>
+Date: Tue, 14 Aug 2018 14:53:51 -0400
+Subject: [PATCH 5156/5725] drm/amdgpu: Change VCE booting with firmware loaded
+ by PSP
+
+With PSP firmware loading, TMR mc address is supposed to be used.
+
+Signed-off-by: James Zhu <James.Zhu@amd.com>
+Acked-by: Huang Rui <ray.huang@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 13 +++++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+index 65f8860..258f015 100755
+--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+@@ -595,6 +595,7 @@ static int vce_v4_0_resume(void *handle)
+ static void vce_v4_0_mc_resume(struct amdgpu_device *adev)
+ {
+ uint32_t offset, size;
++ uint64_t tmr_mc_addr;
+
+ WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_A), 0, ~(1 << 16));
+ WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), 0x1FF000, ~0xFF9FF000);
+@@ -607,21 +608,25 @@ static void vce_v4_0_mc_resume(struct amdgpu_device *adev)
+ WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL1), 0);
+ WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VM_CTRL), 0);
+
++ offset = AMDGPU_VCE_FIRMWARE_OFFSET;
++
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
++ tmr_mc_addr = (uint64_t)(adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].tmr_mc_addr_hi) << 32 |
++ adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].tmr_mc_addr_lo;
+ WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
+- (adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 8));
++ (tmr_mc_addr >> 8));
+ WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
+- (adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 40) & 0xff);
++ (tmr_mc_addr >> 40) & 0xff);
++ WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0), 0);
+ } else {
+ WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
+ (adev->vce.gpu_addr >> 8));
+ WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
+ (adev->vce.gpu_addr >> 40) & 0xff);
++ WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0), offset & ~0x0f000000);
+ }
+
+- offset = AMDGPU_VCE_FIRMWARE_OFFSET;
+ size = VCE_V4_0_FW_SIZE;
+- WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0), offset & ~0x0f000000);
+ WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE0), size);
+
+ WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR1), (adev->vce.gpu_addr >> 8));
+--
+2.7.4
+
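
The core of the change is rebuilding a 64-bit TMR address from the two 32-bit
words carried in the firmware info and splitting it into the same register
fields the old path used. A plain-C sketch of that arithmetic (the bit
positions mirror the >> 8 and >> 40 shifts in the patch; function names are
illustrative):

#include <stdint.h>

static uint64_t make_tmr_addr(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}

/* Value written to the 40-bit BAR register: address bits [39:8]. */
static uint32_t bar_low(uint64_t addr)
{
	return (uint32_t)(addr >> 8);
}

/* Value written to the 64-bit BAR register: address bits [47:40]. */
static uint32_t bar_high(uint64_t addr)
{
	return (uint32_t)((addr >> 40) & 0xff);
}
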
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5157-drm-amdgpu-Use-kvmalloc-for-allocating-UVD-VCE-VCN-B.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5157-drm-amdgpu-Use-kvmalloc-for-allocating-UVD-VCE-VCN-B.patch
new file mode 100644
index 00000000..de51c4e3
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5157-drm-amdgpu-Use-kvmalloc-for-allocating-UVD-VCE-VCN-B.patch
@@ -0,0 +1,113 @@
+From 254ba022a91b2f6cf90b8f8aed91f4a8a3891a39 Mon Sep 17 00:00:00 2001
+From: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+Date: Wed, 9 Jan 2019 21:01:21 +0530
+Subject: [PATCH 5157/5725] drm/amdgpu: Use kvmalloc for allocating UVD/VCE/VCN
+ BO backup memory
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The allocated size can be several megabytes or more, and
+there's no need for it to be physically contiguous.
+
+May avoid spurious failures to initialize / suspend the corresponding
+block while there's memory pressure.
+
+Bugzilla: https://bugs.freedesktop.org/107432
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 6 +++---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 6 +++---
+ drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 4 ++--
+ 3 files changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+index b933d1f..83c6e71 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+@@ -314,7 +314,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
+ for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+ if (adev->uvd.harvest_config & (1 << j))
+ continue;
+- kfree(adev->uvd.inst[j].saved_bo);
++ kvfree(adev->uvd.inst[j].saved_bo);
+
+ amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
+ &adev->uvd.inst[j].gpu_addr,
+@@ -360,7 +360,7 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
+ size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo);
+ ptr = adev->uvd.inst[j].cpu_addr;
+
+- adev->uvd.inst[j].saved_bo = kmalloc(size, GFP_KERNEL);
++ adev->uvd.inst[j].saved_bo = kvmalloc(size, GFP_KERNEL);
+ if (!adev->uvd.inst[j].saved_bo)
+ return -ENOMEM;
+ memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size);
+@@ -386,7 +386,7 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
+
+ if (adev->uvd.inst[i].saved_bo != NULL) {
+ memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size);
+- kfree(adev->uvd.inst[i].saved_bo);
++ kvfree(adev->uvd.inst[i].saved_bo);
+ adev->uvd.inst[i].saved_bo = NULL;
+ } else {
+ const struct common_firmware_header *hdr;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+index 9485972..461f7dc 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+@@ -130,7 +130,7 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
+ {
+ int i;
+
+- kfree(adev->vcn.saved_bo);
++ kvfree(adev->vcn.saved_bo);
+
+ amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
+ &adev->vcn.gpu_addr,
+@@ -161,7 +161,7 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
+ size = amdgpu_bo_size(adev->vcn.vcpu_bo);
+ ptr = adev->vcn.cpu_addr;
+
+- adev->vcn.saved_bo = kmalloc(size, GFP_KERNEL);
++ adev->vcn.saved_bo = kvmalloc(size, GFP_KERNEL);
+ if (!adev->vcn.saved_bo)
+ return -ENOMEM;
+
+@@ -183,7 +183,7 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
+
+ if (adev->vcn.saved_bo != NULL) {
+ memcpy_toio(ptr, adev->vcn.saved_bo, size);
+- kfree(adev->vcn.saved_bo);
++ kvfree(adev->vcn.saved_bo);
+ adev->vcn.saved_bo = NULL;
+ } else {
+ const struct common_firmware_header *hdr;
+diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+index 258f015..90ac4d2 100755
+--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+@@ -438,7 +438,7 @@ static int vce_v4_0_sw_init(void *handle)
+ const struct common_firmware_header *hdr;
+ unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo);
+
+- adev->vce.saved_bo = kmalloc(size, GFP_KERNEL);
++ adev->vce.saved_bo = kvmalloc(size, GFP_KERNEL);
+ if (!adev->vce.saved_bo)
+ return -ENOMEM;
+
+@@ -490,7 +490,7 @@ static int vce_v4_0_sw_fini(void *handle)
+ amdgpu_virt_free_mm_table(adev);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+- kfree(adev->vce.saved_bo);
++ kvfree(adev->vce.saved_bo);
+ adev->vce.saved_bo = NULL;
+ }
+
+--
+2.7.4
+
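
The point of the kmalloc-to-kvmalloc switch is that the backup buffer only
needs to be virtually contiguous, so the allocator may fall back to vmalloc
under memory pressure instead of failing outright. A kernel-style sketch of
the resulting save/restore pair, with illustrative names (saved_bo,
fw_block_suspend/resume) rather than the driver's real symbols:

#include <linux/mm.h>
#include <linux/io.h>
#include <linux/errno.h>

static void *saved_bo;

static int fw_block_suspend(void __iomem *vcpu_addr, size_t size)
{
	saved_bo = kvmalloc(size, GFP_KERNEL);	/* kmalloc- or vmalloc-backed */
	if (!saved_bo)
		return -ENOMEM;
	memcpy_fromio(saved_bo, vcpu_addr, size);
	return 0;
}

static void fw_block_resume(void __iomem *vcpu_addr, size_t size)
{
	if (saved_bo) {
		memcpy_toio(vcpu_addr, saved_bo, size);
		kvfree(saved_bo);	/* kvfree() handles either backing */
		saved_bo = NULL;
	}
}
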
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5158-drm-amdgpu-added-support-2nd-UVD-instance.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5158-drm-amdgpu-added-support-2nd-UVD-instance.patch
new file mode 100644
index 00000000..08d71fca
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5158-drm-amdgpu-added-support-2nd-UVD-instance.patch
@@ -0,0 +1,80 @@
+From 0f47e74b4198c16aa27dcfa4c55e9149f7cc4f6e Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Tue, 14 Aug 2018 14:53:52 -0400
+Subject: [PATCH 5158/5725] drm/amdgpu: added support 2nd UVD instance
+
+Added PSP firmware loading support for the second UVD instance on Vega20.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Acked-by: Huang Rui <ray.huang@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 1 +
+ drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h | 3 ++-
+ drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 3 +++
+ drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 7 +++++++
+ 4 files changed, 13 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+index 1a265e4..b2f820c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+@@ -193,6 +193,7 @@ enum AMDGPU_UCODE_ID {
+ AMDGPU_UCODE_ID_STORAGE,
+ AMDGPU_UCODE_ID_SMC,
+ AMDGPU_UCODE_ID_UVD,
++ AMDGPU_UCODE_ID_UVD1,
+ AMDGPU_UCODE_ID_VCE,
+ AMDGPU_UCODE_ID_VCN,
+ AMDGPU_UCODE_ID_MAXIMUM,
+diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+index 0cf48d2..882bd83 100644
+--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
++++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+@@ -189,7 +189,8 @@ enum psp_gfx_fw_type
+ GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM = 20,
+ GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM = 21,
+ GFX_FW_TYPE_RLC_RESTORE_LIST_CNTL = 22,
+- GFX_FW_TYPE_MAX = 23
++ GFX_FW_TYPE_UVD1 = 23,
++ GFX_FW_TYPE_MAX = 24
+ };
+
+ /* Command to load HW IP FW. */
+diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+index 9c58a23..b70cfa3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+@@ -81,6 +81,9 @@ psp_v11_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *
+ case AMDGPU_UCODE_ID_VCE:
+ *type = GFX_FW_TYPE_VCE;
+ break;
++ case AMDGPU_UCODE_ID_UVD1:
++ *type = GFX_FW_TYPE_UVD1;
++ break;
+ case AMDGPU_UCODE_ID_MAXIMUM:
+ default:
+ return -EINVAL;
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+index 2a583d8..cc705b4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+@@ -440,6 +440,13 @@ static int uvd_v7_0_sw_init(void *handle)
+ adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].fw = adev->uvd.fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
++
++ if (adev->uvd.num_uvd_inst == UVD7_MAX_HW_INSTANCES_VEGA20) {
++ adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].ucode_id = AMDGPU_UCODE_ID_UVD1;
++ adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].fw = adev->uvd.fw;
++ adev->firmware.fw_size +=
++ ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
++ }
+ DRM_INFO("PSP loading UVD firmware\n");
+ }
+
+--
+2.7.4
+
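
The uvd_v7_0_sw_init() hunk shows the general bookkeeping for a multi-instance
block under PSP loading: each instance gets its own ucode-table slot, and the
reserved firmware size grows by one page-aligned image per instance. A rough
kernel-style sketch under those assumptions (the slot IDs, names and table
layout here are illustrative, not the driver's real definitions):

#include <linux/kernel.h>
#include <linux/mm.h>

enum { MY_UCODE_UVD, MY_UCODE_UVD1, MY_UCODE_MAX };

struct ucode_slot {
	int id;
	const void *fw;
};

static struct ucode_slot slots[MY_UCODE_MAX];
static size_t fw_reserved_size;

static void register_uvd_fw(const void *fw, u32 image_size, unsigned int num_inst)
{
	slots[MY_UCODE_UVD] = (struct ucode_slot){ .id = MY_UCODE_UVD, .fw = fw };
	fw_reserved_size += ALIGN(image_size, PAGE_SIZE);

	/* A second instance reuses the same image but needs its own slot
	 * and its own share of the reserved region. */
	if (num_inst > 1) {
		slots[MY_UCODE_UVD1] = (struct ucode_slot){ .id = MY_UCODE_UVD1, .fw = fw };
		fw_reserved_size += ALIGN(image_size, PAGE_SIZE);
	}
}
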
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5159-drm-amd-display-Program-vline-interrupt-on-FAST-upda.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5159-drm-amd-display-Program-vline-interrupt-on-FAST-upda.patch
new file mode 100644
index 00000000..99457f54
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5159-drm-amd-display-Program-vline-interrupt-on-FAST-upda.patch
@@ -0,0 +1,49 @@
+From 1c0782f641cacf29186897f1b4cd915389d85013 Mon Sep 17 00:00:00 2001
+From: SivapiriyanKumarasamy <sivapiriyan.kumarasamy@amd.com>
+Date: Fri, 18 May 2018 17:05:52 -0400
+Subject: [PATCH 5159/5725] drm/amd/display: Program vline interrupt on FAST
+ update
+
+Signed-off-by: SivapiriyanKumarasamy <sivapiriyan.kumarasamy@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Reviewed-by: Anthony Koo <Anthony.Koo@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc.c | 13 +++++++------
+ 1 file changed, 7 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 3727085..b9420c3 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -1365,6 +1365,13 @@ static void commit_planes_do_stream_update(struct dc *dc,
+ stream_update->adjust->v_total_min,
+ stream_update->adjust->v_total_max);
+
++ if (stream_update->periodic_fn_vsync_delta &&
++ pipe_ctx->stream_res.tg &&
++ pipe_ctx->stream_res.tg->funcs->program_vline_interrupt)
++ pipe_ctx->stream_res.tg->funcs->program_vline_interrupt(
++ pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing,
++ pipe_ctx->stream->periodic_fn_vsync_delta);
++
+ /* Full fe update*/
+ if (update_type == UPDATE_TYPE_FAST)
+ continue;
+@@ -1392,12 +1399,6 @@ static void commit_planes_do_stream_update(struct dc *dc,
+ pipe_ctx->stream_res.abm, stream->abm_level);
+ }
+
+- if (stream_update->periodic_fn_vsync_delta &&
+- pipe_ctx->stream_res.tg->funcs->program_vline_interrupt)
+- pipe_ctx->stream_res.tg->funcs->program_vline_interrupt(
+- pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing,
+- pipe_ctx->stream->periodic_fn_vsync_delta);
+-
+ if (stream_update->hdr_static_metadata ||
+ stream_update->vrr_infopacket) {
+ resource_build_info_frame(pipe_ctx);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5160-drm-amd-display-Enable-Stereo-in-Dal3.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5160-drm-amd-display-Enable-Stereo-in-Dal3.patch
new file mode 100644
index 00000000..1c8f04e6
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5160-drm-amd-display-Enable-Stereo-in-Dal3.patch
@@ -0,0 +1,387 @@
+From b1d4dc76ed1afd02805e1ec106d51921a1fa12f5 Mon Sep 17 00:00:00 2001
+From: Alvin lee <alvin.lee3@amd.com>
+Date: Mon, 4 Jun 2018 17:31:25 -0400
+Subject: [PATCH 5160/5725] drm/amd/display: Enable Stereo in Dal3
+
+- program infoframe for Stereo
+- program stereo flip control registers properly
+
+v2: Add missing license headers
+
+Signed-off-by: Alvin lee <alvin.lee3@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/Makefile | 3 +-
+ drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 57 ++++++--------
+ drivers/gpu/drm/amd/display/dc/dc_stream.h | 1 +
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 18 ++++-
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 4 +
+ .../drm/amd/display/modules/inc/mod_info_packet.h | 40 ++++++++++
+ .../drm/amd/display/modules/info_packet/Makefile | 31 ++++++++
+ .../amd/display/modules/info_packet/info_packet.c | 92 ++++++++++++++++++++++
+ 8 files changed, 208 insertions(+), 38 deletions(-)
+ create mode 100644 drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
+ create mode 100644 drivers/gpu/drm/amd/display/modules/info_packet/Makefile
+ create mode 100644 drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+
+diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile
+index 4674952..e3dfdf3 100644
+--- a/drivers/gpu/drm/amd/display/Makefile
++++ b/drivers/gpu/drm/amd/display/Makefile
+@@ -10,11 +10,12 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/inc/hw
+ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/inc
+ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync
+ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color
++subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/info_packet
+
+ #TODO: remove when Timing Sync feature is complete
+ subdir-ccflags-y += -DBUILD_FEATURE_TIMING_SYNC=0
+
+-DAL_LIBS = amdgpu_dm dc modules/freesync modules/color
++DAL_LIBS = amdgpu_dm dc modules/freesync modules/color modules/info_packet
+
+ AMD_DAL = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/,$(DAL_LIBS)))
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index d02dac1..40fd969 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -1559,6 +1559,20 @@ static bool is_hdr_static_meta_changed(struct dc_stream_state *cur_stream,
+ return false;
+ }
+
++static bool is_vsc_info_packet_changed(struct dc_stream_state *cur_stream,
++ struct dc_stream_state *new_stream)
++{
++ if (cur_stream == NULL)
++ return true;
++
++ if (memcmp(&cur_stream->vsc_infopacket,
++ &new_stream->vsc_infopacket,
++ sizeof(struct dc_info_packet)) != 0)
++ return true;
++
++ return false;
++}
++
+ static bool is_timing_changed(struct dc_stream_state *cur_stream,
+ struct dc_stream_state *new_stream)
+ {
+@@ -1599,6 +1613,9 @@ static bool are_stream_backends_same(
+ if (stream_a->dpms_off != stream_b->dpms_off)
+ return false;
+
++ if (is_vsc_info_packet_changed(stream_a, stream_b))
++ return false;
++
+ return true;
+ }
+
+@@ -2487,43 +2504,10 @@ static void set_vsc_info_packet(
+ struct dc_info_packet *info_packet,
+ struct dc_stream_state *stream)
+ {
+- unsigned int vscPacketRevision = 0;
+- unsigned int i;
+-
+- /*VSC packet set to 2 when DP revision >= 1.2*/
+- if (stream->psr_version != 0) {
+- vscPacketRevision = 2;
+- }
+-
+- /* VSC packet not needed based on the features
+- * supported by this DP display
+- */
+- if (vscPacketRevision == 0)
++ if (!stream->vsc_infopacket.valid)
+ return;
+
+- if (vscPacketRevision == 0x2) {
+- /* Secondary-data Packet ID = 0*/
+- info_packet->hb0 = 0x00;
+- /* 07h - Packet Type Value indicating Video
+- * Stream Configuration packet
+- */
+- info_packet->hb1 = 0x07;
+- /* 02h = VSC SDP supporting 3D stereo and PSR
+- * (applies to eDP v1.3 or higher).
+- */
+- info_packet->hb2 = 0x02;
+- /* 08h = VSC packet supporting 3D stereo + PSR
+- * (HB2 = 02h).
+- */
+- info_packet->hb3 = 0x08;
+-
+- for (i = 0; i < 28; i++)
+- info_packet->sb[i] = 0;
+-
+- info_packet->valid = true;
+- }
+-
+- /*TODO: stereo 3D support and extend pixel encoding colorimetry*/
++ *info_packet = stream->vsc_infopacket;
+ }
+
+ void dc_resource_state_destruct(struct dc_state *context)
+@@ -2705,6 +2689,9 @@ bool pipe_need_reprogram(
+ if (pipe_ctx_old->stream->dpms_off != pipe_ctx->stream->dpms_off)
+ return true;
+
++ if (is_vsc_info_packet_changed(pipe_ctx_old->stream, pipe_ctx->stream))
++ return true;
++
+ return false;
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+index 90ee911..9890ad7 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+@@ -55,6 +55,7 @@ struct dc_stream_state {
+ struct dc_crtc_timing timing;
+ struct dc_crtc_timing_adjust adjust;
+ struct dc_info_packet vrr_infopacket;
++ struct dc_info_packet vsc_infopacket;
+
+ struct rect src; /* composition area */
+ struct rect dst; /* stream addressable area */
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+index ec4a5f6..8da2b8a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+@@ -313,10 +313,24 @@ bool hubp1_program_surface_flip_and_addr(
+ {
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+- /* program flip type */
+- REG_SET(DCSURF_FLIP_CONTROL, 0,
++
++ //program flip type
++ REG_UPDATE(DCSURF_FLIP_CONTROL,
+ SURFACE_FLIP_TYPE, flip_immediate);
+
++
++ if (address->type == PLN_ADDR_TYPE_GRPH_STEREO) {
++ REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0x1);
++ REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_IN_STEREOSYNC, 0x1);
++
++ } else {
++ // turn off stereo if not in stereo
++ REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0x0);
++ REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_IN_STEREOSYNC, 0x0);
++ }
++
++
++
+ /* HW automatically latch rest of address register on write to
+ * DCSURF_PRIMARY_SURFACE_ADDRESS if SURFACE_UPDATE_LOCK is not used
+ *
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+index 48c1907..7605af9 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+@@ -270,6 +270,8 @@
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_PITCH_C, META_PITCH_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, SURFACE_PIXEL_FORMAT, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_TYPE, mask_sh),\
++ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, mask_sh),\
++ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_IN_STEREOSYNC, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_PENDING, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_UPDATE_LOCK, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH, mask_sh),\
+@@ -451,6 +453,8 @@
+ type H_MIRROR_EN;\
+ type SURFACE_PIXEL_FORMAT;\
+ type SURFACE_FLIP_TYPE;\
++ type SURFACE_FLIP_MODE_FOR_STEREOSYNC;\
++ type SURFACE_FLIP_IN_STEREOSYNC;\
+ type SURFACE_UPDATE_LOCK;\
+ type SURFACE_FLIP_PENDING;\
+ type PRI_VIEWPORT_WIDTH; \
+diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
+new file mode 100644
+index 0000000..786b343
+--- /dev/null
++++ b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
+@@ -0,0 +1,40 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++#ifndef MOD_INFO_PACKET_H_
++#define MOD_INFO_PACKET_H_
++
++struct info_packet_inputs {
++ const struct dc_stream_state *pStream;
++};
++
++struct info_packets {
++ struct dc_info_packet *pVscInfoPacket;
++};
++
++void mod_build_infopackets(struct info_packet_inputs *inputs,
++ struct info_packets *info_packets);
++
++#endif
+diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/Makefile b/drivers/gpu/drm/amd/display/modules/info_packet/Makefile
+new file mode 100644
+index 0000000..4c382d7
+--- /dev/null
++++ b/drivers/gpu/drm/amd/display/modules/info_packet/Makefile
+@@ -0,0 +1,31 @@
++#
++# Copyright 2017 Advanced Micro Devices, Inc.
++#
++# Permission is hereby granted, free of charge, to any person obtaining a
++# copy of this software and associated documentation files (the "Software"),
++# to deal in the Software without restriction, including without limitation
++# the rights to use, copy, modify, merge, publish, distribute, sublicense,
++# and/or sell copies of the Software, and to permit persons to whom the
++# Software is furnished to do so, subject to the following conditions:
++#
++# The above copyright notice and this permission notice shall be included in
++# all copies or substantial portions of the Software.
++#
++# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++# OTHER DEALINGS IN THE SOFTWARE.
++#
++#
++# Makefile for the 'info_packet' sub-module of DAL.
++#
++
++INFO_PACKET = info_packet.o
++
++AMD_DAL_INFO_PACKET = $(addprefix $(AMDDALPATH)/modules/info_packet/,$(INFO_PACKET))
++#$(info ************ DAL INFO_PACKET MODULE MAKEFILE ************)
++
++AMD_DISPLAY_FILES += $(AMD_DAL_INFO_PACKET)
+diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+new file mode 100644
+index 0000000..24b6cc1
+--- /dev/null
++++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+@@ -0,0 +1,92 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++#include "mod_info_packet.h"
++#include "core_types.h"
++
++static void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
++ struct dc_info_packet *info_packet)
++{
++ unsigned int vscPacketRevision = 0;
++ unsigned int i;
++
++ if (stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE && stream->view_format != VIEW_3D_FORMAT_NONE)
++ vscPacketRevision = 1;
++
++
++ /*VSC packet set to 2 when DP revision >= 1.2*/
++ if (stream->psr_version != 0)
++ vscPacketRevision = 2;
++
++
++ /* VSC packet not needed based on the features
++ * supported by this DP display
++ */
++ if (vscPacketRevision == 0)
++ return;
++
++ if (vscPacketRevision == 0x2) {
++ /* Secondary-data Packet ID = 0*/
++ info_packet->hb0 = 0x00;
++ /* 07h - Packet Type Value indicating Video
++ * Stream Configuration packet
++ */
++ info_packet->hb1 = 0x07;
++ /* 02h = VSC SDP supporting 3D stereo and PSR
++ * (applies to eDP v1.3 or higher).
++ */
++ info_packet->hb2 = 0x02;
++ /* 08h = VSC packet supporting 3D stereo + PSR
++ * (HB2 = 02h).
++ */
++ info_packet->hb3 = 0x08;
++
++ for (i = 0; i < 28; i++)
++ info_packet->sb[i] = 0;
++
++ info_packet->valid = true;
++ }
++
++ if (vscPacketRevision == 0x1) {
++
++ info_packet->hb0 = 0x00; // Secondary-data Packet ID = 0
++ info_packet->hb1 = 0x07; // 07h = Packet Type Value indicating Video Stream Configuration packet
++ info_packet->hb2 = 0x01; // 01h = Revision number. VSC SDP supporting 3D stereo only
++ info_packet->hb3 = 0x01; // 01h = VSC SDP supporting 3D stereo only (HB2 = 01h).
++
++ if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_INBAND_FA)
++ info_packet->sb[0] = 0x1;
++
++ info_packet->valid = true;
++ }
++}
++
++void mod_build_infopackets(struct info_packet_inputs *inputs,
++ struct info_packets *info_packets)
++{
++ if (info_packets->pVscInfoPacket != NULL)
++ mod_build_vsc_infopacket(inputs->pStream, info_packets->pVscInfoPacket);
++}
++
+--
+2.7.4
+
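
The new is_vsc_info_packet_changed() helper is a small change-detection idiom:
treat a missing "current" stream as changed, otherwise compare the stored
packet byte-for-byte. A plain-C sketch of the idiom (struct layout and names
are illustrative; a byte-wise memcmp is only meaningful when any struct
padding is initialized consistently):

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

struct info_packet {
	bool valid;
	unsigned char hb0, hb1, hb2, hb3;
	unsigned char sb[32];
};

struct stream_state {
	struct info_packet vsc_infopacket;
};

static bool vsc_infopacket_changed(const struct stream_state *cur,
				   const struct stream_state *next)
{
	if (cur == NULL)
		return true;	/* no previous state, must program the packet */

	return memcmp(&cur->vsc_infopacket, &next->vsc_infopacket,
		      sizeof(struct info_packet)) != 0;
}
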
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5161-drm-amd-display-Program-vsc_infopacket-in-commit_pla.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5161-drm-amd-display-Program-vsc_infopacket-in-commit_pla.patch
new file mode 100644
index 00000000..f3884e61
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5161-drm-amd-display-Program-vsc_infopacket-in-commit_pla.patch
@@ -0,0 +1,45 @@
+From c526d0e83ad5bbf2477d3b51c4decd90850aaafc Mon Sep 17 00:00:00 2001
+From: Alvin lee <alvin.lee3@amd.com>
+Date: Fri, 8 Jun 2018 13:58:36 -0400
+Subject: [PATCH 5161/5725] drm/amd/display: Program vsc_infopacket in
+ commit_planes_for_stream
+
+Signed-off-by: Alvin lee <alvin.lee3@amd.com>
+Reviewed-by: Jun Lei <Jun.Lei@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc.c | 3 ++-
+ drivers/gpu/drm/amd/display/dc/dc_stream.h | 1 +
+ 2 files changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index b9420c3..e484676 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -1400,7 +1400,8 @@ static void commit_planes_do_stream_update(struct dc *dc,
+ }
+
+ if (stream_update->hdr_static_metadata ||
+- stream_update->vrr_infopacket) {
++ stream_update->vrr_infopacket ||
++ stream_update->vsc_infopacket) {
+ resource_build_info_frame(pipe_ctx);
+ dc->hwss.update_info_frame(pipe_ctx);
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+index 9890ad7..489fb04 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+@@ -130,6 +130,7 @@ struct dc_stream_update {
+
+ struct dc_crtc_timing_adjust *adjust;
+ struct dc_info_packet *vrr_infopacket;
++ struct dc_info_packet *vsc_infopacket;
+
+ bool *dpms_off;
+ };
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5162-drm-amd-display-Handle-HDR-meta-update-as-fast-updat.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5162-drm-amd-display-Handle-HDR-meta-update-as-fast-updat.patch
new file mode 100644
index 00000000..ae039e8a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5162-drm-amd-display-Handle-HDR-meta-update-as-fast-updat.patch
@@ -0,0 +1,61 @@
+From 79acfc05bf1bb97415f8d2fba64d137ff74c21a5 Mon Sep 17 00:00:00 2001
+From: Krunoslav Kovac <Krunoslav.Kovac@amd.com>
+Date: Thu, 5 Jul 2018 19:23:17 -0400
+Subject: [PATCH 5162/5725] drm/amd/display: Handle HDR meta update as fast
+ update
+
+[Why]
+The VESA DPMS tool sends different HDR meta in OS flips without changing output
+parameters. We don't properly update the HDR info frame:
+- we label the HDR meta update as a fast update
+- when updating the HW info frame, we only do it on a full update
+
+[How]
+It should still be fast update, so when doing HW infoframe update,
+do it always no matter the update type.
+Also, don't request passive flip for HDR meta update only without output
+transfer function or color space changed.
+
+Signed-off-by: Krunoslav Kovac <Krunoslav.Kovac@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc.c | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index e484676..5b079dc 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -1372,6 +1372,13 @@ static void commit_planes_do_stream_update(struct dc *dc,
+ pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing,
+ pipe_ctx->stream->periodic_fn_vsync_delta);
+
++ if (stream_update->hdr_static_metadata ||
++ stream_update->vrr_infopacket ||
++ stream_update->vsc_infopacket) {
++ resource_build_info_frame(pipe_ctx);
++ dc->hwss.update_info_frame(pipe_ctx);
++ }
++
+ /* Full fe update*/
+ if (update_type == UPDATE_TYPE_FAST)
+ continue;
+@@ -1398,13 +1405,6 @@ static void commit_planes_do_stream_update(struct dc *dc,
+ pipe_ctx->stream_res.abm->funcs->set_abm_level(
+ pipe_ctx->stream_res.abm, stream->abm_level);
+ }
+-
+- if (stream_update->hdr_static_metadata ||
+- stream_update->vrr_infopacket ||
+- stream_update->vsc_infopacket) {
+- resource_build_info_frame(pipe_ctx);
+- dc->hwss.update_info_frame(pipe_ctx);
+- }
+ }
+ }
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5163-drm-amd-display-HDR-dynamic-meta-should-be-treated-a.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5163-drm-amd-display-HDR-dynamic-meta-should-be-treated-a.patch
new file mode 100644
index 00000000..93695808
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5163-drm-amd-display-HDR-dynamic-meta-should-be-treated-a.patch
@@ -0,0 +1,46 @@
+From 055a09f77b51edfc8c149c50c4054ae1a84715d2 Mon Sep 17 00:00:00 2001
+From: Krunoslav Kovac <Krunoslav.Kovac@amd.com>
+Date: Fri, 20 Jul 2018 15:44:08 -0400
+Subject: [PATCH 5163/5725] drm/amd/display: HDR dynamic meta should be treated
+ as stream update
+
+[Why]
+Recently we fixed HDR static meta using AFMT registers to be treated as
+fast stream update.
+Dynamic meta is still being treated as (full) surface update because it
+touches HUBP and it travels with pipe data.
+Here we change it to be (fast) stream update.
+Note, originally we also wanted to redesign here a bit, but without OS
+level support for true dynamic meta, it's left the same. We are simply
+using HW that can do dynamic meta to send HDR static meta, I still prefer
+keeping it in one static meta type then defining dynamic meta types to
+hold the same info. Once we know how OS interfaces look like, we can
+do proper design.
+
+[How]
+Move dyn meta update from update_hubp_dpp to commit_planes_do_stream_update
+
+Signed-off-by: Krunoslav Kovac <Krunoslav.Kovac@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 5b079dc..b7e4800 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -1372,7 +1372,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
+ pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing,
+ pipe_ctx->stream->periodic_fn_vsync_delta);
+
+- if (stream_update->hdr_static_metadata ||
++ if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
+ stream_update->vrr_infopacket ||
+ stream_update->vsc_infopacket) {
+ resource_build_info_frame(pipe_ctx);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5164-drm-amd-display-Program-gamut-remap-as-part-of-strea.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5164-drm-amd-display-Program-gamut-remap-as-part-of-strea.patch
new file mode 100644
index 00000000..fc9f7f65
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5164-drm-amd-display-Program-gamut-remap-as-part-of-strea.patch
@@ -0,0 +1,85 @@
+From ee129f25bd43813bfbc94677c12441ba4513fcfc Mon Sep 17 00:00:00 2001
+From: SivapiriyanKumarasamy <sivapiriyan.kumarasamy@amd.com>
+Date: Thu, 26 Jul 2018 14:58:35 -0400
+Subject: [PATCH 5164/5725] drm/amd/display: Program gamut remap as part of
+ stream update
+
+Add gamut remap to dc_stream_update struct, and program if set when updating
+streams.
+
+Signed-off-by: SivapiriyanKumarasamy <sivapiriyan.kumarasamy@amd.com>
+Reviewed-by: Anthony Koo <Anthony.Koo@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc.c | 20 ++++++++++++++++++++
+ drivers/gpu/drm/amd/display/dc/dc_stream.h | 8 ++++++++
+ 2 files changed, 28 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index b7e4800..16beef9 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -357,6 +357,23 @@ void dc_stream_set_dither_option(struct dc_stream_state *stream,
+ opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
+ }
+
++bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
++{
++ int i = 0;
++ bool ret = false;
++ struct pipe_ctx *pipes;
++
++ for (i = 0; i < MAX_PIPES; i++) {
++ if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
++ pipes = &dc->current_state->res_ctx.pipe_ctx[i];
++ dc->hwss.program_gamut_remap(pipes);
++ ret = true;
++ }
++ }
++
++ return ret;
++}
++
+ void dc_stream_set_static_screen_events(struct dc *dc,
+ struct dc_stream_state **streams,
+ int num_streams,
+@@ -1379,6 +1396,9 @@ static void commit_planes_do_stream_update(struct dc *dc,
+ dc->hwss.update_info_frame(pipe_ctx);
+ }
+
++ if (stream_update->gamut_remap)
++ dc_stream_set_gamut_remap(dc, stream);
++
+ /* Full fe update*/
+ if (update_type == UPDATE_TYPE_FAST)
+ continue;
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+index 489fb04..c531d80 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+@@ -133,6 +133,11 @@ struct dc_stream_update {
+ struct dc_info_packet *vsc_infopacket;
+
+ bool *dpms_off;
++
++ struct colorspace_transform *gamut_remap;
++ enum dc_color_space *output_color_space;
++
++
+ };
+
+ bool dc_is_stream_unchanged(
+@@ -298,6 +303,9 @@ void dc_stream_set_static_screen_events(struct dc *dc,
+ void dc_stream_set_dither_option(struct dc_stream_state *stream,
+ enum dc_dither_option option);
+
++bool dc_stream_set_gamut_remap(struct dc *dc,
++ const struct dc_stream_state *stream);
++
+ bool dc_stream_get_crtc_position(struct dc *dc,
+ struct dc_stream_state **stream,
+ int num_streams,
+--
+2.7.4
+
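
dc_stream_set_gamut_remap() in the hunk above is a lookup-and-program loop:
every pipe in the current state that is bound to the stream gets its gamut
remap reprogrammed, and the call reports whether any pipe matched. A reduced
plain-C sketch of that shape (MAX_PIPES, the pipe array and the programming
hook are stand-ins for the driver's real state):

#include <stdbool.h>
#include <stddef.h>

#define MAX_PIPES 6

struct stream;

struct pipe_ctx {
	const struct stream *stream;
};

static struct pipe_ctx pipes[MAX_PIPES];

static void program_gamut_remap(struct pipe_ctx *pipe)
{
	/* stand-in for the hardware programming hook */
	(void)pipe;
}

/* Returns true if at least one pipe was bound to the stream and reprogrammed. */
static bool set_gamut_remap(const struct stream *stream)
{
	bool ret = false;
	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		if (pipes[i].stream == stream) {
			program_gamut_remap(&pipes[i]);
			ret = true;
		}
	}
	return ret;
}
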
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5165-drm-amdgpu-Improve-a-error-message-and-fix-a-typo.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5165-drm-amdgpu-Improve-a-error-message-and-fix-a-typo.patch
new file mode 100644
index 00000000..714e899c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5165-drm-amdgpu-Improve-a-error-message-and-fix-a-typo.patch
@@ -0,0 +1,41 @@
+From 2acdfe12470a66a1f5b6524d787303f3564f2017 Mon Sep 17 00:00:00 2001
+From: Yong Zhao <yong.zhao@amd.com>
+Date: Thu, 5 Jul 2018 14:16:08 -0400
+Subject: [PATCH 5165/5725] drm/amdgpu: Improve a error message and fix a typo
+
+Change-Id: Ibeb387a098b5cb885ef554c4bcd0c23434bc3e4c
+Signed-off-by: Yong Zhao <yong.zhao@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+index c36a1ce..d2702b0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+@@ -63,7 +63,7 @@ enum {
+ enum {
+ ADDRESS_WATCH_REG_CNTL_ATC_BIT = 0x10000000UL,
+ ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK = 0x00FFFFFF,
+- ADDRESS_WATCH_REG_ADDLOW_MASK_EXTENTION = 0x03000000,
++ ADDRESS_WATCH_REG_ADDLOW_MASK_EXTENSION = 0x03000000,
+ /* extend the mask to 26 bits in order to match the low address field */
+ ADDRESS_WATCH_REG_ADDLOW_SHIFT = 6,
+ ADDRESS_WATCH_REG_ADDHIGH_MASK = 0xFFFF
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index 4c0f2a3..59e1f44 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -1515,7 +1515,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
+ ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
+ is_invalid_userptr);
+ if (ret) {
+- pr_err("Failed to map radeon bo to gpuvm\n");
++ pr_err("Failed to map bo to gpuvm\n");
+ goto map_bo_to_gpuvm_failed;
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5166-drm-amdgpu-Remove-VM-based-compute-profile-switching.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5166-drm-amdgpu-Remove-VM-based-compute-profile-switching.patch
new file mode 100644
index 00000000..f81cdbce
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5166-drm-amdgpu-Remove-VM-based-compute-profile-switching.patch
@@ -0,0 +1,196 @@
+From 72f96f62e18f8bec73fb67e735f5fb7ed00211c3 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Sun, 15 Jul 2018 00:57:02 -0400
+Subject: [PATCH 5166/5725] drm/amdgpu: Remove VM-based compute profile
+ switching
+
+VM-based compute profile switching was rejected upstream. It was
+replaced with queue-based switching and a dedicated kfd2kgd interface.
+
+Change-Id: I7c319bfe2533b29d8e5199dc2fc8fb001bbffdfd
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+
+Conflicts:
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 9 ++--
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 53 ------------------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 6 ---
+ 3 files changed, 5 insertions(+), 63 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index 59e1f44..fb22f77 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -1094,6 +1094,10 @@ int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
+ struct amdgpu_vm *avm = &drv_priv->vm;
+ int ret;
+
++ /* Already a compute VM? */
++ if (avm->process_info)
++ return -EINVAL;
++
+ /* Convert VM into a compute VM */
+ ret = amdgpu_vm_make_compute(adev, avm);
+ if (ret)
+@@ -1115,7 +1119,7 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
+ struct amdkfd_process_info *process_info = vm->process_info;
+ struct amdgpu_bo *pd = vm->root.base.bo;
+
+- if (vm->vm_context != AMDGPU_VM_CONTEXT_COMPUTE)
++ if (!process_info)
+ return;
+
+ /* Release eviction fence from PD */
+@@ -1123,9 +1127,6 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
+ amdgpu_bo_fence(pd, NULL, false);
+ amdgpu_bo_unreserve(pd);
+
+- if (!process_info)
+- return;
+-
+ /* Update process info */
+ mutex_lock(&process_info->lock);
+ process_info->n_vms--;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 5de844d..deaef9f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -2561,24 +2561,6 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
+ adev->vm_manager.fragment_size);
+ }
+
+-static void amdgpu_inc_compute_vms(struct amdgpu_device *adev)
+-{
+- /* Temporary use only the first VM manager */
+- unsigned int vmhub = 0; /*ring->funcs->vmhub;*/
+- struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+-
+- mutex_lock(&id_mgr->lock);
+- if ((adev->vm_manager.n_compute_vms++ == 0) &&
+- (!amdgpu_sriov_vf(adev))) {
+- /* First Compute VM: enable compute power profile */
+- if (adev->powerplay.pp_funcs &&
+- adev->powerplay.pp_funcs->switch_power_profile)
+- amdgpu_dpm_switch_power_profile(adev,
+- PP_SMC_POWER_PROFILE_COMPUTE, true);
+- }
+- mutex_unlock(&id_mgr->lock);
+-}
+-
+ /**
+ * amdgpu_vm_init - initialize a vm instance
+ *
+@@ -2691,11 +2673,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ INIT_KFIFO(vm->faults);
+ vm->fault_credit = 16;
+
+- vm->vm_context = vm_context;
+-
+- if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
+- amdgpu_inc_compute_vms(adev);
+-
+ return 0;
+
+ error_unreserve:
+@@ -2722,7 +2699,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ * page tables allocated yet.
+ *
+ * Changes the following VM parameters:
+- * - vm_context
+ * - use_cpu_for_update
+ * - pte_supports_ats
+ * - pasid (old PASID is released, because compute manages its own PASIDs)
+@@ -2744,13 +2720,6 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+ return r;
+
+ /* Sanity checks */
+- if (vm->vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
+- /* Can happen if ioctl is interrupted by a signal after
+- * this function already completed. Just return success.
+- */
+- r = 0;
+- goto error;
+- }
+ if (!RB_EMPTY_ROOT(&vm->va.rb_root) || vm->root.entries) {
+ r = -EINVAL;
+ goto error;
+@@ -2768,7 +2737,6 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+ }
+
+ /* Update VM state */
+- vm->vm_context = AMDGPU_VM_CONTEXT_COMPUTE;
+ vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
+ AMDGPU_VM_USE_CPU_FOR_COMPUTE);
+ vm->pte_support_ats = pte_support_ats;
+@@ -2787,8 +2755,6 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+ vm->pasid = 0;
+ }
+
+- /* Count the new compute VM */
+- amdgpu_inc_compute_vms(adev);
+ /* Free the shadow bo for compute VM */
+ amdgpu_bo_unref(&vm->root.base.bo->shadow);
+
+@@ -2858,24 +2824,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+ spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
+ }
+
+- if (vm->vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
+- struct amdgpu_vmid_mgr *id_mgr =
+- &adev->vm_manager.id_mgr[AMDGPU_GFXHUB];
+- mutex_lock(&id_mgr->lock);
+-
+- WARN(adev->vm_manager.n_compute_vms == 0, "Unbalanced number of Compute VMs");
+-
+- if ((--adev->vm_manager.n_compute_vms == 0) &&
+- (!amdgpu_sriov_vf(adev))) {
+- /* Last KFD VM: enable graphics power profile */
+- if (adev->powerplay.pp_funcs &&
+- adev->powerplay.pp_funcs->switch_power_profile)
+- amdgpu_dpm_switch_power_profile(adev,
+- PP_SMC_POWER_PROFILE_COMPUTE, false);
+- }
+- mutex_unlock(&id_mgr->lock);
+- }
+-
+ drm_sched_entity_fini(vm->entity.sched, &vm->entity);
+
+ if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
+@@ -2986,7 +2934,6 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
+
+ idr_init(&adev->vm_manager.pasid_idr);
+ spin_lock_init(&adev->vm_manager.pasid_lock);
+- adev->vm_manager.n_compute_vms = 0;
+ }
+
+ /**
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+index b96bfed..3461300 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+@@ -236,9 +236,6 @@ struct amdgpu_vm {
+ /* Limit non-retry fault storms */
+ unsigned int fault_credit;
+
+- /* Whether this is a Compute or GFX Context */
+- int vm_context;
+-
+ /* Points to the KFD process VM info */
+ struct amdkfd_process_info *process_info;
+
+@@ -288,9 +285,6 @@ struct amdgpu_vm_manager {
+ */
+ struct idr pasid_idr;
+ spinlock_t pasid_lock;
+-
+- /* Number of Compute VMs, used for detecting Compute activity */
+- unsigned n_compute_vms;
+ };
+
+ #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5167-drm-amdgpu-hybrid-add-AMDGPU-VERSION.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5167-drm-amdgpu-hybrid-add-AMDGPU-VERSION.patch
new file mode 100644
index 00000000..f0278f66
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5167-drm-amdgpu-hybrid-add-AMDGPU-VERSION.patch
@@ -0,0 +1,38 @@
+From 096e342b55eb0ee4de15b6360658af1a61bffe1b Mon Sep 17 00:00:00 2001
+From: "Le.Ma" <Le.Ma@amd.com>
+Date: Thu, 14 Sep 2017 15:53:13 +0800
+Subject: [PATCH 5167/5725] drm/amdgpu: [hybrid] add AMDGPU VERSION
+
+Change-Id: Id211892a7812dd945d5449bbaa116af268f418c6
+Signed-off-by: Le.Ma <Le.Ma@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 549782d..66a7e9f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -76,7 +76,7 @@
+ #define KMS_DRIVER_MINOR 27
+ #define KMS_DRIVER_PATCHLEVEL 0
+
+-#define AMDGPU_VERSION "18.30.2.15"
++#define AMDGPU_VERSION "18.45.2.415"
+
+ int amdgpu_vram_limit = 0;
+ int amdgpu_vis_vram_limit = 0;
+@@ -1197,6 +1197,9 @@ static int __init amdgpu_init(void)
+
+ DRM_INFO("amdgpu kernel modesetting enabled.\n");
+ DRM_INFO("amdgpu version: %s\n", AMDGPU_VERSION);
++#if defined(DRM_VER) && defined(DRM_PATCH) && defined(DRM_SUB)
++ DRM_INFO("OS DRM version: %d.%d.%d\n", DRM_VER, DRM_PATCH, DRM_SUB);
++#endif
+ driver = &kms_driver;
+ pdriver = &amdgpu_kms_pci_driver;
+ driver->num_ioctls = amdgpu_max_kms_ioctl;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5168-drm-amdgpu-cleanup-HW_IP-query.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5168-drm-amdgpu-cleanup-HW_IP-query.patch
new file mode 100644
index 00000000..418db25f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5168-drm-amdgpu-cleanup-HW_IP-query.patch
@@ -0,0 +1,250 @@
+From 87965875a13c7e01cb3b7f4e358067c2b14808f5 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Wed, 1 Aug 2018 13:52:25 +0200
+Subject: [PATCH 5168/5725] drm/amdgpu: cleanup HW_IP query
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Move the code into a separate function.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 204 +++++++++++++++++---------------
+ 1 file changed, 110 insertions(+), 94 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index 98c0c38..e60cfbf 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -260,6 +260,109 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
+ return 0;
+ }
+
++static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
++ struct drm_amdgpu_info *info,
++ struct drm_amdgpu_info_hw_ip *result)
++{
++ uint32_t ib_start_alignment = 0;
++ uint32_t ib_size_alignment = 0;
++ enum amd_ip_block_type type;
++ uint32_t ring_mask = 0;
++ unsigned int i, j;
++
++ if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
++ return -EINVAL;
++
++ switch (info->query_hw_ip.type) {
++ case AMDGPU_HW_IP_GFX:
++ type = AMD_IP_BLOCK_TYPE_GFX;
++ for (i = 0; i < adev->gfx.num_gfx_rings; i++)
++ ring_mask |= adev->gfx.gfx_ring[i].ready << i;
++ ib_start_alignment = 32;
++ ib_size_alignment = 32;
++ break;
++ case AMDGPU_HW_IP_COMPUTE:
++ type = AMD_IP_BLOCK_TYPE_GFX;
++ for (i = 0; i < adev->gfx.num_compute_rings; i++)
++ ring_mask |= adev->gfx.compute_ring[i].ready << i;
++ ib_start_alignment = 32;
++ ib_size_alignment = 32;
++ break;
++ case AMDGPU_HW_IP_DMA:
++ type = AMD_IP_BLOCK_TYPE_SDMA;
++ for (i = 0; i < adev->sdma.num_instances; i++)
++ ring_mask |= adev->sdma.instance[i].ring.ready << i;
++ ib_start_alignment = 256;
++ ib_size_alignment = 4;
++ break;
++ case AMDGPU_HW_IP_UVD:
++ type = AMD_IP_BLOCK_TYPE_UVD;
++ for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
++ if (adev->uvd.harvest_config & (1 << i))
++ continue;
++ ring_mask |= adev->uvd.inst[i].ring.ready;
++ }
++ ib_start_alignment = 64;
++ ib_size_alignment = 64;
++ break;
++ case AMDGPU_HW_IP_VCE:
++ type = AMD_IP_BLOCK_TYPE_VCE;
++ for (i = 0; i < adev->vce.num_rings; i++)
++ ring_mask |= adev->vce.ring[i].ready << i;
++ ib_start_alignment = 4;
++ ib_size_alignment = 1;
++ break;
++ case AMDGPU_HW_IP_UVD_ENC:
++ type = AMD_IP_BLOCK_TYPE_UVD;
++ for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
++ if (adev->uvd.harvest_config & (1 << i))
++ continue;
++ for (j = 0; j < adev->uvd.num_enc_rings; j++)
++ ring_mask |= adev->uvd.inst[i].ring_enc[j].ready << j;
++ }
++ ib_start_alignment = 64;
++ ib_size_alignment = 64;
++ break;
++ case AMDGPU_HW_IP_VCN_DEC:
++ type = AMD_IP_BLOCK_TYPE_VCN;
++ ring_mask = adev->vcn.ring_dec.ready;
++ ib_start_alignment = 16;
++ ib_size_alignment = 16;
++ break;
++ case AMDGPU_HW_IP_VCN_ENC:
++ type = AMD_IP_BLOCK_TYPE_VCN;
++ for (i = 0; i < adev->vcn.num_enc_rings; i++)
++ ring_mask |= adev->vcn.ring_enc[i].ready << i;
++ ib_start_alignment = 64;
++ ib_size_alignment = 1;
++ break;
++ case AMDGPU_HW_IP_VCN_JPEG:
++ type = AMD_IP_BLOCK_TYPE_VCN;
++ ring_mask = adev->vcn.ring_jpeg.ready;
++ ib_start_alignment = 16;
++ ib_size_alignment = 16;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ for (i = 0; i < adev->num_ip_blocks; i++)
++ if (adev->ip_blocks[i].version->type == type &&
++ adev->ip_blocks[i].status.valid)
++ break;
++
++ if (i == adev->num_ip_blocks)
++ return 0;
++
++ result->hw_ip_version_major = adev->ip_blocks[i].version->major;
++ result->hw_ip_version_minor = adev->ip_blocks[i].version->minor;
++ result->capabilities_flags = 0;
++ result->available_rings = ring_mask;
++ result->ib_start_alignment = ib_start_alignment;
++ result->ib_size_alignment = ib_size_alignment;
++ return 0;
++}
++
+ /*
+ * Userspace get information ioctl
+ */
+@@ -285,7 +388,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+ struct drm_crtc *crtc;
+ uint32_t ui32 = 0;
+ uint64_t ui64 = 0;
+- int i, j, found;
++ int i, found;
+ int ui32_size = sizeof(ui32);
+
+ if (!info->return_size || !info->return_pointer)
+@@ -332,101 +435,14 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+ return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
+ case AMDGPU_INFO_HW_IP_INFO: {
+ struct drm_amdgpu_info_hw_ip ip = {};
+- enum amd_ip_block_type type;
+- uint32_t ring_mask = 0;
+- uint32_t ib_start_alignment = 0;
+- uint32_t ib_size_alignment = 0;
+-
+- if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
+- return -EINVAL;
++ int ret;
+
+- switch (info->query_hw_ip.type) {
+- case AMDGPU_HW_IP_GFX:
+- type = AMD_IP_BLOCK_TYPE_GFX;
+- for (i = 0; i < adev->gfx.num_gfx_rings; i++)
+- ring_mask |= adev->gfx.gfx_ring[i].ready << i;
+- ib_start_alignment = 32;
+- ib_size_alignment = 32;
+- break;
+- case AMDGPU_HW_IP_COMPUTE:
+- type = AMD_IP_BLOCK_TYPE_GFX;
+- for (i = 0; i < adev->gfx.num_compute_rings; i++)
+- ring_mask |= adev->gfx.compute_ring[i].ready << i;
+- ib_start_alignment = 32;
+- ib_size_alignment = 32;
+- break;
+- case AMDGPU_HW_IP_DMA:
+- type = AMD_IP_BLOCK_TYPE_SDMA;
+- for (i = 0; i < adev->sdma.num_instances; i++)
+- ring_mask |= adev->sdma.instance[i].ring.ready << i;
+- ib_start_alignment = 256;
+- ib_size_alignment = 4;
+- break;
+- case AMDGPU_HW_IP_UVD:
+- type = AMD_IP_BLOCK_TYPE_UVD;
+- for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+- if (adev->uvd.harvest_config & (1 << i))
+- continue;
+- ring_mask |= adev->uvd.inst[i].ring.ready;
+- }
+- ib_start_alignment = 64;
+- ib_size_alignment = 64;
+- break;
+- case AMDGPU_HW_IP_VCE:
+- type = AMD_IP_BLOCK_TYPE_VCE;
+- for (i = 0; i < adev->vce.num_rings; i++)
+- ring_mask |= adev->vce.ring[i].ready << i;
+- ib_start_alignment = 4;
+- ib_size_alignment = 1;
+- break;
+- case AMDGPU_HW_IP_UVD_ENC:
+- type = AMD_IP_BLOCK_TYPE_UVD;
+- for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+- if (adev->uvd.harvest_config & (1 << i))
+- continue;
+- for (j = 0; j < adev->uvd.num_enc_rings; j++)
+- ring_mask |= adev->uvd.inst[i].ring_enc[j].ready << j;
+- }
+- ib_start_alignment = 64;
+- ib_size_alignment = 64;
+- break;
+- case AMDGPU_HW_IP_VCN_DEC:
+- type = AMD_IP_BLOCK_TYPE_VCN;
+- ring_mask = adev->vcn.ring_dec.ready;
+- ib_start_alignment = 16;
+- ib_size_alignment = 16;
+- break;
+- case AMDGPU_HW_IP_VCN_ENC:
+- type = AMD_IP_BLOCK_TYPE_VCN;
+- for (i = 0; i < adev->vcn.num_enc_rings; i++)
+- ring_mask |= adev->vcn.ring_enc[i].ready << i;
+- ib_start_alignment = 64;
+- ib_size_alignment = 1;
+- break;
+- case AMDGPU_HW_IP_VCN_JPEG:
+- type = AMD_IP_BLOCK_TYPE_VCN;
+- ring_mask = adev->vcn.ring_jpeg.ready;
+- ib_start_alignment = 16;
+- ib_size_alignment = 16;
+- break;
+- default:
+- return -EINVAL;
+- }
++ ret = amdgpu_hw_ip_info(adev, info, &ip);
++ if (ret)
++ return ret;
+
+- for (i = 0; i < adev->num_ip_blocks; i++) {
+- if (adev->ip_blocks[i].version->type == type &&
+- adev->ip_blocks[i].status.valid) {
+- ip.hw_ip_version_major = adev->ip_blocks[i].version->major;
+- ip.hw_ip_version_minor = adev->ip_blocks[i].version->minor;
+- ip.capabilities_flags = 0;
+- ip.available_rings = ring_mask;
+- ip.ib_start_alignment = ib_start_alignment;
+- ip.ib_size_alignment = ib_size_alignment;
+- break;
+- }
+- }
+- return copy_to_user(out, &ip,
+- min((size_t)size, sizeof(ip))) ? -EFAULT : 0;
++ ret = copy_to_user(out, &ip, min((size_t)size, sizeof(ip)));
++ return ret ? -EFAULT : 0;
+ }
+ case AMDGPU_INFO_HW_IP_COUNT: {
+ enum amd_ip_block_type type;
+--
+2.7.4
+
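For context, the amdgpu_hw_ip_info() helper introduced above still feeds the same AMDGPU_INFO_HW_IP_INFO ioctl that userspace reaches through libdrm, so the result struct it fills is exactly what applications see. Below is a minimal sketch of consuming it; it assumes libdrm_amdgpu is installed and that a render node exists at /dev/dri/renderD128 (both assumptions, not part of the patch).

    /* Sketch only: query GFX HW IP info via libdrm_amdgpu.
     * Typical build: cc query_hw_ip.c $(pkg-config --cflags --libs libdrm_amdgpu) */
    #include <fcntl.h>
    #include <stdio.h>
    #include <stdint.h>
    #include <amdgpu.h>
    #include <amdgpu_drm.h>

    int main(void)
    {
            uint32_t major, minor;
            amdgpu_device_handle dev;
            struct drm_amdgpu_info_hw_ip ip = {0};
            int fd = open("/dev/dri/renderD128", O_RDWR); /* device path is an assumption */

            if (fd < 0 || amdgpu_device_initialize(fd, &major, &minor, &dev))
                    return 1;

            if (!amdgpu_query_hw_ip_info(dev, AMDGPU_HW_IP_GFX, 0, &ip))
                    printf("GFX IP v%u.%u, rings 0x%x, IB align %u/%u\n",
                           ip.hw_ip_version_major, ip.hw_ip_version_minor,
                           ip.available_rings, ip.ib_start_alignment,
                           ip.ib_size_alignment);

            amdgpu_device_deinitialize(dev);
            return 0;
    }

available_rings is the ring_mask assembled in the kernel helper, so a zero value simply means no ready ring of that type was found.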
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5169-Revert-drm-amdgpu-switch-firmware-path-for-SI-parts.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5169-Revert-drm-amdgpu-switch-firmware-path-for-SI-parts.patch
new file mode 100644
index 00000000..5982f60a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5169-Revert-drm-amdgpu-switch-firmware-path-for-SI-parts.patch
@@ -0,0 +1,189 @@
+From 288a3fa92d6143cb178c6475c803e0f3e4457e8c Mon Sep 17 00:00:00 2001
+From: Kevin Wang <Kevin1.Wang@amd.com>
+Date: Wed, 5 Sep 2018 13:06:31 +0800
+Subject: [PATCH 5169/5725] Revert "drm/amdgpu: switch firmware path for SI
+ parts"
+
+This reverts commit 95ab8a1338fc32958f1272e18947de65223c1f15.
+
+revert for dkms osdb build
+
+Change-Id: Ifd13dbb8b301227d72175b818e89cee20453a974
+Signed-off-by: Kevin Wang <Kevin1.Wang@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 56 +++++++++++++++++------------------
+ drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 14 ++++-----
+ drivers/gpu/drm/amd/amdgpu/si_dpm.c | 22 +++++++-------
+ 3 files changed, 46 insertions(+), 46 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+index 4518021..0005f70 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+@@ -44,30 +44,30 @@ static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev);
+ static void gfx_v6_0_set_irq_funcs(struct amdgpu_device *adev);
+ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev);
+
+-MODULE_FIRMWARE("amdgpu/tahiti_pfp.bin");
+-MODULE_FIRMWARE("amdgpu/tahiti_me.bin");
+-MODULE_FIRMWARE("amdgpu/tahiti_ce.bin");
+-MODULE_FIRMWARE("amdgpu/tahiti_rlc.bin");
+-
+-MODULE_FIRMWARE("amdgpu/pitcairn_pfp.bin");
+-MODULE_FIRMWARE("amdgpu/pitcairn_me.bin");
+-MODULE_FIRMWARE("amdgpu/pitcairn_ce.bin");
+-MODULE_FIRMWARE("amdgpu/pitcairn_rlc.bin");
+-
+-MODULE_FIRMWARE("amdgpu/verde_pfp.bin");
+-MODULE_FIRMWARE("amdgpu/verde_me.bin");
+-MODULE_FIRMWARE("amdgpu/verde_ce.bin");
+-MODULE_FIRMWARE("amdgpu/verde_rlc.bin");
+-
+-MODULE_FIRMWARE("amdgpu/oland_pfp.bin");
+-MODULE_FIRMWARE("amdgpu/oland_me.bin");
+-MODULE_FIRMWARE("amdgpu/oland_ce.bin");
+-MODULE_FIRMWARE("amdgpu/oland_rlc.bin");
+-
+-MODULE_FIRMWARE("amdgpu/hainan_pfp.bin");
+-MODULE_FIRMWARE("amdgpu/hainan_me.bin");
+-MODULE_FIRMWARE("amdgpu/hainan_ce.bin");
+-MODULE_FIRMWARE("amdgpu/hainan_rlc.bin");
++MODULE_FIRMWARE("radeon/tahiti_pfp.bin");
++MODULE_FIRMWARE("radeon/tahiti_me.bin");
++MODULE_FIRMWARE("radeon/tahiti_ce.bin");
++MODULE_FIRMWARE("radeon/tahiti_rlc.bin");
++
++MODULE_FIRMWARE("radeon/pitcairn_pfp.bin");
++MODULE_FIRMWARE("radeon/pitcairn_me.bin");
++MODULE_FIRMWARE("radeon/pitcairn_ce.bin");
++MODULE_FIRMWARE("radeon/pitcairn_rlc.bin");
++
++MODULE_FIRMWARE("radeon/verde_pfp.bin");
++MODULE_FIRMWARE("radeon/verde_me.bin");
++MODULE_FIRMWARE("radeon/verde_ce.bin");
++MODULE_FIRMWARE("radeon/verde_rlc.bin");
++
++MODULE_FIRMWARE("radeon/oland_pfp.bin");
++MODULE_FIRMWARE("radeon/oland_me.bin");
++MODULE_FIRMWARE("radeon/oland_ce.bin");
++MODULE_FIRMWARE("radeon/oland_rlc.bin");
++
++MODULE_FIRMWARE("radeon/hainan_pfp.bin");
++MODULE_FIRMWARE("radeon/hainan_me.bin");
++MODULE_FIRMWARE("radeon/hainan_ce.bin");
++MODULE_FIRMWARE("radeon/hainan_rlc.bin");
+
+ static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev);
+ static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
+@@ -335,7 +335,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
+ default: BUG();
+ }
+
+- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+ err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+@@ -346,7 +346,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
+ adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+
+- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+ err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+@@ -357,7 +357,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
+ adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+
+- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
+ err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+@@ -368,7 +368,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
+ adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+
+- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
+ err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+index 8fc37d7..a5077a9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+@@ -41,11 +41,11 @@ static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev);
+ static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
+ static int gmc_v6_0_wait_for_idle(void *handle);
+
+-MODULE_FIRMWARE("amdgpu/tahiti_mc.bin");
+-MODULE_FIRMWARE("amdgpu/pitcairn_mc.bin");
+-MODULE_FIRMWARE("amdgpu/verde_mc.bin");
+-MODULE_FIRMWARE("amdgpu/oland_mc.bin");
+-MODULE_FIRMWARE("amdgpu/si58_mc.bin");
++MODULE_FIRMWARE("radeon/tahiti_mc.bin");
++MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
++MODULE_FIRMWARE("radeon/verde_mc.bin");
++MODULE_FIRMWARE("radeon/oland_mc.bin");
++MODULE_FIRMWARE("radeon/si58_mc.bin");
+
+ #define MC_SEQ_MISC0__MT__MASK 0xf0000000
+ #define MC_SEQ_MISC0__MT__GDDR1 0x10000000
+@@ -134,9 +134,9 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
+ is_58_fw = true;
+
+ if (is_58_fw)
+- snprintf(fw_name, sizeof(fw_name), "amdgpu/si58_mc.bin");
++ snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
+ else
+- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+ err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+index a32f6f6..996a4e5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
++++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+@@ -56,16 +56,16 @@
+
+ #define BIOS_SCRATCH_4 0x5cd
+
+-MODULE_FIRMWARE("amdgpu/tahiti_smc.bin");
+-MODULE_FIRMWARE("amdgpu/pitcairn_smc.bin");
+-MODULE_FIRMWARE("amdgpu/pitcairn_k_smc.bin");
+-MODULE_FIRMWARE("amdgpu/verde_smc.bin");
+-MODULE_FIRMWARE("amdgpu/verde_k_smc.bin");
+-MODULE_FIRMWARE("amdgpu/oland_smc.bin");
+-MODULE_FIRMWARE("amdgpu/oland_k_smc.bin");
+-MODULE_FIRMWARE("amdgpu/hainan_smc.bin");
+-MODULE_FIRMWARE("amdgpu/hainan_k_smc.bin");
+-MODULE_FIRMWARE("amdgpu/banks_k_2_smc.bin");
++MODULE_FIRMWARE("radeon/tahiti_smc.bin");
++MODULE_FIRMWARE("radeon/pitcairn_smc.bin");
++MODULE_FIRMWARE("radeon/pitcairn_k_smc.bin");
++MODULE_FIRMWARE("radeon/verde_smc.bin");
++MODULE_FIRMWARE("radeon/verde_k_smc.bin");
++MODULE_FIRMWARE("radeon/oland_smc.bin");
++MODULE_FIRMWARE("radeon/oland_k_smc.bin");
++MODULE_FIRMWARE("radeon/hainan_smc.bin");
++MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
++MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");
+
+ static const struct amd_pm_funcs si_dpm_funcs;
+
+@@ -7663,7 +7663,7 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev)
+ default: BUG();
+ }
+
+- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
+ err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5170-Revert-drm-amdgpu-switch-firmware-path-for-CIK-parts.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5170-Revert-drm-amdgpu-switch-firmware-path-for-CIK-parts.patch
new file mode 100644
index 00000000..ab6f4a7f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5170-Revert-drm-amdgpu-switch-firmware-path-for-CIK-parts.patch
@@ -0,0 +1,316 @@
+From 7934b759fa9068da93d2eee95d3bdece93817fff Mon Sep 17 00:00:00 2001
+From: Kevin Wang <Kevin1.Wang@amd.com>
+Date: Wed, 5 Sep 2018 13:07:00 +0800
+Subject: [PATCH 5170/5725] Revert "drm/amdgpu: switch firmware path for CIK
+ parts (v2)"
+
+This reverts commit 88794c4bbf324088c17c1d266613fcd02175b29c.
+
+Revert for the DKMS OSDB build.
+
+Change-Id: I0f5e1d84166518dcac90bfea412f4ec12f25582d
+Signed-off-by: Kevin Wang <Kevin1.Wang@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 8 ++--
+ drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 10 ++---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 10 ++---
+ drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 10 ++---
+ drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 24 +++++------
+ drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 72 ++++++++++++++++-----------------
+ drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 9 +++--
+ 7 files changed, 73 insertions(+), 70 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+index 693ec5e..e950730 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+@@ -314,17 +314,17 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
+ (adev->pdev->revision == 0x81) ||
+ (adev->pdev->device == 0x665f)) {
+ info->is_kicker = true;
+- strcpy(fw_name, "amdgpu/bonaire_k_smc.bin");
++ strcpy(fw_name, "radeon/bonaire_k_smc.bin");
+ } else {
+- strcpy(fw_name, "amdgpu/bonaire_smc.bin");
++ strcpy(fw_name, "radeon/bonaire_smc.bin");
+ }
+ break;
+ case CHIP_HAWAII:
+ if (adev->pdev->revision == 0x80) {
+ info->is_kicker = true;
+- strcpy(fw_name, "amdgpu/hawaii_k_smc.bin");
++ strcpy(fw_name, "radeon/hawaii_k_smc.bin");
+ } else {
+- strcpy(fw_name, "amdgpu/hawaii_smc.bin");
++ strcpy(fw_name, "radeon/hawaii_smc.bin");
+ }
+ break;
+ case CHIP_TOPAZ:
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+index 83c6e71..dc6d0f5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+@@ -53,11 +53,11 @@
+
+ /* Firmware Names */
+ #ifdef CONFIG_DRM_AMDGPU_CIK
+-#define FIRMWARE_BONAIRE "amdgpu/bonaire_uvd.bin"
+-#define FIRMWARE_KABINI "amdgpu/kabini_uvd.bin"
+-#define FIRMWARE_KAVERI "amdgpu/kaveri_uvd.bin"
+-#define FIRMWARE_HAWAII "amdgpu/hawaii_uvd.bin"
+-#define FIRMWARE_MULLINS "amdgpu/mullins_uvd.bin"
++#define FIRMWARE_BONAIRE "radeon/bonaire_uvd.bin"
++#define FIRMWARE_KABINI "radeon/kabini_uvd.bin"
++#define FIRMWARE_KAVERI "radeon/kaveri_uvd.bin"
++#define FIRMWARE_HAWAII "radeon/hawaii_uvd.bin"
++#define FIRMWARE_MULLINS "radeon/mullins_uvd.bin"
+ #endif
+ #define FIRMWARE_TONGA "amdgpu/tonga_uvd.bin"
+ #define FIRMWARE_CARRIZO "amdgpu/carrizo_uvd.bin"
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+index 7c23719..6ebf95a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+@@ -40,11 +40,11 @@
+
+ /* Firmware Names */
+ #ifdef CONFIG_DRM_AMDGPU_CIK
+-#define FIRMWARE_BONAIRE "amdgpu/bonaire_vce.bin"
+-#define FIRMWARE_KABINI "amdgpu/kabini_vce.bin"
+-#define FIRMWARE_KAVERI "amdgpu/kaveri_vce.bin"
+-#define FIRMWARE_HAWAII "amdgpu/hawaii_vce.bin"
+-#define FIRMWARE_MULLINS "amdgpu/mullins_vce.bin"
++#define FIRMWARE_BONAIRE "radeon/bonaire_vce.bin"
++#define FIRMWARE_KABINI "radeon/kabini_vce.bin"
++#define FIRMWARE_KAVERI "radeon/kaveri_vce.bin"
++#define FIRMWARE_HAWAII "radeon/hawaii_vce.bin"
++#define FIRMWARE_MULLINS "radeon/mullins_vce.bin"
+ #endif
+ #define FIRMWARE_TONGA "amdgpu/tonga_vce.bin"
+ #define FIRMWARE_CARRIZO "amdgpu/carrizo_vce.bin"
+diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+index 9bf0b24..2b41ed7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
++++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+@@ -49,10 +49,10 @@
+ #include "gmc/gmc_7_1_d.h"
+ #include "gmc/gmc_7_1_sh_mask.h"
+
+-MODULE_FIRMWARE("amdgpu/bonaire_smc.bin");
+-MODULE_FIRMWARE("amdgpu/bonaire_k_smc.bin");
+-MODULE_FIRMWARE("amdgpu/hawaii_smc.bin");
+-MODULE_FIRMWARE("amdgpu/hawaii_k_smc.bin");
++MODULE_FIRMWARE("radeon/bonaire_smc.bin");
++MODULE_FIRMWARE("radeon/bonaire_k_smc.bin");
++MODULE_FIRMWARE("radeon/hawaii_smc.bin");
++MODULE_FIRMWARE("radeon/hawaii_k_smc.bin");
+
+ #define MC_CG_ARB_FREQ_F0 0x0a
+ #define MC_CG_ARB_FREQ_F1 0x0b
+@@ -5814,7 +5814,7 @@ static int ci_dpm_init_microcode(struct amdgpu_device *adev)
+ default: BUG();
+ }
+
+- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
+ err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+index e1b56e7..1543e7e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
++++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+@@ -54,16 +54,16 @@ static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
+ static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);
+ static int cik_sdma_soft_reset(void *handle);
+
+-MODULE_FIRMWARE("amdgpu/bonaire_sdma.bin");
+-MODULE_FIRMWARE("amdgpu/bonaire_sdma1.bin");
+-MODULE_FIRMWARE("amdgpu/hawaii_sdma.bin");
+-MODULE_FIRMWARE("amdgpu/hawaii_sdma1.bin");
+-MODULE_FIRMWARE("amdgpu/kaveri_sdma.bin");
+-MODULE_FIRMWARE("amdgpu/kaveri_sdma1.bin");
+-MODULE_FIRMWARE("amdgpu/kabini_sdma.bin");
+-MODULE_FIRMWARE("amdgpu/kabini_sdma1.bin");
+-MODULE_FIRMWARE("amdgpu/mullins_sdma.bin");
+-MODULE_FIRMWARE("amdgpu/mullins_sdma1.bin");
++MODULE_FIRMWARE("radeon/bonaire_sdma.bin");
++MODULE_FIRMWARE("radeon/bonaire_sdma1.bin");
++MODULE_FIRMWARE("radeon/hawaii_sdma.bin");
++MODULE_FIRMWARE("radeon/hawaii_sdma1.bin");
++MODULE_FIRMWARE("radeon/kaveri_sdma.bin");
++MODULE_FIRMWARE("radeon/kaveri_sdma1.bin");
++MODULE_FIRMWARE("radeon/kabini_sdma.bin");
++MODULE_FIRMWARE("radeon/kabini_sdma1.bin");
++MODULE_FIRMWARE("radeon/mullins_sdma.bin");
++MODULE_FIRMWARE("radeon/mullins_sdma1.bin");
+
+ u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);
+
+@@ -132,9 +132,9 @@ static int cik_sdma_init_microcode(struct amdgpu_device *adev)
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (i == 0)
+- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
+ else
+- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma1.bin", chip_name);
+ err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+index 46dfa24..703803f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+@@ -57,36 +57,36 @@ static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev);
+ static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev);
+ static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev);
+
+-MODULE_FIRMWARE("amdgpu/bonaire_pfp.bin");
+-MODULE_FIRMWARE("amdgpu/bonaire_me.bin");
+-MODULE_FIRMWARE("amdgpu/bonaire_ce.bin");
+-MODULE_FIRMWARE("amdgpu/bonaire_rlc.bin");
+-MODULE_FIRMWARE("amdgpu/bonaire_mec.bin");
+-
+-MODULE_FIRMWARE("amdgpu/hawaii_pfp.bin");
+-MODULE_FIRMWARE("amdgpu/hawaii_me.bin");
+-MODULE_FIRMWARE("amdgpu/hawaii_ce.bin");
+-MODULE_FIRMWARE("amdgpu/hawaii_rlc.bin");
+-MODULE_FIRMWARE("amdgpu/hawaii_mec.bin");
+-
+-MODULE_FIRMWARE("amdgpu/kaveri_pfp.bin");
+-MODULE_FIRMWARE("amdgpu/kaveri_me.bin");
+-MODULE_FIRMWARE("amdgpu/kaveri_ce.bin");
+-MODULE_FIRMWARE("amdgpu/kaveri_rlc.bin");
+-MODULE_FIRMWARE("amdgpu/kaveri_mec.bin");
+-MODULE_FIRMWARE("amdgpu/kaveri_mec2.bin");
+-
+-MODULE_FIRMWARE("amdgpu/kabini_pfp.bin");
+-MODULE_FIRMWARE("amdgpu/kabini_me.bin");
+-MODULE_FIRMWARE("amdgpu/kabini_ce.bin");
+-MODULE_FIRMWARE("amdgpu/kabini_rlc.bin");
+-MODULE_FIRMWARE("amdgpu/kabini_mec.bin");
+-
+-MODULE_FIRMWARE("amdgpu/mullins_pfp.bin");
+-MODULE_FIRMWARE("amdgpu/mullins_me.bin");
+-MODULE_FIRMWARE("amdgpu/mullins_ce.bin");
+-MODULE_FIRMWARE("amdgpu/mullins_rlc.bin");
+-MODULE_FIRMWARE("amdgpu/mullins_mec.bin");
++MODULE_FIRMWARE("radeon/bonaire_pfp.bin");
++MODULE_FIRMWARE("radeon/bonaire_me.bin");
++MODULE_FIRMWARE("radeon/bonaire_ce.bin");
++MODULE_FIRMWARE("radeon/bonaire_rlc.bin");
++MODULE_FIRMWARE("radeon/bonaire_mec.bin");
++
++MODULE_FIRMWARE("radeon/hawaii_pfp.bin");
++MODULE_FIRMWARE("radeon/hawaii_me.bin");
++MODULE_FIRMWARE("radeon/hawaii_ce.bin");
++MODULE_FIRMWARE("radeon/hawaii_rlc.bin");
++MODULE_FIRMWARE("radeon/hawaii_mec.bin");
++
++MODULE_FIRMWARE("radeon/kaveri_pfp.bin");
++MODULE_FIRMWARE("radeon/kaveri_me.bin");
++MODULE_FIRMWARE("radeon/kaveri_ce.bin");
++MODULE_FIRMWARE("radeon/kaveri_rlc.bin");
++MODULE_FIRMWARE("radeon/kaveri_mec.bin");
++MODULE_FIRMWARE("radeon/kaveri_mec2.bin");
++
++MODULE_FIRMWARE("radeon/kabini_pfp.bin");
++MODULE_FIRMWARE("radeon/kabini_me.bin");
++MODULE_FIRMWARE("radeon/kabini_ce.bin");
++MODULE_FIRMWARE("radeon/kabini_rlc.bin");
++MODULE_FIRMWARE("radeon/kabini_mec.bin");
++
++MODULE_FIRMWARE("radeon/mullins_pfp.bin");
++MODULE_FIRMWARE("radeon/mullins_me.bin");
++MODULE_FIRMWARE("radeon/mullins_ce.bin");
++MODULE_FIRMWARE("radeon/mullins_rlc.bin");
++MODULE_FIRMWARE("radeon/mullins_mec.bin");
+
+ static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
+ {
+@@ -925,7 +925,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
+ default: BUG();
+ }
+
+- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+ err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+@@ -933,7 +933,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
+ if (err)
+ goto out;
+
+- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+ err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+@@ -941,7 +941,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
+ if (err)
+ goto out;
+
+- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
+ err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+@@ -949,7 +949,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
+ if (err)
+ goto out;
+
+- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name);
+ err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+@@ -958,7 +958,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
+ goto out;
+
+ if (adev->asic_type == CHIP_KAVERI) {
+- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec2.bin", chip_name);
+ err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+@@ -967,7 +967,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
+ goto out;
+ }
+
+- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
+ err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+index 5f854e5..46ce9f0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+@@ -50,8 +50,8 @@ static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev);
+ static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
+ static int gmc_v7_0_wait_for_idle(void *handle);
+
+-MODULE_FIRMWARE("amdgpu/bonaire_mc.bin");
+-MODULE_FIRMWARE("amdgpu/hawaii_mc.bin");
++MODULE_FIRMWARE("radeon/bonaire_mc.bin");
++MODULE_FIRMWARE("radeon/hawaii_mc.bin");
+ MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
+
+ static const u32 golden_settings_iceland_a11[] =
+@@ -150,7 +150,10 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
+ default: BUG();
+ }
+
+- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
++ if (adev->asic_type == CHIP_TOPAZ)
++ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
++ else
++ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+
+ err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
+ if (err)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5171-drm-amdgpu-Refine-function-name-and-function-args.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5171-drm-amdgpu-Refine-function-name-and-function-args.patch
new file mode 100644
index 00000000..551c9b6d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5171-drm-amdgpu-Refine-function-name-and-function-args.patch
@@ -0,0 +1,126 @@
+From 9c3a1fbdfbf9e2dc7be4a0654d8a3cdb04ffe659 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Tue, 14 Aug 2018 13:32:30 +0800
+Subject: [PATCH 5171/5725] drm/amdgpu: Refine function name and function args
+
+There are no logical changes here.
+
+1. Change the function names
+   amdgpu_device_ip_late_set_pg/cg_state to
+   amdgpu_device_set_pg/cg_state.
+2. Add a function argument cg/pg_state so
+   we can enable/disable CG/PG through these functions.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 39 +++++++++++++++---------------
+ 1 file changed, 19 insertions(+), 20 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index b798801..3b915c7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1740,24 +1740,26 @@ static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
+ }
+
+ /**
+- * amdgpu_device_ip_late_set_cg_state - late init for clockgating
++ * amdgpu_device_set_cg_state - set clockgating for amdgpu device
+ *
+ * @adev: amdgpu_device pointer
+ *
+- * Late initialization pass enabling clockgating for hardware IPs.
+ * The list of all the hardware IPs that make up the asic is walked and the
+- * set_clockgating_state callbacks are run. This stage is run late
+- * in the init process.
++ * set_clockgating_state callbacks are run.
++ * Late initialization pass enabling clockgating for hardware IPs.
++ * Fini or suspend, pass disabling clockgating for hardware IPs.
+ * Returns 0 on success, negative error code on failure.
+ */
+-static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
++static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
++ enum amd_clockgating_state state)
+ {
+- int i = 0, r;
++ int i, j, r;
+
+ if (amdgpu_emu_mode == 1)
+ return 0;
+
+- for (i = 0; i < adev->num_ip_blocks; i++) {
++ for (j = 0; j < adev->num_ip_blocks; j++) {
++ i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
+ if (!adev->ip_blocks[i].status.valid)
+ continue;
+ /* skip CG for VCE/UVD, it's handled specially */
+@@ -1767,7 +1769,7 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
+ adev->ip_blocks[i].version->funcs->set_clockgating_state) {
+ /* enable clockgating to save power */
+ r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+- AMD_CG_STATE_GATE);
++ state);
+ if (r) {
+ DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+@@ -1779,14 +1781,15 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
+ return 0;
+ }
+
+-static int amdgpu_device_ip_late_set_pg_state(struct amdgpu_device *adev)
++static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_powergating_state state)
+ {
+- int i = 0, r;
++ int i, j, r;
+
+ if (amdgpu_emu_mode == 1)
+ return 0;
+
+- for (i = 0; i < adev->num_ip_blocks; i++) {
++ for (j = 0; j < adev->num_ip_blocks; j++) {
++ i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
+ if (!adev->ip_blocks[i].status.valid)
+ continue;
+ /* skip CG for VCE/UVD, it's handled specially */
+@@ -1796,7 +1799,7 @@ static int amdgpu_device_ip_late_set_pg_state(struct amdgpu_device *adev)
+ adev->ip_blocks[i].version->funcs->set_powergating_state) {
+ /* enable powergating to save power */
+ r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
+- AMD_PG_STATE_GATE);
++ state);
+ if (r) {
+ DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+@@ -1837,8 +1840,8 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
+ }
+ }
+
+- amdgpu_device_ip_late_set_cg_state(adev);
+- amdgpu_device_ip_late_set_pg_state(adev);
++ amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
++ amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
+
+ queue_delayed_work(system_wq, &adev->late_init_work,
+ msecs_to_jiffies(AMDGPU_RESUME_MS));
+@@ -1955,13 +1958,9 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
+ }
+
+ /**
+- * amdgpu_device_ip_late_init_func_handler - work handler for clockgating
+- *
+- * @work: work_struct
++ * amdgpu_device_ip_late_init_func_handler - work handler for ib test
+ *
+- * Work handler for amdgpu_device_ip_late_set_cg_state. We put the
+- * clockgating setup into a worker thread to speed up driver init and
+- * resume from suspend.
++ * @work: work_struct.
+ */
+ static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
+ {
+--
+2.7.4
+
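The one subtle point in patch 5171 is the index mapping: the same loop in amdgpu_device_set_cg_state()/set_pg_state() walks the IP blocks front to back when gating and back to front when ungating. The following is a standalone sketch of just that mapping, using hypothetical block names rather than driver code.

    /* Sketch only: forward walk for gate, reverse walk for ungate. */
    #include <stdio.h>

    enum { GATE, UNGATE };

    int main(void)
    {
            const char *blocks[] = { "GMC", "IH", "GFX", "SDMA" };
            const unsigned n = sizeof(blocks) / sizeof(blocks[0]);

            for (int state = GATE; state <= UNGATE; state++) {
                    printf("%s:", state == GATE ? "gate  " : "ungate");
                    for (unsigned j = 0; j < n; j++) {
                            unsigned i = (state == GATE) ? j : n - j - 1;
                            printf(" %s", blocks[i]);
                    }
                    printf("\n");
            }
            return 0;
    }

Gating follows init order and ungating mirrors it, which is why the follow-up patches can simply call these helpers with an UNGATE state at the top of suspend/fini.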
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5172-drm-amdgpu-Set-power-ungate-state-when-suspend-fini.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5172-drm-amdgpu-Set-power-ungate-state-when-suspend-fini.patch
new file mode 100644
index 00000000..0bc84008
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5172-drm-amdgpu-Set-power-ungate-state-when-suspend-fini.patch
@@ -0,0 +1,126 @@
+From 12f151885433dad62234c0e9e7324da35430e9b7 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Tue, 14 Aug 2018 16:54:15 +0800
+Subject: [PATCH 5172/5725] drm/amdgpu: Set power ungate state when
+ suspend/fini
+
+Unify setting the power ungate state at the beginning of suspend/fini.
+Remove the workaround code for the gfx off feature in
+amdgpu_device.c.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 11 +++++------
+ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 4 ----
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 19 ++++++++++++-------
+ 3 files changed, 17 insertions(+), 17 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 3b915c7..cb99579 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1867,6 +1867,8 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
+ int i, r;
+
+ amdgpu_amdkfd_device_fini(adev);
++
++ amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
+ /* need to disable SMC first */
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_blocks[i].status.hw)
+@@ -1881,8 +1883,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
+ adev->ip_blocks[i].version->funcs->name, r);
+ return r;
+ }
+- amdgpu_gfx_off_ctrl(adev, false);
+- cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
++
+ r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
+ /* XXX handle errors */
+ if (r) {
+@@ -2004,6 +2005,8 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_request_full_gpu(adev, false);
+
++ amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
++
+ for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+ if (!adev->ip_blocks[i].status.valid)
+ continue;
+@@ -2059,10 +2062,6 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
+ DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
+ }
+
+- /* call smu to disable gfx off feature first when suspend */
+- amdgpu_gfx_off_ctrl(adev, false);
+- cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
+-
+ for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+ if (!adev->ip_blocks[i].status.valid)
+ continue;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index 96d517e..6f79369 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -5163,10 +5163,6 @@ static int gfx_v8_0_hw_fini(void *handle)
+ gfx_v8_0_cp_enable(adev, false);
+ gfx_v8_0_rlc_stop(adev);
+
+- amdgpu_device_ip_set_powergating_state(adev,
+- AMD_IP_BLOCK_TYPE_GFX,
+- AMD_PG_STATE_UNGATE);
+-
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 6131db4..2b78ec4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -3247,9 +3247,6 @@ static int gfx_v9_0_hw_fini(void *handle)
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i;
+
+- amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
+- AMD_PG_STATE_UNGATE);
+-
+ amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
+ amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+
+@@ -3768,6 +3765,10 @@ static int gfx_v9_0_set_powergating_state(void *handle,
+
+ switch (adev->asic_type) {
+ case CHIP_RAVEN:
++ if (!enable) {
++ amdgpu_gfx_off_ctrl(adev, false);
++ cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
++ }
+ if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
+ gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
+ gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
+@@ -3787,12 +3788,16 @@ static int gfx_v9_0_set_powergating_state(void *handle,
+ /* update mgcg state */
+ gfx_v9_0_update_gfx_mg_power_gating(adev, enable);
+
+- /* set gfx off through smu */
+- amdgpu_gfx_off_ctrl(adev, true);
++ if (enable)
++ amdgpu_gfx_off_ctrl(adev, true);
+ break;
+ case CHIP_VEGA12:
+- /* set gfx off through smu */
+- amdgpu_gfx_off_ctrl(adev, true);
++ if (!enable) {
++ amdgpu_gfx_off_ctrl(adev, false);
++ cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
++ } else {
++ amdgpu_gfx_off_ctrl(adev, true);
++ }
+ break;
+ default:
+ break;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5173-drm-amdgpu-Set-clock-ungate-state-when-suspend-fini.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5173-drm-amdgpu-Set-clock-ungate-state-when-suspend-fini.patch
new file mode 100644
index 00000000..fe5f8256
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5173-drm-amdgpu-Set-clock-ungate-state-when-suspend-fini.patch
@@ -0,0 +1,131 @@
+From 63d33c319ab6f97cbf22e86bba32ad46c33e1b3b Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Tue, 14 Aug 2018 17:28:46 +0800
+Subject: [PATCH 5173/5725] drm/amdgpu: Set clock ungate state when
+ suspend/fini
+
+After setting the power ungate state, also set the clock ungate
+state at the beginning of suspend/fini.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 56 +++---------------------------
+ 1 file changed, 5 insertions(+), 51 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index cb99579..b5d0c9c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1750,6 +1750,7 @@ static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
+ * Fini or suspend, pass disabling clockgating for hardware IPs.
+ * Returns 0 on success, negative error code on failure.
+ */
++
+ static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
+ enum amd_clockgating_state state)
+ {
+@@ -1869,21 +1870,13 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
+ amdgpu_amdkfd_device_fini(adev);
+
+ amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
++ amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
++
+ /* need to disable SMC first */
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_blocks[i].status.hw)
+ continue;
+- if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC &&
+- adev->ip_blocks[i].version->funcs->set_clockgating_state) {
+- /* ungate blocks before hw fini so that we can shutdown the blocks safely */
+- r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+- AMD_CG_STATE_UNGATE);
+- if (r) {
+- DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
+- adev->ip_blocks[i].version->funcs->name, r);
+- return r;
+- }
+-
++ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
+ r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
+ /* XXX handle errors */
+ if (r) {
+@@ -1899,20 +1892,6 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
+ if (!adev->ip_blocks[i].status.hw)
+ continue;
+
+- if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
+- adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
+- adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
+- adev->ip_blocks[i].version->funcs->set_clockgating_state) {
+- /* ungate blocks before hw fini so that we can shutdown the blocks safely */
+- r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+- AMD_CG_STATE_UNGATE);
+- if (r) {
+- DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
+- adev->ip_blocks[i].version->funcs->name, r);
+- return r;
+- }
+- }
+-
+ r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
+ /* XXX handle errors */
+ if (r) {
+@@ -2006,21 +1985,13 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
+ amdgpu_virt_request_full_gpu(adev, false);
+
+ amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
++ amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
+
+ for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+ if (!adev->ip_blocks[i].status.valid)
+ continue;
+ /* displays are handled separately */
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
+- /* ungate blocks so that suspend can properly shut them down */
+- if (adev->ip_blocks[i].version->funcs->set_clockgating_state) {
+- r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+- AMD_CG_STATE_UNGATE);
+- if (r) {
+- DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
+- adev->ip_blocks[i].version->funcs->name, r);
+- }
+- }
+ /* XXX handle errors */
+ r = adev->ip_blocks[i].version->funcs->suspend(adev);
+ /* XXX handle errors */
+@@ -2055,29 +2026,12 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_request_full_gpu(adev, false);
+
+- /* ungate SMC block first */
+- r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
+- AMD_CG_STATE_UNGATE);
+- if (r) {
+- DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
+- }
+-
+ for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+ if (!adev->ip_blocks[i].status.valid)
+ continue;
+ /* displays are handled in phase1 */
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
+ continue;
+- /* ungate blocks so that suspend can properly shut them down */
+- if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC &&
+- adev->ip_blocks[i].version->funcs->set_clockgating_state) {
+- r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+- AMD_CG_STATE_UNGATE);
+- if (r) {
+- DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
+- adev->ip_blocks[i].version->funcs->name, r);
+- }
+- }
+ /* XXX handle errors */
+ r = adev->ip_blocks[i].version->funcs->suspend(adev);
+ /* XXX handle errors */
+--
+2.7.4
+
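Taken together, patches 5172 and 5173 converge on one ordering at the start of suspend/fini: ungate power, then ungate clocks, and only then run hw_fini per IP block (SMC first). The snippet below is a standalone mock of that sequence; every name is a stand-in for the real amdgpu helpers, not driver code.

    /* Sketch only: the suspend/fini ordering described above, mocked up
     * as prints; all functions are stand-ins for the amdgpu helpers. */
    #include <stdio.h>

    static void set_pg_state(const char *s) { printf("powergating -> %s\n", s); }
    static void set_cg_state(const char *s) { printf("clockgating -> %s\n", s); }
    static void hw_fini(const char *block)  { printf("hw_fini     -> %s\n", block); }

    int main(void)
    {
            const char *blocks[] = { "SMC", "GFX", "SDMA", "GMC" };

            /* 1. Ungate power, then clocks, before touching any block. */
            set_pg_state("ungate");
            set_cg_state("ungate");

            /* 2. Only then shut the blocks down, SMC first as in
             *    amdgpu_device_ip_fini(). */
            for (unsigned i = 0; i < sizeof(blocks) / sizeof(blocks[0]); i++)
                    hw_fini(blocks[i]);

            return 0;
    }

Centralizing the ungate calls is what lets both patches delete the per-block set_clockgating_state(UNGATE) boilerplate that used to precede each hw_fini.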
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5174-drm-amdgpu-fix-VM-size-reporting-on-Raven.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5174-drm-amdgpu-fix-VM-size-reporting-on-Raven.patch
new file mode 100644
index 00000000..1343a09f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5174-drm-amdgpu-fix-VM-size-reporting-on-Raven.patch
@@ -0,0 +1,36 @@
+From 8be3d3ff43d978593a398551cb1e4005463c6782 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Wed, 15 Aug 2018 14:04:47 +0200
+Subject: [PATCH 5174/5725] drm/amdgpu: fix VM size reporting on Raven
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Raven doesn't have a VCE block and therefore no buggy VCE firmware.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Acked-by: Chunming Zhou <david1.zhou@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index e60cfbf..1c45f1b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -649,7 +649,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+ vm_size -= AMDGPU_VA_RESERVED_SIZE;
+
+ /* Older VCE FW versions are buggy and can handle only 40bits */
+- if (adev->vce.fw_version < AMDGPU_VCE_FW_53_45)
++ if (adev->vce.fw_version &&
++ adev->vce.fw_version < AMDGPU_VCE_FW_53_45)
+ vm_size = min(vm_size, 1ULL << 40);
+
+ dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5175-drm-amdgpu-Do-not-evict-VRAM-on-APUs-with-disabled-H.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5175-drm-amdgpu-Do-not-evict-VRAM-on-APUs-with-disabled-H.patch
new file mode 100644
index 00000000..8929d7f1
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5175-drm-amdgpu-Do-not-evict-VRAM-on-APUs-with-disabled-H.patch
@@ -0,0 +1,45 @@
+From cbb6ae4260d94fce545ea582a0de7e0b084756f9 Mon Sep 17 00:00:00 2001
+From: Paul Menzel <pmenzel@molgen.mpg.de>
+Date: Wed, 25 Jul 2018 12:54:19 +0200
+Subject: [PATCH 5175/5725] drm/amdgpu: Do not evict VRAM on APUs with disabled
+ HIBERNATE
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Improve commit d796d844 (drm/radeon/kms: make hibernate work on IGPs) to
+only migrate VRAM objects if the Linux kernel is actually built with
+support for hibernation (suspend to disk).
+
+A better solution would be to determine whether this is suspend or
+hibernation in `amdgpu_device_suspend()`, but that is more involved, so
+apply the simple solution first.
+
+Link: https://bugs.freedesktop.org/show_bug.cgi?id=107277
+Signed-off-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index 4c26b94..6f86f44 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -1060,10 +1060,12 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
+ int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
+ {
+ /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
+- if (0 && (adev->flags & AMD_IS_APU)) {
++#ifndef CONFIG_HIBERNATION
++ if (adev->flags & AMD_IS_APU) {
+ /* Useless to evict on IGP chips */
+ return 0;
+ }
++#endif
+ return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5176-drm-amd-display-Do-not-retain-link-settings.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5176-drm-amd-display-Do-not-retain-link-settings.patch
new file mode 100644
index 00000000..86a65393
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5176-drm-amd-display-Do-not-retain-link-settings.patch
@@ -0,0 +1,53 @@
+From 45be44a0f066d38f9fa9075cd161dc3102f49eee Mon Sep 17 00:00:00 2001
+From: Samson Tam <Samson.Tam@amd.com>
+Date: Mon, 30 Jul 2018 12:22:35 -0400
+Subject: [PATCH 5176/5725] drm/amd/display: Do not retain link settings
+
+Do not retrain link settings if the lane count and link rate are both
+unknown; otherwise the driver gets stuck reading a VBIOS register after
+removing an emulated connection.
+
+Signed-off-by: Samson Tam <Samson.Tam@amd.com>
+Reviewed-by: Anthony Koo <Anthony.Koo@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc.c | 15 +++++++++++++--
+ 1 file changed, 13 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 16beef9..8f4a288 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -52,6 +52,8 @@
+ #include "dm_helpers.h"
+ #include "mem_input.h"
+ #include "hubp.h"
++
++#include "dc_link_dp.h"
+ #define DC_LOGGER \
+ dc->ctx->logger
+
+@@ -434,8 +436,17 @@ void dc_link_set_preferred_link_settings(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link *link)
+ {
+- link->preferred_link_setting = *link_setting;
+- dp_retrain_link_dp_test(link, link_setting, false);
++ struct dc_link_settings store_settings = *link_setting;
++ struct dc_stream_state *link_stream =
++ link->dc->current_state->res_ctx.pipe_ctx[0].stream;
++
++ link->preferred_link_setting = store_settings;
++ if (link_stream)
++ decide_link_settings(link_stream, &store_settings);
++
++ if ((store_settings.lane_count != LANE_COUNT_UNKNOWN) &&
++ (store_settings.link_rate != LINK_RATE_UNKNOWN))
++ dp_retrain_link_dp_test(link, &store_settings, false);
+ }
+
+ void dc_link_enable_hpd(const struct dc_link *link)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5177-drm-amd-display-Create-new-i2c-resource.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5177-drm-amd-display-Create-new-i2c-resource.patch
new file mode 100644
index 00000000..72b3f9e8
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5177-drm-amd-display-Create-new-i2c-resource.patch
@@ -0,0 +1,2888 @@
+From 2dfcd88b1cd363eab06fc6a4491d3aab35798e3b Mon Sep 17 00:00:00 2001
+From: David Francis <David.Francis@amd.com>
+Date: Mon, 23 Jul 2018 14:12:10 -0400
+Subject: [PATCH 5177/5725] drm/amd/display: Create new i2c resource
+
+[Why]
+The I2C code did not match the dc resource model and was generally
+unpleasant.
+
+[How]
+Move the code into new, svelte dce_i2c files, replacing the various
+i2c objects with two structs: dce_i2c_sw and dce_i2c_hw. Fully split
+the sw and hw code paths. Remove all redundant declarations. Use
+address lists to distinguish between versions. Change the dce80 code
+to the newer register access macros.
+
+Signed-off-by: David Francis <David.Francis@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 9 +-
+ drivers/gpu/drm/amd/display/dc/bios/bios_parser.c | 10 +-
+ drivers/gpu/drm/amd/display/dc/core/dc.c | 8 +-
+ drivers/gpu/drm/amd/display/dc/core/dc_link.c | 4 +-
+ drivers/gpu/drm/amd/display/dc/dce/Makefile | 4 +-
+ drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c | 60 ++
+ drivers/gpu/drm/amd/display/dc/dce/dce_i2c.h | 71 ++
+ drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c | 951 +++++++++++++++++++++
+ drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h | 335 ++++++++
+ drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c | 602 +++++++++++++
+ drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.h | 57 ++
+ .../drm/amd/display/dc/dce100/dce100_resource.c | 51 +-
+ .../drm/amd/display/dc/dce110/dce110_resource.c | 51 +-
+ .../drm/amd/display/dc/dce112/dce112_resource.c | 51 +-
+ .../drm/amd/display/dc/dce120/dce120_resource.c | 65 +-
+ .../gpu/drm/amd/display/dc/dce80/dce80_resource.c | 99 +++
+ .../gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 51 +-
+ drivers/gpu/drm/amd/display/dc/inc/core_types.h | 3 +
+ 18 files changed, 2452 insertions(+), 30 deletions(-)
+ create mode 100644 drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c
+ create mode 100644 drivers/gpu/drm/amd/display/dc/dce/dce_i2c.h
+ create mode 100644 drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+ create mode 100644 drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h
+ create mode 100644 drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
+ create mode 100644 drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.h
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 3305d9d..2704c7f 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -71,8 +71,6 @@
+
+ #include "modules/inc/mod_freesync.h"
+
+-#include "i2caux_interface.h"
+-
+ /* basic init/fini API */
+ static int amdgpu_dm_init(struct amdgpu_device *adev);
+ static void amdgpu_dm_fini(struct amdgpu_device *adev);
+@@ -3893,9 +3891,9 @@ static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
+ cmd.payloads[i].data = msgs[i].buf;
+ }
+
+- if (dal_i2caux_submit_i2c_command(
+- ddc_service->ctx->i2caux,
+- ddc_service->ddc_pin,
++ if (dc_submit_i2c(
++ ddc_service->ctx->dc,
++ ddc_service->ddc_pin->hw_info.ddc_channel,
+ &cmd))
+ result = num;
+
+@@ -3931,6 +3929,7 @@ create_i2c(struct ddc_service *ddc_service,
+ snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
+ i2c_set_adapdata(&i2c->base, i2c);
+ i2c->ddc_service = ddc_service;
++ i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
+
+ return i2c;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+index be8a249..bfa5816 100644
+--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
++++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+@@ -42,7 +42,7 @@
+ #include "bios_parser_interface.h"
+
+ #include "bios_parser_common.h"
+-/* TODO remove - only needed for default i2c speed */
++
+ #include "dc.h"
+
+ #define THREE_PERCENT_OF_10000 300
+@@ -2671,11 +2671,9 @@ static bool i2c_read(
+
+ cmd.payloads = payloads;
+ cmd.number_of_payloads = ARRAY_SIZE(payloads);
+-
+- /* TODO route this through drm i2c_adapter */
+- result = dal_i2caux_submit_i2c_command(
+- ddc->ctx->i2caux,
+- ddc,
++ result = dc_submit_i2c(
++ ddc->ctx->dc,
++ ddc->hw_info.ddc_channel,
+ &cmd);
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 8f4a288..152c7dd 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -54,6 +54,9 @@
+ #include "hubp.h"
+
+ #include "dc_link_dp.h"
++
++#include "dce/dce_i2c.h"
++
+ #define DC_LOGGER \
+ dc->ctx->logger
+
+@@ -1724,9 +1727,8 @@ bool dc_submit_i2c(
+
+ struct dc_link *link = dc->links[link_index];
+ struct ddc_service *ddc = link->ddc;
+-
+- return dal_i2caux_submit_i2c_command(
+- ddc->ctx->i2caux,
++ return dce_i2c_submit_command(
++ dc->res_pool,
+ ddc->ddc_pin,
+ cmd);
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index 7af0f31..f8b299c 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -1530,8 +1530,8 @@ static bool i2c_write(struct pipe_ctx *pipe_ctx,
+ payload.write = true;
+ cmd.payloads = &payload;
+
+- if (dc_submit_i2c(pipe_ctx->stream->ctx->dc,
+- pipe_ctx->stream->sink->link->link_index, &cmd))
++ if (dm_helpers_submit_i2c(pipe_ctx->stream->ctx,
++ pipe_ctx->stream->sink->link, &cmd))
+ return true;
+
+ return false;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile b/drivers/gpu/drm/amd/display/dc/dce/Makefile
+index 67b0852..36ef8b1 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/dce/Makefile
+@@ -7,8 +7,8 @@
+
+ DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \
+ dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \
+-dce_clocks.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o
+-
++dce_clocks.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \
++dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o
+
+ AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE))
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c
+new file mode 100644
+index 0000000..35a7539
+--- /dev/null
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c
+@@ -0,0 +1,60 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++#include "dce_i2c.h"
++#include "reg_helper.h"
++
++bool dce_i2c_submit_command(
++ struct resource_pool *pool,
++ struct ddc *ddc,
++ struct i2c_command *cmd)
++{
++ struct dce_i2c_hw *dce_i2c_hw;
++ struct dce_i2c_sw *dce_i2c_sw;
++
++ if (!ddc) {
++ BREAK_TO_DEBUGGER();
++ return false;
++ }
++
++ if (!cmd) {
++ BREAK_TO_DEBUGGER();
++ return false;
++ }
++
++ /* The software engine is only available on dce8 */
++ dce_i2c_sw = dce_i2c_acquire_i2c_sw_engine(pool, ddc);
++
++ if (!dce_i2c_sw) {
++ dce_i2c_hw = acquire_i2c_hw_engine(pool, ddc);
++
++ if (!dce_i2c_hw)
++ return false;
++
++ return dce_i2c_submit_command_hw(pool, ddc, cmd, dce_i2c_hw);
++ }
++
++ return dce_i2c_submit_command_sw(pool, ddc, cmd, dce_i2c_sw);
++
++}
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.h b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.h
+new file mode 100644
+index 0000000..d655f89
+--- /dev/null
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.h
+@@ -0,0 +1,71 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++#ifndef __DCE_I2C_H__
++#define __DCE_I2C_H__
++
++#include "inc/core_types.h"
++#include "dce_i2c_hw.h"
++#include "dce_i2c_sw.h"
++
++enum dce_i2c_transaction_status {
++ DCE_I2C_TRANSACTION_STATUS_UNKNOWN = (-1L),
++ DCE_I2C_TRANSACTION_STATUS_SUCCEEDED,
++ DCE_I2C_TRANSACTION_STATUS_FAILED_CHANNEL_BUSY,
++ DCE_I2C_TRANSACTION_STATUS_FAILED_TIMEOUT,
++ DCE_I2C_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR,
++ DCE_I2C_TRANSACTION_STATUS_FAILED_NACK,
++ DCE_I2C_TRANSACTION_STATUS_FAILED_INCOMPLETE,
++ DCE_I2C_TRANSACTION_STATUS_FAILED_OPERATION,
++ DCE_I2C_TRANSACTION_STATUS_FAILED_INVALID_OPERATION,
++ DCE_I2C_TRANSACTION_STATUS_FAILED_BUFFER_OVERFLOW,
++ DCE_I2C_TRANSACTION_STATUS_FAILED_HPD_DISCON
++};
++
++enum dce_i2c_transaction_operation {
++ DCE_I2C_TRANSACTION_READ,
++ DCE_I2C_TRANSACTION_WRITE
++};
++
++struct dce_i2c_transaction_payload {
++ enum dce_i2c_transaction_address_space address_space;
++ uint32_t address;
++ uint32_t length;
++ uint8_t *data;
++};
++
++struct dce_i2c_transaction_request {
++ enum dce_i2c_transaction_operation operation;
++ struct dce_i2c_transaction_payload payload;
++ enum dce_i2c_transaction_status status;
++};
++
++
++bool dce_i2c_submit_command(
++ struct resource_pool *pool,
++ struct ddc *ddc,
++ struct i2c_command *cmd);
++
++#endif
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+new file mode 100644
+index 0000000..6a57c48
+--- /dev/null
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+@@ -0,0 +1,951 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++#include "dce_i2c.h"
++#include "dce_i2c_hw.h"
++#include "reg_helper.h"
++#include "include/gpio_service_interface.h"
++
++#define CTX \
++ dce_i2c_hw->ctx
++#define REG(reg)\
++ dce_i2c_hw->regs->reg
++
++#undef FN
++#define FN(reg_name, field_name) \
++ dce_i2c_hw->shifts->field_name, dce_i2c_hw->masks->field_name
++
++
++static inline void reset_hw_engine(struct dce_i2c_hw *dce_i2c_hw)
++{
++ REG_UPDATE_2(DC_I2C_CONTROL,
++ DC_I2C_SW_STATUS_RESET, 1,
++ DC_I2C_SW_STATUS_RESET, 1);
++}
++
++static bool is_hw_busy(struct dce_i2c_hw *dce_i2c_hw)
++{
++ uint32_t i2c_sw_status = 0;
++
++ REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status);
++ if (i2c_sw_status == DC_I2C_STATUS__DC_I2C_STATUS_IDLE)
++ return false;
++
++ reset_hw_engine(dce_i2c_hw);
++
++ REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status);
++ return i2c_sw_status != DC_I2C_STATUS__DC_I2C_STATUS_IDLE;
++}
++
++static void set_speed_hw_dce80(
++ struct dce_i2c_hw *dce_i2c_hw,
++ uint32_t speed)
++{
++
++ if (speed) {
++ REG_UPDATE_N(SPEED, 2,
++ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), dce_i2c_hw->reference_frequency / speed,
++ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2);
++ }
++}
++static void set_speed_hw_dce100(
++ struct dce_i2c_hw *dce_i2c_hw,
++ uint32_t speed)
++{
++
++ if (speed) {
++ if (dce_i2c_hw->masks->DC_I2C_DDC1_START_STOP_TIMING_CNTL)
++ REG_UPDATE_N(SPEED, 3,
++ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), dce_i2c_hw->reference_frequency / speed,
++ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2,
++ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_START_STOP_TIMING_CNTL), speed > 50 ? 2:1);
++ else
++ REG_UPDATE_N(SPEED, 2,
++ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), dce_i2c_hw->reference_frequency / speed,
++ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2);
++ }
++}
++bool dce_i2c_hw_engine_acquire_engine(
++ struct dce_i2c_hw *dce_i2c_hw,
++ struct ddc *ddc)
++{
++
++ enum gpio_result result;
++ uint32_t current_speed;
++
++ result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE,
++ GPIO_DDC_CONFIG_TYPE_MODE_I2C);
++
++ if (result != GPIO_RESULT_OK)
++ return false;
++
++ dce_i2c_hw->ddc = ddc;
++
++
++ current_speed = dce_i2c_hw->funcs->get_speed(dce_i2c_hw);
++
++ if (current_speed)
++ dce_i2c_hw->original_speed = current_speed;
++
++ return true;
++}
++bool dce_i2c_engine_acquire_hw(
++ struct dce_i2c_hw *dce_i2c_hw,
++ struct ddc *ddc_handle)
++{
++
++ uint32_t counter = 0;
++ bool result;
++
++ do {
++ result = dce_i2c_hw_engine_acquire_engine(
++ dce_i2c_hw, ddc_handle);
++
++ if (result)
++ break;
++
++ /* i2c_engine is busy with VBIOS, let's wait and retry */
++
++ udelay(10);
++
++ ++counter;
++ } while (counter < 2);
++
++ if (result) {
++ if (!dce_i2c_hw->funcs->setup_engine(dce_i2c_hw)) {
++ dce_i2c_hw->funcs->release_engine(dce_i2c_hw);
++ result = false;
++ }
++ }
++
++ return result;
++}
++struct dce_i2c_hw *acquire_i2c_hw_engine(
++ struct resource_pool *pool,
++ struct ddc *ddc)
++{
++
++ struct dce_i2c_hw *engine = NULL;
++
++ if (!ddc)
++ return NULL;
++
++ if (ddc->hw_info.hw_supported) {
++ enum gpio_ddc_line line = dal_ddc_get_line(ddc);
++
++ if (line < pool->pipe_count)
++ engine = pool->hw_i2cs[line];
++ }
++
++ if (!engine)
++ return NULL;
++
++
++ if (!pool->i2c_hw_buffer_in_use &&
++ dce_i2c_engine_acquire_hw(engine, ddc)) {
++ pool->i2c_hw_buffer_in_use = true;
++ return engine;
++ }
++
++
++ return NULL;
++}
++
++static bool setup_engine_hw_dce100(
++ struct dce_i2c_hw *dce_i2c_hw)
++{
++ uint32_t i2c_setup_limit = I2C_SETUP_TIME_LIMIT_DCE;
++
++ if (dce_i2c_hw->setup_limit != 0)
++ i2c_setup_limit = dce_i2c_hw->setup_limit;
++ /* Program pin select */
++ REG_UPDATE_6(DC_I2C_CONTROL,
++ DC_I2C_GO, 0,
++ DC_I2C_SOFT_RESET, 0,
++ DC_I2C_SEND_RESET, 0,
++ DC_I2C_SW_STATUS_RESET, 1,
++ DC_I2C_TRANSACTION_COUNT, 0,
++ DC_I2C_DDC_SELECT, dce_i2c_hw->engine_id);
++
++ /* Program time limit */
++ if (dce_i2c_hw->send_reset_length == 0) {
++ /*pre-dcn*/
++ REG_UPDATE_N(SETUP, 2,
++ FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT), i2c_setup_limit,
++ FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 1);
++ }
++ /* Program HW priority
++ * set to High - interrupt software I2C at any time
++ * Enable restart of SW I2C that was interrupted by HW
++ * disable queuing of software while I2C is in use by HW
++ */
++ REG_UPDATE_2(DC_I2C_ARBITRATION,
++ DC_I2C_NO_QUEUED_SW_GO, 0,
++ DC_I2C_SW_PRIORITY, DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_NORMAL);
++
++ return true;
++}
++static bool setup_engine_hw_dce80(
++ struct dce_i2c_hw *dce_i2c_hw)
++{
++
++ /* Program pin select */
++ {
++ REG_UPDATE_6(DC_I2C_CONTROL,
++ DC_I2C_GO, 0,
++ DC_I2C_SOFT_RESET, 0,
++ DC_I2C_SEND_RESET, 0,
++ DC_I2C_SW_STATUS_RESET, 1,
++ DC_I2C_TRANSACTION_COUNT, 0,
++ DC_I2C_DDC_SELECT, dce_i2c_hw->engine_id);
++ }
++
++ /* Program time limit */
++ {
++ REG_UPDATE_2(SETUP,
++ DC_I2C_DDC1_TIME_LIMIT, I2C_SETUP_TIME_LIMIT_DCE,
++ DC_I2C_DDC1_ENABLE, 1);
++ }
++
++ /* Program HW priority
++ * set to High - interrupt software I2C at any time
++ * Enable restart of SW I2C that was interrupted by HW
++ * disable queuing of software while I2C is in use by HW
++ */
++ {
++ REG_UPDATE_2(DC_I2C_ARBITRATION,
++ DC_I2C_NO_QUEUED_SW_GO, 0,
++ DC_I2C_SW_PRIORITY, DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_NORMAL);
++ }
++
++ return true;
++}
++
++
++
++static void process_channel_reply_hw_dce80(
++ struct dce_i2c_hw *dce_i2c_hw,
++ struct i2c_reply_transaction_data *reply)
++{
++ uint32_t length = reply->length;
++ uint8_t *buffer = reply->data;
++
++ REG_SET_3(DC_I2C_DATA, 0,
++ DC_I2C_INDEX, length - 1,
++ DC_I2C_DATA_RW, 1,
++ DC_I2C_INDEX_WRITE, 1);
++
++ while (length) {
++ /* after reading the status,
++ * if the I2C operation executed successfully
++ * (i.e. DC_I2C_STATUS_DONE = 1) then the I2C controller
++ * should read data bytes from I2C circular data buffer
++ */
++
++ uint32_t i2c_data;
++
++ REG_GET(DC_I2C_DATA, DC_I2C_DATA, &i2c_data);
++ *buffer++ = i2c_data;
++
++ --length;
++ }
++}
++static void process_channel_reply_hw_dce100(
++ struct dce_i2c_hw *dce_i2c_hw,
++ struct i2c_reply_transaction_data *reply)
++{
++ uint32_t length = reply->length;
++ uint8_t *buffer = reply->data;
++
++ REG_SET_3(DC_I2C_DATA, 0,
++ DC_I2C_INDEX, dce_i2c_hw->buffer_used_write,
++ DC_I2C_DATA_RW, 1,
++ DC_I2C_INDEX_WRITE, 1);
++
++ while (length) {
++ /* after reading the status,
++ * if the I2C operation executed successfully
++ * (i.e. DC_I2C_STATUS_DONE = 1) then the I2C controller
++ * should read data bytes from I2C circular data buffer
++ */
++
++ uint32_t i2c_data;
++
++ REG_GET(DC_I2C_DATA, DC_I2C_DATA, &i2c_data);
++ *buffer++ = i2c_data;
++
++ --length;
++ }
++}
++enum i2c_channel_operation_result dce_i2c_hw_engine_wait_on_operation_result(
++ struct dce_i2c_hw *dce_i2c_hw,
++ uint32_t timeout,
++ enum i2c_channel_operation_result expected_result)
++{
++ enum i2c_channel_operation_result result;
++ uint32_t i = 0;
++
++ if (!timeout)
++ return I2C_CHANNEL_OPERATION_SUCCEEDED;
++
++ do {
++
++ result = dce_i2c_hw->funcs->get_channel_status(
++ dce_i2c_hw, NULL);
++
++ if (result != expected_result)
++ break;
++
++ udelay(1);
++
++ ++i;
++ } while (i < timeout);
++ return result;
++}
++static enum i2c_channel_operation_result get_channel_status_hw(
++ struct dce_i2c_hw *dce_i2c_hw,
++ uint8_t *returned_bytes)
++{
++ uint32_t i2c_sw_status = 0;
++ uint32_t value =
++ REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status);
++ if (i2c_sw_status == DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW)
++ return I2C_CHANNEL_OPERATION_ENGINE_BUSY;
++ else if (value & dce_i2c_hw->masks->DC_I2C_SW_STOPPED_ON_NACK)
++ return I2C_CHANNEL_OPERATION_NO_RESPONSE;
++ else if (value & dce_i2c_hw->masks->DC_I2C_SW_TIMEOUT)
++ return I2C_CHANNEL_OPERATION_TIMEOUT;
++ else if (value & dce_i2c_hw->masks->DC_I2C_SW_ABORTED)
++ return I2C_CHANNEL_OPERATION_FAILED;
++ else if (value & dce_i2c_hw->masks->DC_I2C_SW_DONE)
++ return I2C_CHANNEL_OPERATION_SUCCEEDED;
++
++ /*
++ * this is the case when HW is used for communication; I2C_SW_STATUS
++ * could be zero
++ */
++ return I2C_CHANNEL_OPERATION_SUCCEEDED;
++}
++
++static void submit_channel_request_hw(
++ struct dce_i2c_hw *dce_i2c_hw,
++ struct i2c_request_transaction_data *request)
++{
++ request->status = I2C_CHANNEL_OPERATION_SUCCEEDED;
++
++ if (!dce_i2c_hw->funcs->process_transaction(dce_i2c_hw, request))
++ return;
++
++ if (dce_i2c_hw->funcs->is_hw_busy(dce_i2c_hw)) {
++ request->status = I2C_CHANNEL_OPERATION_ENGINE_BUSY;
++ return;
++ }
++
++ dce_i2c_hw->funcs->execute_transaction(dce_i2c_hw);
++
++
++}
++uint32_t get_reference_clock(
++ struct dc_bios *bios)
++{
++ struct dc_firmware_info info = { { 0 } };
++
++ if (bios->funcs->get_firmware_info(bios, &info) != BP_RESULT_OK)
++ return 0;
++
++ return info.pll_info.crystal_frequency;
++}
++
++static void execute_transaction_hw(
++ struct dce_i2c_hw *dce_i2c_hw)
++{
++ REG_UPDATE_N(SETUP, 5,
++ FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_DATA_DRIVE_EN), 0,
++ FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_CLK_DRIVE_EN), 0,
++ FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_DATA_DRIVE_SEL), 0,
++ FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_INTRA_TRANSACTION_DELAY), 0,
++ FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_INTRA_BYTE_DELAY), 0);
++
++
++ REG_UPDATE_5(DC_I2C_CONTROL,
++ DC_I2C_SOFT_RESET, 0,
++ DC_I2C_SW_STATUS_RESET, 0,
++ DC_I2C_SEND_RESET, 0,
++ DC_I2C_GO, 0,
++ DC_I2C_TRANSACTION_COUNT, dce_i2c_hw->transaction_count - 1);
++
++ /* start I2C transfer */
++ REG_UPDATE(DC_I2C_CONTROL, DC_I2C_GO, 1);
++
++ /* all transactions were executed and HW buffer became empty
++ * (even though it actually happens when status becomes DONE)
++ */
++ dce_i2c_hw->transaction_count = 0;
++ dce_i2c_hw->buffer_used_bytes = 0;
++}
++static bool process_transaction_hw_dce80(
++ struct dce_i2c_hw *dce_i2c_hw,
++ struct i2c_request_transaction_data *request)
++{
++ uint32_t length = request->length;
++ uint8_t *buffer = request->data;
++
++ bool last_transaction = false;
++ uint32_t value = 0;
++
++ {
++
++ last_transaction = ((dce_i2c_hw->transaction_count == 3) ||
++ (request->action == DCE_I2C_TRANSACTION_ACTION_I2C_WRITE) ||
++ (request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ));
++
++
++ switch (dce_i2c_hw->transaction_count) {
++ case 0:
++ REG_UPDATE_5(DC_I2C_TRANSACTION0,
++ DC_I2C_STOP_ON_NACK0, 1,
++ DC_I2C_START0, 1,
++ DC_I2C_RW0, 0 != (request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ),
++ DC_I2C_COUNT0, length,
++ DC_I2C_STOP0, last_transaction ? 1 : 0);
++ break;
++ case 1:
++ REG_UPDATE_5(DC_I2C_TRANSACTION1,
++ DC_I2C_STOP_ON_NACK0, 1,
++ DC_I2C_START0, 1,
++ DC_I2C_RW0, 0 != (request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ),
++ DC_I2C_COUNT0, length,
++ DC_I2C_STOP0, last_transaction ? 1 : 0);
++ break;
++ case 2:
++ REG_UPDATE_5(DC_I2C_TRANSACTION2,
++ DC_I2C_STOP_ON_NACK0, 1,
++ DC_I2C_START0, 1,
++ DC_I2C_RW0, 0 != (request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ),
++ DC_I2C_COUNT0, length,
++ DC_I2C_STOP0, last_transaction ? 1 : 0);
++ break;
++ case 3:
++ REG_UPDATE_5(DC_I2C_TRANSACTION3,
++ DC_I2C_STOP_ON_NACK0, 1,
++ DC_I2C_START0, 1,
++ DC_I2C_RW0, 0 != (request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ),
++ DC_I2C_COUNT0, length,
++ DC_I2C_STOP0, last_transaction ? 1 : 0);
++ break;
++ default:
++ /* TODO Warning ? */
++ break;
++ }
++ }
++
++ /* Write the I2C address and I2C data
++ * into the hardware circular buffer, one byte per entry.
++ * As an example, the 7-bit I2C slave address for CRT monitor
++ * for reading DDC/EDID information is 0b1010001.
++ * For an I2C send operation, the LSB must be programmed to 0;
++ * for I2C receive operation, the LSB must be programmed to 1.
++ */
++
++ {
++ if (dce_i2c_hw->transaction_count == 0) {
++ value = REG_SET_4(DC_I2C_DATA, 0,
++ DC_I2C_DATA_RW, false,
++ DC_I2C_DATA, request->address,
++ DC_I2C_INDEX, 0,
++ DC_I2C_INDEX_WRITE, 1);
++ } else
++ value = REG_SET_2(DC_I2C_DATA, 0,
++ DC_I2C_DATA_RW, false,
++ DC_I2C_DATA, request->address);
++
++ if (!(request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ)) {
++
++ while (length) {
++ REG_SET_2(DC_I2C_DATA, value,
++ DC_I2C_INDEX_WRITE, 0,
++ DC_I2C_DATA, *buffer++);
++ --length;
++ }
++ }
++ }
++
++ ++dce_i2c_hw->transaction_count;
++ dce_i2c_hw->buffer_used_bytes += length + 1;
++
++ return last_transaction;
++}
++
++#define STOP_TRANS_PREDICAT \
++ ((dce_i2c_hw->transaction_count == 3) || \
++ (request->action == DCE_I2C_TRANSACTION_ACTION_I2C_WRITE) || \
++ (request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ))
++
++#define SET_I2C_TRANSACTION(id) \
++ do { \
++ REG_UPDATE_N(DC_I2C_TRANSACTION##id, 5, \
++ FN(DC_I2C_TRANSACTION0, DC_I2C_STOP_ON_NACK0), 1, \
++ FN(DC_I2C_TRANSACTION0, DC_I2C_START0), 1, \
++ FN(DC_I2C_TRANSACTION0, DC_I2C_STOP0), STOP_TRANS_PREDICAT ? 1:0, \
++ FN(DC_I2C_TRANSACTION0, DC_I2C_RW0), (0 != (request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ)), \
++ FN(DC_I2C_TRANSACTION0, DC_I2C_COUNT0), length); \
++ if (STOP_TRANS_PREDICAT) \
++ last_transaction = true; \
++ } while (false)
++
++static bool process_transaction_hw_dce100(
++ struct dce_i2c_hw *dce_i2c_hw,
++ struct i2c_request_transaction_data *request)
++{
++ uint32_t length = request->length;
++ uint8_t *buffer = request->data;
++ uint32_t value = 0;
++
++ bool last_transaction = false;
++
++ switch (dce_i2c_hw->transaction_count) {
++ case 0:
++ SET_I2C_TRANSACTION(0);
++ break;
++ case 1:
++ SET_I2C_TRANSACTION(1);
++ break;
++ case 2:
++ SET_I2C_TRANSACTION(2);
++ break;
++ case 3:
++ SET_I2C_TRANSACTION(3);
++ break;
++ default:
++ /* TODO Warning ? */
++ break;
++ }
++
++
++ /* Write the I2C address and I2C data
++ * into the hardware circular buffer, one byte per entry.
++ * As an example, the 7-bit I2C slave address for CRT monitor
++ * for reading DDC/EDID information is 0b1010001.
++ * For an I2C send operation, the LSB must be programmed to 0;
++ * for I2C receive operation, the LSB must be programmed to 1.
++ */
++ if (dce_i2c_hw->transaction_count == 0) {
++ value = REG_SET_4(DC_I2C_DATA, 0,
++ DC_I2C_DATA_RW, false,
++ DC_I2C_DATA, request->address,
++ DC_I2C_INDEX, 0,
++ DC_I2C_INDEX_WRITE, 1);
++ dce_i2c_hw->buffer_used_write = 0;
++ } else
++ value = REG_SET_2(DC_I2C_DATA, 0,
++ DC_I2C_DATA_RW, false,
++ DC_I2C_DATA, request->address);
++
++ dce_i2c_hw->buffer_used_write++;
++
++ if (!(request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ)) {
++ while (length) {
++ REG_SET_2(DC_I2C_DATA, value,
++ DC_I2C_INDEX_WRITE, 0,
++ DC_I2C_DATA, *buffer++);
++ dce_i2c_hw->buffer_used_write++;
++ --length;
++ }
++ }
++
++ ++dce_i2c_hw->transaction_count;
++ dce_i2c_hw->buffer_used_bytes += length + 1;
++
++ return last_transaction;
++}
++static uint32_t get_transaction_timeout_hw(
++ const struct dce_i2c_hw *dce_i2c_hw,
++ uint32_t length)
++{
++
++ uint32_t speed = dce_i2c_hw->funcs->get_speed(dce_i2c_hw);
++
++
++
++ uint32_t period_timeout;
++ uint32_t num_of_clock_stretches;
++
++ if (!speed)
++ return 0;
++
++ period_timeout = (1000 * TRANSACTION_TIMEOUT_IN_I2C_CLOCKS) / speed;
++
++ num_of_clock_stretches = 1 + (length << 3) + 1;
++ num_of_clock_stretches +=
++ (dce_i2c_hw->buffer_used_bytes << 3) +
++ (dce_i2c_hw->transaction_count << 1);
++
++ return period_timeout * num_of_clock_stretches;
++}
++
++static void release_engine_dce_hw(
++ struct resource_pool *pool,
++ struct dce_i2c_hw *dce_i2c_hw)
++{
++ pool->i2c_hw_buffer_in_use = false;
++
++ dce_i2c_hw->funcs->release_engine(dce_i2c_hw);
++ dal_ddc_close(dce_i2c_hw->ddc);
++
++ dce_i2c_hw->ddc = NULL;
++}
++
++static void release_engine_hw(
++ struct dce_i2c_hw *dce_i2c_hw)
++{
++ bool safe_to_reset;
++
++ /* Restore original HW engine speed */
++
++ dce_i2c_hw->funcs->set_speed(dce_i2c_hw, dce_i2c_hw->original_speed);
++
++ /* Release I2C */
++ REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, 1);
++
++ /* Reset HW engine */
++ {
++ uint32_t i2c_sw_status = 0;
++
++ REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status);
++ /* if used by SW, safe to reset */
++ safe_to_reset = (i2c_sw_status == 1);
++ }
++
++ if (safe_to_reset)
++ REG_UPDATE_2(DC_I2C_CONTROL,
++ DC_I2C_SOFT_RESET, 1,
++ DC_I2C_SW_STATUS_RESET, 1);
++ else
++ REG_UPDATE(DC_I2C_CONTROL, DC_I2C_SW_STATUS_RESET, 1);
++ /* HW I2C engine - clock gating feature */
++ if (!dce_i2c_hw->engine_keep_power_up_count)
++ dce_i2c_hw->funcs->disable_i2c_hw_engine(dce_i2c_hw);
++
++}
++
++
++static void disable_i2c_hw_engine(
++ struct dce_i2c_hw *dce_i2c_hw)
++{
++ REG_UPDATE_N(SETUP, 1, FN(SETUP, DC_I2C_DDC1_ENABLE), 0);
++}
++static uint32_t get_speed_hw(
++ const struct dce_i2c_hw *dce_i2c_hw)
++{
++ uint32_t pre_scale = 0;
++
++ REG_GET(SPEED, DC_I2C_DDC1_PRESCALE, &pre_scale);
++
++ /* [anaumov] it seems following is unnecessary */
++ /*ASSERT(value.bits.DC_I2C_DDC1_PRESCALE);*/
++ return pre_scale ?
++ dce_i2c_hw->reference_frequency / pre_scale :
++ dce_i2c_hw->default_speed;
++}
++static uint32_t get_hw_buffer_available_size(
++ const struct dce_i2c_hw *dce_i2c_hw)
++{
++ return dce_i2c_hw->buffer_size -
++ dce_i2c_hw->buffer_used_bytes;
++}
++bool dce_i2c_hw_engine_submit_request(
++ struct dce_i2c_hw *dce_i2c_hw,
++ struct dce_i2c_transaction_request *dce_i2c_request,
++ bool middle_of_transaction)
++{
++
++ struct i2c_request_transaction_data request;
++
++ uint32_t transaction_timeout;
++
++ enum i2c_channel_operation_result operation_result;
++
++ bool result = false;
++
++ /* We need the following:
++ * the transaction length must not exceed
++ * the number of free bytes in the HW buffer (minus one for the address)
++ */
++
++ if (dce_i2c_request->payload.length >=
++ get_hw_buffer_available_size(dce_i2c_hw)) {
++ dce_i2c_request->status =
++ DCE_I2C_TRANSACTION_STATUS_FAILED_BUFFER_OVERFLOW;
++ return false;
++ }
++
++ if (dce_i2c_request->operation == DCE_I2C_TRANSACTION_READ)
++ request.action = middle_of_transaction ?
++ DCE_I2C_TRANSACTION_ACTION_I2C_READ_MOT :
++ DCE_I2C_TRANSACTION_ACTION_I2C_READ;
++ else if (dce_i2c_request->operation == DCE_I2C_TRANSACTION_WRITE)
++ request.action = middle_of_transaction ?
++ DCE_I2C_TRANSACTION_ACTION_I2C_WRITE_MOT :
++ DCE_I2C_TRANSACTION_ACTION_I2C_WRITE;
++ else {
++ dce_i2c_request->status =
++ DCE_I2C_TRANSACTION_STATUS_FAILED_INVALID_OPERATION;
++ /* [anaumov] in DAL2, there was no "return false" */
++ return false;
++ }
++
++ request.address = (uint8_t) dce_i2c_request->payload.address;
++ request.length = dce_i2c_request->payload.length;
++ request.data = dce_i2c_request->payload.data;
++
++ /* obtain timeout value before submitting request */
++
++ transaction_timeout = get_transaction_timeout_hw(
++ dce_i2c_hw, dce_i2c_request->payload.length + 1);
++
++ submit_channel_request_hw(
++ dce_i2c_hw, &request);
++
++ if ((request.status == I2C_CHANNEL_OPERATION_FAILED) ||
++ (request.status == I2C_CHANNEL_OPERATION_ENGINE_BUSY)) {
++ dce_i2c_request->status =
++ DCE_I2C_TRANSACTION_STATUS_FAILED_CHANNEL_BUSY;
++ return false;
++ }
++
++ /* wait until transaction proceed */
++
++ operation_result = dce_i2c_hw_engine_wait_on_operation_result(
++ dce_i2c_hw,
++ transaction_timeout,
++ I2C_CHANNEL_OPERATION_ENGINE_BUSY);
++
++ /* update transaction status */
++
++ switch (operation_result) {
++ case I2C_CHANNEL_OPERATION_SUCCEEDED:
++ dce_i2c_request->status =
++ DCE_I2C_TRANSACTION_STATUS_SUCCEEDED;
++ result = true;
++ break;
++ case I2C_CHANNEL_OPERATION_NO_RESPONSE:
++ dce_i2c_request->status =
++ DCE_I2C_TRANSACTION_STATUS_FAILED_NACK;
++ break;
++ case I2C_CHANNEL_OPERATION_TIMEOUT:
++ dce_i2c_request->status =
++ DCE_I2C_TRANSACTION_STATUS_FAILED_TIMEOUT;
++ break;
++ case I2C_CHANNEL_OPERATION_FAILED:
++ dce_i2c_request->status =
++ DCE_I2C_TRANSACTION_STATUS_FAILED_INCOMPLETE;
++ break;
++ default:
++ dce_i2c_request->status =
++ DCE_I2C_TRANSACTION_STATUS_FAILED_OPERATION;
++ }
++
++ if (result && (dce_i2c_request->operation == DCE_I2C_TRANSACTION_READ)) {
++ struct i2c_reply_transaction_data reply;
++
++ reply.data = dce_i2c_request->payload.data;
++ reply.length = dce_i2c_request->payload.length;
++
++ dce_i2c_hw->funcs->process_channel_reply(dce_i2c_hw, &reply);
++
++
++ }
++
++ return result;
++}
++
++bool dce_i2c_submit_command_hw(
++ struct resource_pool *pool,
++ struct ddc *ddc,
++ struct i2c_command *cmd,
++ struct dce_i2c_hw *dce_i2c_hw)
++{
++ uint8_t index_of_payload = 0;
++ bool result;
++
++ dce_i2c_hw->funcs->set_speed(dce_i2c_hw, cmd->speed);
++
++ result = true;
++
++ while (index_of_payload < cmd->number_of_payloads) {
++ bool mot = (index_of_payload != cmd->number_of_payloads - 1);
++
++ struct i2c_payload *payload = cmd->payloads + index_of_payload;
++
++ struct dce_i2c_transaction_request request = { 0 };
++
++ request.operation = payload->write ?
++ DCE_I2C_TRANSACTION_WRITE :
++ DCE_I2C_TRANSACTION_READ;
++
++ request.payload.address_space =
++ DCE_I2C_TRANSACTION_ADDRESS_SPACE_I2C;
++ request.payload.address = (payload->address << 1) |
++ !payload->write;
++ request.payload.length = payload->length;
++ request.payload.data = payload->data;
++
++
++ if (!dce_i2c_hw_engine_submit_request(
++ dce_i2c_hw, &request, mot)) {
++ result = false;
++ break;
++ }
++
++
++
++ ++index_of_payload;
++ }
++
++ release_engine_dce_hw(pool, dce_i2c_hw);
++
++ return result;
++}
++static const struct dce_i2c_hw_funcs dce100_i2c_hw_funcs = {
++ .setup_engine = setup_engine_hw_dce100,
++ .set_speed = set_speed_hw_dce100,
++ .get_speed = get_speed_hw,
++ .release_engine = release_engine_hw,
++ .process_transaction = process_transaction_hw_dce100,
++ .process_channel_reply = process_channel_reply_hw_dce100,
++ .is_hw_busy = is_hw_busy,
++ .get_channel_status = get_channel_status_hw,
++ .execute_transaction = execute_transaction_hw,
++ .disable_i2c_hw_engine = disable_i2c_hw_engine
++};
++static const struct dce_i2c_hw_funcs dce80_i2c_hw_funcs = {
++ .setup_engine = setup_engine_hw_dce80,
++ .set_speed = set_speed_hw_dce80,
++ .get_speed = get_speed_hw,
++ .release_engine = release_engine_hw,
++ .process_transaction = process_transaction_hw_dce80,
++ .process_channel_reply = process_channel_reply_hw_dce80,
++ .is_hw_busy = is_hw_busy,
++ .get_channel_status = get_channel_status_hw,
++ .execute_transaction = execute_transaction_hw,
++ .disable_i2c_hw_engine = disable_i2c_hw_engine
++};
++
++
++
++void dce_i2c_hw_construct(
++ struct dce_i2c_hw *dce_i2c_hw,
++ struct dc_context *ctx,
++ uint32_t engine_id,
++ const struct dce_i2c_registers *regs,
++ const struct dce_i2c_shift *shifts,
++ const struct dce_i2c_mask *masks)
++{
++ dce_i2c_hw->ctx = ctx;
++ dce_i2c_hw->engine_id = engine_id;
++ dce_i2c_hw->reference_frequency = get_reference_clock(ctx->dc_bios) >> 1;
++ dce_i2c_hw->regs = regs;
++ dce_i2c_hw->shifts = shifts;
++ dce_i2c_hw->masks = masks;
++ dce_i2c_hw->buffer_used_bytes = 0;
++ dce_i2c_hw->transaction_count = 0;
++ dce_i2c_hw->engine_keep_power_up_count = 1;
++ dce_i2c_hw->original_speed = DEFAULT_I2C_HW_SPEED;
++ dce_i2c_hw->default_speed = DEFAULT_I2C_HW_SPEED;
++ dce_i2c_hw->send_reset_length = 0;
++ dce_i2c_hw->setup_limit = I2C_SETUP_TIME_LIMIT_DCE;
++ dce_i2c_hw->funcs = &dce80_i2c_hw_funcs;
++ dce_i2c_hw->buffer_size = I2C_HW_BUFFER_SIZE_DCE;
++}
++
++void dce100_i2c_hw_construct(
++ struct dce_i2c_hw *dce_i2c_hw,
++ struct dc_context *ctx,
++ uint32_t engine_id,
++ const struct dce_i2c_registers *regs,
++ const struct dce_i2c_shift *shifts,
++ const struct dce_i2c_mask *masks)
++{
++
++ uint32_t xtal_ref_div = 0;
++
++ dce_i2c_hw_construct(dce_i2c_hw,
++ ctx,
++ engine_id,
++ regs,
++ shifts,
++ masks);
++ dce_i2c_hw->funcs = &dce100_i2c_hw_funcs;
++ dce_i2c_hw->buffer_size = I2C_HW_BUFFER_SIZE_DCE100;
++
++ REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div);
++
++ if (xtal_ref_div == 0)
++ xtal_ref_div = 2;
++
++ /* Calculate the reference clock by dividing the original frequency by
++ * XTAL_REF_DIV.
++ * At the upper level, uint32_t reference_frequency =
++ * dal_dce_i2c_get_reference_clock(as) >> 1
++ * is already divided by 2, so we need to multiply by 2 to get the
++ * original reference clock from ppll_info.
++ */
++ dce_i2c_hw->reference_frequency =
++ (dce_i2c_hw->reference_frequency * 2) / xtal_ref_div;
++}
++
++void dce112_i2c_hw_construct(
++ struct dce_i2c_hw *dce_i2c_hw,
++ struct dc_context *ctx,
++ uint32_t engine_id,
++ const struct dce_i2c_registers *regs,
++ const struct dce_i2c_shift *shifts,
++ const struct dce_i2c_mask *masks)
++{
++ dce100_i2c_hw_construct(dce_i2c_hw,
++ ctx,
++ engine_id,
++ regs,
++ shifts,
++ masks);
++ dce_i2c_hw->default_speed = DEFAULT_I2C_HW_SPEED_100KHZ;
++}
++
++void dcn1_i2c_hw_construct(
++ struct dce_i2c_hw *dce_i2c_hw,
++ struct dc_context *ctx,
++ uint32_t engine_id,
++ const struct dce_i2c_registers *regs,
++ const struct dce_i2c_shift *shifts,
++ const struct dce_i2c_mask *masks)
++{
++ dce112_i2c_hw_construct(dce_i2c_hw,
++ ctx,
++ engine_id,
++ regs,
++ shifts,
++ masks);
++ dce_i2c_hw->setup_limit = I2C_SETUP_TIME_LIMIT_DCN;
++}
++
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h
+new file mode 100644
+index 0000000..8baef39
+--- /dev/null
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h
+@@ -0,0 +1,335 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++#ifndef __DCE_I2C_HW_H__
++#define __DCE_I2C_HW_H__
++
++enum dc_i2c_status {
++ DC_I2C_STATUS__DC_I2C_STATUS_IDLE,
++ DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW,
++ DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_HW
++};
++
++enum dc_i2c_arbitration {
++ DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_NORMAL,
++ DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_HIGH
++};
++
++enum i2c_channel_operation_result {
++ I2C_CHANNEL_OPERATION_SUCCEEDED,
++ I2C_CHANNEL_OPERATION_FAILED,
++ I2C_CHANNEL_OPERATION_NOT_GRANTED,
++ I2C_CHANNEL_OPERATION_IS_BUSY,
++ I2C_CHANNEL_OPERATION_NO_HANDLE_PROVIDED,
++ I2C_CHANNEL_OPERATION_CHANNEL_IN_USE,
++ I2C_CHANNEL_OPERATION_CHANNEL_CLIENT_MAX_ALLOWED,
++ I2C_CHANNEL_OPERATION_ENGINE_BUSY,
++ I2C_CHANNEL_OPERATION_TIMEOUT,
++ I2C_CHANNEL_OPERATION_NO_RESPONSE,
++ I2C_CHANNEL_OPERATION_HW_REQUEST_I2C_BUS,
++ I2C_CHANNEL_OPERATION_WRONG_PARAMETER,
++ I2C_CHANNEL_OPERATION_OUT_NB_OF_RETRIES,
++ I2C_CHANNEL_OPERATION_NOT_STARTED
++};
++
++
++enum dce_i2c_transaction_action {
++ DCE_I2C_TRANSACTION_ACTION_I2C_WRITE = 0x00,
++ DCE_I2C_TRANSACTION_ACTION_I2C_READ = 0x10,
++ DCE_I2C_TRANSACTION_ACTION_I2C_STATUS_REQUEST = 0x20,
++
++ DCE_I2C_TRANSACTION_ACTION_I2C_WRITE_MOT = 0x40,
++ DCE_I2C_TRANSACTION_ACTION_I2C_READ_MOT = 0x50,
++ DCE_I2C_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT = 0x60,
++
++ DCE_I2C_TRANSACTION_ACTION_DP_WRITE = 0x80,
++ DCE_I2C_TRANSACTION_ACTION_DP_READ = 0x90
++};
++
++enum {
++ I2C_SETUP_TIME_LIMIT_DCE = 255,
++ I2C_SETUP_TIME_LIMIT_DCN = 3,
++ I2C_HW_BUFFER_SIZE_DCE100 = 538,
++ I2C_HW_BUFFER_SIZE_DCE = 144,
++ I2C_SEND_RESET_LENGTH_9 = 9,
++ I2C_SEND_RESET_LENGTH_10 = 10,
++ DEFAULT_I2C_HW_SPEED = 50,
++ DEFAULT_I2C_HW_SPEED_100KHZ = 100,
++ TRANSACTION_TIMEOUT_IN_I2C_CLOCKS = 32,
++};
++
++#define I2C_HW_ENGINE_COMMON_REG_LIST(id)\
++ SRI(SETUP, DC_I2C_DDC, id),\
++ SRI(SPEED, DC_I2C_DDC, id),\
++ SR(DC_I2C_ARBITRATION),\
++ SR(DC_I2C_CONTROL),\
++ SR(DC_I2C_SW_STATUS),\
++ SR(DC_I2C_TRANSACTION0),\
++ SR(DC_I2C_TRANSACTION1),\
++ SR(DC_I2C_TRANSACTION2),\
++ SR(DC_I2C_TRANSACTION3),\
++ SR(DC_I2C_DATA),\
++ SR(MICROSECOND_TIME_BASE_DIV)
++
++#define I2C_SF(reg_name, field_name, post_fix)\
++ .field_name = reg_name ## __ ## field_name ## post_fix
++
++#define I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh)\
++ I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE, mask_sh),\
++ I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT, mask_sh),\
++ I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_DATA_DRIVE_EN, mask_sh),\
++ I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_CLK_DRIVE_EN, mask_sh),\
++ I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_DATA_DRIVE_SEL, mask_sh),\
++ I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_INTRA_TRANSACTION_DELAY, mask_sh),\
++ I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_INTRA_BYTE_DELAY, mask_sh),\
++ I2C_SF(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, mask_sh),\
++ I2C_SF(DC_I2C_ARBITRATION, DC_I2C_NO_QUEUED_SW_GO, mask_sh),\
++ I2C_SF(DC_I2C_ARBITRATION, DC_I2C_SW_PRIORITY, mask_sh),\
++ I2C_SF(DC_I2C_CONTROL, DC_I2C_SOFT_RESET, mask_sh),\
++ I2C_SF(DC_I2C_CONTROL, DC_I2C_SW_STATUS_RESET, mask_sh),\
++ I2C_SF(DC_I2C_CONTROL, DC_I2C_GO, mask_sh),\
++ I2C_SF(DC_I2C_CONTROL, DC_I2C_SEND_RESET, mask_sh),\
++ I2C_SF(DC_I2C_CONTROL, DC_I2C_TRANSACTION_COUNT, mask_sh),\
++ I2C_SF(DC_I2C_CONTROL, DC_I2C_DDC_SELECT, mask_sh),\
++ I2C_SF(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE, mask_sh),\
++ I2C_SF(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD, mask_sh),\
++ I2C_SF(DC_I2C_SW_STATUS, DC_I2C_SW_STOPPED_ON_NACK, mask_sh),\
++ I2C_SF(DC_I2C_SW_STATUS, DC_I2C_SW_TIMEOUT, mask_sh),\
++ I2C_SF(DC_I2C_SW_STATUS, DC_I2C_SW_ABORTED, mask_sh),\
++ I2C_SF(DC_I2C_SW_STATUS, DC_I2C_SW_DONE, mask_sh),\
++ I2C_SF(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, mask_sh),\
++ I2C_SF(DC_I2C_TRANSACTION0, DC_I2C_STOP_ON_NACK0, mask_sh),\
++ I2C_SF(DC_I2C_TRANSACTION0, DC_I2C_START0, mask_sh),\
++ I2C_SF(DC_I2C_TRANSACTION0, DC_I2C_RW0, mask_sh),\
++ I2C_SF(DC_I2C_TRANSACTION0, DC_I2C_STOP0, mask_sh),\
++ I2C_SF(DC_I2C_TRANSACTION0, DC_I2C_COUNT0, mask_sh),\
++ I2C_SF(DC_I2C_DATA, DC_I2C_DATA_RW, mask_sh),\
++ I2C_SF(DC_I2C_DATA, DC_I2C_DATA, mask_sh),\
++ I2C_SF(DC_I2C_DATA, DC_I2C_INDEX, mask_sh),\
++ I2C_SF(DC_I2C_DATA, DC_I2C_INDEX_WRITE, mask_sh),\
++ I2C_SF(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, mask_sh)
++
++#define I2C_COMMON_MASK_SH_LIST_DCE110(mask_sh)\
++ I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh),\
++ I2C_SF(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_START_STOP_TIMING_CNTL, mask_sh)
++
++struct dce_i2c_shift {
++ uint8_t DC_I2C_DDC1_ENABLE;
++ uint8_t DC_I2C_DDC1_TIME_LIMIT;
++ uint8_t DC_I2C_DDC1_DATA_DRIVE_EN;
++ uint8_t DC_I2C_DDC1_CLK_DRIVE_EN;
++ uint8_t DC_I2C_DDC1_DATA_DRIVE_SEL;
++ uint8_t DC_I2C_DDC1_INTRA_TRANSACTION_DELAY;
++ uint8_t DC_I2C_DDC1_INTRA_BYTE_DELAY;
++ uint8_t DC_I2C_SW_DONE_USING_I2C_REG;
++ uint8_t DC_I2C_NO_QUEUED_SW_GO;
++ uint8_t DC_I2C_SW_PRIORITY;
++ uint8_t DC_I2C_SOFT_RESET;
++ uint8_t DC_I2C_SW_STATUS_RESET;
++ uint8_t DC_I2C_GO;
++ uint8_t DC_I2C_SEND_RESET;
++ uint8_t DC_I2C_TRANSACTION_COUNT;
++ uint8_t DC_I2C_DDC_SELECT;
++ uint8_t DC_I2C_DDC1_PRESCALE;
++ uint8_t DC_I2C_DDC1_THRESHOLD;
++ uint8_t DC_I2C_DDC1_START_STOP_TIMING_CNTL;
++ uint8_t DC_I2C_SW_STOPPED_ON_NACK;
++ uint8_t DC_I2C_SW_TIMEOUT;
++ uint8_t DC_I2C_SW_ABORTED;
++ uint8_t DC_I2C_SW_DONE;
++ uint8_t DC_I2C_SW_STATUS;
++ uint8_t DC_I2C_STOP_ON_NACK0;
++ uint8_t DC_I2C_START0;
++ uint8_t DC_I2C_RW0;
++ uint8_t DC_I2C_STOP0;
++ uint8_t DC_I2C_COUNT0;
++ uint8_t DC_I2C_DATA_RW;
++ uint8_t DC_I2C_DATA;
++ uint8_t DC_I2C_INDEX;
++ uint8_t DC_I2C_INDEX_WRITE;
++ uint8_t XTAL_REF_DIV;
++};
++
++struct dce_i2c_mask {
++ uint32_t DC_I2C_DDC1_ENABLE;
++ uint32_t DC_I2C_DDC1_TIME_LIMIT;
++ uint32_t DC_I2C_DDC1_DATA_DRIVE_EN;
++ uint32_t DC_I2C_DDC1_CLK_DRIVE_EN;
++ uint32_t DC_I2C_DDC1_DATA_DRIVE_SEL;
++ uint32_t DC_I2C_DDC1_INTRA_TRANSACTION_DELAY;
++ uint32_t DC_I2C_DDC1_INTRA_BYTE_DELAY;
++ uint32_t DC_I2C_SW_DONE_USING_I2C_REG;
++ uint32_t DC_I2C_NO_QUEUED_SW_GO;
++ uint32_t DC_I2C_SW_PRIORITY;
++ uint32_t DC_I2C_SOFT_RESET;
++ uint32_t DC_I2C_SW_STATUS_RESET;
++ uint32_t DC_I2C_GO;
++ uint32_t DC_I2C_SEND_RESET;
++ uint32_t DC_I2C_TRANSACTION_COUNT;
++ uint32_t DC_I2C_DDC_SELECT;
++ uint32_t DC_I2C_DDC1_PRESCALE;
++ uint32_t DC_I2C_DDC1_THRESHOLD;
++ uint32_t DC_I2C_DDC1_START_STOP_TIMING_CNTL;
++ uint32_t DC_I2C_SW_STOPPED_ON_NACK;
++ uint32_t DC_I2C_SW_TIMEOUT;
++ uint32_t DC_I2C_SW_ABORTED;
++ uint32_t DC_I2C_SW_DONE;
++ uint32_t DC_I2C_SW_STATUS;
++ uint32_t DC_I2C_STOP_ON_NACK0;
++ uint32_t DC_I2C_START0;
++ uint32_t DC_I2C_RW0;
++ uint32_t DC_I2C_STOP0;
++ uint32_t DC_I2C_COUNT0;
++ uint32_t DC_I2C_DATA_RW;
++ uint32_t DC_I2C_DATA;
++ uint32_t DC_I2C_INDEX;
++ uint32_t DC_I2C_INDEX_WRITE;
++ uint32_t XTAL_REF_DIV;
++};
++
++struct dce_i2c_registers {
++ uint32_t SETUP;
++ uint32_t SPEED;
++ uint32_t DC_I2C_ARBITRATION;
++ uint32_t DC_I2C_CONTROL;
++ uint32_t DC_I2C_SW_STATUS;
++ uint32_t DC_I2C_TRANSACTION0;
++ uint32_t DC_I2C_TRANSACTION1;
++ uint32_t DC_I2C_TRANSACTION2;
++ uint32_t DC_I2C_TRANSACTION3;
++ uint32_t DC_I2C_DATA;
++ uint32_t MICROSECOND_TIME_BASE_DIV;
++};
++
++enum dce_i2c_transaction_address_space {
++ DCE_I2C_TRANSACTION_ADDRESS_SPACE_I2C = 1,
++ DCE_I2C_TRANSACTION_ADDRESS_SPACE_DPCD
++};
++
++struct i2c_request_transaction_data {
++ enum dce_i2c_transaction_action action;
++ enum i2c_channel_operation_result status;
++ uint8_t address;
++ uint32_t length;
++ uint8_t *data;
++};
++
++struct i2c_reply_transaction_data {
++ uint32_t length;
++ uint8_t *data;
++};
++
++struct dce_i2c_hw {
++ struct ddc *ddc;
++ uint32_t original_speed;
++ uint32_t engine_keep_power_up_count;
++ uint32_t transaction_count;
++ uint32_t buffer_used_bytes;
++ uint32_t buffer_used_write;
++ uint32_t reference_frequency;
++ uint32_t default_speed;
++ uint32_t engine_id;
++ uint32_t setup_limit;
++ uint32_t send_reset_length;
++ uint32_t buffer_size;
++ struct dc_context *ctx;
++
++ const struct dce_i2c_hw_funcs *funcs;
++ const struct dce_i2c_registers *regs;
++ const struct dce_i2c_shift *shifts;
++ const struct dce_i2c_mask *masks;
++};
++
++
++struct dce_i2c_hw_funcs {
++ bool (*setup_engine)(
++ struct dce_i2c_hw *dce_i2c_hw);
++ void (*set_speed)(
++ struct dce_i2c_hw *dce_i2c_hw,
++ uint32_t speed);
++ uint32_t (*get_speed)(
++ const struct dce_i2c_hw *dce_i2c_hw);
++ void (*release_engine)(
++ struct dce_i2c_hw *dce_i2c_hw);
++ bool (*process_transaction)(
++ struct dce_i2c_hw *dce_i2c_hw,
++ struct i2c_request_transaction_data *request);
++ void (*process_channel_reply)(
++ struct dce_i2c_hw *dce_i2c_hw,
++ struct i2c_reply_transaction_data *reply);
++ bool (*is_hw_busy)(
++ struct dce_i2c_hw *dce_i2c_hw);
++ enum i2c_channel_operation_result (*get_channel_status)(
++ struct dce_i2c_hw *dce_i2c_hw,
++ uint8_t *returned_bytes);
++ void (*execute_transaction)(
++ struct dce_i2c_hw *dce_i2c_hw);
++ void (*disable_i2c_hw_engine)(
++ struct dce_i2c_hw *dce_i2c_hw);
++};
++
++void dce_i2c_hw_construct(
++ struct dce_i2c_hw *dce_i2c_hw,
++ struct dc_context *ctx,
++ uint32_t engine_id,
++ const struct dce_i2c_registers *regs,
++ const struct dce_i2c_shift *shifts,
++ const struct dce_i2c_mask *masks);
++
++void dce100_i2c_hw_construct(
++ struct dce_i2c_hw *dce_i2c_hw,
++ struct dc_context *ctx,
++ uint32_t engine_id,
++ const struct dce_i2c_registers *regs,
++ const struct dce_i2c_shift *shifts,
++ const struct dce_i2c_mask *masks);
++
++void dce112_i2c_hw_construct(
++ struct dce_i2c_hw *dce_i2c_hw,
++ struct dc_context *ctx,
++ uint32_t engine_id,
++ const struct dce_i2c_registers *regs,
++ const struct dce_i2c_shift *shifts,
++ const struct dce_i2c_mask *masks);
++
++void dcn1_i2c_hw_construct(
++ struct dce_i2c_hw *dce_i2c_hw,
++ struct dc_context *ctx,
++ uint32_t engine_id,
++ const struct dce_i2c_registers *regs,
++ const struct dce_i2c_shift *shifts,
++ const struct dce_i2c_mask *masks);
++
++bool dce_i2c_submit_command_hw(
++ struct resource_pool *pool,
++ struct ddc *ddc,
++ struct i2c_command *cmd,
++ struct dce_i2c_hw *dce_i2c_hw);
++
++struct dce_i2c_hw *acquire_i2c_hw_engine(
++ struct resource_pool *pool,
++ struct ddc *ddc);
++
++#endif
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
+new file mode 100644
+index 0000000..ab11129
+--- /dev/null
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
+@@ -0,0 +1,602 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++#include "dce_i2c.h"
++#include "dce_i2c_sw.h"
++#include "include/gpio_service_interface.h"
++#define SCL false
++#define SDA true
++
++void dce_i2c_sw_construct(
++ struct dce_i2c_sw *dce_i2c_sw,
++ struct dc_context *ctx)
++{
++ dce_i2c_sw->ctx = ctx;
++}
++
++static inline bool read_bit_from_ddc(
++ struct ddc *ddc,
++ bool data_nor_clock)
++{
++ uint32_t value = 0;
++
++ if (data_nor_clock)
++ dal_gpio_get_value(ddc->pin_data, &value);
++ else
++ dal_gpio_get_value(ddc->pin_clock, &value);
++
++ return (value != 0);
++}
++
++static inline void write_bit_to_ddc(
++ struct ddc *ddc,
++ bool data_nor_clock,
++ bool bit)
++{
++ uint32_t value = bit ? 1 : 0;
++
++ if (data_nor_clock)
++ dal_gpio_set_value(ddc->pin_data, value);
++ else
++ dal_gpio_set_value(ddc->pin_clock, value);
++}
++
++static void release_engine_dce_sw(
++ struct resource_pool *pool,
++ struct dce_i2c_sw *dce_i2c_sw)
++{
++ dal_ddc_close(dce_i2c_sw->ddc);
++ dce_i2c_sw->ddc = NULL;
++}
++
++enum i2c_channel_operation_result dce_i2c_sw_engine_get_channel_status(
++ struct dce_i2c_sw *engine,
++ uint8_t *returned_bytes)
++{
++ /* No arbitration with VBIOS is performed since DCE 6.0 */
++ return I2C_CHANNEL_OPERATION_SUCCEEDED;
++}
++static bool get_hw_supported_ddc_line(
++ struct ddc *ddc,
++ enum gpio_ddc_line *line)
++{
++ enum gpio_ddc_line line_found;
++
++ *line = GPIO_DDC_LINE_UNKNOWN;
++
++ if (!ddc) {
++ BREAK_TO_DEBUGGER();
++ return false;
++ }
++
++ if (!ddc->hw_info.hw_supported)
++ return false;
++
++ line_found = dal_ddc_get_line(ddc);
++
++ if (line_found >= GPIO_DDC_LINE_COUNT)
++ return false;
++
++ *line = line_found;
++
++ return true;
++}
++static bool wait_for_scl_high_sw(
++ struct dc_context *ctx,
++ struct ddc *ddc,
++ uint16_t clock_delay_div_4)
++{
++ uint32_t scl_retry = 0;
++ uint32_t scl_retry_max = I2C_SW_TIMEOUT_DELAY / clock_delay_div_4;
++
++ udelay(clock_delay_div_4);
++
++ do {
++ if (read_bit_from_ddc(ddc, SCL))
++ return true;
++
++ udelay(clock_delay_div_4);
++
++ ++scl_retry;
++ } while (scl_retry <= scl_retry_max);
++
++ return false;
++}
++static bool write_byte_sw(
++ struct dc_context *ctx,
++ struct ddc *ddc_handle,
++ uint16_t clock_delay_div_4,
++ uint8_t byte)
++{
++ int32_t shift = 7;
++ bool ack;
++
++ /* bits are transmitted serially, starting from MSB */
++
++ do {
++ udelay(clock_delay_div_4);
++
++ write_bit_to_ddc(ddc_handle, SDA, (byte >> shift) & 1);
++
++ udelay(clock_delay_div_4);
++
++ write_bit_to_ddc(ddc_handle, SCL, true);
++
++ if (!wait_for_scl_high_sw(ctx, ddc_handle, clock_delay_div_4))
++ return false;
++
++ write_bit_to_ddc(ddc_handle, SCL, false);
++
++ --shift;
++ } while (shift >= 0);
++
++ /* The display sends ACK by preventing the SDA from going high
++ * after the SCL pulse we use to send our last data bit.
++ * If the SDA goes high after that bit, it's a NACK
++ */
++
++ udelay(clock_delay_div_4);
++
++ write_bit_to_ddc(ddc_handle, SDA, true);
++
++ udelay(clock_delay_div_4);
++
++ write_bit_to_ddc(ddc_handle, SCL, true);
++
++ if (!wait_for_scl_high_sw(ctx, ddc_handle, clock_delay_div_4))
++ return false;
++
++ /* read ACK bit */
++
++ ack = !read_bit_from_ddc(ddc_handle, SDA);
++
++ udelay(clock_delay_div_4 << 1);
++
++ write_bit_to_ddc(ddc_handle, SCL, false);
++
++ udelay(clock_delay_div_4 << 1);
++
++ return ack;
++}
++
++static bool read_byte_sw(
++ struct dc_context *ctx,
++ struct ddc *ddc_handle,
++ uint16_t clock_delay_div_4,
++ uint8_t *byte,
++ bool more)
++{
++ int32_t shift = 7;
++
++ uint8_t data = 0;
++
++ /* The data bits are read from MSB to LSB;
++ * bit is read while SCL is high
++ */
++
++ do {
++ write_bit_to_ddc(ddc_handle, SCL, true);
++
++ if (!wait_for_scl_high_sw(ctx, ddc_handle, clock_delay_div_4))
++ return false;
++
++ if (read_bit_from_ddc(ddc_handle, SDA))
++ data |= (1 << shift);
++
++ write_bit_to_ddc(ddc_handle, SCL, false);
++
++ udelay(clock_delay_div_4 << 1);
++
++ --shift;
++ } while (shift >= 0);
++
++ /* read only whole byte */
++
++ *byte = data;
++
++ udelay(clock_delay_div_4);
++
++ /* send the acknowledge bit:
++ * SDA low means ACK, SDA high means NACK
++ */
++
++ write_bit_to_ddc(ddc_handle, SDA, !more);
++
++ udelay(clock_delay_div_4);
++
++ write_bit_to_ddc(ddc_handle, SCL, true);
++
++ if (!wait_for_scl_high_sw(ctx, ddc_handle, clock_delay_div_4))
++ return false;
++
++ write_bit_to_ddc(ddc_handle, SCL, false);
++
++ udelay(clock_delay_div_4);
++
++ write_bit_to_ddc(ddc_handle, SDA, true);
++
++ udelay(clock_delay_div_4);
++
++ return true;
++}
++static bool stop_sync_sw(
++ struct dc_context *ctx,
++ struct ddc *ddc_handle,
++ uint16_t clock_delay_div_4)
++{
++ uint32_t retry = 0;
++
++ /* The I2C communications stop signal is:
++ * the SDA going high from low, while the SCL is high.
++ */
++
++ write_bit_to_ddc(ddc_handle, SCL, false);
++
++ udelay(clock_delay_div_4);
++
++ write_bit_to_ddc(ddc_handle, SDA, false);
++
++ udelay(clock_delay_div_4);
++
++ write_bit_to_ddc(ddc_handle, SCL, true);
++
++ if (!wait_for_scl_high_sw(ctx, ddc_handle, clock_delay_div_4))
++ return false;
++
++ write_bit_to_ddc(ddc_handle, SDA, true);
++
++ do {
++ udelay(clock_delay_div_4);
++
++ if (read_bit_from_ddc(ddc_handle, SDA))
++ return true;
++
++ ++retry;
++ } while (retry <= 2);
++
++ return false;
++}
++static bool i2c_write_sw(
++ struct dc_context *ctx,
++ struct ddc *ddc_handle,
++ uint16_t clock_delay_div_4,
++ uint8_t address,
++ uint32_t length,
++ const uint8_t *data)
++{
++ uint32_t i = 0;
++
++ if (!write_byte_sw(ctx, ddc_handle, clock_delay_div_4, address))
++ return false;
++
++ while (i < length) {
++ if (!write_byte_sw(ctx, ddc_handle, clock_delay_div_4, data[i]))
++ return false;
++ ++i;
++ }
++
++ return true;
++}
++
++static bool i2c_read_sw(
++ struct dc_context *ctx,
++ struct ddc *ddc_handle,
++ uint16_t clock_delay_div_4,
++ uint8_t address,
++ uint32_t length,
++ uint8_t *data)
++{
++ uint32_t i = 0;
++
++ if (!write_byte_sw(ctx, ddc_handle, clock_delay_div_4, address))
++ return false;
++
++ while (i < length) {
++ if (!read_byte_sw(ctx, ddc_handle, clock_delay_div_4, data + i,
++ i < length - 1))
++ return false;
++ ++i;
++ }
++
++ return true;
++}
++
++
++
++static bool start_sync_sw(
++ struct dc_context *ctx,
++ struct ddc *ddc_handle,
++ uint16_t clock_delay_div_4)
++{
++ uint32_t retry = 0;
++
++ /* The I2C communications start signal is:
++ * the SDA going low from high, while the SCL is high.
++ */
++
++ write_bit_to_ddc(ddc_handle, SCL, true);
++
++ udelay(clock_delay_div_4);
++
++ do {
++ write_bit_to_ddc(ddc_handle, SDA, true);
++
++ if (!read_bit_from_ddc(ddc_handle, SDA)) {
++ ++retry;
++ continue;
++ }
++
++ udelay(clock_delay_div_4);
++
++ write_bit_to_ddc(ddc_handle, SCL, true);
++
++ if (!wait_for_scl_high_sw(ctx, ddc_handle, clock_delay_div_4))
++ break;
++
++ write_bit_to_ddc(ddc_handle, SDA, false);
++
++ udelay(clock_delay_div_4);
++
++ write_bit_to_ddc(ddc_handle, SCL, false);
++
++ udelay(clock_delay_div_4);
++
++ return true;
++ } while (retry <= I2C_SW_RETRIES);
++
++ return false;
++}
++
++void dce_i2c_sw_engine_set_speed(
++ struct dce_i2c_sw *engine,
++ uint32_t speed)
++{
++ ASSERT(speed);
++
++ engine->speed = speed ? speed : DCE_I2C_DEFAULT_I2C_SW_SPEED;
++
++ engine->clock_delay = 1000 / engine->speed;
++
++ if (engine->clock_delay < 12)
++ engine->clock_delay = 12;
++}
++
++bool dce_i2c_sw_engine_acquire_engine(
++ struct dce_i2c_sw *engine,
++ struct ddc *ddc)
++{
++ enum gpio_result result;
++
++ result = dal_ddc_open(ddc, GPIO_MODE_FAST_OUTPUT,
++ GPIO_DDC_CONFIG_TYPE_MODE_I2C);
++
++ if (result != GPIO_RESULT_OK)
++ return false;
++
++ engine->ddc = ddc;
++
++ return true;
++}
++bool dce_i2c_engine_acquire_sw(
++ struct dce_i2c_sw *dce_i2c_sw,
++ struct ddc *ddc_handle)
++{
++ uint32_t counter = 0;
++ bool result;
++
++ do {
++
++ result = dce_i2c_sw_engine_acquire_engine(
++ dce_i2c_sw, ddc_handle);
++
++ if (result)
++ break;
++
++ /* i2c_engine is busy by VBios, lets wait and retry */
++
++ udelay(10);
++
++ ++counter;
++ } while (counter < 2);
++
++ return result;
++}
++
++
++
++
++void dce_i2c_sw_engine_submit_channel_request(
++ struct dce_i2c_sw *engine,
++ struct i2c_request_transaction_data *req)
++{
++ struct ddc *ddc = engine->ddc;
++ uint16_t clock_delay_div_4 = engine->clock_delay >> 2;
++
++ /* send sync (start / repeated start) */
++
++ bool result = start_sync_sw(engine->ctx, ddc, clock_delay_div_4);
++
++ /* process payload */
++
++ if (result) {
++ switch (req->action) {
++ case DCE_I2C_TRANSACTION_ACTION_I2C_WRITE:
++ case DCE_I2C_TRANSACTION_ACTION_I2C_WRITE_MOT:
++ result = i2c_write_sw(engine->ctx, ddc, clock_delay_div_4,
++ req->address, req->length, req->data);
++ break;
++ case DCE_I2C_TRANSACTION_ACTION_I2C_READ:
++ case DCE_I2C_TRANSACTION_ACTION_I2C_READ_MOT:
++ result = i2c_read_sw(engine->ctx, ddc, clock_delay_div_4,
++ req->address, req->length, req->data);
++ break;
++ default:
++ result = false;
++ break;
++ }
++ }
++
++ /* send stop if not 'mot' or operation failed */
++
++ if (!result ||
++ (req->action == DCE_I2C_TRANSACTION_ACTION_I2C_WRITE) ||
++ (req->action == DCE_I2C_TRANSACTION_ACTION_I2C_READ))
++ if (!stop_sync_sw(engine->ctx, ddc, clock_delay_div_4))
++ result = false;
++
++ req->status = result ?
++ I2C_CHANNEL_OPERATION_SUCCEEDED :
++ I2C_CHANNEL_OPERATION_FAILED;
++}
++bool dce_i2c_sw_engine_submit_request(
++ struct dce_i2c_sw *engine,
++ struct dce_i2c_transaction_request *dce_i2c_request,
++ bool middle_of_transaction)
++{
++ struct i2c_request_transaction_data request;
++ bool operation_succeeded = false;
++
++ if (dce_i2c_request->operation == DCE_I2C_TRANSACTION_READ)
++ request.action = middle_of_transaction ?
++ DCE_I2C_TRANSACTION_ACTION_I2C_READ_MOT :
++ DCE_I2C_TRANSACTION_ACTION_I2C_READ;
++ else if (dce_i2c_request->operation == DCE_I2C_TRANSACTION_WRITE)
++ request.action = middle_of_transaction ?
++ DCE_I2C_TRANSACTION_ACTION_I2C_WRITE_MOT :
++ DCE_I2C_TRANSACTION_ACTION_I2C_WRITE;
++ else {
++ dce_i2c_request->status =
++ DCE_I2C_TRANSACTION_STATUS_FAILED_INVALID_OPERATION;
++ /* in DAL2, there was no "return false" */
++ return false;
++ }
++
++ request.address = (uint8_t)dce_i2c_request->payload.address;
++ request.length = dce_i2c_request->payload.length;
++ request.data = dce_i2c_request->payload.data;
++
++ dce_i2c_sw_engine_submit_channel_request(engine, &request);
++
++ if ((request.status == I2C_CHANNEL_OPERATION_ENGINE_BUSY) ||
++ (request.status == I2C_CHANNEL_OPERATION_FAILED))
++ dce_i2c_request->status =
++ DCE_I2C_TRANSACTION_STATUS_FAILED_CHANNEL_BUSY;
++ else {
++ enum i2c_channel_operation_result operation_result;
++
++ do {
++ operation_result =
++ dce_i2c_sw_engine_get_channel_status(engine, NULL);
++
++ switch (operation_result) {
++ case I2C_CHANNEL_OPERATION_SUCCEEDED:
++ dce_i2c_request->status =
++ DCE_I2C_TRANSACTION_STATUS_SUCCEEDED;
++ operation_succeeded = true;
++ break;
++ case I2C_CHANNEL_OPERATION_NO_RESPONSE:
++ dce_i2c_request->status =
++ DCE_I2C_TRANSACTION_STATUS_FAILED_NACK;
++ break;
++ case I2C_CHANNEL_OPERATION_TIMEOUT:
++ dce_i2c_request->status =
++ DCE_I2C_TRANSACTION_STATUS_FAILED_TIMEOUT;
++ break;
++ case I2C_CHANNEL_OPERATION_FAILED:
++ dce_i2c_request->status =
++ DCE_I2C_TRANSACTION_STATUS_FAILED_INCOMPLETE;
++ break;
++ default:
++ dce_i2c_request->status =
++ DCE_I2C_TRANSACTION_STATUS_FAILED_OPERATION;
++ break;
++ }
++ } while (operation_result == I2C_CHANNEL_OPERATION_ENGINE_BUSY);
++ }
++
++ return operation_succeeded;
++}
++bool dce_i2c_submit_command_sw(
++ struct resource_pool *pool,
++ struct ddc *ddc,
++ struct i2c_command *cmd,
++ struct dce_i2c_sw *dce_i2c_sw)
++{
++ uint8_t index_of_payload = 0;
++ bool result;
++
++ dce_i2c_sw_engine_set_speed(dce_i2c_sw, cmd->speed);
++
++ result = true;
++
++ while (index_of_payload < cmd->number_of_payloads) {
++ bool mot = (index_of_payload != cmd->number_of_payloads - 1);
++
++ struct i2c_payload *payload = cmd->payloads + index_of_payload;
++
++ struct dce_i2c_transaction_request request = { 0 };
++
++ request.operation = payload->write ?
++ DCE_I2C_TRANSACTION_WRITE :
++ DCE_I2C_TRANSACTION_READ;
++
++ request.payload.address_space =
++ DCE_I2C_TRANSACTION_ADDRESS_SPACE_I2C;
++ request.payload.address = (payload->address << 1) |
++ !payload->write;
++ request.payload.length = payload->length;
++ request.payload.data = payload->data;
++
++
++ if (!dce_i2c_sw_engine_submit_request(
++ dce_i2c_sw, &request, mot)) {
++ result = false;
++ break;
++ }
++
++ ++index_of_payload;
++ }
++
++ release_engine_dce_sw(pool, dce_i2c_sw);
++
++ return result;
++}
++struct dce_i2c_sw *dce_i2c_acquire_i2c_sw_engine(
++ struct resource_pool *pool,
++ struct ddc *ddc)
++{
++ enum gpio_ddc_line line;
++ struct dce_i2c_sw *engine = NULL;
++
++ if (get_hw_supported_ddc_line(ddc, &line))
++ engine = pool->sw_i2cs[line];
++
++ if (!engine)
++ return NULL;
++
++ if (!dce_i2c_engine_acquire_sw(engine, ddc))
++ return NULL;
++
++ return engine;
++}
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.h b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.h
+new file mode 100644
+index 0000000..5bbcdd4
+--- /dev/null
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.h
+@@ -0,0 +1,57 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++#ifndef __DCE_I2C_SW_H__
++#define __DCE_I2C_SW_H__
++
++enum {
++ DCE_I2C_DEFAULT_I2C_SW_SPEED = 50,
++ I2C_SW_RETRIES = 10,
++ I2C_SW_TIMEOUT_DELAY = 3000,
++};
++
++struct dce_i2c_sw {
++ struct ddc *ddc;
++ struct dc_context *ctx;
++ uint32_t clock_delay;
++ uint32_t speed;
++};
++
++void dce_i2c_sw_construct(
++ struct dce_i2c_sw *dce_i2c_sw,
++ struct dc_context *ctx);
++
++bool dce_i2c_submit_command_sw(
++ struct resource_pool *pool,
++ struct ddc *ddc,
++ struct i2c_command *cmd,
++ struct dce_i2c_sw *dce_i2c_sw);
++
++struct dce_i2c_sw *dce_i2c_acquire_i2c_sw_engine(
++ struct resource_pool *pool,
++ struct ddc *ddc);
++
++#endif
++
+diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+index 3f76e60..ae613b0 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+@@ -54,6 +54,7 @@
+ #include "dce/dce_dmcu.h"
+ #include "dce/dce_aux.h"
+ #include "dce/dce_abm.h"
++#include "dce/dce_i2c.h"
+
+ #ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
+ #include "gmc/gmc_8_2_d.h"
+@@ -602,7 +603,40 @@ struct aux_engine *dce100_aux_engine_create(
+
+ return &aux_engine->base;
+ }
++#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) }
++
++static const struct dce_i2c_registers i2c_hw_regs[] = {
++ i2c_inst_regs(1),
++ i2c_inst_regs(2),
++ i2c_inst_regs(3),
++ i2c_inst_regs(4),
++ i2c_inst_regs(5),
++ i2c_inst_regs(6),
++};
++
++static const struct dce_i2c_shift i2c_shifts = {
++ I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
++};
++
++static const struct dce_i2c_mask i2c_masks = {
++ I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
++};
++
++struct dce_i2c_hw *dce100_i2c_hw_create(
++ struct dc_context *ctx,
++ uint32_t inst)
++{
++ struct dce_i2c_hw *dce_i2c_hw =
++ kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
++
++ if (!dce_i2c_hw)
++ return NULL;
++
++ dce100_i2c_hw_construct(dce_i2c_hw, ctx, inst,
++ &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks);
+
++ return dce_i2c_hw;
++}
+ struct clock_source *dce100_clock_source_create(
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+@@ -658,7 +692,14 @@ static void destruct(struct dce110_resource_pool *pool)
+
+ if (pool->base.engines[i] != NULL)
+ dce110_engine_destroy(&pool->base.engines[i]);
+-
++ if (pool->base.hw_i2cs[i] != NULL) {
++ kfree(pool->base.hw_i2cs[i]);
++ pool->base.hw_i2cs[i] = NULL;
++ }
++ if (pool->base.sw_i2cs[i] != NULL) {
++ kfree(pool->base.sw_i2cs[i]);
++ pool->base.sw_i2cs[i] = NULL;
++ }
+ }
+
+ for (i = 0; i < pool->base.stream_enc_count; i++) {
+@@ -970,6 +1011,14 @@ static bool construct(
+ "DC:failed to create aux engine!!\n");
+ goto res_create_fail;
+ }
++ pool->base.hw_i2cs[i] = dce100_i2c_hw_create(ctx, i);
++ if (pool->base.hw_i2cs[i] == NULL) {
++ BREAK_TO_DEBUGGER();
++ dm_error(
++ "DC:failed to create i2c engine!!\n");
++ goto res_create_fail;
++ }
++ pool->base.sw_i2cs[i] = NULL;
+ }
+
+ dc->caps.max_planes = pool->base.pipe_count;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+index e5e9e92..49c5c70 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+@@ -52,6 +52,7 @@
+ #include "dce/dce_aux.h"
+ #include "dce/dce_abm.h"
+ #include "dce/dce_dmcu.h"
++#include "dce/dce_i2c.h"
+
+ #define DC_LOGGER \
+ dc->ctx->logger
+@@ -620,7 +621,40 @@ struct aux_engine *dce110_aux_engine_create(
+
+ return &aux_engine->base;
+ }
++#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) }
++
++static const struct dce_i2c_registers i2c_hw_regs[] = {
++ i2c_inst_regs(1),
++ i2c_inst_regs(2),
++ i2c_inst_regs(3),
++ i2c_inst_regs(4),
++ i2c_inst_regs(5),
++ i2c_inst_regs(6),
++};
++
++static const struct dce_i2c_shift i2c_shifts = {
++ I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT)
++};
++
++static const struct dce_i2c_mask i2c_masks = {
++ I2C_COMMON_MASK_SH_LIST_DCE110(_MASK)
++};
++
++struct dce_i2c_hw *dce110_i2c_hw_create(
++ struct dc_context *ctx,
++ uint32_t inst)
++{
++ struct dce_i2c_hw *dce_i2c_hw =
++ kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
++
++ if (!dce_i2c_hw)
++ return NULL;
++
++ dce100_i2c_hw_construct(dce_i2c_hw, ctx, inst,
++ &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks);
+
++ return dce_i2c_hw;
++}
+ struct clock_source *dce110_clock_source_create(
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+@@ -687,7 +721,14 @@ static void destruct(struct dce110_resource_pool *pool)
+
+ if (pool->base.engines[i] != NULL)
+ dce110_engine_destroy(&pool->base.engines[i]);
+-
++ if (pool->base.hw_i2cs[i] != NULL) {
++ kfree(pool->base.hw_i2cs[i]);
++ pool->base.hw_i2cs[i] = NULL;
++ }
++ if (pool->base.sw_i2cs[i] != NULL) {
++ kfree(pool->base.sw_i2cs[i]);
++ pool->base.sw_i2cs[i] = NULL;
++ }
+ }
+
+ for (i = 0; i < pool->base.stream_enc_count; i++) {
+@@ -1303,6 +1344,14 @@ static bool construct(
+ "DC:failed to create aux engine!!\n");
+ goto res_create_fail;
+ }
++ pool->base.hw_i2cs[i] = dce110_i2c_hw_create(ctx, i);
++ if (pool->base.hw_i2cs[i] == NULL) {
++ BREAK_TO_DEBUGGER();
++ dm_error(
++ "DC:failed to create i2c engine!!\n");
++ goto res_create_fail;
++ }
++ pool->base.sw_i2cs[i] = NULL;
+ }
+
+ dc->fbc_compressor = dce110_compressor_create(ctx);
+diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+index 2881293..d35dc730 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+@@ -50,6 +50,7 @@
+ #include "dce/dce_abm.h"
+ #include "dce/dce_dmcu.h"
+ #include "dce/dce_aux.h"
++#include "dce/dce_i2c.h"
+
+ #include "reg_helper.h"
+
+@@ -620,7 +621,40 @@ struct aux_engine *dce112_aux_engine_create(
+
+ return &aux_engine->base;
+ }
++#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) }
++
++static const struct dce_i2c_registers i2c_hw_regs[] = {
++ i2c_inst_regs(1),
++ i2c_inst_regs(2),
++ i2c_inst_regs(3),
++ i2c_inst_regs(4),
++ i2c_inst_regs(5),
++ i2c_inst_regs(6),
++};
++
++static const struct dce_i2c_shift i2c_shifts = {
++ I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT)
++};
++
++static const struct dce_i2c_mask i2c_masks = {
++ I2C_COMMON_MASK_SH_LIST_DCE110(_MASK)
++};
++
++struct dce_i2c_hw *dce112_i2c_hw_create(
++ struct dc_context *ctx,
++ uint32_t inst)
++{
++ struct dce_i2c_hw *dce_i2c_hw =
++ kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
++
++ if (!dce_i2c_hw)
++ return NULL;
++
++ dce112_i2c_hw_construct(dce_i2c_hw, ctx, inst,
++ &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks);
+
++ return dce_i2c_hw;
++}
+ struct clock_source *dce112_clock_source_create(
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+@@ -676,7 +710,14 @@ static void destruct(struct dce110_resource_pool *pool)
+ kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
+ pool->base.timing_generators[i] = NULL;
+ }
+-
++ if (pool->base.hw_i2cs[i] != NULL) {
++ kfree(pool->base.hw_i2cs[i]);
++ pool->base.hw_i2cs[i] = NULL;
++ }
++ if (pool->base.sw_i2cs[i] != NULL) {
++ kfree(pool->base.sw_i2cs[i]);
++ pool->base.sw_i2cs[i] = NULL;
++ }
+ }
+
+ for (i = 0; i < pool->base.stream_enc_count; i++) {
+@@ -1252,6 +1293,14 @@ static bool construct(
+ "DC:failed to create aux engine!!\n");
+ goto res_create_fail;
+ }
++ pool->base.hw_i2cs[i] = dce112_i2c_hw_create(ctx, i);
++ if (pool->base.hw_i2cs[i] == NULL) {
++ BREAK_TO_DEBUGGER();
++ dm_error(
++ "DC:failed to create i2c engine!!\n");
++ goto res_create_fail;
++ }
++ pool->base.sw_i2cs[i] = NULL;
+ }
+
+ if (!resource_construct(num_virtual_links, dc, &pool->base,
+diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+index d43f37d..b2fb06f3 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+@@ -54,6 +54,7 @@
+ #include "dce/dce_abm.h"
+ #include "dce/dce_dmcu.h"
+ #include "dce/dce_aux.h"
++#include "dce/dce_i2c.h"
+
+ #include "dce/dce_12_0_offset.h"
+ #include "dce/dce_12_0_sh_mask.h"
+@@ -392,7 +393,40 @@ struct aux_engine *dce120_aux_engine_create(
+
+ return &aux_engine->base;
+ }
++#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) }
++
++static const struct dce_i2c_registers i2c_hw_regs[] = {
++ i2c_inst_regs(1),
++ i2c_inst_regs(2),
++ i2c_inst_regs(3),
++ i2c_inst_regs(4),
++ i2c_inst_regs(5),
++ i2c_inst_regs(6),
++};
++
++static const struct dce_i2c_shift i2c_shifts = {
++ I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT)
++};
+
++static const struct dce_i2c_mask i2c_masks = {
++ I2C_COMMON_MASK_SH_LIST_DCE110(_MASK)
++};
++
++struct dce_i2c_hw *dce120_i2c_hw_create(
++ struct dc_context *ctx,
++ uint32_t inst)
++{
++ struct dce_i2c_hw *dce_i2c_hw =
++ kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
++
++ if (!dce_i2c_hw)
++ return NULL;
++
++ dce112_i2c_hw_construct(dce_i2c_hw, ctx, inst,
++ &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks);
++
++ return dce_i2c_hw;
++}
+ static const struct bios_registers bios_regs = {
+ .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 + NBIO_BASE(mmBIOS_SCRATCH_6_BASE_IDX)
+ };
+@@ -501,7 +535,14 @@ static void destruct(struct dce110_resource_pool *pool)
+
+ if (pool->base.engines[i] != NULL)
+ dce110_engine_destroy(&pool->base.engines[i]);
+-
++ if (pool->base.hw_i2cs[i] != NULL) {
++ kfree(pool->base.hw_i2cs[i]);
++ pool->base.hw_i2cs[i] = NULL;
++ }
++ if (pool->base.sw_i2cs[i] != NULL) {
++ kfree(pool->base.sw_i2cs[i]);
++ pool->base.sw_i2cs[i] = NULL;
++ }
+ }
+
+ for (i = 0; i < pool->base.audio_count; i++) {
+@@ -957,6 +998,7 @@ static bool construct(
+ goto res_create_fail;
+ }
+
++
+ irq_init_data.ctx = dc->ctx;
+ pool->base.irqs = dal_irq_service_dce120_create(&irq_init_data);
+ if (!pool->base.irqs)
+@@ -1021,13 +1063,20 @@ static bool construct(
+ "DC: failed to create output pixel processor!\n");
+ }
+ pool->base.engines[i] = dce120_aux_engine_create(ctx, i);
+- if (pool->base.engines[i] == NULL) {
+- BREAK_TO_DEBUGGER();
+- dm_error(
+- "DC:failed to create aux engine!!\n");
+- goto res_create_fail;
+- }
+-
++ if (pool->base.engines[i] == NULL) {
++ BREAK_TO_DEBUGGER();
++ dm_error(
++ "DC:failed to create aux engine!!\n");
++ goto res_create_fail;
++ }
++ pool->base.hw_i2cs[i] = dce120_i2c_hw_create(ctx, i);
++ if (pool->base.hw_i2cs[i] == NULL) {
++ BREAK_TO_DEBUGGER();
++ dm_error(
++ "DC:failed to create i2c engine!!\n");
++ goto res_create_fail;
++ }
++ pool->base.sw_i2cs[i] = NULL;
+ /* check next valid pipe */
+ j++;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+index 604c629..4eae859 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+@@ -56,6 +56,7 @@
+ #include "dce/dce_dmcu.h"
+ #include "dce/dce_aux.h"
+ #include "dce/dce_abm.h"
++#include "dce/dce_i2c.h"
+ /* TODO remove this include */
+
+ #ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
+@@ -480,7 +481,54 @@ struct aux_engine *dce80_aux_engine_create(
+
+ return &aux_engine->base;
+ }
++#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) }
++
++static const struct dce_i2c_registers i2c_hw_regs[] = {
++ i2c_inst_regs(1),
++ i2c_inst_regs(2),
++ i2c_inst_regs(3),
++ i2c_inst_regs(4),
++ i2c_inst_regs(5),
++ i2c_inst_regs(6),
++};
++
++static const struct dce_i2c_shift i2c_shifts = {
++ I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
++};
++
++static const struct dce_i2c_mask i2c_masks = {
++ I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
++};
++
++struct dce_i2c_hw *dce80_i2c_hw_create(
++ struct dc_context *ctx,
++ uint32_t inst)
++{
++ struct dce_i2c_hw *dce_i2c_hw =
++ kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
++
++ if (!dce_i2c_hw)
++ return NULL;
++
++ dce_i2c_hw_construct(dce_i2c_hw, ctx, inst,
++ &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks);
+
++ return dce_i2c_hw;
++}
++
++struct dce_i2c_sw *dce80_i2c_sw_create(
++ struct dc_context *ctx)
++{
++ struct dce_i2c_sw *dce_i2c_sw =
++ kzalloc(sizeof(struct dce_i2c_sw), GFP_KERNEL);
++
++ if (!dce_i2c_sw)
++ return NULL;
++
++ dce_i2c_sw_construct(dce_i2c_sw, ctx);
++
++ return dce_i2c_sw;
++}
+ static struct stream_encoder *dce80_stream_encoder_create(
+ enum engine_id eng_id,
+ struct dc_context *ctx)
+@@ -691,6 +739,14 @@ static void destruct(struct dce110_resource_pool *pool)
+
+ if (pool->base.engines[i] != NULL)
+ dce110_engine_destroy(&pool->base.engines[i]);
++ if (pool->base.hw_i2cs[i] != NULL) {
++ kfree(pool->base.hw_i2cs[i]);
++ pool->base.hw_i2cs[i] = NULL;
++ }
++ if (pool->base.sw_i2cs[i] != NULL) {
++ kfree(pool->base.sw_i2cs[i]);
++ pool->base.sw_i2cs[i] = NULL;
++ }
+ }
+
+ for (i = 0; i < pool->base.stream_enc_count; i++) {
+@@ -887,6 +943,7 @@ static bool dce80_construct(
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
++
+ if (dm_pp_get_static_clocks(ctx, &static_clk_info))
+ pool->base.dccg->max_clks_state =
+ static_clk_info.max_clocks_state;
+@@ -943,6 +1000,20 @@ static bool dce80_construct(
+ "DC:failed to create aux engine!!\n");
+ goto res_create_fail;
+ }
++ pool->base.hw_i2cs[i] = dce80_i2c_hw_create(ctx, i);
++ if (pool->base.hw_i2cs[i] == NULL) {
++ BREAK_TO_DEBUGGER();
++ dm_error(
++ "DC:failed to create i2c engine!!\n");
++ goto res_create_fail;
++ }
++ pool->base.sw_i2cs[i] = dce80_i2c_sw_create(ctx);
++ if (pool->base.sw_i2cs[i] == NULL) {
++ BREAK_TO_DEBUGGER();
++ dm_error(
++ "DC:failed to create sw i2c!!\n");
++ goto res_create_fail;
++ }
+ }
+
+ dc->caps.max_planes = pool->base.pipe_count;
+@@ -1129,6 +1200,20 @@ static bool dce81_construct(
+ dm_error("DC: failed to create output pixel processor!\n");
+ goto res_create_fail;
+ }
++ pool->base.hw_i2cs[i] = dce80_i2c_hw_create(ctx, i);
++ if (pool->base.hw_i2cs[i] == NULL) {
++ BREAK_TO_DEBUGGER();
++ dm_error(
++ "DC:failed to create i2c engine!!\n");
++ goto res_create_fail;
++ }
++ pool->base.sw_i2cs[i] = dce80_i2c_sw_create(ctx);
++ if (pool->base.sw_i2cs[i] == NULL) {
++ BREAK_TO_DEBUGGER();
++ dm_error(
++ "DC:failed to create sw i2c!!\n");
++ goto res_create_fail;
++ }
+ }
+
+ dc->caps.max_planes = pool->base.pipe_count;
+@@ -1311,6 +1396,20 @@ static bool dce83_construct(
+ dm_error("DC: failed to create output pixel processor!\n");
+ goto res_create_fail;
+ }
++ pool->base.hw_i2cs[i] = dce80_i2c_hw_create(ctx, i);
++ if (pool->base.hw_i2cs[i] == NULL) {
++ BREAK_TO_DEBUGGER();
++ dm_error(
++ "DC:failed to create i2c engine!!\n");
++ goto res_create_fail;
++ }
++ pool->base.sw_i2cs[i] = dce80_i2c_sw_create(ctx);
++ if (pool->base.sw_i2cs[i] == NULL) {
++ BREAK_TO_DEBUGGER();
++ dm_error(
++ "DC:failed to create sw i2c!!\n");
++ goto res_create_fail;
++ }
+ }
+
+ dc->caps.max_planes = pool->base.pipe_count;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+index 6b44ed3..28ebad8 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+@@ -65,6 +65,7 @@
+ #include "dce/dce_abm.h"
+ #include "dce/dce_dmcu.h"
+ #include "dce/dce_aux.h"
++#include "dce/dce_i2c.h"
+
+ const struct _vcs_dpi_ip_params_st dcn1_0_ip = {
+ .rob_buffer_size_kbytes = 64,
+@@ -610,7 +611,40 @@ struct aux_engine *dcn10_aux_engine_create(
+
+ return &aux_engine->base;
+ }
++#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) }
++
++static const struct dce_i2c_registers i2c_hw_regs[] = {
++ i2c_inst_regs(1),
++ i2c_inst_regs(2),
++ i2c_inst_regs(3),
++ i2c_inst_regs(4),
++ i2c_inst_regs(5),
++ i2c_inst_regs(6),
++};
++
++static const struct dce_i2c_shift i2c_shifts = {
++ I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT)
++};
++
++static const struct dce_i2c_mask i2c_masks = {
++ I2C_COMMON_MASK_SH_LIST_DCE110(_MASK)
++};
++
++struct dce_i2c_hw *dcn10_i2c_hw_create(
++ struct dc_context *ctx,
++ uint32_t inst)
++{
++ struct dce_i2c_hw *dce_i2c_hw =
++ kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
++
++ if (!dce_i2c_hw)
++ return NULL;
++
++ dcn1_i2c_hw_construct(dce_i2c_hw, ctx, inst,
++ &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks);
+
++ return dce_i2c_hw;
++}
+ static struct mpc *dcn10_mpc_create(struct dc_context *ctx)
+ {
+ struct dcn10_mpc *mpc10 = kzalloc(sizeof(struct dcn10_mpc),
+@@ -862,6 +896,14 @@ static void destruct(struct dcn10_resource_pool *pool)
+
+ if (pool->base.engines[i] != NULL)
+ pool->base.engines[i]->funcs->destroy_engine(&pool->base.engines[i]);
++ if (pool->base.hw_i2cs[i] != NULL) {
++ kfree(pool->base.hw_i2cs[i]);
++ pool->base.hw_i2cs[i] = NULL;
++ }
++ if (pool->base.sw_i2cs[i] != NULL) {
++ kfree(pool->base.sw_i2cs[i]);
++ pool->base.sw_i2cs[i] = NULL;
++ }
+ }
+
+ for (i = 0; i < pool->base.stream_enc_count; i++)
+@@ -1300,7 +1342,14 @@ static bool construct(
+ "DC:failed to create aux engine!!\n");
+ goto fail;
+ }
+-
++ pool->base.hw_i2cs[i] = dcn10_i2c_hw_create(ctx, i);
++ if (pool->base.hw_i2cs[i] == NULL) {
++ BREAK_TO_DEBUGGER();
++ dm_error(
++ "DC:failed to create hw i2c!!\n");
++ goto fail;
++ }
++ pool->base.sw_i2cs[i] = NULL;
+ /* check next valid pipe */
+ j++;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+index b4d3300..ed388d3 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+@@ -143,6 +143,9 @@ struct resource_pool {
+ struct mpc *mpc;
+ struct pp_smu_funcs_rv *pp_smu;
+ struct pp_smu_display_requirement_rv pp_smu_req;
++ struct dce_i2c_hw *hw_i2cs[MAX_PIPES];
++ struct dce_i2c_sw *sw_i2cs[MAX_PIPES];
++ bool i2c_hw_buffer_in_use;
+
+ unsigned int pipe_count;
+ unsigned int underlay_pipe_index;
+--
+2.7.4
+
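The software (bit-banged) I2C engine added in the patch above derives all of its timing from one number: dce_i2c_sw_engine_set_speed() stores a full SCL period of 1000/speed (clamped to at least 12), and the start/stop/byte helpers then wait a quarter of that period per edge via udelay(clock_delay >> 2). A minimal standalone C sketch of that timing math, assuming speed is in kHz as in the patch; it is illustrative only and not part of the patch itself:

#include <stdio.h>
#include <stdint.h>

#define DCE_I2C_DEFAULT_I2C_SW_SPEED 50 /* kHz, mirrors dce_i2c_sw.h above */

static uint32_t sw_i2c_clock_delay_us(uint32_t speed_khz)
{
	uint32_t clock_delay;

	if (!speed_khz)
		speed_khz = DCE_I2C_DEFAULT_I2C_SW_SPEED;

	/* full SCL period in microseconds for a clock given in kHz */
	clock_delay = 1000 / speed_khz;
	if (clock_delay < 12)	/* clamp, as the driver does */
		clock_delay = 12;

	return clock_delay;
}

int main(void)
{
	uint32_t speed = 100; /* kHz */

	/* the bit-bang helpers udelay() one quarter period per edge */
	printf("quarter period at %u kHz: %u us\n",
	       (unsigned)speed, (unsigned)(sw_i2c_clock_delay_us(speed) >> 2));
	return 0;
}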
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5178-drm-amd-display-Program-csc-matrix-as-part-of-stream.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5178-drm-amd-display-Program-csc-matrix-as-part-of-stream.patch
new file mode 100644
index 00000000..1dc0d962
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5178-drm-amd-display-Program-csc-matrix-as-part-of-stream.patch
@@ -0,0 +1,85 @@
+From 4a753992838c0e896320a503ebc6f841187d5f8d Mon Sep 17 00:00:00 2001
+From: SivapiriyanKumarasamy <sivapiriyan.kumarasamy@amd.com>
+Date: Thu, 26 Jul 2018 14:58:35 -0400
+Subject: [PATCH 5178/5725] drm/amd/display: Program csc matrix as part of
+ stream update
+
+Add csc_transform struct to dc_stream_update, and program if set when
+updating streams
+
+Signed-off-by: SivapiriyanKumarasamy <sivapiriyan.kumarasamy@amd.com>
+Reviewed-by: Anthony Koo <Anthony.Koo@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc.c | 24 ++++++++++++++++++++++++
+ drivers/gpu/drm/amd/display/dc/dc_stream.h | 4 ++++
+ 2 files changed, 28 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 152c7dd..18632db 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -379,6 +379,27 @@ bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stre
+ return ret;
+ }
+
++bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
++{
++ int i = 0;
++ bool ret = false;
++ struct pipe_ctx *pipes;
++
++ for (i = 0; i < MAX_PIPES; i++) {
++ if (dc->current_state->res_ctx.pipe_ctx[i].stream
++ == stream) {
++
++ pipes = &dc->current_state->res_ctx.pipe_ctx[i];
++ dc->hwss.program_csc_matrix(pipes,
++ stream->output_color_space,
++ stream->csc_color_matrix.matrix);
++ ret = true;
++ }
++ }
++
++ return ret;
++}
++
+ void dc_stream_set_static_screen_events(struct dc *dc,
+ struct dc_stream_state **streams,
+ int num_streams,
+@@ -1413,6 +1434,9 @@ static void commit_planes_do_stream_update(struct dc *dc,
+ if (stream_update->gamut_remap)
+ dc_stream_set_gamut_remap(dc, stream);
+
++ if (stream_update->output_csc_transform)
++ dc_stream_program_csc_matrix(dc, stream);
++
+ /* Full fe update*/
+ if (update_type == UPDATE_TYPE_FAST)
+ continue;
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+index c531d80..069c2fc 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+@@ -137,6 +137,7 @@ struct dc_stream_update {
+ struct colorspace_transform *gamut_remap;
+ enum dc_color_space *output_color_space;
+
++ struct dc_csc_transform *output_csc_transform;
+
+ };
+
+@@ -306,6 +307,9 @@ void dc_stream_set_dither_option(struct dc_stream_state *stream,
+ bool dc_stream_set_gamut_remap(struct dc *dc,
+ const struct dc_stream_state *stream);
+
++bool dc_stream_program_csc_matrix(struct dc *dc,
++ struct dc_stream_state *stream);
++
+ bool dc_stream_get_crtc_position(struct dc *dc,
+ struct dc_stream_state **stream,
+ int num_streams,
+--
+2.7.4
+
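dc_stream_program_csc_matrix() in the patch above follows a common DC pattern: a stream-level property is applied by walking every pipe in the current state and programming each pipe whose ->stream pointer matches. A cut-down sketch of that lookup-and-apply pattern, using stand-in structs rather than the driver's types (MAX_PIPES and the callback here are placeholders):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define MAX_PIPES 6 /* stand-in; the real value comes from the dc headers */

struct stream { int id; };
struct pipe_ctx { struct stream *stream; };

/* apply 'program' to every pipe currently driven by 'stream' */
static bool for_each_pipe_with_stream(struct pipe_ctx *pipes, size_t n,
				      struct stream *stream,
				      void (*program)(struct pipe_ctx *))
{
	bool ret = false;
	size_t i;

	for (i = 0; i < n; i++) {
		if (pipes[i].stream == stream) {
			program(&pipes[i]);
			ret = true; /* at least one pipe was programmed */
		}
	}
	return ret;
}

static void program_csc(struct pipe_ctx *pipe)
{
	printf("programming CSC on a pipe for stream %d\n", pipe->stream->id);
}

int main(void)
{
	struct stream s = { .id = 1 };
	struct pipe_ctx pipes[MAX_PIPES] = { { &s }, { NULL }, { &s } };

	return for_each_pipe_with_stream(pipes, MAX_PIPES, &s, program_csc) ? 0 : 1;
}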
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5179-drm-amdgpu-display-disable-eDP-fast-boot-optimizatio.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5179-drm-amdgpu-display-disable-eDP-fast-boot-optimizatio.patch
new file mode 100644
index 00000000..420a9d75
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5179-drm-amdgpu-display-disable-eDP-fast-boot-optimizatio.patch
@@ -0,0 +1,37 @@
+From e24afc5217e39de550f266f4ee4f7e777749c553 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 16 Aug 2018 15:35:21 -0500
+Subject: [PATCH 5179/5725] drm/amdgpu/display: disable eDP fast boot
+ optimization on DCE8
+
+Seems to cause blank screens.
+
+Bug: https://bugs.freedesktop.org/show_bug.cgi?id=106940
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index 3b4a9f9..2fa8d04 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -1562,7 +1562,13 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
+ bool can_eDP_fast_boot_optimize = false;
+
+ if (edp_link) {
+- can_eDP_fast_boot_optimize =
++ /* this seems to cause blank screens on DCE8 */
++ if ((dc->ctx->dce_version == DCE_VERSION_8_0) ||
++ (dc->ctx->dce_version == DCE_VERSION_8_1) ||
++ (dc->ctx->dce_version == DCE_VERSION_8_3))
++ can_eDP_fast_boot_optimize = false;
++ else
++ can_eDP_fast_boot_optimize =
+ edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc);
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5180-drm-amd-display-Define-registers-for-dcn10.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5180-drm-amd-display-Define-registers-for-dcn10.patch
new file mode 100644
index 00000000..df7f9cd8
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5180-drm-amd-display-Define-registers-for-dcn10.patch
@@ -0,0 +1,38 @@
+From 76ca45f0635ba94dce39617241552f0749d2e467 Mon Sep 17 00:00:00 2001
+From: Nikola Cornij <nikola.cornij@amd.com>
+Date: Fri, 13 Jul 2018 18:19:07 -0400
+Subject: [PATCH 5180/5725] drm/amd/display: Define registers for dcn10
+
+Define register for dcn10 for future changes
+
+Signed-off-by: Nikola Cornij <nikola.cornij@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
+index 6b3e4de..67f3e4d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
+@@ -260,6 +260,7 @@ struct dcn10_stream_enc_registers {
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP5_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP6_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP7_ENABLE, mask_sh),\
++ SE_SF(DP0_DP_SEC_CNTL2, DP_SEC_GSP7_SEND, mask_sh),\
+ SE_SF(DP0_DP_DB_CNTL, DP_DB_DISABLE, mask_sh),\
+ SE_SF(DP0_DP_MSA_COLORIMETRY, DP_MSA_MISC0, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM1, DP_MSA_HTOTAL, mask_sh),\
+@@ -364,6 +365,7 @@ struct dcn10_stream_enc_registers {
+ type DP_SEC_GSP5_ENABLE;\
+ type DP_SEC_GSP6_ENABLE;\
+ type DP_SEC_GSP7_ENABLE;\
++ type DP_SEC_GSP7_SEND;\
+ type DP_SEC_MPG_ENABLE;\
+ type DP_VID_STREAM_DIS_DEFER;\
+ type DP_VID_STREAM_ENABLE;\
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5181-drm-amd-display-Combine-dce80-and-dce100-i2c-hw-func.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5181-drm-amd-display-Combine-dce80-and-dce100-i2c-hw-func.patch
new file mode 100644
index 00000000..ed5f8320
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5181-drm-amd-display-Combine-dce80-and-dce100-i2c-hw-func.patch
@@ -0,0 +1,349 @@
+From f8f33cbd7ae08b2f7f3510ca5729d37e64442037 Mon Sep 17 00:00:00 2001
+From: David Francis <David.Francis@amd.com>
+Date: Thu, 9 Aug 2018 13:15:36 -0400
+Subject: [PATCH 5181/5725] drm/amd/display: Combine dce80 and dce100 i2c hw
+ functions
+
+[Why]
+There are two versions of the hw function pointers: one for dce80
+and one for all other versions. These paired functions are
+nearly identical. dce80 and dce100 should not require
+different i2c access functions.
+
+[How]
+Combine each pair of functions into a single function. Mostly
+the new functions are based on the dce100 versions as those
+versions are newer, support more features, and
+were more maintained.
+
+Signed-off-by: David Francis <David.Francis@amd.com>
+Reviewed-by: Sun peng Li <Sunpeng.Li@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c | 237 ++++--------------------
+ 1 file changed, 40 insertions(+), 197 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+index 6a57c48..3a63e3c 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+@@ -58,18 +58,7 @@ static bool is_hw_busy(struct dce_i2c_hw *dce_i2c_hw)
+ return i2c_sw_status != DC_I2C_STATUS__DC_I2C_STATUS_IDLE;
+ }
+
+-static void set_speed_hw_dce80(
+- struct dce_i2c_hw *dce_i2c_hw,
+- uint32_t speed)
+-{
+-
+- if (speed) {
+- REG_UPDATE_N(SPEED, 2,
+- FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), dce_i2c_hw->reference_frequency / speed,
+- FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2);
+- }
+-}
+-static void set_speed_hw_dce100(
++static void set_speed(
+ struct dce_i2c_hw *dce_i2c_hw,
+ uint32_t speed)
+ {
+@@ -86,6 +75,7 @@ static void set_speed_hw_dce100(
+ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2);
+ }
+ }
++
+ bool dce_i2c_hw_engine_acquire_engine(
+ struct dce_i2c_hw *dce_i2c_hw,
+ struct ddc *ddc)
+@@ -172,7 +162,7 @@ struct dce_i2c_hw *acquire_i2c_hw_engine(
+ return NULL;
+ }
+
+-static bool setup_engine_hw_dce100(
++static bool setup_engine(
+ struct dce_i2c_hw *dce_i2c_hw)
+ {
+ uint32_t i2c_setup_limit = I2C_SETUP_TIME_LIMIT_DCE;
+@@ -206,72 +196,11 @@ static bool setup_engine_hw_dce100(
+
+ return true;
+ }
+-static bool setup_engine_hw_dce80(
+- struct dce_i2c_hw *dce_i2c_hw)
+-{
+-
+- /* Program pin select */
+- {
+- REG_UPDATE_6(DC_I2C_CONTROL,
+- DC_I2C_GO, 0,
+- DC_I2C_SOFT_RESET, 0,
+- DC_I2C_SEND_RESET, 0,
+- DC_I2C_SW_STATUS_RESET, 1,
+- DC_I2C_TRANSACTION_COUNT, 0,
+- DC_I2C_DDC_SELECT, dce_i2c_hw->engine_id);
+- }
+-
+- /* Program time limit */
+- {
+- REG_UPDATE_2(SETUP,
+- DC_I2C_DDC1_TIME_LIMIT, I2C_SETUP_TIME_LIMIT_DCE,
+- DC_I2C_DDC1_ENABLE, 1);
+- }
+-
+- /* Program HW priority
+- * set to High - interrupt software I2C at any time
+- * Enable restart of SW I2C that was interrupted by HW
+- * disable queuing of software while I2C is in use by HW
+- */
+- {
+- REG_UPDATE_2(DC_I2C_ARBITRATION,
+- DC_I2C_NO_QUEUED_SW_GO, 0,
+- DC_I2C_SW_PRIORITY, DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_NORMAL);
+- }
+
+- return true;
+-}
+
+
+
+-static void process_channel_reply_hw_dce80(
+- struct dce_i2c_hw *dce_i2c_hw,
+- struct i2c_reply_transaction_data *reply)
+-{
+- uint32_t length = reply->length;
+- uint8_t *buffer = reply->data;
+-
+- REG_SET_3(DC_I2C_DATA, 0,
+- DC_I2C_INDEX, length - 1,
+- DC_I2C_DATA_RW, 1,
+- DC_I2C_INDEX_WRITE, 1);
+-
+- while (length) {
+- /* after reading the status,
+- * if the I2C operation executed successfully
+- * (i.e. DC_I2C_STATUS_DONE = 1) then the I2C controller
+- * should read data bytes from I2C circular data buffer
+- */
+-
+- uint32_t i2c_data;
+-
+- REG_GET(DC_I2C_DATA, DC_I2C_DATA, &i2c_data);
+- *buffer++ = i2c_data;
+-
+- --length;
+- }
+-}
+-static void process_channel_reply_hw_dce100(
++static void process_channel_reply(
+ struct dce_i2c_hw *dce_i2c_hw,
+ struct i2c_reply_transaction_data *reply)
+ {
+@@ -404,7 +333,7 @@ static void execute_transaction_hw(
+ dce_i2c_hw->transaction_count = 0;
+ dce_i2c_hw->buffer_used_bytes = 0;
+ }
+-static bool process_transaction_hw_dce80(
++static bool process_transaction(
+ struct dce_i2c_hw *dce_i2c_hw,
+ struct i2c_request_transaction_data *request)
+ {
+@@ -414,135 +343,49 @@ static bool process_transaction_hw_dce80(
+ bool last_transaction = false;
+ uint32_t value = 0;
+
+- {
+-
+- last_transaction = ((dce_i2c_hw->transaction_count == 3) ||
+- (request->action == DCE_I2C_TRANSACTION_ACTION_I2C_WRITE) ||
+- (request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ));
++ last_transaction = ((dce_i2c_hw->transaction_count == 3) ||
++ (request->action == DCE_I2C_TRANSACTION_ACTION_I2C_WRITE) ||
++ (request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ));
+
+
+- switch (dce_i2c_hw->transaction_count) {
+- case 0:
+- REG_UPDATE_5(DC_I2C_TRANSACTION0,
+- DC_I2C_STOP_ON_NACK0, 1,
+- DC_I2C_START0, 1,
+- DC_I2C_RW0, 0 != (request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ),
+- DC_I2C_COUNT0, length,
+- DC_I2C_STOP0, last_transaction ? 1 : 0);
+- break;
+- case 1:
+- REG_UPDATE_5(DC_I2C_TRANSACTION1,
+- DC_I2C_STOP_ON_NACK0, 1,
+- DC_I2C_START0, 1,
+- DC_I2C_RW0, 0 != (request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ),
+- DC_I2C_COUNT0, length,
+- DC_I2C_STOP0, last_transaction ? 1 : 0);
+- break;
+- case 2:
+- REG_UPDATE_5(DC_I2C_TRANSACTION2,
+- DC_I2C_STOP_ON_NACK0, 1,
+- DC_I2C_START0, 1,
+- DC_I2C_RW0, 0 != (request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ),
+- DC_I2C_COUNT0, length,
+- DC_I2C_STOP0, last_transaction ? 1 : 0);
+- break;
+- case 3:
+- REG_UPDATE_5(DC_I2C_TRANSACTION3,
+- DC_I2C_STOP_ON_NACK0, 1,
+- DC_I2C_START0, 1,
+- DC_I2C_RW0, 0 != (request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ),
+- DC_I2C_COUNT0, length,
+- DC_I2C_STOP0, last_transaction ? 1 : 0);
+- break;
+- default:
+- /* TODO Warning ? */
+- break;
+- }
+- }
+-
+- /* Write the I2C address and I2C data
+- * into the hardware circular buffer, one byte per entry.
+- * As an example, the 7-bit I2C slave address for CRT monitor
+- * for reading DDC/EDID information is 0b1010001.
+- * For an I2C send operation, the LSB must be programmed to 0;
+- * for I2C receive operation, the LSB must be programmed to 1.
+- */
+-
+- {
+- if (dce_i2c_hw->transaction_count == 0) {
+- value = REG_SET_4(DC_I2C_DATA, 0,
+- DC_I2C_DATA_RW, false,
+- DC_I2C_DATA, request->address,
+- DC_I2C_INDEX, 0,
+- DC_I2C_INDEX_WRITE, 1);
+- } else
+- value = REG_SET_2(DC_I2C_DATA, 0,
+- DC_I2C_DATA_RW, false,
+- DC_I2C_DATA, request->address);
+-
+- if (!(request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ)) {
+-
+- while (length) {
+- REG_SET_2(DC_I2C_DATA, value,
+- DC_I2C_INDEX_WRITE, 0,
+- DC_I2C_DATA, *buffer++);
+- --length;
+- }
+- }
+- }
+-
+- ++dce_i2c_hw->transaction_count;
+- dce_i2c_hw->buffer_used_bytes += length + 1;
+-
+- return last_transaction;
+-}
+-
+-#define STOP_TRANS_PREDICAT \
+- ((dce_i2c_hw->transaction_count == 3) || \
+- (request->action == DCE_I2C_TRANSACTION_ACTION_I2C_WRITE) || \
+- (request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ))
+-
+-#define SET_I2C_TRANSACTION(id) \
+- do { \
+- REG_UPDATE_N(DC_I2C_TRANSACTION##id, 5, \
+- FN(DC_I2C_TRANSACTION0, DC_I2C_STOP_ON_NACK0), 1, \
+- FN(DC_I2C_TRANSACTION0, DC_I2C_START0), 1, \
+- FN(DC_I2C_TRANSACTION0, DC_I2C_STOP0), STOP_TRANS_PREDICAT ? 1:0, \
+- FN(DC_I2C_TRANSACTION0, DC_I2C_RW0), (0 != (request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ)), \
+- FN(DC_I2C_TRANSACTION0, DC_I2C_COUNT0), length); \
+- if (STOP_TRANS_PREDICAT) \
+- last_transaction = true; \
+- } while (false)
+-
+-static bool process_transaction_hw_dce100(
+- struct dce_i2c_hw *dce_i2c_hw,
+- struct i2c_request_transaction_data *request)
+-{
+- uint32_t length = request->length;
+- uint8_t *buffer = request->data;
+- uint32_t value = 0;
+-
+- bool last_transaction = false;
+-
+ switch (dce_i2c_hw->transaction_count) {
+ case 0:
+- SET_I2C_TRANSACTION(0);
++ REG_UPDATE_5(DC_I2C_TRANSACTION0,
++ DC_I2C_STOP_ON_NACK0, 1,
++ DC_I2C_START0, 1,
++ DC_I2C_RW0, 0 != (request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ),
++ DC_I2C_COUNT0, length,
++ DC_I2C_STOP0, last_transaction ? 1 : 0);
+ break;
+ case 1:
+- SET_I2C_TRANSACTION(1);
++ REG_UPDATE_5(DC_I2C_TRANSACTION1,
++ DC_I2C_STOP_ON_NACK0, 1,
++ DC_I2C_START0, 1,
++ DC_I2C_RW0, 0 != (request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ),
++ DC_I2C_COUNT0, length,
++ DC_I2C_STOP0, last_transaction ? 1 : 0);
+ break;
+ case 2:
+- SET_I2C_TRANSACTION(2);
++ REG_UPDATE_5(DC_I2C_TRANSACTION2,
++ DC_I2C_STOP_ON_NACK0, 1,
++ DC_I2C_START0, 1,
++ DC_I2C_RW0, 0 != (request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ),
++ DC_I2C_COUNT0, length,
++ DC_I2C_STOP0, last_transaction ? 1 : 0);
+ break;
+ case 3:
+- SET_I2C_TRANSACTION(3);
++ REG_UPDATE_5(DC_I2C_TRANSACTION3,
++ DC_I2C_STOP_ON_NACK0, 1,
++ DC_I2C_START0, 1,
++ DC_I2C_RW0, 0 != (request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ),
++ DC_I2C_COUNT0, length,
++ DC_I2C_STOP0, last_transaction ? 1 : 0);
+ break;
+ default:
+ /* TODO Warning ? */
+ break;
+ }
+
+-
+ /* Write the I2C address and I2C data
+ * into the hardware circular buffer, one byte per entry.
+ * As an example, the 7-bit I2C slave address for CRT monitor
+@@ -828,24 +671,24 @@ bool dce_i2c_submit_command_hw(
+ return result;
+ }
+ static const struct dce_i2c_hw_funcs dce100_i2c_hw_funcs = {
+- .setup_engine = setup_engine_hw_dce100,
+- .set_speed = set_speed_hw_dce100,
++ .setup_engine = setup_engine,
++ .set_speed = set_speed,
+ .get_speed = get_speed_hw,
+ .release_engine = release_engine_hw,
+- .process_transaction = process_transaction_hw_dce100,
+- .process_channel_reply = process_channel_reply_hw_dce100,
++ .process_transaction = process_transaction,
++ .process_channel_reply = process_channel_reply,
+ .is_hw_busy = is_hw_busy,
+ .get_channel_status = get_channel_status_hw,
+ .execute_transaction = execute_transaction_hw,
+ .disable_i2c_hw_engine = disable_i2c_hw_engine
+ };
+ static const struct dce_i2c_hw_funcs dce80_i2c_hw_funcs = {
+- .setup_engine = setup_engine_hw_dce80,
+- .set_speed = set_speed_hw_dce80,
++ .setup_engine = setup_engine,
++ .set_speed = set_speed,
+ .get_speed = get_speed_hw,
+ .release_engine = release_engine_hw,
+- .process_transaction = process_transaction_hw_dce80,
+- .process_channel_reply = process_channel_reply_hw_dce80,
++ .process_transaction = process_transaction,
++ .process_channel_reply = process_channel_reply,
+ .is_hw_busy = is_hw_busy,
+ .get_channel_status = get_channel_status_hw,
+ .execute_transaction = execute_transaction_hw,
+--
+2.7.4
+
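In the merged set_speed() above, the hardware engine's bit rate is set by programming DC_I2C_DDC1_PRESCALE to reference_frequency / speed with a fixed threshold of 2, so the prescaler is simply the ratio of the reference clock to the requested SCL rate. A small standalone sketch of that calculation; treating both values as kHz is an assumption for illustration, not something the patch states:

#include <stdint.h>
#include <stdio.h>

/* Prescale value as programmed by set_speed():
 * DC_I2C_DDC1_PRESCALE = reference_frequency / speed.
 * Units are assumed to match (both kHz here); the threshold stays 2. */
static uint32_t i2c_hw_prescale(uint32_t reference_frequency_khz,
				uint32_t speed_khz)
{
	if (!speed_khz)
		return 0; /* the driver only writes the registers when speed != 0 */

	return reference_frequency_khz / speed_khz;
}

int main(void)
{
	/* e.g. a 48 MHz reference and a 100 kHz SCL target -> prescale 480 */
	printf("prescale = %u\n", (unsigned)i2c_hw_prescale(48000, 100));
	return 0;
}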
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5182-drm-amd-display-move-edp-fast-boot-optimization-flag.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5182-drm-amd-display-move-edp-fast-boot-optimization-flag.patch
new file mode 100644
index 00000000..f093a9f1
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5182-drm-amd-display-move-edp-fast-boot-optimization-flag.patch
@@ -0,0 +1,128 @@
+From c8da9913728433fc368049782b1d586e410d2bcf Mon Sep 17 00:00:00 2001
+From: Anthony Koo <Anthony.Koo@amd.com>
+Date: Tue, 21 Aug 2018 14:28:05 -0500
+Subject: [PATCH 5182/5725] drm/amd/display: move edp fast boot optimization
+ flag to stream
+
+[Why]
+During S4/S3 stress test it is possible to resume from S4 without
+calling mode set on eDP, meaning high level optimization flag is not
+reset. If this is followed by an S3 resume call, driver will see
+optimization flag is set and consume it and think backend is powered
+on when in fact it is not.
+
+This results in PHY being off in sequence where
+S4->Resume->S3->Resume->ApplyOpt->black screen.
+
+[How]
+Move optimization flag to stream instead of a DC flag.
+
+Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_link.c | 4 ++--
+ drivers/gpu/drm/amd/display/dc/dc.h | 2 --
+ drivers/gpu/drm/amd/display/dc/dc_stream.h | 1 +
+ .../amd/display/dc/dce110/dce110_hw_sequencer.c | 28 ++++++++++++++--------
+ 4 files changed, 21 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index f8b299c..42d0ce7 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -2497,8 +2497,8 @@ void core_link_enable_stream(
+
+ /* eDP lit up by bios already, no need to enable again. */
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
+- core_dc->apply_edp_fast_boot_optimization) {
+- core_dc->apply_edp_fast_boot_optimization = false;
++ pipe_ctx->stream->apply_edp_fast_boot_optimization) {
++ pipe_ctx->stream->apply_edp_fast_boot_optimization = false;
+ pipe_ctx->stream->dpms_off = false;
+ return;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index f6069a0..ae17668 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -311,8 +311,6 @@ struct dc {
+
+ bool optimized_required;
+
+- bool apply_edp_fast_boot_optimization;
+-
+ /* FBC compressor */
+ struct compressor *fbc_compressor;
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+index 069c2fc..6e375ed 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+@@ -102,6 +102,7 @@ struct dc_stream_state {
+ int phy_pix_clk;
+ enum signal_type signal;
+ bool dpms_off;
++ bool apply_edp_fast_boot_optimization;
+
+ struct dc_stream_status status;
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index 2fa8d04..d1add1b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -1557,32 +1557,40 @@ static struct dc_link *get_link_for_edp_not_in_use(
+ */
+ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
+ {
++ int i;
+ struct dc_link *edp_link_to_turnoff = NULL;
+ struct dc_link *edp_link = get_link_for_edp(dc);
+- bool can_eDP_fast_boot_optimize = false;
++ bool can_edp_fast_boot_optimize = false;
++ bool apply_edp_fast_boot_optimization = false;
+
+ if (edp_link) {
+ /* this seems to cause blank screens on DCE8 */
+ if ((dc->ctx->dce_version == DCE_VERSION_8_0) ||
+ (dc->ctx->dce_version == DCE_VERSION_8_1) ||
+ (dc->ctx->dce_version == DCE_VERSION_8_3))
+- can_eDP_fast_boot_optimize = false;
++ can_edp_fast_boot_optimize = false;
+ else
+- can_eDP_fast_boot_optimize =
++ can_edp_fast_boot_optimize =
+ edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc);
+ }
+
+- if (can_eDP_fast_boot_optimize) {
++ if (can_edp_fast_boot_optimize)
+ edp_link_to_turnoff = get_link_for_edp_not_in_use(dc, context);
+
+- /* if OS doesn't light up eDP and eDP link is available, we want to disable
+- * If resume from S4/S5, should optimization.
+- */
+- if (!edp_link_to_turnoff)
+- dc->apply_edp_fast_boot_optimization = true;
++ /* if OS doesn't light up eDP and eDP link is available, we want to disable
++ * If resume from S4/S5, should optimization.
++ */
++ if (can_edp_fast_boot_optimize && !edp_link_to_turnoff) {
++ /* Find eDP stream and set optimization flag */
++ for (i = 0; i < context->stream_count; i++) {
++ if (context->streams[i]->signal == SIGNAL_TYPE_EDP) {
++ context->streams[i]->apply_edp_fast_boot_optimization = true;
++ apply_edp_fast_boot_optimization = true;
++ }
++ }
+ }
+
+- if (!dc->apply_edp_fast_boot_optimization) {
++ if (!apply_edp_fast_boot_optimization) {
+ if (edp_link_to_turnoff) {
+ /*turn off backlight before DP_blank and encoder powered down*/
+ dc->hwss.edp_backlight_control(edp_link_to_turnoff, false);
+--
+2.7.4
+
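The fix above turns the eDP fast-boot optimization into a per-stream, consume-once flag: dce110_enable_accelerated_mode() tags the eDP streams when the optimization applies, and core_link_enable_stream() later consumes the flag (clearing it) instead of re-enabling a link that VBIOS already lit. A stripped-down sketch of that tag-then-consume pattern, with placeholder types standing in for dc_stream_state:

#include <stdbool.h>
#include <stdio.h>

enum signal_type { SIGNAL_TYPE_EDP, SIGNAL_TYPE_OTHER };

struct stream_state {
	enum signal_type signal;
	bool apply_edp_fast_boot_optimization; /* per-stream, as in the patch */
	bool dpms_off;
};

/* boot path: tag every eDP stream when the optimization is safe to use */
static void tag_edp_streams(struct stream_state *streams, int count,
			    bool can_optimize)
{
	int i;

	for (i = 0; i < count; i++)
		if (can_optimize && streams[i].signal == SIGNAL_TYPE_EDP)
			streams[i].apply_edp_fast_boot_optimization = true;
}

/* enable path: consume the flag once and skip the full link enable */
static bool enable_stream(struct stream_state *stream)
{
	if (stream->signal == SIGNAL_TYPE_EDP &&
	    stream->apply_edp_fast_boot_optimization) {
		stream->apply_edp_fast_boot_optimization = false;
		stream->dpms_off = false;
		return false; /* link already lit by VBIOS, nothing else to do */
	}
	return true; /* caller performs the normal link enable */
}

int main(void)
{
	struct stream_state s = { SIGNAL_TYPE_EDP, false, true };

	tag_edp_streams(&s, 1, true);
	printf("full enable needed: %s\n", enable_stream(&s) ? "yes" : "no");
	return 0;
}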
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5183-drm-amd-display-implement-DPMS-DTN-test-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5183-drm-amd-display-implement-DPMS-DTN-test-v2.patch
new file mode 100644
index 00000000..45f203c4
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5183-drm-amd-display-implement-DPMS-DTN-test-v2.patch
@@ -0,0 +1,607 @@
+From ab4b9705875dbf55351ceae98890bf597c0a97e3 Mon Sep 17 00:00:00 2001
+From: Jun Lei <Jun.Lei@amd.com>
+Date: Wed, 8 Aug 2018 11:53:39 -0400
+Subject: [PATCH 5183/5725] drm/amd/display: implement DPMS DTN test v2
+
+[why]
+Existing DTN infrastructure in driver is hacky. It uses implicit log
+names, and also incorrect escape ID.
+
+[how]
+- Implement using generic DTN escape ID.
+- Move file logging functionality from driver to to script; driver now outputs to string/buffer
+- Move HWSS debug functionality to separate c file
+- Add debug functionalty for per-block logging as CSV
+- Add pretty print in python
+
+Signed-off-by: Jun Lei <Jun.Lei@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/Makefile | 2 +-
+ .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 2 +-
+ .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h | 5 +
+ .../display/dc/dcn10/dcn10_hw_sequencer_debug.c | 510 +++++++++++++++++++++
+ drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 1 +
+ 5 files changed, 518 insertions(+), 2 deletions(-)
+ create mode 100644 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
+index c08137e..9c4b93a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
+@@ -1,7 +1,7 @@
+ #
+ # Makefile for DCN.
+
+-DCN10 = dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o \
++DCN10 = dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o dcn10_hw_sequencer_debug.o \
+ dcn10_dpp.o dcn10_opp.o dcn10_optc.o \
+ dcn10_hubp.o dcn10_mpc.o \
+ dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 4b8bedb..051f427 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -71,7 +71,6 @@ void print_microsec(struct dc_context *dc_ctx, uint32_t ref_cycle)
+ us_x10 % frac);
+ }
+
+-
+ static void log_mpc_crc(struct dc *dc)
+ {
+ struct dc_context *dc_ctx = dc->ctx;
+@@ -2712,6 +2711,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
+ .setup_stereo = dcn10_setup_stereo,
+ .set_avmute = dce110_set_avmute,
+ .log_hw_state = dcn10_log_hw_state,
++ .get_hw_state = dcn10_get_hw_state,
+ .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+ .ready_shared_resources = ready_shared_resources,
+ .optimize_shared_resources = optimize_shared_resources,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+index 7139fb7..84d461e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+@@ -46,4 +46,9 @@ void dcn10_program_pipe(
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context);
+
++void dcn10_get_hw_state(
++ struct dc *dc,
++ char *pBuf, unsigned int bufSize,
++ unsigned int mask);
++
+ #endif /* __DC_HWSS_DCN10_H__ */
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
+new file mode 100644
+index 0000000..9288b00
+--- /dev/null
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
+@@ -0,0 +1,510 @@
++/*
++ * Copyright 2016 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++#include "dm_services.h"
++#include "core_types.h"
++#include "resource.h"
++#include "custom_float.h"
++#include "dcn10_hw_sequencer.h"
++#include "dce110/dce110_hw_sequencer.h"
++#include "dce/dce_hwseq.h"
++#include "abm.h"
++#include "dmcu.h"
++#include "dcn10_optc.h"
++#include "dcn10/dcn10_dpp.h"
++#include "dcn10/dcn10_mpc.h"
++#include "timing_generator.h"
++#include "opp.h"
++#include "ipp.h"
++#include "mpc.h"
++#include "reg_helper.h"
++#include "custom_float.h"
++#include "dcn10_hubp.h"
++#include "dcn10_hubbub.h"
++#include "dcn10_cm_common.h"
++
++static unsigned int snprintf_count(char *pBuf, unsigned int bufSize, char *fmt, ...)
++{
++ unsigned int ret_vsnprintf;
++ unsigned int chars_printed;
++
++ va_list args;
++ va_start(args, fmt);
++
++ ret_vsnprintf = vsnprintf(pBuf, bufSize, fmt, args);
++
++ va_end(args);
++
++ if (ret_vsnprintf > 0) {
++ if (ret_vsnprintf < bufSize)
++ chars_printed = ret_vsnprintf;
++ else
++ chars_printed = bufSize - 1;
++ } else
++ chars_printed = 0;
++
++ return chars_printed;
++}
++
++static unsigned int dcn10_get_hubbub_state(struct dc *dc, char *pBuf, unsigned int bufSize)
++{
++ struct dc_context *dc_ctx = dc->ctx;
++ struct dcn_hubbub_wm wm = {0};
++ int i;
++
++ unsigned int chars_printed = 0;
++ unsigned int remaining_buffer = bufSize;
++
++ const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clock_inKhz / 1000;
++ static const unsigned int frac = 1000;
++
++ hubbub1_wm_read_state(dc->res_pool->hubbub, &wm);
++
++ chars_printed = snprintf_count(pBuf, remaining_buffer, "wm_set_index,data_urgent,pte_meta_urgent,sr_enter,sr_exit,dram_clk_chanage\n");
++ remaining_buffer -= chars_printed;
++ pBuf += chars_printed;
++
++ for (i = 0; i < 4; i++) {
++ struct dcn_hubbub_wm_set *s;
++
++ s = &wm.sets[i];
++
++ chars_printed = snprintf_count(pBuf, remaining_buffer, "%x,%d.%03d,%d.%03d,%d.%03d,%d.%03d,%d.%03d\n",
++ s->wm_set,
++ (s->data_urgent * frac) / ref_clk_mhz / frac, (s->data_urgent * frac) / ref_clk_mhz % frac,
++ (s->pte_meta_urgent * frac) / ref_clk_mhz / frac, (s->pte_meta_urgent * frac) / ref_clk_mhz % frac,
++ (s->sr_enter * frac) / ref_clk_mhz / frac, (s->sr_enter * frac) / ref_clk_mhz % frac,
++ (s->sr_exit * frac) / ref_clk_mhz / frac, (s->sr_exit * frac) / ref_clk_mhz % frac,
++ (s->dram_clk_chanage * frac) / ref_clk_mhz / frac, (s->dram_clk_chanage * frac) / ref_clk_mhz % frac);
++ remaining_buffer -= chars_printed;
++ pBuf += chars_printed;
++ }
++
++ return bufSize - remaining_buffer;
++}
++
++static unsigned int dcn10_get_hubp_states(struct dc *dc, char *pBuf, unsigned int bufSize)
++{
++ struct dc_context *dc_ctx = dc->ctx;
++ struct resource_pool *pool = dc->res_pool;
++ int i;
++
++ unsigned int chars_printed = 0;
++ unsigned int remaining_buffer = bufSize;
++
++ const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clock_inKhz / 1000;
++ static const unsigned int frac = 1000;
++
++ chars_printed = snprintf_count(pBuf, remaining_buffer, "instance,format,addr_hi,width,height,rotation,mirror,sw_mode,dcc_en,blank_en,ttu_dis,underflow,"
++ "min_ttu_vblank,qos_low_wm,qos_high_wm"
++ "\n");
++ remaining_buffer -= chars_printed;
++ pBuf += chars_printed;
++
++ for (i = 0; i < pool->pipe_count; i++) {
++ struct hubp *hubp = pool->hubps[i];
++ struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
++
++ hubp->funcs->hubp_read_state(hubp);
++
++ if (!s->blank_en) {
++ chars_printed = snprintf_count(pBuf, remaining_buffer, "%x,%x,%x,%d,%d,%x,%x,%x,%x,%x,%x,%x,"
++ "%d.%03d,%d.%03d,%d.%03d"
++ "\n",
++ hubp->inst,
++ s->pixel_format,
++ s->inuse_addr_hi,
++ s->viewport_width,
++ s->viewport_height,
++ s->rotation_angle,
++ s->h_mirror_en,
++ s->sw_mode,
++ s->dcc_en,
++ s->blank_en,
++ s->ttu_disable,
++ s->underflow_status,
++ (s->min_ttu_vblank * frac) / ref_clk_mhz / frac, (s->min_ttu_vblank * frac) / ref_clk_mhz % frac,
++ (s->qos_level_low_wm * frac) / ref_clk_mhz / frac, (s->qos_level_low_wm * frac) / ref_clk_mhz % frac,
++ (s->qos_level_high_wm * frac) / ref_clk_mhz / frac, (s->qos_level_high_wm * frac) / ref_clk_mhz % frac);
++
++ remaining_buffer -= chars_printed;
++ pBuf += chars_printed;
++ }
++ }
++
++ return bufSize - remaining_buffer;
++}
++
++static unsigned int dcn10_get_rq_states(struct dc *dc, char *pBuf, unsigned int bufSize)
++{
++ struct resource_pool *pool = dc->res_pool;
++ int i;
++
++ unsigned int chars_printed = 0;
++ unsigned int remaining_buffer = bufSize;
++
++ chars_printed = snprintf_count(pBuf, remaining_buffer, "instance,drq_exp_m,prq_exp_m,mrq_exp_m,crq_exp_m,plane1_ba,"
++ "luma_chunk_s,luma_min_chu_s,luma_meta_ch_s,luma_min_m_c_s,luma_dpte_gr_s,luma_mpte_gr_s,luma_swath_hei,luma_pte_row_h,"
++ "chroma_chunk_s,chroma_min_chu_s,chroma_meta_ch_s,chroma_min_m_c_s,chroma_dpte_gr_s,chroma_mpte_gr_s,chroma_swath_hei,chroma_pte_row_h"
++ "\n");
++ remaining_buffer -= chars_printed;
++ pBuf += chars_printed;
++
++ for (i = 0; i < pool->pipe_count; i++) {
++ struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
++ struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
++
++ if (!s->blank_en) {
++ chars_printed = snprintf_count(pBuf, remaining_buffer, "%x,%x,%x,%x,%x,%x,"
++ "%x,%x,%x,%x,%x,%x,%x,%x,"
++ "%x,%x,%x,%x,%x,%x,%x,%x"
++ "\n",
++ pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
++ rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
++ rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
++ rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
++ rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
++ rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
++ rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
++ rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
++ rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
++
++ remaining_buffer -= chars_printed;
++ pBuf += chars_printed;
++ }
++ }
++
++ return bufSize - remaining_buffer;
++}
++
++static unsigned int dcn10_get_dlg_states(struct dc *dc, char *pBuf, unsigned int bufSize)
++{
++ struct resource_pool *pool = dc->res_pool;
++ int i;
++
++ unsigned int chars_printed = 0;
++ unsigned int remaining_buffer = bufSize;
++
++ chars_printed = snprintf_count(pBuf, remaining_buffer, "instance,rc_hbe,dlg_vbe,min_d_y_n,rc_per_ht,rc_x_a_s,"
++ "dst_y_a_s,dst_y_pf,dst_y_vvb,dst_y_rvb,dst_y_vfl,dst_y_rfl,rf_pix_fq,"
++ "vratio_pf,vrat_pf_c,rc_pg_vbl,rc_pg_vbc,rc_mc_vbl,rc_mc_vbc,rc_pg_fll,"
++ "rc_pg_flc,rc_mc_fll,rc_mc_flc,pr_nom_l,pr_nom_c,rc_pg_nl,rc_pg_nc,"
++ "mr_nom_l,mr_nom_c,rc_mc_nl,rc_mc_nc,rc_ld_pl,rc_ld_pc,rc_ld_l,"
++ "rc_ld_c,cha_cur0,ofst_cur1,cha_cur1,vr_af_vc0,ddrq_limt,x_rt_dlay,x_rp_dlay,x_rr_sfl"
++ "\n");
++ remaining_buffer -= chars_printed;
++ pBuf += chars_printed;
++
++ for (i = 0; i < pool->pipe_count; i++) {
++ struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
++ struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
++
++ if (!s->blank_en) {
++ chars_printed = snprintf_count(pBuf, remaining_buffer, "%x,%x,%x,%x,%x,"
++ "%x,%x,%x,%x,%x,%x,%x,"
++ "%x,%x,%x,%x,%x,%x,%x,"
++ "%x,%x,%x,%x,%x,%x,%x,"
++ "%x,%x,%x,%x,%x,%x,%x,"
++ "%x,%x,%x,%x,%x,%x,%x,%x,%x,%x"
++ "\n",
++ pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
++ dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
++ dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
++ dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
++ dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
++ dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
++ dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
++ dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
++ dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
++ dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
++ dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
++ dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
++ dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
++ dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
++ dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
++ dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
++ dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
++ dlg_regs->xfc_reg_remote_surface_flip_latency);
++
++ remaining_buffer -= chars_printed;
++ pBuf += chars_printed;
++ }
++ }
++
++ return bufSize - remaining_buffer;
++}
++
++static unsigned int dcn10_get_ttu_states(struct dc *dc, char *pBuf, unsigned int bufSize)
++{
++ struct resource_pool *pool = dc->res_pool;
++ int i;
++
++ unsigned int chars_printed = 0;
++ unsigned int remaining_buffer = bufSize;
++
++ chars_printed = snprintf_count(pBuf, remaining_buffer, "instance,qos_ll_wm,qos_lh_wm,mn_ttu_vb,qos_l_flp,rc_rd_p_l,rc_rd_l,rc_rd_p_c,"
++ "rc_rd_c,rc_rd_c0,rc_rd_pc0,rc_rd_c1,rc_rd_pc1,qos_lf_l,qos_rds_l,"
++ "qos_lf_c,qos_rds_c,qos_lf_c0,qos_rds_c0,qos_lf_c1,qos_rds_c1"
++ "\n");
++ remaining_buffer -= chars_printed;
++ pBuf += chars_printed;
++
++ for (i = 0; i < pool->pipe_count; i++) {
++ struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
++ struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;
++
++ if (!s->blank_en) {
++ chars_printed = snprintf_count(pBuf, remaining_buffer, "%x,%x,%x,%x,%x,%x,%x,%x,"
++ "%x,%x,%x,%x,%x,%x,%x,"
++ "%x,%x,%x,%x,%x,%x"
++ "\n",
++ pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
++ ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
++ ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
++ ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
++ ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
++ ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
++ ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
++
++ remaining_buffer -= chars_printed;
++ pBuf += chars_printed;
++ }
++ }
++
++ return bufSize - remaining_buffer;
++}
++
++static unsigned int dcn10_get_cm_states(struct dc *dc, char *pBuf, unsigned int bufSize)
++{
++ struct resource_pool *pool = dc->res_pool;
++ int i;
++
++ unsigned int chars_printed = 0;
++ unsigned int remaining_buffer = bufSize;
++
++ chars_printed = snprintf_count(pBuf, remaining_buffer, "instance,igam_format,igam_mode,dgam_mode,rgam_mode,gamut_mode,"
++ "c11_c12,c13_c14,c21_c22,c23_c24,c31_c32,c33_c34"
++ "\n");
++ remaining_buffer -= chars_printed;
++ pBuf += chars_printed;
++
++ for (i = 0; i < pool->pipe_count; i++) {
++ struct dpp *dpp = pool->dpps[i];
++ struct dcn_dpp_state s = {0};
++
++ dpp->funcs->dpp_read_state(dpp, &s);
++
++ if (s.is_enabled) {
++ chars_printed = snprintf_count(pBuf, remaining_buffer, "%x,%x,%x,%x,%x,%x,"
++ "%08x,%08x,%08x,%08x,%08x,%08x"
++ "\n",
++ dpp->inst, s.igam_input_format, s.igam_lut_mode, s.dgam_lut_mode,
++ s.rgam_lut_mode, s.gamut_remap_mode, s.gamut_remap_c11_c12,
++ s.gamut_remap_c13_c14, s.gamut_remap_c21_c22, s.gamut_remap_c23_c24,
++ s.gamut_remap_c31_c32, s.gamut_remap_c33_c34);
++
++ remaining_buffer -= chars_printed;
++ pBuf += chars_printed;
++ }
++ }
++
++ return bufSize - remaining_buffer;
++}
++
++static unsigned int dcn10_get_mpcc_states(struct dc *dc, char *pBuf, unsigned int bufSize)
++{
++ struct resource_pool *pool = dc->res_pool;
++ int i;
++
++ unsigned int chars_printed = 0;
++ unsigned int remaining_buffer = bufSize;
++
++ chars_printed = snprintf_count(pBuf, remaining_buffer, "instance,opp,dpp,mpccbot,mode,alpha_mode,premult,overlap_only,idle\n");
++ remaining_buffer -= chars_printed;
++ pBuf += chars_printed;
++
++ for (i = 0; i < pool->pipe_count; i++) {
++ struct mpcc_state s = {0};
++
++ pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
++
++ if (s.opp_id != 0xf) {
++ chars_printed = snprintf_count(pBuf, remaining_buffer, "%x,%x,%x,%x,%x,%x,%x,%x,%x\n",
++ i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
++ s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
++ s.idle);
++
++ remaining_buffer -= chars_printed;
++ pBuf += chars_printed;
++ }
++ }
++
++ return bufSize - remaining_buffer;
++}
++
++static unsigned int dcn10_get_otg_states(struct dc *dc, char *pBuf, unsigned int bufSize)
++{
++ struct resource_pool *pool = dc->res_pool;
++ int i;
++
++ unsigned int chars_printed = 0;
++ unsigned int remaining_buffer = bufSize;
++
++ chars_printed = snprintf_count(pBuf, remaining_buffer, "instance,v_bs,v_be,v_ss,v_se,vpol,vmax,vmin,vmax_sel,vmin_sel,"
++ "h_bs,h_be,h_ss,h_se,hpol,htot,vtot,underflow\n");
++ remaining_buffer -= chars_printed;
++ pBuf += chars_printed;
++
++ for (i = 0; i < pool->timing_generator_count; i++) {
++ struct timing_generator *tg = pool->timing_generators[i];
++ struct dcn_otg_state s = {0};
++
++ optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
++
++ //only print if OTG master is enabled
++ if (s.otg_enabled & 1) {
++ chars_printed = snprintf_count(pBuf, remaining_buffer, "%x,%d,%d,%d,%d,%d,%d,%d,%d,%d,"
++ "%d,%d,%d,%d,%d,%d,%d,%d"
++ "\n",
++ tg->inst,
++ s.v_blank_start,
++ s.v_blank_end,
++ s.v_sync_a_start,
++ s.v_sync_a_end,
++ s.v_sync_a_pol,
++ s.v_total_max,
++ s.v_total_min,
++ s.v_total_max_sel,
++ s.v_total_min_sel,
++ s.h_blank_start,
++ s.h_blank_end,
++ s.h_sync_a_start,
++ s.h_sync_a_end,
++ s.h_sync_a_pol,
++ s.h_total,
++ s.v_total,
++ s.underflow_occurred_status);
++
++ remaining_buffer -= chars_printed;
++ pBuf += chars_printed;
++
++ // Clear underflow for debug purposes
++ // We want to keep underflow sticky bit on for the longevity tests outside of test environment.
++ // This function is called only from Windows or Diags test environment, hence it's safe to clear
++ // it from here without affecting the original intent.
++ tg->funcs->clear_optc_underflow(tg);
++ }
++ }
++
++ return bufSize - remaining_buffer;
++}
++
++static unsigned int dcn10_get_clock_states(struct dc *dc, char *pBuf, unsigned int bufSize)
++{
++ unsigned int chars_printed = 0;
++
++ chars_printed = snprintf_count(pBuf, bufSize, "dcfclk_khz,dcfclk_deep_sleep_khz,dispclk_khz,"
++ "dppclk_khz,max_supported_dppclk_khz,fclk_khz,socclk_khz\n"
++ "%d,%d,%d,%d,%d,%d,%d\n",
++ dc->current_state->bw.dcn.clk.dcfclk_khz,
++ dc->current_state->bw.dcn.clk.dcfclk_deep_sleep_khz,
++ dc->current_state->bw.dcn.clk.dispclk_khz,
++ dc->current_state->bw.dcn.clk.dppclk_khz,
++ dc->current_state->bw.dcn.clk.max_supported_dppclk_khz,
++ dc->current_state->bw.dcn.clk.fclk_khz,
++ dc->current_state->bw.dcn.clk.socclk_khz);
++
++ return chars_printed;
++}
++
++void dcn10_get_hw_state(struct dc *dc, char *pBuf, unsigned int bufSize, unsigned int mask)
++{
++ const unsigned int DC_HW_STATE_MASK_HUBBUB = 0x1;
++ const unsigned int DC_HW_STATE_MASK_HUBP = 0x2;
++ const unsigned int DC_HW_STATE_MASK_RQ = 0x4;
++ const unsigned int DC_HW_STATE_MASK_DLG = 0x8;
++ const unsigned int DC_HW_STATE_MASK_TTU = 0x10;
++ const unsigned int DC_HW_STATE_MASK_CM = 0x20;
++ const unsigned int DC_HW_STATE_MASK_MPCC = 0x40;
++ const unsigned int DC_HW_STATE_MASK_OTG = 0x80;
++ const unsigned int DC_HW_STATE_MASK_CLOCKS = 0x100;
++
++ unsigned int chars_printed = 0;
++ unsigned int remaining_buf_size = bufSize;
++
++ if (mask == 0x0)
++ mask = 0xFFFF;
++
++ if ((mask & DC_HW_STATE_MASK_HUBBUB) && remaining_buf_size > 0) {
++ chars_printed = dcn10_get_hubbub_state(dc, pBuf, remaining_buf_size);
++ pBuf += chars_printed;
++ remaining_buf_size -= chars_printed;
++ }
++
++ if ((mask & DC_HW_STATE_MASK_HUBP) && remaining_buf_size > 0) {
++ chars_printed = dcn10_get_hubp_states(dc, pBuf, remaining_buf_size);
++ pBuf += chars_printed;
++ remaining_buf_size -= chars_printed;
++ }
++
++ if ((mask & DC_HW_STATE_MASK_RQ) && remaining_buf_size > 0) {
++ chars_printed = dcn10_get_rq_states(dc, pBuf, remaining_buf_size);
++ pBuf += chars_printed;
++ remaining_buf_size -= chars_printed;
++ }
++
++ if ((mask & DC_HW_STATE_MASK_DLG) && remaining_buf_size > 0) {
++ chars_printed = dcn10_get_dlg_states(dc, pBuf, remaining_buf_size);
++ pBuf += chars_printed;
++ remaining_buf_size -= chars_printed;
++ }
++
++ if ((mask & DC_HW_STATE_MASK_TTU) && remaining_buf_size > 0) {
++ chars_printed = dcn10_get_ttu_states(dc, pBuf, remaining_buf_size);
++ pBuf += chars_printed;
++ remaining_buf_size -= chars_printed;
++ }
++
++ if ((mask & DC_HW_STATE_MASK_CM) && remaining_buf_size > 0) {
++ chars_printed = dcn10_get_cm_states(dc, pBuf, remaining_buf_size);
++ pBuf += chars_printed;
++ remaining_buf_size -= chars_printed;
++ }
++
++ if ((mask & DC_HW_STATE_MASK_MPCC) && remaining_buf_size > 0) {
++ chars_printed = dcn10_get_mpcc_states(dc, pBuf, remaining_buf_size);
++ pBuf += chars_printed;
++ remaining_buf_size -= chars_printed;
++ }
++
++ if ((mask & DC_HW_STATE_MASK_OTG) && remaining_buf_size > 0) {
++ chars_printed = dcn10_get_otg_states(dc, pBuf, remaining_buf_size);
++ pBuf += chars_printed;
++ remaining_buf_size -= chars_printed;
++ }
++
++ if ((mask & DC_HW_STATE_MASK_CLOCKS) && remaining_buf_size > 0)
++ chars_printed = dcn10_get_clock_states(dc, pBuf, remaining_buf_size);
++}
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+index a14ce4d..9a97356 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+@@ -203,6 +203,7 @@ struct hw_sequencer_funcs {
+ void (*set_avmute)(struct pipe_ctx *pipe_ctx, bool enable);
+
+ void (*log_hw_state)(struct dc *dc);
++ void (*get_hw_state)(struct dc *dc, char *pBuf, unsigned int bufSize, unsigned int mask);
+
+ void (*wait_for_mpcc_disconnect)(struct dc *dc,
+ struct resource_pool *res_pool,
+--
+2.7.4
+
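The hw-state dump added above follows one pattern throughout: each section printer writes into the caller's buffer through a truncation-safe snprintf wrapper and returns how many characters it produced, while the dispatcher advances the buffer pointer, shrinks the remaining size, and uses a bitmask to select which sections to emit. A standalone userspace sketch of that pattern (names and masks here are illustrative, not the driver's):

/*
 * Illustrative sketch of the mask-gated dump pattern; not driver code.
 */
#include <stdarg.h>
#include <stdio.h>

static unsigned int snprintf_count(char *buf, unsigned int size, const char *fmt, ...)
{
	int ret;
	va_list args;

	if (size == 0)
		return 0;

	va_start(args, fmt);
	ret = vsnprintf(buf, size, fmt, args);
	va_end(args);

	if (ret <= 0)
		return 0;			/* nothing usable was written */
	if ((unsigned int)ret >= size)
		return size - 1;		/* output was truncated to fit */
	return (unsigned int)ret;
}

#define DUMP_MASK_A 0x1
#define DUMP_MASK_B 0x2

static unsigned int dump_section_a(char *buf, unsigned int size)
{
	return snprintf_count(buf, size, "section_a,value\nA,%d\n", 42);
}

static unsigned int dump_section_b(char *buf, unsigned int size)
{
	return snprintf_count(buf, size, "section_b,value\nB,%d\n", 7);
}

int main(void)
{
	char out[256];
	char *p = out;
	unsigned int remaining = sizeof(out);
	unsigned int mask = 0;			/* 0 means "dump everything" */
	unsigned int n;

	if (mask == 0)
		mask = ~0u;

	if ((mask & DUMP_MASK_A) && remaining > 0) {
		n = dump_section_a(p, remaining);
		p += n;
		remaining -= n;
	}
	if ((mask & DUMP_MASK_B) && remaining > 0) {
		n = dump_section_b(p, remaining);
		p += n;
		remaining -= n;
	}

	fputs(out, stdout);
	return 0;
}

Returning the truncated count (bufSize - 1) instead of vsnprintf's would-be length keeps the dispatcher's pointer arithmetic from running past the end of the buffer.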
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5184-drm-amdgpu-Remove-the-sriov-checking-and-add-firmwar.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5184-drm-amdgpu-Remove-the-sriov-checking-and-add-firmwar.patch
new file mode 100644
index 00000000..fbf29c59
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5184-drm-amdgpu-Remove-the-sriov-checking-and-add-firmwar.patch
@@ -0,0 +1,122 @@
+From 54c201258fcaba7753378e0770a46d6bb67e9b94 Mon Sep 17 00:00:00 2001
+From: Emily Deng <Emily.Deng@amd.com>
+Date: Fri, 17 Aug 2018 18:26:41 +0800
+Subject: [PATCH 5184/5725] drm/amdgpu: Remove the sriov checking and add
+ firmware checking
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Unify the bare-metal and SR-IOV paths, and add firmware version
+checking for the unified reg write/reg wait command.
+
+Signed-off-by: Emily Deng <Emily.Deng@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Reviewed-and-Tested-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 2 ++
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 59 ++++++++++++++++++++++++++++++++-
+ 2 files changed, 60 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+index 53e9e2a..f172e92 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+@@ -274,6 +274,8 @@ struct amdgpu_gfx {
+ uint32_t rlc_srls_feature_version;
+ uint32_t mec_feature_version;
+ uint32_t mec2_feature_version;
++ bool mec_fw_write_wait;
++ bool me_fw_write_wait;
+ struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS];
+ unsigned num_gfx_rings;
+ struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 2b78ec4..7a95547 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -482,6 +482,59 @@ static void gfx_v9_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
+ le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
+ }
+
++static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
++{
++ adev->gfx.me_fw_write_wait = false;
++ adev->gfx.mec_fw_write_wait = false;
++
++ switch (adev->asic_type) {
++ case CHIP_VEGA10:
++ if ((adev->gfx.me_fw_version >= 0x0000009c) &&
++ (adev->gfx.me_feature_version >= 42) &&
++ (adev->gfx.pfp_fw_version >= 0x000000b1) &&
++ (adev->gfx.pfp_feature_version >= 42))
++ adev->gfx.me_fw_write_wait = true;
++
++ if ((adev->gfx.mec_fw_version >= 0x00000193) &&
++ (adev->gfx.mec_feature_version >= 42))
++ adev->gfx.mec_fw_write_wait = true;
++ break;
++ case CHIP_VEGA12:
++ if ((adev->gfx.me_fw_version >= 0x0000009c) &&
++ (adev->gfx.me_feature_version >= 44) &&
++ (adev->gfx.pfp_fw_version >= 0x000000b2) &&
++ (adev->gfx.pfp_feature_version >= 44))
++ adev->gfx.me_fw_write_wait = true;
++
++ if ((adev->gfx.mec_fw_version >= 0x00000196) &&
++ (adev->gfx.mec_feature_version >= 44))
++ adev->gfx.mec_fw_write_wait = true;
++ break;
++ case CHIP_VEGA20:
++ if ((adev->gfx.me_fw_version >= 0x0000009c) &&
++ (adev->gfx.me_feature_version >= 44) &&
++ (adev->gfx.pfp_fw_version >= 0x000000b2) &&
++ (adev->gfx.pfp_feature_version >= 44))
++ adev->gfx.me_fw_write_wait = true;
++
++ if ((adev->gfx.mec_fw_version >= 0x00000197) &&
++ (adev->gfx.mec_feature_version >= 44))
++ adev->gfx.mec_fw_write_wait = true;
++ break;
++ case CHIP_RAVEN:
++ if ((adev->gfx.me_fw_version >= 0x0000009c) &&
++ (adev->gfx.me_feature_version >= 42) &&
++ (adev->gfx.pfp_fw_version >= 0x000000b1) &&
++ (adev->gfx.pfp_feature_version >= 42))
++ adev->gfx.me_fw_write_wait = true;
++
++ if ((adev->gfx.mec_fw_version >= 0x00000192) &&
++ (adev->gfx.mec_feature_version >= 42))
++ adev->gfx.mec_fw_write_wait = true;
++ break;
++ }
++}
++
+ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
+ {
+ const char *chip_name;
+@@ -716,6 +769,7 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
+ }
+
+ out:
++ gfx_v9_0_check_fw_write_wait(adev);
+ if (err) {
+ dev_err(adev->dev,
+ "gfx9: Failed to load firmware \"%s\"\n",
+@@ -4366,8 +4420,11 @@ static void gfx_v9_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
+ uint32_t ref, uint32_t mask)
+ {
+ int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
++ struct amdgpu_device *adev = ring->adev;
++ bool fw_version_ok = (ring->funcs->type == AMDGPU_RING_TYPE_GFX) ?
++ adev->gfx.me_fw_write_wait : adev->gfx.mec_fw_write_wait;
+
+- if (amdgpu_sriov_vf(ring->adev))
++ if (fw_version_ok)
+ gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
+ ref, mask, 0x20);
+ else
+--
+2.7.4
+
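gfx_v9_0_check_fw_write_wait() above caches, per ASIC, whether the loaded ME/MEC firmware is new enough for the combined write+wait packet, so the ring-emit path only has to test a boolean later. A standalone sketch of that version-gating idea, with placeholder chip names and thresholds rather than the driver's real values:

/*
 * Standalone sketch of gating a fast path on minimum firmware versions
 * per chip. Chip names and thresholds are placeholders.
 */
#include <stdbool.h>
#include <stdio.h>

enum chip { CHIP_A, CHIP_B, CHIP_UNKNOWN };

struct fw_info {
	enum chip chip;
	unsigned int me_fw_version;
	unsigned int me_feature_version;
};

static bool me_supports_write_wait(const struct fw_info *fw)
{
	switch (fw->chip) {
	case CHIP_A:
		return fw->me_fw_version >= 0x9c && fw->me_feature_version >= 42;
	case CHIP_B:
		return fw->me_fw_version >= 0x9c && fw->me_feature_version >= 44;
	default:
		return false;	/* unknown or older chips keep the safe fallback */
	}
}

int main(void)
{
	struct fw_info fw = { CHIP_A, 0x9d, 42 };

	if (me_supports_write_wait(&fw))
		printf("emit the combined write+wait packet\n");
	else
		printf("emit a separate write, then wait\n");
	return 0;
}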
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5185-drm-amdgpu-use-kiq-to-do-invalidate-tlb.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5185-drm-amdgpu-use-kiq-to-do-invalidate-tlb.patch
new file mode 100644
index 00000000..f48fafec
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5185-drm-amdgpu-use-kiq-to-do-invalidate-tlb.patch
@@ -0,0 +1,171 @@
+From af65449ea823482ec1da97e04e7daf2414842292 Mon Sep 17 00:00:00 2001
+From: Emily Deng <Emily.Deng@amd.com>
+Date: Fri, 17 Aug 2018 18:25:36 +0800
+Subject: [PATCH 5185/5725] drm/amdgpu: use kiq to do invalidate tlb
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+To ensure the TLB flush is not interrupted by a world switch, use the
+KIQ and a single command to do the TLB invalidation.
+
+v2:
+Refine the invalidate lock position.
+
+Signed-off-by: Emily Deng <Emily.Deng@amd.com>
+Reviewed-and-Tested-by: Rex Zhu <Rex.Zhu@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 4 ++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 3 --
+ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 74 +++++++++++++++++++++++++++++---
+ 3 files changed, 71 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index ece78da..b11832a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -216,6 +216,10 @@ enum amdgpu_kiq_irq {
+ AMDGPU_CP_KIQ_IRQ_LAST
+ };
+
++#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
++#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
++#define MAX_KIQ_REG_TRY 20
++
+ int amdgpu_device_ip_set_clockgating_state(void *dev,
+ enum amd_ip_block_type block_type,
+ enum amd_clockgating_state state);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+index 21adb1b6..3885636 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+@@ -22,9 +22,6 @@
+ */
+
+ #include "amdgpu.h"
+-#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
+-#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
+-#define MAX_KIQ_REG_TRY 20
+
+ uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
+ {
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index 8a5bccc..ff9868a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -310,6 +310,58 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid)
+ return req;
+ }
+
++signed long amdgpu_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
++ uint32_t reg0, uint32_t reg1,
++ uint32_t ref, uint32_t mask)
++{
++ signed long r, cnt = 0;
++ unsigned long flags;
++ uint32_t seq;
++ struct amdgpu_kiq *kiq = &adev->gfx.kiq;
++ struct amdgpu_ring *ring = &kiq->ring;
++
++ if (!ring->ready)
++ return -EINVAL;
++
++ spin_lock_irqsave(&kiq->ring_lock, flags);
++
++ amdgpu_ring_alloc(ring, 32);
++ amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
++ ref, mask);
++ amdgpu_fence_emit_polling(ring, &seq);
++ amdgpu_ring_commit(ring);
++ spin_unlock_irqrestore(&kiq->ring_lock, flags);
++
++ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
++
++ /* don't wait anymore for gpu reset case because this way may
++ * block gpu_recover() routine forever, e.g. this virt_kiq_rreg
++ * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
++ * never return if we keep waiting in virt_kiq_rreg, which cause
++ * gpu_recover() hang there.
++ *
++ * also don't wait anymore for IRQ context
++ * */
++ if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
++ goto failed_kiq;
++
++ might_sleep();
++
++ while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
++ msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
++ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
++ }
++
++ if (cnt > MAX_KIQ_REG_TRY)
++ goto failed_kiq;
++
++ return 0;
++
++failed_kiq:
++ pr_err("failed to invalidate tlb with kiq\n");
++ return r;
++}
++
+ /*
+ * GART
+ * VMID 0 is the physical GPU addresses as used by the kernel.
+@@ -331,13 +383,19 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
+ /* Use register 17 for GART */
+ const unsigned eng = 17;
+ unsigned i, j;
+-
+- spin_lock(&adev->gmc.invalidate_lock);
++ int r;
+
+ for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
+ struct amdgpu_vmhub *hub = &adev->vmhub[i];
+ u32 tmp = gmc_v9_0_get_invalidate_req(vmid);
+
++ r = amdgpu_kiq_reg_write_reg_wait(adev, hub->vm_inv_eng0_req + eng,
++ hub->vm_inv_eng0_ack + eng, tmp, 1 << vmid);
++ if (!r)
++ continue;
++
++ spin_lock(&adev->gmc.invalidate_lock);
++
+ WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
+
+ /* Busy wait for ACK.*/
+@@ -348,8 +406,10 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
+ break;
+ cpu_relax();
+ }
+- if (j < 100)
++ if (j < 100) {
++ spin_unlock(&adev->gmc.invalidate_lock);
+ continue;
++ }
+
+ /* Wait for ACK with a delay.*/
+ for (j = 0; j < adev->usec_timeout; j++) {
+@@ -359,13 +419,13 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
+ break;
+ udelay(1);
+ }
+- if (j < adev->usec_timeout)
++ if (j < adev->usec_timeout) {
++ spin_unlock(&adev->gmc.invalidate_lock);
+ continue;
+-
++ }
++ spin_unlock(&adev->gmc.invalidate_lock);
+ DRM_ERROR("Timeout waiting for VM flush ACK!\n");
+ }
+-
+- spin_unlock(&adev->gmc.invalidate_lock);
+ }
+
+ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
+--
+2.7.4
+
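amdgpu_kiq_reg_write_reg_wait() above submits one write+wait command to the KIQ ring, polls the fence for a bounded number of retries, and on failure lets gmc_v9_0_flush_gpu_tlb() fall back to direct register access. A simulated userspace sketch of that control flow; the fence and queue below are stand-ins, not the driver's APIs:

/* Simulated sketch of "submit once, poll with bounded retries, fall back". */
#include <stdbool.h>
#include <stdio.h>

#define MAX_WAIT_STEPS 20

static int polls_needed = 3;	/* pretend fence: signals after a few polls */

static bool fence_signaled(void)
{
	return --polls_needed <= 0;
}

static int invalidate_via_queue(void)
{
	int tries;

	/* the single write+wait command would be submitted here */
	for (tries = 0; tries < MAX_WAIT_STEPS; tries++) {
		if (fence_signaled())
			return 0;	/* invalidation completed */
		/* the driver sleeps briefly between polls; omitted here */
	}
	return -1;			/* timed out */
}

int main(void)
{
	if (invalidate_via_queue() == 0)
		printf("TLB invalidated via the queue\n");
	else
		printf("queue path timed out, using direct register fallback\n");
	return 0;
}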
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5186-drm-amdgpu-remove-fulll-access-for-suspend-phase1.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5186-drm-amdgpu-remove-fulll-access-for-suspend-phase1.patch
new file mode 100644
index 00000000..1fa08b9d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5186-drm-amdgpu-remove-fulll-access-for-suspend-phase1.patch
@@ -0,0 +1,43 @@
+From 8fdf3237b9fa11639eae655c1ffb7d9391f52f91 Mon Sep 17 00:00:00 2001
+From: Yintian Tao <yttao@amd.com>
+Date: Thu, 16 Aug 2018 16:17:57 +0800
+Subject: [PATCH 5186/5725] drm/amdgpu: remove full access for suspend phase1
+
+There is no need for gpu full access for suspend phase1
+because under virtualization there is no hw register access
+for dce block.
+
+Signed-off-by: Yintian Tao <yttao@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 6 ------
+ 1 file changed, 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index b5d0c9c..1acb85e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -2023,9 +2023,6 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
+ {
+ int i, r;
+
+- if (amdgpu_sriov_vf(adev))
+- amdgpu_virt_request_full_gpu(adev, false);
+-
+ for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+ if (!adev->ip_blocks[i].status.valid)
+ continue;
+@@ -2041,9 +2038,6 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
+ }
+ }
+
+- if (amdgpu_sriov_vf(adev))
+- amdgpu_virt_release_full_gpu(adev, false);
+-
+ return 0;
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5187-drm-amdgpu-Fix-compile-warning.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5187-drm-amdgpu-Fix-compile-warning.patch
new file mode 100644
index 00000000..0c79546b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5187-drm-amdgpu-Fix-compile-warning.patch
@@ -0,0 +1,36 @@
+From 9fde38ed94d85441a2aa4848d2d6b79103b17d07 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Mon, 20 Aug 2018 20:19:18 +0800
+Subject: [PATCH 5187/5725] drm/amdgpu: Fix compile warning
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+In function ‘gfx_v9_0_check_fw_write_wait’:
+warning: enumeration value ‘CHIP_TAHITI’ not handled in switch [-Wswitch]
+
+Always add a default case in case there is no match.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 7a95547..29a7727 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -532,6 +532,8 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
+ (adev->gfx.mec_feature_version >= 42))
+ adev->gfx.mec_fw_write_wait = true;
+ break;
++ default:
++ break;
+ }
+ }
+
+--
+2.7.4
+
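-Wswitch fires when a switch over an enum neither handles every enumerator nor provides a default branch. A minimal illustration with stand-in names; the added default covers present and future enumerators:

/* Without the default branch, GCC warns that CHIP_B and CHIP_C are not
 * handled in the switch; "default: break;" silences it (valid C either way). */
enum chip { CHIP_A, CHIP_B, CHIP_C };

void check_quirks(enum chip c, int *quirk)
{
	switch (c) {
	case CHIP_A:
		*quirk = 1;
		break;
	default:	/* covers CHIP_B, CHIP_C and any later additions */
		break;
	}
}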
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5188-drm-amdgpu-fix-sdma-doorbell-range-setting.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5188-drm-amdgpu-fix-sdma-doorbell-range-setting.patch
new file mode 100644
index 00000000..ac85d682
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5188-drm-amdgpu-fix-sdma-doorbell-range-setting.patch
@@ -0,0 +1,31 @@
+From e170d935d5e91d9d5ad652b5125af623efcfbd62 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Tue, 21 Aug 2018 14:51:53 +0800
+Subject: [PATCH 5188/5725] drm/amdgpu: fix sdma doorbell range setting
+
+Use the old doorbell range setting until the driver is
+able to support more sdma queues.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+index 89ea920..2e65447 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+@@ -76,7 +76,7 @@ static void nbio_v7_4_sdma_doorbell_range(struct amdgpu_device *adev, int instan
+
+ if (use_doorbell) {
+ doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
+- doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 8);
++ doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 2);
+ } else
+ doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);
+
+--
+2.7.4
+
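REG_SET_FIELD() above is, in effect, a read-modify-write of one named field inside a 32-bit register value: clear the field's bits, then OR in the new value shifted into place. A generic sketch of that operation; the field layout below is invented for illustration:

/* Sketch of updating one bitfield inside a register value; layout is made up. */
#include <stdint.h>
#include <stdio.h>

#define DOORBELL_SIZE_SHIFT	16
#define DOORBELL_SIZE_MASK	(0x1fu << DOORBELL_SIZE_SHIFT)

static uint32_t set_doorbell_size(uint32_t reg, uint32_t size)
{
	reg &= ~DOORBELL_SIZE_MASK;
	reg |= (size << DOORBELL_SIZE_SHIFT) & DOORBELL_SIZE_MASK;
	return reg;
}

int main(void)
{
	uint32_t range = 0;

	range = set_doorbell_size(range, 2);	/* the patch shrinks SIZE from 8 to 2 */
	printf("0x%08x\n", range);
	return 0;
}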
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5189-drm-amdgpu-sriov-Only-sriov-runtime-support-use-kiq.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5189-drm-amdgpu-sriov-Only-sriov-runtime-support-use-kiq.patch
new file mode 100644
index 00000000..6ef48024
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5189-drm-amdgpu-sriov-Only-sriov-runtime-support-use-kiq.patch
@@ -0,0 +1,55 @@
+From 31e80a31227132bf2918e9df1fbafd84854bfbc2 Mon Sep 17 00:00:00 2001
+From: Emily Deng <Emily.Deng@amd.com>
+Date: Tue, 21 Aug 2018 18:51:38 +0800
+Subject: [PATCH 5189/5725] drm/amdgpu/sriov: Only sriov runtime support use
+ kiq
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+For SR-IOV, don't use the KIQ in exclusive mode; we don't know how long
+it will take, and it can sometimes cause an exclusive-mode timeout.
+
+Signed-off-by: Emily Deng <Emily.Deng@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 15 ++++++++-------
+ 1 file changed, 8 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index ff9868a..4b54027 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -320,9 +320,6 @@ signed long amdgpu_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ struct amdgpu_ring *ring = &kiq->ring;
+
+- if (!ring->ready)
+- return -EINVAL;
+-
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+
+ amdgpu_ring_alloc(ring, 32);
+@@ -389,10 +386,14 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
+ struct amdgpu_vmhub *hub = &adev->vmhub[i];
+ u32 tmp = gmc_v9_0_get_invalidate_req(vmid);
+
+- r = amdgpu_kiq_reg_write_reg_wait(adev, hub->vm_inv_eng0_req + eng,
+- hub->vm_inv_eng0_ack + eng, tmp, 1 << vmid);
+- if (!r)
+- continue;
++ if (adev->gfx.kiq.ring.ready &&
++ (amdgpu_sriov_runtime(adev) ||
++ !amdgpu_sriov_vf(adev))) {
++ r = amdgpu_kiq_reg_write_reg_wait(adev, hub->vm_inv_eng0_req + eng,
++ hub->vm_inv_eng0_ack + eng, tmp, 1 << vmid);
++ if (!r)
++ continue;
++ }
+
+ spin_lock(&adev->gmc.invalidate_lock);
+
+--
+2.7.4
+
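The condition above folds the decision into one gate: the KIQ ring must be ready, and the device must be either bare metal or an SR-IOV VF that has left the exclusive-mode window. A sketch of expressing that gate as a predicate, with hypothetical field names standing in for the driver's state:

/* Hypothetical flags standing in for the driver's state; the point is a
 * single predicate deciding whether the KIQ path may be used at all. */
#include <stdbool.h>

struct gpu_state {
	bool kiq_ring_ready;
	bool is_virtual_function;	/* running under SR-IOV */
	bool sriov_runtime;		/* exclusive-mode window is over */
};

bool can_use_kiq(const struct gpu_state *s)
{
	if (!s->kiq_ring_ready)
		return false;
	/* bare metal is always fine; a VF only once exclusive mode has ended */
	return !s->is_virtual_function || s->sriov_runtime;
}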
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5190-drm-amd-display-fix-a-compile-warning.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5190-drm-amd-display-fix-a-compile-warning.patch
new file mode 100644
index 00000000..03ca1797
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5190-drm-amd-display-fix-a-compile-warning.patch
@@ -0,0 +1,44 @@
+From 211b7243418fc68f7574237637f7ba182911f5f3 Mon Sep 17 00:00:00 2001
+From: Wen Yang <wen.yang99@zte.com.cn>
+Date: Fri, 17 Aug 2018 11:09:48 +0800
+Subject: [PATCH 5190/5725] drm/amd/display: fix a compile warning
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Fix a compile warning like:
+ CC [M] drivers/gpu/drm/i915/gvt/execlist.o
+ CC [M] drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.o
+ CC [M] drivers/gpu/drm/radeon/btc_dpm.o
+ CC [M] drivers/isdn/hisax/avm_a1p.o
+ CC [M] drivers/gpu/drm/amd/amdgpu/../display/dc/dcn10/dcn10_dpp.o
+drivers/gpu/drm/amd/amdgpu/../display/dc/dcn10/dcn10_hw_sequencer.c: In function ‘dcn10_update_mpcc’:
+drivers/gpu/drm/amd/amdgpu/../display/dc/dcn10/dcn10_hw_sequencer.c:1903:9: warning: missing braces around initializer [-Wmissing-braces]
+ struct mpcc_blnd_cfg blnd_cfg = {0};
+ ^
+drivers/gpu/drm/amd/amdgpu/../display/dc/dcn10/dcn10_hw_sequencer.c:1903:9: warning: (near initialization for ‘blnd_cfg.black_color’) [-Wmissing-braces]
+
+Acked-by: Randy Dunlap <rdunlap@infradead.org>
+Signed-off-by: Wen Yang <wen.yang99@zte.com.cn>
+Reviewed-by: Jiang Biao <jiang.biao2@zte.com.cn>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 051f427..1c5bb14 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -1917,7 +1917,7 @@ static void update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
+ static void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
+ {
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+- struct mpcc_blnd_cfg blnd_cfg = {0};
++ struct mpcc_blnd_cfg blnd_cfg = {{0}};
+ bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
+ int mpcc_id;
+ struct mpcc *new_mpcc;
+--
+2.7.4
+
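The warning comes from initializing a struct whose first member is itself an aggregate: "{0}" is valid C and zero-initializes everything, but some GCC versions ask for the inner braces to be spelled out. A minimal reproduction with stand-in types:

/* Sketch of the -Wmissing-braces situation the patch fixes; types are
 * stand-ins, not the DC driver's. */
#include <stdio.h>

struct color {
	int r, g, b;
};

struct blend_cfg {
	struct color black_color;	/* first member is an aggregate */
	int mode;
};

int main(void)
{
	struct blend_cfg a = {0};	/* valid C, but may trigger -Wmissing-braces */
	struct blend_cfg b = {{0}};	/* explicit inner braces silence the warning */

	printf("%d %d\n", a.black_color.r, b.mode);
	return 0;
}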
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5191-drm-amd-display-indent-an-if-statement.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5191-drm-amd-display-indent-an-if-statement.patch
new file mode 100644
index 00000000..540b4290
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5191-drm-amd-display-indent-an-if-statement.patch
@@ -0,0 +1,30 @@
+From 4b1a1bba2d8f48cc6ec1f65846b7bd832fdee9aa Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Tue, 14 Aug 2018 12:09:45 +0300
+Subject: [PATCH 5191/5725] drm/amd/display: indent an if statement
+
+The if statement isn't indented and it makes static checkers complain.
+
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index 40fd969..3462ec1 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -348,7 +348,7 @@ static bool is_dp_and_hdmi_sharable(
+
+ if (stream1->clamping.c_depth != COLOR_DEPTH_888 ||
+ stream2->clamping.c_depth != COLOR_DEPTH_888)
+- return false;
++ return false;
+
+ return true;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5192-drm-amdgpu-Don-t-use-kiq-in-gpu-reset.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5192-drm-amdgpu-Don-t-use-kiq-in-gpu-reset.patch
new file mode 100644
index 00000000..209f133c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5192-drm-amdgpu-Don-t-use-kiq-in-gpu-reset.patch
@@ -0,0 +1,53 @@
+From 4aa26b2b8f57c5b5f6be33b40435aafbe21eaec0 Mon Sep 17 00:00:00 2001
+From: Emily Deng <Emily.Deng@amd.com>
+Date: Wed, 22 Aug 2018 20:32:23 +0800
+Subject: [PATCH 5192/5725] drm/amdgpu: Don't use kiq in gpu reset
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+When in GPU reset, don't use the KIQ; it will generate more TDRs.
+
+Signed-off-by: Emily Deng <Emily.Deng@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>.
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 15 ++++-----------
+ 1 file changed, 4 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index 4b54027..02366a5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -331,15 +331,8 @@ signed long amdgpu_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
+
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+- /* don't wait anymore for gpu reset case because this way may
+- * block gpu_recover() routine forever, e.g. this virt_kiq_rreg
+- * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
+- * never return if we keep waiting in virt_kiq_rreg, which cause
+- * gpu_recover() hang there.
+- *
+- * also don't wait anymore for IRQ context
+- * */
+- if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
++ /* don't wait anymore for IRQ context */
++ if (r < 1 && in_interrupt())
+ goto failed_kiq;
+
+ might_sleep();
+@@ -387,8 +380,8 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
+ u32 tmp = gmc_v9_0_get_invalidate_req(vmid);
+
+ if (adev->gfx.kiq.ring.ready &&
+- (amdgpu_sriov_runtime(adev) ||
+- !amdgpu_sriov_vf(adev))) {
++ (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
++ !adev->in_gpu_reset) {
+ r = amdgpu_kiq_reg_write_reg_wait(adev, hub->vm_inv_eng0_req + eng,
+ hub->vm_inv_eng0_ack + eng, tmp, 1 << vmid);
+ if (!r)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5193-drm-amdgpu-display-add-support-for-LVDS-v5.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5193-drm-amdgpu-display-add-support-for-LVDS-v5.patch
new file mode 100644
index 00000000..bc76ca47
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5193-drm-amdgpu-display-add-support-for-LVDS-v5.patch
@@ -0,0 +1,346 @@
+From b29e8e1e503c7f93a9815cf68973236532c49793 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Tue, 14 Aug 2018 14:53:52 -0500
+Subject: [PATCH 5193/5725] drm/amdgpu/display: add support for LVDS (v5)
+
+This adds support for LVDS displays.
+
+v2: add support for spread spectrum, sink detect
+v3: clean up enable_lvds_output
+v4: fix up link_detect
+v5: remove assert on 888 format
+
+Bug: https://bugs.freedesktop.org/show_bug.cgi?id=105880
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 +
+ drivers/gpu/drm/amd/display/dc/core/dc_link.c | 45 ++++++++++++++++++++++
+ .../gpu/drm/amd/display/dc/dce/dce_clock_source.c | 10 +++++
+ .../gpu/drm/amd/display/dc/dce/dce_clock_source.h | 2 +
+ .../gpu/drm/amd/display/dc/dce/dce_link_encoder.c | 34 ++++++++++++++++
+ .../gpu/drm/amd/display/dc/dce/dce_link_encoder.h | 6 +++
+ .../drm/amd/display/dc/dce/dce_stream_encoder.c | 24 ++++++++++++
+ .../gpu/drm/amd/display/dc/inc/hw/link_encoder.h | 3 ++
+ .../gpu/drm/amd/display/dc/inc/hw/stream_encoder.h | 4 ++
+ drivers/gpu/drm/amd/display/include/signal_types.h | 5 +++
+ 10 files changed, 135 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 2704c7f..7dc0e7f 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -3632,6 +3632,8 @@ static int to_drm_connector_type(enum signal_type st)
+ return DRM_MODE_CONNECTOR_HDMIA;
+ case SIGNAL_TYPE_EDP:
+ return DRM_MODE_CONNECTOR_eDP;
++ case SIGNAL_TYPE_LVDS:
++ return DRM_MODE_CONNECTOR_LVDS;
+ case SIGNAL_TYPE_RGB:
+ return DRM_MODE_CONNECTOR_VGA;
+ case SIGNAL_TYPE_DISPLAY_PORT:
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index 42d0ce7..61cf4fe 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -203,6 +203,11 @@ bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type)
+ uint32_t is_hpd_high = 0;
+ struct gpio *hpd_pin;
+
++ if (link->connector_signal == SIGNAL_TYPE_LVDS) {
++ *type = dc_connection_single;
++ return true;
++ }
++
+ /* todo: may need to lock gpio access */
+ hpd_pin = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
+ if (hpd_pin == NULL)
+@@ -616,6 +621,10 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
+ link->local_sink)
+ return true;
+
++ if (link->connector_signal == SIGNAL_TYPE_LVDS &&
++ link->local_sink)
++ return true;
++
+ prev_sink = link->local_sink;
+ if (prev_sink != NULL) {
+ dc_sink_retain(prev_sink);
+@@ -649,6 +658,12 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
+ break;
+ }
+
++ case SIGNAL_TYPE_LVDS: {
++ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
++ sink_caps.signal = SIGNAL_TYPE_LVDS;
++ break;
++ }
++
+ case SIGNAL_TYPE_EDP: {
+ detect_edp_sink_caps(link);
+ sink_caps.transaction_type =
+@@ -1087,6 +1102,9 @@ static bool construct(
+ dal_irq_get_rx_source(hpd_gpio);
+ }
+ break;
++ case CONNECTOR_ID_LVDS:
++ link->connector_signal = SIGNAL_TYPE_LVDS;
++ break;
+ default:
+ DC_LOG_WARNING("Unsupported Connector type:%d!\n", link->link_id.id);
+ goto create_fail;
+@@ -1920,6 +1938,24 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
+ dal_ddc_service_read_scdc_data(link->ddc);
+ }
+
++static void enable_link_lvds(struct pipe_ctx *pipe_ctx)
++{
++ struct dc_stream_state *stream = pipe_ctx->stream;
++ struct dc_link *link = stream->sink->link;
++
++ if (stream->phy_pix_clk == 0)
++ stream->phy_pix_clk = stream->timing.pix_clk_khz;
++
++ memset(&stream->sink->link->cur_link_settings, 0,
++ sizeof(struct dc_link_settings));
++
++ link->link_enc->funcs->enable_lvds_output(
++ link->link_enc,
++ pipe_ctx->clock_source->id,
++ stream->phy_pix_clk);
++
++}
++
+ /****************************enable_link***********************************/
+ static enum dc_status enable_link(
+ struct dc_state *state,
+@@ -1943,6 +1979,10 @@ static enum dc_status enable_link(
+ enable_link_hdmi(pipe_ctx);
+ status = DC_OK;
+ break;
++ case SIGNAL_TYPE_LVDS:
++ enable_link_lvds(pipe_ctx);
++ status = DC_OK;
++ break;
+ case SIGNAL_TYPE_VIRTUAL:
+ status = DC_OK;
+ break;
+@@ -2492,6 +2532,11 @@ void core_link_enable_stream(
+ (pipe_ctx->stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK) ?
+ true : false);
+
++ if (dc_is_lvds_signal(pipe_ctx->stream->signal))
++ pipe_ctx->stream_res.stream_enc->funcs->lvds_set_stream_attribute(
++ pipe_ctx->stream_res.stream_enc,
++ &stream->timing);
++
+ resource_build_info_frame(pipe_ctx);
+ core_dc->hwss.update_info_frame(pipe_ctx);
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+index c5069a10..217fab4 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+@@ -75,6 +75,11 @@ static const struct spread_spectrum_data *get_ss_data_entry(
+ entrys_num = clk_src->hdmi_ss_params_cnt;
+ break;
+
++ case SIGNAL_TYPE_LVDS:
++ ss_parm = clk_src->lvds_ss_params;
++ entrys_num = clk_src->lvds_ss_params_cnt;
++ break;
++
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ case SIGNAL_TYPE_EDP:
+@@ -1182,6 +1187,11 @@ static void ss_info_from_atombios_create(
+ AS_SIGNAL_TYPE_DVI,
+ &clk_src->dvi_ss_params,
+ &clk_src->dvi_ss_params_cnt);
++ get_ss_info_from_atombios(
++ clk_src,
++ AS_SIGNAL_TYPE_LVDS,
++ &clk_src->lvds_ss_params,
++ &clk_src->lvds_ss_params_cnt);
+ }
+
+ static bool calc_pll_max_vco_construct(
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
+index 801bb65..e1f20ed 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
+@@ -125,6 +125,8 @@ struct dce110_clk_src {
+ uint32_t hdmi_ss_params_cnt;
+ struct spread_spectrum_data *dvi_ss_params;
+ uint32_t dvi_ss_params_cnt;
++ struct spread_spectrum_data *lvds_ss_params;
++ uint32_t lvds_ss_params_cnt;
+
+ uint32_t ext_clk_khz;
+ uint32_t ref_freq_khz;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+index eff7d22..4942590 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+@@ -102,6 +102,7 @@ static const struct link_encoder_funcs dce110_lnk_enc_funcs = {
+ .enable_tmds_output = dce110_link_encoder_enable_tmds_output,
+ .enable_dp_output = dce110_link_encoder_enable_dp_output,
+ .enable_dp_mst_output = dce110_link_encoder_enable_dp_mst_output,
++ .enable_lvds_output = dce110_link_encoder_enable_lvds_output,
+ .disable_output = dce110_link_encoder_disable_output,
+ .dp_set_lane_settings = dce110_link_encoder_dp_set_lane_settings,
+ .dp_set_phy_pattern = dce110_link_encoder_dp_set_phy_pattern,
+@@ -814,6 +815,7 @@ bool dce110_link_encoder_validate_output_with_stream(
+ enc110, &stream->timing);
+ break;
+ case SIGNAL_TYPE_EDP:
++ case SIGNAL_TYPE_LVDS:
+ is_valid =
+ (stream->timing.
+ pixel_encoding == PIXEL_ENCODING_RGB) ? true : false;
+@@ -955,6 +957,38 @@ void dce110_link_encoder_enable_tmds_output(
+ }
+ }
+
++/* TODO: still need depth or just pass in adjusted pixel clock? */
++void dce110_link_encoder_enable_lvds_output(
++ struct link_encoder *enc,
++ enum clock_source_id clock_source,
++ uint32_t pixel_clock)
++{
++ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
++ struct bp_transmitter_control cntl = { 0 };
++ enum bp_result result;
++
++ /* Enable the PHY */
++ cntl.connector_obj_id = enc110->base.connector;
++ cntl.action = TRANSMITTER_CONTROL_ENABLE;
++ cntl.engine_id = enc->preferred_engine;
++ cntl.transmitter = enc110->base.transmitter;
++ cntl.pll_id = clock_source;
++ cntl.signal = SIGNAL_TYPE_LVDS;
++ cntl.lanes_number = 4;
++
++ cntl.hpd_sel = enc110->base.hpd_source;
++
++ cntl.pixel_clock = pixel_clock;
++
++ result = link_transmitter_control(enc110, &cntl);
++
++ if (result != BP_RESULT_OK) {
++ DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n",
++ __func__);
++ BREAK_TO_DEBUGGER();
++ }
++}
++
+ /* enables DP PHY output */
+ void dce110_link_encoder_enable_dp_output(
+ struct link_encoder *enc,
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
+index 3470694..3c9368d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
+@@ -225,6 +225,12 @@ void dce110_link_encoder_enable_dp_mst_output(
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source);
+
++/* enables LVDS PHY output */
++void dce110_link_encoder_enable_lvds_output(
++ struct link_encoder *enc,
++ enum clock_source_id clock_source,
++ uint32_t pixel_clock);
++
+ /* disable PHY output */
+ void dce110_link_encoder_disable_output(
+ struct link_encoder *enc,
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+index b139b40..d65cc8c 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+@@ -674,6 +674,28 @@ static void dce110_stream_encoder_dvi_set_stream_attribute(
+ dce110_stream_encoder_set_stream_attribute_helper(enc110, crtc_timing);
+ }
+
++/* setup stream encoder in LVDS mode */
++static void dce110_stream_encoder_lvds_set_stream_attribute(
++ struct stream_encoder *enc,
++ struct dc_crtc_timing *crtc_timing)
++{
++ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
++ struct bp_encoder_control cntl = {0};
++
++ cntl.action = ENCODER_CONTROL_SETUP;
++ cntl.engine_id = enc110->base.id;
++ cntl.signal = SIGNAL_TYPE_LVDS;
++ cntl.enable_dp_audio = false;
++ cntl.pixel_clock = crtc_timing->pix_clk_khz;
++ cntl.lanes_number = LANE_COUNT_FOUR;
++
++ if (enc110->base.bp->funcs->encoder_control(
++ enc110->base.bp, &cntl) != BP_RESULT_OK)
++ return;
++
++ ASSERT(crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB);
++}
++
+ static void dce110_stream_encoder_set_mst_bandwidth(
+ struct stream_encoder *enc,
+ struct fixed31_32 avg_time_slots_per_mtp)
+@@ -1564,6 +1586,8 @@ static const struct stream_encoder_funcs dce110_str_enc_funcs = {
+ dce110_stream_encoder_hdmi_set_stream_attribute,
+ .dvi_set_stream_attribute =
+ dce110_stream_encoder_dvi_set_stream_attribute,
++ .lvds_set_stream_attribute =
++ dce110_stream_encoder_lvds_set_stream_attribute,
+ .set_mst_bandwidth =
+ dce110_stream_encoder_set_mst_bandwidth,
+ .update_hdmi_info_packets =
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+index cf6df2e..5881892 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+@@ -131,6 +131,9 @@ struct link_encoder_funcs {
+ void (*enable_dp_mst_output)(struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source);
++ void (*enable_lvds_output)(struct link_encoder *enc,
++ enum clock_source_id clock_source,
++ uint32_t pixel_clock);
+ void (*disable_output)(struct link_encoder *link_enc,
+ enum signal_type signal);
+ void (*dp_set_lane_settings)(struct link_encoder *enc,
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+index cfa7ec9..53a9b64 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+@@ -101,6 +101,10 @@ struct stream_encoder_funcs {
+ struct dc_crtc_timing *crtc_timing,
+ bool is_dual_link);
+
++ void (*lvds_set_stream_attribute)(
++ struct stream_encoder *enc,
++ struct dc_crtc_timing *crtc_timing);
++
+ void (*set_mst_bandwidth)(
+ struct stream_encoder *enc,
+ struct fixed31_32 avg_time_slots_per_mtp);
+diff --git a/drivers/gpu/drm/amd/display/include/signal_types.h b/drivers/gpu/drm/amd/display/include/signal_types.h
+index 199c5db..03476b1 100644
+--- a/drivers/gpu/drm/amd/display/include/signal_types.h
++++ b/drivers/gpu/drm/amd/display/include/signal_types.h
+@@ -68,6 +68,11 @@ static inline bool dc_is_embedded_signal(enum signal_type signal)
+ return (signal == SIGNAL_TYPE_EDP || signal == SIGNAL_TYPE_LVDS);
+ }
+
++static inline bool dc_is_lvds_signal(enum signal_type signal)
++{
++ return (signal == SIGNAL_TYPE_LVDS);
++}
++
+ static inline bool dc_is_dvi_signal(enum signal_type signal)
+ {
+ switch (signal) {
+--
+2.7.4
+
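Adding a connector signal means threading it through the same dispatch points the patch touches: the connector-type mapping, sink detection, the link-enable switch, and the stream-encoder setup. A compact sketch of that dispatch shape; the enum values and helpers below are illustrative, not the DC code's:

/* Illustrative signal-type dispatch; names are placeholders. */
#include <stdbool.h>
#include <stdio.h>

enum signal_type { SIGNAL_DP, SIGNAL_HDMI, SIGNAL_LVDS, SIGNAL_VIRTUAL };

static bool is_lvds_signal(enum signal_type s)
{
	return s == SIGNAL_LVDS;
}

static void enable_link(enum signal_type s)
{
	switch (s) {
	case SIGNAL_DP:
		printf("train DP link\n");
		break;
	case SIGNAL_HDMI:
		printf("enable TMDS output\n");
		break;
	case SIGNAL_LVDS:
		printf("enable LVDS PHY output\n");	/* the new path */
		break;
	default:
		break;
	}
}

int main(void)
{
	enum signal_type s = SIGNAL_LVDS;

	if (is_lvds_signal(s))
		printf("stream encoder gets LVDS attributes first\n");
	enable_link(s);
	return 0;
}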
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5194-drm-amdgpu-amdgpu_kiq_reg_write_reg_wait-can-be-stat.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5194-drm-amdgpu-amdgpu_kiq_reg_write_reg_wait-can-be-stat.patch
new file mode 100644
index 00000000..187ef2fc
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5194-drm-amdgpu-amdgpu_kiq_reg_write_reg_wait-can-be-stat.patch
@@ -0,0 +1,30 @@
+From 1647630c611b5143b128a2319d0c61c1bb459ba5 Mon Sep 17 00:00:00 2001
+From: kbuild test robot <fengguang.wu@intel.com>
+Date: Wed, 22 Aug 2018 10:31:01 +0800
+Subject: [PATCH 5194/5725] drm/amdgpu: amdgpu_kiq_reg_write_reg_wait() can be
+ static
+
+Fixes: d790449835e6 ("drm/amdgpu: use kiq to do invalidate tlb")
+Reviewed-by: Emily Deng <Emily.Deng@amd.com>
+Signed-off-by: kbuild test robot <fengguang.wu@intel.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index 02366a5..2a96c0a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -310,7 +310,7 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid)
+ return req;
+ }
+
+-signed long amdgpu_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
++static signed long amdgpu_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
+ uint32_t reg0, uint32_t reg1,
+ uint32_t ref, uint32_t mask)
+ {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5195-drm-amdgpu-cleanup-GPU-recovery-check-a-bit-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5195-drm-amdgpu-cleanup-GPU-recovery-check-a-bit-v2.patch
new file mode 100644
index 00000000..47bc9ef5
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5195-drm-amdgpu-cleanup-GPU-recovery-check-a-bit-v2.patch
@@ -0,0 +1,193 @@
+From 2f5d61b081ee4f06981c1596a19ee53bcbbab2e0 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Tue, 21 Aug 2018 10:45:29 +0200
+Subject: [PATCH 5195/5725] drm/amdgpu: cleanup GPU recovery check a bit (v2)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Check if we should call the function instead of providing the forced
+flag.
+
+v2: rebase on KFD changes (Alex)
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Acked-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 3 ++-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 3 ++-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 38 ++++++++++++++++++++----------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 4 ++--
+ drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 3 ++-
+ drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 4 ++--
+ drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c | 3 ++-
+ 8 files changed, 38 insertions(+), 22 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index b11832a..91be1d4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -1286,8 +1286,9 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
+ #define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev))
+
+ /* Common functions */
++bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
+ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+- struct amdgpu_job* job, bool force);
++ struct amdgpu_job* job);
+ void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
+ bool amdgpu_device_need_post(struct amdgpu_device *adev);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+index 0adee23..599cb6f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+@@ -280,7 +280,8 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+- amdgpu_device_gpu_recover(adev, NULL, false);
++ if (amdgpu_device_should_recover_gpu(adev))
++ amdgpu_device_gpu_recover(adev, NULL);
+ }
+
+ u32 pool_to_domain(enum kgd_memory_pool p)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 1acb85e..67adfb4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -3304,31 +3304,43 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
+ }
+
+ /**
++ * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
++ *
++ * @adev: amdgpu device pointer
++ *
++ * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
++ * a hung GPU.
++ */
++bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
++{
++ if (!amdgpu_device_ip_check_soft_reset(adev)) {
++ DRM_INFO("Timeout, but no hardware hang detected.\n");
++ return false;
++ }
++
++ if (amdgpu_gpu_recovery == 0 || (amdgpu_gpu_recovery == -1 &&
++ !amdgpu_sriov_vf(adev))) {
++ DRM_INFO("GPU recovery disabled.\n");
++ return false;
++ }
++
++ return true;
++}
++
++/**
+ * amdgpu_device_gpu_recover - reset the asic and recover scheduler
+ *
+ * @adev: amdgpu device pointer
+ * @job: which job trigger hang
+- * @force: forces reset regardless of amdgpu_gpu_recovery
+ *
+ * Attempt to reset the GPU if it has hung (all asics).
+ * Returns 0 for success or an error on failure.
+ */
+ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+- struct amdgpu_job *job, bool force)
++ struct amdgpu_job *job)
+ {
+ int i, r, resched;
+
+- if (!force && !amdgpu_device_ip_check_soft_reset(adev)) {
+- DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
+- return 0;
+- }
+-
+- if (!force && (amdgpu_gpu_recovery == 0 ||
+- (amdgpu_gpu_recovery == -1 && !amdgpu_sriov_vf(adev)))) {
+- DRM_INFO("GPU recovery disabled.\n");
+- return 0;
+- }
+-
+ dev_info(adev->dev, "GPU reset begin!\n");
+
+ mutex_lock(&adev->lock_reset);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+index 1ec9590..e7f6389 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+@@ -702,7 +702,7 @@ static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data)
+ struct amdgpu_device *adev = dev->dev_private;
+
+ seq_printf(m, "gpu recover\n");
+- amdgpu_device_gpu_recover(adev, NULL, true);
++ amdgpu_device_gpu_recover(adev, NULL);
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+index da8eda8..2d29753 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+@@ -105,8 +105,8 @@ static void amdgpu_irq_reset_work_func(struct work_struct *work)
+ struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
+ reset_work);
+
+- if (!amdgpu_sriov_vf(adev))
+- amdgpu_device_gpu_recover(adev, NULL, false);
++ if (!amdgpu_sriov_vf(adev) && amdgpu_device_should_recover_gpu(adev))
++ amdgpu_device_gpu_recover(adev, NULL);
+ }
+
+ /**
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+index 1250aae..994b569 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+@@ -37,7 +37,8 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
+ job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
+ ring->fence_drv.sync_seq);
+
+- amdgpu_device_gpu_recover(ring->adev, job, false);
++ if (amdgpu_device_should_recover_gpu(ring->adev))
++ amdgpu_device_gpu_recover(ring->adev, job);
+ }
+
+ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
+diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+index 078f70f..8cbb465 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
++++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+@@ -266,8 +266,8 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
+ }
+
+ /* Trigger recovery for world switch failure if no TDR */
+- if (amdgpu_lockup_timeout == 0)
+- amdgpu_device_gpu_recover(adev, NULL, true);
++ if (amdgpu_device_should_recover_gpu(adev))
++ amdgpu_device_gpu_recover(adev, NULL);
+ }
+
+ static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
+diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
+index 9fc1c37..842567b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
++++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
+@@ -521,7 +521,8 @@ static void xgpu_vi_mailbox_flr_work(struct work_struct *work)
+ }
+
+ /* Trigger recovery due to world switch failure */
+- amdgpu_device_gpu_recover(adev, NULL, false);
++ if (amdgpu_device_should_recover_gpu(adev))
++ amdgpu_device_gpu_recover(adev, NULL);
+ }
+
+ static int xgpu_vi_set_mailbox_rcv_irq(struct amdgpu_device *adev,
+--
+2.7.4
+
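The refactor in patch 5195 replaces the old boolean force parameter with a separate amdgpu_device_should_recover_gpu() predicate that each caller checks before invoking recovery, so the decision and the action are decoupled. The standalone C sketch below models that calling pattern only; should_recover() and do_recover() are illustrative names under assumed semantics, not the kernel API.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the driver state; not the kernel structures. */
struct dev_state {
        int  recovery_mode;   /* 0 = disabled, -1 = auto, 1 = enabled */
        bool is_virtual;      /* models amdgpu_sriov_vf() */
        bool hang_detected;   /* models the soft-reset check */
};

/* Predicate every caller consults first, mirroring the new helper. */
static bool should_recover(const struct dev_state *dev)
{
        if (!dev->hang_detected) {
                printf("Timeout, but no hardware hang detected.\n");
                return false;
        }
        if (dev->recovery_mode == 0 ||
            (dev->recovery_mode == -1 && !dev->is_virtual)) {
                printf("GPU recovery disabled.\n");
                return false;
        }
        return true;
}

/* The recovery path itself no longer needs a 'force' argument. */
static int do_recover(struct dev_state *dev)
{
        (void)dev;                    /* the real code resets the ASIC here */
        printf("GPU reset begin!\n");
        return 0;
}

int main(void)
{
        struct dev_state dev = {
                .recovery_mode = 1, .is_virtual = false, .hang_detected = true
        };

        if (should_recover(&dev))     /* each call site makes the choice explicit */
                do_recover(&dev);
        return 0;
}
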
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5196-drm-amdgpu-validate-the-VM-root-PD-from-the-VM-code.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5196-drm-amdgpu-validate-the-VM-root-PD-from-the-VM-code.patch
new file mode 100644
index 00000000..33c788c5
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5196-drm-amdgpu-validate-the-VM-root-PD-from-the-VM-code.patch
@@ -0,0 +1,42 @@
+From 6d514bcb1053af1796748d2d9515318b7abec0a4 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Wed, 15 Aug 2018 19:10:40 +0200
+Subject: [PATCH 5196/5725] drm/amdgpu: validate the VM root PD from the VM
+ code
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Preparation for following changes. This validates the root PD twice,
+but the overhead of that should be minimal.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index deaef9f..bd12f4a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -291,11 +291,11 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
+ struct amdgpu_bo *bo = bo_base->bo;
+
+- if (bo->parent) {
+- r = validate(param, bo);
+- if (r)
+- break;
++ r = validate(param, bo);
++ if (r)
++ break;
+
++ if (bo->parent) {
+ spin_lock(&glob->lru_lock);
+ ttm_bo_move_to_lru_tail(&bo->tbo);
+ if (bo->shadow)
+--
+2.7.4
+
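Patch 5196 moves the validate() call out of the bo->parent guard: every evicted BO, including the root PD (which has no parent), is validated, while only BOs that do have a parent are moved back to the LRU tail. A simplified sketch of that loop shape follows, using an array instead of the kernel's list and invented names.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Toy model of an evicted buffer object; 'has_parent' is false for the root PD. */
struct toy_bo {
        const char *name;
        bool has_parent;
};

static int validate(struct toy_bo *bo)
{
        printf("validating %s\n", bo->name);
        return 0;                    /* 0 = success, like the kernel callback */
}

static void move_to_lru_tail(struct toy_bo *bo)
{
        printf("LRU tail: %s\n", bo->name);
}

int main(void)
{
        struct toy_bo evicted[] = {
                { "root PD", false },
                { "page table 0", true },
                { "page table 1", true },
        };
        size_t i;

        for (i = 0; i < sizeof(evicted) / sizeof(evicted[0]); i++) {
                /* After the patch: validate unconditionally ... */
                if (validate(&evicted[i]))
                        break;
                /* ... but only child page tables are re-queued on the LRU. */
                if (evicted[i].has_parent)
                        move_to_lru_tail(&evicted[i]);
        }
        return 0;
}
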
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5197-drm-amdgpu-move-setting-the-GART-addr-into-TTM.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5197-drm-amdgpu-move-setting-the-GART-addr-into-TTM.patch
new file mode 100644
index 00000000..1b63ede8
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5197-drm-amdgpu-move-setting-the-GART-addr-into-TTM.patch
@@ -0,0 +1,52 @@
+From ee84fcc04f1147bf6b0285b3bd778cb4a6e808a2 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Tue, 21 Aug 2018 16:47:01 +0200
+Subject: [PATCH 5197/5725] drm/amdgpu: move setting the GART addr into TTM
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Move setting the GART addr for window-based copies into the TTM code that
+uses it.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 2 --
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 5 ++++-
+ 2 files changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+index 994b569..f72d959 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+@@ -83,8 +83,6 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
+ r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]);
+ if (r)
+ kfree(*job);
+- else
+- (*job)->vm_pd_addr = adev->gart.table_addr;
+
+ return r;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index b741827..f9233be 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -2247,7 +2247,10 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
+ if (r)
+ return r;
+
+- job->vm_needs_flush = vm_needs_flush;
++ if (vm_needs_flush) {
++ job->vm_pd_addr = adev->gart.table_addr;
++ job->vm_needs_flush = true;
++ }
+ if (resv) {
+ r = amdgpu_sync_resv(adev, &job->sync, resv,
+ AMDGPU_FENCE_OWNER_UNDEFINED,
+--
+2.7.4
+
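After patch 5197 the GART table address is attached to a job only on the copy path that actually needs a VM flush, instead of being cached unconditionally at job allocation. A minimal sketch of that conditional setup, with made-up names standing in for the job fields and the BO offset helper:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Minimal stand-in for the job fields touched by the patch. */
struct toy_job {
        uint64_t vm_pd_addr;
        bool     vm_needs_flush;
};

/* Models amdgpu_bo_gpu_offset(adev->gart.bo); the value is arbitrary here. */
static uint64_t gart_table_addr(void)
{
        return 0x100000ULL;
}

static void setup_copy(struct toy_job *job, bool vm_needs_flush)
{
        /* Only the user of the window-based copy fills in the address. */
        if (vm_needs_flush) {
                job->vm_pd_addr = gart_table_addr();
                job->vm_needs_flush = true;
        }
}

int main(void)
{
        struct toy_job job = { 0 };

        setup_copy(&job, true);
        printf("flush=%d pd_addr=0x%llx\n", job.vm_needs_flush,
               (unsigned long long)job.vm_pd_addr);
        return 0;
}
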
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5198-drm-amdgpu-rename-gart.robj-into-gart.bo.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5198-drm-amdgpu-rename-gart.robj-into-gart.bo.patch
new file mode 100644
index 00000000..940b51d9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5198-drm-amdgpu-rename-gart.robj-into-gart.bo.patch
@@ -0,0 +1,212 @@
+From 3272a1b6061148fffdb127c7869f6256f8cebc61 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Tue, 21 Aug 2018 17:07:47 +0200
+Subject: [PATCH 5198/5725] drm/amdgpu: rename gart.robj into gart.bo
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+sed -i "s/gart.robj/gart.bo/" drivers/gpu/drm/amd/amdgpu/*.c
+sed -i "s/gart.robj/gart.bo/" drivers/gpu/drm/amd/amdgpu/*.h
+
+Just cleaning up radeon leftovers.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 32 ++++++++++++++++----------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h | 2 +-
+ drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 4 ++--
+ drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 4 ++--
+ drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 4 ++--
+ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 4 ++--
+ 6 files changed, 25 insertions(+), 25 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+index a14379c..b2e0083 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+@@ -112,7 +112,7 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
+ {
+ int r;
+
+- if (adev->gart.robj == NULL) {
++ if (adev->gart.bo == NULL) {
+ struct amdgpu_bo_param bp;
+
+ memset(&bp, 0, sizeof(bp));
+@@ -123,7 +123,7 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+ bp.type = ttm_bo_type_kernel;
+ bp.resv = NULL;
+- r = amdgpu_bo_create(adev, &bp, &adev->gart.robj);
++ r = amdgpu_bo_create(adev, &bp, &adev->gart.bo);
+ if (r) {
+ return r;
+ }
+@@ -145,19 +145,19 @@ int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
+ {
+ int r;
+
+- r = amdgpu_bo_reserve(adev->gart.robj, false);
++ r = amdgpu_bo_reserve(adev->gart.bo, false);
+ if (unlikely(r != 0))
+ return r;
+- r = amdgpu_bo_pin(adev->gart.robj, AMDGPU_GEM_DOMAIN_VRAM);
++ r = amdgpu_bo_pin(adev->gart.bo, AMDGPU_GEM_DOMAIN_VRAM);
+ if (r) {
+- amdgpu_bo_unreserve(adev->gart.robj);
++ amdgpu_bo_unreserve(adev->gart.bo);
+ return r;
+ }
+- r = amdgpu_bo_kmap(adev->gart.robj, &adev->gart.ptr);
++ r = amdgpu_bo_kmap(adev->gart.bo, &adev->gart.ptr);
+ if (r)
+- amdgpu_bo_unpin(adev->gart.robj);
+- amdgpu_bo_unreserve(adev->gart.robj);
+- adev->gart.table_addr = amdgpu_bo_gpu_offset(adev->gart.robj);
++ amdgpu_bo_unpin(adev->gart.bo);
++ amdgpu_bo_unreserve(adev->gart.bo);
++ adev->gart.table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
+ return r;
+ }
+
+@@ -173,14 +173,14 @@ void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev)
+ {
+ int r;
+
+- if (adev->gart.robj == NULL) {
++ if (adev->gart.bo == NULL) {
+ return;
+ }
+- r = amdgpu_bo_reserve(adev->gart.robj, true);
++ r = amdgpu_bo_reserve(adev->gart.bo, true);
+ if (likely(r == 0)) {
+- amdgpu_bo_kunmap(adev->gart.robj);
+- amdgpu_bo_unpin(adev->gart.robj);
+- amdgpu_bo_unreserve(adev->gart.robj);
++ amdgpu_bo_kunmap(adev->gart.bo);
++ amdgpu_bo_unpin(adev->gart.bo);
++ amdgpu_bo_unreserve(adev->gart.bo);
+ adev->gart.ptr = NULL;
+ }
+ }
+@@ -196,10 +196,10 @@ void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev)
+ */
+ void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
+ {
+- if (adev->gart.robj == NULL) {
++ if (adev->gart.bo == NULL) {
+ return;
+ }
+- amdgpu_bo_unref(&adev->gart.robj);
++ amdgpu_bo_unref(&adev->gart.bo);
+ }
+
+ /*
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
+index 456295c..35af864 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
+@@ -39,7 +39,7 @@ struct amdgpu_bo;
+
+ struct amdgpu_gart {
+ u64 table_addr;
+- struct amdgpu_bo *robj;
++ struct amdgpu_bo *bo;
+ void *ptr;
+ unsigned num_gpu_pages;
+ unsigned num_cpu_pages;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+index a5077a9..a0f3be9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+@@ -496,7 +496,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
+ int r, i;
+ u32 field;
+
+- if (adev->gart.robj == NULL) {
++ if (adev->gart.bo == NULL) {
+ dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
+ return -EINVAL;
+ }
+@@ -587,7 +587,7 @@ static int gmc_v6_0_gart_init(struct amdgpu_device *adev)
+ {
+ int r;
+
+- if (adev->gart.robj) {
++ if (adev->gart.bo) {
+ dev_warn(adev->dev, "gmc_v6_0 PCIE GART already initialized\n");
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+index 46ce9f0..eadc5c2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+@@ -607,7 +607,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
+ int r, i;
+ u32 tmp, field;
+
+- if (adev->gart.robj == NULL) {
++ if (adev->gart.bo == NULL) {
+ dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
+ return -EINVAL;
+ }
+@@ -708,7 +708,7 @@ static int gmc_v7_0_gart_init(struct amdgpu_device *adev)
+ {
+ int r;
+
+- if (adev->gart.robj) {
++ if (adev->gart.bo) {
+ WARN(1, "R600 PCIE GART already initialized\n");
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+index 82f19e7..18b86db 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+@@ -813,7 +813,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
+ int r, i;
+ u32 tmp, field;
+
+- if (adev->gart.robj == NULL) {
++ if (adev->gart.bo == NULL) {
+ dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
+ return -EINVAL;
+ }
+@@ -931,7 +931,7 @@ static int gmc_v8_0_gart_init(struct amdgpu_device *adev)
+ {
+ int r;
+
+- if (adev->gart.robj) {
++ if (adev->gart.bo) {
+ WARN(1, "R600 PCIE GART already initialized\n");
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index 2a96c0a..0c83829 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -836,7 +836,7 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
+ {
+ int r;
+
+- if (adev->gart.robj) {
++ if (adev->gart.bo) {
+ WARN(1, "VEGA10 PCIE GART already initialized\n");
+ return 0;
+ }
+@@ -1073,7 +1073,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
+ golden_settings_vega10_hdp,
+ ARRAY_SIZE(golden_settings_vega10_hdp));
+
+- if (adev->gart.robj == NULL) {
++ if (adev->gart.bo == NULL) {
+ dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
+ return -EINVAL;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5199-drm-amdgpu-remove-gart.table_addr.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5199-drm-amdgpu-remove-gart.table_addr.patch
new file mode 100644
index 00000000..11282bae
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5199-drm-amdgpu-remove-gart.table_addr.patch
@@ -0,0 +1,255 @@
+From 46940ee89ee802079dfe4ffb9998694e70f47d00 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Tue, 21 Aug 2018 17:18:22 +0200
+Subject: [PATCH 5199/5725] drm/amdgpu: remove gart.table_addr
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+We can easily figure out the address on the fly.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 1 -
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h | 1 -
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 4 ++--
+ drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 7 +++----
+ drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 9 +++++----
+ drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 9 +++++----
+ drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 9 +++++----
+ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 7 +++----
+ 9 files changed, 24 insertions(+), 25 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+index b2e0083..5586874 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+@@ -157,7 +157,6 @@ int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
+ if (r)
+ amdgpu_bo_unpin(adev->gart.bo);
+ amdgpu_bo_unreserve(adev->gart.bo);
+- adev->gart.table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
+ return r;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
+index 35af864..5ee8f20 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
+@@ -38,7 +38,6 @@ struct amdgpu_bo;
+ #define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)
+
+ struct amdgpu_gart {
+- u64 table_addr;
+ struct amdgpu_bo *bo;
+ void *ptr;
+ unsigned num_gpu_pages;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index f9233be..c263f18 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -2187,7 +2187,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
+ src_addr = num_dw * 4;
+ src_addr += job->ibs[0].gpu_addr;
+
+- dst_addr = adev->gart.table_addr;
++ dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
+ dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
+ amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
+ dst_addr, num_bytes);
+@@ -2248,7 +2248,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
+ return r;
+
+ if (vm_needs_flush) {
+- job->vm_pd_addr = adev->gart.table_addr;
++ job->vm_pd_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
+ job->vm_needs_flush = true;
+ }
+ if (resv) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+index acfbd2d..2baab7e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+@@ -37,11 +37,10 @@ u64 gfxhub_v1_0_get_mc_fb_offset(struct amdgpu_device *adev)
+
+ static void gfxhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
+ {
+- uint64_t value;
++ uint64_t value = amdgpu_bo_gpu_offset(adev->gart.bo);
+
+- BUG_ON(adev->gart.table_addr & (~0x0000FFFFFFFFF000ULL));
+- value = adev->gart.table_addr - adev->gmc.vram_start
+- + adev->vm_manager.vram_base_offset;
++ BUG_ON(value & (~0x0000FFFFFFFFF000ULL));
++ value -= adev->gmc.vram_start + adev->vm_manager.vram_base_offset;
+ value &= 0x0000FFFFFFFFF000ULL;
+ value |= 0x1; /*valid bit*/
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+index a0f3be9..ff045df 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+@@ -493,6 +493,7 @@ static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
+
+ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
+ {
++ uint64_t table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
+ int r, i;
+ u32 field;
+
+@@ -531,7 +532,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
+ /* setup context0 */
+ WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
+ WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
+- WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
++ WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, table_addr >> 12);
+ WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
+ (u32)(adev->dummy_page_addr >> 12));
+ WREG32(mmVM_CONTEXT0_CNTL2, 0);
+@@ -555,10 +556,10 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
+ for (i = 1; i < 16; i++) {
+ if (i < 8)
+ WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
+- adev->gart.table_addr >> 12);
++ table_addr >> 12);
+ else
+ WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
+- adev->gart.table_addr >> 12);
++ table_addr >> 12);
+ }
+
+ /* enable context1-15 */
+@@ -578,7 +579,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
+ gmc_v6_0_flush_gpu_tlb(adev, 0);
+ dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
+ (unsigned)(adev->gmc.gart_size >> 20),
+- (unsigned long long)adev->gart.table_addr);
++ (unsigned long long)table_addr);
+ adev->gart.ready = true;
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+index eadc5c2..9276792 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+@@ -604,6 +604,7 @@ static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable)
+ */
+ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
+ {
++ uint64_t table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
+ int r, i;
+ u32 tmp, field;
+
+@@ -645,7 +646,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
+ /* setup context0 */
+ WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
+ WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
+- WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
++ WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, table_addr >> 12);
+ WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
+ (u32)(adev->dummy_page_addr >> 12));
+ WREG32(mmVM_CONTEXT0_CNTL2, 0);
+@@ -669,10 +670,10 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
+ for (i = 1; i < 16; i++) {
+ if (i < 8)
+ WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
+- adev->gart.table_addr >> 12);
++ table_addr >> 12);
+ else
+ WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
+- adev->gart.table_addr >> 12);
++ table_addr >> 12);
+ }
+
+ /* enable context1-15 */
+@@ -699,7 +700,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
+ gmc_v7_0_flush_gpu_tlb(adev, 0);
+ DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+ (unsigned)(adev->gmc.gart_size >> 20),
+- (unsigned long long)adev->gart.table_addr);
++ (unsigned long long)table_addr);
+ adev->gart.ready = true;
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+index 18b86db..5073b76 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+@@ -810,6 +810,7 @@ static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
+ */
+ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
+ {
++ uint64_t table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
+ int r, i;
+ u32 tmp, field;
+
+@@ -867,7 +868,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
+ /* setup context0 */
+ WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
+ WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
+- WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
++ WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, table_addr >> 12);
+ WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
+ (u32)(adev->dummy_page_addr >> 12));
+ WREG32(mmVM_CONTEXT0_CNTL2, 0);
+@@ -891,10 +892,10 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
+ for (i = 1; i < 16; i++) {
+ if (i < 8)
+ WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
+- adev->gart.table_addr >> 12);
++ table_addr >> 12);
+ else
+ WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
+- adev->gart.table_addr >> 12);
++ table_addr >> 12);
+ }
+
+ /* enable context1-15 */
+@@ -922,7 +923,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
+ gmc_v8_0_flush_gpu_tlb(adev, 0);
+ DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+ (unsigned)(adev->gmc.gart_size >> 20),
+- (unsigned long long)adev->gart.table_addr);
++ (unsigned long long)table_addr);
+ adev->gart.ready = true;
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index 0c83829..730a589 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -1117,7 +1117,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
+
+ DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+ (unsigned)(adev->gmc.gart_size >> 20),
+- (unsigned long long)adev->gart.table_addr);
++ (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
+ adev->gart.ready = true;
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+index e70a0d4..800ec46 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+@@ -47,11 +47,10 @@ u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev)
+
+ static void mmhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
+ {
+- uint64_t value;
++ uint64_t value = amdgpu_bo_gpu_offset(adev->gart.bo);
+
+- BUG_ON(adev->gart.table_addr & (~0x0000FFFFFFFFF000ULL));
+- value = adev->gart.table_addr - adev->gmc.vram_start +
+- adev->vm_manager.vram_base_offset;
++ BUG_ON(value & (~0x0000FFFFFFFFF000ULL));
++ value -= adev->gmc.vram_start + adev->vm_manager.vram_base_offset;
+ value &= 0x0000FFFFFFFFF000ULL;
+ value |= 0x1; /* valid bit */
+
+--
+2.7.4
+
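Patch 5199 removes the cached gart.table_addr field entirely; each user re-derives the address from the GART BO via amdgpu_bo_gpu_offset() at the point of use, trading a cheap accessor call for the guarantee that the value can never go stale. A toy illustration of the pattern (the names below are placeholders, not the driver's types):

#include <stdint.h>
#include <stdio.h>

/* Toy buffer object: after the patch the GPU offset lives only here. */
struct toy_bo {
        uint64_t gpu_offset;
};

struct toy_gart {
        struct toy_bo *bo;     /* the cached table_addr field is gone */
};

/* Models amdgpu_bo_gpu_offset(): derive the address on the fly. */
static uint64_t bo_gpu_offset(const struct toy_bo *bo)
{
        return bo->gpu_offset;
}

int main(void)
{
        struct toy_bo bo = { .gpu_offset = 0x8000ULL };
        struct toy_gart gart = { .bo = &bo };

        /* Every user re-derives the address instead of reading a cached copy. */
        printf("table at 0x%016llx\n",
               (unsigned long long)bo_gpu_offset(gart.bo));
        return 0;
}
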
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5200-drm-amdgpu-set-correct-base-for-THM-NBIF-MP1-IP.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5200-drm-amdgpu-set-correct-base-for-THM-NBIF-MP1-IP.patch
new file mode 100644
index 00000000..21701eb9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5200-drm-amdgpu-set-correct-base-for-THM-NBIF-MP1-IP.patch
@@ -0,0 +1,39 @@
+From 3208f350fd2995ca2d7e4923b7d3883ebf3db7b3 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Fri, 17 Aug 2018 09:31:56 +0800
+Subject: [PATCH 5200/5725] drm/amdgpu: set correct base for THM/NBIF/MP1 IP
+
+Set correct address base for vega20.
+
+Change-Id: I7435980e2ca156ee2b443a97899d40aaba4876cb
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
+index 52778de..2d44735 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
++++ b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
+@@ -38,6 +38,7 @@ int vega20_reg_base_init(struct amdgpu_device *adev)
+ adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
+ adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
+ adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
++ adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
+ adev->reg_offset[UVD_HWIP][i] = (uint32_t *)(&(UVD_BASE.instance[i]));
+ adev->reg_offset[VCE_HWIP][i] = (uint32_t *)(&(VCE_BASE.instance[i]));
+ adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
+@@ -46,6 +47,8 @@ int vega20_reg_base_init(struct amdgpu_device *adev)
+ adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(SDMA0_BASE.instance[i]));
+ adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(SDMA1_BASE.instance[i]));
+ adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
++ adev->reg_offset[NBIF_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
++ adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
+ }
+ return 0;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5201-drm-amdgpu-Only-retrieve-GPU-address-of-GART-table-a.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5201-drm-amdgpu-Only-retrieve-GPU-address-of-GART-table-a.patch
new file mode 100644
index 00000000..4d260fb8
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5201-drm-amdgpu-Only-retrieve-GPU-address-of-GART-table-a.patch
@@ -0,0 +1,94 @@
+From 58db932768acc6ef8a9e52d499d67024b80d54a2 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Michel=20D=C3=A4nzer?= <michel.daenzer@amd.com>
+Date: Tue, 28 Aug 2018 11:26:17 +0200
+Subject: [PATCH 5201/5725] drm/amdgpu: Only retrieve GPU address of GART table
+ after pinning it
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Doing it earlier hits a WARN_ON_ONCE in amdgpu_bo_gpu_offset.
+
+Fixes: "drm/amdgpu: remove gart.table_addr"
+
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 5 ++++-
+ drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 5 ++++-
+ drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 5 ++++-
+ 3 files changed, 12 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+index ff045df..3ed83ae 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+@@ -493,7 +493,7 @@ static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
+
+ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
+ {
+- uint64_t table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
++ uint64_t table_addr;
+ int r, i;
+ u32 field;
+
+@@ -504,6 +504,9 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
+ r = amdgpu_gart_table_vram_pin(adev);
+ if (r)
+ return r;
++
++ table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
++
+ /* Setup TLB control */
+ WREG32(mmMC_VM_MX_L1_TLB_CNTL,
+ (0xA << 7) |
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+index 9276792..e3d6ff1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+@@ -604,7 +604,7 @@ static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable)
+ */
+ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
+ {
+- uint64_t table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
++ uint64_t table_addr;
+ int r, i;
+ u32 tmp, field;
+
+@@ -615,6 +615,9 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
+ r = amdgpu_gart_table_vram_pin(adev);
+ if (r)
+ return r;
++
++ table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
++
+ /* Setup TLB control */
+ tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+index 5073b76..7006eb4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+@@ -810,7 +810,7 @@ static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
+ */
+ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
+ {
+- uint64_t table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
++ uint64_t table_addr;
+ int r, i;
+ u32 tmp, field;
+
+@@ -821,6 +821,9 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
+ r = amdgpu_gart_table_vram_pin(adev);
+ if (r)
+ return r;
++
++ table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
++
+ /* Setup TLB control */
+ tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
+--
+2.7.4
+
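Patch 5201 is an ordering fix: the GPU offset of the GART table is only meaningful once the BO is pinned, so the amdgpu_bo_gpu_offset() call has to move after amdgpu_gart_table_vram_pin() succeeds, otherwise a WARN_ON_ONCE fires. The constraint can be modelled in a few lines of plain C (illustrative names, assumed semantics):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_bo {
        bool     pinned;
        uint64_t gpu_offset;
};

/* Like amdgpu_bo_gpu_offset(), the offset is only valid once the BO is pinned. */
static uint64_t bo_gpu_offset(const struct toy_bo *bo)
{
        assert(bo->pinned && "querying an unpinned BO would warn in the kernel");
        return bo->gpu_offset;
}

static int pin(struct toy_bo *bo)
{
        bo->pinned = true;
        bo->gpu_offset = 0x8000ULL;    /* placement is decided at pin time */
        return 0;
}

int main(void)
{
        struct toy_bo gart_bo = { 0 };
        uint64_t table_addr;

        if (pin(&gart_bo))                      /* pin first ... */
                return 1;
        table_addr = bo_gpu_offset(&gart_bo);   /* ... then query the address */
        printf("table at 0x%016llx\n", (unsigned long long)table_addr);
        return 0;
}
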
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5202-drm-amdgpu-switch-firmware-path-for-SI-parts.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5202-drm-amdgpu-switch-firmware-path-for-SI-parts.patch
new file mode 100644
index 00000000..9e9fa524
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5202-drm-amdgpu-switch-firmware-path-for-SI-parts.patch
@@ -0,0 +1,191 @@
+From cc1a7594f917608790a9ff02adc275dcc79baeb4 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Mon, 2 Jul 2018 14:35:36 -0500
+Subject: [PATCH 5202/5725] drm/amdgpu: switch firmware path for SI parts
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Use separate firmware path for amdgpu to avoid conflicts
+with radeon on SI parts.
+
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 56 +++++++++++++++++------------------
+ drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 14 ++++-----
+ drivers/gpu/drm/amd/amdgpu/si_dpm.c | 22 +++++++-------
+ 3 files changed, 46 insertions(+), 46 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+index 0005f70..4518021 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+@@ -44,30 +44,30 @@ static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev);
+ static void gfx_v6_0_set_irq_funcs(struct amdgpu_device *adev);
+ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev);
+
+-MODULE_FIRMWARE("radeon/tahiti_pfp.bin");
+-MODULE_FIRMWARE("radeon/tahiti_me.bin");
+-MODULE_FIRMWARE("radeon/tahiti_ce.bin");
+-MODULE_FIRMWARE("radeon/tahiti_rlc.bin");
+-
+-MODULE_FIRMWARE("radeon/pitcairn_pfp.bin");
+-MODULE_FIRMWARE("radeon/pitcairn_me.bin");
+-MODULE_FIRMWARE("radeon/pitcairn_ce.bin");
+-MODULE_FIRMWARE("radeon/pitcairn_rlc.bin");
+-
+-MODULE_FIRMWARE("radeon/verde_pfp.bin");
+-MODULE_FIRMWARE("radeon/verde_me.bin");
+-MODULE_FIRMWARE("radeon/verde_ce.bin");
+-MODULE_FIRMWARE("radeon/verde_rlc.bin");
+-
+-MODULE_FIRMWARE("radeon/oland_pfp.bin");
+-MODULE_FIRMWARE("radeon/oland_me.bin");
+-MODULE_FIRMWARE("radeon/oland_ce.bin");
+-MODULE_FIRMWARE("radeon/oland_rlc.bin");
+-
+-MODULE_FIRMWARE("radeon/hainan_pfp.bin");
+-MODULE_FIRMWARE("radeon/hainan_me.bin");
+-MODULE_FIRMWARE("radeon/hainan_ce.bin");
+-MODULE_FIRMWARE("radeon/hainan_rlc.bin");
++MODULE_FIRMWARE("amdgpu/tahiti_pfp.bin");
++MODULE_FIRMWARE("amdgpu/tahiti_me.bin");
++MODULE_FIRMWARE("amdgpu/tahiti_ce.bin");
++MODULE_FIRMWARE("amdgpu/tahiti_rlc.bin");
++
++MODULE_FIRMWARE("amdgpu/pitcairn_pfp.bin");
++MODULE_FIRMWARE("amdgpu/pitcairn_me.bin");
++MODULE_FIRMWARE("amdgpu/pitcairn_ce.bin");
++MODULE_FIRMWARE("amdgpu/pitcairn_rlc.bin");
++
++MODULE_FIRMWARE("amdgpu/verde_pfp.bin");
++MODULE_FIRMWARE("amdgpu/verde_me.bin");
++MODULE_FIRMWARE("amdgpu/verde_ce.bin");
++MODULE_FIRMWARE("amdgpu/verde_rlc.bin");
++
++MODULE_FIRMWARE("amdgpu/oland_pfp.bin");
++MODULE_FIRMWARE("amdgpu/oland_me.bin");
++MODULE_FIRMWARE("amdgpu/oland_ce.bin");
++MODULE_FIRMWARE("amdgpu/oland_rlc.bin");
++
++MODULE_FIRMWARE("amdgpu/hainan_pfp.bin");
++MODULE_FIRMWARE("amdgpu/hainan_me.bin");
++MODULE_FIRMWARE("amdgpu/hainan_ce.bin");
++MODULE_FIRMWARE("amdgpu/hainan_rlc.bin");
+
+ static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev);
+ static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
+@@ -335,7 +335,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
+ default: BUG();
+ }
+
+- snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
+ err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+@@ -346,7 +346,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
+ adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+
+- snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
+ err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+@@ -357,7 +357,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
+ adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+
+- snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
+ err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+@@ -368,7 +368,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
+ adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+
+- snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
+ err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+index 3ed83ae..d13110f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+@@ -41,11 +41,11 @@ static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev);
+ static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
+ static int gmc_v6_0_wait_for_idle(void *handle);
+
+-MODULE_FIRMWARE("radeon/tahiti_mc.bin");
+-MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
+-MODULE_FIRMWARE("radeon/verde_mc.bin");
+-MODULE_FIRMWARE("radeon/oland_mc.bin");
+-MODULE_FIRMWARE("radeon/si58_mc.bin");
++MODULE_FIRMWARE("amdgpu/tahiti_mc.bin");
++MODULE_FIRMWARE("amdgpu/pitcairn_mc.bin");
++MODULE_FIRMWARE("amdgpu/verde_mc.bin");
++MODULE_FIRMWARE("amdgpu/oland_mc.bin");
++MODULE_FIRMWARE("amdgpu/si58_mc.bin");
+
+ #define MC_SEQ_MISC0__MT__MASK 0xf0000000
+ #define MC_SEQ_MISC0__MT__GDDR1 0x10000000
+@@ -134,9 +134,9 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
+ is_58_fw = true;
+
+ if (is_58_fw)
+- snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
++ snprintf(fw_name, sizeof(fw_name), "amdgpu/si58_mc.bin");
+ else
+- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
+ err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+index 996a4e5..a32f6f6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
++++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+@@ -56,16 +56,16 @@
+
+ #define BIOS_SCRATCH_4 0x5cd
+
+-MODULE_FIRMWARE("radeon/tahiti_smc.bin");
+-MODULE_FIRMWARE("radeon/pitcairn_smc.bin");
+-MODULE_FIRMWARE("radeon/pitcairn_k_smc.bin");
+-MODULE_FIRMWARE("radeon/verde_smc.bin");
+-MODULE_FIRMWARE("radeon/verde_k_smc.bin");
+-MODULE_FIRMWARE("radeon/oland_smc.bin");
+-MODULE_FIRMWARE("radeon/oland_k_smc.bin");
+-MODULE_FIRMWARE("radeon/hainan_smc.bin");
+-MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
+-MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");
++MODULE_FIRMWARE("amdgpu/tahiti_smc.bin");
++MODULE_FIRMWARE("amdgpu/pitcairn_smc.bin");
++MODULE_FIRMWARE("amdgpu/pitcairn_k_smc.bin");
++MODULE_FIRMWARE("amdgpu/verde_smc.bin");
++MODULE_FIRMWARE("amdgpu/verde_k_smc.bin");
++MODULE_FIRMWARE("amdgpu/oland_smc.bin");
++MODULE_FIRMWARE("amdgpu/oland_k_smc.bin");
++MODULE_FIRMWARE("amdgpu/hainan_smc.bin");
++MODULE_FIRMWARE("amdgpu/hainan_k_smc.bin");
++MODULE_FIRMWARE("amdgpu/banks_k_2_smc.bin");
+
+ static const struct amd_pm_funcs si_dpm_funcs;
+
+@@ -7663,7 +7663,7 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev)
+ default: BUG();
+ }
+
+- snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
+ err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5203-drm-amdgpu-switch-firmware-path-for-CIK-parts-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5203-drm-amdgpu-switch-firmware-path-for-CIK-parts-v2.patch
new file mode 100644
index 00000000..64b2e22a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5203-drm-amdgpu-switch-firmware-path-for-CIK-parts-v2.patch
@@ -0,0 +1,320 @@
+From 2056411d82c4ab5f03516b1163aa382b53b9fc2d Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Mon, 2 Jul 2018 14:32:28 -0500
+Subject: [PATCH 5203/5725] drm/amdgpu: switch firmware path for CIK parts (v2)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Use separate firmware path for amdgpu to avoid conflicts
+with radeon on CIK parts.
+
+v2: squash in logic simplification (Alex)
+
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 8 ++--
+ drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 10 ++---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 10 ++---
+ drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 10 ++---
+ drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 24 +++++------
+ drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 72 ++++++++++++++++-----------------
+ drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 9 ++---
+ 7 files changed, 70 insertions(+), 73 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+index e950730..693ec5e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+@@ -314,17 +314,17 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
+ (adev->pdev->revision == 0x81) ||
+ (adev->pdev->device == 0x665f)) {
+ info->is_kicker = true;
+- strcpy(fw_name, "radeon/bonaire_k_smc.bin");
++ strcpy(fw_name, "amdgpu/bonaire_k_smc.bin");
+ } else {
+- strcpy(fw_name, "radeon/bonaire_smc.bin");
++ strcpy(fw_name, "amdgpu/bonaire_smc.bin");
+ }
+ break;
+ case CHIP_HAWAII:
+ if (adev->pdev->revision == 0x80) {
+ info->is_kicker = true;
+- strcpy(fw_name, "radeon/hawaii_k_smc.bin");
++ strcpy(fw_name, "amdgpu/hawaii_k_smc.bin");
+ } else {
+- strcpy(fw_name, "radeon/hawaii_smc.bin");
++ strcpy(fw_name, "amdgpu/hawaii_smc.bin");
+ }
+ break;
+ case CHIP_TOPAZ:
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+index dc6d0f5..83c6e71 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+@@ -53,11 +53,11 @@
+
+ /* Firmware Names */
+ #ifdef CONFIG_DRM_AMDGPU_CIK
+-#define FIRMWARE_BONAIRE "radeon/bonaire_uvd.bin"
+-#define FIRMWARE_KABINI "radeon/kabini_uvd.bin"
+-#define FIRMWARE_KAVERI "radeon/kaveri_uvd.bin"
+-#define FIRMWARE_HAWAII "radeon/hawaii_uvd.bin"
+-#define FIRMWARE_MULLINS "radeon/mullins_uvd.bin"
++#define FIRMWARE_BONAIRE "amdgpu/bonaire_uvd.bin"
++#define FIRMWARE_KABINI "amdgpu/kabini_uvd.bin"
++#define FIRMWARE_KAVERI "amdgpu/kaveri_uvd.bin"
++#define FIRMWARE_HAWAII "amdgpu/hawaii_uvd.bin"
++#define FIRMWARE_MULLINS "amdgpu/mullins_uvd.bin"
+ #endif
+ #define FIRMWARE_TONGA "amdgpu/tonga_uvd.bin"
+ #define FIRMWARE_CARRIZO "amdgpu/carrizo_uvd.bin"
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+index 6ebf95a..7c23719 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+@@ -40,11 +40,11 @@
+
+ /* Firmware Names */
+ #ifdef CONFIG_DRM_AMDGPU_CIK
+-#define FIRMWARE_BONAIRE "radeon/bonaire_vce.bin"
+-#define FIRMWARE_KABINI "radeon/kabini_vce.bin"
+-#define FIRMWARE_KAVERI "radeon/kaveri_vce.bin"
+-#define FIRMWARE_HAWAII "radeon/hawaii_vce.bin"
+-#define FIRMWARE_MULLINS "radeon/mullins_vce.bin"
++#define FIRMWARE_BONAIRE "amdgpu/bonaire_vce.bin"
++#define FIRMWARE_KABINI "amdgpu/kabini_vce.bin"
++#define FIRMWARE_KAVERI "amdgpu/kaveri_vce.bin"
++#define FIRMWARE_HAWAII "amdgpu/hawaii_vce.bin"
++#define FIRMWARE_MULLINS "amdgpu/mullins_vce.bin"
+ #endif
+ #define FIRMWARE_TONGA "amdgpu/tonga_vce.bin"
+ #define FIRMWARE_CARRIZO "amdgpu/carrizo_vce.bin"
+diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+index 2b41ed7..9bf0b24 100644
+--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
++++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+@@ -49,10 +49,10 @@
+ #include "gmc/gmc_7_1_d.h"
+ #include "gmc/gmc_7_1_sh_mask.h"
+
+-MODULE_FIRMWARE("radeon/bonaire_smc.bin");
+-MODULE_FIRMWARE("radeon/bonaire_k_smc.bin");
+-MODULE_FIRMWARE("radeon/hawaii_smc.bin");
+-MODULE_FIRMWARE("radeon/hawaii_k_smc.bin");
++MODULE_FIRMWARE("amdgpu/bonaire_smc.bin");
++MODULE_FIRMWARE("amdgpu/bonaire_k_smc.bin");
++MODULE_FIRMWARE("amdgpu/hawaii_smc.bin");
++MODULE_FIRMWARE("amdgpu/hawaii_k_smc.bin");
+
+ #define MC_CG_ARB_FREQ_F0 0x0a
+ #define MC_CG_ARB_FREQ_F1 0x0b
+@@ -5814,7 +5814,7 @@ static int ci_dpm_init_microcode(struct amdgpu_device *adev)
+ default: BUG();
+ }
+
+- snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
+ err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+index 1543e7e..e1b56e7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
++++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+@@ -54,16 +54,16 @@ static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
+ static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);
+ static int cik_sdma_soft_reset(void *handle);
+
+-MODULE_FIRMWARE("radeon/bonaire_sdma.bin");
+-MODULE_FIRMWARE("radeon/bonaire_sdma1.bin");
+-MODULE_FIRMWARE("radeon/hawaii_sdma.bin");
+-MODULE_FIRMWARE("radeon/hawaii_sdma1.bin");
+-MODULE_FIRMWARE("radeon/kaveri_sdma.bin");
+-MODULE_FIRMWARE("radeon/kaveri_sdma1.bin");
+-MODULE_FIRMWARE("radeon/kabini_sdma.bin");
+-MODULE_FIRMWARE("radeon/kabini_sdma1.bin");
+-MODULE_FIRMWARE("radeon/mullins_sdma.bin");
+-MODULE_FIRMWARE("radeon/mullins_sdma1.bin");
++MODULE_FIRMWARE("amdgpu/bonaire_sdma.bin");
++MODULE_FIRMWARE("amdgpu/bonaire_sdma1.bin");
++MODULE_FIRMWARE("amdgpu/hawaii_sdma.bin");
++MODULE_FIRMWARE("amdgpu/hawaii_sdma1.bin");
++MODULE_FIRMWARE("amdgpu/kaveri_sdma.bin");
++MODULE_FIRMWARE("amdgpu/kaveri_sdma1.bin");
++MODULE_FIRMWARE("amdgpu/kabini_sdma.bin");
++MODULE_FIRMWARE("amdgpu/kabini_sdma1.bin");
++MODULE_FIRMWARE("amdgpu/mullins_sdma.bin");
++MODULE_FIRMWARE("amdgpu/mullins_sdma1.bin");
+
+ u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);
+
+@@ -132,9 +132,9 @@ static int cik_sdma_init_microcode(struct amdgpu_device *adev)
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (i == 0)
+- snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
+ else
+- snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma1.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
+ err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+index 703803f..46dfa24 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+@@ -57,36 +57,36 @@ static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev);
+ static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev);
+ static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev);
+
+-MODULE_FIRMWARE("radeon/bonaire_pfp.bin");
+-MODULE_FIRMWARE("radeon/bonaire_me.bin");
+-MODULE_FIRMWARE("radeon/bonaire_ce.bin");
+-MODULE_FIRMWARE("radeon/bonaire_rlc.bin");
+-MODULE_FIRMWARE("radeon/bonaire_mec.bin");
+-
+-MODULE_FIRMWARE("radeon/hawaii_pfp.bin");
+-MODULE_FIRMWARE("radeon/hawaii_me.bin");
+-MODULE_FIRMWARE("radeon/hawaii_ce.bin");
+-MODULE_FIRMWARE("radeon/hawaii_rlc.bin");
+-MODULE_FIRMWARE("radeon/hawaii_mec.bin");
+-
+-MODULE_FIRMWARE("radeon/kaveri_pfp.bin");
+-MODULE_FIRMWARE("radeon/kaveri_me.bin");
+-MODULE_FIRMWARE("radeon/kaveri_ce.bin");
+-MODULE_FIRMWARE("radeon/kaveri_rlc.bin");
+-MODULE_FIRMWARE("radeon/kaveri_mec.bin");
+-MODULE_FIRMWARE("radeon/kaveri_mec2.bin");
+-
+-MODULE_FIRMWARE("radeon/kabini_pfp.bin");
+-MODULE_FIRMWARE("radeon/kabini_me.bin");
+-MODULE_FIRMWARE("radeon/kabini_ce.bin");
+-MODULE_FIRMWARE("radeon/kabini_rlc.bin");
+-MODULE_FIRMWARE("radeon/kabini_mec.bin");
+-
+-MODULE_FIRMWARE("radeon/mullins_pfp.bin");
+-MODULE_FIRMWARE("radeon/mullins_me.bin");
+-MODULE_FIRMWARE("radeon/mullins_ce.bin");
+-MODULE_FIRMWARE("radeon/mullins_rlc.bin");
+-MODULE_FIRMWARE("radeon/mullins_mec.bin");
++MODULE_FIRMWARE("amdgpu/bonaire_pfp.bin");
++MODULE_FIRMWARE("amdgpu/bonaire_me.bin");
++MODULE_FIRMWARE("amdgpu/bonaire_ce.bin");
++MODULE_FIRMWARE("amdgpu/bonaire_rlc.bin");
++MODULE_FIRMWARE("amdgpu/bonaire_mec.bin");
++
++MODULE_FIRMWARE("amdgpu/hawaii_pfp.bin");
++MODULE_FIRMWARE("amdgpu/hawaii_me.bin");
++MODULE_FIRMWARE("amdgpu/hawaii_ce.bin");
++MODULE_FIRMWARE("amdgpu/hawaii_rlc.bin");
++MODULE_FIRMWARE("amdgpu/hawaii_mec.bin");
++
++MODULE_FIRMWARE("amdgpu/kaveri_pfp.bin");
++MODULE_FIRMWARE("amdgpu/kaveri_me.bin");
++MODULE_FIRMWARE("amdgpu/kaveri_ce.bin");
++MODULE_FIRMWARE("amdgpu/kaveri_rlc.bin");
++MODULE_FIRMWARE("amdgpu/kaveri_mec.bin");
++MODULE_FIRMWARE("amdgpu/kaveri_mec2.bin");
++
++MODULE_FIRMWARE("amdgpu/kabini_pfp.bin");
++MODULE_FIRMWARE("amdgpu/kabini_me.bin");
++MODULE_FIRMWARE("amdgpu/kabini_ce.bin");
++MODULE_FIRMWARE("amdgpu/kabini_rlc.bin");
++MODULE_FIRMWARE("amdgpu/kabini_mec.bin");
++
++MODULE_FIRMWARE("amdgpu/mullins_pfp.bin");
++MODULE_FIRMWARE("amdgpu/mullins_me.bin");
++MODULE_FIRMWARE("amdgpu/mullins_ce.bin");
++MODULE_FIRMWARE("amdgpu/mullins_rlc.bin");
++MODULE_FIRMWARE("amdgpu/mullins_mec.bin");
+
+ static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
+ {
+@@ -925,7 +925,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
+ default: BUG();
+ }
+
+- snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
+ err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+@@ -933,7 +933,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
+ if (err)
+ goto out;
+
+- snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
+ err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+@@ -941,7 +941,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
+ if (err)
+ goto out;
+
+- snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
+ err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+@@ -949,7 +949,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
+ if (err)
+ goto out;
+
+- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
+ err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+@@ -958,7 +958,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
+ goto out;
+
+ if (adev->asic_type == CHIP_KAVERI) {
+- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec2.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
+ err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+@@ -967,7 +967,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
+ goto out;
+ }
+
+- snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
+ err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+index e3d6ff1..725de42 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+@@ -50,8 +50,8 @@ static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev);
+ static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
+ static int gmc_v7_0_wait_for_idle(void *handle);
+
+-MODULE_FIRMWARE("radeon/bonaire_mc.bin");
+-MODULE_FIRMWARE("radeon/hawaii_mc.bin");
++MODULE_FIRMWARE("amdgpu/bonaire_mc.bin");
++MODULE_FIRMWARE("amdgpu/hawaii_mc.bin");
+ MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
+
+ static const u32 golden_settings_iceland_a11[] =
+@@ -150,10 +150,7 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
+ default: BUG();
+ }
+
+- if (adev->asic_type == CHIP_TOPAZ)
+- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
+- else
+- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
++ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
+
+ err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
+ if (err)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5204-Hybrid-Version-18.45.0.418.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5204-Hybrid-Version-18.45.0.418.patch
new file mode 100644
index 00000000..90a074c1
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5204-Hybrid-Version-18.45.0.418.patch
@@ -0,0 +1,27 @@
+From b7ac7a68948d89e368ac2870e5a1f5c92e04377d Mon Sep 17 00:00:00 2001
+From: Junshan Fang <Junshan.Fang@amd.com>
+Date: Mon, 10 Sep 2018 16:54:54 +0800
+Subject: [PATCH 5204/5725] Hybrid Version: 18.45.0.418
+
+Change-Id: Iaa9ee8d86aa50c1720012304844e904f64d1e1b9
+Signed-off-by: Junshan Fang <Junshan.Fang@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 66a7e9f..ba99200 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -76,7 +76,7 @@
+ #define KMS_DRIVER_MINOR 27
+ #define KMS_DRIVER_PATCHLEVEL 0
+
+-#define AMDGPU_VERSION "18.45.2.415"
++#define AMDGPU_VERSION "18.45.0.418"
+
+ int amdgpu_vram_limit = 0;
+ int amdgpu_vis_vram_limit = 0;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5205-drm-amdgpu-add-amdgpu_gmc_pd_addr-helper.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5205-drm-amdgpu-add-amdgpu_gmc_pd_addr-helper.patch
new file mode 100644
index 00000000..065bb311
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5205-drm-amdgpu-add-amdgpu_gmc_pd_addr-helper.patch
@@ -0,0 +1,196 @@
+From 821731d132b32769926090b3385f1bc414c4fb7f Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Wed, 22 Aug 2018 12:22:14 +0200
+Subject: [PATCH 5205/5725] drm/amdgpu: add amdgpu_gmc_pd_addr helper
+
+Add a helper to get the root PD address and remove the workarounds from
+the GMC9 code for that.
+
+Change-Id: I64bbd27d52938da78b38390b98febb473c08fba1
+Signed-off-by: Kevin Wang <Kevin1.Wang@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/Makefile | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 47 ++++++++++++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 2 +
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 7 +---
+ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 4 --
+ drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 7 +---
+ 9 files changed, 55 insertions(+), 20 deletions(-)
+ create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
+index e0854bb..064dd5f7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/Makefile
++++ b/drivers/gpu/drm/amd/amdgpu/Makefile
+@@ -31,7 +31,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
+ amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
+ amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
+ amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
+- amdgpu_queue_mgr.o amdgpu_vf_error.o amdgpu_sched.o amdgpu_sem.o amdgpu_amdkfd_fence.o \
++ amdgpu_queue_mgr.o amdgpu_vf_error.o amdgpu_sched.o amdgpu_sem.o amdgpu_gmc.o amdgpu_amdkfd_fence.o \
+ amdgpu_debugfs.o amdgpu_ids.o
+
+ # add asic specific block
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index fb22f77..02d9ae7d2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -404,7 +404,7 @@ static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
+ return ret;
+ }
+
+- vm->pd_phys_addr = get_vm_pd_gpu_offset(vm);
++ vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);
+
+ if (vm->use_cpu_for_update) {
+ ret = amdgpu_bo_kmap(pd, NULL);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index 24f2489..fd9fe69 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -962,7 +962,7 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
+ }
+
+ if (p->job->vm) {
+- p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->root.base.bo);
++ p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);
+
+ r = amdgpu_bo_vm_update_pte(p);
+ if (r)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+new file mode 100644
+index 0000000..36058fe
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+@@ -0,0 +1,47 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ */
++
++#include "amdgpu.h"
++
++/**
++ * amdgpu_gmc_pd_addr - return the address of the root directory
++ *
++ */
++uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo)
++{
++ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
++ uint64_t pd_addr;
++
++ pd_addr = amdgpu_bo_gpu_offset(bo);
++ /* TODO: move that into ASIC specific code */
++ if (adev->asic_type >= CHIP_VEGA10) {
++ uint64_t flags = AMDGPU_PTE_VALID;
++
++ amdgpu_gmc_get_vm_pde(adev, -1, &pd_addr, &flags);
++ pd_addr |= flags;
++ }
++ return pd_addr;
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+index 5dbbac6..f347ba9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+@@ -134,4 +134,6 @@ static inline bool amdgpu_gmc_vram_full_visible(struct amdgpu_gmc *gmc)
+ return (gmc->real_vram_size == gmc->visible_vram_size);
+ }
+
++uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo);
++
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index c263f18..f1d9fe3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -2248,7 +2248,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
+ return r;
+
+ if (vm_needs_flush) {
+- job->vm_pd_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
++ job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
+ job->vm_needs_flush = true;
+ }
+ if (resv) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+index 2baab7e..3403ded 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+@@ -37,12 +37,7 @@ u64 gfxhub_v1_0_get_mc_fb_offset(struct amdgpu_device *adev)
+
+ static void gfxhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
+ {
+- uint64_t value = amdgpu_bo_gpu_offset(adev->gart.bo);
+-
+- BUG_ON(value & (~0x0000FFFFFFFFF000ULL));
+- value -= adev->gmc.vram_start + adev->vm_manager.vram_base_offset;
+- value &= 0x0000FFFFFFFFF000ULL;
+- value |= 0x1; /*valid bit*/
++ uint64_t value = amdgpu_gmc_pd_addr(adev->gart.bo);
+
+ WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+ lower_32_bits(value));
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index 730a589..a2625e2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -428,12 +428,8 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
+ uint32_t req = gmc_v9_0_get_invalidate_req(vmid);
+- uint64_t flags = AMDGPU_PTE_VALID;
+ unsigned eng = ring->vm_inv_eng;
+
+- amdgpu_gmc_get_vm_pde(adev, -1, &pd_addr, &flags);
+- pd_addr |= flags;
+-
+ amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
+ lower_32_bits(pd_addr));
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+index 800ec46..5f6a9c8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+@@ -47,12 +47,7 @@ u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev)
+
+ static void mmhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
+ {
+- uint64_t value = amdgpu_bo_gpu_offset(adev->gart.bo);
+-
+- BUG_ON(value & (~0x0000FFFFFFFFF000ULL));
+- value -= adev->gmc.vram_start + adev->vm_manager.vram_base_offset;
+- value &= 0x0000FFFFFFFFF000ULL;
+- value |= 0x1; /* valid bit */
++ uint64_t value = amdgpu_gmc_pd_addr(adev->gart.bo);
+
+ WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+ lower_32_bits(value));
+--
+2.7.4
+
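Patch 5205 above replaces several open-coded page-directory address computations (CS, KFD GPUVM, TTM copy, GART init) with one amdgpu_gmc_pd_addr() helper that only applies the GFX9-style PDE flags on Vega and newer ASICs. A self-contained sketch of that shape, assuming made-up enum values, a PTE_VALID stand-in and a pretend_gpu_offset() placeholder; the real helper uses amdgpu_bo_gpu_offset() and amdgpu_gmc_get_vm_pde() as shown in the hunk:

    #include <stdint.h>
    #include <stdio.h>

    #define PTE_VALID 0x1ULL /* stand-in for AMDGPU_PTE_VALID */

    enum asic_type { CHIP_BONAIRE, CHIP_TONGA, CHIP_VEGA10, CHIP_RAVEN };

    /* Placeholder for amdgpu_bo_gpu_offset() on the root PD buffer object. */
    static uint64_t pretend_gpu_offset(void)
    {
        return 0x0000001234567000ULL;
    }

    /* One helper instead of duplicating the "add PDE flags on GFX9+" logic
     * in every caller. */
    static uint64_t gmc_pd_addr(enum asic_type asic)
    {
        uint64_t pd_addr = pretend_gpu_offset();

        if (asic >= CHIP_VEGA10) /* newer ASICs encode flags in the address */
            pd_addr |= PTE_VALID;

        return pd_addr;
    }

    int main(void)
    {
        printf("GFX8 PD addr: 0x%llx\n", (unsigned long long)gmc_pd_addr(CHIP_TONGA));
        printf("GFX9 PD addr: 0x%llx\n", (unsigned long long)gmc_pd_addr(CHIP_VEGA10));
        return 0;
    }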
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5206-drm-amdgpu-add-ring-soft-recovery-v4.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5206-drm-amdgpu-add-ring-soft-recovery-v4.patch
new file mode 100644
index 00000000..c7e1cf55
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5206-drm-amdgpu-add-ring-soft-recovery-v4.patch
@@ -0,0 +1,100 @@
+From 8d7fd13f802585147ea04537d32c12bd2ba828e5 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Tue, 21 Aug 2018 11:11:36 +0200
+Subject: [PATCH 5206/5725] drm/amdgpu: add ring soft recovery v4
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Instead of hammering hard on the GPU, try a soft recovery first.
+
+v2: reorder code a bit
+v3: increase timeout to 10ms, increment GPU reset counter
+v4: squash in compile fix (Christian)
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 6 ++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 25 +++++++++++++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 4 ++++
+ 3 files changed, 35 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+index f72d959..2d50825 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+@@ -33,6 +33,12 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
+ struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
+ struct amdgpu_job *job = to_amdgpu_job(s_job);
+
++ if (amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
++ DRM_ERROR("ring %s timeout, but soft recovered\n",
++ s_job->sched->name);
++ return;
++ }
++
+ DRM_ERROR("ring %s timeout, signaled seq=%u, emitted seq=%u\n",
+ job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
+ ring->fence_drv.sync_seq);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+index 93794a8..5a56d9a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+@@ -481,6 +481,31 @@ void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
+ amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
+ }
+
++/**
++ * amdgpu_ring_soft_recovery - try to soft recover a ring lockup
++ *
++ * @ring: ring to try the recovery on
++ * @vmid: VMID we try to get going again
++ * @fence: timedout fence
++ *
++ * Tries to get a ring proceeding again when it is stuck.
++ */
++bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
++ struct dma_fence *fence)
++{
++ ktime_t deadline = ktime_add_us(ktime_get(), 10000);
++
++ if (!ring->funcs->soft_recovery)
++ return false;
++
++ atomic_inc(&ring->adev->gpu_reset_counter);
++ while (!dma_fence_is_signaled(fence) &&
++ ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0)
++ ring->funcs->soft_recovery(ring, vmid);
++
++ return dma_fence_is_signaled(fence);
++}
++
+ /*
+ * Debugfs info
+ */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+index 1f5fcfd..6ea3827 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+@@ -168,6 +168,8 @@ struct amdgpu_ring_funcs {
+ /* priority functions */
+ void (*set_priority) (struct amdgpu_ring *ring,
+ enum drm_sched_priority priority);
++ /* Try to soft recover the ring to make the fence signal */
++ void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid);
+ };
+
+ struct amdgpu_ring {
+@@ -265,6 +267,8 @@ void amdgpu_ring_lru_touch(struct amdgpu_device *adev, struct amdgpu_ring *ring)
+ void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
+ uint32_t reg0, uint32_t val0,
+ uint32_t reg1, uint32_t val1);
++bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
++ struct dma_fence *fence);
+
+ static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
+ {
+--
+2.7.4
+
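The core of amdgpu_ring_soft_recovery() above is a bounded retry loop: keep invoking the ring's soft_recovery hook until the timed-out fence signals or a 10 ms deadline passes, then report whether the fence actually signaled. A userspace sketch of that control flow, with fence_signaled() and kick_ring() as placeholders for dma_fence_is_signaled() and ring->funcs->soft_recovery():

    #include <stdbool.h>
    #include <stdint.h>
    #include <time.h>

    static uint64_t now_ns(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
    }

    /* Placeholders: this sketch never signals, so it exercises the timeout path. */
    static bool fence_signaled(void) { return false; }
    static void kick_ring(unsigned int vmid) { (void)vmid; }

    /* Mirror of the helper's logic: retry the soft-recovery hook until the
     * fence signals or the 10 ms deadline expires. */
    static bool soft_recover(unsigned int vmid)
    {
        uint64_t deadline = now_ns() + 10ULL * 1000 * 1000; /* 10 ms */

        while (!fence_signaled() && now_ns() < deadline)
            kick_ring(vmid);

        return fence_signaled();
    }

    int main(void)
    {
        return soft_recover(1) ? 0 : 1;
    }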
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5207-drm-amdgpu-implement-soft_recovery-for-GFX7.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5207-drm-amdgpu-implement-soft_recovery-for-GFX7.patch
new file mode 100644
index 00000000..638148d3
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5207-drm-amdgpu-implement-soft_recovery-for-GFX7.patch
@@ -0,0 +1,51 @@
+From 76f7d226a2d5c1c237a70bd98a5610315719b912 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Wed, 22 Aug 2018 11:55:23 +0200
+Subject: [PATCH 5207/5725] drm/amdgpu: implement soft_recovery for GFX7
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Try to kill waves on the SQ.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+index 46dfa24..e3d5714 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+@@ -4232,6 +4232,18 @@ static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
+ amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
+ }
+
++static void gfx_v7_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
++{
++ struct amdgpu_device *adev = ring->adev;
++ uint32_t value = 0;
++
++ value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
++ value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
++ value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
++ value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
++ WREG32(mmSQ_CMD, value);
++}
++
+ static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
+ {
+ WREG32(mmSQ_IND_INDEX,
+@@ -5109,6 +5121,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .emit_cntxcntl = gfx_v7_ring_emit_cntxcntl,
+ .emit_wreg = gfx_v7_0_ring_emit_wreg,
++ .soft_recovery = gfx_v7_0_ring_soft_recovery,
+ };
+
+ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5208-drm-amdgpu-implement-soft_recovery-for-GFX8-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5208-drm-amdgpu-implement-soft_recovery-for-GFX8-v2.patch
new file mode 100644
index 00000000..56a84fba
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5208-drm-amdgpu-implement-soft_recovery-for-GFX8-v2.patch
@@ -0,0 +1,53 @@
+From 66594074e644dd038e5d5b12ec4bfe6240fec9ed Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Tue, 21 Aug 2018 12:45:31 +0200
+Subject: [PATCH 5208/5725] drm/amdgpu: implement soft_recovery for GFX8 v2
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Try to kill waves on the SQ.
+
+v2: only for the GFX ring for now.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index 6f79369..19b65cf 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -6726,6 +6726,18 @@ static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
+ amdgpu_ring_write(ring, val);
+ }
+
++static void gfx_v8_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
++{
++ struct amdgpu_device *adev = ring->adev;
++ uint32_t value = 0;
++
++ value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
++ value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
++ value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
++ value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
++ WREG32(mmSQ_CMD, value);
++}
++
+ static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
+ enum amdgpu_interrupt_state state)
+ {
+@@ -7184,6 +7196,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
+ .init_cond_exec = gfx_v8_0_ring_emit_init_cond_exec,
+ .patch_cond_exec = gfx_v8_0_ring_emit_patch_cond_exec,
+ .emit_wreg = gfx_v8_0_ring_emit_wreg,
++ .soft_recovery = gfx_v8_0_ring_soft_recovery,
+ };
+
+ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5209-drm-amdgpu-implement-soft_recovery-for-GFX9.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5209-drm-amdgpu-implement-soft_recovery-for-GFX9.patch
new file mode 100644
index 00000000..12c3da46
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5209-drm-amdgpu-implement-soft_recovery-for-GFX9.patch
@@ -0,0 +1,51 @@
+From 4af0f806ce0f5025b29e01c679515553d3d9d99e Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Wed, 22 Aug 2018 12:04:11 +0200
+Subject: [PATCH 5209/5725] drm/amdgpu: implement soft_recovery for GFX9
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Try to kill waves on the SQ.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 29a7727..f588822 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -4434,6 +4434,18 @@ static void gfx_v9_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
+ ref, mask);
+ }
+
++static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
++{
++ struct amdgpu_device *adev = ring->adev;
++ uint32_t value = 0;
++
++ value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
++ value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
++ value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
++ value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
++ WREG32(mmSQ_CMD, value);
++}
++
+ static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
+ enum amdgpu_interrupt_state state)
+ {
+@@ -4757,6 +4769,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
+ .emit_wreg = gfx_v9_0_ring_emit_wreg,
+ .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
++ .soft_recovery = gfx_v9_0_ring_soft_recovery,
+ };
+
+ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5210-drm-amdgpu-Adjust-the-VM-size-based-on-system-memory.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5210-drm-amdgpu-Adjust-the-VM-size-based-on-system-memory.patch
new file mode 100644
index 00000000..319271ab
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5210-drm-amdgpu-Adjust-the-VM-size-based-on-system-memory.patch
@@ -0,0 +1,110 @@
+From b04efcbd0a3c3ac8b89c88f672518f1ba8d4a0d8 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Tue, 21 Aug 2018 17:14:32 -0400
+Subject: [PATCH 5210/5725] drm/amdgpu: Adjust the VM size based on system
+ memory size v2
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Set the VM size based on system memory size between the ASIC-specific
+limits given by min_vm_size and max_bits. GFXv9 GPUs will keep their
+default VM size of 256TB (48 bit). Only older GPUs will adjust VM size
+depending on system memory size.
+
+This makes more VM space available for ROCm applications on GFXv8 GPUs
+that want to map all available VRAM and system memory in their SVM
+address space.
+
+v2:
+* Clarify comment
+* Round up memory size before >> 30
+* Round up automatic vm_size to power of two
+
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Acked-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 32 ++++++++++++++++++++++++++++----
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 2 +-
+ 2 files changed, 29 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index bd12f4a..516074d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -2495,28 +2495,52 @@ static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
+ * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
+ *
+ * @adev: amdgpu_device pointer
+- * @vm_size: the default vm size if it's set auto
++ * @min_vm_size: the minimum vm size in GB if it's set auto
+ * @fragment_size_default: Default PTE fragment size
+ * @max_level: max VMPT level
+ * @max_bits: max address space size in bits
+ *
+ */
+-void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
++void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
+ uint32_t fragment_size_default, unsigned max_level,
+ unsigned max_bits)
+ {
++ unsigned int max_size = 1 << (max_bits - 30);
++ unsigned int vm_size;
+ uint64_t tmp;
+
+ /* adjust vm size first */
+ if (amdgpu_vm_size != -1) {
+- unsigned max_size = 1 << (max_bits - 30);
+-
+ vm_size = amdgpu_vm_size;
+ if (vm_size > max_size) {
+ dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
+ amdgpu_vm_size, max_size);
+ vm_size = max_size;
+ }
++ } else {
++ struct sysinfo si;
++ unsigned int phys_ram_gb;
++
++ /* Optimal VM size depends on the amount of physical
++ * RAM available. Underlying requirements and
++ * assumptions:
++ *
++ * - Need to map system memory and VRAM from all GPUs
++ * - VRAM from other GPUs not known here
++ * - Assume VRAM <= system memory
++ * - On GFX8 and older, VM space can be segmented for
++ * different MTYPEs
++ * - Need to allow room for fragmentation, guard pages etc.
++ *
++ * This adds up to a rough guess of system memory x3.
++ * Round up to power of two to maximize the available
++ * VM size with the given page table size.
++ */
++ si_meminfo(&si);
++ phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
++ (1 << 30) - 1) >> 30;
++ vm_size = roundup_pow_of_two(
++ min(max(phys_ram_gb * 3, min_vm_size), max_size));
+ }
+
+ adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+index 3461300..fd8da1d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+@@ -346,7 +346,7 @@ struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
+ void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket);
+ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
+ struct amdgpu_bo_va *bo_va);
+-void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
++void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
+ uint32_t fragment_size_default, unsigned max_level,
+ unsigned max_bits);
+ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+--
+2.7.4
+
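The automatic path added in patch 5210 boils down to vm_size = roundup_pow_of_two(min(max(phys_ram_gb * 3, min_vm_size), max_size)), with phys_ram_gb rounded up from the raw byte count before the shift. A small standalone check of that arithmetic; the 32 GB RAM figure and the 64 GB minimum / 40-bit maximum are example inputs, not values taken from the patch:

    #include <stdint.h>
    #include <stdio.h>

    /* Round v up to the next power of two (v > 0). */
    static uint64_t roundup_pow_of_two(uint64_t v)
    {
        uint64_t p = 1;
        while (p < v)
            p <<= 1;
        return p;
    }

    int main(void)
    {
        /* Example inputs; the real limits come from the ASIC-specific
         * min_vm_size and max_bits arguments. */
        uint64_t total_ram_bytes = 32ULL << 30;        /* 32 GB of system RAM */
        uint64_t min_vm_size = 64;                     /* GB */
        uint64_t max_size = 1ULL << (40 - 30);         /* GB, i.e. max_bits = 40 */

        /* Round the byte count up to whole GB, as the patch does before >> 30. */
        uint64_t phys_ram_gb = (total_ram_bytes + (1ULL << 30) - 1) >> 30;

        uint64_t vm_size = phys_ram_gb * 3;            /* rough guess: RAM x3 */
        if (vm_size < min_vm_size)
            vm_size = min_vm_size;
        if (vm_size > max_size)
            vm_size = max_size;
        vm_size = roundup_pow_of_two(vm_size);         /* 96 GB -> 128 GB here */

        printf("phys_ram_gb = %llu, vm_size = %llu GB\n",
               (unsigned long long)phys_ram_gb, (unsigned long long)vm_size);
        return 0;
    }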
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5211-drm-amdgpu-Enable-disable-gfx-PG-feature-in-rlc-safe.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5211-drm-amdgpu-Enable-disable-gfx-PG-feature-in-rlc-safe.patch
new file mode 100644
index 00000000..e6319208
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5211-drm-amdgpu-Enable-disable-gfx-PG-feature-in-rlc-safe.patch
@@ -0,0 +1,49 @@
+From 522ad169b4ddd0140b26dc104d615980327d5d7d Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Fri, 24 Aug 2018 17:26:23 +0800
+Subject: [PATCH 5211/5725] drm/amdgpu: Enable/disable gfx PG feature in rlc
+ safe mode
+
+This is required by the gfx hw and can fix the rlc hang when
+doing s3 stress tests on Cz/St.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Hang Zhou <hang.zhou@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index 19b65cf..be2cd71 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -5659,6 +5659,11 @@ static int gfx_v8_0_set_powergating_state(void *handle,
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
++ if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
++ AMD_PG_SUPPORT_RLC_SMU_HS |
++ AMD_PG_SUPPORT_CP |
++ AMD_PG_SUPPORT_GFX_DMG))
++ adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ switch (adev->asic_type) {
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+@@ -5708,7 +5713,11 @@ static int gfx_v8_0_set_powergating_state(void *handle,
+ default:
+ break;
+ }
+-
++ if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
++ AMD_PG_SUPPORT_RLC_SMU_HS |
++ AMD_PG_SUPPORT_CP |
++ AMD_PG_SUPPORT_GFX_DMG))
++ adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ return 0;
+ }
+
+--
+2.7.4
+
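Patch 5211 brackets the power-gating programming with enter_safe_mode()/exit_safe_mode() whenever any of the listed PG flags is set, because the RLC must be in safe mode while those features are reprogrammed. A sketch of that bracketing pattern, assuming illustrative flag values and placeholder functions rather than the real AMD_PG_SUPPORT_* bits and rlc.funcs callbacks:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative PG feature bits, not the real AMD_PG_SUPPORT_* values. */
    #define PG_GFX_SMG    (1u << 0)
    #define PG_RLC_SMU_HS (1u << 1)
    #define PG_CP         (1u << 2)
    #define PG_GFX_DMG    (1u << 3)

    #define PG_NEEDS_SAFE_MODE (PG_GFX_SMG | PG_RLC_SMU_HS | PG_CP | PG_GFX_DMG)

    static void enter_safe_mode(void)      { puts("RLC: enter safe mode"); }
    static void exit_safe_mode(void)       { puts("RLC: exit safe mode"); }
    static void program_powergating(void)  { puts("program PG state"); }

    /* Wrap the whole PG sequence in safe mode when any relevant flag is set. */
    static void set_powergating_state(uint32_t pg_flags)
    {
        int need_safe_mode = (pg_flags & PG_NEEDS_SAFE_MODE) != 0;

        if (need_safe_mode)
            enter_safe_mode();

        program_powergating();

        if (need_safe_mode)
            exit_safe_mode();
    }

    int main(void)
    {
        set_powergating_state(PG_GFX_SMG | PG_CP);
        return 0;
    }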
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5212-drm-amdgpu-Remove-duplicated-power-source-update.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5212-drm-amdgpu-Remove-duplicated-power-source-update.patch
new file mode 100644
index 00000000..5efccc05
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5212-drm-amdgpu-Remove-duplicated-power-source-update.patch
@@ -0,0 +1,38 @@
+From d6e5ea19d097d414359f9485d2c8a531d2d71392 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Thu, 23 Aug 2018 11:46:13 +0800
+Subject: [PATCH 5212/5725] drm/amdgpu: Remove duplicated power source update
+
+when ac/dc switch, driver will be notified by acpi event.
+then the power source will be updated. so don't need to
+get power source when set power state.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 8 --------
+ 1 file changed, 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+index 1e9ed55..4928c61 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+@@ -1932,14 +1932,6 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
+ amdgpu_fence_wait_empty(ring);
+ }
+
+- mutex_lock(&adev->pm.mutex);
+- /* update battery/ac status */
+- if (power_supply_is_system_supplied() > 0)
+- adev->pm.ac_power = true;
+- else
+- adev->pm.ac_power = false;
+- mutex_unlock(&adev->pm.mutex);
+-
+ if (adev->powerplay.pp_funcs->dispatch_tasks) {
+ if (!amdgpu_device_has_dc_support(adev)) {
+ mutex_lock(&adev->pm.mutex);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5213-drm-amdgpu-Fix-vce-initialize-failed-on-Kaveri-Mulli.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5213-drm-amdgpu-Fix-vce-initialize-failed-on-Kaveri-Mulli.patch
new file mode 100644
index 00000000..ea4966f5
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5213-drm-amdgpu-Fix-vce-initialize-failed-on-Kaveri-Mulli.patch
@@ -0,0 +1,128 @@
+From 5a85a6d82310e9149f46db3e2bd1beba1f54c4df Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Thu, 23 Aug 2018 15:30:45 +0800
+Subject: [PATCH 5213/5725] drm/amdgpu: Fix vce initialize failed on
+ Kaveri/Mullins
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Forgot to add vce pg support via smu for Kaveri/Mullins.
+
+Fixes: 561a5c83eadd ("drm/amd/pp: Unify powergate_uvd/vce/mmhub
+ to set_powergating_by_smu")
+
+v2: refine patch descriptions suggested by Michel
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Tested-by: Michel Dänzer <michel.daenzer@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 41 +++++++++++++++++++++++--------------
+ 1 file changed, 26 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+index cee92f8..c243e22 100644
+--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
++++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+@@ -66,7 +66,6 @@ static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
+ static int kv_init_fps_limits(struct amdgpu_device *adev);
+
+ static void kv_dpm_powergate_uvd(void *handle, bool gate);
+-static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate);
+ static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate);
+ static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate);
+
+@@ -1374,6 +1373,8 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
+
+ static void kv_dpm_disable(struct amdgpu_device *adev)
+ {
++ struct kv_power_info *pi = kv_get_pi(adev);
++
+ amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
+ AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
+ amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
+@@ -1387,7 +1388,8 @@ static void kv_dpm_disable(struct amdgpu_device *adev)
+ /* powerup blocks */
+ kv_dpm_powergate_acp(adev, false);
+ kv_dpm_powergate_samu(adev, false);
+- kv_dpm_powergate_vce(adev, false);
++ if (pi->caps_vce_pg) /* power on the VCE block */
++ amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
+ kv_dpm_powergate_uvd(adev, false);
+
+ kv_enable_smc_cac(adev, false);
+@@ -1551,7 +1553,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev,
+ int ret;
+
+ if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) {
+- kv_dpm_powergate_vce(adev, false);
+ if (pi->caps_stable_p_state)
+ pi->vce_boot_level = table->count - 1;
+ else
+@@ -1573,7 +1574,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev,
+ kv_enable_vce_dpm(adev, true);
+ } else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) {
+ kv_enable_vce_dpm(adev, false);
+- kv_dpm_powergate_vce(adev, true);
+ }
+
+ return 0;
+@@ -1702,24 +1702,32 @@ static void kv_dpm_powergate_uvd(void *handle, bool gate)
+ }
+ }
+
+-static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
++static void kv_dpm_powergate_vce(void *handle, bool gate)
+ {
++ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct kv_power_info *pi = kv_get_pi(adev);
+-
+- if (pi->vce_power_gated == gate)
+- return;
++ int ret;
+
+ pi->vce_power_gated = gate;
+
+- if (!pi->caps_vce_pg)
+- return;
+-
+- if (gate)
+- amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);
+- else
+- amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
++ if (gate) {
++ /* stop the VCE block */
++ ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
++ AMD_PG_STATE_GATE);
++ kv_enable_vce_dpm(adev, false);
++ if (pi->caps_vce_pg) /* power off the VCE block */
++ amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);
++ } else {
++ if (pi->caps_vce_pg) /* power on the VCE block */
++ amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
++ kv_enable_vce_dpm(adev, true);
++ /* re-init the VCE block */
++ ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
++ AMD_PG_STATE_UNGATE);
++ }
+ }
+
++
+ static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate)
+ {
+ struct kv_power_info *pi = kv_get_pi(adev);
+@@ -3312,6 +3320,9 @@ static int kv_set_powergating_by_smu(void *handle,
+ case AMD_IP_BLOCK_TYPE_UVD:
+ kv_dpm_powergate_uvd(handle, gate);
+ break;
++ case AMD_IP_BLOCK_TYPE_VCE:
++ kv_dpm_powergate_vce(handle, gate);
++ break;
+ default:
+ break;
+ }
+--
+2.7.4
+
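The reworked kv_dpm_powergate_vce() above keeps the gate and ungate paths symmetric: gating quiesces the block before cutting power, ungating restores power before bringing the block back up. A sketch of that ordering, assuming block_stop()/block_start(), dpm_enable() and smu_power_on()/smu_power_off() as placeholders for the IP powergating calls, kv_enable_vce_dpm() and the PPSMC power messages:

    #include <stdbool.h>
    #include <stdio.h>

    static void block_stop(void)    { puts("VCE: gate IP block"); }
    static void block_start(void)   { puts("VCE: ungate IP block"); }
    static void dpm_enable(bool on) { printf("VCE DPM %s\n", on ? "on" : "off"); }
    static void smu_power_off(void) { puts("SMU: VCEPowerOFF"); }
    static void smu_power_on(void)  { puts("SMU: VCEPowerON"); }

    /* Gate: quiesce first, then remove power.
     * Ungate: restore power first, then re-initialise the block. */
    static void powergate_vce(bool gate, bool caps_vce_pg)
    {
        if (gate) {
            block_stop();
            dpm_enable(false);
            if (caps_vce_pg)
                smu_power_off();
        } else {
            if (caps_vce_pg)
                smu_power_on();
            dpm_enable(true);
            block_start();
        }
    }

    int main(void)
    {
        powergate_vce(true, true);   /* gate   */
        powergate_vce(false, true);  /* ungate */
        return 0;
    }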
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5214-drm-amdgpu-Update-power-state-at-the-end-of-smu-hw_i.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5214-drm-amdgpu-Update-power-state-at-the-end-of-smu-hw_i.patch
new file mode 100644
index 00000000..405824df
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5214-drm-amdgpu-Update-power-state-at-the-end-of-smu-hw_i.patch
@@ -0,0 +1,72 @@
+From 40b5c496cca3aa9d36c1286dd14229988f7b2dcf Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Fri, 24 Aug 2018 16:17:54 +0800
+Subject: [PATCH 5214/5725] drm/amdgpu: Update power state at the end of smu
+ hw_init.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+For SI/Kv, the power state is managed by the function
+amdgpu_pm_compute_clocks.
+
+When dpm is enabled, we should call amdgpu_pm_compute_clocks
+to update the current power state instead of setting the boot state.
+
+This change fixes the oops seen when the kfd driver was enabled on Kv.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Tested-by: Michel Dänzer <michel.daenzer@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 4 +---
+ drivers/gpu/drm/amd/amdgpu/si_dpm.c | 3 +--
+ 2 files changed, 2 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+index c243e22..0290a49 100644
+--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
++++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+@@ -1353,8 +1353,6 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
+ return ret;
+ }
+
+- kv_update_current_ps(adev, adev->pm.dpm.boot_ps);
+-
+ if (adev->irq.installed &&
+ amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
+ ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX);
+@@ -3068,7 +3066,7 @@ static int kv_dpm_hw_init(void *handle)
+ else
+ adev->pm.dpm_enabled = true;
+ mutex_unlock(&adev->pm.mutex);
+-
++ amdgpu_pm_compute_clocks(adev);
+ return ret;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+index a32f6f6..9f7e63b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
++++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+@@ -6887,7 +6887,6 @@ static int si_dpm_enable(struct amdgpu_device *adev)
+
+ si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
+ si_thermal_start_thermal_controller(adev);
+- ni_update_current_ps(adev, boot_ps);
+
+ return 0;
+ }
+@@ -7760,7 +7759,7 @@ static int si_dpm_hw_init(void *handle)
+ else
+ adev->pm.dpm_enabled = true;
+ mutex_unlock(&adev->pm.mutex);
+-
++ amdgpu_pm_compute_clocks(adev);
+ return ret;
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5215-drm-amdgpu-Power-on-uvd-block-when-hw_fini.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5215-drm-amdgpu-Power-on-uvd-block-when-hw_fini.patch
new file mode 100644
index 00000000..f9bd6635
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5215-drm-amdgpu-Power-on-uvd-block-when-hw_fini.patch
@@ -0,0 +1,46 @@
+From 40cf18f69650756e13fb32e4a76321f7eddfd4df Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Thu, 23 Aug 2018 15:41:57 +0800
+Subject: [PATCH 5215/5725] drm/amdgpu: Power on uvd block when hw_fini
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+During hw_fini/suspend, the smu only needs to power on the uvd block
+if uvd pg is supported; there is no need to call uvd hw_init.
+
+v2: fix typo in patch descriptions and comments.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Tested-by: Michel Dänzer <michel.daenzer@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+index 0290a49..be880c3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
++++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+@@ -65,7 +65,6 @@ static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
+ int min_temp, int max_temp);
+ static int kv_init_fps_limits(struct amdgpu_device *adev);
+
+-static void kv_dpm_powergate_uvd(void *handle, bool gate);
+ static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate);
+ static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate);
+
+@@ -1388,7 +1387,8 @@ static void kv_dpm_disable(struct amdgpu_device *adev)
+ kv_dpm_powergate_samu(adev, false);
+ if (pi->caps_vce_pg) /* power on the VCE block */
+ amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
+- kv_dpm_powergate_uvd(adev, false);
++ if (pi->caps_uvd_pg) /* power on the UVD block */
++ amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON);
+
+ kv_enable_smc_cac(adev, false);
+ kv_enable_didt(adev, false);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5216-drm-amdgpu-Remove-dead-code-in-amdgpu_pm.c.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5216-drm-amdgpu-Remove-dead-code-in-amdgpu_pm.c.patch
new file mode 100644
index 00000000..cbdbc883
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5216-drm-amdgpu-Remove-dead-code-in-amdgpu_pm.c.patch
@@ -0,0 +1,72 @@
+From 97de30522fb39e3dccd3db8d922c6799f3d64298 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Thu, 23 Aug 2018 15:45:15 +0800
+Subject: [PATCH 5216/5725] drm/amdgpu: Remove dead code in amdgpu_pm.c
+
+As we have unified powergate_uvd/vce/mmhub into set_powergating_by_smu,
+and set_powergating_by_smu is supported by both dpm and powerplay,
+remove the else case.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 35 ----------------------------------
+ 1 file changed, 35 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+index 4928c61..f5404f2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+@@ -1719,18 +1719,6 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
+ mutex_lock(&adev->pm.mutex);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
+ mutex_unlock(&adev->pm.mutex);
+- } else {
+- if (enable) {
+- mutex_lock(&adev->pm.mutex);
+- adev->pm.dpm.uvd_active = true;
+- adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
+- mutex_unlock(&adev->pm.mutex);
+- } else {
+- mutex_lock(&adev->pm.mutex);
+- adev->pm.dpm.uvd_active = false;
+- mutex_unlock(&adev->pm.mutex);
+- }
+- amdgpu_pm_compute_clocks(adev);
+ }
+ }
+
+@@ -1741,29 +1729,6 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
+ mutex_lock(&adev->pm.mutex);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
+ mutex_unlock(&adev->pm.mutex);
+- } else {
+- if (enable) {
+- mutex_lock(&adev->pm.mutex);
+- adev->pm.dpm.vce_active = true;
+- /* XXX select vce level based on ring/task */
+- adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
+- mutex_unlock(&adev->pm.mutex);
+- amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+- AMD_CG_STATE_UNGATE);
+- amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+- AMD_PG_STATE_UNGATE);
+- amdgpu_pm_compute_clocks(adev);
+- } else {
+- amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+- AMD_PG_STATE_GATE);
+- amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+- AMD_CG_STATE_GATE);
+- mutex_lock(&adev->pm.mutex);
+- adev->pm.dpm.vce_active = false;
+- mutex_unlock(&adev->pm.mutex);
+- amdgpu_pm_compute_clocks(adev);
+- }
+-
+ }
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5217-drm-amdgpu-Remove-duplicate-code-in-gfx_v8_0.c.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5217-drm-amdgpu-Remove-duplicate-code-in-gfx_v8_0.c.patch
new file mode 100644
index 00000000..dbc014c2
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5217-drm-amdgpu-Remove-duplicate-code-in-gfx_v8_0.c.patch
@@ -0,0 +1,161 @@
+From 91820f3765ee67568d2752f3f2aae7e818fac4be Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Fri, 17 Aug 2018 13:13:12 +0800
+Subject: [PATCH 5217/5725] drm/amdgpu: Remove duplicate code in gfx_v8_0.c
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+There are no logical changes here.
+
+1. If the kcq can be enabled via the kiq, we don't need to
+   do a separate kiq ring test.
+2. The amdgpu_ring_test_ring function can be used to
+   wait for the ring to complete, so remove the duplicate code.
+
+v2: alloc 6 (not 7) dws for unmap_queues
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 80 ++++++-----------------------------
+ 1 file changed, 13 insertions(+), 67 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index be2cd71..275d524 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -4603,7 +4603,6 @@ static void gfx_v8_0_kiq_setting(struct amdgpu_ring *ring)
+ static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev)
+ {
+ struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
+- uint32_t scratch, tmp = 0;
+ uint64_t queue_mask = 0;
+ int r, i;
+
+@@ -4622,17 +4621,10 @@ static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev)
+ queue_mask |= (1ull << i);
+ }
+
+- r = amdgpu_gfx_scratch_get(adev, &scratch);
+- if (r) {
+- DRM_ERROR("Failed to get scratch reg (%d).\n", r);
+- return r;
+- }
+- WREG32(scratch, 0xCAFEDEAD);
+-
+- r = amdgpu_ring_alloc(kiq_ring, (8 * adev->gfx.num_compute_rings) + 11);
++ kiq_ring->ready = true;
++ r = amdgpu_ring_alloc(kiq_ring, (8 * adev->gfx.num_compute_rings) + 8);
+ if (r) {
+ DRM_ERROR("Failed to lock KIQ (%d).\n", r);
+- amdgpu_gfx_scratch_free(adev, scratch);
+ return r;
+ }
+ /* set resources */
+@@ -4664,25 +4656,12 @@ static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev)
+ amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
+ amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
+ }
+- /* write to scratch for completion */
+- amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
+- amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
+- amdgpu_ring_write(kiq_ring, 0xDEADBEEF);
+- amdgpu_ring_commit(kiq_ring);
+
+- for (i = 0; i < adev->usec_timeout; i++) {
+- tmp = RREG32(scratch);
+- if (tmp == 0xDEADBEEF)
+- break;
+- DRM_UDELAY(1);
+- }
+- if (i >= adev->usec_timeout) {
+- DRM_ERROR("KCQ enable failed (scratch(0x%04X)=0x%08X)\n",
+- scratch, tmp);
+- r = -EINVAL;
++ r = amdgpu_ring_test_ring(kiq_ring);
++ if (r) {
++ DRM_ERROR("KCQ enable failed\n");
++ kiq_ring->ready = false;
+ }
+- amdgpu_gfx_scratch_free(adev, scratch);
+-
+ return r;
+ }
+
+@@ -5013,15 +4992,6 @@ static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev)
+ if (r)
+ goto done;
+
+- /* Test KIQ */
+- ring = &adev->gfx.kiq.ring;
+- ring->ready = true;
+- r = amdgpu_ring_test_ring(ring);
+- if (r) {
+- ring->ready = false;
+- goto done;
+- }
+-
+ /* Test KCQs */
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ ring = &adev->gfx.compute_ring[i];
+@@ -5091,23 +5061,11 @@ static int gfx_v8_0_hw_init(void *handle)
+
+ static int gfx_v8_0_kcq_disable(struct amdgpu_ring *kiq_ring,struct amdgpu_ring *ring)
+ {
+- struct amdgpu_device *adev = kiq_ring->adev;
+- uint32_t scratch, tmp = 0;
+- int r, i;
+-
+- r = amdgpu_gfx_scratch_get(adev, &scratch);
+- if (r) {
+- DRM_ERROR("Failed to get scratch reg (%d).\n", r);
+- return r;
+- }
+- WREG32(scratch, 0xCAFEDEAD);
++ int r;
+
+- r = amdgpu_ring_alloc(kiq_ring, 10);
+- if (r) {
++ r = amdgpu_ring_alloc(kiq_ring, 6);
++ if (r)
+ DRM_ERROR("Failed to lock KIQ (%d).\n", r);
+- amdgpu_gfx_scratch_free(adev, scratch);
+- return r;
+- }
+
+ /* unmap queues */
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
+@@ -5120,23 +5078,11 @@ static int gfx_v8_0_kcq_disable(struct amdgpu_ring *kiq_ring,struct amdgpu_ring
+ amdgpu_ring_write(kiq_ring, 0);
+ amdgpu_ring_write(kiq_ring, 0);
+ amdgpu_ring_write(kiq_ring, 0);
+- /* write to scratch for completion */
+- amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
+- amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
+- amdgpu_ring_write(kiq_ring, 0xDEADBEEF);
+- amdgpu_ring_commit(kiq_ring);
+
+- for (i = 0; i < adev->usec_timeout; i++) {
+- tmp = RREG32(scratch);
+- if (tmp == 0xDEADBEEF)
+- break;
+- DRM_UDELAY(1);
+- }
+- if (i >= adev->usec_timeout) {
+- DRM_ERROR("KCQ disabled failed (scratch(0x%04X)=0x%08X)\n", scratch, tmp);
+- r = -EINVAL;
+- }
+- amdgpu_gfx_scratch_free(adev, scratch);
++ r = amdgpu_ring_test_ring(kiq_ring);
++ if (r)
++ DRM_ERROR("KCQ disable failed\n");
++
+ return r;
+ }
+
+--
+2.7.4
+
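Patch 5217 drops the open-coded scratch-register write-and-poll completion check and reuses the shared amdgpu_ring_test_ring() helper instead, so the enable and disable paths no longer duplicate the allocate/poll/free sequence. A toy model of that refactor; fake_ring, ring_test() and the instant "hardware" completion below are illustrative only:

    #include <stdint.h>
    #include <stdio.h>

    #define MARKER  0xDEADBEEF
    #define TIMEOUT 1000

    struct fake_ring {
        uint32_t scratch;  /* register the test packet writes on completion */
    };

    /* Shared helper: submit a marker write and poll for it, so callers no
     * longer open-code the scratch allocate/poll/free sequence. */
    static int ring_test(struct fake_ring *ring)
    {
        ring->scratch = 0xCAFEDEAD;
        /* In the driver this is a SET_UCONFIG_REG packet; here we simply
         * pretend the hardware completed it at once. */
        ring->scratch = MARKER;

        for (int i = 0; i < TIMEOUT; i++) {
            if (ring->scratch == MARKER)
                return 0;
        }
        return -1;
    }

    static int kcq_enable(struct fake_ring *kiq_ring)
    {
        /* ...map_queues packets would be emitted here... */
        return ring_test(kiq_ring);  /* one shared completion check */
    }

    int main(void)
    {
        struct fake_ring kiq = { 0 };
        printf("KCQ enable: %s\n", kcq_enable(&kiq) ? "failed" : "ok");
        return 0;
    }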
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5218-drm-amdgpu-Refine-gfx_v8_0_kcq_disable-function.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5218-drm-amdgpu-Refine-gfx_v8_0_kcq_disable-function.patch
new file mode 100644
index 00000000..a112e5c0
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5218-drm-amdgpu-Refine-gfx_v8_0_kcq_disable-function.patch
@@ -0,0 +1,85 @@
+From afd02ee7a6e7a4fe8d38fa935c4ff525a42cd618 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Fri, 17 Aug 2018 14:57:18 +0800
+Subject: [PATCH 5218/5725] drm/amdgpu: Refine gfx_v8_0_kcq_disable function
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Send all kcq unmap_queue packets and then wait for
+complete.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 29 +++++++++++++++--------------
+ 1 file changed, 15 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index 275d524..4f9dff2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -5059,26 +5059,29 @@ static int gfx_v8_0_hw_init(void *handle)
+ return r;
+ }
+
+-static int gfx_v8_0_kcq_disable(struct amdgpu_ring *kiq_ring,struct amdgpu_ring *ring)
++static int gfx_v8_0_kcq_disable(struct amdgpu_device *adev)
+ {
+- int r;
++ int r, i;
++ struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
+
+- r = amdgpu_ring_alloc(kiq_ring, 6);
++ r = amdgpu_ring_alloc(kiq_ring, 6 * adev->gfx.num_compute_rings);
+ if (r)
+ DRM_ERROR("Failed to lock KIQ (%d).\n", r);
+
+- /* unmap queues */
+- amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
+- amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
++ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
++ struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
++
++ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
++ amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
+ PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */
+ PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
+ PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) |
+ PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
+- amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
+- amdgpu_ring_write(kiq_ring, 0);
+- amdgpu_ring_write(kiq_ring, 0);
+- amdgpu_ring_write(kiq_ring, 0);
+-
++ amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
++ amdgpu_ring_write(kiq_ring, 0);
++ amdgpu_ring_write(kiq_ring, 0);
++ amdgpu_ring_write(kiq_ring, 0);
++ }
+ r = amdgpu_ring_test_ring(kiq_ring);
+ if (r)
+ DRM_ERROR("KCQ disable failed\n");
+@@ -5089,7 +5092,6 @@ static int gfx_v8_0_kcq_disable(struct amdgpu_ring *kiq_ring,struct amdgpu_ring
+ static int gfx_v8_0_hw_fini(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+- int i;
+
+ amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
+ amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+@@ -5099,8 +5101,7 @@ static int gfx_v8_0_hw_fini(void *handle)
+ amdgpu_irq_put(adev, &adev->gfx.sq_irq, 0);
+
+ /* disable KCQ to avoid CPC touch memory not valid anymore */
+- for (i = 0; i < adev->gfx.num_compute_rings; i++)
+- gfx_v8_0_kcq_disable(&adev->gfx.kiq.ring, &adev->gfx.compute_ring[i]);
++ gfx_v8_0_kcq_disable(adev);
+
+ if (amdgpu_sriov_vf(adev)) {
+ pr_debug("For SRIOV client, shouldn't do anything.\n");
+--
+2.7.4
+
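Patch 5218 (and its GFX9 counterpart, 5220) changes the shutdown path from emitting one unmap_queues packet per ring and waiting each time to reserving 6 dwords per compute ring, emitting every packet back to back, and waiting once at the end. A sketch of that batched shape; the 6-dword packet size and per-ring loop follow the hunk, while ring_alloc(), emit_unmap_queues() and ring_test() are placeholders:

    #include <stdio.h>

    #define DWORDS_PER_UNMAP 6

    static int ring_alloc(unsigned int ndw)
    {
        printf("reserve %u dwords on the KIQ ring\n", ndw);
        return 0;
    }

    static void emit_unmap_queues(unsigned int doorbell)
    {
        printf("  unmap_queues for doorbell %u\n", doorbell);
    }

    static int ring_test(void)
    {
        puts("wait once for the KIQ to drain");
        return 0;
    }

    /* Batched disable: one reservation sized for every compute ring, all
     * packets emitted back to back, a single completion wait at the end. */
    static int kcq_disable(unsigned int num_compute_rings)
    {
        int r = ring_alloc(DWORDS_PER_UNMAP * num_compute_rings);
        if (r)
            return r;

        for (unsigned int i = 0; i < num_compute_rings; i++)
            emit_unmap_queues(i);

        return ring_test();
    }

    int main(void)
    {
        return kcq_disable(8);
    }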
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5219-drm-amdgpu-Remove-duplicate-code-in-gfx_v9_0.c.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5219-drm-amdgpu-Remove-duplicate-code-in-gfx_v9_0.c.patch
new file mode 100644
index 00000000..0bc77e84
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5219-drm-amdgpu-Remove-duplicate-code-in-gfx_v9_0.c.patch
@@ -0,0 +1,152 @@
+From 6f94a15a8f68692929a023f8f6adabb92aa39eea Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Fri, 17 Aug 2018 16:42:35 +0800
+Subject: [PATCH 5219/5725] drm/amdgpu: Remove duplicate code in gfx_v9_0.c
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+There are no logical changes here.
+
+1. If the kcq can be enabled via the kiq, we don't need to
+   do a separate kiq ring test.
+2. The amdgpu_ring_test_ring function can be used to
+   wait for the ring to complete, so remove the duplicate code.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 72 ++++++-----------------------------
+ 1 file changed, 12 insertions(+), 60 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index f588822..cfe75dc 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -2671,7 +2671,6 @@ static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
+ static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
+ {
+ struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
+- uint32_t scratch, tmp = 0;
+ uint64_t queue_mask = 0;
+ int r, i;
+
+@@ -2690,17 +2689,10 @@ static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
+ queue_mask |= (1ull << i);
+ }
+
+- r = amdgpu_gfx_scratch_get(adev, &scratch);
+- if (r) {
+- DRM_ERROR("Failed to get scratch reg (%d).\n", r);
+- return r;
+- }
+- WREG32(scratch, 0xCAFEDEAD);
+-
+- r = amdgpu_ring_alloc(kiq_ring, (7 * adev->gfx.num_compute_rings) + 11);
++ kiq_ring->ready = true;
++ r = amdgpu_ring_alloc(kiq_ring, (7 * adev->gfx.num_compute_rings) + 8);
+ if (r) {
+ DRM_ERROR("Failed to lock KIQ (%d).\n", r);
+- amdgpu_gfx_scratch_free(adev, scratch);
+ return r;
+ }
+
+@@ -2737,24 +2729,12 @@ static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
+ amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
+ amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
+ }
+- /* write to scratch for completion */
+- amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
+- amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
+- amdgpu_ring_write(kiq_ring, 0xDEADBEEF);
+- amdgpu_ring_commit(kiq_ring);
+
+- for (i = 0; i < adev->usec_timeout; i++) {
+- tmp = RREG32(scratch);
+- if (tmp == 0xDEADBEEF)
+- break;
+- DRM_UDELAY(1);
+- }
+- if (i >= adev->usec_timeout) {
+- DRM_ERROR("KCQ enable failed (scratch(0x%04X)=0x%08X)\n",
+- scratch, tmp);
+- r = -EINVAL;
++ r = amdgpu_ring_test_ring(kiq_ring);
++ if (r) {
++ DRM_ERROR("KCQ enable failed\n");
++ kiq_ring->ready = false;
+ }
+- amdgpu_gfx_scratch_free(adev, scratch);
+
+ return r;
+ }
+@@ -3193,12 +3173,6 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
+ return r;
+ }
+
+- ring = &adev->gfx.kiq.ring;
+- ring->ready = true;
+- r = amdgpu_ring_test_ring(ring);
+- if (r)
+- ring->ready = false;
+-
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ ring = &adev->gfx.compute_ring[i];
+
+@@ -3249,21 +3223,11 @@ static int gfx_v9_0_hw_init(void *handle)
+
+ static int gfx_v9_0_kcq_disable(struct amdgpu_ring *kiq_ring,struct amdgpu_ring *ring)
+ {
+- struct amdgpu_device *adev = kiq_ring->adev;
+- uint32_t scratch, tmp = 0;
+- int r, i;
+-
+- r = amdgpu_gfx_scratch_get(adev, &scratch);
+- if (r) {
+- DRM_ERROR("Failed to get scratch reg (%d).\n", r);
+- return r;
+- }
+- WREG32(scratch, 0xCAFEDEAD);
++ int r;
+
+- r = amdgpu_ring_alloc(kiq_ring, 10);
++ r = amdgpu_ring_alloc(kiq_ring, 6);
+ if (r) {
+ DRM_ERROR("Failed to lock KIQ (%d).\n", r);
+- amdgpu_gfx_scratch_free(adev, scratch);
+ return r;
+ }
+
+@@ -3278,23 +3242,11 @@ static int gfx_v9_0_kcq_disable(struct amdgpu_ring *kiq_ring,struct amdgpu_ring
+ amdgpu_ring_write(kiq_ring, 0);
+ amdgpu_ring_write(kiq_ring, 0);
+ amdgpu_ring_write(kiq_ring, 0);
+- /* write to scratch for completion */
+- amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
+- amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
+- amdgpu_ring_write(kiq_ring, 0xDEADBEEF);
+- amdgpu_ring_commit(kiq_ring);
+
+- for (i = 0; i < adev->usec_timeout; i++) {
+- tmp = RREG32(scratch);
+- if (tmp == 0xDEADBEEF)
+- break;
+- DRM_UDELAY(1);
+- }
+- if (i >= adev->usec_timeout) {
+- DRM_ERROR("KCQ disabled failed (scratch(0x%04X)=0x%08X)\n", scratch, tmp);
+- r = -EINVAL;
+- }
+- amdgpu_gfx_scratch_free(adev, scratch);
++ r = amdgpu_ring_test_ring(kiq_ring);
++ if (r)
++ DRM_ERROR("KCQ disable failed\n");
++
+ return r;
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5220-drm-amdgpu-Refine-gfx_v9_0_kcq_disable-function.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5220-drm-amdgpu-Refine-gfx_v9_0_kcq_disable-function.patch
new file mode 100644
index 00000000..ff7a7036
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5220-drm-amdgpu-Refine-gfx_v9_0_kcq_disable-function.patch
@@ -0,0 +1,86 @@
+From 587223801033bd206dd5d3d449b86a3ad1034b3d Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Fri, 17 Aug 2018 16:45:16 +0800
+Subject: [PATCH 5220/5725] drm/amdgpu: Refine gfx_v9_0_kcq_disable function
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Send all kcq unmap_queue packets and then wait for
+completion.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 33 ++++++++++++++++-----------------
+ 1 file changed, 16 insertions(+), 17 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index cfe75dc..a581135 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -3221,28 +3221,29 @@ static int gfx_v9_0_hw_init(void *handle)
+ return r;
+ }
+
+-static int gfx_v9_0_kcq_disable(struct amdgpu_ring *kiq_ring,struct amdgpu_ring *ring)
++static int gfx_v9_0_kcq_disable(struct amdgpu_device *adev)
+ {
+- int r;
++ int r, i;
++ struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
+
+- r = amdgpu_ring_alloc(kiq_ring, 6);
+- if (r) {
++ r = amdgpu_ring_alloc(kiq_ring, 6 * adev->gfx.num_compute_rings);
++ if (r)
+ DRM_ERROR("Failed to lock KIQ (%d).\n", r);
+- return r;
+- }
+
+- /* unmap queues */
+- amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
+- amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
++ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
++ struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
++
++ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
++ amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
+ PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */
+ PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
+ PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) |
+ PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
+- amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
+- amdgpu_ring_write(kiq_ring, 0);
+- amdgpu_ring_write(kiq_ring, 0);
+- amdgpu_ring_write(kiq_ring, 0);
+-
++ amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
++ amdgpu_ring_write(kiq_ring, 0);
++ amdgpu_ring_write(kiq_ring, 0);
++ amdgpu_ring_write(kiq_ring, 0);
++ }
+ r = amdgpu_ring_test_ring(kiq_ring);
+ if (r)
+ DRM_ERROR("KCQ disable failed\n");
+@@ -3253,14 +3254,12 @@ static int gfx_v9_0_kcq_disable(struct amdgpu_ring *kiq_ring,struct amdgpu_ring
+ static int gfx_v9_0_hw_fini(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+- int i;
+
+ amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
+ amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+
+ /* disable KCQ to avoid CPC touch memory not valid anymore */
+- for (i = 0; i < adev->gfx.num_compute_rings; i++)
+- gfx_v9_0_kcq_disable(&adev->gfx.kiq.ring, &adev->gfx.compute_ring[i]);
++ gfx_v9_0_kcq_disable(adev);
+
+ if (amdgpu_sriov_vf(adev)) {
+ gfx_v9_0_cp_gfx_enable(adev, false);
+--
+2.7.4
+
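The sizing above follows from the packet layout: each UNMAP_QUEUES request emitted here is one PACKET3 header plus five payload dwords, so KIQ ring space for all compute queues is reserved in one go and the ring is tested only once at the end. A rough sketch of that arithmetic (illustrative fragment, not the verbatim driver code):

        /* one UNMAP_QUEUES packet = 1 header dword + 5 payload dwords */
        unsigned int dw_per_kcq = 6;
        unsigned int ndw = dw_per_kcq * adev->gfx.num_compute_rings;

        r = amdgpu_ring_alloc(kiq_ring, ndw);   /* reserve once for all KCQs */
        if (r)
                DRM_ERROR("Failed to lock KIQ (%d).\n", r);
        /* ...emit one unmap packet per compute ring, then test the KIQ ring once... */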
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5221-drm-amdgpu-Change-kiq-initialize-reset-sequence-on-g.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5221-drm-amdgpu-Change-kiq-initialize-reset-sequence-on-g.patch
new file mode 100644
index 00000000..60298704
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5221-drm-amdgpu-Change-kiq-initialize-reset-sequence-on-g.patch
@@ -0,0 +1,126 @@
+From eb9bb2769c12f94db35219f917d6994e7c79a4ea Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Wed, 22 Aug 2018 17:58:31 +0800
+Subject: [PATCH 5221/5725] drm/amdgpu: Change kiq initialize/reset sequence on
+ gfx8
+
+1. Initialize KIQ before initializing the gfx ring.
+2. Set the KIQ ring ready immediately once KIQ has been
+   initialized successfully.
+3. Split gfx_v8_0_kiq_resume into two functions:
+   gfx_v8_0_kiq_resume for KIQ initialization and
+   gfx_v8_0_kcq_resume for KCQ initialization.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 49 +++++++++++++++++++++--------------
+ 1 file changed, 30 insertions(+), 19 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index 4f9dff2..9a8f572 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -4621,7 +4621,6 @@ static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev)
+ queue_mask |= (1ull << i);
+ }
+
+- kiq_ring->ready = true;
+ r = amdgpu_ring_alloc(kiq_ring, (8 * adev->gfx.num_compute_rings) + 8);
+ if (r) {
+ DRM_ERROR("Failed to lock KIQ (%d).\n", r);
+@@ -4948,26 +4947,33 @@ static void gfx_v8_0_set_mec_doorbell_range(struct amdgpu_device *adev)
+
+ static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev)
+ {
+- struct amdgpu_ring *ring = NULL;
+- int r = 0, i;
+-
+- gfx_v8_0_cp_compute_enable(adev, true);
++ struct amdgpu_ring *ring;
++ int r;
+
+ ring = &adev->gfx.kiq.ring;
+
+ r = amdgpu_bo_reserve(ring->mqd_obj, false);
+ if (unlikely(r != 0))
+- goto done;
++ return r;
+
+ r = amdgpu_bo_kmap(ring->mqd_obj, &ring->mqd_ptr);
+- if (!r) {
+- r = gfx_v8_0_kiq_init_queue(ring);
+- amdgpu_bo_kunmap(ring->mqd_obj);
+- ring->mqd_ptr = NULL;
+- }
++ if (unlikely(r != 0))
++ return r;
++
++ gfx_v8_0_kiq_init_queue(ring);
++ amdgpu_bo_kunmap(ring->mqd_obj);
++ ring->mqd_ptr = NULL;
+ amdgpu_bo_unreserve(ring->mqd_obj);
+- if (r)
+- goto done;
++ ring->ready = true;
++ return 0;
++}
++
++static int gfx_v8_0_kcq_resume(struct amdgpu_device *adev)
++{
++ struct amdgpu_ring *ring = NULL;
++ int r = 0, i;
++
++ gfx_v8_0_cp_compute_enable(adev, true);
+
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ ring = &adev->gfx.compute_ring[i];
+@@ -5023,14 +5029,17 @@ static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
+ return r;
+ }
+
+- r = gfx_v8_0_cp_gfx_resume(adev);
++ r = gfx_v8_0_kiq_resume(adev);
+ if (r)
+ return r;
+
+- r = gfx_v8_0_kiq_resume(adev);
++ r = gfx_v8_0_cp_gfx_resume(adev);
+ if (r)
+ return r;
+
++ r = gfx_v8_0_kcq_resume(adev);
++ if (r)
++ return r;
+ gfx_v8_0_enable_gui_idle_interrupt(adev, true);
+
+ return 0;
+@@ -5333,10 +5342,6 @@ static int gfx_v8_0_post_soft_reset(void *handle)
+ srbm_soft_reset = adev->gfx.srbm_soft_reset;
+
+ if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
+- REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
+- gfx_v8_0_cp_gfx_resume(adev);
+-
+- if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
+ REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
+ REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) ||
+ REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) {
+@@ -5352,7 +5357,13 @@ static int gfx_v8_0_post_soft_reset(void *handle)
+ mutex_unlock(&adev->srbm_mutex);
+ }
+ gfx_v8_0_kiq_resume(adev);
++ gfx_v8_0_kcq_resume(adev);
+ }
++
++ if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
++ REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
++ gfx_v8_0_cp_gfx_resume(adev);
++
+ gfx_v8_0_rlc_start(adev);
+
+ return 0;
+--
+2.7.4
+
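Taken together, the hunks above reorder the bring-up in gfx_v8_0_cp_resume. A condensed sketch of the resulting sequence (error handling trimmed, not the verbatim function):

        r = gfx_v8_0_kiq_resume(adev);      /* KIQ first; its ring is marked ready here */
        if (r)
                return r;

        r = gfx_v8_0_cp_gfx_resume(adev);   /* then the gfx ring */
        if (r)
                return r;

        r = gfx_v8_0_kcq_resume(adev);      /* KCQs last, mapped through the ready KIQ */
        if (r)
                return r;

        gfx_v8_0_enable_gui_idle_interrupt(adev, true);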
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5222-drm-amdgpu-Change-kiq-ring-initialize-sequence-on-gf.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5222-drm-amdgpu-Change-kiq-ring-initialize-sequence-on-gf.patch
new file mode 100644
index 00000000..324ecae9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5222-drm-amdgpu-Change-kiq-ring-initialize-sequence-on-gf.patch
@@ -0,0 +1,98 @@
+From 3325a5a805f29438a03f1bd49b261b43868378b9 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Wed, 22 Aug 2018 18:54:45 +0800
+Subject: [PATCH 5222/5725] drm/amdgpu: Change kiq ring initialize sequence on
+ gfx9
+
+1. Initialize KIQ before initializing the gfx ring.
+2. Set the KIQ ring ready immediately once KIQ has been
+   initialized successfully.
+3. Split gfx_v9_0_kiq_resume into two functions:
+   gfx_v9_0_kiq_resume for KIQ initialization and
+   gfx_v9_0_kcq_resume for KCQ initialization.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 38 ++++++++++++++++++++++-------------
+ 1 file changed, 24 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index a581135..fdb0ad4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -2689,7 +2689,6 @@ static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
+ queue_mask |= (1ull << i);
+ }
+
+- kiq_ring->ready = true;
+ r = amdgpu_ring_alloc(kiq_ring, (7 * adev->gfx.num_compute_rings) + 8);
+ if (r) {
+ DRM_ERROR("Failed to lock KIQ (%d).\n", r);
+@@ -3096,26 +3095,33 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
+
+ static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
+ {
+- struct amdgpu_ring *ring = NULL;
+- int r = 0, i;
+-
+- gfx_v9_0_cp_compute_enable(adev, true);
++ struct amdgpu_ring *ring;
++ int r;
+
+ ring = &adev->gfx.kiq.ring;
+
+ r = amdgpu_bo_reserve(ring->mqd_obj, false);
+ if (unlikely(r != 0))
+- goto done;
++ return r;
+
+ r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
+- if (!r) {
+- r = gfx_v9_0_kiq_init_queue(ring);
+- amdgpu_bo_kunmap(ring->mqd_obj);
+- ring->mqd_ptr = NULL;
+- }
++ if (unlikely(r != 0))
++ return r;
++
++ gfx_v9_0_kiq_init_queue(ring);
++ amdgpu_bo_kunmap(ring->mqd_obj);
++ ring->mqd_ptr = NULL;
+ amdgpu_bo_unreserve(ring->mqd_obj);
+- if (r)
+- goto done;
++ ring->ready = true;
++ return 0;
++}
++
++static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev)
++{
++ struct amdgpu_ring *ring = NULL;
++ int r = 0, i;
++
++ gfx_v9_0_cp_compute_enable(adev, true);
+
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ ring = &adev->gfx.compute_ring[i];
+@@ -3158,11 +3164,15 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
+ return r;
+ }
+
++ r = gfx_v9_0_kiq_resume(adev);
++ if (r)
++ return r;
++
+ r = gfx_v9_0_cp_gfx_resume(adev);
+ if (r)
+ return r;
+
+- r = gfx_v9_0_kiq_resume(adev);
++ r = gfx_v9_0_kcq_resume(adev);
+ if (r)
+ return r;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5223-drm-amdgpu-amdgpu_ctx_add_fence-can-t-fail.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5223-drm-amdgpu-amdgpu_ctx_add_fence-can-t-fail.patch
new file mode 100644
index 00000000..3fd21b79
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5223-drm-amdgpu-amdgpu_ctx_add_fence-can-t-fail.patch
@@ -0,0 +1,80 @@
+From cd86c4401e49c5e703ef271f95e37bfa70a5abaa Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Fri, 24 Aug 2018 14:23:33 +0200
+Subject: [PATCH 5223/5725] drm/amdgpu: amdgpu_ctx_add_fence can't fail
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+We no longer wait for a fence here, so the function can no longer fail.
+
+Change-Id: I69ded731b7a85b0123d6cc05983c56ab5ff8cdcf
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 9 +--------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 4 +---
+ 3 files changed, 3 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 91be1d4..9c59470 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -550,7 +550,7 @@ struct amdgpu_ctx_mgr {
+ struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
+ int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
+
+-int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
++void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
+ struct dma_fence *fence, uint64_t *seq);
+ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
+ struct amdgpu_ring *ring, uint64_t seq);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index fd9fe69..c2a5caa 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -1233,14 +1233,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
+ job->owner = p->filp;
+ p->fence = dma_fence_get(&job->base.s_fence->finished);
+
+- r = amdgpu_ctx_add_fence(p->ctx, ring, p->fence, &seq);
+- if (r) {
+- dma_fence_put(p->fence);
+- dma_fence_put(&job->base.s_fence->finished);
+- amdgpu_job_free(job);
+- amdgpu_mn_unlock(p->mn);
+- return r;
+- }
++ amdgpu_ctx_add_fence(p->ctx, ring, p->fence, &seq);
+
+ amdgpu_cs_post_dependencies(p);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+index 38903ea..f04f525 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+@@ -352,7 +352,7 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
+ return 0;
+ }
+
+-int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
++void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
+ struct dma_fence *fence, uint64_t* handler)
+ {
+ struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
+@@ -375,8 +375,6 @@ int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
+ dma_fence_put(other);
+ if (handler)
+ *handler = seq;
+-
+- return 0;
+ }
+
+ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5224-drm-amdgpu-fix-holding-mn_lock-while-allocating-memo.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5224-drm-amdgpu-fix-holding-mn_lock-while-allocating-memo.patch
new file mode 100644
index 00000000..34699660
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5224-drm-amdgpu-fix-holding-mn_lock-while-allocating-memo.patch
@@ -0,0 +1,79 @@
+From 138a4a1b2b33df8f640163553d40a7630984122b Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Fri, 24 Aug 2018 14:48:02 +0200
+Subject: [PATCH 5224/5725] drm/amdgpu: fix holding mn_lock while allocating
+ memory
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+We can't hold the mn_lock while allocating memory.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Acked-by: Chunming Zhou <david1.zhou@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 31 +++++++++++++++++++------------
+ 1 file changed, 19 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index c2a5caa..f7fa60b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -1210,26 +1210,24 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
+
+ int r;
+
++ job = p->job;
++ p->job = NULL;
++
++ r = drm_sched_job_init(&job->base, &ring->sched, entity, p->filp);
++ if (r)
++ goto error_unlock;
++
++ /* No memory allocation is allowed while holding the mn lock */
+ amdgpu_mn_lock(p->mn);
+ amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+ struct amdgpu_bo *bo = e->robj;
+
+ if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
+- amdgpu_mn_unlock(p->mn);
+- return -ERESTARTSYS;
++ r = -ERESTARTSYS;
++ goto error_abort;
+ }
+ }
+
+- job = p->job;
+- p->job = NULL;
+-
+- r = drm_sched_job_init(&job->base, &ring->sched, entity, p->filp);
+- if (r) {
+- amdgpu_job_free(job);
+- amdgpu_mn_unlock(p->mn);
+- return r;
+- }
+-
+ job->owner = p->filp;
+ p->fence = dma_fence_get(&job->base.s_fence->finished);
+
+@@ -1254,6 +1252,15 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
+ amdgpu_mn_unlock(p->mn);
+
+ return 0;
++
++error_abort:
++ dma_fence_put(&job->base.s_fence->finished);
++ job->base.s_fence = NULL;
++
++error_unlock:
++ amdgpu_job_free(job);
++ amdgpu_mn_unlock(p->mn);
++ return r;
+ }
+
+ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+--
+2.7.4
+
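The point of the reshuffle is lock ordering: everything that can allocate memory (and thus recurse into reclaim) now happens before the MMU-notifier lock is taken. A minimal sketch of the resulting shape, with the error paths collapsed:

        job = p->job;
        p->job = NULL;

        /* drm_sched_job_init() may allocate, so do it before locking */
        r = drm_sched_job_init(&job->base, &ring->sched, entity, p->filp);
        if (r) {
                amdgpu_job_free(job);
                return r;
        }

        amdgpu_mn_lock(p->mn);   /* no memory allocation beyond this point */
        /* ...only userptr validity checks and fence bookkeeping under the lock... */
        amdgpu_mn_unlock(p->mn);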
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5225-drm-amdgpu-add-amdgpu_gmc_get_pde_for_bo-helper-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5225-drm-amdgpu-add-amdgpu_gmc_get_pde_for_bo-helper-v2.patch
new file mode 100644
index 00000000..263123ce
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5225-drm-amdgpu-add-amdgpu_gmc_get_pde_for_bo-helper-v2.patch
@@ -0,0 +1,175 @@
+From 909ec6b159a1b1e4e0b589484269e77d29af64c7 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Wed, 22 Aug 2018 14:11:19 +0200
+Subject: [PATCH 5225/5725] drm/amdgpu: add amdgpu_gmc_get_pde_for_bo helper v2
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Helper to get the PDE for a PD/PT.
+
+v2: improve documentation
+
+Change-Id: I66a29a89be3746201da06ef7bf0d982201206e5d
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 37 +++++++++++++++++++++++++++++++--
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 2 ++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 23 +++++++++++++++++---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 1 +
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 4 +---
+ 5 files changed, 59 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+index 36058fe..a249931 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+@@ -27,6 +27,38 @@
+ #include "amdgpu.h"
+
+ /**
++ * amdgpu_gmc_get_pde_for_bo - get the PDE for a BO
++ *
++ * @bo: the BO to get the PDE for
++ * @level: the level in the PD hierarchy
++ * @addr: resulting addr
++ * @flags: resulting flags
++ *
++ * Get the address and flags to be used for a PDE (Page Directory Entry).
++ */
++void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
++ uint64_t *addr, uint64_t *flags)
++{
++ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
++ struct ttm_dma_tt *ttm;
++
++ switch (bo->tbo.mem.mem_type) {
++ case TTM_PL_TT:
++ ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
++ *addr = ttm->dma_address[0];
++ break;
++ case TTM_PL_VRAM:
++ *addr = amdgpu_bo_gpu_offset(bo);
++ break;
++ default:
++ *addr = 0;
++ break;
++ }
++ *flags = amdgpu_ttm_tt_pde_flags(bo->tbo.ttm, &bo->tbo.mem);
++ amdgpu_gmc_get_vm_pde(adev, level, addr, flags);
++}
++
++/**
+ * amdgpu_gmc_pd_addr - return the address of the root directory
+ *
+ */
+@@ -35,13 +67,14 @@ uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo)
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ uint64_t pd_addr;
+
+- pd_addr = amdgpu_bo_gpu_offset(bo);
+ /* TODO: move that into ASIC specific code */
+ if (adev->asic_type >= CHIP_VEGA10) {
+ uint64_t flags = AMDGPU_PTE_VALID;
+
+- amdgpu_gmc_get_vm_pde(adev, -1, &pd_addr, &flags);
++ amdgpu_gmc_get_pde_for_bo(bo, -1, &pd_addr, &flags);
+ pd_addr |= flags;
++ } else {
++ pd_addr = amdgpu_bo_gpu_offset(bo);
+ }
+ return pd_addr;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+index f347ba9..aa0502e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+@@ -134,6 +134,8 @@ static inline bool amdgpu_gmc_vram_full_visible(struct amdgpu_gmc *gmc)
+ return (gmc->real_vram_size == gmc->visible_vram_size);
+ }
+
++void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
++ uint64_t *addr, uint64_t *flags);
+ uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo);
+
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index f1d9fe3..027cff2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -1450,13 +1450,14 @@ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
+ }
+
+ /**
+- * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object
++ * amdgpu_ttm_tt_pde_flags - Compute PDE flags for ttm_tt object
+ *
+ * @ttm: The ttm_tt object to compute the flags for
+ * @mem: The memory registry backing this ttm_tt object
++ *
++ * Figure out the flags to use for a VM PDE (Page Directory Entry).
+ */
+-uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
+- struct ttm_mem_reg *mem)
++uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
+ {
+ uint64_t flags = 0;
+
+@@ -1473,6 +1474,22 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
+ if (mem && mem->mem_type == AMDGPU_PL_DGMA_IMPORT)
+ flags |= AMDGPU_PTE_SYSTEM;
+
++ return flags;
++}
++
++/**
++ * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object
++ *
++ * @ttm: The ttm_tt object to compute the flags for
++ * @mem: The memory registry backing this ttm_tt object
++
++ * Figure out the flags to use for a VM PTE (Page Table Entry).
++ */
++uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
++ struct ttm_mem_reg *mem)
++{
++ uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem);
++
+ flags |= adev->gart.gart_pte_flags;
+ flags |= AMDGPU_PTE_READABLE;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+index 66251b6..ef0f86f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+@@ -120,6 +120,7 @@ bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
+ int *last_invalidated);
+ bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm);
+ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
++uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_mem_reg *mem);
+ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
+ struct ttm_mem_reg *mem);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 516074d..4b6d0cc 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -970,9 +970,7 @@ static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
+ pbo = pbo->parent;
+
+ level += params->adev->vm_manager.root_level;
+- pt = amdgpu_bo_gpu_offset(entry->base.bo);
+- flags = AMDGPU_PTE_VALID;
+- amdgpu_gmc_get_vm_pde(params->adev, level, &pt, &flags);
++ amdgpu_gmc_get_pde_for_bo(entry->base.bo, level, &pt, &flags);
+ pde = (entry - parent->entries) * 8;
+ if (bo->shadow)
+ params->func(params, bo->shadow, pde, pt, 1, 0, flags);
+--
+2.7.4
+
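The helper matters for the follow-on GTT page-table work: a PD/PT buffer may now live in the GTT domain, where its bus address is the DMA address of its first page rather than a VRAM offset. A hedged usage sketch, mirroring the amdgpu_vm_update_pde hunk above:

        uint64_t pde_addr, pde_flags;

        /* One call resolves both placements (TTM_PL_TT -> first DMA address,
         * TTM_PL_VRAM -> GPU offset) and adds the ASIC-specific PDE flags.
         */
        amdgpu_gmc_get_pde_for_bo(entry->base.bo, level, &pde_addr, &pde_flags);
        /* pde_addr | pde_flags is what ends up in the parent directory entry */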
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5226-drm-amdgpu-enable-GTT-PD-PT-for-raven-v3.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5226-drm-amdgpu-enable-GTT-PD-PT-for-raven-v3.patch
new file mode 100644
index 00000000..925961c0
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5226-drm-amdgpu-enable-GTT-PD-PT-for-raven-v3.patch
@@ -0,0 +1,90 @@
+From dae2ee6b39e11dbf47f644c4c8cca66373485f6b Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Wed, 22 Aug 2018 16:44:56 +0200
+Subject: [PATCH 5226/5725] drm/amdgpu: enable GTT PD/PT for raven v3
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Should work on Vega10 as well, but with an obvious performance hit.
+
+Older APUs can be enabled as well, but will probably be more work.
+
+v2: fix error checking
+v3: use more general check
+
+Change-Id: I5194cf3cb267032198d5f16c0be95e39838515b7
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Acked-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 38 ++++++++++++++++++++++++++++++++++
+ 1 file changed, 38 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 4b6d0cc..c5c2d6d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -308,6 +308,9 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ list_move(&bo_base->vm_status, &vm->moved);
+ spin_unlock(&vm->moved_lock);
+ } else {
++ r = amdgpu_ttm_alloc_gart(&bo->tbo);
++ if (r)
++ break;
+ list_move(&bo_base->vm_status, &vm->relocated);
+ }
+ }
+@@ -384,6 +387,10 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
+ if (r)
+ goto error;
+
++ r = amdgpu_ttm_alloc_gart(&bo->tbo);
++ if (r)
++ return r;
++
+ r = amdgpu_job_alloc_with_ib(adev, 64, &job);
+ if (r)
+ goto error;
+@@ -434,6 +441,37 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
+ }
+
+ /**
++ * amdgpu_vm_bo_param - fill in parameters for PD/PT allocation
++ *
++ * @adev: amdgpu_device pointer
++ * @vm: requesting vm
++ * @bp: resulting BO allocation parameters
++ */
++static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
++ int level, struct amdgpu_bo_param *bp)
++{
++ memset(bp, 0, sizeof(*bp));
++
++ bp->size = amdgpu_vm_bo_size(adev, level);
++ bp->byte_align = AMDGPU_GPU_PAGE_SIZE;
++ bp->domain = AMDGPU_GEM_DOMAIN_VRAM;
++ if (bp->size <= PAGE_SIZE && adev->asic_type >= CHIP_VEGA10 &&
++ adev->flags & AMD_IS_APU)
++ bp->domain |= AMDGPU_GEM_DOMAIN_GTT;
++ bp->domain = amdgpu_bo_get_preferred_pin_domain(adev, bp->domain);
++ bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
++ AMDGPU_GEM_CREATE_CPU_GTT_USWC;
++ if (vm->use_cpu_for_update)
++ bp->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
++ else
++ bp->flags |= AMDGPU_GEM_CREATE_SHADOW |
++ AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
++ bp->type = ttm_bo_type_kernel;
++ if (vm->root.base.bo)
++ bp->resv = vm->root.base.bo->tbo.resv;
++}
++
++/**
+ * amdgpu_vm_alloc_levels - allocate the PD/PT levels
+ *
+ * @adev: amdgpu_device pointer
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5227-drm-amdgpu-Refine-gmc9-VM-fault-print.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5227-drm-amdgpu-Refine-gmc9-VM-fault-print.patch
new file mode 100644
index 00000000..2be6e7cd
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5227-drm-amdgpu-Refine-gmc9-VM-fault-print.patch
@@ -0,0 +1,36 @@
+From 3e9b16e776ee6f6698042c01a04f3685df075945 Mon Sep 17 00:00:00 2001
+From: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Date: Mon, 27 Aug 2018 14:17:26 -0400
+Subject: [PATCH 5227/5725] drm/amdgpu: Refine gmc9 VM fault print.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The fault reports the page number where the fault happened and not
+the exact faulting address. Update the print message to reflect that.
+
+Change-Id: Ib94e5424a63f135d2e8a972b0aa46ec05ddfcbe4
+Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Marek Olšák <marek.olsak@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index a2625e2..9e8b52c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -269,7 +269,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
+ entry->src_id, entry->ring_id, entry->vmid,
+ entry->pasid, task_info.process_name, task_info.tgid,
+ task_info.task_name, task_info.pid);
+- dev_err(adev->dev, " at address 0x%016llx from %d\n",
++ dev_err(adev->dev, " in page starting at address 0x%016llx from %d\n",
+ addr, entry->client_id);
+ if (!amdgpu_sriov_vf(adev))
+ dev_err(adev->dev,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5228-drm-amdgpu-remove-extra-newline-when-printing-VM-fau.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5228-drm-amdgpu-remove-extra-newline-when-printing-VM-fau.patch
new file mode 100644
index 00000000..22c18d75
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5228-drm-amdgpu-remove-extra-newline-when-printing-VM-fau.patch
@@ -0,0 +1,36 @@
+From e38eca9b9a0401fb51f533c9830b9b671ae94085 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Mon, 27 Aug 2018 15:43:37 +0200
+Subject: [PATCH 5228/5725] drm/amdgpu: remove extra newline when printing VM
+ faults
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Looks like a copy&paste error to me.
+
+Change-Id: Ie9364e688d2adc8b76e4bcd46616bcad84f79d59
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index 9e8b52c..bc815ebe 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -264,7 +264,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
+ amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
+
+ dev_err(adev->dev,
+- "[%s] VMC page fault (src_id:%u ring:%u vmid:%u pasid:%u, for process %s pid %d thread %s pid %d\n)\n",
++ "[%s] VMC page fault (src_id:%u ring:%u vmid:%u pasid:%u, for process %s pid %d thread %s pid %d)\n",
+ entry->vmid_src ? "mmhub" : "gfxhub",
+ entry->src_id, entry->ring_id, entry->vmid,
+ entry->pasid, task_info.process_name, task_info.tgid,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5229-drm-amdgpu-move-full-access-into-amdgpu_device_ip_su.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5229-drm-amdgpu-move-full-access-into-amdgpu_device_ip_su.patch
new file mode 100644
index 00000000..065041d6
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5229-drm-amdgpu-move-full-access-into-amdgpu_device_ip_su.patch
@@ -0,0 +1,63 @@
+From 09063ee0ba4aab71b9fd758ab08cd3a5a15142af Mon Sep 17 00:00:00 2001
+From: Yintian Tao <yttao@amd.com>
+Date: Wed, 22 Aug 2018 17:08:13 +0800
+Subject: [PATCH 5229/5725] drm/amdgpu: move full access into
+ amdgpu_device_ip_suspend
+
+It is safer to make full access cover both phase1 and phase2.
+Then accessing special registers in either phase1 or phase2 will not
+block the shutdown and suspend process under virtualization.
+
+Change-Id: I118925ae2d1aca54e53bd770b6e10ac4763e0c0f
+Signed-off-by: Yintian Tao <yttao@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 67adfb4..e850cde 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1981,9 +1981,6 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
+ {
+ int i, r;
+
+- if (amdgpu_sriov_vf(adev))
+- amdgpu_virt_request_full_gpu(adev, false);
+-
+ amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
+ amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
+
+@@ -2002,9 +1999,6 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
+ }
+ }
+
+- if (amdgpu_sriov_vf(adev))
+- amdgpu_virt_release_full_gpu(adev, false);
+-
+ return 0;
+ }
+
+@@ -2056,11 +2050,17 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
+ {
+ int r;
+
++ if (amdgpu_sriov_vf(adev))
++ amdgpu_virt_request_full_gpu(adev, false);
++
+ r = amdgpu_device_ip_suspend_phase1(adev);
+ if (r)
+ return r;
+ r = amdgpu_device_ip_suspend_phase2(adev);
+
++ if (amdgpu_sriov_vf(adev))
++ amdgpu_virt_release_full_gpu(adev, false);
++
+ return r;
+ }
+
+--
+2.7.4
+
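Condensed, amdgpu_device_ip_suspend now brackets both phases with a single request/release pair under SR-IOV (sketch of the flow only):

        if (amdgpu_sriov_vf(adev))
                amdgpu_virt_request_full_gpu(adev, false);   /* acquire once, up front */

        r = amdgpu_device_ip_suspend_phase1(adev);
        if (r)
                return r;
        r = amdgpu_device_ip_suspend_phase2(adev);

        if (amdgpu_sriov_vf(adev))
                amdgpu_virt_release_full_gpu(adev, false);   /* release after both phases */

        return r;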
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5230-drm-amdgpu-Need-to-set-moved-to-true-when-evict-bo.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5230-drm-amdgpu-Need-to-set-moved-to-true-when-evict-bo.patch
new file mode 100644
index 00000000..7259cb0b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5230-drm-amdgpu-Need-to-set-moved-to-true-when-evict-bo.patch
@@ -0,0 +1,46 @@
+From 734205614e657e5627ba93aa9501f4b60e5534ca Mon Sep 17 00:00:00 2001
+From: Emily Deng <Emily.Deng@amd.com>
+Date: Tue, 28 Aug 2018 20:52:40 +0800
+Subject: [PATCH 5230/5725] drm/amdgpu: Need to set moved to true when evict bo
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Fix the VMC page fault seen with the following sequence:
+1. amdgpu_gem_create_ioctl
+2. ttm_bo_swapout->amdgpu_vm_bo_invalidate: since amdgpu_vm_bo_base_init
+has not been called yet, list_add_tail(&base->bo_list, &bo->va) was
+never called, so even though the bo was evicted, bo_base->moved was
+not set.
+3. drm_gem_open_ioctl->amdgpu_vm_bo_base_init: this only calls
+list_move_tail(&base->vm_status, &vm->evicted) and does not set
+bo_base->moved either.
+4. amdgpu_vm_bo_map->amdgpu_vm_bo_insert_map: because bo_base->moved is
+not true, amdgpu_vm_bo_insert_map calls
+list_move(&bo_va->base.vm_status, &vm->moved).
+5. amdgpu_cs_ioctl does not validate the swapped-out bo, since it is only
+on the moved list and not on the evicted list, so a VMC page fault occurs.
+
+Change-Id: I67e6f90ea7c8e1252c5571b35ac7993dffed4e87
+Signed-off-by: Emily Deng <Emily.Deng@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index c5c2d6d..d23eeb3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -172,6 +172,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
+ * is validated on next vm use to avoid fault.
+ * */
+ list_move_tail(&base->vm_status, &vm->evicted);
++ base->moved = true;
+ }
+
+ /**
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5231-drm-amdgpu-remove-amdgpu_bo_gpu_accessible.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5231-drm-amdgpu-remove-amdgpu_bo_gpu_accessible.patch
new file mode 100644
index 00000000..ce342e3f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5231-drm-amdgpu-remove-amdgpu_bo_gpu_accessible.patch
@@ -0,0 +1,52 @@
+From 1067db605e056a9cf494cb57c4ebc9387c897cab Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Tue, 28 Aug 2018 13:44:32 +0200
+Subject: [PATCH 5231/5725] drm/amdgpu: remove amdgpu_bo_gpu_accessible
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Not used any more.
+
+Change-Id: I7330fe09c523fccad0668eade97a4a51b796b825
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 18 ------------------
+ 1 file changed, 18 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+index ae4d06c..bd953bf 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+@@ -197,24 +197,6 @@ static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
+ }
+
+ /**
+- * amdgpu_bo_gpu_accessible - return whether the bo is currently in memory that
+- * is accessible to the GPU.
+- */
+-static inline bool amdgpu_bo_gpu_accessible(struct amdgpu_bo *bo)
+-{
+- switch (bo->tbo.mem.mem_type) {
+- case TTM_PL_TT: return amdgpu_gtt_mgr_has_gart_addr(&bo->tbo.mem);
+-
+- case TTM_PL_VRAM:
+- case AMDGPU_PL_DGMA:
+- case AMDGPU_PL_DGMA_IMPORT:
+- return true;
+-
+- default: return false;
+- }
+-}
+-
+-/**
+ * amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM
+ */
+ static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5232-drm-amdgpu-move-amdgpu_device_-vram-gtt-_location.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5232-drm-amdgpu-move-amdgpu_device_-vram-gtt-_location.patch
new file mode 100644
index 00000000..f7cd77cc
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5232-drm-amdgpu-move-amdgpu_device_-vram-gtt-_location.patch
@@ -0,0 +1,268 @@
+From 3f8b4df452587400e84c1decc0d85e6a5d3ec260 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Thu, 23 Aug 2018 15:20:43 +0200
+Subject: [PATCH 5232/5725] drm/amdgpu: move amdgpu_device_(vram|gtt)_location
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Move that into amdgpu_gmc.c since we are really dealing with the GMC
+address space here.
+
+Change-Id: Ifbd4646af85e9452a5f06b3af5dc50c30a3002fd
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 4 --
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 65 ------------------------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 64 +++++++++++++++++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 4 ++
+ drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 4 +-
+ drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 4 +-
+ drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 4 +-
+ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 4 +-
+ 8 files changed, 76 insertions(+), 77 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 9c59470..64b3990 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -1294,10 +1294,6 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev);
+
+ void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
+ u64 num_vis_bytes);
+-void amdgpu_device_vram_location(struct amdgpu_device *adev,
+- struct amdgpu_gmc *mc, u64 base);
+-void amdgpu_device_gart_location(struct amdgpu_device *adev,
+- struct amdgpu_gmc *mc);
+ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev);
+ void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
+ const u32 *registers,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index e850cde..02d9e4b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -652,71 +652,6 @@ void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
+ }
+
+ /**
+- * amdgpu_device_vram_location - try to find VRAM location
+- *
+- * @adev: amdgpu device structure holding all necessary informations
+- * @mc: memory controller structure holding memory informations
+- * @base: base address at which to put VRAM
+- *
+- * Function will try to place VRAM at base address provided
+- * as parameter.
+- */
+-void amdgpu_device_vram_location(struct amdgpu_device *adev,
+- struct amdgpu_gmc *mc, u64 base)
+-{
+- uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
+-
+- mc->vram_start = base;
+- mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+- if (limit && limit < mc->real_vram_size)
+- mc->real_vram_size = limit;
+- dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
+- mc->mc_vram_size >> 20, mc->vram_start,
+- mc->vram_end, mc->real_vram_size >> 20);
+-}
+-
+-/**
+- * amdgpu_device_gart_location - try to find GART location
+- *
+- * @adev: amdgpu device structure holding all necessary informations
+- * @mc: memory controller structure holding memory informations
+- *
+- * Function will place try to place GART before or after VRAM.
+- *
+- * If GART size is bigger than space left then we ajust GART size.
+- * Thus function will never fails.
+- */
+-void amdgpu_device_gart_location(struct amdgpu_device *adev,
+- struct amdgpu_gmc *mc)
+-{
+- u64 size_af, size_bf;
+-
+- mc->gart_size += adev->pm.smu_prv_buffer_size;
+-
+- size_af = adev->gmc.mc_mask - mc->vram_end;
+- size_bf = mc->vram_start;
+- if (size_bf > size_af) {
+- if (mc->gart_size > size_bf) {
+- dev_warn(adev->dev, "limiting GART\n");
+- mc->gart_size = size_bf;
+- }
+- mc->gart_start = 0;
+- } else {
+- if (mc->gart_size > size_af) {
+- dev_warn(adev->dev, "limiting GART\n");
+- mc->gart_size = size_af;
+- }
+- /* VCE doesn't like it when BOs cross a 4GB segment, so align
+- * the GART base on a 4GB boundary as well.
+- */
+- mc->gart_start = ALIGN(mc->vram_end + 1, 0x100000000ULL);
+- }
+- mc->gart_end = mc->gart_start + mc->gart_size - 1;
+- dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
+- mc->gart_size >> 20, mc->gart_start, mc->gart_end);
+-}
+-
+-/**
+ * amdgpu_device_resize_fb_bar - try to resize FB BAR
+ *
+ * @adev: amdgpu_device pointer
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+index a249931..72dffa3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+@@ -78,3 +78,67 @@ uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo)
+ }
+ return pd_addr;
+ }
++
++/**
++ * amdgpu_gmc_vram_location - try to find VRAM location
++ *
++ * @adev: amdgpu device structure holding all necessary informations
++ * @mc: memory controller structure holding memory informations
++ * @base: base address at which to put VRAM
++ *
++ * Function will try to place VRAM at base address provided
++ * as parameter.
++ */
++void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
++ u64 base)
++{
++ uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
++
++ mc->vram_start = base;
++ mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
++ if (limit && limit < mc->real_vram_size)
++ mc->real_vram_size = limit;
++ dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
++ mc->mc_vram_size >> 20, mc->vram_start,
++ mc->vram_end, mc->real_vram_size >> 20);
++}
++
++/**
++ * amdgpu_gmc_gart_location - try to find GART location
++ *
++ * @adev: amdgpu device structure holding all necessary informations
++ * @mc: memory controller structure holding memory informations
++ *
++ * Function will try to place GART before or after VRAM.
++ *
++ * If GART size is bigger than the space left then we adjust the GART size.
++ * Thus this function will never fail.
++ */
++void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
++{
++ u64 size_af, size_bf;
++
++ mc->gart_size += adev->pm.smu_prv_buffer_size;
++
++ size_af = adev->gmc.mc_mask - mc->vram_end;
++ size_bf = mc->vram_start;
++ if (size_bf > size_af) {
++ if (mc->gart_size > size_bf) {
++ dev_warn(adev->dev, "limiting GART\n");
++ mc->gart_size = size_bf;
++ }
++ mc->gart_start = 0;
++ } else {
++ if (mc->gart_size > size_af) {
++ dev_warn(adev->dev, "limiting GART\n");
++ mc->gart_size = size_af;
++ }
++ /* VCE doesn't like it when BOs cross a 4GB segment, so align
++ * the GART base on a 4GB boundary as well.
++ */
++ mc->gart_start = ALIGN(mc->vram_end + 1, 0x100000000ULL);
++ }
++ mc->gart_end = mc->gart_start + mc->gart_size - 1;
++ dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
++ mc->gart_size >> 20, mc->gart_start, mc->gart_end);
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+index aa0502e..81bb310 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+@@ -137,5 +137,9 @@ static inline bool amdgpu_gmc_vram_full_visible(struct amdgpu_gmc *gmc)
+ void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
+ uint64_t *addr, uint64_t *flags);
+ uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo);
++void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
++ u64 base);
++void amdgpu_gmc_gart_location(struct amdgpu_device *adev,
++ struct amdgpu_gmc *mc);
+
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+index d13110f..3911c52 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+@@ -223,8 +223,8 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
+ u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
+ base <<= 24;
+
+- amdgpu_device_vram_location(adev, &adev->gmc, base);
+- amdgpu_device_gart_location(adev, mc);
++ amdgpu_gmc_vram_location(adev, &adev->gmc, base);
++ amdgpu_gmc_gart_location(adev, mc);
+ }
+
+ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+index 725de42..35fb090 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+@@ -241,8 +241,8 @@ static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
+ u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
+ base <<= 24;
+
+- amdgpu_device_vram_location(adev, &adev->gmc, base);
+- amdgpu_device_gart_location(adev, mc);
++ amdgpu_gmc_vram_location(adev, &adev->gmc, base);
++ amdgpu_gmc_gart_location(adev, mc);
+ }
+
+ /**
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+index 7006eb4..bd3b859 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+@@ -410,8 +410,8 @@ static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
+ base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
+ base <<= 24;
+
+- amdgpu_device_vram_location(adev, &adev->gmc, base);
+- amdgpu_device_gart_location(adev, mc);
++ amdgpu_gmc_vram_location(adev, &adev->gmc, base);
++ amdgpu_gmc_gart_location(adev, mc);
+ }
+
+ /**
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index bc815ebe..2ee8a84 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -748,8 +748,8 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
+ u64 base = 0;
+ if (!amdgpu_sriov_vf(adev))
+ base = mmhub_v1_0_get_fb_location(adev);
+- amdgpu_device_vram_location(adev, &adev->gmc, base);
+- amdgpu_device_gart_location(adev, mc);
++ amdgpu_gmc_vram_location(adev, &adev->gmc, base);
++ amdgpu_gmc_gart_location(adev, mc);
+ /* base offset of vram pages */
+ adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5233-drm-amdgpu-fix-amdgpu_gmc_gart_location-a-little-bit.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5233-drm-amdgpu-fix-amdgpu_gmc_gart_location-a-little-bit.patch
new file mode 100644
index 00000000..b9dbf594
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5233-drm-amdgpu-fix-amdgpu_gmc_gart_location-a-little-bit.patch
@@ -0,0 +1,65 @@
+From 66657df1f779e91fc7b033e0f6c7b390463d4036 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Thu, 23 Aug 2018 20:38:52 +0200
+Subject: [PATCH 5233/5725] drm/amdgpu: fix amdgpu_gmc_gart_location a little
+ bit
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Improve the VCE limitation handling.
+
+Change-Id: Ic9cada48d0bcab353cddf1145cd423c9c53a8a31
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 28 +++++++++++++---------------
+ 1 file changed, 13 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+index 72dffa3..8269197 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+@@ -120,24 +120,22 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
+
+ mc->gart_size += adev->pm.smu_prv_buffer_size;
+
+- size_af = adev->gmc.mc_mask - mc->vram_end;
++ /* VCE doesn't like it when BOs cross a 4GB segment, so align
++ * the GART base on a 4GB boundary as well.
++ */
+ size_bf = mc->vram_start;
+- if (size_bf > size_af) {
+- if (mc->gart_size > size_bf) {
+- dev_warn(adev->dev, "limiting GART\n");
+- mc->gart_size = size_bf;
+- }
++ size_af = adev->gmc.mc_mask + 1 -
++ ALIGN(mc->vram_end + 1, 0x100000000ULL);
++
++ if (mc->gart_size > max(size_bf, size_af)) {
++ dev_warn(adev->dev, "limiting GART\n");
++ mc->gart_size = max(size_bf, size_af);
++ }
++
++ if (size_bf > size_af)
+ mc->gart_start = 0;
+- } else {
+- if (mc->gart_size > size_af) {
+- dev_warn(adev->dev, "limiting GART\n");
+- mc->gart_size = size_af;
+- }
+- /* VCE doesn't like it when BOs cross a 4GB segment, so align
+- * the GART base on a 4GB boundary as well.
+- */
++ else
+ mc->gart_start = ALIGN(mc->vram_end + 1, 0x100000000ULL);
+- }
+ mc->gart_end = mc->gart_start + mc->gart_size - 1;
+ dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
+ mc->gart_size >> 20, mc->gart_start, mc->gart_end);
+--
+2.7.4
+
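A worked example of the new math, using illustrative numbers only (48-bit mc_mask, 8 GB of VRAM based at address 0, a 1 GB GART), not values from any particular ASIC:

        u64 mc_mask    = (1ULL << 48) - 1;
        u64 vram_start = 0;
        u64 vram_end   = (8ULL << 30) - 1;
        u64 gart_size  = 1ULL << 30;

        u64 size_bf = vram_start;                            /* 0: no hole below VRAM  */
        u64 size_af = mc_mask + 1 -
                      ALIGN(vram_end + 1, 0x100000000ULL);   /* 2^48 - 8 GB above VRAM */

        /* gart_size (1 GB) <= max(size_bf, size_af), so it is not clamped; and
         * since size_bf is not larger than size_af, the GART starts at the first
         * 4 GB-aligned address above VRAM: ALIGN(vram_end + 1, 4 GB) = 0x200000000.
         */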
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5234-drm-amdgpu-stop-using-gart_start-as-offset-for-the-G.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5234-drm-amdgpu-stop-using-gart_start-as-offset-for-the-G.patch
new file mode 100644
index 00000000..baa70f76
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5234-drm-amdgpu-stop-using-gart_start-as-offset-for-the-G.patch
@@ -0,0 +1,70 @@
+From 95e944f03d4a6369c4372d01321cbc6d7782a1eb Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Mon, 27 Aug 2018 13:12:19 +0200
+Subject: [PATCH 5234/5725] drm/amdgpu: stop using gart_start as offset for the
+ GTT domain
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Further separate GART and GTT domain.
+
+Change-Id: Ib7722970d70b2c402b2973b65fe497d7f87cfae3
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 3 ++-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 6 +++---
+ 2 files changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+index ee4b908..9d1d2fd 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+@@ -144,7 +144,8 @@ static int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
+ spin_unlock(&mgr->lock);
+
+ if (!r)
+- mem->start = node->node.start;
++ mem->start = node->node.start +
++ (adev->gmc.gart_start >> PAGE_SHIFT);
+
+ return r;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index 027cff2..7053715 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -188,7 +188,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+ case TTM_PL_TT:
+ /* GTT memory */
+ man->func = &amdgpu_gtt_mgr_func;
+- man->gpu_offset = adev->gmc.gart_start;
++ man->gpu_offset = 0;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
+@@ -1097,7 +1097,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
+ flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
+
+ /* bind pages into GART page tables */
+- gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
++ gtt->offset = ((u64)bo_mem->start << PAGE_SHIFT) - adev->gmc.gart_start;
+ r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
+ ttm->pages, gtt->ttm.dma_address, flags);
+
+@@ -1146,7 +1146,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
+ flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
+
+ /* Bind pages */
+- gtt->offset = (u64)tmp.start << PAGE_SHIFT;
++ gtt->offset = ((u64)tmp.start << PAGE_SHIFT) - adev->gmc.gart_start;
+ r = amdgpu_ttm_gart_bind(adev, bo, flags);
+ if (unlikely(r)) {
+ ttm_bo_mem_put(bo, &tmp);
+--
+2.7.4
+
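Because the GTT manager now bakes gart_start into mem->start, both bind paths subtract it back out, so the offset actually programmed into the GART table is unchanged. A small numeric sketch with made-up values:

        u64 gart_start = 0x200000000ULL;   /* picked by amdgpu_gmc_gart_location        */
        u64 node_start = 16;               /* page index handed out by the GTT manager  */

        u64 mem_start  = node_start + (gart_start >> PAGE_SHIFT);  /* GTT-domain address, in pages */
        u64 gtt_offset = (mem_start << PAGE_SHIFT) - gart_start;   /* back to 16 pages = 0x10000   */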
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5235-drm-amdgpu-distinct-between-allocated-GART-space-and.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5235-drm-amdgpu-distinct-between-allocated-GART-space-and.patch
new file mode 100644
index 00000000..59dfb189
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5235-drm-amdgpu-distinct-between-allocated-GART-space-and.patch
@@ -0,0 +1,84 @@
+From 9afc3df3cff3343b3d9a7705fe84b85f8b1f3163 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Mon, 27 Aug 2018 13:51:27 +0200
+Subject: [PATCH 5235/5725] drm/amdgpu: distinct between allocated GART space
+ and GMC addr
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Most of the time we only need to know if the BO has a valid GMC addr.
+
+Change-Id: I3864f3f5c1cbdd12db06445e1597ccab1e7d5fad
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 2 --
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 13 +++++--------
+ 2 files changed, 5 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index 6f86f44..a4092d9 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -1403,8 +1403,6 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
+ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
+ {
+ WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
+- WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_TT &&
+- !amdgpu_gtt_mgr_has_gart_addr(&bo->tbo.mem));
+ WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
+ !bo->pin_count);
+ WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index 7053715..0916667 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -365,7 +365,7 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
+ {
+ uint64_t addr = 0;
+
+- if (mem->mem_type != TTM_PL_TT || amdgpu_gtt_mgr_has_gart_addr(mem)) {
++ if (mm_node->start != AMDGPU_BO_INVALID_OFFSET) {
+ addr = mm_node->start << PAGE_SHIFT;
+ addr += bo->bdev->man[mem->mem_type].gpu_offset;
+ }
+@@ -453,8 +453,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
+ /* Map only what needs to be accessed. Map src to window 0 and
+ * dst to window 1
+ */
+- if (src->mem->mem_type == TTM_PL_TT &&
+- !amdgpu_gtt_mgr_has_gart_addr(src->mem)) {
++ if (src->mem->start == AMDGPU_BO_INVALID_OFFSET) {
+ r = amdgpu_map_buffer(src->bo, src->mem,
+ PFN_UP(cur_size + src_page_offset),
+ src_node_start, 0, ring,
+@@ -467,8 +466,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
+ from += src_page_offset;
+ }
+
+- if (dst->mem->mem_type == TTM_PL_TT &&
+- !amdgpu_gtt_mgr_has_gart_addr(dst->mem)) {
++ if (dst->mem->start == AMDGPU_BO_INVALID_OFFSET) {
+ r = amdgpu_map_buffer(dst->bo, dst->mem,
+ PFN_UP(cur_size + dst_page_offset),
+ dst_node_start, 1, ring,
+@@ -1122,11 +1120,10 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
+ uint64_t flags;
+ int r;
+
+- if (bo->mem.mem_type != TTM_PL_TT ||
+- amdgpu_gtt_mgr_has_gart_addr(&bo->mem))
++ if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET)
+ return 0;
+
+- /* allocate GTT space */
++ /* allocate GART space */
+ tmp = bo->mem;
+ tmp.mm_node = NULL;
+ placement.num_placement = 1;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5236-drm-amdgpu-use-the-smaller-hole-for-GART.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5236-drm-amdgpu-use-the-smaller-hole-for-GART.patch
new file mode 100644
index 00000000..84f0d704
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5236-drm-amdgpu-use-the-smaller-hole-for-GART.patch
@@ -0,0 +1,37 @@
+From 640f3fccc447cf0f5797e5ca837af0042723e76b Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Fri, 24 Aug 2018 09:40:10 +0200
+Subject: [PATCH 5236/5725] drm/amdgpu: use the smaller hole for GART
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Instead of the larger one, use the smaller hole in the MC address
+space for the GART mappings.
+
+Change-Id: Ibb5d15772105b5fc52010825f85badc7abd4d12c
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+index 8269197..265ec68 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+@@ -132,7 +132,8 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
+ mc->gart_size = max(size_bf, size_af);
+ }
+
+- if (size_bf > size_af)
++ if ((size_bf >= mc->gart_size && size_bf < size_af) ||
++ (size_af < mc->gart_size))
+ mc->gart_start = 0;
+ else
+ mc->gart_start = ALIGN(mc->vram_end + 1, 0x100000000ULL);
+--
+2.7.4
+
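The rewritten condition prefers whichever hole is smaller while still large enough for the (already clamped) GART. A decision sketch with hypothetical sizes:

        /* hypothetical sizes, after gart_size was clamped to max(size_bf, size_af) */
        u64 size_bf   = 3ULL << 30;      /* 3 GB free below VRAM   */
        u64 size_af   = 200ULL << 30;    /* 200 GB free above VRAM */
        u64 gart_size = 1ULL << 30;      /* 1 GB GART              */

        if ((size_bf >= gart_size && size_bf < size_af) || (size_af < gart_size))
                gart_start = 0;          /* taken here: the 3 GB hole is enough and smaller */
        else
                gart_start = ALIGN(vram_end + 1, 0x100000000ULL);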
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5237-drm-amdgpu-remove-redundant-memset.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5237-drm-amdgpu-remove-redundant-memset.patch
new file mode 100644
index 00000000..5298a83b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5237-drm-amdgpu-remove-redundant-memset.patch
@@ -0,0 +1,37 @@
+From 298fa1bac2cdac7abdf0aa29cdf98b4068d43ce7 Mon Sep 17 00:00:00 2001
+From: Philip Yang <Philip.Yang@amd.com>
+Date: Wed, 29 Aug 2018 10:53:23 -0400
+Subject: [PATCH 5237/5725] drm/amdgpu: remove redundant memset
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+kvmalloc_array called with the __GFP_ZERO flag already returns zeroed
+memory, so zeroing it again with memset afterwards is unnecessary, and
+in this case buggy because it only clears the first entry.
+
+Change-Id: I45ec0934143e2eab62b4d6c6aa8cbd7b998ee30a
+Signed-off-by: Philip Yang <Philip.Yang@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index d23eeb3..b50e18f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -507,7 +507,6 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!parent->entries)
+ return -ENOMEM;
+- memset(parent->entries, 0 , sizeof(struct amdgpu_vm_pt));
+ }
+
+ from = saddr >> shift;
+--
+2.7.4
+
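A kernel-style sketch (separate from the patch, with illustrative names rather
than the real amdgpu_vm types) of why the removed memset was both redundant and
wrong: __GFP_ZERO already zeroes the whole array, while the memset only covered
one entry's worth of bytes.

#include <linux/mm.h>		/* kvmalloc_array() */
#include <linux/slab.h>

struct pt_entry { void *bo; u64 addr; };	/* stand-in for struct amdgpu_vm_pt */

static struct pt_entry *alloc_entries(unsigned int num)
{
	struct pt_entry *entries;

	/* __GFP_ZERO already hands back num fully zeroed entries */
	entries = kvmalloc_array(num, sizeof(*entries),
				 GFP_KERNEL | __GFP_ZERO);
	if (!entries)
		return NULL;

	/*
	 * The removed line was the equivalent of
	 *	memset(entries, 0, sizeof(struct pt_entry));
	 * i.e. it re-zeroed a single entry, not num * sizeof(*entries) bytes.
	 */
	return entries;
}
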
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5238-drm-amdgpu-add-missing-CHIP_HAINAN-in-amdgpu_ucode_g.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5238-drm-amdgpu-add-missing-CHIP_HAINAN-in-amdgpu_ucode_g.patch
new file mode 100644
index 00000000..0d16f99b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5238-drm-amdgpu-add-missing-CHIP_HAINAN-in-amdgpu_ucode_g.patch
@@ -0,0 +1,35 @@
+From 71f5c896f2e1dff3d92b28ac805dce0f99743079 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Tue, 28 Aug 2018 14:16:23 -0500
+Subject: [PATCH 5238/5725] drm/amdgpu: add missing CHIP_HAINAN in
+ amdgpu_ucode_get_load_type
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This caused a confusing error message, but there is functionally
+no problem since the default method is DIRECT.
+
+Change-Id: I8703584ad0d1e5e4144bad7351d01068c9c3c6a4
+Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+index 63e2996..8777dad 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+@@ -277,6 +277,7 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ case CHIP_OLAND:
++ case CHIP_HAINAN:
+ return AMDGPU_FW_LOAD_DIRECT;
+ #endif
+ #ifdef CONFIG_DRM_AMDGPU_CIK
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5239-drm-amdgpu-put-GART-away-from-VRAM-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5239-drm-amdgpu-put-GART-away-from-VRAM-v2.patch
new file mode 100644
index 00000000..ec797c6a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5239-drm-amdgpu-put-GART-away-from-VRAM-v2.patch
@@ -0,0 +1,56 @@
+From 9b64c81216570451e37d6b3787101fc12c06b76b Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Fri, 24 Aug 2018 10:48:12 +0200
+Subject: [PATCH 5239/5725] drm/amdgpu: put GART away from VRAM v2
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Always try to put the GART away from where VRAM is.
+
+v2: correctly handle the 4GB limitation
+
+Change-Id: I9be1a460283753a976ba4489fdcff4f131384555
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+index 265ec68..c6bcc47 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+@@ -116,6 +116,7 @@ void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
+ */
+ void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
+ {
++ const uint64_t four_gb = 0x100000000ULL;
+ u64 size_af, size_bf;
+
+ mc->gart_size += adev->pm.smu_prv_buffer_size;
+@@ -124,8 +125,7 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
+ * the GART base on a 4GB boundary as well.
+ */
+ size_bf = mc->vram_start;
+- size_af = adev->gmc.mc_mask + 1 -
+- ALIGN(mc->vram_end + 1, 0x100000000ULL);
++ size_af = adev->gmc.mc_mask + 1 - ALIGN(mc->vram_end + 1, four_gb);
+
+ if (mc->gart_size > max(size_bf, size_af)) {
+ dev_warn(adev->dev, "limiting GART\n");
+@@ -136,7 +136,9 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
+ (size_af < mc->gart_size))
+ mc->gart_start = 0;
+ else
+- mc->gart_start = ALIGN(mc->vram_end + 1, 0x100000000ULL);
++ mc->gart_start = mc->mc_mask - mc->gart_size + 1;
++
++	mc->gart_start &= ~(four_gb - 1);
+ mc->gart_end = mc->gart_start + mc->gart_size - 1;
+ dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
+ mc->gart_size >> 20, mc->gart_start, mc->gart_end);
+--
+2.7.4
+
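A runnable sketch (separate from the patch) of where the reworked code places the
GART when the hole above VRAM is chosen: at the top of the MC address space,
aligned down to a 4 GB boundary. It assumes the final mask aligns gart_start down,
as in the mainline version of this change; the numbers are hypothetical.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t four_gb  = 0x100000000ULL;
	uint64_t mc_mask   = 0x0000FFFFFFFFFFFFULL;	/* 48-bit MC space */
	uint64_t gart_size = 512ULL << 20;		/* 512 MB GART */
	uint64_t gart_start, gart_end;

	gart_start = mc_mask - gart_size + 1;		/* 0x0000ffffe0000000 */
	gart_start &= ~(four_gb - 1);			/* 0x0000ffff00000000, 4 GB aligned */
	gart_end = gart_start + gart_size - 1;

	/* When the hole below VRAM is chosen instead, gart_start is 0 and
	 * the alignment mask is a no-op.
	 */
	printf("GART: 0x%016llx - 0x%016llx\n",
	       (unsigned long long)gart_start, (unsigned long long)gart_end);
	return 0;
}
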
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5240-drm-amdgpu-Revert-kmap-PDs-PTs-in-amdgpu_vm_update_d.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5240-drm-amdgpu-Revert-kmap-PDs-PTs-in-amdgpu_vm_update_d.patch
new file mode 100644
index 00000000..cd15a3b4
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5240-drm-amdgpu-Revert-kmap-PDs-PTs-in-amdgpu_vm_update_d.patch
@@ -0,0 +1,56 @@
+From 2e1e02c45fc5d002c7efff09b8a9653ff7741164 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Thu, 30 Aug 2018 09:45:07 +0200
+Subject: [PATCH 5240/5725] drm/amdgpu: Revert "kmap PDs/PTs in
+ amdgpu_vm_update_directories"
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This reverts commit a7f91061c60ad9cac2e6a03b642be6a4f88b3662.
+
+Felix pointed out that we need to have the BOs mapped even before
+amdgpu_vm_update_directories is called.
+
+Change-Id: Ibb904d980e7f8e41d8f9e00064fa24f1c7b1a82c
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Acked-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 13 ++++---------
+ 1 file changed, 4 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index b50e18f..99ef1e9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -309,7 +309,10 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ list_move(&bo_base->vm_status, &vm->moved);
+ spin_unlock(&vm->moved_lock);
+ } else {
+- r = amdgpu_ttm_alloc_gart(&bo->tbo);
++ if (vm->use_cpu_for_update)
++ r = amdgpu_bo_kmap(bo, NULL);
++ else
++ r = amdgpu_ttm_alloc_gart(&bo->tbo);
+ if (r)
+ break;
+ list_move(&bo_base->vm_status, &vm->relocated);
+@@ -1076,14 +1079,6 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
+ params.adev = adev;
+
+ if (vm->use_cpu_for_update) {
+- struct amdgpu_vm_bo_base *bo_base;
+-
+- list_for_each_entry(bo_base, &vm->relocated, vm_status) {
+- r = amdgpu_bo_kmap(bo_base->bo, NULL);
+- if (unlikely(r))
+- return r;
+- }
+-
+ r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
+ if (unlikely(r))
+ return r;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5241-drm-amdgpu-gmc9-rework-stolen-vga-memory-handling.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5241-drm-amdgpu-gmc9-rework-stolen-vga-memory-handling.patch
new file mode 100644
index 00000000..efe8917a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5241-drm-amdgpu-gmc9-rework-stolen-vga-memory-handling.patch
@@ -0,0 +1,127 @@
+From b795b8498211c7933b0e9fbd099fa0f8dd2b4e52 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 30 Aug 2018 09:31:56 -0500
+Subject: [PATCH 5241/5725] drm/amdgpu/gmc9: rework stolen vga memory handling
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+No functional change, just rework it in order to adjust the
+behavior on a per-ASIC level. The problem is that on vega10,
+something corrupts the lower 8 MB of VRAM on the second
+resume from S3. This does not seem to affect Raven; other
+gmc9-based ASICs need testing.
+
+Change-Id: Ib82827aa5a48ee5829b870d98ae3b733ca822e9a
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 48 +++++++++++++++++++++--------------
+ 1 file changed, 29 insertions(+), 19 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index 2ee8a84..25c33e8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -691,6 +691,28 @@ static int gmc_v9_0_ecc_available(struct amdgpu_device *adev)
+ return lost_sheep == 0;
+ }
+
++static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
++{
++
++ /*
++ * TODO:
++ * Currently there is a bug where some memory client outside
++ * of the driver writes to first 8M of VRAM on S3 resume,
++ * this overrides GART which by default gets placed in first 8M and
++ * causes VM_FAULTS once GTT is accessed.
++ * Keep the stolen memory reservation until the while this is not solved.
++ * Also check code in gmc_v9_0_get_vbios_fb_size and gmc_v9_0_late_init
++ */
++ switch (adev->asic_type) {
++ case CHIP_RAVEN:
++ case CHIP_VEGA10:
++ case CHIP_VEGA12:
++ case CHIP_VEGA20:
++ default:
++ return true;
++ }
++}
++
+ static int gmc_v9_0_late_init(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+@@ -707,10 +729,8 @@ static int gmc_v9_0_late_init(void *handle)
+ unsigned i;
+ int r;
+
+- /*
+- * TODO - Uncomment once GART corruption issue is fixed.
+- */
+- /* amdgpu_bo_late_init(adev); */
++ if (!gmc_v9_0_keep_stolen_memory(adev))
++ amdgpu_bo_late_init(adev);
+
+ for(i = 0; i < adev->num_rings; ++i) {
+ struct amdgpu_ring *ring = adev->rings[i];
+@@ -848,18 +868,16 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
+
+ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
+ {
+-#if 0
+ u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
+-#endif
+ unsigned size;
+
+ /*
+ * TODO Remove once GART corruption is resolved
+ * Check related code in gmc_v9_0_sw_fini
+ * */
+- size = 9 * 1024 * 1024;
++ if (gmc_v9_0_keep_stolen_memory(adev))
++ return 9 * 1024 * 1024;
+
+-#if 0
+ if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
+ size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
+ } else {
+@@ -876,6 +894,7 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
+ break;
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
++ case CHIP_VEGA20:
+ default:
+ viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
+ size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
+@@ -888,7 +907,6 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
+ if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
+ return 0;
+
+-#endif
+ return size;
+ }
+
+@@ -1013,16 +1031,8 @@ static int gmc_v9_0_sw_fini(void *handle)
+ amdgpu_vm_manager_fini(adev);
+ gmc_v9_0_gart_fini(adev);
+
+- /*
+- * TODO:
+- * Currently there is a bug where some memory client outside
+- * of the driver writes to first 8M of VRAM on S3 resume,
+- * this overrides GART which by default gets placed in first 8M and
+- * causes VM_FAULTS once GTT is accessed.
+- * Keep the stolen memory reservation until the while this is not solved.
+- * Also check code in gmc_v9_0_get_vbios_fb_size and gmc_v9_0_late_init
+- */
+- amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
++ if (gmc_v9_0_keep_stolen_memory(adev))
++ amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
+
+ amdgpu_bo_fini(adev);
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5242-drm-amdgpu-gmc9-don-t-keep-stolen-memory-on-Raven.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5242-drm-amdgpu-gmc9-don-t-keep-stolen-memory-on-Raven.patch
new file mode 100644
index 00000000..82496c86
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5242-drm-amdgpu-gmc9-don-t-keep-stolen-memory-on-Raven.patch
@@ -0,0 +1,38 @@
+From 0a41395f8c44c3235e5b6df7b43e424b3a356fdf Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 30 Aug 2018 09:41:12 -0500
+Subject: [PATCH 5242/5725] drm/amdgpu/gmc9: don't keep stolen memory on Raven
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Raven does not appear to be affected by the same issue
+as vega10. Enable the full stolen memory handling on
+Raven. Reserve the appropriate size at init time to avoid
+display artifacts and then free it at the end of init once
+the new FB is up and running.
+
+Bug: https://bugs.freedesktop.org/show_bug.cgi?id=106639
+Change-Id: I40e015a6b960afddc1af688be7200bfd656b7aa4
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index 25c33e8..f5db31c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -705,6 +705,7 @@ static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
+ */
+ switch (adev->asic_type) {
+ case CHIP_RAVEN:
++ return false;
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
+ case CHIP_VEGA20:
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5243-drm-amdgpu-gmc9-don-t-keep-stolen-memory-on-vega12.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5243-drm-amdgpu-gmc9-don-t-keep-stolen-memory-on-vega12.patch
new file mode 100644
index 00000000..52f08d1e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5243-drm-amdgpu-gmc9-don-t-keep-stolen-memory-on-vega12.patch
@@ -0,0 +1,39 @@
+From adc0ac47cfc049e1a8a26c03a8af1bc745fbb6a1 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 30 Aug 2018 09:44:31 -0500
+Subject: [PATCH 5243/5725] drm/amdgpu/gmc9: don't keep stolen memory on vega12
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+vega12 does not appear to be affected by the same issue
+as vega10. Enable the full stolen memory handling on
+vega12. Reserve the appropriate size at init time to avoid
+display artifacts and then free it at the end of init once
+the new FB is up and running.
+
+Change-Id: Ie03e8aecc0ad5f03a079c4beaaf8deee53481590
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index f5db31c..be59cb8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -706,8 +706,9 @@ static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
+ switch (adev->asic_type) {
+ case CHIP_RAVEN:
+ return false;
+- case CHIP_VEGA10:
+ case CHIP_VEGA12:
++ return false;
++ case CHIP_VEGA10:
+ case CHIP_VEGA20:
+ default:
+ return true;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5244-drm-amdgpu-gmc9-don-t-keep-stolen-memory-on-vega20.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5244-drm-amdgpu-gmc9-don-t-keep-stolen-memory-on-vega20.patch
new file mode 100644
index 00000000..da5bc1ec
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5244-drm-amdgpu-gmc9-don-t-keep-stolen-memory-on-vega20.patch
@@ -0,0 +1,46 @@
+From ae7548fc5b5bdac26fa7abe9ae8a6004b893bbc8 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 30 Aug 2018 09:46:27 -0500
+Subject: [PATCH 5244/5725] drm/amdgpu/gmc9: don't keep stolen memory on vega20
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Vega20 does not appear to be affected by the same issue
+as vega10. Enable the full stolen memory handling on
+vega20. Reserve the appropriate size at init time to avoid
+display artifacts and then free it at the end of init once
+the new FB is up and running.
+
+Change-Id: I46c569cb06638381c400ece457dfa13693803690
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index be59cb8..7e9d1c6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -704,14 +704,13 @@ static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
+ * Also check code in gmc_v9_0_get_vbios_fb_size and gmc_v9_0_late_init
+ */
+ switch (adev->asic_type) {
++ case CHIP_VEGA10:
++ return true;
+ case CHIP_RAVEN:
+- return false;
+ case CHIP_VEGA12:
+- return false;
+- case CHIP_VEGA10:
+ case CHIP_VEGA20:
+ default:
+- return true;
++ return false;
+ }
+ }
+
+--
+2.7.4
+
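For reference, the net effect of patches 5241 through 5244 on the per-ASIC
helper, reconstructed from the hunks above: only vega10 keeps the stolen-memory
reservation as a workaround.

/* Reconstructed end state of the helper after patches 5241-5244. */
static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
{
	/* Only vega10 is known to corrupt the first 8 MB of VRAM on the
	 * second S3 resume, so only it keeps the reservation.
	 */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		return true;
	case CHIP_RAVEN:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	default:
		return false;
	}
}
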
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5245-drm-amd-powerplay-added-vega20-overdrive-support-V3.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5245-drm-amd-powerplay-added-vega20-overdrive-support-V3.patch
new file mode 100644
index 00000000..075a05ca
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5245-drm-amd-powerplay-added-vega20-overdrive-support-V3.patch
@@ -0,0 +1,452 @@
+From 74b0a922b04d1b411801ff7e08fc8c0d8798e89b Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Wed, 29 Aug 2018 14:38:50 +0800
+Subject: [PATCH 5245/5725] drm/amd/powerplay: added vega20 overdrive support
+ V3
+
+Added vega20 overdrive support based on the existing OD sysfs
+APIs. However, the OD logic is simplified on vega20, so the
+behavior is a little different and only a limited set of
+levels is supported.
+
+V2: fix typo
+ fix commit description
+ revise error logs
+ add support for clock OD
+
+V3: separate clock from voltage OD settings
+
+Change-Id: I403cb38a95863db664cf06d030ac42a19bff6b33
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 45 ++++
+ drivers/gpu/drm/amd/include/kgd_pp_interface.h | 2 +
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 289 ++++++++++++++++++++-
+ 3 files changed, 335 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+index f5404f2..e63b0c2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+@@ -474,6 +474,8 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
+ * in each power level within a power state. The pp_od_clk_voltage is used for
+ * this.
+ *
++ * < For Vega10 and previous ASICs >
++ *
+ * Reading the file will display:
+ *
+ * - a list of engine clock levels and voltages labeled OD_SCLK
+@@ -491,6 +493,44 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
+ * "c" (commit) to the file to commit your changes. If you want to reset to the
+ * default power levels, write "r" (reset) to the file to reset them.
+ *
++ *
++ * < For Vega20 >
++ *
++ * Reading the file will display:
++ *
++ * - minimum and maximum engine clock labeled OD_SCLK
++ *
++ * - maximum memory clock labeled OD_MCLK
++ *
++ * - three <frequency, voltage offset> points labeled OD_VDDC_CURVE.
++ * They can be used to calibrate the sclk voltage curve.
++ *
++ * - a list of valid ranges for sclk, mclk, and voltage curve points
++ * labeled OD_RANGE
++ *
++ * To manually adjust these settings:
++ *
++ * - First select manual using power_dpm_force_performance_level
++ *
++ * - For clock frequency setting, enter a new value by writing a
++ * string that contains "s/m index clock" to the file. The index
++ * should be 0 if to set minimum clock. And 1 if to set maximum
++ * clock. E.g., "s 0 500" will update minimum sclk to be 500 MHz.
++ * "m 1 800" will update maximum mclk to be 800Mhz.
++ *
++ * For sclk voltage curve, enter the new values by writing a
++ * string that contains "vc point clock voff" to the file. The
++ * points are indexed by 0, 1 and 2. E.g., "vc 0 300 10" will
++ * update point1 with clock set as 300Mhz and voltage increased
++ * by 10mV. "vc 2 1000 -10" will update point3 with clock set
++ * as 1000Mhz and voltage drop by 10mV.
++ *
++ * - When you have edited all of the states as needed, write "c" (commit)
++ * to the file to commit your changes
++ *
++ * - If you want to reset to the default power levels, write "r" (reset)
++ * to the file to reset them
++ *
+ */
+
+ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
+@@ -520,6 +560,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
+ type = PP_OD_RESTORE_DEFAULT_TABLE;
+ else if (*buf == 'c')
+ type = PP_OD_COMMIT_DPM_TABLE;
++ else if (!strncmp(buf, "vc", 2))
++ type = PP_OD_EDIT_VDDC_CURVE;
+ else
+ return -EINVAL;
+
+@@ -527,6 +569,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
+
+ tmp_str = buf_cpy;
+
++ if (type == PP_OD_EDIT_VDDC_CURVE)
++ tmp_str++;
+ while (isspace(*++tmp_str));
+
+ while (tmp_str[0]) {
+@@ -570,6 +614,7 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
+ if (adev->powerplay.pp_funcs->print_clock_levels) {
+ size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
+ size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
++ size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size);
+ size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size);
+ return size;
+ } else {
+diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+index 6a41b81..448dee4 100644
+--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
++++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+@@ -94,6 +94,7 @@ enum pp_clock_type {
+ PP_PCIE,
+ OD_SCLK,
+ OD_MCLK,
++ OD_VDDC_CURVE,
+ OD_RANGE,
+ };
+
+@@ -141,6 +142,7 @@ enum {
+ enum PP_OD_DPM_TABLE_COMMAND {
+ PP_OD_EDIT_SCLK_VDDC_TABLE,
+ PP_OD_EDIT_MCLK_VDDC_TABLE,
++ PP_OD_EDIT_VDDC_CURVE,
+ PP_OD_RESTORE_DEFAULT_TABLE,
+ PP_OD_COMMIT_DPM_TABLE
+ };
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+index fb32b28..3efd59e 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+@@ -2325,11 +2325,207 @@ static int vega20_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
+ return 0;
+ }
+
++static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
++ enum PP_OD_DPM_TABLE_COMMAND type,
++ long *input, uint32_t size)
++{
++ struct vega20_hwmgr *data =
++ (struct vega20_hwmgr *)(hwmgr->backend);
++ struct vega20_od8_single_setting *od8_settings =
++ data->od8_settings.od8_settings_array;
++ OverDriveTable_t *od_table =
++ &(data->smc_state_table.overdrive_table);
++ struct pp_clock_levels_with_latency clocks;
++ int32_t input_index, input_clk, input_vol, i;
++ int ret;
++
++ PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage",
++ return -EINVAL);
++
++ switch (type) {
++ case PP_OD_EDIT_SCLK_VDDC_TABLE:
++ if (!(od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
++ od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id)) {
++ pr_info("Sclk min/max frequency overdrive not supported\n");
++ return -EOPNOTSUPP;
++ }
++
++ for (i = 0; i < size; i += 2) {
++ if (i + 2 > size) {
++ pr_info("invalid number of input parameters %d\n",
++ size);
++ return -EINVAL;
++ }
++
++ input_index = input[i];
++ input_clk = input[i + 1];
++
++ if (input_index != 0 && input_index != 1) {
++ pr_info("Invalid index %d\n", input_index);
++ pr_info("Support min/max sclk frequency setting only which index by 0/1\n");
++ return -EINVAL;
++ }
++
++ if (input_clk < od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value ||
++ input_clk > od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value) {
++ pr_info("clock freq %d is not within allowed range [%d - %d]\n",
++ input_clk,
++ od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value,
++ od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value);
++ return -EINVAL;
++ }
++
++ if (input_index == 0)
++ od_table->GfxclkFmin = input_clk;
++ else
++ od_table->GfxclkFmax = input_clk;
++ }
++
++ break;
++
++ case PP_OD_EDIT_MCLK_VDDC_TABLE:
++ if (!od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
++ pr_info("Mclk max frequency overdrive not supported\n");
++ return -EOPNOTSUPP;
++ }
++
++ ret = vega20_get_memclocks(hwmgr, &clocks);
++ PP_ASSERT_WITH_CODE(!ret,
++ "Attempt to get memory clk levels failed!",
++ return ret);
++
++ for (i = 0; i < size; i += 2) {
++ if (i + 2 > size) {
++ pr_info("invalid number of input parameters %d\n",
++ size);
++ return -EINVAL;
++ }
++
++ input_index = input[i];
++ input_clk = input[i + 1];
++
++ if (input_index != 1) {
++ pr_info("Invalid index %d\n", input_index);
++ pr_info("Support max Mclk frequency setting only which index by 1\n");
++ return -EINVAL;
++ }
++
++ if (input_clk < clocks.data[0].clocks_in_khz / 100 ||
++ input_clk > od8_settings[OD8_SETTING_UCLK_FMAX].max_value) {
++ pr_info("clock freq %d is not within allowed range [%d - %d]\n",
++ input_clk,
++ clocks.data[0].clocks_in_khz / 100,
++ od8_settings[OD8_SETTING_UCLK_FMAX].max_value);
++ return -EINVAL;
++ }
++
++ od_table->UclkFmax = input_clk;
++ }
++
++ break;
++
++ case PP_OD_EDIT_VDDC_CURVE:
++ if (!(od8_settings[OD8_SETTING_GFXCLK_FREQ1].feature_id &&
++ od8_settings[OD8_SETTING_GFXCLK_FREQ2].feature_id &&
++ od8_settings[OD8_SETTING_GFXCLK_FREQ3].feature_id &&
++ od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
++ od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
++ od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id)) {
++ pr_info("Voltage curve calibrate not supported\n");
++ return -EOPNOTSUPP;
++ }
++
++ for (i = 0; i < size; i += 3) {
++ if (i + 3 > size) {
++ pr_info("invalid number of input parameters %d\n",
++ size);
++ return -EINVAL;
++ }
++
++ input_index = input[i];
++ input_clk = input[i + 1];
++ input_vol = input[i + 2];
++
++ if (input_index > 2) {
++ pr_info("Setting for point %d is not supported\n",
++ input_index + 1);
++ pr_info("Three supported points index by 0, 1, 2\n");
++ return -EINVAL;
++ }
++
++ if (input_clk < od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value ||
++ input_clk > od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value) {
++ pr_info("clock freq %d is not within allowed range [%d - %d]\n",
++ input_clk,
++ od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value,
++ od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value);
++ return -EINVAL;
++ }
++
++ /* TODO: suppose voltage1/2/3 has the same min/max value */
++ if (input_vol < od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].min_value ||
++ input_vol > od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].max_value) {
++ pr_info("clock voltage offset %d is not within allowed range [%d - %d]\n",
++ input_vol,
++ od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].min_value,
++ od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].max_value);
++ return -EINVAL;
++ }
++
++ switch (input_index) {
++ case 0:
++ od_table->GfxclkFreq1 = input_clk;
++ od_table->GfxclkOffsetVolt1 = input_vol;
++ break;
++ case 1:
++ od_table->GfxclkFreq2 = input_clk;
++ od_table->GfxclkOffsetVolt2 = input_vol;
++ break;
++ case 2:
++ od_table->GfxclkFreq3 = input_clk;
++ od_table->GfxclkOffsetVolt3 = input_vol;
++ break;
++ }
++ }
++ break;
++
++ case PP_OD_RESTORE_DEFAULT_TABLE:
++ ret = vega20_copy_table_from_smc(hwmgr,
++ (uint8_t *)od_table,
++ TABLE_OVERDRIVE);
++ PP_ASSERT_WITH_CODE(!ret,
++ "Failed to export overdrive table!",
++ return ret);
++ break;
++
++ case PP_OD_COMMIT_DPM_TABLE:
++ ret = vega20_copy_table_to_smc(hwmgr,
++ (uint8_t *)od_table,
++ TABLE_OVERDRIVE);
++ PP_ASSERT_WITH_CODE(!ret,
++ "Failed to import overdrive table!",
++ return ret);
++
++ break;
++
++ default:
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
+ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
+ enum pp_clock_type type, char *buf)
+ {
+- int i, now, size = 0;
++ struct vega20_hwmgr *data =
++ (struct vega20_hwmgr *)(hwmgr->backend);
++ struct vega20_od8_single_setting *od8_settings =
++ data->od8_settings.od8_settings_array;
++ OverDriveTable_t *od_table =
++ &(data->smc_state_table.overdrive_table);
+ struct pp_clock_levels_with_latency clocks;
++ int i, now, size = 0;
+ int ret = 0;
+
+ switch (type) {
+@@ -2370,6 +2566,95 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
+ case PP_PCIE:
+ break;
+
++ case OD_SCLK:
++ if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
++ od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) {
++ size = sprintf(buf, "%s:\n", "OD_SCLK");
++ size += sprintf(buf + size, "0: %10uMhz\n",
++ od_table->GfxclkFmin);
++ size += sprintf(buf + size, "1: %10uMhz\n",
++ od_table->GfxclkFmax);
++ }
++ break;
++
++ case OD_MCLK:
++ if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
++ size = sprintf(buf, "%s:\n", "OD_MCLK");
++ size += sprintf(buf + size, "1: %10uMhz\n",
++ od_table->UclkFmax);
++ }
++
++ break;
++
++ case OD_VDDC_CURVE:
++ if (od8_settings[OD8_SETTING_GFXCLK_FREQ1].feature_id &&
++ od8_settings[OD8_SETTING_GFXCLK_FREQ2].feature_id &&
++ od8_settings[OD8_SETTING_GFXCLK_FREQ3].feature_id &&
++ od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
++ od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
++ od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) {
++ size = sprintf(buf, "%s:\n", "OD_VDDC_CURVE");
++ size += sprintf(buf + size, "0: %10uMhz %10dmV\n",
++ od_table->GfxclkFreq1,
++ od_table->GfxclkOffsetVolt1);
++ size += sprintf(buf + size, "1: %10uMhz %10dmV\n",
++ od_table->GfxclkFreq2,
++ od_table->GfxclkOffsetVolt2);
++ size += sprintf(buf + size, "2: %10uMhz %10dmV\n",
++ od_table->GfxclkFreq3,
++ od_table->GfxclkOffsetVolt3);
++ }
++
++ break;
++
++ case OD_RANGE:
++ size = sprintf(buf, "%s:\n", "OD_RANGE");
++
++ if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
++ od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) {
++ size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
++ od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value,
++ od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value);
++ }
++
++ if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
++ ret = vega20_get_memclocks(hwmgr, &clocks);
++ PP_ASSERT_WITH_CODE(!ret,
++ "Fail to get memory clk levels!",
++ return ret);
++
++ size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
++ clocks.data[0].clocks_in_khz / 100,
++ od8_settings[OD8_SETTING_UCLK_FMAX].max_value);
++ }
++
++ if (od8_settings[OD8_SETTING_GFXCLK_FREQ1].feature_id &&
++ od8_settings[OD8_SETTING_GFXCLK_FREQ2].feature_id &&
++ od8_settings[OD8_SETTING_GFXCLK_FREQ3].feature_id &&
++ od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
++ od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
++ od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) {
++ size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
++ od8_settings[OD8_SETTING_GFXCLK_FREQ1].min_value,
++ od8_settings[OD8_SETTING_GFXCLK_FREQ1].max_value);
++ size += sprintf(buf + size, "VDDC_CURVE_VOFF[0]: %7dmV %11dmV\n",
++ od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].min_value,
++ od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].max_value);
++ size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
++ od8_settings[OD8_SETTING_GFXCLK_FREQ2].min_value,
++ od8_settings[OD8_SETTING_GFXCLK_FREQ2].max_value);
++ size += sprintf(buf + size, "VDDC_CURVE_VOFF[1]: %7dmV %11dmV\n",
++ od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].min_value,
++ od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].max_value);
++ size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
++ od8_settings[OD8_SETTING_GFXCLK_FREQ3].min_value,
++ od8_settings[OD8_SETTING_GFXCLK_FREQ3].max_value);
++ size += sprintf(buf + size, "VDDC_CURVE_VOFF[2]: %7dmV %11dmV\n",
++ od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].min_value,
++ od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].max_value);
++ }
++
++ break;
+ default:
+ break;
+ }
+@@ -2977,6 +3262,8 @@ static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
+ vega20_get_mclk_od,
+ .set_mclk_od =
+ vega20_set_mclk_od,
++ .odn_edit_dpm_table =
++ vega20_odn_edit_dpm_table,
+ /* for sysfs to retrive/set gfxclk/memclk */
+ .force_clock_level =
+ vega20_force_clock_level,
+--
+2.7.4
+
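The interface documented in the patch above is driven by writing the same command
strings to pp_od_clk_voltage. A minimal userspace sketch follows; the card index,
sysfs path, and clock/voltage values are only examples and must fall inside the
advertised OD_RANGE, and power_dpm_force_performance_level has to be set to
"manual" first, as the documentation notes.

#include <stdio.h>

static int od_write(const char *cmd)
{
	/* example path; adjust the card index for the target GPU */
	FILE *f = fopen("/sys/class/drm/card0/device/pp_od_clk_voltage", "w");

	if (!f)
		return -1;
	fputs(cmd, f);
	return fclose(f);
}

int main(void)
{
	od_write("s 1 1900");		/* set maximum sclk to 1900 MHz */
	od_write("m 1 1050");		/* set maximum mclk to 1050 MHz */
	od_write("vc 2 1900 20");	/* curve point 3: 1900 MHz, +20 mV offset */
	od_write("c");			/* commit the edited table */
	return 0;
}
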
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5246-drm-amd-powerplay-correct-data-type-to-support-under.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5246-drm-amd-powerplay-correct-data-type-to-support-under.patch
new file mode 100644
index 00000000..519c46a8
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5246-drm-amd-powerplay-correct-data-type-to-support-under.patch
@@ -0,0 +1,38 @@
+From 187b94674d59a7f6a0894eb87cbe51a4e17aca8b Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Thu, 30 Aug 2018 12:38:45 +0800
+Subject: [PATCH 5246/5725] drm/amd/powerplay: correct data type to support
+ under voltage
+
+For undervolting, a negative value will be applied to the voltage
+offset. Update the data type to cover this case.
+
+Change-Id: I955da13fd9777320b0605b6b620133d596b573be
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
+index 0a39a4c..59e621e 100644
+--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
+@@ -569,11 +569,11 @@ typedef struct {
+ uint16_t GfxclkFmin;
+ uint16_t GfxclkFmax;
+ uint16_t GfxclkFreq1;
+- uint16_t GfxclkOffsetVolt1;
++ int16_t GfxclkOffsetVolt1;
+ uint16_t GfxclkFreq2;
+- uint16_t GfxclkOffsetVolt2;
++ int16_t GfxclkOffsetVolt2;
+ uint16_t GfxclkFreq3;
+- uint16_t GfxclkOffsetVolt3;
++ int16_t GfxclkOffsetVolt3;
+ uint16_t UclkFmax;
+ int16_t OverDrivePct;
+ uint16_t FanMaximumRpm;
+--
+2.7.4
+
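A tiny standalone sketch of the problem being fixed: a negative voltage offset
read back through the old unsigned field type.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int16_t  signed_off   = -10;		/* what the user asked for */
	uint16_t unsigned_off = (uint16_t)-10;	/* old field type */

	printf("int16_t : %d mV\n", signed_off);	/* -10 mV */
	printf("uint16_t: %u mV\n", unsigned_off);	/* 65526 mV */
	return 0;
}
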
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5247-drm-amdgpu-Set-pasid-for-compute-vm-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5247-drm-amdgpu-Set-pasid-for-compute-vm-v2.patch
new file mode 100644
index 00000000..b37d8e29
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5247-drm-amdgpu-Set-pasid-for-compute-vm-v2.patch
@@ -0,0 +1,215 @@
+From 7eeed48a4d8bf8209279e843748e950a4b12a6ef Mon Sep 17 00:00:00 2001
+From: Oak Zeng <Oak.Zeng@amd.com>
+Date: Wed, 29 Aug 2018 12:33:52 -0500
+Subject: [PATCH 5247/5725] drm/amdgpu: Set pasid for compute vm (v2)
+
+To turn an amdgpu VM into a compute VM, the old pasid is freed and
+replaced with a pasid managed by KFD. KFD can't reuse the original pasid
+allocated by amdgpu because KFD uses a different pasid policy than amdgpu.
+For example, all graphics devices share one and the same pasid in a process.
+
+v2: rebase (Alex)
+
+Change-Id: I39a27154841cf46b47c4c62d04617bfed6b2fd3e
+Signed-off-by: Oak Zeng <Oak.Zeng@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 8 ++---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 10 +++----
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 38 +++++++++++++++++++++---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 2 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 4 +--
+ drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 4 +--
+ 6 files changed, 48 insertions(+), 18 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+index cb0588d..23ac8a6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+@@ -168,11 +168,11 @@ uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd);
+ })
+
+ /* GPUVM API */
+-int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, void **vm,
+- void **process_info,
+- struct dma_fence **ef);
++int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, unsigned int pasid,
++ void **vm, void **process_info,
++ struct dma_fence **ef);
+ int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
+- struct file *filp,
++ struct file *filp, unsigned int pasid,
+ void **vm, void **process_info,
+ struct dma_fence **ef);
+ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index 02d9ae7d2..0b73606 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -1048,8 +1048,8 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
+ return ret;
+ }
+
+-int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, void **vm,
+- void **process_info,
++int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, unsigned int pasid,
++ void **vm, void **process_info,
+ struct dma_fence **ef)
+ {
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+@@ -1061,7 +1061,7 @@ int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, void **vm,
+ return -ENOMEM;
+
+ /* Initialize AMDGPU part of the VM */
+- ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, 0);
++ ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, pasid);
+ if (ret) {
+ pr_err("Failed init vm ret %d\n", ret);
+ goto amdgpu_vm_init_fail;
+@@ -1084,7 +1084,7 @@ int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, void **vm,
+ }
+
+ int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
+- struct file *filp,
++ struct file *filp, unsigned int pasid,
+ void **vm, void **process_info,
+ struct dma_fence **ef)
+ {
+@@ -1099,7 +1099,7 @@ int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
+ return -EINVAL;
+
+ /* Convert VM into a compute VM */
+- ret = amdgpu_vm_make_compute(adev, avm);
++ ret = amdgpu_vm_make_compute(adev, avm, pasid);
+ if (ret)
+ return ret;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 99ef1e9..66436e8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -2765,7 +2765,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ * Returns:
+ * 0 for success, -errno for errors.
+ */
+-int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
++int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid)
+ {
+ bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
+ int r;
+@@ -2777,7 +2777,20 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+ /* Sanity checks */
+ if (!RB_EMPTY_ROOT(&vm->va.rb_root) || vm->root.entries) {
+ r = -EINVAL;
+- goto error;
++ goto unreserve_bo;
++ }
++
++ if (pasid) {
++ unsigned long flags;
++
++ spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
++ r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
++ GFP_ATOMIC);
++ spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
++
++ if (r == -ENOSPC)
++ goto unreserve_bo;
++ r = 0;
+ }
+
+ /* Check if PD needs to be reinitialized and do it before
+@@ -2788,7 +2801,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+ adev->vm_manager.root_level,
+ pte_support_ats);
+ if (r)
+- goto error;
++ goto free_idr;
+ }
+
+ /* Update VM state */
+@@ -2807,13 +2820,30 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+ idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
+ spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
+
++ /* Free the original amdgpu allocated pasid
++ * Will be replaced with kfd allocated pasid
++ */
++ amdgpu_pasid_free(vm->pasid);
+ vm->pasid = 0;
+ }
+
+ /* Free the shadow bo for compute VM */
+ amdgpu_bo_unref(&vm->root.base.bo->shadow);
+
+-error:
++ if (pasid)
++ vm->pasid = pasid;
++
++ goto unreserve_bo;
++
++free_idr:
++ if (pasid) {
++ unsigned long flags;
++
++ spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
++ idr_remove(&adev->vm_manager.pasid_idr, pasid);
++ spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
++ }
++unreserve_bo:
+ amdgpu_bo_unreserve(vm->root.base.bo);
+ return r;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+index fd8da1d..58ed2d9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+@@ -295,7 +295,7 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev);
+ void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
+ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ int vm_context, unsigned int pasid);
+-int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
++int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid);
+ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+ bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
+ unsigned int pasid);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index 735c96a..da67302 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -727,11 +727,11 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
+
+ if (drm_file)
+ ret = dev->kfd2kgd->acquire_process_vm(
+- dev->kgd, drm_file,
++ dev->kgd, drm_file, p->pasid,
+ &pdd->vm, &p->kgd_process_info, &p->ef);
+ else
+ ret = dev->kfd2kgd->create_process_vm(
+- dev->kgd, &pdd->vm, &p->kgd_process_info, &p->ef);
++ dev->kgd, p->pasid, &pdd->vm, &p->kgd_process_info, &p->ef);
+ if (ret) {
+ pr_err("Failed to create process VM object\n");
+ return ret;
+diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+index e81fdbc..da7c6f5 100644
+--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
++++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+@@ -267,10 +267,10 @@ struct kfd2kgd_calls {
+
+ uint32_t (*get_max_engine_clock_in_mhz)(struct kgd_dev *kgd);
+
+- int (*create_process_vm)(struct kgd_dev *kgd, void **vm,
++ int (*create_process_vm)(struct kgd_dev *kgd, unsigned int pasid, void **vm,
+ void **process_info, struct dma_fence **ef);
+ int (*acquire_process_vm)(struct kgd_dev *kgd, struct file *filp,
+- void **vm, void **process_info,
++ unsigned int pasid, void **vm, void **process_info,
+ struct dma_fence **ef);
+ void (*destroy_process_vm)(struct kgd_dev *kgd, void *vm);
+
+--
+2.7.4
+
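A kernel-style sketch (separate from the patch) of how amdgpu_vm_make_compute()
now claims the KFD-provided pasid: passing the range [pasid, pasid + 1) makes
idr_alloc() return exactly that id, or -ENOSPC if it is already taken. In the
real code the call is made while holding adev->vm_manager.pasid_lock (irqsave).

#include <linux/idr.h>
#include <linux/gfp.h>

static int claim_compute_pasid(struct idr *pasid_idr, void *vm,
			       unsigned int pasid)
{
	int r;

	/* reserve exactly this id for the compute VM */
	r = idr_alloc(pasid_idr, vm, pasid, pasid + 1, GFP_ATOMIC);
	if (r == -ENOSPC)
		return r;	/* pasid already claimed by another VM */

	return 0;		/* on success, r == pasid */
}
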
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5248-drm-amd-display-Eliminate-i2c-hw-function-pointers.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5248-drm-amd-display-Eliminate-i2c-hw-function-pointers.patch
new file mode 100644
index 00000000..ff8fda80
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5248-drm-amd-display-Eliminate-i2c-hw-function-pointers.patch
@@ -0,0 +1,806 @@
+From 00b3bee54353b4bf547e46a7b527046cfdf4fa07 Mon Sep 17 00:00:00 2001
+From: David Francis <David.Francis@amd.com>
+Date: Thu, 9 Aug 2018 13:20:04 -0400
+Subject: [PATCH 5248/5725] drm/amd/display: Eliminate i2c hw function pointers
+
+[Why]
+The function pointers of the dce_i2c_hw struct were never
+accessed from outside dce_i2c_hw.c and had only one version.
+As function pointers take up space and make debugging difficult,
+and they are not needed in this case, they should be removed.
+
+[How]
+Remove the dce_i2c_hw_funcs struct and make static all
+functions that were previously a part of it. Reorder
+the functions in dce_i2c_hw.c.
+
+Signed-off-by: David Francis <David.Francis@amd.com>
+Reviewed-by: Sun peng Li <Sunpeng.Li@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c | 607 ++++++++++++------------
+ drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h | 29 --
+ 2 files changed, 291 insertions(+), 345 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+index 3a63e3c..cd7da59 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+@@ -36,223 +36,41 @@
+ #define FN(reg_name, field_name) \
+ dce_i2c_hw->shifts->field_name, dce_i2c_hw->masks->field_name
+
+-
+-static inline void reset_hw_engine(struct dce_i2c_hw *dce_i2c_hw)
+-{
+- REG_UPDATE_2(DC_I2C_CONTROL,
+- DC_I2C_SW_STATUS_RESET, 1,
+- DC_I2C_SW_STATUS_RESET, 1);
+-}
+-
+-static bool is_hw_busy(struct dce_i2c_hw *dce_i2c_hw)
+-{
+- uint32_t i2c_sw_status = 0;
+-
+- REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status);
+- if (i2c_sw_status == DC_I2C_STATUS__DC_I2C_STATUS_IDLE)
+- return false;
+-
+- reset_hw_engine(dce_i2c_hw);
+-
+- REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status);
+- return i2c_sw_status != DC_I2C_STATUS__DC_I2C_STATUS_IDLE;
+-}
+-
+-static void set_speed(
+- struct dce_i2c_hw *dce_i2c_hw,
+- uint32_t speed)
+-{
+-
+- if (speed) {
+- if (dce_i2c_hw->masks->DC_I2C_DDC1_START_STOP_TIMING_CNTL)
+- REG_UPDATE_N(SPEED, 3,
+- FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), dce_i2c_hw->reference_frequency / speed,
+- FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2,
+- FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_START_STOP_TIMING_CNTL), speed > 50 ? 2:1);
+- else
+- REG_UPDATE_N(SPEED, 2,
+- FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), dce_i2c_hw->reference_frequency / speed,
+- FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2);
+- }
+-}
+-
+-bool dce_i2c_hw_engine_acquire_engine(
+- struct dce_i2c_hw *dce_i2c_hw,
+- struct ddc *ddc)
+-{
+-
+- enum gpio_result result;
+- uint32_t current_speed;
+-
+- result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE,
+- GPIO_DDC_CONFIG_TYPE_MODE_I2C);
+-
+- if (result != GPIO_RESULT_OK)
+- return false;
+-
+- dce_i2c_hw->ddc = ddc;
+-
+-
+- current_speed = dce_i2c_hw->funcs->get_speed(dce_i2c_hw);
+-
+- if (current_speed)
+- dce_i2c_hw->original_speed = current_speed;
+-
+- return true;
+-}
+-bool dce_i2c_engine_acquire_hw(
+- struct dce_i2c_hw *dce_i2c_hw,
+- struct ddc *ddc_handle)
+-{
+-
+- uint32_t counter = 0;
+- bool result;
+-
+- do {
+- result = dce_i2c_hw_engine_acquire_engine(
+- dce_i2c_hw, ddc_handle);
+-
+- if (result)
+- break;
+-
+- /* i2c_engine is busy by VBios, lets wait and retry */
+-
+- udelay(10);
+-
+- ++counter;
+- } while (counter < 2);
+-
+- if (result) {
+- if (!dce_i2c_hw->funcs->setup_engine(dce_i2c_hw)) {
+- dce_i2c_hw->funcs->release_engine(dce_i2c_hw);
+- result = false;
+- }
+- }
+-
+- return result;
+-}
+-struct dce_i2c_hw *acquire_i2c_hw_engine(
+- struct resource_pool *pool,
+- struct ddc *ddc)
++static void disable_i2c_hw_engine(
++ struct dce_i2c_hw *dce_i2c_hw)
+ {
+-
+- struct dce_i2c_hw *engine = NULL;
+-
+- if (!ddc)
+- return NULL;
+-
+- if (ddc->hw_info.hw_supported) {
+- enum gpio_ddc_line line = dal_ddc_get_line(ddc);
+-
+- if (line < pool->pipe_count)
+- engine = pool->hw_i2cs[line];
+- }
+-
+- if (!engine)
+- return NULL;
+-
+-
+- if (!pool->i2c_hw_buffer_in_use &&
+- dce_i2c_engine_acquire_hw(engine, ddc)) {
+- pool->i2c_hw_buffer_in_use = true;
+- return engine;
+- }
+-
+-
+- return NULL;
++ REG_UPDATE_N(SETUP, 1, FN(SETUP, DC_I2C_DDC1_ENABLE), 0);
+ }
+
+-static bool setup_engine(
++static void execute_transaction(
+ struct dce_i2c_hw *dce_i2c_hw)
+ {
+- uint32_t i2c_setup_limit = I2C_SETUP_TIME_LIMIT_DCE;
++ REG_UPDATE_N(SETUP, 5,
++ FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_DATA_DRIVE_EN), 0,
++ FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_CLK_DRIVE_EN), 0,
++ FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_DATA_DRIVE_SEL), 0,
++ FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_INTRA_TRANSACTION_DELAY), 0,
++ FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_INTRA_BYTE_DELAY), 0);
+
+- if (dce_i2c_hw->setup_limit != 0)
+- i2c_setup_limit = dce_i2c_hw->setup_limit;
+- /* Program pin select */
+- REG_UPDATE_6(DC_I2C_CONTROL,
+- DC_I2C_GO, 0,
++
++ REG_UPDATE_5(DC_I2C_CONTROL,
+ DC_I2C_SOFT_RESET, 0,
++ DC_I2C_SW_STATUS_RESET, 0,
+ DC_I2C_SEND_RESET, 0,
+- DC_I2C_SW_STATUS_RESET, 1,
+- DC_I2C_TRANSACTION_COUNT, 0,
+- DC_I2C_DDC_SELECT, dce_i2c_hw->engine_id);
+-
+- /* Program time limit */
+- if (dce_i2c_hw->send_reset_length == 0) {
+- /*pre-dcn*/
+- REG_UPDATE_N(SETUP, 2,
+- FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT), i2c_setup_limit,
+- FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 1);
+- }
+- /* Program HW priority
+- * set to High - interrupt software I2C at any time
+- * Enable restart of SW I2C that was interrupted by HW
+- * disable queuing of software while I2C is in use by HW
+- */
+- REG_UPDATE_2(DC_I2C_ARBITRATION,
+- DC_I2C_NO_QUEUED_SW_GO, 0,
+- DC_I2C_SW_PRIORITY, DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_NORMAL);
+-
+- return true;
+-}
+-
+-
+-
+-
+-static void process_channel_reply(
+- struct dce_i2c_hw *dce_i2c_hw,
+- struct i2c_reply_transaction_data *reply)
+-{
+- uint32_t length = reply->length;
+- uint8_t *buffer = reply->data;
+-
+- REG_SET_3(DC_I2C_DATA, 0,
+- DC_I2C_INDEX, dce_i2c_hw->buffer_used_write,
+- DC_I2C_DATA_RW, 1,
+- DC_I2C_INDEX_WRITE, 1);
+-
+- while (length) {
+- /* after reading the status,
+- * if the I2C operation executed successfully
+- * (i.e. DC_I2C_STATUS_DONE = 1) then the I2C controller
+- * should read data bytes from I2C circular data buffer
+- */
+-
+- uint32_t i2c_data;
++ DC_I2C_GO, 0,
++ DC_I2C_TRANSACTION_COUNT, dce_i2c_hw->transaction_count - 1);
+
+- REG_GET(DC_I2C_DATA, DC_I2C_DATA, &i2c_data);
+- *buffer++ = i2c_data;
++ /* start I2C transfer */
++ REG_UPDATE(DC_I2C_CONTROL, DC_I2C_GO, 1);
+
+- --length;
+- }
++ /* all transactions were executed and HW buffer became empty
++ * (even though it actually happens when status becomes DONE)
++ */
++ dce_i2c_hw->transaction_count = 0;
++ dce_i2c_hw->buffer_used_bytes = 0;
+ }
+-enum i2c_channel_operation_result dce_i2c_hw_engine_wait_on_operation_result(
+- struct dce_i2c_hw *dce_i2c_hw,
+- uint32_t timeout,
+- enum i2c_channel_operation_result expected_result)
+-{
+- enum i2c_channel_operation_result result;
+- uint32_t i = 0;
+-
+- if (!timeout)
+- return I2C_CHANNEL_OPERATION_SUCCEEDED;
+-
+- do {
+
+- result = dce_i2c_hw->funcs->get_channel_status(
+- dce_i2c_hw, NULL);
+-
+- if (result != expected_result)
+- break;
+-
+- udelay(1);
+-
+- ++i;
+- } while (i < timeout);
+- return result;
+-}
+-static enum i2c_channel_operation_result get_channel_status_hw(
++static enum i2c_channel_operation_result get_channel_status(
+ struct dce_i2c_hw *dce_i2c_hw,
+ uint8_t *returned_bytes)
+ {
+@@ -277,24 +95,13 @@ static enum i2c_channel_operation_result get_channel_status_hw(
+ return I2C_CHANNEL_OPERATION_SUCCEEDED;
+ }
+
+-static void submit_channel_request_hw(
+- struct dce_i2c_hw *dce_i2c_hw,
+- struct i2c_request_transaction_data *request)
++static uint32_t get_hw_buffer_available_size(
++ const struct dce_i2c_hw *dce_i2c_hw)
+ {
+- request->status = I2C_CHANNEL_OPERATION_SUCCEEDED;
+-
+- if (!dce_i2c_hw->funcs->process_transaction(dce_i2c_hw, request))
+- return;
+-
+- if (dce_i2c_hw->funcs->is_hw_busy(dce_i2c_hw)) {
+- request->status = I2C_CHANNEL_OPERATION_ENGINE_BUSY;
+- return;
+- }
+-
+- dce_i2c_hw->funcs->execute_transaction(dce_i2c_hw);
+-
+-
++ return dce_i2c_hw->buffer_size -
++ dce_i2c_hw->buffer_used_bytes;
+ }
++
+ uint32_t get_reference_clock(
+ struct dc_bios *bios)
+ {
+@@ -306,33 +113,48 @@ uint32_t get_reference_clock(
+ return info.pll_info.crystal_frequency;
+ }
+
+-static void execute_transaction_hw(
+- struct dce_i2c_hw *dce_i2c_hw)
++static uint32_t get_speed(
++ const struct dce_i2c_hw *dce_i2c_hw)
+ {
+- REG_UPDATE_N(SETUP, 5,
+- FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_DATA_DRIVE_EN), 0,
+- FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_CLK_DRIVE_EN), 0,
+- FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_DATA_DRIVE_SEL), 0,
+- FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_INTRA_TRANSACTION_DELAY), 0,
+- FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_INTRA_BYTE_DELAY), 0);
++ uint32_t pre_scale = 0;
+
++ REG_GET(SPEED, DC_I2C_DDC1_PRESCALE, &pre_scale);
+
+- REG_UPDATE_5(DC_I2C_CONTROL,
+- DC_I2C_SOFT_RESET, 0,
+- DC_I2C_SW_STATUS_RESET, 0,
+- DC_I2C_SEND_RESET, 0,
+- DC_I2C_GO, 0,
+- DC_I2C_TRANSACTION_COUNT, dce_i2c_hw->transaction_count - 1);
++ /* [anaumov] it seems following is unnecessary */
++ /*ASSERT(value.bits.DC_I2C_DDC1_PRESCALE);*/
++ return pre_scale ?
++ dce_i2c_hw->reference_frequency / pre_scale :
++ dce_i2c_hw->default_speed;
++}
+
+- /* start I2C transfer */
+- REG_UPDATE(DC_I2C_CONTROL, DC_I2C_GO, 1);
++static void process_channel_reply(
++ struct dce_i2c_hw *dce_i2c_hw,
++ struct i2c_reply_transaction_data *reply)
++{
++ uint32_t length = reply->length;
++ uint8_t *buffer = reply->data;
+
+- /* all transactions were executed and HW buffer became empty
+- * (even though it actually happens when status becomes DONE)
+- */
+- dce_i2c_hw->transaction_count = 0;
+- dce_i2c_hw->buffer_used_bytes = 0;
++ REG_SET_3(DC_I2C_DATA, 0,
++ DC_I2C_INDEX, dce_i2c_hw->buffer_used_write,
++ DC_I2C_DATA_RW, 1,
++ DC_I2C_INDEX_WRITE, 1);
++
++ while (length) {
++ /* after reading the status,
++ * if the I2C operation executed successfully
++ * (i.e. DC_I2C_STATUS_DONE = 1) then the I2C controller
++ * should read data bytes from I2C circular data buffer
++ */
++
++ uint32_t i2c_data;
++
++ REG_GET(DC_I2C_DATA, DC_I2C_DATA, &i2c_data);
++ *buffer++ = i2c_data;
++
++ --length;
++ }
+ }
++
+ static bool process_transaction(
+ struct dce_i2c_hw *dce_i2c_hw,
+ struct i2c_request_transaction_data *request)
+@@ -422,51 +244,89 @@ static bool process_transaction(
+
+ return last_transaction;
+ }
+-static uint32_t get_transaction_timeout_hw(
+- const struct dce_i2c_hw *dce_i2c_hw,
+- uint32_t length)
+-{
+-
+- uint32_t speed = dce_i2c_hw->funcs->get_speed(dce_i2c_hw);
+
++static inline void reset_hw_engine(struct dce_i2c_hw *dce_i2c_hw)
++{
++ REG_UPDATE_2(DC_I2C_CONTROL,
++ DC_I2C_SW_STATUS_RESET, 1,
++ DC_I2C_SW_STATUS_RESET, 1);
++}
+
++static void set_speed(
++ struct dce_i2c_hw *dce_i2c_hw,
++ uint32_t speed)
++{
+
+- uint32_t period_timeout;
+- uint32_t num_of_clock_stretches;
++ if (speed) {
++ if (dce_i2c_hw->masks->DC_I2C_DDC1_START_STOP_TIMING_CNTL)
++ REG_UPDATE_N(SPEED, 3,
++ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), dce_i2c_hw->reference_frequency / speed,
++ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2,
++ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_START_STOP_TIMING_CNTL), speed > 50 ? 2:1);
++ else
++ REG_UPDATE_N(SPEED, 2,
++ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), dce_i2c_hw->reference_frequency / speed,
++ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2);
++ }
++}
+
+- if (!speed)
+- return 0;
++static bool setup_engine(
++ struct dce_i2c_hw *dce_i2c_hw)
++{
++ uint32_t i2c_setup_limit = I2C_SETUP_TIME_LIMIT_DCE;
+
+- period_timeout = (1000 * TRANSACTION_TIMEOUT_IN_I2C_CLOCKS) / speed;
++ if (dce_i2c_hw->setup_limit != 0)
++ i2c_setup_limit = dce_i2c_hw->setup_limit;
++ /* Program pin select */
++ REG_UPDATE_6(DC_I2C_CONTROL,
++ DC_I2C_GO, 0,
++ DC_I2C_SOFT_RESET, 0,
++ DC_I2C_SEND_RESET, 0,
++ DC_I2C_SW_STATUS_RESET, 1,
++ DC_I2C_TRANSACTION_COUNT, 0,
++ DC_I2C_DDC_SELECT, dce_i2c_hw->engine_id);
+
+- num_of_clock_stretches = 1 + (length << 3) + 1;
+- num_of_clock_stretches +=
+- (dce_i2c_hw->buffer_used_bytes << 3) +
+- (dce_i2c_hw->transaction_count << 1);
++ /* Program time limit */
++ if (dce_i2c_hw->send_reset_length == 0) {
++ /*pre-dcn*/
++ REG_UPDATE_N(SETUP, 2,
++ FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT), i2c_setup_limit,
++ FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 1);
++ }
++ /* Program HW priority
++ * set to High - interrupt software I2C at any time
++ * Enable restart of SW I2C that was interrupted by HW
++ * disable queuing of software while I2C is in use by HW
++ */
++ REG_UPDATE_2(DC_I2C_ARBITRATION,
++ DC_I2C_NO_QUEUED_SW_GO, 0,
++ DC_I2C_SW_PRIORITY, DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_NORMAL);
+
+- return period_timeout * num_of_clock_stretches;
++ return true;
+ }
+
+-static void release_engine_dce_hw(
+- struct resource_pool *pool,
+- struct dce_i2c_hw *dce_i2c_hw)
++static bool is_hw_busy(struct dce_i2c_hw *dce_i2c_hw)
+ {
+- pool->i2c_hw_buffer_in_use = false;
++ uint32_t i2c_sw_status = 0;
+
+- dce_i2c_hw->funcs->release_engine(dce_i2c_hw);
+- dal_ddc_close(dce_i2c_hw->ddc);
++ REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status);
++ if (i2c_sw_status == DC_I2C_STATUS__DC_I2C_STATUS_IDLE)
++ return false;
+
+- dce_i2c_hw->ddc = NULL;
++ reset_hw_engine(dce_i2c_hw);
++
++ REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status);
++ return i2c_sw_status != DC_I2C_STATUS__DC_I2C_STATUS_IDLE;
+ }
+
+-static void release_engine_hw(
++static void release_engine(
+ struct dce_i2c_hw *dce_i2c_hw)
+ {
+ bool safe_to_reset;
+
+ /* Restore original HW engine speed */
+
+- dce_i2c_hw->funcs->set_speed(dce_i2c_hw, dce_i2c_hw->original_speed);
++ set_speed(dce_i2c_hw, dce_i2c_hw->original_speed);
+
+ /* Release I2C */
+ REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, 1);
+@@ -488,35 +348,180 @@ static void release_engine_hw(
+ REG_UPDATE(DC_I2C_CONTROL, DC_I2C_SW_STATUS_RESET, 1);
+ /* HW I2c engine - clock gating feature */
+ if (!dce_i2c_hw->engine_keep_power_up_count)
+- dce_i2c_hw->funcs->disable_i2c_hw_engine(dce_i2c_hw);
++ disable_i2c_hw_engine(dce_i2c_hw);
+
+ }
+
+-
+-static void disable_i2c_hw_engine(
++static void release_engine_dce_hw(
++ struct resource_pool *pool,
+ struct dce_i2c_hw *dce_i2c_hw)
+ {
+- REG_UPDATE_N(SETUP, 1, FN(SETUP, DC_I2C_DDC1_ENABLE), 0);
++ pool->i2c_hw_buffer_in_use = false;
++
++ release_engine(dce_i2c_hw);
++ dal_ddc_close(dce_i2c_hw->ddc);
++
++ dce_i2c_hw->ddc = NULL;
+ }
+-static uint32_t get_speed_hw(
+- const struct dce_i2c_hw *dce_i2c_hw)
++
++bool dce_i2c_hw_engine_acquire_engine(
++ struct dce_i2c_hw *dce_i2c_hw,
++ struct ddc *ddc)
+ {
+- uint32_t pre_scale = 0;
+
+- REG_GET(SPEED, DC_I2C_DDC1_PRESCALE, &pre_scale);
++ enum gpio_result result;
++ uint32_t current_speed;
+
+- /* [anaumov] it seems following is unnecessary */
+- /*ASSERT(value.bits.DC_I2C_DDC1_PRESCALE);*/
+- return pre_scale ?
+- dce_i2c_hw->reference_frequency / pre_scale :
+- dce_i2c_hw->default_speed;
++ result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE,
++ GPIO_DDC_CONFIG_TYPE_MODE_I2C);
++
++ if (result != GPIO_RESULT_OK)
++ return false;
++
++ dce_i2c_hw->ddc = ddc;
++
++
++ current_speed = get_speed(dce_i2c_hw);
++
++ if (current_speed)
++ dce_i2c_hw->original_speed = current_speed;
++
++ return true;
+ }
+-static uint32_t get_hw_buffer_available_size(
+- const struct dce_i2c_hw *dce_i2c_hw)
++
++bool dce_i2c_engine_acquire_hw(
++ struct dce_i2c_hw *dce_i2c_hw,
++ struct ddc *ddc_handle)
+ {
+- return dce_i2c_hw->buffer_size -
+- dce_i2c_hw->buffer_used_bytes;
++
++ uint32_t counter = 0;
++ bool result;
++
++ do {
++ result = dce_i2c_hw_engine_acquire_engine(
++ dce_i2c_hw, ddc_handle);
++
++ if (result)
++ break;
++
++ /* i2c_engine is busy by VBios, lets wait and retry */
++
++ udelay(10);
++
++ ++counter;
++ } while (counter < 2);
++
++ if (result) {
++ if (!setup_engine(dce_i2c_hw)) {
++ release_engine(dce_i2c_hw);
++ result = false;
++ }
++ }
++
++ return result;
++}
++
++struct dce_i2c_hw *acquire_i2c_hw_engine(
++ struct resource_pool *pool,
++ struct ddc *ddc)
++{
++
++ struct dce_i2c_hw *engine = NULL;
++
++ if (!ddc)
++ return NULL;
++
++ if (ddc->hw_info.hw_supported) {
++ enum gpio_ddc_line line = dal_ddc_get_line(ddc);
++
++ if (line < pool->pipe_count)
++ engine = pool->hw_i2cs[line];
++ }
++
++ if (!engine)
++ return NULL;
++
++
++ if (!pool->i2c_hw_buffer_in_use &&
++ dce_i2c_engine_acquire_hw(engine, ddc)) {
++ pool->i2c_hw_buffer_in_use = true;
++ return engine;
++ }
++
++
++ return NULL;
++}
++
++enum i2c_channel_operation_result dce_i2c_hw_engine_wait_on_operation_result(
++ struct dce_i2c_hw *dce_i2c_hw,
++ uint32_t timeout,
++ enum i2c_channel_operation_result expected_result)
++{
++ enum i2c_channel_operation_result result;
++ uint32_t i = 0;
++
++ if (!timeout)
++ return I2C_CHANNEL_OPERATION_SUCCEEDED;
++
++ do {
++
++ result = get_channel_status(
++ dce_i2c_hw, NULL);
++
++ if (result != expected_result)
++ break;
++
++ udelay(1);
++
++ ++i;
++ } while (i < timeout);
++ return result;
++}
++
++static void submit_channel_request_hw(
++ struct dce_i2c_hw *dce_i2c_hw,
++ struct i2c_request_transaction_data *request)
++{
++ request->status = I2C_CHANNEL_OPERATION_SUCCEEDED;
++
++ if (!process_transaction(dce_i2c_hw, request))
++ return;
++
++ if (is_hw_busy(dce_i2c_hw)) {
++ request->status = I2C_CHANNEL_OPERATION_ENGINE_BUSY;
++ return;
++ }
++
++ execute_transaction(dce_i2c_hw);
++
++
++}
++
++static uint32_t get_transaction_timeout_hw(
++ const struct dce_i2c_hw *dce_i2c_hw,
++ uint32_t length)
++{
++
++ uint32_t speed = get_speed(dce_i2c_hw);
++
++
++
++ uint32_t period_timeout;
++ uint32_t num_of_clock_stretches;
++
++ if (!speed)
++ return 0;
++
++ period_timeout = (1000 * TRANSACTION_TIMEOUT_IN_I2C_CLOCKS) / speed;
++
++ num_of_clock_stretches = 1 + (length << 3) + 1;
++ num_of_clock_stretches +=
++ (dce_i2c_hw->buffer_used_bytes << 3) +
++ (dce_i2c_hw->transaction_count << 1);
++
++ return period_timeout * num_of_clock_stretches;
+ }
++
+ bool dce_i2c_hw_engine_submit_request(
+ struct dce_i2c_hw *dce_i2c_hw,
+ struct dce_i2c_transaction_request *dce_i2c_request,
+@@ -615,9 +620,7 @@ bool dce_i2c_hw_engine_submit_request(
+ reply.data = dce_i2c_request->payload.data;
+ reply.length = dce_i2c_request->payload.length;
+
+- dce_i2c_hw->funcs->process_channel_reply(dce_i2c_hw, &reply);
+-
+-
++ process_channel_reply(dce_i2c_hw, &reply);
+ }
+
+ return result;
+@@ -632,7 +635,7 @@ bool dce_i2c_submit_command_hw(
+ uint8_t index_of_payload = 0;
+ bool result;
+
+- dce_i2c_hw->funcs->set_speed(dce_i2c_hw, cmd->speed);
++ set_speed(dce_i2c_hw, cmd->speed);
+
+ result = true;
+
+@@ -670,32 +673,6 @@ bool dce_i2c_submit_command_hw(
+
+ return result;
+ }
+-static const struct dce_i2c_hw_funcs dce100_i2c_hw_funcs = {
+- .setup_engine = setup_engine,
+- .set_speed = set_speed,
+- .get_speed = get_speed_hw,
+- .release_engine = release_engine_hw,
+- .process_transaction = process_transaction,
+- .process_channel_reply = process_channel_reply,
+- .is_hw_busy = is_hw_busy,
+- .get_channel_status = get_channel_status_hw,
+- .execute_transaction = execute_transaction_hw,
+- .disable_i2c_hw_engine = disable_i2c_hw_engine
+-};
+-static const struct dce_i2c_hw_funcs dce80_i2c_hw_funcs = {
+- .setup_engine = setup_engine,
+- .set_speed = set_speed,
+- .get_speed = get_speed_hw,
+- .release_engine = release_engine_hw,
+- .process_transaction = process_transaction,
+- .process_channel_reply = process_channel_reply,
+- .is_hw_busy = is_hw_busy,
+- .get_channel_status = get_channel_status_hw,
+- .execute_transaction = execute_transaction_hw,
+- .disable_i2c_hw_engine = disable_i2c_hw_engine
+-};
+-
+-
+
+ void dce_i2c_hw_construct(
+ struct dce_i2c_hw *dce_i2c_hw,
+@@ -718,7 +695,6 @@ void dce_i2c_hw_construct(
+ dce_i2c_hw->default_speed = DEFAULT_I2C_HW_SPEED;
+ dce_i2c_hw->send_reset_length = 0;
+ dce_i2c_hw->setup_limit = I2C_SETUP_TIME_LIMIT_DCE;
+- dce_i2c_hw->funcs = &dce80_i2c_hw_funcs;
+ dce_i2c_hw->buffer_size = I2C_HW_BUFFER_SIZE_DCE;
+ }
+
+@@ -739,7 +715,6 @@ void dce100_i2c_hw_construct(
+ regs,
+ shifts,
+ masks);
+- dce_i2c_hw->funcs = &dce100_i2c_hw_funcs;
+ dce_i2c_hw->buffer_size = I2C_HW_BUFFER_SIZE_DCE100;
+
+ REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div);
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h
+index 8baef39..742c1da 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h
+@@ -256,40 +256,11 @@ struct dce_i2c_hw {
+ uint32_t buffer_size;
+ struct dc_context *ctx;
+
+- const struct dce_i2c_hw_funcs *funcs;
+ const struct dce_i2c_registers *regs;
+ const struct dce_i2c_shift *shifts;
+ const struct dce_i2c_mask *masks;
+ };
+
+-
+-struct dce_i2c_hw_funcs {
+- bool (*setup_engine)(
+- struct dce_i2c_hw *dce_i2c_hw);
+- void (*set_speed)(
+- struct dce_i2c_hw *dce_i2c_hw,
+- uint32_t speed);
+- uint32_t (*get_speed)(
+- const struct dce_i2c_hw *dce_i2c_hw);
+- void (*release_engine)(
+- struct dce_i2c_hw *dce_i2c_hw);
+- bool (*process_transaction)(
+- struct dce_i2c_hw *dce_i2c_hw,
+- struct i2c_request_transaction_data *request);
+- void (*process_channel_reply)(
+- struct dce_i2c_hw *dce_i2c_hw,
+- struct i2c_reply_transaction_data *reply);
+- bool (*is_hw_busy)(
+- struct dce_i2c_hw *dce_i2c_hw);
+- enum i2c_channel_operation_result (*get_channel_status)(
+- struct dce_i2c_hw *dce_i2c_hw,
+- uint8_t *returned_bytes);
+- void (*execute_transaction)(
+- struct dce_i2c_hw *dce_i2c_hw);
+- void (*disable_i2c_hw_engine)(
+- struct dce_i2c_hw *dce_i2c_hw);
+-};
+-
+ void dce_i2c_hw_construct(
+ struct dce_i2c_hw *dce_i2c_hw,
+ struct dc_context *ctx,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5249-drm-amd-display-dc-3.1.63.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5249-drm-amd-display-dc-3.1.63.patch
new file mode 100644
index 00000000..6cf87f35
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5249-drm-amd-display-dc-3.1.63.patch
@@ -0,0 +1,29 @@
+From 876d7db013ca82c0dda189ff7b3b2f3a6aa2fc43 Mon Sep 17 00:00:00 2001
+From: Tony Cheng <tony.cheng@amd.com>
+Date: Wed, 18 Jul 2018 20:29:46 -0400
+Subject: [PATCH 5249/5725] drm/amd/display: dc 3.1.63
+
+Signed-off-by: Tony Cheng <tony.cheng@amd.com>
+Reviewed-by: Steven Chiu <Steven.Chiu@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index ae17668..c012918 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -38,7 +38,7 @@
+ #include "inc/compressor.h"
+ #include "dml/display_mode_lib.h"
+
+-#define DC_VER "3.1.62"
++#define DC_VER "3.1.63"
+
+ #define MAX_SURFACES 3
+ #define MAX_STREAMS 6
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5250-drm-amd-display-Use-non-deprecated-vblank-handler.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5250-drm-amd-display-Use-non-deprecated-vblank-handler.patch
new file mode 100644
index 00000000..c67b16ce
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5250-drm-amd-display-Use-non-deprecated-vblank-handler.patch
@@ -0,0 +1,44 @@
+From fef7cb1f3e8eea1b7cc7c410ea0e584861641b6e Mon Sep 17 00:00:00 2001
+From: "Leo (Sunpeng) Li" <sunpeng.li@amd.com>
+Date: Mon, 13 Aug 2018 17:45:05 -0400
+Subject: [PATCH 5250/5725] drm/amd/display: Use non-deprecated vblank handler
+
+[Why]
+drm_handle_vblank is deprecated. Use drm_crtc_handle_vblank instead.
+
+Signed-off-by: Leo (Sunpeng) Li <sunpeng.li@amd.com>
+Reviewed-by: David Francis <David.Francis@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 7dc0e7f..9b6dd67 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -312,16 +312,14 @@ static void dm_crtc_high_irq(void *interrupt_params)
+ {
+ struct common_irq_params *irq_params = interrupt_params;
+ struct amdgpu_device *adev = irq_params->adev;
+- uint8_t crtc_index = 0;
+ struct amdgpu_crtc *acrtc;
+
+ acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
+
+- if (acrtc)
+- crtc_index = acrtc->crtc_id;
+-
+- drm_handle_vblank(adev->ddev, crtc_index);
+- amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
++ if (acrtc) {
++ drm_crtc_handle_vblank(&acrtc->base);
++ amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
++ }
+ }
+
+ static int dm_set_clockgating_state(void *handle,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5251-drm-amd-display-Add-support-for-hw_state-logging-via.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5251-drm-amd-display-Add-support-for-hw_state-logging-via.patch
new file mode 100644
index 00000000..5fc369a9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5251-drm-amd-display-Add-support-for-hw_state-logging-via.patch
@@ -0,0 +1,156 @@
+From 0976843058799c347b6f9f7c9d38b4508356d296 Mon Sep 17 00:00:00 2001
+From: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Date: Tue, 14 Aug 2018 15:40:57 -0400
+Subject: [PATCH 5251/5725] drm/amd/display: Add support for hw_state logging
+ via debugfs
+
+[Why]
+
+We have logging methods for printing hardware state for newer ASICs
+but no way to trigger the log output.
+
+[How]
+
+Add support for triggering the output via writing to a debugfs file
+entry. Log output currently goes into dmesg for convenience, but
+accessing via a read should be possible later.
+
+Signed-off-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Reviewed-by: Jordan Lazare <Jordan.Lazare@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 5 ++
+ .../drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 53 ++++++++++++++++++++++
+ .../drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h | 1 +
+ .../drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 22 +++++++--
+ 4 files changed, 77 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 9b6dd67..7b27d39 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -481,6 +481,11 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
+ goto error;
+ }
+
++#if defined(CONFIG_DEBUG_FS)
++ if (dtn_debugfs_init(adev))
++ DRM_ERROR("amdgpu: failed initialize dtn debugfs support.\n");
++#endif
++
+ DRM_DEBUG_DRIVER("KMS initialized.\n");
+
+ return 0;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+index 0d9e410..e79ac1e 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+@@ -720,3 +720,56 @@ int connector_debugfs_init(struct amdgpu_dm_connector *connector)
+ return 0;
+ }
+
++static ssize_t dtn_log_read(
++ struct file *f,
++ char __user *buf,
++ size_t size,
++ loff_t *pos)
++{
++ /* TODO: Write log output to the user supplied buffer. */
++ return 0;
++}
++
++static ssize_t dtn_log_write(
++ struct file *f,
++ const char __user *buf,
++ size_t size,
++ loff_t *pos)
++{
++ struct amdgpu_device *adev = file_inode(f)->i_private;
++ struct dc *dc = adev->dm.dc;
++
++ /* Write triggers log output via dmesg. */
++ if (size == 0)
++ return 0;
++
++ if (dc->hwss.log_hw_state)
++ dc->hwss.log_hw_state(dc);
++
++ return size;
++}
++
++int dtn_debugfs_init(struct amdgpu_device *adev)
++{
++ static const struct file_operations dtn_log_fops = {
++ .owner = THIS_MODULE,
++ .read = dtn_log_read,
++ .write = dtn_log_write,
++ .llseek = default_llseek
++ };
++
++ struct drm_minor *minor = adev->ddev->primary;
++ struct dentry *root = minor->debugfs_root;
++
++ struct dentry *ent = debugfs_create_file(
++ "amdgpu_dm_dtn_log",
++ 0644,
++ root,
++ adev,
++ &dtn_log_fops);
++
++ if (IS_ERR(ent))
++ return PTR_ERR(ent);
++
++ return 0;
++}
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h
+index d9ed1b2..bdef158 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h
+@@ -30,5 +30,6 @@
+ #include "amdgpu_dm.h"
+
+ int connector_debugfs_init(struct amdgpu_dm_connector *connector);
++int dtn_debugfs_init(struct amdgpu_device *adev);
+
+ #endif
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+index d1ce925..cd5e991 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+@@ -336,14 +336,28 @@ bool dm_helpers_dp_mst_send_payload_allocation(
+ }
+
+ void dm_dtn_log_begin(struct dc_context *ctx)
+-{}
++{
++ pr_info("[dtn begin]\n");
++}
+
+ void dm_dtn_log_append_v(struct dc_context *ctx,
+- const char *pMsg, ...)
+-{}
++ const char *msg, ...)
++{
++ struct va_format vaf;
++ va_list args;
++
++ va_start(args, msg);
++ vaf.fmt = msg;
++ vaf.va = &args;
++
++ pr_info("%pV", &vaf);
++ va_end(args);
++}
+
+ void dm_dtn_log_end(struct dc_context *ctx)
+-{}
++{
++ pr_info("[dtn end]\n");
++}
+
+ bool dm_helpers_dp_mst_start_top_mgr(
+ struct dc_context *ctx,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5252-drm-amd-display-eliminate-long-wait-between-register.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5252-drm-amd-display-eliminate-long-wait-between-register.patch
new file mode 100644
index 00000000..23cdab94
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5252-drm-amd-display-eliminate-long-wait-between-register.patch
@@ -0,0 +1,59 @@
+From 4ee2b584e0c376e3221ebeee23ec66e51e600d99 Mon Sep 17 00:00:00 2001
+From: Ken Chalmers <ken.chalmers@amd.com>
+Date: Fri, 10 Aug 2018 15:51:59 -0400
+Subject: [PATCH 5252/5725] drm/amd/display: eliminate long wait between
+ register polls on Maximus
+
+[Why]
+Now that we "scale" time delays correctly on Maximus (as of diags svn
+r170115), the forced "35 ms" wait time becomes 35 ms * 500 = 17.5
+seconds, which is far too long. Even having to repeat polling a
+register once causes excessive delays on Maximus.
+
+[How]
+Just use the regular wait time passed to the generic_reg_wait()
+function. This is sufficient for Maximus now, and it also means that
+there's one less "Maximus-only" code path in DAL.
+
+Also disable the "REG_WAIT taking a while:" message on Maximus, since
+things do take a while longer there and 1-2ms delays are not uncommon
+(and nothing to worry about).
+
+Signed-off-by: Ken Chalmers <ken.chalmers@amd.com>
+Reviewed-by: Eric Bernstein <Eric.Bernstein@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc_helper.c | 9 ++-------
+ 1 file changed, 2 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
+index e68077e..fcfd50b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
++++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
+@@ -219,12 +219,6 @@ uint32_t generic_reg_wait(const struct dc_context *ctx,
+ /* something is terribly wrong if time out is > 200ms. (5Hz) */
+ ASSERT(delay_between_poll_us * time_out_num_tries <= 200000);
+
+- if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
+- /* 35 seconds */
+- delay_between_poll_us = 35000;
+- time_out_num_tries = 1000;
+- }
+-
+ for (i = 0; i <= time_out_num_tries; i++) {
+ if (i) {
+ if (delay_between_poll_us >= 1000)
+@@ -238,7 +232,8 @@ uint32_t generic_reg_wait(const struct dc_context *ctx,
+ field_value = get_reg_field_value_ex(reg_val, mask, shift);
+
+ if (field_value == condition_value) {
+- if (i * delay_between_poll_us > 1000)
++ if (i * delay_between_poll_us > 1000 &&
++ !IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
+ dm_output_to_console("REG_WAIT taking a while: %dms in %s line:%d\n",
+ delay_between_poll_us * i / 1000,
+ func_name, line);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5253-drm-amd-display-Fix-memory-leak-caused-by-missed-dc_.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5253-drm-amd-display-Fix-memory-leak-caused-by-missed-dc_.patch
new file mode 100644
index 00000000..35114954
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5253-drm-amd-display-Fix-memory-leak-caused-by-missed-dc_.patch
@@ -0,0 +1,43 @@
+From e565c259efdc6a97604751d6629a4900a3e8ffe0 Mon Sep 17 00:00:00 2001
+From: SivapiriyanKumarasamy <sivapiriyan.kumarasamy@amd.com>
+Date: Wed, 15 Aug 2018 16:55:18 -0400
+Subject: [PATCH 5253/5725] drm/amd/display: Fix memory leak caused by missed
+ dc_sink_release
+
+[Why]
+There is currently an intermittent hang from a memory leak in
+DTN stress testing. It is caused by unfreed memory during driver
+disable.
+
+[How]
+Do a dc_sink_release in the case that skips it incorrectly.
+
+Signed-off-by: SivapiriyanKumarasamy <sivapiriyan.kumarasamy@amd.com>
+Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_link.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index 61cf4fe..960521ce 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -772,8 +772,12 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
+ * fail-safe mode
+ */
+ if (dc_is_hdmi_signal(link->connector_signal) ||
+- dc_is_dvi_signal(link->connector_signal))
++ dc_is_dvi_signal(link->connector_signal)) {
++ if (prev_sink != NULL)
++ dc_sink_release(prev_sink);
++
+ return false;
++ }
+ default:
+ break;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5254-drm-amd-display-Remove-redundant-i2c-structs.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5254-drm-amd-display-Remove-redundant-i2c-structs.patch
new file mode 100644
index 00000000..08036b85
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5254-drm-amd-display-Remove-redundant-i2c-structs.patch
@@ -0,0 +1,363 @@
+From c676b307a635348ba6d9f459f576ce8aa682df63 Mon Sep 17 00:00:00 2001
+From: David Francis <David.Francis@amd.com>
+Date: Fri, 3 Aug 2018 14:25:19 -0400
+Subject: [PATCH 5254/5725] drm/amd/display: Remove redundant i2c structs
+
+[Why]
+The i2c code contains two structs that contain the same
+information as i2c_payload.
+
+[How]
+Replace references to those structs with references to
+i2c_payload.
+
+dce_i2c_transaction_request->status was written to but never read,
+so all references to it are removed.
+
+Signed-off-by: David Francis <David.Francis@amd.com>
+Reviewed-by: Jordan Lazare <Jordan.Lazare@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce/dce_i2c.h | 33 ----------
+ drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c | 84 +++++--------------------
+ drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h | 5 --
+ drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c | 83 ++++--------------------
+ 4 files changed, 28 insertions(+), 177 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.h b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.h
+index d655f89..a171c5c 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.h
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.h
+@@ -30,39 +30,6 @@
+ #include "dce_i2c_hw.h"
+ #include "dce_i2c_sw.h"
+
+-enum dce_i2c_transaction_status {
+- DCE_I2C_TRANSACTION_STATUS_UNKNOWN = (-1L),
+- DCE_I2C_TRANSACTION_STATUS_SUCCEEDED,
+- DCE_I2C_TRANSACTION_STATUS_FAILED_CHANNEL_BUSY,
+- DCE_I2C_TRANSACTION_STATUS_FAILED_TIMEOUT,
+- DCE_I2C_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR,
+- DCE_I2C_TRANSACTION_STATUS_FAILED_NACK,
+- DCE_I2C_TRANSACTION_STATUS_FAILED_INCOMPLETE,
+- DCE_I2C_TRANSACTION_STATUS_FAILED_OPERATION,
+- DCE_I2C_TRANSACTION_STATUS_FAILED_INVALID_OPERATION,
+- DCE_I2C_TRANSACTION_STATUS_FAILED_BUFFER_OVERFLOW,
+- DCE_I2C_TRANSACTION_STATUS_FAILED_HPD_DISCON
+-};
+-
+-enum dce_i2c_transaction_operation {
+- DCE_I2C_TRANSACTION_READ,
+- DCE_I2C_TRANSACTION_WRITE
+-};
+-
+-struct dce_i2c_transaction_payload {
+- enum dce_i2c_transaction_address_space address_space;
+- uint32_t address;
+- uint32_t length;
+- uint8_t *data;
+-};
+-
+-struct dce_i2c_transaction_request {
+- enum dce_i2c_transaction_operation operation;
+- struct dce_i2c_transaction_payload payload;
+- enum dce_i2c_transaction_status status;
+-};
+-
+-
+ bool dce_i2c_submit_command(
+ struct resource_pool *pool,
+ struct ddc *ddc,
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+index cd7da59..2800d3f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+@@ -129,7 +129,7 @@ static uint32_t get_speed(
+
+ static void process_channel_reply(
+ struct dce_i2c_hw *dce_i2c_hw,
+- struct i2c_reply_transaction_data *reply)
++ struct i2c_payload *reply)
+ {
+ uint32_t length = reply->length;
+ uint8_t *buffer = reply->data;
+@@ -522,9 +522,9 @@ static uint32_t get_transaction_timeout_hw(
+ return period_timeout * num_of_clock_stretches;
+ }
+
+-bool dce_i2c_hw_engine_submit_request(
++bool dce_i2c_hw_engine_submit_payload(
+ struct dce_i2c_hw *dce_i2c_hw,
+- struct dce_i2c_transaction_request *dce_i2c_request,
++ struct i2c_payload *payload,
+ bool middle_of_transaction)
+ {
+
+@@ -541,46 +541,36 @@ bool dce_i2c_hw_engine_submit_request(
+ * the number of free bytes in HW buffer (minus one for address)
+ */
+
+- if (dce_i2c_request->payload.length >=
++ if (payload->length >=
+ get_hw_buffer_available_size(dce_i2c_hw)) {
+- dce_i2c_request->status =
+- DCE_I2C_TRANSACTION_STATUS_FAILED_BUFFER_OVERFLOW;
+ return false;
+ }
+
+- if (dce_i2c_request->operation == DCE_I2C_TRANSACTION_READ)
++ if (!payload->write)
+ request.action = middle_of_transaction ?
+ DCE_I2C_TRANSACTION_ACTION_I2C_READ_MOT :
+ DCE_I2C_TRANSACTION_ACTION_I2C_READ;
+- else if (dce_i2c_request->operation == DCE_I2C_TRANSACTION_WRITE)
++ else
+ request.action = middle_of_transaction ?
+ DCE_I2C_TRANSACTION_ACTION_I2C_WRITE_MOT :
+ DCE_I2C_TRANSACTION_ACTION_I2C_WRITE;
+- else {
+- dce_i2c_request->status =
+- DCE_I2C_TRANSACTION_STATUS_FAILED_INVALID_OPERATION;
+- /* [anaumov] in DAL2, there was no "return false" */
+- return false;
+- }
+
+- request.address = (uint8_t) dce_i2c_request->payload.address;
+- request.length = dce_i2c_request->payload.length;
+- request.data = dce_i2c_request->payload.data;
++
++ request.address = (uint8_t) ((payload->address << 1) | !payload->write);
++ request.length = payload->length;
++ request.data = payload->data;
+
+ /* obtain timeout value before submitting request */
+
+ transaction_timeout = get_transaction_timeout_hw(
+- dce_i2c_hw, dce_i2c_request->payload.length + 1);
++ dce_i2c_hw, payload->length + 1);
+
+ submit_channel_request_hw(
+ dce_i2c_hw, &request);
+
+ if ((request.status == I2C_CHANNEL_OPERATION_FAILED) ||
+- (request.status == I2C_CHANNEL_OPERATION_ENGINE_BUSY)) {
+- dce_i2c_request->status =
+- DCE_I2C_TRANSACTION_STATUS_FAILED_CHANNEL_BUSY;
++ (request.status == I2C_CHANNEL_OPERATION_ENGINE_BUSY))
+ return false;
+- }
+
+ /* wait until transaction proceed */
+
+@@ -591,37 +581,11 @@ bool dce_i2c_hw_engine_submit_request(
+
+ /* update transaction status */
+
+- switch (operation_result) {
+- case I2C_CHANNEL_OPERATION_SUCCEEDED:
+- dce_i2c_request->status =
+- DCE_I2C_TRANSACTION_STATUS_SUCCEEDED;
++ if (operation_result == I2C_CHANNEL_OPERATION_SUCCEEDED)
+ result = true;
+- break;
+- case I2C_CHANNEL_OPERATION_NO_RESPONSE:
+- dce_i2c_request->status =
+- DCE_I2C_TRANSACTION_STATUS_FAILED_NACK;
+- break;
+- case I2C_CHANNEL_OPERATION_TIMEOUT:
+- dce_i2c_request->status =
+- DCE_I2C_TRANSACTION_STATUS_FAILED_TIMEOUT;
+- break;
+- case I2C_CHANNEL_OPERATION_FAILED:
+- dce_i2c_request->status =
+- DCE_I2C_TRANSACTION_STATUS_FAILED_INCOMPLETE;
+- break;
+- default:
+- dce_i2c_request->status =
+- DCE_I2C_TRANSACTION_STATUS_FAILED_OPERATION;
+- }
+
+- if (result && (dce_i2c_request->operation == DCE_I2C_TRANSACTION_READ)) {
+- struct i2c_reply_transaction_data reply;
+-
+- reply.data = dce_i2c_request->payload.data;
+- reply.length = dce_i2c_request->payload.length;
+-
+- process_channel_reply(dce_i2c_hw, &reply);
+- }
++ if (result && (!payload->write))
++ process_channel_reply(dce_i2c_hw, payload);
+
+ return result;
+ }
+@@ -644,22 +608,8 @@ bool dce_i2c_submit_command_hw(
+
+ struct i2c_payload *payload = cmd->payloads + index_of_payload;
+
+- struct dce_i2c_transaction_request request = { 0 };
+-
+- request.operation = payload->write ?
+- DCE_I2C_TRANSACTION_WRITE :
+- DCE_I2C_TRANSACTION_READ;
+-
+- request.payload.address_space =
+- DCE_I2C_TRANSACTION_ADDRESS_SPACE_I2C;
+- request.payload.address = (payload->address << 1) |
+- !payload->write;
+- request.payload.length = payload->length;
+- request.payload.data = payload->data;
+-
+-
+- if (!dce_i2c_hw_engine_submit_request(
+- dce_i2c_hw, &request, mot)) {
++ if (!dce_i2c_hw_engine_submit_payload(
++ dce_i2c_hw, payload, mot)) {
+ result = false;
+ break;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h
+index 742c1da..7f19bb4 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h
+@@ -236,11 +236,6 @@ struct i2c_request_transaction_data {
+ uint8_t *data;
+ };
+
+-struct i2c_reply_transaction_data {
+- uint32_t length;
+- uint8_t *data;
+-};
+-
+ struct dce_i2c_hw {
+ struct ddc *ddc;
+ uint32_t original_speed;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
+index ab11129..f026669 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
+@@ -70,13 +70,6 @@ static void release_engine_dce_sw(
+ dce_i2c_sw->ddc = NULL;
+ }
+
+-enum i2c_channel_operation_result dce_i2c_sw_engine_get_channel_status(
+- struct dce_i2c_sw *engine,
+- uint8_t *returned_bytes)
+-{
+- /* No arbitration with VBIOS is performed since DCE 6.0 */
+- return I2C_CHANNEL_OPERATION_SUCCEEDED;
+-}
+ static bool get_hw_supported_ddc_line(
+ struct ddc *ddc,
+ enum gpio_ddc_line *line)
+@@ -469,73 +462,33 @@ void dce_i2c_sw_engine_submit_channel_request(
+ I2C_CHANNEL_OPERATION_SUCCEEDED :
+ I2C_CHANNEL_OPERATION_FAILED;
+ }
+-bool dce_i2c_sw_engine_submit_request(
++bool dce_i2c_sw_engine_submit_payload(
+ struct dce_i2c_sw *engine,
+- struct dce_i2c_transaction_request *dce_i2c_request,
++ struct i2c_payload *payload,
+ bool middle_of_transaction)
+ {
+ struct i2c_request_transaction_data request;
+- bool operation_succeeded = false;
+
+- if (dce_i2c_request->operation == DCE_I2C_TRANSACTION_READ)
++ if (!payload->write)
+ request.action = middle_of_transaction ?
+ DCE_I2C_TRANSACTION_ACTION_I2C_READ_MOT :
+ DCE_I2C_TRANSACTION_ACTION_I2C_READ;
+- else if (dce_i2c_request->operation == DCE_I2C_TRANSACTION_WRITE)
++ else
+ request.action = middle_of_transaction ?
+ DCE_I2C_TRANSACTION_ACTION_I2C_WRITE_MOT :
+ DCE_I2C_TRANSACTION_ACTION_I2C_WRITE;
+- else {
+- dce_i2c_request->status =
+- DCE_I2C_TRANSACTION_STATUS_FAILED_INVALID_OPERATION;
+- /* in DAL2, there was no "return false" */
+- return false;
+- }
+
+- request.address = (uint8_t)dce_i2c_request->payload.address;
+- request.length = dce_i2c_request->payload.length;
+- request.data = dce_i2c_request->payload.data;
++ request.address = (uint8_t) ((payload->address << 1) | !payload->write);
++ request.length = payload->length;
++ request.data = payload->data;
+
+ dce_i2c_sw_engine_submit_channel_request(engine, &request);
+
+ if ((request.status == I2C_CHANNEL_OPERATION_ENGINE_BUSY) ||
+ (request.status == I2C_CHANNEL_OPERATION_FAILED))
+- dce_i2c_request->status =
+- DCE_I2C_TRANSACTION_STATUS_FAILED_CHANNEL_BUSY;
+- else {
+- enum i2c_channel_operation_result operation_result;
+-
+- do {
+- operation_result =
+- dce_i2c_sw_engine_get_channel_status(engine, NULL);
+-
+- switch (operation_result) {
+- case I2C_CHANNEL_OPERATION_SUCCEEDED:
+- dce_i2c_request->status =
+- DCE_I2C_TRANSACTION_STATUS_SUCCEEDED;
+- operation_succeeded = true;
+- break;
+- case I2C_CHANNEL_OPERATION_NO_RESPONSE:
+- dce_i2c_request->status =
+- DCE_I2C_TRANSACTION_STATUS_FAILED_NACK;
+- break;
+- case I2C_CHANNEL_OPERATION_TIMEOUT:
+- dce_i2c_request->status =
+- DCE_I2C_TRANSACTION_STATUS_FAILED_TIMEOUT;
+- break;
+- case I2C_CHANNEL_OPERATION_FAILED:
+- dce_i2c_request->status =
+- DCE_I2C_TRANSACTION_STATUS_FAILED_INCOMPLETE;
+- break;
+- default:
+- dce_i2c_request->status =
+- DCE_I2C_TRANSACTION_STATUS_FAILED_OPERATION;
+- break;
+- }
+- } while (operation_result == I2C_CHANNEL_OPERATION_ENGINE_BUSY);
+- }
++ return false;
+
+- return operation_succeeded;
++ return true;
+ }
+ bool dce_i2c_submit_command_sw(
+ struct resource_pool *pool,
+@@ -555,22 +508,8 @@ bool dce_i2c_submit_command_sw(
+
+ struct i2c_payload *payload = cmd->payloads + index_of_payload;
+
+- struct dce_i2c_transaction_request request = { 0 };
+-
+- request.operation = payload->write ?
+- DCE_I2C_TRANSACTION_WRITE :
+- DCE_I2C_TRANSACTION_READ;
+-
+- request.payload.address_space =
+- DCE_I2C_TRANSACTION_ADDRESS_SPACE_I2C;
+- request.payload.address = (payload->address << 1) |
+- !payload->write;
+- request.payload.length = payload->length;
+- request.payload.data = payload->data;
+-
+-
+- if (!dce_i2c_sw_engine_submit_request(
+- dce_i2c_sw, &request, mot)) {
++ if (!dce_i2c_sw_engine_submit_payload(
++ dce_i2c_sw, payload, mot)) {
+ result = false;
+ break;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5255-drm-amd-display-support-48-MHZ-refclk-off.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5255-drm-amd-display-support-48-MHZ-refclk-off.patch
new file mode 100644
index 00000000..e2d02322
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5255-drm-amd-display-support-48-MHZ-refclk-off.patch
@@ -0,0 +1,101 @@
+From 2ad85afb3678c6c4bdefb8580803526b5c211ff8 Mon Sep 17 00:00:00 2001
+From: Eric Yang <Eric.Yang2@amd.com>
+Date: Wed, 15 Aug 2018 17:35:50 -0400
+Subject: [PATCH 5255/5725] drm/amd/display: support 48 MHZ refclk off
+
+[Why]
+On PCO and up, whenever the SMU receives a message indicating that the
+active display count = 0, the SMU will turn off the 48 MHz TMDP
+reference clock by writing 1 to TMDP_48M_Refclk_Driver_PWDN. Once this
+clock is off, no PHY register will respond to register access. This
+means our current sequence of notifying the display count along with
+requesting clocks will cause the driver to hang when accessing PHY
+registers after the display count goes to 0.
+
+[How]
+Separate the PPSMC_MSG_SetDisplayCount message from the SMU messages
+that request clocks, and have display own the sequencing of this
+message so that we can send it at the appropriate time.
+Do not redundantly power off HW when entering S3 or S4, since display
+should already have been called to disable all streams, and the ASIC
+will soon be powered down.
+
+Signed-off-by: Eric Yang <Eric.Yang2@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc.c | 36 +++++++++++++++++++++++++++++---
+ 1 file changed, 33 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 18632db..7c5382a 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -1393,6 +1393,34 @@ static struct dc_stream_status *stream_get_status(
+
+ static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
+
++static void notify_display_count_to_smu(
++ struct dc *dc,
++ struct dc_state *context)
++{
++ int i, display_count;
++ struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
++
++ /*
++ * if function pointer not set up, this message is
++ * sent as part of pplib_apply_display_requirements.
++ * So just return.
++ */
++ if (!pp_smu->set_display_count)
++ return;
++
++ display_count = 0;
++ for (i = 0; i < context->stream_count; i++) {
++ const struct dc_stream_state *stream = context->streams[i];
++
++ /* only notify active stream */
++ if (stream->dpms_off)
++ continue;
++
++ display_count++;
++ }
++
++ pp_smu->set_display_count(&pp_smu->pp_smu, display_count);
++}
+
+ static void commit_planes_do_stream_update(struct dc *dc,
+ struct dc_stream_state *stream,
+@@ -1446,13 +1474,17 @@ static void commit_planes_do_stream_update(struct dc *dc,
+ core_link_disable_stream(pipe_ctx, KEEP_ACQUIRED_RESOURCE);
+ dc->hwss.pplib_apply_display_requirements(
+ dc, dc->current_state);
++ notify_display_count_to_smu(dc, dc->current_state);
+ } else {
+ dc->hwss.pplib_apply_display_requirements(
+ dc, dc->current_state);
++ notify_display_count_to_smu(dc, dc->current_state);
+ core_link_enable_stream(dc->current_state, pipe_ctx);
+ }
+ }
+
++
++
+ if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
+ if (pipe_ctx->stream_res.tg->funcs->is_blanked) {
+ // if otg funcs defined check if blanked before programming
+@@ -1713,9 +1745,7 @@ void dc_set_power_state(
+ dc->hwss.init_hw(dc);
+ break;
+ default:
+-
+- dc->hwss.power_down(dc);
+-
++ ASSERT(dc->current_state->stream_count == 0);
+ /* Zero out the current context so that on resume we start with
+ * clean state, and dc hw programming optimizations will not
+ * cause any trouble.
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5256-drm-amd-display-Flatten-unnecessary-i2c-functions.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5256-drm-amd-display-Flatten-unnecessary-i2c-functions.patch
new file mode 100644
index 00000000..172e8799
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5256-drm-amd-display-Flatten-unnecessary-i2c-functions.patch
@@ -0,0 +1,212 @@
+From 96288a7d1c7be36f49c31fba17f25b192223ae73 Mon Sep 17 00:00:00 2001
+From: David Francis <David.Francis@amd.com>
+Date: Fri, 3 Aug 2018 13:24:28 -0400
+Subject: [PATCH 5256/5725] drm/amd/display: Flatten unnecessary i2c functions
+
+[Why]
+The dce_i2c_hw code contained four functions that were only
+called in one place and did not have a clearly delineated
+purpose.
+
+[How]
+Inline these functions, keeping the same functionality.
+
+This is not a functional change.
+
+The functions disable_i2c_hw_engine and release_engine_dce_hw were
+pulled into their respective callers.
+
+The most interesting part of this change is the acquire functions.
+dce_i2c_hw_engine_acquire_engine was pulled into
+dce_i2c_engine_acquire_hw, and dce_i2c_engine_acquire_hw was pulled
+into acquire_i2c_hw_engine.
+
+Some notes to show that this change is not functional:
+-Failure conditions in any function resulted in a cascade of calls that
+ended in a 'return NULL'.
+Those are replaced with a direct 'return NULL'.
+
+-The variable result is the one from dce_i2c_hw_engine_acquire_engine.
+The boolean result used as part of return logic was removed.
+
+-As the second half of dce_i2c_hw_engine_acquire_engine is only executed
+if that function is returning true and therefore exiting the do-while
+loop in dce_i2c_engine_acquire_hw, those lines were moved outside
+of the loop.
+
+Signed-off-by: David Francis <David.Francis@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c | 111 ++++++++----------------
+ 1 file changed, 34 insertions(+), 77 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+index 2800d3f..40f2d6e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+@@ -36,12 +36,6 @@
+ #define FN(reg_name, field_name) \
+ dce_i2c_hw->shifts->field_name, dce_i2c_hw->masks->field_name
+
+-static void disable_i2c_hw_engine(
+- struct dce_i2c_hw *dce_i2c_hw)
+-{
+- REG_UPDATE_N(SETUP, 1, FN(SETUP, DC_I2C_DDC1_ENABLE), 0);
+-}
+-
+ static void execute_transaction(
+ struct dce_i2c_hw *dce_i2c_hw)
+ {
+@@ -348,60 +342,40 @@ static void release_engine(
+ REG_UPDATE(DC_I2C_CONTROL, DC_I2C_SW_STATUS_RESET, 1);
+ /* HW I2c engine - clock gating feature */
+ if (!dce_i2c_hw->engine_keep_power_up_count)
+- disable_i2c_hw_engine(dce_i2c_hw);
++ REG_UPDATE_N(SETUP, 1, FN(SETUP, DC_I2C_DDC1_ENABLE), 0);
+
+ }
+
+-static void release_engine_dce_hw(
++struct dce_i2c_hw *acquire_i2c_hw_engine(
+ struct resource_pool *pool,
+- struct dce_i2c_hw *dce_i2c_hw)
+-{
+- pool->i2c_hw_buffer_in_use = false;
+-
+- release_engine(dce_i2c_hw);
+- dal_ddc_close(dce_i2c_hw->ddc);
+-
+- dce_i2c_hw->ddc = NULL;
+-}
+-
+-bool dce_i2c_hw_engine_acquire_engine(
+- struct dce_i2c_hw *dce_i2c_hw,
+ struct ddc *ddc)
+ {
+-
++ uint32_t counter = 0;
+ enum gpio_result result;
+ uint32_t current_speed;
++ struct dce_i2c_hw *dce_i2c_hw = NULL;
+
+- result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE,
+- GPIO_DDC_CONFIG_TYPE_MODE_I2C);
+-
+- if (result != GPIO_RESULT_OK)
+- return false;
+-
+- dce_i2c_hw->ddc = ddc;
+-
+-
+- current_speed = get_speed(dce_i2c_hw);
++ if (!ddc)
++ return NULL;
+
+- if (current_speed)
+- dce_i2c_hw->original_speed = current_speed;
++ if (ddc->hw_info.hw_supported) {
++ enum gpio_ddc_line line = dal_ddc_get_line(ddc);
+
+- return true;
+-}
++ if (line < pool->pipe_count)
++ dce_i2c_hw = pool->hw_i2cs[line];
++ }
+
+-bool dce_i2c_engine_acquire_hw(
+- struct dce_i2c_hw *dce_i2c_hw,
+- struct ddc *ddc_handle)
+-{
++ if (!dce_i2c_hw)
++ return NULL;
+
+- uint32_t counter = 0;
+- bool result;
++ if (pool->i2c_hw_buffer_in_use)
++ return NULL;
+
+ do {
+- result = dce_i2c_hw_engine_acquire_engine(
+- dce_i2c_hw, ddc_handle);
++ result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE,
++ GPIO_DDC_CONFIG_TYPE_MODE_I2C);
+
+- if (result)
++ if (result == GPIO_RESULT_OK)
+ break;
+
+ /* i2c_engine is busy by VBios, lets wait and retry */
+@@ -411,45 +385,23 @@ bool dce_i2c_engine_acquire_hw(
+ ++counter;
+ } while (counter < 2);
+
+- if (result) {
+- if (!setup_engine(dce_i2c_hw)) {
+- release_engine(dce_i2c_hw);
+- result = false;
+- }
+- }
+-
+- return result;
+-}
+-
+-struct dce_i2c_hw *acquire_i2c_hw_engine(
+- struct resource_pool *pool,
+- struct ddc *ddc)
+-{
+-
+- struct dce_i2c_hw *engine = NULL;
+-
+- if (!ddc)
++ if (result != GPIO_RESULT_OK)
+ return NULL;
+
+- if (ddc->hw_info.hw_supported) {
+- enum gpio_ddc_line line = dal_ddc_get_line(ddc);
+-
+- if (line < pool->pipe_count)
+- engine = pool->hw_i2cs[line];
+- }
++ dce_i2c_hw->ddc = ddc;
+
+- if (!engine)
+- return NULL;
++ current_speed = get_speed(dce_i2c_hw);
+
++ if (current_speed)
++ dce_i2c_hw->original_speed = current_speed;
+
+- if (!pool->i2c_hw_buffer_in_use &&
+- dce_i2c_engine_acquire_hw(engine, ddc)) {
+- pool->i2c_hw_buffer_in_use = true;
+- return engine;
++ if (!setup_engine(dce_i2c_hw)) {
++ release_engine(dce_i2c_hw);
++ return NULL;
+ }
+
+-
+- return NULL;
++ pool->i2c_hw_buffer_in_use = true;
++ return dce_i2c_hw;
+ }
+
+ enum i2c_channel_operation_result dce_i2c_hw_engine_wait_on_operation_result(
+@@ -619,7 +571,12 @@ bool dce_i2c_submit_command_hw(
+ ++index_of_payload;
+ }
+
+- release_engine_dce_hw(pool, dce_i2c_hw);
++ pool->i2c_hw_buffer_in_use = false;
++
++ release_engine(dce_i2c_hw);
++ dal_ddc_close(dce_i2c_hw->ddc);
++
++ dce_i2c_hw->ddc = NULL;
+
+ return result;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5257-drm-amdgpu-fix-mask-in-GART-location-calculation.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5257-drm-amdgpu-fix-mask-in-GART-location-calculation.patch
new file mode 100644
index 00000000..084b56a6
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5257-drm-amdgpu-fix-mask-in-GART-location-calculation.patch
@@ -0,0 +1,36 @@
+From 8c66cbb3b487a63c8c2b7f77b5a39a0d81cad60a Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Fri, 14 Sep 2018 10:17:24 +0200
+Subject: [PATCH 5257/5725] drm/amdgpu: fix mask in GART location calculation
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+We need to mask out the lower bits, not the upper ones.
+
+Fixes: ec210e3226dc0 drm/amdgpu: put GART away from VRAM v2
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Acked-by: James Zhu <James.Zhu@amd.com>
+Tested-by: James Zhu <James.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+index c6bcc47..068f0c6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+@@ -138,7 +138,7 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
+ else
+ mc->gart_start = mc->mc_mask - mc->gart_size + 1;
+
+- mc->gart_start &= four_gb - 1;
++ mc->gart_start &= ~(four_gb - 1);
+ mc->gart_end = mc->gart_start + mc->gart_size - 1;
+ dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
+ mc->gart_size >> 20, mc->gart_start, mc->gart_end);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5258-drm-amdgpu-revert-stop-using-gart_start-as-offset-fo.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5258-drm-amdgpu-revert-stop-using-gart_start-as-offset-fo.patch
new file mode 100644
index 00000000..d5bee5bc
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5258-drm-amdgpu-revert-stop-using-gart_start-as-offset-fo.patch
@@ -0,0 +1,72 @@
+From a40daa8382c1b9e282879e90ef32d75da3538b7c Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Fri, 14 Sep 2018 12:54:33 +0200
+Subject: [PATCH 5258/5725] drm/amdgpu: revert "stop using gart_start as offset
+ for the GTT domain"
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+It turned out the commit was incomplete, and since we removed the use of
+the AGP mapping from the GTT manager, it is also no longer necessary.
+
+This reverts commit 22d8bfafcc12dfa17b91d2e8ae4e1898e782003a.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Acked-by: James Zhu <James.Zhu@amd.com>
+Tested-by: James Zhu <James.Zhu@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 3 +--
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 6 +++---
+ 2 files changed, 4 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+index 9d1d2fd..ee4b908 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+@@ -144,8 +144,7 @@ static int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
+ spin_unlock(&mgr->lock);
+
+ if (!r)
+- mem->start = node->node.start +
+- (adev->gmc.gart_start >> PAGE_SHIFT);
++ mem->start = node->node.start;
+
+ return r;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index 0916667..7c46a03f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -188,7 +188,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+ case TTM_PL_TT:
+ /* GTT memory */
+ man->func = &amdgpu_gtt_mgr_func;
+- man->gpu_offset = 0;
++ man->gpu_offset = adev->gmc.gart_start;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
+@@ -1095,7 +1095,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
+ flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
+
+ /* bind pages into GART page tables */
+- gtt->offset = ((u64)bo_mem->start << PAGE_SHIFT) - adev->gmc.gart_start;
++ gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
+ r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
+ ttm->pages, gtt->ttm.dma_address, flags);
+
+@@ -1143,7 +1143,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
+ flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
+
+ /* Bind pages */
+- gtt->offset = ((u64)tmp.start << PAGE_SHIFT) - adev->gmc.gart_start;
++ gtt->offset = (u64)tmp.start << PAGE_SHIFT;
+ r = amdgpu_ttm_gart_bind(adev, bo, flags);
+ if (unlikely(r)) {
+ ttm_bo_mem_put(bo, &tmp);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5259-drm-amdgpu-Fix-SDMA-hang-in-prt-mode-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5259-drm-amdgpu-Fix-SDMA-hang-in-prt-mode-v2.patch
new file mode 100644
index 00000000..c684fcf9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5259-drm-amdgpu-Fix-SDMA-hang-in-prt-mode-v2.patch
@@ -0,0 +1,58 @@
+From dff95925fbde5d875c9b02a4a7b236b55fdd77d8 Mon Sep 17 00:00:00 2001
+From: Tao Zhou <tao.zhou1@amd.com>
+Date: Fri, 7 Sep 2018 13:50:31 +0800
+Subject: [PATCH 5259/5725] drm/amdgpu: Fix SDMA hang in prt mode v2
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Fix SDMA hang in PRT mode: clear XNACK_WATERMARK in reg SDMA0_UTCL1_WATERMK to avoid the issue.
+
+Affected ASICs: VEGA10 VEGA12 RV1 RV2
+
+v2: add reg clear for SDMA1
+
+Change-Id: I2261b8e753600731d0d8ee8bbdfc08d01eeb428e
+Signed-off-by: Tao Zhou <tao.zhou1@amd.com>
+Tested-by: Yukun Li <yukun1.li@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+index 94cb277..fa2eec1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+@@ -70,6 +70,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0100, 0x00000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
++ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100),
+@@ -81,7 +82,8 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_IB_CNTL, 0x800f0100, 0x00000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
+- SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0)
++ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0),
++ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_WATERMK, 0xfc000000, 0x00000000)
+ };
+
+ static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
+@@ -108,7 +110,8 @@ static const struct soc15_reg_golden golden_settings_sdma_4_1[] = {
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+- SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0)
++ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
++ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000)
+ };
+
+ static const struct soc15_reg_golden golden_settings_sdma0_4_2_init[] = {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5260-drm-amdgpu-add-new-polaris-pci-id.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5260-drm-amdgpu-add-new-polaris-pci-id.patch
new file mode 100644
index 00000000..af6dac62
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5260-drm-amdgpu-add-new-polaris-pci-id.patch
@@ -0,0 +1,55 @@
+From 44cad906e964642cc04502e14adb398524ec4561 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Tue, 18 Sep 2018 15:28:24 -0500
+Subject: [PATCH 5260/5725] drm/amdgpu: add new polaris pci id
+
+Add new pci id.
+
+Reviewed-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 14 ++++++++------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 1 +
+ 2 files changed, 9 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+index 693ec5e..8816c69 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+@@ -367,12 +367,14 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
+ break;
+ case CHIP_POLARIS10:
+ if (type == CGS_UCODE_ID_SMU) {
+- if ((adev->pdev->device == 0x67df) &&
+- ((adev->pdev->revision == 0xe0) ||
+- (adev->pdev->revision == 0xe3) ||
+- (adev->pdev->revision == 0xe4) ||
+- (adev->pdev->revision == 0xe5) ||
+- (adev->pdev->revision == 0xe7) ||
++ if (((adev->pdev->device == 0x67df) &&
++ ((adev->pdev->revision == 0xe0) ||
++ (adev->pdev->revision == 0xe3) ||
++ (adev->pdev->revision == 0xe4) ||
++ (adev->pdev->revision == 0xe5) ||
++ (adev->pdev->revision == 0xe7) ||
++ (adev->pdev->revision == 0xef))) ||
++ ((adev->pdev->device == 0x6fdf) &&
+ (adev->pdev->revision == 0xef))) {
+ info->is_kicker = true;
+ strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index ba99200..887cbd9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -757,6 +757,7 @@ static const struct pci_device_id pciidlist[] = {
+ {0x1002, 0x67CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+ {0x1002, 0x67CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+ {0x1002, 0x67CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
++ {0x1002, 0x6FDF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+ /* Polaris12 */
+ {0x1002, 0x6980, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+ {0x1002, 0x6981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5261-drm-amdgpu-sriov-Correct-the-setting-about-sdma-door.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5261-drm-amdgpu-sriov-Correct-the-setting-about-sdma-door.patch
new file mode 100644
index 00000000..9a1c080d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5261-drm-amdgpu-sriov-Correct-the-setting-about-sdma-door.patch
@@ -0,0 +1,103 @@
+From eadce9f3ab7260cd76d8125dd0f6b9dc1964449a Mon Sep 17 00:00:00 2001
+From: Emily Deng <Emily.Deng@amd.com>
+Date: Thu, 9 Aug 2018 15:05:31 +0800
+Subject: [PATCH 5261/5725] drm/amdgpu/sriov: Correct the setting about sdma
+ doorbell offset of Vega10
+
+Correct the format
+
+For vega10 sriov, the sdma doorbell must be fixed as follows to keep
+the same setting as the host driver, or conflicts will occur.
+
+Change-Id: Ibe1a6c0b9c331e4d0b1169807cf45e7e4e5a82f2
+Signed-off-by: Emily Deng <Emily.Deng@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 8 ++++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 27 +++++++++++++++++++--------
+ drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 12 +++++++++---
+ 3 files changed, 36 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 64b3990..7a8b8fe 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -425,6 +425,14 @@ typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
+ AMDGPU_DOORBELL64_sDMA_ENGINE1 = 0xE8,
+ AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE1 = 0xE9,
+
++ /* For vega10 sriov, the sdma doorbell must be fixed as follow
++ * to keep the same setting with host driver, or it will
++ * happen conflicts */
++ AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 = 0xF0,
++ AMDGPU_VEGA10_DOORBELL64_sDMA_HI_PRI_ENGINE0 = 0xF1,
++ AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 = 0xF2,
++ AMDGPU_VEGA10_DOORBELL64_sDMA_HI_PRI_ENGINE1 = 0xF3,
++
+ /* Interrupt handler */
+ AMDGPU_DOORBELL64_IH = 0xF4, /* For legacy interrupt ring buffer */
+ AMDGPU_DOORBELL64_IH_RING1 = 0xF5, /* For page migration request log */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+index 599cb6f..589eca8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+@@ -206,14 +206,25 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
+ * process in case of 64-bit doorbells so we
+ * can use each doorbell assignment twice.
+ */
+- gpu_resources.sdma_doorbell[0][i] =
+- AMDGPU_DOORBELL64_sDMA_ENGINE0 + (i >> 1);
+- gpu_resources.sdma_doorbell[0][i+1] =
+- AMDGPU_DOORBELL64_sDMA_ENGINE0 + 0x200 + (i >> 1);
+- gpu_resources.sdma_doorbell[1][i] =
+- AMDGPU_DOORBELL64_sDMA_ENGINE1 + (i >> 1);
+- gpu_resources.sdma_doorbell[1][i+1] =
+- AMDGPU_DOORBELL64_sDMA_ENGINE1 + 0x200 + (i >> 1);
++ if (adev->asic_type == CHIP_VEGA10) {
++ gpu_resources.sdma_doorbell[0][i] =
++ AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 + (i >> 1);
++ gpu_resources.sdma_doorbell[0][i+1] =
++ AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 + 0x200 + (i >> 1);
++ gpu_resources.sdma_doorbell[1][i] =
++ AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 + (i >> 1);
++ gpu_resources.sdma_doorbell[1][i+1] =
++ AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 + 0x200 + (i >> 1);
++ } else {
++ gpu_resources.sdma_doorbell[0][i] =
++ AMDGPU_DOORBELL64_sDMA_ENGINE0 + (i >> 1);
++ gpu_resources.sdma_doorbell[0][i+1] =
++ AMDGPU_DOORBELL64_sDMA_ENGINE0 + 0x200 + (i >> 1);
++ gpu_resources.sdma_doorbell[1][i] =
++ AMDGPU_DOORBELL64_sDMA_ENGINE1 + (i >> 1);
++ gpu_resources.sdma_doorbell[1][i+1] =
++ AMDGPU_DOORBELL64_sDMA_ENGINE1 + 0x200 + (i >> 1);
++ }
+ }
+ /* Doorbells 0x0e0-0ff and 0x2e0-2ff are reserved for
+ * SDMA, IH and VCN. So don't use them for the CP.
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+index fa2eec1..3b131a9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+@@ -1306,9 +1306,15 @@ static int sdma_v4_0_sw_init(void *handle)
+ DRM_INFO("use_doorbell being set to: [%s]\n",
+ ring->use_doorbell?"true":"false");
+
+- ring->doorbell_index = (i == 0) ?
+- (AMDGPU_DOORBELL64_sDMA_ENGINE0 << 1) //get DWORD offset
+- : (AMDGPU_DOORBELL64_sDMA_ENGINE1 << 1); // get DWORD offset
++ if (adev->asic_type == CHIP_VEGA10)
++ ring->doorbell_index = (i == 0) ?
++ (AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 << 1) //get DWORD offset
++ : (AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 << 1); // get DWORD offset
++ else
++ ring->doorbell_index = (i == 0) ?
++ (AMDGPU_DOORBELL64_sDMA_ENGINE0 << 1) //get DWORD offset
++ : (AMDGPU_DOORBELL64_sDMA_ENGINE1 << 1); // get DWORD offset
++
+
+ sprintf(ring->name, "sdma%d", i);
+ r = amdgpu_ring_init(adev, ring, 1024,
+--
+2.7.4
+
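The doorbell hunks above only move the SDMA base index for Vega10 under SR-IOV; the assignment pattern itself stays the same. A minimal sketch of that pattern, with invented names (the generic bases are 0xE0/0xE8, and the Vega10 SR-IOV case simply uses 0xF0/0xF2):

    #include <stdint.h>

    /* Illustrative doorbell bases; the SR-IOV hunk only changes these values. */
    enum { SDMA_ENGINE0_BASE = 0xE0, SDMA_ENGINE1_BASE = 0xE8 };

    /*
     * Two consecutive queues share one 64-bit doorbell slot (hence i >> 1);
     * the odd queue of each pair uses the +0x200 alias.
     */
    static uint32_t sdma_queue_doorbell(uint32_t engine_base, unsigned int i)
    {
        uint32_t slot = engine_base + (i >> 1);
        return (i & 1) ? slot + 0x200 : slot;
    }

    /* Ring setup converts the 64-bit slot index into a DWORD offset. */
    static uint32_t sdma_ring_doorbell_dword(uint32_t engine_base)
    {
        return engine_base << 1;
    }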
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5262-drm-amdgpu-add-picasso-to-asic_type-enum.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5262-drm-amdgpu-add-picasso-to-asic_type-enum.patch
new file mode 100644
index 00000000..af7b61b7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5262-drm-amdgpu-add-picasso-to-asic_type-enum.patch
@@ -0,0 +1,30 @@
+From 16be544cc33e34c17d020f9ced9c1549b90936fa Mon Sep 17 00:00:00 2001
+From: Likun Gao <Likun.Gao@amd.com>
+Date: Tue, 10 Jul 2018 20:10:05 +0800
+Subject: [PATCH 5262/5725] drm/amdgpu: add picasso to asic_type enum
+
+Add picasso to amd_asic_type enum and amdgpu_asic_name[].
+
+Signed-off-by: Likun Gao <Likun.Gao@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 02d9e4b..948b62b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -89,6 +89,7 @@ static const char *amdgpu_asic_name[] = {
+ "VEGA12",
+ "VEGA20",
+ "RAVEN",
++ "PICASSO",
+ "LAST",
+ };
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5263-drm-amdgpu-add-soc15-support-for-picasso.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5263-drm-amdgpu-add-soc15-support-for-picasso.patch
new file mode 100644
index 00000000..d357edc4
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5263-drm-amdgpu-add-soc15-support-for-picasso.patch
@@ -0,0 +1,97 @@
+From 75c11c5019e434a49310bd1a62ed6d9280628687 Mon Sep 17 00:00:00 2001
+From: Likun Gao <Likun.Gao@amd.com>
+Date: Tue, 10 Jul 2018 20:22:36 +0800
+Subject: [PATCH 5263/5725] drm/amdgpu: add soc15 support for picasso
+
+Add the IP blocks, clock and powergating flags, and common clockgating support.
+
+Signed-off-by: Likun Gao <Likun.Gao@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 7 ++++++-
+ drivers/gpu/drm/amd/amdgpu/soc15.c | 21 +++++++++++++++++++++
+ 2 files changed, 27 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 948b62b..44adc15 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -62,6 +62,7 @@
+ MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
+ MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
+ MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
++MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
+
+ #define AMDGPU_RESUME_MS 2000
+
+@@ -1385,6 +1386,9 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
+ case CHIP_RAVEN:
+ chip_name = "raven";
+ break;
++ case CHIP_PICASSO:
++ chip_name = "picasso";
++ break;
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
+@@ -1510,7 +1514,8 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
+ case CHIP_VEGA12:
+ case CHIP_VEGA20:
+ case CHIP_RAVEN:
+- if (adev->asic_type == CHIP_RAVEN)
++ case CHIP_PICASSO:
++ if ((adev->asic_type == CHIP_RAVEN) || (adev->asic_type == CHIP_PICASSO))
+ adev->family = AMDGPU_FAMILY_RV;
+ else
+ adev->family = AMDGPU_FAMILY_AI;
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index fc0cb7d3..d3b73af 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -486,6 +486,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
+ case CHIP_RAVEN:
++ case CHIP_PICASSO:
+ vega10_reg_base_init(adev);
+ break;
+ case CHIP_VEGA20:
+@@ -724,6 +725,25 @@ static int soc15_common_early_init(void *handle)
+
+ adev->external_rev_id = 0x1;
+ break;
++ case CHIP_PICASSO:
++ adev->cg_flags = AMD_CG_SUPPORT_GFX_MGLS |
++ AMD_CG_SUPPORT_GFX_CP_LS |
++ AMD_CG_SUPPORT_GFX_3D_CGCG |
++ AMD_CG_SUPPORT_GFX_3D_CGLS |
++ AMD_CG_SUPPORT_GFX_CGCG |
++ AMD_CG_SUPPORT_GFX_CGLS |
++ AMD_CG_SUPPORT_BIF_LS |
++ AMD_CG_SUPPORT_HDP_LS |
++ AMD_CG_SUPPORT_ROM_MGCG |
++ AMD_CG_SUPPORT_MC_MGCG |
++ AMD_CG_SUPPORT_MC_LS |
++ AMD_CG_SUPPORT_SDMA_MGCG |
++ AMD_CG_SUPPORT_SDMA_LS;
++
++ adev->pg_flags = 0;
++
++ adev->external_rev_id = adev->rev_id + 0x41;
++ break;
+ default:
+ /* FIXME: not supported yet */
+ return -EINVAL;
+@@ -924,6 +944,7 @@ static int soc15_common_set_clockgating_state(void *handle,
+ state == AMD_CG_STATE_GATE ? true : false);
+ break;
+ case CHIP_RAVEN:
++ case CHIP_PICASSO:
+ adev->nbio_funcs->update_medium_grain_clock_gating(adev,
+ state == AMD_CG_STATE_GATE ? true : false);
+ adev->nbio_funcs->update_medium_grain_light_sleep(adev,
+--
+2.7.4
+
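The cg_flags assignment above is a plain bitmask: each AMD_CG_SUPPORT_* constant is one bit, the per-ASIC early init ORs together what the chip supports, and the IP blocks later test individual bits before programming their gating registers. A self-contained sketch of the pattern, with made-up flag names:

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative feature bits, one per gating feature. */
    #define CG_SUPPORT_GFX_CGCG   (1u << 0)
    #define CG_SUPPORT_GFX_CGLS   (1u << 1)
    #define CG_SUPPORT_SDMA_MGCG  (1u << 2)
    #define CG_SUPPORT_SDMA_LS    (1u << 3)

    struct device_caps {
        uint32_t cg_flags;
    };

    static void caps_init_example_asic(struct device_caps *caps)
    {
        /* Enable only what this ASIC supports, just like adev->cg_flags. */
        caps->cg_flags = CG_SUPPORT_GFX_CGCG |
                         CG_SUPPORT_GFX_CGLS |
                         CG_SUPPORT_SDMA_MGCG |
                         CG_SUPPORT_SDMA_LS;
    }

    static bool caps_has(const struct device_caps *caps, uint32_t flag)
    {
        return (caps->cg_flags & flag) != 0;
    }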
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5264-drm-amdgpu-add-picasso-ucode-loading-method.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5264-drm-amdgpu-add-picasso-ucode-loading-method.patch
new file mode 100644
index 00000000..17a876a9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5264-drm-amdgpu-add-picasso-ucode-loading-method.patch
@@ -0,0 +1,30 @@
+From 2b5b7c26917416736507f638fb299e7fa326bd8a Mon Sep 17 00:00:00 2001
+From: Likun Gao <Likun.Gao@amd.com>
+Date: Tue, 10 Jul 2018 20:15:45 +0800
+Subject: [PATCH 5264/5725] drm/amdgpu: add picasso ucode loading method
+
+Same as raven.
+
+Signed-off-by: Likun Gao <Likun.Gao@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+index 8777dad..fcf260d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+@@ -303,6 +303,7 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
+ return AMDGPU_FW_LOAD_SMU;
+ case CHIP_VEGA10:
+ case CHIP_RAVEN:
++ case CHIP_PICASSO:
+ case CHIP_VEGA12:
+ case CHIP_VEGA20:
+ if (!load_type)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5265-drm-amdgpu-add-picasso-support-for-vcn.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5265-drm-amdgpu-add-picasso-support-for-vcn.patch
new file mode 100644
index 00000000..4a09f2f2
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5265-drm-amdgpu-add-picasso-support-for-vcn.patch
@@ -0,0 +1,43 @@
+From b27b14207abd8f6776a76e10891c3bb75be0bda2 Mon Sep 17 00:00:00 2001
+From: Likun Gao <Likun.Gao@amd.com>
+Date: Tue, 10 Jul 2018 20:17:13 +0800
+Subject: [PATCH 5265/5725] drm/amdgpu: add picasso support for vcn
+
+Add vcn support for picasso.
+
+Signed-off-by: Likun Gao <Likun.Gao@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+index 461f7dc..0102e7e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+@@ -42,8 +42,10 @@
+
+ /* Firmware Names */
+ #define FIRMWARE_RAVEN "amdgpu/raven_vcn.bin"
++#define FIRMWARE_PICASSO "amdgpu/picasso_vcn.bin"
+
+ MODULE_FIRMWARE(FIRMWARE_RAVEN);
++MODULE_FIRMWARE(FIRMWARE_PICASSO);
+
+ static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
+
+@@ -61,6 +63,9 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
+ case CHIP_RAVEN:
+ fw_name = FIRMWARE_RAVEN;
+ break;
++ case CHIP_PICASSO:
++ fw_name = FIRMWARE_PICASSO;
++ break;
+ default:
+ return -EINVAL;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5266-drm-amdgpu-add-clockgating-support-for-picasso.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5266-drm-amdgpu-add-clockgating-support-for-picasso.patch
new file mode 100644
index 00000000..8031e760
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5266-drm-amdgpu-add-clockgating-support-for-picasso.patch
@@ -0,0 +1,73 @@
+From a83a269a4d973a5b2c22a8f0e04e0a91d56d818c Mon Sep 17 00:00:00 2001
+From: Likun Gao <Likun.Gao@amd.com>
+Date: Tue, 10 Jul 2018 20:25:24 +0800
+Subject: [PATCH 5266/5725] drm/amdgpu: add clockgating support for picasso
+
+Treat it the same as raven for now.
+
+Signed-off-by: Likun Gao <Likun.Gao@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+index 5f6a9c8..8875e10 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+@@ -614,7 +614,7 @@ static void mmhub_v1_0_update_medium_grain_clock_gating(struct amdgpu_device *ad
+
+ def = data = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG);
+
+- if (adev->asic_type != CHIP_RAVEN) {
++ if (adev->asic_type != CHIP_RAVEN && adev->asic_type != CHIP_PICASSO) {
+ def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
+ def2 = data2 = RREG32_SOC15(MMHUB, 0, mmDAGB1_CNTL_MISC2);
+ } else
+@@ -630,7 +630,7 @@ static void mmhub_v1_0_update_medium_grain_clock_gating(struct amdgpu_device *ad
+ DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
+
+- if (adev->asic_type != CHIP_RAVEN)
++ if (adev->asic_type != CHIP_RAVEN && adev->asic_type != CHIP_PICASSO)
+ data2 &= ~(DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
+ DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
+ DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
+@@ -647,7 +647,7 @@ static void mmhub_v1_0_update_medium_grain_clock_gating(struct amdgpu_device *ad
+ DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
+
+- if (adev->asic_type != CHIP_RAVEN)
++ if (adev->asic_type != CHIP_RAVEN && adev->asic_type != CHIP_PICASSO)
+ data2 |= (DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
+ DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
+ DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
+@@ -660,13 +660,13 @@ static void mmhub_v1_0_update_medium_grain_clock_gating(struct amdgpu_device *ad
+ WREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG, data);
+
+ if (def1 != data1) {
+- if (adev->asic_type != CHIP_RAVEN)
++ if (adev->asic_type != CHIP_RAVEN && adev->asic_type != CHIP_PICASSO)
+ WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2, data1);
+ else
+ WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_RV, data1);
+ }
+
+- if (adev->asic_type != CHIP_RAVEN && def2 != data2)
++ if (adev->asic_type != CHIP_RAVEN && adev->asic_type != CHIP_PICASSO && def2 != data2)
+ WREG32_SOC15(MMHUB, 0, mmDAGB1_CNTL_MISC2, data2);
+ }
+
+@@ -730,6 +730,7 @@ int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
+ case CHIP_VEGA12:
+ case CHIP_VEGA20:
+ case CHIP_RAVEN:
++ case CHIP_PICASSO:
+ mmhub_v1_0_update_medium_grain_clock_gating(adev,
+ state == AMD_CG_STATE_GATE ? true : false);
+ athub_update_medium_grain_clock_gating(adev,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5267-drm-amdgpu-add-picasso-support-for-gmc.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5267-drm-amdgpu-add-picasso-support-for-gmc.patch
new file mode 100644
index 00000000..01d73f0c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5267-drm-amdgpu-add-picasso-support-for-gmc.patch
@@ -0,0 +1,54 @@
+From 526c60c33960d28d47249723aedbab69733d1ade Mon Sep 17 00:00:00 2001
+From: Likun Gao <Likun.Gao@amd.com>
+Date: Tue, 10 Jul 2018 20:26:41 +0800
+Subject: [PATCH 5267/5725] drm/amdgpu: add picasso support for gmc
+
+Same as raven.
+
+Signed-off-by: Likun Gao <Likun.Gao@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index 7e9d1c6..f4a9ca2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -837,6 +837,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
+ adev->gmc.gart_size = 512ULL << 20;
+ break;
+ case CHIP_RAVEN: /* DCE SG support */
++ case CHIP_PICASSO: /* DCE SG support */
+ adev->gmc.gart_size = 1024ULL << 20;
+ break;
+ }
+@@ -925,6 +926,7 @@ static int gmc_v9_0_sw_init(void *handle)
+ adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev);
+ switch (adev->asic_type) {
+ case CHIP_RAVEN:
++ case CHIP_PICASSO:
+ if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
+ amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
+ } else {
+@@ -1056,6 +1058,7 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
+ case CHIP_VEGA12:
+ break;
+ case CHIP_RAVEN:
++ case CHIP_PICASSO:
+ soc15_program_register_sequence(adev,
+ golden_settings_athub_1_0_0,
+ ARRAY_SIZE(golden_settings_athub_1_0_0));
+@@ -1090,6 +1093,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
+
+ switch (adev->asic_type) {
+ case CHIP_RAVEN:
++ case CHIP_PICASSO:
+ mmhub_v1_0_initialize_power_gating(adev);
+ mmhub_v1_0_update_power_gating(adev, true);
+ break;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5268-drm-amdgpu-add-picasso-support-for-gfx_v9_0.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5268-drm-amdgpu-add-picasso-support-for-gfx_v9_0.patch
new file mode 100644
index 00000000..854290d3
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5268-drm-amdgpu-add-picasso-support-for-gfx_v9_0.patch
@@ -0,0 +1,136 @@
+From c214b2d9da9f6ce3b4e9c25cc3f1dfc8422ea628 Mon Sep 17 00:00:00 2001
+From: Likun Gao <Likun.Gao@amd.com>
+Date: Tue, 10 Jul 2018 20:29:12 +0800
+Subject: [PATCH 5268/5725] drm/amdgpu: add picasso support for gfx_v9_0
+
+Add gfx support to picasso
+
+Signed-off-by: Likun Gao <Likun.Gao@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 30 +++++++++++++++++++++++++++---
+ 1 file changed, 27 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index fdb0ad4..1cc6b87 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -80,6 +80,13 @@ MODULE_FIRMWARE("amdgpu/raven_mec.bin");
+ MODULE_FIRMWARE("amdgpu/raven_mec2.bin");
+ MODULE_FIRMWARE("amdgpu/raven_rlc.bin");
+
++MODULE_FIRMWARE("amdgpu/picasso_ce.bin");
++MODULE_FIRMWARE("amdgpu/picasso_pfp.bin");
++MODULE_FIRMWARE("amdgpu/picasso_me.bin");
++MODULE_FIRMWARE("amdgpu/picasso_mec.bin");
++MODULE_FIRMWARE("amdgpu/picasso_mec2.bin");
++MODULE_FIRMWARE("amdgpu/picasso_rlc.bin");
++
+ static const struct soc15_reg_golden golden_settings_gc_9_0[] =
+ {
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
+@@ -240,6 +247,7 @@ static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
+ #define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
+ #define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
+ #define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
++#define PICASSO_GB_ADDR_CONFIG_GOLDEN 0x24000042
+
+ static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev);
+ static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev);
+@@ -279,6 +287,7 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
+ ARRAY_SIZE(golden_settings_gc_9_0_vg20));
+ break;
+ case CHIP_RAVEN:
++ case CHIP_PICASSO:
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_9_1,
+ ARRAY_SIZE(golden_settings_gc_9_1));
+@@ -566,6 +575,9 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
+ case CHIP_RAVEN:
+ chip_name = "raven";
+ break;
++ case CHIP_PICASSO:
++ chip_name = "picasso";
++ break;
+ default:
+ BUG();
+ }
+@@ -1019,7 +1031,7 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
+ amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+ }
+
+- if (adev->asic_type == CHIP_RAVEN) {
++ if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_PICASSO) {
+ /* TODO: double check the cp_table_size for RV */
+ adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
+ r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
+@@ -1269,6 +1281,14 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
+ adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
+ gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
+ break;
++ case CHIP_PICASSO:
++ adev->gfx.config.max_hw_contexts = 8;
++ adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
++ adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
++ adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
++ adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
++ gb_addr_config = PICASSO_GB_ADDR_CONFIG_GOLDEN;
++ break;
+ default:
+ BUG();
+ break;
+@@ -1547,6 +1567,7 @@ static int gfx_v9_0_sw_init(void *handle)
+ case CHIP_VEGA12:
+ case CHIP_VEGA20:
+ case CHIP_RAVEN:
++ case CHIP_PICASSO:
+ adev->gfx.mec.num_mec = 2;
+ break;
+ default:
+@@ -1708,7 +1729,7 @@ static int gfx_v9_0_sw_fini(void *handle)
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
+ &adev->gfx.rlc.clear_state_gpu_addr,
+ (void **)&adev->gfx.rlc.cs_ptr);
+- if (adev->asic_type == CHIP_RAVEN) {
++ if ((adev->asic_type == CHIP_RAVEN) || (adev->asic_type == CHIP_PICASSO)) {
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
+ &adev->gfx.rlc.cp_table_gpu_addr,
+ (void **)&adev->gfx.rlc.cp_table_ptr);
+@@ -2378,7 +2399,7 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
+ return r;
+ }
+
+- if (adev->asic_type == CHIP_RAVEN) {
++ if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_PICASSO) {
+ if (amdgpu_lbpw != 0)
+ gfx_v9_0_enable_lbpw(adev, true);
+ else
+@@ -3782,6 +3803,7 @@ static int gfx_v9_0_set_powergating_state(void *handle,
+
+ switch (adev->asic_type) {
+ case CHIP_RAVEN:
++ case CHIP_PICASSO:
+ if (!enable) {
+ amdgpu_gfx_off_ctrl(adev, false);
+ cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
+@@ -3836,6 +3858,7 @@ static int gfx_v9_0_set_clockgating_state(void *handle,
+ case CHIP_VEGA12:
+ case CHIP_VEGA20:
+ case CHIP_RAVEN:
++ case CHIP_PICASSO:
+ gfx_v9_0_update_gfx_clock_gating(adev,
+ state == AMD_CG_STATE_GATE ? true : false);
+ break;
+@@ -4855,6 +4878,7 @@ static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
+ case CHIP_VEGA12:
+ case CHIP_VEGA20:
+ case CHIP_RAVEN:
++ case CHIP_PICASSO:
+ adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
+ break;
+ default:
+--
+2.7.4
+
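All of the per-chip firmware hunks here follow one convention: a lowercase chip name is selected from the ASIC type and spliced into "amdgpu/<chip>_<block>.bin" paths, the same strings the new MODULE_FIRMWARE lines declare. A hedged, standalone sketch of that naming scheme (not the driver code itself):

    #include <stdio.h>

    /* Builds e.g. "amdgpu/picasso_pfp.bin"; purely illustrative. */
    static int build_fw_name(char *buf, size_t len,
                             const char *chip, const char *block)
    {
        return snprintf(buf, len, "amdgpu/%s_%s.bin", chip, block);
    }

    /* Example: the six gfx images the new picasso MODULE_FIRMWARE lines declare. */
    static void list_picasso_gfx_firmware(void)
    {
        static const char *blocks[] = { "ce", "pfp", "me", "mec", "mec2", "rlc" };
        char name[64];
        size_t i;

        for (i = 0; i < sizeof(blocks) / sizeof(blocks[0]); i++) {
            build_fw_name(name, sizeof(name), "picasso", blocks[i]);
            printf("%s\n", name);
        }
    }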
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5269-drm-amdgpu-add-picasso-support-for-sdma_v4.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5269-drm-amdgpu-add-picasso-support-for-sdma_v4.patch
new file mode 100644
index 00000000..f3a36c6d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5269-drm-amdgpu-add-picasso-support-for-sdma_v4.patch
@@ -0,0 +1,81 @@
+From dca2cb2f350acaba44b01ec7474e7fe8a7d12309 Mon Sep 17 00:00:00 2001
+From: Likun Gao <Likun.Gao@amd.com>
+Date: Tue, 10 Jul 2018 20:30:42 +0800
+Subject: [PATCH 5269/5725] drm/amdgpu: add picasso support for sdma_v4
+
+Add sdma support to picasso
+
+Signed-off-by: Likun Gao <Likun.Gao@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+index 3b131a9..f9bfc71 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+@@ -48,6 +48,7 @@ MODULE_FIRMWARE("amdgpu/vega12_sdma1.bin");
+ MODULE_FIRMWARE("amdgpu/vega20_sdma.bin");
+ MODULE_FIRMWARE("amdgpu/vega20_sdma1.bin");
+ MODULE_FIRMWARE("amdgpu/raven_sdma.bin");
++MODULE_FIRMWARE("amdgpu/picasso_sdma.bin");
+
+ #define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK 0x000000F8L
+ #define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK 0xFC000000L
+@@ -221,6 +222,7 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
+ ARRAY_SIZE(golden_settings_sdma1_4_2));
+ break;
+ case CHIP_RAVEN:
++ case CHIP_PICASSO:
+ soc15_program_register_sequence(adev,
+ golden_settings_sdma_4_1,
+ ARRAY_SIZE(golden_settings_sdma_4_1));
+@@ -269,6 +271,9 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
+ case CHIP_RAVEN:
+ chip_name = "raven";
+ break;
++ case CHIP_PICASSO:
++ chip_name = "picasso";
++ break;
+ default:
+ BUG();
+ }
+@@ -839,6 +844,7 @@ static void sdma_v4_0_init_pg(struct amdgpu_device *adev)
+
+ switch (adev->asic_type) {
+ case CHIP_RAVEN:
++ case CHIP_PICASSO:
+ sdma_v4_1_init_power_gating(adev);
+ sdma_v4_1_update_power_gating(adev, true);
+ break;
+@@ -1260,7 +1266,7 @@ static int sdma_v4_0_early_init(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+- if (adev->asic_type == CHIP_RAVEN)
++ if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_PICASSO)
+ adev->sdma.num_instances = 1;
+ else
+ adev->sdma.num_instances = 2;
+@@ -1609,6 +1615,7 @@ static int sdma_v4_0_set_clockgating_state(void *handle,
+ case CHIP_VEGA12:
+ case CHIP_VEGA20:
+ case CHIP_RAVEN:
++ case CHIP_PICASSO:
+ sdma_v4_0_update_medium_grain_clock_gating(adev,
+ state == AMD_CG_STATE_GATE ? true : false);
+ sdma_v4_0_update_medium_grain_light_sleep(adev,
+@@ -1627,6 +1634,7 @@ static int sdma_v4_0_set_powergating_state(void *handle,
+
+ switch (adev->asic_type) {
+ case CHIP_RAVEN:
++ case CHIP_PICASSO:
+ sdma_v4_1_update_power_gating(adev,
+ state == AMD_PG_STATE_GATE ? true : false);
+ break;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5270-drm-amdgpu-add-picasso-for-amdgpu-kms.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5270-drm-amdgpu-add-picasso-for-amdgpu-kms.patch
new file mode 100644
index 00000000..ac7c9b85
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5270-drm-amdgpu-add-picasso-for-amdgpu-kms.patch
@@ -0,0 +1,31 @@
+From 143700634db9ef52e8a9db7de1bd9950e55fcc3e Mon Sep 17 00:00:00 2001
+From: Likun Gao <Likun.Gao@amd.com>
+Date: Tue, 10 Jul 2018 20:34:10 +0800
+Subject: [PATCH 5270/5725] drm/amdgpu: add picasso for amdgpu kms
+
+Add picasso for amdgpu kms
+
+Signed-off-by: Likun Gao <Likun.Gao@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index 1c45f1b..7585182 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -994,7 +994,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
+
+ pm_runtime_get_sync(dev->dev);
+
+- if (adev->asic_type != CHIP_RAVEN) {
++ if (adev->asic_type != CHIP_RAVEN && adev->asic_type != CHIP_PICASSO) {
+ amdgpu_uvd_free_handles(adev, file_priv);
+ amdgpu_vce_free_handles(adev, file_priv);
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5271-drm-amdgpu-Add-pg-support-for-gfxoff-for-PCO.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5271-drm-amdgpu-Add-pg-support-for-gfxoff-for-PCO.patch
new file mode 100644
index 00000000..58c71531
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5271-drm-amdgpu-Add-pg-support-for-gfxoff-for-PCO.patch
@@ -0,0 +1,33 @@
+From 32fa7d8a7ea0b540df7f1a6af8610684138a5455 Mon Sep 17 00:00:00 2001
+From: Kenneth Feng <kenneth.feng@amd.com>
+Date: Tue, 7 Aug 2018 17:05:22 +0800
+Subject: [PATCH 5271/5725] drm/amdgpu: Add pg support for gfxoff for PCO
+
+Add pg support for gfxoff.
+
+Signed-off-by: Kenneth Feng <kenneth.feng@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/soc15.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index d3b73af..2539fa7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -740,7 +740,10 @@ static int soc15_common_early_init(void *handle)
+ AMD_CG_SUPPORT_SDMA_MGCG |
+ AMD_CG_SUPPORT_SDMA_LS;
+
+- adev->pg_flags = 0;
++ if (adev->powerplay.pp_feature & PP_GFXOFF_MASK)
++ adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
++ AMD_PG_SUPPORT_CP |
++ AMD_PG_SUPPORT_RLC_SMU_HS;
+
+ adev->external_rev_id = adev->rev_id + 0x41;
+ break;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5272-drm-amdgpu-Enable-SDMA-power-gating-for-PCO.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5272-drm-amdgpu-Enable-SDMA-power-gating-for-PCO.patch
new file mode 100644
index 00000000..d8a54d2f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5272-drm-amdgpu-Enable-SDMA-power-gating-for-PCO.patch
@@ -0,0 +1,30 @@
+From 09e6d24225d3db318103b22277fd65ec503d4688 Mon Sep 17 00:00:00 2001
+From: Kenneth Feng <kenneth.feng@amd.com>
+Date: Fri, 10 Aug 2018 16:22:26 +0800
+Subject: [PATCH 5272/5725] drm/amdgpu: Enable SDMA power gating for PCO
+
+Enable SDMA power gating
+
+Signed-off-by: Kenneth Feng <kenneth.feng@amd.com>
+Acked-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/soc15.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index 2539fa7..b205a4a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -740,6 +740,8 @@ static int soc15_common_early_init(void *handle)
+ AMD_CG_SUPPORT_SDMA_MGCG |
+ AMD_CG_SUPPORT_SDMA_LS;
+
++ adev->pg_flags = AMD_PG_SUPPORT_SDMA;
++
+ if (adev->powerplay.pp_feature & PP_GFXOFF_MASK)
+ adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+ AMD_PG_SUPPORT_CP |
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5273-drm-amdgpu-enable-mmhub-power-gating.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5273-drm-amdgpu-enable-mmhub-power-gating.patch
new file mode 100644
index 00000000..620e89d9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5273-drm-amdgpu-enable-mmhub-power-gating.patch
@@ -0,0 +1,303 @@
+From a1e83257a02656dbc39271a93e4d0d46089a1e4f Mon Sep 17 00:00:00 2001
+From: Kenneth Feng <kenneth.feng@amd.com>
+Date: Mon, 20 Aug 2018 15:39:32 +0800
+Subject: [PATCH 5273/5725] drm/amdgpu: enable mmhub power gating
+
+Remove some functions due to the design change.
+The whole mmhub power gating sequence has moved into the
+smu fw. The driver now just sends the message to enable
+mmhub powergating. We can also skip the fw version check
+since the old fw version was only used at a very early
+stage and we don't use that fw for release.
+
+Signed-off-by: Kenneth Feng <kenneth.feng@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 1 -
+ drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 220 --------------------------------
+ drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h | 1 -
+ drivers/gpu/drm/amd/amdgpu/soc15.c | 2 +-
+ 4 files changed, 1 insertion(+), 223 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index f4a9ca2..0cc4fcf 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -1094,7 +1094,6 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
+ switch (adev->asic_type) {
+ case CHIP_RAVEN:
+ case CHIP_PICASSO:
+- mmhub_v1_0_initialize_power_gating(adev);
+ mmhub_v1_0_update_power_gating(adev, true);
+ break;
+ default:
+diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+index 8875e10..59b67fa 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+@@ -254,236 +254,16 @@ static void mmhub_v1_0_program_invalidation(struct amdgpu_device *adev)
+ }
+ }
+
+-struct pctl_data {
+- uint32_t index;
+- uint32_t data;
+-};
+-
+-static const struct pctl_data pctl0_data[] = {
+- {0x0, 0x7a640},
+- {0x9, 0x2a64a},
+- {0xd, 0x2a680},
+- {0x11, 0x6a684},
+- {0x19, 0xea68e},
+- {0x29, 0xa69e},
+- {0x2b, 0x0010a6c0},
+- {0x3d, 0x83a707},
+- {0xc2, 0x8a7a4},
+- {0xcc, 0x1a7b8},
+- {0xcf, 0xfa7cc},
+- {0xe0, 0x17a7dd},
+- {0xf9, 0xa7dc},
+- {0xfb, 0x12a7f5},
+- {0x10f, 0xa808},
+- {0x111, 0x12a810},
+- {0x125, 0x7a82c}
+-};
+-#define PCTL0_DATA_LEN (ARRAY_SIZE(pctl0_data))
+-
+-#define PCTL0_RENG_EXEC_END_PTR 0x12d
+-#define PCTL0_STCTRL_REG_SAVE_RANGE0_BASE 0xa640
+-#define PCTL0_STCTRL_REG_SAVE_RANGE0_LIMIT 0xa833
+-
+-static const struct pctl_data pctl1_data[] = {
+- {0x0, 0x39a000},
+- {0x3b, 0x44a040},
+- {0x81, 0x2a08d},
+- {0x85, 0x6ba094},
+- {0xf2, 0x18a100},
+- {0x10c, 0x4a132},
+- {0x112, 0xca141},
+- {0x120, 0x2fa158},
+- {0x151, 0x17a1d0},
+- {0x16a, 0x1a1e9},
+- {0x16d, 0x13a1ec},
+- {0x182, 0x7a201},
+- {0x18b, 0x3a20a},
+- {0x190, 0x7a580},
+- {0x199, 0xa590},
+- {0x19b, 0x4a594},
+- {0x1a1, 0x1a59c},
+- {0x1a4, 0x7a82c},
+- {0x1ad, 0xfa7cc},
+- {0x1be, 0x17a7dd},
+- {0x1d7, 0x12a810},
+- {0x1eb, 0x4000a7e1},
+- {0x1ec, 0x5000a7f5},
+- {0x1ed, 0x4000a7e2},
+- {0x1ee, 0x5000a7dc},
+- {0x1ef, 0x4000a7e3},
+- {0x1f0, 0x5000a7f6},
+- {0x1f1, 0x5000a7e4}
+-};
+-#define PCTL1_DATA_LEN (ARRAY_SIZE(pctl1_data))
+-
+-#define PCTL1_RENG_EXEC_END_PTR 0x1f1
+-#define PCTL1_STCTRL_REG_SAVE_RANGE0_BASE 0xa000
+-#define PCTL1_STCTRL_REG_SAVE_RANGE0_LIMIT 0xa20d
+-#define PCTL1_STCTRL_REG_SAVE_RANGE1_BASE 0xa580
+-#define PCTL1_STCTRL_REG_SAVE_RANGE1_LIMIT 0xa59d
+-#define PCTL1_STCTRL_REG_SAVE_RANGE2_BASE 0xa82c
+-#define PCTL1_STCTRL_REG_SAVE_RANGE2_LIMIT 0xa833
+-
+-static void mmhub_v1_0_power_gating_write_save_ranges(struct amdgpu_device *adev)
+-{
+- uint32_t tmp = 0;
+-
+- /* PCTL0_STCTRL_REGISTER_SAVE_RANGE0 */
+- tmp = REG_SET_FIELD(tmp, PCTL0_STCTRL_REGISTER_SAVE_RANGE0,
+- STCTRL_REGISTER_SAVE_BASE,
+- PCTL0_STCTRL_REG_SAVE_RANGE0_BASE);
+- tmp = REG_SET_FIELD(tmp, PCTL0_STCTRL_REGISTER_SAVE_RANGE0,
+- STCTRL_REGISTER_SAVE_LIMIT,
+- PCTL0_STCTRL_REG_SAVE_RANGE0_LIMIT);
+- WREG32_SOC15(MMHUB, 0, mmPCTL0_STCTRL_REGISTER_SAVE_RANGE0, tmp);
+-
+- /* PCTL1_STCTRL_REGISTER_SAVE_RANGE0 */
+- tmp = 0;
+- tmp = REG_SET_FIELD(tmp, PCTL1_STCTRL_REGISTER_SAVE_RANGE0,
+- STCTRL_REGISTER_SAVE_BASE,
+- PCTL1_STCTRL_REG_SAVE_RANGE0_BASE);
+- tmp = REG_SET_FIELD(tmp, PCTL1_STCTRL_REGISTER_SAVE_RANGE0,
+- STCTRL_REGISTER_SAVE_LIMIT,
+- PCTL1_STCTRL_REG_SAVE_RANGE0_LIMIT);
+- WREG32_SOC15(MMHUB, 0, mmPCTL1_STCTRL_REGISTER_SAVE_RANGE0, tmp);
+-
+- /* PCTL1_STCTRL_REGISTER_SAVE_RANGE1 */
+- tmp = 0;
+- tmp = REG_SET_FIELD(tmp, PCTL1_STCTRL_REGISTER_SAVE_RANGE1,
+- STCTRL_REGISTER_SAVE_BASE,
+- PCTL1_STCTRL_REG_SAVE_RANGE1_BASE);
+- tmp = REG_SET_FIELD(tmp, PCTL1_STCTRL_REGISTER_SAVE_RANGE1,
+- STCTRL_REGISTER_SAVE_LIMIT,
+- PCTL1_STCTRL_REG_SAVE_RANGE1_LIMIT);
+- WREG32_SOC15(MMHUB, 0, mmPCTL1_STCTRL_REGISTER_SAVE_RANGE1, tmp);
+-
+- /* PCTL1_STCTRL_REGISTER_SAVE_RANGE2 */
+- tmp = 0;
+- tmp = REG_SET_FIELD(tmp, PCTL1_STCTRL_REGISTER_SAVE_RANGE2,
+- STCTRL_REGISTER_SAVE_BASE,
+- PCTL1_STCTRL_REG_SAVE_RANGE2_BASE);
+- tmp = REG_SET_FIELD(tmp, PCTL1_STCTRL_REGISTER_SAVE_RANGE2,
+- STCTRL_REGISTER_SAVE_LIMIT,
+- PCTL1_STCTRL_REG_SAVE_RANGE2_LIMIT);
+- WREG32_SOC15(MMHUB, 0, mmPCTL1_STCTRL_REGISTER_SAVE_RANGE2, tmp);
+-}
+-
+-void mmhub_v1_0_initialize_power_gating(struct amdgpu_device *adev)
+-{
+- uint32_t pctl0_misc = 0;
+- uint32_t pctl0_reng_execute = 0;
+- uint32_t pctl1_misc = 0;
+- uint32_t pctl1_reng_execute = 0;
+- int i = 0;
+-
+- if (amdgpu_sriov_vf(adev))
+- return;
+-
+- /****************** pctl0 **********************/
+- pctl0_misc = RREG32_SOC15(MMHUB, 0, mmPCTL0_MISC);
+- pctl0_reng_execute = RREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_EXECUTE);
+-
+- /* Light sleep must be disabled before writing to pctl0 registers */
+- pctl0_misc &= ~PCTL0_MISC__RENG_MEM_LS_ENABLE_MASK;
+- WREG32_SOC15(MMHUB, 0, mmPCTL0_MISC, pctl0_misc);
+-
+- /* Write data used to access ram of register engine */
+- for (i = 0; i < PCTL0_DATA_LEN; i++) {
+- WREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_RAM_INDEX,
+- pctl0_data[i].index);
+- WREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_RAM_DATA,
+- pctl0_data[i].data);
+- }
+-
+- /* Re-enable light sleep */
+- pctl0_misc |= PCTL0_MISC__RENG_MEM_LS_ENABLE_MASK;
+- WREG32_SOC15(MMHUB, 0, mmPCTL0_MISC, pctl0_misc);
+-
+- /****************** pctl1 **********************/
+- pctl1_misc = RREG32_SOC15(MMHUB, 0, mmPCTL1_MISC);
+- pctl1_reng_execute = RREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE);
+-
+- /* Light sleep must be disabled before writing to pctl1 registers */
+- pctl1_misc &= ~PCTL1_MISC__RENG_MEM_LS_ENABLE_MASK;
+- WREG32_SOC15(MMHUB, 0, mmPCTL1_MISC, pctl1_misc);
+-
+- /* Write data used to access ram of register engine */
+- for (i = 0; i < PCTL1_DATA_LEN; i++) {
+- WREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_RAM_INDEX,
+- pctl1_data[i].index);
+- WREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_RAM_DATA,
+- pctl1_data[i].data);
+- }
+-
+- /* Re-enable light sleep */
+- pctl1_misc |= PCTL1_MISC__RENG_MEM_LS_ENABLE_MASK;
+- WREG32_SOC15(MMHUB, 0, mmPCTL1_MISC, pctl1_misc);
+-
+- mmhub_v1_0_power_gating_write_save_ranges(adev);
+-
+- /* Set the reng execute end ptr for pctl0 */
+- pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute,
+- PCTL0_RENG_EXECUTE,
+- RENG_EXECUTE_END_PTR,
+- PCTL0_RENG_EXEC_END_PTR);
+- WREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_EXECUTE, pctl0_reng_execute);
+-
+- /* Set the reng execute end ptr for pctl1 */
+- pctl1_reng_execute = REG_SET_FIELD(pctl1_reng_execute,
+- PCTL1_RENG_EXECUTE,
+- RENG_EXECUTE_END_PTR,
+- PCTL1_RENG_EXEC_END_PTR);
+- WREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE, pctl1_reng_execute);
+-}
+-
+ void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
+ bool enable)
+ {
+- uint32_t pctl0_reng_execute = 0;
+- uint32_t pctl1_reng_execute = 0;
+-
+ if (amdgpu_sriov_vf(adev))
+ return;
+
+- pctl0_reng_execute = RREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_EXECUTE);
+- pctl1_reng_execute = RREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE);
+-
+ if (enable && adev->pg_flags & AMD_PG_SUPPORT_MMHUB) {
+- pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute,
+- PCTL0_RENG_EXECUTE,
+- RENG_EXECUTE_ON_PWR_UP, 1);
+- pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute,
+- PCTL0_RENG_EXECUTE,
+- RENG_EXECUTE_ON_REG_UPDATE, 1);
+- WREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_EXECUTE, pctl0_reng_execute);
+-
+- pctl1_reng_execute = REG_SET_FIELD(pctl1_reng_execute,
+- PCTL1_RENG_EXECUTE,
+- RENG_EXECUTE_ON_PWR_UP, 1);
+- pctl1_reng_execute = REG_SET_FIELD(pctl1_reng_execute,
+- PCTL1_RENG_EXECUTE,
+- RENG_EXECUTE_ON_REG_UPDATE, 1);
+- WREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE, pctl1_reng_execute);
+-
+ if (adev->powerplay.pp_funcs->set_powergating_by_smu)
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GMC, true);
+
+- } else {
+- pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute,
+- PCTL0_RENG_EXECUTE,
+- RENG_EXECUTE_ON_PWR_UP, 0);
+- pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute,
+- PCTL0_RENG_EXECUTE,
+- RENG_EXECUTE_ON_REG_UPDATE, 0);
+- WREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_EXECUTE, pctl0_reng_execute);
+-
+- pctl1_reng_execute = REG_SET_FIELD(pctl1_reng_execute,
+- PCTL1_RENG_EXECUTE,
+- RENG_EXECUTE_ON_PWR_UP, 0);
+- pctl1_reng_execute = REG_SET_FIELD(pctl1_reng_execute,
+- PCTL1_RENG_EXECUTE,
+- RENG_EXECUTE_ON_REG_UPDATE, 0);
+- WREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE, pctl1_reng_execute);
+ }
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
+index 5d38229..bef3d0c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
+@@ -32,7 +32,6 @@ void mmhub_v1_0_init(struct amdgpu_device *adev);
+ int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
+ enum amd_clockgating_state state);
+ void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags);
+-void mmhub_v1_0_initialize_power_gating(struct amdgpu_device *adev);
+ void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
+ bool enable);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index b205a4a..53159f1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -740,7 +740,7 @@ static int soc15_common_early_init(void *handle)
+ AMD_CG_SUPPORT_SDMA_MGCG |
+ AMD_CG_SUPPORT_SDMA_LS;
+
+- adev->pg_flags = AMD_PG_SUPPORT_SDMA;
++ adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_MMHUB;
+
+ if (adev->powerplay.pp_feature & PP_GFXOFF_MASK)
+ adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5274-drm-amdgpu-enable-vcn-powergating-for-PCO.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5274-drm-amdgpu-enable-vcn-powergating-for-PCO.patch
new file mode 100644
index 00000000..dceb9d2e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5274-drm-amdgpu-enable-vcn-powergating-for-PCO.patch
@@ -0,0 +1,31 @@
+From 5e24e31f25e114183fb51b3de7d13c9224bb73a4 Mon Sep 17 00:00:00 2001
+From: Kenneth Feng <kenneth.feng@amd.com>
+Date: Fri, 24 Aug 2018 16:44:11 +0800
+Subject: [PATCH 5274/5725] drm/amdgpu: enable vcn powergating for PCO
+
+Enable vcn powergating.
+
+Signed-off-by: Kenneth Feng <kenneth.feng@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/soc15.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index 53159f1..7837f0c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -740,7 +740,9 @@ static int soc15_common_early_init(void *handle)
+ AMD_CG_SUPPORT_SDMA_MGCG |
+ AMD_CG_SUPPORT_SDMA_LS;
+
+- adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_MMHUB;
++ adev->pg_flags = AMD_PG_SUPPORT_SDMA |
++ AMD_PG_SUPPORT_MMHUB |
++ AMD_PG_SUPPORT_VCN;
+
+ if (adev->powerplay.pp_feature & PP_GFXOFF_MASK)
+ adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+--
+2.7.4
+
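Patches 5271-5274 build adev->pg_flags for Picasso in stages: SDMA, MMHUB and VCN powergating are always set, while the GFX/CP/RLC bits are added only when the gfxoff feature is enabled in the powerplay feature mask. A small sketch of that composition (the flag values below are invented for illustration; only the gfxoff bit position mirrors the mask documented later in this series):

    #include <stdint.h>

    /* Illustrative powergating bits. */
    #define PG_SUPPORT_SDMA        (1u << 0)
    #define PG_SUPPORT_MMHUB       (1u << 1)
    #define PG_SUPPORT_VCN         (1u << 2)
    #define PG_SUPPORT_GFX_PG      (1u << 3)
    #define PG_SUPPORT_CP          (1u << 4)
    #define PG_SUPPORT_RLC_SMU_HS  (1u << 5)

    #define PP_FEATURE_GFXOFF      (1u << 15)   /* stands in for PP_GFXOFF_MASK */

    static uint32_t picasso_pg_flags(uint32_t pp_feature_mask)
    {
        /* Always-on powergating domains for this ASIC. */
        uint32_t pg = PG_SUPPORT_SDMA | PG_SUPPORT_MMHUB | PG_SUPPORT_VCN;

        /* GFX powergating bits only when gfxoff is enabled in the feature mask. */
        if (pp_feature_mask & PP_FEATURE_GFXOFF)
            pg |= PG_SUPPORT_GFX_PG | PG_SUPPORT_CP | PG_SUPPORT_RLC_SMU_HS;

        return pg;
    }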
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5275-drm-amdgpu-add-ip-blocks-for-picasso-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5275-drm-amdgpu-add-ip-blocks-for-picasso-v2.patch
new file mode 100644
index 00000000..10031352
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5275-drm-amdgpu-add-ip-blocks-for-picasso-v2.patch
@@ -0,0 +1,49 @@
+From d9567f5a260039583642483c40a89455216f0883 Mon Sep 17 00:00:00 2001
+From: Huang Rui <ray.huang@amd.com>
+Date: Mon, 9 Jul 2018 20:00:05 +0800
+Subject: [PATCH 5275/5725] drm/amdgpu: add ip blocks for picasso (v2)
+
+Add PCO IPs.
+
+V2: enable VCN as well
+
+Signed-off-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Likun Gao <Likun.Gao@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/soc15.c | 18 ++++++++++++++++++
+ 1 file changed, 18 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index 7837f0c..e338ad6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -556,6 +556,24 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
+ amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
+ break;
++ case CHIP_PICASSO:
++ amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
++ amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
++ amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
++ amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
++ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
++ if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
++ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
++#if defined(CONFIG_DRM_AMD_DC)
++ else if (amdgpu_device_has_dc_support(adev))
++ amdgpu_device_ip_block_add(adev, &dm_ip_block);
++#else
++# warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
++#endif
++ amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
++ amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
++ amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
++ break;
+ default:
+ return -EINVAL;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5276-drm-amdgpu-add-new-raven-series-device.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5276-drm-amdgpu-add-new-raven-series-device.patch
new file mode 100644
index 00000000..db185c41
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5276-drm-amdgpu-add-new-raven-series-device.patch
@@ -0,0 +1,31 @@
+From 4e553597a217e37b9dff01f45d95b3281737d666 Mon Sep 17 00:00:00 2001
+From: Huang Rui <ray.huang@amd.com>
+Date: Mon, 9 Jul 2018 19:51:19 +0800
+Subject: [PATCH 5276/5725] drm/amdgpu: add new raven series device
+
+Add a new pci device id for the raven series.
+
+Signed-off-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Likun Gao <Likun.Gao@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 887cbd9..a434b2c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -795,6 +795,8 @@ static const struct pci_device_id pciidlist[] = {
+ {0x1002, 0x66AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
+ /* Raven */
+ {0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU},
++ /* Picasso */
++ {0x1002, 0x15d8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PICASSO|AMD_IS_APU},
+
+ {0, 0, 0}
+ };
+--
+2.7.4
+
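Each PCI ID table row above packs the ASIC enum, optionally OR'ed with AMD_IS_APU, into the entry's driver_data, and probe code later splits that value back into chip type and flags. A simplified, standalone sketch of the idea (layout and constants are illustrative, not the amdgpu definitions):

    #include <stdint.h>

    enum chip_type { CHIP_RAVEN = 1, CHIP_PICASSO = 2 };

    /* Illustrative layout: chip type in the low bits, flags above it. */
    #define ASIC_MASK   0x0000ffffu
    #define IS_APU      0x00010000u

    struct id_entry {
        uint16_t vendor, device;
        uint32_t driver_data;           /* chip type | flags */
    };

    static const struct id_entry ids[] = {
        { 0x1002, 0x15dd, CHIP_RAVEN   | IS_APU },
        { 0x1002, 0x15d8, CHIP_PICASSO | IS_APU },
    };

    static enum chip_type entry_chip(const struct id_entry *e)
    {
        return (enum chip_type)(e->driver_data & ASIC_MASK);
    }

    static int entry_is_apu(const struct id_entry *e)
    {
        return (e->driver_data & IS_APU) != 0;
    }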
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5277-drm-amdgpu-enable-gfxoff-in-non-sriov-and-stutter-mo.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5277-drm-amdgpu-enable-gfxoff-in-non-sriov-and-stutter-mo.patch
new file mode 100644
index 00000000..faa44321
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5277-drm-amdgpu-enable-gfxoff-in-non-sriov-and-stutter-mo.patch
@@ -0,0 +1,47 @@
+From 06f34b75563870d1fc7c07eeda212516b6a85517 Mon Sep 17 00:00:00 2001
+From: Kenneth Feng <kenneth.feng@amd.com>
+Date: Thu, 6 Sep 2018 14:56:19 +0800
+Subject: [PATCH 5277/5725] drm/amdgpu: enable gfxoff in non-sriov and stutter
+ mode by default
+
+enable gfxoff in non-sriov and stutter mode by default
+
+Change-Id: I91a5db7cabbfa66d97bfb522a671a12695b51b22
+Signed-off-by: Kenneth Feng <kenneth.feng@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 ++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 4 ++--
+ 2 files changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 44adc15..9fb469d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1542,6 +1542,8 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
+ }
+
+ adev->powerplay.pp_feature = amdgpu_pp_feature_mask;
++ if (amdgpu_sriov_vf(adev))
++ adev->powerplay.pp_feature &= ~PP_GFXOFF_MASK;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index a434b2c..22f4538 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -120,8 +120,8 @@ uint amdgpu_pg_mask = 0xffffffff;
+ uint amdgpu_sdma_phase_quantum = 32;
+ char *amdgpu_disable_cu = NULL;
+ char *amdgpu_virtual_display = NULL;
+-/* OverDrive(bit 14),gfxoff(bit 15),stutter mode(bit 17) disabled by default*/
+-uint amdgpu_pp_feature_mask = 0xfffd3fff;
++/* OverDrive(bit 14) disabled by default*/
++uint amdgpu_pp_feature_mask = 0xffffbfff;
+ int amdgpu_ngg = 0;
+ int amdgpu_prim_buf_per_se = 0;
+ int amdgpu_pos_buf_per_se = 0;
+--
+2.7.4
+
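The default-mask change above is pure bit arithmetic: the old value 0xfffd3fff cleared bits 14 (OverDrive), 15 (gfxoff) and 17 (stutter mode), while the new 0xffffbfff clears only bit 14, which is what leaves gfxoff and stutter mode enabled by default. A quick standalone check that derives both constants from the bit positions:

    #include <assert.h>
    #include <stdint.h>

    #define BIT(n) (1u << (n))

    int main(void)
    {
        uint32_t overdrive = BIT(14), gfxoff = BIT(15), stutter = BIT(17);

        /* Old default: OverDrive, gfxoff and stutter mode all disabled. */
        assert((0xffffffffu & ~(overdrive | gfxoff | stutter)) == 0xfffd3fffu);

        /* New default: only OverDrive stays disabled. */
        assert((0xffffffffu & ~overdrive) == 0xffffbfffu);
        return 0;
    }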
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5278-drm-amdgpu-use-IP-presence-to-free-uvd-and-vce-handl.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5278-drm-amdgpu-use-IP-presence-to-free-uvd-and-vce-handl.patch
new file mode 100644
index 00000000..e35f3cf2
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5278-drm-amdgpu-use-IP-presence-to-free-uvd-and-vce-handl.patch
@@ -0,0 +1,35 @@
+From 912f6eba02d178369b7a993d0cb7e4251008ce8b Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Wed, 12 Sep 2018 13:51:25 -0500
+Subject: [PATCH 5278/5725] drm/amdgpu: use IP presence to free uvd and vce
+ handles
+
+Rather than checking the asic type, check whether the UVD
+or VCE IP blocks exist. This way we don't have to update
+the check with new asics that use VCN.
+
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index 7585182..a47f456 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -994,10 +994,10 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
+
+ pm_runtime_get_sync(dev->dev);
+
+- if (adev->asic_type != CHIP_RAVEN && adev->asic_type != CHIP_PICASSO) {
++ if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD) != NULL)
+ amdgpu_uvd_free_handles(adev, file_priv);
++ if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE) != NULL)
+ amdgpu_vce_free_handles(adev, file_priv);
+- }
+
+ amdgpu_vm_bo_rmv(adev, fpriv->prt_va);
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5279-drm-amdgpu-move-get_rev_id-at-first-before-load-gpu_.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5279-drm-amdgpu-move-get_rev_id-at-first-before-load-gpu_.patch
new file mode 100644
index 00000000..897e2228
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5279-drm-amdgpu-move-get_rev_id-at-first-before-load-gpu_.patch
@@ -0,0 +1,69 @@
+From 163e3edbfeb15c5e67637089e651c325316ddfb6 Mon Sep 17 00:00:00 2001
+From: Huang Rui <ray.huang@amd.com>
+Date: Fri, 15 Jun 2018 16:05:48 -0500
+Subject: [PATCH 5279/5725] drm/amdgpu: move get_rev_id first, before loading
+ gpu_info firmware
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The rev id is used to identify the Raven2 series of chips, so it is better
+to initialize it first.
+
+Change-Id: Id39580fd0ed559bf83640ef64a686064d9a613b0
+Signed-off-by: Huang Rui <ray.huang@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/soc15.c | 13 +++++++------
+ 1 file changed, 7 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index e338ad6..794cfe4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -479,6 +479,11 @@ static const struct amdgpu_ip_block_version vega10_common_ip_block =
+ .funcs = &soc15_common_ip_funcs,
+ };
+
++static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
++{
++ return adev->nbio_funcs->get_rev_id(adev);
++}
++
+ int soc15_set_ip_blocks(struct amdgpu_device *adev)
+ {
+ /* Set IP register base before any HW register access */
+@@ -507,6 +512,8 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
+ adev->df_funcs = &df_v3_6_funcs;
+ else
+ adev->df_funcs = &df_v1_7_funcs;
++
++ adev->rev_id = soc15_get_rev_id(adev);
+ adev->nbio_funcs->detect_hw_virt(adev);
+
+ if (amdgpu_sriov_vf(adev))
+@@ -581,11 +588,6 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
+ return 0;
+ }
+
+-static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
+-{
+- return adev->nbio_funcs->get_rev_id(adev);
+-}
+-
+ static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
+ {
+ adev->nbio_funcs->hdp_flush(adev, ring);
+@@ -642,7 +644,6 @@ static int soc15_common_early_init(void *handle)
+
+ adev->asic_funcs = &soc15_asic_funcs;
+
+- adev->rev_id = soc15_get_rev_id(adev);
+ adev->external_rev_id = 0xFF;
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5280-drm-amdgpu-set-external-rev-id-for-raven2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5280-drm-amdgpu-set-external-rev-id-for-raven2.patch
new file mode 100644
index 00000000..7c4b29d9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5280-drm-amdgpu-set-external-rev-id-for-raven2.patch
@@ -0,0 +1,33 @@
+From c633b6f43155abaaef927d2027ed2f4fdc6d1059 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Fri, 15 Jun 2018 17:28:44 -0500
+Subject: [PATCH 5280/5725] drm/amdgpu: set external rev id for raven2
+
+It's different from raven1.
+
+Signed-off-by: Huang Rui <ray.huang@amd.com>
+Reviewed-by: Feifei Xu <Feifei.Xu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/soc15.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index 794cfe4..0a93560 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -742,7 +742,10 @@ static int soc15_common_early_init(void *handle)
+ AMD_PG_SUPPORT_CP |
+ AMD_PG_SUPPORT_RLC_SMU_HS;
+
+- adev->external_rev_id = 0x1;
++ if (adev->rev_id >= 0x8)
++ adev->external_rev_id = adev->rev_id + 0x81;
++ else
++ adev->external_rev_id = 0x1;
+ break;
+ case CHIP_PICASSO:
+ adev->cg_flags = AMD_CG_SUPPORT_GFX_MGLS |
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5281-drm-amdgpu-add-raven2-to-gpu_info-firmware.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5281-drm-amdgpu-add-raven2-to-gpu_info-firmware.patch
new file mode 100644
index 00000000..28936520
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5281-drm-amdgpu-add-raven2-to-gpu_info-firmware.patch
@@ -0,0 +1,41 @@
+From e526ee97abdfabad7a6df467aa463287b468327b Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Thu, 4 Jan 2018 17:26:00 +0800
+Subject: [PATCH 5281/5725] drm/amdgpu: add raven2 to gpu_info firmware
+
+Add gpu_info firmware for raven2.
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 9fb469d..cb8cd32 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -63,6 +63,7 @@ MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
+ MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
+ MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
+ MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
++MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
+
+ #define AMDGPU_RESUME_MS 2000
+
+@@ -1384,7 +1385,10 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
+ chip_name = "vega12";
+ break;
+ case CHIP_RAVEN:
+- chip_name = "raven";
++ if (adev->rev_id >= 8)
++ chip_name = "raven2";
++ else
++ chip_name = "raven";
+ break;
+ case CHIP_PICASSO:
+ chip_name = "picasso";
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5282-drm-amdgpu-add-raven2-vcn-firmware-support.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5282-drm-amdgpu-add-raven2-vcn-firmware-support.patch
new file mode 100644
index 00000000..07d1acc6
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5282-drm-amdgpu-add-raven2-vcn-firmware-support.patch
@@ -0,0 +1,45 @@
+From 7e1fb90a9f7c9026d36f650b4e74b12ced4c4883 Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Fri, 15 Jun 2018 16:01:41 -0500
+Subject: [PATCH 5282/5725] drm/amdgpu: add raven2 vcn firmware support
+
+Specify the raven2 VCN firmware in amdgpu_vcn_sw_init.
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+index 0102e7e..ca8944e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+@@ -43,9 +43,11 @@
+ /* Firmware Names */
+ #define FIRMWARE_RAVEN "amdgpu/raven_vcn.bin"
+ #define FIRMWARE_PICASSO "amdgpu/picasso_vcn.bin"
++#define FIRMWARE_RAVEN2 "amdgpu/raven2_vcn.bin"
+
+ MODULE_FIRMWARE(FIRMWARE_RAVEN);
+ MODULE_FIRMWARE(FIRMWARE_PICASSO);
++MODULE_FIRMWARE(FIRMWARE_RAVEN2);
+
+ static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
+
+@@ -61,7 +63,10 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
+
+ switch (adev->asic_type) {
+ case CHIP_RAVEN:
+- fw_name = FIRMWARE_RAVEN;
++ if (adev->rev_id >= 8)
++ fw_name = FIRMWARE_RAVEN2;
++ else
++ fw_name = FIRMWARE_RAVEN;
+ break;
+ case CHIP_PICASSO:
+ fw_name = FIRMWARE_PICASSO;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5283-drm-amdgpu-add-psp-support-for-raven2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5283-drm-amdgpu-add-psp-support-for-raven2.patch
new file mode 100644
index 00000000..c4d98ad3
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5283-drm-amdgpu-add-psp-support-for-raven2.patch
@@ -0,0 +1,43 @@
+From fe6d898510192300f1fa96a16b02d19ae6c5c88d Mon Sep 17 00:00:00 2001
+From: Likun Gao <Likun.Gao@amd.com>
+Date: Tue, 5 Jun 2018 14:05:45 +0800
+Subject: [PATCH 5283/5725] drm/amdgpu: add psp support for raven2
+
+Use raven2_asd.bin instead of raven_asd.bin for raven2.
+
+Change-Id: I1cfe49ca09786409daa4d15d3e1eb11bbf5f3805
+Signed-off-by: Likun Gao <Likun.Gao@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/psp_v10_0.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+index 02be34e..1ac597c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+@@ -35,6 +35,8 @@
+ #include "sdma0/sdma0_4_1_offset.h"
+
+ MODULE_FIRMWARE("amdgpu/raven_asd.bin");
++MODULE_FIRMWARE("amdgpu/picasso_asd.bin");
++MODULE_FIRMWARE("amdgpu/raven2_asd.bin");
+
+ static int
+ psp_v10_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *type)
+@@ -111,7 +113,10 @@ static int psp_v10_0_init_microcode(struct psp_context *psp)
+
+ switch (adev->asic_type) {
+ case CHIP_RAVEN:
+- chip_name = "raven";
++ if (adev->rev_id >= 0x8)
++ chip_name = "raven2";
++ else
++ chip_name = "raven";
+ break;
+ default: BUG();
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5284-drm-amdgpu-sdma4-specify-raven2-firmware.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5284-drm-amdgpu-sdma4-specify-raven2-firmware.patch
new file mode 100644
index 00000000..ba1aad23
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5284-drm-amdgpu-sdma4-specify-raven2-firmware.patch
@@ -0,0 +1,41 @@
+From 4e855b4f80aaa72ecc0477991170cc36e73244aa Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Thu, 4 Jan 2018 18:05:35 +0800
+Subject: [PATCH 5284/5725] drm/amdgpu/sdma4: specify raven2 firmware.
+
+Use the raven2 SDMA firmware.
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+index f9bfc71..7aa1c47 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+@@ -49,6 +49,7 @@ MODULE_FIRMWARE("amdgpu/vega20_sdma.bin");
+ MODULE_FIRMWARE("amdgpu/vega20_sdma1.bin");
+ MODULE_FIRMWARE("amdgpu/raven_sdma.bin");
+ MODULE_FIRMWARE("amdgpu/picasso_sdma.bin");
++MODULE_FIRMWARE("amdgpu/raven2_sdma.bin");
+
+ #define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK 0x000000F8L
+ #define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK 0xFC000000L
+@@ -269,7 +270,10 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
+ chip_name = "vega20";
+ break;
+ case CHIP_RAVEN:
+- chip_name = "raven";
++ if (adev->rev_id >= 8)
++ chip_name = "raven2";
++ else
++ chip_name = "raven";
+ break;
+ case CHIP_PICASSO:
+ chip_name = "picasso";
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5285-drm-amdgpu-sdma4-Add-raven2-golden-setting.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5285-drm-amdgpu-sdma4-Add-raven2-golden-setting.patch
new file mode 100644
index 00000000..04b57277
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5285-drm-amdgpu-sdma4-Add-raven2-golden-setting.patch
@@ -0,0 +1,56 @@
+From 1e9ce6c19886890163a626d509e5ce4fc312567c Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Thu, 4 Jan 2018 18:13:41 +0800
+Subject: [PATCH 5285/5725] drm/amdgpu/sdma4: Add raven2 golden setting
+
+Golden register settings from the hw team.
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 21 ++++++++++++++++-----
+ 1 file changed, 16 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+index 7aa1c47..31baa47 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+@@ -185,6 +185,12 @@ static const struct soc15_reg_golden golden_settings_sdma_rv1[] =
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00000002)
+ };
+
++static const struct soc15_reg_golden golden_settings_sdma_rv2[] =
++{
++ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00003001),
++ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00003001)
++};
++
+ static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev,
+ u32 instance, u32 offset)
+ {
+@@ -225,11 +231,16 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
+ case CHIP_RAVEN:
+ case CHIP_PICASSO:
+ soc15_program_register_sequence(adev,
+- golden_settings_sdma_4_1,
+- ARRAY_SIZE(golden_settings_sdma_4_1));
+- soc15_program_register_sequence(adev,
+- golden_settings_sdma_rv1,
+- ARRAY_SIZE(golden_settings_sdma_rv1));
++ golden_settings_sdma_4_1,
++ ARRAY_SIZE(golden_settings_sdma_4_1));
++ if (adev->rev_id >= 8)
++ soc15_program_register_sequence(adev,
++ golden_settings_sdma_rv2,
++ ARRAY_SIZE(golden_settings_sdma_rv2));
++ else
++ soc15_program_register_sequence(adev,
++ golden_settings_sdma_rv1,
++ ARRAY_SIZE(golden_settings_sdma_rv1));
+ break;
+ default:
+ break;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5286-drm-amdgpu-gfx9-add-support-for-raven2-gfx-firmware.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5286-drm-amdgpu-gfx9-add-support-for-raven2-gfx-firmware.patch
new file mode 100644
index 00000000..be3222ce
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5286-drm-amdgpu-gfx9-add-support-for-raven2-gfx-firmware.patch
@@ -0,0 +1,48 @@
+From 7877163a0e2a21811f302c011251b4047af9300f Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Thu, 4 Jan 2018 18:33:49 +0800
+Subject: [PATCH 5286/5725] drm/amdgpu/gfx9: add support for raven2 gfx
+ firmware
+
+Use the raven2 GFX firmware.
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 1cc6b87..3e6f75c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -87,6 +87,13 @@ MODULE_FIRMWARE("amdgpu/picasso_mec.bin");
+ MODULE_FIRMWARE("amdgpu/picasso_mec2.bin");
+ MODULE_FIRMWARE("amdgpu/picasso_rlc.bin");
+
++MODULE_FIRMWARE("amdgpu/raven2_ce.bin");
++MODULE_FIRMWARE("amdgpu/raven2_pfp.bin");
++MODULE_FIRMWARE("amdgpu/raven2_me.bin");
++MODULE_FIRMWARE("amdgpu/raven2_mec.bin");
++MODULE_FIRMWARE("amdgpu/raven2_mec2.bin");
++MODULE_FIRMWARE("amdgpu/raven2_rlc.bin");
++
+ static const struct soc15_reg_golden golden_settings_gc_9_0[] =
+ {
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
+@@ -573,7 +580,10 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
+ chip_name = "vega20";
+ break;
+ case CHIP_RAVEN:
+- chip_name = "raven";
++ if (adev->rev_id >= 8)
++ chip_name = "raven2";
++ else
++ chip_name = "raven";
+ break;
+ case CHIP_PICASSO:
+ chip_name = "picasso";
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5287-drm-amdgpu-gfx9-add-raven2-golden-setting.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5287-drm-amdgpu-gfx9-add-raven2-golden-setting.patch
new file mode 100644
index 00000000..070f31ac
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5287-drm-amdgpu-gfx9-add-raven2-golden-setting.patch
@@ -0,0 +1,89 @@
+From f94da551943cbf478cfcc6d2a60198f40f88fa98 Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Thu, 4 Jan 2018 18:36:40 +0800
+Subject: [PATCH 5287/5725] drm/amdgpu/gfx9: add raven2 golden setting
+
+Golden register settings from the hw team.
+
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 40 ++++++++++++++++++++++++++++++++++-
+ 1 file changed, 39 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 3e6f75c..ad2945e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -187,6 +187,29 @@ static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] =
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x00000800)
+ };
+
++static const struct soc15_reg_golden golden_settings_gc_9_1_rv2[] =
++{
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0xff7fffff, 0x04000000),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x7f0fffff, 0x08000080),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0xff8fffff, 0x08000080),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x7f8fffff, 0x08000080),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x26013041),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x26013041),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0xff0fffff, 0x08000080),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0xff0fffff, 0x08000080),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0xff0fffff, 0x08000080),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0xff0fffff, 0x08000080),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0xff0fffff, 0x08000080),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00000010),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
++};
++
+ static const struct soc15_reg_golden golden_settings_gc_9_x_common[] =
+ {
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000),
+@@ -255,6 +278,7 @@ static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
+ #define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
+ #define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
+ #define PICASSO_GB_ADDR_CONFIG_GOLDEN 0x24000042
++#define RAVEN2_GB_ADDR_CONFIG_GOLDEN 0x26013041
+
+ static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev);
+ static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev);
+@@ -294,6 +318,17 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
+ ARRAY_SIZE(golden_settings_gc_9_0_vg20));
+ break;
+ case CHIP_RAVEN:
++ soc15_program_register_sequence(adev, golden_settings_gc_9_1,
++ ARRAY_SIZE(golden_settings_gc_9_1));
++ if (adev->rev_id >= 8)
++ soc15_program_register_sequence(adev,
++ golden_settings_gc_9_1_rv2,
++ ARRAY_SIZE(golden_settings_gc_9_1_rv2));
++ else
++ soc15_program_register_sequence(adev,
++ golden_settings_gc_9_1_rv1,
++ ARRAY_SIZE(golden_settings_gc_9_1_rv1));
++ break;
+ case CHIP_PICASSO:
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_9_1,
+@@ -1289,7 +1324,10 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
+ adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+ adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+ adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
+- gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
++ if (adev->rev_id >= 8)
++ gb_addr_config = RAVEN2_GB_ADDR_CONFIG_GOLDEN;
++ else
++ gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
+ break;
+ case CHIP_PICASSO:
+ adev->gfx.config.max_hw_contexts = 8;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5288-drm-amd-display-Add-Raven2-definitions-in-dc.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5288-drm-amd-display-Add-Raven2-definitions-in-dc.patch
new file mode 100644
index 00000000..8a68e240
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5288-drm-amd-display-Add-Raven2-definitions-in-dc.patch
@@ -0,0 +1,249 @@
+From 484263d0bd7913eef9c02d0aee39ccb4038e2f8a Mon Sep 17 00:00:00 2001
+From: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Date: Mon, 22 Jan 2018 16:12:27 -0500
+Subject: [PATCH 5288/5725] drm/amd/display: Add Raven2 definitions in dc
+
+Add Raven2 definitions in the dc code
+
+Signed-off-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../amd/display/dc/bios/command_table_helper2.c | 5 +++
+ drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 7 +++++
+ .../gpu/drm/amd/display/dc/dce/dce_clock_source.c | 7 +++++
+ .../gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 36 +++++++++++++++++++++-
+ drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c | 5 +++
+ drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c | 5 +++
+ drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c | 4 +++
+ drivers/gpu/drm/amd/display/include/dal_asic_id.h | 7 +++++
+ drivers/gpu/drm/amd/display/include/dal_types.h | 3 ++
+ 9 files changed, 78 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+index 770ff89..b8aad13 100644
+--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
++++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+@@ -61,6 +61,11 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
+ return true;
+ #endif
+
++#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
++ case DCN_VERSION_1_01:
++ *h = dal_cmd_tbl_helper_dce112_get_table2();
++ return true;
++#endif
+ case DCE_VERSION_12_0:
+ *h = dal_cmd_tbl_helper_dce112_get_table2();
+ return true;
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index 3462ec1..8c6695a 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -88,6 +88,10 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
+ #ifdef CONFIG_X86
+ case FAMILY_RV:
+ dc_version = DCN_VERSION_1_0;
++#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
++ if (ASICREV_IS_RAVEN2(asic_id.hw_internal_rev))
++ dc_version = DCN_VERSION_1_01;
++#endif
+ break;
+ #endif
+ default:
+@@ -138,6 +142,9 @@ struct resource_pool *dc_create_resource_pool(
+
+ #ifdef CONFIG_X86
+ case DCN_VERSION_1_0:
++#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
++ case DCN_VERSION_1_01:
++#endif
+ res_pool = dcn10_create_resource_pool(
+ num_virtual_links, dc);
+ break;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+index 217fab4..8e0a00b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+@@ -601,6 +601,9 @@ static uint32_t dce110_get_pix_clk_dividers(
+ case DCN_VERSION_1_0:
+ #endif
+
++#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
++ case DCN_VERSION_1_01:
++#endif
+ dce112_get_pix_clk_dividers_helper(clk_src,
+ pll_settings, pix_clk_params);
+ break;
+@@ -991,6 +994,10 @@ static bool dce110_program_pix_clk(
+ case DCN_VERSION_1_0:
+ #endif
+
++#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
++ case DCN_VERSION_1_01:
++#endif
++
+ if (clock_source->id != CLOCK_SOURCE_ID_DP_DTO) {
+ bp_pc_params.flags.SET_GENLOCK_REF_DIV_SRC =
+ pll_settings->use_external_clk;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+index 28ebad8..2cc4719 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+@@ -152,7 +152,10 @@ enum dcn10_clk_src_array_id {
+ DCN10_CLK_SRC_PLL1,
+ DCN10_CLK_SRC_PLL2,
+ DCN10_CLK_SRC_PLL3,
+- DCN10_CLK_SRC_TOTAL
++ DCN10_CLK_SRC_TOTAL,
++#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
++ DCN101_CLK_SRC_TOTAL = DCN10_CLK_SRC_PLL3
++#endif
+ };
+
+ /* begin *********************
+@@ -1162,6 +1165,10 @@ static bool construct(
+ /* max pipe num for ASIC before check pipe fuses */
+ pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
+
++#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
++ if (dc->ctx->dce_version == DCN_VERSION_1_01)
++ pool->base.pipe_count = 3;
++#endif
+ dc->caps.max_video_width = 3840;
+ dc->caps.max_downscale_ratio = 200;
+ dc->caps.i2c_speed_in_khz = 100;
+@@ -1193,13 +1200,28 @@ static bool construct(
+ dcn10_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL2,
+ &clk_src_regs[2], false);
++
++#ifdef CONFIG_DRM_AMD_DC_DCN1_01
++ if (dc->ctx->dce_version == DCN_VERSION_1_0) {
++ pool->base.clock_sources[DCN10_CLK_SRC_PLL3] =
++ dcn10_clock_source_create(ctx, ctx->dc_bios,
++ CLOCK_SOURCE_COMBO_PHY_PLL3,
++ &clk_src_regs[3], false);
++ }
++#else
+ pool->base.clock_sources[DCN10_CLK_SRC_PLL3] =
+ dcn10_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL3,
+ &clk_src_regs[3], false);
++#endif
+
+ pool->base.clk_src_count = DCN10_CLK_SRC_TOTAL;
+
++#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
++ if (dc->ctx->dce_version == DCN_VERSION_1_01)
++ pool->base.clk_src_count = DCN101_CLK_SRC_TOTAL;
++#endif
++
+ pool->base.dp_clock_source =
+ dcn10_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_ID_DP_DTO,
+@@ -1245,6 +1267,18 @@ static bool construct(
+ memcpy(dc->dcn_ip, &dcn10_ip_defaults, sizeof(dcn10_ip_defaults));
+ memcpy(dc->dcn_soc, &dcn10_soc_defaults, sizeof(dcn10_soc_defaults));
+
++#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
++ if (dc->ctx->dce_version == DCN_VERSION_1_01) {
++ struct dcn_soc_bounding_box *dcn_soc = dc->dcn_soc;
++ struct dcn_ip_params *dcn_ip = dc->dcn_ip;
++ struct display_mode_lib *dml = &dc->dml;
++
++ dml->ip.max_num_dpp = 3;
++ /* TODO how to handle 23.84? */
++ dcn_soc->dram_clock_change_latency = 23;
++ dcn_ip->max_num_dpp = 3;
++ }
++#endif
+ if (ASICREV_IS_RV1_F0(dc->ctx->asic_id.hw_internal_rev)) {
+ dc->dcn_soc->urgent_latency = 3;
+ dc->debug.disable_dmcu = true;
+diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
+index 83df779..875c0f3 100644
+--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
++++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
+@@ -86,6 +86,11 @@ bool dal_hw_factory_init(
+ dal_hw_factory_dcn10_init(factory);
+ return true;
+ #endif
++#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
++ case DCN_VERSION_1_01:
++ dal_hw_factory_dcn10_init(factory);
++ return true;
++#endif
+
+ default:
+ ASSERT_CRITICAL(false);
+diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
+index e754131..f67ce5a 100644
+--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
++++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
+@@ -83,6 +83,11 @@ bool dal_hw_translate_init(
+ dal_hw_translate_dcn10_init(translate);
+ return true;
+ #endif
++#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
++ case DCN_VERSION_1_01:
++ dal_hw_translate_dcn10_init(translate);
++ return true;
++#endif
+
+ default:
+ BREAK_TO_DEBUGGER();
+diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
+index f7ed355..c3f20fc 100644
+--- a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
++++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
+@@ -96,6 +96,10 @@ struct i2caux *dal_i2caux_create(
+ return dal_i2caux_dcn10_create(ctx);
+ #endif
+
++#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
++ case DCN_VERSION_1_01:
++ return dal_i2caux_dcn10_create(ctx);
++#endif
+ default:
+ BREAK_TO_DEBUGGER();
+ return NULL;
+diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+index 25029ed..4f501dd 100644
+--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
++++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+@@ -131,8 +131,15 @@
+ #define INTERNAL_REV_RAVEN_A0 0x00 /* First spin of Raven */
+ #define RAVEN_A0 0x01
+ #define RAVEN_B0 0x21
++#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
++/* DCN1_01 */
++#define RAVEN2_A0 0x81
++#endif
+ #define RAVEN_UNKNOWN 0xFF
+
++#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
++#define ASICREV_IS_RAVEN2(eChipRev) ((eChipRev >= RAVEN2_A0) && (eChipRev < 0xF0))
++#endif /* DCN1_01 */
+ #define ASIC_REV_IS_RAVEN(eChipRev) ((eChipRev >= RAVEN_A0) && eChipRev < RAVEN_UNKNOWN)
+ #define RAVEN1_F0 0xF0
+ #define ASICREV_IS_RV1_F0(eChipRev) ((eChipRev >= RAVEN1_F0) && (eChipRev < RAVEN_UNKNOWN))
+diff --git a/drivers/gpu/drm/amd/display/include/dal_types.h b/drivers/gpu/drm/amd/display/include/dal_types.h
+index 840142b..8962713 100644
+--- a/drivers/gpu/drm/amd/display/include/dal_types.h
++++ b/drivers/gpu/drm/amd/display/include/dal_types.h
+@@ -44,6 +44,9 @@ enum dce_version {
+ DCE_VERSION_12_0,
+ DCE_VERSION_MAX,
+ DCN_VERSION_1_0,
++#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
++ DCN_VERSION_1_01,
++#endif /* DCN1_01 */
+ DCN_VERSION_MAX
+ };
+
+--
+2.7.4
+
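A small stand-alone sketch of the dispatch pattern patch 5288 applies across the DC code: a new enum value is introduced behind a build-time option, and each switch that already handles DCN_VERSION_1_0 gains a guarded case that reuses the same DCN10 implementation. All FAKE_* names below are invented for illustration; only the guarded-case shape mirrors the patch.

/* Illustration only: FAKE_* names are made up; the guarded-case dispatch
 * mirrors how DCN_VERSION_1_01 is routed to the DCN10 code paths above. */
#include <stdio.h>

#define FAKE_CONFIG_DCN1_01 1 /* stands in for CONFIG_DRM_AMD_DC_DCN1_01 */

enum fake_dce_version {
	FAKE_DCN_VERSION_1_0,
#if FAKE_CONFIG_DCN1_01
	FAKE_DCN_VERSION_1_01, /* value only exists when the option is enabled */
#endif
	FAKE_DCN_VERSION_MAX
};

static const char *pick_backend(enum fake_dce_version v)
{
	switch (v) {
	case FAKE_DCN_VERSION_1_0:
#if FAKE_CONFIG_DCN1_01
	case FAKE_DCN_VERSION_1_01: /* raven2 reuses the DCN10 implementation */
#endif
		return "dcn10";
	default:
		return "unsupported";
	}
}

int main(void)
{
	printf("%s\n", pick_backend(FAKE_DCN_VERSION_1_0));
#if FAKE_CONFIG_DCN1_01
	printf("%s\n", pick_backend(FAKE_DCN_VERSION_1_01));
#endif
	return 0;
}
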
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5289-drm-amd-display-Add-DC-config-flag-for-Raven2-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5289-drm-amd-display-Add-DC-config-flag-for-Raven2-v2.patch
new file mode 100644
index 00000000..59b98e2b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5289-drm-amd-display-Add-DC-config-flag-for-Raven2-v2.patch
@@ -0,0 +1,51 @@
+From 19d5cf8af2b5b1f5d494451536790dba7d6d9f79 Mon Sep 17 00:00:00 2001
+From: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Date: Mon, 22 Jan 2018 17:40:50 -0500
+Subject: [PATCH 5289/5725] drm/amd/display: Add DC config flag for Raven2 (v2)
+
+Add DRM_AMD_DC_DCN1_01 config flag for Raven2
+
+v2: Make DC select DRM_AMD_DC_DCN1_01 (Alex)
+
+Change-Id: Ifdc0114678612a522c035bffd9809b3fc1caa1a7
+Signed-off-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/display/Kconfig | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
+index 6a824ac..27a0db5 100644
+--- a/drivers/gpu/drm/amd/display/Kconfig
++++ b/drivers/gpu/drm/amd/display/Kconfig
+@@ -4,11 +4,23 @@ menu "Display Engine Configuration"
+ config DRM_AMD_DC
+ bool "AMD DC - Enable new display engine"
+ default y
++ select DRM_AMD_DC_DCN1_0 if X86 && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)
++ select DRM_AMD_DC_DCN1_01 if X86 && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)
+ help
+ Choose this option if you want to use the new display engine
+ support for AMDGPU.This adds required support for Vega and
+ Raven ASICs.
+
++config DRM_AMD_DC_DCN1_0
++ def_bool n
++ help
++ RV family support for display engine
++
++config DRM_AMD_DC_DCN1_01
++ def_bool n
++ help
++ RV2 family for display engine
++
+ config DEBUG_KERNEL_DC
+ bool "Enable kgdb break in DC"
+ depends on DRM_AMD_DC
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5290-drm-amd-powerplay-update-smu10_verify_smc-to-raven2-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5290-drm-amd-powerplay-update-smu10_verify_smc-to-raven2-.patch
new file mode 100644
index 00000000..52dc50c4
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5290-drm-amd-powerplay-update-smu10_verify_smc-to-raven2-.patch
@@ -0,0 +1,34 @@
+From ca13e30f3cfc6165bb7787e09308925b000b7f2a Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Fri, 15 Jun 2018 17:22:38 -0500
+Subject: [PATCH 5290/5725] drm/amd/powerplay: update smu10_verify_smc* to
+ be raven2 compatible
+
+Check the raven2 version number as well.
+
+Change-Id: Ic6a6439eb849c90b57a4df4e58fea372a3ac5bcc
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
+index bb07d43..6f961de 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
+@@ -177,7 +177,8 @@ static int smu10_verify_smc_interface(struct pp_hwmgr *hwmgr)
+ PPSMC_MSG_GetDriverIfVersion);
+ smc_driver_if_version = smu10_read_arg_from_smc(hwmgr);
+
+- if (smc_driver_if_version != SMU10_DRIVER_IF_VERSION) {
++ if ((smc_driver_if_version != SMU10_DRIVER_IF_VERSION) &&
++ (smc_driver_if_version != SMU10_DRIVER_IF_VERSION + 1)) {
+ pr_err("Attempt to read SMC IF Version Number Failed!\n");
+ return -EINVAL;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5291-drm-amd-powerplay-round-up-the-Mhz-convertion-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5291-drm-amd-powerplay-round-up-the-Mhz-convertion-v2.patch
new file mode 100644
index 00000000..5cb67ffe
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5291-drm-amd-powerplay-round-up-the-Mhz-convertion-v2.patch
@@ -0,0 +1,45 @@
+From 36bf40d897e7ca3bf4718214ae6fb1e8dec27fac Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Tue, 19 Jun 2018 10:32:50 -0500
+Subject: [PATCH 5291/5725] drm/amd/powerplay: round up the MHz conversion (v2)
+
+The clock value passed in may be something like 29999 (in 10KHz units), so round up rather than truncate when converting to MHz.
+
+v2: rebase (Alex)
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+index a63e006..a9c54f7 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+@@ -211,12 +211,18 @@ static int smu10_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input)
+ return 0;
+ }
+
++static inline uint32_t convert_10k_to_mhz(uint32_t clock)
++{
++ return (clock + 99) / 100;
++}
++
+ static int smu10_set_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
+ {
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+
+- if (smu10_data->need_min_deep_sleep_dcefclk && smu10_data->deep_sleep_dcefclk != clock/100) {
+- smu10_data->deep_sleep_dcefclk = clock/100;
++ if (smu10_data->need_min_deep_sleep_dcefclk &&
++ smu10_data->deep_sleep_dcefclk != convert_10k_to_mhz(clock)) {
++ smu10_data->deep_sleep_dcefclk = convert_10k_to_mhz(clock);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetMinDeepSleepDcefclk,
+ smu10_data->deep_sleep_dcefclk);
+--
+2.7.4
+
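A quick worked example of why the helper above rounds up (stand-alone C, made up for illustration; only convert_10k_to_mhz() mirrors the patch): a request of 29999 in 10KHz units is 299.99MHz, which plain integer division would silently lower to 299MHz.

#include <stdio.h>
#include <stdint.h>

/* Mirrors the convert_10k_to_mhz() helper added by the patch above. */
static inline uint32_t convert_10k_to_mhz(uint32_t clock)
{
	return (clock + 99) / 100; /* 10KHz units -> MHz, rounded up */
}

int main(void)
{
	uint32_t clock = 29999; /* 299.99MHz expressed in 10KHz units */

	printf("truncated : %u MHz\n", clock / 100);               /* 299 */
	printf("rounded up: %u MHz\n", convert_10k_to_mhz(clock)); /* 300 */
	return 0;
}
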
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5292-drm-amd-powerplay-disable-raven2-force-dpm-level-sup.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5292-drm-amd-powerplay-disable-raven2-force-dpm-level-sup.patch
new file mode 100644
index 00000000..5fc44097
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5292-drm-amd-powerplay-disable-raven2-force-dpm-level-sup.patch
@@ -0,0 +1,43 @@
+From ee4c2bfa340cd93eb31b9b1e17a5d361ba0367fb Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Tue, 19 Jun 2018 10:41:00 -0500
+Subject: [PATCH 5292/5725] drm/amd/powerplay: disable raven2 force dpm level
+ support (v2)
+
+It's not supported yet.
+
+v2: rebase (Alex)
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+index a9c54f7..1e800c1 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+@@ -551,12 +551,18 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
+ enum amd_dpm_forced_level level)
+ {
+ struct smu10_hwmgr *data = hwmgr->backend;
++ struct amdgpu_device *adev = hwmgr->adev;
+
+ if (hwmgr->smu_version < 0x1E3700) {
+ pr_info("smu firmware version too old, can not set dpm level\n");
+ return 0;
+ }
+
++ /* Disable UMDPSTATE support on rv2 temporarily */
++ if ((adev->asic_type == CHIP_RAVEN) &&
++ (adev->rev_id >= 8))
++ return 0;
++
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_HIGH:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5293-drm-amdgpu-set-CG-flags-for-raven2-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5293-drm-amdgpu-set-CG-flags-for-raven2-v2.patch
new file mode 100644
index 00000000..5affddcc
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5293-drm-amdgpu-set-CG-flags-for-raven2-v2.patch
@@ -0,0 +1,87 @@
+From da3ce2a9c146572fbb27bbc9cf4e7c272f9f0bcb Mon Sep 17 00:00:00 2001
+From: Huang Rui <ray.huang@amd.com>
+Date: Tue, 19 Jun 2018 10:46:42 -0500
+Subject: [PATCH 5293/5725] drm/amdgpu: set CG flags for raven2 (v2)
+
+Raven2 does not enable all of the CG flags that raven1 does.
+
+v2: rebase (Alex)
+
+Signed-off-by: Huang Rui <ray.huang@amd.com>
+Reviewed-by: Feifei Xu <Feifei.Xu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/soc15.c | 57 +++++++++++++++++++++++++-------------
+ 1 file changed, 37 insertions(+), 20 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index 0a93560..f5a44d1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -714,26 +714,43 @@ static int soc15_common_early_init(void *handle)
+ adev->external_rev_id = adev->rev_id + 0x28;
+ break;
+ case CHIP_RAVEN:
+- adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+- AMD_CG_SUPPORT_GFX_MGLS |
+- AMD_CG_SUPPORT_GFX_RLC_LS |
+- AMD_CG_SUPPORT_GFX_CP_LS |
+- AMD_CG_SUPPORT_GFX_3D_CGCG |
+- AMD_CG_SUPPORT_GFX_3D_CGLS |
+- AMD_CG_SUPPORT_GFX_CGCG |
+- AMD_CG_SUPPORT_GFX_CGLS |
+- AMD_CG_SUPPORT_BIF_MGCG |
+- AMD_CG_SUPPORT_BIF_LS |
+- AMD_CG_SUPPORT_HDP_MGCG |
+- AMD_CG_SUPPORT_HDP_LS |
+- AMD_CG_SUPPORT_DRM_MGCG |
+- AMD_CG_SUPPORT_DRM_LS |
+- AMD_CG_SUPPORT_ROM_MGCG |
+- AMD_CG_SUPPORT_MC_MGCG |
+- AMD_CG_SUPPORT_MC_LS |
+- AMD_CG_SUPPORT_SDMA_MGCG |
+- AMD_CG_SUPPORT_SDMA_LS |
+- AMD_CG_SUPPORT_VCN_MGCG;
++ if (adev->rev_id >= 0x8)
++ adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
++ AMD_CG_SUPPORT_GFX_MGLS |
++ AMD_CG_SUPPORT_GFX_CP_LS |
++ AMD_CG_SUPPORT_GFX_3D_CGCG |
++ AMD_CG_SUPPORT_GFX_3D_CGLS |
++ AMD_CG_SUPPORT_GFX_CGCG |
++ AMD_CG_SUPPORT_GFX_CGLS |
++ AMD_CG_SUPPORT_BIF_LS |
++ AMD_CG_SUPPORT_HDP_LS |
++ AMD_CG_SUPPORT_ROM_MGCG |
++ AMD_CG_SUPPORT_MC_MGCG |
++ AMD_CG_SUPPORT_MC_LS |
++ AMD_CG_SUPPORT_SDMA_MGCG |
++ AMD_CG_SUPPORT_SDMA_LS |
++ AMD_CG_SUPPORT_VCN_MGCG;
++ else
++ adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
++ AMD_CG_SUPPORT_GFX_MGLS |
++ AMD_CG_SUPPORT_GFX_RLC_LS |
++ AMD_CG_SUPPORT_GFX_CP_LS |
++ AMD_CG_SUPPORT_GFX_3D_CGCG |
++ AMD_CG_SUPPORT_GFX_3D_CGLS |
++ AMD_CG_SUPPORT_GFX_CGCG |
++ AMD_CG_SUPPORT_GFX_CGLS |
++ AMD_CG_SUPPORT_BIF_MGCG |
++ AMD_CG_SUPPORT_BIF_LS |
++ AMD_CG_SUPPORT_HDP_MGCG |
++ AMD_CG_SUPPORT_HDP_LS |
++ AMD_CG_SUPPORT_DRM_MGCG |
++ AMD_CG_SUPPORT_DRM_LS |
++ AMD_CG_SUPPORT_ROM_MGCG |
++ AMD_CG_SUPPORT_MC_MGCG |
++ AMD_CG_SUPPORT_MC_LS |
++ AMD_CG_SUPPORT_SDMA_MGCG |
++ AMD_CG_SUPPORT_SDMA_LS |
++ AMD_CG_SUPPORT_VCN_MGCG;
+
+ adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5294-drm-amdgpu-Initialize-fences-array-entries-in-amdgpu.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5294-drm-amdgpu-Initialize-fences-array-entries-in-amdgpu.patch
new file mode 100644
index 00000000..2cbc087b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5294-drm-amdgpu-Initialize-fences-array-entries-in-amdgpu.patch
@@ -0,0 +1,50 @@
+From 73876fe5946f2a57a091d3b22b9f36793df7e08c Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Michel=20D=C3=A4nzer?= <michel.daenzer@amd.com>
+Date: Wed, 12 Sep 2018 18:07:10 +0200
+Subject: [PATCH 5294/5725] drm/amdgpu: Initialize fences array entries in
+ amdgpu_sa
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The entries were only initialized once in amdgpu_sa_bo_new. If a fence
+wasn't signalled yet in the first amdgpu_sa_bo_next_hole call, but then
+got signalled before a later amdgpu_sa_bo_next_hole call, it could
+destroy the fence but leave its pointer in the array, resulting in
+use-after-free in amdgpu_sa_bo_new.
+
+Change-Id: I8f60093cc0362f9726cf152f9f0431b785da743e
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+index fb1667b..12f2bf9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+@@ -226,6 +226,8 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
+ for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) {
+ struct amdgpu_sa_bo *sa_bo;
+
++ fences[i] = NULL;
++
+ if (list_empty(&sa_manager->flist[i]))
+ continue;
+
+@@ -296,10 +298,8 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
+
+ spin_lock(&sa_manager->wq.lock);
+ do {
+- for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) {
+- fences[i] = NULL;
++ for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
+ tries[i] = 0;
+- }
+
+ do {
+ amdgpu_sa_bo_try_free(sa_manager);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5295-drm-amdgpu-soc15-clean-up-picasso-support.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5295-drm-amdgpu-soc15-clean-up-picasso-support.patch
new file mode 100644
index 00000000..dfc19c99
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5295-drm-amdgpu-soc15-clean-up-picasso-support.patch
@@ -0,0 +1,44 @@
+From 143900bfbbcd185e6a546cf1bc6af879307ee06d Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 13 Sep 2018 15:05:22 -0500
+Subject: [PATCH 5295/5725] drm/amdgpu/soc15: clean up picasso support
+
+It's the same as raven so remove the duplicate case.
+
+Acked-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/soc15.c | 17 -----------------
+ 1 file changed, 17 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index f5a44d1..f930e09 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -546,23 +546,6 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
+ amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
+ break;
+ case CHIP_RAVEN:
+- amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
+- amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
+- amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
+- amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
+- amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+- if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
+- amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+-#if defined(CONFIG_DRM_AMD_DC)
+- else if (amdgpu_device_has_dc_support(adev))
+- amdgpu_device_ip_block_add(adev, &dm_ip_block);
+-#else
+-# warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
+-#endif
+- amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
+- amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
+- amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
+- break;
+ case CHIP_PICASSO:
+ amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5296-drm-amdgpu-simplify-Raven-Raven2-and-Picasso-handlin.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5296-drm-amdgpu-simplify-Raven-Raven2-and-Picasso-handlin.patch
new file mode 100644
index 00000000..a1643580
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5296-drm-amdgpu-simplify-Raven-Raven2-and-Picasso-handlin.patch
@@ -0,0 +1,511 @@
+From 225d8edc0aab6d021b23055e645c496c78a2ebb4 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 13 Sep 2018 15:41:57 -0500
+Subject: [PATCH 5296/5725] drm/amdgpu: simplify Raven, Raven2, and Picasso
+ handling
+
+Treat them all as Raven rather than adding a new picasso
+asic type. This simplifies a lot of code and also handles the
+case of rv2 chips with the 0x15d8 pci id. It also fixes dmcu
+fw handling for picasso.
+
+Acked-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 9 ++--
+ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 3 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 1 -
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 7 ++--
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 32 +++------------
+ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 4 --
+ drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 11 +++--
+ drivers/gpu/drm/amd/amdgpu/psp_v10_0.c | 2 +
+ drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 11 ++---
+ drivers/gpu/drm/amd/amdgpu/soc15.c | 66 ++++++++++++++----------------
+ 10 files changed, 52 insertions(+), 94 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index cb8cd32..4701240 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -91,7 +91,6 @@ static const char *amdgpu_asic_name[] = {
+ "VEGA12",
+ "VEGA20",
+ "RAVEN",
+- "PICASSO",
+ "LAST",
+ };
+
+@@ -1387,12 +1386,11 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
+ case CHIP_RAVEN:
+ if (adev->rev_id >= 8)
+ chip_name = "raven2";
++ else if (adev->pdev->device == 0x15d8)
++ chip_name = "picasso";
+ else
+ chip_name = "raven";
+ break;
+- case CHIP_PICASSO:
+- chip_name = "picasso";
+- break;
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
+@@ -1518,8 +1516,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
+ case CHIP_VEGA12:
+ case CHIP_VEGA20:
+ case CHIP_RAVEN:
+- case CHIP_PICASSO:
+- if ((adev->asic_type == CHIP_RAVEN) || (adev->asic_type == CHIP_PICASSO))
++ if (adev->asic_type == CHIP_RAVEN)
+ adev->family = AMDGPU_FAMILY_RV;
+ else
+ adev->family = AMDGPU_FAMILY_AI;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 22f4538..8556192 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -795,8 +795,7 @@ static const struct pci_device_id pciidlist[] = {
+ {0x1002, 0x66AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
+ /* Raven */
+ {0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU},
+- /* Picasso */
+- {0x1002, 0x15d8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PICASSO|AMD_IS_APU},
++ {0x1002, 0x15d8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU},
+
+ {0, 0, 0}
+ };
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+index fcf260d..8777dad 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+@@ -303,7 +303,6 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
+ return AMDGPU_FW_LOAD_SMU;
+ case CHIP_VEGA10:
+ case CHIP_RAVEN:
+- case CHIP_PICASSO:
+ case CHIP_VEGA12:
+ case CHIP_VEGA20:
+ if (!load_type)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+index ca8944e..86b1627 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+@@ -63,14 +63,13 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
+
+ switch (adev->asic_type) {
+ case CHIP_RAVEN:
+- if (adev->rev_id >= 8)
++ if (adev->rev_id >= 8)
+ fw_name = FIRMWARE_RAVEN2;
++ else if (adev->pdev->device == 0x15d8)
++ fw_name = FIRMWARE_PICASSO;
+ else
+ fw_name = FIRMWARE_RAVEN;
+ break;
+- case CHIP_PICASSO:
+- fw_name = FIRMWARE_PICASSO;
+- break;
+ default:
+ return -EINVAL;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index ad2945e..e040c87 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -277,7 +277,6 @@ static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
+ #define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
+ #define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
+ #define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
+-#define PICASSO_GB_ADDR_CONFIG_GOLDEN 0x24000042
+ #define RAVEN2_GB_ADDR_CONFIG_GOLDEN 0x26013041
+
+ static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev);
+@@ -329,14 +328,6 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
+ golden_settings_gc_9_1_rv1,
+ ARRAY_SIZE(golden_settings_gc_9_1_rv1));
+ break;
+- case CHIP_PICASSO:
+- soc15_program_register_sequence(adev,
+- golden_settings_gc_9_1,
+- ARRAY_SIZE(golden_settings_gc_9_1));
+- soc15_program_register_sequence(adev,
+- golden_settings_gc_9_1_rv1,
+- ARRAY_SIZE(golden_settings_gc_9_1_rv1));
+- break;
+ default:
+ break;
+ }
+@@ -617,12 +608,11 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
+ case CHIP_RAVEN:
+ if (adev->rev_id >= 8)
+ chip_name = "raven2";
++ else if (adev->pdev->device == 0x15d8)
++ chip_name = "picasso";
+ else
+ chip_name = "raven";
+ break;
+- case CHIP_PICASSO:
+- chip_name = "picasso";
+- break;
+ default:
+ BUG();
+ }
+@@ -1076,7 +1066,7 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
+ amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+ }
+
+- if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_PICASSO) {
++ if (adev->asic_type == CHIP_RAVEN) {
+ /* TODO: double check the cp_table_size for RV */
+ adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
+ r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
+@@ -1329,14 +1319,6 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
+ else
+ gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
+ break;
+- case CHIP_PICASSO:
+- adev->gfx.config.max_hw_contexts = 8;
+- adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+- adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+- adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+- adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
+- gb_addr_config = PICASSO_GB_ADDR_CONFIG_GOLDEN;
+- break;
+ default:
+ BUG();
+ break;
+@@ -1615,7 +1597,6 @@ static int gfx_v9_0_sw_init(void *handle)
+ case CHIP_VEGA12:
+ case CHIP_VEGA20:
+ case CHIP_RAVEN:
+- case CHIP_PICASSO:
+ adev->gfx.mec.num_mec = 2;
+ break;
+ default:
+@@ -1777,7 +1758,7 @@ static int gfx_v9_0_sw_fini(void *handle)
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
+ &adev->gfx.rlc.clear_state_gpu_addr,
+ (void **)&adev->gfx.rlc.cs_ptr);
+- if ((adev->asic_type == CHIP_RAVEN) || (adev->asic_type == CHIP_PICASSO)) {
++ if (adev->asic_type == CHIP_RAVEN) {
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
+ &adev->gfx.rlc.cp_table_gpu_addr,
+ (void **)&adev->gfx.rlc.cp_table_ptr);
+@@ -2447,7 +2428,7 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
+ return r;
+ }
+
+- if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_PICASSO) {
++ if (adev->asic_type == CHIP_RAVEN) {
+ if (amdgpu_lbpw != 0)
+ gfx_v9_0_enable_lbpw(adev, true);
+ else
+@@ -3851,7 +3832,6 @@ static int gfx_v9_0_set_powergating_state(void *handle,
+
+ switch (adev->asic_type) {
+ case CHIP_RAVEN:
+- case CHIP_PICASSO:
+ if (!enable) {
+ amdgpu_gfx_off_ctrl(adev, false);
+ cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
+@@ -3906,7 +3886,6 @@ static int gfx_v9_0_set_clockgating_state(void *handle,
+ case CHIP_VEGA12:
+ case CHIP_VEGA20:
+ case CHIP_RAVEN:
+- case CHIP_PICASSO:
+ gfx_v9_0_update_gfx_clock_gating(adev,
+ state == AMD_CG_STATE_GATE ? true : false);
+ break;
+@@ -4926,7 +4905,6 @@ static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
+ case CHIP_VEGA12:
+ case CHIP_VEGA20:
+ case CHIP_RAVEN:
+- case CHIP_PICASSO:
+ adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
+ break;
+ default:
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index 0cc4fcf..5b04b45 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -837,7 +837,6 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
+ adev->gmc.gart_size = 512ULL << 20;
+ break;
+ case CHIP_RAVEN: /* DCE SG support */
+- case CHIP_PICASSO: /* DCE SG support */
+ adev->gmc.gart_size = 1024ULL << 20;
+ break;
+ }
+@@ -926,7 +925,6 @@ static int gmc_v9_0_sw_init(void *handle)
+ adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev);
+ switch (adev->asic_type) {
+ case CHIP_RAVEN:
+- case CHIP_PICASSO:
+ if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
+ amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
+ } else {
+@@ -1058,7 +1056,6 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
+ case CHIP_VEGA12:
+ break;
+ case CHIP_RAVEN:
+- case CHIP_PICASSO:
+ soc15_program_register_sequence(adev,
+ golden_settings_athub_1_0_0,
+ ARRAY_SIZE(golden_settings_athub_1_0_0));
+@@ -1093,7 +1090,6 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
+
+ switch (adev->asic_type) {
+ case CHIP_RAVEN:
+- case CHIP_PICASSO:
+ mmhub_v1_0_update_power_gating(adev, true);
+ break;
+ default:
+diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+index 59b67fa..63fec50 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+@@ -394,7 +394,7 @@ static void mmhub_v1_0_update_medium_grain_clock_gating(struct amdgpu_device *ad
+
+ def = data = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG);
+
+- if (adev->asic_type != CHIP_RAVEN && adev->asic_type != CHIP_PICASSO) {
++ if (adev->asic_type != CHIP_RAVEN) {
+ def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
+ def2 = data2 = RREG32_SOC15(MMHUB, 0, mmDAGB1_CNTL_MISC2);
+ } else
+@@ -410,7 +410,7 @@ static void mmhub_v1_0_update_medium_grain_clock_gating(struct amdgpu_device *ad
+ DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
+
+- if (adev->asic_type != CHIP_RAVEN && adev->asic_type != CHIP_PICASSO)
++ if (adev->asic_type != CHIP_RAVEN)
+ data2 &= ~(DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
+ DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
+ DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
+@@ -427,7 +427,7 @@ static void mmhub_v1_0_update_medium_grain_clock_gating(struct amdgpu_device *ad
+ DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
+
+- if (adev->asic_type != CHIP_RAVEN && adev->asic_type != CHIP_PICASSO)
++ if (adev->asic_type != CHIP_RAVEN)
+ data2 |= (DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
+ DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
+ DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
+@@ -440,13 +440,13 @@ static void mmhub_v1_0_update_medium_grain_clock_gating(struct amdgpu_device *ad
+ WREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG, data);
+
+ if (def1 != data1) {
+- if (adev->asic_type != CHIP_RAVEN && adev->asic_type != CHIP_PICASSO)
++ if (adev->asic_type != CHIP_RAVEN)
+ WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2, data1);
+ else
+ WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_RV, data1);
+ }
+
+- if (adev->asic_type != CHIP_RAVEN && adev->asic_type != CHIP_PICASSO && def2 != data2)
++ if (adev->asic_type != CHIP_RAVEN && def2 != data2)
+ WREG32_SOC15(MMHUB, 0, mmDAGB1_CNTL_MISC2, data2);
+ }
+
+@@ -510,7 +510,6 @@ int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
+ case CHIP_VEGA12:
+ case CHIP_VEGA20:
+ case CHIP_RAVEN:
+- case CHIP_PICASSO:
+ mmhub_v1_0_update_medium_grain_clock_gating(adev,
+ state == AMD_CG_STATE_GATE ? true : false);
+ athub_update_medium_grain_clock_gating(adev,
+diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+index 1ac597c..45f9322 100644
+--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+@@ -115,6 +115,8 @@ static int psp_v10_0_init_microcode(struct psp_context *psp)
+ case CHIP_RAVEN:
+ if (adev->rev_id >= 0x8)
+ chip_name = "raven2";
++ else if (adev->pdev->device == 0x15d8)
++ chip_name = "picasso";
+ else
+ chip_name = "raven";
+ break;
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+index 31baa47..ff93ef6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+@@ -229,7 +229,6 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
+ ARRAY_SIZE(golden_settings_sdma1_4_2));
+ break;
+ case CHIP_RAVEN:
+- case CHIP_PICASSO:
+ soc15_program_register_sequence(adev,
+ golden_settings_sdma_4_1,
+ ARRAY_SIZE(golden_settings_sdma_4_1));
+@@ -283,12 +282,11 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
+ case CHIP_RAVEN:
+ if (adev->rev_id >= 8)
+ chip_name = "raven2";
++ else if (adev->pdev->device == 0x15d8)
++ chip_name = "picasso";
+ else
+ chip_name = "raven";
+ break;
+- case CHIP_PICASSO:
+- chip_name = "picasso";
+- break;
+ default:
+ BUG();
+ }
+@@ -859,7 +857,6 @@ static void sdma_v4_0_init_pg(struct amdgpu_device *adev)
+
+ switch (adev->asic_type) {
+ case CHIP_RAVEN:
+- case CHIP_PICASSO:
+ sdma_v4_1_init_power_gating(adev);
+ sdma_v4_1_update_power_gating(adev, true);
+ break;
+@@ -1281,7 +1278,7 @@ static int sdma_v4_0_early_init(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+- if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_PICASSO)
++ if (adev->asic_type == CHIP_RAVEN)
+ adev->sdma.num_instances = 1;
+ else
+ adev->sdma.num_instances = 2;
+@@ -1630,7 +1627,6 @@ static int sdma_v4_0_set_clockgating_state(void *handle,
+ case CHIP_VEGA12:
+ case CHIP_VEGA20:
+ case CHIP_RAVEN:
+- case CHIP_PICASSO:
+ sdma_v4_0_update_medium_grain_clock_gating(adev,
+ state == AMD_CG_STATE_GATE ? true : false);
+ sdma_v4_0_update_medium_grain_light_sleep(adev,
+@@ -1649,7 +1645,6 @@ static int sdma_v4_0_set_powergating_state(void *handle,
+
+ switch (adev->asic_type) {
+ case CHIP_RAVEN:
+- case CHIP_PICASSO:
+ sdma_v4_1_update_power_gating(adev,
+ state == AMD_PG_STATE_GATE ? true : false);
+ break;
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index f930e09..c4daf1f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -491,7 +491,6 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
+ case CHIP_RAVEN:
+- case CHIP_PICASSO:
+ vega10_reg_base_init(adev);
+ break;
+ case CHIP_VEGA20:
+@@ -546,7 +545,6 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
+ amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
+ break;
+ case CHIP_RAVEN:
+- case CHIP_PICASSO:
+ amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
+@@ -698,6 +696,13 @@ static int soc15_common_early_init(void *handle)
+ break;
+ case CHIP_RAVEN:
+ if (adev->rev_id >= 0x8)
++ adev->external_rev_id = adev->rev_id + 0x81;
++ else if (adev->pdev->device == 0x15d8)
++ adev->external_rev_id = adev->rev_id + 0x41;
++ else
++ adev->external_rev_id = 0x1;
++
++ if (adev->rev_id >= 0x8) {
+ adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_MGLS |
+ AMD_CG_SUPPORT_GFX_CP_LS |
+@@ -713,7 +718,27 @@ static int soc15_common_early_init(void *handle)
+ AMD_CG_SUPPORT_SDMA_MGCG |
+ AMD_CG_SUPPORT_SDMA_LS |
+ AMD_CG_SUPPORT_VCN_MGCG;
+- else
++
++ adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
++ } else if (adev->pdev->device == 0x15d8) {
++ adev->cg_flags = AMD_CG_SUPPORT_GFX_MGLS |
++ AMD_CG_SUPPORT_GFX_CP_LS |
++ AMD_CG_SUPPORT_GFX_3D_CGCG |
++ AMD_CG_SUPPORT_GFX_3D_CGLS |
++ AMD_CG_SUPPORT_GFX_CGCG |
++ AMD_CG_SUPPORT_GFX_CGLS |
++ AMD_CG_SUPPORT_BIF_LS |
++ AMD_CG_SUPPORT_HDP_LS |
++ AMD_CG_SUPPORT_ROM_MGCG |
++ AMD_CG_SUPPORT_MC_MGCG |
++ AMD_CG_SUPPORT_MC_LS |
++ AMD_CG_SUPPORT_SDMA_MGCG |
++ AMD_CG_SUPPORT_SDMA_LS;
++
++ adev->pg_flags = AMD_PG_SUPPORT_SDMA |
++ AMD_PG_SUPPORT_MMHUB |
++ AMD_PG_SUPPORT_VCN;
++ } else {
+ adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_MGLS |
+ AMD_CG_SUPPORT_GFX_RLC_LS |
+@@ -735,43 +760,13 @@ static int soc15_common_early_init(void *handle)
+ AMD_CG_SUPPORT_SDMA_LS |
+ AMD_CG_SUPPORT_VCN_MGCG;
+
+- adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
+-
+- if (adev->powerplay.pp_feature & PP_GFXOFF_MASK)
+- adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+- AMD_PG_SUPPORT_CP |
+- AMD_PG_SUPPORT_RLC_SMU_HS;
+-
+- if (adev->rev_id >= 0x8)
+- adev->external_rev_id = adev->rev_id + 0x81;
+- else
+- adev->external_rev_id = 0x1;
+- break;
+- case CHIP_PICASSO:
+- adev->cg_flags = AMD_CG_SUPPORT_GFX_MGLS |
+- AMD_CG_SUPPORT_GFX_CP_LS |
+- AMD_CG_SUPPORT_GFX_3D_CGCG |
+- AMD_CG_SUPPORT_GFX_3D_CGLS |
+- AMD_CG_SUPPORT_GFX_CGCG |
+- AMD_CG_SUPPORT_GFX_CGLS |
+- AMD_CG_SUPPORT_BIF_LS |
+- AMD_CG_SUPPORT_HDP_LS |
+- AMD_CG_SUPPORT_ROM_MGCG |
+- AMD_CG_SUPPORT_MC_MGCG |
+- AMD_CG_SUPPORT_MC_LS |
+- AMD_CG_SUPPORT_SDMA_MGCG |
+- AMD_CG_SUPPORT_SDMA_LS;
+-
+- adev->pg_flags = AMD_PG_SUPPORT_SDMA |
+- AMD_PG_SUPPORT_MMHUB |
+- AMD_PG_SUPPORT_VCN;
++ adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
++ }
+
+ if (adev->powerplay.pp_feature & PP_GFXOFF_MASK)
+ adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+ AMD_PG_SUPPORT_CP |
+ AMD_PG_SUPPORT_RLC_SMU_HS;
+-
+- adev->external_rev_id = adev->rev_id + 0x41;
+ break;
+ default:
+ /* FIXME: not supported yet */
+@@ -973,7 +968,6 @@ static int soc15_common_set_clockgating_state(void *handle,
+ state == AMD_CG_STATE_GATE ? true : false);
+ break;
+ case CHIP_RAVEN:
+- case CHIP_PICASSO:
+ adev->nbio_funcs->update_medium_grain_clock_gating(adev,
+ state == AMD_CG_STATE_GATE ? true : false);
+ adev->nbio_funcs->update_medium_grain_light_sleep(adev,
+--
+2.7.4
+
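The early-init hunk above folds the old CHIP_PICASSO branch into CHIP_RAVEN and tells the variants apart by silicon revision and PCI device ID. A minimal standalone C sketch of that three-way mapping (the fake_adev type and pick_external_rev_id helper are illustrative, not the kernel code; 0x15dd is only a placeholder for a non-Picasso device ID):

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the fields the soc15 hunk consults. */
struct fake_adev {
    uint32_t rev_id;      /* silicon revision */
    uint16_t pdev_device; /* PCI device ID */
};

/* Hypothetical helper mirroring the external_rev_id selection above:
 * rev_id >= 0x8  -> Raven2-style parts (+0x81)
 * device 0x15d8  -> Picasso-style parts (+0x41)
 * otherwise      -> original Raven (0x1)
 */
static uint32_t pick_external_rev_id(const struct fake_adev *adev)
{
    if (adev->rev_id >= 0x8)
        return adev->rev_id + 0x81;
    if (adev->pdev_device == 0x15d8)
        return adev->rev_id + 0x41;
    return 0x1;
}

int main(void)
{
    struct fake_adev raven   = { .rev_id = 0x0, .pdev_device = 0x15dd };
    struct fake_adev picasso = { .rev_id = 0x1, .pdev_device = 0x15d8 };
    struct fake_adev raven2  = { .rev_id = 0x8, .pdev_device = 0x15dd };

    printf("raven:   0x%x\n", pick_external_rev_id(&raven));
    printf("picasso: 0x%x\n", pick_external_rev_id(&picasso));
    printf("raven2:  0x%x\n", pick_external_rev_id(&raven2));
    return 0;
}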
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5297-drm-amd-display-Fix-3D-stereo-issues.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5297-drm-amd-display-Fix-3D-stereo-issues.patch
new file mode 100644
index 00000000..430b76c7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5297-drm-amd-display-Fix-3D-stereo-issues.patch
@@ -0,0 +1,112 @@
+From 5dba7a6b54662db60c5a3ca8b5ca7e60de405cd2 Mon Sep 17 00:00:00 2001
+From: Charlene Liu <charlene.liu@amd.com>
+Date: Mon, 27 Aug 2018 11:31:08 -0400
+Subject: [PATCH 5297/5725] drm/amd/display: Fix 3D stereo issues.
+
+We were not providing the correct pixel clocks to DML for watermark
+calculation: HW frame-packed 3D stereo timings need the pixel clock
+doubled.
+
+Signed-off-by: Charlene Liu <charlene.liu@amd.com>
+Reviewed-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c | 6 +++++-
+ drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 5 +++--
+ drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 5 ++++-
+ drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c | 3 +++
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 2 ++
+ 5 files changed, 17 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+index 160d11a..9ebe30b 100644
+--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
++++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+@@ -2881,6 +2881,7 @@ static void populate_initial_data(
+
+ /* Pipes without underlay after */
+ for (i = 0; i < pipe_count; i++) {
++ unsigned int pixel_clock_khz;
+ if (!pipe[i].stream || pipe[i].bottom_pipe)
+ continue;
+
+@@ -2889,7 +2890,10 @@ static void populate_initial_data(
+ data->lpt_en[num_displays + 4] = false;
+ data->h_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.h_total);
+ data->v_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.v_total);
+- data->pixel_rate[num_displays + 4] = bw_frc_to_fixed(pipe[i].stream->timing.pix_clk_khz, 1000);
++ pixel_clock_khz = pipe[i].stream->timing.pix_clk_khz;
++ if (pipe[i].stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
++ pixel_clock_khz *= 2;
++ data->pixel_rate[num_displays + 4] = bw_frc_to_fixed(pixel_clock_khz, 1000);
+ if (pipe[i].plane_state) {
+ data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.viewport.width);
+ data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4];
+diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+index 32b3413..80ec09e 100644
+--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
++++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+@@ -852,8 +852,9 @@ bool dcn_validate_bandwidth(
+ v->v_sync_plus_back_porch[input_idx] = pipe->stream->timing.v_total
+ - v->vactive[input_idx]
+ - pipe->stream->timing.v_front_porch;
+- v->pixel_clock[input_idx] = pipe->stream->timing.pix_clk_khz / 1000.0f;
+-
++ v->pixel_clock[input_idx] = pipe->stream->timing.pix_clk_khz/1000.0;
++ if (pipe->stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
++ v->pixel_clock[input_idx] *= 2;
+ if (!pipe->plane_state) {
+ v->dcc_enable[input_idx] = dcn_bw_yes;
+ v->source_pixel_format[input_idx] = dcn_bw_rgb_sub_32;
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index 8c6695a..b2f6711 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -344,6 +344,9 @@ bool resource_are_streams_timing_synchronizable(
+ || !dc_is_dp_signal(stream2->signal)))
+ return false;
+
++ if (stream1->view_format != stream2->view_format)
++ return false;
++
+ return true;
+ }
+ static bool is_dp_and_hdmi_sharable(
+@@ -354,7 +357,7 @@ static bool is_dp_and_hdmi_sharable(
+ return false;
+
+ if (stream1->clamping.c_depth != COLOR_DEPTH_888 ||
+- stream2->clamping.c_depth != COLOR_DEPTH_888)
++ stream2->clamping.c_depth != COLOR_DEPTH_888)
+ return false;
+
+ return true;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+index 49c5c70..cfca786 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+@@ -796,6 +796,9 @@ static void get_pixel_clock_parameters(
+ if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+ pixel_clk_params->requested_pix_clk = pixel_clk_params->requested_pix_clk / 2;
+ }
++ if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
++ pixel_clk_params->requested_pix_clk *= 2;
++
+ }
+
+ void dce110_resource_build_pipe_hw_param(struct pipe_ctx *pipe_ctx)
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+index 2cc4719..e44031e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+@@ -979,6 +979,8 @@ static void get_pixel_clock_parameters(
+
+ if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ pixel_clk_params->requested_pix_clk /= 2;
++ if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
++ pixel_clk_params->requested_pix_clk *= 2;
+
+ }
+
+--
+2.7.4
+
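The common thread in the hunks above is that a hardware frame-packed stereo timing carries both eyes per frame, so every consumer of the pixel clock (DML, the DCE/DCN bandwidth code, pixel clock programming) has to see twice the nominal rate. A standalone C sketch of that adjustment (the fake_timing types are illustrative, not the DC structures):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins; the real enum/struct live in the DC headers. */
enum fake_timing_3d_format {
    FAKE_TIMING_3D_FORMAT_NONE,
    FAKE_TIMING_3D_FORMAT_HW_FRAME_PACKING,
};

struct fake_timing {
    uint32_t pix_clk_khz;
    enum fake_timing_3d_format timing_3d_format;
};

/* Hypothetical helper mirroring the fix: HW frame packing transmits both
 * eyes in one (tall) frame, so bandwidth math must see twice the clock. */
static uint32_t effective_pix_clk_khz(const struct fake_timing *t)
{
    uint32_t khz = t->pix_clk_khz;

    if (t->timing_3d_format == FAKE_TIMING_3D_FORMAT_HW_FRAME_PACKING)
        khz *= 2;
    return khz;
}

int main(void)
{
    struct fake_timing mono   = { 148500, FAKE_TIMING_3D_FORMAT_NONE };
    struct fake_timing stereo = { 148500, FAKE_TIMING_3D_FORMAT_HW_FRAME_PACKING };

    printf("mono:   %u kHz\n", effective_pix_clk_khz(&mono));
    printf("stereo: %u kHz\n", effective_pix_clk_khz(&stereo));
    return 0;
}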
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5298-drm-amd-display-stop-using-switch-for-different-CS-r.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5298-drm-amd-display-stop-using-switch-for-different-CS-r.patch
new file mode 100644
index 00000000..7f94443c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5298-drm-amd-display-stop-using-switch-for-different-CS-r.patch
@@ -0,0 +1,521 @@
+From a6c56d556cf36920a49cfd80e18aee5b55379fc4 Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Wed, 29 Aug 2018 16:23:59 -0400
+Subject: [PATCH 5298/5725] drm/amd/display: stop using switch for different CS
+ revisions
+
+Clock sources already support ASIC-specific function pointers,
+but the actual separation into per-ASIC functions was never
+performed, leaving us with giant functions that rely on switch
+statements over the DCE version.
+
+This change creates separate functions and removes the switch use.
+
+Change-Id: I28e514e57ea5fcd93b44f6d3298ba2323d90a885
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+---
+ .../gpu/drm/amd/display/dc/dce/dce_clock_source.c | 371 ++++++++++++---------
+ .../gpu/drm/amd/display/dc/dce/dce_clock_source.h | 9 +
+ .../drm/amd/display/dc/dce112/dce112_resource.c | 2 +-
+ .../drm/amd/display/dc/dce120/dce120_resource.c | 2 +-
+ .../gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 2 +-
+ 5 files changed, 218 insertions(+), 168 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+index 8e0a00b..9b38027 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+@@ -584,34 +584,42 @@ static uint32_t dce110_get_pix_clk_dividers(
+ return 0;
+ }
+
+- switch (cs->ctx->dce_version) {
+- case DCE_VERSION_8_0:
+- case DCE_VERSION_8_1:
+- case DCE_VERSION_8_3:
+- case DCE_VERSION_10_0:
+- case DCE_VERSION_11_0:
+- pll_calc_error =
+- dce110_get_pix_clk_dividers_helper(clk_src,
++ pll_calc_error = dce110_get_pix_clk_dividers_helper(clk_src,
+ pll_settings, pix_clk_params);
+- break;
+- case DCE_VERSION_11_2:
+- case DCE_VERSION_11_22:
+- case DCE_VERSION_12_0:
+-#ifdef CONFIG_X86
+- case DCN_VERSION_1_0:
+-#endif
+
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
+- case DCN_VERSION_1_01:
+-#endif
+- dce112_get_pix_clk_dividers_helper(clk_src,
+- pll_settings, pix_clk_params);
+- break;
+- default:
+- break;
++ return pll_calc_error;
++}
++
++static uint32_t dce112_get_pix_clk_dividers(
++ struct clock_source *cs,
++ struct pixel_clk_params *pix_clk_params,
++ struct pll_settings *pll_settings)
++{
++ struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(cs);
++ DC_LOGGER_INIT();
++
++ if (pix_clk_params == NULL || pll_settings == NULL
++ || pix_clk_params->requested_pix_clk == 0) {
++ DC_LOG_ERROR(
++ "%s: Invalid parameters!!\n", __func__);
++ return -1;
+ }
+
+- return pll_calc_error;
++ memset(pll_settings, 0, sizeof(*pll_settings));
++
++ if (cs->id == CLOCK_SOURCE_ID_DP_DTO ||
++ cs->id == CLOCK_SOURCE_ID_EXTERNAL) {
++ pll_settings->adjusted_pix_clk = clk_src->ext_clk_khz;
++ pll_settings->calculated_pix_clk = clk_src->ext_clk_khz;
++ pll_settings->actual_pix_clk =
++ pix_clk_params->requested_pix_clk;
++ return -1;
++ }
++
++ dce112_get_pix_clk_dividers_helper(clk_src,
++ pll_settings, pix_clk_params);
++
++ return 0;
+ }
+
+ static uint32_t dce110_get_pll_pixel_rate_in_hz(
+@@ -917,7 +925,66 @@ static bool dce110_program_pix_clk(
+ struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source);
+ struct bp_pixel_clock_parameters bp_pc_params = {0};
+
+-#ifdef CONFIG_X86
++ /* First disable SS
++ * ATOMBIOS will enable by default SS on PLL for DP,
++ * do not disable it here
++ */
++ if (clock_source->id != CLOCK_SOURCE_ID_EXTERNAL &&
++ !dc_is_dp_signal(pix_clk_params->signal_type) &&
++ clock_source->ctx->dce_version <= DCE_VERSION_11_0)
++ disable_spread_spectrum(clk_src);
++
++ /*ATOMBIOS expects pixel rate adjusted by deep color ratio)*/
++ bp_pc_params.controller_id = pix_clk_params->controller_id;
++ bp_pc_params.pll_id = clock_source->id;
++ bp_pc_params.target_pixel_clock = pll_settings->actual_pix_clk;
++ bp_pc_params.encoder_object_id = pix_clk_params->encoder_object_id;
++ bp_pc_params.signal_type = pix_clk_params->signal_type;
++
++ bp_pc_params.reference_divider = pll_settings->reference_divider;
++ bp_pc_params.feedback_divider = pll_settings->feedback_divider;
++ bp_pc_params.fractional_feedback_divider =
++ pll_settings->fract_feedback_divider;
++ bp_pc_params.pixel_clock_post_divider =
++ pll_settings->pix_clk_post_divider;
++ bp_pc_params.flags.SET_EXTERNAL_REF_DIV_SRC =
++ pll_settings->use_external_clk;
++
++ if (clk_src->bios->funcs->set_pixel_clock(
++ clk_src->bios, &bp_pc_params) != BP_RESULT_OK)
++ return false;
++ /* Enable SS
++ * ATOMBIOS will enable by default SS for DP on PLL ( DP ID clock),
++ * based on HW display PLL team, SS control settings should be programmed
++ * during PLL Reset, but they do not have effect
++ * until SS_EN is asserted.*/
++ if (clock_source->id != CLOCK_SOURCE_ID_EXTERNAL
++ && !dc_is_dp_signal(pix_clk_params->signal_type)) {
++
++ if (pix_clk_params->flags.ENABLE_SS)
++ if (!enable_spread_spectrum(clk_src,
++ pix_clk_params->signal_type,
++ pll_settings))
++ return false;
++
++ /* Resync deep color DTO */
++ dce110_program_pixel_clk_resync(clk_src,
++ pix_clk_params->signal_type,
++ pix_clk_params->color_depth);
++ }
++
++ return true;
++}
++
++static bool dce112_program_pix_clk(
++ struct clock_source *clock_source,
++ struct pixel_clk_params *pix_clk_params,
++ struct pll_settings *pll_settings)
++{
++ struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source);
++ struct bp_pixel_clock_parameters bp_pc_params = {0};
++
++#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ if (IS_FPGA_MAXIMUS_DC(clock_source->ctx->dce_environment)) {
+ unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0;
+ unsigned dp_dto_ref_kHz = 700000;
+@@ -948,82 +1015,29 @@ static bool dce110_program_pix_clk(
+ bp_pc_params.encoder_object_id = pix_clk_params->encoder_object_id;
+ bp_pc_params.signal_type = pix_clk_params->signal_type;
+
+- switch (clock_source->ctx->dce_version) {
+- case DCE_VERSION_8_0:
+- case DCE_VERSION_8_1:
+- case DCE_VERSION_8_3:
+- case DCE_VERSION_10_0:
+- case DCE_VERSION_11_0:
+- bp_pc_params.reference_divider = pll_settings->reference_divider;
+- bp_pc_params.feedback_divider = pll_settings->feedback_divider;
+- bp_pc_params.fractional_feedback_divider =
+- pll_settings->fract_feedback_divider;
+- bp_pc_params.pixel_clock_post_divider =
+- pll_settings->pix_clk_post_divider;
+- bp_pc_params.flags.SET_EXTERNAL_REF_DIV_SRC =
++ if (clock_source->id != CLOCK_SOURCE_ID_DP_DTO) {
++ bp_pc_params.flags.SET_GENLOCK_REF_DIV_SRC =
+ pll_settings->use_external_clk;
+-
+- if (clk_src->bios->funcs->set_pixel_clock(
+- clk_src->bios, &bp_pc_params) != BP_RESULT_OK)
+- return false;
+- /* Enable SS
+- * ATOMBIOS will enable by default SS for DP on PLL ( DP ID clock),
+- * based on HW display PLL team, SS control settings should be programmed
+- * during PLL Reset, but they do not have effect
+- * until SS_EN is asserted.*/
+- if (clock_source->id != CLOCK_SOURCE_ID_EXTERNAL
+- && !dc_is_dp_signal(pix_clk_params->signal_type)) {
+-
+- if (pix_clk_params->flags.ENABLE_SS)
+- if (!enable_spread_spectrum(clk_src,
+- pix_clk_params->signal_type,
+- pll_settings))
+- return false;
+-
+- /* Resync deep color DTO */
+- dce110_program_pixel_clk_resync(clk_src,
+- pix_clk_params->signal_type,
+- pix_clk_params->color_depth);
++ bp_pc_params.flags.SET_XTALIN_REF_SRC =
++ !pll_settings->use_external_clk;
++ if (pix_clk_params->flags.SUPPORT_YCBCR420) {
++ bp_pc_params.flags.SUPPORT_YUV_420 = 1;
+ }
+-
+- break;
+- case DCE_VERSION_11_2:
+- case DCE_VERSION_11_22:
+- case DCE_VERSION_12_0:
+-#ifdef CONFIG_X86
+- case DCN_VERSION_1_0:
+-#endif
+-
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
+- case DCN_VERSION_1_01:
+-#endif
+-
+- if (clock_source->id != CLOCK_SOURCE_ID_DP_DTO) {
+- bp_pc_params.flags.SET_GENLOCK_REF_DIV_SRC =
+- pll_settings->use_external_clk;
+- bp_pc_params.flags.SET_XTALIN_REF_SRC =
+- !pll_settings->use_external_clk;
+- if (pix_clk_params->flags.SUPPORT_YCBCR420) {
+- bp_pc_params.flags.SUPPORT_YUV_420 = 1;
+- }
+- }
+- if (clk_src->bios->funcs->set_pixel_clock(
+- clk_src->bios, &bp_pc_params) != BP_RESULT_OK)
+- return false;
+- /* Resync deep color DTO */
+- if (clock_source->id != CLOCK_SOURCE_ID_DP_DTO)
+- dce112_program_pixel_clk_resync(clk_src,
+- pix_clk_params->signal_type,
+- pix_clk_params->color_depth,
+- pix_clk_params->flags.SUPPORT_YCBCR420);
+- break;
+- default:
+- break;
+ }
++ if (clk_src->bios->funcs->set_pixel_clock(
++ clk_src->bios, &bp_pc_params) != BP_RESULT_OK)
++ return false;
++ /* Resync deep color DTO */
++ if (clock_source->id != CLOCK_SOURCE_ID_DP_DTO)
++ dce112_program_pixel_clk_resync(clk_src,
++ pix_clk_params->signal_type,
++ pix_clk_params->color_depth,
++ pix_clk_params->flags.SUPPORT_YCBCR420);
+
+ return true;
+ }
+
++
+ static bool dce110_clock_source_power_down(
+ struct clock_source *clk_src)
+ {
+@@ -1050,6 +1064,12 @@ static bool dce110_clock_source_power_down(
+ /*****************************************/
+ /* Constructor */
+ /*****************************************/
++
++static const struct clock_source_funcs dce112_clk_src_funcs = {
++ .cs_power_down = dce110_clock_source_power_down,
++ .program_pix_clk = dce112_program_pix_clk,
++ .get_pix_clk_dividers = dce112_get_pix_clk_dividers
++};
+ static const struct clock_source_funcs dce110_clk_src_funcs = {
+ .cs_power_down = dce110_clock_source_power_down,
+ .program_pix_clk = dce110_program_pix_clk,
+@@ -1057,6 +1077,7 @@ static const struct clock_source_funcs dce110_clk_src_funcs = {
+ .get_pix_rate_in_hz = dce110_get_pix_rate_in_hz
+ };
+
++
+ static void get_ss_info_from_atombios(
+ struct dce110_clk_src *clk_src,
+ enum as_signal_type as_signal,
+@@ -1310,81 +1331,70 @@ bool dce110_clk_src_construct(
+ clk_src->ext_clk_khz =
+ fw_info.external_clock_source_frequency_for_dp;
+
+- switch (clk_src->base.ctx->dce_version) {
+- case DCE_VERSION_8_0:
+- case DCE_VERSION_8_1:
+- case DCE_VERSION_8_3:
+- case DCE_VERSION_10_0:
+- case DCE_VERSION_11_0:
+-
+- /* structure normally used with PLL ranges from ATOMBIOS; DS on by default */
+- calc_pll_cs_init_data.bp = bios;
+- calc_pll_cs_init_data.min_pix_clk_pll_post_divider = 1;
+- calc_pll_cs_init_data.max_pix_clk_pll_post_divider =
+- clk_src->cs_mask->PLL_POST_DIV_PIXCLK;
+- calc_pll_cs_init_data.min_pll_ref_divider = 1;
+- calc_pll_cs_init_data.max_pll_ref_divider = clk_src->cs_mask->PLL_REF_DIV;
+- /* when 0 use minInputPxlClkPLLFrequencyInKHz from firmwareInfo*/
+- calc_pll_cs_init_data.min_override_input_pxl_clk_pll_freq_khz = 0;
+- /* when 0 use maxInputPxlClkPLLFrequencyInKHz from firmwareInfo*/
+- calc_pll_cs_init_data.max_override_input_pxl_clk_pll_freq_khz = 0;
+- /*numberOfFractFBDividerDecimalPoints*/
+- calc_pll_cs_init_data.num_fract_fb_divider_decimal_point =
+- FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM;
+- /*number of decimal point to round off for fractional feedback divider value*/
+- calc_pll_cs_init_data.num_fract_fb_divider_decimal_point_precision =
+- FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM;
+- calc_pll_cs_init_data.ctx = ctx;
+-
+- /*structure for HDMI, no SS or SS% <= 0.06% for 27 MHz Ref clock */
+- calc_pll_cs_init_data_hdmi.bp = bios;
+- calc_pll_cs_init_data_hdmi.min_pix_clk_pll_post_divider = 1;
+- calc_pll_cs_init_data_hdmi.max_pix_clk_pll_post_divider =
+- clk_src->cs_mask->PLL_POST_DIV_PIXCLK;
+- calc_pll_cs_init_data_hdmi.min_pll_ref_divider = 1;
+- calc_pll_cs_init_data_hdmi.max_pll_ref_divider = clk_src->cs_mask->PLL_REF_DIV;
+- /* when 0 use minInputPxlClkPLLFrequencyInKHz from firmwareInfo*/
+- calc_pll_cs_init_data_hdmi.min_override_input_pxl_clk_pll_freq_khz = 13500;
+- /* when 0 use maxInputPxlClkPLLFrequencyInKHz from firmwareInfo*/
+- calc_pll_cs_init_data_hdmi.max_override_input_pxl_clk_pll_freq_khz = 27000;
+- /*numberOfFractFBDividerDecimalPoints*/
+- calc_pll_cs_init_data_hdmi.num_fract_fb_divider_decimal_point =
+- FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM;
+- /*number of decimal point to round off for fractional feedback divider value*/
+- calc_pll_cs_init_data_hdmi.num_fract_fb_divider_decimal_point_precision =
+- FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM;
+- calc_pll_cs_init_data_hdmi.ctx = ctx;
+-
+- clk_src->ref_freq_khz = fw_info.pll_info.crystal_frequency;
+-
+- if (clk_src->base.id == CLOCK_SOURCE_ID_EXTERNAL)
+- return true;
+-
+- /* PLL only from here on */
+- ss_info_from_atombios_create(clk_src);
+-
+- if (!calc_pll_max_vco_construct(
+- &clk_src->calc_pll,
+- &calc_pll_cs_init_data)) {
+- ASSERT_CRITICAL(false);
+- goto unexpected_failure;
+- }
++ /* structure normally used with PLL ranges from ATOMBIOS; DS on by default */
++ calc_pll_cs_init_data.bp = bios;
++ calc_pll_cs_init_data.min_pix_clk_pll_post_divider = 1;
++ calc_pll_cs_init_data.max_pix_clk_pll_post_divider =
++ clk_src->cs_mask->PLL_POST_DIV_PIXCLK;
++ calc_pll_cs_init_data.min_pll_ref_divider = 1;
++ calc_pll_cs_init_data.max_pll_ref_divider = clk_src->cs_mask->PLL_REF_DIV;
++ /* when 0 use minInputPxlClkPLLFrequencyInKHz from firmwareInfo*/
++ calc_pll_cs_init_data.min_override_input_pxl_clk_pll_freq_khz = 0;
++ /* when 0 use maxInputPxlClkPLLFrequencyInKHz from firmwareInfo*/
++ calc_pll_cs_init_data.max_override_input_pxl_clk_pll_freq_khz = 0;
++ /*numberOfFractFBDividerDecimalPoints*/
++ calc_pll_cs_init_data.num_fract_fb_divider_decimal_point =
++ FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM;
++ /*number of decimal point to round off for fractional feedback divider value*/
++ calc_pll_cs_init_data.num_fract_fb_divider_decimal_point_precision =
++ FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM;
++ calc_pll_cs_init_data.ctx = ctx;
++
++ /*structure for HDMI, no SS or SS% <= 0.06% for 27 MHz Ref clock */
++ calc_pll_cs_init_data_hdmi.bp = bios;
++ calc_pll_cs_init_data_hdmi.min_pix_clk_pll_post_divider = 1;
++ calc_pll_cs_init_data_hdmi.max_pix_clk_pll_post_divider =
++ clk_src->cs_mask->PLL_POST_DIV_PIXCLK;
++ calc_pll_cs_init_data_hdmi.min_pll_ref_divider = 1;
++ calc_pll_cs_init_data_hdmi.max_pll_ref_divider = clk_src->cs_mask->PLL_REF_DIV;
++ /* when 0 use minInputPxlClkPLLFrequencyInKHz from firmwareInfo*/
++ calc_pll_cs_init_data_hdmi.min_override_input_pxl_clk_pll_freq_khz = 13500;
++ /* when 0 use maxInputPxlClkPLLFrequencyInKHz from firmwareInfo*/
++ calc_pll_cs_init_data_hdmi.max_override_input_pxl_clk_pll_freq_khz = 27000;
++ /*numberOfFractFBDividerDecimalPoints*/
++ calc_pll_cs_init_data_hdmi.num_fract_fb_divider_decimal_point =
++ FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM;
++ /*number of decimal point to round off for fractional feedback divider value*/
++ calc_pll_cs_init_data_hdmi.num_fract_fb_divider_decimal_point_precision =
++ FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM;
++ calc_pll_cs_init_data_hdmi.ctx = ctx;
++
++ clk_src->ref_freq_khz = fw_info.pll_info.crystal_frequency;
++
++ if (clk_src->base.id == CLOCK_SOURCE_ID_EXTERNAL)
++ return true;
+
++ /* PLL only from here on */
++ ss_info_from_atombios_create(clk_src);
+
+- calc_pll_cs_init_data_hdmi.
+- min_override_input_pxl_clk_pll_freq_khz = clk_src->ref_freq_khz/2;
+- calc_pll_cs_init_data_hdmi.
+- max_override_input_pxl_clk_pll_freq_khz = clk_src->ref_freq_khz;
++ if (!calc_pll_max_vco_construct(
++ &clk_src->calc_pll,
++ &calc_pll_cs_init_data)) {
++ ASSERT_CRITICAL(false);
++ goto unexpected_failure;
++ }
+
+
+- if (!calc_pll_max_vco_construct(
+- &clk_src->calc_pll_hdmi, &calc_pll_cs_init_data_hdmi)) {
+- ASSERT_CRITICAL(false);
+- goto unexpected_failure;
+- }
+- break;
+- default:
+- break;
++ calc_pll_cs_init_data_hdmi.
++ min_override_input_pxl_clk_pll_freq_khz = clk_src->ref_freq_khz/2;
++ calc_pll_cs_init_data_hdmi.
++ max_override_input_pxl_clk_pll_freq_khz = clk_src->ref_freq_khz;
++
++
++ if (!calc_pll_max_vco_construct(
++ &clk_src->calc_pll_hdmi, &calc_pll_cs_init_data_hdmi)) {
++ ASSERT_CRITICAL(false);
++ goto unexpected_failure;
+ }
+
+ return true;
+@@ -1393,3 +1403,34 @@ bool dce110_clk_src_construct(
+ return false;
+ }
+
++bool dce112_clk_src_construct(
++ struct dce110_clk_src *clk_src,
++ struct dc_context *ctx,
++ struct dc_bios *bios,
++ enum clock_source_id id,
++ const struct dce110_clk_src_regs *regs,
++ const struct dce110_clk_src_shift *cs_shift,
++ const struct dce110_clk_src_mask *cs_mask)
++{
++ struct dc_firmware_info fw_info = { { 0 } };
++
++ clk_src->base.ctx = ctx;
++ clk_src->bios = bios;
++ clk_src->base.id = id;
++ clk_src->base.funcs = &dce112_clk_src_funcs;
++
++ clk_src->regs = regs;
++ clk_src->cs_shift = cs_shift;
++ clk_src->cs_mask = cs_mask;
++
++ if (clk_src->bios->funcs->get_firmware_info(
++ clk_src->bios, &fw_info) != BP_RESULT_OK) {
++ ASSERT_CRITICAL(false);
++ return false;
++ }
++
++ clk_src->ext_clk_khz = fw_info.external_clock_source_frequency_for_dp;
++
++ return true;
++}
++
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
+index e1f20ed..b8e0e2c 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
+@@ -144,4 +144,13 @@ bool dce110_clk_src_construct(
+ const struct dce110_clk_src_shift *cs_shift,
+ const struct dce110_clk_src_mask *cs_mask);
+
++bool dce112_clk_src_construct(
++ struct dce110_clk_src *clk_src,
++ struct dc_context *ctx,
++ struct dc_bios *bios,
++ enum clock_source_id id,
++ const struct dce110_clk_src_regs *regs,
++ const struct dce110_clk_src_shift *cs_shift,
++ const struct dce110_clk_src_mask *cs_mask);
++
+ #endif
+diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+index d35dc730..f3d55a6 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+@@ -668,7 +668,7 @@ struct clock_source *dce112_clock_source_create(
+ if (!clk_src)
+ return NULL;
+
+- if (dce110_clk_src_construct(clk_src, ctx, bios, id,
++ if (dce112_clk_src_construct(clk_src, ctx, bios, id,
+ regs, &cs_shift, &cs_mask)) {
+ clk_src->base.dp_clk_src = dp_clk_src;
+ return &clk_src->base;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+index b2fb06f3..8afa43a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+@@ -455,7 +455,7 @@ struct clock_source *dce120_clock_source_create(
+ if (!clk_src)
+ return NULL;
+
+- if (dce110_clk_src_construct(clk_src, ctx, bios, id,
++ if (dce112_clk_src_construct(clk_src, ctx, bios, id,
+ regs, &cs_shift, &cs_mask)) {
+ clk_src->base.dp_clk_src = dp_clk_src;
+ return &clk_src->base;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+index e44031e..e8a22d5 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+@@ -748,7 +748,7 @@ struct clock_source *dcn10_clock_source_create(
+ if (!clk_src)
+ return NULL;
+
+- if (dce110_clk_src_construct(clk_src, ctx, bios, id,
++ if (dce112_clk_src_construct(clk_src, ctx, bios, id,
+ regs, &cs_shift, &cs_mask)) {
+ clk_src->base.dp_clk_src = dp_clk_src;
+ return &clk_src->base;
+--
+2.7.4
+
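The refactor above keeps the existing clock_source_funcs indirection but finally populates it per ASIC family, so the DCE-version decision runs once at construction instead of inside every helper. A standalone C sketch of that pattern (the toy clk_ops table and construct_ops helper are illustrative, not the DC API):

#include <stdio.h>

/* Illustrative ops table, in the spirit of clock_source_funcs:
 * the ASIC-specific choice is made once, at construction, instead of
 * re-deciding with a switch on every call. */
struct clk_ops {
    int (*program_pix_clk)(int khz);
};

static int dce110_program(int khz) { printf("dce110 path: %d kHz\n", khz); return 0; }
static int dce112_program(int khz) { printf("dce112 path: %d kHz\n", khz); return 0; }

static const struct clk_ops dce110_ops = { .program_pix_clk = dce110_program };
static const struct clk_ops dce112_ops = { .program_pix_clk = dce112_program };

/* Hypothetical constructor: the only remaining per-ASIC decision. */
static const struct clk_ops *construct_ops(int dce_version)
{
    return dce_version >= 112 ? &dce112_ops : &dce110_ops;
}

int main(void)
{
    const struct clk_ops *ops = construct_ops(112);

    /* Callers just use the table; no switch on dce_version here. */
    return ops->program_pix_clk(148500);
}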
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5299-drm-amd-display-dc-3.1.66.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5299-drm-amd-display-dc-3.1.66.patch
new file mode 100644
index 00000000..262e3946
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5299-drm-amd-display-dc-3.1.66.patch
@@ -0,0 +1,29 @@
+From 015ea9c0bfcb839693c6ef6476dae089a2030a6f Mon Sep 17 00:00:00 2001
+From: Tony Cheng <tony.cheng@amd.com>
+Date: Mon, 27 Aug 2018 13:35:31 -0400
+Subject: [PATCH 5299/5725] drm/amd/display: dc 3.1.66
+
+Change-Id: I3cdae9a323d4aad14528a136cdd6a959b2091433
+Signed-off-by: Tony Cheng <tony.cheng@amd.com>
+Reviewed-by: Steven Chiu <Steven.Chiu@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index c012918..b56afdb 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -38,7 +38,7 @@
+ #include "inc/compressor.h"
+ #include "dml/display_mode_lib.h"
+
+-#define DC_VER "3.1.63"
++#define DC_VER "3.1.66"
+
+ #define MAX_SURFACES 3
+ #define MAX_STREAMS 6
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5300-drm-amd-display-add-query-HPD-interface.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5300-drm-amd-display-add-query-HPD-interface.patch
new file mode 100644
index 00000000..c2dc7a89
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5300-drm-amd-display-add-query-HPD-interface.patch
@@ -0,0 +1,63 @@
+From 7002835db76e88eeba934b78e8e6ecb2e5937d96 Mon Sep 17 00:00:00 2001
+From: Chiawen Huang <chiawen.huang@amd.com>
+Date: Wed, 5 Sep 2018 20:34:57 +0800
+Subject: [PATCH 5300/5725] drm/amd/display: add query HPD interface.
+
+[Why]
+The current dc_link_detect function does not just detect the link; it also updates some link data.
+
+[How]
+Add a pure "get HPD state" query function.
+
+Signed-off-by: Chiawen Huang <chiawen.huang@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_link.c | 18 ++++++++++++++++++
+ drivers/gpu/drm/amd/display/dc/dc_link.h | 1 +
+ 2 files changed, 19 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index 960521ce..2dfdcc9 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -890,6 +890,24 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
+ return true;
+ }
+
++bool dc_link_get_hpd_state(struct dc_link *dc_link)
++{
++ struct gpio *hpd_pin;
++ uint32_t state;
++
++ hpd_pin = get_hpd_gpio(dc_link->ctx->dc_bios,
++ dc_link->link_id, dc_link->ctx->gpio_service);
++ if (hpd_pin == NULL)
++ ASSERT(false);
++
++ dal_gpio_open(hpd_pin, GPIO_MODE_INTERRUPT);
++ dal_gpio_get_value(hpd_pin, &state);
++ dal_gpio_close(hpd_pin);
++ dal_gpio_destroy_irq(&hpd_pin);
++
++ return state;
++}
++
+ static enum hpd_source_id get_hpd_line(
+ struct dc_link *link)
+ {
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
+index 1794764..4daec70 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
+@@ -167,6 +167,7 @@ enum dc_detect_reason {
+ };
+
+ bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);
++bool dc_link_get_hpd_state(struct dc_link *dc_link);
+
+ /* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt).
+ * Return:
+--
+2.7.4
+
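The new dc_link_get_hpd_state() opens the link's HPD pin, samples it, and closes the pin again without updating any link state, unlike dc_link_detect(). A standalone sketch of that open/sample/close pattern (the toy_gpio_* stand-ins are hypothetical, not the real dal_gpio_* API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-ins for the GPIO service used by dc_link_get_hpd_state(). */
struct toy_gpio { uint32_t level; };

static void toy_gpio_open(struct toy_gpio *g)  { (void)g; }
static void toy_gpio_get_value(struct toy_gpio *g, uint32_t *out) { *out = g->level; }
static void toy_gpio_close(struct toy_gpio *g) { (void)g; }

/* Sketch of the pattern the patch adds: open the HPD pin, sample it,
 * release it, and report the raw state without touching any link data. */
static bool query_hpd_state(struct toy_gpio *hpd_pin)
{
    uint32_t state = 0;

    toy_gpio_open(hpd_pin);
    toy_gpio_get_value(hpd_pin, &state);
    toy_gpio_close(hpd_pin);

    return state != 0;
}

int main(void)
{
    struct toy_gpio pin = { .level = 1 };

    printf("HPD asserted: %s\n", query_hpd_state(&pin) ? "yes" : "no");
    return 0;
}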
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5301-drm-amd-display-Drop-amdgpu_display_manager.dal-memb.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5301-drm-amd-display-Drop-amdgpu_display_manager.dal-memb.patch
new file mode 100644
index 00000000..ce3aa311
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5301-drm-amd-display-Drop-amdgpu_display_manager.dal-memb.patch
@@ -0,0 +1,49 @@
+From 651710bc7551d3a97cbc779a1fbb269c89071192 Mon Sep 17 00:00:00 2001
+From: Leo Li <sunpeng.li@amd.com>
+Date: Wed, 5 Sep 2018 11:19:42 -0400
+Subject: [PATCH 5301/5725] drm/amd/display: Drop amdgpu_display_manager.dal
+ member
+
+[Why]
+It's not being used anymore.
+
+[How]
+Nuke it
+
+Change-Id: I736147a70f51fd36977a7d6d91622f81a7070a12
+Signed-off-by: Leo Li <sunpeng.li@amd.com>
+Reviewed-by: David Francis <David.Francis@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 --
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 1 -
+ 2 files changed, 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 7b27d39..f6afa4e 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -426,8 +426,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
+
+ init_data.cgs_device = adev->dm.cgs_device;
+
+- adev->dm.dal = NULL;
+-
+ init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
+
+ /*
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+index 88b646e..d01ed33 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+@@ -103,7 +103,6 @@ struct dm_comressor_info {
+ #endif
+
+ struct amdgpu_display_manager {
+- struct dal *dal;
+ struct dc *dc;
+ struct cgs_device *cgs_device;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5302-drm-amd-display-Drop-amdgpu_dm_prev_state-struct.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5302-drm-amd-display-Drop-amdgpu_dm_prev_state-struct.patch
new file mode 100644
index 00000000..9368d657
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5302-drm-amd-display-Drop-amdgpu_dm_prev_state-struct.patch
@@ -0,0 +1,48 @@
+From 6188d097b8316e815429f49731946dc17024fec3 Mon Sep 17 00:00:00 2001
+From: Leo Li <sunpeng.li@amd.com>
+Date: Wed, 5 Sep 2018 11:28:29 -0400
+Subject: [PATCH 5302/5725] drm/amd/display: Drop amdgpu_dm_prev_state struct
+
+[Why]
+It's not being used
+
+[How]
+Nuke it
+
+Signed-off-by: Leo Li <sunpeng.li@amd.com>
+Reviewed-by: David Francis <David.Francis@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 9 ---------
+ 1 file changed, 9 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+index d01ed33..772368c 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+@@ -54,13 +54,6 @@ struct drm_device;
+ struct amdgpu_dm_irq_handler_data;
+ struct dc;
+
+-struct amdgpu_dm_prev_state {
+- struct drm_framebuffer *fb;
+- int32_t x;
+- int32_t y;
+- struct drm_display_mode mode;
+-};
+-
+ struct common_irq_params {
+ struct amdgpu_device *adev;
+ enum dc_irq_source irq_src;
+@@ -110,8 +103,6 @@ struct amdgpu_display_manager {
+ struct drm_device *ddev; /*DRM base driver*/
+ u16 display_indexes_num;
+
+- struct amdgpu_dm_prev_state prev_state;
+-
+ /*
+ * 'irq_source_handler_table' holds a list of handlers
+ * per (DAL) IRQ source.
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5303-drm-amdgpu-add-GDS-GWS-and-OA-debugfs-files.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5303-drm-amdgpu-add-GDS-GWS-and-OA-debugfs-files.patch
new file mode 100644
index 00000000..758ddc49
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5303-drm-amdgpu-add-GDS-GWS-and-OA-debugfs-files.patch
@@ -0,0 +1,48 @@
+From f58a50893dd4ab084390fa1342d360d379654a8c Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Fri, 14 Sep 2018 15:43:57 +0200
+Subject: [PATCH 5303/5725] drm/amdgpu: add GDS, GWS and OA debugfs files
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+In addition to the existing debugfs files for VRAM and GTT.
+
+Change-Id: I1f418e127ded38ff48ebda14a4226709524cffd3
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index 7c46a03f..614a5cf 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -2400,7 +2400,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
+ static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
+ {
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+- unsigned ttm_pl = *(int *)node->info_ent->data;
++ unsigned ttm_pl = (uintptr_t)node->info_ent->data;
+ struct drm_device *dev = node->minor->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct ttm_mem_type_manager *man = &adev->mman.bdev.man[ttm_pl];
+@@ -2416,8 +2416,11 @@ static int ttm_pl_dgma = AMDGPU_PL_DGMA;
+ static int ttm_pl_dgma_import = AMDGPU_PL_DGMA_IMPORT;
+
+ static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
+- {"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, &ttm_pl_vram},
+- {"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt},
++ {"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_VRAM},
++ {"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_TT},
++ {"amdgpu_gds_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GDS},
++ {"amdgpu_gws_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GWS},
++ {"amdgpu_oa_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_OA},
+ {"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
+ #ifdef CONFIG_SWIOTLB
+ {"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
+--
+2.7.4
+
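Besides adding the GDS/GWS/OA entries, the hunk above stops pointing each debugfs entry's data field at a static int and instead stores the placement index directly in the pointer, recovering it with a uintptr_t cast. A standalone sketch of that idiom (toy_info_entry and the placement values are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Toy version of the drm_info_list entry: a name plus an opaque data
 * pointer. The patch stores the TTM placement index directly in that
 * pointer instead of pointing at a static int. */
struct toy_info_entry {
    const char *name;
    void *data;
};

static void dump_table(const struct toy_info_entry *ent)
{
    /* Recover the integer with a uintptr_t round-trip, as the patch does. */
    unsigned placement = (unsigned)(uintptr_t)ent->data;

    printf("%s -> placement %u\n", ent->name, placement);
}

int main(void)
{
    /* Illustrative placement indices; the real values are TTM_PL_* /
     * AMDGPU_PL_* constants. */
    const struct toy_info_entry entries[] = {
        { "amdgpu_vram_mm", (void *)(uintptr_t)2 },
        { "amdgpu_gtt_mm",  (void *)(uintptr_t)1 },
        { "amdgpu_gds_mm",  (void *)(uintptr_t)3 },
    };

    for (unsigned i = 0; i < sizeof(entries) / sizeof(entries[0]); i++)
        dump_table(&entries[i]);
    return 0;
}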
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5304-drm-amdgpu-stop-crashing-on-GDS-GWS-OA-eviction.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5304-drm-amdgpu-stop-crashing-on-GDS-GWS-OA-eviction.patch
new file mode 100644
index 00000000..791342fb
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5304-drm-amdgpu-stop-crashing-on-GDS-GWS-OA-eviction.patch
@@ -0,0 +1,63 @@
+From 32424d7bb58667c5d3bb52bb910f02710e84610a Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Fri, 14 Sep 2018 20:44:17 +0200
+Subject: [PATCH 5304/5725] drm/amdgpu: stop crashing on GDS/GWS/OA eviction
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Simply ignore any copying here.
+
+Change-Id: I8c10da1b6199f4aa60fb8c52473eb2adb3286116
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 18 ++++++++++++++++++
+ 1 file changed, 18 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index 614a5cf..212ce92 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -273,6 +273,13 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
+
+ abo = ttm_to_amdgpu_bo(bo);
+ switch (bo->mem.mem_type) {
++ case AMDGPU_PL_GDS:
++ case AMDGPU_PL_GWS:
++ case AMDGPU_PL_OA:
++ placement->num_placement = 0;
++ placement->num_busy_placement = 0;
++ return;
++
+ case TTM_PL_VRAM:
+ case AMDGPU_PL_DGMA:
+ if (!adev->mman.buffer_funcs_enabled) {
+@@ -302,6 +309,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
+ case AMDGPU_PL_DGMA_IMPORT:
+ default:
+ amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
++ break;
+ }
+ *placement = abo->placement;
+ }
+@@ -699,6 +707,16 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
+ amdgpu_move_null(bo, new_mem);
+ return 0;
+ }
++ if (old_mem->mem_type == AMDGPU_PL_GDS ||
++ old_mem->mem_type == AMDGPU_PL_GWS ||
++ old_mem->mem_type == AMDGPU_PL_OA ||
++ new_mem->mem_type == AMDGPU_PL_GDS ||
++ new_mem->mem_type == AMDGPU_PL_GWS ||
++ new_mem->mem_type == AMDGPU_PL_OA) {
++ /* Nothing to save here */
++ amdgpu_move_null(bo, new_mem);
++ return 0;
++ }
+
+ if (!adev->mman.buffer_funcs_enabled)
+ goto memcpy;
+--
+2.7.4
+
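The change above treats GDS, GWS and OA as contentless domains: eviction gets an empty placement list and any move touching those domains becomes a null move with no copy. A standalone sketch of that decision (the toy enum and helpers are illustrative, not the TTM API):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative memory-type ids in the spirit of TTM_PL_* / AMDGPU_PL_*. */
enum toy_mem_type { TOY_PL_SYSTEM, TOY_PL_VRAM, TOY_PL_GDS, TOY_PL_GWS, TOY_PL_OA };

/* GDS/GWS/OA are on-chip resources with no backing data to preserve. */
static bool is_contentless(enum toy_mem_type t)
{
    return t == TOY_PL_GDS || t == TOY_PL_GWS || t == TOY_PL_OA;
}

/* Sketch of the move decision the patch adds: if either end of the move
 * is a contentless domain, just retarget the BO instead of copying. */
static const char *decide_move(enum toy_mem_type from, enum toy_mem_type to)
{
    if (is_contentless(from) || is_contentless(to))
        return "null move (nothing to save)";
    return "copy contents";
}

int main(void)
{
    printf("VRAM -> SYSTEM: %s\n", decide_move(TOY_PL_VRAM, TOY_PL_SYSTEM));
    printf("GDS  -> SYSTEM: %s\n", decide_move(TOY_PL_GDS, TOY_PL_SYSTEM));
    return 0;
}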
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5305-drm-amdgpu-don-t-allocate-zero-sized-kernel-BOs.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5305-drm-amdgpu-don-t-allocate-zero-sized-kernel-BOs.patch
new file mode 100644
index 00000000..aee762cd
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5305-drm-amdgpu-don-t-allocate-zero-sized-kernel-BOs.patch
@@ -0,0 +1,35 @@
+From ca1acca379a1fffbc8d021d5c17228ce33fff3bc Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Fri, 14 Sep 2018 21:03:37 +0200
+Subject: [PATCH 5305/5725] drm/amdgpu: don't allocate zero sized kernel BOs
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Just free the BO if the requested size is zero.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index a4092d9..d96c9a9 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -277,6 +277,11 @@ int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
+ bool free = false;
+ int r;
+
++ if (!size) {
++ amdgpu_bo_unref(bo_ptr);
++ return 0;
++ }
++
+ memset(&bp, 0, sizeof(bp));
+ bp.size = size;
+ bp.byte_align = align;
+--
+2.7.4
+
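With this change, a zero-byte request to amdgpu_bo_create_reserved() frees any BO the caller still holds and returns success, so callers no longer need their own size checks (the following patch relies on exactly that). A standalone sketch of the contract (toy_bo_create is a hypothetical simplification using malloc/free rather than the real allocator):

#include <stdio.h>
#include <stdlib.h>

struct toy_bo { size_t size; };

/* Sketch of the create-or-free contract the patch introduces:
 * a requested size of zero frees whatever *bo_ptr holds and succeeds,
 * so callers can drop their own "is the size zero?" checks. */
static int toy_bo_create(size_t size, struct toy_bo **bo_ptr)
{
    if (!size) {
        free(*bo_ptr);
        *bo_ptr = NULL;
        return 0;
    }

    *bo_ptr = malloc(sizeof(**bo_ptr));
    if (!*bo_ptr)
        return -1;
    (*bo_ptr)->size = size;
    return 0;
}

int main(void)
{
    struct toy_bo *bo = NULL;

    toy_bo_create(4096, &bo);   /* normal allocation */
    printf("allocated %zu bytes\n", bo ? bo->size : (size_t)0);

    toy_bo_create(0, &bo);      /* zero size: freed, still success */
    printf("after zero-size request: %s\n", bo ? "still allocated" : "NULL");
    return 0;
}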
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5306-drm-amdgpu-drop-size-check.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5306-drm-amdgpu-drop-size-check.patch
new file mode 100644
index 00000000..2a547e26
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5306-drm-amdgpu-drop-size-check.patch
@@ -0,0 +1,46 @@
+From 227bc979d2b556195511885787bee0e67636db90 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Fri, 14 Sep 2018 21:06:50 +0200
+Subject: [PATCH 5306/5725] drm/amdgpu: drop size check
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+We no longer allocate zero-sized kernel BOs.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 15 ++++++---------
+ 1 file changed, 6 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index 212ce92..1654715 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -1990,15 +1990,12 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
+ * This is used for VGA emulation and pre-OS scanout buffers to
+ * avoid display artifacts while transitioning between pre-OS
+ * and driver. */
+- if (adev->gmc.stolen_size) {
+- r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE,
+- AMDGPU_GEM_DOMAIN_VRAM,
+- &adev->stolen_vga_memory,
+- NULL, NULL);
+- if (r)
+- return r;
+- }
+-
++ r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE,
++ AMDGPU_GEM_DOMAIN_VRAM,
++ &adev->stolen_vga_memory,
++ NULL, NULL);
++ if (r)
++ return r;
+ DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
+ (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5307-drm-amd-amdgpu-Avoid-fault-when-allocating-an-empty-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5307-drm-amd-amdgpu-Avoid-fault-when-allocating-an-empty-.patch
new file mode 100644
index 00000000..3062307a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5307-drm-amd-amdgpu-Avoid-fault-when-allocating-an-empty-.patch
@@ -0,0 +1,32 @@
+From d86e145a81a071d807a91a8c346dcde8da849a05 Mon Sep 17 00:00:00 2001
+From: Tom St Denis <tom.stdenis@amd.com>
+Date: Mon, 17 Sep 2018 14:07:00 -0400
+Subject: [PATCH 5307/5725] drm/amd/amdgpu: Avoid fault when allocating an
+ empty buffer object
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Signed-off-by: Tom St Denis <tom.stdenis@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index d96c9a9..18b87454 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -375,7 +375,8 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
+ if (r)
+ return r;
+
+- amdgpu_bo_unreserve(*bo_ptr);
++ if (*bo_ptr)
++ amdgpu_bo_unreserve(*bo_ptr);
+
+ return 0;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5308-drm-amdgpu-use-processed-values-for-counting.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5308-drm-amdgpu-use-processed-values-for-counting.patch
new file mode 100644
index 00000000..ddf5d91a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5308-drm-amdgpu-use-processed-values-for-counting.patch
@@ -0,0 +1,62 @@
+From 85e65fa1fac4c42a5d258e7cdb06cad53831609b Mon Sep 17 00:00:00 2001
+From: "A. Wilcox" <AWilcox@Wilcox-Tech.com>
+Date: Sun, 1 Jul 2018 22:44:52 -0500
+Subject: [PATCH 5308/5725] drm/amdgpu: use processed values for counting
+
+adev->gfx.rlc has the values from rlc_hdr already processed by
+le32_to_cpu. Using the rlc_hdr values on big-endian machines causes
+a kernel Oops due to writing well outside of the array (0x24000000
+instead of 0x24).
+
+Signed-off-by: A. Wilcox <AWilcox@Wilcox-Tech.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 4 ++--
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 4 ++--
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index 9a8f572..1d7034c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -1114,14 +1114,14 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
+
+ tmp = (unsigned int *)((uintptr_t)rlc_hdr +
+ le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
+- for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
++ for (i = 0 ; i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2); i++)
+ adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
+
+ adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
+
+ tmp = (unsigned int *)((uintptr_t)rlc_hdr +
+ le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
+- for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
++ for (i = 0 ; i < (adev->gfx.rlc.reg_list_size_bytes >> 2); i++)
+ adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
+
+ if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index e040c87..b9918ea 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -692,14 +692,14 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
+
+ tmp = (unsigned int *)((uintptr_t)rlc_hdr +
+ le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
+- for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
++ for (i = 0 ; i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2); i++)
+ adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
+
+ adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
+
+ tmp = (unsigned int *)((uintptr_t)rlc_hdr +
+ le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
+- for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
++ for (i = 0 ; i < (adev->gfx.rlc.reg_list_size_bytes >> 2); i++)
+ adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
+
+ if (adev->gfx.rlc.is_rlc_v2_1)
+--
+2.7.4
+
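The fix above matters because the raw header fields are stored little-endian: on a big-endian host a size of 0x24 reads back as 0x24000000, so the loop bound has to come from the already-converted copy in adev->gfx.rlc. A standalone sketch of the conversion (toy_le32_to_host is a stand-in for le32_to_cpu()):

#include <stdint.h>
#include <stdio.h>

/* Minimal le32-to-host helper for the sketch; the kernel uses le32_to_cpu(). */
static uint32_t toy_le32_to_host(uint32_t le)
{
    const uint8_t *b = (const uint8_t *)&le;

    return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
           ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

int main(void)
{
    /* 0x24 stored little-endian, as it appears in the firmware header. */
    uint32_t raw_le = 0;
    ((uint8_t *)&raw_le)[0] = 0x24;

    uint32_t converted = toy_le32_to_host(raw_le);

    /* On a big-endian host the raw field reads back as 0x24000000, which
     * is why the loop bound must come from the converted copy. */
    printf("raw field as host integer:          0x%x\n", raw_le);
    printf("converted (safe loop bound is >>2): 0x%x\n", converted >> 2);
    return 0;
}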
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5309-drm-amdgpu-update-vram_info-structure-in-atomfirmwar.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5309-drm-amdgpu-update-vram_info-structure-in-atomfirmwar.patch
new file mode 100644
index 00000000..c302e1dc
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5309-drm-amdgpu-update-vram_info-structure-in-atomfirmwar.patch
@@ -0,0 +1,88 @@
+From b1db2ee4989d51cfffe383672b40f2c60bf6abcb Mon Sep 17 00:00:00 2001
+From: Hawking Zhang <Hawking.Zhang@amd.com>
+Date: Mon, 17 Sep 2018 20:25:03 +0800
+Subject: [PATCH 5309/5725] drm/amdgpu: update vram_info structure in
+ atomfirmware.h
+
+atomfirmware has structure changes in vram_info. Update it
+to the latest version.
+
+Change-Id: Ie5d60413e5db1dfb4aaf23dc94bc5fd4ed0a01cd
+Signed-off-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 2 +-
+ drivers/gpu/drm/amd/include/atomfirmware.h | 20 +++++++++++---------
+ 2 files changed, 12 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+index 2369158..5461d0d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+@@ -174,7 +174,7 @@ static int convert_atom_mem_type_to_vram_type (struct amdgpu_device *adev,
+ case ATOM_DGPU_VRAM_TYPE_GDDR5:
+ vram_type = AMDGPU_VRAM_TYPE_GDDR5;
+ break;
+- case ATOM_DGPU_VRAM_TYPE_HBM:
++ case ATOM_DGPU_VRAM_TYPE_HBM2:
+ vram_type = AMDGPU_VRAM_TYPE_HBM;
+ break;
+ default:
+diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
+index 6109a45..8ae7adb 100644
+--- a/drivers/gpu/drm/amd/include/atomfirmware.h
++++ b/drivers/gpu/drm/amd/include/atomfirmware.h
+@@ -179,7 +179,7 @@ enum atom_voltage_type
+
+ enum atom_dgpu_vram_type{
+ ATOM_DGPU_VRAM_TYPE_GDDR5 = 0x50,
+- ATOM_DGPU_VRAM_TYPE_HBM = 0x60,
++ ATOM_DGPU_VRAM_TYPE_HBM2 = 0x60,
+ };
+
+ enum atom_dp_vs_preemph_def{
+@@ -1699,10 +1699,10 @@ struct atom_vram_module_v9
+ {
+ // Design Specific Values
+ uint32_t memory_size; // Total memory size in unit of MB for CONFIG_MEMSIZE zeros
+- uint32_t channel_enable; // for 32 channel ASIC usage
+- uint32_t umcch_addrcfg;
+- uint32_t umcch_addrsel;
+- uint32_t umcch_colsel;
++ uint32_t channel_enable; // bit vector, each bit indicate specific channel enable or not
++ uint32_t max_mem_clk; // max memory clock of this memory in unit of 10kHz, =0 means it is not defined
++ uint16_t reserved[3];
++ uint16_t mem_voltage; // mem_voltage
+ uint16_t vram_module_size; // Size of atom_vram_module_v9
+ uint8_t ext_memory_id; // Current memory module ID
+ uint8_t memory_type; // enum of atom_dgpu_vram_type
+@@ -1712,20 +1712,22 @@ struct atom_vram_module_v9
+ uint8_t tunningset_id; // MC phy registers set per.
+ uint8_t vender_rev_id; // [7:4] Revision, [3:0] Vendor code
+ uint8_t refreshrate; // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms)
+- uint16_t vram_rsd2; // reserved
++ uint8_t hbm_ven_rev_id; // hbm_ven_rev_id
++ uint8_t vram_rsd2; // reserved
+ char dram_pnstring[20]; // part number end with '0'.
+ };
+
+-
+ struct atom_vram_info_header_v2_3
+ {
+- struct atom_common_table_header table_header;
++ struct atom_common_table_header table_header;
+ uint16_t mem_adjust_tbloffset; // offset of atom_umc_init_reg_block structure for memory vendor specific UMC adjust setting
+ uint16_t mem_clk_patch_tbloffset; // offset of atom_umc_init_reg_block structure for memory clock specific UMC setting
+ uint16_t mc_adjust_pertile_tbloffset; // offset of atom_umc_init_reg_block structure for Per Byte Offset Preset Settings
+ uint16_t mc_phyinit_tbloffset; // offset of atom_umc_init_reg_block structure for MC phy init set
+ uint16_t dram_data_remap_tbloffset; // reserved for now
+- uint16_t vram_rsd2[3];
++ uint16_t tmrs_seq_offset; // offset of HBM tmrs
++ uint16_t post_ucode_init_offset; // offset of atom_umc_init_reg_block structure for MC phy init after MC uCode complete umc init
++ uint16_t vram_rsd2;
+ uint8_t vram_module_num; // indicate number of VRAM module
+ uint8_t vram_rsd1[2];
+ uint8_t mc_phy_tile_num; // indicate the MCD tile number which use in DramDataRemapTbl and usMcAdjustPerTileTblOffset
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5310-drm-amdgpu-fix-unknown-vram-mem-type-for-vega20.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5310-drm-amdgpu-fix-unknown-vram-mem-type-for-vega20.patch
new file mode 100644
index 00000000..d31373ba
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5310-drm-amdgpu-fix-unknown-vram-mem-type-for-vega20.patch
@@ -0,0 +1,69 @@
+From 78950f411da864f192b13ee8bca24fdf7e657544 Mon Sep 17 00:00:00 2001
+From: Hawking Zhang <Hawking.Zhang@amd.com>
+Date: Mon, 17 Sep 2018 20:19:48 +0800
+Subject: [PATCH 5310/5725] drm/amdgpu: fix unknown vram mem type for vega20
+
+vega20 should use umc_info v3_3 instead of v3_1. There are
+several versions of umc_info across the vega series. Unlike
+those structures, the vram_info structure is unified for the
+vega series, so this patch switches to querying mem_type from
+the vram_info structure for all vega series dGPUs.
+
+Change-Id: If8d22b687ec5d0f4445527e69841df83479cc485
+Signed-off-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 14 +++++++++-----
+ 1 file changed, 9 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+index 5461d0d..b61e1dc 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+@@ -117,6 +117,10 @@ union igp_info {
+ union umc_info {
+ struct atom_umc_info_v3_1 v31;
+ };
++
++union vram_info {
++ struct atom_vram_info_header_v2_3 v23;
++};
+ /*
+ * Return vram width from integrated system info table, if available,
+ * or 0 if not.
+@@ -195,7 +199,7 @@ int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev)
+ int index;
+ u16 data_offset, size;
+ union igp_info *igp_info;
+- union umc_info *umc_info;
++ union vram_info *vram_info;
+ u8 frev, crev;
+ u8 mem_type;
+
+@@ -204,7 +208,7 @@ int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev)
+ integratedsysteminfo);
+ else
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+- umc_info);
++ vram_info);
+ if (amdgpu_atom_parse_data_header(mode_info->atom_context,
+ index, &size,
+ &frev, &crev, &data_offset)) {
+@@ -219,11 +223,11 @@ int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev)
+ return 0;
+ }
+ } else {
+- umc_info = (union umc_info *)
++ vram_info = (union vram_info *)
+ (mode_info->atom_context->bios + data_offset);
+ switch (crev) {
+- case 1:
+- mem_type = umc_info->v31.vram_type;
++ case 3:
++ mem_type = vram_info->v23.vram_module[0].memory_type;
+ return convert_atom_mem_type_to_vram_type(adev, mem_type);
+ default:
+ return 0;
+--
+2.7.4
+
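The lookup above parses the common table header first and then only interprets the table body with the layout matching the reported content revision, falling back to "unknown" otherwise. A standalone sketch of that crev dispatch (cut-down toy structures, not the real atomfirmware layouts):

#include <stdint.h>
#include <stdio.h>

/* Illustrative cut-down layouts; the real ones are the atomfirmware
 * vram_info / vram_module structures. */
struct toy_vram_module_v9 { uint8_t memory_type; };
struct toy_vram_info_v2_3 { struct toy_vram_module_v9 vram_module[1]; };

union toy_vram_info {
    struct toy_vram_info_v2_3 v23;
};

/* Sketch of the dispatch the patch uses: only interpret the table with
 * the layout that matches the parsed content revision (crev). */
static int get_mem_type(const union toy_vram_info *info, uint8_t crev)
{
    switch (crev) {
    case 3:
        return info->v23.vram_module[0].memory_type;
    default:
        return 0; /* unknown revision: report "no type" rather than guess */
    }
}

int main(void)
{
    union toy_vram_info info = { .v23 = { .vram_module = { { 0x60 } } } };

    printf("mem_type:                0x%x\n", get_mem_type(&info, 3));
    printf("mem_type (unknown crev): 0x%x\n", get_mem_type(&info, 1));
    return 0;
}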
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5311-drm-amd-powerplay-update-OD-feature-judgement.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5311-drm-amd-powerplay-update-OD-feature-judgement.patch
new file mode 100644
index 00000000..f41d91c9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5311-drm-amd-powerplay-update-OD-feature-judgement.patch
@@ -0,0 +1,148 @@
+From 8cecb19d79f04b625112604dd14d9fac3b28e3c7 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Wed, 12 Sep 2018 11:45:01 +0800
+Subject: [PATCH 5311/5725] drm/amd/powerplay: update OD feature judgement
+
+Update the conditions used to judge whether an OD feature
+should be supported on vega20.
+
+Change-Id: Iaabdd4db8f685fb94c960263fe38a21b36377aa2
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 79 +++++++++++++++-------
+ .../gpu/drm/amd/powerplay/hwmgr/vega20_pptable.h | 2 +
+ 2 files changed, 55 insertions(+), 26 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+index 3efd59e..afc61b5 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+@@ -832,58 +832,85 @@ static int vega20_od8_set_feature_capabilities(
+ struct phm_ppt_v3_information *pptable_information =
+ (struct phm_ppt_v3_information *)hwmgr->pptable;
+ struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
++ PPTable_t *pp_table = &(data->smc_state_table.pp_table);
+ struct vega20_od8_settings *od_settings = &(data->od8_settings);
+
+ od_settings->overdrive8_capabilities = 0;
+
+ if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
+- if (pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_GFXCLKFMAX] > 0 &&
+- pptable_information->od_settings_min[ATOM_VEGA20_ODSETTING_GFXCLKFMAX] > 0 &&
+- pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_GFXCLKFMIN] > 0 &&
+- pptable_information->od_settings_min[ATOM_VEGA20_ODSETTING_GFXCLKFMIN] > 0)
++ if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_GFXCLK_LIMITS] &&
++ pptable_information->od_settings_max[OD8_SETTING_GFXCLK_FMAX] > 0 &&
++ pptable_information->od_settings_min[OD8_SETTING_GFXCLK_FMIN] > 0 &&
++ (pptable_information->od_settings_max[OD8_SETTING_GFXCLK_FMAX] >=
++ pptable_information->od_settings_min[OD8_SETTING_GFXCLK_FMIN]))
+ od_settings->overdrive8_capabilities |= OD8_GFXCLK_LIMITS;
+
+- if (pptable_information->od_settings_min[ATOM_VEGA20_ODSETTING_VDDGFXCURVEFREQ_P1] > 0 &&
+- pptable_information->od_settings_min[ATOM_VEGA20_ODSETTING_VDDGFXCURVEFREQ_P2] > 0 &&
+- pptable_information->od_settings_min[ATOM_VEGA20_ODSETTING_VDDGFXCURVEFREQ_P3] > 0 &&
+- pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_VDDGFXCURVEFREQ_P1] > 0 &&
+- pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_VDDGFXCURVEFREQ_P2] > 0 &&
+- pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_VDDGFXCURVEFREQ_P3] > 0 &&
+- pptable_information->od_settings_min[ATOM_VEGA20_ODSETTING_VDDGFXCURVEVOLTAGEOFFSET_P1] > 0 &&
+- pptable_information->od_settings_min[ATOM_VEGA20_ODSETTING_VDDGFXCURVEVOLTAGEOFFSET_P2] > 0 &&
+- pptable_information->od_settings_min[ATOM_VEGA20_ODSETTING_VDDGFXCURVEVOLTAGEOFFSET_P3] > 0 &&
+- pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_VDDGFXCURVEVOLTAGEOFFSET_P1] > 0 &&
+- pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_VDDGFXCURVEVOLTAGEOFFSET_P2] > 0 &&
+- pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_VDDGFXCURVEVOLTAGEOFFSET_P3] > 0)
++ if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_GFXCLK_CURVE] &&
++ (pptable_information->od_settings_min[OD8_SETTING_GFXCLK_VOLTAGE1] >=
++ pp_table->MinVoltageGfx / VOLTAGE_SCALE) &&
++ (pptable_information->od_settings_max[OD8_SETTING_GFXCLK_VOLTAGE3] <=
++ pp_table->MaxVoltageGfx / VOLTAGE_SCALE) &&
++ (pptable_information->od_settings_max[OD8_SETTING_GFXCLK_VOLTAGE3] >=
++ pptable_information->od_settings_min[OD8_SETTING_GFXCLK_VOLTAGE1]))
+ od_settings->overdrive8_capabilities |= OD8_GFXCLK_CURVE;
+ }
+
+ if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+- if (pptable_information->od_settings_min[ATOM_VEGA20_ODSETTING_UCLKFMAX] > 0 &&
+- pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_UCLKFMAX] > 0)
++ if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_UCLK_MAX] &&
++ pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX] > 0 &&
++ pptable_information->od_settings_max[OD8_SETTING_UCLK_FMAX] > 0 &&
++ (pptable_information->od_settings_max[OD8_SETTING_UCLK_FMAX] >=
++ pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX]))
+ od_settings->overdrive8_capabilities |= OD8_UCLK_MAX;
+ }
+
+- if (pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE] > 0 &&
+- pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE] <= 100)
++ if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_POWER_LIMIT] &&
++ pptable_information->od_settings_max[OD8_SETTING_POWER_PERCENTAGE] > 0 &&
++ pptable_information->od_settings_max[OD8_SETTING_POWER_PERCENTAGE] <= 100 &&
++ pptable_information->od_settings_min[OD8_SETTING_POWER_PERCENTAGE] > 0 &&
++ pptable_information->od_settings_min[OD8_SETTING_POWER_PERCENTAGE] <= 100)
+ od_settings->overdrive8_capabilities |= OD8_POWER_LIMIT;
+
+ if (data->smu_features[GNLD_FAN_CONTROL].enabled) {
+- if (pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_FANRPMMIN] > 0)
+- od_settings->overdrive8_capabilities |= OD8_FAN_SPEED_MIN;
+-
+- if (pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_FANRPMACOUSTICLIMIT] > 0)
++ if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_ACOUSTIC_LIMIT] &&
++ pptable_information->od_settings_min[OD8_SETTING_FAN_ACOUSTIC_LIMIT] > 0 &&
++ pptable_information->od_settings_max[OD8_SETTING_FAN_ACOUSTIC_LIMIT] > 0 &&
++ (pptable_information->od_settings_max[OD8_SETTING_FAN_ACOUSTIC_LIMIT] >=
++ pptable_information->od_settings_min[OD8_SETTING_FAN_ACOUSTIC_LIMIT]))
+ od_settings->overdrive8_capabilities |= OD8_ACOUSTIC_LIMIT_SCLK;
++
++ if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_SPEED_MIN] &&
++ (pptable_information->od_settings_min[OD8_SETTING_FAN_MIN_SPEED] >=
++ (pp_table->FanPwmMin * pp_table->FanMaximumRpm / 100)) &&
++ pptable_information->od_settings_max[OD8_SETTING_FAN_MIN_SPEED] > 0 &&
++ (pptable_information->od_settings_max[OD8_SETTING_FAN_MIN_SPEED] >=
++ pptable_information->od_settings_min[OD8_SETTING_FAN_MIN_SPEED]))
++ od_settings->overdrive8_capabilities |= OD8_FAN_SPEED_MIN;
+ }
+
+ if (data->smu_features[GNLD_THERMAL].enabled) {
+- if (pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_FANTARGETTEMPERATURE] > 0)
++ if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_TEMPERATURE_FAN] &&
++ pptable_information->od_settings_max[OD8_SETTING_FAN_TARGET_TEMP] > 0 &&
++ pptable_information->od_settings_min[OD8_SETTING_FAN_TARGET_TEMP] > 0 &&
++ (pptable_information->od_settings_max[OD8_SETTING_FAN_TARGET_TEMP] >=
++ pptable_information->od_settings_min[OD8_SETTING_FAN_TARGET_TEMP]))
+ od_settings->overdrive8_capabilities |= OD8_TEMPERATURE_FAN;
+
+- if (pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_OPERATINGTEMPMAX] > 0)
++ if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_TEMPERATURE_SYSTEM] &&
++ pptable_information->od_settings_max[OD8_SETTING_OPERATING_TEMP_MAX] > 0 &&
++ pptable_information->od_settings_min[OD8_SETTING_OPERATING_TEMP_MAX] > 0 &&
++ (pptable_information->od_settings_max[OD8_SETTING_OPERATING_TEMP_MAX] >=
++ pptable_information->od_settings_min[OD8_SETTING_OPERATING_TEMP_MAX]))
+ od_settings->overdrive8_capabilities |= OD8_TEMPERATURE_SYSTEM;
+ }
+
++ if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_MEMORY_TIMING_TUNE])
++ od_settings->overdrive8_capabilities |= OD8_MEMORY_TIMING_TUNE;
++
++ if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_ZERO_RPM_CONTROL] &&
++ pp_table->FanZeroRpmEnable)
++ od_settings->overdrive8_capabilities |= OD8_FAN_ZERO_RPM_CONTROL;
++
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_pptable.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_pptable.h
+index b104f6a..2222e29 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_pptable.h
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_pptable.h
+@@ -49,6 +49,8 @@ enum ATOM_VEGA20_ODFEATURE_ID {
+ ATOM_VEGA20_ODFEATURE_FAN_SPEED_MIN, //FanMinimumPwm
+ ATOM_VEGA20_ODFEATURE_TEMPERATURE_FAN, //FanTargetTemperature
+ ATOM_VEGA20_ODFEATURE_TEMPERATURE_SYSTEM, //MaxOpTemp
++ ATOM_VEGA20_ODFEATURE_MEMORY_TIMING_TUNE,
++ ATOM_VEGA20_ODFEATURE_FAN_ZERO_RPM_CONTROL,
+ ATOM_VEGA20_ODFEATURE_COUNT,
+ };
+
+--
+2.7.4
+
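For illustration only, the gating pattern that vega20_od8_set_feature_capabilities() applies in the patch above reduces to the shape below: an overdrive capability bit is reported only when the powerplay table flags the feature and the advertised setting range is sane. The struct, field names and bit value here are placeholders, not the real vega20 types.

/* Simplified sketch of the OD capability gating: report a capability
 * bit only when the pptable flags the feature and 0 < min <= max.
 * Placeholder types and values, not the actual driver structures. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define OD8_UCLK_MAX (1u << 2)          /* placeholder bit value */

struct od_limits {
        bool supported;                 /* od_feature_capabilities[...] */
        int  min;                       /* od_settings_min[...] */
        int  max;                       /* od_settings_max[...] */
};

static uint32_t judge_uclk_max(const struct od_limits *l)
{
        if (l->supported && l->min > 0 && l->max >= l->min)
                return OD8_UCLK_MAX;
        return 0;
}

int main(void)
{
        struct od_limits uclk = { .supported = true, .min = 625, .max = 1250 };

        printf("capabilities: 0x%x\n", (unsigned)judge_uclk_max(&uclk));
        return 0;
}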
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5312-drm-amd-powerplay-update-OD-to-take-voltage-value-in.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5312-drm-amd-powerplay-update-OD-to-take-voltage-value-in.patch
new file mode 100644
index 00000000..b02e1099
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5312-drm-amd-powerplay-update-OD-to-take-voltage-value-in.patch
@@ -0,0 +1,327 @@
+From e3ea525573a45e8b50a72da6845864f84169eedb Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Thu, 13 Sep 2018 16:14:33 +0800
+Subject: [PATCH 5312/5725] drm/amd/powerplay: update OD to take voltage value
+ instead of offset
+
+With the latest SMC firmware, we are able to get the voltage value for
+a specific frequency point. So, we update the OD-related interfaces to
+take an absolute voltage instead of an offset.
+
+Change-Id: I4ac8518f518cf3d70e59b16e3ea2102cd63c52d6
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 12 +--
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 112 +++++++++++++++------
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h | 4 +
+ .../gpu/drm/amd/powerplay/inc/smu11_driver_if.h | 6 +-
+ drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h | 3 +-
+ 5 files changed, 96 insertions(+), 41 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+index e63b0c2..9c5036f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+@@ -502,7 +502,7 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
+ *
+ * - maximum memory clock labeled OD_MCLK
+ *
+- * - three <frequency, voltage offset> points labeled OD_VDDC_CURVE.
++ * - three <frequency, voltage> points labeled OD_VDDC_CURVE.
+ * They can be used to calibrate the sclk voltage curve.
+ *
+ * - a list of valid ranges for sclk, mclk, and voltage curve points
+@@ -519,11 +519,11 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
+ * "m 1 800" will update maximum mclk to be 800Mhz.
+ *
+ * For sclk voltage curve, enter the new values by writing a
+- * string that contains "vc point clock voff" to the file. The
+- * points are indexed by 0, 1 and 2. E.g., "vc 0 300 10" will
+- * update point1 with clock set as 300Mhz and voltage increased
+- * by 10mV. "vc 2 1000 -10" will update point3 with clock set
+- * as 1000Mhz and voltage drop by 10mV.
++ * string that contains "vc point clock voltage" to the file. The
++ * points are indexed by 0, 1 and 2. E.g., "vc 0 300 600" will
++ * update point1 with clock set as 300Mhz and voltage as
++ * 600mV. "vc 2 1000 1000" will update point3 with clock set
++ * as 1000Mhz and voltage 1000mV.
+ *
+ * - When you have edited all of the states as needed, write "c" (commit)
+ * to the file to commit your changes
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+index afc61b5..4ab7288 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+@@ -1001,6 +1001,26 @@ static int vega20_od8_set_feature_id(
+ return 0;
+ }
+
++static int vega20_od8_get_gfx_clock_base_voltage(
++ struct pp_hwmgr *hwmgr,
++ uint32_t *voltage,
++ uint32_t freq)
++{
++ int ret = 0;
++
++ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
++ PPSMC_MSG_GetAVFSVoltageByDpm,
++ ((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) | freq));
++ PP_ASSERT_WITH_CODE(!ret,
++ "[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!",
++ return ret);
++
++ vega20_read_arg_from_smc(hwmgr, voltage);
++ *voltage = *voltage / VOLTAGE_SCALE;
++
++ return 0;
++}
++
+ static int vega20_od8_initialize_default_settings(
+ struct pp_hwmgr *hwmgr)
+ {
+@@ -1036,18 +1056,41 @@ static int vega20_od8_initialize_default_settings(
+ }
+
+ if (od8_settings->overdrive8_capabilities & OD8_GFXCLK_CURVE) {
++ od_table->GfxclkFreq1 = od_table->GfxclkFmin;
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].default_value =
+ od_table->GfxclkFreq1;
+- od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value =
+- od_table->GfxclkOffsetVolt1;
+- od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].default_value =
+- od_table->GfxclkFreq2;
+- od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value =
+- od_table->GfxclkOffsetVolt2;
++
++ od_table->GfxclkFreq3 = od_table->GfxclkFmax;
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].default_value =
+ od_table->GfxclkFreq3;
+- od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value =
+- od_table->GfxclkOffsetVolt3;
++
++ od_table->GfxclkFreq2 = (od_table->GfxclkFreq1 + od_table->GfxclkFreq3) / 2;
++ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].default_value =
++ od_table->GfxclkFreq2;
++
++ PP_ASSERT_WITH_CODE(!vega20_od8_get_gfx_clock_base_voltage(hwmgr,
++ &(od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value),
++ od_table->GfxclkFreq1),
++ "[PhwVega20_OD8_InitializeDefaultSettings] Failed to get Base clock voltage from SMU!",
++ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value = 0);
++ od_table->GfxclkVolt1 = od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value
++ * VOLTAGE_SCALE;
++
++ PP_ASSERT_WITH_CODE(!vega20_od8_get_gfx_clock_base_voltage(hwmgr,
++ &(od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value),
++ od_table->GfxclkFreq2),
++ "[PhwVega20_OD8_InitializeDefaultSettings] Failed to get Base clock voltage from SMU!",
++ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value = 0);
++ od_table->GfxclkVolt2 = od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value
++ * VOLTAGE_SCALE;
++
++ PP_ASSERT_WITH_CODE(!vega20_od8_get_gfx_clock_base_voltage(hwmgr,
++ &(od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value),
++ od_table->GfxclkFreq3),
++ "[PhwVega20_OD8_InitializeDefaultSettings] Failed to get Base clock voltage from SMU!",
++ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value = 0);
++ od_table->GfxclkVolt3 = od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value
++ * VOLTAGE_SCALE;
+ } else {
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].default_value =
+ 0;
+@@ -1086,7 +1129,7 @@ static int vega20_od8_initialize_default_settings(
+
+ if (od8_settings->overdrive8_capabilities & OD8_FAN_SPEED_MIN)
+ od8_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].default_value =
+- od_table->FanMinimumPwm;
++ od_table->FanMinimumPwm * data->smc_state_table.pp_table.FanMaximumRpm / 100;
+ else
+ od8_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].default_value =
+ 0;
+@@ -1123,6 +1166,11 @@ static int vega20_od8_initialize_default_settings(
+ }
+ }
+
++ ret = vega20_copy_table_to_smc(hwmgr, (uint8_t *)od_table, TABLE_OVERDRIVE);
++ PP_ASSERT_WITH_CODE(!ret,
++ "Failed to import over drive table!",
++ return ret);
++
+ return 0;
+ }
+
+@@ -1150,19 +1198,19 @@ static int vega20_od8_set_settings(
+ od_table.GfxclkFreq1 = (uint16_t)value;
+ break;
+ case OD8_SETTING_GFXCLK_VOLTAGE1:
+- od_table.GfxclkOffsetVolt1 = (uint16_t)value;
++ od_table.GfxclkVolt1 = (uint16_t)value;
+ break;
+ case OD8_SETTING_GFXCLK_FREQ2:
+ od_table.GfxclkFreq2 = (uint16_t)value;
+ break;
+ case OD8_SETTING_GFXCLK_VOLTAGE2:
+- od_table.GfxclkOffsetVolt2 = (uint16_t)value;
++ od_table.GfxclkVolt2 = (uint16_t)value;
+ break;
+ case OD8_SETTING_GFXCLK_FREQ3:
+ od_table.GfxclkFreq3 = (uint16_t)value;
+ break;
+ case OD8_SETTING_GFXCLK_VOLTAGE3:
+- od_table.GfxclkOffsetVolt3 = (uint16_t)value;
++ od_table.GfxclkVolt3 = (uint16_t)value;
+ break;
+ case OD8_SETTING_UCLK_FMAX:
+ od_table.UclkFmax = (uint16_t)value;
+@@ -2364,6 +2412,7 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
+ &(data->smc_state_table.overdrive_table);
+ struct pp_clock_levels_with_latency clocks;
+ int32_t input_index, input_clk, input_vol, i;
++ int od8_id;
+ int ret;
+
+ PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage",
+@@ -2480,37 +2529,38 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
+ return -EINVAL;
+ }
+
+- if (input_clk < od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value ||
+- input_clk > od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value) {
++ od8_id = OD8_SETTING_GFXCLK_FREQ1 + 2 * input_index;
++ if (input_clk < od8_settings[od8_id].min_value ||
++ input_clk > od8_settings[od8_id].max_value) {
+ pr_info("clock freq %d is not within allowed range [%d - %d]\n",
+ input_clk,
+- od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value,
+- od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value);
++ od8_settings[od8_id].min_value,
++ od8_settings[od8_id].max_value);
+ return -EINVAL;
+ }
+
+- /* TODO: suppose voltage1/2/3 has the same min/max value */
+- if (input_vol < od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].min_value ||
+- input_vol > od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].max_value) {
+- pr_info("clock voltage offset %d is not within allowed range [%d - %d]\n",
++ od8_id = OD8_SETTING_GFXCLK_VOLTAGE1 + 2 * input_index;
++ if (input_vol < od8_settings[od8_id].min_value ||
++ input_vol > od8_settings[od8_id].max_value) {
++ pr_info("clock voltage %d is not within allowed range [%d - %d]\n",
+ input_vol,
+- od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].min_value,
+- od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].max_value);
++ od8_settings[od8_id].min_value,
++ od8_settings[od8_id].max_value);
+ return -EINVAL;
+ }
+
+ switch (input_index) {
+ case 0:
+ od_table->GfxclkFreq1 = input_clk;
+- od_table->GfxclkOffsetVolt1 = input_vol;
++ od_table->GfxclkVolt1 = input_vol * VOLTAGE_SCALE;
+ break;
+ case 1:
+ od_table->GfxclkFreq2 = input_clk;
+- od_table->GfxclkOffsetVolt2 = input_vol;
++ od_table->GfxclkVolt2 = input_vol * VOLTAGE_SCALE;
+ break;
+ case 2:
+ od_table->GfxclkFreq3 = input_clk;
+- od_table->GfxclkOffsetVolt3 = input_vol;
++ od_table->GfxclkVolt3 = input_vol * VOLTAGE_SCALE;
+ break;
+ }
+ }
+@@ -2623,13 +2673,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
+ size = sprintf(buf, "%s:\n", "OD_VDDC_CURVE");
+ size += sprintf(buf + size, "0: %10uMhz %10dmV\n",
+ od_table->GfxclkFreq1,
+- od_table->GfxclkOffsetVolt1);
++ od_table->GfxclkVolt1 / VOLTAGE_SCALE);
+ size += sprintf(buf + size, "1: %10uMhz %10dmV\n",
+ od_table->GfxclkFreq2,
+- od_table->GfxclkOffsetVolt2);
++ od_table->GfxclkVolt2 / VOLTAGE_SCALE);
+ size += sprintf(buf + size, "2: %10uMhz %10dmV\n",
+ od_table->GfxclkFreq3,
+- od_table->GfxclkOffsetVolt3);
++ od_table->GfxclkVolt3 / VOLTAGE_SCALE);
+ }
+
+ break;
+@@ -2664,19 +2714,19 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
+ size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
+ od8_settings[OD8_SETTING_GFXCLK_FREQ1].min_value,
+ od8_settings[OD8_SETTING_GFXCLK_FREQ1].max_value);
+- size += sprintf(buf + size, "VDDC_CURVE_VOFF[0]: %7dmV %11dmV\n",
++ size += sprintf(buf + size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
+ od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].min_value,
+ od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].max_value);
+ size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
+ od8_settings[OD8_SETTING_GFXCLK_FREQ2].min_value,
+ od8_settings[OD8_SETTING_GFXCLK_FREQ2].max_value);
+- size += sprintf(buf + size, "VDDC_CURVE_VOFF[1]: %7dmV %11dmV\n",
++ size += sprintf(buf + size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
+ od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].min_value,
+ od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].max_value);
+ size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
+ od8_settings[OD8_SETTING_GFXCLK_FREQ3].min_value,
+ od8_settings[OD8_SETTING_GFXCLK_FREQ3].max_value);
+- size += sprintf(buf + size, "VDDC_CURVE_VOFF[2]: %7dmV %11dmV\n",
++ size += sprintf(buf + size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
+ od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].min_value,
+ od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].max_value);
+ }
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
+index 72e4f2a..b71a5f2 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
+@@ -38,6 +38,10 @@
+ #define VG20_PSUEDO_NUM_DCEFCLK_DPM_LEVELS 8
+ #define VG20_PSUEDO_NUM_UCLK_DPM_LEVELS 4
+
++//OverDriver8 macro defs
++#define AVFS_CURVE 0
++#define OD8_HOTCURVE_TEMPERATURE 85
++
+ typedef uint32_t PP_Clock;
+
+ enum {
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
+index 59e621e..71191de 100644
+--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
+@@ -569,11 +569,11 @@ typedef struct {
+ uint16_t GfxclkFmin;
+ uint16_t GfxclkFmax;
+ uint16_t GfxclkFreq1;
+- int16_t GfxclkOffsetVolt1;
++ uint16_t GfxclkVolt1;
+ uint16_t GfxclkFreq2;
+- int16_t GfxclkOffsetVolt2;
++ uint16_t GfxclkVolt2;
+ uint16_t GfxclkFreq3;
+- int16_t GfxclkOffsetVolt3;
++ uint16_t GfxclkVolt3;
+ uint16_t UclkFmax;
+ int16_t OverDrivePct;
+ uint16_t FanMaximumRpm;
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
+index 165429f..45d64a8 100644
+--- a/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
+@@ -117,7 +117,8 @@
+ #define PPSMC_MSG_PrepareMp1ForReset 0x59
+ #define PPSMC_MSG_PrepareMp1ForShutdown 0x5A
+ #define PPSMC_MSG_SetMGpuFanBoostLimitRpm 0x5D
+-#define PPSMC_Message_Count 0x5E
++#define PPSMC_MSG_GetAVFSVoltageByDpm 0x5F
++#define PPSMC_Message_Count 0x60
+
+ typedef uint32_t PPSMC_Result;
+ typedef uint32_t PPSMC_Msg;
+--
+2.7.4
+
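The amdgpu_pm.c documentation hunk above describes the new absolute-voltage syntax for OD_VDDC_CURVE in terms of raw writes to pp_od_clk_voltage. As a minimal user-space sketch only — card0 and the exact sysfs path are assumptions that vary per system — applying one curve point and committing it could look like this:

/* Sketch: set VDDC curve point 0 to 300 MHz at 600 mV (absolute
 * voltage, per the syntax documented above), then commit with "c".
 * The sysfs path below is an assumption; adjust the card index. */
#include <stdio.h>
#include <stdlib.h>

static int write_cmd(const char *path, const char *cmd)
{
        FILE *f = fopen(path, "w");

        if (!f) {
                perror(path);
                return -1;
        }
        if (fputs(cmd, f) == EOF) {
                perror("write");
                fclose(f);
                return -1;
        }
        return fclose(f);
}

int main(void)
{
        const char *path = "/sys/class/drm/card0/device/pp_od_clk_voltage";

        if (write_cmd(path, "vc 0 300 600\n"))
                return EXIT_FAILURE;
        if (write_cmd(path, "c\n"))
                return EXIT_FAILURE;
        return EXIT_SUCCESS;
}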
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5313-drm-amd-powerplay-retrieve-the-updated-clock-table-a.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5313-drm-amd-powerplay-retrieve-the-updated-clock-table-a.patch
new file mode 100644
index 00000000..705453ed
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5313-drm-amd-powerplay-retrieve-the-updated-clock-table-a.patch
@@ -0,0 +1,246 @@
+From b83aec9f317522fd260a36e242518d4cefd0b5e4 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Mon, 17 Sep 2018 14:59:54 +0800
+Subject: [PATCH 5313/5725] drm/amd/powerplay: retrieve the updated clock table
+ after OD
+
+With OD settings applied, the clock table will be updated accordingly.
+We then need to retrieve the updated clock tables.
+
+Change-Id: Iad4e95d3f195a0217456d41e495730578209062b
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 114 ++++++++++++++++-----
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h | 2 +
+ 2 files changed, 90 insertions(+), 26 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+index 4ab7288..7dcfc79 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+@@ -514,6 +514,47 @@ static int vega20_setup_single_dpm_table(struct pp_hwmgr *hwmgr,
+ return ret;
+ }
+
++static int vega20_setup_gfxclk_dpm_table(struct pp_hwmgr *hwmgr)
++{
++ struct vega20_hwmgr *data =
++ (struct vega20_hwmgr *)(hwmgr->backend);
++ struct vega20_single_dpm_table *dpm_table;
++ int ret = 0;
++
++ dpm_table = &(data->dpm_table.gfx_table);
++ if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
++ ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_GFXCLK);
++ PP_ASSERT_WITH_CODE(!ret,
++ "[SetupDefaultDpmTable] failed to get gfxclk dpm levels!",
++ return ret);
++ } else {
++ dpm_table->count = 1;
++ dpm_table->dpm_levels[0].value = data->vbios_boot_state.gfx_clock / 100;
++ }
++
++ return ret;
++}
++
++static int vega20_setup_memclk_dpm_table(struct pp_hwmgr *hwmgr)
++{
++ struct vega20_hwmgr *data =
++ (struct vega20_hwmgr *)(hwmgr->backend);
++ struct vega20_single_dpm_table *dpm_table;
++ int ret = 0;
++
++ dpm_table = &(data->dpm_table.mem_table);
++ if (data->smu_features[GNLD_DPM_UCLK].enabled) {
++ ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_UCLK);
++ PP_ASSERT_WITH_CODE(!ret,
++ "[SetupDefaultDpmTable] failed to get memclk dpm levels!",
++ return ret);
++ } else {
++ dpm_table->count = 1;
++ dpm_table->dpm_levels[0].value = data->vbios_boot_state.mem_clock / 100;
++ }
++
++ return ret;
++}
+
+ /*
+ * This function is to initialize all DPM state tables
+@@ -547,28 +588,16 @@ static int vega20_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
+
+ /* gfxclk */
+ dpm_table = &(data->dpm_table.gfx_table);
+- if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
+- ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_GFXCLK);
+- PP_ASSERT_WITH_CODE(!ret,
+- "[SetupDefaultDpmTable] failed to get gfxclk dpm levels!",
+- return ret);
+- } else {
+- dpm_table->count = 1;
+- dpm_table->dpm_levels[0].value = data->vbios_boot_state.gfx_clock / 100;
+- }
++ ret = vega20_setup_gfxclk_dpm_table(hwmgr);
++ if (ret)
++ return ret;
+ vega20_init_dpm_state(&(dpm_table->dpm_state));
+
+ /* memclk */
+ dpm_table = &(data->dpm_table.mem_table);
+- if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+- ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_UCLK);
+- PP_ASSERT_WITH_CODE(!ret,
+- "[SetupDefaultDpmTable] failed to get memclk dpm levels!",
+- return ret);
+- } else {
+- dpm_table->count = 1;
+- dpm_table->dpm_levels[0].value = data->vbios_boot_state.mem_clock / 100;
+- }
++ ret = vega20_setup_memclk_dpm_table(hwmgr);
++ if (ret)
++ return ret;
+ vega20_init_dpm_state(&(dpm_table->dpm_state));
+
+ /* eclk */
+@@ -1181,6 +1210,9 @@ static int vega20_od8_set_settings(
+ {
+ OverDriveTable_t od_table;
+ int ret = 0;
++ struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
++ struct vega20_od8_single_setting *od8_settings =
++ data->od8_settings.od8_settings_array;
+
+ ret = vega20_copy_table_from_smc(hwmgr, (uint8_t *)(&od_table), TABLE_OVERDRIVE);
+ PP_ASSERT_WITH_CODE(!ret,
+@@ -1192,6 +1224,10 @@ static int vega20_od8_set_settings(
+ od_table.GfxclkFmin = (uint16_t)value;
+ break;
+ case OD8_SETTING_GFXCLK_FMAX:
++ if (value < od8_settings[OD8_SETTING_GFXCLK_FMAX].min_value ||
++ value > od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value)
++ return -EINVAL;
++
+ od_table.GfxclkFmax = (uint16_t)value;
+ break;
+ case OD8_SETTING_GFXCLK_FREQ1:
+@@ -1213,6 +1249,9 @@ static int vega20_od8_set_settings(
+ od_table.GfxclkVolt3 = (uint16_t)value;
+ break;
+ case OD8_SETTING_UCLK_FMAX:
++ if (value < od8_settings[OD8_SETTING_UCLK_FMAX].min_value ||
++ value > od8_settings[OD8_SETTING_UCLK_FMAX].max_value)
++ return -EINVAL;
+ od_table.UclkFmax = (uint16_t)value;
+ break;
+ case OD8_SETTING_POWER_PERCENTAGE:
+@@ -1262,8 +1301,6 @@ static int vega20_set_sclk_od(
+ struct pp_hwmgr *hwmgr, uint32_t value)
+ {
+ struct vega20_hwmgr *data = hwmgr->backend;
+- struct vega20_single_dpm_table *sclk_table =
+- &(data->dpm_table.gfx_table);
+ struct vega20_single_dpm_table *golden_sclk_table =
+ &(data->golden_dpm_table.gfx_table);
+ uint32_t od_sclk;
+@@ -1278,8 +1315,8 @@ static int vega20_set_sclk_od(
+ "[SetSclkOD] failed to set od gfxclk!",
+ return ret);
+
+- /* refresh gfxclk table */
+- ret = vega20_setup_single_dpm_table(hwmgr, sclk_table, PPCLK_GFXCLK);
++ /* retrieve updated gfxclk table */
++ ret = vega20_setup_gfxclk_dpm_table(hwmgr);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetSclkOD] failed to refresh gfxclk table!",
+ return ret);
+@@ -1309,8 +1346,6 @@ static int vega20_set_mclk_od(
+ struct pp_hwmgr *hwmgr, uint32_t value)
+ {
+ struct vega20_hwmgr *data = hwmgr->backend;
+- struct vega20_single_dpm_table *mclk_table =
+- &(data->dpm_table.mem_table);
+ struct vega20_single_dpm_table *golden_mclk_table =
+ &(data->golden_dpm_table.mem_table);
+ uint32_t od_mclk;
+@@ -1325,8 +1360,8 @@ static int vega20_set_mclk_od(
+ "[SetMclkOD] failed to set od memclk!",
+ return ret);
+
+- /* refresh memclk table */
+- ret = vega20_setup_single_dpm_table(hwmgr, mclk_table, PPCLK_UCLK);
++ /* retrieve updated memclk table */
++ ret = vega20_setup_memclk_dpm_table(hwmgr);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetMclkOD] failed to refresh memclk table!",
+ return ret);
+@@ -2451,6 +2486,10 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
+ return -EINVAL;
+ }
+
++ if ((input_index == 0 && od_table->GfxclkFmin != input_clk) ||
++ (input_index == 1 && od_table->GfxclkFmax != input_clk))
++ data->gfxclk_overdrive = true;
++
+ if (input_index == 0)
+ od_table->GfxclkFmin = input_clk;
+ else
+@@ -2495,6 +2534,9 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
+ return -EINVAL;
+ }
+
++ if (input_index == 1 && od_table->UclkFmax != input_clk)
++ data->memclk_overdrive = true;
++
+ od_table->UclkFmax = input_clk;
+ }
+
+@@ -2567,6 +2609,9 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
+ break;
+
+ case PP_OD_RESTORE_DEFAULT_TABLE:
++ data->gfxclk_overdrive = false;
++ data->memclk_overdrive = false;
++
+ ret = vega20_copy_table_from_smc(hwmgr,
+ (uint8_t *)od_table,
+ TABLE_OVERDRIVE);
+@@ -2583,6 +2628,23 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
+ "Failed to import overdrive table!",
+ return ret);
+
++ /* retrieve updated gfxclk table */
++ if (data->gfxclk_overdrive) {
++ data->gfxclk_overdrive = false;
++
++ ret = vega20_setup_gfxclk_dpm_table(hwmgr);
++ if (ret)
++ return ret;
++ }
++
++ /* retrieve updated memclk table */
++ if (data->memclk_overdrive) {
++ data->memclk_overdrive = false;
++
++ ret = vega20_setup_memclk_dpm_table(hwmgr);
++ if (ret)
++ return ret;
++ }
+ break;
+
+ default:
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
+index b71a5f2..56fe6a0 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
+@@ -502,6 +502,8 @@ struct vega20_hwmgr {
+
+ /* ---- Overdrive next setting ---- */
+ struct vega20_odn_data odn_data;
++ bool gfxclk_overdrive;
++ bool memclk_overdrive;
+
+ /* ---- Overdrive8 Setting ---- */
+ struct vega20_od8_settings od8_settings;
+--
+2.7.4
+
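The refactoring above splits the gfxclk and memclk table setup into per-clock helpers precisely so the same code can re-read the DPM levels after an overdrive commit. Roughly, each helper follows the fallback pattern sketched below; the types and the fake query callback are placeholders, not the real driver structures.

/* Sketch of the per-clock helper pattern: query the firmware for the
 * real DPM levels when the feature is enabled, otherwise expose the
 * boot clock as a single level. Placeholder types only. */
#include <stdio.h>
#include <stdbool.h>

struct dpm_table {
        int count;
        int levels[8];
};

static int setup_clk_table(struct dpm_table *t, bool dpm_enabled,
                           int (*query_levels)(struct dpm_table *),
                           int boot_clock)
{
        if (dpm_enabled)
                return query_levels(t);     /* ask the SMU for levels */

        t->count = 1;                       /* DPM off: boot clock only */
        t->levels[0] = boot_clock / 100;    /* mirrors the driver's scaling */
        return 0;
}

static int fake_query(struct dpm_table *t)
{
        t->count = 2;
        t->levels[0] = 300;
        t->levels[1] = 1000;
        return 0;
}

int main(void)
{
        struct dpm_table t;

        setup_clk_table(&t, true, fake_query, 0);
        printf("got %d levels\n", t.count);
        return 0;
}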
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5314-drm-amdgpu-stop-pipelining-VM-PDs-PTs-moves.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5314-drm-amdgpu-stop-pipelining-VM-PDs-PTs-moves.patch
new file mode 100644
index 00000000..4465954a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5314-drm-amdgpu-stop-pipelining-VM-PDs-PTs-moves.patch
@@ -0,0 +1,51 @@
+From a4119361491429e5f18414ea866e608e47a94ae3 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Tue, 11 Sep 2018 09:30:46 +0200
+Subject: [PATCH 5314/5725] drm/amdgpu: stop pipelining VM PDs/PTs moves
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+We are going to need this for recoverable page fault handling, and it
+makes shadow handling during GPU reset much easier.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Acked-by: Junwei Zhang <Jerry.Zhang@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 6 +++++-
+ 2 files changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index 18b87454..2466147 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -1410,7 +1410,7 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
+ {
+ WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
+ WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
+- !bo->pin_count);
++ !bo->pin_count && bo->tbo.type != ttm_bo_type_kernel);
+ WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
+ WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
+ !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index 1654715..1220809 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -552,7 +552,11 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
+ if (r)
+ goto error;
+
+- r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
++ /* Always block for VM page tables before committing the new location */
++ if (bo->type == ttm_bo_type_kernel)
++ r = ttm_bo_move_accel_cleanup(bo, fence, true, new_mem);
++ else
++ r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
+ dma_fence_put(fence);
+ return r;
+
+--
+2.7.4
+
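The functional change in amdgpu_move_blit() above is a single branch: BOs created with ttm_bo_type_kernel (VM page directories and page tables) now wait for the copy before the move is committed, while everything else keeps the pipelined path. A stripped-down sketch of that decision, with plain callbacks standing in for the TTM helpers:

/* Placeholder sketch: kernel-type BOs block on the copy, other BOs
 * keep the pipelined move. The callbacks stand in for
 * ttm_bo_move_accel_cleanup() and ttm_bo_pipeline_move(). */
#include <stdio.h>

enum bo_type { BO_TYPE_DEVICE, BO_TYPE_KERNEL };

static int blocking_move(void)  { puts("blocking move");  return 0; }
static int pipelined_move(void) { puts("pipelined move"); return 0; }

static int commit_move(enum bo_type type)
{
        if (type == BO_TYPE_KERNEL)
                return blocking_move();  /* VM PDs/PTs: wait for the copy */
        return pipelined_move();         /* normal BOs: overlap with use */
}

int main(void)
{
        commit_move(BO_TYPE_KERNEL);
        commit_move(BO_TYPE_DEVICE);
        return 0;
}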
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5315-drm-amdgpu-always-enable-shadow-BOs-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5315-drm-amdgpu-always-enable-shadow-BOs-v2.patch
new file mode 100644
index 00000000..84d2a85d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5315-drm-amdgpu-always-enable-shadow-BOs-v2.patch
@@ -0,0 +1,55 @@
+From 599fbd1a28b44196b63c65650421a405baa31135 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Tue, 11 Sep 2018 10:30:31 +0200
+Subject: [PATCH 5315/5725] drm/amdgpu: always enable shadow BOs v2
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Even when GPU recovery is disabled, we could run into a manually
+triggered recovery.
+
+v2: keep accidental removed comments
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Acked-by: Emily Deng <Emily.Deng@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 14 +-------------
+ 1 file changed, 1 insertion(+), 13 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index 2466147..68c3b94 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -51,18 +51,6 @@
+ *
+ */
+
+-static bool amdgpu_bo_need_backup(struct amdgpu_device *adev)
+-{
+- if (adev->flags & AMD_IS_APU)
+- return false;
+-
+- if (amdgpu_gpu_recovery == 0 ||
+- (amdgpu_gpu_recovery == -1 && !amdgpu_sriov_vf(adev)))
+- return false;
+-
+- return true;
+-}
+-
+ /**
+ * amdgpu_bo_subtract_pin_size - Remove BO from pin_size accounting
+ *
+@@ -643,7 +631,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
+ if (r)
+ return r;
+
+- if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_bo_need_backup(adev)) {
++ if ((flags & AMDGPU_GEM_CREATE_SHADOW) && !(adev->flags & AMD_IS_APU)) {
+ if (!bp->resv)
+ WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv,
+ NULL));
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5316-drm-amdgpu-shadow-BOs-don-t-need-any-alignment.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5316-drm-amdgpu-shadow-BOs-don-t-need-any-alignment.patch
new file mode 100644
index 00000000..4c800552
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5316-drm-amdgpu-shadow-BOs-don-t-need-any-alignment.patch
@@ -0,0 +1,50 @@
+From 9db6d8c022bd1acadb16f663c13dc2cd883508c0 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Tue, 11 Sep 2018 10:31:54 +0200
+Subject: [PATCH 5316/5725] drm/amdgpu: shadow BOs don't need any alignment
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+They aren't directly used by the hardware.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index 68c3b94..ae4f267 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -575,7 +575,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
+ }
+
+ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
+- unsigned long size, int byte_align,
++ unsigned long size,
+ struct amdgpu_bo *bo)
+ {
+ struct amdgpu_bo_param bp;
+@@ -586,7 +586,6 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
+
+ memset(&bp, 0, sizeof(bp));
+ bp.size = size;
+- bp.byte_align = byte_align;
+ bp.domain = AMDGPU_GEM_DOMAIN_GTT;
+ bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC |
+ AMDGPU_GEM_CREATE_SHADOW;
+@@ -636,7 +635,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
+ WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv,
+ NULL));
+
+- r = amdgpu_bo_create_shadow(adev, bp->size, bp->byte_align, (*bo_ptr));
++ r = amdgpu_bo_create_shadow(adev, bp->size, *bo_ptr);
+
+ if (!bp->resv)
+ reservation_object_unlock((*bo_ptr)->tbo.resv);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5317-drm-amdgpu-always-recover-VRAM-during-GPU-recovery.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5317-drm-amdgpu-always-recover-VRAM-during-GPU-recovery.patch
new file mode 100644
index 00000000..ff16685d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5317-drm-amdgpu-always-recover-VRAM-during-GPU-recovery.patch
@@ -0,0 +1,63 @@
+From 514900819c27ef1922a9b77d280c805e32342385 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Tue, 11 Sep 2018 10:36:16 +0200
+Subject: [PATCH 5317/5725] drm/amdgpu: always recover VRAM during GPU recovery
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+It shouldn't add much overhead and we should make sure that critical
+VRAM content is always restored.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Acked-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 4701240..c6ac3f5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -3051,7 +3051,7 @@ static int amdgpu_device_recover_vram_from_shadow(struct amdgpu_device *adev,
+ }
+
+ /**
+- * amdgpu_device_handle_vram_lost - Handle the loss of VRAM contents
++ * amdgpu_device_recover_vram - Recover some VRAM contents
+ *
+ * @adev: amdgpu_device pointer
+ *
+@@ -3060,7 +3060,7 @@ static int amdgpu_device_recover_vram_from_shadow(struct amdgpu_device *adev,
+ * the contents of VRAM might be lost.
+ * Returns 0 on success, 1 on failure.
+ */
+-static int amdgpu_device_handle_vram_lost(struct amdgpu_device *adev)
++static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
+ {
+ struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+ struct amdgpu_bo *bo, *tmp;
+@@ -3191,8 +3191,8 @@ static int amdgpu_device_reset(struct amdgpu_device *adev)
+
+ amdgpu_amdkfd_post_reset(adev);
+
+- if (!r && ((need_full_reset && !(adev->flags & AMD_IS_APU)) || vram_lost))
+- r = amdgpu_device_handle_vram_lost(adev);
++ if (!r)
++ r = amdgpu_device_recover_vram(adev);
+
+ return r;
+ }
+@@ -3241,7 +3241,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
+ amdgpu_virt_release_full_gpu(adev, true);
+ if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
+ atomic_inc(&adev->vram_lost_counter);
+- r = amdgpu_device_handle_vram_lost(adev);
++ r = amdgpu_device_recover_vram(adev);
+ }
+
+ return r;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5318-drm-amdgpu-fix-shadow-BO-restoring.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5318-drm-amdgpu-fix-shadow-BO-restoring.patch
new file mode 100644
index 00000000..b72cbe95
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5318-drm-amdgpu-fix-shadow-BO-restoring.patch
@@ -0,0 +1,266 @@
+From ac2dc7d9d3af2271679c0b41537eca2b1f8a512d Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Tue, 11 Sep 2018 11:50:57 +0200
+Subject: [PATCH 5318/5725] drm/amdgpu: fix shadow BO restoring
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Don't grab the reservation lock any more and simplify the handling quite
+a bit.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 109 ++++++++---------------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 46 ++++--------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 8 +--
+ 3 files changed, 43 insertions(+), 120 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index c6ac3f5..13efee8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -3003,54 +3003,6 @@ static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
+ }
+
+ /**
+- * amdgpu_device_recover_vram_from_shadow - restore shadowed VRAM buffers
+- *
+- * @adev: amdgpu_device pointer
+- * @ring: amdgpu_ring for the engine handling the buffer operations
+- * @bo: amdgpu_bo buffer whose shadow is being restored
+- * @fence: dma_fence associated with the operation
+- *
+- * Restores the VRAM buffer contents from the shadow in GTT. Used to
+- * restore things like GPUVM page tables after a GPU reset where
+- * the contents of VRAM might be lost.
+- * Returns 0 on success, negative error code on failure.
+- */
+-static int amdgpu_device_recover_vram_from_shadow(struct amdgpu_device *adev,
+- struct amdgpu_ring *ring,
+- struct amdgpu_bo *bo,
+- struct dma_fence **fence)
+-{
+- uint32_t domain;
+- int r;
+-
+- if (!bo->shadow)
+- return 0;
+-
+- r = amdgpu_bo_reserve(bo, true);
+- if (r)
+- return r;
+- domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+- /* if bo has been evicted, then no need to recover */
+- if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
+- r = amdgpu_bo_validate(bo->shadow);
+- if (r) {
+- DRM_ERROR("bo validate failed!\n");
+- goto err;
+- }
+-
+- r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
+- NULL, fence, true);
+- if (r) {
+- DRM_ERROR("recover page table failed!\n");
+- goto err;
+- }
+- }
+-err:
+- amdgpu_bo_unreserve(bo);
+- return r;
+-}
+-
+-/**
+ * amdgpu_device_recover_vram - Recover some VRAM contents
+ *
+ * @adev: amdgpu_device pointer
+@@ -3058,16 +3010,15 @@ static int amdgpu_device_recover_vram_from_shadow(struct amdgpu_device *adev,
+ * Restores the contents of VRAM buffers from the shadows in GTT. Used to
+ * restore things like GPUVM page tables after a GPU reset where
+ * the contents of VRAM might be lost.
+- * Returns 0 on success, 1 on failure.
++ *
++ * Returns:
++ * 0 on success, negative error code on failure.
+ */
+ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
+ {
+- struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+- struct amdgpu_bo *bo, *tmp;
+ struct dma_fence *fence = NULL, *next = NULL;
+- long r = 1;
+- int i = 0;
+- long tmo;
++ struct amdgpu_bo *shadow;
++ long r = 1, tmo;
+
+ if (amdgpu_sriov_runtime(adev))
+ tmo = msecs_to_jiffies(8000);
+@@ -3076,44 +3027,40 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
+
+ DRM_INFO("recover vram bo from shadow start\n");
+ mutex_lock(&adev->shadow_list_lock);
+- list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
+- next = NULL;
+- amdgpu_device_recover_vram_from_shadow(adev, ring, bo, &next);
++ list_for_each_entry(shadow, &adev->shadow_list, shadow_list) {
++
++ /* No need to recover an evicted BO */
++ if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
++ shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
++ continue;
++
++ r = amdgpu_bo_restore_shadow(shadow, &next);
++ if (r)
++ break;
++
+ if (fence) {
+ r = dma_fence_wait_timeout(fence, false, tmo);
+- if (r == 0)
+- pr_err("wait fence %p[%d] timeout\n", fence, i);
+- else if (r < 0)
+- pr_err("wait fence %p[%d] interrupted\n", fence, i);
+- if (r < 1) {
+- dma_fence_put(fence);
+- fence = next;
++ dma_fence_put(fence);
++ fence = next;
++ if (r <= 0)
+ break;
+- }
+- i++;
++ } else {
++ fence = next;
+ }
+-
+- dma_fence_put(fence);
+- fence = next;
+ }
+ mutex_unlock(&adev->shadow_list_lock);
+
+- if (fence) {
+- r = dma_fence_wait_timeout(fence, false, tmo);
+- if (r == 0)
+- pr_err("wait fence %p[%d] timeout\n", fence, i);
+- else if (r < 0)
+- pr_err("wait fence %p[%d] interrupted\n", fence, i);
+-
+- }
++ if (fence)
++ tmo = dma_fence_wait_timeout(fence, false, tmo);
+ dma_fence_put(fence);
+
+- if (r > 0)
+- DRM_INFO("recover vram bo from shadow done\n");
+- else
++ if (r <= 0 || tmo <= 0) {
+ DRM_ERROR("recover vram bo from shadow failed\n");
++ return -EIO;
++ }
+
+- return (r > 0) ? 0 : 1;
++ DRM_INFO("recover vram bo from shadow done\n");
++ return 0;
+ }
+
+ /**
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index ae4f267..e05f8c9 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -597,7 +597,7 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
+ if (!r) {
+ bo->shadow->parent = amdgpu_bo_ref(bo);
+ mutex_lock(&adev->shadow_list_lock);
+- list_add_tail(&bo->shadow_list, &adev->shadow_list);
++ list_add_tail(&bo->shadow->shadow_list, &adev->shadow_list);
+ mutex_unlock(&adev->shadow_list_lock);
+ }
+
+@@ -729,13 +729,10 @@ int amdgpu_bo_validate(struct amdgpu_bo *bo)
+ }
+
+ /**
+- * amdgpu_bo_restore_from_shadow - restore an &amdgpu_bo buffer object
+- * @adev: amdgpu device object
+- * @ring: amdgpu_ring for the engine handling the buffer operations
+- * @bo: &amdgpu_bo buffer to be restored
+- * @resv: reservation object with embedded fence
++ * amdgpu_bo_restore_shadow - restore an &amdgpu_bo shadow
++ *
++ * @shadow: &amdgpu_bo shadow to be restored
+ * @fence: dma_fence associated with the operation
+- * @direct: whether to submit the job directly
+ *
+ * Copies a buffer object's shadow content back to the object.
+ * This is used for recovering a buffer from its shadow in case of a gpu
+@@ -744,36 +741,19 @@ int amdgpu_bo_validate(struct amdgpu_bo *bo)
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
+-int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
+- struct amdgpu_ring *ring,
+- struct amdgpu_bo *bo,
+- struct reservation_object *resv,
+- struct dma_fence **fence,
+- bool direct)
++int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence)
+
+ {
+- struct amdgpu_bo *shadow = bo->shadow;
+- uint64_t bo_addr, shadow_addr;
+- int r;
+-
+- if (!shadow)
+- return -EINVAL;
+-
+- bo_addr = amdgpu_bo_gpu_offset(bo);
+- shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);
+-
+- r = reservation_object_reserve_shared(bo->tbo.resv);
+- if (r)
+- goto err;
++ struct amdgpu_device *adev = amdgpu_ttm_adev(shadow->tbo.bdev);
++ struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
++ uint64_t shadow_addr, parent_addr;
+
+- r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr,
+- amdgpu_bo_size(bo), resv, fence,
+- direct, false);
+- if (!r)
+- amdgpu_bo_fence(bo, *fence, true);
++ shadow_addr = amdgpu_bo_gpu_offset(shadow);
++ parent_addr = amdgpu_bo_gpu_offset(shadow->parent);
+
+-err:
+- return r;
++ return amdgpu_copy_buffer(ring, shadow_addr, parent_addr,
++ amdgpu_bo_size(shadow), NULL, fence,
++ true, false);
+ }
+
+ /**
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+index bd953bf..3674265 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+@@ -276,12 +276,8 @@ int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
+ struct reservation_object *resv,
+ struct dma_fence **fence, bool direct);
+ int amdgpu_bo_validate(struct amdgpu_bo *bo);
+-int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
+- struct amdgpu_ring *ring,
+- struct amdgpu_bo *bo,
+- struct reservation_object *resv,
+- struct dma_fence **fence,
+- bool direct);
++int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
++ struct dma_fence **fence);
+ uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
+ uint32_t domain);
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5319-drm-amdgpu-fix-up-GDS-GWS-OA-shifting.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5319-drm-amdgpu-fix-up-GDS-GWS-OA-shifting.patch
new file mode 100644
index 00000000..001880bb
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5319-drm-amdgpu-fix-up-GDS-GWS-OA-shifting.patch
@@ -0,0 +1,252 @@
+From be50641f155e86f42c15ba1b3e77533e514316a4 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Fri, 14 Sep 2018 16:06:31 +0200
+Subject: [PATCH 5319/5725] drm/amdgpu: fix up GDS/GWS/OA shifting
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+That only worked by pure coincidence. Completely remove the shifting and
+always apply the correct PAGE_SHIFT.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 12 ++++++------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h | 7 -------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 12 +++---------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 14 +++++++-------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 6 +++++-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 15 +++------------
+ drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 9 ---------
+ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 9 ---------
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 12 +-----------
+ 9 files changed, 25 insertions(+), 71 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index f7fa60b..a1ad99e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -714,16 +714,16 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
+ e->bo_va = amdgpu_vm_bo_find(vm, e->robj);
+
+ if (gds) {
+- p->job->gds_base = amdgpu_bo_gpu_offset(gds);
+- p->job->gds_size = amdgpu_bo_size(gds);
++ p->job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
++ p->job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT;
+ }
+ if (gws) {
+- p->job->gws_base = amdgpu_bo_gpu_offset(gws);
+- p->job->gws_size = amdgpu_bo_size(gws);
++ p->job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT;
++ p->job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT;
+ }
+ if (oa) {
+- p->job->oa_base = amdgpu_bo_gpu_offset(oa);
+- p->job->oa_size = amdgpu_bo_size(oa);
++ p->job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT;
++ p->job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT;
+ }
+
+ if (!r && p->uf_entry.robj) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
+index e73728d..ecbcefe 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
+@@ -24,13 +24,6 @@
+ #ifndef __AMDGPU_GDS_H__
+ #define __AMDGPU_GDS_H__
+
+-/* Because TTM request that alloacted buffer should be PAGE_SIZE aligned,
+- * we should report GDS/GWS/OA size as PAGE_SIZE aligned
+- * */
+-#define AMDGPU_GDS_SHIFT 2
+-#define AMDGPU_GWS_SHIFT PAGE_SHIFT
+-#define AMDGPU_OA_SHIFT PAGE_SHIFT
+-
+ struct amdgpu_ring;
+ struct amdgpu_bo;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+index 6171e03..3cd2b29 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+@@ -276,16 +276,10 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
+ return -EINVAL;
+ }
+ flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
+- if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
+- size = size << AMDGPU_GDS_SHIFT;
+- else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
+- size = size << AMDGPU_GWS_SHIFT;
+- else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA)
+- size = size << AMDGPU_OA_SHIFT;
+- else
+- return -EINVAL;
++ /* GDS allocations must be DW aligned */
++ if (args->in.domains & AMDGPU_GEM_DOMAIN_GDS)
++ size = ALIGN(size, 4);
+ }
+- size = roundup(size, PAGE_SIZE);
+
+ if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
+ r = amdgpu_bo_reserve(vm->root.base.bo, false);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index a47f456..37b6416 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -524,13 +524,13 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+ struct drm_amdgpu_info_gds gds_info;
+
+ memset(&gds_info, 0, sizeof(gds_info));
+- gds_info.gds_gfx_partition_size = adev->gds.mem.gfx_partition_size >> AMDGPU_GDS_SHIFT;
+- gds_info.compute_partition_size = adev->gds.mem.cs_partition_size >> AMDGPU_GDS_SHIFT;
+- gds_info.gds_total_size = adev->gds.mem.total_size >> AMDGPU_GDS_SHIFT;
+- gds_info.gws_per_gfx_partition = adev->gds.gws.gfx_partition_size >> AMDGPU_GWS_SHIFT;
+- gds_info.gws_per_compute_partition = adev->gds.gws.cs_partition_size >> AMDGPU_GWS_SHIFT;
+- gds_info.oa_per_gfx_partition = adev->gds.oa.gfx_partition_size >> AMDGPU_OA_SHIFT;
+- gds_info.oa_per_compute_partition = adev->gds.oa.cs_partition_size >> AMDGPU_OA_SHIFT;
++ gds_info.gds_gfx_partition_size = adev->gds.mem.gfx_partition_size;
++ gds_info.compute_partition_size = adev->gds.mem.cs_partition_size;
++ gds_info.gds_total_size = adev->gds.mem.total_size;
++ gds_info.gws_per_gfx_partition = adev->gds.gws.gfx_partition_size;
++ gds_info.gws_per_compute_partition = adev->gds.gws.cs_partition_size;
++ gds_info.oa_per_gfx_partition = adev->gds.oa.gfx_partition_size;
++ gds_info.oa_per_compute_partition = adev->gds.oa.cs_partition_size;
+ return copy_to_user(out, &gds_info,
+ min((size_t)size, sizeof(gds_info))) ? -EFAULT : 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index e05f8c9..a43ec44 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -453,7 +453,11 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
+ int r;
+
+ page_align = roundup(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
+- size = ALIGN(size, PAGE_SIZE);
++ if (bp->domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS |
++ AMDGPU_GEM_DOMAIN_OA))
++ size <<= PAGE_SHIFT;
++ else
++ size = ALIGN(size, PAGE_SIZE);
+
+ if (!amdgpu_bo_validate_size(adev, size, bp->domain))
+ return -ENOMEM;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index 1220809..1d79941 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -2031,19 +2031,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
+ amdgpu_ssg_init(adev);
+
+ /* Initialize various on-chip memory pools */
+- adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT;
+- adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT;
+- adev->gds.mem.cs_partition_size = adev->gds.mem.cs_partition_size << AMDGPU_GDS_SHIFT;
+- adev->gds.gws.total_size = adev->gds.gws.total_size << AMDGPU_GWS_SHIFT;
+- adev->gds.gws.gfx_partition_size = adev->gds.gws.gfx_partition_size << AMDGPU_GWS_SHIFT;
+- adev->gds.gws.cs_partition_size = adev->gds.gws.cs_partition_size << AMDGPU_GWS_SHIFT;
+- adev->gds.oa.total_size = adev->gds.oa.total_size << AMDGPU_OA_SHIFT;
+- adev->gds.oa.gfx_partition_size = adev->gds.oa.gfx_partition_size << AMDGPU_OA_SHIFT;
+- adev->gds.oa.cs_partition_size = adev->gds.oa.cs_partition_size << AMDGPU_OA_SHIFT;
+ /* GDS Memory */
+ if (adev->gds.mem.total_size) {
+ r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS,
+- adev->gds.mem.total_size >> PAGE_SHIFT);
++ adev->gds.mem.total_size);
+ if (r) {
+ DRM_ERROR("Failed initializing GDS heap.\n");
+ return r;
+@@ -2053,7 +2044,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
+ /* GWS */
+ if (adev->gds.gws.total_size) {
+ r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS,
+- adev->gds.gws.total_size >> PAGE_SHIFT);
++ adev->gds.gws.total_size);
+ if (r) {
+ DRM_ERROR("Failed initializing gws heap.\n");
+ return r;
+@@ -2063,7 +2054,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
+ /* OA */
+ if (adev->gds.oa.total_size) {
+ r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA,
+- adev->gds.oa.total_size >> PAGE_SHIFT);
++ adev->gds.oa.total_size);
+ if (r) {
+ DRM_ERROR("Failed initializing oa heap.\n");
+ return r;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+index e3d5714..c87cc9a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+@@ -4190,15 +4190,6 @@ static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
+ uint32_t gws_base, uint32_t gws_size,
+ uint32_t oa_base, uint32_t oa_size)
+ {
+- gds_base = gds_base >> AMDGPU_GDS_SHIFT;
+- gds_size = gds_size >> AMDGPU_GDS_SHIFT;
+-
+- gws_base = gws_base >> AMDGPU_GWS_SHIFT;
+- gws_size = gws_size >> AMDGPU_GWS_SHIFT;
+-
+- oa_base = oa_base >> AMDGPU_OA_SHIFT;
+- oa_size = oa_size >> AMDGPU_OA_SHIFT;
+-
+ /* GDS Base */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index 1d7034c..5e775c0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -5395,15 +5395,6 @@ static void gfx_v8_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
+ uint32_t gws_base, uint32_t gws_size,
+ uint32_t oa_base, uint32_t oa_size)
+ {
+- gds_base = gds_base >> AMDGPU_GDS_SHIFT;
+- gds_size = gds_size >> AMDGPU_GDS_SHIFT;
+-
+- gws_base = gws_base >> AMDGPU_GWS_SHIFT;
+- gws_size = gws_size >> AMDGPU_GWS_SHIFT;
+-
+- oa_base = oa_base >> AMDGPU_OA_SHIFT;
+- oa_size = oa_size >> AMDGPU_OA_SHIFT;
+-
+ /* GDS Base */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index b9918ea..06ac237 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -1528,8 +1528,7 @@ static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
+ gfx_v9_0_write_data_to_reg(ring, 0, false,
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
+ (adev->gds.mem.total_size +
+- adev->gfx.ngg.gds_reserve_size) >>
+- AMDGPU_GDS_SHIFT);
++ adev->gfx.ngg.gds_reserve_size));
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
+ amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
+@@ -3477,15 +3476,6 @@ static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
+ {
+ struct amdgpu_device *adev = ring->adev;
+
+- gds_base = gds_base >> AMDGPU_GDS_SHIFT;
+- gds_size = gds_size >> AMDGPU_GDS_SHIFT;
+-
+- gws_base = gws_base >> AMDGPU_GWS_SHIFT;
+- gws_size = gws_size >> AMDGPU_GWS_SHIFT;
+-
+- oa_base = oa_base >> AMDGPU_OA_SHIFT;
+- oa_size = oa_size >> AMDGPU_OA_SHIFT;
+-
+ /* GDS Base */
+ gfx_v9_0_write_data_to_reg(ring, 0, false,
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE) + 2 * vmid,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5320-drm-amdgpu-initialize-GDS-GWS-OA-domains-even-when-t.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5320-drm-amdgpu-initialize-GDS-GWS-OA-domains-even-when-t.patch
new file mode 100644
index 00000000..84209e25
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5320-drm-amdgpu-initialize-GDS-GWS-OA-domains-even-when-t.patch
@@ -0,0 +1,91 @@
+From 6017a0c490126e47b42b285cf0d02036910aba38 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Fri, 14 Sep 2018 20:59:27 +0200
+Subject: [PATCH 5320/5725] drm/amdgpu: initialize GDS/GWS/OA domains even when
+ they are zero sized
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Stops crashing on SI.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 48 +++++++++++++--------------------
+ 1 file changed, 18 insertions(+), 30 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index 1d79941..e8e36df 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -2031,34 +2031,25 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
+ amdgpu_ssg_init(adev);
+
+ /* Initialize various on-chip memory pools */
+- /* GDS Memory */
+- if (adev->gds.mem.total_size) {
+- r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS,
+- adev->gds.mem.total_size);
+- if (r) {
+- DRM_ERROR("Failed initializing GDS heap.\n");
+- return r;
+- }
++ r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS,
++ adev->gds.mem.total_size);
++ if (r) {
++ DRM_ERROR("Failed initializing GDS heap.\n");
++ return r;
+ }
+
+- /* GWS */
+- if (adev->gds.gws.total_size) {
+- r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS,
+- adev->gds.gws.total_size);
+- if (r) {
+- DRM_ERROR("Failed initializing gws heap.\n");
+- return r;
+- }
++ r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS,
++ adev->gds.gws.total_size);
++ if (r) {
++ DRM_ERROR("Failed initializing gws heap.\n");
++ return r;
+ }
+
+- /* OA */
+- if (adev->gds.oa.total_size) {
+- r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA,
+- adev->gds.oa.total_size);
+- if (r) {
+- DRM_ERROR("Failed initializing oa heap.\n");
+- return r;
+- }
++ r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA,
++ adev->gds.oa.total_size);
++ if (r) {
++ DRM_ERROR("Failed initializing oa heap.\n");
++ return r;
+ }
+
+ /* Register debugfs entries for amdgpu_ttm */
+@@ -2098,12 +2089,9 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
+
+ ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
+ ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
+- if (adev->gds.mem.total_size)
+- ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS);
+- if (adev->gds.gws.total_size)
+- ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS);
+- if (adev->gds.oa.total_size)
+- ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
++ ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS);
++ ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS);
++ ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
+ ttm_bo_device_release(&adev->mman.bdev);
+ amdgpu_ttm_global_fini(adev);
+ adev->mman.initialized = false;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5321-drm-amdgpu-move-reserving-GDS-GWS-OA-into-common-cod.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5321-drm-amdgpu-move-reserving-GDS-GWS-OA-into-common-cod.patch
new file mode 100644
index 00000000..9cb3ab64
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5321-drm-amdgpu-move-reserving-GDS-GWS-OA-into-common-cod.patch
@@ -0,0 +1,156 @@
+From 1938eb8e62db762120655629eee383e5e0e9d20c Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Fri, 14 Sep 2018 21:08:57 +0200
+Subject: [PATCH 5321/5725] drm/amdgpu: move reserving GDS/GWS/OA into common
+ code
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+We don't need that in the per ASIC code.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 18 ++++++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 19 -------------------
+ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 19 -------------------
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 19 -------------------
+ 4 files changed, 18 insertions(+), 57 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index e8e36df..04bc197 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -2038,6 +2038,12 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
+ return r;
+ }
+
++ r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size,
++ PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS,
++ &adev->gds.gds_gfx_bo, NULL, NULL);
++ if (r)
++ return r;
++
+ r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS,
+ adev->gds.gws.total_size);
+ if (r) {
+@@ -2045,6 +2051,12 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
+ return r;
+ }
+
++ r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size,
++ PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS,
++ &adev->gds.gws_gfx_bo, NULL, NULL);
++ if (r)
++ return r;
++
+ r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA,
+ adev->gds.oa.total_size);
+ if (r) {
+@@ -2052,6 +2064,12 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
+ return r;
+ }
+
++ r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size,
++ PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA,
++ &adev->gds.oa_gfx_bo, NULL, NULL);
++ if (r)
++ return r;
++
+ /* Register debugfs entries for amdgpu_ttm */
+ r = amdgpu_ttm_debugfs_init(adev);
+ if (r) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+index c87cc9a..01ca681 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+@@ -4602,25 +4602,6 @@ static int gfx_v7_0_sw_init(void *handle)
+ }
+ }
+
+- /* reserve GDS, GWS and OA resource for gfx */
+- r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size,
+- PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS,
+- &adev->gds.gds_gfx_bo, NULL, NULL);
+- if (r)
+- return r;
+-
+- r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size,
+- PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS,
+- &adev->gds.gws_gfx_bo, NULL, NULL);
+- if (r)
+- return r;
+-
+- r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size,
+- PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA,
+- &adev->gds.oa_gfx_bo, NULL, NULL);
+- if (r)
+- return r;
+-
+ adev->gfx.ce_ram_size = 0x8000;
+
+ gfx_v7_0_gpu_early_init(adev);
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index 5e775c0..096347a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -2160,25 +2160,6 @@ static int gfx_v8_0_sw_init(void *handle)
+ if (r)
+ return r;
+
+- /* reserve GDS, GWS and OA resource for gfx */
+- r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size,
+- PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS,
+- &adev->gds.gds_gfx_bo, NULL, NULL);
+- if (r)
+- return r;
+-
+- r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size,
+- PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS,
+- &adev->gds.gws_gfx_bo, NULL, NULL);
+- if (r)
+- return r;
+-
+- r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size,
+- PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA,
+- &adev->gds.oa_gfx_bo, NULL, NULL);
+- if (r)
+- return r;
+-
+ adev->gfx.ce_ram_size = 0x8000;
+
+ r = gfx_v8_0_gpu_early_init(adev);
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 06ac237..27a5ada 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -1701,25 +1701,6 @@ static int gfx_v9_0_sw_init(void *handle)
+ if (r)
+ return r;
+
+- /* reserve GDS, GWS and OA resource for gfx */
+- r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size,
+- PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS,
+- &adev->gds.gds_gfx_bo, NULL, NULL);
+- if (r)
+- return r;
+-
+- r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size,
+- PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS,
+- &adev->gds.gws_gfx_bo, NULL, NULL);
+- if (r)
+- return r;
+-
+- r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size,
+- PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA,
+- &adev->gds.oa_gfx_bo, NULL, NULL);
+- if (r)
+- return r;
+-
+ adev->gfx.ce_ram_size = 0x8000;
+
+ r = gfx_v9_0_gpu_early_init(adev);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5322-drm-amd-Add-ucode-DMCU-support.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5322-drm-amd-Add-ucode-DMCU-support.patch
new file mode 100644
index 00000000..75f95a02
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5322-drm-amd-Add-ucode-DMCU-support.patch
@@ -0,0 +1,115 @@
+From b2d472d71927342b9529a64fd8b92c8c8b0f67bd Mon Sep 17 00:00:00 2001
+From: David Francis <David.Francis@amd.com>
+Date: Tue, 11 Sep 2018 13:41:01 -0400
+Subject: [PATCH 5322/5725] drm/amd: Add ucode DMCU support
+
+DMCU (Display Microcontroller Unit) is a GPU chip involved in
+eDP features like Adaptive Backlight Modulation and Panel Self
+Refresh.
+
+DMCU has two pieces of firmware: the ERAM and the interrupt
+vectors, which must be loaded separately.
+
+To this end, the DMCU firmware has a custom header and parsing
+logic similar to MEC, to extract the two ucodes from a single
+struct firmware.
+
+Signed-off-by: David Francis <David.Francis@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 21 +++++++++++++++++++--
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 10 ++++++++++
+ 2 files changed, 29 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+index 8777dad..59fe359 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+@@ -322,6 +322,7 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
+ {
+ const struct common_firmware_header *header = NULL;
+ const struct gfx_firmware_header_v1_0 *cp_hdr = NULL;
++ const struct dmcu_firmware_header_v1_0 *dmcu_hdr = NULL;
+
+ if (NULL == ucode->fw)
+ return 0;
+@@ -333,8 +334,8 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
+ return 0;
+
+ header = (const struct common_firmware_header *)ucode->fw->data;
+-
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
++ dmcu_hdr = (const struct dmcu_firmware_header_v1_0 *)ucode->fw->data;
+
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP ||
+ (ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC1 &&
+@@ -343,7 +344,9 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
+ ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC2_JT &&
+ ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL &&
+ ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM &&
+- ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM)) {
++ ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM &&
++ ucode->ucode_id != AMDGPU_UCODE_ID_DMCU_ERAM &&
++ ucode->ucode_id != AMDGPU_UCODE_ID_DMCU_INTV)) {
+ ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes);
+
+ memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
+@@ -365,6 +368,20 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
+ le32_to_cpu(header->ucode_array_offset_bytes) +
+ le32_to_cpu(cp_hdr->jt_offset) * 4),
+ ucode->ucode_size);
++ } else if (ucode->ucode_id == AMDGPU_UCODE_ID_DMCU_ERAM) {
++ ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes) -
++ le32_to_cpu(dmcu_hdr->intv_size_bytes);
++
++ memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
++ le32_to_cpu(header->ucode_array_offset_bytes)),
++ ucode->ucode_size);
++ } else if (ucode->ucode_id == AMDGPU_UCODE_ID_DMCU_INTV) {
++ ucode->ucode_size = le32_to_cpu(dmcu_hdr->intv_size_bytes);
++
++ memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
++ le32_to_cpu(header->ucode_array_offset_bytes) +
++ le32_to_cpu(dmcu_hdr->intv_offset_bytes)),
++ ucode->ucode_size);
+ } else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL) {
+ ucode->ucode_size = adev->gfx.rlc.save_restore_list_cntl_size_bytes;
+ memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_cntl,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+index b2f820c..15791af 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+@@ -157,6 +157,13 @@ struct gpu_info_firmware_header_v1_0 {
+ uint16_t version_minor; /* version */
+ };
+
++/* version_major=1, version_minor=0 */
++struct dmcu_firmware_header_v1_0 {
++ struct common_firmware_header header;
++ uint32_t intv_offset_bytes; /* interrupt vectors offset from end of header, in bytes */
++ uint32_t intv_size_bytes; /* size of interrupt vectors, in bytes */
++};
++
+ /* header is fixed size */
+ union amdgpu_firmware_header {
+ struct common_firmware_header common;
+@@ -170,6 +177,7 @@ union amdgpu_firmware_header {
+ struct sdma_firmware_header_v1_0 sdma;
+ struct sdma_firmware_header_v1_1 sdma_v1_1;
+ struct gpu_info_firmware_header_v1_0 gpu_info;
++ struct dmcu_firmware_header_v1_0 dmcu;
+ uint8_t raw[0x100];
+ };
+
+@@ -196,6 +204,8 @@ enum AMDGPU_UCODE_ID {
+ AMDGPU_UCODE_ID_UVD1,
+ AMDGPU_UCODE_ID_VCE,
+ AMDGPU_UCODE_ID_VCN,
++ AMDGPU_UCODE_ID_DMCU_ERAM,
++ AMDGPU_UCODE_ID_DMCU_INTV,
+ AMDGPU_UCODE_ID_MAXIMUM,
+ };
+
+--
+2.7.4
+
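The split that amdgpu_ucode_init_single_fw() performs for the two DMCU ucodes in the patch above is plain offset arithmetic on the new header fields. Below is a rough, stand-alone C sketch of that arithmetic; it is not code from the kernel tree, the struct is a simplified stand-in for dmcu_firmware_header_v1_0, and the numeric values are invented rather than taken from a real raven_dmcu.bin.

    /* Minimal sketch of the DMCU image split, using invented header values. */
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for dmcu_firmware_header_v1_0 plus its common header. */
    struct dmcu_hdr_sketch {
            uint32_t ucode_size_bytes;         /* total ucode payload size */
            uint32_t ucode_array_offset_bytes; /* payload offset in the file */
            uint32_t intv_offset_bytes;        /* vectors offset within the payload */
            uint32_t intv_size_bytes;          /* vectors size */
    };

    int main(void)
    {
            struct dmcu_hdr_sketch h = { 0x10000, 0x100, 0xe000, 0x2000 };

            uint32_t eram_off  = h.ucode_array_offset_bytes;
            uint32_t eram_size = h.ucode_size_bytes - h.intv_size_bytes;
            uint32_t intv_off  = h.ucode_array_offset_bytes + h.intv_offset_bytes;
            uint32_t intv_size = h.intv_size_bytes;

            printf("ERAM: file offset 0x%x, size 0x%x\n",
                   (unsigned)eram_off, (unsigned)eram_size);
            printf("INTV: file offset 0x%x, size 0x%x\n",
                   (unsigned)intv_off, (unsigned)intv_size);
            return 0;
    }

With these made-up numbers the ERAM ucode comes out as 0xe000 bytes starting at file offset 0x100 and the interrupt vectors as 0x2000 bytes starting at 0xe100, which mirrors the two new memcpy() branches in the hunk above.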
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5323-drm-amd-Add-PSP-DMCU-support.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5323-drm-amd-Add-PSP-DMCU-support.patch
new file mode 100644
index 00000000..f47f7a7b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5323-drm-amd-Add-PSP-DMCU-support.patch
@@ -0,0 +1,39 @@
+From 805b1f56cee0a4b0a0de324063a718566608ef50 Mon Sep 17 00:00:00 2001
+From: David Francis <David.Francis@amd.com>
+Date: Tue, 11 Sep 2018 13:46:41 -0400
+Subject: [PATCH 5323/5725] drm/amd: Add PSP DMCU support
+
+DMCU (Display Microcontroller Unit) is a GPU chip involved in
+eDP features like Adaptive Backlight Modulation and Panel Self
+Refresh.
+
+PSP is already equipped to handle DMCU firmware loading, all
+that is needed is to translate between the new DMCU ucode ID and
+the equivalent psp_gfx_fw_type.
+
+Signed-off-by: David Francis <David.Francis@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/psp_v10_0.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+index 45f9322..295c220 100644
+--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+@@ -93,6 +93,12 @@ psp_v10_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *
+ case AMDGPU_UCODE_ID_VCN:
+ *type = GFX_FW_TYPE_VCN;
+ break;
++ case AMDGPU_UCODE_ID_DMCU_ERAM:
++ *type = GFX_FW_TYPE_DMCU_ERAM;
++ break;
++ case AMDGPU_UCODE_ID_DMCU_INTV:
++ *type = GFX_FW_TYPE_DMCU_ISR;
++ break;
+ case AMDGPU_UCODE_ID_MAXIMUM:
+ default:
+ return -EINVAL;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5324-drm-amd-Add-DM-DMCU-support.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5324-drm-amd-Add-DM-DMCU-support.patch
new file mode 100644
index 00000000..5b5730f5
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5324-drm-amd-Add-DM-DMCU-support.patch
@@ -0,0 +1,172 @@
+From 407b52c8b48feb8b02f1d97e273a946734b7268c Mon Sep 17 00:00:00 2001
+From: David Francis <David.Francis@amd.com>
+Date: Tue, 11 Sep 2018 13:49:49 -0400
+Subject: [PATCH 5324/5725] drm/amd: Add DM DMCU support
+
+DMCU (Display Microcontroller Unit) is a GPU chip involved in
+eDP features like Adaptive Backlight Modulation and Panel Self
+Refresh.
+
+DC is already fully equipped to initialize DMCU as long as the
+firmware is loaded.
+
+At the moment only the raven firmware is available.
+
+A single .bin file is loaded by the kernel's loading mechanism
+and split into two ucodes according to the header.
+
+DMCU is optional, so if the firmware is not found, no error or
+warning is raised.
+
+Signed-off-by: David Francis <David.Francis@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 91 ++++++++++++++++++++++-
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 2 +
+ 2 files changed, 92 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index f6afa4e..3954196 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -30,6 +30,7 @@
+ #include "vid.h"
+ #include "amdgpu.h"
+ #include "amdgpu_display.h"
++#include "amdgpu_ucode.h"
+ #include "atom.h"
+ #include "amdgpu_dm.h"
+ #include "amdgpu_pm.h"
+@@ -50,6 +51,7 @@
+ #include <linux/version.h>
+ #include <linux/types.h>
+ #include <linux/pm_runtime.h>
++#include <linux/firmware.h>
+
+ #include <drm/drmP.h>
+ #include <drm/drm_atomic.h>
+@@ -71,6 +73,9 @@
+
+ #include "modules/inc/mod_freesync.h"
+
++#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
++MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
++
+ /* basic init/fini API */
+ static int amdgpu_dm_init(struct amdgpu_device *adev);
+ static void amdgpu_dm_fini(struct amdgpu_device *adev);
+@@ -516,13 +521,97 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
+ return;
+ }
+
+-static int dm_sw_init(void *handle)
++static int load_dmcu_fw(struct amdgpu_device *adev)
+ {
++ const char *fw_name_dmcu;
++ int r;
++ const struct dmcu_firmware_header_v1_0 *hdr;
++
++ switch(adev->asic_type) {
++ case CHIP_BONAIRE:
++ case CHIP_HAWAII:
++ case CHIP_KAVERI:
++ case CHIP_KABINI:
++ case CHIP_MULLINS:
++ case CHIP_TONGA:
++ case CHIP_FIJI:
++ case CHIP_CARRIZO:
++ case CHIP_STONEY:
++ case CHIP_POLARIS11:
++ case CHIP_POLARIS10:
++ case CHIP_POLARIS12:
++ case CHIP_VEGAM:
++ case CHIP_VEGA10:
++ case CHIP_VEGA12:
++ case CHIP_VEGA20:
++ return 0;
++ case CHIP_RAVEN:
++ fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
++ break;
++ default:
++ DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
++ return -1;
++ }
++
++ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
++ DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
++ return 0;
++ }
++
++ r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
++ if (r == -ENOENT) {
++ /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
++ DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
++ adev->dm.fw_dmcu = NULL;
++ return 0;
++ }
++ if (r) {
++ dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
++ fw_name_dmcu);
++ return r;
++ }
++
++ r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
++ if (r) {
++ dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
++ fw_name_dmcu);
++ release_firmware(adev->dm.fw_dmcu);
++ adev->dm.fw_dmcu = NULL;
++ return r;
++ }
++
++ hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
++ adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
++ adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
++ adev->firmware.fw_size +=
++ ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
++
++ adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
++ adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
++ adev->firmware.fw_size +=
++ ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
++
++ DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
++
+ return 0;
+ }
+
++static int dm_sw_init(void *handle)
++{
++ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
++
++ return load_dmcu_fw(adev);
++}
++
+ static int dm_sw_fini(void *handle)
+ {
++ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
++
++ if(adev->dm.fw_dmcu) {
++ release_firmware(adev->dm.fw_dmcu);
++ adev->dm.fw_dmcu = NULL;
++ }
++
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+index 772368c..49b9572 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+@@ -142,6 +142,8 @@ struct amdgpu_display_manager {
+ struct drm_atomic_state *cached_state;
+
+ struct dm_comressor_info compressor;
++
++ const struct firmware *fw_dmcu;
+ };
+
+ struct amdgpu_dm_connector {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5325-drm-amdgpu-Add-DMCU-to-firmware-query-interface.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5325-drm-amdgpu-Add-DMCU-to-firmware-query-interface.patch
new file mode 100644
index 00000000..e2f87e7c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5325-drm-amdgpu-Add-DMCU-to-firmware-query-interface.patch
@@ -0,0 +1,62 @@
+From 5a9f238063389fac83862d05fff2ff5a46bf90a8 Mon Sep 17 00:00:00 2001
+From: David Francis <David.Francis@amd.com>
+Date: Thu, 13 Sep 2018 15:37:50 -0400
+Subject: [PATCH 5325/5725] drm/amdgpu: Add DMCU to firmware query interface
+
+DMCU firmware version can be read using the AMDGPU_INFO ioctl
+or the amdgpu_firmware_info debugfs entry
+
+Signed-off-by: David Francis <David.Francis@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 12 ++++++++++++
+ include/uapi/drm/amdgpu_drm.h | 2 ++
+ 2 files changed, 14 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index 37b6416..bca3e2c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -254,6 +254,10 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
+ fw_info->ver = adev->psp.asd_fw_version;
+ fw_info->feature = adev->psp.asd_feature_version;
+ break;
++ case AMDGPU_INFO_FW_DMCU:
++ fw_info->ver = adev->dm.dmcu_fw_version;
++ fw_info->feature = 0;
++ break;
+ default:
+ return -EINVAL;
+ }
+@@ -1322,6 +1326,14 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
+ seq_printf(m, "VCN feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
++ /* DMCU */
++ query_fw.fw_type = AMDGPU_INFO_FW_DMCU;
++ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
++ if (ret)
++ return ret;
++ seq_printf(m, "DMCU feature version: %u, firmware version: 0x%08x\n",
++ fw_info.feature, fw_info.ver);
++
+
+ seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version);
+
+diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
+index a0c3286..b1dd9cb 100644
+--- a/include/uapi/drm/amdgpu_drm.h
++++ b/include/uapi/drm/amdgpu_drm.h
+@@ -708,6 +708,8 @@ struct drm_amdgpu_cs_chunk_data {
+ #define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM 0x10
+ /* Subquery id: Query GFX RLC SRLS firmware version */
+ #define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM 0x11
++ /* Subquery id: Query DMCU firmware version */
++ #define AMDGPU_INFO_FW_DMCU 0x12
+ /* number of bytes moved for TTM migration */
+ #define AMDGPU_INFO_NUM_BYTES_MOVED 0x0f
+ /* the used VRAM size */
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5326-drm-amd-display-Add-DMCU-firmware-version.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5326-drm-amd-display-Add-DMCU-firmware-version.patch
new file mode 100644
index 00000000..6f123fee
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5326-drm-amd-display-Add-DMCU-firmware-version.patch
@@ -0,0 +1,43 @@
+From 417e4366c6fc2c44816e38fd43742b055dc65db2 Mon Sep 17 00:00:00 2001
+From: David Francis <David.Francis@amd.com>
+Date: Thu, 13 Sep 2018 15:36:27 -0400
+Subject: [PATCH 5326/5725] drm/amd/display: Add DMCU firmware version
+
+Read the version number from the common firmware header and store
+it in the dm struct
+
+Signed-off-by: David Francis <David.Francis@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 ++
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 1 +
+ 2 files changed, 3 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 3954196..ae505ed 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -591,6 +591,8 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
+
++ adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
++
+ DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
+
+ return 0;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+index 49b9572..2f94317 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+@@ -144,6 +144,7 @@ struct amdgpu_display_manager {
+ struct dm_comressor_info compressor;
+
+ const struct firmware *fw_dmcu;
++ uint32_t dmcu_fw_version;
+ };
+
+ struct amdgpu_dm_connector {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5327-drm-amdgpu-display-return-proper-error-codes-in-dm.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5327-drm-amdgpu-display-return-proper-error-codes-in-dm.patch
new file mode 100644
index 00000000..a764cd91
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5327-drm-amdgpu-display-return-proper-error-codes-in-dm.patch
@@ -0,0 +1,57 @@
+From e1118c5f1a69a0336b24731dba21041dcbe30716 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 13 Sep 2018 11:01:28 -0500
+Subject: [PATCH 5327/5725] drm/amdgpu/display: return proper error codes in dm
+
+Replace -1 with proper error codes.
+
+Acked-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index ae505ed..3815c7b 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -495,7 +495,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
+ error:
+ amdgpu_dm_fini(adev);
+
+- return -1;
++ return -EINVAL;
+ }
+
+ static void amdgpu_dm_fini(struct amdgpu_device *adev)
+@@ -550,7 +550,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
+ break;
+ default:
+ DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
+- return -1;
++ return -EINVAL;
+ }
+
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+@@ -1647,7 +1647,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
+ link_cnt = dm->dc->caps.max_links;
+ if (amdgpu_dm_mode_config_init(dm->adev)) {
+ DRM_ERROR("DM: Failed to initialize mode config\n");
+- return -1;
++ return -EINVAL;
+ }
+
+ /* Identify the number of planes to be initialized */
+@@ -1769,7 +1769,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
+ kfree(aconnector);
+ for (i = 0; i < dm->dc->caps.max_planes; i++)
+ kfree(mode_info->planes[i]);
+- return -1;
++ return -EINVAL;
+ }
+
+ static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5328-drm-amdgpu-try-allocating-VRAM-as-power-of-two.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5328-drm-amdgpu-try-allocating-VRAM-as-power-of-two.patch
new file mode 100644
index 00000000..6f1dab4e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5328-drm-amdgpu-try-allocating-VRAM-as-power-of-two.patch
@@ -0,0 +1,99 @@
+From 2f407369bd54114e7f02a8fcdb1acc4b3ea4ae07 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Fri, 7 Sep 2018 14:27:05 +0200
+Subject: [PATCH 5328/5725] drm/amdgpu: try allocating VRAM as power of two
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Try to allocate VRAM in power of two sizes and only fall back to vram
+split sizes if that fails.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Felix Kuehling <felix.kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 52 +++++++++++++++++++++-------
+ 1 file changed, 40 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+index f949efb..4a8726b9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+@@ -126,6 +126,28 @@ u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
+ }
+
+ /**
++ * amdgpu_vram_mgr_virt_start - update virtual start address
++ *
++ * @mem: ttm_mem_reg to update
++ * @node: just allocated node
++ *
++ * Calculate a virtual BO start address to easily check if everything is CPU
++ * accessible.
++ */
++static void amdgpu_vram_mgr_virt_start(struct ttm_mem_reg *mem,
++ struct drm_mm_node *node)
++{
++ unsigned long start;
++
++ start = node->start + node->size;
++ if (start > mem->num_pages)
++ start -= mem->num_pages;
++ else
++ start = 0;
++ mem->start = max(mem->start, start);
++}
++
++/**
+ * amdgpu_vram_mgr_new - allocate new ranges
+ *
+ * @man: TTM memory type manager
+@@ -177,10 +199,25 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
+ pages_left = mem->num_pages;
+
+ spin_lock(&mgr->lock);
+- for (i = 0; i < num_nodes; ++i) {
++ for (i = 0; pages_left >= pages_per_node; ++i) {
++ unsigned long pages = rounddown_pow_of_two(pages_left);
++
++ r = drm_mm_insert_node_in_range(mm, &nodes[i], pages,
++ pages_per_node, 0,
++ place->fpfn, lpfn,
++ mode);
++ if (unlikely(r))
++ break;
++
++ usage += nodes[i].size << PAGE_SHIFT;
++ vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]);
++ amdgpu_vram_mgr_virt_start(mem, &nodes[i]);
++ pages_left -= pages;
++ }
++
++ for (; pages_left; ++i) {
+ unsigned long pages = min(pages_left, pages_per_node);
+ uint32_t alignment = mem->page_alignment;
+- unsigned long start;
+
+ if (pages == pages_per_node)
+ alignment = pages_per_node;
+@@ -194,16 +231,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
+
+ usage += nodes[i].size << PAGE_SHIFT;
+ vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]);
+-
+- /* Calculate a virtual BO start address to easily check if
+- * everything is CPU accessible.
+- */
+- start = nodes[i].start + nodes[i].size;
+- if (start > mem->num_pages)
+- start -= mem->num_pages;
+- else
+- start = 0;
+- mem->start = max(mem->start, start);
++ amdgpu_vram_mgr_virt_start(mem, &nodes[i]);
+ pages_left -= pages;
+ }
+ spin_unlock(&mgr->lock);
+--
+2.7.4
+
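As a rough illustration of the allocation strategy in the patch above: the first loop keeps requesting the largest power-of-two chunk that still fits, and anything smaller than a full split falls through to the old per-node loop. The stand-alone sketch below only prints the chunk sizes such a request decomposes into; it assumes every insertion succeeds, and the pages_left/pages_per_node values are invented, not the driver's real defaults.

    /* Sketch of the power-of-two chunking, assuming no allocation failures. */
    #include <stdio.h>

    /* Largest power of two <= x (x > 0), like the kernel's rounddown_pow_of_two(). */
    static unsigned long pow2_floor(unsigned long x)
    {
            unsigned long p = 1;

            while (p * 2 <= x)
                    p *= 2;
            return p;
    }

    int main(void)
    {
            unsigned long pages_left = 1000;          /* invented request size */
            const unsigned long pages_per_node = 256; /* invented split size */

            /* First pass: power-of-two chunks while a full split still fits. */
            while (pages_left >= pages_per_node) {
                    unsigned long pages = pow2_floor(pages_left);

                    printf("power-of-two chunk: %lu pages\n", pages);
                    pages_left -= pages;
            }
            /* Remainder: old behaviour, at most pages_per_node per chunk. */
            while (pages_left) {
                    unsigned long pages = pages_left < pages_per_node ?
                                          pages_left : pages_per_node;

                    printf("split chunk:        %lu pages\n", pages);
                    pages_left -= pages;
            }
            return 0;
    }

A 1000-page request with a 256-page split size then decomposes into 512 + 256 power-of-two chunks plus a 232-page remainder; in the real code the second loop also serves as the fallback whenever drm_mm_insert_node_in_range() cannot satisfy a power-of-two request.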
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5329-drm-amdgpu-enable-AGP-aperture-for-GMC9-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5329-drm-amdgpu-enable-AGP-aperture-for-GMC9-v2.patch
new file mode 100644
index 00000000..b2d9c65b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5329-drm-amdgpu-enable-AGP-aperture-for-GMC9-v2.patch
@@ -0,0 +1,88 @@
+From c0bf5944a681b76983f31c3ca53913895e1ec641 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Mon, 27 Aug 2018 18:23:11 +0200
+Subject: [PATCH 5329/5725] drm/amdgpu: enable AGP aperture for GMC9 v2
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Enable the old AGP aperture to avoid GART mappings.
+
+v2: don't enable it for SRIOV
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 10 +++++-----
+ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 2 ++
+ drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 10 +++++-----
+ 3 files changed, 12 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+index 3403ded..ffd0ec9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+@@ -65,16 +65,16 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
+ {
+ uint64_t value;
+
+- /* Disable AGP. */
++ /* Program the AGP BAR */
+ WREG32_SOC15(GC, 0, mmMC_VM_AGP_BASE, 0);
+- WREG32_SOC15(GC, 0, mmMC_VM_AGP_TOP, 0);
+- WREG32_SOC15(GC, 0, mmMC_VM_AGP_BOT, 0xFFFFFFFF);
++ WREG32_SOC15(GC, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
++ WREG32_SOC15(GC, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
+
+ /* Program the system aperture low logical page number. */
+ WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
+- adev->gmc.vram_start >> 18);
++ min(adev->gmc.vram_start, adev->gmc.agp_start) >> 18);
+ WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+- adev->gmc.vram_end >> 18);
++ max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18);
+
+ /* Set default page address. */
+ value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index 5b04b45..d7d6d0c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -771,6 +771,8 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
+ base = mmhub_v1_0_get_fb_location(adev);
+ amdgpu_gmc_vram_location(adev, &adev->gmc, base);
+ amdgpu_gmc_gart_location(adev, mc);
++ if (!amdgpu_sriov_vf(adev))
++ amdgpu_gmc_agp_location(adev, mc);
+ /* base offset of vram pages */
+ adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+index 63fec50..21a822f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+@@ -76,16 +76,16 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
+ uint64_t value;
+ uint32_t tmp;
+
+- /* Disable AGP. */
++ /* Program the AGP BAR */
+ WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_BASE, 0);
+- WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_TOP, 0);
+- WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_BOT, 0x00FFFFFF);
++ WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
++ WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
+
+ /* Program the system aperture low logical page number. */
+ WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
+- adev->gmc.vram_start >> 18);
++ min(adev->gmc.vram_start, adev->gmc.agp_start) >> 18);
+ WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+- adev->gmc.vram_end >> 18);
++ max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18);
+
+ /* Set default page address. */
+ value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5330-drm-amdgpu-fix-the-page-fault-of-raven2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5330-drm-amdgpu-fix-the-page-fault-of-raven2.patch
new file mode 100644
index 00000000..f0e5c9d3
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5330-drm-amdgpu-fix-the-page-fault-of-raven2.patch
@@ -0,0 +1,83 @@
+From 71f0a8227045b3ffacf87299e130cd4bc41a3338 Mon Sep 17 00:00:00 2001
+From: Huang Rui <ray.huang@amd.com>
+Date: Fri, 21 Sep 2018 18:15:01 +0800
+Subject: [PATCH 5330/5725] drm/amdgpu: fix the page fault of raven2
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+While the agp_end address is 0xffffffff, adding 1 to it overflows the value
+and rolls it back to 0. So when 0 is written to
+mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, the system aperture is actually disabled,
+and so any access to vram will trigger a page fault.
+
+Raven2's HW issue only needs the vram end address to be increased; there is
+no need to do the same for the agp.
+
+Change-Id: I4f775c01857a3c8111f2145023ad15c4e94f0b26
+Signed-off-by: Huang Rui <ray.huang@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Cc: Marek Olšák <marek.olsak@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 16 ++++++++++++++--
+ drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 16 ++++++++++++++--
+ 2 files changed, 28 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+index ffd0ec9..ceb7847 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+@@ -73,8 +73,20 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
+ /* Program the system aperture low logical page number. */
+ WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ min(adev->gmc.vram_start, adev->gmc.agp_start) >> 18);
+- WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+- max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18);
++
++ if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
++ /*
++ * Raven2 has a HW issue that it is unable to use the vram which
++ * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
++ * workaround that increase system aperture high address (add 1)
++ * to get rid of the VM fault and hardware hang.
++ */
++ WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
++ max((adev->gmc.vram_end >> 18) + 0x1,
++ adev->gmc.agp_end >> 18));
++ else
++ WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
++ max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18);
+
+ /* Set default page address. */
+ value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
+diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+index 21a822f..cca6c1b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+@@ -84,8 +84,20 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
+ /* Program the system aperture low logical page number. */
+ WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ min(adev->gmc.vram_start, adev->gmc.agp_start) >> 18);
+- WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+- max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18);
++
++ if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
++ /*
++ * Raven2 has a HW issue that it is unable to use the vram which
++ * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
++ * workaround that increase system aperture high address (add 1)
++ * to get rid of the VM fault and hardware hang.
++ */
++ WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
++ max((adev->gmc.vram_end >> 18) + 0x1,
++ adev->gmc.agp_end >> 18));
++ else
++ WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
++ max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18);
+
+ /* Set default page address. */
+ value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5331-drm-amdgpu-add-amdgpu_gmc_agp_location-v3.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5331-drm-amdgpu-add-amdgpu_gmc_agp_location-v3.patch
new file mode 100644
index 00000000..f456dbcf
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5331-drm-amdgpu-add-amdgpu_gmc_agp_location-v3.patch
@@ -0,0 +1,96 @@
+From f72e874431ab6dbceb6c3227e062a25fb0cda6e7 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Fri, 24 Aug 2018 12:08:06 +0200
+Subject: [PATCH 5331/5725] drm/amdgpu: add amdgpu_gmc_agp_location v3
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Helper to figure out the location of the AGP BAR.
+
+v2: fix a couple of bugs
+v3: correctly add one to vram_end
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 43 +++++++++++++++++++++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 5 ++++
+ 2 files changed, 48 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+index 068f0c6..0effe84 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+@@ -143,3 +143,46 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
+ dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
+ mc->gart_size >> 20, mc->gart_start, mc->gart_end);
+ }
++
++/**
++ * amdgpu_gmc_agp_location - try to find AGP location
++ * @adev: amdgpu device structure holding all necessary informations
++ * @mc: memory controller structure holding memory informations
++ *
++ * Function will try to find a place for the AGP BAR in the MC address
++ * space.
++ *
++ * AGP BAR will be assigned the largest available hole in the address space.
++ * Should be called after VRAM and GART locations are setup.
++ */
++void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
++{
++ const uint64_t sixteen_gb = 1ULL << 34;
++ const uint64_t sixteen_gb_mask = ~(sixteen_gb - 1);
++ u64 size_af, size_bf;
++
++ if (mc->vram_start > mc->gart_start) {
++ size_bf = (mc->vram_start & sixteen_gb_mask) -
++ ALIGN(mc->gart_end + 1, sixteen_gb);
++ size_af = mc->mc_mask + 1 - ALIGN(mc->vram_end + 1, sixteen_gb);
++ } else {
++ size_bf = mc->vram_start & sixteen_gb_mask;
++ size_af = (mc->gart_start & sixteen_gb_mask) -
++ ALIGN(mc->vram_end + 1, sixteen_gb);
++ }
++
++ if (size_bf > size_af) {
++ mc->agp_start = mc->vram_start > mc->gart_start ?
++ mc->gart_end + 1 : 0;
++ mc->agp_size = size_bf;
++ } else {
++ mc->agp_start = (mc->vram_start > mc->gart_start ?
++ mc->vram_end : mc->gart_end) + 1,
++ mc->agp_size = size_af;
++ }
++
++ mc->agp_start = ALIGN(mc->agp_start, sixteen_gb);
++ mc->agp_end = mc->agp_start + mc->agp_size - 1;
++ dev_info(adev->dev, "AGP: %lluM 0x%016llX - 0x%016llX\n",
++ mc->agp_size >> 20, mc->agp_start, mc->agp_end);
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+index 81bb310..75447d9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+@@ -81,6 +81,9 @@ struct amdgpu_gmc {
+ * about vram size near mc fb location */
+ u64 mc_vram_size;
+ u64 visible_vram_size;
++ u64 agp_size;
++ u64 agp_start;
++ u64 agp_end;
+ u64 gart_size;
+ u64 gart_start;
+ u64 gart_end;
+@@ -141,5 +144,7 @@ void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
+ u64 base);
+ void amdgpu_gmc_gart_location(struct amdgpu_device *adev,
+ struct amdgpu_gmc *mc);
++void amdgpu_gmc_agp_location(struct amdgpu_device *adev,
++ struct amdgpu_gmc *mc);
+
+ #endif
+--
+2.7.4
+
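The hole-picking in amdgpu_gmc_agp_location() from the patch above can be replayed outside the kernel: measure the 16GB-aligned space below and above the VRAM/GART pair and hand the larger gap to the AGP aperture. The sketch below is not kernel code; SZ_16G and ALIGN_16G are local stand-ins for the kernel's constant and ALIGN() macro, and the GART/VRAM placements are invented (GART at the bottom of a 48-bit MC space, 8 GB of VRAM at the 512 GB mark).

    /* Sketch of the AGP hole selection with invented VRAM/GART placements. */
    #include <stdint.h>
    #include <stdio.h>

    #define SZ_16G        (1ULL << 34)
    #define ALIGN_16G(x)  (((x) + SZ_16G - 1) & ~(SZ_16G - 1))

    int main(void)
    {
            uint64_t mc_mask    = (1ULL << 48) - 1;  /* invented 48-bit MC space */
            uint64_t gart_start = 0x0ULL,          gart_end = 0x1fffffffULL;
            uint64_t vram_start = 0x8000000000ULL, vram_end = 0x81ffffffffULL;
            uint64_t size_bf, size_af, agp_start, agp_size;

            /* Space before and after the VRAM/GART pair, 16GB aligned. */
            if (vram_start > gart_start) {
                    size_bf = (vram_start & ~(SZ_16G - 1)) - ALIGN_16G(gart_end + 1);
                    size_af = mc_mask + 1 - ALIGN_16G(vram_end + 1);
            } else {
                    size_bf = vram_start & ~(SZ_16G - 1);
                    size_af = (gart_start & ~(SZ_16G - 1)) - ALIGN_16G(vram_end + 1);
            }

            /* The larger hole becomes the AGP aperture. */
            if (size_bf > size_af) {
                    agp_start = vram_start > gart_start ? gart_end + 1 : 0;
                    agp_size = size_bf;
            } else {
                    agp_start = (vram_start > gart_start ? vram_end : gart_end) + 1;
                    agp_size = size_af;
            }
            agp_start = ALIGN_16G(agp_start);

            printf("AGP: %lluM 0x%016llX - 0x%016llX\n",
                   (unsigned long long)(agp_size >> 20),
                   (unsigned long long)agp_start,
                   (unsigned long long)(agp_start + agp_size - 1));
            return 0;
    }

With this layout the space above VRAM wins, so the sketch reports an aperture from 0x0000008400000000 up to the top of the MC space at 0x0000FFFFFFFFFFFF, in the same format as the dev_info() added by the patch.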
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5332-drm-amdgpu-Temporary-fix-amdgpu_vm_release_compute-b.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5332-drm-amdgpu-Temporary-fix-amdgpu_vm_release_compute-b.patch
new file mode 100644
index 00000000..bd134eeb
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5332-drm-amdgpu-Temporary-fix-amdgpu_vm_release_compute-b.patch
@@ -0,0 +1,116 @@
+From ed1d809b661c3cb4d195cc0591974cc63dc4986d Mon Sep 17 00:00:00 2001
+From: Prike Liang <Prike.Liang@amd.com>
+Date: Sun, 30 Sep 2018 10:12:38 +0800
+Subject: [PATCH 5332/5725] drm/amdgpu: Temporary fix amdgpu_vm_release_compute
+ build error
+
+Change-Id: Ib1ca92ed3bbab57243a041392c3e5bfdde989a90
+Signed-off-by: Prike Liang <Prike.Liang@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 1 +
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 1 +
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 1 +
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 20 ++++++++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 1 +
+ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 4 +++-
+ 6 files changed, 27 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+index d2702b0..01b9a9d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+@@ -182,6 +182,7 @@ static const struct kfd2kgd_calls kfd2kgd = {
+ .create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm,
+ .acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm,
+ .destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm,
++ .release_process_vm = amdgpu_amdkfd_gpuvm_release_process_vm,
+ .get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir,
+ .alloc_pasid = amdgpu_pasid_alloc,
+ .free_pasid = amdgpu_pasid_free,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+index 69ac7be..7e7fe0c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+@@ -154,6 +154,7 @@ static const struct kfd2kgd_calls kfd2kgd = {
+ .create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm,
+ .acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm,
+ .destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm,
++ .release_process_vm = amdgpu_amdkfd_gpuvm_release_process_vm,
+ .create_process_gpumem = create_process_gpumem,
+ .destroy_process_gpumem = destroy_process_gpumem,
+ .get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+index b6852a1..c501ead 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+@@ -204,6 +204,7 @@ static const struct kfd2kgd_calls kfd2kgd = {
+ .create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm,
+ .acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm,
+ .destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm,
++ .release_process_vm = amdgpu_amdkfd_gpuvm_release_process_vm,
+ .create_process_gpumem = create_process_gpumem,
+ .destroy_process_gpumem = destroy_process_gpumem,
+ .get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 66436e8..4d39bf8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -2849,6 +2849,26 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, uns
+ }
+
+ /**
++ * amdgpu_vm_release_compute - release a compute vm
++ * @adev: amdgpu_device pointer
++ * @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute
++ *
++ * This is the counterpart of amdgpu_vm_make_compute. It decouples the compute
++ * pasid from the vm. Compute should stop using the vm after this call.
++ */
++void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
++{
++ if (vm->pasid) {
++ unsigned long flags;
++
++ spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
++ idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
++ spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
++ }
++ vm->pasid = 0;
++}
++
++/**
+ * amdgpu_vm_free_levels - free PD/PT levels
+ *
+ * @adev: amdgpu device structure
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+index 58ed2d9..1c9a661 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+@@ -296,6 +296,7 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
+ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ int vm_context, unsigned int pasid);
+ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid);
++void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+ bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
+ unsigned int pasid);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index da67302..ba4e35c 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -355,8 +355,10 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
+ pr_debug("Releasing pdd (topology id %d) for process (pasid %d)\n",
+ pdd->dev->id, p->pasid);
+
+- if (pdd->drm_file)
++ if (pdd->drm_file) {
++ pdd->dev->kfd2kgd->release_process_vm(pdd->dev->kgd, pdd->vm);
+ fput(pdd->drm_file);
++ }
+ else if (pdd->vm)
+ pdd->dev->kfd2kgd->destroy_process_vm(
+ pdd->dev->kgd, pdd->vm);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5333-drm-amdgpu-fix-VM-clearing-for-the-root-PD.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5333-drm-amdgpu-fix-VM-clearing-for-the-root-PD.patch
new file mode 100644
index 00000000..12ac111e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5333-drm-amdgpu-fix-VM-clearing-for-the-root-PD.patch
@@ -0,0 +1,42 @@
+From 510a6b573e8030f21d945cbdf350d573e50c5304 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Thu, 16 Aug 2018 12:01:03 +0200
+Subject: [PATCH 5333/5725] drm/amdgpu: fix VM clearing for the root PD
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+We need to figure out the address after validating the BO, not before.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 4d39bf8..a6b9a3c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -363,7 +363,6 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
+ uint64_t addr;
+ int r;
+
+- addr = amdgpu_bo_gpu_offset(bo);
+ entries = amdgpu_bo_size(bo) / 8;
+
+ if (pte_support_ats) {
+@@ -399,6 +398,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
+ if (r)
+ goto error;
+
++ addr = amdgpu_bo_gpu_offset(bo);
+ if (ats_entries) {
+ uint64_t ats_value;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5334-drm-amdgpu-fix-preamble-handling.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5334-drm-amdgpu-fix-preamble-handling.patch
new file mode 100644
index 00000000..519a1612
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5334-drm-amdgpu-fix-preamble-handling.patch
@@ -0,0 +1,55 @@
+From 5ccff6efa334974ba35d3bad9b6c667fc1438442 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Tue, 21 Aug 2018 15:09:39 +0200
+Subject: [PATCH 5334/5725] drm/amdgpu: fix preamble handling
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+At this point the command submission can still be interrupted.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 16 +++++++++-------
+ 1 file changed, 9 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index a1ad99e..3e41c03 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -1015,13 +1015,9 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
+ if (r)
+ return r;
+
+- if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE) {
+- parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;
+- if (!parser->ctx->preamble_presented) {
+- parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
+- parser->ctx->preamble_presented = true;
+- }
+- }
++ if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
++ parser->job->preamble_status |=
++ AMDGPU_PREAMBLE_IB_PRESENT;
+
+ if (parser->ring && parser->ring != ring)
+ return -EINVAL;
+@@ -1235,6 +1231,12 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
+
+ amdgpu_cs_post_dependencies(p);
+
++ if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
++ !p->ctx->preamble_presented) {
++ job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
++ p->ctx->preamble_presented = true;
++ }
++
+ cs->out.handle = seq;
+ job->uf_sequence = seq;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5335-amdgpu-fix-multi-process-hang-issue.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5335-amdgpu-fix-multi-process-hang-issue.patch
new file mode 100644
index 00000000..422c9ff1
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5335-amdgpu-fix-multi-process-hang-issue.patch
@@ -0,0 +1,56 @@
+From 0230676eb4da2e340777783362fcc5d2cd38fc84 Mon Sep 17 00:00:00 2001
+From: Emily Deng <Emily.Deng@amd.com>
+Date: Wed, 22 Aug 2018 20:18:25 +0800
+Subject: [PATCH 5335/5725] amdgpu: fix multi-process hang issue
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+SWDEV-146499: hang during multi vulkan process testing
+
+cause:
+the second frame's PREAMBLE_IB has clear-state
+and LOAD actions; those actions ruin the pipeline
+that is still processing the previous frame's
+work-load IB.
+
+fix:
+a pipeline sync needs to be inserted if there is a context switch for
+SRIOV (because only SRIOV will report the PREEMPTION flag
+to UMD).
+
+Signed-off-by: Monk Liu <Monk.Liu@amd.com>
+Signed-off-by: Emily Deng <Emily.Deng@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+index a38daa3..1d86c3b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+@@ -171,8 +171,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
+ return r;
+ }
+
++ need_ctx_switch = ring->current_ctx != fence_ctx;
+ if (ring->funcs->emit_pipeline_sync && job &&
+ ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) ||
++ (amdgpu_sriov_vf(adev) && need_ctx_switch) ||
+ amdgpu_vm_need_pipeline_sync(ring, job))) {
+ need_pipe_sync = true;
+
+@@ -206,7 +208,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
+ amdgpu_ring_emit_hdp_flush(ring);
+
+ skip_preamble = ring->current_ctx == fence_ctx;
+- need_ctx_switch = ring->current_ctx != fence_ctx;
+ if (job && ring->funcs->emit_cntxcntl) {
+ if (need_ctx_switch)
+ status |= AMDGPU_HAVE_CTX_SWITCH;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5336-drm-amdgpu-Fix-page-fault-and-kasan-warning-on-pci-d.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5336-drm-amdgpu-Fix-page-fault-and-kasan-warning-on-pci-d.patch
new file mode 100644
index 00000000..efa7e831
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5336-drm-amdgpu-Fix-page-fault-and-kasan-warning-on-pci-d.patch
@@ -0,0 +1,201 @@
+From b489217e297d3866ddf3bcd9bf2d3e3ac8cebb88 Mon Sep 17 00:00:00 2001
+From: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Date: Wed, 22 Aug 2018 10:07:35 -0400
+Subject: [PATCH 5336/5725] drm/amdgpu: Fix page fault and kasan warning on pci
+ device remove.
+
+Problem:
+When executing echo 1 > /sys/class/drm/card0/device/remove, the KASAN warning
+below and a page fault happen because adev->gart.pages is already freed by the
+time amdgpu_gart_unbind is called.
+
+BUG: KASAN: user-memory-access in amdgpu_gart_unbind+0x98/0x180 [amdgpu]
+Write of size 8 at addr 0000000000003648 by task bash/1828
+CPU: 2 PID: 1828 Comm: bash Tainted: G W O 4.18.0-rc1-dev+ #29
+Hardware name: Gigabyte Technology Co., Ltd. AX370-Gaming/AX370-Gaming-CF, BIOS F3 06/19/2017
+Call Trace:
+dump_stack+0x71/0xab
+kasan_report+0x109/0x390
+amdgpu_gart_unbind+0x98/0x180 [amdgpu]
+ttm_tt_unbind+0x43/0x60 [ttm]
+ttm_bo_move_ttm+0x83/0x1c0 [ttm]
+ttm_bo_handle_move_mem+0xb97/0xd00 [ttm]
+ttm_bo_evict+0x273/0x530 [ttm]
+ttm_mem_evict_first+0x29c/0x360 [ttm]
+ttm_bo_force_list_clean+0xfc/0x210 [ttm]
+ttm_bo_clean_mm+0xe7/0x160 [ttm]
+amdgpu_ttm_fini+0xda/0x1d0 [amdgpu]
+amdgpu_bo_fini+0xf/0x60 [amdgpu]
+gmc_v8_0_sw_fini+0x36/0x70 [amdgpu]
+amdgpu_device_fini+0x2d0/0x7d0 [amdgpu]
+amdgpu_driver_unload_kms+0x6a/0xd0 [amdgpu]
+drm_dev_unregister+0x79/0x180 [drm]
+amdgpu_pci_remove+0x2a/0x60 [amdgpu]
+pci_device_remove+0x5b/0x100
+device_release_driver_internal+0x236/0x360
+pci_stop_bus_device+0xbf/0xf0
+pci_stop_and_remove_bus_device_locked+0x16/0x30
+remove_store+0xda/0xf0
+kernfs_fop_write+0x186/0x220
+__vfs_write+0xcc/0x330
+vfs_write+0xe6/0x250
+ksys_write+0xb1/0x140
+do_syscall_64+0x77/0x1e0
+entry_SYSCALL_64_after_hwframe+0x44/0xa9
+RIP: 0033:0x7f66ebbb32c0
+
+Fix:
+Split gmc_v{6,7,8,9}_0_gart_fini to postpone amdgpu_gart_fini until after
+the memory managers are shut down, since the GART unbind happens
+as part of this procedure.
+
+Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Acked-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 9 ++-------
+ drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 16 ++--------------
+ drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 16 ++--------------
+ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 16 ++--------------
+ 4 files changed, 8 insertions(+), 49 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+index 3911c52..b8335d8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+@@ -636,12 +636,6 @@ static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
+ amdgpu_gart_table_vram_unpin(adev);
+ }
+
+-static void gmc_v6_0_gart_fini(struct amdgpu_device *adev)
+-{
+- amdgpu_gart_table_vram_free(adev);
+- amdgpu_gart_fini(adev);
+-}
+-
+ static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
+ u32 status, u32 addr, u32 mc_client)
+ {
+@@ -938,8 +932,9 @@ static int gmc_v6_0_sw_fini(void *handle)
+
+ amdgpu_gem_force_release(adev);
+ amdgpu_vm_manager_fini(adev);
+- gmc_v6_0_gart_fini(adev);
++ amdgpu_gart_table_vram_free(adev);
+ amdgpu_bo_fini(adev);
++ amdgpu_gart_fini(adev);
+ release_firmware(adev->gmc.fw);
+ adev->gmc.fw = NULL;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+index 35fb090..61ffd5c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+@@ -751,19 +751,6 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
+ }
+
+ /**
+- * gmc_v7_0_gart_fini - vm fini callback
+- *
+- * @adev: amdgpu_device pointer
+- *
+- * Tears down the driver GART/VM setup (CIK).
+- */
+-static void gmc_v7_0_gart_fini(struct amdgpu_device *adev)
+-{
+- amdgpu_gart_table_vram_free(adev);
+- amdgpu_gart_fini(adev);
+-}
+-
+-/**
+ * gmc_v7_0_vm_decode_fault - print human readable fault info
+ *
+ * @adev: amdgpu_device pointer
+@@ -1100,8 +1087,9 @@ static int gmc_v7_0_sw_fini(void *handle)
+ amdgpu_gem_force_release(adev);
+ amdgpu_vm_manager_fini(adev);
+ kfree(adev->gmc.vm_fault_info);
+- gmc_v7_0_gart_fini(adev);
++ amdgpu_gart_table_vram_free(adev);
+ amdgpu_bo_fini(adev);
++ amdgpu_gart_fini(adev);
+ release_firmware(adev->gmc.fw);
+ adev->gmc.fw = NULL;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+index bd3b859..199f1a5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+@@ -977,19 +977,6 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
+ }
+
+ /**
+- * gmc_v8_0_gart_fini - vm fini callback
+- *
+- * @adev: amdgpu_device pointer
+- *
+- * Tears down the driver GART/VM setup (CIK).
+- */
+-static void gmc_v8_0_gart_fini(struct amdgpu_device *adev)
+-{
+- amdgpu_gart_table_vram_free(adev);
+- amdgpu_gart_fini(adev);
+-}
+-
+-/**
+ * gmc_v8_0_vm_decode_fault - print human readable fault info
+ *
+ * @adev: amdgpu_device pointer
+@@ -1208,8 +1195,9 @@ static int gmc_v8_0_sw_fini(void *handle)
+ amdgpu_gem_force_release(adev);
+ amdgpu_vm_manager_fini(adev);
+ kfree(adev->gmc.vm_fault_info);
+- gmc_v8_0_gart_fini(adev);
++ amdgpu_gart_table_vram_free(adev);
+ amdgpu_bo_fini(adev);
++ amdgpu_gart_fini(adev);
+ release_firmware(adev->gmc.fw);
+ adev->gmc.fw = NULL;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index d7d6d0c..7362ac1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -1013,31 +1013,19 @@ static int gmc_v9_0_sw_init(void *handle)
+ return 0;
+ }
+
+-/**
+- * gmc_v9_0_gart_fini - vm fini callback
+- *
+- * @adev: amdgpu_device pointer
+- *
+- * Tears down the driver GART/VM setup (CIK).
+- */
+-static void gmc_v9_0_gart_fini(struct amdgpu_device *adev)
+-{
+- amdgpu_gart_table_vram_free(adev);
+- amdgpu_gart_fini(adev);
+-}
+-
+ static int gmc_v9_0_sw_fini(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ amdgpu_gem_force_release(adev);
+ amdgpu_vm_manager_fini(adev);
+- gmc_v9_0_gart_fini(adev);
+
+ if (gmc_v9_0_keep_stolen_memory(adev))
+ amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
+
++ amdgpu_gart_table_vram_free(adev);
+ amdgpu_bo_fini(adev);
++ amdgpu_gart_fini(adev);
+
+ return 0;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5337-drm-amd-display-Fix-bug-use-wrong-pp-interface.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5337-drm-amd-display-Fix-bug-use-wrong-pp-interface.patch
new file mode 100644
index 00000000..6c25cb64
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5337-drm-amd-display-Fix-bug-use-wrong-pp-interface.patch
@@ -0,0 +1,51 @@
+From ec1fddd5f88206908dabcd3579e338431346b0c3 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Thu, 16 Aug 2018 11:36:38 +0800
+Subject: [PATCH 5337/5725] drm/amd/display: Fix bug use wrong pp interface
+
+Used the wrong pp interface; the original interface is
+exposed by dpm on SI and partial CI.
+
+Pointed out by Francis David <david.francis@amd.com>
+
+v2: dal only needs to set min_dcefclk and min_fclk to smu,
+    so use the display_clock_voltage_request interface
+    instead of updating the whole display configuration.
+
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | 12 ++++++++++--
+ 1 file changed, 10 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+index e5c5b0a..cfa907b 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+@@ -480,12 +480,20 @@ void pp_rv_set_display_requirement(struct pp_smu *pp,
+ {
+ const struct dc_context *ctx = pp->dm;
+ struct amdgpu_device *adev = ctx->driver_context;
++ void *pp_handle = adev->powerplay.pp_handle;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
++ struct pp_display_clock_request clock = {0};
+
+- if (!pp_funcs || !pp_funcs->display_configuration_changed)
++ if (!pp_funcs || !pp_funcs->display_clock_voltage_request)
+ return;
+
+- amdgpu_dpm_display_configuration_changed(adev);
++ clock.clock_type = amd_pp_dcf_clock;
++ clock.clock_freq_in_khz = req->hard_min_dcefclk_khz;
++ pp_funcs->display_clock_voltage_request(pp_handle, &clock);
++
++ clock.clock_type = amd_pp_f_clock;
++ clock.clock_freq_in_khz = req->hard_min_fclk_khz;
++ pp_funcs->display_clock_voltage_request(pp_handle, &clock);
+ }
+
+ void pp_rv_set_wm_ranges(struct pp_smu *pp,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5338-drm-amdgpu-remove-extra-root-PD-alignment.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5338-drm-amdgpu-remove-extra-root-PD-alignment.patch
new file mode 100644
index 00000000..884e47ee
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5338-drm-amdgpu-remove-extra-root-PD-alignment.patch
@@ -0,0 +1,60 @@
+From bea7b8d6bc5ebc0b32b3c83ad7f6c4a80c4f1deb Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Wed, 22 Aug 2018 15:47:37 +0200
+Subject: [PATCH 5338/5725] drm/amdgpu: remove extra root PD alignment
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Just another leftover from radeon.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Acked-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 4 +---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 3 ---
+ 2 files changed, 1 insertion(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index a6b9a3c..4abd3b0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -2634,8 +2634,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ {
+ struct amdgpu_bo_param bp;
+ struct amdgpu_bo *root;
+- const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
+- AMDGPU_VM_PTE_COUNT(adev) * 8);
+ unsigned ring_instance;
+ struct amdgpu_ring *ring;
+ struct drm_sched_rq *rq;
+@@ -2690,7 +2688,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level);
+ memset(&bp, 0, sizeof(bp));
+ bp.size = size;
+- bp.byte_align = align;
++ bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
+ bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
+ bp.flags = flags;
+ bp.type = ttm_bo_type_kernel;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+index 1c9a661..e6ca941 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+@@ -51,9 +51,6 @@ struct amdgpu_bo_list_entry;
+ /* number of entries in page table */
+ #define AMDGPU_VM_PTE_COUNT(adev) (1 << (adev)->vm_manager.block_size)
+
+-/* PTBs (Page Table Blocks) need to be aligned to 32K */
+-#define AMDGPU_VM_PTB_ALIGN_SIZE 32768
+-
+ #define AMDGPU_PTE_VALID (1ULL << 0)
+ #define AMDGPU_PTE_SYSTEM (1ULL << 1)
+ #define AMDGPU_PTE_SNOOPED (1ULL << 2)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5339-drm-amdgpu-add-helper-for-VM-PD-PT-allocation-parame.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5339-drm-amdgpu-add-helper-for-VM-PD-PT-allocation-parame.patch
new file mode 100644
index 00000000..39c224ea
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5339-drm-amdgpu-add-helper-for-VM-PD-PT-allocation-parame.patch
@@ -0,0 +1,105 @@
+From 446ef9067389bbdb9cc51d3b53d3391a8da09b26 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Mon, 27 Aug 2018 15:17:59 -0500
+Subject: [PATCH 5339/5725] drm/amdgpu: add helper for VM PD/PT allocation
+ parameters v3
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Add a helper function to figure them out only once.
+
+v2: fix typo with memset
+v3: rebase on kfd changes (Alex)
+
+Change-Id: Idc7b0fdb3c8cf51b69818de83c6954665e8dfee2
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 37 +++-------------------------------
+ 1 file changed, 3 insertions(+), 34 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 4abd3b0..87c1b55 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -498,8 +498,8 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
+ unsigned level, bool ats)
+ {
+ unsigned shift = amdgpu_vm_level_shift(adev, level);
++ struct amdgpu_bo_param bp;
+ unsigned pt_idx, from, to;
+- u64 flags;
+ int r;
+
+ if (!parent->entries) {
+@@ -522,30 +522,14 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
+ saddr = saddr & ((1 << shift) - 1);
+ eaddr = eaddr & ((1 << shift) - 1);
+
+- flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+- if (vm->root.base.bo->shadow)
+- flags |= AMDGPU_GEM_CREATE_SHADOW;
+- if (vm->use_cpu_for_update)
+- flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+- else
+- flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
++ amdgpu_vm_bo_param(adev, vm, level, &bp);
+
+ /* walk over the address space and allocate the page tables */
+ for (pt_idx = from; pt_idx <= to; ++pt_idx) {
+- struct reservation_object *resv = vm->root.base.bo->tbo.resv;
+ struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
+ struct amdgpu_bo *pt;
+
+ if (!entry->base.bo) {
+- struct amdgpu_bo_param bp;
+-
+- memset(&bp, 0, sizeof(bp));
+- bp.size = amdgpu_vm_bo_size(adev, level);
+- bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
+- bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
+- bp.flags = flags;
+- bp.type = ttm_bo_type_kernel;
+- bp.resv = resv;
+ r = amdgpu_bo_create(adev, &bp, &pt);
+ if (r)
+ return r;
+@@ -2637,8 +2621,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ unsigned ring_instance;
+ struct amdgpu_ring *ring;
+ struct drm_sched_rq *rq;
+- unsigned long size;
+- uint64_t flags;
+ int r, i;
+
+ vm->va = RB_ROOT_CACHED;
+@@ -2679,20 +2661,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ "CPU update of VM recommended only for large BAR system\n");
+ vm->last_update = NULL;
+
+- flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+- if (vm->use_cpu_for_update)
+- flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+- else if (vm_context != AMDGPU_VM_CONTEXT_COMPUTE)
+- flags |= AMDGPU_GEM_CREATE_SHADOW;
+-
+- size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level);
+- memset(&bp, 0, sizeof(bp));
+- bp.size = size;
+- bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
+- bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
+- bp.flags = flags;
+- bp.type = ttm_bo_type_kernel;
+- bp.resv = NULL;
++ amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, &bp);
+ r = amdgpu_bo_create(adev, &bp, &root);
+ if (r)
+ goto error_free_sched_entity;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5340-drm-amdgpu-add-GMC9-support-for-PDs-PTs-in-system-me.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5340-drm-amdgpu-add-GMC9-support-for-PDs-PTs-in-system-me.patch
new file mode 100644
index 00000000..e7cc54ea
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5340-drm-amdgpu-add-GMC9-support-for-PDs-PTs-in-system-me.patch
@@ -0,0 +1,36 @@
+From 0a5b80924446d912f99d5d4e48cb28adac775a6d Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Wed, 22 Aug 2018 12:27:05 +0200
+Subject: [PATCH 5340/5725] drm/amdgpu: add GMC9 support for PDs/PTs in system
+ memory
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Add the necessary handling.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index 7362ac1..b5d849b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -559,7 +559,7 @@ static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
+ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
+ uint64_t *addr, uint64_t *flags)
+ {
+- if (!(*flags & AMDGPU_PDE_PTE))
++ if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
+ *addr = adev->vm_manager.vram_base_offset + *addr -
+ adev->gmc.vram_start;
+ BUG_ON(*addr & 0xFFFF00000000003FULL);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5341-drm-amdgpu-add-amdgpu_gmc_get_pde_for_bo-helper-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5341-drm-amdgpu-add-amdgpu_gmc_get_pde_for_bo-helper-v2.patch
new file mode 100644
index 00000000..6d2d6ed0
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5341-drm-amdgpu-add-amdgpu_gmc_get_pde_for_bo-helper-v2.patch
@@ -0,0 +1,39 @@
+From 63eac2d24dff8a350a71a1dcd3ed2ad5c4bcf983 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Wed, 22 Aug 2018 14:11:19 +0200
+Subject: [PATCH 5341/5725] drm/amdgpu: add amdgpu_gmc_get_pde_for_bo helper v2
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Helper to get the PDE for a PD/PT.
+
+v2: improve documentation
+
+Change-Id: I292ed8fe0ac5c31119568532ccd09c4981d8e5a8
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index 04bc197..4c7e231 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -1490,9 +1490,6 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
+ flags |= AMDGPU_PTE_SNOOPED;
+ }
+
+- if (mem && mem->mem_type == AMDGPU_PL_DGMA_IMPORT)
+- flags |= AMDGPU_PTE_SYSTEM;
+-
+ return flags;
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5342-drm-amd-display-Improve-spelling-grammar-and-formatt.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5342-drm-amd-display-Improve-spelling-grammar-and-formatt.patch
new file mode 100644
index 00000000..b94da601
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5342-drm-amd-display-Improve-spelling-grammar-and-formatt.patch
@@ -0,0 +1,638 @@
+From fde7d81af7bcc798ba0843f31cf6b4915734d342 Mon Sep 17 00:00:00 2001
+From: David Francis <David.Francis@amd.com>
+Date: Wed, 15 Aug 2018 14:38:30 -0400
+Subject: [PATCH 5342/5725] drm/amd/display: Improve spelling, grammar, and
+ formatting of amdgpu_dm.c comments
+
+[Why]
+Good spelling and grammar make comments
+more pleasant and clearer.
+
+Linux has coding standards for comments
+that we should try to follow.
+
+[How]
+Fix obvious spelling and grammar issues
+
+Ensure all comments use '/*' and '*/' and multi-line comments
+follow linux convention
+
+Remove line-of-stars comments that do not separate sections
+of code and comments referring to lines of code that have
+since been removed
+
+Change-Id: I31145072df4c79ab46649ee65a0d49c16733e4bd
+Signed-off-by: David Francis <David.Francis@amd.com>
+Reviewed-by: Nicholas Kazlauskas <Nicholas.Kazlauskas@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 216 ++++++++++++----------
+ 1 file changed, 121 insertions(+), 95 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 3815c7b..8eb935e 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -80,12 +80,7 @@ MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
+ static int amdgpu_dm_init(struct amdgpu_device *adev);
+ static void amdgpu_dm_fini(struct amdgpu_device *adev);
+
+-/* initializes drm_device display related structures, based on the information
+- * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
+- * drm_encoder, drm_mode_config
+- *
+- * Returns 0 on success
+- */
++
+ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
+ /* removes and deallocates the drm structures, created by the above function */
+ static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
+@@ -113,6 +108,12 @@ static int amdgpu_dm_atomic_commit(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ bool nonblock);
+
++static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
++
++static int amdgpu_dm_atomic_commit(struct drm_device *dev,
++ struct drm_atomic_state *state,
++ bool nonblock);
++
+ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
+
+ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+@@ -243,10 +244,6 @@ get_crtc_by_otg_inst(struct amdgpu_device *adev,
+ struct drm_crtc *crtc;
+ struct amdgpu_crtc *amdgpu_crtc;
+
+- /*
+- * following if is check inherited from both functions where this one is
+- * used now. Need to be checked why it could happen.
+- */
+ if (otg_inst == -1) {
+ WARN_ON(1);
+ return adev->mode_info.crtcs[0];
+@@ -272,7 +269,7 @@ static void dm_pflip_high_irq(void *interrupt_params)
+ amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
+
+ /* IRQ could occur when in initial stage */
+- /*TODO work and BO cleanup */
++ /* TODO work and BO cleanup */
+ if (amdgpu_crtc == NULL) {
+ DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
+ return;
+@@ -290,9 +287,9 @@ static void dm_pflip_high_irq(void *interrupt_params)
+ return;
+ }
+
+- /* wakeup usersapce */
++ /* wake up usersapce */
+ if (amdgpu_crtc->event) {
+- /* Update to correct count/ts if racing with vblank irq */
++ /* Update to correct count(s) if racing with vblank irq */
+ drm_accurate_vblank_count(&amdgpu_crtc->base);
+
+ drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);
+@@ -391,8 +388,8 @@ static void amdgpu_dm_fbc_init(struct drm_connector *connector)
+
+ }
+
+-
+-/* Init display KMS
++/*
++ * Init display KMS
+ *
+ * Returns 0 on success
+ */
+@@ -1003,24 +1000,27 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
+
+ sink = aconnector->dc_link->local_sink;
+
+- /* Edid mgmt connector gets first update only in mode_valid hook and then
++ /*
++ * Edid mgmt connector gets first update only in mode_valid hook and then
+ * the connector sink is set to either fake or physical sink depends on link status.
+- * don't do it here if u are during boot
++ * Skip if already done during boot.
+ */
+ if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
+ && aconnector->dc_em_sink) {
+
+- /* For S3 resume with headless use eml_sink to fake stream
+- * because on resume connecotr->sink is set ti NULL
++ /*
++ * For S3 resume with headless use eml_sink to fake stream
++ * because on resume connector->sink is set to NULL
+ */
+ mutex_lock(&dev->mode_config.mutex);
+
+ if (sink) {
+ if (aconnector->dc_sink) {
+ amdgpu_dm_update_freesync_caps(connector, NULL);
+- /* retain and release bellow are used for
+- * bump up refcount for sink because the link don't point
+- * to it anymore after disconnect so on next crtc to connector
++ /*
++ * retain and release below are used to
++ * bump up refcount for sink because the link doesn't point
++ * to it anymore after disconnect, so on next crtc to connector
+ * reshuffle by UMD we will get into unwanted dc_sink release
+ */
+ if (aconnector->dc_sink != aconnector->dc_em_sink)
+@@ -1049,8 +1049,10 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
+ return;
+
+ if (aconnector->dc_sink == sink) {
+- /* We got a DP short pulse (Link Loss, DP CTS, etc...).
+- * Do nothing!! */
++ /*
++ * We got a DP short pulse (Link Loss, DP CTS, etc...).
++ * Do nothing!!
++ */
+ DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
+ aconnector->connector_id);
+ return;
+@@ -1061,11 +1063,15 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
+
+ mutex_lock(&dev->mode_config.mutex);
+
+- /* 1. Update status of the drm connector
+- * 2. Send an event and let userspace tell us what to do */
++ /*
++ * 1. Update status of the drm connector
++ * 2. Send an event and let userspace tell us what to do
++ */
+ if (sink) {
+- /* TODO: check if we still need the S3 mode update workaround.
+- * If yes, put it here. */
++ /*
++ * TODO: check if we still need the S3 mode update workaround.
++ * If yes, put it here.
++ */
+ if (aconnector->dc_sink)
+ amdgpu_dm_update_freesync_caps(connector, NULL);
+
+@@ -1100,7 +1106,8 @@ static void handle_hpd_irq(void *param)
+ struct drm_device *dev = connector->dev;
+ enum dc_connection_type new_connection_type = dc_connection_none;
+
+- /* In case of failure or MST no need to update connector status or notify the OS
++ /*
++ * In case of failure or MST no need to update connector status or notify the OS
+ * since (for MST case) MST does this in it's own context.
+ */
+ mutex_lock(&aconnector->hpd_lock);
+@@ -1198,7 +1205,7 @@ static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
+ break;
+ }
+
+- /* check if there is new irq to be handle */
++ /* check if there is new irq to be handled */
+ dret = drm_dp_dpcd_read(
+ &aconnector->dm_dp_aux.aux,
+ dpcd_addr,
+@@ -1224,7 +1231,8 @@ static void handle_hpd_rx_irq(void *param)
+ bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
+ enum dc_connection_type new_connection_type = dc_connection_none;
+
+- /* TODO:Temporary add mutex to protect hpd interrupt not have a gpio
++ /*
++ * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
+ * conflict, after implement i2c helper, this mutex should be
+ * retired.
+ */
+@@ -1332,7 +1340,8 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
+ int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
+ int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+
+- /* Actions of amdgpu_irq_add_id():
++ /*
++ * Actions of amdgpu_irq_add_id():
+ * 1. Register a set() function with base driver.
+ * Base driver will call set() function to enable/disable an
+ * interrupt in DC hardware.
+@@ -1412,7 +1421,8 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
+ int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
+ int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+
+- /* Actions of amdgpu_irq_add_id():
++ /*
++ * Actions of amdgpu_irq_add_id():
+ * 1. Register a set() function with base driver.
+ * Base driver will call set() function to enable/disable an
+ * interrupt in DC hardware.
+@@ -1421,7 +1431,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
+ * coming from DC hardware.
+ * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
+ * for acknowledging and handling.
+- * */
++ */
+
+ /* Use VSTARTUP interrupt */
+ for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
+@@ -1499,7 +1509,7 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
+
+ adev->ddev->mode_config.preferred_depth = 24;
+ adev->ddev->mode_config.prefer_shadow = 1;
+- /* indicate support of immediate flip */
++ /* indicates support for immediate flip */
+ adev->ddev->mode_config.async_page_flip = true;
+
+ adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
+@@ -1585,7 +1595,7 @@ static int initialize_plane(struct amdgpu_display_manager *dm,
+ plane->base.type = mode_info->plane_type[plane_id];
+
+ /*
+- * HACK: IGT tests expect that each plane can only have one
++ * HACK: IGT tests expect that each plane can only have
+ * one possible CRTC. For now, set one CRTC for each
+ * plane that is not an underlay, but still allow multiple
+ * CRTCs for underlay planes.
+@@ -1613,10 +1623,11 @@ static void register_backlight_device(struct amdgpu_display_manager *dm,
+
+ if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
+ link->type != dc_connection_none) {
+- /* Event if registration failed, we should continue with
+- * DM initialization because not having a backlight control
+- * is better then a black screen.
+- */
++ /*
++ * Event if registration failed, we should continue with
++ * DM initialization because not having a backlight control
++ * is better then a black screen.
++ */
+ amdgpu_dm_register_backlight_device(dm);
+
+ if (dm->backlight_dev)
+@@ -1626,7 +1637,8 @@ static void register_backlight_device(struct amdgpu_display_manager *dm,
+ }
+
+
+-/* In this architecture, the association
++/*
++ * In this architecture, the association
+ * connector -> encoder -> crtc
+ * id not really requried. The crtc and connector will hold the
+ * display_index as an abstraction to use with DAL component
+@@ -1782,7 +1794,7 @@ static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
+ * amdgpu_display_funcs functions
+ *****************************************************************************/
+
+-/**
++/*
+ * dm_bandwidth_update - program display watermarks
+ *
+ * @adev: amdgpu_device pointer
+@@ -2013,9 +2025,11 @@ static int dm_early_init(void *handle)
+ if (adev->mode_info.funcs == NULL)
+ adev->mode_info.funcs = &dm_display_funcs;
+
+- /* Note: Do NOT change adev->audio_endpt_rreg and
++ /*
++ * Note: Do NOT change adev->audio_endpt_rreg and
+ * adev->audio_endpt_wreg because they are initialised in
+- * amdgpu_device_init() */
++ * amdgpu_device_init()
++ */
+ #if defined(CONFIG_DEBUG_KERNEL_DC)
+ device_create_file(
+ adev->ddev->dev,
+@@ -2061,7 +2075,7 @@ static bool fill_rects_from_plane_state(const struct drm_plane_state *state,
+ {
+ plane_state->src_rect.x = state->src_x >> 16;
+ plane_state->src_rect.y = state->src_y >> 16;
+- /*we ignore for now mantissa and do not to deal with floating pixels :(*/
++ /* we ignore the mantissa for now and do not deal with floating pixels :( */
+ plane_state->src_rect.width = state->src_w >> 16;
+
+ if (plane_state->src_rect.width == 0)
+@@ -2113,7 +2127,7 @@ static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
+ int r = amdgpu_bo_reserve(rbo, false);
+
+ if (unlikely(r)) {
+- // Don't show error msg. when return -ERESTARTSYS
++ /* Don't show error message when returning -ERESTARTSYS */
+ if (r != -ERESTARTSYS)
+ DRM_ERROR("Unable to reserve buffer: %d\n", r);
+ return r;
+@@ -2311,8 +2325,6 @@ static int fill_plane_attributes(struct amdgpu_device *adev,
+ return ret;
+ }
+
+-/*****************************************************************************/
+-
+ static void update_stream_scaling_settings(const struct drm_display_mode *mode,
+ const struct dm_connector_state *dm_state,
+ struct dc_stream_state *stream)
+@@ -2375,7 +2387,8 @@ convert_color_depth_from_display_info(const struct drm_connector *connector)
+
+ switch (bpc) {
+ case 0:
+- /* Temporary Work around, DRM don't parse color depth for
++ /*
++ * Temporary Work around, DRM doesn't parse color depth for
+ * EDID revision before 1.4
+ * TODO: Fix edid parsing
+ */
+@@ -2488,8 +2501,6 @@ static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_
+
+ }
+
+-/*****************************************************************************/
+-
+ static int
+ get_norm_pix_clk(const struct dc_crtc_timing *timing)
+ {
+@@ -2804,10 +2815,11 @@ static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
+ for (i = 0; i < context->stream_count ; i++) {
+ if (!context->streams[i])
+ continue;
+- /* TODO: add a function to read AMD VSDB bits and will set
+- * crtc_sync_master.multi_sync_enabled flag
+- * For now its set to false
+- */
++ /*
++ *TODO: add a function to read AMD VSDB bits and will set
++ * crtc_sync_master.multi_sync_enabled flag
++ * For now it's set to false
++ */
+ set_multisync_trigger_params(context->streams[i]);
+ }
+ set_master_stream(context, context->streams, context->stream_count);
+@@ -2869,7 +2881,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+ head);
+
+ if (preferred_mode == NULL) {
+- /* This may not be an error, the use case is when we we have no
++ /*
++ * This may not be an error, the use case is when we have no
+ * usermode calls to reset and set mode upon hotplug. In this
+ * case, we call set mode ourselves to restore the previous mode
+ * and the modelist may not be filled in in time.
+@@ -3011,10 +3024,12 @@ amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
+ bool connected;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+- /* Notes:
++ /*
++ * Notes:
+ * 1. This interface is NOT called in context of HPD irq.
+ * 2. This interface *is called* in context of user-mode ioctl. Which
+- * makes it a bad place for *any* MST-related activit. */
++ * makes it a bad place for *any* MST-related activity.
++ */
+
+ if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
+ !aconnector->fake_enable)
+@@ -3270,7 +3285,8 @@ static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
+ {
+ struct dc_link *link = (struct dc_link *)aconnector->dc_link;
+
+- /* In case of headless boot with force on for DP managed connector
++ /*
++ * In case of headless boot with force on for DP managed connector
+ * Those settings have to be != 0 to get initial modeset
+ */
+ if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
+@@ -3298,7 +3314,8 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec
+ (mode->flags & DRM_MODE_FLAG_DBLSCAN))
+ return result;
+
+- /* Only run this the first time mode_valid is called to initilialize
++ /*
++ * Only run this the first time mode_valid is called to initilialize
+ * EDID mgmt
+ */
+ if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
+@@ -3339,9 +3356,9 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec
+ static const struct drm_connector_helper_funcs
+ amdgpu_dm_connector_helper_funcs = {
+ /*
+- * If hotplug a second bigger display in FB Con mode, bigger resolution
++ * If hotplugging a second bigger display in FB Con mode, bigger resolution
+ * modes will be filtered by drm_mode_validate_size(), and those modes
+- * is missing after user start lightdm. So we need to renew modes list.
++ * are missing after user start lightdm. So we need to renew modes list.
+ * in get_modes call back, not just return the modes count
+ */
+ .get_modes = get_modes,
+@@ -3367,7 +3384,7 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
+ return ret;
+ }
+
+- /* In some use cases, like reset, no stream is attached */
++ /* In some use cases, like reset, no stream is attached */
+ if (!dm_crtc_state->stream)
+ return 0;
+
+@@ -3588,7 +3605,7 @@ static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
+ * TODO: these are currently initialized to rgb formats only.
+ * For future use cases we should either initialize them dynamically based on
+ * plane capabilities, or initialize this array to all formats, so internal drm
+- * check will succeed, and let DC to implement proper check
++ * check will succeed, and let DC implement proper check
+ */
+ static const uint32_t rgb_formats[] = {
+ DRM_FORMAT_RGB888,
+@@ -3916,7 +3933,8 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
+ aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
+ mutex_init(&aconnector->hpd_lock);
+
+- /* configure support HPD hot plug connector_>polled default value is 0
++ /*
++ * configure support HPD hot plug connector_>polled default value is 0
+ * which means HPD hot plug not supported
+ */
+ switch (connector_type) {
+@@ -4029,7 +4047,8 @@ create_i2c(struct ddc_service *ddc_service,
+ }
+
+
+-/* Note: this function assumes that dc_link_detect() was called for the
++/*
++ * Note: this function assumes that dc_link_detect() was called for the
+ * dc_link which will be represented by this aconnector.
+ */
+ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
+@@ -4360,7 +4379,8 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
+ target_vblank = target - drm_crtc_vblank_count(crtc) +
+ amdgpu_get_vblank_counter_kms(crtc->dev, acrtc->crtc_id);
+
+- /* TODO This might fail and hence better not used, wait
++ /*
++ * TODO This might fail and hence better not used, wait
+ * explicitly on fences instead
+ * and in general should be called for
+ * blocking commit to as per framework helpers
+@@ -4377,7 +4397,8 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
+
+ amdgpu_bo_unreserve(abo);
+
+- /* Wait until we're out of the vertical blank period before the one
++ /*
++ * Wait until we're out of the vertical blank period before the one
+ * targeted by the flip
+ */
+ while ((acrtc->enabled &&
+@@ -4631,7 +4652,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+ }
+ }
+
+-/**
++/*
+ * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
+ * @crtc_state: the DRM CRTC state
+ * @stream_state: the DC stream state.
+@@ -4668,8 +4689,10 @@ static int amdgpu_dm_atomic_commit(struct drm_device *dev,
+ if (drm_atomic_crtc_needs_modeset(new_crtc_state) && dm_old_crtc_state->stream)
+ manage_dm_interrupts(adev, acrtc, false);
+ }
+- /* Add check here for SoC's that support hardware cursor plane, to
+- * unset legacy_cursor_update */
++ /*
++ * Add check here for SoC's that support hardware cursor plane, to
++ * unset legacy_cursor_update
++ */
+
+ return drm_atomic_helper_commit(dev, state, nonblock);
+
+@@ -4736,8 +4759,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+ * this could happen because of issues with
+ * userspace notifications delivery.
+ * In this case userspace tries to set mode on
+- * display which is disconnect in fact.
+- * dc_sink in NULL in this case on aconnector.
++ * display which is disconnected in fact.
++ * dc_sink is NULL in this case on aconnector.
+ * We expect reset mode will come soon.
+ *
+ * This can also happen when unplug is done
+@@ -4806,7 +4829,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+ if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+- /* Skip any thing not scale or underscan changes */
++ /* Skip anything that is not scaling or underscan changes */
+ if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
+ continue;
+
+@@ -4890,10 +4913,11 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+
+ drm_atomic_helper_cleanup_planes(dev, state);
+
+- /* Finally, drop a runtime PM reference for each newly disabled CRTC,
+- * so we can put the GPU into runtime suspend if we're not driving any
+- * displays anymore
+- */
++ /*
++ * Finally, drop a runtime PM reference for each newly disabled CRTC,
++ * so we can put the GPU into runtime suspend if we're not driving any
++ * displays anymore
++ */
+ for (i = 0; i < crtc_disable_count; i++)
+ pm_runtime_put_autosuspend(dev->dev);
+ pm_runtime_mark_last_busy(dev->dev);
+@@ -4958,9 +4982,9 @@ static int dm_force_atomic_commit(struct drm_connector *connector)
+ }
+
+ /*
+- * This functions handle all cases when set mode does not come upon hotplug.
+- * This include when the same display is unplugged then plugged back into the
+- * same port and when we are running without usermode desktop manager supprot
++ * This function handles all cases when set mode does not come upon hotplug.
++ * This includes when a display is unplugged then plugged back into the
++ * same port and when running without usermode desktop manager supprot
+ */
+ void dm_restore_drm_connector_state(struct drm_device *dev,
+ struct drm_connector *connector)
+@@ -4989,7 +5013,7 @@ void dm_restore_drm_connector_state(struct drm_device *dev,
+ dm_force_atomic_commit(&aconnector->base);
+ }
+
+-/*`
++/*
+ * Grabs all modesetting locks to serialize against any blocking commits,
+ * Waits for completion of all non blocking commits.
+ */
+@@ -5000,7 +5024,8 @@ static int do_aquire_global_lock(struct drm_device *dev,
+ struct drm_crtc_commit *commit;
+ long ret;
+
+- /* Adding all modeset locks to aquire_ctx will
++ /*
++ * Adding all modeset locks to aquire_ctx will
+ * ensure that when the framework release it the
+ * extra locks we are locking here will get released to
+ */
+@@ -5019,7 +5044,8 @@ static int do_aquire_global_lock(struct drm_device *dev,
+ if (!commit)
+ continue;
+
+- /* Make sure all pending HW programming completed and
++ /*
++ * Make sure all pending HW programming completed and
+ * page flips done
+ */
+ ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
+@@ -5113,7 +5139,7 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
+
+ /* TODO This hack should go away */
+ if (aconnector && enable) {
+- // Make sure fake sink is created in plug-in scenario
++ /* Make sure fake sink is created in plug-in scenario */
+ drm_new_conn_state = drm_atomic_get_new_connector_state(state,
+ &aconnector->base);
+
+@@ -5134,9 +5160,9 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
+
+ /*
+ * we can have no stream on ACTION_SET if a display
+- * was disconnected during S3, in this case it not and
++ * was disconnected during S3, in this case it is not an
+ * error, the OS will be updated after detection, and
+- * do the right thing on next atomic commit
++ * will do the right thing on next atomic commit
+ */
+
+ if (!new_stream) {
+@@ -5493,7 +5519,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ goto fail;
+
+ /* Check scaling and underscan changes*/
+- /*TODO Removed scaling changes validation due to inability to commit
++ /* TODO Removed scaling changes validation due to inability to commit
+ * new stream into context w\o causing full reset. Need to
+ * decide how to handle.
+ */
+@@ -5516,7 +5542,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+
+ /*
+ * For full updates case when
+- * removing/adding/updating streams on once CRTC while flipping
++ * removing/adding/updating streams on once CRTC while flipping
+ * on another CRTC,
+ * acquiring global lock will guarantee that any such full
+ * update commit
+@@ -5626,12 +5652,12 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
+ (edid->version == 1 && edid->revision > 1))) {
+ for (i = 0; i < 4; i++) {
+
+- timing = &edid->detailed_timings[i];
+- data = &timing->data.other_data;
+- range = &data->data.range;
+- /*
+- * Check if monitor has continuous frequency mode
+- */
++ timing = &edid->detailed_timings[i];
++ data = &timing->data.other_data;
++ range = &data->data.range;
++ /*
++ * Check if monitor has continuous frequency mode
++ */
+ if (data->type != EDID_DETAIL_MONITOR_RANGE)
+ continue;
+ /*
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5343-drm-amd-display-Support-reading-hw-state-from-debugf.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5343-drm-amd-display-Support-reading-hw-state-from-debugf.patch
new file mode 100644
index 00000000..96d8e9ee
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5343-drm-amd-display-Support-reading-hw-state-from-debugf.patch
@@ -0,0 +1,386 @@
+From 6bd918c6223b680eb46563dd586132ab96fa20f9 Mon Sep 17 00:00:00 2001
+From: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Date: Wed, 15 Aug 2018 12:00:23 -0400
+Subject: [PATCH 5343/5725] drm/amd/display: Support reading hw state from
+ debugfs file
+
+[Why]
+
+Logging hardware state can be done by triggering a write to the
+debugfs file. It would also be useful to be able to read the hardware
+state from the debugfs file to generate a clean log without
+timestamps.
+
+[How]
+
+Usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dtn_log
+
+Threading is an obvious concern when dealing with multiple debugfs
+operations and blocking on global state in dm or dc seems unfavorable.
+
+Adding an extra parameter for the debugfs log context state is the
+implementation done here. Existing code that made use of DTN_INFO
+and its associated macros needed to be refactored to support this.
+
+We don't know the size of the log in advance so it reallocates the
+log string dynamically. Once the log has been generated it's copied
+into the user supplied buffer for the debugfs. This allows for seeking
+support but it's worth nothing that unlike triggering output via
+dmesg the hardware state might change in-between reads if your buffer
+size is too small.
+
+Signed-off-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Reviewed-by: Jordan Lazare <Jordan.Lazare@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ .../drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 39 +++++++++-
+ .../drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 83 +++++++++++++++++++---
+ .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 24 ++++---
+ drivers/gpu/drm/amd/display/dc/dm_services.h | 10 ++-
+ drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 3 +-
+ .../gpu/drm/amd/display/include/logger_interface.h | 6 +-
+ drivers/gpu/drm/amd/display/include/logger_types.h | 6 ++
+ 7 files changed, 142 insertions(+), 29 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+index e79ac1e..35ca732 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+@@ -720,16 +720,49 @@ int connector_debugfs_init(struct amdgpu_dm_connector *connector)
+ return 0;
+ }
+
++/*
++ * Writes DTN log state to the user supplied buffer.
++ * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dtn_log
++ */
+ static ssize_t dtn_log_read(
+ struct file *f,
+ char __user *buf,
+ size_t size,
+ loff_t *pos)
+ {
+- /* TODO: Write log output to the user supplied buffer. */
+- return 0;
++ struct amdgpu_device *adev = file_inode(f)->i_private;
++ struct dc *dc = adev->dm.dc;
++ struct dc_log_buffer_ctx log_ctx = { 0 };
++ ssize_t result = 0;
++
++ if (!buf || !size)
++ return -EINVAL;
++
++ if (!dc->hwss.log_hw_state)
++ return 0;
++
++ dc->hwss.log_hw_state(dc, &log_ctx);
++
++ if (*pos < log_ctx.pos) {
++ size_t to_copy = log_ctx.pos - *pos;
++
++ to_copy = min(to_copy, size);
++
++ if (!copy_to_user(buf, log_ctx.buf + *pos, to_copy)) {
++ *pos += to_copy;
++ result = to_copy;
++ }
++ }
++
++ kfree(log_ctx.buf);
++
++ return result;
+ }
+
++/*
++ * Writes DTN log state to dmesg when triggered via a write.
++ * Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_dtn_log
++ */
+ static ssize_t dtn_log_write(
+ struct file *f,
+ const char __user *buf,
+@@ -744,7 +777,7 @@ static ssize_t dtn_log_write(
+ return 0;
+
+ if (dc->hwss.log_hw_state)
+- dc->hwss.log_hw_state(dc);
++ dc->hwss.log_hw_state(dc, NULL);
+
+ return size;
+ }
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+index cd5e991..37e1424 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+@@ -41,6 +41,8 @@
+
+ #include "dm_helpers.h"
+
++#define kvcalloc(n, size, gfp) kvzalloc(((n)*(size)), gfp)
++
+ /* dm_helpers_parse_edid_caps
+ *
+ * Parse edid caps
+@@ -335,28 +337,91 @@ bool dm_helpers_dp_mst_send_payload_allocation(
+ return true;
+ }
+
+-void dm_dtn_log_begin(struct dc_context *ctx)
++void dm_dtn_log_begin(struct dc_context *ctx,
++ struct dc_log_buffer_ctx *log_ctx)
+ {
+- pr_info("[dtn begin]\n");
++ static const char msg[] = "[dtn begin]\n";
++
++ if (!log_ctx) {
++ pr_info("%s", msg);
++ return;
++ }
++
++ dm_dtn_log_append_v(ctx, log_ctx, "%s", msg);
+ }
+
+ void dm_dtn_log_append_v(struct dc_context *ctx,
+- const char *msg, ...)
++ struct dc_log_buffer_ctx *log_ctx,
++ const char *msg, ...)
+ {
+- struct va_format vaf;
+ va_list args;
++ size_t total;
++ int n;
++
++ if (!log_ctx) {
++ /* No context, redirect to dmesg. */
++ struct va_format vaf;
++
++ vaf.fmt = msg;
++ vaf.va = &args;
+
++ va_start(args, msg);
++ pr_info("%pV", &vaf);
++ va_end(args);
++
++ return;
++ }
++
++ /* Measure the output. */
+ va_start(args, msg);
+- vaf.fmt = msg;
+- vaf.va = &args;
++ n = vsnprintf(NULL, 0, msg, args);
++ va_end(args);
++
++ if (n <= 0)
++ return;
++
++ /* Reallocate the string buffer as needed. */
++ total = log_ctx->pos + n + 1;
+
+- pr_info("%pV", &vaf);
++ if (total > log_ctx->size) {
++ char *buf = (char *)kvcalloc(total, sizeof(char), GFP_KERNEL);
++
++ if (buf) {
++ memcpy(buf, log_ctx->buf, log_ctx->pos);
++ kfree(log_ctx->buf);
++
++ log_ctx->buf = buf;
++ log_ctx->size = total;
++ }
++ }
++
++ if (!log_ctx->buf)
++ return;
++
++ /* Write the formatted string to the log buffer. */
++ va_start(args, msg);
++ n = vscnprintf(
++ log_ctx->buf + log_ctx->pos,
++ log_ctx->size - log_ctx->pos,
++ msg,
++ args);
+ va_end(args);
++
++ if (n > 0)
++ log_ctx->pos += n;
+ }
+
+-void dm_dtn_log_end(struct dc_context *ctx)
++void dm_dtn_log_end(struct dc_context *ctx,
++ struct dc_log_buffer_ctx *log_ctx)
+ {
+- pr_info("[dtn end]\n");
++ static const char msg[] = "[dtn end]\n";
++
++ if (!log_ctx) {
++ pr_info("%s", msg);
++ return;
++ }
++
++ dm_dtn_log_append_v(ctx, log_ctx, "%s", msg);
+ }
+
+ bool dm_helpers_dp_mst_start_top_mgr(
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 1c5bb14..6bd4ec3 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -58,9 +58,11 @@
+
+ /*print is 17 wide, first two characters are spaces*/
+ #define DTN_INFO_MICRO_SEC(ref_cycle) \
+- print_microsec(dc_ctx, ref_cycle)
++ print_microsec(dc_ctx, log_ctx, ref_cycle)
+
+-void print_microsec(struct dc_context *dc_ctx, uint32_t ref_cycle)
++void print_microsec(struct dc_context *dc_ctx,
++ struct dc_log_buffer_ctx *log_ctx,
++ uint32_t ref_cycle)
+ {
+ const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clock_inKhz / 1000;
+ static const unsigned int frac = 1000;
+@@ -71,7 +73,8 @@ void print_microsec(struct dc_context *dc_ctx, uint32_t ref_cycle)
+ us_x10 % frac);
+ }
+
+-static void log_mpc_crc(struct dc *dc)
++static void log_mpc_crc(struct dc *dc,
++ struct dc_log_buffer_ctx *log_ctx)
+ {
+ struct dc_context *dc_ctx = dc->ctx;
+ struct dce_hwseq *hws = dc->hwseq;
+@@ -84,7 +87,7 @@ static void log_mpc_crc(struct dc *dc)
+ REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
+ }
+
+-void dcn10_log_hubbub_state(struct dc *dc)
++void dcn10_log_hubbub_state(struct dc *dc, struct dc_log_buffer_ctx *log_ctx)
+ {
+ struct dc_context *dc_ctx = dc->ctx;
+ struct dcn_hubbub_wm wm = {0};
+@@ -111,7 +114,7 @@ void dcn10_log_hubbub_state(struct dc *dc)
+ DTN_INFO("\n");
+ }
+
+-static void dcn10_log_hubp_states(struct dc *dc)
++static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
+ {
+ struct dc_context *dc_ctx = dc->ctx;
+ struct resource_pool *pool = dc->res_pool;
+@@ -226,7 +229,8 @@ static void dcn10_log_hubp_states(struct dc *dc)
+ DTN_INFO("\n");
+ }
+
+-void dcn10_log_hw_state(struct dc *dc)
++void dcn10_log_hw_state(struct dc *dc,
++ struct dc_log_buffer_ctx *log_ctx)
+ {
+ struct dc_context *dc_ctx = dc->ctx;
+ struct resource_pool *pool = dc->res_pool;
+@@ -234,9 +238,9 @@ void dcn10_log_hw_state(struct dc *dc)
+
+ DTN_INFO_BEGIN();
+
+- dcn10_log_hubbub_state(dc);
++ dcn10_log_hubbub_state(dc, log_ctx);
+
+- dcn10_log_hubp_states(dc);
++ dcn10_log_hubp_states(dc, log_ctx);
+
+ DTN_INFO("DPP: IGAM format IGAM mode DGAM mode RGAM mode"
+ " GAMUT mode C11 C12 C13 C14 C21 C22 C23 C24 "
+@@ -347,7 +351,7 @@ void dcn10_log_hw_state(struct dc *dc)
+ dc->current_state->bw.dcn.clk.fclk_khz,
+ dc->current_state->bw.dcn.clk.socclk_khz);
+
+- log_mpc_crc(dc);
++ log_mpc_crc(dc, log_ctx);
+
+ DTN_INFO_END();
+ }
+@@ -857,7 +861,7 @@ void dcn10_verify_allow_pstate_change_high(struct dc *dc)
+
+ if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub)) {
+ if (should_log_hw_state) {
+- dcn10_log_hw_state(dc);
++ dcn10_log_hw_state(dc, NULL);
+ }
+ BREAK_TO_DEBUGGER();
+ if (dcn10_hw_wa_force_recovery(dc)) {
+diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h
+index eb5ab39..28128c0 100644
+--- a/drivers/gpu/drm/amd/display/dc/dm_services.h
++++ b/drivers/gpu/drm/amd/display/dc/dm_services.h
+@@ -359,8 +359,12 @@ void dm_perf_trace_timestamp(const char *func_name, unsigned int line);
+ * Debug and verification hooks
+ */
+
+-void dm_dtn_log_begin(struct dc_context *ctx);
+-void dm_dtn_log_append_v(struct dc_context *ctx, const char *msg, ...);
+-void dm_dtn_log_end(struct dc_context *ctx);
++void dm_dtn_log_begin(struct dc_context *ctx,
++ struct dc_log_buffer_ctx *log_ctx);
++void dm_dtn_log_append_v(struct dc_context *ctx,
++ struct dc_log_buffer_ctx *log_ctx,
++ const char *msg, ...);
++void dm_dtn_log_end(struct dc_context *ctx,
++ struct dc_log_buffer_ctx *log_ctx);
+
+ #endif /* __DM_SERVICES_H__ */
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+index 9a97356..26f29d5 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+@@ -202,7 +202,8 @@ struct hw_sequencer_funcs {
+
+ void (*set_avmute)(struct pipe_ctx *pipe_ctx, bool enable);
+
+- void (*log_hw_state)(struct dc *dc);
++ void (*log_hw_state)(struct dc *dc,
++ struct dc_log_buffer_ctx *log_ctx);
+ void (*get_hw_state)(struct dc *dc, char *pBuf, unsigned int bufSize, unsigned int mask);
+
+ void (*wait_for_mpcc_disconnect)(struct dc *dc,
+diff --git a/drivers/gpu/drm/amd/display/include/logger_interface.h b/drivers/gpu/drm/amd/display/include/logger_interface.h
+index e3c7961..a0b68c2 100644
+--- a/drivers/gpu/drm/amd/display/include/logger_interface.h
++++ b/drivers/gpu/drm/amd/display/include/logger_interface.h
+@@ -129,13 +129,13 @@ void context_clock_trace(
+ * Display Test Next logging
+ */
+ #define DTN_INFO_BEGIN() \
+- dm_dtn_log_begin(dc_ctx)
++ dm_dtn_log_begin(dc_ctx, log_ctx)
+
+ #define DTN_INFO(msg, ...) \
+- dm_dtn_log_append_v(dc_ctx, msg, ##__VA_ARGS__)
++ dm_dtn_log_append_v(dc_ctx, log_ctx, msg, ##__VA_ARGS__)
+
+ #define DTN_INFO_END() \
+- dm_dtn_log_end(dc_ctx)
++ dm_dtn_log_end(dc_ctx, log_ctx)
+
+ #define PERFORMANCE_TRACE_START() \
+ unsigned long long perf_trc_start_stmp = dm_get_timestamp(dc->ctx)
+diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
+index bc57326..d96550d 100644
+--- a/drivers/gpu/drm/amd/display/include/logger_types.h
++++ b/drivers/gpu/drm/amd/display/include/logger_types.h
+@@ -66,6 +66,12 @@
+
+ struct dal_logger;
+
++struct dc_log_buffer_ctx {
++ char *buf;
++ size_t pos;
++ size_t size;
++};
++
+ enum dc_log_type {
+ LOG_ERROR = 0,
+ LOG_WARNING,
+--
+2.7.4
+
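The patch above adds struct dc_log_buffer_ctx (buf/pos/size) and threads it through the DTN_INFO macros so DCN hardware state can be captured into a caller-supplied buffer; a NULL context, as passed in dcn10_verify_allow_pstate_change_high above, keeps the old behaviour. Below is a minimal, hedged sketch in standalone C of how such a context would accumulate formatted text; example_dtn_append and the NULL-context fallback are assumptions for illustration, not the real amdgpu_dm implementation.

#include <stdarg.h>
#include <stdio.h>

struct dc_log_buffer_ctx {
	char *buf;
	size_t pos;
	size_t size;
};

/* Append printf-style text at log_ctx->pos without writing past size. */
static void example_dtn_append(struct dc_log_buffer_ctx *log_ctx,
			       const char *fmt, ...)
{
	va_list args;
	size_t avail;
	int n;

	if (!log_ctx || !log_ctx->buf)
		return; /* no capture buffer: a real helper could print instead */

	avail = log_ctx->size - log_ctx->pos;
	va_start(args, fmt);
	n = vsnprintf(log_ctx->buf + log_ctx->pos, avail, fmt, args);
	va_end(args);

	if (n > 0)
		log_ctx->pos += ((size_t)n < avail) ? (size_t)n
						    : (avail ? avail - 1 : 0);
}

int main(void)
{
	char buf[128] = "";
	struct dc_log_buffer_ctx ctx = { .buf = buf, .pos = 0, .size = sizeof(buf) };

	example_dtn_append(&ctx, "DPP %d: %s\n", 0, "bypass");
	printf("%s", buf);
	return 0;
}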
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5344-Revert-drm-amdgpu-Temporary-fix-amdgpu_vm_release_co.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5344-Revert-drm-amdgpu-Temporary-fix-amdgpu_vm_release_co.patch
new file mode 100644
index 00000000..84491f3a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5344-Revert-drm-amdgpu-Temporary-fix-amdgpu_vm_release_co.patch
@@ -0,0 +1,118 @@
+From 298b6d43537dbb479efdd0980db75572063ee25c Mon Sep 17 00:00:00 2001
+From: Prike Liang <Prike.Liang@amd.com>
+Date: Mon, 8 Oct 2018 11:35:43 +0800
+Subject: [PATCH 5344/5725] Revert "drm/amdgpu: Temporary fix
+ amdgpu_vm_release_compute build error"
+
+This reverts commit afc72192bd7ce470c78f2a35f69be7524e0a7532.
+
+Change-Id: Ie20a54abcc3409786e1c05082d76c11221a5a6a3
+Signed-off-by: Prike Liang <Prike.Liang@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 1 -
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 1 -
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 1 -
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 20 --------------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 1 -
+ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 4 +---
+ 6 files changed, 1 insertion(+), 27 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+index 01b9a9d..d2702b0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+@@ -182,7 +182,6 @@ static const struct kfd2kgd_calls kfd2kgd = {
+ .create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm,
+ .acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm,
+ .destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm,
+- .release_process_vm = amdgpu_amdkfd_gpuvm_release_process_vm,
+ .get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir,
+ .alloc_pasid = amdgpu_pasid_alloc,
+ .free_pasid = amdgpu_pasid_free,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+index 7e7fe0c..69ac7be 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+@@ -154,7 +154,6 @@ static const struct kfd2kgd_calls kfd2kgd = {
+ .create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm,
+ .acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm,
+ .destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm,
+- .release_process_vm = amdgpu_amdkfd_gpuvm_release_process_vm,
+ .create_process_gpumem = create_process_gpumem,
+ .destroy_process_gpumem = destroy_process_gpumem,
+ .get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+index c501ead..b6852a1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+@@ -204,7 +204,6 @@ static const struct kfd2kgd_calls kfd2kgd = {
+ .create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm,
+ .acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm,
+ .destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm,
+- .release_process_vm = amdgpu_amdkfd_gpuvm_release_process_vm,
+ .create_process_gpumem = create_process_gpumem,
+ .destroy_process_gpumem = destroy_process_gpumem,
+ .get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 87c1b55..feef79c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -2816,26 +2816,6 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, uns
+ }
+
+ /**
+- * amdgpu_vm_release_compute - release a compute vm
+- * @adev: amdgpu_device pointer
+- * @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute
+- *
+- * This is a correspondant of amdgpu_vm_make_compute. It decouples compute
+- * pasid from vm. Compute should stop use of vm after this call.
+- */
+-void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+-{
+- if (vm->pasid) {
+- unsigned long flags;
+-
+- spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
+- idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
+- spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
+- }
+- vm->pasid = 0;
+-}
+-
+-/**
+ * amdgpu_vm_free_levels - free PD/PT levels
+ *
+ * @adev: amdgpu device structure
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+index e6ca941..74c3b59 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+@@ -293,7 +293,6 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
+ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ int vm_context, unsigned int pasid);
+ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid);
+-void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+ bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
+ unsigned int pasid);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index ba4e35c..da67302 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -355,10 +355,8 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
+ pr_debug("Releasing pdd (topology id %d) for process (pasid %d)\n",
+ pdd->dev->id, p->pasid);
+
+- if (pdd->drm_file) {
+- pdd->dev->kfd2kgd->release_process_vm(pdd->dev->kgd, pdd->vm);
++ if (pdd->drm_file)
+ fput(pdd->drm_file);
+- }
+ else if (pdd->vm)
+ pdd->dev->kfd2kgd->destroy_process_vm(
+ pdd->dev->kgd, pdd->vm);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5345-drm-amdgpu-Use-drm_dev_unplug-in-PCI-.remove.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5345-drm-amdgpu-Use-drm_dev_unplug-in-PCI-.remove.patch
new file mode 100644
index 00000000..11112c1f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5345-drm-amdgpu-Use-drm_dev_unplug-in-PCI-.remove.patch
@@ -0,0 +1,39 @@
+From 7128b6f79f79f0c075c91c53ba6dd1d1a3b05c8e Mon Sep 17 00:00:00 2001
+From: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Date: Thu, 30 Aug 2018 11:24:17 -0400
+Subject: [PATCH 5345/5725] drm/amdgpu: Use drm_dev_unplug in PCI .remove
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This at least allows any subsequent IOCTLs to fail with -ENODEV
+after the device is gone.
+Still, this operation is not yet supported in graphics mode
+and will lead at least to page faults and other issues.
+
+Change-Id: I4afbcec9bb2f2d7bbd3d512b4a0193912c635987
+Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 8556192..1cb9a93 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -900,8 +900,8 @@ amdgpu_pci_remove(struct pci_dev *pdev)
+ {
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+- drm_dev_unregister(dev);
+- drm_dev_put(dev);
++ DRM_ERROR("Device removal is currently not supported outside of fbcon\n");
++ drm_dev_unplug(dev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ }
+--
+2.7.4
+
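A note on why drm_dev_unplug() helps here: as the commit message says, it lets the DRM core fail new ioctls with -ENODEV once the device is gone, instead of tearing the drm_device down immediately. Hardware-touching paths can additionally guard themselves with the drm_dev_enter()/drm_dev_exit() pair that accompanies drm_dev_unplug() upstream. The fragment below is only an illustrative sketch; example_mmio_access is made up, and it assumes drm_dev_enter()/drm_dev_exit() are present in this kernel tree.

#include <drm/drm_drv.h>

static int example_mmio_access(struct drm_device *dev)
{
	int idx;

	/* Returns false once drm_dev_unplug() has been called. */
	if (!drm_dev_enter(dev, &idx))
		return -ENODEV;

	/* ... register access is safe between enter and exit ... */

	drm_dev_exit(idx);
	return 0;
}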
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5346-drm-amdgpu-move-size-calculations-to-the-front-of-th.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5346-drm-amdgpu-move-size-calculations-to-the-front-of-th.patch
new file mode 100644
index 00000000..ee35e254
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5346-drm-amdgpu-move-size-calculations-to-the-front-of-th.patch
@@ -0,0 +1,123 @@
+From 48e0e1bcb40cd4a729cc6f899e9e9057ba4e70c4 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Thu, 30 Aug 2018 10:31:52 +0200
+Subject: [PATCH 5346/5725] drm/amdgpu: move size calculations to the front of
+ the file again
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+amdgpu_vm_bo_* functions should come much later.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 84 +++++++++++++++++-----------------
+ 1 file changed, 42 insertions(+), 42 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index feef79c..8b83cc4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -134,48 +134,6 @@ struct amdgpu_prt_cb {
+ };
+
+ /**
+- * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
+- *
+- * @base: base structure for tracking BO usage in a VM
+- * @vm: vm to which bo is to be added
+- * @bo: amdgpu buffer object
+- *
+- * Initialize a bo_va_base structure and add it to the appropriate lists
+- *
+- */
+-static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
+- struct amdgpu_vm *vm,
+- struct amdgpu_bo *bo)
+-{
+- base->vm = vm;
+- base->bo = bo;
+- INIT_LIST_HEAD(&base->bo_list);
+- INIT_LIST_HEAD(&base->vm_status);
+-
+- if (!bo)
+- return;
+- list_add_tail(&base->bo_list, &bo->va);
+-
+- if (bo->tbo.type == ttm_bo_type_kernel)
+- list_move(&base->vm_status, &vm->relocated);
+-
+- if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
+- return;
+-
+- if (bo->preferred_domains &
+- amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
+- return;
+-
+- /*
+- * we checked all the prerequisites, but it looks like this per vm bo
+- * is currently evicted. add the bo to the evicted list to make sure it
+- * is validated on next vm use to avoid fault.
+- * */
+- list_move_tail(&base->vm_status, &vm->evicted);
+- base->moved = true;
+-}
+-
+-/**
+ * amdgpu_vm_level_shift - return the addr shift for each level
+ *
+ * @adev: amdgpu_device pointer
+@@ -247,6 +205,48 @@ static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
+ }
+
+ /**
++ * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
++ *
++ * @base: base structure for tracking BO usage in a VM
++ * @vm: vm to which bo is to be added
++ * @bo: amdgpu buffer object
++ *
++ * Initialize a bo_va_base structure and add it to the appropriate lists
++ *
++ */
++static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
++ struct amdgpu_vm *vm,
++ struct amdgpu_bo *bo)
++{
++ base->vm = vm;
++ base->bo = bo;
++ INIT_LIST_HEAD(&base->bo_list);
++ INIT_LIST_HEAD(&base->vm_status);
++
++ if (!bo)
++ return;
++ list_add_tail(&base->bo_list, &bo->va);
++
++ if (bo->tbo.type == ttm_bo_type_kernel)
++ list_move(&base->vm_status, &vm->relocated);
++
++ if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
++ return;
++
++ if (bo->preferred_domains &
++ amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
++ return;
++
++ /*
++ * we checked all the prerequisites, but it looks like this per vm bo
++ * is currently evicted. add the bo to the evicted list to make sure it
++ * is validated on next vm use to avoid fault.
++ * */
++ list_move_tail(&base->vm_status, &vm->evicted);
++ base->moved = true;
++}
++
++/**
+ * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
+ *
+ * @vm: vm providing the BOs
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5347-drm-amdgpu-fix-amdgpu_mn_unlock-in-the-CS-error-path.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5347-drm-amdgpu-fix-amdgpu_mn_unlock-in-the-CS-error-path.patch
new file mode 100644
index 00000000..2484df09
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5347-drm-amdgpu-fix-amdgpu_mn_unlock-in-the-CS-error-path.patch
@@ -0,0 +1,36 @@
+From f654fe26508465c6d2ac120b627ab3efe462724a Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Mon, 3 Sep 2018 10:51:51 +0200
+Subject: [PATCH 5347/5725] drm/amdgpu: fix amdgpu_mn_unlock() in the CS error
+ path
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Avoid unlocking a lock we never locked.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index 3e41c03..67551d4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -1258,10 +1258,10 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
+ error_abort:
+ dma_fence_put(&job->base.s_fence->finished);
+ job->base.s_fence = NULL;
++ amdgpu_mn_unlock(p->mn);
+
+ error_unlock:
+ amdgpu_job_free(job);
+- amdgpu_mn_unlock(p->mn);
+ return r;
+ }
+
+--
+2.7.4
+
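The rule this fix restores is the usual kernel error-path convention: each goto label undoes only what is guaranteed to be held at every jump site, so the MMU-notifier unlock belongs under the label that is reachable only after the lock was taken. A generic sketch of that pattern follows; all names are made-up stubs, not the amdgpu_cs code.

/* Stubs so the pattern is self-contained; values are arbitrary. */
static int  example_lock(void)   { return 0; }
static void example_unlock(void) { }
static int  example_submit(void) { return -1; }
static void example_free(void)   { }

static int example_commit(void)
{
	int r;

	r = example_lock();          /* plays the role of amdgpu_mn_lock()   */
	if (r)
		goto err_free;       /* lock not taken: must not unlock      */

	r = example_submit();
	if (r)
		goto err_unlock;     /* lock taken: unlock before freeing    */

	example_unlock();
	return 0;

err_unlock:
	example_unlock();            /* plays the role of amdgpu_mn_unlock() */
err_free:
	example_free();
	return r;
}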
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5348-drm-amdgpu-correctly-sign-extend-48bit-addresses-v3.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5348-drm-amdgpu-correctly-sign-extend-48bit-addresses-v3.patch
new file mode 100644
index 00000000..854db5ac
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5348-drm-amdgpu-correctly-sign-extend-48bit-addresses-v3.patch
@@ -0,0 +1,240 @@
+From 2faf9a526fde349ce29fbd76bd448525dd4a3dd0 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Mon, 27 Aug 2018 18:22:31 +0200
+Subject: [PATCH 5348/5725] drm/amdgpu: correctly sign extend 48bit addresses
+ v3
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Correctly sign extend the GMC addresses to 48 bits.
+
+v2: sign extending turned out easier than thought.
+v3: clean up the defines and move them into amdgpu_gmc.h as well
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 10 +++++-----
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 26 ++++++++++++++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 8 ++++----
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 6 ++----
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 7 ++++---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 13 -------------
+ 9 files changed, 44 insertions(+), 32 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+index 589eca8..9ff80a5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+@@ -158,7 +158,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
+ .num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe,
+ .gpuvm_size = min(adev->vm_manager.max_pfn
+ << AMDGPU_GPU_PAGE_SHIFT,
+- AMDGPU_VA_HOLE_START),
++ AMDGPU_GMC_HOLE_START),
+ .drm_render_minor = adev->ddev->render->index
+ };
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index 67551d4..a22d8ce 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -920,7 +920,7 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
+ if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
+ continue;
+
+- va_start = chunk_ib->va_start & AMDGPU_VA_HOLE_MASK;
++ va_start = chunk_ib->va_start & AMDGPU_GMC_HOLE_MASK;
+ r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
+ if (r) {
+ DRM_ERROR("IB va_start is invalid\n");
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+index 3cd2b29..fc6da55 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+@@ -716,16 +716,16 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
+ return -EINVAL;
+ }
+
+- if (args->va_address >= AMDGPU_VA_HOLE_START &&
+- args->va_address < AMDGPU_VA_HOLE_END) {
++ if (args->va_address >= AMDGPU_GMC_HOLE_START &&
++ args->va_address < AMDGPU_GMC_HOLE_END) {
+ dev_dbg(&dev->pdev->dev,
+ "va_address 0x%LX is in VA hole 0x%LX-0x%LX\n",
+- args->va_address, AMDGPU_VA_HOLE_START,
+- AMDGPU_VA_HOLE_END);
++ args->va_address, AMDGPU_GMC_HOLE_START,
++ AMDGPU_GMC_HOLE_END);
+ return -EINVAL;
+ }
+
+- args->va_address &= AMDGPU_VA_HOLE_MASK;
++ args->va_address &= AMDGPU_GMC_HOLE_MASK;
+
+ if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
+ dev_dbg(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+index 75447d9..167aaf9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+@@ -30,6 +30,19 @@
+
+ #include "amdgpu_irq.h"
+
++/* VA hole for 48bit addresses on Vega10 */
++#define AMDGPU_GMC_HOLE_START 0x0000800000000000ULL
++#define AMDGPU_GMC_HOLE_END 0xffff800000000000ULL
++
++/*
++ * Hardware is programmed as if the hole doesn't exists with start and end
++ * address values.
++ *
++ * This mask is used to remove the upper 16bits of the VA and so come up with
++ * the linear addr value.
++ */
++#define AMDGPU_GMC_HOLE_MASK 0x0000ffffffffffffULL
++
+ struct firmware;
+
+ /*
+@@ -137,6 +150,19 @@ static inline bool amdgpu_gmc_vram_full_visible(struct amdgpu_gmc *gmc)
+ return (gmc->real_vram_size == gmc->visible_vram_size);
+ }
+
++/**
++ * amdgpu_gmc_sign_extend - sign extend the given gmc address
++ *
++ * @addr: address to extend
++ */
++static inline uint64_t amdgpu_gmc_sign_extend(uint64_t addr)
++{
++ if (addr >= AMDGPU_GMC_HOLE_START)
++ addr |= AMDGPU_GMC_HOLE_END;
++
++ return addr;
++}
++
+ void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
+ uint64_t *addr, uint64_t *flags);
+ uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index bca3e2c..42b9d30 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -659,11 +659,11 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+
+ dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
+ dev_info.virtual_address_max =
+- min(vm_size, AMDGPU_VA_HOLE_START);
++ min(vm_size, AMDGPU_GMC_HOLE_START);
+
+- if (vm_size > AMDGPU_VA_HOLE_START) {
+- dev_info.high_va_offset = AMDGPU_VA_HOLE_END;
+- dev_info.high_va_max = AMDGPU_VA_HOLE_END | vm_size;
++ if (vm_size > AMDGPU_GMC_HOLE_START) {
++ dev_info.high_va_offset = AMDGPU_GMC_HOLE_END;
++ dev_info.high_va_max = AMDGPU_GMC_HOLE_END | vm_size;
+ }
+ dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
+ dev_info.pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index a43ec44..b2947f4 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -1387,7 +1387,7 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
+ !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));
+ WARN_ON_ONCE(bo->tbo.mem.mem_type == AMDGPU_PL_DGMA_IMPORT);
+
+- return bo->tbo.offset;
++ return amdgpu_gmc_sign_extend(bo->tbo.offset);
+ }
+
+ /**
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+index 3885636..f2f358a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+@@ -28,9 +28,7 @@ uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
+ uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT;
+
+ addr -= AMDGPU_VA_RESERVED_SIZE;
+-
+- if (addr >= AMDGPU_VA_HOLE_START)
+- addr |= AMDGPU_VA_HOLE_END;
++ addr = amdgpu_gmc_sign_extend(addr);
+
+ return addr;
+ }
+@@ -73,7 +71,7 @@ void amdgpu_free_static_csa(struct amdgpu_device *adev) {
+ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct amdgpu_bo_va **bo_va)
+ {
+- uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_VA_HOLE_MASK;
++ uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
+ struct ww_acquire_ctx ticket;
+ struct list_head list;
+ struct amdgpu_bo_list_entry pd;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 8b83cc4..063f0d0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -369,7 +369,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
+ if (level == adev->vm_manager.root_level) {
+ ats_entries = amdgpu_vm_level_shift(adev, level);
+ ats_entries += AMDGPU_GPU_PAGE_SHIFT;
+- ats_entries = AMDGPU_VA_HOLE_START >> ats_entries;
++ ats_entries = AMDGPU_GMC_HOLE_START >> ats_entries;
+ ats_entries = min(ats_entries, entries);
+ entries -= ats_entries;
+ } else {
+@@ -599,7 +599,7 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
+ eaddr = saddr + size - 1;
+
+ if (vm->pte_support_ats)
+- ats = saddr < AMDGPU_VA_HOLE_START;
++ ats = saddr < AMDGPU_GMC_HOLE_START;
+
+ saddr /= AMDGPU_GPU_PAGE_SIZE;
+ eaddr /= AMDGPU_GPU_PAGE_SIZE;
+@@ -1921,7 +1921,8 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping, list);
+ list_del(&mapping->list);
+
+- if (vm->pte_support_ats && mapping->start < AMDGPU_VA_HOLE_START)
++ if (vm->pte_support_ats &&
++ mapping->start < AMDGPU_GMC_HOLE_START)
+ init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
+
+ r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+index 74c3b59..2cc41ef 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+@@ -103,19 +103,6 @@ struct amdgpu_bo_list_entry;
+ /* hardcode that limit for now */
+ #define AMDGPU_VA_RESERVED_SIZE (1ULL << 20)
+
+-/* VA hole for 48bit addresses on Vega10 */
+-#define AMDGPU_VA_HOLE_START 0x0000800000000000ULL
+-#define AMDGPU_VA_HOLE_END 0xffff800000000000ULL
+-
+-/*
+- * Hardware is programmed as if the hole doesn't exists with start and end
+- * address values.
+- *
+- * This mask is used to remove the upper 16bits of the VA and so come up with
+- * the linear addr value.
+- */
+-#define AMDGPU_VA_HOLE_MASK 0x0000ffffffffffffULL
+-
+ /* max vmids dedicated for process */
+ #define AMDGPU_VM_MAX_RESERVED_VMID 1
+
+--
+2.7.4
+
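A small worked example of the sign extension introduced by this patch may help. It is standalone C using the same constants as the amdgpu_gmc.h hunk above; the sample address is made up for illustration.

#include <assert.h>
#include <stdint.h>

#define AMDGPU_GMC_HOLE_START 0x0000800000000000ULL
#define AMDGPU_GMC_HOLE_END   0xffff800000000000ULL
#define AMDGPU_GMC_HOLE_MASK  0x0000ffffffffffffULL

/* Same logic as the amdgpu_gmc_sign_extend() helper added above. */
static uint64_t sign_extend(uint64_t addr)
{
	if (addr >= AMDGPU_GMC_HOLE_START)
		addr |= AMDGPU_GMC_HOLE_END; /* replicate bit 47 upward */
	return addr;
}

int main(void)
{
	/* A VA in the upper half of the 48-bit space (bit 47 set). */
	uint64_t va = 0x0000ffffff000000ULL;

	/* The upper 16 bits become all ones... */
	assert(sign_extend(va) == 0xffffffffff000000ULL);
	/* ...and masking with the hole mask recovers the linear value. */
	assert((sign_extend(va) & AMDGPU_GMC_HOLE_MASK) == va);
	return 0;
}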
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5349-drm-amdgpu-use-the-AGP-aperture-for-system-memory-ac.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5349-drm-amdgpu-use-the-AGP-aperture-for-system-memory-ac.patch
new file mode 100644
index 00000000..9e23dfb6
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5349-drm-amdgpu-use-the-AGP-aperture-for-system-memory-ac.patch
@@ -0,0 +1,143 @@
+From 68951012a61fec3c237bf7b96be065a358105ce8 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Mon, 27 Aug 2018 18:19:48 +0200
+Subject: [PATCH 5349/5725] drm/amdgpu: use the AGP aperture for system memory
+ access v2
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Start to use the old AGP aperture for system memory access.
+
+v2: Move that to amdgpu_ttm_alloc_gart
+
+Change-Id: I136cced8a259d40fa984f416b5d70b6c4ebb9b83
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 23 +++++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 1 +
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 44 +++++++++++++++++++--------------
+ 3 files changed, 49 insertions(+), 19 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+index 0effe84..fec88f6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+@@ -80,6 +80,29 @@ uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo)
+ }
+
+ /**
++ * amdgpu_gmc_agp_addr - return the address in the AGP address space
++ *
++ * @tbo: TTM BO which needs the address, must be in GTT domain
++ *
++ * Tries to figure out how to access the BO through the AGP aperture. Returns
++ * AMDGPU_BO_INVALID_OFFSET if that is not possible.
++ */
++uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
++{
++ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
++ struct ttm_dma_tt *ttm;
++
++ if (bo->num_pages != 1 || bo->ttm->caching_state == tt_cached)
++ return AMDGPU_BO_INVALID_OFFSET;
++
++ ttm = container_of(bo->ttm, struct ttm_dma_tt, ttm);
++ if (ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
++ return AMDGPU_BO_INVALID_OFFSET;
++
++ return adev->gmc.agp_start + ttm->dma_address[0];
++}
++
++/**
+ * amdgpu_gmc_vram_location - try to find VRAM location
+ *
+ * @adev: amdgpu device structure holding all necessary informations
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+index 167aaf9..2189606 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+@@ -166,6 +166,7 @@ static inline uint64_t amdgpu_gmc_sign_extend(uint64_t addr)
+ void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
+ uint64_t *addr, uint64_t *flags);
+ uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo);
++uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo);
+ void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
+ u64 base);
+ void amdgpu_gmc_gart_location(struct amdgpu_device *adev,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index 4c7e231..f70687d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -1139,30 +1139,34 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
+
+ struct ttm_placement placement;
+ struct ttm_place placements;
+- uint64_t flags;
++ uint64_t addr, flags;
+ int r;
+
+ if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET)
+ return 0;
+
+- /* allocate GART space */
+- tmp = bo->mem;
+- tmp.mm_node = NULL;
+- placement.num_placement = 1;
+- placement.placement = &placements;
+- placement.num_busy_placement = 1;
+- placement.busy_placement = &placements;
+- placements.fpfn = 0;
+- placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
+- placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
+- TTM_PL_FLAG_TT;
+-
+- r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
+- if (unlikely(r))
+- return r;
++ addr = amdgpu_gmc_agp_addr(bo);
++ if (addr != AMDGPU_BO_INVALID_OFFSET) {
++ bo->mem.start = addr >> PAGE_SHIFT;
++ } else {
++ /* allocate GART space */
++ tmp = bo->mem;
++ tmp.mm_node = NULL;
++ placement.num_placement = 1;
++ placement.placement = &placements;
++ placement.num_busy_placement = 1;
++ placement.busy_placement = &placements;
++ placements.fpfn = 0;
++ placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
++ placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
++ TTM_PL_FLAG_TT;
++
++ r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
++ if (unlikely(r))
++ return r;
+
+- /* compute PTE flags for this buffer object */
+- flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
++ /* compute PTE flags for this buffer object */
++ flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
+
+ /* Bind pages */
+ gtt->offset = (u64)tmp.start << PAGE_SHIFT;
+@@ -1170,10 +1174,12 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
+ if (unlikely(r)) {
+ ttm_bo_mem_put(bo, &tmp);
+ return r;
+- }
++ }
+
+ ttm_bo_mem_put(bo, &bo->mem);
+ bo->mem = tmp;
++ }
++
+ bo->offset = (bo->mem.start << PAGE_SHIFT) +
+ bo->bdev->man[bo->mem.mem_type].gpu_offset;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5350-drm-amd-display-Build-stream-update-and-plane-update.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5350-drm-amd-display-Build-stream-update-and-plane-update.patch
new file mode 100644
index 00000000..e87939fa
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5350-drm-amd-display-Build-stream-update-and-plane-update.patch
@@ -0,0 +1,176 @@
+From 01ee85a2b1c0c9a2caaeeac730662ddb9c2dfee8 Mon Sep 17 00:00:00 2001
+From: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Date: Mon, 20 Aug 2018 13:32:07 -0400
+Subject: [PATCH 5350/5725] drm/amd/display: Build stream update and plane
+ updates in dm
+
+[Why]
+We currently lock modeset by setting a boolean in dm. We want to lock
+Based on what DC tells us.
+
+[How]
+Build stream_updates and plane_update based on what changed. Then we
+call check_update_surfaces_for_stream() to get the update type
+We lock only if update_type is not fast
+
+Change-Id: I70dbee91970830632be8abb960ae47b3adf9ab8d
+Signed-off-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 116 +++++++++++++++++++++-
+ 1 file changed, 114 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 8eb935e..8132f40 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -5441,6 +5441,100 @@ static int dm_update_planes_state(struct dc *dc,
+
+ return ret;
+ }
++enum surface_update_type dm_determine_update_type_for_commit(struct dc *dc, struct drm_atomic_state *state)
++{
++
++
++ int i, j, num_plane;
++ struct drm_plane_state *old_plane_state, *new_plane_state;
++ struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
++ struct drm_crtc *new_plane_crtc, *old_plane_crtc;
++ struct drm_plane *plane;
++
++ struct drm_crtc *crtc;
++ struct drm_crtc_state *new_crtc_state, *old_crtc_state;
++ struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
++ struct dc_stream_status *status = NULL;
++
++ struct dc_surface_update *updates = kzalloc(MAX_SURFACES * sizeof(struct dc_surface_update), GFP_KERNEL);
++ struct dc_plane_state *surface = kzalloc(MAX_SURFACES * sizeof(struct dc_plane_state), GFP_KERNEL);
++ struct dc_stream_update stream_update;
++ enum surface_update_type update_type = UPDATE_TYPE_FAST;
++
++
++ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
++ new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
++ old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
++ num_plane = 0;
++
++ if (new_dm_crtc_state->stream) {
++
++ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
++ new_plane_crtc = new_plane_state->crtc;
++ old_plane_crtc = old_plane_state->crtc;
++ new_dm_plane_state = to_dm_plane_state(new_plane_state);
++ old_dm_plane_state = to_dm_plane_state(old_plane_state);
++
++ if (plane->type == DRM_PLANE_TYPE_CURSOR)
++ continue;
++
++ if (!state->allow_modeset)
++ continue;
++
++ if (crtc == new_plane_crtc) {
++ updates[num_plane].surface = &surface[num_plane];
++
++ if (new_crtc_state->mode_changed) {
++ updates[num_plane].surface->src_rect =
++ new_dm_plane_state->dc_state->src_rect;
++ updates[num_plane].surface->dst_rect =
++ new_dm_plane_state->dc_state->dst_rect;
++ updates[num_plane].surface->rotation =
++ new_dm_plane_state->dc_state->rotation;
++ updates[num_plane].surface->in_transfer_func =
++ new_dm_plane_state->dc_state->in_transfer_func;
++ stream_update.dst = new_dm_crtc_state->stream->dst;
++ stream_update.src = new_dm_crtc_state->stream->src;
++ }
++
++ if (new_crtc_state->color_mgmt_changed) {
++ updates[num_plane].gamma =
++ new_dm_plane_state->dc_state->gamma_correction;
++ updates[num_plane].in_transfer_func =
++ new_dm_plane_state->dc_state->in_transfer_func;
++ stream_update.gamut_remap =
++ &new_dm_crtc_state->stream->gamut_remap_matrix;
++ stream_update.out_transfer_func =
++ new_dm_crtc_state->stream->out_transfer_func;
++ }
++
++ num_plane++;
++ }
++ }
++
++ if (num_plane > 0) {
++ status = dc_stream_get_status(new_dm_crtc_state->stream);
++ update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane,
++ &stream_update, status);
++
++ if (update_type > UPDATE_TYPE_MED) {
++ update_type = UPDATE_TYPE_FULL;
++ goto ret;
++ }
++ }
++
++ } else if (!new_dm_crtc_state->stream && old_dm_crtc_state->stream) {
++ update_type = UPDATE_TYPE_FULL;
++ goto ret;
++ }
++ }
++
++ret:
++ kfree(updates);
++ kfree(surface);
++
++ return update_type;
++}
+
+ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
+@@ -5452,6 +5546,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ struct drm_connector_state *old_con_state, *new_con_state;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
++ enum surface_update_type update_type = UPDATE_TYPE_FAST;
++ enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
+ int ret, i;
+
+ /*
+@@ -5537,7 +5633,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
+ continue;
+
+- lock_and_validation_needed = true;
++ overall_update_type = UPDATE_TYPE_FULL;
++ lock_and_validation_needed = true;
+ }
+
+ /*
+@@ -5549,8 +5646,23 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ * will wait for completion of any outstanding flip using DRMs
+ * synchronization events.
+ */
++ update_type = dm_determine_update_type_for_commit(dc, state);
++
++ if (overall_update_type < update_type)
++ overall_update_type = update_type;
++
++ /*
++ * lock_and_validation_needed was an old way to determine if we need to set
++ * the global lock. Leaving it in to check if we broke any corner cases
++ * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
++ * lock_and_validation_needed false = UPDATE_TYPE_FAST
++ */
++ if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
++ WARN(1, "Global lock should be Set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
++ else if (!lock_and_validation_needed && overall_update_type > UPDATE_TYPE_FAST)
++ WARN(1, "Global lock should NOT be set, overall_update_type should be UPDATE_TYPE_FAST");
+
+- if (lock_and_validation_needed) {
++ if (overall_update_type > UPDATE_TYPE_FAST) {
+
+ ret = do_aquire_global_lock(dev, state);
+ if (ret)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5351-drm-amd-display-Add-DP-YCbCr-4-2-0-support.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5351-drm-amd-display-Add-DP-YCbCr-4-2-0-support.patch
new file mode 100644
index 00000000..6599f18b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5351-drm-amd-display-Add-DP-YCbCr-4-2-0-support.patch
@@ -0,0 +1,243 @@
+From 0c7156b879b15ee67233702196dd596efbc4d779 Mon Sep 17 00:00:00 2001
+From: Eric Bernstein <eric.bernstein@amd.com>
+Date: Thu, 24 May 2018 15:50:27 -0400
+Subject: [PATCH 5351/5725] drm/amd/display: Add DP YCbCr 4:2:0 support
+
+[Why]
+For supporting DP YCbCr 4:2:0 output.
+
+[How]
+Update mod_build_vsc_infopacket to support Pixel
+Encoding/Colorimetry Format indication for VSC SDP rev5.
+
+Change-Id: Id6035c6a954bc698e379fe43cc8079e29d7dd765
+Signed-off-by: Eric Bernstein <eric.bernstein@amd.com>
+Reviewed-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+---
+ .../amd/display/modules/info_packet/info_packet.c | 189 ++++++++++++++++++++-
+ 1 file changed, 188 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+index 24b6cc1..52378fc 100644
+--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
++++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+@@ -26,20 +26,38 @@
+ #include "mod_info_packet.h"
+ #include "core_types.h"
+
++enum ColorimetryRGBDP {
++ ColorimetryRGB_DP_sRGB = 0,
++ ColorimetryRGB_DP_AdobeRGB = 3,
++ ColorimetryRGB_DP_P3 = 4,
++ ColorimetryRGB_DP_CustomColorProfile = 5,
++ ColorimetryRGB_DP_ITU_R_BT2020RGB = 6,
++};
++enum ColorimetryYCCDP {
++ ColorimetryYCC_DP_ITU601 = 0,
++ ColorimetryYCC_DP_ITU709 = 1,
++ ColorimetryYCC_DP_AdobeYCC = 5,
++ ColorimetryYCC_DP_ITU2020YCC = 6,
++ ColorimetryYCC_DP_ITU2020YCbCr = 7,
++};
++
+ static void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
+ struct dc_info_packet *info_packet)
+ {
+ unsigned int vscPacketRevision = 0;
+ unsigned int i;
++ unsigned int pixelEncoding = 0;
++ unsigned int colorimetryFormat = 0;
+
+ if (stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE && stream->view_format != VIEW_3D_FORMAT_NONE)
+ vscPacketRevision = 1;
+
+-
+ /*VSC packet set to 2 when DP revision >= 1.2*/
+ if (stream->psr_version != 0)
+ vscPacketRevision = 2;
+
++ if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
++ vscPacketRevision = 5;
+
+ /* VSC packet not needed based on the features
+ * supported by this DP display
+@@ -81,6 +99,175 @@ static void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
+
+ info_packet->valid = true;
+ }
++
++ /* 05h = VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/Colorimetry Format indication.
++ * Added in DP1.3, a DP Source device is allowed to indicate the pixel encoding/colorimetry
++ * format to the DP Sink device with VSC SDP only when the DP Sink device supports it
++ * (i.e., VSC_SDP_EXTENSION_FOR_COLORIMETRY_SUPPORTED bit in the DPRX_FEATURE_ENUMERATION_LIST
++ * register (DPCD Address 02210h, bit 3) is set to 1).
++ * (Requires VSC_SDP_EXTENSION_FOR_COLORIMETRY_SUPPORTED bit set to 1 in DPCD 02210h. This
++ * DPCD register is exposed in the new Extended Receiver Capability field for DPCD Rev. 1.4
++ * (and higher). When MISC1. bit 6. is Set to 1, a Source device uses a VSC SDP to indicate
++ * the Pixel Encoding/Colorimetry Format and that a Sink device must ignore MISC1, bit 7, and
++ * MISC0, bits 7:1 (MISC1, bit 7. and MISC0, bits 7:1 become “don't care”).)
++ */
++ if (vscPacketRevision == 0x5) {
++ /* Secondary-data Packet ID = 0 */
++ info_packet->hb0 = 0x00;
++ /* 07h - Packet Type Value indicating Video Stream Configuration packet */
++ info_packet->hb1 = 0x07;
++ /* 05h = VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/Colorimetry Format indication. */
++ info_packet->hb2 = 0x05;
++ /* 13h = VSC SDP supporting 3D stereo, + PSR2, + Pixel Encoding/Colorimetry Format indication (HB2 = 05h). */
++ info_packet->hb3 = 0x13;
++
++ info_packet->valid = true;
++
++ /* Set VSC SDP fields for pixel encoding and colorimetry format from DP 1.3 specs
++ * Data Bytes DB 18~16
++ * Bits 3:0 (Colorimetry Format) | Bits 7:4 (Pixel Encoding)
++ * ----------------------------------------------------------------------------------------------------
++ * 0x0 = sRGB | 0 = RGB
++ * 0x1 = RGB Wide Gamut Fixed Point
++ * 0x2 = RGB Wide Gamut Floating Point
++ * 0x3 = AdobeRGB
++ * 0x4 = DCI-P3
++ * 0x5 = CustomColorProfile
++ * (others reserved)
++ * ----------------------------------------------------------------------------------------------------
++ * 0x0 = ITU-R BT.601 | 1 = YCbCr444
++ * 0x1 = ITU-R BT.709
++ * 0x2 = xvYCC601
++ * 0x3 = xvYCC709
++ * 0x4 = sYCC601
++ * 0x5 = AdobeYCC601
++ * 0x6 = ITU-R BT.2020 Y'cC'bcC'rc
++ * 0x7 = ITU-R BT.2020 Y'C'bC'r
++ * (others reserved)
++ * ----------------------------------------------------------------------------------------------------
++ * 0x0 = ITU-R BT.601 | 2 = YCbCr422
++ * 0x1 = ITU-R BT.709
++ * 0x2 = xvYCC601
++ * 0x3 = xvYCC709
++ * 0x4 = sYCC601
++ * 0x5 = AdobeYCC601
++ * 0x6 = ITU-R BT.2020 Y'cC'bcC'rc
++ * 0x7 = ITU-R BT.2020 Y'C'bC'r
++ * (others reserved)
++ * ----------------------------------------------------------------------------------------------------
++ * 0x0 = ITU-R BT.601 | 3 = YCbCr420
++ * 0x1 = ITU-R BT.709
++ * 0x2 = xvYCC601
++ * 0x3 = xvYCC709
++ * 0x4 = sYCC601
++ * 0x5 = AdobeYCC601
++ * 0x6 = ITU-R BT.2020 Y'cC'bcC'rc
++ * 0x7 = ITU-R BT.2020 Y'C'bC'r
++ * (others reserved)
++ * ----------------------------------------------------------------------------------------------------
++ * 0x0 =DICOM Part14 Grayscale | 4 = Yonly
++ * Display Function
++ * (others reserved)
++ */
++
++ /* Set Pixel Encoding */
++ switch (stream->timing.pixel_encoding) {
++ case PIXEL_ENCODING_RGB:
++ pixelEncoding = 0x0; /* RGB = 0h */
++ break;
++ case PIXEL_ENCODING_YCBCR444:
++ pixelEncoding = 0x1; /* YCbCr444 = 1h */
++ break;
++ case PIXEL_ENCODING_YCBCR422:
++ pixelEncoding = 0x2; /* YCbCr422 = 2h */
++ break;
++ case PIXEL_ENCODING_YCBCR420:
++ pixelEncoding = 0x3; /* YCbCr420 = 3h */
++ break;
++ default:
++ pixelEncoding = 0x0; /* default RGB = 0h */
++ break;
++ }
++
++ /* Set Colorimetry format based on pixel encoding */
++ switch (stream->timing.pixel_encoding) {
++ case PIXEL_ENCODING_RGB:
++ if ((stream->output_color_space == COLOR_SPACE_SRGB) ||
++ (stream->output_color_space == COLOR_SPACE_SRGB_LIMITED))
++ colorimetryFormat = ColorimetryRGB_DP_sRGB;
++ else if (stream->output_color_space == COLOR_SPACE_ADOBERGB)
++ colorimetryFormat = ColorimetryRGB_DP_AdobeRGB;
++ else if ((stream->output_color_space == COLOR_SPACE_2020_RGB_FULLRANGE) ||
++ (stream->output_color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE))
++ colorimetryFormat = ColorimetryRGB_DP_ITU_R_BT2020RGB;
++ break;
++
++ case PIXEL_ENCODING_YCBCR444:
++ case PIXEL_ENCODING_YCBCR422:
++ case PIXEL_ENCODING_YCBCR420:
++ /* Note: xvYCC probably not supported correctly here on DP since colorspace translation
++ * loses distinction between BT601 vs xvYCC601 in translation
++ */
++ if (stream->output_color_space == COLOR_SPACE_YCBCR601)
++ colorimetryFormat = ColorimetryYCC_DP_ITU601;
++ else if (stream->output_color_space == COLOR_SPACE_YCBCR709)
++ colorimetryFormat = ColorimetryYCC_DP_ITU709;
++ else if (stream->output_color_space == COLOR_SPACE_ADOBERGB)
++ colorimetryFormat = ColorimetryYCC_DP_AdobeYCC;
++ else if (stream->output_color_space == COLOR_SPACE_2020_YCBCR)
++ colorimetryFormat = ColorimetryYCC_DP_ITU2020YCbCr;
++ break;
++
++ default:
++ colorimetryFormat = ColorimetryRGB_DP_sRGB;
++ break;
++ }
++
++ info_packet->sb[16] = (pixelEncoding << 4) | colorimetryFormat;
++
++ /* Set color depth */
++ switch (stream->timing.display_color_depth) {
++ case COLOR_DEPTH_666:
++ /* NOTE: This is actually not valid for YCbCr pixel encoding to have 6 bpc
++ * as of DP1.4 spec, but value of 0 probably reserved here for potential future use.
++ */
++ info_packet->sb[17] = 0;
++ break;
++ case COLOR_DEPTH_888:
++ info_packet->sb[17] = 1;
++ break;
++ case COLOR_DEPTH_101010:
++ info_packet->sb[17] = 2;
++ break;
++ case COLOR_DEPTH_121212:
++ info_packet->sb[17] = 3;
++ break;
++ /*case COLOR_DEPTH_141414: -- NO SUCH FORMAT IN DP SPEC */
++ case COLOR_DEPTH_161616:
++ info_packet->sb[17] = 4;
++ break;
++ default:
++ info_packet->sb[17] = 0;
++ break;
++ }
++
++ /* all YCbCr are always limited range */
++ if ((stream->output_color_space == COLOR_SPACE_SRGB_LIMITED) ||
++ (stream->output_color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE) ||
++ (pixelEncoding != 0x0)) {
++ info_packet->sb[17] |= 0x80; /* DB17 bit 7 set to 1 for CEA timing. */
++ }
++
++ /* Content Type (Bits 2:0)
++ * 0 = Not defined.
++ * 1 = Graphics.
++ * 2 = Photo.
++ * 3 = Video.
++ * 4 = Game.
++ */
++ info_packet->sb[18] = 0;
++ }
++
+ }
+
+ void mod_build_infopackets(struct info_packet_inputs *inputs,
+--
+2.7.4
+
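To make the DB16/DB17 encoding above concrete, here is a hedged worked example in standalone C; the chosen format is illustrative. A YCbCr 4:2:0 stream with ITU-R BT.2020 Y'C'bC'r colorimetry at 10 bpc packs to DB16 = 0x37 and DB17 = 0x82, following the tables and switch statements in this patch.

#include <stdio.h>

int main(void)
{
	unsigned int pixel_encoding     = 0x3; /* YCbCr420               */
	unsigned int colorimetry_format = 0x7; /* ITU-R BT.2020 Y'C'bC'r */
	unsigned char db16, db17;

	/* DB16: pixel encoding in bits 7:4, colorimetry format in 3:0. */
	db16 = (pixel_encoding << 4) | colorimetry_format;

	/* DB17: color depth code (2 = 10 bpc), bit 7 set because YCbCr
	 * output is always limited range. */
	db17 = 2 | 0x80;

	printf("DB16 = 0x%02x, DB17 = 0x%02x\n", db16, db17); /* 0x37, 0x82 */
	return 0;
}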
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5352-drm-amd-display-Fix-DAL217-tests-modify-DTN-logs-for.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5352-drm-amd-display-Fix-DAL217-tests-modify-DTN-logs-for.patch
new file mode 100644
index 00000000..ae8d0db6
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5352-drm-amd-display-Fix-DAL217-tests-modify-DTN-logs-for.patch
@@ -0,0 +1,47 @@
+From 7ea148f4ee22cf0f05e45ad384f11c8f1072e96f Mon Sep 17 00:00:00 2001
+From: Gary Kattan <gary.kattan@amd.com>
+Date: Mon, 20 Aug 2018 15:12:14 -0700
+Subject: [PATCH 5352/5725] drm/amd/display: Fix DAL217 tests modify DTN logs
+ for other tests
+
+[Why] Update code to get the DTN golden log check to pass for tests run
+after DAL217 tests.
+[How] Change how the dcn10_log_hw_state function prints HW state info
+(CM_GAMUT_REMAP_Cx_Cx registers) when GAMUT REMAP is in bypass mode.
+
+Change-Id: I2c116ab220a7c2582c011474f6e5d7ab3018cde6
+Signed-off-by: Gary Kattan <gary.kattan@amd.com>
+Reviewed-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | 14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+index 5f2054a..dcb3c55 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+@@ -116,12 +116,14 @@ void dpp_read_state(struct dpp *dpp_base,
+ REG_GET(CM_GAMUT_REMAP_CONTROL,
+ CM_GAMUT_REMAP_MODE, &s->gamut_remap_mode);
+
+- s->gamut_remap_c11_c12 = REG_READ(CM_GAMUT_REMAP_C11_C12);
+- s->gamut_remap_c13_c14 = REG_READ(CM_GAMUT_REMAP_C13_C14);
+- s->gamut_remap_c21_c22 = REG_READ(CM_GAMUT_REMAP_C21_C22);
+- s->gamut_remap_c23_c24 = REG_READ(CM_GAMUT_REMAP_C23_C24);
+- s->gamut_remap_c31_c32 = REG_READ(CM_GAMUT_REMAP_C31_C32);
+- s->gamut_remap_c33_c34 = REG_READ(CM_GAMUT_REMAP_C33_C34);
++ if (s->gamut_remap_mode) {
++ s->gamut_remap_c11_c12 = REG_READ(CM_GAMUT_REMAP_C11_C12);
++ s->gamut_remap_c13_c14 = REG_READ(CM_GAMUT_REMAP_C13_C14);
++ s->gamut_remap_c21_c22 = REG_READ(CM_GAMUT_REMAP_C21_C22);
++ s->gamut_remap_c23_c24 = REG_READ(CM_GAMUT_REMAP_C23_C24);
++ s->gamut_remap_c31_c32 = REG_READ(CM_GAMUT_REMAP_C31_C32);
++ s->gamut_remap_c33_c34 = REG_READ(CM_GAMUT_REMAP_C33_C34);
++ }
+ }
+
+ /* Program gamut remap in bypass mode */
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5353-drm-amd-display-Add-driver-side-parsing-for-CM.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5353-drm-amd-display-Add-driver-side-parsing-for-CM.patch
new file mode 100644
index 00000000..7b90a33e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5353-drm-amd-display-Add-driver-side-parsing-for-CM.patch
@@ -0,0 +1,68 @@
+From b9108f310126578eea2a83df78efa52b96b47751 Mon Sep 17 00:00:00 2001
+From: Jun Lei <Jun.Lei@amd.com>
+Date: Mon, 13 Aug 2018 15:11:44 -0400
+Subject: [PATCH 5353/5725] drm/amd/display: Add driver-side parsing for CM
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Although 4 unique register values exist for gamma modes, two are
+actually the same (the two RAMs). It’s not possible for the caller to
+understand this HW-specific behavior, so some parsing is necessary
+in the driver.
+
+Change-Id: I073e0f67aed5c9bc8760e89d7755e6399b3687e2
+Signed-off-by: Jun Lei <Jun.Lei@amd.com>
+Reviewed-by: Wesley Chalmers <Wesley.Chalmers@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+---
+ .../display/dc/dcn10/dcn10_hw_sequencer_debug.c | 29 +++++++++++++++++++---
+ 1 file changed, 25 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
+index 9288b00..9c21825 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
+@@ -314,14 +314,35 @@ static unsigned int dcn10_get_cm_states(struct dc *dc, char *pBuf, unsigned int
+ struct dpp *dpp = pool->dpps[i];
+ struct dcn_dpp_state s = {0};
+
++
++
++
+ dpp->funcs->dpp_read_state(dpp, &s);
+
+ if (s.is_enabled) {
+- chars_printed = snprintf_count(pBuf, remaining_buffer, "%x,%x,%x,%x,%x,%x,"
+- "%08x,%08x,%08x,%08x,%08x,%08x"
++ chars_printed = snprintf_count(pBuf, remaining_buffer, "%x,%x,"
++ "%s,%s,%s,"
++ "%x,%08x,%08x,%08x,%08x,%08x,%08x"
+ "\n",
+- dpp->inst, s.igam_input_format, s.igam_lut_mode, s.dgam_lut_mode,
+- s.rgam_lut_mode, s.gamut_remap_mode, s.gamut_remap_c11_c12,
++ dpp->inst, s.igam_input_format,
++ (s.igam_lut_mode == 0) ? "BypassFixed" :
++ ((s.igam_lut_mode == 1) ? "BypassFloat" :
++ ((s.igam_lut_mode == 2) ? "RAM" :
++ ((s.igam_lut_mode == 3) ? "RAM" :
++ "Unknown"))),
++ (s.dgam_lut_mode == 0) ? "Bypass" :
++ ((s.dgam_lut_mode == 1) ? "sRGB" :
++ ((s.dgam_lut_mode == 2) ? "Ycc" :
++ ((s.dgam_lut_mode == 3) ? "RAM" :
++ ((s.dgam_lut_mode == 4) ? "RAM" :
++ "Unknown")))),
++ (s.rgam_lut_mode == 0) ? "Bypass" :
++ ((s.rgam_lut_mode == 1) ? "sRGB" :
++ ((s.rgam_lut_mode == 2) ? "Ycc" :
++ ((s.rgam_lut_mode == 3) ? "RAM" :
++ ((s.rgam_lut_mode == 4) ? "RAM" :
++ "Unknown")))),
++ s.gamut_remap_mode, s.gamut_remap_c11_c12,
+ s.gamut_remap_c13_c14, s.gamut_remap_c21_c22, s.gamut_remap_c23_c24,
+ s.gamut_remap_c31_c32, s.gamut_remap_c33_c34);
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5354-drm-amd-display-remove-dead-dc-vbios-code.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5354-drm-amd-display-remove-dead-dc-vbios-code.patch
new file mode 100644
index 00000000..8cde504e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5354-drm-amd-display-remove-dead-dc-vbios-code.patch
@@ -0,0 +1,1906 @@
+From e3a3d1ac61d3edbe70fe3f5e510f558364252c3c Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Tue, 14 Aug 2018 16:12:54 -0400
+Subject: [PATCH 5354/5725] drm/amd/display: remove dead dc vbios code
+
+Change-Id: Id1e7c39db419f13cf478c6a0c6f4b84c039acffe
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/bios/bios_parser.c | 1177 --------------------
+ drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 312 +-----
+ drivers/gpu/drm/amd/display/dc/dc_bios_types.h | 64 --
+ 3 files changed, 39 insertions(+), 1514 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+index bfa5816..0e1dc1b 100644
+--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
++++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+@@ -52,24 +52,13 @@
+ #define DC_LOGGER \
+ bp->base.ctx->logger
+
+-/* GUID to validate external display connection info table (aka OPM module) */
+-static const uint8_t ext_display_connection_guid[NUMBER_OF_UCHAR_FOR_GUID] = {
+- 0x91, 0x6E, 0x57, 0x09,
+- 0x3F, 0x6D, 0xD2, 0x11,
+- 0x39, 0x8E, 0x00, 0xA0,
+- 0xC9, 0x69, 0x72, 0x3B};
+-
+ #define DATA_TABLES(table) (bp->master_data_tbl->ListOfDataTables.table)
+
+ static void get_atom_data_table_revision(
+ ATOM_COMMON_TABLE_HEADER *atom_data_tbl,
+ struct atom_data_revision *tbl_revision);
+-static uint32_t get_dst_number_from_object(struct bios_parser *bp,
+- ATOM_OBJECT *object);
+ static uint32_t get_src_obj_list(struct bios_parser *bp, ATOM_OBJECT *object,
+ uint16_t **id_list);
+-static uint32_t get_dest_obj_list(struct bios_parser *bp,
+- ATOM_OBJECT *object, uint16_t **id_list);
+ static ATOM_OBJECT *get_bios_object(struct bios_parser *bp,
+ struct graphics_object_id id);
+ static enum bp_result get_gpio_i2c_info(struct bios_parser *bp,
+@@ -163,29 +152,6 @@ static uint8_t bios_parser_get_connectors_number(struct dc_bios *dcb)
+ le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset));
+ }
+
+-static struct graphics_object_id bios_parser_get_encoder_id(
+- struct dc_bios *dcb,
+- uint32_t i)
+-{
+- struct bios_parser *bp = BP_FROM_DCB(dcb);
+- struct graphics_object_id object_id = dal_graphics_object_id_init(
+- 0, ENUM_ID_UNKNOWN, OBJECT_TYPE_UNKNOWN);
+-
+- uint32_t encoder_table_offset = bp->object_info_tbl_offset
+- + le16_to_cpu(bp->object_info_tbl.v1_1->usEncoderObjectTableOffset);
+-
+- ATOM_OBJECT_TABLE *tbl =
+- GET_IMAGE(ATOM_OBJECT_TABLE, encoder_table_offset);
+-
+- if (tbl && tbl->ucNumberOfObjects > i) {
+- const uint16_t id = le16_to_cpu(tbl->asObjects[i].usObjectID);
+-
+- object_id = object_id_from_bios_object_id(id);
+- }
+-
+- return object_id;
+-}
+-
+ static struct graphics_object_id bios_parser_get_connector_id(
+ struct dc_bios *dcb,
+ uint8_t i)
+@@ -217,15 +183,6 @@ static struct graphics_object_id bios_parser_get_connector_id(
+ return object_id;
+ }
+
+-static uint32_t bios_parser_get_dst_number(struct dc_bios *dcb,
+- struct graphics_object_id id)
+-{
+- struct bios_parser *bp = BP_FROM_DCB(dcb);
+- ATOM_OBJECT *object = get_bios_object(bp, id);
+-
+- return get_dst_number_from_object(bp, object);
+-}
+-
+ static enum bp_result bios_parser_get_src_obj(struct dc_bios *dcb,
+ struct graphics_object_id object_id, uint32_t index,
+ struct graphics_object_id *src_object_id)
+@@ -255,30 +212,6 @@ static enum bp_result bios_parser_get_src_obj(struct dc_bios *dcb,
+ return BP_RESULT_OK;
+ }
+
+-static enum bp_result bios_parser_get_dst_obj(struct dc_bios *dcb,
+- struct graphics_object_id object_id, uint32_t index,
+- struct graphics_object_id *dest_object_id)
+-{
+- uint32_t number;
+- uint16_t *id = NULL;
+- ATOM_OBJECT *object;
+- struct bios_parser *bp = BP_FROM_DCB(dcb);
+-
+- if (!dest_object_id)
+- return BP_RESULT_BADINPUT;
+-
+- object = get_bios_object(bp, object_id);
+-
+- number = get_dest_obj_list(bp, object, &id);
+-
+- if (number <= index || !id)
+- return BP_RESULT_BADINPUT;
+-
+- *dest_object_id = object_id_from_bios_object_id(id[index]);
+-
+- return BP_RESULT_OK;
+-}
+-
+ static enum bp_result bios_parser_get_i2c_info(struct dc_bios *dcb,
+ struct graphics_object_id id,
+ struct graphics_object_i2c_info *info)
+@@ -325,196 +258,6 @@ static enum bp_result bios_parser_get_i2c_info(struct dc_bios *dcb,
+ return BP_RESULT_NORECORD;
+ }
+
+-static enum bp_result get_voltage_ddc_info_v1(uint8_t *i2c_line,
+- ATOM_COMMON_TABLE_HEADER *header,
+- uint8_t *address)
+-{
+- enum bp_result result = BP_RESULT_NORECORD;
+- ATOM_VOLTAGE_OBJECT_INFO *info =
+- (ATOM_VOLTAGE_OBJECT_INFO *) address;
+-
+- uint8_t *voltage_current_object = (uint8_t *) &info->asVoltageObj[0];
+-
+- while ((address + le16_to_cpu(header->usStructureSize)) > voltage_current_object) {
+- ATOM_VOLTAGE_OBJECT *object =
+- (ATOM_VOLTAGE_OBJECT *) voltage_current_object;
+-
+- if ((object->ucVoltageType == SET_VOLTAGE_INIT_MODE) &&
+- (object->ucVoltageType &
+- VOLTAGE_CONTROLLED_BY_I2C_MASK)) {
+-
+- *i2c_line = object->asControl.ucVoltageControlI2cLine
+- ^ 0x90;
+- result = BP_RESULT_OK;
+- break;
+- }
+-
+- voltage_current_object += object->ucSize;
+- }
+- return result;
+-}
+-
+-static enum bp_result get_voltage_ddc_info_v3(uint8_t *i2c_line,
+- uint32_t index,
+- ATOM_COMMON_TABLE_HEADER *header,
+- uint8_t *address)
+-{
+- enum bp_result result = BP_RESULT_NORECORD;
+- ATOM_VOLTAGE_OBJECT_INFO_V3_1 *info =
+- (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *) address;
+-
+- uint8_t *voltage_current_object =
+- (uint8_t *) (&(info->asVoltageObj[0]));
+-
+- while ((address + le16_to_cpu(header->usStructureSize)) > voltage_current_object) {
+- ATOM_I2C_VOLTAGE_OBJECT_V3 *object =
+- (ATOM_I2C_VOLTAGE_OBJECT_V3 *) voltage_current_object;
+-
+- if (object->sHeader.ucVoltageMode ==
+- ATOM_INIT_VOLTAGE_REGULATOR) {
+- if (object->sHeader.ucVoltageType == index) {
+- *i2c_line = object->ucVoltageControlI2cLine
+- ^ 0x90;
+- result = BP_RESULT_OK;
+- break;
+- }
+- }
+-
+- voltage_current_object += le16_to_cpu(object->sHeader.usSize);
+- }
+- return result;
+-}
+-
+-static enum bp_result bios_parser_get_thermal_ddc_info(
+- struct dc_bios *dcb,
+- uint32_t i2c_channel_id,
+- struct graphics_object_i2c_info *info)
+-{
+- struct bios_parser *bp = BP_FROM_DCB(dcb);
+- ATOM_I2C_ID_CONFIG_ACCESS *config;
+- ATOM_I2C_RECORD record;
+-
+- if (!info)
+- return BP_RESULT_BADINPUT;
+-
+- config = (ATOM_I2C_ID_CONFIG_ACCESS *) &i2c_channel_id;
+-
+- record.sucI2cId.bfHW_Capable = config->sbfAccess.bfHW_Capable;
+- record.sucI2cId.bfI2C_LineMux = config->sbfAccess.bfI2C_LineMux;
+- record.sucI2cId.bfHW_EngineID = config->sbfAccess.bfHW_EngineID;
+-
+- return get_gpio_i2c_info(bp, &record, info);
+-}
+-
+-static enum bp_result bios_parser_get_voltage_ddc_info(struct dc_bios *dcb,
+- uint32_t index,
+- struct graphics_object_i2c_info *info)
+-{
+- uint8_t i2c_line = 0;
+- enum bp_result result = BP_RESULT_NORECORD;
+- uint8_t *voltage_info_address;
+- ATOM_COMMON_TABLE_HEADER *header;
+- struct atom_data_revision revision = {0};
+- struct bios_parser *bp = BP_FROM_DCB(dcb);
+-
+- if (!DATA_TABLES(VoltageObjectInfo))
+- return result;
+-
+- voltage_info_address = bios_get_image(&bp->base, DATA_TABLES(VoltageObjectInfo), sizeof(ATOM_COMMON_TABLE_HEADER));
+-
+- header = (ATOM_COMMON_TABLE_HEADER *) voltage_info_address;
+-
+- get_atom_data_table_revision(header, &revision);
+-
+- switch (revision.major) {
+- case 1:
+- case 2:
+- result = get_voltage_ddc_info_v1(&i2c_line, header,
+- voltage_info_address);
+- break;
+- case 3:
+- if (revision.minor != 1)
+- break;
+- result = get_voltage_ddc_info_v3(&i2c_line, index, header,
+- voltage_info_address);
+- break;
+- }
+-
+- if (result == BP_RESULT_OK)
+- result = bios_parser_get_thermal_ddc_info(dcb,
+- i2c_line, info);
+-
+- return result;
+-}
+-
+-/* TODO: temporary commented out to suppress 'defined but not used' warning */
+-#if 0
+-static enum bp_result bios_parser_get_ddc_info_for_i2c_line(
+- struct bios_parser *bp,
+- uint8_t i2c_line, struct graphics_object_i2c_info *info)
+-{
+- uint32_t offset;
+- ATOM_OBJECT *object;
+- ATOM_OBJECT_TABLE *table;
+- uint32_t i;
+-
+- if (!info)
+- return BP_RESULT_BADINPUT;
+-
+- offset = le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset);
+-
+- offset += bp->object_info_tbl_offset;
+-
+- table = GET_IMAGE(ATOM_OBJECT_TABLE, offset);
+-
+- if (!table)
+- return BP_RESULT_BADBIOSTABLE;
+-
+- for (i = 0; i < table->ucNumberOfObjects; i++) {
+- object = &table->asObjects[i];
+-
+- if (!object) {
+- BREAK_TO_DEBUGGER(); /* Invalid object id */
+- return BP_RESULT_BADINPUT;
+- }
+-
+- offset = le16_to_cpu(object->usRecordOffset)
+- + bp->object_info_tbl_offset;
+-
+- for (;;) {
+- ATOM_COMMON_RECORD_HEADER *header =
+- GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
+-
+- if (!header)
+- return BP_RESULT_BADBIOSTABLE;
+-
+- offset += header->ucRecordSize;
+-
+- if (LAST_RECORD_TYPE == header->ucRecordType ||
+- !header->ucRecordSize)
+- break;
+-
+- if (ATOM_I2C_RECORD_TYPE == header->ucRecordType
+- && sizeof(ATOM_I2C_RECORD) <=
+- header->ucRecordSize) {
+- ATOM_I2C_RECORD *record =
+- (ATOM_I2C_RECORD *) header;
+-
+- if (i2c_line != record->sucI2cId.bfI2C_LineMux)
+- continue;
+-
+- /* get the I2C info */
+- if (get_gpio_i2c_info(bp, record, info) ==
+- BP_RESULT_OK)
+- return BP_RESULT_OK;
+- }
+- }
+- }
+-
+- return BP_RESULT_NORECORD;
+-}
+-#endif
+-
+ static enum bp_result bios_parser_get_hpd_info(struct dc_bios *dcb,
+ struct graphics_object_id id,
+ struct graphics_object_hpd_info *info)
+@@ -1129,62 +872,6 @@ static bool bios_parser_is_device_id_supported(
+ return (le16_to_cpu(bp->object_info_tbl.v1_1->usDeviceSupport) & mask) != 0;
+ }
+
+-static enum bp_result bios_parser_crt_control(
+- struct dc_bios *dcb,
+- enum engine_id engine_id,
+- bool enable,
+- uint32_t pixel_clock)
+-{
+- struct bios_parser *bp = BP_FROM_DCB(dcb);
+- uint8_t standard;
+-
+- if (!bp->cmd_tbl.dac1_encoder_control &&
+- engine_id == ENGINE_ID_DACA)
+- return BP_RESULT_FAILURE;
+- if (!bp->cmd_tbl.dac2_encoder_control &&
+- engine_id == ENGINE_ID_DACB)
+- return BP_RESULT_FAILURE;
+- /* validate params */
+- switch (engine_id) {
+- case ENGINE_ID_DACA:
+- case ENGINE_ID_DACB:
+- break;
+- default:
+- /* unsupported engine */
+- return BP_RESULT_FAILURE;
+- }
+-
+- standard = ATOM_DAC1_PS2; /* == ATOM_DAC2_PS2 */
+-
+- if (enable) {
+- if (engine_id == ENGINE_ID_DACA) {
+- bp->cmd_tbl.dac1_encoder_control(bp, enable,
+- pixel_clock, standard);
+- if (bp->cmd_tbl.dac1_output_control != NULL)
+- bp->cmd_tbl.dac1_output_control(bp, enable);
+- } else {
+- bp->cmd_tbl.dac2_encoder_control(bp, enable,
+- pixel_clock, standard);
+- if (bp->cmd_tbl.dac2_output_control != NULL)
+- bp->cmd_tbl.dac2_output_control(bp, enable);
+- }
+- } else {
+- if (engine_id == ENGINE_ID_DACA) {
+- if (bp->cmd_tbl.dac1_output_control != NULL)
+- bp->cmd_tbl.dac1_output_control(bp, enable);
+- bp->cmd_tbl.dac1_encoder_control(bp, enable,
+- pixel_clock, standard);
+- } else {
+- if (bp->cmd_tbl.dac2_output_control != NULL)
+- bp->cmd_tbl.dac2_output_control(bp, enable);
+- bp->cmd_tbl.dac2_encoder_control(bp, enable,
+- pixel_clock, standard);
+- }
+- }
+-
+- return BP_RESULT_OK;
+-}
+-
+ static ATOM_HPD_INT_RECORD *get_hpd_record(struct bios_parser *bp,
+ ATOM_OBJECT *object)
+ {
+@@ -1219,49 +906,6 @@ static ATOM_HPD_INT_RECORD *get_hpd_record(struct bios_parser *bp,
+ return NULL;
+ }
+
+-/**
+- * Get I2C information of input object id
+- *
+- * search all records to find the ATOM_I2C_RECORD_TYPE record IR
+- */
+-static ATOM_I2C_RECORD *get_i2c_record(
+- struct bios_parser *bp,
+- ATOM_OBJECT *object)
+-{
+- uint32_t offset;
+- ATOM_COMMON_RECORD_HEADER *record_header;
+-
+- if (!object) {
+- BREAK_TO_DEBUGGER();
+- /* Invalid object */
+- return NULL;
+- }
+-
+- offset = le16_to_cpu(object->usRecordOffset)
+- + bp->object_info_tbl_offset;
+-
+- for (;;) {
+- record_header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
+-
+- if (!record_header)
+- return NULL;
+-
+- if (LAST_RECORD_TYPE == record_header->ucRecordType ||
+- 0 == record_header->ucRecordSize)
+- break;
+-
+- if (ATOM_I2C_RECORD_TYPE == record_header->ucRecordType &&
+- sizeof(ATOM_I2C_RECORD) <=
+- record_header->ucRecordSize) {
+- return (ATOM_I2C_RECORD *)record_header;
+- }
+-
+- offset += record_header->ucRecordSize;
+- }
+-
+- return NULL;
+-}
+-
+ static enum bp_result get_ss_info_from_ss_info_table(
+ struct bios_parser *bp,
+ uint32_t id,
+@@ -2356,40 +2000,6 @@ static ATOM_OBJECT *get_bios_object(struct bios_parser *bp,
+ return NULL;
+ }
+
+-static uint32_t get_dest_obj_list(struct bios_parser *bp,
+- ATOM_OBJECT *object, uint16_t **id_list)
+-{
+- uint32_t offset;
+- uint8_t *number;
+-
+- if (!object) {
+- BREAK_TO_DEBUGGER(); /* Invalid object id */
+- return 0;
+- }
+-
+- offset = le16_to_cpu(object->usSrcDstTableOffset)
+- + bp->object_info_tbl_offset;
+-
+- number = GET_IMAGE(uint8_t, offset);
+- if (!number)
+- return 0;
+-
+- offset += sizeof(uint8_t);
+- offset += sizeof(uint16_t) * (*number);
+-
+- number = GET_IMAGE(uint8_t, offset);
+- if ((!number) || (!*number))
+- return 0;
+-
+- offset += sizeof(uint8_t);
+- *id_list = (uint16_t *)bios_get_image(&bp->base, offset, *number * sizeof(uint16_t));
+-
+- if (!*id_list)
+- return 0;
+-
+- return *number;
+-}
+-
+ static uint32_t get_src_obj_list(struct bios_parser *bp, ATOM_OBJECT *object,
+ uint16_t **id_list)
+ {
+@@ -2417,35 +2027,6 @@ static uint32_t get_src_obj_list(struct bios_parser *bp, ATOM_OBJECT *object,
+ return *number;
+ }
+
+-static uint32_t get_dst_number_from_object(struct bios_parser *bp,
+- ATOM_OBJECT *object)
+-{
+- uint32_t offset;
+- uint8_t *number;
+-
+- if (!object) {
+- BREAK_TO_DEBUGGER(); /* Invalid encoder object id*/
+- return 0;
+- }
+-
+- offset = le16_to_cpu(object->usSrcDstTableOffset)
+- + bp->object_info_tbl_offset;
+-
+- number = GET_IMAGE(uint8_t, offset);
+- if (!number)
+- return 0;
+-
+- offset += sizeof(uint8_t);
+- offset += sizeof(uint16_t) * (*number);
+-
+- number = GET_IMAGE(uint8_t, offset);
+-
+- if (!number)
+- return 0;
+-
+- return *number;
+-}
+-
+ static struct device_id device_type_from_device_id(uint16_t device_id)
+ {
+
+@@ -2625,750 +2206,6 @@ static uint32_t get_support_mask_for_device_id(struct device_id device_id)
+ }
+
+ /**
+- * HwContext interface for writing MM registers
+- */
+-
+-static bool i2c_read(
+- struct bios_parser *bp,
+- struct graphics_object_i2c_info *i2c_info,
+- uint8_t *buffer,
+- uint32_t length)
+-{
+- struct ddc *ddc;
+- uint8_t offset[2] = { 0, 0 };
+- bool result = false;
+- struct i2c_command cmd;
+- struct gpio_ddc_hw_info hw_info = {
+- i2c_info->i2c_hw_assist,
+- i2c_info->i2c_line };
+-
+- ddc = dal_gpio_create_ddc(bp->base.ctx->gpio_service,
+- i2c_info->gpio_info.clk_a_register_index,
+- (1 << i2c_info->gpio_info.clk_a_shift), &hw_info);
+-
+- if (!ddc)
+- return result;
+-
+- /*Using SW engine */
+- cmd.engine = I2C_COMMAND_ENGINE_SW;
+- cmd.speed = ddc->ctx->dc->caps.i2c_speed_in_khz;
+-
+- {
+- struct i2c_payload payloads[] = {
+- {
+- .address = i2c_info->i2c_slave_address >> 1,
+- .data = offset,
+- .length = sizeof(offset),
+- .write = true
+- },
+- {
+- .address = i2c_info->i2c_slave_address >> 1,
+- .data = buffer,
+- .length = length,
+- .write = false
+- }
+- };
+-
+- cmd.payloads = payloads;
+- cmd.number_of_payloads = ARRAY_SIZE(payloads);
+- result = dc_submit_i2c(
+- ddc->ctx->dc,
+- ddc->hw_info.ddc_channel,
+- &cmd);
+- }
+-
+- dal_gpio_destroy_ddc(&ddc);
+-
+- return result;
+-}
+-
+-/**
+- * Read external display connection info table through i2c.
+- * validate the GUID and checksum.
+- *
+- * @return enum bp_result whether all data was sucessfully read
+- */
+-static enum bp_result get_ext_display_connection_info(
+- struct bios_parser *bp,
+- ATOM_OBJECT *opm_object,
+- ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO *ext_display_connection_info_tbl)
+-{
+- bool config_tbl_present = false;
+- ATOM_I2C_RECORD *i2c_record = NULL;
+- uint32_t i = 0;
+-
+- if (opm_object == NULL)
+- return BP_RESULT_BADINPUT;
+-
+- i2c_record = get_i2c_record(bp, opm_object);
+-
+- if (i2c_record != NULL) {
+- ATOM_GPIO_I2C_INFO *gpio_i2c_header;
+- struct graphics_object_i2c_info i2c_info;
+-
+- gpio_i2c_header = GET_IMAGE(ATOM_GPIO_I2C_INFO,
+- bp->master_data_tbl->ListOfDataTables.GPIO_I2C_Info);
+-
+- if (NULL == gpio_i2c_header)
+- return BP_RESULT_BADBIOSTABLE;
+-
+- if (get_gpio_i2c_info(bp, i2c_record, &i2c_info) !=
+- BP_RESULT_OK)
+- return BP_RESULT_BADBIOSTABLE;
+-
+- if (i2c_read(bp,
+- &i2c_info,
+- (uint8_t *)ext_display_connection_info_tbl,
+- sizeof(ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO))) {
+- config_tbl_present = true;
+- }
+- }
+-
+- /* Validate GUID */
+- if (config_tbl_present)
+- for (i = 0; i < NUMBER_OF_UCHAR_FOR_GUID; i++) {
+- if (ext_display_connection_info_tbl->ucGuid[i]
+- != ext_display_connection_guid[i]) {
+- config_tbl_present = false;
+- break;
+- }
+- }
+-
+- /* Validate checksum */
+- if (config_tbl_present) {
+- uint8_t check_sum = 0;
+- uint8_t *buf =
+- (uint8_t *)ext_display_connection_info_tbl;
+-
+- for (i = 0; i < sizeof(ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO);
+- i++) {
+- check_sum += buf[i];
+- }
+-
+- if (check_sum != 0)
+- config_tbl_present = false;
+- }
+-
+- if (config_tbl_present)
+- return BP_RESULT_OK;
+- else
+- return BP_RESULT_FAILURE;
+-}
+-
+-/*
+- * Gets the first device ID in the same group as the given ID for enumerating.
+- * For instance, if any DFP device ID is passed, returns the device ID for DFP1.
+- *
+- * The first device ID in the same group as the passed device ID, or 0 if no
+- * matching device group found.
+- */
+-static uint32_t enum_first_device_id(uint32_t dev_id)
+-{
+- /* Return the first in the group that this ID belongs to. */
+- if (dev_id & ATOM_DEVICE_CRT_SUPPORT)
+- return ATOM_DEVICE_CRT1_SUPPORT;
+- else if (dev_id & ATOM_DEVICE_DFP_SUPPORT)
+- return ATOM_DEVICE_DFP1_SUPPORT;
+- else if (dev_id & ATOM_DEVICE_LCD_SUPPORT)
+- return ATOM_DEVICE_LCD1_SUPPORT;
+- else if (dev_id & ATOM_DEVICE_TV_SUPPORT)
+- return ATOM_DEVICE_TV1_SUPPORT;
+- else if (dev_id & ATOM_DEVICE_CV_SUPPORT)
+- return ATOM_DEVICE_CV_SUPPORT;
+-
+- /* No group found for this device ID. */
+-
+- dm_error("%s: incorrect input %d\n", __func__, dev_id);
+- /* No matching support flag for given device ID */
+- return 0;
+-}
+-
+-/*
+- * Gets the next device ID in the group for a given device ID.
+- *
+- * The current device ID being enumerated on.
+- *
+- * The next device ID in the group, or 0 if no device exists.
+- */
+-static uint32_t enum_next_dev_id(uint32_t dev_id)
+-{
+- /* Get next device ID in the group. */
+- switch (dev_id) {
+- case ATOM_DEVICE_CRT1_SUPPORT:
+- return ATOM_DEVICE_CRT2_SUPPORT;
+- case ATOM_DEVICE_LCD1_SUPPORT:
+- return ATOM_DEVICE_LCD2_SUPPORT;
+- case ATOM_DEVICE_DFP1_SUPPORT:
+- return ATOM_DEVICE_DFP2_SUPPORT;
+- case ATOM_DEVICE_DFP2_SUPPORT:
+- return ATOM_DEVICE_DFP3_SUPPORT;
+- case ATOM_DEVICE_DFP3_SUPPORT:
+- return ATOM_DEVICE_DFP4_SUPPORT;
+- case ATOM_DEVICE_DFP4_SUPPORT:
+- return ATOM_DEVICE_DFP5_SUPPORT;
+- case ATOM_DEVICE_DFP5_SUPPORT:
+- return ATOM_DEVICE_DFP6_SUPPORT;
+- }
+-
+- /* Done enumerating through devices. */
+- return 0;
+-}
+-
+-/*
+- * Returns the new device tag record for patched BIOS object.
+- *
+- * [IN] pExtDisplayPath - External display path to copy device tag from.
+- * [IN] deviceSupport - Bit vector for device ID support flags.
+- * [OUT] pDeviceTag - Device tag structure to fill with patched data.
+- *
+- * True if a compatible device ID was found, false otherwise.
+- */
+-static bool get_patched_device_tag(
+- struct bios_parser *bp,
+- EXT_DISPLAY_PATH *ext_display_path,
+- uint32_t device_support,
+- ATOM_CONNECTOR_DEVICE_TAG *device_tag)
+-{
+- uint32_t dev_id;
+- /* Use fallback behaviour if not supported. */
+- if (!bp->remap_device_tags) {
+- device_tag->ulACPIDeviceEnum =
+- cpu_to_le32((uint32_t) le16_to_cpu(ext_display_path->usDeviceACPIEnum));
+- device_tag->usDeviceID =
+- cpu_to_le16(le16_to_cpu(ext_display_path->usDeviceTag));
+- return true;
+- }
+-
+- /* Find the first unused in the same group. */
+- dev_id = enum_first_device_id(le16_to_cpu(ext_display_path->usDeviceTag));
+- while (dev_id != 0) {
+- /* Assign this device ID if supported. */
+- if ((device_support & dev_id) != 0) {
+- device_tag->ulACPIDeviceEnum =
+- cpu_to_le32((uint32_t) le16_to_cpu(ext_display_path->usDeviceACPIEnum));
+- device_tag->usDeviceID = cpu_to_le16((USHORT) dev_id);
+- return true;
+- }
+-
+- dev_id = enum_next_dev_id(dev_id);
+- }
+-
+- /* No compatible device ID found. */
+- return false;
+-}
+-
+-/*
+- * Adds a device tag to a BIOS object's device tag record if there is
+- * matching device ID supported.
+- *
+- * pObject - Pointer to the BIOS object to add the device tag to.
+- * pExtDisplayPath - Display path to retrieve base device ID from.
+- * pDeviceSupport - Pointer to bit vector for supported device IDs.
+- */
+-static void add_device_tag_from_ext_display_path(
+- struct bios_parser *bp,
+- ATOM_OBJECT *object,
+- EXT_DISPLAY_PATH *ext_display_path,
+- uint32_t *device_support)
+-{
+- /* Get device tag record for object. */
+- ATOM_CONNECTOR_DEVICE_TAG *device_tag = NULL;
+- ATOM_CONNECTOR_DEVICE_TAG_RECORD *device_tag_record = NULL;
+- enum bp_result result =
+- bios_parser_get_device_tag_record(
+- bp, object, &device_tag_record);
+-
+- if ((le16_to_cpu(ext_display_path->usDeviceTag) != CONNECTOR_OBJECT_ID_NONE)
+- && (result == BP_RESULT_OK)) {
+- uint8_t index;
+-
+- if ((device_tag_record->ucNumberOfDevice == 1) &&
+- (le16_to_cpu(device_tag_record->asDeviceTag[0].usDeviceID) == 0)) {
+- /*Workaround bug in current VBIOS releases where
+- * ucNumberOfDevice = 1 but there is no actual device
+- * tag data. This w/a is temporary until the updated
+- * VBIOS is distributed. */
+- device_tag_record->ucNumberOfDevice =
+- device_tag_record->ucNumberOfDevice - 1;
+- }
+-
+- /* Attempt to find a matching device ID. */
+- index = device_tag_record->ucNumberOfDevice;
+- device_tag = &device_tag_record->asDeviceTag[index];
+- if (get_patched_device_tag(
+- bp,
+- ext_display_path,
+- *device_support,
+- device_tag)) {
+- /* Update cached device support to remove assigned ID.
+- */
+- *device_support &= ~le16_to_cpu(device_tag->usDeviceID);
+- device_tag_record->ucNumberOfDevice++;
+- }
+- }
+-}
+-
+-/*
+- * Read out a single EXT_DISPLAY_PATH from the external display connection info
+- * table. The specific entry in the table is determined by the enum_id passed
+- * in.
+- *
+- * EXT_DISPLAY_PATH describing a single Configuration table entry
+- */
+-
+-#define INVALID_CONNECTOR 0xffff
+-
+-static EXT_DISPLAY_PATH *get_ext_display_path_entry(
+- ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO *config_table,
+- uint32_t bios_object_id)
+-{
+- EXT_DISPLAY_PATH *ext_display_path;
+- uint32_t ext_display_path_index =
+- ((bios_object_id & ENUM_ID_MASK) >> ENUM_ID_SHIFT) - 1;
+-
+- if (ext_display_path_index >= MAX_NUMBER_OF_EXT_DISPLAY_PATH)
+- return NULL;
+-
+- ext_display_path = &config_table->sPath[ext_display_path_index];
+-
+- if (le16_to_cpu(ext_display_path->usDeviceConnector) == INVALID_CONNECTOR)
+- ext_display_path->usDeviceConnector = cpu_to_le16(0);
+-
+- return ext_display_path;
+-}
+-
+-/*
+- * Get AUX/DDC information of input object id
+- *
+- * search all records to find the ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE record
+- * IR
+- */
+-static ATOM_CONNECTOR_AUXDDC_LUT_RECORD *get_ext_connector_aux_ddc_lut_record(
+- struct bios_parser *bp,
+- ATOM_OBJECT *object)
+-{
+- uint32_t offset;
+- ATOM_COMMON_RECORD_HEADER *header;
+-
+- if (!object) {
+- BREAK_TO_DEBUGGER();
+- /* Invalid object */
+- return NULL;
+- }
+-
+- offset = le16_to_cpu(object->usRecordOffset)
+- + bp->object_info_tbl_offset;
+-
+- for (;;) {
+- header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
+-
+- if (!header)
+- return NULL;
+-
+- if (LAST_RECORD_TYPE == header->ucRecordType ||
+- 0 == header->ucRecordSize)
+- break;
+-
+- if (ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE ==
+- header->ucRecordType &&
+- sizeof(ATOM_CONNECTOR_AUXDDC_LUT_RECORD) <=
+- header->ucRecordSize)
+- return (ATOM_CONNECTOR_AUXDDC_LUT_RECORD *)(header);
+-
+- offset += header->ucRecordSize;
+- }
+-
+- return NULL;
+-}
+-
+-/*
+- * Get AUX/DDC information of input object id
+- *
+- * search all records to find the ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE record
+- * IR
+- */
+-static ATOM_CONNECTOR_HPDPIN_LUT_RECORD *get_ext_connector_hpd_pin_lut_record(
+- struct bios_parser *bp,
+- ATOM_OBJECT *object)
+-{
+- uint32_t offset;
+- ATOM_COMMON_RECORD_HEADER *header;
+-
+- if (!object) {
+- BREAK_TO_DEBUGGER();
+- /* Invalid object */
+- return NULL;
+- }
+-
+- offset = le16_to_cpu(object->usRecordOffset)
+- + bp->object_info_tbl_offset;
+-
+- for (;;) {
+- header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
+-
+- if (!header)
+- return NULL;
+-
+- if (LAST_RECORD_TYPE == header->ucRecordType ||
+- 0 == header->ucRecordSize)
+- break;
+-
+- if (ATOM_CONNECTOR_HPDPIN_LUT_RECORD_TYPE ==
+- header->ucRecordType &&
+- sizeof(ATOM_CONNECTOR_HPDPIN_LUT_RECORD) <=
+- header->ucRecordSize)
+- return (ATOM_CONNECTOR_HPDPIN_LUT_RECORD *)header;
+-
+- offset += header->ucRecordSize;
+- }
+-
+- return NULL;
+-}
+-
+-/*
+- * Check whether we need to patch the VBIOS connector info table with
+- * data from an external display connection info table. This is
+- * necessary to support MXM boards with an OPM (output personality
+- * module). With these designs, the VBIOS connector info table
+- * specifies an MXM_CONNECTOR with a unique ID. The driver retrieves
+- * the external connection info table through i2c and then looks up the
+- * connector ID to find the real connector type (e.g. DFP1).
+- *
+- */
+-static enum bp_result patch_bios_image_from_ext_display_connection_info(
+- struct bios_parser *bp)
+-{
+- ATOM_OBJECT_TABLE *connector_tbl;
+- uint32_t connector_tbl_offset;
+- struct graphics_object_id object_id;
+- ATOM_OBJECT *object;
+- ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO ext_display_connection_info_tbl;
+- EXT_DISPLAY_PATH *ext_display_path;
+- ATOM_CONNECTOR_AUXDDC_LUT_RECORD *aux_ddc_lut_record = NULL;
+- ATOM_I2C_RECORD *i2c_record = NULL;
+- ATOM_CONNECTOR_HPDPIN_LUT_RECORD *hpd_pin_lut_record = NULL;
+- ATOM_HPD_INT_RECORD *hpd_record = NULL;
+- ATOM_OBJECT_TABLE *encoder_table;
+- uint32_t encoder_table_offset;
+- ATOM_OBJECT *opm_object = NULL;
+- uint32_t i = 0;
+- struct graphics_object_id opm_object_id =
+- dal_graphics_object_id_init(
+- GENERIC_ID_MXM_OPM,
+- ENUM_ID_1,
+- OBJECT_TYPE_GENERIC);
+- ATOM_CONNECTOR_DEVICE_TAG_RECORD *dev_tag_record;
+- uint32_t cached_device_support =
+- le16_to_cpu(bp->object_info_tbl.v1_1->usDeviceSupport);
+-
+- uint32_t dst_number;
+- uint16_t *dst_object_id_list;
+-
+- opm_object = get_bios_object(bp, opm_object_id);
+- if (!opm_object)
+- return BP_RESULT_UNSUPPORTED;
+-
+- memset(&ext_display_connection_info_tbl, 0,
+- sizeof(ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO));
+-
+- connector_tbl_offset = bp->object_info_tbl_offset
+- + le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset);
+- connector_tbl = GET_IMAGE(ATOM_OBJECT_TABLE, connector_tbl_offset);
+-
+- /* Read Connector info table from EEPROM through i2c */
+- if (get_ext_display_connection_info(bp,
+- opm_object,
+- &ext_display_connection_info_tbl) != BP_RESULT_OK) {
+-
+- DC_LOG_WARNING("%s: Failed to read Connection Info Table", __func__);
+- return BP_RESULT_UNSUPPORTED;
+- }
+-
+- /* Get pointer to AUX/DDC and HPD LUTs */
+- aux_ddc_lut_record =
+- get_ext_connector_aux_ddc_lut_record(bp, opm_object);
+- hpd_pin_lut_record =
+- get_ext_connector_hpd_pin_lut_record(bp, opm_object);
+-
+- if ((aux_ddc_lut_record == NULL) || (hpd_pin_lut_record == NULL))
+- return BP_RESULT_UNSUPPORTED;
+-
+- /* Cache support bits for currently unmapped device types. */
+- if (bp->remap_device_tags) {
+- for (i = 0; i < connector_tbl->ucNumberOfObjects; ++i) {
+- uint32_t j;
+- /* Remove support for all non-MXM connectors. */
+- object = &connector_tbl->asObjects[i];
+- object_id = object_id_from_bios_object_id(
+- le16_to_cpu(object->usObjectID));
+- if ((OBJECT_TYPE_CONNECTOR != object_id.type) ||
+- (CONNECTOR_ID_MXM == object_id.id))
+- continue;
+-
+- /* Remove support for all device tags. */
+- if (bios_parser_get_device_tag_record(
+- bp, object, &dev_tag_record) != BP_RESULT_OK)
+- continue;
+-
+- for (j = 0; j < dev_tag_record->ucNumberOfDevice; ++j) {
+- ATOM_CONNECTOR_DEVICE_TAG *device_tag =
+- &dev_tag_record->asDeviceTag[j];
+- cached_device_support &=
+- ~le16_to_cpu(device_tag->usDeviceID);
+- }
+- }
+- }
+-
+- /* Find all MXM connector objects and patch them with connector info
+- * from the external display connection info table. */
+- for (i = 0; i < connector_tbl->ucNumberOfObjects; i++) {
+- uint32_t j;
+-
+- object = &connector_tbl->asObjects[i];
+- object_id = object_id_from_bios_object_id(le16_to_cpu(object->usObjectID));
+- if ((OBJECT_TYPE_CONNECTOR != object_id.type) ||
+- (CONNECTOR_ID_MXM != object_id.id))
+- continue;
+-
+- /* Get the correct connection info table entry based on the enum
+- * id. */
+- ext_display_path = get_ext_display_path_entry(
+- &ext_display_connection_info_tbl,
+- le16_to_cpu(object->usObjectID));
+- if (!ext_display_path)
+- return BP_RESULT_FAILURE;
+-
+- /* Patch device connector ID */
+- object->usObjectID =
+- cpu_to_le16(le16_to_cpu(ext_display_path->usDeviceConnector));
+-
+- /* Patch device tag, ulACPIDeviceEnum. */
+- add_device_tag_from_ext_display_path(
+- bp,
+- object,
+- ext_display_path,
+- &cached_device_support);
+-
+- /* Patch HPD info */
+- if (ext_display_path->ucExtHPDPINLutIndex <
+- MAX_NUMBER_OF_EXT_HPDPIN_LUT_ENTRIES) {
+- hpd_record = get_hpd_record(bp, object);
+- if (hpd_record) {
+- uint8_t index =
+- ext_display_path->ucExtHPDPINLutIndex;
+- hpd_record->ucHPDIntGPIOID =
+- hpd_pin_lut_record->ucHPDPINMap[index];
+- } else {
+- BREAK_TO_DEBUGGER();
+- /* Invalid hpd record */
+- return BP_RESULT_FAILURE;
+- }
+- }
+-
+- /* Patch I2C/AUX info */
+- if (ext_display_path->ucExtHPDPINLutIndex <
+- MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES) {
+- i2c_record = get_i2c_record(bp, object);
+- if (i2c_record) {
+- uint8_t index =
+- ext_display_path->ucExtAUXDDCLutIndex;
+- i2c_record->sucI2cId =
+- aux_ddc_lut_record->ucAUXDDCMap[index];
+- } else {
+- BREAK_TO_DEBUGGER();
+- /* Invalid I2C record */
+- return BP_RESULT_FAILURE;
+- }
+- }
+-
+- /* Merge with other MXM connectors that map to the same physical
+- * connector. */
+- for (j = i + 1;
+- j < connector_tbl->ucNumberOfObjects; j++) {
+- ATOM_OBJECT *next_object;
+- struct graphics_object_id next_object_id;
+- EXT_DISPLAY_PATH *next_ext_display_path;
+-
+- next_object = &connector_tbl->asObjects[j];
+- next_object_id = object_id_from_bios_object_id(
+- le16_to_cpu(next_object->usObjectID));
+-
+- if ((OBJECT_TYPE_CONNECTOR != next_object_id.type) &&
+- (CONNECTOR_ID_MXM == next_object_id.id))
+- continue;
+-
+- next_ext_display_path = get_ext_display_path_entry(
+- &ext_display_connection_info_tbl,
+- le16_to_cpu(next_object->usObjectID));
+-
+- if (next_ext_display_path == NULL)
+- return BP_RESULT_FAILURE;
+-
+- /* Merge if using same connector. */
+- if ((le16_to_cpu(next_ext_display_path->usDeviceConnector) ==
+- le16_to_cpu(ext_display_path->usDeviceConnector)) &&
+- (le16_to_cpu(ext_display_path->usDeviceConnector) != 0)) {
+- /* Clear duplicate connector from table. */
+- next_object->usObjectID = cpu_to_le16(0);
+- add_device_tag_from_ext_display_path(
+- bp,
+- object,
+- ext_display_path,
+- &cached_device_support);
+- }
+- }
+- }
+-
+- /* Find all encoders which have an MXM object as their destination.
+- * Replace the MXM object with the real connector Id from the external
+- * display connection info table */
+-
+- encoder_table_offset = bp->object_info_tbl_offset
+- + le16_to_cpu(bp->object_info_tbl.v1_1->usEncoderObjectTableOffset);
+- encoder_table = GET_IMAGE(ATOM_OBJECT_TABLE, encoder_table_offset);
+-
+- for (i = 0; i < encoder_table->ucNumberOfObjects; i++) {
+- uint32_t j;
+-
+- object = &encoder_table->asObjects[i];
+-
+- dst_number = get_dest_obj_list(bp, object, &dst_object_id_list);
+-
+- for (j = 0; j < dst_number; j++) {
+- object_id = object_id_from_bios_object_id(
+- dst_object_id_list[j]);
+-
+- if ((OBJECT_TYPE_CONNECTOR != object_id.type) ||
+- (CONNECTOR_ID_MXM != object_id.id))
+- continue;
+-
+- /* Get the correct connection info table entry based on
+- * the enum id. */
+- ext_display_path =
+- get_ext_display_path_entry(
+- &ext_display_connection_info_tbl,
+- dst_object_id_list[j]);
+-
+- if (ext_display_path == NULL)
+- return BP_RESULT_FAILURE;
+-
+- dst_object_id_list[j] =
+- le16_to_cpu(ext_display_path->usDeviceConnector);
+- }
+- }
+-
+- return BP_RESULT_OK;
+-}
+-
+-/*
+- * Check whether we need to patch the VBIOS connector info table with
+- * data from an external display connection info table. This is
+- * necessary to support MXM boards with an OPM (output personality
+- * module). With these designs, the VBIOS connector info table
+- * specifies an MXM_CONNECTOR with a unique ID. The driver retrieves
+- * the external connection info table through i2c and then looks up the
+- * connector ID to find the real connector type (e.g. DFP1).
+- *
+- */
+-
+-static void process_ext_display_connection_info(struct bios_parser *bp)
+-{
+- ATOM_OBJECT_TABLE *connector_tbl;
+- uint32_t connector_tbl_offset;
+- struct graphics_object_id object_id;
+- ATOM_OBJECT *object;
+- bool mxm_connector_found = false;
+- bool null_entry_found = false;
+- uint32_t i = 0;
+-
+- connector_tbl_offset = bp->object_info_tbl_offset +
+- le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset);
+- connector_tbl = GET_IMAGE(ATOM_OBJECT_TABLE, connector_tbl_offset);
+-
+- /* Look for MXM connectors to determine whether we need patch the VBIOS
+- * connector info table. Look for null entries to determine whether we
+- * need to compact connector table. */
+- for (i = 0; i < connector_tbl->ucNumberOfObjects; i++) {
+- object = &connector_tbl->asObjects[i];
+- object_id = object_id_from_bios_object_id(le16_to_cpu(object->usObjectID));
+-
+- if ((OBJECT_TYPE_CONNECTOR == object_id.type) &&
+- (CONNECTOR_ID_MXM == object_id.id)) {
+- /* Once we found MXM connector - we can break */
+- mxm_connector_found = true;
+- break;
+- } else if (OBJECT_TYPE_CONNECTOR != object_id.type) {
+- /* We need to continue looping - to check if MXM
+- * connector present */
+- null_entry_found = true;
+- }
+- }
+-
+- /* Patch BIOS image */
+- if (mxm_connector_found || null_entry_found) {
+- uint32_t connectors_num = 0;
+- uint8_t *original_bios;
+- /* Step 1: Replace bios image with the new copy which will be
+- * patched */
+- bp->base.bios_local_image = kzalloc(bp->base.bios_size,
+- GFP_KERNEL);
+- if (bp->base.bios_local_image == NULL) {
+- BREAK_TO_DEBUGGER();
+- /* Failed to alloc bp->base.bios_local_image */
+- return;
+- }
+-
+- memmove(bp->base.bios_local_image, bp->base.bios, bp->base.bios_size);
+- original_bios = bp->base.bios;
+- bp->base.bios = bp->base.bios_local_image;
+- connector_tbl =
+- GET_IMAGE(ATOM_OBJECT_TABLE, connector_tbl_offset);
+-
+- /* Step 2: (only if MXM connector found) Patch BIOS image with
+- * info from external module */
+- if (mxm_connector_found &&
+- patch_bios_image_from_ext_display_connection_info(bp) !=
+- BP_RESULT_OK) {
+- /* Patching the bios image has failed. We will copy
+- * again original image provided and afterwards
+- * only remove null entries */
+- memmove(
+- bp->base.bios_local_image,
+- original_bios,
+- bp->base.bios_size);
+- }
+-
+- /* Step 3: Compact connector table (remove null entries, valid
+- * entries moved to beginning) */
+- for (i = 0; i < connector_tbl->ucNumberOfObjects; i++) {
+- object = &connector_tbl->asObjects[i];
+- object_id = object_id_from_bios_object_id(
+- le16_to_cpu(object->usObjectID));
+-
+- if (OBJECT_TYPE_CONNECTOR != object_id.type)
+- continue;
+-
+- if (i != connectors_num) {
+- memmove(
+- &connector_tbl->
+- asObjects[connectors_num],
+- object,
+- sizeof(ATOM_OBJECT));
+- }
+- ++connectors_num;
+- }
+- connector_tbl->ucNumberOfObjects = (uint8_t)connectors_num;
+- }
+-}
+-
+-static void bios_parser_post_init(struct dc_bios *dcb)
+-{
+- struct bios_parser *bp = BP_FROM_DCB(dcb);
+-
+- process_ext_display_connection_info(bp);
+-}
+-
+-/**
+ * bios_parser_set_scratch_critical_state
+ *
+ * @brief
+@@ -3959,22 +2796,12 @@ static enum bp_result bios_get_board_layout_info(
+ static const struct dc_vbios_funcs vbios_funcs = {
+ .get_connectors_number = bios_parser_get_connectors_number,
+
+- .get_encoder_id = bios_parser_get_encoder_id,
+-
+ .get_connector_id = bios_parser_get_connector_id,
+
+- .get_dst_number = bios_parser_get_dst_number,
+-
+ .get_src_obj = bios_parser_get_src_obj,
+
+- .get_dst_obj = bios_parser_get_dst_obj,
+-
+ .get_i2c_info = bios_parser_get_i2c_info,
+
+- .get_voltage_ddc_info = bios_parser_get_voltage_ddc_info,
+-
+- .get_thermal_ddc_info = bios_parser_get_thermal_ddc_info,
+-
+ .get_hpd_info = bios_parser_get_hpd_info,
+
+ .get_device_tag = bios_parser_get_device_tag,
+@@ -3993,7 +2820,6 @@ static const struct dc_vbios_funcs vbios_funcs = {
+
+ /* bios scratch register communication */
+ .is_accelerated_mode = bios_is_accelerated_mode,
+- .get_vga_enabled_displays = bios_get_vga_enabled_displays,
+
+ .set_scratch_critical_state = bios_parser_set_scratch_critical_state,
+
+@@ -4004,8 +2830,6 @@ static const struct dc_vbios_funcs vbios_funcs = {
+
+ .transmitter_control = bios_parser_transmitter_control,
+
+- .crt_control = bios_parser_crt_control, /* not used in DAL3. keep for now in case we need to support VGA on Bonaire */
+-
+ .enable_crtc = bios_parser_enable_crtc,
+
+ .adjust_pixel_clock = bios_parser_adjust_pixel_clock,
+@@ -4025,7 +2849,6 @@ static const struct dc_vbios_funcs vbios_funcs = {
+ .enable_disp_power_gating = bios_parser_enable_disp_power_gating,
+
+ /* SW init and patch */
+- .post_init = bios_parser_post_init, /* patch vbios table for mxm module by reading i2c */
+
+ .bios_parser_destroy = bios_parser_destroy,
+
+diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+index eab007e..ff764da 100644
+--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
++++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+@@ -166,21 +166,6 @@ static uint8_t bios_parser_get_connectors_number(struct dc_bios *dcb)
+ return count;
+ }
+
+-static struct graphics_object_id bios_parser_get_encoder_id(
+- struct dc_bios *dcb,
+- uint32_t i)
+-{
+- struct bios_parser *bp = BP_FROM_DCB(dcb);
+- struct graphics_object_id object_id = dal_graphics_object_id_init(
+- 0, ENUM_ID_UNKNOWN, OBJECT_TYPE_UNKNOWN);
+-
+- if (bp->object_info_tbl.v1_4->number_of_path > i)
+- object_id = object_id_from_bios_object_id(
+- bp->object_info_tbl.v1_4->display_path[i].encoderobjid);
+-
+- return object_id;
+-}
+-
+ static struct graphics_object_id bios_parser_get_connector_id(
+ struct dc_bios *dcb,
+ uint8_t i)
+@@ -204,26 +189,6 @@ static struct graphics_object_id bios_parser_get_connector_id(
+ return object_id;
+ }
+
+-
+-/* TODO: GetNumberOfSrc*/
+-
+-static uint32_t bios_parser_get_dst_number(struct dc_bios *dcb,
+- struct graphics_object_id id)
+-{
+- /* connector has 1 Dest, encoder has 0 Dest */
+- switch (id.type) {
+- case OBJECT_TYPE_ENCODER:
+- return 0;
+- case OBJECT_TYPE_CONNECTOR:
+- return 1;
+- default:
+- return 0;
+- }
+-}
+-
+-/* removed getSrcObjList, getDestObjList*/
+-
+-
+ static enum bp_result bios_parser_get_src_obj(struct dc_bios *dcb,
+ struct graphics_object_id object_id, uint32_t index,
+ struct graphics_object_id *src_object_id)
+@@ -283,52 +248,10 @@ static enum bp_result bios_parser_get_src_obj(struct dc_bios *dcb,
+ return bp_result;
+ }
+
+-static enum bp_result bios_parser_get_dst_obj(struct dc_bios *dcb,
+- struct graphics_object_id object_id, uint32_t index,
+- struct graphics_object_id *dest_object_id)
+-{
+- struct bios_parser *bp = BP_FROM_DCB(dcb);
+- unsigned int i;
+- enum bp_result bp_result = BP_RESULT_BADINPUT;
+- struct graphics_object_id obj_id = {0};
+- struct object_info_table *tbl = &bp->object_info_tbl;
+-
+- if (!dest_object_id)
+- return BP_RESULT_BADINPUT;
+-
+- switch (object_id.type) {
+- case OBJECT_TYPE_ENCODER:
+- /* TODO: since num of src must be less than 2.
+- * If found in for loop, should break.
+- * DAL2 implementation may be changed too
+- */
+- for (i = 0; i < tbl->v1_4->number_of_path; i++) {
+- obj_id = object_id_from_bios_object_id(
+- tbl->v1_4->display_path[i].encoderobjid);
+- if (object_id.type == obj_id.type &&
+- object_id.id == obj_id.id &&
+- object_id.enum_id ==
+- obj_id.enum_id) {
+- *dest_object_id =
+- object_id_from_bios_object_id(
+- tbl->v1_4->display_path[i].display_objid);
+- /* break; */
+- }
+- }
+- bp_result = BP_RESULT_OK;
+- break;
+- default:
+- break;
+- }
+-
+- return bp_result;
+-}
+-
+-
+ /* from graphics_object_id, find display path which includes the object_id */
+ static struct atom_display_object_path_v2 *get_bios_object(
+- struct bios_parser *bp,
+- struct graphics_object_id id)
++ struct bios_parser *bp,
++ struct graphics_object_id id)
+ {
+ unsigned int i;
+ struct graphics_object_id obj_id = {0};
+@@ -337,27 +260,22 @@ static struct atom_display_object_path_v2 *get_bios_object(
+ case OBJECT_TYPE_ENCODER:
+ for (i = 0; i < bp->object_info_tbl.v1_4->number_of_path; i++) {
+ obj_id = object_id_from_bios_object_id(
+- bp->object_info_tbl.v1_4->display_path[i].encoderobjid);
+- if (id.type == obj_id.type &&
+- id.id == obj_id.id &&
+- id.enum_id == obj_id.enum_id)
+- return
+- &bp->object_info_tbl.v1_4->display_path[i];
++ bp->object_info_tbl.v1_4->display_path[i].encoderobjid);
++ if (id.type == obj_id.type && id.id == obj_id.id
++ && id.enum_id == obj_id.enum_id)
++ return &bp->object_info_tbl.v1_4->display_path[i];
+ }
+ case OBJECT_TYPE_CONNECTOR:
+ case OBJECT_TYPE_GENERIC:
+ /* Both Generic and Connector Object ID
+ * will be stored on display_objid
+- */
++ */
+ for (i = 0; i < bp->object_info_tbl.v1_4->number_of_path; i++) {
+ obj_id = object_id_from_bios_object_id(
+- bp->object_info_tbl.v1_4->display_path[i].display_objid
+- );
+- if (id.type == obj_id.type &&
+- id.id == obj_id.id &&
+- id.enum_id == obj_id.enum_id)
+- return
+- &bp->object_info_tbl.v1_4->display_path[i];
++ bp->object_info_tbl.v1_4->display_path[i].display_objid);
++ if (id.type == obj_id.type && id.id == obj_id.id
++ && id.enum_id == obj_id.enum_id)
++ return &bp->object_info_tbl.v1_4->display_path[i];
+ }
+ default:
+ return NULL;
+@@ -489,99 +407,6 @@ static enum bp_result get_gpio_i2c_info(
+ return BP_RESULT_OK;
+ }
+
+-static enum bp_result get_voltage_ddc_info_v4(
+- uint8_t *i2c_line,
+- uint32_t index,
+- struct atom_common_table_header *header,
+- uint8_t *address)
+-{
+- enum bp_result result = BP_RESULT_NORECORD;
+- struct atom_voltage_objects_info_v4_1 *info =
+- (struct atom_voltage_objects_info_v4_1 *) address;
+-
+- uint8_t *voltage_current_object =
+- (uint8_t *) (&(info->voltage_object[0]));
+-
+- while ((address + le16_to_cpu(header->structuresize)) >
+- voltage_current_object) {
+- struct atom_i2c_voltage_object_v4 *object =
+- (struct atom_i2c_voltage_object_v4 *)
+- voltage_current_object;
+-
+- if (object->header.voltage_mode ==
+- ATOM_INIT_VOLTAGE_REGULATOR) {
+- if (object->header.voltage_type == index) {
+- *i2c_line = object->i2c_id ^ 0x90;
+- result = BP_RESULT_OK;
+- break;
+- }
+- }
+-
+- voltage_current_object +=
+- le16_to_cpu(object->header.object_size);
+- }
+- return result;
+-}
+-
+-static enum bp_result bios_parser_get_thermal_ddc_info(
+- struct dc_bios *dcb,
+- uint32_t i2c_channel_id,
+- struct graphics_object_i2c_info *info)
+-{
+- struct bios_parser *bp = BP_FROM_DCB(dcb);
+- struct i2c_id_config_access *config;
+- struct atom_i2c_record record;
+-
+- if (!info)
+- return BP_RESULT_BADINPUT;
+-
+- config = (struct i2c_id_config_access *) &i2c_channel_id;
+-
+- record.i2c_id = config->bfHW_Capable;
+- record.i2c_id |= config->bfI2C_LineMux;
+- record.i2c_id |= config->bfHW_EngineID;
+-
+- return get_gpio_i2c_info(bp, &record, info);
+-}
+-
+-static enum bp_result bios_parser_get_voltage_ddc_info(struct dc_bios *dcb,
+- uint32_t index,
+- struct graphics_object_i2c_info *info)
+-{
+- uint8_t i2c_line = 0;
+- enum bp_result result = BP_RESULT_NORECORD;
+- uint8_t *voltage_info_address;
+- struct atom_common_table_header *header;
+- struct atom_data_revision revision = {0};
+- struct bios_parser *bp = BP_FROM_DCB(dcb);
+-
+- if (!DATA_TABLES(voltageobject_info))
+- return result;
+-
+- voltage_info_address = bios_get_image(&bp->base,
+- DATA_TABLES(voltageobject_info),
+- sizeof(struct atom_common_table_header));
+-
+- header = (struct atom_common_table_header *) voltage_info_address;
+-
+- get_atom_data_table_revision(header, &revision);
+-
+- switch (revision.major) {
+- case 4:
+- if (revision.minor != 1)
+- break;
+- result = get_voltage_ddc_info_v4(&i2c_line, index, header,
+- voltage_info_address);
+- break;
+- }
+-
+- if (result == BP_RESULT_OK)
+- result = bios_parser_get_thermal_ddc_info(dcb,
+- i2c_line, info);
+-
+- return result;
+-}
+-
+ static enum bp_result bios_parser_get_hpd_info(
+ struct dc_bios *dcb,
+ struct graphics_object_id id,
+@@ -997,8 +822,8 @@ static enum bp_result bios_parser_get_spread_spectrum_info(
+ }
+
+ static enum bp_result get_embedded_panel_info_v2_1(
+- struct bios_parser *bp,
+- struct embedded_panel_info *info)
++ struct bios_parser *bp,
++ struct embedded_panel_info *info)
+ {
+ struct lcd_info_v2_1 *lvds;
+
+@@ -1021,92 +846,78 @@ static enum bp_result get_embedded_panel_info_v2_1(
+ memset(info, 0, sizeof(struct embedded_panel_info));
+
+ /* We need to convert from 10KHz units into KHz units */
+- info->lcd_timing.pixel_clk =
+- le16_to_cpu(lvds->lcd_timing.pixclk) * 10;
++ info->lcd_timing.pixel_clk = le16_to_cpu(lvds->lcd_timing.pixclk) * 10;
+ /* usHActive does not include borders, according to VBIOS team */
+- info->lcd_timing.horizontal_addressable =
+- le16_to_cpu(lvds->lcd_timing.h_active);
++ info->lcd_timing.horizontal_addressable = le16_to_cpu(lvds->lcd_timing.h_active);
+ /* usHBlanking_Time includes borders, so we should really be
+ * subtractingborders duing this translation, but LVDS generally
+ * doesn't have borders, so we should be okay leaving this as is for
+ * now. May need to revisit if we ever have LVDS with borders
+ */
+- info->lcd_timing.horizontal_blanking_time =
+- le16_to_cpu(lvds->lcd_timing.h_blanking_time);
++ info->lcd_timing.horizontal_blanking_time = le16_to_cpu(lvds->lcd_timing.h_blanking_time);
+ /* usVActive does not include borders, according to VBIOS team*/
+- info->lcd_timing.vertical_addressable =
+- le16_to_cpu(lvds->lcd_timing.v_active);
++ info->lcd_timing.vertical_addressable = le16_to_cpu(lvds->lcd_timing.v_active);
+ /* usVBlanking_Time includes borders, so we should really be
+ * subtracting borders duing this translation, but LVDS generally
+ * doesn't have borders, so we should be okay leaving this as is for
+ * now. May need to revisit if we ever have LVDS with borders
+ */
+- info->lcd_timing.vertical_blanking_time =
+- le16_to_cpu(lvds->lcd_timing.v_blanking_time);
+- info->lcd_timing.horizontal_sync_offset =
+- le16_to_cpu(lvds->lcd_timing.h_sync_offset);
+- info->lcd_timing.horizontal_sync_width =
+- le16_to_cpu(lvds->lcd_timing.h_sync_width);
+- info->lcd_timing.vertical_sync_offset =
+- le16_to_cpu(lvds->lcd_timing.v_sync_offset);
+- info->lcd_timing.vertical_sync_width =
+- le16_to_cpu(lvds->lcd_timing.v_syncwidth);
++ info->lcd_timing.vertical_blanking_time = le16_to_cpu(lvds->lcd_timing.v_blanking_time);
++ info->lcd_timing.horizontal_sync_offset = le16_to_cpu(lvds->lcd_timing.h_sync_offset);
++ info->lcd_timing.horizontal_sync_width = le16_to_cpu(lvds->lcd_timing.h_sync_width);
++ info->lcd_timing.vertical_sync_offset = le16_to_cpu(lvds->lcd_timing.v_sync_offset);
++ info->lcd_timing.vertical_sync_width = le16_to_cpu(lvds->lcd_timing.v_syncwidth);
+ info->lcd_timing.horizontal_border = lvds->lcd_timing.h_border;
+ info->lcd_timing.vertical_border = lvds->lcd_timing.v_border;
+
+ /* not provided by VBIOS */
+ info->lcd_timing.misc_info.HORIZONTAL_CUT_OFF = 0;
+
+- info->lcd_timing.misc_info.H_SYNC_POLARITY =
+- ~(uint32_t)
+- (lvds->lcd_timing.miscinfo & ATOM_HSYNC_POLARITY);
+- info->lcd_timing.misc_info.V_SYNC_POLARITY =
+- ~(uint32_t)
+- (lvds->lcd_timing.miscinfo & ATOM_VSYNC_POLARITY);
++ info->lcd_timing.misc_info.H_SYNC_POLARITY = ~(uint32_t) (lvds->lcd_timing.miscinfo
++ & ATOM_HSYNC_POLARITY);
++ info->lcd_timing.misc_info.V_SYNC_POLARITY = ~(uint32_t) (lvds->lcd_timing.miscinfo
++ & ATOM_VSYNC_POLARITY);
+
+ /* not provided by VBIOS */
+ info->lcd_timing.misc_info.VERTICAL_CUT_OFF = 0;
+
+- info->lcd_timing.misc_info.H_REPLICATION_BY2 =
+- !!(lvds->lcd_timing.miscinfo & ATOM_H_REPLICATIONBY2);
+- info->lcd_timing.misc_info.V_REPLICATION_BY2 =
+- !!(lvds->lcd_timing.miscinfo & ATOM_V_REPLICATIONBY2);
+- info->lcd_timing.misc_info.COMPOSITE_SYNC =
+- !!(lvds->lcd_timing.miscinfo & ATOM_COMPOSITESYNC);
+- info->lcd_timing.misc_info.INTERLACE =
+- !!(lvds->lcd_timing.miscinfo & ATOM_INTERLACE);
++ info->lcd_timing.misc_info.H_REPLICATION_BY2 = !!(lvds->lcd_timing.miscinfo
++ & ATOM_H_REPLICATIONBY2);
++ info->lcd_timing.misc_info.V_REPLICATION_BY2 = !!(lvds->lcd_timing.miscinfo
++ & ATOM_V_REPLICATIONBY2);
++ info->lcd_timing.misc_info.COMPOSITE_SYNC = !!(lvds->lcd_timing.miscinfo
++ & ATOM_COMPOSITESYNC);
++ info->lcd_timing.misc_info.INTERLACE = !!(lvds->lcd_timing.miscinfo & ATOM_INTERLACE);
+
+ /* not provided by VBIOS*/
+ info->lcd_timing.misc_info.DOUBLE_CLOCK = 0;
+ /* not provided by VBIOS*/
+ info->ss_id = 0;
+
+- info->realtek_eDPToLVDS =
+- !!(lvds->dplvdsrxid == eDP_TO_LVDS_REALTEK_ID);
++ info->realtek_eDPToLVDS = !!(lvds->dplvdsrxid == eDP_TO_LVDS_REALTEK_ID);
+
+ return BP_RESULT_OK;
+ }
+
+ static enum bp_result bios_parser_get_embedded_panel_info(
+- struct dc_bios *dcb,
+- struct embedded_panel_info *info)
++ struct dc_bios *dcb,
++ struct embedded_panel_info *info)
+ {
+- struct bios_parser *bp = BP_FROM_DCB(dcb);
++ struct bios_parser
++ *bp = BP_FROM_DCB(dcb);
+ struct atom_common_table_header *header;
+ struct atom_data_revision tbl_revision;
+
+ if (!DATA_TABLES(lcd_info))
+ return BP_RESULT_FAILURE;
+
+- header = GET_IMAGE(struct atom_common_table_header,
+- DATA_TABLES(lcd_info));
++ header = GET_IMAGE(struct atom_common_table_header, DATA_TABLES(lcd_info));
+
+ if (!header)
+ return BP_RESULT_BADBIOSTABLE;
+
+ get_atom_data_table_revision(header, &tbl_revision);
+
+-
+ switch (tbl_revision.major) {
+ case 2:
+ switch (tbl_revision.minor) {
+@@ -1174,12 +985,6 @@ static bool bios_parser_is_device_id_supported(
+ mask) != 0;
+ }
+
+-static void bios_parser_post_init(
+- struct dc_bios *dcb)
+-{
+- /* TODO for OPM module. Need implement later */
+-}
+-
+ static uint32_t bios_parser_get_ss_entry_number(
+ struct dc_bios *dcb,
+ enum as_signal_type signal)
+@@ -1238,17 +1043,6 @@ static enum bp_result bios_parser_set_dce_clock(
+ return bp->cmd_tbl.set_dce_clock(bp, bp_params);
+ }
+
+-static unsigned int bios_parser_get_smu_clock_info(
+- struct dc_bios *dcb)
+-{
+- struct bios_parser *bp = BP_FROM_DCB(dcb);
+-
+- if (!bp->cmd_tbl.get_smu_clock_info)
+- return BP_RESULT_FAILURE;
+-
+- return bp->cmd_tbl.get_smu_clock_info(bp, 0);
+-}
+-
+ static enum bp_result bios_parser_program_crtc_timing(
+ struct dc_bios *dcb,
+ struct bp_hw_crtc_timing_parameters *bp_params)
+@@ -1306,13 +1100,6 @@ static bool bios_parser_is_accelerated_mode(
+ return bios_is_accelerated_mode(dcb);
+ }
+
+-static uint32_t bios_parser_get_vga_enabled_displays(
+- struct dc_bios *bios)
+-{
+- return bios_get_vga_enabled_displays(bios);
+-}
+-
+-
+ /**
+ * bios_parser_set_scratch_critical_state
+ *
+@@ -2071,22 +1858,12 @@ static enum bp_result bios_get_board_layout_info(
+ static const struct dc_vbios_funcs vbios_funcs = {
+ .get_connectors_number = bios_parser_get_connectors_number,
+
+- .get_encoder_id = bios_parser_get_encoder_id,
+-
+ .get_connector_id = bios_parser_get_connector_id,
+
+- .get_dst_number = bios_parser_get_dst_number,
+-
+ .get_src_obj = bios_parser_get_src_obj,
+
+- .get_dst_obj = bios_parser_get_dst_obj,
+-
+ .get_i2c_info = bios_parser_get_i2c_info,
+
+- .get_voltage_ddc_info = bios_parser_get_voltage_ddc_info,
+-
+- .get_thermal_ddc_info = bios_parser_get_thermal_ddc_info,
+-
+ .get_hpd_info = bios_parser_get_hpd_info,
+
+ .get_device_tag = bios_parser_get_device_tag,
+@@ -2105,10 +1882,7 @@ static const struct dc_vbios_funcs vbios_funcs = {
+
+ .is_device_id_supported = bios_parser_is_device_id_supported,
+
+-
+-
+ .is_accelerated_mode = bios_parser_is_accelerated_mode,
+- .get_vga_enabled_displays = bios_parser_get_vga_enabled_displays,
+
+ .set_scratch_critical_state = bios_parser_set_scratch_critical_state,
+
+@@ -2126,20 +1900,12 @@ static const struct dc_vbios_funcs vbios_funcs = {
+
+ .program_crtc_timing = bios_parser_program_crtc_timing,
+
+- /* .blank_crtc = bios_parser_blank_crtc, */
+-
+ .crtc_source_select = bios_parser_crtc_source_select,
+
+- /* .external_encoder_control = bios_parser_external_encoder_control, */
+-
+ .enable_disp_power_gating = bios_parser_enable_disp_power_gating,
+
+- .post_init = bios_parser_post_init,
+-
+ .bios_parser_destroy = firmware_parser_destroy,
+
+- .get_smu_clock_info = bios_parser_get_smu_clock_info,
+-
+ .get_board_layout_info = bios_get_board_layout_info,
+ };
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
+index 90082ba..8130b95 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
+@@ -41,38 +41,17 @@
+ struct dc_vbios_funcs {
+ uint8_t (*get_connectors_number)(struct dc_bios *bios);
+
+- struct graphics_object_id (*get_encoder_id)(
+- struct dc_bios *bios,
+- uint32_t i);
+ struct graphics_object_id (*get_connector_id)(
+ struct dc_bios *bios,
+ uint8_t connector_index);
+- uint32_t (*get_dst_number)(
+- struct dc_bios *bios,
+- struct graphics_object_id id);
+-
+ enum bp_result (*get_src_obj)(
+ struct dc_bios *bios,
+ struct graphics_object_id object_id, uint32_t index,
+ struct graphics_object_id *src_object_id);
+- enum bp_result (*get_dst_obj)(
+- struct dc_bios *bios,
+- struct graphics_object_id object_id, uint32_t index,
+- struct graphics_object_id *dest_object_id);
+-
+ enum bp_result (*get_i2c_info)(
+ struct dc_bios *dcb,
+ struct graphics_object_id id,
+ struct graphics_object_i2c_info *info);
+-
+- enum bp_result (*get_voltage_ddc_info)(
+- struct dc_bios *bios,
+- uint32_t index,
+- struct graphics_object_i2c_info *info);
+- enum bp_result (*get_thermal_ddc_info)(
+- struct dc_bios *bios,
+- uint32_t i2c_channel_id,
+- struct graphics_object_i2c_info *info);
+ enum bp_result (*get_hpd_info)(
+ struct dc_bios *bios,
+ struct graphics_object_id id,
+@@ -105,35 +84,8 @@ struct dc_vbios_funcs {
+ struct graphics_object_id object_id,
+ struct bp_encoder_cap_info *info);
+
+- bool (*is_lid_status_changed)(
+- struct dc_bios *bios);
+- bool (*is_display_config_changed)(
+- struct dc_bios *bios);
+ bool (*is_accelerated_mode)(
+ struct dc_bios *bios);
+- uint32_t (*get_vga_enabled_displays)(
+- struct dc_bios *bios);
+- void (*get_bios_event_info)(
+- struct dc_bios *bios,
+- struct bios_event_info *info);
+- void (*update_requested_backlight_level)(
+- struct dc_bios *bios,
+- uint32_t backlight_8bit);
+- uint32_t (*get_requested_backlight_level)(
+- struct dc_bios *bios);
+- void (*take_backlight_control)(
+- struct dc_bios *bios,
+- bool cntl);
+-
+- bool (*is_active_display)(
+- struct dc_bios *bios,
+- enum signal_type signal,
+- const struct connector_device_tag_info *device_tag);
+- enum controller_id (*get_embedded_display_controller_id)(
+- struct dc_bios *bios);
+- uint32_t (*get_embedded_display_refresh_rate)(
+- struct dc_bios *bios);
+-
+ void (*set_scratch_critical_state)(
+ struct dc_bios *bios,
+ bool state);
+@@ -149,11 +101,6 @@ struct dc_vbios_funcs {
+ enum bp_result (*transmitter_control)(
+ struct dc_bios *bios,
+ struct bp_transmitter_control *cntl);
+- enum bp_result (*crt_control)(
+- struct dc_bios *bios,
+- enum engine_id engine_id,
+- bool enable,
+- uint32_t pixel_clock);
+ enum bp_result (*enable_crtc)(
+ struct dc_bios *bios,
+ enum controller_id id,
+@@ -167,8 +114,6 @@ struct dc_vbios_funcs {
+ enum bp_result (*set_dce_clock)(
+ struct dc_bios *bios,
+ struct bp_set_dce_clock_parameters *bp_params);
+- unsigned int (*get_smu_clock_info)(
+- struct dc_bios *bios);
+ enum bp_result (*enable_spread_spectrum_on_ppll)(
+ struct dc_bios *bios,
+ struct bp_spread_spectrum_parameters *bp_params,
+@@ -183,20 +128,11 @@ struct dc_vbios_funcs {
+ enum bp_result (*program_display_engine_pll)(
+ struct dc_bios *bios,
+ struct bp_pixel_clock_parameters *bp_params);
+-
+- enum signal_type (*dac_load_detect)(
+- struct dc_bios *bios,
+- struct graphics_object_id encoder,
+- struct graphics_object_id connector,
+- enum signal_type display_signal);
+-
+ enum bp_result (*enable_disp_power_gating)(
+ struct dc_bios *bios,
+ enum controller_id controller_id,
+ enum bp_pipe_control_action action);
+
+- void (*post_init)(struct dc_bios *bios);
+-
+ void (*bios_parser_destroy)(struct dc_bios **dcb);
+
+ enum bp_result (*get_board_layout_info)(
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5355-drm-amd-display-remove-unused-clk_src-code.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5355-drm-amd-display-remove-unused-clk_src-code.patch
new file mode 100644
index 00000000..68f1d88a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5355-drm-amd-display-remove-unused-clk_src-code.patch
@@ -0,0 +1,137 @@
+From 121ca5763d0c15f114d9a1c2bc02285429243b7c Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Mon, 30 Jul 2018 14:45:42 -0400
+Subject: [PATCH 5355/5725] drm/amd/display: remove unused clk_src code
+
+Change-Id: I54e39a4387c5abeec56a53e1a0a094a51b9839fe
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+---
+ .../gpu/drm/amd/display/dc/dce/dce_clock_source.c | 87 +---------------------
+ drivers/gpu/drm/amd/display/dc/inc/clock_source.h | 4 -
+ 2 files changed, 1 insertion(+), 90 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+index 9b38027..f7b22bcf 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+@@ -622,90 +622,6 @@ static uint32_t dce112_get_pix_clk_dividers(
+ return 0;
+ }
+
+-static uint32_t dce110_get_pll_pixel_rate_in_hz(
+- struct clock_source *cs,
+- struct pixel_clk_params *pix_clk_params,
+- struct pll_settings *pll_settings)
+-{
+- uint32_t inst = pix_clk_params->controller_id - CONTROLLER_ID_D0;
+- struct dc *dc_core = cs->ctx->dc;
+- struct dc_state *context = dc_core->current_state;
+- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[inst];
+-
+- /* This function need separate to different DCE version, before separate, just use pixel clock */
+- return pipe_ctx->stream->phy_pix_clk;
+-
+-}
+-
+-static uint32_t dce110_get_dp_pixel_rate_from_combo_phy_pll(
+- struct clock_source *cs,
+- struct pixel_clk_params *pix_clk_params,
+- struct pll_settings *pll_settings)
+-{
+- uint32_t inst = pix_clk_params->controller_id - CONTROLLER_ID_D0;
+- struct dc *dc_core = cs->ctx->dc;
+- struct dc_state *context = dc_core->current_state;
+- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[inst];
+-
+- /* This function need separate to different DCE version, before separate, just use pixel clock */
+- return pipe_ctx->stream->phy_pix_clk;
+-}
+-
+-static uint32_t dce110_get_d_to_pixel_rate_in_hz(
+- struct clock_source *cs,
+- struct pixel_clk_params *pix_clk_params,
+- struct pll_settings *pll_settings)
+-{
+- uint32_t inst = pix_clk_params->controller_id - CONTROLLER_ID_D0;
+- struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(cs);
+- int dto_enabled = 0;
+- struct fixed31_32 pix_rate;
+-
+- REG_GET(PIXEL_RATE_CNTL[inst], DP_DTO0_ENABLE, &dto_enabled);
+-
+- if (dto_enabled) {
+- uint32_t phase = 0;
+- uint32_t modulo = 0;
+- REG_GET(PHASE[inst], DP_DTO0_PHASE, &phase);
+- REG_GET(MODULO[inst], DP_DTO0_MODULO, &modulo);
+-
+- if (modulo == 0) {
+- return 0;
+- }
+-
+- pix_rate = dc_fixpt_from_int(clk_src->ref_freq_khz);
+- pix_rate = dc_fixpt_mul_int(pix_rate, 1000);
+- pix_rate = dc_fixpt_mul_int(pix_rate, phase);
+- pix_rate = dc_fixpt_div_int(pix_rate, modulo);
+-
+- return dc_fixpt_round(pix_rate);
+- } else {
+- return dce110_get_dp_pixel_rate_from_combo_phy_pll(cs, pix_clk_params, pll_settings);
+- }
+-}
+-
+-static uint32_t dce110_get_pix_rate_in_hz(
+- struct clock_source *cs,
+- struct pixel_clk_params *pix_clk_params,
+- struct pll_settings *pll_settings)
+-{
+- uint32_t pix_rate = 0;
+- switch (pix_clk_params->signal_type) {
+- case SIGNAL_TYPE_DISPLAY_PORT:
+- case SIGNAL_TYPE_DISPLAY_PORT_MST:
+- case SIGNAL_TYPE_EDP:
+- case SIGNAL_TYPE_VIRTUAL:
+- pix_rate = dce110_get_d_to_pixel_rate_in_hz(cs, pix_clk_params, pll_settings);
+- break;
+- case SIGNAL_TYPE_HDMI_TYPE_A:
+- default:
+- pix_rate = dce110_get_pll_pixel_rate_in_hz(cs, pix_clk_params, pll_settings);
+- break;
+- }
+-
+- return pix_rate;
+-}
+-
+ static bool disable_spread_spectrum(struct dce110_clk_src *clk_src)
+ {
+ enum bp_result result;
+@@ -1073,8 +989,7 @@ static const struct clock_source_funcs dce112_clk_src_funcs = {
+ static const struct clock_source_funcs dce110_clk_src_funcs = {
+ .cs_power_down = dce110_clock_source_power_down,
+ .program_pix_clk = dce110_program_pix_clk,
+- .get_pix_clk_dividers = dce110_get_pix_clk_dividers,
+- .get_pix_rate_in_hz = dce110_get_pix_rate_in_hz
++ .get_pix_clk_dividers = dce110_get_pix_clk_dividers
+ };
+
+
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/clock_source.h b/drivers/gpu/drm/amd/display/dc/inc/clock_source.h
+index ebcf67b..47ef904 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/clock_source.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/clock_source.h
+@@ -166,10 +166,6 @@ struct clock_source_funcs {
+ struct clock_source *,
+ struct pixel_clk_params *,
+ struct pll_settings *);
+- uint32_t (*get_pix_rate_in_hz)(
+- struct clock_source *,
+- struct pixel_clk_params *,
+- struct pll_settings *);
+ };
+
+ struct clock_source {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5356-drm-amd-display-add-disconnect_delay-to-dc_panel_pat.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5356-drm-amd-display-add-disconnect_delay-to-dc_panel_pat.patch
new file mode 100644
index 00000000..5b2468cf
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5356-drm-amd-display-add-disconnect_delay-to-dc_panel_pat.patch
@@ -0,0 +1,31 @@
+From e2889bc861fba264bdd0ea3a9e3de562092808f1 Mon Sep 17 00:00:00 2001
+From: Derek Lai <Derek.Lai@amd.com>
+Date: Thu, 23 Aug 2018 15:13:23 +0800
+Subject: [PATCH 5356/5725] drm/amd/display: add disconnect_delay to
+ dc_panel_patch
+
+Some displays need a disconnect delay. Adding this parameter for future use.
+
+Change-Id: I0075cf2be0fe658f07425f80d23c592726929a76
+Signed-off-by: Derek Lai <Derek.Lai@amd.com>
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc_types.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
+index 58a6ef8..4fb6278 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
+@@ -191,6 +191,7 @@ union display_content_support {
+ };
+
+ struct dc_panel_patch {
++ unsigned int disconnect_delay;
+ unsigned int dppowerup_delay;
+ unsigned int extra_t12_ms;
+ };
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5357-drm-amd-display-add-aux-transition-event-log.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5357-drm-amd-display-add-aux-transition-event-log.patch
new file mode 100644
index 00000000..b32e03d5
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5357-drm-amd-display-add-aux-transition-event-log.patch
@@ -0,0 +1,177 @@
+From 00973bfeebbf4791375d6b2765eb7599d8cc6a8d Mon Sep 17 00:00:00 2001
+From: Chiawen Huang <chiawen.huang@amd.com>
+Date: Fri, 24 Aug 2018 17:45:28 +0800
+Subject: [PATCH 5357/5725] drm/amd/display: add aux transition event log.
+
+[Why]
+Enhance aux transition debugging information.
+
+[How]
+Added Aux request and reply event log.
+
+Change-Id: I19f3fd904089f57f0eaebbcc0cb613430c11b5b0
+Signed-off-by: Chiawen Huang <chiawen.huang@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dm_event_log.h | 39 ++++++++++++++++++++++
+ drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c | 13 ++++++++
+ .../display/dc/i2caux/dce110/aux_engine_dce110.c | 7 +++-
+ .../gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c | 18 ++++++++++
+ 4 files changed, 76 insertions(+), 1 deletion(-)
+ create mode 100644 drivers/gpu/drm/amd/display/dc/dm_event_log.h
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dm_event_log.h b/drivers/gpu/drm/amd/display/dc/dm_event_log.h
+new file mode 100644
+index 0000000..c1ce2dd
+--- /dev/null
++++ b/drivers/gpu/drm/amd/display/dc/dm_event_log.h
+@@ -0,0 +1,39 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++/**
++ * This file defines external dependencies of Display Core.
++ */
++
++#ifndef __DM_EVENT_LOG_H__
++
++#define __DM_EVENT_LOG_H__
++
++#define EVENT_LOG_I2CAUX_READ(transType, dcc, address, status, len, data)
++#define EVENT_LOG_I2CAUX_WRITE(transType, dcc, address, status, len, data)
++#define EVENT_LOG_AUX_REQ(dcc, type, action, address, len, data)
++#define EVENT_LOG_AUX_Reply(dcc, type, swStatus, replyStatus, len, data)
++
++#endif
+diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c
+index 0afd2fa..03292c5 100644
+--- a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c
++++ b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c
+@@ -24,6 +24,7 @@
+ */
+
+ #include "dm_services.h"
++#include "dm_event_log.h"
+
+ /*
+ * Pre-requisites: headers required by header of this unit
+@@ -296,6 +297,12 @@ static bool read_command(
+
+ if (request->payload.address_space ==
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
++ EVENT_LOG_I2CAUX_READ(request->payload.address_space,
++ engine->base.ddc->pin_data->en,
++ request->payload.address,
++ request->status,
++ request->payload.length,
++ request->payload.data);
+ DC_LOG_I2C_AUX("READ: addr:0x%x value:0x%x Result:%d",
+ request->payload.address,
+ request->payload.data[0],
+@@ -512,6 +519,12 @@ static bool write_command(
+
+ if (request->payload.address_space ==
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
++ EVENT_LOG_I2CAUX_WRITE(request->payload.address_space,
++ engine->base.ddc->pin_data->en,
++ request->payload.address,
++ request->status,
++ request->payload.length,
++ request->payload.data);
+ DC_LOG_I2C_AUX("WRITE: addr:0x%x value:0x%x Result:%d",
+ request->payload.address,
+ request->payload.data[0],
+diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
+index ae5caa9..4a88fc7 100644
+--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
++++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
+@@ -24,6 +24,7 @@
+ */
+
+ #include "dm_services.h"
++#include "dm_event_log.h"
+
+ /*
+ * Pre-requisites: headers required by header of this unit
+@@ -273,6 +274,8 @@ static void submit_channel_request(
+ REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0,
+ 10, aux110->timeout_period/10);
+ REG_UPDATE(AUX_SW_CONTROL, AUX_SW_GO, 1);
++ EVENT_LOG_AUX_REQ(engine->base.ddc->pin_data->en, Native, request->action,
++ request->address, request->length, request->data);
+ }
+
+ static int read_channel_reply(struct aux_engine *engine, uint32_t size,
+@@ -336,7 +339,9 @@ static void process_channel_reply(
+ uint32_t sw_status;
+
+ bytes_replied = read_channel_reply(engine, reply->length, reply->data,
+- &reply_result, &sw_status);
++ &reply_result, &sw_status);
++ EVENT_LOG_AUX_Reply(engine->base.ddc->pin_data->en, Native,
++ sw_status, reply_result, bytes_replied, reply->data);
+
+ /* in case HPD is LOW, exit AUX transaction */
+ if ((sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) {
+diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c
+index 4b54fcf..1747b9f 100644
+--- a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c
++++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c
+@@ -24,6 +24,7 @@
+ */
+
+ #include "dm_services.h"
++#include "dm_event_log.h"
+
+ /*
+ * Pre-requisites: headers required by header of this unit
+@@ -170,6 +171,23 @@ bool dal_i2c_hw_engine_submit_request(
+ process_channel_reply(&hw_engine->base, &reply);
+ }
+
++ if (i2caux_request->operation == I2CAUX_TRANSACTION_READ) {
++ EVENT_LOG_I2CAUX_READ(i2caux_request->payload.address_space,
++ engine->ddc->pin_data->en,
++ i2caux_request->payload.address,
++ i2caux_request->status,
++ i2caux_request->payload.length,
++ i2caux_request->payload.data);
++ } else if (i2caux_request->operation == I2CAUX_TRANSACTION_WRITE) {
++ EVENT_LOG_I2CAUX_WRITE(i2caux_request->payload.address_space,
++ engine->ddc->pin_data->en,
++ i2caux_request->payload.address,
++ i2caux_request->status,
++ i2caux_request->payload.length,
++ i2caux_request->payload.data);
++ }
++
++
+ return result;
+ }
+
+--
+2.7.4
+
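Note: the dm_event_log.h hooks introduced in the patch above are defined as empty macros, so the new call sites compile to nothing until a DM platform supplies real definitions. A minimal sketch of what such a definition could look like, assuming the existing variadic DC_LOG_I2C_AUX macro is available; the format string and argument selection below are illustrative only and are not part of the patch:

	/* Hypothetical debug-build definition of one of the empty hooks;
	 * the patch itself intentionally leaves it as a no-op. */
	#define EVENT_LOG_AUX_REQ(dcc, type, action, address, len, data) \
		DC_LOG_I2C_AUX("AUX REQ: ddc:%d action:%d addr:0x%x len:%d", \
			       (dcc), (action), (address), (len))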
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5358-drm-amd-display-num-of-sw-i2c-aux-engines-less-than-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5358-drm-amd-display-num-of-sw-i2c-aux-engines-less-than-.patch
new file mode 100644
index 00000000..51075ed3
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5358-drm-amd-display-num-of-sw-i2c-aux-engines-less-than-.patch
@@ -0,0 +1,269 @@
+From 9c122507f7a1d46aec6b5f662a10941fd122deb7 Mon Sep 17 00:00:00 2001
+From: Hersen Wu <hersenxs.wu@amd.com>
+Date: Tue, 21 Aug 2018 09:35:47 -0400
+Subject: [PATCH 5358/5725] drm/amd/display: num of sw i2c/aux engines less
+ than num of connectors
+
+[why]
+On the AMD Stoney reference board there are only 2 pipes (not including
+underlay) and 3 connectors, so at resource creation only 2 I2C/AUX
+engines are created. Within dc_link_aux_transfer, when pin_data_en = 2,
+engines[ddc_pin->pin_data->en] is NULL. The NULL pointer is dereferenced
+later, causing a system crash.
+
+[how]
+Each ASIC design has a fixed number of DDC engines on the HW side.
+For each DDC engine, create its I2C/AUX engine on the SW side.
+
+Change-Id: I5bf6e45756b1e95f246a502219011755c9d465cf
+Signed-off-by: Hersen Wu <hersenxs.wu@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+---
+ .../drm/amd/display/dc/dce100/dce100_resource.c | 6 +++++-
+ .../drm/amd/display/dc/dce110/dce110_resource.c | 4 ++++
+ .../drm/amd/display/dc/dce112/dce112_resource.c | 5 +++++
+ .../drm/amd/display/dc/dce120/dce120_resource.c | 9 ++++++--
+ .../gpu/drm/amd/display/dc/dce80/dce80_resource.c | 25 ++++++++++++++++++++++
+ .../gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 7 ++++--
+ drivers/gpu/drm/amd/display/dc/inc/resource.h | 1 +
+ 7 files changed, 52 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+index ae613b0..b1cc388 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+@@ -372,7 +372,8 @@ static const struct resource_caps res_cap = {
+ .num_timing_generator = 6,
+ .num_audio = 6,
+ .num_stream_encoder = 6,
+- .num_pll = 3
++ .num_pll = 3,
++ .num_ddc = 6,
+ };
+
+ #define CTX ctx
+@@ -1004,6 +1005,9 @@ static bool construct(
+ "DC: failed to create output pixel processor!\n");
+ goto res_create_fail;
+ }
++ }
++
++ for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
+ pool->base.engines[i] = dce100_aux_engine_create(ctx, i);
+ if (pool->base.engines[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+index cfca786..b44cc70 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+@@ -378,6 +378,7 @@ static const struct resource_caps carrizo_resource_cap = {
+ .num_audio = 3,
+ .num_stream_encoder = 3,
+ .num_pll = 2,
++ .num_ddc = 3,
+ };
+
+ static const struct resource_caps stoney_resource_cap = {
+@@ -386,6 +387,7 @@ static const struct resource_caps stoney_resource_cap = {
+ .num_audio = 3,
+ .num_stream_encoder = 3,
+ .num_pll = 2,
++ .num_ddc = 3,
+ };
+
+ #define CTX ctx
+@@ -1339,7 +1341,9 @@ static bool construct(
+ "DC: failed to create output pixel processor!\n");
+ goto res_create_fail;
+ }
++ }
+
++ for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
+ pool->base.engines[i] = dce110_aux_engine_create(ctx, i);
+ if (pool->base.engines[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+index f3d55a6..0f8332e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+@@ -384,6 +384,7 @@ static const struct resource_caps polaris_10_resource_cap = {
+ .num_audio = 6,
+ .num_stream_encoder = 6,
+ .num_pll = 8, /* why 8? 6 combo PHY PLL + 2 regular PLLs? */
++ .num_ddc = 6,
+ };
+
+ static const struct resource_caps polaris_11_resource_cap = {
+@@ -391,6 +392,7 @@ static const struct resource_caps polaris_11_resource_cap = {
+ .num_audio = 5,
+ .num_stream_encoder = 5,
+ .num_pll = 8, /* why 8? 6 combo PHY PLL + 2 regular PLLs? */
++ .num_ddc = 5,
+ };
+
+ #define CTX ctx
+@@ -1286,6 +1288,9 @@ static bool construct(
+ "DC:failed to create output pixel processor!\n");
+ goto res_create_fail;
+ }
++ }
++
++ for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
+ pool->base.engines[i] = dce112_aux_engine_create(ctx, i);
+ if (pool->base.engines[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+index 8afa43a..5905580 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+@@ -436,6 +436,7 @@ static const struct resource_caps res_cap = {
+ .num_audio = 7,
+ .num_stream_encoder = 6,
+ .num_pll = 6,
++ .num_ddc = 6,
+ };
+
+ static const struct dc_debug_options debug_defaults = {
+@@ -1062,6 +1063,12 @@ static bool construct(
+ dm_error(
+ "DC: failed to create output pixel processor!\n");
+ }
++
++ /* check next valid pipe */
++ j++;
++ }
++
++ for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
+ pool->base.engines[i] = dce120_aux_engine_create(ctx, i);
+ if (pool->base.engines[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+@@ -1077,8 +1084,6 @@ static bool construct(
+ goto res_create_fail;
+ }
+ pool->base.sw_i2cs[i] = NULL;
+- /* check next valid pipe */
+- j++;
+ }
+
+ /* valid pipe num */
+diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+index 4eae859..1dc590c 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+@@ -367,6 +367,7 @@ static const struct resource_caps res_cap = {
+ .num_audio = 6,
+ .num_stream_encoder = 6,
+ .num_pll = 3,
++ .num_ddc = 6,
+ };
+
+ static const struct resource_caps res_cap_81 = {
+@@ -374,6 +375,7 @@ static const struct resource_caps res_cap_81 = {
+ .num_audio = 7,
+ .num_stream_encoder = 7,
+ .num_pll = 3,
++ .num_ddc = 6,
+ };
+
+ static const struct resource_caps res_cap_83 = {
+@@ -381,6 +383,7 @@ static const struct resource_caps res_cap_83 = {
+ .num_audio = 6,
+ .num_stream_encoder = 6,
+ .num_pll = 2,
++ .num_ddc = 2,
+ };
+
+ static const struct dce_dmcu_registers dmcu_regs = {
+@@ -992,7 +995,9 @@ static bool dce80_construct(
+ dm_error("DC: failed to create output pixel processor!\n");
+ goto res_create_fail;
+ }
++ }
+
++ for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
+ pool->base.engines[i] = dce80_aux_engine_create(ctx, i);
+ if (pool->base.engines[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+@@ -1200,6 +1205,16 @@ static bool dce81_construct(
+ dm_error("DC: failed to create output pixel processor!\n");
+ goto res_create_fail;
+ }
++ }
++
++ for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
++ pool->base.engines[i] = dce80_aux_engine_create(ctx, i);
++ if (pool->base.engines[i] == NULL) {
++ BREAK_TO_DEBUGGER();
++ dm_error(
++ "DC:failed to create aux engine!!\n");
++ goto res_create_fail;
++ }
+ pool->base.hw_i2cs[i] = dce80_i2c_hw_create(ctx, i);
+ if (pool->base.hw_i2cs[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+@@ -1396,6 +1411,16 @@ static bool dce83_construct(
+ dm_error("DC: failed to create output pixel processor!\n");
+ goto res_create_fail;
+ }
++ }
++
++ for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
++ pool->base.engines[i] = dce80_aux_engine_create(ctx, i);
++ if (pool->base.engines[i] == NULL) {
++ BREAK_TO_DEBUGGER();
++ dm_error(
++ "DC:failed to create aux engine!!\n");
++ goto res_create_fail;
++ }
+ pool->base.hw_i2cs[i] = dce80_i2c_hw_create(ctx, i);
+ if (pool->base.hw_i2cs[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+index e8a22d5..cb1b134 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+@@ -504,6 +504,7 @@ static const struct resource_caps res_cap = {
+ .num_audio = 4,
+ .num_stream_encoder = 4,
+ .num_pll = 4,
++ .num_ddc = 4,
+ };
+
+ static const struct dc_debug_options debug_defaults_drv = {
+@@ -1370,7 +1371,11 @@ static bool construct(
+ dm_error("DC: failed to create tg!\n");
+ goto fail;
+ }
++ /* check next valid pipe */
++ j++;
++ }
+
++ for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
+ pool->base.engines[i] = dcn10_aux_engine_create(ctx, i);
+ if (pool->base.engines[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+@@ -1386,8 +1391,6 @@ static bool construct(
+ goto fail;
+ }
+ pool->base.sw_i2cs[i] = NULL;
+- /* check next valid pipe */
+- j++;
+ }
+
+ /* valid pipe num */
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
+index 5b32100..76d00c6 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
+@@ -44,6 +44,7 @@ struct resource_caps {
+ int num_stream_encoder;
+ int num_pll;
+ int num_dwb;
++ int num_ddc;
+ };
+
+ struct resource_straps {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5359-drm-amd-display-Use-DRM-helper-for-best_encoder.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5359-drm-amd-display-Use-DRM-helper-for-best_encoder.patch
new file mode 100644
index 00000000..df0a54f3
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5359-drm-amd-display-Use-DRM-helper-for-best_encoder.patch
@@ -0,0 +1,71 @@
+From ff2838442aa7a1e461bdb32218eb40cf04827a80 Mon Sep 17 00:00:00 2001
+From: Leo Li <sunpeng.li@amd.com>
+Date: Thu, 23 Aug 2018 15:28:08 -0400
+Subject: [PATCH 5359/5725] drm/amd/display: Use DRM helper for best_encoder
+
+[Why]
+Our implementation is functionally identical to DRM's
+
+Note that instead of checking if the provided id is 0, the helper
+follows through with the mode object search. However, it will still
+return NULL, since 0 is not a valid object id, and missed searches
+will return NULL.
+
+[How]
+Remove our implementation, and replace it with
+drm_atomic_helper_best_encoder.
+
+Change-Id: I96a0adce860385dc3efe9b2b3b8b89c06eeff273
+Signed-off-by: Leo Li <sunpeng.li@amd.com>
+Reviewed-by: David Francis <David.Francis@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 24 +----------------------
+ 1 file changed, 1 insertion(+), 23 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 8132f40..569a75e 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -3221,28 +3221,6 @@ static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
+ .atomic_get_property = amdgpu_dm_connector_atomic_get_property
+ };
+
+-static struct drm_encoder *best_encoder(struct drm_connector *connector)
+-{
+- int enc_id = connector->encoder_ids[0];
+- struct drm_mode_object *obj;
+- struct drm_encoder *encoder;
+-
+- DRM_DEBUG_DRIVER("Finding the best encoder\n");
+-
+- /* pick the encoder ids */
+- if (enc_id) {
+- obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
+- if (!obj) {
+- DRM_ERROR("Couldn't find a matching encoder for our connector\n");
+- return NULL;
+- }
+- encoder = obj_to_encoder(obj);
+- return encoder;
+- }
+- DRM_ERROR("No encoder id\n");
+- return NULL;
+-}
+-
+ static int get_modes(struct drm_connector *connector)
+ {
+ return amdgpu_dm_connector_get_modes(connector);
+@@ -3363,7 +3341,7 @@ amdgpu_dm_connector_helper_funcs = {
+ */
+ .get_modes = get_modes,
+ .mode_valid = amdgpu_dm_connector_mode_valid,
+- .best_encoder = best_encoder
++ .best_encoder = drm_atomic_helper_best_encoder
+ };
+
+ static void dm_crtc_helper_disable(struct drm_crtc *crtc)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5360-drm-amd-display-Reorder-resource_pool-to-put-i2c-wit.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5360-drm-amd-display-Reorder-resource_pool-to-put-i2c-wit.patch
new file mode 100644
index 00000000..3f95e849
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5360-drm-amd-display-Reorder-resource_pool-to-put-i2c-wit.patch
@@ -0,0 +1,41 @@
+From fb269e870ea29370c6677c59c370ba29352955d4 Mon Sep 17 00:00:00 2001
+From: David Francis <David.Francis@amd.com>
+Date: Thu, 9 Aug 2018 10:05:10 -0400
+Subject: [PATCH 5360/5725] drm/amd/display: Reorder resource_pool to put i2c
+ with aux
+
+[Why]
+The i2c and aux engines are similar, and should be placed
+next to each other for readability
+
+[How]
+Reorder the elements of the resource_pool struct
+
+Change-Id: I74bb54c9bd8d08cf51013fc597c54d33e379f8a5
+Signed-off-by: David Francis <David.Francis@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/inc/core_types.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+index ed388d3..d7dadfd 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+@@ -138,11 +138,11 @@ struct resource_pool {
+ struct output_pixel_processor *opps[MAX_PIPES];
+ struct timing_generator *timing_generators[MAX_PIPES];
+ struct stream_encoder *stream_enc[MAX_PIPES * 2];
+- struct aux_engine *engines[MAX_PIPES];
+ struct hubbub *hubbub;
+ struct mpc *mpc;
+ struct pp_smu_funcs_rv *pp_smu;
+ struct pp_smu_display_requirement_rv pp_smu_req;
++ struct aux_engine *engines[MAX_PIPES];
+ struct dce_i2c_hw *hw_i2cs[MAX_PIPES];
+ struct dce_i2c_sw *sw_i2cs[MAX_PIPES];
+ bool i2c_hw_buffer_in_use;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5361-drm-amd-display-use-link-type-to-decide-stream-enc-a.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5361-drm-amd-display-use-link-type-to-decide-stream-enc-a.patch
new file mode 100644
index 00000000..9a1af8ed
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5361-drm-amd-display-use-link-type-to-decide-stream-enc-a.patch
@@ -0,0 +1,40 @@
+From 4ca03f41ec0e668ec717d2e8fce7f38dae9c172f Mon Sep 17 00:00:00 2001
+From: Eric Yang <Eric.Yang2@amd.com>
+Date: Fri, 24 Aug 2018 16:54:14 -0400
+Subject: [PATCH 5361/5725] drm/amd/display: use link type to decide stream enc
+ acquisition
+
+[Why]
+Virtual sink is used when set mode happens on a disconnected display
+to allow the mode set to proceed. This did not work with MST because
+the logic for acquiring a stream encoder uses the stream signal to determine
+whether special handling is required, and the stream signal is virtual instead
+of DP in this case.
+
+[How]
+Use link type to decide instead.
+
+Change-Id: Ied8056c5cfc035dd313ddb3631bca72442491cd6
+Signed-off-by: Eric Yang <Eric.Yang2@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index b2f6711..6d27db6 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -1753,7 +1753,7 @@ static struct stream_encoder *find_first_free_match_stream_enc_for_link(
+ * required for non DP connectors.
+ */
+
+- if (j >= 0 && dc_is_dp_signal(stream->signal))
++ if (j >= 0 && link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT)
+ return pool->stream_enc[j];
+
+ return NULL;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5362-drm-amd-display-Remove-call-to-amdgpu_pm_compute_clo.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5362-drm-amd-display-Remove-call-to-amdgpu_pm_compute_clo.patch
new file mode 100644
index 00000000..68fb2a74
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5362-drm-amd-display-Remove-call-to-amdgpu_pm_compute_clo.patch
@@ -0,0 +1,47 @@
+From e5a62fa1758732956b100f59dadc3eff48a6eaf2 Mon Sep 17 00:00:00 2001
+From: David Francis <David.Francis@amd.com>
+Date: Fri, 17 Aug 2018 14:24:26 -0400
+Subject: [PATCH 5362/5725] drm/amd/display: Remove call to
+ amdgpu_pm_compute_clocks
+
+[Why]
+The extraneous call to amdgpu_pm_compute_clocks is deprecated.
+
+[How]
+Remove it.
+
+Change-Id: I0bfee7f1aca4184b441c39efbdc580394bdd1020
+Signed-off-by: David Francis <David.Francis@amd.com>
+Signed-off-by: Leo Li <sunpeng.li@amd.com>
+Reviewed-by: David Francis <David.Francis@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | 8 --------
+ 1 file changed, 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+index cfa907b..6d16b4a 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+@@ -101,18 +101,10 @@ bool dm_pp_apply_display_requirements(
+ adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1;
+ }
+
+- /* TODO: complete implementation of
+- * pp_display_configuration_change().
+- * Follow example of:
+- * PHM_StoreDALConfigurationData - powerplay\hwmgr\hardwaremanager.c
+- * PP_IRI_DisplayConfigurationChange - powerplay\eventmgr\iri.c */
+ if (adev->powerplay.pp_funcs->display_configuration_change)
+ adev->powerplay.pp_funcs->display_configuration_change(
+ adev->powerplay.pp_handle,
+ &adev->pm.pm_display_cfg);
+-
+- /* TODO: replace by a separate call to 'apply display cfg'? */
+- amdgpu_pm_compute_clocks(adev);
+ }
+
+ return true;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5363-drm-amd-display-clean-code-for-transition-event-log.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5363-drm-amd-display-clean-code-for-transition-event-log.patch
new file mode 100644
index 00000000..9143f055
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5363-drm-amd-display-clean-code-for-transition-event-log.patch
@@ -0,0 +1,94 @@
+From 3fff3ba44cbe607552c7c9c528cde3a4656aac40 Mon Sep 17 00:00:00 2001
+From: Chiawen Huang <chiawen.huang@amd.com>
+Date: Tue, 28 Aug 2018 13:38:34 +0800
+Subject: [PATCH 5363/5725] drm/amd/display: clean code for transition event
+ log.
+
+[Why]
+There are transition events that serve the same purpose.
+
+[How]
+Remove the redundant event logs.
+
+Change-Id: I90faf48f7c0c492b7b753ebbeb819a08c5f074e5
+Signed-off-by: Chiawen Huang <chiawen.huang@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dm_event_log.h | 2 --
+ drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c | 12 ------------
+ drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c | 15 ---------------
+ 3 files changed, 29 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dm_event_log.h b/drivers/gpu/drm/amd/display/dc/dm_event_log.h
+index c1ce2dd..00a275d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dm_event_log.h
++++ b/drivers/gpu/drm/amd/display/dc/dm_event_log.h
+@@ -31,8 +31,6 @@
+
+ #define __DM_EVENT_LOG_H__
+
+-#define EVENT_LOG_I2CAUX_READ(transType, dcc, address, status, len, data)
+-#define EVENT_LOG_I2CAUX_WRITE(transType, dcc, address, status, len, data)
+ #define EVENT_LOG_AUX_REQ(dcc, type, action, address, len, data)
+ #define EVENT_LOG_AUX_Reply(dcc, type, swStatus, replyStatus, len, data)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c
+index 03292c5..8cbf38b 100644
+--- a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c
++++ b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c
+@@ -297,12 +297,6 @@ static bool read_command(
+
+ if (request->payload.address_space ==
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
+- EVENT_LOG_I2CAUX_READ(request->payload.address_space,
+- engine->base.ddc->pin_data->en,
+- request->payload.address,
+- request->status,
+- request->payload.length,
+- request->payload.data);
+ DC_LOG_I2C_AUX("READ: addr:0x%x value:0x%x Result:%d",
+ request->payload.address,
+ request->payload.data[0],
+@@ -519,12 +513,6 @@ static bool write_command(
+
+ if (request->payload.address_space ==
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
+- EVENT_LOG_I2CAUX_WRITE(request->payload.address_space,
+- engine->base.ddc->pin_data->en,
+- request->payload.address,
+- request->status,
+- request->payload.length,
+- request->payload.data);
+ DC_LOG_I2C_AUX("WRITE: addr:0x%x value:0x%x Result:%d",
+ request->payload.address,
+ request->payload.data[0],
+diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c
+index 1747b9f..c995ef4 100644
+--- a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c
++++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c
+@@ -171,21 +171,6 @@ bool dal_i2c_hw_engine_submit_request(
+ process_channel_reply(&hw_engine->base, &reply);
+ }
+
+- if (i2caux_request->operation == I2CAUX_TRANSACTION_READ) {
+- EVENT_LOG_I2CAUX_READ(i2caux_request->payload.address_space,
+- engine->ddc->pin_data->en,
+- i2caux_request->payload.address,
+- i2caux_request->status,
+- i2caux_request->payload.length,
+- i2caux_request->payload.data);
+- } else if (i2caux_request->operation == I2CAUX_TRANSACTION_WRITE) {
+- EVENT_LOG_I2CAUX_WRITE(i2caux_request->payload.address_space,
+- engine->ddc->pin_data->en,
+- i2caux_request->payload.address,
+- i2caux_request->status,
+- i2caux_request->payload.length,
+- i2caux_request->payload.data);
+- }
+
+
+ return result;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5364-drm-amd-display-Add-invariant-support-instrumentatio.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5364-drm-amd-display-Add-invariant-support-instrumentatio.patch
new file mode 100644
index 00000000..cebb8e88
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5364-drm-amd-display-Add-invariant-support-instrumentatio.patch
@@ -0,0 +1,193 @@
+From 8fd2082307c2f032cfc88bc7d33ed9f90e9a5fd5 Mon Sep 17 00:00:00 2001
+From: Jun Lei <Jun.Lei@amd.com>
+Date: Wed, 22 Aug 2018 17:00:34 -0400
+Subject: [PATCH 5364/5725] drm/amd/display: Add invariant support
+ instrumentation in driver
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Existing debug dumps are all invariant; the new "low 32-bit of address"
+dump is not invariant.
+
+Change-Id: I59a1cba0d253ae266ed94698c3cc790cad20f083
+Signed-off-by: Jun Lei <Jun.Lei@amd.com>
+Reviewed-by: Eric Yang <eric.yang2@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 3 +
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 1 +
+ .../display/dc/dcn10/dcn10_hw_sequencer_debug.c | 84 +++++++++++++++-------
+ 3 files changed, 61 insertions(+), 27 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+index 8da2b8a..74132a1 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+@@ -974,6 +974,9 @@ void hubp1_read_state(struct hubp *hubp)
+ REG_GET(DCSURF_SURFACE_EARLIEST_INUSE_HIGH,
+ SURFACE_EARLIEST_INUSE_ADDRESS_HIGH, &s->inuse_addr_hi);
+
++ REG_GET(DCSURF_SURFACE_EARLIEST_INUSE,
++ SURFACE_EARLIEST_INUSE_ADDRESS, &s->inuse_addr_lo);
++
+ REG_GET_2(DCSURF_PRI_VIEWPORT_DIMENSION,
+ PRI_VIEWPORT_WIDTH, &s->viewport_width,
+ PRI_VIEWPORT_HEIGHT, &s->viewport_height);
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+index 7605af9..4890273 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+@@ -639,6 +639,7 @@ struct dcn_hubp_state {
+ struct _vcs_dpi_display_rq_regs_st rq_regs;
+ uint32_t pixel_format;
+ uint32_t inuse_addr_hi;
++ uint32_t inuse_addr_lo;
+ uint32_t viewport_width;
+ uint32_t viewport_height;
+ uint32_t rotation_angle;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
+index 9c21825..6415890 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
+@@ -105,7 +105,7 @@ static unsigned int dcn10_get_hubbub_state(struct dc *dc, char *pBuf, unsigned i
+ return bufSize - remaining_buffer;
+ }
+
+-static unsigned int dcn10_get_hubp_states(struct dc *dc, char *pBuf, unsigned int bufSize)
++static unsigned int dcn10_get_hubp_states(struct dc *dc, char *pBuf, unsigned int bufSize, bool invarOnly)
+ {
+ struct dc_context *dc_ctx = dc->ctx;
+ struct resource_pool *pool = dc->res_pool;
+@@ -117,9 +117,15 @@ static unsigned int dcn10_get_hubp_states(struct dc *dc, char *pBuf, unsigned in
+ const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clock_inKhz / 1000;
+ static const unsigned int frac = 1000;
+
+- chars_printed = snprintf_count(pBuf, remaining_buffer, "instance,format,addr_hi,width,height,rotation,mirror,sw_mode,dcc_en,blank_en,ttu_dis,underflow,"
+- "min_ttu_vblank,qos_low_wm,qos_high_wm"
+- "\n");
++ if (invarOnly)
++ chars_printed = snprintf_count(pBuf, remaining_buffer, "instance,format,addr_hi,width,height,rotation,mirror,sw_mode,dcc_en,blank_en,ttu_dis,underflow,"
++ "min_ttu_vblank,qos_low_wm,qos_high_wm"
++ "\n");
++ else
++ chars_printed = snprintf_count(pBuf, remaining_buffer, "instance,format,addr_hi,addr_lo,width,height,rotation,mirror,sw_mode,dcc_en,blank_en,ttu_dis,underflow,"
++ "min_ttu_vblank,qos_low_wm,qos_high_wm"
++ "\n");
++
+ remaining_buffer -= chars_printed;
+ pBuf += chars_printed;
+
+@@ -130,24 +136,45 @@ static unsigned int dcn10_get_hubp_states(struct dc *dc, char *pBuf, unsigned in
+ hubp->funcs->hubp_read_state(hubp);
+
+ if (!s->blank_en) {
+- chars_printed = snprintf_count(pBuf, remaining_buffer, "%x,%x,%x,%d,%d,%x,%x,%x,%x,%x,%x,%x,"
+- "%d.%03d,%d.%03d,%d.%03d"
+- "\n",
+- hubp->inst,
+- s->pixel_format,
+- s->inuse_addr_hi,
+- s->viewport_width,
+- s->viewport_height,
+- s->rotation_angle,
+- s->h_mirror_en,
+- s->sw_mode,
+- s->dcc_en,
+- s->blank_en,
+- s->ttu_disable,
+- s->underflow_status,
+- (s->min_ttu_vblank * frac) / ref_clk_mhz / frac, (s->min_ttu_vblank * frac) / ref_clk_mhz % frac,
+- (s->qos_level_low_wm * frac) / ref_clk_mhz / frac, (s->qos_level_low_wm * frac) / ref_clk_mhz % frac,
+- (s->qos_level_high_wm * frac) / ref_clk_mhz / frac, (s->qos_level_high_wm * frac) / ref_clk_mhz % frac);
++ if (invarOnly)
++ chars_printed = snprintf_count(pBuf, remaining_buffer, "%x,%x,%x,%d,%d,%x,%x,%x,%x,%x,%x,%x,"
++ "%d.%03d,%d.%03d,%d.%03d"
++ "\n",
++ hubp->inst,
++ s->pixel_format,
++ s->inuse_addr_hi,
++ s->viewport_width,
++ s->viewport_height,
++ s->rotation_angle,
++ s->h_mirror_en,
++ s->sw_mode,
++ s->dcc_en,
++ s->blank_en,
++ s->ttu_disable,
++ s->underflow_status,
++ (s->min_ttu_vblank * frac) / ref_clk_mhz / frac, (s->min_ttu_vblank * frac) / ref_clk_mhz % frac,
++ (s->qos_level_low_wm * frac) / ref_clk_mhz / frac, (s->qos_level_low_wm * frac) / ref_clk_mhz % frac,
++ (s->qos_level_high_wm * frac) / ref_clk_mhz / frac, (s->qos_level_high_wm * frac) / ref_clk_mhz % frac);
++ else
++ chars_printed = snprintf_count(pBuf, remaining_buffer, "%x,%x,%x,%x,%d,%d,%x,%x,%x,%x,%x,%x,%x,"
++ "%d.%03d,%d.%03d,%d.%03d"
++ "\n",
++ hubp->inst,
++ s->pixel_format,
++ s->inuse_addr_hi,
++ s->inuse_addr_lo,
++ s->viewport_width,
++ s->viewport_height,
++ s->rotation_angle,
++ s->h_mirror_en,
++ s->sw_mode,
++ s->dcc_en,
++ s->blank_en,
++ s->ttu_disable,
++ s->underflow_status,
++ (s->min_ttu_vblank * frac) / ref_clk_mhz / frac, (s->min_ttu_vblank * frac) / ref_clk_mhz % frac,
++ (s->qos_level_low_wm * frac) / ref_clk_mhz / frac, (s->qos_level_low_wm * frac) / ref_clk_mhz % frac,
++ (s->qos_level_high_wm * frac) / ref_clk_mhz / frac, (s->qos_level_high_wm * frac) / ref_clk_mhz % frac);
+
+ remaining_buffer -= chars_printed;
+ pBuf += chars_printed;
+@@ -314,9 +341,6 @@ static unsigned int dcn10_get_cm_states(struct dc *dc, char *pBuf, unsigned int
+ struct dpp *dpp = pool->dpps[i];
+ struct dcn_dpp_state s = {0};
+
+-
+-
+-
+ dpp->funcs->dpp_read_state(dpp, &s);
+
+ if (s.is_enabled) {
+@@ -462,6 +486,11 @@ static unsigned int dcn10_get_clock_states(struct dc *dc, char *pBuf, unsigned i
+
+ void dcn10_get_hw_state(struct dc *dc, char *pBuf, unsigned int bufSize, unsigned int mask)
+ {
++ /*
++ * Mask Format
++ * Bit 0 - 15: Hardware block mask
++ * Bit 15: 1 = Invariant Only, 0 = All
++ */
+ const unsigned int DC_HW_STATE_MASK_HUBBUB = 0x1;
+ const unsigned int DC_HW_STATE_MASK_HUBP = 0x2;
+ const unsigned int DC_HW_STATE_MASK_RQ = 0x4;
+@@ -471,12 +500,13 @@ void dcn10_get_hw_state(struct dc *dc, char *pBuf, unsigned int bufSize, unsigne
+ const unsigned int DC_HW_STATE_MASK_MPCC = 0x40;
+ const unsigned int DC_HW_STATE_MASK_OTG = 0x80;
+ const unsigned int DC_HW_STATE_MASK_CLOCKS = 0x100;
++ const unsigned int DC_HW_STATE_INVAR_ONLY = 0x8000;
+
+ unsigned int chars_printed = 0;
+ unsigned int remaining_buf_size = bufSize;
+
+ if (mask == 0x0)
+- mask = 0xFFFF;
++ mask = 0xFFFF; // Default, capture all, invariant only
+
+ if ((mask & DC_HW_STATE_MASK_HUBBUB) && remaining_buf_size > 0) {
+ chars_printed = dcn10_get_hubbub_state(dc, pBuf, remaining_buf_size);
+@@ -485,7 +515,7 @@ void dcn10_get_hw_state(struct dc *dc, char *pBuf, unsigned int bufSize, unsigne
+ }
+
+ if ((mask & DC_HW_STATE_MASK_HUBP) && remaining_buf_size > 0) {
+- chars_printed = dcn10_get_hubp_states(dc, pBuf, remaining_buf_size);
++ chars_printed = dcn10_get_hubp_states(dc, pBuf, remaining_buf_size, mask & DC_HW_STATE_INVAR_ONLY);
+ pBuf += chars_printed;
+ remaining_buf_size -= chars_printed;
+ }
+--
+2.7.4
+
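The mask layout documented in dcn10_get_hw_state() above means callers select hardware blocks with the low bits and opt into the invariant-only column set with bit 15. A hedged usage sketch, taking dcn10_get_hw_state() and its signature from the hunk above; the buffer size and calling context are illustrative assumptions:

	char buf[4096];
	/* HUBP state (bit 1), full dump including the non-invariant addr_lo column */
	dcn10_get_hw_state(dc, buf, sizeof(buf), 0x2);
	/* HUBP state, invariant-only columns (bit 15 set) */
	dcn10_get_hw_state(dc, buf, sizeof(buf), 0x2 | 0x8000);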
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5365-drm-amd-display-Fix-warning-storm-on-Raven2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5365-drm-amd-display-Fix-warning-storm-on-Raven2.patch
new file mode 100644
index 00000000..05266ae7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5365-drm-amd-display-Fix-warning-storm-on-Raven2.patch
@@ -0,0 +1,90 @@
+From 4564b7c8f9cc50501ee02f3cb5e30764a2c27d6f Mon Sep 17 00:00:00 2001
+From: Roman Li <Roman.Li@amd.com>
+Date: Tue, 9 Oct 2018 13:50:09 -0400
+Subject: [PATCH 5365/5725] drm/amd/display: Fix warning storm on Raven2
+
+[Why]
+Wrong index for pstate debug test register
+
+[How]
+Add correct index value for dcn1_01 in hubbub1_construct()
+
+Signed-off-by: Hersen Wu <hersenxs.wu@amd.com>
+Signed-off-by: Roman Li <Roman.Li@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Huang Rui <ray.huang@amd.com>
+---
+ .../gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c | 43 +++++++++++++++++++++-
+ 1 file changed, 41 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+index 1ea91e1..297e1e5 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+@@ -116,7 +116,43 @@ bool hubbub1_verify_allow_pstate_change_high(
+ forced_pstate_allow = false;
+ }
+
+- /* RV1:
++ /* RV2:
++ * dchubbubdebugind, at: 0xB
++ * description
++ * 0: Pipe0 Plane0 Allow Pstate Change
++ * 1: Pipe0 Plane1 Allow Pstate Change
++ * 2: Pipe0 Cursor0 Allow Pstate Change
++ * 3: Pipe0 Cursor1 Allow Pstate Change
++ * 4: Pipe1 Plane0 Allow Pstate Change
++ * 5: Pipe1 Plane1 Allow Pstate Change
++ * 6: Pipe1 Cursor0 Allow Pstate Change
++ * 7: Pipe1 Cursor1 Allow Pstate Change
++ * 8: Pipe2 Plane0 Allow Pstate Change
++ * 9: Pipe2 Plane1 Allow Pstate Change
++ * 10: Pipe2 Cursor0 Allow Pstate Change
++ * 11: Pipe2 Cursor1 Allow Pstate Change
++ * 12: Pipe3 Plane0 Allow Pstate Change
++ * 13: Pipe3 Plane1 Allow Pstate Change
++ * 14: Pipe3 Cursor0 Allow Pstate Change
++ * 15: Pipe3 Cursor1 Allow Pstate Change
++ * 16: Pipe4 Plane0 Allow Pstate Change
++ * 17: Pipe4 Plane1 Allow Pstate Change
++ * 18: Pipe4 Cursor0 Allow Pstate Change
++ * 19: Pipe4 Cursor1 Allow Pstate Change
++ * 20: Pipe5 Plane0 Allow Pstate Change
++ * 21: Pipe5 Plane1 Allow Pstate Change
++ * 22: Pipe5 Cursor0 Allow Pstate Change
++ * 23: Pipe5 Cursor1 Allow Pstate Change
++ * 24: Pipe6 Plane0 Allow Pstate Change
++ * 25: Pipe6 Plane1 Allow Pstate Change
++ * 26: Pipe6 Cursor0 Allow Pstate Change
++ * 27: Pipe6 Cursor1 Allow Pstate Change
++ * 28: WB0 Allow Pstate Change
++ * 29: WB1 Allow Pstate Change
++ * 30: Arbiter's allow_pstate_change
++ * 31: SOC pstate change request"
++ *
++ * RV1:
+ * dchubbubdebugind, at: 0x7
+ * description "3-0: Pipe0 cursor0 QOS
+ * 7-4: Pipe1 cursor0 QOS
+@@ -140,7 +176,6 @@ bool hubbub1_verify_allow_pstate_change_high(
+ * 31: SOC pstate change request
+ */
+
+-
+ REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, hubbub->debug_test_index_pstate);
+
+ for (i = 0; i < pstate_wait_timeout_us; i++) {
+@@ -802,5 +837,9 @@ void hubbub1_construct(struct hubbub *hubbub,
+ hubbub->masks = hubbub_mask;
+
+ hubbub->debug_test_index_pstate = 0x7;
++#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
++ if (ctx->dce_version == DCN_VERSION_1_01)
++ hubbub->debug_test_index_pstate = 0xB;
++#endif
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5366-drm-amd-display-RV2-DP-MST-2nd-display-within-daisy-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5366-drm-amd-display-RV2-DP-MST-2nd-display-within-daisy-.patch
new file mode 100644
index 00000000..8cd25bb8
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5366-drm-amd-display-RV2-DP-MST-2nd-display-within-daisy-.patch
@@ -0,0 +1,57 @@
+From 515d6c7b46a3bb5620c0d292c03e0a25d01cfef6 Mon Sep 17 00:00:00 2001
+From: Hersen Wu <hersenxs.wu@amd.com>
+Date: Tue, 9 Oct 2018 13:50:10 -0400
+Subject: [PATCH 5366/5725] drm/amd/display: RV2 DP MST 2nd display within
+ daisy chain not light up
+
+RV2 resources are limited to 3 pipes. The limitation should apply to all HW
+blocks, not only the front pipes.
+
+Signed-off-by: Hersen Wu <hersenxs.wu@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Huang Rui <ray.huang@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 19 ++++++++++++++++++-
+ 1 file changed, 18 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+index cb1b134..e148f70 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+@@ -507,6 +507,18 @@ static const struct resource_caps res_cap = {
+ .num_ddc = 4,
+ };
+
++#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
++static const struct resource_caps rv2_res_cap = {
++ .num_timing_generator = 3,
++ .num_opp = 3,
++ .num_video_plane = 3,
++ .num_audio = 3,
++ .num_stream_encoder = 3,
++ .num_pll = 3,
++ .num_ddc = 3,
++};
++#endif
++
+ static const struct dc_debug_options debug_defaults_drv = {
+ .sanity_checks = true,
+ .disable_dmcu = true,
+@@ -1152,7 +1164,12 @@ static bool construct(
+
+ ctx->dc_bios->regs = &bios_regs;
+
+- pool->base.res_cap = &res_cap;
++#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
++ if (ctx->dce_version == DCN_VERSION_1_01)
++ pool->base.res_cap = &rv2_res_cap;
++ else
++#endif
++ pool->base.res_cap = &res_cap;
+ pool->base.funcs = &dcn10_res_pool_funcs;
+
+ /*
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5367-drm-amdgpu-interim-disable-RV2-GFX-CG-flag-for-urgen.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5367-drm-amdgpu-interim-disable-RV2-GFX-CG-flag-for-urgen.patch
new file mode 100644
index 00000000..9952f6d3
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5367-drm-amdgpu-interim-disable-RV2-GFX-CG-flag-for-urgen.patch
@@ -0,0 +1,36 @@
+From 97e8f44def63dd86a495df0796a0b4b55fc27b73 Mon Sep 17 00:00:00 2001
+From: Likun Gao <Likun.Gao@amd.com>
+Date: Tue, 13 Nov 2018 14:01:59 +0800
+Subject: [PATCH 5367/5725] drm/amdgpu: interim disable RV2 GFX CG flag for
+ urgent use
+
+Temporarily disable AMD_CG_SUPPORT_GFX_CGCG, AMD_CG_SUPPORT_GFX_MGCG, and
+AMD_CG_SUPPORT_GFX_3D_CGCG on RV2, as SMU firmware newer than 37.16.0 causes
+RLC safe mode to fail.
+
+Signed-off-by: Likun Gao <Likun.Gao@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/soc15.c | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index c4daf1f..3c4f940 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -703,12 +703,9 @@ static int soc15_common_early_init(void *handle)
+ adev->external_rev_id = 0x1;
+
+ if (adev->rev_id >= 0x8) {
+- adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+- AMD_CG_SUPPORT_GFX_MGLS |
++ adev->cg_flags = AMD_CG_SUPPORT_GFX_MGLS |
+ AMD_CG_SUPPORT_GFX_CP_LS |
+- AMD_CG_SUPPORT_GFX_3D_CGCG |
+ AMD_CG_SUPPORT_GFX_3D_CGLS |
+- AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_BIF_LS |
+ AMD_CG_SUPPORT_HDP_LS |
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5368-drm-drivers-drop-redundant-drm_edid_to_eld-calls.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5368-drm-drivers-drop-redundant-drm_edid_to_eld-calls.patch
new file mode 100644
index 00000000..8186f93b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5368-drm-drivers-drop-redundant-drm_edid_to_eld-calls.patch
@@ -0,0 +1,53 @@
+From 5b41d93d4916b18c624b992285c5de3012d78c70 Mon Sep 17 00:00:00 2001
+From: Jani Nikula <jani.nikula@intel.com>
+Date: Wed, 1 Nov 2017 16:21:02 +0200
+Subject: [PATCH 5368/5725] drm/drivers: drop redundant drm_edid_to_eld() calls
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+drm_add_edid_modes() now fills in the ELD automatically, so the calls to
+drm_edid_to_eld() are redundant. Remove them.
+
+All the other places are obvious, but nv50 has detached
+drm_edid_to_eld() from the drm_add_edid_modes() call.
+
+Cc: Alex Deucher <alexander.deucher@amd.com>
+Cc: Christian König <christian.koenig@amd.com>
+Cc: Archit Taneja <architt@codeaurora.org>
+Cc: Andrzej Hajda <a.hajda@samsung.com>
+Cc: Russell King <linux@armlinux.org.uk>
+Cc: CK Hu <ck.hu@mediatek.com>
+Cc: Philipp Zabel <p.zabel@pengutronix.de>
+Cc: Ben Skeggs <bskeggs@redhat.com>
+Cc: Mark Yao <mark.yao@rock-chips.com>
+Cc: Benjamin Gaignard <benjamin.gaignard@linaro.org>
+Cc: Vincent Abriou <vincent.abriou@st.com>
+Cc: Thierry Reding <thierry.reding@gmail.com>
+Cc: Eric Anholt <eric@anholt.net>
+Acked-by: Eric Anholt <eric@anholt.net>
+Acked-by: Archit Taneja <architt@codeaurora.org>
+Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Acked-by: Thierry Reding <treding@nvidia.com>
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/0959ca02b983afc9e74dd9acd190ba6e25f21678.1509545641.git.jani.nikula@intel.com
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+index 370071b..5aa20f1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+@@ -354,7 +354,6 @@ static int amdgpu_connector_ddc_get_modes(struct drm_connector *connector)
+ if (amdgpu_connector->edid) {
+ drm_mode_connector_update_edid_property(connector, amdgpu_connector->edid);
+ ret = drm_add_edid_modes(connector, amdgpu_connector->edid);
+- drm_edid_to_eld(connector, amdgpu_connector->edid);
+ return ret;
+ }
+ drm_mode_connector_update_edid_property(connector, NULL);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5369-drm-amdgpu-add-license-to-Makefiles.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5369-drm-amdgpu-add-license-to-Makefiles.patch
new file mode 100644
index 00000000..ba640803
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5369-drm-amdgpu-add-license-to-Makefiles.patch
@@ -0,0 +1,803 @@
+From ead09a3769cc8ee600068dad761622b754224eeb Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 30 Nov 2017 21:15:50 -0500
+Subject: [PATCH 5369/5725] drm/amdgpu: add license to Makefiles
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Was missing license text.
+
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/acp/Makefile | 21 ++++++++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/Makefile | 22 ++++++++++++++++++++-
+ drivers/gpu/drm/amd/display/Makefile | 21 ++++++++++++++++++++
+ drivers/gpu/drm/amd/display/amdgpu_dm/Makefile | 21 ++++++++++++++++++++
+ drivers/gpu/drm/amd/display/dc/Makefile | 21 ++++++++++++++++++++
+ drivers/gpu/drm/amd/display/dc/basics/Makefile | 21 ++++++++++++++++++++
+ drivers/gpu/drm/amd/display/dc/bios/Makefile | 21 ++++++++++++++++++++
+ drivers/gpu/drm/amd/display/dc/calcs/Makefile | 21 ++++++++++++++++++++
+ drivers/gpu/drm/amd/display/dc/dce/Makefile | 21 ++++++++++++++++++++
+ drivers/gpu/drm/amd/display/dc/dce100/Makefile | 21 ++++++++++++++++++++
+ drivers/gpu/drm/amd/display/dc/dce110/Makefile | 21 ++++++++++++++++++++
+ drivers/gpu/drm/amd/display/dc/dce112/Makefile | 21 ++++++++++++++++++++
+ drivers/gpu/drm/amd/display/dc/dce120/Makefile | 23 +++++++++++++++++++++-
+ drivers/gpu/drm/amd/display/dc/dce80/Makefile | 21 ++++++++++++++++++++
+ drivers/gpu/drm/amd/display/dc/dcn10/Makefile | 21 ++++++++++++++++++++
+ drivers/gpu/drm/amd/display/dc/dml/Makefile | 21 ++++++++++++++++++++
+ drivers/gpu/drm/amd/display/dc/gpio/Makefile | 21 ++++++++++++++++++++
+ drivers/gpu/drm/amd/display/dc/i2caux/Makefile | 21 ++++++++++++++++++++
+ drivers/gpu/drm/amd/display/dc/irq/Makefile | 21 ++++++++++++++++++++
+ drivers/gpu/drm/amd/display/dc/virtual/Makefile | 21 ++++++++++++++++++++
+ .../gpu/drm/amd/display/modules/freesync/Makefile | 21 ++++++++++++++++++++
+ drivers/gpu/drm/amd/lib/Makefile | 21 ++++++++++++++++++++
+ drivers/gpu/drm/amd/powerplay/Makefile | 22 ++++++++++++++++++++-
+ drivers/gpu/drm/amd/powerplay/hwmgr/Makefile | 22 ++++++++++++++++++++-
+ drivers/gpu/drm/amd/powerplay/smumgr/Makefile | 22 ++++++++++++++++++++-
+ 25 files changed, 526 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/acp/Makefile b/drivers/gpu/drm/amd/acp/Makefile
+index 8a08e81..d4176a3 100644
+--- a/drivers/gpu/drm/amd/acp/Makefile
++++ b/drivers/gpu/drm/amd/acp/Makefile
+@@ -1,4 +1,25 @@
+ #
++# Copyright 2017 Advanced Micro Devices, Inc.
++#
++# Permission is hereby granted, free of charge, to any person obtaining a
++# copy of this software and associated documentation files (the "Software"),
++# to deal in the Software without restriction, including without limitation
++# the rights to use, copy, modify, merge, publish, distribute, sublicense,
++# and/or sell copies of the Software, and to permit persons to whom the
++# Software is furnished to do so, subject to the following conditions:
++#
++# The above copyright notice and this permission notice shall be included in
++# all copies or substantial portions of the Software.
++#
++# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++# OTHER DEALINGS IN THE SOFTWARE.
++#
++#
+ # Makefile for the ACP, which is a sub-component
+ # of AMDSOC/AMDGPU drm driver.
+ # It provides the HW control for ACP related functionalities.
+diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
+index 064dd5f7..0e9fe5e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/Makefile
++++ b/drivers/gpu/drm/amd/amdgpu/Makefile
+@@ -1,4 +1,24 @@
+-# SPDX-License-Identifier: GPL-2.0
++#
++# Copyright 2017 Advanced Micro Devices, Inc.
++#
++# Permission is hereby granted, free of charge, to any person obtaining a
++# copy of this software and associated documentation files (the "Software"),
++# to deal in the Software without restriction, including without limitation
++# the rights to use, copy, modify, merge, publish, distribute, sublicense,
++# and/or sell copies of the Software, and to permit persons to whom the
++# Software is furnished to do so, subject to the following conditions:
++#
++# The above copyright notice and this permission notice shall be included in
++# all copies or substantial portions of the Software.
++#
++# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++# OTHER DEALINGS IN THE SOFTWARE.
++#
+ #
+ # Makefile for the drm device driver. This driver provides support for the
+ # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile
+index e3dfdf3..c97dc96 100644
+--- a/drivers/gpu/drm/amd/display/Makefile
++++ b/drivers/gpu/drm/amd/display/Makefile
+@@ -1,4 +1,25 @@
+ #
++# Copyright 2017 Advanced Micro Devices, Inc.
++#
++# Permission is hereby granted, free of charge, to any person obtaining a
++# copy of this software and associated documentation files (the "Software"),
++# to deal in the Software without restriction, including without limitation
++# the rights to use, copy, modify, merge, publish, distribute, sublicense,
++# and/or sell copies of the Software, and to permit persons to whom the
++# Software is furnished to do so, subject to the following conditions:
++#
++# The above copyright notice and this permission notice shall be included in
++# all copies or substantial portions of the Software.
++#
++# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++# OTHER DEALINGS IN THE SOFTWARE.
++#
++#
+ # Makefile for the DAL (Display Abstract Layer), which is a sub-component
+ # of the AMDGPU drm driver.
+ # It provides the HW control for display related functionalities.
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
+index 98854bc..9491187 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
+@@ -1,4 +1,25 @@
+ #
++# Copyright 2017 Advanced Micro Devices, Inc.
++#
++# Permission is hereby granted, free of charge, to any person obtaining a
++# copy of this software and associated documentation files (the "Software"),
++# to deal in the Software without restriction, including without limitation
++# the rights to use, copy, modify, merge, publish, distribute, sublicense,
++# and/or sell copies of the Software, and to permit persons to whom the
++# Software is furnished to do so, subject to the following conditions:
++#
++# The above copyright notice and this permission notice shall be included in
++# all copies or substantial portions of the Software.
++#
++# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++# OTHER DEALINGS IN THE SOFTWARE.
++#
++#
+ # Makefile for the 'dm' sub-component of DAL.
+ # It provides the control and status of dm blocks.
+
+diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
+index c6d36da..532a515 100644
+--- a/drivers/gpu/drm/amd/display/dc/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/Makefile
+@@ -1,4 +1,25 @@
+ #
++# Copyright 2017 Advanced Micro Devices, Inc.
++#
++# Permission is hereby granted, free of charge, to any person obtaining a
++# copy of this software and associated documentation files (the "Software"),
++# to deal in the Software without restriction, including without limitation
++# the rights to use, copy, modify, merge, publish, distribute, sublicense,
++# and/or sell copies of the Software, and to permit persons to whom the
++# Software is furnished to do so, subject to the following conditions:
++#
++# The above copyright notice and this permission notice shall be included in
++# all copies or substantial portions of the Software.
++#
++# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++# OTHER DEALINGS IN THE SOFTWARE.
++#
++#
+ # Makefile for Display Core (dc) component.
+ #
+
+diff --git a/drivers/gpu/drm/amd/display/dc/basics/Makefile b/drivers/gpu/drm/amd/display/dc/basics/Makefile
+index ccd94a3..a50a764 100644
+--- a/drivers/gpu/drm/amd/display/dc/basics/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/basics/Makefile
+@@ -1,4 +1,25 @@
+ #
++# Copyright 2017 Advanced Micro Devices, Inc.
++#
++# Permission is hereby granted, free of charge, to any person obtaining a
++# copy of this software and associated documentation files (the "Software"),
++# to deal in the Software without restriction, including without limitation
++# the rights to use, copy, modify, merge, publish, distribute, sublicense,
++# and/or sell copies of the Software, and to permit persons to whom the
++# Software is furnished to do so, subject to the following conditions:
++#
++# The above copyright notice and this permission notice shall be included in
++# all copies or substantial portions of the Software.
++#
++# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++# OTHER DEALINGS IN THE SOFTWARE.
++#
++#
+ # Makefile for the 'utils' sub-component of DAL.
+ # It provides the general basic services required by other DAL
+ # subcomponents.
+diff --git a/drivers/gpu/drm/amd/display/dc/bios/Makefile b/drivers/gpu/drm/amd/display/dc/bios/Makefile
+index 6ec815d..239e86b 100644
+--- a/drivers/gpu/drm/amd/display/dc/bios/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/bios/Makefile
+@@ -1,4 +1,25 @@
+ #
++# Copyright 2017 Advanced Micro Devices, Inc.
++#
++# Permission is hereby granted, free of charge, to any person obtaining a
++# copy of this software and associated documentation files (the "Software"),
++# to deal in the Software without restriction, including without limitation
++# the rights to use, copy, modify, merge, publish, distribute, sublicense,
++# and/or sell copies of the Software, and to permit persons to whom the
++# Software is furnished to do so, subject to the following conditions:
++#
++# The above copyright notice and this permission notice shall be included in
++# all copies or substantial portions of the Software.
++#
++# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++# OTHER DEALINGS IN THE SOFTWARE.
++#
++#
+ # Makefile for the 'bios' sub-component of DAL.
+ # It provides the parsing and executing controls for atom bios image.
+
+diff --git a/drivers/gpu/drm/amd/display/dc/calcs/Makefile b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
+index 5370f92..416500e 100644
+--- a/drivers/gpu/drm/amd/display/dc/calcs/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
+@@ -1,4 +1,25 @@
+ #
++# Copyright 2017 Advanced Micro Devices, Inc.
++#
++# Permission is hereby granted, free of charge, to any person obtaining a
++# copy of this software and associated documentation files (the "Software"),
++# to deal in the Software without restriction, including without limitation
++# the rights to use, copy, modify, merge, publish, distribute, sublicense,
++# and/or sell copies of the Software, and to permit persons to whom the
++# Software is furnished to do so, subject to the following conditions:
++#
++# The above copyright notice and this permission notice shall be included in
++# all copies or substantial portions of the Software.
++#
++# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++# OTHER DEALINGS IN THE SOFTWARE.
++#
++#
+ # Makefile for the 'calcs' sub-component of DAL.
+ # It calculates Bandwidth and Watermarks values for HW programming
+ #
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile b/drivers/gpu/drm/amd/display/dc/dce/Makefile
+index 36ef8b1..8f7f0e8 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/dce/Makefile
+@@ -1,4 +1,25 @@
+ #
++# Copyright 2017 Advanced Micro Devices, Inc.
++#
++# Permission is hereby granted, free of charge, to any person obtaining a
++# copy of this software and associated documentation files (the "Software"),
++# to deal in the Software without restriction, including without limitation
++# the rights to use, copy, modify, merge, publish, distribute, sublicense,
++# and/or sell copies of the Software, and to permit persons to whom the
++# Software is furnished to do so, subject to the following conditions:
++#
++# The above copyright notice and this permission notice shall be included in
++# all copies or substantial portions of the Software.
++#
++# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++# OTHER DEALINGS IN THE SOFTWARE.
++#
++#
+ # Makefile for common 'dce' logic
+ # HW object file under this folder follow similar pattern for HW programming
+ # - register offset and/or shift + mask stored in the dec_hw struct
+diff --git a/drivers/gpu/drm/amd/display/dc/dce100/Makefile b/drivers/gpu/drm/amd/display/dc/dce100/Makefile
+index ea40870..a822d4e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce100/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/dce100/Makefile
+@@ -1,4 +1,25 @@
+ #
++# Copyright 2017 Advanced Micro Devices, Inc.
++#
++# Permission is hereby granted, free of charge, to any person obtaining a
++# copy of this software and associated documentation files (the "Software"),
++# to deal in the Software without restriction, including without limitation
++# the rights to use, copy, modify, merge, publish, distribute, sublicense,
++# and/or sell copies of the Software, and to permit persons to whom the
++# Software is furnished to do so, subject to the following conditions:
++#
++# The above copyright notice and this permission notice shall be included in
++# all copies or substantial portions of the Software.
++#
++# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++# OTHER DEALINGS IN THE SOFTWARE.
++#
++#
+ # Makefile for the 'controller' sub-component of DAL.
+ # It provides the control and status of HW CRTC block.
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/Makefile b/drivers/gpu/drm/amd/display/dc/dce110/Makefile
+index 98d956e..d564c0e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/dce110/Makefile
+@@ -1,4 +1,25 @@
+ #
++# Copyright 2017 Advanced Micro Devices, Inc.
++#
++# Permission is hereby granted, free of charge, to any person obtaining a
++# copy of this software and associated documentation files (the "Software"),
++# to deal in the Software without restriction, including without limitation
++# the rights to use, copy, modify, merge, publish, distribute, sublicense,
++# and/or sell copies of the Software, and to permit persons to whom the
++# Software is furnished to do so, subject to the following conditions:
++#
++# The above copyright notice and this permission notice shall be included in
++# all copies or substantial portions of the Software.
++#
++# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++# OTHER DEALINGS IN THE SOFTWARE.
++#
++#
+ # Makefile for the 'controller' sub-component of DAL.
+ # It provides the control and status of HW CRTC block.
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce112/Makefile b/drivers/gpu/drm/amd/display/dc/dce112/Makefile
+index 265ac43..8e09044 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce112/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/dce112/Makefile
+@@ -1,4 +1,25 @@
+ #
++# Copyright 2017 Advanced Micro Devices, Inc.
++#
++# Permission is hereby granted, free of charge, to any person obtaining a
++# copy of this software and associated documentation files (the "Software"),
++# to deal in the Software without restriction, including without limitation
++# the rights to use, copy, modify, merge, publish, distribute, sublicense,
++# and/or sell copies of the Software, and to permit persons to whom the
++# Software is furnished to do so, subject to the following conditions:
++#
++# The above copyright notice and this permission notice shall be included in
++# all copies or substantial portions of the Software.
++#
++# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++# OTHER DEALINGS IN THE SOFTWARE.
++#
++#
+ # Makefile for the 'controller' sub-component of DAL.
+ # It provides the control and status of HW CRTC block.
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce120/Makefile b/drivers/gpu/drm/amd/display/dc/dce120/Makefile
+index 1779b96..37db1f8 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce120/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/dce120/Makefile
+@@ -1,4 +1,25 @@
+ #
++# Copyright 2017 Advanced Micro Devices, Inc.
++#
++# Permission is hereby granted, free of charge, to any person obtaining a
++# copy of this software and associated documentation files (the "Software"),
++# to deal in the Software without restriction, including without limitation
++# the rights to use, copy, modify, merge, publish, distribute, sublicense,
++# and/or sell copies of the Software, and to permit persons to whom the
++# Software is furnished to do so, subject to the following conditions:
++#
++# The above copyright notice and this permission notice shall be included in
++# all copies or substantial portions of the Software.
++#
++# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++# OTHER DEALINGS IN THE SOFTWARE.
++#
++#
+ # Makefile for the 'controller' sub-component of DAL.
+ # It provides the control and status of HW CRTC block.
+
+@@ -8,4 +29,4 @@ dce120_hw_sequencer.o
+
+ AMD_DAL_DCE120 = $(addprefix $(AMDDALPATH)/dc/dce120/,$(DCE120))
+
+-AMD_DISPLAY_FILES += $(AMD_DAL_DCE120)
+\ No newline at end of file
++AMD_DISPLAY_FILES += $(AMD_DAL_DCE120)
+diff --git a/drivers/gpu/drm/amd/display/dc/dce80/Makefile b/drivers/gpu/drm/amd/display/dc/dce80/Makefile
+index 43d8b3b..666fcb2 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce80/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/dce80/Makefile
+@@ -1,4 +1,25 @@
+ #
++# Copyright 2017 Advanced Micro Devices, Inc.
++#
++# Permission is hereby granted, free of charge, to any person obtaining a
++# copy of this software and associated documentation files (the "Software"),
++# to deal in the Software without restriction, including without limitation
++# the rights to use, copy, modify, merge, publish, distribute, sublicense,
++# and/or sell copies of the Software, and to permit persons to whom the
++# Software is furnished to do so, subject to the following conditions:
++#
++# The above copyright notice and this permission notice shall be included in
++# all copies or substantial portions of the Software.
++#
++# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++# OTHER DEALINGS IN THE SOFTWARE.
++#
++#
+ # Makefile for the 'controller' sub-component of DAL.
+ # It provides the control and status of HW CRTC block.
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
+index 9c4b93a..032f872 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
+@@ -1,4 +1,25 @@
+ #
++# Copyright 2017 Advanced Micro Devices, Inc.
++#
++# Permission is hereby granted, free of charge, to any person obtaining a
++# copy of this software and associated documentation files (the "Software"),
++# to deal in the Software without restriction, including without limitation
++# the rights to use, copy, modify, merge, publish, distribute, sublicense,
++# and/or sell copies of the Software, and to permit persons to whom the
++# Software is furnished to do so, subject to the following conditions:
++#
++# The above copyright notice and this permission notice shall be included in
++# all copies or substantial portions of the Software.
++#
++# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++# OTHER DEALINGS IN THE SOFTWARE.
++#
++#
+ # Makefile for DCN.
+
+ DCN10 = dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o dcn10_hw_sequencer_debug.o \
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
+index ec2475d..d97ca65 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
+@@ -1,4 +1,25 @@
+ #
++# Copyright 2017 Advanced Micro Devices, Inc.
++#
++# Permission is hereby granted, free of charge, to any person obtaining a
++# copy of this software and associated documentation files (the "Software"),
++# to deal in the Software without restriction, including without limitation
++# the rights to use, copy, modify, merge, publish, distribute, sublicense,
++# and/or sell copies of the Software, and to permit persons to whom the
++# Software is furnished to do so, subject to the following conditions:
++#
++# The above copyright notice and this permission notice shall be included in
++# all copies or substantial portions of the Software.
++#
++# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++# OTHER DEALINGS IN THE SOFTWARE.
++#
++#
+ # Makefile for the 'utils' sub-component of DAL.
+ # It provides the general basic services required by other DAL
+ # subcomponents.
+diff --git a/drivers/gpu/drm/amd/display/dc/gpio/Makefile b/drivers/gpu/drm/amd/display/dc/gpio/Makefile
+index 9d9dffe..b9d9930 100644
+--- a/drivers/gpu/drm/amd/display/dc/gpio/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/gpio/Makefile
+@@ -1,4 +1,25 @@
+ #
++# Copyright 2017 Advanced Micro Devices, Inc.
++#
++# Permission is hereby granted, free of charge, to any person obtaining a
++# copy of this software and associated documentation files (the "Software"),
++# to deal in the Software without restriction, including without limitation
++# the rights to use, copy, modify, merge, publish, distribute, sublicense,
++# and/or sell copies of the Software, and to permit persons to whom the
++# Software is furnished to do so, subject to the following conditions:
++#
++# The above copyright notice and this permission notice shall be included in
++# all copies or substantial portions of the Software.
++#
++# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++# OTHER DEALINGS IN THE SOFTWARE.
++#
++#
+ # Makefile for the 'gpio' sub-component of DAL.
+ # It provides the control and status of HW GPIO pins.
+
+diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/Makefile b/drivers/gpu/drm/amd/display/dc/i2caux/Makefile
+index c1870ee..a851d07 100644
+--- a/drivers/gpu/drm/amd/display/dc/i2caux/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/i2caux/Makefile
+@@ -1,4 +1,25 @@
+ #
++# Copyright 2017 Advanced Micro Devices, Inc.
++#
++# Permission is hereby granted, free of charge, to any person obtaining a
++# copy of this software and associated documentation files (the "Software"),
++# to deal in the Software without restriction, including without limitation
++# the rights to use, copy, modify, merge, publish, distribute, sublicense,
++# and/or sell copies of the Software, and to permit persons to whom the
++# Software is furnished to do so, subject to the following conditions:
++#
++# The above copyright notice and this permission notice shall be included in
++# all copies or substantial portions of the Software.
++#
++# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++# OTHER DEALINGS IN THE SOFTWARE.
++#
++#
+ # Makefile for the 'i2c' sub-component of DAL.
+ # It provides the control and status of HW i2c engine of the adapter.
+
+diff --git a/drivers/gpu/drm/amd/display/dc/irq/Makefile b/drivers/gpu/drm/amd/display/dc/irq/Makefile
+index b094a5b..a76ee60 100644
+--- a/drivers/gpu/drm/amd/display/dc/irq/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/irq/Makefile
+@@ -1,4 +1,25 @@
+ #
++# Copyright 2017 Advanced Micro Devices, Inc.
++#
++# Permission is hereby granted, free of charge, to any person obtaining a
++# copy of this software and associated documentation files (the "Software"),
++# to deal in the Software without restriction, including without limitation
++# the rights to use, copy, modify, merge, publish, distribute, sublicense,
++# and/or sell copies of the Software, and to permit persons to whom the
++# Software is furnished to do so, subject to the following conditions:
++#
++# The above copyright notice and this permission notice shall be included in
++# all copies or substantial portions of the Software.
++#
++# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++# OTHER DEALINGS IN THE SOFTWARE.
++#
++#
+ # Makefile for the 'audio' sub-component of DAL.
+ # It provides the control and status of HW adapter resources,
+ # that are global for the ASIC and sharable between pipes.
+diff --git a/drivers/gpu/drm/amd/display/dc/virtual/Makefile b/drivers/gpu/drm/amd/display/dc/virtual/Makefile
+index fc0b731..07326d2 100644
+--- a/drivers/gpu/drm/amd/display/dc/virtual/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/virtual/Makefile
+@@ -1,4 +1,25 @@
+ #
++# Copyright 2017 Advanced Micro Devices, Inc.
++#
++# Permission is hereby granted, free of charge, to any person obtaining a
++# copy of this software and associated documentation files (the "Software"),
++# to deal in the Software without restriction, including without limitation
++# the rights to use, copy, modify, merge, publish, distribute, sublicense,
++# and/or sell copies of the Software, and to permit persons to whom the
++# Software is furnished to do so, subject to the following conditions:
++#
++# The above copyright notice and this permission notice shall be included in
++# all copies or substantial portions of the Software.
++#
++# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++# OTHER DEALINGS IN THE SOFTWARE.
++#
++#
+ # Makefile for the virtual sub-component of DAL.
+ # It provides the control and status of HW CRTC block.
+
+diff --git a/drivers/gpu/drm/amd/display/modules/freesync/Makefile b/drivers/gpu/drm/amd/display/modules/freesync/Makefile
+index db8e0ff..fb9a499 100644
+--- a/drivers/gpu/drm/amd/display/modules/freesync/Makefile
++++ b/drivers/gpu/drm/amd/display/modules/freesync/Makefile
+@@ -1,4 +1,25 @@
+ #
++# Copyright 2017 Advanced Micro Devices, Inc.
++#
++# Permission is hereby granted, free of charge, to any person obtaining a
++# copy of this software and associated documentation files (the "Software"),
++# to deal in the Software without restriction, including without limitation
++# the rights to use, copy, modify, merge, publish, distribute, sublicense,
++# and/or sell copies of the Software, and to permit persons to whom the
++# Software is furnished to do so, subject to the following conditions:
++#
++# The above copyright notice and this permission notice shall be included in
++# all copies or substantial portions of the Software.
++#
++# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++# OTHER DEALINGS IN THE SOFTWARE.
++#
++#
+ # Makefile for the 'freesync' sub-module of DAL.
+ #
+
+diff --git a/drivers/gpu/drm/amd/lib/Makefile b/drivers/gpu/drm/amd/lib/Makefile
+index 2b95bf7..80ea119 100644
+--- a/drivers/gpu/drm/amd/lib/Makefile
++++ b/drivers/gpu/drm/amd/lib/Makefile
+@@ -1,4 +1,25 @@
+ #
++# Copyright 2017 Advanced Micro Devices, Inc.
++#
++# Permission is hereby granted, free of charge, to any person obtaining a
++# copy of this software and associated documentation files (the "Software"),
++# to deal in the Software without restriction, including without limitation
++# the rights to use, copy, modify, merge, publish, distribute, sublicense,
++# and/or sell copies of the Software, and to permit persons to whom the
++# Software is furnished to do so, subject to the following conditions:
++#
++# The above copyright notice and this permission notice shall be included in
++# all copies or substantial portions of the Software.
++#
++# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++# OTHER DEALINGS IN THE SOFTWARE.
++#
++#
+ # Makefile for AMD library routines, which are used by AMD driver
+ # components.
+ #
+diff --git a/drivers/gpu/drm/amd/powerplay/Makefile b/drivers/gpu/drm/amd/powerplay/Makefile
+index 8c55c6e..231785a 100644
+--- a/drivers/gpu/drm/amd/powerplay/Makefile
++++ b/drivers/gpu/drm/amd/powerplay/Makefile
+@@ -1,4 +1,24 @@
+-# SPDX-License-Identifier: GPL-2.0
++#
++# Copyright 2017 Advanced Micro Devices, Inc.
++#
++# Permission is hereby granted, free of charge, to any person obtaining a
++# copy of this software and associated documentation files (the "Software"),
++# to deal in the Software without restriction, including without limitation
++# the rights to use, copy, modify, merge, publish, distribute, sublicense,
++# and/or sell copies of the Software, and to permit persons to whom the
++# Software is furnished to do so, subject to the following conditions:
++#
++# The above copyright notice and this permission notice shall be included in
++# all copies or substantial portions of the Software.
++#
++# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++# OTHER DEALINGS IN THE SOFTWARE.
++#
+
+ subdir-ccflags-y += \
+ -I$(FULL_AMD_PATH)/powerplay/inc/ \
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
+index 95621c1..ade8973 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
+@@ -1,4 +1,24 @@
+-# SPDX-License-Identifier: GPL-2.0
++#
++# Copyright 2017 Advanced Micro Devices, Inc.
++#
++# Permission is hereby granted, free of charge, to any person obtaining a
++# copy of this software and associated documentation files (the "Software"),
++# to deal in the Software without restriction, including without limitation
++# the rights to use, copy, modify, merge, publish, distribute, sublicense,
++# and/or sell copies of the Software, and to permit persons to whom the
++# Software is furnished to do so, subject to the following conditions:
++#
++# The above copyright notice and this permission notice shall be included in
++# all copies or substantial portions of the Software.
++#
++# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++# OTHER DEALINGS IN THE SOFTWARE.
++#
+ #
+ # Makefile for the 'hw manager' sub-component of powerplay.
+ # It provides the hardware management services for the driver.
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
+index e9d8ad7..6c59c61 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
+@@ -1,4 +1,24 @@
+-# SPDX-License-Identifier: GPL-2.0
++#
++# Copyright 2017 Advanced Micro Devices, Inc.
++#
++# Permission is hereby granted, free of charge, to any person obtaining a
++# copy of this software and associated documentation files (the "Software"),
++# to deal in the Software without restriction, including without limitation
++# the rights to use, copy, modify, merge, publish, distribute, sublicense,
++# and/or sell copies of the Software, and to permit persons to whom the
++# Software is furnished to do so, subject to the following conditions:
++#
++# The above copyright notice and this permission notice shall be included in
++# all copies or substantial portions of the Software.
++#
++# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++# OTHER DEALINGS IN THE SOFTWARE.
++#
+ #
+ # Makefile for the 'smu manager' sub-component of powerplay.
+ # It provides the smu management services for the driver.
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5370-drm-amdgpu-Fix-header-file-dependencies.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5370-drm-amdgpu-Fix-header-file-dependencies.patch
new file mode 100644
index 00000000..8732b0ce
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5370-drm-amdgpu-Fix-header-file-dependencies.patch
@@ -0,0 +1,44 @@
+From 4129bca9481188d365ef6ed9747e853880d47289 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Tue, 6 Feb 2018 20:32:32 -0500
+Subject: [PATCH 5370/5725] drm/amdgpu: Fix header file dependencies
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 1 +
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 1 +
+ 2 files changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+index 6ea3827..6467c9c 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+@@ -26,6 +26,7 @@
+
+ #include <drm/amdgpu_drm.h>
+ #include <drm/gpu_scheduler.h>
++#include <drm/drm_print.h>
+
+ /* max number of rings */
+ #define AMDGPU_MAX_RINGS 21
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+index 2cc41ef..6adc59b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+@@ -32,6 +32,7 @@
+ #endif
+ #include <linux/rbtree.h>
+ #include <drm/gpu_scheduler.h>
++#include <drm/drm_file.h>
+
+ #include "amdgpu_sync.h"
+ #include "amdgpu_ring.h"
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5371-drm-amdgpu-re-enable-CGCG-on-CZ-and-disable-on-ST.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5371-drm-amdgpu-re-enable-CGCG-on-CZ-and-disable-on-ST.patch
new file mode 100644
index 00000000..b6f62ef2
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5371-drm-amdgpu-re-enable-CGCG-on-CZ-and-disable-on-ST.patch
@@ -0,0 +1,32 @@
+From 4ea963333ee188d2008cb3195ca3c2ee66353b26 Mon Sep 17 00:00:00 2001
+From: Shirish S <shirish.s@amd.com>
+Date: Mon, 5 Feb 2018 09:23:00 +0530
+Subject: [PATCH 5371/5725] drm/amdgpu: re-enable CGCG on CZ and disable on ST
+
+The CGCG feature on Stoney is causing GFX-related
+issues such as freezes and blank-outs.
+
+Signed-off-by: Shirish S <shirish.s@amd.com>
+Reviewed-by: Arindam Nath <arindam.nath@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/vi.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
+index 6789cc2..f9c62a7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vi.c
++++ b/drivers/gpu/drm/amd/amdgpu/vi.c
+@@ -1129,6 +1129,7 @@ static int vi_common_early_init(void *handle)
+ AMD_CG_SUPPORT_GFX_CP_LS |
+ AMD_CG_SUPPORT_GFX_CGTS |
+ AMD_CG_SUPPORT_GFX_CGTS_LS |
++ AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_BIF_LS |
+ AMD_CG_SUPPORT_HDP_MGCG |
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5372-drm-amdgpu-Handle-64-bit-return-from-drm_crtc_vblank.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5372-drm-amdgpu-Handle-64-bit-return-from-drm_crtc_vblank.patch
new file mode 100644
index 00000000..77fc1d51
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5372-drm-amdgpu-Handle-64-bit-return-from-drm_crtc_vblank.patch
@@ -0,0 +1,66 @@
+From ddb3b436f628535ac9e8e0fd0be9e6e62cc94e61 Mon Sep 17 00:00:00 2001
+From: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
+Date: Fri, 2 Feb 2018 21:12:56 -0800
+Subject: [PATCH 5372/5725] drm/amdgpu: Handle 64-bit return from
+ drm_crtc_vblank_count()
+
+570e86963a51 ("drm: Widen vblank count to 64-bits [v3]") changed the
+return type for drm_crtc_vblank_count() to u64. This could cause
+potential problems if the return value is used in arithmetic operations
+with a 32-bit reference HW vblank count. Explicitly typecasting this down
+to u32 either fixes a potential problem or serves to add clarity in case
+the typecasting was implicitly done.
+
+Cc: Keith Packard <keithp@keithp.com>
+Cc: Alex Deucher <alexander.deucher@amd.com>
+Cc: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
+Reviewed-by: Keith Packard <keithp@keithp.com>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com> for both this patch
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20180203051302.9974-4-dhinakaran.pandiyan@intel.com
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 2 +-
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 4 ++--
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+index 36e3ddf..e00b594 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+@@ -214,7 +214,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
+ amdgpu_bo_unreserve(new_abo);
+
+ work->base = amdgpu_bo_gpu_offset(new_abo);
+- work->target_vblank = target - drm_crtc_vblank_count(crtc) +
++ work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
+ amdgpu_get_vblank_counter_kms(dev, work->crtc_id);
+
+ /* we borrow the event spin lock for protecting flip_wrok */
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 569a75e..641f715 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -4354,7 +4354,7 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
+
+
+ /* Prepare wait for target vblank early - before the fence-waits */
+- target_vblank = target - drm_crtc_vblank_count(crtc) +
++ target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
+ amdgpu_get_vblank_counter_kms(crtc->dev, acrtc->crtc_id);
+
+ /*
+@@ -4597,7 +4597,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+ amdgpu_dm_do_flip(
+ crtc,
+ fb,
+- drm_crtc_vblank_count(crtc) + *wait_for_vblank,
++ (uint32_t)drm_crtc_vblank_count(crtc) + *wait_for_vblank,
+ dm_state->context);
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5373-drm-amdgpu-fix-module-parameter-descriptions.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5373-drm-amdgpu-fix-module-parameter-descriptions.patch
new file mode 100644
index 00000000..ce634429
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5373-drm-amdgpu-fix-module-parameter-descriptions.patch
@@ -0,0 +1,33 @@
+From 30050c32a2562d24ece29b986d09841d6e671db4 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Tue, 27 Feb 2018 11:44:31 -0500
+Subject: [PATCH 5373/5725] drm/amdgpu: fix module parameter descriptions
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Some were missing the closing parenthesis around the options.
+
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 1cb9a93..2e40e42 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -492,7 +492,7 @@ module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444);
+ * DOC: gpu_recovery (int)
+ * Set to enable GPU recovery mechanism (1 = enable, 0 = disable). The default is -1 (auto, disabled except SRIOV).
+ */
+-MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (1 = enable, 0 = disable, -1 = auto");
++MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (1 = enable, 0 = disable, -1 = auto)");
+ module_param_named(gpu_recovery, amdgpu_gpu_recovery, int, 0444);
+
+ /**
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5374-drm-amd-amdgpu-re-add-missing-GC-9.1-and-SDMA0-4.1-s.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5374-drm-amd-amdgpu-re-add-missing-GC-9.1-and-SDMA0-4.1-s.patch
new file mode 100644
index 00000000..5e6868a6
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5374-drm-amd-amdgpu-re-add-missing-GC-9.1-and-SDMA0-4.1-s.patch
@@ -0,0 +1,32841 @@
+From 2750068cf2097dcb976c2fa4c522214019d1f115 Mon Sep 17 00:00:00 2001
+From: Tom St Denis <tom.stdenis@amd.com>
+Date: Tue, 6 Mar 2018 10:52:41 -0500
+Subject: [PATCH 5374/5725] drm/amd/amdgpu: re-add missing GC 9.1 and SDMA0 4.1
+ sh_mask header files
+
+These are required by umr to properly parse bitfield offsets.
+
+Signed-off-by: Tom St Denis <tom.stdenis@amd.com>
+Reviewed-by: Alex Deucher <alexdeucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../drm/amd/include/asic_reg/gc/gc_9_1_sh_mask.h | 31150 +++++++++++++++++++
+ .../amd/include/asic_reg/sdma0/sdma0_4_1_sh_mask.h | 1658 +
+ 2 files changed, 32808 insertions(+)
+ create mode 100644 drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_1_sh_mask.h
+ create mode 100644 drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_1_sh_mask.h
+
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_1_sh_mask.h
+new file mode 100644
+index 0000000..13bfc2e
+--- /dev/null
++++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_1_sh_mask.h
+@@ -0,0 +1,31150 @@
++/*
++ * Copyright (C) 2017 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
++ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++#ifndef _gc_9_1_SH_MASK_HEADER
++#define _gc_9_1_SH_MASK_HEADER
++
++
++// addressBlock: gc_grbmdec
++//GRBM_CNTL
++#define GRBM_CNTL__READ_TIMEOUT__SHIFT 0x0
++#define GRBM_CNTL__REPORT_LAST_RDERR__SHIFT 0x1f
++#define GRBM_CNTL__READ_TIMEOUT_MASK 0x000000FFL
++#define GRBM_CNTL__REPORT_LAST_RDERR_MASK 0x80000000L
++//GRBM_SKEW_CNTL
++#define GRBM_SKEW_CNTL__SKEW_TOP_THRESHOLD__SHIFT 0x0
++#define GRBM_SKEW_CNTL__SKEW_COUNT__SHIFT 0x6
++#define GRBM_SKEW_CNTL__SKEW_TOP_THRESHOLD_MASK 0x0000003FL
++#define GRBM_SKEW_CNTL__SKEW_COUNT_MASK 0x00000FC0L
++//GRBM_STATUS2
++#define GRBM_STATUS2__ME0PIPE1_CMDFIFO_AVAIL__SHIFT 0x0
++#define GRBM_STATUS2__ME0PIPE1_CF_RQ_PENDING__SHIFT 0x4
++#define GRBM_STATUS2__ME0PIPE1_PF_RQ_PENDING__SHIFT 0x5
++#define GRBM_STATUS2__ME1PIPE0_RQ_PENDING__SHIFT 0x6
++#define GRBM_STATUS2__ME1PIPE1_RQ_PENDING__SHIFT 0x7
++#define GRBM_STATUS2__ME1PIPE2_RQ_PENDING__SHIFT 0x8
++#define GRBM_STATUS2__ME1PIPE3_RQ_PENDING__SHIFT 0x9
++#define GRBM_STATUS2__ME2PIPE0_RQ_PENDING__SHIFT 0xa
++#define GRBM_STATUS2__ME2PIPE1_RQ_PENDING__SHIFT 0xb
++#define GRBM_STATUS2__ME2PIPE2_RQ_PENDING__SHIFT 0xc
++#define GRBM_STATUS2__ME2PIPE3_RQ_PENDING__SHIFT 0xd
++#define GRBM_STATUS2__RLC_RQ_PENDING__SHIFT 0xe
++#define GRBM_STATUS2__UTCL2_BUSY__SHIFT 0xf
++#define GRBM_STATUS2__EA_BUSY__SHIFT 0x10
++#define GRBM_STATUS2__RMI_BUSY__SHIFT 0x11
++#define GRBM_STATUS2__UTCL2_RQ_PENDING__SHIFT 0x12
++#define GRBM_STATUS2__CPF_RQ_PENDING__SHIFT 0x13
++#define GRBM_STATUS2__EA_LINK_BUSY__SHIFT 0x14
++#define GRBM_STATUS2__RLC_BUSY__SHIFT 0x18
++#define GRBM_STATUS2__TC_BUSY__SHIFT 0x19
++#define GRBM_STATUS2__TCC_CC_RESIDENT__SHIFT 0x1a
++#define GRBM_STATUS2__CPF_BUSY__SHIFT 0x1c
++#define GRBM_STATUS2__CPC_BUSY__SHIFT 0x1d
++#define GRBM_STATUS2__CPG_BUSY__SHIFT 0x1e
++#define GRBM_STATUS2__CPAXI_BUSY__SHIFT 0x1f
++#define GRBM_STATUS2__ME0PIPE1_CMDFIFO_AVAIL_MASK 0x0000000FL
++#define GRBM_STATUS2__ME0PIPE1_CF_RQ_PENDING_MASK 0x00000010L
++#define GRBM_STATUS2__ME0PIPE1_PF_RQ_PENDING_MASK 0x00000020L
++#define GRBM_STATUS2__ME1PIPE0_RQ_PENDING_MASK 0x00000040L
++#define GRBM_STATUS2__ME1PIPE1_RQ_PENDING_MASK 0x00000080L
++#define GRBM_STATUS2__ME1PIPE2_RQ_PENDING_MASK 0x00000100L
++#define GRBM_STATUS2__ME1PIPE3_RQ_PENDING_MASK 0x00000200L
++#define GRBM_STATUS2__ME2PIPE0_RQ_PENDING_MASK 0x00000400L
++#define GRBM_STATUS2__ME2PIPE1_RQ_PENDING_MASK 0x00000800L
++#define GRBM_STATUS2__ME2PIPE2_RQ_PENDING_MASK 0x00001000L
++#define GRBM_STATUS2__ME2PIPE3_RQ_PENDING_MASK 0x00002000L
++#define GRBM_STATUS2__RLC_RQ_PENDING_MASK 0x00004000L
++#define GRBM_STATUS2__UTCL2_BUSY_MASK 0x00008000L
++#define GRBM_STATUS2__EA_BUSY_MASK 0x00010000L
++#define GRBM_STATUS2__RMI_BUSY_MASK 0x00020000L
++#define GRBM_STATUS2__UTCL2_RQ_PENDING_MASK 0x00040000L
++#define GRBM_STATUS2__CPF_RQ_PENDING_MASK 0x00080000L
++#define GRBM_STATUS2__EA_LINK_BUSY_MASK 0x00100000L
++#define GRBM_STATUS2__RLC_BUSY_MASK 0x01000000L
++#define GRBM_STATUS2__TC_BUSY_MASK 0x02000000L
++#define GRBM_STATUS2__TCC_CC_RESIDENT_MASK 0x04000000L
++#define GRBM_STATUS2__CPF_BUSY_MASK 0x10000000L
++#define GRBM_STATUS2__CPC_BUSY_MASK 0x20000000L
++#define GRBM_STATUS2__CPG_BUSY_MASK 0x40000000L
++#define GRBM_STATUS2__CPAXI_BUSY_MASK 0x80000000L
++//GRBM_PWR_CNTL
++#define GRBM_PWR_CNTL__ALL_REQ_TYPE__SHIFT 0x0
++#define GRBM_PWR_CNTL__GFX_REQ_TYPE__SHIFT 0x2
++#define GRBM_PWR_CNTL__ALL_RSP_TYPE__SHIFT 0x4
++#define GRBM_PWR_CNTL__GFX_RSP_TYPE__SHIFT 0x6
++#define GRBM_PWR_CNTL__GFX_REQ_EN__SHIFT 0xe
++#define GRBM_PWR_CNTL__ALL_REQ_EN__SHIFT 0xf
++#define GRBM_PWR_CNTL__ALL_REQ_TYPE_MASK 0x00000003L
++#define GRBM_PWR_CNTL__GFX_REQ_TYPE_MASK 0x0000000CL
++#define GRBM_PWR_CNTL__ALL_RSP_TYPE_MASK 0x00000030L
++#define GRBM_PWR_CNTL__GFX_RSP_TYPE_MASK 0x000000C0L
++#define GRBM_PWR_CNTL__GFX_REQ_EN_MASK 0x00004000L
++#define GRBM_PWR_CNTL__ALL_REQ_EN_MASK 0x00008000L
++//GRBM_STATUS
++#define GRBM_STATUS__ME0PIPE0_CMDFIFO_AVAIL__SHIFT 0x0
++#define GRBM_STATUS__RSMU_RQ_PENDING__SHIFT 0x5
++#define GRBM_STATUS__ME0PIPE0_CF_RQ_PENDING__SHIFT 0x7
++#define GRBM_STATUS__ME0PIPE0_PF_RQ_PENDING__SHIFT 0x8
++#define GRBM_STATUS__GDS_DMA_RQ_PENDING__SHIFT 0x9
++#define GRBM_STATUS__DB_CLEAN__SHIFT 0xc
++#define GRBM_STATUS__CB_CLEAN__SHIFT 0xd
++#define GRBM_STATUS__TA_BUSY__SHIFT 0xe
++#define GRBM_STATUS__GDS_BUSY__SHIFT 0xf
++#define GRBM_STATUS__WD_BUSY_NO_DMA__SHIFT 0x10
++#define GRBM_STATUS__VGT_BUSY__SHIFT 0x11
++#define GRBM_STATUS__IA_BUSY_NO_DMA__SHIFT 0x12
++#define GRBM_STATUS__IA_BUSY__SHIFT 0x13
++#define GRBM_STATUS__SX_BUSY__SHIFT 0x14
++#define GRBM_STATUS__WD_BUSY__SHIFT 0x15
++#define GRBM_STATUS__SPI_BUSY__SHIFT 0x16
++#define GRBM_STATUS__BCI_BUSY__SHIFT 0x17
++#define GRBM_STATUS__SC_BUSY__SHIFT 0x18
++#define GRBM_STATUS__PA_BUSY__SHIFT 0x19
++#define GRBM_STATUS__DB_BUSY__SHIFT 0x1a
++#define GRBM_STATUS__CP_COHERENCY_BUSY__SHIFT 0x1c
++#define GRBM_STATUS__CP_BUSY__SHIFT 0x1d
++#define GRBM_STATUS__CB_BUSY__SHIFT 0x1e
++#define GRBM_STATUS__GUI_ACTIVE__SHIFT 0x1f
++#define GRBM_STATUS__ME0PIPE0_CMDFIFO_AVAIL_MASK 0x0000000FL
++#define GRBM_STATUS__RSMU_RQ_PENDING_MASK 0x00000020L
++#define GRBM_STATUS__ME0PIPE0_CF_RQ_PENDING_MASK 0x00000080L
++#define GRBM_STATUS__ME0PIPE0_PF_RQ_PENDING_MASK 0x00000100L
++#define GRBM_STATUS__GDS_DMA_RQ_PENDING_MASK 0x00000200L
++#define GRBM_STATUS__DB_CLEAN_MASK 0x00001000L
++#define GRBM_STATUS__CB_CLEAN_MASK 0x00002000L
++#define GRBM_STATUS__TA_BUSY_MASK 0x00004000L
++#define GRBM_STATUS__GDS_BUSY_MASK 0x00008000L
++#define GRBM_STATUS__WD_BUSY_NO_DMA_MASK 0x00010000L
++#define GRBM_STATUS__VGT_BUSY_MASK 0x00020000L
++#define GRBM_STATUS__IA_BUSY_NO_DMA_MASK 0x00040000L
++#define GRBM_STATUS__IA_BUSY_MASK 0x00080000L
++#define GRBM_STATUS__SX_BUSY_MASK 0x00100000L
++#define GRBM_STATUS__WD_BUSY_MASK 0x00200000L
++#define GRBM_STATUS__SPI_BUSY_MASK 0x00400000L
++#define GRBM_STATUS__BCI_BUSY_MASK 0x00800000L
++#define GRBM_STATUS__SC_BUSY_MASK 0x01000000L
++#define GRBM_STATUS__PA_BUSY_MASK 0x02000000L
++#define GRBM_STATUS__DB_BUSY_MASK 0x04000000L
++#define GRBM_STATUS__CP_COHERENCY_BUSY_MASK 0x10000000L
++#define GRBM_STATUS__CP_BUSY_MASK 0x20000000L
++#define GRBM_STATUS__CB_BUSY_MASK 0x40000000L
++#define GRBM_STATUS__GUI_ACTIVE_MASK 0x80000000L
++//GRBM_STATUS_SE0
++#define GRBM_STATUS_SE0__DB_CLEAN__SHIFT 0x1
++#define GRBM_STATUS_SE0__CB_CLEAN__SHIFT 0x2
++#define GRBM_STATUS_SE0__RMI_BUSY__SHIFT 0x15
++#define GRBM_STATUS_SE0__BCI_BUSY__SHIFT 0x16
++#define GRBM_STATUS_SE0__VGT_BUSY__SHIFT 0x17
++#define GRBM_STATUS_SE0__PA_BUSY__SHIFT 0x18
++#define GRBM_STATUS_SE0__TA_BUSY__SHIFT 0x19
++#define GRBM_STATUS_SE0__SX_BUSY__SHIFT 0x1a
++#define GRBM_STATUS_SE0__SPI_BUSY__SHIFT 0x1b
++#define GRBM_STATUS_SE0__SC_BUSY__SHIFT 0x1d
++#define GRBM_STATUS_SE0__DB_BUSY__SHIFT 0x1e
++#define GRBM_STATUS_SE0__CB_BUSY__SHIFT 0x1f
++#define GRBM_STATUS_SE0__DB_CLEAN_MASK 0x00000002L
++#define GRBM_STATUS_SE0__CB_CLEAN_MASK 0x00000004L
++#define GRBM_STATUS_SE0__RMI_BUSY_MASK 0x00200000L
++#define GRBM_STATUS_SE0__BCI_BUSY_MASK 0x00400000L
++#define GRBM_STATUS_SE0__VGT_BUSY_MASK 0x00800000L
++#define GRBM_STATUS_SE0__PA_BUSY_MASK 0x01000000L
++#define GRBM_STATUS_SE0__TA_BUSY_MASK 0x02000000L
++#define GRBM_STATUS_SE0__SX_BUSY_MASK 0x04000000L
++#define GRBM_STATUS_SE0__SPI_BUSY_MASK 0x08000000L
++#define GRBM_STATUS_SE0__SC_BUSY_MASK 0x20000000L
++#define GRBM_STATUS_SE0__DB_BUSY_MASK 0x40000000L
++#define GRBM_STATUS_SE0__CB_BUSY_MASK 0x80000000L
++//GRBM_STATUS_SE1
++#define GRBM_STATUS_SE1__DB_CLEAN__SHIFT 0x1
++#define GRBM_STATUS_SE1__CB_CLEAN__SHIFT 0x2
++#define GRBM_STATUS_SE1__RMI_BUSY__SHIFT 0x15
++#define GRBM_STATUS_SE1__BCI_BUSY__SHIFT 0x16
++#define GRBM_STATUS_SE1__VGT_BUSY__SHIFT 0x17
++#define GRBM_STATUS_SE1__PA_BUSY__SHIFT 0x18
++#define GRBM_STATUS_SE1__TA_BUSY__SHIFT 0x19
++#define GRBM_STATUS_SE1__SX_BUSY__SHIFT 0x1a
++#define GRBM_STATUS_SE1__SPI_BUSY__SHIFT 0x1b
++#define GRBM_STATUS_SE1__SC_BUSY__SHIFT 0x1d
++#define GRBM_STATUS_SE1__DB_BUSY__SHIFT 0x1e
++#define GRBM_STATUS_SE1__CB_BUSY__SHIFT 0x1f
++#define GRBM_STATUS_SE1__DB_CLEAN_MASK 0x00000002L
++#define GRBM_STATUS_SE1__CB_CLEAN_MASK 0x00000004L
++#define GRBM_STATUS_SE1__RMI_BUSY_MASK 0x00200000L
++#define GRBM_STATUS_SE1__BCI_BUSY_MASK 0x00400000L
++#define GRBM_STATUS_SE1__VGT_BUSY_MASK 0x00800000L
++#define GRBM_STATUS_SE1__PA_BUSY_MASK 0x01000000L
++#define GRBM_STATUS_SE1__TA_BUSY_MASK 0x02000000L
++#define GRBM_STATUS_SE1__SX_BUSY_MASK 0x04000000L
++#define GRBM_STATUS_SE1__SPI_BUSY_MASK 0x08000000L
++#define GRBM_STATUS_SE1__SC_BUSY_MASK 0x20000000L
++#define GRBM_STATUS_SE1__DB_BUSY_MASK 0x40000000L
++#define GRBM_STATUS_SE1__CB_BUSY_MASK 0x80000000L
++//GRBM_SOFT_RESET
++#define GRBM_SOFT_RESET__SOFT_RESET_CP__SHIFT 0x0
++#define GRBM_SOFT_RESET__SOFT_RESET_RLC__SHIFT 0x2
++#define GRBM_SOFT_RESET__SOFT_RESET_GFX__SHIFT 0x10
++#define GRBM_SOFT_RESET__SOFT_RESET_CPF__SHIFT 0x11
++#define GRBM_SOFT_RESET__SOFT_RESET_CPC__SHIFT 0x12
++#define GRBM_SOFT_RESET__SOFT_RESET_CPG__SHIFT 0x13
++#define GRBM_SOFT_RESET__SOFT_RESET_CAC__SHIFT 0x14
++#define GRBM_SOFT_RESET__SOFT_RESET_CPAXI__SHIFT 0x15
++#define GRBM_SOFT_RESET__SOFT_RESET_EA__SHIFT 0x16
++#define GRBM_SOFT_RESET__SOFT_RESET_CP_MASK 0x00000001L
++#define GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK 0x00000004L
++#define GRBM_SOFT_RESET__SOFT_RESET_GFX_MASK 0x00010000L
++#define GRBM_SOFT_RESET__SOFT_RESET_CPF_MASK 0x00020000L
++#define GRBM_SOFT_RESET__SOFT_RESET_CPC_MASK 0x00040000L
++#define GRBM_SOFT_RESET__SOFT_RESET_CPG_MASK 0x00080000L
++#define GRBM_SOFT_RESET__SOFT_RESET_CAC_MASK 0x00100000L
++#define GRBM_SOFT_RESET__SOFT_RESET_CPAXI_MASK 0x00200000L
++#define GRBM_SOFT_RESET__SOFT_RESET_EA_MASK 0x00400000L
++//GRBM_CGTT_CLK_CNTL
++#define GRBM_CGTT_CLK_CNTL__ON_DELAY__SHIFT 0x0
++#define GRBM_CGTT_CLK_CNTL__OFF_HYSTERESIS__SHIFT 0x4
++#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
++#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
++#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
++#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
++#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
++#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
++#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
++#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
++#define GRBM_CGTT_CLK_CNTL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
++#define GRBM_CGTT_CLK_CNTL__ON_DELAY_MASK 0x0000000FL
++#define GRBM_CGTT_CLK_CNTL__OFF_HYSTERESIS_MASK 0x00000FF0L
++#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
++#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
++#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
++#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
++#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
++#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
++#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
++#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
++#define GRBM_CGTT_CLK_CNTL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
++//GRBM_GFX_CLKEN_CNTL
++#define GRBM_GFX_CLKEN_CNTL__PREFIX_DELAY_CNT__SHIFT 0x0
++#define GRBM_GFX_CLKEN_CNTL__POST_DELAY_CNT__SHIFT 0x8
++#define GRBM_GFX_CLKEN_CNTL__PREFIX_DELAY_CNT_MASK 0x0000000FL
++#define GRBM_GFX_CLKEN_CNTL__POST_DELAY_CNT_MASK 0x00001F00L
++//GRBM_WAIT_IDLE_CLOCKS
++#define GRBM_WAIT_IDLE_CLOCKS__WAIT_IDLE_CLOCKS__SHIFT 0x0
++#define GRBM_WAIT_IDLE_CLOCKS__WAIT_IDLE_CLOCKS_MASK 0x000000FFL
++//GRBM_STATUS_SE2
++#define GRBM_STATUS_SE2__DB_CLEAN__SHIFT 0x1
++#define GRBM_STATUS_SE2__CB_CLEAN__SHIFT 0x2
++#define GRBM_STATUS_SE2__RMI_BUSY__SHIFT 0x15
++#define GRBM_STATUS_SE2__BCI_BUSY__SHIFT 0x16
++#define GRBM_STATUS_SE2__VGT_BUSY__SHIFT 0x17
++#define GRBM_STATUS_SE2__PA_BUSY__SHIFT 0x18
++#define GRBM_STATUS_SE2__TA_BUSY__SHIFT 0x19
++#define GRBM_STATUS_SE2__SX_BUSY__SHIFT 0x1a
++#define GRBM_STATUS_SE2__SPI_BUSY__SHIFT 0x1b
++#define GRBM_STATUS_SE2__SC_BUSY__SHIFT 0x1d
++#define GRBM_STATUS_SE2__DB_BUSY__SHIFT 0x1e
++#define GRBM_STATUS_SE2__CB_BUSY__SHIFT 0x1f
++#define GRBM_STATUS_SE2__DB_CLEAN_MASK 0x00000002L
++#define GRBM_STATUS_SE2__CB_CLEAN_MASK 0x00000004L
++#define GRBM_STATUS_SE2__RMI_BUSY_MASK 0x00200000L
++#define GRBM_STATUS_SE2__BCI_BUSY_MASK 0x00400000L
++#define GRBM_STATUS_SE2__VGT_BUSY_MASK 0x00800000L
++#define GRBM_STATUS_SE2__PA_BUSY_MASK 0x01000000L
++#define GRBM_STATUS_SE2__TA_BUSY_MASK 0x02000000L
++#define GRBM_STATUS_SE2__SX_BUSY_MASK 0x04000000L
++#define GRBM_STATUS_SE2__SPI_BUSY_MASK 0x08000000L
++#define GRBM_STATUS_SE2__SC_BUSY_MASK 0x20000000L
++#define GRBM_STATUS_SE2__DB_BUSY_MASK 0x40000000L
++#define GRBM_STATUS_SE2__CB_BUSY_MASK 0x80000000L
++//GRBM_STATUS_SE3
++#define GRBM_STATUS_SE3__DB_CLEAN__SHIFT 0x1
++#define GRBM_STATUS_SE3__CB_CLEAN__SHIFT 0x2
++#define GRBM_STATUS_SE3__RMI_BUSY__SHIFT 0x15
++#define GRBM_STATUS_SE3__BCI_BUSY__SHIFT 0x16
++#define GRBM_STATUS_SE3__VGT_BUSY__SHIFT 0x17
++#define GRBM_STATUS_SE3__PA_BUSY__SHIFT 0x18
++#define GRBM_STATUS_SE3__TA_BUSY__SHIFT 0x19
++#define GRBM_STATUS_SE3__SX_BUSY__SHIFT 0x1a
++#define GRBM_STATUS_SE3__SPI_BUSY__SHIFT 0x1b
++#define GRBM_STATUS_SE3__SC_BUSY__SHIFT 0x1d
++#define GRBM_STATUS_SE3__DB_BUSY__SHIFT 0x1e
++#define GRBM_STATUS_SE3__CB_BUSY__SHIFT 0x1f
++#define GRBM_STATUS_SE3__DB_CLEAN_MASK 0x00000002L
++#define GRBM_STATUS_SE3__CB_CLEAN_MASK 0x00000004L
++#define GRBM_STATUS_SE3__RMI_BUSY_MASK 0x00200000L
++#define GRBM_STATUS_SE3__BCI_BUSY_MASK 0x00400000L
++#define GRBM_STATUS_SE3__VGT_BUSY_MASK 0x00800000L
++#define GRBM_STATUS_SE3__PA_BUSY_MASK 0x01000000L
++#define GRBM_STATUS_SE3__TA_BUSY_MASK 0x02000000L
++#define GRBM_STATUS_SE3__SX_BUSY_MASK 0x04000000L
++#define GRBM_STATUS_SE3__SPI_BUSY_MASK 0x08000000L
++#define GRBM_STATUS_SE3__SC_BUSY_MASK 0x20000000L
++#define GRBM_STATUS_SE3__DB_BUSY_MASK 0x40000000L
++#define GRBM_STATUS_SE3__CB_BUSY_MASK 0x80000000L
++//GRBM_READ_ERROR
++#define GRBM_READ_ERROR__READ_ADDRESS__SHIFT 0x2
++#define GRBM_READ_ERROR__READ_PIPEID__SHIFT 0x14
++#define GRBM_READ_ERROR__READ_MEID__SHIFT 0x16
++#define GRBM_READ_ERROR__READ_ERROR__SHIFT 0x1f
++#define GRBM_READ_ERROR__READ_ADDRESS_MASK 0x0003FFFCL
++#define GRBM_READ_ERROR__READ_PIPEID_MASK 0x00300000L
++#define GRBM_READ_ERROR__READ_MEID_MASK 0x00C00000L
++#define GRBM_READ_ERROR__READ_ERROR_MASK 0x80000000L
++//GRBM_READ_ERROR2
++#define GRBM_READ_ERROR2__READ_REQUESTER_CPF__SHIFT 0x10
++#define GRBM_READ_ERROR2__READ_REQUESTER_RSMU__SHIFT 0x11
++#define GRBM_READ_ERROR2__READ_REQUESTER_RLC__SHIFT 0x12
++#define GRBM_READ_ERROR2__READ_REQUESTER_GDS_DMA__SHIFT 0x13
++#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_CF__SHIFT 0x14
++#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_PF__SHIFT 0x15
++#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE1_CF__SHIFT 0x16
++#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE1_PF__SHIFT 0x17
++#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE0__SHIFT 0x18
++#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE1__SHIFT 0x19
++#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE2__SHIFT 0x1a
++#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE3__SHIFT 0x1b
++#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE0__SHIFT 0x1c
++#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE1__SHIFT 0x1d
++#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE2__SHIFT 0x1e
++#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE3__SHIFT 0x1f
++#define GRBM_READ_ERROR2__READ_REQUESTER_CPF_MASK 0x00010000L
++#define GRBM_READ_ERROR2__READ_REQUESTER_RSMU_MASK 0x00020000L
++#define GRBM_READ_ERROR2__READ_REQUESTER_RLC_MASK 0x00040000L
++#define GRBM_READ_ERROR2__READ_REQUESTER_GDS_DMA_MASK 0x00080000L
++#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_CF_MASK 0x00100000L
++#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_PF_MASK 0x00200000L
++#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE1_CF_MASK 0x00400000L
++#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE1_PF_MASK 0x00800000L
++#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE0_MASK 0x01000000L
++#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE1_MASK 0x02000000L
++#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE2_MASK 0x04000000L
++#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE3_MASK 0x08000000L
++#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE0_MASK 0x10000000L
++#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE1_MASK 0x20000000L
++#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE2_MASK 0x40000000L
++#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE3_MASK 0x80000000L
++//GRBM_INT_CNTL
++#define GRBM_INT_CNTL__RDERR_INT_ENABLE__SHIFT 0x0
++#define GRBM_INT_CNTL__GUI_IDLE_INT_ENABLE__SHIFT 0x13
++#define GRBM_INT_CNTL__RDERR_INT_ENABLE_MASK 0x00000001L
++#define GRBM_INT_CNTL__GUI_IDLE_INT_ENABLE_MASK 0x00080000L
++//GRBM_TRAP_OP
++#define GRBM_TRAP_OP__RW__SHIFT 0x0
++#define GRBM_TRAP_OP__RW_MASK 0x00000001L
++//GRBM_TRAP_ADDR
++#define GRBM_TRAP_ADDR__DATA__SHIFT 0x0
++#define GRBM_TRAP_ADDR__DATA_MASK 0x0003FFFFL
++//GRBM_TRAP_ADDR_MSK
++#define GRBM_TRAP_ADDR_MSK__DATA__SHIFT 0x0
++#define GRBM_TRAP_ADDR_MSK__DATA_MASK 0x0003FFFFL
++//GRBM_TRAP_WD
++#define GRBM_TRAP_WD__DATA__SHIFT 0x0
++#define GRBM_TRAP_WD__DATA_MASK 0xFFFFFFFFL
++//GRBM_TRAP_WD_MSK
++#define GRBM_TRAP_WD_MSK__DATA__SHIFT 0x0
++#define GRBM_TRAP_WD_MSK__DATA_MASK 0xFFFFFFFFL
++//GRBM_DSM_BYPASS
++#define GRBM_DSM_BYPASS__BYPASS_BITS__SHIFT 0x0
++#define GRBM_DSM_BYPASS__BYPASS_EN__SHIFT 0x2
++#define GRBM_DSM_BYPASS__BYPASS_BITS_MASK 0x00000003L
++#define GRBM_DSM_BYPASS__BYPASS_EN_MASK 0x00000004L
++//GRBM_WRITE_ERROR
++#define GRBM_WRITE_ERROR__WRITE_REQUESTER_RLC__SHIFT 0x0
++#define GRBM_WRITE_ERROR__WRITE_REQUESTER_RSMU__SHIFT 0x1
++#define GRBM_WRITE_ERROR__WRITE_SSRCID__SHIFT 0x2
++#define GRBM_WRITE_ERROR__WRITE_VFID__SHIFT 0x5
++#define GRBM_WRITE_ERROR__WRITE_VF__SHIFT 0xc
++#define GRBM_WRITE_ERROR__WRITE_VMID__SHIFT 0xd
++#define GRBM_WRITE_ERROR__WRITE_PIPEID__SHIFT 0x14
++#define GRBM_WRITE_ERROR__WRITE_MEID__SHIFT 0x16
++#define GRBM_WRITE_ERROR__WRITE_ERROR__SHIFT 0x1f
++#define GRBM_WRITE_ERROR__WRITE_REQUESTER_RLC_MASK 0x00000001L
++#define GRBM_WRITE_ERROR__WRITE_REQUESTER_RSMU_MASK 0x00000002L
++#define GRBM_WRITE_ERROR__WRITE_SSRCID_MASK 0x0000001CL
++#define GRBM_WRITE_ERROR__WRITE_VFID_MASK 0x000001E0L
++#define GRBM_WRITE_ERROR__WRITE_VF_MASK 0x00001000L
++#define GRBM_WRITE_ERROR__WRITE_VMID_MASK 0x0001E000L
++#define GRBM_WRITE_ERROR__WRITE_PIPEID_MASK 0x00300000L
++#define GRBM_WRITE_ERROR__WRITE_MEID_MASK 0x00C00000L
++#define GRBM_WRITE_ERROR__WRITE_ERROR_MASK 0x80000000L
++//GRBM_IOV_ERROR
++#define GRBM_IOV_ERROR__IOV_ADDR__SHIFT 0x2
++#define GRBM_IOV_ERROR__IOV_VFID__SHIFT 0x14
++#define GRBM_IOV_ERROR__IOV_VF__SHIFT 0x1a
++#define GRBM_IOV_ERROR__IOV_OP__SHIFT 0x1b
++#define GRBM_IOV_ERROR__IOV_ERROR__SHIFT 0x1f
++#define GRBM_IOV_ERROR__IOV_ADDR_MASK 0x000FFFFCL
++#define GRBM_IOV_ERROR__IOV_VFID_MASK 0x03F00000L
++#define GRBM_IOV_ERROR__IOV_VF_MASK 0x04000000L
++#define GRBM_IOV_ERROR__IOV_OP_MASK 0x08000000L
++#define GRBM_IOV_ERROR__IOV_ERROR_MASK 0x80000000L
++//GRBM_CHIP_REVISION
++#define GRBM_CHIP_REVISION__CHIP_REVISION__SHIFT 0x0
++#define GRBM_CHIP_REVISION__CHIP_REVISION_MASK 0x000000FFL
++//GRBM_GFX_CNTL
++#define GRBM_GFX_CNTL__PIPEID__SHIFT 0x0
++#define GRBM_GFX_CNTL__MEID__SHIFT 0x2
++#define GRBM_GFX_CNTL__VMID__SHIFT 0x4
++#define GRBM_GFX_CNTL__QUEUEID__SHIFT 0x8
++#define GRBM_GFX_CNTL__PIPEID_MASK 0x00000003L
++#define GRBM_GFX_CNTL__MEID_MASK 0x0000000CL
++#define GRBM_GFX_CNTL__VMID_MASK 0x000000F0L
++#define GRBM_GFX_CNTL__QUEUEID_MASK 0x00000700L
++//GRBM_RSMU_CFG
++#define GRBM_RSMU_CFG__APERTURE_ID__SHIFT 0x0
++#define GRBM_RSMU_CFG__QOS__SHIFT 0xc
++#define GRBM_RSMU_CFG__POSTED_WR__SHIFT 0x10
++#define GRBM_RSMU_CFG__APERTURE_ID_MASK 0x00000FFFL
++#define GRBM_RSMU_CFG__QOS_MASK 0x0000F000L
++#define GRBM_RSMU_CFG__POSTED_WR_MASK 0x00010000L
++//GRBM_IH_CREDIT
++#define GRBM_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
++#define GRBM_IH_CREDIT__IH_CLIENT_ID__SHIFT 0x10
++#define GRBM_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
++#define GRBM_IH_CREDIT__IH_CLIENT_ID_MASK 0x00FF0000L
++//GRBM_PWR_CNTL2
++#define GRBM_PWR_CNTL2__PWR_REQUEST_HALT__SHIFT 0x10
++#define GRBM_PWR_CNTL2__PWR_GFX3D_REQUEST_HALT__SHIFT 0x14
++#define GRBM_PWR_CNTL2__PWR_REQUEST_HALT_MASK 0x00010000L
++#define GRBM_PWR_CNTL2__PWR_GFX3D_REQUEST_HALT_MASK 0x00100000L
++//GRBM_UTCL2_INVAL_RANGE_START
++#define GRBM_UTCL2_INVAL_RANGE_START__DATA__SHIFT 0x0
++#define GRBM_UTCL2_INVAL_RANGE_START__DATA_MASK 0x0003FFFFL
++//GRBM_UTCL2_INVAL_RANGE_END
++#define GRBM_UTCL2_INVAL_RANGE_END__DATA__SHIFT 0x0
++#define GRBM_UTCL2_INVAL_RANGE_END__DATA_MASK 0x0003FFFFL
++//GRBM_RSMU_READ_ERROR
++#define GRBM_RSMU_READ_ERROR__RSMU_READ_ADDRESS__SHIFT 0x2
++#define GRBM_RSMU_READ_ERROR__RSMU_READ_VF__SHIFT 0x14
++#define GRBM_RSMU_READ_ERROR__RSMU_READ_VFID__SHIFT 0x15
++#define GRBM_RSMU_READ_ERROR__RSMU_READ_ERROR_TYPE__SHIFT 0x1b
++#define GRBM_RSMU_READ_ERROR__RSMU_READ_ERROR__SHIFT 0x1f
++#define GRBM_RSMU_READ_ERROR__RSMU_READ_ADDRESS_MASK 0x000FFFFCL
++#define GRBM_RSMU_READ_ERROR__RSMU_READ_VF_MASK 0x00100000L
++#define GRBM_RSMU_READ_ERROR__RSMU_READ_VFID_MASK 0x07E00000L
++#define GRBM_RSMU_READ_ERROR__RSMU_READ_ERROR_TYPE_MASK 0x08000000L
++#define GRBM_RSMU_READ_ERROR__RSMU_READ_ERROR_MASK 0x80000000L
++//GRBM_CHICKEN_BITS
++#define GRBM_CHICKEN_BITS__DISABLE_CP_VMID_RESET_REQ__SHIFT 0x0
++#define GRBM_CHICKEN_BITS__DISABLE_CP_VMID_RESET_REQ_MASK 0x00000001L
++//GRBM_NOWHERE
++#define GRBM_NOWHERE__DATA__SHIFT 0x0
++#define GRBM_NOWHERE__DATA_MASK 0xFFFFFFFFL
++//GRBM_SCRATCH_REG0
++#define GRBM_SCRATCH_REG0__SCRATCH_REG0__SHIFT 0x0
++#define GRBM_SCRATCH_REG0__SCRATCH_REG0_MASK 0xFFFFFFFFL
++//GRBM_SCRATCH_REG1
++#define GRBM_SCRATCH_REG1__SCRATCH_REG1__SHIFT 0x0
++#define GRBM_SCRATCH_REG1__SCRATCH_REG1_MASK 0xFFFFFFFFL
++//GRBM_SCRATCH_REG2
++#define GRBM_SCRATCH_REG2__SCRATCH_REG2__SHIFT 0x0
++#define GRBM_SCRATCH_REG2__SCRATCH_REG2_MASK 0xFFFFFFFFL
++//GRBM_SCRATCH_REG3
++#define GRBM_SCRATCH_REG3__SCRATCH_REG3__SHIFT 0x0
++#define GRBM_SCRATCH_REG3__SCRATCH_REG3_MASK 0xFFFFFFFFL
++//GRBM_SCRATCH_REG4
++#define GRBM_SCRATCH_REG4__SCRATCH_REG4__SHIFT 0x0
++#define GRBM_SCRATCH_REG4__SCRATCH_REG4_MASK 0xFFFFFFFFL
++//GRBM_SCRATCH_REG5
++#define GRBM_SCRATCH_REG5__SCRATCH_REG5__SHIFT 0x0
++#define GRBM_SCRATCH_REG5__SCRATCH_REG5_MASK 0xFFFFFFFFL
++//GRBM_SCRATCH_REG6
++#define GRBM_SCRATCH_REG6__SCRATCH_REG6__SHIFT 0x0
++#define GRBM_SCRATCH_REG6__SCRATCH_REG6_MASK 0xFFFFFFFFL
++//GRBM_SCRATCH_REG7
++#define GRBM_SCRATCH_REG7__SCRATCH_REG7__SHIFT 0x0
++#define GRBM_SCRATCH_REG7__SCRATCH_REG7_MASK 0xFFFFFFFFL
++
++
++// addressBlock: gc_cpdec
++//CP_CPC_STATUS
++#define CP_CPC_STATUS__MEC1_BUSY__SHIFT 0x0
++#define CP_CPC_STATUS__MEC2_BUSY__SHIFT 0x1
++#define CP_CPC_STATUS__DC0_BUSY__SHIFT 0x2
++#define CP_CPC_STATUS__DC1_BUSY__SHIFT 0x3
++#define CP_CPC_STATUS__RCIU1_BUSY__SHIFT 0x4
++#define CP_CPC_STATUS__RCIU2_BUSY__SHIFT 0x5
++#define CP_CPC_STATUS__ROQ1_BUSY__SHIFT 0x6
++#define CP_CPC_STATUS__ROQ2_BUSY__SHIFT 0x7
++#define CP_CPC_STATUS__TCIU_BUSY__SHIFT 0xa
++#define CP_CPC_STATUS__SCRATCH_RAM_BUSY__SHIFT 0xb
++#define CP_CPC_STATUS__QU_BUSY__SHIFT 0xc
++#define CP_CPC_STATUS__UTCL2IU_BUSY__SHIFT 0xd
++#define CP_CPC_STATUS__SAVE_RESTORE_BUSY__SHIFT 0xe
++#define CP_CPC_STATUS__CPG_CPC_BUSY__SHIFT 0x1d
++#define CP_CPC_STATUS__CPF_CPC_BUSY__SHIFT 0x1e
++#define CP_CPC_STATUS__CPC_BUSY__SHIFT 0x1f
++#define CP_CPC_STATUS__MEC1_BUSY_MASK 0x00000001L
++#define CP_CPC_STATUS__MEC2_BUSY_MASK 0x00000002L
++#define CP_CPC_STATUS__DC0_BUSY_MASK 0x00000004L
++#define CP_CPC_STATUS__DC1_BUSY_MASK 0x00000008L
++#define CP_CPC_STATUS__RCIU1_BUSY_MASK 0x00000010L
++#define CP_CPC_STATUS__RCIU2_BUSY_MASK 0x00000020L
++#define CP_CPC_STATUS__ROQ1_BUSY_MASK 0x00000040L
++#define CP_CPC_STATUS__ROQ2_BUSY_MASK 0x00000080L
++#define CP_CPC_STATUS__TCIU_BUSY_MASK 0x00000400L
++#define CP_CPC_STATUS__SCRATCH_RAM_BUSY_MASK 0x00000800L
++#define CP_CPC_STATUS__QU_BUSY_MASK 0x00001000L
++#define CP_CPC_STATUS__UTCL2IU_BUSY_MASK 0x00002000L
++#define CP_CPC_STATUS__SAVE_RESTORE_BUSY_MASK 0x00004000L
++#define CP_CPC_STATUS__CPG_CPC_BUSY_MASK 0x20000000L
++#define CP_CPC_STATUS__CPF_CPC_BUSY_MASK 0x40000000L
++#define CP_CPC_STATUS__CPC_BUSY_MASK 0x80000000L
++//CP_CPC_BUSY_STAT
++#define CP_CPC_BUSY_STAT__MEC1_LOAD_BUSY__SHIFT 0x0
++#define CP_CPC_BUSY_STAT__MEC1_SEMAPOHRE_BUSY__SHIFT 0x1
++#define CP_CPC_BUSY_STAT__MEC1_MUTEX_BUSY__SHIFT 0x2
++#define CP_CPC_BUSY_STAT__MEC1_MESSAGE_BUSY__SHIFT 0x3
++#define CP_CPC_BUSY_STAT__MEC1_EOP_QUEUE_BUSY__SHIFT 0x4
++#define CP_CPC_BUSY_STAT__MEC1_IQ_QUEUE_BUSY__SHIFT 0x5
++#define CP_CPC_BUSY_STAT__MEC1_IB_QUEUE_BUSY__SHIFT 0x6
++#define CP_CPC_BUSY_STAT__MEC1_TC_BUSY__SHIFT 0x7
++#define CP_CPC_BUSY_STAT__MEC1_DMA_BUSY__SHIFT 0x8
++#define CP_CPC_BUSY_STAT__MEC1_PARTIAL_FLUSH_BUSY__SHIFT 0x9
++#define CP_CPC_BUSY_STAT__MEC1_PIPE0_BUSY__SHIFT 0xa
++#define CP_CPC_BUSY_STAT__MEC1_PIPE1_BUSY__SHIFT 0xb
++#define CP_CPC_BUSY_STAT__MEC1_PIPE2_BUSY__SHIFT 0xc
++#define CP_CPC_BUSY_STAT__MEC1_PIPE3_BUSY__SHIFT 0xd
++#define CP_CPC_BUSY_STAT__MEC2_LOAD_BUSY__SHIFT 0x10
++#define CP_CPC_BUSY_STAT__MEC2_SEMAPOHRE_BUSY__SHIFT 0x11
++#define CP_CPC_BUSY_STAT__MEC2_MUTEX_BUSY__SHIFT 0x12
++#define CP_CPC_BUSY_STAT__MEC2_MESSAGE_BUSY__SHIFT 0x13
++#define CP_CPC_BUSY_STAT__MEC2_EOP_QUEUE_BUSY__SHIFT 0x14
++#define CP_CPC_BUSY_STAT__MEC2_IQ_QUEUE_BUSY__SHIFT 0x15
++#define CP_CPC_BUSY_STAT__MEC2_IB_QUEUE_BUSY__SHIFT 0x16
++#define CP_CPC_BUSY_STAT__MEC2_TC_BUSY__SHIFT 0x17
++#define CP_CPC_BUSY_STAT__MEC2_DMA_BUSY__SHIFT 0x18
++#define CP_CPC_BUSY_STAT__MEC2_PARTIAL_FLUSH_BUSY__SHIFT 0x19
++#define CP_CPC_BUSY_STAT__MEC2_PIPE0_BUSY__SHIFT 0x1a
++#define CP_CPC_BUSY_STAT__MEC2_PIPE1_BUSY__SHIFT 0x1b
++#define CP_CPC_BUSY_STAT__MEC2_PIPE2_BUSY__SHIFT 0x1c
++#define CP_CPC_BUSY_STAT__MEC2_PIPE3_BUSY__SHIFT 0x1d
++#define CP_CPC_BUSY_STAT__MEC1_LOAD_BUSY_MASK 0x00000001L
++#define CP_CPC_BUSY_STAT__MEC1_SEMAPOHRE_BUSY_MASK 0x00000002L
++#define CP_CPC_BUSY_STAT__MEC1_MUTEX_BUSY_MASK 0x00000004L
++#define CP_CPC_BUSY_STAT__MEC1_MESSAGE_BUSY_MASK 0x00000008L
++#define CP_CPC_BUSY_STAT__MEC1_EOP_QUEUE_BUSY_MASK 0x00000010L
++#define CP_CPC_BUSY_STAT__MEC1_IQ_QUEUE_BUSY_MASK 0x00000020L
++#define CP_CPC_BUSY_STAT__MEC1_IB_QUEUE_BUSY_MASK 0x00000040L
++#define CP_CPC_BUSY_STAT__MEC1_TC_BUSY_MASK 0x00000080L
++#define CP_CPC_BUSY_STAT__MEC1_DMA_BUSY_MASK 0x00000100L
++#define CP_CPC_BUSY_STAT__MEC1_PARTIAL_FLUSH_BUSY_MASK 0x00000200L
++#define CP_CPC_BUSY_STAT__MEC1_PIPE0_BUSY_MASK 0x00000400L
++#define CP_CPC_BUSY_STAT__MEC1_PIPE1_BUSY_MASK 0x00000800L
++#define CP_CPC_BUSY_STAT__MEC1_PIPE2_BUSY_MASK 0x00001000L
++#define CP_CPC_BUSY_STAT__MEC1_PIPE3_BUSY_MASK 0x00002000L
++#define CP_CPC_BUSY_STAT__MEC2_LOAD_BUSY_MASK 0x00010000L
++#define CP_CPC_BUSY_STAT__MEC2_SEMAPOHRE_BUSY_MASK 0x00020000L
++#define CP_CPC_BUSY_STAT__MEC2_MUTEX_BUSY_MASK 0x00040000L
++#define CP_CPC_BUSY_STAT__MEC2_MESSAGE_BUSY_MASK 0x00080000L
++#define CP_CPC_BUSY_STAT__MEC2_EOP_QUEUE_BUSY_MASK 0x00100000L
++#define CP_CPC_BUSY_STAT__MEC2_IQ_QUEUE_BUSY_MASK 0x00200000L
++#define CP_CPC_BUSY_STAT__MEC2_IB_QUEUE_BUSY_MASK 0x00400000L
++#define CP_CPC_BUSY_STAT__MEC2_TC_BUSY_MASK 0x00800000L
++#define CP_CPC_BUSY_STAT__MEC2_DMA_BUSY_MASK 0x01000000L
++#define CP_CPC_BUSY_STAT__MEC2_PARTIAL_FLUSH_BUSY_MASK 0x02000000L
++#define CP_CPC_BUSY_STAT__MEC2_PIPE0_BUSY_MASK 0x04000000L
++#define CP_CPC_BUSY_STAT__MEC2_PIPE1_BUSY_MASK 0x08000000L
++#define CP_CPC_BUSY_STAT__MEC2_PIPE2_BUSY_MASK 0x10000000L
++#define CP_CPC_BUSY_STAT__MEC2_PIPE3_BUSY_MASK 0x20000000L
++//CP_CPC_STALLED_STAT1
++#define CP_CPC_STALLED_STAT1__RCIU_TX_FREE_STALL__SHIFT 0x3
++#define CP_CPC_STALLED_STAT1__RCIU_PRIV_VIOLATION__SHIFT 0x4
++#define CP_CPC_STALLED_STAT1__TCIU_TX_FREE_STALL__SHIFT 0x6
++#define CP_CPC_STALLED_STAT1__MEC1_DECODING_PACKET__SHIFT 0x8
++#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_RCIU__SHIFT 0x9
++#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_RCIU_READ__SHIFT 0xa
++#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_ROQ_DATA__SHIFT 0xd
++#define CP_CPC_STALLED_STAT1__MEC2_DECODING_PACKET__SHIFT 0x10
++#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_RCIU__SHIFT 0x11
++#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_RCIU_READ__SHIFT 0x12
++#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_ROQ_DATA__SHIFT 0x15
++#define CP_CPC_STALLED_STAT1__UTCL2IU_WAITING_ON_FREE__SHIFT 0x16
++#define CP_CPC_STALLED_STAT1__UTCL2IU_WAITING_ON_TAGS__SHIFT 0x17
++#define CP_CPC_STALLED_STAT1__UTCL1_WAITING_ON_TRANS__SHIFT 0x18
++#define CP_CPC_STALLED_STAT1__RCIU_TX_FREE_STALL_MASK 0x00000008L
++#define CP_CPC_STALLED_STAT1__RCIU_PRIV_VIOLATION_MASK 0x00000010L
++#define CP_CPC_STALLED_STAT1__TCIU_TX_FREE_STALL_MASK 0x00000040L
++#define CP_CPC_STALLED_STAT1__MEC1_DECODING_PACKET_MASK 0x00000100L
++#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_RCIU_MASK 0x00000200L
++#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_RCIU_READ_MASK 0x00000400L
++#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_ROQ_DATA_MASK 0x00002000L
++#define CP_CPC_STALLED_STAT1__MEC2_DECODING_PACKET_MASK 0x00010000L
++#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_RCIU_MASK 0x00020000L
++#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_RCIU_READ_MASK 0x00040000L
++#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_ROQ_DATA_MASK 0x00200000L
++#define CP_CPC_STALLED_STAT1__UTCL2IU_WAITING_ON_FREE_MASK 0x00400000L
++#define CP_CPC_STALLED_STAT1__UTCL2IU_WAITING_ON_TAGS_MASK 0x00800000L
++#define CP_CPC_STALLED_STAT1__UTCL1_WAITING_ON_TRANS_MASK 0x01000000L
++//CP_CPF_STATUS
++#define CP_CPF_STATUS__POST_WPTR_GFX_BUSY__SHIFT 0x0
++#define CP_CPF_STATUS__CSF_BUSY__SHIFT 0x1
++#define CP_CPF_STATUS__ROQ_ALIGN_BUSY__SHIFT 0x4
++#define CP_CPF_STATUS__ROQ_RING_BUSY__SHIFT 0x5
++#define CP_CPF_STATUS__ROQ_INDIRECT1_BUSY__SHIFT 0x6
++#define CP_CPF_STATUS__ROQ_INDIRECT2_BUSY__SHIFT 0x7
++#define CP_CPF_STATUS__ROQ_STATE_BUSY__SHIFT 0x8
++#define CP_CPF_STATUS__ROQ_CE_RING_BUSY__SHIFT 0x9
++#define CP_CPF_STATUS__ROQ_CE_INDIRECT1_BUSY__SHIFT 0xa
++#define CP_CPF_STATUS__ROQ_CE_INDIRECT2_BUSY__SHIFT 0xb
++#define CP_CPF_STATUS__SEMAPHORE_BUSY__SHIFT 0xc
++#define CP_CPF_STATUS__INTERRUPT_BUSY__SHIFT 0xd
++#define CP_CPF_STATUS__TCIU_BUSY__SHIFT 0xe
++#define CP_CPF_STATUS__HQD_BUSY__SHIFT 0xf
++#define CP_CPF_STATUS__PRT_BUSY__SHIFT 0x10
++#define CP_CPF_STATUS__UTCL2IU_BUSY__SHIFT 0x11
++#define CP_CPF_STATUS__CPF_GFX_BUSY__SHIFT 0x1a
++#define CP_CPF_STATUS__CPF_CMP_BUSY__SHIFT 0x1b
++#define CP_CPF_STATUS__GRBM_CPF_STAT_BUSY__SHIFT 0x1c
++#define CP_CPF_STATUS__CPC_CPF_BUSY__SHIFT 0x1e
++#define CP_CPF_STATUS__CPF_BUSY__SHIFT 0x1f
++#define CP_CPF_STATUS__POST_WPTR_GFX_BUSY_MASK 0x00000001L
++#define CP_CPF_STATUS__CSF_BUSY_MASK 0x00000002L
++#define CP_CPF_STATUS__ROQ_ALIGN_BUSY_MASK 0x00000010L
++#define CP_CPF_STATUS__ROQ_RING_BUSY_MASK 0x00000020L
++#define CP_CPF_STATUS__ROQ_INDIRECT1_BUSY_MASK 0x00000040L
++#define CP_CPF_STATUS__ROQ_INDIRECT2_BUSY_MASK 0x00000080L
++#define CP_CPF_STATUS__ROQ_STATE_BUSY_MASK 0x00000100L
++#define CP_CPF_STATUS__ROQ_CE_RING_BUSY_MASK 0x00000200L
++#define CP_CPF_STATUS__ROQ_CE_INDIRECT1_BUSY_MASK 0x00000400L
++#define CP_CPF_STATUS__ROQ_CE_INDIRECT2_BUSY_MASK 0x00000800L
++#define CP_CPF_STATUS__SEMAPHORE_BUSY_MASK 0x00001000L
++#define CP_CPF_STATUS__INTERRUPT_BUSY_MASK 0x00002000L
++#define CP_CPF_STATUS__TCIU_BUSY_MASK 0x00004000L
++#define CP_CPF_STATUS__HQD_BUSY_MASK 0x00008000L
++#define CP_CPF_STATUS__PRT_BUSY_MASK 0x00010000L
++#define CP_CPF_STATUS__UTCL2IU_BUSY_MASK 0x00020000L
++#define CP_CPF_STATUS__CPF_GFX_BUSY_MASK 0x04000000L
++#define CP_CPF_STATUS__CPF_CMP_BUSY_MASK 0x08000000L
++#define CP_CPF_STATUS__GRBM_CPF_STAT_BUSY_MASK 0x30000000L
++#define CP_CPF_STATUS__CPC_CPF_BUSY_MASK 0x40000000L
++#define CP_CPF_STATUS__CPF_BUSY_MASK 0x80000000L
++//CP_CPF_BUSY_STAT
++#define CP_CPF_BUSY_STAT__REG_BUS_FIFO_BUSY__SHIFT 0x0
++#define CP_CPF_BUSY_STAT__CSF_RING_BUSY__SHIFT 0x1
++#define CP_CPF_BUSY_STAT__CSF_INDIRECT1_BUSY__SHIFT 0x2
++#define CP_CPF_BUSY_STAT__CSF_INDIRECT2_BUSY__SHIFT 0x3
++#define CP_CPF_BUSY_STAT__CSF_STATE_BUSY__SHIFT 0x4
++#define CP_CPF_BUSY_STAT__CSF_CE_INDR1_BUSY__SHIFT 0x5
++#define CP_CPF_BUSY_STAT__CSF_CE_INDR2_BUSY__SHIFT 0x6
++#define CP_CPF_BUSY_STAT__CSF_ARBITER_BUSY__SHIFT 0x7
++#define CP_CPF_BUSY_STAT__CSF_INPUT_BUSY__SHIFT 0x8
++#define CP_CPF_BUSY_STAT__OUTSTANDING_READ_TAGS__SHIFT 0x9
++#define CP_CPF_BUSY_STAT__HPD_PROCESSING_EOP_BUSY__SHIFT 0xb
++#define CP_CPF_BUSY_STAT__HQD_DISPATCH_BUSY__SHIFT 0xc
++#define CP_CPF_BUSY_STAT__HQD_IQ_TIMER_BUSY__SHIFT 0xd
++#define CP_CPF_BUSY_STAT__HQD_DMA_OFFLOAD_BUSY__SHIFT 0xe
++#define CP_CPF_BUSY_STAT__HQD_WAIT_SEMAPHORE_BUSY__SHIFT 0xf
++#define CP_CPF_BUSY_STAT__HQD_SIGNAL_SEMAPHORE_BUSY__SHIFT 0x10
++#define CP_CPF_BUSY_STAT__HQD_MESSAGE_BUSY__SHIFT 0x11
++#define CP_CPF_BUSY_STAT__HQD_PQ_FETCHER_BUSY__SHIFT 0x12
++#define CP_CPF_BUSY_STAT__HQD_IB_FETCHER_BUSY__SHIFT 0x13
++#define CP_CPF_BUSY_STAT__HQD_IQ_FETCHER_BUSY__SHIFT 0x14
++#define CP_CPF_BUSY_STAT__HQD_EOP_FETCHER_BUSY__SHIFT 0x15
++#define CP_CPF_BUSY_STAT__HQD_CONSUMED_RPTR_BUSY__SHIFT 0x16
++#define CP_CPF_BUSY_STAT__HQD_FETCHER_ARB_BUSY__SHIFT 0x17
++#define CP_CPF_BUSY_STAT__HQD_ROQ_ALIGN_BUSY__SHIFT 0x18
++#define CP_CPF_BUSY_STAT__HQD_ROQ_EOP_BUSY__SHIFT 0x19
++#define CP_CPF_BUSY_STAT__HQD_ROQ_IQ_BUSY__SHIFT 0x1a
++#define CP_CPF_BUSY_STAT__HQD_ROQ_PQ_BUSY__SHIFT 0x1b
++#define CP_CPF_BUSY_STAT__HQD_ROQ_IB_BUSY__SHIFT 0x1c
++#define CP_CPF_BUSY_STAT__HQD_WPTR_POLL_BUSY__SHIFT 0x1d
++#define CP_CPF_BUSY_STAT__HQD_PQ_BUSY__SHIFT 0x1e
++#define CP_CPF_BUSY_STAT__HQD_IB_BUSY__SHIFT 0x1f
++#define CP_CPF_BUSY_STAT__REG_BUS_FIFO_BUSY_MASK 0x00000001L
++#define CP_CPF_BUSY_STAT__CSF_RING_BUSY_MASK 0x00000002L
++#define CP_CPF_BUSY_STAT__CSF_INDIRECT1_BUSY_MASK 0x00000004L
++#define CP_CPF_BUSY_STAT__CSF_INDIRECT2_BUSY_MASK 0x00000008L
++#define CP_CPF_BUSY_STAT__CSF_STATE_BUSY_MASK 0x00000010L
++#define CP_CPF_BUSY_STAT__CSF_CE_INDR1_BUSY_MASK 0x00000020L
++#define CP_CPF_BUSY_STAT__CSF_CE_INDR2_BUSY_MASK 0x00000040L
++#define CP_CPF_BUSY_STAT__CSF_ARBITER_BUSY_MASK 0x00000080L
++#define CP_CPF_BUSY_STAT__CSF_INPUT_BUSY_MASK 0x00000100L
++#define CP_CPF_BUSY_STAT__OUTSTANDING_READ_TAGS_MASK 0x00000200L
++#define CP_CPF_BUSY_STAT__HPD_PROCESSING_EOP_BUSY_MASK 0x00000800L
++#define CP_CPF_BUSY_STAT__HQD_DISPATCH_BUSY_MASK 0x00001000L
++#define CP_CPF_BUSY_STAT__HQD_IQ_TIMER_BUSY_MASK 0x00002000L
++#define CP_CPF_BUSY_STAT__HQD_DMA_OFFLOAD_BUSY_MASK 0x00004000L
++#define CP_CPF_BUSY_STAT__HQD_WAIT_SEMAPHORE_BUSY_MASK 0x00008000L
++#define CP_CPF_BUSY_STAT__HQD_SIGNAL_SEMAPHORE_BUSY_MASK 0x00010000L
++#define CP_CPF_BUSY_STAT__HQD_MESSAGE_BUSY_MASK 0x00020000L
++#define CP_CPF_BUSY_STAT__HQD_PQ_FETCHER_BUSY_MASK 0x00040000L
++#define CP_CPF_BUSY_STAT__HQD_IB_FETCHER_BUSY_MASK 0x00080000L
++#define CP_CPF_BUSY_STAT__HQD_IQ_FETCHER_BUSY_MASK 0x00100000L
++#define CP_CPF_BUSY_STAT__HQD_EOP_FETCHER_BUSY_MASK 0x00200000L
++#define CP_CPF_BUSY_STAT__HQD_CONSUMED_RPTR_BUSY_MASK 0x00400000L
++#define CP_CPF_BUSY_STAT__HQD_FETCHER_ARB_BUSY_MASK 0x00800000L
++#define CP_CPF_BUSY_STAT__HQD_ROQ_ALIGN_BUSY_MASK 0x01000000L
++#define CP_CPF_BUSY_STAT__HQD_ROQ_EOP_BUSY_MASK 0x02000000L
++#define CP_CPF_BUSY_STAT__HQD_ROQ_IQ_BUSY_MASK 0x04000000L
++#define CP_CPF_BUSY_STAT__HQD_ROQ_PQ_BUSY_MASK 0x08000000L
++#define CP_CPF_BUSY_STAT__HQD_ROQ_IB_BUSY_MASK 0x10000000L
++#define CP_CPF_BUSY_STAT__HQD_WPTR_POLL_BUSY_MASK 0x20000000L
++#define CP_CPF_BUSY_STAT__HQD_PQ_BUSY_MASK 0x40000000L
++#define CP_CPF_BUSY_STAT__HQD_IB_BUSY_MASK 0x80000000L
++//CP_CPF_STALLED_STAT1
++#define CP_CPF_STALLED_STAT1__RING_FETCHING_DATA__SHIFT 0x0
++#define CP_CPF_STALLED_STAT1__INDR1_FETCHING_DATA__SHIFT 0x1
++#define CP_CPF_STALLED_STAT1__INDR2_FETCHING_DATA__SHIFT 0x2
++#define CP_CPF_STALLED_STAT1__STATE_FETCHING_DATA__SHIFT 0x3
++#define CP_CPF_STALLED_STAT1__TCIU_WAITING_ON_FREE__SHIFT 0x5
++#define CP_CPF_STALLED_STAT1__TCIU_WAITING_ON_TAGS__SHIFT 0x6
++#define CP_CPF_STALLED_STAT1__UTCL2IU_WAITING_ON_FREE__SHIFT 0x7
++#define CP_CPF_STALLED_STAT1__UTCL2IU_WAITING_ON_TAGS__SHIFT 0x8
++#define CP_CPF_STALLED_STAT1__GFX_UTCL1_WAITING_ON_TRANS__SHIFT 0x9
++#define CP_CPF_STALLED_STAT1__CMP_UTCL1_WAITING_ON_TRANS__SHIFT 0xa
++#define CP_CPF_STALLED_STAT1__RCIU_WAITING_ON_FREE__SHIFT 0xb
++#define CP_CPF_STALLED_STAT1__RING_FETCHING_DATA_MASK 0x00000001L
++#define CP_CPF_STALLED_STAT1__INDR1_FETCHING_DATA_MASK 0x00000002L
++#define CP_CPF_STALLED_STAT1__INDR2_FETCHING_DATA_MASK 0x00000004L
++#define CP_CPF_STALLED_STAT1__STATE_FETCHING_DATA_MASK 0x00000008L
++#define CP_CPF_STALLED_STAT1__TCIU_WAITING_ON_FREE_MASK 0x00000020L
++#define CP_CPF_STALLED_STAT1__TCIU_WAITING_ON_TAGS_MASK 0x00000040L
++#define CP_CPF_STALLED_STAT1__UTCL2IU_WAITING_ON_FREE_MASK 0x00000080L
++#define CP_CPF_STALLED_STAT1__UTCL2IU_WAITING_ON_TAGS_MASK 0x00000100L
++#define CP_CPF_STALLED_STAT1__GFX_UTCL1_WAITING_ON_TRANS_MASK 0x00000200L
++#define CP_CPF_STALLED_STAT1__CMP_UTCL1_WAITING_ON_TRANS_MASK 0x00000400L
++#define CP_CPF_STALLED_STAT1__RCIU_WAITING_ON_FREE_MASK 0x00000800L
++//CP_CPC_GRBM_FREE_COUNT
++#define CP_CPC_GRBM_FREE_COUNT__FREE_COUNT__SHIFT 0x0
++#define CP_CPC_GRBM_FREE_COUNT__FREE_COUNT_MASK 0x0000003FL
++//CP_MEC_CNTL
++#define CP_MEC_CNTL__MEC_INVALIDATE_ICACHE__SHIFT 0x4
++#define CP_MEC_CNTL__MEC_ME1_PIPE0_RESET__SHIFT 0x10
++#define CP_MEC_CNTL__MEC_ME1_PIPE1_RESET__SHIFT 0x11
++#define CP_MEC_CNTL__MEC_ME1_PIPE2_RESET__SHIFT 0x12
++#define CP_MEC_CNTL__MEC_ME1_PIPE3_RESET__SHIFT 0x13
++#define CP_MEC_CNTL__MEC_ME2_PIPE0_RESET__SHIFT 0x14
++#define CP_MEC_CNTL__MEC_ME2_PIPE1_RESET__SHIFT 0x15
++#define CP_MEC_CNTL__MEC_ME2_HALT__SHIFT 0x1c
++#define CP_MEC_CNTL__MEC_ME2_STEP__SHIFT 0x1d
++#define CP_MEC_CNTL__MEC_ME1_HALT__SHIFT 0x1e
++#define CP_MEC_CNTL__MEC_ME1_STEP__SHIFT 0x1f
++#define CP_MEC_CNTL__MEC_INVALIDATE_ICACHE_MASK 0x00000010L
++#define CP_MEC_CNTL__MEC_ME1_PIPE0_RESET_MASK 0x00010000L
++#define CP_MEC_CNTL__MEC_ME1_PIPE1_RESET_MASK 0x00020000L
++#define CP_MEC_CNTL__MEC_ME1_PIPE2_RESET_MASK 0x00040000L
++#define CP_MEC_CNTL__MEC_ME1_PIPE3_RESET_MASK 0x00080000L
++#define CP_MEC_CNTL__MEC_ME2_PIPE0_RESET_MASK 0x00100000L
++#define CP_MEC_CNTL__MEC_ME2_PIPE1_RESET_MASK 0x00200000L
++#define CP_MEC_CNTL__MEC_ME2_HALT_MASK 0x10000000L
++#define CP_MEC_CNTL__MEC_ME2_STEP_MASK 0x20000000L
++#define CP_MEC_CNTL__MEC_ME1_HALT_MASK 0x40000000L
++#define CP_MEC_CNTL__MEC_ME1_STEP_MASK 0x80000000L
++//CP_MEC_ME1_HEADER_DUMP
++#define CP_MEC_ME1_HEADER_DUMP__HEADER_DUMP__SHIFT 0x0
++#define CP_MEC_ME1_HEADER_DUMP__HEADER_DUMP_MASK 0xFFFFFFFFL
++//CP_MEC_ME2_HEADER_DUMP
++#define CP_MEC_ME2_HEADER_DUMP__HEADER_DUMP__SHIFT 0x0
++#define CP_MEC_ME2_HEADER_DUMP__HEADER_DUMP_MASK 0xFFFFFFFFL
++//CP_CPC_SCRATCH_INDEX
++#define CP_CPC_SCRATCH_INDEX__SCRATCH_INDEX__SHIFT 0x0
++#define CP_CPC_SCRATCH_INDEX__SCRATCH_INDEX_MASK 0x000001FFL
++//CP_CPC_SCRATCH_DATA
++#define CP_CPC_SCRATCH_DATA__SCRATCH_DATA__SHIFT 0x0
++#define CP_CPC_SCRATCH_DATA__SCRATCH_DATA_MASK 0xFFFFFFFFL
++//CP_CPF_GRBM_FREE_COUNT
++#define CP_CPF_GRBM_FREE_COUNT__FREE_COUNT__SHIFT 0x0
++#define CP_CPF_GRBM_FREE_COUNT__FREE_COUNT_MASK 0x00000007L
++//CP_CPC_HALT_HYST_COUNT
++#define CP_CPC_HALT_HYST_COUNT__COUNT__SHIFT 0x0
++#define CP_CPC_HALT_HYST_COUNT__COUNT_MASK 0x0000000FL
++//CP_PRT_LOD_STATS_CNTL0
++#define CP_PRT_LOD_STATS_CNTL0__BU_SIZE__SHIFT 0x0
++#define CP_PRT_LOD_STATS_CNTL0__BU_SIZE_MASK 0xFFFFFFFFL
++//CP_PRT_LOD_STATS_CNTL1
++#define CP_PRT_LOD_STATS_CNTL1__BASE_LO__SHIFT 0x0
++#define CP_PRT_LOD_STATS_CNTL1__BASE_LO_MASK 0xFFFFFFFFL
++//CP_PRT_LOD_STATS_CNTL2
++#define CP_PRT_LOD_STATS_CNTL2__BASE_HI__SHIFT 0x0
++#define CP_PRT_LOD_STATS_CNTL2__BASE_HI_MASK 0x000003FFL
++//CP_PRT_LOD_STATS_CNTL3
++#define CP_PRT_LOD_STATS_CNTL3__INTERVAL__SHIFT 0x2
++#define CP_PRT_LOD_STATS_CNTL3__RESET_CNT__SHIFT 0xa
++#define CP_PRT_LOD_STATS_CNTL3__RESET_FORCE__SHIFT 0x12
++#define CP_PRT_LOD_STATS_CNTL3__REPORT_AND_RESET__SHIFT 0x13
++#define CP_PRT_LOD_STATS_CNTL3__MC_VMID__SHIFT 0x17
++#define CP_PRT_LOD_STATS_CNTL3__CACHE_POLICY__SHIFT 0x1c
++#define CP_PRT_LOD_STATS_CNTL3__INTERVAL_MASK 0x000003FCL
++#define CP_PRT_LOD_STATS_CNTL3__RESET_CNT_MASK 0x0003FC00L
++#define CP_PRT_LOD_STATS_CNTL3__RESET_FORCE_MASK 0x00040000L
++#define CP_PRT_LOD_STATS_CNTL3__REPORT_AND_RESET_MASK 0x00080000L
++#define CP_PRT_LOD_STATS_CNTL3__MC_VMID_MASK 0x07800000L
++#define CP_PRT_LOD_STATS_CNTL3__CACHE_POLICY_MASK 0x10000000L
++//CP_CE_COMPARE_COUNT
++#define CP_CE_COMPARE_COUNT__COMPARE_COUNT__SHIFT 0x0
++#define CP_CE_COMPARE_COUNT__COMPARE_COUNT_MASK 0xFFFFFFFFL
++//CP_CE_DE_COUNT
++#define CP_CE_DE_COUNT__DRAW_ENGINE_COUNT__SHIFT 0x0
++#define CP_CE_DE_COUNT__DRAW_ENGINE_COUNT_MASK 0xFFFFFFFFL
++//CP_DE_CE_COUNT
++#define CP_DE_CE_COUNT__CONST_ENGINE_COUNT__SHIFT 0x0
++#define CP_DE_CE_COUNT__CONST_ENGINE_COUNT_MASK 0xFFFFFFFFL
++//CP_DE_LAST_INVAL_COUNT
++#define CP_DE_LAST_INVAL_COUNT__LAST_INVAL_COUNT__SHIFT 0x0
++#define CP_DE_LAST_INVAL_COUNT__LAST_INVAL_COUNT_MASK 0xFFFFFFFFL
++//CP_DE_DE_COUNT
++#define CP_DE_DE_COUNT__DRAW_ENGINE_COUNT__SHIFT 0x0
++#define CP_DE_DE_COUNT__DRAW_ENGINE_COUNT_MASK 0xFFFFFFFFL
++//CP_STALLED_STAT3
++#define CP_STALLED_STAT3__CE_TO_CSF_NOT_RDY_TO_RCV__SHIFT 0x0
++#define CP_STALLED_STAT3__CE_TO_RAM_INIT_FETCHER_NOT_RDY_TO_RCV__SHIFT 0x1
++#define CP_STALLED_STAT3__CE_WAITING_ON_DATA_FROM_RAM_INIT_FETCHER__SHIFT 0x2
++#define CP_STALLED_STAT3__CE_TO_RAM_INIT_NOT_RDY__SHIFT 0x3
++#define CP_STALLED_STAT3__CE_TO_RAM_DUMP_NOT_RDY__SHIFT 0x4
++#define CP_STALLED_STAT3__CE_TO_RAM_WRITE_NOT_RDY__SHIFT 0x5
++#define CP_STALLED_STAT3__CE_TO_INC_FIFO_NOT_RDY_TO_RCV__SHIFT 0x6
++#define CP_STALLED_STAT3__CE_TO_WR_FIFO_NOT_RDY_TO_RCV__SHIFT 0x7
++#define CP_STALLED_STAT3__CE_WAITING_ON_BUFFER_DATA__SHIFT 0xa
++#define CP_STALLED_STAT3__CE_WAITING_ON_CE_BUFFER_FLAG__SHIFT 0xb
++#define CP_STALLED_STAT3__CE_WAITING_ON_DE_COUNTER__SHIFT 0xc
++#define CP_STALLED_STAT3__CE_WAITING_ON_DE_COUNTER_UNDERFLOW__SHIFT 0xd
++#define CP_STALLED_STAT3__TCIU_WAITING_ON_FREE__SHIFT 0xe
++#define CP_STALLED_STAT3__TCIU_WAITING_ON_TAGS__SHIFT 0xf
++#define CP_STALLED_STAT3__CE_STALLED_ON_TC_WR_CONFIRM__SHIFT 0x10
++#define CP_STALLED_STAT3__CE_STALLED_ON_ATOMIC_RTN_DATA__SHIFT 0x11
++#define CP_STALLED_STAT3__UTCL2IU_WAITING_ON_FREE__SHIFT 0x12
++#define CP_STALLED_STAT3__UTCL2IU_WAITING_ON_TAGS__SHIFT 0x13
++#define CP_STALLED_STAT3__UTCL1_WAITING_ON_TRANS__SHIFT 0x14
++#define CP_STALLED_STAT3__CE_TO_CSF_NOT_RDY_TO_RCV_MASK 0x00000001L
++#define CP_STALLED_STAT3__CE_TO_RAM_INIT_FETCHER_NOT_RDY_TO_RCV_MASK 0x00000002L
++#define CP_STALLED_STAT3__CE_WAITING_ON_DATA_FROM_RAM_INIT_FETCHER_MASK 0x00000004L
++#define CP_STALLED_STAT3__CE_TO_RAM_INIT_NOT_RDY_MASK 0x00000008L
++#define CP_STALLED_STAT3__CE_TO_RAM_DUMP_NOT_RDY_MASK 0x00000010L
++#define CP_STALLED_STAT3__CE_TO_RAM_WRITE_NOT_RDY_MASK 0x00000020L
++#define CP_STALLED_STAT3__CE_TO_INC_FIFO_NOT_RDY_TO_RCV_MASK 0x00000040L
++#define CP_STALLED_STAT3__CE_TO_WR_FIFO_NOT_RDY_TO_RCV_MASK 0x00000080L
++#define CP_STALLED_STAT3__CE_WAITING_ON_BUFFER_DATA_MASK 0x00000400L
++#define CP_STALLED_STAT3__CE_WAITING_ON_CE_BUFFER_FLAG_MASK 0x00000800L
++#define CP_STALLED_STAT3__CE_WAITING_ON_DE_COUNTER_MASK 0x00001000L
++#define CP_STALLED_STAT3__CE_WAITING_ON_DE_COUNTER_UNDERFLOW_MASK 0x00002000L
++#define CP_STALLED_STAT3__TCIU_WAITING_ON_FREE_MASK 0x00004000L
++#define CP_STALLED_STAT3__TCIU_WAITING_ON_TAGS_MASK 0x00008000L
++#define CP_STALLED_STAT3__CE_STALLED_ON_TC_WR_CONFIRM_MASK 0x00010000L
++#define CP_STALLED_STAT3__CE_STALLED_ON_ATOMIC_RTN_DATA_MASK 0x00020000L
++#define CP_STALLED_STAT3__UTCL2IU_WAITING_ON_FREE_MASK 0x00040000L
++#define CP_STALLED_STAT3__UTCL2IU_WAITING_ON_TAGS_MASK 0x00080000L
++#define CP_STALLED_STAT3__UTCL1_WAITING_ON_TRANS_MASK 0x00100000L
++//CP_STALLED_STAT1
++#define CP_STALLED_STAT1__RBIU_TO_DMA_NOT_RDY_TO_RCV__SHIFT 0x0
++#define CP_STALLED_STAT1__RBIU_TO_SEM_NOT_RDY_TO_RCV__SHIFT 0x2
++#define CP_STALLED_STAT1__RBIU_TO_MEMWR_NOT_RDY_TO_RCV__SHIFT 0x4
++#define CP_STALLED_STAT1__ME_HAS_ACTIVE_CE_BUFFER_FLAG__SHIFT 0xa
++#define CP_STALLED_STAT1__ME_HAS_ACTIVE_DE_BUFFER_FLAG__SHIFT 0xb
++#define CP_STALLED_STAT1__ME_STALLED_ON_TC_WR_CONFIRM__SHIFT 0xc
++#define CP_STALLED_STAT1__ME_STALLED_ON_ATOMIC_RTN_DATA__SHIFT 0xd
++#define CP_STALLED_STAT1__ME_WAITING_ON_TC_READ_DATA__SHIFT 0xe
++#define CP_STALLED_STAT1__ME_WAITING_ON_REG_READ_DATA__SHIFT 0xf
++#define CP_STALLED_STAT1__RCIU_WAITING_ON_GDS_FREE__SHIFT 0x17
++#define CP_STALLED_STAT1__RCIU_WAITING_ON_GRBM_FREE__SHIFT 0x18
++#define CP_STALLED_STAT1__RCIU_WAITING_ON_VGT_FREE__SHIFT 0x19
++#define CP_STALLED_STAT1__RCIU_STALLED_ON_ME_READ__SHIFT 0x1a
++#define CP_STALLED_STAT1__RCIU_STALLED_ON_DMA_READ__SHIFT 0x1b
++#define CP_STALLED_STAT1__RCIU_STALLED_ON_APPEND_READ__SHIFT 0x1c
++#define CP_STALLED_STAT1__RCIU_HALTED_BY_REG_VIOLATION__SHIFT 0x1d
++#define CP_STALLED_STAT1__RBIU_TO_DMA_NOT_RDY_TO_RCV_MASK 0x00000001L
++#define CP_STALLED_STAT1__RBIU_TO_SEM_NOT_RDY_TO_RCV_MASK 0x00000004L
++#define CP_STALLED_STAT1__RBIU_TO_MEMWR_NOT_RDY_TO_RCV_MASK 0x00000010L
++#define CP_STALLED_STAT1__ME_HAS_ACTIVE_CE_BUFFER_FLAG_MASK 0x00000400L
++#define CP_STALLED_STAT1__ME_HAS_ACTIVE_DE_BUFFER_FLAG_MASK 0x00000800L
++#define CP_STALLED_STAT1__ME_STALLED_ON_TC_WR_CONFIRM_MASK 0x00001000L
++#define CP_STALLED_STAT1__ME_STALLED_ON_ATOMIC_RTN_DATA_MASK 0x00002000L
++#define CP_STALLED_STAT1__ME_WAITING_ON_TC_READ_DATA_MASK 0x00004000L
++#define CP_STALLED_STAT1__ME_WAITING_ON_REG_READ_DATA_MASK 0x00008000L
++#define CP_STALLED_STAT1__RCIU_WAITING_ON_GDS_FREE_MASK 0x00800000L
++#define CP_STALLED_STAT1__RCIU_WAITING_ON_GRBM_FREE_MASK 0x01000000L
++#define CP_STALLED_STAT1__RCIU_WAITING_ON_VGT_FREE_MASK 0x02000000L
++#define CP_STALLED_STAT1__RCIU_STALLED_ON_ME_READ_MASK 0x04000000L
++#define CP_STALLED_STAT1__RCIU_STALLED_ON_DMA_READ_MASK 0x08000000L
++#define CP_STALLED_STAT1__RCIU_STALLED_ON_APPEND_READ_MASK 0x10000000L
++#define CP_STALLED_STAT1__RCIU_HALTED_BY_REG_VIOLATION_MASK 0x20000000L
++//CP_STALLED_STAT2
++#define CP_STALLED_STAT2__PFP_TO_CSF_NOT_RDY_TO_RCV__SHIFT 0x0
++#define CP_STALLED_STAT2__PFP_TO_MEQ_NOT_RDY_TO_RCV__SHIFT 0x1
++#define CP_STALLED_STAT2__PFP_TO_RCIU_NOT_RDY_TO_RCV__SHIFT 0x2
++#define CP_STALLED_STAT2__PFP_TO_VGT_WRITES_PENDING__SHIFT 0x4
++#define CP_STALLED_STAT2__PFP_RCIU_READ_PENDING__SHIFT 0x5
++#define CP_STALLED_STAT2__PFP_WAITING_ON_BUFFER_DATA__SHIFT 0x8
++#define CP_STALLED_STAT2__ME_WAIT_ON_CE_COUNTER__SHIFT 0x9
++#define CP_STALLED_STAT2__ME_WAIT_ON_AVAIL_BUFFER__SHIFT 0xa
++#define CP_STALLED_STAT2__GFX_CNTX_NOT_AVAIL_TO_ME__SHIFT 0xb
++#define CP_STALLED_STAT2__ME_RCIU_NOT_RDY_TO_RCV__SHIFT 0xc
++#define CP_STALLED_STAT2__ME_TO_CONST_NOT_RDY_TO_RCV__SHIFT 0xd
++#define CP_STALLED_STAT2__ME_WAITING_DATA_FROM_PFP__SHIFT 0xe
++#define CP_STALLED_STAT2__ME_WAITING_ON_PARTIAL_FLUSH__SHIFT 0xf
++#define CP_STALLED_STAT2__MEQ_TO_ME_NOT_RDY_TO_RCV__SHIFT 0x10
++#define CP_STALLED_STAT2__STQ_TO_ME_NOT_RDY_TO_RCV__SHIFT 0x11
++#define CP_STALLED_STAT2__ME_WAITING_DATA_FROM_STQ__SHIFT 0x12
++#define CP_STALLED_STAT2__PFP_STALLED_ON_TC_WR_CONFIRM__SHIFT 0x13
++#define CP_STALLED_STAT2__PFP_STALLED_ON_ATOMIC_RTN_DATA__SHIFT 0x14
++#define CP_STALLED_STAT2__EOPD_FIFO_NEEDS_SC_EOP_DONE__SHIFT 0x15
++#define CP_STALLED_STAT2__EOPD_FIFO_NEEDS_WR_CONFIRM__SHIFT 0x16
++#define CP_STALLED_STAT2__STRMO_WR_OF_PRIM_DATA_PENDING__SHIFT 0x17
++#define CP_STALLED_STAT2__PIPE_STATS_WR_DATA_PENDING__SHIFT 0x18
++#define CP_STALLED_STAT2__APPEND_RDY_WAIT_ON_CS_DONE__SHIFT 0x19
++#define CP_STALLED_STAT2__APPEND_RDY_WAIT_ON_PS_DONE__SHIFT 0x1a
++#define CP_STALLED_STAT2__APPEND_WAIT_ON_WR_CONFIRM__SHIFT 0x1b
++#define CP_STALLED_STAT2__APPEND_ACTIVE_PARTITION__SHIFT 0x1c
++#define CP_STALLED_STAT2__APPEND_WAITING_TO_SEND_MEMWRITE__SHIFT 0x1d
++#define CP_STALLED_STAT2__SURF_SYNC_NEEDS_IDLE_CNTXS__SHIFT 0x1e
++#define CP_STALLED_STAT2__SURF_SYNC_NEEDS_ALL_CLEAN__SHIFT 0x1f
++#define CP_STALLED_STAT2__PFP_TO_CSF_NOT_RDY_TO_RCV_MASK 0x00000001L
++#define CP_STALLED_STAT2__PFP_TO_MEQ_NOT_RDY_TO_RCV_MASK 0x00000002L
++#define CP_STALLED_STAT2__PFP_TO_RCIU_NOT_RDY_TO_RCV_MASK 0x00000004L
++#define CP_STALLED_STAT2__PFP_TO_VGT_WRITES_PENDING_MASK 0x00000010L
++#define CP_STALLED_STAT2__PFP_RCIU_READ_PENDING_MASK 0x00000020L
++#define CP_STALLED_STAT2__PFP_WAITING_ON_BUFFER_DATA_MASK 0x00000100L
++#define CP_STALLED_STAT2__ME_WAIT_ON_CE_COUNTER_MASK 0x00000200L
++#define CP_STALLED_STAT2__ME_WAIT_ON_AVAIL_BUFFER_MASK 0x00000400L
++#define CP_STALLED_STAT2__GFX_CNTX_NOT_AVAIL_TO_ME_MASK 0x00000800L
++#define CP_STALLED_STAT2__ME_RCIU_NOT_RDY_TO_RCV_MASK 0x00001000L
++#define CP_STALLED_STAT2__ME_TO_CONST_NOT_RDY_TO_RCV_MASK 0x00002000L
++#define CP_STALLED_STAT2__ME_WAITING_DATA_FROM_PFP_MASK 0x00004000L
++#define CP_STALLED_STAT2__ME_WAITING_ON_PARTIAL_FLUSH_MASK 0x00008000L
++#define CP_STALLED_STAT2__MEQ_TO_ME_NOT_RDY_TO_RCV_MASK 0x00010000L
++#define CP_STALLED_STAT2__STQ_TO_ME_NOT_RDY_TO_RCV_MASK 0x00020000L
++#define CP_STALLED_STAT2__ME_WAITING_DATA_FROM_STQ_MASK 0x00040000L
++#define CP_STALLED_STAT2__PFP_STALLED_ON_TC_WR_CONFIRM_MASK 0x00080000L
++#define CP_STALLED_STAT2__PFP_STALLED_ON_ATOMIC_RTN_DATA_MASK 0x00100000L
++#define CP_STALLED_STAT2__EOPD_FIFO_NEEDS_SC_EOP_DONE_MASK 0x00200000L
++#define CP_STALLED_STAT2__EOPD_FIFO_NEEDS_WR_CONFIRM_MASK 0x00400000L
++#define CP_STALLED_STAT2__STRMO_WR_OF_PRIM_DATA_PENDING_MASK 0x00800000L
++#define CP_STALLED_STAT2__PIPE_STATS_WR_DATA_PENDING_MASK 0x01000000L
++#define CP_STALLED_STAT2__APPEND_RDY_WAIT_ON_CS_DONE_MASK 0x02000000L
++#define CP_STALLED_STAT2__APPEND_RDY_WAIT_ON_PS_DONE_MASK 0x04000000L
++#define CP_STALLED_STAT2__APPEND_WAIT_ON_WR_CONFIRM_MASK 0x08000000L
++#define CP_STALLED_STAT2__APPEND_ACTIVE_PARTITION_MASK 0x10000000L
++#define CP_STALLED_STAT2__APPEND_WAITING_TO_SEND_MEMWRITE_MASK 0x20000000L
++#define CP_STALLED_STAT2__SURF_SYNC_NEEDS_IDLE_CNTXS_MASK 0x40000000L
++#define CP_STALLED_STAT2__SURF_SYNC_NEEDS_ALL_CLEAN_MASK 0x80000000L
++//CP_BUSY_STAT
++#define CP_BUSY_STAT__REG_BUS_FIFO_BUSY__SHIFT 0x0
++#define CP_BUSY_STAT__COHER_CNT_NEQ_ZERO__SHIFT 0x6
++#define CP_BUSY_STAT__PFP_PARSING_PACKETS__SHIFT 0x7
++#define CP_BUSY_STAT__ME_PARSING_PACKETS__SHIFT 0x8
++#define CP_BUSY_STAT__RCIU_PFP_BUSY__SHIFT 0x9
++#define CP_BUSY_STAT__RCIU_ME_BUSY__SHIFT 0xa
++#define CP_BUSY_STAT__SEM_CMDFIFO_NOT_EMPTY__SHIFT 0xc
++#define CP_BUSY_STAT__SEM_FAILED_AND_HOLDING__SHIFT 0xd
++#define CP_BUSY_STAT__SEM_POLLING_FOR_PASS__SHIFT 0xe
++#define CP_BUSY_STAT__GFX_CONTEXT_BUSY__SHIFT 0xf
++#define CP_BUSY_STAT__ME_PARSER_BUSY__SHIFT 0x11
++#define CP_BUSY_STAT__EOP_DONE_BUSY__SHIFT 0x12
++#define CP_BUSY_STAT__STRM_OUT_BUSY__SHIFT 0x13
++#define CP_BUSY_STAT__PIPE_STATS_BUSY__SHIFT 0x14
++#define CP_BUSY_STAT__RCIU_CE_BUSY__SHIFT 0x15
++#define CP_BUSY_STAT__CE_PARSING_PACKETS__SHIFT 0x16
++#define CP_BUSY_STAT__REG_BUS_FIFO_BUSY_MASK 0x00000001L
++#define CP_BUSY_STAT__COHER_CNT_NEQ_ZERO_MASK 0x00000040L
++#define CP_BUSY_STAT__PFP_PARSING_PACKETS_MASK 0x00000080L
++#define CP_BUSY_STAT__ME_PARSING_PACKETS_MASK 0x00000100L
++#define CP_BUSY_STAT__RCIU_PFP_BUSY_MASK 0x00000200L
++#define CP_BUSY_STAT__RCIU_ME_BUSY_MASK 0x00000400L
++#define CP_BUSY_STAT__SEM_CMDFIFO_NOT_EMPTY_MASK 0x00001000L
++#define CP_BUSY_STAT__SEM_FAILED_AND_HOLDING_MASK 0x00002000L
++#define CP_BUSY_STAT__SEM_POLLING_FOR_PASS_MASK 0x00004000L
++#define CP_BUSY_STAT__GFX_CONTEXT_BUSY_MASK 0x00008000L
++#define CP_BUSY_STAT__ME_PARSER_BUSY_MASK 0x00020000L
++#define CP_BUSY_STAT__EOP_DONE_BUSY_MASK 0x00040000L
++#define CP_BUSY_STAT__STRM_OUT_BUSY_MASK 0x00080000L
++#define CP_BUSY_STAT__PIPE_STATS_BUSY_MASK 0x00100000L
++#define CP_BUSY_STAT__RCIU_CE_BUSY_MASK 0x00200000L
++#define CP_BUSY_STAT__CE_PARSING_PACKETS_MASK 0x00400000L
++//CP_STAT
++#define CP_STAT__ROQ_RING_BUSY__SHIFT 0x9
++#define CP_STAT__ROQ_INDIRECT1_BUSY__SHIFT 0xa
++#define CP_STAT__ROQ_INDIRECT2_BUSY__SHIFT 0xb
++#define CP_STAT__ROQ_STATE_BUSY__SHIFT 0xc
++#define CP_STAT__DC_BUSY__SHIFT 0xd
++#define CP_STAT__UTCL2IU_BUSY__SHIFT 0xe
++#define CP_STAT__PFP_BUSY__SHIFT 0xf
++#define CP_STAT__MEQ_BUSY__SHIFT 0x10
++#define CP_STAT__ME_BUSY__SHIFT 0x11
++#define CP_STAT__QUERY_BUSY__SHIFT 0x12
++#define CP_STAT__SEMAPHORE_BUSY__SHIFT 0x13
++#define CP_STAT__INTERRUPT_BUSY__SHIFT 0x14
++#define CP_STAT__SURFACE_SYNC_BUSY__SHIFT 0x15
++#define CP_STAT__DMA_BUSY__SHIFT 0x16
++#define CP_STAT__RCIU_BUSY__SHIFT 0x17
++#define CP_STAT__SCRATCH_RAM_BUSY__SHIFT 0x18
++#define CP_STAT__CE_BUSY__SHIFT 0x1a
++#define CP_STAT__TCIU_BUSY__SHIFT 0x1b
++#define CP_STAT__ROQ_CE_RING_BUSY__SHIFT 0x1c
++#define CP_STAT__ROQ_CE_INDIRECT1_BUSY__SHIFT 0x1d
++#define CP_STAT__ROQ_CE_INDIRECT2_BUSY__SHIFT 0x1e
++#define CP_STAT__CP_BUSY__SHIFT 0x1f
++#define CP_STAT__ROQ_RING_BUSY_MASK 0x00000200L
++#define CP_STAT__ROQ_INDIRECT1_BUSY_MASK 0x00000400L
++#define CP_STAT__ROQ_INDIRECT2_BUSY_MASK 0x00000800L
++#define CP_STAT__ROQ_STATE_BUSY_MASK 0x00001000L
++#define CP_STAT__DC_BUSY_MASK 0x00002000L
++#define CP_STAT__UTCL2IU_BUSY_MASK 0x00004000L
++#define CP_STAT__PFP_BUSY_MASK 0x00008000L
++#define CP_STAT__MEQ_BUSY_MASK 0x00010000L
++#define CP_STAT__ME_BUSY_MASK 0x00020000L
++#define CP_STAT__QUERY_BUSY_MASK 0x00040000L
++#define CP_STAT__SEMAPHORE_BUSY_MASK 0x00080000L
++#define CP_STAT__INTERRUPT_BUSY_MASK 0x00100000L
++#define CP_STAT__SURFACE_SYNC_BUSY_MASK 0x00200000L
++#define CP_STAT__DMA_BUSY_MASK 0x00400000L
++#define CP_STAT__RCIU_BUSY_MASK 0x00800000L
++#define CP_STAT__SCRATCH_RAM_BUSY_MASK 0x01000000L
++#define CP_STAT__CE_BUSY_MASK 0x04000000L
++#define CP_STAT__TCIU_BUSY_MASK 0x08000000L
++#define CP_STAT__ROQ_CE_RING_BUSY_MASK 0x10000000L
++#define CP_STAT__ROQ_CE_INDIRECT1_BUSY_MASK 0x20000000L
++#define CP_STAT__ROQ_CE_INDIRECT2_BUSY_MASK 0x40000000L
++#define CP_STAT__CP_BUSY_MASK 0x80000000L
++//CP_ME_HEADER_DUMP
++#define CP_ME_HEADER_DUMP__ME_HEADER_DUMP__SHIFT 0x0
++#define CP_ME_HEADER_DUMP__ME_HEADER_DUMP_MASK 0xFFFFFFFFL
++//CP_PFP_HEADER_DUMP
++#define CP_PFP_HEADER_DUMP__PFP_HEADER_DUMP__SHIFT 0x0
++#define CP_PFP_HEADER_DUMP__PFP_HEADER_DUMP_MASK 0xFFFFFFFFL
++//CP_GRBM_FREE_COUNT
++#define CP_GRBM_FREE_COUNT__FREE_COUNT__SHIFT 0x0
++#define CP_GRBM_FREE_COUNT__FREE_COUNT_GDS__SHIFT 0x8
++#define CP_GRBM_FREE_COUNT__FREE_COUNT_PFP__SHIFT 0x10
++#define CP_GRBM_FREE_COUNT__FREE_COUNT_MASK 0x0000003FL
++#define CP_GRBM_FREE_COUNT__FREE_COUNT_GDS_MASK 0x00003F00L
++#define CP_GRBM_FREE_COUNT__FREE_COUNT_PFP_MASK 0x003F0000L
++//CP_CE_HEADER_DUMP
++#define CP_CE_HEADER_DUMP__CE_HEADER_DUMP__SHIFT 0x0
++#define CP_CE_HEADER_DUMP__CE_HEADER_DUMP_MASK 0xFFFFFFFFL
++//CP_PFP_INSTR_PNTR
++#define CP_PFP_INSTR_PNTR__INSTR_PNTR__SHIFT 0x0
++#define CP_PFP_INSTR_PNTR__INSTR_PNTR_MASK 0x0000FFFFL
++//CP_ME_INSTR_PNTR
++#define CP_ME_INSTR_PNTR__INSTR_PNTR__SHIFT 0x0
++#define CP_ME_INSTR_PNTR__INSTR_PNTR_MASK 0x0000FFFFL
++//CP_CE_INSTR_PNTR
++#define CP_CE_INSTR_PNTR__INSTR_PNTR__SHIFT 0x0
++#define CP_CE_INSTR_PNTR__INSTR_PNTR_MASK 0x0000FFFFL
++//CP_MEC1_INSTR_PNTR
++#define CP_MEC1_INSTR_PNTR__INSTR_PNTR__SHIFT 0x0
++#define CP_MEC1_INSTR_PNTR__INSTR_PNTR_MASK 0x0000FFFFL
++//CP_MEC2_INSTR_PNTR
++#define CP_MEC2_INSTR_PNTR__INSTR_PNTR__SHIFT 0x0
++#define CP_MEC2_INSTR_PNTR__INSTR_PNTR_MASK 0x0000FFFFL
++//CP_CSF_STAT
++#define CP_CSF_STAT__BUFFER_REQUEST_COUNT__SHIFT 0x8
++#define CP_CSF_STAT__BUFFER_REQUEST_COUNT_MASK 0x0001FF00L
++//CP_ME_CNTL
++#define CP_ME_CNTL__CE_INVALIDATE_ICACHE__SHIFT 0x4
++#define CP_ME_CNTL__PFP_INVALIDATE_ICACHE__SHIFT 0x6
++#define CP_ME_CNTL__ME_INVALIDATE_ICACHE__SHIFT 0x8
++#define CP_ME_CNTL__CE_PIPE0_RESET__SHIFT 0x10
++#define CP_ME_CNTL__CE_PIPE1_RESET__SHIFT 0x11
++#define CP_ME_CNTL__PFP_PIPE0_RESET__SHIFT 0x12
++#define CP_ME_CNTL__PFP_PIPE1_RESET__SHIFT 0x13
++#define CP_ME_CNTL__ME_PIPE0_RESET__SHIFT 0x14
++#define CP_ME_CNTL__ME_PIPE1_RESET__SHIFT 0x15
++#define CP_ME_CNTL__CE_HALT__SHIFT 0x18
++#define CP_ME_CNTL__CE_STEP__SHIFT 0x19
++#define CP_ME_CNTL__PFP_HALT__SHIFT 0x1a
++#define CP_ME_CNTL__PFP_STEP__SHIFT 0x1b
++#define CP_ME_CNTL__ME_HALT__SHIFT 0x1c
++#define CP_ME_CNTL__ME_STEP__SHIFT 0x1d
++#define CP_ME_CNTL__CE_INVALIDATE_ICACHE_MASK 0x00000010L
++#define CP_ME_CNTL__PFP_INVALIDATE_ICACHE_MASK 0x00000040L
++#define CP_ME_CNTL__ME_INVALIDATE_ICACHE_MASK 0x00000100L
++#define CP_ME_CNTL__CE_PIPE0_RESET_MASK 0x00010000L
++#define CP_ME_CNTL__CE_PIPE1_RESET_MASK 0x00020000L
++#define CP_ME_CNTL__PFP_PIPE0_RESET_MASK 0x00040000L
++#define CP_ME_CNTL__PFP_PIPE1_RESET_MASK 0x00080000L
++#define CP_ME_CNTL__ME_PIPE0_RESET_MASK 0x00100000L
++#define CP_ME_CNTL__ME_PIPE1_RESET_MASK 0x00200000L
++#define CP_ME_CNTL__CE_HALT_MASK 0x01000000L
++#define CP_ME_CNTL__CE_STEP_MASK 0x02000000L
++#define CP_ME_CNTL__PFP_HALT_MASK 0x04000000L
++#define CP_ME_CNTL__PFP_STEP_MASK 0x08000000L
++#define CP_ME_CNTL__ME_HALT_MASK 0x10000000L
++#define CP_ME_CNTL__ME_STEP_MASK 0x20000000L
++//CP_CNTX_STAT
++#define CP_CNTX_STAT__ACTIVE_HP3D_CONTEXTS__SHIFT 0x0
++#define CP_CNTX_STAT__CURRENT_HP3D_CONTEXT__SHIFT 0x8
++#define CP_CNTX_STAT__ACTIVE_GFX_CONTEXTS__SHIFT 0x14
++#define CP_CNTX_STAT__CURRENT_GFX_CONTEXT__SHIFT 0x1c
++#define CP_CNTX_STAT__ACTIVE_HP3D_CONTEXTS_MASK 0x000000FFL
++#define CP_CNTX_STAT__CURRENT_HP3D_CONTEXT_MASK 0x00000700L
++#define CP_CNTX_STAT__ACTIVE_GFX_CONTEXTS_MASK 0x0FF00000L
++#define CP_CNTX_STAT__CURRENT_GFX_CONTEXT_MASK 0x70000000L
++//CP_ME_PREEMPTION
++#define CP_ME_PREEMPTION__OBSOLETE__SHIFT 0x0
++#define CP_ME_PREEMPTION__OBSOLETE_MASK 0x00000001L
++//CP_ROQ_THRESHOLDS
++#define CP_ROQ_THRESHOLDS__IB1_START__SHIFT 0x0
++#define CP_ROQ_THRESHOLDS__IB2_START__SHIFT 0x8
++#define CP_ROQ_THRESHOLDS__IB1_START_MASK 0x000000FFL
++#define CP_ROQ_THRESHOLDS__IB2_START_MASK 0x0000FF00L
++//CP_MEQ_STQ_THRESHOLD
++#define CP_MEQ_STQ_THRESHOLD__STQ_START__SHIFT 0x0
++#define CP_MEQ_STQ_THRESHOLD__STQ_START_MASK 0x000000FFL
++//CP_RB2_RPTR
++#define CP_RB2_RPTR__RB_RPTR__SHIFT 0x0
++#define CP_RB2_RPTR__RB_RPTR_MASK 0x000FFFFFL
++//CP_RB1_RPTR
++#define CP_RB1_RPTR__RB_RPTR__SHIFT 0x0
++#define CP_RB1_RPTR__RB_RPTR_MASK 0x000FFFFFL
++//CP_RB0_RPTR
++#define CP_RB0_RPTR__RB_RPTR__SHIFT 0x0
++#define CP_RB0_RPTR__RB_RPTR_MASK 0x000FFFFFL
++//CP_RB_RPTR
++#define CP_RB_RPTR__RB_RPTR__SHIFT 0x0
++#define CP_RB_RPTR__RB_RPTR_MASK 0x000FFFFFL
++//CP_RB_WPTR_DELAY
++#define CP_RB_WPTR_DELAY__PRE_WRITE_TIMER__SHIFT 0x0
++#define CP_RB_WPTR_DELAY__PRE_WRITE_LIMIT__SHIFT 0x1c
++#define CP_RB_WPTR_DELAY__PRE_WRITE_TIMER_MASK 0x0FFFFFFFL
++#define CP_RB_WPTR_DELAY__PRE_WRITE_LIMIT_MASK 0xF0000000L
++//CP_RB_WPTR_POLL_CNTL
++#define CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT 0x0
++#define CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
++#define CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY_MASK 0x0000FFFFL
++#define CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
++//CP_ROQ1_THRESHOLDS
++#define CP_ROQ1_THRESHOLDS__RB1_START__SHIFT 0x0
++#define CP_ROQ1_THRESHOLDS__RB2_START__SHIFT 0x8
++#define CP_ROQ1_THRESHOLDS__R0_IB1_START__SHIFT 0x10
++#define CP_ROQ1_THRESHOLDS__R1_IB1_START__SHIFT 0x18
++#define CP_ROQ1_THRESHOLDS__RB1_START_MASK 0x000000FFL
++#define CP_ROQ1_THRESHOLDS__RB2_START_MASK 0x0000FF00L
++#define CP_ROQ1_THRESHOLDS__R0_IB1_START_MASK 0x00FF0000L
++#define CP_ROQ1_THRESHOLDS__R1_IB1_START_MASK 0xFF000000L
++//CP_ROQ2_THRESHOLDS
++#define CP_ROQ2_THRESHOLDS__R2_IB1_START__SHIFT 0x0
++#define CP_ROQ2_THRESHOLDS__R0_IB2_START__SHIFT 0x8
++#define CP_ROQ2_THRESHOLDS__R1_IB2_START__SHIFT 0x10
++#define CP_ROQ2_THRESHOLDS__R2_IB2_START__SHIFT 0x18
++#define CP_ROQ2_THRESHOLDS__R2_IB1_START_MASK 0x000000FFL
++#define CP_ROQ2_THRESHOLDS__R0_IB2_START_MASK 0x0000FF00L
++#define CP_ROQ2_THRESHOLDS__R1_IB2_START_MASK 0x00FF0000L
++#define CP_ROQ2_THRESHOLDS__R2_IB2_START_MASK 0xFF000000L
++//CP_STQ_THRESHOLDS
++#define CP_STQ_THRESHOLDS__STQ0_START__SHIFT 0x0
++#define CP_STQ_THRESHOLDS__STQ1_START__SHIFT 0x8
++#define CP_STQ_THRESHOLDS__STQ2_START__SHIFT 0x10
++#define CP_STQ_THRESHOLDS__STQ0_START_MASK 0x000000FFL
++#define CP_STQ_THRESHOLDS__STQ1_START_MASK 0x0000FF00L
++#define CP_STQ_THRESHOLDS__STQ2_START_MASK 0x00FF0000L
++//CP_QUEUE_THRESHOLDS
++#define CP_QUEUE_THRESHOLDS__ROQ_IB1_START__SHIFT 0x0
++#define CP_QUEUE_THRESHOLDS__ROQ_IB2_START__SHIFT 0x8
++#define CP_QUEUE_THRESHOLDS__ROQ_IB1_START_MASK 0x0000003FL
++#define CP_QUEUE_THRESHOLDS__ROQ_IB2_START_MASK 0x00003F00L
++//CP_MEQ_THRESHOLDS
++#define CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT 0x0
++#define CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT 0x8
++#define CP_MEQ_THRESHOLDS__MEQ1_START_MASK 0x000000FFL
++#define CP_MEQ_THRESHOLDS__MEQ2_START_MASK 0x0000FF00L
++//CP_ROQ_AVAIL
++#define CP_ROQ_AVAIL__ROQ_CNT_RING__SHIFT 0x0
++#define CP_ROQ_AVAIL__ROQ_CNT_IB1__SHIFT 0x10
++#define CP_ROQ_AVAIL__ROQ_CNT_RING_MASK 0x000007FFL
++#define CP_ROQ_AVAIL__ROQ_CNT_IB1_MASK 0x07FF0000L
++//CP_STQ_AVAIL
++#define CP_STQ_AVAIL__STQ_CNT__SHIFT 0x0
++#define CP_STQ_AVAIL__STQ_CNT_MASK 0x000001FFL
++//CP_ROQ2_AVAIL
++#define CP_ROQ2_AVAIL__ROQ_CNT_IB2__SHIFT 0x0
++#define CP_ROQ2_AVAIL__ROQ_CNT_IB2_MASK 0x000007FFL
++//CP_MEQ_AVAIL
++#define CP_MEQ_AVAIL__MEQ_CNT__SHIFT 0x0
++#define CP_MEQ_AVAIL__MEQ_CNT_MASK 0x000003FFL
++//CP_CMD_INDEX
++#define CP_CMD_INDEX__CMD_INDEX__SHIFT 0x0
++#define CP_CMD_INDEX__CMD_ME_SEL__SHIFT 0xc
++#define CP_CMD_INDEX__CMD_QUEUE_SEL__SHIFT 0x10
++#define CP_CMD_INDEX__CMD_INDEX_MASK 0x000007FFL
++#define CP_CMD_INDEX__CMD_ME_SEL_MASK 0x00003000L
++#define CP_CMD_INDEX__CMD_QUEUE_SEL_MASK 0x00070000L
++//CP_CMD_DATA
++#define CP_CMD_DATA__CMD_DATA__SHIFT 0x0
++#define CP_CMD_DATA__CMD_DATA_MASK 0xFFFFFFFFL
++//CP_ROQ_RB_STAT
++#define CP_ROQ_RB_STAT__ROQ_RPTR_PRIMARY__SHIFT 0x0
++#define CP_ROQ_RB_STAT__ROQ_WPTR_PRIMARY__SHIFT 0x10
++#define CP_ROQ_RB_STAT__ROQ_RPTR_PRIMARY_MASK 0x000003FFL
++#define CP_ROQ_RB_STAT__ROQ_WPTR_PRIMARY_MASK 0x03FF0000L
++//CP_ROQ_IB1_STAT
++#define CP_ROQ_IB1_STAT__ROQ_RPTR_INDIRECT1__SHIFT 0x0
++#define CP_ROQ_IB1_STAT__ROQ_WPTR_INDIRECT1__SHIFT 0x10
++#define CP_ROQ_IB1_STAT__ROQ_RPTR_INDIRECT1_MASK 0x000003FFL
++#define CP_ROQ_IB1_STAT__ROQ_WPTR_INDIRECT1_MASK 0x03FF0000L
++//CP_ROQ_IB2_STAT
++#define CP_ROQ_IB2_STAT__ROQ_RPTR_INDIRECT2__SHIFT 0x0
++#define CP_ROQ_IB2_STAT__ROQ_WPTR_INDIRECT2__SHIFT 0x10
++#define CP_ROQ_IB2_STAT__ROQ_RPTR_INDIRECT2_MASK 0x000003FFL
++#define CP_ROQ_IB2_STAT__ROQ_WPTR_INDIRECT2_MASK 0x03FF0000L
++//CP_STQ_STAT
++#define CP_STQ_STAT__STQ_RPTR__SHIFT 0x0
++#define CP_STQ_STAT__STQ_RPTR_MASK 0x000003FFL
++//CP_STQ_WR_STAT
++#define CP_STQ_WR_STAT__STQ_WPTR__SHIFT 0x0
++#define CP_STQ_WR_STAT__STQ_WPTR_MASK 0x000003FFL
++//CP_MEQ_STAT
++#define CP_MEQ_STAT__MEQ_RPTR__SHIFT 0x0
++#define CP_MEQ_STAT__MEQ_WPTR__SHIFT 0x10
++#define CP_MEQ_STAT__MEQ_RPTR_MASK 0x000003FFL
++#define CP_MEQ_STAT__MEQ_WPTR_MASK 0x03FF0000L
++//CP_CEQ1_AVAIL
++#define CP_CEQ1_AVAIL__CEQ_CNT_RING__SHIFT 0x0
++#define CP_CEQ1_AVAIL__CEQ_CNT_IB1__SHIFT 0x10
++#define CP_CEQ1_AVAIL__CEQ_CNT_RING_MASK 0x000007FFL
++#define CP_CEQ1_AVAIL__CEQ_CNT_IB1_MASK 0x07FF0000L
++//CP_CEQ2_AVAIL
++#define CP_CEQ2_AVAIL__CEQ_CNT_IB2__SHIFT 0x0
++#define CP_CEQ2_AVAIL__CEQ_CNT_IB2_MASK 0x000007FFL
++//CP_CE_ROQ_RB_STAT
++#define CP_CE_ROQ_RB_STAT__CEQ_RPTR_PRIMARY__SHIFT 0x0
++#define CP_CE_ROQ_RB_STAT__CEQ_WPTR_PRIMARY__SHIFT 0x10
++#define CP_CE_ROQ_RB_STAT__CEQ_RPTR_PRIMARY_MASK 0x000003FFL
++#define CP_CE_ROQ_RB_STAT__CEQ_WPTR_PRIMARY_MASK 0x03FF0000L
++//CP_CE_ROQ_IB1_STAT
++#define CP_CE_ROQ_IB1_STAT__CEQ_RPTR_INDIRECT1__SHIFT 0x0
++#define CP_CE_ROQ_IB1_STAT__CEQ_WPTR_INDIRECT1__SHIFT 0x10
++#define CP_CE_ROQ_IB1_STAT__CEQ_RPTR_INDIRECT1_MASK 0x000003FFL
++#define CP_CE_ROQ_IB1_STAT__CEQ_WPTR_INDIRECT1_MASK 0x03FF0000L
++//CP_CE_ROQ_IB2_STAT
++#define CP_CE_ROQ_IB2_STAT__CEQ_RPTR_INDIRECT2__SHIFT 0x0
++#define CP_CE_ROQ_IB2_STAT__CEQ_WPTR_INDIRECT2__SHIFT 0x10
++#define CP_CE_ROQ_IB2_STAT__CEQ_RPTR_INDIRECT2_MASK 0x000003FFL
++#define CP_CE_ROQ_IB2_STAT__CEQ_WPTR_INDIRECT2_MASK 0x03FF0000L
++//CP_INT_STAT_DEBUG
++#define CP_INT_STAT_DEBUG__PRIV_INSTR_INT_ASSERTED__SHIFT 0x16
++#define CP_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED__SHIFT 0x17
++#define CP_INT_STAT_DEBUG__PRIV_INSTR_INT_ASSERTED_MASK 0x00400000L
++#define CP_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED_MASK 0x00800000L
++
++
++// addressBlock: gc_padec
++//VGT_VTX_VECT_EJECT_REG
++#define VGT_VTX_VECT_EJECT_REG__PRIM_COUNT__SHIFT 0x0
++#define VGT_VTX_VECT_EJECT_REG__PRIM_COUNT_MASK 0x0000007FL
++//VGT_DMA_DATA_FIFO_DEPTH
++#define VGT_DMA_DATA_FIFO_DEPTH__DMA_DATA_FIFO_DEPTH__SHIFT 0x0
++#define VGT_DMA_DATA_FIFO_DEPTH__DMA2DRAW_FIFO_DEPTH__SHIFT 0x9
++#define VGT_DMA_DATA_FIFO_DEPTH__DMA_DATA_FIFO_DEPTH_MASK 0x000001FFL
++#define VGT_DMA_DATA_FIFO_DEPTH__DMA2DRAW_FIFO_DEPTH_MASK 0x0007FE00L
++//VGT_DMA_REQ_FIFO_DEPTH
++#define VGT_DMA_REQ_FIFO_DEPTH__DMA_REQ_FIFO_DEPTH__SHIFT 0x0
++#define VGT_DMA_REQ_FIFO_DEPTH__DMA_REQ_FIFO_DEPTH_MASK 0x0000003FL
++//VGT_DRAW_INIT_FIFO_DEPTH
++#define VGT_DRAW_INIT_FIFO_DEPTH__DRAW_INIT_FIFO_DEPTH__SHIFT 0x0
++#define VGT_DRAW_INIT_FIFO_DEPTH__DRAW_INIT_FIFO_DEPTH_MASK 0x0000003FL
++//VGT_LAST_COPY_STATE
++#define VGT_LAST_COPY_STATE__SRC_STATE_ID__SHIFT 0x0
++#define VGT_LAST_COPY_STATE__DST_STATE_ID__SHIFT 0x10
++#define VGT_LAST_COPY_STATE__SRC_STATE_ID_MASK 0x00000007L
++#define VGT_LAST_COPY_STATE__DST_STATE_ID_MASK 0x00070000L
++//VGT_CACHE_INVALIDATION
++#define VGT_CACHE_INVALIDATION__CACHE_INVALIDATION__SHIFT 0x0
++#define VGT_CACHE_INVALIDATION__DIS_INSTANCING_OPT__SHIFT 0x4
++#define VGT_CACHE_INVALIDATION__VS_NO_EXTRA_BUFFER__SHIFT 0x5
++#define VGT_CACHE_INVALIDATION__AUTO_INVLD_EN__SHIFT 0x6
++#define VGT_CACHE_INVALIDATION__USE_GS_DONE__SHIFT 0x9
++#define VGT_CACHE_INVALIDATION__DIS_RANGE_FULL_INVLD__SHIFT 0xb
++#define VGT_CACHE_INVALIDATION__GS_LATE_ALLOC_EN__SHIFT 0xc
++#define VGT_CACHE_INVALIDATION__STREAMOUT_FULL_FLUSH__SHIFT 0xd
++#define VGT_CACHE_INVALIDATION__ES_LIMIT__SHIFT 0x10
++#define VGT_CACHE_INVALIDATION__ENABLE_PING_PONG__SHIFT 0x15
++#define VGT_CACHE_INVALIDATION__OPT_FLOW_CNTL_1__SHIFT 0x16
++#define VGT_CACHE_INVALIDATION__OPT_FLOW_CNTL_2__SHIFT 0x19
++#define VGT_CACHE_INVALIDATION__EN_WAVE_MERGE__SHIFT 0x1c
++#define VGT_CACHE_INVALIDATION__ENABLE_PING_PONG_EOI__SHIFT 0x1d
++#define VGT_CACHE_INVALIDATION__CACHE_INVALIDATION_MASK 0x00000003L
++#define VGT_CACHE_INVALIDATION__DIS_INSTANCING_OPT_MASK 0x00000010L
++#define VGT_CACHE_INVALIDATION__VS_NO_EXTRA_BUFFER_MASK 0x00000020L
++#define VGT_CACHE_INVALIDATION__AUTO_INVLD_EN_MASK 0x000000C0L
++#define VGT_CACHE_INVALIDATION__USE_GS_DONE_MASK 0x00000200L
++#define VGT_CACHE_INVALIDATION__DIS_RANGE_FULL_INVLD_MASK 0x00000800L
++#define VGT_CACHE_INVALIDATION__GS_LATE_ALLOC_EN_MASK 0x00001000L
++#define VGT_CACHE_INVALIDATION__STREAMOUT_FULL_FLUSH_MASK 0x00002000L
++#define VGT_CACHE_INVALIDATION__ES_LIMIT_MASK 0x001F0000L
++#define VGT_CACHE_INVALIDATION__ENABLE_PING_PONG_MASK 0x00200000L
++#define VGT_CACHE_INVALIDATION__OPT_FLOW_CNTL_1_MASK 0x01C00000L
++#define VGT_CACHE_INVALIDATION__OPT_FLOW_CNTL_2_MASK 0x0E000000L
++#define VGT_CACHE_INVALIDATION__EN_WAVE_MERGE_MASK 0x10000000L
++#define VGT_CACHE_INVALIDATION__ENABLE_PING_PONG_EOI_MASK 0x20000000L
++//VGT_STRMOUT_DELAY
++#define VGT_STRMOUT_DELAY__SKIP_DELAY__SHIFT 0x0
++#define VGT_STRMOUT_DELAY__SE0_WD_DELAY__SHIFT 0x8
++#define VGT_STRMOUT_DELAY__SE1_WD_DELAY__SHIFT 0xb
++#define VGT_STRMOUT_DELAY__SE2_WD_DELAY__SHIFT 0xe
++#define VGT_STRMOUT_DELAY__SE3_WD_DELAY__SHIFT 0x11
++#define VGT_STRMOUT_DELAY__SKIP_DELAY_MASK 0x000000FFL
++#define VGT_STRMOUT_DELAY__SE0_WD_DELAY_MASK 0x00000700L
++#define VGT_STRMOUT_DELAY__SE1_WD_DELAY_MASK 0x00003800L
++#define VGT_STRMOUT_DELAY__SE2_WD_DELAY_MASK 0x0001C000L
++#define VGT_STRMOUT_DELAY__SE3_WD_DELAY_MASK 0x000E0000L
++//VGT_FIFO_DEPTHS
++#define VGT_FIFO_DEPTHS__VS_DEALLOC_TBL_DEPTH__SHIFT 0x0
++#define VGT_FIFO_DEPTHS__RESERVED_0__SHIFT 0x7
++#define VGT_FIFO_DEPTHS__CLIPP_FIFO_DEPTH__SHIFT 0x8
++#define VGT_FIFO_DEPTHS__HSINPUT_FIFO_DEPTH__SHIFT 0x16
++#define VGT_FIFO_DEPTHS__VS_DEALLOC_TBL_DEPTH_MASK 0x0000007FL
++#define VGT_FIFO_DEPTHS__RESERVED_0_MASK 0x00000080L
++#define VGT_FIFO_DEPTHS__CLIPP_FIFO_DEPTH_MASK 0x003FFF00L
++#define VGT_FIFO_DEPTHS__HSINPUT_FIFO_DEPTH_MASK 0x0FC00000L
++//VGT_GS_VERTEX_REUSE
++#define VGT_GS_VERTEX_REUSE__VERT_REUSE__SHIFT 0x0
++#define VGT_GS_VERTEX_REUSE__VERT_REUSE_MASK 0x0000001FL
++//VGT_MC_LAT_CNTL
++#define VGT_MC_LAT_CNTL__MC_TIME_STAMP_RES__SHIFT 0x0
++#define VGT_MC_LAT_CNTL__MC_TIME_STAMP_RES_MASK 0x0000000FL
++//IA_CNTL_STATUS
++#define IA_CNTL_STATUS__IA_BUSY__SHIFT 0x0
++#define IA_CNTL_STATUS__IA_DMA_BUSY__SHIFT 0x1
++#define IA_CNTL_STATUS__IA_DMA_REQ_BUSY__SHIFT 0x2
++#define IA_CNTL_STATUS__IA_GRP_BUSY__SHIFT 0x3
++#define IA_CNTL_STATUS__IA_ADC_BUSY__SHIFT 0x4
++#define IA_CNTL_STATUS__IA_BUSY_MASK 0x00000001L
++#define IA_CNTL_STATUS__IA_DMA_BUSY_MASK 0x00000002L
++#define IA_CNTL_STATUS__IA_DMA_REQ_BUSY_MASK 0x00000004L
++#define IA_CNTL_STATUS__IA_GRP_BUSY_MASK 0x00000008L
++#define IA_CNTL_STATUS__IA_ADC_BUSY_MASK 0x00000010L
++//VGT_CNTL_STATUS
++#define VGT_CNTL_STATUS__VGT_BUSY__SHIFT 0x0
++#define VGT_CNTL_STATUS__VGT_OUT_INDX_BUSY__SHIFT 0x1
++#define VGT_CNTL_STATUS__VGT_OUT_BUSY__SHIFT 0x2
++#define VGT_CNTL_STATUS__VGT_PT_BUSY__SHIFT 0x3
++#define VGT_CNTL_STATUS__VGT_TE_BUSY__SHIFT 0x4
++#define VGT_CNTL_STATUS__VGT_VR_BUSY__SHIFT 0x5
++#define VGT_CNTL_STATUS__VGT_PI_BUSY__SHIFT 0x6
++#define VGT_CNTL_STATUS__VGT_GS_BUSY__SHIFT 0x7
++#define VGT_CNTL_STATUS__VGT_HS_BUSY__SHIFT 0x8
++#define VGT_CNTL_STATUS__VGT_TE11_BUSY__SHIFT 0x9
++#define VGT_CNTL_STATUS__VGT_PRIMGEN_BUSY__SHIFT 0xa
++#define VGT_CNTL_STATUS__VGT_BUSY_MASK 0x00000001L
++#define VGT_CNTL_STATUS__VGT_OUT_INDX_BUSY_MASK 0x00000002L
++#define VGT_CNTL_STATUS__VGT_OUT_BUSY_MASK 0x00000004L
++#define VGT_CNTL_STATUS__VGT_PT_BUSY_MASK 0x00000008L
++#define VGT_CNTL_STATUS__VGT_TE_BUSY_MASK 0x00000010L
++#define VGT_CNTL_STATUS__VGT_VR_BUSY_MASK 0x00000020L
++#define VGT_CNTL_STATUS__VGT_PI_BUSY_MASK 0x00000040L
++#define VGT_CNTL_STATUS__VGT_GS_BUSY_MASK 0x00000080L
++#define VGT_CNTL_STATUS__VGT_HS_BUSY_MASK 0x00000100L
++#define VGT_CNTL_STATUS__VGT_TE11_BUSY_MASK 0x00000200L
++#define VGT_CNTL_STATUS__VGT_PRIMGEN_BUSY_MASK 0x00000400L
++//WD_CNTL_STATUS
++#define WD_CNTL_STATUS__WD_BUSY__SHIFT 0x0
++#define WD_CNTL_STATUS__WD_SPL_DMA_BUSY__SHIFT 0x1
++#define WD_CNTL_STATUS__WD_SPL_DI_BUSY__SHIFT 0x2
++#define WD_CNTL_STATUS__WD_ADC_BUSY__SHIFT 0x3
++#define WD_CNTL_STATUS__WD_BUSY_MASK 0x00000001L
++#define WD_CNTL_STATUS__WD_SPL_DMA_BUSY_MASK 0x00000002L
++#define WD_CNTL_STATUS__WD_SPL_DI_BUSY_MASK 0x00000004L
++#define WD_CNTL_STATUS__WD_ADC_BUSY_MASK 0x00000008L
++//CC_GC_PRIM_CONFIG
++#define CC_GC_PRIM_CONFIG__INACTIVE_IA__SHIFT 0x10
++#define CC_GC_PRIM_CONFIG__INACTIVE_VGT_PA__SHIFT 0x18
++#define CC_GC_PRIM_CONFIG__INACTIVE_IA_MASK 0x00030000L
++#define CC_GC_PRIM_CONFIG__INACTIVE_VGT_PA_MASK 0x0F000000L
++//GC_USER_PRIM_CONFIG
++#define GC_USER_PRIM_CONFIG__INACTIVE_IA__SHIFT 0x10
++#define GC_USER_PRIM_CONFIG__INACTIVE_VGT_PA__SHIFT 0x18
++#define GC_USER_PRIM_CONFIG__INACTIVE_IA_MASK 0x00030000L
++#define GC_USER_PRIM_CONFIG__INACTIVE_VGT_PA_MASK 0x0F000000L
++//WD_QOS
++#define WD_QOS__DRAW_STALL__SHIFT 0x0
++#define WD_QOS__DRAW_STALL_MASK 0x00000001L
++//WD_UTCL1_CNTL
++#define WD_UTCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
++#define WD_UTCL1_CNTL__VMID_RESET_MODE__SHIFT 0x17
++#define WD_UTCL1_CNTL__DROP_MODE__SHIFT 0x18
++#define WD_UTCL1_CNTL__BYPASS__SHIFT 0x19
++#define WD_UTCL1_CNTL__INVALIDATE__SHIFT 0x1a
++#define WD_UTCL1_CNTL__FRAG_LIMIT_MODE__SHIFT 0x1b
++#define WD_UTCL1_CNTL__FORCE_SNOOP__SHIFT 0x1c
++#define WD_UTCL1_CNTL__FORCE_SD_VMID_DIRTY__SHIFT 0x1d
++#define WD_UTCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
++#define WD_UTCL1_CNTL__VMID_RESET_MODE_MASK 0x00800000L
++#define WD_UTCL1_CNTL__DROP_MODE_MASK 0x01000000L
++#define WD_UTCL1_CNTL__BYPASS_MASK 0x02000000L
++#define WD_UTCL1_CNTL__INVALIDATE_MASK 0x04000000L
++#define WD_UTCL1_CNTL__FRAG_LIMIT_MODE_MASK 0x08000000L
++#define WD_UTCL1_CNTL__FORCE_SNOOP_MASK 0x10000000L
++#define WD_UTCL1_CNTL__FORCE_SD_VMID_DIRTY_MASK 0x20000000L
++//WD_UTCL1_STATUS
++#define WD_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
++#define WD_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
++#define WD_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
++#define WD_UTCL1_STATUS__FAULT_UTCL1ID__SHIFT 0x8
++#define WD_UTCL1_STATUS__RETRY_UTCL1ID__SHIFT 0x10
++#define WD_UTCL1_STATUS__PRT_UTCL1ID__SHIFT 0x18
++#define WD_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
++#define WD_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
++#define WD_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
++#define WD_UTCL1_STATUS__FAULT_UTCL1ID_MASK 0x00003F00L
++#define WD_UTCL1_STATUS__RETRY_UTCL1ID_MASK 0x003F0000L
++#define WD_UTCL1_STATUS__PRT_UTCL1ID_MASK 0x3F000000L
++//IA_UTCL1_CNTL
++#define IA_UTCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
++#define IA_UTCL1_CNTL__VMID_RESET_MODE__SHIFT 0x17
++#define IA_UTCL1_CNTL__DROP_MODE__SHIFT 0x18
++#define IA_UTCL1_CNTL__BYPASS__SHIFT 0x19
++#define IA_UTCL1_CNTL__INVALIDATE__SHIFT 0x1a
++#define IA_UTCL1_CNTL__FRAG_LIMIT_MODE__SHIFT 0x1b
++#define IA_UTCL1_CNTL__FORCE_SNOOP__SHIFT 0x1c
++#define IA_UTCL1_CNTL__FORCE_SD_VMID_DIRTY__SHIFT 0x1d
++#define IA_UTCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
++#define IA_UTCL1_CNTL__VMID_RESET_MODE_MASK 0x00800000L
++#define IA_UTCL1_CNTL__DROP_MODE_MASK 0x01000000L
++#define IA_UTCL1_CNTL__BYPASS_MASK 0x02000000L
++#define IA_UTCL1_CNTL__INVALIDATE_MASK 0x04000000L
++#define IA_UTCL1_CNTL__FRAG_LIMIT_MODE_MASK 0x08000000L
++#define IA_UTCL1_CNTL__FORCE_SNOOP_MASK 0x10000000L
++#define IA_UTCL1_CNTL__FORCE_SD_VMID_DIRTY_MASK 0x20000000L
++//IA_UTCL1_STATUS
++#define IA_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
++#define IA_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
++#define IA_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
++#define IA_UTCL1_STATUS__FAULT_UTCL1ID__SHIFT 0x8
++#define IA_UTCL1_STATUS__RETRY_UTCL1ID__SHIFT 0x10
++#define IA_UTCL1_STATUS__PRT_UTCL1ID__SHIFT 0x18
++#define IA_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
++#define IA_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
++#define IA_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
++#define IA_UTCL1_STATUS__FAULT_UTCL1ID_MASK 0x00003F00L
++#define IA_UTCL1_STATUS__RETRY_UTCL1ID_MASK 0x003F0000L
++#define IA_UTCL1_STATUS__PRT_UTCL1ID_MASK 0x3F000000L
++//VGT_SYS_CONFIG
++#define VGT_SYS_CONFIG__DUAL_CORE_EN__SHIFT 0x0
++#define VGT_SYS_CONFIG__MAX_LS_HS_THDGRP__SHIFT 0x1
++#define VGT_SYS_CONFIG__ADC_EVENT_FILTER_DISABLE__SHIFT 0x7
++#define VGT_SYS_CONFIG__DUAL_CORE_EN_MASK 0x00000001L
++#define VGT_SYS_CONFIG__MAX_LS_HS_THDGRP_MASK 0x0000007EL
++#define VGT_SYS_CONFIG__ADC_EVENT_FILTER_DISABLE_MASK 0x00000080L
++//VGT_VS_MAX_WAVE_ID
++#define VGT_VS_MAX_WAVE_ID__MAX_WAVE_ID__SHIFT 0x0
++#define VGT_VS_MAX_WAVE_ID__MAX_WAVE_ID_MASK 0x00000FFFL
++//VGT_GS_MAX_WAVE_ID
++#define VGT_GS_MAX_WAVE_ID__MAX_WAVE_ID__SHIFT 0x0
++#define VGT_GS_MAX_WAVE_ID__MAX_WAVE_ID_MASK 0x00000FFFL
++//GFX_PIPE_CONTROL
++#define GFX_PIPE_CONTROL__HYSTERESIS_CNT__SHIFT 0x0
++#define GFX_PIPE_CONTROL__RESERVED__SHIFT 0xd
++#define GFX_PIPE_CONTROL__CONTEXT_SUSPEND_EN__SHIFT 0x10
++#define GFX_PIPE_CONTROL__HYSTERESIS_CNT_MASK 0x00001FFFL
++#define GFX_PIPE_CONTROL__RESERVED_MASK 0x0000E000L
++#define GFX_PIPE_CONTROL__CONTEXT_SUSPEND_EN_MASK 0x00010000L
++//CC_GC_SHADER_ARRAY_CONFIG
++#define CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT 0x10
++#define CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK 0xFFFF0000L
++//GC_USER_SHADER_ARRAY_CONFIG
++#define GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT 0x10
++#define GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK 0xFFFF0000L
++//VGT_DMA_PRIMITIVE_TYPE
++#define VGT_DMA_PRIMITIVE_TYPE__PRIM_TYPE__SHIFT 0x0
++#define VGT_DMA_PRIMITIVE_TYPE__PRIM_TYPE_MASK 0x0000003FL
++//VGT_DMA_CONTROL
++#define VGT_DMA_CONTROL__PRIMGROUP_SIZE__SHIFT 0x0
++#define VGT_DMA_CONTROL__IA_SWITCH_ON_EOP__SHIFT 0x11
++#define VGT_DMA_CONTROL__SWITCH_ON_EOI__SHIFT 0x13
++#define VGT_DMA_CONTROL__WD_SWITCH_ON_EOP__SHIFT 0x14
++#define VGT_DMA_CONTROL__EN_INST_OPT_BASIC__SHIFT 0x15
++#define VGT_DMA_CONTROL__EN_INST_OPT_ADV__SHIFT 0x16
++#define VGT_DMA_CONTROL__HW_USE_ONLY__SHIFT 0x17
++#define VGT_DMA_CONTROL__PRIMGROUP_SIZE_MASK 0x0000FFFFL
++#define VGT_DMA_CONTROL__IA_SWITCH_ON_EOP_MASK 0x00020000L
++#define VGT_DMA_CONTROL__SWITCH_ON_EOI_MASK 0x00080000L
++#define VGT_DMA_CONTROL__WD_SWITCH_ON_EOP_MASK 0x00100000L
++#define VGT_DMA_CONTROL__EN_INST_OPT_BASIC_MASK 0x00200000L
++#define VGT_DMA_CONTROL__EN_INST_OPT_ADV_MASK 0x00400000L
++#define VGT_DMA_CONTROL__HW_USE_ONLY_MASK 0x00800000L
++//VGT_DMA_LS_HS_CONFIG
++#define VGT_DMA_LS_HS_CONFIG__HS_NUM_INPUT_CP__SHIFT 0x8
++#define VGT_DMA_LS_HS_CONFIG__HS_NUM_INPUT_CP_MASK 0x00003F00L
++//WD_BUF_RESOURCE_1
++#define WD_BUF_RESOURCE_1__POS_BUF_SIZE__SHIFT 0x0
++#define WD_BUF_RESOURCE_1__INDEX_BUF_SIZE__SHIFT 0x10
++#define WD_BUF_RESOURCE_1__POS_BUF_SIZE_MASK 0x0000FFFFL
++#define WD_BUF_RESOURCE_1__INDEX_BUF_SIZE_MASK 0xFFFF0000L
++//WD_BUF_RESOURCE_2
++#define WD_BUF_RESOURCE_2__PARAM_BUF_SIZE__SHIFT 0x0
++#define WD_BUF_RESOURCE_2__ADDR_MODE__SHIFT 0xf
++#define WD_BUF_RESOURCE_2__CNTL_SB_BUF_SIZE__SHIFT 0x10
++#define WD_BUF_RESOURCE_2__PARAM_BUF_SIZE_MASK 0x00001FFFL
++#define WD_BUF_RESOURCE_2__ADDR_MODE_MASK 0x00008000L
++#define WD_BUF_RESOURCE_2__CNTL_SB_BUF_SIZE_MASK 0xFFFF0000L
++//PA_CL_CNTL_STATUS
++#define PA_CL_CNTL_STATUS__UTC_FAULT_DETECTED__SHIFT 0x0
++#define PA_CL_CNTL_STATUS__UTC_RETRY_DETECTED__SHIFT 0x1
++#define PA_CL_CNTL_STATUS__UTC_PRT_DETECTED__SHIFT 0x2
++#define PA_CL_CNTL_STATUS__UTC_FAULT_DETECTED_MASK 0x00000001L
++#define PA_CL_CNTL_STATUS__UTC_RETRY_DETECTED_MASK 0x00000002L
++#define PA_CL_CNTL_STATUS__UTC_PRT_DETECTED_MASK 0x00000004L
++//PA_CL_ENHANCE
++#define PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA__SHIFT 0x0
++#define PA_CL_ENHANCE__NUM_CLIP_SEQ__SHIFT 0x1
++#define PA_CL_ENHANCE__CLIPPED_PRIM_SEQ_STALL__SHIFT 0x3
++#define PA_CL_ENHANCE__VE_NAN_PROC_DISABLE__SHIFT 0x4
++#define PA_CL_ENHANCE__IGNORE_PIPELINE_RESET__SHIFT 0x6
++#define PA_CL_ENHANCE__KILL_INNER_EDGE_FLAGS__SHIFT 0x7
++#define PA_CL_ENHANCE__NGG_PA_TO_ALL_SC__SHIFT 0x8
++#define PA_CL_ENHANCE__TC_LATENCY_TIME_STAMP_RESOLUTION__SHIFT 0x9
++#define PA_CL_ENHANCE__NGG_BYPASS_PRIM_FILTER__SHIFT 0xb
++#define PA_CL_ENHANCE__NGG_SIDEBAND_MEMORY_DEPTH__SHIFT 0xc
++#define PA_CL_ENHANCE__NGG_PRIM_INDICES_FIFO_DEPTH__SHIFT 0xe
++#define PA_CL_ENHANCE__ECO_SPARE3__SHIFT 0x1c
++#define PA_CL_ENHANCE__ECO_SPARE2__SHIFT 0x1d
++#define PA_CL_ENHANCE__ECO_SPARE1__SHIFT 0x1e
++#define PA_CL_ENHANCE__ECO_SPARE0__SHIFT 0x1f
++#define PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA_MASK 0x00000001L
++#define PA_CL_ENHANCE__NUM_CLIP_SEQ_MASK 0x00000006L
++#define PA_CL_ENHANCE__CLIPPED_PRIM_SEQ_STALL_MASK 0x00000008L
++#define PA_CL_ENHANCE__VE_NAN_PROC_DISABLE_MASK 0x00000010L
++#define PA_CL_ENHANCE__IGNORE_PIPELINE_RESET_MASK 0x00000040L
++#define PA_CL_ENHANCE__KILL_INNER_EDGE_FLAGS_MASK 0x00000080L
++#define PA_CL_ENHANCE__NGG_PA_TO_ALL_SC_MASK 0x00000100L
++#define PA_CL_ENHANCE__TC_LATENCY_TIME_STAMP_RESOLUTION_MASK 0x00000600L
++#define PA_CL_ENHANCE__NGG_BYPASS_PRIM_FILTER_MASK 0x00000800L
++#define PA_CL_ENHANCE__NGG_SIDEBAND_MEMORY_DEPTH_MASK 0x00003000L
++#define PA_CL_ENHANCE__NGG_PRIM_INDICES_FIFO_DEPTH_MASK 0x0001C000L
++#define PA_CL_ENHANCE__ECO_SPARE3_MASK 0x10000000L
++#define PA_CL_ENHANCE__ECO_SPARE2_MASK 0x20000000L
++#define PA_CL_ENHANCE__ECO_SPARE1_MASK 0x40000000L
++#define PA_CL_ENHANCE__ECO_SPARE0_MASK 0x80000000L
++//PA_SU_CNTL_STATUS
++#define PA_SU_CNTL_STATUS__SU_BUSY__SHIFT 0x1f
++#define PA_SU_CNTL_STATUS__SU_BUSY_MASK 0x80000000L
++//PA_SC_FIFO_DEPTH_CNTL
++#define PA_SC_FIFO_DEPTH_CNTL__DEPTH__SHIFT 0x0
++#define PA_SC_FIFO_DEPTH_CNTL__DEPTH_MASK 0x000003FFL
++//PA_SC_P3D_TRAP_SCREEN_HV_LOCK
++#define PA_SC_P3D_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES__SHIFT 0x0
++#define PA_SC_P3D_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES_MASK 0x00000001L
++//PA_SC_HP3D_TRAP_SCREEN_HV_LOCK
++#define PA_SC_HP3D_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES__SHIFT 0x0
++#define PA_SC_HP3D_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES_MASK 0x00000001L
++//PA_SC_TRAP_SCREEN_HV_LOCK
++#define PA_SC_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES__SHIFT 0x0
++#define PA_SC_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES_MASK 0x00000001L
++//PA_SC_FORCE_EOV_MAX_CNTS
++#define PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_CLK_CNT__SHIFT 0x0
++#define PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_REZ_CNT__SHIFT 0x10
++#define PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_CLK_CNT_MASK 0x0000FFFFL
++#define PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_REZ_CNT_MASK 0xFFFF0000L
++//PA_SC_BINNER_EVENT_CNTL_0
++#define PA_SC_BINNER_EVENT_CNTL_0__RESERVED_0__SHIFT 0x0
++#define PA_SC_BINNER_EVENT_CNTL_0__SAMPLE_STREAMOUTSTATS1__SHIFT 0x2
++#define PA_SC_BINNER_EVENT_CNTL_0__SAMPLE_STREAMOUTSTATS2__SHIFT 0x4
++#define PA_SC_BINNER_EVENT_CNTL_0__SAMPLE_STREAMOUTSTATS3__SHIFT 0x6
++#define PA_SC_BINNER_EVENT_CNTL_0__CACHE_FLUSH_TS__SHIFT 0x8
++#define PA_SC_BINNER_EVENT_CNTL_0__CONTEXT_DONE__SHIFT 0xa
++#define PA_SC_BINNER_EVENT_CNTL_0__CACHE_FLUSH__SHIFT 0xc
++#define PA_SC_BINNER_EVENT_CNTL_0__CS_PARTIAL_FLUSH__SHIFT 0xe
++#define PA_SC_BINNER_EVENT_CNTL_0__VGT_STREAMOUT_SYNC__SHIFT 0x10
++#define PA_SC_BINNER_EVENT_CNTL_0__RESERVED_9__SHIFT 0x12
++#define PA_SC_BINNER_EVENT_CNTL_0__VGT_STREAMOUT_RESET__SHIFT 0x14
++#define PA_SC_BINNER_EVENT_CNTL_0__END_OF_PIPE_INCR_DE__SHIFT 0x16
++#define PA_SC_BINNER_EVENT_CNTL_0__END_OF_PIPE_IB_END__SHIFT 0x18
++#define PA_SC_BINNER_EVENT_CNTL_0__RST_PIX_CNT__SHIFT 0x1a
++#define PA_SC_BINNER_EVENT_CNTL_0__BREAK_BATCH__SHIFT 0x1c
++#define PA_SC_BINNER_EVENT_CNTL_0__VS_PARTIAL_FLUSH__SHIFT 0x1e
++#define PA_SC_BINNER_EVENT_CNTL_0__RESERVED_0_MASK 0x00000003L
++#define PA_SC_BINNER_EVENT_CNTL_0__SAMPLE_STREAMOUTSTATS1_MASK 0x0000000CL
++#define PA_SC_BINNER_EVENT_CNTL_0__SAMPLE_STREAMOUTSTATS2_MASK 0x00000030L
++#define PA_SC_BINNER_EVENT_CNTL_0__SAMPLE_STREAMOUTSTATS3_MASK 0x000000C0L
++#define PA_SC_BINNER_EVENT_CNTL_0__CACHE_FLUSH_TS_MASK 0x00000300L
++#define PA_SC_BINNER_EVENT_CNTL_0__CONTEXT_DONE_MASK 0x00000C00L
++#define PA_SC_BINNER_EVENT_CNTL_0__CACHE_FLUSH_MASK 0x00003000L
++#define PA_SC_BINNER_EVENT_CNTL_0__CS_PARTIAL_FLUSH_MASK 0x0000C000L
++#define PA_SC_BINNER_EVENT_CNTL_0__VGT_STREAMOUT_SYNC_MASK 0x00030000L
++#define PA_SC_BINNER_EVENT_CNTL_0__RESERVED_9_MASK 0x000C0000L
++#define PA_SC_BINNER_EVENT_CNTL_0__VGT_STREAMOUT_RESET_MASK 0x00300000L
++#define PA_SC_BINNER_EVENT_CNTL_0__END_OF_PIPE_INCR_DE_MASK 0x00C00000L
++#define PA_SC_BINNER_EVENT_CNTL_0__END_OF_PIPE_IB_END_MASK 0x03000000L
++#define PA_SC_BINNER_EVENT_CNTL_0__RST_PIX_CNT_MASK 0x0C000000L
++#define PA_SC_BINNER_EVENT_CNTL_0__BREAK_BATCH_MASK 0x30000000L
++#define PA_SC_BINNER_EVENT_CNTL_0__VS_PARTIAL_FLUSH_MASK 0xC0000000L
++//PA_SC_BINNER_EVENT_CNTL_1
++#define PA_SC_BINNER_EVENT_CNTL_1__PS_PARTIAL_FLUSH__SHIFT 0x0
++#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_HS_OUTPUT__SHIFT 0x2
++#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_DFSM__SHIFT 0x4
++#define PA_SC_BINNER_EVENT_CNTL_1__RESET_TO_LOWEST_VGT__SHIFT 0x6
++#define PA_SC_BINNER_EVENT_CNTL_1__CACHE_FLUSH_AND_INV_TS_EVENT__SHIFT 0x8
++#define PA_SC_BINNER_EVENT_CNTL_1__ZPASS_DONE__SHIFT 0xa
++#define PA_SC_BINNER_EVENT_CNTL_1__CACHE_FLUSH_AND_INV_EVENT__SHIFT 0xc
++#define PA_SC_BINNER_EVENT_CNTL_1__PERFCOUNTER_START__SHIFT 0xe
++#define PA_SC_BINNER_EVENT_CNTL_1__PERFCOUNTER_STOP__SHIFT 0x10
++#define PA_SC_BINNER_EVENT_CNTL_1__PIPELINESTAT_START__SHIFT 0x12
++#define PA_SC_BINNER_EVENT_CNTL_1__PIPELINESTAT_STOP__SHIFT 0x14
++#define PA_SC_BINNER_EVENT_CNTL_1__PERFCOUNTER_SAMPLE__SHIFT 0x16
++#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_ES_OUTPUT__SHIFT 0x18
++#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_GS_OUTPUT__SHIFT 0x1a
++#define PA_SC_BINNER_EVENT_CNTL_1__SAMPLE_PIPELINESTAT__SHIFT 0x1c
++#define PA_SC_BINNER_EVENT_CNTL_1__SO_VGTSTREAMOUT_FLUSH__SHIFT 0x1e
++#define PA_SC_BINNER_EVENT_CNTL_1__PS_PARTIAL_FLUSH_MASK 0x00000003L
++#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_HS_OUTPUT_MASK 0x0000000CL
++#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_DFSM_MASK 0x00000030L
++#define PA_SC_BINNER_EVENT_CNTL_1__RESET_TO_LOWEST_VGT_MASK 0x000000C0L
++#define PA_SC_BINNER_EVENT_CNTL_1__CACHE_FLUSH_AND_INV_TS_EVENT_MASK 0x00000300L
++#define PA_SC_BINNER_EVENT_CNTL_1__ZPASS_DONE_MASK 0x00000C00L
++#define PA_SC_BINNER_EVENT_CNTL_1__CACHE_FLUSH_AND_INV_EVENT_MASK 0x00003000L
++#define PA_SC_BINNER_EVENT_CNTL_1__PERFCOUNTER_START_MASK 0x0000C000L
++#define PA_SC_BINNER_EVENT_CNTL_1__PERFCOUNTER_STOP_MASK 0x00030000L
++#define PA_SC_BINNER_EVENT_CNTL_1__PIPELINESTAT_START_MASK 0x000C0000L
++#define PA_SC_BINNER_EVENT_CNTL_1__PIPELINESTAT_STOP_MASK 0x00300000L
++#define PA_SC_BINNER_EVENT_CNTL_1__PERFCOUNTER_SAMPLE_MASK 0x00C00000L
++#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_ES_OUTPUT_MASK 0x03000000L
++#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_GS_OUTPUT_MASK 0x0C000000L
++#define PA_SC_BINNER_EVENT_CNTL_1__SAMPLE_PIPELINESTAT_MASK 0x30000000L
++#define PA_SC_BINNER_EVENT_CNTL_1__SO_VGTSTREAMOUT_FLUSH_MASK 0xC0000000L
++//PA_SC_BINNER_EVENT_CNTL_2
++#define PA_SC_BINNER_EVENT_CNTL_2__SAMPLE_STREAMOUTSTATS__SHIFT 0x0
++#define PA_SC_BINNER_EVENT_CNTL_2__RESET_VTX_CNT__SHIFT 0x2
++#define PA_SC_BINNER_EVENT_CNTL_2__BLOCK_CONTEXT_DONE__SHIFT 0x4
++#define PA_SC_BINNER_EVENT_CNTL_2__CS_CONTEXT_DONE__SHIFT 0x6
++#define PA_SC_BINNER_EVENT_CNTL_2__VGT_FLUSH__SHIFT 0x8
++#define PA_SC_BINNER_EVENT_CNTL_2__TGID_ROLLOVER__SHIFT 0xa
++#define PA_SC_BINNER_EVENT_CNTL_2__SQ_NON_EVENT__SHIFT 0xc
++#define PA_SC_BINNER_EVENT_CNTL_2__SC_SEND_DB_VPZ__SHIFT 0xe
++#define PA_SC_BINNER_EVENT_CNTL_2__BOTTOM_OF_PIPE_TS__SHIFT 0x10
++#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_SX_TS__SHIFT 0x12
++#define PA_SC_BINNER_EVENT_CNTL_2__DB_CACHE_FLUSH_AND_INV__SHIFT 0x14
++#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_DB_DATA_TS__SHIFT 0x16
++#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_DB_META__SHIFT 0x18
++#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_CB_DATA_TS__SHIFT 0x1a
++#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_CB_META__SHIFT 0x1c
++#define PA_SC_BINNER_EVENT_CNTL_2__CS_DONE__SHIFT 0x1e
++#define PA_SC_BINNER_EVENT_CNTL_2__SAMPLE_STREAMOUTSTATS_MASK 0x00000003L
++#define PA_SC_BINNER_EVENT_CNTL_2__RESET_VTX_CNT_MASK 0x0000000CL
++#define PA_SC_BINNER_EVENT_CNTL_2__BLOCK_CONTEXT_DONE_MASK 0x00000030L
++#define PA_SC_BINNER_EVENT_CNTL_2__CS_CONTEXT_DONE_MASK 0x000000C0L
++#define PA_SC_BINNER_EVENT_CNTL_2__VGT_FLUSH_MASK 0x00000300L
++#define PA_SC_BINNER_EVENT_CNTL_2__TGID_ROLLOVER_MASK 0x00000C00L
++#define PA_SC_BINNER_EVENT_CNTL_2__SQ_NON_EVENT_MASK 0x00003000L
++#define PA_SC_BINNER_EVENT_CNTL_2__SC_SEND_DB_VPZ_MASK 0x0000C000L
++#define PA_SC_BINNER_EVENT_CNTL_2__BOTTOM_OF_PIPE_TS_MASK 0x00030000L
++#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_SX_TS_MASK 0x000C0000L
++#define PA_SC_BINNER_EVENT_CNTL_2__DB_CACHE_FLUSH_AND_INV_MASK 0x00300000L
++#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_DB_DATA_TS_MASK 0x00C00000L
++#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_DB_META_MASK 0x03000000L
++#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_CB_DATA_TS_MASK 0x0C000000L
++#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_CB_META_MASK 0x30000000L
++#define PA_SC_BINNER_EVENT_CNTL_2__CS_DONE_MASK 0xC0000000L
++//PA_SC_BINNER_EVENT_CNTL_3
++#define PA_SC_BINNER_EVENT_CNTL_3__PS_DONE__SHIFT 0x0
++#define PA_SC_BINNER_EVENT_CNTL_3__FLUSH_AND_INV_CB_PIXEL_DATA__SHIFT 0x2
++#define PA_SC_BINNER_EVENT_CNTL_3__SX_CB_RAT_ACK_REQUEST__SHIFT 0x4
++#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_START__SHIFT 0x6
++#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_STOP__SHIFT 0x8
++#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_MARKER__SHIFT 0xa
++#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_FLUSH__SHIFT 0xc
++#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_FINISH__SHIFT 0xe
++#define PA_SC_BINNER_EVENT_CNTL_3__PIXEL_PIPE_STAT_CONTROL__SHIFT 0x10
++#define PA_SC_BINNER_EVENT_CNTL_3__PIXEL_PIPE_STAT_DUMP__SHIFT 0x12
++#define PA_SC_BINNER_EVENT_CNTL_3__PIXEL_PIPE_STAT_RESET__SHIFT 0x14
++#define PA_SC_BINNER_EVENT_CNTL_3__CONTEXT_SUSPEND__SHIFT 0x16
++#define PA_SC_BINNER_EVENT_CNTL_3__OFFCHIP_HS_DEALLOC__SHIFT 0x18
++#define PA_SC_BINNER_EVENT_CNTL_3__ENABLE_NGG_PIPELINE__SHIFT 0x1a
++#define PA_SC_BINNER_EVENT_CNTL_3__ENABLE_LEGACY_PIPELINE__SHIFT 0x1c
++#define PA_SC_BINNER_EVENT_CNTL_3__RESERVED_63__SHIFT 0x1e
++#define PA_SC_BINNER_EVENT_CNTL_3__PS_DONE_MASK 0x00000003L
++#define PA_SC_BINNER_EVENT_CNTL_3__FLUSH_AND_INV_CB_PIXEL_DATA_MASK 0x0000000CL
++#define PA_SC_BINNER_EVENT_CNTL_3__SX_CB_RAT_ACK_REQUEST_MASK 0x00000030L
++#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_START_MASK 0x000000C0L
++#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_STOP_MASK 0x00000300L
++#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_MARKER_MASK 0x00000C00L
++#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_FLUSH_MASK 0x00003000L
++#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_FINISH_MASK 0x0000C000L
++#define PA_SC_BINNER_EVENT_CNTL_3__PIXEL_PIPE_STAT_CONTROL_MASK 0x00030000L
++#define PA_SC_BINNER_EVENT_CNTL_3__PIXEL_PIPE_STAT_DUMP_MASK 0x000C0000L
++#define PA_SC_BINNER_EVENT_CNTL_3__PIXEL_PIPE_STAT_RESET_MASK 0x00300000L
++#define PA_SC_BINNER_EVENT_CNTL_3__CONTEXT_SUSPEND_MASK 0x00C00000L
++#define PA_SC_BINNER_EVENT_CNTL_3__OFFCHIP_HS_DEALLOC_MASK 0x03000000L
++#define PA_SC_BINNER_EVENT_CNTL_3__ENABLE_NGG_PIPELINE_MASK 0x0C000000L
++#define PA_SC_BINNER_EVENT_CNTL_3__ENABLE_LEGACY_PIPELINE_MASK 0x30000000L
++#define PA_SC_BINNER_EVENT_CNTL_3__RESERVED_63_MASK 0xC0000000L
++//PA_SC_BINNER_TIMEOUT_COUNTER
++#define PA_SC_BINNER_TIMEOUT_COUNTER__THRESHOLD__SHIFT 0x0
++#define PA_SC_BINNER_TIMEOUT_COUNTER__THRESHOLD_MASK 0xFFFFFFFFL
++//PA_SC_BINNER_PERF_CNTL_0
++#define PA_SC_BINNER_PERF_CNTL_0__BIN_HIST_NUM_PRIMS_THRESHOLD__SHIFT 0x0
++#define PA_SC_BINNER_PERF_CNTL_0__BATCH_HIST_NUM_PRIMS_THRESHOLD__SHIFT 0xa
++#define PA_SC_BINNER_PERF_CNTL_0__BIN_HIST_NUM_CONTEXT_THRESHOLD__SHIFT 0x14
++#define PA_SC_BINNER_PERF_CNTL_0__BATCH_HIST_NUM_CONTEXT_THRESHOLD__SHIFT 0x17
++#define PA_SC_BINNER_PERF_CNTL_0__BIN_HIST_NUM_PRIMS_THRESHOLD_MASK 0x000003FFL
++#define PA_SC_BINNER_PERF_CNTL_0__BATCH_HIST_NUM_PRIMS_THRESHOLD_MASK 0x000FFC00L
++#define PA_SC_BINNER_PERF_CNTL_0__BIN_HIST_NUM_CONTEXT_THRESHOLD_MASK 0x00700000L
++#define PA_SC_BINNER_PERF_CNTL_0__BATCH_HIST_NUM_CONTEXT_THRESHOLD_MASK 0x03800000L
++//PA_SC_BINNER_PERF_CNTL_1
++#define PA_SC_BINNER_PERF_CNTL_1__BIN_HIST_NUM_PERSISTENT_STATE_THRESHOLD__SHIFT 0x0
++#define PA_SC_BINNER_PERF_CNTL_1__BATCH_HIST_NUM_PERSISTENT_STATE_THRESHOLD__SHIFT 0x5
++#define PA_SC_BINNER_PERF_CNTL_1__BATCH_HIST_NUM_TRIV_REJECTED_PRIMS_THRESHOLD__SHIFT 0xa
++#define PA_SC_BINNER_PERF_CNTL_1__BIN_HIST_NUM_PERSISTENT_STATE_THRESHOLD_MASK 0x0000001FL
++#define PA_SC_BINNER_PERF_CNTL_1__BATCH_HIST_NUM_PERSISTENT_STATE_THRESHOLD_MASK 0x000003E0L
++#define PA_SC_BINNER_PERF_CNTL_1__BATCH_HIST_NUM_TRIV_REJECTED_PRIMS_THRESHOLD_MASK 0x03FFFC00L
++//PA_SC_BINNER_PERF_CNTL_2
++#define PA_SC_BINNER_PERF_CNTL_2__BATCH_HIST_NUM_ROWS_PER_PRIM_THRESHOLD__SHIFT 0x0
++#define PA_SC_BINNER_PERF_CNTL_2__BATCH_HIST_NUM_COLUMNS_PER_ROW_THRESHOLD__SHIFT 0xb
++#define PA_SC_BINNER_PERF_CNTL_2__BATCH_HIST_NUM_ROWS_PER_PRIM_THRESHOLD_MASK 0x000007FFL
++#define PA_SC_BINNER_PERF_CNTL_2__BATCH_HIST_NUM_COLUMNS_PER_ROW_THRESHOLD_MASK 0x003FF800L
++//PA_SC_BINNER_PERF_CNTL_3
++#define PA_SC_BINNER_PERF_CNTL_3__BATCH_HIST_NUM_PS_WAVE_BREAKS_THRESHOLD__SHIFT 0x0
++#define PA_SC_BINNER_PERF_CNTL_3__BATCH_HIST_NUM_PS_WAVE_BREAKS_THRESHOLD_MASK 0xFFFFFFFFL
++//PA_SC_FIFO_SIZE
++#define PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT 0x0
++#define PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT 0x6
++#define PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT 0xf
++#define PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT 0x15
++#define PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE_MASK 0x0000003FL
++#define PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE_MASK 0x00007FC0L
++#define PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE_MASK 0x001F8000L
++#define PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE_MASK 0xFFE00000L
++//PA_SC_IF_FIFO_SIZE
++#define PA_SC_IF_FIFO_SIZE__SC_DB_TILE_IF_FIFO_SIZE__SHIFT 0x0
++#define PA_SC_IF_FIFO_SIZE__SC_DB_QUAD_IF_FIFO_SIZE__SHIFT 0x6
++#define PA_SC_IF_FIFO_SIZE__SC_SPI_IF_FIFO_SIZE__SHIFT 0xc
++#define PA_SC_IF_FIFO_SIZE__SC_BCI_IF_FIFO_SIZE__SHIFT 0x12
++#define PA_SC_IF_FIFO_SIZE__SC_DB_TILE_IF_FIFO_SIZE_MASK 0x0000003FL
++#define PA_SC_IF_FIFO_SIZE__SC_DB_QUAD_IF_FIFO_SIZE_MASK 0x00000FC0L
++#define PA_SC_IF_FIFO_SIZE__SC_SPI_IF_FIFO_SIZE_MASK 0x0003F000L
++#define PA_SC_IF_FIFO_SIZE__SC_BCI_IF_FIFO_SIZE_MASK 0x00FC0000L
++//PA_SC_PKR_WAVE_TABLE_CNTL
++#define PA_SC_PKR_WAVE_TABLE_CNTL__SIZE__SHIFT 0x0
++#define PA_SC_PKR_WAVE_TABLE_CNTL__SIZE_MASK 0x0000003FL
++//PA_UTCL1_CNTL1
++#define PA_UTCL1_CNTL1__FORCE_4K_L2_RESP__SHIFT 0x0
++#define PA_UTCL1_CNTL1__GPUVM_64K_DEFAULT__SHIFT 0x1
++#define PA_UTCL1_CNTL1__GPUVM_PERM_MODE__SHIFT 0x2
++#define PA_UTCL1_CNTL1__RESP_MODE__SHIFT 0x3
++#define PA_UTCL1_CNTL1__RESP_FAULT_MODE__SHIFT 0x5
++#define PA_UTCL1_CNTL1__CLIENTID__SHIFT 0x7
++#define PA_UTCL1_CNTL1__SPARE__SHIFT 0x10
++#define PA_UTCL1_CNTL1__ENABLE_PUSH_LFIFO__SHIFT 0x11
++#define PA_UTCL1_CNTL1__ENABLE_LFIFO_PRI_ARB__SHIFT 0x12
++#define PA_UTCL1_CNTL1__REG_INV_VMID__SHIFT 0x13
++#define PA_UTCL1_CNTL1__REG_INV_ALL_VMID__SHIFT 0x17
++#define PA_UTCL1_CNTL1__REG_INV_TOGGLE__SHIFT 0x18
++#define PA_UTCL1_CNTL1__INVALIDATE_ALL_VMID__SHIFT 0x19
++#define PA_UTCL1_CNTL1__FORCE_MISS__SHIFT 0x1a
++#define PA_UTCL1_CNTL1__FORCE_IN_ORDER__SHIFT 0x1b
++#define PA_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2__SHIFT 0x1c
++#define PA_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2__SHIFT 0x1e
++#define PA_UTCL1_CNTL1__FORCE_4K_L2_RESP_MASK 0x00000001L
++#define PA_UTCL1_CNTL1__GPUVM_64K_DEFAULT_MASK 0x00000002L
++#define PA_UTCL1_CNTL1__GPUVM_PERM_MODE_MASK 0x00000004L
++#define PA_UTCL1_CNTL1__RESP_MODE_MASK 0x00000018L
++#define PA_UTCL1_CNTL1__RESP_FAULT_MODE_MASK 0x00000060L
++#define PA_UTCL1_CNTL1__CLIENTID_MASK 0x0000FF80L
++#define PA_UTCL1_CNTL1__SPARE_MASK 0x00010000L
++#define PA_UTCL1_CNTL1__ENABLE_PUSH_LFIFO_MASK 0x00020000L
++#define PA_UTCL1_CNTL1__ENABLE_LFIFO_PRI_ARB_MASK 0x00040000L
++#define PA_UTCL1_CNTL1__REG_INV_VMID_MASK 0x00780000L
++#define PA_UTCL1_CNTL1__REG_INV_ALL_VMID_MASK 0x00800000L
++#define PA_UTCL1_CNTL1__REG_INV_TOGGLE_MASK 0x01000000L
++#define PA_UTCL1_CNTL1__INVALIDATE_ALL_VMID_MASK 0x02000000L
++#define PA_UTCL1_CNTL1__FORCE_MISS_MASK 0x04000000L
++#define PA_UTCL1_CNTL1__FORCE_IN_ORDER_MASK 0x08000000L
++#define PA_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2_MASK 0x30000000L
++#define PA_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2_MASK 0xC0000000L
++//PA_UTCL1_CNTL2
++#define PA_UTCL1_CNTL2__SPARE1__SHIFT 0x0
++#define PA_UTCL1_CNTL2__SPARE2__SHIFT 0x8
++#define PA_UTCL1_CNTL2__MTYPE_OVRD_DIS__SHIFT 0x9
++#define PA_UTCL1_CNTL2__LINE_VALID__SHIFT 0xa
++#define PA_UTCL1_CNTL2__SPARE3__SHIFT 0xb
++#define PA_UTCL1_CNTL2__GPUVM_INV_MODE__SHIFT 0xc
++#define PA_UTCL1_CNTL2__ENABLE_SHOOTDOWN_OPT__SHIFT 0xd
++#define PA_UTCL1_CNTL2__FORCE_SNOOP__SHIFT 0xe
++#define PA_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK__SHIFT 0xf
++#define PA_UTCL1_CNTL2__SPARE4__SHIFT 0x10
++#define PA_UTCL1_CNTL2__ENABLE_PERF_EVENT_RD_WR__SHIFT 0x12
++#define PA_UTCL1_CNTL2__PERF_EVENT_RD_WR__SHIFT 0x13
++#define PA_UTCL1_CNTL2__ENABLE_PERF_EVENT_VMID__SHIFT 0x14
++#define PA_UTCL1_CNTL2__PERF_EVENT_VMID__SHIFT 0x15
++#define PA_UTCL1_CNTL2__SPARE5__SHIFT 0x19
++#define PA_UTCL1_CNTL2__FORCE_FRAG_2M_TO_64K__SHIFT 0x1a
++#define PA_UTCL1_CNTL2__RESERVED__SHIFT 0x1b
++#define PA_UTCL1_CNTL2__SPARE1_MASK 0x000000FFL
++#define PA_UTCL1_CNTL2__SPARE2_MASK 0x00000100L
++#define PA_UTCL1_CNTL2__MTYPE_OVRD_DIS_MASK 0x00000200L
++#define PA_UTCL1_CNTL2__LINE_VALID_MASK 0x00000400L
++#define PA_UTCL1_CNTL2__SPARE3_MASK 0x00000800L
++#define PA_UTCL1_CNTL2__GPUVM_INV_MODE_MASK 0x00001000L
++#define PA_UTCL1_CNTL2__ENABLE_SHOOTDOWN_OPT_MASK 0x00002000L
++#define PA_UTCL1_CNTL2__FORCE_SNOOP_MASK 0x00004000L
++#define PA_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK_MASK 0x00008000L
++#define PA_UTCL1_CNTL2__SPARE4_MASK 0x00030000L
++#define PA_UTCL1_CNTL2__ENABLE_PERF_EVENT_RD_WR_MASK 0x00040000L
++#define PA_UTCL1_CNTL2__PERF_EVENT_RD_WR_MASK 0x00080000L
++#define PA_UTCL1_CNTL2__ENABLE_PERF_EVENT_VMID_MASK 0x00100000L
++#define PA_UTCL1_CNTL2__PERF_EVENT_VMID_MASK 0x01E00000L
++#define PA_UTCL1_CNTL2__SPARE5_MASK 0x02000000L
++#define PA_UTCL1_CNTL2__FORCE_FRAG_2M_TO_64K_MASK 0x04000000L
++#define PA_UTCL1_CNTL2__RESERVED_MASK 0xF8000000L
++//PA_SIDEBAND_REQUEST_DELAYS
++#define PA_SIDEBAND_REQUEST_DELAYS__RETRY_DELAY__SHIFT 0x0
++#define PA_SIDEBAND_REQUEST_DELAYS__INITIAL_DELAY__SHIFT 0x10
++#define PA_SIDEBAND_REQUEST_DELAYS__RETRY_DELAY_MASK 0x0000FFFFL
++#define PA_SIDEBAND_REQUEST_DELAYS__INITIAL_DELAY_MASK 0xFFFF0000L
++//PA_SC_ENHANCE
++#define PA_SC_ENHANCE__ENABLE_PA_SC_OUT_OF_ORDER__SHIFT 0x0
++#define PA_SC_ENHANCE__DISABLE_SC_DB_TILE_FIX__SHIFT 0x1
++#define PA_SC_ENHANCE__DISABLE_AA_MASK_FULL_FIX__SHIFT 0x2
++#define PA_SC_ENHANCE__ENABLE_1XMSAA_SAMPLE_LOCATIONS__SHIFT 0x3
++#define PA_SC_ENHANCE__ENABLE_1XMSAA_SAMPLE_LOC_CENTROID__SHIFT 0x4
++#define PA_SC_ENHANCE__DISABLE_SCISSOR_FIX__SHIFT 0x5
++#define PA_SC_ENHANCE__SEND_UNLIT_STILES_TO_PACKER__SHIFT 0x6
++#define PA_SC_ENHANCE__DISABLE_DUALGRAD_PERF_OPTIMIZATION__SHIFT 0x7
++#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_PRIM__SHIFT 0x8
++#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_SUPERTILE__SHIFT 0x9
++#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_TILE__SHIFT 0xa
++#define PA_SC_ENHANCE__DISABLE_PA_SC_GUIDANCE__SHIFT 0xb
++#define PA_SC_ENHANCE__DISABLE_EOV_ALL_CTRL_ONLY_COMBINATIONS__SHIFT 0xc
++#define PA_SC_ENHANCE__ENABLE_MULTICYCLE_BUBBLE_FREEZE__SHIFT 0xd
++#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_PA_SC_GUIDANCE__SHIFT 0xe
++#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_POLY_MODE__SHIFT 0xf
++#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_EOP_SYNC_NULL_PRIMS_LAST__SHIFT 0x10
++#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_THRESHOLD_SWITCHING__SHIFT 0x11
++#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_THRESHOLD_SWITCH_AT_EOPG_ONLY__SHIFT 0x12
++#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_DESIRED_FIFO_EMPTY_SWITCHING__SHIFT 0x13
++#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_SELECTED_FIFO_EMPTY_SWITCHING__SHIFT 0x14
++#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_EMPTY_SWITCHING_HYSTERYSIS__SHIFT 0x15
++#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_DESIRED_FIFO_IS_NEXT_FEID__SHIFT 0x16
++#define PA_SC_ENHANCE__DISABLE_OOO_NO_EOPG_SKEW_DESIRED_FIFO_IS_CURRENT_FIFO__SHIFT 0x17
++#define PA_SC_ENHANCE__OOO_DISABLE_EOP_ON_FIRST_LIVE_PRIM_HIT__SHIFT 0x18
++#define PA_SC_ENHANCE__OOO_DISABLE_EOPG_SKEW_THRESHOLD_SWITCHING__SHIFT 0x19
++#define PA_SC_ENHANCE__DISABLE_EOP_LINE_STIPPLE_RESET__SHIFT 0x1a
++#define PA_SC_ENHANCE__DISABLE_VPZ_EOP_LINE_STIPPLE_RESET__SHIFT 0x1b
++#define PA_SC_ENHANCE__IOO_DISABLE_SCAN_UNSELECTED_FIFOS_FOR_DUAL_GFX_RING_CHANGE__SHIFT 0x1c
++#define PA_SC_ENHANCE__OOO_USE_ABSOLUTE_FIFO_COUNT_IN_THRESHOLD_SWITCHING__SHIFT 0x1d
++#define PA_SC_ENHANCE__ENABLE_PA_SC_OUT_OF_ORDER_MASK 0x00000001L
++#define PA_SC_ENHANCE__DISABLE_SC_DB_TILE_FIX_MASK 0x00000002L
++#define PA_SC_ENHANCE__DISABLE_AA_MASK_FULL_FIX_MASK 0x00000004L
++#define PA_SC_ENHANCE__ENABLE_1XMSAA_SAMPLE_LOCATIONS_MASK 0x00000008L
++#define PA_SC_ENHANCE__ENABLE_1XMSAA_SAMPLE_LOC_CENTROID_MASK 0x00000010L
++#define PA_SC_ENHANCE__DISABLE_SCISSOR_FIX_MASK 0x00000020L
++#define PA_SC_ENHANCE__SEND_UNLIT_STILES_TO_PACKER_MASK 0x00000040L
++#define PA_SC_ENHANCE__DISABLE_DUALGRAD_PERF_OPTIMIZATION_MASK 0x00000080L
++#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_PRIM_MASK 0x00000100L
++#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_SUPERTILE_MASK 0x00000200L
++#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_TILE_MASK 0x00000400L
++#define PA_SC_ENHANCE__DISABLE_PA_SC_GUIDANCE_MASK 0x00000800L
++#define PA_SC_ENHANCE__DISABLE_EOV_ALL_CTRL_ONLY_COMBINATIONS_MASK 0x00001000L
++#define PA_SC_ENHANCE__ENABLE_MULTICYCLE_BUBBLE_FREEZE_MASK 0x00002000L
++#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_PA_SC_GUIDANCE_MASK 0x00004000L
++#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_POLY_MODE_MASK 0x00008000L
++#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_EOP_SYNC_NULL_PRIMS_LAST_MASK 0x00010000L
++#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_THRESHOLD_SWITCHING_MASK 0x00020000L
++#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_THRESHOLD_SWITCH_AT_EOPG_ONLY_MASK 0x00040000L
++#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_DESIRED_FIFO_EMPTY_SWITCHING_MASK 0x00080000L
++#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_SELECTED_FIFO_EMPTY_SWITCHING_MASK 0x00100000L
++#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_EMPTY_SWITCHING_HYSTERYSIS_MASK 0x00200000L
++#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_DESIRED_FIFO_IS_NEXT_FEID_MASK 0x00400000L
++#define PA_SC_ENHANCE__DISABLE_OOO_NO_EOPG_SKEW_DESIRED_FIFO_IS_CURRENT_FIFO_MASK 0x00800000L
++#define PA_SC_ENHANCE__OOO_DISABLE_EOP_ON_FIRST_LIVE_PRIM_HIT_MASK 0x01000000L
++#define PA_SC_ENHANCE__OOO_DISABLE_EOPG_SKEW_THRESHOLD_SWITCHING_MASK 0x02000000L
++#define PA_SC_ENHANCE__DISABLE_EOP_LINE_STIPPLE_RESET_MASK 0x04000000L
++#define PA_SC_ENHANCE__DISABLE_VPZ_EOP_LINE_STIPPLE_RESET_MASK 0x08000000L
++#define PA_SC_ENHANCE__IOO_DISABLE_SCAN_UNSELECTED_FIFOS_FOR_DUAL_GFX_RING_CHANGE_MASK 0x10000000L
++#define PA_SC_ENHANCE__OOO_USE_ABSOLUTE_FIFO_COUNT_IN_THRESHOLD_SWITCHING_MASK 0x20000000L
++//PA_SC_ENHANCE_1
++#define PA_SC_ENHANCE_1__REALIGN_DQUADS_OVERRIDE_ENABLE__SHIFT 0x0
++#define PA_SC_ENHANCE_1__REALIGN_DQUADS_OVERRIDE__SHIFT 0x1
++#define PA_SC_ENHANCE_1__DISABLE_SC_BINNING__SHIFT 0x3
++#define PA_SC_ENHANCE_1__BYPASS_PBB__SHIFT 0x4
++#define PA_SC_ENHANCE_1__ECO_SPARE0__SHIFT 0x5
++#define PA_SC_ENHANCE_1__ECO_SPARE1__SHIFT 0x6
++#define PA_SC_ENHANCE_1__ECO_SPARE2__SHIFT 0x7
++#define PA_SC_ENHANCE_1__ECO_SPARE3__SHIFT 0x8
++#define PA_SC_ENHANCE_1__DISABLE_SC_PROCESS_RESET_PBB__SHIFT 0x9
++#define PA_SC_ENHANCE_1__DISABLE_PBB_SCISSOR_OPT__SHIFT 0xa
++#define PA_SC_ENHANCE_1__ENABLE_DFSM_FLUSH_EVENT_TO_FLUSH_POPS_CAM__SHIFT 0xb
++#define PA_SC_ENHANCE_1__DISABLE_PACKER_GRAD_FDCE_ENHANCE__SHIFT 0xd
++#define PA_SC_ENHANCE_1__DISABLE_SC_DB_TILE_INTF_FINE_CLOCK_GATE__SHIFT 0xe
++#define PA_SC_ENHANCE_1__DISABLE_SC_PIPELINE_RESET_LEGACY_MODE_TRANSITION__SHIFT 0xf
++#define PA_SC_ENHANCE_1__DISABLE_PACKER_ODC_ENHANCE__SHIFT 0x10
++#define PA_SC_ENHANCE_1__ALLOW_SCALE_LINE_WIDTH_PAD_WITH_BINNING__SHIFT 0x11
++#define PA_SC_ENHANCE_1__OPTIMAL_BIN_SELECTION__SHIFT 0x12
++#define PA_SC_ENHANCE_1__DISABLE_FORCE_SOP_ALL_EVENTS__SHIFT 0x13
++#define PA_SC_ENHANCE_1__DISABLE_PBB_CLK_OPTIMIZATION__SHIFT 0x14
++#define PA_SC_ENHANCE_1__DISABLE_PBB_SCISSOR_CLK_OPTIMIZATION__SHIFT 0x15
++#define PA_SC_ENHANCE_1__DISABLE_PBB_BINNING_CLK_OPTIMIZATION__SHIFT 0x16
++#define PA_SC_ENHANCE_1__RSVD__SHIFT 0x17
++#define PA_SC_ENHANCE_1__REALIGN_DQUADS_OVERRIDE_ENABLE_MASK 0x00000001L
++#define PA_SC_ENHANCE_1__REALIGN_DQUADS_OVERRIDE_MASK 0x00000006L
++#define PA_SC_ENHANCE_1__DISABLE_SC_BINNING_MASK 0x00000008L
++#define PA_SC_ENHANCE_1__BYPASS_PBB_MASK 0x00000010L
++#define PA_SC_ENHANCE_1__ECO_SPARE0_MASK 0x00000020L
++#define PA_SC_ENHANCE_1__ECO_SPARE1_MASK 0x00000040L
++#define PA_SC_ENHANCE_1__ECO_SPARE2_MASK 0x00000080L
++#define PA_SC_ENHANCE_1__ECO_SPARE3_MASK 0x00000100L
++#define PA_SC_ENHANCE_1__DISABLE_SC_PROCESS_RESET_PBB_MASK 0x00000200L
++#define PA_SC_ENHANCE_1__DISABLE_PBB_SCISSOR_OPT_MASK 0x00000400L
++#define PA_SC_ENHANCE_1__ENABLE_DFSM_FLUSH_EVENT_TO_FLUSH_POPS_CAM_MASK 0x00000800L
++#define PA_SC_ENHANCE_1__DISABLE_PACKER_GRAD_FDCE_ENHANCE_MASK 0x00002000L
++#define PA_SC_ENHANCE_1__DISABLE_SC_DB_TILE_INTF_FINE_CLOCK_GATE_MASK 0x00004000L
++#define PA_SC_ENHANCE_1__DISABLE_SC_PIPELINE_RESET_LEGACY_MODE_TRANSITION_MASK 0x00008000L
++#define PA_SC_ENHANCE_1__DISABLE_PACKER_ODC_ENHANCE_MASK 0x00010000L
++#define PA_SC_ENHANCE_1__ALLOW_SCALE_LINE_WIDTH_PAD_WITH_BINNING_MASK 0x00020000L
++#define PA_SC_ENHANCE_1__OPTIMAL_BIN_SELECTION_MASK 0x00040000L
++#define PA_SC_ENHANCE_1__DISABLE_FORCE_SOP_ALL_EVENTS_MASK 0x00080000L
++#define PA_SC_ENHANCE_1__DISABLE_PBB_CLK_OPTIMIZATION_MASK 0x00100000L
++#define PA_SC_ENHANCE_1__DISABLE_PBB_SCISSOR_CLK_OPTIMIZATION_MASK 0x00200000L
++#define PA_SC_ENHANCE_1__DISABLE_PBB_BINNING_CLK_OPTIMIZATION_MASK 0x00400000L
++#define PA_SC_ENHANCE_1__RSVD_MASK 0xFF800000L
++//PA_SC_DSM_CNTL
++#define PA_SC_DSM_CNTL__FORCE_EOV_REZ_0__SHIFT 0x0
++#define PA_SC_DSM_CNTL__FORCE_EOV_REZ_1__SHIFT 0x1
++#define PA_SC_DSM_CNTL__FORCE_EOV_REZ_0_MASK 0x00000001L
++#define PA_SC_DSM_CNTL__FORCE_EOV_REZ_1_MASK 0x00000002L
++//PA_SC_TILE_STEERING_CREST_OVERRIDE
++#define PA_SC_TILE_STEERING_CREST_OVERRIDE__ONE_RB_MODE_ENABLE__SHIFT 0x0
++#define PA_SC_TILE_STEERING_CREST_OVERRIDE__SE_SELECT__SHIFT 0x1
++#define PA_SC_TILE_STEERING_CREST_OVERRIDE__RB_SELECT__SHIFT 0x5
++#define PA_SC_TILE_STEERING_CREST_OVERRIDE__ONE_RB_MODE_ENABLE_MASK 0x00000001L
++#define PA_SC_TILE_STEERING_CREST_OVERRIDE__SE_SELECT_MASK 0x00000006L
++#define PA_SC_TILE_STEERING_CREST_OVERRIDE__RB_SELECT_MASK 0x00000060L
++
++
++// addressBlock: gc_sqdec
++//SQ_CONFIG
++#define SQ_CONFIG__UNUSED__SHIFT 0x0
++#define SQ_CONFIG__OVERRIDE_ALU_BUSY__SHIFT 0x7
++#define SQ_CONFIG__OVERRIDE_LDS_IDX_BUSY__SHIFT 0xb
++#define SQ_CONFIG__EARLY_TA_DONE_DISABLE__SHIFT 0xc
++#define SQ_CONFIG__DUA_FLAT_LOCK_ENABLE__SHIFT 0xd
++#define SQ_CONFIG__DUA_LDS_BYPASS_DISABLE__SHIFT 0xe
++#define SQ_CONFIG__DUA_FLAT_LDS_PINGPONG_DISABLE__SHIFT 0xf
++#define SQ_CONFIG__DISABLE_VMEM_SOFT_CLAUSE__SHIFT 0x10
++#define SQ_CONFIG__DISABLE_SMEM_SOFT_CLAUSE__SHIFT 0x11
++#define SQ_CONFIG__ENABLE_HIPRIO_ON_EXP_RDY_VS__SHIFT 0x12
++#define SQ_CONFIG__PRIO_VAL_ON_EXP_RDY_VS__SHIFT 0x13
++#define SQ_CONFIG__REPLAY_SLEEP_CNT__SHIFT 0x15
++#define SQ_CONFIG__DISABLE_SP_VGPR_WRITE_SKIP__SHIFT 0x1c
++#define SQ_CONFIG__DISABLE_SP_REDUNDANT_THREAD_GATING__SHIFT 0x1d
++#define SQ_CONFIG__DISABLE_FLAT_SOFT_CLAUSE__SHIFT 0x1e
++#define SQ_CONFIG__DISABLE_MIMG_SOFT_CLAUSE__SHIFT 0x1f
++#define SQ_CONFIG__UNUSED_MASK 0x0000007FL
++#define SQ_CONFIG__OVERRIDE_ALU_BUSY_MASK 0x00000080L
++#define SQ_CONFIG__OVERRIDE_LDS_IDX_BUSY_MASK 0x00000800L
++#define SQ_CONFIG__EARLY_TA_DONE_DISABLE_MASK 0x00001000L
++#define SQ_CONFIG__DUA_FLAT_LOCK_ENABLE_MASK 0x00002000L
++#define SQ_CONFIG__DUA_LDS_BYPASS_DISABLE_MASK 0x00004000L
++#define SQ_CONFIG__DUA_FLAT_LDS_PINGPONG_DISABLE_MASK 0x00008000L
++#define SQ_CONFIG__DISABLE_VMEM_SOFT_CLAUSE_MASK 0x00010000L
++#define SQ_CONFIG__DISABLE_SMEM_SOFT_CLAUSE_MASK 0x00020000L
++#define SQ_CONFIG__ENABLE_HIPRIO_ON_EXP_RDY_VS_MASK 0x00040000L
++#define SQ_CONFIG__PRIO_VAL_ON_EXP_RDY_VS_MASK 0x00180000L
++#define SQ_CONFIG__REPLAY_SLEEP_CNT_MASK 0x0FE00000L
++#define SQ_CONFIG__DISABLE_SP_VGPR_WRITE_SKIP_MASK 0x10000000L
++#define SQ_CONFIG__DISABLE_SP_REDUNDANT_THREAD_GATING_MASK 0x20000000L
++#define SQ_CONFIG__DISABLE_FLAT_SOFT_CLAUSE_MASK 0x40000000L
++#define SQ_CONFIG__DISABLE_MIMG_SOFT_CLAUSE_MASK 0x80000000L
++//SQC_CONFIG
++#define SQC_CONFIG__INST_CACHE_SIZE__SHIFT 0x0
++#define SQC_CONFIG__DATA_CACHE_SIZE__SHIFT 0x2
++#define SQC_CONFIG__MISS_FIFO_DEPTH__SHIFT 0x4
++#define SQC_CONFIG__HIT_FIFO_DEPTH__SHIFT 0x6
++#define SQC_CONFIG__FORCE_ALWAYS_MISS__SHIFT 0x7
++#define SQC_CONFIG__FORCE_IN_ORDER__SHIFT 0x8
++#define SQC_CONFIG__IDENTITY_HASH_BANK__SHIFT 0x9
++#define SQC_CONFIG__IDENTITY_HASH_SET__SHIFT 0xa
++#define SQC_CONFIG__PER_VMID_INV_DISABLE__SHIFT 0xb
++#define SQC_CONFIG__EVICT_LRU__SHIFT 0xc
++#define SQC_CONFIG__FORCE_2_BANK__SHIFT 0xe
++#define SQC_CONFIG__FORCE_1_BANK__SHIFT 0xf
++#define SQC_CONFIG__LS_DISABLE_CLOCKS__SHIFT 0x10
++#define SQC_CONFIG__INST_PRF_COUNT__SHIFT 0x18
++#define SQC_CONFIG__INST_PRF_FILTER_DIS__SHIFT 0x1a
++#define SQC_CONFIG__INST_CACHE_SIZE_MASK 0x00000003L
++#define SQC_CONFIG__DATA_CACHE_SIZE_MASK 0x0000000CL
++#define SQC_CONFIG__MISS_FIFO_DEPTH_MASK 0x00000030L
++#define SQC_CONFIG__HIT_FIFO_DEPTH_MASK 0x00000040L
++#define SQC_CONFIG__FORCE_ALWAYS_MISS_MASK 0x00000080L
++#define SQC_CONFIG__FORCE_IN_ORDER_MASK 0x00000100L
++#define SQC_CONFIG__IDENTITY_HASH_BANK_MASK 0x00000200L
++#define SQC_CONFIG__IDENTITY_HASH_SET_MASK 0x00000400L
++#define SQC_CONFIG__PER_VMID_INV_DISABLE_MASK 0x00000800L
++#define SQC_CONFIG__EVICT_LRU_MASK 0x00003000L
++#define SQC_CONFIG__FORCE_2_BANK_MASK 0x00004000L
++#define SQC_CONFIG__FORCE_1_BANK_MASK 0x00008000L
++#define SQC_CONFIG__LS_DISABLE_CLOCKS_MASK 0x00FF0000L
++#define SQC_CONFIG__INST_PRF_COUNT_MASK 0x03000000L
++#define SQC_CONFIG__INST_PRF_FILTER_DIS_MASK 0x04000000L
++//LDS_CONFIG
++#define LDS_CONFIG__ADDR_OUT_OF_RANGE_REPORTING__SHIFT 0x0
++#define LDS_CONFIG__ADDR_OUT_OF_RANGE_REPORTING_MASK 0x00000001L
++//SQ_RANDOM_WAVE_PRI
++#define SQ_RANDOM_WAVE_PRI__RET__SHIFT 0x0
++#define SQ_RANDOM_WAVE_PRI__RUI__SHIFT 0x7
++#define SQ_RANDOM_WAVE_PRI__RNG__SHIFT 0xa
++#define SQ_RANDOM_WAVE_PRI__RET_MASK 0x0000007FL
++#define SQ_RANDOM_WAVE_PRI__RUI_MASK 0x00000380L
++#define SQ_RANDOM_WAVE_PRI__RNG_MASK 0x007FFC00L
++//SQ_REG_CREDITS
++#define SQ_REG_CREDITS__SRBM_CREDITS__SHIFT 0x0
++#define SQ_REG_CREDITS__CMD_CREDITS__SHIFT 0x8
++#define SQ_REG_CREDITS__REG_BUSY__SHIFT 0x1c
++#define SQ_REG_CREDITS__SRBM_OVERFLOW__SHIFT 0x1d
++#define SQ_REG_CREDITS__IMMED_OVERFLOW__SHIFT 0x1e
++#define SQ_REG_CREDITS__CMD_OVERFLOW__SHIFT 0x1f
++#define SQ_REG_CREDITS__SRBM_CREDITS_MASK 0x0000003FL
++#define SQ_REG_CREDITS__CMD_CREDITS_MASK 0x00000F00L
++#define SQ_REG_CREDITS__REG_BUSY_MASK 0x10000000L
++#define SQ_REG_CREDITS__SRBM_OVERFLOW_MASK 0x20000000L
++#define SQ_REG_CREDITS__IMMED_OVERFLOW_MASK 0x40000000L
++#define SQ_REG_CREDITS__CMD_OVERFLOW_MASK 0x80000000L
++//SQ_FIFO_SIZES
++#define SQ_FIFO_SIZES__INTERRUPT_FIFO_SIZE__SHIFT 0x0
++#define SQ_FIFO_SIZES__TTRACE_FIFO_SIZE__SHIFT 0x8
++#define SQ_FIFO_SIZES__EXPORT_BUF_SIZE__SHIFT 0x10
++#define SQ_FIFO_SIZES__VMEM_DATA_FIFO_SIZE__SHIFT 0x12
++#define SQ_FIFO_SIZES__INTERRUPT_FIFO_SIZE_MASK 0x0000000FL
++#define SQ_FIFO_SIZES__TTRACE_FIFO_SIZE_MASK 0x00000F00L
++#define SQ_FIFO_SIZES__EXPORT_BUF_SIZE_MASK 0x00030000L
++#define SQ_FIFO_SIZES__VMEM_DATA_FIFO_SIZE_MASK 0x000C0000L
++//SQ_DSM_CNTL
++#define SQ_DSM_CNTL__WAVEFRONT_STALL_0__SHIFT 0x0
++#define SQ_DSM_CNTL__WAVEFRONT_STALL_1__SHIFT 0x1
++#define SQ_DSM_CNTL__SPI_BACKPRESSURE_0__SHIFT 0x2
++#define SQ_DSM_CNTL__SPI_BACKPRESSURE_1__SHIFT 0x3
++#define SQ_DSM_CNTL__SEL_DSM_SGPR_IRRITATOR_DATA0__SHIFT 0x8
++#define SQ_DSM_CNTL__SEL_DSM_SGPR_IRRITATOR_DATA1__SHIFT 0x9
++#define SQ_DSM_CNTL__SGPR_ENABLE_SINGLE_WRITE__SHIFT 0xa
++#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA0__SHIFT 0x10
++#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA1__SHIFT 0x11
++#define SQ_DSM_CNTL__LDS_ENABLE_SINGLE_WRITE01__SHIFT 0x12
++#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA2__SHIFT 0x13
++#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA3__SHIFT 0x14
++#define SQ_DSM_CNTL__LDS_ENABLE_SINGLE_WRITE23__SHIFT 0x15
++#define SQ_DSM_CNTL__SEL_DSM_SP_IRRITATOR_DATA0__SHIFT 0x18
++#define SQ_DSM_CNTL__SEL_DSM_SP_IRRITATOR_DATA1__SHIFT 0x19
++#define SQ_DSM_CNTL__SP_ENABLE_SINGLE_WRITE__SHIFT 0x1a
++#define SQ_DSM_CNTL__WAVEFRONT_STALL_0_MASK 0x00000001L
++#define SQ_DSM_CNTL__WAVEFRONT_STALL_1_MASK 0x00000002L
++#define SQ_DSM_CNTL__SPI_BACKPRESSURE_0_MASK 0x00000004L
++#define SQ_DSM_CNTL__SPI_BACKPRESSURE_1_MASK 0x00000008L
++#define SQ_DSM_CNTL__SEL_DSM_SGPR_IRRITATOR_DATA0_MASK 0x00000100L
++#define SQ_DSM_CNTL__SEL_DSM_SGPR_IRRITATOR_DATA1_MASK 0x00000200L
++#define SQ_DSM_CNTL__SGPR_ENABLE_SINGLE_WRITE_MASK 0x00000400L
++#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA0_MASK 0x00010000L
++#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA1_MASK 0x00020000L
++#define SQ_DSM_CNTL__LDS_ENABLE_SINGLE_WRITE01_MASK 0x00040000L
++#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA2_MASK 0x00080000L
++#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA3_MASK 0x00100000L
++#define SQ_DSM_CNTL__LDS_ENABLE_SINGLE_WRITE23_MASK 0x00200000L
++#define SQ_DSM_CNTL__SEL_DSM_SP_IRRITATOR_DATA0_MASK 0x01000000L
++#define SQ_DSM_CNTL__SEL_DSM_SP_IRRITATOR_DATA1_MASK 0x02000000L
++#define SQ_DSM_CNTL__SP_ENABLE_SINGLE_WRITE_MASK 0x04000000L
++//SQ_DSM_CNTL2
++#define SQ_DSM_CNTL2__SGPR_ENABLE_ERROR_INJECT__SHIFT 0x0
++#define SQ_DSM_CNTL2__SGPR_SELECT_INJECT_DELAY__SHIFT 0x2
++#define SQ_DSM_CNTL2__LDS_D_ENABLE_ERROR_INJECT__SHIFT 0x3
++#define SQ_DSM_CNTL2__LDS_D_SELECT_INJECT_DELAY__SHIFT 0x5
++#define SQ_DSM_CNTL2__LDS_I_ENABLE_ERROR_INJECT__SHIFT 0x6
++#define SQ_DSM_CNTL2__LDS_I_SELECT_INJECT_DELAY__SHIFT 0x8
++#define SQ_DSM_CNTL2__SP_ENABLE_ERROR_INJECT__SHIFT 0x9
++#define SQ_DSM_CNTL2__SP_SELECT_INJECT_DELAY__SHIFT 0xb
++#define SQ_DSM_CNTL2__LDS_INJECT_DELAY__SHIFT 0xe
++#define SQ_DSM_CNTL2__SP_INJECT_DELAY__SHIFT 0x14
++#define SQ_DSM_CNTL2__SQ_INJECT_DELAY__SHIFT 0x1a
++#define SQ_DSM_CNTL2__SGPR_ENABLE_ERROR_INJECT_MASK 0x00000003L
++#define SQ_DSM_CNTL2__SGPR_SELECT_INJECT_DELAY_MASK 0x00000004L
++#define SQ_DSM_CNTL2__LDS_D_ENABLE_ERROR_INJECT_MASK 0x00000018L
++#define SQ_DSM_CNTL2__LDS_D_SELECT_INJECT_DELAY_MASK 0x00000020L
++#define SQ_DSM_CNTL2__LDS_I_ENABLE_ERROR_INJECT_MASK 0x000000C0L
++#define SQ_DSM_CNTL2__LDS_I_SELECT_INJECT_DELAY_MASK 0x00000100L
++#define SQ_DSM_CNTL2__SP_ENABLE_ERROR_INJECT_MASK 0x00000600L
++#define SQ_DSM_CNTL2__SP_SELECT_INJECT_DELAY_MASK 0x00000800L
++#define SQ_DSM_CNTL2__LDS_INJECT_DELAY_MASK 0x000FC000L
++#define SQ_DSM_CNTL2__SP_INJECT_DELAY_MASK 0x03F00000L
++#define SQ_DSM_CNTL2__SQ_INJECT_DELAY_MASK 0xFC000000L
++//SQ_RUNTIME_CONFIG
++#define SQ_RUNTIME_CONFIG__ENABLE_TEX_ARB_OLDEST__SHIFT 0x0
++#define SQ_RUNTIME_CONFIG__ENABLE_TEX_ARB_OLDEST_MASK 0x00000001L
++//SH_MEM_BASES
++#define SH_MEM_BASES__PRIVATE_BASE__SHIFT 0x0
++#define SH_MEM_BASES__SHARED_BASE__SHIFT 0x10
++#define SH_MEM_BASES__PRIVATE_BASE_MASK 0x0000FFFFL
++#define SH_MEM_BASES__SHARED_BASE_MASK 0xFFFF0000L
++//SH_MEM_CONFIG
++#define SH_MEM_CONFIG__ADDRESS_MODE__SHIFT 0x0
++#define SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT 0x3
++#define SH_MEM_CONFIG__RETRY_DISABLE__SHIFT 0xc
++#define SH_MEM_CONFIG__PRIVATE_NV__SHIFT 0xd
++#define SH_MEM_CONFIG__ADDRESS_MODE_MASK 0x00000001L
++#define SH_MEM_CONFIG__ALIGNMENT_MODE_MASK 0x00000018L
++#define SH_MEM_CONFIG__RETRY_DISABLE_MASK 0x00001000L
++#define SH_MEM_CONFIG__PRIVATE_NV_MASK 0x00002000L
++//CC_GC_SHADER_RATE_CONFIG
++#define CC_GC_SHADER_RATE_CONFIG__DPFP_RATE__SHIFT 0x1
++#define CC_GC_SHADER_RATE_CONFIG__SQC_BALANCE_DISABLE__SHIFT 0x3
++#define CC_GC_SHADER_RATE_CONFIG__HALF_LDS__SHIFT 0x4
++#define CC_GC_SHADER_RATE_CONFIG__DPFP_RATE_MASK 0x00000006L
++#define CC_GC_SHADER_RATE_CONFIG__SQC_BALANCE_DISABLE_MASK 0x00000008L
++#define CC_GC_SHADER_RATE_CONFIG__HALF_LDS_MASK 0x00000010L
++//GC_USER_SHADER_RATE_CONFIG
++#define GC_USER_SHADER_RATE_CONFIG__DPFP_RATE__SHIFT 0x1
++#define GC_USER_SHADER_RATE_CONFIG__SQC_BALANCE_DISABLE__SHIFT 0x3
++#define GC_USER_SHADER_RATE_CONFIG__HALF_LDS__SHIFT 0x4
++#define GC_USER_SHADER_RATE_CONFIG__DPFP_RATE_MASK 0x00000006L
++#define GC_USER_SHADER_RATE_CONFIG__SQC_BALANCE_DISABLE_MASK 0x00000008L
++#define GC_USER_SHADER_RATE_CONFIG__HALF_LDS_MASK 0x00000010L
++//SQ_INTERRUPT_AUTO_MASK
++#define SQ_INTERRUPT_AUTO_MASK__MASK__SHIFT 0x0
++#define SQ_INTERRUPT_AUTO_MASK__MASK_MASK 0x00FFFFFFL
++//SQ_INTERRUPT_MSG_CTRL
++#define SQ_INTERRUPT_MSG_CTRL__STALL__SHIFT 0x0
++#define SQ_INTERRUPT_MSG_CTRL__STALL_MASK 0x00000001L
++//SQ_UTCL1_CNTL1
++#define SQ_UTCL1_CNTL1__FORCE_4K_L2_RESP__SHIFT 0x0
++#define SQ_UTCL1_CNTL1__GPUVM_64K_DEF__SHIFT 0x1
++#define SQ_UTCL1_CNTL1__GPUVM_PERM_MODE__SHIFT 0x2
++#define SQ_UTCL1_CNTL1__RESP_MODE__SHIFT 0x3
++#define SQ_UTCL1_CNTL1__RESP_FAULT_MODE__SHIFT 0x5
++#define SQ_UTCL1_CNTL1__CLIENTID__SHIFT 0x7
++#define SQ_UTCL1_CNTL1__USERVM_DIS__SHIFT 0x10
++#define SQ_UTCL1_CNTL1__ENABLE_PUSH_LFIFO__SHIFT 0x11
++#define SQ_UTCL1_CNTL1__ENABLE_LFIFO_PRI_ARB__SHIFT 0x12
++#define SQ_UTCL1_CNTL1__REG_INVALIDATE_VMID__SHIFT 0x13
++#define SQ_UTCL1_CNTL1__REG_INVALIDATE_ALL_VMID__SHIFT 0x17
++#define SQ_UTCL1_CNTL1__REG_INVALIDATE_TOGGLE__SHIFT 0x18
++#define SQ_UTCL1_CNTL1__REG_INVALIDATE_ALL__SHIFT 0x19
++#define SQ_UTCL1_CNTL1__FORCE_MISS__SHIFT 0x1a
++#define SQ_UTCL1_CNTL1__FORCE_IN_ORDER__SHIFT 0x1b
++#define SQ_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2__SHIFT 0x1c
++#define SQ_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2__SHIFT 0x1e
++#define SQ_UTCL1_CNTL1__FORCE_4K_L2_RESP_MASK 0x00000001L
++#define SQ_UTCL1_CNTL1__GPUVM_64K_DEF_MASK 0x00000002L
++#define SQ_UTCL1_CNTL1__GPUVM_PERM_MODE_MASK 0x00000004L
++#define SQ_UTCL1_CNTL1__RESP_MODE_MASK 0x00000018L
++#define SQ_UTCL1_CNTL1__RESP_FAULT_MODE_MASK 0x00000060L
++#define SQ_UTCL1_CNTL1__CLIENTID_MASK 0x0000FF80L
++#define SQ_UTCL1_CNTL1__USERVM_DIS_MASK 0x00010000L
++#define SQ_UTCL1_CNTL1__ENABLE_PUSH_LFIFO_MASK 0x00020000L
++#define SQ_UTCL1_CNTL1__ENABLE_LFIFO_PRI_ARB_MASK 0x00040000L
++#define SQ_UTCL1_CNTL1__REG_INVALIDATE_VMID_MASK 0x00780000L
++#define SQ_UTCL1_CNTL1__REG_INVALIDATE_ALL_VMID_MASK 0x00800000L
++#define SQ_UTCL1_CNTL1__REG_INVALIDATE_TOGGLE_MASK 0x01000000L
++#define SQ_UTCL1_CNTL1__REG_INVALIDATE_ALL_MASK 0x02000000L
++#define SQ_UTCL1_CNTL1__FORCE_MISS_MASK 0x04000000L
++#define SQ_UTCL1_CNTL1__FORCE_IN_ORDER_MASK 0x08000000L
++#define SQ_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2_MASK 0x30000000L
++#define SQ_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2_MASK 0xC0000000L
++//SQ_UTCL1_CNTL2
++#define SQ_UTCL1_CNTL2__SPARE__SHIFT 0x0
++#define SQ_UTCL1_CNTL2__LFIFO_SCAN_DISABLE__SHIFT 0x8
++#define SQ_UTCL1_CNTL2__MTYPE_OVRD_DIS__SHIFT 0x9
++#define SQ_UTCL1_CNTL2__LINE_VALID__SHIFT 0xa
++#define SQ_UTCL1_CNTL2__DIS_EDC__SHIFT 0xb
++#define SQ_UTCL1_CNTL2__GPUVM_INV_MODE__SHIFT 0xc
++#define SQ_UTCL1_CNTL2__SHOOTDOWN_OPT__SHIFT 0xd
++#define SQ_UTCL1_CNTL2__FORCE_SNOOP__SHIFT 0xe
++#define SQ_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK__SHIFT 0xf
++#define SQ_UTCL1_CNTL2__RETRY_TIMER__SHIFT 0x10
++#define SQ_UTCL1_CNTL2__FORCE_FRAG_2M_TO_64K__SHIFT 0x1a
++#define SQ_UTCL1_CNTL2__PREFETCH_PAGE__SHIFT 0x1c
++#define SQ_UTCL1_CNTL2__SPARE_MASK 0x000000FFL
++#define SQ_UTCL1_CNTL2__LFIFO_SCAN_DISABLE_MASK 0x00000100L
++#define SQ_UTCL1_CNTL2__MTYPE_OVRD_DIS_MASK 0x00000200L
++#define SQ_UTCL1_CNTL2__LINE_VALID_MASK 0x00000400L
++#define SQ_UTCL1_CNTL2__DIS_EDC_MASK 0x00000800L
++#define SQ_UTCL1_CNTL2__GPUVM_INV_MODE_MASK 0x00001000L
++#define SQ_UTCL1_CNTL2__SHOOTDOWN_OPT_MASK 0x00002000L
++#define SQ_UTCL1_CNTL2__FORCE_SNOOP_MASK 0x00004000L
++#define SQ_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK_MASK 0x00008000L
++#define SQ_UTCL1_CNTL2__RETRY_TIMER_MASK 0x007F0000L
++#define SQ_UTCL1_CNTL2__FORCE_FRAG_2M_TO_64K_MASK 0x04000000L
++#define SQ_UTCL1_CNTL2__PREFETCH_PAGE_MASK 0xF0000000L
++//SQ_UTCL1_STATUS
++#define SQ_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
++#define SQ_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
++#define SQ_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
++#define SQ_UTCL1_STATUS__RESERVED__SHIFT 0x3
++#define SQ_UTCL1_STATUS__UNUSED__SHIFT 0x10
++#define SQ_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
++#define SQ_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
++#define SQ_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
++#define SQ_UTCL1_STATUS__RESERVED_MASK 0x0000FFF8L
++#define SQ_UTCL1_STATUS__UNUSED_MASK 0xFFFF0000L
++//SQ_SHADER_TBA_LO
++#define SQ_SHADER_TBA_LO__ADDR_LO__SHIFT 0x0
++#define SQ_SHADER_TBA_LO__ADDR_LO_MASK 0xFFFFFFFFL
++//SQ_SHADER_TBA_HI
++#define SQ_SHADER_TBA_HI__ADDR_HI__SHIFT 0x0
++#define SQ_SHADER_TBA_HI__ADDR_HI_MASK 0x000000FFL
++//SQ_SHADER_TMA_LO
++#define SQ_SHADER_TMA_LO__ADDR_LO__SHIFT 0x0
++#define SQ_SHADER_TMA_LO__ADDR_LO_MASK 0xFFFFFFFFL
++//SQ_SHADER_TMA_HI
++#define SQ_SHADER_TMA_HI__ADDR_HI__SHIFT 0x0
++#define SQ_SHADER_TMA_HI__ADDR_HI_MASK 0x000000FFL
++//SQC_DSM_CNTL
++#define SQC_DSM_CNTL__INST_UTCL1_LFIFO_DSM_IRRITATOR_DATA__SHIFT 0x0
++#define SQC_DSM_CNTL__INST_UTCL1_LFIFO_ENABLE_SINGLE_WRITE__SHIFT 0x2
++#define SQC_DSM_CNTL__DATA_CU0_WRITE_DATA_BUF_DSM_IRRITATOR_DATA__SHIFT 0x3
++#define SQC_DSM_CNTL__DATA_CU0_WRITE_DATA_BUF_ENABLE_SINGLE_WRITE__SHIFT 0x5
++#define SQC_DSM_CNTL__DATA_CU0_UTCL1_LFIFO_DSM_IRRITATOR_DATA__SHIFT 0x6
++#define SQC_DSM_CNTL__DATA_CU0_UTCL1_LFIFO_ENABLE_SINGLE_WRITE__SHIFT 0x8
++#define SQC_DSM_CNTL__DATA_CU1_WRITE_DATA_BUF_DSM_IRRITATOR_DATA__SHIFT 0x9
++#define SQC_DSM_CNTL__DATA_CU1_WRITE_DATA_BUF_ENABLE_SINGLE_WRITE__SHIFT 0xb
++#define SQC_DSM_CNTL__DATA_CU1_UTCL1_LFIFO_DSM_IRRITATOR_DATA__SHIFT 0xc
++#define SQC_DSM_CNTL__DATA_CU1_UTCL1_LFIFO_ENABLE_SINGLE_WRITE__SHIFT 0xe
++#define SQC_DSM_CNTL__DATA_CU2_WRITE_DATA_BUF_DSM_IRRITATOR_DATA__SHIFT 0xf
++#define SQC_DSM_CNTL__DATA_CU2_WRITE_DATA_BUF_ENABLE_SINGLE_WRITE__SHIFT 0x11
++#define SQC_DSM_CNTL__DATA_CU2_UTCL1_LFIFO_DSM_IRRITATOR_DATA__SHIFT 0x12
++#define SQC_DSM_CNTL__DATA_CU2_UTCL1_LFIFO_ENABLE_SINGLE_WRITE__SHIFT 0x14
++#define SQC_DSM_CNTL__INST_UTCL1_LFIFO_DSM_IRRITATOR_DATA_MASK 0x00000003L
++#define SQC_DSM_CNTL__INST_UTCL1_LFIFO_ENABLE_SINGLE_WRITE_MASK 0x00000004L
++#define SQC_DSM_CNTL__DATA_CU0_WRITE_DATA_BUF_DSM_IRRITATOR_DATA_MASK 0x00000018L
++#define SQC_DSM_CNTL__DATA_CU0_WRITE_DATA_BUF_ENABLE_SINGLE_WRITE_MASK 0x00000020L
++#define SQC_DSM_CNTL__DATA_CU0_UTCL1_LFIFO_DSM_IRRITATOR_DATA_MASK 0x000000C0L
++#define SQC_DSM_CNTL__DATA_CU0_UTCL1_LFIFO_ENABLE_SINGLE_WRITE_MASK 0x00000100L
++#define SQC_DSM_CNTL__DATA_CU1_WRITE_DATA_BUF_DSM_IRRITATOR_DATA_MASK 0x00000600L
++#define SQC_DSM_CNTL__DATA_CU1_WRITE_DATA_BUF_ENABLE_SINGLE_WRITE_MASK 0x00000800L
++#define SQC_DSM_CNTL__DATA_CU1_UTCL1_LFIFO_DSM_IRRITATOR_DATA_MASK 0x00003000L
++#define SQC_DSM_CNTL__DATA_CU1_UTCL1_LFIFO_ENABLE_SINGLE_WRITE_MASK 0x00004000L
++#define SQC_DSM_CNTL__DATA_CU2_WRITE_DATA_BUF_DSM_IRRITATOR_DATA_MASK 0x00018000L
++#define SQC_DSM_CNTL__DATA_CU2_WRITE_DATA_BUF_ENABLE_SINGLE_WRITE_MASK 0x00020000L
++#define SQC_DSM_CNTL__DATA_CU2_UTCL1_LFIFO_DSM_IRRITATOR_DATA_MASK 0x000C0000L
++#define SQC_DSM_CNTL__DATA_CU2_UTCL1_LFIFO_ENABLE_SINGLE_WRITE_MASK 0x00100000L
++//SQC_DSM_CNTLA
++#define SQC_DSM_CNTLA__INST_TAG_RAM_DSM_IRRITATOR_DATA__SHIFT 0x0
++#define SQC_DSM_CNTLA__INST_TAG_RAM_ENABLE_SINGLE_WRITE__SHIFT 0x2
++#define SQC_DSM_CNTLA__INST_UTCL1_MISS_FIFO_DSM_IRRITATOR_DATA__SHIFT 0x3
++#define SQC_DSM_CNTLA__INST_UTCL1_MISS_FIFO_ENABLE_SINGLE_WRITE__SHIFT 0x5
++#define SQC_DSM_CNTLA__INST_MISS_FIFO_DSM_IRRITATOR_DATA__SHIFT 0x6
++#define SQC_DSM_CNTLA__INST_MISS_FIFO_ENABLE_SINGLE_WRITE__SHIFT 0x8
++#define SQC_DSM_CNTLA__INST_BANK_RAM_DSM_IRRITATOR_DATA__SHIFT 0x9
++#define SQC_DSM_CNTLA__INST_BANK_RAM_ENABLE_SINGLE_WRITE__SHIFT 0xb
++#define SQC_DSM_CNTLA__DATA_TAG_RAM_DSM_IRRITATOR_DATA__SHIFT 0xc
++#define SQC_DSM_CNTLA__DATA_TAG_RAM_ENABLE_SINGLE_WRITE__SHIFT 0xe
++#define SQC_DSM_CNTLA__DATA_HIT_FIFO_DSM_IRRITATOR_DATA__SHIFT 0xf
++#define SQC_DSM_CNTLA__DATA_HIT_FIFO_ENABLE_SINGLE_WRITE__SHIFT 0x11
++#define SQC_DSM_CNTLA__DATA_MISS_FIFO_DSM_IRRITATOR_DATA__SHIFT 0x12
++#define SQC_DSM_CNTLA__DATA_MISS_FIFO_ENABLE_SINGLE_WRITE__SHIFT 0x14
++#define SQC_DSM_CNTLA__DATA_DIRTY_BIT_RAM_DSM_IRRITATOR_DATA__SHIFT 0x15
++#define SQC_DSM_CNTLA__DATA_DIRTY_BIT_RAM_ENABLE_SINGLE_WRITE__SHIFT 0x17
++#define SQC_DSM_CNTLA__DATA_BANK_RAM_DSM_IRRITATOR_DATA__SHIFT 0x18
++#define SQC_DSM_CNTLA__DATA_BANK_RAM_ENABLE_SINGLE_WRITE__SHIFT 0x1a
++#define SQC_DSM_CNTLA__INST_TAG_RAM_DSM_IRRITATOR_DATA_MASK 0x00000003L
++#define SQC_DSM_CNTLA__INST_TAG_RAM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
++#define SQC_DSM_CNTLA__INST_UTCL1_MISS_FIFO_DSM_IRRITATOR_DATA_MASK 0x00000018L
++#define SQC_DSM_CNTLA__INST_UTCL1_MISS_FIFO_ENABLE_SINGLE_WRITE_MASK 0x00000020L
++#define SQC_DSM_CNTLA__INST_MISS_FIFO_DSM_IRRITATOR_DATA_MASK 0x000000C0L
++#define SQC_DSM_CNTLA__INST_MISS_FIFO_ENABLE_SINGLE_WRITE_MASK 0x00000100L
++#define SQC_DSM_CNTLA__INST_BANK_RAM_DSM_IRRITATOR_DATA_MASK 0x00000600L
++#define SQC_DSM_CNTLA__INST_BANK_RAM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
++#define SQC_DSM_CNTLA__DATA_TAG_RAM_DSM_IRRITATOR_DATA_MASK 0x00003000L
++#define SQC_DSM_CNTLA__DATA_TAG_RAM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
++#define SQC_DSM_CNTLA__DATA_HIT_FIFO_DSM_IRRITATOR_DATA_MASK 0x00018000L
++#define SQC_DSM_CNTLA__DATA_HIT_FIFO_ENABLE_SINGLE_WRITE_MASK 0x00020000L
++#define SQC_DSM_CNTLA__DATA_MISS_FIFO_DSM_IRRITATOR_DATA_MASK 0x000C0000L
++#define SQC_DSM_CNTLA__DATA_MISS_FIFO_ENABLE_SINGLE_WRITE_MASK 0x00100000L
++#define SQC_DSM_CNTLA__DATA_DIRTY_BIT_RAM_DSM_IRRITATOR_DATA_MASK 0x00600000L
++#define SQC_DSM_CNTLA__DATA_DIRTY_BIT_RAM_ENABLE_SINGLE_WRITE_MASK 0x00800000L
++#define SQC_DSM_CNTLA__DATA_BANK_RAM_DSM_IRRITATOR_DATA_MASK 0x03000000L
++#define SQC_DSM_CNTLA__DATA_BANK_RAM_ENABLE_SINGLE_WRITE_MASK 0x04000000L
++//SQC_DSM_CNTLB
++#define SQC_DSM_CNTLB__INST_TAG_RAM_DSM_IRRITATOR_DATA__SHIFT 0x0
++#define SQC_DSM_CNTLB__INST_TAG_RAM_ENABLE_SINGLE_WRITE__SHIFT 0x2
++#define SQC_DSM_CNTLB__INST_UTCL1_MISS_FIFO_DSM_IRRITATOR_DATA__SHIFT 0x3
++#define SQC_DSM_CNTLB__INST_UTCL1_MISS_FIFO_ENABLE_SINGLE_WRITE__SHIFT 0x5
++#define SQC_DSM_CNTLB__INST_MISS_FIFO_DSM_IRRITATOR_DATA__SHIFT 0x6
++#define SQC_DSM_CNTLB__INST_MISS_FIFO_ENABLE_SINGLE_WRITE__SHIFT 0x8
++#define SQC_DSM_CNTLB__INST_BANK_RAM_DSM_IRRITATOR_DATA__SHIFT 0x9
++#define SQC_DSM_CNTLB__INST_BANK_RAM_ENABLE_SINGLE_WRITE__SHIFT 0xb
++#define SQC_DSM_CNTLB__DATA_TAG_RAM_DSM_IRRITATOR_DATA__SHIFT 0xc
++#define SQC_DSM_CNTLB__DATA_TAG_RAM_ENABLE_SINGLE_WRITE__SHIFT 0xe
++#define SQC_DSM_CNTLB__DATA_HIT_FIFO_DSM_IRRITATOR_DATA__SHIFT 0xf
++#define SQC_DSM_CNTLB__DATA_HIT_FIFO_ENABLE_SINGLE_WRITE__SHIFT 0x11
++#define SQC_DSM_CNTLB__DATA_MISS_FIFO_DSM_IRRITATOR_DATA__SHIFT 0x12
++#define SQC_DSM_CNTLB__DATA_MISS_FIFO_ENABLE_SINGLE_WRITE__SHIFT 0x14
++#define SQC_DSM_CNTLB__DATA_DIRTY_BIT_RAM_DSM_IRRITATOR_DATA__SHIFT 0x15
++#define SQC_DSM_CNTLB__DATA_DIRTY_BIT_RAM_ENABLE_SINGLE_WRITE__SHIFT 0x17
++#define SQC_DSM_CNTLB__DATA_BANK_RAM_DSM_IRRITATOR_DATA__SHIFT 0x18
++#define SQC_DSM_CNTLB__DATA_BANK_RAM_ENABLE_SINGLE_WRITE__SHIFT 0x1a
++#define SQC_DSM_CNTLB__INST_TAG_RAM_DSM_IRRITATOR_DATA_MASK 0x00000003L
++#define SQC_DSM_CNTLB__INST_TAG_RAM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
++#define SQC_DSM_CNTLB__INST_UTCL1_MISS_FIFO_DSM_IRRITATOR_DATA_MASK 0x00000018L
++#define SQC_DSM_CNTLB__INST_UTCL1_MISS_FIFO_ENABLE_SINGLE_WRITE_MASK 0x00000020L
++#define SQC_DSM_CNTLB__INST_MISS_FIFO_DSM_IRRITATOR_DATA_MASK 0x000000C0L
++#define SQC_DSM_CNTLB__INST_MISS_FIFO_ENABLE_SINGLE_WRITE_MASK 0x00000100L
++#define SQC_DSM_CNTLB__INST_BANK_RAM_DSM_IRRITATOR_DATA_MASK 0x00000600L
++#define SQC_DSM_CNTLB__INST_BANK_RAM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
++#define SQC_DSM_CNTLB__DATA_TAG_RAM_DSM_IRRITATOR_DATA_MASK 0x00003000L
++#define SQC_DSM_CNTLB__DATA_TAG_RAM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
++#define SQC_DSM_CNTLB__DATA_HIT_FIFO_DSM_IRRITATOR_DATA_MASK 0x00018000L
++#define SQC_DSM_CNTLB__DATA_HIT_FIFO_ENABLE_SINGLE_WRITE_MASK 0x00020000L
++#define SQC_DSM_CNTLB__DATA_MISS_FIFO_DSM_IRRITATOR_DATA_MASK 0x000C0000L
++#define SQC_DSM_CNTLB__DATA_MISS_FIFO_ENABLE_SINGLE_WRITE_MASK 0x00100000L
++#define SQC_DSM_CNTLB__DATA_DIRTY_BIT_RAM_DSM_IRRITATOR_DATA_MASK 0x00600000L
++#define SQC_DSM_CNTLB__DATA_DIRTY_BIT_RAM_ENABLE_SINGLE_WRITE_MASK 0x00800000L
++#define SQC_DSM_CNTLB__DATA_BANK_RAM_DSM_IRRITATOR_DATA_MASK 0x03000000L
++#define SQC_DSM_CNTLB__DATA_BANK_RAM_ENABLE_SINGLE_WRITE_MASK 0x04000000L
++//SQC_DSM_CNTL2
++#define SQC_DSM_CNTL2__INST_UTCL1_LFIFO_ENABLE_ERROR_INJECT__SHIFT 0x0
++#define SQC_DSM_CNTL2__INST_UTCL1_LFIFO_SELECT_INJECT_DELAY__SHIFT 0x2
++#define SQC_DSM_CNTL2__DATA_CU0_WRITE_DATA_BUF_ENABLE_ERROR_INJECT__SHIFT 0x3
++#define SQC_DSM_CNTL2__DATA_CU0_WRITE_DATA_BUF_SELECT_INJECT_DELAY__SHIFT 0x5
++#define SQC_DSM_CNTL2__DATA_CU0_UTCL1_LFIFO_ENABLE_ERROR_INJECT__SHIFT 0x6
++#define SQC_DSM_CNTL2__DATA_CU0_UTCL1_LFIFO_SELECT_INJECT_DELAY__SHIFT 0x8
++#define SQC_DSM_CNTL2__DATA_CU1_WRITE_DATA_BUF_ENABLE_ERROR_INJECT__SHIFT 0x9
++#define SQC_DSM_CNTL2__DATA_CU1_WRITE_DATA_BUF_SELECT_INJECT_DELAY__SHIFT 0xb
++#define SQC_DSM_CNTL2__DATA_CU1_UTCL1_LFIFO_ENABLE_ERROR_INJECT__SHIFT 0xc
++#define SQC_DSM_CNTL2__DATA_CU1_UTCL1_LFIFO_SELECT_INJECT_DELAY__SHIFT 0xe
++#define SQC_DSM_CNTL2__DATA_CU2_WRITE_DATA_BUF_ENABLE_ERROR_INJECT__SHIFT 0xf
++#define SQC_DSM_CNTL2__DATA_CU2_WRITE_DATA_BUF_SELECT_INJECT_DELAY__SHIFT 0x11
++#define SQC_DSM_CNTL2__DATA_CU2_UTCL1_LFIFO_ENABLE_ERROR_INJECT__SHIFT 0x12
++#define SQC_DSM_CNTL2__DATA_CU2_UTCL1_LFIFO_SELECT_INJECT_DELAY__SHIFT 0x14
++#define SQC_DSM_CNTL2__INJECT_DELAY__SHIFT 0x1a
++#define SQC_DSM_CNTL2__INST_UTCL1_LFIFO_ENABLE_ERROR_INJECT_MASK 0x00000003L
++#define SQC_DSM_CNTL2__INST_UTCL1_LFIFO_SELECT_INJECT_DELAY_MASK 0x00000004L
++#define SQC_DSM_CNTL2__DATA_CU0_WRITE_DATA_BUF_ENABLE_ERROR_INJECT_MASK 0x00000018L
++#define SQC_DSM_CNTL2__DATA_CU0_WRITE_DATA_BUF_SELECT_INJECT_DELAY_MASK 0x00000020L
++#define SQC_DSM_CNTL2__DATA_CU0_UTCL1_LFIFO_ENABLE_ERROR_INJECT_MASK 0x000000C0L
++#define SQC_DSM_CNTL2__DATA_CU0_UTCL1_LFIFO_SELECT_INJECT_DELAY_MASK 0x00000100L
++#define SQC_DSM_CNTL2__DATA_CU1_WRITE_DATA_BUF_ENABLE_ERROR_INJECT_MASK 0x00000600L
++#define SQC_DSM_CNTL2__DATA_CU1_WRITE_DATA_BUF_SELECT_INJECT_DELAY_MASK 0x00000800L
++#define SQC_DSM_CNTL2__DATA_CU1_UTCL1_LFIFO_ENABLE_ERROR_INJECT_MASK 0x00003000L
++#define SQC_DSM_CNTL2__DATA_CU1_UTCL1_LFIFO_SELECT_INJECT_DELAY_MASK 0x00004000L
++#define SQC_DSM_CNTL2__DATA_CU2_WRITE_DATA_BUF_ENABLE_ERROR_INJECT_MASK 0x00018000L
++#define SQC_DSM_CNTL2__DATA_CU2_WRITE_DATA_BUF_SELECT_INJECT_DELAY_MASK 0x00020000L
++#define SQC_DSM_CNTL2__DATA_CU2_UTCL1_LFIFO_ENABLE_ERROR_INJECT_MASK 0x000C0000L
++#define SQC_DSM_CNTL2__DATA_CU2_UTCL1_LFIFO_SELECT_INJECT_DELAY_MASK 0x00100000L
++#define SQC_DSM_CNTL2__INJECT_DELAY_MASK 0xFC000000L
++//SQC_DSM_CNTL2A
++#define SQC_DSM_CNTL2A__INST_TAG_RAM_ENABLE_ERROR_INJECT__SHIFT 0x0
++#define SQC_DSM_CNTL2A__INST_TAG_RAM_SELECT_INJECT_DELAY__SHIFT 0x2
++#define SQC_DSM_CNTL2A__INST_UTCL1_MISS_FIFO_ENABLE_ERROR_INJECT__SHIFT 0x3
++#define SQC_DSM_CNTL2A__INST_UTCL1_MISS_FIFO_SELECT_INJECT_DELAY__SHIFT 0x5
++#define SQC_DSM_CNTL2A__INST_MISS_FIFO_ENABLE_ERROR_INJECT__SHIFT 0x6
++#define SQC_DSM_CNTL2A__INST_MISS_FIFO_SELECT_INJECT_DELAY__SHIFT 0x8
++#define SQC_DSM_CNTL2A__INST_BANK_RAM_ENABLE_ERROR_INJECT__SHIFT 0x9
++#define SQC_DSM_CNTL2A__INST_BANK_RAM_SELECT_INJECT_DELAY__SHIFT 0xb
++#define SQC_DSM_CNTL2A__DATA_TAG_RAM_ENABLE_ERROR_INJECT__SHIFT 0xc
++#define SQC_DSM_CNTL2A__DATA_TAG_RAM_SELECT_INJECT_DELAY__SHIFT 0xe
++#define SQC_DSM_CNTL2A__DATA_HIT_FIFO_ENABLE_ERROR_INJECT__SHIFT 0xf
++#define SQC_DSM_CNTL2A__DATA_HIT_FIFO_SELECT_INJECT_DELAY__SHIFT 0x11
++#define SQC_DSM_CNTL2A__DATA_MISS_FIFO_ENABLE_ERROR_INJECT__SHIFT 0x12
++#define SQC_DSM_CNTL2A__DATA_MISS_FIFO_SELECT_INJECT_DELAY__SHIFT 0x14
++#define SQC_DSM_CNTL2A__DATA_DIRTY_BIT_RAM_ENABLE_ERROR_INJECT__SHIFT 0x15
++#define SQC_DSM_CNTL2A__DATA_DIRTY_BIT_RAM_SELECT_INJECT_DELAY__SHIFT 0x17
++#define SQC_DSM_CNTL2A__DATA_BANK_RAM_ENABLE_ERROR_INJECT__SHIFT 0x18
++#define SQC_DSM_CNTL2A__DATA_BANK_RAM_SELECT_INJECT_DELAY__SHIFT 0x1a
++#define SQC_DSM_CNTL2A__INST_TAG_RAM_ENABLE_ERROR_INJECT_MASK 0x00000003L
++#define SQC_DSM_CNTL2A__INST_TAG_RAM_SELECT_INJECT_DELAY_MASK 0x00000004L
++#define SQC_DSM_CNTL2A__INST_UTCL1_MISS_FIFO_ENABLE_ERROR_INJECT_MASK 0x00000018L
++#define SQC_DSM_CNTL2A__INST_UTCL1_MISS_FIFO_SELECT_INJECT_DELAY_MASK 0x00000020L
++#define SQC_DSM_CNTL2A__INST_MISS_FIFO_ENABLE_ERROR_INJECT_MASK 0x000000C0L
++#define SQC_DSM_CNTL2A__INST_MISS_FIFO_SELECT_INJECT_DELAY_MASK 0x00000100L
++#define SQC_DSM_CNTL2A__INST_BANK_RAM_ENABLE_ERROR_INJECT_MASK 0x00000600L
++#define SQC_DSM_CNTL2A__INST_BANK_RAM_SELECT_INJECT_DELAY_MASK 0x00000800L
++#define SQC_DSM_CNTL2A__DATA_TAG_RAM_ENABLE_ERROR_INJECT_MASK 0x00003000L
++#define SQC_DSM_CNTL2A__DATA_TAG_RAM_SELECT_INJECT_DELAY_MASK 0x00004000L
++#define SQC_DSM_CNTL2A__DATA_HIT_FIFO_ENABLE_ERROR_INJECT_MASK 0x00018000L
++#define SQC_DSM_CNTL2A__DATA_HIT_FIFO_SELECT_INJECT_DELAY_MASK 0x00020000L
++#define SQC_DSM_CNTL2A__DATA_MISS_FIFO_ENABLE_ERROR_INJECT_MASK 0x000C0000L
++#define SQC_DSM_CNTL2A__DATA_MISS_FIFO_SELECT_INJECT_DELAY_MASK 0x00100000L
++#define SQC_DSM_CNTL2A__DATA_DIRTY_BIT_RAM_ENABLE_ERROR_INJECT_MASK 0x00600000L
++#define SQC_DSM_CNTL2A__DATA_DIRTY_BIT_RAM_SELECT_INJECT_DELAY_MASK 0x00800000L
++#define SQC_DSM_CNTL2A__DATA_BANK_RAM_ENABLE_ERROR_INJECT_MASK 0x03000000L
++#define SQC_DSM_CNTL2A__DATA_BANK_RAM_SELECT_INJECT_DELAY_MASK 0x04000000L
++//SQC_DSM_CNTL2B
++#define SQC_DSM_CNTL2B__INST_TAG_RAM_ENABLE_ERROR_INJECT__SHIFT 0x0
++#define SQC_DSM_CNTL2B__INST_TAG_RAM_SELECT_INJECT_DELAY__SHIFT 0x2
++#define SQC_DSM_CNTL2B__INST_UTCL1_MISS_FIFO_ENABLE_ERROR_INJECT__SHIFT 0x3
++#define SQC_DSM_CNTL2B__INST_UTCL1_MISS_FIFO_SELECT_INJECT_DELAY__SHIFT 0x5
++#define SQC_DSM_CNTL2B__INST_MISS_FIFO_ENABLE_ERROR_INJECT__SHIFT 0x6
++#define SQC_DSM_CNTL2B__INST_MISS_FIFO_SELECT_INJECT_DELAY__SHIFT 0x8
++#define SQC_DSM_CNTL2B__INST_BANK_RAM_ENABLE_ERROR_INJECT__SHIFT 0x9
++#define SQC_DSM_CNTL2B__INST_BANK_RAM_SELECT_INJECT_DELAY__SHIFT 0xb
++#define SQC_DSM_CNTL2B__DATA_TAG_RAM_ENABLE_ERROR_INJECT__SHIFT 0xc
++#define SQC_DSM_CNTL2B__DATA_TAG_RAM_SELECT_INJECT_DELAY__SHIFT 0xe
++#define SQC_DSM_CNTL2B__DATA_HIT_FIFO_ENABLE_ERROR_INJECT__SHIFT 0xf
++#define SQC_DSM_CNTL2B__DATA_HIT_FIFO_SELECT_INJECT_DELAY__SHIFT 0x11
++#define SQC_DSM_CNTL2B__DATA_MISS_FIFO_ENABLE_ERROR_INJECT__SHIFT 0x12
++#define SQC_DSM_CNTL2B__DATA_MISS_FIFO_SELECT_INJECT_DELAY__SHIFT 0x14
++#define SQC_DSM_CNTL2B__DATA_DIRTY_BIT_RAM_ENABLE_ERROR_INJECT__SHIFT 0x15
++#define SQC_DSM_CNTL2B__DATA_DIRTY_BIT_RAM_SELECT_INJECT_DELAY__SHIFT 0x17
++#define SQC_DSM_CNTL2B__DATA_BANK_RAM_ENABLE_ERROR_INJECT__SHIFT 0x18
++#define SQC_DSM_CNTL2B__DATA_BANK_RAM_SELECT_INJECT_DELAY__SHIFT 0x1a
++#define SQC_DSM_CNTL2B__INST_TAG_RAM_ENABLE_ERROR_INJECT_MASK 0x00000003L
++#define SQC_DSM_CNTL2B__INST_TAG_RAM_SELECT_INJECT_DELAY_MASK 0x00000004L
++#define SQC_DSM_CNTL2B__INST_UTCL1_MISS_FIFO_ENABLE_ERROR_INJECT_MASK 0x00000018L
++#define SQC_DSM_CNTL2B__INST_UTCL1_MISS_FIFO_SELECT_INJECT_DELAY_MASK 0x00000020L
++#define SQC_DSM_CNTL2B__INST_MISS_FIFO_ENABLE_ERROR_INJECT_MASK 0x000000C0L
++#define SQC_DSM_CNTL2B__INST_MISS_FIFO_SELECT_INJECT_DELAY_MASK 0x00000100L
++#define SQC_DSM_CNTL2B__INST_BANK_RAM_ENABLE_ERROR_INJECT_MASK 0x00000600L
++#define SQC_DSM_CNTL2B__INST_BANK_RAM_SELECT_INJECT_DELAY_MASK 0x00000800L
++#define SQC_DSM_CNTL2B__DATA_TAG_RAM_ENABLE_ERROR_INJECT_MASK 0x00003000L
++#define SQC_DSM_CNTL2B__DATA_TAG_RAM_SELECT_INJECT_DELAY_MASK 0x00004000L
++#define SQC_DSM_CNTL2B__DATA_HIT_FIFO_ENABLE_ERROR_INJECT_MASK 0x00018000L
++#define SQC_DSM_CNTL2B__DATA_HIT_FIFO_SELECT_INJECT_DELAY_MASK 0x00020000L
++#define SQC_DSM_CNTL2B__DATA_MISS_FIFO_ENABLE_ERROR_INJECT_MASK 0x000C0000L
++#define SQC_DSM_CNTL2B__DATA_MISS_FIFO_SELECT_INJECT_DELAY_MASK 0x00100000L
++#define SQC_DSM_CNTL2B__DATA_DIRTY_BIT_RAM_ENABLE_ERROR_INJECT_MASK 0x00600000L
++#define SQC_DSM_CNTL2B__DATA_DIRTY_BIT_RAM_SELECT_INJECT_DELAY_MASK 0x00800000L
++#define SQC_DSM_CNTL2B__DATA_BANK_RAM_ENABLE_ERROR_INJECT_MASK 0x03000000L
++#define SQC_DSM_CNTL2B__DATA_BANK_RAM_SELECT_INJECT_DELAY_MASK 0x04000000L
++//SQC_EDC_FUE_CNTL
++#define SQC_EDC_FUE_CNTL__BLOCK_FUE_FLAGS__SHIFT 0x0
++#define SQC_EDC_FUE_CNTL__FUE_INTERRUPT_ENABLES__SHIFT 0x10
++#define SQC_EDC_FUE_CNTL__BLOCK_FUE_FLAGS_MASK 0x0000FFFFL
++#define SQC_EDC_FUE_CNTL__FUE_INTERRUPT_ENABLES_MASK 0xFFFF0000L
++//SQC_EDC_CNT2
++#define SQC_EDC_CNT2__INST_BANKA_TAG_RAM_SEC_COUNT__SHIFT 0x0
++#define SQC_EDC_CNT2__INST_BANKA_TAG_RAM_DED_COUNT__SHIFT 0x2
++#define SQC_EDC_CNT2__INST_BANKA_BANK_RAM_SEC_COUNT__SHIFT 0x4
++#define SQC_EDC_CNT2__INST_BANKA_BANK_RAM_DED_COUNT__SHIFT 0x6
++#define SQC_EDC_CNT2__DATA_BANKA_TAG_RAM_SEC_COUNT__SHIFT 0x8
++#define SQC_EDC_CNT2__DATA_BANKA_TAG_RAM_DED_COUNT__SHIFT 0xa
++#define SQC_EDC_CNT2__DATA_BANKA_BANK_RAM_SEC_COUNT__SHIFT 0xc
++#define SQC_EDC_CNT2__DATA_BANKA_BANK_RAM_DED_COUNT__SHIFT 0xe
++#define SQC_EDC_CNT2__INST_BANKA_UTCL1_MISS_FIFO_SED_COUNT__SHIFT 0x10
++#define SQC_EDC_CNT2__INST_BANKA_MISS_FIFO_SED_COUNT__SHIFT 0x12
++#define SQC_EDC_CNT2__DATA_BANKA_HIT_FIFO_SED_COUNT__SHIFT 0x14
++#define SQC_EDC_CNT2__DATA_BANKA_MISS_FIFO_SED_COUNT__SHIFT 0x16
++#define SQC_EDC_CNT2__DATA_BANKA_DIRTY_BIT_RAM_SED_COUNT__SHIFT 0x18
++#define SQC_EDC_CNT2__INST_UTCL1_LFIFO_SEC_COUNT__SHIFT 0x1a
++#define SQC_EDC_CNT2__INST_UTCL1_LFIFO_DED_COUNT__SHIFT 0x1c
++#define SQC_EDC_CNT2__INST_BANKA_TAG_RAM_SEC_COUNT_MASK 0x00000003L
++#define SQC_EDC_CNT2__INST_BANKA_TAG_RAM_DED_COUNT_MASK 0x0000000CL
++#define SQC_EDC_CNT2__INST_BANKA_BANK_RAM_SEC_COUNT_MASK 0x00000030L
++#define SQC_EDC_CNT2__INST_BANKA_BANK_RAM_DED_COUNT_MASK 0x000000C0L
++#define SQC_EDC_CNT2__DATA_BANKA_TAG_RAM_SEC_COUNT_MASK 0x00000300L
++#define SQC_EDC_CNT2__DATA_BANKA_TAG_RAM_DED_COUNT_MASK 0x00000C00L
++#define SQC_EDC_CNT2__DATA_BANKA_BANK_RAM_SEC_COUNT_MASK 0x00003000L
++#define SQC_EDC_CNT2__DATA_BANKA_BANK_RAM_DED_COUNT_MASK 0x0000C000L
++#define SQC_EDC_CNT2__INST_BANKA_UTCL1_MISS_FIFO_SED_COUNT_MASK 0x00030000L
++#define SQC_EDC_CNT2__INST_BANKA_MISS_FIFO_SED_COUNT_MASK 0x000C0000L
++#define SQC_EDC_CNT2__DATA_BANKA_HIT_FIFO_SED_COUNT_MASK 0x00300000L
++#define SQC_EDC_CNT2__DATA_BANKA_MISS_FIFO_SED_COUNT_MASK 0x00C00000L
++#define SQC_EDC_CNT2__DATA_BANKA_DIRTY_BIT_RAM_SED_COUNT_MASK 0x03000000L
++#define SQC_EDC_CNT2__INST_UTCL1_LFIFO_SEC_COUNT_MASK 0x0C000000L
++#define SQC_EDC_CNT2__INST_UTCL1_LFIFO_DED_COUNT_MASK 0x30000000L
++//SQC_EDC_CNT3
++#define SQC_EDC_CNT3__INST_BANKB_TAG_RAM_SEC_COUNT__SHIFT 0x0
++#define SQC_EDC_CNT3__INST_BANKB_TAG_RAM_DED_COUNT__SHIFT 0x2
++#define SQC_EDC_CNT3__INST_BANKB_BANK_RAM_SEC_COUNT__SHIFT 0x4
++#define SQC_EDC_CNT3__INST_BANKB_BANK_RAM_DED_COUNT__SHIFT 0x6
++#define SQC_EDC_CNT3__DATA_BANKB_TAG_RAM_SEC_COUNT__SHIFT 0x8
++#define SQC_EDC_CNT3__DATA_BANKB_TAG_RAM_DED_COUNT__SHIFT 0xa
++#define SQC_EDC_CNT3__DATA_BANKB_BANK_RAM_SEC_COUNT__SHIFT 0xc
++#define SQC_EDC_CNT3__DATA_BANKB_BANK_RAM_DED_COUNT__SHIFT 0xe
++#define SQC_EDC_CNT3__INST_BANKB_UTCL1_MISS_FIFO_SED_COUNT__SHIFT 0x10
++#define SQC_EDC_CNT3__INST_BANKB_MISS_FIFO_SED_COUNT__SHIFT 0x12
++#define SQC_EDC_CNT3__DATA_BANKB_HIT_FIFO_SED_COUNT__SHIFT 0x14
++#define SQC_EDC_CNT3__DATA_BANKB_MISS_FIFO_SED_COUNT__SHIFT 0x16
++#define SQC_EDC_CNT3__DATA_BANKB_DIRTY_BIT_RAM_SED_COUNT__SHIFT 0x18
++#define SQC_EDC_CNT3__INST_BANKB_TAG_RAM_SEC_COUNT_MASK 0x00000003L
++#define SQC_EDC_CNT3__INST_BANKB_TAG_RAM_DED_COUNT_MASK 0x0000000CL
++#define SQC_EDC_CNT3__INST_BANKB_BANK_RAM_SEC_COUNT_MASK 0x00000030L
++#define SQC_EDC_CNT3__INST_BANKB_BANK_RAM_DED_COUNT_MASK 0x000000C0L
++#define SQC_EDC_CNT3__DATA_BANKB_TAG_RAM_SEC_COUNT_MASK 0x00000300L
++#define SQC_EDC_CNT3__DATA_BANKB_TAG_RAM_DED_COUNT_MASK 0x00000C00L
++#define SQC_EDC_CNT3__DATA_BANKB_BANK_RAM_SEC_COUNT_MASK 0x00003000L
++#define SQC_EDC_CNT3__DATA_BANKB_BANK_RAM_DED_COUNT_MASK 0x0000C000L
++#define SQC_EDC_CNT3__INST_BANKB_UTCL1_MISS_FIFO_SED_COUNT_MASK 0x00030000L
++#define SQC_EDC_CNT3__INST_BANKB_MISS_FIFO_SED_COUNT_MASK 0x000C0000L
++#define SQC_EDC_CNT3__DATA_BANKB_HIT_FIFO_SED_COUNT_MASK 0x00300000L
++#define SQC_EDC_CNT3__DATA_BANKB_MISS_FIFO_SED_COUNT_MASK 0x00C00000L
++#define SQC_EDC_CNT3__DATA_BANKB_DIRTY_BIT_RAM_SED_COUNT_MASK 0x03000000L
++//SQ_REG_TIMESTAMP
++#define SQ_REG_TIMESTAMP__TIMESTAMP__SHIFT 0x0
++#define SQ_REG_TIMESTAMP__TIMESTAMP_MASK 0x000000FFL
++//SQ_CMD_TIMESTAMP
++#define SQ_CMD_TIMESTAMP__TIMESTAMP__SHIFT 0x0
++#define SQ_CMD_TIMESTAMP__TIMESTAMP_MASK 0x000000FFL
++//SQ_IND_INDEX
++#define SQ_IND_INDEX__WAVE_ID__SHIFT 0x0
++#define SQ_IND_INDEX__SIMD_ID__SHIFT 0x4
++#define SQ_IND_INDEX__THREAD_ID__SHIFT 0x6
++#define SQ_IND_INDEX__AUTO_INCR__SHIFT 0xc
++#define SQ_IND_INDEX__FORCE_READ__SHIFT 0xd
++#define SQ_IND_INDEX__READ_TIMEOUT__SHIFT 0xe
++#define SQ_IND_INDEX__UNINDEXED__SHIFT 0xf
++#define SQ_IND_INDEX__INDEX__SHIFT 0x10
++#define SQ_IND_INDEX__WAVE_ID_MASK 0x0000000FL
++#define SQ_IND_INDEX__SIMD_ID_MASK 0x00000030L
++#define SQ_IND_INDEX__THREAD_ID_MASK 0x00000FC0L
++#define SQ_IND_INDEX__AUTO_INCR_MASK 0x00001000L
++#define SQ_IND_INDEX__FORCE_READ_MASK 0x00002000L
++#define SQ_IND_INDEX__READ_TIMEOUT_MASK 0x00004000L
++#define SQ_IND_INDEX__UNINDEXED_MASK 0x00008000L
++#define SQ_IND_INDEX__INDEX_MASK 0xFFFF0000L
++//SQ_IND_DATA
++#define SQ_IND_DATA__DATA__SHIFT 0x0
++#define SQ_IND_DATA__DATA_MASK 0xFFFFFFFFL
++//SQ_CMD
++#define SQ_CMD__CMD__SHIFT 0x0
++#define SQ_CMD__MODE__SHIFT 0x4
++#define SQ_CMD__CHECK_VMID__SHIFT 0x7
++#define SQ_CMD__DATA__SHIFT 0x8
++#define SQ_CMD__WAVE_ID__SHIFT 0x10
++#define SQ_CMD__SIMD_ID__SHIFT 0x14
++#define SQ_CMD__QUEUE_ID__SHIFT 0x18
++#define SQ_CMD__VM_ID__SHIFT 0x1c
++#define SQ_CMD__CMD_MASK 0x00000007L
++#define SQ_CMD__MODE_MASK 0x00000070L
++#define SQ_CMD__CHECK_VMID_MASK 0x00000080L
++#define SQ_CMD__DATA_MASK 0x00000F00L
++#define SQ_CMD__WAVE_ID_MASK 0x000F0000L
++#define SQ_CMD__SIMD_ID_MASK 0x00300000L
++#define SQ_CMD__QUEUE_ID_MASK 0x07000000L
++#define SQ_CMD__VM_ID_MASK 0xF0000000L
++//SQ_TIME_HI
++#define SQ_TIME_HI__TIME__SHIFT 0x0
++#define SQ_TIME_HI__TIME_MASK 0xFFFFFFFFL
++//SQ_TIME_LO
++#define SQ_TIME_LO__TIME__SHIFT 0x0
++#define SQ_TIME_LO__TIME_MASK 0xFFFFFFFFL
++//SQ_DS_0
++#define SQ_DS_0__OFFSET0__SHIFT 0x0
++#define SQ_DS_0__OFFSET1__SHIFT 0x8
++#define SQ_DS_0__GDS__SHIFT 0x10
++#define SQ_DS_0__OP__SHIFT 0x11
++#define SQ_DS_0__ENCODING__SHIFT 0x1a
++#define SQ_DS_0__OFFSET0_MASK 0x000000FFL
++#define SQ_DS_0__OFFSET1_MASK 0x0000FF00L
++#define SQ_DS_0__GDS_MASK 0x00010000L
++#define SQ_DS_0__OP_MASK 0x01FE0000L
++#define SQ_DS_0__ENCODING_MASK 0xFC000000L
++//SQ_DS_1
++#define SQ_DS_1__ADDR__SHIFT 0x0
++#define SQ_DS_1__DATA0__SHIFT 0x8
++#define SQ_DS_1__DATA1__SHIFT 0x10
++#define SQ_DS_1__VDST__SHIFT 0x18
++#define SQ_DS_1__ADDR_MASK 0x000000FFL
++#define SQ_DS_1__DATA0_MASK 0x0000FF00L
++#define SQ_DS_1__DATA1_MASK 0x00FF0000L
++#define SQ_DS_1__VDST_MASK 0xFF000000L
++//SQ_EXP_0
++#define SQ_EXP_0__EN__SHIFT 0x0
++#define SQ_EXP_0__TGT__SHIFT 0x4
++#define SQ_EXP_0__COMPR__SHIFT 0xa
++#define SQ_EXP_0__DONE__SHIFT 0xb
++#define SQ_EXP_0__VM__SHIFT 0xc
++#define SQ_EXP_0__ENCODING__SHIFT 0x1a
++#define SQ_EXP_0__EN_MASK 0x0000000FL
++#define SQ_EXP_0__TGT_MASK 0x000003F0L
++#define SQ_EXP_0__COMPR_MASK 0x00000400L
++#define SQ_EXP_0__DONE_MASK 0x00000800L
++#define SQ_EXP_0__VM_MASK 0x00001000L
++#define SQ_EXP_0__ENCODING_MASK 0xFC000000L
++//SQ_EXP_1
++#define SQ_EXP_1__VSRC0__SHIFT 0x0
++#define SQ_EXP_1__VSRC1__SHIFT 0x8
++#define SQ_EXP_1__VSRC2__SHIFT 0x10
++#define SQ_EXP_1__VSRC3__SHIFT 0x18
++#define SQ_EXP_1__VSRC0_MASK 0x000000FFL
++#define SQ_EXP_1__VSRC1_MASK 0x0000FF00L
++#define SQ_EXP_1__VSRC2_MASK 0x00FF0000L
++#define SQ_EXP_1__VSRC3_MASK 0xFF000000L
++//SQ_FLAT_0
++#define SQ_FLAT_0__OFFSET__SHIFT 0x0
++#define SQ_FLAT_0__LDS__SHIFT 0xd
++#define SQ_FLAT_0__SEG__SHIFT 0xe
++#define SQ_FLAT_0__GLC__SHIFT 0x10
++#define SQ_FLAT_0__SLC__SHIFT 0x11
++#define SQ_FLAT_0__OP__SHIFT 0x12
++#define SQ_FLAT_0__ENCODING__SHIFT 0x1a
++#define SQ_FLAT_0__OFFSET_MASK 0x00000FFFL
++#define SQ_FLAT_0__LDS_MASK 0x00002000L
++#define SQ_FLAT_0__SEG_MASK 0x0000C000L
++#define SQ_FLAT_0__GLC_MASK 0x00010000L
++#define SQ_FLAT_0__SLC_MASK 0x00020000L
++#define SQ_FLAT_0__OP_MASK 0x01FC0000L
++#define SQ_FLAT_0__ENCODING_MASK 0xFC000000L
++//SQ_FLAT_1
++#define SQ_FLAT_1__ADDR__SHIFT 0x0
++#define SQ_FLAT_1__DATA__SHIFT 0x8
++#define SQ_FLAT_1__SADDR__SHIFT 0x10
++#define SQ_FLAT_1__NV__SHIFT 0x17
++#define SQ_FLAT_1__VDST__SHIFT 0x18
++#define SQ_FLAT_1__ADDR_MASK 0x000000FFL
++#define SQ_FLAT_1__DATA_MASK 0x0000FF00L
++#define SQ_FLAT_1__SADDR_MASK 0x007F0000L
++#define SQ_FLAT_1__NV_MASK 0x00800000L
++#define SQ_FLAT_1__VDST_MASK 0xFF000000L
++//SQ_GLBL_0
++#define SQ_GLBL_0__OFFSET__SHIFT 0x0
++#define SQ_GLBL_0__LDS__SHIFT 0xd
++#define SQ_GLBL_0__SEG__SHIFT 0xe
++#define SQ_GLBL_0__GLC__SHIFT 0x10
++#define SQ_GLBL_0__SLC__SHIFT 0x11
++#define SQ_GLBL_0__OP__SHIFT 0x12
++#define SQ_GLBL_0__ENCODING__SHIFT 0x1a
++#define SQ_GLBL_0__OFFSET_MASK 0x00001FFFL
++#define SQ_GLBL_0__LDS_MASK 0x00002000L
++#define SQ_GLBL_0__SEG_MASK 0x0000C000L
++#define SQ_GLBL_0__GLC_MASK 0x00010000L
++#define SQ_GLBL_0__SLC_MASK 0x00020000L
++#define SQ_GLBL_0__OP_MASK 0x01FC0000L
++#define SQ_GLBL_0__ENCODING_MASK 0xFC000000L
++//SQ_GLBL_1
++#define SQ_GLBL_1__ADDR__SHIFT 0x0
++#define SQ_GLBL_1__DATA__SHIFT 0x8
++#define SQ_GLBL_1__SADDR__SHIFT 0x10
++#define SQ_GLBL_1__NV__SHIFT 0x17
++#define SQ_GLBL_1__VDST__SHIFT 0x18
++#define SQ_GLBL_1__ADDR_MASK 0x000000FFL
++#define SQ_GLBL_1__DATA_MASK 0x0000FF00L
++#define SQ_GLBL_1__SADDR_MASK 0x007F0000L
++#define SQ_GLBL_1__NV_MASK 0x00800000L
++#define SQ_GLBL_1__VDST_MASK 0xFF000000L
++//SQ_INST
++#define SQ_INST__ENCODING__SHIFT 0x0
++#define SQ_INST__ENCODING_MASK 0xFFFFFFFFL
++//SQ_MIMG_0
++#define SQ_MIMG_0__OPM__SHIFT 0x0
++#define SQ_MIMG_0__DMASK__SHIFT 0x8
++#define SQ_MIMG_0__UNORM__SHIFT 0xc
++#define SQ_MIMG_0__GLC__SHIFT 0xd
++#define SQ_MIMG_0__DA__SHIFT 0xe
++#define SQ_MIMG_0__A16__SHIFT 0xf
++#define SQ_MIMG_0__TFE__SHIFT 0x10
++#define SQ_MIMG_0__LWE__SHIFT 0x11
++#define SQ_MIMG_0__OP__SHIFT 0x12
++#define SQ_MIMG_0__SLC__SHIFT 0x19
++#define SQ_MIMG_0__ENCODING__SHIFT 0x1a
++#define SQ_MIMG_0__OPM_MASK 0x00000001L
++#define SQ_MIMG_0__DMASK_MASK 0x00000F00L
++#define SQ_MIMG_0__UNORM_MASK 0x00001000L
++#define SQ_MIMG_0__GLC_MASK 0x00002000L
++#define SQ_MIMG_0__DA_MASK 0x00004000L
++#define SQ_MIMG_0__A16_MASK 0x00008000L
++#define SQ_MIMG_0__TFE_MASK 0x00010000L
++#define SQ_MIMG_0__LWE_MASK 0x00020000L
++#define SQ_MIMG_0__OP_MASK 0x01FC0000L
++#define SQ_MIMG_0__SLC_MASK 0x02000000L
++#define SQ_MIMG_0__ENCODING_MASK 0xFC000000L
++//SQ_MIMG_1
++#define SQ_MIMG_1__VADDR__SHIFT 0x0
++#define SQ_MIMG_1__VDATA__SHIFT 0x8
++#define SQ_MIMG_1__SRSRC__SHIFT 0x10
++#define SQ_MIMG_1__SSAMP__SHIFT 0x15
++#define SQ_MIMG_1__D16__SHIFT 0x1f
++#define SQ_MIMG_1__VADDR_MASK 0x000000FFL
++#define SQ_MIMG_1__VDATA_MASK 0x0000FF00L
++#define SQ_MIMG_1__SRSRC_MASK 0x001F0000L
++#define SQ_MIMG_1__SSAMP_MASK 0x03E00000L
++#define SQ_MIMG_1__D16_MASK 0x80000000L
++//SQ_MTBUF_0
++#define SQ_MTBUF_0__OFFSET__SHIFT 0x0
++#define SQ_MTBUF_0__OFFEN__SHIFT 0xc
++#define SQ_MTBUF_0__IDXEN__SHIFT 0xd
++#define SQ_MTBUF_0__GLC__SHIFT 0xe
++#define SQ_MTBUF_0__OP__SHIFT 0xf
++#define SQ_MTBUF_0__DFMT__SHIFT 0x13
++#define SQ_MTBUF_0__NFMT__SHIFT 0x17
++#define SQ_MTBUF_0__ENCODING__SHIFT 0x1a
++#define SQ_MTBUF_0__OFFSET_MASK 0x00000FFFL
++#define SQ_MTBUF_0__OFFEN_MASK 0x00001000L
++#define SQ_MTBUF_0__IDXEN_MASK 0x00002000L
++#define SQ_MTBUF_0__GLC_MASK 0x00004000L
++#define SQ_MTBUF_0__OP_MASK 0x00078000L
++#define SQ_MTBUF_0__DFMT_MASK 0x00780000L
++#define SQ_MTBUF_0__NFMT_MASK 0x03800000L
++#define SQ_MTBUF_0__ENCODING_MASK 0xFC000000L
++//SQ_MTBUF_1
++#define SQ_MTBUF_1__VADDR__SHIFT 0x0
++#define SQ_MTBUF_1__VDATA__SHIFT 0x8
++#define SQ_MTBUF_1__SRSRC__SHIFT 0x10
++#define SQ_MTBUF_1__SLC__SHIFT 0x16
++#define SQ_MTBUF_1__TFE__SHIFT 0x17
++#define SQ_MTBUF_1__SOFFSET__SHIFT 0x18
++#define SQ_MTBUF_1__VADDR_MASK 0x000000FFL
++#define SQ_MTBUF_1__VDATA_MASK 0x0000FF00L
++#define SQ_MTBUF_1__SRSRC_MASK 0x001F0000L
++#define SQ_MTBUF_1__SLC_MASK 0x00400000L
++#define SQ_MTBUF_1__TFE_MASK 0x00800000L
++#define SQ_MTBUF_1__SOFFSET_MASK 0xFF000000L
++//SQ_MUBUF_0
++#define SQ_MUBUF_0__OFFSET__SHIFT 0x0
++#define SQ_MUBUF_0__OFFEN__SHIFT 0xc
++#define SQ_MUBUF_0__IDXEN__SHIFT 0xd
++#define SQ_MUBUF_0__GLC__SHIFT 0xe
++#define SQ_MUBUF_0__LDS__SHIFT 0x10
++#define SQ_MUBUF_0__SLC__SHIFT 0x11
++#define SQ_MUBUF_0__OP__SHIFT 0x12
++#define SQ_MUBUF_0__ENCODING__SHIFT 0x1a
++#define SQ_MUBUF_0__OFFSET_MASK 0x00000FFFL
++#define SQ_MUBUF_0__OFFEN_MASK 0x00001000L
++#define SQ_MUBUF_0__IDXEN_MASK 0x00002000L
++#define SQ_MUBUF_0__GLC_MASK 0x00004000L
++#define SQ_MUBUF_0__LDS_MASK 0x00010000L
++#define SQ_MUBUF_0__SLC_MASK 0x00020000L
++#define SQ_MUBUF_0__OP_MASK 0x01FC0000L
++#define SQ_MUBUF_0__ENCODING_MASK 0xFC000000L
++//SQ_MUBUF_1
++#define SQ_MUBUF_1__VADDR__SHIFT 0x0
++#define SQ_MUBUF_1__VDATA__SHIFT 0x8
++#define SQ_MUBUF_1__SRSRC__SHIFT 0x10
++#define SQ_MUBUF_1__TFE__SHIFT 0x17
++#define SQ_MUBUF_1__SOFFSET__SHIFT 0x18
++#define SQ_MUBUF_1__VADDR_MASK 0x000000FFL
++#define SQ_MUBUF_1__VDATA_MASK 0x0000FF00L
++#define SQ_MUBUF_1__SRSRC_MASK 0x001F0000L
++#define SQ_MUBUF_1__TFE_MASK 0x00800000L
++#define SQ_MUBUF_1__SOFFSET_MASK 0xFF000000L
++//SQ_SCRATCH_0
++#define SQ_SCRATCH_0__OFFSET__SHIFT 0x0
++#define SQ_SCRATCH_0__LDS__SHIFT 0xd
++#define SQ_SCRATCH_0__SEG__SHIFT 0xe
++#define SQ_SCRATCH_0__GLC__SHIFT 0x10
++#define SQ_SCRATCH_0__SLC__SHIFT 0x11
++#define SQ_SCRATCH_0__OP__SHIFT 0x12
++#define SQ_SCRATCH_0__ENCODING__SHIFT 0x1a
++#define SQ_SCRATCH_0__OFFSET_MASK 0x00001FFFL
++#define SQ_SCRATCH_0__LDS_MASK 0x00002000L
++#define SQ_SCRATCH_0__SEG_MASK 0x0000C000L
++#define SQ_SCRATCH_0__GLC_MASK 0x00010000L
++#define SQ_SCRATCH_0__SLC_MASK 0x00020000L
++#define SQ_SCRATCH_0__OP_MASK 0x01FC0000L
++#define SQ_SCRATCH_0__ENCODING_MASK 0xFC000000L
++//SQ_SCRATCH_1
++#define SQ_SCRATCH_1__ADDR__SHIFT 0x0
++#define SQ_SCRATCH_1__DATA__SHIFT 0x8
++#define SQ_SCRATCH_1__SADDR__SHIFT 0x10
++#define SQ_SCRATCH_1__NV__SHIFT 0x17
++#define SQ_SCRATCH_1__VDST__SHIFT 0x18
++#define SQ_SCRATCH_1__ADDR_MASK 0x000000FFL
++#define SQ_SCRATCH_1__DATA_MASK 0x0000FF00L
++#define SQ_SCRATCH_1__SADDR_MASK 0x007F0000L
++#define SQ_SCRATCH_1__NV_MASK 0x00800000L
++#define SQ_SCRATCH_1__VDST_MASK 0xFF000000L
++//SQ_SMEM_0
++#define SQ_SMEM_0__SBASE__SHIFT 0x0
++#define SQ_SMEM_0__SDATA__SHIFT 0x6
++#define SQ_SMEM_0__SOFFSET_EN__SHIFT 0xe
++#define SQ_SMEM_0__NV__SHIFT 0xf
++#define SQ_SMEM_0__GLC__SHIFT 0x10
++#define SQ_SMEM_0__IMM__SHIFT 0x11
++#define SQ_SMEM_0__OP__SHIFT 0x12
++#define SQ_SMEM_0__ENCODING__SHIFT 0x1a
++#define SQ_SMEM_0__SBASE_MASK 0x0000003FL
++#define SQ_SMEM_0__SDATA_MASK 0x00001FC0L
++#define SQ_SMEM_0__SOFFSET_EN_MASK 0x00004000L
++#define SQ_SMEM_0__NV_MASK 0x00008000L
++#define SQ_SMEM_0__GLC_MASK 0x00010000L
++#define SQ_SMEM_0__IMM_MASK 0x00020000L
++#define SQ_SMEM_0__OP_MASK 0x03FC0000L
++#define SQ_SMEM_0__ENCODING_MASK 0xFC000000L
++//SQ_SMEM_1
++#define SQ_SMEM_1__OFFSET__SHIFT 0x0
++#define SQ_SMEM_1__SOFFSET__SHIFT 0x19
++#define SQ_SMEM_1__OFFSET_MASK 0x001FFFFFL
++#define SQ_SMEM_1__SOFFSET_MASK 0xFE000000L
++//SQ_SOP1
++#define SQ_SOP1__SSRC0__SHIFT 0x0
++#define SQ_SOP1__OP__SHIFT 0x8
++#define SQ_SOP1__SDST__SHIFT 0x10
++#define SQ_SOP1__ENCODING__SHIFT 0x17
++#define SQ_SOP1__SSRC0_MASK 0x000000FFL
++#define SQ_SOP1__OP_MASK 0x0000FF00L
++#define SQ_SOP1__SDST_MASK 0x007F0000L
++#define SQ_SOP1__ENCODING_MASK 0xFF800000L
++//SQ_SOP2
++#define SQ_SOP2__SSRC0__SHIFT 0x0
++#define SQ_SOP2__SSRC1__SHIFT 0x8
++#define SQ_SOP2__SDST__SHIFT 0x10
++#define SQ_SOP2__OP__SHIFT 0x17
++#define SQ_SOP2__ENCODING__SHIFT 0x1e
++#define SQ_SOP2__SSRC0_MASK 0x000000FFL
++#define SQ_SOP2__SSRC1_MASK 0x0000FF00L
++#define SQ_SOP2__SDST_MASK 0x007F0000L
++#define SQ_SOP2__OP_MASK 0x3F800000L
++#define SQ_SOP2__ENCODING_MASK 0xC0000000L
++//SQ_SOPC
++#define SQ_SOPC__SSRC0__SHIFT 0x0
++#define SQ_SOPC__SSRC1__SHIFT 0x8
++#define SQ_SOPC__OP__SHIFT 0x10
++#define SQ_SOPC__ENCODING__SHIFT 0x17
++#define SQ_SOPC__SSRC0_MASK 0x000000FFL
++#define SQ_SOPC__SSRC1_MASK 0x0000FF00L
++#define SQ_SOPC__OP_MASK 0x007F0000L
++#define SQ_SOPC__ENCODING_MASK 0xFF800000L
++//SQ_SOPK
++#define SQ_SOPK__SIMM16__SHIFT 0x0
++#define SQ_SOPK__SDST__SHIFT 0x10
++#define SQ_SOPK__OP__SHIFT 0x17
++#define SQ_SOPK__ENCODING__SHIFT 0x1c
++#define SQ_SOPK__SIMM16_MASK 0x0000FFFFL
++#define SQ_SOPK__SDST_MASK 0x007F0000L
++#define SQ_SOPK__OP_MASK 0x0F800000L
++#define SQ_SOPK__ENCODING_MASK 0xF0000000L
++//SQ_SOPP
++#define SQ_SOPP__SIMM16__SHIFT 0x0
++#define SQ_SOPP__OP__SHIFT 0x10
++#define SQ_SOPP__ENCODING__SHIFT 0x17
++#define SQ_SOPP__SIMM16_MASK 0x0000FFFFL
++#define SQ_SOPP__OP_MASK 0x007F0000L
++#define SQ_SOPP__ENCODING_MASK 0xFF800000L
++//SQ_VINTRP
++#define SQ_VINTRP__VSRC__SHIFT 0x0
++#define SQ_VINTRP__ATTRCHAN__SHIFT 0x8
++#define SQ_VINTRP__ATTR__SHIFT 0xa
++#define SQ_VINTRP__OP__SHIFT 0x10
++#define SQ_VINTRP__VDST__SHIFT 0x12
++#define SQ_VINTRP__ENCODING__SHIFT 0x1a
++#define SQ_VINTRP__VSRC_MASK 0x000000FFL
++#define SQ_VINTRP__ATTRCHAN_MASK 0x00000300L
++#define SQ_VINTRP__ATTR_MASK 0x0000FC00L
++#define SQ_VINTRP__OP_MASK 0x00030000L
++#define SQ_VINTRP__VDST_MASK 0x03FC0000L
++#define SQ_VINTRP__ENCODING_MASK 0xFC000000L
++//SQ_VOP1
++#define SQ_VOP1__SRC0__SHIFT 0x0
++#define SQ_VOP1__OP__SHIFT 0x9
++#define SQ_VOP1__VDST__SHIFT 0x11
++#define SQ_VOP1__ENCODING__SHIFT 0x19
++#define SQ_VOP1__SRC0_MASK 0x000001FFL
++#define SQ_VOP1__OP_MASK 0x0001FE00L
++#define SQ_VOP1__VDST_MASK 0x01FE0000L
++#define SQ_VOP1__ENCODING_MASK 0xFE000000L
++//SQ_VOP2
++#define SQ_VOP2__SRC0__SHIFT 0x0
++#define SQ_VOP2__VSRC1__SHIFT 0x9
++#define SQ_VOP2__VDST__SHIFT 0x11
++#define SQ_VOP2__OP__SHIFT 0x19
++#define SQ_VOP2__ENCODING__SHIFT 0x1f
++#define SQ_VOP2__SRC0_MASK 0x000001FFL
++#define SQ_VOP2__VSRC1_MASK 0x0001FE00L
++#define SQ_VOP2__VDST_MASK 0x01FE0000L
++#define SQ_VOP2__OP_MASK 0x7E000000L
++#define SQ_VOP2__ENCODING_MASK 0x80000000L
++//SQ_VOP3P_0
++#define SQ_VOP3P_0__VDST__SHIFT 0x0
++#define SQ_VOP3P_0__NEG_HI__SHIFT 0x8
++#define SQ_VOP3P_0__OP_SEL__SHIFT 0xb
++#define SQ_VOP3P_0__OP_SEL_HI_2__SHIFT 0xe
++#define SQ_VOP3P_0__CLAMP__SHIFT 0xf
++#define SQ_VOP3P_0__OP__SHIFT 0x10
++#define SQ_VOP3P_0__ENCODING__SHIFT 0x17
++#define SQ_VOP3P_0__VDST_MASK 0x000000FFL
++#define SQ_VOP3P_0__NEG_HI_MASK 0x00000700L
++#define SQ_VOP3P_0__OP_SEL_MASK 0x00003800L
++#define SQ_VOP3P_0__OP_SEL_HI_2_MASK 0x00004000L
++#define SQ_VOP3P_0__CLAMP_MASK 0x00008000L
++#define SQ_VOP3P_0__OP_MASK 0x007F0000L
++#define SQ_VOP3P_0__ENCODING_MASK 0xFF800000L
++//SQ_VOP3P_1
++#define SQ_VOP3P_1__SRC0__SHIFT 0x0
++#define SQ_VOP3P_1__SRC1__SHIFT 0x9
++#define SQ_VOP3P_1__SRC2__SHIFT 0x12
++#define SQ_VOP3P_1__OP_SEL_HI__SHIFT 0x1b
++#define SQ_VOP3P_1__NEG__SHIFT 0x1d
++#define SQ_VOP3P_1__SRC0_MASK 0x000001FFL
++#define SQ_VOP3P_1__SRC1_MASK 0x0003FE00L
++#define SQ_VOP3P_1__SRC2_MASK 0x07FC0000L
++#define SQ_VOP3P_1__OP_SEL_HI_MASK 0x18000000L
++#define SQ_VOP3P_1__NEG_MASK 0xE0000000L
++//SQ_VOP3_0
++#define SQ_VOP3_0__VDST__SHIFT 0x0
++#define SQ_VOP3_0__ABS__SHIFT 0x8
++#define SQ_VOP3_0__OP_SEL__SHIFT 0xb
++#define SQ_VOP3_0__CLAMP__SHIFT 0xf
++#define SQ_VOP3_0__OP__SHIFT 0x10
++#define SQ_VOP3_0__ENCODING__SHIFT 0x1a
++#define SQ_VOP3_0__VDST_MASK 0x000000FFL
++#define SQ_VOP3_0__ABS_MASK 0x00000700L
++#define SQ_VOP3_0__OP_SEL_MASK 0x00007800L
++#define SQ_VOP3_0__CLAMP_MASK 0x00008000L
++#define SQ_VOP3_0__OP_MASK 0x03FF0000L
++#define SQ_VOP3_0__ENCODING_MASK 0xFC000000L
++//SQ_VOP3_0_SDST_ENC
++#define SQ_VOP3_0_SDST_ENC__VDST__SHIFT 0x0
++#define SQ_VOP3_0_SDST_ENC__SDST__SHIFT 0x8
++#define SQ_VOP3_0_SDST_ENC__CLAMP__SHIFT 0xf
++#define SQ_VOP3_0_SDST_ENC__OP__SHIFT 0x10
++#define SQ_VOP3_0_SDST_ENC__ENCODING__SHIFT 0x1a
++#define SQ_VOP3_0_SDST_ENC__VDST_MASK 0x000000FFL
++#define SQ_VOP3_0_SDST_ENC__SDST_MASK 0x00007F00L
++#define SQ_VOP3_0_SDST_ENC__CLAMP_MASK 0x00008000L
++#define SQ_VOP3_0_SDST_ENC__OP_MASK 0x03FF0000L
++#define SQ_VOP3_0_SDST_ENC__ENCODING_MASK 0xFC000000L
++//SQ_VOP3_1
++#define SQ_VOP3_1__SRC0__SHIFT 0x0
++#define SQ_VOP3_1__SRC1__SHIFT 0x9
++#define SQ_VOP3_1__SRC2__SHIFT 0x12
++#define SQ_VOP3_1__OMOD__SHIFT 0x1b
++#define SQ_VOP3_1__NEG__SHIFT 0x1d
++#define SQ_VOP3_1__SRC0_MASK 0x000001FFL
++#define SQ_VOP3_1__SRC1_MASK 0x0003FE00L
++#define SQ_VOP3_1__SRC2_MASK 0x07FC0000L
++#define SQ_VOP3_1__OMOD_MASK 0x18000000L
++#define SQ_VOP3_1__NEG_MASK 0xE0000000L
++//SQ_VOPC
++#define SQ_VOPC__SRC0__SHIFT 0x0
++#define SQ_VOPC__VSRC1__SHIFT 0x9
++#define SQ_VOPC__OP__SHIFT 0x11
++#define SQ_VOPC__ENCODING__SHIFT 0x19
++#define SQ_VOPC__SRC0_MASK 0x000001FFL
++#define SQ_VOPC__VSRC1_MASK 0x0001FE00L
++#define SQ_VOPC__OP_MASK 0x01FE0000L
++#define SQ_VOPC__ENCODING_MASK 0xFE000000L
++//SQ_VOP_DPP
++#define SQ_VOP_DPP__SRC0__SHIFT 0x0
++#define SQ_VOP_DPP__DPP_CTRL__SHIFT 0x8
++#define SQ_VOP_DPP__BOUND_CTRL__SHIFT 0x13
++#define SQ_VOP_DPP__SRC0_NEG__SHIFT 0x14
++#define SQ_VOP_DPP__SRC0_ABS__SHIFT 0x15
++#define SQ_VOP_DPP__SRC1_NEG__SHIFT 0x16
++#define SQ_VOP_DPP__SRC1_ABS__SHIFT 0x17
++#define SQ_VOP_DPP__BANK_MASK__SHIFT 0x18
++#define SQ_VOP_DPP__ROW_MASK__SHIFT 0x1c
++#define SQ_VOP_DPP__SRC0_MASK 0x000000FFL
++#define SQ_VOP_DPP__DPP_CTRL_MASK 0x0001FF00L
++#define SQ_VOP_DPP__BOUND_CTRL_MASK 0x00080000L
++#define SQ_VOP_DPP__SRC0_NEG_MASK 0x00100000L
++#define SQ_VOP_DPP__SRC0_ABS_MASK 0x00200000L
++#define SQ_VOP_DPP__SRC1_NEG_MASK 0x00400000L
++#define SQ_VOP_DPP__SRC1_ABS_MASK 0x00800000L
++#define SQ_VOP_DPP__BANK_MASK_MASK 0x0F000000L
++#define SQ_VOP_DPP__ROW_MASK_MASK 0xF0000000L
++//SQ_VOP_SDWA
++#define SQ_VOP_SDWA__SRC0__SHIFT 0x0
++#define SQ_VOP_SDWA__DST_SEL__SHIFT 0x8
++#define SQ_VOP_SDWA__DST_UNUSED__SHIFT 0xb
++#define SQ_VOP_SDWA__CLAMP__SHIFT 0xd
++#define SQ_VOP_SDWA__OMOD__SHIFT 0xe
++#define SQ_VOP_SDWA__SRC0_SEL__SHIFT 0x10
++#define SQ_VOP_SDWA__SRC0_SEXT__SHIFT 0x13
++#define SQ_VOP_SDWA__SRC0_NEG__SHIFT 0x14
++#define SQ_VOP_SDWA__SRC0_ABS__SHIFT 0x15
++#define SQ_VOP_SDWA__S0__SHIFT 0x17
++#define SQ_VOP_SDWA__SRC1_SEL__SHIFT 0x18
++#define SQ_VOP_SDWA__SRC1_SEXT__SHIFT 0x1b
++#define SQ_VOP_SDWA__SRC1_NEG__SHIFT 0x1c
++#define SQ_VOP_SDWA__SRC1_ABS__SHIFT 0x1d
++#define SQ_VOP_SDWA__S1__SHIFT 0x1f
++#define SQ_VOP_SDWA__SRC0_MASK 0x000000FFL
++#define SQ_VOP_SDWA__DST_SEL_MASK 0x00000700L
++#define SQ_VOP_SDWA__DST_UNUSED_MASK 0x00001800L
++#define SQ_VOP_SDWA__CLAMP_MASK 0x00002000L
++#define SQ_VOP_SDWA__OMOD_MASK 0x0000C000L
++#define SQ_VOP_SDWA__SRC0_SEL_MASK 0x00070000L
++#define SQ_VOP_SDWA__SRC0_SEXT_MASK 0x00080000L
++#define SQ_VOP_SDWA__SRC0_NEG_MASK 0x00100000L
++#define SQ_VOP_SDWA__SRC0_ABS_MASK 0x00200000L
++#define SQ_VOP_SDWA__S0_MASK 0x00800000L
++#define SQ_VOP_SDWA__SRC1_SEL_MASK 0x07000000L
++#define SQ_VOP_SDWA__SRC1_SEXT_MASK 0x08000000L
++#define SQ_VOP_SDWA__SRC1_NEG_MASK 0x10000000L
++#define SQ_VOP_SDWA__SRC1_ABS_MASK 0x20000000L
++#define SQ_VOP_SDWA__S1_MASK 0x80000000L
++//SQ_VOP_SDWA_SDST_ENC
++#define SQ_VOP_SDWA_SDST_ENC__SRC0__SHIFT 0x0
++#define SQ_VOP_SDWA_SDST_ENC__SDST__SHIFT 0x8
++#define SQ_VOP_SDWA_SDST_ENC__SD__SHIFT 0xf
++#define SQ_VOP_SDWA_SDST_ENC__SRC0_SEL__SHIFT 0x10
++#define SQ_VOP_SDWA_SDST_ENC__SRC0_SEXT__SHIFT 0x13
++#define SQ_VOP_SDWA_SDST_ENC__SRC0_NEG__SHIFT 0x14
++#define SQ_VOP_SDWA_SDST_ENC__SRC0_ABS__SHIFT 0x15
++#define SQ_VOP_SDWA_SDST_ENC__S0__SHIFT 0x17
++#define SQ_VOP_SDWA_SDST_ENC__SRC1_SEL__SHIFT 0x18
++#define SQ_VOP_SDWA_SDST_ENC__SRC1_SEXT__SHIFT 0x1b
++#define SQ_VOP_SDWA_SDST_ENC__SRC1_NEG__SHIFT 0x1c
++#define SQ_VOP_SDWA_SDST_ENC__SRC1_ABS__SHIFT 0x1d
++#define SQ_VOP_SDWA_SDST_ENC__S1__SHIFT 0x1f
++#define SQ_VOP_SDWA_SDST_ENC__SRC0_MASK 0x000000FFL
++#define SQ_VOP_SDWA_SDST_ENC__SDST_MASK 0x00007F00L
++#define SQ_VOP_SDWA_SDST_ENC__SD_MASK 0x00008000L
++#define SQ_VOP_SDWA_SDST_ENC__SRC0_SEL_MASK 0x00070000L
++#define SQ_VOP_SDWA_SDST_ENC__SRC0_SEXT_MASK 0x00080000L
++#define SQ_VOP_SDWA_SDST_ENC__SRC0_NEG_MASK 0x00100000L
++#define SQ_VOP_SDWA_SDST_ENC__SRC0_ABS_MASK 0x00200000L
++#define SQ_VOP_SDWA_SDST_ENC__S0_MASK 0x00800000L
++#define SQ_VOP_SDWA_SDST_ENC__SRC1_SEL_MASK 0x07000000L
++#define SQ_VOP_SDWA_SDST_ENC__SRC1_SEXT_MASK 0x08000000L
++#define SQ_VOP_SDWA_SDST_ENC__SRC1_NEG_MASK 0x10000000L
++#define SQ_VOP_SDWA_SDST_ENC__SRC1_ABS_MASK 0x20000000L
++#define SQ_VOP_SDWA_SDST_ENC__S1_MASK 0x80000000L
++//SQ_LB_CTR_CTRL
++#define SQ_LB_CTR_CTRL__START__SHIFT 0x0
++#define SQ_LB_CTR_CTRL__LOAD__SHIFT 0x1
++#define SQ_LB_CTR_CTRL__CLEAR__SHIFT 0x2
++#define SQ_LB_CTR_CTRL__START_MASK 0x00000001L
++#define SQ_LB_CTR_CTRL__LOAD_MASK 0x00000002L
++#define SQ_LB_CTR_CTRL__CLEAR_MASK 0x00000004L
++//SQ_LB_DATA0
++#define SQ_LB_DATA0__DATA__SHIFT 0x0
++#define SQ_LB_DATA0__DATA_MASK 0xFFFFFFFFL
++//SQ_LB_DATA1
++#define SQ_LB_DATA1__DATA__SHIFT 0x0
++#define SQ_LB_DATA1__DATA_MASK 0xFFFFFFFFL
++//SQ_LB_DATA2
++#define SQ_LB_DATA2__DATA__SHIFT 0x0
++#define SQ_LB_DATA2__DATA_MASK 0xFFFFFFFFL
++//SQ_LB_DATA3
++#define SQ_LB_DATA3__DATA__SHIFT 0x0
++#define SQ_LB_DATA3__DATA_MASK 0xFFFFFFFFL
++//SQ_LB_CTR_SEL
++#define SQ_LB_CTR_SEL__SEL0__SHIFT 0x0
++#define SQ_LB_CTR_SEL__SEL1__SHIFT 0x4
++#define SQ_LB_CTR_SEL__SEL2__SHIFT 0x8
++#define SQ_LB_CTR_SEL__SEL3__SHIFT 0xc
++#define SQ_LB_CTR_SEL__SEL0_MASK 0x0000000FL
++#define SQ_LB_CTR_SEL__SEL1_MASK 0x000000F0L
++#define SQ_LB_CTR_SEL__SEL2_MASK 0x00000F00L
++#define SQ_LB_CTR_SEL__SEL3_MASK 0x0000F000L
++//SQ_LB_CTR0_CU
++#define SQ_LB_CTR0_CU__SH0_MASK__SHIFT 0x0
++#define SQ_LB_CTR0_CU__SH1_MASK__SHIFT 0x10
++#define SQ_LB_CTR0_CU__SH0_MASK_MASK 0x0000FFFFL
++#define SQ_LB_CTR0_CU__SH1_MASK_MASK 0xFFFF0000L
++//SQ_LB_CTR1_CU
++#define SQ_LB_CTR1_CU__SH0_MASK__SHIFT 0x0
++#define SQ_LB_CTR1_CU__SH1_MASK__SHIFT 0x10
++#define SQ_LB_CTR1_CU__SH0_MASK_MASK 0x0000FFFFL
++#define SQ_LB_CTR1_CU__SH1_MASK_MASK 0xFFFF0000L
++//SQ_LB_CTR2_CU
++#define SQ_LB_CTR2_CU__SH0_MASK__SHIFT 0x0
++#define SQ_LB_CTR2_CU__SH1_MASK__SHIFT 0x10
++#define SQ_LB_CTR2_CU__SH0_MASK_MASK 0x0000FFFFL
++#define SQ_LB_CTR2_CU__SH1_MASK_MASK 0xFFFF0000L
++//SQ_LB_CTR3_CU
++#define SQ_LB_CTR3_CU__SH0_MASK__SHIFT 0x0
++#define SQ_LB_CTR3_CU__SH1_MASK__SHIFT 0x10
++#define SQ_LB_CTR3_CU__SH0_MASK_MASK 0x0000FFFFL
++#define SQ_LB_CTR3_CU__SH1_MASK_MASK 0xFFFF0000L
++//SQC_EDC_CNT
++#define SQC_EDC_CNT__DATA_CU0_WRITE_DATA_BUF_SEC_COUNT__SHIFT 0x0
++#define SQC_EDC_CNT__DATA_CU0_WRITE_DATA_BUF_DED_COUNT__SHIFT 0x2
++#define SQC_EDC_CNT__DATA_CU0_UTCL1_LFIFO_SEC_COUNT__SHIFT 0x4
++#define SQC_EDC_CNT__DATA_CU0_UTCL1_LFIFO_DED_COUNT__SHIFT 0x6
++#define SQC_EDC_CNT__DATA_CU1_WRITE_DATA_BUF_SEC_COUNT__SHIFT 0x8
++#define SQC_EDC_CNT__DATA_CU1_WRITE_DATA_BUF_DED_COUNT__SHIFT 0xa
++#define SQC_EDC_CNT__DATA_CU1_UTCL1_LFIFO_SEC_COUNT__SHIFT 0xc
++#define SQC_EDC_CNT__DATA_CU1_UTCL1_LFIFO_DED_COUNT__SHIFT 0xe
++#define SQC_EDC_CNT__DATA_CU2_WRITE_DATA_BUF_SEC_COUNT__SHIFT 0x10
++#define SQC_EDC_CNT__DATA_CU2_WRITE_DATA_BUF_DED_COUNT__SHIFT 0x12
++#define SQC_EDC_CNT__DATA_CU2_UTCL1_LFIFO_SEC_COUNT__SHIFT 0x14
++#define SQC_EDC_CNT__DATA_CU2_UTCL1_LFIFO_DED_COUNT__SHIFT 0x16
++#define SQC_EDC_CNT__DATA_CU3_WRITE_DATA_BUF_SEC_COUNT__SHIFT 0x18
++#define SQC_EDC_CNT__DATA_CU3_WRITE_DATA_BUF_DED_COUNT__SHIFT 0x1a
++#define SQC_EDC_CNT__DATA_CU3_UTCL1_LFIFO_SEC_COUNT__SHIFT 0x1c
++#define SQC_EDC_CNT__DATA_CU3_UTCL1_LFIFO_DED_COUNT__SHIFT 0x1e
++#define SQC_EDC_CNT__DATA_CU0_WRITE_DATA_BUF_SEC_COUNT_MASK 0x00000003L
++#define SQC_EDC_CNT__DATA_CU0_WRITE_DATA_BUF_DED_COUNT_MASK 0x0000000CL
++#define SQC_EDC_CNT__DATA_CU0_UTCL1_LFIFO_SEC_COUNT_MASK 0x00000030L
++#define SQC_EDC_CNT__DATA_CU0_UTCL1_LFIFO_DED_COUNT_MASK 0x000000C0L
++#define SQC_EDC_CNT__DATA_CU1_WRITE_DATA_BUF_SEC_COUNT_MASK 0x00000300L
++#define SQC_EDC_CNT__DATA_CU1_WRITE_DATA_BUF_DED_COUNT_MASK 0x00000C00L
++#define SQC_EDC_CNT__DATA_CU1_UTCL1_LFIFO_SEC_COUNT_MASK 0x00003000L
++#define SQC_EDC_CNT__DATA_CU1_UTCL1_LFIFO_DED_COUNT_MASK 0x0000C000L
++#define SQC_EDC_CNT__DATA_CU2_WRITE_DATA_BUF_SEC_COUNT_MASK 0x00030000L
++#define SQC_EDC_CNT__DATA_CU2_WRITE_DATA_BUF_DED_COUNT_MASK 0x000C0000L
++#define SQC_EDC_CNT__DATA_CU2_UTCL1_LFIFO_SEC_COUNT_MASK 0x00300000L
++#define SQC_EDC_CNT__DATA_CU2_UTCL1_LFIFO_DED_COUNT_MASK 0x00C00000L
++#define SQC_EDC_CNT__DATA_CU3_WRITE_DATA_BUF_SEC_COUNT_MASK 0x03000000L
++#define SQC_EDC_CNT__DATA_CU3_WRITE_DATA_BUF_DED_COUNT_MASK 0x0C000000L
++#define SQC_EDC_CNT__DATA_CU3_UTCL1_LFIFO_SEC_COUNT_MASK 0x30000000L
++#define SQC_EDC_CNT__DATA_CU3_UTCL1_LFIFO_DED_COUNT_MASK 0xC0000000L
++//SQ_EDC_SEC_CNT
++#define SQ_EDC_SEC_CNT__LDS_SEC__SHIFT 0x0
++#define SQ_EDC_SEC_CNT__SGPR_SEC__SHIFT 0x8
++#define SQ_EDC_SEC_CNT__VGPR_SEC__SHIFT 0x10
++#define SQ_EDC_SEC_CNT__LDS_SEC_MASK 0x000000FFL
++#define SQ_EDC_SEC_CNT__SGPR_SEC_MASK 0x0000FF00L
++#define SQ_EDC_SEC_CNT__VGPR_SEC_MASK 0x00FF0000L
++//SQ_EDC_DED_CNT
++#define SQ_EDC_DED_CNT__LDS_DED__SHIFT 0x0
++#define SQ_EDC_DED_CNT__SGPR_DED__SHIFT 0x8
++#define SQ_EDC_DED_CNT__VGPR_DED__SHIFT 0x10
++#define SQ_EDC_DED_CNT__LDS_DED_MASK 0x000000FFL
++#define SQ_EDC_DED_CNT__SGPR_DED_MASK 0x0000FF00L
++#define SQ_EDC_DED_CNT__VGPR_DED_MASK 0x00FF0000L
++//SQ_EDC_INFO
++#define SQ_EDC_INFO__WAVE_ID__SHIFT 0x0
++#define SQ_EDC_INFO__SIMD_ID__SHIFT 0x4
++#define SQ_EDC_INFO__SOURCE__SHIFT 0x6
++#define SQ_EDC_INFO__VM_ID__SHIFT 0x9
++#define SQ_EDC_INFO__WAVE_ID_MASK 0x0000000FL
++#define SQ_EDC_INFO__SIMD_ID_MASK 0x00000030L
++#define SQ_EDC_INFO__SOURCE_MASK 0x000001C0L
++#define SQ_EDC_INFO__VM_ID_MASK 0x00001E00L
++//SQ_EDC_CNT
++#define SQ_EDC_CNT__LDS_D_SEC_COUNT__SHIFT 0x0
++#define SQ_EDC_CNT__LDS_D_DED_COUNT__SHIFT 0x2
++#define SQ_EDC_CNT__LDS_I_SEC_COUNT__SHIFT 0x4
++#define SQ_EDC_CNT__LDS_I_DED_COUNT__SHIFT 0x6
++#define SQ_EDC_CNT__SGPR_SEC_COUNT__SHIFT 0x8
++#define SQ_EDC_CNT__SGPR_DED_COUNT__SHIFT 0xa
++#define SQ_EDC_CNT__VGPR0_SEC_COUNT__SHIFT 0xc
++#define SQ_EDC_CNT__VGPR0_DED_COUNT__SHIFT 0xe
++#define SQ_EDC_CNT__VGPR1_SEC_COUNT__SHIFT 0x10
++#define SQ_EDC_CNT__VGPR1_DED_COUNT__SHIFT 0x12
++#define SQ_EDC_CNT__VGPR2_SEC_COUNT__SHIFT 0x14
++#define SQ_EDC_CNT__VGPR2_DED_COUNT__SHIFT 0x16
++#define SQ_EDC_CNT__VGPR3_SEC_COUNT__SHIFT 0x18
++#define SQ_EDC_CNT__VGPR3_DED_COUNT__SHIFT 0x1a
++#define SQ_EDC_CNT__LDS_D_SEC_COUNT_MASK 0x00000003L
++#define SQ_EDC_CNT__LDS_D_DED_COUNT_MASK 0x0000000CL
++#define SQ_EDC_CNT__LDS_I_SEC_COUNT_MASK 0x00000030L
++#define SQ_EDC_CNT__LDS_I_DED_COUNT_MASK 0x000000C0L
++#define SQ_EDC_CNT__SGPR_SEC_COUNT_MASK 0x00000300L
++#define SQ_EDC_CNT__SGPR_DED_COUNT_MASK 0x00000C00L
++#define SQ_EDC_CNT__VGPR0_SEC_COUNT_MASK 0x00003000L
++#define SQ_EDC_CNT__VGPR0_DED_COUNT_MASK 0x0000C000L
++#define SQ_EDC_CNT__VGPR1_SEC_COUNT_MASK 0x00030000L
++#define SQ_EDC_CNT__VGPR1_DED_COUNT_MASK 0x000C0000L
++#define SQ_EDC_CNT__VGPR2_SEC_COUNT_MASK 0x00300000L
++#define SQ_EDC_CNT__VGPR2_DED_COUNT_MASK 0x00C00000L
++#define SQ_EDC_CNT__VGPR3_SEC_COUNT_MASK 0x03000000L
++#define SQ_EDC_CNT__VGPR3_DED_COUNT_MASK 0x0C000000L
++//SQ_EDC_FUE_CNTL
++#define SQ_EDC_FUE_CNTL__BLOCK_FUE_FLAGS__SHIFT 0x0
++#define SQ_EDC_FUE_CNTL__FUE_INTERRUPT_ENABLES__SHIFT 0x10
++#define SQ_EDC_FUE_CNTL__BLOCK_FUE_FLAGS_MASK 0x0000FFFFL
++#define SQ_EDC_FUE_CNTL__FUE_INTERRUPT_ENABLES_MASK 0xFFFF0000L
++//SQ_THREAD_TRACE_WORD_CMN
++#define SQ_THREAD_TRACE_WORD_CMN__TOKEN_TYPE__SHIFT 0x0
++#define SQ_THREAD_TRACE_WORD_CMN__TIME_DELTA__SHIFT 0x4
++#define SQ_THREAD_TRACE_WORD_CMN__TOKEN_TYPE_MASK 0x000FL
++#define SQ_THREAD_TRACE_WORD_CMN__TIME_DELTA_MASK 0x0010L
++//SQ_THREAD_TRACE_WORD_EVENT
++#define SQ_THREAD_TRACE_WORD_EVENT__TOKEN_TYPE__SHIFT 0x0
++#define SQ_THREAD_TRACE_WORD_EVENT__TIME_DELTA__SHIFT 0x4
++#define SQ_THREAD_TRACE_WORD_EVENT__SH_ID__SHIFT 0x5
++#define SQ_THREAD_TRACE_WORD_EVENT__STAGE__SHIFT 0x6
++#define SQ_THREAD_TRACE_WORD_EVENT__EVENT_TYPE__SHIFT 0xa
++#define SQ_THREAD_TRACE_WORD_EVENT__TOKEN_TYPE_MASK 0x000FL
++#define SQ_THREAD_TRACE_WORD_EVENT__TIME_DELTA_MASK 0x0010L
++#define SQ_THREAD_TRACE_WORD_EVENT__SH_ID_MASK 0x0020L
++#define SQ_THREAD_TRACE_WORD_EVENT__STAGE_MASK 0x01C0L
++#define SQ_THREAD_TRACE_WORD_EVENT__EVENT_TYPE_MASK 0xFC00L
++//SQ_THREAD_TRACE_WORD_INST
++#define SQ_THREAD_TRACE_WORD_INST__TOKEN_TYPE__SHIFT 0x0
++#define SQ_THREAD_TRACE_WORD_INST__TIME_DELTA__SHIFT 0x4
++#define SQ_THREAD_TRACE_WORD_INST__WAVE_ID__SHIFT 0x5
++#define SQ_THREAD_TRACE_WORD_INST__SIMD_ID__SHIFT 0x9
++#define SQ_THREAD_TRACE_WORD_INST__INST_TYPE__SHIFT 0xb
++#define SQ_THREAD_TRACE_WORD_INST__TOKEN_TYPE_MASK 0x000FL
++#define SQ_THREAD_TRACE_WORD_INST__TIME_DELTA_MASK 0x0010L
++#define SQ_THREAD_TRACE_WORD_INST__WAVE_ID_MASK 0x01E0L
++#define SQ_THREAD_TRACE_WORD_INST__SIMD_ID_MASK 0x0600L
++#define SQ_THREAD_TRACE_WORD_INST__INST_TYPE_MASK 0xF800L
++//SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2
++#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__TOKEN_TYPE__SHIFT 0x0
++#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__TIME_DELTA__SHIFT 0x4
++#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__WAVE_ID__SHIFT 0x5
++#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__SIMD_ID__SHIFT 0x9
++#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__TRAP_ERROR__SHIFT 0xf
++#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__PC_LO__SHIFT 0x10
++#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__TOKEN_TYPE_MASK 0x0000000FL
++#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__TIME_DELTA_MASK 0x00000010L
++#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__WAVE_ID_MASK 0x000001E0L
++#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__SIMD_ID_MASK 0x00000600L
++#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__TRAP_ERROR_MASK 0x00008000L
++#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__PC_LO_MASK 0xFFFF0000L
++//SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2
++#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__TOKEN_TYPE__SHIFT 0x0
++#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__TIME_DELTA__SHIFT 0x4
++#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__SH_ID__SHIFT 0x5
++#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__CU_ID__SHIFT 0x6
++#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__WAVE_ID__SHIFT 0xa
++#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__SIMD_ID__SHIFT 0xe
++#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__DATA_LO__SHIFT 0x10
++#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__TOKEN_TYPE_MASK 0x0000000FL
++#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__TIME_DELTA_MASK 0x00000010L
++#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__SH_ID_MASK 0x00000020L
++#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__CU_ID_MASK 0x000003C0L
++#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__WAVE_ID_MASK 0x00003C00L
++#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__SIMD_ID_MASK 0x0000C000L
++#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__DATA_LO_MASK 0xFFFF0000L
++//SQ_THREAD_TRACE_WORD_ISSUE
++#define SQ_THREAD_TRACE_WORD_ISSUE__TOKEN_TYPE__SHIFT 0x0
++#define SQ_THREAD_TRACE_WORD_ISSUE__TIME_DELTA__SHIFT 0x4
++#define SQ_THREAD_TRACE_WORD_ISSUE__SIMD_ID__SHIFT 0x5
++#define SQ_THREAD_TRACE_WORD_ISSUE__INST0__SHIFT 0x8
++#define SQ_THREAD_TRACE_WORD_ISSUE__INST1__SHIFT 0xa
++#define SQ_THREAD_TRACE_WORD_ISSUE__INST2__SHIFT 0xc
++#define SQ_THREAD_TRACE_WORD_ISSUE__INST3__SHIFT 0xe
++#define SQ_THREAD_TRACE_WORD_ISSUE__INST4__SHIFT 0x10
++#define SQ_THREAD_TRACE_WORD_ISSUE__INST5__SHIFT 0x12
++#define SQ_THREAD_TRACE_WORD_ISSUE__INST6__SHIFT 0x14
++#define SQ_THREAD_TRACE_WORD_ISSUE__INST7__SHIFT 0x16
++#define SQ_THREAD_TRACE_WORD_ISSUE__INST8__SHIFT 0x18
++#define SQ_THREAD_TRACE_WORD_ISSUE__INST9__SHIFT 0x1a
++#define SQ_THREAD_TRACE_WORD_ISSUE__TOKEN_TYPE_MASK 0x0000000FL
++#define SQ_THREAD_TRACE_WORD_ISSUE__TIME_DELTA_MASK 0x00000010L
++#define SQ_THREAD_TRACE_WORD_ISSUE__SIMD_ID_MASK 0x00000060L
++#define SQ_THREAD_TRACE_WORD_ISSUE__INST0_MASK 0x00000300L
++#define SQ_THREAD_TRACE_WORD_ISSUE__INST1_MASK 0x00000C00L
++#define SQ_THREAD_TRACE_WORD_ISSUE__INST2_MASK 0x00003000L
++#define SQ_THREAD_TRACE_WORD_ISSUE__INST3_MASK 0x0000C000L
++#define SQ_THREAD_TRACE_WORD_ISSUE__INST4_MASK 0x00030000L
++#define SQ_THREAD_TRACE_WORD_ISSUE__INST5_MASK 0x000C0000L
++#define SQ_THREAD_TRACE_WORD_ISSUE__INST6_MASK 0x00300000L
++#define SQ_THREAD_TRACE_WORD_ISSUE__INST7_MASK 0x00C00000L
++#define SQ_THREAD_TRACE_WORD_ISSUE__INST8_MASK 0x03000000L
++#define SQ_THREAD_TRACE_WORD_ISSUE__INST9_MASK 0x0C000000L
++//SQ_THREAD_TRACE_WORD_MISC
++#define SQ_THREAD_TRACE_WORD_MISC__TOKEN_TYPE__SHIFT 0x0
++#define SQ_THREAD_TRACE_WORD_MISC__TIME_DELTA__SHIFT 0x4
++#define SQ_THREAD_TRACE_WORD_MISC__SH_ID__SHIFT 0xc
++#define SQ_THREAD_TRACE_WORD_MISC__MISC_TOKEN_TYPE__SHIFT 0xd
++#define SQ_THREAD_TRACE_WORD_MISC__TOKEN_TYPE_MASK 0x000FL
++#define SQ_THREAD_TRACE_WORD_MISC__TIME_DELTA_MASK 0x0FF0L
++#define SQ_THREAD_TRACE_WORD_MISC__SH_ID_MASK 0x1000L
++#define SQ_THREAD_TRACE_WORD_MISC__MISC_TOKEN_TYPE_MASK 0xE000L
++//SQ_THREAD_TRACE_WORD_PERF_1_OF_2
++#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__TOKEN_TYPE__SHIFT 0x0
++#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__TIME_DELTA__SHIFT 0x4
++#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__SH_ID__SHIFT 0x5
++#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CU_ID__SHIFT 0x6
++#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CNTR_BANK__SHIFT 0xa
++#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CNTR0__SHIFT 0xc
++#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CNTR1_LO__SHIFT 0x19
++#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__TOKEN_TYPE_MASK 0x0000000FL
++#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__TIME_DELTA_MASK 0x00000010L
++#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__SH_ID_MASK 0x00000020L
++#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CU_ID_MASK 0x000003C0L
++#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CNTR_BANK_MASK 0x00000C00L
++#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CNTR0_MASK 0x01FFF000L
++#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CNTR1_LO_MASK 0xFE000000L
++//SQ_THREAD_TRACE_WORD_REG_1_OF_2
++#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__TOKEN_TYPE__SHIFT 0x0
++#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__TIME_DELTA__SHIFT 0x4
++#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__PIPE_ID__SHIFT 0x5
++#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__ME_ID__SHIFT 0x7
++#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_DROPPED_PREV__SHIFT 0x9
++#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_TYPE__SHIFT 0xa
++#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_PRIV__SHIFT 0xe
++#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_OP__SHIFT 0xf
++#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_ADDR__SHIFT 0x10
++#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__TOKEN_TYPE_MASK 0x0000000FL
++#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__TIME_DELTA_MASK 0x00000010L
++#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__PIPE_ID_MASK 0x00000060L
++#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__ME_ID_MASK 0x00000180L
++#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_DROPPED_PREV_MASK 0x00000200L
++#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_TYPE_MASK 0x00001C00L
++#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_PRIV_MASK 0x00004000L
++#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_OP_MASK 0x00008000L
++#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_ADDR_MASK 0xFFFF0000L
++//SQ_THREAD_TRACE_WORD_REG_2_OF_2
++#define SQ_THREAD_TRACE_WORD_REG_2_OF_2__DATA__SHIFT 0x0
++#define SQ_THREAD_TRACE_WORD_REG_2_OF_2__DATA_MASK 0xFFFFFFFFL
++//SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2
++#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__TOKEN_TYPE__SHIFT 0x0
++#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__TIME_DELTA__SHIFT 0x4
++#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__PIPE_ID__SHIFT 0x5
++#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__ME_ID__SHIFT 0x7
++#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__REG_ADDR__SHIFT 0x9
++#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__DATA_LO__SHIFT 0x10
++#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__TOKEN_TYPE_MASK 0x0000000FL
++#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__TIME_DELTA_MASK 0x00000010L
++#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__PIPE_ID_MASK 0x00000060L
++#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__ME_ID_MASK 0x00000180L
++#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__REG_ADDR_MASK 0x0000FE00L
++#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__DATA_LO_MASK 0xFFFF0000L
++//SQ_THREAD_TRACE_WORD_REG_CS_2_OF_2
++#define SQ_THREAD_TRACE_WORD_REG_CS_2_OF_2__DATA_HI__SHIFT 0x0
++#define SQ_THREAD_TRACE_WORD_REG_CS_2_OF_2__DATA_HI_MASK 0x0000FFFFL
++//SQ_THREAD_TRACE_WORD_TIMESTAMP_1_OF_2
++#define SQ_THREAD_TRACE_WORD_TIMESTAMP_1_OF_2__TOKEN_TYPE__SHIFT 0x0
++#define SQ_THREAD_TRACE_WORD_TIMESTAMP_1_OF_2__TIME_LO__SHIFT 0x10
++#define SQ_THREAD_TRACE_WORD_TIMESTAMP_1_OF_2__TOKEN_TYPE_MASK 0x0000000FL
++#define SQ_THREAD_TRACE_WORD_TIMESTAMP_1_OF_2__TIME_LO_MASK 0xFFFF0000L
++//SQ_THREAD_TRACE_WORD_WAVE
++#define SQ_THREAD_TRACE_WORD_WAVE__TOKEN_TYPE__SHIFT 0x0
++#define SQ_THREAD_TRACE_WORD_WAVE__TIME_DELTA__SHIFT 0x4
++#define SQ_THREAD_TRACE_WORD_WAVE__SH_ID__SHIFT 0x5
++#define SQ_THREAD_TRACE_WORD_WAVE__CU_ID__SHIFT 0x6
++#define SQ_THREAD_TRACE_WORD_WAVE__WAVE_ID__SHIFT 0xa
++#define SQ_THREAD_TRACE_WORD_WAVE__SIMD_ID__SHIFT 0xe
++#define SQ_THREAD_TRACE_WORD_WAVE__TOKEN_TYPE_MASK 0x000FL
++#define SQ_THREAD_TRACE_WORD_WAVE__TIME_DELTA_MASK 0x0010L
++#define SQ_THREAD_TRACE_WORD_WAVE__SH_ID_MASK 0x0020L
++#define SQ_THREAD_TRACE_WORD_WAVE__CU_ID_MASK 0x03C0L
++#define SQ_THREAD_TRACE_WORD_WAVE__WAVE_ID_MASK 0x3C00L
++#define SQ_THREAD_TRACE_WORD_WAVE__SIMD_ID_MASK 0xC000L
++//SQ_THREAD_TRACE_WORD_WAVE_START
++#define SQ_THREAD_TRACE_WORD_WAVE_START__TOKEN_TYPE__SHIFT 0x0
++#define SQ_THREAD_TRACE_WORD_WAVE_START__TIME_DELTA__SHIFT 0x4
++#define SQ_THREAD_TRACE_WORD_WAVE_START__SH_ID__SHIFT 0x5
++#define SQ_THREAD_TRACE_WORD_WAVE_START__CU_ID__SHIFT 0x6
++#define SQ_THREAD_TRACE_WORD_WAVE_START__WAVE_ID__SHIFT 0xa
++#define SQ_THREAD_TRACE_WORD_WAVE_START__SIMD_ID__SHIFT 0xe
++#define SQ_THREAD_TRACE_WORD_WAVE_START__DISPATCHER__SHIFT 0x10
++#define SQ_THREAD_TRACE_WORD_WAVE_START__VS_NO_ALLOC_OR_GROUPED__SHIFT 0x15
++#define SQ_THREAD_TRACE_WORD_WAVE_START__COUNT__SHIFT 0x16
++#define SQ_THREAD_TRACE_WORD_WAVE_START__TG_ID__SHIFT 0x1d
++#define SQ_THREAD_TRACE_WORD_WAVE_START__TOKEN_TYPE_MASK 0x0000000FL
++#define SQ_THREAD_TRACE_WORD_WAVE_START__TIME_DELTA_MASK 0x00000010L
++#define SQ_THREAD_TRACE_WORD_WAVE_START__SH_ID_MASK 0x00000020L
++#define SQ_THREAD_TRACE_WORD_WAVE_START__CU_ID_MASK 0x000003C0L
++#define SQ_THREAD_TRACE_WORD_WAVE_START__WAVE_ID_MASK 0x00003C00L
++#define SQ_THREAD_TRACE_WORD_WAVE_START__SIMD_ID_MASK 0x0000C000L
++#define SQ_THREAD_TRACE_WORD_WAVE_START__DISPATCHER_MASK 0x001F0000L
++#define SQ_THREAD_TRACE_WORD_WAVE_START__VS_NO_ALLOC_OR_GROUPED_MASK 0x00200000L
++#define SQ_THREAD_TRACE_WORD_WAVE_START__COUNT_MASK 0x1FC00000L
++#define SQ_THREAD_TRACE_WORD_WAVE_START__TG_ID_MASK 0xE0000000L
++//SQ_THREAD_TRACE_WORD_INST_PC_2_OF_2
++#define SQ_THREAD_TRACE_WORD_INST_PC_2_OF_2__PC_HI__SHIFT 0x0
++#define SQ_THREAD_TRACE_WORD_INST_PC_2_OF_2__PC_HI_MASK 0x00FFFFFFL
++//SQ_THREAD_TRACE_WORD_INST_USERDATA_2_OF_2
++#define SQ_THREAD_TRACE_WORD_INST_USERDATA_2_OF_2__DATA_HI__SHIFT 0x0
++#define SQ_THREAD_TRACE_WORD_INST_USERDATA_2_OF_2__DATA_HI_MASK 0xFFFFL
++//SQ_THREAD_TRACE_WORD_PERF_2_OF_2
++#define SQ_THREAD_TRACE_WORD_PERF_2_OF_2__CNTR1_HI__SHIFT 0x0
++#define SQ_THREAD_TRACE_WORD_PERF_2_OF_2__CNTR2__SHIFT 0x6
++#define SQ_THREAD_TRACE_WORD_PERF_2_OF_2__CNTR3__SHIFT 0x13
++#define SQ_THREAD_TRACE_WORD_PERF_2_OF_2__CNTR1_HI_MASK 0x0000003FL
++#define SQ_THREAD_TRACE_WORD_PERF_2_OF_2__CNTR2_MASK 0x0007FFC0L
++#define SQ_THREAD_TRACE_WORD_PERF_2_OF_2__CNTR3_MASK 0xFFF80000L
++//SQ_THREAD_TRACE_WORD_TIMESTAMP_2_OF_2
++#define SQ_THREAD_TRACE_WORD_TIMESTAMP_2_OF_2__TIME_HI__SHIFT 0x0
++#define SQ_THREAD_TRACE_WORD_TIMESTAMP_2_OF_2__TIME_HI_MASK 0xFFFFFFFFL
++//SQ_WREXEC_EXEC_HI
++#define SQ_WREXEC_EXEC_HI__ADDR_HI__SHIFT 0x0
++#define SQ_WREXEC_EXEC_HI__FIRST_WAVE__SHIFT 0x1a
++#define SQ_WREXEC_EXEC_HI__ATC__SHIFT 0x1b
++#define SQ_WREXEC_EXEC_HI__MTYPE__SHIFT 0x1c
++#define SQ_WREXEC_EXEC_HI__MSB__SHIFT 0x1f
++#define SQ_WREXEC_EXEC_HI__ADDR_HI_MASK 0x0000FFFFL
++#define SQ_WREXEC_EXEC_HI__FIRST_WAVE_MASK 0x04000000L
++#define SQ_WREXEC_EXEC_HI__ATC_MASK 0x08000000L
++#define SQ_WREXEC_EXEC_HI__MTYPE_MASK 0x70000000L
++#define SQ_WREXEC_EXEC_HI__MSB_MASK 0x80000000L
++//SQ_WREXEC_EXEC_LO
++#define SQ_WREXEC_EXEC_LO__ADDR_LO__SHIFT 0x0
++#define SQ_WREXEC_EXEC_LO__ADDR_LO_MASK 0xFFFFFFFFL
++//SQ_BUF_RSRC_WORD0
++#define SQ_BUF_RSRC_WORD0__BASE_ADDRESS__SHIFT 0x0
++#define SQ_BUF_RSRC_WORD0__BASE_ADDRESS_MASK 0xFFFFFFFFL
++//SQ_BUF_RSRC_WORD1
++#define SQ_BUF_RSRC_WORD1__BASE_ADDRESS_HI__SHIFT 0x0
++#define SQ_BUF_RSRC_WORD1__STRIDE__SHIFT 0x10
++#define SQ_BUF_RSRC_WORD1__CACHE_SWIZZLE__SHIFT 0x1e
++#define SQ_BUF_RSRC_WORD1__SWIZZLE_ENABLE__SHIFT 0x1f
++#define SQ_BUF_RSRC_WORD1__BASE_ADDRESS_HI_MASK 0x0000FFFFL
++#define SQ_BUF_RSRC_WORD1__STRIDE_MASK 0x3FFF0000L
++#define SQ_BUF_RSRC_WORD1__CACHE_SWIZZLE_MASK 0x40000000L
++#define SQ_BUF_RSRC_WORD1__SWIZZLE_ENABLE_MASK 0x80000000L
++//SQ_BUF_RSRC_WORD2
++#define SQ_BUF_RSRC_WORD2__NUM_RECORDS__SHIFT 0x0
++#define SQ_BUF_RSRC_WORD2__NUM_RECORDS_MASK 0xFFFFFFFFL
++//SQ_BUF_RSRC_WORD3
++#define SQ_BUF_RSRC_WORD3__DST_SEL_X__SHIFT 0x0
++#define SQ_BUF_RSRC_WORD3__DST_SEL_Y__SHIFT 0x3
++#define SQ_BUF_RSRC_WORD3__DST_SEL_Z__SHIFT 0x6
++#define SQ_BUF_RSRC_WORD3__DST_SEL_W__SHIFT 0x9
++#define SQ_BUF_RSRC_WORD3__NUM_FORMAT__SHIFT 0xc
++#define SQ_BUF_RSRC_WORD3__DATA_FORMAT__SHIFT 0xf
++#define SQ_BUF_RSRC_WORD3__USER_VM_ENABLE__SHIFT 0x13
++#define SQ_BUF_RSRC_WORD3__USER_VM_MODE__SHIFT 0x14
++#define SQ_BUF_RSRC_WORD3__INDEX_STRIDE__SHIFT 0x15
++#define SQ_BUF_RSRC_WORD3__ADD_TID_ENABLE__SHIFT 0x17
++#define SQ_BUF_RSRC_WORD3__NV__SHIFT 0x1b
++#define SQ_BUF_RSRC_WORD3__TYPE__SHIFT 0x1e
++#define SQ_BUF_RSRC_WORD3__DST_SEL_X_MASK 0x00000007L
++#define SQ_BUF_RSRC_WORD3__DST_SEL_Y_MASK 0x00000038L
++#define SQ_BUF_RSRC_WORD3__DST_SEL_Z_MASK 0x000001C0L
++#define SQ_BUF_RSRC_WORD3__DST_SEL_W_MASK 0x00000E00L
++#define SQ_BUF_RSRC_WORD3__NUM_FORMAT_MASK 0x00007000L
++#define SQ_BUF_RSRC_WORD3__DATA_FORMAT_MASK 0x00078000L
++#define SQ_BUF_RSRC_WORD3__USER_VM_ENABLE_MASK 0x00080000L
++#define SQ_BUF_RSRC_WORD3__USER_VM_MODE_MASK 0x00100000L
++#define SQ_BUF_RSRC_WORD3__INDEX_STRIDE_MASK 0x00600000L
++#define SQ_BUF_RSRC_WORD3__ADD_TID_ENABLE_MASK 0x00800000L
++#define SQ_BUF_RSRC_WORD3__NV_MASK 0x08000000L
++#define SQ_BUF_RSRC_WORD3__TYPE_MASK 0xC0000000L
++//SQ_IMG_RSRC_WORD0
++#define SQ_IMG_RSRC_WORD0__BASE_ADDRESS__SHIFT 0x0
++#define SQ_IMG_RSRC_WORD0__BASE_ADDRESS_MASK 0xFFFFFFFFL
++//SQ_IMG_RSRC_WORD1
++#define SQ_IMG_RSRC_WORD1__BASE_ADDRESS_HI__SHIFT 0x0
++#define SQ_IMG_RSRC_WORD1__MIN_LOD__SHIFT 0x8
++#define SQ_IMG_RSRC_WORD1__DATA_FORMAT__SHIFT 0x14
++#define SQ_IMG_RSRC_WORD1__NUM_FORMAT__SHIFT 0x1a
++#define SQ_IMG_RSRC_WORD1__NV__SHIFT 0x1e
++#define SQ_IMG_RSRC_WORD1__META_DIRECT__SHIFT 0x1f
++#define SQ_IMG_RSRC_WORD1__BASE_ADDRESS_HI_MASK 0x000000FFL
++#define SQ_IMG_RSRC_WORD1__MIN_LOD_MASK 0x000FFF00L
++#define SQ_IMG_RSRC_WORD1__DATA_FORMAT_MASK 0x03F00000L
++#define SQ_IMG_RSRC_WORD1__NUM_FORMAT_MASK 0x3C000000L
++#define SQ_IMG_RSRC_WORD1__NV_MASK 0x40000000L
++#define SQ_IMG_RSRC_WORD1__META_DIRECT_MASK 0x80000000L
++//SQ_IMG_RSRC_WORD2
++#define SQ_IMG_RSRC_WORD2__WIDTH__SHIFT 0x0
++#define SQ_IMG_RSRC_WORD2__HEIGHT__SHIFT 0xe
++#define SQ_IMG_RSRC_WORD2__PERF_MOD__SHIFT 0x1c
++#define SQ_IMG_RSRC_WORD2__WIDTH_MASK 0x00003FFFL
++#define SQ_IMG_RSRC_WORD2__HEIGHT_MASK 0x0FFFC000L
++#define SQ_IMG_RSRC_WORD2__PERF_MOD_MASK 0x70000000L
++//SQ_IMG_RSRC_WORD3
++#define SQ_IMG_RSRC_WORD3__DST_SEL_X__SHIFT 0x0
++#define SQ_IMG_RSRC_WORD3__DST_SEL_Y__SHIFT 0x3
++#define SQ_IMG_RSRC_WORD3__DST_SEL_Z__SHIFT 0x6
++#define SQ_IMG_RSRC_WORD3__DST_SEL_W__SHIFT 0x9
++#define SQ_IMG_RSRC_WORD3__BASE_LEVEL__SHIFT 0xc
++#define SQ_IMG_RSRC_WORD3__LAST_LEVEL__SHIFT 0x10
++#define SQ_IMG_RSRC_WORD3__SW_MODE__SHIFT 0x14
++#define SQ_IMG_RSRC_WORD3__TYPE__SHIFT 0x1c
++#define SQ_IMG_RSRC_WORD3__DST_SEL_X_MASK 0x00000007L
++#define SQ_IMG_RSRC_WORD3__DST_SEL_Y_MASK 0x00000038L
++#define SQ_IMG_RSRC_WORD3__DST_SEL_Z_MASK 0x000001C0L
++#define SQ_IMG_RSRC_WORD3__DST_SEL_W_MASK 0x00000E00L
++#define SQ_IMG_RSRC_WORD3__BASE_LEVEL_MASK 0x0000F000L
++#define SQ_IMG_RSRC_WORD3__LAST_LEVEL_MASK 0x000F0000L
++#define SQ_IMG_RSRC_WORD3__SW_MODE_MASK 0x01F00000L
++#define SQ_IMG_RSRC_WORD3__TYPE_MASK 0xF0000000L
++//SQ_IMG_RSRC_WORD4
++#define SQ_IMG_RSRC_WORD4__DEPTH__SHIFT 0x0
++#define SQ_IMG_RSRC_WORD4__PITCH__SHIFT 0xd
++#define SQ_IMG_RSRC_WORD4__BC_SWIZZLE__SHIFT 0x1d
++#define SQ_IMG_RSRC_WORD4__DEPTH_MASK 0x00001FFFL
++#define SQ_IMG_RSRC_WORD4__PITCH_MASK 0x1FFFE000L
++#define SQ_IMG_RSRC_WORD4__BC_SWIZZLE_MASK 0xE0000000L
++//SQ_IMG_RSRC_WORD5
++#define SQ_IMG_RSRC_WORD5__BASE_ARRAY__SHIFT 0x0
++#define SQ_IMG_RSRC_WORD5__ARRAY_PITCH__SHIFT 0xd
++#define SQ_IMG_RSRC_WORD5__META_DATA_ADDRESS__SHIFT 0x11
++#define SQ_IMG_RSRC_WORD5__META_LINEAR__SHIFT 0x19
++#define SQ_IMG_RSRC_WORD5__META_PIPE_ALIGNED__SHIFT 0x1a
++#define SQ_IMG_RSRC_WORD5__META_RB_ALIGNED__SHIFT 0x1b
++#define SQ_IMG_RSRC_WORD5__MAX_MIP__SHIFT 0x1c
++#define SQ_IMG_RSRC_WORD5__BASE_ARRAY_MASK 0x00001FFFL
++#define SQ_IMG_RSRC_WORD5__ARRAY_PITCH_MASK 0x0001E000L
++#define SQ_IMG_RSRC_WORD5__META_DATA_ADDRESS_MASK 0x01FE0000L
++#define SQ_IMG_RSRC_WORD5__META_LINEAR_MASK 0x02000000L
++#define SQ_IMG_RSRC_WORD5__META_PIPE_ALIGNED_MASK 0x04000000L
++#define SQ_IMG_RSRC_WORD5__META_RB_ALIGNED_MASK 0x08000000L
++#define SQ_IMG_RSRC_WORD5__MAX_MIP_MASK 0xF0000000L
++//SQ_IMG_RSRC_WORD6
++#define SQ_IMG_RSRC_WORD6__MIN_LOD_WARN__SHIFT 0x0
++#define SQ_IMG_RSRC_WORD6__COUNTER_BANK_ID__SHIFT 0xc
++#define SQ_IMG_RSRC_WORD6__LOD_HDW_CNT_EN__SHIFT 0x14
++#define SQ_IMG_RSRC_WORD6__COMPRESSION_EN__SHIFT 0x15
++#define SQ_IMG_RSRC_WORD6__ALPHA_IS_ON_MSB__SHIFT 0x16
++#define SQ_IMG_RSRC_WORD6__COLOR_TRANSFORM__SHIFT 0x17
++#define SQ_IMG_RSRC_WORD6__LOST_ALPHA_BITS__SHIFT 0x18
++#define SQ_IMG_RSRC_WORD6__LOST_COLOR_BITS__SHIFT 0x1c
++#define SQ_IMG_RSRC_WORD6__MIN_LOD_WARN_MASK 0x00000FFFL
++#define SQ_IMG_RSRC_WORD6__COUNTER_BANK_ID_MASK 0x000FF000L
++#define SQ_IMG_RSRC_WORD6__LOD_HDW_CNT_EN_MASK 0x00100000L
++#define SQ_IMG_RSRC_WORD6__COMPRESSION_EN_MASK 0x00200000L
++#define SQ_IMG_RSRC_WORD6__ALPHA_IS_ON_MSB_MASK 0x00400000L
++#define SQ_IMG_RSRC_WORD6__COLOR_TRANSFORM_MASK 0x00800000L
++#define SQ_IMG_RSRC_WORD6__LOST_ALPHA_BITS_MASK 0x0F000000L
++#define SQ_IMG_RSRC_WORD6__LOST_COLOR_BITS_MASK 0xF0000000L
++//SQ_IMG_RSRC_WORD7
++#define SQ_IMG_RSRC_WORD7__META_DATA_ADDRESS__SHIFT 0x0
++#define SQ_IMG_RSRC_WORD7__META_DATA_ADDRESS_MASK 0xFFFFFFFFL
++//SQ_IMG_SAMP_WORD0
++#define SQ_IMG_SAMP_WORD0__CLAMP_X__SHIFT 0x0
++#define SQ_IMG_SAMP_WORD0__CLAMP_Y__SHIFT 0x3
++#define SQ_IMG_SAMP_WORD0__CLAMP_Z__SHIFT 0x6
++#define SQ_IMG_SAMP_WORD0__MAX_ANISO_RATIO__SHIFT 0x9
++#define SQ_IMG_SAMP_WORD0__DEPTH_COMPARE_FUNC__SHIFT 0xc
++#define SQ_IMG_SAMP_WORD0__FORCE_UNNORMALIZED__SHIFT 0xf
++#define SQ_IMG_SAMP_WORD0__ANISO_THRESHOLD__SHIFT 0x10
++#define SQ_IMG_SAMP_WORD0__MC_COORD_TRUNC__SHIFT 0x13
++#define SQ_IMG_SAMP_WORD0__FORCE_DEGAMMA__SHIFT 0x14
++#define SQ_IMG_SAMP_WORD0__ANISO_BIAS__SHIFT 0x15
++#define SQ_IMG_SAMP_WORD0__TRUNC_COORD__SHIFT 0x1b
++#define SQ_IMG_SAMP_WORD0__DISABLE_CUBE_WRAP__SHIFT 0x1c
++#define SQ_IMG_SAMP_WORD0__FILTER_MODE__SHIFT 0x1d
++#define SQ_IMG_SAMP_WORD0__COMPAT_MODE__SHIFT 0x1f
++#define SQ_IMG_SAMP_WORD0__CLAMP_X_MASK 0x00000007L
++#define SQ_IMG_SAMP_WORD0__CLAMP_Y_MASK 0x00000038L
++#define SQ_IMG_SAMP_WORD0__CLAMP_Z_MASK 0x000001C0L
++#define SQ_IMG_SAMP_WORD0__MAX_ANISO_RATIO_MASK 0x00000E00L
++#define SQ_IMG_SAMP_WORD0__DEPTH_COMPARE_FUNC_MASK 0x00007000L
++#define SQ_IMG_SAMP_WORD0__FORCE_UNNORMALIZED_MASK 0x00008000L
++#define SQ_IMG_SAMP_WORD0__ANISO_THRESHOLD_MASK 0x00070000L
++#define SQ_IMG_SAMP_WORD0__MC_COORD_TRUNC_MASK 0x00080000L
++#define SQ_IMG_SAMP_WORD0__FORCE_DEGAMMA_MASK 0x00100000L
++#define SQ_IMG_SAMP_WORD0__ANISO_BIAS_MASK 0x07E00000L
++#define SQ_IMG_SAMP_WORD0__TRUNC_COORD_MASK 0x08000000L
++#define SQ_IMG_SAMP_WORD0__DISABLE_CUBE_WRAP_MASK 0x10000000L
++#define SQ_IMG_SAMP_WORD0__FILTER_MODE_MASK 0x60000000L
++#define SQ_IMG_SAMP_WORD0__COMPAT_MODE_MASK 0x80000000L
++//SQ_IMG_SAMP_WORD1
++#define SQ_IMG_SAMP_WORD1__MIN_LOD__SHIFT 0x0
++#define SQ_IMG_SAMP_WORD1__MAX_LOD__SHIFT 0xc
++#define SQ_IMG_SAMP_WORD1__PERF_MIP__SHIFT 0x18
++#define SQ_IMG_SAMP_WORD1__PERF_Z__SHIFT 0x1c
++#define SQ_IMG_SAMP_WORD1__MIN_LOD_MASK 0x00000FFFL
++#define SQ_IMG_SAMP_WORD1__MAX_LOD_MASK 0x00FFF000L
++#define SQ_IMG_SAMP_WORD1__PERF_MIP_MASK 0x0F000000L
++#define SQ_IMG_SAMP_WORD1__PERF_Z_MASK 0xF0000000L
++//SQ_IMG_SAMP_WORD2
++#define SQ_IMG_SAMP_WORD2__LOD_BIAS__SHIFT 0x0
++#define SQ_IMG_SAMP_WORD2__LOD_BIAS_SEC__SHIFT 0xe
++#define SQ_IMG_SAMP_WORD2__XY_MAG_FILTER__SHIFT 0x14
++#define SQ_IMG_SAMP_WORD2__XY_MIN_FILTER__SHIFT 0x16
++#define SQ_IMG_SAMP_WORD2__Z_FILTER__SHIFT 0x18
++#define SQ_IMG_SAMP_WORD2__MIP_FILTER__SHIFT 0x1a
++#define SQ_IMG_SAMP_WORD2__MIP_POINT_PRECLAMP__SHIFT 0x1c
++#define SQ_IMG_SAMP_WORD2__BLEND_ZERO_PRT__SHIFT 0x1d
++#define SQ_IMG_SAMP_WORD2__FILTER_PREC_FIX__SHIFT 0x1e
++#define SQ_IMG_SAMP_WORD2__ANISO_OVERRIDE__SHIFT 0x1f
++#define SQ_IMG_SAMP_WORD2__LOD_BIAS_MASK 0x00003FFFL
++#define SQ_IMG_SAMP_WORD2__LOD_BIAS_SEC_MASK 0x000FC000L
++#define SQ_IMG_SAMP_WORD2__XY_MAG_FILTER_MASK 0x00300000L
++#define SQ_IMG_SAMP_WORD2__XY_MIN_FILTER_MASK 0x00C00000L
++#define SQ_IMG_SAMP_WORD2__Z_FILTER_MASK 0x03000000L
++#define SQ_IMG_SAMP_WORD2__MIP_FILTER_MASK 0x0C000000L
++#define SQ_IMG_SAMP_WORD2__MIP_POINT_PRECLAMP_MASK 0x10000000L
++#define SQ_IMG_SAMP_WORD2__BLEND_ZERO_PRT_MASK 0x20000000L
++#define SQ_IMG_SAMP_WORD2__FILTER_PREC_FIX_MASK 0x40000000L
++#define SQ_IMG_SAMP_WORD2__ANISO_OVERRIDE_MASK 0x80000000L
++//SQ_IMG_SAMP_WORD3
++#define SQ_IMG_SAMP_WORD3__BORDER_COLOR_PTR__SHIFT 0x0
++#define SQ_IMG_SAMP_WORD3__SKIP_DEGAMMA__SHIFT 0xc
++#define SQ_IMG_SAMP_WORD3__BORDER_COLOR_TYPE__SHIFT 0x1e
++#define SQ_IMG_SAMP_WORD3__BORDER_COLOR_PTR_MASK 0x00000FFFL
++#define SQ_IMG_SAMP_WORD3__SKIP_DEGAMMA_MASK 0x00001000L
++#define SQ_IMG_SAMP_WORD3__BORDER_COLOR_TYPE_MASK 0xC0000000L
++//SQ_FLAT_SCRATCH_WORD0
++#define SQ_FLAT_SCRATCH_WORD0__SIZE__SHIFT 0x0
++#define SQ_FLAT_SCRATCH_WORD0__SIZE_MASK 0x0007FFFFL
++//SQ_FLAT_SCRATCH_WORD1
++#define SQ_FLAT_SCRATCH_WORD1__OFFSET__SHIFT 0x0
++#define SQ_FLAT_SCRATCH_WORD1__OFFSET_MASK 0x00FFFFFFL
++//SQ_M0_GPR_IDX_WORD
++#define SQ_M0_GPR_IDX_WORD__INDEX__SHIFT 0x0
++#define SQ_M0_GPR_IDX_WORD__VSRC0_REL__SHIFT 0xc
++#define SQ_M0_GPR_IDX_WORD__VSRC1_REL__SHIFT 0xd
++#define SQ_M0_GPR_IDX_WORD__VSRC2_REL__SHIFT 0xe
++#define SQ_M0_GPR_IDX_WORD__VDST_REL__SHIFT 0xf
++#define SQ_M0_GPR_IDX_WORD__INDEX_MASK 0x000000FFL
++#define SQ_M0_GPR_IDX_WORD__VSRC0_REL_MASK 0x00001000L
++#define SQ_M0_GPR_IDX_WORD__VSRC1_REL_MASK 0x00002000L
++#define SQ_M0_GPR_IDX_WORD__VSRC2_REL_MASK 0x00004000L
++#define SQ_M0_GPR_IDX_WORD__VDST_REL_MASK 0x00008000L
++//SQC_ICACHE_UTCL1_CNTL1
++#define SQC_ICACHE_UTCL1_CNTL1__FORCE_4K_L2_RESP__SHIFT 0x0
++#define SQC_ICACHE_UTCL1_CNTL1__GPUVM_64K_DEF__SHIFT 0x1
++#define SQC_ICACHE_UTCL1_CNTL1__GPUVM_PERM_MODE__SHIFT 0x2
++#define SQC_ICACHE_UTCL1_CNTL1__RESP_MODE__SHIFT 0x3
++#define SQC_ICACHE_UTCL1_CNTL1__RESP_FAULT_MODE__SHIFT 0x5
++#define SQC_ICACHE_UTCL1_CNTL1__CLIENTID__SHIFT 0x7
++#define SQC_ICACHE_UTCL1_CNTL1__ENABLE_PUSH_LFIFO__SHIFT 0x11
++#define SQC_ICACHE_UTCL1_CNTL1__ENABLE_LFIFO_PRI_ARB__SHIFT 0x12
++#define SQC_ICACHE_UTCL1_CNTL1__REG_INVALIDATE_VMID__SHIFT 0x13
++#define SQC_ICACHE_UTCL1_CNTL1__REG_INVALIDATE_ALL_VMID__SHIFT 0x17
++#define SQC_ICACHE_UTCL1_CNTL1__REG_INVALIDATE_TOGGLE__SHIFT 0x18
++#define SQC_ICACHE_UTCL1_CNTL1__CLIENT_INVALIDATE_ALL_VMID__SHIFT 0x19
++#define SQC_ICACHE_UTCL1_CNTL1__FORCE_MISS__SHIFT 0x1a
++#define SQC_ICACHE_UTCL1_CNTL1__FORCE_IN_ORDER__SHIFT 0x1b
++#define SQC_ICACHE_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2__SHIFT 0x1c
++#define SQC_ICACHE_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2__SHIFT 0x1e
++#define SQC_ICACHE_UTCL1_CNTL1__FORCE_4K_L2_RESP_MASK 0x00000001L
++#define SQC_ICACHE_UTCL1_CNTL1__GPUVM_64K_DEF_MASK 0x00000002L
++#define SQC_ICACHE_UTCL1_CNTL1__GPUVM_PERM_MODE_MASK 0x00000004L
++#define SQC_ICACHE_UTCL1_CNTL1__RESP_MODE_MASK 0x00000018L
++#define SQC_ICACHE_UTCL1_CNTL1__RESP_FAULT_MODE_MASK 0x00000060L
++#define SQC_ICACHE_UTCL1_CNTL1__CLIENTID_MASK 0x0000FF80L
++#define SQC_ICACHE_UTCL1_CNTL1__ENABLE_PUSH_LFIFO_MASK 0x00020000L
++#define SQC_ICACHE_UTCL1_CNTL1__ENABLE_LFIFO_PRI_ARB_MASK 0x00040000L
++#define SQC_ICACHE_UTCL1_CNTL1__REG_INVALIDATE_VMID_MASK 0x00780000L
++#define SQC_ICACHE_UTCL1_CNTL1__REG_INVALIDATE_ALL_VMID_MASK 0x00800000L
++#define SQC_ICACHE_UTCL1_CNTL1__REG_INVALIDATE_TOGGLE_MASK 0x01000000L
++#define SQC_ICACHE_UTCL1_CNTL1__CLIENT_INVALIDATE_ALL_VMID_MASK 0x02000000L
++#define SQC_ICACHE_UTCL1_CNTL1__FORCE_MISS_MASK 0x04000000L
++#define SQC_ICACHE_UTCL1_CNTL1__FORCE_IN_ORDER_MASK 0x08000000L
++#define SQC_ICACHE_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2_MASK 0x30000000L
++#define SQC_ICACHE_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2_MASK 0xC0000000L
++//SQC_ICACHE_UTCL1_CNTL2
++#define SQC_ICACHE_UTCL1_CNTL2__SPARE__SHIFT 0x0
++#define SQC_ICACHE_UTCL1_CNTL2__LFIFO_SCAN_DISABLE__SHIFT 0x8
++#define SQC_ICACHE_UTCL1_CNTL2__MTYPE_OVRD_DIS__SHIFT 0x9
++#define SQC_ICACHE_UTCL1_CNTL2__LINE_VALID__SHIFT 0xa
++#define SQC_ICACHE_UTCL1_CNTL2__DIS_EDC__SHIFT 0xb
++#define SQC_ICACHE_UTCL1_CNTL2__GPUVM_INV_MODE__SHIFT 0xc
++#define SQC_ICACHE_UTCL1_CNTL2__SHOOTDOWN_OPT__SHIFT 0xd
++#define SQC_ICACHE_UTCL1_CNTL2__FORCE_SNOOP__SHIFT 0xe
++#define SQC_ICACHE_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK__SHIFT 0xf
++#define SQC_ICACHE_UTCL1_CNTL2__ARB_BURST_MODE__SHIFT 0x10
++#define SQC_ICACHE_UTCL1_CNTL2__ENABLE_PERF_EVENT_RD_WR__SHIFT 0x12
++#define SQC_ICACHE_UTCL1_CNTL2__PERF_EVENT_RD_WR__SHIFT 0x13
++#define SQC_ICACHE_UTCL1_CNTL2__ENABLE_PERF_EVENT_VMID__SHIFT 0x14
++#define SQC_ICACHE_UTCL1_CNTL2__PERF_EVENT_VMID__SHIFT 0x15
++#define SQC_ICACHE_UTCL1_CNTL2__FORCE_FRAG_2M_TO_64K__SHIFT 0x1a
++#define SQC_ICACHE_UTCL1_CNTL2__SPARE_MASK 0x000000FFL
++#define SQC_ICACHE_UTCL1_CNTL2__LFIFO_SCAN_DISABLE_MASK 0x00000100L
++#define SQC_ICACHE_UTCL1_CNTL2__MTYPE_OVRD_DIS_MASK 0x00000200L
++#define SQC_ICACHE_UTCL1_CNTL2__LINE_VALID_MASK 0x00000400L
++#define SQC_ICACHE_UTCL1_CNTL2__DIS_EDC_MASK 0x00000800L
++#define SQC_ICACHE_UTCL1_CNTL2__GPUVM_INV_MODE_MASK 0x00001000L
++#define SQC_ICACHE_UTCL1_CNTL2__SHOOTDOWN_OPT_MASK 0x00002000L
++#define SQC_ICACHE_UTCL1_CNTL2__FORCE_SNOOP_MASK 0x00004000L
++#define SQC_ICACHE_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK_MASK 0x00008000L
++#define SQC_ICACHE_UTCL1_CNTL2__ARB_BURST_MODE_MASK 0x00030000L
++#define SQC_ICACHE_UTCL1_CNTL2__ENABLE_PERF_EVENT_RD_WR_MASK 0x00040000L
++#define SQC_ICACHE_UTCL1_CNTL2__PERF_EVENT_RD_WR_MASK 0x00080000L
++#define SQC_ICACHE_UTCL1_CNTL2__ENABLE_PERF_EVENT_VMID_MASK 0x00100000L
++#define SQC_ICACHE_UTCL1_CNTL2__PERF_EVENT_VMID_MASK 0x01E00000L
++#define SQC_ICACHE_UTCL1_CNTL2__FORCE_FRAG_2M_TO_64K_MASK 0x04000000L
++//SQC_DCACHE_UTCL1_CNTL1
++#define SQC_DCACHE_UTCL1_CNTL1__FORCE_4K_L2_RESP__SHIFT 0x0
++#define SQC_DCACHE_UTCL1_CNTL1__GPUVM_64K_DEF__SHIFT 0x1
++#define SQC_DCACHE_UTCL1_CNTL1__GPUVM_PERM_MODE__SHIFT 0x2
++#define SQC_DCACHE_UTCL1_CNTL1__RESP_MODE__SHIFT 0x3
++#define SQC_DCACHE_UTCL1_CNTL1__RESP_FAULT_MODE__SHIFT 0x5
++#define SQC_DCACHE_UTCL1_CNTL1__CLIENTID__SHIFT 0x7
++#define SQC_DCACHE_UTCL1_CNTL1__ENABLE_PUSH_LFIFO__SHIFT 0x11
++#define SQC_DCACHE_UTCL1_CNTL1__ENABLE_LFIFO_PRI_ARB__SHIFT 0x12
++#define SQC_DCACHE_UTCL1_CNTL1__REG_INVALIDATE_VMID__SHIFT 0x13
++#define SQC_DCACHE_UTCL1_CNTL1__REG_INVALIDATE_ALL_VMID__SHIFT 0x17
++#define SQC_DCACHE_UTCL1_CNTL1__REG_INVALIDATE_TOGGLE__SHIFT 0x18
++#define SQC_DCACHE_UTCL1_CNTL1__CLIENT_INVALIDATE_ALL_VMID__SHIFT 0x19
++#define SQC_DCACHE_UTCL1_CNTL1__FORCE_MISS__SHIFT 0x1a
++#define SQC_DCACHE_UTCL1_CNTL1__FORCE_IN_ORDER__SHIFT 0x1b
++#define SQC_DCACHE_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2__SHIFT 0x1c
++#define SQC_DCACHE_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2__SHIFT 0x1e
++#define SQC_DCACHE_UTCL1_CNTL1__FORCE_4K_L2_RESP_MASK 0x00000001L
++#define SQC_DCACHE_UTCL1_CNTL1__GPUVM_64K_DEF_MASK 0x00000002L
++#define SQC_DCACHE_UTCL1_CNTL1__GPUVM_PERM_MODE_MASK 0x00000004L
++#define SQC_DCACHE_UTCL1_CNTL1__RESP_MODE_MASK 0x00000018L
++#define SQC_DCACHE_UTCL1_CNTL1__RESP_FAULT_MODE_MASK 0x00000060L
++#define SQC_DCACHE_UTCL1_CNTL1__CLIENTID_MASK 0x0000FF80L
++#define SQC_DCACHE_UTCL1_CNTL1__ENABLE_PUSH_LFIFO_MASK 0x00020000L
++#define SQC_DCACHE_UTCL1_CNTL1__ENABLE_LFIFO_PRI_ARB_MASK 0x00040000L
++#define SQC_DCACHE_UTCL1_CNTL1__REG_INVALIDATE_VMID_MASK 0x00780000L
++#define SQC_DCACHE_UTCL1_CNTL1__REG_INVALIDATE_ALL_VMID_MASK 0x00800000L
++#define SQC_DCACHE_UTCL1_CNTL1__REG_INVALIDATE_TOGGLE_MASK 0x01000000L
++#define SQC_DCACHE_UTCL1_CNTL1__CLIENT_INVALIDATE_ALL_VMID_MASK 0x02000000L
++#define SQC_DCACHE_UTCL1_CNTL1__FORCE_MISS_MASK 0x04000000L
++#define SQC_DCACHE_UTCL1_CNTL1__FORCE_IN_ORDER_MASK 0x08000000L
++#define SQC_DCACHE_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2_MASK 0x30000000L
++#define SQC_DCACHE_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2_MASK 0xC0000000L
++//SQC_DCACHE_UTCL1_CNTL2
++#define SQC_DCACHE_UTCL1_CNTL2__SPARE__SHIFT 0x0
++#define SQC_DCACHE_UTCL1_CNTL2__LFIFO_SCAN_DISABLE__SHIFT 0x8
++#define SQC_DCACHE_UTCL1_CNTL2__MTYPE_OVRD_DIS__SHIFT 0x9
++#define SQC_DCACHE_UTCL1_CNTL2__LINE_VALID__SHIFT 0xa
++#define SQC_DCACHE_UTCL1_CNTL2__DIS_EDC__SHIFT 0xb
++#define SQC_DCACHE_UTCL1_CNTL2__GPUVM_INV_MODE__SHIFT 0xc
++#define SQC_DCACHE_UTCL1_CNTL2__SHOOTDOWN_OPT__SHIFT 0xd
++#define SQC_DCACHE_UTCL1_CNTL2__FORCE_SNOOP__SHIFT 0xe
++#define SQC_DCACHE_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK__SHIFT 0xf
++#define SQC_DCACHE_UTCL1_CNTL2__ARB_BURST_MODE__SHIFT 0x10
++#define SQC_DCACHE_UTCL1_CNTL2__ENABLE_PERF_EVENT_RD_WR__SHIFT 0x12
++#define SQC_DCACHE_UTCL1_CNTL2__PERF_EVENT_RD_WR__SHIFT 0x13
++#define SQC_DCACHE_UTCL1_CNTL2__ENABLE_PERF_EVENT_VMID__SHIFT 0x14
++#define SQC_DCACHE_UTCL1_CNTL2__PERF_EVENT_VMID__SHIFT 0x15
++#define SQC_DCACHE_UTCL1_CNTL2__FORCE_FRAG_2M_TO_64K__SHIFT 0x1a
++#define SQC_DCACHE_UTCL1_CNTL2__SPARE_MASK 0x000000FFL
++#define SQC_DCACHE_UTCL1_CNTL2__LFIFO_SCAN_DISABLE_MASK 0x00000100L
++#define SQC_DCACHE_UTCL1_CNTL2__MTYPE_OVRD_DIS_MASK 0x00000200L
++#define SQC_DCACHE_UTCL1_CNTL2__LINE_VALID_MASK 0x00000400L
++#define SQC_DCACHE_UTCL1_CNTL2__DIS_EDC_MASK 0x00000800L
++#define SQC_DCACHE_UTCL1_CNTL2__GPUVM_INV_MODE_MASK 0x00001000L
++#define SQC_DCACHE_UTCL1_CNTL2__SHOOTDOWN_OPT_MASK 0x00002000L
++#define SQC_DCACHE_UTCL1_CNTL2__FORCE_SNOOP_MASK 0x00004000L
++#define SQC_DCACHE_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK_MASK 0x00008000L
++#define SQC_DCACHE_UTCL1_CNTL2__ARB_BURST_MODE_MASK 0x00030000L
++#define SQC_DCACHE_UTCL1_CNTL2__ENABLE_PERF_EVENT_RD_WR_MASK 0x00040000L
++#define SQC_DCACHE_UTCL1_CNTL2__PERF_EVENT_RD_WR_MASK 0x00080000L
++#define SQC_DCACHE_UTCL1_CNTL2__ENABLE_PERF_EVENT_VMID_MASK 0x00100000L
++#define SQC_DCACHE_UTCL1_CNTL2__PERF_EVENT_VMID_MASK 0x01E00000L
++#define SQC_DCACHE_UTCL1_CNTL2__FORCE_FRAG_2M_TO_64K_MASK 0x04000000L
++//SQC_ICACHE_UTCL1_STATUS
++#define SQC_ICACHE_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
++#define SQC_ICACHE_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
++#define SQC_ICACHE_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
++#define SQC_ICACHE_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
++#define SQC_ICACHE_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
++#define SQC_ICACHE_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
++//SQC_DCACHE_UTCL1_STATUS
++#define SQC_DCACHE_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
++#define SQC_DCACHE_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
++#define SQC_DCACHE_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
++#define SQC_DCACHE_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
++#define SQC_DCACHE_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
++#define SQC_DCACHE_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
++
++
++// addressBlock: gc_shsdec
++//SX_DEBUG_1
++#define SX_DEBUG_1__SX_DB_QUAD_CREDIT__SHIFT 0x0
++#define SX_DEBUG_1__DISABLE_BLEND_OPT_DONT_RD_DST__SHIFT 0x8
++#define SX_DEBUG_1__DISABLE_BLEND_OPT_BYPASS__SHIFT 0x9
++#define SX_DEBUG_1__DISABLE_BLEND_OPT_DISCARD_PIXEL__SHIFT 0xa
++#define SX_DEBUG_1__DISABLE_QUAD_PAIR_OPT__SHIFT 0xb
++#define SX_DEBUG_1__DISABLE_PIX_EN_ZERO_OPT__SHIFT 0xc
++#define SX_DEBUG_1__PC_CFG__SHIFT 0xd
++#define SX_DEBUG_1__DEBUG_DATA__SHIFT 0xe
++#define SX_DEBUG_1__SX_DB_QUAD_CREDIT_MASK 0x0000007FL
++#define SX_DEBUG_1__DISABLE_BLEND_OPT_DONT_RD_DST_MASK 0x00000100L
++#define SX_DEBUG_1__DISABLE_BLEND_OPT_BYPASS_MASK 0x00000200L
++#define SX_DEBUG_1__DISABLE_BLEND_OPT_DISCARD_PIXEL_MASK 0x00000400L
++#define SX_DEBUG_1__DISABLE_QUAD_PAIR_OPT_MASK 0x00000800L
++#define SX_DEBUG_1__DISABLE_PIX_EN_ZERO_OPT_MASK 0x00001000L
++#define SX_DEBUG_1__PC_CFG_MASK 0x00002000L
++#define SX_DEBUG_1__DEBUG_DATA_MASK 0xFFFFC000L
++//SPI_PS_MAX_WAVE_ID
++#define SPI_PS_MAX_WAVE_ID__MAX_WAVE_ID__SHIFT 0x0
++#define SPI_PS_MAX_WAVE_ID__MAX_COLLISION_WAVE_ID__SHIFT 0x10
++#define SPI_PS_MAX_WAVE_ID__MAX_WAVE_ID_MASK 0x00000FFFL
++#define SPI_PS_MAX_WAVE_ID__MAX_COLLISION_WAVE_ID_MASK 0x03FF0000L
++//SPI_START_PHASE
++#define SPI_START_PHASE__VGPR_START_PHASE__SHIFT 0x0
++#define SPI_START_PHASE__SGPR_START_PHASE__SHIFT 0x2
++#define SPI_START_PHASE__WAVE_START_PHASE__SHIFT 0x4
++#define SPI_START_PHASE__VGPR_START_PHASE_MASK 0x00000003L
++#define SPI_START_PHASE__SGPR_START_PHASE_MASK 0x0000000CL
++#define SPI_START_PHASE__WAVE_START_PHASE_MASK 0x00000030L
++//SPI_GFX_CNTL
++#define SPI_GFX_CNTL__RESET_COUNTS__SHIFT 0x0
++#define SPI_GFX_CNTL__RESET_COUNTS_MASK 0x00000001L
++//SPI_DSM_CNTL
++#define SPI_DSM_CNTL__SPI_SR_MEM_DSM_IRRITATOR_DATA__SHIFT 0x0
++#define SPI_DSM_CNTL__SPI_SR_MEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
++#define SPI_DSM_CNTL__UNUSED__SHIFT 0x3
++#define SPI_DSM_CNTL__SPI_SR_MEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
++#define SPI_DSM_CNTL__SPI_SR_MEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
++#define SPI_DSM_CNTL__UNUSED_MASK 0xFFFFFFF8L
++//SPI_DSM_CNTL2
++#define SPI_DSM_CNTL2__SPI_SR_MEM_ENABLE_ERROR_INJECT__SHIFT 0x0
++#define SPI_DSM_CNTL2__SPI_SR_MEM_SELECT_INJECT_DELAY__SHIFT 0x2
++#define SPI_DSM_CNTL2__SPI_SR_MEM_INJECT_DELAY__SHIFT 0x4
++#define SPI_DSM_CNTL2__UNUSED__SHIFT 0xa
++#define SPI_DSM_CNTL2__SPI_SR_MEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
++#define SPI_DSM_CNTL2__SPI_SR_MEM_SELECT_INJECT_DELAY_MASK 0x00000004L
++#define SPI_DSM_CNTL2__SPI_SR_MEM_INJECT_DELAY_MASK 0x000003F0L
++#define SPI_DSM_CNTL2__UNUSED_MASK 0xFFFFFC00L
++//SPI_EDC_CNT
++#define SPI_EDC_CNT__SPI_SR_MEM_SED_COUNT__SHIFT 0x0
++#define SPI_EDC_CNT__SPI_SR_MEM_SED_COUNT_MASK 0x00000003L
++//SPI_CONFIG_PS_CU_EN
++#define SPI_CONFIG_PS_CU_EN__ENABLE__SHIFT 0x0
++#define SPI_CONFIG_PS_CU_EN__PKR0_CU_EN__SHIFT 0x1
++#define SPI_CONFIG_PS_CU_EN__PKR1_CU_EN__SHIFT 0x10
++#define SPI_CONFIG_PS_CU_EN__ENABLE_MASK 0x00000001L
++#define SPI_CONFIG_PS_CU_EN__PKR0_CU_EN_MASK 0x0000FFFEL
++#define SPI_CONFIG_PS_CU_EN__PKR1_CU_EN_MASK 0xFFFF0000L
++//SPI_WF_LIFETIME_CNTL
++#define SPI_WF_LIFETIME_CNTL__SAMPLE_PERIOD__SHIFT 0x0
++#define SPI_WF_LIFETIME_CNTL__EN__SHIFT 0x4
++#define SPI_WF_LIFETIME_CNTL__SAMPLE_PERIOD_MASK 0x0000000FL
++#define SPI_WF_LIFETIME_CNTL__EN_MASK 0x00000010L
++//SPI_WF_LIFETIME_LIMIT_0
++#define SPI_WF_LIFETIME_LIMIT_0__MAX_CNT__SHIFT 0x0
++#define SPI_WF_LIFETIME_LIMIT_0__EN_WARN__SHIFT 0x1f
++#define SPI_WF_LIFETIME_LIMIT_0__MAX_CNT_MASK 0x7FFFFFFFL
++#define SPI_WF_LIFETIME_LIMIT_0__EN_WARN_MASK 0x80000000L
++//SPI_WF_LIFETIME_LIMIT_1
++#define SPI_WF_LIFETIME_LIMIT_1__MAX_CNT__SHIFT 0x0
++#define SPI_WF_LIFETIME_LIMIT_1__EN_WARN__SHIFT 0x1f
++#define SPI_WF_LIFETIME_LIMIT_1__MAX_CNT_MASK 0x7FFFFFFFL
++#define SPI_WF_LIFETIME_LIMIT_1__EN_WARN_MASK 0x80000000L
++//SPI_WF_LIFETIME_LIMIT_2
++#define SPI_WF_LIFETIME_LIMIT_2__MAX_CNT__SHIFT 0x0
++#define SPI_WF_LIFETIME_LIMIT_2__EN_WARN__SHIFT 0x1f
++#define SPI_WF_LIFETIME_LIMIT_2__MAX_CNT_MASK 0x7FFFFFFFL
++#define SPI_WF_LIFETIME_LIMIT_2__EN_WARN_MASK 0x80000000L
++//SPI_WF_LIFETIME_LIMIT_3
++#define SPI_WF_LIFETIME_LIMIT_3__MAX_CNT__SHIFT 0x0
++#define SPI_WF_LIFETIME_LIMIT_3__EN_WARN__SHIFT 0x1f
++#define SPI_WF_LIFETIME_LIMIT_3__MAX_CNT_MASK 0x7FFFFFFFL
++#define SPI_WF_LIFETIME_LIMIT_3__EN_WARN_MASK 0x80000000L
++//SPI_WF_LIFETIME_LIMIT_4
++#define SPI_WF_LIFETIME_LIMIT_4__MAX_CNT__SHIFT 0x0
++#define SPI_WF_LIFETIME_LIMIT_4__EN_WARN__SHIFT 0x1f
++#define SPI_WF_LIFETIME_LIMIT_4__MAX_CNT_MASK 0x7FFFFFFFL
++#define SPI_WF_LIFETIME_LIMIT_4__EN_WARN_MASK 0x80000000L
++//SPI_WF_LIFETIME_LIMIT_5
++#define SPI_WF_LIFETIME_LIMIT_5__MAX_CNT__SHIFT 0x0
++#define SPI_WF_LIFETIME_LIMIT_5__EN_WARN__SHIFT 0x1f
++#define SPI_WF_LIFETIME_LIMIT_5__MAX_CNT_MASK 0x7FFFFFFFL
++#define SPI_WF_LIFETIME_LIMIT_5__EN_WARN_MASK 0x80000000L
++//SPI_WF_LIFETIME_LIMIT_6
++#define SPI_WF_LIFETIME_LIMIT_6__MAX_CNT__SHIFT 0x0
++#define SPI_WF_LIFETIME_LIMIT_6__EN_WARN__SHIFT 0x1f
++#define SPI_WF_LIFETIME_LIMIT_6__MAX_CNT_MASK 0x7FFFFFFFL
++#define SPI_WF_LIFETIME_LIMIT_6__EN_WARN_MASK 0x80000000L
++//SPI_WF_LIFETIME_LIMIT_7
++#define SPI_WF_LIFETIME_LIMIT_7__MAX_CNT__SHIFT 0x0
++#define SPI_WF_LIFETIME_LIMIT_7__EN_WARN__SHIFT 0x1f
++#define SPI_WF_LIFETIME_LIMIT_7__MAX_CNT_MASK 0x7FFFFFFFL
++#define SPI_WF_LIFETIME_LIMIT_7__EN_WARN_MASK 0x80000000L
++//SPI_WF_LIFETIME_LIMIT_8
++#define SPI_WF_LIFETIME_LIMIT_8__MAX_CNT__SHIFT 0x0
++#define SPI_WF_LIFETIME_LIMIT_8__EN_WARN__SHIFT 0x1f
++#define SPI_WF_LIFETIME_LIMIT_8__MAX_CNT_MASK 0x7FFFFFFFL
++#define SPI_WF_LIFETIME_LIMIT_8__EN_WARN_MASK 0x80000000L
++//SPI_WF_LIFETIME_LIMIT_9
++#define SPI_WF_LIFETIME_LIMIT_9__MAX_CNT__SHIFT 0x0
++#define SPI_WF_LIFETIME_LIMIT_9__EN_WARN__SHIFT 0x1f
++#define SPI_WF_LIFETIME_LIMIT_9__MAX_CNT_MASK 0x7FFFFFFFL
++#define SPI_WF_LIFETIME_LIMIT_9__EN_WARN_MASK 0x80000000L
++//SPI_WF_LIFETIME_STATUS_0
++#define SPI_WF_LIFETIME_STATUS_0__MAX_CNT__SHIFT 0x0
++#define SPI_WF_LIFETIME_STATUS_0__INT_SENT__SHIFT 0x1f
++#define SPI_WF_LIFETIME_STATUS_0__MAX_CNT_MASK 0x7FFFFFFFL
++#define SPI_WF_LIFETIME_STATUS_0__INT_SENT_MASK 0x80000000L
++//SPI_WF_LIFETIME_STATUS_1
++#define SPI_WF_LIFETIME_STATUS_1__MAX_CNT__SHIFT 0x0
++#define SPI_WF_LIFETIME_STATUS_1__INT_SENT__SHIFT 0x1f
++#define SPI_WF_LIFETIME_STATUS_1__MAX_CNT_MASK 0x7FFFFFFFL
++#define SPI_WF_LIFETIME_STATUS_1__INT_SENT_MASK 0x80000000L
++//SPI_WF_LIFETIME_STATUS_2
++#define SPI_WF_LIFETIME_STATUS_2__MAX_CNT__SHIFT 0x0
++#define SPI_WF_LIFETIME_STATUS_2__INT_SENT__SHIFT 0x1f
++#define SPI_WF_LIFETIME_STATUS_2__MAX_CNT_MASK 0x7FFFFFFFL
++#define SPI_WF_LIFETIME_STATUS_2__INT_SENT_MASK 0x80000000L
++//SPI_WF_LIFETIME_STATUS_3
++#define SPI_WF_LIFETIME_STATUS_3__MAX_CNT__SHIFT 0x0
++#define SPI_WF_LIFETIME_STATUS_3__INT_SENT__SHIFT 0x1f
++#define SPI_WF_LIFETIME_STATUS_3__MAX_CNT_MASK 0x7FFFFFFFL
++#define SPI_WF_LIFETIME_STATUS_3__INT_SENT_MASK 0x80000000L
++//SPI_WF_LIFETIME_STATUS_4
++#define SPI_WF_LIFETIME_STATUS_4__MAX_CNT__SHIFT 0x0
++#define SPI_WF_LIFETIME_STATUS_4__INT_SENT__SHIFT 0x1f
++#define SPI_WF_LIFETIME_STATUS_4__MAX_CNT_MASK 0x7FFFFFFFL
++#define SPI_WF_LIFETIME_STATUS_4__INT_SENT_MASK 0x80000000L
++//SPI_WF_LIFETIME_STATUS_5
++#define SPI_WF_LIFETIME_STATUS_5__MAX_CNT__SHIFT 0x0
++#define SPI_WF_LIFETIME_STATUS_5__INT_SENT__SHIFT 0x1f
++#define SPI_WF_LIFETIME_STATUS_5__MAX_CNT_MASK 0x7FFFFFFFL
++#define SPI_WF_LIFETIME_STATUS_5__INT_SENT_MASK 0x80000000L
++//SPI_WF_LIFETIME_STATUS_6
++#define SPI_WF_LIFETIME_STATUS_6__MAX_CNT__SHIFT 0x0
++#define SPI_WF_LIFETIME_STATUS_6__INT_SENT__SHIFT 0x1f
++#define SPI_WF_LIFETIME_STATUS_6__MAX_CNT_MASK 0x7FFFFFFFL
++#define SPI_WF_LIFETIME_STATUS_6__INT_SENT_MASK 0x80000000L
++//SPI_WF_LIFETIME_STATUS_7
++#define SPI_WF_LIFETIME_STATUS_7__MAX_CNT__SHIFT 0x0
++#define SPI_WF_LIFETIME_STATUS_7__INT_SENT__SHIFT 0x1f
++#define SPI_WF_LIFETIME_STATUS_7__MAX_CNT_MASK 0x7FFFFFFFL
++#define SPI_WF_LIFETIME_STATUS_7__INT_SENT_MASK 0x80000000L
++//SPI_WF_LIFETIME_STATUS_8
++#define SPI_WF_LIFETIME_STATUS_8__MAX_CNT__SHIFT 0x0
++#define SPI_WF_LIFETIME_STATUS_8__INT_SENT__SHIFT 0x1f
++#define SPI_WF_LIFETIME_STATUS_8__MAX_CNT_MASK 0x7FFFFFFFL
++#define SPI_WF_LIFETIME_STATUS_8__INT_SENT_MASK 0x80000000L
++//SPI_WF_LIFETIME_STATUS_9
++#define SPI_WF_LIFETIME_STATUS_9__MAX_CNT__SHIFT 0x0
++#define SPI_WF_LIFETIME_STATUS_9__INT_SENT__SHIFT 0x1f
++#define SPI_WF_LIFETIME_STATUS_9__MAX_CNT_MASK 0x7FFFFFFFL
++#define SPI_WF_LIFETIME_STATUS_9__INT_SENT_MASK 0x80000000L
++//SPI_WF_LIFETIME_STATUS_10
++#define SPI_WF_LIFETIME_STATUS_10__MAX_CNT__SHIFT 0x0
++#define SPI_WF_LIFETIME_STATUS_10__INT_SENT__SHIFT 0x1f
++#define SPI_WF_LIFETIME_STATUS_10__MAX_CNT_MASK 0x7FFFFFFFL
++#define SPI_WF_LIFETIME_STATUS_10__INT_SENT_MASK 0x80000000L
++//SPI_WF_LIFETIME_STATUS_11
++#define SPI_WF_LIFETIME_STATUS_11__MAX_CNT__SHIFT 0x0
++#define SPI_WF_LIFETIME_STATUS_11__INT_SENT__SHIFT 0x1f
++#define SPI_WF_LIFETIME_STATUS_11__MAX_CNT_MASK 0x7FFFFFFFL
++#define SPI_WF_LIFETIME_STATUS_11__INT_SENT_MASK 0x80000000L
++//SPI_WF_LIFETIME_STATUS_12
++#define SPI_WF_LIFETIME_STATUS_12__MAX_CNT__SHIFT 0x0
++#define SPI_WF_LIFETIME_STATUS_12__INT_SENT__SHIFT 0x1f
++#define SPI_WF_LIFETIME_STATUS_12__MAX_CNT_MASK 0x7FFFFFFFL
++#define SPI_WF_LIFETIME_STATUS_12__INT_SENT_MASK 0x80000000L
++//SPI_WF_LIFETIME_STATUS_13
++#define SPI_WF_LIFETIME_STATUS_13__MAX_CNT__SHIFT 0x0
++#define SPI_WF_LIFETIME_STATUS_13__INT_SENT__SHIFT 0x1f
++#define SPI_WF_LIFETIME_STATUS_13__MAX_CNT_MASK 0x7FFFFFFFL
++#define SPI_WF_LIFETIME_STATUS_13__INT_SENT_MASK 0x80000000L
++//SPI_WF_LIFETIME_STATUS_14
++#define SPI_WF_LIFETIME_STATUS_14__MAX_CNT__SHIFT 0x0
++#define SPI_WF_LIFETIME_STATUS_14__INT_SENT__SHIFT 0x1f
++#define SPI_WF_LIFETIME_STATUS_14__MAX_CNT_MASK 0x7FFFFFFFL
++#define SPI_WF_LIFETIME_STATUS_14__INT_SENT_MASK 0x80000000L
++//SPI_WF_LIFETIME_STATUS_15
++#define SPI_WF_LIFETIME_STATUS_15__MAX_CNT__SHIFT 0x0
++#define SPI_WF_LIFETIME_STATUS_15__INT_SENT__SHIFT 0x1f
++#define SPI_WF_LIFETIME_STATUS_15__MAX_CNT_MASK 0x7FFFFFFFL
++#define SPI_WF_LIFETIME_STATUS_15__INT_SENT_MASK 0x80000000L
++//SPI_WF_LIFETIME_STATUS_16
++#define SPI_WF_LIFETIME_STATUS_16__MAX_CNT__SHIFT 0x0
++#define SPI_WF_LIFETIME_STATUS_16__INT_SENT__SHIFT 0x1f
++#define SPI_WF_LIFETIME_STATUS_16__MAX_CNT_MASK 0x7FFFFFFFL
++#define SPI_WF_LIFETIME_STATUS_16__INT_SENT_MASK 0x80000000L
++//SPI_WF_LIFETIME_STATUS_17
++#define SPI_WF_LIFETIME_STATUS_17__MAX_CNT__SHIFT 0x0
++#define SPI_WF_LIFETIME_STATUS_17__INT_SENT__SHIFT 0x1f
++#define SPI_WF_LIFETIME_STATUS_17__MAX_CNT_MASK 0x7FFFFFFFL
++#define SPI_WF_LIFETIME_STATUS_17__INT_SENT_MASK 0x80000000L
++//SPI_WF_LIFETIME_STATUS_18
++#define SPI_WF_LIFETIME_STATUS_18__MAX_CNT__SHIFT 0x0
++#define SPI_WF_LIFETIME_STATUS_18__INT_SENT__SHIFT 0x1f
++#define SPI_WF_LIFETIME_STATUS_18__MAX_CNT_MASK 0x7FFFFFFFL
++#define SPI_WF_LIFETIME_STATUS_18__INT_SENT_MASK 0x80000000L
++//SPI_WF_LIFETIME_STATUS_19
++#define SPI_WF_LIFETIME_STATUS_19__MAX_CNT__SHIFT 0x0
++#define SPI_WF_LIFETIME_STATUS_19__INT_SENT__SHIFT 0x1f
++#define SPI_WF_LIFETIME_STATUS_19__MAX_CNT_MASK 0x7FFFFFFFL
++#define SPI_WF_LIFETIME_STATUS_19__INT_SENT_MASK 0x80000000L
++//SPI_WF_LIFETIME_STATUS_20
++#define SPI_WF_LIFETIME_STATUS_20__MAX_CNT__SHIFT 0x0
++#define SPI_WF_LIFETIME_STATUS_20__INT_SENT__SHIFT 0x1f
++#define SPI_WF_LIFETIME_STATUS_20__MAX_CNT_MASK 0x7FFFFFFFL
++#define SPI_WF_LIFETIME_STATUS_20__INT_SENT_MASK 0x80000000L
++//SPI_LB_CTR_CTRL
++#define SPI_LB_CTR_CTRL__LOAD__SHIFT 0x0
++#define SPI_LB_CTR_CTRL__WAVES_SELECT__SHIFT 0x1
++#define SPI_LB_CTR_CTRL__CLEAR_ON_READ__SHIFT 0x3
++#define SPI_LB_CTR_CTRL__RESET_COUNTS__SHIFT 0x4
++#define SPI_LB_CTR_CTRL__LOAD_MASK 0x00000001L
++#define SPI_LB_CTR_CTRL__WAVES_SELECT_MASK 0x00000006L
++#define SPI_LB_CTR_CTRL__CLEAR_ON_READ_MASK 0x00000008L
++#define SPI_LB_CTR_CTRL__RESET_COUNTS_MASK 0x00000010L
++//SPI_LB_CU_MASK
++#define SPI_LB_CU_MASK__CU_MASK__SHIFT 0x0
++#define SPI_LB_CU_MASK__CU_MASK_MASK 0xFFFFL
++//SPI_LB_DATA_REG
++#define SPI_LB_DATA_REG__CNT_DATA__SHIFT 0x0
++#define SPI_LB_DATA_REG__CNT_DATA_MASK 0xFFFFFFFFL
++//SPI_PG_ENABLE_STATIC_CU_MASK
++#define SPI_PG_ENABLE_STATIC_CU_MASK__CU_MASK__SHIFT 0x0
++#define SPI_PG_ENABLE_STATIC_CU_MASK__CU_MASK_MASK 0xFFFFL
++//SPI_GDS_CREDITS
++#define SPI_GDS_CREDITS__DS_DATA_CREDITS__SHIFT 0x0
++#define SPI_GDS_CREDITS__DS_CMD_CREDITS__SHIFT 0x8
++#define SPI_GDS_CREDITS__UNUSED__SHIFT 0x10
++#define SPI_GDS_CREDITS__DS_DATA_CREDITS_MASK 0x000000FFL
++#define SPI_GDS_CREDITS__DS_CMD_CREDITS_MASK 0x0000FF00L
++#define SPI_GDS_CREDITS__UNUSED_MASK 0xFFFF0000L
++//SPI_SX_EXPORT_BUFFER_SIZES
++#define SPI_SX_EXPORT_BUFFER_SIZES__COLOR_BUFFER_SIZE__SHIFT 0x0
++#define SPI_SX_EXPORT_BUFFER_SIZES__POSITION_BUFFER_SIZE__SHIFT 0x10
++#define SPI_SX_EXPORT_BUFFER_SIZES__COLOR_BUFFER_SIZE_MASK 0x0000FFFFL
++#define SPI_SX_EXPORT_BUFFER_SIZES__POSITION_BUFFER_SIZE_MASK 0xFFFF0000L
++//SPI_SX_SCOREBOARD_BUFFER_SIZES
++#define SPI_SX_SCOREBOARD_BUFFER_SIZES__COLOR_SCOREBOARD_SIZE__SHIFT 0x0
++#define SPI_SX_SCOREBOARD_BUFFER_SIZES__POSITION_SCOREBOARD_SIZE__SHIFT 0x10
++#define SPI_SX_SCOREBOARD_BUFFER_SIZES__COLOR_SCOREBOARD_SIZE_MASK 0x0000FFFFL
++#define SPI_SX_SCOREBOARD_BUFFER_SIZES__POSITION_SCOREBOARD_SIZE_MASK 0xFFFF0000L
++//SPI_CSQ_WF_ACTIVE_STATUS
++#define SPI_CSQ_WF_ACTIVE_STATUS__ACTIVE__SHIFT 0x0
++#define SPI_CSQ_WF_ACTIVE_STATUS__ACTIVE_MASK 0xFFFFFFFFL
++//SPI_CSQ_WF_ACTIVE_COUNT_0
++#define SPI_CSQ_WF_ACTIVE_COUNT_0__COUNT__SHIFT 0x0
++#define SPI_CSQ_WF_ACTIVE_COUNT_0__EVENTS__SHIFT 0x10
++#define SPI_CSQ_WF_ACTIVE_COUNT_0__COUNT_MASK 0x000007FFL
++#define SPI_CSQ_WF_ACTIVE_COUNT_0__EVENTS_MASK 0x07FF0000L
++//SPI_CSQ_WF_ACTIVE_COUNT_1
++#define SPI_CSQ_WF_ACTIVE_COUNT_1__COUNT__SHIFT 0x0
++#define SPI_CSQ_WF_ACTIVE_COUNT_1__EVENTS__SHIFT 0x10
++#define SPI_CSQ_WF_ACTIVE_COUNT_1__COUNT_MASK 0x000007FFL
++#define SPI_CSQ_WF_ACTIVE_COUNT_1__EVENTS_MASK 0x07FF0000L
++//SPI_CSQ_WF_ACTIVE_COUNT_2
++#define SPI_CSQ_WF_ACTIVE_COUNT_2__COUNT__SHIFT 0x0
++#define SPI_CSQ_WF_ACTIVE_COUNT_2__EVENTS__SHIFT 0x10
++#define SPI_CSQ_WF_ACTIVE_COUNT_2__COUNT_MASK 0x000007FFL
++#define SPI_CSQ_WF_ACTIVE_COUNT_2__EVENTS_MASK 0x07FF0000L
++//SPI_CSQ_WF_ACTIVE_COUNT_3
++#define SPI_CSQ_WF_ACTIVE_COUNT_3__COUNT__SHIFT 0x0
++#define SPI_CSQ_WF_ACTIVE_COUNT_3__EVENTS__SHIFT 0x10
++#define SPI_CSQ_WF_ACTIVE_COUNT_3__COUNT_MASK 0x000007FFL
++#define SPI_CSQ_WF_ACTIVE_COUNT_3__EVENTS_MASK 0x07FF0000L
++//SPI_CSQ_WF_ACTIVE_COUNT_4
++#define SPI_CSQ_WF_ACTIVE_COUNT_4__COUNT__SHIFT 0x0
++#define SPI_CSQ_WF_ACTIVE_COUNT_4__EVENTS__SHIFT 0x10
++#define SPI_CSQ_WF_ACTIVE_COUNT_4__COUNT_MASK 0x000007FFL
++#define SPI_CSQ_WF_ACTIVE_COUNT_4__EVENTS_MASK 0x07FF0000L
++//SPI_CSQ_WF_ACTIVE_COUNT_5
++#define SPI_CSQ_WF_ACTIVE_COUNT_5__COUNT__SHIFT 0x0
++#define SPI_CSQ_WF_ACTIVE_COUNT_5__EVENTS__SHIFT 0x10
++#define SPI_CSQ_WF_ACTIVE_COUNT_5__COUNT_MASK 0x000007FFL
++#define SPI_CSQ_WF_ACTIVE_COUNT_5__EVENTS_MASK 0x07FF0000L
++//SPI_CSQ_WF_ACTIVE_COUNT_6
++#define SPI_CSQ_WF_ACTIVE_COUNT_6__COUNT__SHIFT 0x0
++#define SPI_CSQ_WF_ACTIVE_COUNT_6__EVENTS__SHIFT 0x10
++#define SPI_CSQ_WF_ACTIVE_COUNT_6__COUNT_MASK 0x000007FFL
++#define SPI_CSQ_WF_ACTIVE_COUNT_6__EVENTS_MASK 0x07FF0000L
++//SPI_CSQ_WF_ACTIVE_COUNT_7
++#define SPI_CSQ_WF_ACTIVE_COUNT_7__COUNT__SHIFT 0x0
++#define SPI_CSQ_WF_ACTIVE_COUNT_7__EVENTS__SHIFT 0x10
++#define SPI_CSQ_WF_ACTIVE_COUNT_7__COUNT_MASK 0x000007FFL
++#define SPI_CSQ_WF_ACTIVE_COUNT_7__EVENTS_MASK 0x07FF0000L
++//SPI_LB_DATA_WAVES
++#define SPI_LB_DATA_WAVES__COUNT0__SHIFT 0x0
++#define SPI_LB_DATA_WAVES__COUNT1__SHIFT 0x10
++#define SPI_LB_DATA_WAVES__COUNT0_MASK 0x0000FFFFL
++#define SPI_LB_DATA_WAVES__COUNT1_MASK 0xFFFF0000L
++//SPI_LB_DATA_PERCU_WAVE_HSGS
++#define SPI_LB_DATA_PERCU_WAVE_HSGS__CU_USED_HS__SHIFT 0x0
++#define SPI_LB_DATA_PERCU_WAVE_HSGS__CU_USED_GS__SHIFT 0x10
++#define SPI_LB_DATA_PERCU_WAVE_HSGS__CU_USED_HS_MASK 0x0000FFFFL
++#define SPI_LB_DATA_PERCU_WAVE_HSGS__CU_USED_GS_MASK 0xFFFF0000L
++//SPI_LB_DATA_PERCU_WAVE_VSPS
++#define SPI_LB_DATA_PERCU_WAVE_VSPS__CU_USED_VS__SHIFT 0x0
++#define SPI_LB_DATA_PERCU_WAVE_VSPS__CU_USED_PS__SHIFT 0x10
++#define SPI_LB_DATA_PERCU_WAVE_VSPS__CU_USED_VS_MASK 0x0000FFFFL
++#define SPI_LB_DATA_PERCU_WAVE_VSPS__CU_USED_PS_MASK 0xFFFF0000L
++//SPI_LB_DATA_PERCU_WAVE_CS
++#define SPI_LB_DATA_PERCU_WAVE_CS__ACTIVE__SHIFT 0x0
++#define SPI_LB_DATA_PERCU_WAVE_CS__ACTIVE_MASK 0xFFFFL
++//SPI_P0_TRAP_SCREEN_PSBA_LO
++#define SPI_P0_TRAP_SCREEN_PSBA_LO__MEM_BASE__SHIFT 0x0
++#define SPI_P0_TRAP_SCREEN_PSBA_LO__MEM_BASE_MASK 0xFFFFFFFFL
++//SPI_P0_TRAP_SCREEN_PSBA_HI
++#define SPI_P0_TRAP_SCREEN_PSBA_HI__MEM_BASE__SHIFT 0x0
++#define SPI_P0_TRAP_SCREEN_PSBA_HI__MEM_BASE_MASK 0xFFL
++//SPI_P0_TRAP_SCREEN_PSMA_LO
++#define SPI_P0_TRAP_SCREEN_PSMA_LO__MEM_BASE__SHIFT 0x0
++#define SPI_P0_TRAP_SCREEN_PSMA_LO__MEM_BASE_MASK 0xFFFFFFFFL
++//SPI_P0_TRAP_SCREEN_PSMA_HI
++#define SPI_P0_TRAP_SCREEN_PSMA_HI__MEM_BASE__SHIFT 0x0
++#define SPI_P0_TRAP_SCREEN_PSMA_HI__MEM_BASE_MASK 0xFFL
++//SPI_P0_TRAP_SCREEN_GPR_MIN
++#define SPI_P0_TRAP_SCREEN_GPR_MIN__VGPR_MIN__SHIFT 0x0
++#define SPI_P0_TRAP_SCREEN_GPR_MIN__SGPR_MIN__SHIFT 0x6
++#define SPI_P0_TRAP_SCREEN_GPR_MIN__VGPR_MIN_MASK 0x003FL
++#define SPI_P0_TRAP_SCREEN_GPR_MIN__SGPR_MIN_MASK 0x03C0L
++//SPI_P1_TRAP_SCREEN_PSBA_LO
++#define SPI_P1_TRAP_SCREEN_PSBA_LO__MEM_BASE__SHIFT 0x0
++#define SPI_P1_TRAP_SCREEN_PSBA_LO__MEM_BASE_MASK 0xFFFFFFFFL
++//SPI_P1_TRAP_SCREEN_PSBA_HI
++#define SPI_P1_TRAP_SCREEN_PSBA_HI__MEM_BASE__SHIFT 0x0
++#define SPI_P1_TRAP_SCREEN_PSBA_HI__MEM_BASE_MASK 0xFFL
++//SPI_P1_TRAP_SCREEN_PSMA_LO
++#define SPI_P1_TRAP_SCREEN_PSMA_LO__MEM_BASE__SHIFT 0x0
++#define SPI_P1_TRAP_SCREEN_PSMA_LO__MEM_BASE_MASK 0xFFFFFFFFL
++//SPI_P1_TRAP_SCREEN_PSMA_HI
++#define SPI_P1_TRAP_SCREEN_PSMA_HI__MEM_BASE__SHIFT 0x0
++#define SPI_P1_TRAP_SCREEN_PSMA_HI__MEM_BASE_MASK 0xFFL
++//SPI_P1_TRAP_SCREEN_GPR_MIN
++#define SPI_P1_TRAP_SCREEN_GPR_MIN__VGPR_MIN__SHIFT 0x0
++#define SPI_P1_TRAP_SCREEN_GPR_MIN__SGPR_MIN__SHIFT 0x6
++#define SPI_P1_TRAP_SCREEN_GPR_MIN__VGPR_MIN_MASK 0x003FL
++#define SPI_P1_TRAP_SCREEN_GPR_MIN__SGPR_MIN_MASK 0x03C0L
++
++
++// addressBlock: gc_tpdec
++//TD_CNTL
++#define TD_CNTL__SYNC_PHASE_SH__SHIFT 0x0
++#define TD_CNTL__SYNC_PHASE_VC_SMX__SHIFT 0x4
++#define TD_CNTL__PAD_STALL_EN__SHIFT 0x8
++#define TD_CNTL__EXTEND_LDS_STALL__SHIFT 0x9
++#define TD_CNTL__LDS_STALL_PHASE_ADJUST__SHIFT 0xb
++#define TD_CNTL__PRECISION_COMPATIBILITY__SHIFT 0xf
++#define TD_CNTL__GATHER4_FLOAT_MODE__SHIFT 0x10
++#define TD_CNTL__LD_FLOAT_MODE__SHIFT 0x12
++#define TD_CNTL__GATHER4_DX9_MODE__SHIFT 0x13
++#define TD_CNTL__DISABLE_POWER_THROTTLE__SHIFT 0x14
++#define TD_CNTL__ENABLE_ROUND_TO_ZERO__SHIFT 0x15
++#define TD_CNTL__DISABLE_2BIT_SIGNED_FORMAT__SHIFT 0x17
++#define TD_CNTL__DISABLE_MM_QNAN_COMPARE_RESULT__SHIFT 0x18
++#define TD_CNTL__SYNC_PHASE_SH_MASK 0x00000003L
++#define TD_CNTL__SYNC_PHASE_VC_SMX_MASK 0x00000030L
++#define TD_CNTL__PAD_STALL_EN_MASK 0x00000100L
++#define TD_CNTL__EXTEND_LDS_STALL_MASK 0x00000600L
++#define TD_CNTL__LDS_STALL_PHASE_ADJUST_MASK 0x00001800L
++#define TD_CNTL__PRECISION_COMPATIBILITY_MASK 0x00008000L
++#define TD_CNTL__GATHER4_FLOAT_MODE_MASK 0x00010000L
++#define TD_CNTL__LD_FLOAT_MODE_MASK 0x00040000L
++#define TD_CNTL__GATHER4_DX9_MODE_MASK 0x00080000L
++#define TD_CNTL__DISABLE_POWER_THROTTLE_MASK 0x00100000L
++#define TD_CNTL__ENABLE_ROUND_TO_ZERO_MASK 0x00200000L
++#define TD_CNTL__DISABLE_2BIT_SIGNED_FORMAT_MASK 0x00800000L
++#define TD_CNTL__DISABLE_MM_QNAN_COMPARE_RESULT_MASK 0x01000000L
++//TD_STATUS
++#define TD_STATUS__BUSY__SHIFT 0x1f
++#define TD_STATUS__BUSY_MASK 0x80000000L
++//TD_DSM_CNTL
++#define TD_DSM_CNTL__TD_SS_FIFO_LO_DSM_IRRITATOR_DATA__SHIFT 0x0
++#define TD_DSM_CNTL__TD_SS_FIFO_LO_ENABLE_SINGLE_WRITE__SHIFT 0x2
++#define TD_DSM_CNTL__TD_SS_FIFO_HI_DSM_IRRITATOR_DATA__SHIFT 0x3
++#define TD_DSM_CNTL__TD_SS_FIFO_HI_ENABLE_SINGLE_WRITE__SHIFT 0x5
++#define TD_DSM_CNTL__TD_CS_FIFO_DSM_IRRITATOR_DATA__SHIFT 0x6
++#define TD_DSM_CNTL__TD_CS_FIFO_ENABLE_SINGLE_WRITE__SHIFT 0x8
++#define TD_DSM_CNTL__TD_SS_FIFO_LO_DSM_IRRITATOR_DATA_MASK 0x00000003L
++#define TD_DSM_CNTL__TD_SS_FIFO_LO_ENABLE_SINGLE_WRITE_MASK 0x00000004L
++#define TD_DSM_CNTL__TD_SS_FIFO_HI_DSM_IRRITATOR_DATA_MASK 0x00000018L
++#define TD_DSM_CNTL__TD_SS_FIFO_HI_ENABLE_SINGLE_WRITE_MASK 0x00000020L
++#define TD_DSM_CNTL__TD_CS_FIFO_DSM_IRRITATOR_DATA_MASK 0x000000C0L
++#define TD_DSM_CNTL__TD_CS_FIFO_ENABLE_SINGLE_WRITE_MASK 0x00000100L
++//TD_DSM_CNTL2
++#define TD_DSM_CNTL2__TD_SS_FIFO_LO_ENABLE_ERROR_INJECT__SHIFT 0x0
++#define TD_DSM_CNTL2__TD_SS_FIFO_LO_SELECT_INJECT_DELAY__SHIFT 0x2
++#define TD_DSM_CNTL2__TD_SS_FIFO_HI_ENABLE_ERROR_INJECT__SHIFT 0x3
++#define TD_DSM_CNTL2__TD_SS_FIFO_HI_SELECT_INJECT_DELAY__SHIFT 0x5
++#define TD_DSM_CNTL2__TD_CS_FIFO_ENABLE_ERROR_INJECT__SHIFT 0x6
++#define TD_DSM_CNTL2__TD_CS_FIFO_SELECT_INJECT_DELAY__SHIFT 0x8
++#define TD_DSM_CNTL2__TD_INJECT_DELAY__SHIFT 0x1a
++#define TD_DSM_CNTL2__TD_SS_FIFO_LO_ENABLE_ERROR_INJECT_MASK 0x00000003L
++#define TD_DSM_CNTL2__TD_SS_FIFO_LO_SELECT_INJECT_DELAY_MASK 0x00000004L
++#define TD_DSM_CNTL2__TD_SS_FIFO_HI_ENABLE_ERROR_INJECT_MASK 0x00000018L
++#define TD_DSM_CNTL2__TD_SS_FIFO_HI_SELECT_INJECT_DELAY_MASK 0x00000020L
++#define TD_DSM_CNTL2__TD_CS_FIFO_ENABLE_ERROR_INJECT_MASK 0x000000C0L
++#define TD_DSM_CNTL2__TD_CS_FIFO_SELECT_INJECT_DELAY_MASK 0x00000100L
++#define TD_DSM_CNTL2__TD_INJECT_DELAY_MASK 0xFC000000L
++//TD_SCRATCH
++#define TD_SCRATCH__SCRATCH__SHIFT 0x0
++#define TD_SCRATCH__SCRATCH_MASK 0xFFFFFFFFL
++//TA_CNTL
++#define TA_CNTL__FX_XNACK_CREDIT__SHIFT 0x0
++#define TA_CNTL__SQ_XNACK_CREDIT__SHIFT 0x9
++#define TA_CNTL__TC_DATA_CREDIT__SHIFT 0xd
++#define TA_CNTL__ALIGNER_CREDIT__SHIFT 0x10
++#define TA_CNTL__TD_FIFO_CREDIT__SHIFT 0x16
++#define TA_CNTL__FX_XNACK_CREDIT_MASK 0x0000007FL
++#define TA_CNTL__SQ_XNACK_CREDIT_MASK 0x00001E00L
++#define TA_CNTL__TC_DATA_CREDIT_MASK 0x0000E000L
++#define TA_CNTL__ALIGNER_CREDIT_MASK 0x001F0000L
++#define TA_CNTL__TD_FIFO_CREDIT_MASK 0xFFC00000L
++//TA_CNTL_AUX
++#define TA_CNTL_AUX__SCOAL_DSWIZZLE_N__SHIFT 0x0
++#define TA_CNTL_AUX__RESERVED__SHIFT 0x1
++#define TA_CNTL_AUX__TFAULT_EN_OVERRIDE__SHIFT 0x5
++#define TA_CNTL_AUX__GATHERH_DST_SEL__SHIFT 0x6
++#define TA_CNTL_AUX__DISABLE_GATHER4_BC_SWIZZLE__SHIFT 0x7
++#define TA_CNTL_AUX__NONIMG_ANISO_BYPASS__SHIFT 0x9
++#define TA_CNTL_AUX__ANISO_HALF_THRESH__SHIFT 0xa
++#define TA_CNTL_AUX__ANISO_ERROR_FP_VBIAS__SHIFT 0xc
++#define TA_CNTL_AUX__ANISO_STEP_ORDER__SHIFT 0xd
++#define TA_CNTL_AUX__ANISO_STEP__SHIFT 0xe
++#define TA_CNTL_AUX__MINMAG_UNNORM__SHIFT 0xf
++#define TA_CNTL_AUX__ANISO_WEIGHT_MODE__SHIFT 0x10
++#define TA_CNTL_AUX__ANISO_RATIO_LUT__SHIFT 0x11
++#define TA_CNTL_AUX__ANISO_TAP__SHIFT 0x12
++#define TA_CNTL_AUX__ANISO_MIP_ADJ_MODE__SHIFT 0x13
++#define TA_CNTL_AUX__DETERMINISM_RESERVED_DISABLE__SHIFT 0x14
++#define TA_CNTL_AUX__DETERMINISM_OPCODE_STRICT_DISABLE__SHIFT 0x15
++#define TA_CNTL_AUX__DETERMINISM_MISC_DISABLE__SHIFT 0x16
++#define TA_CNTL_AUX__DETERMINISM_SAMPLE_C_DFMT_DISABLE__SHIFT 0x17
++#define TA_CNTL_AUX__DETERMINISM_SAMPLER_MSAA_DISABLE__SHIFT 0x18
++#define TA_CNTL_AUX__DETERMINISM_WRITEOP_READFMT_DISABLE__SHIFT 0x19
++#define TA_CNTL_AUX__DETERMINISM_DFMT_NFMT_DISABLE__SHIFT 0x1a
++#define TA_CNTL_AUX__DISABLE_DWORD_X2_COALESCE__SHIFT 0x1b
++#define TA_CNTL_AUX__CUBEMAP_SLICE_CLAMP__SHIFT 0x1c
++#define TA_CNTL_AUX__TRUNC_SMALL_NEG__SHIFT 0x1d
++#define TA_CNTL_AUX__ARRAY_ROUND_MODE__SHIFT 0x1e
++#define TA_CNTL_AUX__SCOAL_DSWIZZLE_N_MASK 0x00000001L
++#define TA_CNTL_AUX__RESERVED_MASK 0x0000000EL
++#define TA_CNTL_AUX__TFAULT_EN_OVERRIDE_MASK 0x00000020L
++#define TA_CNTL_AUX__GATHERH_DST_SEL_MASK 0x00000040L
++#define TA_CNTL_AUX__DISABLE_GATHER4_BC_SWIZZLE_MASK 0x00000080L
++#define TA_CNTL_AUX__NONIMG_ANISO_BYPASS_MASK 0x00000200L
++#define TA_CNTL_AUX__ANISO_HALF_THRESH_MASK 0x00000C00L
++#define TA_CNTL_AUX__ANISO_ERROR_FP_VBIAS_MASK 0x00001000L
++#define TA_CNTL_AUX__ANISO_STEP_ORDER_MASK 0x00002000L
++#define TA_CNTL_AUX__ANISO_STEP_MASK 0x00004000L
++#define TA_CNTL_AUX__MINMAG_UNNORM_MASK 0x00008000L
++#define TA_CNTL_AUX__ANISO_WEIGHT_MODE_MASK 0x00010000L
++#define TA_CNTL_AUX__ANISO_RATIO_LUT_MASK 0x00020000L
++#define TA_CNTL_AUX__ANISO_TAP_MASK 0x00040000L
++#define TA_CNTL_AUX__ANISO_MIP_ADJ_MODE_MASK 0x00080000L
++#define TA_CNTL_AUX__DETERMINISM_RESERVED_DISABLE_MASK 0x00100000L
++#define TA_CNTL_AUX__DETERMINISM_OPCODE_STRICT_DISABLE_MASK 0x00200000L
++#define TA_CNTL_AUX__DETERMINISM_MISC_DISABLE_MASK 0x00400000L
++#define TA_CNTL_AUX__DETERMINISM_SAMPLE_C_DFMT_DISABLE_MASK 0x00800000L
++#define TA_CNTL_AUX__DETERMINISM_SAMPLER_MSAA_DISABLE_MASK 0x01000000L
++#define TA_CNTL_AUX__DETERMINISM_WRITEOP_READFMT_DISABLE_MASK 0x02000000L
++#define TA_CNTL_AUX__DETERMINISM_DFMT_NFMT_DISABLE_MASK 0x04000000L
++#define TA_CNTL_AUX__DISABLE_DWORD_X2_COALESCE_MASK 0x08000000L
++#define TA_CNTL_AUX__CUBEMAP_SLICE_CLAMP_MASK 0x10000000L
++#define TA_CNTL_AUX__TRUNC_SMALL_NEG_MASK 0x20000000L
++#define TA_CNTL_AUX__ARRAY_ROUND_MODE_MASK 0xC0000000L
++//TA_RESERVED_010C
++#define TA_RESERVED_010C__Unused__SHIFT 0x0
++#define TA_RESERVED_010C__Unused_MASK 0xFFFFFFFFL
++//TA_STATUS
++#define TA_STATUS__FG_PFIFO_EMPTYB__SHIFT 0xc
++#define TA_STATUS__FG_LFIFO_EMPTYB__SHIFT 0xd
++#define TA_STATUS__FG_SFIFO_EMPTYB__SHIFT 0xe
++#define TA_STATUS__FL_PFIFO_EMPTYB__SHIFT 0x10
++#define TA_STATUS__FL_LFIFO_EMPTYB__SHIFT 0x11
++#define TA_STATUS__FL_SFIFO_EMPTYB__SHIFT 0x12
++#define TA_STATUS__FA_PFIFO_EMPTYB__SHIFT 0x14
++#define TA_STATUS__FA_LFIFO_EMPTYB__SHIFT 0x15
++#define TA_STATUS__FA_SFIFO_EMPTYB__SHIFT 0x16
++#define TA_STATUS__IN_BUSY__SHIFT 0x18
++#define TA_STATUS__FG_BUSY__SHIFT 0x19
++#define TA_STATUS__LA_BUSY__SHIFT 0x1a
++#define TA_STATUS__FL_BUSY__SHIFT 0x1b
++#define TA_STATUS__TA_BUSY__SHIFT 0x1c
++#define TA_STATUS__FA_BUSY__SHIFT 0x1d
++#define TA_STATUS__AL_BUSY__SHIFT 0x1e
++#define TA_STATUS__BUSY__SHIFT 0x1f
++#define TA_STATUS__FG_PFIFO_EMPTYB_MASK 0x00001000L
++#define TA_STATUS__FG_LFIFO_EMPTYB_MASK 0x00002000L
++#define TA_STATUS__FG_SFIFO_EMPTYB_MASK 0x00004000L
++#define TA_STATUS__FL_PFIFO_EMPTYB_MASK 0x00010000L
++#define TA_STATUS__FL_LFIFO_EMPTYB_MASK 0x00020000L
++#define TA_STATUS__FL_SFIFO_EMPTYB_MASK 0x00040000L
++#define TA_STATUS__FA_PFIFO_EMPTYB_MASK 0x00100000L
++#define TA_STATUS__FA_LFIFO_EMPTYB_MASK 0x00200000L
++#define TA_STATUS__FA_SFIFO_EMPTYB_MASK 0x00400000L
++#define TA_STATUS__IN_BUSY_MASK 0x01000000L
++#define TA_STATUS__FG_BUSY_MASK 0x02000000L
++#define TA_STATUS__LA_BUSY_MASK 0x04000000L
++#define TA_STATUS__FL_BUSY_MASK 0x08000000L
++#define TA_STATUS__TA_BUSY_MASK 0x10000000L
++#define TA_STATUS__FA_BUSY_MASK 0x20000000L
++#define TA_STATUS__AL_BUSY_MASK 0x40000000L
++#define TA_STATUS__BUSY_MASK 0x80000000L
++//TA_SCRATCH
++#define TA_SCRATCH__SCRATCH__SHIFT 0x0
++#define TA_SCRATCH__SCRATCH_MASK 0xFFFFFFFFL
++
++
++// addressBlock: gc_gdsdec
++//GDS_CONFIG
++#define GDS_CONFIG__SH0_GPR_PHASE_SEL__SHIFT 0x1
++#define GDS_CONFIG__SH1_GPR_PHASE_SEL__SHIFT 0x3
++#define GDS_CONFIG__SH2_GPR_PHASE_SEL__SHIFT 0x5
++#define GDS_CONFIG__SH3_GPR_PHASE_SEL__SHIFT 0x7
++#define GDS_CONFIG__SH0_GPR_PHASE_SEL_MASK 0x00000006L
++#define GDS_CONFIG__SH1_GPR_PHASE_SEL_MASK 0x00000018L
++#define GDS_CONFIG__SH2_GPR_PHASE_SEL_MASK 0x00000060L
++#define GDS_CONFIG__SH3_GPR_PHASE_SEL_MASK 0x00000180L
++//GDS_CNTL_STATUS
++#define GDS_CNTL_STATUS__GDS_BUSY__SHIFT 0x0
++#define GDS_CNTL_STATUS__GRBM_WBUF_BUSY__SHIFT 0x1
++#define GDS_CNTL_STATUS__ORD_APP_BUSY__SHIFT 0x2
++#define GDS_CNTL_STATUS__DS_BANK_CONFLICT__SHIFT 0x3
++#define GDS_CNTL_STATUS__DS_ADDR_CONFLICT__SHIFT 0x4
++#define GDS_CNTL_STATUS__DS_WR_CLAMP__SHIFT 0x5
++#define GDS_CNTL_STATUS__DS_RD_CLAMP__SHIFT 0x6
++#define GDS_CNTL_STATUS__GRBM_RBUF_BUSY__SHIFT 0x7
++#define GDS_CNTL_STATUS__DS_BUSY__SHIFT 0x8
++#define GDS_CNTL_STATUS__GWS_BUSY__SHIFT 0x9
++#define GDS_CNTL_STATUS__ORD_FIFO_BUSY__SHIFT 0xa
++#define GDS_CNTL_STATUS__CREDIT_BUSY0__SHIFT 0xb
++#define GDS_CNTL_STATUS__CREDIT_BUSY1__SHIFT 0xc
++#define GDS_CNTL_STATUS__CREDIT_BUSY2__SHIFT 0xd
++#define GDS_CNTL_STATUS__CREDIT_BUSY3__SHIFT 0xe
++#define GDS_CNTL_STATUS__GDS_BUSY_MASK 0x00000001L
++#define GDS_CNTL_STATUS__GRBM_WBUF_BUSY_MASK 0x00000002L
++#define GDS_CNTL_STATUS__ORD_APP_BUSY_MASK 0x00000004L
++#define GDS_CNTL_STATUS__DS_BANK_CONFLICT_MASK 0x00000008L
++#define GDS_CNTL_STATUS__DS_ADDR_CONFLICT_MASK 0x00000010L
++#define GDS_CNTL_STATUS__DS_WR_CLAMP_MASK 0x00000020L
++#define GDS_CNTL_STATUS__DS_RD_CLAMP_MASK 0x00000040L
++#define GDS_CNTL_STATUS__GRBM_RBUF_BUSY_MASK 0x00000080L
++#define GDS_CNTL_STATUS__DS_BUSY_MASK 0x00000100L
++#define GDS_CNTL_STATUS__GWS_BUSY_MASK 0x00000200L
++#define GDS_CNTL_STATUS__ORD_FIFO_BUSY_MASK 0x00000400L
++#define GDS_CNTL_STATUS__CREDIT_BUSY0_MASK 0x00000800L
++#define GDS_CNTL_STATUS__CREDIT_BUSY1_MASK 0x00001000L
++#define GDS_CNTL_STATUS__CREDIT_BUSY2_MASK 0x00002000L
++#define GDS_CNTL_STATUS__CREDIT_BUSY3_MASK 0x00004000L
++//GDS_ENHANCE2
++#define GDS_ENHANCE2__MISC__SHIFT 0x0
++#define GDS_ENHANCE2__UNUSED__SHIFT 0x10
++#define GDS_ENHANCE2__MISC_MASK 0x0000FFFFL
++#define GDS_ENHANCE2__UNUSED_MASK 0xFFFF0000L
++//GDS_PROTECTION_FAULT
++#define GDS_PROTECTION_FAULT__WRITE_DIS__SHIFT 0x0
++#define GDS_PROTECTION_FAULT__FAULT_DETECTED__SHIFT 0x1
++#define GDS_PROTECTION_FAULT__GRBM__SHIFT 0x2
++#define GDS_PROTECTION_FAULT__SH_ID__SHIFT 0x3
++#define GDS_PROTECTION_FAULT__CU_ID__SHIFT 0x6
++#define GDS_PROTECTION_FAULT__SIMD_ID__SHIFT 0xa
++#define GDS_PROTECTION_FAULT__WAVE_ID__SHIFT 0xc
++#define GDS_PROTECTION_FAULT__ADDRESS__SHIFT 0x10
++#define GDS_PROTECTION_FAULT__WRITE_DIS_MASK 0x00000001L
++#define GDS_PROTECTION_FAULT__FAULT_DETECTED_MASK 0x00000002L
++#define GDS_PROTECTION_FAULT__GRBM_MASK 0x00000004L
++#define GDS_PROTECTION_FAULT__SH_ID_MASK 0x00000038L
++#define GDS_PROTECTION_FAULT__CU_ID_MASK 0x000003C0L
++#define GDS_PROTECTION_FAULT__SIMD_ID_MASK 0x00000C00L
++#define GDS_PROTECTION_FAULT__WAVE_ID_MASK 0x0000F000L
++#define GDS_PROTECTION_FAULT__ADDRESS_MASK 0xFFFF0000L
++//GDS_VM_PROTECTION_FAULT
++#define GDS_VM_PROTECTION_FAULT__WRITE_DIS__SHIFT 0x0
++#define GDS_VM_PROTECTION_FAULT__FAULT_DETECTED__SHIFT 0x1
++#define GDS_VM_PROTECTION_FAULT__GWS__SHIFT 0x2
++#define GDS_VM_PROTECTION_FAULT__OA__SHIFT 0x3
++#define GDS_VM_PROTECTION_FAULT__GRBM__SHIFT 0x4
++#define GDS_VM_PROTECTION_FAULT__TMZ__SHIFT 0x5
++#define GDS_VM_PROTECTION_FAULT__VMID__SHIFT 0x8
++#define GDS_VM_PROTECTION_FAULT__ADDRESS__SHIFT 0x10
++#define GDS_VM_PROTECTION_FAULT__WRITE_DIS_MASK 0x00000001L
++#define GDS_VM_PROTECTION_FAULT__FAULT_DETECTED_MASK 0x00000002L
++#define GDS_VM_PROTECTION_FAULT__GWS_MASK 0x00000004L
++#define GDS_VM_PROTECTION_FAULT__OA_MASK 0x00000008L
++#define GDS_VM_PROTECTION_FAULT__GRBM_MASK 0x00000010L
++#define GDS_VM_PROTECTION_FAULT__TMZ_MASK 0x00000020L
++#define GDS_VM_PROTECTION_FAULT__VMID_MASK 0x00000F00L
++#define GDS_VM_PROTECTION_FAULT__ADDRESS_MASK 0xFFFF0000L
++//GDS_EDC_CNT
++#define GDS_EDC_CNT__GDS_MEM_DED__SHIFT 0x0
++#define GDS_EDC_CNT__GDS_INPUT_QUEUE_SED__SHIFT 0x2
++#define GDS_EDC_CNT__GDS_MEM_SEC__SHIFT 0x4
++#define GDS_EDC_CNT__UNUSED__SHIFT 0x6
++#define GDS_EDC_CNT__GDS_MEM_DED_MASK 0x00000003L
++#define GDS_EDC_CNT__GDS_INPUT_QUEUE_SED_MASK 0x0000000CL
++#define GDS_EDC_CNT__GDS_MEM_SEC_MASK 0x00000030L
++#define GDS_EDC_CNT__UNUSED_MASK 0xFFFFFFC0L
++//GDS_EDC_GRBM_CNT
++#define GDS_EDC_GRBM_CNT__DED__SHIFT 0x0
++#define GDS_EDC_GRBM_CNT__SEC__SHIFT 0x2
++#define GDS_EDC_GRBM_CNT__UNUSED__SHIFT 0x4
++#define GDS_EDC_GRBM_CNT__DED_MASK 0x00000003L
++#define GDS_EDC_GRBM_CNT__SEC_MASK 0x0000000CL
++#define GDS_EDC_GRBM_CNT__UNUSED_MASK 0xFFFFFFF0L
++//GDS_EDC_OA_DED
++#define GDS_EDC_OA_DED__ME0_GFXHP3D_PIX_DED__SHIFT 0x0
++#define GDS_EDC_OA_DED__ME0_GFXHP3D_VTX_DED__SHIFT 0x1
++#define GDS_EDC_OA_DED__ME0_CS_DED__SHIFT 0x2
++#define GDS_EDC_OA_DED__ME0_GFXHP3D_GS_DED__SHIFT 0x3
++#define GDS_EDC_OA_DED__ME1_PIPE0_DED__SHIFT 0x4
++#define GDS_EDC_OA_DED__ME1_PIPE1_DED__SHIFT 0x5
++#define GDS_EDC_OA_DED__ME1_PIPE2_DED__SHIFT 0x6
++#define GDS_EDC_OA_DED__ME1_PIPE3_DED__SHIFT 0x7
++#define GDS_EDC_OA_DED__ME2_PIPE0_DED__SHIFT 0x8
++#define GDS_EDC_OA_DED__ME2_PIPE1_DED__SHIFT 0x9
++#define GDS_EDC_OA_DED__ME2_PIPE2_DED__SHIFT 0xa
++#define GDS_EDC_OA_DED__ME2_PIPE3_DED__SHIFT 0xb
++#define GDS_EDC_OA_DED__UNUSED1__SHIFT 0xc
++#define GDS_EDC_OA_DED__ME0_GFXHP3D_PIX_DED_MASK 0x00000001L
++#define GDS_EDC_OA_DED__ME0_GFXHP3D_VTX_DED_MASK 0x00000002L
++#define GDS_EDC_OA_DED__ME0_CS_DED_MASK 0x00000004L
++#define GDS_EDC_OA_DED__ME0_GFXHP3D_GS_DED_MASK 0x00000008L
++#define GDS_EDC_OA_DED__ME1_PIPE0_DED_MASK 0x00000010L
++#define GDS_EDC_OA_DED__ME1_PIPE1_DED_MASK 0x00000020L
++#define GDS_EDC_OA_DED__ME1_PIPE2_DED_MASK 0x00000040L
++#define GDS_EDC_OA_DED__ME1_PIPE3_DED_MASK 0x00000080L
++#define GDS_EDC_OA_DED__ME2_PIPE0_DED_MASK 0x00000100L
++#define GDS_EDC_OA_DED__ME2_PIPE1_DED_MASK 0x00000200L
++#define GDS_EDC_OA_DED__ME2_PIPE2_DED_MASK 0x00000400L
++#define GDS_EDC_OA_DED__ME2_PIPE3_DED_MASK 0x00000800L
++#define GDS_EDC_OA_DED__UNUSED1_MASK 0xFFFFF000L
++//GDS_DSM_CNTL
++#define GDS_DSM_CNTL__SEL_DSM_GDS_MEM_IRRITATOR_DATA_0__SHIFT 0x0
++#define GDS_DSM_CNTL__SEL_DSM_GDS_MEM_IRRITATOR_DATA_1__SHIFT 0x1
++#define GDS_DSM_CNTL__GDS_MEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
++#define GDS_DSM_CNTL__SEL_DSM_GDS_INPUT_QUEUE_IRRITATOR_DATA_0__SHIFT 0x3
++#define GDS_DSM_CNTL__SEL_DSM_GDS_INPUT_QUEUE_IRRITATOR_DATA_1__SHIFT 0x4
++#define GDS_DSM_CNTL__GDS_INPUT_QUEUE_ENABLE_SINGLE_WRITE__SHIFT 0x5
++#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_CMD_RAM_IRRITATOR_DATA_0__SHIFT 0x6
++#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_CMD_RAM_IRRITATOR_DATA_1__SHIFT 0x7
++#define GDS_DSM_CNTL__GDS_PHY_CMD_RAM_ENABLE_SINGLE_WRITE__SHIFT 0x8
++#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_DATA_RAM_IRRITATOR_DATA_0__SHIFT 0x9
++#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_DATA_RAM_IRRITATOR_DATA_1__SHIFT 0xa
++#define GDS_DSM_CNTL__GDS_PHY_DATA_RAM_ENABLE_SINGLE_WRITE__SHIFT 0xb
++#define GDS_DSM_CNTL__SEL_DSM_GDS_PIPE_MEM_IRRITATOR_DATA_0__SHIFT 0xc
++#define GDS_DSM_CNTL__SEL_DSM_GDS_PIPE_MEM_IRRITATOR_DATA_1__SHIFT 0xd
++#define GDS_DSM_CNTL__GDS_PIPE_MEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
++#define GDS_DSM_CNTL__UNUSED__SHIFT 0xf
++#define GDS_DSM_CNTL__SEL_DSM_GDS_MEM_IRRITATOR_DATA_0_MASK 0x00000001L
++#define GDS_DSM_CNTL__SEL_DSM_GDS_MEM_IRRITATOR_DATA_1_MASK 0x00000002L
++#define GDS_DSM_CNTL__GDS_MEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
++#define GDS_DSM_CNTL__SEL_DSM_GDS_INPUT_QUEUE_IRRITATOR_DATA_0_MASK 0x00000008L
++#define GDS_DSM_CNTL__SEL_DSM_GDS_INPUT_QUEUE_IRRITATOR_DATA_1_MASK 0x00000010L
++#define GDS_DSM_CNTL__GDS_INPUT_QUEUE_ENABLE_SINGLE_WRITE_MASK 0x00000020L
++#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_CMD_RAM_IRRITATOR_DATA_0_MASK 0x00000040L
++#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_CMD_RAM_IRRITATOR_DATA_1_MASK 0x00000080L
++#define GDS_DSM_CNTL__GDS_PHY_CMD_RAM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
++#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_DATA_RAM_IRRITATOR_DATA_0_MASK 0x00000200L
++#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_DATA_RAM_IRRITATOR_DATA_1_MASK 0x00000400L
++#define GDS_DSM_CNTL__GDS_PHY_DATA_RAM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
++#define GDS_DSM_CNTL__SEL_DSM_GDS_PIPE_MEM_IRRITATOR_DATA_0_MASK 0x00001000L
++#define GDS_DSM_CNTL__SEL_DSM_GDS_PIPE_MEM_IRRITATOR_DATA_1_MASK 0x00002000L
++#define GDS_DSM_CNTL__GDS_PIPE_MEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
++#define GDS_DSM_CNTL__UNUSED_MASK 0xFFFF8000L
++//GDS_EDC_OA_PHY_CNT
++#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_SEC__SHIFT 0x0
++#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_DED__SHIFT 0x2
++#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_SEC__SHIFT 0x4
++#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_DED__SHIFT 0x6
++#define GDS_EDC_OA_PHY_CNT__PHY_DATA_RAM_MEM_SED__SHIFT 0x8
++#define GDS_EDC_OA_PHY_CNT__UNUSED1__SHIFT 0xa
++#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_SEC_MASK 0x00000003L
++#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_DED_MASK 0x0000000CL
++#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_SEC_MASK 0x00000030L
++#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_DED_MASK 0x000000C0L
++#define GDS_EDC_OA_PHY_CNT__PHY_DATA_RAM_MEM_SED_MASK 0x00000300L
++#define GDS_EDC_OA_PHY_CNT__UNUSED1_MASK 0xFFFFFC00L
++//GDS_EDC_OA_PIPE_CNT
++#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_SEC__SHIFT 0x0
++#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_DED__SHIFT 0x2
++#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_SEC__SHIFT 0x4
++#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_DED__SHIFT 0x6
++#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_SEC__SHIFT 0x8
++#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_DED__SHIFT 0xa
++#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_SEC__SHIFT 0xc
++#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_DED__SHIFT 0xe
++#define GDS_EDC_OA_PIPE_CNT__UNUSED__SHIFT 0x10
++#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_SEC_MASK 0x00000003L
++#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_DED_MASK 0x0000000CL
++#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_SEC_MASK 0x00000030L
++#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_DED_MASK 0x000000C0L
++#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_SEC_MASK 0x00000300L
++#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_DED_MASK 0x00000C00L
++#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_SEC_MASK 0x00003000L
++#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_DED_MASK 0x0000C000L
++#define GDS_EDC_OA_PIPE_CNT__UNUSED_MASK 0xFFFF0000L
++//GDS_DSM_CNTL2
++#define GDS_DSM_CNTL2__GDS_MEM_ENABLE_ERROR_INJECT__SHIFT 0x0
++#define GDS_DSM_CNTL2__GDS_MEM_SELECT_INJECT_DELAY__SHIFT 0x2
++#define GDS_DSM_CNTL2__GDS_INPUT_QUEUE_ENABLE_ERROR_INJECT__SHIFT 0x3
++#define GDS_DSM_CNTL2__GDS_INPUT_QUEUE_SELECT_INJECT_DELAY__SHIFT 0x5
++#define GDS_DSM_CNTL2__GDS_PHY_CMD_RAM_ENABLE_ERROR_INJECT__SHIFT 0x6
++#define GDS_DSM_CNTL2__GDS_PHY_CMD_RAM_SELECT_INJECT_DELAY__SHIFT 0x8
++#define GDS_DSM_CNTL2__GDS_PHY_DATA_RAM_ENABLE_ERROR_INJECT__SHIFT 0x9
++#define GDS_DSM_CNTL2__GDS_PHY_DATA_RAM_SELECT_INJECT_DELAY__SHIFT 0xb
++#define GDS_DSM_CNTL2__GDS_PIPE_MEM_ENABLE_ERROR_INJECT__SHIFT 0xc
++#define GDS_DSM_CNTL2__GDS_PIPE_MEM_SELECT_INJECT_DELAY__SHIFT 0xe
++#define GDS_DSM_CNTL2__UNUSED__SHIFT 0xf
++#define GDS_DSM_CNTL2__GDS_INJECT_DELAY__SHIFT 0x1a
++#define GDS_DSM_CNTL2__GDS_MEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
++#define GDS_DSM_CNTL2__GDS_MEM_SELECT_INJECT_DELAY_MASK 0x00000004L
++#define GDS_DSM_CNTL2__GDS_INPUT_QUEUE_ENABLE_ERROR_INJECT_MASK 0x00000018L
++#define GDS_DSM_CNTL2__GDS_INPUT_QUEUE_SELECT_INJECT_DELAY_MASK 0x00000020L
++#define GDS_DSM_CNTL2__GDS_PHY_CMD_RAM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
++#define GDS_DSM_CNTL2__GDS_PHY_CMD_RAM_SELECT_INJECT_DELAY_MASK 0x00000100L
++#define GDS_DSM_CNTL2__GDS_PHY_DATA_RAM_ENABLE_ERROR_INJECT_MASK 0x00000600L
++#define GDS_DSM_CNTL2__GDS_PHY_DATA_RAM_SELECT_INJECT_DELAY_MASK 0x00000800L
++#define GDS_DSM_CNTL2__GDS_PIPE_MEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
++#define GDS_DSM_CNTL2__GDS_PIPE_MEM_SELECT_INJECT_DELAY_MASK 0x00004000L
++#define GDS_DSM_CNTL2__UNUSED_MASK 0x03FF8000L
++#define GDS_DSM_CNTL2__GDS_INJECT_DELAY_MASK 0xFC000000L
++//GDS_WD_GDS_CSB
++#define GDS_WD_GDS_CSB__COUNTER__SHIFT 0x0
++#define GDS_WD_GDS_CSB__UNUSED__SHIFT 0xd
++#define GDS_WD_GDS_CSB__COUNTER_MASK 0x00001FFFL
++#define GDS_WD_GDS_CSB__UNUSED_MASK 0xFFFFE000L
++
++
++// addressBlock: gc_rbdec
++//DB_DEBUG
++#define DB_DEBUG__DEBUG_STENCIL_COMPRESS_DISABLE__SHIFT 0x0
++#define DB_DEBUG__DEBUG_DEPTH_COMPRESS_DISABLE__SHIFT 0x1
++#define DB_DEBUG__FETCH_FULL_Z_TILE__SHIFT 0x2
++#define DB_DEBUG__FETCH_FULL_STENCIL_TILE__SHIFT 0x3
++#define DB_DEBUG__FORCE_Z_MODE__SHIFT 0x4
++#define DB_DEBUG__DEBUG_FORCE_DEPTH_READ__SHIFT 0x6
++#define DB_DEBUG__DEBUG_FORCE_STENCIL_READ__SHIFT 0x7
++#define DB_DEBUG__DEBUG_FORCE_HIZ_ENABLE__SHIFT 0x8
++#define DB_DEBUG__DEBUG_FORCE_HIS_ENABLE0__SHIFT 0xa
++#define DB_DEBUG__DEBUG_FORCE_HIS_ENABLE1__SHIFT 0xc
++#define DB_DEBUG__DEBUG_FAST_Z_DISABLE__SHIFT 0xe
++#define DB_DEBUG__DEBUG_FAST_STENCIL_DISABLE__SHIFT 0xf
++#define DB_DEBUG__DEBUG_NOOP_CULL_DISABLE__SHIFT 0x10
++#define DB_DEBUG__DISABLE_SUMM_SQUADS__SHIFT 0x11
++#define DB_DEBUG__DEPTH_CACHE_FORCE_MISS__SHIFT 0x12
++#define DB_DEBUG__DEBUG_FORCE_FULL_Z_RANGE__SHIFT 0x13
++#define DB_DEBUG__NEVER_FREE_Z_ONLY__SHIFT 0x15
++#define DB_DEBUG__ZPASS_COUNTS_LOOK_AT_PIPE_STAT_EVENTS__SHIFT 0x16
++#define DB_DEBUG__DISABLE_VPORT_ZPLANE_OPTIMIZATION__SHIFT 0x17
++#define DB_DEBUG__DECOMPRESS_AFTER_N_ZPLANES__SHIFT 0x18
++#define DB_DEBUG__ONE_FREE_IN_FLIGHT__SHIFT 0x1c
++#define DB_DEBUG__FORCE_MISS_IF_NOT_INFLIGHT__SHIFT 0x1d
++#define DB_DEBUG__DISABLE_DEPTH_SURFACE_SYNC__SHIFT 0x1e
++#define DB_DEBUG__DISABLE_HTILE_SURFACE_SYNC__SHIFT 0x1f
++#define DB_DEBUG__DEBUG_STENCIL_COMPRESS_DISABLE_MASK 0x00000001L
++#define DB_DEBUG__DEBUG_DEPTH_COMPRESS_DISABLE_MASK 0x00000002L
++#define DB_DEBUG__FETCH_FULL_Z_TILE_MASK 0x00000004L
++#define DB_DEBUG__FETCH_FULL_STENCIL_TILE_MASK 0x00000008L
++#define DB_DEBUG__FORCE_Z_MODE_MASK 0x00000030L
++#define DB_DEBUG__DEBUG_FORCE_DEPTH_READ_MASK 0x00000040L
++#define DB_DEBUG__DEBUG_FORCE_STENCIL_READ_MASK 0x00000080L
++#define DB_DEBUG__DEBUG_FORCE_HIZ_ENABLE_MASK 0x00000300L
++#define DB_DEBUG__DEBUG_FORCE_HIS_ENABLE0_MASK 0x00000C00L
++#define DB_DEBUG__DEBUG_FORCE_HIS_ENABLE1_MASK 0x00003000L
++#define DB_DEBUG__DEBUG_FAST_Z_DISABLE_MASK 0x00004000L
++#define DB_DEBUG__DEBUG_FAST_STENCIL_DISABLE_MASK 0x00008000L
++#define DB_DEBUG__DEBUG_NOOP_CULL_DISABLE_MASK 0x00010000L
++#define DB_DEBUG__DISABLE_SUMM_SQUADS_MASK 0x00020000L
++#define DB_DEBUG__DEPTH_CACHE_FORCE_MISS_MASK 0x00040000L
++#define DB_DEBUG__DEBUG_FORCE_FULL_Z_RANGE_MASK 0x00180000L
++#define DB_DEBUG__NEVER_FREE_Z_ONLY_MASK 0x00200000L
++#define DB_DEBUG__ZPASS_COUNTS_LOOK_AT_PIPE_STAT_EVENTS_MASK 0x00400000L
++#define DB_DEBUG__DISABLE_VPORT_ZPLANE_OPTIMIZATION_MASK 0x00800000L
++#define DB_DEBUG__DECOMPRESS_AFTER_N_ZPLANES_MASK 0x0F000000L
++#define DB_DEBUG__ONE_FREE_IN_FLIGHT_MASK 0x10000000L
++#define DB_DEBUG__FORCE_MISS_IF_NOT_INFLIGHT_MASK 0x20000000L
++#define DB_DEBUG__DISABLE_DEPTH_SURFACE_SYNC_MASK 0x40000000L
++#define DB_DEBUG__DISABLE_HTILE_SURFACE_SYNC_MASK 0x80000000L
++//DB_DEBUG2
++#define DB_DEBUG2__ALLOW_COMPZ_BYTE_MASKING__SHIFT 0x0
++#define DB_DEBUG2__DISABLE_TC_ZRANGE_L0_CACHE__SHIFT 0x1
++#define DB_DEBUG2__DISABLE_TC_MASK_L0_CACHE__SHIFT 0x2
++#define DB_DEBUG2__DTR_ROUND_ROBIN_ARB__SHIFT 0x3
++#define DB_DEBUG2__DTR_PREZ_STALLS_FOR_ETF_ROOM__SHIFT 0x4
++#define DB_DEBUG2__DISABLE_PREZL_FIFO_STALL__SHIFT 0x5
++#define DB_DEBUG2__DISABLE_PREZL_FIFO_STALL_REZ__SHIFT 0x6
++#define DB_DEBUG2__ENABLE_VIEWPORT_STALL_ON_ALL__SHIFT 0x7
++#define DB_DEBUG2__OPTIMIZE_HIZ_MATCHES_FB_DISABLE__SHIFT 0x8
++#define DB_DEBUG2__CLK_OFF_DELAY__SHIFT 0x9
++#define DB_DEBUG2__DISABLE_TILE_COVERED_FOR_PS_ITER__SHIFT 0xe
++#define DB_DEBUG2__ENABLE_SUBTILE_GROUPING__SHIFT 0xf
++#define DB_DEBUG2__RESERVED__SHIFT 0x10
++#define DB_DEBUG2__DISABLE_NULL_EOT_FORWARDING__SHIFT 0x11
++#define DB_DEBUG2__DISABLE_DTT_DATA_FORWARDING__SHIFT 0x12
++#define DB_DEBUG2__DISABLE_QUAD_COHERENCY_STALL__SHIFT 0x13
++#define DB_DEBUG2__ENABLE_PREZ_OF_REZ_SUMM__SHIFT 0x1c
++#define DB_DEBUG2__DISABLE_PREZL_VIEWPORT_STALL__SHIFT 0x1d
++#define DB_DEBUG2__DISABLE_SINGLE_STENCIL_QUAD_SUMM__SHIFT 0x1e
++#define DB_DEBUG2__DISABLE_WRITE_STALL_ON_RDWR_CONFLICT__SHIFT 0x1f
++#define DB_DEBUG2__ALLOW_COMPZ_BYTE_MASKING_MASK 0x00000001L
++#define DB_DEBUG2__DISABLE_TC_ZRANGE_L0_CACHE_MASK 0x00000002L
++#define DB_DEBUG2__DISABLE_TC_MASK_L0_CACHE_MASK 0x00000004L
++#define DB_DEBUG2__DTR_ROUND_ROBIN_ARB_MASK 0x00000008L
++#define DB_DEBUG2__DTR_PREZ_STALLS_FOR_ETF_ROOM_MASK 0x00000010L
++#define DB_DEBUG2__DISABLE_PREZL_FIFO_STALL_MASK 0x00000020L
++#define DB_DEBUG2__DISABLE_PREZL_FIFO_STALL_REZ_MASK 0x00000040L
++#define DB_DEBUG2__ENABLE_VIEWPORT_STALL_ON_ALL_MASK 0x00000080L
++#define DB_DEBUG2__OPTIMIZE_HIZ_MATCHES_FB_DISABLE_MASK 0x00000100L
++#define DB_DEBUG2__CLK_OFF_DELAY_MASK 0x00003E00L
++#define DB_DEBUG2__DISABLE_TILE_COVERED_FOR_PS_ITER_MASK 0x00004000L
++#define DB_DEBUG2__ENABLE_SUBTILE_GROUPING_MASK 0x00008000L
++#define DB_DEBUG2__RESERVED_MASK 0x00010000L
++#define DB_DEBUG2__DISABLE_NULL_EOT_FORWARDING_MASK 0x00020000L
++#define DB_DEBUG2__DISABLE_DTT_DATA_FORWARDING_MASK 0x00040000L
++#define DB_DEBUG2__DISABLE_QUAD_COHERENCY_STALL_MASK 0x00080000L
++#define DB_DEBUG2__ENABLE_PREZ_OF_REZ_SUMM_MASK 0x10000000L
++#define DB_DEBUG2__DISABLE_PREZL_VIEWPORT_STALL_MASK 0x20000000L
++#define DB_DEBUG2__DISABLE_SINGLE_STENCIL_QUAD_SUMM_MASK 0x40000000L
++#define DB_DEBUG2__DISABLE_WRITE_STALL_ON_RDWR_CONFLICT_MASK 0x80000000L
++//DB_DEBUG3
++#define DB_DEBUG3__DISABLE_CLEAR_ZRANGE_CORRECTION__SHIFT 0x0
++#define DB_DEBUG3__ROUND_ZRANGE_CORRECTION__SHIFT 0x1
++#define DB_DEBUG3__FORCE_DB_IS_GOOD__SHIFT 0x2
++#define DB_DEBUG3__DISABLE_TL_SSO_NULL_SUPPRESSION__SHIFT 0x3
++#define DB_DEBUG3__DISABLE_HIZ_ON_VPORT_CLAMP__SHIFT 0x4
++#define DB_DEBUG3__EQAA_INTERPOLATE_COMP_Z__SHIFT 0x5
++#define DB_DEBUG3__EQAA_INTERPOLATE_SRC_Z__SHIFT 0x6
++#define DB_DEBUG3__DISABLE_TCP_CAM_BYPASS__SHIFT 0x7
++#define DB_DEBUG3__DISABLE_ZCMP_DIRTY_SUPPRESSION__SHIFT 0x8
++#define DB_DEBUG3__DISABLE_REDUNDANT_PLANE_FLUSHES_OPT__SHIFT 0x9
++#define DB_DEBUG3__DISABLE_RECOMP_TO_1ZPLANE_WITHOUT_FASTOP__SHIFT 0xa
++#define DB_DEBUG3__ENABLE_INCOHERENT_EQAA_READS__SHIFT 0xb
++#define DB_DEBUG3__DISABLE_OP_Z_DATA_FORWARDING__SHIFT 0xc
++#define DB_DEBUG3__DISABLE_OP_DF_BYPASS__SHIFT 0xd
++#define DB_DEBUG3__DISABLE_OP_DF_WRITE_COMBINE__SHIFT 0xe
++#define DB_DEBUG3__DISABLE_OP_DF_DIRECT_FEEDBACK__SHIFT 0xf
++#define DB_DEBUG3__ALLOW_RF2P_RW_COLLISION__SHIFT 0x10
++#define DB_DEBUG3__SLOW_PREZ_TO_A2M_OMASK_RATE__SHIFT 0x11
++#define DB_DEBUG3__DISABLE_OP_S_DATA_FORWARDING__SHIFT 0x12
++#define DB_DEBUG3__DISABLE_TC_UPDATE_WRITE_COMBINE__SHIFT 0x13
++#define DB_DEBUG3__DISABLE_HZ_TC_WRITE_COMBINE__SHIFT 0x14
++#define DB_DEBUG3__ENABLE_RECOMP_ZDIRTY_SUPPRESSION_OPT__SHIFT 0x15
++#define DB_DEBUG3__ENABLE_TC_MA_ROUND_ROBIN_ARB__SHIFT 0x16
++#define DB_DEBUG3__DISABLE_RAM_READ_SUPPRESION_ON_FWD__SHIFT 0x17
++#define DB_DEBUG3__DISABLE_EQAA_A2M_PERF_OPT__SHIFT 0x18
++#define DB_DEBUG3__DISABLE_DI_DT_STALL__SHIFT 0x19
++#define DB_DEBUG3__ENABLE_DB_PROCESS_RESET__SHIFT 0x1a
++#define DB_DEBUG3__DISABLE_OVERRASTERIZATION_FIX__SHIFT 0x1b
++#define DB_DEBUG3__DONT_INSERT_CONTEXT_SUSPEND__SHIFT 0x1c
++#define DB_DEBUG3__DONT_DELETE_CONTEXT_SUSPEND__SHIFT 0x1d
++#define DB_DEBUG3__DISABLE_4XAA_2P_DELAYED_WRITE__SHIFT 0x1e
++#define DB_DEBUG3__DISABLE_4XAA_2P_INTERLEAVED_PMASK__SHIFT 0x1f
++#define DB_DEBUG3__DISABLE_CLEAR_ZRANGE_CORRECTION_MASK 0x00000001L
++#define DB_DEBUG3__ROUND_ZRANGE_CORRECTION_MASK 0x00000002L
++#define DB_DEBUG3__FORCE_DB_IS_GOOD_MASK 0x00000004L
++#define DB_DEBUG3__DISABLE_TL_SSO_NULL_SUPPRESSION_MASK 0x00000008L
++#define DB_DEBUG3__DISABLE_HIZ_ON_VPORT_CLAMP_MASK 0x00000010L
++#define DB_DEBUG3__EQAA_INTERPOLATE_COMP_Z_MASK 0x00000020L
++#define DB_DEBUG3__EQAA_INTERPOLATE_SRC_Z_MASK 0x00000040L
++#define DB_DEBUG3__DISABLE_TCP_CAM_BYPASS_MASK 0x00000080L
++#define DB_DEBUG3__DISABLE_ZCMP_DIRTY_SUPPRESSION_MASK 0x00000100L
++#define DB_DEBUG3__DISABLE_REDUNDANT_PLANE_FLUSHES_OPT_MASK 0x00000200L
++#define DB_DEBUG3__DISABLE_RECOMP_TO_1ZPLANE_WITHOUT_FASTOP_MASK 0x00000400L
++#define DB_DEBUG3__ENABLE_INCOHERENT_EQAA_READS_MASK 0x00000800L
++#define DB_DEBUG3__DISABLE_OP_Z_DATA_FORWARDING_MASK 0x00001000L
++#define DB_DEBUG3__DISABLE_OP_DF_BYPASS_MASK 0x00002000L
++#define DB_DEBUG3__DISABLE_OP_DF_WRITE_COMBINE_MASK 0x00004000L
++#define DB_DEBUG3__DISABLE_OP_DF_DIRECT_FEEDBACK_MASK 0x00008000L
++#define DB_DEBUG3__ALLOW_RF2P_RW_COLLISION_MASK 0x00010000L
++#define DB_DEBUG3__SLOW_PREZ_TO_A2M_OMASK_RATE_MASK 0x00020000L
++#define DB_DEBUG3__DISABLE_OP_S_DATA_FORWARDING_MASK 0x00040000L
++#define DB_DEBUG3__DISABLE_TC_UPDATE_WRITE_COMBINE_MASK 0x00080000L
++#define DB_DEBUG3__DISABLE_HZ_TC_WRITE_COMBINE_MASK 0x00100000L
++#define DB_DEBUG3__ENABLE_RECOMP_ZDIRTY_SUPPRESSION_OPT_MASK 0x00200000L
++#define DB_DEBUG3__ENABLE_TC_MA_ROUND_ROBIN_ARB_MASK 0x00400000L
++#define DB_DEBUG3__DISABLE_RAM_READ_SUPPRESION_ON_FWD_MASK 0x00800000L
++#define DB_DEBUG3__DISABLE_EQAA_A2M_PERF_OPT_MASK 0x01000000L
++#define DB_DEBUG3__DISABLE_DI_DT_STALL_MASK 0x02000000L
++#define DB_DEBUG3__ENABLE_DB_PROCESS_RESET_MASK 0x04000000L
++#define DB_DEBUG3__DISABLE_OVERRASTERIZATION_FIX_MASK 0x08000000L
++#define DB_DEBUG3__DONT_INSERT_CONTEXT_SUSPEND_MASK 0x10000000L
++#define DB_DEBUG3__DONT_DELETE_CONTEXT_SUSPEND_MASK 0x20000000L
++#define DB_DEBUG3__DISABLE_4XAA_2P_DELAYED_WRITE_MASK 0x40000000L
++#define DB_DEBUG3__DISABLE_4XAA_2P_INTERLEAVED_PMASK_MASK 0x80000000L
++//DB_DEBUG4
++#define DB_DEBUG4__DISABLE_QC_Z_MASK_SUMMATION__SHIFT 0x0
++#define DB_DEBUG4__DISABLE_QC_STENCIL_MASK_SUMMATION__SHIFT 0x1
++#define DB_DEBUG4__DISABLE_RESUMM_TO_SINGLE_STENCIL__SHIFT 0x2
++#define DB_DEBUG4__DISABLE_PREZ_POSTZ_DTILE_CONFLICT_STALL__SHIFT 0x3
++#define DB_DEBUG4__DISABLE_4XAA_2P_ZD_HOLDOFF__SHIFT 0x4
++#define DB_DEBUG4__ENABLE_A2M_DQUAD_OPTIMIZATION__SHIFT 0x5
++#define DB_DEBUG4__ENABLE_DBCB_SLOW_FORMAT_COLLAPSE__SHIFT 0x6
++#define DB_DEBUG4__ALWAYS_ON_RMI_CLK_EN__SHIFT 0x7
++#define DB_DEBUG4__DFSM_CONVERT_PASSTHROUGH_TO_BYPASS__SHIFT 0x8
++#define DB_DEBUG4__DISABLE_UNMAPPED_Z_INDICATOR__SHIFT 0x9
++#define DB_DEBUG4__DISABLE_UNMAPPED_S_INDICATOR__SHIFT 0xa
++#define DB_DEBUG4__DISABLE_UNMAPPED_H_INDICATOR__SHIFT 0xb
++#define DB_DEBUG4__DISABLE_SEPARATE_DFSM_CLK__SHIFT 0xc
++#define DB_DEBUG4__DISABLE_DTT_FAST_HTILENACK_LOOKUP__SHIFT 0xd
++#define DB_DEBUG4__DISABLE_RESCHECK_MEMCOHER_OPTIMIZATION__SHIFT 0xe
++#define DB_DEBUG4__DISABLE_TS_WRITE_L0__SHIFT 0xf
++#define DB_DEBUG4__DISABLE_DYNAMIC_RAM_LIGHT_SLEEP_MODE__SHIFT 0x10
++#define DB_DEBUG4__DISABLE_HIZ_Q1_TS_COLLISION_DETECT__SHIFT 0x11
++#define DB_DEBUG4__DISABLE_HIZ_Q2_TS_COLLISION_DETECT__SHIFT 0x12
++#define DB_DEBUG4__DB_EXTRA_DEBUG4__SHIFT 0x13
++#define DB_DEBUG4__DISABLE_QC_Z_MASK_SUMMATION_MASK 0x00000001L
++#define DB_DEBUG4__DISABLE_QC_STENCIL_MASK_SUMMATION_MASK 0x00000002L
++#define DB_DEBUG4__DISABLE_RESUMM_TO_SINGLE_STENCIL_MASK 0x00000004L
++#define DB_DEBUG4__DISABLE_PREZ_POSTZ_DTILE_CONFLICT_STALL_MASK 0x00000008L
++#define DB_DEBUG4__DISABLE_4XAA_2P_ZD_HOLDOFF_MASK 0x00000010L
++#define DB_DEBUG4__ENABLE_A2M_DQUAD_OPTIMIZATION_MASK 0x00000020L
++#define DB_DEBUG4__ENABLE_DBCB_SLOW_FORMAT_COLLAPSE_MASK 0x00000040L
++#define DB_DEBUG4__ALWAYS_ON_RMI_CLK_EN_MASK 0x00000080L
++#define DB_DEBUG4__DFSM_CONVERT_PASSTHROUGH_TO_BYPASS_MASK 0x00000100L
++#define DB_DEBUG4__DISABLE_UNMAPPED_Z_INDICATOR_MASK 0x00000200L
++#define DB_DEBUG4__DISABLE_UNMAPPED_S_INDICATOR_MASK 0x00000400L
++#define DB_DEBUG4__DISABLE_UNMAPPED_H_INDICATOR_MASK 0x00000800L
++#define DB_DEBUG4__DISABLE_SEPARATE_DFSM_CLK_MASK 0x00001000L
++#define DB_DEBUG4__DISABLE_DTT_FAST_HTILENACK_LOOKUP_MASK 0x00002000L
++#define DB_DEBUG4__DISABLE_RESCHECK_MEMCOHER_OPTIMIZATION_MASK 0x00004000L
++#define DB_DEBUG4__DISABLE_TS_WRITE_L0_MASK 0x00008000L
++#define DB_DEBUG4__DISABLE_DYNAMIC_RAM_LIGHT_SLEEP_MODE_MASK 0x00010000L
++#define DB_DEBUG4__DISABLE_HIZ_Q1_TS_COLLISION_DETECT_MASK 0x00020000L
++#define DB_DEBUG4__DISABLE_HIZ_Q2_TS_COLLISION_DETECT_MASK 0x00040000L
++#define DB_DEBUG4__DB_EXTRA_DEBUG4_MASK 0xFFF80000L
++//DB_CREDIT_LIMIT
++#define DB_CREDIT_LIMIT__DB_SC_TILE_CREDITS__SHIFT 0x0
++#define DB_CREDIT_LIMIT__DB_SC_QUAD_CREDITS__SHIFT 0x5
++#define DB_CREDIT_LIMIT__DB_CB_LQUAD_CREDITS__SHIFT 0xa
++#define DB_CREDIT_LIMIT__DB_CB_TILE_CREDITS__SHIFT 0x18
++#define DB_CREDIT_LIMIT__DB_SC_TILE_CREDITS_MASK 0x0000001FL
++#define DB_CREDIT_LIMIT__DB_SC_QUAD_CREDITS_MASK 0x000003E0L
++#define DB_CREDIT_LIMIT__DB_CB_LQUAD_CREDITS_MASK 0x00001C00L
++#define DB_CREDIT_LIMIT__DB_CB_TILE_CREDITS_MASK 0x7F000000L
++//DB_WATERMARKS
++#define DB_WATERMARKS__DEPTH_FREE__SHIFT 0x0
++#define DB_WATERMARKS__DEPTH_FLUSH__SHIFT 0x5
++#define DB_WATERMARKS__FORCE_SUMMARIZE__SHIFT 0xb
++#define DB_WATERMARKS__DEPTH_PENDING_FREE__SHIFT 0xf
++#define DB_WATERMARKS__DEPTH_CACHELINE_FREE__SHIFT 0x14
++#define DB_WATERMARKS__AUTO_FLUSH_HTILE__SHIFT 0x1e
++#define DB_WATERMARKS__AUTO_FLUSH_QUAD__SHIFT 0x1f
++#define DB_WATERMARKS__DEPTH_FREE_MASK 0x0000001FL
++#define DB_WATERMARKS__DEPTH_FLUSH_MASK 0x000007E0L
++#define DB_WATERMARKS__FORCE_SUMMARIZE_MASK 0x00007800L
++#define DB_WATERMARKS__DEPTH_PENDING_FREE_MASK 0x000F8000L
++#define DB_WATERMARKS__DEPTH_CACHELINE_FREE_MASK 0x0FF00000L
++#define DB_WATERMARKS__AUTO_FLUSH_HTILE_MASK 0x40000000L
++#define DB_WATERMARKS__AUTO_FLUSH_QUAD_MASK 0x80000000L
++//DB_SUBTILE_CONTROL
++#define DB_SUBTILE_CONTROL__MSAA1_X__SHIFT 0x0
++#define DB_SUBTILE_CONTROL__MSAA1_Y__SHIFT 0x2
++#define DB_SUBTILE_CONTROL__MSAA2_X__SHIFT 0x4
++#define DB_SUBTILE_CONTROL__MSAA2_Y__SHIFT 0x6
++#define DB_SUBTILE_CONTROL__MSAA4_X__SHIFT 0x8
++#define DB_SUBTILE_CONTROL__MSAA4_Y__SHIFT 0xa
++#define DB_SUBTILE_CONTROL__MSAA8_X__SHIFT 0xc
++#define DB_SUBTILE_CONTROL__MSAA8_Y__SHIFT 0xe
++#define DB_SUBTILE_CONTROL__MSAA16_X__SHIFT 0x10
++#define DB_SUBTILE_CONTROL__MSAA16_Y__SHIFT 0x12
++#define DB_SUBTILE_CONTROL__MSAA1_X_MASK 0x00000003L
++#define DB_SUBTILE_CONTROL__MSAA1_Y_MASK 0x0000000CL
++#define DB_SUBTILE_CONTROL__MSAA2_X_MASK 0x00000030L
++#define DB_SUBTILE_CONTROL__MSAA2_Y_MASK 0x000000C0L
++#define DB_SUBTILE_CONTROL__MSAA4_X_MASK 0x00000300L
++#define DB_SUBTILE_CONTROL__MSAA4_Y_MASK 0x00000C00L
++#define DB_SUBTILE_CONTROL__MSAA8_X_MASK 0x00003000L
++#define DB_SUBTILE_CONTROL__MSAA8_Y_MASK 0x0000C000L
++#define DB_SUBTILE_CONTROL__MSAA16_X_MASK 0x00030000L
++#define DB_SUBTILE_CONTROL__MSAA16_Y_MASK 0x000C0000L
++//DB_FREE_CACHELINES
++#define DB_FREE_CACHELINES__FREE_DTILE_DEPTH__SHIFT 0x0
++#define DB_FREE_CACHELINES__FREE_PLANE_DEPTH__SHIFT 0x7
++#define DB_FREE_CACHELINES__FREE_Z_DEPTH__SHIFT 0xe
++#define DB_FREE_CACHELINES__FREE_HTILE_DEPTH__SHIFT 0x14
++#define DB_FREE_CACHELINES__QUAD_READ_REQS__SHIFT 0x18
++#define DB_FREE_CACHELINES__FREE_DTILE_DEPTH_MASK 0x0000007FL
++#define DB_FREE_CACHELINES__FREE_PLANE_DEPTH_MASK 0x00003F80L
++#define DB_FREE_CACHELINES__FREE_Z_DEPTH_MASK 0x000FC000L
++#define DB_FREE_CACHELINES__FREE_HTILE_DEPTH_MASK 0x00F00000L
++#define DB_FREE_CACHELINES__QUAD_READ_REQS_MASK 0xFF000000L
++//DB_FIFO_DEPTH1
++#define DB_FIFO_DEPTH1__DB_RMI_RDREQ_CREDITS__SHIFT 0x0
++#define DB_FIFO_DEPTH1__DB_RMI_WRREQ_CREDITS__SHIFT 0x5
++#define DB_FIFO_DEPTH1__MCC_DEPTH__SHIFT 0xa
++#define DB_FIFO_DEPTH1__QC_DEPTH__SHIFT 0x10
++#define DB_FIFO_DEPTH1__LTILE_PROBE_FIFO_DEPTH__SHIFT 0x15
++#define DB_FIFO_DEPTH1__DB_RMI_RDREQ_CREDITS_MASK 0x0000001FL
++#define DB_FIFO_DEPTH1__DB_RMI_WRREQ_CREDITS_MASK 0x000003E0L
++#define DB_FIFO_DEPTH1__MCC_DEPTH_MASK 0x0000FC00L
++#define DB_FIFO_DEPTH1__QC_DEPTH_MASK 0x001F0000L
++#define DB_FIFO_DEPTH1__LTILE_PROBE_FIFO_DEPTH_MASK 0x1FE00000L
++//DB_FIFO_DEPTH2
++#define DB_FIFO_DEPTH2__EQUAD_FIFO_DEPTH__SHIFT 0x0
++#define DB_FIFO_DEPTH2__ETILE_OP_FIFO_DEPTH__SHIFT 0x8
++#define DB_FIFO_DEPTH2__LQUAD_FIFO_DEPTH__SHIFT 0xf
++#define DB_FIFO_DEPTH2__LTILE_OP_FIFO_DEPTH__SHIFT 0x19
++#define DB_FIFO_DEPTH2__EQUAD_FIFO_DEPTH_MASK 0x000000FFL
++#define DB_FIFO_DEPTH2__ETILE_OP_FIFO_DEPTH_MASK 0x00007F00L
++#define DB_FIFO_DEPTH2__LQUAD_FIFO_DEPTH_MASK 0x01FF8000L
++#define DB_FIFO_DEPTH2__LTILE_OP_FIFO_DEPTH_MASK 0xFE000000L
++//DB_EXCEPTION_CONTROL
++#define DB_EXCEPTION_CONTROL__EARLY_Z_PANIC_DISABLE__SHIFT 0x0
++#define DB_EXCEPTION_CONTROL__LATE_Z_PANIC_DISABLE__SHIFT 0x1
++#define DB_EXCEPTION_CONTROL__RE_Z_PANIC_DISABLE__SHIFT 0x2
++#define DB_EXCEPTION_CONTROL__EARLY_Z_PANIC_DISABLE_MASK 0x00000001L
++#define DB_EXCEPTION_CONTROL__LATE_Z_PANIC_DISABLE_MASK 0x00000002L
++#define DB_EXCEPTION_CONTROL__RE_Z_PANIC_DISABLE_MASK 0x00000004L
++//DB_RING_CONTROL
++#define DB_RING_CONTROL__COUNTER_CONTROL__SHIFT 0x0
++#define DB_RING_CONTROL__COUNTER_CONTROL_MASK 0x00000003L
++//DB_MEM_ARB_WATERMARKS
++#define DB_MEM_ARB_WATERMARKS__CLIENT0_WATERMARK__SHIFT 0x0
++#define DB_MEM_ARB_WATERMARKS__CLIENT1_WATERMARK__SHIFT 0x8
++#define DB_MEM_ARB_WATERMARKS__CLIENT2_WATERMARK__SHIFT 0x10
++#define DB_MEM_ARB_WATERMARKS__CLIENT3_WATERMARK__SHIFT 0x18
++#define DB_MEM_ARB_WATERMARKS__CLIENT0_WATERMARK_MASK 0x00000007L
++#define DB_MEM_ARB_WATERMARKS__CLIENT1_WATERMARK_MASK 0x00000700L
++#define DB_MEM_ARB_WATERMARKS__CLIENT2_WATERMARK_MASK 0x00070000L
++#define DB_MEM_ARB_WATERMARKS__CLIENT3_WATERMARK_MASK 0x07000000L
++//DB_RMI_CACHE_POLICY
++#define DB_RMI_CACHE_POLICY__Z_RD__SHIFT 0x0
++#define DB_RMI_CACHE_POLICY__S_RD__SHIFT 0x1
++#define DB_RMI_CACHE_POLICY__HTILE_RD__SHIFT 0x2
++#define DB_RMI_CACHE_POLICY__Z_WR__SHIFT 0x8
++#define DB_RMI_CACHE_POLICY__S_WR__SHIFT 0x9
++#define DB_RMI_CACHE_POLICY__HTILE_WR__SHIFT 0xa
++#define DB_RMI_CACHE_POLICY__ZPCPSD_WR__SHIFT 0xb
++#define DB_RMI_CACHE_POLICY__CC_RD__SHIFT 0x10
++#define DB_RMI_CACHE_POLICY__FMASK_RD__SHIFT 0x11
++#define DB_RMI_CACHE_POLICY__CMASK_RD__SHIFT 0x12
++#define DB_RMI_CACHE_POLICY__DCC_RD__SHIFT 0x13
++#define DB_RMI_CACHE_POLICY__CC_WR__SHIFT 0x18
++#define DB_RMI_CACHE_POLICY__FMASK_WR__SHIFT 0x19
++#define DB_RMI_CACHE_POLICY__CMASK_WR__SHIFT 0x1a
++#define DB_RMI_CACHE_POLICY__DCC_WR__SHIFT 0x1b
++#define DB_RMI_CACHE_POLICY__Z_RD_MASK 0x00000001L
++#define DB_RMI_CACHE_POLICY__S_RD_MASK 0x00000002L
++#define DB_RMI_CACHE_POLICY__HTILE_RD_MASK 0x00000004L
++#define DB_RMI_CACHE_POLICY__Z_WR_MASK 0x00000100L
++#define DB_RMI_CACHE_POLICY__S_WR_MASK 0x00000200L
++#define DB_RMI_CACHE_POLICY__HTILE_WR_MASK 0x00000400L
++#define DB_RMI_CACHE_POLICY__ZPCPSD_WR_MASK 0x00000800L
++#define DB_RMI_CACHE_POLICY__CC_RD_MASK 0x00010000L
++#define DB_RMI_CACHE_POLICY__FMASK_RD_MASK 0x00020000L
++#define DB_RMI_CACHE_POLICY__CMASK_RD_MASK 0x00040000L
++#define DB_RMI_CACHE_POLICY__DCC_RD_MASK 0x00080000L
++#define DB_RMI_CACHE_POLICY__CC_WR_MASK 0x01000000L
++#define DB_RMI_CACHE_POLICY__FMASK_WR_MASK 0x02000000L
++#define DB_RMI_CACHE_POLICY__CMASK_WR_MASK 0x04000000L
++#define DB_RMI_CACHE_POLICY__DCC_WR_MASK 0x08000000L
++//DB_DFSM_CONFIG
++#define DB_DFSM_CONFIG__BYPASS_DFSM__SHIFT 0x0
++#define DB_DFSM_CONFIG__DISABLE_PUNCHOUT__SHIFT 0x1
++#define DB_DFSM_CONFIG__DISABLE_POPS__SHIFT 0x2
++#define DB_DFSM_CONFIG__FORCE_FLUSH__SHIFT 0x3
++#define DB_DFSM_CONFIG__MIDDLE_PIPE_MAX_DEPTH__SHIFT 0x8
++#define DB_DFSM_CONFIG__BYPASS_DFSM_MASK 0x00000001L
++#define DB_DFSM_CONFIG__DISABLE_PUNCHOUT_MASK 0x00000002L
++#define DB_DFSM_CONFIG__DISABLE_POPS_MASK 0x00000004L
++#define DB_DFSM_CONFIG__FORCE_FLUSH_MASK 0x00000008L
++#define DB_DFSM_CONFIG__MIDDLE_PIPE_MAX_DEPTH_MASK 0x00007F00L
++//DB_DFSM_WATERMARK
++#define DB_DFSM_WATERMARK__DFSM_HIGH_WATERMARK__SHIFT 0x0
++#define DB_DFSM_WATERMARK__POPS_HIGH_WATERMARK__SHIFT 0x10
++#define DB_DFSM_WATERMARK__DFSM_HIGH_WATERMARK_MASK 0x0000FFFFL
++#define DB_DFSM_WATERMARK__POPS_HIGH_WATERMARK_MASK 0xFFFF0000L
++//DB_DFSM_TILES_IN_FLIGHT
++#define DB_DFSM_TILES_IN_FLIGHT__HIGH_WATERMARK__SHIFT 0x0
++#define DB_DFSM_TILES_IN_FLIGHT__HARD_LIMIT__SHIFT 0x10
++#define DB_DFSM_TILES_IN_FLIGHT__HIGH_WATERMARK_MASK 0x0000FFFFL
++#define DB_DFSM_TILES_IN_FLIGHT__HARD_LIMIT_MASK 0xFFFF0000L
++//DB_DFSM_PRIMS_IN_FLIGHT
++#define DB_DFSM_PRIMS_IN_FLIGHT__HIGH_WATERMARK__SHIFT 0x0
++#define DB_DFSM_PRIMS_IN_FLIGHT__HARD_LIMIT__SHIFT 0x10
++#define DB_DFSM_PRIMS_IN_FLIGHT__HIGH_WATERMARK_MASK 0x0000FFFFL
++#define DB_DFSM_PRIMS_IN_FLIGHT__HARD_LIMIT_MASK 0xFFFF0000L
++//DB_DFSM_WATCHDOG
++#define DB_DFSM_WATCHDOG__TIMER_TARGET__SHIFT 0x0
++#define DB_DFSM_WATCHDOG__TIMER_TARGET_MASK 0xFFFFFFFFL
++//DB_DFSM_FLUSH_ENABLE
++#define DB_DFSM_FLUSH_ENABLE__PRIMARY_EVENTS__SHIFT 0x0
++#define DB_DFSM_FLUSH_ENABLE__AUX_FORCE_PASSTHRU__SHIFT 0x18
++#define DB_DFSM_FLUSH_ENABLE__AUX_EVENTS__SHIFT 0x1c
++#define DB_DFSM_FLUSH_ENABLE__PRIMARY_EVENTS_MASK 0x000003FFL
++#define DB_DFSM_FLUSH_ENABLE__AUX_FORCE_PASSTHRU_MASK 0x0F000000L
++#define DB_DFSM_FLUSH_ENABLE__AUX_EVENTS_MASK 0xF0000000L
++//DB_DFSM_FLUSH_AUX_EVENT
++#define DB_DFSM_FLUSH_AUX_EVENT__EVENT_A__SHIFT 0x0
++#define DB_DFSM_FLUSH_AUX_EVENT__EVENT_B__SHIFT 0x8
++#define DB_DFSM_FLUSH_AUX_EVENT__EVENT_C__SHIFT 0x10
++#define DB_DFSM_FLUSH_AUX_EVENT__EVENT_D__SHIFT 0x18
++#define DB_DFSM_FLUSH_AUX_EVENT__EVENT_A_MASK 0x000000FFL
++#define DB_DFSM_FLUSH_AUX_EVENT__EVENT_B_MASK 0x0000FF00L
++#define DB_DFSM_FLUSH_AUX_EVENT__EVENT_C_MASK 0x00FF0000L
++#define DB_DFSM_FLUSH_AUX_EVENT__EVENT_D_MASK 0xFF000000L
++//CC_RB_REDUNDANCY
++#define CC_RB_REDUNDANCY__FAILED_RB0__SHIFT 0x8
++#define CC_RB_REDUNDANCY__EN_REDUNDANCY0__SHIFT 0xc
++#define CC_RB_REDUNDANCY__FAILED_RB1__SHIFT 0x10
++#define CC_RB_REDUNDANCY__EN_REDUNDANCY1__SHIFT 0x14
++#define CC_RB_REDUNDANCY__FAILED_RB0_MASK 0x00000F00L
++#define CC_RB_REDUNDANCY__EN_REDUNDANCY0_MASK 0x00001000L
++#define CC_RB_REDUNDANCY__FAILED_RB1_MASK 0x000F0000L
++#define CC_RB_REDUNDANCY__EN_REDUNDANCY1_MASK 0x00100000L
++//CC_RB_BACKEND_DISABLE
++#define CC_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT 0x10
++#define CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK 0x00FF0000L
++//GB_ADDR_CONFIG
++#define GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
++#define GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
++#define GB_ADDR_CONFIG__MAX_COMPRESSED_FRAGS__SHIFT 0x6
++#define GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x8
++#define GB_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc
++#define GB_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE__SHIFT 0x10
++#define GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13
++#define GB_ADDR_CONFIG__NUM_GPUS__SHIFT 0x15
++#define GB_ADDR_CONFIG__MULTI_GPU_TILE_SIZE__SHIFT 0x18
++#define GB_ADDR_CONFIG__NUM_RB_PER_SE__SHIFT 0x1a
++#define GB_ADDR_CONFIG__ROW_SIZE__SHIFT 0x1c
++#define GB_ADDR_CONFIG__NUM_LOWER_PIPES__SHIFT 0x1e
++#define GB_ADDR_CONFIG__SE_ENABLE__SHIFT 0x1f
++#define GB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
++#define GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
++#define GB_ADDR_CONFIG__MAX_COMPRESSED_FRAGS_MASK 0x000000C0L
++#define GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
++#define GB_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L
++#define GB_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE_MASK 0x00070000L
++#define GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L
++#define GB_ADDR_CONFIG__NUM_GPUS_MASK 0x00E00000L
++#define GB_ADDR_CONFIG__MULTI_GPU_TILE_SIZE_MASK 0x03000000L
++#define GB_ADDR_CONFIG__NUM_RB_PER_SE_MASK 0x0C000000L
++#define GB_ADDR_CONFIG__ROW_SIZE_MASK 0x30000000L
++#define GB_ADDR_CONFIG__NUM_LOWER_PIPES_MASK 0x40000000L
++#define GB_ADDR_CONFIG__SE_ENABLE_MASK 0x80000000L
++//GB_BACKEND_MAP
++#define GB_BACKEND_MAP__BACKEND_MAP__SHIFT 0x0
++#define GB_BACKEND_MAP__BACKEND_MAP_MASK 0xFFFFFFFFL
++//GB_GPU_ID
++#define GB_GPU_ID__GPU_ID__SHIFT 0x0
++#define GB_GPU_ID__GPU_ID_MASK 0x0000000FL
++//CC_RB_DAISY_CHAIN
++#define CC_RB_DAISY_CHAIN__RB_0__SHIFT 0x0
++#define CC_RB_DAISY_CHAIN__RB_1__SHIFT 0x4
++#define CC_RB_DAISY_CHAIN__RB_2__SHIFT 0x8
++#define CC_RB_DAISY_CHAIN__RB_3__SHIFT 0xc
++#define CC_RB_DAISY_CHAIN__RB_4__SHIFT 0x10
++#define CC_RB_DAISY_CHAIN__RB_5__SHIFT 0x14
++#define CC_RB_DAISY_CHAIN__RB_6__SHIFT 0x18
++#define CC_RB_DAISY_CHAIN__RB_7__SHIFT 0x1c
++#define CC_RB_DAISY_CHAIN__RB_0_MASK 0x0000000FL
++#define CC_RB_DAISY_CHAIN__RB_1_MASK 0x000000F0L
++#define CC_RB_DAISY_CHAIN__RB_2_MASK 0x00000F00L
++#define CC_RB_DAISY_CHAIN__RB_3_MASK 0x0000F000L
++#define CC_RB_DAISY_CHAIN__RB_4_MASK 0x000F0000L
++#define CC_RB_DAISY_CHAIN__RB_5_MASK 0x00F00000L
++#define CC_RB_DAISY_CHAIN__RB_6_MASK 0x0F000000L
++#define CC_RB_DAISY_CHAIN__RB_7_MASK 0xF0000000L
++//GB_ADDR_CONFIG_READ
++#define GB_ADDR_CONFIG_READ__NUM_PIPES__SHIFT 0x0
++#define GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
++#define GB_ADDR_CONFIG_READ__MAX_COMPRESSED_FRAGS__SHIFT 0x6
++#define GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE__SHIFT 0x8
++#define GB_ADDR_CONFIG_READ__NUM_BANKS__SHIFT 0xc
++#define GB_ADDR_CONFIG_READ__SHADER_ENGINE_TILE_SIZE__SHIFT 0x10
++#define GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES__SHIFT 0x13
++#define GB_ADDR_CONFIG_READ__NUM_GPUS__SHIFT 0x15
++#define GB_ADDR_CONFIG_READ__MULTI_GPU_TILE_SIZE__SHIFT 0x18
++#define GB_ADDR_CONFIG_READ__NUM_RB_PER_SE__SHIFT 0x1a
++#define GB_ADDR_CONFIG_READ__ROW_SIZE__SHIFT 0x1c
++#define GB_ADDR_CONFIG_READ__NUM_LOWER_PIPES__SHIFT 0x1e
++#define GB_ADDR_CONFIG_READ__SE_ENABLE__SHIFT 0x1f
++#define GB_ADDR_CONFIG_READ__NUM_PIPES_MASK 0x00000007L
++#define GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
++#define GB_ADDR_CONFIG_READ__MAX_COMPRESSED_FRAGS_MASK 0x000000C0L
++#define GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
++#define GB_ADDR_CONFIG_READ__NUM_BANKS_MASK 0x00007000L
++#define GB_ADDR_CONFIG_READ__SHADER_ENGINE_TILE_SIZE_MASK 0x00070000L
++#define GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES_MASK 0x00180000L
++#define GB_ADDR_CONFIG_READ__NUM_GPUS_MASK 0x00E00000L
++#define GB_ADDR_CONFIG_READ__MULTI_GPU_TILE_SIZE_MASK 0x03000000L
++#define GB_ADDR_CONFIG_READ__NUM_RB_PER_SE_MASK 0x0C000000L
++#define GB_ADDR_CONFIG_READ__ROW_SIZE_MASK 0x30000000L
++#define GB_ADDR_CONFIG_READ__NUM_LOWER_PIPES_MASK 0x40000000L
++#define GB_ADDR_CONFIG_READ__SE_ENABLE_MASK 0x80000000L
++//GB_TILE_MODE0
++#define GB_TILE_MODE0__ARRAY_MODE__SHIFT 0x2
++#define GB_TILE_MODE0__PIPE_CONFIG__SHIFT 0x6
++#define GB_TILE_MODE0__TILE_SPLIT__SHIFT 0xb
++#define GB_TILE_MODE0__MICRO_TILE_MODE_NEW__SHIFT 0x16
++#define GB_TILE_MODE0__SAMPLE_SPLIT__SHIFT 0x19
++#define GB_TILE_MODE0__ARRAY_MODE_MASK 0x0000003CL
++#define GB_TILE_MODE0__PIPE_CONFIG_MASK 0x000007C0L
++#define GB_TILE_MODE0__TILE_SPLIT_MASK 0x00003800L
++#define GB_TILE_MODE0__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
++#define GB_TILE_MODE0__SAMPLE_SPLIT_MASK 0x06000000L
++//GB_TILE_MODE1
++#define GB_TILE_MODE1__ARRAY_MODE__SHIFT 0x2
++#define GB_TILE_MODE1__PIPE_CONFIG__SHIFT 0x6
++#define GB_TILE_MODE1__TILE_SPLIT__SHIFT 0xb
++#define GB_TILE_MODE1__MICRO_TILE_MODE_NEW__SHIFT 0x16
++#define GB_TILE_MODE1__SAMPLE_SPLIT__SHIFT 0x19
++#define GB_TILE_MODE1__ARRAY_MODE_MASK 0x0000003CL
++#define GB_TILE_MODE1__PIPE_CONFIG_MASK 0x000007C0L
++#define GB_TILE_MODE1__TILE_SPLIT_MASK 0x00003800L
++#define GB_TILE_MODE1__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
++#define GB_TILE_MODE1__SAMPLE_SPLIT_MASK 0x06000000L
++//GB_TILE_MODE2
++#define GB_TILE_MODE2__ARRAY_MODE__SHIFT 0x2
++#define GB_TILE_MODE2__PIPE_CONFIG__SHIFT 0x6
++#define GB_TILE_MODE2__TILE_SPLIT__SHIFT 0xb
++#define GB_TILE_MODE2__MICRO_TILE_MODE_NEW__SHIFT 0x16
++#define GB_TILE_MODE2__SAMPLE_SPLIT__SHIFT 0x19
++#define GB_TILE_MODE2__ARRAY_MODE_MASK 0x0000003CL
++#define GB_TILE_MODE2__PIPE_CONFIG_MASK 0x000007C0L
++#define GB_TILE_MODE2__TILE_SPLIT_MASK 0x00003800L
++#define GB_TILE_MODE2__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
++#define GB_TILE_MODE2__SAMPLE_SPLIT_MASK 0x06000000L
++//GB_TILE_MODE3
++#define GB_TILE_MODE3__ARRAY_MODE__SHIFT 0x2
++#define GB_TILE_MODE3__PIPE_CONFIG__SHIFT 0x6
++#define GB_TILE_MODE3__TILE_SPLIT__SHIFT 0xb
++#define GB_TILE_MODE3__MICRO_TILE_MODE_NEW__SHIFT 0x16
++#define GB_TILE_MODE3__SAMPLE_SPLIT__SHIFT 0x19
++#define GB_TILE_MODE3__ARRAY_MODE_MASK 0x0000003CL
++#define GB_TILE_MODE3__PIPE_CONFIG_MASK 0x000007C0L
++#define GB_TILE_MODE3__TILE_SPLIT_MASK 0x00003800L
++#define GB_TILE_MODE3__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
++#define GB_TILE_MODE3__SAMPLE_SPLIT_MASK 0x06000000L
++//GB_TILE_MODE4
++#define GB_TILE_MODE4__ARRAY_MODE__SHIFT 0x2
++#define GB_TILE_MODE4__PIPE_CONFIG__SHIFT 0x6
++#define GB_TILE_MODE4__TILE_SPLIT__SHIFT 0xb
++#define GB_TILE_MODE4__MICRO_TILE_MODE_NEW__SHIFT 0x16
++#define GB_TILE_MODE4__SAMPLE_SPLIT__SHIFT 0x19
++#define GB_TILE_MODE4__ARRAY_MODE_MASK 0x0000003CL
++#define GB_TILE_MODE4__PIPE_CONFIG_MASK 0x000007C0L
++#define GB_TILE_MODE4__TILE_SPLIT_MASK 0x00003800L
++#define GB_TILE_MODE4__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
++#define GB_TILE_MODE4__SAMPLE_SPLIT_MASK 0x06000000L
++//GB_TILE_MODE5
++#define GB_TILE_MODE5__ARRAY_MODE__SHIFT 0x2
++#define GB_TILE_MODE5__PIPE_CONFIG__SHIFT 0x6
++#define GB_TILE_MODE5__TILE_SPLIT__SHIFT 0xb
++#define GB_TILE_MODE5__MICRO_TILE_MODE_NEW__SHIFT 0x16
++#define GB_TILE_MODE5__SAMPLE_SPLIT__SHIFT 0x19
++#define GB_TILE_MODE5__ARRAY_MODE_MASK 0x0000003CL
++#define GB_TILE_MODE5__PIPE_CONFIG_MASK 0x000007C0L
++#define GB_TILE_MODE5__TILE_SPLIT_MASK 0x00003800L
++#define GB_TILE_MODE5__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
++#define GB_TILE_MODE5__SAMPLE_SPLIT_MASK 0x06000000L
++//GB_TILE_MODE6
++#define GB_TILE_MODE6__ARRAY_MODE__SHIFT 0x2
++#define GB_TILE_MODE6__PIPE_CONFIG__SHIFT 0x6
++#define GB_TILE_MODE6__TILE_SPLIT__SHIFT 0xb
++#define GB_TILE_MODE6__MICRO_TILE_MODE_NEW__SHIFT 0x16
++#define GB_TILE_MODE6__SAMPLE_SPLIT__SHIFT 0x19
++#define GB_TILE_MODE6__ARRAY_MODE_MASK 0x0000003CL
++#define GB_TILE_MODE6__PIPE_CONFIG_MASK 0x000007C0L
++#define GB_TILE_MODE6__TILE_SPLIT_MASK 0x00003800L
++#define GB_TILE_MODE6__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
++#define GB_TILE_MODE6__SAMPLE_SPLIT_MASK 0x06000000L
++//GB_TILE_MODE7
++#define GB_TILE_MODE7__ARRAY_MODE__SHIFT 0x2
++#define GB_TILE_MODE7__PIPE_CONFIG__SHIFT 0x6
++#define GB_TILE_MODE7__TILE_SPLIT__SHIFT 0xb
++#define GB_TILE_MODE7__MICRO_TILE_MODE_NEW__SHIFT 0x16
++#define GB_TILE_MODE7__SAMPLE_SPLIT__SHIFT 0x19
++#define GB_TILE_MODE7__ARRAY_MODE_MASK 0x0000003CL
++#define GB_TILE_MODE7__PIPE_CONFIG_MASK 0x000007C0L
++#define GB_TILE_MODE7__TILE_SPLIT_MASK 0x00003800L
++#define GB_TILE_MODE7__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
++#define GB_TILE_MODE7__SAMPLE_SPLIT_MASK 0x06000000L
++//GB_TILE_MODE8
++#define GB_TILE_MODE8__ARRAY_MODE__SHIFT 0x2
++#define GB_TILE_MODE8__PIPE_CONFIG__SHIFT 0x6
++#define GB_TILE_MODE8__TILE_SPLIT__SHIFT 0xb
++#define GB_TILE_MODE8__MICRO_TILE_MODE_NEW__SHIFT 0x16
++#define GB_TILE_MODE8__SAMPLE_SPLIT__SHIFT 0x19
++#define GB_TILE_MODE8__ARRAY_MODE_MASK 0x0000003CL
++#define GB_TILE_MODE8__PIPE_CONFIG_MASK 0x000007C0L
++#define GB_TILE_MODE8__TILE_SPLIT_MASK 0x00003800L
++#define GB_TILE_MODE8__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
++#define GB_TILE_MODE8__SAMPLE_SPLIT_MASK 0x06000000L
++//GB_TILE_MODE9
++#define GB_TILE_MODE9__ARRAY_MODE__SHIFT 0x2
++#define GB_TILE_MODE9__PIPE_CONFIG__SHIFT 0x6
++#define GB_TILE_MODE9__TILE_SPLIT__SHIFT 0xb
++#define GB_TILE_MODE9__MICRO_TILE_MODE_NEW__SHIFT 0x16
++#define GB_TILE_MODE9__SAMPLE_SPLIT__SHIFT 0x19
++#define GB_TILE_MODE9__ARRAY_MODE_MASK 0x0000003CL
++#define GB_TILE_MODE9__PIPE_CONFIG_MASK 0x000007C0L
++#define GB_TILE_MODE9__TILE_SPLIT_MASK 0x00003800L
++#define GB_TILE_MODE9__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
++#define GB_TILE_MODE9__SAMPLE_SPLIT_MASK 0x06000000L
++//GB_TILE_MODE10
++#define GB_TILE_MODE10__ARRAY_MODE__SHIFT 0x2
++#define GB_TILE_MODE10__PIPE_CONFIG__SHIFT 0x6
++#define GB_TILE_MODE10__TILE_SPLIT__SHIFT 0xb
++#define GB_TILE_MODE10__MICRO_TILE_MODE_NEW__SHIFT 0x16
++#define GB_TILE_MODE10__SAMPLE_SPLIT__SHIFT 0x19
++#define GB_TILE_MODE10__ARRAY_MODE_MASK 0x0000003CL
++#define GB_TILE_MODE10__PIPE_CONFIG_MASK 0x000007C0L
++#define GB_TILE_MODE10__TILE_SPLIT_MASK 0x00003800L
++#define GB_TILE_MODE10__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
++#define GB_TILE_MODE10__SAMPLE_SPLIT_MASK 0x06000000L
++//GB_TILE_MODE11
++#define GB_TILE_MODE11__ARRAY_MODE__SHIFT 0x2
++#define GB_TILE_MODE11__PIPE_CONFIG__SHIFT 0x6
++#define GB_TILE_MODE11__TILE_SPLIT__SHIFT 0xb
++#define GB_TILE_MODE11__MICRO_TILE_MODE_NEW__SHIFT 0x16
++#define GB_TILE_MODE11__SAMPLE_SPLIT__SHIFT 0x19
++#define GB_TILE_MODE11__ARRAY_MODE_MASK 0x0000003CL
++#define GB_TILE_MODE11__PIPE_CONFIG_MASK 0x000007C0L
++#define GB_TILE_MODE11__TILE_SPLIT_MASK 0x00003800L
++#define GB_TILE_MODE11__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
++#define GB_TILE_MODE11__SAMPLE_SPLIT_MASK 0x06000000L
++//GB_TILE_MODE12
++#define GB_TILE_MODE12__ARRAY_MODE__SHIFT 0x2
++#define GB_TILE_MODE12__PIPE_CONFIG__SHIFT 0x6
++#define GB_TILE_MODE12__TILE_SPLIT__SHIFT 0xb
++#define GB_TILE_MODE12__MICRO_TILE_MODE_NEW__SHIFT 0x16
++#define GB_TILE_MODE12__SAMPLE_SPLIT__SHIFT 0x19
++#define GB_TILE_MODE12__ARRAY_MODE_MASK 0x0000003CL
++#define GB_TILE_MODE12__PIPE_CONFIG_MASK 0x000007C0L
++#define GB_TILE_MODE12__TILE_SPLIT_MASK 0x00003800L
++#define GB_TILE_MODE12__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
++#define GB_TILE_MODE12__SAMPLE_SPLIT_MASK 0x06000000L
++//GB_TILE_MODE13
++#define GB_TILE_MODE13__ARRAY_MODE__SHIFT 0x2
++#define GB_TILE_MODE13__PIPE_CONFIG__SHIFT 0x6
++#define GB_TILE_MODE13__TILE_SPLIT__SHIFT 0xb
++#define GB_TILE_MODE13__MICRO_TILE_MODE_NEW__SHIFT 0x16
++#define GB_TILE_MODE13__SAMPLE_SPLIT__SHIFT 0x19
++#define GB_TILE_MODE13__ARRAY_MODE_MASK 0x0000003CL
++#define GB_TILE_MODE13__PIPE_CONFIG_MASK 0x000007C0L
++#define GB_TILE_MODE13__TILE_SPLIT_MASK 0x00003800L
++#define GB_TILE_MODE13__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
++#define GB_TILE_MODE13__SAMPLE_SPLIT_MASK 0x06000000L
++//GB_TILE_MODE14
++#define GB_TILE_MODE14__ARRAY_MODE__SHIFT 0x2
++#define GB_TILE_MODE14__PIPE_CONFIG__SHIFT 0x6
++#define GB_TILE_MODE14__TILE_SPLIT__SHIFT 0xb
++#define GB_TILE_MODE14__MICRO_TILE_MODE_NEW__SHIFT 0x16
++#define GB_TILE_MODE14__SAMPLE_SPLIT__SHIFT 0x19
++#define GB_TILE_MODE14__ARRAY_MODE_MASK 0x0000003CL
++#define GB_TILE_MODE14__PIPE_CONFIG_MASK 0x000007C0L
++#define GB_TILE_MODE14__TILE_SPLIT_MASK 0x00003800L
++#define GB_TILE_MODE14__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
++#define GB_TILE_MODE14__SAMPLE_SPLIT_MASK 0x06000000L
++//GB_TILE_MODE15
++#define GB_TILE_MODE15__ARRAY_MODE__SHIFT 0x2
++#define GB_TILE_MODE15__PIPE_CONFIG__SHIFT 0x6
++#define GB_TILE_MODE15__TILE_SPLIT__SHIFT 0xb
++#define GB_TILE_MODE15__MICRO_TILE_MODE_NEW__SHIFT 0x16
++#define GB_TILE_MODE15__SAMPLE_SPLIT__SHIFT 0x19
++#define GB_TILE_MODE15__ARRAY_MODE_MASK 0x0000003CL
++#define GB_TILE_MODE15__PIPE_CONFIG_MASK 0x000007C0L
++#define GB_TILE_MODE15__TILE_SPLIT_MASK 0x00003800L
++#define GB_TILE_MODE15__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
++#define GB_TILE_MODE15__SAMPLE_SPLIT_MASK 0x06000000L
++//GB_TILE_MODE16
++#define GB_TILE_MODE16__ARRAY_MODE__SHIFT 0x2
++#define GB_TILE_MODE16__PIPE_CONFIG__SHIFT 0x6
++#define GB_TILE_MODE16__TILE_SPLIT__SHIFT 0xb
++#define GB_TILE_MODE16__MICRO_TILE_MODE_NEW__SHIFT 0x16
++#define GB_TILE_MODE16__SAMPLE_SPLIT__SHIFT 0x19
++#define GB_TILE_MODE16__ARRAY_MODE_MASK 0x0000003CL
++#define GB_TILE_MODE16__PIPE_CONFIG_MASK 0x000007C0L
++#define GB_TILE_MODE16__TILE_SPLIT_MASK 0x00003800L
++#define GB_TILE_MODE16__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
++#define GB_TILE_MODE16__SAMPLE_SPLIT_MASK 0x06000000L
++//GB_TILE_MODE17
++#define GB_TILE_MODE17__ARRAY_MODE__SHIFT 0x2
++#define GB_TILE_MODE17__PIPE_CONFIG__SHIFT 0x6
++#define GB_TILE_MODE17__TILE_SPLIT__SHIFT 0xb
++#define GB_TILE_MODE17__MICRO_TILE_MODE_NEW__SHIFT 0x16
++#define GB_TILE_MODE17__SAMPLE_SPLIT__SHIFT 0x19
++#define GB_TILE_MODE17__ARRAY_MODE_MASK 0x0000003CL
++#define GB_TILE_MODE17__PIPE_CONFIG_MASK 0x000007C0L
++#define GB_TILE_MODE17__TILE_SPLIT_MASK 0x00003800L
++#define GB_TILE_MODE17__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
++#define GB_TILE_MODE17__SAMPLE_SPLIT_MASK 0x06000000L
++//GB_TILE_MODE18
++#define GB_TILE_MODE18__ARRAY_MODE__SHIFT 0x2
++#define GB_TILE_MODE18__PIPE_CONFIG__SHIFT 0x6
++#define GB_TILE_MODE18__TILE_SPLIT__SHIFT 0xb
++#define GB_TILE_MODE18__MICRO_TILE_MODE_NEW__SHIFT 0x16
++#define GB_TILE_MODE18__SAMPLE_SPLIT__SHIFT 0x19
++#define GB_TILE_MODE18__ARRAY_MODE_MASK 0x0000003CL
++#define GB_TILE_MODE18__PIPE_CONFIG_MASK 0x000007C0L
++#define GB_TILE_MODE18__TILE_SPLIT_MASK 0x00003800L
++#define GB_TILE_MODE18__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
++#define GB_TILE_MODE18__SAMPLE_SPLIT_MASK 0x06000000L
++//GB_TILE_MODE19
++#define GB_TILE_MODE19__ARRAY_MODE__SHIFT 0x2
++#define GB_TILE_MODE19__PIPE_CONFIG__SHIFT 0x6
++#define GB_TILE_MODE19__TILE_SPLIT__SHIFT 0xb
++#define GB_TILE_MODE19__MICRO_TILE_MODE_NEW__SHIFT 0x16
++#define GB_TILE_MODE19__SAMPLE_SPLIT__SHIFT 0x19
++#define GB_TILE_MODE19__ARRAY_MODE_MASK 0x0000003CL
++#define GB_TILE_MODE19__PIPE_CONFIG_MASK 0x000007C0L
++#define GB_TILE_MODE19__TILE_SPLIT_MASK 0x00003800L
++#define GB_TILE_MODE19__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
++#define GB_TILE_MODE19__SAMPLE_SPLIT_MASK 0x06000000L
++//GB_TILE_MODE20
++#define GB_TILE_MODE20__ARRAY_MODE__SHIFT 0x2
++#define GB_TILE_MODE20__PIPE_CONFIG__SHIFT 0x6
++#define GB_TILE_MODE20__TILE_SPLIT__SHIFT 0xb
++#define GB_TILE_MODE20__MICRO_TILE_MODE_NEW__SHIFT 0x16
++#define GB_TILE_MODE20__SAMPLE_SPLIT__SHIFT 0x19
++#define GB_TILE_MODE20__ARRAY_MODE_MASK 0x0000003CL
++#define GB_TILE_MODE20__PIPE_CONFIG_MASK 0x000007C0L
++#define GB_TILE_MODE20__TILE_SPLIT_MASK 0x00003800L
++#define GB_TILE_MODE20__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
++#define GB_TILE_MODE20__SAMPLE_SPLIT_MASK 0x06000000L
++//GB_TILE_MODE21
++#define GB_TILE_MODE21__ARRAY_MODE__SHIFT 0x2
++#define GB_TILE_MODE21__PIPE_CONFIG__SHIFT 0x6
++#define GB_TILE_MODE21__TILE_SPLIT__SHIFT 0xb
++#define GB_TILE_MODE21__MICRO_TILE_MODE_NEW__SHIFT 0x16
++#define GB_TILE_MODE21__SAMPLE_SPLIT__SHIFT 0x19
++#define GB_TILE_MODE21__ARRAY_MODE_MASK 0x0000003CL
++#define GB_TILE_MODE21__PIPE_CONFIG_MASK 0x000007C0L
++#define GB_TILE_MODE21__TILE_SPLIT_MASK 0x00003800L
++#define GB_TILE_MODE21__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
++#define GB_TILE_MODE21__SAMPLE_SPLIT_MASK 0x06000000L
++//GB_TILE_MODE22
++#define GB_TILE_MODE22__ARRAY_MODE__SHIFT 0x2
++#define GB_TILE_MODE22__PIPE_CONFIG__SHIFT 0x6
++#define GB_TILE_MODE22__TILE_SPLIT__SHIFT 0xb
++#define GB_TILE_MODE22__MICRO_TILE_MODE_NEW__SHIFT 0x16
++#define GB_TILE_MODE22__SAMPLE_SPLIT__SHIFT 0x19
++#define GB_TILE_MODE22__ARRAY_MODE_MASK 0x0000003CL
++#define GB_TILE_MODE22__PIPE_CONFIG_MASK 0x000007C0L
++#define GB_TILE_MODE22__TILE_SPLIT_MASK 0x00003800L
++#define GB_TILE_MODE22__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
++#define GB_TILE_MODE22__SAMPLE_SPLIT_MASK 0x06000000L
++//GB_TILE_MODE23
++#define GB_TILE_MODE23__ARRAY_MODE__SHIFT 0x2
++#define GB_TILE_MODE23__PIPE_CONFIG__SHIFT 0x6
++#define GB_TILE_MODE23__TILE_SPLIT__SHIFT 0xb
++#define GB_TILE_MODE23__MICRO_TILE_MODE_NEW__SHIFT 0x16
++#define GB_TILE_MODE23__SAMPLE_SPLIT__SHIFT 0x19
++#define GB_TILE_MODE23__ARRAY_MODE_MASK 0x0000003CL
++#define GB_TILE_MODE23__PIPE_CONFIG_MASK 0x000007C0L
++#define GB_TILE_MODE23__TILE_SPLIT_MASK 0x00003800L
++#define GB_TILE_MODE23__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
++#define GB_TILE_MODE23__SAMPLE_SPLIT_MASK 0x06000000L
++//GB_TILE_MODE24
++#define GB_TILE_MODE24__ARRAY_MODE__SHIFT 0x2
++#define GB_TILE_MODE24__PIPE_CONFIG__SHIFT 0x6
++#define GB_TILE_MODE24__TILE_SPLIT__SHIFT 0xb
++#define GB_TILE_MODE24__MICRO_TILE_MODE_NEW__SHIFT 0x16
++#define GB_TILE_MODE24__SAMPLE_SPLIT__SHIFT 0x19
++#define GB_TILE_MODE24__ARRAY_MODE_MASK 0x0000003CL
++#define GB_TILE_MODE24__PIPE_CONFIG_MASK 0x000007C0L
++#define GB_TILE_MODE24__TILE_SPLIT_MASK 0x00003800L
++#define GB_TILE_MODE24__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
++#define GB_TILE_MODE24__SAMPLE_SPLIT_MASK 0x06000000L
++//GB_TILE_MODE25
++#define GB_TILE_MODE25__ARRAY_MODE__SHIFT 0x2
++#define GB_TILE_MODE25__PIPE_CONFIG__SHIFT 0x6
++#define GB_TILE_MODE25__TILE_SPLIT__SHIFT 0xb
++#define GB_TILE_MODE25__MICRO_TILE_MODE_NEW__SHIFT 0x16
++#define GB_TILE_MODE25__SAMPLE_SPLIT__SHIFT 0x19
++#define GB_TILE_MODE25__ARRAY_MODE_MASK 0x0000003CL
++#define GB_TILE_MODE25__PIPE_CONFIG_MASK 0x000007C0L
++#define GB_TILE_MODE25__TILE_SPLIT_MASK 0x00003800L
++#define GB_TILE_MODE25__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
++#define GB_TILE_MODE25__SAMPLE_SPLIT_MASK 0x06000000L
++//GB_TILE_MODE26
++#define GB_TILE_MODE26__ARRAY_MODE__SHIFT 0x2
++#define GB_TILE_MODE26__PIPE_CONFIG__SHIFT 0x6
++#define GB_TILE_MODE26__TILE_SPLIT__SHIFT 0xb
++#define GB_TILE_MODE26__MICRO_TILE_MODE_NEW__SHIFT 0x16
++#define GB_TILE_MODE26__SAMPLE_SPLIT__SHIFT 0x19
++#define GB_TILE_MODE26__ARRAY_MODE_MASK 0x0000003CL
++#define GB_TILE_MODE26__PIPE_CONFIG_MASK 0x000007C0L
++#define GB_TILE_MODE26__TILE_SPLIT_MASK 0x00003800L
++#define GB_TILE_MODE26__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
++#define GB_TILE_MODE26__SAMPLE_SPLIT_MASK 0x06000000L
++//GB_TILE_MODE27
++#define GB_TILE_MODE27__ARRAY_MODE__SHIFT 0x2
++#define GB_TILE_MODE27__PIPE_CONFIG__SHIFT 0x6
++#define GB_TILE_MODE27__TILE_SPLIT__SHIFT 0xb
++#define GB_TILE_MODE27__MICRO_TILE_MODE_NEW__SHIFT 0x16
++#define GB_TILE_MODE27__SAMPLE_SPLIT__SHIFT 0x19
++#define GB_TILE_MODE27__ARRAY_MODE_MASK 0x0000003CL
++#define GB_TILE_MODE27__PIPE_CONFIG_MASK 0x000007C0L
++#define GB_TILE_MODE27__TILE_SPLIT_MASK 0x00003800L
++#define GB_TILE_MODE27__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
++#define GB_TILE_MODE27__SAMPLE_SPLIT_MASK 0x06000000L
++//GB_TILE_MODE28
++#define GB_TILE_MODE28__ARRAY_MODE__SHIFT 0x2
++#define GB_TILE_MODE28__PIPE_CONFIG__SHIFT 0x6
++#define GB_TILE_MODE28__TILE_SPLIT__SHIFT 0xb
++#define GB_TILE_MODE28__MICRO_TILE_MODE_NEW__SHIFT 0x16
++#define GB_TILE_MODE28__SAMPLE_SPLIT__SHIFT 0x19
++#define GB_TILE_MODE28__ARRAY_MODE_MASK 0x0000003CL
++#define GB_TILE_MODE28__PIPE_CONFIG_MASK 0x000007C0L
++#define GB_TILE_MODE28__TILE_SPLIT_MASK 0x00003800L
++#define GB_TILE_MODE28__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
++#define GB_TILE_MODE28__SAMPLE_SPLIT_MASK 0x06000000L
++//GB_TILE_MODE29
++#define GB_TILE_MODE29__ARRAY_MODE__SHIFT 0x2
++#define GB_TILE_MODE29__PIPE_CONFIG__SHIFT 0x6
++#define GB_TILE_MODE29__TILE_SPLIT__SHIFT 0xb
++#define GB_TILE_MODE29__MICRO_TILE_MODE_NEW__SHIFT 0x16
++#define GB_TILE_MODE29__SAMPLE_SPLIT__SHIFT 0x19
++#define GB_TILE_MODE29__ARRAY_MODE_MASK 0x0000003CL
++#define GB_TILE_MODE29__PIPE_CONFIG_MASK 0x000007C0L
++#define GB_TILE_MODE29__TILE_SPLIT_MASK 0x00003800L
++#define GB_TILE_MODE29__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
++#define GB_TILE_MODE29__SAMPLE_SPLIT_MASK 0x06000000L
++//GB_TILE_MODE30
++#define GB_TILE_MODE30__ARRAY_MODE__SHIFT 0x2
++#define GB_TILE_MODE30__PIPE_CONFIG__SHIFT 0x6
++#define GB_TILE_MODE30__TILE_SPLIT__SHIFT 0xb
++#define GB_TILE_MODE30__MICRO_TILE_MODE_NEW__SHIFT 0x16
++#define GB_TILE_MODE30__SAMPLE_SPLIT__SHIFT 0x19
++#define GB_TILE_MODE30__ARRAY_MODE_MASK 0x0000003CL
++#define GB_TILE_MODE30__PIPE_CONFIG_MASK 0x000007C0L
++#define GB_TILE_MODE30__TILE_SPLIT_MASK 0x00003800L
++#define GB_TILE_MODE30__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
++#define GB_TILE_MODE30__SAMPLE_SPLIT_MASK 0x06000000L
++//GB_TILE_MODE31
++#define GB_TILE_MODE31__ARRAY_MODE__SHIFT 0x2
++#define GB_TILE_MODE31__PIPE_CONFIG__SHIFT 0x6
++#define GB_TILE_MODE31__TILE_SPLIT__SHIFT 0xb
++#define GB_TILE_MODE31__MICRO_TILE_MODE_NEW__SHIFT 0x16
++#define GB_TILE_MODE31__SAMPLE_SPLIT__SHIFT 0x19
++#define GB_TILE_MODE31__ARRAY_MODE_MASK 0x0000003CL
++#define GB_TILE_MODE31__PIPE_CONFIG_MASK 0x000007C0L
++#define GB_TILE_MODE31__TILE_SPLIT_MASK 0x00003800L
++#define GB_TILE_MODE31__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
++#define GB_TILE_MODE31__SAMPLE_SPLIT_MASK 0x06000000L
++//GB_MACROTILE_MODE0
++#define GB_MACROTILE_MODE0__BANK_WIDTH__SHIFT 0x0
++#define GB_MACROTILE_MODE0__BANK_HEIGHT__SHIFT 0x2
++#define GB_MACROTILE_MODE0__MACRO_TILE_ASPECT__SHIFT 0x4
++#define GB_MACROTILE_MODE0__NUM_BANKS__SHIFT 0x6
++#define GB_MACROTILE_MODE0__BANK_WIDTH_MASK 0x00000003L
++#define GB_MACROTILE_MODE0__BANK_HEIGHT_MASK 0x0000000CL
++#define GB_MACROTILE_MODE0__MACRO_TILE_ASPECT_MASK 0x00000030L
++#define GB_MACROTILE_MODE0__NUM_BANKS_MASK 0x000000C0L
++//GB_MACROTILE_MODE1
++#define GB_MACROTILE_MODE1__BANK_WIDTH__SHIFT 0x0
++#define GB_MACROTILE_MODE1__BANK_HEIGHT__SHIFT 0x2
++#define GB_MACROTILE_MODE1__MACRO_TILE_ASPECT__SHIFT 0x4
++#define GB_MACROTILE_MODE1__NUM_BANKS__SHIFT 0x6
++#define GB_MACROTILE_MODE1__BANK_WIDTH_MASK 0x00000003L
++#define GB_MACROTILE_MODE1__BANK_HEIGHT_MASK 0x0000000CL
++#define GB_MACROTILE_MODE1__MACRO_TILE_ASPECT_MASK 0x00000030L
++#define GB_MACROTILE_MODE1__NUM_BANKS_MASK 0x000000C0L
++//GB_MACROTILE_MODE2
++#define GB_MACROTILE_MODE2__BANK_WIDTH__SHIFT 0x0
++#define GB_MACROTILE_MODE2__BANK_HEIGHT__SHIFT 0x2
++#define GB_MACROTILE_MODE2__MACRO_TILE_ASPECT__SHIFT 0x4
++#define GB_MACROTILE_MODE2__NUM_BANKS__SHIFT 0x6
++#define GB_MACROTILE_MODE2__BANK_WIDTH_MASK 0x00000003L
++#define GB_MACROTILE_MODE2__BANK_HEIGHT_MASK 0x0000000CL
++#define GB_MACROTILE_MODE2__MACRO_TILE_ASPECT_MASK 0x00000030L
++#define GB_MACROTILE_MODE2__NUM_BANKS_MASK 0x000000C0L
++//GB_MACROTILE_MODE3
++#define GB_MACROTILE_MODE3__BANK_WIDTH__SHIFT 0x0
++#define GB_MACROTILE_MODE3__BANK_HEIGHT__SHIFT 0x2
++#define GB_MACROTILE_MODE3__MACRO_TILE_ASPECT__SHIFT 0x4
++#define GB_MACROTILE_MODE3__NUM_BANKS__SHIFT 0x6
++#define GB_MACROTILE_MODE3__BANK_WIDTH_MASK 0x00000003L
++#define GB_MACROTILE_MODE3__BANK_HEIGHT_MASK 0x0000000CL
++#define GB_MACROTILE_MODE3__MACRO_TILE_ASPECT_MASK 0x00000030L
++#define GB_MACROTILE_MODE3__NUM_BANKS_MASK 0x000000C0L
++//GB_MACROTILE_MODE4
++#define GB_MACROTILE_MODE4__BANK_WIDTH__SHIFT 0x0
++#define GB_MACROTILE_MODE4__BANK_HEIGHT__SHIFT 0x2
++#define GB_MACROTILE_MODE4__MACRO_TILE_ASPECT__SHIFT 0x4
++#define GB_MACROTILE_MODE4__NUM_BANKS__SHIFT 0x6
++#define GB_MACROTILE_MODE4__BANK_WIDTH_MASK 0x00000003L
++#define GB_MACROTILE_MODE4__BANK_HEIGHT_MASK 0x0000000CL
++#define GB_MACROTILE_MODE4__MACRO_TILE_ASPECT_MASK 0x00000030L
++#define GB_MACROTILE_MODE4__NUM_BANKS_MASK 0x000000C0L
++//GB_MACROTILE_MODE5
++#define GB_MACROTILE_MODE5__BANK_WIDTH__SHIFT 0x0
++#define GB_MACROTILE_MODE5__BANK_HEIGHT__SHIFT 0x2
++#define GB_MACROTILE_MODE5__MACRO_TILE_ASPECT__SHIFT 0x4
++#define GB_MACROTILE_MODE5__NUM_BANKS__SHIFT 0x6
++#define GB_MACROTILE_MODE5__BANK_WIDTH_MASK 0x00000003L
++#define GB_MACROTILE_MODE5__BANK_HEIGHT_MASK 0x0000000CL
++#define GB_MACROTILE_MODE5__MACRO_TILE_ASPECT_MASK 0x00000030L
++#define GB_MACROTILE_MODE5__NUM_BANKS_MASK 0x000000C0L
++//GB_MACROTILE_MODE6
++#define GB_MACROTILE_MODE6__BANK_WIDTH__SHIFT 0x0
++#define GB_MACROTILE_MODE6__BANK_HEIGHT__SHIFT 0x2
++#define GB_MACROTILE_MODE6__MACRO_TILE_ASPECT__SHIFT 0x4
++#define GB_MACROTILE_MODE6__NUM_BANKS__SHIFT 0x6
++#define GB_MACROTILE_MODE6__BANK_WIDTH_MASK 0x00000003L
++#define GB_MACROTILE_MODE6__BANK_HEIGHT_MASK 0x0000000CL
++#define GB_MACROTILE_MODE6__MACRO_TILE_ASPECT_MASK 0x00000030L
++#define GB_MACROTILE_MODE6__NUM_BANKS_MASK 0x000000C0L
++//GB_MACROTILE_MODE7
++#define GB_MACROTILE_MODE7__BANK_WIDTH__SHIFT 0x0
++#define GB_MACROTILE_MODE7__BANK_HEIGHT__SHIFT 0x2
++#define GB_MACROTILE_MODE7__MACRO_TILE_ASPECT__SHIFT 0x4
++#define GB_MACROTILE_MODE7__NUM_BANKS__SHIFT 0x6
++#define GB_MACROTILE_MODE7__BANK_WIDTH_MASK 0x00000003L
++#define GB_MACROTILE_MODE7__BANK_HEIGHT_MASK 0x0000000CL
++#define GB_MACROTILE_MODE7__MACRO_TILE_ASPECT_MASK 0x00000030L
++#define GB_MACROTILE_MODE7__NUM_BANKS_MASK 0x000000C0L
++//GB_MACROTILE_MODE8
++#define GB_MACROTILE_MODE8__BANK_WIDTH__SHIFT 0x0
++#define GB_MACROTILE_MODE8__BANK_HEIGHT__SHIFT 0x2
++#define GB_MACROTILE_MODE8__MACRO_TILE_ASPECT__SHIFT 0x4
++#define GB_MACROTILE_MODE8__NUM_BANKS__SHIFT 0x6
++#define GB_MACROTILE_MODE8__BANK_WIDTH_MASK 0x00000003L
++#define GB_MACROTILE_MODE8__BANK_HEIGHT_MASK 0x0000000CL
++#define GB_MACROTILE_MODE8__MACRO_TILE_ASPECT_MASK 0x00000030L
++#define GB_MACROTILE_MODE8__NUM_BANKS_MASK 0x000000C0L
++//GB_MACROTILE_MODE9
++#define GB_MACROTILE_MODE9__BANK_WIDTH__SHIFT 0x0
++#define GB_MACROTILE_MODE9__BANK_HEIGHT__SHIFT 0x2
++#define GB_MACROTILE_MODE9__MACRO_TILE_ASPECT__SHIFT 0x4
++#define GB_MACROTILE_MODE9__NUM_BANKS__SHIFT 0x6
++#define GB_MACROTILE_MODE9__BANK_WIDTH_MASK 0x00000003L
++#define GB_MACROTILE_MODE9__BANK_HEIGHT_MASK 0x0000000CL
++#define GB_MACROTILE_MODE9__MACRO_TILE_ASPECT_MASK 0x00000030L
++#define GB_MACROTILE_MODE9__NUM_BANKS_MASK 0x000000C0L
++//GB_MACROTILE_MODE10
++#define GB_MACROTILE_MODE10__BANK_WIDTH__SHIFT 0x0
++#define GB_MACROTILE_MODE10__BANK_HEIGHT__SHIFT 0x2
++#define GB_MACROTILE_MODE10__MACRO_TILE_ASPECT__SHIFT 0x4
++#define GB_MACROTILE_MODE10__NUM_BANKS__SHIFT 0x6
++#define GB_MACROTILE_MODE10__BANK_WIDTH_MASK 0x00000003L
++#define GB_MACROTILE_MODE10__BANK_HEIGHT_MASK 0x0000000CL
++#define GB_MACROTILE_MODE10__MACRO_TILE_ASPECT_MASK 0x00000030L
++#define GB_MACROTILE_MODE10__NUM_BANKS_MASK 0x000000C0L
++//GB_MACROTILE_MODE11
++#define GB_MACROTILE_MODE11__BANK_WIDTH__SHIFT 0x0
++#define GB_MACROTILE_MODE11__BANK_HEIGHT__SHIFT 0x2
++#define GB_MACROTILE_MODE11__MACRO_TILE_ASPECT__SHIFT 0x4
++#define GB_MACROTILE_MODE11__NUM_BANKS__SHIFT 0x6
++#define GB_MACROTILE_MODE11__BANK_WIDTH_MASK 0x00000003L
++#define GB_MACROTILE_MODE11__BANK_HEIGHT_MASK 0x0000000CL
++#define GB_MACROTILE_MODE11__MACRO_TILE_ASPECT_MASK 0x00000030L
++#define GB_MACROTILE_MODE11__NUM_BANKS_MASK 0x000000C0L
++//GB_MACROTILE_MODE12
++#define GB_MACROTILE_MODE12__BANK_WIDTH__SHIFT 0x0
++#define GB_MACROTILE_MODE12__BANK_HEIGHT__SHIFT 0x2
++#define GB_MACROTILE_MODE12__MACRO_TILE_ASPECT__SHIFT 0x4
++#define GB_MACROTILE_MODE12__NUM_BANKS__SHIFT 0x6
++#define GB_MACROTILE_MODE12__BANK_WIDTH_MASK 0x00000003L
++#define GB_MACROTILE_MODE12__BANK_HEIGHT_MASK 0x0000000CL
++#define GB_MACROTILE_MODE12__MACRO_TILE_ASPECT_MASK 0x00000030L
++#define GB_MACROTILE_MODE12__NUM_BANKS_MASK 0x000000C0L
++//GB_MACROTILE_MODE13
++#define GB_MACROTILE_MODE13__BANK_WIDTH__SHIFT 0x0
++#define GB_MACROTILE_MODE13__BANK_HEIGHT__SHIFT 0x2
++#define GB_MACROTILE_MODE13__MACRO_TILE_ASPECT__SHIFT 0x4
++#define GB_MACROTILE_MODE13__NUM_BANKS__SHIFT 0x6
++#define GB_MACROTILE_MODE13__BANK_WIDTH_MASK 0x00000003L
++#define GB_MACROTILE_MODE13__BANK_HEIGHT_MASK 0x0000000CL
++#define GB_MACROTILE_MODE13__MACRO_TILE_ASPECT_MASK 0x00000030L
++#define GB_MACROTILE_MODE13__NUM_BANKS_MASK 0x000000C0L
++//GB_MACROTILE_MODE14
++#define GB_MACROTILE_MODE14__BANK_WIDTH__SHIFT 0x0
++#define GB_MACROTILE_MODE14__BANK_HEIGHT__SHIFT 0x2
++#define GB_MACROTILE_MODE14__MACRO_TILE_ASPECT__SHIFT 0x4
++#define GB_MACROTILE_MODE14__NUM_BANKS__SHIFT 0x6
++#define GB_MACROTILE_MODE14__BANK_WIDTH_MASK 0x00000003L
++#define GB_MACROTILE_MODE14__BANK_HEIGHT_MASK 0x0000000CL
++#define GB_MACROTILE_MODE14__MACRO_TILE_ASPECT_MASK 0x00000030L
++#define GB_MACROTILE_MODE14__NUM_BANKS_MASK 0x000000C0L
++//GB_MACROTILE_MODE15
++#define GB_MACROTILE_MODE15__BANK_WIDTH__SHIFT 0x0
++#define GB_MACROTILE_MODE15__BANK_HEIGHT__SHIFT 0x2
++#define GB_MACROTILE_MODE15__MACRO_TILE_ASPECT__SHIFT 0x4
++#define GB_MACROTILE_MODE15__NUM_BANKS__SHIFT 0x6
++#define GB_MACROTILE_MODE15__BANK_WIDTH_MASK 0x00000003L
++#define GB_MACROTILE_MODE15__BANK_HEIGHT_MASK 0x0000000CL
++#define GB_MACROTILE_MODE15__MACRO_TILE_ASPECT_MASK 0x00000030L
++#define GB_MACROTILE_MODE15__NUM_BANKS_MASK 0x000000C0L
++//CB_HW_CONTROL
++#define CB_HW_CONTROL__CM_CACHE_EVICT_POINT__SHIFT 0x0
++#define CB_HW_CONTROL__FC_CACHE_EVICT_POINT__SHIFT 0x6
++#define CB_HW_CONTROL__CC_CACHE_EVICT_POINT__SHIFT 0xc
++#define CB_HW_CONTROL__ALLOW_MRT_WITH_DUAL_SOURCE__SHIFT 0x10
++#define CB_HW_CONTROL__DISABLE_INTNORM_LE11BPC_CLAMPING__SHIFT 0x12
++#define CB_HW_CONTROL__FORCE_NEEDS_DST__SHIFT 0x13
++#define CB_HW_CONTROL__FORCE_ALWAYS_TOGGLE__SHIFT 0x14
++#define CB_HW_CONTROL__DISABLE_BLEND_OPT_RESULT_EQ_DEST__SHIFT 0x15
++#define CB_HW_CONTROL__DISABLE_FULL_WRITE_MASK__SHIFT 0x16
++#define CB_HW_CONTROL__DISABLE_RESOLVE_OPT_FOR_SINGLE_FRAG__SHIFT 0x17
++#define CB_HW_CONTROL__DISABLE_BLEND_OPT_DONT_RD_DST__SHIFT 0x18
++#define CB_HW_CONTROL__DISABLE_BLEND_OPT_BYPASS__SHIFT 0x19
++#define CB_HW_CONTROL__DISABLE_BLEND_OPT_DISCARD_PIXEL__SHIFT 0x1a
++#define CB_HW_CONTROL__DISABLE_BLEND_OPT_WHEN_DISABLED_SRCALPHA_IS_USED__SHIFT 0x1b
++#define CB_HW_CONTROL__PRIORITIZE_FC_WR_OVER_FC_RD_ON_CMASK_CONFLICT__SHIFT 0x1c
++#define CB_HW_CONTROL__PRIORITIZE_FC_EVICT_OVER_FOP_RD_ON_BANK_CONFLICT__SHIFT 0x1d
++#define CB_HW_CONTROL__DISABLE_CC_IB_SERIALIZER_STATE_OPT__SHIFT 0x1e
++#define CB_HW_CONTROL__DISABLE_PIXEL_IN_QUAD_FIX_FOR_LINEAR_SURFACE__SHIFT 0x1f
++#define CB_HW_CONTROL__CM_CACHE_EVICT_POINT_MASK 0x0000000FL
++#define CB_HW_CONTROL__FC_CACHE_EVICT_POINT_MASK 0x000003C0L
++#define CB_HW_CONTROL__CC_CACHE_EVICT_POINT_MASK 0x0000F000L
++#define CB_HW_CONTROL__ALLOW_MRT_WITH_DUAL_SOURCE_MASK 0x00010000L
++#define CB_HW_CONTROL__DISABLE_INTNORM_LE11BPC_CLAMPING_MASK 0x00040000L
++#define CB_HW_CONTROL__FORCE_NEEDS_DST_MASK 0x00080000L
++#define CB_HW_CONTROL__FORCE_ALWAYS_TOGGLE_MASK 0x00100000L
++#define CB_HW_CONTROL__DISABLE_BLEND_OPT_RESULT_EQ_DEST_MASK 0x00200000L
++#define CB_HW_CONTROL__DISABLE_FULL_WRITE_MASK_MASK 0x00400000L
++#define CB_HW_CONTROL__DISABLE_RESOLVE_OPT_FOR_SINGLE_FRAG_MASK 0x00800000L
++#define CB_HW_CONTROL__DISABLE_BLEND_OPT_DONT_RD_DST_MASK 0x01000000L
++#define CB_HW_CONTROL__DISABLE_BLEND_OPT_BYPASS_MASK 0x02000000L
++#define CB_HW_CONTROL__DISABLE_BLEND_OPT_DISCARD_PIXEL_MASK 0x04000000L
++#define CB_HW_CONTROL__DISABLE_BLEND_OPT_WHEN_DISABLED_SRCALPHA_IS_USED_MASK 0x08000000L
++#define CB_HW_CONTROL__PRIORITIZE_FC_WR_OVER_FC_RD_ON_CMASK_CONFLICT_MASK 0x10000000L
++#define CB_HW_CONTROL__PRIORITIZE_FC_EVICT_OVER_FOP_RD_ON_BANK_CONFLICT_MASK 0x20000000L
++#define CB_HW_CONTROL__DISABLE_CC_IB_SERIALIZER_STATE_OPT_MASK 0x40000000L
++#define CB_HW_CONTROL__DISABLE_PIXEL_IN_QUAD_FIX_FOR_LINEAR_SURFACE_MASK 0x80000000L
++//CB_HW_CONTROL_1
++#define CB_HW_CONTROL_1__CM_CACHE_NUM_TAGS__SHIFT 0x0
++#define CB_HW_CONTROL_1__FC_CACHE_NUM_TAGS__SHIFT 0x5
++#define CB_HW_CONTROL_1__CC_CACHE_NUM_TAGS__SHIFT 0xb
++#define CB_HW_CONTROL_1__CM_TILE_FIFO_DEPTH__SHIFT 0x11
++#define CB_HW_CONTROL_1__RMI_CREDITS__SHIFT 0x1a
++#define CB_HW_CONTROL_1__CM_CACHE_NUM_TAGS_MASK 0x0000001FL
++#define CB_HW_CONTROL_1__FC_CACHE_NUM_TAGS_MASK 0x000007E0L
++#define CB_HW_CONTROL_1__CC_CACHE_NUM_TAGS_MASK 0x0001F800L
++#define CB_HW_CONTROL_1__CM_TILE_FIFO_DEPTH_MASK 0x03FE0000L
++#define CB_HW_CONTROL_1__RMI_CREDITS_MASK 0xFC000000L
++//CB_HW_CONTROL_2
++#define CB_HW_CONTROL_2__CC_EVEN_ODD_FIFO_DEPTH__SHIFT 0x0
++#define CB_HW_CONTROL_2__FC_RDLAT_TILE_FIFO_DEPTH__SHIFT 0x8
++#define CB_HW_CONTROL_2__FC_RDLAT_QUAD_FIFO_DEPTH__SHIFT 0xf
++#define CB_HW_CONTROL_2__DRR_ASSUMED_FIFO_DEPTH_DIV8__SHIFT 0x18
++#define CB_HW_CONTROL_2__CHICKEN_BITS__SHIFT 0x1c
++#define CB_HW_CONTROL_2__CC_EVEN_ODD_FIFO_DEPTH_MASK 0x000000FFL
++#define CB_HW_CONTROL_2__FC_RDLAT_TILE_FIFO_DEPTH_MASK 0x00007F00L
++#define CB_HW_CONTROL_2__FC_RDLAT_QUAD_FIFO_DEPTH_MASK 0x007F8000L
++#define CB_HW_CONTROL_2__DRR_ASSUMED_FIFO_DEPTH_DIV8_MASK 0x0F000000L
++#define CB_HW_CONTROL_2__CHICKEN_BITS_MASK 0xF0000000L
++//CB_HW_CONTROL_3
++#define CB_HW_CONTROL_3__DISABLE_SLOW_MODE_EMPTY_HALF_QUAD_KILL__SHIFT 0x0
++#define CB_HW_CONTROL_3__RAM_ADDRESS_CONFLICTS_DISALLOWED__SHIFT 0x1
++#define CB_HW_CONTROL_3__DISABLE_FAST_CLEAR_FETCH_OPT__SHIFT 0x2
++#define CB_HW_CONTROL_3__DISABLE_QUAD_MARKER_DROP_STOP__SHIFT 0x3
++#define CB_HW_CONTROL_3__DISABLE_OVERWRITE_COMBINER_CAM_CLR__SHIFT 0x4
++#define CB_HW_CONTROL_3__DISABLE_CC_CACHE_OVWR_STATUS_ACCUM__SHIFT 0x5
++#define CB_HW_CONTROL_3__DISABLE_CC_CACHE_OVWR_KEY_MOD__SHIFT 0x6
++#define CB_HW_CONTROL_3__DISABLE_CC_CACHE_PANIC_GATING__SHIFT 0x7
++#define CB_HW_CONTROL_3__DISABLE_OVERWRITE_COMBINER_TARGET_MASK_VALIDATION__SHIFT 0x8
++#define CB_HW_CONTROL_3__SPLIT_ALL_FAST_MODE_TRANSFERS__SHIFT 0x9
++#define CB_HW_CONTROL_3__DISABLE_SHADER_BLEND_OPTS__SHIFT 0xa
++#define CB_HW_CONTROL_3__DISABLE_CMASK_LAST_QUAD_INSERTION__SHIFT 0xb
++#define CB_HW_CONTROL_3__DISABLE_ROP3_FIXES_OF_BUG_511967__SHIFT 0xc
++#define CB_HW_CONTROL_3__DISABLE_ROP3_FIXES_OF_BUG_520657__SHIFT 0xd
++#define CB_HW_CONTROL_3__DISABLE_OC_FIXES_OF_BUG_522542__SHIFT 0xe
++#define CB_HW_CONTROL_3__FORCE_RMI_LAST_HIGH__SHIFT 0xf
++#define CB_HW_CONTROL_3__FORCE_RMI_CLKEN_HIGH__SHIFT 0x10
++#define CB_HW_CONTROL_3__DISABLE_EARLY_WRACKS_CC__SHIFT 0x11
++#define CB_HW_CONTROL_3__DISABLE_EARLY_WRACKS_FC__SHIFT 0x12
++#define CB_HW_CONTROL_3__DISABLE_EARLY_WRACKS_DC__SHIFT 0x13
++#define CB_HW_CONTROL_3__DISABLE_EARLY_WRACKS_CM__SHIFT 0x14
++#define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_CC__SHIFT 0x15
++#define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_FC__SHIFT 0x16
++#define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_DC__SHIFT 0x17
++#define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_CM__SHIFT 0x18
++#define CB_HW_CONTROL_3__DISABLE_NACK_COLOR_RD_WR_OPT__SHIFT 0x19
++#define CB_HW_CONTROL_3__DISABLE_BLENDER_CLOCK_GATING__SHIFT 0x1a
++#define CB_HW_CONTROL_3__DISABLE_DUALSRC_WITH_OBJPRIMID_FIX__SHIFT 0x1b
++#define CB_HW_CONTROL_3__COLOR_CACHE_PREFETCH_NUM_CLS__SHIFT 0x1c
++#define CB_HW_CONTROL_3__DISABLE_SLOW_MODE_EMPTY_HALF_QUAD_KILL_MASK 0x00000001L
++#define CB_HW_CONTROL_3__RAM_ADDRESS_CONFLICTS_DISALLOWED_MASK 0x00000002L
++#define CB_HW_CONTROL_3__DISABLE_FAST_CLEAR_FETCH_OPT_MASK 0x00000004L
++#define CB_HW_CONTROL_3__DISABLE_QUAD_MARKER_DROP_STOP_MASK 0x00000008L
++#define CB_HW_CONTROL_3__DISABLE_OVERWRITE_COMBINER_CAM_CLR_MASK 0x00000010L
++#define CB_HW_CONTROL_3__DISABLE_CC_CACHE_OVWR_STATUS_ACCUM_MASK 0x00000020L
++#define CB_HW_CONTROL_3__DISABLE_CC_CACHE_OVWR_KEY_MOD_MASK 0x00000040L
++#define CB_HW_CONTROL_3__DISABLE_CC_CACHE_PANIC_GATING_MASK 0x00000080L
++#define CB_HW_CONTROL_3__DISABLE_OVERWRITE_COMBINER_TARGET_MASK_VALIDATION_MASK 0x00000100L
++#define CB_HW_CONTROL_3__SPLIT_ALL_FAST_MODE_TRANSFERS_MASK 0x00000200L
++#define CB_HW_CONTROL_3__DISABLE_SHADER_BLEND_OPTS_MASK 0x00000400L
++#define CB_HW_CONTROL_3__DISABLE_CMASK_LAST_QUAD_INSERTION_MASK 0x00000800L
++#define CB_HW_CONTROL_3__DISABLE_ROP3_FIXES_OF_BUG_511967_MASK 0x00001000L
++#define CB_HW_CONTROL_3__DISABLE_ROP3_FIXES_OF_BUG_520657_MASK 0x00002000L
++#define CB_HW_CONTROL_3__DISABLE_OC_FIXES_OF_BUG_522542_MASK 0x00004000L
++#define CB_HW_CONTROL_3__FORCE_RMI_LAST_HIGH_MASK 0x00008000L
++#define CB_HW_CONTROL_3__FORCE_RMI_CLKEN_HIGH_MASK 0x00010000L
++#define CB_HW_CONTROL_3__DISABLE_EARLY_WRACKS_CC_MASK 0x00020000L
++#define CB_HW_CONTROL_3__DISABLE_EARLY_WRACKS_FC_MASK 0x00040000L
++#define CB_HW_CONTROL_3__DISABLE_EARLY_WRACKS_DC_MASK 0x00080000L
++#define CB_HW_CONTROL_3__DISABLE_EARLY_WRACKS_CM_MASK 0x00100000L
++#define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_CC_MASK 0x00200000L
++#define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_FC_MASK 0x00400000L
++#define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_DC_MASK 0x00800000L
++#define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_CM_MASK 0x01000000L
++#define CB_HW_CONTROL_3__DISABLE_NACK_COLOR_RD_WR_OPT_MASK 0x02000000L
++#define CB_HW_CONTROL_3__DISABLE_BLENDER_CLOCK_GATING_MASK 0x04000000L
++#define CB_HW_CONTROL_3__DISABLE_DUALSRC_WITH_OBJPRIMID_FIX_MASK 0x08000000L
++#define CB_HW_CONTROL_3__COLOR_CACHE_PREFETCH_NUM_CLS_MASK 0x30000000L
++//CB_HW_MEM_ARBITER_RD
++#define CB_HW_MEM_ARBITER_RD__MODE__SHIFT 0x0
++#define CB_HW_MEM_ARBITER_RD__IGNORE_URGENT_AGE__SHIFT 0x2
++#define CB_HW_MEM_ARBITER_RD__BREAK_GROUP_AGE__SHIFT 0x6
++#define CB_HW_MEM_ARBITER_RD__WEIGHT_CC__SHIFT 0xa
++#define CB_HW_MEM_ARBITER_RD__WEIGHT_FC__SHIFT 0xc
++#define CB_HW_MEM_ARBITER_RD__WEIGHT_CM__SHIFT 0xe
++#define CB_HW_MEM_ARBITER_RD__WEIGHT_DC__SHIFT 0x10
++#define CB_HW_MEM_ARBITER_RD__WEIGHT_DECAY_REQS__SHIFT 0x12
++#define CB_HW_MEM_ARBITER_RD__WEIGHT_DECAY_NOREQS__SHIFT 0x14
++#define CB_HW_MEM_ARBITER_RD__WEIGHT_IGNORE_NUM_TIDS__SHIFT 0x16
++#define CB_HW_MEM_ARBITER_RD__SCALE_AGE__SHIFT 0x17
++#define CB_HW_MEM_ARBITER_RD__SCALE_WEIGHT__SHIFT 0x1a
++#define CB_HW_MEM_ARBITER_RD__SEND_LASTS_WITHIN_GROUPS__SHIFT 0x1d
++#define CB_HW_MEM_ARBITER_RD__MODE_MASK 0x00000003L
++#define CB_HW_MEM_ARBITER_RD__IGNORE_URGENT_AGE_MASK 0x0000003CL
++#define CB_HW_MEM_ARBITER_RD__BREAK_GROUP_AGE_MASK 0x000003C0L
++#define CB_HW_MEM_ARBITER_RD__WEIGHT_CC_MASK 0x00000C00L
++#define CB_HW_MEM_ARBITER_RD__WEIGHT_FC_MASK 0x00003000L
++#define CB_HW_MEM_ARBITER_RD__WEIGHT_CM_MASK 0x0000C000L
++#define CB_HW_MEM_ARBITER_RD__WEIGHT_DC_MASK 0x00030000L
++#define CB_HW_MEM_ARBITER_RD__WEIGHT_DECAY_REQS_MASK 0x000C0000L
++#define CB_HW_MEM_ARBITER_RD__WEIGHT_DECAY_NOREQS_MASK 0x00300000L
++#define CB_HW_MEM_ARBITER_RD__WEIGHT_IGNORE_NUM_TIDS_MASK 0x00400000L
++#define CB_HW_MEM_ARBITER_RD__SCALE_AGE_MASK 0x03800000L
++#define CB_HW_MEM_ARBITER_RD__SCALE_WEIGHT_MASK 0x1C000000L
++#define CB_HW_MEM_ARBITER_RD__SEND_LASTS_WITHIN_GROUPS_MASK 0x20000000L
++//CB_HW_MEM_ARBITER_WR
++#define CB_HW_MEM_ARBITER_WR__MODE__SHIFT 0x0
++#define CB_HW_MEM_ARBITER_WR__IGNORE_URGENT_AGE__SHIFT 0x2
++#define CB_HW_MEM_ARBITER_WR__BREAK_GROUP_AGE__SHIFT 0x6
++#define CB_HW_MEM_ARBITER_WR__WEIGHT_CC__SHIFT 0xa
++#define CB_HW_MEM_ARBITER_WR__WEIGHT_FC__SHIFT 0xc
++#define CB_HW_MEM_ARBITER_WR__WEIGHT_CM__SHIFT 0xe
++#define CB_HW_MEM_ARBITER_WR__WEIGHT_DC__SHIFT 0x10
++#define CB_HW_MEM_ARBITER_WR__WEIGHT_DECAY_REQS__SHIFT 0x12
++#define CB_HW_MEM_ARBITER_WR__WEIGHT_DECAY_NOREQS__SHIFT 0x14
++#define CB_HW_MEM_ARBITER_WR__WEIGHT_IGNORE_BYTE_MASK__SHIFT 0x16
++#define CB_HW_MEM_ARBITER_WR__SCALE_AGE__SHIFT 0x17
++#define CB_HW_MEM_ARBITER_WR__SCALE_WEIGHT__SHIFT 0x1a
++#define CB_HW_MEM_ARBITER_WR__SEND_LASTS_WITHIN_GROUPS__SHIFT 0x1d
++#define CB_HW_MEM_ARBITER_WR__MODE_MASK 0x00000003L
++#define CB_HW_MEM_ARBITER_WR__IGNORE_URGENT_AGE_MASK 0x0000003CL
++#define CB_HW_MEM_ARBITER_WR__BREAK_GROUP_AGE_MASK 0x000003C0L
++#define CB_HW_MEM_ARBITER_WR__WEIGHT_CC_MASK 0x00000C00L
++#define CB_HW_MEM_ARBITER_WR__WEIGHT_FC_MASK 0x00003000L
++#define CB_HW_MEM_ARBITER_WR__WEIGHT_CM_MASK 0x0000C000L
++#define CB_HW_MEM_ARBITER_WR__WEIGHT_DC_MASK 0x00030000L
++#define CB_HW_MEM_ARBITER_WR__WEIGHT_DECAY_REQS_MASK 0x000C0000L
++#define CB_HW_MEM_ARBITER_WR__WEIGHT_DECAY_NOREQS_MASK 0x00300000L
++#define CB_HW_MEM_ARBITER_WR__WEIGHT_IGNORE_BYTE_MASK_MASK 0x00400000L
++#define CB_HW_MEM_ARBITER_WR__SCALE_AGE_MASK 0x03800000L
++#define CB_HW_MEM_ARBITER_WR__SCALE_WEIGHT_MASK 0x1C000000L
++#define CB_HW_MEM_ARBITER_WR__SEND_LASTS_WITHIN_GROUPS_MASK 0x20000000L
++//CB_DCC_CONFIG
++#define CB_DCC_CONFIG__OVERWRITE_COMBINER_DEPTH__SHIFT 0x0
++#define CB_DCC_CONFIG__OVERWRITE_COMBINER_DISABLE__SHIFT 0x5
++#define CB_DCC_CONFIG__OVERWRITE_COMBINER_CC_POP_DISABLE__SHIFT 0x6
++#define CB_DCC_CONFIG__FC_RDLAT_KEYID_FIFO_DEPTH__SHIFT 0x8
++#define CB_DCC_CONFIG__READ_RETURN_SKID_FIFO_DEPTH__SHIFT 0x10
++#define CB_DCC_CONFIG__DCC_CACHE_EVICT_POINT__SHIFT 0x18
++#define CB_DCC_CONFIG__DCC_CACHE_NUM_TAGS__SHIFT 0x1c
++#define CB_DCC_CONFIG__OVERWRITE_COMBINER_DEPTH_MASK 0x0000001FL
++#define CB_DCC_CONFIG__OVERWRITE_COMBINER_DISABLE_MASK 0x00000020L
++#define CB_DCC_CONFIG__OVERWRITE_COMBINER_CC_POP_DISABLE_MASK 0x00000040L
++#define CB_DCC_CONFIG__FC_RDLAT_KEYID_FIFO_DEPTH_MASK 0x0000FF00L
++#define CB_DCC_CONFIG__READ_RETURN_SKID_FIFO_DEPTH_MASK 0x007F0000L
++#define CB_DCC_CONFIG__DCC_CACHE_EVICT_POINT_MASK 0x0F000000L
++#define CB_DCC_CONFIG__DCC_CACHE_NUM_TAGS_MASK 0xF0000000L
++//GC_USER_RB_REDUNDANCY
++#define GC_USER_RB_REDUNDANCY__FAILED_RB0__SHIFT 0x8
++#define GC_USER_RB_REDUNDANCY__EN_REDUNDANCY0__SHIFT 0xc
++#define GC_USER_RB_REDUNDANCY__FAILED_RB1__SHIFT 0x10
++#define GC_USER_RB_REDUNDANCY__EN_REDUNDANCY1__SHIFT 0x14
++#define GC_USER_RB_REDUNDANCY__FAILED_RB0_MASK 0x00000F00L
++#define GC_USER_RB_REDUNDANCY__EN_REDUNDANCY0_MASK 0x00001000L
++#define GC_USER_RB_REDUNDANCY__FAILED_RB1_MASK 0x000F0000L
++#define GC_USER_RB_REDUNDANCY__EN_REDUNDANCY1_MASK 0x00100000L
++//GC_USER_RB_BACKEND_DISABLE
++#define GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT 0x10
++#define GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK 0x00FF0000L
++
++
++// addressBlock: gc_ea_gceadec2
++//GCEA_EDC_CNT
++#define GCEA_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT__SHIFT 0x0
++#define GCEA_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT__SHIFT 0x2
++#define GCEA_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT__SHIFT 0x4
++#define GCEA_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT__SHIFT 0x6
++#define GCEA_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT__SHIFT 0x8
++#define GCEA_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT__SHIFT 0xa
++#define GCEA_EDC_CNT__RRET_TAGMEM_SEC_COUNT__SHIFT 0xc
++#define GCEA_EDC_CNT__RRET_TAGMEM_DED_COUNT__SHIFT 0xe
++#define GCEA_EDC_CNT__WRET_TAGMEM_SEC_COUNT__SHIFT 0x10
++#define GCEA_EDC_CNT__WRET_TAGMEM_DED_COUNT__SHIFT 0x12
++#define GCEA_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT__SHIFT 0x14
++#define GCEA_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT__SHIFT 0x16
++#define GCEA_EDC_CNT__IORD_CMDMEM_SED_COUNT__SHIFT 0x18
++#define GCEA_EDC_CNT__IOWR_CMDMEM_SED_COUNT__SHIFT 0x1a
++#define GCEA_EDC_CNT__IOWR_DATAMEM_SED_COUNT__SHIFT 0x1c
++#define GCEA_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
++#define GCEA_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
++#define GCEA_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
++#define GCEA_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
++#define GCEA_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
++#define GCEA_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
++#define GCEA_EDC_CNT__RRET_TAGMEM_SEC_COUNT_MASK 0x00003000L
++#define GCEA_EDC_CNT__RRET_TAGMEM_DED_COUNT_MASK 0x0000C000L
++#define GCEA_EDC_CNT__WRET_TAGMEM_SEC_COUNT_MASK 0x00030000L
++#define GCEA_EDC_CNT__WRET_TAGMEM_DED_COUNT_MASK 0x000C0000L
++#define GCEA_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT_MASK 0x00300000L
++#define GCEA_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT_MASK 0x00C00000L
++#define GCEA_EDC_CNT__IORD_CMDMEM_SED_COUNT_MASK 0x03000000L
++#define GCEA_EDC_CNT__IOWR_CMDMEM_SED_COUNT_MASK 0x0C000000L
++#define GCEA_EDC_CNT__IOWR_DATAMEM_SED_COUNT_MASK 0x30000000L
++//GCEA_EDC_CNT2
++#define GCEA_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT__SHIFT 0x0
++#define GCEA_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT__SHIFT 0x2
++#define GCEA_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT__SHIFT 0x4
++#define GCEA_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT__SHIFT 0x6
++#define GCEA_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT__SHIFT 0x8
++#define GCEA_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa
++#define GCEA_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc
++#define GCEA_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe
++#define GCEA_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
++#define GCEA_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
++#define GCEA_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
++#define GCEA_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
++#define GCEA_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
++#define GCEA_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
++#define GCEA_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L
++#define GCEA_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L
++//GCEA_DSM_CNTL
++#define GCEA_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
++#define GCEA_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
++#define GCEA_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x3
++#define GCEA_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
++#define GCEA_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x6
++#define GCEA_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
++#define GCEA_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0x9
++#define GCEA_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
++#define GCEA_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0xc
++#define GCEA_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
++#define GCEA_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0xf
++#define GCEA_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
++#define GCEA_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x12
++#define GCEA_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
++#define GCEA_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x15
++#define GCEA_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x17
++#define GCEA_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
++#define GCEA_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
++#define GCEA_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
++#define GCEA_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
++#define GCEA_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
++#define GCEA_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
++#define GCEA_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
++#define GCEA_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
++#define GCEA_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
++#define GCEA_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
++#define GCEA_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
++#define GCEA_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
++#define GCEA_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
++#define GCEA_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
++#define GCEA_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00600000L
++#define GCEA_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00800000L
++//GCEA_DSM_CNTLA
++#define GCEA_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
++#define GCEA_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
++#define GCEA_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x3
++#define GCEA_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
++#define GCEA_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x6
++#define GCEA_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
++#define GCEA_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x9
++#define GCEA_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
++#define GCEA_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0xc
++#define GCEA_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
++#define GCEA_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0xf
++#define GCEA_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
++#define GCEA_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x12
++#define GCEA_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
++#define GCEA_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
++#define GCEA_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
++#define GCEA_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
++#define GCEA_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
++#define GCEA_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
++#define GCEA_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
++#define GCEA_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
++#define GCEA_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
++#define GCEA_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
++#define GCEA_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
++#define GCEA_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
++#define GCEA_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
++#define GCEA_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
++#define GCEA_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
++//GCEA_DSM_CNTLB
++//GCEA_DSM_CNTL2
++#define GCEA_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x0
++#define GCEA_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x2
++#define GCEA_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x3
++#define GCEA_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x5
++#define GCEA_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x6
++#define GCEA_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x8
++#define GCEA_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0x9
++#define GCEA_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xb
++#define GCEA_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0xc
++#define GCEA_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xe
++#define GCEA_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0xf
++#define GCEA_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x11
++#define GCEA_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x12
++#define GCEA_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x14
++#define GCEA_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x15
++#define GCEA_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x17
++#define GCEA_DSM_CNTL2__INJECT_DELAY__SHIFT 0x1a
++#define GCEA_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
++#define GCEA_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000004L
++#define GCEA_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
++#define GCEA_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000020L
++#define GCEA_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
++#define GCEA_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00000100L
++#define GCEA_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
++#define GCEA_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00000800L
++#define GCEA_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
++#define GCEA_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00004000L
++#define GCEA_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
++#define GCEA_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00020000L
++#define GCEA_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
++#define GCEA_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00100000L
++#define GCEA_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00600000L
++#define GCEA_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00800000L
++#define GCEA_DSM_CNTL2__INJECT_DELAY_MASK 0xFC000000L
++//GCEA_DSM_CNTL2A
++#define GCEA_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x0
++#define GCEA_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x2
++#define GCEA_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x3
++#define GCEA_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x5
++#define GCEA_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x6
++#define GCEA_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x8
++#define GCEA_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x9
++#define GCEA_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0xb
++#define GCEA_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0xc
++#define GCEA_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0xe
++#define GCEA_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0xf
++#define GCEA_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x11
++#define GCEA_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x12
++#define GCEA_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x14
++#define GCEA_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
++#define GCEA_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000004L
++#define GCEA_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
++#define GCEA_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000020L
++#define GCEA_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
++#define GCEA_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000100L
++#define GCEA_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
++#define GCEA_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000800L
++#define GCEA_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
++#define GCEA_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00004000L
++#define GCEA_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
++#define GCEA_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00020000L
++#define GCEA_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
++#define GCEA_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00100000L
++//GCEA_DSM_CNTL2B
++//GCEA_TCC_XBR_CREDITS
++#define GCEA_TCC_XBR_CREDITS__DRAM_RD_LIMIT__SHIFT 0x0
++#define GCEA_TCC_XBR_CREDITS__DRAM_RD_RESERVE__SHIFT 0x6
++#define GCEA_TCC_XBR_CREDITS__IO_RD_LIMIT__SHIFT 0x8
++#define GCEA_TCC_XBR_CREDITS__IO_RD_RESERVE__SHIFT 0xe
++#define GCEA_TCC_XBR_CREDITS__DRAM_WR_LIMIT__SHIFT 0x10
++#define GCEA_TCC_XBR_CREDITS__DRAM_WR_RESERVE__SHIFT 0x16
++#define GCEA_TCC_XBR_CREDITS__IO_WR_LIMIT__SHIFT 0x18
++#define GCEA_TCC_XBR_CREDITS__IO_WR_RESERVE__SHIFT 0x1e
++#define GCEA_TCC_XBR_CREDITS__DRAM_RD_LIMIT_MASK 0x0000003FL
++#define GCEA_TCC_XBR_CREDITS__DRAM_RD_RESERVE_MASK 0x000000C0L
++#define GCEA_TCC_XBR_CREDITS__IO_RD_LIMIT_MASK 0x00003F00L
++#define GCEA_TCC_XBR_CREDITS__IO_RD_RESERVE_MASK 0x0000C000L
++#define GCEA_TCC_XBR_CREDITS__DRAM_WR_LIMIT_MASK 0x003F0000L
++#define GCEA_TCC_XBR_CREDITS__DRAM_WR_RESERVE_MASK 0x00C00000L
++#define GCEA_TCC_XBR_CREDITS__IO_WR_LIMIT_MASK 0x3F000000L
++#define GCEA_TCC_XBR_CREDITS__IO_WR_RESERVE_MASK 0xC0000000L
++//GCEA_TCC_XBR_MAXBURST
++#define GCEA_TCC_XBR_MAXBURST__DRAM_RD__SHIFT 0x0
++#define GCEA_TCC_XBR_MAXBURST__IO_RD__SHIFT 0x4
++#define GCEA_TCC_XBR_MAXBURST__DRAM_WR__SHIFT 0x8
++#define GCEA_TCC_XBR_MAXBURST__IO_WR__SHIFT 0xc
++#define GCEA_TCC_XBR_MAXBURST__DRAM_RD_MASK 0x0000000FL
++#define GCEA_TCC_XBR_MAXBURST__IO_RD_MASK 0x000000F0L
++#define GCEA_TCC_XBR_MAXBURST__DRAM_WR_MASK 0x00000F00L
++#define GCEA_TCC_XBR_MAXBURST__IO_WR_MASK 0x0000F000L
++//GCEA_PROBE_CNTL
++#define GCEA_PROBE_CNTL__REQ2RSP_DELAY__SHIFT 0x0
++#define GCEA_PROBE_CNTL__PRB_FILTER_DISABLE__SHIFT 0x5
++#define GCEA_PROBE_CNTL__REQ2RSP_DELAY_MASK 0x0000001FL
++#define GCEA_PROBE_CNTL__PRB_FILTER_DISABLE_MASK 0x00000020L
++//GCEA_PROBE_MAP
++#define GCEA_PROBE_MAP__CHADDR0_TO_RIGHTTCC__SHIFT 0x0
++#define GCEA_PROBE_MAP__CHADDR1_TO_RIGHTTCC__SHIFT 0x1
++#define GCEA_PROBE_MAP__CHADDR2_TO_RIGHTTCC__SHIFT 0x2
++#define GCEA_PROBE_MAP__CHADDR3_TO_RIGHTTCC__SHIFT 0x3
++#define GCEA_PROBE_MAP__CHADDR4_TO_RIGHTTCC__SHIFT 0x4
++#define GCEA_PROBE_MAP__CHADDR5_TO_RIGHTTCC__SHIFT 0x5
++#define GCEA_PROBE_MAP__CHADDR6_TO_RIGHTTCC__SHIFT 0x6
++#define GCEA_PROBE_MAP__CHADDR7_TO_RIGHTTCC__SHIFT 0x7
++#define GCEA_PROBE_MAP__CHADDR8_TO_RIGHTTCC__SHIFT 0x8
++#define GCEA_PROBE_MAP__CHADDR9_TO_RIGHTTCC__SHIFT 0x9
++#define GCEA_PROBE_MAP__CHADDR10_TO_RIGHTTCC__SHIFT 0xa
++#define GCEA_PROBE_MAP__CHADDR11_TO_RIGHTTCC__SHIFT 0xb
++#define GCEA_PROBE_MAP__CHADDR12_TO_RIGHTTCC__SHIFT 0xc
++#define GCEA_PROBE_MAP__CHADDR13_TO_RIGHTTCC__SHIFT 0xd
++#define GCEA_PROBE_MAP__CHADDR14_TO_RIGHTTCC__SHIFT 0xe
++#define GCEA_PROBE_MAP__CHADDR15_TO_RIGHTTCC__SHIFT 0xf
++#define GCEA_PROBE_MAP__INTLV_SIZE__SHIFT 0x10
++#define GCEA_PROBE_MAP__CHADDR0_TO_RIGHTTCC_MASK 0x00000001L
++#define GCEA_PROBE_MAP__CHADDR1_TO_RIGHTTCC_MASK 0x00000002L
++#define GCEA_PROBE_MAP__CHADDR2_TO_RIGHTTCC_MASK 0x00000004L
++#define GCEA_PROBE_MAP__CHADDR3_TO_RIGHTTCC_MASK 0x00000008L
++#define GCEA_PROBE_MAP__CHADDR4_TO_RIGHTTCC_MASK 0x00000010L
++#define GCEA_PROBE_MAP__CHADDR5_TO_RIGHTTCC_MASK 0x00000020L
++#define GCEA_PROBE_MAP__CHADDR6_TO_RIGHTTCC_MASK 0x00000040L
++#define GCEA_PROBE_MAP__CHADDR7_TO_RIGHTTCC_MASK 0x00000080L
++#define GCEA_PROBE_MAP__CHADDR8_TO_RIGHTTCC_MASK 0x00000100L
++#define GCEA_PROBE_MAP__CHADDR9_TO_RIGHTTCC_MASK 0x00000200L
++#define GCEA_PROBE_MAP__CHADDR10_TO_RIGHTTCC_MASK 0x00000400L
++#define GCEA_PROBE_MAP__CHADDR11_TO_RIGHTTCC_MASK 0x00000800L
++#define GCEA_PROBE_MAP__CHADDR12_TO_RIGHTTCC_MASK 0x00001000L
++#define GCEA_PROBE_MAP__CHADDR13_TO_RIGHTTCC_MASK 0x00002000L
++#define GCEA_PROBE_MAP__CHADDR14_TO_RIGHTTCC_MASK 0x00004000L
++#define GCEA_PROBE_MAP__CHADDR15_TO_RIGHTTCC_MASK 0x00008000L
++#define GCEA_PROBE_MAP__INTLV_SIZE_MASK 0x00030000L
++//GCEA_ERR_STATUS
++#define GCEA_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT 0x0
++#define GCEA_ERR_STATUS__SDP_WRRSP_STATUS__SHIFT 0x4
++#define GCEA_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR__SHIFT 0x8
++#define GCEA_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0x9
++#define GCEA_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xa
++#define GCEA_ERR_STATUS__SDP_RDRSP_STATUS_MASK 0x0000000FL
++#define GCEA_ERR_STATUS__SDP_WRRSP_STATUS_MASK 0x000000F0L
++#define GCEA_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR_MASK 0x00000100L
++#define GCEA_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00000200L
++#define GCEA_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00000400L
++//GCEA_MISC2
++#define GCEA_MISC2__CSGROUP_SWAP_IN_DRAM_ARB__SHIFT 0x0
++#define GCEA_MISC2__CSGROUP_SWAP_IN_GMI_ARB__SHIFT 0x1
++#define GCEA_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM__SHIFT 0x2
++#define GCEA_MISC2__CSGRP_BURST_LIMIT_DATA_GMI__SHIFT 0x7
++#define GCEA_MISC2__CSGROUP_SWAP_IN_DRAM_ARB_MASK 0x00000001L
++#define GCEA_MISC2__CSGROUP_SWAP_IN_GMI_ARB_MASK 0x00000002L
++#define GCEA_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM_MASK 0x0000007CL
++#define GCEA_MISC2__CSGRP_BURST_LIMIT_DATA_GMI_MASK 0x00000F80L
++//GCEA_SDP_BACKDOOR_CMDCREDITS0
++#define GCEA_SDP_BACKDOOR_CMDCREDITS0__CREDITS_RECEIVED__SHIFT 0x0
++#define GCEA_SDP_BACKDOOR_CMDCREDITS0__CREDITS_RECEIVED_MASK 0xFFFFFFFFL
++//GCEA_SDP_BACKDOOR_CMDCREDITS1
++#define GCEA_SDP_BACKDOOR_CMDCREDITS1__CREDITS_RECEIVED__SHIFT 0x0
++#define GCEA_SDP_BACKDOOR_CMDCREDITS1__CREDITS_RECEIVED_MASK 0x7FFFFFFFL
++//GCEA_SDP_BACKDOOR_DATACREDITS0
++#define GCEA_SDP_BACKDOOR_DATACREDITS0__CREDITS_RECEIVED__SHIFT 0x0
++#define GCEA_SDP_BACKDOOR_DATACREDITS0__CREDITS_RECEIVED_MASK 0xFFFFFFFFL
++//GCEA_SDP_BACKDOOR_DATACREDITS1
++#define GCEA_SDP_BACKDOOR_DATACREDITS1__CREDITS_RECEIVED__SHIFT 0x0
++#define GCEA_SDP_BACKDOOR_DATACREDITS1__CREDITS_RECEIVED_MASK 0x7FFFFFFFL
++//GCEA_SDP_BACKDOOR_MISCCREDITS
++#define GCEA_SDP_BACKDOOR_MISCCREDITS__RDRSP_CREDITS_RELEASED__SHIFT 0x0
++#define GCEA_SDP_BACKDOOR_MISCCREDITS__WRRSP_CREDITS_RELEASED__SHIFT 0x8
++#define GCEA_SDP_BACKDOOR_MISCCREDITS__PRB_REQ_CREDITS_RELEASED__SHIFT 0x10
++#define GCEA_SDP_BACKDOOR_MISCCREDITS__PRB_RSP_CREDITS_RECEIVED__SHIFT 0x17
++#define GCEA_SDP_BACKDOOR_MISCCREDITS__RDRSP_CREDITS_RELEASED_MASK 0x000000FFL
++#define GCEA_SDP_BACKDOOR_MISCCREDITS__WRRSP_CREDITS_RELEASED_MASK 0x0000FF00L
++#define GCEA_SDP_BACKDOOR_MISCCREDITS__PRB_REQ_CREDITS_RELEASED_MASK 0x007F0000L
++#define GCEA_SDP_BACKDOOR_MISCCREDITS__PRB_RSP_CREDITS_RECEIVED_MASK 0x3F800000L
++//GCEA_SDP_ENABLE
++#define GCEA_SDP_ENABLE__ENABLE__SHIFT 0x0
++#define GCEA_SDP_ENABLE__ENABLE_MASK 0x00000001L
++
++
++// addressBlock: gc_rmi_rmidec
++//RMI_GENERAL_CNTL
++#define RMI_GENERAL_CNTL__BURST_DISABLE__SHIFT 0x0
++#define RMI_GENERAL_CNTL__VMID_BYPASS_ENABLE__SHIFT 0x1
++#define RMI_GENERAL_CNTL__XBAR_MUX_CONFIG__SHIFT 0x11
++#define RMI_GENERAL_CNTL__RB0_HARVEST_EN__SHIFT 0x13
++#define RMI_GENERAL_CNTL__RB1_HARVEST_EN__SHIFT 0x14
++#define RMI_GENERAL_CNTL__LOOPBACK_DIS_BY_REQ_TYPE__SHIFT 0x15
++#define RMI_GENERAL_CNTL__XBAR_MUX_CONFIG_UPDATE__SHIFT 0x19
++#define RMI_GENERAL_CNTL__SKID_FIFO_0_OVERFLOW_ERROR_MASK__SHIFT 0x1a
++#define RMI_GENERAL_CNTL__SKID_FIFO_0_UNDERFLOW_ERROR_MASK__SHIFT 0x1b
++#define RMI_GENERAL_CNTL__SKID_FIFO_1_OVERFLOW_ERROR_MASK__SHIFT 0x1c
++#define RMI_GENERAL_CNTL__SKID_FIFO_1_UNDERFLOW_ERROR_MASK__SHIFT 0x1d
++#define RMI_GENERAL_CNTL__SKID_FIFO_FREESPACE_IS_ZERO_ERROR_MASK__SHIFT 0x1e
++#define RMI_GENERAL_CNTL__BURST_DISABLE_MASK 0x00000001L
++#define RMI_GENERAL_CNTL__VMID_BYPASS_ENABLE_MASK 0x0001FFFEL
++#define RMI_GENERAL_CNTL__XBAR_MUX_CONFIG_MASK 0x00060000L
++#define RMI_GENERAL_CNTL__RB0_HARVEST_EN_MASK 0x00080000L
++#define RMI_GENERAL_CNTL__RB1_HARVEST_EN_MASK 0x00100000L
++#define RMI_GENERAL_CNTL__LOOPBACK_DIS_BY_REQ_TYPE_MASK 0x01E00000L
++#define RMI_GENERAL_CNTL__XBAR_MUX_CONFIG_UPDATE_MASK 0x02000000L
++#define RMI_GENERAL_CNTL__SKID_FIFO_0_OVERFLOW_ERROR_MASK_MASK 0x04000000L
++#define RMI_GENERAL_CNTL__SKID_FIFO_0_UNDERFLOW_ERROR_MASK_MASK 0x08000000L
++#define RMI_GENERAL_CNTL__SKID_FIFO_1_OVERFLOW_ERROR_MASK_MASK 0x10000000L
++#define RMI_GENERAL_CNTL__SKID_FIFO_1_UNDERFLOW_ERROR_MASK_MASK 0x20000000L
++#define RMI_GENERAL_CNTL__SKID_FIFO_FREESPACE_IS_ZERO_ERROR_MASK_MASK 0x40000000L
++//RMI_GENERAL_CNTL1
++#define RMI_GENERAL_CNTL1__EARLY_WRACK_ENABLE_PER_MTYPE__SHIFT 0x0
++#define RMI_GENERAL_CNTL1__TCIW0_64B_RD_STALL_MODE__SHIFT 0x4
++#define RMI_GENERAL_CNTL1__TCIW1_64B_RD_STALL_MODE__SHIFT 0x6
++#define RMI_GENERAL_CNTL1__EARLY_WRACK_DISABLE_FOR_LOOPBACK__SHIFT 0x8
++#define RMI_GENERAL_CNTL1__POLICY_OVERRIDE_VALUE__SHIFT 0x9
++#define RMI_GENERAL_CNTL1__POLICY_OVERRIDE__SHIFT 0xa
++#define RMI_GENERAL_CNTL1__UTCL1_PROBE0_RR_ARB_BURST_HINT_EN__SHIFT 0xb
++#define RMI_GENERAL_CNTL1__UTCL1_PROBE1_RR_ARB_BURST_HINT_EN__SHIFT 0xc
++#define RMI_GENERAL_CNTL1__EARLY_WRACK_ENABLE_PER_MTYPE_MASK 0x0000000FL
++#define RMI_GENERAL_CNTL1__TCIW0_64B_RD_STALL_MODE_MASK 0x00000030L
++#define RMI_GENERAL_CNTL1__TCIW1_64B_RD_STALL_MODE_MASK 0x000000C0L
++#define RMI_GENERAL_CNTL1__EARLY_WRACK_DISABLE_FOR_LOOPBACK_MASK 0x00000100L
++#define RMI_GENERAL_CNTL1__POLICY_OVERRIDE_VALUE_MASK 0x00000200L
++#define RMI_GENERAL_CNTL1__POLICY_OVERRIDE_MASK 0x00000400L
++#define RMI_GENERAL_CNTL1__UTCL1_PROBE0_RR_ARB_BURST_HINT_EN_MASK 0x00000800L
++#define RMI_GENERAL_CNTL1__UTCL1_PROBE1_RR_ARB_BURST_HINT_EN_MASK 0x00001000L
++//RMI_GENERAL_STATUS
++#define RMI_GENERAL_STATUS__GENERAL_RMI_ERRORS_COMBINED__SHIFT 0x0
++#define RMI_GENERAL_STATUS__SKID_FIFO_0_OVERFLOW_ERROR__SHIFT 0x1
++#define RMI_GENERAL_STATUS__SKID_FIFO_0_UNDERFLOW_ERROR__SHIFT 0x2
++#define RMI_GENERAL_STATUS__SKID_FIFO_1_OVERFLOW_ERROR__SHIFT 0x3
++#define RMI_GENERAL_STATUS__SKID_FIFO_1_UNDERFLOW_ERROR__SHIFT 0x4
++#define RMI_GENERAL_STATUS__RMI_XBAR_BUSY__SHIFT 0x5
++#define RMI_GENERAL_STATUS__RMI_UTCL1_BUSY__SHIFT 0x6
++#define RMI_GENERAL_STATUS__RMI_SCOREBOARD_BUSY__SHIFT 0x7
++#define RMI_GENERAL_STATUS__TCIW0_PRT_FIFO_BUSY__SHIFT 0x8
++#define RMI_GENERAL_STATUS__TCIW_FRMTR0_BUSY__SHIFT 0x9
++#define RMI_GENERAL_STATUS__TCIW_RTN_FRMTR0_BUSY__SHIFT 0xa
++#define RMI_GENERAL_STATUS__WRREQ_CONSUMER_FIFO_0_BUSY__SHIFT 0xb
++#define RMI_GENERAL_STATUS__RDREQ_CONSUMER_FIFO_0_BUSY__SHIFT 0xc
++#define RMI_GENERAL_STATUS__TCIW1_PRT_FIFO_BUSY__SHIFT 0xd
++#define RMI_GENERAL_STATUS__TCIW_FRMTR1_BUSY__SHIFT 0xe
++#define RMI_GENERAL_STATUS__TCIW_RTN_FRMTR1_BUSY__SHIFT 0xf
++#define RMI_GENERAL_STATUS__WRREQ_CONSUMER_FIFO_1_BUSY__SHIFT 0x10
++#define RMI_GENERAL_STATUS__RDREQ_CONSUMER_FIFO_1_BUSY__SHIFT 0x11
++#define RMI_GENERAL_STATUS__UTC_PROBE1_BUSY__SHIFT 0x12
++#define RMI_GENERAL_STATUS__UTC_PROBE0_BUSY__SHIFT 0x13
++#define RMI_GENERAL_STATUS__RMI_XNACK_BUSY__SHIFT 0x14
++#define RMI_GENERAL_STATUS__XNACK_FIFO_NUM_USED__SHIFT 0x15
++#define RMI_GENERAL_STATUS__XNACK_FIFO_EMPTY__SHIFT 0x1d
++#define RMI_GENERAL_STATUS__XNACK_FIFO_FULL__SHIFT 0x1e
++#define RMI_GENERAL_STATUS__SKID_FIFO_FREESPACE_IS_ZERO_ERROR__SHIFT 0x1f
++#define RMI_GENERAL_STATUS__GENERAL_RMI_ERRORS_COMBINED_MASK 0x00000001L
++#define RMI_GENERAL_STATUS__SKID_FIFO_0_OVERFLOW_ERROR_MASK 0x00000002L
++#define RMI_GENERAL_STATUS__SKID_FIFO_0_UNDERFLOW_ERROR_MASK 0x00000004L
++#define RMI_GENERAL_STATUS__SKID_FIFO_1_OVERFLOW_ERROR_MASK 0x00000008L
++#define RMI_GENERAL_STATUS__SKID_FIFO_1_UNDERFLOW_ERROR_MASK 0x00000010L
++#define RMI_GENERAL_STATUS__RMI_XBAR_BUSY_MASK 0x00000020L
++#define RMI_GENERAL_STATUS__RMI_UTCL1_BUSY_MASK 0x00000040L
++#define RMI_GENERAL_STATUS__RMI_SCOREBOARD_BUSY_MASK 0x00000080L
++#define RMI_GENERAL_STATUS__TCIW0_PRT_FIFO_BUSY_MASK 0x00000100L
++#define RMI_GENERAL_STATUS__TCIW_FRMTR0_BUSY_MASK 0x00000200L
++#define RMI_GENERAL_STATUS__TCIW_RTN_FRMTR0_BUSY_MASK 0x00000400L
++#define RMI_GENERAL_STATUS__WRREQ_CONSUMER_FIFO_0_BUSY_MASK 0x00000800L
++#define RMI_GENERAL_STATUS__RDREQ_CONSUMER_FIFO_0_BUSY_MASK 0x00001000L
++#define RMI_GENERAL_STATUS__TCIW1_PRT_FIFO_BUSY_MASK 0x00002000L
++#define RMI_GENERAL_STATUS__TCIW_FRMTR1_BUSY_MASK 0x00004000L
++#define RMI_GENERAL_STATUS__TCIW_RTN_FRMTR1_BUSY_MASK 0x00008000L
++#define RMI_GENERAL_STATUS__WRREQ_CONSUMER_FIFO_1_BUSY_MASK 0x00010000L
++#define RMI_GENERAL_STATUS__RDREQ_CONSUMER_FIFO_1_BUSY_MASK 0x00020000L
++#define RMI_GENERAL_STATUS__UTC_PROBE1_BUSY_MASK 0x00040000L
++#define RMI_GENERAL_STATUS__UTC_PROBE0_BUSY_MASK 0x00080000L
++#define RMI_GENERAL_STATUS__RMI_XNACK_BUSY_MASK 0x00100000L
++#define RMI_GENERAL_STATUS__XNACK_FIFO_NUM_USED_MASK 0x1FE00000L
++#define RMI_GENERAL_STATUS__XNACK_FIFO_EMPTY_MASK 0x20000000L
++#define RMI_GENERAL_STATUS__XNACK_FIFO_FULL_MASK 0x40000000L
++#define RMI_GENERAL_STATUS__SKID_FIFO_FREESPACE_IS_ZERO_ERROR_MASK 0x80000000L
++//RMI_SUBBLOCK_STATUS0
++#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_NUM_USED_PROBE0__SHIFT 0x0
++#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_FULL_PROBE0__SHIFT 0x7
++#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_EMPTY_PROBE0__SHIFT 0x8
++#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_NUM_USED_PROBE1__SHIFT 0x9
++#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_FULL_PROBE1__SHIFT 0x10
++#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_EMPTY_PROBE1__SHIFT 0x11
++#define RMI_SUBBLOCK_STATUS0__TCIW0_INFLIGHT_CNT__SHIFT 0x12
++#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_NUM_USED_PROBE0_MASK 0x0000007FL
++#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_FULL_PROBE0_MASK 0x00000080L
++#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_EMPTY_PROBE0_MASK 0x00000100L
++#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_NUM_USED_PROBE1_MASK 0x0000FE00L
++#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_FULL_PROBE1_MASK 0x00010000L
++#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_EMPTY_PROBE1_MASK 0x00020000L
++#define RMI_SUBBLOCK_STATUS0__TCIW0_INFLIGHT_CNT_MASK 0x0FFC0000L
++//RMI_SUBBLOCK_STATUS1
++#define RMI_SUBBLOCK_STATUS1__SKID_FIFO_0_FREE_SPACE__SHIFT 0x0
++#define RMI_SUBBLOCK_STATUS1__SKID_FIFO_1_FREE_SPACE__SHIFT 0xa
++#define RMI_SUBBLOCK_STATUS1__TCIW1_INFLIGHT_CNT__SHIFT 0x14
++#define RMI_SUBBLOCK_STATUS1__SKID_FIFO_0_FREE_SPACE_MASK 0x000003FFL
++#define RMI_SUBBLOCK_STATUS1__SKID_FIFO_1_FREE_SPACE_MASK 0x000FFC00L
++#define RMI_SUBBLOCK_STATUS1__TCIW1_INFLIGHT_CNT_MASK 0x3FF00000L
++//RMI_SUBBLOCK_STATUS2
++#define RMI_SUBBLOCK_STATUS2__PRT_FIFO_0_NUM_USED__SHIFT 0x0
++#define RMI_SUBBLOCK_STATUS2__PRT_FIFO_1_NUM_USED__SHIFT 0x9
++#define RMI_SUBBLOCK_STATUS2__PRT_FIFO_0_NUM_USED_MASK 0x000001FFL
++#define RMI_SUBBLOCK_STATUS2__PRT_FIFO_1_NUM_USED_MASK 0x0003FE00L
++//RMI_SUBBLOCK_STATUS3
++#define RMI_SUBBLOCK_STATUS3__SKID_FIFO_0_FREE_SPACE_TOTAL__SHIFT 0x0
++#define RMI_SUBBLOCK_STATUS3__SKID_FIFO_1_FREE_SPACE_TOTAL__SHIFT 0xa
++#define RMI_SUBBLOCK_STATUS3__SKID_FIFO_0_FREE_SPACE_TOTAL_MASK 0x000003FFL
++#define RMI_SUBBLOCK_STATUS3__SKID_FIFO_1_FREE_SPACE_TOTAL_MASK 0x000FFC00L
++//RMI_XBAR_CONFIG
++#define RMI_XBAR_CONFIG__XBAR_MUX_CONFIG_OVERRIDE__SHIFT 0x0
++#define RMI_XBAR_CONFIG__XBAR_MUX_CONFIG_REQ_TYPE_OVERRIDE__SHIFT 0x2
++#define RMI_XBAR_CONFIG__XBAR_MUX_CONFIG_CB_DB_OVERRIDE__SHIFT 0x6
++#define RMI_XBAR_CONFIG__ARBITER_DIS__SHIFT 0x7
++#define RMI_XBAR_CONFIG__XBAR_EN_IN_REQ__SHIFT 0x8
++#define RMI_XBAR_CONFIG__XBAR_EN_IN_REQ_OVERRIDE__SHIFT 0xc
++#define RMI_XBAR_CONFIG__XBAR_EN_IN_RB0__SHIFT 0xd
++#define RMI_XBAR_CONFIG__XBAR_EN_IN_RB1__SHIFT 0xe
++#define RMI_XBAR_CONFIG__XBAR_MUX_CONFIG_OVERRIDE_MASK 0x00000003L
++#define RMI_XBAR_CONFIG__XBAR_MUX_CONFIG_REQ_TYPE_OVERRIDE_MASK 0x0000003CL
++#define RMI_XBAR_CONFIG__XBAR_MUX_CONFIG_CB_DB_OVERRIDE_MASK 0x00000040L
++#define RMI_XBAR_CONFIG__ARBITER_DIS_MASK 0x00000080L
++#define RMI_XBAR_CONFIG__XBAR_EN_IN_REQ_MASK 0x00000F00L
++#define RMI_XBAR_CONFIG__XBAR_EN_IN_REQ_OVERRIDE_MASK 0x00001000L
++#define RMI_XBAR_CONFIG__XBAR_EN_IN_RB0_MASK 0x00002000L
++#define RMI_XBAR_CONFIG__XBAR_EN_IN_RB1_MASK 0x00004000L
++//RMI_PROBE_POP_LOGIC_CNTL
++#define RMI_PROBE_POP_LOGIC_CNTL__EXT_LAT_FIFO_0_MAX_DEPTH__SHIFT 0x0
++#define RMI_PROBE_POP_LOGIC_CNTL__XLAT_COMBINE0_DIS__SHIFT 0x7
++#define RMI_PROBE_POP_LOGIC_CNTL__REDUCE_MAX_XLAT_CHAIN_SIZE_BY_2__SHIFT 0x8
++#define RMI_PROBE_POP_LOGIC_CNTL__EXT_LAT_FIFO_1_MAX_DEPTH__SHIFT 0xa
++#define RMI_PROBE_POP_LOGIC_CNTL__XLAT_COMBINE1_DIS__SHIFT 0x11
++#define RMI_PROBE_POP_LOGIC_CNTL__EXT_LAT_FIFO_0_MAX_DEPTH_MASK 0x0000007FL
++#define RMI_PROBE_POP_LOGIC_CNTL__XLAT_COMBINE0_DIS_MASK 0x00000080L
++#define RMI_PROBE_POP_LOGIC_CNTL__REDUCE_MAX_XLAT_CHAIN_SIZE_BY_2_MASK 0x00000300L
++#define RMI_PROBE_POP_LOGIC_CNTL__EXT_LAT_FIFO_1_MAX_DEPTH_MASK 0x0001FC00L
++#define RMI_PROBE_POP_LOGIC_CNTL__XLAT_COMBINE1_DIS_MASK 0x00020000L
++//RMI_UTC_XNACK_N_MISC_CNTL
++#define RMI_UTC_XNACK_N_MISC_CNTL__MASTER_XNACK_TIMER_INC__SHIFT 0x0
++#define RMI_UTC_XNACK_N_MISC_CNTL__IND_XNACK_TIMER_START_VALUE__SHIFT 0x8
++#define RMI_UTC_XNACK_N_MISC_CNTL__UTCL1_PERM_MODE__SHIFT 0xc
++#define RMI_UTC_XNACK_N_MISC_CNTL__CP_VMID_RESET_REQUEST_DISABLE__SHIFT 0xd
++#define RMI_UTC_XNACK_N_MISC_CNTL__MASTER_XNACK_TIMER_INC_MASK 0x000000FFL
++#define RMI_UTC_XNACK_N_MISC_CNTL__IND_XNACK_TIMER_START_VALUE_MASK 0x00000F00L
++#define RMI_UTC_XNACK_N_MISC_CNTL__UTCL1_PERM_MODE_MASK 0x00001000L
++#define RMI_UTC_XNACK_N_MISC_CNTL__CP_VMID_RESET_REQUEST_DISABLE_MASK 0x00002000L
++//RMI_DEMUX_CNTL
++#define RMI_DEMUX_CNTL__DEMUX_ARB0_STALL__SHIFT 0x0
++#define RMI_DEMUX_CNTL__DEMUX_ARB0_BREAK_LOB_ON_IDLEIN__SHIFT 0x1
++#define RMI_DEMUX_CNTL__DEMUX_ARB0_STALL_TIMER_OVERRIDE__SHIFT 0x4
++#define RMI_DEMUX_CNTL__DEMUX_ARB0_STALL_TIMER_START_VALUE__SHIFT 0x6
++#define RMI_DEMUX_CNTL__DEMUX_ARB0_MODE__SHIFT 0xe
++#define RMI_DEMUX_CNTL__DEMUX_ARB1_STALL__SHIFT 0x10
++#define RMI_DEMUX_CNTL__DEMUX_ARB1_BREAK_LOB_ON_IDLEIN__SHIFT 0x11
++#define RMI_DEMUX_CNTL__DEMUX_ARB1_STALL_TIMER_OVERRIDE__SHIFT 0x14
++#define RMI_DEMUX_CNTL__DEMUX_ARB1_STALL_TIMER_START_VALUE__SHIFT 0x16
++#define RMI_DEMUX_CNTL__DEMUX_ARB1_MODE__SHIFT 0x1e
++#define RMI_DEMUX_CNTL__DEMUX_ARB0_STALL_MASK 0x00000001L
++#define RMI_DEMUX_CNTL__DEMUX_ARB0_BREAK_LOB_ON_IDLEIN_MASK 0x00000002L
++#define RMI_DEMUX_CNTL__DEMUX_ARB0_STALL_TIMER_OVERRIDE_MASK 0x00000030L
++#define RMI_DEMUX_CNTL__DEMUX_ARB0_STALL_TIMER_START_VALUE_MASK 0x00003FC0L
++#define RMI_DEMUX_CNTL__DEMUX_ARB0_MODE_MASK 0x0000C000L
++#define RMI_DEMUX_CNTL__DEMUX_ARB1_STALL_MASK 0x00010000L
++#define RMI_DEMUX_CNTL__DEMUX_ARB1_BREAK_LOB_ON_IDLEIN_MASK 0x00020000L
++#define RMI_DEMUX_CNTL__DEMUX_ARB1_STALL_TIMER_OVERRIDE_MASK 0x00300000L
++#define RMI_DEMUX_CNTL__DEMUX_ARB1_STALL_TIMER_START_VALUE_MASK 0x3FC00000L
++#define RMI_DEMUX_CNTL__DEMUX_ARB1_MODE_MASK 0xC0000000L
++//RMI_UTCL1_CNTL1
++#define RMI_UTCL1_CNTL1__FORCE_4K_L2_RESP__SHIFT 0x0
++#define RMI_UTCL1_CNTL1__GPUVM_64K_DEF__SHIFT 0x1
++#define RMI_UTCL1_CNTL1__GPUVM_PERM_MODE__SHIFT 0x2
++#define RMI_UTCL1_CNTL1__RESP_MODE__SHIFT 0x3
++#define RMI_UTCL1_CNTL1__RESP_FAULT_MODE__SHIFT 0x5
++#define RMI_UTCL1_CNTL1__CLIENTID__SHIFT 0x7
++#define RMI_UTCL1_CNTL1__USERVM_DIS__SHIFT 0x10
++#define RMI_UTCL1_CNTL1__ENABLE_PUSH_LFIFO__SHIFT 0x11
++#define RMI_UTCL1_CNTL1__ENABLE_LFIFO_PRI_ARB__SHIFT 0x12
++#define RMI_UTCL1_CNTL1__REG_INV_VMID__SHIFT 0x13
++#define RMI_UTCL1_CNTL1__REG_INV_ALL_VMID__SHIFT 0x17
++#define RMI_UTCL1_CNTL1__REG_INV_TOGGLE__SHIFT 0x18
++#define RMI_UTCL1_CNTL1__CLIENT_INVALIDATE_ALL_VMID__SHIFT 0x19
++#define RMI_UTCL1_CNTL1__FORCE_MISS__SHIFT 0x1a
++#define RMI_UTCL1_CNTL1__FORCE_IN_ORDER__SHIFT 0x1b
++#define RMI_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2__SHIFT 0x1c
++#define RMI_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2__SHIFT 0x1e
++#define RMI_UTCL1_CNTL1__FORCE_4K_L2_RESP_MASK 0x00000001L
++#define RMI_UTCL1_CNTL1__GPUVM_64K_DEF_MASK 0x00000002L
++#define RMI_UTCL1_CNTL1__GPUVM_PERM_MODE_MASK 0x00000004L
++#define RMI_UTCL1_CNTL1__RESP_MODE_MASK 0x00000018L
++#define RMI_UTCL1_CNTL1__RESP_FAULT_MODE_MASK 0x00000060L
++#define RMI_UTCL1_CNTL1__CLIENTID_MASK 0x0000FF80L
++#define RMI_UTCL1_CNTL1__USERVM_DIS_MASK 0x00010000L
++#define RMI_UTCL1_CNTL1__ENABLE_PUSH_LFIFO_MASK 0x00020000L
++#define RMI_UTCL1_CNTL1__ENABLE_LFIFO_PRI_ARB_MASK 0x00040000L
++#define RMI_UTCL1_CNTL1__REG_INV_VMID_MASK 0x00780000L
++#define RMI_UTCL1_CNTL1__REG_INV_ALL_VMID_MASK 0x00800000L
++#define RMI_UTCL1_CNTL1__REG_INV_TOGGLE_MASK 0x01000000L
++#define RMI_UTCL1_CNTL1__CLIENT_INVALIDATE_ALL_VMID_MASK 0x02000000L
++#define RMI_UTCL1_CNTL1__FORCE_MISS_MASK 0x04000000L
++#define RMI_UTCL1_CNTL1__FORCE_IN_ORDER_MASK 0x08000000L
++#define RMI_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2_MASK 0x30000000L
++#define RMI_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2_MASK 0xC0000000L
++//RMI_UTCL1_CNTL2
++#define RMI_UTCL1_CNTL2__UTC_SPARE__SHIFT 0x0
++#define RMI_UTCL1_CNTL2__MTYPE_OVRD_DIS__SHIFT 0x9
++#define RMI_UTCL1_CNTL2__LINE_VALID__SHIFT 0xa
++#define RMI_UTCL1_CNTL2__DIS_EDC__SHIFT 0xb
++#define RMI_UTCL1_CNTL2__GPUVM_INV_MODE__SHIFT 0xc
++#define RMI_UTCL1_CNTL2__SHOOTDOWN_OPT__SHIFT 0xd
++#define RMI_UTCL1_CNTL2__FORCE_SNOOP__SHIFT 0xe
++#define RMI_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK__SHIFT 0xf
++#define RMI_UTCL1_CNTL2__UTCL1_ARB_BURST_MODE__SHIFT 0x10
++#define RMI_UTCL1_CNTL2__UTCL1_ENABLE_PERF_EVENT_RD_WR__SHIFT 0x12
++#define RMI_UTCL1_CNTL2__UTCL1_PERF_EVENT_RD_WR__SHIFT 0x13
++#define RMI_UTCL1_CNTL2__UTCL1_ENABLE_PERF_EVENT_VMID__SHIFT 0x14
++#define RMI_UTCL1_CNTL2__UTCL1_PERF_EVENT_VMID__SHIFT 0x15
++#define RMI_UTCL1_CNTL2__UTCL1_DIS_DUAL_L2_REQ__SHIFT 0x19
++#define RMI_UTCL1_CNTL2__UTCL1_FORCE_FRAG_2M_TO_64K__SHIFT 0x1a
++#define RMI_UTCL1_CNTL2__UTC_SPARE_MASK 0x000000FFL
++#define RMI_UTCL1_CNTL2__MTYPE_OVRD_DIS_MASK 0x00000200L
++#define RMI_UTCL1_CNTL2__LINE_VALID_MASK 0x00000400L
++#define RMI_UTCL1_CNTL2__DIS_EDC_MASK 0x00000800L
++#define RMI_UTCL1_CNTL2__GPUVM_INV_MODE_MASK 0x00001000L
++#define RMI_UTCL1_CNTL2__SHOOTDOWN_OPT_MASK 0x00002000L
++#define RMI_UTCL1_CNTL2__FORCE_SNOOP_MASK 0x00004000L
++#define RMI_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK_MASK 0x00008000L
++#define RMI_UTCL1_CNTL2__UTCL1_ARB_BURST_MODE_MASK 0x00030000L
++#define RMI_UTCL1_CNTL2__UTCL1_ENABLE_PERF_EVENT_RD_WR_MASK 0x00040000L
++#define RMI_UTCL1_CNTL2__UTCL1_PERF_EVENT_RD_WR_MASK 0x00080000L
++#define RMI_UTCL1_CNTL2__UTCL1_ENABLE_PERF_EVENT_VMID_MASK 0x00100000L
++#define RMI_UTCL1_CNTL2__UTCL1_PERF_EVENT_VMID_MASK 0x01E00000L
++#define RMI_UTCL1_CNTL2__UTCL1_DIS_DUAL_L2_REQ_MASK 0x02000000L
++#define RMI_UTCL1_CNTL2__UTCL1_FORCE_FRAG_2M_TO_64K_MASK 0x04000000L
++//RMI_UTC_UNIT_CONFIG
++//RMI_TCIW_FORMATTER0_CNTL
++#define RMI_TCIW_FORMATTER0_CNTL__WR_COMBINE0_DIS_OVERRIDE__SHIFT 0x0
++#define RMI_TCIW_FORMATTER0_CNTL__WR_COMBINE0_TIME_OUT_WINDOW__SHIFT 0x1
++#define RMI_TCIW_FORMATTER0_CNTL__TCIW0_MAX_ALLOWED_INFLIGHT_REQ__SHIFT 0x9
++#define RMI_TCIW_FORMATTER0_CNTL__SKID_FIFO_0_FREE_SPACE_DELTA__SHIFT 0x13
++#define RMI_TCIW_FORMATTER0_CNTL__SKID_FIFO_0_FREE_SPACE_DELTA_UPDATE__SHIFT 0x1b
++#define RMI_TCIW_FORMATTER0_CNTL__TCIW0_REQ_SAFE_MODE__SHIFT 0x1c
++#define RMI_TCIW_FORMATTER0_CNTL__RMI_IN0_REORDER_DIS__SHIFT 0x1d
++#define RMI_TCIW_FORMATTER0_CNTL__WR_COMBINE0_DIS_AT_LAST_OF_BURST__SHIFT 0x1e
++#define RMI_TCIW_FORMATTER0_CNTL__ALL_FAULT_RET0_DATA__SHIFT 0x1f
++#define RMI_TCIW_FORMATTER0_CNTL__WR_COMBINE0_DIS_OVERRIDE_MASK 0x00000001L
++#define RMI_TCIW_FORMATTER0_CNTL__WR_COMBINE0_TIME_OUT_WINDOW_MASK 0x000001FEL
++#define RMI_TCIW_FORMATTER0_CNTL__TCIW0_MAX_ALLOWED_INFLIGHT_REQ_MASK 0x0007FE00L
++#define RMI_TCIW_FORMATTER0_CNTL__SKID_FIFO_0_FREE_SPACE_DELTA_MASK 0x07F80000L
++#define RMI_TCIW_FORMATTER0_CNTL__SKID_FIFO_0_FREE_SPACE_DELTA_UPDATE_MASK 0x08000000L
++#define RMI_TCIW_FORMATTER0_CNTL__TCIW0_REQ_SAFE_MODE_MASK 0x10000000L
++#define RMI_TCIW_FORMATTER0_CNTL__RMI_IN0_REORDER_DIS_MASK 0x20000000L
++#define RMI_TCIW_FORMATTER0_CNTL__WR_COMBINE0_DIS_AT_LAST_OF_BURST_MASK 0x40000000L
++#define RMI_TCIW_FORMATTER0_CNTL__ALL_FAULT_RET0_DATA_MASK 0x80000000L
++//RMI_TCIW_FORMATTER1_CNTL
++#define RMI_TCIW_FORMATTER1_CNTL__WR_COMBINE1_DIS_OVERRIDE__SHIFT 0x0
++#define RMI_TCIW_FORMATTER1_CNTL__WR_COMBINE1_TIME_OUT_WINDOW__SHIFT 0x1
++#define RMI_TCIW_FORMATTER1_CNTL__TCIW1_MAX_ALLOWED_INFLIGHT_REQ__SHIFT 0x9
++#define RMI_TCIW_FORMATTER1_CNTL__SKID_FIFO_1_FREE_SPACE_DELTA__SHIFT 0x13
++#define RMI_TCIW_FORMATTER1_CNTL__SKID_FIFO_1_FREE_SPACE_DELTA_UPDATE__SHIFT 0x1b
++#define RMI_TCIW_FORMATTER1_CNTL__TCIW1_REQ_SAFE_MODE__SHIFT 0x1c
++#define RMI_TCIW_FORMATTER1_CNTL__RMI_IN1_REORDER_DIS__SHIFT 0x1d
++#define RMI_TCIW_FORMATTER1_CNTL__WR_COMBINE1_DIS_AT_LAST_OF_BURST__SHIFT 0x1e
++#define RMI_TCIW_FORMATTER1_CNTL__ALL_FAULT_RET1_DATA__SHIFT 0x1f
++#define RMI_TCIW_FORMATTER1_CNTL__WR_COMBINE1_DIS_OVERRIDE_MASK 0x00000001L
++#define RMI_TCIW_FORMATTER1_CNTL__WR_COMBINE1_TIME_OUT_WINDOW_MASK 0x000001FEL
++#define RMI_TCIW_FORMATTER1_CNTL__TCIW1_MAX_ALLOWED_INFLIGHT_REQ_MASK 0x0007FE00L
++#define RMI_TCIW_FORMATTER1_CNTL__SKID_FIFO_1_FREE_SPACE_DELTA_MASK 0x07F80000L
++#define RMI_TCIW_FORMATTER1_CNTL__SKID_FIFO_1_FREE_SPACE_DELTA_UPDATE_MASK 0x08000000L
++#define RMI_TCIW_FORMATTER1_CNTL__TCIW1_REQ_SAFE_MODE_MASK 0x10000000L
++#define RMI_TCIW_FORMATTER1_CNTL__RMI_IN1_REORDER_DIS_MASK 0x20000000L
++#define RMI_TCIW_FORMATTER1_CNTL__WR_COMBINE1_DIS_AT_LAST_OF_BURST_MASK 0x40000000L
++#define RMI_TCIW_FORMATTER1_CNTL__ALL_FAULT_RET1_DATA_MASK 0x80000000L
++//RMI_SCOREBOARD_CNTL
++#define RMI_SCOREBOARD_CNTL__COMPLETE_RB0_FLUSH__SHIFT 0x0
++#define RMI_SCOREBOARD_CNTL__REQ_IN_RE_EN_AFTER_FLUSH_RB0__SHIFT 0x1
++#define RMI_SCOREBOARD_CNTL__COMPLETE_RB1_FLUSH__SHIFT 0x2
++#define RMI_SCOREBOARD_CNTL__REQ_IN_RE_EN_AFTER_FLUSH_RB1__SHIFT 0x3
++#define RMI_SCOREBOARD_CNTL__TIME_STAMP_FLUSH_RB1__SHIFT 0x4
++#define RMI_SCOREBOARD_CNTL__VMID_INVAL_FLUSH_TYPE_OVERRIDE_EN__SHIFT 0x5
++#define RMI_SCOREBOARD_CNTL__VMID_INVAL_FLUSH_TYPE_OVERRIDE_VALUE__SHIFT 0x6
++#define RMI_SCOREBOARD_CNTL__TIME_STAMP_FLUSH_RB0__SHIFT 0x7
++#define RMI_SCOREBOARD_CNTL__FORCE_VMID_INVAL_DONE_EN__SHIFT 0x8
++#define RMI_SCOREBOARD_CNTL__FORCE_VMID_INVAL_DONE_TIMER_START_VALUE__SHIFT 0x9
++#define RMI_SCOREBOARD_CNTL__COMPLETE_RB0_FLUSH_MASK 0x00000001L
++#define RMI_SCOREBOARD_CNTL__REQ_IN_RE_EN_AFTER_FLUSH_RB0_MASK 0x00000002L
++#define RMI_SCOREBOARD_CNTL__COMPLETE_RB1_FLUSH_MASK 0x00000004L
++#define RMI_SCOREBOARD_CNTL__REQ_IN_RE_EN_AFTER_FLUSH_RB1_MASK 0x00000008L
++#define RMI_SCOREBOARD_CNTL__TIME_STAMP_FLUSH_RB1_MASK 0x00000010L
++#define RMI_SCOREBOARD_CNTL__VMID_INVAL_FLUSH_TYPE_OVERRIDE_EN_MASK 0x00000020L
++#define RMI_SCOREBOARD_CNTL__VMID_INVAL_FLUSH_TYPE_OVERRIDE_VALUE_MASK 0x00000040L
++#define RMI_SCOREBOARD_CNTL__TIME_STAMP_FLUSH_RB0_MASK 0x00000080L
++#define RMI_SCOREBOARD_CNTL__FORCE_VMID_INVAL_DONE_EN_MASK 0x00000100L
++#define RMI_SCOREBOARD_CNTL__FORCE_VMID_INVAL_DONE_TIMER_START_VALUE_MASK 0x001FFE00L
++//RMI_SCOREBOARD_STATUS0
++#define RMI_SCOREBOARD_STATUS0__CURRENT_SESSION_ID__SHIFT 0x0
++#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_IN_PROG__SHIFT 0x1
++#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_REQ_VMID__SHIFT 0x2
++#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_UTC_DONE__SHIFT 0x12
++#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_DONE__SHIFT 0x13
++#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_FLUSH_TYPE__SHIFT 0x14
++#define RMI_SCOREBOARD_STATUS0__FORCE_VMID_INV_DONE__SHIFT 0x15
++#define RMI_SCOREBOARD_STATUS0__CURRENT_SESSION_ID_MASK 0x00000001L
++#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_IN_PROG_MASK 0x00000002L
++#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_REQ_VMID_MASK 0x0003FFFCL
++#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_UTC_DONE_MASK 0x00040000L
++#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_DONE_MASK 0x00080000L
++#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_FLUSH_TYPE_MASK 0x00100000L
++#define RMI_SCOREBOARD_STATUS0__FORCE_VMID_INV_DONE_MASK 0x00200000L
++//RMI_SCOREBOARD_STATUS1
++#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_RB0__SHIFT 0x0
++#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_UNDERFLOW_RB0__SHIFT 0xc
++#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_OVERFLOW_RB0__SHIFT 0xd
++#define RMI_SCOREBOARD_STATUS1__MULTI_VMID_INVAL_FROM_CP_DETECTED__SHIFT 0xe
++#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_RB1__SHIFT 0xf
++#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_UNDERFLOW_RB1__SHIFT 0x1b
++#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_OVERFLOW_RB1__SHIFT 0x1c
++#define RMI_SCOREBOARD_STATUS1__COM_FLUSH_IN_PROG_RB1__SHIFT 0x1d
++#define RMI_SCOREBOARD_STATUS1__COM_FLUSH_IN_PROG_RB0__SHIFT 0x1e
++#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_RB0_MASK 0x00000FFFL
++#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_UNDERFLOW_RB0_MASK 0x00001000L
++#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_OVERFLOW_RB0_MASK 0x00002000L
++#define RMI_SCOREBOARD_STATUS1__MULTI_VMID_INVAL_FROM_CP_DETECTED_MASK 0x00004000L
++#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_RB1_MASK 0x07FF8000L
++#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_UNDERFLOW_RB1_MASK 0x08000000L
++#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_OVERFLOW_RB1_MASK 0x10000000L
++#define RMI_SCOREBOARD_STATUS1__COM_FLUSH_IN_PROG_RB1_MASK 0x20000000L
++#define RMI_SCOREBOARD_STATUS1__COM_FLUSH_IN_PROG_RB0_MASK 0x40000000L
++//RMI_SCOREBOARD_STATUS2
++#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_RB0__SHIFT 0x0
++#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_UNDERFLOW_RB0__SHIFT 0xc
++#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_RB1__SHIFT 0xd
++#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_UNDERFLOW_RB1__SHIFT 0x19
++#define RMI_SCOREBOARD_STATUS2__COM_FLUSH_DONE_RB1__SHIFT 0x1a
++#define RMI_SCOREBOARD_STATUS2__COM_FLUSH_DONE_RB0__SHIFT 0x1b
++#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_IN_PROG_RB0__SHIFT 0x1c
++#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_IN_PROG_RB1__SHIFT 0x1d
++#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_DONE_RB0__SHIFT 0x1e
++#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_DONE_RB1__SHIFT 0x1f
++#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_RB0_MASK 0x00000FFFL
++#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_UNDERFLOW_RB0_MASK 0x00001000L
++#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_RB1_MASK 0x01FFE000L
++#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_UNDERFLOW_RB1_MASK 0x02000000L
++#define RMI_SCOREBOARD_STATUS2__COM_FLUSH_DONE_RB1_MASK 0x04000000L
++#define RMI_SCOREBOARD_STATUS2__COM_FLUSH_DONE_RB0_MASK 0x08000000L
++#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_IN_PROG_RB0_MASK 0x10000000L
++#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_IN_PROG_RB1_MASK 0x20000000L
++#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_DONE_RB0_MASK 0x40000000L
++#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_DONE_RB1_MASK 0x80000000L
++//RMI_XBAR_ARBITER_CONFIG
++#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_MODE__SHIFT 0x0
++#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_BREAK_LOB_ON_WEIGHTEDRR__SHIFT 0x2
++#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_STALL__SHIFT 0x3
++#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_BREAK_LOB_ON_IDLEIN__SHIFT 0x4
++#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_STALL_TIMER_OVERRIDE__SHIFT 0x6
++#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_STALL_TIMER_START_VALUE__SHIFT 0x8
++#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_MODE__SHIFT 0x10
++#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_BREAK_LOB_ON_WEIGHTEDRR__SHIFT 0x12
++#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_STALL__SHIFT 0x13
++#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_BREAK_LOB_ON_IDLEIN__SHIFT 0x14
++#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_STALL_TIMER_OVERRIDE__SHIFT 0x16
++#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_STALL_TIMER_START_VALUE__SHIFT 0x18
++#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_MODE_MASK 0x00000003L
++#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_BREAK_LOB_ON_WEIGHTEDRR_MASK 0x00000004L
++#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_STALL_MASK 0x00000008L
++#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_BREAK_LOB_ON_IDLEIN_MASK 0x00000010L
++#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_STALL_TIMER_OVERRIDE_MASK 0x000000C0L
++#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_STALL_TIMER_START_VALUE_MASK 0x0000FF00L
++#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_MODE_MASK 0x00030000L
++#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_BREAK_LOB_ON_WEIGHTEDRR_MASK 0x00040000L
++#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_STALL_MASK 0x00080000L
++#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_BREAK_LOB_ON_IDLEIN_MASK 0x00100000L
++#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_STALL_TIMER_OVERRIDE_MASK 0x00C00000L
++#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_STALL_TIMER_START_VALUE_MASK 0xFF000000L
++//RMI_XBAR_ARBITER_CONFIG_1
++#define RMI_XBAR_ARBITER_CONFIG_1__XBAR_ARB_ROUND_ROBIN_WEIGHT_RB0_RD__SHIFT 0x0
++#define RMI_XBAR_ARBITER_CONFIG_1__XBAR_ARB_ROUND_ROBIN_WEIGHT_RB0_WR__SHIFT 0x8
++#define RMI_XBAR_ARBITER_CONFIG_1__XBAR_ARB_ROUND_ROBIN_WEIGHT_RB1_RD__SHIFT 0x10
++#define RMI_XBAR_ARBITER_CONFIG_1__XBAR_ARB_ROUND_ROBIN_WEIGHT_RB1_WR__SHIFT 0x18
++#define RMI_XBAR_ARBITER_CONFIG_1__XBAR_ARB_ROUND_ROBIN_WEIGHT_RB0_RD_MASK 0x000000FFL
++#define RMI_XBAR_ARBITER_CONFIG_1__XBAR_ARB_ROUND_ROBIN_WEIGHT_RB0_WR_MASK 0x0000FF00L
++#define RMI_XBAR_ARBITER_CONFIG_1__XBAR_ARB_ROUND_ROBIN_WEIGHT_RB1_RD_MASK 0x00FF0000L
++#define RMI_XBAR_ARBITER_CONFIG_1__XBAR_ARB_ROUND_ROBIN_WEIGHT_RB1_WR_MASK 0xFF000000L
++//RMI_CLOCK_CNTRL
++#define RMI_CLOCK_CNTRL__DYN_CLK_RB0_BUSY_MASK__SHIFT 0x0
++#define RMI_CLOCK_CNTRL__DYN_CLK_CMN_BUSY_MASK__SHIFT 0x5
++#define RMI_CLOCK_CNTRL__DYN_CLK_RB0_WAKEUP_MASK__SHIFT 0xa
++#define RMI_CLOCK_CNTRL__DYN_CLK_CMN_WAKEUP_MASK__SHIFT 0xf
++#define RMI_CLOCK_CNTRL__DYN_CLK_RB1_BUSY_MASK__SHIFT 0x14
++#define RMI_CLOCK_CNTRL__DYN_CLK_RB1_WAKEUP_MASK__SHIFT 0x19
++#define RMI_CLOCK_CNTRL__DYN_CLK_RB0_BUSY_MASK_MASK 0x0000001FL
++#define RMI_CLOCK_CNTRL__DYN_CLK_CMN_BUSY_MASK_MASK 0x000003E0L
++#define RMI_CLOCK_CNTRL__DYN_CLK_RB0_WAKEUP_MASK_MASK 0x00007C00L
++#define RMI_CLOCK_CNTRL__DYN_CLK_CMN_WAKEUP_MASK_MASK 0x000F8000L
++#define RMI_CLOCK_CNTRL__DYN_CLK_RB1_BUSY_MASK_MASK 0x01F00000L
++#define RMI_CLOCK_CNTRL__DYN_CLK_RB1_WAKEUP_MASK_MASK 0x3E000000L
++//RMI_UTCL1_STATUS
++#define RMI_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
++#define RMI_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
++#define RMI_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
++#define RMI_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
++#define RMI_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
++#define RMI_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
++//RMI_SPARE
++#define RMI_SPARE__RMI_ARBITER_STALL_TIMER_ENABLED_ALLOW_STREAMING__SHIFT 0x0
++#define RMI_SPARE__SPARE_BIT_1__SHIFT 0x1
++#define RMI_SPARE__SPARE_BIT_2__SHIFT 0x2
++#define RMI_SPARE__SPARE_BIT_3__SHIFT 0x3
++#define RMI_SPARE__SPARE_BIT_4__SHIFT 0x4
++#define RMI_SPARE__SPARE_BIT_5__SHIFT 0x5
++#define RMI_SPARE__SPARE_BIT_6__SHIFT 0x6
++#define RMI_SPARE__SPARE_BIT_7__SHIFT 0x7
++#define RMI_SPARE__SPARE_BIT_8_0__SHIFT 0x8
++#define RMI_SPARE__SPARE_BIT_16_0__SHIFT 0x10
++#define RMI_SPARE__RMI_ARBITER_STALL_TIMER_ENABLED_ALLOW_STREAMING_MASK 0x00000001L
++#define RMI_SPARE__SPARE_BIT_1_MASK 0x00000002L
++#define RMI_SPARE__SPARE_BIT_2_MASK 0x00000004L
++#define RMI_SPARE__SPARE_BIT_3_MASK 0x00000008L
++#define RMI_SPARE__SPARE_BIT_4_MASK 0x00000010L
++#define RMI_SPARE__SPARE_BIT_5_MASK 0x00000020L
++#define RMI_SPARE__SPARE_BIT_6_MASK 0x00000040L
++#define RMI_SPARE__SPARE_BIT_7_MASK 0x00000080L
++#define RMI_SPARE__SPARE_BIT_8_0_MASK 0x0000FF00L
++#define RMI_SPARE__SPARE_BIT_16_0_MASK 0xFFFF0000L
++//RMI_SPARE_1
++#define RMI_SPARE_1__SPARE_BIT_8__SHIFT 0x0
++#define RMI_SPARE_1__SPARE_BIT_9__SHIFT 0x1
++#define RMI_SPARE_1__SPARE_BIT_10__SHIFT 0x2
++#define RMI_SPARE_1__SPARE_BIT_11__SHIFT 0x3
++#define RMI_SPARE_1__SPARE_BIT_12__SHIFT 0x4
++#define RMI_SPARE_1__SPARE_BIT_13__SHIFT 0x5
++#define RMI_SPARE_1__SPARE_BIT_14__SHIFT 0x6
++#define RMI_SPARE_1__SPARE_BIT_15__SHIFT 0x7
++#define RMI_SPARE_1__SPARE_BIT_8_1__SHIFT 0x8
++#define RMI_SPARE_1__SPARE_BIT_16_1__SHIFT 0x10
++#define RMI_SPARE_1__SPARE_BIT_8_MASK 0x00000001L
++#define RMI_SPARE_1__SPARE_BIT_9_MASK 0x00000002L
++#define RMI_SPARE_1__SPARE_BIT_10_MASK 0x00000004L
++#define RMI_SPARE_1__SPARE_BIT_11_MASK 0x00000008L
++#define RMI_SPARE_1__SPARE_BIT_12_MASK 0x00000010L
++#define RMI_SPARE_1__SPARE_BIT_13_MASK 0x00000020L
++#define RMI_SPARE_1__SPARE_BIT_14_MASK 0x00000040L
++#define RMI_SPARE_1__SPARE_BIT_15_MASK 0x00000080L
++#define RMI_SPARE_1__SPARE_BIT_8_1_MASK 0x0000FF00L
++#define RMI_SPARE_1__SPARE_BIT_16_1_MASK 0xFFFF0000L
++//RMI_SPARE_2
++#define RMI_SPARE_2__SPARE_BIT_16__SHIFT 0x0
++#define RMI_SPARE_2__SPARE_BIT_17__SHIFT 0x1
++#define RMI_SPARE_2__SPARE_BIT_18__SHIFT 0x2
++#define RMI_SPARE_2__SPARE_BIT_19__SHIFT 0x3
++#define RMI_SPARE_2__SPARE_BIT_20__SHIFT 0x4
++#define RMI_SPARE_2__SPARE_BIT_21__SHIFT 0x5
++#define RMI_SPARE_2__SPARE_BIT_22__SHIFT 0x6
++#define RMI_SPARE_2__SPARE_BIT_23__SHIFT 0x7
++#define RMI_SPARE_2__SPARE_BIT_4_0__SHIFT 0x8
++#define RMI_SPARE_2__SPARE_BIT_4_1__SHIFT 0xc
++#define RMI_SPARE_2__SPARE_BIT_8_2__SHIFT 0x10
++#define RMI_SPARE_2__SPARE_BIT_8_3__SHIFT 0x18
++#define RMI_SPARE_2__SPARE_BIT_16_MASK 0x00000001L
++#define RMI_SPARE_2__SPARE_BIT_17_MASK 0x00000002L
++#define RMI_SPARE_2__SPARE_BIT_18_MASK 0x00000004L
++#define RMI_SPARE_2__SPARE_BIT_19_MASK 0x00000008L
++#define RMI_SPARE_2__SPARE_BIT_20_MASK 0x00000010L
++#define RMI_SPARE_2__SPARE_BIT_21_MASK 0x00000020L
++#define RMI_SPARE_2__SPARE_BIT_22_MASK 0x00000040L
++#define RMI_SPARE_2__SPARE_BIT_23_MASK 0x00000080L
++#define RMI_SPARE_2__SPARE_BIT_4_0_MASK 0x00000F00L
++#define RMI_SPARE_2__SPARE_BIT_4_1_MASK 0x0000F000L
++#define RMI_SPARE_2__SPARE_BIT_8_2_MASK 0x00FF0000L
++#define RMI_SPARE_2__SPARE_BIT_8_3_MASK 0xFF000000L
++
++
++// addressBlock: gc_dbgu_gfx_dbgudec
++//port_a_addr
++#define port_a_addr__Index__SHIFT 0x0
++#define port_a_addr__Reserved__SHIFT 0x8
++#define port_a_addr__ReadEnable__SHIFT 0x1f
++#define port_a_addr__Index_MASK 0x000000FFL
++#define port_a_addr__Reserved_MASK 0x7FFFFF00L
++#define port_a_addr__ReadEnable_MASK 0x80000000L
++//port_a_data_lo
++#define port_a_data_lo__Data__SHIFT 0x0
++#define port_a_data_lo__Data_MASK 0xFFFFFFFFL
++//port_a_data_hi
++#define port_a_data_hi__Data__SHIFT 0x0
++#define port_a_data_hi__Data_MASK 0xFFFFFFFFL
++//port_b_addr
++#define port_b_addr__Index__SHIFT 0x0
++#define port_b_addr__Reserved__SHIFT 0x8
++#define port_b_addr__ReadEnable__SHIFT 0x1f
++#define port_b_addr__Index_MASK 0x000000FFL
++#define port_b_addr__Reserved_MASK 0x7FFFFF00L
++#define port_b_addr__ReadEnable_MASK 0x80000000L
++//port_b_data_lo
++#define port_b_data_lo__Data__SHIFT 0x0
++#define port_b_data_lo__Data_MASK 0xFFFFFFFFL
++//port_b_data_hi
++#define port_b_data_hi__Data__SHIFT 0x0
++#define port_b_data_hi__Data_MASK 0xFFFFFFFFL
++//port_c_addr
++#define port_c_addr__Index__SHIFT 0x0
++#define port_c_addr__Reserved__SHIFT 0x8
++#define port_c_addr__ReadEnable__SHIFT 0x1f
++#define port_c_addr__Index_MASK 0x000000FFL
++#define port_c_addr__Reserved_MASK 0x7FFFFF00L
++#define port_c_addr__ReadEnable_MASK 0x80000000L
++//port_c_data_lo
++#define port_c_data_lo__Data__SHIFT 0x0
++#define port_c_data_lo__Data_MASK 0xFFFFFFFFL
++//port_c_data_hi
++#define port_c_data_hi__Data__SHIFT 0x0
++#define port_c_data_hi__Data_MASK 0xFFFFFFFFL
++//port_d_addr
++#define port_d_addr__Index__SHIFT 0x0
++#define port_d_addr__Reserved__SHIFT 0x8
++#define port_d_addr__ReadEnable__SHIFT 0x1f
++#define port_d_addr__Index_MASK 0x000000FFL
++#define port_d_addr__Reserved_MASK 0x7FFFFF00L
++#define port_d_addr__ReadEnable_MASK 0x80000000L
++//port_d_data_lo
++#define port_d_data_lo__Data__SHIFT 0x0
++#define port_d_data_lo__Data_MASK 0xFFFFFFFFL
++//port_d_data_hi
++#define port_d_data_hi__Data__SHIFT 0x0
++#define port_d_data_hi__Data_MASK 0xFFFFFFFFL
++
++
++// addressBlock: gc_utcl2_atcl2dec
++//ATC_L2_CNTL
++#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READ_REQUESTS__SHIFT 0x0
++#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITE_REQUESTS__SHIFT 0x3
++#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD__SHIFT 0x6
++#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD__SHIFT 0x7
++#define ATC_L2_CNTL__CACHE_INVALIDATE_MODE__SHIFT 0x8
++#define ATC_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY__SHIFT 0xb
++#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READ_REQUESTS_MASK 0x00000003L
++#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITE_REQUESTS_MASK 0x00000018L
++#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD_MASK 0x00000040L
++#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD_MASK 0x00000080L
++#define ATC_L2_CNTL__CACHE_INVALIDATE_MODE_MASK 0x00000700L
++#define ATC_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY_MASK 0x00000800L
++//ATC_L2_CNTL2
++#define ATC_L2_CNTL2__BANK_SELECT__SHIFT 0x0
++#define ATC_L2_CNTL2__L2_CACHE_UPDATE_MODE__SHIFT 0x6
++#define ATC_L2_CNTL2__ENABLE_L2_CACHE_LRU_UPDATE_BY_WRITE__SHIFT 0x8
++#define ATC_L2_CNTL2__L2_CACHE_SWAP_TAG_INDEX_LSBS__SHIFT 0x9
++#define ATC_L2_CNTL2__L2_CACHE_VMID_MODE__SHIFT 0xc
++#define ATC_L2_CNTL2__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE__SHIFT 0xf
++#define ATC_L2_CNTL2__BANK_SELECT_MASK 0x0000003FL
++#define ATC_L2_CNTL2__L2_CACHE_UPDATE_MODE_MASK 0x000000C0L
++#define ATC_L2_CNTL2__ENABLE_L2_CACHE_LRU_UPDATE_BY_WRITE_MASK 0x00000100L
++#define ATC_L2_CNTL2__L2_CACHE_SWAP_TAG_INDEX_LSBS_MASK 0x00000E00L
++#define ATC_L2_CNTL2__L2_CACHE_VMID_MODE_MASK 0x00007000L
++#define ATC_L2_CNTL2__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE_MASK 0x001F8000L
++//ATC_L2_CACHE_DATA0
++#define ATC_L2_CACHE_DATA0__DATA_REGISTER_VALID__SHIFT 0x0
++#define ATC_L2_CACHE_DATA0__CACHE_ENTRY_VALID__SHIFT 0x1
++#define ATC_L2_CACHE_DATA0__CACHED_ATTRIBUTES__SHIFT 0x2
++#define ATC_L2_CACHE_DATA0__VIRTUAL_PAGE_ADDRESS_HIGH__SHIFT 0x17
++#define ATC_L2_CACHE_DATA0__DATA_REGISTER_VALID_MASK 0x00000001L
++#define ATC_L2_CACHE_DATA0__CACHE_ENTRY_VALID_MASK 0x00000002L
++#define ATC_L2_CACHE_DATA0__CACHED_ATTRIBUTES_MASK 0x007FFFFCL
++#define ATC_L2_CACHE_DATA0__VIRTUAL_PAGE_ADDRESS_HIGH_MASK 0x07800000L
++//ATC_L2_CACHE_DATA1
++#define ATC_L2_CACHE_DATA1__VIRTUAL_PAGE_ADDRESS_LOW__SHIFT 0x0
++#define ATC_L2_CACHE_DATA1__VIRTUAL_PAGE_ADDRESS_LOW_MASK 0xFFFFFFFFL
++//ATC_L2_CACHE_DATA2
++#define ATC_L2_CACHE_DATA2__PHYSICAL_PAGE_ADDRESS__SHIFT 0x0
++#define ATC_L2_CACHE_DATA2__PHYSICAL_PAGE_ADDRESS_MASK 0xFFFFFFFFL
++//ATC_L2_CNTL3
++#define ATC_L2_CNTL3__DELAY_SEND_INVALIDATION_REQUEST__SHIFT 0x0
++#define ATC_L2_CNTL3__ATS_REQUEST_CREDIT_MINUS1__SHIFT 0x3
++#define ATC_L2_CNTL3__DELAY_SEND_INVALIDATION_REQUEST_MASK 0x00000007L
++#define ATC_L2_CNTL3__ATS_REQUEST_CREDIT_MINUS1_MASK 0x000001F8L
++//ATC_L2_STATUS
++#define ATC_L2_STATUS__BUSY__SHIFT 0x0
++#define ATC_L2_STATUS__PARITY_ERROR_INFO__SHIFT 0x1
++#define ATC_L2_STATUS__BUSY_MASK 0x00000001L
++#define ATC_L2_STATUS__PARITY_ERROR_INFO_MASK 0x3FFFFFFEL
++//ATC_L2_STATUS2
++#define ATC_L2_STATUS2__IFIFO_NON_FATAL_PARITY_ERROR_INFO__SHIFT 0x0
++#define ATC_L2_STATUS2__IFIFO_FATAL_PARITY_ERROR_INFO__SHIFT 0x8
++#define ATC_L2_STATUS2__IFIFO_NON_FATAL_PARITY_ERROR_INFO_MASK 0x000000FFL
++#define ATC_L2_STATUS2__IFIFO_FATAL_PARITY_ERROR_INFO_MASK 0x0000FF00L
++//ATC_L2_MISC_CG
++#define ATC_L2_MISC_CG__OFFDLY__SHIFT 0x6
++#define ATC_L2_MISC_CG__ENABLE__SHIFT 0x12
++#define ATC_L2_MISC_CG__MEM_LS_ENABLE__SHIFT 0x13
++#define ATC_L2_MISC_CG__OFFDLY_MASK 0x00000FC0L
++#define ATC_L2_MISC_CG__ENABLE_MASK 0x00040000L
++#define ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK 0x00080000L
++//ATC_L2_MEM_POWER_LS
++#define ATC_L2_MEM_POWER_LS__LS_SETUP__SHIFT 0x0
++#define ATC_L2_MEM_POWER_LS__LS_HOLD__SHIFT 0x6
++#define ATC_L2_MEM_POWER_LS__LS_SETUP_MASK 0x0000003FL
++#define ATC_L2_MEM_POWER_LS__LS_HOLD_MASK 0x00000FC0L
++//ATC_L2_CGTT_CLK_CTRL
++#define ATC_L2_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
++#define ATC_L2_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
++#define ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
++#define ATC_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x10
++#define ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x18
++#define ATC_L2_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
++#define ATC_L2_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
++#define ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
++#define ATC_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L
++#define ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L
++
++
++// addressBlock: gc_utcl2_vml2pfdec
++//VM_L2_CNTL
++#define VM_L2_CNTL__ENABLE_L2_CACHE__SHIFT 0x0
++#define VM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING__SHIFT 0x1
++#define VM_L2_CNTL__L2_CACHE_PTE_ENDIAN_SWAP_MODE__SHIFT 0x2
++#define VM_L2_CNTL__L2_CACHE_PDE_ENDIAN_SWAP_MODE__SHIFT 0x4
++#define VM_L2_CNTL__L2_PDE0_CACHE_TAG_GENERATION_MODE__SHIFT 0x8
++#define VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE__SHIFT 0x9
++#define VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE__SHIFT 0xa
++#define VM_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY__SHIFT 0xb
++#define VM_L2_CNTL__L2_PDE0_CACHE_SPLIT_MODE__SHIFT 0xc
++#define VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT 0xf
++#define VM_L2_CNTL__PDE_FAULT_CLASSIFICATION__SHIFT 0x12
++#define VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT 0x13
++#define VM_L2_CNTL__IDENTITY_MODE_FRAGMENT_SIZE__SHIFT 0x15
++#define VM_L2_CNTL__L2_PTE_CACHE_ADDR_MODE__SHIFT 0x1a
++#define VM_L2_CNTL__ENABLE_L2_CACHE_MASK 0x00000001L
++#define VM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING_MASK 0x00000002L
++#define VM_L2_CNTL__L2_CACHE_PTE_ENDIAN_SWAP_MODE_MASK 0x0000000CL
++#define VM_L2_CNTL__L2_CACHE_PDE_ENDIAN_SWAP_MODE_MASK 0x00000030L
++#define VM_L2_CNTL__L2_PDE0_CACHE_TAG_GENERATION_MODE_MASK 0x00000100L
++#define VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK 0x00000200L
++#define VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK 0x00000400L
++#define VM_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY_MASK 0x00000800L
++#define VM_L2_CNTL__L2_PDE0_CACHE_SPLIT_MODE_MASK 0x00007000L
++#define VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE_MASK 0x00038000L
++#define VM_L2_CNTL__PDE_FAULT_CLASSIFICATION_MASK 0x00040000L
++#define VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE_MASK 0x00180000L
++#define VM_L2_CNTL__IDENTITY_MODE_FRAGMENT_SIZE_MASK 0x03E00000L
++#define VM_L2_CNTL__L2_PTE_CACHE_ADDR_MODE_MASK 0x0C000000L
++//VM_L2_CNTL2
++#define VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS__SHIFT 0x0
++#define VM_L2_CNTL2__INVALIDATE_L2_CACHE__SHIFT 0x1
++#define VM_L2_CNTL2__DISABLE_INVALIDATE_PER_DOMAIN__SHIFT 0x15
++#define VM_L2_CNTL2__DISABLE_BIGK_CACHE_OPTIMIZATION__SHIFT 0x16
++#define VM_L2_CNTL2__L2_PTE_CACHE_VMID_MODE__SHIFT 0x17
++#define VM_L2_CNTL2__INVALIDATE_CACHE_MODE__SHIFT 0x1a
++#define VM_L2_CNTL2__PDE_CACHE_EFFECTIVE_SIZE__SHIFT 0x1c
++#define VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS_MASK 0x00000001L
++#define VM_L2_CNTL2__INVALIDATE_L2_CACHE_MASK 0x00000002L
++#define VM_L2_CNTL2__DISABLE_INVALIDATE_PER_DOMAIN_MASK 0x00200000L
++#define VM_L2_CNTL2__DISABLE_BIGK_CACHE_OPTIMIZATION_MASK 0x00400000L
++#define VM_L2_CNTL2__L2_PTE_CACHE_VMID_MODE_MASK 0x03800000L
++#define VM_L2_CNTL2__INVALIDATE_CACHE_MODE_MASK 0x0C000000L
++#define VM_L2_CNTL2__PDE_CACHE_EFFECTIVE_SIZE_MASK 0x70000000L
++//VM_L2_CNTL3
++#define VM_L2_CNTL3__BANK_SELECT__SHIFT 0x0
++#define VM_L2_CNTL3__L2_CACHE_UPDATE_MODE__SHIFT 0x6
++#define VM_L2_CNTL3__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE__SHIFT 0x8
++#define VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0xf
++#define VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY__SHIFT 0x14
++#define VM_L2_CNTL3__L2_CACHE_4K_EFFECTIVE_SIZE__SHIFT 0x15
++#define VM_L2_CNTL3__L2_CACHE_BIGK_EFFECTIVE_SIZE__SHIFT 0x18
++#define VM_L2_CNTL3__L2_CACHE_4K_FORCE_MISS__SHIFT 0x1c
++#define VM_L2_CNTL3__L2_CACHE_BIGK_FORCE_MISS__SHIFT 0x1d
++#define VM_L2_CNTL3__PDE_CACHE_FORCE_MISS__SHIFT 0x1e
++#define VM_L2_CNTL3__L2_CACHE_4K_ASSOCIATIVITY__SHIFT 0x1f
++#define VM_L2_CNTL3__BANK_SELECT_MASK 0x0000003FL
++#define VM_L2_CNTL3__L2_CACHE_UPDATE_MODE_MASK 0x000000C0L
++#define VM_L2_CNTL3__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE_MASK 0x00001F00L
++#define VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000F8000L
++#define VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK 0x00100000L
++#define VM_L2_CNTL3__L2_CACHE_4K_EFFECTIVE_SIZE_MASK 0x00E00000L
++#define VM_L2_CNTL3__L2_CACHE_BIGK_EFFECTIVE_SIZE_MASK 0x0F000000L
++#define VM_L2_CNTL3__L2_CACHE_4K_FORCE_MISS_MASK 0x10000000L
++#define VM_L2_CNTL3__L2_CACHE_BIGK_FORCE_MISS_MASK 0x20000000L
++#define VM_L2_CNTL3__PDE_CACHE_FORCE_MISS_MASK 0x40000000L
++#define VM_L2_CNTL3__L2_CACHE_4K_ASSOCIATIVITY_MASK 0x80000000L
++//VM_L2_STATUS
++#define VM_L2_STATUS__L2_BUSY__SHIFT 0x0
++#define VM_L2_STATUS__CONTEXT_DOMAIN_BUSY__SHIFT 0x1
++#define VM_L2_STATUS__FOUND_4K_PTE_CACHE_PARITY_ERRORS__SHIFT 0x11
++#define VM_L2_STATUS__FOUND_BIGK_PTE_CACHE_PARITY_ERRORS__SHIFT 0x12
++#define VM_L2_STATUS__FOUND_PDE0_CACHE_PARITY_ERRORS__SHIFT 0x13
++#define VM_L2_STATUS__FOUND_PDE1_CACHE_PARITY_ERRORS__SHIFT 0x14
++#define VM_L2_STATUS__FOUND_PDE2_CACHE_PARITY_ERRORS__SHIFT 0x15
++#define VM_L2_STATUS__L2_BUSY_MASK 0x00000001L
++#define VM_L2_STATUS__CONTEXT_DOMAIN_BUSY_MASK 0x0001FFFEL
++#define VM_L2_STATUS__FOUND_4K_PTE_CACHE_PARITY_ERRORS_MASK 0x00020000L
++#define VM_L2_STATUS__FOUND_BIGK_PTE_CACHE_PARITY_ERRORS_MASK 0x00040000L
++#define VM_L2_STATUS__FOUND_PDE0_CACHE_PARITY_ERRORS_MASK 0x00080000L
++#define VM_L2_STATUS__FOUND_PDE1_CACHE_PARITY_ERRORS_MASK 0x00100000L
++#define VM_L2_STATUS__FOUND_PDE2_CACHE_PARITY_ERRORS_MASK 0x00200000L
++//VM_DUMMY_PAGE_FAULT_CNTL
++#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_FAULT_ENABLE__SHIFT 0x0
++#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_ADDRESS_LOGICAL__SHIFT 0x1
++#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_COMPARE_MSBS__SHIFT 0x2
++#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_FAULT_ENABLE_MASK 0x00000001L
++#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_ADDRESS_LOGICAL_MASK 0x00000002L
++#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_COMPARE_MSBS_MASK 0x000000FCL
++//VM_DUMMY_PAGE_FAULT_ADDR_LO32
++#define VM_DUMMY_PAGE_FAULT_ADDR_LO32__DUMMY_PAGE_ADDR_LO32__SHIFT 0x0
++#define VM_DUMMY_PAGE_FAULT_ADDR_LO32__DUMMY_PAGE_ADDR_LO32_MASK 0xFFFFFFFFL
++//VM_DUMMY_PAGE_FAULT_ADDR_HI32
++#define VM_DUMMY_PAGE_FAULT_ADDR_HI32__DUMMY_PAGE_ADDR_HI4__SHIFT 0x0
++#define VM_DUMMY_PAGE_FAULT_ADDR_HI32__DUMMY_PAGE_ADDR_HI4_MASK 0x0000000FL
++//VM_L2_PROTECTION_FAULT_CNTL
++#define VM_L2_PROTECTION_FAULT_CNTL__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x0
++#define VM_L2_PROTECTION_FAULT_CNTL__ALLOW_SUBSEQUENT_PROTECTION_FAULT_STATUS_ADDR_UPDATES__SHIFT 0x1
++#define VM_L2_PROTECTION_FAULT_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x2
++#define VM_L2_PROTECTION_FAULT_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x3
++#define VM_L2_PROTECTION_FAULT_CNTL__PDE1_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x4
++#define VM_L2_PROTECTION_FAULT_CNTL__PDE2_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x5
++#define VM_L2_PROTECTION_FAULT_CNTL__TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x6
++#define VM_L2_PROTECTION_FAULT_CNTL__NACK_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x7
++#define VM_L2_PROTECTION_FAULT_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x8
++#define VM_L2_PROTECTION_FAULT_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x9
++#define VM_L2_PROTECTION_FAULT_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
++#define VM_L2_PROTECTION_FAULT_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xb
++#define VM_L2_PROTECTION_FAULT_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
++#define VM_L2_PROTECTION_FAULT_CNTL__CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0xd
++#define VM_L2_PROTECTION_FAULT_CNTL__OTHER_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0x1d
++#define VM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_NO_RETRY_FAULT__SHIFT 0x1e
++#define VM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_RETRY_FAULT__SHIFT 0x1f
++#define VM_L2_PROTECTION_FAULT_CNTL__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00000001L
++#define VM_L2_PROTECTION_FAULT_CNTL__ALLOW_SUBSEQUENT_PROTECTION_FAULT_STATUS_ADDR_UPDATES_MASK 0x00000002L
++#define VM_L2_PROTECTION_FAULT_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000004L
++#define VM_L2_PROTECTION_FAULT_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000008L
++#define VM_L2_PROTECTION_FAULT_CNTL__PDE1_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000010L
++#define VM_L2_PROTECTION_FAULT_CNTL__PDE2_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000020L
++#define VM_L2_PROTECTION_FAULT_CNTL__TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000040L
++#define VM_L2_PROTECTION_FAULT_CNTL__NACK_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000080L
++#define VM_L2_PROTECTION_FAULT_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000100L
++#define VM_L2_PROTECTION_FAULT_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000200L
++#define VM_L2_PROTECTION_FAULT_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
++#define VM_L2_PROTECTION_FAULT_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000800L
++#define VM_L2_PROTECTION_FAULT_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
++#define VM_L2_PROTECTION_FAULT_CNTL__CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0x1FFFE000L
++#define VM_L2_PROTECTION_FAULT_CNTL__OTHER_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0x20000000L
++#define VM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_NO_RETRY_FAULT_MASK 0x40000000L
++#define VM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_RETRY_FAULT_MASK 0x80000000L
++//VM_L2_PROTECTION_FAULT_CNTL2
++#define VM_L2_PROTECTION_FAULT_CNTL2__CLIENT_ID_PRT_FAULT_INTERRUPT__SHIFT 0x0
++#define VM_L2_PROTECTION_FAULT_CNTL2__OTHER_CLIENT_ID_PRT_FAULT_INTERRUPT__SHIFT 0x10
++#define VM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE__SHIFT 0x11
++#define VM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY__SHIFT 0x12
++#define VM_L2_PROTECTION_FAULT_CNTL2__ENABLE_RETRY_FAULT_INTERRUPT__SHIFT 0x13
++#define VM_L2_PROTECTION_FAULT_CNTL2__CLIENT_ID_PRT_FAULT_INTERRUPT_MASK 0x0000FFFFL
++#define VM_L2_PROTECTION_FAULT_CNTL2__OTHER_CLIENT_ID_PRT_FAULT_INTERRUPT_MASK 0x00010000L
++#define VM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE_MASK 0x00020000L
++#define VM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY_MASK 0x00040000L
++#define VM_L2_PROTECTION_FAULT_CNTL2__ENABLE_RETRY_FAULT_INTERRUPT_MASK 0x00080000L
++//VM_L2_PROTECTION_FAULT_MM_CNTL3
++#define VM_L2_PROTECTION_FAULT_MM_CNTL3__VML1_READ_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0x0
++#define VM_L2_PROTECTION_FAULT_MM_CNTL3__VML1_READ_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0xFFFFFFFFL
++//VM_L2_PROTECTION_FAULT_MM_CNTL4
++#define VM_L2_PROTECTION_FAULT_MM_CNTL4__VML1_WRITE_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0x0
++#define VM_L2_PROTECTION_FAULT_MM_CNTL4__VML1_WRITE_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0xFFFFFFFFL
++//VM_L2_PROTECTION_FAULT_STATUS
++#define VM_L2_PROTECTION_FAULT_STATUS__MORE_FAULTS__SHIFT 0x0
++#define VM_L2_PROTECTION_FAULT_STATUS__WALKER_ERROR__SHIFT 0x1
++#define VM_L2_PROTECTION_FAULT_STATUS__PERMISSION_FAULTS__SHIFT 0x4
++#define VM_L2_PROTECTION_FAULT_STATUS__MAPPING_ERROR__SHIFT 0x8
++#define VM_L2_PROTECTION_FAULT_STATUS__CID__SHIFT 0x9
++#define VM_L2_PROTECTION_FAULT_STATUS__RW__SHIFT 0x12
++#define VM_L2_PROTECTION_FAULT_STATUS__ATOMIC__SHIFT 0x13
++#define VM_L2_PROTECTION_FAULT_STATUS__VMID__SHIFT 0x14
++#define VM_L2_PROTECTION_FAULT_STATUS__VF__SHIFT 0x18
++#define VM_L2_PROTECTION_FAULT_STATUS__VFID__SHIFT 0x19
++#define VM_L2_PROTECTION_FAULT_STATUS__MORE_FAULTS_MASK 0x00000001L
++#define VM_L2_PROTECTION_FAULT_STATUS__WALKER_ERROR_MASK 0x0000000EL
++#define VM_L2_PROTECTION_FAULT_STATUS__PERMISSION_FAULTS_MASK 0x000000F0L
++#define VM_L2_PROTECTION_FAULT_STATUS__MAPPING_ERROR_MASK 0x00000100L
++#define VM_L2_PROTECTION_FAULT_STATUS__CID_MASK 0x0003FE00L
++#define VM_L2_PROTECTION_FAULT_STATUS__RW_MASK 0x00040000L
++#define VM_L2_PROTECTION_FAULT_STATUS__ATOMIC_MASK 0x00080000L
++#define VM_L2_PROTECTION_FAULT_STATUS__VMID_MASK 0x00F00000L
++#define VM_L2_PROTECTION_FAULT_STATUS__VF_MASK 0x01000000L
++#define VM_L2_PROTECTION_FAULT_STATUS__VFID_MASK 0x1E000000L
++//VM_L2_PROTECTION_FAULT_ADDR_LO32
++#define VM_L2_PROTECTION_FAULT_ADDR_LO32__LOGICAL_PAGE_ADDR_LO32__SHIFT 0x0
++#define VM_L2_PROTECTION_FAULT_ADDR_LO32__LOGICAL_PAGE_ADDR_LO32_MASK 0xFFFFFFFFL
++//VM_L2_PROTECTION_FAULT_ADDR_HI32
++#define VM_L2_PROTECTION_FAULT_ADDR_HI32__LOGICAL_PAGE_ADDR_HI4__SHIFT 0x0
++#define VM_L2_PROTECTION_FAULT_ADDR_HI32__LOGICAL_PAGE_ADDR_HI4_MASK 0x0000000FL
++//VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32
++#define VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32__PHYSICAL_PAGE_ADDR_LO32__SHIFT 0x0
++#define VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32__PHYSICAL_PAGE_ADDR_LO32_MASK 0xFFFFFFFFL
++//VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32
++#define VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32__PHYSICAL_PAGE_ADDR_HI4__SHIFT 0x0
++#define VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32__PHYSICAL_PAGE_ADDR_HI4_MASK 0x0000000FL
++//VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32
++#define VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
++#define VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
++//VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32
++#define VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
++#define VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
++//VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32
++#define VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
++#define VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
++//VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32
++#define VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
++#define VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
++//VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32
++#define VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32__PHYSICAL_PAGE_OFFSET_LO32__SHIFT 0x0
++#define VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32__PHYSICAL_PAGE_OFFSET_LO32_MASK 0xFFFFFFFFL
++//VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32
++#define VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32__PHYSICAL_PAGE_OFFSET_HI4__SHIFT 0x0
++#define VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32__PHYSICAL_PAGE_OFFSET_HI4_MASK 0x0000000FL
++//VM_L2_CNTL4
++#define VM_L2_CNTL4__L2_CACHE_4K_PARTITION_COUNT__SHIFT 0x0
++#define VM_L2_CNTL4__VMC_TAP_PDE_REQUEST_PHYSICAL__SHIFT 0x6
++#define VM_L2_CNTL4__VMC_TAP_PTE_REQUEST_PHYSICAL__SHIFT 0x7
++#define VM_L2_CNTL4__MM_NONRT_IFIFO_ACTIVE_TRANSACTION_LIMIT__SHIFT 0x8
++#define VM_L2_CNTL4__MM_SOFTRT_IFIFO_ACTIVE_TRANSACTION_LIMIT__SHIFT 0x12
++#define VM_L2_CNTL4__BPM_CGCGLS_OVERRIDE__SHIFT 0x1c
++#define VM_L2_CNTL4__L2_CACHE_4K_PARTITION_COUNT_MASK 0x0000003FL
++#define VM_L2_CNTL4__VMC_TAP_PDE_REQUEST_PHYSICAL_MASK 0x00000040L
++#define VM_L2_CNTL4__VMC_TAP_PTE_REQUEST_PHYSICAL_MASK 0x00000080L
++#define VM_L2_CNTL4__MM_NONRT_IFIFO_ACTIVE_TRANSACTION_LIMIT_MASK 0x0003FF00L
++#define VM_L2_CNTL4__MM_SOFTRT_IFIFO_ACTIVE_TRANSACTION_LIMIT_MASK 0x0FFC0000L
++#define VM_L2_CNTL4__BPM_CGCGLS_OVERRIDE_MASK 0x10000000L
++//VM_L2_MM_GROUP_RT_CLASSES
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_0_RT_CLASS__SHIFT 0x0
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_1_RT_CLASS__SHIFT 0x1
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_2_RT_CLASS__SHIFT 0x2
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_3_RT_CLASS__SHIFT 0x3
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_4_RT_CLASS__SHIFT 0x4
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_5_RT_CLASS__SHIFT 0x5
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_6_RT_CLASS__SHIFT 0x6
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_7_RT_CLASS__SHIFT 0x7
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_8_RT_CLASS__SHIFT 0x8
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_9_RT_CLASS__SHIFT 0x9
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_10_RT_CLASS__SHIFT 0xa
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_11_RT_CLASS__SHIFT 0xb
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_12_RT_CLASS__SHIFT 0xc
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_13_RT_CLASS__SHIFT 0xd
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_14_RT_CLASS__SHIFT 0xe
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_15_RT_CLASS__SHIFT 0xf
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_16_RT_CLASS__SHIFT 0x10
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_17_RT_CLASS__SHIFT 0x11
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_18_RT_CLASS__SHIFT 0x12
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_19_RT_CLASS__SHIFT 0x13
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_20_RT_CLASS__SHIFT 0x14
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_21_RT_CLASS__SHIFT 0x15
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_22_RT_CLASS__SHIFT 0x16
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_23_RT_CLASS__SHIFT 0x17
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_24_RT_CLASS__SHIFT 0x18
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_25_RT_CLASS__SHIFT 0x19
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_26_RT_CLASS__SHIFT 0x1a
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_27_RT_CLASS__SHIFT 0x1b
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_28_RT_CLASS__SHIFT 0x1c
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_29_RT_CLASS__SHIFT 0x1d
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_30_RT_CLASS__SHIFT 0x1e
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_31_RT_CLASS__SHIFT 0x1f
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_0_RT_CLASS_MASK 0x00000001L
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_1_RT_CLASS_MASK 0x00000002L
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_2_RT_CLASS_MASK 0x00000004L
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_3_RT_CLASS_MASK 0x00000008L
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_4_RT_CLASS_MASK 0x00000010L
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_5_RT_CLASS_MASK 0x00000020L
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_6_RT_CLASS_MASK 0x00000040L
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_7_RT_CLASS_MASK 0x00000080L
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_8_RT_CLASS_MASK 0x00000100L
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_9_RT_CLASS_MASK 0x00000200L
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_10_RT_CLASS_MASK 0x00000400L
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_11_RT_CLASS_MASK 0x00000800L
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_12_RT_CLASS_MASK 0x00001000L
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_13_RT_CLASS_MASK 0x00002000L
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_14_RT_CLASS_MASK 0x00004000L
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_15_RT_CLASS_MASK 0x00008000L
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_16_RT_CLASS_MASK 0x00010000L
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_17_RT_CLASS_MASK 0x00020000L
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_18_RT_CLASS_MASK 0x00040000L
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_19_RT_CLASS_MASK 0x00080000L
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_20_RT_CLASS_MASK 0x00100000L
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_21_RT_CLASS_MASK 0x00200000L
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_22_RT_CLASS_MASK 0x00400000L
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_23_RT_CLASS_MASK 0x00800000L
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_24_RT_CLASS_MASK 0x01000000L
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_25_RT_CLASS_MASK 0x02000000L
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_26_RT_CLASS_MASK 0x04000000L
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_27_RT_CLASS_MASK 0x08000000L
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_28_RT_CLASS_MASK 0x10000000L
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_29_RT_CLASS_MASK 0x20000000L
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_30_RT_CLASS_MASK 0x40000000L
++#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_31_RT_CLASS_MASK 0x80000000L
++//VM_L2_BANK_SELECT_RESERVED_CID
++#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_READ_CLIENT_ID__SHIFT 0x0
++#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_WRITE_CLIENT_ID__SHIFT 0xa
++#define VM_L2_BANK_SELECT_RESERVED_CID__ENABLE__SHIFT 0x14
++#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_INVALIDATION_MODE__SHIFT 0x18
++#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_PRIVATE_INVALIDATION__SHIFT 0x19
++#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_READ_CLIENT_ID_MASK 0x000001FFL
++#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_WRITE_CLIENT_ID_MASK 0x0007FC00L
++#define VM_L2_BANK_SELECT_RESERVED_CID__ENABLE_MASK 0x00100000L
++#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_INVALIDATION_MODE_MASK 0x01000000L
++#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_PRIVATE_INVALIDATION_MASK 0x02000000L
++//VM_L2_BANK_SELECT_RESERVED_CID2
++#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_READ_CLIENT_ID__SHIFT 0x0
++#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_WRITE_CLIENT_ID__SHIFT 0xa
++#define VM_L2_BANK_SELECT_RESERVED_CID2__ENABLE__SHIFT 0x14
++#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_INVALIDATION_MODE__SHIFT 0x18
++#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_PRIVATE_INVALIDATION__SHIFT 0x19
++#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_READ_CLIENT_ID_MASK 0x000001FFL
++#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_WRITE_CLIENT_ID_MASK 0x0007FC00L
++#define VM_L2_BANK_SELECT_RESERVED_CID2__ENABLE_MASK 0x00100000L
++#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_INVALIDATION_MODE_MASK 0x01000000L
++#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_PRIVATE_INVALIDATION_MASK 0x02000000L
++//VM_L2_CACHE_PARITY_CNTL
++#define VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_4K_PTE_CACHES__SHIFT 0x0
++#define VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_BIGK_PTE_CACHES__SHIFT 0x1
++#define VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_PDE_CACHES__SHIFT 0x2
++#define VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_4K_PTE_CACHE__SHIFT 0x3
++#define VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_BIGK_PTE_CACHE__SHIFT 0x4
++#define VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_PDE_CACHE__SHIFT 0x5
++#define VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_BANK__SHIFT 0x6
++#define VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_NUMBER__SHIFT 0x9
++#define VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_ASSOC__SHIFT 0xc
++#define VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_4K_PTE_CACHES_MASK 0x00000001L
++#define VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_BIGK_PTE_CACHES_MASK 0x00000002L
++#define VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_PDE_CACHES_MASK 0x00000004L
++#define VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_4K_PTE_CACHE_MASK 0x00000008L
++#define VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_BIGK_PTE_CACHE_MASK 0x00000010L
++#define VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_PDE_CACHE_MASK 0x00000020L
++#define VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_BANK_MASK 0x000001C0L
++#define VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_NUMBER_MASK 0x00000E00L
++#define VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_ASSOC_MASK 0x0000F000L
++//VM_L2_CGTT_CLK_CTRL
++#define VM_L2_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
++#define VM_L2_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
++#define VM_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
++#define VM_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x10
++#define VM_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x18
++#define VM_L2_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
++#define VM_L2_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
++#define VM_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
++#define VM_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L
++#define VM_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L
++
++
++// addressBlock: gc_utcl2_vml2vcdec
++//VM_CONTEXT0_CNTL
++#define VM_CONTEXT0_CNTL__ENABLE_CONTEXT__SHIFT 0x0
++#define VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
++#define VM_CONTEXT0_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
++#define VM_CONTEXT0_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
++#define VM_CONTEXT0_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
++#define VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
++#define VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
++#define VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
++#define VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
++#define VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
++#define VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
++#define VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
++#define VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
++#define VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
++#define VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
++#define VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
++#define VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
++#define VM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
++#define VM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
++#define VM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
++#define VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
++#define VM_CONTEXT0_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
++#define VM_CONTEXT0_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
++#define VM_CONTEXT0_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
++#define VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
++#define VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
++#define VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
++#define VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
++#define VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
++#define VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
++#define VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
++#define VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
++#define VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
++#define VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
++#define VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
++#define VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
++#define VM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
++#define VM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
++//VM_CONTEXT1_CNTL
++#define VM_CONTEXT1_CNTL__ENABLE_CONTEXT__SHIFT 0x0
++#define VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
++#define VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
++#define VM_CONTEXT1_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
++#define VM_CONTEXT1_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
++#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
++#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
++#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
++#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
++#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
++#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
++#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
++#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
++#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
++#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
++#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
++#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
++#define VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
++#define VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
++#define VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
++#define VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
++#define VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
++#define VM_CONTEXT1_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
++#define VM_CONTEXT1_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
++#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
++#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
++#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
++#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
++#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
++#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
++#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
++#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
++#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
++#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
++#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
++#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
++#define VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
++#define VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
++//VM_CONTEXT2_CNTL
++#define VM_CONTEXT2_CNTL__ENABLE_CONTEXT__SHIFT 0x0
++#define VM_CONTEXT2_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
++#define VM_CONTEXT2_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
++#define VM_CONTEXT2_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
++#define VM_CONTEXT2_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
++#define VM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
++#define VM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
++#define VM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
++#define VM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
++#define VM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
++#define VM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
++#define VM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
++#define VM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
++#define VM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
++#define VM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
++#define VM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
++#define VM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
++#define VM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
++#define VM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
++#define VM_CONTEXT2_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
++#define VM_CONTEXT2_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
++#define VM_CONTEXT2_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
++#define VM_CONTEXT2_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
++#define VM_CONTEXT2_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
++#define VM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
++#define VM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
++#define VM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
++#define VM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
++#define VM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
++#define VM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
++#define VM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
++#define VM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
++#define VM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
++#define VM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
++#define VM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
++#define VM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
++#define VM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
++#define VM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
++//VM_CONTEXT3_CNTL
++#define VM_CONTEXT3_CNTL__ENABLE_CONTEXT__SHIFT 0x0
++#define VM_CONTEXT3_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
++#define VM_CONTEXT3_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
++#define VM_CONTEXT3_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
++#define VM_CONTEXT3_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
++#define VM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
++#define VM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
++#define VM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
++#define VM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
++#define VM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
++#define VM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
++#define VM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
++#define VM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
++#define VM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
++#define VM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
++#define VM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
++#define VM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
++#define VM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
++#define VM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
++#define VM_CONTEXT3_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
++#define VM_CONTEXT3_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
++#define VM_CONTEXT3_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
++#define VM_CONTEXT3_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
++#define VM_CONTEXT3_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
++#define VM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
++#define VM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
++#define VM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
++#define VM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
++#define VM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
++#define VM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
++#define VM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
++#define VM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
++#define VM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
++#define VM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
++#define VM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
++#define VM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
++#define VM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
++#define VM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
++//VM_CONTEXT4_CNTL
++#define VM_CONTEXT4_CNTL__ENABLE_CONTEXT__SHIFT 0x0
++#define VM_CONTEXT4_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
++#define VM_CONTEXT4_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
++#define VM_CONTEXT4_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
++#define VM_CONTEXT4_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
++#define VM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
++#define VM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
++#define VM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
++#define VM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
++#define VM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
++#define VM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
++#define VM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
++#define VM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
++#define VM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
++#define VM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
++#define VM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
++#define VM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
++#define VM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
++#define VM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
++#define VM_CONTEXT4_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
++#define VM_CONTEXT4_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
++#define VM_CONTEXT4_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
++#define VM_CONTEXT4_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
++#define VM_CONTEXT4_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
++#define VM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
++#define VM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
++#define VM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
++#define VM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
++#define VM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
++#define VM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
++#define VM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
++#define VM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
++#define VM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
++#define VM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
++#define VM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
++#define VM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
++#define VM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
++#define VM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
++//VM_CONTEXT5_CNTL
++#define VM_CONTEXT5_CNTL__ENABLE_CONTEXT__SHIFT 0x0
++#define VM_CONTEXT5_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
++#define VM_CONTEXT5_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
++#define VM_CONTEXT5_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
++#define VM_CONTEXT5_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
++#define VM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
++#define VM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
++#define VM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
++#define VM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
++#define VM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
++#define VM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
++#define VM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
++#define VM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
++#define VM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
++#define VM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
++#define VM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
++#define VM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
++#define VM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
++#define VM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
++#define VM_CONTEXT5_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
++#define VM_CONTEXT5_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
++#define VM_CONTEXT5_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
++#define VM_CONTEXT5_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
++#define VM_CONTEXT5_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
++#define VM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
++#define VM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
++#define VM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
++#define VM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
++#define VM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
++#define VM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
++#define VM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
++#define VM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
++#define VM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
++#define VM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
++#define VM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
++#define VM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
++#define VM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
++#define VM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
++//VM_CONTEXT6_CNTL
++#define VM_CONTEXT6_CNTL__ENABLE_CONTEXT__SHIFT 0x0
++#define VM_CONTEXT6_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
++#define VM_CONTEXT6_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
++#define VM_CONTEXT6_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
++#define VM_CONTEXT6_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
++#define VM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
++#define VM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
++#define VM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
++#define VM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
++#define VM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
++#define VM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
++#define VM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
++#define VM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
++#define VM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
++#define VM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
++#define VM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
++#define VM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
++#define VM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
++#define VM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
++#define VM_CONTEXT6_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
++#define VM_CONTEXT6_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
++#define VM_CONTEXT6_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
++#define VM_CONTEXT6_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
++#define VM_CONTEXT6_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
++#define VM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
++#define VM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
++#define VM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
++#define VM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
++#define VM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
++#define VM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
++#define VM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
++#define VM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
++#define VM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
++#define VM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
++#define VM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
++#define VM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
++#define VM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
++#define VM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
++//VM_CONTEXT7_CNTL
++#define VM_CONTEXT7_CNTL__ENABLE_CONTEXT__SHIFT 0x0
++#define VM_CONTEXT7_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
++#define VM_CONTEXT7_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
++#define VM_CONTEXT7_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
++#define VM_CONTEXT7_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
++#define VM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
++#define VM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
++#define VM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
++#define VM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
++#define VM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
++#define VM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
++#define VM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
++#define VM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
++#define VM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
++#define VM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
++#define VM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
++#define VM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
++#define VM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
++#define VM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
++#define VM_CONTEXT7_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
++#define VM_CONTEXT7_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
++#define VM_CONTEXT7_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
++#define VM_CONTEXT7_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
++#define VM_CONTEXT7_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
++#define VM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
++#define VM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
++#define VM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
++#define VM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
++#define VM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
++#define VM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
++#define VM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
++#define VM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
++#define VM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
++#define VM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
++#define VM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
++#define VM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
++#define VM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
++#define VM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
++//VM_CONTEXT8_CNTL
++#define VM_CONTEXT8_CNTL__ENABLE_CONTEXT__SHIFT 0x0
++#define VM_CONTEXT8_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
++#define VM_CONTEXT8_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
++#define VM_CONTEXT8_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
++#define VM_CONTEXT8_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
++#define VM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
++#define VM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
++#define VM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
++#define VM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
++#define VM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
++#define VM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
++#define VM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
++#define VM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
++#define VM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
++#define VM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
++#define VM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
++#define VM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
++#define VM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
++#define VM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
++#define VM_CONTEXT8_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
++#define VM_CONTEXT8_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
++#define VM_CONTEXT8_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
++#define VM_CONTEXT8_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
++#define VM_CONTEXT8_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
++#define VM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
++#define VM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
++#define VM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
++#define VM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
++#define VM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
++#define VM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
++#define VM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
++#define VM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
++#define VM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
++#define VM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
++#define VM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
++#define VM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
++#define VM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
++#define VM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
++//VM_CONTEXT9_CNTL
++#define VM_CONTEXT9_CNTL__ENABLE_CONTEXT__SHIFT 0x0
++#define VM_CONTEXT9_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
++#define VM_CONTEXT9_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
++#define VM_CONTEXT9_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
++#define VM_CONTEXT9_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
++#define VM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
++#define VM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
++#define VM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
++#define VM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
++#define VM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
++#define VM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
++#define VM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
++#define VM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
++#define VM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
++#define VM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
++#define VM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
++#define VM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
++#define VM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
++#define VM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
++#define VM_CONTEXT9_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
++#define VM_CONTEXT9_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
++#define VM_CONTEXT9_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
++#define VM_CONTEXT9_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
++#define VM_CONTEXT9_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
++#define VM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
++#define VM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
++#define VM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
++#define VM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
++#define VM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
++#define VM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
++#define VM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
++#define VM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
++#define VM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
++#define VM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
++#define VM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
++#define VM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
++#define VM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
++#define VM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
++//VM_CONTEXT10_CNTL
++#define VM_CONTEXT10_CNTL__ENABLE_CONTEXT__SHIFT 0x0
++#define VM_CONTEXT10_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
++#define VM_CONTEXT10_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
++#define VM_CONTEXT10_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
++#define VM_CONTEXT10_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
++#define VM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
++#define VM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
++#define VM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
++#define VM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
++#define VM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
++#define VM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
++#define VM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
++#define VM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
++#define VM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
++#define VM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
++#define VM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
++#define VM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
++#define VM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
++#define VM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
++#define VM_CONTEXT10_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
++#define VM_CONTEXT10_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
++#define VM_CONTEXT10_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
++#define VM_CONTEXT10_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
++#define VM_CONTEXT10_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
++#define VM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
++#define VM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
++#define VM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
++#define VM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
++#define VM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
++#define VM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
++#define VM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
++#define VM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
++#define VM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
++#define VM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
++#define VM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
++#define VM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
++#define VM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
++#define VM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
++//VM_CONTEXT11_CNTL
++#define VM_CONTEXT11_CNTL__ENABLE_CONTEXT__SHIFT 0x0
++#define VM_CONTEXT11_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
++#define VM_CONTEXT11_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
++#define VM_CONTEXT11_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
++#define VM_CONTEXT11_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
++#define VM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
++#define VM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
++#define VM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
++#define VM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
++#define VM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
++#define VM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
++#define VM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
++#define VM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
++#define VM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
++#define VM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
++#define VM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
++#define VM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
++#define VM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
++#define VM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
++#define VM_CONTEXT11_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
++#define VM_CONTEXT11_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
++#define VM_CONTEXT11_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
++#define VM_CONTEXT11_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
++#define VM_CONTEXT11_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
++#define VM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
++#define VM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
++#define VM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
++#define VM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
++#define VM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
++#define VM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
++#define VM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
++#define VM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
++#define VM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
++#define VM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
++#define VM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
++#define VM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
++#define VM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
++#define VM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
++//VM_CONTEXT12_CNTL
++#define VM_CONTEXT12_CNTL__ENABLE_CONTEXT__SHIFT 0x0
++#define VM_CONTEXT12_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
++#define VM_CONTEXT12_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
++#define VM_CONTEXT12_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
++#define VM_CONTEXT12_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
++#define VM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
++#define VM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
++#define VM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
++#define VM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
++#define VM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
++#define VM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
++#define VM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
++#define VM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
++#define VM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
++#define VM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
++#define VM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
++#define VM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
++#define VM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
++#define VM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
++#define VM_CONTEXT12_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
++#define VM_CONTEXT12_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
++#define VM_CONTEXT12_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
++#define VM_CONTEXT12_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
++#define VM_CONTEXT12_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
++#define VM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
++#define VM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
++#define VM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
++#define VM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
++#define VM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
++#define VM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
++#define VM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
++#define VM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
++#define VM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
++#define VM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
++#define VM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
++#define VM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
++#define VM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
++#define VM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
++//VM_CONTEXT13_CNTL
++#define VM_CONTEXT13_CNTL__ENABLE_CONTEXT__SHIFT 0x0
++#define VM_CONTEXT13_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
++#define VM_CONTEXT13_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
++#define VM_CONTEXT13_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
++#define VM_CONTEXT13_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
++#define VM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
++#define VM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
++#define VM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
++#define VM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
++#define VM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
++#define VM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
++#define VM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
++#define VM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
++#define VM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
++#define VM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
++#define VM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
++#define VM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
++#define VM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
++#define VM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
++#define VM_CONTEXT13_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
++#define VM_CONTEXT13_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
++#define VM_CONTEXT13_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
++#define VM_CONTEXT13_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
++#define VM_CONTEXT13_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
++#define VM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
++#define VM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
++#define VM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
++#define VM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
++#define VM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
++#define VM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
++#define VM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
++#define VM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
++#define VM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
++#define VM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
++#define VM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
++#define VM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
++#define VM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
++#define VM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
++//VM_CONTEXT14_CNTL
++#define VM_CONTEXT14_CNTL__ENABLE_CONTEXT__SHIFT 0x0
++#define VM_CONTEXT14_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
++#define VM_CONTEXT14_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
++#define VM_CONTEXT14_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
++#define VM_CONTEXT14_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
++#define VM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
++#define VM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
++#define VM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
++#define VM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
++#define VM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
++#define VM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
++#define VM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
++#define VM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
++#define VM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
++#define VM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
++#define VM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
++#define VM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
++#define VM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
++#define VM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
++#define VM_CONTEXT14_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
++#define VM_CONTEXT14_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
++#define VM_CONTEXT14_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
++#define VM_CONTEXT14_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
++#define VM_CONTEXT14_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
++#define VM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
++#define VM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
++#define VM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
++#define VM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
++#define VM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
++#define VM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
++#define VM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
++#define VM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
++#define VM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
++#define VM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
++#define VM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
++#define VM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
++#define VM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
++#define VM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
++//VM_CONTEXT15_CNTL
++#define VM_CONTEXT15_CNTL__ENABLE_CONTEXT__SHIFT 0x0
++#define VM_CONTEXT15_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
++#define VM_CONTEXT15_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
++#define VM_CONTEXT15_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
++#define VM_CONTEXT15_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
++#define VM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
++#define VM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
++#define VM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
++#define VM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
++#define VM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
++#define VM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
++#define VM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
++#define VM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
++#define VM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
++#define VM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
++#define VM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
++#define VM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
++#define VM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
++#define VM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
++#define VM_CONTEXT15_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
++#define VM_CONTEXT15_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
++#define VM_CONTEXT15_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
++#define VM_CONTEXT15_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
++#define VM_CONTEXT15_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
++#define VM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
++#define VM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
++#define VM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
++#define VM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
++#define VM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
++#define VM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
++#define VM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
++#define VM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
++#define VM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
++#define VM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
++#define VM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
++#define VM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
++#define VM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
++#define VM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
++//VM_CONTEXTS_DISABLE
++#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_0__SHIFT 0x0
++#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_1__SHIFT 0x1
++#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_2__SHIFT 0x2
++#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_3__SHIFT 0x3
++#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_4__SHIFT 0x4
++#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_5__SHIFT 0x5
++#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_6__SHIFT 0x6
++#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_7__SHIFT 0x7
++#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_8__SHIFT 0x8
++#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_9__SHIFT 0x9
++#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_10__SHIFT 0xa
++#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_11__SHIFT 0xb
++#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_12__SHIFT 0xc
++#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_13__SHIFT 0xd
++#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_14__SHIFT 0xe
++#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_15__SHIFT 0xf
++#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_0_MASK 0x00000001L
++#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_1_MASK 0x00000002L
++#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_2_MASK 0x00000004L
++#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_3_MASK 0x00000008L
++#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_4_MASK 0x00000010L
++#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_5_MASK 0x00000020L
++#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_6_MASK 0x00000040L
++#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_7_MASK 0x00000080L
++#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_8_MASK 0x00000100L
++#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_9_MASK 0x00000200L
++#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_10_MASK 0x00000400L
++#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_11_MASK 0x00000800L
++#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_12_MASK 0x00001000L
++#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_13_MASK 0x00002000L
++#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_14_MASK 0x00004000L
++#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_15_MASK 0x00008000L
++//VM_INVALIDATE_ENG0_SEM
++#define VM_INVALIDATE_ENG0_SEM__SEMAPHORE__SHIFT 0x0
++#define VM_INVALIDATE_ENG0_SEM__SEMAPHORE_MASK 0x00000001L
++//VM_INVALIDATE_ENG1_SEM
++#define VM_INVALIDATE_ENG1_SEM__SEMAPHORE__SHIFT 0x0
++#define VM_INVALIDATE_ENG1_SEM__SEMAPHORE_MASK 0x00000001L
++//VM_INVALIDATE_ENG2_SEM
++#define VM_INVALIDATE_ENG2_SEM__SEMAPHORE__SHIFT 0x0
++#define VM_INVALIDATE_ENG2_SEM__SEMAPHORE_MASK 0x00000001L
++//VM_INVALIDATE_ENG3_SEM
++#define VM_INVALIDATE_ENG3_SEM__SEMAPHORE__SHIFT 0x0
++#define VM_INVALIDATE_ENG3_SEM__SEMAPHORE_MASK 0x00000001L
++//VM_INVALIDATE_ENG4_SEM
++#define VM_INVALIDATE_ENG4_SEM__SEMAPHORE__SHIFT 0x0
++#define VM_INVALIDATE_ENG4_SEM__SEMAPHORE_MASK 0x00000001L
++//VM_INVALIDATE_ENG5_SEM
++#define VM_INVALIDATE_ENG5_SEM__SEMAPHORE__SHIFT 0x0
++#define VM_INVALIDATE_ENG5_SEM__SEMAPHORE_MASK 0x00000001L
++//VM_INVALIDATE_ENG6_SEM
++#define VM_INVALIDATE_ENG6_SEM__SEMAPHORE__SHIFT 0x0
++#define VM_INVALIDATE_ENG6_SEM__SEMAPHORE_MASK 0x00000001L
++//VM_INVALIDATE_ENG7_SEM
++#define VM_INVALIDATE_ENG7_SEM__SEMAPHORE__SHIFT 0x0
++#define VM_INVALIDATE_ENG7_SEM__SEMAPHORE_MASK 0x00000001L
++//VM_INVALIDATE_ENG8_SEM
++#define VM_INVALIDATE_ENG8_SEM__SEMAPHORE__SHIFT 0x0
++#define VM_INVALIDATE_ENG8_SEM__SEMAPHORE_MASK 0x00000001L
++//VM_INVALIDATE_ENG9_SEM
++#define VM_INVALIDATE_ENG9_SEM__SEMAPHORE__SHIFT 0x0
++#define VM_INVALIDATE_ENG9_SEM__SEMAPHORE_MASK 0x00000001L
++//VM_INVALIDATE_ENG10_SEM
++#define VM_INVALIDATE_ENG10_SEM__SEMAPHORE__SHIFT 0x0
++#define VM_INVALIDATE_ENG10_SEM__SEMAPHORE_MASK 0x00000001L
++//VM_INVALIDATE_ENG11_SEM
++#define VM_INVALIDATE_ENG11_SEM__SEMAPHORE__SHIFT 0x0
++#define VM_INVALIDATE_ENG11_SEM__SEMAPHORE_MASK 0x00000001L
++//VM_INVALIDATE_ENG12_SEM
++#define VM_INVALIDATE_ENG12_SEM__SEMAPHORE__SHIFT 0x0
++#define VM_INVALIDATE_ENG12_SEM__SEMAPHORE_MASK 0x00000001L
++//VM_INVALIDATE_ENG13_SEM
++#define VM_INVALIDATE_ENG13_SEM__SEMAPHORE__SHIFT 0x0
++#define VM_INVALIDATE_ENG13_SEM__SEMAPHORE_MASK 0x00000001L
++//VM_INVALIDATE_ENG14_SEM
++#define VM_INVALIDATE_ENG14_SEM__SEMAPHORE__SHIFT 0x0
++#define VM_INVALIDATE_ENG14_SEM__SEMAPHORE_MASK 0x00000001L
++//VM_INVALIDATE_ENG15_SEM
++#define VM_INVALIDATE_ENG15_SEM__SEMAPHORE__SHIFT 0x0
++#define VM_INVALIDATE_ENG15_SEM__SEMAPHORE_MASK 0x00000001L
++//VM_INVALIDATE_ENG16_SEM
++#define VM_INVALIDATE_ENG16_SEM__SEMAPHORE__SHIFT 0x0
++#define VM_INVALIDATE_ENG16_SEM__SEMAPHORE_MASK 0x00000001L
++//VM_INVALIDATE_ENG17_SEM
++#define VM_INVALIDATE_ENG17_SEM__SEMAPHORE__SHIFT 0x0
++#define VM_INVALIDATE_ENG17_SEM__SEMAPHORE_MASK 0x00000001L
++//VM_INVALIDATE_ENG0_REQ
++#define VM_INVALIDATE_ENG0_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
++#define VM_INVALIDATE_ENG0_REQ__FLUSH_TYPE__SHIFT 0x10
++#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
++#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
++#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
++#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
++#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
++#define VM_INVALIDATE_ENG0_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
++#define VM_INVALIDATE_ENG0_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
++#define VM_INVALIDATE_ENG0_REQ__FLUSH_TYPE_MASK 0x00030000L
++#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
++#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
++#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
++#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
++#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
++#define VM_INVALIDATE_ENG0_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
++//VM_INVALIDATE_ENG1_REQ
++#define VM_INVALIDATE_ENG1_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
++#define VM_INVALIDATE_ENG1_REQ__FLUSH_TYPE__SHIFT 0x10
++#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
++#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
++#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
++#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
++#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
++#define VM_INVALIDATE_ENG1_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
++#define VM_INVALIDATE_ENG1_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
++#define VM_INVALIDATE_ENG1_REQ__FLUSH_TYPE_MASK 0x00030000L
++#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
++#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
++#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
++#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
++#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
++#define VM_INVALIDATE_ENG1_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
++//VM_INVALIDATE_ENG2_REQ
++#define VM_INVALIDATE_ENG2_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
++#define VM_INVALIDATE_ENG2_REQ__FLUSH_TYPE__SHIFT 0x10
++#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
++#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
++#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
++#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
++#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
++#define VM_INVALIDATE_ENG2_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
++#define VM_INVALIDATE_ENG2_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
++#define VM_INVALIDATE_ENG2_REQ__FLUSH_TYPE_MASK 0x00030000L
++#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
++#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
++#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
++#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
++#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
++#define VM_INVALIDATE_ENG2_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
++//VM_INVALIDATE_ENG3_REQ
++#define VM_INVALIDATE_ENG3_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
++#define VM_INVALIDATE_ENG3_REQ__FLUSH_TYPE__SHIFT 0x10
++#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
++#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
++#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
++#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
++#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
++#define VM_INVALIDATE_ENG3_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
++#define VM_INVALIDATE_ENG3_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
++#define VM_INVALIDATE_ENG3_REQ__FLUSH_TYPE_MASK 0x00030000L
++#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
++#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
++#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
++#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
++#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
++#define VM_INVALIDATE_ENG3_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
++//VM_INVALIDATE_ENG4_REQ
++#define VM_INVALIDATE_ENG4_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
++#define VM_INVALIDATE_ENG4_REQ__FLUSH_TYPE__SHIFT 0x10
++#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
++#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
++#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
++#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
++#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
++#define VM_INVALIDATE_ENG4_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
++#define VM_INVALIDATE_ENG4_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
++#define VM_INVALIDATE_ENG4_REQ__FLUSH_TYPE_MASK 0x00030000L
++#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
++#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
++#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
++#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
++#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
++#define VM_INVALIDATE_ENG4_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
++//VM_INVALIDATE_ENG5_REQ
++#define VM_INVALIDATE_ENG5_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
++#define VM_INVALIDATE_ENG5_REQ__FLUSH_TYPE__SHIFT 0x10
++#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
++#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
++#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
++#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
++#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
++#define VM_INVALIDATE_ENG5_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
++#define VM_INVALIDATE_ENG5_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
++#define VM_INVALIDATE_ENG5_REQ__FLUSH_TYPE_MASK 0x00030000L
++#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
++#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
++#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
++#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
++#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
++#define VM_INVALIDATE_ENG5_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
++//VM_INVALIDATE_ENG6_REQ
++#define VM_INVALIDATE_ENG6_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
++#define VM_INVALIDATE_ENG6_REQ__FLUSH_TYPE__SHIFT 0x10
++#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
++#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
++#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
++#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
++#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
++#define VM_INVALIDATE_ENG6_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
++#define VM_INVALIDATE_ENG6_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
++#define VM_INVALIDATE_ENG6_REQ__FLUSH_TYPE_MASK 0x00030000L
++#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
++#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
++#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
++#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
++#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
++#define VM_INVALIDATE_ENG6_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
++//VM_INVALIDATE_ENG7_REQ
++#define VM_INVALIDATE_ENG7_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
++#define VM_INVALIDATE_ENG7_REQ__FLUSH_TYPE__SHIFT 0x10
++#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
++#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
++#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
++#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
++#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
++#define VM_INVALIDATE_ENG7_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
++#define VM_INVALIDATE_ENG7_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
++#define VM_INVALIDATE_ENG7_REQ__FLUSH_TYPE_MASK 0x00030000L
++#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
++#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
++#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
++#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
++#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
++#define VM_INVALIDATE_ENG7_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
++//VM_INVALIDATE_ENG8_REQ
++#define VM_INVALIDATE_ENG8_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
++#define VM_INVALIDATE_ENG8_REQ__FLUSH_TYPE__SHIFT 0x10
++#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
++#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
++#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
++#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
++#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
++#define VM_INVALIDATE_ENG8_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
++#define VM_INVALIDATE_ENG8_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
++#define VM_INVALIDATE_ENG8_REQ__FLUSH_TYPE_MASK 0x00030000L
++#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
++#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
++#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
++#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
++#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
++#define VM_INVALIDATE_ENG8_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
++//VM_INVALIDATE_ENG9_REQ
++#define VM_INVALIDATE_ENG9_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
++#define VM_INVALIDATE_ENG9_REQ__FLUSH_TYPE__SHIFT 0x10
++#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
++#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
++#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
++#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
++#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
++#define VM_INVALIDATE_ENG9_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
++#define VM_INVALIDATE_ENG9_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
++#define VM_INVALIDATE_ENG9_REQ__FLUSH_TYPE_MASK 0x00030000L
++#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
++#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
++#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
++#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
++#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
++#define VM_INVALIDATE_ENG9_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
++//VM_INVALIDATE_ENG10_REQ
++#define VM_INVALIDATE_ENG10_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
++#define VM_INVALIDATE_ENG10_REQ__FLUSH_TYPE__SHIFT 0x10
++#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
++#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
++#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
++#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
++#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
++#define VM_INVALIDATE_ENG10_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
++#define VM_INVALIDATE_ENG10_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
++#define VM_INVALIDATE_ENG10_REQ__FLUSH_TYPE_MASK 0x00030000L
++#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
++#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
++#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
++#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
++#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
++#define VM_INVALIDATE_ENG10_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
++//VM_INVALIDATE_ENG11_REQ
++#define VM_INVALIDATE_ENG11_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
++#define VM_INVALIDATE_ENG11_REQ__FLUSH_TYPE__SHIFT 0x10
++#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
++#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
++#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
++#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
++#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
++#define VM_INVALIDATE_ENG11_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
++#define VM_INVALIDATE_ENG11_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
++#define VM_INVALIDATE_ENG11_REQ__FLUSH_TYPE_MASK 0x00030000L
++#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
++#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
++#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
++#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
++#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
++#define VM_INVALIDATE_ENG11_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
++//VM_INVALIDATE_ENG12_REQ
++#define VM_INVALIDATE_ENG12_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
++#define VM_INVALIDATE_ENG12_REQ__FLUSH_TYPE__SHIFT 0x10
++#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
++#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
++#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
++#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
++#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
++#define VM_INVALIDATE_ENG12_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
++#define VM_INVALIDATE_ENG12_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
++#define VM_INVALIDATE_ENG12_REQ__FLUSH_TYPE_MASK 0x00030000L
++#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
++#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
++#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
++#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
++#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
++#define VM_INVALIDATE_ENG12_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
++//VM_INVALIDATE_ENG13_REQ
++#define VM_INVALIDATE_ENG13_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
++#define VM_INVALIDATE_ENG13_REQ__FLUSH_TYPE__SHIFT 0x10
++#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
++#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
++#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
++#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
++#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
++#define VM_INVALIDATE_ENG13_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
++#define VM_INVALIDATE_ENG13_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
++#define VM_INVALIDATE_ENG13_REQ__FLUSH_TYPE_MASK 0x00030000L
++#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
++#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
++#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
++#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
++#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
++#define VM_INVALIDATE_ENG13_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
++//VM_INVALIDATE_ENG14_REQ
++#define VM_INVALIDATE_ENG14_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
++#define VM_INVALIDATE_ENG14_REQ__FLUSH_TYPE__SHIFT 0x10
++#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
++#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
++#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
++#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
++#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
++#define VM_INVALIDATE_ENG14_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
++#define VM_INVALIDATE_ENG14_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
++#define VM_INVALIDATE_ENG14_REQ__FLUSH_TYPE_MASK 0x00030000L
++#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
++#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
++#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
++#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
++#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
++#define VM_INVALIDATE_ENG14_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
++//VM_INVALIDATE_ENG15_REQ
++#define VM_INVALIDATE_ENG15_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
++#define VM_INVALIDATE_ENG15_REQ__FLUSH_TYPE__SHIFT 0x10
++#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
++#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
++#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
++#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
++#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
++#define VM_INVALIDATE_ENG15_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
++#define VM_INVALIDATE_ENG15_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
++#define VM_INVALIDATE_ENG15_REQ__FLUSH_TYPE_MASK 0x00030000L
++#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
++#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
++#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
++#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
++#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
++#define VM_INVALIDATE_ENG15_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
++//VM_INVALIDATE_ENG16_REQ
++#define VM_INVALIDATE_ENG16_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
++#define VM_INVALIDATE_ENG16_REQ__FLUSH_TYPE__SHIFT 0x10
++#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
++#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
++#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
++#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
++#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
++#define VM_INVALIDATE_ENG16_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
++#define VM_INVALIDATE_ENG16_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
++#define VM_INVALIDATE_ENG16_REQ__FLUSH_TYPE_MASK 0x00030000L
++#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
++#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
++#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
++#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
++#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
++#define VM_INVALIDATE_ENG16_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
++//VM_INVALIDATE_ENG17_REQ
++#define VM_INVALIDATE_ENG17_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
++#define VM_INVALIDATE_ENG17_REQ__FLUSH_TYPE__SHIFT 0x10
++#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
++#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
++#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
++#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
++#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
++#define VM_INVALIDATE_ENG17_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
++#define VM_INVALIDATE_ENG17_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
++#define VM_INVALIDATE_ENG17_REQ__FLUSH_TYPE_MASK 0x00030000L
++#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
++#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
++#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
++#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
++#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
++#define VM_INVALIDATE_ENG17_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
++//VM_INVALIDATE_ENG0_ACK
++#define VM_INVALIDATE_ENG0_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
++#define VM_INVALIDATE_ENG0_ACK__SEMAPHORE__SHIFT 0x10
++#define VM_INVALIDATE_ENG0_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
++#define VM_INVALIDATE_ENG0_ACK__SEMAPHORE_MASK 0x00010000L
++//VM_INVALIDATE_ENG1_ACK
++#define VM_INVALIDATE_ENG1_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
++#define VM_INVALIDATE_ENG1_ACK__SEMAPHORE__SHIFT 0x10
++#define VM_INVALIDATE_ENG1_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
++#define VM_INVALIDATE_ENG1_ACK__SEMAPHORE_MASK 0x00010000L
++//VM_INVALIDATE_ENG2_ACK
++#define VM_INVALIDATE_ENG2_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
++#define VM_INVALIDATE_ENG2_ACK__SEMAPHORE__SHIFT 0x10
++#define VM_INVALIDATE_ENG2_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
++#define VM_INVALIDATE_ENG2_ACK__SEMAPHORE_MASK 0x00010000L
++//VM_INVALIDATE_ENG3_ACK
++#define VM_INVALIDATE_ENG3_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
++#define VM_INVALIDATE_ENG3_ACK__SEMAPHORE__SHIFT 0x10
++#define VM_INVALIDATE_ENG3_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
++#define VM_INVALIDATE_ENG3_ACK__SEMAPHORE_MASK 0x00010000L
++//VM_INVALIDATE_ENG4_ACK
++#define VM_INVALIDATE_ENG4_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
++#define VM_INVALIDATE_ENG4_ACK__SEMAPHORE__SHIFT 0x10
++#define VM_INVALIDATE_ENG4_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
++#define VM_INVALIDATE_ENG4_ACK__SEMAPHORE_MASK 0x00010000L
++//VM_INVALIDATE_ENG5_ACK
++#define VM_INVALIDATE_ENG5_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
++#define VM_INVALIDATE_ENG5_ACK__SEMAPHORE__SHIFT 0x10
++#define VM_INVALIDATE_ENG5_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
++#define VM_INVALIDATE_ENG5_ACK__SEMAPHORE_MASK 0x00010000L
++//VM_INVALIDATE_ENG6_ACK
++#define VM_INVALIDATE_ENG6_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
++#define VM_INVALIDATE_ENG6_ACK__SEMAPHORE__SHIFT 0x10
++#define VM_INVALIDATE_ENG6_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
++#define VM_INVALIDATE_ENG6_ACK__SEMAPHORE_MASK 0x00010000L
++//VM_INVALIDATE_ENG7_ACK
++#define VM_INVALIDATE_ENG7_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
++#define VM_INVALIDATE_ENG7_ACK__SEMAPHORE__SHIFT 0x10
++#define VM_INVALIDATE_ENG7_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
++#define VM_INVALIDATE_ENG7_ACK__SEMAPHORE_MASK 0x00010000L
++//VM_INVALIDATE_ENG8_ACK
++#define VM_INVALIDATE_ENG8_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
++#define VM_INVALIDATE_ENG8_ACK__SEMAPHORE__SHIFT 0x10
++#define VM_INVALIDATE_ENG8_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
++#define VM_INVALIDATE_ENG8_ACK__SEMAPHORE_MASK 0x00010000L
++//VM_INVALIDATE_ENG9_ACK
++#define VM_INVALIDATE_ENG9_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
++#define VM_INVALIDATE_ENG9_ACK__SEMAPHORE__SHIFT 0x10
++#define VM_INVALIDATE_ENG9_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
++#define VM_INVALIDATE_ENG9_ACK__SEMAPHORE_MASK 0x00010000L
++//VM_INVALIDATE_ENG10_ACK
++#define VM_INVALIDATE_ENG10_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
++#define VM_INVALIDATE_ENG10_ACK__SEMAPHORE__SHIFT 0x10
++#define VM_INVALIDATE_ENG10_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
++#define VM_INVALIDATE_ENG10_ACK__SEMAPHORE_MASK 0x00010000L
++//VM_INVALIDATE_ENG11_ACK
++#define VM_INVALIDATE_ENG11_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
++#define VM_INVALIDATE_ENG11_ACK__SEMAPHORE__SHIFT 0x10
++#define VM_INVALIDATE_ENG11_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
++#define VM_INVALIDATE_ENG11_ACK__SEMAPHORE_MASK 0x00010000L
++//VM_INVALIDATE_ENG12_ACK
++#define VM_INVALIDATE_ENG12_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
++#define VM_INVALIDATE_ENG12_ACK__SEMAPHORE__SHIFT 0x10
++#define VM_INVALIDATE_ENG12_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
++#define VM_INVALIDATE_ENG12_ACK__SEMAPHORE_MASK 0x00010000L
++//VM_INVALIDATE_ENG13_ACK
++#define VM_INVALIDATE_ENG13_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
++#define VM_INVALIDATE_ENG13_ACK__SEMAPHORE__SHIFT 0x10
++#define VM_INVALIDATE_ENG13_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
++#define VM_INVALIDATE_ENG13_ACK__SEMAPHORE_MASK 0x00010000L
++//VM_INVALIDATE_ENG14_ACK
++#define VM_INVALIDATE_ENG14_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
++#define VM_INVALIDATE_ENG14_ACK__SEMAPHORE__SHIFT 0x10
++#define VM_INVALIDATE_ENG14_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
++#define VM_INVALIDATE_ENG14_ACK__SEMAPHORE_MASK 0x00010000L
++//VM_INVALIDATE_ENG15_ACK
++#define VM_INVALIDATE_ENG15_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
++#define VM_INVALIDATE_ENG15_ACK__SEMAPHORE__SHIFT 0x10
++#define VM_INVALIDATE_ENG15_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
++#define VM_INVALIDATE_ENG15_ACK__SEMAPHORE_MASK 0x00010000L
++//VM_INVALIDATE_ENG16_ACK
++#define VM_INVALIDATE_ENG16_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
++#define VM_INVALIDATE_ENG16_ACK__SEMAPHORE__SHIFT 0x10
++#define VM_INVALIDATE_ENG16_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
++#define VM_INVALIDATE_ENG16_ACK__SEMAPHORE_MASK 0x00010000L
++//VM_INVALIDATE_ENG17_ACK
++#define VM_INVALIDATE_ENG17_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
++#define VM_INVALIDATE_ENG17_ACK__SEMAPHORE__SHIFT 0x10
++#define VM_INVALIDATE_ENG17_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
++#define VM_INVALIDATE_ENG17_ACK__SEMAPHORE_MASK 0x00010000L
++//VM_INVALIDATE_ENG0_ADDR_RANGE_LO32
++#define VM_INVALIDATE_ENG0_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
++#define VM_INVALIDATE_ENG0_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
++#define VM_INVALIDATE_ENG0_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
++#define VM_INVALIDATE_ENG0_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
++//VM_INVALIDATE_ENG0_ADDR_RANGE_HI32
++#define VM_INVALIDATE_ENG0_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
++#define VM_INVALIDATE_ENG0_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
++//VM_INVALIDATE_ENG1_ADDR_RANGE_LO32
++#define VM_INVALIDATE_ENG1_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
++#define VM_INVALIDATE_ENG1_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
++#define VM_INVALIDATE_ENG1_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
++#define VM_INVALIDATE_ENG1_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
++//VM_INVALIDATE_ENG1_ADDR_RANGE_HI32
++#define VM_INVALIDATE_ENG1_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
++#define VM_INVALIDATE_ENG1_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
++//VM_INVALIDATE_ENG2_ADDR_RANGE_LO32
++#define VM_INVALIDATE_ENG2_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
++#define VM_INVALIDATE_ENG2_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
++#define VM_INVALIDATE_ENG2_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
++#define VM_INVALIDATE_ENG2_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
++//VM_INVALIDATE_ENG2_ADDR_RANGE_HI32
++#define VM_INVALIDATE_ENG2_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
++#define VM_INVALIDATE_ENG2_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
++//VM_INVALIDATE_ENG3_ADDR_RANGE_LO32
++#define VM_INVALIDATE_ENG3_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
++#define VM_INVALIDATE_ENG3_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
++#define VM_INVALIDATE_ENG3_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
++#define VM_INVALIDATE_ENG3_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
++//VM_INVALIDATE_ENG3_ADDR_RANGE_HI32
++#define VM_INVALIDATE_ENG3_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
++#define VM_INVALIDATE_ENG3_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
++//VM_INVALIDATE_ENG4_ADDR_RANGE_LO32
++#define VM_INVALIDATE_ENG4_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
++#define VM_INVALIDATE_ENG4_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
++#define VM_INVALIDATE_ENG4_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
++#define VM_INVALIDATE_ENG4_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
++//VM_INVALIDATE_ENG4_ADDR_RANGE_HI32
++#define VM_INVALIDATE_ENG4_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
++#define VM_INVALIDATE_ENG4_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
++//VM_INVALIDATE_ENG5_ADDR_RANGE_LO32
++#define VM_INVALIDATE_ENG5_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
++#define VM_INVALIDATE_ENG5_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
++#define VM_INVALIDATE_ENG5_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
++#define VM_INVALIDATE_ENG5_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
++//VM_INVALIDATE_ENG5_ADDR_RANGE_HI32
++#define VM_INVALIDATE_ENG5_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
++#define VM_INVALIDATE_ENG5_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
++//VM_INVALIDATE_ENG6_ADDR_RANGE_LO32
++#define VM_INVALIDATE_ENG6_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
++#define VM_INVALIDATE_ENG6_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
++#define VM_INVALIDATE_ENG6_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
++#define VM_INVALIDATE_ENG6_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
++//VM_INVALIDATE_ENG6_ADDR_RANGE_HI32
++#define VM_INVALIDATE_ENG6_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
++#define VM_INVALIDATE_ENG6_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
++//VM_INVALIDATE_ENG7_ADDR_RANGE_LO32
++#define VM_INVALIDATE_ENG7_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
++#define VM_INVALIDATE_ENG7_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
++#define VM_INVALIDATE_ENG7_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
++#define VM_INVALIDATE_ENG7_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
++//VM_INVALIDATE_ENG7_ADDR_RANGE_HI32
++#define VM_INVALIDATE_ENG7_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
++#define VM_INVALIDATE_ENG7_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
++//VM_INVALIDATE_ENG8_ADDR_RANGE_LO32
++#define VM_INVALIDATE_ENG8_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
++#define VM_INVALIDATE_ENG8_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
++#define VM_INVALIDATE_ENG8_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
++#define VM_INVALIDATE_ENG8_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
++//VM_INVALIDATE_ENG8_ADDR_RANGE_HI32
++#define VM_INVALIDATE_ENG8_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
++#define VM_INVALIDATE_ENG8_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
++//VM_INVALIDATE_ENG9_ADDR_RANGE_LO32
++#define VM_INVALIDATE_ENG9_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
++#define VM_INVALIDATE_ENG9_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
++#define VM_INVALIDATE_ENG9_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
++#define VM_INVALIDATE_ENG9_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
++//VM_INVALIDATE_ENG9_ADDR_RANGE_HI32
++#define VM_INVALIDATE_ENG9_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
++#define VM_INVALIDATE_ENG9_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
++//VM_INVALIDATE_ENG10_ADDR_RANGE_LO32
++#define VM_INVALIDATE_ENG10_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
++#define VM_INVALIDATE_ENG10_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
++#define VM_INVALIDATE_ENG10_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
++#define VM_INVALIDATE_ENG10_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
++//VM_INVALIDATE_ENG10_ADDR_RANGE_HI32
++#define VM_INVALIDATE_ENG10_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
++#define VM_INVALIDATE_ENG10_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
++//VM_INVALIDATE_ENG11_ADDR_RANGE_LO32
++#define VM_INVALIDATE_ENG11_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
++#define VM_INVALIDATE_ENG11_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
++#define VM_INVALIDATE_ENG11_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
++#define VM_INVALIDATE_ENG11_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
++//VM_INVALIDATE_ENG11_ADDR_RANGE_HI32
++#define VM_INVALIDATE_ENG11_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
++#define VM_INVALIDATE_ENG11_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
++//VM_INVALIDATE_ENG12_ADDR_RANGE_LO32
++#define VM_INVALIDATE_ENG12_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
++#define VM_INVALIDATE_ENG12_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
++#define VM_INVALIDATE_ENG12_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
++#define VM_INVALIDATE_ENG12_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
++//VM_INVALIDATE_ENG12_ADDR_RANGE_HI32
++#define VM_INVALIDATE_ENG12_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
++#define VM_INVALIDATE_ENG12_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
++//VM_INVALIDATE_ENG13_ADDR_RANGE_LO32
++#define VM_INVALIDATE_ENG13_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
++#define VM_INVALIDATE_ENG13_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
++#define VM_INVALIDATE_ENG13_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
++#define VM_INVALIDATE_ENG13_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
++//VM_INVALIDATE_ENG13_ADDR_RANGE_HI32
++#define VM_INVALIDATE_ENG13_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
++#define VM_INVALIDATE_ENG13_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
++//VM_INVALIDATE_ENG14_ADDR_RANGE_LO32
++#define VM_INVALIDATE_ENG14_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
++#define VM_INVALIDATE_ENG14_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
++#define VM_INVALIDATE_ENG14_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
++#define VM_INVALIDATE_ENG14_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
++//VM_INVALIDATE_ENG14_ADDR_RANGE_HI32
++#define VM_INVALIDATE_ENG14_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
++#define VM_INVALIDATE_ENG14_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
++//VM_INVALIDATE_ENG15_ADDR_RANGE_LO32
++#define VM_INVALIDATE_ENG15_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
++#define VM_INVALIDATE_ENG15_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
++#define VM_INVALIDATE_ENG15_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
++#define VM_INVALIDATE_ENG15_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
++//VM_INVALIDATE_ENG15_ADDR_RANGE_HI32
++#define VM_INVALIDATE_ENG15_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
++#define VM_INVALIDATE_ENG15_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
++//VM_INVALIDATE_ENG16_ADDR_RANGE_LO32
++#define VM_INVALIDATE_ENG16_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
++#define VM_INVALIDATE_ENG16_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
++#define VM_INVALIDATE_ENG16_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
++#define VM_INVALIDATE_ENG16_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
++//VM_INVALIDATE_ENG16_ADDR_RANGE_HI32
++#define VM_INVALIDATE_ENG16_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
++#define VM_INVALIDATE_ENG16_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
++//VM_INVALIDATE_ENG17_ADDR_RANGE_LO32
++#define VM_INVALIDATE_ENG17_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
++#define VM_INVALIDATE_ENG17_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
++#define VM_INVALIDATE_ENG17_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
++#define VM_INVALIDATE_ENG17_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
++//VM_INVALIDATE_ENG17_ADDR_RANGE_HI32
++#define VM_INVALIDATE_ENG17_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
++#define VM_INVALIDATE_ENG17_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
++//VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32
++#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
++#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32
++#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
++#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
++//VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
++#define VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
++#define VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32
++#define VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
++#define VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
++//VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32
++#define VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
++#define VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32
++#define VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
++#define VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
++//VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32
++#define VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
++#define VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32
++#define VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
++#define VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
++//VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32
++#define VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
++#define VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32
++#define VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
++#define VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
++//VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32
++#define VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
++#define VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32
++#define VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
++#define VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
++//VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32
++#define VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
++#define VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32
++#define VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
++#define VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
++//VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32
++#define VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
++#define VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32
++#define VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
++#define VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
++//VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32
++#define VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
++#define VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32
++#define VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
++#define VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
++//VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32
++#define VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
++#define VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32
++#define VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
++#define VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
++//VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32
++#define VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
++#define VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32
++#define VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
++#define VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
++//VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32
++#define VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
++#define VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32
++#define VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
++#define VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
++//VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32
++#define VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
++#define VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32
++#define VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
++#define VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
++//VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32
++#define VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
++#define VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32
++#define VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
++#define VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
++//VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32
++#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
++#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32
++#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
++#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
++//VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32
++#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
++#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32
++#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
++#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
++//VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32
++#define VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
++#define VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32
++#define VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
++#define VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
++//VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32
++#define VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
++#define VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32
++#define VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
++#define VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
++//VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32
++#define VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
++#define VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32
++#define VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
++#define VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
++//VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32
++#define VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
++#define VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32
++#define VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
++#define VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
++//VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32
++#define VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
++#define VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32
++#define VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
++#define VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
++//VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32
++#define VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
++#define VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32
++#define VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
++#define VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
++//VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32
++#define VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
++#define VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32
++#define VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
++#define VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
++//VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32
++#define VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
++#define VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32
++#define VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
++#define VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
++//VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32
++#define VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
++#define VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32
++#define VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
++#define VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
++//VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32
++#define VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
++#define VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32
++#define VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
++#define VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
++//VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32
++#define VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
++#define VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32
++#define VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
++#define VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
++//VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32
++#define VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
++#define VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32
++#define VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
++#define VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
++//VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32
++#define VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
++#define VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32
++#define VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
++#define VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
++//VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32
++#define VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
++#define VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32
++#define VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
++#define VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
++//VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32
++#define VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
++#define VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32
++#define VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
++#define VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
++//VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32
++#define VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
++#define VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32
++#define VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
++#define VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
++//VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32
++#define VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
++#define VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32
++#define VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
++#define VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
++//VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32
++#define VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
++#define VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32
++#define VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
++#define VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
++//VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32
++#define VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
++#define VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32
++#define VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
++#define VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
++//VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32
++#define VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
++#define VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32
++#define VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
++#define VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
++//VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32
++#define VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
++#define VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32
++#define VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
++#define VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
++//VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32
++#define VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
++#define VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32
++#define VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
++#define VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
++//VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32
++#define VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
++#define VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32
++#define VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
++#define VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
++//VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32
++#define VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
++#define VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32
++#define VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
++#define VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
++//VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32
++#define VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
++#define VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32
++#define VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
++#define VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
++//VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32
++#define VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
++#define VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32
++#define VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
++#define VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
++//VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32
++#define VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
++#define VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32
++#define VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
++#define VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
++//VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32
++#define VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
++#define VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32
++#define VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
++#define VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
++//VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32
++#define VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
++#define VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32
++#define VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
++#define VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
++//VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32
++#define VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
++#define VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32
++#define VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
++#define VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
++//VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32
++#define VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
++#define VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32
++#define VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
++#define VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
++//VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32
++#define VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
++#define VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
++//VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32
++#define VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
++#define VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
++
++
++// addressBlock: gc_utcl2_vmsharedpfdec
++//MC_VM_NB_MMIOBASE
++#define MC_VM_NB_MMIOBASE__MMIOBASE__SHIFT 0x0
++#define MC_VM_NB_MMIOBASE__MMIOBASE_MASK 0xFFFFFFFFL
++//MC_VM_NB_MMIOLIMIT
++#define MC_VM_NB_MMIOLIMIT__MMIOLIMIT__SHIFT 0x0
++#define MC_VM_NB_MMIOLIMIT__MMIOLIMIT_MASK 0xFFFFFFFFL
++//MC_VM_NB_PCI_CTRL
++#define MC_VM_NB_PCI_CTRL__MMIOENABLE__SHIFT 0x17
++#define MC_VM_NB_PCI_CTRL__MMIOENABLE_MASK 0x00800000L
++//MC_VM_NB_PCI_ARB
++#define MC_VM_NB_PCI_ARB__VGA_HOLE__SHIFT 0x3
++#define MC_VM_NB_PCI_ARB__VGA_HOLE_MASK 0x00000008L
++//MC_VM_NB_TOP_OF_DRAM_SLOT1
++#define MC_VM_NB_TOP_OF_DRAM_SLOT1__TOP_OF_DRAM__SHIFT 0x17
++#define MC_VM_NB_TOP_OF_DRAM_SLOT1__TOP_OF_DRAM_MASK 0xFF800000L
++//MC_VM_NB_LOWER_TOP_OF_DRAM2
++#define MC_VM_NB_LOWER_TOP_OF_DRAM2__ENABLE__SHIFT 0x0
++#define MC_VM_NB_LOWER_TOP_OF_DRAM2__LOWER_TOM2__SHIFT 0x17
++#define MC_VM_NB_LOWER_TOP_OF_DRAM2__ENABLE_MASK 0x00000001L
++#define MC_VM_NB_LOWER_TOP_OF_DRAM2__LOWER_TOM2_MASK 0xFF800000L
++//MC_VM_NB_UPPER_TOP_OF_DRAM2
++#define MC_VM_NB_UPPER_TOP_OF_DRAM2__UPPER_TOM2__SHIFT 0x0
++#define MC_VM_NB_UPPER_TOP_OF_DRAM2__UPPER_TOM2_MASK 0x00000FFFL
++//MC_VM_FB_OFFSET
++#define MC_VM_FB_OFFSET__FB_OFFSET__SHIFT 0x0
++#define MC_VM_FB_OFFSET__FB_OFFSET_MASK 0x00FFFFFFL
++//MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB
++#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB__PHYSICAL_PAGE_NUMBER_LSB__SHIFT 0x0
++#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB__PHYSICAL_PAGE_NUMBER_LSB_MASK 0xFFFFFFFFL
++//MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB
++#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB__PHYSICAL_PAGE_NUMBER_MSB__SHIFT 0x0
++#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB__PHYSICAL_PAGE_NUMBER_MSB_MASK 0x0000000FL
++//MC_VM_STEERING
++#define MC_VM_STEERING__DEFAULT_STEERING__SHIFT 0x0
++#define MC_VM_STEERING__DEFAULT_STEERING_MASK 0x00000003L
++//MC_SHARED_VIRT_RESET_REQ
++#define MC_SHARED_VIRT_RESET_REQ__VF__SHIFT 0x0
++#define MC_SHARED_VIRT_RESET_REQ__PF__SHIFT 0x1f
++#define MC_SHARED_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
++#define MC_SHARED_VIRT_RESET_REQ__PF_MASK 0x80000000L
++//MC_MEM_POWER_LS
++#define MC_MEM_POWER_LS__LS_SETUP__SHIFT 0x0
++#define MC_MEM_POWER_LS__LS_HOLD__SHIFT 0x6
++#define MC_MEM_POWER_LS__LS_SETUP_MASK 0x0000003FL
++#define MC_MEM_POWER_LS__LS_HOLD_MASK 0x00000FC0L
++//MC_VM_CACHEABLE_DRAM_ADDRESS_START
++#define MC_VM_CACHEABLE_DRAM_ADDRESS_START__ADDRESS__SHIFT 0x0
++#define MC_VM_CACHEABLE_DRAM_ADDRESS_START__ADDRESS_MASK 0x000FFFFFL
++//MC_VM_CACHEABLE_DRAM_ADDRESS_END
++#define MC_VM_CACHEABLE_DRAM_ADDRESS_END__ADDRESS__SHIFT 0x0
++#define MC_VM_CACHEABLE_DRAM_ADDRESS_END__ADDRESS_MASK 0x000FFFFFL
++//MC_VM_APT_CNTL
++#define MC_VM_APT_CNTL__FORCE_MTYPE_UC__SHIFT 0x0
++#define MC_VM_APT_CNTL__DIRECT_SYSTEM_EN__SHIFT 0x1
++#define MC_VM_APT_CNTL__FORCE_MTYPE_UC_MASK 0x00000001L
++#define MC_VM_APT_CNTL__DIRECT_SYSTEM_EN_MASK 0x00000002L
++//MC_VM_LOCAL_HBM_ADDRESS_START
++#define MC_VM_LOCAL_HBM_ADDRESS_START__ADDRESS__SHIFT 0x0
++#define MC_VM_LOCAL_HBM_ADDRESS_START__ADDRESS_MASK 0x000FFFFFL
++//MC_VM_LOCAL_HBM_ADDRESS_END
++#define MC_VM_LOCAL_HBM_ADDRESS_END__ADDRESS__SHIFT 0x0
++#define MC_VM_LOCAL_HBM_ADDRESS_END__ADDRESS_MASK 0x000FFFFFL
++//MC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL
++#define MC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL__LOCK__SHIFT 0x0
++#define MC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL__LOCK_MASK 0x00000001L
++
++
++// addressBlock: gc_utcl2_vmsharedvcdec
++//MC_VM_FB_LOCATION_BASE
++#define MC_VM_FB_LOCATION_BASE__FB_BASE__SHIFT 0x0
++#define MC_VM_FB_LOCATION_BASE__FB_BASE_MASK 0x00FFFFFFL
++//MC_VM_FB_LOCATION_TOP
++#define MC_VM_FB_LOCATION_TOP__FB_TOP__SHIFT 0x0
++#define MC_VM_FB_LOCATION_TOP__FB_TOP_MASK 0x00FFFFFFL
++//MC_VM_AGP_TOP
++#define MC_VM_AGP_TOP__AGP_TOP__SHIFT 0x0
++#define MC_VM_AGP_TOP__AGP_TOP_MASK 0x00FFFFFFL
++//MC_VM_AGP_BOT
++#define MC_VM_AGP_BOT__AGP_BOT__SHIFT 0x0
++#define MC_VM_AGP_BOT__AGP_BOT_MASK 0x00FFFFFFL
++//MC_VM_AGP_BASE
++#define MC_VM_AGP_BASE__AGP_BASE__SHIFT 0x0
++#define MC_VM_AGP_BASE__AGP_BASE_MASK 0x00FFFFFFL
++//MC_VM_SYSTEM_APERTURE_LOW_ADDR
++#define MC_VM_SYSTEM_APERTURE_LOW_ADDR__LOGICAL_ADDR__SHIFT 0x0
++#define MC_VM_SYSTEM_APERTURE_LOW_ADDR__LOGICAL_ADDR_MASK 0x3FFFFFFFL
++//MC_VM_SYSTEM_APERTURE_HIGH_ADDR
++#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR__LOGICAL_ADDR__SHIFT 0x0
++#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR__LOGICAL_ADDR_MASK 0x3FFFFFFFL
++//MC_VM_MX_L1_TLB_CNTL
++#define MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB__SHIFT 0x0
++#define MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE__SHIFT 0x3
++#define MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT 0x5
++#define MC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL__SHIFT 0x6
++#define MC_VM_MX_L1_TLB_CNTL__ECO_BITS__SHIFT 0x7
++#define MC_VM_MX_L1_TLB_CNTL__MTYPE__SHIFT 0xb
++#define MC_VM_MX_L1_TLB_CNTL__ATC_EN__SHIFT 0xd
++#define MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK 0x00000001L
++#define MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK 0x00000018L
++#define MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS_MASK 0x00000020L
++#define MC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK 0x00000040L
++#define MC_VM_MX_L1_TLB_CNTL__ECO_BITS_MASK 0x00000780L
++#define MC_VM_MX_L1_TLB_CNTL__MTYPE_MASK 0x00001800L
++#define MC_VM_MX_L1_TLB_CNTL__ATC_EN_MASK 0x00002000L
++
++
++// addressBlock: gc_ea_gceadec
++//GCEA_DRAM_RD_CLI2GRP_MAP0
++#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
++#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
++#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
++#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
++#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
++#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
++#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
++#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
++#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
++#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
++#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
++#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
++#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
++#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
++#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
++#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
++#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
++#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
++#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
++#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
++#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
++#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
++#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
++#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
++#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
++#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
++#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
++#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
++#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
++#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
++#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
++#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
++//GCEA_DRAM_RD_CLI2GRP_MAP1
++#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
++#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
++#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
++#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
++#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
++#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
++#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
++#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
++#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
++#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
++#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
++#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
++#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
++#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
++#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
++#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
++#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
++#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
++#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
++#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
++#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
++#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
++#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
++#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
++#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
++#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
++#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
++#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
++#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
++#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
++#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
++#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
++//GCEA_DRAM_WR_CLI2GRP_MAP0
++#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
++#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
++#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
++#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
++#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
++#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
++#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
++#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
++#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
++#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
++#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
++#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
++#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
++#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
++#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
++#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
++#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
++#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
++#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
++#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
++#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
++#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
++#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
++#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
++#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
++#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
++#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
++#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
++#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
++#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
++#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
++#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
++//GCEA_DRAM_WR_CLI2GRP_MAP1
++#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
++#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
++#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
++#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
++#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
++#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
++#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
++#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
++#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
++#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
++#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
++#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
++#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
++#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
++#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
++#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
++#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
++#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
++#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
++#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
++#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
++#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
++#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
++#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
++#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
++#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
++#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
++#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
++#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
++#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
++#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
++#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
++//GCEA_DRAM_RD_GRP2VC_MAP
++#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
++#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
++#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
++#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
++#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
++#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
++#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
++#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
++//GCEA_DRAM_WR_GRP2VC_MAP
++#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
++#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
++#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
++#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
++#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
++#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
++#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
++#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
++//GCEA_DRAM_RD_LAZY
++#define GCEA_DRAM_RD_LAZY__GROUP0_DELAY__SHIFT 0x0
++#define GCEA_DRAM_RD_LAZY__GROUP1_DELAY__SHIFT 0x3
++#define GCEA_DRAM_RD_LAZY__GROUP2_DELAY__SHIFT 0x6
++#define GCEA_DRAM_RD_LAZY__GROUP3_DELAY__SHIFT 0x9
++#define GCEA_DRAM_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L
++#define GCEA_DRAM_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L
++#define GCEA_DRAM_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L
++#define GCEA_DRAM_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L
++//GCEA_DRAM_WR_LAZY
++#define GCEA_DRAM_WR_LAZY__GROUP0_DELAY__SHIFT 0x0
++#define GCEA_DRAM_WR_LAZY__GROUP1_DELAY__SHIFT 0x3
++#define GCEA_DRAM_WR_LAZY__GROUP2_DELAY__SHIFT 0x6
++#define GCEA_DRAM_WR_LAZY__GROUP3_DELAY__SHIFT 0x9
++#define GCEA_DRAM_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L
++#define GCEA_DRAM_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L
++#define GCEA_DRAM_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L
++#define GCEA_DRAM_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L
++//GCEA_DRAM_RD_CAM_CNTL
++#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
++#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
++#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
++#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
++#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
++#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
++#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
++#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
++#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
++#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
++#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
++#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
++#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
++#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
++#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
++#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
++//GCEA_DRAM_WR_CAM_CNTL
++#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
++#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
++#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
++#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
++#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
++#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
++#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
++#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
++#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
++#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
++#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
++#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
++#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
++#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
++#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
++#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
++//GCEA_DRAM_PAGE_BURST
++#define GCEA_DRAM_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0
++#define GCEA_DRAM_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8
++#define GCEA_DRAM_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10
++#define GCEA_DRAM_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18
++#define GCEA_DRAM_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL
++#define GCEA_DRAM_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
++#define GCEA_DRAM_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
++#define GCEA_DRAM_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L
++//GCEA_DRAM_RD_PRI_AGE
++#define GCEA_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
++#define GCEA_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
++#define GCEA_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
++#define GCEA_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
++#define GCEA_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
++#define GCEA_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
++#define GCEA_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
++#define GCEA_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
++#define GCEA_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
++#define GCEA_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
++#define GCEA_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
++#define GCEA_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
++#define GCEA_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
++#define GCEA_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
++#define GCEA_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
++#define GCEA_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
++//GCEA_DRAM_WR_PRI_AGE
++#define GCEA_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
++#define GCEA_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
++#define GCEA_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
++#define GCEA_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
++#define GCEA_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
++#define GCEA_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
++#define GCEA_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
++#define GCEA_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
++#define GCEA_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
++#define GCEA_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
++#define GCEA_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
++#define GCEA_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
++#define GCEA_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
++#define GCEA_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
++#define GCEA_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
++#define GCEA_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
++//GCEA_DRAM_RD_PRI_QUEUING
++#define GCEA_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
++#define GCEA_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
++#define GCEA_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
++#define GCEA_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
++#define GCEA_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
++#define GCEA_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
++#define GCEA_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
++#define GCEA_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
++//GCEA_DRAM_WR_PRI_QUEUING
++#define GCEA_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
++#define GCEA_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
++#define GCEA_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
++#define GCEA_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
++#define GCEA_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
++#define GCEA_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
++#define GCEA_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
++#define GCEA_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
++//GCEA_DRAM_RD_PRI_FIXED
++#define GCEA_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
++#define GCEA_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
++#define GCEA_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
++#define GCEA_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
++#define GCEA_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
++#define GCEA_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
++#define GCEA_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
++#define GCEA_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
++//GCEA_DRAM_WR_PRI_FIXED
++#define GCEA_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
++#define GCEA_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
++#define GCEA_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
++#define GCEA_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
++#define GCEA_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
++#define GCEA_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
++#define GCEA_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
++#define GCEA_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
++//GCEA_DRAM_RD_PRI_URGENCY
++#define GCEA_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
++#define GCEA_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
++#define GCEA_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
++#define GCEA_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
++#define GCEA_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
++#define GCEA_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
++#define GCEA_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
++#define GCEA_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
++#define GCEA_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
++#define GCEA_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
++#define GCEA_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
++#define GCEA_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
++#define GCEA_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
++#define GCEA_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
++#define GCEA_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
++#define GCEA_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
++//GCEA_DRAM_WR_PRI_URGENCY
++#define GCEA_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
++#define GCEA_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
++#define GCEA_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
++#define GCEA_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
++#define GCEA_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
++#define GCEA_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
++#define GCEA_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
++#define GCEA_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
++#define GCEA_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
++#define GCEA_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
++#define GCEA_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
++#define GCEA_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
++#define GCEA_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
++#define GCEA_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
++#define GCEA_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
++#define GCEA_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
++//GCEA_DRAM_RD_PRI_QUANT_PRI1
++#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
++#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
++#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
++#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
++#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
++#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
++#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
++#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
++//GCEA_DRAM_RD_PRI_QUANT_PRI2
++#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
++#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
++#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
++#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
++#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
++#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
++#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
++#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
++//GCEA_DRAM_RD_PRI_QUANT_PRI3
++#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
++#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
++#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
++#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
++#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
++#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
++#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
++#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
++//GCEA_DRAM_WR_PRI_QUANT_PRI1
++#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
++#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
++#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
++#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
++#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
++#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
++#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
++#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
++//GCEA_DRAM_WR_PRI_QUANT_PRI2
++#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
++#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
++#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
++#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
++#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
++#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
++#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
++#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
++//GCEA_DRAM_WR_PRI_QUANT_PRI3
++#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
++#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
++#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
++#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
++#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
++#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
++#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
++#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
++//GCEA_ADDRNORM_BASE_ADDR0
++#define GCEA_ADDRNORM_BASE_ADDR0__ADDR_RNG_VAL__SHIFT 0x0
++#define GCEA_ADDRNORM_BASE_ADDR0__LGCY_MMIO_HOLE_EN__SHIFT 0x1
++#define GCEA_ADDRNORM_BASE_ADDR0__INTLV_NUM_CHAN__SHIFT 0x4
++#define GCEA_ADDRNORM_BASE_ADDR0__INTLV_ADDR_SEL__SHIFT 0x8
++#define GCEA_ADDRNORM_BASE_ADDR0__BASE_ADDR__SHIFT 0xc
++#define GCEA_ADDRNORM_BASE_ADDR0__ADDR_RNG_VAL_MASK 0x00000001L
++#define GCEA_ADDRNORM_BASE_ADDR0__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
++#define GCEA_ADDRNORM_BASE_ADDR0__INTLV_NUM_CHAN_MASK 0x000000F0L
++#define GCEA_ADDRNORM_BASE_ADDR0__INTLV_ADDR_SEL_MASK 0x00000700L
++#define GCEA_ADDRNORM_BASE_ADDR0__BASE_ADDR_MASK 0xFFFFF000L
++//GCEA_ADDRNORM_LIMIT_ADDR0
++#define GCEA_ADDRNORM_LIMIT_ADDR0__DST_FABRIC_ID__SHIFT 0x0
++#define GCEA_ADDRNORM_LIMIT_ADDR0__INTLV_NUM_SOCKETS__SHIFT 0x8
++#define GCEA_ADDRNORM_LIMIT_ADDR0__INTLV_NUM_DIES__SHIFT 0xa
++#define GCEA_ADDRNORM_LIMIT_ADDR0__LIMIT_ADDR__SHIFT 0xc
++#define GCEA_ADDRNORM_LIMIT_ADDR0__DST_FABRIC_ID_MASK 0x0000000FL
++#define GCEA_ADDRNORM_LIMIT_ADDR0__INTLV_NUM_SOCKETS_MASK 0x00000100L
++#define GCEA_ADDRNORM_LIMIT_ADDR0__INTLV_NUM_DIES_MASK 0x00000C00L
++#define GCEA_ADDRNORM_LIMIT_ADDR0__LIMIT_ADDR_MASK 0xFFFFF000L
++//GCEA_ADDRNORM_BASE_ADDR1
++#define GCEA_ADDRNORM_BASE_ADDR1__ADDR_RNG_VAL__SHIFT 0x0
++#define GCEA_ADDRNORM_BASE_ADDR1__LGCY_MMIO_HOLE_EN__SHIFT 0x1
++#define GCEA_ADDRNORM_BASE_ADDR1__INTLV_NUM_CHAN__SHIFT 0x4
++#define GCEA_ADDRNORM_BASE_ADDR1__INTLV_ADDR_SEL__SHIFT 0x8
++#define GCEA_ADDRNORM_BASE_ADDR1__BASE_ADDR__SHIFT 0xc
++#define GCEA_ADDRNORM_BASE_ADDR1__ADDR_RNG_VAL_MASK 0x00000001L
++#define GCEA_ADDRNORM_BASE_ADDR1__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
++#define GCEA_ADDRNORM_BASE_ADDR1__INTLV_NUM_CHAN_MASK 0x000000F0L
++#define GCEA_ADDRNORM_BASE_ADDR1__INTLV_ADDR_SEL_MASK 0x00000700L
++#define GCEA_ADDRNORM_BASE_ADDR1__BASE_ADDR_MASK 0xFFFFF000L
++//GCEA_ADDRNORM_LIMIT_ADDR1
++#define GCEA_ADDRNORM_LIMIT_ADDR1__DST_FABRIC_ID__SHIFT 0x0
++#define GCEA_ADDRNORM_LIMIT_ADDR1__INTLV_NUM_SOCKETS__SHIFT 0x8
++#define GCEA_ADDRNORM_LIMIT_ADDR1__INTLV_NUM_DIES__SHIFT 0xa
++#define GCEA_ADDRNORM_LIMIT_ADDR1__LIMIT_ADDR__SHIFT 0xc
++#define GCEA_ADDRNORM_LIMIT_ADDR1__DST_FABRIC_ID_MASK 0x0000000FL
++#define GCEA_ADDRNORM_LIMIT_ADDR1__INTLV_NUM_SOCKETS_MASK 0x00000100L
++#define GCEA_ADDRNORM_LIMIT_ADDR1__INTLV_NUM_DIES_MASK 0x00000C00L
++#define GCEA_ADDRNORM_LIMIT_ADDR1__LIMIT_ADDR_MASK 0xFFFFF000L
++//GCEA_ADDRNORM_OFFSET_ADDR1
++#define GCEA_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_EN__SHIFT 0x0
++#define GCEA_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET__SHIFT 0x14
++#define GCEA_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_EN_MASK 0x00000001L
++#define GCEA_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_MASK 0xFFF00000L
++//GCEA_ADDRNORM_HOLE_CNTL
++#define GCEA_ADDRNORM_HOLE_CNTL__DRAM_HOLE_VALID__SHIFT 0x0
++#define GCEA_ADDRNORM_HOLE_CNTL__DRAM_HOLE_OFFSET__SHIFT 0x7
++#define GCEA_ADDRNORM_HOLE_CNTL__DRAM_HOLE_VALID_MASK 0x00000001L
++#define GCEA_ADDRNORM_HOLE_CNTL__DRAM_HOLE_OFFSET_MASK 0x0000FF80L
++//GCEA_ADDRDEC_BANK_CFG
++#define GCEA_ADDRDEC_BANK_CFG__BANK_MASK_DRAM__SHIFT 0x0
++#define GCEA_ADDRDEC_BANK_CFG__BANK_MASK_GMI__SHIFT 0x5
++#define GCEA_ADDRDEC_BANK_CFG__BANKGROUP_SEL_DRAM__SHIFT 0xa
++#define GCEA_ADDRDEC_BANK_CFG__BANKGROUP_SEL_GMI__SHIFT 0xd
++#define GCEA_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_DRAM__SHIFT 0x10
++#define GCEA_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_GMI__SHIFT 0x11
++#define GCEA_ADDRDEC_BANK_CFG__BANK_MASK_DRAM_MASK 0x0000001FL
++#define GCEA_ADDRDEC_BANK_CFG__BANK_MASK_GMI_MASK 0x000003E0L
++#define GCEA_ADDRDEC_BANK_CFG__BANKGROUP_SEL_DRAM_MASK 0x00001C00L
++#define GCEA_ADDRDEC_BANK_CFG__BANKGROUP_SEL_GMI_MASK 0x0000E000L
++#define GCEA_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_DRAM_MASK 0x00010000L
++#define GCEA_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_GMI_MASK 0x00020000L
++//GCEA_ADDRDEC_MISC_CFG
++#define GCEA_ADDRDEC_MISC_CFG__VCM_EN0__SHIFT 0x0
++#define GCEA_ADDRDEC_MISC_CFG__VCM_EN1__SHIFT 0x1
++#define GCEA_ADDRDEC_MISC_CFG__VCM_EN2__SHIFT 0x2
++#define GCEA_ADDRDEC_MISC_CFG__VCM_EN3__SHIFT 0x3
++#define GCEA_ADDRDEC_MISC_CFG__VCM_EN4__SHIFT 0x4
++#define GCEA_ADDRDEC_MISC_CFG__PCH_MASK_DRAM__SHIFT 0x8
++#define GCEA_ADDRDEC_MISC_CFG__PCH_MASK_GMI__SHIFT 0x9
++#define GCEA_ADDRDEC_MISC_CFG__CH_MASK_DRAM__SHIFT 0xc
++#define GCEA_ADDRDEC_MISC_CFG__CH_MASK_GMI__SHIFT 0x10
++#define GCEA_ADDRDEC_MISC_CFG__CS_MASK_DRAM__SHIFT 0x14
++#define GCEA_ADDRDEC_MISC_CFG__CS_MASK_GMI__SHIFT 0x16
++#define GCEA_ADDRDEC_MISC_CFG__RM_MASK_DRAM__SHIFT 0x18
++#define GCEA_ADDRDEC_MISC_CFG__RM_MASK_GMI__SHIFT 0x1b
++#define GCEA_ADDRDEC_MISC_CFG__VCM_EN0_MASK 0x00000001L
++#define GCEA_ADDRDEC_MISC_CFG__VCM_EN1_MASK 0x00000002L
++#define GCEA_ADDRDEC_MISC_CFG__VCM_EN2_MASK 0x00000004L
++#define GCEA_ADDRDEC_MISC_CFG__VCM_EN3_MASK 0x00000008L
++#define GCEA_ADDRDEC_MISC_CFG__VCM_EN4_MASK 0x00000010L
++#define GCEA_ADDRDEC_MISC_CFG__PCH_MASK_DRAM_MASK 0x00000100L
++#define GCEA_ADDRDEC_MISC_CFG__PCH_MASK_GMI_MASK 0x00000200L
++#define GCEA_ADDRDEC_MISC_CFG__CH_MASK_DRAM_MASK 0x0000F000L
++#define GCEA_ADDRDEC_MISC_CFG__CH_MASK_GMI_MASK 0x000F0000L
++#define GCEA_ADDRDEC_MISC_CFG__CS_MASK_DRAM_MASK 0x00300000L
++#define GCEA_ADDRDEC_MISC_CFG__CS_MASK_GMI_MASK 0x00C00000L
++#define GCEA_ADDRDEC_MISC_CFG__RM_MASK_DRAM_MASK 0x07000000L
++#define GCEA_ADDRDEC_MISC_CFG__RM_MASK_GMI_MASK 0x38000000L
++//GCEA_ADDRDECDRAM_ADDR_HASH_BANK0
++#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK0__XOR_ENABLE__SHIFT 0x0
++#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK0__COL_XOR__SHIFT 0x1
++#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK0__ROW_XOR__SHIFT 0xe
++#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK0__XOR_ENABLE_MASK 0x00000001L
++#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK0__COL_XOR_MASK 0x00003FFEL
++#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK0__ROW_XOR_MASK 0xFFFFC000L
++//GCEA_ADDRDECDRAM_ADDR_HASH_BANK1
++#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK1__XOR_ENABLE__SHIFT 0x0
++#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK1__COL_XOR__SHIFT 0x1
++#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK1__ROW_XOR__SHIFT 0xe
++#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK1__XOR_ENABLE_MASK 0x00000001L
++#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK1__COL_XOR_MASK 0x00003FFEL
++#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK1__ROW_XOR_MASK 0xFFFFC000L
++//GCEA_ADDRDECDRAM_ADDR_HASH_BANK2
++#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK2__XOR_ENABLE__SHIFT 0x0
++#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK2__COL_XOR__SHIFT 0x1
++#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK2__ROW_XOR__SHIFT 0xe
++#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK2__XOR_ENABLE_MASK 0x00000001L
++#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK2__COL_XOR_MASK 0x00003FFEL
++#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK2__ROW_XOR_MASK 0xFFFFC000L
++//GCEA_ADDRDECDRAM_ADDR_HASH_BANK3
++#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK3__XOR_ENABLE__SHIFT 0x0
++#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK3__COL_XOR__SHIFT 0x1
++#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK3__ROW_XOR__SHIFT 0xe
++#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK3__XOR_ENABLE_MASK 0x00000001L
++#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK3__COL_XOR_MASK 0x00003FFEL
++#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK3__ROW_XOR_MASK 0xFFFFC000L
++//GCEA_ADDRDECDRAM_ADDR_HASH_BANK4
++#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK4__XOR_ENABLE__SHIFT 0x0
++#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK4__COL_XOR__SHIFT 0x1
++#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK4__ROW_XOR__SHIFT 0xe
++#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK4__XOR_ENABLE_MASK 0x00000001L
++#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK4__COL_XOR_MASK 0x00003FFEL
++#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK4__ROW_XOR_MASK 0xFFFFC000L
++//GCEA_ADDRDECDRAM_ADDR_HASH_PC
++#define GCEA_ADDRDECDRAM_ADDR_HASH_PC__XOR_ENABLE__SHIFT 0x0
++#define GCEA_ADDRDECDRAM_ADDR_HASH_PC__COL_XOR__SHIFT 0x1
++#define GCEA_ADDRDECDRAM_ADDR_HASH_PC__ROW_XOR__SHIFT 0xe
++#define GCEA_ADDRDECDRAM_ADDR_HASH_PC__XOR_ENABLE_MASK 0x00000001L
++#define GCEA_ADDRDECDRAM_ADDR_HASH_PC__COL_XOR_MASK 0x00003FFEL
++#define GCEA_ADDRDECDRAM_ADDR_HASH_PC__ROW_XOR_MASK 0xFFFFC000L
++//GCEA_ADDRDECDRAM_ADDR_HASH_PC2
++#define GCEA_ADDRDECDRAM_ADDR_HASH_PC2__BANK_XOR__SHIFT 0x0
++#define GCEA_ADDRDECDRAM_ADDR_HASH_PC2__BANK_XOR_MASK 0x0000001FL
++//GCEA_ADDRDECDRAM_ADDR_HASH_CS0
++#define GCEA_ADDRDECDRAM_ADDR_HASH_CS0__XOR_ENABLE__SHIFT 0x0
++#define GCEA_ADDRDECDRAM_ADDR_HASH_CS0__NA_XOR__SHIFT 0x1
++#define GCEA_ADDRDECDRAM_ADDR_HASH_CS0__XOR_ENABLE_MASK 0x00000001L
++#define GCEA_ADDRDECDRAM_ADDR_HASH_CS0__NA_XOR_MASK 0xFFFFFFFEL
++//GCEA_ADDRDECDRAM_ADDR_HASH_CS1
++#define GCEA_ADDRDECDRAM_ADDR_HASH_CS1__XOR_ENABLE__SHIFT 0x0
++#define GCEA_ADDRDECDRAM_ADDR_HASH_CS1__NA_XOR__SHIFT 0x1
++#define GCEA_ADDRDECDRAM_ADDR_HASH_CS1__XOR_ENABLE_MASK 0x00000001L
++#define GCEA_ADDRDECDRAM_ADDR_HASH_CS1__NA_XOR_MASK 0xFFFFFFFEL
++//GCEA_ADDRDECDRAM_HARVEST_ENABLE
++#define GCEA_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_EN__SHIFT 0x0
++#define GCEA_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_VAL__SHIFT 0x1
++#define GCEA_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_EN__SHIFT 0x2
++#define GCEA_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_VAL__SHIFT 0x3
++#define GCEA_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_EN_MASK 0x00000001L
++#define GCEA_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_VAL_MASK 0x00000002L
++#define GCEA_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_EN_MASK 0x00000004L
++#define GCEA_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_VAL_MASK 0x00000008L
++//GCEA_ADDRDEC0_BASE_ADDR_CS0
++#define GCEA_ADDRDEC0_BASE_ADDR_CS0__CS_ENABLE__SHIFT 0x0
++#define GCEA_ADDRDEC0_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1
++#define GCEA_ADDRDEC0_BASE_ADDR_CS0__CS_ENABLE_MASK 0x00000001L
++#define GCEA_ADDRDEC0_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL
++//GCEA_ADDRDEC0_BASE_ADDR_CS1
++#define GCEA_ADDRDEC0_BASE_ADDR_CS1__CS_ENABLE__SHIFT 0x0
++#define GCEA_ADDRDEC0_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1
++#define GCEA_ADDRDEC0_BASE_ADDR_CS1__CS_ENABLE_MASK 0x00000001L
++#define GCEA_ADDRDEC0_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL
++//GCEA_ADDRDEC0_BASE_ADDR_CS2
++#define GCEA_ADDRDEC0_BASE_ADDR_CS2__CS_ENABLE__SHIFT 0x0
++#define GCEA_ADDRDEC0_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1
++#define GCEA_ADDRDEC0_BASE_ADDR_CS2__CS_ENABLE_MASK 0x00000001L
++#define GCEA_ADDRDEC0_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL
++//GCEA_ADDRDEC0_BASE_ADDR_CS3
++#define GCEA_ADDRDEC0_BASE_ADDR_CS3__CS_ENABLE__SHIFT 0x0
++#define GCEA_ADDRDEC0_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1
++#define GCEA_ADDRDEC0_BASE_ADDR_CS3__CS_ENABLE_MASK 0x00000001L
++#define GCEA_ADDRDEC0_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL
++//GCEA_ADDRDEC0_BASE_ADDR_SECCS0
++#define GCEA_ADDRDEC0_BASE_ADDR_SECCS0__CS_ENABLE__SHIFT 0x0
++#define GCEA_ADDRDEC0_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1
++#define GCEA_ADDRDEC0_BASE_ADDR_SECCS0__CS_ENABLE_MASK 0x00000001L
++#define GCEA_ADDRDEC0_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL
++//GCEA_ADDRDEC0_BASE_ADDR_SECCS1
++#define GCEA_ADDRDEC0_BASE_ADDR_SECCS1__CS_ENABLE__SHIFT 0x0
++#define GCEA_ADDRDEC0_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1
++#define GCEA_ADDRDEC0_BASE_ADDR_SECCS1__CS_ENABLE_MASK 0x00000001L
++#define GCEA_ADDRDEC0_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL
++//GCEA_ADDRDEC0_BASE_ADDR_SECCS2
++#define GCEA_ADDRDEC0_BASE_ADDR_SECCS2__CS_ENABLE__SHIFT 0x0
++#define GCEA_ADDRDEC0_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1
++#define GCEA_ADDRDEC0_BASE_ADDR_SECCS2__CS_ENABLE_MASK 0x00000001L
++#define GCEA_ADDRDEC0_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL
++//GCEA_ADDRDEC0_BASE_ADDR_SECCS3
++#define GCEA_ADDRDEC0_BASE_ADDR_SECCS3__CS_ENABLE__SHIFT 0x0
++#define GCEA_ADDRDEC0_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1
++#define GCEA_ADDRDEC0_BASE_ADDR_SECCS3__CS_ENABLE_MASK 0x00000001L
++#define GCEA_ADDRDEC0_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL
++//GCEA_ADDRDEC0_ADDR_MASK_CS01
++#define GCEA_ADDRDEC0_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1
++#define GCEA_ADDRDEC0_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL
++//GCEA_ADDRDEC0_ADDR_MASK_CS23
++#define GCEA_ADDRDEC0_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1
++#define GCEA_ADDRDEC0_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL
++//GCEA_ADDRDEC0_ADDR_MASK_SECCS01
++#define GCEA_ADDRDEC0_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1
++#define GCEA_ADDRDEC0_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL
++//GCEA_ADDRDEC0_ADDR_MASK_SECCS23
++#define GCEA_ADDRDEC0_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1
++#define GCEA_ADDRDEC0_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL
++//GCEA_ADDRDEC0_ADDR_CFG_CS01
++#define GCEA_ADDRDEC0_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x2
++#define GCEA_ADDRDEC0_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4
++#define GCEA_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8
++#define GCEA_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc
++#define GCEA_ADDRDEC0_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10
++#define GCEA_ADDRDEC0_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14
++#define GCEA_ADDRDEC0_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000CL
++#define GCEA_ADDRDEC0_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L
++#define GCEA_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L
++#define GCEA_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L
++#define GCEA_ADDRDEC0_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L
++#define GCEA_ADDRDEC0_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L
++//GCEA_ADDRDEC0_ADDR_CFG_CS23
++#define GCEA_ADDRDEC0_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x2
++#define GCEA_ADDRDEC0_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4
++#define GCEA_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8
++#define GCEA_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc
++#define GCEA_ADDRDEC0_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10
++#define GCEA_ADDRDEC0_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14
++#define GCEA_ADDRDEC0_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000CL
++#define GCEA_ADDRDEC0_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L
++#define GCEA_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L
++#define GCEA_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L
++#define GCEA_ADDRDEC0_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L
++#define GCEA_ADDRDEC0_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L
++//GCEA_ADDRDEC0_ADDR_SEL_CS01
++#define GCEA_ADDRDEC0_ADDR_SEL_CS01__BANK0__SHIFT 0x0
++#define GCEA_ADDRDEC0_ADDR_SEL_CS01__BANK1__SHIFT 0x4
++#define GCEA_ADDRDEC0_ADDR_SEL_CS01__BANK2__SHIFT 0x8
++#define GCEA_ADDRDEC0_ADDR_SEL_CS01__BANK3__SHIFT 0xc
++#define GCEA_ADDRDEC0_ADDR_SEL_CS01__BANK4__SHIFT 0x10
++#define GCEA_ADDRDEC0_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18
++#define GCEA_ADDRDEC0_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c
++#define GCEA_ADDRDEC0_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL
++#define GCEA_ADDRDEC0_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L
++#define GCEA_ADDRDEC0_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L
++#define GCEA_ADDRDEC0_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L
++#define GCEA_ADDRDEC0_ADDR_SEL_CS01__BANK4_MASK 0x000F0000L
++#define GCEA_ADDRDEC0_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L
++#define GCEA_ADDRDEC0_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L
++//GCEA_ADDRDEC0_ADDR_SEL_CS23
++#define GCEA_ADDRDEC0_ADDR_SEL_CS23__BANK0__SHIFT 0x0
++#define GCEA_ADDRDEC0_ADDR_SEL_CS23__BANK1__SHIFT 0x4
++#define GCEA_ADDRDEC0_ADDR_SEL_CS23__BANK2__SHIFT 0x8
++#define GCEA_ADDRDEC0_ADDR_SEL_CS23__BANK3__SHIFT 0xc
++#define GCEA_ADDRDEC0_ADDR_SEL_CS23__BANK4__SHIFT 0x10
++#define GCEA_ADDRDEC0_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18
++#define GCEA_ADDRDEC0_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c
++#define GCEA_ADDRDEC0_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL
++#define GCEA_ADDRDEC0_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L
++#define GCEA_ADDRDEC0_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L
++#define GCEA_ADDRDEC0_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L
++#define GCEA_ADDRDEC0_ADDR_SEL_CS23__BANK4_MASK 0x000F0000L
++#define GCEA_ADDRDEC0_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L
++#define GCEA_ADDRDEC0_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L
++//GCEA_ADDRDEC0_COL_SEL_LO_CS01
++#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL0__SHIFT 0x0
++#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL1__SHIFT 0x4
++#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL2__SHIFT 0x8
++#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL3__SHIFT 0xc
++#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL4__SHIFT 0x10
++#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL5__SHIFT 0x14
++#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL6__SHIFT 0x18
++#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL7__SHIFT 0x1c
++#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL
++#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L
++#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L
++#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L
++#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L
++#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L
++#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L
++#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L
++//GCEA_ADDRDEC0_COL_SEL_LO_CS23
++#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL0__SHIFT 0x0
++#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL1__SHIFT 0x4
++#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL2__SHIFT 0x8
++#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL3__SHIFT 0xc
++#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL4__SHIFT 0x10
++#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL5__SHIFT 0x14
++#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL6__SHIFT 0x18
++#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL7__SHIFT 0x1c
++#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL
++#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L
++#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L
++#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L
++#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L
++#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L
++#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L
++#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L
++//GCEA_ADDRDEC0_COL_SEL_HI_CS01
++#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL8__SHIFT 0x0
++#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL9__SHIFT 0x4
++#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL10__SHIFT 0x8
++#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL11__SHIFT 0xc
++#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL12__SHIFT 0x10
++#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL13__SHIFT 0x14
++#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL14__SHIFT 0x18
++#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL15__SHIFT 0x1c
++#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL
++#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L
++#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L
++#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L
++#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L
++#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L
++#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L
++#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L
++//GCEA_ADDRDEC0_COL_SEL_HI_CS23
++#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL8__SHIFT 0x0
++#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL9__SHIFT 0x4
++#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL10__SHIFT 0x8
++#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL11__SHIFT 0xc
++#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL12__SHIFT 0x10
++#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL13__SHIFT 0x14
++#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL14__SHIFT 0x18
++#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL15__SHIFT 0x1c
++#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL
++#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L
++#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L
++#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L
++#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L
++#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L
++#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L
++#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L
++//GCEA_ADDRDEC0_RM_SEL_CS01
++#define GCEA_ADDRDEC0_RM_SEL_CS01__RM0__SHIFT 0x0
++#define GCEA_ADDRDEC0_RM_SEL_CS01__RM1__SHIFT 0x4
++#define GCEA_ADDRDEC0_RM_SEL_CS01__RM2__SHIFT 0x8
++#define GCEA_ADDRDEC0_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc
++#define GCEA_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
++#define GCEA_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
++#define GCEA_ADDRDEC0_RM_SEL_CS01__RM0_MASK 0x0000000FL
++#define GCEA_ADDRDEC0_RM_SEL_CS01__RM1_MASK 0x000000F0L
++#define GCEA_ADDRDEC0_RM_SEL_CS01__RM2_MASK 0x00000F00L
++#define GCEA_ADDRDEC0_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L
++#define GCEA_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
++#define GCEA_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
++//GCEA_ADDRDEC0_RM_SEL_CS23
++#define GCEA_ADDRDEC0_RM_SEL_CS23__RM0__SHIFT 0x0
++#define GCEA_ADDRDEC0_RM_SEL_CS23__RM1__SHIFT 0x4
++#define GCEA_ADDRDEC0_RM_SEL_CS23__RM2__SHIFT 0x8
++#define GCEA_ADDRDEC0_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc
++#define GCEA_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
++#define GCEA_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
++#define GCEA_ADDRDEC0_RM_SEL_CS23__RM0_MASK 0x0000000FL
++#define GCEA_ADDRDEC0_RM_SEL_CS23__RM1_MASK 0x000000F0L
++#define GCEA_ADDRDEC0_RM_SEL_CS23__RM2_MASK 0x00000F00L
++#define GCEA_ADDRDEC0_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L
++#define GCEA_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
++#define GCEA_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
++//GCEA_ADDRDEC0_RM_SEL_SECCS01
++#define GCEA_ADDRDEC0_RM_SEL_SECCS01__RM0__SHIFT 0x0
++#define GCEA_ADDRDEC0_RM_SEL_SECCS01__RM1__SHIFT 0x4
++#define GCEA_ADDRDEC0_RM_SEL_SECCS01__RM2__SHIFT 0x8
++#define GCEA_ADDRDEC0_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc
++#define GCEA_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
++#define GCEA_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
++#define GCEA_ADDRDEC0_RM_SEL_SECCS01__RM0_MASK 0x0000000FL
++#define GCEA_ADDRDEC0_RM_SEL_SECCS01__RM1_MASK 0x000000F0L
++#define GCEA_ADDRDEC0_RM_SEL_SECCS01__RM2_MASK 0x00000F00L
++#define GCEA_ADDRDEC0_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L
++#define GCEA_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
++#define GCEA_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
++//GCEA_ADDRDEC0_RM_SEL_SECCS23
++#define GCEA_ADDRDEC0_RM_SEL_SECCS23__RM0__SHIFT 0x0
++#define GCEA_ADDRDEC0_RM_SEL_SECCS23__RM1__SHIFT 0x4
++#define GCEA_ADDRDEC0_RM_SEL_SECCS23__RM2__SHIFT 0x8
++#define GCEA_ADDRDEC0_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc
++#define GCEA_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
++#define GCEA_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
++#define GCEA_ADDRDEC0_RM_SEL_SECCS23__RM0_MASK 0x0000000FL
++#define GCEA_ADDRDEC0_RM_SEL_SECCS23__RM1_MASK 0x000000F0L
++#define GCEA_ADDRDEC0_RM_SEL_SECCS23__RM2_MASK 0x00000F00L
++#define GCEA_ADDRDEC0_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L
++#define GCEA_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
++#define GCEA_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
++//GCEA_ADDRDEC1_BASE_ADDR_CS0
++#define GCEA_ADDRDEC1_BASE_ADDR_CS0__CS_ENABLE__SHIFT 0x0
++#define GCEA_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1
++#define GCEA_ADDRDEC1_BASE_ADDR_CS0__CS_ENABLE_MASK 0x00000001L
++#define GCEA_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL
++//GCEA_ADDRDEC1_BASE_ADDR_CS1
++#define GCEA_ADDRDEC1_BASE_ADDR_CS1__CS_ENABLE__SHIFT 0x0
++#define GCEA_ADDRDEC1_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1
++#define GCEA_ADDRDEC1_BASE_ADDR_CS1__CS_ENABLE_MASK 0x00000001L
++#define GCEA_ADDRDEC1_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL
++//GCEA_ADDRDEC1_BASE_ADDR_CS2
++#define GCEA_ADDRDEC1_BASE_ADDR_CS2__CS_ENABLE__SHIFT 0x0
++#define GCEA_ADDRDEC1_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1
++#define GCEA_ADDRDEC1_BASE_ADDR_CS2__CS_ENABLE_MASK 0x00000001L
++#define GCEA_ADDRDEC1_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL
++//GCEA_ADDRDEC1_BASE_ADDR_CS3
++#define GCEA_ADDRDEC1_BASE_ADDR_CS3__CS_ENABLE__SHIFT 0x0
++#define GCEA_ADDRDEC1_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1
++#define GCEA_ADDRDEC1_BASE_ADDR_CS3__CS_ENABLE_MASK 0x00000001L
++#define GCEA_ADDRDEC1_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL
++//GCEA_ADDRDEC1_BASE_ADDR_SECCS0
++#define GCEA_ADDRDEC1_BASE_ADDR_SECCS0__CS_ENABLE__SHIFT 0x0
++#define GCEA_ADDRDEC1_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1
++#define GCEA_ADDRDEC1_BASE_ADDR_SECCS0__CS_ENABLE_MASK 0x00000001L
++#define GCEA_ADDRDEC1_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL
++//GCEA_ADDRDEC1_BASE_ADDR_SECCS1
++#define GCEA_ADDRDEC1_BASE_ADDR_SECCS1__CS_ENABLE__SHIFT 0x0
++#define GCEA_ADDRDEC1_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1
++#define GCEA_ADDRDEC1_BASE_ADDR_SECCS1__CS_ENABLE_MASK 0x00000001L
++#define GCEA_ADDRDEC1_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL
++//GCEA_ADDRDEC1_BASE_ADDR_SECCS2
++#define GCEA_ADDRDEC1_BASE_ADDR_SECCS2__CS_ENABLE__SHIFT 0x0
++#define GCEA_ADDRDEC1_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1
++#define GCEA_ADDRDEC1_BASE_ADDR_SECCS2__CS_ENABLE_MASK 0x00000001L
++#define GCEA_ADDRDEC1_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL
++//GCEA_ADDRDEC1_BASE_ADDR_SECCS3
++#define GCEA_ADDRDEC1_BASE_ADDR_SECCS3__CS_ENABLE__SHIFT 0x0
++#define GCEA_ADDRDEC1_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1
++#define GCEA_ADDRDEC1_BASE_ADDR_SECCS3__CS_ENABLE_MASK 0x00000001L
++#define GCEA_ADDRDEC1_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL
++//GCEA_ADDRDEC1_ADDR_MASK_CS01
++#define GCEA_ADDRDEC1_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1
++#define GCEA_ADDRDEC1_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL
++//GCEA_ADDRDEC1_ADDR_MASK_CS23
++#define GCEA_ADDRDEC1_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1
++#define GCEA_ADDRDEC1_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL
++//GCEA_ADDRDEC1_ADDR_MASK_SECCS01
++#define GCEA_ADDRDEC1_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1
++#define GCEA_ADDRDEC1_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL
++//GCEA_ADDRDEC1_ADDR_MASK_SECCS23
++#define GCEA_ADDRDEC1_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1
++#define GCEA_ADDRDEC1_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL
++//GCEA_ADDRDEC1_ADDR_CFG_CS01
++#define GCEA_ADDRDEC1_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x2
++#define GCEA_ADDRDEC1_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4
++#define GCEA_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8
++#define GCEA_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc
++#define GCEA_ADDRDEC1_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10
++#define GCEA_ADDRDEC1_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14
++#define GCEA_ADDRDEC1_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000CL
++#define GCEA_ADDRDEC1_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L
++#define GCEA_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L
++#define GCEA_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L
++#define GCEA_ADDRDEC1_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L
++#define GCEA_ADDRDEC1_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L
++//GCEA_ADDRDEC1_ADDR_CFG_CS23
++#define GCEA_ADDRDEC1_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x2
++#define GCEA_ADDRDEC1_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4
++#define GCEA_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8
++#define GCEA_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc
++#define GCEA_ADDRDEC1_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10
++#define GCEA_ADDRDEC1_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14
++#define GCEA_ADDRDEC1_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000CL
++#define GCEA_ADDRDEC1_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L
++#define GCEA_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L
++#define GCEA_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L
++#define GCEA_ADDRDEC1_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L
++#define GCEA_ADDRDEC1_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L
++//GCEA_ADDRDEC1_ADDR_SEL_CS01
++#define GCEA_ADDRDEC1_ADDR_SEL_CS01__BANK0__SHIFT 0x0
++#define GCEA_ADDRDEC1_ADDR_SEL_CS01__BANK1__SHIFT 0x4
++#define GCEA_ADDRDEC1_ADDR_SEL_CS01__BANK2__SHIFT 0x8
++#define GCEA_ADDRDEC1_ADDR_SEL_CS01__BANK3__SHIFT 0xc
++#define GCEA_ADDRDEC1_ADDR_SEL_CS01__BANK4__SHIFT 0x10
++#define GCEA_ADDRDEC1_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18
++#define GCEA_ADDRDEC1_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c
++#define GCEA_ADDRDEC1_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL
++#define GCEA_ADDRDEC1_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L
++#define GCEA_ADDRDEC1_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L
++#define GCEA_ADDRDEC1_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L
++#define GCEA_ADDRDEC1_ADDR_SEL_CS01__BANK4_MASK 0x000F0000L
++#define GCEA_ADDRDEC1_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L
++#define GCEA_ADDRDEC1_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L
++//GCEA_ADDRDEC1_ADDR_SEL_CS23
++#define GCEA_ADDRDEC1_ADDR_SEL_CS23__BANK0__SHIFT 0x0
++#define GCEA_ADDRDEC1_ADDR_SEL_CS23__BANK1__SHIFT 0x4
++#define GCEA_ADDRDEC1_ADDR_SEL_CS23__BANK2__SHIFT 0x8
++#define GCEA_ADDRDEC1_ADDR_SEL_CS23__BANK3__SHIFT 0xc
++#define GCEA_ADDRDEC1_ADDR_SEL_CS23__BANK4__SHIFT 0x10
++#define GCEA_ADDRDEC1_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18
++#define GCEA_ADDRDEC1_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c
++#define GCEA_ADDRDEC1_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL
++#define GCEA_ADDRDEC1_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L
++#define GCEA_ADDRDEC1_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L
++#define GCEA_ADDRDEC1_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L
++#define GCEA_ADDRDEC1_ADDR_SEL_CS23__BANK4_MASK 0x000F0000L
++#define GCEA_ADDRDEC1_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L
++#define GCEA_ADDRDEC1_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L
++//GCEA_ADDRDEC1_COL_SEL_LO_CS01
++#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL0__SHIFT 0x0
++#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL1__SHIFT 0x4
++#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL2__SHIFT 0x8
++#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL3__SHIFT 0xc
++#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL4__SHIFT 0x10
++#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL5__SHIFT 0x14
++#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL6__SHIFT 0x18
++#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL7__SHIFT 0x1c
++#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL
++#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L
++#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L
++#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L
++#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L
++#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L
++#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L
++#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L
++//GCEA_ADDRDEC1_COL_SEL_LO_CS23
++#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL0__SHIFT 0x0
++#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL1__SHIFT 0x4
++#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL2__SHIFT 0x8
++#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL3__SHIFT 0xc
++#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL4__SHIFT 0x10
++#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL5__SHIFT 0x14
++#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL6__SHIFT 0x18
++#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL7__SHIFT 0x1c
++#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL
++#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L
++#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L
++#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L
++#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L
++#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L
++#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L
++#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L
++//GCEA_ADDRDEC1_COL_SEL_HI_CS01
++#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL8__SHIFT 0x0
++#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL9__SHIFT 0x4
++#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL10__SHIFT 0x8
++#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL11__SHIFT 0xc
++#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL12__SHIFT 0x10
++#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL13__SHIFT 0x14
++#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL14__SHIFT 0x18
++#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL15__SHIFT 0x1c
++#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL
++#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L
++#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L
++#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L
++#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L
++#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L
++#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L
++#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L
++//GCEA_ADDRDEC1_COL_SEL_HI_CS23
++#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL8__SHIFT 0x0
++#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL9__SHIFT 0x4
++#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL10__SHIFT 0x8
++#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL11__SHIFT 0xc
++#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL12__SHIFT 0x10
++#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL13__SHIFT 0x14
++#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL14__SHIFT 0x18
++#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL15__SHIFT 0x1c
++#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL
++#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L
++#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L
++#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L
++#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L
++#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L
++#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L
++#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L
++//GCEA_ADDRDEC1_RM_SEL_CS01
++#define GCEA_ADDRDEC1_RM_SEL_CS01__RM0__SHIFT 0x0
++#define GCEA_ADDRDEC1_RM_SEL_CS01__RM1__SHIFT 0x4
++#define GCEA_ADDRDEC1_RM_SEL_CS01__RM2__SHIFT 0x8
++#define GCEA_ADDRDEC1_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc
++#define GCEA_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
++#define GCEA_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
++#define GCEA_ADDRDEC1_RM_SEL_CS01__RM0_MASK 0x0000000FL
++#define GCEA_ADDRDEC1_RM_SEL_CS01__RM1_MASK 0x000000F0L
++#define GCEA_ADDRDEC1_RM_SEL_CS01__RM2_MASK 0x00000F00L
++#define GCEA_ADDRDEC1_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L
++#define GCEA_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
++#define GCEA_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
++//GCEA_ADDRDEC1_RM_SEL_CS23
++#define GCEA_ADDRDEC1_RM_SEL_CS23__RM0__SHIFT 0x0
++#define GCEA_ADDRDEC1_RM_SEL_CS23__RM1__SHIFT 0x4
++#define GCEA_ADDRDEC1_RM_SEL_CS23__RM2__SHIFT 0x8
++#define GCEA_ADDRDEC1_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc
++#define GCEA_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
++#define GCEA_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
++#define GCEA_ADDRDEC1_RM_SEL_CS23__RM0_MASK 0x0000000FL
++#define GCEA_ADDRDEC1_RM_SEL_CS23__RM1_MASK 0x000000F0L
++#define GCEA_ADDRDEC1_RM_SEL_CS23__RM2_MASK 0x00000F00L
++#define GCEA_ADDRDEC1_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L
++#define GCEA_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
++#define GCEA_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
++//GCEA_ADDRDEC1_RM_SEL_SECCS01
++#define GCEA_ADDRDEC1_RM_SEL_SECCS01__RM0__SHIFT 0x0
++#define GCEA_ADDRDEC1_RM_SEL_SECCS01__RM1__SHIFT 0x4
++#define GCEA_ADDRDEC1_RM_SEL_SECCS01__RM2__SHIFT 0x8
++#define GCEA_ADDRDEC1_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc
++#define GCEA_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
++#define GCEA_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
++#define GCEA_ADDRDEC1_RM_SEL_SECCS01__RM0_MASK 0x0000000FL
++#define GCEA_ADDRDEC1_RM_SEL_SECCS01__RM1_MASK 0x000000F0L
++#define GCEA_ADDRDEC1_RM_SEL_SECCS01__RM2_MASK 0x00000F00L
++#define GCEA_ADDRDEC1_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L
++#define GCEA_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
++#define GCEA_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
++//GCEA_ADDRDEC1_RM_SEL_SECCS23
++#define GCEA_ADDRDEC1_RM_SEL_SECCS23__RM0__SHIFT 0x0
++#define GCEA_ADDRDEC1_RM_SEL_SECCS23__RM1__SHIFT 0x4
++#define GCEA_ADDRDEC1_RM_SEL_SECCS23__RM2__SHIFT 0x8
++#define GCEA_ADDRDEC1_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc
++#define GCEA_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
++#define GCEA_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
++#define GCEA_ADDRDEC1_RM_SEL_SECCS23__RM0_MASK 0x0000000FL
++#define GCEA_ADDRDEC1_RM_SEL_SECCS23__RM1_MASK 0x000000F0L
++#define GCEA_ADDRDEC1_RM_SEL_SECCS23__RM2_MASK 0x00000F00L
++#define GCEA_ADDRDEC1_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L
++#define GCEA_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
++#define GCEA_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
++//GCEA_IO_RD_CLI2GRP_MAP0
++#define GCEA_IO_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
++#define GCEA_IO_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
++#define GCEA_IO_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
++#define GCEA_IO_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
++#define GCEA_IO_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
++#define GCEA_IO_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
++#define GCEA_IO_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
++#define GCEA_IO_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
++#define GCEA_IO_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
++#define GCEA_IO_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
++#define GCEA_IO_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
++#define GCEA_IO_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
++#define GCEA_IO_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
++#define GCEA_IO_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
++#define GCEA_IO_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
++#define GCEA_IO_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
++#define GCEA_IO_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
++#define GCEA_IO_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
++#define GCEA_IO_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
++#define GCEA_IO_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
++#define GCEA_IO_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
++#define GCEA_IO_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
++#define GCEA_IO_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
++#define GCEA_IO_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
++#define GCEA_IO_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
++#define GCEA_IO_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
++#define GCEA_IO_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
++#define GCEA_IO_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
++#define GCEA_IO_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
++#define GCEA_IO_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
++#define GCEA_IO_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
++#define GCEA_IO_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
++//GCEA_IO_RD_CLI2GRP_MAP1
++#define GCEA_IO_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
++#define GCEA_IO_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
++#define GCEA_IO_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
++#define GCEA_IO_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
++#define GCEA_IO_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
++#define GCEA_IO_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
++#define GCEA_IO_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
++#define GCEA_IO_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
++#define GCEA_IO_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
++#define GCEA_IO_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
++#define GCEA_IO_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
++#define GCEA_IO_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
++#define GCEA_IO_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
++#define GCEA_IO_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
++#define GCEA_IO_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
++#define GCEA_IO_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
++#define GCEA_IO_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
++#define GCEA_IO_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
++#define GCEA_IO_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
++#define GCEA_IO_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
++#define GCEA_IO_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
++#define GCEA_IO_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
++#define GCEA_IO_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
++#define GCEA_IO_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
++#define GCEA_IO_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
++#define GCEA_IO_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
++#define GCEA_IO_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
++#define GCEA_IO_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
++#define GCEA_IO_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
++#define GCEA_IO_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
++#define GCEA_IO_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
++#define GCEA_IO_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
++//GCEA_IO_WR_CLI2GRP_MAP0
++#define GCEA_IO_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
++#define GCEA_IO_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
++#define GCEA_IO_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
++#define GCEA_IO_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
++#define GCEA_IO_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
++#define GCEA_IO_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
++#define GCEA_IO_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
++#define GCEA_IO_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
++#define GCEA_IO_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
++#define GCEA_IO_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
++#define GCEA_IO_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
++#define GCEA_IO_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
++#define GCEA_IO_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
++#define GCEA_IO_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
++#define GCEA_IO_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
++#define GCEA_IO_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
++#define GCEA_IO_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
++#define GCEA_IO_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
++#define GCEA_IO_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
++#define GCEA_IO_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
++#define GCEA_IO_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
++#define GCEA_IO_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
++#define GCEA_IO_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
++#define GCEA_IO_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
++#define GCEA_IO_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
++#define GCEA_IO_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
++#define GCEA_IO_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
++#define GCEA_IO_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
++#define GCEA_IO_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
++#define GCEA_IO_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
++#define GCEA_IO_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
++#define GCEA_IO_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
++//GCEA_IO_WR_CLI2GRP_MAP1
++#define GCEA_IO_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
++#define GCEA_IO_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
++#define GCEA_IO_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
++#define GCEA_IO_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
++#define GCEA_IO_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
++#define GCEA_IO_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
++#define GCEA_IO_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
++#define GCEA_IO_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
++#define GCEA_IO_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
++#define GCEA_IO_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
++#define GCEA_IO_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
++#define GCEA_IO_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
++#define GCEA_IO_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
++#define GCEA_IO_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
++#define GCEA_IO_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
++#define GCEA_IO_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
++#define GCEA_IO_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
++#define GCEA_IO_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
++#define GCEA_IO_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
++#define GCEA_IO_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
++#define GCEA_IO_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
++#define GCEA_IO_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
++#define GCEA_IO_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
++#define GCEA_IO_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
++#define GCEA_IO_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
++#define GCEA_IO_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
++#define GCEA_IO_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
++#define GCEA_IO_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
++#define GCEA_IO_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
++#define GCEA_IO_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
++#define GCEA_IO_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
++#define GCEA_IO_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
++//GCEA_IO_RD_COMBINE_FLUSH
++#define GCEA_IO_RD_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
++#define GCEA_IO_RD_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
++#define GCEA_IO_RD_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
++#define GCEA_IO_RD_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
++#define GCEA_IO_RD_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
++#define GCEA_IO_RD_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
++#define GCEA_IO_RD_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
++#define GCEA_IO_RD_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
++//GCEA_IO_WR_COMBINE_FLUSH
++#define GCEA_IO_WR_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
++#define GCEA_IO_WR_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
++#define GCEA_IO_WR_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
++#define GCEA_IO_WR_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
++#define GCEA_IO_WR_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
++#define GCEA_IO_WR_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
++#define GCEA_IO_WR_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
++#define GCEA_IO_WR_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
++//GCEA_IO_GROUP_BURST
++#define GCEA_IO_GROUP_BURST__RD_LIMIT_LO__SHIFT 0x0
++#define GCEA_IO_GROUP_BURST__RD_LIMIT_HI__SHIFT 0x8
++#define GCEA_IO_GROUP_BURST__WR_LIMIT_LO__SHIFT 0x10
++#define GCEA_IO_GROUP_BURST__WR_LIMIT_HI__SHIFT 0x18
++#define GCEA_IO_GROUP_BURST__RD_LIMIT_LO_MASK 0x000000FFL
++#define GCEA_IO_GROUP_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
++#define GCEA_IO_GROUP_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
++#define GCEA_IO_GROUP_BURST__WR_LIMIT_HI_MASK 0xFF000000L
++//GCEA_IO_RD_PRI_AGE
++#define GCEA_IO_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
++#define GCEA_IO_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
++#define GCEA_IO_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
++#define GCEA_IO_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
++#define GCEA_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
++#define GCEA_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
++#define GCEA_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
++#define GCEA_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
++#define GCEA_IO_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
++#define GCEA_IO_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
++#define GCEA_IO_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
++#define GCEA_IO_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
++#define GCEA_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
++#define GCEA_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
++#define GCEA_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
++#define GCEA_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
++//GCEA_IO_WR_PRI_AGE
++#define GCEA_IO_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
++#define GCEA_IO_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
++#define GCEA_IO_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
++#define GCEA_IO_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
++#define GCEA_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
++#define GCEA_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
++#define GCEA_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
++#define GCEA_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
++#define GCEA_IO_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
++#define GCEA_IO_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
++#define GCEA_IO_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
++#define GCEA_IO_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
++#define GCEA_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
++#define GCEA_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
++#define GCEA_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
++#define GCEA_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
++//GCEA_IO_RD_PRI_QUEUING
++#define GCEA_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
++#define GCEA_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
++#define GCEA_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
++#define GCEA_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
++#define GCEA_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
++#define GCEA_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
++#define GCEA_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
++#define GCEA_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
++//GCEA_IO_WR_PRI_QUEUING
++#define GCEA_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
++#define GCEA_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
++#define GCEA_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
++#define GCEA_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
++#define GCEA_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
++#define GCEA_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
++#define GCEA_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
++#define GCEA_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
++//GCEA_IO_RD_PRI_FIXED
++#define GCEA_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
++#define GCEA_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
++#define GCEA_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
++#define GCEA_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
++#define GCEA_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
++#define GCEA_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
++#define GCEA_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
++#define GCEA_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
++//GCEA_IO_WR_PRI_FIXED
++#define GCEA_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
++#define GCEA_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
++#define GCEA_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
++#define GCEA_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
++#define GCEA_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
++#define GCEA_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
++#define GCEA_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
++#define GCEA_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
++//GCEA_IO_RD_PRI_URGENCY
++#define GCEA_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
++#define GCEA_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
++#define GCEA_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
++#define GCEA_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
++#define GCEA_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
++#define GCEA_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
++#define GCEA_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
++#define GCEA_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
++#define GCEA_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
++#define GCEA_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
++#define GCEA_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
++#define GCEA_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
++#define GCEA_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
++#define GCEA_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
++#define GCEA_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
++#define GCEA_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
++//GCEA_IO_WR_PRI_URGENCY
++#define GCEA_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
++#define GCEA_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
++#define GCEA_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
++#define GCEA_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
++#define GCEA_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
++#define GCEA_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
++#define GCEA_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
++#define GCEA_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
++#define GCEA_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
++#define GCEA_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
++#define GCEA_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
++#define GCEA_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
++#define GCEA_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
++#define GCEA_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
++#define GCEA_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
++#define GCEA_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
++//GCEA_IO_RD_PRI_URGENCY_MASK
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID0_MASK__SHIFT 0x0
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID1_MASK__SHIFT 0x1
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID2_MASK__SHIFT 0x2
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID3_MASK__SHIFT 0x3
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID4_MASK__SHIFT 0x4
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID5_MASK__SHIFT 0x5
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID6_MASK__SHIFT 0x6
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID7_MASK__SHIFT 0x7
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID8_MASK__SHIFT 0x8
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID9_MASK__SHIFT 0x9
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID10_MASK__SHIFT 0xa
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID11_MASK__SHIFT 0xb
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID12_MASK__SHIFT 0xc
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID13_MASK__SHIFT 0xd
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID14_MASK__SHIFT 0xe
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID15_MASK__SHIFT 0xf
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID16_MASK__SHIFT 0x10
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID17_MASK__SHIFT 0x11
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID18_MASK__SHIFT 0x12
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID19_MASK__SHIFT 0x13
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID20_MASK__SHIFT 0x14
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID21_MASK__SHIFT 0x15
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID22_MASK__SHIFT 0x16
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID23_MASK__SHIFT 0x17
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID24_MASK__SHIFT 0x18
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID25_MASK__SHIFT 0x19
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID26_MASK__SHIFT 0x1a
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID27_MASK__SHIFT 0x1b
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID28_MASK__SHIFT 0x1c
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID29_MASK__SHIFT 0x1d
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID30_MASK__SHIFT 0x1e
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID31_MASK__SHIFT 0x1f
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID0_MASK_MASK 0x00000001L
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID1_MASK_MASK 0x00000002L
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID2_MASK_MASK 0x00000004L
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID3_MASK_MASK 0x00000008L
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID4_MASK_MASK 0x00000010L
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID5_MASK_MASK 0x00000020L
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID6_MASK_MASK 0x00000040L
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID7_MASK_MASK 0x00000080L
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID8_MASK_MASK 0x00000100L
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID9_MASK_MASK 0x00000200L
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID10_MASK_MASK 0x00000400L
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID11_MASK_MASK 0x00000800L
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID12_MASK_MASK 0x00001000L
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID13_MASK_MASK 0x00002000L
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID14_MASK_MASK 0x00004000L
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID15_MASK_MASK 0x00008000L
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID16_MASK_MASK 0x00010000L
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID17_MASK_MASK 0x00020000L
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID18_MASK_MASK 0x00040000L
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID19_MASK_MASK 0x00080000L
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID20_MASK_MASK 0x00100000L
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID21_MASK_MASK 0x00200000L
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID22_MASK_MASK 0x00400000L
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID23_MASK_MASK 0x00800000L
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID24_MASK_MASK 0x01000000L
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID25_MASK_MASK 0x02000000L
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID26_MASK_MASK 0x04000000L
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID27_MASK_MASK 0x08000000L
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID28_MASK_MASK 0x10000000L
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID29_MASK_MASK 0x20000000L
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID30_MASK_MASK 0x40000000L
++#define GCEA_IO_RD_PRI_URGENCY_MASK__CID31_MASK_MASK 0x80000000L
++//GCEA_IO_WR_PRI_URGENCY_MASK
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID0_MASK__SHIFT 0x0
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID1_MASK__SHIFT 0x1
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID2_MASK__SHIFT 0x2
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID3_MASK__SHIFT 0x3
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID4_MASK__SHIFT 0x4
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID5_MASK__SHIFT 0x5
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID6_MASK__SHIFT 0x6
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID7_MASK__SHIFT 0x7
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID8_MASK__SHIFT 0x8
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID9_MASK__SHIFT 0x9
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID10_MASK__SHIFT 0xa
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID11_MASK__SHIFT 0xb
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID12_MASK__SHIFT 0xc
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID13_MASK__SHIFT 0xd
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID14_MASK__SHIFT 0xe
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID15_MASK__SHIFT 0xf
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID16_MASK__SHIFT 0x10
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID17_MASK__SHIFT 0x11
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID18_MASK__SHIFT 0x12
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID19_MASK__SHIFT 0x13
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID20_MASK__SHIFT 0x14
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID21_MASK__SHIFT 0x15
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID22_MASK__SHIFT 0x16
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID23_MASK__SHIFT 0x17
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID24_MASK__SHIFT 0x18
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID25_MASK__SHIFT 0x19
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID26_MASK__SHIFT 0x1a
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID27_MASK__SHIFT 0x1b
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID28_MASK__SHIFT 0x1c
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID29_MASK__SHIFT 0x1d
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID30_MASK__SHIFT 0x1e
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID31_MASK__SHIFT 0x1f
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID0_MASK_MASK 0x00000001L
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID1_MASK_MASK 0x00000002L
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID2_MASK_MASK 0x00000004L
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID3_MASK_MASK 0x00000008L
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID4_MASK_MASK 0x00000010L
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID5_MASK_MASK 0x00000020L
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID6_MASK_MASK 0x00000040L
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID7_MASK_MASK 0x00000080L
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID8_MASK_MASK 0x00000100L
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID9_MASK_MASK 0x00000200L
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID10_MASK_MASK 0x00000400L
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID11_MASK_MASK 0x00000800L
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID12_MASK_MASK 0x00001000L
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID13_MASK_MASK 0x00002000L
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID14_MASK_MASK 0x00004000L
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID15_MASK_MASK 0x00008000L
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID16_MASK_MASK 0x00010000L
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID17_MASK_MASK 0x00020000L
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID18_MASK_MASK 0x00040000L
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID19_MASK_MASK 0x00080000L
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID20_MASK_MASK 0x00100000L
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID21_MASK_MASK 0x00200000L
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID22_MASK_MASK 0x00400000L
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID23_MASK_MASK 0x00800000L
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID24_MASK_MASK 0x01000000L
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID25_MASK_MASK 0x02000000L
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID26_MASK_MASK 0x04000000L
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID27_MASK_MASK 0x08000000L
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID28_MASK_MASK 0x10000000L
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID29_MASK_MASK 0x20000000L
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID30_MASK_MASK 0x40000000L
++#define GCEA_IO_WR_PRI_URGENCY_MASK__CID31_MASK_MASK 0x80000000L
++//GCEA_IO_RD_PRI_QUANT_PRI1
++#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
++#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
++#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
++#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
++#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
++#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
++#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
++#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
++//GCEA_IO_RD_PRI_QUANT_PRI2
++#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
++#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
++#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
++#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
++#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
++#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
++#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
++#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
++//GCEA_IO_RD_PRI_QUANT_PRI3
++#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
++#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
++#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
++#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
++#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
++#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
++#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
++#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
++//GCEA_IO_WR_PRI_QUANT_PRI1
++#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
++#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
++#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
++#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
++#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
++#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
++#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
++#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
++//GCEA_IO_WR_PRI_QUANT_PRI2
++#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
++#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
++#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
++#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
++#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
++#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
++#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
++#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
++//GCEA_IO_WR_PRI_QUANT_PRI3
++#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
++#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
++#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
++#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
++#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
++#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
++#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
++#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
++//GCEA_SDP_ARB_DRAM
++#define GCEA_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0
++#define GCEA_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA__SHIFT 0x8
++#define GCEA_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI__SHIFT 0x10
++#define GCEA_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI__SHIFT 0x11
++#define GCEA_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES__SHIFT 0x12
++#define GCEA_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES__SHIFT 0x13
++#define GCEA_SDP_ARB_DRAM__EOB_ON_EXPIRE__SHIFT 0x14
++#define GCEA_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL
++#define GCEA_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L
++#define GCEA_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI_MASK 0x00010000L
++#define GCEA_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI_MASK 0x00020000L
++#define GCEA_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES_MASK 0x00040000L
++#define GCEA_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES_MASK 0x00080000L
++#define GCEA_SDP_ARB_DRAM__EOB_ON_EXPIRE_MASK 0x00100000L
++//GCEA_SDP_ARB_FINAL
++#define GCEA_SDP_ARB_FINAL__DRAM_BURST_LIMIT__SHIFT 0x0
++#define GCEA_SDP_ARB_FINAL__GMI_BURST_LIMIT__SHIFT 0x5
++#define GCEA_SDP_ARB_FINAL__IO_BURST_LIMIT__SHIFT 0xa
++#define GCEA_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER__SHIFT 0xf
++#define GCEA_SDP_ARB_FINAL__RDONLY_VC0__SHIFT 0x11
++#define GCEA_SDP_ARB_FINAL__RDONLY_VC1__SHIFT 0x12
++#define GCEA_SDP_ARB_FINAL__RDONLY_VC2__SHIFT 0x13
++#define GCEA_SDP_ARB_FINAL__RDONLY_VC3__SHIFT 0x14
++#define GCEA_SDP_ARB_FINAL__RDONLY_VC4__SHIFT 0x15
++#define GCEA_SDP_ARB_FINAL__RDONLY_VC5__SHIFT 0x16
++#define GCEA_SDP_ARB_FINAL__RDONLY_VC6__SHIFT 0x17
++#define GCEA_SDP_ARB_FINAL__RDONLY_VC7__SHIFT 0x18
++#define GCEA_SDP_ARB_FINAL__ERREVENT_ON_ERROR__SHIFT 0x19
++#define GCEA_SDP_ARB_FINAL__HALTREQ_ON_ERROR__SHIFT 0x1a
++#define GCEA_SDP_ARB_FINAL__DRAM_BURST_LIMIT_MASK 0x0000001FL
++#define GCEA_SDP_ARB_FINAL__GMI_BURST_LIMIT_MASK 0x000003E0L
++#define GCEA_SDP_ARB_FINAL__IO_BURST_LIMIT_MASK 0x00007C00L
++#define GCEA_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER_MASK 0x00018000L
++#define GCEA_SDP_ARB_FINAL__RDONLY_VC0_MASK 0x00020000L
++#define GCEA_SDP_ARB_FINAL__RDONLY_VC1_MASK 0x00040000L
++#define GCEA_SDP_ARB_FINAL__RDONLY_VC2_MASK 0x00080000L
++#define GCEA_SDP_ARB_FINAL__RDONLY_VC3_MASK 0x00100000L
++#define GCEA_SDP_ARB_FINAL__RDONLY_VC4_MASK 0x00200000L
++#define GCEA_SDP_ARB_FINAL__RDONLY_VC5_MASK 0x00400000L
++#define GCEA_SDP_ARB_FINAL__RDONLY_VC6_MASK 0x00800000L
++#define GCEA_SDP_ARB_FINAL__RDONLY_VC7_MASK 0x01000000L
++#define GCEA_SDP_ARB_FINAL__ERREVENT_ON_ERROR_MASK 0x02000000L
++#define GCEA_SDP_ARB_FINAL__HALTREQ_ON_ERROR_MASK 0x04000000L
++//GCEA_SDP_DRAM_PRIORITY
++#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
++#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
++#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
++#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
++#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
++#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
++#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
++#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
++#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
++#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
++#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
++#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
++#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
++#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
++#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
++#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
++//GCEA_SDP_IO_PRIORITY
++#define GCEA_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
++#define GCEA_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
++#define GCEA_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
++#define GCEA_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
++#define GCEA_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
++#define GCEA_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
++#define GCEA_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
++#define GCEA_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
++#define GCEA_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
++#define GCEA_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
++#define GCEA_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
++#define GCEA_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
++#define GCEA_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
++#define GCEA_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
++#define GCEA_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
++#define GCEA_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
++//GCEA_SDP_CREDITS
++#define GCEA_SDP_CREDITS__TAG_LIMIT__SHIFT 0x0
++#define GCEA_SDP_CREDITS__WR_RESP_CREDITS__SHIFT 0x8
++#define GCEA_SDP_CREDITS__RD_RESP_CREDITS__SHIFT 0x10
++#define GCEA_SDP_CREDITS__PRB_REQ_CREDITS__SHIFT 0x18
++#define GCEA_SDP_CREDITS__TAG_LIMIT_MASK 0x000000FFL
++#define GCEA_SDP_CREDITS__WR_RESP_CREDITS_MASK 0x00007F00L
++#define GCEA_SDP_CREDITS__RD_RESP_CREDITS_MASK 0x007F0000L
++#define GCEA_SDP_CREDITS__PRB_REQ_CREDITS_MASK 0x3F000000L
++//GCEA_SDP_TAG_RESERVE0
++#define GCEA_SDP_TAG_RESERVE0__VC0__SHIFT 0x0
++#define GCEA_SDP_TAG_RESERVE0__VC1__SHIFT 0x8
++#define GCEA_SDP_TAG_RESERVE0__VC2__SHIFT 0x10
++#define GCEA_SDP_TAG_RESERVE0__VC3__SHIFT 0x18
++#define GCEA_SDP_TAG_RESERVE0__VC0_MASK 0x000000FFL
++#define GCEA_SDP_TAG_RESERVE0__VC1_MASK 0x0000FF00L
++#define GCEA_SDP_TAG_RESERVE0__VC2_MASK 0x00FF0000L
++#define GCEA_SDP_TAG_RESERVE0__VC3_MASK 0xFF000000L
++//GCEA_SDP_TAG_RESERVE1
++#define GCEA_SDP_TAG_RESERVE1__VC4__SHIFT 0x0
++#define GCEA_SDP_TAG_RESERVE1__VC5__SHIFT 0x8
++#define GCEA_SDP_TAG_RESERVE1__VC6__SHIFT 0x10
++#define GCEA_SDP_TAG_RESERVE1__VC7__SHIFT 0x18
++#define GCEA_SDP_TAG_RESERVE1__VC4_MASK 0x000000FFL
++#define GCEA_SDP_TAG_RESERVE1__VC5_MASK 0x0000FF00L
++#define GCEA_SDP_TAG_RESERVE1__VC6_MASK 0x00FF0000L
++#define GCEA_SDP_TAG_RESERVE1__VC7_MASK 0xFF000000L
++//GCEA_SDP_VCC_RESERVE0
++#define GCEA_SDP_VCC_RESERVE0__VC0_CREDITS__SHIFT 0x0
++#define GCEA_SDP_VCC_RESERVE0__VC1_CREDITS__SHIFT 0x6
++#define GCEA_SDP_VCC_RESERVE0__VC2_CREDITS__SHIFT 0xc
++#define GCEA_SDP_VCC_RESERVE0__VC3_CREDITS__SHIFT 0x12
++#define GCEA_SDP_VCC_RESERVE0__VC4_CREDITS__SHIFT 0x18
++#define GCEA_SDP_VCC_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
++#define GCEA_SDP_VCC_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
++#define GCEA_SDP_VCC_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
++#define GCEA_SDP_VCC_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
++#define GCEA_SDP_VCC_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
++//GCEA_SDP_VCC_RESERVE1
++#define GCEA_SDP_VCC_RESERVE1__VC5_CREDITS__SHIFT 0x0
++#define GCEA_SDP_VCC_RESERVE1__VC6_CREDITS__SHIFT 0x6
++#define GCEA_SDP_VCC_RESERVE1__VC7_CREDITS__SHIFT 0xc
++#define GCEA_SDP_VCC_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
++#define GCEA_SDP_VCC_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
++#define GCEA_SDP_VCC_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
++#define GCEA_SDP_VCC_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
++#define GCEA_SDP_VCC_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
++//GCEA_SDP_VCD_RESERVE0
++#define GCEA_SDP_VCD_RESERVE0__VC0_CREDITS__SHIFT 0x0
++#define GCEA_SDP_VCD_RESERVE0__VC1_CREDITS__SHIFT 0x6
++#define GCEA_SDP_VCD_RESERVE0__VC2_CREDITS__SHIFT 0xc
++#define GCEA_SDP_VCD_RESERVE0__VC3_CREDITS__SHIFT 0x12
++#define GCEA_SDP_VCD_RESERVE0__VC4_CREDITS__SHIFT 0x18
++#define GCEA_SDP_VCD_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
++#define GCEA_SDP_VCD_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
++#define GCEA_SDP_VCD_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
++#define GCEA_SDP_VCD_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
++#define GCEA_SDP_VCD_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
++//GCEA_SDP_VCD_RESERVE1
++#define GCEA_SDP_VCD_RESERVE1__VC5_CREDITS__SHIFT 0x0
++#define GCEA_SDP_VCD_RESERVE1__VC6_CREDITS__SHIFT 0x6
++#define GCEA_SDP_VCD_RESERVE1__VC7_CREDITS__SHIFT 0xc
++#define GCEA_SDP_VCD_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
++#define GCEA_SDP_VCD_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
++#define GCEA_SDP_VCD_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
++#define GCEA_SDP_VCD_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
++#define GCEA_SDP_VCD_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
++//GCEA_SDP_REQ_CNTL
++#define GCEA_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ__SHIFT 0x0
++#define GCEA_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE__SHIFT 0x1
++#define GCEA_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC__SHIFT 0x2
++#define GCEA_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM__SHIFT 0x3
++#define GCEA_SDP_REQ_CNTL__INNER_DOMAIN_MODE__SHIFT 0x4
++#define GCEA_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ_MASK 0x00000001L
++#define GCEA_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE_MASK 0x00000002L
++#define GCEA_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC_MASK 0x00000004L
++#define GCEA_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM_MASK 0x00000008L
++#define GCEA_SDP_REQ_CNTL__INNER_DOMAIN_MODE_MASK 0x00000010L
++//GCEA_MISC
++#define GCEA_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB__SHIFT 0x0
++#define GCEA_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB__SHIFT 0x1
++#define GCEA_MISC__RELATIVE_PRI_IN_GMI_RD_ARB__SHIFT 0x2
++#define GCEA_MISC__RELATIVE_PRI_IN_GMI_WR_ARB__SHIFT 0x3
++#define GCEA_MISC__RELATIVE_PRI_IN_IO_RD_ARB__SHIFT 0x4
++#define GCEA_MISC__RELATIVE_PRI_IN_IO_WR_ARB__SHIFT 0x5
++#define GCEA_MISC__EARLYWRRET_ENABLE_VC0__SHIFT 0x6
++#define GCEA_MISC__EARLYWRRET_ENABLE_VC1__SHIFT 0x7
++#define GCEA_MISC__EARLYWRRET_ENABLE_VC2__SHIFT 0x8
++#define GCEA_MISC__EARLYWRRET_ENABLE_VC3__SHIFT 0x9
++#define GCEA_MISC__EARLYWRRET_ENABLE_VC4__SHIFT 0xa
++#define GCEA_MISC__EARLYWRRET_ENABLE_VC5__SHIFT 0xb
++#define GCEA_MISC__EARLYWRRET_ENABLE_VC6__SHIFT 0xc
++#define GCEA_MISC__EARLYWRRET_ENABLE_VC7__SHIFT 0xd
++#define GCEA_MISC__EARLY_SDP_ORIGDATA__SHIFT 0xe
++#define GCEA_MISC__LINKMGR_DYNAMIC_MODE__SHIFT 0xf
++#define GCEA_MISC__LINKMGR_HALT_THRESHOLD__SHIFT 0x11
++#define GCEA_MISC__LINKMGR_RECONNECT_DELAY__SHIFT 0x13
++#define GCEA_MISC__LINKMGR_IDLE_THRESHOLD__SHIFT 0x15
++#define GCEA_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB__SHIFT 0x1a
++#define GCEA_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB__SHIFT 0x1b
++#define GCEA_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB__SHIFT 0x1c
++#define GCEA_MISC__FAVOUR_LAST_CS_IN_GMI_ARB__SHIFT 0x1d
++#define GCEA_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB__SHIFT 0x1e
++#define GCEA_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB__SHIFT 0x1f
++#define GCEA_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB_MASK 0x00000001L
++#define GCEA_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB_MASK 0x00000002L
++#define GCEA_MISC__RELATIVE_PRI_IN_GMI_RD_ARB_MASK 0x00000004L
++#define GCEA_MISC__RELATIVE_PRI_IN_GMI_WR_ARB_MASK 0x00000008L
++#define GCEA_MISC__RELATIVE_PRI_IN_IO_RD_ARB_MASK 0x00000010L
++#define GCEA_MISC__RELATIVE_PRI_IN_IO_WR_ARB_MASK 0x00000020L
++#define GCEA_MISC__EARLYWRRET_ENABLE_VC0_MASK 0x00000040L
++#define GCEA_MISC__EARLYWRRET_ENABLE_VC1_MASK 0x00000080L
++#define GCEA_MISC__EARLYWRRET_ENABLE_VC2_MASK 0x00000100L
++#define GCEA_MISC__EARLYWRRET_ENABLE_VC3_MASK 0x00000200L
++#define GCEA_MISC__EARLYWRRET_ENABLE_VC4_MASK 0x00000400L
++#define GCEA_MISC__EARLYWRRET_ENABLE_VC5_MASK 0x00000800L
++#define GCEA_MISC__EARLYWRRET_ENABLE_VC6_MASK 0x00001000L
++#define GCEA_MISC__EARLYWRRET_ENABLE_VC7_MASK 0x00002000L
++#define GCEA_MISC__EARLY_SDP_ORIGDATA_MASK 0x00004000L
++#define GCEA_MISC__LINKMGR_DYNAMIC_MODE_MASK 0x00018000L
++#define GCEA_MISC__LINKMGR_HALT_THRESHOLD_MASK 0x00060000L
++#define GCEA_MISC__LINKMGR_RECONNECT_DELAY_MASK 0x00180000L
++#define GCEA_MISC__LINKMGR_IDLE_THRESHOLD_MASK 0x03E00000L
++#define GCEA_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB_MASK 0x04000000L
++#define GCEA_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB_MASK 0x08000000L
++#define GCEA_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB_MASK 0x10000000L
++#define GCEA_MISC__FAVOUR_LAST_CS_IN_GMI_ARB_MASK 0x20000000L
++#define GCEA_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB_MASK 0x40000000L
++#define GCEA_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB_MASK 0x80000000L
++//GCEA_LATENCY_SAMPLING
++#define GCEA_LATENCY_SAMPLING__SAMPLER0_DRAM__SHIFT 0x0
++#define GCEA_LATENCY_SAMPLING__SAMPLER1_DRAM__SHIFT 0x1
++#define GCEA_LATENCY_SAMPLING__SAMPLER0_GMI__SHIFT 0x2
++#define GCEA_LATENCY_SAMPLING__SAMPLER1_GMI__SHIFT 0x3
++#define GCEA_LATENCY_SAMPLING__SAMPLER0_IO__SHIFT 0x4
++#define GCEA_LATENCY_SAMPLING__SAMPLER1_IO__SHIFT 0x5
++#define GCEA_LATENCY_SAMPLING__SAMPLER0_READ__SHIFT 0x6
++#define GCEA_LATENCY_SAMPLING__SAMPLER1_READ__SHIFT 0x7
++#define GCEA_LATENCY_SAMPLING__SAMPLER0_WRITE__SHIFT 0x8
++#define GCEA_LATENCY_SAMPLING__SAMPLER1_WRITE__SHIFT 0x9
++#define GCEA_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET__SHIFT 0xa
++#define GCEA_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET__SHIFT 0xb
++#define GCEA_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET__SHIFT 0xc
++#define GCEA_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET__SHIFT 0xd
++#define GCEA_LATENCY_SAMPLING__SAMPLER0_VC__SHIFT 0xe
++#define GCEA_LATENCY_SAMPLING__SAMPLER1_VC__SHIFT 0x16
++#define GCEA_LATENCY_SAMPLING__SAMPLER0_DRAM_MASK 0x00000001L
++#define GCEA_LATENCY_SAMPLING__SAMPLER1_DRAM_MASK 0x00000002L
++#define GCEA_LATENCY_SAMPLING__SAMPLER0_GMI_MASK 0x00000004L
++#define GCEA_LATENCY_SAMPLING__SAMPLER1_GMI_MASK 0x00000008L
++#define GCEA_LATENCY_SAMPLING__SAMPLER0_IO_MASK 0x00000010L
++#define GCEA_LATENCY_SAMPLING__SAMPLER1_IO_MASK 0x00000020L
++#define GCEA_LATENCY_SAMPLING__SAMPLER0_READ_MASK 0x00000040L
++#define GCEA_LATENCY_SAMPLING__SAMPLER1_READ_MASK 0x00000080L
++#define GCEA_LATENCY_SAMPLING__SAMPLER0_WRITE_MASK 0x00000100L
++#define GCEA_LATENCY_SAMPLING__SAMPLER1_WRITE_MASK 0x00000200L
++#define GCEA_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET_MASK 0x00000400L
++#define GCEA_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET_MASK 0x00000800L
++#define GCEA_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET_MASK 0x00001000L
++#define GCEA_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET_MASK 0x00002000L
++#define GCEA_LATENCY_SAMPLING__SAMPLER0_VC_MASK 0x003FC000L
++#define GCEA_LATENCY_SAMPLING__SAMPLER1_VC_MASK 0x3FC00000L
++//GCEA_PERFCOUNTER_LO
++#define GCEA_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
++#define GCEA_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
++//GCEA_PERFCOUNTER_HI
++#define GCEA_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
++#define GCEA_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
++#define GCEA_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
++#define GCEA_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
++//GCEA_PERFCOUNTER0_CFG
++#define GCEA_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
++#define GCEA_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
++#define GCEA_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
++#define GCEA_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
++#define GCEA_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
++#define GCEA_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
++#define GCEA_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
++#define GCEA_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
++#define GCEA_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
++#define GCEA_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
++//GCEA_PERFCOUNTER1_CFG
++#define GCEA_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
++#define GCEA_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
++#define GCEA_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
++#define GCEA_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
++#define GCEA_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
++#define GCEA_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
++#define GCEA_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
++#define GCEA_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
++#define GCEA_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
++#define GCEA_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
++//GCEA_PERFCOUNTER_RSLT_CNTL
++#define GCEA_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
++#define GCEA_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
++#define GCEA_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
++#define GCEA_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
++#define GCEA_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
++#define GCEA_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
++#define GCEA_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
++#define GCEA_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
++#define GCEA_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
++#define GCEA_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
++#define GCEA_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
++#define GCEA_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
++
++
++// addressBlock: gc_tcdec
++//TCP_INVALIDATE
++#define TCP_INVALIDATE__START__SHIFT 0x0
++#define TCP_INVALIDATE__START_MASK 0x00000001L
++//TCP_STATUS
++#define TCP_STATUS__TCP_BUSY__SHIFT 0x0
++#define TCP_STATUS__INPUT_BUSY__SHIFT 0x1
++#define TCP_STATUS__ADRS_BUSY__SHIFT 0x2
++#define TCP_STATUS__TAGRAMS_BUSY__SHIFT 0x3
++#define TCP_STATUS__CNTRL_BUSY__SHIFT 0x4
++#define TCP_STATUS__LFIFO_BUSY__SHIFT 0x5
++#define TCP_STATUS__READ_BUSY__SHIFT 0x6
++#define TCP_STATUS__FORMAT_BUSY__SHIFT 0x7
++#define TCP_STATUS__VM_BUSY__SHIFT 0x8
++#define TCP_STATUS__TCP_BUSY_MASK 0x00000001L
++#define TCP_STATUS__INPUT_BUSY_MASK 0x00000002L
++#define TCP_STATUS__ADRS_BUSY_MASK 0x00000004L
++#define TCP_STATUS__TAGRAMS_BUSY_MASK 0x00000008L
++#define TCP_STATUS__CNTRL_BUSY_MASK 0x00000010L
++#define TCP_STATUS__LFIFO_BUSY_MASK 0x00000020L
++#define TCP_STATUS__READ_BUSY_MASK 0x00000040L
++#define TCP_STATUS__FORMAT_BUSY_MASK 0x00000080L
++#define TCP_STATUS__VM_BUSY_MASK 0x00000100L
++//TCP_CNTL
++#define TCP_CNTL__FORCE_HIT__SHIFT 0x0
++#define TCP_CNTL__FORCE_MISS__SHIFT 0x1
++#define TCP_CNTL__L1_SIZE__SHIFT 0x2
++#define TCP_CNTL__FLAT_BUF_HASH_ENABLE__SHIFT 0x4
++#define TCP_CNTL__FLAT_BUF_CACHE_SWIZZLE__SHIFT 0x5
++#define TCP_CNTL__FORCE_EOW_TOTAL_CNT__SHIFT 0xf
++#define TCP_CNTL__FORCE_EOW_TAGRAM_CNT__SHIFT 0x16
++#define TCP_CNTL__DISABLE_Z_MAP__SHIFT 0x1c
++#define TCP_CNTL__INV_ALL_VMIDS__SHIFT 0x1d
++#define TCP_CNTL__ASTC_VE_MSB_TOLERANT__SHIFT 0x1e
++#define TCP_CNTL__FORCE_HIT_MASK 0x00000001L
++#define TCP_CNTL__FORCE_MISS_MASK 0x00000002L
++#define TCP_CNTL__L1_SIZE_MASK 0x0000000CL
++#define TCP_CNTL__FLAT_BUF_HASH_ENABLE_MASK 0x00000010L
++#define TCP_CNTL__FLAT_BUF_CACHE_SWIZZLE_MASK 0x00000020L
++#define TCP_CNTL__FORCE_EOW_TOTAL_CNT_MASK 0x001F8000L
++#define TCP_CNTL__FORCE_EOW_TAGRAM_CNT_MASK 0x0FC00000L
++#define TCP_CNTL__DISABLE_Z_MAP_MASK 0x10000000L
++#define TCP_CNTL__INV_ALL_VMIDS_MASK 0x20000000L
++#define TCP_CNTL__ASTC_VE_MSB_TOLERANT_MASK 0x40000000L
++//TCP_CHAN_STEER_LO
++#define TCP_CHAN_STEER_LO__CHAN0__SHIFT 0x0
++#define TCP_CHAN_STEER_LO__CHAN1__SHIFT 0x4
++#define TCP_CHAN_STEER_LO__CHAN2__SHIFT 0x8
++#define TCP_CHAN_STEER_LO__CHAN3__SHIFT 0xc
++#define TCP_CHAN_STEER_LO__CHAN4__SHIFT 0x10
++#define TCP_CHAN_STEER_LO__CHAN5__SHIFT 0x14
++#define TCP_CHAN_STEER_LO__CHAN6__SHIFT 0x18
++#define TCP_CHAN_STEER_LO__CHAN7__SHIFT 0x1c
++#define TCP_CHAN_STEER_LO__CHAN0_MASK 0x0000000FL
++#define TCP_CHAN_STEER_LO__CHAN1_MASK 0x000000F0L
++#define TCP_CHAN_STEER_LO__CHAN2_MASK 0x00000F00L
++#define TCP_CHAN_STEER_LO__CHAN3_MASK 0x0000F000L
++#define TCP_CHAN_STEER_LO__CHAN4_MASK 0x000F0000L
++#define TCP_CHAN_STEER_LO__CHAN5_MASK 0x00F00000L
++#define TCP_CHAN_STEER_LO__CHAN6_MASK 0x0F000000L
++#define TCP_CHAN_STEER_LO__CHAN7_MASK 0xF0000000L
++//TCP_CHAN_STEER_HI
++#define TCP_CHAN_STEER_HI__CHAN8__SHIFT 0x0
++#define TCP_CHAN_STEER_HI__CHAN9__SHIFT 0x4
++#define TCP_CHAN_STEER_HI__CHANA__SHIFT 0x8
++#define TCP_CHAN_STEER_HI__CHANB__SHIFT 0xc
++#define TCP_CHAN_STEER_HI__CHANC__SHIFT 0x10
++#define TCP_CHAN_STEER_HI__CHAND__SHIFT 0x14
++#define TCP_CHAN_STEER_HI__CHANE__SHIFT 0x18
++#define TCP_CHAN_STEER_HI__CHANF__SHIFT 0x1c
++#define TCP_CHAN_STEER_HI__CHAN8_MASK 0x0000000FL
++#define TCP_CHAN_STEER_HI__CHAN9_MASK 0x000000F0L
++#define TCP_CHAN_STEER_HI__CHANA_MASK 0x00000F00L
++#define TCP_CHAN_STEER_HI__CHANB_MASK 0x0000F000L
++#define TCP_CHAN_STEER_HI__CHANC_MASK 0x000F0000L
++#define TCP_CHAN_STEER_HI__CHAND_MASK 0x00F00000L
++#define TCP_CHAN_STEER_HI__CHANE_MASK 0x0F000000L
++#define TCP_CHAN_STEER_HI__CHANF_MASK 0xF0000000L
++//TCP_ADDR_CONFIG
++#define TCP_ADDR_CONFIG__NUM_TCC_BANKS__SHIFT 0x0
++#define TCP_ADDR_CONFIG__NUM_BANKS__SHIFT 0x4
++#define TCP_ADDR_CONFIG__COLHI_WIDTH__SHIFT 0x6
++#define TCP_ADDR_CONFIG__RB_SPLIT_COLHI__SHIFT 0x9
++#define TCP_ADDR_CONFIG__NUM_TCC_BANKS_MASK 0x0000000FL
++#define TCP_ADDR_CONFIG__NUM_BANKS_MASK 0x00000030L
++#define TCP_ADDR_CONFIG__COLHI_WIDTH_MASK 0x000001C0L
++#define TCP_ADDR_CONFIG__RB_SPLIT_COLHI_MASK 0x00000200L
++//TCP_CREDIT
++#define TCP_CREDIT__LFIFO_CREDIT__SHIFT 0x0
++#define TCP_CREDIT__REQ_FIFO_CREDIT__SHIFT 0x10
++#define TCP_CREDIT__TD_CREDIT__SHIFT 0x1d
++#define TCP_CREDIT__LFIFO_CREDIT_MASK 0x000003FFL
++#define TCP_CREDIT__REQ_FIFO_CREDIT_MASK 0x007F0000L
++#define TCP_CREDIT__TD_CREDIT_MASK 0xE0000000L
++//TCP_BUFFER_ADDR_HASH_CNTL
++#define TCP_BUFFER_ADDR_HASH_CNTL__CHANNEL_BITS__SHIFT 0x0
++#define TCP_BUFFER_ADDR_HASH_CNTL__BANK_BITS__SHIFT 0x8
++#define TCP_BUFFER_ADDR_HASH_CNTL__CHANNEL_XOR_COUNT__SHIFT 0x10
++#define TCP_BUFFER_ADDR_HASH_CNTL__BANK_XOR_COUNT__SHIFT 0x18
++#define TCP_BUFFER_ADDR_HASH_CNTL__CHANNEL_BITS_MASK 0x00000007L
++#define TCP_BUFFER_ADDR_HASH_CNTL__BANK_BITS_MASK 0x00000700L
++#define TCP_BUFFER_ADDR_HASH_CNTL__CHANNEL_XOR_COUNT_MASK 0x00070000L
++#define TCP_BUFFER_ADDR_HASH_CNTL__BANK_XOR_COUNT_MASK 0x07000000L
++//TCP_EDC_CNT
++#define TCP_EDC_CNT__SEC_COUNT__SHIFT 0x0
++#define TCP_EDC_CNT__LFIFO_SED_COUNT__SHIFT 0x8
++#define TCP_EDC_CNT__DED_COUNT__SHIFT 0x10
++#define TCP_EDC_CNT__SEC_COUNT_MASK 0x000000FFL
++#define TCP_EDC_CNT__LFIFO_SED_COUNT_MASK 0x0000FF00L
++#define TCP_EDC_CNT__DED_COUNT_MASK 0x00FF0000L
++//TC_CFG_L1_LOAD_POLICY0
++#define TC_CFG_L1_LOAD_POLICY0__POLICY_0__SHIFT 0x0
++#define TC_CFG_L1_LOAD_POLICY0__POLICY_1__SHIFT 0x2
++#define TC_CFG_L1_LOAD_POLICY0__POLICY_2__SHIFT 0x4
++#define TC_CFG_L1_LOAD_POLICY0__POLICY_3__SHIFT 0x6
++#define TC_CFG_L1_LOAD_POLICY0__POLICY_4__SHIFT 0x8
++#define TC_CFG_L1_LOAD_POLICY0__POLICY_5__SHIFT 0xa
++#define TC_CFG_L1_LOAD_POLICY0__POLICY_6__SHIFT 0xc
++#define TC_CFG_L1_LOAD_POLICY0__POLICY_7__SHIFT 0xe
++#define TC_CFG_L1_LOAD_POLICY0__POLICY_8__SHIFT 0x10
++#define TC_CFG_L1_LOAD_POLICY0__POLICY_9__SHIFT 0x12
++#define TC_CFG_L1_LOAD_POLICY0__POLICY_10__SHIFT 0x14
++#define TC_CFG_L1_LOAD_POLICY0__POLICY_11__SHIFT 0x16
++#define TC_CFG_L1_LOAD_POLICY0__POLICY_12__SHIFT 0x18
++#define TC_CFG_L1_LOAD_POLICY0__POLICY_13__SHIFT 0x1a
++#define TC_CFG_L1_LOAD_POLICY0__POLICY_14__SHIFT 0x1c
++#define TC_CFG_L1_LOAD_POLICY0__POLICY_15__SHIFT 0x1e
++#define TC_CFG_L1_LOAD_POLICY0__POLICY_0_MASK 0x00000003L
++#define TC_CFG_L1_LOAD_POLICY0__POLICY_1_MASK 0x0000000CL
++#define TC_CFG_L1_LOAD_POLICY0__POLICY_2_MASK 0x00000030L
++#define TC_CFG_L1_LOAD_POLICY0__POLICY_3_MASK 0x000000C0L
++#define TC_CFG_L1_LOAD_POLICY0__POLICY_4_MASK 0x00000300L
++#define TC_CFG_L1_LOAD_POLICY0__POLICY_5_MASK 0x00000C00L
++#define TC_CFG_L1_LOAD_POLICY0__POLICY_6_MASK 0x00003000L
++#define TC_CFG_L1_LOAD_POLICY0__POLICY_7_MASK 0x0000C000L
++#define TC_CFG_L1_LOAD_POLICY0__POLICY_8_MASK 0x00030000L
++#define TC_CFG_L1_LOAD_POLICY0__POLICY_9_MASK 0x000C0000L
++#define TC_CFG_L1_LOAD_POLICY0__POLICY_10_MASK 0x00300000L
++#define TC_CFG_L1_LOAD_POLICY0__POLICY_11_MASK 0x00C00000L
++#define TC_CFG_L1_LOAD_POLICY0__POLICY_12_MASK 0x03000000L
++#define TC_CFG_L1_LOAD_POLICY0__POLICY_13_MASK 0x0C000000L
++#define TC_CFG_L1_LOAD_POLICY0__POLICY_14_MASK 0x30000000L
++#define TC_CFG_L1_LOAD_POLICY0__POLICY_15_MASK 0xC0000000L
++//TC_CFG_L1_LOAD_POLICY1
++#define TC_CFG_L1_LOAD_POLICY1__POLICY_16__SHIFT 0x0
++#define TC_CFG_L1_LOAD_POLICY1__POLICY_17__SHIFT 0x2
++#define TC_CFG_L1_LOAD_POLICY1__POLICY_18__SHIFT 0x4
++#define TC_CFG_L1_LOAD_POLICY1__POLICY_19__SHIFT 0x6
++#define TC_CFG_L1_LOAD_POLICY1__POLICY_20__SHIFT 0x8
++#define TC_CFG_L1_LOAD_POLICY1__POLICY_21__SHIFT 0xa
++#define TC_CFG_L1_LOAD_POLICY1__POLICY_22__SHIFT 0xc
++#define TC_CFG_L1_LOAD_POLICY1__POLICY_23__SHIFT 0xe
++#define TC_CFG_L1_LOAD_POLICY1__POLICY_24__SHIFT 0x10
++#define TC_CFG_L1_LOAD_POLICY1__POLICY_25__SHIFT 0x12
++#define TC_CFG_L1_LOAD_POLICY1__POLICY_26__SHIFT 0x14
++#define TC_CFG_L1_LOAD_POLICY1__POLICY_27__SHIFT 0x16
++#define TC_CFG_L1_LOAD_POLICY1__POLICY_28__SHIFT 0x18
++#define TC_CFG_L1_LOAD_POLICY1__POLICY_29__SHIFT 0x1a
++#define TC_CFG_L1_LOAD_POLICY1__POLICY_30__SHIFT 0x1c
++#define TC_CFG_L1_LOAD_POLICY1__POLICY_31__SHIFT 0x1e
++#define TC_CFG_L1_LOAD_POLICY1__POLICY_16_MASK 0x00000003L
++#define TC_CFG_L1_LOAD_POLICY1__POLICY_17_MASK 0x0000000CL
++#define TC_CFG_L1_LOAD_POLICY1__POLICY_18_MASK 0x00000030L
++#define TC_CFG_L1_LOAD_POLICY1__POLICY_19_MASK 0x000000C0L
++#define TC_CFG_L1_LOAD_POLICY1__POLICY_20_MASK 0x00000300L
++#define TC_CFG_L1_LOAD_POLICY1__POLICY_21_MASK 0x00000C00L
++#define TC_CFG_L1_LOAD_POLICY1__POLICY_22_MASK 0x00003000L
++#define TC_CFG_L1_LOAD_POLICY1__POLICY_23_MASK 0x0000C000L
++#define TC_CFG_L1_LOAD_POLICY1__POLICY_24_MASK 0x00030000L
++#define TC_CFG_L1_LOAD_POLICY1__POLICY_25_MASK 0x000C0000L
++#define TC_CFG_L1_LOAD_POLICY1__POLICY_26_MASK 0x00300000L
++#define TC_CFG_L1_LOAD_POLICY1__POLICY_27_MASK 0x00C00000L
++#define TC_CFG_L1_LOAD_POLICY1__POLICY_28_MASK 0x03000000L
++#define TC_CFG_L1_LOAD_POLICY1__POLICY_29_MASK 0x0C000000L
++#define TC_CFG_L1_LOAD_POLICY1__POLICY_30_MASK 0x30000000L
++#define TC_CFG_L1_LOAD_POLICY1__POLICY_31_MASK 0xC0000000L
++//TC_CFG_L1_STORE_POLICY
++#define TC_CFG_L1_STORE_POLICY__POLICY_0__SHIFT 0x0
++#define TC_CFG_L1_STORE_POLICY__POLICY_1__SHIFT 0x1
++#define TC_CFG_L1_STORE_POLICY__POLICY_2__SHIFT 0x2
++#define TC_CFG_L1_STORE_POLICY__POLICY_3__SHIFT 0x3
++#define TC_CFG_L1_STORE_POLICY__POLICY_4__SHIFT 0x4
++#define TC_CFG_L1_STORE_POLICY__POLICY_5__SHIFT 0x5
++#define TC_CFG_L1_STORE_POLICY__POLICY_6__SHIFT 0x6
++#define TC_CFG_L1_STORE_POLICY__POLICY_7__SHIFT 0x7
++#define TC_CFG_L1_STORE_POLICY__POLICY_8__SHIFT 0x8
++#define TC_CFG_L1_STORE_POLICY__POLICY_9__SHIFT 0x9
++#define TC_CFG_L1_STORE_POLICY__POLICY_10__SHIFT 0xa
++#define TC_CFG_L1_STORE_POLICY__POLICY_11__SHIFT 0xb
++#define TC_CFG_L1_STORE_POLICY__POLICY_12__SHIFT 0xc
++#define TC_CFG_L1_STORE_POLICY__POLICY_13__SHIFT 0xd
++#define TC_CFG_L1_STORE_POLICY__POLICY_14__SHIFT 0xe
++#define TC_CFG_L1_STORE_POLICY__POLICY_15__SHIFT 0xf
++#define TC_CFG_L1_STORE_POLICY__POLICY_16__SHIFT 0x10
++#define TC_CFG_L1_STORE_POLICY__POLICY_17__SHIFT 0x11
++#define TC_CFG_L1_STORE_POLICY__POLICY_18__SHIFT 0x12
++#define TC_CFG_L1_STORE_POLICY__POLICY_19__SHIFT 0x13
++#define TC_CFG_L1_STORE_POLICY__POLICY_20__SHIFT 0x14
++#define TC_CFG_L1_STORE_POLICY__POLICY_21__SHIFT 0x15
++#define TC_CFG_L1_STORE_POLICY__POLICY_22__SHIFT 0x16
++#define TC_CFG_L1_STORE_POLICY__POLICY_23__SHIFT 0x17
++#define TC_CFG_L1_STORE_POLICY__POLICY_24__SHIFT 0x18
++#define TC_CFG_L1_STORE_POLICY__POLICY_25__SHIFT 0x19
++#define TC_CFG_L1_STORE_POLICY__POLICY_26__SHIFT 0x1a
++#define TC_CFG_L1_STORE_POLICY__POLICY_27__SHIFT 0x1b
++#define TC_CFG_L1_STORE_POLICY__POLICY_28__SHIFT 0x1c
++#define TC_CFG_L1_STORE_POLICY__POLICY_29__SHIFT 0x1d
++#define TC_CFG_L1_STORE_POLICY__POLICY_30__SHIFT 0x1e
++#define TC_CFG_L1_STORE_POLICY__POLICY_31__SHIFT 0x1f
++#define TC_CFG_L1_STORE_POLICY__POLICY_0_MASK 0x00000001L
++#define TC_CFG_L1_STORE_POLICY__POLICY_1_MASK 0x00000002L
++#define TC_CFG_L1_STORE_POLICY__POLICY_2_MASK 0x00000004L
++#define TC_CFG_L1_STORE_POLICY__POLICY_3_MASK 0x00000008L
++#define TC_CFG_L1_STORE_POLICY__POLICY_4_MASK 0x00000010L
++#define TC_CFG_L1_STORE_POLICY__POLICY_5_MASK 0x00000020L
++#define TC_CFG_L1_STORE_POLICY__POLICY_6_MASK 0x00000040L
++#define TC_CFG_L1_STORE_POLICY__POLICY_7_MASK 0x00000080L
++#define TC_CFG_L1_STORE_POLICY__POLICY_8_MASK 0x00000100L
++#define TC_CFG_L1_STORE_POLICY__POLICY_9_MASK 0x00000200L
++#define TC_CFG_L1_STORE_POLICY__POLICY_10_MASK 0x00000400L
++#define TC_CFG_L1_STORE_POLICY__POLICY_11_MASK 0x00000800L
++#define TC_CFG_L1_STORE_POLICY__POLICY_12_MASK 0x00001000L
++#define TC_CFG_L1_STORE_POLICY__POLICY_13_MASK 0x00002000L
++#define TC_CFG_L1_STORE_POLICY__POLICY_14_MASK 0x00004000L
++#define TC_CFG_L1_STORE_POLICY__POLICY_15_MASK 0x00008000L
++#define TC_CFG_L1_STORE_POLICY__POLICY_16_MASK 0x00010000L
++#define TC_CFG_L1_STORE_POLICY__POLICY_17_MASK 0x00020000L
++#define TC_CFG_L1_STORE_POLICY__POLICY_18_MASK 0x00040000L
++#define TC_CFG_L1_STORE_POLICY__POLICY_19_MASK 0x00080000L
++#define TC_CFG_L1_STORE_POLICY__POLICY_20_MASK 0x00100000L
++#define TC_CFG_L1_STORE_POLICY__POLICY_21_MASK 0x00200000L
++#define TC_CFG_L1_STORE_POLICY__POLICY_22_MASK 0x00400000L
++#define TC_CFG_L1_STORE_POLICY__POLICY_23_MASK 0x00800000L
++#define TC_CFG_L1_STORE_POLICY__POLICY_24_MASK 0x01000000L
++#define TC_CFG_L1_STORE_POLICY__POLICY_25_MASK 0x02000000L
++#define TC_CFG_L1_STORE_POLICY__POLICY_26_MASK 0x04000000L
++#define TC_CFG_L1_STORE_POLICY__POLICY_27_MASK 0x08000000L
++#define TC_CFG_L1_STORE_POLICY__POLICY_28_MASK 0x10000000L
++#define TC_CFG_L1_STORE_POLICY__POLICY_29_MASK 0x20000000L
++#define TC_CFG_L1_STORE_POLICY__POLICY_30_MASK 0x40000000L
++#define TC_CFG_L1_STORE_POLICY__POLICY_31_MASK 0x80000000L
++//TC_CFG_L2_LOAD_POLICY0
++#define TC_CFG_L2_LOAD_POLICY0__POLICY_0__SHIFT 0x0
++#define TC_CFG_L2_LOAD_POLICY0__POLICY_1__SHIFT 0x2
++#define TC_CFG_L2_LOAD_POLICY0__POLICY_2__SHIFT 0x4
++#define TC_CFG_L2_LOAD_POLICY0__POLICY_3__SHIFT 0x6
++#define TC_CFG_L2_LOAD_POLICY0__POLICY_4__SHIFT 0x8
++#define TC_CFG_L2_LOAD_POLICY0__POLICY_5__SHIFT 0xa
++#define TC_CFG_L2_LOAD_POLICY0__POLICY_6__SHIFT 0xc
++#define TC_CFG_L2_LOAD_POLICY0__POLICY_7__SHIFT 0xe
++#define TC_CFG_L2_LOAD_POLICY0__POLICY_8__SHIFT 0x10
++#define TC_CFG_L2_LOAD_POLICY0__POLICY_9__SHIFT 0x12
++#define TC_CFG_L2_LOAD_POLICY0__POLICY_10__SHIFT 0x14
++#define TC_CFG_L2_LOAD_POLICY0__POLICY_11__SHIFT 0x16
++#define TC_CFG_L2_LOAD_POLICY0__POLICY_12__SHIFT 0x18
++#define TC_CFG_L2_LOAD_POLICY0__POLICY_13__SHIFT 0x1a
++#define TC_CFG_L2_LOAD_POLICY0__POLICY_14__SHIFT 0x1c
++#define TC_CFG_L2_LOAD_POLICY0__POLICY_15__SHIFT 0x1e
++#define TC_CFG_L2_LOAD_POLICY0__POLICY_0_MASK 0x00000003L
++#define TC_CFG_L2_LOAD_POLICY0__POLICY_1_MASK 0x0000000CL
++#define TC_CFG_L2_LOAD_POLICY0__POLICY_2_MASK 0x00000030L
++#define TC_CFG_L2_LOAD_POLICY0__POLICY_3_MASK 0x000000C0L
++#define TC_CFG_L2_LOAD_POLICY0__POLICY_4_MASK 0x00000300L
++#define TC_CFG_L2_LOAD_POLICY0__POLICY_5_MASK 0x00000C00L
++#define TC_CFG_L2_LOAD_POLICY0__POLICY_6_MASK 0x00003000L
++#define TC_CFG_L2_LOAD_POLICY0__POLICY_7_MASK 0x0000C000L
++#define TC_CFG_L2_LOAD_POLICY0__POLICY_8_MASK 0x00030000L
++#define TC_CFG_L2_LOAD_POLICY0__POLICY_9_MASK 0x000C0000L
++#define TC_CFG_L2_LOAD_POLICY0__POLICY_10_MASK 0x00300000L
++#define TC_CFG_L2_LOAD_POLICY0__POLICY_11_MASK 0x00C00000L
++#define TC_CFG_L2_LOAD_POLICY0__POLICY_12_MASK 0x03000000L
++#define TC_CFG_L2_LOAD_POLICY0__POLICY_13_MASK 0x0C000000L
++#define TC_CFG_L2_LOAD_POLICY0__POLICY_14_MASK 0x30000000L
++#define TC_CFG_L2_LOAD_POLICY0__POLICY_15_MASK 0xC0000000L
++//TC_CFG_L2_LOAD_POLICY1
++#define TC_CFG_L2_LOAD_POLICY1__POLICY_16__SHIFT 0x0
++#define TC_CFG_L2_LOAD_POLICY1__POLICY_17__SHIFT 0x2
++#define TC_CFG_L2_LOAD_POLICY1__POLICY_18__SHIFT 0x4
++#define TC_CFG_L2_LOAD_POLICY1__POLICY_19__SHIFT 0x6
++#define TC_CFG_L2_LOAD_POLICY1__POLICY_20__SHIFT 0x8
++#define TC_CFG_L2_LOAD_POLICY1__POLICY_21__SHIFT 0xa
++#define TC_CFG_L2_LOAD_POLICY1__POLICY_22__SHIFT 0xc
++#define TC_CFG_L2_LOAD_POLICY1__POLICY_23__SHIFT 0xe
++#define TC_CFG_L2_LOAD_POLICY1__POLICY_24__SHIFT 0x10
++#define TC_CFG_L2_LOAD_POLICY1__POLICY_25__SHIFT 0x12
++#define TC_CFG_L2_LOAD_POLICY1__POLICY_26__SHIFT 0x14
++#define TC_CFG_L2_LOAD_POLICY1__POLICY_27__SHIFT 0x16
++#define TC_CFG_L2_LOAD_POLICY1__POLICY_28__SHIFT 0x18
++#define TC_CFG_L2_LOAD_POLICY1__POLICY_29__SHIFT 0x1a
++#define TC_CFG_L2_LOAD_POLICY1__POLICY_30__SHIFT 0x1c
++#define TC_CFG_L2_LOAD_POLICY1__POLICY_31__SHIFT 0x1e
++#define TC_CFG_L2_LOAD_POLICY1__POLICY_16_MASK 0x00000003L
++#define TC_CFG_L2_LOAD_POLICY1__POLICY_17_MASK 0x0000000CL
++#define TC_CFG_L2_LOAD_POLICY1__POLICY_18_MASK 0x00000030L
++#define TC_CFG_L2_LOAD_POLICY1__POLICY_19_MASK 0x000000C0L
++#define TC_CFG_L2_LOAD_POLICY1__POLICY_20_MASK 0x00000300L
++#define TC_CFG_L2_LOAD_POLICY1__POLICY_21_MASK 0x00000C00L
++#define TC_CFG_L2_LOAD_POLICY1__POLICY_22_MASK 0x00003000L
++#define TC_CFG_L2_LOAD_POLICY1__POLICY_23_MASK 0x0000C000L
++#define TC_CFG_L2_LOAD_POLICY1__POLICY_24_MASK 0x00030000L
++#define TC_CFG_L2_LOAD_POLICY1__POLICY_25_MASK 0x000C0000L
++#define TC_CFG_L2_LOAD_POLICY1__POLICY_26_MASK 0x00300000L
++#define TC_CFG_L2_LOAD_POLICY1__POLICY_27_MASK 0x00C00000L
++#define TC_CFG_L2_LOAD_POLICY1__POLICY_28_MASK 0x03000000L
++#define TC_CFG_L2_LOAD_POLICY1__POLICY_29_MASK 0x0C000000L
++#define TC_CFG_L2_LOAD_POLICY1__POLICY_30_MASK 0x30000000L
++#define TC_CFG_L2_LOAD_POLICY1__POLICY_31_MASK 0xC0000000L
++//TC_CFG_L2_STORE_POLICY0
++#define TC_CFG_L2_STORE_POLICY0__POLICY_0__SHIFT 0x0
++#define TC_CFG_L2_STORE_POLICY0__POLICY_1__SHIFT 0x2
++#define TC_CFG_L2_STORE_POLICY0__POLICY_2__SHIFT 0x4
++#define TC_CFG_L2_STORE_POLICY0__POLICY_3__SHIFT 0x6
++#define TC_CFG_L2_STORE_POLICY0__POLICY_4__SHIFT 0x8
++#define TC_CFG_L2_STORE_POLICY0__POLICY_5__SHIFT 0xa
++#define TC_CFG_L2_STORE_POLICY0__POLICY_6__SHIFT 0xc
++#define TC_CFG_L2_STORE_POLICY0__POLICY_7__SHIFT 0xe
++#define TC_CFG_L2_STORE_POLICY0__POLICY_8__SHIFT 0x10
++#define TC_CFG_L2_STORE_POLICY0__POLICY_9__SHIFT 0x12
++#define TC_CFG_L2_STORE_POLICY0__POLICY_10__SHIFT 0x14
++#define TC_CFG_L2_STORE_POLICY0__POLICY_11__SHIFT 0x16
++#define TC_CFG_L2_STORE_POLICY0__POLICY_12__SHIFT 0x18
++#define TC_CFG_L2_STORE_POLICY0__POLICY_13__SHIFT 0x1a
++#define TC_CFG_L2_STORE_POLICY0__POLICY_14__SHIFT 0x1c
++#define TC_CFG_L2_STORE_POLICY0__POLICY_15__SHIFT 0x1e
++#define TC_CFG_L2_STORE_POLICY0__POLICY_0_MASK 0x00000003L
++#define TC_CFG_L2_STORE_POLICY0__POLICY_1_MASK 0x0000000CL
++#define TC_CFG_L2_STORE_POLICY0__POLICY_2_MASK 0x00000030L
++#define TC_CFG_L2_STORE_POLICY0__POLICY_3_MASK 0x000000C0L
++#define TC_CFG_L2_STORE_POLICY0__POLICY_4_MASK 0x00000300L
++#define TC_CFG_L2_STORE_POLICY0__POLICY_5_MASK 0x00000C00L
++#define TC_CFG_L2_STORE_POLICY0__POLICY_6_MASK 0x00003000L
++#define TC_CFG_L2_STORE_POLICY0__POLICY_7_MASK 0x0000C000L
++#define TC_CFG_L2_STORE_POLICY0__POLICY_8_MASK 0x00030000L
++#define TC_CFG_L2_STORE_POLICY0__POLICY_9_MASK 0x000C0000L
++#define TC_CFG_L2_STORE_POLICY0__POLICY_10_MASK 0x00300000L
++#define TC_CFG_L2_STORE_POLICY0__POLICY_11_MASK 0x00C00000L
++#define TC_CFG_L2_STORE_POLICY0__POLICY_12_MASK 0x03000000L
++#define TC_CFG_L2_STORE_POLICY0__POLICY_13_MASK 0x0C000000L
++#define TC_CFG_L2_STORE_POLICY0__POLICY_14_MASK 0x30000000L
++#define TC_CFG_L2_STORE_POLICY0__POLICY_15_MASK 0xC0000000L
++//TC_CFG_L2_STORE_POLICY1
++#define TC_CFG_L2_STORE_POLICY1__POLICY_16__SHIFT 0x0
++#define TC_CFG_L2_STORE_POLICY1__POLICY_17__SHIFT 0x2
++#define TC_CFG_L2_STORE_POLICY1__POLICY_18__SHIFT 0x4
++#define TC_CFG_L2_STORE_POLICY1__POLICY_19__SHIFT 0x6
++#define TC_CFG_L2_STORE_POLICY1__POLICY_20__SHIFT 0x8
++#define TC_CFG_L2_STORE_POLICY1__POLICY_21__SHIFT 0xa
++#define TC_CFG_L2_STORE_POLICY1__POLICY_22__SHIFT 0xc
++#define TC_CFG_L2_STORE_POLICY1__POLICY_23__SHIFT 0xe
++#define TC_CFG_L2_STORE_POLICY1__POLICY_24__SHIFT 0x10
++#define TC_CFG_L2_STORE_POLICY1__POLICY_25__SHIFT 0x12
++#define TC_CFG_L2_STORE_POLICY1__POLICY_26__SHIFT 0x14
++#define TC_CFG_L2_STORE_POLICY1__POLICY_27__SHIFT 0x16
++#define TC_CFG_L2_STORE_POLICY1__POLICY_28__SHIFT 0x18
++#define TC_CFG_L2_STORE_POLICY1__POLICY_29__SHIFT 0x1a
++#define TC_CFG_L2_STORE_POLICY1__POLICY_30__SHIFT 0x1c
++#define TC_CFG_L2_STORE_POLICY1__POLICY_31__SHIFT 0x1e
++#define TC_CFG_L2_STORE_POLICY1__POLICY_16_MASK 0x00000003L
++#define TC_CFG_L2_STORE_POLICY1__POLICY_17_MASK 0x0000000CL
++#define TC_CFG_L2_STORE_POLICY1__POLICY_18_MASK 0x00000030L
++#define TC_CFG_L2_STORE_POLICY1__POLICY_19_MASK 0x000000C0L
++#define TC_CFG_L2_STORE_POLICY1__POLICY_20_MASK 0x00000300L
++#define TC_CFG_L2_STORE_POLICY1__POLICY_21_MASK 0x00000C00L
++#define TC_CFG_L2_STORE_POLICY1__POLICY_22_MASK 0x00003000L
++#define TC_CFG_L2_STORE_POLICY1__POLICY_23_MASK 0x0000C000L
++#define TC_CFG_L2_STORE_POLICY1__POLICY_24_MASK 0x00030000L
++#define TC_CFG_L2_STORE_POLICY1__POLICY_25_MASK 0x000C0000L
++#define TC_CFG_L2_STORE_POLICY1__POLICY_26_MASK 0x00300000L
++#define TC_CFG_L2_STORE_POLICY1__POLICY_27_MASK 0x00C00000L
++#define TC_CFG_L2_STORE_POLICY1__POLICY_28_MASK 0x03000000L
++#define TC_CFG_L2_STORE_POLICY1__POLICY_29_MASK 0x0C000000L
++#define TC_CFG_L2_STORE_POLICY1__POLICY_30_MASK 0x30000000L
++#define TC_CFG_L2_STORE_POLICY1__POLICY_31_MASK 0xC0000000L
++//TC_CFG_L2_ATOMIC_POLICY
++#define TC_CFG_L2_ATOMIC_POLICY__POLICY_0__SHIFT 0x0
++#define TC_CFG_L2_ATOMIC_POLICY__POLICY_1__SHIFT 0x2
++#define TC_CFG_L2_ATOMIC_POLICY__POLICY_2__SHIFT 0x4
++#define TC_CFG_L2_ATOMIC_POLICY__POLICY_3__SHIFT 0x6
++#define TC_CFG_L2_ATOMIC_POLICY__POLICY_4__SHIFT 0x8
++#define TC_CFG_L2_ATOMIC_POLICY__POLICY_5__SHIFT 0xa
++#define TC_CFG_L2_ATOMIC_POLICY__POLICY_6__SHIFT 0xc
++#define TC_CFG_L2_ATOMIC_POLICY__POLICY_7__SHIFT 0xe
++#define TC_CFG_L2_ATOMIC_POLICY__POLICY_8__SHIFT 0x10
++#define TC_CFG_L2_ATOMIC_POLICY__POLICY_9__SHIFT 0x12
++#define TC_CFG_L2_ATOMIC_POLICY__POLICY_10__SHIFT 0x14
++#define TC_CFG_L2_ATOMIC_POLICY__POLICY_11__SHIFT 0x16
++#define TC_CFG_L2_ATOMIC_POLICY__POLICY_12__SHIFT 0x18
++#define TC_CFG_L2_ATOMIC_POLICY__POLICY_13__SHIFT 0x1a
++#define TC_CFG_L2_ATOMIC_POLICY__POLICY_14__SHIFT 0x1c
++#define TC_CFG_L2_ATOMIC_POLICY__POLICY_15__SHIFT 0x1e
++#define TC_CFG_L2_ATOMIC_POLICY__POLICY_0_MASK 0x00000003L
++#define TC_CFG_L2_ATOMIC_POLICY__POLICY_1_MASK 0x0000000CL
++#define TC_CFG_L2_ATOMIC_POLICY__POLICY_2_MASK 0x00000030L
++#define TC_CFG_L2_ATOMIC_POLICY__POLICY_3_MASK 0x000000C0L
++#define TC_CFG_L2_ATOMIC_POLICY__POLICY_4_MASK 0x00000300L
++#define TC_CFG_L2_ATOMIC_POLICY__POLICY_5_MASK 0x00000C00L
++#define TC_CFG_L2_ATOMIC_POLICY__POLICY_6_MASK 0x00003000L
++#define TC_CFG_L2_ATOMIC_POLICY__POLICY_7_MASK 0x0000C000L
++#define TC_CFG_L2_ATOMIC_POLICY__POLICY_8_MASK 0x00030000L
++#define TC_CFG_L2_ATOMIC_POLICY__POLICY_9_MASK 0x000C0000L
++#define TC_CFG_L2_ATOMIC_POLICY__POLICY_10_MASK 0x00300000L
++#define TC_CFG_L2_ATOMIC_POLICY__POLICY_11_MASK 0x00C00000L
++#define TC_CFG_L2_ATOMIC_POLICY__POLICY_12_MASK 0x03000000L
++#define TC_CFG_L2_ATOMIC_POLICY__POLICY_13_MASK 0x0C000000L
++#define TC_CFG_L2_ATOMIC_POLICY__POLICY_14_MASK 0x30000000L
++#define TC_CFG_L2_ATOMIC_POLICY__POLICY_15_MASK 0xC0000000L
++//TC_CFG_L1_VOLATILE
++#define TC_CFG_L1_VOLATILE__VOL__SHIFT 0x0
++#define TC_CFG_L1_VOLATILE__VOL_MASK 0x0000000FL
++//TC_CFG_L2_VOLATILE
++#define TC_CFG_L2_VOLATILE__VOL__SHIFT 0x0
++#define TC_CFG_L2_VOLATILE__VOL_MASK 0x0000000FL
++//TCI_STATUS
++#define TCI_STATUS__TCI_BUSY__SHIFT 0x0
++#define TCI_STATUS__TCI_BUSY_MASK 0x00000001L
++//TCI_CNTL_1
++#define TCI_CNTL_1__WBINVL1_NUM_CYCLES__SHIFT 0x0
++#define TCI_CNTL_1__REQ_FIFO_DEPTH__SHIFT 0x10
++#define TCI_CNTL_1__WDATA_RAM_DEPTH__SHIFT 0x18
++#define TCI_CNTL_1__WBINVL1_NUM_CYCLES_MASK 0x0000FFFFL
++#define TCI_CNTL_1__REQ_FIFO_DEPTH_MASK 0x00FF0000L
++#define TCI_CNTL_1__WDATA_RAM_DEPTH_MASK 0xFF000000L
++//TCI_CNTL_2
++#define TCI_CNTL_2__L1_INVAL_ON_WBINVL2__SHIFT 0x0
++#define TCI_CNTL_2__TCA_MAX_CREDIT__SHIFT 0x1
++#define TCI_CNTL_2__L1_INVAL_ON_WBINVL2_MASK 0x00000001L
++#define TCI_CNTL_2__TCA_MAX_CREDIT_MASK 0x000001FEL
++//TCC_CTRL
++#define TCC_CTRL__CACHE_SIZE__SHIFT 0x0
++#define TCC_CTRL__RATE__SHIFT 0x2
++#define TCC_CTRL__WRITEBACK_MARGIN__SHIFT 0x4
++#define TCC_CTRL__METADATA_LATENCY_FIFO_SIZE__SHIFT 0x8
++#define TCC_CTRL__SRC_FIFO_SIZE__SHIFT 0xc
++#define TCC_CTRL__LATENCY_FIFO_SIZE__SHIFT 0x10
++#define TCC_CTRL__LINEAR_SET_HASH__SHIFT 0x15
++#define TCC_CTRL__MDC_SIZE__SHIFT 0x18
++#define TCC_CTRL__MDC_SECTOR_SIZE__SHIFT 0x1a
++#define TCC_CTRL__MDC_SIDEBAND_FIFO_SIZE__SHIFT 0x1c
++#define TCC_CTRL__CACHE_SIZE_MASK 0x00000003L
++#define TCC_CTRL__RATE_MASK 0x0000000CL
++#define TCC_CTRL__WRITEBACK_MARGIN_MASK 0x000000F0L
++#define TCC_CTRL__METADATA_LATENCY_FIFO_SIZE_MASK 0x00000F00L
++#define TCC_CTRL__SRC_FIFO_SIZE_MASK 0x0000F000L
++#define TCC_CTRL__LATENCY_FIFO_SIZE_MASK 0x000F0000L
++#define TCC_CTRL__LINEAR_SET_HASH_MASK 0x00200000L
++#define TCC_CTRL__MDC_SIZE_MASK 0x03000000L
++#define TCC_CTRL__MDC_SECTOR_SIZE_MASK 0x0C000000L
++#define TCC_CTRL__MDC_SIDEBAND_FIFO_SIZE_MASK 0xF0000000L
++//TCC_CTRL2
++#define TCC_CTRL2__PROBE_FIFO_SIZE__SHIFT 0x0
++#define TCC_CTRL2__PROBE_FIFO_SIZE_MASK 0x0000000FL
++//TCC_EDC_CNT
++#define TCC_EDC_CNT__CACHE_DATA_SEC_COUNT__SHIFT 0x0
++#define TCC_EDC_CNT__CACHE_DATA_DED_COUNT__SHIFT 0x2
++#define TCC_EDC_CNT__CACHE_DIRTY_SEC_COUNT__SHIFT 0x4
++#define TCC_EDC_CNT__CACHE_DIRTY_DED_COUNT__SHIFT 0x6
++#define TCC_EDC_CNT__HIGH_RATE_TAG_SEC_COUNT__SHIFT 0x8
++#define TCC_EDC_CNT__HIGH_RATE_TAG_DED_COUNT__SHIFT 0xa
++#define TCC_EDC_CNT__LOW_RATE_TAG_SEC_COUNT__SHIFT 0xc
++#define TCC_EDC_CNT__LOW_RATE_TAG_DED_COUNT__SHIFT 0xe
++#define TCC_EDC_CNT__SRC_FIFO_SEC_COUNT__SHIFT 0x10
++#define TCC_EDC_CNT__SRC_FIFO_DED_COUNT__SHIFT 0x12
++#define TCC_EDC_CNT__IN_USE_DEC_SED_COUNT__SHIFT 0x14
++#define TCC_EDC_CNT__IN_USE_TRANSFER_SED_COUNT__SHIFT 0x16
++#define TCC_EDC_CNT__LATENCY_FIFO_SED_COUNT__SHIFT 0x18
++#define TCC_EDC_CNT__RETURN_DATA_SED_COUNT__SHIFT 0x1a
++#define TCC_EDC_CNT__RETURN_CONTROL_SED_COUNT__SHIFT 0x1c
++#define TCC_EDC_CNT__UC_ATOMIC_FIFO_SED_COUNT__SHIFT 0x1e
++#define TCC_EDC_CNT__CACHE_DATA_SEC_COUNT_MASK 0x00000003L
++#define TCC_EDC_CNT__CACHE_DATA_DED_COUNT_MASK 0x0000000CL
++#define TCC_EDC_CNT__CACHE_DIRTY_SEC_COUNT_MASK 0x00000030L
++#define TCC_EDC_CNT__CACHE_DIRTY_DED_COUNT_MASK 0x000000C0L
++#define TCC_EDC_CNT__HIGH_RATE_TAG_SEC_COUNT_MASK 0x00000300L
++#define TCC_EDC_CNT__HIGH_RATE_TAG_DED_COUNT_MASK 0x00000C00L
++#define TCC_EDC_CNT__LOW_RATE_TAG_SEC_COUNT_MASK 0x00003000L
++#define TCC_EDC_CNT__LOW_RATE_TAG_DED_COUNT_MASK 0x0000C000L
++#define TCC_EDC_CNT__SRC_FIFO_SEC_COUNT_MASK 0x00030000L
++#define TCC_EDC_CNT__SRC_FIFO_DED_COUNT_MASK 0x000C0000L
++#define TCC_EDC_CNT__IN_USE_DEC_SED_COUNT_MASK 0x00300000L
++#define TCC_EDC_CNT__IN_USE_TRANSFER_SED_COUNT_MASK 0x00C00000L
++#define TCC_EDC_CNT__LATENCY_FIFO_SED_COUNT_MASK 0x03000000L
++#define TCC_EDC_CNT__RETURN_DATA_SED_COUNT_MASK 0x0C000000L
++#define TCC_EDC_CNT__RETURN_CONTROL_SED_COUNT_MASK 0x30000000L
++#define TCC_EDC_CNT__UC_ATOMIC_FIFO_SED_COUNT_MASK 0xC0000000L
++//TCC_EDC_CNT2
++#define TCC_EDC_CNT2__WRITE_RETURN_SED_COUNT__SHIFT 0x0
++#define TCC_EDC_CNT2__WRITE_CACHE_READ_SED_COUNT__SHIFT 0x2
++#define TCC_EDC_CNT2__SRC_FIFO_NEXT_RAM_SED_COUNT__SHIFT 0x4
++#define TCC_EDC_CNT2__LATENCY_FIFO_NEXT_RAM_SED_COUNT__SHIFT 0x6
++#define TCC_EDC_CNT2__CACHE_TAG_PROBE_FIFO_SED_COUNT__SHIFT 0x8
++#define TCC_EDC_CNT2__WRITE_RETURN_SED_COUNT_MASK 0x00000003L
++#define TCC_EDC_CNT2__WRITE_CACHE_READ_SED_COUNT_MASK 0x0000000CL
++#define TCC_EDC_CNT2__SRC_FIFO_NEXT_RAM_SED_COUNT_MASK 0x00000030L
++#define TCC_EDC_CNT2__LATENCY_FIFO_NEXT_RAM_SED_COUNT_MASK 0x000000C0L
++#define TCC_EDC_CNT2__CACHE_TAG_PROBE_FIFO_SED_COUNT_MASK 0x00000300L
++//TCC_REDUNDANCY
++#define TCC_REDUNDANCY__MC_SEL0__SHIFT 0x0
++#define TCC_REDUNDANCY__MC_SEL1__SHIFT 0x1
++#define TCC_REDUNDANCY__MC_SEL0_MASK 0x00000001L
++#define TCC_REDUNDANCY__MC_SEL1_MASK 0x00000002L
++//TCC_EXE_DISABLE
++#define TCC_EXE_DISABLE__EXE_DISABLE__SHIFT 0x1
++#define TCC_EXE_DISABLE__EXE_DISABLE_MASK 0x00000002L
++//TCC_DSM_CNTL
++#define TCC_DSM_CNTL__CACHE_DATA_IRRITATOR_DATA_SEL__SHIFT 0x0
++#define TCC_DSM_CNTL__CACHE_DATA_IRRITATOR_SINGLE_WRITE__SHIFT 0x2
++#define TCC_DSM_CNTL__CACHE_DATA_BANK_0_1_IRRITATOR_DATA_SEL__SHIFT 0x3
++#define TCC_DSM_CNTL__CACHE_DATA_BANK_0_1_IRRITATOR_SINGLE_WRITE__SHIFT 0x5
++#define TCC_DSM_CNTL__CACHE_DATA_BANK_1_0_IRRITATOR_DATA_SEL__SHIFT 0x6
++#define TCC_DSM_CNTL__CACHE_DATA_BANK_1_0_IRRITATOR_SINGLE_WRITE__SHIFT 0x8
++#define TCC_DSM_CNTL__CACHE_DATA_BANK_1_1_IRRITATOR_DATA_SEL__SHIFT 0x9
++#define TCC_DSM_CNTL__CACHE_DATA_BANK_1_1_IRRITATOR_SINGLE_WRITE__SHIFT 0xb
++#define TCC_DSM_CNTL__CACHE_DIRTY_BANK_0_IRRITATOR_DATA_SEL__SHIFT 0xc
++#define TCC_DSM_CNTL__CACHE_DIRTY_BANK_0_IRRITATOR_SINGLE_WRITE__SHIFT 0xe
++#define TCC_DSM_CNTL__CACHE_DIRTY_BANK_1_IRRITATOR_DATA_SEL__SHIFT 0xf
++#define TCC_DSM_CNTL__CACHE_DIRTY_BANK_1_IRRITATOR_SINGLE_WRITE__SHIFT 0x11
++#define TCC_DSM_CNTL__HIGH_RATE_TAG_IRRITATOR_DATA_SEL__SHIFT 0x12
++#define TCC_DSM_CNTL__HIGH_RATE_TAG_IRRITATOR_SINGLE_WRITE__SHIFT 0x14
++#define TCC_DSM_CNTL__LOW_RATE_TAG_IRRITATOR_DATA_SEL__SHIFT 0x15
++#define TCC_DSM_CNTL__LOW_RATE_TAG_IRRITATOR_SINGLE_WRITE__SHIFT 0x17
++#define TCC_DSM_CNTL__IN_USE_DEC_IRRITATOR_DATA_SEL__SHIFT 0x18
++#define TCC_DSM_CNTL__IN_USE_DEC_IRRITATOR_SINGLE_WRITE__SHIFT 0x1a
++#define TCC_DSM_CNTL__IN_USE_TRANSFER_IRRITATOR_DATA_SEL__SHIFT 0x1b
++#define TCC_DSM_CNTL__IN_USE_TRANSFER_IRRITATOR_SINGLE_WRITE__SHIFT 0x1d
++#define TCC_DSM_CNTL__CACHE_DATA_IRRITATOR_DATA_SEL_MASK 0x00000003L
++#define TCC_DSM_CNTL__CACHE_DATA_IRRITATOR_SINGLE_WRITE_MASK 0x00000004L
++#define TCC_DSM_CNTL__CACHE_DATA_BANK_0_1_IRRITATOR_DATA_SEL_MASK 0x00000018L
++#define TCC_DSM_CNTL__CACHE_DATA_BANK_0_1_IRRITATOR_SINGLE_WRITE_MASK 0x00000020L
++#define TCC_DSM_CNTL__CACHE_DATA_BANK_1_0_IRRITATOR_DATA_SEL_MASK 0x000000C0L
++#define TCC_DSM_CNTL__CACHE_DATA_BANK_1_0_IRRITATOR_SINGLE_WRITE_MASK 0x00000100L
++#define TCC_DSM_CNTL__CACHE_DATA_BANK_1_1_IRRITATOR_DATA_SEL_MASK 0x00000600L
++#define TCC_DSM_CNTL__CACHE_DATA_BANK_1_1_IRRITATOR_SINGLE_WRITE_MASK 0x00000800L
++#define TCC_DSM_CNTL__CACHE_DIRTY_BANK_0_IRRITATOR_DATA_SEL_MASK 0x00003000L
++#define TCC_DSM_CNTL__CACHE_DIRTY_BANK_0_IRRITATOR_SINGLE_WRITE_MASK 0x00004000L
++#define TCC_DSM_CNTL__CACHE_DIRTY_BANK_1_IRRITATOR_DATA_SEL_MASK 0x00018000L
++#define TCC_DSM_CNTL__CACHE_DIRTY_BANK_1_IRRITATOR_SINGLE_WRITE_MASK 0x00020000L
++#define TCC_DSM_CNTL__HIGH_RATE_TAG_IRRITATOR_DATA_SEL_MASK 0x000C0000L
++#define TCC_DSM_CNTL__HIGH_RATE_TAG_IRRITATOR_SINGLE_WRITE_MASK 0x00100000L
++#define TCC_DSM_CNTL__LOW_RATE_TAG_IRRITATOR_DATA_SEL_MASK 0x00600000L
++#define TCC_DSM_CNTL__LOW_RATE_TAG_IRRITATOR_SINGLE_WRITE_MASK 0x00800000L
++#define TCC_DSM_CNTL__IN_USE_DEC_IRRITATOR_DATA_SEL_MASK 0x03000000L
++#define TCC_DSM_CNTL__IN_USE_DEC_IRRITATOR_SINGLE_WRITE_MASK 0x04000000L
++#define TCC_DSM_CNTL__IN_USE_TRANSFER_IRRITATOR_DATA_SEL_MASK 0x18000000L
++#define TCC_DSM_CNTL__IN_USE_TRANSFER_IRRITATOR_SINGLE_WRITE_MASK 0x20000000L
++//TCC_DSM_CNTLA
++#define TCC_DSM_CNTLA__SRC_FIFO_IRRITATOR_DATA_SEL__SHIFT 0x0
++#define TCC_DSM_CNTLA__SRC_FIFO_IRRITATOR_SINGLE_WRITE__SHIFT 0x2
++#define TCC_DSM_CNTLA__UC_ATOMIC_FIFO_IRRITATOR_DATA_SEL__SHIFT 0x3
++#define TCC_DSM_CNTLA__UC_ATOMIC_FIFO_IRRITATOR_SINGLE_WRITE__SHIFT 0x5
++#define TCC_DSM_CNTLA__WRITE_RETURN_IRRITATOR_DATA_SEL__SHIFT 0x6
++#define TCC_DSM_CNTLA__WRITE_RETURN_IRRITATOR_SINGLE_WRITE__SHIFT 0x8
++#define TCC_DSM_CNTLA__WRITE_CACHE_READ_IRRITATOR_DATA_SEL__SHIFT 0x9
++#define TCC_DSM_CNTLA__WRITE_CACHE_READ_IRRITATOR_SINGLE_WRITE__SHIFT 0xb
++#define TCC_DSM_CNTLA__SRC_FIFO_NEXT_RAM_IRRITATOR_DATA_SEL__SHIFT 0xc
++#define TCC_DSM_CNTLA__SRC_FIFO_NEXT_RAM_IRRITATOR_SINGLE_WRITE__SHIFT 0xe
++#define TCC_DSM_CNTLA__LATENCY_FIFO_NEXT_RAM_IRRITATOR_DATA_SEL__SHIFT 0xf
++#define TCC_DSM_CNTLA__LATENCY_FIFO_NEXT_RAM_IRRITATOR_SINGLE_WRITE__SHIFT 0x11
++#define TCC_DSM_CNTLA__CACHE_TAG_PROBE_FIFO_IRRITATOR_DATA_SEL__SHIFT 0x12
++#define TCC_DSM_CNTLA__CACHE_TAG_PROBE_FIFO_IRRITATOR_SINGLE_WRITE__SHIFT 0x14
++#define TCC_DSM_CNTLA__LATENCY_FIFO_IRRITATOR_DATA_SEL__SHIFT 0x15
++#define TCC_DSM_CNTLA__LATENCY_FIFO_IRRITATOR_SINGLE_WRITE__SHIFT 0x17
++#define TCC_DSM_CNTLA__RETURN_DATA_IRRITATOR_DATA_SEL__SHIFT 0x18
++#define TCC_DSM_CNTLA__RETURN_DATA_IRRITATOR_SINGLE_WRITE__SHIFT 0x1a
++#define TCC_DSM_CNTLA__RETURN_CONTROL_IRRITATOR_DATA_SEL__SHIFT 0x1b
++#define TCC_DSM_CNTLA__RETURN_CONTROL_IRRITATOR_SINGLE_WRITE__SHIFT 0x1d
++#define TCC_DSM_CNTLA__SRC_FIFO_IRRITATOR_DATA_SEL_MASK 0x00000003L
++#define TCC_DSM_CNTLA__SRC_FIFO_IRRITATOR_SINGLE_WRITE_MASK 0x00000004L
++#define TCC_DSM_CNTLA__UC_ATOMIC_FIFO_IRRITATOR_DATA_SEL_MASK 0x00000018L
++#define TCC_DSM_CNTLA__UC_ATOMIC_FIFO_IRRITATOR_SINGLE_WRITE_MASK 0x00000020L
++#define TCC_DSM_CNTLA__WRITE_RETURN_IRRITATOR_DATA_SEL_MASK 0x000000C0L
++#define TCC_DSM_CNTLA__WRITE_RETURN_IRRITATOR_SINGLE_WRITE_MASK 0x00000100L
++#define TCC_DSM_CNTLA__WRITE_CACHE_READ_IRRITATOR_DATA_SEL_MASK 0x00000600L
++#define TCC_DSM_CNTLA__WRITE_CACHE_READ_IRRITATOR_SINGLE_WRITE_MASK 0x00000800L
++#define TCC_DSM_CNTLA__SRC_FIFO_NEXT_RAM_IRRITATOR_DATA_SEL_MASK 0x00003000L
++#define TCC_DSM_CNTLA__SRC_FIFO_NEXT_RAM_IRRITATOR_SINGLE_WRITE_MASK 0x00004000L
++#define TCC_DSM_CNTLA__LATENCY_FIFO_NEXT_RAM_IRRITATOR_DATA_SEL_MASK 0x00018000L
++#define TCC_DSM_CNTLA__LATENCY_FIFO_NEXT_RAM_IRRITATOR_SINGLE_WRITE_MASK 0x00020000L
++#define TCC_DSM_CNTLA__CACHE_TAG_PROBE_FIFO_IRRITATOR_DATA_SEL_MASK 0x000C0000L
++#define TCC_DSM_CNTLA__CACHE_TAG_PROBE_FIFO_IRRITATOR_SINGLE_WRITE_MASK 0x00100000L
++#define TCC_DSM_CNTLA__LATENCY_FIFO_IRRITATOR_DATA_SEL_MASK 0x00600000L
++#define TCC_DSM_CNTLA__LATENCY_FIFO_IRRITATOR_SINGLE_WRITE_MASK 0x00800000L
++#define TCC_DSM_CNTLA__RETURN_DATA_IRRITATOR_DATA_SEL_MASK 0x03000000L
++#define TCC_DSM_CNTLA__RETURN_DATA_IRRITATOR_SINGLE_WRITE_MASK 0x04000000L
++#define TCC_DSM_CNTLA__RETURN_CONTROL_IRRITATOR_DATA_SEL_MASK 0x18000000L
++#define TCC_DSM_CNTLA__RETURN_CONTROL_IRRITATOR_SINGLE_WRITE_MASK 0x20000000L
++//TCC_DSM_CNTL2
++#define TCC_DSM_CNTL2__CACHE_DATA_ENABLE_ERROR_INJECT__SHIFT 0x0
++#define TCC_DSM_CNTL2__CACHE_DATA_SELECT_INJECT_DELAY__SHIFT 0x2
++#define TCC_DSM_CNTL2__CACHE_DATA_BANK_0_1_ENABLE_ERROR_INJECT__SHIFT 0x3
++#define TCC_DSM_CNTL2__CACHE_DATA_BANK_0_1_SELECT_INJECT_DELAY__SHIFT 0x5
++#define TCC_DSM_CNTL2__CACHE_DATA_BANK_1_0_ENABLE_ERROR_INJECT__SHIFT 0x6
++#define TCC_DSM_CNTL2__CACHE_DATA_BANK_1_0_SELECT_INJECT_DELAY__SHIFT 0x8
++#define TCC_DSM_CNTL2__CACHE_DATA_BANK_1_1_ENABLE_ERROR_INJECT__SHIFT 0x9
++#define TCC_DSM_CNTL2__CACHE_DATA_BANK_1_1_SELECT_INJECT_DELAY__SHIFT 0xb
++#define TCC_DSM_CNTL2__CACHE_DIRTY_BANK_0_ENABLE_ERROR_INJECT__SHIFT 0xc
++#define TCC_DSM_CNTL2__CACHE_DIRTY_BANK_0_SELECT_INJECT_DELAY__SHIFT 0xe
++#define TCC_DSM_CNTL2__CACHE_DIRTY_BANK_1_ENABLE_ERROR_INJECT__SHIFT 0xf
++#define TCC_DSM_CNTL2__CACHE_DIRTY_BANK_1_SELECT_INJECT_DELAY__SHIFT 0x11
++#define TCC_DSM_CNTL2__HIGH_RATE_TAG_ENABLE_ERROR_INJECT__SHIFT 0x12
++#define TCC_DSM_CNTL2__HIGH_RATE_TAG_SELECT_INJECT_DELAY__SHIFT 0x14
++#define TCC_DSM_CNTL2__LOW_RATE_TAG_ENABLE_ERROR_INJECT__SHIFT 0x15
++#define TCC_DSM_CNTL2__LOW_RATE_TAG_SELECT_INJECT_DELAY__SHIFT 0x17
++#define TCC_DSM_CNTL2__INJECT_DELAY__SHIFT 0x1a
++#define TCC_DSM_CNTL2__CACHE_DATA_ENABLE_ERROR_INJECT_MASK 0x00000003L
++#define TCC_DSM_CNTL2__CACHE_DATA_SELECT_INJECT_DELAY_MASK 0x00000004L
++#define TCC_DSM_CNTL2__CACHE_DATA_BANK_0_1_ENABLE_ERROR_INJECT_MASK 0x00000018L
++#define TCC_DSM_CNTL2__CACHE_DATA_BANK_0_1_SELECT_INJECT_DELAY_MASK 0x00000020L
++#define TCC_DSM_CNTL2__CACHE_DATA_BANK_1_0_ENABLE_ERROR_INJECT_MASK 0x000000C0L
++#define TCC_DSM_CNTL2__CACHE_DATA_BANK_1_0_SELECT_INJECT_DELAY_MASK 0x00000100L
++#define TCC_DSM_CNTL2__CACHE_DATA_BANK_1_1_ENABLE_ERROR_INJECT_MASK 0x00000600L
++#define TCC_DSM_CNTL2__CACHE_DATA_BANK_1_1_SELECT_INJECT_DELAY_MASK 0x00000800L
++#define TCC_DSM_CNTL2__CACHE_DIRTY_BANK_0_ENABLE_ERROR_INJECT_MASK 0x00003000L
++#define TCC_DSM_CNTL2__CACHE_DIRTY_BANK_0_SELECT_INJECT_DELAY_MASK 0x00004000L
++#define TCC_DSM_CNTL2__CACHE_DIRTY_BANK_1_ENABLE_ERROR_INJECT_MASK 0x00018000L
++#define TCC_DSM_CNTL2__CACHE_DIRTY_BANK_1_SELECT_INJECT_DELAY_MASK 0x00020000L
++#define TCC_DSM_CNTL2__HIGH_RATE_TAG_ENABLE_ERROR_INJECT_MASK 0x000C0000L
++#define TCC_DSM_CNTL2__HIGH_RATE_TAG_SELECT_INJECT_DELAY_MASK 0x00100000L
++#define TCC_DSM_CNTL2__LOW_RATE_TAG_ENABLE_ERROR_INJECT_MASK 0x00600000L
++#define TCC_DSM_CNTL2__LOW_RATE_TAG_SELECT_INJECT_DELAY_MASK 0x00800000L
++#define TCC_DSM_CNTL2__INJECT_DELAY_MASK 0xFC000000L
++//TCC_DSM_CNTL2A
++#define TCC_DSM_CNTL2A__IN_USE_DEC_ENABLE_ERROR_INJECT__SHIFT 0x0
++#define TCC_DSM_CNTL2A__IN_USE_DEC_SELECT_INJECT_DELAY__SHIFT 0x2
++#define TCC_DSM_CNTL2A__IN_USE_TRANSFER_ENABLE_ERROR_INJECT__SHIFT 0x3
++#define TCC_DSM_CNTL2A__IN_USE_TRANSFER_SELECT_INJECT_DELAY__SHIFT 0x5
++#define TCC_DSM_CNTL2A__RETURN_DATA_ENABLE_ERROR_INJECT__SHIFT 0x6
++#define TCC_DSM_CNTL2A__RETURN_DATA_SELECT_INJECT_DELAY__SHIFT 0x8
++#define TCC_DSM_CNTL2A__RETURN_CONTROL_ENABLE_ERROR_INJECT__SHIFT 0x9
++#define TCC_DSM_CNTL2A__RETURN_CONTROL_SELECT_INJECT_DELAY__SHIFT 0xb
++#define TCC_DSM_CNTL2A__UC_ATOMIC_FIFO_ENABLE_ERROR_INJECT__SHIFT 0xc
++#define TCC_DSM_CNTL2A__UC_ATOMIC_FIFO_SELECT_INJECT_DELAY__SHIFT 0xe
++#define TCC_DSM_CNTL2A__WRITE_RETURN_ENABLE_ERROR_INJECT__SHIFT 0xf
++#define TCC_DSM_CNTL2A__WRITE_RETURN_SELECT_INJECT_DELAY__SHIFT 0x11
++#define TCC_DSM_CNTL2A__WRITE_CACHE_READ_ENABLE_ERROR_INJECT__SHIFT 0x12
++#define TCC_DSM_CNTL2A__WRITE_CACHE_READ_SELECT_INJECT_DELAY__SHIFT 0x14
++#define TCC_DSM_CNTL2A__SRC_FIFO_ENABLE_ERROR_INJECT__SHIFT 0x15
++#define TCC_DSM_CNTL2A__SRC_FIFO_SELECT_INJECT_DELAY__SHIFT 0x17
++#define TCC_DSM_CNTL2A__SRC_FIFO_NEXT_RAM_ENABLE_ERROR_INJECT__SHIFT 0x18
++#define TCC_DSM_CNTL2A__SRC_FIFO_NEXT_RAM_SELECT_INJECT_DELAY__SHIFT 0x1a
++#define TCC_DSM_CNTL2A__CACHE_TAG_PROBE_FIFO_ENABLE_ERROR_INJECT__SHIFT 0x1b
++#define TCC_DSM_CNTL2A__CACHE_TAG_PROBE_FIFO_SELECT_INJECT_DELAY__SHIFT 0x1d
++#define TCC_DSM_CNTL2A__IN_USE_DEC_ENABLE_ERROR_INJECT_MASK 0x00000003L
++#define TCC_DSM_CNTL2A__IN_USE_DEC_SELECT_INJECT_DELAY_MASK 0x00000004L
++#define TCC_DSM_CNTL2A__IN_USE_TRANSFER_ENABLE_ERROR_INJECT_MASK 0x00000018L
++#define TCC_DSM_CNTL2A__IN_USE_TRANSFER_SELECT_INJECT_DELAY_MASK 0x00000020L
++#define TCC_DSM_CNTL2A__RETURN_DATA_ENABLE_ERROR_INJECT_MASK 0x000000C0L
++#define TCC_DSM_CNTL2A__RETURN_DATA_SELECT_INJECT_DELAY_MASK 0x00000100L
++#define TCC_DSM_CNTL2A__RETURN_CONTROL_ENABLE_ERROR_INJECT_MASK 0x00000600L
++#define TCC_DSM_CNTL2A__RETURN_CONTROL_SELECT_INJECT_DELAY_MASK 0x00000800L
++#define TCC_DSM_CNTL2A__UC_ATOMIC_FIFO_ENABLE_ERROR_INJECT_MASK 0x00003000L
++#define TCC_DSM_CNTL2A__UC_ATOMIC_FIFO_SELECT_INJECT_DELAY_MASK 0x00004000L
++#define TCC_DSM_CNTL2A__WRITE_RETURN_ENABLE_ERROR_INJECT_MASK 0x00018000L
++#define TCC_DSM_CNTL2A__WRITE_RETURN_SELECT_INJECT_DELAY_MASK 0x00020000L
++#define TCC_DSM_CNTL2A__WRITE_CACHE_READ_ENABLE_ERROR_INJECT_MASK 0x000C0000L
++#define TCC_DSM_CNTL2A__WRITE_CACHE_READ_SELECT_INJECT_DELAY_MASK 0x00100000L
++#define TCC_DSM_CNTL2A__SRC_FIFO_ENABLE_ERROR_INJECT_MASK 0x00600000L
++#define TCC_DSM_CNTL2A__SRC_FIFO_SELECT_INJECT_DELAY_MASK 0x00800000L
++#define TCC_DSM_CNTL2A__SRC_FIFO_NEXT_RAM_ENABLE_ERROR_INJECT_MASK 0x03000000L
++#define TCC_DSM_CNTL2A__SRC_FIFO_NEXT_RAM_SELECT_INJECT_DELAY_MASK 0x04000000L
++#define TCC_DSM_CNTL2A__CACHE_TAG_PROBE_FIFO_ENABLE_ERROR_INJECT_MASK 0x18000000L
++#define TCC_DSM_CNTL2A__CACHE_TAG_PROBE_FIFO_SELECT_INJECT_DELAY_MASK 0x20000000L
++//TCC_DSM_CNTL2B
++#define TCC_DSM_CNTL2B__LATENCY_FIFO_ENABLE_ERROR_INJECT__SHIFT 0x0
++#define TCC_DSM_CNTL2B__LATENCY_FIFO_SELECT_INJECT_DELAY__SHIFT 0x2
++#define TCC_DSM_CNTL2B__LATENCY_FIFO_NEXT_RAM_ENABLE_ERROR_INJECT__SHIFT 0x3
++#define TCC_DSM_CNTL2B__LATENCY_FIFO_NEXT_RAM_SELECT_INJECT_DELAY__SHIFT 0x5
++#define TCC_DSM_CNTL2B__LATENCY_FIFO_ENABLE_ERROR_INJECT_MASK 0x00000003L
++#define TCC_DSM_CNTL2B__LATENCY_FIFO_SELECT_INJECT_DELAY_MASK 0x00000004L
++#define TCC_DSM_CNTL2B__LATENCY_FIFO_NEXT_RAM_ENABLE_ERROR_INJECT_MASK 0x00000018L
++#define TCC_DSM_CNTL2B__LATENCY_FIFO_NEXT_RAM_SELECT_INJECT_DELAY_MASK 0x00000020L
++//TCC_WBINVL2
++#define TCC_WBINVL2__DONE__SHIFT 0x4
++#define TCC_WBINVL2__DONE_MASK 0x00000010L
++//TCC_SOFT_RESET
++#define TCC_SOFT_RESET__HALT_FOR_RESET__SHIFT 0x0
++#define TCC_SOFT_RESET__HALT_FOR_RESET_MASK 0x00000001L
++//TCA_CTRL
++#define TCA_CTRL__HOLE_TIMEOUT__SHIFT 0x0
++#define TCA_CTRL__RB_STILL_4_PHASE__SHIFT 0x4
++#define TCA_CTRL__RB_AS_TCI__SHIFT 0x5
++#define TCA_CTRL__DISABLE_UTCL2_PRIORITY__SHIFT 0x6
++#define TCA_CTRL__DISABLE_RB_ONLY_TCA_ARBITER__SHIFT 0x7
++#define TCA_CTRL__HOLE_TIMEOUT_MASK 0x0000000FL
++#define TCA_CTRL__RB_STILL_4_PHASE_MASK 0x00000010L
++#define TCA_CTRL__RB_AS_TCI_MASK 0x00000020L
++#define TCA_CTRL__DISABLE_UTCL2_PRIORITY_MASK 0x00000040L
++#define TCA_CTRL__DISABLE_RB_ONLY_TCA_ARBITER_MASK 0x00000080L
++//TCA_BURST_MASK
++#define TCA_BURST_MASK__ADDR_MASK__SHIFT 0x0
++#define TCA_BURST_MASK__ADDR_MASK_MASK 0xFFFFFFFFL
++//TCA_BURST_CTRL
++#define TCA_BURST_CTRL__MAX_BURST__SHIFT 0x0
++#define TCA_BURST_CTRL__RB_DISABLE__SHIFT 0x3
++#define TCA_BURST_CTRL__TCP_DISABLE__SHIFT 0x4
++#define TCA_BURST_CTRL__SQC_DISABLE__SHIFT 0x5
++#define TCA_BURST_CTRL__CPF_DISABLE__SHIFT 0x6
++#define TCA_BURST_CTRL__CPG_DISABLE__SHIFT 0x7
++#define TCA_BURST_CTRL__IA_DISABLE__SHIFT 0x8
++#define TCA_BURST_CTRL__WD_DISABLE__SHIFT 0x9
++#define TCA_BURST_CTRL__SQG_DISABLE__SHIFT 0xa
++#define TCA_BURST_CTRL__UTCL2_DISABLE__SHIFT 0xb
++#define TCA_BURST_CTRL__TPI_DISABLE__SHIFT 0xc
++#define TCA_BURST_CTRL__RLC_DISABLE__SHIFT 0xd
++#define TCA_BURST_CTRL__PA_DISABLE__SHIFT 0xe
++#define TCA_BURST_CTRL__MAX_BURST_MASK 0x00000007L
++#define TCA_BURST_CTRL__RB_DISABLE_MASK 0x00000008L
++#define TCA_BURST_CTRL__TCP_DISABLE_MASK 0x00000010L
++#define TCA_BURST_CTRL__SQC_DISABLE_MASK 0x00000020L
++#define TCA_BURST_CTRL__CPF_DISABLE_MASK 0x00000040L
++#define TCA_BURST_CTRL__CPG_DISABLE_MASK 0x00000080L
++#define TCA_BURST_CTRL__IA_DISABLE_MASK 0x00000100L
++#define TCA_BURST_CTRL__WD_DISABLE_MASK 0x00000200L
++#define TCA_BURST_CTRL__SQG_DISABLE_MASK 0x00000400L
++#define TCA_BURST_CTRL__UTCL2_DISABLE_MASK 0x00000800L
++#define TCA_BURST_CTRL__TPI_DISABLE_MASK 0x00001000L
++#define TCA_BURST_CTRL__RLC_DISABLE_MASK 0x00002000L
++#define TCA_BURST_CTRL__PA_DISABLE_MASK 0x00004000L
++//TCA_DSM_CNTL
++#define TCA_DSM_CNTL__HOLE_FIFO_SED_IRRITATOR_DATA_SEL__SHIFT 0x0
++#define TCA_DSM_CNTL__HOLE_FIFO_SED_IRRITATOR_SINGLE_WRITE__SHIFT 0x2
++#define TCA_DSM_CNTL__REQ_FIFO_SED_IRRITATOR_DATA_SEL__SHIFT 0x3
++#define TCA_DSM_CNTL__REQ_FIFO_SED_IRRITATOR_SINGLE_WRITE__SHIFT 0x5
++#define TCA_DSM_CNTL__HOLE_FIFO_SED_IRRITATOR_DATA_SEL_MASK 0x00000003L
++#define TCA_DSM_CNTL__HOLE_FIFO_SED_IRRITATOR_SINGLE_WRITE_MASK 0x00000004L
++#define TCA_DSM_CNTL__REQ_FIFO_SED_IRRITATOR_DATA_SEL_MASK 0x00000018L
++#define TCA_DSM_CNTL__REQ_FIFO_SED_IRRITATOR_SINGLE_WRITE_MASK 0x00000020L
++//TCA_DSM_CNTL2
++#define TCA_DSM_CNTL2__HOLE_FIFO_SED_ENABLE_ERROR_INJECT__SHIFT 0x0
++#define TCA_DSM_CNTL2__HOLE_FIFO_SED_SELECT_INJECT_DELAY__SHIFT 0x2
++#define TCA_DSM_CNTL2__REQ_FIFO_SED_ENABLE_ERROR_INJECT__SHIFT 0x3
++#define TCA_DSM_CNTL2__REQ_FIFO_SED_SELECT_INJECT_DELAY__SHIFT 0x5
++#define TCA_DSM_CNTL2__INJECT_DELAY__SHIFT 0x1a
++#define TCA_DSM_CNTL2__HOLE_FIFO_SED_ENABLE_ERROR_INJECT_MASK 0x00000003L
++#define TCA_DSM_CNTL2__HOLE_FIFO_SED_SELECT_INJECT_DELAY_MASK 0x00000004L
++#define TCA_DSM_CNTL2__REQ_FIFO_SED_ENABLE_ERROR_INJECT_MASK 0x00000018L
++#define TCA_DSM_CNTL2__REQ_FIFO_SED_SELECT_INJECT_DELAY_MASK 0x00000020L
++#define TCA_DSM_CNTL2__INJECT_DELAY_MASK 0xFC000000L
++//TCA_EDC_CNT
++#define TCA_EDC_CNT__HOLE_FIFO_SED_COUNT__SHIFT 0x0
++#define TCA_EDC_CNT__REQ_FIFO_SED_COUNT__SHIFT 0x2
++#define TCA_EDC_CNT__HOLE_FIFO_SED_COUNT_MASK 0x00000003L
++#define TCA_EDC_CNT__REQ_FIFO_SED_COUNT_MASK 0x0000000CL
++
++
++// addressBlock: gc_shdec
++//SPI_SHADER_PGM_RSRC3_PS
++#define SPI_SHADER_PGM_RSRC3_PS__CU_EN__SHIFT 0x0
++#define SPI_SHADER_PGM_RSRC3_PS__WAVE_LIMIT__SHIFT 0x10
++#define SPI_SHADER_PGM_RSRC3_PS__LOCK_LOW_THRESHOLD__SHIFT 0x16
++#define SPI_SHADER_PGM_RSRC3_PS__SIMD_DISABLE__SHIFT 0x1a
++#define SPI_SHADER_PGM_RSRC3_PS__CU_EN_MASK 0x0000FFFFL
++#define SPI_SHADER_PGM_RSRC3_PS__WAVE_LIMIT_MASK 0x003F0000L
++#define SPI_SHADER_PGM_RSRC3_PS__LOCK_LOW_THRESHOLD_MASK 0x03C00000L
++#define SPI_SHADER_PGM_RSRC3_PS__SIMD_DISABLE_MASK 0x3C000000L
++//SPI_SHADER_PGM_LO_PS
++#define SPI_SHADER_PGM_LO_PS__MEM_BASE__SHIFT 0x0
++#define SPI_SHADER_PGM_LO_PS__MEM_BASE_MASK 0xFFFFFFFFL
++//SPI_SHADER_PGM_HI_PS
++#define SPI_SHADER_PGM_HI_PS__MEM_BASE__SHIFT 0x0
++#define SPI_SHADER_PGM_HI_PS__MEM_BASE_MASK 0xFFL
++//SPI_SHADER_PGM_RSRC1_PS
++#define SPI_SHADER_PGM_RSRC1_PS__VGPRS__SHIFT 0x0
++#define SPI_SHADER_PGM_RSRC1_PS__SGPRS__SHIFT 0x6
++#define SPI_SHADER_PGM_RSRC1_PS__PRIORITY__SHIFT 0xa
++#define SPI_SHADER_PGM_RSRC1_PS__FLOAT_MODE__SHIFT 0xc
++#define SPI_SHADER_PGM_RSRC1_PS__PRIV__SHIFT 0x14
++#define SPI_SHADER_PGM_RSRC1_PS__DX10_CLAMP__SHIFT 0x15
++#define SPI_SHADER_PGM_RSRC1_PS__IEEE_MODE__SHIFT 0x17
++#define SPI_SHADER_PGM_RSRC1_PS__CU_GROUP_DISABLE__SHIFT 0x18
++#define SPI_SHADER_PGM_RSRC1_PS__FP16_OVFL__SHIFT 0x1d
++#define SPI_SHADER_PGM_RSRC1_PS__VGPRS_MASK 0x0000003FL
++#define SPI_SHADER_PGM_RSRC1_PS__SGPRS_MASK 0x000003C0L
++#define SPI_SHADER_PGM_RSRC1_PS__PRIORITY_MASK 0x00000C00L
++#define SPI_SHADER_PGM_RSRC1_PS__FLOAT_MODE_MASK 0x000FF000L
++#define SPI_SHADER_PGM_RSRC1_PS__PRIV_MASK 0x00100000L
++#define SPI_SHADER_PGM_RSRC1_PS__DX10_CLAMP_MASK 0x00200000L
++#define SPI_SHADER_PGM_RSRC1_PS__IEEE_MODE_MASK 0x00800000L
++#define SPI_SHADER_PGM_RSRC1_PS__CU_GROUP_DISABLE_MASK 0x01000000L
++#define SPI_SHADER_PGM_RSRC1_PS__FP16_OVFL_MASK 0x20000000L
++//SPI_SHADER_PGM_RSRC2_PS
++#define SPI_SHADER_PGM_RSRC2_PS__SCRATCH_EN__SHIFT 0x0
++#define SPI_SHADER_PGM_RSRC2_PS__USER_SGPR__SHIFT 0x1
++#define SPI_SHADER_PGM_RSRC2_PS__TRAP_PRESENT__SHIFT 0x6
++#define SPI_SHADER_PGM_RSRC2_PS__WAVE_CNT_EN__SHIFT 0x7
++#define SPI_SHADER_PGM_RSRC2_PS__EXTRA_LDS_SIZE__SHIFT 0x8
++#define SPI_SHADER_PGM_RSRC2_PS__EXCP_EN__SHIFT 0x10
++#define SPI_SHADER_PGM_RSRC2_PS__LOAD_COLLISION_WAVEID__SHIFT 0x19
++#define SPI_SHADER_PGM_RSRC2_PS__LOAD_INTRAWAVE_COLLISION__SHIFT 0x1a
++#define SPI_SHADER_PGM_RSRC2_PS__SKIP_USGPR0__SHIFT 0x1b
++#define SPI_SHADER_PGM_RSRC2_PS__USER_SGPR_MSB__SHIFT 0x1c
++#define SPI_SHADER_PGM_RSRC2_PS__SCRATCH_EN_MASK 0x00000001L
++#define SPI_SHADER_PGM_RSRC2_PS__USER_SGPR_MASK 0x0000003EL
++#define SPI_SHADER_PGM_RSRC2_PS__TRAP_PRESENT_MASK 0x00000040L
++#define SPI_SHADER_PGM_RSRC2_PS__WAVE_CNT_EN_MASK 0x00000080L
++#define SPI_SHADER_PGM_RSRC2_PS__EXTRA_LDS_SIZE_MASK 0x0000FF00L
++#define SPI_SHADER_PGM_RSRC2_PS__EXCP_EN_MASK 0x01FF0000L
++#define SPI_SHADER_PGM_RSRC2_PS__LOAD_COLLISION_WAVEID_MASK 0x02000000L
++#define SPI_SHADER_PGM_RSRC2_PS__LOAD_INTRAWAVE_COLLISION_MASK 0x04000000L
++#define SPI_SHADER_PGM_RSRC2_PS__SKIP_USGPR0_MASK 0x08000000L
++#define SPI_SHADER_PGM_RSRC2_PS__USER_SGPR_MSB_MASK 0x10000000L
++//SPI_SHADER_USER_DATA_PS_0
++#define SPI_SHADER_USER_DATA_PS_0__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_PS_0__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_PS_1
++#define SPI_SHADER_USER_DATA_PS_1__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_PS_1__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_PS_2
++#define SPI_SHADER_USER_DATA_PS_2__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_PS_2__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_PS_3
++#define SPI_SHADER_USER_DATA_PS_3__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_PS_3__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_PS_4
++#define SPI_SHADER_USER_DATA_PS_4__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_PS_4__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_PS_5
++#define SPI_SHADER_USER_DATA_PS_5__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_PS_5__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_PS_6
++#define SPI_SHADER_USER_DATA_PS_6__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_PS_6__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_PS_7
++#define SPI_SHADER_USER_DATA_PS_7__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_PS_7__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_PS_8
++#define SPI_SHADER_USER_DATA_PS_8__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_PS_8__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_PS_9
++#define SPI_SHADER_USER_DATA_PS_9__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_PS_9__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_PS_10
++#define SPI_SHADER_USER_DATA_PS_10__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_PS_10__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_PS_11
++#define SPI_SHADER_USER_DATA_PS_11__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_PS_11__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_PS_12
++#define SPI_SHADER_USER_DATA_PS_12__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_PS_12__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_PS_13
++#define SPI_SHADER_USER_DATA_PS_13__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_PS_13__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_PS_14
++#define SPI_SHADER_USER_DATA_PS_14__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_PS_14__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_PS_15
++#define SPI_SHADER_USER_DATA_PS_15__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_PS_15__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_PS_16
++#define SPI_SHADER_USER_DATA_PS_16__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_PS_16__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_PS_17
++#define SPI_SHADER_USER_DATA_PS_17__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_PS_17__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_PS_18
++#define SPI_SHADER_USER_DATA_PS_18__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_PS_18__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_PS_19
++#define SPI_SHADER_USER_DATA_PS_19__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_PS_19__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_PS_20
++#define SPI_SHADER_USER_DATA_PS_20__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_PS_20__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_PS_21
++#define SPI_SHADER_USER_DATA_PS_21__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_PS_21__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_PS_22
++#define SPI_SHADER_USER_DATA_PS_22__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_PS_22__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_PS_23
++#define SPI_SHADER_USER_DATA_PS_23__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_PS_23__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_PS_24
++#define SPI_SHADER_USER_DATA_PS_24__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_PS_24__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_PS_25
++#define SPI_SHADER_USER_DATA_PS_25__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_PS_25__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_PS_26
++#define SPI_SHADER_USER_DATA_PS_26__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_PS_26__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_PS_27
++#define SPI_SHADER_USER_DATA_PS_27__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_PS_27__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_PS_28
++#define SPI_SHADER_USER_DATA_PS_28__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_PS_28__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_PS_29
++#define SPI_SHADER_USER_DATA_PS_29__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_PS_29__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_PS_30
++#define SPI_SHADER_USER_DATA_PS_30__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_PS_30__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_PS_31
++#define SPI_SHADER_USER_DATA_PS_31__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_PS_31__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_PGM_RSRC3_VS
++#define SPI_SHADER_PGM_RSRC3_VS__CU_EN__SHIFT 0x0
++#define SPI_SHADER_PGM_RSRC3_VS__WAVE_LIMIT__SHIFT 0x10
++#define SPI_SHADER_PGM_RSRC3_VS__LOCK_LOW_THRESHOLD__SHIFT 0x16
++#define SPI_SHADER_PGM_RSRC3_VS__SIMD_DISABLE__SHIFT 0x1a
++#define SPI_SHADER_PGM_RSRC3_VS__CU_EN_MASK 0x0000FFFFL
++#define SPI_SHADER_PGM_RSRC3_VS__WAVE_LIMIT_MASK 0x003F0000L
++#define SPI_SHADER_PGM_RSRC3_VS__LOCK_LOW_THRESHOLD_MASK 0x03C00000L
++#define SPI_SHADER_PGM_RSRC3_VS__SIMD_DISABLE_MASK 0x3C000000L
++//SPI_SHADER_LATE_ALLOC_VS
++#define SPI_SHADER_LATE_ALLOC_VS__LIMIT__SHIFT 0x0
++#define SPI_SHADER_LATE_ALLOC_VS__LIMIT_MASK 0x0000003FL
++//SPI_SHADER_PGM_LO_VS
++#define SPI_SHADER_PGM_LO_VS__MEM_BASE__SHIFT 0x0
++#define SPI_SHADER_PGM_LO_VS__MEM_BASE_MASK 0xFFFFFFFFL
++//SPI_SHADER_PGM_HI_VS
++#define SPI_SHADER_PGM_HI_VS__MEM_BASE__SHIFT 0x0
++#define SPI_SHADER_PGM_HI_VS__MEM_BASE_MASK 0xFFL
++//SPI_SHADER_PGM_RSRC1_VS
++#define SPI_SHADER_PGM_RSRC1_VS__VGPRS__SHIFT 0x0
++#define SPI_SHADER_PGM_RSRC1_VS__SGPRS__SHIFT 0x6
++#define SPI_SHADER_PGM_RSRC1_VS__PRIORITY__SHIFT 0xa
++#define SPI_SHADER_PGM_RSRC1_VS__FLOAT_MODE__SHIFT 0xc
++#define SPI_SHADER_PGM_RSRC1_VS__PRIV__SHIFT 0x14
++#define SPI_SHADER_PGM_RSRC1_VS__DX10_CLAMP__SHIFT 0x15
++#define SPI_SHADER_PGM_RSRC1_VS__IEEE_MODE__SHIFT 0x17
++#define SPI_SHADER_PGM_RSRC1_VS__VGPR_COMP_CNT__SHIFT 0x18
++#define SPI_SHADER_PGM_RSRC1_VS__CU_GROUP_ENABLE__SHIFT 0x1a
++#define SPI_SHADER_PGM_RSRC1_VS__FP16_OVFL__SHIFT 0x1f
++#define SPI_SHADER_PGM_RSRC1_VS__VGPRS_MASK 0x0000003FL
++#define SPI_SHADER_PGM_RSRC1_VS__SGPRS_MASK 0x000003C0L
++#define SPI_SHADER_PGM_RSRC1_VS__PRIORITY_MASK 0x00000C00L
++#define SPI_SHADER_PGM_RSRC1_VS__FLOAT_MODE_MASK 0x000FF000L
++#define SPI_SHADER_PGM_RSRC1_VS__PRIV_MASK 0x00100000L
++#define SPI_SHADER_PGM_RSRC1_VS__DX10_CLAMP_MASK 0x00200000L
++#define SPI_SHADER_PGM_RSRC1_VS__IEEE_MODE_MASK 0x00800000L
++#define SPI_SHADER_PGM_RSRC1_VS__VGPR_COMP_CNT_MASK 0x03000000L
++#define SPI_SHADER_PGM_RSRC1_VS__CU_GROUP_ENABLE_MASK 0x04000000L
++#define SPI_SHADER_PGM_RSRC1_VS__FP16_OVFL_MASK 0x80000000L
++//SPI_SHADER_PGM_RSRC2_VS
++#define SPI_SHADER_PGM_RSRC2_VS__SCRATCH_EN__SHIFT 0x0
++#define SPI_SHADER_PGM_RSRC2_VS__USER_SGPR__SHIFT 0x1
++#define SPI_SHADER_PGM_RSRC2_VS__TRAP_PRESENT__SHIFT 0x6
++#define SPI_SHADER_PGM_RSRC2_VS__OC_LDS_EN__SHIFT 0x7
++#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE0_EN__SHIFT 0x8
++#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE1_EN__SHIFT 0x9
++#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE2_EN__SHIFT 0xa
++#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE3_EN__SHIFT 0xb
++#define SPI_SHADER_PGM_RSRC2_VS__SO_EN__SHIFT 0xc
++#define SPI_SHADER_PGM_RSRC2_VS__EXCP_EN__SHIFT 0xd
++#define SPI_SHADER_PGM_RSRC2_VS__PC_BASE_EN__SHIFT 0x16
++#define SPI_SHADER_PGM_RSRC2_VS__DISPATCH_DRAW_EN__SHIFT 0x18
++#define SPI_SHADER_PGM_RSRC2_VS__SKIP_USGPR0__SHIFT 0x1b
++#define SPI_SHADER_PGM_RSRC2_VS__USER_SGPR_MSB__SHIFT 0x1c
++#define SPI_SHADER_PGM_RSRC2_VS__SCRATCH_EN_MASK 0x00000001L
++#define SPI_SHADER_PGM_RSRC2_VS__USER_SGPR_MASK 0x0000003EL
++#define SPI_SHADER_PGM_RSRC2_VS__TRAP_PRESENT_MASK 0x00000040L
++#define SPI_SHADER_PGM_RSRC2_VS__OC_LDS_EN_MASK 0x00000080L
++#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE0_EN_MASK 0x00000100L
++#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE1_EN_MASK 0x00000200L
++#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE2_EN_MASK 0x00000400L
++#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE3_EN_MASK 0x00000800L
++#define SPI_SHADER_PGM_RSRC2_VS__SO_EN_MASK 0x00001000L
++#define SPI_SHADER_PGM_RSRC2_VS__EXCP_EN_MASK 0x003FE000L
++#define SPI_SHADER_PGM_RSRC2_VS__PC_BASE_EN_MASK 0x00400000L
++#define SPI_SHADER_PGM_RSRC2_VS__DISPATCH_DRAW_EN_MASK 0x01000000L
++#define SPI_SHADER_PGM_RSRC2_VS__SKIP_USGPR0_MASK 0x08000000L
++#define SPI_SHADER_PGM_RSRC2_VS__USER_SGPR_MSB_MASK 0x10000000L
++//SPI_SHADER_USER_DATA_VS_0
++#define SPI_SHADER_USER_DATA_VS_0__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_VS_0__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_VS_1
++#define SPI_SHADER_USER_DATA_VS_1__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_VS_1__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_VS_2
++#define SPI_SHADER_USER_DATA_VS_2__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_VS_2__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_VS_3
++#define SPI_SHADER_USER_DATA_VS_3__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_VS_3__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_VS_4
++#define SPI_SHADER_USER_DATA_VS_4__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_VS_4__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_VS_5
++#define SPI_SHADER_USER_DATA_VS_5__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_VS_5__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_VS_6
++#define SPI_SHADER_USER_DATA_VS_6__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_VS_6__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_VS_7
++#define SPI_SHADER_USER_DATA_VS_7__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_VS_7__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_VS_8
++#define SPI_SHADER_USER_DATA_VS_8__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_VS_8__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_VS_9
++#define SPI_SHADER_USER_DATA_VS_9__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_VS_9__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_VS_10
++#define SPI_SHADER_USER_DATA_VS_10__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_VS_10__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_VS_11
++#define SPI_SHADER_USER_DATA_VS_11__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_VS_11__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_VS_12
++#define SPI_SHADER_USER_DATA_VS_12__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_VS_12__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_VS_13
++#define SPI_SHADER_USER_DATA_VS_13__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_VS_13__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_VS_14
++#define SPI_SHADER_USER_DATA_VS_14__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_VS_14__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_VS_15
++#define SPI_SHADER_USER_DATA_VS_15__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_VS_15__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_VS_16
++#define SPI_SHADER_USER_DATA_VS_16__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_VS_16__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_VS_17
++#define SPI_SHADER_USER_DATA_VS_17__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_VS_17__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_VS_18
++#define SPI_SHADER_USER_DATA_VS_18__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_VS_18__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_VS_19
++#define SPI_SHADER_USER_DATA_VS_19__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_VS_19__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_VS_20
++#define SPI_SHADER_USER_DATA_VS_20__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_VS_20__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_VS_21
++#define SPI_SHADER_USER_DATA_VS_21__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_VS_21__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_VS_22
++#define SPI_SHADER_USER_DATA_VS_22__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_VS_22__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_VS_23
++#define SPI_SHADER_USER_DATA_VS_23__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_VS_23__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_VS_24
++#define SPI_SHADER_USER_DATA_VS_24__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_VS_24__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_VS_25
++#define SPI_SHADER_USER_DATA_VS_25__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_VS_25__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_VS_26
++#define SPI_SHADER_USER_DATA_VS_26__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_VS_26__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_VS_27
++#define SPI_SHADER_USER_DATA_VS_27__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_VS_27__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_VS_28
++#define SPI_SHADER_USER_DATA_VS_28__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_VS_28__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_VS_29
++#define SPI_SHADER_USER_DATA_VS_29__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_VS_29__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_VS_30
++#define SPI_SHADER_USER_DATA_VS_30__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_VS_30__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_VS_31
++#define SPI_SHADER_USER_DATA_VS_31__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_VS_31__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_PGM_RSRC2_GS_VS
++#define SPI_SHADER_PGM_RSRC2_GS_VS__SCRATCH_EN__SHIFT 0x0
++#define SPI_SHADER_PGM_RSRC2_GS_VS__USER_SGPR__SHIFT 0x1
++#define SPI_SHADER_PGM_RSRC2_GS_VS__TRAP_PRESENT__SHIFT 0x6
++#define SPI_SHADER_PGM_RSRC2_GS_VS__EXCP_EN__SHIFT 0x7
++#define SPI_SHADER_PGM_RSRC2_GS_VS__VGPR_COMP_CNT__SHIFT 0x10
++#define SPI_SHADER_PGM_RSRC2_GS_VS__OC_LDS_EN__SHIFT 0x12
++#define SPI_SHADER_PGM_RSRC2_GS_VS__LDS_SIZE__SHIFT 0x13
++#define SPI_SHADER_PGM_RSRC2_GS_VS__SKIP_USGPR0__SHIFT 0x1b
++#define SPI_SHADER_PGM_RSRC2_GS_VS__USER_SGPR_MSB__SHIFT 0x1c
++#define SPI_SHADER_PGM_RSRC2_GS_VS__SCRATCH_EN_MASK 0x00000001L
++#define SPI_SHADER_PGM_RSRC2_GS_VS__USER_SGPR_MASK 0x0000003EL
++#define SPI_SHADER_PGM_RSRC2_GS_VS__TRAP_PRESENT_MASK 0x00000040L
++#define SPI_SHADER_PGM_RSRC2_GS_VS__EXCP_EN_MASK 0x0000FF80L
++#define SPI_SHADER_PGM_RSRC2_GS_VS__VGPR_COMP_CNT_MASK 0x00030000L
++#define SPI_SHADER_PGM_RSRC2_GS_VS__OC_LDS_EN_MASK 0x00040000L
++#define SPI_SHADER_PGM_RSRC2_GS_VS__LDS_SIZE_MASK 0x07F80000L
++#define SPI_SHADER_PGM_RSRC2_GS_VS__SKIP_USGPR0_MASK 0x08000000L
++#define SPI_SHADER_PGM_RSRC2_GS_VS__USER_SGPR_MSB_MASK 0x10000000L
++//SPI_SHADER_PGM_RSRC4_GS
++#define SPI_SHADER_PGM_RSRC4_GS__GROUP_FIFO_DEPTH__SHIFT 0x0
++#define SPI_SHADER_PGM_RSRC4_GS__SPI_SHADER_LATE_ALLOC_GS__SHIFT 0x7
++#define SPI_SHADER_PGM_RSRC4_GS__GROUP_FIFO_DEPTH_MASK 0x0000007FL
++#define SPI_SHADER_PGM_RSRC4_GS__SPI_SHADER_LATE_ALLOC_GS_MASK 0x00003F80L
++//SPI_SHADER_USER_DATA_ADDR_LO_GS
++#define SPI_SHADER_USER_DATA_ADDR_LO_GS__MEM_BASE__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_ADDR_LO_GS__MEM_BASE_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_ADDR_HI_GS
++#define SPI_SHADER_USER_DATA_ADDR_HI_GS__MEM_BASE__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_ADDR_HI_GS__MEM_BASE_MASK 0xFFFFFFFFL
++//SPI_SHADER_PGM_LO_ES
++#define SPI_SHADER_PGM_LO_ES__MEM_BASE__SHIFT 0x0
++#define SPI_SHADER_PGM_LO_ES__MEM_BASE_MASK 0xFFFFFFFFL
++//SPI_SHADER_PGM_HI_ES
++#define SPI_SHADER_PGM_HI_ES__MEM_BASE__SHIFT 0x0
++#define SPI_SHADER_PGM_HI_ES__MEM_BASE_MASK 0xFFL
++//SPI_SHADER_PGM_RSRC3_GS
++#define SPI_SHADER_PGM_RSRC3_GS__CU_EN__SHIFT 0x0
++#define SPI_SHADER_PGM_RSRC3_GS__WAVE_LIMIT__SHIFT 0x10
++#define SPI_SHADER_PGM_RSRC3_GS__LOCK_LOW_THRESHOLD__SHIFT 0x16
++#define SPI_SHADER_PGM_RSRC3_GS__SIMD_DISABLE__SHIFT 0x1a
++#define SPI_SHADER_PGM_RSRC3_GS__CU_EN_MASK 0x0000FFFFL
++#define SPI_SHADER_PGM_RSRC3_GS__WAVE_LIMIT_MASK 0x003F0000L
++#define SPI_SHADER_PGM_RSRC3_GS__LOCK_LOW_THRESHOLD_MASK 0x03C00000L
++#define SPI_SHADER_PGM_RSRC3_GS__SIMD_DISABLE_MASK 0x3C000000L
++//SPI_SHADER_PGM_LO_GS
++#define SPI_SHADER_PGM_LO_GS__MEM_BASE__SHIFT 0x0
++#define SPI_SHADER_PGM_LO_GS__MEM_BASE_MASK 0xFFFFFFFFL
++//SPI_SHADER_PGM_HI_GS
++#define SPI_SHADER_PGM_HI_GS__MEM_BASE__SHIFT 0x0
++#define SPI_SHADER_PGM_HI_GS__MEM_BASE_MASK 0xFFL
++//SPI_SHADER_PGM_RSRC1_GS
++#define SPI_SHADER_PGM_RSRC1_GS__VGPRS__SHIFT 0x0
++#define SPI_SHADER_PGM_RSRC1_GS__SGPRS__SHIFT 0x6
++#define SPI_SHADER_PGM_RSRC1_GS__PRIORITY__SHIFT 0xa
++#define SPI_SHADER_PGM_RSRC1_GS__FLOAT_MODE__SHIFT 0xc
++#define SPI_SHADER_PGM_RSRC1_GS__PRIV__SHIFT 0x14
++#define SPI_SHADER_PGM_RSRC1_GS__DX10_CLAMP__SHIFT 0x15
++#define SPI_SHADER_PGM_RSRC1_GS__IEEE_MODE__SHIFT 0x17
++#define SPI_SHADER_PGM_RSRC1_GS__CU_GROUP_ENABLE__SHIFT 0x18
++#define SPI_SHADER_PGM_RSRC1_GS__GS_VGPR_COMP_CNT__SHIFT 0x1d
++#define SPI_SHADER_PGM_RSRC1_GS__FP16_OVFL__SHIFT 0x1f
++#define SPI_SHADER_PGM_RSRC1_GS__VGPRS_MASK 0x0000003FL
++#define SPI_SHADER_PGM_RSRC1_GS__SGPRS_MASK 0x000003C0L
++#define SPI_SHADER_PGM_RSRC1_GS__PRIORITY_MASK 0x00000C00L
++#define SPI_SHADER_PGM_RSRC1_GS__FLOAT_MODE_MASK 0x000FF000L
++#define SPI_SHADER_PGM_RSRC1_GS__PRIV_MASK 0x00100000L
++#define SPI_SHADER_PGM_RSRC1_GS__DX10_CLAMP_MASK 0x00200000L
++#define SPI_SHADER_PGM_RSRC1_GS__IEEE_MODE_MASK 0x00800000L
++#define SPI_SHADER_PGM_RSRC1_GS__CU_GROUP_ENABLE_MASK 0x01000000L
++#define SPI_SHADER_PGM_RSRC1_GS__GS_VGPR_COMP_CNT_MASK 0x60000000L
++#define SPI_SHADER_PGM_RSRC1_GS__FP16_OVFL_MASK 0x80000000L
++//SPI_SHADER_PGM_RSRC2_GS
++#define SPI_SHADER_PGM_RSRC2_GS__SCRATCH_EN__SHIFT 0x0
++#define SPI_SHADER_PGM_RSRC2_GS__USER_SGPR__SHIFT 0x1
++#define SPI_SHADER_PGM_RSRC2_GS__TRAP_PRESENT__SHIFT 0x6
++#define SPI_SHADER_PGM_RSRC2_GS__EXCP_EN__SHIFT 0x7
++#define SPI_SHADER_PGM_RSRC2_GS__ES_VGPR_COMP_CNT__SHIFT 0x10
++#define SPI_SHADER_PGM_RSRC2_GS__OC_LDS_EN__SHIFT 0x12
++#define SPI_SHADER_PGM_RSRC2_GS__LDS_SIZE__SHIFT 0x13
++#define SPI_SHADER_PGM_RSRC2_GS__SKIP_USGPR0__SHIFT 0x1b
++#define SPI_SHADER_PGM_RSRC2_GS__USER_SGPR_MSB__SHIFT 0x1c
++#define SPI_SHADER_PGM_RSRC2_GS__SCRATCH_EN_MASK 0x00000001L
++#define SPI_SHADER_PGM_RSRC2_GS__USER_SGPR_MASK 0x0000003EL
++#define SPI_SHADER_PGM_RSRC2_GS__TRAP_PRESENT_MASK 0x00000040L
++#define SPI_SHADER_PGM_RSRC2_GS__EXCP_EN_MASK 0x0000FF80L
++#define SPI_SHADER_PGM_RSRC2_GS__ES_VGPR_COMP_CNT_MASK 0x00030000L
++#define SPI_SHADER_PGM_RSRC2_GS__OC_LDS_EN_MASK 0x00040000L
++#define SPI_SHADER_PGM_RSRC2_GS__LDS_SIZE_MASK 0x07F80000L
++#define SPI_SHADER_PGM_RSRC2_GS__SKIP_USGPR0_MASK 0x08000000L
++#define SPI_SHADER_PGM_RSRC2_GS__USER_SGPR_MSB_MASK 0x10000000L
++//SPI_SHADER_USER_DATA_ES_0
++#define SPI_SHADER_USER_DATA_ES_0__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_ES_0__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_ES_1
++#define SPI_SHADER_USER_DATA_ES_1__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_ES_1__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_ES_2
++#define SPI_SHADER_USER_DATA_ES_2__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_ES_2__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_ES_3
++#define SPI_SHADER_USER_DATA_ES_3__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_ES_3__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_ES_4
++#define SPI_SHADER_USER_DATA_ES_4__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_ES_4__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_ES_5
++#define SPI_SHADER_USER_DATA_ES_5__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_ES_5__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_ES_6
++#define SPI_SHADER_USER_DATA_ES_6__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_ES_6__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_ES_7
++#define SPI_SHADER_USER_DATA_ES_7__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_ES_7__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_ES_8
++#define SPI_SHADER_USER_DATA_ES_8__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_ES_8__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_ES_9
++#define SPI_SHADER_USER_DATA_ES_9__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_ES_9__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_ES_10
++#define SPI_SHADER_USER_DATA_ES_10__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_ES_10__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_ES_11
++#define SPI_SHADER_USER_DATA_ES_11__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_ES_11__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_ES_12
++#define SPI_SHADER_USER_DATA_ES_12__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_ES_12__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_ES_13
++#define SPI_SHADER_USER_DATA_ES_13__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_ES_13__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_ES_14
++#define SPI_SHADER_USER_DATA_ES_14__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_ES_14__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_ES_15
++#define SPI_SHADER_USER_DATA_ES_15__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_ES_15__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_ES_16
++#define SPI_SHADER_USER_DATA_ES_16__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_ES_16__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_ES_17
++#define SPI_SHADER_USER_DATA_ES_17__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_ES_17__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_ES_18
++#define SPI_SHADER_USER_DATA_ES_18__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_ES_18__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_ES_19
++#define SPI_SHADER_USER_DATA_ES_19__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_ES_19__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_ES_20
++#define SPI_SHADER_USER_DATA_ES_20__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_ES_20__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_ES_21
++#define SPI_SHADER_USER_DATA_ES_21__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_ES_21__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_ES_22
++#define SPI_SHADER_USER_DATA_ES_22__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_ES_22__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_ES_23
++#define SPI_SHADER_USER_DATA_ES_23__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_ES_23__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_ES_24
++#define SPI_SHADER_USER_DATA_ES_24__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_ES_24__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_ES_25
++#define SPI_SHADER_USER_DATA_ES_25__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_ES_25__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_ES_26
++#define SPI_SHADER_USER_DATA_ES_26__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_ES_26__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_ES_27
++#define SPI_SHADER_USER_DATA_ES_27__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_ES_27__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_ES_28
++#define SPI_SHADER_USER_DATA_ES_28__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_ES_28__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_ES_29
++#define SPI_SHADER_USER_DATA_ES_29__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_ES_29__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_ES_30
++#define SPI_SHADER_USER_DATA_ES_30__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_ES_30__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_ES_31
++#define SPI_SHADER_USER_DATA_ES_31__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_ES_31__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_PGM_RSRC4_HS
++#define SPI_SHADER_PGM_RSRC4_HS__GROUP_FIFO_DEPTH__SHIFT 0x0
++#define SPI_SHADER_PGM_RSRC4_HS__GROUP_FIFO_DEPTH_MASK 0x0000007FL
++//SPI_SHADER_USER_DATA_ADDR_LO_HS
++#define SPI_SHADER_USER_DATA_ADDR_LO_HS__MEM_BASE__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_ADDR_LO_HS__MEM_BASE_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_ADDR_HI_HS
++#define SPI_SHADER_USER_DATA_ADDR_HI_HS__MEM_BASE__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_ADDR_HI_HS__MEM_BASE_MASK 0xFFFFFFFFL
++//SPI_SHADER_PGM_LO_LS
++#define SPI_SHADER_PGM_LO_LS__MEM_BASE__SHIFT 0x0
++#define SPI_SHADER_PGM_LO_LS__MEM_BASE_MASK 0xFFFFFFFFL
++//SPI_SHADER_PGM_HI_LS
++#define SPI_SHADER_PGM_HI_LS__MEM_BASE__SHIFT 0x0
++#define SPI_SHADER_PGM_HI_LS__MEM_BASE_MASK 0xFFL
++//SPI_SHADER_PGM_RSRC3_HS
++#define SPI_SHADER_PGM_RSRC3_HS__WAVE_LIMIT__SHIFT 0x0
++#define SPI_SHADER_PGM_RSRC3_HS__LOCK_LOW_THRESHOLD__SHIFT 0x6
++#define SPI_SHADER_PGM_RSRC3_HS__SIMD_DISABLE__SHIFT 0xa
++#define SPI_SHADER_PGM_RSRC3_HS__CU_EN__SHIFT 0x10
++#define SPI_SHADER_PGM_RSRC3_HS__WAVE_LIMIT_MASK 0x0000003FL
++#define SPI_SHADER_PGM_RSRC3_HS__LOCK_LOW_THRESHOLD_MASK 0x000003C0L
++#define SPI_SHADER_PGM_RSRC3_HS__SIMD_DISABLE_MASK 0x00003C00L
++#define SPI_SHADER_PGM_RSRC3_HS__CU_EN_MASK 0xFFFF0000L
++//SPI_SHADER_PGM_LO_HS
++#define SPI_SHADER_PGM_LO_HS__MEM_BASE__SHIFT 0x0
++#define SPI_SHADER_PGM_LO_HS__MEM_BASE_MASK 0xFFFFFFFFL
++//SPI_SHADER_PGM_HI_HS
++#define SPI_SHADER_PGM_HI_HS__MEM_BASE__SHIFT 0x0
++#define SPI_SHADER_PGM_HI_HS__MEM_BASE_MASK 0xFFL
++//SPI_SHADER_PGM_RSRC1_HS
++#define SPI_SHADER_PGM_RSRC1_HS__VGPRS__SHIFT 0x0
++#define SPI_SHADER_PGM_RSRC1_HS__SGPRS__SHIFT 0x6
++#define SPI_SHADER_PGM_RSRC1_HS__PRIORITY__SHIFT 0xa
++#define SPI_SHADER_PGM_RSRC1_HS__FLOAT_MODE__SHIFT 0xc
++#define SPI_SHADER_PGM_RSRC1_HS__PRIV__SHIFT 0x14
++#define SPI_SHADER_PGM_RSRC1_HS__DX10_CLAMP__SHIFT 0x15
++#define SPI_SHADER_PGM_RSRC1_HS__IEEE_MODE__SHIFT 0x17
++#define SPI_SHADER_PGM_RSRC1_HS__LS_VGPR_COMP_CNT__SHIFT 0x1c
++#define SPI_SHADER_PGM_RSRC1_HS__FP16_OVFL__SHIFT 0x1e
++#define SPI_SHADER_PGM_RSRC1_HS__VGPRS_MASK 0x0000003FL
++#define SPI_SHADER_PGM_RSRC1_HS__SGPRS_MASK 0x000003C0L
++#define SPI_SHADER_PGM_RSRC1_HS__PRIORITY_MASK 0x00000C00L
++#define SPI_SHADER_PGM_RSRC1_HS__FLOAT_MODE_MASK 0x000FF000L
++#define SPI_SHADER_PGM_RSRC1_HS__PRIV_MASK 0x00100000L
++#define SPI_SHADER_PGM_RSRC1_HS__DX10_CLAMP_MASK 0x00200000L
++#define SPI_SHADER_PGM_RSRC1_HS__IEEE_MODE_MASK 0x00800000L
++#define SPI_SHADER_PGM_RSRC1_HS__LS_VGPR_COMP_CNT_MASK 0x30000000L
++#define SPI_SHADER_PGM_RSRC1_HS__FP16_OVFL_MASK 0x40000000L
++//SPI_SHADER_PGM_RSRC2_HS
++#define SPI_SHADER_PGM_RSRC2_HS__SCRATCH_EN__SHIFT 0x0
++#define SPI_SHADER_PGM_RSRC2_HS__USER_SGPR__SHIFT 0x1
++#define SPI_SHADER_PGM_RSRC2_HS__TRAP_PRESENT__SHIFT 0x6
++#define SPI_SHADER_PGM_RSRC2_HS__EXCP_EN__SHIFT 0x7
++#define SPI_SHADER_PGM_RSRC2_HS__LDS_SIZE__SHIFT 0x10
++#define SPI_SHADER_PGM_RSRC2_HS__SKIP_USGPR0__SHIFT 0x1b
++#define SPI_SHADER_PGM_RSRC2_HS__USER_SGPR_MSB__SHIFT 0x1c
++#define SPI_SHADER_PGM_RSRC2_HS__SCRATCH_EN_MASK 0x00000001L
++#define SPI_SHADER_PGM_RSRC2_HS__USER_SGPR_MASK 0x0000003EL
++#define SPI_SHADER_PGM_RSRC2_HS__TRAP_PRESENT_MASK 0x00000040L
++#define SPI_SHADER_PGM_RSRC2_HS__EXCP_EN_MASK 0x0000FF80L
++#define SPI_SHADER_PGM_RSRC2_HS__LDS_SIZE_MASK 0x01FF0000L
++#define SPI_SHADER_PGM_RSRC2_HS__SKIP_USGPR0_MASK 0x08000000L
++#define SPI_SHADER_PGM_RSRC2_HS__USER_SGPR_MSB_MASK 0x10000000L
++//SPI_SHADER_USER_DATA_LS_0
++#define SPI_SHADER_USER_DATA_LS_0__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_LS_0__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_LS_1
++#define SPI_SHADER_USER_DATA_LS_1__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_LS_1__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_LS_2
++#define SPI_SHADER_USER_DATA_LS_2__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_LS_2__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_LS_3
++#define SPI_SHADER_USER_DATA_LS_3__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_LS_3__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_LS_4
++#define SPI_SHADER_USER_DATA_LS_4__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_LS_4__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_LS_5
++#define SPI_SHADER_USER_DATA_LS_5__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_LS_5__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_LS_6
++#define SPI_SHADER_USER_DATA_LS_6__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_LS_6__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_LS_7
++#define SPI_SHADER_USER_DATA_LS_7__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_LS_7__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_LS_8
++#define SPI_SHADER_USER_DATA_LS_8__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_LS_8__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_LS_9
++#define SPI_SHADER_USER_DATA_LS_9__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_LS_9__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_LS_10
++#define SPI_SHADER_USER_DATA_LS_10__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_LS_10__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_LS_11
++#define SPI_SHADER_USER_DATA_LS_11__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_LS_11__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_LS_12
++#define SPI_SHADER_USER_DATA_LS_12__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_LS_12__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_LS_13
++#define SPI_SHADER_USER_DATA_LS_13__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_LS_13__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_LS_14
++#define SPI_SHADER_USER_DATA_LS_14__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_LS_14__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_LS_15
++#define SPI_SHADER_USER_DATA_LS_15__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_LS_15__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_LS_16
++#define SPI_SHADER_USER_DATA_LS_16__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_LS_16__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_LS_17
++#define SPI_SHADER_USER_DATA_LS_17__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_LS_17__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_LS_18
++#define SPI_SHADER_USER_DATA_LS_18__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_LS_18__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_LS_19
++#define SPI_SHADER_USER_DATA_LS_19__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_LS_19__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_LS_20
++#define SPI_SHADER_USER_DATA_LS_20__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_LS_20__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_LS_21
++#define SPI_SHADER_USER_DATA_LS_21__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_LS_21__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_LS_22
++#define SPI_SHADER_USER_DATA_LS_22__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_LS_22__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_LS_23
++#define SPI_SHADER_USER_DATA_LS_23__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_LS_23__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_LS_24
++#define SPI_SHADER_USER_DATA_LS_24__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_LS_24__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_LS_25
++#define SPI_SHADER_USER_DATA_LS_25__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_LS_25__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_LS_26
++#define SPI_SHADER_USER_DATA_LS_26__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_LS_26__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_LS_27
++#define SPI_SHADER_USER_DATA_LS_27__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_LS_27__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_LS_28
++#define SPI_SHADER_USER_DATA_LS_28__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_LS_28__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_LS_29
++#define SPI_SHADER_USER_DATA_LS_29__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_LS_29__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_LS_30
++#define SPI_SHADER_USER_DATA_LS_30__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_LS_30__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_LS_31
++#define SPI_SHADER_USER_DATA_LS_31__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_LS_31__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_COMMON_0
++#define SPI_SHADER_USER_DATA_COMMON_0__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_COMMON_0__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_COMMON_1
++#define SPI_SHADER_USER_DATA_COMMON_1__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_COMMON_1__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_COMMON_2
++#define SPI_SHADER_USER_DATA_COMMON_2__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_COMMON_2__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_COMMON_3
++#define SPI_SHADER_USER_DATA_COMMON_3__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_COMMON_3__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_COMMON_4
++#define SPI_SHADER_USER_DATA_COMMON_4__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_COMMON_4__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_COMMON_5
++#define SPI_SHADER_USER_DATA_COMMON_5__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_COMMON_5__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_COMMON_6
++#define SPI_SHADER_USER_DATA_COMMON_6__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_COMMON_6__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_COMMON_7
++#define SPI_SHADER_USER_DATA_COMMON_7__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_COMMON_7__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_COMMON_8
++#define SPI_SHADER_USER_DATA_COMMON_8__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_COMMON_8__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_COMMON_9
++#define SPI_SHADER_USER_DATA_COMMON_9__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_COMMON_9__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_COMMON_10
++#define SPI_SHADER_USER_DATA_COMMON_10__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_COMMON_10__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_COMMON_11
++#define SPI_SHADER_USER_DATA_COMMON_11__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_COMMON_11__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_COMMON_12
++#define SPI_SHADER_USER_DATA_COMMON_12__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_COMMON_12__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_COMMON_13
++#define SPI_SHADER_USER_DATA_COMMON_13__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_COMMON_13__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_COMMON_14
++#define SPI_SHADER_USER_DATA_COMMON_14__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_COMMON_14__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_COMMON_15
++#define SPI_SHADER_USER_DATA_COMMON_15__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_COMMON_15__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_COMMON_16
++#define SPI_SHADER_USER_DATA_COMMON_16__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_COMMON_16__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_COMMON_17
++#define SPI_SHADER_USER_DATA_COMMON_17__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_COMMON_17__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_COMMON_18
++#define SPI_SHADER_USER_DATA_COMMON_18__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_COMMON_18__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_COMMON_19
++#define SPI_SHADER_USER_DATA_COMMON_19__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_COMMON_19__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_COMMON_20
++#define SPI_SHADER_USER_DATA_COMMON_20__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_COMMON_20__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_COMMON_21
++#define SPI_SHADER_USER_DATA_COMMON_21__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_COMMON_21__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_COMMON_22
++#define SPI_SHADER_USER_DATA_COMMON_22__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_COMMON_22__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_COMMON_23
++#define SPI_SHADER_USER_DATA_COMMON_23__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_COMMON_23__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_COMMON_24
++#define SPI_SHADER_USER_DATA_COMMON_24__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_COMMON_24__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_COMMON_25
++#define SPI_SHADER_USER_DATA_COMMON_25__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_COMMON_25__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_COMMON_26
++#define SPI_SHADER_USER_DATA_COMMON_26__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_COMMON_26__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_COMMON_27
++#define SPI_SHADER_USER_DATA_COMMON_27__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_COMMON_27__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_COMMON_28
++#define SPI_SHADER_USER_DATA_COMMON_28__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_COMMON_28__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_COMMON_29
++#define SPI_SHADER_USER_DATA_COMMON_29__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_COMMON_29__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_COMMON_30
++#define SPI_SHADER_USER_DATA_COMMON_30__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_COMMON_30__DATA_MASK 0xFFFFFFFFL
++//SPI_SHADER_USER_DATA_COMMON_31
++#define SPI_SHADER_USER_DATA_COMMON_31__DATA__SHIFT 0x0
++#define SPI_SHADER_USER_DATA_COMMON_31__DATA_MASK 0xFFFFFFFFL
++//COMPUTE_DISPATCH_INITIATOR
++#define COMPUTE_DISPATCH_INITIATOR__COMPUTE_SHADER_EN__SHIFT 0x0
++#define COMPUTE_DISPATCH_INITIATOR__PARTIAL_TG_EN__SHIFT 0x1
++#define COMPUTE_DISPATCH_INITIATOR__FORCE_START_AT_000__SHIFT 0x2
++#define COMPUTE_DISPATCH_INITIATOR__ORDERED_APPEND_ENBL__SHIFT 0x3
++#define COMPUTE_DISPATCH_INITIATOR__ORDERED_APPEND_MODE__SHIFT 0x4
++#define COMPUTE_DISPATCH_INITIATOR__USE_THREAD_DIMENSIONS__SHIFT 0x5
++#define COMPUTE_DISPATCH_INITIATOR__ORDER_MODE__SHIFT 0x6
++#define COMPUTE_DISPATCH_INITIATOR__SCALAR_L1_INV_VOL__SHIFT 0xa
++#define COMPUTE_DISPATCH_INITIATOR__VECTOR_L1_INV_VOL__SHIFT 0xb
++#define COMPUTE_DISPATCH_INITIATOR__RESERVED__SHIFT 0xc
++#define COMPUTE_DISPATCH_INITIATOR__RESTORE__SHIFT 0xe
++#define COMPUTE_DISPATCH_INITIATOR__COMPUTE_SHADER_EN_MASK 0x00000001L
++#define COMPUTE_DISPATCH_INITIATOR__PARTIAL_TG_EN_MASK 0x00000002L
++#define COMPUTE_DISPATCH_INITIATOR__FORCE_START_AT_000_MASK 0x00000004L
++#define COMPUTE_DISPATCH_INITIATOR__ORDERED_APPEND_ENBL_MASK 0x00000008L
++#define COMPUTE_DISPATCH_INITIATOR__ORDERED_APPEND_MODE_MASK 0x00000010L
++#define COMPUTE_DISPATCH_INITIATOR__USE_THREAD_DIMENSIONS_MASK 0x00000020L
++#define COMPUTE_DISPATCH_INITIATOR__ORDER_MODE_MASK 0x00000040L
++#define COMPUTE_DISPATCH_INITIATOR__SCALAR_L1_INV_VOL_MASK 0x00000400L
++#define COMPUTE_DISPATCH_INITIATOR__VECTOR_L1_INV_VOL_MASK 0x00000800L
++#define COMPUTE_DISPATCH_INITIATOR__RESERVED_MASK 0x00001000L
++#define COMPUTE_DISPATCH_INITIATOR__RESTORE_MASK 0x00004000L
++//COMPUTE_DIM_X
++#define COMPUTE_DIM_X__SIZE__SHIFT 0x0
++#define COMPUTE_DIM_X__SIZE_MASK 0xFFFFFFFFL
++//COMPUTE_DIM_Y
++#define COMPUTE_DIM_Y__SIZE__SHIFT 0x0
++#define COMPUTE_DIM_Y__SIZE_MASK 0xFFFFFFFFL
++//COMPUTE_DIM_Z
++#define COMPUTE_DIM_Z__SIZE__SHIFT 0x0
++#define COMPUTE_DIM_Z__SIZE_MASK 0xFFFFFFFFL
++//COMPUTE_START_X
++#define COMPUTE_START_X__START__SHIFT 0x0
++#define COMPUTE_START_X__START_MASK 0xFFFFFFFFL
++//COMPUTE_START_Y
++#define COMPUTE_START_Y__START__SHIFT 0x0
++#define COMPUTE_START_Y__START_MASK 0xFFFFFFFFL
++//COMPUTE_START_Z
++#define COMPUTE_START_Z__START__SHIFT 0x0
++#define COMPUTE_START_Z__START_MASK 0xFFFFFFFFL
++//COMPUTE_NUM_THREAD_X
++#define COMPUTE_NUM_THREAD_X__NUM_THREAD_FULL__SHIFT 0x0
++#define COMPUTE_NUM_THREAD_X__NUM_THREAD_PARTIAL__SHIFT 0x10
++#define COMPUTE_NUM_THREAD_X__NUM_THREAD_FULL_MASK 0x0000FFFFL
++#define COMPUTE_NUM_THREAD_X__NUM_THREAD_PARTIAL_MASK 0xFFFF0000L
++//COMPUTE_NUM_THREAD_Y
++#define COMPUTE_NUM_THREAD_Y__NUM_THREAD_FULL__SHIFT 0x0
++#define COMPUTE_NUM_THREAD_Y__NUM_THREAD_PARTIAL__SHIFT 0x10
++#define COMPUTE_NUM_THREAD_Y__NUM_THREAD_FULL_MASK 0x0000FFFFL
++#define COMPUTE_NUM_THREAD_Y__NUM_THREAD_PARTIAL_MASK 0xFFFF0000L
++//COMPUTE_NUM_THREAD_Z
++#define COMPUTE_NUM_THREAD_Z__NUM_THREAD_FULL__SHIFT 0x0
++#define COMPUTE_NUM_THREAD_Z__NUM_THREAD_PARTIAL__SHIFT 0x10
++#define COMPUTE_NUM_THREAD_Z__NUM_THREAD_FULL_MASK 0x0000FFFFL
++#define COMPUTE_NUM_THREAD_Z__NUM_THREAD_PARTIAL_MASK 0xFFFF0000L
++//COMPUTE_PIPELINESTAT_ENABLE
++#define COMPUTE_PIPELINESTAT_ENABLE__PIPELINESTAT_ENABLE__SHIFT 0x0
++#define COMPUTE_PIPELINESTAT_ENABLE__PIPELINESTAT_ENABLE_MASK 0x00000001L
++//COMPUTE_PERFCOUNT_ENABLE
++#define COMPUTE_PERFCOUNT_ENABLE__PERFCOUNT_ENABLE__SHIFT 0x0
++#define COMPUTE_PERFCOUNT_ENABLE__PERFCOUNT_ENABLE_MASK 0x00000001L
++//COMPUTE_PGM_LO
++#define COMPUTE_PGM_LO__DATA__SHIFT 0x0
++#define COMPUTE_PGM_LO__DATA_MASK 0xFFFFFFFFL
++//COMPUTE_PGM_HI
++#define COMPUTE_PGM_HI__DATA__SHIFT 0x0
++#define COMPUTE_PGM_HI__DATA_MASK 0x000000FFL
++//COMPUTE_DISPATCH_PKT_ADDR_LO
++#define COMPUTE_DISPATCH_PKT_ADDR_LO__DATA__SHIFT 0x0
++#define COMPUTE_DISPATCH_PKT_ADDR_LO__DATA_MASK 0xFFFFFFFFL
++//COMPUTE_DISPATCH_PKT_ADDR_HI
++#define COMPUTE_DISPATCH_PKT_ADDR_HI__DATA__SHIFT 0x0
++#define COMPUTE_DISPATCH_PKT_ADDR_HI__DATA_MASK 0x000000FFL
++//COMPUTE_DISPATCH_SCRATCH_BASE_LO
++#define COMPUTE_DISPATCH_SCRATCH_BASE_LO__DATA__SHIFT 0x0
++#define COMPUTE_DISPATCH_SCRATCH_BASE_LO__DATA_MASK 0xFFFFFFFFL
++//COMPUTE_DISPATCH_SCRATCH_BASE_HI
++#define COMPUTE_DISPATCH_SCRATCH_BASE_HI__DATA__SHIFT 0x0
++#define COMPUTE_DISPATCH_SCRATCH_BASE_HI__DATA_MASK 0x000000FFL
++//COMPUTE_PGM_RSRC1
++#define COMPUTE_PGM_RSRC1__VGPRS__SHIFT 0x0
++#define COMPUTE_PGM_RSRC1__SGPRS__SHIFT 0x6
++#define COMPUTE_PGM_RSRC1__PRIORITY__SHIFT 0xa
++#define COMPUTE_PGM_RSRC1__FLOAT_MODE__SHIFT 0xc
++#define COMPUTE_PGM_RSRC1__PRIV__SHIFT 0x14
++#define COMPUTE_PGM_RSRC1__DX10_CLAMP__SHIFT 0x15
++#define COMPUTE_PGM_RSRC1__IEEE_MODE__SHIFT 0x17
++#define COMPUTE_PGM_RSRC1__BULKY__SHIFT 0x18
++#define COMPUTE_PGM_RSRC1__FP16_OVFL__SHIFT 0x1a
++#define COMPUTE_PGM_RSRC1__VGPRS_MASK 0x0000003FL
++#define COMPUTE_PGM_RSRC1__SGPRS_MASK 0x000003C0L
++#define COMPUTE_PGM_RSRC1__PRIORITY_MASK 0x00000C00L
++#define COMPUTE_PGM_RSRC1__FLOAT_MODE_MASK 0x000FF000L
++#define COMPUTE_PGM_RSRC1__PRIV_MASK 0x00100000L
++#define COMPUTE_PGM_RSRC1__DX10_CLAMP_MASK 0x00200000L
++#define COMPUTE_PGM_RSRC1__IEEE_MODE_MASK 0x00800000L
++#define COMPUTE_PGM_RSRC1__BULKY_MASK 0x01000000L
++#define COMPUTE_PGM_RSRC1__FP16_OVFL_MASK 0x04000000L
++//COMPUTE_PGM_RSRC2
++#define COMPUTE_PGM_RSRC2__SCRATCH_EN__SHIFT 0x0
++#define COMPUTE_PGM_RSRC2__USER_SGPR__SHIFT 0x1
++#define COMPUTE_PGM_RSRC2__TRAP_PRESENT__SHIFT 0x6
++#define COMPUTE_PGM_RSRC2__TGID_X_EN__SHIFT 0x7
++#define COMPUTE_PGM_RSRC2__TGID_Y_EN__SHIFT 0x8
++#define COMPUTE_PGM_RSRC2__TGID_Z_EN__SHIFT 0x9
++#define COMPUTE_PGM_RSRC2__TG_SIZE_EN__SHIFT 0xa
++#define COMPUTE_PGM_RSRC2__TIDIG_COMP_CNT__SHIFT 0xb
++#define COMPUTE_PGM_RSRC2__EXCP_EN_MSB__SHIFT 0xd
++#define COMPUTE_PGM_RSRC2__LDS_SIZE__SHIFT 0xf
++#define COMPUTE_PGM_RSRC2__EXCP_EN__SHIFT 0x18
++#define COMPUTE_PGM_RSRC2__SKIP_USGPR0__SHIFT 0x1f
++#define COMPUTE_PGM_RSRC2__SCRATCH_EN_MASK 0x00000001L
++#define COMPUTE_PGM_RSRC2__USER_SGPR_MASK 0x0000003EL
++#define COMPUTE_PGM_RSRC2__TRAP_PRESENT_MASK 0x00000040L
++#define COMPUTE_PGM_RSRC2__TGID_X_EN_MASK 0x00000080L
++#define COMPUTE_PGM_RSRC2__TGID_Y_EN_MASK 0x00000100L
++#define COMPUTE_PGM_RSRC2__TGID_Z_EN_MASK 0x00000200L
++#define COMPUTE_PGM_RSRC2__TG_SIZE_EN_MASK 0x00000400L
++#define COMPUTE_PGM_RSRC2__TIDIG_COMP_CNT_MASK 0x00001800L
++#define COMPUTE_PGM_RSRC2__EXCP_EN_MSB_MASK 0x00006000L
++#define COMPUTE_PGM_RSRC2__LDS_SIZE_MASK 0x00FF8000L
++#define COMPUTE_PGM_RSRC2__EXCP_EN_MASK 0x7F000000L
++#define COMPUTE_PGM_RSRC2__SKIP_USGPR0_MASK 0x80000000L
++//COMPUTE_VMID
++#define COMPUTE_VMID__DATA__SHIFT 0x0
++#define COMPUTE_VMID__DATA_MASK 0x0000000FL
++//COMPUTE_RESOURCE_LIMITS
++#define COMPUTE_RESOURCE_LIMITS__WAVES_PER_SH__SHIFT 0x0
++#define COMPUTE_RESOURCE_LIMITS__TG_PER_CU__SHIFT 0xc
++#define COMPUTE_RESOURCE_LIMITS__LOCK_THRESHOLD__SHIFT 0x10
++#define COMPUTE_RESOURCE_LIMITS__SIMD_DEST_CNTL__SHIFT 0x16
++#define COMPUTE_RESOURCE_LIMITS__FORCE_SIMD_DIST__SHIFT 0x17
++#define COMPUTE_RESOURCE_LIMITS__CU_GROUP_COUNT__SHIFT 0x18
++#define COMPUTE_RESOURCE_LIMITS__SIMD_DISABLE__SHIFT 0x1b
++#define COMPUTE_RESOURCE_LIMITS__WAVES_PER_SH_MASK 0x000003FFL
++#define COMPUTE_RESOURCE_LIMITS__TG_PER_CU_MASK 0x0000F000L
++#define COMPUTE_RESOURCE_LIMITS__LOCK_THRESHOLD_MASK 0x003F0000L
++#define COMPUTE_RESOURCE_LIMITS__SIMD_DEST_CNTL_MASK 0x00400000L
++#define COMPUTE_RESOURCE_LIMITS__FORCE_SIMD_DIST_MASK 0x00800000L
++#define COMPUTE_RESOURCE_LIMITS__CU_GROUP_COUNT_MASK 0x07000000L
++#define COMPUTE_RESOURCE_LIMITS__SIMD_DISABLE_MASK 0x78000000L
++//COMPUTE_STATIC_THREAD_MGMT_SE0
++#define COMPUTE_STATIC_THREAD_MGMT_SE0__SH0_CU_EN__SHIFT 0x0
++#define COMPUTE_STATIC_THREAD_MGMT_SE0__SH1_CU_EN__SHIFT 0x10
++#define COMPUTE_STATIC_THREAD_MGMT_SE0__SH0_CU_EN_MASK 0x0000FFFFL
++#define COMPUTE_STATIC_THREAD_MGMT_SE0__SH1_CU_EN_MASK 0xFFFF0000L
++//COMPUTE_STATIC_THREAD_MGMT_SE1
++#define COMPUTE_STATIC_THREAD_MGMT_SE1__SH0_CU_EN__SHIFT 0x0
++#define COMPUTE_STATIC_THREAD_MGMT_SE1__SH1_CU_EN__SHIFT 0x10
++#define COMPUTE_STATIC_THREAD_MGMT_SE1__SH0_CU_EN_MASK 0x0000FFFFL
++#define COMPUTE_STATIC_THREAD_MGMT_SE1__SH1_CU_EN_MASK 0xFFFF0000L
++//COMPUTE_TMPRING_SIZE
++#define COMPUTE_TMPRING_SIZE__WAVES__SHIFT 0x0
++#define COMPUTE_TMPRING_SIZE__WAVESIZE__SHIFT 0xc
++#define COMPUTE_TMPRING_SIZE__WAVES_MASK 0x00000FFFL
++#define COMPUTE_TMPRING_SIZE__WAVESIZE_MASK 0x01FFF000L
++//COMPUTE_STATIC_THREAD_MGMT_SE2
++#define COMPUTE_STATIC_THREAD_MGMT_SE2__SH0_CU_EN__SHIFT 0x0
++#define COMPUTE_STATIC_THREAD_MGMT_SE2__SH1_CU_EN__SHIFT 0x10
++#define COMPUTE_STATIC_THREAD_MGMT_SE2__SH0_CU_EN_MASK 0x0000FFFFL
++#define COMPUTE_STATIC_THREAD_MGMT_SE2__SH1_CU_EN_MASK 0xFFFF0000L
++//COMPUTE_STATIC_THREAD_MGMT_SE3
++#define COMPUTE_STATIC_THREAD_MGMT_SE3__SH0_CU_EN__SHIFT 0x0
++#define COMPUTE_STATIC_THREAD_MGMT_SE3__SH1_CU_EN__SHIFT 0x10
++#define COMPUTE_STATIC_THREAD_MGMT_SE3__SH0_CU_EN_MASK 0x0000FFFFL
++#define COMPUTE_STATIC_THREAD_MGMT_SE3__SH1_CU_EN_MASK 0xFFFF0000L
++//COMPUTE_RESTART_X
++#define COMPUTE_RESTART_X__RESTART__SHIFT 0x0
++#define COMPUTE_RESTART_X__RESTART_MASK 0xFFFFFFFFL
++//COMPUTE_RESTART_Y
++#define COMPUTE_RESTART_Y__RESTART__SHIFT 0x0
++#define COMPUTE_RESTART_Y__RESTART_MASK 0xFFFFFFFFL
++//COMPUTE_RESTART_Z
++#define COMPUTE_RESTART_Z__RESTART__SHIFT 0x0
++#define COMPUTE_RESTART_Z__RESTART_MASK 0xFFFFFFFFL
++//COMPUTE_THREAD_TRACE_ENABLE
++#define COMPUTE_THREAD_TRACE_ENABLE__THREAD_TRACE_ENABLE__SHIFT 0x0
++#define COMPUTE_THREAD_TRACE_ENABLE__THREAD_TRACE_ENABLE_MASK 0x00000001L
++//COMPUTE_MISC_RESERVED
++#define COMPUTE_MISC_RESERVED__SEND_SEID__SHIFT 0x0
++#define COMPUTE_MISC_RESERVED__RESERVED2__SHIFT 0x2
++#define COMPUTE_MISC_RESERVED__RESERVED3__SHIFT 0x3
++#define COMPUTE_MISC_RESERVED__RESERVED4__SHIFT 0x4
++#define COMPUTE_MISC_RESERVED__WAVE_ID_BASE__SHIFT 0x5
++#define COMPUTE_MISC_RESERVED__SEND_SEID_MASK 0x00000003L
++#define COMPUTE_MISC_RESERVED__RESERVED2_MASK 0x00000004L
++#define COMPUTE_MISC_RESERVED__RESERVED3_MASK 0x00000008L
++#define COMPUTE_MISC_RESERVED__RESERVED4_MASK 0x00000010L
++#define COMPUTE_MISC_RESERVED__WAVE_ID_BASE_MASK 0x0001FFE0L
++//COMPUTE_DISPATCH_ID
++#define COMPUTE_DISPATCH_ID__DISPATCH_ID__SHIFT 0x0
++#define COMPUTE_DISPATCH_ID__DISPATCH_ID_MASK 0xFFFFFFFFL
++//COMPUTE_THREADGROUP_ID
++#define COMPUTE_THREADGROUP_ID__THREADGROUP_ID__SHIFT 0x0
++#define COMPUTE_THREADGROUP_ID__THREADGROUP_ID_MASK 0xFFFFFFFFL
++//COMPUTE_RELAUNCH
++#define COMPUTE_RELAUNCH__PAYLOAD__SHIFT 0x0
++#define COMPUTE_RELAUNCH__IS_EVENT__SHIFT 0x1e
++#define COMPUTE_RELAUNCH__IS_STATE__SHIFT 0x1f
++#define COMPUTE_RELAUNCH__PAYLOAD_MASK 0x3FFFFFFFL
++#define COMPUTE_RELAUNCH__IS_EVENT_MASK 0x40000000L
++#define COMPUTE_RELAUNCH__IS_STATE_MASK 0x80000000L
++//COMPUTE_WAVE_RESTORE_ADDR_LO
++#define COMPUTE_WAVE_RESTORE_ADDR_LO__ADDR__SHIFT 0x0
++#define COMPUTE_WAVE_RESTORE_ADDR_LO__ADDR_MASK 0xFFFFFFFFL
++//COMPUTE_WAVE_RESTORE_ADDR_HI
++#define COMPUTE_WAVE_RESTORE_ADDR_HI__ADDR__SHIFT 0x0
++#define COMPUTE_WAVE_RESTORE_ADDR_HI__ADDR_MASK 0xFFFFL
++//COMPUTE_USER_DATA_0
++#define COMPUTE_USER_DATA_0__DATA__SHIFT 0x0
++#define COMPUTE_USER_DATA_0__DATA_MASK 0xFFFFFFFFL
++//COMPUTE_USER_DATA_1
++#define COMPUTE_USER_DATA_1__DATA__SHIFT 0x0
++#define COMPUTE_USER_DATA_1__DATA_MASK 0xFFFFFFFFL
++//COMPUTE_USER_DATA_2
++#define COMPUTE_USER_DATA_2__DATA__SHIFT 0x0
++#define COMPUTE_USER_DATA_2__DATA_MASK 0xFFFFFFFFL
++//COMPUTE_USER_DATA_3
++#define COMPUTE_USER_DATA_3__DATA__SHIFT 0x0
++#define COMPUTE_USER_DATA_3__DATA_MASK 0xFFFFFFFFL
++//COMPUTE_USER_DATA_4
++#define COMPUTE_USER_DATA_4__DATA__SHIFT 0x0
++#define COMPUTE_USER_DATA_4__DATA_MASK 0xFFFFFFFFL
++//COMPUTE_USER_DATA_5
++#define COMPUTE_USER_DATA_5__DATA__SHIFT 0x0
++#define COMPUTE_USER_DATA_5__DATA_MASK 0xFFFFFFFFL
++//COMPUTE_USER_DATA_6
++#define COMPUTE_USER_DATA_6__DATA__SHIFT 0x0
++#define COMPUTE_USER_DATA_6__DATA_MASK 0xFFFFFFFFL
++//COMPUTE_USER_DATA_7
++#define COMPUTE_USER_DATA_7__DATA__SHIFT 0x0
++#define COMPUTE_USER_DATA_7__DATA_MASK 0xFFFFFFFFL
++//COMPUTE_USER_DATA_8
++#define COMPUTE_USER_DATA_8__DATA__SHIFT 0x0
++#define COMPUTE_USER_DATA_8__DATA_MASK 0xFFFFFFFFL
++//COMPUTE_USER_DATA_9
++#define COMPUTE_USER_DATA_9__DATA__SHIFT 0x0
++#define COMPUTE_USER_DATA_9__DATA_MASK 0xFFFFFFFFL
++//COMPUTE_USER_DATA_10
++#define COMPUTE_USER_DATA_10__DATA__SHIFT 0x0
++#define COMPUTE_USER_DATA_10__DATA_MASK 0xFFFFFFFFL
++//COMPUTE_USER_DATA_11
++#define COMPUTE_USER_DATA_11__DATA__SHIFT 0x0
++#define COMPUTE_USER_DATA_11__DATA_MASK 0xFFFFFFFFL
++//COMPUTE_USER_DATA_12
++#define COMPUTE_USER_DATA_12__DATA__SHIFT 0x0
++#define COMPUTE_USER_DATA_12__DATA_MASK 0xFFFFFFFFL
++//COMPUTE_USER_DATA_13
++#define COMPUTE_USER_DATA_13__DATA__SHIFT 0x0
++#define COMPUTE_USER_DATA_13__DATA_MASK 0xFFFFFFFFL
++//COMPUTE_USER_DATA_14
++#define COMPUTE_USER_DATA_14__DATA__SHIFT 0x0
++#define COMPUTE_USER_DATA_14__DATA_MASK 0xFFFFFFFFL
++//COMPUTE_USER_DATA_15
++#define COMPUTE_USER_DATA_15__DATA__SHIFT 0x0
++#define COMPUTE_USER_DATA_15__DATA_MASK 0xFFFFFFFFL
++//COMPUTE_NOWHERE
++#define COMPUTE_NOWHERE__DATA__SHIFT 0x0
++#define COMPUTE_NOWHERE__DATA_MASK 0xFFFFFFFFL
++
++
++// addressBlock: gc_cppdec
++//CP_DFY_CNTL
++#define CP_DFY_CNTL__POLICY__SHIFT 0x0
++#define CP_DFY_CNTL__MTYPE__SHIFT 0x2
++#define CP_DFY_CNTL__TPI_SDP_SEL__SHIFT 0x1a
++#define CP_DFY_CNTL__LFSR_RESET__SHIFT 0x1c
++#define CP_DFY_CNTL__MODE__SHIFT 0x1d
++#define CP_DFY_CNTL__ENABLE__SHIFT 0x1f
++#define CP_DFY_CNTL__POLICY_MASK 0x00000001L
++#define CP_DFY_CNTL__MTYPE_MASK 0x0000000CL
++#define CP_DFY_CNTL__TPI_SDP_SEL_MASK 0x04000000L
++#define CP_DFY_CNTL__LFSR_RESET_MASK 0x10000000L
++#define CP_DFY_CNTL__MODE_MASK 0x60000000L
++#define CP_DFY_CNTL__ENABLE_MASK 0x80000000L
++//CP_DFY_STAT
++#define CP_DFY_STAT__BURST_COUNT__SHIFT 0x0
++#define CP_DFY_STAT__TAGS_PENDING__SHIFT 0x10
++#define CP_DFY_STAT__BUSY__SHIFT 0x1f
++#define CP_DFY_STAT__BURST_COUNT_MASK 0x0000FFFFL
++#define CP_DFY_STAT__TAGS_PENDING_MASK 0x07FF0000L
++#define CP_DFY_STAT__BUSY_MASK 0x80000000L
++//CP_DFY_ADDR_HI
++#define CP_DFY_ADDR_HI__ADDR_HI__SHIFT 0x0
++#define CP_DFY_ADDR_HI__ADDR_HI_MASK 0xFFFFFFFFL
++//CP_DFY_ADDR_LO
++#define CP_DFY_ADDR_LO__ADDR_LO__SHIFT 0x5
++#define CP_DFY_ADDR_LO__ADDR_LO_MASK 0xFFFFFFE0L
++//CP_DFY_DATA_0
++#define CP_DFY_DATA_0__DATA__SHIFT 0x0
++#define CP_DFY_DATA_0__DATA_MASK 0xFFFFFFFFL
++//CP_DFY_DATA_1
++#define CP_DFY_DATA_1__DATA__SHIFT 0x0
++#define CP_DFY_DATA_1__DATA_MASK 0xFFFFFFFFL
++//CP_DFY_DATA_2
++#define CP_DFY_DATA_2__DATA__SHIFT 0x0
++#define CP_DFY_DATA_2__DATA_MASK 0xFFFFFFFFL
++//CP_DFY_DATA_3
++#define CP_DFY_DATA_3__DATA__SHIFT 0x0
++#define CP_DFY_DATA_3__DATA_MASK 0xFFFFFFFFL
++//CP_DFY_DATA_4
++#define CP_DFY_DATA_4__DATA__SHIFT 0x0
++#define CP_DFY_DATA_4__DATA_MASK 0xFFFFFFFFL
++//CP_DFY_DATA_5
++#define CP_DFY_DATA_5__DATA__SHIFT 0x0
++#define CP_DFY_DATA_5__DATA_MASK 0xFFFFFFFFL
++//CP_DFY_DATA_6
++#define CP_DFY_DATA_6__DATA__SHIFT 0x0
++#define CP_DFY_DATA_6__DATA_MASK 0xFFFFFFFFL
++//CP_DFY_DATA_7
++#define CP_DFY_DATA_7__DATA__SHIFT 0x0
++#define CP_DFY_DATA_7__DATA_MASK 0xFFFFFFFFL
++//CP_DFY_DATA_8
++#define CP_DFY_DATA_8__DATA__SHIFT 0x0
++#define CP_DFY_DATA_8__DATA_MASK 0xFFFFFFFFL
++//CP_DFY_DATA_9
++#define CP_DFY_DATA_9__DATA__SHIFT 0x0
++#define CP_DFY_DATA_9__DATA_MASK 0xFFFFFFFFL
++//CP_DFY_DATA_10
++#define CP_DFY_DATA_10__DATA__SHIFT 0x0
++#define CP_DFY_DATA_10__DATA_MASK 0xFFFFFFFFL
++//CP_DFY_DATA_11
++#define CP_DFY_DATA_11__DATA__SHIFT 0x0
++#define CP_DFY_DATA_11__DATA_MASK 0xFFFFFFFFL
++//CP_DFY_DATA_12
++#define CP_DFY_DATA_12__DATA__SHIFT 0x0
++#define CP_DFY_DATA_12__DATA_MASK 0xFFFFFFFFL
++//CP_DFY_DATA_13
++#define CP_DFY_DATA_13__DATA__SHIFT 0x0
++#define CP_DFY_DATA_13__DATA_MASK 0xFFFFFFFFL
++//CP_DFY_DATA_14
++#define CP_DFY_DATA_14__DATA__SHIFT 0x0
++#define CP_DFY_DATA_14__DATA_MASK 0xFFFFFFFFL
++//CP_DFY_DATA_15
++#define CP_DFY_DATA_15__DATA__SHIFT 0x0
++#define CP_DFY_DATA_15__DATA_MASK 0xFFFFFFFFL
++//CP_DFY_CMD
++#define CP_DFY_CMD__OFFSET__SHIFT 0x0
++#define CP_DFY_CMD__SIZE__SHIFT 0x10
++#define CP_DFY_CMD__OFFSET_MASK 0x000001FFL
++#define CP_DFY_CMD__SIZE_MASK 0xFFFF0000L
++//CP_EOPQ_WAIT_TIME
++#define CP_EOPQ_WAIT_TIME__WAIT_TIME__SHIFT 0x0
++#define CP_EOPQ_WAIT_TIME__SCALE_COUNT__SHIFT 0xa
++#define CP_EOPQ_WAIT_TIME__WAIT_TIME_MASK 0x000003FFL
++#define CP_EOPQ_WAIT_TIME__SCALE_COUNT_MASK 0x0003FC00L
++//CP_CPC_MGCG_SYNC_CNTL
++#define CP_CPC_MGCG_SYNC_CNTL__COOLDOWN_PERIOD__SHIFT 0x0
++#define CP_CPC_MGCG_SYNC_CNTL__WARMUP_PERIOD__SHIFT 0x8
++#define CP_CPC_MGCG_SYNC_CNTL__COOLDOWN_PERIOD_MASK 0x000000FFL
++#define CP_CPC_MGCG_SYNC_CNTL__WARMUP_PERIOD_MASK 0x0000FF00L
++//CPC_INT_INFO
++#define CPC_INT_INFO__ADDR_HI__SHIFT 0x0
++#define CPC_INT_INFO__TYPE__SHIFT 0x10
++#define CPC_INT_INFO__VMID__SHIFT 0x14
++#define CPC_INT_INFO__QUEUE_ID__SHIFT 0x1c
++#define CPC_INT_INFO__ADDR_HI_MASK 0x0000FFFFL
++#define CPC_INT_INFO__TYPE_MASK 0x00010000L
++#define CPC_INT_INFO__VMID_MASK 0x00F00000L
++#define CPC_INT_INFO__QUEUE_ID_MASK 0x70000000L
++//CP_VIRT_STATUS
++#define CP_VIRT_STATUS__VIRT_STATUS__SHIFT 0x0
++#define CP_VIRT_STATUS__VIRT_STATUS_MASK 0xFFFFFFFFL
++//CPC_INT_ADDR
++#define CPC_INT_ADDR__ADDR__SHIFT 0x0
++#define CPC_INT_ADDR__ADDR_MASK 0xFFFFFFFFL
++//CPC_INT_PASID
++#define CPC_INT_PASID__PASID__SHIFT 0x0
++#define CPC_INT_PASID__PASID_MASK 0x0000FFFFL
++//CP_GFX_ERROR
++#define CP_GFX_ERROR__EDC_ERROR_ID__SHIFT 0x0
++#define CP_GFX_ERROR__SUA_ERROR__SHIFT 0x4
++#define CP_GFX_ERROR__RSVD1_ERROR__SHIFT 0x5
++#define CP_GFX_ERROR__RSVD2_ERROR__SHIFT 0x6
++#define CP_GFX_ERROR__SEM_UTCL1_ERROR__SHIFT 0x7
++#define CP_GFX_ERROR__QU_STRM_UTCL1_ERROR__SHIFT 0x8
++#define CP_GFX_ERROR__QU_EOP_UTCL1_ERROR__SHIFT 0x9
++#define CP_GFX_ERROR__QU_PIPE_UTCL1_ERROR__SHIFT 0xa
++#define CP_GFX_ERROR__QU_READ_UTCL1_ERROR__SHIFT 0xb
++#define CP_GFX_ERROR__SYNC_MEMRD_UTCL1_ERROR__SHIFT 0xc
++#define CP_GFX_ERROR__SYNC_MEMWR_UTCL1_ERROR__SHIFT 0xd
++#define CP_GFX_ERROR__SHADOW_UTCL1_ERROR__SHIFT 0xe
++#define CP_GFX_ERROR__APPEND_UTCL1_ERROR__SHIFT 0xf
++#define CP_GFX_ERROR__CE_DMA_UTCL1_ERROR__SHIFT 0x10
++#define CP_GFX_ERROR__PFP_VGTDMA_UTCL1_ERROR__SHIFT 0x11
++#define CP_GFX_ERROR__DMA_SRC_UTCL1_ERROR__SHIFT 0x12
++#define CP_GFX_ERROR__DMA_DST_UTCL1_ERROR__SHIFT 0x13
++#define CP_GFX_ERROR__PFP_TC_UTCL1_ERROR__SHIFT 0x14
++#define CP_GFX_ERROR__ME_TC_UTCL1_ERROR__SHIFT 0x15
++#define CP_GFX_ERROR__CE_TC_UTCL1_ERROR__SHIFT 0x16
++#define CP_GFX_ERROR__PRT_LOD_UTCL1_ERROR__SHIFT 0x17
++#define CP_GFX_ERROR__RDPTR_RPT_UTCL1_ERROR__SHIFT 0x18
++#define CP_GFX_ERROR__RB_FETCHER_UTCL1_ERROR__SHIFT 0x19
++#define CP_GFX_ERROR__I1_FETCHER_UTCL1_ERROR__SHIFT 0x1a
++#define CP_GFX_ERROR__I2_FETCHER_UTCL1_ERROR__SHIFT 0x1b
++#define CP_GFX_ERROR__C1_FETCHER_UTCL1_ERROR__SHIFT 0x1c
++#define CP_GFX_ERROR__C2_FETCHER_UTCL1_ERROR__SHIFT 0x1d
++#define CP_GFX_ERROR__ST_FETCHER_UTCL1_ERROR__SHIFT 0x1e
++#define CP_GFX_ERROR__CE_INIT_UTCL1_ERROR__SHIFT 0x1f
++#define CP_GFX_ERROR__EDC_ERROR_ID_MASK 0x0000000FL
++#define CP_GFX_ERROR__SUA_ERROR_MASK 0x00000010L
++#define CP_GFX_ERROR__RSVD1_ERROR_MASK 0x00000020L
++#define CP_GFX_ERROR__RSVD2_ERROR_MASK 0x00000040L
++#define CP_GFX_ERROR__SEM_UTCL1_ERROR_MASK 0x00000080L
++#define CP_GFX_ERROR__QU_STRM_UTCL1_ERROR_MASK 0x00000100L
++#define CP_GFX_ERROR__QU_EOP_UTCL1_ERROR_MASK 0x00000200L
++#define CP_GFX_ERROR__QU_PIPE_UTCL1_ERROR_MASK 0x00000400L
++#define CP_GFX_ERROR__QU_READ_UTCL1_ERROR_MASK 0x00000800L
++#define CP_GFX_ERROR__SYNC_MEMRD_UTCL1_ERROR_MASK 0x00001000L
++#define CP_GFX_ERROR__SYNC_MEMWR_UTCL1_ERROR_MASK 0x00002000L
++#define CP_GFX_ERROR__SHADOW_UTCL1_ERROR_MASK 0x00004000L
++#define CP_GFX_ERROR__APPEND_UTCL1_ERROR_MASK 0x00008000L
++#define CP_GFX_ERROR__CE_DMA_UTCL1_ERROR_MASK 0x00010000L
++#define CP_GFX_ERROR__PFP_VGTDMA_UTCL1_ERROR_MASK 0x00020000L
++#define CP_GFX_ERROR__DMA_SRC_UTCL1_ERROR_MASK 0x00040000L
++#define CP_GFX_ERROR__DMA_DST_UTCL1_ERROR_MASK 0x00080000L
++#define CP_GFX_ERROR__PFP_TC_UTCL1_ERROR_MASK 0x00100000L
++#define CP_GFX_ERROR__ME_TC_UTCL1_ERROR_MASK 0x00200000L
++#define CP_GFX_ERROR__CE_TC_UTCL1_ERROR_MASK 0x00400000L
++#define CP_GFX_ERROR__PRT_LOD_UTCL1_ERROR_MASK 0x00800000L
++#define CP_GFX_ERROR__RDPTR_RPT_UTCL1_ERROR_MASK 0x01000000L
++#define CP_GFX_ERROR__RB_FETCHER_UTCL1_ERROR_MASK 0x02000000L
++#define CP_GFX_ERROR__I1_FETCHER_UTCL1_ERROR_MASK 0x04000000L
++#define CP_GFX_ERROR__I2_FETCHER_UTCL1_ERROR_MASK 0x08000000L
++#define CP_GFX_ERROR__C1_FETCHER_UTCL1_ERROR_MASK 0x10000000L
++#define CP_GFX_ERROR__C2_FETCHER_UTCL1_ERROR_MASK 0x20000000L
++#define CP_GFX_ERROR__ST_FETCHER_UTCL1_ERROR_MASK 0x40000000L
++#define CP_GFX_ERROR__CE_INIT_UTCL1_ERROR_MASK 0x80000000L
++//CPG_UTCL1_CNTL
++#define CPG_UTCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
++#define CPG_UTCL1_CNTL__VMID_RESET_MODE__SHIFT 0x17
++#define CPG_UTCL1_CNTL__DROP_MODE__SHIFT 0x18
++#define CPG_UTCL1_CNTL__BYPASS__SHIFT 0x19
++#define CPG_UTCL1_CNTL__INVALIDATE__SHIFT 0x1a
++#define CPG_UTCL1_CNTL__FRAG_LIMIT_MODE__SHIFT 0x1b
++#define CPG_UTCL1_CNTL__FORCE_SNOOP__SHIFT 0x1c
++#define CPG_UTCL1_CNTL__FORCE_SD_VMID_DIRTY__SHIFT 0x1d
++#define CPG_UTCL1_CNTL__MTYPE_NO_PTE_MODE__SHIFT 0x1e
++#define CPG_UTCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
++#define CPG_UTCL1_CNTL__VMID_RESET_MODE_MASK 0x00800000L
++#define CPG_UTCL1_CNTL__DROP_MODE_MASK 0x01000000L
++#define CPG_UTCL1_CNTL__BYPASS_MASK 0x02000000L
++#define CPG_UTCL1_CNTL__INVALIDATE_MASK 0x04000000L
++#define CPG_UTCL1_CNTL__FRAG_LIMIT_MODE_MASK 0x08000000L
++#define CPG_UTCL1_CNTL__FORCE_SNOOP_MASK 0x10000000L
++#define CPG_UTCL1_CNTL__FORCE_SD_VMID_DIRTY_MASK 0x20000000L
++#define CPG_UTCL1_CNTL__MTYPE_NO_PTE_MODE_MASK 0x40000000L
++//CPC_UTCL1_CNTL
++#define CPC_UTCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
++#define CPC_UTCL1_CNTL__DROP_MODE__SHIFT 0x18
++#define CPC_UTCL1_CNTL__BYPASS__SHIFT 0x19
++#define CPC_UTCL1_CNTL__INVALIDATE__SHIFT 0x1a
++#define CPC_UTCL1_CNTL__FRAG_LIMIT_MODE__SHIFT 0x1b
++#define CPC_UTCL1_CNTL__FORCE_SNOOP__SHIFT 0x1c
++#define CPC_UTCL1_CNTL__FORCE_SD_VMID_DIRTY__SHIFT 0x1d
++#define CPC_UTCL1_CNTL__MTYPE_NO_PTE_MODE__SHIFT 0x1e
++#define CPC_UTCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
++#define CPC_UTCL1_CNTL__DROP_MODE_MASK 0x01000000L
++#define CPC_UTCL1_CNTL__BYPASS_MASK 0x02000000L
++#define CPC_UTCL1_CNTL__INVALIDATE_MASK 0x04000000L
++#define CPC_UTCL1_CNTL__FRAG_LIMIT_MODE_MASK 0x08000000L
++#define CPC_UTCL1_CNTL__FORCE_SNOOP_MASK 0x10000000L
++#define CPC_UTCL1_CNTL__FORCE_SD_VMID_DIRTY_MASK 0x20000000L
++#define CPC_UTCL1_CNTL__MTYPE_NO_PTE_MODE_MASK 0x40000000L
++//CPF_UTCL1_CNTL
++#define CPF_UTCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
++#define CPF_UTCL1_CNTL__VMID_RESET_MODE__SHIFT 0x17
++#define CPF_UTCL1_CNTL__DROP_MODE__SHIFT 0x18
++#define CPF_UTCL1_CNTL__BYPASS__SHIFT 0x19
++#define CPF_UTCL1_CNTL__INVALIDATE__SHIFT 0x1a
++#define CPF_UTCL1_CNTL__FRAG_LIMIT_MODE__SHIFT 0x1b
++#define CPF_UTCL1_CNTL__FORCE_SNOOP__SHIFT 0x1c
++#define CPF_UTCL1_CNTL__FORCE_SD_VMID_DIRTY__SHIFT 0x1d
++#define CPF_UTCL1_CNTL__MTYPE_NO_PTE_MODE__SHIFT 0x1e
++#define CPF_UTCL1_CNTL__FORCE_NO_EXE__SHIFT 0x1f
++#define CPF_UTCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
++#define CPF_UTCL1_CNTL__VMID_RESET_MODE_MASK 0x00800000L
++#define CPF_UTCL1_CNTL__DROP_MODE_MASK 0x01000000L
++#define CPF_UTCL1_CNTL__BYPASS_MASK 0x02000000L
++#define CPF_UTCL1_CNTL__INVALIDATE_MASK 0x04000000L
++#define CPF_UTCL1_CNTL__FRAG_LIMIT_MODE_MASK 0x08000000L
++#define CPF_UTCL1_CNTL__FORCE_SNOOP_MASK 0x10000000L
++#define CPF_UTCL1_CNTL__FORCE_SD_VMID_DIRTY_MASK 0x20000000L
++#define CPF_UTCL1_CNTL__MTYPE_NO_PTE_MODE_MASK 0x40000000L
++#define CPF_UTCL1_CNTL__FORCE_NO_EXE_MASK 0x80000000L
++//CP_AQL_SMM_STATUS
++#define CP_AQL_SMM_STATUS__AQL_QUEUE_SMM__SHIFT 0x0
++#define CP_AQL_SMM_STATUS__AQL_QUEUE_SMM_MASK 0xFFFFFFFFL
++//CP_RB0_BASE
++#define CP_RB0_BASE__RB_BASE__SHIFT 0x0
++#define CP_RB0_BASE__RB_BASE_MASK 0xFFFFFFFFL
++//CP_RB_BASE
++#define CP_RB_BASE__RB_BASE__SHIFT 0x0
++#define CP_RB_BASE__RB_BASE_MASK 0xFFFFFFFFL
++//CP_RB0_CNTL
++#define CP_RB0_CNTL__RB_BUFSZ__SHIFT 0x0
++#define CP_RB0_CNTL__RB_BLKSZ__SHIFT 0x8
++#define CP_RB0_CNTL__BUF_SWAP__SHIFT 0x11
++#define CP_RB0_CNTL__MIN_AVAILSZ__SHIFT 0x14
++#define CP_RB0_CNTL__MIN_IB_AVAILSZ__SHIFT 0x16
++#define CP_RB0_CNTL__CACHE_POLICY__SHIFT 0x18
++#define CP_RB0_CNTL__RB_NO_UPDATE__SHIFT 0x1b
++#define CP_RB0_CNTL__RB_RPTR_WR_ENA__SHIFT 0x1f
++#define CP_RB0_CNTL__RB_BUFSZ_MASK 0x0000003FL
++#define CP_RB0_CNTL__RB_BLKSZ_MASK 0x00003F00L
++#define CP_RB0_CNTL__BUF_SWAP_MASK 0x00060000L
++#define CP_RB0_CNTL__MIN_AVAILSZ_MASK 0x00300000L
++#define CP_RB0_CNTL__MIN_IB_AVAILSZ_MASK 0x00C00000L
++#define CP_RB0_CNTL__CACHE_POLICY_MASK 0x01000000L
++#define CP_RB0_CNTL__RB_NO_UPDATE_MASK 0x08000000L
++#define CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK 0x80000000L
++//CP_RB_CNTL
++#define CP_RB_CNTL__RB_BUFSZ__SHIFT 0x0
++#define CP_RB_CNTL__RB_BLKSZ__SHIFT 0x8
++#define CP_RB_CNTL__MIN_AVAILSZ__SHIFT 0x14
++#define CP_RB_CNTL__MIN_IB_AVAILSZ__SHIFT 0x16
++#define CP_RB_CNTL__CACHE_POLICY__SHIFT 0x18
++#define CP_RB_CNTL__RB_NO_UPDATE__SHIFT 0x1b
++#define CP_RB_CNTL__RB_RPTR_WR_ENA__SHIFT 0x1f
++#define CP_RB_CNTL__RB_BUFSZ_MASK 0x0000003FL
++#define CP_RB_CNTL__RB_BLKSZ_MASK 0x00003F00L
++#define CP_RB_CNTL__MIN_AVAILSZ_MASK 0x00300000L
++#define CP_RB_CNTL__MIN_IB_AVAILSZ_MASK 0x00C00000L
++#define CP_RB_CNTL__CACHE_POLICY_MASK 0x01000000L
++#define CP_RB_CNTL__RB_NO_UPDATE_MASK 0x08000000L
++#define CP_RB_CNTL__RB_RPTR_WR_ENA_MASK 0x80000000L
++//CP_RB_RPTR_WR
++#define CP_RB_RPTR_WR__RB_RPTR_WR__SHIFT 0x0
++#define CP_RB_RPTR_WR__RB_RPTR_WR_MASK 0x000FFFFFL
++//CP_RB0_RPTR_ADDR
++#define CP_RB0_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x2
++#define CP_RB0_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xFFFFFFFCL
++//CP_RB_RPTR_ADDR
++#define CP_RB_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x2
++#define CP_RB_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xFFFFFFFCL
++//CP_RB0_RPTR_ADDR_HI
++#define CP_RB0_RPTR_ADDR_HI__RB_RPTR_ADDR_HI__SHIFT 0x0
++#define CP_RB0_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK 0x0000FFFFL
++//CP_RB_RPTR_ADDR_HI
++#define CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI__SHIFT 0x0
++#define CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK 0x0000FFFFL
++//CP_RB0_BUFSZ_MASK
++#define CP_RB0_BUFSZ_MASK__DATA__SHIFT 0x0
++#define CP_RB0_BUFSZ_MASK__DATA_MASK 0x000FFFFFL
++//CP_RB_BUFSZ_MASK
++#define CP_RB_BUFSZ_MASK__DATA__SHIFT 0x0
++#define CP_RB_BUFSZ_MASK__DATA_MASK 0x000FFFFFL
++//CP_RB_WPTR_POLL_ADDR_LO
++#define CP_RB_WPTR_POLL_ADDR_LO__RB_WPTR_POLL_ADDR_LO__SHIFT 0x2
++#define CP_RB_WPTR_POLL_ADDR_LO__RB_WPTR_POLL_ADDR_LO_MASK 0xFFFFFFFCL
++//CP_RB_WPTR_POLL_ADDR_HI
++#define CP_RB_WPTR_POLL_ADDR_HI__RB_WPTR_POLL_ADDR_HI__SHIFT 0x0
++#define CP_RB_WPTR_POLL_ADDR_HI__RB_WPTR_POLL_ADDR_HI_MASK 0x0000FFFFL
++//GC_PRIV_MODE
++//CP_INT_CNTL
++#define CP_INT_CNTL__CP_VM_DOORBELL_WR_INT_ENABLE__SHIFT 0xb
++#define CP_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
++#define CP_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
++#define CP_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
++#define CP_INT_CNTL__CMP_BUSY_INT_ENABLE__SHIFT 0x12
++#define CP_INT_CNTL__CNTX_BUSY_INT_ENABLE__SHIFT 0x13
++#define CP_INT_CNTL__CNTX_EMPTY_INT_ENABLE__SHIFT 0x14
++#define CP_INT_CNTL__GFX_IDLE_INT_ENABLE__SHIFT 0x15
++#define CP_INT_CNTL__PRIV_INSTR_INT_ENABLE__SHIFT 0x16
++#define CP_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
++#define CP_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
++#define CP_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
++#define CP_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
++#define CP_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
++#define CP_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
++#define CP_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
++#define CP_INT_CNTL__CP_VM_DOORBELL_WR_INT_ENABLE_MASK 0x00000800L
++#define CP_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
++#define CP_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
++#define CP_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
++#define CP_INT_CNTL__CMP_BUSY_INT_ENABLE_MASK 0x00040000L
++#define CP_INT_CNTL__CNTX_BUSY_INT_ENABLE_MASK 0x00080000L
++#define CP_INT_CNTL__CNTX_EMPTY_INT_ENABLE_MASK 0x00100000L
++#define CP_INT_CNTL__GFX_IDLE_INT_ENABLE_MASK 0x00200000L
++#define CP_INT_CNTL__PRIV_INSTR_INT_ENABLE_MASK 0x00400000L
++#define CP_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
++#define CP_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
++#define CP_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
++#define CP_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
++#define CP_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
++#define CP_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
++#define CP_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
++//CP_INT_STATUS
++#define CP_INT_STATUS__CP_VM_DOORBELL_WR_INT_STAT__SHIFT 0xb
++#define CP_INT_STATUS__CP_ECC_ERROR_INT_STAT__SHIFT 0xe
++#define CP_INT_STATUS__GPF_INT_STAT__SHIFT 0x10
++#define CP_INT_STATUS__WRM_POLL_TIMEOUT_INT_STAT__SHIFT 0x11
++#define CP_INT_STATUS__CMP_BUSY_INT_STAT__SHIFT 0x12
++#define CP_INT_STATUS__CNTX_BUSY_INT_STAT__SHIFT 0x13
++#define CP_INT_STATUS__CNTX_EMPTY_INT_STAT__SHIFT 0x14
++#define CP_INT_STATUS__GFX_IDLE_INT_STAT__SHIFT 0x15
++#define CP_INT_STATUS__PRIV_INSTR_INT_STAT__SHIFT 0x16
++#define CP_INT_STATUS__PRIV_REG_INT_STAT__SHIFT 0x17
++#define CP_INT_STATUS__OPCODE_ERROR_INT_STAT__SHIFT 0x18
++#define CP_INT_STATUS__TIME_STAMP_INT_STAT__SHIFT 0x1a
++#define CP_INT_STATUS__RESERVED_BIT_ERROR_INT_STAT__SHIFT 0x1b
++#define CP_INT_STATUS__GENERIC2_INT_STAT__SHIFT 0x1d
++#define CP_INT_STATUS__GENERIC1_INT_STAT__SHIFT 0x1e
++#define CP_INT_STATUS__GENERIC0_INT_STAT__SHIFT 0x1f
++#define CP_INT_STATUS__CP_VM_DOORBELL_WR_INT_STAT_MASK 0x00000800L
++#define CP_INT_STATUS__CP_ECC_ERROR_INT_STAT_MASK 0x00004000L
++#define CP_INT_STATUS__GPF_INT_STAT_MASK 0x00010000L
++#define CP_INT_STATUS__WRM_POLL_TIMEOUT_INT_STAT_MASK 0x00020000L
++#define CP_INT_STATUS__CMP_BUSY_INT_STAT_MASK 0x00040000L
++#define CP_INT_STATUS__CNTX_BUSY_INT_STAT_MASK 0x00080000L
++#define CP_INT_STATUS__CNTX_EMPTY_INT_STAT_MASK 0x00100000L
++#define CP_INT_STATUS__GFX_IDLE_INT_STAT_MASK 0x00200000L
++#define CP_INT_STATUS__PRIV_INSTR_INT_STAT_MASK 0x00400000L
++#define CP_INT_STATUS__PRIV_REG_INT_STAT_MASK 0x00800000L
++#define CP_INT_STATUS__OPCODE_ERROR_INT_STAT_MASK 0x01000000L
++#define CP_INT_STATUS__TIME_STAMP_INT_STAT_MASK 0x04000000L
++#define CP_INT_STATUS__RESERVED_BIT_ERROR_INT_STAT_MASK 0x08000000L
++#define CP_INT_STATUS__GENERIC2_INT_STAT_MASK 0x20000000L
++#define CP_INT_STATUS__GENERIC1_INT_STAT_MASK 0x40000000L
++#define CP_INT_STATUS__GENERIC0_INT_STAT_MASK 0x80000000L
++//CP_DEVICE_ID
++#define CP_DEVICE_ID__DEVICE_ID__SHIFT 0x0
++#define CP_DEVICE_ID__DEVICE_ID_MASK 0x000000FFL
++//CP_ME0_PIPE_PRIORITY_CNTS
++#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY1_CNT__SHIFT 0x0
++#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT__SHIFT 0x8
++#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT__SHIFT 0x10
++#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY3_CNT__SHIFT 0x18
++#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY1_CNT_MASK 0x000000FFL
++#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT_MASK 0x0000FF00L
++#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT_MASK 0x00FF0000L
++#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY3_CNT_MASK 0xFF000000L
++//CP_RING_PRIORITY_CNTS
++#define CP_RING_PRIORITY_CNTS__PRIORITY1_CNT__SHIFT 0x0
++#define CP_RING_PRIORITY_CNTS__PRIORITY2A_CNT__SHIFT 0x8
++#define CP_RING_PRIORITY_CNTS__PRIORITY2B_CNT__SHIFT 0x10
++#define CP_RING_PRIORITY_CNTS__PRIORITY3_CNT__SHIFT 0x18
++#define CP_RING_PRIORITY_CNTS__PRIORITY1_CNT_MASK 0x000000FFL
++#define CP_RING_PRIORITY_CNTS__PRIORITY2A_CNT_MASK 0x0000FF00L
++#define CP_RING_PRIORITY_CNTS__PRIORITY2B_CNT_MASK 0x00FF0000L
++#define CP_RING_PRIORITY_CNTS__PRIORITY3_CNT_MASK 0xFF000000L
++//CP_ME0_PIPE0_PRIORITY
++#define CP_ME0_PIPE0_PRIORITY__PRIORITY__SHIFT 0x0
++#define CP_ME0_PIPE0_PRIORITY__PRIORITY_MASK 0x00000003L
++//CP_RING0_PRIORITY
++#define CP_RING0_PRIORITY__PRIORITY__SHIFT 0x0
++#define CP_RING0_PRIORITY__PRIORITY_MASK 0x00000003L
++//CP_ME0_PIPE1_PRIORITY
++#define CP_ME0_PIPE1_PRIORITY__PRIORITY__SHIFT 0x0
++#define CP_ME0_PIPE1_PRIORITY__PRIORITY_MASK 0x00000003L
++//CP_RING1_PRIORITY
++#define CP_RING1_PRIORITY__PRIORITY__SHIFT 0x0
++#define CP_RING1_PRIORITY__PRIORITY_MASK 0x00000003L
++//CP_ME0_PIPE2_PRIORITY
++#define CP_ME0_PIPE2_PRIORITY__PRIORITY__SHIFT 0x0
++#define CP_ME0_PIPE2_PRIORITY__PRIORITY_MASK 0x00000003L
++//CP_RING2_PRIORITY
++#define CP_RING2_PRIORITY__PRIORITY__SHIFT 0x0
++#define CP_RING2_PRIORITY__PRIORITY_MASK 0x00000003L
++//CP_FATAL_ERROR
++#define CP_FATAL_ERROR__CPF_FATAL_ERROR__SHIFT 0x0
++#define CP_FATAL_ERROR__CPG_FATAL_ERROR__SHIFT 0x1
++#define CP_FATAL_ERROR__GFX_HALT_PROC__SHIFT 0x2
++#define CP_FATAL_ERROR__DIS_CPG_FATAL_ERROR__SHIFT 0x3
++#define CP_FATAL_ERROR__CPG_TAG_FATAL_ERROR_EN__SHIFT 0x4
++#define CP_FATAL_ERROR__CPF_FATAL_ERROR_MASK 0x00000001L
++#define CP_FATAL_ERROR__CPG_FATAL_ERROR_MASK 0x00000002L
++#define CP_FATAL_ERROR__GFX_HALT_PROC_MASK 0x00000004L
++#define CP_FATAL_ERROR__DIS_CPG_FATAL_ERROR_MASK 0x00000008L
++#define CP_FATAL_ERROR__CPG_TAG_FATAL_ERROR_EN_MASK 0x00000010L
++//CP_RB_VMID
++#define CP_RB_VMID__RB0_VMID__SHIFT 0x0
++#define CP_RB_VMID__RB1_VMID__SHIFT 0x8
++#define CP_RB_VMID__RB2_VMID__SHIFT 0x10
++#define CP_RB_VMID__RB0_VMID_MASK 0x0000000FL
++#define CP_RB_VMID__RB1_VMID_MASK 0x00000F00L
++#define CP_RB_VMID__RB2_VMID_MASK 0x000F0000L
++//CP_ME0_PIPE0_VMID
++#define CP_ME0_PIPE0_VMID__VMID__SHIFT 0x0
++#define CP_ME0_PIPE0_VMID__VMID_MASK 0x0000000FL
++//CP_ME0_PIPE1_VMID
++#define CP_ME0_PIPE1_VMID__VMID__SHIFT 0x0
++#define CP_ME0_PIPE1_VMID__VMID_MASK 0x0000000FL
++//CP_RB0_WPTR
++#define CP_RB0_WPTR__RB_WPTR__SHIFT 0x0
++#define CP_RB0_WPTR__RB_WPTR_MASK 0xFFFFFFFFL
++//CP_RB_WPTR
++#define CP_RB_WPTR__RB_WPTR__SHIFT 0x0
++#define CP_RB_WPTR__RB_WPTR_MASK 0xFFFFFFFFL
++//CP_RB0_WPTR_HI
++#define CP_RB0_WPTR_HI__RB_WPTR__SHIFT 0x0
++#define CP_RB0_WPTR_HI__RB_WPTR_MASK 0xFFFFFFFFL
++//CP_RB_WPTR_HI
++#define CP_RB_WPTR_HI__RB_WPTR__SHIFT 0x0
++#define CP_RB_WPTR_HI__RB_WPTR_MASK 0xFFFFFFFFL
++//CP_RB1_WPTR
++#define CP_RB1_WPTR__RB_WPTR__SHIFT 0x0
++#define CP_RB1_WPTR__RB_WPTR_MASK 0xFFFFFFFFL
++//CP_RB1_WPTR_HI
++#define CP_RB1_WPTR_HI__RB_WPTR__SHIFT 0x0
++#define CP_RB1_WPTR_HI__RB_WPTR_MASK 0xFFFFFFFFL
++//CP_RB2_WPTR
++#define CP_RB2_WPTR__RB_WPTR__SHIFT 0x0
++#define CP_RB2_WPTR__RB_WPTR_MASK 0x000FFFFFL
++//CP_RB_DOORBELL_CONTROL
++#define CP_RB_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT 0x1
++#define CP_RB_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT 0x2
++#define CP_RB_DOORBELL_CONTROL__DOORBELL_EN__SHIFT 0x1e
++#define CP_RB_DOORBELL_CONTROL__DOORBELL_HIT__SHIFT 0x1f
++#define CP_RB_DOORBELL_CONTROL__DOORBELL_BIF_DROP_MASK 0x00000002L
++#define CP_RB_DOORBELL_CONTROL__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
++#define CP_RB_DOORBELL_CONTROL__DOORBELL_EN_MASK 0x40000000L
++#define CP_RB_DOORBELL_CONTROL__DOORBELL_HIT_MASK 0x80000000L
++//CP_RB_DOORBELL_RANGE_LOWER
++#define CP_RB_DOORBELL_RANGE_LOWER__DOORBELL_RANGE_LOWER__SHIFT 0x2
++#define CP_RB_DOORBELL_RANGE_LOWER__DOORBELL_RANGE_LOWER_MASK 0x0FFFFFFCL
++//CP_RB_DOORBELL_RANGE_UPPER
++#define CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER__SHIFT 0x2
++#define CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK 0x0FFFFFFCL
++//CP_MEC_DOORBELL_RANGE_LOWER
++#define CP_MEC_DOORBELL_RANGE_LOWER__DOORBELL_RANGE_LOWER__SHIFT 0x2
++#define CP_MEC_DOORBELL_RANGE_LOWER__DOORBELL_RANGE_LOWER_MASK 0x0FFFFFFCL
++//CP_MEC_DOORBELL_RANGE_UPPER
++#define CP_MEC_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER__SHIFT 0x2
++#define CP_MEC_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK 0x0FFFFFFCL
++//CPG_UTCL1_ERROR
++#define CPG_UTCL1_ERROR__ERROR_DETECTED_HALT__SHIFT 0x0
++#define CPG_UTCL1_ERROR__ERROR_DETECTED_HALT_MASK 0x00000001L
++//CPC_UTCL1_ERROR
++#define CPC_UTCL1_ERROR__ERROR_DETECTED_HALT__SHIFT 0x0
++#define CPC_UTCL1_ERROR__ERROR_DETECTED_HALT_MASK 0x00000001L
++//CP_RB1_BASE
++#define CP_RB1_BASE__RB_BASE__SHIFT 0x0
++#define CP_RB1_BASE__RB_BASE_MASK 0xFFFFFFFFL
++//CP_RB1_CNTL
++#define CP_RB1_CNTL__RB_BUFSZ__SHIFT 0x0
++#define CP_RB1_CNTL__RB_BLKSZ__SHIFT 0x8
++#define CP_RB1_CNTL__MIN_AVAILSZ__SHIFT 0x14
++#define CP_RB1_CNTL__MIN_IB_AVAILSZ__SHIFT 0x16
++#define CP_RB1_CNTL__CACHE_POLICY__SHIFT 0x18
++#define CP_RB1_CNTL__RB_NO_UPDATE__SHIFT 0x1b
++#define CP_RB1_CNTL__RB_RPTR_WR_ENA__SHIFT 0x1f
++#define CP_RB1_CNTL__RB_BUFSZ_MASK 0x0000003FL
++#define CP_RB1_CNTL__RB_BLKSZ_MASK 0x00003F00L
++#define CP_RB1_CNTL__MIN_AVAILSZ_MASK 0x00300000L
++#define CP_RB1_CNTL__MIN_IB_AVAILSZ_MASK 0x00C00000L
++#define CP_RB1_CNTL__CACHE_POLICY_MASK 0x01000000L
++#define CP_RB1_CNTL__RB_NO_UPDATE_MASK 0x08000000L
++#define CP_RB1_CNTL__RB_RPTR_WR_ENA_MASK 0x80000000L
++//CP_RB1_RPTR_ADDR
++#define CP_RB1_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x2
++#define CP_RB1_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xFFFFFFFCL
++//CP_RB1_RPTR_ADDR_HI
++#define CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI__SHIFT 0x0
++#define CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK 0x0000FFFFL
++//CP_RB2_BASE
++#define CP_RB2_BASE__RB_BASE__SHIFT 0x0
++#define CP_RB2_BASE__RB_BASE_MASK 0xFFFFFFFFL
++//CP_RB2_CNTL
++#define CP_RB2_CNTL__RB_BUFSZ__SHIFT 0x0
++#define CP_RB2_CNTL__RB_BLKSZ__SHIFT 0x8
++#define CP_RB2_CNTL__MIN_AVAILSZ__SHIFT 0x14
++#define CP_RB2_CNTL__MIN_IB_AVAILSZ__SHIFT 0x16
++#define CP_RB2_CNTL__CACHE_POLICY__SHIFT 0x18
++#define CP_RB2_CNTL__RB_NO_UPDATE__SHIFT 0x1b
++#define CP_RB2_CNTL__RB_RPTR_WR_ENA__SHIFT 0x1f
++#define CP_RB2_CNTL__RB_BUFSZ_MASK 0x0000003FL
++#define CP_RB2_CNTL__RB_BLKSZ_MASK 0x00003F00L
++#define CP_RB2_CNTL__MIN_AVAILSZ_MASK 0x00300000L
++#define CP_RB2_CNTL__MIN_IB_AVAILSZ_MASK 0x00C00000L
++#define CP_RB2_CNTL__CACHE_POLICY_MASK 0x01000000L
++#define CP_RB2_CNTL__RB_NO_UPDATE_MASK 0x08000000L
++#define CP_RB2_CNTL__RB_RPTR_WR_ENA_MASK 0x80000000L
++//CP_RB2_RPTR_ADDR
++#define CP_RB2_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x2
++#define CP_RB2_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xFFFFFFFCL
++//CP_RB2_RPTR_ADDR_HI
++#define CP_RB2_RPTR_ADDR_HI__RB_RPTR_ADDR_HI__SHIFT 0x0
++#define CP_RB2_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK 0x0000FFFFL
++//CP_RB0_ACTIVE
++#define CP_RB0_ACTIVE__ACTIVE__SHIFT 0x0
++#define CP_RB0_ACTIVE__ACTIVE_MASK 0x00000001L
++//CP_RB_ACTIVE
++#define CP_RB_ACTIVE__ACTIVE__SHIFT 0x0
++#define CP_RB_ACTIVE__ACTIVE_MASK 0x00000001L
++//CP_INT_CNTL_RING0
++#define CP_INT_CNTL_RING0__CP_VM_DOORBELL_WR_INT_ENABLE__SHIFT 0xb
++#define CP_INT_CNTL_RING0__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
++#define CP_INT_CNTL_RING0__GPF_INT_ENABLE__SHIFT 0x10
++#define CP_INT_CNTL_RING0__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
++#define CP_INT_CNTL_RING0__CMP_BUSY_INT_ENABLE__SHIFT 0x12
++#define CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE__SHIFT 0x13
++#define CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE__SHIFT 0x14
++#define CP_INT_CNTL_RING0__GFX_IDLE_INT_ENABLE__SHIFT 0x15
++#define CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE__SHIFT 0x16
++#define CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE__SHIFT 0x17
++#define CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
++#define CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
++#define CP_INT_CNTL_RING0__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
++#define CP_INT_CNTL_RING0__GENERIC2_INT_ENABLE__SHIFT 0x1d
++#define CP_INT_CNTL_RING0__GENERIC1_INT_ENABLE__SHIFT 0x1e
++#define CP_INT_CNTL_RING0__GENERIC0_INT_ENABLE__SHIFT 0x1f
++#define CP_INT_CNTL_RING0__CP_VM_DOORBELL_WR_INT_ENABLE_MASK 0x00000800L
++#define CP_INT_CNTL_RING0__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
++#define CP_INT_CNTL_RING0__GPF_INT_ENABLE_MASK 0x00010000L
++#define CP_INT_CNTL_RING0__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
++#define CP_INT_CNTL_RING0__CMP_BUSY_INT_ENABLE_MASK 0x00040000L
++#define CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE_MASK 0x00080000L
++#define CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE_MASK 0x00100000L
++#define CP_INT_CNTL_RING0__GFX_IDLE_INT_ENABLE_MASK 0x00200000L
++#define CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK 0x00400000L
++#define CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK 0x00800000L
++#define CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
++#define CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
++#define CP_INT_CNTL_RING0__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
++#define CP_INT_CNTL_RING0__GENERIC2_INT_ENABLE_MASK 0x20000000L
++#define CP_INT_CNTL_RING0__GENERIC1_INT_ENABLE_MASK 0x40000000L
++#define CP_INT_CNTL_RING0__GENERIC0_INT_ENABLE_MASK 0x80000000L
++//CP_INT_CNTL_RING1
++#define CP_INT_CNTL_RING1__CP_VM_DOORBELL_WR_INT_ENABLE__SHIFT 0xb
++#define CP_INT_CNTL_RING1__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
++#define CP_INT_CNTL_RING1__GPF_INT_ENABLE__SHIFT 0x10
++#define CP_INT_CNTL_RING1__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
++#define CP_INT_CNTL_RING1__CMP_BUSY_INT_ENABLE__SHIFT 0x12
++#define CP_INT_CNTL_RING1__CNTX_BUSY_INT_ENABLE__SHIFT 0x13
++#define CP_INT_CNTL_RING1__CNTX_EMPTY_INT_ENABLE__SHIFT 0x14
++#define CP_INT_CNTL_RING1__GFX_IDLE_INT_ENABLE__SHIFT 0x15
++#define CP_INT_CNTL_RING1__PRIV_INSTR_INT_ENABLE__SHIFT 0x16
++#define CP_INT_CNTL_RING1__PRIV_REG_INT_ENABLE__SHIFT 0x17
++#define CP_INT_CNTL_RING1__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
++#define CP_INT_CNTL_RING1__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
++#define CP_INT_CNTL_RING1__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
++#define CP_INT_CNTL_RING1__GENERIC2_INT_ENABLE__SHIFT 0x1d
++#define CP_INT_CNTL_RING1__GENERIC1_INT_ENABLE__SHIFT 0x1e
++#define CP_INT_CNTL_RING1__GENERIC0_INT_ENABLE__SHIFT 0x1f
++#define CP_INT_CNTL_RING1__CP_VM_DOORBELL_WR_INT_ENABLE_MASK 0x00000800L
++#define CP_INT_CNTL_RING1__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
++#define CP_INT_CNTL_RING1__GPF_INT_ENABLE_MASK 0x00010000L
++#define CP_INT_CNTL_RING1__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
++#define CP_INT_CNTL_RING1__CMP_BUSY_INT_ENABLE_MASK 0x00040000L
++#define CP_INT_CNTL_RING1__CNTX_BUSY_INT_ENABLE_MASK 0x00080000L
++#define CP_INT_CNTL_RING1__CNTX_EMPTY_INT_ENABLE_MASK 0x00100000L
++#define CP_INT_CNTL_RING1__GFX_IDLE_INT_ENABLE_MASK 0x00200000L
++#define CP_INT_CNTL_RING1__PRIV_INSTR_INT_ENABLE_MASK 0x00400000L
++#define CP_INT_CNTL_RING1__PRIV_REG_INT_ENABLE_MASK 0x00800000L
++#define CP_INT_CNTL_RING1__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
++#define CP_INT_CNTL_RING1__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
++#define CP_INT_CNTL_RING1__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
++#define CP_INT_CNTL_RING1__GENERIC2_INT_ENABLE_MASK 0x20000000L
++#define CP_INT_CNTL_RING1__GENERIC1_INT_ENABLE_MASK 0x40000000L
++#define CP_INT_CNTL_RING1__GENERIC0_INT_ENABLE_MASK 0x80000000L
++//CP_INT_CNTL_RING2
++#define CP_INT_CNTL_RING2__CP_VM_DOORBELL_WR_INT_ENABLE__SHIFT 0xb
++#define CP_INT_CNTL_RING2__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
++#define CP_INT_CNTL_RING2__GPF_INT_ENABLE__SHIFT 0x10
++#define CP_INT_CNTL_RING2__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
++#define CP_INT_CNTL_RING2__CMP_BUSY_INT_ENABLE__SHIFT 0x12
++#define CP_INT_CNTL_RING2__CNTX_BUSY_INT_ENABLE__SHIFT 0x13
++#define CP_INT_CNTL_RING2__CNTX_EMPTY_INT_ENABLE__SHIFT 0x14
++#define CP_INT_CNTL_RING2__GFX_IDLE_INT_ENABLE__SHIFT 0x15
++#define CP_INT_CNTL_RING2__PRIV_INSTR_INT_ENABLE__SHIFT 0x16
++#define CP_INT_CNTL_RING2__PRIV_REG_INT_ENABLE__SHIFT 0x17
++#define CP_INT_CNTL_RING2__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
++#define CP_INT_CNTL_RING2__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
++#define CP_INT_CNTL_RING2__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
++#define CP_INT_CNTL_RING2__GENERIC2_INT_ENABLE__SHIFT 0x1d
++#define CP_INT_CNTL_RING2__GENERIC1_INT_ENABLE__SHIFT 0x1e
++#define CP_INT_CNTL_RING2__GENERIC0_INT_ENABLE__SHIFT 0x1f
++#define CP_INT_CNTL_RING2__CP_VM_DOORBELL_WR_INT_ENABLE_MASK 0x00000800L
++#define CP_INT_CNTL_RING2__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
++#define CP_INT_CNTL_RING2__GPF_INT_ENABLE_MASK 0x00010000L
++#define CP_INT_CNTL_RING2__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
++#define CP_INT_CNTL_RING2__CMP_BUSY_INT_ENABLE_MASK 0x00040000L
++#define CP_INT_CNTL_RING2__CNTX_BUSY_INT_ENABLE_MASK 0x00080000L
++#define CP_INT_CNTL_RING2__CNTX_EMPTY_INT_ENABLE_MASK 0x00100000L
++#define CP_INT_CNTL_RING2__GFX_IDLE_INT_ENABLE_MASK 0x00200000L
++#define CP_INT_CNTL_RING2__PRIV_INSTR_INT_ENABLE_MASK 0x00400000L
++#define CP_INT_CNTL_RING2__PRIV_REG_INT_ENABLE_MASK 0x00800000L
++#define CP_INT_CNTL_RING2__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
++#define CP_INT_CNTL_RING2__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
++#define CP_INT_CNTL_RING2__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
++#define CP_INT_CNTL_RING2__GENERIC2_INT_ENABLE_MASK 0x20000000L
++#define CP_INT_CNTL_RING2__GENERIC1_INT_ENABLE_MASK 0x40000000L
++#define CP_INT_CNTL_RING2__GENERIC0_INT_ENABLE_MASK 0x80000000L
++//CP_INT_STATUS_RING0
++#define CP_INT_STATUS_RING0__CP_VM_DOORBELL_WR_INT_STAT__SHIFT 0xb
++#define CP_INT_STATUS_RING0__CP_ECC_ERROR_INT_STAT__SHIFT 0xe
++#define CP_INT_STATUS_RING0__GPF_INT_STAT__SHIFT 0x10
++#define CP_INT_STATUS_RING0__WRM_POLL_TIMEOUT_INT_STAT__SHIFT 0x11
++#define CP_INT_STATUS_RING0__CMP_BUSY_INT_STAT__SHIFT 0x12
++#define CP_INT_STATUS_RING0__GCNTX_BUSY_INT_STAT__SHIFT 0x13
++#define CP_INT_STATUS_RING0__CNTX_EMPTY_INT_STAT__SHIFT 0x14
++#define CP_INT_STATUS_RING0__GFX_IDLE_INT_STAT__SHIFT 0x15
++#define CP_INT_STATUS_RING0__PRIV_INSTR_INT_STAT__SHIFT 0x16
++#define CP_INT_STATUS_RING0__PRIV_REG_INT_STAT__SHIFT 0x17
++#define CP_INT_STATUS_RING0__OPCODE_ERROR_INT_STAT__SHIFT 0x18
++#define CP_INT_STATUS_RING0__TIME_STAMP_INT_STAT__SHIFT 0x1a
++#define CP_INT_STATUS_RING0__RESERVED_BIT_ERROR_INT_STAT__SHIFT 0x1b
++#define CP_INT_STATUS_RING0__GENERIC2_INT_STAT__SHIFT 0x1d
++#define CP_INT_STATUS_RING0__GENERIC1_INT_STAT__SHIFT 0x1e
++#define CP_INT_STATUS_RING0__GENERIC0_INT_STAT__SHIFT 0x1f
++#define CP_INT_STATUS_RING0__CP_VM_DOORBELL_WR_INT_STAT_MASK 0x00000800L
++#define CP_INT_STATUS_RING0__CP_ECC_ERROR_INT_STAT_MASK 0x00004000L
++#define CP_INT_STATUS_RING0__GPF_INT_STAT_MASK 0x00010000L
++#define CP_INT_STATUS_RING0__WRM_POLL_TIMEOUT_INT_STAT_MASK 0x00020000L
++#define CP_INT_STATUS_RING0__CMP_BUSY_INT_STAT_MASK 0x00040000L
++#define CP_INT_STATUS_RING0__GCNTX_BUSY_INT_STAT_MASK 0x00080000L
++#define CP_INT_STATUS_RING0__CNTX_EMPTY_INT_STAT_MASK 0x00100000L
++#define CP_INT_STATUS_RING0__GFX_IDLE_INT_STAT_MASK 0x00200000L
++#define CP_INT_STATUS_RING0__PRIV_INSTR_INT_STAT_MASK 0x00400000L
++#define CP_INT_STATUS_RING0__PRIV_REG_INT_STAT_MASK 0x00800000L
++#define CP_INT_STATUS_RING0__OPCODE_ERROR_INT_STAT_MASK 0x01000000L
++#define CP_INT_STATUS_RING0__TIME_STAMP_INT_STAT_MASK 0x04000000L
++#define CP_INT_STATUS_RING0__RESERVED_BIT_ERROR_INT_STAT_MASK 0x08000000L
++#define CP_INT_STATUS_RING0__GENERIC2_INT_STAT_MASK 0x20000000L
++#define CP_INT_STATUS_RING0__GENERIC1_INT_STAT_MASK 0x40000000L
++#define CP_INT_STATUS_RING0__GENERIC0_INT_STAT_MASK 0x80000000L
++//CP_INT_STATUS_RING1
++#define CP_INT_STATUS_RING1__CP_VM_DOORBELL_WR_INT_STAT__SHIFT 0xb
++#define CP_INT_STATUS_RING1__CP_ECC_ERROR_INT_STAT__SHIFT 0xe
++#define CP_INT_STATUS_RING1__GPF_INT_STAT__SHIFT 0x10
++#define CP_INT_STATUS_RING1__WRM_POLL_TIMEOUT_INT_STAT__SHIFT 0x11
++#define CP_INT_STATUS_RING1__CMP_BUSY_INT_STAT__SHIFT 0x12
++#define CP_INT_STATUS_RING1__CNTX_BUSY_INT_STAT__SHIFT 0x13
++#define CP_INT_STATUS_RING1__CNTX_EMPTY_INT_STAT__SHIFT 0x14
++#define CP_INT_STATUS_RING1__GFX_IDLE_INT_STAT__SHIFT 0x15
++#define CP_INT_STATUS_RING1__PRIV_INSTR_INT_STAT__SHIFT 0x16
++#define CP_INT_STATUS_RING1__PRIV_REG_INT_STAT__SHIFT 0x17
++#define CP_INT_STATUS_RING1__OPCODE_ERROR_INT_STAT__SHIFT 0x18
++#define CP_INT_STATUS_RING1__TIME_STAMP_INT_STAT__SHIFT 0x1a
++#define CP_INT_STATUS_RING1__RESERVED_BIT_ERROR_INT_STAT__SHIFT 0x1b
++#define CP_INT_STATUS_RING1__GENERIC2_INT_STAT__SHIFT 0x1d
++#define CP_INT_STATUS_RING1__GENERIC1_INT_STAT__SHIFT 0x1e
++#define CP_INT_STATUS_RING1__GENERIC0_INT_STAT__SHIFT 0x1f
++#define CP_INT_STATUS_RING1__CP_VM_DOORBELL_WR_INT_STAT_MASK 0x00000800L
++#define CP_INT_STATUS_RING1__CP_ECC_ERROR_INT_STAT_MASK 0x00004000L
++#define CP_INT_STATUS_RING1__GPF_INT_STAT_MASK 0x00010000L
++#define CP_INT_STATUS_RING1__WRM_POLL_TIMEOUT_INT_STAT_MASK 0x00020000L
++#define CP_INT_STATUS_RING1__CMP_BUSY_INT_STAT_MASK 0x00040000L
++#define CP_INT_STATUS_RING1__CNTX_BUSY_INT_STAT_MASK 0x00080000L
++#define CP_INT_STATUS_RING1__CNTX_EMPTY_INT_STAT_MASK 0x00100000L
++#define CP_INT_STATUS_RING1__GFX_IDLE_INT_STAT_MASK 0x00200000L
++#define CP_INT_STATUS_RING1__PRIV_INSTR_INT_STAT_MASK 0x00400000L
++#define CP_INT_STATUS_RING1__PRIV_REG_INT_STAT_MASK 0x00800000L
++#define CP_INT_STATUS_RING1__OPCODE_ERROR_INT_STAT_MASK 0x01000000L
++#define CP_INT_STATUS_RING1__TIME_STAMP_INT_STAT_MASK 0x04000000L
++#define CP_INT_STATUS_RING1__RESERVED_BIT_ERROR_INT_STAT_MASK 0x08000000L
++#define CP_INT_STATUS_RING1__GENERIC2_INT_STAT_MASK 0x20000000L
++#define CP_INT_STATUS_RING1__GENERIC1_INT_STAT_MASK 0x40000000L
++#define CP_INT_STATUS_RING1__GENERIC0_INT_STAT_MASK 0x80000000L
++//CP_INT_STATUS_RING2
++#define CP_INT_STATUS_RING2__CP_VM_DOORBELL_WR_INT_STAT__SHIFT 0xb
++#define CP_INT_STATUS_RING2__CP_ECC_ERROR_INT_STAT__SHIFT 0xe
++#define CP_INT_STATUS_RING2__GPF_INT_STAT__SHIFT 0x10
++#define CP_INT_STATUS_RING2__WRM_POLL_TIMEOUT_INT_STAT__SHIFT 0x11
++#define CP_INT_STATUS_RING2__CMP_BUSY_INT_STAT__SHIFT 0x12
++#define CP_INT_STATUS_RING2__CNTX_BUSY_INT_STAT__SHIFT 0x13
++#define CP_INT_STATUS_RING2__CNTX_EMPTY_INT_STAT__SHIFT 0x14
++#define CP_INT_STATUS_RING2__GFX_IDLE_INT_STAT__SHIFT 0x15
++#define CP_INT_STATUS_RING2__PRIV_INSTR_INT_STAT__SHIFT 0x16
++#define CP_INT_STATUS_RING2__PRIV_REG_INT_STAT__SHIFT 0x17
++#define CP_INT_STATUS_RING2__OPCODE_ERROR_INT_STAT__SHIFT 0x18
++#define CP_INT_STATUS_RING2__TIME_STAMP_INT_STAT__SHIFT 0x1a
++#define CP_INT_STATUS_RING2__RESERVED_BIT_ERROR_INT_STAT__SHIFT 0x1b
++#define CP_INT_STATUS_RING2__GENERIC2_INT_STAT__SHIFT 0x1d
++#define CP_INT_STATUS_RING2__GENERIC1_INT_STAT__SHIFT 0x1e
++#define CP_INT_STATUS_RING2__GENERIC0_INT_STAT__SHIFT 0x1f
++#define CP_INT_STATUS_RING2__CP_VM_DOORBELL_WR_INT_STAT_MASK 0x00000800L
++#define CP_INT_STATUS_RING2__CP_ECC_ERROR_INT_STAT_MASK 0x00004000L
++#define CP_INT_STATUS_RING2__GPF_INT_STAT_MASK 0x00010000L
++#define CP_INT_STATUS_RING2__WRM_POLL_TIMEOUT_INT_STAT_MASK 0x00020000L
++#define CP_INT_STATUS_RING2__CMP_BUSY_INT_STAT_MASK 0x00040000L
++#define CP_INT_STATUS_RING2__CNTX_BUSY_INT_STAT_MASK 0x00080000L
++#define CP_INT_STATUS_RING2__CNTX_EMPTY_INT_STAT_MASK 0x00100000L
++#define CP_INT_STATUS_RING2__GFX_IDLE_INT_STAT_MASK 0x00200000L
++#define CP_INT_STATUS_RING2__PRIV_INSTR_INT_STAT_MASK 0x00400000L
++#define CP_INT_STATUS_RING2__PRIV_REG_INT_STAT_MASK 0x00800000L
++#define CP_INT_STATUS_RING2__OPCODE_ERROR_INT_STAT_MASK 0x01000000L
++#define CP_INT_STATUS_RING2__TIME_STAMP_INT_STAT_MASK 0x04000000L
++#define CP_INT_STATUS_RING2__RESERVED_BIT_ERROR_INT_STAT_MASK 0x08000000L
++#define CP_INT_STATUS_RING2__GENERIC2_INT_STAT_MASK 0x20000000L
++#define CP_INT_STATUS_RING2__GENERIC1_INT_STAT_MASK 0x40000000L
++#define CP_INT_STATUS_RING2__GENERIC0_INT_STAT_MASK 0x80000000L
++#define CP_PFP_F32_INTERRUPT__PRIV_REG_INT__SHIFT 0x1
++#define CP_PFP_F32_INTERRUPT__PRIV_REG_INT_MASK 0x00000002L
++#define CP_MEC1_F32_INTERRUPT__PRIV_REG_INT__SHIFT 0x1
++#define CP_MEC1_F32_INTERRUPT__PRIV_REG_INT_MASK 0x00000002L
++#define CP_MEC2_F32_INTERRUPT__PRIV_REG_INT__SHIFT 0x1
++#define CP_MEC2_F32_INTERRUPT__PRIV_REG_INT_MASK 0x00000002L
++//CP_PWR_CNTL
++#define CP_PWR_CNTL__GFX_CLK_HALT_ME0_PIPE0__SHIFT 0x0
++#define CP_PWR_CNTL__GFX_CLK_HALT_ME0_PIPE1__SHIFT 0x1
++#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE0__SHIFT 0x8
++#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE1__SHIFT 0x9
++#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE2__SHIFT 0xa
++#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE3__SHIFT 0xb
++#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE0__SHIFT 0x10
++#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE1__SHIFT 0x11
++#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE2__SHIFT 0x12
++#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE3__SHIFT 0x13
++#define CP_PWR_CNTL__GFX_CLK_HALT_ME0_PIPE0_MASK 0x00000001L
++#define CP_PWR_CNTL__GFX_CLK_HALT_ME0_PIPE1_MASK 0x00000002L
++#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE0_MASK 0x00000100L
++#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE1_MASK 0x00000200L
++#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE2_MASK 0x00000400L
++#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE3_MASK 0x00000800L
++#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE0_MASK 0x00010000L
++#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE1_MASK 0x00020000L
++#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE2_MASK 0x00040000L
++#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE3_MASK 0x00080000L
++//CP_MEM_SLP_CNTL
++#define CP_MEM_SLP_CNTL__CP_MEM_LS_EN__SHIFT 0x0
++#define CP_MEM_SLP_CNTL__CP_MEM_DS_EN__SHIFT 0x1
++#define CP_MEM_SLP_CNTL__RESERVED__SHIFT 0x2
++#define CP_MEM_SLP_CNTL__CP_LS_DS_BUSY_OVERRIDE__SHIFT 0x7
++#define CP_MEM_SLP_CNTL__CP_MEM_LS_ON_DELAY__SHIFT 0x8
++#define CP_MEM_SLP_CNTL__CP_MEM_LS_OFF_DELAY__SHIFT 0x10
++#define CP_MEM_SLP_CNTL__RESERVED1__SHIFT 0x18
++#define CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK 0x00000001L
++#define CP_MEM_SLP_CNTL__CP_MEM_DS_EN_MASK 0x00000002L
++#define CP_MEM_SLP_CNTL__RESERVED_MASK 0x0000007CL
++#define CP_MEM_SLP_CNTL__CP_LS_DS_BUSY_OVERRIDE_MASK 0x00000080L
++#define CP_MEM_SLP_CNTL__CP_MEM_LS_ON_DELAY_MASK 0x0000FF00L
++#define CP_MEM_SLP_CNTL__CP_MEM_LS_OFF_DELAY_MASK 0x00FF0000L
++#define CP_MEM_SLP_CNTL__RESERVED1_MASK 0xFF000000L
++//CP_ECC_FIRSTOCCURRENCE
++#define CP_ECC_FIRSTOCCURRENCE__INTERFACE__SHIFT 0x0
++#define CP_ECC_FIRSTOCCURRENCE__CLIENT__SHIFT 0x4
++#define CP_ECC_FIRSTOCCURRENCE__ME__SHIFT 0x8
++#define CP_ECC_FIRSTOCCURRENCE__PIPE__SHIFT 0xa
++#define CP_ECC_FIRSTOCCURRENCE__QUEUE__SHIFT 0xc
++#define CP_ECC_FIRSTOCCURRENCE__VMID__SHIFT 0x10
++#define CP_ECC_FIRSTOCCURRENCE__INTERFACE_MASK 0x00000003L
++#define CP_ECC_FIRSTOCCURRENCE__CLIENT_MASK 0x000000F0L
++#define CP_ECC_FIRSTOCCURRENCE__ME_MASK 0x00000300L
++#define CP_ECC_FIRSTOCCURRENCE__PIPE_MASK 0x00000C00L
++#define CP_ECC_FIRSTOCCURRENCE__QUEUE_MASK 0x00007000L
++#define CP_ECC_FIRSTOCCURRENCE__VMID_MASK 0x000F0000L
++//CP_ECC_FIRSTOCCURRENCE_RING0
++#define CP_ECC_FIRSTOCCURRENCE_RING0__OBSOLETE__SHIFT 0x0
++#define CP_ECC_FIRSTOCCURRENCE_RING0__OBSOLETE_MASK 0xFFFFFFFFL
++//CP_ECC_FIRSTOCCURRENCE_RING1
++#define CP_ECC_FIRSTOCCURRENCE_RING1__OBSOLETE__SHIFT 0x0
++#define CP_ECC_FIRSTOCCURRENCE_RING1__OBSOLETE_MASK 0xFFFFFFFFL
++//CP_ECC_FIRSTOCCURRENCE_RING2
++#define CP_ECC_FIRSTOCCURRENCE_RING2__OBSOLETE__SHIFT 0x0
++#define CP_ECC_FIRSTOCCURRENCE_RING2__OBSOLETE_MASK 0xFFFFFFFFL
++//GB_EDC_MODE
++#define GB_EDC_MODE__FORCE_SEC_ON_DED__SHIFT 0xf
++#define GB_EDC_MODE__COUNT_FED_OUT__SHIFT 0x10
++#define GB_EDC_MODE__GATE_FUE__SHIFT 0x11
++#define GB_EDC_MODE__DED_MODE__SHIFT 0x14
++#define GB_EDC_MODE__PROP_FED__SHIFT 0x1d
++#define GB_EDC_MODE__BYPASS__SHIFT 0x1f
++#define GB_EDC_MODE__FORCE_SEC_ON_DED_MASK 0x00008000L
++#define GB_EDC_MODE__COUNT_FED_OUT_MASK 0x00010000L
++#define GB_EDC_MODE__GATE_FUE_MASK 0x00020000L
++#define GB_EDC_MODE__DED_MODE_MASK 0x00300000L
++#define GB_EDC_MODE__PROP_FED_MASK 0x20000000L
++#define GB_EDC_MODE__BYPASS_MASK 0x80000000L
++//CP_PQ_WPTR_POLL_CNTL
++#define CP_PQ_WPTR_POLL_CNTL__PERIOD__SHIFT 0x0
++#define CP_PQ_WPTR_POLL_CNTL__DISABLE_PEND_REQ_ONE_SHOT__SHIFT 0x1d
++#define CP_PQ_WPTR_POLL_CNTL__POLL_ACTIVE__SHIFT 0x1e
++#define CP_PQ_WPTR_POLL_CNTL__EN__SHIFT 0x1f
++#define CP_PQ_WPTR_POLL_CNTL__PERIOD_MASK 0x000000FFL
++#define CP_PQ_WPTR_POLL_CNTL__DISABLE_PEND_REQ_ONE_SHOT_MASK 0x20000000L
++#define CP_PQ_WPTR_POLL_CNTL__POLL_ACTIVE_MASK 0x40000000L
++#define CP_PQ_WPTR_POLL_CNTL__EN_MASK 0x80000000L
++//CP_PQ_WPTR_POLL_CNTL1
++#define CP_PQ_WPTR_POLL_CNTL1__QUEUE_MASK__SHIFT 0x0
++#define CP_PQ_WPTR_POLL_CNTL1__QUEUE_MASK_MASK 0xFFFFFFFFL
++//CP_ME1_PIPE0_INT_CNTL
++#define CP_ME1_PIPE0_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
++#define CP_ME1_PIPE0_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
++#define CP_ME1_PIPE0_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
++#define CP_ME1_PIPE0_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
++#define CP_ME1_PIPE0_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
++#define CP_ME1_PIPE0_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
++#define CP_ME1_PIPE0_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
++#define CP_ME1_PIPE0_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
++#define CP_ME1_PIPE0_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
++#define CP_ME1_PIPE0_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
++#define CP_ME1_PIPE0_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
++#define CP_ME1_PIPE0_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
++#define CP_ME1_PIPE0_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
++#define CP_ME1_PIPE0_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
++#define CP_ME1_PIPE0_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
++#define CP_ME1_PIPE0_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
++#define CP_ME1_PIPE0_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
++#define CP_ME1_PIPE0_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
++#define CP_ME1_PIPE0_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
++#define CP_ME1_PIPE0_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
++#define CP_ME1_PIPE0_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
++#define CP_ME1_PIPE0_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
++#define CP_ME1_PIPE0_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
++#define CP_ME1_PIPE0_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
++#define CP_ME1_PIPE0_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
++#define CP_ME1_PIPE0_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
++//CP_ME1_PIPE1_INT_CNTL
++#define CP_ME1_PIPE1_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
++#define CP_ME1_PIPE1_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
++#define CP_ME1_PIPE1_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
++#define CP_ME1_PIPE1_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
++#define CP_ME1_PIPE1_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
++#define CP_ME1_PIPE1_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
++#define CP_ME1_PIPE1_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
++#define CP_ME1_PIPE1_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
++#define CP_ME1_PIPE1_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
++#define CP_ME1_PIPE1_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
++#define CP_ME1_PIPE1_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
++#define CP_ME1_PIPE1_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
++#define CP_ME1_PIPE1_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
++#define CP_ME1_PIPE1_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
++#define CP_ME1_PIPE1_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
++#define CP_ME1_PIPE1_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
++#define CP_ME1_PIPE1_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
++#define CP_ME1_PIPE1_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
++#define CP_ME1_PIPE1_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
++#define CP_ME1_PIPE1_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
++#define CP_ME1_PIPE1_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
++#define CP_ME1_PIPE1_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
++#define CP_ME1_PIPE1_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
++#define CP_ME1_PIPE1_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
++#define CP_ME1_PIPE1_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
++#define CP_ME1_PIPE1_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
++//CP_ME1_PIPE2_INT_CNTL
++#define CP_ME1_PIPE2_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
++#define CP_ME1_PIPE2_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
++#define CP_ME1_PIPE2_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
++#define CP_ME1_PIPE2_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
++#define CP_ME1_PIPE2_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
++#define CP_ME1_PIPE2_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
++#define CP_ME1_PIPE2_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
++#define CP_ME1_PIPE2_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
++#define CP_ME1_PIPE2_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
++#define CP_ME1_PIPE2_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
++#define CP_ME1_PIPE2_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
++#define CP_ME1_PIPE2_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
++#define CP_ME1_PIPE2_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
++#define CP_ME1_PIPE2_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
++#define CP_ME1_PIPE2_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
++#define CP_ME1_PIPE2_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
++#define CP_ME1_PIPE2_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
++#define CP_ME1_PIPE2_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
++#define CP_ME1_PIPE2_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
++#define CP_ME1_PIPE2_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
++#define CP_ME1_PIPE2_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
++#define CP_ME1_PIPE2_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
++#define CP_ME1_PIPE2_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
++#define CP_ME1_PIPE2_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
++#define CP_ME1_PIPE2_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
++#define CP_ME1_PIPE2_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
++//CP_ME1_PIPE3_INT_CNTL
++#define CP_ME1_PIPE3_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
++#define CP_ME1_PIPE3_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
++#define CP_ME1_PIPE3_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
++#define CP_ME1_PIPE3_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
++#define CP_ME1_PIPE3_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
++#define CP_ME1_PIPE3_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
++#define CP_ME1_PIPE3_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
++#define CP_ME1_PIPE3_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
++#define CP_ME1_PIPE3_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
++#define CP_ME1_PIPE3_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
++#define CP_ME1_PIPE3_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
++#define CP_ME1_PIPE3_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
++#define CP_ME1_PIPE3_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
++#define CP_ME1_PIPE3_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
++#define CP_ME1_PIPE3_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
++#define CP_ME1_PIPE3_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
++#define CP_ME1_PIPE3_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
++#define CP_ME1_PIPE3_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
++#define CP_ME1_PIPE3_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
++#define CP_ME1_PIPE3_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
++#define CP_ME1_PIPE3_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
++#define CP_ME1_PIPE3_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
++#define CP_ME1_PIPE3_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
++#define CP_ME1_PIPE3_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
++#define CP_ME1_PIPE3_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
++#define CP_ME1_PIPE3_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
++//CP_ME2_PIPE0_INT_CNTL
++#define CP_ME2_PIPE0_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
++#define CP_ME2_PIPE0_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
++#define CP_ME2_PIPE0_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
++#define CP_ME2_PIPE0_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
++#define CP_ME2_PIPE0_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
++#define CP_ME2_PIPE0_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
++#define CP_ME2_PIPE0_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
++#define CP_ME2_PIPE0_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
++#define CP_ME2_PIPE0_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
++#define CP_ME2_PIPE0_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
++#define CP_ME2_PIPE0_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
++#define CP_ME2_PIPE0_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
++#define CP_ME2_PIPE0_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
++#define CP_ME2_PIPE0_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
++#define CP_ME2_PIPE0_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
++#define CP_ME2_PIPE0_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
++#define CP_ME2_PIPE0_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
++#define CP_ME2_PIPE0_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
++#define CP_ME2_PIPE0_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
++#define CP_ME2_PIPE0_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
++#define CP_ME2_PIPE0_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
++#define CP_ME2_PIPE0_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
++#define CP_ME2_PIPE0_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
++#define CP_ME2_PIPE0_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
++#define CP_ME2_PIPE0_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
++#define CP_ME2_PIPE0_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
++//CP_ME2_PIPE1_INT_CNTL
++#define CP_ME2_PIPE1_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
++#define CP_ME2_PIPE1_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
++#define CP_ME2_PIPE1_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
++#define CP_ME2_PIPE1_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
++#define CP_ME2_PIPE1_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
++#define CP_ME2_PIPE1_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
++#define CP_ME2_PIPE1_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
++#define CP_ME2_PIPE1_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
++#define CP_ME2_PIPE1_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
++#define CP_ME2_PIPE1_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
++#define CP_ME2_PIPE1_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
++#define CP_ME2_PIPE1_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
++#define CP_ME2_PIPE1_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
++#define CP_ME2_PIPE1_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
++#define CP_ME2_PIPE1_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
++#define CP_ME2_PIPE1_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
++#define CP_ME2_PIPE1_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
++#define CP_ME2_PIPE1_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
++#define CP_ME2_PIPE1_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
++#define CP_ME2_PIPE1_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
++#define CP_ME2_PIPE1_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
++#define CP_ME2_PIPE1_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
++#define CP_ME2_PIPE1_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
++#define CP_ME2_PIPE1_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
++#define CP_ME2_PIPE1_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
++#define CP_ME2_PIPE1_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
++//CP_ME2_PIPE2_INT_CNTL
++#define CP_ME2_PIPE2_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
++#define CP_ME2_PIPE2_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
++#define CP_ME2_PIPE2_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
++#define CP_ME2_PIPE2_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
++#define CP_ME2_PIPE2_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
++#define CP_ME2_PIPE2_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
++#define CP_ME2_PIPE2_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
++#define CP_ME2_PIPE2_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
++#define CP_ME2_PIPE2_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
++#define CP_ME2_PIPE2_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
++#define CP_ME2_PIPE2_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
++#define CP_ME2_PIPE2_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
++#define CP_ME2_PIPE2_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
++#define CP_ME2_PIPE2_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
++#define CP_ME2_PIPE2_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
++#define CP_ME2_PIPE2_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
++#define CP_ME2_PIPE2_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
++#define CP_ME2_PIPE2_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
++#define CP_ME2_PIPE2_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
++#define CP_ME2_PIPE2_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
++#define CP_ME2_PIPE2_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
++#define CP_ME2_PIPE2_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
++#define CP_ME2_PIPE2_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
++#define CP_ME2_PIPE2_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
++#define CP_ME2_PIPE2_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
++#define CP_ME2_PIPE2_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
++//CP_ME2_PIPE3_INT_CNTL
++#define CP_ME2_PIPE3_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
++#define CP_ME2_PIPE3_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
++#define CP_ME2_PIPE3_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
++#define CP_ME2_PIPE3_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
++#define CP_ME2_PIPE3_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
++#define CP_ME2_PIPE3_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
++#define CP_ME2_PIPE3_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
++#define CP_ME2_PIPE3_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
++#define CP_ME2_PIPE3_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
++#define CP_ME2_PIPE3_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
++#define CP_ME2_PIPE3_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
++#define CP_ME2_PIPE3_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
++#define CP_ME2_PIPE3_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
++#define CP_ME2_PIPE3_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
++#define CP_ME2_PIPE3_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
++#define CP_ME2_PIPE3_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
++#define CP_ME2_PIPE3_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
++#define CP_ME2_PIPE3_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
++#define CP_ME2_PIPE3_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
++#define CP_ME2_PIPE3_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
++#define CP_ME2_PIPE3_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
++#define CP_ME2_PIPE3_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
++#define CP_ME2_PIPE3_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
++#define CP_ME2_PIPE3_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
++#define CP_ME2_PIPE3_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
++#define CP_ME2_PIPE3_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
++//CP_ME1_PIPE0_INT_STATUS
++#define CP_ME1_PIPE0_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
++#define CP_ME1_PIPE0_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
++#define CP_ME1_PIPE0_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
++#define CP_ME1_PIPE0_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
++#define CP_ME1_PIPE0_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
++#define CP_ME1_PIPE0_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
++#define CP_ME1_PIPE0_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
++#define CP_ME1_PIPE0_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
++#define CP_ME1_PIPE0_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
++#define CP_ME1_PIPE0_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
++#define CP_ME1_PIPE0_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
++#define CP_ME1_PIPE0_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
++#define CP_ME1_PIPE0_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
++#define CP_ME1_PIPE0_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
++#define CP_ME1_PIPE0_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
++#define CP_ME1_PIPE0_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
++#define CP_ME1_PIPE0_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
++#define CP_ME1_PIPE0_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
++#define CP_ME1_PIPE0_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
++#define CP_ME1_PIPE0_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
++#define CP_ME1_PIPE0_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
++#define CP_ME1_PIPE0_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
++#define CP_ME1_PIPE0_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
++#define CP_ME1_PIPE0_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
++#define CP_ME1_PIPE0_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
++#define CP_ME1_PIPE0_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
++//CP_ME1_PIPE1_INT_STATUS
++#define CP_ME1_PIPE1_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
++#define CP_ME1_PIPE1_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
++#define CP_ME1_PIPE1_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
++#define CP_ME1_PIPE1_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
++#define CP_ME1_PIPE1_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
++#define CP_ME1_PIPE1_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
++#define CP_ME1_PIPE1_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
++#define CP_ME1_PIPE1_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
++#define CP_ME1_PIPE1_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
++#define CP_ME1_PIPE1_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
++#define CP_ME1_PIPE1_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
++#define CP_ME1_PIPE1_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
++#define CP_ME1_PIPE1_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
++#define CP_ME1_PIPE1_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
++#define CP_ME1_PIPE1_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
++#define CP_ME1_PIPE1_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
++#define CP_ME1_PIPE1_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
++#define CP_ME1_PIPE1_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
++#define CP_ME1_PIPE1_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
++#define CP_ME1_PIPE1_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
++#define CP_ME1_PIPE1_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
++#define CP_ME1_PIPE1_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
++#define CP_ME1_PIPE1_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
++#define CP_ME1_PIPE1_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
++#define CP_ME1_PIPE1_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
++#define CP_ME1_PIPE1_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
++//CP_ME1_PIPE2_INT_STATUS
++#define CP_ME1_PIPE2_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
++#define CP_ME1_PIPE2_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
++#define CP_ME1_PIPE2_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
++#define CP_ME1_PIPE2_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
++#define CP_ME1_PIPE2_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
++#define CP_ME1_PIPE2_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
++#define CP_ME1_PIPE2_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
++#define CP_ME1_PIPE2_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
++#define CP_ME1_PIPE2_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
++#define CP_ME1_PIPE2_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
++#define CP_ME1_PIPE2_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
++#define CP_ME1_PIPE2_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
++#define CP_ME1_PIPE2_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
++#define CP_ME1_PIPE2_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
++#define CP_ME1_PIPE2_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
++#define CP_ME1_PIPE2_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
++#define CP_ME1_PIPE2_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
++#define CP_ME1_PIPE2_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
++#define CP_ME1_PIPE2_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
++#define CP_ME1_PIPE2_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
++#define CP_ME1_PIPE2_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
++#define CP_ME1_PIPE2_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
++#define CP_ME1_PIPE2_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
++#define CP_ME1_PIPE2_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
++#define CP_ME1_PIPE2_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
++#define CP_ME1_PIPE2_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
++//CP_ME1_PIPE3_INT_STATUS
++#define CP_ME1_PIPE3_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
++#define CP_ME1_PIPE3_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
++#define CP_ME1_PIPE3_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
++#define CP_ME1_PIPE3_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
++#define CP_ME1_PIPE3_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
++#define CP_ME1_PIPE3_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
++#define CP_ME1_PIPE3_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
++#define CP_ME1_PIPE3_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
++#define CP_ME1_PIPE3_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
++#define CP_ME1_PIPE3_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
++#define CP_ME1_PIPE3_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
++#define CP_ME1_PIPE3_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
++#define CP_ME1_PIPE3_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
++#define CP_ME1_PIPE3_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
++#define CP_ME1_PIPE3_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
++#define CP_ME1_PIPE3_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
++#define CP_ME1_PIPE3_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
++#define CP_ME1_PIPE3_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
++#define CP_ME1_PIPE3_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
++#define CP_ME1_PIPE3_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
++#define CP_ME1_PIPE3_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
++#define CP_ME1_PIPE3_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
++#define CP_ME1_PIPE3_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
++#define CP_ME1_PIPE3_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
++#define CP_ME1_PIPE3_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
++#define CP_ME1_PIPE3_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
++//CP_ME2_PIPE0_INT_STATUS
++#define CP_ME2_PIPE0_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
++#define CP_ME2_PIPE0_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
++#define CP_ME2_PIPE0_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
++#define CP_ME2_PIPE0_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
++#define CP_ME2_PIPE0_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
++#define CP_ME2_PIPE0_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
++#define CP_ME2_PIPE0_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
++#define CP_ME2_PIPE0_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
++#define CP_ME2_PIPE0_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
++#define CP_ME2_PIPE0_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
++#define CP_ME2_PIPE0_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
++#define CP_ME2_PIPE0_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
++#define CP_ME2_PIPE0_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
++#define CP_ME2_PIPE0_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
++#define CP_ME2_PIPE0_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
++#define CP_ME2_PIPE0_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
++#define CP_ME2_PIPE0_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
++#define CP_ME2_PIPE0_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
++#define CP_ME2_PIPE0_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
++#define CP_ME2_PIPE0_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
++#define CP_ME2_PIPE0_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
++#define CP_ME2_PIPE0_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
++#define CP_ME2_PIPE0_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
++#define CP_ME2_PIPE0_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
++#define CP_ME2_PIPE0_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
++#define CP_ME2_PIPE0_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
++//CP_ME2_PIPE1_INT_STATUS
++#define CP_ME2_PIPE1_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
++#define CP_ME2_PIPE1_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
++#define CP_ME2_PIPE1_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
++#define CP_ME2_PIPE1_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
++#define CP_ME2_PIPE1_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
++#define CP_ME2_PIPE1_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
++#define CP_ME2_PIPE1_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
++#define CP_ME2_PIPE1_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
++#define CP_ME2_PIPE1_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
++#define CP_ME2_PIPE1_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
++#define CP_ME2_PIPE1_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
++#define CP_ME2_PIPE1_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
++#define CP_ME2_PIPE1_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
++#define CP_ME2_PIPE1_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
++#define CP_ME2_PIPE1_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
++#define CP_ME2_PIPE1_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
++#define CP_ME2_PIPE1_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
++#define CP_ME2_PIPE1_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
++#define CP_ME2_PIPE1_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
++#define CP_ME2_PIPE1_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
++#define CP_ME2_PIPE1_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
++#define CP_ME2_PIPE1_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
++#define CP_ME2_PIPE1_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
++#define CP_ME2_PIPE1_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
++#define CP_ME2_PIPE1_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
++#define CP_ME2_PIPE1_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
++//CP_ME2_PIPE2_INT_STATUS
++#define CP_ME2_PIPE2_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
++#define CP_ME2_PIPE2_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
++#define CP_ME2_PIPE2_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
++#define CP_ME2_PIPE2_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
++#define CP_ME2_PIPE2_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
++#define CP_ME2_PIPE2_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
++#define CP_ME2_PIPE2_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
++#define CP_ME2_PIPE2_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
++#define CP_ME2_PIPE2_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
++#define CP_ME2_PIPE2_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
++#define CP_ME2_PIPE2_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
++#define CP_ME2_PIPE2_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
++#define CP_ME2_PIPE2_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
++#define CP_ME2_PIPE2_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
++#define CP_ME2_PIPE2_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
++#define CP_ME2_PIPE2_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
++#define CP_ME2_PIPE2_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
++#define CP_ME2_PIPE2_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
++#define CP_ME2_PIPE2_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
++#define CP_ME2_PIPE2_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
++#define CP_ME2_PIPE2_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
++#define CP_ME2_PIPE2_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
++#define CP_ME2_PIPE2_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
++#define CP_ME2_PIPE2_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
++#define CP_ME2_PIPE2_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
++#define CP_ME2_PIPE2_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
++//CP_ME2_PIPE3_INT_STATUS
++#define CP_ME2_PIPE3_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
++#define CP_ME2_PIPE3_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
++#define CP_ME2_PIPE3_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
++#define CP_ME2_PIPE3_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
++#define CP_ME2_PIPE3_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
++#define CP_ME2_PIPE3_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
++#define CP_ME2_PIPE3_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
++#define CP_ME2_PIPE3_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
++#define CP_ME2_PIPE3_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
++#define CP_ME2_PIPE3_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
++#define CP_ME2_PIPE3_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
++#define CP_ME2_PIPE3_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
++#define CP_ME2_PIPE3_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
++#define CP_ME2_PIPE3_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
++#define CP_ME2_PIPE3_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
++#define CP_ME2_PIPE3_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
++#define CP_ME2_PIPE3_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
++#define CP_ME2_PIPE3_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
++#define CP_ME2_PIPE3_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
++#define CP_ME2_PIPE3_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
++#define CP_ME2_PIPE3_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
++#define CP_ME2_PIPE3_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
++#define CP_ME2_PIPE3_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
++#define CP_ME2_PIPE3_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
++#define CP_ME2_PIPE3_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
++#define CP_ME2_PIPE3_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
++//CP_ME1_INT_STAT_DEBUG
++#define CP_ME1_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED__SHIFT 0x17
++#define CP_ME1_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED_MASK 0x00800000L
++//CP_ME2_INT_STAT_DEBUG
++#define CP_ME2_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED__SHIFT 0x17
++#define CP_ME2_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED_MASK 0x00800000L
++//CC_GC_EDC_CONFIG
++#define CC_GC_EDC_CONFIG__DIS_EDC__SHIFT 0x1
++#define CC_GC_EDC_CONFIG__DIS_EDC_MASK 0x00000002L
++//CP_ME1_PIPE_PRIORITY_CNTS
++#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY1_CNT__SHIFT 0x0
++#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT__SHIFT 0x8
++#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT__SHIFT 0x10
++#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY3_CNT__SHIFT 0x18
++#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY1_CNT_MASK 0x000000FFL
++#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT_MASK 0x0000FF00L
++#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT_MASK 0x00FF0000L
++#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY3_CNT_MASK 0xFF000000L
++//CP_ME1_PIPE0_PRIORITY
++#define CP_ME1_PIPE0_PRIORITY__PRIORITY__SHIFT 0x0
++#define CP_ME1_PIPE0_PRIORITY__PRIORITY_MASK 0x00000003L
++//CP_ME1_PIPE1_PRIORITY
++#define CP_ME1_PIPE1_PRIORITY__PRIORITY__SHIFT 0x0
++#define CP_ME1_PIPE1_PRIORITY__PRIORITY_MASK 0x00000003L
++//CP_ME1_PIPE2_PRIORITY
++#define CP_ME1_PIPE2_PRIORITY__PRIORITY__SHIFT 0x0
++#define CP_ME1_PIPE2_PRIORITY__PRIORITY_MASK 0x00000003L
++//CP_ME1_PIPE3_PRIORITY
++#define CP_ME1_PIPE3_PRIORITY__PRIORITY__SHIFT 0x0
++#define CP_ME1_PIPE3_PRIORITY__PRIORITY_MASK 0x00000003L
++//CP_ME2_PIPE_PRIORITY_CNTS
++#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY1_CNT__SHIFT 0x0
++#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT__SHIFT 0x8
++#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT__SHIFT 0x10
++#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY3_CNT__SHIFT 0x18
++#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY1_CNT_MASK 0x000000FFL
++#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT_MASK 0x0000FF00L
++#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT_MASK 0x00FF0000L
++#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY3_CNT_MASK 0xFF000000L
++//CP_ME2_PIPE0_PRIORITY
++#define CP_ME2_PIPE0_PRIORITY__PRIORITY__SHIFT 0x0
++#define CP_ME2_PIPE0_PRIORITY__PRIORITY_MASK 0x00000003L
++//CP_ME2_PIPE1_PRIORITY
++#define CP_ME2_PIPE1_PRIORITY__PRIORITY__SHIFT 0x0
++#define CP_ME2_PIPE1_PRIORITY__PRIORITY_MASK 0x00000003L
++//CP_ME2_PIPE2_PRIORITY
++#define CP_ME2_PIPE2_PRIORITY__PRIORITY__SHIFT 0x0
++#define CP_ME2_PIPE2_PRIORITY__PRIORITY_MASK 0x00000003L
++//CP_ME2_PIPE3_PRIORITY
++#define CP_ME2_PIPE3_PRIORITY__PRIORITY__SHIFT 0x0
++#define CP_ME2_PIPE3_PRIORITY__PRIORITY_MASK 0x00000003L
++//CP_CE_PRGRM_CNTR_START
++#define CP_CE_PRGRM_CNTR_START__IP_START__SHIFT 0x0
++#define CP_CE_PRGRM_CNTR_START__IP_START_MASK 0x000007FFL
++//CP_PFP_PRGRM_CNTR_START
++#define CP_PFP_PRGRM_CNTR_START__IP_START__SHIFT 0x0
++#define CP_PFP_PRGRM_CNTR_START__IP_START_MASK 0x00001FFFL
++//CP_ME_PRGRM_CNTR_START
++#define CP_ME_PRGRM_CNTR_START__IP_START__SHIFT 0x0
++#define CP_ME_PRGRM_CNTR_START__IP_START_MASK 0x00000FFFL
++//CP_MEC1_PRGRM_CNTR_START
++#define CP_MEC1_PRGRM_CNTR_START__IP_START__SHIFT 0x0
++#define CP_MEC1_PRGRM_CNTR_START__IP_START_MASK 0x0000FFFFL
++//CP_MEC2_PRGRM_CNTR_START
++#define CP_MEC2_PRGRM_CNTR_START__IP_START__SHIFT 0x0
++#define CP_MEC2_PRGRM_CNTR_START__IP_START_MASK 0x0000FFFFL
++//CP_CE_INTR_ROUTINE_START
++#define CP_CE_INTR_ROUTINE_START__IR_START__SHIFT 0x0
++#define CP_CE_INTR_ROUTINE_START__IR_START_MASK 0x000007FFL
++//CP_PFP_INTR_ROUTINE_START
++#define CP_PFP_INTR_ROUTINE_START__IR_START__SHIFT 0x0
++#define CP_PFP_INTR_ROUTINE_START__IR_START_MASK 0x00001FFFL
++//CP_ME_INTR_ROUTINE_START
++#define CP_ME_INTR_ROUTINE_START__IR_START__SHIFT 0x0
++#define CP_ME_INTR_ROUTINE_START__IR_START_MASK 0x00000FFFL
++//CP_MEC1_INTR_ROUTINE_START
++#define CP_MEC1_INTR_ROUTINE_START__IR_START__SHIFT 0x0
++#define CP_MEC1_INTR_ROUTINE_START__IR_START_MASK 0x0000FFFFL
++//CP_MEC2_INTR_ROUTINE_START
++#define CP_MEC2_INTR_ROUTINE_START__IR_START__SHIFT 0x0
++#define CP_MEC2_INTR_ROUTINE_START__IR_START_MASK 0x0000FFFFL
++//CP_CONTEXT_CNTL
++#define CP_CONTEXT_CNTL__ME0PIPE0_MAX_WD_CNTX__SHIFT 0x0
++#define CP_CONTEXT_CNTL__ME0PIPE0_MAX_PIPE_CNTX__SHIFT 0x4
++#define CP_CONTEXT_CNTL__ME0PIPE1_MAX_WD_CNTX__SHIFT 0x10
++#define CP_CONTEXT_CNTL__ME0PIPE1_MAX_PIPE_CNTX__SHIFT 0x14
++#define CP_CONTEXT_CNTL__ME0PIPE0_MAX_WD_CNTX_MASK 0x00000007L
++#define CP_CONTEXT_CNTL__ME0PIPE0_MAX_PIPE_CNTX_MASK 0x00000070L
++#define CP_CONTEXT_CNTL__ME0PIPE1_MAX_WD_CNTX_MASK 0x00070000L
++#define CP_CONTEXT_CNTL__ME0PIPE1_MAX_PIPE_CNTX_MASK 0x00700000L
++//CP_MAX_CONTEXT
++#define CP_MAX_CONTEXT__MAX_CONTEXT__SHIFT 0x0
++#define CP_MAX_CONTEXT__MAX_CONTEXT_MASK 0x00000007L
++//CP_IQ_WAIT_TIME1
++#define CP_IQ_WAIT_TIME1__IB_OFFLOAD__SHIFT 0x0
++#define CP_IQ_WAIT_TIME1__ATOMIC_OFFLOAD__SHIFT 0x8
++#define CP_IQ_WAIT_TIME1__WRM_OFFLOAD__SHIFT 0x10
++#define CP_IQ_WAIT_TIME1__GWS__SHIFT 0x18
++#define CP_IQ_WAIT_TIME1__IB_OFFLOAD_MASK 0x000000FFL
++#define CP_IQ_WAIT_TIME1__ATOMIC_OFFLOAD_MASK 0x0000FF00L
++#define CP_IQ_WAIT_TIME1__WRM_OFFLOAD_MASK 0x00FF0000L
++#define CP_IQ_WAIT_TIME1__GWS_MASK 0xFF000000L
++//CP_IQ_WAIT_TIME2
++#define CP_IQ_WAIT_TIME2__QUE_SLEEP__SHIFT 0x0
++#define CP_IQ_WAIT_TIME2__SCH_WAVE__SHIFT 0x8
++#define CP_IQ_WAIT_TIME2__SEM_REARM__SHIFT 0x10
++#define CP_IQ_WAIT_TIME2__DEQ_RETRY__SHIFT 0x18
++#define CP_IQ_WAIT_TIME2__QUE_SLEEP_MASK 0x000000FFL
++#define CP_IQ_WAIT_TIME2__SCH_WAVE_MASK 0x0000FF00L
++#define CP_IQ_WAIT_TIME2__SEM_REARM_MASK 0x00FF0000L
++#define CP_IQ_WAIT_TIME2__DEQ_RETRY_MASK 0xFF000000L
++//CP_RB0_BASE_HI
++#define CP_RB0_BASE_HI__RB_BASE_HI__SHIFT 0x0
++#define CP_RB0_BASE_HI__RB_BASE_HI_MASK 0x000000FFL
++//CP_RB1_BASE_HI
++#define CP_RB1_BASE_HI__RB_BASE_HI__SHIFT 0x0
++#define CP_RB1_BASE_HI__RB_BASE_HI_MASK 0x000000FFL
++//CP_VMID_RESET
++#define CP_VMID_RESET__RESET_REQUEST__SHIFT 0x0
++#define CP_VMID_RESET__RESET_REQUEST_MASK 0x0000FFFFL
++//CPC_INT_CNTL
++#define CPC_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
++#define CPC_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
++#define CPC_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
++#define CPC_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
++#define CPC_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
++#define CPC_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
++#define CPC_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
++#define CPC_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
++#define CPC_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
++#define CPC_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
++#define CPC_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
++#define CPC_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
++#define CPC_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
++#define CPC_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
++#define CPC_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
++#define CPC_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
++#define CPC_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
++#define CPC_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
++#define CPC_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
++#define CPC_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
++#define CPC_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
++#define CPC_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
++#define CPC_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
++#define CPC_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
++#define CPC_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
++#define CPC_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
++//CPC_INT_STATUS
++#define CPC_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
++#define CPC_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
++#define CPC_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
++#define CPC_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
++#define CPC_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
++#define CPC_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
++#define CPC_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
++#define CPC_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
++#define CPC_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
++#define CPC_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
++#define CPC_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
++#define CPC_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
++#define CPC_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
++#define CPC_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
++#define CPC_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
++#define CPC_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
++#define CPC_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
++#define CPC_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
++#define CPC_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
++#define CPC_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
++#define CPC_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
++#define CPC_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
++#define CPC_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
++#define CPC_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
++#define CPC_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
++#define CPC_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
++//CP_VMID_PREEMPT
++#define CP_VMID_PREEMPT__PREEMPT_REQUEST__SHIFT 0x0
++#define CP_VMID_PREEMPT__VIRT_COMMAND__SHIFT 0x10
++#define CP_VMID_PREEMPT__PREEMPT_REQUEST_MASK 0x0000FFFFL
++#define CP_VMID_PREEMPT__VIRT_COMMAND_MASK 0x000F0000L
++//CPC_INT_CNTX_ID
++#define CPC_INT_CNTX_ID__CNTX_ID__SHIFT 0x0
++#define CPC_INT_CNTX_ID__CNTX_ID_MASK 0xFFFFFFFFL
++//CP_PQ_STATUS
++#define CP_PQ_STATUS__DOORBELL_UPDATED__SHIFT 0x0
++#define CP_PQ_STATUS__DOORBELL_ENABLE__SHIFT 0x1
++#define CP_PQ_STATUS__DOORBELL_UPDATED_MASK 0x00000001L
++#define CP_PQ_STATUS__DOORBELL_ENABLE_MASK 0x00000002L
++//CP_CPC_IC_BASE_LO
++#define CP_CPC_IC_BASE_LO__IC_BASE_LO__SHIFT 0xc
++#define CP_CPC_IC_BASE_LO__IC_BASE_LO_MASK 0xFFFFF000L
++//CP_CPC_IC_BASE_HI
++#define CP_CPC_IC_BASE_HI__IC_BASE_HI__SHIFT 0x0
++#define CP_CPC_IC_BASE_HI__IC_BASE_HI_MASK 0x0000FFFFL
++//CP_CPC_IC_BASE_CNTL
++#define CP_CPC_IC_BASE_CNTL__VMID__SHIFT 0x0
++#define CP_CPC_IC_BASE_CNTL__CACHE_POLICY__SHIFT 0x18
++#define CP_CPC_IC_BASE_CNTL__VMID_MASK 0x0000000FL
++#define CP_CPC_IC_BASE_CNTL__CACHE_POLICY_MASK 0x01000000L
++//CP_CPC_IC_OP_CNTL
++#define CP_CPC_IC_OP_CNTL__INVALIDATE_CACHE__SHIFT 0x0
++#define CP_CPC_IC_OP_CNTL__PRIME_ICACHE__SHIFT 0x4
++#define CP_CPC_IC_OP_CNTL__ICACHE_PRIMED__SHIFT 0x5
++#define CP_CPC_IC_OP_CNTL__INVALIDATE_CACHE_MASK 0x00000001L
++#define CP_CPC_IC_OP_CNTL__PRIME_ICACHE_MASK 0x00000010L
++#define CP_CPC_IC_OP_CNTL__ICACHE_PRIMED_MASK 0x00000020L
++//CP_MEC1_F32_INT_DIS
++#define CP_MEC1_F32_INT_DIS__EDC_ROQ_FED_INT__SHIFT 0x0
++#define CP_MEC1_F32_INT_DIS__PRIV_REG_INT__SHIFT 0x1
++#define CP_MEC1_F32_INT_DIS__RESERVED_BIT_ERR_INT__SHIFT 0x2
++#define CP_MEC1_F32_INT_DIS__EDC_TC_FED_INT__SHIFT 0x3
++#define CP_MEC1_F32_INT_DIS__EDC_GDS_FED_INT__SHIFT 0x4
++#define CP_MEC1_F32_INT_DIS__EDC_SCRATCH_FED_INT__SHIFT 0x5
++#define CP_MEC1_F32_INT_DIS__WAVE_RESTORE_INT__SHIFT 0x6
++#define CP_MEC1_F32_INT_DIS__SUA_VIOLATION_INT__SHIFT 0x7
++#define CP_MEC1_F32_INT_DIS__EDC_DMA_FED_INT__SHIFT 0x8
++#define CP_MEC1_F32_INT_DIS__IQ_TIMER_INT__SHIFT 0x9
++#define CP_MEC1_F32_INT_DIS__GPF_INT_CPF__SHIFT 0xa
++#define CP_MEC1_F32_INT_DIS__GPF_INT_DMA__SHIFT 0xb
++#define CP_MEC1_F32_INT_DIS__GPF_INT_CPC__SHIFT 0xc
++#define CP_MEC1_F32_INT_DIS__EDC_SR_MEM_FED_INT__SHIFT 0xd
++#define CP_MEC1_F32_INT_DIS__QUEUE_MESSAGE_INT__SHIFT 0xe
++#define CP_MEC1_F32_INT_DIS__FATAL_EDC_ERROR_INT__SHIFT 0xf
++#define CP_MEC1_F32_INT_DIS__EDC_ROQ_FED_INT_MASK 0x00000001L
++#define CP_MEC1_F32_INT_DIS__PRIV_REG_INT_MASK 0x00000002L
++#define CP_MEC1_F32_INT_DIS__RESERVED_BIT_ERR_INT_MASK 0x00000004L
++#define CP_MEC1_F32_INT_DIS__EDC_TC_FED_INT_MASK 0x00000008L
++#define CP_MEC1_F32_INT_DIS__EDC_GDS_FED_INT_MASK 0x00000010L
++#define CP_MEC1_F32_INT_DIS__EDC_SCRATCH_FED_INT_MASK 0x00000020L
++#define CP_MEC1_F32_INT_DIS__WAVE_RESTORE_INT_MASK 0x00000040L
++#define CP_MEC1_F32_INT_DIS__SUA_VIOLATION_INT_MASK 0x00000080L
++#define CP_MEC1_F32_INT_DIS__EDC_DMA_FED_INT_MASK 0x00000100L
++#define CP_MEC1_F32_INT_DIS__IQ_TIMER_INT_MASK 0x00000200L
++#define CP_MEC1_F32_INT_DIS__GPF_INT_CPF_MASK 0x00000400L
++#define CP_MEC1_F32_INT_DIS__GPF_INT_DMA_MASK 0x00000800L
++#define CP_MEC1_F32_INT_DIS__GPF_INT_CPC_MASK 0x00001000L
++#define CP_MEC1_F32_INT_DIS__EDC_SR_MEM_FED_INT_MASK 0x00002000L
++#define CP_MEC1_F32_INT_DIS__QUEUE_MESSAGE_INT_MASK 0x00004000L
++#define CP_MEC1_F32_INT_DIS__FATAL_EDC_ERROR_INT_MASK 0x00008000L
++//CP_MEC2_F32_INT_DIS
++#define CP_MEC2_F32_INT_DIS__EDC_ROQ_FED_INT__SHIFT 0x0
++#define CP_MEC2_F32_INT_DIS__PRIV_REG_INT__SHIFT 0x1
++#define CP_MEC2_F32_INT_DIS__RESERVED_BIT_ERR_INT__SHIFT 0x2
++#define CP_MEC2_F32_INT_DIS__EDC_TC_FED_INT__SHIFT 0x3
++#define CP_MEC2_F32_INT_DIS__EDC_GDS_FED_INT__SHIFT 0x4
++#define CP_MEC2_F32_INT_DIS__EDC_SCRATCH_FED_INT__SHIFT 0x5
++#define CP_MEC2_F32_INT_DIS__WAVE_RESTORE_INT__SHIFT 0x6
++#define CP_MEC2_F32_INT_DIS__SUA_VIOLATION_INT__SHIFT 0x7
++#define CP_MEC2_F32_INT_DIS__EDC_DMA_FED_INT__SHIFT 0x8
++#define CP_MEC2_F32_INT_DIS__IQ_TIMER_INT__SHIFT 0x9
++#define CP_MEC2_F32_INT_DIS__GPF_INT_CPF__SHIFT 0xa
++#define CP_MEC2_F32_INT_DIS__GPF_INT_DMA__SHIFT 0xb
++#define CP_MEC2_F32_INT_DIS__GPF_INT_CPC__SHIFT 0xc
++#define CP_MEC2_F32_INT_DIS__EDC_SR_MEM_FED_INT__SHIFT 0xd
++#define CP_MEC2_F32_INT_DIS__QUEUE_MESSAGE_INT__SHIFT 0xe
++#define CP_MEC2_F32_INT_DIS__FATAL_EDC_ERROR_INT__SHIFT 0xf
++#define CP_MEC2_F32_INT_DIS__EDC_ROQ_FED_INT_MASK 0x00000001L
++#define CP_MEC2_F32_INT_DIS__PRIV_REG_INT_MASK 0x00000002L
++#define CP_MEC2_F32_INT_DIS__RESERVED_BIT_ERR_INT_MASK 0x00000004L
++#define CP_MEC2_F32_INT_DIS__EDC_TC_FED_INT_MASK 0x00000008L
++#define CP_MEC2_F32_INT_DIS__EDC_GDS_FED_INT_MASK 0x00000010L
++#define CP_MEC2_F32_INT_DIS__EDC_SCRATCH_FED_INT_MASK 0x00000020L
++#define CP_MEC2_F32_INT_DIS__WAVE_RESTORE_INT_MASK 0x00000040L
++#define CP_MEC2_F32_INT_DIS__SUA_VIOLATION_INT_MASK 0x00000080L
++#define CP_MEC2_F32_INT_DIS__EDC_DMA_FED_INT_MASK 0x00000100L
++#define CP_MEC2_F32_INT_DIS__IQ_TIMER_INT_MASK 0x00000200L
++#define CP_MEC2_F32_INT_DIS__GPF_INT_CPF_MASK 0x00000400L
++#define CP_MEC2_F32_INT_DIS__GPF_INT_DMA_MASK 0x00000800L
++#define CP_MEC2_F32_INT_DIS__GPF_INT_CPC_MASK 0x00001000L
++#define CP_MEC2_F32_INT_DIS__EDC_SR_MEM_FED_INT_MASK 0x00002000L
++#define CP_MEC2_F32_INT_DIS__QUEUE_MESSAGE_INT_MASK 0x00004000L
++#define CP_MEC2_F32_INT_DIS__FATAL_EDC_ERROR_INT_MASK 0x00008000L
++//CP_VMID_STATUS
++#define CP_VMID_STATUS__PREEMPT_DE_STATUS__SHIFT 0x0
++#define CP_VMID_STATUS__PREEMPT_CE_STATUS__SHIFT 0x10
++#define CP_VMID_STATUS__PREEMPT_DE_STATUS_MASK 0x0000FFFFL
++#define CP_VMID_STATUS__PREEMPT_CE_STATUS_MASK 0xFFFF0000L
++
++
++// addressBlock: gc_cppdec2
++//CP_RB_DOORBELL_CONTROL_SCH_0
++#define CP_RB_DOORBELL_CONTROL_SCH_0__DOORBELL_OFFSET__SHIFT 0x2
++#define CP_RB_DOORBELL_CONTROL_SCH_0__DOORBELL_EN__SHIFT 0x1e
++#define CP_RB_DOORBELL_CONTROL_SCH_0__DOORBELL_HIT__SHIFT 0x1f
++#define CP_RB_DOORBELL_CONTROL_SCH_0__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
++#define CP_RB_DOORBELL_CONTROL_SCH_0__DOORBELL_EN_MASK 0x40000000L
++#define CP_RB_DOORBELL_CONTROL_SCH_0__DOORBELL_HIT_MASK 0x80000000L
++//CP_RB_DOORBELL_CONTROL_SCH_1
++#define CP_RB_DOORBELL_CONTROL_SCH_1__DOORBELL_OFFSET__SHIFT 0x2
++#define CP_RB_DOORBELL_CONTROL_SCH_1__DOORBELL_EN__SHIFT 0x1e
++#define CP_RB_DOORBELL_CONTROL_SCH_1__DOORBELL_HIT__SHIFT 0x1f
++#define CP_RB_DOORBELL_CONTROL_SCH_1__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
++#define CP_RB_DOORBELL_CONTROL_SCH_1__DOORBELL_EN_MASK 0x40000000L
++#define CP_RB_DOORBELL_CONTROL_SCH_1__DOORBELL_HIT_MASK 0x80000000L
++//CP_RB_DOORBELL_CONTROL_SCH_2
++#define CP_RB_DOORBELL_CONTROL_SCH_2__DOORBELL_OFFSET__SHIFT 0x2
++#define CP_RB_DOORBELL_CONTROL_SCH_2__DOORBELL_EN__SHIFT 0x1e
++#define CP_RB_DOORBELL_CONTROL_SCH_2__DOORBELL_HIT__SHIFT 0x1f
++#define CP_RB_DOORBELL_CONTROL_SCH_2__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
++#define CP_RB_DOORBELL_CONTROL_SCH_2__DOORBELL_EN_MASK 0x40000000L
++#define CP_RB_DOORBELL_CONTROL_SCH_2__DOORBELL_HIT_MASK 0x80000000L
++//CP_RB_DOORBELL_CONTROL_SCH_3
++#define CP_RB_DOORBELL_CONTROL_SCH_3__DOORBELL_OFFSET__SHIFT 0x2
++#define CP_RB_DOORBELL_CONTROL_SCH_3__DOORBELL_EN__SHIFT 0x1e
++#define CP_RB_DOORBELL_CONTROL_SCH_3__DOORBELL_HIT__SHIFT 0x1f
++#define CP_RB_DOORBELL_CONTROL_SCH_3__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
++#define CP_RB_DOORBELL_CONTROL_SCH_3__DOORBELL_EN_MASK 0x40000000L
++#define CP_RB_DOORBELL_CONTROL_SCH_3__DOORBELL_HIT_MASK 0x80000000L
++//CP_RB_DOORBELL_CONTROL_SCH_4
++#define CP_RB_DOORBELL_CONTROL_SCH_4__DOORBELL_OFFSET__SHIFT 0x2
++#define CP_RB_DOORBELL_CONTROL_SCH_4__DOORBELL_EN__SHIFT 0x1e
++#define CP_RB_DOORBELL_CONTROL_SCH_4__DOORBELL_HIT__SHIFT 0x1f
++#define CP_RB_DOORBELL_CONTROL_SCH_4__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
++#define CP_RB_DOORBELL_CONTROL_SCH_4__DOORBELL_EN_MASK 0x40000000L
++#define CP_RB_DOORBELL_CONTROL_SCH_4__DOORBELL_HIT_MASK 0x80000000L
++//CP_RB_DOORBELL_CONTROL_SCH_5
++#define CP_RB_DOORBELL_CONTROL_SCH_5__DOORBELL_OFFSET__SHIFT 0x2
++#define CP_RB_DOORBELL_CONTROL_SCH_5__DOORBELL_EN__SHIFT 0x1e
++#define CP_RB_DOORBELL_CONTROL_SCH_5__DOORBELL_HIT__SHIFT 0x1f
++#define CP_RB_DOORBELL_CONTROL_SCH_5__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
++#define CP_RB_DOORBELL_CONTROL_SCH_5__DOORBELL_EN_MASK 0x40000000L
++#define CP_RB_DOORBELL_CONTROL_SCH_5__DOORBELL_HIT_MASK 0x80000000L
++//CP_RB_DOORBELL_CONTROL_SCH_6
++#define CP_RB_DOORBELL_CONTROL_SCH_6__DOORBELL_OFFSET__SHIFT 0x2
++#define CP_RB_DOORBELL_CONTROL_SCH_6__DOORBELL_EN__SHIFT 0x1e
++#define CP_RB_DOORBELL_CONTROL_SCH_6__DOORBELL_HIT__SHIFT 0x1f
++#define CP_RB_DOORBELL_CONTROL_SCH_6__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
++#define CP_RB_DOORBELL_CONTROL_SCH_6__DOORBELL_EN_MASK 0x40000000L
++#define CP_RB_DOORBELL_CONTROL_SCH_6__DOORBELL_HIT_MASK 0x80000000L
++//CP_RB_DOORBELL_CONTROL_SCH_7
++#define CP_RB_DOORBELL_CONTROL_SCH_7__DOORBELL_OFFSET__SHIFT 0x2
++#define CP_RB_DOORBELL_CONTROL_SCH_7__DOORBELL_EN__SHIFT 0x1e
++#define CP_RB_DOORBELL_CONTROL_SCH_7__DOORBELL_HIT__SHIFT 0x1f
++#define CP_RB_DOORBELL_CONTROL_SCH_7__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
++#define CP_RB_DOORBELL_CONTROL_SCH_7__DOORBELL_EN_MASK 0x40000000L
++#define CP_RB_DOORBELL_CONTROL_SCH_7__DOORBELL_HIT_MASK 0x80000000L
++//CP_RB_DOORBELL_CLEAR
++#define CP_RB_DOORBELL_CLEAR__MAPPED_QUEUE__SHIFT 0x0
++#define CP_RB_DOORBELL_CLEAR__MAPPED_QUE_DOORBELL_EN_CLEAR__SHIFT 0x8
++#define CP_RB_DOORBELL_CLEAR__MAPPED_QUE_DOORBELL_HIT_CLEAR__SHIFT 0x9
++#define CP_RB_DOORBELL_CLEAR__MASTER_DOORBELL_EN_CLEAR__SHIFT 0xa
++#define CP_RB_DOORBELL_CLEAR__MASTER_DOORBELL_HIT_CLEAR__SHIFT 0xb
++#define CP_RB_DOORBELL_CLEAR__QUEUES_DOORBELL_EN_CLEAR__SHIFT 0xc
++#define CP_RB_DOORBELL_CLEAR__QUEUES_DOORBELL_HIT_CLEAR__SHIFT 0xd
++#define CP_RB_DOORBELL_CLEAR__MAPPED_QUEUE_MASK 0x00000007L
++#define CP_RB_DOORBELL_CLEAR__MAPPED_QUE_DOORBELL_EN_CLEAR_MASK 0x00000100L
++#define CP_RB_DOORBELL_CLEAR__MAPPED_QUE_DOORBELL_HIT_CLEAR_MASK 0x00000200L
++#define CP_RB_DOORBELL_CLEAR__MASTER_DOORBELL_EN_CLEAR_MASK 0x00000400L
++#define CP_RB_DOORBELL_CLEAR__MASTER_DOORBELL_HIT_CLEAR_MASK 0x00000800L
++#define CP_RB_DOORBELL_CLEAR__QUEUES_DOORBELL_EN_CLEAR_MASK 0x00001000L
++#define CP_RB_DOORBELL_CLEAR__QUEUES_DOORBELL_HIT_CLEAR_MASK 0x00002000L
++//CP_GFX_MQD_CONTROL
++#define CP_GFX_MQD_CONTROL__VMID__SHIFT 0x0
++#define CP_GFX_MQD_CONTROL__EXE_DISABLE__SHIFT 0x17
++#define CP_GFX_MQD_CONTROL__CACHE_POLICY__SHIFT 0x18
++#define CP_GFX_MQD_CONTROL__VMID_MASK 0x0000000FL
++#define CP_GFX_MQD_CONTROL__EXE_DISABLE_MASK 0x00800000L
++#define CP_GFX_MQD_CONTROL__CACHE_POLICY_MASK 0x01000000L
++//CP_GFX_MQD_BASE_ADDR
++#define CP_GFX_MQD_BASE_ADDR__BASE_ADDR__SHIFT 0x2
++#define CP_GFX_MQD_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFCL
++//CP_GFX_MQD_BASE_ADDR_HI
++#define CP_GFX_MQD_BASE_ADDR_HI__BASE_ADDR_HI__SHIFT 0x0
++#define CP_GFX_MQD_BASE_ADDR_HI__BASE_ADDR_HI_MASK 0x0000FFFFL
++//CP_RB_STATUS
++#define CP_RB_STATUS__DOORBELL_UPDATED__SHIFT 0x0
++#define CP_RB_STATUS__DOORBELL_ENABLE__SHIFT 0x1
++#define CP_RB_STATUS__DOORBELL_UPDATED_MASK 0x00000001L
++#define CP_RB_STATUS__DOORBELL_ENABLE_MASK 0x00000002L
++//CPG_UTCL1_STATUS
++#define CPG_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
++#define CPG_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
++#define CPG_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
++#define CPG_UTCL1_STATUS__FAULT_UTCL1ID__SHIFT 0x8
++#define CPG_UTCL1_STATUS__RETRY_UTCL1ID__SHIFT 0x10
++#define CPG_UTCL1_STATUS__PRT_UTCL1ID__SHIFT 0x18
++#define CPG_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
++#define CPG_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
++#define CPG_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
++#define CPG_UTCL1_STATUS__FAULT_UTCL1ID_MASK 0x00003F00L
++#define CPG_UTCL1_STATUS__RETRY_UTCL1ID_MASK 0x003F0000L
++#define CPG_UTCL1_STATUS__PRT_UTCL1ID_MASK 0x3F000000L
++//CPC_UTCL1_STATUS
++#define CPC_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
++#define CPC_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
++#define CPC_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
++#define CPC_UTCL1_STATUS__FAULT_UTCL1ID__SHIFT 0x8
++#define CPC_UTCL1_STATUS__RETRY_UTCL1ID__SHIFT 0x10
++#define CPC_UTCL1_STATUS__PRT_UTCL1ID__SHIFT 0x18
++#define CPC_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
++#define CPC_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
++#define CPC_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
++#define CPC_UTCL1_STATUS__FAULT_UTCL1ID_MASK 0x00003F00L
++#define CPC_UTCL1_STATUS__RETRY_UTCL1ID_MASK 0x003F0000L
++#define CPC_UTCL1_STATUS__PRT_UTCL1ID_MASK 0x3F000000L
++//CPF_UTCL1_STATUS
++#define CPF_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
++#define CPF_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
++#define CPF_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
++#define CPF_UTCL1_STATUS__FAULT_UTCL1ID__SHIFT 0x8
++#define CPF_UTCL1_STATUS__RETRY_UTCL1ID__SHIFT 0x10
++#define CPF_UTCL1_STATUS__PRT_UTCL1ID__SHIFT 0x18
++#define CPF_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
++#define CPF_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
++#define CPF_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
++#define CPF_UTCL1_STATUS__FAULT_UTCL1ID_MASK 0x00003F00L
++#define CPF_UTCL1_STATUS__RETRY_UTCL1ID_MASK 0x003F0000L
++#define CPF_UTCL1_STATUS__PRT_UTCL1ID_MASK 0x3F000000L
++//CP_SD_CNTL
++#define CP_SD_CNTL__CPF_EN__SHIFT 0x0
++#define CP_SD_CNTL__CPG_EN__SHIFT 0x1
++#define CP_SD_CNTL__CPC_EN__SHIFT 0x2
++#define CP_SD_CNTL__RLC_EN__SHIFT 0x3
++#define CP_SD_CNTL__SPI_EN__SHIFT 0x4
++#define CP_SD_CNTL__WD_EN__SHIFT 0x5
++#define CP_SD_CNTL__IA_EN__SHIFT 0x6
++#define CP_SD_CNTL__PA_EN__SHIFT 0x7
++#define CP_SD_CNTL__RMI_EN__SHIFT 0x8
++#define CP_SD_CNTL__EA_EN__SHIFT 0x9
++#define CP_SD_CNTL__CPF_EN_MASK 0x00000001L
++#define CP_SD_CNTL__CPG_EN_MASK 0x00000002L
++#define CP_SD_CNTL__CPC_EN_MASK 0x00000004L
++#define CP_SD_CNTL__RLC_EN_MASK 0x00000008L
++#define CP_SD_CNTL__SPI_EN_MASK 0x00000010L
++#define CP_SD_CNTL__WD_EN_MASK 0x00000020L
++#define CP_SD_CNTL__IA_EN_MASK 0x00000040L
++#define CP_SD_CNTL__PA_EN_MASK 0x00000080L
++#define CP_SD_CNTL__RMI_EN_MASK 0x00000100L
++#define CP_SD_CNTL__EA_EN_MASK 0x00000200L
++//CP_SOFT_RESET_CNTL
++#define CP_SOFT_RESET_CNTL__CMP_ONLY_SOFT_RESET__SHIFT 0x0
++#define CP_SOFT_RESET_CNTL__GFX_ONLY_SOFT_RESET__SHIFT 0x1
++#define CP_SOFT_RESET_CNTL__CMP_HQD_REG_RESET__SHIFT 0x2
++#define CP_SOFT_RESET_CNTL__CMP_INTR_REG_RESET__SHIFT 0x3
++#define CP_SOFT_RESET_CNTL__CMP_HQD_QUEUE_DOORBELL_RESET__SHIFT 0x4
++#define CP_SOFT_RESET_CNTL__GFX_RB_DOORBELL_RESET__SHIFT 0x5
++#define CP_SOFT_RESET_CNTL__GFX_INTR_REG_RESET__SHIFT 0x6
++#define CP_SOFT_RESET_CNTL__CMP_ONLY_SOFT_RESET_MASK 0x00000001L
++#define CP_SOFT_RESET_CNTL__GFX_ONLY_SOFT_RESET_MASK 0x00000002L
++#define CP_SOFT_RESET_CNTL__CMP_HQD_REG_RESET_MASK 0x00000004L
++#define CP_SOFT_RESET_CNTL__CMP_INTR_REG_RESET_MASK 0x00000008L
++#define CP_SOFT_RESET_CNTL__CMP_HQD_QUEUE_DOORBELL_RESET_MASK 0x00000010L
++#define CP_SOFT_RESET_CNTL__GFX_RB_DOORBELL_RESET_MASK 0x00000020L
++#define CP_SOFT_RESET_CNTL__GFX_INTR_REG_RESET_MASK 0x00000040L
++//CP_CPC_GFX_CNTL
++#define CP_CPC_GFX_CNTL__QUEUEID__SHIFT 0x0
++#define CP_CPC_GFX_CNTL__PIPEID__SHIFT 0x3
++#define CP_CPC_GFX_CNTL__MEID__SHIFT 0x5
++#define CP_CPC_GFX_CNTL__VALID__SHIFT 0x7
++#define CP_CPC_GFX_CNTL__QUEUEID_MASK 0x00000007L
++#define CP_CPC_GFX_CNTL__PIPEID_MASK 0x00000018L
++#define CP_CPC_GFX_CNTL__MEID_MASK 0x00000060L
++#define CP_CPC_GFX_CNTL__VALID_MASK 0x00000080L
++
++
++// addressBlock: gc_spipdec
++//SPI_ARB_PRIORITY
++#define SPI_ARB_PRIORITY__PIPE_ORDER_TS0__SHIFT 0x0
++#define SPI_ARB_PRIORITY__PIPE_ORDER_TS1__SHIFT 0x3
++#define SPI_ARB_PRIORITY__PIPE_ORDER_TS2__SHIFT 0x6
++#define SPI_ARB_PRIORITY__PIPE_ORDER_TS3__SHIFT 0x9
++#define SPI_ARB_PRIORITY__TS0_DUR_MULT__SHIFT 0xc
++#define SPI_ARB_PRIORITY__TS1_DUR_MULT__SHIFT 0xe
++#define SPI_ARB_PRIORITY__TS2_DUR_MULT__SHIFT 0x10
++#define SPI_ARB_PRIORITY__TS3_DUR_MULT__SHIFT 0x12
++#define SPI_ARB_PRIORITY__PIPE_ORDER_TS0_MASK 0x00000007L
++#define SPI_ARB_PRIORITY__PIPE_ORDER_TS1_MASK 0x00000038L
++#define SPI_ARB_PRIORITY__PIPE_ORDER_TS2_MASK 0x000001C0L
++#define SPI_ARB_PRIORITY__PIPE_ORDER_TS3_MASK 0x00000E00L
++#define SPI_ARB_PRIORITY__TS0_DUR_MULT_MASK 0x00003000L
++#define SPI_ARB_PRIORITY__TS1_DUR_MULT_MASK 0x0000C000L
++#define SPI_ARB_PRIORITY__TS2_DUR_MULT_MASK 0x00030000L
++#define SPI_ARB_PRIORITY__TS3_DUR_MULT_MASK 0x000C0000L
++//SPI_ARB_CYCLES_0
++#define SPI_ARB_CYCLES_0__TS0_DURATION__SHIFT 0x0
++#define SPI_ARB_CYCLES_0__TS1_DURATION__SHIFT 0x10
++#define SPI_ARB_CYCLES_0__TS0_DURATION_MASK 0x0000FFFFL
++#define SPI_ARB_CYCLES_0__TS1_DURATION_MASK 0xFFFF0000L
++//SPI_ARB_CYCLES_1
++#define SPI_ARB_CYCLES_1__TS2_DURATION__SHIFT 0x0
++#define SPI_ARB_CYCLES_1__TS3_DURATION__SHIFT 0x10
++#define SPI_ARB_CYCLES_1__TS2_DURATION_MASK 0x0000FFFFL
++#define SPI_ARB_CYCLES_1__TS3_DURATION_MASK 0xFFFF0000L
++//SPI_WCL_PIPE_PERCENT_GFX
++#define SPI_WCL_PIPE_PERCENT_GFX__VALUE__SHIFT 0x0
++#define SPI_WCL_PIPE_PERCENT_GFX__LS_GRP_VALUE__SHIFT 0x7
++#define SPI_WCL_PIPE_PERCENT_GFX__HS_GRP_VALUE__SHIFT 0xc
++#define SPI_WCL_PIPE_PERCENT_GFX__ES_GRP_VALUE__SHIFT 0x11
++#define SPI_WCL_PIPE_PERCENT_GFX__GS_GRP_VALUE__SHIFT 0x16
++#define SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK 0x0000007FL
++#define SPI_WCL_PIPE_PERCENT_GFX__LS_GRP_VALUE_MASK 0x00000F80L
++#define SPI_WCL_PIPE_PERCENT_GFX__HS_GRP_VALUE_MASK 0x0001F000L
++#define SPI_WCL_PIPE_PERCENT_GFX__ES_GRP_VALUE_MASK 0x003E0000L
++#define SPI_WCL_PIPE_PERCENT_GFX__GS_GRP_VALUE_MASK 0x07C00000L
++//SPI_WCL_PIPE_PERCENT_HP3D
++#define SPI_WCL_PIPE_PERCENT_HP3D__VALUE__SHIFT 0x0
++#define SPI_WCL_PIPE_PERCENT_HP3D__HS_GRP_VALUE__SHIFT 0xc
++#define SPI_WCL_PIPE_PERCENT_HP3D__GS_GRP_VALUE__SHIFT 0x16
++#define SPI_WCL_PIPE_PERCENT_HP3D__VALUE_MASK 0x0000007FL
++#define SPI_WCL_PIPE_PERCENT_HP3D__HS_GRP_VALUE_MASK 0x0001F000L
++#define SPI_WCL_PIPE_PERCENT_HP3D__GS_GRP_VALUE_MASK 0x07C00000L
++//SPI_WCL_PIPE_PERCENT_CS0
++#define SPI_WCL_PIPE_PERCENT_CS0__VALUE__SHIFT 0x0
++#define SPI_WCL_PIPE_PERCENT_CS0__VALUE_MASK 0x7FL
++//SPI_WCL_PIPE_PERCENT_CS1
++#define SPI_WCL_PIPE_PERCENT_CS1__VALUE__SHIFT 0x0
++#define SPI_WCL_PIPE_PERCENT_CS1__VALUE_MASK 0x7FL
++//SPI_WCL_PIPE_PERCENT_CS2
++#define SPI_WCL_PIPE_PERCENT_CS2__VALUE__SHIFT 0x0
++#define SPI_WCL_PIPE_PERCENT_CS2__VALUE_MASK 0x7FL
++//SPI_WCL_PIPE_PERCENT_CS3
++#define SPI_WCL_PIPE_PERCENT_CS3__VALUE__SHIFT 0x0
++#define SPI_WCL_PIPE_PERCENT_CS3__VALUE_MASK 0x7FL
++//SPI_WCL_PIPE_PERCENT_CS4
++#define SPI_WCL_PIPE_PERCENT_CS4__VALUE__SHIFT 0x0
++#define SPI_WCL_PIPE_PERCENT_CS4__VALUE_MASK 0x7FL
++//SPI_WCL_PIPE_PERCENT_CS5
++#define SPI_WCL_PIPE_PERCENT_CS5__VALUE__SHIFT 0x0
++#define SPI_WCL_PIPE_PERCENT_CS5__VALUE_MASK 0x7FL
++//SPI_WCL_PIPE_PERCENT_CS6
++#define SPI_WCL_PIPE_PERCENT_CS6__VALUE__SHIFT 0x0
++#define SPI_WCL_PIPE_PERCENT_CS6__VALUE_MASK 0x7FL
++//SPI_WCL_PIPE_PERCENT_CS7
++#define SPI_WCL_PIPE_PERCENT_CS7__VALUE__SHIFT 0x0
++#define SPI_WCL_PIPE_PERCENT_CS7__VALUE_MASK 0x7FL
++//SPI_COMPUTE_QUEUE_RESET
++#define SPI_COMPUTE_QUEUE_RESET__RESET__SHIFT 0x0
++#define SPI_COMPUTE_QUEUE_RESET__RESET_MASK 0x01L
++//SPI_RESOURCE_RESERVE_CU_0
++#define SPI_RESOURCE_RESERVE_CU_0__VGPR__SHIFT 0x0
++#define SPI_RESOURCE_RESERVE_CU_0__SGPR__SHIFT 0x4
++#define SPI_RESOURCE_RESERVE_CU_0__LDS__SHIFT 0x8
++#define SPI_RESOURCE_RESERVE_CU_0__WAVES__SHIFT 0xc
++#define SPI_RESOURCE_RESERVE_CU_0__BARRIERS__SHIFT 0xf
++#define SPI_RESOURCE_RESERVE_CU_0__VGPR_MASK 0x0000000FL
++#define SPI_RESOURCE_RESERVE_CU_0__SGPR_MASK 0x000000F0L
++#define SPI_RESOURCE_RESERVE_CU_0__LDS_MASK 0x00000F00L
++#define SPI_RESOURCE_RESERVE_CU_0__WAVES_MASK 0x00007000L
++#define SPI_RESOURCE_RESERVE_CU_0__BARRIERS_MASK 0x00078000L
++//SPI_RESOURCE_RESERVE_CU_1
++#define SPI_RESOURCE_RESERVE_CU_1__VGPR__SHIFT 0x0
++#define SPI_RESOURCE_RESERVE_CU_1__SGPR__SHIFT 0x4
++#define SPI_RESOURCE_RESERVE_CU_1__LDS__SHIFT 0x8
++#define SPI_RESOURCE_RESERVE_CU_1__WAVES__SHIFT 0xc
++#define SPI_RESOURCE_RESERVE_CU_1__BARRIERS__SHIFT 0xf
++#define SPI_RESOURCE_RESERVE_CU_1__VGPR_MASK 0x0000000FL
++#define SPI_RESOURCE_RESERVE_CU_1__SGPR_MASK 0x000000F0L
++#define SPI_RESOURCE_RESERVE_CU_1__LDS_MASK 0x00000F00L
++#define SPI_RESOURCE_RESERVE_CU_1__WAVES_MASK 0x00007000L
++#define SPI_RESOURCE_RESERVE_CU_1__BARRIERS_MASK 0x00078000L
++//SPI_RESOURCE_RESERVE_CU_2
++#define SPI_RESOURCE_RESERVE_CU_2__VGPR__SHIFT 0x0
++#define SPI_RESOURCE_RESERVE_CU_2__SGPR__SHIFT 0x4
++#define SPI_RESOURCE_RESERVE_CU_2__LDS__SHIFT 0x8
++#define SPI_RESOURCE_RESERVE_CU_2__WAVES__SHIFT 0xc
++#define SPI_RESOURCE_RESERVE_CU_2__BARRIERS__SHIFT 0xf
++#define SPI_RESOURCE_RESERVE_CU_2__VGPR_MASK 0x0000000FL
++#define SPI_RESOURCE_RESERVE_CU_2__SGPR_MASK 0x000000F0L
++#define SPI_RESOURCE_RESERVE_CU_2__LDS_MASK 0x00000F00L
++#define SPI_RESOURCE_RESERVE_CU_2__WAVES_MASK 0x00007000L
++#define SPI_RESOURCE_RESERVE_CU_2__BARRIERS_MASK 0x00078000L
++//SPI_RESOURCE_RESERVE_CU_3
++#define SPI_RESOURCE_RESERVE_CU_3__VGPR__SHIFT 0x0
++#define SPI_RESOURCE_RESERVE_CU_3__SGPR__SHIFT 0x4
++#define SPI_RESOURCE_RESERVE_CU_3__LDS__SHIFT 0x8
++#define SPI_RESOURCE_RESERVE_CU_3__WAVES__SHIFT 0xc
++#define SPI_RESOURCE_RESERVE_CU_3__BARRIERS__SHIFT 0xf
++#define SPI_RESOURCE_RESERVE_CU_3__VGPR_MASK 0x0000000FL
++#define SPI_RESOURCE_RESERVE_CU_3__SGPR_MASK 0x000000F0L
++#define SPI_RESOURCE_RESERVE_CU_3__LDS_MASK 0x00000F00L
++#define SPI_RESOURCE_RESERVE_CU_3__WAVES_MASK 0x00007000L
++#define SPI_RESOURCE_RESERVE_CU_3__BARRIERS_MASK 0x00078000L
++//SPI_RESOURCE_RESERVE_CU_4
++#define SPI_RESOURCE_RESERVE_CU_4__VGPR__SHIFT 0x0
++#define SPI_RESOURCE_RESERVE_CU_4__SGPR__SHIFT 0x4
++#define SPI_RESOURCE_RESERVE_CU_4__LDS__SHIFT 0x8
++#define SPI_RESOURCE_RESERVE_CU_4__WAVES__SHIFT 0xc
++#define SPI_RESOURCE_RESERVE_CU_4__BARRIERS__SHIFT 0xf
++#define SPI_RESOURCE_RESERVE_CU_4__VGPR_MASK 0x0000000FL
++#define SPI_RESOURCE_RESERVE_CU_4__SGPR_MASK 0x000000F0L
++#define SPI_RESOURCE_RESERVE_CU_4__LDS_MASK 0x00000F00L
++#define SPI_RESOURCE_RESERVE_CU_4__WAVES_MASK 0x00007000L
++#define SPI_RESOURCE_RESERVE_CU_4__BARRIERS_MASK 0x00078000L
++//SPI_RESOURCE_RESERVE_CU_5
++#define SPI_RESOURCE_RESERVE_CU_5__VGPR__SHIFT 0x0
++#define SPI_RESOURCE_RESERVE_CU_5__SGPR__SHIFT 0x4
++#define SPI_RESOURCE_RESERVE_CU_5__LDS__SHIFT 0x8
++#define SPI_RESOURCE_RESERVE_CU_5__WAVES__SHIFT 0xc
++#define SPI_RESOURCE_RESERVE_CU_5__BARRIERS__SHIFT 0xf
++#define SPI_RESOURCE_RESERVE_CU_5__VGPR_MASK 0x0000000FL
++#define SPI_RESOURCE_RESERVE_CU_5__SGPR_MASK 0x000000F0L
++#define SPI_RESOURCE_RESERVE_CU_5__LDS_MASK 0x00000F00L
++#define SPI_RESOURCE_RESERVE_CU_5__WAVES_MASK 0x00007000L
++#define SPI_RESOURCE_RESERVE_CU_5__BARRIERS_MASK 0x00078000L
++//SPI_RESOURCE_RESERVE_CU_6
++#define SPI_RESOURCE_RESERVE_CU_6__VGPR__SHIFT 0x0
++#define SPI_RESOURCE_RESERVE_CU_6__SGPR__SHIFT 0x4
++#define SPI_RESOURCE_RESERVE_CU_6__LDS__SHIFT 0x8
++#define SPI_RESOURCE_RESERVE_CU_6__WAVES__SHIFT 0xc
++#define SPI_RESOURCE_RESERVE_CU_6__BARRIERS__SHIFT 0xf
++#define SPI_RESOURCE_RESERVE_CU_6__VGPR_MASK 0x0000000FL
++#define SPI_RESOURCE_RESERVE_CU_6__SGPR_MASK 0x000000F0L
++#define SPI_RESOURCE_RESERVE_CU_6__LDS_MASK 0x00000F00L
++#define SPI_RESOURCE_RESERVE_CU_6__WAVES_MASK 0x00007000L
++#define SPI_RESOURCE_RESERVE_CU_6__BARRIERS_MASK 0x00078000L
++//SPI_RESOURCE_RESERVE_CU_7
++#define SPI_RESOURCE_RESERVE_CU_7__VGPR__SHIFT 0x0
++#define SPI_RESOURCE_RESERVE_CU_7__SGPR__SHIFT 0x4
++#define SPI_RESOURCE_RESERVE_CU_7__LDS__SHIFT 0x8
++#define SPI_RESOURCE_RESERVE_CU_7__WAVES__SHIFT 0xc
++#define SPI_RESOURCE_RESERVE_CU_7__BARRIERS__SHIFT 0xf
++#define SPI_RESOURCE_RESERVE_CU_7__VGPR_MASK 0x0000000FL
++#define SPI_RESOURCE_RESERVE_CU_7__SGPR_MASK 0x000000F0L
++#define SPI_RESOURCE_RESERVE_CU_7__LDS_MASK 0x00000F00L
++#define SPI_RESOURCE_RESERVE_CU_7__WAVES_MASK 0x00007000L
++#define SPI_RESOURCE_RESERVE_CU_7__BARRIERS_MASK 0x00078000L
++//SPI_RESOURCE_RESERVE_CU_8
++#define SPI_RESOURCE_RESERVE_CU_8__VGPR__SHIFT 0x0
++#define SPI_RESOURCE_RESERVE_CU_8__SGPR__SHIFT 0x4
++#define SPI_RESOURCE_RESERVE_CU_8__LDS__SHIFT 0x8
++#define SPI_RESOURCE_RESERVE_CU_8__WAVES__SHIFT 0xc
++#define SPI_RESOURCE_RESERVE_CU_8__BARRIERS__SHIFT 0xf
++#define SPI_RESOURCE_RESERVE_CU_8__VGPR_MASK 0x0000000FL
++#define SPI_RESOURCE_RESERVE_CU_8__SGPR_MASK 0x000000F0L
++#define SPI_RESOURCE_RESERVE_CU_8__LDS_MASK 0x00000F00L
++#define SPI_RESOURCE_RESERVE_CU_8__WAVES_MASK 0x00007000L
++#define SPI_RESOURCE_RESERVE_CU_8__BARRIERS_MASK 0x00078000L
++//SPI_RESOURCE_RESERVE_CU_9
++#define SPI_RESOURCE_RESERVE_CU_9__VGPR__SHIFT 0x0
++#define SPI_RESOURCE_RESERVE_CU_9__SGPR__SHIFT 0x4
++#define SPI_RESOURCE_RESERVE_CU_9__LDS__SHIFT 0x8
++#define SPI_RESOURCE_RESERVE_CU_9__WAVES__SHIFT 0xc
++#define SPI_RESOURCE_RESERVE_CU_9__BARRIERS__SHIFT 0xf
++#define SPI_RESOURCE_RESERVE_CU_9__VGPR_MASK 0x0000000FL
++#define SPI_RESOURCE_RESERVE_CU_9__SGPR_MASK 0x000000F0L
++#define SPI_RESOURCE_RESERVE_CU_9__LDS_MASK 0x00000F00L
++#define SPI_RESOURCE_RESERVE_CU_9__WAVES_MASK 0x00007000L
++#define SPI_RESOURCE_RESERVE_CU_9__BARRIERS_MASK 0x00078000L
++//SPI_RESOURCE_RESERVE_EN_CU_0
++#define SPI_RESOURCE_RESERVE_EN_CU_0__EN__SHIFT 0x0
++#define SPI_RESOURCE_RESERVE_EN_CU_0__TYPE_MASK__SHIFT 0x1
++#define SPI_RESOURCE_RESERVE_EN_CU_0__QUEUE_MASK__SHIFT 0x10
++#define SPI_RESOURCE_RESERVE_EN_CU_0__RESERVE_SPACE_ONLY__SHIFT 0x18
++#define SPI_RESOURCE_RESERVE_EN_CU_0__EN_MASK 0x00000001L
++#define SPI_RESOURCE_RESERVE_EN_CU_0__TYPE_MASK_MASK 0x0000FFFEL
++#define SPI_RESOURCE_RESERVE_EN_CU_0__QUEUE_MASK_MASK 0x00FF0000L
++#define SPI_RESOURCE_RESERVE_EN_CU_0__RESERVE_SPACE_ONLY_MASK 0x01000000L
++//SPI_RESOURCE_RESERVE_EN_CU_1
++#define SPI_RESOURCE_RESERVE_EN_CU_1__EN__SHIFT 0x0
++#define SPI_RESOURCE_RESERVE_EN_CU_1__TYPE_MASK__SHIFT 0x1
++#define SPI_RESOURCE_RESERVE_EN_CU_1__QUEUE_MASK__SHIFT 0x10
++#define SPI_RESOURCE_RESERVE_EN_CU_1__RESERVE_SPACE_ONLY__SHIFT 0x18
++#define SPI_RESOURCE_RESERVE_EN_CU_1__EN_MASK 0x00000001L
++#define SPI_RESOURCE_RESERVE_EN_CU_1__TYPE_MASK_MASK 0x0000FFFEL
++#define SPI_RESOURCE_RESERVE_EN_CU_1__QUEUE_MASK_MASK 0x00FF0000L
++#define SPI_RESOURCE_RESERVE_EN_CU_1__RESERVE_SPACE_ONLY_MASK 0x01000000L
++//SPI_RESOURCE_RESERVE_EN_CU_2
++#define SPI_RESOURCE_RESERVE_EN_CU_2__EN__SHIFT 0x0
++#define SPI_RESOURCE_RESERVE_EN_CU_2__TYPE_MASK__SHIFT 0x1
++#define SPI_RESOURCE_RESERVE_EN_CU_2__QUEUE_MASK__SHIFT 0x10
++#define SPI_RESOURCE_RESERVE_EN_CU_2__RESERVE_SPACE_ONLY__SHIFT 0x18
++#define SPI_RESOURCE_RESERVE_EN_CU_2__EN_MASK 0x00000001L
++#define SPI_RESOURCE_RESERVE_EN_CU_2__TYPE_MASK_MASK 0x0000FFFEL
++#define SPI_RESOURCE_RESERVE_EN_CU_2__QUEUE_MASK_MASK 0x00FF0000L
++#define SPI_RESOURCE_RESERVE_EN_CU_2__RESERVE_SPACE_ONLY_MASK 0x01000000L
++//SPI_RESOURCE_RESERVE_EN_CU_3
++#define SPI_RESOURCE_RESERVE_EN_CU_3__EN__SHIFT 0x0
++#define SPI_RESOURCE_RESERVE_EN_CU_3__TYPE_MASK__SHIFT 0x1
++#define SPI_RESOURCE_RESERVE_EN_CU_3__QUEUE_MASK__SHIFT 0x10
++#define SPI_RESOURCE_RESERVE_EN_CU_3__RESERVE_SPACE_ONLY__SHIFT 0x18
++#define SPI_RESOURCE_RESERVE_EN_CU_3__EN_MASK 0x00000001L
++#define SPI_RESOURCE_RESERVE_EN_CU_3__TYPE_MASK_MASK 0x0000FFFEL
++#define SPI_RESOURCE_RESERVE_EN_CU_3__QUEUE_MASK_MASK 0x00FF0000L
++#define SPI_RESOURCE_RESERVE_EN_CU_3__RESERVE_SPACE_ONLY_MASK 0x01000000L
++//SPI_RESOURCE_RESERVE_EN_CU_4
++#define SPI_RESOURCE_RESERVE_EN_CU_4__EN__SHIFT 0x0
++#define SPI_RESOURCE_RESERVE_EN_CU_4__TYPE_MASK__SHIFT 0x1
++#define SPI_RESOURCE_RESERVE_EN_CU_4__QUEUE_MASK__SHIFT 0x10
++#define SPI_RESOURCE_RESERVE_EN_CU_4__RESERVE_SPACE_ONLY__SHIFT 0x18
++#define SPI_RESOURCE_RESERVE_EN_CU_4__EN_MASK 0x00000001L
++#define SPI_RESOURCE_RESERVE_EN_CU_4__TYPE_MASK_MASK 0x0000FFFEL
++#define SPI_RESOURCE_RESERVE_EN_CU_4__QUEUE_MASK_MASK 0x00FF0000L
++#define SPI_RESOURCE_RESERVE_EN_CU_4__RESERVE_SPACE_ONLY_MASK 0x01000000L
++//SPI_RESOURCE_RESERVE_EN_CU_5
++#define SPI_RESOURCE_RESERVE_EN_CU_5__EN__SHIFT 0x0
++#define SPI_RESOURCE_RESERVE_EN_CU_5__TYPE_MASK__SHIFT 0x1
++#define SPI_RESOURCE_RESERVE_EN_CU_5__QUEUE_MASK__SHIFT 0x10
++#define SPI_RESOURCE_RESERVE_EN_CU_5__RESERVE_SPACE_ONLY__SHIFT 0x18
++#define SPI_RESOURCE_RESERVE_EN_CU_5__EN_MASK 0x00000001L
++#define SPI_RESOURCE_RESERVE_EN_CU_5__TYPE_MASK_MASK 0x0000FFFEL
++#define SPI_RESOURCE_RESERVE_EN_CU_5__QUEUE_MASK_MASK 0x00FF0000L
++#define SPI_RESOURCE_RESERVE_EN_CU_5__RESERVE_SPACE_ONLY_MASK 0x01000000L
++//SPI_RESOURCE_RESERVE_EN_CU_6
++#define SPI_RESOURCE_RESERVE_EN_CU_6__EN__SHIFT 0x0
++#define SPI_RESOURCE_RESERVE_EN_CU_6__TYPE_MASK__SHIFT 0x1
++#define SPI_RESOURCE_RESERVE_EN_CU_6__QUEUE_MASK__SHIFT 0x10
++#define SPI_RESOURCE_RESERVE_EN_CU_6__RESERVE_SPACE_ONLY__SHIFT 0x18
++#define SPI_RESOURCE_RESERVE_EN_CU_6__EN_MASK 0x00000001L
++#define SPI_RESOURCE_RESERVE_EN_CU_6__TYPE_MASK_MASK 0x0000FFFEL
++#define SPI_RESOURCE_RESERVE_EN_CU_6__QUEUE_MASK_MASK 0x00FF0000L
++#define SPI_RESOURCE_RESERVE_EN_CU_6__RESERVE_SPACE_ONLY_MASK 0x01000000L
++//SPI_RESOURCE_RESERVE_EN_CU_7
++#define SPI_RESOURCE_RESERVE_EN_CU_7__EN__SHIFT 0x0
++#define SPI_RESOURCE_RESERVE_EN_CU_7__TYPE_MASK__SHIFT 0x1
++#define SPI_RESOURCE_RESERVE_EN_CU_7__QUEUE_MASK__SHIFT 0x10
++#define SPI_RESOURCE_RESERVE_EN_CU_7__RESERVE_SPACE_ONLY__SHIFT 0x18
++#define SPI_RESOURCE_RESERVE_EN_CU_7__EN_MASK 0x00000001L
++#define SPI_RESOURCE_RESERVE_EN_CU_7__TYPE_MASK_MASK 0x0000FFFEL
++#define SPI_RESOURCE_RESERVE_EN_CU_7__QUEUE_MASK_MASK 0x00FF0000L
++#define SPI_RESOURCE_RESERVE_EN_CU_7__RESERVE_SPACE_ONLY_MASK 0x01000000L
++//SPI_RESOURCE_RESERVE_EN_CU_8
++#define SPI_RESOURCE_RESERVE_EN_CU_8__EN__SHIFT 0x0
++#define SPI_RESOURCE_RESERVE_EN_CU_8__TYPE_MASK__SHIFT 0x1
++#define SPI_RESOURCE_RESERVE_EN_CU_8__QUEUE_MASK__SHIFT 0x10
++#define SPI_RESOURCE_RESERVE_EN_CU_8__RESERVE_SPACE_ONLY__SHIFT 0x18
++#define SPI_RESOURCE_RESERVE_EN_CU_8__EN_MASK 0x00000001L
++#define SPI_RESOURCE_RESERVE_EN_CU_8__TYPE_MASK_MASK 0x0000FFFEL
++#define SPI_RESOURCE_RESERVE_EN_CU_8__QUEUE_MASK_MASK 0x00FF0000L
++#define SPI_RESOURCE_RESERVE_EN_CU_8__RESERVE_SPACE_ONLY_MASK 0x01000000L
++//SPI_RESOURCE_RESERVE_EN_CU_9
++#define SPI_RESOURCE_RESERVE_EN_CU_9__EN__SHIFT 0x0
++#define SPI_RESOURCE_RESERVE_EN_CU_9__TYPE_MASK__SHIFT 0x1
++#define SPI_RESOURCE_RESERVE_EN_CU_9__QUEUE_MASK__SHIFT 0x10
++#define SPI_RESOURCE_RESERVE_EN_CU_9__RESERVE_SPACE_ONLY__SHIFT 0x18
++#define SPI_RESOURCE_RESERVE_EN_CU_9__EN_MASK 0x00000001L
++#define SPI_RESOURCE_RESERVE_EN_CU_9__TYPE_MASK_MASK 0x0000FFFEL
++#define SPI_RESOURCE_RESERVE_EN_CU_9__QUEUE_MASK_MASK 0x00FF0000L
++#define SPI_RESOURCE_RESERVE_EN_CU_9__RESERVE_SPACE_ONLY_MASK 0x01000000L
++//SPI_RESOURCE_RESERVE_CU_10
++#define SPI_RESOURCE_RESERVE_CU_10__VGPR__SHIFT 0x0
++#define SPI_RESOURCE_RESERVE_CU_10__SGPR__SHIFT 0x4
++#define SPI_RESOURCE_RESERVE_CU_10__LDS__SHIFT 0x8
++#define SPI_RESOURCE_RESERVE_CU_10__WAVES__SHIFT 0xc
++#define SPI_RESOURCE_RESERVE_CU_10__BARRIERS__SHIFT 0xf
++#define SPI_RESOURCE_RESERVE_CU_10__VGPR_MASK 0x0000000FL
++#define SPI_RESOURCE_RESERVE_CU_10__SGPR_MASK 0x000000F0L
++#define SPI_RESOURCE_RESERVE_CU_10__LDS_MASK 0x00000F00L
++#define SPI_RESOURCE_RESERVE_CU_10__WAVES_MASK 0x00007000L
++#define SPI_RESOURCE_RESERVE_CU_10__BARRIERS_MASK 0x00078000L
++//SPI_RESOURCE_RESERVE_CU_11
++#define SPI_RESOURCE_RESERVE_CU_11__VGPR__SHIFT 0x0
++#define SPI_RESOURCE_RESERVE_CU_11__SGPR__SHIFT 0x4
++#define SPI_RESOURCE_RESERVE_CU_11__LDS__SHIFT 0x8
++#define SPI_RESOURCE_RESERVE_CU_11__WAVES__SHIFT 0xc
++#define SPI_RESOURCE_RESERVE_CU_11__BARRIERS__SHIFT 0xf
++#define SPI_RESOURCE_RESERVE_CU_11__VGPR_MASK 0x0000000FL
++#define SPI_RESOURCE_RESERVE_CU_11__SGPR_MASK 0x000000F0L
++#define SPI_RESOURCE_RESERVE_CU_11__LDS_MASK 0x00000F00L
++#define SPI_RESOURCE_RESERVE_CU_11__WAVES_MASK 0x00007000L
++#define SPI_RESOURCE_RESERVE_CU_11__BARRIERS_MASK 0x00078000L
++//SPI_RESOURCE_RESERVE_EN_CU_10
++#define SPI_RESOURCE_RESERVE_EN_CU_10__EN__SHIFT 0x0
++#define SPI_RESOURCE_RESERVE_EN_CU_10__TYPE_MASK__SHIFT 0x1
++#define SPI_RESOURCE_RESERVE_EN_CU_10__QUEUE_MASK__SHIFT 0x10
++#define SPI_RESOURCE_RESERVE_EN_CU_10__RESERVE_SPACE_ONLY__SHIFT 0x18
++#define SPI_RESOURCE_RESERVE_EN_CU_10__EN_MASK 0x00000001L
++#define SPI_RESOURCE_RESERVE_EN_CU_10__TYPE_MASK_MASK 0x0000FFFEL
++#define SPI_RESOURCE_RESERVE_EN_CU_10__QUEUE_MASK_MASK 0x00FF0000L
++#define SPI_RESOURCE_RESERVE_EN_CU_10__RESERVE_SPACE_ONLY_MASK 0x01000000L
++//SPI_RESOURCE_RESERVE_EN_CU_11
++#define SPI_RESOURCE_RESERVE_EN_CU_11__EN__SHIFT 0x0
++#define SPI_RESOURCE_RESERVE_EN_CU_11__TYPE_MASK__SHIFT 0x1
++#define SPI_RESOURCE_RESERVE_EN_CU_11__QUEUE_MASK__SHIFT 0x10
++#define SPI_RESOURCE_RESERVE_EN_CU_11__RESERVE_SPACE_ONLY__SHIFT 0x18
++#define SPI_RESOURCE_RESERVE_EN_CU_11__EN_MASK 0x00000001L
++#define SPI_RESOURCE_RESERVE_EN_CU_11__TYPE_MASK_MASK 0x0000FFFEL
++#define SPI_RESOURCE_RESERVE_EN_CU_11__QUEUE_MASK_MASK 0x00FF0000L
++#define SPI_RESOURCE_RESERVE_EN_CU_11__RESERVE_SPACE_ONLY_MASK 0x01000000L
++//SPI_RESOURCE_RESERVE_CU_12
++#define SPI_RESOURCE_RESERVE_CU_12__VGPR__SHIFT 0x0
++#define SPI_RESOURCE_RESERVE_CU_12__SGPR__SHIFT 0x4
++#define SPI_RESOURCE_RESERVE_CU_12__LDS__SHIFT 0x8
++#define SPI_RESOURCE_RESERVE_CU_12__WAVES__SHIFT 0xc
++#define SPI_RESOURCE_RESERVE_CU_12__BARRIERS__SHIFT 0xf
++#define SPI_RESOURCE_RESERVE_CU_12__VGPR_MASK 0x0000000FL
++#define SPI_RESOURCE_RESERVE_CU_12__SGPR_MASK 0x000000F0L
++#define SPI_RESOURCE_RESERVE_CU_12__LDS_MASK 0x00000F00L
++#define SPI_RESOURCE_RESERVE_CU_12__WAVES_MASK 0x00007000L
++#define SPI_RESOURCE_RESERVE_CU_12__BARRIERS_MASK 0x00078000L
++//SPI_RESOURCE_RESERVE_CU_13
++#define SPI_RESOURCE_RESERVE_CU_13__VGPR__SHIFT 0x0
++#define SPI_RESOURCE_RESERVE_CU_13__SGPR__SHIFT 0x4
++#define SPI_RESOURCE_RESERVE_CU_13__LDS__SHIFT 0x8
++#define SPI_RESOURCE_RESERVE_CU_13__WAVES__SHIFT 0xc
++#define SPI_RESOURCE_RESERVE_CU_13__BARRIERS__SHIFT 0xf
++#define SPI_RESOURCE_RESERVE_CU_13__VGPR_MASK 0x0000000FL
++#define SPI_RESOURCE_RESERVE_CU_13__SGPR_MASK 0x000000F0L
++#define SPI_RESOURCE_RESERVE_CU_13__LDS_MASK 0x00000F00L
++#define SPI_RESOURCE_RESERVE_CU_13__WAVES_MASK 0x00007000L
++#define SPI_RESOURCE_RESERVE_CU_13__BARRIERS_MASK 0x00078000L
++//SPI_RESOURCE_RESERVE_CU_14
++#define SPI_RESOURCE_RESERVE_CU_14__VGPR__SHIFT 0x0
++#define SPI_RESOURCE_RESERVE_CU_14__SGPR__SHIFT 0x4
++#define SPI_RESOURCE_RESERVE_CU_14__LDS__SHIFT 0x8
++#define SPI_RESOURCE_RESERVE_CU_14__WAVES__SHIFT 0xc
++#define SPI_RESOURCE_RESERVE_CU_14__BARRIERS__SHIFT 0xf
++#define SPI_RESOURCE_RESERVE_CU_14__VGPR_MASK 0x0000000FL
++#define SPI_RESOURCE_RESERVE_CU_14__SGPR_MASK 0x000000F0L
++#define SPI_RESOURCE_RESERVE_CU_14__LDS_MASK 0x00000F00L
++#define SPI_RESOURCE_RESERVE_CU_14__WAVES_MASK 0x00007000L
++#define SPI_RESOURCE_RESERVE_CU_14__BARRIERS_MASK 0x00078000L
++//SPI_RESOURCE_RESERVE_CU_15
++#define SPI_RESOURCE_RESERVE_CU_15__VGPR__SHIFT 0x0
++#define SPI_RESOURCE_RESERVE_CU_15__SGPR__SHIFT 0x4
++#define SPI_RESOURCE_RESERVE_CU_15__LDS__SHIFT 0x8
++#define SPI_RESOURCE_RESERVE_CU_15__WAVES__SHIFT 0xc
++#define SPI_RESOURCE_RESERVE_CU_15__BARRIERS__SHIFT 0xf
++#define SPI_RESOURCE_RESERVE_CU_15__VGPR_MASK 0x0000000FL
++#define SPI_RESOURCE_RESERVE_CU_15__SGPR_MASK 0x000000F0L
++#define SPI_RESOURCE_RESERVE_CU_15__LDS_MASK 0x00000F00L
++#define SPI_RESOURCE_RESERVE_CU_15__WAVES_MASK 0x00007000L
++#define SPI_RESOURCE_RESERVE_CU_15__BARRIERS_MASK 0x00078000L
++//SPI_RESOURCE_RESERVE_EN_CU_12
++#define SPI_RESOURCE_RESERVE_EN_CU_12__EN__SHIFT 0x0
++#define SPI_RESOURCE_RESERVE_EN_CU_12__TYPE_MASK__SHIFT 0x1
++#define SPI_RESOURCE_RESERVE_EN_CU_12__QUEUE_MASK__SHIFT 0x10
++#define SPI_RESOURCE_RESERVE_EN_CU_12__RESERVE_SPACE_ONLY__SHIFT 0x18
++#define SPI_RESOURCE_RESERVE_EN_CU_12__EN_MASK 0x00000001L
++#define SPI_RESOURCE_RESERVE_EN_CU_12__TYPE_MASK_MASK 0x0000FFFEL
++#define SPI_RESOURCE_RESERVE_EN_CU_12__QUEUE_MASK_MASK 0x00FF0000L
++#define SPI_RESOURCE_RESERVE_EN_CU_12__RESERVE_SPACE_ONLY_MASK 0x01000000L
++//SPI_RESOURCE_RESERVE_EN_CU_13
++#define SPI_RESOURCE_RESERVE_EN_CU_13__EN__SHIFT 0x0
++#define SPI_RESOURCE_RESERVE_EN_CU_13__TYPE_MASK__SHIFT 0x1
++#define SPI_RESOURCE_RESERVE_EN_CU_13__QUEUE_MASK__SHIFT 0x10
++#define SPI_RESOURCE_RESERVE_EN_CU_13__RESERVE_SPACE_ONLY__SHIFT 0x18
++#define SPI_RESOURCE_RESERVE_EN_CU_13__EN_MASK 0x00000001L
++#define SPI_RESOURCE_RESERVE_EN_CU_13__TYPE_MASK_MASK 0x0000FFFEL
++#define SPI_RESOURCE_RESERVE_EN_CU_13__QUEUE_MASK_MASK 0x00FF0000L
++#define SPI_RESOURCE_RESERVE_EN_CU_13__RESERVE_SPACE_ONLY_MASK 0x01000000L
++//SPI_RESOURCE_RESERVE_EN_CU_14
++#define SPI_RESOURCE_RESERVE_EN_CU_14__EN__SHIFT 0x0
++#define SPI_RESOURCE_RESERVE_EN_CU_14__TYPE_MASK__SHIFT 0x1
++#define SPI_RESOURCE_RESERVE_EN_CU_14__QUEUE_MASK__SHIFT 0x10
++#define SPI_RESOURCE_RESERVE_EN_CU_14__RESERVE_SPACE_ONLY__SHIFT 0x18
++#define SPI_RESOURCE_RESERVE_EN_CU_14__EN_MASK 0x00000001L
++#define SPI_RESOURCE_RESERVE_EN_CU_14__TYPE_MASK_MASK 0x0000FFFEL
++#define SPI_RESOURCE_RESERVE_EN_CU_14__QUEUE_MASK_MASK 0x00FF0000L
++#define SPI_RESOURCE_RESERVE_EN_CU_14__RESERVE_SPACE_ONLY_MASK 0x01000000L
++//SPI_RESOURCE_RESERVE_EN_CU_15
++#define SPI_RESOURCE_RESERVE_EN_CU_15__EN__SHIFT 0x0
++#define SPI_RESOURCE_RESERVE_EN_CU_15__TYPE_MASK__SHIFT 0x1
++#define SPI_RESOURCE_RESERVE_EN_CU_15__QUEUE_MASK__SHIFT 0x10
++#define SPI_RESOURCE_RESERVE_EN_CU_15__RESERVE_SPACE_ONLY__SHIFT 0x18
++#define SPI_RESOURCE_RESERVE_EN_CU_15__EN_MASK 0x00000001L
++#define SPI_RESOURCE_RESERVE_EN_CU_15__TYPE_MASK_MASK 0x0000FFFEL
++#define SPI_RESOURCE_RESERVE_EN_CU_15__QUEUE_MASK_MASK 0x00FF0000L
++#define SPI_RESOURCE_RESERVE_EN_CU_15__RESERVE_SPACE_ONLY_MASK 0x01000000L
++//SPI_COMPUTE_WF_CTX_SAVE
++#define SPI_COMPUTE_WF_CTX_SAVE__INITIATE__SHIFT 0x0
++#define SPI_COMPUTE_WF_CTX_SAVE__GDS_INTERRUPT_EN__SHIFT 0x1
++#define SPI_COMPUTE_WF_CTX_SAVE__DONE_INTERRUPT_EN__SHIFT 0x2
++#define SPI_COMPUTE_WF_CTX_SAVE__GDS_REQ_BUSY__SHIFT 0x1e
++#define SPI_COMPUTE_WF_CTX_SAVE__SAVE_BUSY__SHIFT 0x1f
++#define SPI_COMPUTE_WF_CTX_SAVE__INITIATE_MASK 0x00000001L
++#define SPI_COMPUTE_WF_CTX_SAVE__GDS_INTERRUPT_EN_MASK 0x00000002L
++#define SPI_COMPUTE_WF_CTX_SAVE__DONE_INTERRUPT_EN_MASK 0x00000004L
++#define SPI_COMPUTE_WF_CTX_SAVE__GDS_REQ_BUSY_MASK 0x40000000L
++#define SPI_COMPUTE_WF_CTX_SAVE__SAVE_BUSY_MASK 0x80000000L
++//SPI_ARB_CNTL_0
++#define SPI_ARB_CNTL_0__EXP_ARB_COL_WT__SHIFT 0x0
++#define SPI_ARB_CNTL_0__EXP_ARB_POS_WT__SHIFT 0x4
++#define SPI_ARB_CNTL_0__EXP_ARB_GDS_WT__SHIFT 0x8
++#define SPI_ARB_CNTL_0__EXP_ARB_COL_WT_MASK 0x0000000FL
++#define SPI_ARB_CNTL_0__EXP_ARB_POS_WT_MASK 0x000000F0L
++#define SPI_ARB_CNTL_0__EXP_ARB_GDS_WT_MASK 0x00000F00L
++
++
++// addressBlock: gc_cpphqddec
++//CP_HQD_GFX_CONTROL
++#define CP_HQD_GFX_CONTROL__MESSAGE__SHIFT 0x0
++#define CP_HQD_GFX_CONTROL__MISC__SHIFT 0x4
++#define CP_HQD_GFX_CONTROL__DB_UPDATED_MSG_EN__SHIFT 0xf
++#define CP_HQD_GFX_CONTROL__MESSAGE_MASK 0x0000000FL
++#define CP_HQD_GFX_CONTROL__MISC_MASK 0x00007FF0L
++#define CP_HQD_GFX_CONTROL__DB_UPDATED_MSG_EN_MASK 0x00008000L
++//CP_HQD_GFX_STATUS
++#define CP_HQD_GFX_STATUS__STATUS__SHIFT 0x0
++#define CP_HQD_GFX_STATUS__STATUS_MASK 0x0000FFFFL
++//CP_HPD_ROQ_OFFSETS
++#define CP_HPD_ROQ_OFFSETS__IQ_OFFSET__SHIFT 0x0
++#define CP_HPD_ROQ_OFFSETS__PQ_OFFSET__SHIFT 0x8
++#define CP_HPD_ROQ_OFFSETS__IB_OFFSET__SHIFT 0x10
++#define CP_HPD_ROQ_OFFSETS__IQ_OFFSET_MASK 0x00000007L
++#define CP_HPD_ROQ_OFFSETS__PQ_OFFSET_MASK 0x00003F00L
++#define CP_HPD_ROQ_OFFSETS__IB_OFFSET_MASK 0x003F0000L
++//CP_HPD_STATUS0
++#define CP_HPD_STATUS0__QUEUE_STATE__SHIFT 0x0
++#define CP_HPD_STATUS0__MAPPED_QUEUE__SHIFT 0x5
++#define CP_HPD_STATUS0__QUEUE_AVAILABLE__SHIFT 0x8
++#define CP_HPD_STATUS0__FETCHING_MQD__SHIFT 0x10
++#define CP_HPD_STATUS0__PEND_TXFER_SIZE_PQIB__SHIFT 0x11
++#define CP_HPD_STATUS0__PEND_TXFER_SIZE_IQ__SHIFT 0x12
++#define CP_HPD_STATUS0__FORCE_QUEUE_STATE__SHIFT 0x14
++#define CP_HPD_STATUS0__FORCE_QUEUE__SHIFT 0x1f
++#define CP_HPD_STATUS0__QUEUE_STATE_MASK 0x0000001FL
++#define CP_HPD_STATUS0__MAPPED_QUEUE_MASK 0x000000E0L
++#define CP_HPD_STATUS0__QUEUE_AVAILABLE_MASK 0x0000FF00L
++#define CP_HPD_STATUS0__FETCHING_MQD_MASK 0x00010000L
++#define CP_HPD_STATUS0__PEND_TXFER_SIZE_PQIB_MASK 0x00020000L
++#define CP_HPD_STATUS0__PEND_TXFER_SIZE_IQ_MASK 0x00040000L
++#define CP_HPD_STATUS0__FORCE_QUEUE_STATE_MASK 0x01F00000L
++#define CP_HPD_STATUS0__FORCE_QUEUE_MASK 0x80000000L
++//CP_HPD_UTCL1_CNTL
++#define CP_HPD_UTCL1_CNTL__SELECT__SHIFT 0x0
++#define CP_HPD_UTCL1_CNTL__SELECT_MASK 0x0000000FL
++//CP_HPD_UTCL1_ERROR
++#define CP_HPD_UTCL1_ERROR__ADDR_HI__SHIFT 0x0
++#define CP_HPD_UTCL1_ERROR__TYPE__SHIFT 0x10
++#define CP_HPD_UTCL1_ERROR__VMID__SHIFT 0x14
++#define CP_HPD_UTCL1_ERROR__ADDR_HI_MASK 0x0000FFFFL
++#define CP_HPD_UTCL1_ERROR__TYPE_MASK 0x00010000L
++#define CP_HPD_UTCL1_ERROR__VMID_MASK 0x00F00000L
++//CP_HPD_UTCL1_ERROR_ADDR
++#define CP_HPD_UTCL1_ERROR_ADDR__ADDR__SHIFT 0xc
++#define CP_HPD_UTCL1_ERROR_ADDR__ADDR_MASK 0xFFFFF000L
++//CP_MQD_BASE_ADDR
++#define CP_MQD_BASE_ADDR__BASE_ADDR__SHIFT 0x2
++#define CP_MQD_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFCL
++//CP_MQD_BASE_ADDR_HI
++#define CP_MQD_BASE_ADDR_HI__BASE_ADDR_HI__SHIFT 0x0
++#define CP_MQD_BASE_ADDR_HI__BASE_ADDR_HI_MASK 0x0000FFFFL
++//CP_HQD_ACTIVE
++#define CP_HQD_ACTIVE__ACTIVE__SHIFT 0x0
++#define CP_HQD_ACTIVE__BUSY_GATE__SHIFT 0x1
++#define CP_HQD_ACTIVE__ACTIVE_MASK 0x00000001L
++#define CP_HQD_ACTIVE__BUSY_GATE_MASK 0x00000002L
++//CP_HQD_VMID
++#define CP_HQD_VMID__VMID__SHIFT 0x0
++#define CP_HQD_VMID__IB_VMID__SHIFT 0x8
++#define CP_HQD_VMID__VQID__SHIFT 0x10
++#define CP_HQD_VMID__VMID_MASK 0x0000000FL
++#define CP_HQD_VMID__IB_VMID_MASK 0x00000F00L
++#define CP_HQD_VMID__VQID_MASK 0x03FF0000L
++//CP_HQD_PERSISTENT_STATE
++#define CP_HQD_PERSISTENT_STATE__PRELOAD_REQ__SHIFT 0x0
++#define CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT 0x8
++#define CP_HQD_PERSISTENT_STATE__WPP_SWITCH_QOS_EN__SHIFT 0x15
++#define CP_HQD_PERSISTENT_STATE__IQ_SWITCH_QOS_EN__SHIFT 0x16
++#define CP_HQD_PERSISTENT_STATE__IB_SWITCH_QOS_EN__SHIFT 0x17
++#define CP_HQD_PERSISTENT_STATE__EOP_SWITCH_QOS_EN__SHIFT 0x18
++#define CP_HQD_PERSISTENT_STATE__PQ_SWITCH_QOS_EN__SHIFT 0x19
++#define CP_HQD_PERSISTENT_STATE__TC_OFFLOAD_QOS_EN__SHIFT 0x1a
++#define CP_HQD_PERSISTENT_STATE__CACHE_FULL_PACKET_EN__SHIFT 0x1b
++#define CP_HQD_PERSISTENT_STATE__RESTORE_ACTIVE__SHIFT 0x1c
++#define CP_HQD_PERSISTENT_STATE__RELAUNCH_WAVES__SHIFT 0x1d
++#define CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT 0x1e
++#define CP_HQD_PERSISTENT_STATE__DISP_ACTIVE__SHIFT 0x1f
++#define CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK 0x00000001L
++#define CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE_MASK 0x0003FF00L
++#define CP_HQD_PERSISTENT_STATE__WPP_SWITCH_QOS_EN_MASK 0x00200000L
++#define CP_HQD_PERSISTENT_STATE__IQ_SWITCH_QOS_EN_MASK 0x00400000L
++#define CP_HQD_PERSISTENT_STATE__IB_SWITCH_QOS_EN_MASK 0x00800000L
++#define CP_HQD_PERSISTENT_STATE__EOP_SWITCH_QOS_EN_MASK 0x01000000L
++#define CP_HQD_PERSISTENT_STATE__PQ_SWITCH_QOS_EN_MASK 0x02000000L
++#define CP_HQD_PERSISTENT_STATE__TC_OFFLOAD_QOS_EN_MASK 0x04000000L
++#define CP_HQD_PERSISTENT_STATE__CACHE_FULL_PACKET_EN_MASK 0x08000000L
++#define CP_HQD_PERSISTENT_STATE__RESTORE_ACTIVE_MASK 0x10000000L
++#define CP_HQD_PERSISTENT_STATE__RELAUNCH_WAVES_MASK 0x20000000L
++#define CP_HQD_PERSISTENT_STATE__QSWITCH_MODE_MASK 0x40000000L
++#define CP_HQD_PERSISTENT_STATE__DISP_ACTIVE_MASK 0x80000000L
++//CP_HQD_PIPE_PRIORITY
++#define CP_HQD_PIPE_PRIORITY__PIPE_PRIORITY__SHIFT 0x0
++#define CP_HQD_PIPE_PRIORITY__PIPE_PRIORITY_MASK 0x00000003L
++//CP_HQD_QUEUE_PRIORITY
++#define CP_HQD_QUEUE_PRIORITY__PRIORITY_LEVEL__SHIFT 0x0
++#define CP_HQD_QUEUE_PRIORITY__PRIORITY_LEVEL_MASK 0x0000000FL
++//CP_HQD_QUANTUM
++#define CP_HQD_QUANTUM__QUANTUM_EN__SHIFT 0x0
++#define CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT 0x4
++#define CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT 0x8
++#define CP_HQD_QUANTUM__QUANTUM_ACTIVE__SHIFT 0x1f
++#define CP_HQD_QUANTUM__QUANTUM_EN_MASK 0x00000001L
++#define CP_HQD_QUANTUM__QUANTUM_SCALE_MASK 0x00000010L
++#define CP_HQD_QUANTUM__QUANTUM_DURATION_MASK 0x00003F00L
++#define CP_HQD_QUANTUM__QUANTUM_ACTIVE_MASK 0x80000000L
++//CP_HQD_PQ_BASE
++#define CP_HQD_PQ_BASE__ADDR__SHIFT 0x0
++#define CP_HQD_PQ_BASE__ADDR_MASK 0xFFFFFFFFL
++//CP_HQD_PQ_BASE_HI
++#define CP_HQD_PQ_BASE_HI__ADDR_HI__SHIFT 0x0
++#define CP_HQD_PQ_BASE_HI__ADDR_HI_MASK 0x000000FFL
++//CP_HQD_PQ_RPTR
++#define CP_HQD_PQ_RPTR__CONSUMED_OFFSET__SHIFT 0x0
++#define CP_HQD_PQ_RPTR__CONSUMED_OFFSET_MASK 0xFFFFFFFFL
++//CP_HQD_PQ_RPTR_REPORT_ADDR
++#define CP_HQD_PQ_RPTR_REPORT_ADDR__RPTR_REPORT_ADDR__SHIFT 0x2
++#define CP_HQD_PQ_RPTR_REPORT_ADDR__RPTR_REPORT_ADDR_MASK 0xFFFFFFFCL
++//CP_HQD_PQ_RPTR_REPORT_ADDR_HI
++#define CP_HQD_PQ_RPTR_REPORT_ADDR_HI__RPTR_REPORT_ADDR_HI__SHIFT 0x0
++#define CP_HQD_PQ_RPTR_REPORT_ADDR_HI__RPTR_REPORT_ADDR_HI_MASK 0x0000FFFFL
++//CP_HQD_PQ_WPTR_POLL_ADDR
++#define CP_HQD_PQ_WPTR_POLL_ADDR__WPTR_ADDR__SHIFT 0x3
++#define CP_HQD_PQ_WPTR_POLL_ADDR__WPTR_ADDR_MASK 0xFFFFFFF8L
++//CP_HQD_PQ_WPTR_POLL_ADDR_HI
++#define CP_HQD_PQ_WPTR_POLL_ADDR_HI__WPTR_ADDR_HI__SHIFT 0x0
++#define CP_HQD_PQ_WPTR_POLL_ADDR_HI__WPTR_ADDR_HI_MASK 0x0000FFFFL
++//CP_HQD_PQ_DOORBELL_CONTROL
++#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_MODE__SHIFT 0x0
++#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT 0x1
++#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT 0x2
++#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SOURCE__SHIFT 0x1c
++#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SCHD_HIT__SHIFT 0x1d
++#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN__SHIFT 0x1e
++#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_HIT__SHIFT 0x1f
++#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_MODE_MASK 0x00000001L
++#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP_MASK 0x00000002L
++#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
++#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SOURCE_MASK 0x10000000L
++#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SCHD_HIT_MASK 0x20000000L
++#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK 0x40000000L
++#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_HIT_MASK 0x80000000L
++//CP_HQD_PQ_CONTROL
++#define CP_HQD_PQ_CONTROL__QUEUE_SIZE__SHIFT 0x0
++#define CP_HQD_PQ_CONTROL__WPTR_CARRY__SHIFT 0x6
++#define CP_HQD_PQ_CONTROL__RPTR_CARRY__SHIFT 0x7
++#define CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT 0x8
++#define CP_HQD_PQ_CONTROL__QUEUE_FULL_EN__SHIFT 0xe
++#define CP_HQD_PQ_CONTROL__PQ_EMPTY__SHIFT 0xf
++#define CP_HQD_PQ_CONTROL__WPP_CLAMP_EN__SHIFT 0x10
++#define CP_HQD_PQ_CONTROL__ENDIAN_SWAP__SHIFT 0x11
++#define CP_HQD_PQ_CONTROL__MIN_AVAIL_SIZE__SHIFT 0x14
++#define CP_HQD_PQ_CONTROL__EXE_DISABLE__SHIFT 0x17
++#define CP_HQD_PQ_CONTROL__CACHE_POLICY__SHIFT 0x18
++#define CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT 0x19
++#define CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR__SHIFT 0x1b
++#define CP_HQD_PQ_CONTROL__UNORD_DISPATCH__SHIFT 0x1c
++#define CP_HQD_PQ_CONTROL__ROQ_PQ_IB_FLIP__SHIFT 0x1d
++#define CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT 0x1e
++#define CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT 0x1f
++#define CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK 0x0000003FL
++#define CP_HQD_PQ_CONTROL__WPTR_CARRY_MASK 0x00000040L
++#define CP_HQD_PQ_CONTROL__RPTR_CARRY_MASK 0x00000080L
++#define CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE_MASK 0x00003F00L
++#define CP_HQD_PQ_CONTROL__QUEUE_FULL_EN_MASK 0x00004000L
++#define CP_HQD_PQ_CONTROL__PQ_EMPTY_MASK 0x00008000L
++#define CP_HQD_PQ_CONTROL__WPP_CLAMP_EN_MASK 0x00010000L
++#define CP_HQD_PQ_CONTROL__ENDIAN_SWAP_MASK 0x00060000L
++#define CP_HQD_PQ_CONTROL__MIN_AVAIL_SIZE_MASK 0x00300000L
++#define CP_HQD_PQ_CONTROL__EXE_DISABLE_MASK 0x00800000L
++#define CP_HQD_PQ_CONTROL__CACHE_POLICY_MASK 0x01000000L
++#define CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR_MASK 0x06000000L
++#define CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK 0x08000000L
++#define CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK 0x10000000L
++#define CP_HQD_PQ_CONTROL__ROQ_PQ_IB_FLIP_MASK 0x20000000L
++#define CP_HQD_PQ_CONTROL__PRIV_STATE_MASK 0x40000000L
++#define CP_HQD_PQ_CONTROL__KMD_QUEUE_MASK 0x80000000L
++//CP_HQD_IB_BASE_ADDR
++#define CP_HQD_IB_BASE_ADDR__IB_BASE_ADDR__SHIFT 0x2
++#define CP_HQD_IB_BASE_ADDR__IB_BASE_ADDR_MASK 0xFFFFFFFCL
++//CP_HQD_IB_BASE_ADDR_HI
++#define CP_HQD_IB_BASE_ADDR_HI__IB_BASE_ADDR_HI__SHIFT 0x0
++#define CP_HQD_IB_BASE_ADDR_HI__IB_BASE_ADDR_HI_MASK 0x0000FFFFL
++//CP_HQD_IB_RPTR
++#define CP_HQD_IB_RPTR__CONSUMED_OFFSET__SHIFT 0x0
++#define CP_HQD_IB_RPTR__CONSUMED_OFFSET_MASK 0x000FFFFFL
++//CP_HQD_IB_CONTROL
++#define CP_HQD_IB_CONTROL__IB_SIZE__SHIFT 0x0
++#define CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT 0x14
++#define CP_HQD_IB_CONTROL__IB_EXE_DISABLE__SHIFT 0x17
++#define CP_HQD_IB_CONTROL__IB_CACHE_POLICY__SHIFT 0x18
++#define CP_HQD_IB_CONTROL__PROCESSING_IB__SHIFT 0x1f
++#define CP_HQD_IB_CONTROL__IB_SIZE_MASK 0x000FFFFFL
++#define CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE_MASK 0x00300000L
++#define CP_HQD_IB_CONTROL__IB_EXE_DISABLE_MASK 0x00800000L
++#define CP_HQD_IB_CONTROL__IB_CACHE_POLICY_MASK 0x01000000L
++#define CP_HQD_IB_CONTROL__PROCESSING_IB_MASK 0x80000000L
++//CP_HQD_IQ_TIMER
++#define CP_HQD_IQ_TIMER__WAIT_TIME__SHIFT 0x0
++#define CP_HQD_IQ_TIMER__RETRY_TYPE__SHIFT 0x8
++#define CP_HQD_IQ_TIMER__IMMEDIATE_EXPIRE__SHIFT 0xb
++#define CP_HQD_IQ_TIMER__INTERRUPT_TYPE__SHIFT 0xc
++#define CP_HQD_IQ_TIMER__CLOCK_COUNT__SHIFT 0xe
++#define CP_HQD_IQ_TIMER__INTERRUPT_SIZE__SHIFT 0x10
++#define CP_HQD_IQ_TIMER__QUANTUM_TIMER__SHIFT 0x16
++#define CP_HQD_IQ_TIMER__EXE_DISABLE__SHIFT 0x17
++#define CP_HQD_IQ_TIMER__CACHE_POLICY__SHIFT 0x18
++#define CP_HQD_IQ_TIMER__QUEUE_TYPE__SHIFT 0x19
++#define CP_HQD_IQ_TIMER__REARM_TIMER__SHIFT 0x1c
++#define CP_HQD_IQ_TIMER__PROCESS_IQ_EN__SHIFT 0x1d
++#define CP_HQD_IQ_TIMER__PROCESSING_IQ__SHIFT 0x1e
++#define CP_HQD_IQ_TIMER__ACTIVE__SHIFT 0x1f
++#define CP_HQD_IQ_TIMER__WAIT_TIME_MASK 0x000000FFL
++#define CP_HQD_IQ_TIMER__RETRY_TYPE_MASK 0x00000700L
++#define CP_HQD_IQ_TIMER__IMMEDIATE_EXPIRE_MASK 0x00000800L
++#define CP_HQD_IQ_TIMER__INTERRUPT_TYPE_MASK 0x00003000L
++#define CP_HQD_IQ_TIMER__CLOCK_COUNT_MASK 0x0000C000L
++#define CP_HQD_IQ_TIMER__INTERRUPT_SIZE_MASK 0x003F0000L
++#define CP_HQD_IQ_TIMER__QUANTUM_TIMER_MASK 0x00400000L
++#define CP_HQD_IQ_TIMER__EXE_DISABLE_MASK 0x00800000L
++#define CP_HQD_IQ_TIMER__CACHE_POLICY_MASK 0x01000000L
++#define CP_HQD_IQ_TIMER__QUEUE_TYPE_MASK 0x02000000L
++#define CP_HQD_IQ_TIMER__REARM_TIMER_MASK 0x10000000L
++#define CP_HQD_IQ_TIMER__PROCESS_IQ_EN_MASK 0x20000000L
++#define CP_HQD_IQ_TIMER__PROCESSING_IQ_MASK 0x40000000L
++#define CP_HQD_IQ_TIMER__ACTIVE_MASK 0x80000000L
++//CP_HQD_IQ_RPTR
++#define CP_HQD_IQ_RPTR__OFFSET__SHIFT 0x0
++#define CP_HQD_IQ_RPTR__OFFSET_MASK 0x0000003FL
++//CP_HQD_DEQUEUE_REQUEST
++#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_REQ__SHIFT 0x0
++#define CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND__SHIFT 0x4
++#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_INT__SHIFT 0x8
++#define CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_EN__SHIFT 0x9
++#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_REQ_EN__SHIFT 0xa
++#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_REQ_MASK 0x00000007L
++#define CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK 0x00000010L
++#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_INT_MASK 0x00000100L
++#define CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_EN_MASK 0x00000200L
++#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_REQ_EN_MASK 0x00000400L
++//CP_HQD_DMA_OFFLOAD
++#define CP_HQD_DMA_OFFLOAD__DMA_OFFLOAD__SHIFT 0x0
++#define CP_HQD_DMA_OFFLOAD__DMA_OFFLOAD_MASK 0x00000001L
++//CP_HQD_OFFLOAD
++#define CP_HQD_OFFLOAD__DMA_OFFLOAD__SHIFT 0x0
++#define CP_HQD_OFFLOAD__DMA_OFFLOAD_EN__SHIFT 0x1
++#define CP_HQD_OFFLOAD__AQL_OFFLOAD__SHIFT 0x2
++#define CP_HQD_OFFLOAD__AQL_OFFLOAD_EN__SHIFT 0x3
++#define CP_HQD_OFFLOAD__EOP_OFFLOAD__SHIFT 0x4
++#define CP_HQD_OFFLOAD__EOP_OFFLOAD_EN__SHIFT 0x5
++#define CP_HQD_OFFLOAD__DMA_OFFLOAD_MASK 0x00000001L
++#define CP_HQD_OFFLOAD__DMA_OFFLOAD_EN_MASK 0x00000002L
++#define CP_HQD_OFFLOAD__AQL_OFFLOAD_MASK 0x00000004L
++#define CP_HQD_OFFLOAD__AQL_OFFLOAD_EN_MASK 0x00000008L
++#define CP_HQD_OFFLOAD__EOP_OFFLOAD_MASK 0x00000010L
++#define CP_HQD_OFFLOAD__EOP_OFFLOAD_EN_MASK 0x00000020L
++//CP_HQD_SEMA_CMD
++#define CP_HQD_SEMA_CMD__RETRY__SHIFT 0x0
++#define CP_HQD_SEMA_CMD__RESULT__SHIFT 0x1
++#define CP_HQD_SEMA_CMD__RETRY_MASK 0x00000001L
++#define CP_HQD_SEMA_CMD__RESULT_MASK 0x00000006L
++//CP_HQD_MSG_TYPE
++#define CP_HQD_MSG_TYPE__ACTION__SHIFT 0x0
++#define CP_HQD_MSG_TYPE__SAVE_STATE__SHIFT 0x4
++#define CP_HQD_MSG_TYPE__ACTION_MASK 0x00000007L
++#define CP_HQD_MSG_TYPE__SAVE_STATE_MASK 0x00000070L
++//CP_HQD_ATOMIC0_PREOP_LO
++#define CP_HQD_ATOMIC0_PREOP_LO__ATOMIC0_PREOP_LO__SHIFT 0x0
++#define CP_HQD_ATOMIC0_PREOP_LO__ATOMIC0_PREOP_LO_MASK 0xFFFFFFFFL
++//CP_HQD_ATOMIC0_PREOP_HI
++#define CP_HQD_ATOMIC0_PREOP_HI__ATOMIC0_PREOP_HI__SHIFT 0x0
++#define CP_HQD_ATOMIC0_PREOP_HI__ATOMIC0_PREOP_HI_MASK 0xFFFFFFFFL
++//CP_HQD_ATOMIC1_PREOP_LO
++#define CP_HQD_ATOMIC1_PREOP_LO__ATOMIC1_PREOP_LO__SHIFT 0x0
++#define CP_HQD_ATOMIC1_PREOP_LO__ATOMIC1_PREOP_LO_MASK 0xFFFFFFFFL
++//CP_HQD_ATOMIC1_PREOP_HI
++#define CP_HQD_ATOMIC1_PREOP_HI__ATOMIC1_PREOP_HI__SHIFT 0x0
++#define CP_HQD_ATOMIC1_PREOP_HI__ATOMIC1_PREOP_HI_MASK 0xFFFFFFFFL
++//CP_HQD_HQ_SCHEDULER0
++#define CP_HQD_HQ_SCHEDULER0__SCHEDULER__SHIFT 0x0
++#define CP_HQD_HQ_SCHEDULER0__SCHEDULER_MASK 0xFFFFFFFFL
++//CP_HQD_HQ_STATUS0
++#define CP_HQD_HQ_STATUS0__DEQUEUE_STATUS__SHIFT 0x0
++#define CP_HQD_HQ_STATUS0__DEQUEUE_RETRY_CNT__SHIFT 0x2
++#define CP_HQD_HQ_STATUS0__RSV_6_4__SHIFT 0x4
++#define CP_HQD_HQ_STATUS0__SCRATCH_RAM_INIT__SHIFT 0x7
++#define CP_HQD_HQ_STATUS0__TCL2_DIRTY__SHIFT 0x8
++#define CP_HQD_HQ_STATUS0__PG_ACTIVATED__SHIFT 0x9
++#define CP_HQD_HQ_STATUS0__RSVR_29_10__SHIFT 0xa
++#define CP_HQD_HQ_STATUS0__QUEUE_IDLE__SHIFT 0x1e
++#define CP_HQD_HQ_STATUS0__DB_UPDATED_MSG_EN__SHIFT 0x1f
++#define CP_HQD_HQ_STATUS0__DEQUEUE_STATUS_MASK 0x00000003L
++#define CP_HQD_HQ_STATUS0__DEQUEUE_RETRY_CNT_MASK 0x0000000CL
++#define CP_HQD_HQ_STATUS0__RSV_6_4_MASK 0x00000070L
++#define CP_HQD_HQ_STATUS0__SCRATCH_RAM_INIT_MASK 0x00000080L
++#define CP_HQD_HQ_STATUS0__TCL2_DIRTY_MASK 0x00000100L
++#define CP_HQD_HQ_STATUS0__PG_ACTIVATED_MASK 0x00000200L
++#define CP_HQD_HQ_STATUS0__RSVR_29_10_MASK 0x3FFFFC00L
++#define CP_HQD_HQ_STATUS0__QUEUE_IDLE_MASK 0x40000000L
++#define CP_HQD_HQ_STATUS0__DB_UPDATED_MSG_EN_MASK 0x80000000L
++//CP_HQD_HQ_CONTROL0
++#define CP_HQD_HQ_CONTROL0__CONTROL__SHIFT 0x0
++#define CP_HQD_HQ_CONTROL0__CONTROL_MASK 0xFFFFFFFFL
++//CP_HQD_HQ_SCHEDULER1
++#define CP_HQD_HQ_SCHEDULER1__SCHEDULER__SHIFT 0x0
++#define CP_HQD_HQ_SCHEDULER1__SCHEDULER_MASK 0xFFFFFFFFL
++//CP_MQD_CONTROL
++#define CP_MQD_CONTROL__VMID__SHIFT 0x0
++#define CP_MQD_CONTROL__PRIV_STATE__SHIFT 0x8
++#define CP_MQD_CONTROL__PROCESSING_MQD__SHIFT 0xc
++#define CP_MQD_CONTROL__PROCESSING_MQD_EN__SHIFT 0xd
++#define CP_MQD_CONTROL__EXE_DISABLE__SHIFT 0x17
++#define CP_MQD_CONTROL__CACHE_POLICY__SHIFT 0x18
++#define CP_MQD_CONTROL__VMID_MASK 0x0000000FL
++#define CP_MQD_CONTROL__PRIV_STATE_MASK 0x00000100L
++#define CP_MQD_CONTROL__PROCESSING_MQD_MASK 0x00001000L
++#define CP_MQD_CONTROL__PROCESSING_MQD_EN_MASK 0x00002000L
++#define CP_MQD_CONTROL__EXE_DISABLE_MASK 0x00800000L
++#define CP_MQD_CONTROL__CACHE_POLICY_MASK 0x01000000L
++//CP_HQD_HQ_STATUS1
++#define CP_HQD_HQ_STATUS1__STATUS__SHIFT 0x0
++#define CP_HQD_HQ_STATUS1__STATUS_MASK 0xFFFFFFFFL
++//CP_HQD_HQ_CONTROL1
++#define CP_HQD_HQ_CONTROL1__CONTROL__SHIFT 0x0
++#define CP_HQD_HQ_CONTROL1__CONTROL_MASK 0xFFFFFFFFL
++//CP_HQD_EOP_BASE_ADDR
++#define CP_HQD_EOP_BASE_ADDR__BASE_ADDR__SHIFT 0x0
++#define CP_HQD_EOP_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL
++//CP_HQD_EOP_BASE_ADDR_HI
++#define CP_HQD_EOP_BASE_ADDR_HI__BASE_ADDR_HI__SHIFT 0x0
++#define CP_HQD_EOP_BASE_ADDR_HI__BASE_ADDR_HI_MASK 0x000000FFL
++//CP_HQD_EOP_CONTROL
++#define CP_HQD_EOP_CONTROL__EOP_SIZE__SHIFT 0x0
++#define CP_HQD_EOP_CONTROL__PROCESSING_EOP__SHIFT 0x8
++#define CP_HQD_EOP_CONTROL__PROCESS_EOP_EN__SHIFT 0xc
++#define CP_HQD_EOP_CONTROL__PROCESSING_EOPIB__SHIFT 0xd
++#define CP_HQD_EOP_CONTROL__PROCESS_EOPIB_EN__SHIFT 0xe
++#define CP_HQD_EOP_CONTROL__HALT_FETCHER__SHIFT 0x15
++#define CP_HQD_EOP_CONTROL__HALT_FETCHER_EN__SHIFT 0x16
++#define CP_HQD_EOP_CONTROL__EXE_DISABLE__SHIFT 0x17
++#define CP_HQD_EOP_CONTROL__CACHE_POLICY__SHIFT 0x18
++#define CP_HQD_EOP_CONTROL__SIG_SEM_RESULT__SHIFT 0x1d
++#define CP_HQD_EOP_CONTROL__PEND_SIG_SEM__SHIFT 0x1f
++#define CP_HQD_EOP_CONTROL__EOP_SIZE_MASK 0x0000003FL
++#define CP_HQD_EOP_CONTROL__PROCESSING_EOP_MASK 0x00000100L
++#define CP_HQD_EOP_CONTROL__PROCESS_EOP_EN_MASK 0x00001000L
++#define CP_HQD_EOP_CONTROL__PROCESSING_EOPIB_MASK 0x00002000L
++#define CP_HQD_EOP_CONTROL__PROCESS_EOPIB_EN_MASK 0x00004000L
++#define CP_HQD_EOP_CONTROL__HALT_FETCHER_MASK 0x00200000L
++#define CP_HQD_EOP_CONTROL__HALT_FETCHER_EN_MASK 0x00400000L
++#define CP_HQD_EOP_CONTROL__EXE_DISABLE_MASK 0x00800000L
++#define CP_HQD_EOP_CONTROL__CACHE_POLICY_MASK 0x01000000L
++#define CP_HQD_EOP_CONTROL__SIG_SEM_RESULT_MASK 0x60000000L
++#define CP_HQD_EOP_CONTROL__PEND_SIG_SEM_MASK 0x80000000L
++//CP_HQD_EOP_RPTR
++#define CP_HQD_EOP_RPTR__RPTR__SHIFT 0x0
++#define CP_HQD_EOP_RPTR__RESET_FETCHER__SHIFT 0x1c
++#define CP_HQD_EOP_RPTR__DEQUEUE_PEND__SHIFT 0x1d
++#define CP_HQD_EOP_RPTR__RPTR_EQ_CSMD_WPTR__SHIFT 0x1e
++#define CP_HQD_EOP_RPTR__INIT_FETCHER__SHIFT 0x1f
++#define CP_HQD_EOP_RPTR__RPTR_MASK 0x00001FFFL
++#define CP_HQD_EOP_RPTR__RESET_FETCHER_MASK 0x10000000L
++#define CP_HQD_EOP_RPTR__DEQUEUE_PEND_MASK 0x20000000L
++#define CP_HQD_EOP_RPTR__RPTR_EQ_CSMD_WPTR_MASK 0x40000000L
++#define CP_HQD_EOP_RPTR__INIT_FETCHER_MASK 0x80000000L
++//CP_HQD_EOP_WPTR
++#define CP_HQD_EOP_WPTR__WPTR__SHIFT 0x0
++#define CP_HQD_EOP_WPTR__EOP_EMPTY__SHIFT 0xf
++#define CP_HQD_EOP_WPTR__EOP_AVAIL__SHIFT 0x10
++#define CP_HQD_EOP_WPTR__WPTR_MASK 0x00001FFFL
++#define CP_HQD_EOP_WPTR__EOP_EMPTY_MASK 0x00008000L
++#define CP_HQD_EOP_WPTR__EOP_AVAIL_MASK 0x1FFF0000L
++//CP_HQD_EOP_EVENTS
++#define CP_HQD_EOP_EVENTS__EVENT_COUNT__SHIFT 0x0
++#define CP_HQD_EOP_EVENTS__CS_PARTIAL_FLUSH_PEND__SHIFT 0x10
++#define CP_HQD_EOP_EVENTS__EVENT_COUNT_MASK 0x00000FFFL
++#define CP_HQD_EOP_EVENTS__CS_PARTIAL_FLUSH_PEND_MASK 0x00010000L
++//CP_HQD_CTX_SAVE_BASE_ADDR_LO
++#define CP_HQD_CTX_SAVE_BASE_ADDR_LO__ADDR__SHIFT 0xc
++#define CP_HQD_CTX_SAVE_BASE_ADDR_LO__ADDR_MASK 0xFFFFF000L
++//CP_HQD_CTX_SAVE_BASE_ADDR_HI
++#define CP_HQD_CTX_SAVE_BASE_ADDR_HI__ADDR_HI__SHIFT 0x0
++#define CP_HQD_CTX_SAVE_BASE_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
++//CP_HQD_CTX_SAVE_CONTROL
++#define CP_HQD_CTX_SAVE_CONTROL__POLICY__SHIFT 0x3
++#define CP_HQD_CTX_SAVE_CONTROL__EXE_DISABLE__SHIFT 0x17
++#define CP_HQD_CTX_SAVE_CONTROL__POLICY_MASK 0x00000008L
++#define CP_HQD_CTX_SAVE_CONTROL__EXE_DISABLE_MASK 0x00800000L
++//CP_HQD_CNTL_STACK_OFFSET
++#define CP_HQD_CNTL_STACK_OFFSET__OFFSET__SHIFT 0x2
++#define CP_HQD_CNTL_STACK_OFFSET__OFFSET_MASK 0x00007FFCL
++//CP_HQD_CNTL_STACK_SIZE
++#define CP_HQD_CNTL_STACK_SIZE__SIZE__SHIFT 0xc
++#define CP_HQD_CNTL_STACK_SIZE__SIZE_MASK 0x00007000L
++//CP_HQD_WG_STATE_OFFSET
++#define CP_HQD_WG_STATE_OFFSET__OFFSET__SHIFT 0x2
++#define CP_HQD_WG_STATE_OFFSET__OFFSET_MASK 0x01FFFFFCL
++//CP_HQD_CTX_SAVE_SIZE
++#define CP_HQD_CTX_SAVE_SIZE__SIZE__SHIFT 0xc
++#define CP_HQD_CTX_SAVE_SIZE__SIZE_MASK 0x01FFF000L
++//CP_HQD_GDS_RESOURCE_STATE
++#define CP_HQD_GDS_RESOURCE_STATE__OA_REQUIRED__SHIFT 0x0
++#define CP_HQD_GDS_RESOURCE_STATE__OA_ACQUIRED__SHIFT 0x1
++#define CP_HQD_GDS_RESOURCE_STATE__GWS_SIZE__SHIFT 0x4
++#define CP_HQD_GDS_RESOURCE_STATE__GWS_PNTR__SHIFT 0xc
++#define CP_HQD_GDS_RESOURCE_STATE__OA_REQUIRED_MASK 0x00000001L
++#define CP_HQD_GDS_RESOURCE_STATE__OA_ACQUIRED_MASK 0x00000002L
++#define CP_HQD_GDS_RESOURCE_STATE__GWS_SIZE_MASK 0x000003F0L
++#define CP_HQD_GDS_RESOURCE_STATE__GWS_PNTR_MASK 0x0003F000L
++//CP_HQD_ERROR
++#define CP_HQD_ERROR__EDC_ERROR_ID__SHIFT 0x0
++#define CP_HQD_ERROR__SUA_ERROR__SHIFT 0x4
++#define CP_HQD_ERROR__AQL_ERROR__SHIFT 0x5
++#define CP_HQD_ERROR__PQ_UTCL1_ERROR__SHIFT 0x8
++#define CP_HQD_ERROR__IB_UTCL1_ERROR__SHIFT 0x9
++#define CP_HQD_ERROR__EOP_UTCL1_ERROR__SHIFT 0xa
++#define CP_HQD_ERROR__IQ_UTCL1_ERROR__SHIFT 0xb
++#define CP_HQD_ERROR__RRPT_UTCL1_ERROR__SHIFT 0xc
++#define CP_HQD_ERROR__WPP_UTCL1_ERROR__SHIFT 0xd
++#define CP_HQD_ERROR__SEM_UTCL1_ERROR__SHIFT 0xe
++#define CP_HQD_ERROR__DMA_SRC_UTCL1_ERROR__SHIFT 0xf
++#define CP_HQD_ERROR__DMA_DST_UTCL1_ERROR__SHIFT 0x10
++#define CP_HQD_ERROR__SR_UTCL1_ERROR__SHIFT 0x11
++#define CP_HQD_ERROR__QU_UTCL1_ERROR__SHIFT 0x12
++#define CP_HQD_ERROR__TC_UTCL1_ERROR__SHIFT 0x13
++#define CP_HQD_ERROR__EDC_ERROR_ID_MASK 0x0000000FL
++#define CP_HQD_ERROR__SUA_ERROR_MASK 0x00000010L
++#define CP_HQD_ERROR__AQL_ERROR_MASK 0x00000020L
++#define CP_HQD_ERROR__PQ_UTCL1_ERROR_MASK 0x00000100L
++#define CP_HQD_ERROR__IB_UTCL1_ERROR_MASK 0x00000200L
++#define CP_HQD_ERROR__EOP_UTCL1_ERROR_MASK 0x00000400L
++#define CP_HQD_ERROR__IQ_UTCL1_ERROR_MASK 0x00000800L
++#define CP_HQD_ERROR__RRPT_UTCL1_ERROR_MASK 0x00001000L
++#define CP_HQD_ERROR__WPP_UTCL1_ERROR_MASK 0x00002000L
++#define CP_HQD_ERROR__SEM_UTCL1_ERROR_MASK 0x00004000L
++#define CP_HQD_ERROR__DMA_SRC_UTCL1_ERROR_MASK 0x00008000L
++#define CP_HQD_ERROR__DMA_DST_UTCL1_ERROR_MASK 0x00010000L
++#define CP_HQD_ERROR__SR_UTCL1_ERROR_MASK 0x00020000L
++#define CP_HQD_ERROR__QU_UTCL1_ERROR_MASK 0x00040000L
++#define CP_HQD_ERROR__TC_UTCL1_ERROR_MASK 0x00080000L
++//CP_HQD_EOP_WPTR_MEM
++#define CP_HQD_EOP_WPTR_MEM__WPTR__SHIFT 0x0
++#define CP_HQD_EOP_WPTR_MEM__WPTR_MASK 0x00001FFFL
++//CP_HQD_AQL_CONTROL
++#define CP_HQD_AQL_CONTROL__CONTROL0__SHIFT 0x0
++#define CP_HQD_AQL_CONTROL__CONTROL0_EN__SHIFT 0xf
++#define CP_HQD_AQL_CONTROL__CONTROL1__SHIFT 0x10
++#define CP_HQD_AQL_CONTROL__CONTROL1_EN__SHIFT 0x1f
++#define CP_HQD_AQL_CONTROL__CONTROL0_MASK 0x00007FFFL
++#define CP_HQD_AQL_CONTROL__CONTROL0_EN_MASK 0x00008000L
++#define CP_HQD_AQL_CONTROL__CONTROL1_MASK 0x7FFF0000L
++#define CP_HQD_AQL_CONTROL__CONTROL1_EN_MASK 0x80000000L
++//CP_HQD_PQ_WPTR_LO
++#define CP_HQD_PQ_WPTR_LO__OFFSET__SHIFT 0x0
++#define CP_HQD_PQ_WPTR_LO__OFFSET_MASK 0xFFFFFFFFL
++//CP_HQD_PQ_WPTR_HI
++#define CP_HQD_PQ_WPTR_HI__DATA__SHIFT 0x0
++#define CP_HQD_PQ_WPTR_HI__DATA_MASK 0xFFFFFFFFL
++
++
++// addressBlock: gc_didtdec
++//DIDT_IND_INDEX
++#define DIDT_IND_INDEX__DIDT_IND_INDEX__SHIFT 0x0
++#define DIDT_IND_INDEX__DIDT_IND_INDEX_MASK 0xFFFFFFFFL
++//DIDT_IND_DATA
++#define DIDT_IND_DATA__DIDT_IND_DATA__SHIFT 0x0
++#define DIDT_IND_DATA__DIDT_IND_DATA_MASK 0xFFFFFFFFL
++
++
++// addressBlock: gc_gccacdec
++//GC_CAC_CTRL_1
++#define GC_CAC_CTRL_1__CAC_WINDOW__SHIFT 0x0
++#define GC_CAC_CTRL_1__TDP_WINDOW__SHIFT 0x18
++#define GC_CAC_CTRL_1__CAC_WINDOW_MASK 0x00FFFFFFL
++#define GC_CAC_CTRL_1__TDP_WINDOW_MASK 0xFF000000L
++//GC_CAC_CTRL_2
++#define GC_CAC_CTRL_2__CAC_ENABLE__SHIFT 0x0
++#define GC_CAC_CTRL_2__CAC_SOFT_CTRL_ENABLE__SHIFT 0x1
++#define GC_CAC_CTRL_2__UNUSED_0__SHIFT 0x2
++#define GC_CAC_CTRL_2__CAC_ENABLE_MASK 0x00000001L
++#define GC_CAC_CTRL_2__CAC_SOFT_CTRL_ENABLE_MASK 0x00000002L
++#define GC_CAC_CTRL_2__UNUSED_0_MASK 0xFFFFFFFCL
++//GC_CAC_CGTT_CLK_CTRL
++#define GC_CAC_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
++#define GC_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
++#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
++#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
++#define GC_CAC_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
++#define GC_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
++#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
++#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
++//GC_CAC_AGGR_LOWER
++#define GC_CAC_AGGR_LOWER__AGGR_31_0__SHIFT 0x0
++#define GC_CAC_AGGR_LOWER__AGGR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_AGGR_UPPER
++#define GC_CAC_AGGR_UPPER__AGGR_63_32__SHIFT 0x0
++#define GC_CAC_AGGR_UPPER__AGGR_63_32_MASK 0xFFFFFFFFL
++//GC_CAC_PG_AGGR_LOWER
++#define GC_CAC_PG_AGGR_LOWER__LKG_AGGR_31_0__SHIFT 0x0
++#define GC_CAC_PG_AGGR_LOWER__LKG_AGGR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_PG_AGGR_UPPER
++#define GC_CAC_PG_AGGR_UPPER__LKG_AGGR_63_32__SHIFT 0x0
++#define GC_CAC_PG_AGGR_UPPER__LKG_AGGR_63_32_MASK 0xFFFFFFFFL
++//GC_CAC_SOFT_CTRL
++#define GC_CAC_SOFT_CTRL__SOFT_SNAP__SHIFT 0x0
++#define GC_CAC_SOFT_CTRL__UNUSED__SHIFT 0x1
++#define GC_CAC_SOFT_CTRL__SOFT_SNAP_MASK 0x00000001L
++#define GC_CAC_SOFT_CTRL__UNUSED_MASK 0xFFFFFFFEL
++//GC_DIDT_CTRL0
++#define GC_DIDT_CTRL0__DIDT_CTRL_EN__SHIFT 0x0
++#define GC_DIDT_CTRL0__PHASE_OFFSET__SHIFT 0x1
++#define GC_DIDT_CTRL0__DIDT_SW_RST__SHIFT 0x3
++#define GC_DIDT_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x4
++#define GC_DIDT_CTRL0__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT 0x5
++#define GC_DIDT_CTRL0__DIDT_CTRL_EN_MASK 0x00000001L
++#define GC_DIDT_CTRL0__PHASE_OFFSET_MASK 0x00000006L
++#define GC_DIDT_CTRL0__DIDT_SW_RST_MASK 0x00000008L
++#define GC_DIDT_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x00000010L
++#define GC_DIDT_CTRL0__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK 0x000001E0L
++//GC_DIDT_CTRL1
++#define GC_DIDT_CTRL1__MIN_POWER__SHIFT 0x0
++#define GC_DIDT_CTRL1__MAX_POWER__SHIFT 0x10
++#define GC_DIDT_CTRL1__MIN_POWER_MASK 0x0000FFFFL
++#define GC_DIDT_CTRL1__MAX_POWER_MASK 0xFFFF0000L
++//GC_DIDT_CTRL2
++#define GC_DIDT_CTRL2__MAX_POWER_DELTA__SHIFT 0x0
++#define GC_DIDT_CTRL2__UNUSED_0__SHIFT 0xe
++#define GC_DIDT_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT 0x10
++#define GC_DIDT_CTRL2__UNUSED_1__SHIFT 0x1a
++#define GC_DIDT_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT 0x1b
++#define GC_DIDT_CTRL2__UNUSED_2__SHIFT 0x1f
++#define GC_DIDT_CTRL2__MAX_POWER_DELTA_MASK 0x00003FFFL
++#define GC_DIDT_CTRL2__UNUSED_0_MASK 0x0000C000L
++#define GC_DIDT_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK 0x03FF0000L
++#define GC_DIDT_CTRL2__UNUSED_1_MASK 0x04000000L
++#define GC_DIDT_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK 0x78000000L
++#define GC_DIDT_CTRL2__UNUSED_2_MASK 0x80000000L
++//GC_DIDT_WEIGHT
++#define GC_DIDT_WEIGHT__SQ_WEIGHT__SHIFT 0x0
++#define GC_DIDT_WEIGHT__DB_WEIGHT__SHIFT 0x8
++#define GC_DIDT_WEIGHT__TD_WEIGHT__SHIFT 0x10
++#define GC_DIDT_WEIGHT__TCP_WEIGHT__SHIFT 0x18
++#define GC_DIDT_WEIGHT__SQ_WEIGHT_MASK 0x000000FFL
++#define GC_DIDT_WEIGHT__DB_WEIGHT_MASK 0x0000FF00L
++#define GC_DIDT_WEIGHT__TD_WEIGHT_MASK 0x00FF0000L
++#define GC_DIDT_WEIGHT__TCP_WEIGHT_MASK 0xFF000000L
++//GC_EDC_CTRL
++#define GC_EDC_CTRL__EDC_EN__SHIFT 0x0
++#define GC_EDC_CTRL__EDC_SW_RST__SHIFT 0x1
++#define GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT 0x2
++#define GC_EDC_CTRL__EDC_FORCE_STALL__SHIFT 0x3
++#define GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT 0x4
++#define GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT 0x9
++#define GC_EDC_CTRL__UNUSED_0__SHIFT 0xa
++#define GC_EDC_CTRL__EDC_EN_MASK 0x00000001L
++#define GC_EDC_CTRL__EDC_SW_RST_MASK 0x00000002L
++#define GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK 0x00000004L
++#define GC_EDC_CTRL__EDC_FORCE_STALL_MASK 0x00000008L
++#define GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK 0x000001F0L
++#define GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK 0x00000200L
++#define GC_EDC_CTRL__UNUSED_0_MASK 0xFFFFFC00L
++//GC_EDC_THRESHOLD
++#define GC_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT 0x0
++#define GC_EDC_THRESHOLD__EDC_THRESHOLD_MASK 0xFFFFFFFFL
++//GC_EDC_STATUS
++#define GC_EDC_STATUS__EDC_THROTTLE_LEVEL__SHIFT 0x0
++#define GC_EDC_STATUS__EDC_ROLLING_DROOP_DELTA__SHIFT 0x3
++#define GC_EDC_STATUS__EDC_THROTTLE_LEVEL_MASK 0x00000007L
++#define GC_EDC_STATUS__EDC_ROLLING_DROOP_DELTA_MASK 0x03FFFFF8L
++//GC_EDC_OVERFLOW
++#define GC_EDC_OVERFLOW__EDC_ROLLING_POWER_DELTA_OVERFLOW__SHIFT 0x0
++#define GC_EDC_OVERFLOW__EDC_THROTTLE_LEVEL_OVERFLOW_COUNTER__SHIFT 0x1
++#define GC_EDC_OVERFLOW__EDC_DROOP_LEVEL_OVERFLOW__SHIFT 0x11
++#define GC_EDC_OVERFLOW__PSM_COUNTER__SHIFT 0x12
++#define GC_EDC_OVERFLOW__EDC_ROLLING_POWER_DELTA_OVERFLOW_MASK 0x00000001L
++#define GC_EDC_OVERFLOW__EDC_THROTTLE_LEVEL_OVERFLOW_COUNTER_MASK 0x0001FFFEL
++#define GC_EDC_OVERFLOW__EDC_DROOP_LEVEL_OVERFLOW_MASK 0x00020000L
++#define GC_EDC_OVERFLOW__PSM_COUNTER_MASK 0xFFFC0000L
++//GC_EDC_ROLLING_POWER_DELTA
++#define GC_EDC_ROLLING_POWER_DELTA__EDC_ROLLING_POWER_DELTA__SHIFT 0x0
++#define GC_EDC_ROLLING_POWER_DELTA__EDC_ROLLING_POWER_DELTA_MASK 0xFFFFFFFFL
++//GC_DIDT_DROOP_CTRL
++#define GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_EN__SHIFT 0x0
++#define GC_DIDT_DROOP_CTRL__DIDT_DROOP_THRESHOLD__SHIFT 0x1
++#define GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_INDEX__SHIFT 0xf
++#define GC_DIDT_DROOP_CTRL__DIDT_LEVEL_SEL__SHIFT 0x13
++#define GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_OVERFLOW__SHIFT 0x1f
++#define GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_EN_MASK 0x00000001L
++#define GC_DIDT_DROOP_CTRL__DIDT_DROOP_THRESHOLD_MASK 0x00007FFEL
++#define GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_INDEX_MASK 0x00078000L
++#define GC_DIDT_DROOP_CTRL__DIDT_LEVEL_SEL_MASK 0x00080000L
++#define GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_OVERFLOW_MASK 0x80000000L
++//GC_EDC_DROOP_CTRL
++#define GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_EN__SHIFT 0x0
++#define GC_EDC_DROOP_CTRL__EDC_DROOP_THRESHOLD__SHIFT 0x1
++#define GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_INDEX__SHIFT 0xf
++#define GC_EDC_DROOP_CTRL__AVG_PSM_SEL__SHIFT 0x14
++#define GC_EDC_DROOP_CTRL__EDC_LEVEL_SEL__SHIFT 0x15
++#define GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_EN_MASK 0x00000001L
++#define GC_EDC_DROOP_CTRL__EDC_DROOP_THRESHOLD_MASK 0x00007FFEL
++#define GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_INDEX_MASK 0x000F8000L
++#define GC_EDC_DROOP_CTRL__AVG_PSM_SEL_MASK 0x00100000L
++#define GC_EDC_DROOP_CTRL__EDC_LEVEL_SEL_MASK 0x00200000L
++//GC_CAC_IND_INDEX
++#define GC_CAC_IND_INDEX__GC_CAC_IND_ADDR__SHIFT 0x0
++#define GC_CAC_IND_INDEX__GC_CAC_IND_ADDR_MASK 0xFFFFFFFFL
++//GC_CAC_IND_DATA
++#define GC_CAC_IND_DATA__GC_CAC_IND_DATA__SHIFT 0x0
++#define GC_CAC_IND_DATA__GC_CAC_IND_DATA_MASK 0xFFFFFFFFL
++//SE_CAC_CGTT_CLK_CTRL
++#define SE_CAC_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
++#define SE_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
++#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
++#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
++#define SE_CAC_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
++#define SE_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
++#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
++#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
++//SE_CAC_IND_INDEX
++#define SE_CAC_IND_INDEX__SE_CAC_IND_ADDR__SHIFT 0x0
++#define SE_CAC_IND_INDEX__SE_CAC_IND_ADDR_MASK 0xFFFFFFFFL
++//SE_CAC_IND_DATA
++#define SE_CAC_IND_DATA__SE_CAC_IND_DATA__SHIFT 0x0
++#define SE_CAC_IND_DATA__SE_CAC_IND_DATA_MASK 0xFFFFFFFFL
++
++
++// addressBlock: gc_tcpdec
++//TCP_WATCH0_ADDR_H
++#define TCP_WATCH0_ADDR_H__ADDR__SHIFT 0x0
++#define TCP_WATCH0_ADDR_H__ADDR_MASK 0x0000FFFFL
++//TCP_WATCH0_ADDR_L
++#define TCP_WATCH0_ADDR_L__ADDR__SHIFT 0x6
++#define TCP_WATCH0_ADDR_L__ADDR_MASK 0xFFFFFFC0L
++//TCP_WATCH0_CNTL
++#define TCP_WATCH0_CNTL__MASK__SHIFT 0x0
++#define TCP_WATCH0_CNTL__VMID__SHIFT 0x18
++#define TCP_WATCH0_CNTL__ATC__SHIFT 0x1c
++#define TCP_WATCH0_CNTL__MODE__SHIFT 0x1d
++#define TCP_WATCH0_CNTL__VALID__SHIFT 0x1f
++#define TCP_WATCH0_CNTL__MASK_MASK 0x00FFFFFFL
++#define TCP_WATCH0_CNTL__VMID_MASK 0x0F000000L
++#define TCP_WATCH0_CNTL__ATC_MASK 0x10000000L
++#define TCP_WATCH0_CNTL__MODE_MASK 0x60000000L
++#define TCP_WATCH0_CNTL__VALID_MASK 0x80000000L
++//TCP_WATCH1_ADDR_H
++#define TCP_WATCH1_ADDR_H__ADDR__SHIFT 0x0
++#define TCP_WATCH1_ADDR_H__ADDR_MASK 0x0000FFFFL
++//TCP_WATCH1_ADDR_L
++#define TCP_WATCH1_ADDR_L__ADDR__SHIFT 0x6
++#define TCP_WATCH1_ADDR_L__ADDR_MASK 0xFFFFFFC0L
++//TCP_WATCH1_CNTL
++#define TCP_WATCH1_CNTL__MASK__SHIFT 0x0
++#define TCP_WATCH1_CNTL__VMID__SHIFT 0x18
++#define TCP_WATCH1_CNTL__ATC__SHIFT 0x1c
++#define TCP_WATCH1_CNTL__MODE__SHIFT 0x1d
++#define TCP_WATCH1_CNTL__VALID__SHIFT 0x1f
++#define TCP_WATCH1_CNTL__MASK_MASK 0x00FFFFFFL
++#define TCP_WATCH1_CNTL__VMID_MASK 0x0F000000L
++#define TCP_WATCH1_CNTL__ATC_MASK 0x10000000L
++#define TCP_WATCH1_CNTL__MODE_MASK 0x60000000L
++#define TCP_WATCH1_CNTL__VALID_MASK 0x80000000L
++//TCP_WATCH2_ADDR_H
++#define TCP_WATCH2_ADDR_H__ADDR__SHIFT 0x0
++#define TCP_WATCH2_ADDR_H__ADDR_MASK 0x0000FFFFL
++//TCP_WATCH2_ADDR_L
++#define TCP_WATCH2_ADDR_L__ADDR__SHIFT 0x6
++#define TCP_WATCH2_ADDR_L__ADDR_MASK 0xFFFFFFC0L
++//TCP_WATCH2_CNTL
++#define TCP_WATCH2_CNTL__MASK__SHIFT 0x0
++#define TCP_WATCH2_CNTL__VMID__SHIFT 0x18
++#define TCP_WATCH2_CNTL__ATC__SHIFT 0x1c
++#define TCP_WATCH2_CNTL__MODE__SHIFT 0x1d
++#define TCP_WATCH2_CNTL__VALID__SHIFT 0x1f
++#define TCP_WATCH2_CNTL__MASK_MASK 0x00FFFFFFL
++#define TCP_WATCH2_CNTL__VMID_MASK 0x0F000000L
++#define TCP_WATCH2_CNTL__ATC_MASK 0x10000000L
++#define TCP_WATCH2_CNTL__MODE_MASK 0x60000000L
++#define TCP_WATCH2_CNTL__VALID_MASK 0x80000000L
++//TCP_WATCH3_ADDR_H
++#define TCP_WATCH3_ADDR_H__ADDR__SHIFT 0x0
++#define TCP_WATCH3_ADDR_H__ADDR_MASK 0x0000FFFFL
++//TCP_WATCH3_ADDR_L
++#define TCP_WATCH3_ADDR_L__ADDR__SHIFT 0x6
++#define TCP_WATCH3_ADDR_L__ADDR_MASK 0xFFFFFFC0L
++//TCP_WATCH3_CNTL
++#define TCP_WATCH3_CNTL__MASK__SHIFT 0x0
++#define TCP_WATCH3_CNTL__VMID__SHIFT 0x18
++#define TCP_WATCH3_CNTL__ATC__SHIFT 0x1c
++#define TCP_WATCH3_CNTL__MODE__SHIFT 0x1d
++#define TCP_WATCH3_CNTL__VALID__SHIFT 0x1f
++#define TCP_WATCH3_CNTL__MASK_MASK 0x00FFFFFFL
++#define TCP_WATCH3_CNTL__VMID_MASK 0x0F000000L
++#define TCP_WATCH3_CNTL__ATC_MASK 0x10000000L
++#define TCP_WATCH3_CNTL__MODE_MASK 0x60000000L
++#define TCP_WATCH3_CNTL__VALID_MASK 0x80000000L
++//TCP_GATCL1_CNTL
++#define TCP_GATCL1_CNTL__INVALIDATE_ALL_VMID__SHIFT 0x19
++#define TCP_GATCL1_CNTL__FORCE_MISS__SHIFT 0x1a
++#define TCP_GATCL1_CNTL__FORCE_IN_ORDER__SHIFT 0x1b
++#define TCP_GATCL1_CNTL__REDUCE_FIFO_DEPTH_BY_2__SHIFT 0x1c
++#define TCP_GATCL1_CNTL__REDUCE_CACHE_SIZE_BY_2__SHIFT 0x1e
++#define TCP_GATCL1_CNTL__INVALIDATE_ALL_VMID_MASK 0x02000000L
++#define TCP_GATCL1_CNTL__FORCE_MISS_MASK 0x04000000L
++#define TCP_GATCL1_CNTL__FORCE_IN_ORDER_MASK 0x08000000L
++#define TCP_GATCL1_CNTL__REDUCE_FIFO_DEPTH_BY_2_MASK 0x30000000L
++#define TCP_GATCL1_CNTL__REDUCE_CACHE_SIZE_BY_2_MASK 0xC0000000L
++//TCP_ATC_EDC_GATCL1_CNT
++#define TCP_ATC_EDC_GATCL1_CNT__DATA_SEC__SHIFT 0x0
++#define TCP_ATC_EDC_GATCL1_CNT__DATA_SEC_MASK 0x000000FFL
++//TCP_GATCL1_DSM_CNTL
++#define TCP_GATCL1_DSM_CNTL__SEL_DSM_TCP_GATCL1_IRRITATOR_DATA_A0__SHIFT 0x0
++#define TCP_GATCL1_DSM_CNTL__SEL_DSM_TCP_GATCL1_IRRITATOR_DATA_A1__SHIFT 0x1
++#define TCP_GATCL1_DSM_CNTL__TCP_GATCL1_ENABLE_SINGLE_WRITE_A__SHIFT 0x2
++#define TCP_GATCL1_DSM_CNTL__SEL_DSM_TCP_GATCL1_IRRITATOR_DATA_A0_MASK 0x00000001L
++#define TCP_GATCL1_DSM_CNTL__SEL_DSM_TCP_GATCL1_IRRITATOR_DATA_A1_MASK 0x00000002L
++#define TCP_GATCL1_DSM_CNTL__TCP_GATCL1_ENABLE_SINGLE_WRITE_A_MASK 0x00000004L
++//TCP_CNTL2
++#define TCP_CNTL2__LS_DISABLE_CLOCKS__SHIFT 0x0
++#define TCP_CNTL2__LS_DISABLE_CLOCKS_MASK 0x000000FFL
++//TCP_UTCL1_CNTL1
++#define TCP_UTCL1_CNTL1__FORCE_4K_L2_RESP__SHIFT 0x0
++#define TCP_UTCL1_CNTL1__GPUVM_64K_DEFAULT__SHIFT 0x1
++#define TCP_UTCL1_CNTL1__GPUVM_PERM_MODE__SHIFT 0x2
++#define TCP_UTCL1_CNTL1__RESP_MODE__SHIFT 0x3
++#define TCP_UTCL1_CNTL1__RESP_FAULT_MODE__SHIFT 0x5
++#define TCP_UTCL1_CNTL1__CLIENTID__SHIFT 0x7
++#define TCP_UTCL1_CNTL1__REG_INV_VMID__SHIFT 0x13
++#define TCP_UTCL1_CNTL1__REG_INV_ALL_VMID__SHIFT 0x17
++#define TCP_UTCL1_CNTL1__REG_INV_TOGGLE__SHIFT 0x18
++#define TCP_UTCL1_CNTL1__CLIENT_INVALIDATE_ALL_VMID__SHIFT 0x19
++#define TCP_UTCL1_CNTL1__FORCE_MISS__SHIFT 0x1a
++#define TCP_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2__SHIFT 0x1c
++#define TCP_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2__SHIFT 0x1e
++#define TCP_UTCL1_CNTL1__FORCE_4K_L2_RESP_MASK 0x00000001L
++#define TCP_UTCL1_CNTL1__GPUVM_64K_DEFAULT_MASK 0x00000002L
++#define TCP_UTCL1_CNTL1__GPUVM_PERM_MODE_MASK 0x00000004L
++#define TCP_UTCL1_CNTL1__RESP_MODE_MASK 0x00000018L
++#define TCP_UTCL1_CNTL1__RESP_FAULT_MODE_MASK 0x00000060L
++#define TCP_UTCL1_CNTL1__CLIENTID_MASK 0x0000FF80L
++#define TCP_UTCL1_CNTL1__REG_INV_VMID_MASK 0x00780000L
++#define TCP_UTCL1_CNTL1__REG_INV_ALL_VMID_MASK 0x00800000L
++#define TCP_UTCL1_CNTL1__REG_INV_TOGGLE_MASK 0x01000000L
++#define TCP_UTCL1_CNTL1__CLIENT_INVALIDATE_ALL_VMID_MASK 0x02000000L
++#define TCP_UTCL1_CNTL1__FORCE_MISS_MASK 0x04000000L
++#define TCP_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2_MASK 0x30000000L
++#define TCP_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2_MASK 0xC0000000L
++//TCP_UTCL1_CNTL2
++#define TCP_UTCL1_CNTL2__SPARE__SHIFT 0x0
++#define TCP_UTCL1_CNTL2__MTYPE_OVRD_DIS__SHIFT 0x9
++#define TCP_UTCL1_CNTL2__ANY_LINE_VALID__SHIFT 0xa
++#define TCP_UTCL1_CNTL2__GPUVM_INV_MODE__SHIFT 0xc
++#define TCP_UTCL1_CNTL2__FORCE_SNOOP__SHIFT 0xe
++#define TCP_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK__SHIFT 0xf
++#define TCP_UTCL1_CNTL2__FORCE_FRAG_2M_TO_64K__SHIFT 0x1a
++#define TCP_UTCL1_CNTL2__SPARE_MASK 0x000000FFL
++#define TCP_UTCL1_CNTL2__MTYPE_OVRD_DIS_MASK 0x00000200L
++#define TCP_UTCL1_CNTL2__ANY_LINE_VALID_MASK 0x00000400L
++#define TCP_UTCL1_CNTL2__GPUVM_INV_MODE_MASK 0x00001000L
++#define TCP_UTCL1_CNTL2__FORCE_SNOOP_MASK 0x00004000L
++#define TCP_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK_MASK 0x00008000L
++#define TCP_UTCL1_CNTL2__FORCE_FRAG_2M_TO_64K_MASK 0x04000000L
++//TCP_UTCL1_STATUS
++#define TCP_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
++#define TCP_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
++#define TCP_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
++#define TCP_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
++#define TCP_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
++#define TCP_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
++//TCP_PERFCOUNTER_FILTER
++#define TCP_PERFCOUNTER_FILTER__BUFFER__SHIFT 0x0
++#define TCP_PERFCOUNTER_FILTER__FLAT__SHIFT 0x1
++#define TCP_PERFCOUNTER_FILTER__DIM__SHIFT 0x2
++#define TCP_PERFCOUNTER_FILTER__DATA_FORMAT__SHIFT 0x5
++#define TCP_PERFCOUNTER_FILTER__NUM_FORMAT__SHIFT 0xb
++#define TCP_PERFCOUNTER_FILTER__SW_MODE__SHIFT 0xf
++#define TCP_PERFCOUNTER_FILTER__NUM_SAMPLES__SHIFT 0x14
++#define TCP_PERFCOUNTER_FILTER__OPCODE_TYPE__SHIFT 0x16
++#define TCP_PERFCOUNTER_FILTER__GLC__SHIFT 0x19
++#define TCP_PERFCOUNTER_FILTER__SLC__SHIFT 0x1a
++#define TCP_PERFCOUNTER_FILTER__COMPRESSION_ENABLE__SHIFT 0x1b
++#define TCP_PERFCOUNTER_FILTER__ADDR_MODE__SHIFT 0x1c
++#define TCP_PERFCOUNTER_FILTER__BUFFER_MASK 0x00000001L
++#define TCP_PERFCOUNTER_FILTER__FLAT_MASK 0x00000002L
++#define TCP_PERFCOUNTER_FILTER__DIM_MASK 0x0000001CL
++#define TCP_PERFCOUNTER_FILTER__DATA_FORMAT_MASK 0x000007E0L
++#define TCP_PERFCOUNTER_FILTER__NUM_FORMAT_MASK 0x00007800L
++#define TCP_PERFCOUNTER_FILTER__SW_MODE_MASK 0x000F8000L
++#define TCP_PERFCOUNTER_FILTER__NUM_SAMPLES_MASK 0x00300000L
++#define TCP_PERFCOUNTER_FILTER__OPCODE_TYPE_MASK 0x01C00000L
++#define TCP_PERFCOUNTER_FILTER__GLC_MASK 0x02000000L
++#define TCP_PERFCOUNTER_FILTER__SLC_MASK 0x04000000L
++#define TCP_PERFCOUNTER_FILTER__COMPRESSION_ENABLE_MASK 0x08000000L
++#define TCP_PERFCOUNTER_FILTER__ADDR_MODE_MASK 0x70000000L
++//TCP_PERFCOUNTER_FILTER_EN
++#define TCP_PERFCOUNTER_FILTER_EN__BUFFER__SHIFT 0x0
++#define TCP_PERFCOUNTER_FILTER_EN__FLAT__SHIFT 0x1
++#define TCP_PERFCOUNTER_FILTER_EN__DIM__SHIFT 0x2
++#define TCP_PERFCOUNTER_FILTER_EN__DATA_FORMAT__SHIFT 0x3
++#define TCP_PERFCOUNTER_FILTER_EN__NUM_FORMAT__SHIFT 0x4
++#define TCP_PERFCOUNTER_FILTER_EN__SW_MODE__SHIFT 0x5
++#define TCP_PERFCOUNTER_FILTER_EN__NUM_SAMPLES__SHIFT 0x6
++#define TCP_PERFCOUNTER_FILTER_EN__OPCODE_TYPE__SHIFT 0x7
++#define TCP_PERFCOUNTER_FILTER_EN__GLC__SHIFT 0x8
++#define TCP_PERFCOUNTER_FILTER_EN__SLC__SHIFT 0x9
++#define TCP_PERFCOUNTER_FILTER_EN__COMPRESSION_ENABLE__SHIFT 0xa
++#define TCP_PERFCOUNTER_FILTER_EN__ADDR_MODE__SHIFT 0xb
++#define TCP_PERFCOUNTER_FILTER_EN__BUFFER_MASK 0x00000001L
++#define TCP_PERFCOUNTER_FILTER_EN__FLAT_MASK 0x00000002L
++#define TCP_PERFCOUNTER_FILTER_EN__DIM_MASK 0x00000004L
++#define TCP_PERFCOUNTER_FILTER_EN__DATA_FORMAT_MASK 0x00000008L
++#define TCP_PERFCOUNTER_FILTER_EN__NUM_FORMAT_MASK 0x00000010L
++#define TCP_PERFCOUNTER_FILTER_EN__SW_MODE_MASK 0x00000020L
++#define TCP_PERFCOUNTER_FILTER_EN__NUM_SAMPLES_MASK 0x00000040L
++#define TCP_PERFCOUNTER_FILTER_EN__OPCODE_TYPE_MASK 0x00000080L
++#define TCP_PERFCOUNTER_FILTER_EN__GLC_MASK 0x00000100L
++#define TCP_PERFCOUNTER_FILTER_EN__SLC_MASK 0x00000200L
++#define TCP_PERFCOUNTER_FILTER_EN__COMPRESSION_ENABLE_MASK 0x00000400L
++#define TCP_PERFCOUNTER_FILTER_EN__ADDR_MODE_MASK 0x00000800L
++
++
++// addressBlock: gc_gdspdec
++//GDS_VMID0_BASE
++#define GDS_VMID0_BASE__BASE__SHIFT 0x0
++#define GDS_VMID0_BASE__BASE_MASK 0x0000FFFFL
++//GDS_VMID0_SIZE
++#define GDS_VMID0_SIZE__SIZE__SHIFT 0x0
++#define GDS_VMID0_SIZE__SIZE_MASK 0x0001FFFFL
++//GDS_VMID1_BASE
++#define GDS_VMID1_BASE__BASE__SHIFT 0x0
++#define GDS_VMID1_BASE__BASE_MASK 0x0000FFFFL
++//GDS_VMID1_SIZE
++#define GDS_VMID1_SIZE__SIZE__SHIFT 0x0
++#define GDS_VMID1_SIZE__SIZE_MASK 0x0001FFFFL
++//GDS_VMID2_BASE
++#define GDS_VMID2_BASE__BASE__SHIFT 0x0
++#define GDS_VMID2_BASE__BASE_MASK 0x0000FFFFL
++//GDS_VMID2_SIZE
++#define GDS_VMID2_SIZE__SIZE__SHIFT 0x0
++#define GDS_VMID2_SIZE__SIZE_MASK 0x0001FFFFL
++//GDS_VMID3_BASE
++#define GDS_VMID3_BASE__BASE__SHIFT 0x0
++#define GDS_VMID3_BASE__BASE_MASK 0x0000FFFFL
++//GDS_VMID3_SIZE
++#define GDS_VMID3_SIZE__SIZE__SHIFT 0x0
++#define GDS_VMID3_SIZE__SIZE_MASK 0x0001FFFFL
++//GDS_VMID4_BASE
++#define GDS_VMID4_BASE__BASE__SHIFT 0x0
++#define GDS_VMID4_BASE__BASE_MASK 0x0000FFFFL
++//GDS_VMID4_SIZE
++#define GDS_VMID4_SIZE__SIZE__SHIFT 0x0
++#define GDS_VMID4_SIZE__SIZE_MASK 0x0001FFFFL
++//GDS_VMID5_BASE
++#define GDS_VMID5_BASE__BASE__SHIFT 0x0
++#define GDS_VMID5_BASE__BASE_MASK 0x0000FFFFL
++//GDS_VMID5_SIZE
++#define GDS_VMID5_SIZE__SIZE__SHIFT 0x0
++#define GDS_VMID5_SIZE__SIZE_MASK 0x0001FFFFL
++//GDS_VMID6_BASE
++#define GDS_VMID6_BASE__BASE__SHIFT 0x0
++#define GDS_VMID6_BASE__BASE_MASK 0x0000FFFFL
++//GDS_VMID6_SIZE
++#define GDS_VMID6_SIZE__SIZE__SHIFT 0x0
++#define GDS_VMID6_SIZE__SIZE_MASK 0x0001FFFFL
++//GDS_VMID7_BASE
++#define GDS_VMID7_BASE__BASE__SHIFT 0x0
++#define GDS_VMID7_BASE__BASE_MASK 0x0000FFFFL
++//GDS_VMID7_SIZE
++#define GDS_VMID7_SIZE__SIZE__SHIFT 0x0
++#define GDS_VMID7_SIZE__SIZE_MASK 0x0001FFFFL
++//GDS_VMID8_BASE
++#define GDS_VMID8_BASE__BASE__SHIFT 0x0
++#define GDS_VMID8_BASE__BASE_MASK 0x0000FFFFL
++//GDS_VMID8_SIZE
++#define GDS_VMID8_SIZE__SIZE__SHIFT 0x0
++#define GDS_VMID8_SIZE__SIZE_MASK 0x0001FFFFL
++//GDS_VMID9_BASE
++#define GDS_VMID9_BASE__BASE__SHIFT 0x0
++#define GDS_VMID9_BASE__BASE_MASK 0x0000FFFFL
++//GDS_VMID9_SIZE
++#define GDS_VMID9_SIZE__SIZE__SHIFT 0x0
++#define GDS_VMID9_SIZE__SIZE_MASK 0x0001FFFFL
++//GDS_VMID10_BASE
++#define GDS_VMID10_BASE__BASE__SHIFT 0x0
++#define GDS_VMID10_BASE__BASE_MASK 0x0000FFFFL
++//GDS_VMID10_SIZE
++#define GDS_VMID10_SIZE__SIZE__SHIFT 0x0
++#define GDS_VMID10_SIZE__SIZE_MASK 0x0001FFFFL
++//GDS_VMID11_BASE
++#define GDS_VMID11_BASE__BASE__SHIFT 0x0
++#define GDS_VMID11_BASE__BASE_MASK 0x0000FFFFL
++//GDS_VMID11_SIZE
++#define GDS_VMID11_SIZE__SIZE__SHIFT 0x0
++#define GDS_VMID11_SIZE__SIZE_MASK 0x0001FFFFL
++//GDS_VMID12_BASE
++#define GDS_VMID12_BASE__BASE__SHIFT 0x0
++#define GDS_VMID12_BASE__BASE_MASK 0x0000FFFFL
++//GDS_VMID12_SIZE
++#define GDS_VMID12_SIZE__SIZE__SHIFT 0x0
++#define GDS_VMID12_SIZE__SIZE_MASK 0x0001FFFFL
++//GDS_VMID13_BASE
++#define GDS_VMID13_BASE__BASE__SHIFT 0x0
++#define GDS_VMID13_BASE__BASE_MASK 0x0000FFFFL
++//GDS_VMID13_SIZE
++#define GDS_VMID13_SIZE__SIZE__SHIFT 0x0
++#define GDS_VMID13_SIZE__SIZE_MASK 0x0001FFFFL
++//GDS_VMID14_BASE
++#define GDS_VMID14_BASE__BASE__SHIFT 0x0
++#define GDS_VMID14_BASE__BASE_MASK 0x0000FFFFL
++//GDS_VMID14_SIZE
++#define GDS_VMID14_SIZE__SIZE__SHIFT 0x0
++#define GDS_VMID14_SIZE__SIZE_MASK 0x0001FFFFL
++//GDS_VMID15_BASE
++#define GDS_VMID15_BASE__BASE__SHIFT 0x0
++#define GDS_VMID15_BASE__BASE_MASK 0x0000FFFFL
++//GDS_VMID15_SIZE
++#define GDS_VMID15_SIZE__SIZE__SHIFT 0x0
++#define GDS_VMID15_SIZE__SIZE_MASK 0x0001FFFFL
++//GDS_GWS_VMID0
++#define GDS_GWS_VMID0__BASE__SHIFT 0x0
++#define GDS_GWS_VMID0__SIZE__SHIFT 0x10
++#define GDS_GWS_VMID0__BASE_MASK 0x0000003FL
++#define GDS_GWS_VMID0__SIZE_MASK 0x007F0000L
++//GDS_GWS_VMID1
++#define GDS_GWS_VMID1__BASE__SHIFT 0x0
++#define GDS_GWS_VMID1__SIZE__SHIFT 0x10
++#define GDS_GWS_VMID1__BASE_MASK 0x0000003FL
++#define GDS_GWS_VMID1__SIZE_MASK 0x007F0000L
++//GDS_GWS_VMID2
++#define GDS_GWS_VMID2__BASE__SHIFT 0x0
++#define GDS_GWS_VMID2__SIZE__SHIFT 0x10
++#define GDS_GWS_VMID2__BASE_MASK 0x0000003FL
++#define GDS_GWS_VMID2__SIZE_MASK 0x007F0000L
++//GDS_GWS_VMID3
++#define GDS_GWS_VMID3__BASE__SHIFT 0x0
++#define GDS_GWS_VMID3__SIZE__SHIFT 0x10
++#define GDS_GWS_VMID3__BASE_MASK 0x0000003FL
++#define GDS_GWS_VMID3__SIZE_MASK 0x007F0000L
++//GDS_GWS_VMID4
++#define GDS_GWS_VMID4__BASE__SHIFT 0x0
++#define GDS_GWS_VMID4__SIZE__SHIFT 0x10
++#define GDS_GWS_VMID4__BASE_MASK 0x0000003FL
++#define GDS_GWS_VMID4__SIZE_MASK 0x007F0000L
++//GDS_GWS_VMID5
++#define GDS_GWS_VMID5__BASE__SHIFT 0x0
++#define GDS_GWS_VMID5__SIZE__SHIFT 0x10
++#define GDS_GWS_VMID5__BASE_MASK 0x0000003FL
++#define GDS_GWS_VMID5__SIZE_MASK 0x007F0000L
++//GDS_GWS_VMID6
++#define GDS_GWS_VMID6__BASE__SHIFT 0x0
++#define GDS_GWS_VMID6__SIZE__SHIFT 0x10
++#define GDS_GWS_VMID6__BASE_MASK 0x0000003FL
++#define GDS_GWS_VMID6__SIZE_MASK 0x007F0000L
++//GDS_GWS_VMID7
++#define GDS_GWS_VMID7__BASE__SHIFT 0x0
++#define GDS_GWS_VMID7__SIZE__SHIFT 0x10
++#define GDS_GWS_VMID7__BASE_MASK 0x0000003FL
++#define GDS_GWS_VMID7__SIZE_MASK 0x007F0000L
++//GDS_GWS_VMID8
++#define GDS_GWS_VMID8__BASE__SHIFT 0x0
++#define GDS_GWS_VMID8__SIZE__SHIFT 0x10
++#define GDS_GWS_VMID8__BASE_MASK 0x0000003FL
++#define GDS_GWS_VMID8__SIZE_MASK 0x007F0000L
++//GDS_GWS_VMID9
++#define GDS_GWS_VMID9__BASE__SHIFT 0x0
++#define GDS_GWS_VMID9__SIZE__SHIFT 0x10
++#define GDS_GWS_VMID9__BASE_MASK 0x0000003FL
++#define GDS_GWS_VMID9__SIZE_MASK 0x007F0000L
++//GDS_GWS_VMID10
++#define GDS_GWS_VMID10__BASE__SHIFT 0x0
++#define GDS_GWS_VMID10__SIZE__SHIFT 0x10
++#define GDS_GWS_VMID10__BASE_MASK 0x0000003FL
++#define GDS_GWS_VMID10__SIZE_MASK 0x007F0000L
++//GDS_GWS_VMID11
++#define GDS_GWS_VMID11__BASE__SHIFT 0x0
++#define GDS_GWS_VMID11__SIZE__SHIFT 0x10
++#define GDS_GWS_VMID11__BASE_MASK 0x0000003FL
++#define GDS_GWS_VMID11__SIZE_MASK 0x007F0000L
++//GDS_GWS_VMID12
++#define GDS_GWS_VMID12__BASE__SHIFT 0x0
++#define GDS_GWS_VMID12__SIZE__SHIFT 0x10
++#define GDS_GWS_VMID12__BASE_MASK 0x0000003FL
++#define GDS_GWS_VMID12__SIZE_MASK 0x007F0000L
++//GDS_GWS_VMID13
++#define GDS_GWS_VMID13__BASE__SHIFT 0x0
++#define GDS_GWS_VMID13__SIZE__SHIFT 0x10
++#define GDS_GWS_VMID13__BASE_MASK 0x0000003FL
++#define GDS_GWS_VMID13__SIZE_MASK 0x007F0000L
++//GDS_GWS_VMID14
++#define GDS_GWS_VMID14__BASE__SHIFT 0x0
++#define GDS_GWS_VMID14__SIZE__SHIFT 0x10
++#define GDS_GWS_VMID14__BASE_MASK 0x0000003FL
++#define GDS_GWS_VMID14__SIZE_MASK 0x007F0000L
++//GDS_GWS_VMID15
++#define GDS_GWS_VMID15__BASE__SHIFT 0x0
++#define GDS_GWS_VMID15__SIZE__SHIFT 0x10
++#define GDS_GWS_VMID15__BASE_MASK 0x0000003FL
++#define GDS_GWS_VMID15__SIZE_MASK 0x007F0000L
++//GDS_OA_VMID0
++#define GDS_OA_VMID0__MASK__SHIFT 0x0
++#define GDS_OA_VMID0__UNUSED__SHIFT 0x10
++#define GDS_OA_VMID0__MASK_MASK 0x0000FFFFL
++#define GDS_OA_VMID0__UNUSED_MASK 0xFFFF0000L
++//GDS_OA_VMID1
++#define GDS_OA_VMID1__MASK__SHIFT 0x0
++#define GDS_OA_VMID1__UNUSED__SHIFT 0x10
++#define GDS_OA_VMID1__MASK_MASK 0x0000FFFFL
++#define GDS_OA_VMID1__UNUSED_MASK 0xFFFF0000L
++//GDS_OA_VMID2
++#define GDS_OA_VMID2__MASK__SHIFT 0x0
++#define GDS_OA_VMID2__UNUSED__SHIFT 0x10
++#define GDS_OA_VMID2__MASK_MASK 0x0000FFFFL
++#define GDS_OA_VMID2__UNUSED_MASK 0xFFFF0000L
++//GDS_OA_VMID3
++#define GDS_OA_VMID3__MASK__SHIFT 0x0
++#define GDS_OA_VMID3__UNUSED__SHIFT 0x10
++#define GDS_OA_VMID3__MASK_MASK 0x0000FFFFL
++#define GDS_OA_VMID3__UNUSED_MASK 0xFFFF0000L
++//GDS_OA_VMID4
++#define GDS_OA_VMID4__MASK__SHIFT 0x0
++#define GDS_OA_VMID4__UNUSED__SHIFT 0x10
++#define GDS_OA_VMID4__MASK_MASK 0x0000FFFFL
++#define GDS_OA_VMID4__UNUSED_MASK 0xFFFF0000L
++//GDS_OA_VMID5
++#define GDS_OA_VMID5__MASK__SHIFT 0x0
++#define GDS_OA_VMID5__UNUSED__SHIFT 0x10
++#define GDS_OA_VMID5__MASK_MASK 0x0000FFFFL
++#define GDS_OA_VMID5__UNUSED_MASK 0xFFFF0000L
++//GDS_OA_VMID6
++#define GDS_OA_VMID6__MASK__SHIFT 0x0
++#define GDS_OA_VMID6__UNUSED__SHIFT 0x10
++#define GDS_OA_VMID6__MASK_MASK 0x0000FFFFL
++#define GDS_OA_VMID6__UNUSED_MASK 0xFFFF0000L
++//GDS_OA_VMID7
++#define GDS_OA_VMID7__MASK__SHIFT 0x0
++#define GDS_OA_VMID7__UNUSED__SHIFT 0x10
++#define GDS_OA_VMID7__MASK_MASK 0x0000FFFFL
++#define GDS_OA_VMID7__UNUSED_MASK 0xFFFF0000L
++//GDS_OA_VMID8
++#define GDS_OA_VMID8__MASK__SHIFT 0x0
++#define GDS_OA_VMID8__UNUSED__SHIFT 0x10
++#define GDS_OA_VMID8__MASK_MASK 0x0000FFFFL
++#define GDS_OA_VMID8__UNUSED_MASK 0xFFFF0000L
++//GDS_OA_VMID9
++#define GDS_OA_VMID9__MASK__SHIFT 0x0
++#define GDS_OA_VMID9__UNUSED__SHIFT 0x10
++#define GDS_OA_VMID9__MASK_MASK 0x0000FFFFL
++#define GDS_OA_VMID9__UNUSED_MASK 0xFFFF0000L
++//GDS_OA_VMID10
++#define GDS_OA_VMID10__MASK__SHIFT 0x0
++#define GDS_OA_VMID10__UNUSED__SHIFT 0x10
++#define GDS_OA_VMID10__MASK_MASK 0x0000FFFFL
++#define GDS_OA_VMID10__UNUSED_MASK 0xFFFF0000L
++//GDS_OA_VMID11
++#define GDS_OA_VMID11__MASK__SHIFT 0x0
++#define GDS_OA_VMID11__UNUSED__SHIFT 0x10
++#define GDS_OA_VMID11__MASK_MASK 0x0000FFFFL
++#define GDS_OA_VMID11__UNUSED_MASK 0xFFFF0000L
++//GDS_OA_VMID12
++#define GDS_OA_VMID12__MASK__SHIFT 0x0
++#define GDS_OA_VMID12__UNUSED__SHIFT 0x10
++#define GDS_OA_VMID12__MASK_MASK 0x0000FFFFL
++#define GDS_OA_VMID12__UNUSED_MASK 0xFFFF0000L
++//GDS_OA_VMID13
++#define GDS_OA_VMID13__MASK__SHIFT 0x0
++#define GDS_OA_VMID13__UNUSED__SHIFT 0x10
++#define GDS_OA_VMID13__MASK_MASK 0x0000FFFFL
++#define GDS_OA_VMID13__UNUSED_MASK 0xFFFF0000L
++//GDS_OA_VMID14
++#define GDS_OA_VMID14__MASK__SHIFT 0x0
++#define GDS_OA_VMID14__UNUSED__SHIFT 0x10
++#define GDS_OA_VMID14__MASK_MASK 0x0000FFFFL
++#define GDS_OA_VMID14__UNUSED_MASK 0xFFFF0000L
++//GDS_OA_VMID15
++#define GDS_OA_VMID15__MASK__SHIFT 0x0
++#define GDS_OA_VMID15__UNUSED__SHIFT 0x10
++#define GDS_OA_VMID15__MASK_MASK 0x0000FFFFL
++#define GDS_OA_VMID15__UNUSED_MASK 0xFFFF0000L
++//GDS_GWS_RESET0
++#define GDS_GWS_RESET0__RESOURCE0_RESET__SHIFT 0x0
++#define GDS_GWS_RESET0__RESOURCE1_RESET__SHIFT 0x1
++#define GDS_GWS_RESET0__RESOURCE2_RESET__SHIFT 0x2
++#define GDS_GWS_RESET0__RESOURCE3_RESET__SHIFT 0x3
++#define GDS_GWS_RESET0__RESOURCE4_RESET__SHIFT 0x4
++#define GDS_GWS_RESET0__RESOURCE5_RESET__SHIFT 0x5
++#define GDS_GWS_RESET0__RESOURCE6_RESET__SHIFT 0x6
++#define GDS_GWS_RESET0__RESOURCE7_RESET__SHIFT 0x7
++#define GDS_GWS_RESET0__RESOURCE8_RESET__SHIFT 0x8
++#define GDS_GWS_RESET0__RESOURCE9_RESET__SHIFT 0x9
++#define GDS_GWS_RESET0__RESOURCE10_RESET__SHIFT 0xa
++#define GDS_GWS_RESET0__RESOURCE11_RESET__SHIFT 0xb
++#define GDS_GWS_RESET0__RESOURCE12_RESET__SHIFT 0xc
++#define GDS_GWS_RESET0__RESOURCE13_RESET__SHIFT 0xd
++#define GDS_GWS_RESET0__RESOURCE14_RESET__SHIFT 0xe
++#define GDS_GWS_RESET0__RESOURCE15_RESET__SHIFT 0xf
++#define GDS_GWS_RESET0__RESOURCE16_RESET__SHIFT 0x10
++#define GDS_GWS_RESET0__RESOURCE17_RESET__SHIFT 0x11
++#define GDS_GWS_RESET0__RESOURCE18_RESET__SHIFT 0x12
++#define GDS_GWS_RESET0__RESOURCE19_RESET__SHIFT 0x13
++#define GDS_GWS_RESET0__RESOURCE20_RESET__SHIFT 0x14
++#define GDS_GWS_RESET0__RESOURCE21_RESET__SHIFT 0x15
++#define GDS_GWS_RESET0__RESOURCE22_RESET__SHIFT 0x16
++#define GDS_GWS_RESET0__RESOURCE23_RESET__SHIFT 0x17
++#define GDS_GWS_RESET0__RESOURCE24_RESET__SHIFT 0x18
++#define GDS_GWS_RESET0__RESOURCE25_RESET__SHIFT 0x19
++#define GDS_GWS_RESET0__RESOURCE26_RESET__SHIFT 0x1a
++#define GDS_GWS_RESET0__RESOURCE27_RESET__SHIFT 0x1b
++#define GDS_GWS_RESET0__RESOURCE28_RESET__SHIFT 0x1c
++#define GDS_GWS_RESET0__RESOURCE29_RESET__SHIFT 0x1d
++#define GDS_GWS_RESET0__RESOURCE30_RESET__SHIFT 0x1e
++#define GDS_GWS_RESET0__RESOURCE31_RESET__SHIFT 0x1f
++#define GDS_GWS_RESET0__RESOURCE0_RESET_MASK 0x00000001L
++#define GDS_GWS_RESET0__RESOURCE1_RESET_MASK 0x00000002L
++#define GDS_GWS_RESET0__RESOURCE2_RESET_MASK 0x00000004L
++#define GDS_GWS_RESET0__RESOURCE3_RESET_MASK 0x00000008L
++#define GDS_GWS_RESET0__RESOURCE4_RESET_MASK 0x00000010L
++#define GDS_GWS_RESET0__RESOURCE5_RESET_MASK 0x00000020L
++#define GDS_GWS_RESET0__RESOURCE6_RESET_MASK 0x00000040L
++#define GDS_GWS_RESET0__RESOURCE7_RESET_MASK 0x00000080L
++#define GDS_GWS_RESET0__RESOURCE8_RESET_MASK 0x00000100L
++#define GDS_GWS_RESET0__RESOURCE9_RESET_MASK 0x00000200L
++#define GDS_GWS_RESET0__RESOURCE10_RESET_MASK 0x00000400L
++#define GDS_GWS_RESET0__RESOURCE11_RESET_MASK 0x00000800L
++#define GDS_GWS_RESET0__RESOURCE12_RESET_MASK 0x00001000L
++#define GDS_GWS_RESET0__RESOURCE13_RESET_MASK 0x00002000L
++#define GDS_GWS_RESET0__RESOURCE14_RESET_MASK 0x00004000L
++#define GDS_GWS_RESET0__RESOURCE15_RESET_MASK 0x00008000L
++#define GDS_GWS_RESET0__RESOURCE16_RESET_MASK 0x00010000L
++#define GDS_GWS_RESET0__RESOURCE17_RESET_MASK 0x00020000L
++#define GDS_GWS_RESET0__RESOURCE18_RESET_MASK 0x00040000L
++#define GDS_GWS_RESET0__RESOURCE19_RESET_MASK 0x00080000L
++#define GDS_GWS_RESET0__RESOURCE20_RESET_MASK 0x00100000L
++#define GDS_GWS_RESET0__RESOURCE21_RESET_MASK 0x00200000L
++#define GDS_GWS_RESET0__RESOURCE22_RESET_MASK 0x00400000L
++#define GDS_GWS_RESET0__RESOURCE23_RESET_MASK 0x00800000L
++#define GDS_GWS_RESET0__RESOURCE24_RESET_MASK 0x01000000L
++#define GDS_GWS_RESET0__RESOURCE25_RESET_MASK 0x02000000L
++#define GDS_GWS_RESET0__RESOURCE26_RESET_MASK 0x04000000L
++#define GDS_GWS_RESET0__RESOURCE27_RESET_MASK 0x08000000L
++#define GDS_GWS_RESET0__RESOURCE28_RESET_MASK 0x10000000L
++#define GDS_GWS_RESET0__RESOURCE29_RESET_MASK 0x20000000L
++#define GDS_GWS_RESET0__RESOURCE30_RESET_MASK 0x40000000L
++#define GDS_GWS_RESET0__RESOURCE31_RESET_MASK 0x80000000L
++//GDS_GWS_RESET1
++#define GDS_GWS_RESET1__RESOURCE32_RESET__SHIFT 0x0
++#define GDS_GWS_RESET1__RESOURCE33_RESET__SHIFT 0x1
++#define GDS_GWS_RESET1__RESOURCE34_RESET__SHIFT 0x2
++#define GDS_GWS_RESET1__RESOURCE35_RESET__SHIFT 0x3
++#define GDS_GWS_RESET1__RESOURCE36_RESET__SHIFT 0x4
++#define GDS_GWS_RESET1__RESOURCE37_RESET__SHIFT 0x5
++#define GDS_GWS_RESET1__RESOURCE38_RESET__SHIFT 0x6
++#define GDS_GWS_RESET1__RESOURCE39_RESET__SHIFT 0x7
++#define GDS_GWS_RESET1__RESOURCE40_RESET__SHIFT 0x8
++#define GDS_GWS_RESET1__RESOURCE41_RESET__SHIFT 0x9
++#define GDS_GWS_RESET1__RESOURCE42_RESET__SHIFT 0xa
++#define GDS_GWS_RESET1__RESOURCE43_RESET__SHIFT 0xb
++#define GDS_GWS_RESET1__RESOURCE44_RESET__SHIFT 0xc
++#define GDS_GWS_RESET1__RESOURCE45_RESET__SHIFT 0xd
++#define GDS_GWS_RESET1__RESOURCE46_RESET__SHIFT 0xe
++#define GDS_GWS_RESET1__RESOURCE47_RESET__SHIFT 0xf
++#define GDS_GWS_RESET1__RESOURCE48_RESET__SHIFT 0x10
++#define GDS_GWS_RESET1__RESOURCE49_RESET__SHIFT 0x11
++#define GDS_GWS_RESET1__RESOURCE50_RESET__SHIFT 0x12
++#define GDS_GWS_RESET1__RESOURCE51_RESET__SHIFT 0x13
++#define GDS_GWS_RESET1__RESOURCE52_RESET__SHIFT 0x14
++#define GDS_GWS_RESET1__RESOURCE53_RESET__SHIFT 0x15
++#define GDS_GWS_RESET1__RESOURCE54_RESET__SHIFT 0x16
++#define GDS_GWS_RESET1__RESOURCE55_RESET__SHIFT 0x17
++#define GDS_GWS_RESET1__RESOURCE56_RESET__SHIFT 0x18
++#define GDS_GWS_RESET1__RESOURCE57_RESET__SHIFT 0x19
++#define GDS_GWS_RESET1__RESOURCE58_RESET__SHIFT 0x1a
++#define GDS_GWS_RESET1__RESOURCE59_RESET__SHIFT 0x1b
++#define GDS_GWS_RESET1__RESOURCE60_RESET__SHIFT 0x1c
++#define GDS_GWS_RESET1__RESOURCE61_RESET__SHIFT 0x1d
++#define GDS_GWS_RESET1__RESOURCE62_RESET__SHIFT 0x1e
++#define GDS_GWS_RESET1__RESOURCE63_RESET__SHIFT 0x1f
++#define GDS_GWS_RESET1__RESOURCE32_RESET_MASK 0x00000001L
++#define GDS_GWS_RESET1__RESOURCE33_RESET_MASK 0x00000002L
++#define GDS_GWS_RESET1__RESOURCE34_RESET_MASK 0x00000004L
++#define GDS_GWS_RESET1__RESOURCE35_RESET_MASK 0x00000008L
++#define GDS_GWS_RESET1__RESOURCE36_RESET_MASK 0x00000010L
++#define GDS_GWS_RESET1__RESOURCE37_RESET_MASK 0x00000020L
++#define GDS_GWS_RESET1__RESOURCE38_RESET_MASK 0x00000040L
++#define GDS_GWS_RESET1__RESOURCE39_RESET_MASK 0x00000080L
++#define GDS_GWS_RESET1__RESOURCE40_RESET_MASK 0x00000100L
++#define GDS_GWS_RESET1__RESOURCE41_RESET_MASK 0x00000200L
++#define GDS_GWS_RESET1__RESOURCE42_RESET_MASK 0x00000400L
++#define GDS_GWS_RESET1__RESOURCE43_RESET_MASK 0x00000800L
++#define GDS_GWS_RESET1__RESOURCE44_RESET_MASK 0x00001000L
++#define GDS_GWS_RESET1__RESOURCE45_RESET_MASK 0x00002000L
++#define GDS_GWS_RESET1__RESOURCE46_RESET_MASK 0x00004000L
++#define GDS_GWS_RESET1__RESOURCE47_RESET_MASK 0x00008000L
++#define GDS_GWS_RESET1__RESOURCE48_RESET_MASK 0x00010000L
++#define GDS_GWS_RESET1__RESOURCE49_RESET_MASK 0x00020000L
++#define GDS_GWS_RESET1__RESOURCE50_RESET_MASK 0x00040000L
++#define GDS_GWS_RESET1__RESOURCE51_RESET_MASK 0x00080000L
++#define GDS_GWS_RESET1__RESOURCE52_RESET_MASK 0x00100000L
++#define GDS_GWS_RESET1__RESOURCE53_RESET_MASK 0x00200000L
++#define GDS_GWS_RESET1__RESOURCE54_RESET_MASK 0x00400000L
++#define GDS_GWS_RESET1__RESOURCE55_RESET_MASK 0x00800000L
++#define GDS_GWS_RESET1__RESOURCE56_RESET_MASK 0x01000000L
++#define GDS_GWS_RESET1__RESOURCE57_RESET_MASK 0x02000000L
++#define GDS_GWS_RESET1__RESOURCE58_RESET_MASK 0x04000000L
++#define GDS_GWS_RESET1__RESOURCE59_RESET_MASK 0x08000000L
++#define GDS_GWS_RESET1__RESOURCE60_RESET_MASK 0x10000000L
++#define GDS_GWS_RESET1__RESOURCE61_RESET_MASK 0x20000000L
++#define GDS_GWS_RESET1__RESOURCE62_RESET_MASK 0x40000000L
++#define GDS_GWS_RESET1__RESOURCE63_RESET_MASK 0x80000000L
++//GDS_GWS_RESOURCE_RESET
++#define GDS_GWS_RESOURCE_RESET__RESET__SHIFT 0x0
++#define GDS_GWS_RESOURCE_RESET__RESOURCE_ID__SHIFT 0x8
++#define GDS_GWS_RESOURCE_RESET__RESET_MASK 0x00000001L
++#define GDS_GWS_RESOURCE_RESET__RESOURCE_ID_MASK 0x0000FF00L
++//GDS_COMPUTE_MAX_WAVE_ID
++#define GDS_COMPUTE_MAX_WAVE_ID__MAX_WAVE_ID__SHIFT 0x0
++#define GDS_COMPUTE_MAX_WAVE_ID__MAX_WAVE_ID_MASK 0x00000FFFL
++//GDS_OA_RESET_MASK
++#define GDS_OA_RESET_MASK__ME0_GFXHP3D_PIX_RESET__SHIFT 0x0
++#define GDS_OA_RESET_MASK__ME0_GFXHP3D_VTX_RESET__SHIFT 0x1
++#define GDS_OA_RESET_MASK__ME0_CS_RESET__SHIFT 0x2
++#define GDS_OA_RESET_MASK__ME0_GFXHP3D_GS_RESET__SHIFT 0x3
++#define GDS_OA_RESET_MASK__ME1_PIPE0_RESET__SHIFT 0x4
++#define GDS_OA_RESET_MASK__ME1_PIPE1_RESET__SHIFT 0x5
++#define GDS_OA_RESET_MASK__ME1_PIPE2_RESET__SHIFT 0x6
++#define GDS_OA_RESET_MASK__ME1_PIPE3_RESET__SHIFT 0x7
++#define GDS_OA_RESET_MASK__ME2_PIPE0_RESET__SHIFT 0x8
++#define GDS_OA_RESET_MASK__ME2_PIPE1_RESET__SHIFT 0x9
++#define GDS_OA_RESET_MASK__ME2_PIPE2_RESET__SHIFT 0xa
++#define GDS_OA_RESET_MASK__ME2_PIPE3_RESET__SHIFT 0xb
++#define GDS_OA_RESET_MASK__UNUSED1__SHIFT 0xc
++#define GDS_OA_RESET_MASK__ME0_GFXHP3D_PIX_RESET_MASK 0x00000001L
++#define GDS_OA_RESET_MASK__ME0_GFXHP3D_VTX_RESET_MASK 0x00000002L
++#define GDS_OA_RESET_MASK__ME0_CS_RESET_MASK 0x00000004L
++#define GDS_OA_RESET_MASK__ME0_GFXHP3D_GS_RESET_MASK 0x00000008L
++#define GDS_OA_RESET_MASK__ME1_PIPE0_RESET_MASK 0x00000010L
++#define GDS_OA_RESET_MASK__ME1_PIPE1_RESET_MASK 0x00000020L
++#define GDS_OA_RESET_MASK__ME1_PIPE2_RESET_MASK 0x00000040L
++#define GDS_OA_RESET_MASK__ME1_PIPE3_RESET_MASK 0x00000080L
++#define GDS_OA_RESET_MASK__ME2_PIPE0_RESET_MASK 0x00000100L
++#define GDS_OA_RESET_MASK__ME2_PIPE1_RESET_MASK 0x00000200L
++#define GDS_OA_RESET_MASK__ME2_PIPE2_RESET_MASK 0x00000400L
++#define GDS_OA_RESET_MASK__ME2_PIPE3_RESET_MASK 0x00000800L
++#define GDS_OA_RESET_MASK__UNUSED1_MASK 0xFFFFF000L
++//GDS_OA_RESET
++#define GDS_OA_RESET__RESET__SHIFT 0x0
++#define GDS_OA_RESET__PIPE_ID__SHIFT 0x8
++#define GDS_OA_RESET__RESET_MASK 0x00000001L
++#define GDS_OA_RESET__PIPE_ID_MASK 0x0000FF00L
++//GDS_ENHANCE
++#define GDS_ENHANCE__MISC__SHIFT 0x0
++#define GDS_ENHANCE__AUTO_INC_INDEX__SHIFT 0x10
++#define GDS_ENHANCE__CGPG_RESTORE__SHIFT 0x11
++#define GDS_ENHANCE__RD_BUF_TAG_MISS__SHIFT 0x12
++#define GDS_ENHANCE__GDSA_PC_CGTS_DIS__SHIFT 0x13
++#define GDS_ENHANCE__GDSO_PC_CGTS_DIS__SHIFT 0x14
++#define GDS_ENHANCE__WD_GDS_CSB_OVERRIDE__SHIFT 0x15
++#define GDS_ENHANCE__UNUSED__SHIFT 0x16
++#define GDS_ENHANCE__MISC_MASK 0x0000FFFFL
++#define GDS_ENHANCE__AUTO_INC_INDEX_MASK 0x00010000L
++#define GDS_ENHANCE__CGPG_RESTORE_MASK 0x00020000L
++#define GDS_ENHANCE__RD_BUF_TAG_MISS_MASK 0x00040000L
++#define GDS_ENHANCE__GDSA_PC_CGTS_DIS_MASK 0x00080000L
++#define GDS_ENHANCE__GDSO_PC_CGTS_DIS_MASK 0x00100000L
++#define GDS_ENHANCE__WD_GDS_CSB_OVERRIDE_MASK 0x00200000L
++#define GDS_ENHANCE__UNUSED_MASK 0xFFC00000L
++//GDS_OA_CGPG_RESTORE
++#define GDS_OA_CGPG_RESTORE__VMID__SHIFT 0x0
++#define GDS_OA_CGPG_RESTORE__MEID__SHIFT 0x8
++#define GDS_OA_CGPG_RESTORE__PIPEID__SHIFT 0xc
++#define GDS_OA_CGPG_RESTORE__QUEUEID__SHIFT 0x10
++#define GDS_OA_CGPG_RESTORE__UNUSED__SHIFT 0x14
++#define GDS_OA_CGPG_RESTORE__VMID_MASK 0x000000FFL
++#define GDS_OA_CGPG_RESTORE__MEID_MASK 0x00000F00L
++#define GDS_OA_CGPG_RESTORE__PIPEID_MASK 0x0000F000L
++#define GDS_OA_CGPG_RESTORE__QUEUEID_MASK 0x000F0000L
++#define GDS_OA_CGPG_RESTORE__UNUSED_MASK 0xFFF00000L
++//GDS_CS_CTXSW_STATUS
++#define GDS_CS_CTXSW_STATUS__R__SHIFT 0x0
++#define GDS_CS_CTXSW_STATUS__W__SHIFT 0x1
++#define GDS_CS_CTXSW_STATUS__UNUSED__SHIFT 0x2
++#define GDS_CS_CTXSW_STATUS__R_MASK 0x00000001L
++#define GDS_CS_CTXSW_STATUS__W_MASK 0x00000002L
++#define GDS_CS_CTXSW_STATUS__UNUSED_MASK 0xFFFFFFFCL
++//GDS_CS_CTXSW_CNT0
++#define GDS_CS_CTXSW_CNT0__UPDN__SHIFT 0x0
++#define GDS_CS_CTXSW_CNT0__PTR__SHIFT 0x10
++#define GDS_CS_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
++#define GDS_CS_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
++//GDS_CS_CTXSW_CNT1
++#define GDS_CS_CTXSW_CNT1__UPDN__SHIFT 0x0
++#define GDS_CS_CTXSW_CNT1__PTR__SHIFT 0x10
++#define GDS_CS_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
++#define GDS_CS_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
++//GDS_CS_CTXSW_CNT2
++#define GDS_CS_CTXSW_CNT2__UPDN__SHIFT 0x0
++#define GDS_CS_CTXSW_CNT2__PTR__SHIFT 0x10
++#define GDS_CS_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
++#define GDS_CS_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
++//GDS_CS_CTXSW_CNT3
++#define GDS_CS_CTXSW_CNT3__UPDN__SHIFT 0x0
++#define GDS_CS_CTXSW_CNT3__PTR__SHIFT 0x10
++#define GDS_CS_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
++#define GDS_CS_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
++//GDS_GFX_CTXSW_STATUS
++#define GDS_GFX_CTXSW_STATUS__R__SHIFT 0x0
++#define GDS_GFX_CTXSW_STATUS__W__SHIFT 0x1
++#define GDS_GFX_CTXSW_STATUS__UNUSED__SHIFT 0x2
++#define GDS_GFX_CTXSW_STATUS__R_MASK 0x00000001L
++#define GDS_GFX_CTXSW_STATUS__W_MASK 0x00000002L
++#define GDS_GFX_CTXSW_STATUS__UNUSED_MASK 0xFFFFFFFCL
++//GDS_VS_CTXSW_CNT0
++#define GDS_VS_CTXSW_CNT0__UPDN__SHIFT 0x0
++#define GDS_VS_CTXSW_CNT0__PTR__SHIFT 0x10
++#define GDS_VS_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
++#define GDS_VS_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
++//GDS_VS_CTXSW_CNT1
++#define GDS_VS_CTXSW_CNT1__UPDN__SHIFT 0x0
++#define GDS_VS_CTXSW_CNT1__PTR__SHIFT 0x10
++#define GDS_VS_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
++#define GDS_VS_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
++//GDS_VS_CTXSW_CNT2
++#define GDS_VS_CTXSW_CNT2__UPDN__SHIFT 0x0
++#define GDS_VS_CTXSW_CNT2__PTR__SHIFT 0x10
++#define GDS_VS_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
++#define GDS_VS_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
++//GDS_VS_CTXSW_CNT3
++#define GDS_VS_CTXSW_CNT3__UPDN__SHIFT 0x0
++#define GDS_VS_CTXSW_CNT3__PTR__SHIFT 0x10
++#define GDS_VS_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
++#define GDS_VS_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
++//GDS_PS0_CTXSW_CNT0
++#define GDS_PS0_CTXSW_CNT0__UPDN__SHIFT 0x0
++#define GDS_PS0_CTXSW_CNT0__PTR__SHIFT 0x10
++#define GDS_PS0_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
++#define GDS_PS0_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
++//GDS_PS0_CTXSW_CNT1
++#define GDS_PS0_CTXSW_CNT1__UPDN__SHIFT 0x0
++#define GDS_PS0_CTXSW_CNT1__PTR__SHIFT 0x10
++#define GDS_PS0_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
++#define GDS_PS0_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
++//GDS_PS0_CTXSW_CNT2
++#define GDS_PS0_CTXSW_CNT2__UPDN__SHIFT 0x0
++#define GDS_PS0_CTXSW_CNT2__PTR__SHIFT 0x10
++#define GDS_PS0_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
++#define GDS_PS0_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
++//GDS_PS0_CTXSW_CNT3
++#define GDS_PS0_CTXSW_CNT3__UPDN__SHIFT 0x0
++#define GDS_PS0_CTXSW_CNT3__PTR__SHIFT 0x10
++#define GDS_PS0_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
++#define GDS_PS0_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
++//GDS_PS1_CTXSW_CNT0
++#define GDS_PS1_CTXSW_CNT0__UPDN__SHIFT 0x0
++#define GDS_PS1_CTXSW_CNT0__PTR__SHIFT 0x10
++#define GDS_PS1_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
++#define GDS_PS1_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
++//GDS_PS1_CTXSW_CNT1
++#define GDS_PS1_CTXSW_CNT1__UPDN__SHIFT 0x0
++#define GDS_PS1_CTXSW_CNT1__PTR__SHIFT 0x10
++#define GDS_PS1_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
++#define GDS_PS1_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
++//GDS_PS1_CTXSW_CNT2
++#define GDS_PS1_CTXSW_CNT2__UPDN__SHIFT 0x0
++#define GDS_PS1_CTXSW_CNT2__PTR__SHIFT 0x10
++#define GDS_PS1_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
++#define GDS_PS1_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
++//GDS_PS1_CTXSW_CNT3
++#define GDS_PS1_CTXSW_CNT3__UPDN__SHIFT 0x0
++#define GDS_PS1_CTXSW_CNT3__PTR__SHIFT 0x10
++#define GDS_PS1_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
++#define GDS_PS1_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
++//GDS_PS2_CTXSW_CNT0
++#define GDS_PS2_CTXSW_CNT0__UPDN__SHIFT 0x0
++#define GDS_PS2_CTXSW_CNT0__PTR__SHIFT 0x10
++#define GDS_PS2_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
++#define GDS_PS2_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
++//GDS_PS2_CTXSW_CNT1
++#define GDS_PS2_CTXSW_CNT1__UPDN__SHIFT 0x0
++#define GDS_PS2_CTXSW_CNT1__PTR__SHIFT 0x10
++#define GDS_PS2_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
++#define GDS_PS2_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
++//GDS_PS2_CTXSW_CNT2
++#define GDS_PS2_CTXSW_CNT2__UPDN__SHIFT 0x0
++#define GDS_PS2_CTXSW_CNT2__PTR__SHIFT 0x10
++#define GDS_PS2_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
++#define GDS_PS2_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
++//GDS_PS2_CTXSW_CNT3
++#define GDS_PS2_CTXSW_CNT3__UPDN__SHIFT 0x0
++#define GDS_PS2_CTXSW_CNT3__PTR__SHIFT 0x10
++#define GDS_PS2_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
++#define GDS_PS2_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
++//GDS_PS3_CTXSW_CNT0
++#define GDS_PS3_CTXSW_CNT0__UPDN__SHIFT 0x0
++#define GDS_PS3_CTXSW_CNT0__PTR__SHIFT 0x10
++#define GDS_PS3_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
++#define GDS_PS3_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
++//GDS_PS3_CTXSW_CNT1
++#define GDS_PS3_CTXSW_CNT1__UPDN__SHIFT 0x0
++#define GDS_PS3_CTXSW_CNT1__PTR__SHIFT 0x10
++#define GDS_PS3_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
++#define GDS_PS3_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
++//GDS_PS3_CTXSW_CNT2
++#define GDS_PS3_CTXSW_CNT2__UPDN__SHIFT 0x0
++#define GDS_PS3_CTXSW_CNT2__PTR__SHIFT 0x10
++#define GDS_PS3_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
++#define GDS_PS3_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
++//GDS_PS3_CTXSW_CNT3
++#define GDS_PS3_CTXSW_CNT3__UPDN__SHIFT 0x0
++#define GDS_PS3_CTXSW_CNT3__PTR__SHIFT 0x10
++#define GDS_PS3_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
++#define GDS_PS3_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
++//GDS_PS4_CTXSW_CNT0
++#define GDS_PS4_CTXSW_CNT0__UPDN__SHIFT 0x0
++#define GDS_PS4_CTXSW_CNT0__PTR__SHIFT 0x10
++#define GDS_PS4_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
++#define GDS_PS4_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
++//GDS_PS4_CTXSW_CNT1
++#define GDS_PS4_CTXSW_CNT1__UPDN__SHIFT 0x0
++#define GDS_PS4_CTXSW_CNT1__PTR__SHIFT 0x10
++#define GDS_PS4_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
++#define GDS_PS4_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
++//GDS_PS4_CTXSW_CNT2
++#define GDS_PS4_CTXSW_CNT2__UPDN__SHIFT 0x0
++#define GDS_PS4_CTXSW_CNT2__PTR__SHIFT 0x10
++#define GDS_PS4_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
++#define GDS_PS4_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
++//GDS_PS4_CTXSW_CNT3
++#define GDS_PS4_CTXSW_CNT3__UPDN__SHIFT 0x0
++#define GDS_PS4_CTXSW_CNT3__PTR__SHIFT 0x10
++#define GDS_PS4_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
++#define GDS_PS4_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
++//GDS_PS5_CTXSW_CNT0
++#define GDS_PS5_CTXSW_CNT0__UPDN__SHIFT 0x0
++#define GDS_PS5_CTXSW_CNT0__PTR__SHIFT 0x10
++#define GDS_PS5_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
++#define GDS_PS5_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
++//GDS_PS5_CTXSW_CNT1
++#define GDS_PS5_CTXSW_CNT1__UPDN__SHIFT 0x0
++#define GDS_PS5_CTXSW_CNT1__PTR__SHIFT 0x10
++#define GDS_PS5_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
++#define GDS_PS5_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
++//GDS_PS5_CTXSW_CNT2
++#define GDS_PS5_CTXSW_CNT2__UPDN__SHIFT 0x0
++#define GDS_PS5_CTXSW_CNT2__PTR__SHIFT 0x10
++#define GDS_PS5_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
++#define GDS_PS5_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
++//GDS_PS5_CTXSW_CNT3
++#define GDS_PS5_CTXSW_CNT3__UPDN__SHIFT 0x0
++#define GDS_PS5_CTXSW_CNT3__PTR__SHIFT 0x10
++#define GDS_PS5_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
++#define GDS_PS5_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
++//GDS_PS6_CTXSW_CNT0
++#define GDS_PS6_CTXSW_CNT0__UPDN__SHIFT 0x0
++#define GDS_PS6_CTXSW_CNT0__PTR__SHIFT 0x10
++#define GDS_PS6_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
++#define GDS_PS6_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
++//GDS_PS6_CTXSW_CNT1
++#define GDS_PS6_CTXSW_CNT1__UPDN__SHIFT 0x0
++#define GDS_PS6_CTXSW_CNT1__PTR__SHIFT 0x10
++#define GDS_PS6_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
++#define GDS_PS6_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
++//GDS_PS6_CTXSW_CNT2
++#define GDS_PS6_CTXSW_CNT2__UPDN__SHIFT 0x0
++#define GDS_PS6_CTXSW_CNT2__PTR__SHIFT 0x10
++#define GDS_PS6_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
++#define GDS_PS6_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
++//GDS_PS6_CTXSW_CNT3
++#define GDS_PS6_CTXSW_CNT3__UPDN__SHIFT 0x0
++#define GDS_PS6_CTXSW_CNT3__PTR__SHIFT 0x10
++#define GDS_PS6_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
++#define GDS_PS6_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
++//GDS_PS7_CTXSW_CNT0
++#define GDS_PS7_CTXSW_CNT0__UPDN__SHIFT 0x0
++#define GDS_PS7_CTXSW_CNT0__PTR__SHIFT 0x10
++#define GDS_PS7_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
++#define GDS_PS7_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
++//GDS_PS7_CTXSW_CNT1
++#define GDS_PS7_CTXSW_CNT1__UPDN__SHIFT 0x0
++#define GDS_PS7_CTXSW_CNT1__PTR__SHIFT 0x10
++#define GDS_PS7_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
++#define GDS_PS7_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
++//GDS_PS7_CTXSW_CNT2
++#define GDS_PS7_CTXSW_CNT2__UPDN__SHIFT 0x0
++#define GDS_PS7_CTXSW_CNT2__PTR__SHIFT 0x10
++#define GDS_PS7_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
++#define GDS_PS7_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
++//GDS_PS7_CTXSW_CNT3
++#define GDS_PS7_CTXSW_CNT3__UPDN__SHIFT 0x0
++#define GDS_PS7_CTXSW_CNT3__PTR__SHIFT 0x10
++#define GDS_PS7_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
++#define GDS_PS7_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
++//GDS_GS_CTXSW_CNT0
++#define GDS_GS_CTXSW_CNT0__UPDN__SHIFT 0x0
++#define GDS_GS_CTXSW_CNT0__PTR__SHIFT 0x10
++#define GDS_GS_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
++#define GDS_GS_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
++//GDS_GS_CTXSW_CNT1
++#define GDS_GS_CTXSW_CNT1__UPDN__SHIFT 0x0
++#define GDS_GS_CTXSW_CNT1__PTR__SHIFT 0x10
++#define GDS_GS_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
++#define GDS_GS_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
++//GDS_GS_CTXSW_CNT2
++#define GDS_GS_CTXSW_CNT2__UPDN__SHIFT 0x0
++#define GDS_GS_CTXSW_CNT2__PTR__SHIFT 0x10
++#define GDS_GS_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
++#define GDS_GS_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
++//GDS_GS_CTXSW_CNT3
++#define GDS_GS_CTXSW_CNT3__UPDN__SHIFT 0x0
++#define GDS_GS_CTXSW_CNT3__PTR__SHIFT 0x10
++#define GDS_GS_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
++#define GDS_GS_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
++
++
++// addressBlock: gc_rasdec
++//RAS_SIGNATURE_CONTROL
++#define RAS_SIGNATURE_CONTROL__ENABLE__SHIFT 0x0
++#define RAS_SIGNATURE_CONTROL__ENABLE_MASK 0x00000001L
++//RAS_SIGNATURE_MASK
++#define RAS_SIGNATURE_MASK__INPUT_BUS_MASK__SHIFT 0x0
++#define RAS_SIGNATURE_MASK__INPUT_BUS_MASK_MASK 0xFFFFFFFFL
++//RAS_SX_SIGNATURE0
++#define RAS_SX_SIGNATURE0__SIGNATURE__SHIFT 0x0
++#define RAS_SX_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
++//RAS_SX_SIGNATURE1
++#define RAS_SX_SIGNATURE1__SIGNATURE__SHIFT 0x0
++#define RAS_SX_SIGNATURE1__SIGNATURE_MASK 0xFFFFFFFFL
++//RAS_SX_SIGNATURE2
++#define RAS_SX_SIGNATURE2__SIGNATURE__SHIFT 0x0
++#define RAS_SX_SIGNATURE2__SIGNATURE_MASK 0xFFFFFFFFL
++//RAS_SX_SIGNATURE3
++#define RAS_SX_SIGNATURE3__SIGNATURE__SHIFT 0x0
++#define RAS_SX_SIGNATURE3__SIGNATURE_MASK 0xFFFFFFFFL
++//RAS_DB_SIGNATURE0
++#define RAS_DB_SIGNATURE0__SIGNATURE__SHIFT 0x0
++#define RAS_DB_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
++//RAS_PA_SIGNATURE0
++#define RAS_PA_SIGNATURE0__SIGNATURE__SHIFT 0x0
++#define RAS_PA_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
++//RAS_VGT_SIGNATURE0
++#define RAS_VGT_SIGNATURE0__SIGNATURE__SHIFT 0x0
++#define RAS_VGT_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
++//RAS_SQ_SIGNATURE0
++#define RAS_SQ_SIGNATURE0__SIGNATURE__SHIFT 0x0
++#define RAS_SQ_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
++//RAS_SC_SIGNATURE0
++#define RAS_SC_SIGNATURE0__SIGNATURE__SHIFT 0x0
++#define RAS_SC_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
++//RAS_SC_SIGNATURE1
++#define RAS_SC_SIGNATURE1__SIGNATURE__SHIFT 0x0
++#define RAS_SC_SIGNATURE1__SIGNATURE_MASK 0xFFFFFFFFL
++//RAS_SC_SIGNATURE2
++#define RAS_SC_SIGNATURE2__SIGNATURE__SHIFT 0x0
++#define RAS_SC_SIGNATURE2__SIGNATURE_MASK 0xFFFFFFFFL
++//RAS_SC_SIGNATURE3
++#define RAS_SC_SIGNATURE3__SIGNATURE__SHIFT 0x0
++#define RAS_SC_SIGNATURE3__SIGNATURE_MASK 0xFFFFFFFFL
++//RAS_SC_SIGNATURE4
++#define RAS_SC_SIGNATURE4__SIGNATURE__SHIFT 0x0
++#define RAS_SC_SIGNATURE4__SIGNATURE_MASK 0xFFFFFFFFL
++//RAS_SC_SIGNATURE5
++#define RAS_SC_SIGNATURE5__SIGNATURE__SHIFT 0x0
++#define RAS_SC_SIGNATURE5__SIGNATURE_MASK 0xFFFFFFFFL
++//RAS_SC_SIGNATURE6
++#define RAS_SC_SIGNATURE6__SIGNATURE__SHIFT 0x0
++#define RAS_SC_SIGNATURE6__SIGNATURE_MASK 0xFFFFFFFFL
++//RAS_SC_SIGNATURE7
++#define RAS_SC_SIGNATURE7__SIGNATURE__SHIFT 0x0
++#define RAS_SC_SIGNATURE7__SIGNATURE_MASK 0xFFFFFFFFL
++//RAS_IA_SIGNATURE0
++#define RAS_IA_SIGNATURE0__SIGNATURE__SHIFT 0x0
++#define RAS_IA_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
++//RAS_IA_SIGNATURE1
++#define RAS_IA_SIGNATURE1__SIGNATURE__SHIFT 0x0
++#define RAS_IA_SIGNATURE1__SIGNATURE_MASK 0xFFFFFFFFL
++//RAS_SPI_SIGNATURE0
++#define RAS_SPI_SIGNATURE0__SIGNATURE__SHIFT 0x0
++#define RAS_SPI_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
++//RAS_SPI_SIGNATURE1
++#define RAS_SPI_SIGNATURE1__SIGNATURE__SHIFT 0x0
++#define RAS_SPI_SIGNATURE1__SIGNATURE_MASK 0xFFFFFFFFL
++//RAS_TA_SIGNATURE0
++#define RAS_TA_SIGNATURE0__SIGNATURE__SHIFT 0x0
++#define RAS_TA_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
++//RAS_TD_SIGNATURE0
++#define RAS_TD_SIGNATURE0__SIGNATURE__SHIFT 0x0
++#define RAS_TD_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
++//RAS_CB_SIGNATURE0
++#define RAS_CB_SIGNATURE0__SIGNATURE__SHIFT 0x0
++#define RAS_CB_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
++//RAS_BCI_SIGNATURE0
++#define RAS_BCI_SIGNATURE0__SIGNATURE__SHIFT 0x0
++#define RAS_BCI_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
++//RAS_BCI_SIGNATURE1
++#define RAS_BCI_SIGNATURE1__SIGNATURE__SHIFT 0x0
++#define RAS_BCI_SIGNATURE1__SIGNATURE_MASK 0xFFFFFFFFL
++//RAS_TA_SIGNATURE1
++#define RAS_TA_SIGNATURE1__SIGNATURE__SHIFT 0x0
++#define RAS_TA_SIGNATURE1__SIGNATURE_MASK 0xFFFFFFFFL
++
++
++// addressBlock: gc_gfxdec0
++//DB_RENDER_CONTROL
++#define DB_RENDER_CONTROL__DEPTH_CLEAR_ENABLE__SHIFT 0x0
++#define DB_RENDER_CONTROL__STENCIL_CLEAR_ENABLE__SHIFT 0x1
++#define DB_RENDER_CONTROL__DEPTH_COPY__SHIFT 0x2
++#define DB_RENDER_CONTROL__STENCIL_COPY__SHIFT 0x3
++#define DB_RENDER_CONTROL__RESUMMARIZE_ENABLE__SHIFT 0x4
++#define DB_RENDER_CONTROL__STENCIL_COMPRESS_DISABLE__SHIFT 0x5
++#define DB_RENDER_CONTROL__DEPTH_COMPRESS_DISABLE__SHIFT 0x6
++#define DB_RENDER_CONTROL__COPY_CENTROID__SHIFT 0x7
++#define DB_RENDER_CONTROL__COPY_SAMPLE__SHIFT 0x8
++#define DB_RENDER_CONTROL__DECOMPRESS_ENABLE__SHIFT 0xc
++#define DB_RENDER_CONTROL__DEPTH_CLEAR_ENABLE_MASK 0x00000001L
++#define DB_RENDER_CONTROL__STENCIL_CLEAR_ENABLE_MASK 0x00000002L
++#define DB_RENDER_CONTROL__DEPTH_COPY_MASK 0x00000004L
++#define DB_RENDER_CONTROL__STENCIL_COPY_MASK 0x00000008L
++#define DB_RENDER_CONTROL__RESUMMARIZE_ENABLE_MASK 0x00000010L
++#define DB_RENDER_CONTROL__STENCIL_COMPRESS_DISABLE_MASK 0x00000020L
++#define DB_RENDER_CONTROL__DEPTH_COMPRESS_DISABLE_MASK 0x00000040L
++#define DB_RENDER_CONTROL__COPY_CENTROID_MASK 0x00000080L
++#define DB_RENDER_CONTROL__COPY_SAMPLE_MASK 0x00000F00L
++#define DB_RENDER_CONTROL__DECOMPRESS_ENABLE_MASK 0x00001000L
++//DB_COUNT_CONTROL
++#define DB_COUNT_CONTROL__ZPASS_INCREMENT_DISABLE__SHIFT 0x0
++#define DB_COUNT_CONTROL__PERFECT_ZPASS_COUNTS__SHIFT 0x1
++#define DB_COUNT_CONTROL__SAMPLE_RATE__SHIFT 0x4
++#define DB_COUNT_CONTROL__ZPASS_ENABLE__SHIFT 0x8
++#define DB_COUNT_CONTROL__ZFAIL_ENABLE__SHIFT 0xc
++#define DB_COUNT_CONTROL__SFAIL_ENABLE__SHIFT 0x10
++#define DB_COUNT_CONTROL__DBFAIL_ENABLE__SHIFT 0x14
++#define DB_COUNT_CONTROL__SLICE_EVEN_ENABLE__SHIFT 0x18
++#define DB_COUNT_CONTROL__SLICE_ODD_ENABLE__SHIFT 0x1c
++#define DB_COUNT_CONTROL__ZPASS_INCREMENT_DISABLE_MASK 0x00000001L
++#define DB_COUNT_CONTROL__PERFECT_ZPASS_COUNTS_MASK 0x00000002L
++#define DB_COUNT_CONTROL__SAMPLE_RATE_MASK 0x00000070L
++#define DB_COUNT_CONTROL__ZPASS_ENABLE_MASK 0x00000F00L
++#define DB_COUNT_CONTROL__ZFAIL_ENABLE_MASK 0x0000F000L
++#define DB_COUNT_CONTROL__SFAIL_ENABLE_MASK 0x000F0000L
++#define DB_COUNT_CONTROL__DBFAIL_ENABLE_MASK 0x00F00000L
++#define DB_COUNT_CONTROL__SLICE_EVEN_ENABLE_MASK 0x0F000000L
++#define DB_COUNT_CONTROL__SLICE_ODD_ENABLE_MASK 0xF0000000L
++//DB_DEPTH_VIEW
++#define DB_DEPTH_VIEW__SLICE_START__SHIFT 0x0
++#define DB_DEPTH_VIEW__SLICE_MAX__SHIFT 0xd
++#define DB_DEPTH_VIEW__Z_READ_ONLY__SHIFT 0x18
++#define DB_DEPTH_VIEW__STENCIL_READ_ONLY__SHIFT 0x19
++#define DB_DEPTH_VIEW__MIPID__SHIFT 0x1a
++#define DB_DEPTH_VIEW__SLICE_START_MASK 0x000007FFL
++#define DB_DEPTH_VIEW__SLICE_MAX_MASK 0x00FFE000L
++#define DB_DEPTH_VIEW__Z_READ_ONLY_MASK 0x01000000L
++#define DB_DEPTH_VIEW__STENCIL_READ_ONLY_MASK 0x02000000L
++#define DB_DEPTH_VIEW__MIPID_MASK 0x3C000000L
++//DB_RENDER_OVERRIDE
++#define DB_RENDER_OVERRIDE__FORCE_HIZ_ENABLE__SHIFT 0x0
++#define DB_RENDER_OVERRIDE__FORCE_HIS_ENABLE0__SHIFT 0x2
++#define DB_RENDER_OVERRIDE__FORCE_HIS_ENABLE1__SHIFT 0x4
++#define DB_RENDER_OVERRIDE__FORCE_SHADER_Z_ORDER__SHIFT 0x6
++#define DB_RENDER_OVERRIDE__FAST_Z_DISABLE__SHIFT 0x7
++#define DB_RENDER_OVERRIDE__FAST_STENCIL_DISABLE__SHIFT 0x8
++#define DB_RENDER_OVERRIDE__NOOP_CULL_DISABLE__SHIFT 0x9
++#define DB_RENDER_OVERRIDE__FORCE_COLOR_KILL__SHIFT 0xa
++#define DB_RENDER_OVERRIDE__FORCE_Z_READ__SHIFT 0xb
++#define DB_RENDER_OVERRIDE__FORCE_STENCIL_READ__SHIFT 0xc
++#define DB_RENDER_OVERRIDE__FORCE_FULL_Z_RANGE__SHIFT 0xd
++#define DB_RENDER_OVERRIDE__FORCE_QC_SMASK_CONFLICT__SHIFT 0xf
++#define DB_RENDER_OVERRIDE__DISABLE_VIEWPORT_CLAMP__SHIFT 0x10
++#define DB_RENDER_OVERRIDE__IGNORE_SC_ZRANGE__SHIFT 0x11
++#define DB_RENDER_OVERRIDE__DISABLE_FULLY_COVERED__SHIFT 0x12
++#define DB_RENDER_OVERRIDE__FORCE_Z_LIMIT_SUMM__SHIFT 0x13
++#define DB_RENDER_OVERRIDE__MAX_TILES_IN_DTT__SHIFT 0x15
++#define DB_RENDER_OVERRIDE__DISABLE_TILE_RATE_TILES__SHIFT 0x1a
++#define DB_RENDER_OVERRIDE__FORCE_Z_DIRTY__SHIFT 0x1b
++#define DB_RENDER_OVERRIDE__FORCE_STENCIL_DIRTY__SHIFT 0x1c
++#define DB_RENDER_OVERRIDE__FORCE_Z_VALID__SHIFT 0x1d
++#define DB_RENDER_OVERRIDE__FORCE_STENCIL_VALID__SHIFT 0x1e
++#define DB_RENDER_OVERRIDE__PRESERVE_COMPRESSION__SHIFT 0x1f
++#define DB_RENDER_OVERRIDE__FORCE_HIZ_ENABLE_MASK 0x00000003L
++#define DB_RENDER_OVERRIDE__FORCE_HIS_ENABLE0_MASK 0x0000000CL
++#define DB_RENDER_OVERRIDE__FORCE_HIS_ENABLE1_MASK 0x00000030L
++#define DB_RENDER_OVERRIDE__FORCE_SHADER_Z_ORDER_MASK 0x00000040L
++#define DB_RENDER_OVERRIDE__FAST_Z_DISABLE_MASK 0x00000080L
++#define DB_RENDER_OVERRIDE__FAST_STENCIL_DISABLE_MASK 0x00000100L
++#define DB_RENDER_OVERRIDE__NOOP_CULL_DISABLE_MASK 0x00000200L
++#define DB_RENDER_OVERRIDE__FORCE_COLOR_KILL_MASK 0x00000400L
++#define DB_RENDER_OVERRIDE__FORCE_Z_READ_MASK 0x00000800L
++#define DB_RENDER_OVERRIDE__FORCE_STENCIL_READ_MASK 0x00001000L
++#define DB_RENDER_OVERRIDE__FORCE_FULL_Z_RANGE_MASK 0x00006000L
++#define DB_RENDER_OVERRIDE__FORCE_QC_SMASK_CONFLICT_MASK 0x00008000L
++#define DB_RENDER_OVERRIDE__DISABLE_VIEWPORT_CLAMP_MASK 0x00010000L
++#define DB_RENDER_OVERRIDE__IGNORE_SC_ZRANGE_MASK 0x00020000L
++#define DB_RENDER_OVERRIDE__DISABLE_FULLY_COVERED_MASK 0x00040000L
++#define DB_RENDER_OVERRIDE__FORCE_Z_LIMIT_SUMM_MASK 0x00180000L
++#define DB_RENDER_OVERRIDE__MAX_TILES_IN_DTT_MASK 0x03E00000L
++#define DB_RENDER_OVERRIDE__DISABLE_TILE_RATE_TILES_MASK 0x04000000L
++#define DB_RENDER_OVERRIDE__FORCE_Z_DIRTY_MASK 0x08000000L
++#define DB_RENDER_OVERRIDE__FORCE_STENCIL_DIRTY_MASK 0x10000000L
++#define DB_RENDER_OVERRIDE__FORCE_Z_VALID_MASK 0x20000000L
++#define DB_RENDER_OVERRIDE__FORCE_STENCIL_VALID_MASK 0x40000000L
++#define DB_RENDER_OVERRIDE__PRESERVE_COMPRESSION_MASK 0x80000000L
++//DB_RENDER_OVERRIDE2
++#define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_CONTROL__SHIFT 0x0
++#define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_COUNTDOWN__SHIFT 0x2
++#define DB_RENDER_OVERRIDE2__DISABLE_ZMASK_EXPCLEAR_OPTIMIZATION__SHIFT 0x5
++#define DB_RENDER_OVERRIDE2__DISABLE_SMEM_EXPCLEAR_OPTIMIZATION__SHIFT 0x6
++#define DB_RENDER_OVERRIDE2__DISABLE_COLOR_ON_VALIDATION__SHIFT 0x7
++#define DB_RENDER_OVERRIDE2__DECOMPRESS_Z_ON_FLUSH__SHIFT 0x8
++#define DB_RENDER_OVERRIDE2__DISABLE_REG_SNOOP__SHIFT 0x9
++#define DB_RENDER_OVERRIDE2__DEPTH_BOUNDS_HIER_DEPTH_DISABLE__SHIFT 0xa
++#define DB_RENDER_OVERRIDE2__SEPARATE_HIZS_FUNC_ENABLE__SHIFT 0xb
++#define DB_RENDER_OVERRIDE2__HIZ_ZFUNC__SHIFT 0xc
++#define DB_RENDER_OVERRIDE2__HIS_SFUNC_FF__SHIFT 0xf
++#define DB_RENDER_OVERRIDE2__HIS_SFUNC_BF__SHIFT 0x12
++#define DB_RENDER_OVERRIDE2__PRESERVE_ZRANGE__SHIFT 0x15
++#define DB_RENDER_OVERRIDE2__PRESERVE_SRESULTS__SHIFT 0x16
++#define DB_RENDER_OVERRIDE2__DISABLE_FAST_PASS__SHIFT 0x17
++#define DB_RENDER_OVERRIDE2__ALLOW_PARTIAL_RES_HIER_KILL__SHIFT 0x19
++#define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_CONTROL_MASK 0x00000003L
++#define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_COUNTDOWN_MASK 0x0000001CL
++#define DB_RENDER_OVERRIDE2__DISABLE_ZMASK_EXPCLEAR_OPTIMIZATION_MASK 0x00000020L
++#define DB_RENDER_OVERRIDE2__DISABLE_SMEM_EXPCLEAR_OPTIMIZATION_MASK 0x00000040L
++#define DB_RENDER_OVERRIDE2__DISABLE_COLOR_ON_VALIDATION_MASK 0x00000080L
++#define DB_RENDER_OVERRIDE2__DECOMPRESS_Z_ON_FLUSH_MASK 0x00000100L
++#define DB_RENDER_OVERRIDE2__DISABLE_REG_SNOOP_MASK 0x00000200L
++#define DB_RENDER_OVERRIDE2__DEPTH_BOUNDS_HIER_DEPTH_DISABLE_MASK 0x00000400L
++#define DB_RENDER_OVERRIDE2__SEPARATE_HIZS_FUNC_ENABLE_MASK 0x00000800L
++#define DB_RENDER_OVERRIDE2__HIZ_ZFUNC_MASK 0x00007000L
++#define DB_RENDER_OVERRIDE2__HIS_SFUNC_FF_MASK 0x00038000L
++#define DB_RENDER_OVERRIDE2__HIS_SFUNC_BF_MASK 0x001C0000L
++#define DB_RENDER_OVERRIDE2__PRESERVE_ZRANGE_MASK 0x00200000L
++#define DB_RENDER_OVERRIDE2__PRESERVE_SRESULTS_MASK 0x00400000L
++#define DB_RENDER_OVERRIDE2__DISABLE_FAST_PASS_MASK 0x00800000L
++#define DB_RENDER_OVERRIDE2__ALLOW_PARTIAL_RES_HIER_KILL_MASK 0x02000000L
++//DB_HTILE_DATA_BASE
++#define DB_HTILE_DATA_BASE__BASE_256B__SHIFT 0x0
++#define DB_HTILE_DATA_BASE__BASE_256B_MASK 0xFFFFFFFFL
++//DB_HTILE_DATA_BASE_HI
++#define DB_HTILE_DATA_BASE_HI__BASE_HI__SHIFT 0x0
++#define DB_HTILE_DATA_BASE_HI__BASE_HI_MASK 0x000000FFL
++//DB_DEPTH_SIZE
++#define DB_DEPTH_SIZE__X_MAX__SHIFT 0x0
++#define DB_DEPTH_SIZE__Y_MAX__SHIFT 0x10
++#define DB_DEPTH_SIZE__X_MAX_MASK 0x00003FFFL
++#define DB_DEPTH_SIZE__Y_MAX_MASK 0x3FFF0000L
++//DB_DEPTH_BOUNDS_MIN
++#define DB_DEPTH_BOUNDS_MIN__MIN__SHIFT 0x0
++#define DB_DEPTH_BOUNDS_MIN__MIN_MASK 0xFFFFFFFFL
++//DB_DEPTH_BOUNDS_MAX
++#define DB_DEPTH_BOUNDS_MAX__MAX__SHIFT 0x0
++#define DB_DEPTH_BOUNDS_MAX__MAX_MASK 0xFFFFFFFFL
++//DB_STENCIL_CLEAR
++#define DB_STENCIL_CLEAR__CLEAR__SHIFT 0x0
++#define DB_STENCIL_CLEAR__CLEAR_MASK 0x000000FFL
++//DB_DEPTH_CLEAR
++#define DB_DEPTH_CLEAR__DEPTH_CLEAR__SHIFT 0x0
++#define DB_DEPTH_CLEAR__DEPTH_CLEAR_MASK 0xFFFFFFFFL
++//PA_SC_SCREEN_SCISSOR_TL
++#define PA_SC_SCREEN_SCISSOR_TL__TL_X__SHIFT 0x0
++#define PA_SC_SCREEN_SCISSOR_TL__TL_Y__SHIFT 0x10
++#define PA_SC_SCREEN_SCISSOR_TL__TL_X_MASK 0x0000FFFFL
++#define PA_SC_SCREEN_SCISSOR_TL__TL_Y_MASK 0xFFFF0000L
++//PA_SC_SCREEN_SCISSOR_BR
++#define PA_SC_SCREEN_SCISSOR_BR__BR_X__SHIFT 0x0
++#define PA_SC_SCREEN_SCISSOR_BR__BR_Y__SHIFT 0x10
++#define PA_SC_SCREEN_SCISSOR_BR__BR_X_MASK 0x0000FFFFL
++#define PA_SC_SCREEN_SCISSOR_BR__BR_Y_MASK 0xFFFF0000L
++//DB_Z_INFO
++#define DB_Z_INFO__FORMAT__SHIFT 0x0
++#define DB_Z_INFO__NUM_SAMPLES__SHIFT 0x2
++#define DB_Z_INFO__SW_MODE__SHIFT 0x4
++#define DB_Z_INFO__PARTIALLY_RESIDENT__SHIFT 0xc
++#define DB_Z_INFO__FAULT_BEHAVIOR__SHIFT 0xd
++#define DB_Z_INFO__ITERATE_FLUSH__SHIFT 0xf
++#define DB_Z_INFO__MAXMIP__SHIFT 0x10
++#define DB_Z_INFO__DECOMPRESS_ON_N_ZPLANES__SHIFT 0x17
++#define DB_Z_INFO__ALLOW_EXPCLEAR__SHIFT 0x1b
++#define DB_Z_INFO__READ_SIZE__SHIFT 0x1c
++#define DB_Z_INFO__TILE_SURFACE_ENABLE__SHIFT 0x1d
++#define DB_Z_INFO__CLEAR_DISALLOWED__SHIFT 0x1e
++#define DB_Z_INFO__ZRANGE_PRECISION__SHIFT 0x1f
++#define DB_Z_INFO__FORMAT_MASK 0x00000003L
++#define DB_Z_INFO__NUM_SAMPLES_MASK 0x0000000CL
++#define DB_Z_INFO__SW_MODE_MASK 0x000001F0L
++#define DB_Z_INFO__PARTIALLY_RESIDENT_MASK 0x00001000L
++#define DB_Z_INFO__FAULT_BEHAVIOR_MASK 0x00006000L
++#define DB_Z_INFO__ITERATE_FLUSH_MASK 0x00008000L
++#define DB_Z_INFO__MAXMIP_MASK 0x000F0000L
++#define DB_Z_INFO__DECOMPRESS_ON_N_ZPLANES_MASK 0x07800000L
++#define DB_Z_INFO__ALLOW_EXPCLEAR_MASK 0x08000000L
++#define DB_Z_INFO__READ_SIZE_MASK 0x10000000L
++#define DB_Z_INFO__TILE_SURFACE_ENABLE_MASK 0x20000000L
++#define DB_Z_INFO__CLEAR_DISALLOWED_MASK 0x40000000L
++#define DB_Z_INFO__ZRANGE_PRECISION_MASK 0x80000000L
++//DB_STENCIL_INFO
++#define DB_STENCIL_INFO__FORMAT__SHIFT 0x0
++#define DB_STENCIL_INFO__SW_MODE__SHIFT 0x4
++#define DB_STENCIL_INFO__PARTIALLY_RESIDENT__SHIFT 0xc
++#define DB_STENCIL_INFO__FAULT_BEHAVIOR__SHIFT 0xd
++#define DB_STENCIL_INFO__ITERATE_FLUSH__SHIFT 0xf
++#define DB_STENCIL_INFO__ALLOW_EXPCLEAR__SHIFT 0x1b
++#define DB_STENCIL_INFO__TILE_STENCIL_DISABLE__SHIFT 0x1d
++#define DB_STENCIL_INFO__CLEAR_DISALLOWED__SHIFT 0x1e
++#define DB_STENCIL_INFO__FORMAT_MASK 0x00000001L
++#define DB_STENCIL_INFO__SW_MODE_MASK 0x000001F0L
++#define DB_STENCIL_INFO__PARTIALLY_RESIDENT_MASK 0x00001000L
++#define DB_STENCIL_INFO__FAULT_BEHAVIOR_MASK 0x00006000L
++#define DB_STENCIL_INFO__ITERATE_FLUSH_MASK 0x00008000L
++#define DB_STENCIL_INFO__ALLOW_EXPCLEAR_MASK 0x08000000L
++#define DB_STENCIL_INFO__TILE_STENCIL_DISABLE_MASK 0x20000000L
++#define DB_STENCIL_INFO__CLEAR_DISALLOWED_MASK 0x40000000L
++//DB_Z_READ_BASE
++#define DB_Z_READ_BASE__BASE_256B__SHIFT 0x0
++#define DB_Z_READ_BASE__BASE_256B_MASK 0xFFFFFFFFL
++//DB_Z_READ_BASE_HI
++#define DB_Z_READ_BASE_HI__BASE_HI__SHIFT 0x0
++#define DB_Z_READ_BASE_HI__BASE_HI_MASK 0x000000FFL
++//DB_STENCIL_READ_BASE
++#define DB_STENCIL_READ_BASE__BASE_256B__SHIFT 0x0
++#define DB_STENCIL_READ_BASE__BASE_256B_MASK 0xFFFFFFFFL
++//DB_STENCIL_READ_BASE_HI
++#define DB_STENCIL_READ_BASE_HI__BASE_HI__SHIFT 0x0
++#define DB_STENCIL_READ_BASE_HI__BASE_HI_MASK 0x000000FFL
++//DB_Z_WRITE_BASE
++#define DB_Z_WRITE_BASE__BASE_256B__SHIFT 0x0
++#define DB_Z_WRITE_BASE__BASE_256B_MASK 0xFFFFFFFFL
++//DB_Z_WRITE_BASE_HI
++#define DB_Z_WRITE_BASE_HI__BASE_HI__SHIFT 0x0
++#define DB_Z_WRITE_BASE_HI__BASE_HI_MASK 0x000000FFL
++//DB_STENCIL_WRITE_BASE
++#define DB_STENCIL_WRITE_BASE__BASE_256B__SHIFT 0x0
++#define DB_STENCIL_WRITE_BASE__BASE_256B_MASK 0xFFFFFFFFL
++//DB_STENCIL_WRITE_BASE_HI
++#define DB_STENCIL_WRITE_BASE_HI__BASE_HI__SHIFT 0x0
++#define DB_STENCIL_WRITE_BASE_HI__BASE_HI_MASK 0x000000FFL
++//DB_DFSM_CONTROL
++#define DB_DFSM_CONTROL__PUNCHOUT_MODE__SHIFT 0x0
++#define DB_DFSM_CONTROL__POPS_DRAIN_PS_ON_OVERLAP__SHIFT 0x2
++#define DB_DFSM_CONTROL__DISALLOW_OVERFLOW__SHIFT 0x3
++#define DB_DFSM_CONTROL__PUNCHOUT_MODE_MASK 0x00000003L
++#define DB_DFSM_CONTROL__POPS_DRAIN_PS_ON_OVERLAP_MASK 0x00000004L
++#define DB_DFSM_CONTROL__DISALLOW_OVERFLOW_MASK 0x00000008L
++//DB_Z_INFO2
++#define DB_Z_INFO2__EPITCH__SHIFT 0x0
++#define DB_Z_INFO2__EPITCH_MASK 0x0000FFFFL
++//DB_STENCIL_INFO2
++#define DB_STENCIL_INFO2__EPITCH__SHIFT 0x0
++#define DB_STENCIL_INFO2__EPITCH_MASK 0x0000FFFFL
++//TA_BC_BASE_ADDR
++#define TA_BC_BASE_ADDR__ADDRESS__SHIFT 0x0
++#define TA_BC_BASE_ADDR__ADDRESS_MASK 0xFFFFFFFFL
++//TA_BC_BASE_ADDR_HI
++#define TA_BC_BASE_ADDR_HI__ADDRESS__SHIFT 0x0
++#define TA_BC_BASE_ADDR_HI__ADDRESS_MASK 0x000000FFL
++//COHER_DEST_BASE_HI_0
++#define COHER_DEST_BASE_HI_0__DEST_BASE_HI_256B__SHIFT 0x0
++#define COHER_DEST_BASE_HI_0__DEST_BASE_HI_256B_MASK 0x000000FFL
++//COHER_DEST_BASE_HI_1
++#define COHER_DEST_BASE_HI_1__DEST_BASE_HI_256B__SHIFT 0x0
++#define COHER_DEST_BASE_HI_1__DEST_BASE_HI_256B_MASK 0x000000FFL
++//COHER_DEST_BASE_HI_2
++#define COHER_DEST_BASE_HI_2__DEST_BASE_HI_256B__SHIFT 0x0
++#define COHER_DEST_BASE_HI_2__DEST_BASE_HI_256B_MASK 0x000000FFL
++//COHER_DEST_BASE_HI_3
++#define COHER_DEST_BASE_HI_3__DEST_BASE_HI_256B__SHIFT 0x0
++#define COHER_DEST_BASE_HI_3__DEST_BASE_HI_256B_MASK 0x000000FFL
++//COHER_DEST_BASE_2
++#define COHER_DEST_BASE_2__DEST_BASE_256B__SHIFT 0x0
++#define COHER_DEST_BASE_2__DEST_BASE_256B_MASK 0xFFFFFFFFL
++//COHER_DEST_BASE_3
++#define COHER_DEST_BASE_3__DEST_BASE_256B__SHIFT 0x0
++#define COHER_DEST_BASE_3__DEST_BASE_256B_MASK 0xFFFFFFFFL
++//PA_SC_WINDOW_OFFSET
++#define PA_SC_WINDOW_OFFSET__WINDOW_X_OFFSET__SHIFT 0x0
++#define PA_SC_WINDOW_OFFSET__WINDOW_Y_OFFSET__SHIFT 0x10
++#define PA_SC_WINDOW_OFFSET__WINDOW_X_OFFSET_MASK 0x0000FFFFL
++#define PA_SC_WINDOW_OFFSET__WINDOW_Y_OFFSET_MASK 0xFFFF0000L
++//PA_SC_WINDOW_SCISSOR_TL
++#define PA_SC_WINDOW_SCISSOR_TL__TL_X__SHIFT 0x0
++#define PA_SC_WINDOW_SCISSOR_TL__TL_Y__SHIFT 0x10
++#define PA_SC_WINDOW_SCISSOR_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
++#define PA_SC_WINDOW_SCISSOR_TL__TL_X_MASK 0x00007FFFL
++#define PA_SC_WINDOW_SCISSOR_TL__TL_Y_MASK 0x7FFF0000L
++#define PA_SC_WINDOW_SCISSOR_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
++//PA_SC_WINDOW_SCISSOR_BR
++#define PA_SC_WINDOW_SCISSOR_BR__BR_X__SHIFT 0x0
++#define PA_SC_WINDOW_SCISSOR_BR__BR_Y__SHIFT 0x10
++#define PA_SC_WINDOW_SCISSOR_BR__BR_X_MASK 0x00007FFFL
++#define PA_SC_WINDOW_SCISSOR_BR__BR_Y_MASK 0x7FFF0000L
++//PA_SC_CLIPRECT_RULE
++#define PA_SC_CLIPRECT_RULE__CLIP_RULE__SHIFT 0x0
++#define PA_SC_CLIPRECT_RULE__CLIP_RULE_MASK 0x0000FFFFL
++//PA_SC_CLIPRECT_0_TL
++#define PA_SC_CLIPRECT_0_TL__TL_X__SHIFT 0x0
++#define PA_SC_CLIPRECT_0_TL__TL_Y__SHIFT 0x10
++#define PA_SC_CLIPRECT_0_TL__TL_X_MASK 0x00007FFFL
++#define PA_SC_CLIPRECT_0_TL__TL_Y_MASK 0x7FFF0000L
++//PA_SC_CLIPRECT_0_BR
++#define PA_SC_CLIPRECT_0_BR__BR_X__SHIFT 0x0
++#define PA_SC_CLIPRECT_0_BR__BR_Y__SHIFT 0x10
++#define PA_SC_CLIPRECT_0_BR__BR_X_MASK 0x00007FFFL
++#define PA_SC_CLIPRECT_0_BR__BR_Y_MASK 0x7FFF0000L
++//PA_SC_CLIPRECT_1_TL
++#define PA_SC_CLIPRECT_1_TL__TL_X__SHIFT 0x0
++#define PA_SC_CLIPRECT_1_TL__TL_Y__SHIFT 0x10
++#define PA_SC_CLIPRECT_1_TL__TL_X_MASK 0x00007FFFL
++#define PA_SC_CLIPRECT_1_TL__TL_Y_MASK 0x7FFF0000L
++//PA_SC_CLIPRECT_1_BR
++#define PA_SC_CLIPRECT_1_BR__BR_X__SHIFT 0x0
++#define PA_SC_CLIPRECT_1_BR__BR_Y__SHIFT 0x10
++#define PA_SC_CLIPRECT_1_BR__BR_X_MASK 0x00007FFFL
++#define PA_SC_CLIPRECT_1_BR__BR_Y_MASK 0x7FFF0000L
++//PA_SC_CLIPRECT_2_TL
++#define PA_SC_CLIPRECT_2_TL__TL_X__SHIFT 0x0
++#define PA_SC_CLIPRECT_2_TL__TL_Y__SHIFT 0x10
++#define PA_SC_CLIPRECT_2_TL__TL_X_MASK 0x00007FFFL
++#define PA_SC_CLIPRECT_2_TL__TL_Y_MASK 0x7FFF0000L
++//PA_SC_CLIPRECT_2_BR
++#define PA_SC_CLIPRECT_2_BR__BR_X__SHIFT 0x0
++#define PA_SC_CLIPRECT_2_BR__BR_Y__SHIFT 0x10
++#define PA_SC_CLIPRECT_2_BR__BR_X_MASK 0x00007FFFL
++#define PA_SC_CLIPRECT_2_BR__BR_Y_MASK 0x7FFF0000L
++//PA_SC_CLIPRECT_3_TL
++#define PA_SC_CLIPRECT_3_TL__TL_X__SHIFT 0x0
++#define PA_SC_CLIPRECT_3_TL__TL_Y__SHIFT 0x10
++#define PA_SC_CLIPRECT_3_TL__TL_X_MASK 0x00007FFFL
++#define PA_SC_CLIPRECT_3_TL__TL_Y_MASK 0x7FFF0000L
++//PA_SC_CLIPRECT_3_BR
++#define PA_SC_CLIPRECT_3_BR__BR_X__SHIFT 0x0
++#define PA_SC_CLIPRECT_3_BR__BR_Y__SHIFT 0x10
++#define PA_SC_CLIPRECT_3_BR__BR_X_MASK 0x00007FFFL
++#define PA_SC_CLIPRECT_3_BR__BR_Y_MASK 0x7FFF0000L
++//PA_SC_EDGERULE
++#define PA_SC_EDGERULE__ER_TRI__SHIFT 0x0
++#define PA_SC_EDGERULE__ER_POINT__SHIFT 0x4
++#define PA_SC_EDGERULE__ER_RECT__SHIFT 0x8
++#define PA_SC_EDGERULE__ER_LINE_LR__SHIFT 0xc
++#define PA_SC_EDGERULE__ER_LINE_RL__SHIFT 0x12
++#define PA_SC_EDGERULE__ER_LINE_TB__SHIFT 0x18
++#define PA_SC_EDGERULE__ER_LINE_BT__SHIFT 0x1c
++#define PA_SC_EDGERULE__ER_TRI_MASK 0x0000000FL
++#define PA_SC_EDGERULE__ER_POINT_MASK 0x000000F0L
++#define PA_SC_EDGERULE__ER_RECT_MASK 0x00000F00L
++#define PA_SC_EDGERULE__ER_LINE_LR_MASK 0x0003F000L
++#define PA_SC_EDGERULE__ER_LINE_RL_MASK 0x00FC0000L
++#define PA_SC_EDGERULE__ER_LINE_TB_MASK 0x0F000000L
++#define PA_SC_EDGERULE__ER_LINE_BT_MASK 0xF0000000L
++//PA_SU_HARDWARE_SCREEN_OFFSET
++#define PA_SU_HARDWARE_SCREEN_OFFSET__HW_SCREEN_OFFSET_X__SHIFT 0x0
++#define PA_SU_HARDWARE_SCREEN_OFFSET__HW_SCREEN_OFFSET_Y__SHIFT 0x10
++#define PA_SU_HARDWARE_SCREEN_OFFSET__HW_SCREEN_OFFSET_X_MASK 0x000001FFL
++#define PA_SU_HARDWARE_SCREEN_OFFSET__HW_SCREEN_OFFSET_Y_MASK 0x01FF0000L
++//CB_TARGET_MASK
++#define CB_TARGET_MASK__TARGET0_ENABLE__SHIFT 0x0
++#define CB_TARGET_MASK__TARGET1_ENABLE__SHIFT 0x4
++#define CB_TARGET_MASK__TARGET2_ENABLE__SHIFT 0x8
++#define CB_TARGET_MASK__TARGET3_ENABLE__SHIFT 0xc
++#define CB_TARGET_MASK__TARGET4_ENABLE__SHIFT 0x10
++#define CB_TARGET_MASK__TARGET5_ENABLE__SHIFT 0x14
++#define CB_TARGET_MASK__TARGET6_ENABLE__SHIFT 0x18
++#define CB_TARGET_MASK__TARGET7_ENABLE__SHIFT 0x1c
++#define CB_TARGET_MASK__TARGET0_ENABLE_MASK 0x0000000FL
++#define CB_TARGET_MASK__TARGET1_ENABLE_MASK 0x000000F0L
++#define CB_TARGET_MASK__TARGET2_ENABLE_MASK 0x00000F00L
++#define CB_TARGET_MASK__TARGET3_ENABLE_MASK 0x0000F000L
++#define CB_TARGET_MASK__TARGET4_ENABLE_MASK 0x000F0000L
++#define CB_TARGET_MASK__TARGET5_ENABLE_MASK 0x00F00000L
++#define CB_TARGET_MASK__TARGET6_ENABLE_MASK 0x0F000000L
++#define CB_TARGET_MASK__TARGET7_ENABLE_MASK 0xF0000000L
++//CB_SHADER_MASK
++#define CB_SHADER_MASK__OUTPUT0_ENABLE__SHIFT 0x0
++#define CB_SHADER_MASK__OUTPUT1_ENABLE__SHIFT 0x4
++#define CB_SHADER_MASK__OUTPUT2_ENABLE__SHIFT 0x8
++#define CB_SHADER_MASK__OUTPUT3_ENABLE__SHIFT 0xc
++#define CB_SHADER_MASK__OUTPUT4_ENABLE__SHIFT 0x10
++#define CB_SHADER_MASK__OUTPUT5_ENABLE__SHIFT 0x14
++#define CB_SHADER_MASK__OUTPUT6_ENABLE__SHIFT 0x18
++#define CB_SHADER_MASK__OUTPUT7_ENABLE__SHIFT 0x1c
++#define CB_SHADER_MASK__OUTPUT0_ENABLE_MASK 0x0000000FL
++#define CB_SHADER_MASK__OUTPUT1_ENABLE_MASK 0x000000F0L
++#define CB_SHADER_MASK__OUTPUT2_ENABLE_MASK 0x00000F00L
++#define CB_SHADER_MASK__OUTPUT3_ENABLE_MASK 0x0000F000L
++#define CB_SHADER_MASK__OUTPUT4_ENABLE_MASK 0x000F0000L
++#define CB_SHADER_MASK__OUTPUT5_ENABLE_MASK 0x00F00000L
++#define CB_SHADER_MASK__OUTPUT6_ENABLE_MASK 0x0F000000L
++#define CB_SHADER_MASK__OUTPUT7_ENABLE_MASK 0xF0000000L
++//PA_SC_GENERIC_SCISSOR_TL
++#define PA_SC_GENERIC_SCISSOR_TL__TL_X__SHIFT 0x0
++#define PA_SC_GENERIC_SCISSOR_TL__TL_Y__SHIFT 0x10
++#define PA_SC_GENERIC_SCISSOR_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
++#define PA_SC_GENERIC_SCISSOR_TL__TL_X_MASK 0x00007FFFL
++#define PA_SC_GENERIC_SCISSOR_TL__TL_Y_MASK 0x7FFF0000L
++#define PA_SC_GENERIC_SCISSOR_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
++//PA_SC_GENERIC_SCISSOR_BR
++#define PA_SC_GENERIC_SCISSOR_BR__BR_X__SHIFT 0x0
++#define PA_SC_GENERIC_SCISSOR_BR__BR_Y__SHIFT 0x10
++#define PA_SC_GENERIC_SCISSOR_BR__BR_X_MASK 0x00007FFFL
++#define PA_SC_GENERIC_SCISSOR_BR__BR_Y_MASK 0x7FFF0000L
++//COHER_DEST_BASE_0
++#define COHER_DEST_BASE_0__DEST_BASE_256B__SHIFT 0x0
++#define COHER_DEST_BASE_0__DEST_BASE_256B_MASK 0xFFFFFFFFL
++//COHER_DEST_BASE_1
++#define COHER_DEST_BASE_1__DEST_BASE_256B__SHIFT 0x0
++#define COHER_DEST_BASE_1__DEST_BASE_256B_MASK 0xFFFFFFFFL
++//PA_SC_VPORT_SCISSOR_0_TL
++#define PA_SC_VPORT_SCISSOR_0_TL__TL_X__SHIFT 0x0
++#define PA_SC_VPORT_SCISSOR_0_TL__TL_Y__SHIFT 0x10
++#define PA_SC_VPORT_SCISSOR_0_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
++#define PA_SC_VPORT_SCISSOR_0_TL__TL_X_MASK 0x00007FFFL
++#define PA_SC_VPORT_SCISSOR_0_TL__TL_Y_MASK 0x7FFF0000L
++#define PA_SC_VPORT_SCISSOR_0_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
++//PA_SC_VPORT_SCISSOR_0_BR
++#define PA_SC_VPORT_SCISSOR_0_BR__BR_X__SHIFT 0x0
++#define PA_SC_VPORT_SCISSOR_0_BR__BR_Y__SHIFT 0x10
++#define PA_SC_VPORT_SCISSOR_0_BR__BR_X_MASK 0x00007FFFL
++#define PA_SC_VPORT_SCISSOR_0_BR__BR_Y_MASK 0x7FFF0000L
++//PA_SC_VPORT_SCISSOR_1_TL
++#define PA_SC_VPORT_SCISSOR_1_TL__TL_X__SHIFT 0x0
++#define PA_SC_VPORT_SCISSOR_1_TL__TL_Y__SHIFT 0x10
++#define PA_SC_VPORT_SCISSOR_1_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
++#define PA_SC_VPORT_SCISSOR_1_TL__TL_X_MASK 0x00007FFFL
++#define PA_SC_VPORT_SCISSOR_1_TL__TL_Y_MASK 0x7FFF0000L
++#define PA_SC_VPORT_SCISSOR_1_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
++//PA_SC_VPORT_SCISSOR_1_BR
++#define PA_SC_VPORT_SCISSOR_1_BR__BR_X__SHIFT 0x0
++#define PA_SC_VPORT_SCISSOR_1_BR__BR_Y__SHIFT 0x10
++#define PA_SC_VPORT_SCISSOR_1_BR__BR_X_MASK 0x00007FFFL
++#define PA_SC_VPORT_SCISSOR_1_BR__BR_Y_MASK 0x7FFF0000L
++//PA_SC_VPORT_SCISSOR_2_TL
++#define PA_SC_VPORT_SCISSOR_2_TL__TL_X__SHIFT 0x0
++#define PA_SC_VPORT_SCISSOR_2_TL__TL_Y__SHIFT 0x10
++#define PA_SC_VPORT_SCISSOR_2_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
++#define PA_SC_VPORT_SCISSOR_2_TL__TL_X_MASK 0x00007FFFL
++#define PA_SC_VPORT_SCISSOR_2_TL__TL_Y_MASK 0x7FFF0000L
++#define PA_SC_VPORT_SCISSOR_2_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
++//PA_SC_VPORT_SCISSOR_2_BR
++#define PA_SC_VPORT_SCISSOR_2_BR__BR_X__SHIFT 0x0
++#define PA_SC_VPORT_SCISSOR_2_BR__BR_Y__SHIFT 0x10
++#define PA_SC_VPORT_SCISSOR_2_BR__BR_X_MASK 0x00007FFFL
++#define PA_SC_VPORT_SCISSOR_2_BR__BR_Y_MASK 0x7FFF0000L
++//PA_SC_VPORT_SCISSOR_3_TL
++#define PA_SC_VPORT_SCISSOR_3_TL__TL_X__SHIFT 0x0
++#define PA_SC_VPORT_SCISSOR_3_TL__TL_Y__SHIFT 0x10
++#define PA_SC_VPORT_SCISSOR_3_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
++#define PA_SC_VPORT_SCISSOR_3_TL__TL_X_MASK 0x00007FFFL
++#define PA_SC_VPORT_SCISSOR_3_TL__TL_Y_MASK 0x7FFF0000L
++#define PA_SC_VPORT_SCISSOR_3_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
++//PA_SC_VPORT_SCISSOR_3_BR
++#define PA_SC_VPORT_SCISSOR_3_BR__BR_X__SHIFT 0x0
++#define PA_SC_VPORT_SCISSOR_3_BR__BR_Y__SHIFT 0x10
++#define PA_SC_VPORT_SCISSOR_3_BR__BR_X_MASK 0x00007FFFL
++#define PA_SC_VPORT_SCISSOR_3_BR__BR_Y_MASK 0x7FFF0000L
++//PA_SC_VPORT_SCISSOR_4_TL
++#define PA_SC_VPORT_SCISSOR_4_TL__TL_X__SHIFT 0x0
++#define PA_SC_VPORT_SCISSOR_4_TL__TL_Y__SHIFT 0x10
++#define PA_SC_VPORT_SCISSOR_4_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
++#define PA_SC_VPORT_SCISSOR_4_TL__TL_X_MASK 0x00007FFFL
++#define PA_SC_VPORT_SCISSOR_4_TL__TL_Y_MASK 0x7FFF0000L
++#define PA_SC_VPORT_SCISSOR_4_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
++//PA_SC_VPORT_SCISSOR_4_BR
++#define PA_SC_VPORT_SCISSOR_4_BR__BR_X__SHIFT 0x0
++#define PA_SC_VPORT_SCISSOR_4_BR__BR_Y__SHIFT 0x10
++#define PA_SC_VPORT_SCISSOR_4_BR__BR_X_MASK 0x00007FFFL
++#define PA_SC_VPORT_SCISSOR_4_BR__BR_Y_MASK 0x7FFF0000L
++//PA_SC_VPORT_SCISSOR_5_TL
++#define PA_SC_VPORT_SCISSOR_5_TL__TL_X__SHIFT 0x0
++#define PA_SC_VPORT_SCISSOR_5_TL__TL_Y__SHIFT 0x10
++#define PA_SC_VPORT_SCISSOR_5_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
++#define PA_SC_VPORT_SCISSOR_5_TL__TL_X_MASK 0x00007FFFL
++#define PA_SC_VPORT_SCISSOR_5_TL__TL_Y_MASK 0x7FFF0000L
++#define PA_SC_VPORT_SCISSOR_5_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
++//PA_SC_VPORT_SCISSOR_5_BR
++#define PA_SC_VPORT_SCISSOR_5_BR__BR_X__SHIFT 0x0
++#define PA_SC_VPORT_SCISSOR_5_BR__BR_Y__SHIFT 0x10
++#define PA_SC_VPORT_SCISSOR_5_BR__BR_X_MASK 0x00007FFFL
++#define PA_SC_VPORT_SCISSOR_5_BR__BR_Y_MASK 0x7FFF0000L
++//PA_SC_VPORT_SCISSOR_6_TL
++#define PA_SC_VPORT_SCISSOR_6_TL__TL_X__SHIFT 0x0
++#define PA_SC_VPORT_SCISSOR_6_TL__TL_Y__SHIFT 0x10
++#define PA_SC_VPORT_SCISSOR_6_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
++#define PA_SC_VPORT_SCISSOR_6_TL__TL_X_MASK 0x00007FFFL
++#define PA_SC_VPORT_SCISSOR_6_TL__TL_Y_MASK 0x7FFF0000L
++#define PA_SC_VPORT_SCISSOR_6_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
++//PA_SC_VPORT_SCISSOR_6_BR
++#define PA_SC_VPORT_SCISSOR_6_BR__BR_X__SHIFT 0x0
++#define PA_SC_VPORT_SCISSOR_6_BR__BR_Y__SHIFT 0x10
++#define PA_SC_VPORT_SCISSOR_6_BR__BR_X_MASK 0x00007FFFL
++#define PA_SC_VPORT_SCISSOR_6_BR__BR_Y_MASK 0x7FFF0000L
++//PA_SC_VPORT_SCISSOR_7_TL
++#define PA_SC_VPORT_SCISSOR_7_TL__TL_X__SHIFT 0x0
++#define PA_SC_VPORT_SCISSOR_7_TL__TL_Y__SHIFT 0x10
++#define PA_SC_VPORT_SCISSOR_7_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
++#define PA_SC_VPORT_SCISSOR_7_TL__TL_X_MASK 0x00007FFFL
++#define PA_SC_VPORT_SCISSOR_7_TL__TL_Y_MASK 0x7FFF0000L
++#define PA_SC_VPORT_SCISSOR_7_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
++//PA_SC_VPORT_SCISSOR_7_BR
++#define PA_SC_VPORT_SCISSOR_7_BR__BR_X__SHIFT 0x0
++#define PA_SC_VPORT_SCISSOR_7_BR__BR_Y__SHIFT 0x10
++#define PA_SC_VPORT_SCISSOR_7_BR__BR_X_MASK 0x00007FFFL
++#define PA_SC_VPORT_SCISSOR_7_BR__BR_Y_MASK 0x7FFF0000L
++//PA_SC_VPORT_SCISSOR_8_TL
++#define PA_SC_VPORT_SCISSOR_8_TL__TL_X__SHIFT 0x0
++#define PA_SC_VPORT_SCISSOR_8_TL__TL_Y__SHIFT 0x10
++#define PA_SC_VPORT_SCISSOR_8_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
++#define PA_SC_VPORT_SCISSOR_8_TL__TL_X_MASK 0x00007FFFL
++#define PA_SC_VPORT_SCISSOR_8_TL__TL_Y_MASK 0x7FFF0000L
++#define PA_SC_VPORT_SCISSOR_8_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
++//PA_SC_VPORT_SCISSOR_8_BR
++#define PA_SC_VPORT_SCISSOR_8_BR__BR_X__SHIFT 0x0
++#define PA_SC_VPORT_SCISSOR_8_BR__BR_Y__SHIFT 0x10
++#define PA_SC_VPORT_SCISSOR_8_BR__BR_X_MASK 0x00007FFFL
++#define PA_SC_VPORT_SCISSOR_8_BR__BR_Y_MASK 0x7FFF0000L
++//PA_SC_VPORT_SCISSOR_9_TL
++#define PA_SC_VPORT_SCISSOR_9_TL__TL_X__SHIFT 0x0
++#define PA_SC_VPORT_SCISSOR_9_TL__TL_Y__SHIFT 0x10
++#define PA_SC_VPORT_SCISSOR_9_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
++#define PA_SC_VPORT_SCISSOR_9_TL__TL_X_MASK 0x00007FFFL
++#define PA_SC_VPORT_SCISSOR_9_TL__TL_Y_MASK 0x7FFF0000L
++#define PA_SC_VPORT_SCISSOR_9_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
++//PA_SC_VPORT_SCISSOR_9_BR
++#define PA_SC_VPORT_SCISSOR_9_BR__BR_X__SHIFT 0x0
++#define PA_SC_VPORT_SCISSOR_9_BR__BR_Y__SHIFT 0x10
++#define PA_SC_VPORT_SCISSOR_9_BR__BR_X_MASK 0x00007FFFL
++#define PA_SC_VPORT_SCISSOR_9_BR__BR_Y_MASK 0x7FFF0000L
++//PA_SC_VPORT_SCISSOR_10_TL
++#define PA_SC_VPORT_SCISSOR_10_TL__TL_X__SHIFT 0x0
++#define PA_SC_VPORT_SCISSOR_10_TL__TL_Y__SHIFT 0x10
++#define PA_SC_VPORT_SCISSOR_10_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
++#define PA_SC_VPORT_SCISSOR_10_TL__TL_X_MASK 0x00007FFFL
++#define PA_SC_VPORT_SCISSOR_10_TL__TL_Y_MASK 0x7FFF0000L
++#define PA_SC_VPORT_SCISSOR_10_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
++//PA_SC_VPORT_SCISSOR_10_BR
++#define PA_SC_VPORT_SCISSOR_10_BR__BR_X__SHIFT 0x0
++#define PA_SC_VPORT_SCISSOR_10_BR__BR_Y__SHIFT 0x10
++#define PA_SC_VPORT_SCISSOR_10_BR__BR_X_MASK 0x00007FFFL
++#define PA_SC_VPORT_SCISSOR_10_BR__BR_Y_MASK 0x7FFF0000L
++//PA_SC_VPORT_SCISSOR_11_TL
++#define PA_SC_VPORT_SCISSOR_11_TL__TL_X__SHIFT 0x0
++#define PA_SC_VPORT_SCISSOR_11_TL__TL_Y__SHIFT 0x10
++#define PA_SC_VPORT_SCISSOR_11_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
++#define PA_SC_VPORT_SCISSOR_11_TL__TL_X_MASK 0x00007FFFL
++#define PA_SC_VPORT_SCISSOR_11_TL__TL_Y_MASK 0x7FFF0000L
++#define PA_SC_VPORT_SCISSOR_11_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
++//PA_SC_VPORT_SCISSOR_11_BR
++#define PA_SC_VPORT_SCISSOR_11_BR__BR_X__SHIFT 0x0
++#define PA_SC_VPORT_SCISSOR_11_BR__BR_Y__SHIFT 0x10
++#define PA_SC_VPORT_SCISSOR_11_BR__BR_X_MASK 0x00007FFFL
++#define PA_SC_VPORT_SCISSOR_11_BR__BR_Y_MASK 0x7FFF0000L
++//PA_SC_VPORT_SCISSOR_12_TL
++#define PA_SC_VPORT_SCISSOR_12_TL__TL_X__SHIFT 0x0
++#define PA_SC_VPORT_SCISSOR_12_TL__TL_Y__SHIFT 0x10
++#define PA_SC_VPORT_SCISSOR_12_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
++#define PA_SC_VPORT_SCISSOR_12_TL__TL_X_MASK 0x00007FFFL
++#define PA_SC_VPORT_SCISSOR_12_TL__TL_Y_MASK 0x7FFF0000L
++#define PA_SC_VPORT_SCISSOR_12_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
++//PA_SC_VPORT_SCISSOR_12_BR
++#define PA_SC_VPORT_SCISSOR_12_BR__BR_X__SHIFT 0x0
++#define PA_SC_VPORT_SCISSOR_12_BR__BR_Y__SHIFT 0x10
++#define PA_SC_VPORT_SCISSOR_12_BR__BR_X_MASK 0x00007FFFL
++#define PA_SC_VPORT_SCISSOR_12_BR__BR_Y_MASK 0x7FFF0000L
++//PA_SC_VPORT_SCISSOR_13_TL
++#define PA_SC_VPORT_SCISSOR_13_TL__TL_X__SHIFT 0x0
++#define PA_SC_VPORT_SCISSOR_13_TL__TL_Y__SHIFT 0x10
++#define PA_SC_VPORT_SCISSOR_13_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
++#define PA_SC_VPORT_SCISSOR_13_TL__TL_X_MASK 0x00007FFFL
++#define PA_SC_VPORT_SCISSOR_13_TL__TL_Y_MASK 0x7FFF0000L
++#define PA_SC_VPORT_SCISSOR_13_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
++//PA_SC_VPORT_SCISSOR_13_BR
++#define PA_SC_VPORT_SCISSOR_13_BR__BR_X__SHIFT 0x0
++#define PA_SC_VPORT_SCISSOR_13_BR__BR_Y__SHIFT 0x10
++#define PA_SC_VPORT_SCISSOR_13_BR__BR_X_MASK 0x00007FFFL
++#define PA_SC_VPORT_SCISSOR_13_BR__BR_Y_MASK 0x7FFF0000L
++//PA_SC_VPORT_SCISSOR_14_TL
++#define PA_SC_VPORT_SCISSOR_14_TL__TL_X__SHIFT 0x0
++#define PA_SC_VPORT_SCISSOR_14_TL__TL_Y__SHIFT 0x10
++#define PA_SC_VPORT_SCISSOR_14_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
++#define PA_SC_VPORT_SCISSOR_14_TL__TL_X_MASK 0x00007FFFL
++#define PA_SC_VPORT_SCISSOR_14_TL__TL_Y_MASK 0x7FFF0000L
++#define PA_SC_VPORT_SCISSOR_14_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
++//PA_SC_VPORT_SCISSOR_14_BR
++#define PA_SC_VPORT_SCISSOR_14_BR__BR_X__SHIFT 0x0
++#define PA_SC_VPORT_SCISSOR_14_BR__BR_Y__SHIFT 0x10
++#define PA_SC_VPORT_SCISSOR_14_BR__BR_X_MASK 0x00007FFFL
++#define PA_SC_VPORT_SCISSOR_14_BR__BR_Y_MASK 0x7FFF0000L
++//PA_SC_VPORT_SCISSOR_15_TL
++#define PA_SC_VPORT_SCISSOR_15_TL__TL_X__SHIFT 0x0
++#define PA_SC_VPORT_SCISSOR_15_TL__TL_Y__SHIFT 0x10
++#define PA_SC_VPORT_SCISSOR_15_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
++#define PA_SC_VPORT_SCISSOR_15_TL__TL_X_MASK 0x00007FFFL
++#define PA_SC_VPORT_SCISSOR_15_TL__TL_Y_MASK 0x7FFF0000L
++#define PA_SC_VPORT_SCISSOR_15_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
++//PA_SC_VPORT_SCISSOR_15_BR
++#define PA_SC_VPORT_SCISSOR_15_BR__BR_X__SHIFT 0x0
++#define PA_SC_VPORT_SCISSOR_15_BR__BR_Y__SHIFT 0x10
++#define PA_SC_VPORT_SCISSOR_15_BR__BR_X_MASK 0x00007FFFL
++#define PA_SC_VPORT_SCISSOR_15_BR__BR_Y_MASK 0x7FFF0000L
++//PA_SC_VPORT_ZMIN_0
++#define PA_SC_VPORT_ZMIN_0__VPORT_ZMIN__SHIFT 0x0
++#define PA_SC_VPORT_ZMIN_0__VPORT_ZMIN_MASK 0xFFFFFFFFL
++//PA_SC_VPORT_ZMAX_0
++#define PA_SC_VPORT_ZMAX_0__VPORT_ZMAX__SHIFT 0x0
++#define PA_SC_VPORT_ZMAX_0__VPORT_ZMAX_MASK 0xFFFFFFFFL
++//PA_SC_VPORT_ZMIN_1
++#define PA_SC_VPORT_ZMIN_1__VPORT_ZMIN__SHIFT 0x0
++#define PA_SC_VPORT_ZMIN_1__VPORT_ZMIN_MASK 0xFFFFFFFFL
++//PA_SC_VPORT_ZMAX_1
++#define PA_SC_VPORT_ZMAX_1__VPORT_ZMAX__SHIFT 0x0
++#define PA_SC_VPORT_ZMAX_1__VPORT_ZMAX_MASK 0xFFFFFFFFL
++//PA_SC_VPORT_ZMIN_2
++#define PA_SC_VPORT_ZMIN_2__VPORT_ZMIN__SHIFT 0x0
++#define PA_SC_VPORT_ZMIN_2__VPORT_ZMIN_MASK 0xFFFFFFFFL
++//PA_SC_VPORT_ZMAX_2
++#define PA_SC_VPORT_ZMAX_2__VPORT_ZMAX__SHIFT 0x0
++#define PA_SC_VPORT_ZMAX_2__VPORT_ZMAX_MASK 0xFFFFFFFFL
++//PA_SC_VPORT_ZMIN_3
++#define PA_SC_VPORT_ZMIN_3__VPORT_ZMIN__SHIFT 0x0
++#define PA_SC_VPORT_ZMIN_3__VPORT_ZMIN_MASK 0xFFFFFFFFL
++//PA_SC_VPORT_ZMAX_3
++#define PA_SC_VPORT_ZMAX_3__VPORT_ZMAX__SHIFT 0x0
++#define PA_SC_VPORT_ZMAX_3__VPORT_ZMAX_MASK 0xFFFFFFFFL
++//PA_SC_VPORT_ZMIN_4
++#define PA_SC_VPORT_ZMIN_4__VPORT_ZMIN__SHIFT 0x0
++#define PA_SC_VPORT_ZMIN_4__VPORT_ZMIN_MASK 0xFFFFFFFFL
++//PA_SC_VPORT_ZMAX_4
++#define PA_SC_VPORT_ZMAX_4__VPORT_ZMAX__SHIFT 0x0
++#define PA_SC_VPORT_ZMAX_4__VPORT_ZMAX_MASK 0xFFFFFFFFL
++//PA_SC_VPORT_ZMIN_5
++#define PA_SC_VPORT_ZMIN_5__VPORT_ZMIN__SHIFT 0x0
++#define PA_SC_VPORT_ZMIN_5__VPORT_ZMIN_MASK 0xFFFFFFFFL
++//PA_SC_VPORT_ZMAX_5
++#define PA_SC_VPORT_ZMAX_5__VPORT_ZMAX__SHIFT 0x0
++#define PA_SC_VPORT_ZMAX_5__VPORT_ZMAX_MASK 0xFFFFFFFFL
++//PA_SC_VPORT_ZMIN_6
++#define PA_SC_VPORT_ZMIN_6__VPORT_ZMIN__SHIFT 0x0
++#define PA_SC_VPORT_ZMIN_6__VPORT_ZMIN_MASK 0xFFFFFFFFL
++//PA_SC_VPORT_ZMAX_6
++#define PA_SC_VPORT_ZMAX_6__VPORT_ZMAX__SHIFT 0x0
++#define PA_SC_VPORT_ZMAX_6__VPORT_ZMAX_MASK 0xFFFFFFFFL
++//PA_SC_VPORT_ZMIN_7
++#define PA_SC_VPORT_ZMIN_7__VPORT_ZMIN__SHIFT 0x0
++#define PA_SC_VPORT_ZMIN_7__VPORT_ZMIN_MASK 0xFFFFFFFFL
++//PA_SC_VPORT_ZMAX_7
++#define PA_SC_VPORT_ZMAX_7__VPORT_ZMAX__SHIFT 0x0
++#define PA_SC_VPORT_ZMAX_7__VPORT_ZMAX_MASK 0xFFFFFFFFL
++//PA_SC_VPORT_ZMIN_8
++#define PA_SC_VPORT_ZMIN_8__VPORT_ZMIN__SHIFT 0x0
++#define PA_SC_VPORT_ZMIN_8__VPORT_ZMIN_MASK 0xFFFFFFFFL
++//PA_SC_VPORT_ZMAX_8
++#define PA_SC_VPORT_ZMAX_8__VPORT_ZMAX__SHIFT 0x0
++#define PA_SC_VPORT_ZMAX_8__VPORT_ZMAX_MASK 0xFFFFFFFFL
++//PA_SC_VPORT_ZMIN_9
++#define PA_SC_VPORT_ZMIN_9__VPORT_ZMIN__SHIFT 0x0
++#define PA_SC_VPORT_ZMIN_9__VPORT_ZMIN_MASK 0xFFFFFFFFL
++//PA_SC_VPORT_ZMAX_9
++#define PA_SC_VPORT_ZMAX_9__VPORT_ZMAX__SHIFT 0x0
++#define PA_SC_VPORT_ZMAX_9__VPORT_ZMAX_MASK 0xFFFFFFFFL
++//PA_SC_VPORT_ZMIN_10
++#define PA_SC_VPORT_ZMIN_10__VPORT_ZMIN__SHIFT 0x0
++#define PA_SC_VPORT_ZMIN_10__VPORT_ZMIN_MASK 0xFFFFFFFFL
++//PA_SC_VPORT_ZMAX_10
++#define PA_SC_VPORT_ZMAX_10__VPORT_ZMAX__SHIFT 0x0
++#define PA_SC_VPORT_ZMAX_10__VPORT_ZMAX_MASK 0xFFFFFFFFL
++//PA_SC_VPORT_ZMIN_11
++#define PA_SC_VPORT_ZMIN_11__VPORT_ZMIN__SHIFT 0x0
++#define PA_SC_VPORT_ZMIN_11__VPORT_ZMIN_MASK 0xFFFFFFFFL
++//PA_SC_VPORT_ZMAX_11
++#define PA_SC_VPORT_ZMAX_11__VPORT_ZMAX__SHIFT 0x0
++#define PA_SC_VPORT_ZMAX_11__VPORT_ZMAX_MASK 0xFFFFFFFFL
++//PA_SC_VPORT_ZMIN_12
++#define PA_SC_VPORT_ZMIN_12__VPORT_ZMIN__SHIFT 0x0
++#define PA_SC_VPORT_ZMIN_12__VPORT_ZMIN_MASK 0xFFFFFFFFL
++//PA_SC_VPORT_ZMAX_12
++#define PA_SC_VPORT_ZMAX_12__VPORT_ZMAX__SHIFT 0x0
++#define PA_SC_VPORT_ZMAX_12__VPORT_ZMAX_MASK 0xFFFFFFFFL
++//PA_SC_VPORT_ZMIN_13
++#define PA_SC_VPORT_ZMIN_13__VPORT_ZMIN__SHIFT 0x0
++#define PA_SC_VPORT_ZMIN_13__VPORT_ZMIN_MASK 0xFFFFFFFFL
++//PA_SC_VPORT_ZMAX_13
++#define PA_SC_VPORT_ZMAX_13__VPORT_ZMAX__SHIFT 0x0
++#define PA_SC_VPORT_ZMAX_13__VPORT_ZMAX_MASK 0xFFFFFFFFL
++//PA_SC_VPORT_ZMIN_14
++#define PA_SC_VPORT_ZMIN_14__VPORT_ZMIN__SHIFT 0x0
++#define PA_SC_VPORT_ZMIN_14__VPORT_ZMIN_MASK 0xFFFFFFFFL
++//PA_SC_VPORT_ZMAX_14
++#define PA_SC_VPORT_ZMAX_14__VPORT_ZMAX__SHIFT 0x0
++#define PA_SC_VPORT_ZMAX_14__VPORT_ZMAX_MASK 0xFFFFFFFFL
++//PA_SC_VPORT_ZMIN_15
++#define PA_SC_VPORT_ZMIN_15__VPORT_ZMIN__SHIFT 0x0
++#define PA_SC_VPORT_ZMIN_15__VPORT_ZMIN_MASK 0xFFFFFFFFL
++//PA_SC_VPORT_ZMAX_15
++#define PA_SC_VPORT_ZMAX_15__VPORT_ZMAX__SHIFT 0x0
++#define PA_SC_VPORT_ZMAX_15__VPORT_ZMAX_MASK 0xFFFFFFFFL
++//PA_SC_RASTER_CONFIG
++#define PA_SC_RASTER_CONFIG__RB_MAP_PKR0__SHIFT 0x0
++#define PA_SC_RASTER_CONFIG__RB_MAP_PKR1__SHIFT 0x2
++#define PA_SC_RASTER_CONFIG__RB_XSEL2__SHIFT 0x4
++#define PA_SC_RASTER_CONFIG__RB_XSEL__SHIFT 0x6
++#define PA_SC_RASTER_CONFIG__RB_YSEL__SHIFT 0x7
++#define PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT 0x8
++#define PA_SC_RASTER_CONFIG__PKR_XSEL__SHIFT 0xa
++#define PA_SC_RASTER_CONFIG__PKR_YSEL__SHIFT 0xc
++#define PA_SC_RASTER_CONFIG__PKR_XSEL2__SHIFT 0xe
++#define PA_SC_RASTER_CONFIG__SC_MAP__SHIFT 0x10
++#define PA_SC_RASTER_CONFIG__SC_XSEL__SHIFT 0x12
++#define PA_SC_RASTER_CONFIG__SC_YSEL__SHIFT 0x14
++#define PA_SC_RASTER_CONFIG__SE_MAP__SHIFT 0x18
++#define PA_SC_RASTER_CONFIG__SE_XSEL__SHIFT 0x1a
++#define PA_SC_RASTER_CONFIG__SE_YSEL__SHIFT 0x1d
++#define PA_SC_RASTER_CONFIG__RB_MAP_PKR0_MASK 0x00000003L
++#define PA_SC_RASTER_CONFIG__RB_MAP_PKR1_MASK 0x0000000CL
++#define PA_SC_RASTER_CONFIG__RB_XSEL2_MASK 0x00000030L
++#define PA_SC_RASTER_CONFIG__RB_XSEL_MASK 0x00000040L
++#define PA_SC_RASTER_CONFIG__RB_YSEL_MASK 0x00000080L
++#define PA_SC_RASTER_CONFIG__PKR_MAP_MASK 0x00000300L
++#define PA_SC_RASTER_CONFIG__PKR_XSEL_MASK 0x00000C00L
++#define PA_SC_RASTER_CONFIG__PKR_YSEL_MASK 0x00003000L
++#define PA_SC_RASTER_CONFIG__PKR_XSEL2_MASK 0x0000C000L
++#define PA_SC_RASTER_CONFIG__SC_MAP_MASK 0x00030000L
++#define PA_SC_RASTER_CONFIG__SC_XSEL_MASK 0x000C0000L
++#define PA_SC_RASTER_CONFIG__SC_YSEL_MASK 0x00300000L
++#define PA_SC_RASTER_CONFIG__SE_MAP_MASK 0x03000000L
++#define PA_SC_RASTER_CONFIG__SE_XSEL_MASK 0x1C000000L
++#define PA_SC_RASTER_CONFIG__SE_YSEL_MASK 0xE0000000L
++//PA_SC_RASTER_CONFIG_1
++#define PA_SC_RASTER_CONFIG_1__SE_PAIR_MAP__SHIFT 0x0
++#define PA_SC_RASTER_CONFIG_1__SE_PAIR_XSEL__SHIFT 0x2
++#define PA_SC_RASTER_CONFIG_1__SE_PAIR_YSEL__SHIFT 0x5
++#define PA_SC_RASTER_CONFIG_1__SE_PAIR_MAP_MASK 0x00000003L
++#define PA_SC_RASTER_CONFIG_1__SE_PAIR_XSEL_MASK 0x0000001CL
++#define PA_SC_RASTER_CONFIG_1__SE_PAIR_YSEL_MASK 0x000000E0L
++//PA_SC_SCREEN_EXTENT_CONTROL
++#define PA_SC_SCREEN_EXTENT_CONTROL__SLICE_EVEN_ENABLE__SHIFT 0x0
++#define PA_SC_SCREEN_EXTENT_CONTROL__SLICE_ODD_ENABLE__SHIFT 0x2
++#define PA_SC_SCREEN_EXTENT_CONTROL__SLICE_EVEN_ENABLE_MASK 0x00000003L
++#define PA_SC_SCREEN_EXTENT_CONTROL__SLICE_ODD_ENABLE_MASK 0x0000000CL
++//PA_SC_TILE_STEERING_OVERRIDE
++#define PA_SC_TILE_STEERING_OVERRIDE__ENABLE__SHIFT 0x0
++#define PA_SC_TILE_STEERING_OVERRIDE__NUM_SE__SHIFT 0x1
++#define PA_SC_TILE_STEERING_OVERRIDE__NUM_RB_PER_SE__SHIFT 0x5
++#define PA_SC_TILE_STEERING_OVERRIDE__DISABLE_SRBSL_DB_OPTIMIZED_PACKING__SHIFT 0x8
++#define PA_SC_TILE_STEERING_OVERRIDE__ENABLE_MASK 0x00000001L
++#define PA_SC_TILE_STEERING_OVERRIDE__NUM_SE_MASK 0x00000006L
++#define PA_SC_TILE_STEERING_OVERRIDE__NUM_RB_PER_SE_MASK 0x00000060L
++#define PA_SC_TILE_STEERING_OVERRIDE__DISABLE_SRBSL_DB_OPTIMIZED_PACKING_MASK 0x00000100L
++//CP_PERFMON_CNTX_CNTL
++#define CP_PERFMON_CNTX_CNTL__PERFMON_ENABLE__SHIFT 0x1f
++#define CP_PERFMON_CNTX_CNTL__PERFMON_ENABLE_MASK 0x80000000L
++//CP_PIPEID
++#define CP_PIPEID__PIPE_ID__SHIFT 0x0
++#define CP_PIPEID__PIPE_ID_MASK 0x00000003L
++//CP_RINGID
++#define CP_RINGID__RINGID__SHIFT 0x0
++#define CP_RINGID__RINGID_MASK 0x00000003L
++//CP_VMID
++#define CP_VMID__VMID__SHIFT 0x0
++#define CP_VMID__VMID_MASK 0x0000000FL
++//PA_SC_RIGHT_VERT_GRID
++#define PA_SC_RIGHT_VERT_GRID__LEFT_QTR__SHIFT 0x0
++#define PA_SC_RIGHT_VERT_GRID__LEFT_HALF__SHIFT 0x8
++#define PA_SC_RIGHT_VERT_GRID__RIGHT_HALF__SHIFT 0x10
++#define PA_SC_RIGHT_VERT_GRID__RIGHT_QTR__SHIFT 0x18
++#define PA_SC_RIGHT_VERT_GRID__LEFT_QTR_MASK 0x000000FFL
++#define PA_SC_RIGHT_VERT_GRID__LEFT_HALF_MASK 0x0000FF00L
++#define PA_SC_RIGHT_VERT_GRID__RIGHT_HALF_MASK 0x00FF0000L
++#define PA_SC_RIGHT_VERT_GRID__RIGHT_QTR_MASK 0xFF000000L
++//PA_SC_LEFT_VERT_GRID
++#define PA_SC_LEFT_VERT_GRID__LEFT_QTR__SHIFT 0x0
++#define PA_SC_LEFT_VERT_GRID__LEFT_HALF__SHIFT 0x8
++#define PA_SC_LEFT_VERT_GRID__RIGHT_HALF__SHIFT 0x10
++#define PA_SC_LEFT_VERT_GRID__RIGHT_QTR__SHIFT 0x18
++#define PA_SC_LEFT_VERT_GRID__LEFT_QTR_MASK 0x000000FFL
++#define PA_SC_LEFT_VERT_GRID__LEFT_HALF_MASK 0x0000FF00L
++#define PA_SC_LEFT_VERT_GRID__RIGHT_HALF_MASK 0x00FF0000L
++#define PA_SC_LEFT_VERT_GRID__RIGHT_QTR_MASK 0xFF000000L
++//PA_SC_HORIZ_GRID
++#define PA_SC_HORIZ_GRID__TOP_QTR__SHIFT 0x0
++#define PA_SC_HORIZ_GRID__TOP_HALF__SHIFT 0x8
++#define PA_SC_HORIZ_GRID__BOT_HALF__SHIFT 0x10
++#define PA_SC_HORIZ_GRID__BOT_QTR__SHIFT 0x18
++#define PA_SC_HORIZ_GRID__TOP_QTR_MASK 0x000000FFL
++#define PA_SC_HORIZ_GRID__TOP_HALF_MASK 0x0000FF00L
++#define PA_SC_HORIZ_GRID__BOT_HALF_MASK 0x00FF0000L
++#define PA_SC_HORIZ_GRID__BOT_QTR_MASK 0xFF000000L
++//VGT_MULTI_PRIM_IB_RESET_INDX
++#define VGT_MULTI_PRIM_IB_RESET_INDX__RESET_INDX__SHIFT 0x0
++#define VGT_MULTI_PRIM_IB_RESET_INDX__RESET_INDX_MASK 0xFFFFFFFFL
++//CB_BLEND_RED
++#define CB_BLEND_RED__BLEND_RED__SHIFT 0x0
++#define CB_BLEND_RED__BLEND_RED_MASK 0xFFFFFFFFL
++//CB_BLEND_GREEN
++#define CB_BLEND_GREEN__BLEND_GREEN__SHIFT 0x0
++#define CB_BLEND_GREEN__BLEND_GREEN_MASK 0xFFFFFFFFL
++//CB_BLEND_BLUE
++#define CB_BLEND_BLUE__BLEND_BLUE__SHIFT 0x0
++#define CB_BLEND_BLUE__BLEND_BLUE_MASK 0xFFFFFFFFL
++//CB_BLEND_ALPHA
++#define CB_BLEND_ALPHA__BLEND_ALPHA__SHIFT 0x0
++#define CB_BLEND_ALPHA__BLEND_ALPHA_MASK 0xFFFFFFFFL
++//CB_DCC_CONTROL
++#define CB_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT 0x0
++#define CB_DCC_CONTROL__OVERWRITE_COMBINER_MRT_SHARING_DISABLE__SHIFT 0x1
++#define CB_DCC_CONTROL__OVERWRITE_COMBINER_WATERMARK__SHIFT 0x2
++#define CB_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK 0x00000001L
++#define CB_DCC_CONTROL__OVERWRITE_COMBINER_MRT_SHARING_DISABLE_MASK 0x00000002L
++#define CB_DCC_CONTROL__OVERWRITE_COMBINER_WATERMARK_MASK 0x0000007CL
++//DB_STENCIL_CONTROL
++#define DB_STENCIL_CONTROL__STENCILFAIL__SHIFT 0x0
++#define DB_STENCIL_CONTROL__STENCILZPASS__SHIFT 0x4
++#define DB_STENCIL_CONTROL__STENCILZFAIL__SHIFT 0x8
++#define DB_STENCIL_CONTROL__STENCILFAIL_BF__SHIFT 0xc
++#define DB_STENCIL_CONTROL__STENCILZPASS_BF__SHIFT 0x10
++#define DB_STENCIL_CONTROL__STENCILZFAIL_BF__SHIFT 0x14
++#define DB_STENCIL_CONTROL__STENCILFAIL_MASK 0x0000000FL
++#define DB_STENCIL_CONTROL__STENCILZPASS_MASK 0x000000F0L
++#define DB_STENCIL_CONTROL__STENCILZFAIL_MASK 0x00000F00L
++#define DB_STENCIL_CONTROL__STENCILFAIL_BF_MASK 0x0000F000L
++#define DB_STENCIL_CONTROL__STENCILZPASS_BF_MASK 0x000F0000L
++#define DB_STENCIL_CONTROL__STENCILZFAIL_BF_MASK 0x00F00000L
++//DB_STENCILREFMASK
++#define DB_STENCILREFMASK__STENCILTESTVAL__SHIFT 0x0
++#define DB_STENCILREFMASK__STENCILMASK__SHIFT 0x8
++#define DB_STENCILREFMASK__STENCILWRITEMASK__SHIFT 0x10
++#define DB_STENCILREFMASK__STENCILOPVAL__SHIFT 0x18
++#define DB_STENCILREFMASK__STENCILTESTVAL_MASK 0x000000FFL
++#define DB_STENCILREFMASK__STENCILMASK_MASK 0x0000FF00L
++#define DB_STENCILREFMASK__STENCILWRITEMASK_MASK 0x00FF0000L
++#define DB_STENCILREFMASK__STENCILOPVAL_MASK 0xFF000000L
++//DB_STENCILREFMASK_BF
++#define DB_STENCILREFMASK_BF__STENCILTESTVAL_BF__SHIFT 0x0
++#define DB_STENCILREFMASK_BF__STENCILMASK_BF__SHIFT 0x8
++#define DB_STENCILREFMASK_BF__STENCILWRITEMASK_BF__SHIFT 0x10
++#define DB_STENCILREFMASK_BF__STENCILOPVAL_BF__SHIFT 0x18
++#define DB_STENCILREFMASK_BF__STENCILTESTVAL_BF_MASK 0x000000FFL
++#define DB_STENCILREFMASK_BF__STENCILMASK_BF_MASK 0x0000FF00L
++#define DB_STENCILREFMASK_BF__STENCILWRITEMASK_BF_MASK 0x00FF0000L
++#define DB_STENCILREFMASK_BF__STENCILOPVAL_BF_MASK 0xFF000000L
++//PA_CL_VPORT_XSCALE
++#define PA_CL_VPORT_XSCALE__VPORT_XSCALE__SHIFT 0x0
++#define PA_CL_VPORT_XSCALE__VPORT_XSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_XOFFSET
++#define PA_CL_VPORT_XOFFSET__VPORT_XOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_XOFFSET__VPORT_XOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_YSCALE
++#define PA_CL_VPORT_YSCALE__VPORT_YSCALE__SHIFT 0x0
++#define PA_CL_VPORT_YSCALE__VPORT_YSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_YOFFSET
++#define PA_CL_VPORT_YOFFSET__VPORT_YOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_YOFFSET__VPORT_YOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_ZSCALE
++#define PA_CL_VPORT_ZSCALE__VPORT_ZSCALE__SHIFT 0x0
++#define PA_CL_VPORT_ZSCALE__VPORT_ZSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_ZOFFSET
++#define PA_CL_VPORT_ZOFFSET__VPORT_ZOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_ZOFFSET__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_XSCALE_1
++#define PA_CL_VPORT_XSCALE_1__VPORT_XSCALE__SHIFT 0x0
++#define PA_CL_VPORT_XSCALE_1__VPORT_XSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_XOFFSET_1
++#define PA_CL_VPORT_XOFFSET_1__VPORT_XOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_XOFFSET_1__VPORT_XOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_YSCALE_1
++#define PA_CL_VPORT_YSCALE_1__VPORT_YSCALE__SHIFT 0x0
++#define PA_CL_VPORT_YSCALE_1__VPORT_YSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_YOFFSET_1
++#define PA_CL_VPORT_YOFFSET_1__VPORT_YOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_YOFFSET_1__VPORT_YOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_ZSCALE_1
++#define PA_CL_VPORT_ZSCALE_1__VPORT_ZSCALE__SHIFT 0x0
++#define PA_CL_VPORT_ZSCALE_1__VPORT_ZSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_ZOFFSET_1
++#define PA_CL_VPORT_ZOFFSET_1__VPORT_ZOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_ZOFFSET_1__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_XSCALE_2
++#define PA_CL_VPORT_XSCALE_2__VPORT_XSCALE__SHIFT 0x0
++#define PA_CL_VPORT_XSCALE_2__VPORT_XSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_XOFFSET_2
++#define PA_CL_VPORT_XOFFSET_2__VPORT_XOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_XOFFSET_2__VPORT_XOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_YSCALE_2
++#define PA_CL_VPORT_YSCALE_2__VPORT_YSCALE__SHIFT 0x0
++#define PA_CL_VPORT_YSCALE_2__VPORT_YSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_YOFFSET_2
++#define PA_CL_VPORT_YOFFSET_2__VPORT_YOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_YOFFSET_2__VPORT_YOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_ZSCALE_2
++#define PA_CL_VPORT_ZSCALE_2__VPORT_ZSCALE__SHIFT 0x0
++#define PA_CL_VPORT_ZSCALE_2__VPORT_ZSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_ZOFFSET_2
++#define PA_CL_VPORT_ZOFFSET_2__VPORT_ZOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_ZOFFSET_2__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_XSCALE_3
++#define PA_CL_VPORT_XSCALE_3__VPORT_XSCALE__SHIFT 0x0
++#define PA_CL_VPORT_XSCALE_3__VPORT_XSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_XOFFSET_3
++#define PA_CL_VPORT_XOFFSET_3__VPORT_XOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_XOFFSET_3__VPORT_XOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_YSCALE_3
++#define PA_CL_VPORT_YSCALE_3__VPORT_YSCALE__SHIFT 0x0
++#define PA_CL_VPORT_YSCALE_3__VPORT_YSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_YOFFSET_3
++#define PA_CL_VPORT_YOFFSET_3__VPORT_YOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_YOFFSET_3__VPORT_YOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_ZSCALE_3
++#define PA_CL_VPORT_ZSCALE_3__VPORT_ZSCALE__SHIFT 0x0
++#define PA_CL_VPORT_ZSCALE_3__VPORT_ZSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_ZOFFSET_3
++#define PA_CL_VPORT_ZOFFSET_3__VPORT_ZOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_ZOFFSET_3__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_XSCALE_4
++#define PA_CL_VPORT_XSCALE_4__VPORT_XSCALE__SHIFT 0x0
++#define PA_CL_VPORT_XSCALE_4__VPORT_XSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_XOFFSET_4
++#define PA_CL_VPORT_XOFFSET_4__VPORT_XOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_XOFFSET_4__VPORT_XOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_YSCALE_4
++#define PA_CL_VPORT_YSCALE_4__VPORT_YSCALE__SHIFT 0x0
++#define PA_CL_VPORT_YSCALE_4__VPORT_YSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_YOFFSET_4
++#define PA_CL_VPORT_YOFFSET_4__VPORT_YOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_YOFFSET_4__VPORT_YOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_ZSCALE_4
++#define PA_CL_VPORT_ZSCALE_4__VPORT_ZSCALE__SHIFT 0x0
++#define PA_CL_VPORT_ZSCALE_4__VPORT_ZSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_ZOFFSET_4
++#define PA_CL_VPORT_ZOFFSET_4__VPORT_ZOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_ZOFFSET_4__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_XSCALE_5
++#define PA_CL_VPORT_XSCALE_5__VPORT_XSCALE__SHIFT 0x0
++#define PA_CL_VPORT_XSCALE_5__VPORT_XSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_XOFFSET_5
++#define PA_CL_VPORT_XOFFSET_5__VPORT_XOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_XOFFSET_5__VPORT_XOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_YSCALE_5
++#define PA_CL_VPORT_YSCALE_5__VPORT_YSCALE__SHIFT 0x0
++#define PA_CL_VPORT_YSCALE_5__VPORT_YSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_YOFFSET_5
++#define PA_CL_VPORT_YOFFSET_5__VPORT_YOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_YOFFSET_5__VPORT_YOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_ZSCALE_5
++#define PA_CL_VPORT_ZSCALE_5__VPORT_ZSCALE__SHIFT 0x0
++#define PA_CL_VPORT_ZSCALE_5__VPORT_ZSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_ZOFFSET_5
++#define PA_CL_VPORT_ZOFFSET_5__VPORT_ZOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_ZOFFSET_5__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_XSCALE_6
++#define PA_CL_VPORT_XSCALE_6__VPORT_XSCALE__SHIFT 0x0
++#define PA_CL_VPORT_XSCALE_6__VPORT_XSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_XOFFSET_6
++#define PA_CL_VPORT_XOFFSET_6__VPORT_XOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_XOFFSET_6__VPORT_XOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_YSCALE_6
++#define PA_CL_VPORT_YSCALE_6__VPORT_YSCALE__SHIFT 0x0
++#define PA_CL_VPORT_YSCALE_6__VPORT_YSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_YOFFSET_6
++#define PA_CL_VPORT_YOFFSET_6__VPORT_YOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_YOFFSET_6__VPORT_YOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_ZSCALE_6
++#define PA_CL_VPORT_ZSCALE_6__VPORT_ZSCALE__SHIFT 0x0
++#define PA_CL_VPORT_ZSCALE_6__VPORT_ZSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_ZOFFSET_6
++#define PA_CL_VPORT_ZOFFSET_6__VPORT_ZOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_ZOFFSET_6__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_XSCALE_7
++#define PA_CL_VPORT_XSCALE_7__VPORT_XSCALE__SHIFT 0x0
++#define PA_CL_VPORT_XSCALE_7__VPORT_XSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_XOFFSET_7
++#define PA_CL_VPORT_XOFFSET_7__VPORT_XOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_XOFFSET_7__VPORT_XOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_YSCALE_7
++#define PA_CL_VPORT_YSCALE_7__VPORT_YSCALE__SHIFT 0x0
++#define PA_CL_VPORT_YSCALE_7__VPORT_YSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_YOFFSET_7
++#define PA_CL_VPORT_YOFFSET_7__VPORT_YOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_YOFFSET_7__VPORT_YOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_ZSCALE_7
++#define PA_CL_VPORT_ZSCALE_7__VPORT_ZSCALE__SHIFT 0x0
++#define PA_CL_VPORT_ZSCALE_7__VPORT_ZSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_ZOFFSET_7
++#define PA_CL_VPORT_ZOFFSET_7__VPORT_ZOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_ZOFFSET_7__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_XSCALE_8
++#define PA_CL_VPORT_XSCALE_8__VPORT_XSCALE__SHIFT 0x0
++#define PA_CL_VPORT_XSCALE_8__VPORT_XSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_XOFFSET_8
++#define PA_CL_VPORT_XOFFSET_8__VPORT_XOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_XOFFSET_8__VPORT_XOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_YSCALE_8
++#define PA_CL_VPORT_YSCALE_8__VPORT_YSCALE__SHIFT 0x0
++#define PA_CL_VPORT_YSCALE_8__VPORT_YSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_YOFFSET_8
++#define PA_CL_VPORT_YOFFSET_8__VPORT_YOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_YOFFSET_8__VPORT_YOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_ZSCALE_8
++#define PA_CL_VPORT_ZSCALE_8__VPORT_ZSCALE__SHIFT 0x0
++#define PA_CL_VPORT_ZSCALE_8__VPORT_ZSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_ZOFFSET_8
++#define PA_CL_VPORT_ZOFFSET_8__VPORT_ZOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_ZOFFSET_8__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_XSCALE_9
++#define PA_CL_VPORT_XSCALE_9__VPORT_XSCALE__SHIFT 0x0
++#define PA_CL_VPORT_XSCALE_9__VPORT_XSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_XOFFSET_9
++#define PA_CL_VPORT_XOFFSET_9__VPORT_XOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_XOFFSET_9__VPORT_XOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_YSCALE_9
++#define PA_CL_VPORT_YSCALE_9__VPORT_YSCALE__SHIFT 0x0
++#define PA_CL_VPORT_YSCALE_9__VPORT_YSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_YOFFSET_9
++#define PA_CL_VPORT_YOFFSET_9__VPORT_YOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_YOFFSET_9__VPORT_YOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_ZSCALE_9
++#define PA_CL_VPORT_ZSCALE_9__VPORT_ZSCALE__SHIFT 0x0
++#define PA_CL_VPORT_ZSCALE_9__VPORT_ZSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_ZOFFSET_9
++#define PA_CL_VPORT_ZOFFSET_9__VPORT_ZOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_ZOFFSET_9__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_XSCALE_10
++#define PA_CL_VPORT_XSCALE_10__VPORT_XSCALE__SHIFT 0x0
++#define PA_CL_VPORT_XSCALE_10__VPORT_XSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_XOFFSET_10
++#define PA_CL_VPORT_XOFFSET_10__VPORT_XOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_XOFFSET_10__VPORT_XOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_YSCALE_10
++#define PA_CL_VPORT_YSCALE_10__VPORT_YSCALE__SHIFT 0x0
++#define PA_CL_VPORT_YSCALE_10__VPORT_YSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_YOFFSET_10
++#define PA_CL_VPORT_YOFFSET_10__VPORT_YOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_YOFFSET_10__VPORT_YOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_ZSCALE_10
++#define PA_CL_VPORT_ZSCALE_10__VPORT_ZSCALE__SHIFT 0x0
++#define PA_CL_VPORT_ZSCALE_10__VPORT_ZSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_ZOFFSET_10
++#define PA_CL_VPORT_ZOFFSET_10__VPORT_ZOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_ZOFFSET_10__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_XSCALE_11
++#define PA_CL_VPORT_XSCALE_11__VPORT_XSCALE__SHIFT 0x0
++#define PA_CL_VPORT_XSCALE_11__VPORT_XSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_XOFFSET_11
++#define PA_CL_VPORT_XOFFSET_11__VPORT_XOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_XOFFSET_11__VPORT_XOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_YSCALE_11
++#define PA_CL_VPORT_YSCALE_11__VPORT_YSCALE__SHIFT 0x0
++#define PA_CL_VPORT_YSCALE_11__VPORT_YSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_YOFFSET_11
++#define PA_CL_VPORT_YOFFSET_11__VPORT_YOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_YOFFSET_11__VPORT_YOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_ZSCALE_11
++#define PA_CL_VPORT_ZSCALE_11__VPORT_ZSCALE__SHIFT 0x0
++#define PA_CL_VPORT_ZSCALE_11__VPORT_ZSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_ZOFFSET_11
++#define PA_CL_VPORT_ZOFFSET_11__VPORT_ZOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_ZOFFSET_11__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_XSCALE_12
++#define PA_CL_VPORT_XSCALE_12__VPORT_XSCALE__SHIFT 0x0
++#define PA_CL_VPORT_XSCALE_12__VPORT_XSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_XOFFSET_12
++#define PA_CL_VPORT_XOFFSET_12__VPORT_XOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_XOFFSET_12__VPORT_XOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_YSCALE_12
++#define PA_CL_VPORT_YSCALE_12__VPORT_YSCALE__SHIFT 0x0
++#define PA_CL_VPORT_YSCALE_12__VPORT_YSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_YOFFSET_12
++#define PA_CL_VPORT_YOFFSET_12__VPORT_YOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_YOFFSET_12__VPORT_YOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_ZSCALE_12
++#define PA_CL_VPORT_ZSCALE_12__VPORT_ZSCALE__SHIFT 0x0
++#define PA_CL_VPORT_ZSCALE_12__VPORT_ZSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_ZOFFSET_12
++#define PA_CL_VPORT_ZOFFSET_12__VPORT_ZOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_ZOFFSET_12__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_XSCALE_13
++#define PA_CL_VPORT_XSCALE_13__VPORT_XSCALE__SHIFT 0x0
++#define PA_CL_VPORT_XSCALE_13__VPORT_XSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_XOFFSET_13
++#define PA_CL_VPORT_XOFFSET_13__VPORT_XOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_XOFFSET_13__VPORT_XOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_YSCALE_13
++#define PA_CL_VPORT_YSCALE_13__VPORT_YSCALE__SHIFT 0x0
++#define PA_CL_VPORT_YSCALE_13__VPORT_YSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_YOFFSET_13
++#define PA_CL_VPORT_YOFFSET_13__VPORT_YOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_YOFFSET_13__VPORT_YOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_ZSCALE_13
++#define PA_CL_VPORT_ZSCALE_13__VPORT_ZSCALE__SHIFT 0x0
++#define PA_CL_VPORT_ZSCALE_13__VPORT_ZSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_ZOFFSET_13
++#define PA_CL_VPORT_ZOFFSET_13__VPORT_ZOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_ZOFFSET_13__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_XSCALE_14
++#define PA_CL_VPORT_XSCALE_14__VPORT_XSCALE__SHIFT 0x0
++#define PA_CL_VPORT_XSCALE_14__VPORT_XSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_XOFFSET_14
++#define PA_CL_VPORT_XOFFSET_14__VPORT_XOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_XOFFSET_14__VPORT_XOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_YSCALE_14
++#define PA_CL_VPORT_YSCALE_14__VPORT_YSCALE__SHIFT 0x0
++#define PA_CL_VPORT_YSCALE_14__VPORT_YSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_YOFFSET_14
++#define PA_CL_VPORT_YOFFSET_14__VPORT_YOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_YOFFSET_14__VPORT_YOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_ZSCALE_14
++#define PA_CL_VPORT_ZSCALE_14__VPORT_ZSCALE__SHIFT 0x0
++#define PA_CL_VPORT_ZSCALE_14__VPORT_ZSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_ZOFFSET_14
++#define PA_CL_VPORT_ZOFFSET_14__VPORT_ZOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_ZOFFSET_14__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_XSCALE_15
++#define PA_CL_VPORT_XSCALE_15__VPORT_XSCALE__SHIFT 0x0
++#define PA_CL_VPORT_XSCALE_15__VPORT_XSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_XOFFSET_15
++#define PA_CL_VPORT_XOFFSET_15__VPORT_XOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_XOFFSET_15__VPORT_XOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_YSCALE_15
++#define PA_CL_VPORT_YSCALE_15__VPORT_YSCALE__SHIFT 0x0
++#define PA_CL_VPORT_YSCALE_15__VPORT_YSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_YOFFSET_15
++#define PA_CL_VPORT_YOFFSET_15__VPORT_YOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_YOFFSET_15__VPORT_YOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_ZSCALE_15
++#define PA_CL_VPORT_ZSCALE_15__VPORT_ZSCALE__SHIFT 0x0
++#define PA_CL_VPORT_ZSCALE_15__VPORT_ZSCALE_MASK 0xFFFFFFFFL
++//PA_CL_VPORT_ZOFFSET_15
++#define PA_CL_VPORT_ZOFFSET_15__VPORT_ZOFFSET__SHIFT 0x0
++#define PA_CL_VPORT_ZOFFSET_15__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
++//PA_CL_UCP_0_X
++#define PA_CL_UCP_0_X__DATA_REGISTER__SHIFT 0x0
++#define PA_CL_UCP_0_X__DATA_REGISTER_MASK 0xFFFFFFFFL
++//PA_CL_UCP_0_Y
++#define PA_CL_UCP_0_Y__DATA_REGISTER__SHIFT 0x0
++#define PA_CL_UCP_0_Y__DATA_REGISTER_MASK 0xFFFFFFFFL
++//PA_CL_UCP_0_Z
++#define PA_CL_UCP_0_Z__DATA_REGISTER__SHIFT 0x0
++#define PA_CL_UCP_0_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
++//PA_CL_UCP_0_W
++#define PA_CL_UCP_0_W__DATA_REGISTER__SHIFT 0x0
++#define PA_CL_UCP_0_W__DATA_REGISTER_MASK 0xFFFFFFFFL
++//PA_CL_UCP_1_X
++#define PA_CL_UCP_1_X__DATA_REGISTER__SHIFT 0x0
++#define PA_CL_UCP_1_X__DATA_REGISTER_MASK 0xFFFFFFFFL
++//PA_CL_UCP_1_Y
++#define PA_CL_UCP_1_Y__DATA_REGISTER__SHIFT 0x0
++#define PA_CL_UCP_1_Y__DATA_REGISTER_MASK 0xFFFFFFFFL
++//PA_CL_UCP_1_Z
++#define PA_CL_UCP_1_Z__DATA_REGISTER__SHIFT 0x0
++#define PA_CL_UCP_1_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
++//PA_CL_UCP_1_W
++#define PA_CL_UCP_1_W__DATA_REGISTER__SHIFT 0x0
++#define PA_CL_UCP_1_W__DATA_REGISTER_MASK 0xFFFFFFFFL
++//PA_CL_UCP_2_X
++#define PA_CL_UCP_2_X__DATA_REGISTER__SHIFT 0x0
++#define PA_CL_UCP_2_X__DATA_REGISTER_MASK 0xFFFFFFFFL
++//PA_CL_UCP_2_Y
++#define PA_CL_UCP_2_Y__DATA_REGISTER__SHIFT 0x0
++#define PA_CL_UCP_2_Y__DATA_REGISTER_MASK 0xFFFFFFFFL
++//PA_CL_UCP_2_Z
++#define PA_CL_UCP_2_Z__DATA_REGISTER__SHIFT 0x0
++#define PA_CL_UCP_2_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
++//PA_CL_UCP_2_W
++#define PA_CL_UCP_2_W__DATA_REGISTER__SHIFT 0x0
++#define PA_CL_UCP_2_W__DATA_REGISTER_MASK 0xFFFFFFFFL
++//PA_CL_UCP_3_X
++#define PA_CL_UCP_3_X__DATA_REGISTER__SHIFT 0x0
++#define PA_CL_UCP_3_X__DATA_REGISTER_MASK 0xFFFFFFFFL
++//PA_CL_UCP_3_Y
++#define PA_CL_UCP_3_Y__DATA_REGISTER__SHIFT 0x0
++#define PA_CL_UCP_3_Y__DATA_REGISTER_MASK 0xFFFFFFFFL
++//PA_CL_UCP_3_Z
++#define PA_CL_UCP_3_Z__DATA_REGISTER__SHIFT 0x0
++#define PA_CL_UCP_3_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
++//PA_CL_UCP_3_W
++#define PA_CL_UCP_3_W__DATA_REGISTER__SHIFT 0x0
++#define PA_CL_UCP_3_W__DATA_REGISTER_MASK 0xFFFFFFFFL
++//PA_CL_UCP_4_X
++#define PA_CL_UCP_4_X__DATA_REGISTER__SHIFT 0x0
++#define PA_CL_UCP_4_X__DATA_REGISTER_MASK 0xFFFFFFFFL
++//PA_CL_UCP_4_Y
++#define PA_CL_UCP_4_Y__DATA_REGISTER__SHIFT 0x0
++#define PA_CL_UCP_4_Y__DATA_REGISTER_MASK 0xFFFFFFFFL
++//PA_CL_UCP_4_Z
++#define PA_CL_UCP_4_Z__DATA_REGISTER__SHIFT 0x0
++#define PA_CL_UCP_4_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
++//PA_CL_UCP_4_W
++#define PA_CL_UCP_4_W__DATA_REGISTER__SHIFT 0x0
++#define PA_CL_UCP_4_W__DATA_REGISTER_MASK 0xFFFFFFFFL
++//PA_CL_UCP_5_X
++#define PA_CL_UCP_5_X__DATA_REGISTER__SHIFT 0x0
++#define PA_CL_UCP_5_X__DATA_REGISTER_MASK 0xFFFFFFFFL
++//PA_CL_UCP_5_Y
++#define PA_CL_UCP_5_Y__DATA_REGISTER__SHIFT 0x0
++#define PA_CL_UCP_5_Y__DATA_REGISTER_MASK 0xFFFFFFFFL
++//PA_CL_UCP_5_Z
++#define PA_CL_UCP_5_Z__DATA_REGISTER__SHIFT 0x0
++#define PA_CL_UCP_5_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
++//PA_CL_UCP_5_W
++#define PA_CL_UCP_5_W__DATA_REGISTER__SHIFT 0x0
++#define PA_CL_UCP_5_W__DATA_REGISTER_MASK 0xFFFFFFFFL
++//SPI_PS_INPUT_CNTL_0
++#define SPI_PS_INPUT_CNTL_0__OFFSET__SHIFT 0x0
++#define SPI_PS_INPUT_CNTL_0__DEFAULT_VAL__SHIFT 0x8
++#define SPI_PS_INPUT_CNTL_0__FLAT_SHADE__SHIFT 0xa
++#define SPI_PS_INPUT_CNTL_0__CYL_WRAP__SHIFT 0xd
++#define SPI_PS_INPUT_CNTL_0__PT_SPRITE_TEX__SHIFT 0x11
++#define SPI_PS_INPUT_CNTL_0__DUP__SHIFT 0x12
++#define SPI_PS_INPUT_CNTL_0__FP16_INTERP_MODE__SHIFT 0x13
++#define SPI_PS_INPUT_CNTL_0__USE_DEFAULT_ATTR1__SHIFT 0x14
++#define SPI_PS_INPUT_CNTL_0__DEFAULT_VAL_ATTR1__SHIFT 0x15
++#define SPI_PS_INPUT_CNTL_0__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
++#define SPI_PS_INPUT_CNTL_0__ATTR0_VALID__SHIFT 0x18
++#define SPI_PS_INPUT_CNTL_0__ATTR1_VALID__SHIFT 0x19
++#define SPI_PS_INPUT_CNTL_0__OFFSET_MASK 0x0000003FL
++#define SPI_PS_INPUT_CNTL_0__DEFAULT_VAL_MASK 0x00000300L
++#define SPI_PS_INPUT_CNTL_0__FLAT_SHADE_MASK 0x00000400L
++#define SPI_PS_INPUT_CNTL_0__CYL_WRAP_MASK 0x0001E000L
++#define SPI_PS_INPUT_CNTL_0__PT_SPRITE_TEX_MASK 0x00020000L
++#define SPI_PS_INPUT_CNTL_0__DUP_MASK 0x00040000L
++#define SPI_PS_INPUT_CNTL_0__FP16_INTERP_MODE_MASK 0x00080000L
++#define SPI_PS_INPUT_CNTL_0__USE_DEFAULT_ATTR1_MASK 0x00100000L
++#define SPI_PS_INPUT_CNTL_0__DEFAULT_VAL_ATTR1_MASK 0x00600000L
++#define SPI_PS_INPUT_CNTL_0__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
++#define SPI_PS_INPUT_CNTL_0__ATTR0_VALID_MASK 0x01000000L
++#define SPI_PS_INPUT_CNTL_0__ATTR1_VALID_MASK 0x02000000L
++//SPI_PS_INPUT_CNTL_1
++#define SPI_PS_INPUT_CNTL_1__OFFSET__SHIFT 0x0
++#define SPI_PS_INPUT_CNTL_1__DEFAULT_VAL__SHIFT 0x8
++#define SPI_PS_INPUT_CNTL_1__FLAT_SHADE__SHIFT 0xa
++#define SPI_PS_INPUT_CNTL_1__CYL_WRAP__SHIFT 0xd
++#define SPI_PS_INPUT_CNTL_1__PT_SPRITE_TEX__SHIFT 0x11
++#define SPI_PS_INPUT_CNTL_1__DUP__SHIFT 0x12
++#define SPI_PS_INPUT_CNTL_1__FP16_INTERP_MODE__SHIFT 0x13
++#define SPI_PS_INPUT_CNTL_1__USE_DEFAULT_ATTR1__SHIFT 0x14
++#define SPI_PS_INPUT_CNTL_1__DEFAULT_VAL_ATTR1__SHIFT 0x15
++#define SPI_PS_INPUT_CNTL_1__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
++#define SPI_PS_INPUT_CNTL_1__ATTR0_VALID__SHIFT 0x18
++#define SPI_PS_INPUT_CNTL_1__ATTR1_VALID__SHIFT 0x19
++#define SPI_PS_INPUT_CNTL_1__OFFSET_MASK 0x0000003FL
++#define SPI_PS_INPUT_CNTL_1__DEFAULT_VAL_MASK 0x00000300L
++#define SPI_PS_INPUT_CNTL_1__FLAT_SHADE_MASK 0x00000400L
++#define SPI_PS_INPUT_CNTL_1__CYL_WRAP_MASK 0x0001E000L
++#define SPI_PS_INPUT_CNTL_1__PT_SPRITE_TEX_MASK 0x00020000L
++#define SPI_PS_INPUT_CNTL_1__DUP_MASK 0x00040000L
++#define SPI_PS_INPUT_CNTL_1__FP16_INTERP_MODE_MASK 0x00080000L
++#define SPI_PS_INPUT_CNTL_1__USE_DEFAULT_ATTR1_MASK 0x00100000L
++#define SPI_PS_INPUT_CNTL_1__DEFAULT_VAL_ATTR1_MASK 0x00600000L
++#define SPI_PS_INPUT_CNTL_1__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
++#define SPI_PS_INPUT_CNTL_1__ATTR0_VALID_MASK 0x01000000L
++#define SPI_PS_INPUT_CNTL_1__ATTR1_VALID_MASK 0x02000000L
++//SPI_PS_INPUT_CNTL_2
++#define SPI_PS_INPUT_CNTL_2__OFFSET__SHIFT 0x0
++#define SPI_PS_INPUT_CNTL_2__DEFAULT_VAL__SHIFT 0x8
++#define SPI_PS_INPUT_CNTL_2__FLAT_SHADE__SHIFT 0xa
++#define SPI_PS_INPUT_CNTL_2__CYL_WRAP__SHIFT 0xd
++#define SPI_PS_INPUT_CNTL_2__PT_SPRITE_TEX__SHIFT 0x11
++#define SPI_PS_INPUT_CNTL_2__DUP__SHIFT 0x12
++#define SPI_PS_INPUT_CNTL_2__FP16_INTERP_MODE__SHIFT 0x13
++#define SPI_PS_INPUT_CNTL_2__USE_DEFAULT_ATTR1__SHIFT 0x14
++#define SPI_PS_INPUT_CNTL_2__DEFAULT_VAL_ATTR1__SHIFT 0x15
++#define SPI_PS_INPUT_CNTL_2__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
++#define SPI_PS_INPUT_CNTL_2__ATTR0_VALID__SHIFT 0x18
++#define SPI_PS_INPUT_CNTL_2__ATTR1_VALID__SHIFT 0x19
++#define SPI_PS_INPUT_CNTL_2__OFFSET_MASK 0x0000003FL
++#define SPI_PS_INPUT_CNTL_2__DEFAULT_VAL_MASK 0x00000300L
++#define SPI_PS_INPUT_CNTL_2__FLAT_SHADE_MASK 0x00000400L
++#define SPI_PS_INPUT_CNTL_2__CYL_WRAP_MASK 0x0001E000L
++#define SPI_PS_INPUT_CNTL_2__PT_SPRITE_TEX_MASK 0x00020000L
++#define SPI_PS_INPUT_CNTL_2__DUP_MASK 0x00040000L
++#define SPI_PS_INPUT_CNTL_2__FP16_INTERP_MODE_MASK 0x00080000L
++#define SPI_PS_INPUT_CNTL_2__USE_DEFAULT_ATTR1_MASK 0x00100000L
++#define SPI_PS_INPUT_CNTL_2__DEFAULT_VAL_ATTR1_MASK 0x00600000L
++#define SPI_PS_INPUT_CNTL_2__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
++#define SPI_PS_INPUT_CNTL_2__ATTR0_VALID_MASK 0x01000000L
++#define SPI_PS_INPUT_CNTL_2__ATTR1_VALID_MASK 0x02000000L
++//SPI_PS_INPUT_CNTL_3
++#define SPI_PS_INPUT_CNTL_3__OFFSET__SHIFT 0x0
++#define SPI_PS_INPUT_CNTL_3__DEFAULT_VAL__SHIFT 0x8
++#define SPI_PS_INPUT_CNTL_3__FLAT_SHADE__SHIFT 0xa
++#define SPI_PS_INPUT_CNTL_3__CYL_WRAP__SHIFT 0xd
++#define SPI_PS_INPUT_CNTL_3__PT_SPRITE_TEX__SHIFT 0x11
++#define SPI_PS_INPUT_CNTL_3__DUP__SHIFT 0x12
++#define SPI_PS_INPUT_CNTL_3__FP16_INTERP_MODE__SHIFT 0x13
++#define SPI_PS_INPUT_CNTL_3__USE_DEFAULT_ATTR1__SHIFT 0x14
++#define SPI_PS_INPUT_CNTL_3__DEFAULT_VAL_ATTR1__SHIFT 0x15
++#define SPI_PS_INPUT_CNTL_3__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
++#define SPI_PS_INPUT_CNTL_3__ATTR0_VALID__SHIFT 0x18
++#define SPI_PS_INPUT_CNTL_3__ATTR1_VALID__SHIFT 0x19
++#define SPI_PS_INPUT_CNTL_3__OFFSET_MASK 0x0000003FL
++#define SPI_PS_INPUT_CNTL_3__DEFAULT_VAL_MASK 0x00000300L
++#define SPI_PS_INPUT_CNTL_3__FLAT_SHADE_MASK 0x00000400L
++#define SPI_PS_INPUT_CNTL_3__CYL_WRAP_MASK 0x0001E000L
++#define SPI_PS_INPUT_CNTL_3__PT_SPRITE_TEX_MASK 0x00020000L
++#define SPI_PS_INPUT_CNTL_3__DUP_MASK 0x00040000L
++#define SPI_PS_INPUT_CNTL_3__FP16_INTERP_MODE_MASK 0x00080000L
++#define SPI_PS_INPUT_CNTL_3__USE_DEFAULT_ATTR1_MASK 0x00100000L
++#define SPI_PS_INPUT_CNTL_3__DEFAULT_VAL_ATTR1_MASK 0x00600000L
++#define SPI_PS_INPUT_CNTL_3__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
++#define SPI_PS_INPUT_CNTL_3__ATTR0_VALID_MASK 0x01000000L
++#define SPI_PS_INPUT_CNTL_3__ATTR1_VALID_MASK 0x02000000L
++//SPI_PS_INPUT_CNTL_4
++#define SPI_PS_INPUT_CNTL_4__OFFSET__SHIFT 0x0
++#define SPI_PS_INPUT_CNTL_4__DEFAULT_VAL__SHIFT 0x8
++#define SPI_PS_INPUT_CNTL_4__FLAT_SHADE__SHIFT 0xa
++#define SPI_PS_INPUT_CNTL_4__CYL_WRAP__SHIFT 0xd
++#define SPI_PS_INPUT_CNTL_4__PT_SPRITE_TEX__SHIFT 0x11
++#define SPI_PS_INPUT_CNTL_4__DUP__SHIFT 0x12
++#define SPI_PS_INPUT_CNTL_4__FP16_INTERP_MODE__SHIFT 0x13
++#define SPI_PS_INPUT_CNTL_4__USE_DEFAULT_ATTR1__SHIFT 0x14
++#define SPI_PS_INPUT_CNTL_4__DEFAULT_VAL_ATTR1__SHIFT 0x15
++#define SPI_PS_INPUT_CNTL_4__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
++#define SPI_PS_INPUT_CNTL_4__ATTR0_VALID__SHIFT 0x18
++#define SPI_PS_INPUT_CNTL_4__ATTR1_VALID__SHIFT 0x19
++#define SPI_PS_INPUT_CNTL_4__OFFSET_MASK 0x0000003FL
++#define SPI_PS_INPUT_CNTL_4__DEFAULT_VAL_MASK 0x00000300L
++#define SPI_PS_INPUT_CNTL_4__FLAT_SHADE_MASK 0x00000400L
++#define SPI_PS_INPUT_CNTL_4__CYL_WRAP_MASK 0x0001E000L
++#define SPI_PS_INPUT_CNTL_4__PT_SPRITE_TEX_MASK 0x00020000L
++#define SPI_PS_INPUT_CNTL_4__DUP_MASK 0x00040000L
++#define SPI_PS_INPUT_CNTL_4__FP16_INTERP_MODE_MASK 0x00080000L
++#define SPI_PS_INPUT_CNTL_4__USE_DEFAULT_ATTR1_MASK 0x00100000L
++#define SPI_PS_INPUT_CNTL_4__DEFAULT_VAL_ATTR1_MASK 0x00600000L
++#define SPI_PS_INPUT_CNTL_4__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
++#define SPI_PS_INPUT_CNTL_4__ATTR0_VALID_MASK 0x01000000L
++#define SPI_PS_INPUT_CNTL_4__ATTR1_VALID_MASK 0x02000000L
++//SPI_PS_INPUT_CNTL_5
++#define SPI_PS_INPUT_CNTL_5__OFFSET__SHIFT 0x0
++#define SPI_PS_INPUT_CNTL_5__DEFAULT_VAL__SHIFT 0x8
++#define SPI_PS_INPUT_CNTL_5__FLAT_SHADE__SHIFT 0xa
++#define SPI_PS_INPUT_CNTL_5__CYL_WRAP__SHIFT 0xd
++#define SPI_PS_INPUT_CNTL_5__PT_SPRITE_TEX__SHIFT 0x11
++#define SPI_PS_INPUT_CNTL_5__DUP__SHIFT 0x12
++#define SPI_PS_INPUT_CNTL_5__FP16_INTERP_MODE__SHIFT 0x13
++#define SPI_PS_INPUT_CNTL_5__USE_DEFAULT_ATTR1__SHIFT 0x14
++#define SPI_PS_INPUT_CNTL_5__DEFAULT_VAL_ATTR1__SHIFT 0x15
++#define SPI_PS_INPUT_CNTL_5__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
++#define SPI_PS_INPUT_CNTL_5__ATTR0_VALID__SHIFT 0x18
++#define SPI_PS_INPUT_CNTL_5__ATTR1_VALID__SHIFT 0x19
++#define SPI_PS_INPUT_CNTL_5__OFFSET_MASK 0x0000003FL
++#define SPI_PS_INPUT_CNTL_5__DEFAULT_VAL_MASK 0x00000300L
++#define SPI_PS_INPUT_CNTL_5__FLAT_SHADE_MASK 0x00000400L
++#define SPI_PS_INPUT_CNTL_5__CYL_WRAP_MASK 0x0001E000L
++#define SPI_PS_INPUT_CNTL_5__PT_SPRITE_TEX_MASK 0x00020000L
++#define SPI_PS_INPUT_CNTL_5__DUP_MASK 0x00040000L
++#define SPI_PS_INPUT_CNTL_5__FP16_INTERP_MODE_MASK 0x00080000L
++#define SPI_PS_INPUT_CNTL_5__USE_DEFAULT_ATTR1_MASK 0x00100000L
++#define SPI_PS_INPUT_CNTL_5__DEFAULT_VAL_ATTR1_MASK 0x00600000L
++#define SPI_PS_INPUT_CNTL_5__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
++#define SPI_PS_INPUT_CNTL_5__ATTR0_VALID_MASK 0x01000000L
++#define SPI_PS_INPUT_CNTL_5__ATTR1_VALID_MASK 0x02000000L
++//SPI_PS_INPUT_CNTL_6
++#define SPI_PS_INPUT_CNTL_6__OFFSET__SHIFT 0x0
++#define SPI_PS_INPUT_CNTL_6__DEFAULT_VAL__SHIFT 0x8
++#define SPI_PS_INPUT_CNTL_6__FLAT_SHADE__SHIFT 0xa
++#define SPI_PS_INPUT_CNTL_6__CYL_WRAP__SHIFT 0xd
++#define SPI_PS_INPUT_CNTL_6__PT_SPRITE_TEX__SHIFT 0x11
++#define SPI_PS_INPUT_CNTL_6__DUP__SHIFT 0x12
++#define SPI_PS_INPUT_CNTL_6__FP16_INTERP_MODE__SHIFT 0x13
++#define SPI_PS_INPUT_CNTL_6__USE_DEFAULT_ATTR1__SHIFT 0x14
++#define SPI_PS_INPUT_CNTL_6__DEFAULT_VAL_ATTR1__SHIFT 0x15
++#define SPI_PS_INPUT_CNTL_6__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
++#define SPI_PS_INPUT_CNTL_6__ATTR0_VALID__SHIFT 0x18
++#define SPI_PS_INPUT_CNTL_6__ATTR1_VALID__SHIFT 0x19
++#define SPI_PS_INPUT_CNTL_6__OFFSET_MASK 0x0000003FL
++#define SPI_PS_INPUT_CNTL_6__DEFAULT_VAL_MASK 0x00000300L
++#define SPI_PS_INPUT_CNTL_6__FLAT_SHADE_MASK 0x00000400L
++#define SPI_PS_INPUT_CNTL_6__CYL_WRAP_MASK 0x0001E000L
++#define SPI_PS_INPUT_CNTL_6__PT_SPRITE_TEX_MASK 0x00020000L
++#define SPI_PS_INPUT_CNTL_6__DUP_MASK 0x00040000L
++#define SPI_PS_INPUT_CNTL_6__FP16_INTERP_MODE_MASK 0x00080000L
++#define SPI_PS_INPUT_CNTL_6__USE_DEFAULT_ATTR1_MASK 0x00100000L
++#define SPI_PS_INPUT_CNTL_6__DEFAULT_VAL_ATTR1_MASK 0x00600000L
++#define SPI_PS_INPUT_CNTL_6__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
++#define SPI_PS_INPUT_CNTL_6__ATTR0_VALID_MASK 0x01000000L
++#define SPI_PS_INPUT_CNTL_6__ATTR1_VALID_MASK 0x02000000L
++//SPI_PS_INPUT_CNTL_7
++#define SPI_PS_INPUT_CNTL_7__OFFSET__SHIFT 0x0
++#define SPI_PS_INPUT_CNTL_7__DEFAULT_VAL__SHIFT 0x8
++#define SPI_PS_INPUT_CNTL_7__FLAT_SHADE__SHIFT 0xa
++#define SPI_PS_INPUT_CNTL_7__CYL_WRAP__SHIFT 0xd
++#define SPI_PS_INPUT_CNTL_7__PT_SPRITE_TEX__SHIFT 0x11
++#define SPI_PS_INPUT_CNTL_7__DUP__SHIFT 0x12
++#define SPI_PS_INPUT_CNTL_7__FP16_INTERP_MODE__SHIFT 0x13
++#define SPI_PS_INPUT_CNTL_7__USE_DEFAULT_ATTR1__SHIFT 0x14
++#define SPI_PS_INPUT_CNTL_7__DEFAULT_VAL_ATTR1__SHIFT 0x15
++#define SPI_PS_INPUT_CNTL_7__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
++#define SPI_PS_INPUT_CNTL_7__ATTR0_VALID__SHIFT 0x18
++#define SPI_PS_INPUT_CNTL_7__ATTR1_VALID__SHIFT 0x19
++#define SPI_PS_INPUT_CNTL_7__OFFSET_MASK 0x0000003FL
++#define SPI_PS_INPUT_CNTL_7__DEFAULT_VAL_MASK 0x00000300L
++#define SPI_PS_INPUT_CNTL_7__FLAT_SHADE_MASK 0x00000400L
++#define SPI_PS_INPUT_CNTL_7__CYL_WRAP_MASK 0x0001E000L
++#define SPI_PS_INPUT_CNTL_7__PT_SPRITE_TEX_MASK 0x00020000L
++#define SPI_PS_INPUT_CNTL_7__DUP_MASK 0x00040000L
++#define SPI_PS_INPUT_CNTL_7__FP16_INTERP_MODE_MASK 0x00080000L
++#define SPI_PS_INPUT_CNTL_7__USE_DEFAULT_ATTR1_MASK 0x00100000L
++#define SPI_PS_INPUT_CNTL_7__DEFAULT_VAL_ATTR1_MASK 0x00600000L
++#define SPI_PS_INPUT_CNTL_7__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
++#define SPI_PS_INPUT_CNTL_7__ATTR0_VALID_MASK 0x01000000L
++#define SPI_PS_INPUT_CNTL_7__ATTR1_VALID_MASK 0x02000000L
++//SPI_PS_INPUT_CNTL_8
++#define SPI_PS_INPUT_CNTL_8__OFFSET__SHIFT 0x0
++#define SPI_PS_INPUT_CNTL_8__DEFAULT_VAL__SHIFT 0x8
++#define SPI_PS_INPUT_CNTL_8__FLAT_SHADE__SHIFT 0xa
++#define SPI_PS_INPUT_CNTL_8__CYL_WRAP__SHIFT 0xd
++#define SPI_PS_INPUT_CNTL_8__PT_SPRITE_TEX__SHIFT 0x11
++#define SPI_PS_INPUT_CNTL_8__DUP__SHIFT 0x12
++#define SPI_PS_INPUT_CNTL_8__FP16_INTERP_MODE__SHIFT 0x13
++#define SPI_PS_INPUT_CNTL_8__USE_DEFAULT_ATTR1__SHIFT 0x14
++#define SPI_PS_INPUT_CNTL_8__DEFAULT_VAL_ATTR1__SHIFT 0x15
++#define SPI_PS_INPUT_CNTL_8__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
++#define SPI_PS_INPUT_CNTL_8__ATTR0_VALID__SHIFT 0x18
++#define SPI_PS_INPUT_CNTL_8__ATTR1_VALID__SHIFT 0x19
++#define SPI_PS_INPUT_CNTL_8__OFFSET_MASK 0x0000003FL
++#define SPI_PS_INPUT_CNTL_8__DEFAULT_VAL_MASK 0x00000300L
++#define SPI_PS_INPUT_CNTL_8__FLAT_SHADE_MASK 0x00000400L
++#define SPI_PS_INPUT_CNTL_8__CYL_WRAP_MASK 0x0001E000L
++#define SPI_PS_INPUT_CNTL_8__PT_SPRITE_TEX_MASK 0x00020000L
++#define SPI_PS_INPUT_CNTL_8__DUP_MASK 0x00040000L
++#define SPI_PS_INPUT_CNTL_8__FP16_INTERP_MODE_MASK 0x00080000L
++#define SPI_PS_INPUT_CNTL_8__USE_DEFAULT_ATTR1_MASK 0x00100000L
++#define SPI_PS_INPUT_CNTL_8__DEFAULT_VAL_ATTR1_MASK 0x00600000L
++#define SPI_PS_INPUT_CNTL_8__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
++#define SPI_PS_INPUT_CNTL_8__ATTR0_VALID_MASK 0x01000000L
++#define SPI_PS_INPUT_CNTL_8__ATTR1_VALID_MASK 0x02000000L
++//SPI_PS_INPUT_CNTL_9
++#define SPI_PS_INPUT_CNTL_9__OFFSET__SHIFT 0x0
++#define SPI_PS_INPUT_CNTL_9__DEFAULT_VAL__SHIFT 0x8
++#define SPI_PS_INPUT_CNTL_9__FLAT_SHADE__SHIFT 0xa
++#define SPI_PS_INPUT_CNTL_9__CYL_WRAP__SHIFT 0xd
++#define SPI_PS_INPUT_CNTL_9__PT_SPRITE_TEX__SHIFT 0x11
++#define SPI_PS_INPUT_CNTL_9__DUP__SHIFT 0x12
++#define SPI_PS_INPUT_CNTL_9__FP16_INTERP_MODE__SHIFT 0x13
++#define SPI_PS_INPUT_CNTL_9__USE_DEFAULT_ATTR1__SHIFT 0x14
++#define SPI_PS_INPUT_CNTL_9__DEFAULT_VAL_ATTR1__SHIFT 0x15
++#define SPI_PS_INPUT_CNTL_9__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
++#define SPI_PS_INPUT_CNTL_9__ATTR0_VALID__SHIFT 0x18
++#define SPI_PS_INPUT_CNTL_9__ATTR1_VALID__SHIFT 0x19
++#define SPI_PS_INPUT_CNTL_9__OFFSET_MASK 0x0000003FL
++#define SPI_PS_INPUT_CNTL_9__DEFAULT_VAL_MASK 0x00000300L
++#define SPI_PS_INPUT_CNTL_9__FLAT_SHADE_MASK 0x00000400L
++#define SPI_PS_INPUT_CNTL_9__CYL_WRAP_MASK 0x0001E000L
++#define SPI_PS_INPUT_CNTL_9__PT_SPRITE_TEX_MASK 0x00020000L
++#define SPI_PS_INPUT_CNTL_9__DUP_MASK 0x00040000L
++#define SPI_PS_INPUT_CNTL_9__FP16_INTERP_MODE_MASK 0x00080000L
++#define SPI_PS_INPUT_CNTL_9__USE_DEFAULT_ATTR1_MASK 0x00100000L
++#define SPI_PS_INPUT_CNTL_9__DEFAULT_VAL_ATTR1_MASK 0x00600000L
++#define SPI_PS_INPUT_CNTL_9__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
++#define SPI_PS_INPUT_CNTL_9__ATTR0_VALID_MASK 0x01000000L
++#define SPI_PS_INPUT_CNTL_9__ATTR1_VALID_MASK 0x02000000L
++//SPI_PS_INPUT_CNTL_10
++#define SPI_PS_INPUT_CNTL_10__OFFSET__SHIFT 0x0
++#define SPI_PS_INPUT_CNTL_10__DEFAULT_VAL__SHIFT 0x8
++#define SPI_PS_INPUT_CNTL_10__FLAT_SHADE__SHIFT 0xa
++#define SPI_PS_INPUT_CNTL_10__CYL_WRAP__SHIFT 0xd
++#define SPI_PS_INPUT_CNTL_10__PT_SPRITE_TEX__SHIFT 0x11
++#define SPI_PS_INPUT_CNTL_10__DUP__SHIFT 0x12
++#define SPI_PS_INPUT_CNTL_10__FP16_INTERP_MODE__SHIFT 0x13
++#define SPI_PS_INPUT_CNTL_10__USE_DEFAULT_ATTR1__SHIFT 0x14
++#define SPI_PS_INPUT_CNTL_10__DEFAULT_VAL_ATTR1__SHIFT 0x15
++#define SPI_PS_INPUT_CNTL_10__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
++#define SPI_PS_INPUT_CNTL_10__ATTR0_VALID__SHIFT 0x18
++#define SPI_PS_INPUT_CNTL_10__ATTR1_VALID__SHIFT 0x19
++#define SPI_PS_INPUT_CNTL_10__OFFSET_MASK 0x0000003FL
++#define SPI_PS_INPUT_CNTL_10__DEFAULT_VAL_MASK 0x00000300L
++#define SPI_PS_INPUT_CNTL_10__FLAT_SHADE_MASK 0x00000400L
++#define SPI_PS_INPUT_CNTL_10__CYL_WRAP_MASK 0x0001E000L
++#define SPI_PS_INPUT_CNTL_10__PT_SPRITE_TEX_MASK 0x00020000L
++#define SPI_PS_INPUT_CNTL_10__DUP_MASK 0x00040000L
++#define SPI_PS_INPUT_CNTL_10__FP16_INTERP_MODE_MASK 0x00080000L
++#define SPI_PS_INPUT_CNTL_10__USE_DEFAULT_ATTR1_MASK 0x00100000L
++#define SPI_PS_INPUT_CNTL_10__DEFAULT_VAL_ATTR1_MASK 0x00600000L
++#define SPI_PS_INPUT_CNTL_10__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
++#define SPI_PS_INPUT_CNTL_10__ATTR0_VALID_MASK 0x01000000L
++#define SPI_PS_INPUT_CNTL_10__ATTR1_VALID_MASK 0x02000000L
++//SPI_PS_INPUT_CNTL_11
++#define SPI_PS_INPUT_CNTL_11__OFFSET__SHIFT 0x0
++#define SPI_PS_INPUT_CNTL_11__DEFAULT_VAL__SHIFT 0x8
++#define SPI_PS_INPUT_CNTL_11__FLAT_SHADE__SHIFT 0xa
++#define SPI_PS_INPUT_CNTL_11__CYL_WRAP__SHIFT 0xd
++#define SPI_PS_INPUT_CNTL_11__PT_SPRITE_TEX__SHIFT 0x11
++#define SPI_PS_INPUT_CNTL_11__DUP__SHIFT 0x12
++#define SPI_PS_INPUT_CNTL_11__FP16_INTERP_MODE__SHIFT 0x13
++#define SPI_PS_INPUT_CNTL_11__USE_DEFAULT_ATTR1__SHIFT 0x14
++#define SPI_PS_INPUT_CNTL_11__DEFAULT_VAL_ATTR1__SHIFT 0x15
++#define SPI_PS_INPUT_CNTL_11__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
++#define SPI_PS_INPUT_CNTL_11__ATTR0_VALID__SHIFT 0x18
++#define SPI_PS_INPUT_CNTL_11__ATTR1_VALID__SHIFT 0x19
++#define SPI_PS_INPUT_CNTL_11__OFFSET_MASK 0x0000003FL
++#define SPI_PS_INPUT_CNTL_11__DEFAULT_VAL_MASK 0x00000300L
++#define SPI_PS_INPUT_CNTL_11__FLAT_SHADE_MASK 0x00000400L
++#define SPI_PS_INPUT_CNTL_11__CYL_WRAP_MASK 0x0001E000L
++#define SPI_PS_INPUT_CNTL_11__PT_SPRITE_TEX_MASK 0x00020000L
++#define SPI_PS_INPUT_CNTL_11__DUP_MASK 0x00040000L
++#define SPI_PS_INPUT_CNTL_11__FP16_INTERP_MODE_MASK 0x00080000L
++#define SPI_PS_INPUT_CNTL_11__USE_DEFAULT_ATTR1_MASK 0x00100000L
++#define SPI_PS_INPUT_CNTL_11__DEFAULT_VAL_ATTR1_MASK 0x00600000L
++#define SPI_PS_INPUT_CNTL_11__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
++#define SPI_PS_INPUT_CNTL_11__ATTR0_VALID_MASK 0x01000000L
++#define SPI_PS_INPUT_CNTL_11__ATTR1_VALID_MASK 0x02000000L
++//SPI_PS_INPUT_CNTL_12
++#define SPI_PS_INPUT_CNTL_12__OFFSET__SHIFT 0x0
++#define SPI_PS_INPUT_CNTL_12__DEFAULT_VAL__SHIFT 0x8
++#define SPI_PS_INPUT_CNTL_12__FLAT_SHADE__SHIFT 0xa
++#define SPI_PS_INPUT_CNTL_12__CYL_WRAP__SHIFT 0xd
++#define SPI_PS_INPUT_CNTL_12__PT_SPRITE_TEX__SHIFT 0x11
++#define SPI_PS_INPUT_CNTL_12__DUP__SHIFT 0x12
++#define SPI_PS_INPUT_CNTL_12__FP16_INTERP_MODE__SHIFT 0x13
++#define SPI_PS_INPUT_CNTL_12__USE_DEFAULT_ATTR1__SHIFT 0x14
++#define SPI_PS_INPUT_CNTL_12__DEFAULT_VAL_ATTR1__SHIFT 0x15
++#define SPI_PS_INPUT_CNTL_12__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
++#define SPI_PS_INPUT_CNTL_12__ATTR0_VALID__SHIFT 0x18
++#define SPI_PS_INPUT_CNTL_12__ATTR1_VALID__SHIFT 0x19
++#define SPI_PS_INPUT_CNTL_12__OFFSET_MASK 0x0000003FL
++#define SPI_PS_INPUT_CNTL_12__DEFAULT_VAL_MASK 0x00000300L
++#define SPI_PS_INPUT_CNTL_12__FLAT_SHADE_MASK 0x00000400L
++#define SPI_PS_INPUT_CNTL_12__CYL_WRAP_MASK 0x0001E000L
++#define SPI_PS_INPUT_CNTL_12__PT_SPRITE_TEX_MASK 0x00020000L
++#define SPI_PS_INPUT_CNTL_12__DUP_MASK 0x00040000L
++#define SPI_PS_INPUT_CNTL_12__FP16_INTERP_MODE_MASK 0x00080000L
++#define SPI_PS_INPUT_CNTL_12__USE_DEFAULT_ATTR1_MASK 0x00100000L
++#define SPI_PS_INPUT_CNTL_12__DEFAULT_VAL_ATTR1_MASK 0x00600000L
++#define SPI_PS_INPUT_CNTL_12__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
++#define SPI_PS_INPUT_CNTL_12__ATTR0_VALID_MASK 0x01000000L
++#define SPI_PS_INPUT_CNTL_12__ATTR1_VALID_MASK 0x02000000L
++//SPI_PS_INPUT_CNTL_13
++#define SPI_PS_INPUT_CNTL_13__OFFSET__SHIFT 0x0
++#define SPI_PS_INPUT_CNTL_13__DEFAULT_VAL__SHIFT 0x8
++#define SPI_PS_INPUT_CNTL_13__FLAT_SHADE__SHIFT 0xa
++#define SPI_PS_INPUT_CNTL_13__CYL_WRAP__SHIFT 0xd
++#define SPI_PS_INPUT_CNTL_13__PT_SPRITE_TEX__SHIFT 0x11
++#define SPI_PS_INPUT_CNTL_13__DUP__SHIFT 0x12
++#define SPI_PS_INPUT_CNTL_13__FP16_INTERP_MODE__SHIFT 0x13
++#define SPI_PS_INPUT_CNTL_13__USE_DEFAULT_ATTR1__SHIFT 0x14
++#define SPI_PS_INPUT_CNTL_13__DEFAULT_VAL_ATTR1__SHIFT 0x15
++#define SPI_PS_INPUT_CNTL_13__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
++#define SPI_PS_INPUT_CNTL_13__ATTR0_VALID__SHIFT 0x18
++#define SPI_PS_INPUT_CNTL_13__ATTR1_VALID__SHIFT 0x19
++#define SPI_PS_INPUT_CNTL_13__OFFSET_MASK 0x0000003FL
++#define SPI_PS_INPUT_CNTL_13__DEFAULT_VAL_MASK 0x00000300L
++#define SPI_PS_INPUT_CNTL_13__FLAT_SHADE_MASK 0x00000400L
++#define SPI_PS_INPUT_CNTL_13__CYL_WRAP_MASK 0x0001E000L
++#define SPI_PS_INPUT_CNTL_13__PT_SPRITE_TEX_MASK 0x00020000L
++#define SPI_PS_INPUT_CNTL_13__DUP_MASK 0x00040000L
++#define SPI_PS_INPUT_CNTL_13__FP16_INTERP_MODE_MASK 0x00080000L
++#define SPI_PS_INPUT_CNTL_13__USE_DEFAULT_ATTR1_MASK 0x00100000L
++#define SPI_PS_INPUT_CNTL_13__DEFAULT_VAL_ATTR1_MASK 0x00600000L
++#define SPI_PS_INPUT_CNTL_13__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
++#define SPI_PS_INPUT_CNTL_13__ATTR0_VALID_MASK 0x01000000L
++#define SPI_PS_INPUT_CNTL_13__ATTR1_VALID_MASK 0x02000000L
++//SPI_PS_INPUT_CNTL_14
++#define SPI_PS_INPUT_CNTL_14__OFFSET__SHIFT 0x0
++#define SPI_PS_INPUT_CNTL_14__DEFAULT_VAL__SHIFT 0x8
++#define SPI_PS_INPUT_CNTL_14__FLAT_SHADE__SHIFT 0xa
++#define SPI_PS_INPUT_CNTL_14__CYL_WRAP__SHIFT 0xd
++#define SPI_PS_INPUT_CNTL_14__PT_SPRITE_TEX__SHIFT 0x11
++#define SPI_PS_INPUT_CNTL_14__DUP__SHIFT 0x12
++#define SPI_PS_INPUT_CNTL_14__FP16_INTERP_MODE__SHIFT 0x13
++#define SPI_PS_INPUT_CNTL_14__USE_DEFAULT_ATTR1__SHIFT 0x14
++#define SPI_PS_INPUT_CNTL_14__DEFAULT_VAL_ATTR1__SHIFT 0x15
++#define SPI_PS_INPUT_CNTL_14__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
++#define SPI_PS_INPUT_CNTL_14__ATTR0_VALID__SHIFT 0x18
++#define SPI_PS_INPUT_CNTL_14__ATTR1_VALID__SHIFT 0x19
++#define SPI_PS_INPUT_CNTL_14__OFFSET_MASK 0x0000003FL
++#define SPI_PS_INPUT_CNTL_14__DEFAULT_VAL_MASK 0x00000300L
++#define SPI_PS_INPUT_CNTL_14__FLAT_SHADE_MASK 0x00000400L
++#define SPI_PS_INPUT_CNTL_14__CYL_WRAP_MASK 0x0001E000L
++#define SPI_PS_INPUT_CNTL_14__PT_SPRITE_TEX_MASK 0x00020000L
++#define SPI_PS_INPUT_CNTL_14__DUP_MASK 0x00040000L
++#define SPI_PS_INPUT_CNTL_14__FP16_INTERP_MODE_MASK 0x00080000L
++#define SPI_PS_INPUT_CNTL_14__USE_DEFAULT_ATTR1_MASK 0x00100000L
++#define SPI_PS_INPUT_CNTL_14__DEFAULT_VAL_ATTR1_MASK 0x00600000L
++#define SPI_PS_INPUT_CNTL_14__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
++#define SPI_PS_INPUT_CNTL_14__ATTR0_VALID_MASK 0x01000000L
++#define SPI_PS_INPUT_CNTL_14__ATTR1_VALID_MASK 0x02000000L
++//SPI_PS_INPUT_CNTL_15
++#define SPI_PS_INPUT_CNTL_15__OFFSET__SHIFT 0x0
++#define SPI_PS_INPUT_CNTL_15__DEFAULT_VAL__SHIFT 0x8
++#define SPI_PS_INPUT_CNTL_15__FLAT_SHADE__SHIFT 0xa
++#define SPI_PS_INPUT_CNTL_15__CYL_WRAP__SHIFT 0xd
++#define SPI_PS_INPUT_CNTL_15__PT_SPRITE_TEX__SHIFT 0x11
++#define SPI_PS_INPUT_CNTL_15__DUP__SHIFT 0x12
++#define SPI_PS_INPUT_CNTL_15__FP16_INTERP_MODE__SHIFT 0x13
++#define SPI_PS_INPUT_CNTL_15__USE_DEFAULT_ATTR1__SHIFT 0x14
++#define SPI_PS_INPUT_CNTL_15__DEFAULT_VAL_ATTR1__SHIFT 0x15
++#define SPI_PS_INPUT_CNTL_15__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
++#define SPI_PS_INPUT_CNTL_15__ATTR0_VALID__SHIFT 0x18
++#define SPI_PS_INPUT_CNTL_15__ATTR1_VALID__SHIFT 0x19
++#define SPI_PS_INPUT_CNTL_15__OFFSET_MASK 0x0000003FL
++#define SPI_PS_INPUT_CNTL_15__DEFAULT_VAL_MASK 0x00000300L
++#define SPI_PS_INPUT_CNTL_15__FLAT_SHADE_MASK 0x00000400L
++#define SPI_PS_INPUT_CNTL_15__CYL_WRAP_MASK 0x0001E000L
++#define SPI_PS_INPUT_CNTL_15__PT_SPRITE_TEX_MASK 0x00020000L
++#define SPI_PS_INPUT_CNTL_15__DUP_MASK 0x00040000L
++#define SPI_PS_INPUT_CNTL_15__FP16_INTERP_MODE_MASK 0x00080000L
++#define SPI_PS_INPUT_CNTL_15__USE_DEFAULT_ATTR1_MASK 0x00100000L
++#define SPI_PS_INPUT_CNTL_15__DEFAULT_VAL_ATTR1_MASK 0x00600000L
++#define SPI_PS_INPUT_CNTL_15__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
++#define SPI_PS_INPUT_CNTL_15__ATTR0_VALID_MASK 0x01000000L
++#define SPI_PS_INPUT_CNTL_15__ATTR1_VALID_MASK 0x02000000L
++//SPI_PS_INPUT_CNTL_16
++#define SPI_PS_INPUT_CNTL_16__OFFSET__SHIFT 0x0
++#define SPI_PS_INPUT_CNTL_16__DEFAULT_VAL__SHIFT 0x8
++#define SPI_PS_INPUT_CNTL_16__FLAT_SHADE__SHIFT 0xa
++#define SPI_PS_INPUT_CNTL_16__CYL_WRAP__SHIFT 0xd
++#define SPI_PS_INPUT_CNTL_16__PT_SPRITE_TEX__SHIFT 0x11
++#define SPI_PS_INPUT_CNTL_16__DUP__SHIFT 0x12
++#define SPI_PS_INPUT_CNTL_16__FP16_INTERP_MODE__SHIFT 0x13
++#define SPI_PS_INPUT_CNTL_16__USE_DEFAULT_ATTR1__SHIFT 0x14
++#define SPI_PS_INPUT_CNTL_16__DEFAULT_VAL_ATTR1__SHIFT 0x15
++#define SPI_PS_INPUT_CNTL_16__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
++#define SPI_PS_INPUT_CNTL_16__ATTR0_VALID__SHIFT 0x18
++#define SPI_PS_INPUT_CNTL_16__ATTR1_VALID__SHIFT 0x19
++#define SPI_PS_INPUT_CNTL_16__OFFSET_MASK 0x0000003FL
++#define SPI_PS_INPUT_CNTL_16__DEFAULT_VAL_MASK 0x00000300L
++#define SPI_PS_INPUT_CNTL_16__FLAT_SHADE_MASK 0x00000400L
++#define SPI_PS_INPUT_CNTL_16__CYL_WRAP_MASK 0x0001E000L
++#define SPI_PS_INPUT_CNTL_16__PT_SPRITE_TEX_MASK 0x00020000L
++#define SPI_PS_INPUT_CNTL_16__DUP_MASK 0x00040000L
++#define SPI_PS_INPUT_CNTL_16__FP16_INTERP_MODE_MASK 0x00080000L
++#define SPI_PS_INPUT_CNTL_16__USE_DEFAULT_ATTR1_MASK 0x00100000L
++#define SPI_PS_INPUT_CNTL_16__DEFAULT_VAL_ATTR1_MASK 0x00600000L
++#define SPI_PS_INPUT_CNTL_16__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
++#define SPI_PS_INPUT_CNTL_16__ATTR0_VALID_MASK 0x01000000L
++#define SPI_PS_INPUT_CNTL_16__ATTR1_VALID_MASK 0x02000000L
++//SPI_PS_INPUT_CNTL_17
++#define SPI_PS_INPUT_CNTL_17__OFFSET__SHIFT 0x0
++#define SPI_PS_INPUT_CNTL_17__DEFAULT_VAL__SHIFT 0x8
++#define SPI_PS_INPUT_CNTL_17__FLAT_SHADE__SHIFT 0xa
++#define SPI_PS_INPUT_CNTL_17__CYL_WRAP__SHIFT 0xd
++#define SPI_PS_INPUT_CNTL_17__PT_SPRITE_TEX__SHIFT 0x11
++#define SPI_PS_INPUT_CNTL_17__DUP__SHIFT 0x12
++#define SPI_PS_INPUT_CNTL_17__FP16_INTERP_MODE__SHIFT 0x13
++#define SPI_PS_INPUT_CNTL_17__USE_DEFAULT_ATTR1__SHIFT 0x14
++#define SPI_PS_INPUT_CNTL_17__DEFAULT_VAL_ATTR1__SHIFT 0x15
++#define SPI_PS_INPUT_CNTL_17__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
++#define SPI_PS_INPUT_CNTL_17__ATTR0_VALID__SHIFT 0x18
++#define SPI_PS_INPUT_CNTL_17__ATTR1_VALID__SHIFT 0x19
++#define SPI_PS_INPUT_CNTL_17__OFFSET_MASK 0x0000003FL
++#define SPI_PS_INPUT_CNTL_17__DEFAULT_VAL_MASK 0x00000300L
++#define SPI_PS_INPUT_CNTL_17__FLAT_SHADE_MASK 0x00000400L
++#define SPI_PS_INPUT_CNTL_17__CYL_WRAP_MASK 0x0001E000L
++#define SPI_PS_INPUT_CNTL_17__PT_SPRITE_TEX_MASK 0x00020000L
++#define SPI_PS_INPUT_CNTL_17__DUP_MASK 0x00040000L
++#define SPI_PS_INPUT_CNTL_17__FP16_INTERP_MODE_MASK 0x00080000L
++#define SPI_PS_INPUT_CNTL_17__USE_DEFAULT_ATTR1_MASK 0x00100000L
++#define SPI_PS_INPUT_CNTL_17__DEFAULT_VAL_ATTR1_MASK 0x00600000L
++#define SPI_PS_INPUT_CNTL_17__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
++#define SPI_PS_INPUT_CNTL_17__ATTR0_VALID_MASK 0x01000000L
++#define SPI_PS_INPUT_CNTL_17__ATTR1_VALID_MASK 0x02000000L
++//SPI_PS_INPUT_CNTL_18
++#define SPI_PS_INPUT_CNTL_18__OFFSET__SHIFT 0x0
++#define SPI_PS_INPUT_CNTL_18__DEFAULT_VAL__SHIFT 0x8
++#define SPI_PS_INPUT_CNTL_18__FLAT_SHADE__SHIFT 0xa
++#define SPI_PS_INPUT_CNTL_18__CYL_WRAP__SHIFT 0xd
++#define SPI_PS_INPUT_CNTL_18__PT_SPRITE_TEX__SHIFT 0x11
++#define SPI_PS_INPUT_CNTL_18__DUP__SHIFT 0x12
++#define SPI_PS_INPUT_CNTL_18__FP16_INTERP_MODE__SHIFT 0x13
++#define SPI_PS_INPUT_CNTL_18__USE_DEFAULT_ATTR1__SHIFT 0x14
++#define SPI_PS_INPUT_CNTL_18__DEFAULT_VAL_ATTR1__SHIFT 0x15
++#define SPI_PS_INPUT_CNTL_18__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
++#define SPI_PS_INPUT_CNTL_18__ATTR0_VALID__SHIFT 0x18
++#define SPI_PS_INPUT_CNTL_18__ATTR1_VALID__SHIFT 0x19
++#define SPI_PS_INPUT_CNTL_18__OFFSET_MASK 0x0000003FL
++#define SPI_PS_INPUT_CNTL_18__DEFAULT_VAL_MASK 0x00000300L
++#define SPI_PS_INPUT_CNTL_18__FLAT_SHADE_MASK 0x00000400L
++#define SPI_PS_INPUT_CNTL_18__CYL_WRAP_MASK 0x0001E000L
++#define SPI_PS_INPUT_CNTL_18__PT_SPRITE_TEX_MASK 0x00020000L
++#define SPI_PS_INPUT_CNTL_18__DUP_MASK 0x00040000L
++#define SPI_PS_INPUT_CNTL_18__FP16_INTERP_MODE_MASK 0x00080000L
++#define SPI_PS_INPUT_CNTL_18__USE_DEFAULT_ATTR1_MASK 0x00100000L
++#define SPI_PS_INPUT_CNTL_18__DEFAULT_VAL_ATTR1_MASK 0x00600000L
++#define SPI_PS_INPUT_CNTL_18__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
++#define SPI_PS_INPUT_CNTL_18__ATTR0_VALID_MASK 0x01000000L
++#define SPI_PS_INPUT_CNTL_18__ATTR1_VALID_MASK 0x02000000L
++//SPI_PS_INPUT_CNTL_19
++#define SPI_PS_INPUT_CNTL_19__OFFSET__SHIFT 0x0
++#define SPI_PS_INPUT_CNTL_19__DEFAULT_VAL__SHIFT 0x8
++#define SPI_PS_INPUT_CNTL_19__FLAT_SHADE__SHIFT 0xa
++#define SPI_PS_INPUT_CNTL_19__CYL_WRAP__SHIFT 0xd
++#define SPI_PS_INPUT_CNTL_19__PT_SPRITE_TEX__SHIFT 0x11
++#define SPI_PS_INPUT_CNTL_19__DUP__SHIFT 0x12
++#define SPI_PS_INPUT_CNTL_19__FP16_INTERP_MODE__SHIFT 0x13
++#define SPI_PS_INPUT_CNTL_19__USE_DEFAULT_ATTR1__SHIFT 0x14
++#define SPI_PS_INPUT_CNTL_19__DEFAULT_VAL_ATTR1__SHIFT 0x15
++#define SPI_PS_INPUT_CNTL_19__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
++#define SPI_PS_INPUT_CNTL_19__ATTR0_VALID__SHIFT 0x18
++#define SPI_PS_INPUT_CNTL_19__ATTR1_VALID__SHIFT 0x19
++#define SPI_PS_INPUT_CNTL_19__OFFSET_MASK 0x0000003FL
++#define SPI_PS_INPUT_CNTL_19__DEFAULT_VAL_MASK 0x00000300L
++#define SPI_PS_INPUT_CNTL_19__FLAT_SHADE_MASK 0x00000400L
++#define SPI_PS_INPUT_CNTL_19__CYL_WRAP_MASK 0x0001E000L
++#define SPI_PS_INPUT_CNTL_19__PT_SPRITE_TEX_MASK 0x00020000L
++#define SPI_PS_INPUT_CNTL_19__DUP_MASK 0x00040000L
++#define SPI_PS_INPUT_CNTL_19__FP16_INTERP_MODE_MASK 0x00080000L
++#define SPI_PS_INPUT_CNTL_19__USE_DEFAULT_ATTR1_MASK 0x00100000L
++#define SPI_PS_INPUT_CNTL_19__DEFAULT_VAL_ATTR1_MASK 0x00600000L
++#define SPI_PS_INPUT_CNTL_19__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
++#define SPI_PS_INPUT_CNTL_19__ATTR0_VALID_MASK 0x01000000L
++#define SPI_PS_INPUT_CNTL_19__ATTR1_VALID_MASK 0x02000000L
++//SPI_PS_INPUT_CNTL_20
++#define SPI_PS_INPUT_CNTL_20__OFFSET__SHIFT 0x0
++#define SPI_PS_INPUT_CNTL_20__DEFAULT_VAL__SHIFT 0x8
++#define SPI_PS_INPUT_CNTL_20__FLAT_SHADE__SHIFT 0xa
++#define SPI_PS_INPUT_CNTL_20__DUP__SHIFT 0x12
++#define SPI_PS_INPUT_CNTL_20__FP16_INTERP_MODE__SHIFT 0x13
++#define SPI_PS_INPUT_CNTL_20__USE_DEFAULT_ATTR1__SHIFT 0x14
++#define SPI_PS_INPUT_CNTL_20__DEFAULT_VAL_ATTR1__SHIFT 0x15
++#define SPI_PS_INPUT_CNTL_20__ATTR0_VALID__SHIFT 0x18
++#define SPI_PS_INPUT_CNTL_20__ATTR1_VALID__SHIFT 0x19
++#define SPI_PS_INPUT_CNTL_20__OFFSET_MASK 0x0000003FL
++#define SPI_PS_INPUT_CNTL_20__DEFAULT_VAL_MASK 0x00000300L
++#define SPI_PS_INPUT_CNTL_20__FLAT_SHADE_MASK 0x00000400L
++#define SPI_PS_INPUT_CNTL_20__DUP_MASK 0x00040000L
++#define SPI_PS_INPUT_CNTL_20__FP16_INTERP_MODE_MASK 0x00080000L
++#define SPI_PS_INPUT_CNTL_20__USE_DEFAULT_ATTR1_MASK 0x00100000L
++#define SPI_PS_INPUT_CNTL_20__DEFAULT_VAL_ATTR1_MASK 0x00600000L
++#define SPI_PS_INPUT_CNTL_20__ATTR0_VALID_MASK 0x01000000L
++#define SPI_PS_INPUT_CNTL_20__ATTR1_VALID_MASK 0x02000000L
++//SPI_PS_INPUT_CNTL_21
++#define SPI_PS_INPUT_CNTL_21__OFFSET__SHIFT 0x0
++#define SPI_PS_INPUT_CNTL_21__DEFAULT_VAL__SHIFT 0x8
++#define SPI_PS_INPUT_CNTL_21__FLAT_SHADE__SHIFT 0xa
++#define SPI_PS_INPUT_CNTL_21__DUP__SHIFT 0x12
++#define SPI_PS_INPUT_CNTL_21__FP16_INTERP_MODE__SHIFT 0x13
++#define SPI_PS_INPUT_CNTL_21__USE_DEFAULT_ATTR1__SHIFT 0x14
++#define SPI_PS_INPUT_CNTL_21__DEFAULT_VAL_ATTR1__SHIFT 0x15
++#define SPI_PS_INPUT_CNTL_21__ATTR0_VALID__SHIFT 0x18
++#define SPI_PS_INPUT_CNTL_21__ATTR1_VALID__SHIFT 0x19
++#define SPI_PS_INPUT_CNTL_21__OFFSET_MASK 0x0000003FL
++#define SPI_PS_INPUT_CNTL_21__DEFAULT_VAL_MASK 0x00000300L
++#define SPI_PS_INPUT_CNTL_21__FLAT_SHADE_MASK 0x00000400L
++#define SPI_PS_INPUT_CNTL_21__DUP_MASK 0x00040000L
++#define SPI_PS_INPUT_CNTL_21__FP16_INTERP_MODE_MASK 0x00080000L
++#define SPI_PS_INPUT_CNTL_21__USE_DEFAULT_ATTR1_MASK 0x00100000L
++#define SPI_PS_INPUT_CNTL_21__DEFAULT_VAL_ATTR1_MASK 0x00600000L
++#define SPI_PS_INPUT_CNTL_21__ATTR0_VALID_MASK 0x01000000L
++#define SPI_PS_INPUT_CNTL_21__ATTR1_VALID_MASK 0x02000000L
++//SPI_PS_INPUT_CNTL_22
++#define SPI_PS_INPUT_CNTL_22__OFFSET__SHIFT 0x0
++#define SPI_PS_INPUT_CNTL_22__DEFAULT_VAL__SHIFT 0x8
++#define SPI_PS_INPUT_CNTL_22__FLAT_SHADE__SHIFT 0xa
++#define SPI_PS_INPUT_CNTL_22__DUP__SHIFT 0x12
++#define SPI_PS_INPUT_CNTL_22__FP16_INTERP_MODE__SHIFT 0x13
++#define SPI_PS_INPUT_CNTL_22__USE_DEFAULT_ATTR1__SHIFT 0x14
++#define SPI_PS_INPUT_CNTL_22__DEFAULT_VAL_ATTR1__SHIFT 0x15
++#define SPI_PS_INPUT_CNTL_22__ATTR0_VALID__SHIFT 0x18
++#define SPI_PS_INPUT_CNTL_22__ATTR1_VALID__SHIFT 0x19
++#define SPI_PS_INPUT_CNTL_22__OFFSET_MASK 0x0000003FL
++#define SPI_PS_INPUT_CNTL_22__DEFAULT_VAL_MASK 0x00000300L
++#define SPI_PS_INPUT_CNTL_22__FLAT_SHADE_MASK 0x00000400L
++#define SPI_PS_INPUT_CNTL_22__DUP_MASK 0x00040000L
++#define SPI_PS_INPUT_CNTL_22__FP16_INTERP_MODE_MASK 0x00080000L
++#define SPI_PS_INPUT_CNTL_22__USE_DEFAULT_ATTR1_MASK 0x00100000L
++#define SPI_PS_INPUT_CNTL_22__DEFAULT_VAL_ATTR1_MASK 0x00600000L
++#define SPI_PS_INPUT_CNTL_22__ATTR0_VALID_MASK 0x01000000L
++#define SPI_PS_INPUT_CNTL_22__ATTR1_VALID_MASK 0x02000000L
++//SPI_PS_INPUT_CNTL_23
++#define SPI_PS_INPUT_CNTL_23__OFFSET__SHIFT 0x0
++#define SPI_PS_INPUT_CNTL_23__DEFAULT_VAL__SHIFT 0x8
++#define SPI_PS_INPUT_CNTL_23__FLAT_SHADE__SHIFT 0xa
++#define SPI_PS_INPUT_CNTL_23__DUP__SHIFT 0x12
++#define SPI_PS_INPUT_CNTL_23__FP16_INTERP_MODE__SHIFT 0x13
++#define SPI_PS_INPUT_CNTL_23__USE_DEFAULT_ATTR1__SHIFT 0x14
++#define SPI_PS_INPUT_CNTL_23__DEFAULT_VAL_ATTR1__SHIFT 0x15
++#define SPI_PS_INPUT_CNTL_23__ATTR0_VALID__SHIFT 0x18
++#define SPI_PS_INPUT_CNTL_23__ATTR1_VALID__SHIFT 0x19
++#define SPI_PS_INPUT_CNTL_23__OFFSET_MASK 0x0000003FL
++#define SPI_PS_INPUT_CNTL_23__DEFAULT_VAL_MASK 0x00000300L
++#define SPI_PS_INPUT_CNTL_23__FLAT_SHADE_MASK 0x00000400L
++#define SPI_PS_INPUT_CNTL_23__DUP_MASK 0x00040000L
++#define SPI_PS_INPUT_CNTL_23__FP16_INTERP_MODE_MASK 0x00080000L
++#define SPI_PS_INPUT_CNTL_23__USE_DEFAULT_ATTR1_MASK 0x00100000L
++#define SPI_PS_INPUT_CNTL_23__DEFAULT_VAL_ATTR1_MASK 0x00600000L
++#define SPI_PS_INPUT_CNTL_23__ATTR0_VALID_MASK 0x01000000L
++#define SPI_PS_INPUT_CNTL_23__ATTR1_VALID_MASK 0x02000000L
++//SPI_PS_INPUT_CNTL_24
++#define SPI_PS_INPUT_CNTL_24__OFFSET__SHIFT 0x0
++#define SPI_PS_INPUT_CNTL_24__DEFAULT_VAL__SHIFT 0x8
++#define SPI_PS_INPUT_CNTL_24__FLAT_SHADE__SHIFT 0xa
++#define SPI_PS_INPUT_CNTL_24__DUP__SHIFT 0x12
++#define SPI_PS_INPUT_CNTL_24__FP16_INTERP_MODE__SHIFT 0x13
++#define SPI_PS_INPUT_CNTL_24__USE_DEFAULT_ATTR1__SHIFT 0x14
++#define SPI_PS_INPUT_CNTL_24__DEFAULT_VAL_ATTR1__SHIFT 0x15
++#define SPI_PS_INPUT_CNTL_24__ATTR0_VALID__SHIFT 0x18
++#define SPI_PS_INPUT_CNTL_24__ATTR1_VALID__SHIFT 0x19
++#define SPI_PS_INPUT_CNTL_24__OFFSET_MASK 0x0000003FL
++#define SPI_PS_INPUT_CNTL_24__DEFAULT_VAL_MASK 0x00000300L
++#define SPI_PS_INPUT_CNTL_24__FLAT_SHADE_MASK 0x00000400L
++#define SPI_PS_INPUT_CNTL_24__DUP_MASK 0x00040000L
++#define SPI_PS_INPUT_CNTL_24__FP16_INTERP_MODE_MASK 0x00080000L
++#define SPI_PS_INPUT_CNTL_24__USE_DEFAULT_ATTR1_MASK 0x00100000L
++#define SPI_PS_INPUT_CNTL_24__DEFAULT_VAL_ATTR1_MASK 0x00600000L
++#define SPI_PS_INPUT_CNTL_24__ATTR0_VALID_MASK 0x01000000L
++#define SPI_PS_INPUT_CNTL_24__ATTR1_VALID_MASK 0x02000000L
++//SPI_PS_INPUT_CNTL_25
++#define SPI_PS_INPUT_CNTL_25__OFFSET__SHIFT 0x0
++#define SPI_PS_INPUT_CNTL_25__DEFAULT_VAL__SHIFT 0x8
++#define SPI_PS_INPUT_CNTL_25__FLAT_SHADE__SHIFT 0xa
++#define SPI_PS_INPUT_CNTL_25__DUP__SHIFT 0x12
++#define SPI_PS_INPUT_CNTL_25__FP16_INTERP_MODE__SHIFT 0x13
++#define SPI_PS_INPUT_CNTL_25__USE_DEFAULT_ATTR1__SHIFT 0x14
++#define SPI_PS_INPUT_CNTL_25__DEFAULT_VAL_ATTR1__SHIFT 0x15
++#define SPI_PS_INPUT_CNTL_25__ATTR0_VALID__SHIFT 0x18
++#define SPI_PS_INPUT_CNTL_25__ATTR1_VALID__SHIFT 0x19
++#define SPI_PS_INPUT_CNTL_25__OFFSET_MASK 0x0000003FL
++#define SPI_PS_INPUT_CNTL_25__DEFAULT_VAL_MASK 0x00000300L
++#define SPI_PS_INPUT_CNTL_25__FLAT_SHADE_MASK 0x00000400L
++#define SPI_PS_INPUT_CNTL_25__DUP_MASK 0x00040000L
++#define SPI_PS_INPUT_CNTL_25__FP16_INTERP_MODE_MASK 0x00080000L
++#define SPI_PS_INPUT_CNTL_25__USE_DEFAULT_ATTR1_MASK 0x00100000L
++#define SPI_PS_INPUT_CNTL_25__DEFAULT_VAL_ATTR1_MASK 0x00600000L
++#define SPI_PS_INPUT_CNTL_25__ATTR0_VALID_MASK 0x01000000L
++#define SPI_PS_INPUT_CNTL_25__ATTR1_VALID_MASK 0x02000000L
++//SPI_PS_INPUT_CNTL_26
++#define SPI_PS_INPUT_CNTL_26__OFFSET__SHIFT 0x0
++#define SPI_PS_INPUT_CNTL_26__DEFAULT_VAL__SHIFT 0x8
++#define SPI_PS_INPUT_CNTL_26__FLAT_SHADE__SHIFT 0xa
++#define SPI_PS_INPUT_CNTL_26__DUP__SHIFT 0x12
++#define SPI_PS_INPUT_CNTL_26__FP16_INTERP_MODE__SHIFT 0x13
++#define SPI_PS_INPUT_CNTL_26__USE_DEFAULT_ATTR1__SHIFT 0x14
++#define SPI_PS_INPUT_CNTL_26__DEFAULT_VAL_ATTR1__SHIFT 0x15
++#define SPI_PS_INPUT_CNTL_26__ATTR0_VALID__SHIFT 0x18
++#define SPI_PS_INPUT_CNTL_26__ATTR1_VALID__SHIFT 0x19
++#define SPI_PS_INPUT_CNTL_26__OFFSET_MASK 0x0000003FL
++#define SPI_PS_INPUT_CNTL_26__DEFAULT_VAL_MASK 0x00000300L
++#define SPI_PS_INPUT_CNTL_26__FLAT_SHADE_MASK 0x00000400L
++#define SPI_PS_INPUT_CNTL_26__DUP_MASK 0x00040000L
++#define SPI_PS_INPUT_CNTL_26__FP16_INTERP_MODE_MASK 0x00080000L
++#define SPI_PS_INPUT_CNTL_26__USE_DEFAULT_ATTR1_MASK 0x00100000L
++#define SPI_PS_INPUT_CNTL_26__DEFAULT_VAL_ATTR1_MASK 0x00600000L
++#define SPI_PS_INPUT_CNTL_26__ATTR0_VALID_MASK 0x01000000L
++#define SPI_PS_INPUT_CNTL_26__ATTR1_VALID_MASK 0x02000000L
++//SPI_PS_INPUT_CNTL_27
++#define SPI_PS_INPUT_CNTL_27__OFFSET__SHIFT 0x0
++#define SPI_PS_INPUT_CNTL_27__DEFAULT_VAL__SHIFT 0x8
++#define SPI_PS_INPUT_CNTL_27__FLAT_SHADE__SHIFT 0xa
++#define SPI_PS_INPUT_CNTL_27__DUP__SHIFT 0x12
++#define SPI_PS_INPUT_CNTL_27__FP16_INTERP_MODE__SHIFT 0x13
++#define SPI_PS_INPUT_CNTL_27__USE_DEFAULT_ATTR1__SHIFT 0x14
++#define SPI_PS_INPUT_CNTL_27__DEFAULT_VAL_ATTR1__SHIFT 0x15
++#define SPI_PS_INPUT_CNTL_27__ATTR0_VALID__SHIFT 0x18
++#define SPI_PS_INPUT_CNTL_27__ATTR1_VALID__SHIFT 0x19
++#define SPI_PS_INPUT_CNTL_27__OFFSET_MASK 0x0000003FL
++#define SPI_PS_INPUT_CNTL_27__DEFAULT_VAL_MASK 0x00000300L
++#define SPI_PS_INPUT_CNTL_27__FLAT_SHADE_MASK 0x00000400L
++#define SPI_PS_INPUT_CNTL_27__DUP_MASK 0x00040000L
++#define SPI_PS_INPUT_CNTL_27__FP16_INTERP_MODE_MASK 0x00080000L
++#define SPI_PS_INPUT_CNTL_27__USE_DEFAULT_ATTR1_MASK 0x00100000L
++#define SPI_PS_INPUT_CNTL_27__DEFAULT_VAL_ATTR1_MASK 0x00600000L
++#define SPI_PS_INPUT_CNTL_27__ATTR0_VALID_MASK 0x01000000L
++#define SPI_PS_INPUT_CNTL_27__ATTR1_VALID_MASK 0x02000000L
++//SPI_PS_INPUT_CNTL_28
++#define SPI_PS_INPUT_CNTL_28__OFFSET__SHIFT 0x0
++#define SPI_PS_INPUT_CNTL_28__DEFAULT_VAL__SHIFT 0x8
++#define SPI_PS_INPUT_CNTL_28__FLAT_SHADE__SHIFT 0xa
++#define SPI_PS_INPUT_CNTL_28__DUP__SHIFT 0x12
++#define SPI_PS_INPUT_CNTL_28__FP16_INTERP_MODE__SHIFT 0x13
++#define SPI_PS_INPUT_CNTL_28__USE_DEFAULT_ATTR1__SHIFT 0x14
++#define SPI_PS_INPUT_CNTL_28__DEFAULT_VAL_ATTR1__SHIFT 0x15
++#define SPI_PS_INPUT_CNTL_28__ATTR0_VALID__SHIFT 0x18
++#define SPI_PS_INPUT_CNTL_28__ATTR1_VALID__SHIFT 0x19
++#define SPI_PS_INPUT_CNTL_28__OFFSET_MASK 0x0000003FL
++#define SPI_PS_INPUT_CNTL_28__DEFAULT_VAL_MASK 0x00000300L
++#define SPI_PS_INPUT_CNTL_28__FLAT_SHADE_MASK 0x00000400L
++#define SPI_PS_INPUT_CNTL_28__DUP_MASK 0x00040000L
++#define SPI_PS_INPUT_CNTL_28__FP16_INTERP_MODE_MASK 0x00080000L
++#define SPI_PS_INPUT_CNTL_28__USE_DEFAULT_ATTR1_MASK 0x00100000L
++#define SPI_PS_INPUT_CNTL_28__DEFAULT_VAL_ATTR1_MASK 0x00600000L
++#define SPI_PS_INPUT_CNTL_28__ATTR0_VALID_MASK 0x01000000L
++#define SPI_PS_INPUT_CNTL_28__ATTR1_VALID_MASK 0x02000000L
++//SPI_PS_INPUT_CNTL_29
++#define SPI_PS_INPUT_CNTL_29__OFFSET__SHIFT 0x0
++#define SPI_PS_INPUT_CNTL_29__DEFAULT_VAL__SHIFT 0x8
++#define SPI_PS_INPUT_CNTL_29__FLAT_SHADE__SHIFT 0xa
++#define SPI_PS_INPUT_CNTL_29__DUP__SHIFT 0x12
++#define SPI_PS_INPUT_CNTL_29__FP16_INTERP_MODE__SHIFT 0x13
++#define SPI_PS_INPUT_CNTL_29__USE_DEFAULT_ATTR1__SHIFT 0x14
++#define SPI_PS_INPUT_CNTL_29__DEFAULT_VAL_ATTR1__SHIFT 0x15
++#define SPI_PS_INPUT_CNTL_29__ATTR0_VALID__SHIFT 0x18
++#define SPI_PS_INPUT_CNTL_29__ATTR1_VALID__SHIFT 0x19
++#define SPI_PS_INPUT_CNTL_29__OFFSET_MASK 0x0000003FL
++#define SPI_PS_INPUT_CNTL_29__DEFAULT_VAL_MASK 0x00000300L
++#define SPI_PS_INPUT_CNTL_29__FLAT_SHADE_MASK 0x00000400L
++#define SPI_PS_INPUT_CNTL_29__DUP_MASK 0x00040000L
++#define SPI_PS_INPUT_CNTL_29__FP16_INTERP_MODE_MASK 0x00080000L
++#define SPI_PS_INPUT_CNTL_29__USE_DEFAULT_ATTR1_MASK 0x00100000L
++#define SPI_PS_INPUT_CNTL_29__DEFAULT_VAL_ATTR1_MASK 0x00600000L
++#define SPI_PS_INPUT_CNTL_29__ATTR0_VALID_MASK 0x01000000L
++#define SPI_PS_INPUT_CNTL_29__ATTR1_VALID_MASK 0x02000000L
++//SPI_PS_INPUT_CNTL_30
++#define SPI_PS_INPUT_CNTL_30__OFFSET__SHIFT 0x0
++#define SPI_PS_INPUT_CNTL_30__DEFAULT_VAL__SHIFT 0x8
++#define SPI_PS_INPUT_CNTL_30__FLAT_SHADE__SHIFT 0xa
++#define SPI_PS_INPUT_CNTL_30__DUP__SHIFT 0x12
++#define SPI_PS_INPUT_CNTL_30__FP16_INTERP_MODE__SHIFT 0x13
++#define SPI_PS_INPUT_CNTL_30__USE_DEFAULT_ATTR1__SHIFT 0x14
++#define SPI_PS_INPUT_CNTL_30__DEFAULT_VAL_ATTR1__SHIFT 0x15
++#define SPI_PS_INPUT_CNTL_30__ATTR0_VALID__SHIFT 0x18
++#define SPI_PS_INPUT_CNTL_30__ATTR1_VALID__SHIFT 0x19
++#define SPI_PS_INPUT_CNTL_30__OFFSET_MASK 0x0000003FL
++#define SPI_PS_INPUT_CNTL_30__DEFAULT_VAL_MASK 0x00000300L
++#define SPI_PS_INPUT_CNTL_30__FLAT_SHADE_MASK 0x00000400L
++#define SPI_PS_INPUT_CNTL_30__DUP_MASK 0x00040000L
++#define SPI_PS_INPUT_CNTL_30__FP16_INTERP_MODE_MASK 0x00080000L
++#define SPI_PS_INPUT_CNTL_30__USE_DEFAULT_ATTR1_MASK 0x00100000L
++#define SPI_PS_INPUT_CNTL_30__DEFAULT_VAL_ATTR1_MASK 0x00600000L
++#define SPI_PS_INPUT_CNTL_30__ATTR0_VALID_MASK 0x01000000L
++#define SPI_PS_INPUT_CNTL_30__ATTR1_VALID_MASK 0x02000000L
++//SPI_PS_INPUT_CNTL_31
++#define SPI_PS_INPUT_CNTL_31__OFFSET__SHIFT 0x0
++#define SPI_PS_INPUT_CNTL_31__DEFAULT_VAL__SHIFT 0x8
++#define SPI_PS_INPUT_CNTL_31__FLAT_SHADE__SHIFT 0xa
++#define SPI_PS_INPUT_CNTL_31__DUP__SHIFT 0x12
++#define SPI_PS_INPUT_CNTL_31__FP16_INTERP_MODE__SHIFT 0x13
++#define SPI_PS_INPUT_CNTL_31__USE_DEFAULT_ATTR1__SHIFT 0x14
++#define SPI_PS_INPUT_CNTL_31__DEFAULT_VAL_ATTR1__SHIFT 0x15
++#define SPI_PS_INPUT_CNTL_31__ATTR0_VALID__SHIFT 0x18
++#define SPI_PS_INPUT_CNTL_31__ATTR1_VALID__SHIFT 0x19
++#define SPI_PS_INPUT_CNTL_31__OFFSET_MASK 0x0000003FL
++#define SPI_PS_INPUT_CNTL_31__DEFAULT_VAL_MASK 0x00000300L
++#define SPI_PS_INPUT_CNTL_31__FLAT_SHADE_MASK 0x00000400L
++#define SPI_PS_INPUT_CNTL_31__DUP_MASK 0x00040000L
++#define SPI_PS_INPUT_CNTL_31__FP16_INTERP_MODE_MASK 0x00080000L
++#define SPI_PS_INPUT_CNTL_31__USE_DEFAULT_ATTR1_MASK 0x00100000L
++#define SPI_PS_INPUT_CNTL_31__DEFAULT_VAL_ATTR1_MASK 0x00600000L
++#define SPI_PS_INPUT_CNTL_31__ATTR0_VALID_MASK 0x01000000L
++#define SPI_PS_INPUT_CNTL_31__ATTR1_VALID_MASK 0x02000000L
++//SPI_VS_OUT_CONFIG
++#define SPI_VS_OUT_CONFIG__VS_EXPORT_COUNT__SHIFT 0x1
++#define SPI_VS_OUT_CONFIG__VS_HALF_PACK__SHIFT 0x6
++#define SPI_VS_OUT_CONFIG__VS_EXPORT_COUNT_MASK 0x0000003EL
++#define SPI_VS_OUT_CONFIG__VS_HALF_PACK_MASK 0x00000040L
++//SPI_PS_INPUT_ENA
++#define SPI_PS_INPUT_ENA__PERSP_SAMPLE_ENA__SHIFT 0x0
++#define SPI_PS_INPUT_ENA__PERSP_CENTER_ENA__SHIFT 0x1
++#define SPI_PS_INPUT_ENA__PERSP_CENTROID_ENA__SHIFT 0x2
++#define SPI_PS_INPUT_ENA__PERSP_PULL_MODEL_ENA__SHIFT 0x3
++#define SPI_PS_INPUT_ENA__LINEAR_SAMPLE_ENA__SHIFT 0x4
++#define SPI_PS_INPUT_ENA__LINEAR_CENTER_ENA__SHIFT 0x5
++#define SPI_PS_INPUT_ENA__LINEAR_CENTROID_ENA__SHIFT 0x6
++#define SPI_PS_INPUT_ENA__LINE_STIPPLE_TEX_ENA__SHIFT 0x7
++#define SPI_PS_INPUT_ENA__POS_X_FLOAT_ENA__SHIFT 0x8
++#define SPI_PS_INPUT_ENA__POS_Y_FLOAT_ENA__SHIFT 0x9
++#define SPI_PS_INPUT_ENA__POS_Z_FLOAT_ENA__SHIFT 0xa
++#define SPI_PS_INPUT_ENA__POS_W_FLOAT_ENA__SHIFT 0xb
++#define SPI_PS_INPUT_ENA__FRONT_FACE_ENA__SHIFT 0xc
++#define SPI_PS_INPUT_ENA__ANCILLARY_ENA__SHIFT 0xd
++#define SPI_PS_INPUT_ENA__SAMPLE_COVERAGE_ENA__SHIFT 0xe
++#define SPI_PS_INPUT_ENA__POS_FIXED_PT_ENA__SHIFT 0xf
++#define SPI_PS_INPUT_ENA__PERSP_SAMPLE_ENA_MASK 0x00000001L
++#define SPI_PS_INPUT_ENA__PERSP_CENTER_ENA_MASK 0x00000002L
++#define SPI_PS_INPUT_ENA__PERSP_CENTROID_ENA_MASK 0x00000004L
++#define SPI_PS_INPUT_ENA__PERSP_PULL_MODEL_ENA_MASK 0x00000008L
++#define SPI_PS_INPUT_ENA__LINEAR_SAMPLE_ENA_MASK 0x00000010L
++#define SPI_PS_INPUT_ENA__LINEAR_CENTER_ENA_MASK 0x00000020L
++#define SPI_PS_INPUT_ENA__LINEAR_CENTROID_ENA_MASK 0x00000040L
++#define SPI_PS_INPUT_ENA__LINE_STIPPLE_TEX_ENA_MASK 0x00000080L
++#define SPI_PS_INPUT_ENA__POS_X_FLOAT_ENA_MASK 0x00000100L
++#define SPI_PS_INPUT_ENA__POS_Y_FLOAT_ENA_MASK 0x00000200L
++#define SPI_PS_INPUT_ENA__POS_Z_FLOAT_ENA_MASK 0x00000400L
++#define SPI_PS_INPUT_ENA__POS_W_FLOAT_ENA_MASK 0x00000800L
++#define SPI_PS_INPUT_ENA__FRONT_FACE_ENA_MASK 0x00001000L
++#define SPI_PS_INPUT_ENA__ANCILLARY_ENA_MASK 0x00002000L
++#define SPI_PS_INPUT_ENA__SAMPLE_COVERAGE_ENA_MASK 0x00004000L
++#define SPI_PS_INPUT_ENA__POS_FIXED_PT_ENA_MASK 0x00008000L
++//SPI_PS_INPUT_ADDR
++#define SPI_PS_INPUT_ADDR__PERSP_SAMPLE_ENA__SHIFT 0x0
++#define SPI_PS_INPUT_ADDR__PERSP_CENTER_ENA__SHIFT 0x1
++#define SPI_PS_INPUT_ADDR__PERSP_CENTROID_ENA__SHIFT 0x2
++#define SPI_PS_INPUT_ADDR__PERSP_PULL_MODEL_ENA__SHIFT 0x3
++#define SPI_PS_INPUT_ADDR__LINEAR_SAMPLE_ENA__SHIFT 0x4
++#define SPI_PS_INPUT_ADDR__LINEAR_CENTER_ENA__SHIFT 0x5
++#define SPI_PS_INPUT_ADDR__LINEAR_CENTROID_ENA__SHIFT 0x6
++#define SPI_PS_INPUT_ADDR__LINE_STIPPLE_TEX_ENA__SHIFT 0x7
++#define SPI_PS_INPUT_ADDR__POS_X_FLOAT_ENA__SHIFT 0x8
++#define SPI_PS_INPUT_ADDR__POS_Y_FLOAT_ENA__SHIFT 0x9
++#define SPI_PS_INPUT_ADDR__POS_Z_FLOAT_ENA__SHIFT 0xa
++#define SPI_PS_INPUT_ADDR__POS_W_FLOAT_ENA__SHIFT 0xb
++#define SPI_PS_INPUT_ADDR__FRONT_FACE_ENA__SHIFT 0xc
++#define SPI_PS_INPUT_ADDR__ANCILLARY_ENA__SHIFT 0xd
++#define SPI_PS_INPUT_ADDR__SAMPLE_COVERAGE_ENA__SHIFT 0xe
++#define SPI_PS_INPUT_ADDR__POS_FIXED_PT_ENA__SHIFT 0xf
++#define SPI_PS_INPUT_ADDR__PERSP_SAMPLE_ENA_MASK 0x00000001L
++#define SPI_PS_INPUT_ADDR__PERSP_CENTER_ENA_MASK 0x00000002L
++#define SPI_PS_INPUT_ADDR__PERSP_CENTROID_ENA_MASK 0x00000004L
++#define SPI_PS_INPUT_ADDR__PERSP_PULL_MODEL_ENA_MASK 0x00000008L
++#define SPI_PS_INPUT_ADDR__LINEAR_SAMPLE_ENA_MASK 0x00000010L
++#define SPI_PS_INPUT_ADDR__LINEAR_CENTER_ENA_MASK 0x00000020L
++#define SPI_PS_INPUT_ADDR__LINEAR_CENTROID_ENA_MASK 0x00000040L
++#define SPI_PS_INPUT_ADDR__LINE_STIPPLE_TEX_ENA_MASK 0x00000080L
++#define SPI_PS_INPUT_ADDR__POS_X_FLOAT_ENA_MASK 0x00000100L
++#define SPI_PS_INPUT_ADDR__POS_Y_FLOAT_ENA_MASK 0x00000200L
++#define SPI_PS_INPUT_ADDR__POS_Z_FLOAT_ENA_MASK 0x00000400L
++#define SPI_PS_INPUT_ADDR__POS_W_FLOAT_ENA_MASK 0x00000800L
++#define SPI_PS_INPUT_ADDR__FRONT_FACE_ENA_MASK 0x00001000L
++#define SPI_PS_INPUT_ADDR__ANCILLARY_ENA_MASK 0x00002000L
++#define SPI_PS_INPUT_ADDR__SAMPLE_COVERAGE_ENA_MASK 0x00004000L
++#define SPI_PS_INPUT_ADDR__POS_FIXED_PT_ENA_MASK 0x00008000L
++//SPI_INTERP_CONTROL_0
++#define SPI_INTERP_CONTROL_0__FLAT_SHADE_ENA__SHIFT 0x0
++#define SPI_INTERP_CONTROL_0__PNT_SPRITE_ENA__SHIFT 0x1
++#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_X__SHIFT 0x2
++#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_Y__SHIFT 0x5
++#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_Z__SHIFT 0x8
++#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_W__SHIFT 0xb
++#define SPI_INTERP_CONTROL_0__PNT_SPRITE_TOP_1__SHIFT 0xe
++#define SPI_INTERP_CONTROL_0__FLAT_SHADE_ENA_MASK 0x00000001L
++#define SPI_INTERP_CONTROL_0__PNT_SPRITE_ENA_MASK 0x00000002L
++#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_X_MASK 0x0000001CL
++#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_Y_MASK 0x000000E0L
++#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_Z_MASK 0x00000700L
++#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_W_MASK 0x00003800L
++#define SPI_INTERP_CONTROL_0__PNT_SPRITE_TOP_1_MASK 0x00004000L
++//SPI_PS_IN_CONTROL
++#define SPI_PS_IN_CONTROL__NUM_INTERP__SHIFT 0x0
++#define SPI_PS_IN_CONTROL__PARAM_GEN__SHIFT 0x6
++#define SPI_PS_IN_CONTROL__OFFCHIP_PARAM_EN__SHIFT 0x7
++#define SPI_PS_IN_CONTROL__LATE_PC_DEALLOC__SHIFT 0x8
++#define SPI_PS_IN_CONTROL__BC_OPTIMIZE_DISABLE__SHIFT 0xe
++#define SPI_PS_IN_CONTROL__NUM_INTERP_MASK 0x0000003FL
++#define SPI_PS_IN_CONTROL__PARAM_GEN_MASK 0x00000040L
++#define SPI_PS_IN_CONTROL__OFFCHIP_PARAM_EN_MASK 0x00000080L
++#define SPI_PS_IN_CONTROL__LATE_PC_DEALLOC_MASK 0x00000100L
++#define SPI_PS_IN_CONTROL__BC_OPTIMIZE_DISABLE_MASK 0x00004000L
++//SPI_BARYC_CNTL
++#define SPI_BARYC_CNTL__PERSP_CENTER_CNTL__SHIFT 0x0
++#define SPI_BARYC_CNTL__PERSP_CENTROID_CNTL__SHIFT 0x4
++#define SPI_BARYC_CNTL__LINEAR_CENTER_CNTL__SHIFT 0x8
++#define SPI_BARYC_CNTL__LINEAR_CENTROID_CNTL__SHIFT 0xc
++#define SPI_BARYC_CNTL__POS_FLOAT_LOCATION__SHIFT 0x10
++#define SPI_BARYC_CNTL__POS_FLOAT_ULC__SHIFT 0x14
++#define SPI_BARYC_CNTL__FRONT_FACE_ALL_BITS__SHIFT 0x18
++#define SPI_BARYC_CNTL__PERSP_CENTER_CNTL_MASK 0x00000001L
++#define SPI_BARYC_CNTL__PERSP_CENTROID_CNTL_MASK 0x00000010L
++#define SPI_BARYC_CNTL__LINEAR_CENTER_CNTL_MASK 0x00000100L
++#define SPI_BARYC_CNTL__LINEAR_CENTROID_CNTL_MASK 0x00001000L
++#define SPI_BARYC_CNTL__POS_FLOAT_LOCATION_MASK 0x00030000L
++#define SPI_BARYC_CNTL__POS_FLOAT_ULC_MASK 0x00100000L
++#define SPI_BARYC_CNTL__FRONT_FACE_ALL_BITS_MASK 0x01000000L
++//SPI_TMPRING_SIZE
++#define SPI_TMPRING_SIZE__WAVES__SHIFT 0x0
++#define SPI_TMPRING_SIZE__WAVESIZE__SHIFT 0xc
++#define SPI_TMPRING_SIZE__WAVES_MASK 0x00000FFFL
++#define SPI_TMPRING_SIZE__WAVESIZE_MASK 0x01FFF000L
++//SPI_SHADER_POS_FORMAT
++#define SPI_SHADER_POS_FORMAT__POS0_EXPORT_FORMAT__SHIFT 0x0
++#define SPI_SHADER_POS_FORMAT__POS1_EXPORT_FORMAT__SHIFT 0x4
++#define SPI_SHADER_POS_FORMAT__POS2_EXPORT_FORMAT__SHIFT 0x8
++#define SPI_SHADER_POS_FORMAT__POS3_EXPORT_FORMAT__SHIFT 0xc
++#define SPI_SHADER_POS_FORMAT__POS0_EXPORT_FORMAT_MASK 0x0000000FL
++#define SPI_SHADER_POS_FORMAT__POS1_EXPORT_FORMAT_MASK 0x000000F0L
++#define SPI_SHADER_POS_FORMAT__POS2_EXPORT_FORMAT_MASK 0x00000F00L
++#define SPI_SHADER_POS_FORMAT__POS3_EXPORT_FORMAT_MASK 0x0000F000L
++//SPI_SHADER_Z_FORMAT
++#define SPI_SHADER_Z_FORMAT__Z_EXPORT_FORMAT__SHIFT 0x0
++#define SPI_SHADER_Z_FORMAT__Z_EXPORT_FORMAT_MASK 0x0000000FL
++//SPI_SHADER_COL_FORMAT
++#define SPI_SHADER_COL_FORMAT__COL0_EXPORT_FORMAT__SHIFT 0x0
++#define SPI_SHADER_COL_FORMAT__COL1_EXPORT_FORMAT__SHIFT 0x4
++#define SPI_SHADER_COL_FORMAT__COL2_EXPORT_FORMAT__SHIFT 0x8
++#define SPI_SHADER_COL_FORMAT__COL3_EXPORT_FORMAT__SHIFT 0xc
++#define SPI_SHADER_COL_FORMAT__COL4_EXPORT_FORMAT__SHIFT 0x10
++#define SPI_SHADER_COL_FORMAT__COL5_EXPORT_FORMAT__SHIFT 0x14
++#define SPI_SHADER_COL_FORMAT__COL6_EXPORT_FORMAT__SHIFT 0x18
++#define SPI_SHADER_COL_FORMAT__COL7_EXPORT_FORMAT__SHIFT 0x1c
++#define SPI_SHADER_COL_FORMAT__COL0_EXPORT_FORMAT_MASK 0x0000000FL
++#define SPI_SHADER_COL_FORMAT__COL1_EXPORT_FORMAT_MASK 0x000000F0L
++#define SPI_SHADER_COL_FORMAT__COL2_EXPORT_FORMAT_MASK 0x00000F00L
++#define SPI_SHADER_COL_FORMAT__COL3_EXPORT_FORMAT_MASK 0x0000F000L
++#define SPI_SHADER_COL_FORMAT__COL4_EXPORT_FORMAT_MASK 0x000F0000L
++#define SPI_SHADER_COL_FORMAT__COL5_EXPORT_FORMAT_MASK 0x00F00000L
++#define SPI_SHADER_COL_FORMAT__COL6_EXPORT_FORMAT_MASK 0x0F000000L
++#define SPI_SHADER_COL_FORMAT__COL7_EXPORT_FORMAT_MASK 0xF0000000L
++//SX_PS_DOWNCONVERT
++#define SX_PS_DOWNCONVERT__MRT0__SHIFT 0x0
++#define SX_PS_DOWNCONVERT__MRT1__SHIFT 0x4
++#define SX_PS_DOWNCONVERT__MRT2__SHIFT 0x8
++#define SX_PS_DOWNCONVERT__MRT3__SHIFT 0xc
++#define SX_PS_DOWNCONVERT__MRT4__SHIFT 0x10
++#define SX_PS_DOWNCONVERT__MRT5__SHIFT 0x14
++#define SX_PS_DOWNCONVERT__MRT6__SHIFT 0x18
++#define SX_PS_DOWNCONVERT__MRT7__SHIFT 0x1c
++#define SX_PS_DOWNCONVERT__MRT0_MASK 0x0000000FL
++#define SX_PS_DOWNCONVERT__MRT1_MASK 0x000000F0L
++#define SX_PS_DOWNCONVERT__MRT2_MASK 0x00000F00L
++#define SX_PS_DOWNCONVERT__MRT3_MASK 0x0000F000L
++#define SX_PS_DOWNCONVERT__MRT4_MASK 0x000F0000L
++#define SX_PS_DOWNCONVERT__MRT5_MASK 0x00F00000L
++#define SX_PS_DOWNCONVERT__MRT6_MASK 0x0F000000L
++#define SX_PS_DOWNCONVERT__MRT7_MASK 0xF0000000L
++//SX_BLEND_OPT_EPSILON
++#define SX_BLEND_OPT_EPSILON__MRT0_EPSILON__SHIFT 0x0
++#define SX_BLEND_OPT_EPSILON__MRT1_EPSILON__SHIFT 0x4
++#define SX_BLEND_OPT_EPSILON__MRT2_EPSILON__SHIFT 0x8
++#define SX_BLEND_OPT_EPSILON__MRT3_EPSILON__SHIFT 0xc
++#define SX_BLEND_OPT_EPSILON__MRT4_EPSILON__SHIFT 0x10
++#define SX_BLEND_OPT_EPSILON__MRT5_EPSILON__SHIFT 0x14
++#define SX_BLEND_OPT_EPSILON__MRT6_EPSILON__SHIFT 0x18
++#define SX_BLEND_OPT_EPSILON__MRT7_EPSILON__SHIFT 0x1c
++#define SX_BLEND_OPT_EPSILON__MRT0_EPSILON_MASK 0x0000000FL
++#define SX_BLEND_OPT_EPSILON__MRT1_EPSILON_MASK 0x000000F0L
++#define SX_BLEND_OPT_EPSILON__MRT2_EPSILON_MASK 0x00000F00L
++#define SX_BLEND_OPT_EPSILON__MRT3_EPSILON_MASK 0x0000F000L
++#define SX_BLEND_OPT_EPSILON__MRT4_EPSILON_MASK 0x000F0000L
++#define SX_BLEND_OPT_EPSILON__MRT5_EPSILON_MASK 0x00F00000L
++#define SX_BLEND_OPT_EPSILON__MRT6_EPSILON_MASK 0x0F000000L
++#define SX_BLEND_OPT_EPSILON__MRT7_EPSILON_MASK 0xF0000000L
++//SX_BLEND_OPT_CONTROL
++#define SX_BLEND_OPT_CONTROL__MRT0_COLOR_OPT_DISABLE__SHIFT 0x0
++#define SX_BLEND_OPT_CONTROL__MRT0_ALPHA_OPT_DISABLE__SHIFT 0x1
++#define SX_BLEND_OPT_CONTROL__MRT1_COLOR_OPT_DISABLE__SHIFT 0x4
++#define SX_BLEND_OPT_CONTROL__MRT1_ALPHA_OPT_DISABLE__SHIFT 0x5
++#define SX_BLEND_OPT_CONTROL__MRT2_COLOR_OPT_DISABLE__SHIFT 0x8
++#define SX_BLEND_OPT_CONTROL__MRT2_ALPHA_OPT_DISABLE__SHIFT 0x9
++#define SX_BLEND_OPT_CONTROL__MRT3_COLOR_OPT_DISABLE__SHIFT 0xc
++#define SX_BLEND_OPT_CONTROL__MRT3_ALPHA_OPT_DISABLE__SHIFT 0xd
++#define SX_BLEND_OPT_CONTROL__MRT4_COLOR_OPT_DISABLE__SHIFT 0x10
++#define SX_BLEND_OPT_CONTROL__MRT4_ALPHA_OPT_DISABLE__SHIFT 0x11
++#define SX_BLEND_OPT_CONTROL__MRT5_COLOR_OPT_DISABLE__SHIFT 0x14
++#define SX_BLEND_OPT_CONTROL__MRT5_ALPHA_OPT_DISABLE__SHIFT 0x15
++#define SX_BLEND_OPT_CONTROL__MRT6_COLOR_OPT_DISABLE__SHIFT 0x18
++#define SX_BLEND_OPT_CONTROL__MRT6_ALPHA_OPT_DISABLE__SHIFT 0x19
++#define SX_BLEND_OPT_CONTROL__MRT7_COLOR_OPT_DISABLE__SHIFT 0x1c
++#define SX_BLEND_OPT_CONTROL__MRT7_ALPHA_OPT_DISABLE__SHIFT 0x1d
++#define SX_BLEND_OPT_CONTROL__PIXEN_ZERO_OPT_DISABLE__SHIFT 0x1f
++#define SX_BLEND_OPT_CONTROL__MRT0_COLOR_OPT_DISABLE_MASK 0x00000001L
++#define SX_BLEND_OPT_CONTROL__MRT0_ALPHA_OPT_DISABLE_MASK 0x00000002L
++#define SX_BLEND_OPT_CONTROL__MRT1_COLOR_OPT_DISABLE_MASK 0x00000010L
++#define SX_BLEND_OPT_CONTROL__MRT1_ALPHA_OPT_DISABLE_MASK 0x00000020L
++#define SX_BLEND_OPT_CONTROL__MRT2_COLOR_OPT_DISABLE_MASK 0x00000100L
++#define SX_BLEND_OPT_CONTROL__MRT2_ALPHA_OPT_DISABLE_MASK 0x00000200L
++#define SX_BLEND_OPT_CONTROL__MRT3_COLOR_OPT_DISABLE_MASK 0x00001000L
++#define SX_BLEND_OPT_CONTROL__MRT3_ALPHA_OPT_DISABLE_MASK 0x00002000L
++#define SX_BLEND_OPT_CONTROL__MRT4_COLOR_OPT_DISABLE_MASK 0x00010000L
++#define SX_BLEND_OPT_CONTROL__MRT4_ALPHA_OPT_DISABLE_MASK 0x00020000L
++#define SX_BLEND_OPT_CONTROL__MRT5_COLOR_OPT_DISABLE_MASK 0x00100000L
++#define SX_BLEND_OPT_CONTROL__MRT5_ALPHA_OPT_DISABLE_MASK 0x00200000L
++#define SX_BLEND_OPT_CONTROL__MRT6_COLOR_OPT_DISABLE_MASK 0x01000000L
++#define SX_BLEND_OPT_CONTROL__MRT6_ALPHA_OPT_DISABLE_MASK 0x02000000L
++#define SX_BLEND_OPT_CONTROL__MRT7_COLOR_OPT_DISABLE_MASK 0x10000000L
++#define SX_BLEND_OPT_CONTROL__MRT7_ALPHA_OPT_DISABLE_MASK 0x20000000L
++#define SX_BLEND_OPT_CONTROL__PIXEN_ZERO_OPT_DISABLE_MASK 0x80000000L
++//SX_MRT0_BLEND_OPT
++#define SX_MRT0_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
++#define SX_MRT0_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
++#define SX_MRT0_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
++#define SX_MRT0_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
++#define SX_MRT0_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
++#define SX_MRT0_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
++#define SX_MRT0_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
++#define SX_MRT0_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
++#define SX_MRT0_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
++#define SX_MRT0_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
++#define SX_MRT0_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
++#define SX_MRT0_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
++//SX_MRT1_BLEND_OPT
++#define SX_MRT1_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
++#define SX_MRT1_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
++#define SX_MRT1_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
++#define SX_MRT1_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
++#define SX_MRT1_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
++#define SX_MRT1_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
++#define SX_MRT1_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
++#define SX_MRT1_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
++#define SX_MRT1_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
++#define SX_MRT1_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
++#define SX_MRT1_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
++#define SX_MRT1_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
++//SX_MRT2_BLEND_OPT
++#define SX_MRT2_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
++#define SX_MRT2_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
++#define SX_MRT2_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
++#define SX_MRT2_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
++#define SX_MRT2_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
++#define SX_MRT2_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
++#define SX_MRT2_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
++#define SX_MRT2_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
++#define SX_MRT2_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
++#define SX_MRT2_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
++#define SX_MRT2_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
++#define SX_MRT2_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
++//SX_MRT3_BLEND_OPT
++#define SX_MRT3_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
++#define SX_MRT3_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
++#define SX_MRT3_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
++#define SX_MRT3_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
++#define SX_MRT3_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
++#define SX_MRT3_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
++#define SX_MRT3_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
++#define SX_MRT3_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
++#define SX_MRT3_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
++#define SX_MRT3_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
++#define SX_MRT3_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
++#define SX_MRT3_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
++//SX_MRT4_BLEND_OPT
++#define SX_MRT4_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
++#define SX_MRT4_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
++#define SX_MRT4_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
++#define SX_MRT4_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
++#define SX_MRT4_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
++#define SX_MRT4_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
++#define SX_MRT4_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
++#define SX_MRT4_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
++#define SX_MRT4_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
++#define SX_MRT4_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
++#define SX_MRT4_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
++#define SX_MRT4_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
++//SX_MRT5_BLEND_OPT
++#define SX_MRT5_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
++#define SX_MRT5_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
++#define SX_MRT5_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
++#define SX_MRT5_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
++#define SX_MRT5_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
++#define SX_MRT5_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
++#define SX_MRT5_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
++#define SX_MRT5_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
++#define SX_MRT5_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
++#define SX_MRT5_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
++#define SX_MRT5_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
++#define SX_MRT5_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
++//SX_MRT6_BLEND_OPT
++#define SX_MRT6_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
++#define SX_MRT6_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
++#define SX_MRT6_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
++#define SX_MRT6_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
++#define SX_MRT6_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
++#define SX_MRT6_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
++#define SX_MRT6_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
++#define SX_MRT6_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
++#define SX_MRT6_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
++#define SX_MRT6_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
++#define SX_MRT6_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
++#define SX_MRT6_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
++//SX_MRT7_BLEND_OPT
++#define SX_MRT7_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
++#define SX_MRT7_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
++#define SX_MRT7_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
++#define SX_MRT7_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
++#define SX_MRT7_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
++#define SX_MRT7_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
++#define SX_MRT7_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
++#define SX_MRT7_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
++#define SX_MRT7_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
++#define SX_MRT7_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
++#define SX_MRT7_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
++#define SX_MRT7_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
++//CB_BLEND0_CONTROL
++#define CB_BLEND0_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
++#define CB_BLEND0_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
++#define CB_BLEND0_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
++#define CB_BLEND0_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
++#define CB_BLEND0_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
++#define CB_BLEND0_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
++#define CB_BLEND0_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
++#define CB_BLEND0_CONTROL__ENABLE__SHIFT 0x1e
++#define CB_BLEND0_CONTROL__DISABLE_ROP3__SHIFT 0x1f
++#define CB_BLEND0_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
++#define CB_BLEND0_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
++#define CB_BLEND0_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
++#define CB_BLEND0_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
++#define CB_BLEND0_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
++#define CB_BLEND0_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
++#define CB_BLEND0_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
++#define CB_BLEND0_CONTROL__ENABLE_MASK 0x40000000L
++#define CB_BLEND0_CONTROL__DISABLE_ROP3_MASK 0x80000000L
++//CB_BLEND1_CONTROL
++#define CB_BLEND1_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
++#define CB_BLEND1_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
++#define CB_BLEND1_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
++#define CB_BLEND1_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
++#define CB_BLEND1_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
++#define CB_BLEND1_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
++#define CB_BLEND1_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
++#define CB_BLEND1_CONTROL__ENABLE__SHIFT 0x1e
++#define CB_BLEND1_CONTROL__DISABLE_ROP3__SHIFT 0x1f
++#define CB_BLEND1_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
++#define CB_BLEND1_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
++#define CB_BLEND1_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
++#define CB_BLEND1_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
++#define CB_BLEND1_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
++#define CB_BLEND1_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
++#define CB_BLEND1_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
++#define CB_BLEND1_CONTROL__ENABLE_MASK 0x40000000L
++#define CB_BLEND1_CONTROL__DISABLE_ROP3_MASK 0x80000000L
++//CB_BLEND2_CONTROL
++#define CB_BLEND2_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
++#define CB_BLEND2_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
++#define CB_BLEND2_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
++#define CB_BLEND2_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
++#define CB_BLEND2_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
++#define CB_BLEND2_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
++#define CB_BLEND2_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
++#define CB_BLEND2_CONTROL__ENABLE__SHIFT 0x1e
++#define CB_BLEND2_CONTROL__DISABLE_ROP3__SHIFT 0x1f
++#define CB_BLEND2_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
++#define CB_BLEND2_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
++#define CB_BLEND2_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
++#define CB_BLEND2_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
++#define CB_BLEND2_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
++#define CB_BLEND2_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
++#define CB_BLEND2_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
++#define CB_BLEND2_CONTROL__ENABLE_MASK 0x40000000L
++#define CB_BLEND2_CONTROL__DISABLE_ROP3_MASK 0x80000000L
++//CB_BLEND3_CONTROL
++#define CB_BLEND3_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
++#define CB_BLEND3_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
++#define CB_BLEND3_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
++#define CB_BLEND3_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
++#define CB_BLEND3_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
++#define CB_BLEND3_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
++#define CB_BLEND3_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
++#define CB_BLEND3_CONTROL__ENABLE__SHIFT 0x1e
++#define CB_BLEND3_CONTROL__DISABLE_ROP3__SHIFT 0x1f
++#define CB_BLEND3_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
++#define CB_BLEND3_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
++#define CB_BLEND3_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
++#define CB_BLEND3_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
++#define CB_BLEND3_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
++#define CB_BLEND3_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
++#define CB_BLEND3_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
++#define CB_BLEND3_CONTROL__ENABLE_MASK 0x40000000L
++#define CB_BLEND3_CONTROL__DISABLE_ROP3_MASK 0x80000000L
++//CB_BLEND4_CONTROL
++#define CB_BLEND4_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
++#define CB_BLEND4_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
++#define CB_BLEND4_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
++#define CB_BLEND4_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
++#define CB_BLEND4_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
++#define CB_BLEND4_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
++#define CB_BLEND4_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
++#define CB_BLEND4_CONTROL__ENABLE__SHIFT 0x1e
++#define CB_BLEND4_CONTROL__DISABLE_ROP3__SHIFT 0x1f
++#define CB_BLEND4_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
++#define CB_BLEND4_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
++#define CB_BLEND4_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
++#define CB_BLEND4_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
++#define CB_BLEND4_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
++#define CB_BLEND4_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
++#define CB_BLEND4_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
++#define CB_BLEND4_CONTROL__ENABLE_MASK 0x40000000L
++#define CB_BLEND4_CONTROL__DISABLE_ROP3_MASK 0x80000000L
++//CB_BLEND5_CONTROL
++#define CB_BLEND5_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
++#define CB_BLEND5_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
++#define CB_BLEND5_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
++#define CB_BLEND5_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
++#define CB_BLEND5_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
++#define CB_BLEND5_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
++#define CB_BLEND5_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
++#define CB_BLEND5_CONTROL__ENABLE__SHIFT 0x1e
++#define CB_BLEND5_CONTROL__DISABLE_ROP3__SHIFT 0x1f
++#define CB_BLEND5_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
++#define CB_BLEND5_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
++#define CB_BLEND5_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
++#define CB_BLEND5_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
++#define CB_BLEND5_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
++#define CB_BLEND5_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
++#define CB_BLEND5_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
++#define CB_BLEND5_CONTROL__ENABLE_MASK 0x40000000L
++#define CB_BLEND5_CONTROL__DISABLE_ROP3_MASK 0x80000000L
++//CB_BLEND6_CONTROL
++#define CB_BLEND6_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
++#define CB_BLEND6_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
++#define CB_BLEND6_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
++#define CB_BLEND6_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
++#define CB_BLEND6_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
++#define CB_BLEND6_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
++#define CB_BLEND6_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
++#define CB_BLEND6_CONTROL__ENABLE__SHIFT 0x1e
++#define CB_BLEND6_CONTROL__DISABLE_ROP3__SHIFT 0x1f
++#define CB_BLEND6_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
++#define CB_BLEND6_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
++#define CB_BLEND6_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
++#define CB_BLEND6_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
++#define CB_BLEND6_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
++#define CB_BLEND6_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
++#define CB_BLEND6_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
++#define CB_BLEND6_CONTROL__ENABLE_MASK 0x40000000L
++#define CB_BLEND6_CONTROL__DISABLE_ROP3_MASK 0x80000000L
++//CB_BLEND7_CONTROL
++#define CB_BLEND7_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
++#define CB_BLEND7_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
++#define CB_BLEND7_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
++#define CB_BLEND7_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
++#define CB_BLEND7_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
++#define CB_BLEND7_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
++#define CB_BLEND7_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
++#define CB_BLEND7_CONTROL__ENABLE__SHIFT 0x1e
++#define CB_BLEND7_CONTROL__DISABLE_ROP3__SHIFT 0x1f
++#define CB_BLEND7_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
++#define CB_BLEND7_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
++#define CB_BLEND7_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
++#define CB_BLEND7_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
++#define CB_BLEND7_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
++#define CB_BLEND7_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
++#define CB_BLEND7_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
++#define CB_BLEND7_CONTROL__ENABLE_MASK 0x40000000L
++#define CB_BLEND7_CONTROL__DISABLE_ROP3_MASK 0x80000000L
++//CB_MRT0_EPITCH
++#define CB_MRT0_EPITCH__EPITCH__SHIFT 0x0
++#define CB_MRT0_EPITCH__EPITCH_MASK 0x0000FFFFL
++//CB_MRT1_EPITCH
++#define CB_MRT1_EPITCH__EPITCH__SHIFT 0x0
++#define CB_MRT1_EPITCH__EPITCH_MASK 0x0000FFFFL
++//CB_MRT2_EPITCH
++#define CB_MRT2_EPITCH__EPITCH__SHIFT 0x0
++#define CB_MRT2_EPITCH__EPITCH_MASK 0x0000FFFFL
++//CB_MRT3_EPITCH
++#define CB_MRT3_EPITCH__EPITCH__SHIFT 0x0
++#define CB_MRT3_EPITCH__EPITCH_MASK 0x0000FFFFL
++//CB_MRT4_EPITCH
++#define CB_MRT4_EPITCH__EPITCH__SHIFT 0x0
++#define CB_MRT4_EPITCH__EPITCH_MASK 0x0000FFFFL
++//CB_MRT5_EPITCH
++#define CB_MRT5_EPITCH__EPITCH__SHIFT 0x0
++#define CB_MRT5_EPITCH__EPITCH_MASK 0x0000FFFFL
++//CB_MRT6_EPITCH
++#define CB_MRT6_EPITCH__EPITCH__SHIFT 0x0
++#define CB_MRT6_EPITCH__EPITCH_MASK 0x0000FFFFL
++//CB_MRT7_EPITCH
++#define CB_MRT7_EPITCH__EPITCH__SHIFT 0x0
++#define CB_MRT7_EPITCH__EPITCH_MASK 0x0000FFFFL
++//CS_COPY_STATE
++#define CS_COPY_STATE__SRC_STATE_ID__SHIFT 0x0
++#define CS_COPY_STATE__SRC_STATE_ID_MASK 0x00000007L
++//GFX_COPY_STATE
++#define GFX_COPY_STATE__SRC_STATE_ID__SHIFT 0x0
++#define GFX_COPY_STATE__SRC_STATE_ID_MASK 0x00000007L
++//PA_CL_POINT_X_RAD
++#define PA_CL_POINT_X_RAD__DATA_REGISTER__SHIFT 0x0
++#define PA_CL_POINT_X_RAD__DATA_REGISTER_MASK 0xFFFFFFFFL
++//PA_CL_POINT_Y_RAD
++#define PA_CL_POINT_Y_RAD__DATA_REGISTER__SHIFT 0x0
++#define PA_CL_POINT_Y_RAD__DATA_REGISTER_MASK 0xFFFFFFFFL
++//PA_CL_POINT_SIZE
++#define PA_CL_POINT_SIZE__DATA_REGISTER__SHIFT 0x0
++#define PA_CL_POINT_SIZE__DATA_REGISTER_MASK 0xFFFFFFFFL
++//PA_CL_POINT_CULL_RAD
++#define PA_CL_POINT_CULL_RAD__DATA_REGISTER__SHIFT 0x0
++#define PA_CL_POINT_CULL_RAD__DATA_REGISTER_MASK 0xFFFFFFFFL
++//VGT_DMA_BASE_HI
++#define VGT_DMA_BASE_HI__BASE_ADDR__SHIFT 0x0
++#define VGT_DMA_BASE_HI__BASE_ADDR_MASK 0x0000FFFFL
++//VGT_DMA_BASE
++#define VGT_DMA_BASE__BASE_ADDR__SHIFT 0x0
++#define VGT_DMA_BASE__BASE_ADDR_MASK 0xFFFFFFFFL
++//VGT_DRAW_INITIATOR
++#define VGT_DRAW_INITIATOR__SOURCE_SELECT__SHIFT 0x0
++#define VGT_DRAW_INITIATOR__MAJOR_MODE__SHIFT 0x2
++#define VGT_DRAW_INITIATOR__SPRITE_EN_R6XX__SHIFT 0x4
++#define VGT_DRAW_INITIATOR__NOT_EOP__SHIFT 0x5
++#define VGT_DRAW_INITIATOR__USE_OPAQUE__SHIFT 0x6
++#define VGT_DRAW_INITIATOR__UNROLLED_INST__SHIFT 0x7
++#define VGT_DRAW_INITIATOR__GRBM_SKEW_NO_DEC__SHIFT 0x8
++#define VGT_DRAW_INITIATOR__REG_RT_INDEX__SHIFT 0x1d
++#define VGT_DRAW_INITIATOR__SOURCE_SELECT_MASK 0x00000003L
++#define VGT_DRAW_INITIATOR__MAJOR_MODE_MASK 0x0000000CL
++#define VGT_DRAW_INITIATOR__SPRITE_EN_R6XX_MASK 0x00000010L
++#define VGT_DRAW_INITIATOR__NOT_EOP_MASK 0x00000020L
++#define VGT_DRAW_INITIATOR__USE_OPAQUE_MASK 0x00000040L
++#define VGT_DRAW_INITIATOR__UNROLLED_INST_MASK 0x00000080L
++#define VGT_DRAW_INITIATOR__GRBM_SKEW_NO_DEC_MASK 0x00000100L
++#define VGT_DRAW_INITIATOR__REG_RT_INDEX_MASK 0xE0000000L
++//VGT_IMMED_DATA
++#define VGT_IMMED_DATA__DATA__SHIFT 0x0
++#define VGT_IMMED_DATA__DATA_MASK 0xFFFFFFFFL
++//VGT_EVENT_ADDRESS_REG
++#define VGT_EVENT_ADDRESS_REG__ADDRESS_LOW__SHIFT 0x0
++#define VGT_EVENT_ADDRESS_REG__ADDRESS_LOW_MASK 0x0FFFFFFFL
++//DB_DEPTH_CONTROL
++#define DB_DEPTH_CONTROL__STENCIL_ENABLE__SHIFT 0x0
++#define DB_DEPTH_CONTROL__Z_ENABLE__SHIFT 0x1
++#define DB_DEPTH_CONTROL__Z_WRITE_ENABLE__SHIFT 0x2
++#define DB_DEPTH_CONTROL__DEPTH_BOUNDS_ENABLE__SHIFT 0x3
++#define DB_DEPTH_CONTROL__ZFUNC__SHIFT 0x4
++#define DB_DEPTH_CONTROL__BACKFACE_ENABLE__SHIFT 0x7
++#define DB_DEPTH_CONTROL__STENCILFUNC__SHIFT 0x8
++#define DB_DEPTH_CONTROL__STENCILFUNC_BF__SHIFT 0x14
++#define DB_DEPTH_CONTROL__ENABLE_COLOR_WRITES_ON_DEPTH_FAIL__SHIFT 0x1e
++#define DB_DEPTH_CONTROL__DISABLE_COLOR_WRITES_ON_DEPTH_PASS__SHIFT 0x1f
++#define DB_DEPTH_CONTROL__STENCIL_ENABLE_MASK 0x00000001L
++#define DB_DEPTH_CONTROL__Z_ENABLE_MASK 0x00000002L
++#define DB_DEPTH_CONTROL__Z_WRITE_ENABLE_MASK 0x00000004L
++#define DB_DEPTH_CONTROL__DEPTH_BOUNDS_ENABLE_MASK 0x00000008L
++#define DB_DEPTH_CONTROL__ZFUNC_MASK 0x00000070L
++#define DB_DEPTH_CONTROL__BACKFACE_ENABLE_MASK 0x00000080L
++#define DB_DEPTH_CONTROL__STENCILFUNC_MASK 0x00000700L
++#define DB_DEPTH_CONTROL__STENCILFUNC_BF_MASK 0x00700000L
++#define DB_DEPTH_CONTROL__ENABLE_COLOR_WRITES_ON_DEPTH_FAIL_MASK 0x40000000L
++#define DB_DEPTH_CONTROL__DISABLE_COLOR_WRITES_ON_DEPTH_PASS_MASK 0x80000000L
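
A minimal usage sketch (illustrative only, not part of the patch itself): the auto-generated __SHIFT/_MASK pairs above are consumed by shifting a field value into position and masking it, and by masking then shifting to read a field back out of a packed register word. The snippet below copies three DB_DEPTH_CONTROL field definitions verbatim from the lines above; the FIELD_SET/FIELD_GET helpers are hypothetical stand-ins for the driver's own field-access macros, and ZFUNC = 3 is just an arbitrary example compare-function value.

#include <stdint.h>
#include <stdio.h>

/* Field definitions copied verbatim from the DB_DEPTH_CONTROL block above. */
#define DB_DEPTH_CONTROL__Z_ENABLE__SHIFT        0x1
#define DB_DEPTH_CONTROL__Z_WRITE_ENABLE__SHIFT  0x2
#define DB_DEPTH_CONTROL__ZFUNC__SHIFT           0x4
#define DB_DEPTH_CONTROL__Z_ENABLE_MASK          0x00000002L
#define DB_DEPTH_CONTROL__Z_WRITE_ENABLE_MASK    0x00000004L
#define DB_DEPTH_CONTROL__ZFUNC_MASK             0x00000070L

/* Hypothetical helpers built on the usual "(value << SHIFT) & MASK" idiom. */
#define FIELD_SET(reg, fld, val) \
        (((uint32_t)(val) << reg##__##fld##__SHIFT) & reg##__##fld##_MASK)
#define FIELD_GET(word, reg, fld) \
        (((word) & reg##__##fld##_MASK) >> reg##__##fld##__SHIFT)

int main(void)
{
        /* Pack: enable depth test and depth writes, set an example ZFUNC of 3. */
        uint32_t db_depth_control =
                FIELD_SET(DB_DEPTH_CONTROL, Z_ENABLE, 1) |
                FIELD_SET(DB_DEPTH_CONTROL, Z_WRITE_ENABLE, 1) |
                FIELD_SET(DB_DEPTH_CONTROL, ZFUNC, 3);

        /* Unpack: read ZFUNC back out of the packed word. */
        printf("DB_DEPTH_CONTROL = 0x%08x\n", db_depth_control);  /* 0x00000036 */
        printf("ZFUNC            = %u\n",
               (unsigned)FIELD_GET(db_depth_control, DB_DEPTH_CONTROL, ZFUNC));  /* 3 */
        return 0;
}
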
++//DB_EQAA
++#define DB_EQAA__MAX_ANCHOR_SAMPLES__SHIFT 0x0
++#define DB_EQAA__PS_ITER_SAMPLES__SHIFT 0x4
++#define DB_EQAA__MASK_EXPORT_NUM_SAMPLES__SHIFT 0x8
++#define DB_EQAA__ALPHA_TO_MASK_NUM_SAMPLES__SHIFT 0xc
++#define DB_EQAA__HIGH_QUALITY_INTERSECTIONS__SHIFT 0x10
++#define DB_EQAA__INCOHERENT_EQAA_READS__SHIFT 0x11
++#define DB_EQAA__INTERPOLATE_COMP_Z__SHIFT 0x12
++#define DB_EQAA__INTERPOLATE_SRC_Z__SHIFT 0x13
++#define DB_EQAA__STATIC_ANCHOR_ASSOCIATIONS__SHIFT 0x14
++#define DB_EQAA__ALPHA_TO_MASK_EQAA_DISABLE__SHIFT 0x15
++#define DB_EQAA__OVERRASTERIZATION_AMOUNT__SHIFT 0x18
++#define DB_EQAA__ENABLE_POSTZ_OVERRASTERIZATION__SHIFT 0x1b
++#define DB_EQAA__MAX_ANCHOR_SAMPLES_MASK 0x00000007L
++#define DB_EQAA__PS_ITER_SAMPLES_MASK 0x00000070L
++#define DB_EQAA__MASK_EXPORT_NUM_SAMPLES_MASK 0x00000700L
++#define DB_EQAA__ALPHA_TO_MASK_NUM_SAMPLES_MASK 0x00007000L
++#define DB_EQAA__HIGH_QUALITY_INTERSECTIONS_MASK 0x00010000L
++#define DB_EQAA__INCOHERENT_EQAA_READS_MASK 0x00020000L
++#define DB_EQAA__INTERPOLATE_COMP_Z_MASK 0x00040000L
++#define DB_EQAA__INTERPOLATE_SRC_Z_MASK 0x00080000L
++#define DB_EQAA__STATIC_ANCHOR_ASSOCIATIONS_MASK 0x00100000L
++#define DB_EQAA__ALPHA_TO_MASK_EQAA_DISABLE_MASK 0x00200000L
++#define DB_EQAA__OVERRASTERIZATION_AMOUNT_MASK 0x07000000L
++#define DB_EQAA__ENABLE_POSTZ_OVERRASTERIZATION_MASK 0x08000000L
++//CB_COLOR_CONTROL
++#define CB_COLOR_CONTROL__DISABLE_DUAL_QUAD__SHIFT 0x0
++#define CB_COLOR_CONTROL__DEGAMMA_ENABLE__SHIFT 0x3
++#define CB_COLOR_CONTROL__MODE__SHIFT 0x4
++#define CB_COLOR_CONTROL__ROP3__SHIFT 0x10
++#define CB_COLOR_CONTROL__DISABLE_DUAL_QUAD_MASK 0x00000001L
++#define CB_COLOR_CONTROL__DEGAMMA_ENABLE_MASK 0x00000008L
++#define CB_COLOR_CONTROL__MODE_MASK 0x00000070L
++#define CB_COLOR_CONTROL__ROP3_MASK 0x00FF0000L
++//DB_SHADER_CONTROL
++#define DB_SHADER_CONTROL__Z_EXPORT_ENABLE__SHIFT 0x0
++#define DB_SHADER_CONTROL__STENCIL_TEST_VAL_EXPORT_ENABLE__SHIFT 0x1
++#define DB_SHADER_CONTROL__STENCIL_OP_VAL_EXPORT_ENABLE__SHIFT 0x2
++#define DB_SHADER_CONTROL__Z_ORDER__SHIFT 0x4
++#define DB_SHADER_CONTROL__KILL_ENABLE__SHIFT 0x6
++#define DB_SHADER_CONTROL__COVERAGE_TO_MASK_ENABLE__SHIFT 0x7
++#define DB_SHADER_CONTROL__MASK_EXPORT_ENABLE__SHIFT 0x8
++#define DB_SHADER_CONTROL__EXEC_ON_HIER_FAIL__SHIFT 0x9
++#define DB_SHADER_CONTROL__EXEC_ON_NOOP__SHIFT 0xa
++#define DB_SHADER_CONTROL__ALPHA_TO_MASK_DISABLE__SHIFT 0xb
++#define DB_SHADER_CONTROL__DEPTH_BEFORE_SHADER__SHIFT 0xc
++#define DB_SHADER_CONTROL__CONSERVATIVE_Z_EXPORT__SHIFT 0xd
++#define DB_SHADER_CONTROL__DUAL_QUAD_DISABLE__SHIFT 0xf
++#define DB_SHADER_CONTROL__PRIMITIVE_ORDERED_PIXEL_SHADER__SHIFT 0x10
++#define DB_SHADER_CONTROL__EXEC_IF_OVERLAPPED__SHIFT 0x11
++#define DB_SHADER_CONTROL__POPS_OVERLAP_NUM_SAMPLES__SHIFT 0x14
++#define DB_SHADER_CONTROL__Z_EXPORT_ENABLE_MASK 0x00000001L
++#define DB_SHADER_CONTROL__STENCIL_TEST_VAL_EXPORT_ENABLE_MASK 0x00000002L
++#define DB_SHADER_CONTROL__STENCIL_OP_VAL_EXPORT_ENABLE_MASK 0x00000004L
++#define DB_SHADER_CONTROL__Z_ORDER_MASK 0x00000030L
++#define DB_SHADER_CONTROL__KILL_ENABLE_MASK 0x00000040L
++#define DB_SHADER_CONTROL__COVERAGE_TO_MASK_ENABLE_MASK 0x00000080L
++#define DB_SHADER_CONTROL__MASK_EXPORT_ENABLE_MASK 0x00000100L
++#define DB_SHADER_CONTROL__EXEC_ON_HIER_FAIL_MASK 0x00000200L
++#define DB_SHADER_CONTROL__EXEC_ON_NOOP_MASK 0x00000400L
++#define DB_SHADER_CONTROL__ALPHA_TO_MASK_DISABLE_MASK 0x00000800L
++#define DB_SHADER_CONTROL__DEPTH_BEFORE_SHADER_MASK 0x00001000L
++#define DB_SHADER_CONTROL__CONSERVATIVE_Z_EXPORT_MASK 0x00006000L
++#define DB_SHADER_CONTROL__DUAL_QUAD_DISABLE_MASK 0x00008000L
++#define DB_SHADER_CONTROL__PRIMITIVE_ORDERED_PIXEL_SHADER_MASK 0x00010000L
++#define DB_SHADER_CONTROL__EXEC_IF_OVERLAPPED_MASK 0x00020000L
++#define DB_SHADER_CONTROL__POPS_OVERLAP_NUM_SAMPLES_MASK 0x00700000L
++//PA_CL_CLIP_CNTL
++#define PA_CL_CLIP_CNTL__UCP_ENA_0__SHIFT 0x0
++#define PA_CL_CLIP_CNTL__UCP_ENA_1__SHIFT 0x1
++#define PA_CL_CLIP_CNTL__UCP_ENA_2__SHIFT 0x2
++#define PA_CL_CLIP_CNTL__UCP_ENA_3__SHIFT 0x3
++#define PA_CL_CLIP_CNTL__UCP_ENA_4__SHIFT 0x4
++#define PA_CL_CLIP_CNTL__UCP_ENA_5__SHIFT 0x5
++#define PA_CL_CLIP_CNTL__PS_UCP_Y_SCALE_NEG__SHIFT 0xd
++#define PA_CL_CLIP_CNTL__PS_UCP_MODE__SHIFT 0xe
++#define PA_CL_CLIP_CNTL__CLIP_DISABLE__SHIFT 0x10
++#define PA_CL_CLIP_CNTL__UCP_CULL_ONLY_ENA__SHIFT 0x11
++#define PA_CL_CLIP_CNTL__BOUNDARY_EDGE_FLAG_ENA__SHIFT 0x12
++#define PA_CL_CLIP_CNTL__DX_CLIP_SPACE_DEF__SHIFT 0x13
++#define PA_CL_CLIP_CNTL__DIS_CLIP_ERR_DETECT__SHIFT 0x14
++#define PA_CL_CLIP_CNTL__VTX_KILL_OR__SHIFT 0x15
++#define PA_CL_CLIP_CNTL__DX_RASTERIZATION_KILL__SHIFT 0x16
++#define PA_CL_CLIP_CNTL__DX_LINEAR_ATTR_CLIP_ENA__SHIFT 0x18
++#define PA_CL_CLIP_CNTL__VTE_VPORT_PROVOKE_DISABLE__SHIFT 0x19
++#define PA_CL_CLIP_CNTL__ZCLIP_NEAR_DISABLE__SHIFT 0x1a
++#define PA_CL_CLIP_CNTL__ZCLIP_FAR_DISABLE__SHIFT 0x1b
++#define PA_CL_CLIP_CNTL__UCP_ENA_0_MASK 0x00000001L
++#define PA_CL_CLIP_CNTL__UCP_ENA_1_MASK 0x00000002L
++#define PA_CL_CLIP_CNTL__UCP_ENA_2_MASK 0x00000004L
++#define PA_CL_CLIP_CNTL__UCP_ENA_3_MASK 0x00000008L
++#define PA_CL_CLIP_CNTL__UCP_ENA_4_MASK 0x00000010L
++#define PA_CL_CLIP_CNTL__UCP_ENA_5_MASK 0x00000020L
++#define PA_CL_CLIP_CNTL__PS_UCP_Y_SCALE_NEG_MASK 0x00002000L
++#define PA_CL_CLIP_CNTL__PS_UCP_MODE_MASK 0x0000C000L
++#define PA_CL_CLIP_CNTL__CLIP_DISABLE_MASK 0x00010000L
++#define PA_CL_CLIP_CNTL__UCP_CULL_ONLY_ENA_MASK 0x00020000L
++#define PA_CL_CLIP_CNTL__BOUNDARY_EDGE_FLAG_ENA_MASK 0x00040000L
++#define PA_CL_CLIP_CNTL__DX_CLIP_SPACE_DEF_MASK 0x00080000L
++#define PA_CL_CLIP_CNTL__DIS_CLIP_ERR_DETECT_MASK 0x00100000L
++#define PA_CL_CLIP_CNTL__VTX_KILL_OR_MASK 0x00200000L
++#define PA_CL_CLIP_CNTL__DX_RASTERIZATION_KILL_MASK 0x00400000L
++#define PA_CL_CLIP_CNTL__DX_LINEAR_ATTR_CLIP_ENA_MASK 0x01000000L
++#define PA_CL_CLIP_CNTL__VTE_VPORT_PROVOKE_DISABLE_MASK 0x02000000L
++#define PA_CL_CLIP_CNTL__ZCLIP_NEAR_DISABLE_MASK 0x04000000L
++#define PA_CL_CLIP_CNTL__ZCLIP_FAR_DISABLE_MASK 0x08000000L
++//PA_SU_SC_MODE_CNTL
++#define PA_SU_SC_MODE_CNTL__CULL_FRONT__SHIFT 0x0
++#define PA_SU_SC_MODE_CNTL__CULL_BACK__SHIFT 0x1
++#define PA_SU_SC_MODE_CNTL__FACE__SHIFT 0x2
++#define PA_SU_SC_MODE_CNTL__POLY_MODE__SHIFT 0x3
++#define PA_SU_SC_MODE_CNTL__POLYMODE_FRONT_PTYPE__SHIFT 0x5
++#define PA_SU_SC_MODE_CNTL__POLYMODE_BACK_PTYPE__SHIFT 0x8
++#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_FRONT_ENABLE__SHIFT 0xb
++#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_BACK_ENABLE__SHIFT 0xc
++#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_PARA_ENABLE__SHIFT 0xd
++#define PA_SU_SC_MODE_CNTL__VTX_WINDOW_OFFSET_ENABLE__SHIFT 0x10
++#define PA_SU_SC_MODE_CNTL__PROVOKING_VTX_LAST__SHIFT 0x13
++#define PA_SU_SC_MODE_CNTL__PERSP_CORR_DIS__SHIFT 0x14
++#define PA_SU_SC_MODE_CNTL__MULTI_PRIM_IB_ENA__SHIFT 0x15
++#define PA_SU_SC_MODE_CNTL__RIGHT_TRIANGLE_ALTERNATE_GRADIENT_REF__SHIFT 0x16
++#define PA_SU_SC_MODE_CNTL__NEW_QUAD_DECOMPOSITION__SHIFT 0x17
++#define PA_SU_SC_MODE_CNTL__CULL_FRONT_MASK 0x00000001L
++#define PA_SU_SC_MODE_CNTL__CULL_BACK_MASK 0x00000002L
++#define PA_SU_SC_MODE_CNTL__FACE_MASK 0x00000004L
++#define PA_SU_SC_MODE_CNTL__POLY_MODE_MASK 0x00000018L
++#define PA_SU_SC_MODE_CNTL__POLYMODE_FRONT_PTYPE_MASK 0x000000E0L
++#define PA_SU_SC_MODE_CNTL__POLYMODE_BACK_PTYPE_MASK 0x00000700L
++#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_FRONT_ENABLE_MASK 0x00000800L
++#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_BACK_ENABLE_MASK 0x00001000L
++#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_PARA_ENABLE_MASK 0x00002000L
++#define PA_SU_SC_MODE_CNTL__VTX_WINDOW_OFFSET_ENABLE_MASK 0x00010000L
++#define PA_SU_SC_MODE_CNTL__PROVOKING_VTX_LAST_MASK 0x00080000L
++#define PA_SU_SC_MODE_CNTL__PERSP_CORR_DIS_MASK 0x00100000L
++#define PA_SU_SC_MODE_CNTL__MULTI_PRIM_IB_ENA_MASK 0x00200000L
++#define PA_SU_SC_MODE_CNTL__RIGHT_TRIANGLE_ALTERNATE_GRADIENT_REF_MASK 0x00400000L
++#define PA_SU_SC_MODE_CNTL__NEW_QUAD_DECOMPOSITION_MASK 0x00800000L
++//PA_CL_VTE_CNTL
++#define PA_CL_VTE_CNTL__VPORT_X_SCALE_ENA__SHIFT 0x0
++#define PA_CL_VTE_CNTL__VPORT_X_OFFSET_ENA__SHIFT 0x1
++#define PA_CL_VTE_CNTL__VPORT_Y_SCALE_ENA__SHIFT 0x2
++#define PA_CL_VTE_CNTL__VPORT_Y_OFFSET_ENA__SHIFT 0x3
++#define PA_CL_VTE_CNTL__VPORT_Z_SCALE_ENA__SHIFT 0x4
++#define PA_CL_VTE_CNTL__VPORT_Z_OFFSET_ENA__SHIFT 0x5
++#define PA_CL_VTE_CNTL__VTX_XY_FMT__SHIFT 0x8
++#define PA_CL_VTE_CNTL__VTX_Z_FMT__SHIFT 0x9
++#define PA_CL_VTE_CNTL__VTX_W0_FMT__SHIFT 0xa
++#define PA_CL_VTE_CNTL__PERFCOUNTER_REF__SHIFT 0xb
++#define PA_CL_VTE_CNTL__VPORT_X_SCALE_ENA_MASK 0x00000001L
++#define PA_CL_VTE_CNTL__VPORT_X_OFFSET_ENA_MASK 0x00000002L
++#define PA_CL_VTE_CNTL__VPORT_Y_SCALE_ENA_MASK 0x00000004L
++#define PA_CL_VTE_CNTL__VPORT_Y_OFFSET_ENA_MASK 0x00000008L
++#define PA_CL_VTE_CNTL__VPORT_Z_SCALE_ENA_MASK 0x00000010L
++#define PA_CL_VTE_CNTL__VPORT_Z_OFFSET_ENA_MASK 0x00000020L
++#define PA_CL_VTE_CNTL__VTX_XY_FMT_MASK 0x00000100L
++#define PA_CL_VTE_CNTL__VTX_Z_FMT_MASK 0x00000200L
++#define PA_CL_VTE_CNTL__VTX_W0_FMT_MASK 0x00000400L
++#define PA_CL_VTE_CNTL__PERFCOUNTER_REF_MASK 0x00000800L
++//PA_CL_VS_OUT_CNTL
++#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_0__SHIFT 0x0
++#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_1__SHIFT 0x1
++#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_2__SHIFT 0x2
++#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_3__SHIFT 0x3
++#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_4__SHIFT 0x4
++#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_5__SHIFT 0x5
++#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_6__SHIFT 0x6
++#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_7__SHIFT 0x7
++#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_0__SHIFT 0x8
++#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_1__SHIFT 0x9
++#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_2__SHIFT 0xa
++#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_3__SHIFT 0xb
++#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_4__SHIFT 0xc
++#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_5__SHIFT 0xd
++#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_6__SHIFT 0xe
++#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_7__SHIFT 0xf
++#define PA_CL_VS_OUT_CNTL__USE_VTX_POINT_SIZE__SHIFT 0x10
++#define PA_CL_VS_OUT_CNTL__USE_VTX_EDGE_FLAG__SHIFT 0x11
++#define PA_CL_VS_OUT_CNTL__USE_VTX_RENDER_TARGET_INDX__SHIFT 0x12
++#define PA_CL_VS_OUT_CNTL__USE_VTX_VIEWPORT_INDX__SHIFT 0x13
++#define PA_CL_VS_OUT_CNTL__USE_VTX_KILL_FLAG__SHIFT 0x14
++#define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_VEC_ENA__SHIFT 0x15
++#define PA_CL_VS_OUT_CNTL__VS_OUT_CCDIST0_VEC_ENA__SHIFT 0x16
++#define PA_CL_VS_OUT_CNTL__VS_OUT_CCDIST1_VEC_ENA__SHIFT 0x17
++#define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_SIDE_BUS_ENA__SHIFT 0x18
++#define PA_CL_VS_OUT_CNTL__USE_VTX_GS_CUT_FLAG__SHIFT 0x19
++#define PA_CL_VS_OUT_CNTL__USE_VTX_LINE_WIDTH__SHIFT 0x1a
++#define PA_CL_VS_OUT_CNTL__USE_VTX_SHD_OBJPRIM_ID__SHIFT 0x1b
++#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_0_MASK 0x00000001L
++#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_1_MASK 0x00000002L
++#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_2_MASK 0x00000004L
++#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_3_MASK 0x00000008L
++#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_4_MASK 0x00000010L
++#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_5_MASK 0x00000020L
++#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_6_MASK 0x00000040L
++#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_7_MASK 0x00000080L
++#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_0_MASK 0x00000100L
++#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_1_MASK 0x00000200L
++#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_2_MASK 0x00000400L
++#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_3_MASK 0x00000800L
++#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_4_MASK 0x00001000L
++#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_5_MASK 0x00002000L
++#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_6_MASK 0x00004000L
++#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_7_MASK 0x00008000L
++#define PA_CL_VS_OUT_CNTL__USE_VTX_POINT_SIZE_MASK 0x00010000L
++#define PA_CL_VS_OUT_CNTL__USE_VTX_EDGE_FLAG_MASK 0x00020000L
++#define PA_CL_VS_OUT_CNTL__USE_VTX_RENDER_TARGET_INDX_MASK 0x00040000L
++#define PA_CL_VS_OUT_CNTL__USE_VTX_VIEWPORT_INDX_MASK 0x00080000L
++#define PA_CL_VS_OUT_CNTL__USE_VTX_KILL_FLAG_MASK 0x00100000L
++#define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_VEC_ENA_MASK 0x00200000L
++#define PA_CL_VS_OUT_CNTL__VS_OUT_CCDIST0_VEC_ENA_MASK 0x00400000L
++#define PA_CL_VS_OUT_CNTL__VS_OUT_CCDIST1_VEC_ENA_MASK 0x00800000L
++#define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_SIDE_BUS_ENA_MASK 0x01000000L
++#define PA_CL_VS_OUT_CNTL__USE_VTX_GS_CUT_FLAG_MASK 0x02000000L
++#define PA_CL_VS_OUT_CNTL__USE_VTX_LINE_WIDTH_MASK 0x04000000L
++#define PA_CL_VS_OUT_CNTL__USE_VTX_SHD_OBJPRIM_ID_MASK 0x08000000L
++//PA_CL_NANINF_CNTL
++#define PA_CL_NANINF_CNTL__VTE_XY_INF_DISCARD__SHIFT 0x0
++#define PA_CL_NANINF_CNTL__VTE_Z_INF_DISCARD__SHIFT 0x1
++#define PA_CL_NANINF_CNTL__VTE_W_INF_DISCARD__SHIFT 0x2
++#define PA_CL_NANINF_CNTL__VTE_0XNANINF_IS_0__SHIFT 0x3
++#define PA_CL_NANINF_CNTL__VTE_XY_NAN_RETAIN__SHIFT 0x4
++#define PA_CL_NANINF_CNTL__VTE_Z_NAN_RETAIN__SHIFT 0x5
++#define PA_CL_NANINF_CNTL__VTE_W_NAN_RETAIN__SHIFT 0x6
++#define PA_CL_NANINF_CNTL__VTE_W_RECIP_NAN_IS_0__SHIFT 0x7
++#define PA_CL_NANINF_CNTL__VS_XY_NAN_TO_INF__SHIFT 0x8
++#define PA_CL_NANINF_CNTL__VS_XY_INF_RETAIN__SHIFT 0x9
++#define PA_CL_NANINF_CNTL__VS_Z_NAN_TO_INF__SHIFT 0xa
++#define PA_CL_NANINF_CNTL__VS_Z_INF_RETAIN__SHIFT 0xb
++#define PA_CL_NANINF_CNTL__VS_W_NAN_TO_INF__SHIFT 0xc
++#define PA_CL_NANINF_CNTL__VS_W_INF_RETAIN__SHIFT 0xd
++#define PA_CL_NANINF_CNTL__VS_CLIP_DIST_INF_DISCARD__SHIFT 0xe
++#define PA_CL_NANINF_CNTL__VTE_NO_OUTPUT_NEG_0__SHIFT 0x14
++#define PA_CL_NANINF_CNTL__VTE_XY_INF_DISCARD_MASK 0x00000001L
++#define PA_CL_NANINF_CNTL__VTE_Z_INF_DISCARD_MASK 0x00000002L
++#define PA_CL_NANINF_CNTL__VTE_W_INF_DISCARD_MASK 0x00000004L
++#define PA_CL_NANINF_CNTL__VTE_0XNANINF_IS_0_MASK 0x00000008L
++#define PA_CL_NANINF_CNTL__VTE_XY_NAN_RETAIN_MASK 0x00000010L
++#define PA_CL_NANINF_CNTL__VTE_Z_NAN_RETAIN_MASK 0x00000020L
++#define PA_CL_NANINF_CNTL__VTE_W_NAN_RETAIN_MASK 0x00000040L
++#define PA_CL_NANINF_CNTL__VTE_W_RECIP_NAN_IS_0_MASK 0x00000080L
++#define PA_CL_NANINF_CNTL__VS_XY_NAN_TO_INF_MASK 0x00000100L
++#define PA_CL_NANINF_CNTL__VS_XY_INF_RETAIN_MASK 0x00000200L
++#define PA_CL_NANINF_CNTL__VS_Z_NAN_TO_INF_MASK 0x00000400L
++#define PA_CL_NANINF_CNTL__VS_Z_INF_RETAIN_MASK 0x00000800L
++#define PA_CL_NANINF_CNTL__VS_W_NAN_TO_INF_MASK 0x00001000L
++#define PA_CL_NANINF_CNTL__VS_W_INF_RETAIN_MASK 0x00002000L
++#define PA_CL_NANINF_CNTL__VS_CLIP_DIST_INF_DISCARD_MASK 0x00004000L
++#define PA_CL_NANINF_CNTL__VTE_NO_OUTPUT_NEG_0_MASK 0x00100000L
++//PA_SU_LINE_STIPPLE_CNTL
++#define PA_SU_LINE_STIPPLE_CNTL__LINE_STIPPLE_RESET__SHIFT 0x0
++#define PA_SU_LINE_STIPPLE_CNTL__EXPAND_FULL_LENGTH__SHIFT 0x2
++#define PA_SU_LINE_STIPPLE_CNTL__FRACTIONAL_ACCUM__SHIFT 0x3
++#define PA_SU_LINE_STIPPLE_CNTL__DIAMOND_ADJUST__SHIFT 0x4
++#define PA_SU_LINE_STIPPLE_CNTL__LINE_STIPPLE_RESET_MASK 0x00000003L
++#define PA_SU_LINE_STIPPLE_CNTL__EXPAND_FULL_LENGTH_MASK 0x00000004L
++#define PA_SU_LINE_STIPPLE_CNTL__FRACTIONAL_ACCUM_MASK 0x00000008L
++#define PA_SU_LINE_STIPPLE_CNTL__DIAMOND_ADJUST_MASK 0x00000010L
++//PA_SU_LINE_STIPPLE_SCALE
++#define PA_SU_LINE_STIPPLE_SCALE__LINE_STIPPLE_SCALE__SHIFT 0x0
++#define PA_SU_LINE_STIPPLE_SCALE__LINE_STIPPLE_SCALE_MASK 0xFFFFFFFFL
++//PA_SU_PRIM_FILTER_CNTL
++#define PA_SU_PRIM_FILTER_CNTL__TRIANGLE_FILTER_DISABLE__SHIFT 0x0
++#define PA_SU_PRIM_FILTER_CNTL__LINE_FILTER_DISABLE__SHIFT 0x1
++#define PA_SU_PRIM_FILTER_CNTL__POINT_FILTER_DISABLE__SHIFT 0x2
++#define PA_SU_PRIM_FILTER_CNTL__RECTANGLE_FILTER_DISABLE__SHIFT 0x3
++#define PA_SU_PRIM_FILTER_CNTL__TRIANGLE_EXPAND_ENA__SHIFT 0x4
++#define PA_SU_PRIM_FILTER_CNTL__LINE_EXPAND_ENA__SHIFT 0x5
++#define PA_SU_PRIM_FILTER_CNTL__POINT_EXPAND_ENA__SHIFT 0x6
++#define PA_SU_PRIM_FILTER_CNTL__RECTANGLE_EXPAND_ENA__SHIFT 0x7
++#define PA_SU_PRIM_FILTER_CNTL__PRIM_EXPAND_CONSTANT__SHIFT 0x8
++#define PA_SU_PRIM_FILTER_CNTL__XMAX_RIGHT_EXCLUSION__SHIFT 0x1e
++#define PA_SU_PRIM_FILTER_CNTL__YMAX_BOTTOM_EXCLUSION__SHIFT 0x1f
++#define PA_SU_PRIM_FILTER_CNTL__TRIANGLE_FILTER_DISABLE_MASK 0x00000001L
++#define PA_SU_PRIM_FILTER_CNTL__LINE_FILTER_DISABLE_MASK 0x00000002L
++#define PA_SU_PRIM_FILTER_CNTL__POINT_FILTER_DISABLE_MASK 0x00000004L
++#define PA_SU_PRIM_FILTER_CNTL__RECTANGLE_FILTER_DISABLE_MASK 0x00000008L
++#define PA_SU_PRIM_FILTER_CNTL__TRIANGLE_EXPAND_ENA_MASK 0x00000010L
++#define PA_SU_PRIM_FILTER_CNTL__LINE_EXPAND_ENA_MASK 0x00000020L
++#define PA_SU_PRIM_FILTER_CNTL__POINT_EXPAND_ENA_MASK 0x00000040L
++#define PA_SU_PRIM_FILTER_CNTL__RECTANGLE_EXPAND_ENA_MASK 0x00000080L
++#define PA_SU_PRIM_FILTER_CNTL__PRIM_EXPAND_CONSTANT_MASK 0x0000FF00L
++#define PA_SU_PRIM_FILTER_CNTL__XMAX_RIGHT_EXCLUSION_MASK 0x40000000L
++#define PA_SU_PRIM_FILTER_CNTL__YMAX_BOTTOM_EXCLUSION_MASK 0x80000000L
++//PA_SU_SMALL_PRIM_FILTER_CNTL
++#define PA_SU_SMALL_PRIM_FILTER_CNTL__SMALL_PRIM_FILTER_ENABLE__SHIFT 0x0
++#define PA_SU_SMALL_PRIM_FILTER_CNTL__TRIANGLE_FILTER_DISABLE__SHIFT 0x1
++#define PA_SU_SMALL_PRIM_FILTER_CNTL__LINE_FILTER_DISABLE__SHIFT 0x2
++#define PA_SU_SMALL_PRIM_FILTER_CNTL__POINT_FILTER_DISABLE__SHIFT 0x3
++#define PA_SU_SMALL_PRIM_FILTER_CNTL__RECTANGLE_FILTER_DISABLE__SHIFT 0x4
++#define PA_SU_SMALL_PRIM_FILTER_CNTL__SRBSL_ENABLE__SHIFT 0x5
++#define PA_SU_SMALL_PRIM_FILTER_CNTL__SMALL_PRIM_FILTER_ENABLE_MASK 0x00000001L
++#define PA_SU_SMALL_PRIM_FILTER_CNTL__TRIANGLE_FILTER_DISABLE_MASK 0x00000002L
++#define PA_SU_SMALL_PRIM_FILTER_CNTL__LINE_FILTER_DISABLE_MASK 0x00000004L
++#define PA_SU_SMALL_PRIM_FILTER_CNTL__POINT_FILTER_DISABLE_MASK 0x00000008L
++#define PA_SU_SMALL_PRIM_FILTER_CNTL__RECTANGLE_FILTER_DISABLE_MASK 0x00000010L
++#define PA_SU_SMALL_PRIM_FILTER_CNTL__SRBSL_ENABLE_MASK 0x00000020L
++//PA_CL_OBJPRIM_ID_CNTL
++#define PA_CL_OBJPRIM_ID_CNTL__OBJ_ID_SEL__SHIFT 0x0
++#define PA_CL_OBJPRIM_ID_CNTL__ADD_PIPED_PRIM_ID__SHIFT 0x1
++#define PA_CL_OBJPRIM_ID_CNTL__EN_32BIT_OBJPRIMID__SHIFT 0x2
++#define PA_CL_OBJPRIM_ID_CNTL__OBJ_ID_SEL_MASK 0x00000001L
++#define PA_CL_OBJPRIM_ID_CNTL__ADD_PIPED_PRIM_ID_MASK 0x00000002L
++#define PA_CL_OBJPRIM_ID_CNTL__EN_32BIT_OBJPRIMID_MASK 0x00000004L
++//PA_CL_NGG_CNTL
++#define PA_CL_NGG_CNTL__VERTEX_REUSE_OFF__SHIFT 0x0
++#define PA_CL_NGG_CNTL__INDEX_BUF_EDGE_FLAG_ENA__SHIFT 0x1
++#define PA_CL_NGG_CNTL__VERTEX_REUSE_OFF_MASK 0x00000001L
++#define PA_CL_NGG_CNTL__INDEX_BUF_EDGE_FLAG_ENA_MASK 0x00000002L
++//PA_SU_OVER_RASTERIZATION_CNTL
++#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_TRIANGLES__SHIFT 0x0
++#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_LINES__SHIFT 0x1
++#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_POINTS__SHIFT 0x2
++#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_RECTANGLES__SHIFT 0x3
++#define PA_SU_OVER_RASTERIZATION_CNTL__USE_PROVOKING_ZW__SHIFT 0x4
++#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_TRIANGLES_MASK 0x00000001L
++#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_LINES_MASK 0x00000002L
++#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_POINTS_MASK 0x00000004L
++#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_RECTANGLES_MASK 0x00000008L
++#define PA_SU_OVER_RASTERIZATION_CNTL__USE_PROVOKING_ZW_MASK 0x00000010L
++//PA_SU_POINT_SIZE
++#define PA_SU_POINT_SIZE__HEIGHT__SHIFT 0x0
++#define PA_SU_POINT_SIZE__WIDTH__SHIFT 0x10
++#define PA_SU_POINT_SIZE__HEIGHT_MASK 0x0000FFFFL
++#define PA_SU_POINT_SIZE__WIDTH_MASK 0xFFFF0000L
++//PA_SU_POINT_MINMAX
++#define PA_SU_POINT_MINMAX__MIN_SIZE__SHIFT 0x0
++#define PA_SU_POINT_MINMAX__MAX_SIZE__SHIFT 0x10
++#define PA_SU_POINT_MINMAX__MIN_SIZE_MASK 0x0000FFFFL
++#define PA_SU_POINT_MINMAX__MAX_SIZE_MASK 0xFFFF0000L
++//PA_SU_LINE_CNTL
++#define PA_SU_LINE_CNTL__WIDTH__SHIFT 0x0
++#define PA_SU_LINE_CNTL__WIDTH_MASK 0x0000FFFFL
++//PA_SC_LINE_STIPPLE
++#define PA_SC_LINE_STIPPLE__LINE_PATTERN__SHIFT 0x0
++#define PA_SC_LINE_STIPPLE__REPEAT_COUNT__SHIFT 0x10
++#define PA_SC_LINE_STIPPLE__PATTERN_BIT_ORDER__SHIFT 0x1c
++#define PA_SC_LINE_STIPPLE__AUTO_RESET_CNTL__SHIFT 0x1d
++#define PA_SC_LINE_STIPPLE__LINE_PATTERN_MASK 0x0000FFFFL
++#define PA_SC_LINE_STIPPLE__REPEAT_COUNT_MASK 0x00FF0000L
++#define PA_SC_LINE_STIPPLE__PATTERN_BIT_ORDER_MASK 0x10000000L
++#define PA_SC_LINE_STIPPLE__AUTO_RESET_CNTL_MASK 0x60000000L
++//VGT_OUTPUT_PATH_CNTL
++#define VGT_OUTPUT_PATH_CNTL__PATH_SELECT__SHIFT 0x0
++#define VGT_OUTPUT_PATH_CNTL__PATH_SELECT_MASK 0x00000007L
++//VGT_HOS_CNTL
++#define VGT_HOS_CNTL__TESS_MODE__SHIFT 0x0
++#define VGT_HOS_CNTL__TESS_MODE_MASK 0x00000003L
++//VGT_HOS_MAX_TESS_LEVEL
++#define VGT_HOS_MAX_TESS_LEVEL__MAX_TESS__SHIFT 0x0
++#define VGT_HOS_MAX_TESS_LEVEL__MAX_TESS_MASK 0xFFFFFFFFL
++//VGT_HOS_MIN_TESS_LEVEL
++#define VGT_HOS_MIN_TESS_LEVEL__MIN_TESS__SHIFT 0x0
++#define VGT_HOS_MIN_TESS_LEVEL__MIN_TESS_MASK 0xFFFFFFFFL
++//VGT_HOS_REUSE_DEPTH
++#define VGT_HOS_REUSE_DEPTH__REUSE_DEPTH__SHIFT 0x0
++#define VGT_HOS_REUSE_DEPTH__REUSE_DEPTH_MASK 0x000000FFL
++//VGT_GROUP_PRIM_TYPE
++#define VGT_GROUP_PRIM_TYPE__PRIM_TYPE__SHIFT 0x0
++#define VGT_GROUP_PRIM_TYPE__RETAIN_ORDER__SHIFT 0xe
++#define VGT_GROUP_PRIM_TYPE__RETAIN_QUADS__SHIFT 0xf
++#define VGT_GROUP_PRIM_TYPE__PRIM_ORDER__SHIFT 0x10
++#define VGT_GROUP_PRIM_TYPE__PRIM_TYPE_MASK 0x0000001FL
++#define VGT_GROUP_PRIM_TYPE__RETAIN_ORDER_MASK 0x00004000L
++#define VGT_GROUP_PRIM_TYPE__RETAIN_QUADS_MASK 0x00008000L
++#define VGT_GROUP_PRIM_TYPE__PRIM_ORDER_MASK 0x00070000L
++//VGT_GROUP_FIRST_DECR
++#define VGT_GROUP_FIRST_DECR__FIRST_DECR__SHIFT 0x0
++#define VGT_GROUP_FIRST_DECR__FIRST_DECR_MASK 0x0000000FL
++//VGT_GROUP_DECR
++#define VGT_GROUP_DECR__DECR__SHIFT 0x0
++#define VGT_GROUP_DECR__DECR_MASK 0x0000000FL
++//VGT_GROUP_VECT_0_CNTL
++#define VGT_GROUP_VECT_0_CNTL__COMP_X_EN__SHIFT 0x0
++#define VGT_GROUP_VECT_0_CNTL__COMP_Y_EN__SHIFT 0x1
++#define VGT_GROUP_VECT_0_CNTL__COMP_Z_EN__SHIFT 0x2
++#define VGT_GROUP_VECT_0_CNTL__COMP_W_EN__SHIFT 0x3
++#define VGT_GROUP_VECT_0_CNTL__STRIDE__SHIFT 0x8
++#define VGT_GROUP_VECT_0_CNTL__SHIFT__SHIFT 0x10
++#define VGT_GROUP_VECT_0_CNTL__COMP_X_EN_MASK 0x00000001L
++#define VGT_GROUP_VECT_0_CNTL__COMP_Y_EN_MASK 0x00000002L
++#define VGT_GROUP_VECT_0_CNTL__COMP_Z_EN_MASK 0x00000004L
++#define VGT_GROUP_VECT_0_CNTL__COMP_W_EN_MASK 0x00000008L
++#define VGT_GROUP_VECT_0_CNTL__STRIDE_MASK 0x0000FF00L
++#define VGT_GROUP_VECT_0_CNTL__SHIFT_MASK 0x00FF0000L
++//VGT_GROUP_VECT_1_CNTL
++#define VGT_GROUP_VECT_1_CNTL__COMP_X_EN__SHIFT 0x0
++#define VGT_GROUP_VECT_1_CNTL__COMP_Y_EN__SHIFT 0x1
++#define VGT_GROUP_VECT_1_CNTL__COMP_Z_EN__SHIFT 0x2
++#define VGT_GROUP_VECT_1_CNTL__COMP_W_EN__SHIFT 0x3
++#define VGT_GROUP_VECT_1_CNTL__STRIDE__SHIFT 0x8
++#define VGT_GROUP_VECT_1_CNTL__SHIFT__SHIFT 0x10
++#define VGT_GROUP_VECT_1_CNTL__COMP_X_EN_MASK 0x00000001L
++#define VGT_GROUP_VECT_1_CNTL__COMP_Y_EN_MASK 0x00000002L
++#define VGT_GROUP_VECT_1_CNTL__COMP_Z_EN_MASK 0x00000004L
++#define VGT_GROUP_VECT_1_CNTL__COMP_W_EN_MASK 0x00000008L
++#define VGT_GROUP_VECT_1_CNTL__STRIDE_MASK 0x0000FF00L
++#define VGT_GROUP_VECT_1_CNTL__SHIFT_MASK 0x00FF0000L
++//VGT_GROUP_VECT_0_FMT_CNTL
++#define VGT_GROUP_VECT_0_FMT_CNTL__X_CONV__SHIFT 0x0
++#define VGT_GROUP_VECT_0_FMT_CNTL__X_OFFSET__SHIFT 0x4
++#define VGT_GROUP_VECT_0_FMT_CNTL__Y_CONV__SHIFT 0x8
++#define VGT_GROUP_VECT_0_FMT_CNTL__Y_OFFSET__SHIFT 0xc
++#define VGT_GROUP_VECT_0_FMT_CNTL__Z_CONV__SHIFT 0x10
++#define VGT_GROUP_VECT_0_FMT_CNTL__Z_OFFSET__SHIFT 0x14
++#define VGT_GROUP_VECT_0_FMT_CNTL__W_CONV__SHIFT 0x18
++#define VGT_GROUP_VECT_0_FMT_CNTL__W_OFFSET__SHIFT 0x1c
++#define VGT_GROUP_VECT_0_FMT_CNTL__X_CONV_MASK 0x0000000FL
++#define VGT_GROUP_VECT_0_FMT_CNTL__X_OFFSET_MASK 0x000000F0L
++#define VGT_GROUP_VECT_0_FMT_CNTL__Y_CONV_MASK 0x00000F00L
++#define VGT_GROUP_VECT_0_FMT_CNTL__Y_OFFSET_MASK 0x0000F000L
++#define VGT_GROUP_VECT_0_FMT_CNTL__Z_CONV_MASK 0x000F0000L
++#define VGT_GROUP_VECT_0_FMT_CNTL__Z_OFFSET_MASK 0x00F00000L
++#define VGT_GROUP_VECT_0_FMT_CNTL__W_CONV_MASK 0x0F000000L
++#define VGT_GROUP_VECT_0_FMT_CNTL__W_OFFSET_MASK 0xF0000000L
++//VGT_GROUP_VECT_1_FMT_CNTL
++#define VGT_GROUP_VECT_1_FMT_CNTL__X_CONV__SHIFT 0x0
++#define VGT_GROUP_VECT_1_FMT_CNTL__X_OFFSET__SHIFT 0x4
++#define VGT_GROUP_VECT_1_FMT_CNTL__Y_CONV__SHIFT 0x8
++#define VGT_GROUP_VECT_1_FMT_CNTL__Y_OFFSET__SHIFT 0xc
++#define VGT_GROUP_VECT_1_FMT_CNTL__Z_CONV__SHIFT 0x10
++#define VGT_GROUP_VECT_1_FMT_CNTL__Z_OFFSET__SHIFT 0x14
++#define VGT_GROUP_VECT_1_FMT_CNTL__W_CONV__SHIFT 0x18
++#define VGT_GROUP_VECT_1_FMT_CNTL__W_OFFSET__SHIFT 0x1c
++#define VGT_GROUP_VECT_1_FMT_CNTL__X_CONV_MASK 0x0000000FL
++#define VGT_GROUP_VECT_1_FMT_CNTL__X_OFFSET_MASK 0x000000F0L
++#define VGT_GROUP_VECT_1_FMT_CNTL__Y_CONV_MASK 0x00000F00L
++#define VGT_GROUP_VECT_1_FMT_CNTL__Y_OFFSET_MASK 0x0000F000L
++#define VGT_GROUP_VECT_1_FMT_CNTL__Z_CONV_MASK 0x000F0000L
++#define VGT_GROUP_VECT_1_FMT_CNTL__Z_OFFSET_MASK 0x00F00000L
++#define VGT_GROUP_VECT_1_FMT_CNTL__W_CONV_MASK 0x0F000000L
++#define VGT_GROUP_VECT_1_FMT_CNTL__W_OFFSET_MASK 0xF0000000L
++//VGT_GS_MODE
++#define VGT_GS_MODE__MODE__SHIFT 0x0
++#define VGT_GS_MODE__RESERVED_0__SHIFT 0x3
++#define VGT_GS_MODE__CUT_MODE__SHIFT 0x4
++#define VGT_GS_MODE__RESERVED_1__SHIFT 0x6
++#define VGT_GS_MODE__GS_C_PACK_EN__SHIFT 0xb
++#define VGT_GS_MODE__RESERVED_2__SHIFT 0xc
++#define VGT_GS_MODE__ES_PASSTHRU__SHIFT 0xd
++#define VGT_GS_MODE__RESERVED_3__SHIFT 0xe
++#define VGT_GS_MODE__RESERVED_4__SHIFT 0xf
++#define VGT_GS_MODE__RESERVED_5__SHIFT 0x10
++#define VGT_GS_MODE__PARTIAL_THD_AT_EOI__SHIFT 0x11
++#define VGT_GS_MODE__SUPPRESS_CUTS__SHIFT 0x12
++#define VGT_GS_MODE__ES_WRITE_OPTIMIZE__SHIFT 0x13
++#define VGT_GS_MODE__GS_WRITE_OPTIMIZE__SHIFT 0x14
++#define VGT_GS_MODE__ONCHIP__SHIFT 0x15
++#define VGT_GS_MODE__MODE_MASK 0x00000007L
++#define VGT_GS_MODE__RESERVED_0_MASK 0x00000008L
++#define VGT_GS_MODE__CUT_MODE_MASK 0x00000030L
++#define VGT_GS_MODE__RESERVED_1_MASK 0x000007C0L
++#define VGT_GS_MODE__GS_C_PACK_EN_MASK 0x00000800L
++#define VGT_GS_MODE__RESERVED_2_MASK 0x00001000L
++#define VGT_GS_MODE__ES_PASSTHRU_MASK 0x00002000L
++#define VGT_GS_MODE__RESERVED_3_MASK 0x00004000L
++#define VGT_GS_MODE__RESERVED_4_MASK 0x00008000L
++#define VGT_GS_MODE__RESERVED_5_MASK 0x00010000L
++#define VGT_GS_MODE__PARTIAL_THD_AT_EOI_MASK 0x00020000L
++#define VGT_GS_MODE__SUPPRESS_CUTS_MASK 0x00040000L
++#define VGT_GS_MODE__ES_WRITE_OPTIMIZE_MASK 0x00080000L
++#define VGT_GS_MODE__GS_WRITE_OPTIMIZE_MASK 0x00100000L
++#define VGT_GS_MODE__ONCHIP_MASK 0x00600000L
++//VGT_GS_ONCHIP_CNTL
++#define VGT_GS_ONCHIP_CNTL__ES_VERTS_PER_SUBGRP__SHIFT 0x0
++#define VGT_GS_ONCHIP_CNTL__GS_PRIMS_PER_SUBGRP__SHIFT 0xb
++#define VGT_GS_ONCHIP_CNTL__GS_INST_PRIMS_IN_SUBGRP__SHIFT 0x16
++#define VGT_GS_ONCHIP_CNTL__ES_VERTS_PER_SUBGRP_MASK 0x000007FFL
++#define VGT_GS_ONCHIP_CNTL__GS_PRIMS_PER_SUBGRP_MASK 0x003FF800L
++#define VGT_GS_ONCHIP_CNTL__GS_INST_PRIMS_IN_SUBGRP_MASK 0xFFC00000L
++//PA_SC_MODE_CNTL_0
++#define PA_SC_MODE_CNTL_0__MSAA_ENABLE__SHIFT 0x0
++#define PA_SC_MODE_CNTL_0__VPORT_SCISSOR_ENABLE__SHIFT 0x1
++#define PA_SC_MODE_CNTL_0__LINE_STIPPLE_ENABLE__SHIFT 0x2
++#define PA_SC_MODE_CNTL_0__SEND_UNLIT_STILES_TO_PKR__SHIFT 0x3
++#define PA_SC_MODE_CNTL_0__SCALE_LINE_WIDTH_PAD__SHIFT 0x4
++#define PA_SC_MODE_CNTL_0__ALTERNATE_RBS_PER_TILE__SHIFT 0x5
++#define PA_SC_MODE_CNTL_0__COARSE_TILE_STARTS_ON_EVEN_RB__SHIFT 0x6
++#define PA_SC_MODE_CNTL_0__MSAA_ENABLE_MASK 0x00000001L
++#define PA_SC_MODE_CNTL_0__VPORT_SCISSOR_ENABLE_MASK 0x00000002L
++#define PA_SC_MODE_CNTL_0__LINE_STIPPLE_ENABLE_MASK 0x00000004L
++#define PA_SC_MODE_CNTL_0__SEND_UNLIT_STILES_TO_PKR_MASK 0x00000008L
++#define PA_SC_MODE_CNTL_0__SCALE_LINE_WIDTH_PAD_MASK 0x00000010L
++#define PA_SC_MODE_CNTL_0__ALTERNATE_RBS_PER_TILE_MASK 0x00000020L
++#define PA_SC_MODE_CNTL_0__COARSE_TILE_STARTS_ON_EVEN_RB_MASK 0x00000040L
++//PA_SC_MODE_CNTL_1
++#define PA_SC_MODE_CNTL_1__WALK_SIZE__SHIFT 0x0
++#define PA_SC_MODE_CNTL_1__WALK_ALIGNMENT__SHIFT 0x1
++#define PA_SC_MODE_CNTL_1__WALK_ALIGN8_PRIM_FITS_ST__SHIFT 0x2
++#define PA_SC_MODE_CNTL_1__WALK_FENCE_ENABLE__SHIFT 0x3
++#define PA_SC_MODE_CNTL_1__WALK_FENCE_SIZE__SHIFT 0x4
++#define PA_SC_MODE_CNTL_1__SUPERTILE_WALK_ORDER_ENABLE__SHIFT 0x7
++#define PA_SC_MODE_CNTL_1__TILE_WALK_ORDER_ENABLE__SHIFT 0x8
++#define PA_SC_MODE_CNTL_1__TILE_COVER_DISABLE__SHIFT 0x9
++#define PA_SC_MODE_CNTL_1__TILE_COVER_NO_SCISSOR__SHIFT 0xa
++#define PA_SC_MODE_CNTL_1__ZMM_LINE_EXTENT__SHIFT 0xb
++#define PA_SC_MODE_CNTL_1__ZMM_LINE_OFFSET__SHIFT 0xc
++#define PA_SC_MODE_CNTL_1__ZMM_RECT_EXTENT__SHIFT 0xd
++#define PA_SC_MODE_CNTL_1__KILL_PIX_POST_HI_Z__SHIFT 0xe
++#define PA_SC_MODE_CNTL_1__KILL_PIX_POST_DETAIL_MASK__SHIFT 0xf
++#define PA_SC_MODE_CNTL_1__PS_ITER_SAMPLE__SHIFT 0x10
++#define PA_SC_MODE_CNTL_1__MULTI_SHADER_ENGINE_PRIM_DISCARD_ENABLE__SHIFT 0x11
++#define PA_SC_MODE_CNTL_1__MULTI_GPU_SUPERTILE_ENABLE__SHIFT 0x12
++#define PA_SC_MODE_CNTL_1__GPU_ID_OVERRIDE_ENABLE__SHIFT 0x13
++#define PA_SC_MODE_CNTL_1__GPU_ID_OVERRIDE__SHIFT 0x14
++#define PA_SC_MODE_CNTL_1__MULTI_GPU_PRIM_DISCARD_ENABLE__SHIFT 0x18
++#define PA_SC_MODE_CNTL_1__FORCE_EOV_CNTDWN_ENABLE__SHIFT 0x19
++#define PA_SC_MODE_CNTL_1__FORCE_EOV_REZ_ENABLE__SHIFT 0x1a
++#define PA_SC_MODE_CNTL_1__OUT_OF_ORDER_PRIMITIVE_ENABLE__SHIFT 0x1b
++#define PA_SC_MODE_CNTL_1__OUT_OF_ORDER_WATER_MARK__SHIFT 0x1c
++#define PA_SC_MODE_CNTL_1__WALK_SIZE_MASK 0x00000001L
++#define PA_SC_MODE_CNTL_1__WALK_ALIGNMENT_MASK 0x00000002L
++#define PA_SC_MODE_CNTL_1__WALK_ALIGN8_PRIM_FITS_ST_MASK 0x00000004L
++#define PA_SC_MODE_CNTL_1__WALK_FENCE_ENABLE_MASK 0x00000008L
++#define PA_SC_MODE_CNTL_1__WALK_FENCE_SIZE_MASK 0x00000070L
++#define PA_SC_MODE_CNTL_1__SUPERTILE_WALK_ORDER_ENABLE_MASK 0x00000080L
++#define PA_SC_MODE_CNTL_1__TILE_WALK_ORDER_ENABLE_MASK 0x00000100L
++#define PA_SC_MODE_CNTL_1__TILE_COVER_DISABLE_MASK 0x00000200L
++#define PA_SC_MODE_CNTL_1__TILE_COVER_NO_SCISSOR_MASK 0x00000400L
++#define PA_SC_MODE_CNTL_1__ZMM_LINE_EXTENT_MASK 0x00000800L
++#define PA_SC_MODE_CNTL_1__ZMM_LINE_OFFSET_MASK 0x00001000L
++#define PA_SC_MODE_CNTL_1__ZMM_RECT_EXTENT_MASK 0x00002000L
++#define PA_SC_MODE_CNTL_1__KILL_PIX_POST_HI_Z_MASK 0x00004000L
++#define PA_SC_MODE_CNTL_1__KILL_PIX_POST_DETAIL_MASK_MASK 0x00008000L
++#define PA_SC_MODE_CNTL_1__PS_ITER_SAMPLE_MASK 0x00010000L
++#define PA_SC_MODE_CNTL_1__MULTI_SHADER_ENGINE_PRIM_DISCARD_ENABLE_MASK 0x00020000L
++#define PA_SC_MODE_CNTL_1__MULTI_GPU_SUPERTILE_ENABLE_MASK 0x00040000L
++#define PA_SC_MODE_CNTL_1__GPU_ID_OVERRIDE_ENABLE_MASK 0x00080000L
++#define PA_SC_MODE_CNTL_1__GPU_ID_OVERRIDE_MASK 0x00F00000L
++#define PA_SC_MODE_CNTL_1__MULTI_GPU_PRIM_DISCARD_ENABLE_MASK 0x01000000L
++#define PA_SC_MODE_CNTL_1__FORCE_EOV_CNTDWN_ENABLE_MASK 0x02000000L
++#define PA_SC_MODE_CNTL_1__FORCE_EOV_REZ_ENABLE_MASK 0x04000000L
++#define PA_SC_MODE_CNTL_1__OUT_OF_ORDER_PRIMITIVE_ENABLE_MASK 0x08000000L
++#define PA_SC_MODE_CNTL_1__OUT_OF_ORDER_WATER_MARK_MASK 0x70000000L
++//VGT_ENHANCE
++#define VGT_ENHANCE__MISC__SHIFT 0x0
++#define VGT_ENHANCE__MISC_MASK 0xFFFFFFFFL
++//VGT_GS_PER_ES
++#define VGT_GS_PER_ES__GS_PER_ES__SHIFT 0x0
++#define VGT_GS_PER_ES__GS_PER_ES_MASK 0x000007FFL
++//VGT_ES_PER_GS
++#define VGT_ES_PER_GS__ES_PER_GS__SHIFT 0x0
++#define VGT_ES_PER_GS__ES_PER_GS_MASK 0x000007FFL
++//VGT_GS_PER_VS
++#define VGT_GS_PER_VS__GS_PER_VS__SHIFT 0x0
++#define VGT_GS_PER_VS__GS_PER_VS_MASK 0x0000000FL
++//VGT_GSVS_RING_OFFSET_1
++#define VGT_GSVS_RING_OFFSET_1__OFFSET__SHIFT 0x0
++#define VGT_GSVS_RING_OFFSET_1__OFFSET_MASK 0x00007FFFL
++//VGT_GSVS_RING_OFFSET_2
++#define VGT_GSVS_RING_OFFSET_2__OFFSET__SHIFT 0x0
++#define VGT_GSVS_RING_OFFSET_2__OFFSET_MASK 0x00007FFFL
++//VGT_GSVS_RING_OFFSET_3
++#define VGT_GSVS_RING_OFFSET_3__OFFSET__SHIFT 0x0
++#define VGT_GSVS_RING_OFFSET_3__OFFSET_MASK 0x00007FFFL
++//VGT_GS_OUT_PRIM_TYPE
++#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE__SHIFT 0x0
++#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_1__SHIFT 0x8
++#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_2__SHIFT 0x10
++#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_3__SHIFT 0x16
++#define VGT_GS_OUT_PRIM_TYPE__UNIQUE_TYPE_PER_STREAM__SHIFT 0x1f
++#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_MASK 0x0000003FL
++#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_1_MASK 0x00003F00L
++#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_2_MASK 0x003F0000L
++#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_3_MASK 0x0FC00000L
++#define VGT_GS_OUT_PRIM_TYPE__UNIQUE_TYPE_PER_STREAM_MASK 0x80000000L
++//IA_ENHANCE
++#define IA_ENHANCE__MISC__SHIFT 0x0
++#define IA_ENHANCE__MISC_MASK 0xFFFFFFFFL
++//VGT_DMA_SIZE
++#define VGT_DMA_SIZE__NUM_INDICES__SHIFT 0x0
++#define VGT_DMA_SIZE__NUM_INDICES_MASK 0xFFFFFFFFL
++//VGT_DMA_MAX_SIZE
++#define VGT_DMA_MAX_SIZE__MAX_SIZE__SHIFT 0x0
++#define VGT_DMA_MAX_SIZE__MAX_SIZE_MASK 0xFFFFFFFFL
++//VGT_DMA_INDEX_TYPE
++#define VGT_DMA_INDEX_TYPE__INDEX_TYPE__SHIFT 0x0
++#define VGT_DMA_INDEX_TYPE__SWAP_MODE__SHIFT 0x2
++#define VGT_DMA_INDEX_TYPE__BUF_TYPE__SHIFT 0x4
++#define VGT_DMA_INDEX_TYPE__RDREQ_POLICY__SHIFT 0x6
++#define VGT_DMA_INDEX_TYPE__PRIMGEN_EN__SHIFT 0x8
++#define VGT_DMA_INDEX_TYPE__NOT_EOP__SHIFT 0x9
++#define VGT_DMA_INDEX_TYPE__REQ_PATH__SHIFT 0xa
++#define VGT_DMA_INDEX_TYPE__INDEX_TYPE_MASK 0x00000003L
++#define VGT_DMA_INDEX_TYPE__SWAP_MODE_MASK 0x0000000CL
++#define VGT_DMA_INDEX_TYPE__BUF_TYPE_MASK 0x00000030L
++#define VGT_DMA_INDEX_TYPE__RDREQ_POLICY_MASK 0x00000040L
++#define VGT_DMA_INDEX_TYPE__PRIMGEN_EN_MASK 0x00000100L
++#define VGT_DMA_INDEX_TYPE__NOT_EOP_MASK 0x00000200L
++#define VGT_DMA_INDEX_TYPE__REQ_PATH_MASK 0x00000400L
++//WD_ENHANCE
++#define WD_ENHANCE__MISC__SHIFT 0x0
++#define WD_ENHANCE__MISC_MASK 0xFFFFFFFFL
++//VGT_PRIMITIVEID_EN
++#define VGT_PRIMITIVEID_EN__PRIMITIVEID_EN__SHIFT 0x0
++#define VGT_PRIMITIVEID_EN__DISABLE_RESET_ON_EOI__SHIFT 0x1
++#define VGT_PRIMITIVEID_EN__NGG_DISABLE_PROVOK_REUSE__SHIFT 0x2
++#define VGT_PRIMITIVEID_EN__PRIMITIVEID_EN_MASK 0x00000001L
++#define VGT_PRIMITIVEID_EN__DISABLE_RESET_ON_EOI_MASK 0x00000002L
++#define VGT_PRIMITIVEID_EN__NGG_DISABLE_PROVOK_REUSE_MASK 0x00000004L
++//VGT_DMA_NUM_INSTANCES
++#define VGT_DMA_NUM_INSTANCES__NUM_INSTANCES__SHIFT 0x0
++#define VGT_DMA_NUM_INSTANCES__NUM_INSTANCES_MASK 0xFFFFFFFFL
++//VGT_PRIMITIVEID_RESET
++#define VGT_PRIMITIVEID_RESET__VALUE__SHIFT 0x0
++#define VGT_PRIMITIVEID_RESET__VALUE_MASK 0xFFFFFFFFL
++//VGT_EVENT_INITIATOR
++#define VGT_EVENT_INITIATOR__EVENT_TYPE__SHIFT 0x0
++#define VGT_EVENT_INITIATOR__ADDRESS_HI__SHIFT 0xa
++#define VGT_EVENT_INITIATOR__EXTENDED_EVENT__SHIFT 0x1b
++#define VGT_EVENT_INITIATOR__EVENT_TYPE_MASK 0x0000003FL
++#define VGT_EVENT_INITIATOR__ADDRESS_HI_MASK 0x07FFFC00L
++#define VGT_EVENT_INITIATOR__EXTENDED_EVENT_MASK 0x08000000L
++//VGT_GS_MAX_PRIMS_PER_SUBGROUP
++#define VGT_GS_MAX_PRIMS_PER_SUBGROUP__MAX_PRIMS_PER_SUBGROUP__SHIFT 0x0
++#define VGT_GS_MAX_PRIMS_PER_SUBGROUP__MAX_PRIMS_PER_SUBGROUP_MASK 0x0000FFFFL
++//VGT_DRAW_PAYLOAD_CNTL
++#define VGT_DRAW_PAYLOAD_CNTL__OBJPRIM_ID_EN__SHIFT 0x0
++#define VGT_DRAW_PAYLOAD_CNTL__EN_REG_RT_INDEX__SHIFT 0x1
++#define VGT_DRAW_PAYLOAD_CNTL__EN_PIPELINE_PRIMID__SHIFT 0x2
++#define VGT_DRAW_PAYLOAD_CNTL__OBJECT_ID_INST_EN__SHIFT 0x3
++#define VGT_DRAW_PAYLOAD_CNTL__OBJPRIM_ID_EN_MASK 0x00000001L
++#define VGT_DRAW_PAYLOAD_CNTL__EN_REG_RT_INDEX_MASK 0x00000002L
++#define VGT_DRAW_PAYLOAD_CNTL__EN_PIPELINE_PRIMID_MASK 0x00000004L
++#define VGT_DRAW_PAYLOAD_CNTL__OBJECT_ID_INST_EN_MASK 0x00000008L
++//VGT_INSTANCE_STEP_RATE_0
++#define VGT_INSTANCE_STEP_RATE_0__STEP_RATE__SHIFT 0x0
++#define VGT_INSTANCE_STEP_RATE_0__STEP_RATE_MASK 0xFFFFFFFFL
++//VGT_INSTANCE_STEP_RATE_1
++#define VGT_INSTANCE_STEP_RATE_1__STEP_RATE__SHIFT 0x0
++#define VGT_INSTANCE_STEP_RATE_1__STEP_RATE_MASK 0xFFFFFFFFL
++//VGT_ESGS_RING_ITEMSIZE
++#define VGT_ESGS_RING_ITEMSIZE__ITEMSIZE__SHIFT 0x0
++#define VGT_ESGS_RING_ITEMSIZE__ITEMSIZE_MASK 0x00007FFFL
++//VGT_GSVS_RING_ITEMSIZE
++#define VGT_GSVS_RING_ITEMSIZE__ITEMSIZE__SHIFT 0x0
++#define VGT_GSVS_RING_ITEMSIZE__ITEMSIZE_MASK 0x00007FFFL
++//VGT_REUSE_OFF
++#define VGT_REUSE_OFF__REUSE_OFF__SHIFT 0x0
++#define VGT_REUSE_OFF__REUSE_OFF_MASK 0x00000001L
++//VGT_VTX_CNT_EN
++#define VGT_VTX_CNT_EN__VTX_CNT_EN__SHIFT 0x0
++#define VGT_VTX_CNT_EN__VTX_CNT_EN_MASK 0x00000001L
++//DB_HTILE_SURFACE
++#define DB_HTILE_SURFACE__FULL_CACHE__SHIFT 0x1
++#define DB_HTILE_SURFACE__HTILE_USES_PRELOAD_WIN__SHIFT 0x2
++#define DB_HTILE_SURFACE__PRELOAD__SHIFT 0x3
++#define DB_HTILE_SURFACE__PREFETCH_WIDTH__SHIFT 0x4
++#define DB_HTILE_SURFACE__PREFETCH_HEIGHT__SHIFT 0xa
++#define DB_HTILE_SURFACE__DST_OUTSIDE_ZERO_TO_ONE__SHIFT 0x10
++#define DB_HTILE_SURFACE__PIPE_ALIGNED__SHIFT 0x12
++#define DB_HTILE_SURFACE__RB_ALIGNED__SHIFT 0x13
++#define DB_HTILE_SURFACE__FULL_CACHE_MASK 0x00000002L
++#define DB_HTILE_SURFACE__HTILE_USES_PRELOAD_WIN_MASK 0x00000004L
++#define DB_HTILE_SURFACE__PRELOAD_MASK 0x00000008L
++#define DB_HTILE_SURFACE__PREFETCH_WIDTH_MASK 0x000003F0L
++#define DB_HTILE_SURFACE__PREFETCH_HEIGHT_MASK 0x0000FC00L
++#define DB_HTILE_SURFACE__DST_OUTSIDE_ZERO_TO_ONE_MASK 0x00010000L
++#define DB_HTILE_SURFACE__PIPE_ALIGNED_MASK 0x00040000L
++#define DB_HTILE_SURFACE__RB_ALIGNED_MASK 0x00080000L
++//DB_SRESULTS_COMPARE_STATE0
++#define DB_SRESULTS_COMPARE_STATE0__COMPAREFUNC0__SHIFT 0x0
++#define DB_SRESULTS_COMPARE_STATE0__COMPAREVALUE0__SHIFT 0x4
++#define DB_SRESULTS_COMPARE_STATE0__COMPAREMASK0__SHIFT 0xc
++#define DB_SRESULTS_COMPARE_STATE0__ENABLE0__SHIFT 0x18
++#define DB_SRESULTS_COMPARE_STATE0__COMPAREFUNC0_MASK 0x00000007L
++#define DB_SRESULTS_COMPARE_STATE0__COMPAREVALUE0_MASK 0x00000FF0L
++#define DB_SRESULTS_COMPARE_STATE0__COMPAREMASK0_MASK 0x000FF000L
++#define DB_SRESULTS_COMPARE_STATE0__ENABLE0_MASK 0x01000000L
++//DB_SRESULTS_COMPARE_STATE1
++#define DB_SRESULTS_COMPARE_STATE1__COMPAREFUNC1__SHIFT 0x0
++#define DB_SRESULTS_COMPARE_STATE1__COMPAREVALUE1__SHIFT 0x4
++#define DB_SRESULTS_COMPARE_STATE1__COMPAREMASK1__SHIFT 0xc
++#define DB_SRESULTS_COMPARE_STATE1__ENABLE1__SHIFT 0x18
++#define DB_SRESULTS_COMPARE_STATE1__COMPAREFUNC1_MASK 0x00000007L
++#define DB_SRESULTS_COMPARE_STATE1__COMPAREVALUE1_MASK 0x00000FF0L
++#define DB_SRESULTS_COMPARE_STATE1__COMPAREMASK1_MASK 0x000FF000L
++#define DB_SRESULTS_COMPARE_STATE1__ENABLE1_MASK 0x01000000L
++//DB_PRELOAD_CONTROL
++#define DB_PRELOAD_CONTROL__START_X__SHIFT 0x0
++#define DB_PRELOAD_CONTROL__START_Y__SHIFT 0x8
++#define DB_PRELOAD_CONTROL__MAX_X__SHIFT 0x10
++#define DB_PRELOAD_CONTROL__MAX_Y__SHIFT 0x18
++#define DB_PRELOAD_CONTROL__START_X_MASK 0x000000FFL
++#define DB_PRELOAD_CONTROL__START_Y_MASK 0x0000FF00L
++#define DB_PRELOAD_CONTROL__MAX_X_MASK 0x00FF0000L
++#define DB_PRELOAD_CONTROL__MAX_Y_MASK 0xFF000000L
++//VGT_STRMOUT_BUFFER_SIZE_0
++#define VGT_STRMOUT_BUFFER_SIZE_0__SIZE__SHIFT 0x0
++#define VGT_STRMOUT_BUFFER_SIZE_0__SIZE_MASK 0xFFFFFFFFL
++//VGT_STRMOUT_VTX_STRIDE_0
++#define VGT_STRMOUT_VTX_STRIDE_0__STRIDE__SHIFT 0x0
++#define VGT_STRMOUT_VTX_STRIDE_0__STRIDE_MASK 0x000003FFL
++//VGT_STRMOUT_BUFFER_OFFSET_0
++#define VGT_STRMOUT_BUFFER_OFFSET_0__OFFSET__SHIFT 0x0
++#define VGT_STRMOUT_BUFFER_OFFSET_0__OFFSET_MASK 0xFFFFFFFFL
++//VGT_STRMOUT_BUFFER_SIZE_1
++#define VGT_STRMOUT_BUFFER_SIZE_1__SIZE__SHIFT 0x0
++#define VGT_STRMOUT_BUFFER_SIZE_1__SIZE_MASK 0xFFFFFFFFL
++//VGT_STRMOUT_VTX_STRIDE_1
++#define VGT_STRMOUT_VTX_STRIDE_1__STRIDE__SHIFT 0x0
++#define VGT_STRMOUT_VTX_STRIDE_1__STRIDE_MASK 0x000003FFL
++//VGT_STRMOUT_BUFFER_OFFSET_1
++#define VGT_STRMOUT_BUFFER_OFFSET_1__OFFSET__SHIFT 0x0
++#define VGT_STRMOUT_BUFFER_OFFSET_1__OFFSET_MASK 0xFFFFFFFFL
++//VGT_STRMOUT_BUFFER_SIZE_2
++#define VGT_STRMOUT_BUFFER_SIZE_2__SIZE__SHIFT 0x0
++#define VGT_STRMOUT_BUFFER_SIZE_2__SIZE_MASK 0xFFFFFFFFL
++//VGT_STRMOUT_VTX_STRIDE_2
++#define VGT_STRMOUT_VTX_STRIDE_2__STRIDE__SHIFT 0x0
++#define VGT_STRMOUT_VTX_STRIDE_2__STRIDE_MASK 0x000003FFL
++//VGT_STRMOUT_BUFFER_OFFSET_2
++#define VGT_STRMOUT_BUFFER_OFFSET_2__OFFSET__SHIFT 0x0
++#define VGT_STRMOUT_BUFFER_OFFSET_2__OFFSET_MASK 0xFFFFFFFFL
++//VGT_STRMOUT_BUFFER_SIZE_3
++#define VGT_STRMOUT_BUFFER_SIZE_3__SIZE__SHIFT 0x0
++#define VGT_STRMOUT_BUFFER_SIZE_3__SIZE_MASK 0xFFFFFFFFL
++//VGT_STRMOUT_VTX_STRIDE_3
++#define VGT_STRMOUT_VTX_STRIDE_3__STRIDE__SHIFT 0x0
++#define VGT_STRMOUT_VTX_STRIDE_3__STRIDE_MASK 0x000003FFL
++//VGT_STRMOUT_BUFFER_OFFSET_3
++#define VGT_STRMOUT_BUFFER_OFFSET_3__OFFSET__SHIFT 0x0
++#define VGT_STRMOUT_BUFFER_OFFSET_3__OFFSET_MASK 0xFFFFFFFFL
++//VGT_STRMOUT_DRAW_OPAQUE_OFFSET
++#define VGT_STRMOUT_DRAW_OPAQUE_OFFSET__OFFSET__SHIFT 0x0
++#define VGT_STRMOUT_DRAW_OPAQUE_OFFSET__OFFSET_MASK 0xFFFFFFFFL
++//VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
++#define VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE__SIZE__SHIFT 0x0
++#define VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE__SIZE_MASK 0xFFFFFFFFL
++//VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
++#define VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE__VERTEX_STRIDE__SHIFT 0x0
++#define VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE__VERTEX_STRIDE_MASK 0x000001FFL
++//VGT_GS_MAX_VERT_OUT
++#define VGT_GS_MAX_VERT_OUT__MAX_VERT_OUT__SHIFT 0x0
++#define VGT_GS_MAX_VERT_OUT__MAX_VERT_OUT_MASK 0x000007FFL
++//VGT_TESS_DISTRIBUTION
++#define VGT_TESS_DISTRIBUTION__ACCUM_ISOLINE__SHIFT 0x0
++#define VGT_TESS_DISTRIBUTION__ACCUM_TRI__SHIFT 0x8
++#define VGT_TESS_DISTRIBUTION__ACCUM_QUAD__SHIFT 0x10
++#define VGT_TESS_DISTRIBUTION__DONUT_SPLIT__SHIFT 0x18
++#define VGT_TESS_DISTRIBUTION__TRAP_SPLIT__SHIFT 0x1d
++#define VGT_TESS_DISTRIBUTION__ACCUM_ISOLINE_MASK 0x000000FFL
++#define VGT_TESS_DISTRIBUTION__ACCUM_TRI_MASK 0x0000FF00L
++#define VGT_TESS_DISTRIBUTION__ACCUM_QUAD_MASK 0x00FF0000L
++#define VGT_TESS_DISTRIBUTION__DONUT_SPLIT_MASK 0x1F000000L
++#define VGT_TESS_DISTRIBUTION__TRAP_SPLIT_MASK 0xE0000000L
++//VGT_SHADER_STAGES_EN
++#define VGT_SHADER_STAGES_EN__LS_EN__SHIFT 0x0
++#define VGT_SHADER_STAGES_EN__HS_EN__SHIFT 0x2
++#define VGT_SHADER_STAGES_EN__ES_EN__SHIFT 0x3
++#define VGT_SHADER_STAGES_EN__GS_EN__SHIFT 0x5
++#define VGT_SHADER_STAGES_EN__VS_EN__SHIFT 0x6
++#define VGT_SHADER_STAGES_EN__DISPATCH_DRAW_EN__SHIFT 0x9
++#define VGT_SHADER_STAGES_EN__DIS_DEALLOC_ACCUM_0__SHIFT 0xa
++#define VGT_SHADER_STAGES_EN__DIS_DEALLOC_ACCUM_1__SHIFT 0xb
++#define VGT_SHADER_STAGES_EN__VS_WAVE_ID_EN__SHIFT 0xc
++#define VGT_SHADER_STAGES_EN__PRIMGEN_EN__SHIFT 0xd
++#define VGT_SHADER_STAGES_EN__ORDERED_ID_MODE__SHIFT 0xe
++#define VGT_SHADER_STAGES_EN__MAX_PRIMGRP_IN_WAVE__SHIFT 0xf
++#define VGT_SHADER_STAGES_EN__GS_FAST_LAUNCH__SHIFT 0x13
++#define VGT_SHADER_STAGES_EN__LS_EN_MASK 0x00000003L
++#define VGT_SHADER_STAGES_EN__HS_EN_MASK 0x00000004L
++#define VGT_SHADER_STAGES_EN__ES_EN_MASK 0x00000018L
++#define VGT_SHADER_STAGES_EN__GS_EN_MASK 0x00000020L
++#define VGT_SHADER_STAGES_EN__VS_EN_MASK 0x000000C0L
++#define VGT_SHADER_STAGES_EN__DISPATCH_DRAW_EN_MASK 0x00000200L
++#define VGT_SHADER_STAGES_EN__DIS_DEALLOC_ACCUM_0_MASK 0x00000400L
++#define VGT_SHADER_STAGES_EN__DIS_DEALLOC_ACCUM_1_MASK 0x00000800L
++#define VGT_SHADER_STAGES_EN__VS_WAVE_ID_EN_MASK 0x00001000L
++#define VGT_SHADER_STAGES_EN__PRIMGEN_EN_MASK 0x00002000L
++#define VGT_SHADER_STAGES_EN__ORDERED_ID_MODE_MASK 0x00004000L
++#define VGT_SHADER_STAGES_EN__MAX_PRIMGRP_IN_WAVE_MASK 0x00078000L
++#define VGT_SHADER_STAGES_EN__GS_FAST_LAUNCH_MASK 0x00080000L
++//VGT_LS_HS_CONFIG
++#define VGT_LS_HS_CONFIG__NUM_PATCHES__SHIFT 0x0
++#define VGT_LS_HS_CONFIG__HS_NUM_INPUT_CP__SHIFT 0x8
++#define VGT_LS_HS_CONFIG__HS_NUM_OUTPUT_CP__SHIFT 0xe
++#define VGT_LS_HS_CONFIG__NUM_PATCHES_MASK 0x000000FFL
++#define VGT_LS_HS_CONFIG__HS_NUM_INPUT_CP_MASK 0x00003F00L
++#define VGT_LS_HS_CONFIG__HS_NUM_OUTPUT_CP_MASK 0x000FC000L
++//VGT_GS_VERT_ITEMSIZE
++#define VGT_GS_VERT_ITEMSIZE__ITEMSIZE__SHIFT 0x0
++#define VGT_GS_VERT_ITEMSIZE__ITEMSIZE_MASK 0x00007FFFL
++//VGT_GS_VERT_ITEMSIZE_1
++#define VGT_GS_VERT_ITEMSIZE_1__ITEMSIZE__SHIFT 0x0
++#define VGT_GS_VERT_ITEMSIZE_1__ITEMSIZE_MASK 0x00007FFFL
++//VGT_GS_VERT_ITEMSIZE_2
++#define VGT_GS_VERT_ITEMSIZE_2__ITEMSIZE__SHIFT 0x0
++#define VGT_GS_VERT_ITEMSIZE_2__ITEMSIZE_MASK 0x00007FFFL
++//VGT_GS_VERT_ITEMSIZE_3
++#define VGT_GS_VERT_ITEMSIZE_3__ITEMSIZE__SHIFT 0x0
++#define VGT_GS_VERT_ITEMSIZE_3__ITEMSIZE_MASK 0x00007FFFL
++//VGT_TF_PARAM
++#define VGT_TF_PARAM__TYPE__SHIFT 0x0
++#define VGT_TF_PARAM__PARTITIONING__SHIFT 0x2
++#define VGT_TF_PARAM__TOPOLOGY__SHIFT 0x5
++#define VGT_TF_PARAM__RESERVED_REDUC_AXIS__SHIFT 0x8
++#define VGT_TF_PARAM__DEPRECATED__SHIFT 0x9
++#define VGT_TF_PARAM__DISABLE_DONUTS__SHIFT 0xe
++#define VGT_TF_PARAM__RDREQ_POLICY__SHIFT 0xf
++#define VGT_TF_PARAM__DISTRIBUTION_MODE__SHIFT 0x11
++#define VGT_TF_PARAM__TYPE_MASK 0x00000003L
++#define VGT_TF_PARAM__PARTITIONING_MASK 0x0000001CL
++#define VGT_TF_PARAM__TOPOLOGY_MASK 0x000000E0L
++#define VGT_TF_PARAM__RESERVED_REDUC_AXIS_MASK 0x00000100L
++#define VGT_TF_PARAM__DEPRECATED_MASK 0x00000200L
++#define VGT_TF_PARAM__DISABLE_DONUTS_MASK 0x00004000L
++#define VGT_TF_PARAM__RDREQ_POLICY_MASK 0x00008000L
++#define VGT_TF_PARAM__DISTRIBUTION_MODE_MASK 0x00060000L
++//DB_ALPHA_TO_MASK
++#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_ENABLE__SHIFT 0x0
++#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET0__SHIFT 0x8
++#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET1__SHIFT 0xa
++#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET2__SHIFT 0xc
++#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET3__SHIFT 0xe
++#define DB_ALPHA_TO_MASK__OFFSET_ROUND__SHIFT 0x10
++#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_ENABLE_MASK 0x00000001L
++#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET0_MASK 0x00000300L
++#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET1_MASK 0x00000C00L
++#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET2_MASK 0x00003000L
++#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET3_MASK 0x0000C000L
++#define DB_ALPHA_TO_MASK__OFFSET_ROUND_MASK 0x00010000L
++//VGT_DISPATCH_DRAW_INDEX
++#define VGT_DISPATCH_DRAW_INDEX__MATCH_INDEX__SHIFT 0x0
++#define VGT_DISPATCH_DRAW_INDEX__MATCH_INDEX_MASK 0xFFFFFFFFL
++//PA_SU_POLY_OFFSET_DB_FMT_CNTL
++#define PA_SU_POLY_OFFSET_DB_FMT_CNTL__POLY_OFFSET_NEG_NUM_DB_BITS__SHIFT 0x0
++#define PA_SU_POLY_OFFSET_DB_FMT_CNTL__POLY_OFFSET_DB_IS_FLOAT_FMT__SHIFT 0x8
++#define PA_SU_POLY_OFFSET_DB_FMT_CNTL__POLY_OFFSET_NEG_NUM_DB_BITS_MASK 0x000000FFL
++#define PA_SU_POLY_OFFSET_DB_FMT_CNTL__POLY_OFFSET_DB_IS_FLOAT_FMT_MASK 0x00000100L
++//PA_SU_POLY_OFFSET_CLAMP
++#define PA_SU_POLY_OFFSET_CLAMP__CLAMP__SHIFT 0x0
++#define PA_SU_POLY_OFFSET_CLAMP__CLAMP_MASK 0xFFFFFFFFL
++//PA_SU_POLY_OFFSET_FRONT_SCALE
++#define PA_SU_POLY_OFFSET_FRONT_SCALE__SCALE__SHIFT 0x0
++#define PA_SU_POLY_OFFSET_FRONT_SCALE__SCALE_MASK 0xFFFFFFFFL
++//PA_SU_POLY_OFFSET_FRONT_OFFSET
++#define PA_SU_POLY_OFFSET_FRONT_OFFSET__OFFSET__SHIFT 0x0
++#define PA_SU_POLY_OFFSET_FRONT_OFFSET__OFFSET_MASK 0xFFFFFFFFL
++//PA_SU_POLY_OFFSET_BACK_SCALE
++#define PA_SU_POLY_OFFSET_BACK_SCALE__SCALE__SHIFT 0x0
++#define PA_SU_POLY_OFFSET_BACK_SCALE__SCALE_MASK 0xFFFFFFFFL
++//PA_SU_POLY_OFFSET_BACK_OFFSET
++#define PA_SU_POLY_OFFSET_BACK_OFFSET__OFFSET__SHIFT 0x0
++#define PA_SU_POLY_OFFSET_BACK_OFFSET__OFFSET_MASK 0xFFFFFFFFL
++//VGT_GS_INSTANCE_CNT
++#define VGT_GS_INSTANCE_CNT__ENABLE__SHIFT 0x0
++#define VGT_GS_INSTANCE_CNT__CNT__SHIFT 0x2
++#define VGT_GS_INSTANCE_CNT__ENABLE_MASK 0x00000001L
++#define VGT_GS_INSTANCE_CNT__CNT_MASK 0x000001FCL
++//VGT_STRMOUT_CONFIG
++#define VGT_STRMOUT_CONFIG__STREAMOUT_0_EN__SHIFT 0x0
++#define VGT_STRMOUT_CONFIG__STREAMOUT_1_EN__SHIFT 0x1
++#define VGT_STRMOUT_CONFIG__STREAMOUT_2_EN__SHIFT 0x2
++#define VGT_STRMOUT_CONFIG__STREAMOUT_3_EN__SHIFT 0x3
++#define VGT_STRMOUT_CONFIG__RAST_STREAM__SHIFT 0x4
++#define VGT_STRMOUT_CONFIG__EN_PRIMS_NEEDED_CNT__SHIFT 0x7
++#define VGT_STRMOUT_CONFIG__RAST_STREAM_MASK__SHIFT 0x8
++#define VGT_STRMOUT_CONFIG__USE_RAST_STREAM_MASK__SHIFT 0x1f
++#define VGT_STRMOUT_CONFIG__STREAMOUT_0_EN_MASK 0x00000001L
++#define VGT_STRMOUT_CONFIG__STREAMOUT_1_EN_MASK 0x00000002L
++#define VGT_STRMOUT_CONFIG__STREAMOUT_2_EN_MASK 0x00000004L
++#define VGT_STRMOUT_CONFIG__STREAMOUT_3_EN_MASK 0x00000008L
++#define VGT_STRMOUT_CONFIG__RAST_STREAM_MASK 0x00000070L
++#define VGT_STRMOUT_CONFIG__EN_PRIMS_NEEDED_CNT_MASK 0x00000080L
++#define VGT_STRMOUT_CONFIG__RAST_STREAM_MASK_MASK 0x00000F00L
++#define VGT_STRMOUT_CONFIG__USE_RAST_STREAM_MASK_MASK 0x80000000L
++//VGT_STRMOUT_BUFFER_CONFIG
++#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_0_BUFFER_EN__SHIFT 0x0
++#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_1_BUFFER_EN__SHIFT 0x4
++#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_2_BUFFER_EN__SHIFT 0x8
++#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_3_BUFFER_EN__SHIFT 0xc
++#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_0_BUFFER_EN_MASK 0x0000000FL
++#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_1_BUFFER_EN_MASK 0x000000F0L
++#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_2_BUFFER_EN_MASK 0x00000F00L
++#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_3_BUFFER_EN_MASK 0x0000F000L
++//VGT_DMA_EVENT_INITIATOR
++#define VGT_DMA_EVENT_INITIATOR__EVENT_TYPE__SHIFT 0x0
++#define VGT_DMA_EVENT_INITIATOR__ADDRESS_HI__SHIFT 0xa
++#define VGT_DMA_EVENT_INITIATOR__EXTENDED_EVENT__SHIFT 0x1b
++#define VGT_DMA_EVENT_INITIATOR__EVENT_TYPE_MASK 0x0000003FL
++#define VGT_DMA_EVENT_INITIATOR__ADDRESS_HI_MASK 0x07FFFC00L
++#define VGT_DMA_EVENT_INITIATOR__EXTENDED_EVENT_MASK 0x08000000L
++//PA_SC_CENTROID_PRIORITY_0
++#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_0__SHIFT 0x0
++#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_1__SHIFT 0x4
++#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_2__SHIFT 0x8
++#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_3__SHIFT 0xc
++#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_4__SHIFT 0x10
++#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_5__SHIFT 0x14
++#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_6__SHIFT 0x18
++#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_7__SHIFT 0x1c
++#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_0_MASK 0x0000000FL
++#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_1_MASK 0x000000F0L
++#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_2_MASK 0x00000F00L
++#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_3_MASK 0x0000F000L
++#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_4_MASK 0x000F0000L
++#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_5_MASK 0x00F00000L
++#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_6_MASK 0x0F000000L
++#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_7_MASK 0xF0000000L
++//PA_SC_CENTROID_PRIORITY_1
++#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_8__SHIFT 0x0
++#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_9__SHIFT 0x4
++#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_10__SHIFT 0x8
++#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_11__SHIFT 0xc
++#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_12__SHIFT 0x10
++#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_13__SHIFT 0x14
++#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_14__SHIFT 0x18
++#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_15__SHIFT 0x1c
++#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_8_MASK 0x0000000FL
++#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_9_MASK 0x000000F0L
++#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_10_MASK 0x00000F00L
++#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_11_MASK 0x0000F000L
++#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_12_MASK 0x000F0000L
++#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_13_MASK 0x00F00000L
++#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_14_MASK 0x0F000000L
++#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_15_MASK 0xF0000000L
++//PA_SC_LINE_CNTL
++#define PA_SC_LINE_CNTL__EXPAND_LINE_WIDTH__SHIFT 0x9
++#define PA_SC_LINE_CNTL__LAST_PIXEL__SHIFT 0xa
++#define PA_SC_LINE_CNTL__PERPENDICULAR_ENDCAP_ENA__SHIFT 0xb
++#define PA_SC_LINE_CNTL__DX10_DIAMOND_TEST_ENA__SHIFT 0xc
++#define PA_SC_LINE_CNTL__EXPAND_LINE_WIDTH_MASK 0x00000200L
++#define PA_SC_LINE_CNTL__LAST_PIXEL_MASK 0x00000400L
++#define PA_SC_LINE_CNTL__PERPENDICULAR_ENDCAP_ENA_MASK 0x00000800L
++#define PA_SC_LINE_CNTL__DX10_DIAMOND_TEST_ENA_MASK 0x00001000L
++//PA_SC_AA_CONFIG
++#define PA_SC_AA_CONFIG__MSAA_NUM_SAMPLES__SHIFT 0x0
++#define PA_SC_AA_CONFIG__AA_MASK_CENTROID_DTMN__SHIFT 0x4
++#define PA_SC_AA_CONFIG__MAX_SAMPLE_DIST__SHIFT 0xd
++#define PA_SC_AA_CONFIG__MSAA_EXPOSED_SAMPLES__SHIFT 0x14
++#define PA_SC_AA_CONFIG__DETAIL_TO_EXPOSED_MODE__SHIFT 0x18
++#define PA_SC_AA_CONFIG__COVERAGE_TO_SHADER_SELECT__SHIFT 0x1a
++#define PA_SC_AA_CONFIG__MSAA_NUM_SAMPLES_MASK 0x00000007L
++#define PA_SC_AA_CONFIG__AA_MASK_CENTROID_DTMN_MASK 0x00000010L
++#define PA_SC_AA_CONFIG__MAX_SAMPLE_DIST_MASK 0x0001E000L
++#define PA_SC_AA_CONFIG__MSAA_EXPOSED_SAMPLES_MASK 0x00700000L
++#define PA_SC_AA_CONFIG__DETAIL_TO_EXPOSED_MODE_MASK 0x03000000L
++#define PA_SC_AA_CONFIG__COVERAGE_TO_SHADER_SELECT_MASK 0x0C000000L
++//PA_SU_VTX_CNTL
++#define PA_SU_VTX_CNTL__PIX_CENTER__SHIFT 0x0
++#define PA_SU_VTX_CNTL__ROUND_MODE__SHIFT 0x1
++#define PA_SU_VTX_CNTL__QUANT_MODE__SHIFT 0x3
++#define PA_SU_VTX_CNTL__PIX_CENTER_MASK 0x00000001L
++#define PA_SU_VTX_CNTL__ROUND_MODE_MASK 0x00000006L
++#define PA_SU_VTX_CNTL__QUANT_MODE_MASK 0x00000038L
++//PA_CL_GB_VERT_CLIP_ADJ
++#define PA_CL_GB_VERT_CLIP_ADJ__DATA_REGISTER__SHIFT 0x0
++#define PA_CL_GB_VERT_CLIP_ADJ__DATA_REGISTER_MASK 0xFFFFFFFFL
++//PA_CL_GB_VERT_DISC_ADJ
++#define PA_CL_GB_VERT_DISC_ADJ__DATA_REGISTER__SHIFT 0x0
++#define PA_CL_GB_VERT_DISC_ADJ__DATA_REGISTER_MASK 0xFFFFFFFFL
++//PA_CL_GB_HORZ_CLIP_ADJ
++#define PA_CL_GB_HORZ_CLIP_ADJ__DATA_REGISTER__SHIFT 0x0
++#define PA_CL_GB_HORZ_CLIP_ADJ__DATA_REGISTER_MASK 0xFFFFFFFFL
++//PA_CL_GB_HORZ_DISC_ADJ
++#define PA_CL_GB_HORZ_DISC_ADJ__DATA_REGISTER__SHIFT 0x0
++#define PA_CL_GB_HORZ_DISC_ADJ__DATA_REGISTER_MASK 0xFFFFFFFFL
++//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S0_X__SHIFT 0x0
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S0_Y__SHIFT 0x4
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S1_X__SHIFT 0x8
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S1_Y__SHIFT 0xc
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S2_X__SHIFT 0x10
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S2_Y__SHIFT 0x14
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S3_X__SHIFT 0x18
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S3_Y__SHIFT 0x1c
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S0_X_MASK 0x0000000FL
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S0_Y_MASK 0x000000F0L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S1_X_MASK 0x00000F00L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S1_Y_MASK 0x0000F000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S2_X_MASK 0x000F0000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S2_Y_MASK 0x00F00000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S3_X_MASK 0x0F000000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S3_Y_MASK 0xF0000000L
++//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S4_X__SHIFT 0x0
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S4_Y__SHIFT 0x4
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S5_X__SHIFT 0x8
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S5_Y__SHIFT 0xc
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S6_X__SHIFT 0x10
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S6_Y__SHIFT 0x14
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S7_X__SHIFT 0x18
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S7_Y__SHIFT 0x1c
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S4_X_MASK 0x0000000FL
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S4_Y_MASK 0x000000F0L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S5_X_MASK 0x00000F00L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S5_Y_MASK 0x0000F000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S6_X_MASK 0x000F0000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S6_Y_MASK 0x00F00000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S7_X_MASK 0x0F000000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S7_Y_MASK 0xF0000000L
++//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S8_X__SHIFT 0x0
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S8_Y__SHIFT 0x4
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S9_X__SHIFT 0x8
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S9_Y__SHIFT 0xc
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S10_X__SHIFT 0x10
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S10_Y__SHIFT 0x14
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S11_X__SHIFT 0x18
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S11_Y__SHIFT 0x1c
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S8_X_MASK 0x0000000FL
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S8_Y_MASK 0x000000F0L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S9_X_MASK 0x00000F00L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S9_Y_MASK 0x0000F000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S10_X_MASK 0x000F0000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S10_Y_MASK 0x00F00000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S11_X_MASK 0x0F000000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S11_Y_MASK 0xF0000000L
++//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S12_X__SHIFT 0x0
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S12_Y__SHIFT 0x4
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S13_X__SHIFT 0x8
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S13_Y__SHIFT 0xc
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S14_X__SHIFT 0x10
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S14_Y__SHIFT 0x14
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S15_X__SHIFT 0x18
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S15_Y__SHIFT 0x1c
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S12_X_MASK 0x0000000FL
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S12_Y_MASK 0x000000F0L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S13_X_MASK 0x00000F00L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S13_Y_MASK 0x0000F000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S14_X_MASK 0x000F0000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S14_Y_MASK 0x00F00000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S15_X_MASK 0x0F000000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S15_Y_MASK 0xF0000000L
++//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S0_X__SHIFT 0x0
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S0_Y__SHIFT 0x4
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S1_X__SHIFT 0x8
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S1_Y__SHIFT 0xc
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S2_X__SHIFT 0x10
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S2_Y__SHIFT 0x14
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S3_X__SHIFT 0x18
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S3_Y__SHIFT 0x1c
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S0_X_MASK 0x0000000FL
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S0_Y_MASK 0x000000F0L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S1_X_MASK 0x00000F00L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S1_Y_MASK 0x0000F000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S2_X_MASK 0x000F0000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S2_Y_MASK 0x00F00000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S3_X_MASK 0x0F000000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S3_Y_MASK 0xF0000000L
++//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S4_X__SHIFT 0x0
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S4_Y__SHIFT 0x4
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S5_X__SHIFT 0x8
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S5_Y__SHIFT 0xc
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S6_X__SHIFT 0x10
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S6_Y__SHIFT 0x14
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S7_X__SHIFT 0x18
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S7_Y__SHIFT 0x1c
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S4_X_MASK 0x0000000FL
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S4_Y_MASK 0x000000F0L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S5_X_MASK 0x00000F00L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S5_Y_MASK 0x0000F000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S6_X_MASK 0x000F0000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S6_Y_MASK 0x00F00000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S7_X_MASK 0x0F000000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S7_Y_MASK 0xF0000000L
++//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S8_X__SHIFT 0x0
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S8_Y__SHIFT 0x4
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S9_X__SHIFT 0x8
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S9_Y__SHIFT 0xc
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S10_X__SHIFT 0x10
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S10_Y__SHIFT 0x14
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S11_X__SHIFT 0x18
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S11_Y__SHIFT 0x1c
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S8_X_MASK 0x0000000FL
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S8_Y_MASK 0x000000F0L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S9_X_MASK 0x00000F00L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S9_Y_MASK 0x0000F000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S10_X_MASK 0x000F0000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S10_Y_MASK 0x00F00000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S11_X_MASK 0x0F000000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S11_Y_MASK 0xF0000000L
++//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S12_X__SHIFT 0x0
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S12_Y__SHIFT 0x4
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S13_X__SHIFT 0x8
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S13_Y__SHIFT 0xc
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S14_X__SHIFT 0x10
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S14_Y__SHIFT 0x14
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S15_X__SHIFT 0x18
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S15_Y__SHIFT 0x1c
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S12_X_MASK 0x0000000FL
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S12_Y_MASK 0x000000F0L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S13_X_MASK 0x00000F00L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S13_Y_MASK 0x0000F000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S14_X_MASK 0x000F0000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S14_Y_MASK 0x00F00000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S15_X_MASK 0x0F000000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S15_Y_MASK 0xF0000000L
++//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S0_X__SHIFT 0x0
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S0_Y__SHIFT 0x4
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S1_X__SHIFT 0x8
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S1_Y__SHIFT 0xc
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S2_X__SHIFT 0x10
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S2_Y__SHIFT 0x14
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S3_X__SHIFT 0x18
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S3_Y__SHIFT 0x1c
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S0_X_MASK 0x0000000FL
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S0_Y_MASK 0x000000F0L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S1_X_MASK 0x00000F00L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S1_Y_MASK 0x0000F000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S2_X_MASK 0x000F0000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S2_Y_MASK 0x00F00000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S3_X_MASK 0x0F000000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S3_Y_MASK 0xF0000000L
++//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S4_X__SHIFT 0x0
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S4_Y__SHIFT 0x4
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S5_X__SHIFT 0x8
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S5_Y__SHIFT 0xc
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S6_X__SHIFT 0x10
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S6_Y__SHIFT 0x14
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S7_X__SHIFT 0x18
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S7_Y__SHIFT 0x1c
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S4_X_MASK 0x0000000FL
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S4_Y_MASK 0x000000F0L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S5_X_MASK 0x00000F00L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S5_Y_MASK 0x0000F000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S6_X_MASK 0x000F0000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S6_Y_MASK 0x00F00000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S7_X_MASK 0x0F000000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S7_Y_MASK 0xF0000000L
++//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S8_X__SHIFT 0x0
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S8_Y__SHIFT 0x4
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S9_X__SHIFT 0x8
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S9_Y__SHIFT 0xc
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S10_X__SHIFT 0x10
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S10_Y__SHIFT 0x14
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S11_X__SHIFT 0x18
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S11_Y__SHIFT 0x1c
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S8_X_MASK 0x0000000FL
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S8_Y_MASK 0x000000F0L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S9_X_MASK 0x00000F00L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S9_Y_MASK 0x0000F000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S10_X_MASK 0x000F0000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S10_Y_MASK 0x00F00000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S11_X_MASK 0x0F000000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S11_Y_MASK 0xF0000000L
++//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S12_X__SHIFT 0x0
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S12_Y__SHIFT 0x4
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S13_X__SHIFT 0x8
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S13_Y__SHIFT 0xc
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S14_X__SHIFT 0x10
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S14_Y__SHIFT 0x14
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S15_X__SHIFT 0x18
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S15_Y__SHIFT 0x1c
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S12_X_MASK 0x0000000FL
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S12_Y_MASK 0x000000F0L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S13_X_MASK 0x00000F00L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S13_Y_MASK 0x0000F000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S14_X_MASK 0x000F0000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S14_Y_MASK 0x00F00000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S15_X_MASK 0x0F000000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S15_Y_MASK 0xF0000000L
++//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S0_X__SHIFT 0x0
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S0_Y__SHIFT 0x4
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S1_X__SHIFT 0x8
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S1_Y__SHIFT 0xc
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S2_X__SHIFT 0x10
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S2_Y__SHIFT 0x14
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S3_X__SHIFT 0x18
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S3_Y__SHIFT 0x1c
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S0_X_MASK 0x0000000FL
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S0_Y_MASK 0x000000F0L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S1_X_MASK 0x00000F00L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S1_Y_MASK 0x0000F000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S2_X_MASK 0x000F0000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S2_Y_MASK 0x00F00000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S3_X_MASK 0x0F000000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S3_Y_MASK 0xF0000000L
++//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S4_X__SHIFT 0x0
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S4_Y__SHIFT 0x4
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S5_X__SHIFT 0x8
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S5_Y__SHIFT 0xc
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S6_X__SHIFT 0x10
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S6_Y__SHIFT 0x14
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S7_X__SHIFT 0x18
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S7_Y__SHIFT 0x1c
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S4_X_MASK 0x0000000FL
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S4_Y_MASK 0x000000F0L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S5_X_MASK 0x00000F00L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S5_Y_MASK 0x0000F000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S6_X_MASK 0x000F0000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S6_Y_MASK 0x00F00000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S7_X_MASK 0x0F000000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S7_Y_MASK 0xF0000000L
++//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S8_X__SHIFT 0x0
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S8_Y__SHIFT 0x4
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S9_X__SHIFT 0x8
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S9_Y__SHIFT 0xc
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S10_X__SHIFT 0x10
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S10_Y__SHIFT 0x14
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S11_X__SHIFT 0x18
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S11_Y__SHIFT 0x1c
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S8_X_MASK 0x0000000FL
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S8_Y_MASK 0x000000F0L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S9_X_MASK 0x00000F00L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S9_Y_MASK 0x0000F000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S10_X_MASK 0x000F0000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S10_Y_MASK 0x00F00000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S11_X_MASK 0x0F000000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S11_Y_MASK 0xF0000000L
++//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S12_X__SHIFT 0x0
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S12_Y__SHIFT 0x4
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S13_X__SHIFT 0x8
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S13_Y__SHIFT 0xc
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S14_X__SHIFT 0x10
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S14_Y__SHIFT 0x14
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S15_X__SHIFT 0x18
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S15_Y__SHIFT 0x1c
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S12_X_MASK 0x0000000FL
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S12_Y_MASK 0x000000F0L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S13_X_MASK 0x00000F00L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S13_Y_MASK 0x0000F000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S14_X_MASK 0x000F0000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S14_Y_MASK 0x00F00000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S15_X_MASK 0x0F000000L
++#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S15_Y_MASK 0xF0000000L
++//PA_SC_AA_MASK_X0Y0_X1Y0
++#define PA_SC_AA_MASK_X0Y0_X1Y0__AA_MASK_X0Y0__SHIFT 0x0
++#define PA_SC_AA_MASK_X0Y0_X1Y0__AA_MASK_X1Y0__SHIFT 0x10
++#define PA_SC_AA_MASK_X0Y0_X1Y0__AA_MASK_X0Y0_MASK 0x0000FFFFL
++#define PA_SC_AA_MASK_X0Y0_X1Y0__AA_MASK_X1Y0_MASK 0xFFFF0000L
++//PA_SC_AA_MASK_X0Y1_X1Y1
++#define PA_SC_AA_MASK_X0Y1_X1Y1__AA_MASK_X0Y1__SHIFT 0x0
++#define PA_SC_AA_MASK_X0Y1_X1Y1__AA_MASK_X1Y1__SHIFT 0x10
++#define PA_SC_AA_MASK_X0Y1_X1Y1__AA_MASK_X0Y1_MASK 0x0000FFFFL
++#define PA_SC_AA_MASK_X0Y1_X1Y1__AA_MASK_X1Y1_MASK 0xFFFF0000L
++//PA_SC_SHADER_CONTROL
++#define PA_SC_SHADER_CONTROL__REALIGN_DQUADS_AFTER_N_WAVES__SHIFT 0x0
++#define PA_SC_SHADER_CONTROL__LOAD_COLLISION_WAVEID__SHIFT 0x2
++#define PA_SC_SHADER_CONTROL__LOAD_INTRAWAVE_COLLISION__SHIFT 0x3
++#define PA_SC_SHADER_CONTROL__REALIGN_DQUADS_AFTER_N_WAVES_MASK 0x00000003L
++#define PA_SC_SHADER_CONTROL__LOAD_COLLISION_WAVEID_MASK 0x00000004L
++#define PA_SC_SHADER_CONTROL__LOAD_INTRAWAVE_COLLISION_MASK 0x00000008L
++//PA_SC_BINNER_CNTL_0
++#define PA_SC_BINNER_CNTL_0__BINNING_MODE__SHIFT 0x0
++#define PA_SC_BINNER_CNTL_0__BIN_SIZE_X__SHIFT 0x2
++#define PA_SC_BINNER_CNTL_0__BIN_SIZE_Y__SHIFT 0x3
++#define PA_SC_BINNER_CNTL_0__BIN_SIZE_X_EXTEND__SHIFT 0x4
++#define PA_SC_BINNER_CNTL_0__BIN_SIZE_Y_EXTEND__SHIFT 0x7
++#define PA_SC_BINNER_CNTL_0__CONTEXT_STATES_PER_BIN__SHIFT 0xa
++#define PA_SC_BINNER_CNTL_0__PERSISTENT_STATES_PER_BIN__SHIFT 0xd
++#define PA_SC_BINNER_CNTL_0__DISABLE_START_OF_PRIM__SHIFT 0x12
++#define PA_SC_BINNER_CNTL_0__FPOVS_PER_BATCH__SHIFT 0x13
++#define PA_SC_BINNER_CNTL_0__OPTIMAL_BIN_SELECTION__SHIFT 0x1b
++#define PA_SC_BINNER_CNTL_0__BINNING_MODE_MASK 0x00000003L
++#define PA_SC_BINNER_CNTL_0__BIN_SIZE_X_MASK 0x00000004L
++#define PA_SC_BINNER_CNTL_0__BIN_SIZE_Y_MASK 0x00000008L
++#define PA_SC_BINNER_CNTL_0__BIN_SIZE_X_EXTEND_MASK 0x00000070L
++#define PA_SC_BINNER_CNTL_0__BIN_SIZE_Y_EXTEND_MASK 0x00000380L
++#define PA_SC_BINNER_CNTL_0__CONTEXT_STATES_PER_BIN_MASK 0x00001C00L
++#define PA_SC_BINNER_CNTL_0__PERSISTENT_STATES_PER_BIN_MASK 0x0003E000L
++#define PA_SC_BINNER_CNTL_0__DISABLE_START_OF_PRIM_MASK 0x00040000L
++#define PA_SC_BINNER_CNTL_0__FPOVS_PER_BATCH_MASK 0x07F80000L
++#define PA_SC_BINNER_CNTL_0__OPTIMAL_BIN_SELECTION_MASK 0x08000000L
++//PA_SC_BINNER_CNTL_1
++#define PA_SC_BINNER_CNTL_1__MAX_ALLOC_COUNT__SHIFT 0x0
++#define PA_SC_BINNER_CNTL_1__MAX_PRIM_PER_BATCH__SHIFT 0x10
++#define PA_SC_BINNER_CNTL_1__MAX_ALLOC_COUNT_MASK 0x0000FFFFL
++#define PA_SC_BINNER_CNTL_1__MAX_PRIM_PER_BATCH_MASK 0xFFFF0000L
++//PA_SC_CONSERVATIVE_RASTERIZATION_CNTL
++#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVER_RAST_ENABLE__SHIFT 0x0
++#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVER_RAST_SAMPLE_SELECT__SHIFT 0x1
++#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNDER_RAST_ENABLE__SHIFT 0x5
++#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNDER_RAST_SAMPLE_SELECT__SHIFT 0x6
++#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__PBB_UNCERTAINTY_REGION_ENABLE__SHIFT 0xa
++#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__ZMM_TRI_EXTENT__SHIFT 0xb
++#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__ZMM_TRI_OFFSET__SHIFT 0xc
++#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVERRIDE_OVER_RAST_INNER_TO_NORMAL__SHIFT 0xd
++#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVERRIDE_UNDER_RAST_INNER_TO_NORMAL__SHIFT 0xe
++#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__DEGENERATE_OVERRIDE_INNER_TO_NORMAL_DISABLE__SHIFT 0xf
++#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNCERTAINTY_REGION_MODE__SHIFT 0x10
++#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OUTER_UNCERTAINTY_EDGERULE_OVERRIDE__SHIFT 0x12
++#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__INNER_UNCERTAINTY_EDGERULE_OVERRIDE__SHIFT 0x13
++#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__NULL_SQUAD_AA_MASK_ENABLE__SHIFT 0x14
++#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__COVERAGE_AA_MASK_ENABLE__SHIFT 0x15
++#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__PREZ_AA_MASK_ENABLE__SHIFT 0x16
++#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__POSTZ_AA_MASK_ENABLE__SHIFT 0x17
++#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__CENTROID_SAMPLE_OVERRIDE__SHIFT 0x18
++#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVER_RAST_ENABLE_MASK 0x00000001L
++#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVER_RAST_SAMPLE_SELECT_MASK 0x0000001EL
++#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNDER_RAST_ENABLE_MASK 0x00000020L
++#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNDER_RAST_SAMPLE_SELECT_MASK 0x000003C0L
++#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__PBB_UNCERTAINTY_REGION_ENABLE_MASK 0x00000400L
++#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__ZMM_TRI_EXTENT_MASK 0x00000800L
++#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__ZMM_TRI_OFFSET_MASK 0x00001000L
++#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVERRIDE_OVER_RAST_INNER_TO_NORMAL_MASK 0x00002000L
++#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVERRIDE_UNDER_RAST_INNER_TO_NORMAL_MASK 0x00004000L
++#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__DEGENERATE_OVERRIDE_INNER_TO_NORMAL_DISABLE_MASK 0x00008000L
++#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNCERTAINTY_REGION_MODE_MASK 0x00030000L
++#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OUTER_UNCERTAINTY_EDGERULE_OVERRIDE_MASK 0x00040000L
++#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__INNER_UNCERTAINTY_EDGERULE_OVERRIDE_MASK 0x00080000L
++#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__NULL_SQUAD_AA_MASK_ENABLE_MASK 0x00100000L
++#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__COVERAGE_AA_MASK_ENABLE_MASK 0x00200000L
++#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__PREZ_AA_MASK_ENABLE_MASK 0x00400000L
++#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__POSTZ_AA_MASK_ENABLE_MASK 0x00800000L
++#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__CENTROID_SAMPLE_OVERRIDE_MASK 0x01000000L
++//PA_SC_NGG_MODE_CNTL
++#define PA_SC_NGG_MODE_CNTL__MAX_DEALLOCS_IN_WAVE__SHIFT 0x0
++#define PA_SC_NGG_MODE_CNTL__MAX_DEALLOCS_IN_WAVE_MASK 0x000007FFL
++//VGT_VERTEX_REUSE_BLOCK_CNTL
++#define VGT_VERTEX_REUSE_BLOCK_CNTL__VTX_REUSE_DEPTH__SHIFT 0x0
++#define VGT_VERTEX_REUSE_BLOCK_CNTL__VTX_REUSE_DEPTH_MASK 0x000000FFL
++//VGT_OUT_DEALLOC_CNTL
++#define VGT_OUT_DEALLOC_CNTL__DEALLOC_DIST__SHIFT 0x0
++#define VGT_OUT_DEALLOC_CNTL__DEALLOC_DIST_MASK 0x0000007FL
++//CB_COLOR0_BASE
++#define CB_COLOR0_BASE__BASE_256B__SHIFT 0x0
++#define CB_COLOR0_BASE__BASE_256B_MASK 0xFFFFFFFFL
++//CB_COLOR0_BASE_EXT
++#define CB_COLOR0_BASE_EXT__BASE_256B__SHIFT 0x0
++#define CB_COLOR0_BASE_EXT__BASE_256B_MASK 0x000000FFL
++//CB_COLOR0_ATTRIB2
++#define CB_COLOR0_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
++#define CB_COLOR0_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
++#define CB_COLOR0_ATTRIB2__MAX_MIP__SHIFT 0x1c
++#define CB_COLOR0_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
++#define CB_COLOR0_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
++#define CB_COLOR0_ATTRIB2__MAX_MIP_MASK 0xF0000000L
++//CB_COLOR0_VIEW
++#define CB_COLOR0_VIEW__SLICE_START__SHIFT 0x0
++#define CB_COLOR0_VIEW__SLICE_MAX__SHIFT 0xd
++#define CB_COLOR0_VIEW__MIP_LEVEL__SHIFT 0x18
++#define CB_COLOR0_VIEW__SLICE_START_MASK 0x000007FFL
++#define CB_COLOR0_VIEW__SLICE_MAX_MASK 0x00FFE000L
++#define CB_COLOR0_VIEW__MIP_LEVEL_MASK 0x0F000000L
++//CB_COLOR0_INFO
++#define CB_COLOR0_INFO__ENDIAN__SHIFT 0x0
++#define CB_COLOR0_INFO__FORMAT__SHIFT 0x2
++#define CB_COLOR0_INFO__NUMBER_TYPE__SHIFT 0x8
++#define CB_COLOR0_INFO__COMP_SWAP__SHIFT 0xb
++#define CB_COLOR0_INFO__FAST_CLEAR__SHIFT 0xd
++#define CB_COLOR0_INFO__COMPRESSION__SHIFT 0xe
++#define CB_COLOR0_INFO__BLEND_CLAMP__SHIFT 0xf
++#define CB_COLOR0_INFO__BLEND_BYPASS__SHIFT 0x10
++#define CB_COLOR0_INFO__SIMPLE_FLOAT__SHIFT 0x11
++#define CB_COLOR0_INFO__ROUND_MODE__SHIFT 0x12
++#define CB_COLOR0_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
++#define CB_COLOR0_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
++#define CB_COLOR0_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x1a
++#define CB_COLOR0_INFO__FMASK_COMPRESS_1FRAG_ONLY__SHIFT 0x1b
++#define CB_COLOR0_INFO__DCC_ENABLE__SHIFT 0x1c
++#define CB_COLOR0_INFO__CMASK_ADDR_TYPE__SHIFT 0x1d
++#define CB_COLOR0_INFO__ENDIAN_MASK 0x00000003L
++#define CB_COLOR0_INFO__FORMAT_MASK 0x0000007CL
++#define CB_COLOR0_INFO__NUMBER_TYPE_MASK 0x00000700L
++#define CB_COLOR0_INFO__COMP_SWAP_MASK 0x00001800L
++#define CB_COLOR0_INFO__FAST_CLEAR_MASK 0x00002000L
++#define CB_COLOR0_INFO__COMPRESSION_MASK 0x00004000L
++#define CB_COLOR0_INFO__BLEND_CLAMP_MASK 0x00008000L
++#define CB_COLOR0_INFO__BLEND_BYPASS_MASK 0x00010000L
++#define CB_COLOR0_INFO__SIMPLE_FLOAT_MASK 0x00020000L
++#define CB_COLOR0_INFO__ROUND_MODE_MASK 0x00040000L
++#define CB_COLOR0_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
++#define CB_COLOR0_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
++#define CB_COLOR0_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
++#define CB_COLOR0_INFO__FMASK_COMPRESS_1FRAG_ONLY_MASK 0x08000000L
++#define CB_COLOR0_INFO__DCC_ENABLE_MASK 0x10000000L
++#define CB_COLOR0_INFO__CMASK_ADDR_TYPE_MASK 0x60000000L
++//CB_COLOR0_ATTRIB
++#define CB_COLOR0_ATTRIB__MIP0_DEPTH__SHIFT 0x0
++#define CB_COLOR0_ATTRIB__META_LINEAR__SHIFT 0xb
++#define CB_COLOR0_ATTRIB__NUM_SAMPLES__SHIFT 0xc
++#define CB_COLOR0_ATTRIB__NUM_FRAGMENTS__SHIFT 0xf
++#define CB_COLOR0_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x11
++#define CB_COLOR0_ATTRIB__COLOR_SW_MODE__SHIFT 0x12
++#define CB_COLOR0_ATTRIB__FMASK_SW_MODE__SHIFT 0x17
++#define CB_COLOR0_ATTRIB__RESOURCE_TYPE__SHIFT 0x1c
++#define CB_COLOR0_ATTRIB__RB_ALIGNED__SHIFT 0x1e
++#define CB_COLOR0_ATTRIB__PIPE_ALIGNED__SHIFT 0x1f
++#define CB_COLOR0_ATTRIB__MIP0_DEPTH_MASK 0x000007FFL
++#define CB_COLOR0_ATTRIB__META_LINEAR_MASK 0x00000800L
++#define CB_COLOR0_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
++#define CB_COLOR0_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
++#define CB_COLOR0_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
++#define CB_COLOR0_ATTRIB__COLOR_SW_MODE_MASK 0x007C0000L
++#define CB_COLOR0_ATTRIB__FMASK_SW_MODE_MASK 0x0F800000L
++#define CB_COLOR0_ATTRIB__RESOURCE_TYPE_MASK 0x30000000L
++#define CB_COLOR0_ATTRIB__RB_ALIGNED_MASK 0x40000000L
++#define CB_COLOR0_ATTRIB__PIPE_ALIGNED_MASK 0x80000000L
++//CB_COLOR0_DCC_CONTROL
++#define CB_COLOR0_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT 0x0
++#define CB_COLOR0_DCC_CONTROL__KEY_CLEAR_ENABLE__SHIFT 0x1
++#define CB_COLOR0_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
++#define CB_COLOR0_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
++#define CB_COLOR0_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
++#define CB_COLOR0_DCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
++#define CB_COLOR0_DCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
++#define CB_COLOR0_DCC_CONTROL__LOSSY_RGB_PRECISION__SHIFT 0xa
++#define CB_COLOR0_DCC_CONTROL__LOSSY_ALPHA_PRECISION__SHIFT 0xe
++#define CB_COLOR0_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK 0x00000001L
++#define CB_COLOR0_DCC_CONTROL__KEY_CLEAR_ENABLE_MASK 0x00000002L
++#define CB_COLOR0_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
++#define CB_COLOR0_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
++#define CB_COLOR0_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
++#define CB_COLOR0_DCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
++#define CB_COLOR0_DCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
++#define CB_COLOR0_DCC_CONTROL__LOSSY_RGB_PRECISION_MASK 0x00003C00L
++#define CB_COLOR0_DCC_CONTROL__LOSSY_ALPHA_PRECISION_MASK 0x0003C000L
++//CB_COLOR0_CMASK
++#define CB_COLOR0_CMASK__BASE_256B__SHIFT 0x0
++#define CB_COLOR0_CMASK__BASE_256B_MASK 0xFFFFFFFFL
++//CB_COLOR0_CMASK_BASE_EXT
++#define CB_COLOR0_CMASK_BASE_EXT__BASE_256B__SHIFT 0x0
++#define CB_COLOR0_CMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
++//CB_COLOR0_FMASK
++#define CB_COLOR0_FMASK__BASE_256B__SHIFT 0x0
++#define CB_COLOR0_FMASK__BASE_256B_MASK 0xFFFFFFFFL
++//CB_COLOR0_FMASK_BASE_EXT
++#define CB_COLOR0_FMASK_BASE_EXT__BASE_256B__SHIFT 0x0
++#define CB_COLOR0_FMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
++//CB_COLOR0_CLEAR_WORD0
++#define CB_COLOR0_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x0
++#define CB_COLOR0_CLEAR_WORD0__CLEAR_WORD0_MASK 0xFFFFFFFFL
++//CB_COLOR0_CLEAR_WORD1
++#define CB_COLOR0_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x0
++#define CB_COLOR0_CLEAR_WORD1__CLEAR_WORD1_MASK 0xFFFFFFFFL
++//CB_COLOR0_DCC_BASE
++#define CB_COLOR0_DCC_BASE__BASE_256B__SHIFT 0x0
++#define CB_COLOR0_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
++//CB_COLOR0_DCC_BASE_EXT
++#define CB_COLOR0_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
++#define CB_COLOR0_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
++//CB_COLOR1_BASE
++#define CB_COLOR1_BASE__BASE_256B__SHIFT 0x0
++#define CB_COLOR1_BASE__BASE_256B_MASK 0xFFFFFFFFL
++//CB_COLOR1_BASE_EXT
++#define CB_COLOR1_BASE_EXT__BASE_256B__SHIFT 0x0
++#define CB_COLOR1_BASE_EXT__BASE_256B_MASK 0x000000FFL
++//CB_COLOR1_ATTRIB2
++#define CB_COLOR1_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
++#define CB_COLOR1_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
++#define CB_COLOR1_ATTRIB2__MAX_MIP__SHIFT 0x1c
++#define CB_COLOR1_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
++#define CB_COLOR1_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
++#define CB_COLOR1_ATTRIB2__MAX_MIP_MASK 0xF0000000L
++//CB_COLOR1_VIEW
++#define CB_COLOR1_VIEW__SLICE_START__SHIFT 0x0
++#define CB_COLOR1_VIEW__SLICE_MAX__SHIFT 0xd
++#define CB_COLOR1_VIEW__MIP_LEVEL__SHIFT 0x18
++#define CB_COLOR1_VIEW__SLICE_START_MASK 0x000007FFL
++#define CB_COLOR1_VIEW__SLICE_MAX_MASK 0x00FFE000L
++#define CB_COLOR1_VIEW__MIP_LEVEL_MASK 0x0F000000L
++//CB_COLOR1_INFO
++#define CB_COLOR1_INFO__ENDIAN__SHIFT 0x0
++#define CB_COLOR1_INFO__FORMAT__SHIFT 0x2
++#define CB_COLOR1_INFO__NUMBER_TYPE__SHIFT 0x8
++#define CB_COLOR1_INFO__COMP_SWAP__SHIFT 0xb
++#define CB_COLOR1_INFO__FAST_CLEAR__SHIFT 0xd
++#define CB_COLOR1_INFO__COMPRESSION__SHIFT 0xe
++#define CB_COLOR1_INFO__BLEND_CLAMP__SHIFT 0xf
++#define CB_COLOR1_INFO__BLEND_BYPASS__SHIFT 0x10
++#define CB_COLOR1_INFO__SIMPLE_FLOAT__SHIFT 0x11
++#define CB_COLOR1_INFO__ROUND_MODE__SHIFT 0x12
++#define CB_COLOR1_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
++#define CB_COLOR1_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
++#define CB_COLOR1_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x1a
++#define CB_COLOR1_INFO__FMASK_COMPRESS_1FRAG_ONLY__SHIFT 0x1b
++#define CB_COLOR1_INFO__DCC_ENABLE__SHIFT 0x1c
++#define CB_COLOR1_INFO__CMASK_ADDR_TYPE__SHIFT 0x1d
++#define CB_COLOR1_INFO__ENDIAN_MASK 0x00000003L
++#define CB_COLOR1_INFO__FORMAT_MASK 0x0000007CL
++#define CB_COLOR1_INFO__NUMBER_TYPE_MASK 0x00000700L
++#define CB_COLOR1_INFO__COMP_SWAP_MASK 0x00001800L
++#define CB_COLOR1_INFO__FAST_CLEAR_MASK 0x00002000L
++#define CB_COLOR1_INFO__COMPRESSION_MASK 0x00004000L
++#define CB_COLOR1_INFO__BLEND_CLAMP_MASK 0x00008000L
++#define CB_COLOR1_INFO__BLEND_BYPASS_MASK 0x00010000L
++#define CB_COLOR1_INFO__SIMPLE_FLOAT_MASK 0x00020000L
++#define CB_COLOR1_INFO__ROUND_MODE_MASK 0x00040000L
++#define CB_COLOR1_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
++#define CB_COLOR1_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
++#define CB_COLOR1_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
++#define CB_COLOR1_INFO__FMASK_COMPRESS_1FRAG_ONLY_MASK 0x08000000L
++#define CB_COLOR1_INFO__DCC_ENABLE_MASK 0x10000000L
++#define CB_COLOR1_INFO__CMASK_ADDR_TYPE_MASK 0x60000000L
++//CB_COLOR1_ATTRIB
++#define CB_COLOR1_ATTRIB__MIP0_DEPTH__SHIFT 0x0
++#define CB_COLOR1_ATTRIB__META_LINEAR__SHIFT 0xb
++#define CB_COLOR1_ATTRIB__NUM_SAMPLES__SHIFT 0xc
++#define CB_COLOR1_ATTRIB__NUM_FRAGMENTS__SHIFT 0xf
++#define CB_COLOR1_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x11
++#define CB_COLOR1_ATTRIB__COLOR_SW_MODE__SHIFT 0x12
++#define CB_COLOR1_ATTRIB__FMASK_SW_MODE__SHIFT 0x17
++#define CB_COLOR1_ATTRIB__RESOURCE_TYPE__SHIFT 0x1c
++#define CB_COLOR1_ATTRIB__RB_ALIGNED__SHIFT 0x1e
++#define CB_COLOR1_ATTRIB__PIPE_ALIGNED__SHIFT 0x1f
++#define CB_COLOR1_ATTRIB__MIP0_DEPTH_MASK 0x000007FFL
++#define CB_COLOR1_ATTRIB__META_LINEAR_MASK 0x00000800L
++#define CB_COLOR1_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
++#define CB_COLOR1_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
++#define CB_COLOR1_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
++#define CB_COLOR1_ATTRIB__COLOR_SW_MODE_MASK 0x007C0000L
++#define CB_COLOR1_ATTRIB__FMASK_SW_MODE_MASK 0x0F800000L
++#define CB_COLOR1_ATTRIB__RESOURCE_TYPE_MASK 0x30000000L
++#define CB_COLOR1_ATTRIB__RB_ALIGNED_MASK 0x40000000L
++#define CB_COLOR1_ATTRIB__PIPE_ALIGNED_MASK 0x80000000L
++//CB_COLOR1_DCC_CONTROL
++#define CB_COLOR1_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT 0x0
++#define CB_COLOR1_DCC_CONTROL__KEY_CLEAR_ENABLE__SHIFT 0x1
++#define CB_COLOR1_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
++#define CB_COLOR1_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
++#define CB_COLOR1_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
++#define CB_COLOR1_DCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
++#define CB_COLOR1_DCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
++#define CB_COLOR1_DCC_CONTROL__LOSSY_RGB_PRECISION__SHIFT 0xa
++#define CB_COLOR1_DCC_CONTROL__LOSSY_ALPHA_PRECISION__SHIFT 0xe
++#define CB_COLOR1_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK 0x00000001L
++#define CB_COLOR1_DCC_CONTROL__KEY_CLEAR_ENABLE_MASK 0x00000002L
++#define CB_COLOR1_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
++#define CB_COLOR1_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
++#define CB_COLOR1_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
++#define CB_COLOR1_DCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
++#define CB_COLOR1_DCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
++#define CB_COLOR1_DCC_CONTROL__LOSSY_RGB_PRECISION_MASK 0x00003C00L
++#define CB_COLOR1_DCC_CONTROL__LOSSY_ALPHA_PRECISION_MASK 0x0003C000L
++//CB_COLOR1_CMASK
++#define CB_COLOR1_CMASK__BASE_256B__SHIFT 0x0
++#define CB_COLOR1_CMASK__BASE_256B_MASK 0xFFFFFFFFL
++//CB_COLOR1_CMASK_BASE_EXT
++#define CB_COLOR1_CMASK_BASE_EXT__BASE_256B__SHIFT 0x0
++#define CB_COLOR1_CMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
++//CB_COLOR1_FMASK
++#define CB_COLOR1_FMASK__BASE_256B__SHIFT 0x0
++#define CB_COLOR1_FMASK__BASE_256B_MASK 0xFFFFFFFFL
++//CB_COLOR1_FMASK_BASE_EXT
++#define CB_COLOR1_FMASK_BASE_EXT__BASE_256B__SHIFT 0x0
++#define CB_COLOR1_FMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
++//CB_COLOR1_CLEAR_WORD0
++#define CB_COLOR1_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x0
++#define CB_COLOR1_CLEAR_WORD0__CLEAR_WORD0_MASK 0xFFFFFFFFL
++//CB_COLOR1_CLEAR_WORD1
++#define CB_COLOR1_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x0
++#define CB_COLOR1_CLEAR_WORD1__CLEAR_WORD1_MASK 0xFFFFFFFFL
++//CB_COLOR1_DCC_BASE
++#define CB_COLOR1_DCC_BASE__BASE_256B__SHIFT 0x0
++#define CB_COLOR1_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
++//CB_COLOR1_DCC_BASE_EXT
++#define CB_COLOR1_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
++#define CB_COLOR1_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
++//CB_COLOR2_BASE
++#define CB_COLOR2_BASE__BASE_256B__SHIFT 0x0
++#define CB_COLOR2_BASE__BASE_256B_MASK 0xFFFFFFFFL
++//CB_COLOR2_BASE_EXT
++#define CB_COLOR2_BASE_EXT__BASE_256B__SHIFT 0x0
++#define CB_COLOR2_BASE_EXT__BASE_256B_MASK 0x000000FFL
++//CB_COLOR2_ATTRIB2
++#define CB_COLOR2_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
++#define CB_COLOR2_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
++#define CB_COLOR2_ATTRIB2__MAX_MIP__SHIFT 0x1c
++#define CB_COLOR2_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
++#define CB_COLOR2_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
++#define CB_COLOR2_ATTRIB2__MAX_MIP_MASK 0xF0000000L
++//CB_COLOR2_VIEW
++#define CB_COLOR2_VIEW__SLICE_START__SHIFT 0x0
++#define CB_COLOR2_VIEW__SLICE_MAX__SHIFT 0xd
++#define CB_COLOR2_VIEW__MIP_LEVEL__SHIFT 0x18
++#define CB_COLOR2_VIEW__SLICE_START_MASK 0x000007FFL
++#define CB_COLOR2_VIEW__SLICE_MAX_MASK 0x00FFE000L
++#define CB_COLOR2_VIEW__MIP_LEVEL_MASK 0x0F000000L
++//CB_COLOR2_INFO
++#define CB_COLOR2_INFO__ENDIAN__SHIFT 0x0
++#define CB_COLOR2_INFO__FORMAT__SHIFT 0x2
++#define CB_COLOR2_INFO__NUMBER_TYPE__SHIFT 0x8
++#define CB_COLOR2_INFO__COMP_SWAP__SHIFT 0xb
++#define CB_COLOR2_INFO__FAST_CLEAR__SHIFT 0xd
++#define CB_COLOR2_INFO__COMPRESSION__SHIFT 0xe
++#define CB_COLOR2_INFO__BLEND_CLAMP__SHIFT 0xf
++#define CB_COLOR2_INFO__BLEND_BYPASS__SHIFT 0x10
++#define CB_COLOR2_INFO__SIMPLE_FLOAT__SHIFT 0x11
++#define CB_COLOR2_INFO__ROUND_MODE__SHIFT 0x12
++#define CB_COLOR2_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
++#define CB_COLOR2_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
++#define CB_COLOR2_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x1a
++#define CB_COLOR2_INFO__FMASK_COMPRESS_1FRAG_ONLY__SHIFT 0x1b
++#define CB_COLOR2_INFO__DCC_ENABLE__SHIFT 0x1c
++#define CB_COLOR2_INFO__CMASK_ADDR_TYPE__SHIFT 0x1d
++#define CB_COLOR2_INFO__ENDIAN_MASK 0x00000003L
++#define CB_COLOR2_INFO__FORMAT_MASK 0x0000007CL
++#define CB_COLOR2_INFO__NUMBER_TYPE_MASK 0x00000700L
++#define CB_COLOR2_INFO__COMP_SWAP_MASK 0x00001800L
++#define CB_COLOR2_INFO__FAST_CLEAR_MASK 0x00002000L
++#define CB_COLOR2_INFO__COMPRESSION_MASK 0x00004000L
++#define CB_COLOR2_INFO__BLEND_CLAMP_MASK 0x00008000L
++#define CB_COLOR2_INFO__BLEND_BYPASS_MASK 0x00010000L
++#define CB_COLOR2_INFO__SIMPLE_FLOAT_MASK 0x00020000L
++#define CB_COLOR2_INFO__ROUND_MODE_MASK 0x00040000L
++#define CB_COLOR2_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
++#define CB_COLOR2_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
++#define CB_COLOR2_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
++#define CB_COLOR2_INFO__FMASK_COMPRESS_1FRAG_ONLY_MASK 0x08000000L
++#define CB_COLOR2_INFO__DCC_ENABLE_MASK 0x10000000L
++#define CB_COLOR2_INFO__CMASK_ADDR_TYPE_MASK 0x60000000L
++//CB_COLOR2_ATTRIB
++#define CB_COLOR2_ATTRIB__MIP0_DEPTH__SHIFT 0x0
++#define CB_COLOR2_ATTRIB__META_LINEAR__SHIFT 0xb
++#define CB_COLOR2_ATTRIB__NUM_SAMPLES__SHIFT 0xc
++#define CB_COLOR2_ATTRIB__NUM_FRAGMENTS__SHIFT 0xf
++#define CB_COLOR2_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x11
++#define CB_COLOR2_ATTRIB__COLOR_SW_MODE__SHIFT 0x12
++#define CB_COLOR2_ATTRIB__FMASK_SW_MODE__SHIFT 0x17
++#define CB_COLOR2_ATTRIB__RESOURCE_TYPE__SHIFT 0x1c
++#define CB_COLOR2_ATTRIB__RB_ALIGNED__SHIFT 0x1e
++#define CB_COLOR2_ATTRIB__PIPE_ALIGNED__SHIFT 0x1f
++#define CB_COLOR2_ATTRIB__MIP0_DEPTH_MASK 0x000007FFL
++#define CB_COLOR2_ATTRIB__META_LINEAR_MASK 0x00000800L
++#define CB_COLOR2_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
++#define CB_COLOR2_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
++#define CB_COLOR2_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
++#define CB_COLOR2_ATTRIB__COLOR_SW_MODE_MASK 0x007C0000L
++#define CB_COLOR2_ATTRIB__FMASK_SW_MODE_MASK 0x0F800000L
++#define CB_COLOR2_ATTRIB__RESOURCE_TYPE_MASK 0x30000000L
++#define CB_COLOR2_ATTRIB__RB_ALIGNED_MASK 0x40000000L
++#define CB_COLOR2_ATTRIB__PIPE_ALIGNED_MASK 0x80000000L
++//CB_COLOR2_DCC_CONTROL
++#define CB_COLOR2_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT 0x0
++#define CB_COLOR2_DCC_CONTROL__KEY_CLEAR_ENABLE__SHIFT 0x1
++#define CB_COLOR2_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
++#define CB_COLOR2_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
++#define CB_COLOR2_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
++#define CB_COLOR2_DCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
++#define CB_COLOR2_DCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
++#define CB_COLOR2_DCC_CONTROL__LOSSY_RGB_PRECISION__SHIFT 0xa
++#define CB_COLOR2_DCC_CONTROL__LOSSY_ALPHA_PRECISION__SHIFT 0xe
++#define CB_COLOR2_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK 0x00000001L
++#define CB_COLOR2_DCC_CONTROL__KEY_CLEAR_ENABLE_MASK 0x00000002L
++#define CB_COLOR2_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
++#define CB_COLOR2_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
++#define CB_COLOR2_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
++#define CB_COLOR2_DCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
++#define CB_COLOR2_DCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
++#define CB_COLOR2_DCC_CONTROL__LOSSY_RGB_PRECISION_MASK 0x00003C00L
++#define CB_COLOR2_DCC_CONTROL__LOSSY_ALPHA_PRECISION_MASK 0x0003C000L
++//CB_COLOR2_CMASK
++#define CB_COLOR2_CMASK__BASE_256B__SHIFT 0x0
++#define CB_COLOR2_CMASK__BASE_256B_MASK 0xFFFFFFFFL
++//CB_COLOR2_CMASK_BASE_EXT
++#define CB_COLOR2_CMASK_BASE_EXT__BASE_256B__SHIFT 0x0
++#define CB_COLOR2_CMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
++//CB_COLOR2_FMASK
++#define CB_COLOR2_FMASK__BASE_256B__SHIFT 0x0
++#define CB_COLOR2_FMASK__BASE_256B_MASK 0xFFFFFFFFL
++//CB_COLOR2_FMASK_BASE_EXT
++#define CB_COLOR2_FMASK_BASE_EXT__BASE_256B__SHIFT 0x0
++#define CB_COLOR2_FMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
++//CB_COLOR2_CLEAR_WORD0
++#define CB_COLOR2_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x0
++#define CB_COLOR2_CLEAR_WORD0__CLEAR_WORD0_MASK 0xFFFFFFFFL
++//CB_COLOR2_CLEAR_WORD1
++#define CB_COLOR2_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x0
++#define CB_COLOR2_CLEAR_WORD1__CLEAR_WORD1_MASK 0xFFFFFFFFL
++//CB_COLOR2_DCC_BASE
++#define CB_COLOR2_DCC_BASE__BASE_256B__SHIFT 0x0
++#define CB_COLOR2_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
++//CB_COLOR2_DCC_BASE_EXT
++#define CB_COLOR2_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
++#define CB_COLOR2_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
++//CB_COLOR3_BASE
++#define CB_COLOR3_BASE__BASE_256B__SHIFT 0x0
++#define CB_COLOR3_BASE__BASE_256B_MASK 0xFFFFFFFFL
++//CB_COLOR3_BASE_EXT
++#define CB_COLOR3_BASE_EXT__BASE_256B__SHIFT 0x0
++#define CB_COLOR3_BASE_EXT__BASE_256B_MASK 0x000000FFL
++//CB_COLOR3_ATTRIB2
++#define CB_COLOR3_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
++#define CB_COLOR3_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
++#define CB_COLOR3_ATTRIB2__MAX_MIP__SHIFT 0x1c
++#define CB_COLOR3_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
++#define CB_COLOR3_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
++#define CB_COLOR3_ATTRIB2__MAX_MIP_MASK 0xF0000000L
++//CB_COLOR3_VIEW
++#define CB_COLOR3_VIEW__SLICE_START__SHIFT 0x0
++#define CB_COLOR3_VIEW__SLICE_MAX__SHIFT 0xd
++#define CB_COLOR3_VIEW__MIP_LEVEL__SHIFT 0x18
++#define CB_COLOR3_VIEW__SLICE_START_MASK 0x000007FFL
++#define CB_COLOR3_VIEW__SLICE_MAX_MASK 0x00FFE000L
++#define CB_COLOR3_VIEW__MIP_LEVEL_MASK 0x0F000000L
++//CB_COLOR3_INFO
++#define CB_COLOR3_INFO__ENDIAN__SHIFT 0x0
++#define CB_COLOR3_INFO__FORMAT__SHIFT 0x2
++#define CB_COLOR3_INFO__NUMBER_TYPE__SHIFT 0x8
++#define CB_COLOR3_INFO__COMP_SWAP__SHIFT 0xb
++#define CB_COLOR3_INFO__FAST_CLEAR__SHIFT 0xd
++#define CB_COLOR3_INFO__COMPRESSION__SHIFT 0xe
++#define CB_COLOR3_INFO__BLEND_CLAMP__SHIFT 0xf
++#define CB_COLOR3_INFO__BLEND_BYPASS__SHIFT 0x10
++#define CB_COLOR3_INFO__SIMPLE_FLOAT__SHIFT 0x11
++#define CB_COLOR3_INFO__ROUND_MODE__SHIFT 0x12
++#define CB_COLOR3_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
++#define CB_COLOR3_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
++#define CB_COLOR3_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x1a
++#define CB_COLOR3_INFO__FMASK_COMPRESS_1FRAG_ONLY__SHIFT 0x1b
++#define CB_COLOR3_INFO__DCC_ENABLE__SHIFT 0x1c
++#define CB_COLOR3_INFO__CMASK_ADDR_TYPE__SHIFT 0x1d
++#define CB_COLOR3_INFO__ENDIAN_MASK 0x00000003L
++#define CB_COLOR3_INFO__FORMAT_MASK 0x0000007CL
++#define CB_COLOR3_INFO__NUMBER_TYPE_MASK 0x00000700L
++#define CB_COLOR3_INFO__COMP_SWAP_MASK 0x00001800L
++#define CB_COLOR3_INFO__FAST_CLEAR_MASK 0x00002000L
++#define CB_COLOR3_INFO__COMPRESSION_MASK 0x00004000L
++#define CB_COLOR3_INFO__BLEND_CLAMP_MASK 0x00008000L
++#define CB_COLOR3_INFO__BLEND_BYPASS_MASK 0x00010000L
++#define CB_COLOR3_INFO__SIMPLE_FLOAT_MASK 0x00020000L
++#define CB_COLOR3_INFO__ROUND_MODE_MASK 0x00040000L
++#define CB_COLOR3_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
++#define CB_COLOR3_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
++#define CB_COLOR3_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
++#define CB_COLOR3_INFO__FMASK_COMPRESS_1FRAG_ONLY_MASK 0x08000000L
++#define CB_COLOR3_INFO__DCC_ENABLE_MASK 0x10000000L
++#define CB_COLOR3_INFO__CMASK_ADDR_TYPE_MASK 0x60000000L
++//CB_COLOR3_ATTRIB
++#define CB_COLOR3_ATTRIB__MIP0_DEPTH__SHIFT 0x0
++#define CB_COLOR3_ATTRIB__META_LINEAR__SHIFT 0xb
++#define CB_COLOR3_ATTRIB__NUM_SAMPLES__SHIFT 0xc
++#define CB_COLOR3_ATTRIB__NUM_FRAGMENTS__SHIFT 0xf
++#define CB_COLOR3_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x11
++#define CB_COLOR3_ATTRIB__COLOR_SW_MODE__SHIFT 0x12
++#define CB_COLOR3_ATTRIB__FMASK_SW_MODE__SHIFT 0x17
++#define CB_COLOR3_ATTRIB__RESOURCE_TYPE__SHIFT 0x1c
++#define CB_COLOR3_ATTRIB__RB_ALIGNED__SHIFT 0x1e
++#define CB_COLOR3_ATTRIB__PIPE_ALIGNED__SHIFT 0x1f
++#define CB_COLOR3_ATTRIB__MIP0_DEPTH_MASK 0x000007FFL
++#define CB_COLOR3_ATTRIB__META_LINEAR_MASK 0x00000800L
++#define CB_COLOR3_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
++#define CB_COLOR3_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
++#define CB_COLOR3_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
++#define CB_COLOR3_ATTRIB__COLOR_SW_MODE_MASK 0x007C0000L
++#define CB_COLOR3_ATTRIB__FMASK_SW_MODE_MASK 0x0F800000L
++#define CB_COLOR3_ATTRIB__RESOURCE_TYPE_MASK 0x30000000L
++#define CB_COLOR3_ATTRIB__RB_ALIGNED_MASK 0x40000000L
++#define CB_COLOR3_ATTRIB__PIPE_ALIGNED_MASK 0x80000000L
++//CB_COLOR3_DCC_CONTROL
++#define CB_COLOR3_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT 0x0
++#define CB_COLOR3_DCC_CONTROL__KEY_CLEAR_ENABLE__SHIFT 0x1
++#define CB_COLOR3_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
++#define CB_COLOR3_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
++#define CB_COLOR3_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
++#define CB_COLOR3_DCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
++#define CB_COLOR3_DCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
++#define CB_COLOR3_DCC_CONTROL__LOSSY_RGB_PRECISION__SHIFT 0xa
++#define CB_COLOR3_DCC_CONTROL__LOSSY_ALPHA_PRECISION__SHIFT 0xe
++#define CB_COLOR3_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK 0x00000001L
++#define CB_COLOR3_DCC_CONTROL__KEY_CLEAR_ENABLE_MASK 0x00000002L
++#define CB_COLOR3_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
++#define CB_COLOR3_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
++#define CB_COLOR3_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
++#define CB_COLOR3_DCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
++#define CB_COLOR3_DCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
++#define CB_COLOR3_DCC_CONTROL__LOSSY_RGB_PRECISION_MASK 0x00003C00L
++#define CB_COLOR3_DCC_CONTROL__LOSSY_ALPHA_PRECISION_MASK 0x0003C000L
++//CB_COLOR3_CMASK
++#define CB_COLOR3_CMASK__BASE_256B__SHIFT 0x0
++#define CB_COLOR3_CMASK__BASE_256B_MASK 0xFFFFFFFFL
++//CB_COLOR3_CMASK_BASE_EXT
++#define CB_COLOR3_CMASK_BASE_EXT__BASE_256B__SHIFT 0x0
++#define CB_COLOR3_CMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
++//CB_COLOR3_FMASK
++#define CB_COLOR3_FMASK__BASE_256B__SHIFT 0x0
++#define CB_COLOR3_FMASK__BASE_256B_MASK 0xFFFFFFFFL
++//CB_COLOR3_FMASK_BASE_EXT
++#define CB_COLOR3_FMASK_BASE_EXT__BASE_256B__SHIFT 0x0
++#define CB_COLOR3_FMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
++//CB_COLOR3_CLEAR_WORD0
++#define CB_COLOR3_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x0
++#define CB_COLOR3_CLEAR_WORD0__CLEAR_WORD0_MASK 0xFFFFFFFFL
++//CB_COLOR3_CLEAR_WORD1
++#define CB_COLOR3_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x0
++#define CB_COLOR3_CLEAR_WORD1__CLEAR_WORD1_MASK 0xFFFFFFFFL
++//CB_COLOR3_DCC_BASE
++#define CB_COLOR3_DCC_BASE__BASE_256B__SHIFT 0x0
++#define CB_COLOR3_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
++//CB_COLOR3_DCC_BASE_EXT
++#define CB_COLOR3_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
++#define CB_COLOR3_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
++//CB_COLOR4_BASE
++#define CB_COLOR4_BASE__BASE_256B__SHIFT 0x0
++#define CB_COLOR4_BASE__BASE_256B_MASK 0xFFFFFFFFL
++//CB_COLOR4_BASE_EXT
++#define CB_COLOR4_BASE_EXT__BASE_256B__SHIFT 0x0
++#define CB_COLOR4_BASE_EXT__BASE_256B_MASK 0x000000FFL
++//CB_COLOR4_ATTRIB2
++#define CB_COLOR4_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
++#define CB_COLOR4_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
++#define CB_COLOR4_ATTRIB2__MAX_MIP__SHIFT 0x1c
++#define CB_COLOR4_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
++#define CB_COLOR4_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
++#define CB_COLOR4_ATTRIB2__MAX_MIP_MASK 0xF0000000L
++//CB_COLOR4_VIEW
++#define CB_COLOR4_VIEW__SLICE_START__SHIFT 0x0
++#define CB_COLOR4_VIEW__SLICE_MAX__SHIFT 0xd
++#define CB_COLOR4_VIEW__MIP_LEVEL__SHIFT 0x18
++#define CB_COLOR4_VIEW__SLICE_START_MASK 0x000007FFL
++#define CB_COLOR4_VIEW__SLICE_MAX_MASK 0x00FFE000L
++#define CB_COLOR4_VIEW__MIP_LEVEL_MASK 0x0F000000L
++//CB_COLOR4_INFO
++#define CB_COLOR4_INFO__ENDIAN__SHIFT 0x0
++#define CB_COLOR4_INFO__FORMAT__SHIFT 0x2
++#define CB_COLOR4_INFO__NUMBER_TYPE__SHIFT 0x8
++#define CB_COLOR4_INFO__COMP_SWAP__SHIFT 0xb
++#define CB_COLOR4_INFO__FAST_CLEAR__SHIFT 0xd
++#define CB_COLOR4_INFO__COMPRESSION__SHIFT 0xe
++#define CB_COLOR4_INFO__BLEND_CLAMP__SHIFT 0xf
++#define CB_COLOR4_INFO__BLEND_BYPASS__SHIFT 0x10
++#define CB_COLOR4_INFO__SIMPLE_FLOAT__SHIFT 0x11
++#define CB_COLOR4_INFO__ROUND_MODE__SHIFT 0x12
++#define CB_COLOR4_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
++#define CB_COLOR4_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
++#define CB_COLOR4_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x1a
++#define CB_COLOR4_INFO__FMASK_COMPRESS_1FRAG_ONLY__SHIFT 0x1b
++#define CB_COLOR4_INFO__DCC_ENABLE__SHIFT 0x1c
++#define CB_COLOR4_INFO__CMASK_ADDR_TYPE__SHIFT 0x1d
++#define CB_COLOR4_INFO__ENDIAN_MASK 0x00000003L
++#define CB_COLOR4_INFO__FORMAT_MASK 0x0000007CL
++#define CB_COLOR4_INFO__NUMBER_TYPE_MASK 0x00000700L
++#define CB_COLOR4_INFO__COMP_SWAP_MASK 0x00001800L
++#define CB_COLOR4_INFO__FAST_CLEAR_MASK 0x00002000L
++#define CB_COLOR4_INFO__COMPRESSION_MASK 0x00004000L
++#define CB_COLOR4_INFO__BLEND_CLAMP_MASK 0x00008000L
++#define CB_COLOR4_INFO__BLEND_BYPASS_MASK 0x00010000L
++#define CB_COLOR4_INFO__SIMPLE_FLOAT_MASK 0x00020000L
++#define CB_COLOR4_INFO__ROUND_MODE_MASK 0x00040000L
++#define CB_COLOR4_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
++#define CB_COLOR4_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
++#define CB_COLOR4_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
++#define CB_COLOR4_INFO__FMASK_COMPRESS_1FRAG_ONLY_MASK 0x08000000L
++#define CB_COLOR4_INFO__DCC_ENABLE_MASK 0x10000000L
++#define CB_COLOR4_INFO__CMASK_ADDR_TYPE_MASK 0x60000000L
++//CB_COLOR4_ATTRIB
++#define CB_COLOR4_ATTRIB__MIP0_DEPTH__SHIFT 0x0
++#define CB_COLOR4_ATTRIB__META_LINEAR__SHIFT 0xb
++#define CB_COLOR4_ATTRIB__NUM_SAMPLES__SHIFT 0xc
++#define CB_COLOR4_ATTRIB__NUM_FRAGMENTS__SHIFT 0xf
++#define CB_COLOR4_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x11
++#define CB_COLOR4_ATTRIB__COLOR_SW_MODE__SHIFT 0x12
++#define CB_COLOR4_ATTRIB__FMASK_SW_MODE__SHIFT 0x17
++#define CB_COLOR4_ATTRIB__RESOURCE_TYPE__SHIFT 0x1c
++#define CB_COLOR4_ATTRIB__RB_ALIGNED__SHIFT 0x1e
++#define CB_COLOR4_ATTRIB__PIPE_ALIGNED__SHIFT 0x1f
++#define CB_COLOR4_ATTRIB__MIP0_DEPTH_MASK 0x000007FFL
++#define CB_COLOR4_ATTRIB__META_LINEAR_MASK 0x00000800L
++#define CB_COLOR4_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
++#define CB_COLOR4_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
++#define CB_COLOR4_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
++#define CB_COLOR4_ATTRIB__COLOR_SW_MODE_MASK 0x007C0000L
++#define CB_COLOR4_ATTRIB__FMASK_SW_MODE_MASK 0x0F800000L
++#define CB_COLOR4_ATTRIB__RESOURCE_TYPE_MASK 0x30000000L
++#define CB_COLOR4_ATTRIB__RB_ALIGNED_MASK 0x40000000L
++#define CB_COLOR4_ATTRIB__PIPE_ALIGNED_MASK 0x80000000L
++//CB_COLOR4_DCC_CONTROL
++#define CB_COLOR4_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT 0x0
++#define CB_COLOR4_DCC_CONTROL__KEY_CLEAR_ENABLE__SHIFT 0x1
++#define CB_COLOR4_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
++#define CB_COLOR4_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
++#define CB_COLOR4_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
++#define CB_COLOR4_DCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
++#define CB_COLOR4_DCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
++#define CB_COLOR4_DCC_CONTROL__LOSSY_RGB_PRECISION__SHIFT 0xa
++#define CB_COLOR4_DCC_CONTROL__LOSSY_ALPHA_PRECISION__SHIFT 0xe
++#define CB_COLOR4_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK 0x00000001L
++#define CB_COLOR4_DCC_CONTROL__KEY_CLEAR_ENABLE_MASK 0x00000002L
++#define CB_COLOR4_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
++#define CB_COLOR4_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
++#define CB_COLOR4_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
++#define CB_COLOR4_DCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
++#define CB_COLOR4_DCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
++#define CB_COLOR4_DCC_CONTROL__LOSSY_RGB_PRECISION_MASK 0x00003C00L
++#define CB_COLOR4_DCC_CONTROL__LOSSY_ALPHA_PRECISION_MASK 0x0003C000L
++//CB_COLOR4_CMASK
++#define CB_COLOR4_CMASK__BASE_256B__SHIFT 0x0
++#define CB_COLOR4_CMASK__BASE_256B_MASK 0xFFFFFFFFL
++//CB_COLOR4_CMASK_BASE_EXT
++#define CB_COLOR4_CMASK_BASE_EXT__BASE_256B__SHIFT 0x0
++#define CB_COLOR4_CMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
++//CB_COLOR4_FMASK
++#define CB_COLOR4_FMASK__BASE_256B__SHIFT 0x0
++#define CB_COLOR4_FMASK__BASE_256B_MASK 0xFFFFFFFFL
++//CB_COLOR4_FMASK_BASE_EXT
++#define CB_COLOR4_FMASK_BASE_EXT__BASE_256B__SHIFT 0x0
++#define CB_COLOR4_FMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
++//CB_COLOR4_CLEAR_WORD0
++#define CB_COLOR4_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x0
++#define CB_COLOR4_CLEAR_WORD0__CLEAR_WORD0_MASK 0xFFFFFFFFL
++//CB_COLOR4_CLEAR_WORD1
++#define CB_COLOR4_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x0
++#define CB_COLOR4_CLEAR_WORD1__CLEAR_WORD1_MASK 0xFFFFFFFFL
++//CB_COLOR4_DCC_BASE
++#define CB_COLOR4_DCC_BASE__BASE_256B__SHIFT 0x0
++#define CB_COLOR4_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
++//CB_COLOR4_DCC_BASE_EXT
++#define CB_COLOR4_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
++#define CB_COLOR4_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
++//CB_COLOR5_BASE
++#define CB_COLOR5_BASE__BASE_256B__SHIFT 0x0
++#define CB_COLOR5_BASE__BASE_256B_MASK 0xFFFFFFFFL
++//CB_COLOR5_BASE_EXT
++#define CB_COLOR5_BASE_EXT__BASE_256B__SHIFT 0x0
++#define CB_COLOR5_BASE_EXT__BASE_256B_MASK 0x000000FFL
++//CB_COLOR5_ATTRIB2
++#define CB_COLOR5_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
++#define CB_COLOR5_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
++#define CB_COLOR5_ATTRIB2__MAX_MIP__SHIFT 0x1c
++#define CB_COLOR5_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
++#define CB_COLOR5_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
++#define CB_COLOR5_ATTRIB2__MAX_MIP_MASK 0xF0000000L
++//CB_COLOR5_VIEW
++#define CB_COLOR5_VIEW__SLICE_START__SHIFT 0x0
++#define CB_COLOR5_VIEW__SLICE_MAX__SHIFT 0xd
++#define CB_COLOR5_VIEW__MIP_LEVEL__SHIFT 0x18
++#define CB_COLOR5_VIEW__SLICE_START_MASK 0x000007FFL
++#define CB_COLOR5_VIEW__SLICE_MAX_MASK 0x00FFE000L
++#define CB_COLOR5_VIEW__MIP_LEVEL_MASK 0x0F000000L
++//CB_COLOR5_INFO
++#define CB_COLOR5_INFO__ENDIAN__SHIFT 0x0
++#define CB_COLOR5_INFO__FORMAT__SHIFT 0x2
++#define CB_COLOR5_INFO__NUMBER_TYPE__SHIFT 0x8
++#define CB_COLOR5_INFO__COMP_SWAP__SHIFT 0xb
++#define CB_COLOR5_INFO__FAST_CLEAR__SHIFT 0xd
++#define CB_COLOR5_INFO__COMPRESSION__SHIFT 0xe
++#define CB_COLOR5_INFO__BLEND_CLAMP__SHIFT 0xf
++#define CB_COLOR5_INFO__BLEND_BYPASS__SHIFT 0x10
++#define CB_COLOR5_INFO__SIMPLE_FLOAT__SHIFT 0x11
++#define CB_COLOR5_INFO__ROUND_MODE__SHIFT 0x12
++#define CB_COLOR5_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
++#define CB_COLOR5_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
++#define CB_COLOR5_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x1a
++#define CB_COLOR5_INFO__FMASK_COMPRESS_1FRAG_ONLY__SHIFT 0x1b
++#define CB_COLOR5_INFO__DCC_ENABLE__SHIFT 0x1c
++#define CB_COLOR5_INFO__CMASK_ADDR_TYPE__SHIFT 0x1d
++#define CB_COLOR5_INFO__ENDIAN_MASK 0x00000003L
++#define CB_COLOR5_INFO__FORMAT_MASK 0x0000007CL
++#define CB_COLOR5_INFO__NUMBER_TYPE_MASK 0x00000700L
++#define CB_COLOR5_INFO__COMP_SWAP_MASK 0x00001800L
++#define CB_COLOR5_INFO__FAST_CLEAR_MASK 0x00002000L
++#define CB_COLOR5_INFO__COMPRESSION_MASK 0x00004000L
++#define CB_COLOR5_INFO__BLEND_CLAMP_MASK 0x00008000L
++#define CB_COLOR5_INFO__BLEND_BYPASS_MASK 0x00010000L
++#define CB_COLOR5_INFO__SIMPLE_FLOAT_MASK 0x00020000L
++#define CB_COLOR5_INFO__ROUND_MODE_MASK 0x00040000L
++#define CB_COLOR5_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
++#define CB_COLOR5_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
++#define CB_COLOR5_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
++#define CB_COLOR5_INFO__FMASK_COMPRESS_1FRAG_ONLY_MASK 0x08000000L
++#define CB_COLOR5_INFO__DCC_ENABLE_MASK 0x10000000L
++#define CB_COLOR5_INFO__CMASK_ADDR_TYPE_MASK 0x60000000L
++//CB_COLOR5_ATTRIB
++#define CB_COLOR5_ATTRIB__MIP0_DEPTH__SHIFT 0x0
++#define CB_COLOR5_ATTRIB__META_LINEAR__SHIFT 0xb
++#define CB_COLOR5_ATTRIB__NUM_SAMPLES__SHIFT 0xc
++#define CB_COLOR5_ATTRIB__NUM_FRAGMENTS__SHIFT 0xf
++#define CB_COLOR5_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x11
++#define CB_COLOR5_ATTRIB__COLOR_SW_MODE__SHIFT 0x12
++#define CB_COLOR5_ATTRIB__FMASK_SW_MODE__SHIFT 0x17
++#define CB_COLOR5_ATTRIB__RESOURCE_TYPE__SHIFT 0x1c
++#define CB_COLOR5_ATTRIB__RB_ALIGNED__SHIFT 0x1e
++#define CB_COLOR5_ATTRIB__PIPE_ALIGNED__SHIFT 0x1f
++#define CB_COLOR5_ATTRIB__MIP0_DEPTH_MASK 0x000007FFL
++#define CB_COLOR5_ATTRIB__META_LINEAR_MASK 0x00000800L
++#define CB_COLOR5_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
++#define CB_COLOR5_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
++#define CB_COLOR5_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
++#define CB_COLOR5_ATTRIB__COLOR_SW_MODE_MASK 0x007C0000L
++#define CB_COLOR5_ATTRIB__FMASK_SW_MODE_MASK 0x0F800000L
++#define CB_COLOR5_ATTRIB__RESOURCE_TYPE_MASK 0x30000000L
++#define CB_COLOR5_ATTRIB__RB_ALIGNED_MASK 0x40000000L
++#define CB_COLOR5_ATTRIB__PIPE_ALIGNED_MASK 0x80000000L
++//CB_COLOR5_DCC_CONTROL
++#define CB_COLOR5_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT 0x0
++#define CB_COLOR5_DCC_CONTROL__KEY_CLEAR_ENABLE__SHIFT 0x1
++#define CB_COLOR5_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
++#define CB_COLOR5_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
++#define CB_COLOR5_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
++#define CB_COLOR5_DCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
++#define CB_COLOR5_DCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
++#define CB_COLOR5_DCC_CONTROL__LOSSY_RGB_PRECISION__SHIFT 0xa
++#define CB_COLOR5_DCC_CONTROL__LOSSY_ALPHA_PRECISION__SHIFT 0xe
++#define CB_COLOR5_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK 0x00000001L
++#define CB_COLOR5_DCC_CONTROL__KEY_CLEAR_ENABLE_MASK 0x00000002L
++#define CB_COLOR5_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
++#define CB_COLOR5_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
++#define CB_COLOR5_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
++#define CB_COLOR5_DCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
++#define CB_COLOR5_DCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
++#define CB_COLOR5_DCC_CONTROL__LOSSY_RGB_PRECISION_MASK 0x00003C00L
++#define CB_COLOR5_DCC_CONTROL__LOSSY_ALPHA_PRECISION_MASK 0x0003C000L
++//CB_COLOR5_CMASK
++#define CB_COLOR5_CMASK__BASE_256B__SHIFT 0x0
++#define CB_COLOR5_CMASK__BASE_256B_MASK 0xFFFFFFFFL
++//CB_COLOR5_CMASK_BASE_EXT
++#define CB_COLOR5_CMASK_BASE_EXT__BASE_256B__SHIFT 0x0
++#define CB_COLOR5_CMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
++//CB_COLOR5_FMASK
++#define CB_COLOR5_FMASK__BASE_256B__SHIFT 0x0
++#define CB_COLOR5_FMASK__BASE_256B_MASK 0xFFFFFFFFL
++//CB_COLOR5_FMASK_BASE_EXT
++#define CB_COLOR5_FMASK_BASE_EXT__BASE_256B__SHIFT 0x0
++#define CB_COLOR5_FMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
++//CB_COLOR5_CLEAR_WORD0
++#define CB_COLOR5_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x0
++#define CB_COLOR5_CLEAR_WORD0__CLEAR_WORD0_MASK 0xFFFFFFFFL
++//CB_COLOR5_CLEAR_WORD1
++#define CB_COLOR5_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x0
++#define CB_COLOR5_CLEAR_WORD1__CLEAR_WORD1_MASK 0xFFFFFFFFL
++//CB_COLOR5_DCC_BASE
++#define CB_COLOR5_DCC_BASE__BASE_256B__SHIFT 0x0
++#define CB_COLOR5_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
++//CB_COLOR5_DCC_BASE_EXT
++#define CB_COLOR5_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
++#define CB_COLOR5_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
++//CB_COLOR6_BASE
++#define CB_COLOR6_BASE__BASE_256B__SHIFT 0x0
++#define CB_COLOR6_BASE__BASE_256B_MASK 0xFFFFFFFFL
++//CB_COLOR6_BASE_EXT
++#define CB_COLOR6_BASE_EXT__BASE_256B__SHIFT 0x0
++#define CB_COLOR6_BASE_EXT__BASE_256B_MASK 0x000000FFL
++//CB_COLOR6_ATTRIB2
++#define CB_COLOR6_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
++#define CB_COLOR6_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
++#define CB_COLOR6_ATTRIB2__MAX_MIP__SHIFT 0x1c
++#define CB_COLOR6_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
++#define CB_COLOR6_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
++#define CB_COLOR6_ATTRIB2__MAX_MIP_MASK 0xF0000000L
++//CB_COLOR6_VIEW
++#define CB_COLOR6_VIEW__SLICE_START__SHIFT 0x0
++#define CB_COLOR6_VIEW__SLICE_MAX__SHIFT 0xd
++#define CB_COLOR6_VIEW__MIP_LEVEL__SHIFT 0x18
++#define CB_COLOR6_VIEW__SLICE_START_MASK 0x000007FFL
++#define CB_COLOR6_VIEW__SLICE_MAX_MASK 0x00FFE000L
++#define CB_COLOR6_VIEW__MIP_LEVEL_MASK 0x0F000000L
++//CB_COLOR6_INFO
++#define CB_COLOR6_INFO__ENDIAN__SHIFT 0x0
++#define CB_COLOR6_INFO__FORMAT__SHIFT 0x2
++#define CB_COLOR6_INFO__NUMBER_TYPE__SHIFT 0x8
++#define CB_COLOR6_INFO__COMP_SWAP__SHIFT 0xb
++#define CB_COLOR6_INFO__FAST_CLEAR__SHIFT 0xd
++#define CB_COLOR6_INFO__COMPRESSION__SHIFT 0xe
++#define CB_COLOR6_INFO__BLEND_CLAMP__SHIFT 0xf
++#define CB_COLOR6_INFO__BLEND_BYPASS__SHIFT 0x10
++#define CB_COLOR6_INFO__SIMPLE_FLOAT__SHIFT 0x11
++#define CB_COLOR6_INFO__ROUND_MODE__SHIFT 0x12
++#define CB_COLOR6_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
++#define CB_COLOR6_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
++#define CB_COLOR6_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x1a
++#define CB_COLOR6_INFO__FMASK_COMPRESS_1FRAG_ONLY__SHIFT 0x1b
++#define CB_COLOR6_INFO__DCC_ENABLE__SHIFT 0x1c
++#define CB_COLOR6_INFO__CMASK_ADDR_TYPE__SHIFT 0x1d
++#define CB_COLOR6_INFO__ENDIAN_MASK 0x00000003L
++#define CB_COLOR6_INFO__FORMAT_MASK 0x0000007CL
++#define CB_COLOR6_INFO__NUMBER_TYPE_MASK 0x00000700L
++#define CB_COLOR6_INFO__COMP_SWAP_MASK 0x00001800L
++#define CB_COLOR6_INFO__FAST_CLEAR_MASK 0x00002000L
++#define CB_COLOR6_INFO__COMPRESSION_MASK 0x00004000L
++#define CB_COLOR6_INFO__BLEND_CLAMP_MASK 0x00008000L
++#define CB_COLOR6_INFO__BLEND_BYPASS_MASK 0x00010000L
++#define CB_COLOR6_INFO__SIMPLE_FLOAT_MASK 0x00020000L
++#define CB_COLOR6_INFO__ROUND_MODE_MASK 0x00040000L
++#define CB_COLOR6_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
++#define CB_COLOR6_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
++#define CB_COLOR6_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
++#define CB_COLOR6_INFO__FMASK_COMPRESS_1FRAG_ONLY_MASK 0x08000000L
++#define CB_COLOR6_INFO__DCC_ENABLE_MASK 0x10000000L
++#define CB_COLOR6_INFO__CMASK_ADDR_TYPE_MASK 0x60000000L
++//CB_COLOR6_ATTRIB
++#define CB_COLOR6_ATTRIB__MIP0_DEPTH__SHIFT 0x0
++#define CB_COLOR6_ATTRIB__META_LINEAR__SHIFT 0xb
++#define CB_COLOR6_ATTRIB__NUM_SAMPLES__SHIFT 0xc
++#define CB_COLOR6_ATTRIB__NUM_FRAGMENTS__SHIFT 0xf
++#define CB_COLOR6_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x11
++#define CB_COLOR6_ATTRIB__COLOR_SW_MODE__SHIFT 0x12
++#define CB_COLOR6_ATTRIB__FMASK_SW_MODE__SHIFT 0x17
++#define CB_COLOR6_ATTRIB__RESOURCE_TYPE__SHIFT 0x1c
++#define CB_COLOR6_ATTRIB__RB_ALIGNED__SHIFT 0x1e
++#define CB_COLOR6_ATTRIB__PIPE_ALIGNED__SHIFT 0x1f
++#define CB_COLOR6_ATTRIB__MIP0_DEPTH_MASK 0x000007FFL
++#define CB_COLOR6_ATTRIB__META_LINEAR_MASK 0x00000800L
++#define CB_COLOR6_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
++#define CB_COLOR6_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
++#define CB_COLOR6_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
++#define CB_COLOR6_ATTRIB__COLOR_SW_MODE_MASK 0x007C0000L
++#define CB_COLOR6_ATTRIB__FMASK_SW_MODE_MASK 0x0F800000L
++#define CB_COLOR6_ATTRIB__RESOURCE_TYPE_MASK 0x30000000L
++#define CB_COLOR6_ATTRIB__RB_ALIGNED_MASK 0x40000000L
++#define CB_COLOR6_ATTRIB__PIPE_ALIGNED_MASK 0x80000000L
++//CB_COLOR6_DCC_CONTROL
++#define CB_COLOR6_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT 0x0
++#define CB_COLOR6_DCC_CONTROL__KEY_CLEAR_ENABLE__SHIFT 0x1
++#define CB_COLOR6_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
++#define CB_COLOR6_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
++#define CB_COLOR6_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
++#define CB_COLOR6_DCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
++#define CB_COLOR6_DCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
++#define CB_COLOR6_DCC_CONTROL__LOSSY_RGB_PRECISION__SHIFT 0xa
++#define CB_COLOR6_DCC_CONTROL__LOSSY_ALPHA_PRECISION__SHIFT 0xe
++#define CB_COLOR6_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK 0x00000001L
++#define CB_COLOR6_DCC_CONTROL__KEY_CLEAR_ENABLE_MASK 0x00000002L
++#define CB_COLOR6_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
++#define CB_COLOR6_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
++#define CB_COLOR6_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
++#define CB_COLOR6_DCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
++#define CB_COLOR6_DCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
++#define CB_COLOR6_DCC_CONTROL__LOSSY_RGB_PRECISION_MASK 0x00003C00L
++#define CB_COLOR6_DCC_CONTROL__LOSSY_ALPHA_PRECISION_MASK 0x0003C000L
++//CB_COLOR6_CMASK
++#define CB_COLOR6_CMASK__BASE_256B__SHIFT 0x0
++#define CB_COLOR6_CMASK__BASE_256B_MASK 0xFFFFFFFFL
++//CB_COLOR6_CMASK_BASE_EXT
++#define CB_COLOR6_CMASK_BASE_EXT__BASE_256B__SHIFT 0x0
++#define CB_COLOR6_CMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
++//CB_COLOR6_FMASK
++#define CB_COLOR6_FMASK__BASE_256B__SHIFT 0x0
++#define CB_COLOR6_FMASK__BASE_256B_MASK 0xFFFFFFFFL
++//CB_COLOR6_FMASK_BASE_EXT
++#define CB_COLOR6_FMASK_BASE_EXT__BASE_256B__SHIFT 0x0
++#define CB_COLOR6_FMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
++//CB_COLOR6_CLEAR_WORD0
++#define CB_COLOR6_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x0
++#define CB_COLOR6_CLEAR_WORD0__CLEAR_WORD0_MASK 0xFFFFFFFFL
++//CB_COLOR6_CLEAR_WORD1
++#define CB_COLOR6_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x0
++#define CB_COLOR6_CLEAR_WORD1__CLEAR_WORD1_MASK 0xFFFFFFFFL
++//CB_COLOR6_DCC_BASE
++#define CB_COLOR6_DCC_BASE__BASE_256B__SHIFT 0x0
++#define CB_COLOR6_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
++//CB_COLOR6_DCC_BASE_EXT
++#define CB_COLOR6_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
++#define CB_COLOR6_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
++//CB_COLOR7_BASE
++#define CB_COLOR7_BASE__BASE_256B__SHIFT 0x0
++#define CB_COLOR7_BASE__BASE_256B_MASK 0xFFFFFFFFL
++//CB_COLOR7_BASE_EXT
++#define CB_COLOR7_BASE_EXT__BASE_256B__SHIFT 0x0
++#define CB_COLOR7_BASE_EXT__BASE_256B_MASK 0x000000FFL
++//CB_COLOR7_ATTRIB2
++#define CB_COLOR7_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
++#define CB_COLOR7_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
++#define CB_COLOR7_ATTRIB2__MAX_MIP__SHIFT 0x1c
++#define CB_COLOR7_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
++#define CB_COLOR7_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
++#define CB_COLOR7_ATTRIB2__MAX_MIP_MASK 0xF0000000L
++//CB_COLOR7_VIEW
++#define CB_COLOR7_VIEW__SLICE_START__SHIFT 0x0
++#define CB_COLOR7_VIEW__SLICE_MAX__SHIFT 0xd
++#define CB_COLOR7_VIEW__MIP_LEVEL__SHIFT 0x18
++#define CB_COLOR7_VIEW__SLICE_START_MASK 0x000007FFL
++#define CB_COLOR7_VIEW__SLICE_MAX_MASK 0x00FFE000L
++#define CB_COLOR7_VIEW__MIP_LEVEL_MASK 0x0F000000L
++//CB_COLOR7_INFO
++#define CB_COLOR7_INFO__ENDIAN__SHIFT 0x0
++#define CB_COLOR7_INFO__FORMAT__SHIFT 0x2
++#define CB_COLOR7_INFO__NUMBER_TYPE__SHIFT 0x8
++#define CB_COLOR7_INFO__COMP_SWAP__SHIFT 0xb
++#define CB_COLOR7_INFO__FAST_CLEAR__SHIFT 0xd
++#define CB_COLOR7_INFO__COMPRESSION__SHIFT 0xe
++#define CB_COLOR7_INFO__BLEND_CLAMP__SHIFT 0xf
++#define CB_COLOR7_INFO__BLEND_BYPASS__SHIFT 0x10
++#define CB_COLOR7_INFO__SIMPLE_FLOAT__SHIFT 0x11
++#define CB_COLOR7_INFO__ROUND_MODE__SHIFT 0x12
++#define CB_COLOR7_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
++#define CB_COLOR7_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
++#define CB_COLOR7_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x1a
++#define CB_COLOR7_INFO__FMASK_COMPRESS_1FRAG_ONLY__SHIFT 0x1b
++#define CB_COLOR7_INFO__DCC_ENABLE__SHIFT 0x1c
++#define CB_COLOR7_INFO__CMASK_ADDR_TYPE__SHIFT 0x1d
++#define CB_COLOR7_INFO__ENDIAN_MASK 0x00000003L
++#define CB_COLOR7_INFO__FORMAT_MASK 0x0000007CL
++#define CB_COLOR7_INFO__NUMBER_TYPE_MASK 0x00000700L
++#define CB_COLOR7_INFO__COMP_SWAP_MASK 0x00001800L
++#define CB_COLOR7_INFO__FAST_CLEAR_MASK 0x00002000L
++#define CB_COLOR7_INFO__COMPRESSION_MASK 0x00004000L
++#define CB_COLOR7_INFO__BLEND_CLAMP_MASK 0x00008000L
++#define CB_COLOR7_INFO__BLEND_BYPASS_MASK 0x00010000L
++#define CB_COLOR7_INFO__SIMPLE_FLOAT_MASK 0x00020000L
++#define CB_COLOR7_INFO__ROUND_MODE_MASK 0x00040000L
++#define CB_COLOR7_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
++#define CB_COLOR7_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
++#define CB_COLOR7_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
++#define CB_COLOR7_INFO__FMASK_COMPRESS_1FRAG_ONLY_MASK 0x08000000L
++#define CB_COLOR7_INFO__DCC_ENABLE_MASK 0x10000000L
++#define CB_COLOR7_INFO__CMASK_ADDR_TYPE_MASK 0x60000000L
++//CB_COLOR7_ATTRIB
++#define CB_COLOR7_ATTRIB__MIP0_DEPTH__SHIFT 0x0
++#define CB_COLOR7_ATTRIB__META_LINEAR__SHIFT 0xb
++#define CB_COLOR7_ATTRIB__NUM_SAMPLES__SHIFT 0xc
++#define CB_COLOR7_ATTRIB__NUM_FRAGMENTS__SHIFT 0xf
++#define CB_COLOR7_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x11
++#define CB_COLOR7_ATTRIB__COLOR_SW_MODE__SHIFT 0x12
++#define CB_COLOR7_ATTRIB__FMASK_SW_MODE__SHIFT 0x17
++#define CB_COLOR7_ATTRIB__RESOURCE_TYPE__SHIFT 0x1c
++#define CB_COLOR7_ATTRIB__RB_ALIGNED__SHIFT 0x1e
++#define CB_COLOR7_ATTRIB__PIPE_ALIGNED__SHIFT 0x1f
++#define CB_COLOR7_ATTRIB__MIP0_DEPTH_MASK 0x000007FFL
++#define CB_COLOR7_ATTRIB__META_LINEAR_MASK 0x00000800L
++#define CB_COLOR7_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
++#define CB_COLOR7_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
++#define CB_COLOR7_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
++#define CB_COLOR7_ATTRIB__COLOR_SW_MODE_MASK 0x007C0000L
++#define CB_COLOR7_ATTRIB__FMASK_SW_MODE_MASK 0x0F800000L
++#define CB_COLOR7_ATTRIB__RESOURCE_TYPE_MASK 0x30000000L
++#define CB_COLOR7_ATTRIB__RB_ALIGNED_MASK 0x40000000L
++#define CB_COLOR7_ATTRIB__PIPE_ALIGNED_MASK 0x80000000L
++//CB_COLOR7_DCC_CONTROL
++#define CB_COLOR7_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT 0x0
++#define CB_COLOR7_DCC_CONTROL__KEY_CLEAR_ENABLE__SHIFT 0x1
++#define CB_COLOR7_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
++#define CB_COLOR7_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
++#define CB_COLOR7_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
++#define CB_COLOR7_DCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
++#define CB_COLOR7_DCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
++#define CB_COLOR7_DCC_CONTROL__LOSSY_RGB_PRECISION__SHIFT 0xa
++#define CB_COLOR7_DCC_CONTROL__LOSSY_ALPHA_PRECISION__SHIFT 0xe
++#define CB_COLOR7_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK 0x00000001L
++#define CB_COLOR7_DCC_CONTROL__KEY_CLEAR_ENABLE_MASK 0x00000002L
++#define CB_COLOR7_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
++#define CB_COLOR7_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
++#define CB_COLOR7_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
++#define CB_COLOR7_DCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
++#define CB_COLOR7_DCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
++#define CB_COLOR7_DCC_CONTROL__LOSSY_RGB_PRECISION_MASK 0x00003C00L
++#define CB_COLOR7_DCC_CONTROL__LOSSY_ALPHA_PRECISION_MASK 0x0003C000L
++//CB_COLOR7_CMASK
++#define CB_COLOR7_CMASK__BASE_256B__SHIFT 0x0
++#define CB_COLOR7_CMASK__BASE_256B_MASK 0xFFFFFFFFL
++//CB_COLOR7_CMASK_BASE_EXT
++#define CB_COLOR7_CMASK_BASE_EXT__BASE_256B__SHIFT 0x0
++#define CB_COLOR7_CMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
++//CB_COLOR7_FMASK
++#define CB_COLOR7_FMASK__BASE_256B__SHIFT 0x0
++#define CB_COLOR7_FMASK__BASE_256B_MASK 0xFFFFFFFFL
++//CB_COLOR7_FMASK_BASE_EXT
++#define CB_COLOR7_FMASK_BASE_EXT__BASE_256B__SHIFT 0x0
++#define CB_COLOR7_FMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
++//CB_COLOR7_CLEAR_WORD0
++#define CB_COLOR7_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x0
++#define CB_COLOR7_CLEAR_WORD0__CLEAR_WORD0_MASK 0xFFFFFFFFL
++//CB_COLOR7_CLEAR_WORD1
++#define CB_COLOR7_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x0
++#define CB_COLOR7_CLEAR_WORD1__CLEAR_WORD1_MASK 0xFFFFFFFFL
++//CB_COLOR7_DCC_BASE
++#define CB_COLOR7_DCC_BASE__BASE_256B__SHIFT 0x0
++#define CB_COLOR7_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
++//CB_COLOR7_DCC_BASE_EXT
++#define CB_COLOR7_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
++#define CB_COLOR7_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
++
++
++// addressBlock: gc_gfxudec
++//CP_EOP_DONE_ADDR_LO
++#define CP_EOP_DONE_ADDR_LO__ADDR_LO__SHIFT 0x2
++#define CP_EOP_DONE_ADDR_LO__ADDR_LO_MASK 0xFFFFFFFCL
++//CP_EOP_DONE_ADDR_HI
++#define CP_EOP_DONE_ADDR_HI__ADDR_HI__SHIFT 0x0
++#define CP_EOP_DONE_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
++//CP_EOP_DONE_DATA_LO
++#define CP_EOP_DONE_DATA_LO__DATA_LO__SHIFT 0x0
++#define CP_EOP_DONE_DATA_LO__DATA_LO_MASK 0xFFFFFFFFL
++//CP_EOP_DONE_DATA_HI
++#define CP_EOP_DONE_DATA_HI__DATA_HI__SHIFT 0x0
++#define CP_EOP_DONE_DATA_HI__DATA_HI_MASK 0xFFFFFFFFL
++//CP_EOP_LAST_FENCE_LO
++#define CP_EOP_LAST_FENCE_LO__LAST_FENCE_LO__SHIFT 0x0
++#define CP_EOP_LAST_FENCE_LO__LAST_FENCE_LO_MASK 0xFFFFFFFFL
++//CP_EOP_LAST_FENCE_HI
++#define CP_EOP_LAST_FENCE_HI__LAST_FENCE_HI__SHIFT 0x0
++#define CP_EOP_LAST_FENCE_HI__LAST_FENCE_HI_MASK 0xFFFFFFFFL
++//CP_STREAM_OUT_ADDR_LO
++#define CP_STREAM_OUT_ADDR_LO__STREAM_OUT_ADDR_LO__SHIFT 0x2
++#define CP_STREAM_OUT_ADDR_LO__STREAM_OUT_ADDR_LO_MASK 0xFFFFFFFCL
++//CP_STREAM_OUT_ADDR_HI
++#define CP_STREAM_OUT_ADDR_HI__STREAM_OUT_ADDR_HI__SHIFT 0x0
++#define CP_STREAM_OUT_ADDR_HI__STREAM_OUT_ADDR_HI_MASK 0x0000FFFFL
++//CP_NUM_PRIM_WRITTEN_COUNT0_LO
++#define CP_NUM_PRIM_WRITTEN_COUNT0_LO__NUM_PRIM_WRITTEN_CNT0_LO__SHIFT 0x0
++#define CP_NUM_PRIM_WRITTEN_COUNT0_LO__NUM_PRIM_WRITTEN_CNT0_LO_MASK 0xFFFFFFFFL
++//CP_NUM_PRIM_WRITTEN_COUNT0_HI
++#define CP_NUM_PRIM_WRITTEN_COUNT0_HI__NUM_PRIM_WRITTEN_CNT0_HI__SHIFT 0x0
++#define CP_NUM_PRIM_WRITTEN_COUNT0_HI__NUM_PRIM_WRITTEN_CNT0_HI_MASK 0xFFFFFFFFL
++//CP_NUM_PRIM_NEEDED_COUNT0_LO
++#define CP_NUM_PRIM_NEEDED_COUNT0_LO__NUM_PRIM_NEEDED_CNT0_LO__SHIFT 0x0
++#define CP_NUM_PRIM_NEEDED_COUNT0_LO__NUM_PRIM_NEEDED_CNT0_LO_MASK 0xFFFFFFFFL
++//CP_NUM_PRIM_NEEDED_COUNT0_HI
++#define CP_NUM_PRIM_NEEDED_COUNT0_HI__NUM_PRIM_NEEDED_CNT0_HI__SHIFT 0x0
++#define CP_NUM_PRIM_NEEDED_COUNT0_HI__NUM_PRIM_NEEDED_CNT0_HI_MASK 0xFFFFFFFFL
++//CP_NUM_PRIM_WRITTEN_COUNT1_LO
++#define CP_NUM_PRIM_WRITTEN_COUNT1_LO__NUM_PRIM_WRITTEN_CNT1_LO__SHIFT 0x0
++#define CP_NUM_PRIM_WRITTEN_COUNT1_LO__NUM_PRIM_WRITTEN_CNT1_LO_MASK 0xFFFFFFFFL
++//CP_NUM_PRIM_WRITTEN_COUNT1_HI
++#define CP_NUM_PRIM_WRITTEN_COUNT1_HI__NUM_PRIM_WRITTEN_CNT1_HI__SHIFT 0x0
++#define CP_NUM_PRIM_WRITTEN_COUNT1_HI__NUM_PRIM_WRITTEN_CNT1_HI_MASK 0xFFFFFFFFL
++//CP_NUM_PRIM_NEEDED_COUNT1_LO
++#define CP_NUM_PRIM_NEEDED_COUNT1_LO__NUM_PRIM_NEEDED_CNT1_LO__SHIFT 0x0
++#define CP_NUM_PRIM_NEEDED_COUNT1_LO__NUM_PRIM_NEEDED_CNT1_LO_MASK 0xFFFFFFFFL
++//CP_NUM_PRIM_NEEDED_COUNT1_HI
++#define CP_NUM_PRIM_NEEDED_COUNT1_HI__NUM_PRIM_NEEDED_CNT1_HI__SHIFT 0x0
++#define CP_NUM_PRIM_NEEDED_COUNT1_HI__NUM_PRIM_NEEDED_CNT1_HI_MASK 0xFFFFFFFFL
++//CP_NUM_PRIM_WRITTEN_COUNT2_LO
++#define CP_NUM_PRIM_WRITTEN_COUNT2_LO__NUM_PRIM_WRITTEN_CNT2_LO__SHIFT 0x0
++#define CP_NUM_PRIM_WRITTEN_COUNT2_LO__NUM_PRIM_WRITTEN_CNT2_LO_MASK 0xFFFFFFFFL
++//CP_NUM_PRIM_WRITTEN_COUNT2_HI
++#define CP_NUM_PRIM_WRITTEN_COUNT2_HI__NUM_PRIM_WRITTEN_CNT2_HI__SHIFT 0x0
++#define CP_NUM_PRIM_WRITTEN_COUNT2_HI__NUM_PRIM_WRITTEN_CNT2_HI_MASK 0xFFFFFFFFL
++//CP_NUM_PRIM_NEEDED_COUNT2_LO
++#define CP_NUM_PRIM_NEEDED_COUNT2_LO__NUM_PRIM_NEEDED_CNT2_LO__SHIFT 0x0
++#define CP_NUM_PRIM_NEEDED_COUNT2_LO__NUM_PRIM_NEEDED_CNT2_LO_MASK 0xFFFFFFFFL
++//CP_NUM_PRIM_NEEDED_COUNT2_HI
++#define CP_NUM_PRIM_NEEDED_COUNT2_HI__NUM_PRIM_NEEDED_CNT2_HI__SHIFT 0x0
++#define CP_NUM_PRIM_NEEDED_COUNT2_HI__NUM_PRIM_NEEDED_CNT2_HI_MASK 0xFFFFFFFFL
++//CP_NUM_PRIM_WRITTEN_COUNT3_LO
++#define CP_NUM_PRIM_WRITTEN_COUNT3_LO__NUM_PRIM_WRITTEN_CNT3_LO__SHIFT 0x0
++#define CP_NUM_PRIM_WRITTEN_COUNT3_LO__NUM_PRIM_WRITTEN_CNT3_LO_MASK 0xFFFFFFFFL
++//CP_NUM_PRIM_WRITTEN_COUNT3_HI
++#define CP_NUM_PRIM_WRITTEN_COUNT3_HI__NUM_PRIM_WRITTEN_CNT3_HI__SHIFT 0x0
++#define CP_NUM_PRIM_WRITTEN_COUNT3_HI__NUM_PRIM_WRITTEN_CNT3_HI_MASK 0xFFFFFFFFL
++//CP_NUM_PRIM_NEEDED_COUNT3_LO
++#define CP_NUM_PRIM_NEEDED_COUNT3_LO__NUM_PRIM_NEEDED_CNT3_LO__SHIFT 0x0
++#define CP_NUM_PRIM_NEEDED_COUNT3_LO__NUM_PRIM_NEEDED_CNT3_LO_MASK 0xFFFFFFFFL
++//CP_NUM_PRIM_NEEDED_COUNT3_HI
++#define CP_NUM_PRIM_NEEDED_COUNT3_HI__NUM_PRIM_NEEDED_CNT3_HI__SHIFT 0x0
++#define CP_NUM_PRIM_NEEDED_COUNT3_HI__NUM_PRIM_NEEDED_CNT3_HI_MASK 0xFFFFFFFFL
++//CP_PIPE_STATS_ADDR_LO
++#define CP_PIPE_STATS_ADDR_LO__PIPE_STATS_ADDR_LO__SHIFT 0x2
++#define CP_PIPE_STATS_ADDR_LO__PIPE_STATS_ADDR_LO_MASK 0xFFFFFFFCL
++//CP_PIPE_STATS_ADDR_HI
++#define CP_PIPE_STATS_ADDR_HI__PIPE_STATS_ADDR_HI__SHIFT 0x0
++#define CP_PIPE_STATS_ADDR_HI__PIPE_STATS_ADDR_HI_MASK 0x0000FFFFL
++//CP_VGT_IAVERT_COUNT_LO
++#define CP_VGT_IAVERT_COUNT_LO__IAVERT_COUNT_LO__SHIFT 0x0
++#define CP_VGT_IAVERT_COUNT_LO__IAVERT_COUNT_LO_MASK 0xFFFFFFFFL
++//CP_VGT_IAVERT_COUNT_HI
++#define CP_VGT_IAVERT_COUNT_HI__IAVERT_COUNT_HI__SHIFT 0x0
++#define CP_VGT_IAVERT_COUNT_HI__IAVERT_COUNT_HI_MASK 0xFFFFFFFFL
++//CP_VGT_IAPRIM_COUNT_LO
++#define CP_VGT_IAPRIM_COUNT_LO__IAPRIM_COUNT_LO__SHIFT 0x0
++#define CP_VGT_IAPRIM_COUNT_LO__IAPRIM_COUNT_LO_MASK 0xFFFFFFFFL
++//CP_VGT_IAPRIM_COUNT_HI
++#define CP_VGT_IAPRIM_COUNT_HI__IAPRIM_COUNT_HI__SHIFT 0x0
++#define CP_VGT_IAPRIM_COUNT_HI__IAPRIM_COUNT_HI_MASK 0xFFFFFFFFL
++//CP_VGT_GSPRIM_COUNT_LO
++#define CP_VGT_GSPRIM_COUNT_LO__GSPRIM_COUNT_LO__SHIFT 0x0
++#define CP_VGT_GSPRIM_COUNT_LO__GSPRIM_COUNT_LO_MASK 0xFFFFFFFFL
++//CP_VGT_GSPRIM_COUNT_HI
++#define CP_VGT_GSPRIM_COUNT_HI__GSPRIM_COUNT_HI__SHIFT 0x0
++#define CP_VGT_GSPRIM_COUNT_HI__GSPRIM_COUNT_HI_MASK 0xFFFFFFFFL
++//CP_VGT_VSINVOC_COUNT_LO
++#define CP_VGT_VSINVOC_COUNT_LO__VSINVOC_COUNT_LO__SHIFT 0x0
++#define CP_VGT_VSINVOC_COUNT_LO__VSINVOC_COUNT_LO_MASK 0xFFFFFFFFL
++//CP_VGT_VSINVOC_COUNT_HI
++#define CP_VGT_VSINVOC_COUNT_HI__VSINVOC_COUNT_HI__SHIFT 0x0
++#define CP_VGT_VSINVOC_COUNT_HI__VSINVOC_COUNT_HI_MASK 0xFFFFFFFFL
++//CP_VGT_GSINVOC_COUNT_LO
++#define CP_VGT_GSINVOC_COUNT_LO__GSINVOC_COUNT_LO__SHIFT 0x0
++#define CP_VGT_GSINVOC_COUNT_LO__GSINVOC_COUNT_LO_MASK 0xFFFFFFFFL
++//CP_VGT_GSINVOC_COUNT_HI
++#define CP_VGT_GSINVOC_COUNT_HI__GSINVOC_COUNT_HI__SHIFT 0x0
++#define CP_VGT_GSINVOC_COUNT_HI__GSINVOC_COUNT_HI_MASK 0xFFFFFFFFL
++//CP_VGT_HSINVOC_COUNT_LO
++#define CP_VGT_HSINVOC_COUNT_LO__HSINVOC_COUNT_LO__SHIFT 0x0
++#define CP_VGT_HSINVOC_COUNT_LO__HSINVOC_COUNT_LO_MASK 0xFFFFFFFFL
++//CP_VGT_HSINVOC_COUNT_HI
++#define CP_VGT_HSINVOC_COUNT_HI__HSINVOC_COUNT_HI__SHIFT 0x0
++#define CP_VGT_HSINVOC_COUNT_HI__HSINVOC_COUNT_HI_MASK 0xFFFFFFFFL
++//CP_VGT_DSINVOC_COUNT_LO
++#define CP_VGT_DSINVOC_COUNT_LO__DSINVOC_COUNT_LO__SHIFT 0x0
++#define CP_VGT_DSINVOC_COUNT_LO__DSINVOC_COUNT_LO_MASK 0xFFFFFFFFL
++//CP_VGT_DSINVOC_COUNT_HI
++#define CP_VGT_DSINVOC_COUNT_HI__DSINVOC_COUNT_HI__SHIFT 0x0
++#define CP_VGT_DSINVOC_COUNT_HI__DSINVOC_COUNT_HI_MASK 0xFFFFFFFFL
++//CP_PA_CINVOC_COUNT_LO
++#define CP_PA_CINVOC_COUNT_LO__CINVOC_COUNT_LO__SHIFT 0x0
++#define CP_PA_CINVOC_COUNT_LO__CINVOC_COUNT_LO_MASK 0xFFFFFFFFL
++//CP_PA_CINVOC_COUNT_HI
++#define CP_PA_CINVOC_COUNT_HI__CINVOC_COUNT_HI__SHIFT 0x0
++#define CP_PA_CINVOC_COUNT_HI__CINVOC_COUNT_HI_MASK 0xFFFFFFFFL
++//CP_PA_CPRIM_COUNT_LO
++#define CP_PA_CPRIM_COUNT_LO__CPRIM_COUNT_LO__SHIFT 0x0
++#define CP_PA_CPRIM_COUNT_LO__CPRIM_COUNT_LO_MASK 0xFFFFFFFFL
++//CP_PA_CPRIM_COUNT_HI
++#define CP_PA_CPRIM_COUNT_HI__CPRIM_COUNT_HI__SHIFT 0x0
++#define CP_PA_CPRIM_COUNT_HI__CPRIM_COUNT_HI_MASK 0xFFFFFFFFL
++//CP_SC_PSINVOC_COUNT0_LO
++#define CP_SC_PSINVOC_COUNT0_LO__PSINVOC_COUNT0_LO__SHIFT 0x0
++#define CP_SC_PSINVOC_COUNT0_LO__PSINVOC_COUNT0_LO_MASK 0xFFFFFFFFL
++//CP_SC_PSINVOC_COUNT0_HI
++#define CP_SC_PSINVOC_COUNT0_HI__PSINVOC_COUNT0_HI__SHIFT 0x0
++#define CP_SC_PSINVOC_COUNT0_HI__PSINVOC_COUNT0_HI_MASK 0xFFFFFFFFL
++//CP_SC_PSINVOC_COUNT1_LO
++#define CP_SC_PSINVOC_COUNT1_LO__OBSOLETE__SHIFT 0x0
++#define CP_SC_PSINVOC_COUNT1_LO__OBSOLETE_MASK 0xFFFFFFFFL
++//CP_SC_PSINVOC_COUNT1_HI
++#define CP_SC_PSINVOC_COUNT1_HI__OBSOLETE__SHIFT 0x0
++#define CP_SC_PSINVOC_COUNT1_HI__OBSOLETE_MASK 0xFFFFFFFFL
++//CP_VGT_CSINVOC_COUNT_LO
++#define CP_VGT_CSINVOC_COUNT_LO__CSINVOC_COUNT_LO__SHIFT 0x0
++#define CP_VGT_CSINVOC_COUNT_LO__CSINVOC_COUNT_LO_MASK 0xFFFFFFFFL
++//CP_VGT_CSINVOC_COUNT_HI
++#define CP_VGT_CSINVOC_COUNT_HI__CSINVOC_COUNT_HI__SHIFT 0x0
++#define CP_VGT_CSINVOC_COUNT_HI__CSINVOC_COUNT_HI_MASK 0xFFFFFFFFL
++//CP_PIPE_STATS_CONTROL
++#define CP_PIPE_STATS_CONTROL__CACHE_POLICY__SHIFT 0x19
++#define CP_PIPE_STATS_CONTROL__CACHE_POLICY_MASK 0x02000000L
++//CP_STREAM_OUT_CONTROL
++#define CP_STREAM_OUT_CONTROL__CACHE_POLICY__SHIFT 0x19
++#define CP_STREAM_OUT_CONTROL__CACHE_POLICY_MASK 0x02000000L
++//CP_STRMOUT_CNTL
++#define CP_STRMOUT_CNTL__OFFSET_UPDATE_DONE__SHIFT 0x0
++#define CP_STRMOUT_CNTL__OFFSET_UPDATE_DONE_MASK 0x00000001L
++//SCRATCH_REG0
++#define SCRATCH_REG0__SCRATCH_REG0__SHIFT 0x0
++#define SCRATCH_REG0__SCRATCH_REG0_MASK 0xFFFFFFFFL
++//SCRATCH_REG1
++#define SCRATCH_REG1__SCRATCH_REG1__SHIFT 0x0
++#define SCRATCH_REG1__SCRATCH_REG1_MASK 0xFFFFFFFFL
++//SCRATCH_REG2
++#define SCRATCH_REG2__SCRATCH_REG2__SHIFT 0x0
++#define SCRATCH_REG2__SCRATCH_REG2_MASK 0xFFFFFFFFL
++//SCRATCH_REG3
++#define SCRATCH_REG3__SCRATCH_REG3__SHIFT 0x0
++#define SCRATCH_REG3__SCRATCH_REG3_MASK 0xFFFFFFFFL
++//SCRATCH_REG4
++#define SCRATCH_REG4__SCRATCH_REG4__SHIFT 0x0
++#define SCRATCH_REG4__SCRATCH_REG4_MASK 0xFFFFFFFFL
++//SCRATCH_REG5
++#define SCRATCH_REG5__SCRATCH_REG5__SHIFT 0x0
++#define SCRATCH_REG5__SCRATCH_REG5_MASK 0xFFFFFFFFL
++//SCRATCH_REG6
++#define SCRATCH_REG6__SCRATCH_REG6__SHIFT 0x0
++#define SCRATCH_REG6__SCRATCH_REG6_MASK 0xFFFFFFFFL
++//SCRATCH_REG7
++#define SCRATCH_REG7__SCRATCH_REG7__SHIFT 0x0
++#define SCRATCH_REG7__SCRATCH_REG7_MASK 0xFFFFFFFFL
++//CP_APPEND_DATA_HI
++#define CP_APPEND_DATA_HI__DATA__SHIFT 0x0
++#define CP_APPEND_DATA_HI__DATA_MASK 0xFFFFFFFFL
++//CP_APPEND_LAST_CS_FENCE_HI
++#define CP_APPEND_LAST_CS_FENCE_HI__LAST_FENCE__SHIFT 0x0
++#define CP_APPEND_LAST_CS_FENCE_HI__LAST_FENCE_MASK 0xFFFFFFFFL
++//CP_APPEND_LAST_PS_FENCE_HI
++#define CP_APPEND_LAST_PS_FENCE_HI__LAST_FENCE__SHIFT 0x0
++#define CP_APPEND_LAST_PS_FENCE_HI__LAST_FENCE_MASK 0xFFFFFFFFL
++//SCRATCH_UMSK
++#define SCRATCH_UMSK__OBSOLETE_UMSK__SHIFT 0x0
++#define SCRATCH_UMSK__OBSOLETE_SWAP__SHIFT 0x10
++#define SCRATCH_UMSK__OBSOLETE_UMSK_MASK 0x000000FFL
++#define SCRATCH_UMSK__OBSOLETE_SWAP_MASK 0x00030000L
++//SCRATCH_ADDR
++#define SCRATCH_ADDR__OBSOLETE_ADDR__SHIFT 0x0
++#define SCRATCH_ADDR__OBSOLETE_ADDR_MASK 0xFFFFFFFFL
++//CP_PFP_ATOMIC_PREOP_LO
++#define CP_PFP_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO__SHIFT 0x0
++#define CP_PFP_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO_MASK 0xFFFFFFFFL
++//CP_PFP_ATOMIC_PREOP_HI
++#define CP_PFP_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI__SHIFT 0x0
++#define CP_PFP_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI_MASK 0xFFFFFFFFL
++//CP_PFP_GDS_ATOMIC0_PREOP_LO
++#define CP_PFP_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO__SHIFT 0x0
++#define CP_PFP_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO_MASK 0xFFFFFFFFL
++//CP_PFP_GDS_ATOMIC0_PREOP_HI
++#define CP_PFP_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI__SHIFT 0x0
++#define CP_PFP_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI_MASK 0xFFFFFFFFL
++//CP_PFP_GDS_ATOMIC1_PREOP_LO
++#define CP_PFP_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO__SHIFT 0x0
++#define CP_PFP_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO_MASK 0xFFFFFFFFL
++//CP_PFP_GDS_ATOMIC1_PREOP_HI
++#define CP_PFP_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI__SHIFT 0x0
++#define CP_PFP_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI_MASK 0xFFFFFFFFL
++//CP_APPEND_ADDR_LO
++#define CP_APPEND_ADDR_LO__MEM_ADDR_LO__SHIFT 0x2
++#define CP_APPEND_ADDR_LO__MEM_ADDR_LO_MASK 0xFFFFFFFCL
++//CP_APPEND_ADDR_HI
++#define CP_APPEND_ADDR_HI__MEM_ADDR_HI__SHIFT 0x0
++#define CP_APPEND_ADDR_HI__CS_PS_SEL__SHIFT 0x10
++#define CP_APPEND_ADDR_HI__CACHE_POLICY__SHIFT 0x19
++#define CP_APPEND_ADDR_HI__COMMAND__SHIFT 0x1d
++#define CP_APPEND_ADDR_HI__MEM_ADDR_HI_MASK 0x0000FFFFL
++#define CP_APPEND_ADDR_HI__CS_PS_SEL_MASK 0x00010000L
++#define CP_APPEND_ADDR_HI__CACHE_POLICY_MASK 0x02000000L
++#define CP_APPEND_ADDR_HI__COMMAND_MASK 0xE0000000L
++//CP_APPEND_DATA_LO
++#define CP_APPEND_DATA_LO__DATA__SHIFT 0x0
++#define CP_APPEND_DATA_LO__DATA_MASK 0xFFFFFFFFL
++//CP_APPEND_LAST_CS_FENCE_LO
++#define CP_APPEND_LAST_CS_FENCE_LO__LAST_FENCE__SHIFT 0x0
++#define CP_APPEND_LAST_CS_FENCE_LO__LAST_FENCE_MASK 0xFFFFFFFFL
++//CP_APPEND_LAST_PS_FENCE_LO
++#define CP_APPEND_LAST_PS_FENCE_LO__LAST_FENCE__SHIFT 0x0
++#define CP_APPEND_LAST_PS_FENCE_LO__LAST_FENCE_MASK 0xFFFFFFFFL
++//CP_ATOMIC_PREOP_LO
++#define CP_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO__SHIFT 0x0
++#define CP_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO_MASK 0xFFFFFFFFL
++//CP_ME_ATOMIC_PREOP_LO
++#define CP_ME_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO__SHIFT 0x0
++#define CP_ME_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO_MASK 0xFFFFFFFFL
++//CP_ATOMIC_PREOP_HI
++#define CP_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI__SHIFT 0x0
++#define CP_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI_MASK 0xFFFFFFFFL
++//CP_ME_ATOMIC_PREOP_HI
++#define CP_ME_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI__SHIFT 0x0
++#define CP_ME_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI_MASK 0xFFFFFFFFL
++//CP_GDS_ATOMIC0_PREOP_LO
++#define CP_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO__SHIFT 0x0
++#define CP_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO_MASK 0xFFFFFFFFL
++//CP_ME_GDS_ATOMIC0_PREOP_LO
++#define CP_ME_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO__SHIFT 0x0
++#define CP_ME_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO_MASK 0xFFFFFFFFL
++//CP_GDS_ATOMIC0_PREOP_HI
++#define CP_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI__SHIFT 0x0
++#define CP_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI_MASK 0xFFFFFFFFL
++//CP_ME_GDS_ATOMIC0_PREOP_HI
++#define CP_ME_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI__SHIFT 0x0
++#define CP_ME_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI_MASK 0xFFFFFFFFL
++//CP_GDS_ATOMIC1_PREOP_LO
++#define CP_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO__SHIFT 0x0
++#define CP_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO_MASK 0xFFFFFFFFL
++//CP_ME_GDS_ATOMIC1_PREOP_LO
++#define CP_ME_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO__SHIFT 0x0
++#define CP_ME_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO_MASK 0xFFFFFFFFL
++//CP_GDS_ATOMIC1_PREOP_HI
++#define CP_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI__SHIFT 0x0
++#define CP_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI_MASK 0xFFFFFFFFL
++//CP_ME_GDS_ATOMIC1_PREOP_HI
++#define CP_ME_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI__SHIFT 0x0
++#define CP_ME_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI_MASK 0xFFFFFFFFL
++//CP_ME_MC_WADDR_LO
++#define CP_ME_MC_WADDR_LO__ME_MC_WADDR_LO__SHIFT 0x2
++#define CP_ME_MC_WADDR_LO__ME_MC_WADDR_LO_MASK 0xFFFFFFFCL
++//CP_ME_MC_WADDR_HI
++#define CP_ME_MC_WADDR_HI__ME_MC_WADDR_HI__SHIFT 0x0
++#define CP_ME_MC_WADDR_HI__CACHE_POLICY__SHIFT 0x16
++#define CP_ME_MC_WADDR_HI__ME_MC_WADDR_HI_MASK 0x0000FFFFL
++#define CP_ME_MC_WADDR_HI__CACHE_POLICY_MASK 0x00400000L
++//CP_ME_MC_WDATA_LO
++#define CP_ME_MC_WDATA_LO__ME_MC_WDATA_LO__SHIFT 0x0
++#define CP_ME_MC_WDATA_LO__ME_MC_WDATA_LO_MASK 0xFFFFFFFFL
++//CP_ME_MC_WDATA_HI
++#define CP_ME_MC_WDATA_HI__ME_MC_WDATA_HI__SHIFT 0x0
++#define CP_ME_MC_WDATA_HI__ME_MC_WDATA_HI_MASK 0xFFFFFFFFL
++//CP_ME_MC_RADDR_LO
++#define CP_ME_MC_RADDR_LO__ME_MC_RADDR_LO__SHIFT 0x2
++#define CP_ME_MC_RADDR_LO__ME_MC_RADDR_LO_MASK 0xFFFFFFFCL
++//CP_ME_MC_RADDR_HI
++#define CP_ME_MC_RADDR_HI__ME_MC_RADDR_HI__SHIFT 0x0
++#define CP_ME_MC_RADDR_HI__CACHE_POLICY__SHIFT 0x16
++#define CP_ME_MC_RADDR_HI__ME_MC_RADDR_HI_MASK 0x0000FFFFL
++#define CP_ME_MC_RADDR_HI__CACHE_POLICY_MASK 0x00400000L
++//CP_SEM_WAIT_TIMER
++#define CP_SEM_WAIT_TIMER__SEM_WAIT_TIMER__SHIFT 0x0
++#define CP_SEM_WAIT_TIMER__SEM_WAIT_TIMER_MASK 0xFFFFFFFFL
++//CP_SIG_SEM_ADDR_LO
++#define CP_SIG_SEM_ADDR_LO__SEM_ADDR_SWAP__SHIFT 0x0
++#define CP_SIG_SEM_ADDR_LO__SEM_ADDR_LO__SHIFT 0x3
++#define CP_SIG_SEM_ADDR_LO__SEM_ADDR_SWAP_MASK 0x00000003L
++#define CP_SIG_SEM_ADDR_LO__SEM_ADDR_LO_MASK 0xFFFFFFF8L
++//CP_SIG_SEM_ADDR_HI
++#define CP_SIG_SEM_ADDR_HI__SEM_ADDR_HI__SHIFT 0x0
++#define CP_SIG_SEM_ADDR_HI__SEM_USE_MAILBOX__SHIFT 0x10
++#define CP_SIG_SEM_ADDR_HI__SEM_SIGNAL_TYPE__SHIFT 0x14
++#define CP_SIG_SEM_ADDR_HI__SEM_CLIENT_CODE__SHIFT 0x18
++#define CP_SIG_SEM_ADDR_HI__SEM_SELECT__SHIFT 0x1d
++#define CP_SIG_SEM_ADDR_HI__SEM_ADDR_HI_MASK 0x0000FFFFL
++#define CP_SIG_SEM_ADDR_HI__SEM_USE_MAILBOX_MASK 0x00010000L
++#define CP_SIG_SEM_ADDR_HI__SEM_SIGNAL_TYPE_MASK 0x00100000L
++#define CP_SIG_SEM_ADDR_HI__SEM_CLIENT_CODE_MASK 0x03000000L
++#define CP_SIG_SEM_ADDR_HI__SEM_SELECT_MASK 0xE0000000L
++//CP_WAIT_REG_MEM_TIMEOUT
++#define CP_WAIT_REG_MEM_TIMEOUT__WAIT_REG_MEM_TIMEOUT__SHIFT 0x0
++#define CP_WAIT_REG_MEM_TIMEOUT__WAIT_REG_MEM_TIMEOUT_MASK 0xFFFFFFFFL
++//CP_WAIT_SEM_ADDR_LO
++#define CP_WAIT_SEM_ADDR_LO__SEM_ADDR_SWAP__SHIFT 0x0
++#define CP_WAIT_SEM_ADDR_LO__SEM_ADDR_LO__SHIFT 0x3
++#define CP_WAIT_SEM_ADDR_LO__SEM_ADDR_SWAP_MASK 0x00000003L
++#define CP_WAIT_SEM_ADDR_LO__SEM_ADDR_LO_MASK 0xFFFFFFF8L
++//CP_WAIT_SEM_ADDR_HI
++#define CP_WAIT_SEM_ADDR_HI__SEM_ADDR_HI__SHIFT 0x0
++#define CP_WAIT_SEM_ADDR_HI__SEM_USE_MAILBOX__SHIFT 0x10
++#define CP_WAIT_SEM_ADDR_HI__SEM_SIGNAL_TYPE__SHIFT 0x14
++#define CP_WAIT_SEM_ADDR_HI__SEM_CLIENT_CODE__SHIFT 0x18
++#define CP_WAIT_SEM_ADDR_HI__SEM_SELECT__SHIFT 0x1d
++#define CP_WAIT_SEM_ADDR_HI__SEM_ADDR_HI_MASK 0x0000FFFFL
++#define CP_WAIT_SEM_ADDR_HI__SEM_USE_MAILBOX_MASK 0x00010000L
++#define CP_WAIT_SEM_ADDR_HI__SEM_SIGNAL_TYPE_MASK 0x00100000L
++#define CP_WAIT_SEM_ADDR_HI__SEM_CLIENT_CODE_MASK 0x03000000L
++#define CP_WAIT_SEM_ADDR_HI__SEM_SELECT_MASK 0xE0000000L
++//CP_DMA_PFP_CONTROL
++#define CP_DMA_PFP_CONTROL__MEMLOG_CLEAR__SHIFT 0xa
++#define CP_DMA_PFP_CONTROL__SRC_CACHE_POLICY__SHIFT 0xd
++#define CP_DMA_PFP_CONTROL__DST_SELECT__SHIFT 0x14
++#define CP_DMA_PFP_CONTROL__DST_CACHE_POLICY__SHIFT 0x19
++#define CP_DMA_PFP_CONTROL__SRC_SELECT__SHIFT 0x1d
++#define CP_DMA_PFP_CONTROL__MEMLOG_CLEAR_MASK 0x00000400L
++#define CP_DMA_PFP_CONTROL__SRC_CACHE_POLICY_MASK 0x00002000L
++#define CP_DMA_PFP_CONTROL__DST_SELECT_MASK 0x00300000L
++#define CP_DMA_PFP_CONTROL__DST_CACHE_POLICY_MASK 0x02000000L
++#define CP_DMA_PFP_CONTROL__SRC_SELECT_MASK 0x60000000L
++//CP_DMA_ME_CONTROL
++#define CP_DMA_ME_CONTROL__MEMLOG_CLEAR__SHIFT 0xa
++#define CP_DMA_ME_CONTROL__SRC_CACHE_POLICY__SHIFT 0xd
++#define CP_DMA_ME_CONTROL__DST_SELECT__SHIFT 0x14
++#define CP_DMA_ME_CONTROL__DST_CACHE_POLICY__SHIFT 0x19
++#define CP_DMA_ME_CONTROL__SRC_SELECT__SHIFT 0x1d
++#define CP_DMA_ME_CONTROL__MEMLOG_CLEAR_MASK 0x00000400L
++#define CP_DMA_ME_CONTROL__SRC_CACHE_POLICY_MASK 0x00002000L
++#define CP_DMA_ME_CONTROL__DST_SELECT_MASK 0x00300000L
++#define CP_DMA_ME_CONTROL__DST_CACHE_POLICY_MASK 0x02000000L
++#define CP_DMA_ME_CONTROL__SRC_SELECT_MASK 0x60000000L
++//CP_COHER_BASE_HI
++#define CP_COHER_BASE_HI__COHER_BASE_HI_256B__SHIFT 0x0
++#define CP_COHER_BASE_HI__COHER_BASE_HI_256B_MASK 0x000000FFL
++//CP_COHER_START_DELAY
++#define CP_COHER_START_DELAY__START_DELAY_COUNT__SHIFT 0x0
++#define CP_COHER_START_DELAY__START_DELAY_COUNT_MASK 0x0000003FL
++//CP_COHER_CNTL
++#define CP_COHER_CNTL__TC_NC_ACTION_ENA__SHIFT 0x3
++#define CP_COHER_CNTL__TC_WC_ACTION_ENA__SHIFT 0x4
++#define CP_COHER_CNTL__TC_INV_METADATA_ACTION_ENA__SHIFT 0x5
++#define CP_COHER_CNTL__TCL1_VOL_ACTION_ENA__SHIFT 0xf
++#define CP_COHER_CNTL__TC_WB_ACTION_ENA__SHIFT 0x12
++#define CP_COHER_CNTL__TCL1_ACTION_ENA__SHIFT 0x16
++#define CP_COHER_CNTL__TC_ACTION_ENA__SHIFT 0x17
++#define CP_COHER_CNTL__CB_ACTION_ENA__SHIFT 0x19
++#define CP_COHER_CNTL__DB_ACTION_ENA__SHIFT 0x1a
++#define CP_COHER_CNTL__SH_KCACHE_ACTION_ENA__SHIFT 0x1b
++#define CP_COHER_CNTL__SH_KCACHE_VOL_ACTION_ENA__SHIFT 0x1c
++#define CP_COHER_CNTL__SH_ICACHE_ACTION_ENA__SHIFT 0x1d
++#define CP_COHER_CNTL__SH_KCACHE_WB_ACTION_ENA__SHIFT 0x1e
++#define CP_COHER_CNTL__TC_NC_ACTION_ENA_MASK 0x00000008L
++#define CP_COHER_CNTL__TC_WC_ACTION_ENA_MASK 0x00000010L
++#define CP_COHER_CNTL__TC_INV_METADATA_ACTION_ENA_MASK 0x00000020L
++#define CP_COHER_CNTL__TCL1_VOL_ACTION_ENA_MASK 0x00008000L
++#define CP_COHER_CNTL__TC_WB_ACTION_ENA_MASK 0x00040000L
++#define CP_COHER_CNTL__TCL1_ACTION_ENA_MASK 0x00400000L
++#define CP_COHER_CNTL__TC_ACTION_ENA_MASK 0x00800000L
++#define CP_COHER_CNTL__CB_ACTION_ENA_MASK 0x02000000L
++#define CP_COHER_CNTL__DB_ACTION_ENA_MASK 0x04000000L
++#define CP_COHER_CNTL__SH_KCACHE_ACTION_ENA_MASK 0x08000000L
++#define CP_COHER_CNTL__SH_KCACHE_VOL_ACTION_ENA_MASK 0x10000000L
++#define CP_COHER_CNTL__SH_ICACHE_ACTION_ENA_MASK 0x20000000L
++#define CP_COHER_CNTL__SH_KCACHE_WB_ACTION_ENA_MASK 0x40000000L
++//CP_COHER_SIZE
++#define CP_COHER_SIZE__COHER_SIZE_256B__SHIFT 0x0
++#define CP_COHER_SIZE__COHER_SIZE_256B_MASK 0xFFFFFFFFL
++//CP_COHER_BASE
++#define CP_COHER_BASE__COHER_BASE_256B__SHIFT 0x0
++#define CP_COHER_BASE__COHER_BASE_256B_MASK 0xFFFFFFFFL
++//CP_COHER_STATUS
++#define CP_COHER_STATUS__MEID__SHIFT 0x18
++#define CP_COHER_STATUS__STATUS__SHIFT 0x1f
++#define CP_COHER_STATUS__MEID_MASK 0x03000000L
++#define CP_COHER_STATUS__STATUS_MASK 0x80000000L
++//CP_DMA_ME_SRC_ADDR
++#define CP_DMA_ME_SRC_ADDR__SRC_ADDR__SHIFT 0x0
++#define CP_DMA_ME_SRC_ADDR__SRC_ADDR_MASK 0xFFFFFFFFL
++//CP_DMA_ME_SRC_ADDR_HI
++#define CP_DMA_ME_SRC_ADDR_HI__SRC_ADDR_HI__SHIFT 0x0
++#define CP_DMA_ME_SRC_ADDR_HI__SRC_ADDR_HI_MASK 0x0000FFFFL
++//CP_DMA_ME_DST_ADDR
++#define CP_DMA_ME_DST_ADDR__DST_ADDR__SHIFT 0x0
++#define CP_DMA_ME_DST_ADDR__DST_ADDR_MASK 0xFFFFFFFFL
++//CP_DMA_ME_DST_ADDR_HI
++#define CP_DMA_ME_DST_ADDR_HI__DST_ADDR_HI__SHIFT 0x0
++#define CP_DMA_ME_DST_ADDR_HI__DST_ADDR_HI_MASK 0x0000FFFFL
++//CP_DMA_ME_COMMAND
++#define CP_DMA_ME_COMMAND__BYTE_COUNT__SHIFT 0x0
++#define CP_DMA_ME_COMMAND__SAS__SHIFT 0x1a
++#define CP_DMA_ME_COMMAND__DAS__SHIFT 0x1b
++#define CP_DMA_ME_COMMAND__SAIC__SHIFT 0x1c
++#define CP_DMA_ME_COMMAND__DAIC__SHIFT 0x1d
++#define CP_DMA_ME_COMMAND__RAW_WAIT__SHIFT 0x1e
++#define CP_DMA_ME_COMMAND__DIS_WC__SHIFT 0x1f
++#define CP_DMA_ME_COMMAND__BYTE_COUNT_MASK 0x03FFFFFFL
++#define CP_DMA_ME_COMMAND__SAS_MASK 0x04000000L
++#define CP_DMA_ME_COMMAND__DAS_MASK 0x08000000L
++#define CP_DMA_ME_COMMAND__SAIC_MASK 0x10000000L
++#define CP_DMA_ME_COMMAND__DAIC_MASK 0x20000000L
++#define CP_DMA_ME_COMMAND__RAW_WAIT_MASK 0x40000000L
++#define CP_DMA_ME_COMMAND__DIS_WC_MASK 0x80000000L
++//CP_DMA_PFP_SRC_ADDR
++#define CP_DMA_PFP_SRC_ADDR__SRC_ADDR__SHIFT 0x0
++#define CP_DMA_PFP_SRC_ADDR__SRC_ADDR_MASK 0xFFFFFFFFL
++//CP_DMA_PFP_SRC_ADDR_HI
++#define CP_DMA_PFP_SRC_ADDR_HI__SRC_ADDR_HI__SHIFT 0x0
++#define CP_DMA_PFP_SRC_ADDR_HI__SRC_ADDR_HI_MASK 0x0000FFFFL
++//CP_DMA_PFP_DST_ADDR
++#define CP_DMA_PFP_DST_ADDR__DST_ADDR__SHIFT 0x0
++#define CP_DMA_PFP_DST_ADDR__DST_ADDR_MASK 0xFFFFFFFFL
++//CP_DMA_PFP_DST_ADDR_HI
++#define CP_DMA_PFP_DST_ADDR_HI__DST_ADDR_HI__SHIFT 0x0
++#define CP_DMA_PFP_DST_ADDR_HI__DST_ADDR_HI_MASK 0x0000FFFFL
++//CP_DMA_PFP_COMMAND
++#define CP_DMA_PFP_COMMAND__BYTE_COUNT__SHIFT 0x0
++#define CP_DMA_PFP_COMMAND__SAS__SHIFT 0x1a
++#define CP_DMA_PFP_COMMAND__DAS__SHIFT 0x1b
++#define CP_DMA_PFP_COMMAND__SAIC__SHIFT 0x1c
++#define CP_DMA_PFP_COMMAND__DAIC__SHIFT 0x1d
++#define CP_DMA_PFP_COMMAND__RAW_WAIT__SHIFT 0x1e
++#define CP_DMA_PFP_COMMAND__DIS_WC__SHIFT 0x1f
++#define CP_DMA_PFP_COMMAND__BYTE_COUNT_MASK 0x03FFFFFFL
++#define CP_DMA_PFP_COMMAND__SAS_MASK 0x04000000L
++#define CP_DMA_PFP_COMMAND__DAS_MASK 0x08000000L
++#define CP_DMA_PFP_COMMAND__SAIC_MASK 0x10000000L
++#define CP_DMA_PFP_COMMAND__DAIC_MASK 0x20000000L
++#define CP_DMA_PFP_COMMAND__RAW_WAIT_MASK 0x40000000L
++#define CP_DMA_PFP_COMMAND__DIS_WC_MASK 0x80000000L
++//CP_DMA_CNTL
++#define CP_DMA_CNTL__UTCL1_FAULT_CONTROL__SHIFT 0x0
++#define CP_DMA_CNTL__MIN_AVAILSZ__SHIFT 0x4
++#define CP_DMA_CNTL__BUFFER_DEPTH__SHIFT 0x10
++#define CP_DMA_CNTL__PIO_FIFO_EMPTY__SHIFT 0x1c
++#define CP_DMA_CNTL__PIO_FIFO_FULL__SHIFT 0x1d
++#define CP_DMA_CNTL__PIO_COUNT__SHIFT 0x1e
++#define CP_DMA_CNTL__UTCL1_FAULT_CONTROL_MASK 0x00000001L
++#define CP_DMA_CNTL__MIN_AVAILSZ_MASK 0x00000030L
++#define CP_DMA_CNTL__BUFFER_DEPTH_MASK 0x000F0000L
++#define CP_DMA_CNTL__PIO_FIFO_EMPTY_MASK 0x10000000L
++#define CP_DMA_CNTL__PIO_FIFO_FULL_MASK 0x20000000L
++#define CP_DMA_CNTL__PIO_COUNT_MASK 0xC0000000L
++//CP_DMA_READ_TAGS
++#define CP_DMA_READ_TAGS__DMA_READ_TAG__SHIFT 0x0
++#define CP_DMA_READ_TAGS__DMA_READ_TAG_VALID__SHIFT 0x1c
++#define CP_DMA_READ_TAGS__DMA_READ_TAG_MASK 0x03FFFFFFL
++#define CP_DMA_READ_TAGS__DMA_READ_TAG_VALID_MASK 0x10000000L
++//CP_COHER_SIZE_HI
++#define CP_COHER_SIZE_HI__COHER_SIZE_HI_256B__SHIFT 0x0
++#define CP_COHER_SIZE_HI__COHER_SIZE_HI_256B_MASK 0x000000FFL
++//CP_PFP_IB_CONTROL
++#define CP_PFP_IB_CONTROL__IB_EN__SHIFT 0x0
++#define CP_PFP_IB_CONTROL__IB_EN_MASK 0x000000FFL
++//CP_PFP_LOAD_CONTROL
++#define CP_PFP_LOAD_CONTROL__CONFIG_REG_EN__SHIFT 0x0
++#define CP_PFP_LOAD_CONTROL__CNTX_REG_EN__SHIFT 0x1
++#define CP_PFP_LOAD_CONTROL__SH_GFX_REG_EN__SHIFT 0x10
++#define CP_PFP_LOAD_CONTROL__SH_CS_REG_EN__SHIFT 0x18
++#define CP_PFP_LOAD_CONTROL__CONFIG_REG_EN_MASK 0x00000001L
++#define CP_PFP_LOAD_CONTROL__CNTX_REG_EN_MASK 0x00000002L
++#define CP_PFP_LOAD_CONTROL__SH_GFX_REG_EN_MASK 0x00010000L
++#define CP_PFP_LOAD_CONTROL__SH_CS_REG_EN_MASK 0x01000000L
++//CP_SCRATCH_INDEX
++#define CP_SCRATCH_INDEX__SCRATCH_INDEX__SHIFT 0x0
++#define CP_SCRATCH_INDEX__SCRATCH_INDEX_MASK 0x000000FFL
++//CP_SCRATCH_DATA
++#define CP_SCRATCH_DATA__SCRATCH_DATA__SHIFT 0x0
++#define CP_SCRATCH_DATA__SCRATCH_DATA_MASK 0xFFFFFFFFL
++//CP_RB_OFFSET
++#define CP_RB_OFFSET__RB_OFFSET__SHIFT 0x0
++#define CP_RB_OFFSET__RB_OFFSET_MASK 0x000FFFFFL
++//CP_IB1_OFFSET
++#define CP_IB1_OFFSET__IB1_OFFSET__SHIFT 0x0
++#define CP_IB1_OFFSET__IB1_OFFSET_MASK 0x000FFFFFL
++//CP_IB2_OFFSET
++#define CP_IB2_OFFSET__IB2_OFFSET__SHIFT 0x0
++#define CP_IB2_OFFSET__IB2_OFFSET_MASK 0x000FFFFFL
++//CP_IB1_PREAMBLE_BEGIN
++#define CP_IB1_PREAMBLE_BEGIN__IB1_PREAMBLE_BEGIN__SHIFT 0x0
++#define CP_IB1_PREAMBLE_BEGIN__IB1_PREAMBLE_BEGIN_MASK 0x000FFFFFL
++//CP_IB1_PREAMBLE_END
++#define CP_IB1_PREAMBLE_END__IB1_PREAMBLE_END__SHIFT 0x0
++#define CP_IB1_PREAMBLE_END__IB1_PREAMBLE_END_MASK 0x000FFFFFL
++//CP_IB2_PREAMBLE_BEGIN
++#define CP_IB2_PREAMBLE_BEGIN__IB2_PREAMBLE_BEGIN__SHIFT 0x0
++#define CP_IB2_PREAMBLE_BEGIN__IB2_PREAMBLE_BEGIN_MASK 0x000FFFFFL
++//CP_IB2_PREAMBLE_END
++#define CP_IB2_PREAMBLE_END__IB2_PREAMBLE_END__SHIFT 0x0
++#define CP_IB2_PREAMBLE_END__IB2_PREAMBLE_END_MASK 0x000FFFFFL
++//CP_CE_IB1_OFFSET
++#define CP_CE_IB1_OFFSET__IB1_OFFSET__SHIFT 0x0
++#define CP_CE_IB1_OFFSET__IB1_OFFSET_MASK 0x000FFFFFL
++//CP_CE_IB2_OFFSET
++#define CP_CE_IB2_OFFSET__IB2_OFFSET__SHIFT 0x0
++#define CP_CE_IB2_OFFSET__IB2_OFFSET_MASK 0x000FFFFFL
++//CP_CE_COUNTER
++#define CP_CE_COUNTER__CONST_ENGINE_COUNT__SHIFT 0x0
++#define CP_CE_COUNTER__CONST_ENGINE_COUNT_MASK 0xFFFFFFFFL
++//CP_CE_RB_OFFSET
++#define CP_CE_RB_OFFSET__RB_OFFSET__SHIFT 0x0
++#define CP_CE_RB_OFFSET__RB_OFFSET_MASK 0x000FFFFFL
++//CP_CE_INIT_CMD_BUFSZ
++#define CP_CE_INIT_CMD_BUFSZ__INIT_CMD_REQSZ__SHIFT 0x0
++#define CP_CE_INIT_CMD_BUFSZ__INIT_CMD_REQSZ_MASK 0x00000FFFL
++//CP_CE_IB1_CMD_BUFSZ
++#define CP_CE_IB1_CMD_BUFSZ__IB1_CMD_REQSZ__SHIFT 0x0
++#define CP_CE_IB1_CMD_BUFSZ__IB1_CMD_REQSZ_MASK 0x000FFFFFL
++//CP_CE_IB2_CMD_BUFSZ
++#define CP_CE_IB2_CMD_BUFSZ__IB2_CMD_REQSZ__SHIFT 0x0
++#define CP_CE_IB2_CMD_BUFSZ__IB2_CMD_REQSZ_MASK 0x000FFFFFL
++//CP_IB1_CMD_BUFSZ
++#define CP_IB1_CMD_BUFSZ__IB1_CMD_REQSZ__SHIFT 0x0
++#define CP_IB1_CMD_BUFSZ__IB1_CMD_REQSZ_MASK 0x000FFFFFL
++//CP_IB2_CMD_BUFSZ
++#define CP_IB2_CMD_BUFSZ__IB2_CMD_REQSZ__SHIFT 0x0
++#define CP_IB2_CMD_BUFSZ__IB2_CMD_REQSZ_MASK 0x000FFFFFL
++//CP_ST_CMD_BUFSZ
++#define CP_ST_CMD_BUFSZ__ST_CMD_REQSZ__SHIFT 0x0
++#define CP_ST_CMD_BUFSZ__ST_CMD_REQSZ_MASK 0x000FFFFFL
++//CP_CE_INIT_BASE_LO
++#define CP_CE_INIT_BASE_LO__INIT_BASE_LO__SHIFT 0x5
++#define CP_CE_INIT_BASE_LO__INIT_BASE_LO_MASK 0xFFFFFFE0L
++//CP_CE_INIT_BASE_HI
++#define CP_CE_INIT_BASE_HI__INIT_BASE_HI__SHIFT 0x0
++#define CP_CE_INIT_BASE_HI__INIT_BASE_HI_MASK 0x0000FFFFL
++//CP_CE_INIT_BUFSZ
++#define CP_CE_INIT_BUFSZ__INIT_BUFSZ__SHIFT 0x0
++#define CP_CE_INIT_BUFSZ__INIT_BUFSZ_MASK 0x00000FFFL
++//CP_CE_IB1_BASE_LO
++#define CP_CE_IB1_BASE_LO__IB1_BASE_LO__SHIFT 0x2
++#define CP_CE_IB1_BASE_LO__IB1_BASE_LO_MASK 0xFFFFFFFCL
++//CP_CE_IB1_BASE_HI
++#define CP_CE_IB1_BASE_HI__IB1_BASE_HI__SHIFT 0x0
++#define CP_CE_IB1_BASE_HI__IB1_BASE_HI_MASK 0x0000FFFFL
++//CP_CE_IB1_BUFSZ
++#define CP_CE_IB1_BUFSZ__IB1_BUFSZ__SHIFT 0x0
++#define CP_CE_IB1_BUFSZ__IB1_BUFSZ_MASK 0x000FFFFFL
++//CP_CE_IB2_BASE_LO
++#define CP_CE_IB2_BASE_LO__IB2_BASE_LO__SHIFT 0x2
++#define CP_CE_IB2_BASE_LO__IB2_BASE_LO_MASK 0xFFFFFFFCL
++//CP_CE_IB2_BASE_HI
++#define CP_CE_IB2_BASE_HI__IB2_BASE_HI__SHIFT 0x0
++#define CP_CE_IB2_BASE_HI__IB2_BASE_HI_MASK 0x0000FFFFL
++//CP_CE_IB2_BUFSZ
++#define CP_CE_IB2_BUFSZ__IB2_BUFSZ__SHIFT 0x0
++#define CP_CE_IB2_BUFSZ__IB2_BUFSZ_MASK 0x000FFFFFL
++//CP_IB1_BASE_LO
++#define CP_IB1_BASE_LO__IB1_BASE_LO__SHIFT 0x2
++#define CP_IB1_BASE_LO__IB1_BASE_LO_MASK 0xFFFFFFFCL
++//CP_IB1_BASE_HI
++#define CP_IB1_BASE_HI__IB1_BASE_HI__SHIFT 0x0
++#define CP_IB1_BASE_HI__IB1_BASE_HI_MASK 0x0000FFFFL
++//CP_IB1_BUFSZ
++#define CP_IB1_BUFSZ__IB1_BUFSZ__SHIFT 0x0
++#define CP_IB1_BUFSZ__IB1_BUFSZ_MASK 0x000FFFFFL
++//CP_IB2_BASE_LO
++#define CP_IB2_BASE_LO__IB2_BASE_LO__SHIFT 0x2
++#define CP_IB2_BASE_LO__IB2_BASE_LO_MASK 0xFFFFFFFCL
++//CP_IB2_BASE_HI
++#define CP_IB2_BASE_HI__IB2_BASE_HI__SHIFT 0x0
++#define CP_IB2_BASE_HI__IB2_BASE_HI_MASK 0x0000FFFFL
++//CP_IB2_BUFSZ
++#define CP_IB2_BUFSZ__IB2_BUFSZ__SHIFT 0x0
++#define CP_IB2_BUFSZ__IB2_BUFSZ_MASK 0x000FFFFFL
++//CP_ST_BASE_LO
++#define CP_ST_BASE_LO__ST_BASE_LO__SHIFT 0x2
++#define CP_ST_BASE_LO__ST_BASE_LO_MASK 0xFFFFFFFCL
++//CP_ST_BASE_HI
++#define CP_ST_BASE_HI__ST_BASE_HI__SHIFT 0x0
++#define CP_ST_BASE_HI__ST_BASE_HI_MASK 0x0000FFFFL
++//CP_ST_BUFSZ
++#define CP_ST_BUFSZ__ST_BUFSZ__SHIFT 0x0
++#define CP_ST_BUFSZ__ST_BUFSZ_MASK 0x000FFFFFL
++//CP_EOP_DONE_EVENT_CNTL
++#define CP_EOP_DONE_EVENT_CNTL__WBINV_TC_OP__SHIFT 0x0
++#define CP_EOP_DONE_EVENT_CNTL__WBINV_ACTION_ENA__SHIFT 0xc
++#define CP_EOP_DONE_EVENT_CNTL__CACHE_POLICY__SHIFT 0x19
++#define CP_EOP_DONE_EVENT_CNTL__EXECUTE__SHIFT 0x1c
++#define CP_EOP_DONE_EVENT_CNTL__WBINV_TC_OP_MASK 0x0000007FL
++#define CP_EOP_DONE_EVENT_CNTL__WBINV_ACTION_ENA_MASK 0x0003F000L
++#define CP_EOP_DONE_EVENT_CNTL__CACHE_POLICY_MASK 0x02000000L
++#define CP_EOP_DONE_EVENT_CNTL__EXECUTE_MASK 0x10000000L
++//CP_EOP_DONE_DATA_CNTL
++#define CP_EOP_DONE_DATA_CNTL__DST_SEL__SHIFT 0x10
++#define CP_EOP_DONE_DATA_CNTL__INT_SEL__SHIFT 0x18
++#define CP_EOP_DONE_DATA_CNTL__DATA_SEL__SHIFT 0x1d
++#define CP_EOP_DONE_DATA_CNTL__DST_SEL_MASK 0x00030000L
++#define CP_EOP_DONE_DATA_CNTL__INT_SEL_MASK 0x07000000L
++#define CP_EOP_DONE_DATA_CNTL__DATA_SEL_MASK 0xE0000000L
++//CP_EOP_DONE_CNTX_ID
++#define CP_EOP_DONE_CNTX_ID__CNTX_ID__SHIFT 0x0
++#define CP_EOP_DONE_CNTX_ID__CNTX_ID_MASK 0xFFFFFFFFL
++//CP_PFP_COMPLETION_STATUS
++#define CP_PFP_COMPLETION_STATUS__STATUS__SHIFT 0x0
++#define CP_PFP_COMPLETION_STATUS__STATUS_MASK 0x00000003L
++//CP_CE_COMPLETION_STATUS
++#define CP_CE_COMPLETION_STATUS__STATUS__SHIFT 0x0
++#define CP_CE_COMPLETION_STATUS__STATUS_MASK 0x00000003L
++//CP_PRED_NOT_VISIBLE
++#define CP_PRED_NOT_VISIBLE__NOT_VISIBLE__SHIFT 0x0
++#define CP_PRED_NOT_VISIBLE__NOT_VISIBLE_MASK 0x00000001L
++//CP_PFP_METADATA_BASE_ADDR
++#define CP_PFP_METADATA_BASE_ADDR__ADDR_LO__SHIFT 0x0
++#define CP_PFP_METADATA_BASE_ADDR__ADDR_LO_MASK 0xFFFFFFFFL
++//CP_PFP_METADATA_BASE_ADDR_HI
++#define CP_PFP_METADATA_BASE_ADDR_HI__ADDR_HI__SHIFT 0x0
++#define CP_PFP_METADATA_BASE_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
++//CP_CE_METADATA_BASE_ADDR
++#define CP_CE_METADATA_BASE_ADDR__ADDR_LO__SHIFT 0x0
++#define CP_CE_METADATA_BASE_ADDR__ADDR_LO_MASK 0xFFFFFFFFL
++//CP_CE_METADATA_BASE_ADDR_HI
++#define CP_CE_METADATA_BASE_ADDR_HI__ADDR_HI__SHIFT 0x0
++#define CP_CE_METADATA_BASE_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
++//CP_DRAW_INDX_INDR_ADDR
++#define CP_DRAW_INDX_INDR_ADDR__ADDR_LO__SHIFT 0x0
++#define CP_DRAW_INDX_INDR_ADDR__ADDR_LO_MASK 0xFFFFFFFFL
++//CP_DRAW_INDX_INDR_ADDR_HI
++#define CP_DRAW_INDX_INDR_ADDR_HI__ADDR_HI__SHIFT 0x0
++#define CP_DRAW_INDX_INDR_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
++//CP_DISPATCH_INDR_ADDR
++#define CP_DISPATCH_INDR_ADDR__ADDR_LO__SHIFT 0x0
++#define CP_DISPATCH_INDR_ADDR__ADDR_LO_MASK 0xFFFFFFFFL
++//CP_DISPATCH_INDR_ADDR_HI
++#define CP_DISPATCH_INDR_ADDR_HI__ADDR_HI__SHIFT 0x0
++#define CP_DISPATCH_INDR_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
++//CP_INDEX_BASE_ADDR
++#define CP_INDEX_BASE_ADDR__ADDR_LO__SHIFT 0x0
++#define CP_INDEX_BASE_ADDR__ADDR_LO_MASK 0xFFFFFFFFL
++//CP_INDEX_BASE_ADDR_HI
++#define CP_INDEX_BASE_ADDR_HI__ADDR_HI__SHIFT 0x0
++#define CP_INDEX_BASE_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
++//CP_INDEX_TYPE
++#define CP_INDEX_TYPE__INDEX_TYPE__SHIFT 0x0
++#define CP_INDEX_TYPE__INDEX_TYPE_MASK 0x00000003L
++//CP_GDS_BKUP_ADDR
++#define CP_GDS_BKUP_ADDR__ADDR_LO__SHIFT 0x0
++#define CP_GDS_BKUP_ADDR__ADDR_LO_MASK 0xFFFFFFFFL
++//CP_GDS_BKUP_ADDR_HI
++#define CP_GDS_BKUP_ADDR_HI__ADDR_HI__SHIFT 0x0
++#define CP_GDS_BKUP_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
++//CP_SAMPLE_STATUS
++#define CP_SAMPLE_STATUS__Z_PASS_ACITVE__SHIFT 0x0
++#define CP_SAMPLE_STATUS__STREAMOUT_ACTIVE__SHIFT 0x1
++#define CP_SAMPLE_STATUS__PIPELINE_ACTIVE__SHIFT 0x2
++#define CP_SAMPLE_STATUS__STIPPLE_ACTIVE__SHIFT 0x3
++#define CP_SAMPLE_STATUS__VGT_BUFFERS_ACTIVE__SHIFT 0x4
++#define CP_SAMPLE_STATUS__SCREEN_EXT_ACTIVE__SHIFT 0x5
++#define CP_SAMPLE_STATUS__DRAW_INDIRECT_ACTIVE__SHIFT 0x6
++#define CP_SAMPLE_STATUS__DISP_INDIRECT_ACTIVE__SHIFT 0x7
++#define CP_SAMPLE_STATUS__Z_PASS_ACITVE_MASK 0x00000001L
++#define CP_SAMPLE_STATUS__STREAMOUT_ACTIVE_MASK 0x00000002L
++#define CP_SAMPLE_STATUS__PIPELINE_ACTIVE_MASK 0x00000004L
++#define CP_SAMPLE_STATUS__STIPPLE_ACTIVE_MASK 0x00000008L
++#define CP_SAMPLE_STATUS__VGT_BUFFERS_ACTIVE_MASK 0x00000010L
++#define CP_SAMPLE_STATUS__SCREEN_EXT_ACTIVE_MASK 0x00000020L
++#define CP_SAMPLE_STATUS__DRAW_INDIRECT_ACTIVE_MASK 0x00000040L
++#define CP_SAMPLE_STATUS__DISP_INDIRECT_ACTIVE_MASK 0x00000080L
++//CP_ME_COHER_CNTL
++#define CP_ME_COHER_CNTL__DEST_BASE_0_ENA__SHIFT 0x0
++#define CP_ME_COHER_CNTL__DEST_BASE_1_ENA__SHIFT 0x1
++#define CP_ME_COHER_CNTL__CB0_DEST_BASE_ENA__SHIFT 0x6
++#define CP_ME_COHER_CNTL__CB1_DEST_BASE_ENA__SHIFT 0x7
++#define CP_ME_COHER_CNTL__CB2_DEST_BASE_ENA__SHIFT 0x8
++#define CP_ME_COHER_CNTL__CB3_DEST_BASE_ENA__SHIFT 0x9
++#define CP_ME_COHER_CNTL__CB4_DEST_BASE_ENA__SHIFT 0xa
++#define CP_ME_COHER_CNTL__CB5_DEST_BASE_ENA__SHIFT 0xb
++#define CP_ME_COHER_CNTL__CB6_DEST_BASE_ENA__SHIFT 0xc
++#define CP_ME_COHER_CNTL__CB7_DEST_BASE_ENA__SHIFT 0xd
++#define CP_ME_COHER_CNTL__DB_DEST_BASE_ENA__SHIFT 0xe
++#define CP_ME_COHER_CNTL__DEST_BASE_2_ENA__SHIFT 0x13
++#define CP_ME_COHER_CNTL__DEST_BASE_3_ENA__SHIFT 0x15
++#define CP_ME_COHER_CNTL__DEST_BASE_0_ENA_MASK 0x00000001L
++#define CP_ME_COHER_CNTL__DEST_BASE_1_ENA_MASK 0x00000002L
++#define CP_ME_COHER_CNTL__CB0_DEST_BASE_ENA_MASK 0x00000040L
++#define CP_ME_COHER_CNTL__CB1_DEST_BASE_ENA_MASK 0x00000080L
++#define CP_ME_COHER_CNTL__CB2_DEST_BASE_ENA_MASK 0x00000100L
++#define CP_ME_COHER_CNTL__CB3_DEST_BASE_ENA_MASK 0x00000200L
++#define CP_ME_COHER_CNTL__CB4_DEST_BASE_ENA_MASK 0x00000400L
++#define CP_ME_COHER_CNTL__CB5_DEST_BASE_ENA_MASK 0x00000800L
++#define CP_ME_COHER_CNTL__CB6_DEST_BASE_ENA_MASK 0x00001000L
++#define CP_ME_COHER_CNTL__CB7_DEST_BASE_ENA_MASK 0x00002000L
++#define CP_ME_COHER_CNTL__DB_DEST_BASE_ENA_MASK 0x00004000L
++#define CP_ME_COHER_CNTL__DEST_BASE_2_ENA_MASK 0x00080000L
++#define CP_ME_COHER_CNTL__DEST_BASE_3_ENA_MASK 0x00200000L
++//CP_ME_COHER_SIZE
++#define CP_ME_COHER_SIZE__COHER_SIZE_256B__SHIFT 0x0
++#define CP_ME_COHER_SIZE__COHER_SIZE_256B_MASK 0xFFFFFFFFL
++//CP_ME_COHER_SIZE_HI
++#define CP_ME_COHER_SIZE_HI__COHER_SIZE_HI_256B__SHIFT 0x0
++#define CP_ME_COHER_SIZE_HI__COHER_SIZE_HI_256B_MASK 0x000000FFL
++//CP_ME_COHER_BASE
++#define CP_ME_COHER_BASE__COHER_BASE_256B__SHIFT 0x0
++#define CP_ME_COHER_BASE__COHER_BASE_256B_MASK 0xFFFFFFFFL
++//CP_ME_COHER_BASE_HI
++#define CP_ME_COHER_BASE_HI__COHER_BASE_HI_256B__SHIFT 0x0
++#define CP_ME_COHER_BASE_HI__COHER_BASE_HI_256B_MASK 0x000000FFL
++//CP_ME_COHER_STATUS
++#define CP_ME_COHER_STATUS__MATCHING_GFX_CNTX__SHIFT 0x0
++#define CP_ME_COHER_STATUS__STATUS__SHIFT 0x1f
++#define CP_ME_COHER_STATUS__MATCHING_GFX_CNTX_MASK 0x000000FFL
++#define CP_ME_COHER_STATUS__STATUS_MASK 0x80000000L
++//RLC_GPM_PERF_COUNT_0
++#define RLC_GPM_PERF_COUNT_0__FEATURE_SEL__SHIFT 0x0
++#define RLC_GPM_PERF_COUNT_0__SE_INDEX__SHIFT 0x4
++#define RLC_GPM_PERF_COUNT_0__SH_INDEX__SHIFT 0x8
++#define RLC_GPM_PERF_COUNT_0__CU_INDEX__SHIFT 0xc
++#define RLC_GPM_PERF_COUNT_0__EVENT_SEL__SHIFT 0x10
++#define RLC_GPM_PERF_COUNT_0__UNUSED__SHIFT 0x12
++#define RLC_GPM_PERF_COUNT_0__ENABLE__SHIFT 0x14
++#define RLC_GPM_PERF_COUNT_0__RESERVED__SHIFT 0x15
++#define RLC_GPM_PERF_COUNT_0__FEATURE_SEL_MASK 0x0000000FL
++#define RLC_GPM_PERF_COUNT_0__SE_INDEX_MASK 0x000000F0L
++#define RLC_GPM_PERF_COUNT_0__SH_INDEX_MASK 0x00000F00L
++#define RLC_GPM_PERF_COUNT_0__CU_INDEX_MASK 0x0000F000L
++#define RLC_GPM_PERF_COUNT_0__EVENT_SEL_MASK 0x00030000L
++#define RLC_GPM_PERF_COUNT_0__UNUSED_MASK 0x000C0000L
++#define RLC_GPM_PERF_COUNT_0__ENABLE_MASK 0x00100000L
++#define RLC_GPM_PERF_COUNT_0__RESERVED_MASK 0xFFE00000L
++//RLC_GPM_PERF_COUNT_1
++#define RLC_GPM_PERF_COUNT_1__FEATURE_SEL__SHIFT 0x0
++#define RLC_GPM_PERF_COUNT_1__SE_INDEX__SHIFT 0x4
++#define RLC_GPM_PERF_COUNT_1__SH_INDEX__SHIFT 0x8
++#define RLC_GPM_PERF_COUNT_1__CU_INDEX__SHIFT 0xc
++#define RLC_GPM_PERF_COUNT_1__EVENT_SEL__SHIFT 0x10
++#define RLC_GPM_PERF_COUNT_1__UNUSED__SHIFT 0x12
++#define RLC_GPM_PERF_COUNT_1__ENABLE__SHIFT 0x14
++#define RLC_GPM_PERF_COUNT_1__RESERVED__SHIFT 0x15
++#define RLC_GPM_PERF_COUNT_1__FEATURE_SEL_MASK 0x0000000FL
++#define RLC_GPM_PERF_COUNT_1__SE_INDEX_MASK 0x000000F0L
++#define RLC_GPM_PERF_COUNT_1__SH_INDEX_MASK 0x00000F00L
++#define RLC_GPM_PERF_COUNT_1__CU_INDEX_MASK 0x0000F000L
++#define RLC_GPM_PERF_COUNT_1__EVENT_SEL_MASK 0x00030000L
++#define RLC_GPM_PERF_COUNT_1__UNUSED_MASK 0x000C0000L
++#define RLC_GPM_PERF_COUNT_1__ENABLE_MASK 0x00100000L
++#define RLC_GPM_PERF_COUNT_1__RESERVED_MASK 0xFFE00000L
++//GRBM_GFX_INDEX
++#define GRBM_GFX_INDEX__INSTANCE_INDEX__SHIFT 0x0
++#define GRBM_GFX_INDEX__SH_INDEX__SHIFT 0x8
++#define GRBM_GFX_INDEX__SE_INDEX__SHIFT 0x10
++#define GRBM_GFX_INDEX__SH_BROADCAST_WRITES__SHIFT 0x1d
++#define GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES__SHIFT 0x1e
++#define GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT 0x1f
++#define GRBM_GFX_INDEX__INSTANCE_INDEX_MASK 0x000000FFL
++#define GRBM_GFX_INDEX__SH_INDEX_MASK 0x0000FF00L
++#define GRBM_GFX_INDEX__SE_INDEX_MASK 0x00FF0000L
++#define GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK 0x20000000L
++#define GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK 0x40000000L
++#define GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK 0x80000000L
++//VGT_GSVS_RING_SIZE
++#define VGT_GSVS_RING_SIZE__MEM_SIZE__SHIFT 0x0
++#define VGT_GSVS_RING_SIZE__MEM_SIZE_MASK 0xFFFFFFFFL
++//VGT_PRIMITIVE_TYPE
++#define VGT_PRIMITIVE_TYPE__PRIM_TYPE__SHIFT 0x0
++#define VGT_PRIMITIVE_TYPE__PRIM_TYPE_MASK 0x0000003FL
++//VGT_INDEX_TYPE
++#define VGT_INDEX_TYPE__INDEX_TYPE__SHIFT 0x0
++#define VGT_INDEX_TYPE__PRIMGEN_EN__SHIFT 0x8
++#define VGT_INDEX_TYPE__INDEX_TYPE_MASK 0x00000003L
++#define VGT_INDEX_TYPE__PRIMGEN_EN_MASK 0x00000100L
++//VGT_STRMOUT_BUFFER_FILLED_SIZE_0
++#define VGT_STRMOUT_BUFFER_FILLED_SIZE_0__SIZE__SHIFT 0x0
++#define VGT_STRMOUT_BUFFER_FILLED_SIZE_0__SIZE_MASK 0xFFFFFFFFL
++//VGT_STRMOUT_BUFFER_FILLED_SIZE_1
++#define VGT_STRMOUT_BUFFER_FILLED_SIZE_1__SIZE__SHIFT 0x0
++#define VGT_STRMOUT_BUFFER_FILLED_SIZE_1__SIZE_MASK 0xFFFFFFFFL
++//VGT_STRMOUT_BUFFER_FILLED_SIZE_2
++#define VGT_STRMOUT_BUFFER_FILLED_SIZE_2__SIZE__SHIFT 0x0
++#define VGT_STRMOUT_BUFFER_FILLED_SIZE_2__SIZE_MASK 0xFFFFFFFFL
++//VGT_STRMOUT_BUFFER_FILLED_SIZE_3
++#define VGT_STRMOUT_BUFFER_FILLED_SIZE_3__SIZE__SHIFT 0x0
++#define VGT_STRMOUT_BUFFER_FILLED_SIZE_3__SIZE_MASK 0xFFFFFFFFL
++//VGT_MAX_VTX_INDX
++#define VGT_MAX_VTX_INDX__MAX_INDX__SHIFT 0x0
++#define VGT_MAX_VTX_INDX__MAX_INDX_MASK 0xFFFFFFFFL
++//VGT_MIN_VTX_INDX
++#define VGT_MIN_VTX_INDX__MIN_INDX__SHIFT 0x0
++#define VGT_MIN_VTX_INDX__MIN_INDX_MASK 0xFFFFFFFFL
++//VGT_INDX_OFFSET
++#define VGT_INDX_OFFSET__INDX_OFFSET__SHIFT 0x0
++#define VGT_INDX_OFFSET__INDX_OFFSET_MASK 0xFFFFFFFFL
++//VGT_MULTI_PRIM_IB_RESET_EN
++#define VGT_MULTI_PRIM_IB_RESET_EN__RESET_EN__SHIFT 0x0
++#define VGT_MULTI_PRIM_IB_RESET_EN__MATCH_ALL_BITS__SHIFT 0x1
++#define VGT_MULTI_PRIM_IB_RESET_EN__RESET_EN_MASK 0x00000001L
++#define VGT_MULTI_PRIM_IB_RESET_EN__MATCH_ALL_BITS_MASK 0x00000002L
++//VGT_NUM_INDICES
++#define VGT_NUM_INDICES__NUM_INDICES__SHIFT 0x0
++#define VGT_NUM_INDICES__NUM_INDICES_MASK 0xFFFFFFFFL
++//VGT_NUM_INSTANCES
++#define VGT_NUM_INSTANCES__NUM_INSTANCES__SHIFT 0x0
++#define VGT_NUM_INSTANCES__NUM_INSTANCES_MASK 0xFFFFFFFFL
++//VGT_TF_RING_SIZE
++#define VGT_TF_RING_SIZE__SIZE__SHIFT 0x0
++#define VGT_TF_RING_SIZE__SIZE_MASK 0x0000FFFFL
++//VGT_HS_OFFCHIP_PARAM
++#define VGT_HS_OFFCHIP_PARAM__OFFCHIP_BUFFERING__SHIFT 0x0
++#define VGT_HS_OFFCHIP_PARAM__OFFCHIP_GRANULARITY__SHIFT 0x9
++#define VGT_HS_OFFCHIP_PARAM__OFFCHIP_BUFFERING_MASK 0x000001FFL
++#define VGT_HS_OFFCHIP_PARAM__OFFCHIP_GRANULARITY_MASK 0x00000600L
++//VGT_TF_MEMORY_BASE
++#define VGT_TF_MEMORY_BASE__BASE__SHIFT 0x0
++#define VGT_TF_MEMORY_BASE__BASE_MASK 0xFFFFFFFFL
++//VGT_TF_MEMORY_BASE_HI
++#define VGT_TF_MEMORY_BASE_HI__BASE_HI__SHIFT 0x0
++#define VGT_TF_MEMORY_BASE_HI__BASE_HI_MASK 0x000000FFL
++//WD_POS_BUF_BASE
++#define WD_POS_BUF_BASE__BASE__SHIFT 0x0
++#define WD_POS_BUF_BASE__BASE_MASK 0xFFFFFFFFL
++//WD_POS_BUF_BASE_HI
++#define WD_POS_BUF_BASE_HI__BASE_HI__SHIFT 0x0
++#define WD_POS_BUF_BASE_HI__BASE_HI_MASK 0x000000FFL
++//WD_CNTL_SB_BUF_BASE
++#define WD_CNTL_SB_BUF_BASE__BASE__SHIFT 0x0
++#define WD_CNTL_SB_BUF_BASE__BASE_MASK 0xFFFFFFFFL
++//WD_CNTL_SB_BUF_BASE_HI
++#define WD_CNTL_SB_BUF_BASE_HI__BASE_HI__SHIFT 0x0
++#define WD_CNTL_SB_BUF_BASE_HI__BASE_HI_MASK 0x000000FFL
++//WD_INDEX_BUF_BASE
++#define WD_INDEX_BUF_BASE__BASE__SHIFT 0x0
++#define WD_INDEX_BUF_BASE__BASE_MASK 0xFFFFFFFFL
++//WD_INDEX_BUF_BASE_HI
++#define WD_INDEX_BUF_BASE_HI__BASE_HI__SHIFT 0x0
++#define WD_INDEX_BUF_BASE_HI__BASE_HI_MASK 0x000000FFL
++//IA_MULTI_VGT_PARAM
++#define IA_MULTI_VGT_PARAM__PRIMGROUP_SIZE__SHIFT 0x0
++#define IA_MULTI_VGT_PARAM__PARTIAL_VS_WAVE_ON__SHIFT 0x10
++#define IA_MULTI_VGT_PARAM__SWITCH_ON_EOP__SHIFT 0x11
++#define IA_MULTI_VGT_PARAM__PARTIAL_ES_WAVE_ON__SHIFT 0x12
++#define IA_MULTI_VGT_PARAM__SWITCH_ON_EOI__SHIFT 0x13
++#define IA_MULTI_VGT_PARAM__WD_SWITCH_ON_EOP__SHIFT 0x14
++#define IA_MULTI_VGT_PARAM__EN_INST_OPT_BASIC__SHIFT 0x15
++#define IA_MULTI_VGT_PARAM__EN_INST_OPT_ADV__SHIFT 0x16
++#define IA_MULTI_VGT_PARAM__HW_USE_ONLY__SHIFT 0x17
++#define IA_MULTI_VGT_PARAM__PRIMGROUP_SIZE_MASK 0x0000FFFFL
++#define IA_MULTI_VGT_PARAM__PARTIAL_VS_WAVE_ON_MASK 0x00010000L
++#define IA_MULTI_VGT_PARAM__SWITCH_ON_EOP_MASK 0x00020000L
++#define IA_MULTI_VGT_PARAM__PARTIAL_ES_WAVE_ON_MASK 0x00040000L
++#define IA_MULTI_VGT_PARAM__SWITCH_ON_EOI_MASK 0x00080000L
++#define IA_MULTI_VGT_PARAM__WD_SWITCH_ON_EOP_MASK 0x00100000L
++#define IA_MULTI_VGT_PARAM__EN_INST_OPT_BASIC_MASK 0x00200000L
++#define IA_MULTI_VGT_PARAM__EN_INST_OPT_ADV_MASK 0x00400000L
++#define IA_MULTI_VGT_PARAM__HW_USE_ONLY_MASK 0x00800000L
++//VGT_INSTANCE_BASE_ID
++#define VGT_INSTANCE_BASE_ID__INSTANCE_BASE_ID__SHIFT 0x0
++#define VGT_INSTANCE_BASE_ID__INSTANCE_BASE_ID_MASK 0xFFFFFFFFL
++//PA_SU_LINE_STIPPLE_VALUE
++#define PA_SU_LINE_STIPPLE_VALUE__LINE_STIPPLE_VALUE__SHIFT 0x0
++#define PA_SU_LINE_STIPPLE_VALUE__LINE_STIPPLE_VALUE_MASK 0x00FFFFFFL
++//PA_SC_LINE_STIPPLE_STATE
++#define PA_SC_LINE_STIPPLE_STATE__CURRENT_PTR__SHIFT 0x0
++#define PA_SC_LINE_STIPPLE_STATE__CURRENT_COUNT__SHIFT 0x8
++#define PA_SC_LINE_STIPPLE_STATE__CURRENT_PTR_MASK 0x0000000FL
++#define PA_SC_LINE_STIPPLE_STATE__CURRENT_COUNT_MASK 0x0000FF00L
++//PA_SC_SCREEN_EXTENT_MIN_0
++#define PA_SC_SCREEN_EXTENT_MIN_0__X__SHIFT 0x0
++#define PA_SC_SCREEN_EXTENT_MIN_0__Y__SHIFT 0x10
++#define PA_SC_SCREEN_EXTENT_MIN_0__X_MASK 0x0000FFFFL
++#define PA_SC_SCREEN_EXTENT_MIN_0__Y_MASK 0xFFFF0000L
++//PA_SC_SCREEN_EXTENT_MAX_0
++#define PA_SC_SCREEN_EXTENT_MAX_0__X__SHIFT 0x0
++#define PA_SC_SCREEN_EXTENT_MAX_0__Y__SHIFT 0x10
++#define PA_SC_SCREEN_EXTENT_MAX_0__X_MASK 0x0000FFFFL
++#define PA_SC_SCREEN_EXTENT_MAX_0__Y_MASK 0xFFFF0000L
++//PA_SC_SCREEN_EXTENT_MIN_1
++#define PA_SC_SCREEN_EXTENT_MIN_1__X__SHIFT 0x0
++#define PA_SC_SCREEN_EXTENT_MIN_1__Y__SHIFT 0x10
++#define PA_SC_SCREEN_EXTENT_MIN_1__X_MASK 0x0000FFFFL
++#define PA_SC_SCREEN_EXTENT_MIN_1__Y_MASK 0xFFFF0000L
++//PA_SC_SCREEN_EXTENT_MAX_1
++#define PA_SC_SCREEN_EXTENT_MAX_1__X__SHIFT 0x0
++#define PA_SC_SCREEN_EXTENT_MAX_1__Y__SHIFT 0x10
++#define PA_SC_SCREEN_EXTENT_MAX_1__X_MASK 0x0000FFFFL
++#define PA_SC_SCREEN_EXTENT_MAX_1__Y_MASK 0xFFFF0000L
++//PA_SC_P3D_TRAP_SCREEN_HV_EN
++#define PA_SC_P3D_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER__SHIFT 0x0
++#define PA_SC_P3D_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS__SHIFT 0x1
++#define PA_SC_P3D_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER_MASK 0x00000001L
++#define PA_SC_P3D_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS_MASK 0x00000002L
++//PA_SC_P3D_TRAP_SCREEN_H
++#define PA_SC_P3D_TRAP_SCREEN_H__X_COORD__SHIFT 0x0
++#define PA_SC_P3D_TRAP_SCREEN_H__X_COORD_MASK 0x00003FFFL
++//PA_SC_P3D_TRAP_SCREEN_V
++#define PA_SC_P3D_TRAP_SCREEN_V__Y_COORD__SHIFT 0x0
++#define PA_SC_P3D_TRAP_SCREEN_V__Y_COORD_MASK 0x00003FFFL
++//PA_SC_P3D_TRAP_SCREEN_OCCURRENCE
++#define PA_SC_P3D_TRAP_SCREEN_OCCURRENCE__COUNT__SHIFT 0x0
++#define PA_SC_P3D_TRAP_SCREEN_OCCURRENCE__COUNT_MASK 0x0000FFFFL
++//PA_SC_P3D_TRAP_SCREEN_COUNT
++#define PA_SC_P3D_TRAP_SCREEN_COUNT__COUNT__SHIFT 0x0
++#define PA_SC_P3D_TRAP_SCREEN_COUNT__COUNT_MASK 0x0000FFFFL
++//PA_SC_HP3D_TRAP_SCREEN_HV_EN
++#define PA_SC_HP3D_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER__SHIFT 0x0
++#define PA_SC_HP3D_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS__SHIFT 0x1
++#define PA_SC_HP3D_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER_MASK 0x00000001L
++#define PA_SC_HP3D_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS_MASK 0x00000002L
++//PA_SC_HP3D_TRAP_SCREEN_H
++#define PA_SC_HP3D_TRAP_SCREEN_H__X_COORD__SHIFT 0x0
++#define PA_SC_HP3D_TRAP_SCREEN_H__X_COORD_MASK 0x00003FFFL
++//PA_SC_HP3D_TRAP_SCREEN_V
++#define PA_SC_HP3D_TRAP_SCREEN_V__Y_COORD__SHIFT 0x0
++#define PA_SC_HP3D_TRAP_SCREEN_V__Y_COORD_MASK 0x00003FFFL
++//PA_SC_HP3D_TRAP_SCREEN_OCCURRENCE
++#define PA_SC_HP3D_TRAP_SCREEN_OCCURRENCE__COUNT__SHIFT 0x0
++#define PA_SC_HP3D_TRAP_SCREEN_OCCURRENCE__COUNT_MASK 0x0000FFFFL
++//PA_SC_HP3D_TRAP_SCREEN_COUNT
++#define PA_SC_HP3D_TRAP_SCREEN_COUNT__COUNT__SHIFT 0x0
++#define PA_SC_HP3D_TRAP_SCREEN_COUNT__COUNT_MASK 0x0000FFFFL
++//PA_SC_TRAP_SCREEN_HV_EN
++#define PA_SC_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER__SHIFT 0x0
++#define PA_SC_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS__SHIFT 0x1
++#define PA_SC_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER_MASK 0x00000001L
++#define PA_SC_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS_MASK 0x00000002L
++//PA_SC_TRAP_SCREEN_H
++#define PA_SC_TRAP_SCREEN_H__X_COORD__SHIFT 0x0
++#define PA_SC_TRAP_SCREEN_H__X_COORD_MASK 0x00003FFFL
++//PA_SC_TRAP_SCREEN_V
++#define PA_SC_TRAP_SCREEN_V__Y_COORD__SHIFT 0x0
++#define PA_SC_TRAP_SCREEN_V__Y_COORD_MASK 0x00003FFFL
++//PA_SC_TRAP_SCREEN_OCCURRENCE
++#define PA_SC_TRAP_SCREEN_OCCURRENCE__COUNT__SHIFT 0x0
++#define PA_SC_TRAP_SCREEN_OCCURRENCE__COUNT_MASK 0x0000FFFFL
++//PA_SC_TRAP_SCREEN_COUNT
++#define PA_SC_TRAP_SCREEN_COUNT__COUNT__SHIFT 0x0
++#define PA_SC_TRAP_SCREEN_COUNT__COUNT_MASK 0x0000FFFFL
++//SQ_THREAD_TRACE_BASE
++#define SQ_THREAD_TRACE_BASE__ADDR__SHIFT 0x0
++#define SQ_THREAD_TRACE_BASE__ADDR_MASK 0xFFFFFFFFL
++//SQ_THREAD_TRACE_SIZE
++#define SQ_THREAD_TRACE_SIZE__SIZE__SHIFT 0x0
++#define SQ_THREAD_TRACE_SIZE__SIZE_MASK 0x003FFFFFL
++//SQ_THREAD_TRACE_MASK
++#define SQ_THREAD_TRACE_MASK__CU_SEL__SHIFT 0x0
++#define SQ_THREAD_TRACE_MASK__SH_SEL__SHIFT 0x5
++#define SQ_THREAD_TRACE_MASK__REG_STALL_EN__SHIFT 0x7
++#define SQ_THREAD_TRACE_MASK__SIMD_EN__SHIFT 0x8
++#define SQ_THREAD_TRACE_MASK__VM_ID_MASK__SHIFT 0xc
++#define SQ_THREAD_TRACE_MASK__SPI_STALL_EN__SHIFT 0xe
++#define SQ_THREAD_TRACE_MASK__SQ_STALL_EN__SHIFT 0xf
++#define SQ_THREAD_TRACE_MASK__CU_SEL_MASK 0x0000001FL
++#define SQ_THREAD_TRACE_MASK__SH_SEL_MASK 0x00000020L
++#define SQ_THREAD_TRACE_MASK__REG_STALL_EN_MASK 0x00000080L
++#define SQ_THREAD_TRACE_MASK__SIMD_EN_MASK 0x00000F00L
++#define SQ_THREAD_TRACE_MASK__VM_ID_MASK_MASK 0x00003000L
++#define SQ_THREAD_TRACE_MASK__SPI_STALL_EN_MASK 0x00004000L
++#define SQ_THREAD_TRACE_MASK__SQ_STALL_EN_MASK 0x00008000L
++//SQ_THREAD_TRACE_TOKEN_MASK
++#define SQ_THREAD_TRACE_TOKEN_MASK__TOKEN_MASK__SHIFT 0x0
++#define SQ_THREAD_TRACE_TOKEN_MASK__REG_MASK__SHIFT 0x10
++#define SQ_THREAD_TRACE_TOKEN_MASK__REG_DROP_ON_STALL__SHIFT 0x18
++#define SQ_THREAD_TRACE_TOKEN_MASK__TOKEN_MASK_MASK 0x0000FFFFL
++#define SQ_THREAD_TRACE_TOKEN_MASK__REG_MASK_MASK 0x00FF0000L
++#define SQ_THREAD_TRACE_TOKEN_MASK__REG_DROP_ON_STALL_MASK 0x01000000L
++//SQ_THREAD_TRACE_PERF_MASK
++#define SQ_THREAD_TRACE_PERF_MASK__SH0_MASK__SHIFT 0x0
++#define SQ_THREAD_TRACE_PERF_MASK__SH1_MASK__SHIFT 0x10
++#define SQ_THREAD_TRACE_PERF_MASK__SH0_MASK_MASK 0x0000FFFFL
++#define SQ_THREAD_TRACE_PERF_MASK__SH1_MASK_MASK 0xFFFF0000L
++//SQ_THREAD_TRACE_CTRL
++#define SQ_THREAD_TRACE_CTRL__RESET_BUFFER__SHIFT 0x1f
++#define SQ_THREAD_TRACE_CTRL__RESET_BUFFER_MASK 0x80000000L
++//SQ_THREAD_TRACE_MODE
++#define SQ_THREAD_TRACE_MODE__MASK_PS__SHIFT 0x0
++#define SQ_THREAD_TRACE_MODE__MASK_VS__SHIFT 0x3
++#define SQ_THREAD_TRACE_MODE__MASK_GS__SHIFT 0x6
++#define SQ_THREAD_TRACE_MODE__MASK_ES__SHIFT 0x9
++#define SQ_THREAD_TRACE_MODE__MASK_HS__SHIFT 0xc
++#define SQ_THREAD_TRACE_MODE__MASK_LS__SHIFT 0xf
++#define SQ_THREAD_TRACE_MODE__MASK_CS__SHIFT 0x12
++#define SQ_THREAD_TRACE_MODE__MODE__SHIFT 0x15
++#define SQ_THREAD_TRACE_MODE__CAPTURE_MODE__SHIFT 0x17
++#define SQ_THREAD_TRACE_MODE__AUTOFLUSH_EN__SHIFT 0x19
++#define SQ_THREAD_TRACE_MODE__TC_PERF_EN__SHIFT 0x1a
++#define SQ_THREAD_TRACE_MODE__ISSUE_MASK__SHIFT 0x1b
++#define SQ_THREAD_TRACE_MODE__TEST_MODE__SHIFT 0x1d
++#define SQ_THREAD_TRACE_MODE__INTERRUPT_EN__SHIFT 0x1e
++#define SQ_THREAD_TRACE_MODE__WRAP__SHIFT 0x1f
++#define SQ_THREAD_TRACE_MODE__MASK_PS_MASK 0x00000007L
++#define SQ_THREAD_TRACE_MODE__MASK_VS_MASK 0x00000038L
++#define SQ_THREAD_TRACE_MODE__MASK_GS_MASK 0x000001C0L
++#define SQ_THREAD_TRACE_MODE__MASK_ES_MASK 0x00000E00L
++#define SQ_THREAD_TRACE_MODE__MASK_HS_MASK 0x00007000L
++#define SQ_THREAD_TRACE_MODE__MASK_LS_MASK 0x00038000L
++#define SQ_THREAD_TRACE_MODE__MASK_CS_MASK 0x001C0000L
++#define SQ_THREAD_TRACE_MODE__MODE_MASK 0x00600000L
++#define SQ_THREAD_TRACE_MODE__CAPTURE_MODE_MASK 0x01800000L
++#define SQ_THREAD_TRACE_MODE__AUTOFLUSH_EN_MASK 0x02000000L
++#define SQ_THREAD_TRACE_MODE__TC_PERF_EN_MASK 0x04000000L
++#define SQ_THREAD_TRACE_MODE__ISSUE_MASK_MASK 0x18000000L
++#define SQ_THREAD_TRACE_MODE__TEST_MODE_MASK 0x20000000L
++#define SQ_THREAD_TRACE_MODE__INTERRUPT_EN_MASK 0x40000000L
++#define SQ_THREAD_TRACE_MODE__WRAP_MASK 0x80000000L
++//SQ_THREAD_TRACE_BASE2
++#define SQ_THREAD_TRACE_BASE2__ADDR_HI__SHIFT 0x0
++#define SQ_THREAD_TRACE_BASE2__ADDR_HI_MASK 0x0000000FL
++//SQ_THREAD_TRACE_TOKEN_MASK2
++#define SQ_THREAD_TRACE_TOKEN_MASK2__INST_MASK__SHIFT 0x0
++#define SQ_THREAD_TRACE_TOKEN_MASK2__INST_MASK_MASK 0xFFFFFFFFL
++//SQ_THREAD_TRACE_WPTR
++#define SQ_THREAD_TRACE_WPTR__WPTR__SHIFT 0x0
++#define SQ_THREAD_TRACE_WPTR__READ_OFFSET__SHIFT 0x1e
++#define SQ_THREAD_TRACE_WPTR__WPTR_MASK 0x3FFFFFFFL
++#define SQ_THREAD_TRACE_WPTR__READ_OFFSET_MASK 0xC0000000L
++//SQ_THREAD_TRACE_STATUS
++#define SQ_THREAD_TRACE_STATUS__FINISH_PENDING__SHIFT 0x0
++#define SQ_THREAD_TRACE_STATUS__FINISH_DONE__SHIFT 0x10
++#define SQ_THREAD_TRACE_STATUS__UTC_ERROR__SHIFT 0x1c
++#define SQ_THREAD_TRACE_STATUS__NEW_BUF__SHIFT 0x1d
++#define SQ_THREAD_TRACE_STATUS__BUSY__SHIFT 0x1e
++#define SQ_THREAD_TRACE_STATUS__FULL__SHIFT 0x1f
++#define SQ_THREAD_TRACE_STATUS__FINISH_PENDING_MASK 0x000003FFL
++#define SQ_THREAD_TRACE_STATUS__FINISH_DONE_MASK 0x03FF0000L
++#define SQ_THREAD_TRACE_STATUS__UTC_ERROR_MASK 0x10000000L
++#define SQ_THREAD_TRACE_STATUS__NEW_BUF_MASK 0x20000000L
++#define SQ_THREAD_TRACE_STATUS__BUSY_MASK 0x40000000L
++#define SQ_THREAD_TRACE_STATUS__FULL_MASK 0x80000000L
++//SQ_THREAD_TRACE_HIWATER
++#define SQ_THREAD_TRACE_HIWATER__HIWATER__SHIFT 0x0
++#define SQ_THREAD_TRACE_HIWATER__HIWATER_MASK 0x00000007L
++//SQ_THREAD_TRACE_CNTR
++#define SQ_THREAD_TRACE_CNTR__CNTR__SHIFT 0x0
++#define SQ_THREAD_TRACE_CNTR__CNTR_MASK 0xFFFFFFFFL
++//SQ_THREAD_TRACE_USERDATA_0
++#define SQ_THREAD_TRACE_USERDATA_0__DATA__SHIFT 0x0
++#define SQ_THREAD_TRACE_USERDATA_0__DATA_MASK 0xFFFFFFFFL
++//SQ_THREAD_TRACE_USERDATA_1
++#define SQ_THREAD_TRACE_USERDATA_1__DATA__SHIFT 0x0
++#define SQ_THREAD_TRACE_USERDATA_1__DATA_MASK 0xFFFFFFFFL
++//SQ_THREAD_TRACE_USERDATA_2
++#define SQ_THREAD_TRACE_USERDATA_2__DATA__SHIFT 0x0
++#define SQ_THREAD_TRACE_USERDATA_2__DATA_MASK 0xFFFFFFFFL
++//SQ_THREAD_TRACE_USERDATA_3
++#define SQ_THREAD_TRACE_USERDATA_3__DATA__SHIFT 0x0
++#define SQ_THREAD_TRACE_USERDATA_3__DATA_MASK 0xFFFFFFFFL
++//SQC_CACHES
++#define SQC_CACHES__TARGET_INST__SHIFT 0x0
++#define SQC_CACHES__TARGET_DATA__SHIFT 0x1
++#define SQC_CACHES__INVALIDATE__SHIFT 0x2
++#define SQC_CACHES__WRITEBACK__SHIFT 0x3
++#define SQC_CACHES__VOL__SHIFT 0x4
++#define SQC_CACHES__COMPLETE__SHIFT 0x10
++#define SQC_CACHES__TARGET_INST_MASK 0x00000001L
++#define SQC_CACHES__TARGET_DATA_MASK 0x00000002L
++#define SQC_CACHES__INVALIDATE_MASK 0x00000004L
++#define SQC_CACHES__WRITEBACK_MASK 0x00000008L
++#define SQC_CACHES__VOL_MASK 0x00000010L
++#define SQC_CACHES__COMPLETE_MASK 0x00010000L
++//SQC_WRITEBACK
++#define SQC_WRITEBACK__DWB__SHIFT 0x0
++#define SQC_WRITEBACK__DIRTY__SHIFT 0x1
++#define SQC_WRITEBACK__DWB_MASK 0x00000001L
++#define SQC_WRITEBACK__DIRTY_MASK 0x00000002L
++//TA_CS_BC_BASE_ADDR
++#define TA_CS_BC_BASE_ADDR__ADDRESS__SHIFT 0x0
++#define TA_CS_BC_BASE_ADDR__ADDRESS_MASK 0xFFFFFFFFL
++//TA_CS_BC_BASE_ADDR_HI
++#define TA_CS_BC_BASE_ADDR_HI__ADDRESS__SHIFT 0x0
++#define TA_CS_BC_BASE_ADDR_HI__ADDRESS_MASK 0x000000FFL
++//DB_OCCLUSION_COUNT0_LOW
++#define DB_OCCLUSION_COUNT0_LOW__COUNT_LOW__SHIFT 0x0
++#define DB_OCCLUSION_COUNT0_LOW__COUNT_LOW_MASK 0xFFFFFFFFL
++//DB_OCCLUSION_COUNT0_HI
++#define DB_OCCLUSION_COUNT0_HI__COUNT_HI__SHIFT 0x0
++#define DB_OCCLUSION_COUNT0_HI__COUNT_HI_MASK 0x7FFFFFFFL
++//DB_OCCLUSION_COUNT1_LOW
++#define DB_OCCLUSION_COUNT1_LOW__COUNT_LOW__SHIFT 0x0
++#define DB_OCCLUSION_COUNT1_LOW__COUNT_LOW_MASK 0xFFFFFFFFL
++//DB_OCCLUSION_COUNT1_HI
++#define DB_OCCLUSION_COUNT1_HI__COUNT_HI__SHIFT 0x0
++#define DB_OCCLUSION_COUNT1_HI__COUNT_HI_MASK 0x7FFFFFFFL
++//DB_OCCLUSION_COUNT2_LOW
++#define DB_OCCLUSION_COUNT2_LOW__COUNT_LOW__SHIFT 0x0
++#define DB_OCCLUSION_COUNT2_LOW__COUNT_LOW_MASK 0xFFFFFFFFL
++//DB_OCCLUSION_COUNT2_HI
++#define DB_OCCLUSION_COUNT2_HI__COUNT_HI__SHIFT 0x0
++#define DB_OCCLUSION_COUNT2_HI__COUNT_HI_MASK 0x7FFFFFFFL
++//DB_OCCLUSION_COUNT3_LOW
++#define DB_OCCLUSION_COUNT3_LOW__COUNT_LOW__SHIFT 0x0
++#define DB_OCCLUSION_COUNT3_LOW__COUNT_LOW_MASK 0xFFFFFFFFL
++//DB_OCCLUSION_COUNT3_HI
++#define DB_OCCLUSION_COUNT3_HI__COUNT_HI__SHIFT 0x0
++#define DB_OCCLUSION_COUNT3_HI__COUNT_HI_MASK 0x7FFFFFFFL
++//DB_ZPASS_COUNT_LOW
++#define DB_ZPASS_COUNT_LOW__COUNT_LOW__SHIFT 0x0
++#define DB_ZPASS_COUNT_LOW__COUNT_LOW_MASK 0xFFFFFFFFL
++//DB_ZPASS_COUNT_HI
++#define DB_ZPASS_COUNT_HI__COUNT_HI__SHIFT 0x0
++#define DB_ZPASS_COUNT_HI__COUNT_HI_MASK 0x7FFFFFFFL
++//GDS_RD_ADDR
++#define GDS_RD_ADDR__READ_ADDR__SHIFT 0x0
++#define GDS_RD_ADDR__READ_ADDR_MASK 0xFFFFFFFFL
++//GDS_RD_DATA
++#define GDS_RD_DATA__READ_DATA__SHIFT 0x0
++#define GDS_RD_DATA__READ_DATA_MASK 0xFFFFFFFFL
++//GDS_RD_BURST_ADDR
++#define GDS_RD_BURST_ADDR__BURST_ADDR__SHIFT 0x0
++#define GDS_RD_BURST_ADDR__BURST_ADDR_MASK 0xFFFFFFFFL
++//GDS_RD_BURST_COUNT
++#define GDS_RD_BURST_COUNT__BURST_COUNT__SHIFT 0x0
++#define GDS_RD_BURST_COUNT__BURST_COUNT_MASK 0xFFFFFFFFL
++//GDS_RD_BURST_DATA
++#define GDS_RD_BURST_DATA__BURST_DATA__SHIFT 0x0
++#define GDS_RD_BURST_DATA__BURST_DATA_MASK 0xFFFFFFFFL
++//GDS_WR_ADDR
++#define GDS_WR_ADDR__WRITE_ADDR__SHIFT 0x0
++#define GDS_WR_ADDR__WRITE_ADDR_MASK 0xFFFFFFFFL
++//GDS_WR_DATA
++#define GDS_WR_DATA__WRITE_DATA__SHIFT 0x0
++#define GDS_WR_DATA__WRITE_DATA_MASK 0xFFFFFFFFL
++//GDS_WR_BURST_ADDR
++#define GDS_WR_BURST_ADDR__WRITE_ADDR__SHIFT 0x0
++#define GDS_WR_BURST_ADDR__WRITE_ADDR_MASK 0xFFFFFFFFL
++//GDS_WR_BURST_DATA
++#define GDS_WR_BURST_DATA__WRITE_DATA__SHIFT 0x0
++#define GDS_WR_BURST_DATA__WRITE_DATA_MASK 0xFFFFFFFFL
++//GDS_WRITE_COMPLETE
++#define GDS_WRITE_COMPLETE__WRITE_COMPLETE__SHIFT 0x0
++#define GDS_WRITE_COMPLETE__WRITE_COMPLETE_MASK 0xFFFFFFFFL
++//GDS_ATOM_CNTL
++#define GDS_ATOM_CNTL__AINC__SHIFT 0x0
++#define GDS_ATOM_CNTL__UNUSED1__SHIFT 0x6
++#define GDS_ATOM_CNTL__DMODE__SHIFT 0x8
++#define GDS_ATOM_CNTL__UNUSED2__SHIFT 0xa
++#define GDS_ATOM_CNTL__AINC_MASK 0x0000003FL
++#define GDS_ATOM_CNTL__UNUSED1_MASK 0x000000C0L
++#define GDS_ATOM_CNTL__DMODE_MASK 0x00000300L
++#define GDS_ATOM_CNTL__UNUSED2_MASK 0xFFFFFC00L
++//GDS_ATOM_COMPLETE
++#define GDS_ATOM_COMPLETE__COMPLETE__SHIFT 0x0
++#define GDS_ATOM_COMPLETE__UNUSED__SHIFT 0x1
++#define GDS_ATOM_COMPLETE__COMPLETE_MASK 0x00000001L
++#define GDS_ATOM_COMPLETE__UNUSED_MASK 0xFFFFFFFEL
++//GDS_ATOM_BASE
++#define GDS_ATOM_BASE__BASE__SHIFT 0x0
++#define GDS_ATOM_BASE__UNUSED__SHIFT 0x10
++#define GDS_ATOM_BASE__BASE_MASK 0x0000FFFFL
++#define GDS_ATOM_BASE__UNUSED_MASK 0xFFFF0000L
++//GDS_ATOM_SIZE
++#define GDS_ATOM_SIZE__SIZE__SHIFT 0x0
++#define GDS_ATOM_SIZE__UNUSED__SHIFT 0x10
++#define GDS_ATOM_SIZE__SIZE_MASK 0x0000FFFFL
++#define GDS_ATOM_SIZE__UNUSED_MASK 0xFFFF0000L
++//GDS_ATOM_OFFSET0
++#define GDS_ATOM_OFFSET0__OFFSET0__SHIFT 0x0
++#define GDS_ATOM_OFFSET0__UNUSED__SHIFT 0x8
++#define GDS_ATOM_OFFSET0__OFFSET0_MASK 0x000000FFL
++#define GDS_ATOM_OFFSET0__UNUSED_MASK 0xFFFFFF00L
++//GDS_ATOM_OFFSET1
++#define GDS_ATOM_OFFSET1__OFFSET1__SHIFT 0x0
++#define GDS_ATOM_OFFSET1__UNUSED__SHIFT 0x8
++#define GDS_ATOM_OFFSET1__OFFSET1_MASK 0x000000FFL
++#define GDS_ATOM_OFFSET1__UNUSED_MASK 0xFFFFFF00L
++//GDS_ATOM_DST
++#define GDS_ATOM_DST__DST__SHIFT 0x0
++#define GDS_ATOM_DST__DST_MASK 0xFFFFFFFFL
++//GDS_ATOM_OP
++#define GDS_ATOM_OP__OP__SHIFT 0x0
++#define GDS_ATOM_OP__UNUSED__SHIFT 0x8
++#define GDS_ATOM_OP__OP_MASK 0x000000FFL
++#define GDS_ATOM_OP__UNUSED_MASK 0xFFFFFF00L
++//GDS_ATOM_SRC0
++#define GDS_ATOM_SRC0__DATA__SHIFT 0x0
++#define GDS_ATOM_SRC0__DATA_MASK 0xFFFFFFFFL
++//GDS_ATOM_SRC0_U
++#define GDS_ATOM_SRC0_U__DATA__SHIFT 0x0
++#define GDS_ATOM_SRC0_U__DATA_MASK 0xFFFFFFFFL
++//GDS_ATOM_SRC1
++#define GDS_ATOM_SRC1__DATA__SHIFT 0x0
++#define GDS_ATOM_SRC1__DATA_MASK 0xFFFFFFFFL
++//GDS_ATOM_SRC1_U
++#define GDS_ATOM_SRC1_U__DATA__SHIFT 0x0
++#define GDS_ATOM_SRC1_U__DATA_MASK 0xFFFFFFFFL
++//GDS_ATOM_READ0
++#define GDS_ATOM_READ0__DATA__SHIFT 0x0
++#define GDS_ATOM_READ0__DATA_MASK 0xFFFFFFFFL
++//GDS_ATOM_READ0_U
++#define GDS_ATOM_READ0_U__DATA__SHIFT 0x0
++#define GDS_ATOM_READ0_U__DATA_MASK 0xFFFFFFFFL
++//GDS_ATOM_READ1
++#define GDS_ATOM_READ1__DATA__SHIFT 0x0
++#define GDS_ATOM_READ1__DATA_MASK 0xFFFFFFFFL
++//GDS_ATOM_READ1_U
++#define GDS_ATOM_READ1_U__DATA__SHIFT 0x0
++#define GDS_ATOM_READ1_U__DATA_MASK 0xFFFFFFFFL
++//GDS_GWS_RESOURCE_CNTL
++#define GDS_GWS_RESOURCE_CNTL__INDEX__SHIFT 0x0
++#define GDS_GWS_RESOURCE_CNTL__UNUSED__SHIFT 0x6
++#define GDS_GWS_RESOURCE_CNTL__INDEX_MASK 0x0000003FL
++#define GDS_GWS_RESOURCE_CNTL__UNUSED_MASK 0xFFFFFFC0L
++//GDS_GWS_RESOURCE
++#define GDS_GWS_RESOURCE__FLAG__SHIFT 0x0
++#define GDS_GWS_RESOURCE__COUNTER__SHIFT 0x1
++#define GDS_GWS_RESOURCE__TYPE__SHIFT 0xd
++#define GDS_GWS_RESOURCE__DED__SHIFT 0xe
++#define GDS_GWS_RESOURCE__RELEASE_ALL__SHIFT 0xf
++#define GDS_GWS_RESOURCE__HEAD_QUEUE__SHIFT 0x10
++#define GDS_GWS_RESOURCE__HEAD_VALID__SHIFT 0x1c
++#define GDS_GWS_RESOURCE__HEAD_FLAG__SHIFT 0x1d
++#define GDS_GWS_RESOURCE__HALTED__SHIFT 0x1e
++#define GDS_GWS_RESOURCE__UNUSED1__SHIFT 0x1f
++#define GDS_GWS_RESOURCE__FLAG_MASK 0x00000001L
++#define GDS_GWS_RESOURCE__COUNTER_MASK 0x00001FFEL
++#define GDS_GWS_RESOURCE__TYPE_MASK 0x00002000L
++#define GDS_GWS_RESOURCE__DED_MASK 0x00004000L
++#define GDS_GWS_RESOURCE__RELEASE_ALL_MASK 0x00008000L
++#define GDS_GWS_RESOURCE__HEAD_QUEUE_MASK 0x0FFF0000L
++#define GDS_GWS_RESOURCE__HEAD_VALID_MASK 0x10000000L
++#define GDS_GWS_RESOURCE__HEAD_FLAG_MASK 0x20000000L
++#define GDS_GWS_RESOURCE__HALTED_MASK 0x40000000L
++#define GDS_GWS_RESOURCE__UNUSED1_MASK 0x80000000L
++//GDS_GWS_RESOURCE_CNT
++#define GDS_GWS_RESOURCE_CNT__RESOURCE_CNT__SHIFT 0x0
++#define GDS_GWS_RESOURCE_CNT__UNUSED__SHIFT 0x10
++#define GDS_GWS_RESOURCE_CNT__RESOURCE_CNT_MASK 0x0000FFFFL
++#define GDS_GWS_RESOURCE_CNT__UNUSED_MASK 0xFFFF0000L
++//GDS_OA_CNTL
++#define GDS_OA_CNTL__INDEX__SHIFT 0x0
++#define GDS_OA_CNTL__UNUSED__SHIFT 0x4
++#define GDS_OA_CNTL__INDEX_MASK 0x0000000FL
++#define GDS_OA_CNTL__UNUSED_MASK 0xFFFFFFF0L
++//GDS_OA_COUNTER
++#define GDS_OA_COUNTER__SPACE_AVAILABLE__SHIFT 0x0
++#define GDS_OA_COUNTER__SPACE_AVAILABLE_MASK 0xFFFFFFFFL
++//GDS_OA_ADDRESS
++#define GDS_OA_ADDRESS__DS_ADDRESS__SHIFT 0x0
++#define GDS_OA_ADDRESS__CRAWLER__SHIFT 0x10
++#define GDS_OA_ADDRESS__CRAWLER_TYPE__SHIFT 0x14
++#define GDS_OA_ADDRESS__UNUSED__SHIFT 0x16
++#define GDS_OA_ADDRESS__NO_ALLOC__SHIFT 0x1e
++#define GDS_OA_ADDRESS__ENABLE__SHIFT 0x1f
++#define GDS_OA_ADDRESS__DS_ADDRESS_MASK 0x0000FFFFL
++#define GDS_OA_ADDRESS__CRAWLER_MASK 0x000F0000L
++#define GDS_OA_ADDRESS__CRAWLER_TYPE_MASK 0x00300000L
++#define GDS_OA_ADDRESS__UNUSED_MASK 0x3FC00000L
++#define GDS_OA_ADDRESS__NO_ALLOC_MASK 0x40000000L
++#define GDS_OA_ADDRESS__ENABLE_MASK 0x80000000L
++//GDS_OA_INCDEC
++#define GDS_OA_INCDEC__VALUE__SHIFT 0x0
++#define GDS_OA_INCDEC__INCDEC__SHIFT 0x1f
++#define GDS_OA_INCDEC__VALUE_MASK 0x7FFFFFFFL
++#define GDS_OA_INCDEC__INCDEC_MASK 0x80000000L
++//GDS_OA_RING_SIZE
++#define GDS_OA_RING_SIZE__RING_SIZE__SHIFT 0x0
++#define GDS_OA_RING_SIZE__RING_SIZE_MASK 0xFFFFFFFFL
++//SPI_CONFIG_CNTL
++#define SPI_CONFIG_CNTL__GPR_WRITE_PRIORITY__SHIFT 0x0
++#define SPI_CONFIG_CNTL__EXP_PRIORITY_ORDER__SHIFT 0x15
++#define SPI_CONFIG_CNTL__ENABLE_SQG_TOP_EVENTS__SHIFT 0x18
++#define SPI_CONFIG_CNTL__ENABLE_SQG_BOP_EVENTS__SHIFT 0x19
++#define SPI_CONFIG_CNTL__RSRC_MGMT_RESET__SHIFT 0x1a
++#define SPI_CONFIG_CNTL__TTRACE_STALL_ALL__SHIFT 0x1b
++#define SPI_CONFIG_CNTL__ALLOC_ARB_LRU_ENA__SHIFT 0x1c
++#define SPI_CONFIG_CNTL__EXP_ARB_LRU_ENA__SHIFT 0x1d
++#define SPI_CONFIG_CNTL__PS_PKR_PRIORITY_CNTL__SHIFT 0x1e
++#define SPI_CONFIG_CNTL__GPR_WRITE_PRIORITY_MASK 0x001FFFFFL
++#define SPI_CONFIG_CNTL__EXP_PRIORITY_ORDER_MASK 0x00E00000L
++#define SPI_CONFIG_CNTL__ENABLE_SQG_TOP_EVENTS_MASK 0x01000000L
++#define SPI_CONFIG_CNTL__ENABLE_SQG_BOP_EVENTS_MASK 0x02000000L
++#define SPI_CONFIG_CNTL__RSRC_MGMT_RESET_MASK 0x04000000L
++#define SPI_CONFIG_CNTL__TTRACE_STALL_ALL_MASK 0x08000000L
++#define SPI_CONFIG_CNTL__ALLOC_ARB_LRU_ENA_MASK 0x10000000L
++#define SPI_CONFIG_CNTL__EXP_ARB_LRU_ENA_MASK 0x20000000L
++#define SPI_CONFIG_CNTL__PS_PKR_PRIORITY_CNTL_MASK 0xC0000000L
++//SPI_CONFIG_CNTL_1
++#define SPI_CONFIG_CNTL_1__VTX_DONE_DELAY__SHIFT 0x0
++#define SPI_CONFIG_CNTL_1__INTERP_ONE_PRIM_PER_ROW__SHIFT 0x4
++#define SPI_CONFIG_CNTL_1__BATON_RESET_DISABLE__SHIFT 0x5
++#define SPI_CONFIG_CNTL_1__PC_LIMIT_ENABLE__SHIFT 0x6
++#define SPI_CONFIG_CNTL_1__PC_LIMIT_STRICT__SHIFT 0x7
++#define SPI_CONFIG_CNTL_1__CRC_SIMD_ID_WADDR_DISABLE__SHIFT 0x8
++#define SPI_CONFIG_CNTL_1__LBPW_CU_CHK_MODE__SHIFT 0x9
++#define SPI_CONFIG_CNTL_1__LBPW_CU_CHK_CNT__SHIFT 0xa
++#define SPI_CONFIG_CNTL_1__CSC_PWR_SAVE_DISABLE__SHIFT 0xe
++#define SPI_CONFIG_CNTL_1__CSG_PWR_SAVE_DISABLE__SHIFT 0xf
++#define SPI_CONFIG_CNTL_1__PC_LIMIT_SIZE__SHIFT 0x10
++#define SPI_CONFIG_CNTL_1__VTX_DONE_DELAY_MASK 0x0000000FL
++#define SPI_CONFIG_CNTL_1__INTERP_ONE_PRIM_PER_ROW_MASK 0x00000010L
++#define SPI_CONFIG_CNTL_1__BATON_RESET_DISABLE_MASK 0x00000020L
++#define SPI_CONFIG_CNTL_1__PC_LIMIT_ENABLE_MASK 0x00000040L
++#define SPI_CONFIG_CNTL_1__PC_LIMIT_STRICT_MASK 0x00000080L
++#define SPI_CONFIG_CNTL_1__CRC_SIMD_ID_WADDR_DISABLE_MASK 0x00000100L
++#define SPI_CONFIG_CNTL_1__LBPW_CU_CHK_MODE_MASK 0x00000200L
++#define SPI_CONFIG_CNTL_1__LBPW_CU_CHK_CNT_MASK 0x00003C00L
++#define SPI_CONFIG_CNTL_1__CSC_PWR_SAVE_DISABLE_MASK 0x00004000L
++#define SPI_CONFIG_CNTL_1__CSG_PWR_SAVE_DISABLE_MASK 0x00008000L
++#define SPI_CONFIG_CNTL_1__PC_LIMIT_SIZE_MASK 0xFFFF0000L
++//SPI_CONFIG_CNTL_2
++#define SPI_CONFIG_CNTL_2__CONTEXT_SAVE_WAIT_GDS_REQUEST_CYCLE_OVHD__SHIFT 0x0
++#define SPI_CONFIG_CNTL_2__CONTEXT_SAVE_WAIT_GDS_GRANT_CYCLE_OVHD__SHIFT 0x4
++#define SPI_CONFIG_CNTL_2__CONTEXT_SAVE_WAIT_GDS_REQUEST_CYCLE_OVHD_MASK 0x0000000FL
++#define SPI_CONFIG_CNTL_2__CONTEXT_SAVE_WAIT_GDS_GRANT_CYCLE_OVHD_MASK 0x000000F0L
++
++
++// addressBlock: gc_perfddec
++//CPG_PERFCOUNTER1_LO
++#define CPG_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define CPG_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//CPG_PERFCOUNTER1_HI
++#define CPG_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define CPG_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//CPG_PERFCOUNTER0_LO
++#define CPG_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define CPG_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//CPG_PERFCOUNTER0_HI
++#define CPG_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define CPG_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//CPC_PERFCOUNTER1_LO
++#define CPC_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define CPC_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//CPC_PERFCOUNTER1_HI
++#define CPC_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define CPC_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//CPC_PERFCOUNTER0_LO
++#define CPC_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define CPC_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//CPC_PERFCOUNTER0_HI
++#define CPC_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define CPC_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//CPF_PERFCOUNTER1_LO
++#define CPF_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define CPF_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//CPF_PERFCOUNTER1_HI
++#define CPF_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define CPF_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//CPF_PERFCOUNTER0_LO
++#define CPF_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define CPF_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//CPF_PERFCOUNTER0_HI
++#define CPF_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define CPF_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//CPF_LATENCY_STATS_DATA
++#define CPF_LATENCY_STATS_DATA__DATA__SHIFT 0x0
++#define CPF_LATENCY_STATS_DATA__DATA_MASK 0xFFFFFFFFL
++//CPG_LATENCY_STATS_DATA
++#define CPG_LATENCY_STATS_DATA__DATA__SHIFT 0x0
++#define CPG_LATENCY_STATS_DATA__DATA_MASK 0xFFFFFFFFL
++//CPC_LATENCY_STATS_DATA
++#define CPC_LATENCY_STATS_DATA__DATA__SHIFT 0x0
++#define CPC_LATENCY_STATS_DATA__DATA_MASK 0xFFFFFFFFL
++//GRBM_PERFCOUNTER0_LO
++#define GRBM_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define GRBM_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//GRBM_PERFCOUNTER0_HI
++#define GRBM_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define GRBM_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//GRBM_PERFCOUNTER1_LO
++#define GRBM_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define GRBM_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//GRBM_PERFCOUNTER1_HI
++#define GRBM_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define GRBM_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//GRBM_SE0_PERFCOUNTER_LO
++#define GRBM_SE0_PERFCOUNTER_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define GRBM_SE0_PERFCOUNTER_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//GRBM_SE0_PERFCOUNTER_HI
++#define GRBM_SE0_PERFCOUNTER_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define GRBM_SE0_PERFCOUNTER_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//GRBM_SE1_PERFCOUNTER_LO
++#define GRBM_SE1_PERFCOUNTER_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define GRBM_SE1_PERFCOUNTER_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//GRBM_SE1_PERFCOUNTER_HI
++#define GRBM_SE1_PERFCOUNTER_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define GRBM_SE1_PERFCOUNTER_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//GRBM_SE2_PERFCOUNTER_LO
++#define GRBM_SE2_PERFCOUNTER_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define GRBM_SE2_PERFCOUNTER_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//GRBM_SE2_PERFCOUNTER_HI
++#define GRBM_SE2_PERFCOUNTER_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define GRBM_SE2_PERFCOUNTER_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//GRBM_SE3_PERFCOUNTER_LO
++#define GRBM_SE3_PERFCOUNTER_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define GRBM_SE3_PERFCOUNTER_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//GRBM_SE3_PERFCOUNTER_HI
++#define GRBM_SE3_PERFCOUNTER_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define GRBM_SE3_PERFCOUNTER_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//WD_PERFCOUNTER0_LO
++#define WD_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define WD_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//WD_PERFCOUNTER0_HI
++#define WD_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define WD_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//WD_PERFCOUNTER1_LO
++#define WD_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define WD_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//WD_PERFCOUNTER1_HI
++#define WD_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define WD_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//WD_PERFCOUNTER2_LO
++#define WD_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define WD_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//WD_PERFCOUNTER2_HI
++#define WD_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define WD_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//WD_PERFCOUNTER3_LO
++#define WD_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define WD_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//WD_PERFCOUNTER3_HI
++#define WD_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define WD_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//IA_PERFCOUNTER0_LO
++#define IA_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define IA_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//IA_PERFCOUNTER0_HI
++#define IA_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define IA_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//IA_PERFCOUNTER1_LO
++#define IA_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define IA_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//IA_PERFCOUNTER1_HI
++#define IA_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define IA_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//IA_PERFCOUNTER2_LO
++#define IA_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define IA_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//IA_PERFCOUNTER2_HI
++#define IA_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define IA_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//IA_PERFCOUNTER3_LO
++#define IA_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define IA_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//IA_PERFCOUNTER3_HI
++#define IA_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define IA_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//VGT_PERFCOUNTER0_LO
++#define VGT_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define VGT_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//VGT_PERFCOUNTER0_HI
++#define VGT_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define VGT_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//VGT_PERFCOUNTER1_LO
++#define VGT_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define VGT_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//VGT_PERFCOUNTER1_HI
++#define VGT_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define VGT_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//VGT_PERFCOUNTER2_LO
++#define VGT_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define VGT_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//VGT_PERFCOUNTER2_HI
++#define VGT_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define VGT_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//VGT_PERFCOUNTER3_LO
++#define VGT_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define VGT_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//VGT_PERFCOUNTER3_HI
++#define VGT_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define VGT_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//PA_SU_PERFCOUNTER0_LO
++#define PA_SU_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define PA_SU_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//PA_SU_PERFCOUNTER0_HI
++#define PA_SU_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define PA_SU_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0x0000FFFFL
++//PA_SU_PERFCOUNTER1_LO
++#define PA_SU_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define PA_SU_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//PA_SU_PERFCOUNTER1_HI
++#define PA_SU_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define PA_SU_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0x0000FFFFL
++//PA_SU_PERFCOUNTER2_LO
++#define PA_SU_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define PA_SU_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//PA_SU_PERFCOUNTER2_HI
++#define PA_SU_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define PA_SU_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0x0000FFFFL
++//PA_SU_PERFCOUNTER3_LO
++#define PA_SU_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define PA_SU_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//PA_SU_PERFCOUNTER3_HI
++#define PA_SU_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define PA_SU_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0x0000FFFFL
++//PA_SC_PERFCOUNTER0_LO
++#define PA_SC_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define PA_SC_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//PA_SC_PERFCOUNTER0_HI
++#define PA_SC_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define PA_SC_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//PA_SC_PERFCOUNTER1_LO
++#define PA_SC_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define PA_SC_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//PA_SC_PERFCOUNTER1_HI
++#define PA_SC_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define PA_SC_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//PA_SC_PERFCOUNTER2_LO
++#define PA_SC_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define PA_SC_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//PA_SC_PERFCOUNTER2_HI
++#define PA_SC_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define PA_SC_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//PA_SC_PERFCOUNTER3_LO
++#define PA_SC_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define PA_SC_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//PA_SC_PERFCOUNTER3_HI
++#define PA_SC_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define PA_SC_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//PA_SC_PERFCOUNTER4_LO
++#define PA_SC_PERFCOUNTER4_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define PA_SC_PERFCOUNTER4_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//PA_SC_PERFCOUNTER4_HI
++#define PA_SC_PERFCOUNTER4_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define PA_SC_PERFCOUNTER4_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//PA_SC_PERFCOUNTER5_LO
++#define PA_SC_PERFCOUNTER5_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define PA_SC_PERFCOUNTER5_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//PA_SC_PERFCOUNTER5_HI
++#define PA_SC_PERFCOUNTER5_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define PA_SC_PERFCOUNTER5_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//PA_SC_PERFCOUNTER6_LO
++#define PA_SC_PERFCOUNTER6_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define PA_SC_PERFCOUNTER6_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//PA_SC_PERFCOUNTER6_HI
++#define PA_SC_PERFCOUNTER6_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define PA_SC_PERFCOUNTER6_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//PA_SC_PERFCOUNTER7_LO
++#define PA_SC_PERFCOUNTER7_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define PA_SC_PERFCOUNTER7_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//PA_SC_PERFCOUNTER7_HI
++#define PA_SC_PERFCOUNTER7_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define PA_SC_PERFCOUNTER7_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//SPI_PERFCOUNTER0_HI
++#define SPI_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define SPI_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//SPI_PERFCOUNTER0_LO
++#define SPI_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define SPI_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//SPI_PERFCOUNTER1_HI
++#define SPI_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define SPI_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//SPI_PERFCOUNTER1_LO
++#define SPI_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define SPI_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//SPI_PERFCOUNTER2_HI
++#define SPI_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define SPI_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//SPI_PERFCOUNTER2_LO
++#define SPI_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define SPI_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//SPI_PERFCOUNTER3_HI
++#define SPI_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define SPI_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//SPI_PERFCOUNTER3_LO
++#define SPI_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define SPI_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//SPI_PERFCOUNTER4_HI
++#define SPI_PERFCOUNTER4_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define SPI_PERFCOUNTER4_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//SPI_PERFCOUNTER4_LO
++#define SPI_PERFCOUNTER4_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define SPI_PERFCOUNTER4_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//SPI_PERFCOUNTER5_HI
++#define SPI_PERFCOUNTER5_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define SPI_PERFCOUNTER5_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//SPI_PERFCOUNTER5_LO
++#define SPI_PERFCOUNTER5_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define SPI_PERFCOUNTER5_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//SQ_PERFCOUNTER0_LO
++#define SQ_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define SQ_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//SQ_PERFCOUNTER0_HI
++#define SQ_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define SQ_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//SQ_PERFCOUNTER1_LO
++#define SQ_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define SQ_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//SQ_PERFCOUNTER1_HI
++#define SQ_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define SQ_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//SQ_PERFCOUNTER2_LO
++#define SQ_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define SQ_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//SQ_PERFCOUNTER2_HI
++#define SQ_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define SQ_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//SQ_PERFCOUNTER3_LO
++#define SQ_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define SQ_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//SQ_PERFCOUNTER3_HI
++#define SQ_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define SQ_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//SQ_PERFCOUNTER4_LO
++#define SQ_PERFCOUNTER4_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define SQ_PERFCOUNTER4_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//SQ_PERFCOUNTER4_HI
++#define SQ_PERFCOUNTER4_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define SQ_PERFCOUNTER4_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//SQ_PERFCOUNTER5_LO
++#define SQ_PERFCOUNTER5_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define SQ_PERFCOUNTER5_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//SQ_PERFCOUNTER5_HI
++#define SQ_PERFCOUNTER5_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define SQ_PERFCOUNTER5_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//SQ_PERFCOUNTER6_LO
++#define SQ_PERFCOUNTER6_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define SQ_PERFCOUNTER6_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//SQ_PERFCOUNTER6_HI
++#define SQ_PERFCOUNTER6_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define SQ_PERFCOUNTER6_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//SQ_PERFCOUNTER7_LO
++#define SQ_PERFCOUNTER7_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define SQ_PERFCOUNTER7_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//SQ_PERFCOUNTER7_HI
++#define SQ_PERFCOUNTER7_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define SQ_PERFCOUNTER7_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//SQ_PERFCOUNTER8_LO
++#define SQ_PERFCOUNTER8_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define SQ_PERFCOUNTER8_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//SQ_PERFCOUNTER8_HI
++#define SQ_PERFCOUNTER8_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define SQ_PERFCOUNTER8_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//SQ_PERFCOUNTER9_LO
++#define SQ_PERFCOUNTER9_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define SQ_PERFCOUNTER9_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//SQ_PERFCOUNTER9_HI
++#define SQ_PERFCOUNTER9_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define SQ_PERFCOUNTER9_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//SQ_PERFCOUNTER10_LO
++#define SQ_PERFCOUNTER10_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define SQ_PERFCOUNTER10_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//SQ_PERFCOUNTER10_HI
++#define SQ_PERFCOUNTER10_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define SQ_PERFCOUNTER10_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//SQ_PERFCOUNTER11_LO
++#define SQ_PERFCOUNTER11_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define SQ_PERFCOUNTER11_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//SQ_PERFCOUNTER11_HI
++#define SQ_PERFCOUNTER11_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define SQ_PERFCOUNTER11_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//SQ_PERFCOUNTER12_LO
++#define SQ_PERFCOUNTER12_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define SQ_PERFCOUNTER12_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//SQ_PERFCOUNTER12_HI
++#define SQ_PERFCOUNTER12_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define SQ_PERFCOUNTER12_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//SQ_PERFCOUNTER13_LO
++#define SQ_PERFCOUNTER13_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define SQ_PERFCOUNTER13_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//SQ_PERFCOUNTER13_HI
++#define SQ_PERFCOUNTER13_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define SQ_PERFCOUNTER13_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//SQ_PERFCOUNTER14_LO
++#define SQ_PERFCOUNTER14_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define SQ_PERFCOUNTER14_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//SQ_PERFCOUNTER14_HI
++#define SQ_PERFCOUNTER14_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define SQ_PERFCOUNTER14_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//SQ_PERFCOUNTER15_LO
++#define SQ_PERFCOUNTER15_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define SQ_PERFCOUNTER15_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//SQ_PERFCOUNTER15_HI
++#define SQ_PERFCOUNTER15_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define SQ_PERFCOUNTER15_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//SX_PERFCOUNTER0_LO
++#define SX_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define SX_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//SX_PERFCOUNTER0_HI
++#define SX_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define SX_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//SX_PERFCOUNTER1_LO
++#define SX_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define SX_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//SX_PERFCOUNTER1_HI
++#define SX_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define SX_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//SX_PERFCOUNTER2_LO
++#define SX_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define SX_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//SX_PERFCOUNTER2_HI
++#define SX_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define SX_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//SX_PERFCOUNTER3_LO
++#define SX_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define SX_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//SX_PERFCOUNTER3_HI
++#define SX_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define SX_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//GDS_PERFCOUNTER0_LO
++#define GDS_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define GDS_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//GDS_PERFCOUNTER0_HI
++#define GDS_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define GDS_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//GDS_PERFCOUNTER1_LO
++#define GDS_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define GDS_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//GDS_PERFCOUNTER1_HI
++#define GDS_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define GDS_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//GDS_PERFCOUNTER2_LO
++#define GDS_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define GDS_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//GDS_PERFCOUNTER2_HI
++#define GDS_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define GDS_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//GDS_PERFCOUNTER3_LO
++#define GDS_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define GDS_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//GDS_PERFCOUNTER3_HI
++#define GDS_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define GDS_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//TA_PERFCOUNTER0_LO
++#define TA_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define TA_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//TA_PERFCOUNTER0_HI
++#define TA_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define TA_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//TA_PERFCOUNTER1_LO
++#define TA_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define TA_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//TA_PERFCOUNTER1_HI
++#define TA_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define TA_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//TD_PERFCOUNTER0_LO
++#define TD_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define TD_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//TD_PERFCOUNTER0_HI
++#define TD_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define TD_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//TD_PERFCOUNTER1_LO
++#define TD_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define TD_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//TD_PERFCOUNTER1_HI
++#define TD_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define TD_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//TCP_PERFCOUNTER0_LO
++#define TCP_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define TCP_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//TCP_PERFCOUNTER0_HI
++#define TCP_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define TCP_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//TCP_PERFCOUNTER1_LO
++#define TCP_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define TCP_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//TCP_PERFCOUNTER1_HI
++#define TCP_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define TCP_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//TCP_PERFCOUNTER2_LO
++#define TCP_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define TCP_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//TCP_PERFCOUNTER2_HI
++#define TCP_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define TCP_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//TCP_PERFCOUNTER3_LO
++#define TCP_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define TCP_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//TCP_PERFCOUNTER3_HI
++#define TCP_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define TCP_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//TCC_PERFCOUNTER0_LO
++#define TCC_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define TCC_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//TCC_PERFCOUNTER0_HI
++#define TCC_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define TCC_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//TCC_PERFCOUNTER1_LO
++#define TCC_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define TCC_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//TCC_PERFCOUNTER1_HI
++#define TCC_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define TCC_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//TCC_PERFCOUNTER2_LO
++#define TCC_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define TCC_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//TCC_PERFCOUNTER2_HI
++#define TCC_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define TCC_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//TCC_PERFCOUNTER3_LO
++#define TCC_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define TCC_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//TCC_PERFCOUNTER3_HI
++#define TCC_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define TCC_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//TCA_PERFCOUNTER0_LO
++#define TCA_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define TCA_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//TCA_PERFCOUNTER0_HI
++#define TCA_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define TCA_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//TCA_PERFCOUNTER1_LO
++#define TCA_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define TCA_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//TCA_PERFCOUNTER1_HI
++#define TCA_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define TCA_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//TCA_PERFCOUNTER2_LO
++#define TCA_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define TCA_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//TCA_PERFCOUNTER2_HI
++#define TCA_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define TCA_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//TCA_PERFCOUNTER3_LO
++#define TCA_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define TCA_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//TCA_PERFCOUNTER3_HI
++#define TCA_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define TCA_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//CB_PERFCOUNTER0_LO
++#define CB_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define CB_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//CB_PERFCOUNTER0_HI
++#define CB_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define CB_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//CB_PERFCOUNTER1_LO
++#define CB_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define CB_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//CB_PERFCOUNTER1_HI
++#define CB_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define CB_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//CB_PERFCOUNTER2_LO
++#define CB_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define CB_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//CB_PERFCOUNTER2_HI
++#define CB_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define CB_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//CB_PERFCOUNTER3_LO
++#define CB_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define CB_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//CB_PERFCOUNTER3_HI
++#define CB_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define CB_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//DB_PERFCOUNTER0_LO
++#define DB_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define DB_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//DB_PERFCOUNTER0_HI
++#define DB_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define DB_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//DB_PERFCOUNTER1_LO
++#define DB_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define DB_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//DB_PERFCOUNTER1_HI
++#define DB_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define DB_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//DB_PERFCOUNTER2_LO
++#define DB_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define DB_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//DB_PERFCOUNTER2_HI
++#define DB_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define DB_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//DB_PERFCOUNTER3_LO
++#define DB_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define DB_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//DB_PERFCOUNTER3_HI
++#define DB_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define DB_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//RLC_PERFCOUNTER0_LO
++#define RLC_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define RLC_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//RLC_PERFCOUNTER0_HI
++#define RLC_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define RLC_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//RLC_PERFCOUNTER1_LO
++#define RLC_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define RLC_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//RLC_PERFCOUNTER1_HI
++#define RLC_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define RLC_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//RMI_PERFCOUNTER0_LO
++#define RMI_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define RMI_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//RMI_PERFCOUNTER0_HI
++#define RMI_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define RMI_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//RMI_PERFCOUNTER1_LO
++#define RMI_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define RMI_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//RMI_PERFCOUNTER1_HI
++#define RMI_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define RMI_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//RMI_PERFCOUNTER2_LO
++#define RMI_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define RMI_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//RMI_PERFCOUNTER2_HI
++#define RMI_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define RMI_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++//RMI_PERFCOUNTER3_LO
++#define RMI_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
++#define RMI_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
++//RMI_PERFCOUNTER3_HI
++#define RMI_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
++#define RMI_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
++
++
++// addressBlock: gc_utcl2_atcl2pfcntrdec
++//ATC_L2_PERFCOUNTER_LO
++#define ATC_L2_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
++#define ATC_L2_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
++//ATC_L2_PERFCOUNTER_HI
++#define ATC_L2_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
++#define ATC_L2_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
++#define ATC_L2_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
++#define ATC_L2_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
++
++
++// addressBlock: gc_utcl2_vml2prdec
++//MC_VM_L2_PERFCOUNTER_LO
++#define MC_VM_L2_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
++#define MC_VM_L2_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
++//MC_VM_L2_PERFCOUNTER_HI
++#define MC_VM_L2_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
++#define MC_VM_L2_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
++#define MC_VM_L2_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
++#define MC_VM_L2_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
++
++
++// addressBlock: gc_perfsdec
++//CPG_PERFCOUNTER1_SELECT
++#define CPG_PERFCOUNTER1_SELECT__CNTR_SEL0__SHIFT 0x0
++#define CPG_PERFCOUNTER1_SELECT__CNTR_SEL1__SHIFT 0xa
++#define CPG_PERFCOUNTER1_SELECT__SPM_MODE__SHIFT 0x14
++#define CPG_PERFCOUNTER1_SELECT__CNTR_MODE1__SHIFT 0x18
++#define CPG_PERFCOUNTER1_SELECT__CNTR_MODE0__SHIFT 0x1c
++#define CPG_PERFCOUNTER1_SELECT__CNTR_SEL0_MASK 0x000003FFL
++#define CPG_PERFCOUNTER1_SELECT__CNTR_SEL1_MASK 0x000FFC00L
++#define CPG_PERFCOUNTER1_SELECT__SPM_MODE_MASK 0x00F00000L
++#define CPG_PERFCOUNTER1_SELECT__CNTR_MODE1_MASK 0x0F000000L
++#define CPG_PERFCOUNTER1_SELECT__CNTR_MODE0_MASK 0xF0000000L
++//CPG_PERFCOUNTER0_SELECT1
++#define CPG_PERFCOUNTER0_SELECT1__CNTR_SEL2__SHIFT 0x0
++#define CPG_PERFCOUNTER0_SELECT1__CNTR_SEL3__SHIFT 0xa
++#define CPG_PERFCOUNTER0_SELECT1__CNTR_MODE3__SHIFT 0x18
++#define CPG_PERFCOUNTER0_SELECT1__CNTR_MODE2__SHIFT 0x1c
++#define CPG_PERFCOUNTER0_SELECT1__CNTR_SEL2_MASK 0x000003FFL
++#define CPG_PERFCOUNTER0_SELECT1__CNTR_SEL3_MASK 0x000FFC00L
++#define CPG_PERFCOUNTER0_SELECT1__CNTR_MODE3_MASK 0x0F000000L
++#define CPG_PERFCOUNTER0_SELECT1__CNTR_MODE2_MASK 0xF0000000L
++//CPG_PERFCOUNTER0_SELECT
++#define CPG_PERFCOUNTER0_SELECT__CNTR_SEL0__SHIFT 0x0
++#define CPG_PERFCOUNTER0_SELECT__CNTR_SEL1__SHIFT 0xa
++#define CPG_PERFCOUNTER0_SELECT__SPM_MODE__SHIFT 0x14
++#define CPG_PERFCOUNTER0_SELECT__CNTR_MODE1__SHIFT 0x18
++#define CPG_PERFCOUNTER0_SELECT__CNTR_MODE0__SHIFT 0x1c
++#define CPG_PERFCOUNTER0_SELECT__CNTR_SEL0_MASK 0x000003FFL
++#define CPG_PERFCOUNTER0_SELECT__CNTR_SEL1_MASK 0x000FFC00L
++#define CPG_PERFCOUNTER0_SELECT__SPM_MODE_MASK 0x00F00000L
++#define CPG_PERFCOUNTER0_SELECT__CNTR_MODE1_MASK 0x0F000000L
++#define CPG_PERFCOUNTER0_SELECT__CNTR_MODE0_MASK 0xF0000000L
++//CPC_PERFCOUNTER1_SELECT
++#define CPC_PERFCOUNTER1_SELECT__CNTR_SEL0__SHIFT 0x0
++#define CPC_PERFCOUNTER1_SELECT__CNTR_SEL1__SHIFT 0xa
++#define CPC_PERFCOUNTER1_SELECT__SPM_MODE__SHIFT 0x14
++#define CPC_PERFCOUNTER1_SELECT__CNTR_MODE1__SHIFT 0x18
++#define CPC_PERFCOUNTER1_SELECT__CNTR_MODE0__SHIFT 0x1c
++#define CPC_PERFCOUNTER1_SELECT__CNTR_SEL0_MASK 0x000003FFL
++#define CPC_PERFCOUNTER1_SELECT__CNTR_SEL1_MASK 0x000FFC00L
++#define CPC_PERFCOUNTER1_SELECT__SPM_MODE_MASK 0x00F00000L
++#define CPC_PERFCOUNTER1_SELECT__CNTR_MODE1_MASK 0x0F000000L
++#define CPC_PERFCOUNTER1_SELECT__CNTR_MODE0_MASK 0xF0000000L
++//CPC_PERFCOUNTER0_SELECT1
++#define CPC_PERFCOUNTER0_SELECT1__CNTR_SEL2__SHIFT 0x0
++#define CPC_PERFCOUNTER0_SELECT1__CNTR_SEL3__SHIFT 0xa
++#define CPC_PERFCOUNTER0_SELECT1__CNTR_MODE3__SHIFT 0x18
++#define CPC_PERFCOUNTER0_SELECT1__CNTR_MODE2__SHIFT 0x1c
++#define CPC_PERFCOUNTER0_SELECT1__CNTR_SEL2_MASK 0x000003FFL
++#define CPC_PERFCOUNTER0_SELECT1__CNTR_SEL3_MASK 0x000FFC00L
++#define CPC_PERFCOUNTER0_SELECT1__CNTR_MODE3_MASK 0x0F000000L
++#define CPC_PERFCOUNTER0_SELECT1__CNTR_MODE2_MASK 0xF0000000L
++//CPF_PERFCOUNTER1_SELECT
++#define CPF_PERFCOUNTER1_SELECT__CNTR_SEL0__SHIFT 0x0
++#define CPF_PERFCOUNTER1_SELECT__CNTR_SEL1__SHIFT 0xa
++#define CPF_PERFCOUNTER1_SELECT__SPM_MODE__SHIFT 0x14
++#define CPF_PERFCOUNTER1_SELECT__CNTR_MODE1__SHIFT 0x18
++#define CPF_PERFCOUNTER1_SELECT__CNTR_MODE0__SHIFT 0x1c
++#define CPF_PERFCOUNTER1_SELECT__CNTR_SEL0_MASK 0x000003FFL
++#define CPF_PERFCOUNTER1_SELECT__CNTR_SEL1_MASK 0x000FFC00L
++#define CPF_PERFCOUNTER1_SELECT__SPM_MODE_MASK 0x00F00000L
++#define CPF_PERFCOUNTER1_SELECT__CNTR_MODE1_MASK 0x0F000000L
++#define CPF_PERFCOUNTER1_SELECT__CNTR_MODE0_MASK 0xF0000000L
++//CPF_PERFCOUNTER0_SELECT1
++#define CPF_PERFCOUNTER0_SELECT1__CNTR_SEL2__SHIFT 0x0
++#define CPF_PERFCOUNTER0_SELECT1__CNTR_SEL3__SHIFT 0xa
++#define CPF_PERFCOUNTER0_SELECT1__CNTR_MODE3__SHIFT 0x18
++#define CPF_PERFCOUNTER0_SELECT1__CNTR_MODE2__SHIFT 0x1c
++#define CPF_PERFCOUNTER0_SELECT1__CNTR_SEL2_MASK 0x000003FFL
++#define CPF_PERFCOUNTER0_SELECT1__CNTR_SEL3_MASK 0x000FFC00L
++#define CPF_PERFCOUNTER0_SELECT1__CNTR_MODE3_MASK 0x0F000000L
++#define CPF_PERFCOUNTER0_SELECT1__CNTR_MODE2_MASK 0xF0000000L
++//CPF_PERFCOUNTER0_SELECT
++#define CPF_PERFCOUNTER0_SELECT__CNTR_SEL0__SHIFT 0x0
++#define CPF_PERFCOUNTER0_SELECT__CNTR_SEL1__SHIFT 0xa
++#define CPF_PERFCOUNTER0_SELECT__SPM_MODE__SHIFT 0x14
++#define CPF_PERFCOUNTER0_SELECT__CNTR_MODE1__SHIFT 0x18
++#define CPF_PERFCOUNTER0_SELECT__CNTR_MODE0__SHIFT 0x1c
++#define CPF_PERFCOUNTER0_SELECT__CNTR_SEL0_MASK 0x000003FFL
++#define CPF_PERFCOUNTER0_SELECT__CNTR_SEL1_MASK 0x000FFC00L
++#define CPF_PERFCOUNTER0_SELECT__SPM_MODE_MASK 0x00F00000L
++#define CPF_PERFCOUNTER0_SELECT__CNTR_MODE1_MASK 0x0F000000L
++#define CPF_PERFCOUNTER0_SELECT__CNTR_MODE0_MASK 0xF0000000L
++//CP_PERFMON_CNTL
++#define CP_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
++#define CP_PERFMON_CNTL__SPM_PERFMON_STATE__SHIFT 0x4
++#define CP_PERFMON_CNTL__PERFMON_ENABLE_MODE__SHIFT 0x8
++#define CP_PERFMON_CNTL__PERFMON_SAMPLE_ENABLE__SHIFT 0xa
++#define CP_PERFMON_CNTL__PERFMON_STATE_MASK 0x0000000FL
++#define CP_PERFMON_CNTL__SPM_PERFMON_STATE_MASK 0x000000F0L
++#define CP_PERFMON_CNTL__PERFMON_ENABLE_MODE_MASK 0x00000300L
++#define CP_PERFMON_CNTL__PERFMON_SAMPLE_ENABLE_MASK 0x00000400L
++//CPC_PERFCOUNTER0_SELECT
++#define CPC_PERFCOUNTER0_SELECT__CNTR_SEL0__SHIFT 0x0
++#define CPC_PERFCOUNTER0_SELECT__CNTR_SEL1__SHIFT 0xa
++#define CPC_PERFCOUNTER0_SELECT__SPM_MODE__SHIFT 0x14
++#define CPC_PERFCOUNTER0_SELECT__CNTR_MODE1__SHIFT 0x18
++#define CPC_PERFCOUNTER0_SELECT__CNTR_MODE0__SHIFT 0x1c
++#define CPC_PERFCOUNTER0_SELECT__CNTR_SEL0_MASK 0x000003FFL
++#define CPC_PERFCOUNTER0_SELECT__CNTR_SEL1_MASK 0x000FFC00L
++#define CPC_PERFCOUNTER0_SELECT__SPM_MODE_MASK 0x00F00000L
++#define CPC_PERFCOUNTER0_SELECT__CNTR_MODE1_MASK 0x0F000000L
++#define CPC_PERFCOUNTER0_SELECT__CNTR_MODE0_MASK 0xF0000000L
++//CPF_TC_PERF_COUNTER_WINDOW_SELECT
++#define CPF_TC_PERF_COUNTER_WINDOW_SELECT__INDEX__SHIFT 0x0
++#define CPF_TC_PERF_COUNTER_WINDOW_SELECT__ALWAYS__SHIFT 0x1e
++#define CPF_TC_PERF_COUNTER_WINDOW_SELECT__ENABLE__SHIFT 0x1f
++#define CPF_TC_PERF_COUNTER_WINDOW_SELECT__INDEX_MASK 0x00000007L
++#define CPF_TC_PERF_COUNTER_WINDOW_SELECT__ALWAYS_MASK 0x40000000L
++#define CPF_TC_PERF_COUNTER_WINDOW_SELECT__ENABLE_MASK 0x80000000L
++//CPG_TC_PERF_COUNTER_WINDOW_SELECT
++#define CPG_TC_PERF_COUNTER_WINDOW_SELECT__INDEX__SHIFT 0x0
++#define CPG_TC_PERF_COUNTER_WINDOW_SELECT__ALWAYS__SHIFT 0x1e
++#define CPG_TC_PERF_COUNTER_WINDOW_SELECT__ENABLE__SHIFT 0x1f
++#define CPG_TC_PERF_COUNTER_WINDOW_SELECT__INDEX_MASK 0x0000001FL
++#define CPG_TC_PERF_COUNTER_WINDOW_SELECT__ALWAYS_MASK 0x40000000L
++#define CPG_TC_PERF_COUNTER_WINDOW_SELECT__ENABLE_MASK 0x80000000L
++//CPF_LATENCY_STATS_SELECT
++#define CPF_LATENCY_STATS_SELECT__INDEX__SHIFT 0x0
++#define CPF_LATENCY_STATS_SELECT__CLEAR__SHIFT 0x1e
++#define CPF_LATENCY_STATS_SELECT__ENABLE__SHIFT 0x1f
++#define CPF_LATENCY_STATS_SELECT__INDEX_MASK 0x0000000FL
++#define CPF_LATENCY_STATS_SELECT__CLEAR_MASK 0x40000000L
++#define CPF_LATENCY_STATS_SELECT__ENABLE_MASK 0x80000000L
++//CPG_LATENCY_STATS_SELECT
++#define CPG_LATENCY_STATS_SELECT__INDEX__SHIFT 0x0
++#define CPG_LATENCY_STATS_SELECT__CLEAR__SHIFT 0x1e
++#define CPG_LATENCY_STATS_SELECT__ENABLE__SHIFT 0x1f
++#define CPG_LATENCY_STATS_SELECT__INDEX_MASK 0x0000001FL
++#define CPG_LATENCY_STATS_SELECT__CLEAR_MASK 0x40000000L
++#define CPG_LATENCY_STATS_SELECT__ENABLE_MASK 0x80000000L
++//CPC_LATENCY_STATS_SELECT
++#define CPC_LATENCY_STATS_SELECT__INDEX__SHIFT 0x0
++#define CPC_LATENCY_STATS_SELECT__CLEAR__SHIFT 0x1e
++#define CPC_LATENCY_STATS_SELECT__ENABLE__SHIFT 0x1f
++#define CPC_LATENCY_STATS_SELECT__INDEX_MASK 0x00000007L
++#define CPC_LATENCY_STATS_SELECT__CLEAR_MASK 0x40000000L
++#define CPC_LATENCY_STATS_SELECT__ENABLE_MASK 0x80000000L
++//CP_DRAW_OBJECT
++#define CP_DRAW_OBJECT__OBJECT__SHIFT 0x0
++#define CP_DRAW_OBJECT__OBJECT_MASK 0xFFFFFFFFL
++//CP_DRAW_OBJECT_COUNTER
++#define CP_DRAW_OBJECT_COUNTER__COUNT__SHIFT 0x0
++#define CP_DRAW_OBJECT_COUNTER__COUNT_MASK 0x0000FFFFL
++//CP_DRAW_WINDOW_MASK_HI
++#define CP_DRAW_WINDOW_MASK_HI__WINDOW_MASK_HI__SHIFT 0x0
++#define CP_DRAW_WINDOW_MASK_HI__WINDOW_MASK_HI_MASK 0xFFFFFFFFL
++//CP_DRAW_WINDOW_HI
++#define CP_DRAW_WINDOW_HI__WINDOW_HI__SHIFT 0x0
++#define CP_DRAW_WINDOW_HI__WINDOW_HI_MASK 0xFFFFFFFFL
++//CP_DRAW_WINDOW_LO
++#define CP_DRAW_WINDOW_LO__MIN__SHIFT 0x0
++#define CP_DRAW_WINDOW_LO__MAX__SHIFT 0x10
++#define CP_DRAW_WINDOW_LO__MIN_MASK 0x0000FFFFL
++#define CP_DRAW_WINDOW_LO__MAX_MASK 0xFFFF0000L
++//CP_DRAW_WINDOW_CNTL
++#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_LO_MAX__SHIFT 0x0
++#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_LO_MIN__SHIFT 0x1
++#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_HI__SHIFT 0x2
++#define CP_DRAW_WINDOW_CNTL__MODE__SHIFT 0x8
++#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_LO_MAX_MASK 0x00000001L
++#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_LO_MIN_MASK 0x00000002L
++#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_HI_MASK 0x00000004L
++#define CP_DRAW_WINDOW_CNTL__MODE_MASK 0x00000100L
++//GRBM_PERFCOUNTER0_SELECT
++#define GRBM_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
++#define GRBM_PERFCOUNTER0_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
++#define GRBM_PERFCOUNTER0_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
++#define GRBM_PERFCOUNTER0_SELECT__VGT_BUSY_USER_DEFINED_MASK__SHIFT 0xc
++#define GRBM_PERFCOUNTER0_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xd
++#define GRBM_PERFCOUNTER0_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xe
++#define GRBM_PERFCOUNTER0_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0x10
++#define GRBM_PERFCOUNTER0_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x11
++#define GRBM_PERFCOUNTER0_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x12
++#define GRBM_PERFCOUNTER0_SELECT__GRBM_BUSY_USER_DEFINED_MASK__SHIFT 0x13
++#define GRBM_PERFCOUNTER0_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x14
++#define GRBM_PERFCOUNTER0_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x15
++#define GRBM_PERFCOUNTER0_SELECT__CP_BUSY_USER_DEFINED_MASK__SHIFT 0x16
++#define GRBM_PERFCOUNTER0_SELECT__IA_BUSY_USER_DEFINED_MASK__SHIFT 0x17
++#define GRBM_PERFCOUNTER0_SELECT__GDS_BUSY_USER_DEFINED_MASK__SHIFT 0x18
++#define GRBM_PERFCOUNTER0_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x19
++#define GRBM_PERFCOUNTER0_SELECT__RLC_BUSY_USER_DEFINED_MASK__SHIFT 0x1a
++#define GRBM_PERFCOUNTER0_SELECT__TC_BUSY_USER_DEFINED_MASK__SHIFT 0x1b
++#define GRBM_PERFCOUNTER0_SELECT__WD_BUSY_USER_DEFINED_MASK__SHIFT 0x1c
++#define GRBM_PERFCOUNTER0_SELECT__UTCL2_BUSY_USER_DEFINED_MASK__SHIFT 0x1d
++#define GRBM_PERFCOUNTER0_SELECT__EA_BUSY_USER_DEFINED_MASK__SHIFT 0x1e
++#define GRBM_PERFCOUNTER0_SELECT__RMI_BUSY_USER_DEFINED_MASK__SHIFT 0x1f
++#define GRBM_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x0000003FL
++#define GRBM_PERFCOUNTER0_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
++#define GRBM_PERFCOUNTER0_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
++#define GRBM_PERFCOUNTER0_SELECT__VGT_BUSY_USER_DEFINED_MASK_MASK 0x00001000L
++#define GRBM_PERFCOUNTER0_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
++#define GRBM_PERFCOUNTER0_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00004000L
++#define GRBM_PERFCOUNTER0_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
++#define GRBM_PERFCOUNTER0_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
++#define GRBM_PERFCOUNTER0_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
++#define GRBM_PERFCOUNTER0_SELECT__GRBM_BUSY_USER_DEFINED_MASK_MASK 0x00080000L
++#define GRBM_PERFCOUNTER0_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
++#define GRBM_PERFCOUNTER0_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
++#define GRBM_PERFCOUNTER0_SELECT__CP_BUSY_USER_DEFINED_MASK_MASK 0x00400000L
++#define GRBM_PERFCOUNTER0_SELECT__IA_BUSY_USER_DEFINED_MASK_MASK 0x00800000L
++#define GRBM_PERFCOUNTER0_SELECT__GDS_BUSY_USER_DEFINED_MASK_MASK 0x01000000L
++#define GRBM_PERFCOUNTER0_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x02000000L
++#define GRBM_PERFCOUNTER0_SELECT__RLC_BUSY_USER_DEFINED_MASK_MASK 0x04000000L
++#define GRBM_PERFCOUNTER0_SELECT__TC_BUSY_USER_DEFINED_MASK_MASK 0x08000000L
++#define GRBM_PERFCOUNTER0_SELECT__WD_BUSY_USER_DEFINED_MASK_MASK 0x10000000L
++#define GRBM_PERFCOUNTER0_SELECT__UTCL2_BUSY_USER_DEFINED_MASK_MASK 0x20000000L
++#define GRBM_PERFCOUNTER0_SELECT__EA_BUSY_USER_DEFINED_MASK_MASK 0x40000000L
++#define GRBM_PERFCOUNTER0_SELECT__RMI_BUSY_USER_DEFINED_MASK_MASK 0x80000000L
++//GRBM_PERFCOUNTER1_SELECT
++#define GRBM_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
++#define GRBM_PERFCOUNTER1_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
++#define GRBM_PERFCOUNTER1_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
++#define GRBM_PERFCOUNTER1_SELECT__VGT_BUSY_USER_DEFINED_MASK__SHIFT 0xc
++#define GRBM_PERFCOUNTER1_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xd
++#define GRBM_PERFCOUNTER1_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xe
++#define GRBM_PERFCOUNTER1_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0x10
++#define GRBM_PERFCOUNTER1_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x11
++#define GRBM_PERFCOUNTER1_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x12
++#define GRBM_PERFCOUNTER1_SELECT__GRBM_BUSY_USER_DEFINED_MASK__SHIFT 0x13
++#define GRBM_PERFCOUNTER1_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x14
++#define GRBM_PERFCOUNTER1_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x15
++#define GRBM_PERFCOUNTER1_SELECT__CP_BUSY_USER_DEFINED_MASK__SHIFT 0x16
++#define GRBM_PERFCOUNTER1_SELECT__IA_BUSY_USER_DEFINED_MASK__SHIFT 0x17
++#define GRBM_PERFCOUNTER1_SELECT__GDS_BUSY_USER_DEFINED_MASK__SHIFT 0x18
++#define GRBM_PERFCOUNTER1_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x19
++#define GRBM_PERFCOUNTER1_SELECT__RLC_BUSY_USER_DEFINED_MASK__SHIFT 0x1a
++#define GRBM_PERFCOUNTER1_SELECT__TC_BUSY_USER_DEFINED_MASK__SHIFT 0x1b
++#define GRBM_PERFCOUNTER1_SELECT__WD_BUSY_USER_DEFINED_MASK__SHIFT 0x1c
++#define GRBM_PERFCOUNTER1_SELECT__UTCL2_BUSY_USER_DEFINED_MASK__SHIFT 0x1d
++#define GRBM_PERFCOUNTER1_SELECT__EA_BUSY_USER_DEFINED_MASK__SHIFT 0x1e
++#define GRBM_PERFCOUNTER1_SELECT__RMI_BUSY_USER_DEFINED_MASK__SHIFT 0x1f
++#define GRBM_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x0000003FL
++#define GRBM_PERFCOUNTER1_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
++#define GRBM_PERFCOUNTER1_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
++#define GRBM_PERFCOUNTER1_SELECT__VGT_BUSY_USER_DEFINED_MASK_MASK 0x00001000L
++#define GRBM_PERFCOUNTER1_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
++#define GRBM_PERFCOUNTER1_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00004000L
++#define GRBM_PERFCOUNTER1_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
++#define GRBM_PERFCOUNTER1_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
++#define GRBM_PERFCOUNTER1_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
++#define GRBM_PERFCOUNTER1_SELECT__GRBM_BUSY_USER_DEFINED_MASK_MASK 0x00080000L
++#define GRBM_PERFCOUNTER1_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
++#define GRBM_PERFCOUNTER1_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
++#define GRBM_PERFCOUNTER1_SELECT__CP_BUSY_USER_DEFINED_MASK_MASK 0x00400000L
++#define GRBM_PERFCOUNTER1_SELECT__IA_BUSY_USER_DEFINED_MASK_MASK 0x00800000L
++#define GRBM_PERFCOUNTER1_SELECT__GDS_BUSY_USER_DEFINED_MASK_MASK 0x01000000L
++#define GRBM_PERFCOUNTER1_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x02000000L
++#define GRBM_PERFCOUNTER1_SELECT__RLC_BUSY_USER_DEFINED_MASK_MASK 0x04000000L
++#define GRBM_PERFCOUNTER1_SELECT__TC_BUSY_USER_DEFINED_MASK_MASK 0x08000000L
++#define GRBM_PERFCOUNTER1_SELECT__WD_BUSY_USER_DEFINED_MASK_MASK 0x10000000L
++#define GRBM_PERFCOUNTER1_SELECT__UTCL2_BUSY_USER_DEFINED_MASK_MASK 0x20000000L
++#define GRBM_PERFCOUNTER1_SELECT__EA_BUSY_USER_DEFINED_MASK_MASK 0x40000000L
++#define GRBM_PERFCOUNTER1_SELECT__RMI_BUSY_USER_DEFINED_MASK_MASK 0x80000000L
++//GRBM_SE0_PERFCOUNTER_SELECT
++#define GRBM_SE0_PERFCOUNTER_SELECT__PERF_SEL__SHIFT 0x0
++#define GRBM_SE0_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
++#define GRBM_SE0_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
++#define GRBM_SE0_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xc
++#define GRBM_SE0_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xd
++#define GRBM_SE0_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0xf
++#define GRBM_SE0_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x10
++#define GRBM_SE0_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x11
++#define GRBM_SE0_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x12
++#define GRBM_SE0_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK__SHIFT 0x13
++#define GRBM_SE0_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x14
++#define GRBM_SE0_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x15
++#define GRBM_SE0_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK__SHIFT 0x16
++#define GRBM_SE0_PERFCOUNTER_SELECT__PERF_SEL_MASK 0x0000003FL
++#define GRBM_SE0_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
++#define GRBM_SE0_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
++#define GRBM_SE0_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00001000L
++#define GRBM_SE0_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
++#define GRBM_SE0_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00008000L
++#define GRBM_SE0_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
++#define GRBM_SE0_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
++#define GRBM_SE0_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
++#define GRBM_SE0_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK_MASK 0x00080000L
++#define GRBM_SE0_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
++#define GRBM_SE0_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
++#define GRBM_SE0_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK_MASK 0x00400000L
++//GRBM_SE1_PERFCOUNTER_SELECT
++#define GRBM_SE1_PERFCOUNTER_SELECT__PERF_SEL__SHIFT 0x0
++#define GRBM_SE1_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
++#define GRBM_SE1_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
++#define GRBM_SE1_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xc
++#define GRBM_SE1_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xd
++#define GRBM_SE1_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0xf
++#define GRBM_SE1_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x10
++#define GRBM_SE1_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x11
++#define GRBM_SE1_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x12
++#define GRBM_SE1_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK__SHIFT 0x13
++#define GRBM_SE1_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x14
++#define GRBM_SE1_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x15
++#define GRBM_SE1_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK__SHIFT 0x16
++#define GRBM_SE1_PERFCOUNTER_SELECT__PERF_SEL_MASK 0x0000003FL
++#define GRBM_SE1_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
++#define GRBM_SE1_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
++#define GRBM_SE1_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00001000L
++#define GRBM_SE1_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
++#define GRBM_SE1_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00008000L
++#define GRBM_SE1_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
++#define GRBM_SE1_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
++#define GRBM_SE1_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
++#define GRBM_SE1_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK_MASK 0x00080000L
++#define GRBM_SE1_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
++#define GRBM_SE1_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
++#define GRBM_SE1_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK_MASK 0x00400000L
++//GRBM_SE2_PERFCOUNTER_SELECT
++#define GRBM_SE2_PERFCOUNTER_SELECT__PERF_SEL__SHIFT 0x0
++#define GRBM_SE2_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
++#define GRBM_SE2_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
++#define GRBM_SE2_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xc
++#define GRBM_SE2_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xd
++#define GRBM_SE2_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0xf
++#define GRBM_SE2_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x10
++#define GRBM_SE2_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x11
++#define GRBM_SE2_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x12
++#define GRBM_SE2_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK__SHIFT 0x13
++#define GRBM_SE2_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x14
++#define GRBM_SE2_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x15
++#define GRBM_SE2_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK__SHIFT 0x16
++#define GRBM_SE2_PERFCOUNTER_SELECT__PERF_SEL_MASK 0x0000003FL
++#define GRBM_SE2_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
++#define GRBM_SE2_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
++#define GRBM_SE2_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00001000L
++#define GRBM_SE2_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
++#define GRBM_SE2_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00008000L
++#define GRBM_SE2_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
++#define GRBM_SE2_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
++#define GRBM_SE2_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
++#define GRBM_SE2_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK_MASK 0x00080000L
++#define GRBM_SE2_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
++#define GRBM_SE2_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
++#define GRBM_SE2_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK_MASK 0x00400000L
++//GRBM_SE3_PERFCOUNTER_SELECT
++#define GRBM_SE3_PERFCOUNTER_SELECT__PERF_SEL__SHIFT 0x0
++#define GRBM_SE3_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
++#define GRBM_SE3_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
++#define GRBM_SE3_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xc
++#define GRBM_SE3_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xd
++#define GRBM_SE3_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0xf
++#define GRBM_SE3_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x10
++#define GRBM_SE3_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x11
++#define GRBM_SE3_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x12
++#define GRBM_SE3_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK__SHIFT 0x13
++#define GRBM_SE3_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x14
++#define GRBM_SE3_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x15
++#define GRBM_SE3_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK__SHIFT 0x16
++#define GRBM_SE3_PERFCOUNTER_SELECT__PERF_SEL_MASK 0x0000003FL
++#define GRBM_SE3_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
++#define GRBM_SE3_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
++#define GRBM_SE3_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00001000L
++#define GRBM_SE3_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
++#define GRBM_SE3_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00008000L
++#define GRBM_SE3_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
++#define GRBM_SE3_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
++#define GRBM_SE3_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
++#define GRBM_SE3_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK_MASK 0x00080000L
++#define GRBM_SE3_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
++#define GRBM_SE3_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
++#define GRBM_SE3_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK_MASK 0x00400000L
++//WD_PERFCOUNTER0_SELECT
++#define WD_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
++#define WD_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
++#define WD_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000000FFL
++#define WD_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
++//WD_PERFCOUNTER1_SELECT
++#define WD_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
++#define WD_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
++#define WD_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000000FFL
++#define WD_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
++//WD_PERFCOUNTER2_SELECT
++#define WD_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
++#define WD_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
++#define WD_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000000FFL
++#define WD_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
++//WD_PERFCOUNTER3_SELECT
++#define WD_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
++#define WD_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
++#define WD_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000000FFL
++#define WD_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
++//IA_PERFCOUNTER0_SELECT
++#define IA_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
++#define IA_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
++#define IA_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
++#define IA_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
++#define IA_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
++#define IA_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
++#define IA_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
++#define IA_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
++#define IA_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
++#define IA_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
++//IA_PERFCOUNTER1_SELECT
++#define IA_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
++#define IA_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
++#define IA_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000000FFL
++#define IA_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
++//IA_PERFCOUNTER2_SELECT
++#define IA_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
++#define IA_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
++#define IA_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000000FFL
++#define IA_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
++//IA_PERFCOUNTER3_SELECT
++#define IA_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
++#define IA_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
++#define IA_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000000FFL
++#define IA_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
++//IA_PERFCOUNTER0_SELECT1
++#define IA_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
++#define IA_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
++#define IA_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
++#define IA_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
++#define IA_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
++#define IA_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
++#define IA_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
++#define IA_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
++//VGT_PERFCOUNTER0_SELECT
++#define VGT_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
++#define VGT_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
++#define VGT_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
++#define VGT_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
++#define VGT_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
++#define VGT_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
++#define VGT_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
++#define VGT_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
++#define VGT_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
++#define VGT_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
++//VGT_PERFCOUNTER1_SELECT
++#define VGT_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
++#define VGT_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
++#define VGT_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
++#define VGT_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
++#define VGT_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
++#define VGT_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
++#define VGT_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
++#define VGT_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
++#define VGT_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
++#define VGT_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
++//VGT_PERFCOUNTER2_SELECT
++#define VGT_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
++#define VGT_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
++#define VGT_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000000FFL
++#define VGT_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
++//VGT_PERFCOUNTER3_SELECT
++#define VGT_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
++#define VGT_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
++#define VGT_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000000FFL
++#define VGT_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
++//VGT_PERFCOUNTER0_SELECT1
++#define VGT_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
++#define VGT_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
++#define VGT_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
++#define VGT_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
++#define VGT_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
++#define VGT_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
++#define VGT_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
++#define VGT_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
++//VGT_PERFCOUNTER1_SELECT1
++#define VGT_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
++#define VGT_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
++#define VGT_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
++#define VGT_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
++#define VGT_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
++#define VGT_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
++#define VGT_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
++#define VGT_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
++//VGT_PERFCOUNTER_SEID_MASK
++#define VGT_PERFCOUNTER_SEID_MASK__PERF_SEID_IGNORE_MASK__SHIFT 0x0
++#define VGT_PERFCOUNTER_SEID_MASK__PERF_SEID_IGNORE_MASK_MASK 0x000000FFL
++//PA_SU_PERFCOUNTER0_SELECT
++#define PA_SU_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
++#define PA_SU_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
++#define PA_SU_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
++#define PA_SU_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
++#define PA_SU_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
++#define PA_SU_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
++//PA_SU_PERFCOUNTER0_SELECT1
++#define PA_SU_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
++#define PA_SU_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
++#define PA_SU_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
++#define PA_SU_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
++//PA_SU_PERFCOUNTER1_SELECT
++#define PA_SU_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
++#define PA_SU_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
++#define PA_SU_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
++#define PA_SU_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
++#define PA_SU_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
++#define PA_SU_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
++//PA_SU_PERFCOUNTER1_SELECT1
++#define PA_SU_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
++#define PA_SU_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
++#define PA_SU_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
++#define PA_SU_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
++//PA_SU_PERFCOUNTER2_SELECT
++#define PA_SU_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
++#define PA_SU_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
++#define PA_SU_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
++#define PA_SU_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
++//PA_SU_PERFCOUNTER3_SELECT
++#define PA_SU_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
++#define PA_SU_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
++#define PA_SU_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
++#define PA_SU_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
++//PA_SC_PERFCOUNTER0_SELECT
++#define PA_SC_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
++#define PA_SC_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
++#define PA_SC_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
++#define PA_SC_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
++#define PA_SC_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
++#define PA_SC_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
++//PA_SC_PERFCOUNTER0_SELECT1
++#define PA_SC_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
++#define PA_SC_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
++#define PA_SC_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
++#define PA_SC_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
++//PA_SC_PERFCOUNTER1_SELECT
++#define PA_SC_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
++#define PA_SC_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
++//PA_SC_PERFCOUNTER2_SELECT
++#define PA_SC_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
++#define PA_SC_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
++//PA_SC_PERFCOUNTER3_SELECT
++#define PA_SC_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
++#define PA_SC_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
++//PA_SC_PERFCOUNTER4_SELECT
++#define PA_SC_PERFCOUNTER4_SELECT__PERF_SEL__SHIFT 0x0
++#define PA_SC_PERFCOUNTER4_SELECT__PERF_SEL_MASK 0x000003FFL
++//PA_SC_PERFCOUNTER5_SELECT
++#define PA_SC_PERFCOUNTER5_SELECT__PERF_SEL__SHIFT 0x0
++#define PA_SC_PERFCOUNTER5_SELECT__PERF_SEL_MASK 0x000003FFL
++//PA_SC_PERFCOUNTER6_SELECT
++#define PA_SC_PERFCOUNTER6_SELECT__PERF_SEL__SHIFT 0x0
++#define PA_SC_PERFCOUNTER6_SELECT__PERF_SEL_MASK 0x000003FFL
++//PA_SC_PERFCOUNTER7_SELECT
++#define PA_SC_PERFCOUNTER7_SELECT__PERF_SEL__SHIFT 0x0
++#define PA_SC_PERFCOUNTER7_SELECT__PERF_SEL_MASK 0x000003FFL
++//SPI_PERFCOUNTER0_SELECT
++#define SPI_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
++#define SPI_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
++#define SPI_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
++#define SPI_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
++#define SPI_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
++#define SPI_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
++#define SPI_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
++#define SPI_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
++#define SPI_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
++#define SPI_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
++//SPI_PERFCOUNTER1_SELECT
++#define SPI_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
++#define SPI_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
++#define SPI_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
++#define SPI_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
++#define SPI_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
++#define SPI_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
++#define SPI_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
++#define SPI_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
++#define SPI_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
++#define SPI_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
++//SPI_PERFCOUNTER2_SELECT
++#define SPI_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
++#define SPI_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
++#define SPI_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
++#define SPI_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
++#define SPI_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
++#define SPI_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
++#define SPI_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
++#define SPI_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
++#define SPI_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
++#define SPI_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
++//SPI_PERFCOUNTER3_SELECT
++#define SPI_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
++#define SPI_PERFCOUNTER3_SELECT__PERF_SEL1__SHIFT 0xa
++#define SPI_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
++#define SPI_PERFCOUNTER3_SELECT__PERF_MODE1__SHIFT 0x18
++#define SPI_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
++#define SPI_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
++#define SPI_PERFCOUNTER3_SELECT__PERF_SEL1_MASK 0x000FFC00L
++#define SPI_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
++#define SPI_PERFCOUNTER3_SELECT__PERF_MODE1_MASK 0x0F000000L
++#define SPI_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
++//SPI_PERFCOUNTER0_SELECT1
++#define SPI_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
++#define SPI_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
++#define SPI_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
++#define SPI_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
++#define SPI_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
++#define SPI_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
++#define SPI_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
++#define SPI_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
++//SPI_PERFCOUNTER1_SELECT1
++#define SPI_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
++#define SPI_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
++#define SPI_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
++#define SPI_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
++#define SPI_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
++#define SPI_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
++#define SPI_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
++#define SPI_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
++//SPI_PERFCOUNTER2_SELECT1
++#define SPI_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x0
++#define SPI_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0xa
++#define SPI_PERFCOUNTER2_SELECT1__PERF_MODE3__SHIFT 0x18
++#define SPI_PERFCOUNTER2_SELECT1__PERF_MODE2__SHIFT 0x1c
++#define SPI_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x000003FFL
++#define SPI_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0x000FFC00L
++#define SPI_PERFCOUNTER2_SELECT1__PERF_MODE3_MASK 0x0F000000L
++#define SPI_PERFCOUNTER2_SELECT1__PERF_MODE2_MASK 0xF0000000L
++//SPI_PERFCOUNTER3_SELECT1
++#define SPI_PERFCOUNTER3_SELECT1__PERF_SEL2__SHIFT 0x0
++#define SPI_PERFCOUNTER3_SELECT1__PERF_SEL3__SHIFT 0xa
++#define SPI_PERFCOUNTER3_SELECT1__PERF_MODE3__SHIFT 0x18
++#define SPI_PERFCOUNTER3_SELECT1__PERF_MODE2__SHIFT 0x1c
++#define SPI_PERFCOUNTER3_SELECT1__PERF_SEL2_MASK 0x000003FFL
++#define SPI_PERFCOUNTER3_SELECT1__PERF_SEL3_MASK 0x000FFC00L
++#define SPI_PERFCOUNTER3_SELECT1__PERF_MODE3_MASK 0x0F000000L
++#define SPI_PERFCOUNTER3_SELECT1__PERF_MODE2_MASK 0xF0000000L
++//SPI_PERFCOUNTER4_SELECT
++#define SPI_PERFCOUNTER4_SELECT__PERF_SEL__SHIFT 0x0
++#define SPI_PERFCOUNTER4_SELECT__PERF_SEL_MASK 0x000000FFL
++//SPI_PERFCOUNTER5_SELECT
++#define SPI_PERFCOUNTER5_SELECT__PERF_SEL__SHIFT 0x0
++#define SPI_PERFCOUNTER5_SELECT__PERF_SEL_MASK 0x000000FFL
++//SPI_PERFCOUNTER_BINS
++#define SPI_PERFCOUNTER_BINS__BIN0_MIN__SHIFT 0x0
++#define SPI_PERFCOUNTER_BINS__BIN0_MAX__SHIFT 0x4
++#define SPI_PERFCOUNTER_BINS__BIN1_MIN__SHIFT 0x8
++#define SPI_PERFCOUNTER_BINS__BIN1_MAX__SHIFT 0xc
++#define SPI_PERFCOUNTER_BINS__BIN2_MIN__SHIFT 0x10
++#define SPI_PERFCOUNTER_BINS__BIN2_MAX__SHIFT 0x14
++#define SPI_PERFCOUNTER_BINS__BIN3_MIN__SHIFT 0x18
++#define SPI_PERFCOUNTER_BINS__BIN3_MAX__SHIFT 0x1c
++#define SPI_PERFCOUNTER_BINS__BIN0_MIN_MASK 0x0000000FL
++#define SPI_PERFCOUNTER_BINS__BIN0_MAX_MASK 0x000000F0L
++#define SPI_PERFCOUNTER_BINS__BIN1_MIN_MASK 0x00000F00L
++#define SPI_PERFCOUNTER_BINS__BIN1_MAX_MASK 0x0000F000L
++#define SPI_PERFCOUNTER_BINS__BIN2_MIN_MASK 0x000F0000L
++#define SPI_PERFCOUNTER_BINS__BIN2_MAX_MASK 0x00F00000L
++#define SPI_PERFCOUNTER_BINS__BIN3_MIN_MASK 0x0F000000L
++#define SPI_PERFCOUNTER_BINS__BIN3_MAX_MASK 0xF0000000L
++//SQ_PERFCOUNTER0_SELECT
++#define SQ_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
++#define SQ_PERFCOUNTER0_SELECT__SQC_BANK_MASK__SHIFT 0xc
++#define SQ_PERFCOUNTER0_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
++#define SQ_PERFCOUNTER0_SELECT__SPM_MODE__SHIFT 0x14
++#define SQ_PERFCOUNTER0_SELECT__SIMD_MASK__SHIFT 0x18
++#define SQ_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
++#define SQ_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000001FFL
++#define SQ_PERFCOUNTER0_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
++#define SQ_PERFCOUNTER0_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
++#define SQ_PERFCOUNTER0_SELECT__SPM_MODE_MASK 0x00F00000L
++#define SQ_PERFCOUNTER0_SELECT__SIMD_MASK_MASK 0x0F000000L
++#define SQ_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
++//SQ_PERFCOUNTER1_SELECT
++#define SQ_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
++#define SQ_PERFCOUNTER1_SELECT__SQC_BANK_MASK__SHIFT 0xc
++#define SQ_PERFCOUNTER1_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
++#define SQ_PERFCOUNTER1_SELECT__SPM_MODE__SHIFT 0x14
++#define SQ_PERFCOUNTER1_SELECT__SIMD_MASK__SHIFT 0x18
++#define SQ_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
++#define SQ_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000001FFL
++#define SQ_PERFCOUNTER1_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
++#define SQ_PERFCOUNTER1_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
++#define SQ_PERFCOUNTER1_SELECT__SPM_MODE_MASK 0x00F00000L
++#define SQ_PERFCOUNTER1_SELECT__SIMD_MASK_MASK 0x0F000000L
++#define SQ_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
++//SQ_PERFCOUNTER2_SELECT
++#define SQ_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
++#define SQ_PERFCOUNTER2_SELECT__SQC_BANK_MASK__SHIFT 0xc
++#define SQ_PERFCOUNTER2_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
++#define SQ_PERFCOUNTER2_SELECT__SPM_MODE__SHIFT 0x14
++#define SQ_PERFCOUNTER2_SELECT__SIMD_MASK__SHIFT 0x18
++#define SQ_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
++#define SQ_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000001FFL
++#define SQ_PERFCOUNTER2_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
++#define SQ_PERFCOUNTER2_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
++#define SQ_PERFCOUNTER2_SELECT__SPM_MODE_MASK 0x00F00000L
++#define SQ_PERFCOUNTER2_SELECT__SIMD_MASK_MASK 0x0F000000L
++#define SQ_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
++//SQ_PERFCOUNTER3_SELECT
++#define SQ_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
++#define SQ_PERFCOUNTER3_SELECT__SQC_BANK_MASK__SHIFT 0xc
++#define SQ_PERFCOUNTER3_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
++#define SQ_PERFCOUNTER3_SELECT__SPM_MODE__SHIFT 0x14
++#define SQ_PERFCOUNTER3_SELECT__SIMD_MASK__SHIFT 0x18
++#define SQ_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
++#define SQ_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000001FFL
++#define SQ_PERFCOUNTER3_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
++#define SQ_PERFCOUNTER3_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
++#define SQ_PERFCOUNTER3_SELECT__SPM_MODE_MASK 0x00F00000L
++#define SQ_PERFCOUNTER3_SELECT__SIMD_MASK_MASK 0x0F000000L
++#define SQ_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
++//SQ_PERFCOUNTER4_SELECT
++#define SQ_PERFCOUNTER4_SELECT__PERF_SEL__SHIFT 0x0
++#define SQ_PERFCOUNTER4_SELECT__SQC_BANK_MASK__SHIFT 0xc
++#define SQ_PERFCOUNTER4_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
++#define SQ_PERFCOUNTER4_SELECT__SPM_MODE__SHIFT 0x14
++#define SQ_PERFCOUNTER4_SELECT__SIMD_MASK__SHIFT 0x18
++#define SQ_PERFCOUNTER4_SELECT__PERF_MODE__SHIFT 0x1c
++#define SQ_PERFCOUNTER4_SELECT__PERF_SEL_MASK 0x000001FFL
++#define SQ_PERFCOUNTER4_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
++#define SQ_PERFCOUNTER4_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
++#define SQ_PERFCOUNTER4_SELECT__SPM_MODE_MASK 0x00F00000L
++#define SQ_PERFCOUNTER4_SELECT__SIMD_MASK_MASK 0x0F000000L
++#define SQ_PERFCOUNTER4_SELECT__PERF_MODE_MASK 0xF0000000L
++//SQ_PERFCOUNTER5_SELECT
++#define SQ_PERFCOUNTER5_SELECT__PERF_SEL__SHIFT 0x0
++#define SQ_PERFCOUNTER5_SELECT__SQC_BANK_MASK__SHIFT 0xc
++#define SQ_PERFCOUNTER5_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
++#define SQ_PERFCOUNTER5_SELECT__SPM_MODE__SHIFT 0x14
++#define SQ_PERFCOUNTER5_SELECT__SIMD_MASK__SHIFT 0x18
++#define SQ_PERFCOUNTER5_SELECT__PERF_MODE__SHIFT 0x1c
++#define SQ_PERFCOUNTER5_SELECT__PERF_SEL_MASK 0x000001FFL
++#define SQ_PERFCOUNTER5_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
++#define SQ_PERFCOUNTER5_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
++#define SQ_PERFCOUNTER5_SELECT__SPM_MODE_MASK 0x00F00000L
++#define SQ_PERFCOUNTER5_SELECT__SIMD_MASK_MASK 0x0F000000L
++#define SQ_PERFCOUNTER5_SELECT__PERF_MODE_MASK 0xF0000000L
++//SQ_PERFCOUNTER6_SELECT
++#define SQ_PERFCOUNTER6_SELECT__PERF_SEL__SHIFT 0x0
++#define SQ_PERFCOUNTER6_SELECT__SQC_BANK_MASK__SHIFT 0xc
++#define SQ_PERFCOUNTER6_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
++#define SQ_PERFCOUNTER6_SELECT__SPM_MODE__SHIFT 0x14
++#define SQ_PERFCOUNTER6_SELECT__SIMD_MASK__SHIFT 0x18
++#define SQ_PERFCOUNTER6_SELECT__PERF_MODE__SHIFT 0x1c
++#define SQ_PERFCOUNTER6_SELECT__PERF_SEL_MASK 0x000001FFL
++#define SQ_PERFCOUNTER6_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
++#define SQ_PERFCOUNTER6_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
++#define SQ_PERFCOUNTER6_SELECT__SPM_MODE_MASK 0x00F00000L
++#define SQ_PERFCOUNTER6_SELECT__SIMD_MASK_MASK 0x0F000000L
++#define SQ_PERFCOUNTER6_SELECT__PERF_MODE_MASK 0xF0000000L
++//SQ_PERFCOUNTER7_SELECT
++#define SQ_PERFCOUNTER7_SELECT__PERF_SEL__SHIFT 0x0
++#define SQ_PERFCOUNTER7_SELECT__SQC_BANK_MASK__SHIFT 0xc
++#define SQ_PERFCOUNTER7_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
++#define SQ_PERFCOUNTER7_SELECT__SPM_MODE__SHIFT 0x14
++#define SQ_PERFCOUNTER7_SELECT__SIMD_MASK__SHIFT 0x18
++#define SQ_PERFCOUNTER7_SELECT__PERF_MODE__SHIFT 0x1c
++#define SQ_PERFCOUNTER7_SELECT__PERF_SEL_MASK 0x000001FFL
++#define SQ_PERFCOUNTER7_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
++#define SQ_PERFCOUNTER7_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
++#define SQ_PERFCOUNTER7_SELECT__SPM_MODE_MASK 0x00F00000L
++#define SQ_PERFCOUNTER7_SELECT__SIMD_MASK_MASK 0x0F000000L
++#define SQ_PERFCOUNTER7_SELECT__PERF_MODE_MASK 0xF0000000L
++//SQ_PERFCOUNTER8_SELECT
++#define SQ_PERFCOUNTER8_SELECT__PERF_SEL__SHIFT 0x0
++#define SQ_PERFCOUNTER8_SELECT__SQC_BANK_MASK__SHIFT 0xc
++#define SQ_PERFCOUNTER8_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
++#define SQ_PERFCOUNTER8_SELECT__SPM_MODE__SHIFT 0x14
++#define SQ_PERFCOUNTER8_SELECT__SIMD_MASK__SHIFT 0x18
++#define SQ_PERFCOUNTER8_SELECT__PERF_MODE__SHIFT 0x1c
++#define SQ_PERFCOUNTER8_SELECT__PERF_SEL_MASK 0x000001FFL
++#define SQ_PERFCOUNTER8_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
++#define SQ_PERFCOUNTER8_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
++#define SQ_PERFCOUNTER8_SELECT__SPM_MODE_MASK 0x00F00000L
++#define SQ_PERFCOUNTER8_SELECT__SIMD_MASK_MASK 0x0F000000L
++#define SQ_PERFCOUNTER8_SELECT__PERF_MODE_MASK 0xF0000000L
++//SQ_PERFCOUNTER9_SELECT
++#define SQ_PERFCOUNTER9_SELECT__PERF_SEL__SHIFT 0x0
++#define SQ_PERFCOUNTER9_SELECT__SQC_BANK_MASK__SHIFT 0xc
++#define SQ_PERFCOUNTER9_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
++#define SQ_PERFCOUNTER9_SELECT__SPM_MODE__SHIFT 0x14
++#define SQ_PERFCOUNTER9_SELECT__SIMD_MASK__SHIFT 0x18
++#define SQ_PERFCOUNTER9_SELECT__PERF_MODE__SHIFT 0x1c
++#define SQ_PERFCOUNTER9_SELECT__PERF_SEL_MASK 0x000001FFL
++#define SQ_PERFCOUNTER9_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
++#define SQ_PERFCOUNTER9_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
++#define SQ_PERFCOUNTER9_SELECT__SPM_MODE_MASK 0x00F00000L
++#define SQ_PERFCOUNTER9_SELECT__SIMD_MASK_MASK 0x0F000000L
++#define SQ_PERFCOUNTER9_SELECT__PERF_MODE_MASK 0xF0000000L
++//SQ_PERFCOUNTER10_SELECT
++#define SQ_PERFCOUNTER10_SELECT__PERF_SEL__SHIFT 0x0
++#define SQ_PERFCOUNTER10_SELECT__SQC_BANK_MASK__SHIFT 0xc
++#define SQ_PERFCOUNTER10_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
++#define SQ_PERFCOUNTER10_SELECT__SPM_MODE__SHIFT 0x14
++#define SQ_PERFCOUNTER10_SELECT__SIMD_MASK__SHIFT 0x18
++#define SQ_PERFCOUNTER10_SELECT__PERF_MODE__SHIFT 0x1c
++#define SQ_PERFCOUNTER10_SELECT__PERF_SEL_MASK 0x000001FFL
++#define SQ_PERFCOUNTER10_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
++#define SQ_PERFCOUNTER10_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
++#define SQ_PERFCOUNTER10_SELECT__SPM_MODE_MASK 0x00F00000L
++#define SQ_PERFCOUNTER10_SELECT__SIMD_MASK_MASK 0x0F000000L
++#define SQ_PERFCOUNTER10_SELECT__PERF_MODE_MASK 0xF0000000L
++//SQ_PERFCOUNTER11_SELECT
++#define SQ_PERFCOUNTER11_SELECT__PERF_SEL__SHIFT 0x0
++#define SQ_PERFCOUNTER11_SELECT__SQC_BANK_MASK__SHIFT 0xc
++#define SQ_PERFCOUNTER11_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
++#define SQ_PERFCOUNTER11_SELECT__SPM_MODE__SHIFT 0x14
++#define SQ_PERFCOUNTER11_SELECT__SIMD_MASK__SHIFT 0x18
++#define SQ_PERFCOUNTER11_SELECT__PERF_MODE__SHIFT 0x1c
++#define SQ_PERFCOUNTER11_SELECT__PERF_SEL_MASK 0x000001FFL
++#define SQ_PERFCOUNTER11_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
++#define SQ_PERFCOUNTER11_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
++#define SQ_PERFCOUNTER11_SELECT__SPM_MODE_MASK 0x00F00000L
++#define SQ_PERFCOUNTER11_SELECT__SIMD_MASK_MASK 0x0F000000L
++#define SQ_PERFCOUNTER11_SELECT__PERF_MODE_MASK 0xF0000000L
++//SQ_PERFCOUNTER12_SELECT
++#define SQ_PERFCOUNTER12_SELECT__PERF_SEL__SHIFT 0x0
++#define SQ_PERFCOUNTER12_SELECT__SQC_BANK_MASK__SHIFT 0xc
++#define SQ_PERFCOUNTER12_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
++#define SQ_PERFCOUNTER12_SELECT__SPM_MODE__SHIFT 0x14
++#define SQ_PERFCOUNTER12_SELECT__SIMD_MASK__SHIFT 0x18
++#define SQ_PERFCOUNTER12_SELECT__PERF_MODE__SHIFT 0x1c
++#define SQ_PERFCOUNTER12_SELECT__PERF_SEL_MASK 0x000001FFL
++#define SQ_PERFCOUNTER12_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
++#define SQ_PERFCOUNTER12_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
++#define SQ_PERFCOUNTER12_SELECT__SPM_MODE_MASK 0x00F00000L
++#define SQ_PERFCOUNTER12_SELECT__SIMD_MASK_MASK 0x0F000000L
++#define SQ_PERFCOUNTER12_SELECT__PERF_MODE_MASK 0xF0000000L
++//SQ_PERFCOUNTER13_SELECT
++#define SQ_PERFCOUNTER13_SELECT__PERF_SEL__SHIFT 0x0
++#define SQ_PERFCOUNTER13_SELECT__SQC_BANK_MASK__SHIFT 0xc
++#define SQ_PERFCOUNTER13_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
++#define SQ_PERFCOUNTER13_SELECT__SPM_MODE__SHIFT 0x14
++#define SQ_PERFCOUNTER13_SELECT__SIMD_MASK__SHIFT 0x18
++#define SQ_PERFCOUNTER13_SELECT__PERF_MODE__SHIFT 0x1c
++#define SQ_PERFCOUNTER13_SELECT__PERF_SEL_MASK 0x000001FFL
++#define SQ_PERFCOUNTER13_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
++#define SQ_PERFCOUNTER13_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
++#define SQ_PERFCOUNTER13_SELECT__SPM_MODE_MASK 0x00F00000L
++#define SQ_PERFCOUNTER13_SELECT__SIMD_MASK_MASK 0x0F000000L
++#define SQ_PERFCOUNTER13_SELECT__PERF_MODE_MASK 0xF0000000L
++//SQ_PERFCOUNTER14_SELECT
++#define SQ_PERFCOUNTER14_SELECT__PERF_SEL__SHIFT 0x0
++#define SQ_PERFCOUNTER14_SELECT__SQC_BANK_MASK__SHIFT 0xc
++#define SQ_PERFCOUNTER14_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
++#define SQ_PERFCOUNTER14_SELECT__SPM_MODE__SHIFT 0x14
++#define SQ_PERFCOUNTER14_SELECT__SIMD_MASK__SHIFT 0x18
++#define SQ_PERFCOUNTER14_SELECT__PERF_MODE__SHIFT 0x1c
++#define SQ_PERFCOUNTER14_SELECT__PERF_SEL_MASK 0x000001FFL
++#define SQ_PERFCOUNTER14_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
++#define SQ_PERFCOUNTER14_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
++#define SQ_PERFCOUNTER14_SELECT__SPM_MODE_MASK 0x00F00000L
++#define SQ_PERFCOUNTER14_SELECT__SIMD_MASK_MASK 0x0F000000L
++#define SQ_PERFCOUNTER14_SELECT__PERF_MODE_MASK 0xF0000000L
++//SQ_PERFCOUNTER15_SELECT
++#define SQ_PERFCOUNTER15_SELECT__PERF_SEL__SHIFT 0x0
++#define SQ_PERFCOUNTER15_SELECT__SQC_BANK_MASK__SHIFT 0xc
++#define SQ_PERFCOUNTER15_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
++#define SQ_PERFCOUNTER15_SELECT__SPM_MODE__SHIFT 0x14
++#define SQ_PERFCOUNTER15_SELECT__SIMD_MASK__SHIFT 0x18
++#define SQ_PERFCOUNTER15_SELECT__PERF_MODE__SHIFT 0x1c
++#define SQ_PERFCOUNTER15_SELECT__PERF_SEL_MASK 0x000001FFL
++#define SQ_PERFCOUNTER15_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
++#define SQ_PERFCOUNTER15_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
++#define SQ_PERFCOUNTER15_SELECT__SPM_MODE_MASK 0x00F00000L
++#define SQ_PERFCOUNTER15_SELECT__SIMD_MASK_MASK 0x0F000000L
++#define SQ_PERFCOUNTER15_SELECT__PERF_MODE_MASK 0xF0000000L
++//SQ_PERFCOUNTER_CTRL
++#define SQ_PERFCOUNTER_CTRL__PS_EN__SHIFT 0x0
++#define SQ_PERFCOUNTER_CTRL__VS_EN__SHIFT 0x1
++#define SQ_PERFCOUNTER_CTRL__GS_EN__SHIFT 0x2
++#define SQ_PERFCOUNTER_CTRL__ES_EN__SHIFT 0x3
++#define SQ_PERFCOUNTER_CTRL__HS_EN__SHIFT 0x4
++#define SQ_PERFCOUNTER_CTRL__LS_EN__SHIFT 0x5
++#define SQ_PERFCOUNTER_CTRL__CS_EN__SHIFT 0x6
++#define SQ_PERFCOUNTER_CTRL__CNTR_RATE__SHIFT 0x8
++#define SQ_PERFCOUNTER_CTRL__DISABLE_FLUSH__SHIFT 0xd
++#define SQ_PERFCOUNTER_CTRL__PS_EN_MASK 0x00000001L
++#define SQ_PERFCOUNTER_CTRL__VS_EN_MASK 0x00000002L
++#define SQ_PERFCOUNTER_CTRL__GS_EN_MASK 0x00000004L
++#define SQ_PERFCOUNTER_CTRL__ES_EN_MASK 0x00000008L
++#define SQ_PERFCOUNTER_CTRL__HS_EN_MASK 0x00000010L
++#define SQ_PERFCOUNTER_CTRL__LS_EN_MASK 0x00000020L
++#define SQ_PERFCOUNTER_CTRL__CS_EN_MASK 0x00000040L
++#define SQ_PERFCOUNTER_CTRL__CNTR_RATE_MASK 0x00001F00L
++#define SQ_PERFCOUNTER_CTRL__DISABLE_FLUSH_MASK 0x00002000L
++//SQ_PERFCOUNTER_MASK
++#define SQ_PERFCOUNTER_MASK__SH0_MASK__SHIFT 0x0
++#define SQ_PERFCOUNTER_MASK__SH1_MASK__SHIFT 0x10
++#define SQ_PERFCOUNTER_MASK__SH0_MASK_MASK 0x0000FFFFL
++#define SQ_PERFCOUNTER_MASK__SH1_MASK_MASK 0xFFFF0000L
++//SQ_PERFCOUNTER_CTRL2
++#define SQ_PERFCOUNTER_CTRL2__FORCE_EN__SHIFT 0x0
++#define SQ_PERFCOUNTER_CTRL2__FORCE_EN_MASK 0x00000001L
++//SX_PERFCOUNTER0_SELECT
++#define SX_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT__SHIFT 0x0
++#define SX_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT1__SHIFT 0xa
++#define SX_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
++#define SX_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT_MASK 0x000003FFL
++#define SX_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT1_MASK 0x000FFC00L
++#define SX_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
++//SX_PERFCOUNTER1_SELECT
++#define SX_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT__SHIFT 0x0
++#define SX_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT1__SHIFT 0xa
++#define SX_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
++#define SX_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT_MASK 0x000003FFL
++#define SX_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT1_MASK 0x000FFC00L
++#define SX_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
++//SX_PERFCOUNTER2_SELECT
++#define SX_PERFCOUNTER2_SELECT__PERFCOUNTER_SELECT__SHIFT 0x0
++#define SX_PERFCOUNTER2_SELECT__PERFCOUNTER_SELECT1__SHIFT 0xa
++#define SX_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
++#define SX_PERFCOUNTER2_SELECT__PERFCOUNTER_SELECT_MASK 0x000003FFL
++#define SX_PERFCOUNTER2_SELECT__PERFCOUNTER_SELECT1_MASK 0x000FFC00L
++#define SX_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
++//SX_PERFCOUNTER3_SELECT
++#define SX_PERFCOUNTER3_SELECT__PERFCOUNTER_SELECT__SHIFT 0x0
++#define SX_PERFCOUNTER3_SELECT__PERFCOUNTER_SELECT1__SHIFT 0xa
++#define SX_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
++#define SX_PERFCOUNTER3_SELECT__PERFCOUNTER_SELECT_MASK 0x000003FFL
++#define SX_PERFCOUNTER3_SELECT__PERFCOUNTER_SELECT1_MASK 0x000FFC00L
++#define SX_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
++//SX_PERFCOUNTER0_SELECT1
++#define SX_PERFCOUNTER0_SELECT1__PERFCOUNTER_SELECT2__SHIFT 0x0
++#define SX_PERFCOUNTER0_SELECT1__PERFCOUNTER_SELECT3__SHIFT 0xa
++#define SX_PERFCOUNTER0_SELECT1__PERFCOUNTER_SELECT2_MASK 0x000003FFL
++#define SX_PERFCOUNTER0_SELECT1__PERFCOUNTER_SELECT3_MASK 0x000FFC00L
++//SX_PERFCOUNTER1_SELECT1
++#define SX_PERFCOUNTER1_SELECT1__PERFCOUNTER_SELECT2__SHIFT 0x0
++#define SX_PERFCOUNTER1_SELECT1__PERFCOUNTER_SELECT3__SHIFT 0xa
++#define SX_PERFCOUNTER1_SELECT1__PERFCOUNTER_SELECT2_MASK 0x000003FFL
++#define SX_PERFCOUNTER1_SELECT1__PERFCOUNTER_SELECT3_MASK 0x000FFC00L
++//GDS_PERFCOUNTER0_SELECT
++#define GDS_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT__SHIFT 0x0
++#define GDS_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT1__SHIFT 0xa
++#define GDS_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
++#define GDS_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT_MASK 0x000003FFL
++#define GDS_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT1_MASK 0x000FFC00L
++#define GDS_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
++//GDS_PERFCOUNTER1_SELECT
++#define GDS_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT__SHIFT 0x0
++#define GDS_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT1__SHIFT 0xa
++#define GDS_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
++#define GDS_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT_MASK 0x000003FFL
++#define GDS_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT1_MASK 0x000FFC00L
++#define GDS_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
++//GDS_PERFCOUNTER2_SELECT
++#define GDS_PERFCOUNTER2_SELECT__PERFCOUNTER_SELECT__SHIFT 0x0
++#define GDS_PERFCOUNTER2_SELECT__PERFCOUNTER_SELECT1__SHIFT 0xa
++#define GDS_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
++#define GDS_PERFCOUNTER2_SELECT__PERFCOUNTER_SELECT_MASK 0x000003FFL
++#define GDS_PERFCOUNTER2_SELECT__PERFCOUNTER_SELECT1_MASK 0x000FFC00L
++#define GDS_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
++//GDS_PERFCOUNTER3_SELECT
++#define GDS_PERFCOUNTER3_SELECT__PERFCOUNTER_SELECT__SHIFT 0x0
++#define GDS_PERFCOUNTER3_SELECT__PERFCOUNTER_SELECT1__SHIFT 0xa
++#define GDS_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
++#define GDS_PERFCOUNTER3_SELECT__PERFCOUNTER_SELECT_MASK 0x000003FFL
++#define GDS_PERFCOUNTER3_SELECT__PERFCOUNTER_SELECT1_MASK 0x000FFC00L
++#define GDS_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
++//GDS_PERFCOUNTER0_SELECT1
++#define GDS_PERFCOUNTER0_SELECT1__PERFCOUNTER_SELECT2__SHIFT 0x0
++#define GDS_PERFCOUNTER0_SELECT1__PERFCOUNTER_SELECT3__SHIFT 0xa
++#define GDS_PERFCOUNTER0_SELECT1__PERFCOUNTER_SELECT2_MASK 0x000003FFL
++#define GDS_PERFCOUNTER0_SELECT1__PERFCOUNTER_SELECT3_MASK 0x000FFC00L
++//TA_PERFCOUNTER0_SELECT
++#define TA_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
++#define TA_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
++#define TA_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
++#define TA_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
++#define TA_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
++#define TA_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000000FFL
++#define TA_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x0003FC00L
++#define TA_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
++#define TA_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
++#define TA_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
++//TA_PERFCOUNTER0_SELECT1
++#define TA_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
++#define TA_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
++#define TA_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
++#define TA_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
++#define TA_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000000FFL
++#define TA_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x0003FC00L
++#define TA_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
++#define TA_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
++//TA_PERFCOUNTER1_SELECT
++#define TA_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
++#define TA_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
++#define TA_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
++#define TA_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
++#define TA_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
++#define TA_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000000FFL
++#define TA_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x0003FC00L
++#define TA_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
++#define TA_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
++#define TA_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
++//TD_PERFCOUNTER0_SELECT
++#define TD_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
++#define TD_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
++#define TD_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
++#define TD_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
++#define TD_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
++#define TD_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000000FFL
++#define TD_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x0003FC00L
++#define TD_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
++#define TD_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
++#define TD_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
++//TD_PERFCOUNTER0_SELECT1
++#define TD_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
++#define TD_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
++#define TD_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
++#define TD_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
++#define TD_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000000FFL
++#define TD_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x0003FC00L
++#define TD_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
++#define TD_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
++//TD_PERFCOUNTER1_SELECT
++#define TD_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
++#define TD_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
++#define TD_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
++#define TD_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
++#define TD_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
++#define TD_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000000FFL
++#define TD_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x0003FC00L
++#define TD_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
++#define TD_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
++#define TD_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
++//TCP_PERFCOUNTER0_SELECT
++#define TCP_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
++#define TCP_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
++#define TCP_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
++#define TCP_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
++#define TCP_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
++#define TCP_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
++#define TCP_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
++#define TCP_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
++#define TCP_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
++#define TCP_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
++//TCP_PERFCOUNTER0_SELECT1
++#define TCP_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
++#define TCP_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
++#define TCP_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
++#define TCP_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
++#define TCP_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
++#define TCP_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
++#define TCP_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
++#define TCP_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
++//TCP_PERFCOUNTER1_SELECT
++#define TCP_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
++#define TCP_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
++#define TCP_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
++#define TCP_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
++#define TCP_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
++#define TCP_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
++#define TCP_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
++#define TCP_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
++#define TCP_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
++#define TCP_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
++//TCP_PERFCOUNTER1_SELECT1
++#define TCP_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
++#define TCP_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
++#define TCP_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
++#define TCP_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
++#define TCP_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
++#define TCP_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
++#define TCP_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
++#define TCP_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
++//TCP_PERFCOUNTER2_SELECT
++#define TCP_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
++#define TCP_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
++#define TCP_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
++#define TCP_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
++#define TCP_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
++#define TCP_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
++//TCP_PERFCOUNTER3_SELECT
++#define TCP_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
++#define TCP_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
++#define TCP_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
++#define TCP_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
++#define TCP_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
++#define TCP_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
++//TCC_PERFCOUNTER0_SELECT
++#define TCC_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
++#define TCC_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
++#define TCC_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
++#define TCC_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
++#define TCC_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
++#define TCC_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
++#define TCC_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
++#define TCC_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
++#define TCC_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
++#define TCC_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
++//TCC_PERFCOUNTER0_SELECT1
++#define TCC_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
++#define TCC_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
++#define TCC_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x18
++#define TCC_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x1c
++#define TCC_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
++#define TCC_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
++#define TCC_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0x0F000000L
++#define TCC_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0xF0000000L
++//TCC_PERFCOUNTER1_SELECT
++#define TCC_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
++#define TCC_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
++#define TCC_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
++#define TCC_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
++#define TCC_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
++#define TCC_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
++#define TCC_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
++#define TCC_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
++#define TCC_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
++#define TCC_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
++//TCC_PERFCOUNTER1_SELECT1
++#define TCC_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
++#define TCC_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
++#define TCC_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x18
++#define TCC_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x1c
++#define TCC_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
++#define TCC_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
++#define TCC_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0x0F000000L
++#define TCC_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0xF0000000L
++//TCC_PERFCOUNTER2_SELECT
++#define TCC_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
++#define TCC_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
++#define TCC_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
++#define TCC_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
++#define TCC_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
++#define TCC_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
++//TCC_PERFCOUNTER3_SELECT
++#define TCC_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
++#define TCC_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
++#define TCC_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
++#define TCC_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
++#define TCC_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
++#define TCC_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
++//TCA_PERFCOUNTER0_SELECT
++#define TCA_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
++#define TCA_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
++#define TCA_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
++#define TCA_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
++#define TCA_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
++#define TCA_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
++#define TCA_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
++#define TCA_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
++#define TCA_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
++#define TCA_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
++//TCA_PERFCOUNTER0_SELECT1
++#define TCA_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
++#define TCA_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
++#define TCA_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x18
++#define TCA_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x1c
++#define TCA_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
++#define TCA_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
++#define TCA_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0x0F000000L
++#define TCA_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0xF0000000L
++//TCA_PERFCOUNTER1_SELECT
++#define TCA_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
++#define TCA_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
++#define TCA_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
++#define TCA_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
++#define TCA_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
++#define TCA_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
++#define TCA_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
++#define TCA_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
++#define TCA_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
++#define TCA_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
++//TCA_PERFCOUNTER1_SELECT1
++#define TCA_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
++#define TCA_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
++#define TCA_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x18
++#define TCA_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x1c
++#define TCA_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
++#define TCA_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
++#define TCA_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0x0F000000L
++#define TCA_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0xF0000000L
++//TCA_PERFCOUNTER2_SELECT
++#define TCA_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
++#define TCA_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
++#define TCA_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
++#define TCA_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
++#define TCA_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
++#define TCA_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
++//TCA_PERFCOUNTER3_SELECT
++#define TCA_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
++#define TCA_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
++#define TCA_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
++#define TCA_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
++#define TCA_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
++#define TCA_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
++//CB_PERFCOUNTER_FILTER
++#define CB_PERFCOUNTER_FILTER__OP_FILTER_ENABLE__SHIFT 0x0
++#define CB_PERFCOUNTER_FILTER__OP_FILTER_SEL__SHIFT 0x1
++#define CB_PERFCOUNTER_FILTER__FORMAT_FILTER_ENABLE__SHIFT 0x4
++#define CB_PERFCOUNTER_FILTER__FORMAT_FILTER_SEL__SHIFT 0x5
++#define CB_PERFCOUNTER_FILTER__CLEAR_FILTER_ENABLE__SHIFT 0xa
++#define CB_PERFCOUNTER_FILTER__CLEAR_FILTER_SEL__SHIFT 0xb
++#define CB_PERFCOUNTER_FILTER__MRT_FILTER_ENABLE__SHIFT 0xc
++#define CB_PERFCOUNTER_FILTER__MRT_FILTER_SEL__SHIFT 0xd
++#define CB_PERFCOUNTER_FILTER__NUM_SAMPLES_FILTER_ENABLE__SHIFT 0x11
++#define CB_PERFCOUNTER_FILTER__NUM_SAMPLES_FILTER_SEL__SHIFT 0x12
++#define CB_PERFCOUNTER_FILTER__NUM_FRAGMENTS_FILTER_ENABLE__SHIFT 0x15
++#define CB_PERFCOUNTER_FILTER__NUM_FRAGMENTS_FILTER_SEL__SHIFT 0x16
++#define CB_PERFCOUNTER_FILTER__OP_FILTER_ENABLE_MASK 0x00000001L
++#define CB_PERFCOUNTER_FILTER__OP_FILTER_SEL_MASK 0x0000000EL
++#define CB_PERFCOUNTER_FILTER__FORMAT_FILTER_ENABLE_MASK 0x00000010L
++#define CB_PERFCOUNTER_FILTER__FORMAT_FILTER_SEL_MASK 0x000003E0L
++#define CB_PERFCOUNTER_FILTER__CLEAR_FILTER_ENABLE_MASK 0x00000400L
++#define CB_PERFCOUNTER_FILTER__CLEAR_FILTER_SEL_MASK 0x00000800L
++#define CB_PERFCOUNTER_FILTER__MRT_FILTER_ENABLE_MASK 0x00001000L
++#define CB_PERFCOUNTER_FILTER__MRT_FILTER_SEL_MASK 0x0000E000L
++#define CB_PERFCOUNTER_FILTER__NUM_SAMPLES_FILTER_ENABLE_MASK 0x00020000L
++#define CB_PERFCOUNTER_FILTER__NUM_SAMPLES_FILTER_SEL_MASK 0x001C0000L
++#define CB_PERFCOUNTER_FILTER__NUM_FRAGMENTS_FILTER_ENABLE_MASK 0x00200000L
++#define CB_PERFCOUNTER_FILTER__NUM_FRAGMENTS_FILTER_SEL_MASK 0x00C00000L
++//CB_PERFCOUNTER0_SELECT
++#define CB_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
++#define CB_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
++#define CB_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
++#define CB_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
++#define CB_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
++#define CB_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000001FFL
++#define CB_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x0007FC00L
++#define CB_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
++#define CB_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
++#define CB_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
++//CB_PERFCOUNTER0_SELECT1
++#define CB_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
++#define CB_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
++#define CB_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
++#define CB_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
++#define CB_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000001FFL
++#define CB_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x0007FC00L
++#define CB_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
++#define CB_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
++//CB_PERFCOUNTER1_SELECT
++#define CB_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
++#define CB_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
++#define CB_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000001FFL
++#define CB_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
++//CB_PERFCOUNTER2_SELECT
++#define CB_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
++#define CB_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
++#define CB_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000001FFL
++#define CB_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
++//CB_PERFCOUNTER3_SELECT
++#define CB_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
++#define CB_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
++#define CB_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000001FFL
++#define CB_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
++//DB_PERFCOUNTER0_SELECT
++#define DB_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
++#define DB_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
++#define DB_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
++#define DB_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
++#define DB_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
++#define DB_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
++#define DB_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
++#define DB_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
++#define DB_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
++#define DB_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
++//DB_PERFCOUNTER0_SELECT1
++#define DB_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
++#define DB_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
++#define DB_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
++#define DB_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
++#define DB_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
++#define DB_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
++#define DB_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
++#define DB_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
++//DB_PERFCOUNTER1_SELECT
++#define DB_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
++#define DB_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
++#define DB_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
++#define DB_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
++#define DB_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
++#define DB_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
++#define DB_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
++#define DB_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
++#define DB_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
++#define DB_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
++//DB_PERFCOUNTER1_SELECT1
++#define DB_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
++#define DB_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
++#define DB_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
++#define DB_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
++#define DB_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
++#define DB_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
++#define DB_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
++#define DB_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
++//DB_PERFCOUNTER2_SELECT
++#define DB_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
++#define DB_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
++#define DB_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
++#define DB_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
++#define DB_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
++#define DB_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
++#define DB_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
++#define DB_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
++#define DB_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
++#define DB_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
++//DB_PERFCOUNTER3_SELECT
++#define DB_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
++#define DB_PERFCOUNTER3_SELECT__PERF_SEL1__SHIFT 0xa
++#define DB_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
++#define DB_PERFCOUNTER3_SELECT__PERF_MODE1__SHIFT 0x18
++#define DB_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
++#define DB_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
++#define DB_PERFCOUNTER3_SELECT__PERF_SEL1_MASK 0x000FFC00L
++#define DB_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
++#define DB_PERFCOUNTER3_SELECT__PERF_MODE1_MASK 0x0F000000L
++#define DB_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
++//RLC_SPM_PERFMON_CNTL
++#define RLC_SPM_PERFMON_CNTL__RESERVED1__SHIFT 0x2
++#define RLC_SPM_PERFMON_CNTL__PERFMON_RING_MODE__SHIFT 0xc
++#define RLC_SPM_PERFMON_CNTL__RESERVED__SHIFT 0xe
++#define RLC_SPM_PERFMON_CNTL__PERFMON_SAMPLE_INTERVAL__SHIFT 0x10
++#define RLC_SPM_PERFMON_CNTL__RESERVED1_MASK 0x00000FFCL
++#define RLC_SPM_PERFMON_CNTL__PERFMON_RING_MODE_MASK 0x00003000L
++#define RLC_SPM_PERFMON_CNTL__RESERVED_MASK 0x0000C000L
++#define RLC_SPM_PERFMON_CNTL__PERFMON_SAMPLE_INTERVAL_MASK 0xFFFF0000L
++//RLC_SPM_PERFMON_RING_BASE_LO
++#define RLC_SPM_PERFMON_RING_BASE_LO__RING_BASE_LO__SHIFT 0x0
++#define RLC_SPM_PERFMON_RING_BASE_LO__RING_BASE_LO_MASK 0xFFFFFFFFL
++//RLC_SPM_PERFMON_RING_BASE_HI
++#define RLC_SPM_PERFMON_RING_BASE_HI__RING_BASE_HI__SHIFT 0x0
++#define RLC_SPM_PERFMON_RING_BASE_HI__RESERVED__SHIFT 0x10
++#define RLC_SPM_PERFMON_RING_BASE_HI__RING_BASE_HI_MASK 0x0000FFFFL
++#define RLC_SPM_PERFMON_RING_BASE_HI__RESERVED_MASK 0xFFFF0000L
++//RLC_SPM_PERFMON_RING_SIZE
++#define RLC_SPM_PERFMON_RING_SIZE__RING_BASE_SIZE__SHIFT 0x0
++#define RLC_SPM_PERFMON_RING_SIZE__RING_BASE_SIZE_MASK 0xFFFFFFFFL
++//RLC_SPM_PERFMON_SEGMENT_SIZE
++#define RLC_SPM_PERFMON_SEGMENT_SIZE__PERFMON_SEGMENT_SIZE__SHIFT 0x0
++#define RLC_SPM_PERFMON_SEGMENT_SIZE__RESERVED1__SHIFT 0x8
++#define RLC_SPM_PERFMON_SEGMENT_SIZE__GLOBAL_NUM_LINE__SHIFT 0xb
++#define RLC_SPM_PERFMON_SEGMENT_SIZE__SE0_NUM_LINE__SHIFT 0x10
++#define RLC_SPM_PERFMON_SEGMENT_SIZE__SE1_NUM_LINE__SHIFT 0x15
++#define RLC_SPM_PERFMON_SEGMENT_SIZE__SE2_NUM_LINE__SHIFT 0x1a
++#define RLC_SPM_PERFMON_SEGMENT_SIZE__RESERVED__SHIFT 0x1f
++#define RLC_SPM_PERFMON_SEGMENT_SIZE__PERFMON_SEGMENT_SIZE_MASK 0x000000FFL
++#define RLC_SPM_PERFMON_SEGMENT_SIZE__RESERVED1_MASK 0x00000700L
++#define RLC_SPM_PERFMON_SEGMENT_SIZE__GLOBAL_NUM_LINE_MASK 0x0000F800L
++#define RLC_SPM_PERFMON_SEGMENT_SIZE__SE0_NUM_LINE_MASK 0x001F0000L
++#define RLC_SPM_PERFMON_SEGMENT_SIZE__SE1_NUM_LINE_MASK 0x03E00000L
++#define RLC_SPM_PERFMON_SEGMENT_SIZE__SE2_NUM_LINE_MASK 0x7C000000L
++#define RLC_SPM_PERFMON_SEGMENT_SIZE__RESERVED_MASK 0x80000000L
++//RLC_SPM_SE_MUXSEL_ADDR
++#define RLC_SPM_SE_MUXSEL_ADDR__PERFMON_SEL_ADDR__SHIFT 0x0
++#define RLC_SPM_SE_MUXSEL_ADDR__PERFMON_SEL_ADDR_MASK 0xFFFFFFFFL
++//RLC_SPM_SE_MUXSEL_DATA
++#define RLC_SPM_SE_MUXSEL_DATA__PERFMON_SEL_DATA__SHIFT 0x0
++#define RLC_SPM_SE_MUXSEL_DATA__PERFMON_SEL_DATA_MASK 0xFFFFFFFFL
++//RLC_SPM_CPG_PERFMON_SAMPLE_DELAY
++#define RLC_SPM_CPG_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
++#define RLC_SPM_CPG_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
++#define RLC_SPM_CPG_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
++#define RLC_SPM_CPG_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
++//RLC_SPM_CPC_PERFMON_SAMPLE_DELAY
++#define RLC_SPM_CPC_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
++#define RLC_SPM_CPC_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
++#define RLC_SPM_CPC_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
++#define RLC_SPM_CPC_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
++//RLC_SPM_CPF_PERFMON_SAMPLE_DELAY
++#define RLC_SPM_CPF_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
++#define RLC_SPM_CPF_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
++#define RLC_SPM_CPF_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
++#define RLC_SPM_CPF_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
++//RLC_SPM_CB_PERFMON_SAMPLE_DELAY
++#define RLC_SPM_CB_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
++#define RLC_SPM_CB_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
++#define RLC_SPM_CB_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
++#define RLC_SPM_CB_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
++//RLC_SPM_DB_PERFMON_SAMPLE_DELAY
++#define RLC_SPM_DB_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
++#define RLC_SPM_DB_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
++#define RLC_SPM_DB_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
++#define RLC_SPM_DB_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
++//RLC_SPM_PA_PERFMON_SAMPLE_DELAY
++#define RLC_SPM_PA_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
++#define RLC_SPM_PA_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
++#define RLC_SPM_PA_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
++#define RLC_SPM_PA_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
++//RLC_SPM_GDS_PERFMON_SAMPLE_DELAY
++#define RLC_SPM_GDS_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
++#define RLC_SPM_GDS_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
++#define RLC_SPM_GDS_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
++#define RLC_SPM_GDS_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
++//RLC_SPM_IA_PERFMON_SAMPLE_DELAY
++#define RLC_SPM_IA_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
++#define RLC_SPM_IA_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
++#define RLC_SPM_IA_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
++#define RLC_SPM_IA_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
++//RLC_SPM_SC_PERFMON_SAMPLE_DELAY
++#define RLC_SPM_SC_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
++#define RLC_SPM_SC_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
++#define RLC_SPM_SC_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
++#define RLC_SPM_SC_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
++//RLC_SPM_TCC_PERFMON_SAMPLE_DELAY
++#define RLC_SPM_TCC_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
++#define RLC_SPM_TCC_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
++#define RLC_SPM_TCC_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
++#define RLC_SPM_TCC_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
++//RLC_SPM_TCA_PERFMON_SAMPLE_DELAY
++#define RLC_SPM_TCA_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
++#define RLC_SPM_TCA_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
++#define RLC_SPM_TCA_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
++#define RLC_SPM_TCA_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
++//RLC_SPM_TCP_PERFMON_SAMPLE_DELAY
++#define RLC_SPM_TCP_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
++#define RLC_SPM_TCP_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
++#define RLC_SPM_TCP_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
++#define RLC_SPM_TCP_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
++//RLC_SPM_TA_PERFMON_SAMPLE_DELAY
++#define RLC_SPM_TA_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
++#define RLC_SPM_TA_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
++#define RLC_SPM_TA_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
++#define RLC_SPM_TA_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
++//RLC_SPM_TD_PERFMON_SAMPLE_DELAY
++#define RLC_SPM_TD_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
++#define RLC_SPM_TD_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
++#define RLC_SPM_TD_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
++#define RLC_SPM_TD_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
++//RLC_SPM_VGT_PERFMON_SAMPLE_DELAY
++#define RLC_SPM_VGT_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
++#define RLC_SPM_VGT_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
++#define RLC_SPM_VGT_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
++#define RLC_SPM_VGT_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
++//RLC_SPM_SPI_PERFMON_SAMPLE_DELAY
++#define RLC_SPM_SPI_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
++#define RLC_SPM_SPI_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
++#define RLC_SPM_SPI_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
++#define RLC_SPM_SPI_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
++//RLC_SPM_SQG_PERFMON_SAMPLE_DELAY
++#define RLC_SPM_SQG_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
++#define RLC_SPM_SQG_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
++#define RLC_SPM_SQG_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
++#define RLC_SPM_SQG_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
++//RLC_SPM_SX_PERFMON_SAMPLE_DELAY
++#define RLC_SPM_SX_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
++#define RLC_SPM_SX_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
++#define RLC_SPM_SX_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
++#define RLC_SPM_SX_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
++//RLC_SPM_GLOBAL_MUXSEL_ADDR
++#define RLC_SPM_GLOBAL_MUXSEL_ADDR__PERFMON_SEL_ADDR__SHIFT 0x0
++#define RLC_SPM_GLOBAL_MUXSEL_ADDR__PERFMON_SEL_ADDR_MASK 0xFFFFFFFFL
++//RLC_SPM_GLOBAL_MUXSEL_DATA
++#define RLC_SPM_GLOBAL_MUXSEL_DATA__PERFMON_SEL_DATA__SHIFT 0x0
++#define RLC_SPM_GLOBAL_MUXSEL_DATA__PERFMON_SEL_DATA_MASK 0xFFFFFFFFL
++//RLC_SPM_RING_RDPTR
++#define RLC_SPM_RING_RDPTR__PERFMON_RING_RDPTR__SHIFT 0x0
++#define RLC_SPM_RING_RDPTR__PERFMON_RING_RDPTR_MASK 0xFFFFFFFFL
++//RLC_SPM_SEGMENT_THRESHOLD
++#define RLC_SPM_SEGMENT_THRESHOLD__NUM_SEGMENT_THRESHOLD__SHIFT 0x0
++#define RLC_SPM_SEGMENT_THRESHOLD__NUM_SEGMENT_THRESHOLD_MASK 0xFFFFFFFFL
++//RLC_SPM_RMI_PERFMON_SAMPLE_DELAY
++#define RLC_SPM_RMI_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
++#define RLC_SPM_RMI_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
++#define RLC_SPM_RMI_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
++#define RLC_SPM_RMI_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
++//RLC_PERFMON_CLK_CNTL
++#define RLC_PERFMON_CLK_CNTL__PERFMON_CLOCK_STATE__SHIFT 0x0
++#define RLC_PERFMON_CLK_CNTL__PERFMON_CLOCK_STATE_MASK 0x00000001L
++//RLC_PERFMON_CNTL
++#define RLC_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
++#define RLC_PERFMON_CNTL__PERFMON_SAMPLE_ENABLE__SHIFT 0xa
++#define RLC_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000007L
++#define RLC_PERFMON_CNTL__PERFMON_SAMPLE_ENABLE_MASK 0x00000400L
++//RLC_PERFCOUNTER0_SELECT
++#define RLC_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT__SHIFT 0x0
++#define RLC_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT_MASK 0x00FFL
++//RLC_PERFCOUNTER1_SELECT
++#define RLC_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT__SHIFT 0x0
++#define RLC_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT_MASK 0x00FFL
++//RLC_GPU_IOV_PERF_CNT_CNTL
++#define RLC_GPU_IOV_PERF_CNT_CNTL__ENABLE__SHIFT 0x0
++#define RLC_GPU_IOV_PERF_CNT_CNTL__MODE_SELECT__SHIFT 0x1
++#define RLC_GPU_IOV_PERF_CNT_CNTL__RESET__SHIFT 0x2
++#define RLC_GPU_IOV_PERF_CNT_CNTL__RESERVED__SHIFT 0x3
++#define RLC_GPU_IOV_PERF_CNT_CNTL__ENABLE_MASK 0x00000001L
++#define RLC_GPU_IOV_PERF_CNT_CNTL__MODE_SELECT_MASK 0x00000002L
++#define RLC_GPU_IOV_PERF_CNT_CNTL__RESET_MASK 0x00000004L
++#define RLC_GPU_IOV_PERF_CNT_CNTL__RESERVED_MASK 0xFFFFFFF8L
++//RLC_GPU_IOV_PERF_CNT_WR_ADDR
++#define RLC_GPU_IOV_PERF_CNT_WR_ADDR__VFID__SHIFT 0x0
++#define RLC_GPU_IOV_PERF_CNT_WR_ADDR__CNT_ID__SHIFT 0x4
++#define RLC_GPU_IOV_PERF_CNT_WR_ADDR__RESERVED__SHIFT 0x6
++#define RLC_GPU_IOV_PERF_CNT_WR_ADDR__VFID_MASK 0x0000000FL
++#define RLC_GPU_IOV_PERF_CNT_WR_ADDR__CNT_ID_MASK 0x00000030L
++#define RLC_GPU_IOV_PERF_CNT_WR_ADDR__RESERVED_MASK 0xFFFFFFC0L
++//RLC_GPU_IOV_PERF_CNT_WR_DATA
++#define RLC_GPU_IOV_PERF_CNT_WR_DATA__DATA__SHIFT 0x0
++#define RLC_GPU_IOV_PERF_CNT_WR_DATA__DATA_MASK 0x0000000FL
++//RLC_GPU_IOV_PERF_CNT_RD_ADDR
++#define RLC_GPU_IOV_PERF_CNT_RD_ADDR__VFID__SHIFT 0x0
++#define RLC_GPU_IOV_PERF_CNT_RD_ADDR__CNT_ID__SHIFT 0x4
++#define RLC_GPU_IOV_PERF_CNT_RD_ADDR__RESERVED__SHIFT 0x6
++#define RLC_GPU_IOV_PERF_CNT_RD_ADDR__VFID_MASK 0x0000000FL
++#define RLC_GPU_IOV_PERF_CNT_RD_ADDR__CNT_ID_MASK 0x00000030L
++#define RLC_GPU_IOV_PERF_CNT_RD_ADDR__RESERVED_MASK 0xFFFFFFC0L
++//RLC_GPU_IOV_PERF_CNT_RD_DATA
++#define RLC_GPU_IOV_PERF_CNT_RD_DATA__DATA__SHIFT 0x0
++#define RLC_GPU_IOV_PERF_CNT_RD_DATA__DATA_MASK 0x0000000FL
++//RMI_PERFCOUNTER0_SELECT
++#define RMI_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
++#define RMI_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
++#define RMI_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
++#define RMI_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
++#define RMI_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
++#define RMI_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000001FFL
++#define RMI_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x0007FC00L
++#define RMI_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
++#define RMI_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
++#define RMI_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
++//RMI_PERFCOUNTER0_SELECT1
++#define RMI_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
++#define RMI_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
++#define RMI_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
++#define RMI_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
++#define RMI_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000001FFL
++#define RMI_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x0007FC00L
++#define RMI_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
++#define RMI_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
++//RMI_PERFCOUNTER1_SELECT
++#define RMI_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
++#define RMI_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
++#define RMI_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000001FFL
++#define RMI_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
++//RMI_PERFCOUNTER2_SELECT
++#define RMI_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
++#define RMI_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
++#define RMI_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
++#define RMI_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
++#define RMI_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
++#define RMI_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000001FFL
++#define RMI_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x0007FC00L
++#define RMI_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
++#define RMI_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
++#define RMI_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
++//RMI_PERFCOUNTER2_SELECT1
++#define RMI_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x0
++#define RMI_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0xa
++#define RMI_PERFCOUNTER2_SELECT1__PERF_MODE3__SHIFT 0x18
++#define RMI_PERFCOUNTER2_SELECT1__PERF_MODE2__SHIFT 0x1c
++#define RMI_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x000001FFL
++#define RMI_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0x0007FC00L
++#define RMI_PERFCOUNTER2_SELECT1__PERF_MODE3_MASK 0x0F000000L
++#define RMI_PERFCOUNTER2_SELECT1__PERF_MODE2_MASK 0xF0000000L
++//RMI_PERFCOUNTER3_SELECT
++#define RMI_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
++#define RMI_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
++#define RMI_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000001FFL
++#define RMI_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
++//RMI_PERF_COUNTER_CNTL
++#define RMI_PERF_COUNTER_CNTL__TRANS_BASED_PERF_EN_SEL__SHIFT 0x0
++#define RMI_PERF_COUNTER_CNTL__EVENT_BASED_PERF_EN_SEL__SHIFT 0x2
++#define RMI_PERF_COUNTER_CNTL__TC_PERF_EN_SEL__SHIFT 0x4
++#define RMI_PERF_COUNTER_CNTL__PERF_EVENT_WINDOW_MASK0__SHIFT 0x6
++#define RMI_PERF_COUNTER_CNTL__PERF_EVENT_WINDOW_MASK1__SHIFT 0x8
++#define RMI_PERF_COUNTER_CNTL__PERF_COUNTER_CID__SHIFT 0xa
++#define RMI_PERF_COUNTER_CNTL__PERF_COUNTER_VMID__SHIFT 0xe
++#define RMI_PERF_COUNTER_CNTL__PERF_COUNTER_BURST_LENGTH_THRESHOLD__SHIFT 0x13
++#define RMI_PERF_COUNTER_CNTL__PERF_SOFT_RESET__SHIFT 0x19
++#define RMI_PERF_COUNTER_CNTL__PERF_CNTR_SPM_SEL__SHIFT 0x1a
++#define RMI_PERF_COUNTER_CNTL__TRANS_BASED_PERF_EN_SEL_MASK 0x00000003L
++#define RMI_PERF_COUNTER_CNTL__EVENT_BASED_PERF_EN_SEL_MASK 0x0000000CL
++#define RMI_PERF_COUNTER_CNTL__TC_PERF_EN_SEL_MASK 0x00000030L
++#define RMI_PERF_COUNTER_CNTL__PERF_EVENT_WINDOW_MASK0_MASK 0x000000C0L
++#define RMI_PERF_COUNTER_CNTL__PERF_EVENT_WINDOW_MASK1_MASK 0x00000300L
++#define RMI_PERF_COUNTER_CNTL__PERF_COUNTER_CID_MASK 0x00003C00L
++#define RMI_PERF_COUNTER_CNTL__PERF_COUNTER_VMID_MASK 0x0007C000L
++#define RMI_PERF_COUNTER_CNTL__PERF_COUNTER_BURST_LENGTH_THRESHOLD_MASK 0x01F80000L
++#define RMI_PERF_COUNTER_CNTL__PERF_SOFT_RESET_MASK 0x02000000L
++#define RMI_PERF_COUNTER_CNTL__PERF_CNTR_SPM_SEL_MASK 0x04000000L
++
++
++// addressBlock: gc_utcl2_atcl2pfcntldec
++//ATC_L2_PERFCOUNTER0_CFG
++#define ATC_L2_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
++#define ATC_L2_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
++#define ATC_L2_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
++#define ATC_L2_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
++#define ATC_L2_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
++#define ATC_L2_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
++#define ATC_L2_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
++#define ATC_L2_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
++#define ATC_L2_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
++#define ATC_L2_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
++//ATC_L2_PERFCOUNTER1_CFG
++#define ATC_L2_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
++#define ATC_L2_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
++#define ATC_L2_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
++#define ATC_L2_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
++#define ATC_L2_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
++#define ATC_L2_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
++#define ATC_L2_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
++#define ATC_L2_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
++#define ATC_L2_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
++#define ATC_L2_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
++//ATC_L2_PERFCOUNTER_RSLT_CNTL
++#define ATC_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
++#define ATC_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
++#define ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
++#define ATC_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
++#define ATC_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
++#define ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
++#define ATC_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
++#define ATC_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
++#define ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
++#define ATC_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
++#define ATC_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
++#define ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
++
++
++// addressBlock: gc_utcl2_vml2pldec
++//MC_VM_L2_PERFCOUNTER0_CFG
++#define MC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
++#define MC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
++#define MC_VM_L2_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
++#define MC_VM_L2_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
++#define MC_VM_L2_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
++#define MC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
++#define MC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
++#define MC_VM_L2_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
++#define MC_VM_L2_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
++#define MC_VM_L2_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
++//MC_VM_L2_PERFCOUNTER1_CFG
++#define MC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
++#define MC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
++#define MC_VM_L2_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
++#define MC_VM_L2_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
++#define MC_VM_L2_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
++#define MC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
++#define MC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
++#define MC_VM_L2_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
++#define MC_VM_L2_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
++#define MC_VM_L2_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
++//MC_VM_L2_PERFCOUNTER2_CFG
++#define MC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
++#define MC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
++#define MC_VM_L2_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
++#define MC_VM_L2_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
++#define MC_VM_L2_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
++#define MC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
++#define MC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
++#define MC_VM_L2_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
++#define MC_VM_L2_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
++#define MC_VM_L2_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
++//MC_VM_L2_PERFCOUNTER3_CFG
++#define MC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL__SHIFT 0x0
++#define MC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL_END__SHIFT 0x8
++#define MC_VM_L2_PERFCOUNTER3_CFG__PERF_MODE__SHIFT 0x18
++#define MC_VM_L2_PERFCOUNTER3_CFG__ENABLE__SHIFT 0x1c
++#define MC_VM_L2_PERFCOUNTER3_CFG__CLEAR__SHIFT 0x1d
++#define MC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL_MASK 0x000000FFL
++#define MC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL_END_MASK 0x0000FF00L
++#define MC_VM_L2_PERFCOUNTER3_CFG__PERF_MODE_MASK 0x0F000000L
++#define MC_VM_L2_PERFCOUNTER3_CFG__ENABLE_MASK 0x10000000L
++#define MC_VM_L2_PERFCOUNTER3_CFG__CLEAR_MASK 0x20000000L
++//MC_VM_L2_PERFCOUNTER4_CFG
++#define MC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL__SHIFT 0x0
++#define MC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL_END__SHIFT 0x8
++#define MC_VM_L2_PERFCOUNTER4_CFG__PERF_MODE__SHIFT 0x18
++#define MC_VM_L2_PERFCOUNTER4_CFG__ENABLE__SHIFT 0x1c
++#define MC_VM_L2_PERFCOUNTER4_CFG__CLEAR__SHIFT 0x1d
++#define MC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL_MASK 0x000000FFL
++#define MC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL_END_MASK 0x0000FF00L
++#define MC_VM_L2_PERFCOUNTER4_CFG__PERF_MODE_MASK 0x0F000000L
++#define MC_VM_L2_PERFCOUNTER4_CFG__ENABLE_MASK 0x10000000L
++#define MC_VM_L2_PERFCOUNTER4_CFG__CLEAR_MASK 0x20000000L
++//MC_VM_L2_PERFCOUNTER5_CFG
++#define MC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL__SHIFT 0x0
++#define MC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL_END__SHIFT 0x8
++#define MC_VM_L2_PERFCOUNTER5_CFG__PERF_MODE__SHIFT 0x18
++#define MC_VM_L2_PERFCOUNTER5_CFG__ENABLE__SHIFT 0x1c
++#define MC_VM_L2_PERFCOUNTER5_CFG__CLEAR__SHIFT 0x1d
++#define MC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL_MASK 0x000000FFL
++#define MC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL_END_MASK 0x0000FF00L
++#define MC_VM_L2_PERFCOUNTER5_CFG__PERF_MODE_MASK 0x0F000000L
++#define MC_VM_L2_PERFCOUNTER5_CFG__ENABLE_MASK 0x10000000L
++#define MC_VM_L2_PERFCOUNTER5_CFG__CLEAR_MASK 0x20000000L
++//MC_VM_L2_PERFCOUNTER6_CFG
++#define MC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL__SHIFT 0x0
++#define MC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL_END__SHIFT 0x8
++#define MC_VM_L2_PERFCOUNTER6_CFG__PERF_MODE__SHIFT 0x18
++#define MC_VM_L2_PERFCOUNTER6_CFG__ENABLE__SHIFT 0x1c
++#define MC_VM_L2_PERFCOUNTER6_CFG__CLEAR__SHIFT 0x1d
++#define MC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL_MASK 0x000000FFL
++#define MC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL_END_MASK 0x0000FF00L
++#define MC_VM_L2_PERFCOUNTER6_CFG__PERF_MODE_MASK 0x0F000000L
++#define MC_VM_L2_PERFCOUNTER6_CFG__ENABLE_MASK 0x10000000L
++#define MC_VM_L2_PERFCOUNTER6_CFG__CLEAR_MASK 0x20000000L
++//MC_VM_L2_PERFCOUNTER7_CFG
++#define MC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL__SHIFT 0x0
++#define MC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL_END__SHIFT 0x8
++#define MC_VM_L2_PERFCOUNTER7_CFG__PERF_MODE__SHIFT 0x18
++#define MC_VM_L2_PERFCOUNTER7_CFG__ENABLE__SHIFT 0x1c
++#define MC_VM_L2_PERFCOUNTER7_CFG__CLEAR__SHIFT 0x1d
++#define MC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL_MASK 0x000000FFL
++#define MC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL_END_MASK 0x0000FF00L
++#define MC_VM_L2_PERFCOUNTER7_CFG__PERF_MODE_MASK 0x0F000000L
++#define MC_VM_L2_PERFCOUNTER7_CFG__ENABLE_MASK 0x10000000L
++#define MC_VM_L2_PERFCOUNTER7_CFG__CLEAR_MASK 0x20000000L
++//MC_VM_L2_PERFCOUNTER_RSLT_CNTL
++#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
++#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
++#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
++#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
++#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
++#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
++#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
++#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
++#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
++#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
++#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
++#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
++
++
++// addressBlock: gc_rlcpdec
++//RLC_CNTL
++#define RLC_CNTL__RLC_ENABLE_F32__SHIFT 0x0
++#define RLC_CNTL__FORCE_RETRY__SHIFT 0x1
++#define RLC_CNTL__READ_CACHE_DISABLE__SHIFT 0x2
++#define RLC_CNTL__RLC_STEP_F32__SHIFT 0x3
++#define RLC_CNTL__RESERVED__SHIFT 0x4
++#define RLC_CNTL__RLC_ENABLE_F32_MASK 0x00000001L
++#define RLC_CNTL__FORCE_RETRY_MASK 0x00000002L
++#define RLC_CNTL__READ_CACHE_DISABLE_MASK 0x00000004L
++#define RLC_CNTL__RLC_STEP_F32_MASK 0x00000008L
++#define RLC_CNTL__RESERVED_MASK 0xFFFFFFF0L
++//RLC_STAT
++#define RLC_STAT__RLC_BUSY__SHIFT 0x0
++#define RLC_STAT__RLC_GPM_BUSY__SHIFT 0x1
++#define RLC_STAT__RLC_SPM_BUSY__SHIFT 0x2
++#define RLC_STAT__RLC_SRM_BUSY__SHIFT 0x3
++#define RLC_STAT__MC_BUSY__SHIFT 0x4
++#define RLC_STAT__RLC_THREAD_0_BUSY__SHIFT 0x5
++#define RLC_STAT__RLC_THREAD_1_BUSY__SHIFT 0x6
++#define RLC_STAT__RLC_THREAD_2_BUSY__SHIFT 0x7
++#define RLC_STAT__RESERVED__SHIFT 0x8
++#define RLC_STAT__RLC_BUSY_MASK 0x00000001L
++#define RLC_STAT__RLC_GPM_BUSY_MASK 0x00000002L
++#define RLC_STAT__RLC_SPM_BUSY_MASK 0x00000004L
++#define RLC_STAT__RLC_SRM_BUSY_MASK 0x00000008L
++#define RLC_STAT__MC_BUSY_MASK 0x00000010L
++#define RLC_STAT__RLC_THREAD_0_BUSY_MASK 0x00000020L
++#define RLC_STAT__RLC_THREAD_1_BUSY_MASK 0x00000040L
++#define RLC_STAT__RLC_THREAD_2_BUSY_MASK 0x00000080L
++#define RLC_STAT__RESERVED_MASK 0xFFFFFF00L
++//RLC_SAFE_MODE
++#define RLC_SAFE_MODE__CMD__SHIFT 0x0
++#define RLC_SAFE_MODE__MESSAGE__SHIFT 0x1
++#define RLC_SAFE_MODE__RESERVED1__SHIFT 0x5
++#define RLC_SAFE_MODE__RESPONSE__SHIFT 0x8
++#define RLC_SAFE_MODE__RESERVED__SHIFT 0xc
++#define RLC_SAFE_MODE__CMD_MASK 0x00000001L
++#define RLC_SAFE_MODE__MESSAGE_MASK 0x0000001EL
++#define RLC_SAFE_MODE__RESERVED1_MASK 0x000000E0L
++#define RLC_SAFE_MODE__RESPONSE_MASK 0x00000F00L
++#define RLC_SAFE_MODE__RESERVED_MASK 0xFFFFF000L
++//RLC_MEM_SLP_CNTL
++#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN__SHIFT 0x0
++#define RLC_MEM_SLP_CNTL__RLC_MEM_DS_EN__SHIFT 0x1
++#define RLC_MEM_SLP_CNTL__RESERVED__SHIFT 0x2
++#define RLC_MEM_SLP_CNTL__RLC_LS_DS_BUSY_OVERRIDE__SHIFT 0x7
++#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_ON_DELAY__SHIFT 0x8
++#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_OFF_DELAY__SHIFT 0x10
++#define RLC_MEM_SLP_CNTL__RESERVED1__SHIFT 0x18
++#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK 0x00000001L
++#define RLC_MEM_SLP_CNTL__RLC_MEM_DS_EN_MASK 0x00000002L
++#define RLC_MEM_SLP_CNTL__RESERVED_MASK 0x0000007CL
++#define RLC_MEM_SLP_CNTL__RLC_LS_DS_BUSY_OVERRIDE_MASK 0x00000080L
++#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_ON_DELAY_MASK 0x0000FF00L
++#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_OFF_DELAY_MASK 0x00FF0000L
++#define RLC_MEM_SLP_CNTL__RESERVED1_MASK 0xFF000000L
++//SMU_RLC_RESPONSE
++#define SMU_RLC_RESPONSE__RESP__SHIFT 0x0
++#define SMU_RLC_RESPONSE__RESP_MASK 0xFFFFFFFFL
++//RLC_RLCV_SAFE_MODE
++#define RLC_RLCV_SAFE_MODE__CMD__SHIFT 0x0
++#define RLC_RLCV_SAFE_MODE__MESSAGE__SHIFT 0x1
++#define RLC_RLCV_SAFE_MODE__RESERVED1__SHIFT 0x5
++#define RLC_RLCV_SAFE_MODE__RESPONSE__SHIFT 0x8
++#define RLC_RLCV_SAFE_MODE__RESERVED__SHIFT 0xc
++#define RLC_RLCV_SAFE_MODE__CMD_MASK 0x00000001L
++#define RLC_RLCV_SAFE_MODE__MESSAGE_MASK 0x0000001EL
++#define RLC_RLCV_SAFE_MODE__RESERVED1_MASK 0x000000E0L
++#define RLC_RLCV_SAFE_MODE__RESPONSE_MASK 0x00000F00L
++#define RLC_RLCV_SAFE_MODE__RESERVED_MASK 0xFFFFF000L
++//RLC_SMU_SAFE_MODE
++#define RLC_SMU_SAFE_MODE__CMD__SHIFT 0x0
++#define RLC_SMU_SAFE_MODE__MESSAGE__SHIFT 0x1
++#define RLC_SMU_SAFE_MODE__RESERVED1__SHIFT 0x5
++#define RLC_SMU_SAFE_MODE__RESPONSE__SHIFT 0x8
++#define RLC_SMU_SAFE_MODE__RESERVED__SHIFT 0xc
++#define RLC_SMU_SAFE_MODE__CMD_MASK 0x00000001L
++#define RLC_SMU_SAFE_MODE__MESSAGE_MASK 0x0000001EL
++#define RLC_SMU_SAFE_MODE__RESERVED1_MASK 0x000000E0L
++#define RLC_SMU_SAFE_MODE__RESPONSE_MASK 0x00000F00L
++#define RLC_SMU_SAFE_MODE__RESERVED_MASK 0xFFFFF000L
++//RLC_RLCV_COMMAND
++#define RLC_RLCV_COMMAND__CMD__SHIFT 0x0
++#define RLC_RLCV_COMMAND__RESERVED__SHIFT 0x4
++#define RLC_RLCV_COMMAND__CMD_MASK 0x0000000FL
++#define RLC_RLCV_COMMAND__RESERVED_MASK 0xFFFFFFF0L
++//RLC_REFCLOCK_TIMESTAMP_LSB
++#define RLC_REFCLOCK_TIMESTAMP_LSB__TIMESTAMP_LSB__SHIFT 0x0
++#define RLC_REFCLOCK_TIMESTAMP_LSB__TIMESTAMP_LSB_MASK 0xFFFFFFFFL
++//RLC_REFCLOCK_TIMESTAMP_MSB
++#define RLC_REFCLOCK_TIMESTAMP_MSB__TIMESTAMP_MSB__SHIFT 0x0
++#define RLC_REFCLOCK_TIMESTAMP_MSB__TIMESTAMP_MSB_MASK 0xFFFFFFFFL
++//RLC_GPM_TIMER_INT_0
++#define RLC_GPM_TIMER_INT_0__TIMER__SHIFT 0x0
++#define RLC_GPM_TIMER_INT_0__TIMER_MASK 0xFFFFFFFFL
++//RLC_GPM_TIMER_INT_1
++#define RLC_GPM_TIMER_INT_1__TIMER__SHIFT 0x0
++#define RLC_GPM_TIMER_INT_1__TIMER_MASK 0xFFFFFFFFL
++//RLC_GPM_TIMER_INT_2
++#define RLC_GPM_TIMER_INT_2__TIMER__SHIFT 0x0
++#define RLC_GPM_TIMER_INT_2__TIMER_MASK 0xFFFFFFFFL
++//RLC_GPM_TIMER_CTRL
++#define RLC_GPM_TIMER_CTRL__TIMER_0_EN__SHIFT 0x0
++#define RLC_GPM_TIMER_CTRL__TIMER_1_EN__SHIFT 0x1
++#define RLC_GPM_TIMER_CTRL__TIMER_2_EN__SHIFT 0x2
++#define RLC_GPM_TIMER_CTRL__TIMER_3_EN__SHIFT 0x3
++#define RLC_GPM_TIMER_CTRL__RESERVED__SHIFT 0x4
++#define RLC_GPM_TIMER_CTRL__TIMER_0_EN_MASK 0x00000001L
++#define RLC_GPM_TIMER_CTRL__TIMER_1_EN_MASK 0x00000002L
++#define RLC_GPM_TIMER_CTRL__TIMER_2_EN_MASK 0x00000004L
++#define RLC_GPM_TIMER_CTRL__TIMER_3_EN_MASK 0x00000008L
++#define RLC_GPM_TIMER_CTRL__RESERVED_MASK 0xFFFFFFF0L
++//RLC_LB_CNTR_MAX
++#define RLC_LB_CNTR_MAX__LB_CNTR_MAX__SHIFT 0x0
++#define RLC_LB_CNTR_MAX__LB_CNTR_MAX_MASK 0xFFFFFFFFL
++//RLC_GPM_TIMER_STAT
++#define RLC_GPM_TIMER_STAT__TIMER_0_STAT__SHIFT 0x0
++#define RLC_GPM_TIMER_STAT__TIMER_1_STAT__SHIFT 0x1
++#define RLC_GPM_TIMER_STAT__TIMER_2_STAT__SHIFT 0x2
++#define RLC_GPM_TIMER_STAT__TIMER_3_STAT__SHIFT 0x3
++#define RLC_GPM_TIMER_STAT__RESERVED__SHIFT 0x4
++#define RLC_GPM_TIMER_STAT__TIMER_0_STAT_MASK 0x00000001L
++#define RLC_GPM_TIMER_STAT__TIMER_1_STAT_MASK 0x00000002L
++#define RLC_GPM_TIMER_STAT__TIMER_2_STAT_MASK 0x00000004L
++#define RLC_GPM_TIMER_STAT__TIMER_3_STAT_MASK 0x00000008L
++#define RLC_GPM_TIMER_STAT__RESERVED_MASK 0xFFFFFFF0L
++//RLC_GPM_TIMER_INT_3
++#define RLC_GPM_TIMER_INT_3__TIMER__SHIFT 0x0
++#define RLC_GPM_TIMER_INT_3__TIMER_MASK 0xFFFFFFFFL
++//RLC_SERDES_WR_NONCU_MASTER_MASK_1
++#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__SE_MASTER_MASK_1__SHIFT 0x0
++#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__GC_MASTER_MASK_1__SHIFT 0x10
++#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__GC_GFX_MASTER_MASK_1__SHIFT 0x11
++#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__TC0_1_MASTER_MASK__SHIFT 0x12
++#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__RESERVED_1__SHIFT 0x13
++#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__SPARE4_MASTER_MASK__SHIFT 0x14
++#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__SPARE5_MASTER_MASK__SHIFT 0x15
++#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__SPARE6_MASTER_MASK__SHIFT 0x16
++#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__SPARE7_MASTER_MASK__SHIFT 0x17
++#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__EA_1_MASTER_MASK__SHIFT 0x18
++#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__RESERVED__SHIFT 0x19
++#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__SE_MASTER_MASK_1_MASK 0x0000FFFFL
++#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__GC_MASTER_MASK_1_MASK 0x00010000L
++#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__GC_GFX_MASTER_MASK_1_MASK 0x00020000L
++#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__TC0_1_MASTER_MASK_MASK 0x00040000L
++#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__RESERVED_1_MASK 0x00080000L
++#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__SPARE4_MASTER_MASK_MASK 0x00100000L
++#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__SPARE5_MASTER_MASK_MASK 0x00200000L
++#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__SPARE6_MASTER_MASK_MASK 0x00400000L
++#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__SPARE7_MASTER_MASK_MASK 0x00800000L
++#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__EA_1_MASTER_MASK_MASK 0x01000000L
++#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__RESERVED_MASK 0xFE000000L
++//RLC_SERDES_NONCU_MASTER_BUSY_1
++#define RLC_SERDES_NONCU_MASTER_BUSY_1__SE_MASTER_BUSY_1__SHIFT 0x0
++#define RLC_SERDES_NONCU_MASTER_BUSY_1__GC_MASTER_BUSY_1__SHIFT 0x10
++#define RLC_SERDES_NONCU_MASTER_BUSY_1__GC_GFX_MASTER_BUSY_1__SHIFT 0x11
++#define RLC_SERDES_NONCU_MASTER_BUSY_1__TC0_MASTER_BUSY_1__SHIFT 0x12
++#define RLC_SERDES_NONCU_MASTER_BUSY_1__RESERVED_1__SHIFT 0x13
++#define RLC_SERDES_NONCU_MASTER_BUSY_1__SPARE4_MASTER_BUSY__SHIFT 0x14
++#define RLC_SERDES_NONCU_MASTER_BUSY_1__SPARE5_MASTER_BUSY__SHIFT 0x15
++#define RLC_SERDES_NONCU_MASTER_BUSY_1__SPARE6_MASTER_BUSY__SHIFT 0x16
++#define RLC_SERDES_NONCU_MASTER_BUSY_1__SPARE7_MASTER_BUSY__SHIFT 0x17
++#define RLC_SERDES_NONCU_MASTER_BUSY_1__EA_1_MASTER_BUSY__SHIFT 0x18
++#define RLC_SERDES_NONCU_MASTER_BUSY_1__RESERVED__SHIFT 0x19
++#define RLC_SERDES_NONCU_MASTER_BUSY_1__SE_MASTER_BUSY_1_MASK 0x0000FFFFL
++#define RLC_SERDES_NONCU_MASTER_BUSY_1__GC_MASTER_BUSY_1_MASK 0x00010000L
++#define RLC_SERDES_NONCU_MASTER_BUSY_1__GC_GFX_MASTER_BUSY_1_MASK 0x00020000L
++#define RLC_SERDES_NONCU_MASTER_BUSY_1__TC0_MASTER_BUSY_1_MASK 0x00040000L
++#define RLC_SERDES_NONCU_MASTER_BUSY_1__RESERVED_1_MASK 0x00080000L
++#define RLC_SERDES_NONCU_MASTER_BUSY_1__SPARE4_MASTER_BUSY_MASK 0x00100000L
++#define RLC_SERDES_NONCU_MASTER_BUSY_1__SPARE5_MASTER_BUSY_MASK 0x00200000L
++#define RLC_SERDES_NONCU_MASTER_BUSY_1__SPARE6_MASTER_BUSY_MASK 0x00400000L
++#define RLC_SERDES_NONCU_MASTER_BUSY_1__SPARE7_MASTER_BUSY_MASK 0x00800000L
++#define RLC_SERDES_NONCU_MASTER_BUSY_1__EA_1_MASTER_BUSY_MASK 0x01000000L
++#define RLC_SERDES_NONCU_MASTER_BUSY_1__RESERVED_MASK 0xFE000000L
++//RLC_INT_STAT
++#define RLC_INT_STAT__LAST_CP_RLC_INT_ID__SHIFT 0x0
++#define RLC_INT_STAT__CP_RLC_INT_PENDING__SHIFT 0x8
++#define RLC_INT_STAT__RESERVED__SHIFT 0x9
++#define RLC_INT_STAT__LAST_CP_RLC_INT_ID_MASK 0x000000FFL
++#define RLC_INT_STAT__CP_RLC_INT_PENDING_MASK 0x00000100L
++#define RLC_INT_STAT__RESERVED_MASK 0xFFFFFE00L
++//RLC_LB_CNTL
++#define RLC_LB_CNTL__LOAD_BALANCE_ENABLE__SHIFT 0x0
++#define RLC_LB_CNTL__LB_CNT_CP_BUSY__SHIFT 0x1
++#define RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE__SHIFT 0x2
++#define RLC_LB_CNTL__LB_CNT_REG_INC__SHIFT 0x3
++#define RLC_LB_CNTL__CU_MASK_USED_OFF_HYST__SHIFT 0x4
++#define RLC_LB_CNTL__RESERVED__SHIFT 0xc
++#define RLC_LB_CNTL__LOAD_BALANCE_ENABLE_MASK 0x00000001L
++#define RLC_LB_CNTL__LB_CNT_CP_BUSY_MASK 0x00000002L
++#define RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK 0x00000004L
++#define RLC_LB_CNTL__LB_CNT_REG_INC_MASK 0x00000008L
++#define RLC_LB_CNTL__CU_MASK_USED_OFF_HYST_MASK 0x00000FF0L
++#define RLC_LB_CNTL__RESERVED_MASK 0xFFFFF000L
++//RLC_MGCG_CTRL
++#define RLC_MGCG_CTRL__MGCG_EN__SHIFT 0x0
++#define RLC_MGCG_CTRL__SILICON_EN__SHIFT 0x1
++#define RLC_MGCG_CTRL__SIMULATION_EN__SHIFT 0x2
++#define RLC_MGCG_CTRL__ON_DELAY__SHIFT 0x3
++#define RLC_MGCG_CTRL__OFF_HYSTERESIS__SHIFT 0x7
++#define RLC_MGCG_CTRL__GC_CAC_MGCG_CLK_CNTL__SHIFT 0xf
++#define RLC_MGCG_CTRL__SE_CAC_MGCG_CLK_CNTL__SHIFT 0x10
++#define RLC_MGCG_CTRL__SPARE__SHIFT 0x11
++#define RLC_MGCG_CTRL__MGCG_EN_MASK 0x00000001L
++#define RLC_MGCG_CTRL__SILICON_EN_MASK 0x00000002L
++#define RLC_MGCG_CTRL__SIMULATION_EN_MASK 0x00000004L
++#define RLC_MGCG_CTRL__ON_DELAY_MASK 0x00000078L
++#define RLC_MGCG_CTRL__OFF_HYSTERESIS_MASK 0x00007F80L
++#define RLC_MGCG_CTRL__GC_CAC_MGCG_CLK_CNTL_MASK 0x00008000L
++#define RLC_MGCG_CTRL__SE_CAC_MGCG_CLK_CNTL_MASK 0x00010000L
++#define RLC_MGCG_CTRL__SPARE_MASK 0xFFFE0000L
++//RLC_LB_CNTR_INIT
++#define RLC_LB_CNTR_INIT__LB_CNTR_INIT__SHIFT 0x0
++#define RLC_LB_CNTR_INIT__LB_CNTR_INIT_MASK 0xFFFFFFFFL
++//RLC_LOAD_BALANCE_CNTR
++#define RLC_LOAD_BALANCE_CNTR__RLC_LOAD_BALANCE_CNTR__SHIFT 0x0
++#define RLC_LOAD_BALANCE_CNTR__RLC_LOAD_BALANCE_CNTR_MASK 0xFFFFFFFFL
++//RLC_JUMP_TABLE_RESTORE
++#define RLC_JUMP_TABLE_RESTORE__ADDR__SHIFT 0x0
++#define RLC_JUMP_TABLE_RESTORE__ADDR_MASK 0xFFFFFFFFL
++//RLC_PG_DELAY_2
++#define RLC_PG_DELAY_2__SERDES_TIMEOUT_VALUE__SHIFT 0x0
++#define RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT 0x8
++#define RLC_PG_DELAY_2__PERCU_TIMEOUT_VALUE__SHIFT 0x10
++#define RLC_PG_DELAY_2__SERDES_TIMEOUT_VALUE_MASK 0x000000FFL
++#define RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK 0x0000FF00L
++#define RLC_PG_DELAY_2__PERCU_TIMEOUT_VALUE_MASK 0xFFFF0000L
++//RLC_GPU_CLOCK_COUNT_LSB
++#define RLC_GPU_CLOCK_COUNT_LSB__GPU_CLOCKS_LSB__SHIFT 0x0
++#define RLC_GPU_CLOCK_COUNT_LSB__GPU_CLOCKS_LSB_MASK 0xFFFFFFFFL
++//RLC_GPU_CLOCK_COUNT_MSB
++#define RLC_GPU_CLOCK_COUNT_MSB__GPU_CLOCKS_MSB__SHIFT 0x0
++#define RLC_GPU_CLOCK_COUNT_MSB__GPU_CLOCKS_MSB_MASK 0xFFFFFFFFL
++//RLC_CAPTURE_GPU_CLOCK_COUNT
++#define RLC_CAPTURE_GPU_CLOCK_COUNT__CAPTURE__SHIFT 0x0
++#define RLC_CAPTURE_GPU_CLOCK_COUNT__RESERVED__SHIFT 0x1
++#define RLC_CAPTURE_GPU_CLOCK_COUNT__CAPTURE_MASK 0x00000001L
++#define RLC_CAPTURE_GPU_CLOCK_COUNT__RESERVED_MASK 0xFFFFFFFEL
++//RLC_UCODE_CNTL
++#define RLC_UCODE_CNTL__RLC_UCODE_FLAGS__SHIFT 0x0
++#define RLC_UCODE_CNTL__RLC_UCODE_FLAGS_MASK 0xFFFFFFFFL
++//RLC_GPM_THREAD_RESET
++#define RLC_GPM_THREAD_RESET__THREAD0_RESET__SHIFT 0x0
++#define RLC_GPM_THREAD_RESET__THREAD1_RESET__SHIFT 0x1
++#define RLC_GPM_THREAD_RESET__THREAD2_RESET__SHIFT 0x2
++#define RLC_GPM_THREAD_RESET__THREAD3_RESET__SHIFT 0x3
++#define RLC_GPM_THREAD_RESET__RESERVED__SHIFT 0x4
++#define RLC_GPM_THREAD_RESET__THREAD0_RESET_MASK 0x00000001L
++#define RLC_GPM_THREAD_RESET__THREAD1_RESET_MASK 0x00000002L
++#define RLC_GPM_THREAD_RESET__THREAD2_RESET_MASK 0x00000004L
++#define RLC_GPM_THREAD_RESET__THREAD3_RESET_MASK 0x00000008L
++#define RLC_GPM_THREAD_RESET__RESERVED_MASK 0xFFFFFFF0L
++//RLC_GPM_CP_DMA_COMPLETE_T0
++#define RLC_GPM_CP_DMA_COMPLETE_T0__DATA__SHIFT 0x0
++#define RLC_GPM_CP_DMA_COMPLETE_T0__RESERVED__SHIFT 0x1
++#define RLC_GPM_CP_DMA_COMPLETE_T0__DATA_MASK 0x00000001L
++#define RLC_GPM_CP_DMA_COMPLETE_T0__RESERVED_MASK 0xFFFFFFFEL
++//RLC_GPM_CP_DMA_COMPLETE_T1
++#define RLC_GPM_CP_DMA_COMPLETE_T1__DATA__SHIFT 0x0
++#define RLC_GPM_CP_DMA_COMPLETE_T1__RESERVED__SHIFT 0x1
++#define RLC_GPM_CP_DMA_COMPLETE_T1__DATA_MASK 0x00000001L
++#define RLC_GPM_CP_DMA_COMPLETE_T1__RESERVED_MASK 0xFFFFFFFEL
++//RLC_FIREWALL_VIOLATION
++#define RLC_FIREWALL_VIOLATION__ADDR__SHIFT 0x0
++#define RLC_FIREWALL_VIOLATION__ADDR_MASK 0xFFFFFFFFL
++//RLC_GPM_STAT
++#define RLC_GPM_STAT__RLC_BUSY__SHIFT 0x0
++#define RLC_GPM_STAT__GFX_POWER_STATUS__SHIFT 0x1
++#define RLC_GPM_STAT__GFX_CLOCK_STATUS__SHIFT 0x2
++#define RLC_GPM_STAT__GFX_LS_STATUS__SHIFT 0x3
++#define RLC_GPM_STAT__GFX_PIPELINE_POWER_STATUS__SHIFT 0x4
++#define RLC_GPM_STAT__CNTX_IDLE_BEING_PROCESSED__SHIFT 0x5
++#define RLC_GPM_STAT__CNTX_BUSY_BEING_PROCESSED__SHIFT 0x6
++#define RLC_GPM_STAT__GFX_IDLE_BEING_PROCESSED__SHIFT 0x7
++#define RLC_GPM_STAT__CMP_BUSY_BEING_PROCESSED__SHIFT 0x8
++#define RLC_GPM_STAT__SAVING_REGISTERS__SHIFT 0x9
++#define RLC_GPM_STAT__RESTORING_REGISTERS__SHIFT 0xa
++#define RLC_GPM_STAT__GFX3D_BLOCKS_CHANGING_POWER_STATE__SHIFT 0xb
++#define RLC_GPM_STAT__CMP_BLOCKS_CHANGING_POWER_STATE__SHIFT 0xc
++#define RLC_GPM_STAT__STATIC_CU_POWERING_UP__SHIFT 0xd
++#define RLC_GPM_STAT__STATIC_CU_POWERING_DOWN__SHIFT 0xe
++#define RLC_GPM_STAT__DYN_CU_POWERING_UP__SHIFT 0xf
++#define RLC_GPM_STAT__DYN_CU_POWERING_DOWN__SHIFT 0x10
++#define RLC_GPM_STAT__ABORTED_PD_SEQUENCE__SHIFT 0x11
++#define RLC_GPM_STAT__CMP_power_status__SHIFT 0x12
++#define RLC_GPM_STAT__GFX_LS_STATUS_3D__SHIFT 0x13
++#define RLC_GPM_STAT__GFX_CLOCK_STATUS_3D__SHIFT 0x14
++#define RLC_GPM_STAT__MGCG_OVERRIDE_STATUS__SHIFT 0x15
++#define RLC_GPM_STAT__RLC_EXEC_ROM_CODE__SHIFT 0x16
++#define RLC_GPM_STAT__RESERVED__SHIFT 0x17
++#define RLC_GPM_STAT__PG_ERROR_STATUS__SHIFT 0x18
++#define RLC_GPM_STAT__RLC_BUSY_MASK 0x00000001L
++#define RLC_GPM_STAT__GFX_POWER_STATUS_MASK 0x00000002L
++#define RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK 0x00000004L
++#define RLC_GPM_STAT__GFX_LS_STATUS_MASK 0x00000008L
++#define RLC_GPM_STAT__GFX_PIPELINE_POWER_STATUS_MASK 0x00000010L
++#define RLC_GPM_STAT__CNTX_IDLE_BEING_PROCESSED_MASK 0x00000020L
++#define RLC_GPM_STAT__CNTX_BUSY_BEING_PROCESSED_MASK 0x00000040L
++#define RLC_GPM_STAT__GFX_IDLE_BEING_PROCESSED_MASK 0x00000080L
++#define RLC_GPM_STAT__CMP_BUSY_BEING_PROCESSED_MASK 0x00000100L
++#define RLC_GPM_STAT__SAVING_REGISTERS_MASK 0x00000200L
++#define RLC_GPM_STAT__RESTORING_REGISTERS_MASK 0x00000400L
++#define RLC_GPM_STAT__GFX3D_BLOCKS_CHANGING_POWER_STATE_MASK 0x00000800L
++#define RLC_GPM_STAT__CMP_BLOCKS_CHANGING_POWER_STATE_MASK 0x00001000L
++#define RLC_GPM_STAT__STATIC_CU_POWERING_UP_MASK 0x00002000L
++#define RLC_GPM_STAT__STATIC_CU_POWERING_DOWN_MASK 0x00004000L
++#define RLC_GPM_STAT__DYN_CU_POWERING_UP_MASK 0x00008000L
++#define RLC_GPM_STAT__DYN_CU_POWERING_DOWN_MASK 0x00010000L
++#define RLC_GPM_STAT__ABORTED_PD_SEQUENCE_MASK 0x00020000L
++#define RLC_GPM_STAT__CMP_power_status_MASK 0x00040000L
++#define RLC_GPM_STAT__GFX_LS_STATUS_3D_MASK 0x00080000L
++#define RLC_GPM_STAT__GFX_CLOCK_STATUS_3D_MASK 0x00100000L
++#define RLC_GPM_STAT__MGCG_OVERRIDE_STATUS_MASK 0x00200000L
++#define RLC_GPM_STAT__RLC_EXEC_ROM_CODE_MASK 0x00400000L
++#define RLC_GPM_STAT__RESERVED_MASK 0x00800000L
++#define RLC_GPM_STAT__PG_ERROR_STATUS_MASK 0xFF000000L
++//RLC_GPU_CLOCK_32_RES_SEL
++#define RLC_GPU_CLOCK_32_RES_SEL__RES_SEL__SHIFT 0x0
++#define RLC_GPU_CLOCK_32_RES_SEL__RESERVED__SHIFT 0x6
++#define RLC_GPU_CLOCK_32_RES_SEL__RES_SEL_MASK 0x0000003FL
++#define RLC_GPU_CLOCK_32_RES_SEL__RESERVED_MASK 0xFFFFFFC0L
++//RLC_GPU_CLOCK_32
++#define RLC_GPU_CLOCK_32__GPU_CLOCK_32__SHIFT 0x0
++#define RLC_GPU_CLOCK_32__GPU_CLOCK_32_MASK 0xFFFFFFFFL
++//RLC_PG_CNTL
++#define RLC_PG_CNTL__GFX_POWER_GATING_ENABLE__SHIFT 0x0
++#define RLC_PG_CNTL__GFX_POWER_GATING_SRC__SHIFT 0x1
++#define RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE__SHIFT 0x2
++#define RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE__SHIFT 0x3
++#define RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE__SHIFT 0x4
++#define RLC_PG_CNTL__RESERVED__SHIFT 0x5
++#define RLC_PG_CNTL__PG_OVERRIDE__SHIFT 0xe
++#define RLC_PG_CNTL__CP_PG_DISABLE__SHIFT 0xf
++#define RLC_PG_CNTL__CHUB_HANDSHAKE_ENABLE__SHIFT 0x10
++#define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE__SHIFT 0x11
++#define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE__SHIFT 0x12
++#define RLC_PG_CNTL__SMU_HANDSHAKE_ENABLE__SHIFT 0x13
++#define RLC_PG_CNTL__RESERVED1__SHIFT 0x14
++#define RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK 0x00000001L
++#define RLC_PG_CNTL__GFX_POWER_GATING_SRC_MASK 0x00000002L
++#define RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK 0x00000004L
++#define RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK 0x00000008L
++#define RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK 0x00000010L
++#define RLC_PG_CNTL__RESERVED_MASK 0x00003FE0L
++#define RLC_PG_CNTL__PG_OVERRIDE_MASK 0x00004000L
++#define RLC_PG_CNTL__CP_PG_DISABLE_MASK 0x00008000L
++#define RLC_PG_CNTL__CHUB_HANDSHAKE_ENABLE_MASK 0x00010000L
++#define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK 0x00020000L
++#define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK 0x00040000L
++#define RLC_PG_CNTL__SMU_HANDSHAKE_ENABLE_MASK 0x00080000L
++#define RLC_PG_CNTL__RESERVED1_MASK 0x00F00000L
++//RLC_GPM_THREAD_PRIORITY
++#define RLC_GPM_THREAD_PRIORITY__THREAD0_PRIORITY__SHIFT 0x0
++#define RLC_GPM_THREAD_PRIORITY__THREAD1_PRIORITY__SHIFT 0x8
++#define RLC_GPM_THREAD_PRIORITY__THREAD2_PRIORITY__SHIFT 0x10
++#define RLC_GPM_THREAD_PRIORITY__THREAD3_PRIORITY__SHIFT 0x18
++#define RLC_GPM_THREAD_PRIORITY__THREAD0_PRIORITY_MASK 0x000000FFL
++#define RLC_GPM_THREAD_PRIORITY__THREAD1_PRIORITY_MASK 0x0000FF00L
++#define RLC_GPM_THREAD_PRIORITY__THREAD2_PRIORITY_MASK 0x00FF0000L
++#define RLC_GPM_THREAD_PRIORITY__THREAD3_PRIORITY_MASK 0xFF000000L
++//RLC_GPM_THREAD_ENABLE
++#define RLC_GPM_THREAD_ENABLE__THREAD0_ENABLE__SHIFT 0x0
++#define RLC_GPM_THREAD_ENABLE__THREAD1_ENABLE__SHIFT 0x1
++#define RLC_GPM_THREAD_ENABLE__THREAD2_ENABLE__SHIFT 0x2
++#define RLC_GPM_THREAD_ENABLE__THREAD3_ENABLE__SHIFT 0x3
++#define RLC_GPM_THREAD_ENABLE__RESERVED__SHIFT 0x4
++#define RLC_GPM_THREAD_ENABLE__THREAD0_ENABLE_MASK 0x00000001L
++#define RLC_GPM_THREAD_ENABLE__THREAD1_ENABLE_MASK 0x00000002L
++#define RLC_GPM_THREAD_ENABLE__THREAD2_ENABLE_MASK 0x00000004L
++#define RLC_GPM_THREAD_ENABLE__THREAD3_ENABLE_MASK 0x00000008L
++#define RLC_GPM_THREAD_ENABLE__RESERVED_MASK 0xFFFFFFF0L
++//RLC_CGTT_MGCG_OVERRIDE
++#define RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE__SHIFT 0x0
++#define RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE__SHIFT 0x1
++#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE__SHIFT 0x2
++#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE__SHIFT 0x3
++#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE__SHIFT 0x4
++#define RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE__SHIFT 0x5
++#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE__SHIFT 0x6
++#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE__SHIFT 0x7
++#define RLC_CGTT_MGCG_OVERRIDE__RESERVED__SHIFT 0x8
++#define RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK 0x00000001L
++#define RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK 0x00000002L
++#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK 0x00000004L
++#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK 0x00000008L
++#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK 0x00000010L
++#define RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK 0x00000020L
++#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK 0x00000040L
++#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK 0x00000080L
++#define RLC_CGTT_MGCG_OVERRIDE__RESERVED_MASK 0xFFFFFF00L
++//RLC_CGCG_CGLS_CTRL
++#define RLC_CGCG_CGLS_CTRL__CGCG_EN__SHIFT 0x0
++#define RLC_CGCG_CGLS_CTRL__CGLS_EN__SHIFT 0x1
++#define RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT 0x2
++#define RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT 0x8
++#define RLC_CGCG_CGLS_CTRL__CGCG_CONTROLLER__SHIFT 0x1b
++#define RLC_CGCG_CGLS_CTRL__CGCG_REG_CTRL__SHIFT 0x1c
++#define RLC_CGCG_CGLS_CTRL__SLEEP_MODE__SHIFT 0x1d
++#define RLC_CGCG_CGLS_CTRL__SIM_SILICON_EN__SHIFT 0x1f
++#define RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK 0x00000001L
++#define RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK 0x00000002L
++#define RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY_MASK 0x000000FCL
++#define RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD_MASK 0x07FFFF00L
++#define RLC_CGCG_CGLS_CTRL__CGCG_CONTROLLER_MASK 0x08000000L
++#define RLC_CGCG_CGLS_CTRL__CGCG_REG_CTRL_MASK 0x10000000L
++#define RLC_CGCG_CGLS_CTRL__SLEEP_MODE_MASK 0x60000000L
++#define RLC_CGCG_CGLS_CTRL__SIM_SILICON_EN_MASK 0x80000000L
++//RLC_CGCG_RAMP_CTRL
++#define RLC_CGCG_RAMP_CTRL__DOWN_DIV_START_UNIT__SHIFT 0x0
++#define RLC_CGCG_RAMP_CTRL__DOWN_DIV_STEP_UNIT__SHIFT 0x4
++#define RLC_CGCG_RAMP_CTRL__UP_DIV_START_UNIT__SHIFT 0x8
++#define RLC_CGCG_RAMP_CTRL__UP_DIV_STEP_UNIT__SHIFT 0xc
++#define RLC_CGCG_RAMP_CTRL__STEP_DELAY_CNT__SHIFT 0x10
++#define RLC_CGCG_RAMP_CTRL__STEP_DELAY_UNIT__SHIFT 0x1c
++#define RLC_CGCG_RAMP_CTRL__DOWN_DIV_START_UNIT_MASK 0x0000000FL
++#define RLC_CGCG_RAMP_CTRL__DOWN_DIV_STEP_UNIT_MASK 0x000000F0L
++#define RLC_CGCG_RAMP_CTRL__UP_DIV_START_UNIT_MASK 0x00000F00L
++#define RLC_CGCG_RAMP_CTRL__UP_DIV_STEP_UNIT_MASK 0x0000F000L
++#define RLC_CGCG_RAMP_CTRL__STEP_DELAY_CNT_MASK 0x0FFF0000L
++#define RLC_CGCG_RAMP_CTRL__STEP_DELAY_UNIT_MASK 0xF0000000L
++//RLC_DYN_PG_STATUS
++#define RLC_DYN_PG_STATUS__PG_STATUS_CU_MASK__SHIFT 0x0
++#define RLC_DYN_PG_STATUS__PG_STATUS_CU_MASK_MASK 0xFFFFFFFFL
++//RLC_DYN_PG_REQUEST
++#define RLC_DYN_PG_REQUEST__PG_REQUEST_CU_MASK__SHIFT 0x0
++#define RLC_DYN_PG_REQUEST__PG_REQUEST_CU_MASK_MASK 0xFFFFFFFFL
++//RLC_PG_DELAY
++#define RLC_PG_DELAY__POWER_UP_DELAY__SHIFT 0x0
++#define RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT 0x8
++#define RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT 0x10
++#define RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT 0x18
++#define RLC_PG_DELAY__POWER_UP_DELAY_MASK 0x000000FFL
++#define RLC_PG_DELAY__POWER_DOWN_DELAY_MASK 0x0000FF00L
++#define RLC_PG_DELAY__CMD_PROPAGATE_DELAY_MASK 0x00FF0000L
++#define RLC_PG_DELAY__MEM_SLEEP_DELAY_MASK 0xFF000000L
++//RLC_CU_STATUS
++#define RLC_CU_STATUS__WORK_PENDING__SHIFT 0x0
++#define RLC_CU_STATUS__WORK_PENDING_MASK 0xFFFFFFFFL
++//RLC_LB_INIT_CU_MASK
++#define RLC_LB_INIT_CU_MASK__INIT_CU_MASK__SHIFT 0x0
++#define RLC_LB_INIT_CU_MASK__INIT_CU_MASK_MASK 0xFFFFFFFFL
++//RLC_LB_ALWAYS_ACTIVE_CU_MASK
++#define RLC_LB_ALWAYS_ACTIVE_CU_MASK__ALWAYS_ACTIVE_CU_MASK__SHIFT 0x0
++#define RLC_LB_ALWAYS_ACTIVE_CU_MASK__ALWAYS_ACTIVE_CU_MASK_MASK 0xFFFFFFFFL
++//RLC_LB_PARAMS
++#define RLC_LB_PARAMS__SKIP_L2_CHECK__SHIFT 0x0
++#define RLC_LB_PARAMS__FIFO_SAMPLES__SHIFT 0x1
++#define RLC_LB_PARAMS__PG_IDLE_SAMPLES__SHIFT 0x8
++#define RLC_LB_PARAMS__PG_IDLE_SAMPLE_INTERVAL__SHIFT 0x10
++#define RLC_LB_PARAMS__SKIP_L2_CHECK_MASK 0x00000001L
++#define RLC_LB_PARAMS__FIFO_SAMPLES_MASK 0x000000FEL
++#define RLC_LB_PARAMS__PG_IDLE_SAMPLES_MASK 0x0000FF00L
++#define RLC_LB_PARAMS__PG_IDLE_SAMPLE_INTERVAL_MASK 0xFFFF0000L
++//RLC_THREAD1_DELAY
++#define RLC_THREAD1_DELAY__CU_IDEL_DELAY__SHIFT 0x0
++#define RLC_THREAD1_DELAY__LBPW_INNER_LOOP_DELAY__SHIFT 0x8
++#define RLC_THREAD1_DELAY__LBPW_OUTER_LOOP_DELAY__SHIFT 0x10
++#define RLC_THREAD1_DELAY__SPARE__SHIFT 0x18
++#define RLC_THREAD1_DELAY__CU_IDEL_DELAY_MASK 0x000000FFL
++#define RLC_THREAD1_DELAY__LBPW_INNER_LOOP_DELAY_MASK 0x0000FF00L
++#define RLC_THREAD1_DELAY__LBPW_OUTER_LOOP_DELAY_MASK 0x00FF0000L
++#define RLC_THREAD1_DELAY__SPARE_MASK 0xFF000000L
++//RLC_PG_ALWAYS_ON_CU_MASK
++#define RLC_PG_ALWAYS_ON_CU_MASK__AON_CU_MASK__SHIFT 0x0
++#define RLC_PG_ALWAYS_ON_CU_MASK__AON_CU_MASK_MASK 0xFFFFFFFFL
++//RLC_MAX_PG_CU
++#define RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT 0x0
++#define RLC_MAX_PG_CU__SPARE__SHIFT 0x8
++#define RLC_MAX_PG_CU__MAX_POWERED_UP_CU_MASK 0x000000FFL
++#define RLC_MAX_PG_CU__SPARE_MASK 0xFFFFFF00L
++//RLC_AUTO_PG_CTRL
++#define RLC_AUTO_PG_CTRL__AUTO_PG_EN__SHIFT 0x0
++#define RLC_AUTO_PG_CTRL__AUTO_GRBM_REG_SAVE_ON_IDLE_EN__SHIFT 0x1
++#define RLC_AUTO_PG_CTRL__AUTO_WAKE_UP_EN__SHIFT 0x2
++#define RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT 0x3
++#define RLC_AUTO_PG_CTRL__PG_AFTER_GRBM_REG_SAVE_THRESHOLD__SHIFT 0x13
++#define RLC_AUTO_PG_CTRL__AUTO_PG_EN_MASK 0x00000001L
++#define RLC_AUTO_PG_CTRL__AUTO_GRBM_REG_SAVE_ON_IDLE_EN_MASK 0x00000002L
++#define RLC_AUTO_PG_CTRL__AUTO_WAKE_UP_EN_MASK 0x00000004L
++#define RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK 0x0007FFF8L
++#define RLC_AUTO_PG_CTRL__PG_AFTER_GRBM_REG_SAVE_THRESHOLD_MASK 0xFFF80000L
++//RLC_SMU_GRBM_REG_SAVE_CTRL
++#define RLC_SMU_GRBM_REG_SAVE_CTRL__START_GRBM_REG_SAVE__SHIFT 0x0
++#define RLC_SMU_GRBM_REG_SAVE_CTRL__SPARE__SHIFT 0x1
++#define RLC_SMU_GRBM_REG_SAVE_CTRL__START_GRBM_REG_SAVE_MASK 0x00000001L
++#define RLC_SMU_GRBM_REG_SAVE_CTRL__SPARE_MASK 0xFFFFFFFEL
++//RLC_SERDES_RD_MASTER_INDEX
++#define RLC_SERDES_RD_MASTER_INDEX__CU_ID__SHIFT 0x0
++#define RLC_SERDES_RD_MASTER_INDEX__SH_ID__SHIFT 0x4
++#define RLC_SERDES_RD_MASTER_INDEX__SE_ID__SHIFT 0x6
++#define RLC_SERDES_RD_MASTER_INDEX__SE_NONCU_ID__SHIFT 0x9
++#define RLC_SERDES_RD_MASTER_INDEX__SE_NONCU__SHIFT 0xc
++#define RLC_SERDES_RD_MASTER_INDEX__NON_SE__SHIFT 0xd
++#define RLC_SERDES_RD_MASTER_INDEX__DATA_REG_ID__SHIFT 0x11
++#define RLC_SERDES_RD_MASTER_INDEX__SPARE__SHIFT 0x13
++#define RLC_SERDES_RD_MASTER_INDEX__CU_ID_MASK 0x0000000FL
++#define RLC_SERDES_RD_MASTER_INDEX__SH_ID_MASK 0x00000030L
++#define RLC_SERDES_RD_MASTER_INDEX__SE_ID_MASK 0x000001C0L
++#define RLC_SERDES_RD_MASTER_INDEX__SE_NONCU_ID_MASK 0x00000E00L
++#define RLC_SERDES_RD_MASTER_INDEX__SE_NONCU_MASK 0x00001000L
++#define RLC_SERDES_RD_MASTER_INDEX__NON_SE_MASK 0x0001E000L
++#define RLC_SERDES_RD_MASTER_INDEX__DATA_REG_ID_MASK 0x00060000L
++#define RLC_SERDES_RD_MASTER_INDEX__SPARE_MASK 0xFFF80000L
++//RLC_SERDES_RD_DATA_0
++#define RLC_SERDES_RD_DATA_0__DATA__SHIFT 0x0
++#define RLC_SERDES_RD_DATA_0__DATA_MASK 0xFFFFFFFFL
++//RLC_SERDES_RD_DATA_1
++#define RLC_SERDES_RD_DATA_1__DATA__SHIFT 0x0
++#define RLC_SERDES_RD_DATA_1__DATA_MASK 0xFFFFFFFFL
++//RLC_SERDES_RD_DATA_2
++#define RLC_SERDES_RD_DATA_2__DATA__SHIFT 0x0
++#define RLC_SERDES_RD_DATA_2__DATA_MASK 0xFFFFFFFFL
++//RLC_SERDES_WR_CU_MASTER_MASK
++#define RLC_SERDES_WR_CU_MASTER_MASK__MASTER_MASK__SHIFT 0x0
++#define RLC_SERDES_WR_CU_MASTER_MASK__MASTER_MASK_MASK 0xFFFFFFFFL
++//RLC_SERDES_WR_NONCU_MASTER_MASK
++#define RLC_SERDES_WR_NONCU_MASTER_MASK__SE_MASTER_MASK__SHIFT 0x0
++#define RLC_SERDES_WR_NONCU_MASTER_MASK__GC_MASTER_MASK__SHIFT 0x10
++#define RLC_SERDES_WR_NONCU_MASTER_MASK__GC_GFX_MASTER_MASK__SHIFT 0x11
++#define RLC_SERDES_WR_NONCU_MASTER_MASK__TC0_MASTER_MASK__SHIFT 0x12
++#define RLC_SERDES_WR_NONCU_MASTER_MASK__TC1_MASTER_MASK__SHIFT 0x13
++#define RLC_SERDES_WR_NONCU_MASTER_MASK__SPARE0_MASTER_MASK__SHIFT 0x14
++#define RLC_SERDES_WR_NONCU_MASTER_MASK__SPARE1_MASTER_MASK__SHIFT 0x15
++#define RLC_SERDES_WR_NONCU_MASTER_MASK__SPARE2_MASTER_MASK__SHIFT 0x16
++#define RLC_SERDES_WR_NONCU_MASTER_MASK__SPARE3_MASTER_MASK__SHIFT 0x17
++#define RLC_SERDES_WR_NONCU_MASTER_MASK__EA_0_MASTER_MASK__SHIFT 0x18
++#define RLC_SERDES_WR_NONCU_MASTER_MASK__TC2_MASTER_MASK__SHIFT 0x19
++#define RLC_SERDES_WR_NONCU_MASTER_MASK__RESERVED__SHIFT 0x1a
++#define RLC_SERDES_WR_NONCU_MASTER_MASK__SE_MASTER_MASK_MASK 0x0000FFFFL
++#define RLC_SERDES_WR_NONCU_MASTER_MASK__GC_MASTER_MASK_MASK 0x00010000L
++#define RLC_SERDES_WR_NONCU_MASTER_MASK__GC_GFX_MASTER_MASK_MASK 0x00020000L
++#define RLC_SERDES_WR_NONCU_MASTER_MASK__TC0_MASTER_MASK_MASK 0x00040000L
++#define RLC_SERDES_WR_NONCU_MASTER_MASK__TC1_MASTER_MASK_MASK 0x00080000L
++#define RLC_SERDES_WR_NONCU_MASTER_MASK__SPARE0_MASTER_MASK_MASK 0x00100000L
++#define RLC_SERDES_WR_NONCU_MASTER_MASK__SPARE1_MASTER_MASK_MASK 0x00200000L
++#define RLC_SERDES_WR_NONCU_MASTER_MASK__SPARE2_MASTER_MASK_MASK 0x00400000L
++#define RLC_SERDES_WR_NONCU_MASTER_MASK__SPARE3_MASTER_MASK_MASK 0x00800000L
++#define RLC_SERDES_WR_NONCU_MASTER_MASK__EA_0_MASTER_MASK_MASK 0x01000000L
++#define RLC_SERDES_WR_NONCU_MASTER_MASK__TC2_MASTER_MASK_MASK 0x02000000L
++#define RLC_SERDES_WR_NONCU_MASTER_MASK__RESERVED_MASK 0xFC000000L
++//RLC_SERDES_WR_CTRL
++#define RLC_SERDES_WR_CTRL__BPM_ADDR__SHIFT 0x0
++#define RLC_SERDES_WR_CTRL__POWER_DOWN__SHIFT 0x8
++#define RLC_SERDES_WR_CTRL__POWER_UP__SHIFT 0x9
++#define RLC_SERDES_WR_CTRL__P1_SELECT__SHIFT 0xa
++#define RLC_SERDES_WR_CTRL__P2_SELECT__SHIFT 0xb
++#define RLC_SERDES_WR_CTRL__WRITE_COMMAND__SHIFT 0xc
++#define RLC_SERDES_WR_CTRL__READ_COMMAND__SHIFT 0xd
++#define RLC_SERDES_WR_CTRL__RDDATA_RESET__SHIFT 0xe
++#define RLC_SERDES_WR_CTRL__SHORT_FORMAT__SHIFT 0xf
++#define RLC_SERDES_WR_CTRL__BPM_DATA__SHIFT 0x10
++#define RLC_SERDES_WR_CTRL__SRBM_OVERRIDE__SHIFT 0x1a
++#define RLC_SERDES_WR_CTRL__RSVD_BPM_ADDR__SHIFT 0x1b
++#define RLC_SERDES_WR_CTRL__REG_ADDR__SHIFT 0x1c
++#define RLC_SERDES_WR_CTRL__BPM_ADDR_MASK 0x000000FFL
++#define RLC_SERDES_WR_CTRL__POWER_DOWN_MASK 0x00000100L
++#define RLC_SERDES_WR_CTRL__POWER_UP_MASK 0x00000200L
++#define RLC_SERDES_WR_CTRL__P1_SELECT_MASK 0x00000400L
++#define RLC_SERDES_WR_CTRL__P2_SELECT_MASK 0x00000800L
++#define RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK 0x00001000L
++#define RLC_SERDES_WR_CTRL__READ_COMMAND_MASK 0x00002000L
++#define RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK 0x00004000L
++#define RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK 0x00008000L
++#define RLC_SERDES_WR_CTRL__BPM_DATA_MASK 0x03FF0000L
++#define RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK 0x04000000L
++#define RLC_SERDES_WR_CTRL__RSVD_BPM_ADDR_MASK 0x08000000L
++#define RLC_SERDES_WR_CTRL__REG_ADDR_MASK 0xF0000000L
++//RLC_SERDES_WR_DATA
++#define RLC_SERDES_WR_DATA__DATA__SHIFT 0x0
++#define RLC_SERDES_WR_DATA__DATA_MASK 0xFFFFFFFFL
++//RLC_SERDES_CU_MASTER_BUSY
++#define RLC_SERDES_CU_MASTER_BUSY__BUSY_BUSY__SHIFT 0x0
++#define RLC_SERDES_CU_MASTER_BUSY__BUSY_BUSY_MASK 0xFFFFFFFFL
++//RLC_SERDES_NONCU_MASTER_BUSY
++#define RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY__SHIFT 0x0
++#define RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY__SHIFT 0x10
++#define RLC_SERDES_NONCU_MASTER_BUSY__GC_GFX_MASTER_BUSY__SHIFT 0x11
++#define RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY__SHIFT 0x12
++#define RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY__SHIFT 0x13
++#define RLC_SERDES_NONCU_MASTER_BUSY__SPARE0_MASTER_BUSY__SHIFT 0x14
++#define RLC_SERDES_NONCU_MASTER_BUSY__SPARE1_MASTER_BUSY__SHIFT 0x15
++#define RLC_SERDES_NONCU_MASTER_BUSY__SPARE2_MASTER_BUSY__SHIFT 0x16
++#define RLC_SERDES_NONCU_MASTER_BUSY__SPARE3_MASTER_BUSY__SHIFT 0x17
++#define RLC_SERDES_NONCU_MASTER_BUSY__EA_0_MASTER_BUSY__SHIFT 0x18
++#define RLC_SERDES_NONCU_MASTER_BUSY__TC2_MASTER_BUSY__SHIFT 0x19
++#define RLC_SERDES_NONCU_MASTER_BUSY__RESERVED__SHIFT 0x1a
++#define RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK 0x0000FFFFL
++#define RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK 0x00010000L
++#define RLC_SERDES_NONCU_MASTER_BUSY__GC_GFX_MASTER_BUSY_MASK 0x00020000L
++#define RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK 0x00040000L
++#define RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK 0x00080000L
++#define RLC_SERDES_NONCU_MASTER_BUSY__SPARE0_MASTER_BUSY_MASK 0x00100000L
++#define RLC_SERDES_NONCU_MASTER_BUSY__SPARE1_MASTER_BUSY_MASK 0x00200000L
++#define RLC_SERDES_NONCU_MASTER_BUSY__SPARE2_MASTER_BUSY_MASK 0x00400000L
++#define RLC_SERDES_NONCU_MASTER_BUSY__SPARE3_MASTER_BUSY_MASK 0x00800000L
++#define RLC_SERDES_NONCU_MASTER_BUSY__EA_0_MASTER_BUSY_MASK 0x01000000L
++#define RLC_SERDES_NONCU_MASTER_BUSY__TC2_MASTER_BUSY_MASK 0x02000000L
++#define RLC_SERDES_NONCU_MASTER_BUSY__RESERVED_MASK 0xFC000000L
++//RLC_GPM_GENERAL_0
++#define RLC_GPM_GENERAL_0__DATA__SHIFT 0x0
++#define RLC_GPM_GENERAL_0__DATA_MASK 0xFFFFFFFFL
++//RLC_GPM_GENERAL_1
++#define RLC_GPM_GENERAL_1__DATA__SHIFT 0x0
++#define RLC_GPM_GENERAL_1__DATA_MASK 0xFFFFFFFFL
++//RLC_GPM_GENERAL_2
++#define RLC_GPM_GENERAL_2__DATA__SHIFT 0x0
++#define RLC_GPM_GENERAL_2__DATA_MASK 0xFFFFFFFFL
++//RLC_GPM_GENERAL_3
++#define RLC_GPM_GENERAL_3__DATA__SHIFT 0x0
++#define RLC_GPM_GENERAL_3__DATA_MASK 0xFFFFFFFFL
++//RLC_GPM_GENERAL_4
++#define RLC_GPM_GENERAL_4__DATA__SHIFT 0x0
++#define RLC_GPM_GENERAL_4__DATA_MASK 0xFFFFFFFFL
++//RLC_GPM_GENERAL_5
++#define RLC_GPM_GENERAL_5__DATA__SHIFT 0x0
++#define RLC_GPM_GENERAL_5__DATA_MASK 0xFFFFFFFFL
++//RLC_GPM_GENERAL_6
++#define RLC_GPM_GENERAL_6__DATA__SHIFT 0x0
++#define RLC_GPM_GENERAL_6__DATA_MASK 0xFFFFFFFFL
++//RLC_GPM_GENERAL_7
++#define RLC_GPM_GENERAL_7__DATA__SHIFT 0x0
++#define RLC_GPM_GENERAL_7__DATA_MASK 0xFFFFFFFFL
++//RLC_GPM_SCRATCH_ADDR
++#define RLC_GPM_SCRATCH_ADDR__ADDR__SHIFT 0x0
++#define RLC_GPM_SCRATCH_ADDR__RESERVED__SHIFT 0x9
++#define RLC_GPM_SCRATCH_ADDR__ADDR_MASK 0x000001FFL
++#define RLC_GPM_SCRATCH_ADDR__RESERVED_MASK 0xFFFFFE00L
++//RLC_GPM_SCRATCH_DATA
++#define RLC_GPM_SCRATCH_DATA__DATA__SHIFT 0x0
++#define RLC_GPM_SCRATCH_DATA__DATA_MASK 0xFFFFFFFFL
++//RLC_STATIC_PG_STATUS
++#define RLC_STATIC_PG_STATUS__PG_STATUS_CU_MASK__SHIFT 0x0
++#define RLC_STATIC_PG_STATUS__PG_STATUS_CU_MASK_MASK 0xFFFFFFFFL
++//RLC_SPM_MC_CNTL
++#define RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT 0x0
++#define RLC_SPM_MC_CNTL__RLC_SPM_POLICY__SHIFT 0x4
++#define RLC_SPM_MC_CNTL__RLC_SPM_PERF_CNTR__SHIFT 0x5
++#define RLC_SPM_MC_CNTL__RLC_SPM_FED__SHIFT 0x6
++#define RLC_SPM_MC_CNTL__RLC_SPM_MTYPE_OVER__SHIFT 0x7
++#define RLC_SPM_MC_CNTL__RLC_SPM_MTYPE__SHIFT 0x8
++#define RLC_SPM_MC_CNTL__RESERVED__SHIFT 0xa
++#define RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK 0x0000000FL
++#define RLC_SPM_MC_CNTL__RLC_SPM_POLICY_MASK 0x00000010L
++#define RLC_SPM_MC_CNTL__RLC_SPM_PERF_CNTR_MASK 0x00000020L
++#define RLC_SPM_MC_CNTL__RLC_SPM_FED_MASK 0x00000040L
++#define RLC_SPM_MC_CNTL__RLC_SPM_MTYPE_OVER_MASK 0x00000080L
++#define RLC_SPM_MC_CNTL__RLC_SPM_MTYPE_MASK 0x00000300L
++#define RLC_SPM_MC_CNTL__RESERVED_MASK 0xFFFFFC00L
++//RLC_SPM_INT_CNTL
++#define RLC_SPM_INT_CNTL__RLC_SPM_INT_CNTL__SHIFT 0x0
++#define RLC_SPM_INT_CNTL__RESERVED__SHIFT 0x1
++#define RLC_SPM_INT_CNTL__RLC_SPM_INT_CNTL_MASK 0x00000001L
++#define RLC_SPM_INT_CNTL__RESERVED_MASK 0xFFFFFFFEL
++//RLC_SPM_INT_STATUS
++#define RLC_SPM_INT_STATUS__RLC_SPM_INT_STATUS__SHIFT 0x0
++#define RLC_SPM_INT_STATUS__RESERVED__SHIFT 0x1
++#define RLC_SPM_INT_STATUS__RLC_SPM_INT_STATUS_MASK 0x00000001L
++#define RLC_SPM_INT_STATUS__RESERVED_MASK 0xFFFFFFFEL
++//RLC_SMU_MESSAGE
++#define RLC_SMU_MESSAGE__CMD__SHIFT 0x0
++#define RLC_SMU_MESSAGE__CMD_MASK 0xFFFFFFFFL
++//RLC_GPM_LOG_SIZE
++#define RLC_GPM_LOG_SIZE__SIZE__SHIFT 0x0
++#define RLC_GPM_LOG_SIZE__SIZE_MASK 0xFFFFFFFFL
++//RLC_PG_DELAY_3
++#define RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT 0x0
++#define RLC_PG_DELAY_3__RESERVED__SHIFT 0x8
++#define RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK 0x000000FFL
++#define RLC_PG_DELAY_3__RESERVED_MASK 0xFFFFFF00L
++//RLC_GPR_REG1
++#define RLC_GPR_REG1__DATA__SHIFT 0x0
++#define RLC_GPR_REG1__DATA_MASK 0xFFFFFFFFL
++//RLC_GPR_REG2
++#define RLC_GPR_REG2__DATA__SHIFT 0x0
++#define RLC_GPR_REG2__DATA_MASK 0xFFFFFFFFL
++//RLC_GPM_LOG_CONT
++#define RLC_GPM_LOG_CONT__CONT__SHIFT 0x0
++#define RLC_GPM_LOG_CONT__CONT_MASK 0xFFFFFFFFL
++//RLC_GPM_INT_DISABLE_TH0
++#define RLC_GPM_INT_DISABLE_TH0__DISABLE__SHIFT 0x0
++#define RLC_GPM_INT_DISABLE_TH0__DISABLE_MASK 0xFFFFFFFFL
++//RLC_GPM_INT_DISABLE_TH1
++#define RLC_GPM_INT_DISABLE_TH1__DISABLE__SHIFT 0x0
++#define RLC_GPM_INT_DISABLE_TH1__DISABLE_MASK 0xFFFFFFFFL
++//RLC_GPM_INT_FORCE_TH0
++#define RLC_GPM_INT_FORCE_TH0__FORCE__SHIFT 0x0
++#define RLC_GPM_INT_FORCE_TH0__FORCE_MASK 0xFFFFFFFFL
++//RLC_GPM_INT_FORCE_TH1
++#define RLC_GPM_INT_FORCE_TH1__FORCE__SHIFT 0x0
++#define RLC_GPM_INT_FORCE_TH1__FORCE_MASK 0xFFFFFFFFL
++//RLC_SRM_CNTL
++#define RLC_SRM_CNTL__SRM_ENABLE__SHIFT 0x0
++#define RLC_SRM_CNTL__AUTO_INCR_ADDR__SHIFT 0x1
++#define RLC_SRM_CNTL__RESERVED__SHIFT 0x2
++#define RLC_SRM_CNTL__SRM_ENABLE_MASK 0x00000001L
++#define RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK 0x00000002L
++#define RLC_SRM_CNTL__RESERVED_MASK 0xFFFFFFFCL
++//RLC_SRM_ARAM_ADDR
++#define RLC_SRM_ARAM_ADDR__ADDR__SHIFT 0x0
++#define RLC_SRM_ARAM_ADDR__RESERVED__SHIFT 0xc
++#define RLC_SRM_ARAM_ADDR__ADDR_MASK 0x00000FFFL
++#define RLC_SRM_ARAM_ADDR__RESERVED_MASK 0xFFFFF000L
++//RLC_SRM_ARAM_DATA
++#define RLC_SRM_ARAM_DATA__DATA__SHIFT 0x0
++#define RLC_SRM_ARAM_DATA__DATA_MASK 0xFFFFFFFFL
++//RLC_SRM_DRAM_ADDR
++#define RLC_SRM_DRAM_ADDR__ADDR__SHIFT 0x0
++#define RLC_SRM_DRAM_ADDR__RESERVED__SHIFT 0xc
++#define RLC_SRM_DRAM_ADDR__ADDR_MASK 0x00000FFFL
++#define RLC_SRM_DRAM_ADDR__RESERVED_MASK 0xFFFFF000L
++//RLC_SRM_DRAM_DATA
++#define RLC_SRM_DRAM_DATA__DATA__SHIFT 0x0
++#define RLC_SRM_DRAM_DATA__DATA_MASK 0xFFFFFFFFL
++//RLC_SRM_GPM_COMMAND
++#define RLC_SRM_GPM_COMMAND__OP__SHIFT 0x0
++#define RLC_SRM_GPM_COMMAND__INDEX_CNTL__SHIFT 0x1
++#define RLC_SRM_GPM_COMMAND__INDEX_CNTL_NUM__SHIFT 0x2
++#define RLC_SRM_GPM_COMMAND__SIZE__SHIFT 0x5
++#define RLC_SRM_GPM_COMMAND__START_OFFSET__SHIFT 0x11
++#define RLC_SRM_GPM_COMMAND__RESERVED1__SHIFT 0x1d
++#define RLC_SRM_GPM_COMMAND__DEST_MEMORY__SHIFT 0x1f
++#define RLC_SRM_GPM_COMMAND__OP_MASK 0x00000001L
++#define RLC_SRM_GPM_COMMAND__INDEX_CNTL_MASK 0x00000002L
++#define RLC_SRM_GPM_COMMAND__INDEX_CNTL_NUM_MASK 0x0000001CL
++#define RLC_SRM_GPM_COMMAND__SIZE_MASK 0x0001FFE0L
++#define RLC_SRM_GPM_COMMAND__START_OFFSET_MASK 0x1FFE0000L
++#define RLC_SRM_GPM_COMMAND__RESERVED1_MASK 0x60000000L
++#define RLC_SRM_GPM_COMMAND__DEST_MEMORY_MASK 0x80000000L
++//RLC_SRM_GPM_COMMAND_STATUS
++#define RLC_SRM_GPM_COMMAND_STATUS__FIFO_EMPTY__SHIFT 0x0
++#define RLC_SRM_GPM_COMMAND_STATUS__FIFO_FULL__SHIFT 0x1
++#define RLC_SRM_GPM_COMMAND_STATUS__RESERVED__SHIFT 0x2
++#define RLC_SRM_GPM_COMMAND_STATUS__FIFO_EMPTY_MASK 0x00000001L
++#define RLC_SRM_GPM_COMMAND_STATUS__FIFO_FULL_MASK 0x00000002L
++#define RLC_SRM_GPM_COMMAND_STATUS__RESERVED_MASK 0xFFFFFFFCL
++//RLC_SRM_RLCV_COMMAND
++#define RLC_SRM_RLCV_COMMAND__OP__SHIFT 0x0
++#define RLC_SRM_RLCV_COMMAND__RESERVED__SHIFT 0x1
++#define RLC_SRM_RLCV_COMMAND__SIZE__SHIFT 0x4
++#define RLC_SRM_RLCV_COMMAND__START_OFFSET__SHIFT 0x10
++#define RLC_SRM_RLCV_COMMAND__RESERVED1__SHIFT 0x1c
++#define RLC_SRM_RLCV_COMMAND__DEST_MEMORY__SHIFT 0x1f
++#define RLC_SRM_RLCV_COMMAND__OP_MASK 0x00000001L
++#define RLC_SRM_RLCV_COMMAND__RESERVED_MASK 0x0000000EL
++#define RLC_SRM_RLCV_COMMAND__SIZE_MASK 0x0000FFF0L
++#define RLC_SRM_RLCV_COMMAND__START_OFFSET_MASK 0x0FFF0000L
++#define RLC_SRM_RLCV_COMMAND__RESERVED1_MASK 0x70000000L
++#define RLC_SRM_RLCV_COMMAND__DEST_MEMORY_MASK 0x80000000L
++//RLC_SRM_RLCV_COMMAND_STATUS
++#define RLC_SRM_RLCV_COMMAND_STATUS__FIFO_EMPTY__SHIFT 0x0
++#define RLC_SRM_RLCV_COMMAND_STATUS__FIFO_FULL__SHIFT 0x1
++#define RLC_SRM_RLCV_COMMAND_STATUS__RESERVED__SHIFT 0x2
++#define RLC_SRM_RLCV_COMMAND_STATUS__FIFO_EMPTY_MASK 0x00000001L
++#define RLC_SRM_RLCV_COMMAND_STATUS__FIFO_FULL_MASK 0x00000002L
++#define RLC_SRM_RLCV_COMMAND_STATUS__RESERVED_MASK 0xFFFFFFFCL
++//RLC_SRM_INDEX_CNTL_ADDR_0
++#define RLC_SRM_INDEX_CNTL_ADDR_0__ADDRESS__SHIFT 0x0
++#define RLC_SRM_INDEX_CNTL_ADDR_0__RESERVED__SHIFT 0x10
++#define RLC_SRM_INDEX_CNTL_ADDR_0__ADDRESS_MASK 0x0000FFFFL
++#define RLC_SRM_INDEX_CNTL_ADDR_0__RESERVED_MASK 0xFFFF0000L
++//RLC_SRM_INDEX_CNTL_ADDR_1
++#define RLC_SRM_INDEX_CNTL_ADDR_1__ADDRESS__SHIFT 0x0
++#define RLC_SRM_INDEX_CNTL_ADDR_1__RESERVED__SHIFT 0x10
++#define RLC_SRM_INDEX_CNTL_ADDR_1__ADDRESS_MASK 0x0000FFFFL
++#define RLC_SRM_INDEX_CNTL_ADDR_1__RESERVED_MASK 0xFFFF0000L
++//RLC_SRM_INDEX_CNTL_ADDR_2
++#define RLC_SRM_INDEX_CNTL_ADDR_2__ADDRESS__SHIFT 0x0
++#define RLC_SRM_INDEX_CNTL_ADDR_2__RESERVED__SHIFT 0x10
++#define RLC_SRM_INDEX_CNTL_ADDR_2__ADDRESS_MASK 0x0000FFFFL
++#define RLC_SRM_INDEX_CNTL_ADDR_2__RESERVED_MASK 0xFFFF0000L
++//RLC_SRM_INDEX_CNTL_ADDR_3
++#define RLC_SRM_INDEX_CNTL_ADDR_3__ADDRESS__SHIFT 0x0
++#define RLC_SRM_INDEX_CNTL_ADDR_3__RESERVED__SHIFT 0x10
++#define RLC_SRM_INDEX_CNTL_ADDR_3__ADDRESS_MASK 0x0000FFFFL
++#define RLC_SRM_INDEX_CNTL_ADDR_3__RESERVED_MASK 0xFFFF0000L
++//RLC_SRM_INDEX_CNTL_ADDR_4
++#define RLC_SRM_INDEX_CNTL_ADDR_4__ADDRESS__SHIFT 0x0
++#define RLC_SRM_INDEX_CNTL_ADDR_4__RESERVED__SHIFT 0x10
++#define RLC_SRM_INDEX_CNTL_ADDR_4__ADDRESS_MASK 0x0000FFFFL
++#define RLC_SRM_INDEX_CNTL_ADDR_4__RESERVED_MASK 0xFFFF0000L
++//RLC_SRM_INDEX_CNTL_ADDR_5
++#define RLC_SRM_INDEX_CNTL_ADDR_5__ADDRESS__SHIFT 0x0
++#define RLC_SRM_INDEX_CNTL_ADDR_5__RESERVED__SHIFT 0x10
++#define RLC_SRM_INDEX_CNTL_ADDR_5__ADDRESS_MASK 0x0000FFFFL
++#define RLC_SRM_INDEX_CNTL_ADDR_5__RESERVED_MASK 0xFFFF0000L
++//RLC_SRM_INDEX_CNTL_ADDR_6
++#define RLC_SRM_INDEX_CNTL_ADDR_6__ADDRESS__SHIFT 0x0
++#define RLC_SRM_INDEX_CNTL_ADDR_6__RESERVED__SHIFT 0x10
++#define RLC_SRM_INDEX_CNTL_ADDR_6__ADDRESS_MASK 0x0000FFFFL
++#define RLC_SRM_INDEX_CNTL_ADDR_6__RESERVED_MASK 0xFFFF0000L
++//RLC_SRM_INDEX_CNTL_ADDR_7
++#define RLC_SRM_INDEX_CNTL_ADDR_7__ADDRESS__SHIFT 0x0
++#define RLC_SRM_INDEX_CNTL_ADDR_7__RESERVED__SHIFT 0x10
++#define RLC_SRM_INDEX_CNTL_ADDR_7__ADDRESS_MASK 0x0000FFFFL
++#define RLC_SRM_INDEX_CNTL_ADDR_7__RESERVED_MASK 0xFFFF0000L
++//RLC_SRM_INDEX_CNTL_DATA_0
++#define RLC_SRM_INDEX_CNTL_DATA_0__DATA__SHIFT 0x0
++#define RLC_SRM_INDEX_CNTL_DATA_0__DATA_MASK 0xFFFFFFFFL
++//RLC_SRM_INDEX_CNTL_DATA_1
++#define RLC_SRM_INDEX_CNTL_DATA_1__DATA__SHIFT 0x0
++#define RLC_SRM_INDEX_CNTL_DATA_1__DATA_MASK 0xFFFFFFFFL
++//RLC_SRM_INDEX_CNTL_DATA_2
++#define RLC_SRM_INDEX_CNTL_DATA_2__DATA__SHIFT 0x0
++#define RLC_SRM_INDEX_CNTL_DATA_2__DATA_MASK 0xFFFFFFFFL
++//RLC_SRM_INDEX_CNTL_DATA_3
++#define RLC_SRM_INDEX_CNTL_DATA_3__DATA__SHIFT 0x0
++#define RLC_SRM_INDEX_CNTL_DATA_3__DATA_MASK 0xFFFFFFFFL
++//RLC_SRM_INDEX_CNTL_DATA_4
++#define RLC_SRM_INDEX_CNTL_DATA_4__DATA__SHIFT 0x0
++#define RLC_SRM_INDEX_CNTL_DATA_4__DATA_MASK 0xFFFFFFFFL
++//RLC_SRM_INDEX_CNTL_DATA_5
++#define RLC_SRM_INDEX_CNTL_DATA_5__DATA__SHIFT 0x0
++#define RLC_SRM_INDEX_CNTL_DATA_5__DATA_MASK 0xFFFFFFFFL
++//RLC_SRM_INDEX_CNTL_DATA_6
++#define RLC_SRM_INDEX_CNTL_DATA_6__DATA__SHIFT 0x0
++#define RLC_SRM_INDEX_CNTL_DATA_6__DATA_MASK 0xFFFFFFFFL
++//RLC_SRM_INDEX_CNTL_DATA_7
++#define RLC_SRM_INDEX_CNTL_DATA_7__DATA__SHIFT 0x0
++#define RLC_SRM_INDEX_CNTL_DATA_7__DATA_MASK 0xFFFFFFFFL
++//RLC_SRM_STAT
++#define RLC_SRM_STAT__SRM_BUSY__SHIFT 0x0
++#define RLC_SRM_STAT__SRM_BUSY_DELAY__SHIFT 0x1
++#define RLC_SRM_STAT__RESERVED__SHIFT 0x2
++#define RLC_SRM_STAT__SRM_BUSY_MASK 0x00000001L
++#define RLC_SRM_STAT__SRM_BUSY_DELAY_MASK 0x00000002L
++#define RLC_SRM_STAT__RESERVED_MASK 0xFFFFFFFCL
++//RLC_SRM_GPM_ABORT
++#define RLC_SRM_GPM_ABORT__ABORT__SHIFT 0x0
++#define RLC_SRM_GPM_ABORT__RESERVED__SHIFT 0x1
++#define RLC_SRM_GPM_ABORT__ABORT_MASK 0x00000001L
++#define RLC_SRM_GPM_ABORT__RESERVED_MASK 0xFFFFFFFEL
++//RLC_CSIB_ADDR_LO
++#define RLC_CSIB_ADDR_LO__ADDRESS__SHIFT 0x0
++#define RLC_CSIB_ADDR_LO__ADDRESS_MASK 0xFFFFFFFFL
++//RLC_CSIB_ADDR_HI
++#define RLC_CSIB_ADDR_HI__ADDRESS__SHIFT 0x0
++#define RLC_CSIB_ADDR_HI__ADDRESS_MASK 0x0000FFFFL
++//RLC_CSIB_LENGTH
++#define RLC_CSIB_LENGTH__LENGTH__SHIFT 0x0
++#define RLC_CSIB_LENGTH__LENGTH_MASK 0xFFFFFFFFL
++//RLC_SMU_COMMAND
++#define RLC_SMU_COMMAND__CMD__SHIFT 0x0
++#define RLC_SMU_COMMAND__CMD_MASK 0xFFFFFFFFL
++//RLC_CP_SCHEDULERS
++#define RLC_CP_SCHEDULERS__scheduler0__SHIFT 0x0
++#define RLC_CP_SCHEDULERS__scheduler1__SHIFT 0x8
++#define RLC_CP_SCHEDULERS__scheduler2__SHIFT 0x10
++#define RLC_CP_SCHEDULERS__scheduler3__SHIFT 0x18
++#define RLC_CP_SCHEDULERS__scheduler0_MASK 0x000000FFL
++#define RLC_CP_SCHEDULERS__scheduler1_MASK 0x0000FF00L
++#define RLC_CP_SCHEDULERS__scheduler2_MASK 0x00FF0000L
++#define RLC_CP_SCHEDULERS__scheduler3_MASK 0xFF000000L
++//RLC_SMU_ARGUMENT_1
++#define RLC_SMU_ARGUMENT_1__ARG__SHIFT 0x0
++#define RLC_SMU_ARGUMENT_1__ARG_MASK 0xFFFFFFFFL
++//RLC_SMU_ARGUMENT_2
++#define RLC_SMU_ARGUMENT_2__ARG__SHIFT 0x0
++#define RLC_SMU_ARGUMENT_2__ARG_MASK 0xFFFFFFFFL
++//RLC_GPM_GENERAL_8
++#define RLC_GPM_GENERAL_8__DATA__SHIFT 0x0
++#define RLC_GPM_GENERAL_8__DATA_MASK 0xFFFFFFFFL
++//RLC_GPM_GENERAL_9
++#define RLC_GPM_GENERAL_9__DATA__SHIFT 0x0
++#define RLC_GPM_GENERAL_9__DATA_MASK 0xFFFFFFFFL
++//RLC_GPM_GENERAL_10
++#define RLC_GPM_GENERAL_10__DATA__SHIFT 0x0
++#define RLC_GPM_GENERAL_10__DATA_MASK 0xFFFFFFFFL
++//RLC_GPM_GENERAL_11
++#define RLC_GPM_GENERAL_11__DATA__SHIFT 0x0
++#define RLC_GPM_GENERAL_11__DATA_MASK 0xFFFFFFFFL
++//RLC_GPM_GENERAL_12
++#define RLC_GPM_GENERAL_12__DATA__SHIFT 0x0
++#define RLC_GPM_GENERAL_12__DATA_MASK 0xFFFFFFFFL
++//RLC_GPM_UTCL1_CNTL_0
++#define RLC_GPM_UTCL1_CNTL_0__XNACK_REDO_TIMER_CNT__SHIFT 0x0
++#define RLC_GPM_UTCL1_CNTL_0__DROP_MODE__SHIFT 0x18
++#define RLC_GPM_UTCL1_CNTL_0__BYPASS__SHIFT 0x19
++#define RLC_GPM_UTCL1_CNTL_0__INVALIDATE__SHIFT 0x1a
++#define RLC_GPM_UTCL1_CNTL_0__FRAG_LIMIT_MODE__SHIFT 0x1b
++#define RLC_GPM_UTCL1_CNTL_0__FORCE_SNOOP__SHIFT 0x1c
++#define RLC_GPM_UTCL1_CNTL_0__FORCE_SD_VMID_DIRTY__SHIFT 0x1d
++#define RLC_GPM_UTCL1_CNTL_0__RESERVED__SHIFT 0x1e
++#define RLC_GPM_UTCL1_CNTL_0__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
++#define RLC_GPM_UTCL1_CNTL_0__DROP_MODE_MASK 0x01000000L
++#define RLC_GPM_UTCL1_CNTL_0__BYPASS_MASK 0x02000000L
++#define RLC_GPM_UTCL1_CNTL_0__INVALIDATE_MASK 0x04000000L
++#define RLC_GPM_UTCL1_CNTL_0__FRAG_LIMIT_MODE_MASK 0x08000000L
++#define RLC_GPM_UTCL1_CNTL_0__FORCE_SNOOP_MASK 0x10000000L
++#define RLC_GPM_UTCL1_CNTL_0__FORCE_SD_VMID_DIRTY_MASK 0x20000000L
++#define RLC_GPM_UTCL1_CNTL_0__RESERVED_MASK 0xC0000000L
++//RLC_GPM_UTCL1_CNTL_1
++#define RLC_GPM_UTCL1_CNTL_1__XNACK_REDO_TIMER_CNT__SHIFT 0x0
++#define RLC_GPM_UTCL1_CNTL_1__DROP_MODE__SHIFT 0x18
++#define RLC_GPM_UTCL1_CNTL_1__BYPASS__SHIFT 0x19
++#define RLC_GPM_UTCL1_CNTL_1__INVALIDATE__SHIFT 0x1a
++#define RLC_GPM_UTCL1_CNTL_1__FRAG_LIMIT_MODE__SHIFT 0x1b
++#define RLC_GPM_UTCL1_CNTL_1__FORCE_SNOOP__SHIFT 0x1c
++#define RLC_GPM_UTCL1_CNTL_1__FORCE_SD_VMID_DIRTY__SHIFT 0x1d
++#define RLC_GPM_UTCL1_CNTL_1__RESERVED__SHIFT 0x1e
++#define RLC_GPM_UTCL1_CNTL_1__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
++#define RLC_GPM_UTCL1_CNTL_1__DROP_MODE_MASK 0x01000000L
++#define RLC_GPM_UTCL1_CNTL_1__BYPASS_MASK 0x02000000L
++#define RLC_GPM_UTCL1_CNTL_1__INVALIDATE_MASK 0x04000000L
++#define RLC_GPM_UTCL1_CNTL_1__FRAG_LIMIT_MODE_MASK 0x08000000L
++#define RLC_GPM_UTCL1_CNTL_1__FORCE_SNOOP_MASK 0x10000000L
++#define RLC_GPM_UTCL1_CNTL_1__FORCE_SD_VMID_DIRTY_MASK 0x20000000L
++#define RLC_GPM_UTCL1_CNTL_1__RESERVED_MASK 0xC0000000L
++//RLC_GPM_UTCL1_CNTL_2
++#define RLC_GPM_UTCL1_CNTL_2__XNACK_REDO_TIMER_CNT__SHIFT 0x0
++#define RLC_GPM_UTCL1_CNTL_2__DROP_MODE__SHIFT 0x18
++#define RLC_GPM_UTCL1_CNTL_2__BYPASS__SHIFT 0x19
++#define RLC_GPM_UTCL1_CNTL_2__INVALIDATE__SHIFT 0x1a
++#define RLC_GPM_UTCL1_CNTL_2__FRAG_LIMIT_MODE__SHIFT 0x1b
++#define RLC_GPM_UTCL1_CNTL_2__FORCE_SNOOP__SHIFT 0x1c
++#define RLC_GPM_UTCL1_CNTL_2__FORCE_SD_VMID_DIRTY__SHIFT 0x1d
++#define RLC_GPM_UTCL1_CNTL_2__RESERVED__SHIFT 0x1e
++#define RLC_GPM_UTCL1_CNTL_2__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
++#define RLC_GPM_UTCL1_CNTL_2__DROP_MODE_MASK 0x01000000L
++#define RLC_GPM_UTCL1_CNTL_2__BYPASS_MASK 0x02000000L
++#define RLC_GPM_UTCL1_CNTL_2__INVALIDATE_MASK 0x04000000L
++#define RLC_GPM_UTCL1_CNTL_2__FRAG_LIMIT_MODE_MASK 0x08000000L
++#define RLC_GPM_UTCL1_CNTL_2__FORCE_SNOOP_MASK 0x10000000L
++#define RLC_GPM_UTCL1_CNTL_2__FORCE_SD_VMID_DIRTY_MASK 0x20000000L
++#define RLC_GPM_UTCL1_CNTL_2__RESERVED_MASK 0xC0000000L
++//RLC_SPM_UTCL1_CNTL
++#define RLC_SPM_UTCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
++#define RLC_SPM_UTCL1_CNTL__DROP_MODE__SHIFT 0x18
++#define RLC_SPM_UTCL1_CNTL__BYPASS__SHIFT 0x19
++#define RLC_SPM_UTCL1_CNTL__INVALIDATE__SHIFT 0x1a
++#define RLC_SPM_UTCL1_CNTL__FRAG_LIMIT_MODE__SHIFT 0x1b
++#define RLC_SPM_UTCL1_CNTL__FORCE_SNOOP__SHIFT 0x1c
++#define RLC_SPM_UTCL1_CNTL__FORCE_SD_VMID_DIRTY__SHIFT 0x1d
++#define RLC_SPM_UTCL1_CNTL__RESERVED__SHIFT 0x1e
++#define RLC_SPM_UTCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
++#define RLC_SPM_UTCL1_CNTL__DROP_MODE_MASK 0x01000000L
++#define RLC_SPM_UTCL1_CNTL__BYPASS_MASK 0x02000000L
++#define RLC_SPM_UTCL1_CNTL__INVALIDATE_MASK 0x04000000L
++#define RLC_SPM_UTCL1_CNTL__FRAG_LIMIT_MODE_MASK 0x08000000L
++#define RLC_SPM_UTCL1_CNTL__FORCE_SNOOP_MASK 0x10000000L
++#define RLC_SPM_UTCL1_CNTL__FORCE_SD_VMID_DIRTY_MASK 0x20000000L
++#define RLC_SPM_UTCL1_CNTL__RESERVED_MASK 0xC0000000L
++//RLC_UTCL1_STATUS_2
++#define RLC_UTCL1_STATUS_2__GPM_TH0_UTCL1_BUSY__SHIFT 0x0
++#define RLC_UTCL1_STATUS_2__GPM_TH1_UTCL1_BUSY__SHIFT 0x1
++#define RLC_UTCL1_STATUS_2__GPM_TH2_UTCL1_BUSY__SHIFT 0x2
++#define RLC_UTCL1_STATUS_2__SPM_UTCL1_BUSY__SHIFT 0x3
++#define RLC_UTCL1_STATUS_2__PREWALKER_UTCL1_BUSY__SHIFT 0x4
++#define RLC_UTCL1_STATUS_2__GPM_TH0_UTCL1_StallOnTrans__SHIFT 0x5
++#define RLC_UTCL1_STATUS_2__GPM_TH1_UTCL1_StallOnTrans__SHIFT 0x6
++#define RLC_UTCL1_STATUS_2__GPM_TH2_UTCL1_StallOnTrans__SHIFT 0x7
++#define RLC_UTCL1_STATUS_2__SPM_UTCL1_StallOnTrans__SHIFT 0x8
++#define RLC_UTCL1_STATUS_2__PREWALKER_UTCL1_StallOnTrans__SHIFT 0x9
++#define RLC_UTCL1_STATUS_2__RESERVED__SHIFT 0xa
++#define RLC_UTCL1_STATUS_2__GPM_TH0_UTCL1_BUSY_MASK 0x00000001L
++#define RLC_UTCL1_STATUS_2__GPM_TH1_UTCL1_BUSY_MASK 0x00000002L
++#define RLC_UTCL1_STATUS_2__GPM_TH2_UTCL1_BUSY_MASK 0x00000004L
++#define RLC_UTCL1_STATUS_2__SPM_UTCL1_BUSY_MASK 0x00000008L
++#define RLC_UTCL1_STATUS_2__PREWALKER_UTCL1_BUSY_MASK 0x00000010L
++#define RLC_UTCL1_STATUS_2__GPM_TH0_UTCL1_StallOnTrans_MASK 0x00000020L
++#define RLC_UTCL1_STATUS_2__GPM_TH1_UTCL1_StallOnTrans_MASK 0x00000040L
++#define RLC_UTCL1_STATUS_2__GPM_TH2_UTCL1_StallOnTrans_MASK 0x00000080L
++#define RLC_UTCL1_STATUS_2__SPM_UTCL1_StallOnTrans_MASK 0x00000100L
++#define RLC_UTCL1_STATUS_2__PREWALKER_UTCL1_StallOnTrans_MASK 0x00000200L
++#define RLC_UTCL1_STATUS_2__RESERVED_MASK 0xFFFFFC00L
++//RLC_LB_THR_CONFIG_2
++#define RLC_LB_THR_CONFIG_2__DATA__SHIFT 0x0
++#define RLC_LB_THR_CONFIG_2__DATA_MASK 0xFFFFFFFFL
++//RLC_LB_THR_CONFIG_3
++#define RLC_LB_THR_CONFIG_3__DATA__SHIFT 0x0
++#define RLC_LB_THR_CONFIG_3__DATA_MASK 0xFFFFFFFFL
++//RLC_LB_THR_CONFIG_4
++#define RLC_LB_THR_CONFIG_4__DATA__SHIFT 0x0
++#define RLC_LB_THR_CONFIG_4__DATA_MASK 0xFFFFFFFFL
++//RLC_SPM_UTCL1_ERROR_1
++#define RLC_SPM_UTCL1_ERROR_1__Translated_ReqError__SHIFT 0x0
++#define RLC_SPM_UTCL1_ERROR_1__Translated_ReqErrorVmid__SHIFT 0x2
++#define RLC_SPM_UTCL1_ERROR_1__Translated_ReqErrorAddr_MSB__SHIFT 0x6
++#define RLC_SPM_UTCL1_ERROR_1__Translated_ReqError_MASK 0x00000003L
++#define RLC_SPM_UTCL1_ERROR_1__Translated_ReqErrorVmid_MASK 0x0000003CL
++#define RLC_SPM_UTCL1_ERROR_1__Translated_ReqErrorAddr_MSB_MASK 0x000003C0L
++//RLC_SPM_UTCL1_ERROR_2
++#define RLC_SPM_UTCL1_ERROR_2__Translated_ReqErrorAddr_LSB__SHIFT 0x0
++#define RLC_SPM_UTCL1_ERROR_2__Translated_ReqErrorAddr_LSB_MASK 0xFFFFFFFFL
++//RLC_GPM_UTCL1_TH0_ERROR_1
++#define RLC_GPM_UTCL1_TH0_ERROR_1__Translated_ReqError__SHIFT 0x0
++#define RLC_GPM_UTCL1_TH0_ERROR_1__Translated_ReqErrorVmid__SHIFT 0x2
++#define RLC_GPM_UTCL1_TH0_ERROR_1__Translated_ReqErrorAddr_MSB__SHIFT 0x6
++#define RLC_GPM_UTCL1_TH0_ERROR_1__Translated_ReqError_MASK 0x00000003L
++#define RLC_GPM_UTCL1_TH0_ERROR_1__Translated_ReqErrorVmid_MASK 0x0000003CL
++#define RLC_GPM_UTCL1_TH0_ERROR_1__Translated_ReqErrorAddr_MSB_MASK 0x000003C0L
++//RLC_LB_THR_CONFIG_1
++#define RLC_LB_THR_CONFIG_1__DATA__SHIFT 0x0
++#define RLC_LB_THR_CONFIG_1__DATA_MASK 0xFFFFFFFFL
++//RLC_GPM_UTCL1_TH0_ERROR_2
++#define RLC_GPM_UTCL1_TH0_ERROR_2__Translated_ReqErrorAddr_LSB__SHIFT 0x0
++#define RLC_GPM_UTCL1_TH0_ERROR_2__Translated_ReqErrorAddr_LSB_MASK 0xFFFFFFFFL
++//RLC_GPM_UTCL1_TH1_ERROR_1
++#define RLC_GPM_UTCL1_TH1_ERROR_1__Translated_ReqError__SHIFT 0x0
++#define RLC_GPM_UTCL1_TH1_ERROR_1__Translated_ReqErrorVmid__SHIFT 0x2
++#define RLC_GPM_UTCL1_TH1_ERROR_1__Translated_ReqErrorAddr_MSB__SHIFT 0x6
++#define RLC_GPM_UTCL1_TH1_ERROR_1__Translated_ReqError_MASK 0x00000003L
++#define RLC_GPM_UTCL1_TH1_ERROR_1__Translated_ReqErrorVmid_MASK 0x0000003CL
++#define RLC_GPM_UTCL1_TH1_ERROR_1__Translated_ReqErrorAddr_MSB_MASK 0x000003C0L
++//RLC_GPM_UTCL1_TH1_ERROR_2
++#define RLC_GPM_UTCL1_TH1_ERROR_2__Translated_ReqErrorAddr_LSB__SHIFT 0x0
++#define RLC_GPM_UTCL1_TH1_ERROR_2__Translated_ReqErrorAddr_LSB_MASK 0xFFFFFFFFL
++//RLC_GPM_UTCL1_TH2_ERROR_1
++#define RLC_GPM_UTCL1_TH2_ERROR_1__Translated_ReqError__SHIFT 0x0
++#define RLC_GPM_UTCL1_TH2_ERROR_1__Translated_ReqErrorVmid__SHIFT 0x2
++#define RLC_GPM_UTCL1_TH2_ERROR_1__Translated_ReqErrorAddr_MSB__SHIFT 0x6
++#define RLC_GPM_UTCL1_TH2_ERROR_1__Translated_ReqError_MASK 0x00000003L
++#define RLC_GPM_UTCL1_TH2_ERROR_1__Translated_ReqErrorVmid_MASK 0x0000003CL
++#define RLC_GPM_UTCL1_TH2_ERROR_1__Translated_ReqErrorAddr_MSB_MASK 0x000003C0L
++//RLC_GPM_UTCL1_TH2_ERROR_2
++#define RLC_GPM_UTCL1_TH2_ERROR_2__Translated_ReqErrorAddr_LSB__SHIFT 0x0
++#define RLC_GPM_UTCL1_TH2_ERROR_2__Translated_ReqErrorAddr_LSB_MASK 0xFFFFFFFFL
++//RLC_CGCG_CGLS_CTRL_3D
++#define RLC_CGCG_CGLS_CTRL_3D__CGCG_EN__SHIFT 0x0
++#define RLC_CGCG_CGLS_CTRL_3D__CGLS_EN__SHIFT 0x1
++#define RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT 0x2
++#define RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT 0x8
++#define RLC_CGCG_CGLS_CTRL_3D__CGCG_CONTROLLER__SHIFT 0x1b
++#define RLC_CGCG_CGLS_CTRL_3D__CGCG_REG_CTRL__SHIFT 0x1c
++#define RLC_CGCG_CGLS_CTRL_3D__SLEEP_MODE__SHIFT 0x1d
++#define RLC_CGCG_CGLS_CTRL_3D__SIM_SILICON_EN__SHIFT 0x1f
++#define RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK 0x00000001L
++#define RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK 0x00000002L
++#define RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY_MASK 0x000000FCL
++#define RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD_MASK 0x07FFFF00L
++#define RLC_CGCG_CGLS_CTRL_3D__CGCG_CONTROLLER_MASK 0x08000000L
++#define RLC_CGCG_CGLS_CTRL_3D__CGCG_REG_CTRL_MASK 0x10000000L
++#define RLC_CGCG_CGLS_CTRL_3D__SLEEP_MODE_MASK 0x60000000L
++#define RLC_CGCG_CGLS_CTRL_3D__SIM_SILICON_EN_MASK 0x80000000L
++//RLC_CGCG_RAMP_CTRL_3D
++#define RLC_CGCG_RAMP_CTRL_3D__DOWN_DIV_START_UNIT__SHIFT 0x0
++#define RLC_CGCG_RAMP_CTRL_3D__DOWN_DIV_STEP_UNIT__SHIFT 0x4
++#define RLC_CGCG_RAMP_CTRL_3D__UP_DIV_START_UNIT__SHIFT 0x8
++#define RLC_CGCG_RAMP_CTRL_3D__UP_DIV_STEP_UNIT__SHIFT 0xc
++#define RLC_CGCG_RAMP_CTRL_3D__STEP_DELAY_CNT__SHIFT 0x10
++#define RLC_CGCG_RAMP_CTRL_3D__STEP_DELAY_UNIT__SHIFT 0x1c
++#define RLC_CGCG_RAMP_CTRL_3D__DOWN_DIV_START_UNIT_MASK 0x0000000FL
++#define RLC_CGCG_RAMP_CTRL_3D__DOWN_DIV_STEP_UNIT_MASK 0x000000F0L
++#define RLC_CGCG_RAMP_CTRL_3D__UP_DIV_START_UNIT_MASK 0x00000F00L
++#define RLC_CGCG_RAMP_CTRL_3D__UP_DIV_STEP_UNIT_MASK 0x0000F000L
++#define RLC_CGCG_RAMP_CTRL_3D__STEP_DELAY_CNT_MASK 0x0FFF0000L
++#define RLC_CGCG_RAMP_CTRL_3D__STEP_DELAY_UNIT_MASK 0xF0000000L
++//RLC_SEMAPHORE_0
++#define RLC_SEMAPHORE_0__CLIENT_ID__SHIFT 0x0
++#define RLC_SEMAPHORE_0__RESERVED__SHIFT 0x5
++#define RLC_SEMAPHORE_0__CLIENT_ID_MASK 0x0000001FL
++#define RLC_SEMAPHORE_0__RESERVED_MASK 0xFFFFFFE0L
++//RLC_SEMAPHORE_1
++#define RLC_SEMAPHORE_1__CLIENT_ID__SHIFT 0x0
++#define RLC_SEMAPHORE_1__RESERVED__SHIFT 0x5
++#define RLC_SEMAPHORE_1__CLIENT_ID_MASK 0x0000001FL
++#define RLC_SEMAPHORE_1__RESERVED_MASK 0xFFFFFFE0L
++//RLC_CP_EOF_INT
++#define RLC_CP_EOF_INT__INTERRUPT__SHIFT 0x0
++#define RLC_CP_EOF_INT__RESERVED__SHIFT 0x1
++#define RLC_CP_EOF_INT__INTERRUPT_MASK 0x00000001L
++#define RLC_CP_EOF_INT__RESERVED_MASK 0xFFFFFFFEL
++//RLC_CP_EOF_INT_CNT
++#define RLC_CP_EOF_INT_CNT__CNT__SHIFT 0x0
++#define RLC_CP_EOF_INT_CNT__CNT_MASK 0xFFFFFFFFL
++//RLC_SPARE_INT
++#define RLC_SPARE_INT__INTERRUPT__SHIFT 0x0
++#define RLC_SPARE_INT__RESERVED__SHIFT 0x1
++#define RLC_SPARE_INT__INTERRUPT_MASK 0x00000001L
++#define RLC_SPARE_INT__RESERVED_MASK 0xFFFFFFFEL
++//RLC_PREWALKER_UTCL1_CNTL
++#define RLC_PREWALKER_UTCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
++#define RLC_PREWALKER_UTCL1_CNTL__DROP_MODE__SHIFT 0x18
++#define RLC_PREWALKER_UTCL1_CNTL__BYPASS__SHIFT 0x19
++#define RLC_PREWALKER_UTCL1_CNTL__INVALIDATE__SHIFT 0x1a
++#define RLC_PREWALKER_UTCL1_CNTL__FRAG_LIMIT_MODE__SHIFT 0x1b
++#define RLC_PREWALKER_UTCL1_CNTL__FORCE_SNOOP__SHIFT 0x1c
++#define RLC_PREWALKER_UTCL1_CNTL__FORCE_SD_VMID_DIRTY__SHIFT 0x1d
++#define RLC_PREWALKER_UTCL1_CNTL__RESERVED__SHIFT 0x1e
++#define RLC_PREWALKER_UTCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
++#define RLC_PREWALKER_UTCL1_CNTL__DROP_MODE_MASK 0x01000000L
++#define RLC_PREWALKER_UTCL1_CNTL__BYPASS_MASK 0x02000000L
++#define RLC_PREWALKER_UTCL1_CNTL__INVALIDATE_MASK 0x04000000L
++#define RLC_PREWALKER_UTCL1_CNTL__FRAG_LIMIT_MODE_MASK 0x08000000L
++#define RLC_PREWALKER_UTCL1_CNTL__FORCE_SNOOP_MASK 0x10000000L
++#define RLC_PREWALKER_UTCL1_CNTL__FORCE_SD_VMID_DIRTY_MASK 0x20000000L
++#define RLC_PREWALKER_UTCL1_CNTL__RESERVED_MASK 0xC0000000L
++//RLC_PREWALKER_UTCL1_TRIG
++#define RLC_PREWALKER_UTCL1_TRIG__VALID__SHIFT 0x0
++#define RLC_PREWALKER_UTCL1_TRIG__VMID__SHIFT 0x1
++#define RLC_PREWALKER_UTCL1_TRIG__PRIME_MODE__SHIFT 0x5
++#define RLC_PREWALKER_UTCL1_TRIG__READ_PERM__SHIFT 0x6
++#define RLC_PREWALKER_UTCL1_TRIG__WRITE_PERM__SHIFT 0x7
++#define RLC_PREWALKER_UTCL1_TRIG__EXEC_PERM__SHIFT 0x8
++#define RLC_PREWALKER_UTCL1_TRIG__RESERVED__SHIFT 0x9
++#define RLC_PREWALKER_UTCL1_TRIG__READY__SHIFT 0x1f
++#define RLC_PREWALKER_UTCL1_TRIG__VALID_MASK 0x00000001L
++#define RLC_PREWALKER_UTCL1_TRIG__VMID_MASK 0x0000001EL
++#define RLC_PREWALKER_UTCL1_TRIG__PRIME_MODE_MASK 0x00000020L
++#define RLC_PREWALKER_UTCL1_TRIG__READ_PERM_MASK 0x00000040L
++#define RLC_PREWALKER_UTCL1_TRIG__WRITE_PERM_MASK 0x00000080L
++#define RLC_PREWALKER_UTCL1_TRIG__EXEC_PERM_MASK 0x00000100L
++#define RLC_PREWALKER_UTCL1_TRIG__RESERVED_MASK 0x7FFFFE00L
++#define RLC_PREWALKER_UTCL1_TRIG__READY_MASK 0x80000000L
++//RLC_PREWALKER_UTCL1_ADDR_LSB
++#define RLC_PREWALKER_UTCL1_ADDR_LSB__ADDR_LSB__SHIFT 0x0
++#define RLC_PREWALKER_UTCL1_ADDR_LSB__ADDR_LSB_MASK 0xFFFFFFFFL
++//RLC_PREWALKER_UTCL1_ADDR_MSB
++#define RLC_PREWALKER_UTCL1_ADDR_MSB__ADDR_MSB__SHIFT 0x0
++#define RLC_PREWALKER_UTCL1_ADDR_MSB__ADDR_MSB_MASK 0x0000FFFFL
++//RLC_PREWALKER_UTCL1_SIZE_LSB
++#define RLC_PREWALKER_UTCL1_SIZE_LSB__SIZE_LSB__SHIFT 0x0
++#define RLC_PREWALKER_UTCL1_SIZE_LSB__SIZE_LSB_MASK 0xFFFFFFFFL
++//RLC_PREWALKER_UTCL1_SIZE_MSB
++#define RLC_PREWALKER_UTCL1_SIZE_MSB__SIZE_MSB__SHIFT 0x0
++#define RLC_PREWALKER_UTCL1_SIZE_MSB__SIZE_MSB_MASK 0x00000003L
++//RLC_DSM_TRIG
++//RLC_UTCL1_STATUS
++#define RLC_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
++#define RLC_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
++#define RLC_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
++#define RLC_UTCL1_STATUS__RESERVED__SHIFT 0x3
++#define RLC_UTCL1_STATUS__FAULT_UTCL1ID__SHIFT 0x8
++#define RLC_UTCL1_STATUS__RESERVED_1__SHIFT 0xe
++#define RLC_UTCL1_STATUS__RETRY_UTCL1ID__SHIFT 0x10
++#define RLC_UTCL1_STATUS__RESERVED_2__SHIFT 0x16
++#define RLC_UTCL1_STATUS__PRT_UTCL1ID__SHIFT 0x18
++#define RLC_UTCL1_STATUS__RESERVED_3__SHIFT 0x1e
++#define RLC_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
++#define RLC_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
++#define RLC_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
++#define RLC_UTCL1_STATUS__RESERVED_MASK 0x000000F8L
++#define RLC_UTCL1_STATUS__FAULT_UTCL1ID_MASK 0x00003F00L
++#define RLC_UTCL1_STATUS__RESERVED_1_MASK 0x0000C000L
++#define RLC_UTCL1_STATUS__RETRY_UTCL1ID_MASK 0x003F0000L
++#define RLC_UTCL1_STATUS__RESERVED_2_MASK 0x00C00000L
++#define RLC_UTCL1_STATUS__PRT_UTCL1ID_MASK 0x3F000000L
++#define RLC_UTCL1_STATUS__RESERVED_3_MASK 0xC0000000L
++//RLC_R2I_CNTL_0
++#define RLC_R2I_CNTL_0__Data__SHIFT 0x0
++#define RLC_R2I_CNTL_0__Data_MASK 0xFFFFFFFFL
++//RLC_R2I_CNTL_1
++#define RLC_R2I_CNTL_1__Data__SHIFT 0x0
++#define RLC_R2I_CNTL_1__Data_MASK 0xFFFFFFFFL
++//RLC_R2I_CNTL_2
++#define RLC_R2I_CNTL_2__Data__SHIFT 0x0
++#define RLC_R2I_CNTL_2__Data_MASK 0xFFFFFFFFL
++//RLC_R2I_CNTL_3
++#define RLC_R2I_CNTL_3__Data__SHIFT 0x0
++#define RLC_R2I_CNTL_3__Data_MASK 0xFFFFFFFFL
++//RLC_UTCL2_CNTL
++#define RLC_UTCL2_CNTL__MTYPE_NO_PTE_MODE__SHIFT 0x0
++#define RLC_UTCL2_CNTL__RESERVED__SHIFT 0x1
++#define RLC_UTCL2_CNTL__MTYPE_NO_PTE_MODE_MASK 0x00000001L
++#define RLC_UTCL2_CNTL__RESERVED_MASK 0xFFFFFFFEL
++//RLC_LBPW_CU_STAT
++#define RLC_LBPW_CU_STAT__MAX_CU__SHIFT 0x0
++#define RLC_LBPW_CU_STAT__ON_CU__SHIFT 0x10
++#define RLC_LBPW_CU_STAT__MAX_CU_MASK 0x0000FFFFL
++#define RLC_LBPW_CU_STAT__ON_CU_MASK 0xFFFF0000L
++//RLC_DS_CNTL
++#define RLC_DS_CNTL__GFX_CLK_DS_RLC_BUSY_MASK__SHIFT 0x0
++#define RLC_DS_CNTL__GFX_CLK_DS_CP_BUSY_MASK__SHIFT 0x1
++#define RLC_DS_CNTL__RESRVED__SHIFT 0x2
++#define RLC_DS_CNTL__SOC_CLK_DS_RLC_BUSY_MASK__SHIFT 0x10
++#define RLC_DS_CNTL__SOC_CLK_DS_CP_BUSY_MASK__SHIFT 0x11
++#define RLC_DS_CNTL__RESRVED_1__SHIFT 0x12
++#define RLC_DS_CNTL__GFX_CLK_DS_RLC_BUSY_MASK_MASK 0x00000001L
++#define RLC_DS_CNTL__GFX_CLK_DS_CP_BUSY_MASK_MASK 0x00000002L
++#define RLC_DS_CNTL__RESRVED_MASK 0x0000FFFCL
++#define RLC_DS_CNTL__SOC_CLK_DS_RLC_BUSY_MASK_MASK 0x00010000L
++#define RLC_DS_CNTL__SOC_CLK_DS_CP_BUSY_MASK_MASK 0x00020000L
++#define RLC_DS_CNTL__RESRVED_1_MASK 0xFFFC0000L
++//RLC_RLCV_SPARE_INT
++#define RLC_RLCV_SPARE_INT__INTERRUPT__SHIFT 0x0
++#define RLC_RLCV_SPARE_INT__RESERVED__SHIFT 0x1
++#define RLC_RLCV_SPARE_INT__INTERRUPT_MASK 0x00000001L
++#define RLC_RLCV_SPARE_INT__RESERVED_MASK 0xFFFFFFFEL
++
++
++// addressBlock: gc_pwrdec
++//CGTS_SM_CTRL_REG
++#define CGTS_SM_CTRL_REG__ON_SEQ_DELAY__SHIFT 0x0
++#define CGTS_SM_CTRL_REG__OFF_SEQ_DELAY__SHIFT 0x4
++#define CGTS_SM_CTRL_REG__MGCG_ENABLED__SHIFT 0xc
++#define CGTS_SM_CTRL_REG__BASE_MODE__SHIFT 0x10
++#define CGTS_SM_CTRL_REG__SM_MODE__SHIFT 0x11
++#define CGTS_SM_CTRL_REG__SM_MODE_ENABLE__SHIFT 0x14
++#define CGTS_SM_CTRL_REG__OVERRIDE__SHIFT 0x15
++#define CGTS_SM_CTRL_REG__LS_OVERRIDE__SHIFT 0x16
++#define CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN__SHIFT 0x17
++#define CGTS_SM_CTRL_REG__ON_MONITOR_ADD__SHIFT 0x18
++#define CGTS_SM_CTRL_REG__ON_SEQ_DELAY_MASK 0x0000000FL
++#define CGTS_SM_CTRL_REG__OFF_SEQ_DELAY_MASK 0x00000FF0L
++#define CGTS_SM_CTRL_REG__MGCG_ENABLED_MASK 0x00001000L
++#define CGTS_SM_CTRL_REG__BASE_MODE_MASK 0x00010000L
++#define CGTS_SM_CTRL_REG__SM_MODE_MASK 0x000E0000L
++#define CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK 0x00100000L
++#define CGTS_SM_CTRL_REG__OVERRIDE_MASK 0x00200000L
++#define CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK 0x00400000L
++#define CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK 0x00800000L
++#define CGTS_SM_CTRL_REG__ON_MONITOR_ADD_MASK 0xFF000000L
++//CGTS_RD_CTRL_REG
++#define CGTS_RD_CTRL_REG__ROW_MUX_SEL__SHIFT 0x0
++#define CGTS_RD_CTRL_REG__REG_MUX_SEL__SHIFT 0x8
++#define CGTS_RD_CTRL_REG__ROW_MUX_SEL_MASK 0x0000001FL
++#define CGTS_RD_CTRL_REG__REG_MUX_SEL_MASK 0x00001F00L
++//CGTS_RD_REG
++#define CGTS_RD_REG__READ_DATA__SHIFT 0x0
++#define CGTS_RD_REG__READ_DATA_MASK 0x00003FFFL
++//CGTS_TCC_DISABLE
++#define CGTS_TCC_DISABLE__TCC_DISABLE__SHIFT 0x10
++#define CGTS_TCC_DISABLE__TCC_DISABLE_MASK 0xFFFF0000L
++//CGTS_USER_TCC_DISABLE
++#define CGTS_USER_TCC_DISABLE__TCC_DISABLE__SHIFT 0x10
++#define CGTS_USER_TCC_DISABLE__TCC_DISABLE_MASK 0xFFFF0000L
++//CGTS_CU0_SP0_CTRL_REG
++#define CGTS_CU0_SP0_CTRL_REG__SP00__SHIFT 0x0
++#define CGTS_CU0_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
++#define CGTS_CU0_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU0_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU0_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU0_SP0_CTRL_REG__SP01__SHIFT 0x10
++#define CGTS_CU0_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
++#define CGTS_CU0_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU0_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU0_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU0_SP0_CTRL_REG__SP00_MASK 0x0000007FL
++#define CGTS_CU0_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU0_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU0_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU0_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU0_SP0_CTRL_REG__SP01_MASK 0x007F0000L
++#define CGTS_CU0_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU0_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU0_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU0_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU0_LDS_SQ_CTRL_REG
++#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
++#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
++#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
++#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
++#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
++#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
++#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU0_TA_SQC_CTRL_REG
++#define CGTS_CU0_TA_SQC_CTRL_REG__TA__SHIFT 0x0
++#define CGTS_CU0_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
++#define CGTS_CU0_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU0_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU0_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU0_TA_SQC_CTRL_REG__SQC__SHIFT 0x10
++#define CGTS_CU0_TA_SQC_CTRL_REG__SQC_OVERRIDE__SHIFT 0x17
++#define CGTS_CU0_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU0_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU0_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU0_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
++#define CGTS_CU0_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU0_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU0_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU0_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU0_TA_SQC_CTRL_REG__SQC_MASK 0x007F0000L
++#define CGTS_CU0_TA_SQC_CTRL_REG__SQC_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU0_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU0_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU0_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU0_SP1_CTRL_REG
++#define CGTS_CU0_SP1_CTRL_REG__SP10__SHIFT 0x0
++#define CGTS_CU0_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
++#define CGTS_CU0_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU0_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU0_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU0_SP1_CTRL_REG__SP11__SHIFT 0x10
++#define CGTS_CU0_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
++#define CGTS_CU0_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU0_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU0_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU0_SP1_CTRL_REG__SP10_MASK 0x0000007FL
++#define CGTS_CU0_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU0_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU0_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU0_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU0_SP1_CTRL_REG__SP11_MASK 0x007F0000L
++#define CGTS_CU0_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU0_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU0_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU0_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU0_TD_TCP_CTRL_REG
++#define CGTS_CU0_TD_TCP_CTRL_REG__TD__SHIFT 0x0
++#define CGTS_CU0_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
++#define CGTS_CU0_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU0_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU0_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU0_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
++#define CGTS_CU0_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
++#define CGTS_CU0_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU0_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU0_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU0_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
++#define CGTS_CU0_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU0_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU0_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU0_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU0_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
++#define CGTS_CU0_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU0_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU0_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU0_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU1_SP0_CTRL_REG
++#define CGTS_CU1_SP0_CTRL_REG__SP00__SHIFT 0x0
++#define CGTS_CU1_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
++#define CGTS_CU1_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU1_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU1_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU1_SP0_CTRL_REG__SP01__SHIFT 0x10
++#define CGTS_CU1_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
++#define CGTS_CU1_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU1_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU1_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU1_SP0_CTRL_REG__SP00_MASK 0x0000007FL
++#define CGTS_CU1_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU1_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU1_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU1_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU1_SP0_CTRL_REG__SP01_MASK 0x007F0000L
++#define CGTS_CU1_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU1_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU1_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU1_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU1_LDS_SQ_CTRL_REG
++#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
++#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
++#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
++#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
++#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
++#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
++#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU1_TA_SQC_CTRL_REG
++#define CGTS_CU1_TA_SQC_CTRL_REG__TA__SHIFT 0x0
++#define CGTS_CU1_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
++#define CGTS_CU1_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU1_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU1_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU1_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
++#define CGTS_CU1_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU1_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU1_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU1_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++//CGTS_CU1_SP1_CTRL_REG
++#define CGTS_CU1_SP1_CTRL_REG__SP10__SHIFT 0x0
++#define CGTS_CU1_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
++#define CGTS_CU1_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU1_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU1_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU1_SP1_CTRL_REG__SP11__SHIFT 0x10
++#define CGTS_CU1_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
++#define CGTS_CU1_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU1_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU1_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU1_SP1_CTRL_REG__SP10_MASK 0x0000007FL
++#define CGTS_CU1_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU1_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU1_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU1_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU1_SP1_CTRL_REG__SP11_MASK 0x007F0000L
++#define CGTS_CU1_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU1_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU1_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU1_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU1_TD_TCP_CTRL_REG
++#define CGTS_CU1_TD_TCP_CTRL_REG__TD__SHIFT 0x0
++#define CGTS_CU1_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
++#define CGTS_CU1_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU1_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU1_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU1_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
++#define CGTS_CU1_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
++#define CGTS_CU1_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU1_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU1_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU1_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
++#define CGTS_CU1_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU1_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU1_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU1_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU1_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
++#define CGTS_CU1_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU1_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU1_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU1_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU2_SP0_CTRL_REG
++#define CGTS_CU2_SP0_CTRL_REG__SP00__SHIFT 0x0
++#define CGTS_CU2_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
++#define CGTS_CU2_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU2_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU2_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU2_SP0_CTRL_REG__SP01__SHIFT 0x10
++#define CGTS_CU2_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
++#define CGTS_CU2_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU2_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU2_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU2_SP0_CTRL_REG__SP00_MASK 0x0000007FL
++#define CGTS_CU2_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU2_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU2_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU2_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU2_SP0_CTRL_REG__SP01_MASK 0x007F0000L
++#define CGTS_CU2_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU2_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU2_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU2_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU2_LDS_SQ_CTRL_REG
++#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
++#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
++#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
++#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
++#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
++#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
++#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU2_TA_SQC_CTRL_REG
++#define CGTS_CU2_TA_SQC_CTRL_REG__TA__SHIFT 0x0
++#define CGTS_CU2_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
++#define CGTS_CU2_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU2_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU2_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU2_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
++#define CGTS_CU2_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU2_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU2_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU2_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++//CGTS_CU2_SP1_CTRL_REG
++#define CGTS_CU2_SP1_CTRL_REG__SP10__SHIFT 0x0
++#define CGTS_CU2_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
++#define CGTS_CU2_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU2_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU2_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU2_SP1_CTRL_REG__SP11__SHIFT 0x10
++#define CGTS_CU2_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
++#define CGTS_CU2_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU2_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU2_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU2_SP1_CTRL_REG__SP10_MASK 0x0000007FL
++#define CGTS_CU2_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU2_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU2_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU2_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU2_SP1_CTRL_REG__SP11_MASK 0x007F0000L
++#define CGTS_CU2_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU2_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU2_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU2_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU2_TD_TCP_CTRL_REG
++#define CGTS_CU2_TD_TCP_CTRL_REG__TD__SHIFT 0x0
++#define CGTS_CU2_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
++#define CGTS_CU2_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU2_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU2_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU2_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
++#define CGTS_CU2_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
++#define CGTS_CU2_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU2_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU2_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU2_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
++#define CGTS_CU2_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU2_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU2_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU2_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU2_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
++#define CGTS_CU2_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU2_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU2_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU2_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU3_SP0_CTRL_REG
++#define CGTS_CU3_SP0_CTRL_REG__SP00__SHIFT 0x0
++#define CGTS_CU3_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
++#define CGTS_CU3_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU3_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU3_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU3_SP0_CTRL_REG__SP01__SHIFT 0x10
++#define CGTS_CU3_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
++#define CGTS_CU3_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU3_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU3_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU3_SP0_CTRL_REG__SP00_MASK 0x0000007FL
++#define CGTS_CU3_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU3_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU3_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU3_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU3_SP0_CTRL_REG__SP01_MASK 0x007F0000L
++#define CGTS_CU3_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU3_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU3_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU3_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU3_LDS_SQ_CTRL_REG
++#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
++#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
++#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
++#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
++#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
++#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
++#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU3_TA_SQC_CTRL_REG
++#define CGTS_CU3_TA_SQC_CTRL_REG__TA__SHIFT 0x0
++#define CGTS_CU3_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
++#define CGTS_CU3_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU3_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU3_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU3_TA_SQC_CTRL_REG__SQC__SHIFT 0x10
++#define CGTS_CU3_TA_SQC_CTRL_REG__SQC_OVERRIDE__SHIFT 0x17
++#define CGTS_CU3_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU3_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU3_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU3_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
++#define CGTS_CU3_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU3_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU3_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU3_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU3_TA_SQC_CTRL_REG__SQC_MASK 0x007F0000L
++#define CGTS_CU3_TA_SQC_CTRL_REG__SQC_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU3_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU3_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU3_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU3_SP1_CTRL_REG
++#define CGTS_CU3_SP1_CTRL_REG__SP10__SHIFT 0x0
++#define CGTS_CU3_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
++#define CGTS_CU3_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU3_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU3_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU3_SP1_CTRL_REG__SP11__SHIFT 0x10
++#define CGTS_CU3_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
++#define CGTS_CU3_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU3_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU3_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU3_SP1_CTRL_REG__SP10_MASK 0x0000007FL
++#define CGTS_CU3_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU3_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU3_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU3_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU3_SP1_CTRL_REG__SP11_MASK 0x007F0000L
++#define CGTS_CU3_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU3_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU3_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU3_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU3_TD_TCP_CTRL_REG
++#define CGTS_CU3_TD_TCP_CTRL_REG__TD__SHIFT 0x0
++#define CGTS_CU3_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
++#define CGTS_CU3_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU3_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU3_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU3_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
++#define CGTS_CU3_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
++#define CGTS_CU3_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU3_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU3_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU3_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
++#define CGTS_CU3_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU3_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU3_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU3_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU3_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
++#define CGTS_CU3_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU3_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU3_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU3_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU4_SP0_CTRL_REG
++#define CGTS_CU4_SP0_CTRL_REG__SP00__SHIFT 0x0
++#define CGTS_CU4_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
++#define CGTS_CU4_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU4_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU4_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU4_SP0_CTRL_REG__SP01__SHIFT 0x10
++#define CGTS_CU4_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
++#define CGTS_CU4_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU4_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU4_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU4_SP0_CTRL_REG__SP00_MASK 0x0000007FL
++#define CGTS_CU4_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU4_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU4_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU4_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU4_SP0_CTRL_REG__SP01_MASK 0x007F0000L
++#define CGTS_CU4_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU4_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU4_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU4_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU4_LDS_SQ_CTRL_REG
++#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
++#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
++#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
++#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
++#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
++#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
++#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU4_TA_SQC_CTRL_REG
++#define CGTS_CU4_TA_SQC_CTRL_REG__TA__SHIFT 0x0
++#define CGTS_CU4_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
++#define CGTS_CU4_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU4_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU4_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU4_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
++#define CGTS_CU4_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU4_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU4_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU4_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++//CGTS_CU4_SP1_CTRL_REG
++#define CGTS_CU4_SP1_CTRL_REG__SP10__SHIFT 0x0
++#define CGTS_CU4_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
++#define CGTS_CU4_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU4_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU4_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU4_SP1_CTRL_REG__SP11__SHIFT 0x10
++#define CGTS_CU4_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
++#define CGTS_CU4_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU4_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU4_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU4_SP1_CTRL_REG__SP10_MASK 0x0000007FL
++#define CGTS_CU4_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU4_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU4_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU4_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU4_SP1_CTRL_REG__SP11_MASK 0x007F0000L
++#define CGTS_CU4_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU4_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU4_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU4_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU4_TD_TCP_CTRL_REG
++#define CGTS_CU4_TD_TCP_CTRL_REG__TD__SHIFT 0x0
++#define CGTS_CU4_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
++#define CGTS_CU4_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU4_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU4_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU4_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
++#define CGTS_CU4_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
++#define CGTS_CU4_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU4_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU4_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU4_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
++#define CGTS_CU4_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU4_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU4_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU4_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU4_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
++#define CGTS_CU4_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU4_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU4_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU4_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU5_SP0_CTRL_REG
++#define CGTS_CU5_SP0_CTRL_REG__SP00__SHIFT 0x0
++#define CGTS_CU5_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
++#define CGTS_CU5_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU5_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU5_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU5_SP0_CTRL_REG__SP01__SHIFT 0x10
++#define CGTS_CU5_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
++#define CGTS_CU5_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU5_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU5_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU5_SP0_CTRL_REG__SP00_MASK 0x0000007FL
++#define CGTS_CU5_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU5_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU5_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU5_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU5_SP0_CTRL_REG__SP01_MASK 0x007F0000L
++#define CGTS_CU5_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU5_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU5_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU5_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU5_LDS_SQ_CTRL_REG
++#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
++#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
++#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
++#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
++#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
++#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
++#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU5_TA_SQC_CTRL_REG
++#define CGTS_CU5_TA_SQC_CTRL_REG__TA__SHIFT 0x0
++#define CGTS_CU5_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
++#define CGTS_CU5_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU5_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU5_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU5_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
++#define CGTS_CU5_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU5_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU5_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU5_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++//CGTS_CU5_SP1_CTRL_REG
++#define CGTS_CU5_SP1_CTRL_REG__SP10__SHIFT 0x0
++#define CGTS_CU5_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
++#define CGTS_CU5_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU5_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU5_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU5_SP1_CTRL_REG__SP11__SHIFT 0x10
++#define CGTS_CU5_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
++#define CGTS_CU5_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU5_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU5_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU5_SP1_CTRL_REG__SP10_MASK 0x0000007FL
++#define CGTS_CU5_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU5_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU5_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU5_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU5_SP1_CTRL_REG__SP11_MASK 0x007F0000L
++#define CGTS_CU5_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU5_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU5_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU5_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU5_TD_TCP_CTRL_REG
++#define CGTS_CU5_TD_TCP_CTRL_REG__TD__SHIFT 0x0
++#define CGTS_CU5_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
++#define CGTS_CU5_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU5_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU5_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU5_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
++#define CGTS_CU5_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
++#define CGTS_CU5_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU5_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU5_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU5_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
++#define CGTS_CU5_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU5_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU5_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU5_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU5_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
++#define CGTS_CU5_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU5_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU5_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU5_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU6_SP0_CTRL_REG
++#define CGTS_CU6_SP0_CTRL_REG__SP00__SHIFT 0x0
++#define CGTS_CU6_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
++#define CGTS_CU6_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU6_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU6_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU6_SP0_CTRL_REG__SP01__SHIFT 0x10
++#define CGTS_CU6_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
++#define CGTS_CU6_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU6_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU6_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU6_SP0_CTRL_REG__SP00_MASK 0x0000007FL
++#define CGTS_CU6_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU6_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU6_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU6_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU6_SP0_CTRL_REG__SP01_MASK 0x007F0000L
++#define CGTS_CU6_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU6_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU6_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU6_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU6_LDS_SQ_CTRL_REG
++#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
++#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
++#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
++#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
++#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
++#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
++#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU6_TA_SQC_CTRL_REG
++#define CGTS_CU6_TA_SQC_CTRL_REG__TA__SHIFT 0x0
++#define CGTS_CU6_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
++#define CGTS_CU6_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU6_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU6_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU6_TA_SQC_CTRL_REG__SQC__SHIFT 0x10
++#define CGTS_CU6_TA_SQC_CTRL_REG__SQC_OVERRIDE__SHIFT 0x17
++#define CGTS_CU6_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU6_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU6_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU6_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
++#define CGTS_CU6_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU6_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU6_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU6_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU6_TA_SQC_CTRL_REG__SQC_MASK 0x007F0000L
++#define CGTS_CU6_TA_SQC_CTRL_REG__SQC_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU6_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU6_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU6_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU6_SP1_CTRL_REG
++#define CGTS_CU6_SP1_CTRL_REG__SP10__SHIFT 0x0
++#define CGTS_CU6_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
++#define CGTS_CU6_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU6_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU6_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU6_SP1_CTRL_REG__SP11__SHIFT 0x10
++#define CGTS_CU6_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
++#define CGTS_CU6_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU6_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU6_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU6_SP1_CTRL_REG__SP10_MASK 0x0000007FL
++#define CGTS_CU6_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU6_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU6_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU6_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU6_SP1_CTRL_REG__SP11_MASK 0x007F0000L
++#define CGTS_CU6_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU6_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU6_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU6_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU6_TD_TCP_CTRL_REG
++#define CGTS_CU6_TD_TCP_CTRL_REG__TD__SHIFT 0x0
++#define CGTS_CU6_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
++#define CGTS_CU6_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU6_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU6_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU6_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
++#define CGTS_CU6_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
++#define CGTS_CU6_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU6_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU6_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU6_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
++#define CGTS_CU6_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU6_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU6_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU6_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU6_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
++#define CGTS_CU6_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU6_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU6_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU6_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU7_SP0_CTRL_REG
++#define CGTS_CU7_SP0_CTRL_REG__SP00__SHIFT 0x0
++#define CGTS_CU7_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
++#define CGTS_CU7_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU7_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU7_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU7_SP0_CTRL_REG__SP01__SHIFT 0x10
++#define CGTS_CU7_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
++#define CGTS_CU7_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU7_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU7_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU7_SP0_CTRL_REG__SP00_MASK 0x0000007FL
++#define CGTS_CU7_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU7_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU7_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU7_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU7_SP0_CTRL_REG__SP01_MASK 0x007F0000L
++#define CGTS_CU7_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU7_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU7_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU7_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU7_LDS_SQ_CTRL_REG
++#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
++#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
++#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
++#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
++#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
++#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
++#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU7_TA_SQC_CTRL_REG
++#define CGTS_CU7_TA_SQC_CTRL_REG__TA__SHIFT 0x0
++#define CGTS_CU7_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
++#define CGTS_CU7_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU7_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU7_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU7_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
++#define CGTS_CU7_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU7_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU7_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU7_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++//CGTS_CU7_SP1_CTRL_REG
++#define CGTS_CU7_SP1_CTRL_REG__SP10__SHIFT 0x0
++#define CGTS_CU7_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
++#define CGTS_CU7_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU7_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU7_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU7_SP1_CTRL_REG__SP11__SHIFT 0x10
++#define CGTS_CU7_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
++#define CGTS_CU7_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU7_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU7_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU7_SP1_CTRL_REG__SP10_MASK 0x0000007FL
++#define CGTS_CU7_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU7_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU7_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU7_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU7_SP1_CTRL_REG__SP11_MASK 0x007F0000L
++#define CGTS_CU7_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU7_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU7_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU7_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU7_TD_TCP_CTRL_REG
++#define CGTS_CU7_TD_TCP_CTRL_REG__TD__SHIFT 0x0
++#define CGTS_CU7_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
++#define CGTS_CU7_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU7_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU7_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU7_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
++#define CGTS_CU7_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
++#define CGTS_CU7_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU7_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU7_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU7_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
++#define CGTS_CU7_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU7_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU7_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU7_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU7_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
++#define CGTS_CU7_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU7_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU7_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU7_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU8_SP0_CTRL_REG
++#define CGTS_CU8_SP0_CTRL_REG__SP00__SHIFT 0x0
++#define CGTS_CU8_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
++#define CGTS_CU8_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU8_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU8_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU8_SP0_CTRL_REG__SP01__SHIFT 0x10
++#define CGTS_CU8_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
++#define CGTS_CU8_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU8_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU8_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU8_SP0_CTRL_REG__SP00_MASK 0x0000007FL
++#define CGTS_CU8_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU8_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU8_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU8_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU8_SP0_CTRL_REG__SP01_MASK 0x007F0000L
++#define CGTS_CU8_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU8_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU8_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU8_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU8_LDS_SQ_CTRL_REG
++#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
++#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
++#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
++#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
++#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
++#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
++#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU8_TA_SQC_CTRL_REG
++#define CGTS_CU8_TA_SQC_CTRL_REG__TA__SHIFT 0x0
++#define CGTS_CU8_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
++#define CGTS_CU8_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU8_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU8_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU8_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
++#define CGTS_CU8_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU8_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU8_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU8_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++//CGTS_CU8_SP1_CTRL_REG
++#define CGTS_CU8_SP1_CTRL_REG__SP10__SHIFT 0x0
++#define CGTS_CU8_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
++#define CGTS_CU8_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU8_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU8_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU8_SP1_CTRL_REG__SP11__SHIFT 0x10
++#define CGTS_CU8_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
++#define CGTS_CU8_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU8_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU8_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU8_SP1_CTRL_REG__SP10_MASK 0x0000007FL
++#define CGTS_CU8_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU8_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU8_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU8_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU8_SP1_CTRL_REG__SP11_MASK 0x007F0000L
++#define CGTS_CU8_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU8_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU8_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU8_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU8_TD_TCP_CTRL_REG
++#define CGTS_CU8_TD_TCP_CTRL_REG__TD__SHIFT 0x0
++#define CGTS_CU8_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
++#define CGTS_CU8_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU8_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU8_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU8_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
++#define CGTS_CU8_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
++#define CGTS_CU8_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU8_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU8_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU8_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
++#define CGTS_CU8_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU8_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU8_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU8_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU8_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
++#define CGTS_CU8_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU8_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU8_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU8_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU9_SP0_CTRL_REG
++#define CGTS_CU9_SP0_CTRL_REG__SP00__SHIFT 0x0
++#define CGTS_CU9_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
++#define CGTS_CU9_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU9_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU9_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU9_SP0_CTRL_REG__SP01__SHIFT 0x10
++#define CGTS_CU9_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
++#define CGTS_CU9_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU9_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU9_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU9_SP0_CTRL_REG__SP00_MASK 0x0000007FL
++#define CGTS_CU9_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU9_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU9_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU9_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU9_SP0_CTRL_REG__SP01_MASK 0x007F0000L
++#define CGTS_CU9_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU9_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU9_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU9_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU9_LDS_SQ_CTRL_REG
++#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
++#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
++#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
++#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
++#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
++#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
++#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU9_TA_SQC_CTRL_REG
++#define CGTS_CU9_TA_SQC_CTRL_REG__TA__SHIFT 0x0
++#define CGTS_CU9_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
++#define CGTS_CU9_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU9_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU9_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU9_TA_SQC_CTRL_REG__SQC__SHIFT 0x10
++#define CGTS_CU9_TA_SQC_CTRL_REG__SQC_OVERRIDE__SHIFT 0x17
++#define CGTS_CU9_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU9_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU9_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU9_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
++#define CGTS_CU9_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU9_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU9_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU9_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU9_TA_SQC_CTRL_REG__SQC_MASK 0x007F0000L
++#define CGTS_CU9_TA_SQC_CTRL_REG__SQC_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU9_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU9_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU9_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU9_SP1_CTRL_REG
++#define CGTS_CU9_SP1_CTRL_REG__SP10__SHIFT 0x0
++#define CGTS_CU9_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
++#define CGTS_CU9_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU9_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU9_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU9_SP1_CTRL_REG__SP11__SHIFT 0x10
++#define CGTS_CU9_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
++#define CGTS_CU9_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU9_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU9_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU9_SP1_CTRL_REG__SP10_MASK 0x0000007FL
++#define CGTS_CU9_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU9_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU9_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU9_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU9_SP1_CTRL_REG__SP11_MASK 0x007F0000L
++#define CGTS_CU9_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU9_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU9_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU9_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU9_TD_TCP_CTRL_REG
++#define CGTS_CU9_TD_TCP_CTRL_REG__TD__SHIFT 0x0
++#define CGTS_CU9_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
++#define CGTS_CU9_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU9_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU9_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU9_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
++#define CGTS_CU9_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
++#define CGTS_CU9_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU9_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU9_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU9_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
++#define CGTS_CU9_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU9_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU9_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU9_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU9_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
++#define CGTS_CU9_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU9_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU9_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU9_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU10_SP0_CTRL_REG
++#define CGTS_CU10_SP0_CTRL_REG__SP00__SHIFT 0x0
++#define CGTS_CU10_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
++#define CGTS_CU10_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU10_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU10_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU10_SP0_CTRL_REG__SP01__SHIFT 0x10
++#define CGTS_CU10_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
++#define CGTS_CU10_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU10_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU10_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU10_SP0_CTRL_REG__SP00_MASK 0x0000007FL
++#define CGTS_CU10_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU10_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU10_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU10_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU10_SP0_CTRL_REG__SP01_MASK 0x007F0000L
++#define CGTS_CU10_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU10_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU10_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU10_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU10_LDS_SQ_CTRL_REG
++#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
++#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
++#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
++#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
++#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
++#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
++#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU10_TA_SQC_CTRL_REG
++#define CGTS_CU10_TA_SQC_CTRL_REG__TA__SHIFT 0x0
++#define CGTS_CU10_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
++#define CGTS_CU10_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU10_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU10_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU10_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
++#define CGTS_CU10_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU10_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU10_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU10_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++//CGTS_CU10_SP1_CTRL_REG
++#define CGTS_CU10_SP1_CTRL_REG__SP10__SHIFT 0x0
++#define CGTS_CU10_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
++#define CGTS_CU10_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU10_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU10_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU10_SP1_CTRL_REG__SP11__SHIFT 0x10
++#define CGTS_CU10_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
++#define CGTS_CU10_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU10_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU10_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU10_SP1_CTRL_REG__SP10_MASK 0x0000007FL
++#define CGTS_CU10_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU10_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU10_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU10_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU10_SP1_CTRL_REG__SP11_MASK 0x007F0000L
++#define CGTS_CU10_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU10_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU10_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU10_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU10_TD_TCP_CTRL_REG
++#define CGTS_CU10_TD_TCP_CTRL_REG__TD__SHIFT 0x0
++#define CGTS_CU10_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
++#define CGTS_CU10_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU10_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU10_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU10_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
++#define CGTS_CU10_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
++#define CGTS_CU10_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU10_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU10_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU10_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
++#define CGTS_CU10_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU10_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU10_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU10_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU10_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
++#define CGTS_CU10_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU10_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU10_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU10_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU11_SP0_CTRL_REG
++#define CGTS_CU11_SP0_CTRL_REG__SP00__SHIFT 0x0
++#define CGTS_CU11_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
++#define CGTS_CU11_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU11_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU11_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU11_SP0_CTRL_REG__SP01__SHIFT 0x10
++#define CGTS_CU11_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
++#define CGTS_CU11_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU11_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU11_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU11_SP0_CTRL_REG__SP00_MASK 0x0000007FL
++#define CGTS_CU11_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU11_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU11_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU11_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU11_SP0_CTRL_REG__SP01_MASK 0x007F0000L
++#define CGTS_CU11_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU11_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU11_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU11_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU11_LDS_SQ_CTRL_REG
++#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
++#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
++#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
++#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
++#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
++#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
++#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU11_TA_SQC_CTRL_REG
++#define CGTS_CU11_TA_SQC_CTRL_REG__TA__SHIFT 0x0
++#define CGTS_CU11_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
++#define CGTS_CU11_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU11_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU11_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU11_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
++#define CGTS_CU11_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU11_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU11_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU11_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++//CGTS_CU11_SP1_CTRL_REG
++#define CGTS_CU11_SP1_CTRL_REG__SP10__SHIFT 0x0
++#define CGTS_CU11_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
++#define CGTS_CU11_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU11_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU11_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU11_SP1_CTRL_REG__SP11__SHIFT 0x10
++#define CGTS_CU11_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
++#define CGTS_CU11_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU11_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU11_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU11_SP1_CTRL_REG__SP10_MASK 0x0000007FL
++#define CGTS_CU11_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU11_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU11_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU11_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU11_SP1_CTRL_REG__SP11_MASK 0x007F0000L
++#define CGTS_CU11_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU11_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU11_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU11_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU11_TD_TCP_CTRL_REG
++#define CGTS_CU11_TD_TCP_CTRL_REG__TD__SHIFT 0x0
++#define CGTS_CU11_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
++#define CGTS_CU11_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU11_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU11_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU11_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
++#define CGTS_CU11_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
++#define CGTS_CU11_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU11_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU11_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU11_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
++#define CGTS_CU11_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU11_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU11_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU11_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU11_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
++#define CGTS_CU11_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU11_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU11_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU11_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU12_SP0_CTRL_REG
++#define CGTS_CU12_SP0_CTRL_REG__SP00__SHIFT 0x0
++#define CGTS_CU12_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
++#define CGTS_CU12_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU12_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU12_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU12_SP0_CTRL_REG__SP01__SHIFT 0x10
++#define CGTS_CU12_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
++#define CGTS_CU12_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU12_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU12_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU12_SP0_CTRL_REG__SP00_MASK 0x0000007FL
++#define CGTS_CU12_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU12_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU12_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU12_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU12_SP0_CTRL_REG__SP01_MASK 0x007F0000L
++#define CGTS_CU12_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU12_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU12_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU12_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU12_LDS_SQ_CTRL_REG
++#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
++#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
++#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
++#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
++#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
++#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
++#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU12_TA_SQC_CTRL_REG
++#define CGTS_CU12_TA_SQC_CTRL_REG__TA__SHIFT 0x0
++#define CGTS_CU12_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
++#define CGTS_CU12_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU12_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU12_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU12_TA_SQC_CTRL_REG__SQC__SHIFT 0x10
++#define CGTS_CU12_TA_SQC_CTRL_REG__SQC_OVERRIDE__SHIFT 0x17
++#define CGTS_CU12_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU12_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU12_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU12_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
++#define CGTS_CU12_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU12_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU12_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU12_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU12_TA_SQC_CTRL_REG__SQC_MASK 0x007F0000L
++#define CGTS_CU12_TA_SQC_CTRL_REG__SQC_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU12_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU12_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU12_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU12_SP1_CTRL_REG
++#define CGTS_CU12_SP1_CTRL_REG__SP10__SHIFT 0x0
++#define CGTS_CU12_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
++#define CGTS_CU12_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU12_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU12_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU12_SP1_CTRL_REG__SP11__SHIFT 0x10
++#define CGTS_CU12_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
++#define CGTS_CU12_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU12_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU12_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU12_SP1_CTRL_REG__SP10_MASK 0x0000007FL
++#define CGTS_CU12_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU12_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU12_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU12_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU12_SP1_CTRL_REG__SP11_MASK 0x007F0000L
++#define CGTS_CU12_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU12_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU12_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU12_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU12_TD_TCP_CTRL_REG
++#define CGTS_CU12_TD_TCP_CTRL_REG__TD__SHIFT 0x0
++#define CGTS_CU12_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
++#define CGTS_CU12_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU12_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU12_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU12_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
++#define CGTS_CU12_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
++#define CGTS_CU12_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU12_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU12_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU12_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
++#define CGTS_CU12_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU12_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU12_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU12_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU12_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
++#define CGTS_CU12_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU12_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU12_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU12_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU13_SP0_CTRL_REG
++#define CGTS_CU13_SP0_CTRL_REG__SP00__SHIFT 0x0
++#define CGTS_CU13_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
++#define CGTS_CU13_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU13_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU13_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU13_SP0_CTRL_REG__SP01__SHIFT 0x10
++#define CGTS_CU13_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
++#define CGTS_CU13_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU13_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU13_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU13_SP0_CTRL_REG__SP00_MASK 0x0000007FL
++#define CGTS_CU13_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU13_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU13_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU13_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU13_SP0_CTRL_REG__SP01_MASK 0x007F0000L
++#define CGTS_CU13_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU13_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU13_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU13_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU13_LDS_SQ_CTRL_REG
++#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
++#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
++#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
++#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
++#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
++#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
++#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU13_TA_SQC_CTRL_REG
++#define CGTS_CU13_TA_SQC_CTRL_REG__TA__SHIFT 0x0
++#define CGTS_CU13_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
++#define CGTS_CU13_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU13_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU13_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU13_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
++#define CGTS_CU13_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU13_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU13_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU13_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++//CGTS_CU13_SP1_CTRL_REG
++#define CGTS_CU13_SP1_CTRL_REG__SP10__SHIFT 0x0
++#define CGTS_CU13_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
++#define CGTS_CU13_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU13_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU13_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU13_SP1_CTRL_REG__SP11__SHIFT 0x10
++#define CGTS_CU13_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
++#define CGTS_CU13_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU13_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU13_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU13_SP1_CTRL_REG__SP10_MASK 0x0000007FL
++#define CGTS_CU13_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU13_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU13_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU13_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU13_SP1_CTRL_REG__SP11_MASK 0x007F0000L
++#define CGTS_CU13_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU13_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU13_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU13_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU13_TD_TCP_CTRL_REG
++#define CGTS_CU13_TD_TCP_CTRL_REG__TD__SHIFT 0x0
++#define CGTS_CU13_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
++#define CGTS_CU13_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU13_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU13_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU13_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
++#define CGTS_CU13_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
++#define CGTS_CU13_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU13_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU13_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU13_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
++#define CGTS_CU13_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU13_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU13_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU13_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU13_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
++#define CGTS_CU13_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU13_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU13_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU13_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU14_SP0_CTRL_REG
++#define CGTS_CU14_SP0_CTRL_REG__SP00__SHIFT 0x0
++#define CGTS_CU14_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
++#define CGTS_CU14_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU14_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU14_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU14_SP0_CTRL_REG__SP01__SHIFT 0x10
++#define CGTS_CU14_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
++#define CGTS_CU14_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU14_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU14_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU14_SP0_CTRL_REG__SP00_MASK 0x0000007FL
++#define CGTS_CU14_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU14_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU14_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU14_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU14_SP0_CTRL_REG__SP01_MASK 0x007F0000L
++#define CGTS_CU14_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU14_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU14_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU14_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU14_LDS_SQ_CTRL_REG
++#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
++#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
++#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
++#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
++#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
++#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
++#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU14_TA_SQC_CTRL_REG
++#define CGTS_CU14_TA_SQC_CTRL_REG__TA__SHIFT 0x0
++#define CGTS_CU14_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
++#define CGTS_CU14_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU14_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU14_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU14_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
++#define CGTS_CU14_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU14_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU14_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU14_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++//CGTS_CU14_SP1_CTRL_REG
++#define CGTS_CU14_SP1_CTRL_REG__SP10__SHIFT 0x0
++#define CGTS_CU14_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
++#define CGTS_CU14_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU14_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU14_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU14_SP1_CTRL_REG__SP11__SHIFT 0x10
++#define CGTS_CU14_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
++#define CGTS_CU14_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU14_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU14_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU14_SP1_CTRL_REG__SP10_MASK 0x0000007FL
++#define CGTS_CU14_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU14_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU14_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU14_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU14_SP1_CTRL_REG__SP11_MASK 0x007F0000L
++#define CGTS_CU14_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU14_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU14_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU14_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU14_TD_TCP_CTRL_REG
++#define CGTS_CU14_TD_TCP_CTRL_REG__TD__SHIFT 0x0
++#define CGTS_CU14_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
++#define CGTS_CU14_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU14_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU14_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU14_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
++#define CGTS_CU14_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
++#define CGTS_CU14_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU14_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU14_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU14_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
++#define CGTS_CU14_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU14_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU14_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU14_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU14_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
++#define CGTS_CU14_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU14_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU14_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU14_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU15_SP0_CTRL_REG
++#define CGTS_CU15_SP0_CTRL_REG__SP00__SHIFT 0x0
++#define CGTS_CU15_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
++#define CGTS_CU15_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU15_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU15_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU15_SP0_CTRL_REG__SP01__SHIFT 0x10
++#define CGTS_CU15_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
++#define CGTS_CU15_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU15_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU15_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU15_SP0_CTRL_REG__SP00_MASK 0x0000007FL
++#define CGTS_CU15_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU15_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU15_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU15_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU15_SP0_CTRL_REG__SP01_MASK 0x007F0000L
++#define CGTS_CU15_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU15_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU15_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU15_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU15_LDS_SQ_CTRL_REG
++#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
++#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
++#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
++#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
++#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
++#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
++#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU15_TA_SQC_CTRL_REG
++#define CGTS_CU15_TA_SQC_CTRL_REG__TA__SHIFT 0x0
++#define CGTS_CU15_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
++#define CGTS_CU15_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU15_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU15_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU15_TA_SQC_CTRL_REG__SQC__SHIFT 0x10
++#define CGTS_CU15_TA_SQC_CTRL_REG__SQC_OVERRIDE__SHIFT 0x17
++#define CGTS_CU15_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU15_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU15_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU15_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
++#define CGTS_CU15_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU15_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU15_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU15_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU15_TA_SQC_CTRL_REG__SQC_MASK 0x007F0000L
++#define CGTS_CU15_TA_SQC_CTRL_REG__SQC_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU15_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU15_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU15_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU15_SP1_CTRL_REG
++#define CGTS_CU15_SP1_CTRL_REG__SP10__SHIFT 0x0
++#define CGTS_CU15_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
++#define CGTS_CU15_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU15_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU15_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU15_SP1_CTRL_REG__SP11__SHIFT 0x10
++#define CGTS_CU15_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
++#define CGTS_CU15_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU15_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU15_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU15_SP1_CTRL_REG__SP10_MASK 0x0000007FL
++#define CGTS_CU15_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU15_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU15_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU15_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU15_SP1_CTRL_REG__SP11_MASK 0x007F0000L
++#define CGTS_CU15_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU15_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU15_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU15_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU15_TD_TCP_CTRL_REG
++#define CGTS_CU15_TD_TCP_CTRL_REG__TD__SHIFT 0x0
++#define CGTS_CU15_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
++#define CGTS_CU15_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU15_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU15_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU15_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
++#define CGTS_CU15_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
++#define CGTS_CU15_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
++#define CGTS_CU15_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
++#define CGTS_CU15_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
++#define CGTS_CU15_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
++#define CGTS_CU15_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU15_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU15_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU15_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU15_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
++#define CGTS_CU15_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
++#define CGTS_CU15_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
++#define CGTS_CU15_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
++#define CGTS_CU15_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
++//CGTS_CU0_TCPI_CTRL_REG
++#define CGTS_CU0_TCPI_CTRL_REG__TCPI__SHIFT 0x0
++#define CGTS_CU0_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
++#define CGTS_CU0_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU0_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU0_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU0_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
++#define CGTS_CU0_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
++#define CGTS_CU0_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU0_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU0_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU0_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU0_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
++//CGTS_CU1_TCPI_CTRL_REG
++#define CGTS_CU1_TCPI_CTRL_REG__TCPI__SHIFT 0x0
++#define CGTS_CU1_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
++#define CGTS_CU1_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU1_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU1_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU1_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
++#define CGTS_CU1_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
++#define CGTS_CU1_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU1_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU1_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU1_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU1_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
++//CGTS_CU2_TCPI_CTRL_REG
++#define CGTS_CU2_TCPI_CTRL_REG__TCPI__SHIFT 0x0
++#define CGTS_CU2_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
++#define CGTS_CU2_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU2_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU2_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU2_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
++#define CGTS_CU2_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
++#define CGTS_CU2_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU2_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU2_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU2_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU2_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
++//CGTS_CU3_TCPI_CTRL_REG
++#define CGTS_CU3_TCPI_CTRL_REG__TCPI__SHIFT 0x0
++#define CGTS_CU3_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
++#define CGTS_CU3_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU3_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU3_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU3_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
++#define CGTS_CU3_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
++#define CGTS_CU3_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU3_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU3_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU3_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU3_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
++//CGTS_CU4_TCPI_CTRL_REG
++#define CGTS_CU4_TCPI_CTRL_REG__TCPI__SHIFT 0x0
++#define CGTS_CU4_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
++#define CGTS_CU4_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU4_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU4_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU4_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
++#define CGTS_CU4_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
++#define CGTS_CU4_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU4_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU4_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU4_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU4_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
++//CGTS_CU5_TCPI_CTRL_REG
++#define CGTS_CU5_TCPI_CTRL_REG__TCPI__SHIFT 0x0
++#define CGTS_CU5_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
++#define CGTS_CU5_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU5_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU5_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU5_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
++#define CGTS_CU5_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
++#define CGTS_CU5_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU5_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU5_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU5_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU5_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
++//CGTS_CU6_TCPI_CTRL_REG
++#define CGTS_CU6_TCPI_CTRL_REG__TCPI__SHIFT 0x0
++#define CGTS_CU6_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
++#define CGTS_CU6_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU6_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU6_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU6_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
++#define CGTS_CU6_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
++#define CGTS_CU6_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU6_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU6_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU6_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU6_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
++//CGTS_CU7_TCPI_CTRL_REG
++#define CGTS_CU7_TCPI_CTRL_REG__TCPI__SHIFT 0x0
++#define CGTS_CU7_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
++#define CGTS_CU7_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU7_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU7_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU7_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
++#define CGTS_CU7_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
++#define CGTS_CU7_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU7_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU7_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU7_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU7_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
++//CGTS_CU8_TCPI_CTRL_REG
++#define CGTS_CU8_TCPI_CTRL_REG__TCPI__SHIFT 0x0
++#define CGTS_CU8_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
++#define CGTS_CU8_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU8_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU8_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU8_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
++#define CGTS_CU8_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
++#define CGTS_CU8_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU8_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU8_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU8_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU8_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
++//CGTS_CU9_TCPI_CTRL_REG
++#define CGTS_CU9_TCPI_CTRL_REG__TCPI__SHIFT 0x0
++#define CGTS_CU9_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
++#define CGTS_CU9_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU9_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU9_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU9_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
++#define CGTS_CU9_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
++#define CGTS_CU9_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU9_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU9_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU9_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU9_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
++//CGTS_CU10_TCPI_CTRL_REG
++#define CGTS_CU10_TCPI_CTRL_REG__TCPI__SHIFT 0x0
++#define CGTS_CU10_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
++#define CGTS_CU10_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU10_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU10_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU10_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
++#define CGTS_CU10_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
++#define CGTS_CU10_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU10_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU10_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU10_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU10_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
++//CGTS_CU11_TCPI_CTRL_REG
++#define CGTS_CU11_TCPI_CTRL_REG__TCPI__SHIFT 0x0
++#define CGTS_CU11_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
++#define CGTS_CU11_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU11_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU11_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU11_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
++#define CGTS_CU11_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
++#define CGTS_CU11_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU11_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU11_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU11_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU11_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
++//CGTS_CU12_TCPI_CTRL_REG
++#define CGTS_CU12_TCPI_CTRL_REG__TCPI__SHIFT 0x0
++#define CGTS_CU12_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
++#define CGTS_CU12_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU12_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU12_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU12_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
++#define CGTS_CU12_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
++#define CGTS_CU12_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU12_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU12_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU12_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU12_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
++//CGTS_CU13_TCPI_CTRL_REG
++#define CGTS_CU13_TCPI_CTRL_REG__TCPI__SHIFT 0x0
++#define CGTS_CU13_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
++#define CGTS_CU13_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU13_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU13_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU13_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
++#define CGTS_CU13_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
++#define CGTS_CU13_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU13_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU13_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU13_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU13_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
++//CGTS_CU14_TCPI_CTRL_REG
++#define CGTS_CU14_TCPI_CTRL_REG__TCPI__SHIFT 0x0
++#define CGTS_CU14_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
++#define CGTS_CU14_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU14_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU14_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU14_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
++#define CGTS_CU14_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
++#define CGTS_CU14_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU14_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU14_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU14_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU14_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
++//CGTS_CU15_TCPI_CTRL_REG
++#define CGTS_CU15_TCPI_CTRL_REG__TCPI__SHIFT 0x0
++#define CGTS_CU15_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
++#define CGTS_CU15_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
++#define CGTS_CU15_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
++#define CGTS_CU15_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
++#define CGTS_CU15_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
++#define CGTS_CU15_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
++#define CGTS_CU15_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
++#define CGTS_CU15_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
++#define CGTS_CU15_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
++#define CGTS_CU15_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
++#define CGTS_CU15_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
++//CGTT_SPI_CLK_CTRL
++#define CGTT_SPI_CLK_CTRL__ON_DELAY__SHIFT 0x0
++#define CGTT_SPI_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
++#define CGTT_SPI_CLK_CTRL__GRP5_CG_OFF_HYST__SHIFT 0x12
++#define CGTT_SPI_CLK_CTRL__GRP5_CG_OVERRIDE__SHIFT 0x18
++#define CGTT_SPI_CLK_CTRL__ALL_CLK_ON_OVERRIDE__SHIFT 0x1a
++#define CGTT_SPI_CLK_CTRL__GRP3_OVERRIDE__SHIFT 0x1b
++#define CGTT_SPI_CLK_CTRL__GRP2_OVERRIDE__SHIFT 0x1c
++#define CGTT_SPI_CLK_CTRL__GRP1_OVERRIDE__SHIFT 0x1d
++#define CGTT_SPI_CLK_CTRL__GRP0_OVERRIDE__SHIFT 0x1e
++#define CGTT_SPI_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
++#define CGTT_SPI_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
++#define CGTT_SPI_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
++#define CGTT_SPI_CLK_CTRL__GRP5_CG_OFF_HYST_MASK 0x00FC0000L
++#define CGTT_SPI_CLK_CTRL__GRP5_CG_OVERRIDE_MASK 0x01000000L
++#define CGTT_SPI_CLK_CTRL__ALL_CLK_ON_OVERRIDE_MASK 0x04000000L
++#define CGTT_SPI_CLK_CTRL__GRP3_OVERRIDE_MASK 0x08000000L
++#define CGTT_SPI_CLK_CTRL__GRP2_OVERRIDE_MASK 0x10000000L
++#define CGTT_SPI_CLK_CTRL__GRP1_OVERRIDE_MASK 0x20000000L
++#define CGTT_SPI_CLK_CTRL__GRP0_OVERRIDE_MASK 0x40000000L
++#define CGTT_SPI_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
++//CGTT_PC_CLK_CTRL
++#define CGTT_PC_CLK_CTRL__ON_DELAY__SHIFT 0x0
++#define CGTT_PC_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
++#define CGTT_PC_CLK_CTRL__GRP5_CG_OFF_HYST__SHIFT 0x12
++#define CGTT_PC_CLK_CTRL__GRP5_CG_OVERRIDE__SHIFT 0x18
++#define CGTT_PC_CLK_CTRL__PC_WRITE_CLK_EN_OVERRIDE__SHIFT 0x19
++#define CGTT_PC_CLK_CTRL__PC_READ_CLK_EN_OVERRIDE__SHIFT 0x1a
++#define CGTT_PC_CLK_CTRL__CORE3_OVERRIDE__SHIFT 0x1b
++#define CGTT_PC_CLK_CTRL__CORE2_OVERRIDE__SHIFT 0x1c
++#define CGTT_PC_CLK_CTRL__CORE1_OVERRIDE__SHIFT 0x1d
++#define CGTT_PC_CLK_CTRL__CORE0_OVERRIDE__SHIFT 0x1e
++#define CGTT_PC_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
++#define CGTT_PC_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
++#define CGTT_PC_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
++#define CGTT_PC_CLK_CTRL__GRP5_CG_OFF_HYST_MASK 0x00FC0000L
++#define CGTT_PC_CLK_CTRL__GRP5_CG_OVERRIDE_MASK 0x01000000L
++#define CGTT_PC_CLK_CTRL__PC_WRITE_CLK_EN_OVERRIDE_MASK 0x02000000L
++#define CGTT_PC_CLK_CTRL__PC_READ_CLK_EN_OVERRIDE_MASK 0x04000000L
++#define CGTT_PC_CLK_CTRL__CORE3_OVERRIDE_MASK 0x08000000L
++#define CGTT_PC_CLK_CTRL__CORE2_OVERRIDE_MASK 0x10000000L
++#define CGTT_PC_CLK_CTRL__CORE1_OVERRIDE_MASK 0x20000000L
++#define CGTT_PC_CLK_CTRL__CORE0_OVERRIDE_MASK 0x40000000L
++#define CGTT_PC_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
++//CGTT_BCI_CLK_CTRL
++#define CGTT_BCI_CLK_CTRL__ON_DELAY__SHIFT 0x0
++#define CGTT_BCI_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
++#define CGTT_BCI_CLK_CTRL__RESERVED__SHIFT 0xc
++#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
++#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
++#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
++#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
++#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
++#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
++#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
++#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
++#define CGTT_BCI_CLK_CTRL__CORE6_OVERRIDE__SHIFT 0x18
++#define CGTT_BCI_CLK_CTRL__CORE5_OVERRIDE__SHIFT 0x19
++#define CGTT_BCI_CLK_CTRL__CORE4_OVERRIDE__SHIFT 0x1a
++#define CGTT_BCI_CLK_CTRL__CORE3_OVERRIDE__SHIFT 0x1b
++#define CGTT_BCI_CLK_CTRL__CORE2_OVERRIDE__SHIFT 0x1c
++#define CGTT_BCI_CLK_CTRL__CORE1_OVERRIDE__SHIFT 0x1d
++#define CGTT_BCI_CLK_CTRL__CORE0_OVERRIDE__SHIFT 0x1e
++#define CGTT_BCI_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
++#define CGTT_BCI_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
++#define CGTT_BCI_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
++#define CGTT_BCI_CLK_CTRL__RESERVED_MASK 0x0000F000L
++#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
++#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
++#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
++#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
++#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
++#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
++#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
++#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
++#define CGTT_BCI_CLK_CTRL__CORE6_OVERRIDE_MASK 0x01000000L
++#define CGTT_BCI_CLK_CTRL__CORE5_OVERRIDE_MASK 0x02000000L
++#define CGTT_BCI_CLK_CTRL__CORE4_OVERRIDE_MASK 0x04000000L
++#define CGTT_BCI_CLK_CTRL__CORE3_OVERRIDE_MASK 0x08000000L
++#define CGTT_BCI_CLK_CTRL__CORE2_OVERRIDE_MASK 0x10000000L
++#define CGTT_BCI_CLK_CTRL__CORE1_OVERRIDE_MASK 0x20000000L
++#define CGTT_BCI_CLK_CTRL__CORE0_OVERRIDE_MASK 0x40000000L
++#define CGTT_BCI_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
++//CGTT_VGT_CLK_CTRL
++#define CGTT_VGT_CLK_CTRL__ON_DELAY__SHIFT 0x0
++#define CGTT_VGT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
++#define CGTT_VGT_CLK_CTRL__PERF_ENABLE__SHIFT 0xf
++#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
++#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
++#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
++#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
++#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
++#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
++#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
++#define CGTT_VGT_CLK_CTRL__SOFT_OVERRIDE9__SHIFT 0x18
++#define CGTT_VGT_CLK_CTRL__SOFT_OVERRIDE8__SHIFT 0x19
++#define CGTT_VGT_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x1a
++#define CGTT_VGT_CLK_CTRL__PRIMGEN_OVERRIDE__SHIFT 0x1b
++#define CGTT_VGT_CLK_CTRL__TESS_OVERRIDE__SHIFT 0x1c
++#define CGTT_VGT_CLK_CTRL__GS_OVERRIDE__SHIFT 0x1d
++#define CGTT_VGT_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1e
++#define CGTT_VGT_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
++#define CGTT_VGT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
++#define CGTT_VGT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
++#define CGTT_VGT_CLK_CTRL__PERF_ENABLE_MASK 0x00008000L
++#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
++#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
++#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
++#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
++#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
++#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
++#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
++#define CGTT_VGT_CLK_CTRL__SOFT_OVERRIDE9_MASK 0x01000000L
++#define CGTT_VGT_CLK_CTRL__SOFT_OVERRIDE8_MASK 0x02000000L
++#define CGTT_VGT_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x04000000L
++#define CGTT_VGT_CLK_CTRL__PRIMGEN_OVERRIDE_MASK 0x08000000L
++#define CGTT_VGT_CLK_CTRL__TESS_OVERRIDE_MASK 0x10000000L
++#define CGTT_VGT_CLK_CTRL__GS_OVERRIDE_MASK 0x20000000L
++#define CGTT_VGT_CLK_CTRL__CORE_OVERRIDE_MASK 0x40000000L
++#define CGTT_VGT_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
++//CGTT_IA_CLK_CTRL
++#define CGTT_IA_CLK_CTRL__ON_DELAY__SHIFT 0x0
++#define CGTT_IA_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
++#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
++#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
++#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
++#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
++#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
++#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
++#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
++#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
++#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
++#define CGTT_IA_CLK_CTRL__PERF_ENABLE__SHIFT 0x19
++#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
++#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
++#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
++#define CGTT_IA_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1e
++#define CGTT_IA_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
++#define CGTT_IA_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
++#define CGTT_IA_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
++#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
++#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
++#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
++#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
++#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
++#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
++#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
++#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
++#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
++#define CGTT_IA_CLK_CTRL__PERF_ENABLE_MASK 0x02000000L
++#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
++#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
++#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
++#define CGTT_IA_CLK_CTRL__CORE_OVERRIDE_MASK 0x40000000L
++#define CGTT_IA_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
++//CGTT_WD_CLK_CTRL
++#define CGTT_WD_CLK_CTRL__ON_DELAY__SHIFT 0x0
++#define CGTT_WD_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
++#define CGTT_WD_CLK_CTRL__PERF_ENABLE__SHIFT 0xf
++#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
++#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
++#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
++#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
++#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
++#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
++#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
++#define CGTT_WD_CLK_CTRL__SOFT_OVERRIDE8__SHIFT 0x19
++#define CGTT_WD_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x1a
++#define CGTT_WD_CLK_CTRL__PRIMGEN_OVERRIDE__SHIFT 0x1b
++#define CGTT_WD_CLK_CTRL__TESS_OVERRIDE__SHIFT 0x1c
++#define CGTT_WD_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1d
++#define CGTT_WD_CLK_CTRL__RBIU_INPUT_OVERRIDE__SHIFT 0x1e
++#define CGTT_WD_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
++#define CGTT_WD_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
++#define CGTT_WD_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
++#define CGTT_WD_CLK_CTRL__PERF_ENABLE_MASK 0x00008000L
++#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
++#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
++#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
++#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
++#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
++#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
++#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
++#define CGTT_WD_CLK_CTRL__SOFT_OVERRIDE8_MASK 0x02000000L
++#define CGTT_WD_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x04000000L
++#define CGTT_WD_CLK_CTRL__PRIMGEN_OVERRIDE_MASK 0x08000000L
++#define CGTT_WD_CLK_CTRL__TESS_OVERRIDE_MASK 0x10000000L
++#define CGTT_WD_CLK_CTRL__CORE_OVERRIDE_MASK 0x20000000L
++#define CGTT_WD_CLK_CTRL__RBIU_INPUT_OVERRIDE_MASK 0x40000000L
++#define CGTT_WD_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
++//CGTT_PA_CLK_CTRL
++#define CGTT_PA_CLK_CTRL__ON_DELAY__SHIFT 0x0
++#define CGTT_PA_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
++#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
++#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
++#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
++#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
++#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
++#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
++#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
++#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
++#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
++#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
++#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
++#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
++#define CGTT_PA_CLK_CTRL__SU_CLK_OVERRIDE__SHIFT 0x1d
++#define CGTT_PA_CLK_CTRL__CL_CLK_OVERRIDE__SHIFT 0x1e
++#define CGTT_PA_CLK_CTRL__REG_CLK_OVERRIDE__SHIFT 0x1f
++#define CGTT_PA_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
++#define CGTT_PA_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
++#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
++#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
++#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
++#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
++#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
++#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
++#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
++#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
++#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
++#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
++#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
++#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
++#define CGTT_PA_CLK_CTRL__SU_CLK_OVERRIDE_MASK 0x20000000L
++#define CGTT_PA_CLK_CTRL__CL_CLK_OVERRIDE_MASK 0x40000000L
++#define CGTT_PA_CLK_CTRL__REG_CLK_OVERRIDE_MASK 0x80000000L
++//CGTT_SC_CLK_CTRL0
++#define CGTT_SC_CLK_CTRL0__ON_DELAY__SHIFT 0x0
++#define CGTT_SC_CLK_CTRL0__OFF_HYSTERESIS__SHIFT 0x4
++#define CGTT_SC_CLK_CTRL0__PFF_ZFF_MEM_CLK_STALL_OVERRIDE__SHIFT 0x10
++#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE5__SHIFT 0x11
++#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE4__SHIFT 0x12
++#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE3__SHIFT 0x13
++#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE2__SHIFT 0x14
++#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE1__SHIFT 0x15
++#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE0__SHIFT 0x16
++#define CGTT_SC_CLK_CTRL0__REG_CLK_STALL_OVERRIDE__SHIFT 0x17
++#define CGTT_SC_CLK_CTRL0__PFF_ZFF_MEM_CLK_OVERRIDE__SHIFT 0x18
++#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE5__SHIFT 0x19
++#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE4__SHIFT 0x1a
++#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE3__SHIFT 0x1b
++#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE2__SHIFT 0x1c
++#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE1__SHIFT 0x1d
++#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE0__SHIFT 0x1e
++#define CGTT_SC_CLK_CTRL0__REG_CLK_OVERRIDE__SHIFT 0x1f
++#define CGTT_SC_CLK_CTRL0__ON_DELAY_MASK 0x0000000FL
++#define CGTT_SC_CLK_CTRL0__OFF_HYSTERESIS_MASK 0x00000FF0L
++#define CGTT_SC_CLK_CTRL0__PFF_ZFF_MEM_CLK_STALL_OVERRIDE_MASK 0x00010000L
++#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE5_MASK 0x00020000L
++#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE4_MASK 0x00040000L
++#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE3_MASK 0x00080000L
++#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE2_MASK 0x00100000L
++#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE1_MASK 0x00200000L
++#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE0_MASK 0x00400000L
++#define CGTT_SC_CLK_CTRL0__REG_CLK_STALL_OVERRIDE_MASK 0x00800000L
++#define CGTT_SC_CLK_CTRL0__PFF_ZFF_MEM_CLK_OVERRIDE_MASK 0x01000000L
++#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE5_MASK 0x02000000L
++#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE4_MASK 0x04000000L
++#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE3_MASK 0x08000000L
++#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE2_MASK 0x10000000L
++#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE1_MASK 0x20000000L
++#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE0_MASK 0x40000000L
++#define CGTT_SC_CLK_CTRL0__REG_CLK_OVERRIDE_MASK 0x80000000L
++//CGTT_SC_CLK_CTRL1
++#define CGTT_SC_CLK_CTRL1__ON_DELAY__SHIFT 0x0
++#define CGTT_SC_CLK_CTRL1__OFF_HYSTERESIS__SHIFT 0x4
++#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_STALL_OVERRIDE__SHIFT 0x11
++#define CGTT_SC_CLK_CTRL1__PBB_SCISSOR_CLK_STALL_OVERRIDE__SHIFT 0x12
++#define CGTT_SC_CLK_CTRL1__OTHER_SPECIAL_SC_REG_CLK_STALL_OVERRIDE__SHIFT 0x13
++#define CGTT_SC_CLK_CTRL1__SCREEN_EXT_REG_CLK_STALL_OVERRIDE__SHIFT 0x14
++#define CGTT_SC_CLK_CTRL1__VPORT_REG_MEM_CLK_STALL_OVERRIDE__SHIFT 0x15
++#define CGTT_SC_CLK_CTRL1__PBB_CLK_STALL_OVERRIDE__SHIFT 0x16
++#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_OVERRIDE__SHIFT 0x19
++#define CGTT_SC_CLK_CTRL1__PBB_SCISSOR_CLK_OVERRIDE__SHIFT 0x1a
++#define CGTT_SC_CLK_CTRL1__OTHER_SPECIAL_SC_REG_CLK_OVERRIDE__SHIFT 0x1b
++#define CGTT_SC_CLK_CTRL1__SCREEN_EXT_REG_CLK_OVERRIDE__SHIFT 0x1c
++#define CGTT_SC_CLK_CTRL1__VPORT_REG_MEM_CLK_OVERRIDE__SHIFT 0x1d
++#define CGTT_SC_CLK_CTRL1__PBB_CLK_OVERRIDE__SHIFT 0x1e
++#define CGTT_SC_CLK_CTRL1__ON_DELAY_MASK 0x0000000FL
++#define CGTT_SC_CLK_CTRL1__OFF_HYSTERESIS_MASK 0x00000FF0L
++#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_STALL_OVERRIDE_MASK 0x00020000L
++#define CGTT_SC_CLK_CTRL1__PBB_SCISSOR_CLK_STALL_OVERRIDE_MASK 0x00040000L
++#define CGTT_SC_CLK_CTRL1__OTHER_SPECIAL_SC_REG_CLK_STALL_OVERRIDE_MASK 0x00080000L
++#define CGTT_SC_CLK_CTRL1__SCREEN_EXT_REG_CLK_STALL_OVERRIDE_MASK 0x00100000L
++#define CGTT_SC_CLK_CTRL1__VPORT_REG_MEM_CLK_STALL_OVERRIDE_MASK 0x00200000L
++#define CGTT_SC_CLK_CTRL1__PBB_CLK_STALL_OVERRIDE_MASK 0x00400000L
++#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_OVERRIDE_MASK 0x02000000L
++#define CGTT_SC_CLK_CTRL1__PBB_SCISSOR_CLK_OVERRIDE_MASK 0x04000000L
++#define CGTT_SC_CLK_CTRL1__OTHER_SPECIAL_SC_REG_CLK_OVERRIDE_MASK 0x08000000L
++#define CGTT_SC_CLK_CTRL1__SCREEN_EXT_REG_CLK_OVERRIDE_MASK 0x10000000L
++#define CGTT_SC_CLK_CTRL1__VPORT_REG_MEM_CLK_OVERRIDE_MASK 0x20000000L
++#define CGTT_SC_CLK_CTRL1__PBB_CLK_OVERRIDE_MASK 0x40000000L
++//CGTT_SQ_CLK_CTRL
++#define CGTT_SQ_CLK_CTRL__ON_DELAY__SHIFT 0x0
++#define CGTT_SQ_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
++#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
++#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
++#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
++#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
++#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
++#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
++#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
++#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
++#define CGTT_SQ_CLK_CTRL__PERFMON_OVERRIDE__SHIFT 0x1d
++#define CGTT_SQ_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1e
++#define CGTT_SQ_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
++#define CGTT_SQ_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
++#define CGTT_SQ_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
++#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
++#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
++#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
++#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
++#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
++#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
++#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
++#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
++#define CGTT_SQ_CLK_CTRL__PERFMON_OVERRIDE_MASK 0x20000000L
++#define CGTT_SQ_CLK_CTRL__CORE_OVERRIDE_MASK 0x40000000L
++#define CGTT_SQ_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
++//CGTT_SQG_CLK_CTRL
++#define CGTT_SQG_CLK_CTRL__ON_DELAY__SHIFT 0x0
++#define CGTT_SQG_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
++#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
++#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
++#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
++#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
++#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
++#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
++#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
++#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
++#define CGTT_SQG_CLK_CTRL__TTRACE_OVERRIDE__SHIFT 0x1c
++#define CGTT_SQG_CLK_CTRL__PERFMON_OVERRIDE__SHIFT 0x1d
++#define CGTT_SQG_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1e
++#define CGTT_SQG_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
++#define CGTT_SQG_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
++#define CGTT_SQG_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
++#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
++#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
++#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
++#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
++#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
++#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
++#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
++#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
++#define CGTT_SQG_CLK_CTRL__TTRACE_OVERRIDE_MASK 0x10000000L
++#define CGTT_SQG_CLK_CTRL__PERFMON_OVERRIDE_MASK 0x20000000L
++#define CGTT_SQG_CLK_CTRL__CORE_OVERRIDE_MASK 0x40000000L
++#define CGTT_SQG_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
++//SQ_ALU_CLK_CTRL
++#define SQ_ALU_CLK_CTRL__FORCE_CU_ON_SH0__SHIFT 0x0
++#define SQ_ALU_CLK_CTRL__FORCE_CU_ON_SH1__SHIFT 0x10
++#define SQ_ALU_CLK_CTRL__FORCE_CU_ON_SH0_MASK 0x0000FFFFL
++#define SQ_ALU_CLK_CTRL__FORCE_CU_ON_SH1_MASK 0xFFFF0000L
++//SQ_TEX_CLK_CTRL
++#define SQ_TEX_CLK_CTRL__FORCE_CU_ON_SH0__SHIFT 0x0
++#define SQ_TEX_CLK_CTRL__FORCE_CU_ON_SH1__SHIFT 0x10
++#define SQ_TEX_CLK_CTRL__FORCE_CU_ON_SH0_MASK 0x0000FFFFL
++#define SQ_TEX_CLK_CTRL__FORCE_CU_ON_SH1_MASK 0xFFFF0000L
++//SQ_LDS_CLK_CTRL
++#define SQ_LDS_CLK_CTRL__FORCE_CU_ON_SH0__SHIFT 0x0
++#define SQ_LDS_CLK_CTRL__FORCE_CU_ON_SH1__SHIFT 0x10
++#define SQ_LDS_CLK_CTRL__FORCE_CU_ON_SH0_MASK 0x0000FFFFL
++#define SQ_LDS_CLK_CTRL__FORCE_CU_ON_SH1_MASK 0xFFFF0000L
++//SQ_POWER_THROTTLE
++#define SQ_POWER_THROTTLE__MIN_POWER__SHIFT 0x0
++#define SQ_POWER_THROTTLE__MAX_POWER__SHIFT 0x10
++#define SQ_POWER_THROTTLE__PHASE_OFFSET__SHIFT 0x1e
++#define SQ_POWER_THROTTLE__MIN_POWER_MASK 0x00003FFFL
++#define SQ_POWER_THROTTLE__MAX_POWER_MASK 0x3FFF0000L
++#define SQ_POWER_THROTTLE__PHASE_OFFSET_MASK 0xC0000000L
++//SQ_POWER_THROTTLE2
++#define SQ_POWER_THROTTLE2__MAX_POWER_DELTA__SHIFT 0x0
++#define SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE__SHIFT 0x10
++#define SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO__SHIFT 0x1b
++#define SQ_POWER_THROTTLE2__USE_REF_CLOCK__SHIFT 0x1f
++#define SQ_POWER_THROTTLE2__MAX_POWER_DELTA_MASK 0x00003FFFL
++#define SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE_MASK 0x03FF0000L
++#define SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO_MASK 0x78000000L
++#define SQ_POWER_THROTTLE2__USE_REF_CLOCK_MASK 0x80000000L
++//CGTT_SX_CLK_CTRL0
++#define CGTT_SX_CLK_CTRL0__ON_DELAY__SHIFT 0x0
++#define CGTT_SX_CLK_CTRL0__OFF_HYSTERESIS__SHIFT 0x4
++#define CGTT_SX_CLK_CTRL0__RESERVED__SHIFT 0xc
++#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE7__SHIFT 0x10
++#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE6__SHIFT 0x11
++#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE5__SHIFT 0x12
++#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE4__SHIFT 0x13
++#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE3__SHIFT 0x14
++#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE2__SHIFT 0x15
++#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE1__SHIFT 0x16
++#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE0__SHIFT 0x17
++#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE7__SHIFT 0x18
++#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE6__SHIFT 0x19
++#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE5__SHIFT 0x1a
++#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE4__SHIFT 0x1b
++#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE3__SHIFT 0x1c
++#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE2__SHIFT 0x1d
++#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE1__SHIFT 0x1e
++#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE0__SHIFT 0x1f
++#define CGTT_SX_CLK_CTRL0__ON_DELAY_MASK 0x0000000FL
++#define CGTT_SX_CLK_CTRL0__OFF_HYSTERESIS_MASK 0x00000FF0L
++#define CGTT_SX_CLK_CTRL0__RESERVED_MASK 0x0000F000L
++#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
++#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
++#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
++#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
++#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
++#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
++#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
++#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
++#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE7_MASK 0x01000000L
++#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE6_MASK 0x02000000L
++#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE5_MASK 0x04000000L
++#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE4_MASK 0x08000000L
++#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE3_MASK 0x10000000L
++#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE2_MASK 0x20000000L
++#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE1_MASK 0x40000000L
++#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE0_MASK 0x80000000L
++//CGTT_SX_CLK_CTRL1
++#define CGTT_SX_CLK_CTRL1__ON_DELAY__SHIFT 0x0
++#define CGTT_SX_CLK_CTRL1__OFF_HYSTERESIS__SHIFT 0x4
++#define CGTT_SX_CLK_CTRL1__RESERVED__SHIFT 0xc
++#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE7__SHIFT 0x10
++#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE6__SHIFT 0x11
++#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE5__SHIFT 0x12
++#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE4__SHIFT 0x13
++#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE3__SHIFT 0x14
++#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE2__SHIFT 0x15
++#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE1__SHIFT 0x16
++#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE0__SHIFT 0x17
++#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE6__SHIFT 0x19
++#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE5__SHIFT 0x1a
++#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE4__SHIFT 0x1b
++#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE3__SHIFT 0x1c
++#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE2__SHIFT 0x1d
++#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE1__SHIFT 0x1e
++#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE0__SHIFT 0x1f
++#define CGTT_SX_CLK_CTRL1__ON_DELAY_MASK 0x0000000FL
++#define CGTT_SX_CLK_CTRL1__OFF_HYSTERESIS_MASK 0x00000FF0L
++#define CGTT_SX_CLK_CTRL1__RESERVED_MASK 0x0000F000L
++#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
++#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
++#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
++#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
++#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
++#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
++#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
++#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
++#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE6_MASK 0x02000000L
++#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE5_MASK 0x04000000L
++#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE4_MASK 0x08000000L
++#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE3_MASK 0x10000000L
++#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE2_MASK 0x20000000L
++#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE1_MASK 0x40000000L
++#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE0_MASK 0x80000000L
++//CGTT_SX_CLK_CTRL2
++#define CGTT_SX_CLK_CTRL2__ON_DELAY__SHIFT 0x0
++#define CGTT_SX_CLK_CTRL2__OFF_HYSTERESIS__SHIFT 0x4
++#define CGTT_SX_CLK_CTRL2__RESERVED__SHIFT 0xd
++#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE7__SHIFT 0x10
++#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE6__SHIFT 0x11
++#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE5__SHIFT 0x12
++#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE4__SHIFT 0x13
++#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE3__SHIFT 0x14
++#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE2__SHIFT 0x15
++#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE1__SHIFT 0x16
++#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE0__SHIFT 0x17
++#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE6__SHIFT 0x19
++#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE5__SHIFT 0x1a
++#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE4__SHIFT 0x1b
++#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE3__SHIFT 0x1c
++#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE2__SHIFT 0x1d
++#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE1__SHIFT 0x1e
++#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE0__SHIFT 0x1f
++#define CGTT_SX_CLK_CTRL2__ON_DELAY_MASK 0x0000000FL
++#define CGTT_SX_CLK_CTRL2__OFF_HYSTERESIS_MASK 0x00000FF0L
++#define CGTT_SX_CLK_CTRL2__RESERVED_MASK 0x0000E000L
++#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
++#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
++#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
++#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
++#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
++#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
++#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
++#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
++#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE6_MASK 0x02000000L
++#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE5_MASK 0x04000000L
++#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE4_MASK 0x08000000L
++#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE3_MASK 0x10000000L
++#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE2_MASK 0x20000000L
++#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE1_MASK 0x40000000L
++#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE0_MASK 0x80000000L
++//CGTT_SX_CLK_CTRL3
++#define CGTT_SX_CLK_CTRL3__ON_DELAY__SHIFT 0x0
++#define CGTT_SX_CLK_CTRL3__OFF_HYSTERESIS__SHIFT 0x4
++#define CGTT_SX_CLK_CTRL3__RESERVED__SHIFT 0xd
++#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE7__SHIFT 0x10
++#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE6__SHIFT 0x11
++#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE5__SHIFT 0x12
++#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE4__SHIFT 0x13
++#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE3__SHIFT 0x14
++#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE2__SHIFT 0x15
++#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE1__SHIFT 0x16
++#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE0__SHIFT 0x17
++#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE6__SHIFT 0x19
++#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE5__SHIFT 0x1a
++#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE4__SHIFT 0x1b
++#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE3__SHIFT 0x1c
++#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE2__SHIFT 0x1d
++#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE1__SHIFT 0x1e
++#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE0__SHIFT 0x1f
++#define CGTT_SX_CLK_CTRL3__ON_DELAY_MASK 0x0000000FL
++#define CGTT_SX_CLK_CTRL3__OFF_HYSTERESIS_MASK 0x00000FF0L
++#define CGTT_SX_CLK_CTRL3__RESERVED_MASK 0x0000E000L
++#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
++#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
++#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
++#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
++#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
++#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
++#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
++#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
++#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE6_MASK 0x02000000L
++#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE5_MASK 0x04000000L
++#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE4_MASK 0x08000000L
++#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE3_MASK 0x10000000L
++#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE2_MASK 0x20000000L
++#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE1_MASK 0x40000000L
++#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE0_MASK 0x80000000L
++//CGTT_SX_CLK_CTRL4
++#define CGTT_SX_CLK_CTRL4__ON_DELAY__SHIFT 0x0
++#define CGTT_SX_CLK_CTRL4__OFF_HYSTERESIS__SHIFT 0x4
++#define CGTT_SX_CLK_CTRL4__RESERVED__SHIFT 0xc
++#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE7__SHIFT 0x10
++#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE6__SHIFT 0x11
++#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE5__SHIFT 0x12
++#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE4__SHIFT 0x13
++#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE3__SHIFT 0x14
++#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE2__SHIFT 0x15
++#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE1__SHIFT 0x16
++#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE0__SHIFT 0x17
++#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE6__SHIFT 0x19
++#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE5__SHIFT 0x1a
++#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE4__SHIFT 0x1b
++#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE3__SHIFT 0x1c
++#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE2__SHIFT 0x1d
++#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE1__SHIFT 0x1e
++#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE0__SHIFT 0x1f
++#define CGTT_SX_CLK_CTRL4__ON_DELAY_MASK 0x0000000FL
++#define CGTT_SX_CLK_CTRL4__OFF_HYSTERESIS_MASK 0x00000FF0L
++#define CGTT_SX_CLK_CTRL4__RESERVED_MASK 0x0000F000L
++#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
++#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
++#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
++#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
++#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
++#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
++#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
++#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
++#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE6_MASK 0x02000000L
++#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE5_MASK 0x04000000L
++#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE4_MASK 0x08000000L
++#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE3_MASK 0x10000000L
++#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE2_MASK 0x20000000L
++#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE1_MASK 0x40000000L
++#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE0_MASK 0x80000000L
++//TD_CGTT_CTRL
++#define TD_CGTT_CTRL__ON_DELAY__SHIFT 0x0
++#define TD_CGTT_CTRL__OFF_HYSTERESIS__SHIFT 0x4
++#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
++#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
++#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
++#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
++#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
++#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
++#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
++#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
++#define TD_CGTT_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
++#define TD_CGTT_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
++#define TD_CGTT_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
++#define TD_CGTT_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
++#define TD_CGTT_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
++#define TD_CGTT_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
++#define TD_CGTT_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
++#define TD_CGTT_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
++#define TD_CGTT_CTRL__ON_DELAY_MASK 0x0000000FL
++#define TD_CGTT_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
++#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
++#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
++#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
++#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
++#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
++#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
++#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
++#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
++#define TD_CGTT_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
++#define TD_CGTT_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
++#define TD_CGTT_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
++#define TD_CGTT_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
++#define TD_CGTT_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
++#define TD_CGTT_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
++#define TD_CGTT_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
++#define TD_CGTT_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
++//TA_CGTT_CTRL
++#define TA_CGTT_CTRL__ON_DELAY__SHIFT 0x0
++#define TA_CGTT_CTRL__OFF_HYSTERESIS__SHIFT 0x4
++#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
++#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
++#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
++#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
++#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
++#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
++#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
++#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
++#define TA_CGTT_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
++#define TA_CGTT_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
++#define TA_CGTT_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
++#define TA_CGTT_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
++#define TA_CGTT_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
++#define TA_CGTT_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
++#define TA_CGTT_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
++#define TA_CGTT_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
++#define TA_CGTT_CTRL__ON_DELAY_MASK 0x0000000FL
++#define TA_CGTT_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
++#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
++#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
++#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
++#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
++#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
++#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
++#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
++#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
++#define TA_CGTT_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
++#define TA_CGTT_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
++#define TA_CGTT_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
++#define TA_CGTT_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
++#define TA_CGTT_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
++#define TA_CGTT_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
++#define TA_CGTT_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
++#define TA_CGTT_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
++//CGTT_TCPI_CLK_CTRL
++#define CGTT_TCPI_CLK_CTRL__ON_DELAY__SHIFT 0x0
++#define CGTT_TCPI_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
++#define CGTT_TCPI_CLK_CTRL__SPARE__SHIFT 0xc
++#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
++#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
++#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
++#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
++#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
++#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
++#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
++#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
++#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
++#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
++#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
++#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
++#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
++#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
++#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
++#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
++#define CGTT_TCPI_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
++#define CGTT_TCPI_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
++#define CGTT_TCPI_CLK_CTRL__SPARE_MASK 0x0000F000L
++#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
++#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
++#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
++#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
++#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
++#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
++#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
++#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
++#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
++#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
++#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
++#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
++#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
++#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
++#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
++#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
++//CGTT_TCI_CLK_CTRL
++#define CGTT_TCI_CLK_CTRL__ON_DELAY__SHIFT 0x0
++#define CGTT_TCI_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
++#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
++#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
++#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
++#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
++#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
++#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
++#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
++#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
++#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
++#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
++#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
++#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
++#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
++#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
++#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
++#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
++#define CGTT_TCI_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
++#define CGTT_TCI_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
++#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
++#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
++#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
++#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
++#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
++#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
++#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
++#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
++#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
++#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
++#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
++#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
++#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
++#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
++#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
++#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
++//CGTT_GDS_CLK_CTRL
++#define CGTT_GDS_CLK_CTRL__ON_DELAY__SHIFT 0x0
++#define CGTT_GDS_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
++#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
++#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
++#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
++#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
++#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
++#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
++#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
++#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
++#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
++#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
++#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
++#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
++#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
++#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
++#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
++#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
++#define CGTT_GDS_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
++#define CGTT_GDS_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
++#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
++#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
++#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
++#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
++#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
++#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
++#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
++#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
++#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
++#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
++#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
++#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
++#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
++#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
++#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
++#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
++//DB_CGTT_CLK_CTRL_0
++#define DB_CGTT_CLK_CTRL_0__ON_DELAY__SHIFT 0x0
++#define DB_CGTT_CLK_CTRL_0__OFF_HYSTERESIS__SHIFT 0x4
++#define DB_CGTT_CLK_CTRL_0__RESERVED__SHIFT 0xc
++#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE7__SHIFT 0x10
++#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE6__SHIFT 0x11
++#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE5__SHIFT 0x12
++#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE4__SHIFT 0x13
++#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE3__SHIFT 0x14
++#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE2__SHIFT 0x15
++#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE1__SHIFT 0x16
++#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE0__SHIFT 0x17
++#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE7__SHIFT 0x18
++#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE6__SHIFT 0x19
++#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE5__SHIFT 0x1a
++#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE4__SHIFT 0x1b
++#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE3__SHIFT 0x1c
++#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE2__SHIFT 0x1d
++#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE1__SHIFT 0x1e
++#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE0__SHIFT 0x1f
++#define DB_CGTT_CLK_CTRL_0__ON_DELAY_MASK 0x0000000FL
++#define DB_CGTT_CLK_CTRL_0__OFF_HYSTERESIS_MASK 0x00000FF0L
++#define DB_CGTT_CLK_CTRL_0__RESERVED_MASK 0x0000F000L
++#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
++#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
++#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
++#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
++#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
++#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
++#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
++#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
++#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE7_MASK 0x01000000L
++#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE6_MASK 0x02000000L
++#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE5_MASK 0x04000000L
++#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE4_MASK 0x08000000L
++#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE3_MASK 0x10000000L
++#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE2_MASK 0x20000000L
++#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE1_MASK 0x40000000L
++#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE0_MASK 0x80000000L
++//CB_CGTT_SCLK_CTRL
++#define CB_CGTT_SCLK_CTRL__ON_DELAY__SHIFT 0x0
++#define CB_CGTT_SCLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
++#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
++#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
++#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
++#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
++#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
++#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
++#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
++#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
++#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
++#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
++#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
++#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
++#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
++#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
++#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
++#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
++#define CB_CGTT_SCLK_CTRL__ON_DELAY_MASK 0x0000000FL
++#define CB_CGTT_SCLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
++#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
++#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
++#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
++#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
++#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
++#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
++#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
++#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
++#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
++#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
++#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
++#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
++#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
++#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
++#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
++#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
++//TCC_CGTT_SCLK_CTRL
++#define TCC_CGTT_SCLK_CTRL__ON_DELAY__SHIFT 0x0
++#define TCC_CGTT_SCLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
++#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
++#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
++#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
++#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
++#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
++#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
++#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
++#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
++#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
++#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
++#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
++#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
++#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
++#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
++#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
++#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
++#define TCC_CGTT_SCLK_CTRL__ON_DELAY_MASK 0x0000000FL
++#define TCC_CGTT_SCLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
++#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
++#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
++#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
++#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
++#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
++#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
++#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
++#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
++#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
++#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
++#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
++#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
++#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
++#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
++#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
++#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
++//TCA_CGTT_SCLK_CTRL
++#define TCA_CGTT_SCLK_CTRL__ON_DELAY__SHIFT 0x0
++#define TCA_CGTT_SCLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
++#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
++#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
++#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
++#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
++#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
++#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
++#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
++#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
++#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
++#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
++#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
++#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
++#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
++#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
++#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
++#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
++#define TCA_CGTT_SCLK_CTRL__ON_DELAY_MASK 0x0000000FL
++#define TCA_CGTT_SCLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
++#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
++#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
++#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
++#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
++#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
++#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
++#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
++#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
++#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
++#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
++#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
++#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
++#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
++#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
++#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
++#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
++//CGTT_CP_CLK_CTRL
++#define CGTT_CP_CLK_CTRL__ON_DELAY__SHIFT 0x0
++#define CGTT_CP_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
++#define CGTT_CP_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
++#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
++#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
++#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
++#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
++#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
++#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
++#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
++#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
++#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_PERFMON__SHIFT 0x1d
++#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
++#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
++#define CGTT_CP_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
++#define CGTT_CP_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
++#define CGTT_CP_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
++#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
++#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
++#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
++#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
++#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
++#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
++#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
++#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
++#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_PERFMON_MASK 0x20000000L
++#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
++#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
++//CGTT_CPF_CLK_CTRL
++#define CGTT_CPF_CLK_CTRL__ON_DELAY__SHIFT 0x0
++#define CGTT_CPF_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
++#define CGTT_CPF_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
++#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
++#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
++#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
++#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
++#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
++#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
++#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
++#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
++#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_PERFMON__SHIFT 0x1d
++#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
++#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
++#define CGTT_CPF_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
++#define CGTT_CPF_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
++#define CGTT_CPF_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
++#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
++#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
++#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
++#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
++#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
++#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
++#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
++#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
++#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_PERFMON_MASK 0x20000000L
++#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
++#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
++//CGTT_CPC_CLK_CTRL
++#define CGTT_CPC_CLK_CTRL__ON_DELAY__SHIFT 0x0
++#define CGTT_CPC_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
++#define CGTT_CPC_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
++#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
++#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
++#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
++#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
++#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
++#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
++#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
++#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
++#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_PERFMON__SHIFT 0x1d
++#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
++#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
++#define CGTT_CPC_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
++#define CGTT_CPC_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
++#define CGTT_CPC_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
++#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
++#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
++#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
++#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
++#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
++#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
++#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
++#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
++#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_PERFMON_MASK 0x20000000L
++#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
++#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
++//RLC_PWR_CTRL
++#define RLC_PWR_CTRL__MON_CGPG_RTN_EN__SHIFT 0x0
++#define RLC_PWR_CTRL__RESERVED__SHIFT 0x1
++#define RLC_PWR_CTRL__DLDO_STATUS__SHIFT 0x8
++#define RLC_PWR_CTRL__MON_CGPG_RTN_EN_MASK 0x00000001L
++#define RLC_PWR_CTRL__RESERVED_MASK 0x000000FEL
++#define RLC_PWR_CTRL__DLDO_STATUS_MASK 0x00000100L
++//CGTT_RLC_CLK_CTRL
++#define CGTT_RLC_CLK_CTRL__ON_DELAY__SHIFT 0x0
++#define CGTT_RLC_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
++#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
++#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
++#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
++#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
++#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
++#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
++#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
++#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
++#define CGTT_RLC_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
++#define CGTT_RLC_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
++#define CGTT_RLC_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
++#define CGTT_RLC_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
++#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
++#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
++#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
++#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
++#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
++#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
++#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
++#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
++#define CGTT_RLC_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
++#define CGTT_RLC_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
++//RLC_GFX_RM_CNTL
++#define RLC_GFX_RM_CNTL__RLC_GFX_RM_VALID__SHIFT 0x0
++#define RLC_GFX_RM_CNTL__RESERVED__SHIFT 0x1
++#define RLC_GFX_RM_CNTL__RLC_GFX_RM_VALID_MASK 0x00000001L
++#define RLC_GFX_RM_CNTL__RESERVED_MASK 0xFFFFFFFEL
++//RMI_CGTT_SCLK_CTRL
++#define RMI_CGTT_SCLK_CTRL__ON_DELAY__SHIFT 0x0
++#define RMI_CGTT_SCLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
++#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
++#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
++#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
++#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
++#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
++#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
++#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
++#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
++#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
++#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
++#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
++#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
++#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
++#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
++#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
++#define RMI_CGTT_SCLK_CTRL__ON_DELAY_MASK 0x0000000FL
++#define RMI_CGTT_SCLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
++#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
++#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
++#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
++#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
++#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
++#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
++#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
++#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
++#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
++#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
++#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
++#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
++#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
++#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
++#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
++//CGTT_TCPF_CLK_CTRL
++#define CGTT_TCPF_CLK_CTRL__ON_DELAY__SHIFT 0x0
++#define CGTT_TCPF_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
++#define CGTT_TCPF_CLK_CTRL__SPARE__SHIFT 0xc
++#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
++#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
++#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
++#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
++#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
++#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
++#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
++#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
++#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
++#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
++#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
++#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
++#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
++#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
++#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
++#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
++#define CGTT_TCPF_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
++#define CGTT_TCPF_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
++#define CGTT_TCPF_CLK_CTRL__SPARE_MASK 0x0000F000L
++#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
++#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
++#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
++#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
++#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
++#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
++#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
++#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
++#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
++#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
++#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
++#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
++#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
++#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
++#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
++#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
++
++
++// addressBlock: gc_ea_pwrdec
++//GCEA_CGTT_CLK_CTRL
++#define GCEA_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
++#define GCEA_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
++#define GCEA_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
++#define GCEA_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN__SHIFT 0x1e
++#define GCEA_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER__SHIFT 0x1f
++#define GCEA_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
++#define GCEA_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
++#define GCEA_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
++#define GCEA_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN_MASK 0x40000000L
++#define GCEA_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER_MASK 0x80000000L
++
++
++// addressBlock: gc_utcl2_vmsharedhvdec
++//MC_VM_FB_SIZE_OFFSET_VF0
++#define MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_SIZE__SHIFT 0x0
++#define MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_OFFSET__SHIFT 0x10
++#define MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_SIZE_MASK 0x0000FFFFL
++#define MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_OFFSET_MASK 0xFFFF0000L
++//MC_VM_FB_SIZE_OFFSET_VF1
++#define MC_VM_FB_SIZE_OFFSET_VF1__VF_FB_SIZE__SHIFT 0x0
++#define MC_VM_FB_SIZE_OFFSET_VF1__VF_FB_OFFSET__SHIFT 0x10
++#define MC_VM_FB_SIZE_OFFSET_VF1__VF_FB_SIZE_MASK 0x0000FFFFL
++#define MC_VM_FB_SIZE_OFFSET_VF1__VF_FB_OFFSET_MASK 0xFFFF0000L
++//MC_VM_FB_SIZE_OFFSET_VF2
++#define MC_VM_FB_SIZE_OFFSET_VF2__VF_FB_SIZE__SHIFT 0x0
++#define MC_VM_FB_SIZE_OFFSET_VF2__VF_FB_OFFSET__SHIFT 0x10
++#define MC_VM_FB_SIZE_OFFSET_VF2__VF_FB_SIZE_MASK 0x0000FFFFL
++#define MC_VM_FB_SIZE_OFFSET_VF2__VF_FB_OFFSET_MASK 0xFFFF0000L
++//MC_VM_FB_SIZE_OFFSET_VF3
++#define MC_VM_FB_SIZE_OFFSET_VF3__VF_FB_SIZE__SHIFT 0x0
++#define MC_VM_FB_SIZE_OFFSET_VF3__VF_FB_OFFSET__SHIFT 0x10
++#define MC_VM_FB_SIZE_OFFSET_VF3__VF_FB_SIZE_MASK 0x0000FFFFL
++#define MC_VM_FB_SIZE_OFFSET_VF3__VF_FB_OFFSET_MASK 0xFFFF0000L
++//MC_VM_FB_SIZE_OFFSET_VF4
++#define MC_VM_FB_SIZE_OFFSET_VF4__VF_FB_SIZE__SHIFT 0x0
++#define MC_VM_FB_SIZE_OFFSET_VF4__VF_FB_OFFSET__SHIFT 0x10
++#define MC_VM_FB_SIZE_OFFSET_VF4__VF_FB_SIZE_MASK 0x0000FFFFL
++#define MC_VM_FB_SIZE_OFFSET_VF4__VF_FB_OFFSET_MASK 0xFFFF0000L
++//MC_VM_FB_SIZE_OFFSET_VF5
++#define MC_VM_FB_SIZE_OFFSET_VF5__VF_FB_SIZE__SHIFT 0x0
++#define MC_VM_FB_SIZE_OFFSET_VF5__VF_FB_OFFSET__SHIFT 0x10
++#define MC_VM_FB_SIZE_OFFSET_VF5__VF_FB_SIZE_MASK 0x0000FFFFL
++#define MC_VM_FB_SIZE_OFFSET_VF5__VF_FB_OFFSET_MASK 0xFFFF0000L
++//MC_VM_FB_SIZE_OFFSET_VF6
++#define MC_VM_FB_SIZE_OFFSET_VF6__VF_FB_SIZE__SHIFT 0x0
++#define MC_VM_FB_SIZE_OFFSET_VF6__VF_FB_OFFSET__SHIFT 0x10
++#define MC_VM_FB_SIZE_OFFSET_VF6__VF_FB_SIZE_MASK 0x0000FFFFL
++#define MC_VM_FB_SIZE_OFFSET_VF6__VF_FB_OFFSET_MASK 0xFFFF0000L
++//MC_VM_FB_SIZE_OFFSET_VF7
++#define MC_VM_FB_SIZE_OFFSET_VF7__VF_FB_SIZE__SHIFT 0x0
++#define MC_VM_FB_SIZE_OFFSET_VF7__VF_FB_OFFSET__SHIFT 0x10
++#define MC_VM_FB_SIZE_OFFSET_VF7__VF_FB_SIZE_MASK 0x0000FFFFL
++#define MC_VM_FB_SIZE_OFFSET_VF7__VF_FB_OFFSET_MASK 0xFFFF0000L
++//MC_VM_FB_SIZE_OFFSET_VF8
++#define MC_VM_FB_SIZE_OFFSET_VF8__VF_FB_SIZE__SHIFT 0x0
++#define MC_VM_FB_SIZE_OFFSET_VF8__VF_FB_OFFSET__SHIFT 0x10
++#define MC_VM_FB_SIZE_OFFSET_VF8__VF_FB_SIZE_MASK 0x0000FFFFL
++#define MC_VM_FB_SIZE_OFFSET_VF8__VF_FB_OFFSET_MASK 0xFFFF0000L
++//MC_VM_FB_SIZE_OFFSET_VF9
++#define MC_VM_FB_SIZE_OFFSET_VF9__VF_FB_SIZE__SHIFT 0x0
++#define MC_VM_FB_SIZE_OFFSET_VF9__VF_FB_OFFSET__SHIFT 0x10
++#define MC_VM_FB_SIZE_OFFSET_VF9__VF_FB_SIZE_MASK 0x0000FFFFL
++#define MC_VM_FB_SIZE_OFFSET_VF9__VF_FB_OFFSET_MASK 0xFFFF0000L
++//MC_VM_FB_SIZE_OFFSET_VF10
++#define MC_VM_FB_SIZE_OFFSET_VF10__VF_FB_SIZE__SHIFT 0x0
++#define MC_VM_FB_SIZE_OFFSET_VF10__VF_FB_OFFSET__SHIFT 0x10
++#define MC_VM_FB_SIZE_OFFSET_VF10__VF_FB_SIZE_MASK 0x0000FFFFL
++#define MC_VM_FB_SIZE_OFFSET_VF10__VF_FB_OFFSET_MASK 0xFFFF0000L
++//MC_VM_FB_SIZE_OFFSET_VF11
++#define MC_VM_FB_SIZE_OFFSET_VF11__VF_FB_SIZE__SHIFT 0x0
++#define MC_VM_FB_SIZE_OFFSET_VF11__VF_FB_OFFSET__SHIFT 0x10
++#define MC_VM_FB_SIZE_OFFSET_VF11__VF_FB_SIZE_MASK 0x0000FFFFL
++#define MC_VM_FB_SIZE_OFFSET_VF11__VF_FB_OFFSET_MASK 0xFFFF0000L
++//MC_VM_FB_SIZE_OFFSET_VF12
++#define MC_VM_FB_SIZE_OFFSET_VF12__VF_FB_SIZE__SHIFT 0x0
++#define MC_VM_FB_SIZE_OFFSET_VF12__VF_FB_OFFSET__SHIFT 0x10
++#define MC_VM_FB_SIZE_OFFSET_VF12__VF_FB_SIZE_MASK 0x0000FFFFL
++#define MC_VM_FB_SIZE_OFFSET_VF12__VF_FB_OFFSET_MASK 0xFFFF0000L
++//MC_VM_FB_SIZE_OFFSET_VF13
++#define MC_VM_FB_SIZE_OFFSET_VF13__VF_FB_SIZE__SHIFT 0x0
++#define MC_VM_FB_SIZE_OFFSET_VF13__VF_FB_OFFSET__SHIFT 0x10
++#define MC_VM_FB_SIZE_OFFSET_VF13__VF_FB_SIZE_MASK 0x0000FFFFL
++#define MC_VM_FB_SIZE_OFFSET_VF13__VF_FB_OFFSET_MASK 0xFFFF0000L
++//MC_VM_FB_SIZE_OFFSET_VF14
++#define MC_VM_FB_SIZE_OFFSET_VF14__VF_FB_SIZE__SHIFT 0x0
++#define MC_VM_FB_SIZE_OFFSET_VF14__VF_FB_OFFSET__SHIFT 0x10
++#define MC_VM_FB_SIZE_OFFSET_VF14__VF_FB_SIZE_MASK 0x0000FFFFL
++#define MC_VM_FB_SIZE_OFFSET_VF14__VF_FB_OFFSET_MASK 0xFFFF0000L
++//MC_VM_FB_SIZE_OFFSET_VF15
++#define MC_VM_FB_SIZE_OFFSET_VF15__VF_FB_SIZE__SHIFT 0x0
++#define MC_VM_FB_SIZE_OFFSET_VF15__VF_FB_OFFSET__SHIFT 0x10
++#define MC_VM_FB_SIZE_OFFSET_VF15__VF_FB_SIZE_MASK 0x0000FFFFL
++#define MC_VM_FB_SIZE_OFFSET_VF15__VF_FB_OFFSET_MASK 0xFFFF0000L
++//VM_IOMMU_MMIO_CNTRL_1
++#define VM_IOMMU_MMIO_CNTRL_1__MARC_EN__SHIFT 0x8
++#define VM_IOMMU_MMIO_CNTRL_1__MARC_EN_MASK 0x00000100L
++//MC_VM_MARC_BASE_LO_0
++#define MC_VM_MARC_BASE_LO_0__MARC_BASE_LO_0__SHIFT 0xc
++#define MC_VM_MARC_BASE_LO_0__MARC_BASE_LO_0_MASK 0xFFFFF000L
++//MC_VM_MARC_BASE_LO_1
++#define MC_VM_MARC_BASE_LO_1__MARC_BASE_LO_1__SHIFT 0xc
++#define MC_VM_MARC_BASE_LO_1__MARC_BASE_LO_1_MASK 0xFFFFF000L
++//MC_VM_MARC_BASE_LO_2
++#define MC_VM_MARC_BASE_LO_2__MARC_BASE_LO_2__SHIFT 0xc
++#define MC_VM_MARC_BASE_LO_2__MARC_BASE_LO_2_MASK 0xFFFFF000L
++//MC_VM_MARC_BASE_LO_3
++#define MC_VM_MARC_BASE_LO_3__MARC_BASE_LO_3__SHIFT 0xc
++#define MC_VM_MARC_BASE_LO_3__MARC_BASE_LO_3_MASK 0xFFFFF000L
++//MC_VM_MARC_BASE_HI_0
++#define MC_VM_MARC_BASE_HI_0__MARC_BASE_HI_0__SHIFT 0x0
++#define MC_VM_MARC_BASE_HI_0__MARC_BASE_HI_0_MASK 0x000FFFFFL
++//MC_VM_MARC_BASE_HI_1
++#define MC_VM_MARC_BASE_HI_1__MARC_BASE_HI_1__SHIFT 0x0
++#define MC_VM_MARC_BASE_HI_1__MARC_BASE_HI_1_MASK 0x000FFFFFL
++//MC_VM_MARC_BASE_HI_2
++#define MC_VM_MARC_BASE_HI_2__MARC_BASE_HI_2__SHIFT 0x0
++#define MC_VM_MARC_BASE_HI_2__MARC_BASE_HI_2_MASK 0x000FFFFFL
++//MC_VM_MARC_BASE_HI_3
++#define MC_VM_MARC_BASE_HI_3__MARC_BASE_HI_3__SHIFT 0x0
++#define MC_VM_MARC_BASE_HI_3__MARC_BASE_HI_3_MASK 0x000FFFFFL
++//MC_VM_MARC_RELOC_LO_0
++#define MC_VM_MARC_RELOC_LO_0__MARC_ENABLE_0__SHIFT 0x0
++#define MC_VM_MARC_RELOC_LO_0__MARC_READONLY_0__SHIFT 0x1
++#define MC_VM_MARC_RELOC_LO_0__MARC_RELOC_LO_0__SHIFT 0xc
++#define MC_VM_MARC_RELOC_LO_0__MARC_ENABLE_0_MASK 0x00000001L
++#define MC_VM_MARC_RELOC_LO_0__MARC_READONLY_0_MASK 0x00000002L
++#define MC_VM_MARC_RELOC_LO_0__MARC_RELOC_LO_0_MASK 0xFFFFF000L
++//MC_VM_MARC_RELOC_LO_1
++#define MC_VM_MARC_RELOC_LO_1__MARC_ENABLE_1__SHIFT 0x0
++#define MC_VM_MARC_RELOC_LO_1__MARC_READONLY_1__SHIFT 0x1
++#define MC_VM_MARC_RELOC_LO_1__MARC_RELOC_LO_1__SHIFT 0xc
++#define MC_VM_MARC_RELOC_LO_1__MARC_ENABLE_1_MASK 0x00000001L
++#define MC_VM_MARC_RELOC_LO_1__MARC_READONLY_1_MASK 0x00000002L
++#define MC_VM_MARC_RELOC_LO_1__MARC_RELOC_LO_1_MASK 0xFFFFF000L
++//MC_VM_MARC_RELOC_LO_2
++#define MC_VM_MARC_RELOC_LO_2__MARC_ENABLE_2__SHIFT 0x0
++#define MC_VM_MARC_RELOC_LO_2__MARC_READONLY_2__SHIFT 0x1
++#define MC_VM_MARC_RELOC_LO_2__MARC_RELOC_LO_2__SHIFT 0xc
++#define MC_VM_MARC_RELOC_LO_2__MARC_ENABLE_2_MASK 0x00000001L
++#define MC_VM_MARC_RELOC_LO_2__MARC_READONLY_2_MASK 0x00000002L
++#define MC_VM_MARC_RELOC_LO_2__MARC_RELOC_LO_2_MASK 0xFFFFF000L
++//MC_VM_MARC_RELOC_LO_3
++#define MC_VM_MARC_RELOC_LO_3__MARC_ENABLE_3__SHIFT 0x0
++#define MC_VM_MARC_RELOC_LO_3__MARC_READONLY_3__SHIFT 0x1
++#define MC_VM_MARC_RELOC_LO_3__MARC_RELOC_LO_3__SHIFT 0xc
++#define MC_VM_MARC_RELOC_LO_3__MARC_ENABLE_3_MASK 0x00000001L
++#define MC_VM_MARC_RELOC_LO_3__MARC_READONLY_3_MASK 0x00000002L
++#define MC_VM_MARC_RELOC_LO_3__MARC_RELOC_LO_3_MASK 0xFFFFF000L
++//MC_VM_MARC_RELOC_HI_0
++#define MC_VM_MARC_RELOC_HI_0__MARC_RELOC_HI_0__SHIFT 0x0
++#define MC_VM_MARC_RELOC_HI_0__MARC_RELOC_HI_0_MASK 0x000FFFFFL
++//MC_VM_MARC_RELOC_HI_1
++#define MC_VM_MARC_RELOC_HI_1__MARC_RELOC_HI_1__SHIFT 0x0
++#define MC_VM_MARC_RELOC_HI_1__MARC_RELOC_HI_1_MASK 0x000FFFFFL
++//MC_VM_MARC_RELOC_HI_2
++#define MC_VM_MARC_RELOC_HI_2__MARC_RELOC_HI_2__SHIFT 0x0
++#define MC_VM_MARC_RELOC_HI_2__MARC_RELOC_HI_2_MASK 0x000FFFFFL
++//MC_VM_MARC_RELOC_HI_3
++#define MC_VM_MARC_RELOC_HI_3__MARC_RELOC_HI_3__SHIFT 0x0
++#define MC_VM_MARC_RELOC_HI_3__MARC_RELOC_HI_3_MASK 0x000FFFFFL
++//MC_VM_MARC_LEN_LO_0
++#define MC_VM_MARC_LEN_LO_0__MARC_LEN_LO_0__SHIFT 0xc
++#define MC_VM_MARC_LEN_LO_0__MARC_LEN_LO_0_MASK 0xFFFFF000L
++//MC_VM_MARC_LEN_LO_1
++#define MC_VM_MARC_LEN_LO_1__MARC_LEN_LO_1__SHIFT 0xc
++#define MC_VM_MARC_LEN_LO_1__MARC_LEN_LO_1_MASK 0xFFFFF000L
++//MC_VM_MARC_LEN_LO_2
++#define MC_VM_MARC_LEN_LO_2__MARC_LEN_LO_2__SHIFT 0xc
++#define MC_VM_MARC_LEN_LO_2__MARC_LEN_LO_2_MASK 0xFFFFF000L
++//MC_VM_MARC_LEN_LO_3
++#define MC_VM_MARC_LEN_LO_3__MARC_LEN_LO_3__SHIFT 0xc
++#define MC_VM_MARC_LEN_LO_3__MARC_LEN_LO_3_MASK 0xFFFFF000L
++//MC_VM_MARC_LEN_HI_0
++#define MC_VM_MARC_LEN_HI_0__MARC_LEN_HI_0__SHIFT 0x0
++#define MC_VM_MARC_LEN_HI_0__MARC_LEN_HI_0_MASK 0x000FFFFFL
++//MC_VM_MARC_LEN_HI_1
++#define MC_VM_MARC_LEN_HI_1__MARC_LEN_HI_1__SHIFT 0x0
++#define MC_VM_MARC_LEN_HI_1__MARC_LEN_HI_1_MASK 0x000FFFFFL
++//MC_VM_MARC_LEN_HI_2
++#define MC_VM_MARC_LEN_HI_2__MARC_LEN_HI_2__SHIFT 0x0
++#define MC_VM_MARC_LEN_HI_2__MARC_LEN_HI_2_MASK 0x000FFFFFL
++//MC_VM_MARC_LEN_HI_3
++#define MC_VM_MARC_LEN_HI_3__MARC_LEN_HI_3__SHIFT 0x0
++#define MC_VM_MARC_LEN_HI_3__MARC_LEN_HI_3_MASK 0x000FFFFFL
++//VM_IOMMU_CONTROL_REGISTER
++#define VM_IOMMU_CONTROL_REGISTER__IOMMUEN__SHIFT 0x0
++#define VM_IOMMU_CONTROL_REGISTER__IOMMUEN_MASK 0x00000001L
++//VM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER
++#define VM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER__PERFOPTEN__SHIFT 0xd
++#define VM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER__PERFOPTEN_MASK 0x00002000L
++//VM_PCIE_ATS_CNTL
++#define VM_PCIE_ATS_CNTL__STU__SHIFT 0x10
++#define VM_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0x1f
++#define VM_PCIE_ATS_CNTL__STU_MASK 0x001F0000L
++#define VM_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x80000000L
++//VM_PCIE_ATS_CNTL_VF_0
++#define VM_PCIE_ATS_CNTL_VF_0__ATC_ENABLE__SHIFT 0x1f
++#define VM_PCIE_ATS_CNTL_VF_0__ATC_ENABLE_MASK 0x80000000L
++//VM_PCIE_ATS_CNTL_VF_1
++#define VM_PCIE_ATS_CNTL_VF_1__ATC_ENABLE__SHIFT 0x1f
++#define VM_PCIE_ATS_CNTL_VF_1__ATC_ENABLE_MASK 0x80000000L
++//VM_PCIE_ATS_CNTL_VF_2
++#define VM_PCIE_ATS_CNTL_VF_2__ATC_ENABLE__SHIFT 0x1f
++#define VM_PCIE_ATS_CNTL_VF_2__ATC_ENABLE_MASK 0x80000000L
++//VM_PCIE_ATS_CNTL_VF_3
++#define VM_PCIE_ATS_CNTL_VF_3__ATC_ENABLE__SHIFT 0x1f
++#define VM_PCIE_ATS_CNTL_VF_3__ATC_ENABLE_MASK 0x80000000L
++//VM_PCIE_ATS_CNTL_VF_4
++#define VM_PCIE_ATS_CNTL_VF_4__ATC_ENABLE__SHIFT 0x1f
++#define VM_PCIE_ATS_CNTL_VF_4__ATC_ENABLE_MASK 0x80000000L
++//VM_PCIE_ATS_CNTL_VF_5
++#define VM_PCIE_ATS_CNTL_VF_5__ATC_ENABLE__SHIFT 0x1f
++#define VM_PCIE_ATS_CNTL_VF_5__ATC_ENABLE_MASK 0x80000000L
++//VM_PCIE_ATS_CNTL_VF_6
++#define VM_PCIE_ATS_CNTL_VF_6__ATC_ENABLE__SHIFT 0x1f
++#define VM_PCIE_ATS_CNTL_VF_6__ATC_ENABLE_MASK 0x80000000L
++//VM_PCIE_ATS_CNTL_VF_7
++#define VM_PCIE_ATS_CNTL_VF_7__ATC_ENABLE__SHIFT 0x1f
++#define VM_PCIE_ATS_CNTL_VF_7__ATC_ENABLE_MASK 0x80000000L
++//VM_PCIE_ATS_CNTL_VF_8
++#define VM_PCIE_ATS_CNTL_VF_8__ATC_ENABLE__SHIFT 0x1f
++#define VM_PCIE_ATS_CNTL_VF_8__ATC_ENABLE_MASK 0x80000000L
++//VM_PCIE_ATS_CNTL_VF_9
++#define VM_PCIE_ATS_CNTL_VF_9__ATC_ENABLE__SHIFT 0x1f
++#define VM_PCIE_ATS_CNTL_VF_9__ATC_ENABLE_MASK 0x80000000L
++//VM_PCIE_ATS_CNTL_VF_10
++#define VM_PCIE_ATS_CNTL_VF_10__ATC_ENABLE__SHIFT 0x1f
++#define VM_PCIE_ATS_CNTL_VF_10__ATC_ENABLE_MASK 0x80000000L
++//VM_PCIE_ATS_CNTL_VF_11
++#define VM_PCIE_ATS_CNTL_VF_11__ATC_ENABLE__SHIFT 0x1f
++#define VM_PCIE_ATS_CNTL_VF_11__ATC_ENABLE_MASK 0x80000000L
++//VM_PCIE_ATS_CNTL_VF_12
++#define VM_PCIE_ATS_CNTL_VF_12__ATC_ENABLE__SHIFT 0x1f
++#define VM_PCIE_ATS_CNTL_VF_12__ATC_ENABLE_MASK 0x80000000L
++//VM_PCIE_ATS_CNTL_VF_13
++#define VM_PCIE_ATS_CNTL_VF_13__ATC_ENABLE__SHIFT 0x1f
++#define VM_PCIE_ATS_CNTL_VF_13__ATC_ENABLE_MASK 0x80000000L
++//VM_PCIE_ATS_CNTL_VF_14
++#define VM_PCIE_ATS_CNTL_VF_14__ATC_ENABLE__SHIFT 0x1f
++#define VM_PCIE_ATS_CNTL_VF_14__ATC_ENABLE_MASK 0x80000000L
++//VM_PCIE_ATS_CNTL_VF_15
++#define VM_PCIE_ATS_CNTL_VF_15__ATC_ENABLE__SHIFT 0x1f
++#define VM_PCIE_ATS_CNTL_VF_15__ATC_ENABLE_MASK 0x80000000L
++//UTCL2_CGTT_CLK_CTRL
++#define UTCL2_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
++#define UTCL2_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
++#define UTCL2_CGTT_CLK_CTRL__SOFT_OVERRIDE_EXTRA__SHIFT 0xc
++#define UTCL2_CGTT_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
++#define UTCL2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x10
++#define UTCL2_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x18
++#define UTCL2_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
++#define UTCL2_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
++#define UTCL2_CGTT_CLK_CTRL__SOFT_OVERRIDE_EXTRA_MASK 0x00007000L
++#define UTCL2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
++#define UTCL2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L
++#define UTCL2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L
++
++
++// addressBlock: gc_hypdec
++//CP_HYP_PFP_UCODE_ADDR
++#define CP_HYP_PFP_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
++#define CP_HYP_PFP_UCODE_ADDR__UCODE_ADDR_MASK 0x00003FFFL
++//CP_PFP_UCODE_ADDR
++#define CP_PFP_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
++#define CP_PFP_UCODE_ADDR__UCODE_ADDR_MASK 0x00003FFFL
++//CP_HYP_PFP_UCODE_DATA
++#define CP_HYP_PFP_UCODE_DATA__UCODE_DATA__SHIFT 0x0
++#define CP_HYP_PFP_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
++//CP_PFP_UCODE_DATA
++#define CP_PFP_UCODE_DATA__UCODE_DATA__SHIFT 0x0
++#define CP_PFP_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
++//CP_HYP_ME_UCODE_ADDR
++#define CP_HYP_ME_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
++#define CP_HYP_ME_UCODE_ADDR__UCODE_ADDR_MASK 0x00001FFFL
++//CP_ME_RAM_RADDR
++#define CP_ME_RAM_RADDR__ME_RAM_RADDR__SHIFT 0x0
++#define CP_ME_RAM_RADDR__ME_RAM_RADDR_MASK 0x00001FFFL
++//CP_ME_RAM_WADDR
++#define CP_ME_RAM_WADDR__ME_RAM_WADDR__SHIFT 0x0
++#define CP_ME_RAM_WADDR__ME_RAM_WADDR_MASK 0x00001FFFL
++//CP_HYP_ME_UCODE_DATA
++#define CP_HYP_ME_UCODE_DATA__UCODE_DATA__SHIFT 0x0
++#define CP_HYP_ME_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
++//CP_ME_RAM_DATA
++#define CP_ME_RAM_DATA__ME_RAM_DATA__SHIFT 0x0
++#define CP_ME_RAM_DATA__ME_RAM_DATA_MASK 0xFFFFFFFFL
++//CP_CE_UCODE_ADDR
++#define CP_CE_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
++#define CP_CE_UCODE_ADDR__UCODE_ADDR_MASK 0x00000FFFL
++//CP_HYP_CE_UCODE_ADDR
++#define CP_HYP_CE_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
++#define CP_HYP_CE_UCODE_ADDR__UCODE_ADDR_MASK 0x00000FFFL
++//CP_CE_UCODE_DATA
++#define CP_CE_UCODE_DATA__UCODE_DATA__SHIFT 0x0
++#define CP_CE_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
++//CP_HYP_CE_UCODE_DATA
++#define CP_HYP_CE_UCODE_DATA__UCODE_DATA__SHIFT 0x0
++#define CP_HYP_CE_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
++//CP_HYP_MEC1_UCODE_ADDR
++#define CP_HYP_MEC1_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
++#define CP_HYP_MEC1_UCODE_ADDR__UCODE_ADDR_MASK 0x0001FFFFL
++//CP_MEC_ME1_UCODE_ADDR
++#define CP_MEC_ME1_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
++#define CP_MEC_ME1_UCODE_ADDR__UCODE_ADDR_MASK 0x0001FFFFL
++//CP_HYP_MEC1_UCODE_DATA
++#define CP_HYP_MEC1_UCODE_DATA__UCODE_DATA__SHIFT 0x0
++#define CP_HYP_MEC1_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
++//CP_MEC_ME1_UCODE_DATA
++#define CP_MEC_ME1_UCODE_DATA__UCODE_DATA__SHIFT 0x0
++#define CP_MEC_ME1_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
++//CP_HYP_MEC2_UCODE_ADDR
++#define CP_HYP_MEC2_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
++#define CP_HYP_MEC2_UCODE_ADDR__UCODE_ADDR_MASK 0x0001FFFFL
++//CP_MEC_ME2_UCODE_ADDR
++#define CP_MEC_ME2_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
++#define CP_MEC_ME2_UCODE_ADDR__UCODE_ADDR_MASK 0x0001FFFFL
++//CP_HYP_MEC2_UCODE_DATA
++#define CP_HYP_MEC2_UCODE_DATA__UCODE_DATA__SHIFT 0x0
++#define CP_HYP_MEC2_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
++//CP_MEC_ME2_UCODE_DATA
++#define CP_MEC_ME2_UCODE_DATA__UCODE_DATA__SHIFT 0x0
++#define CP_MEC_ME2_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
++//RLC_GPM_UCODE_ADDR
++#define RLC_GPM_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
++#define RLC_GPM_UCODE_ADDR__RESERVED__SHIFT 0xe
++#define RLC_GPM_UCODE_ADDR__UCODE_ADDR_MASK 0x00003FFFL
++#define RLC_GPM_UCODE_ADDR__RESERVED_MASK 0xFFFFC000L
++//RLC_GPM_UCODE_DATA
++#define RLC_GPM_UCODE_DATA__UCODE_DATA__SHIFT 0x0
++#define RLC_GPM_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
++//GRBM_GFX_INDEX_SR_SELECT
++#define GRBM_GFX_INDEX_SR_SELECT__INDEX__SHIFT 0x0
++#define GRBM_GFX_INDEX_SR_SELECT__INDEX_MASK 0x00000007L
++//GRBM_GFX_INDEX_SR_DATA
++#define GRBM_GFX_INDEX_SR_DATA__INSTANCE_INDEX__SHIFT 0x0
++#define GRBM_GFX_INDEX_SR_DATA__SH_INDEX__SHIFT 0x8
++#define GRBM_GFX_INDEX_SR_DATA__SE_INDEX__SHIFT 0x10
++#define GRBM_GFX_INDEX_SR_DATA__SH_BROADCAST_WRITES__SHIFT 0x1d
++#define GRBM_GFX_INDEX_SR_DATA__INSTANCE_BROADCAST_WRITES__SHIFT 0x1e
++#define GRBM_GFX_INDEX_SR_DATA__SE_BROADCAST_WRITES__SHIFT 0x1f
++#define GRBM_GFX_INDEX_SR_DATA__INSTANCE_INDEX_MASK 0x000000FFL
++#define GRBM_GFX_INDEX_SR_DATA__SH_INDEX_MASK 0x0000FF00L
++#define GRBM_GFX_INDEX_SR_DATA__SE_INDEX_MASK 0x00FF0000L
++#define GRBM_GFX_INDEX_SR_DATA__SH_BROADCAST_WRITES_MASK 0x20000000L
++#define GRBM_GFX_INDEX_SR_DATA__INSTANCE_BROADCAST_WRITES_MASK 0x40000000L
++#define GRBM_GFX_INDEX_SR_DATA__SE_BROADCAST_WRITES_MASK 0x80000000L
++//GRBM_GFX_CNTL_SR_SELECT
++#define GRBM_GFX_CNTL_SR_SELECT__INDEX__SHIFT 0x0
++#define GRBM_GFX_CNTL_SR_SELECT__INDEX_MASK 0x00000007L
++//GRBM_GFX_CNTL_SR_DATA
++#define GRBM_GFX_CNTL_SR_DATA__PIPEID__SHIFT 0x0
++#define GRBM_GFX_CNTL_SR_DATA__MEID__SHIFT 0x2
++#define GRBM_GFX_CNTL_SR_DATA__VMID__SHIFT 0x4
++#define GRBM_GFX_CNTL_SR_DATA__QUEUEID__SHIFT 0x8
++#define GRBM_GFX_CNTL_SR_DATA__PIPEID_MASK 0x00000003L
++#define GRBM_GFX_CNTL_SR_DATA__MEID_MASK 0x0000000CL
++#define GRBM_GFX_CNTL_SR_DATA__VMID_MASK 0x000000F0L
++#define GRBM_GFX_CNTL_SR_DATA__QUEUEID_MASK 0x00000700L
++//GRBM_CAM_INDEX
++#define GRBM_CAM_INDEX__CAM_INDEX__SHIFT 0x0
++#define GRBM_CAM_INDEX__CAM_INDEX_MASK 0x00000007L
++//GRBM_HYP_CAM_INDEX
++#define GRBM_HYP_CAM_INDEX__CAM_INDEX__SHIFT 0x0
++#define GRBM_HYP_CAM_INDEX__CAM_INDEX_MASK 0x00000007L
++//GRBM_CAM_DATA
++#define GRBM_CAM_DATA__CAM_ADDR__SHIFT 0x0
++#define GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT 0x10
++#define GRBM_CAM_DATA__CAM_ADDR_MASK 0x0000FFFFL
++#define GRBM_CAM_DATA__CAM_REMAPADDR_MASK 0xFFFF0000L
++//GRBM_HYP_CAM_DATA
++#define GRBM_HYP_CAM_DATA__CAM_ADDR__SHIFT 0x0
++#define GRBM_HYP_CAM_DATA__CAM_REMAPADDR__SHIFT 0x10
++#define GRBM_HYP_CAM_DATA__CAM_ADDR_MASK 0x0000FFFFL
++#define GRBM_HYP_CAM_DATA__CAM_REMAPADDR_MASK 0xFFFF0000L
++//RLC_GPU_IOV_VF_ENABLE
++#define RLC_GPU_IOV_VF_ENABLE__VF_ENABLE__SHIFT 0x0
++#define RLC_GPU_IOV_VF_ENABLE__RESERVED__SHIFT 0x1
++#define RLC_GPU_IOV_VF_ENABLE__VF_NUM__SHIFT 0x10
++#define RLC_GPU_IOV_VF_ENABLE__VF_ENABLE_MASK 0x00000001L
++#define RLC_GPU_IOV_VF_ENABLE__RESERVED_MASK 0x0000FFFEL
++#define RLC_GPU_IOV_VF_ENABLE__VF_NUM_MASK 0xFFFF0000L
++//RLC_GFX_RM_CNTL_ADJ
++#define RLC_GFX_RM_CNTL_ADJ__RLC_GFX_RM_VALID__SHIFT 0x0
++#define RLC_GFX_RM_CNTL_ADJ__RESERVED__SHIFT 0x1
++#define RLC_GFX_RM_CNTL_ADJ__RLC_GFX_RM_VALID_MASK 0x00000001L
++#define RLC_GFX_RM_CNTL_ADJ__RESERVED_MASK 0xFFFFFFFEL
++//RLC_GPU_IOV_CFG_REG6
++#define RLC_GPU_IOV_CFG_REG6__CNTXT_SIZE__SHIFT 0x0
++#define RLC_GPU_IOV_CFG_REG6__CNTXT_LOCATION__SHIFT 0x7
++#define RLC_GPU_IOV_CFG_REG6__RESERVED__SHIFT 0x8
++#define RLC_GPU_IOV_CFG_REG6__CNTXT_OFFSET__SHIFT 0xa
++#define RLC_GPU_IOV_CFG_REG6__CNTXT_SIZE_MASK 0x0000007FL
++#define RLC_GPU_IOV_CFG_REG6__CNTXT_LOCATION_MASK 0x00000080L
++#define RLC_GPU_IOV_CFG_REG6__RESERVED_MASK 0x00000300L
++#define RLC_GPU_IOV_CFG_REG6__CNTXT_OFFSET_MASK 0xFFFFFC00L
++//RLC_GPU_IOV_CFG_REG8
++#define RLC_GPU_IOV_CFG_REG8__VM_BUSY_STATUS__SHIFT 0x0
++#define RLC_GPU_IOV_CFG_REG8__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
++//RLC_RLCV_TIMER_INT_0
++#define RLC_RLCV_TIMER_INT_0__TIMER__SHIFT 0x0
++#define RLC_RLCV_TIMER_INT_0__TIMER_MASK 0xFFFFFFFFL
++//RLC_RLCV_TIMER_CTRL
++#define RLC_RLCV_TIMER_CTRL__TIMER_0_EN__SHIFT 0x0
++#define RLC_RLCV_TIMER_CTRL__RESERVED__SHIFT 0x1
++#define RLC_RLCV_TIMER_CTRL__TIMER_0_EN_MASK 0x00000001L
++#define RLC_RLCV_TIMER_CTRL__RESERVED_MASK 0xFFFFFFFEL
++//RLC_RLCV_TIMER_STAT
++#define RLC_RLCV_TIMER_STAT__TIMER_0_STAT__SHIFT 0x0
++#define RLC_RLCV_TIMER_STAT__RESERVED__SHIFT 0x1
++#define RLC_RLCV_TIMER_STAT__TIMER_0_STAT_MASK 0x00000001L
++#define RLC_RLCV_TIMER_STAT__RESERVED_MASK 0xFFFFFFFEL
++//RLC_GPU_IOV_VF_DOORBELL_STATUS
++#define RLC_GPU_IOV_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS__SHIFT 0x0
++#define RLC_GPU_IOV_VF_DOORBELL_STATUS__RESERVED__SHIFT 0x10
++#define RLC_GPU_IOV_VF_DOORBELL_STATUS__PF_DOORBELL_STATUS__SHIFT 0x1f
++#define RLC_GPU_IOV_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_MASK 0x0000FFFFL
++#define RLC_GPU_IOV_VF_DOORBELL_STATUS__RESERVED_MASK 0x7FFF0000L
++#define RLC_GPU_IOV_VF_DOORBELL_STATUS__PF_DOORBELL_STATUS_MASK 0x80000000L
++//RLC_GPU_IOV_VF_DOORBELL_STATUS_SET
++#define RLC_GPU_IOV_VF_DOORBELL_STATUS_SET__VF_DOORBELL_STATUS_SET__SHIFT 0x0
++#define RLC_GPU_IOV_VF_DOORBELL_STATUS_SET__RESERVED__SHIFT 0x10
++#define RLC_GPU_IOV_VF_DOORBELL_STATUS_SET__PF_DOORBELL_STATUS_SET__SHIFT 0x1f
++#define RLC_GPU_IOV_VF_DOORBELL_STATUS_SET__VF_DOORBELL_STATUS_SET_MASK 0x0000FFFFL
++#define RLC_GPU_IOV_VF_DOORBELL_STATUS_SET__RESERVED_MASK 0x7FFF0000L
++#define RLC_GPU_IOV_VF_DOORBELL_STATUS_SET__PF_DOORBELL_STATUS_SET_MASK 0x80000000L
++//RLC_GPU_IOV_VF_DOORBELL_STATUS_CLR
++#define RLC_GPU_IOV_VF_DOORBELL_STATUS_CLR__VF_DOORBELL_STATUS_CLR__SHIFT 0x0
++#define RLC_GPU_IOV_VF_DOORBELL_STATUS_CLR__RESERVED__SHIFT 0x10
++#define RLC_GPU_IOV_VF_DOORBELL_STATUS_CLR__PF_DOORBELL_STATUS_CLR__SHIFT 0x1f
++#define RLC_GPU_IOV_VF_DOORBELL_STATUS_CLR__VF_DOORBELL_STATUS_CLR_MASK 0x0000FFFFL
++#define RLC_GPU_IOV_VF_DOORBELL_STATUS_CLR__RESERVED_MASK 0x7FFF0000L
++#define RLC_GPU_IOV_VF_DOORBELL_STATUS_CLR__PF_DOORBELL_STATUS_CLR_MASK 0x80000000L
++//RLC_GPU_IOV_VF_MASK
++#define RLC_GPU_IOV_VF_MASK__VF_MASK__SHIFT 0x0
++#define RLC_GPU_IOV_VF_MASK__RESERVED__SHIFT 0x10
++#define RLC_GPU_IOV_VF_MASK__VF_MASK_MASK 0x0000FFFFL
++#define RLC_GPU_IOV_VF_MASK__RESERVED_MASK 0xFFFF0000L
++//RLC_HYP_SEMAPHORE_2
++#define RLC_HYP_SEMAPHORE_2__CLIENT_ID__SHIFT 0x0
++#define RLC_HYP_SEMAPHORE_2__RESERVED__SHIFT 0x5
++#define RLC_HYP_SEMAPHORE_2__CLIENT_ID_MASK 0x0000001FL
++#define RLC_HYP_SEMAPHORE_2__RESERVED_MASK 0xFFFFFFE0L
++//RLC_HYP_SEMAPHORE_3
++#define RLC_HYP_SEMAPHORE_3__CLIENT_ID__SHIFT 0x0
++#define RLC_HYP_SEMAPHORE_3__RESERVED__SHIFT 0x5
++#define RLC_HYP_SEMAPHORE_3__CLIENT_ID_MASK 0x0000001FL
++#define RLC_HYP_SEMAPHORE_3__RESERVED_MASK 0xFFFFFFE0L
++//RLC_CLK_CNTL
++#define RLC_CLK_CNTL__RLC_SRM_CLK_CNTL__SHIFT 0x0
++#define RLC_CLK_CNTL__RLC_SPM_CLK_CNTL__SHIFT 0x1
++#define RLC_CLK_CNTL__RESERVED__SHIFT 0x2
++#define RLC_CLK_CNTL__RLC_SRM_CLK_CNTL_MASK 0x00000001L
++#define RLC_CLK_CNTL__RLC_SPM_CLK_CNTL_MASK 0x00000002L
++#define RLC_CLK_CNTL__RESERVED_MASK 0xFFFFFFFCL
++//RLC_GPU_IOV_SCH_BLOCK
++#define RLC_GPU_IOV_SCH_BLOCK__Sch_Block_ID__SHIFT 0x0
++#define RLC_GPU_IOV_SCH_BLOCK__Sch_Block_Ver__SHIFT 0x4
++#define RLC_GPU_IOV_SCH_BLOCK__Sch_Block_Size__SHIFT 0x8
++#define RLC_GPU_IOV_SCH_BLOCK__RESERVED__SHIFT 0x10
++#define RLC_GPU_IOV_SCH_BLOCK__Sch_Block_ID_MASK 0x0000000FL
++#define RLC_GPU_IOV_SCH_BLOCK__Sch_Block_Ver_MASK 0x000000F0L
++#define RLC_GPU_IOV_SCH_BLOCK__Sch_Block_Size_MASK 0x00007F00L
++#define RLC_GPU_IOV_SCH_BLOCK__RESERVED_MASK 0x7FFF0000L
++//RLC_GPU_IOV_CFG_REG1
++#define RLC_GPU_IOV_CFG_REG1__CMD_TYPE__SHIFT 0x0
++#define RLC_GPU_IOV_CFG_REG1__CMD_EXECUTE__SHIFT 0x4
++#define RLC_GPU_IOV_CFG_REG1__CMD_EXECUTE_INTR_EN__SHIFT 0x5
++#define RLC_GPU_IOV_CFG_REG1__RESERVED__SHIFT 0x6
++#define RLC_GPU_IOV_CFG_REG1__FCN_ID__SHIFT 0x8
++#define RLC_GPU_IOV_CFG_REG1__NEXT_FCN_ID__SHIFT 0x10
++#define RLC_GPU_IOV_CFG_REG1__RESERVED1__SHIFT 0x18
++#define RLC_GPU_IOV_CFG_REG1__CMD_TYPE_MASK 0x0000000FL
++#define RLC_GPU_IOV_CFG_REG1__CMD_EXECUTE_MASK 0x00000010L
++#define RLC_GPU_IOV_CFG_REG1__CMD_EXECUTE_INTR_EN_MASK 0x00000020L
++#define RLC_GPU_IOV_CFG_REG1__RESERVED_MASK 0x000000C0L
++#define RLC_GPU_IOV_CFG_REG1__FCN_ID_MASK 0x0000FF00L
++#define RLC_GPU_IOV_CFG_REG1__NEXT_FCN_ID_MASK 0x00FF0000L
++#define RLC_GPU_IOV_CFG_REG1__RESERVED1_MASK 0xFF000000L
++//RLC_GPU_IOV_CFG_REG2
++#define RLC_GPU_IOV_CFG_REG2__CMD_STATUS__SHIFT 0x0
++#define RLC_GPU_IOV_CFG_REG2__RESERVED__SHIFT 0x4
++#define RLC_GPU_IOV_CFG_REG2__CMD_STATUS_MASK 0x0000000FL
++#define RLC_GPU_IOV_CFG_REG2__RESERVED_MASK 0xFFFFFFF0L
++//RLC_GPU_IOV_VM_BUSY_STATUS
++#define RLC_GPU_IOV_VM_BUSY_STATUS__VM_BUSY_STATUS__SHIFT 0x0
++#define RLC_GPU_IOV_VM_BUSY_STATUS__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
++//RLC_GPU_IOV_SCH_0
++#define RLC_GPU_IOV_SCH_0__ACTIVE_FUNCTIONS__SHIFT 0x0
++#define RLC_GPU_IOV_SCH_0__ACTIVE_FUNCTIONS_MASK 0xFFFFFFFFL
++//RLC_GPU_IOV_ACTIVE_FCN_ID
++#define RLC_GPU_IOV_ACTIVE_FCN_ID__VF_ID__SHIFT 0x0
++#define RLC_GPU_IOV_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4
++#define RLC_GPU_IOV_ACTIVE_FCN_ID__PF_VF__SHIFT 0x1f
++#define RLC_GPU_IOV_ACTIVE_FCN_ID__VF_ID_MASK 0x0000000FL
++#define RLC_GPU_IOV_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L
++#define RLC_GPU_IOV_ACTIVE_FCN_ID__PF_VF_MASK 0x80000000L
++//RLC_GPU_IOV_SCH_3
++#define RLC_GPU_IOV_SCH_3__Time_Quanta_Def__SHIFT 0x0
++#define RLC_GPU_IOV_SCH_3__Time_Quanta_Def_MASK 0xFFFFFFFFL
++//RLC_GPU_IOV_SCH_1
++#define RLC_GPU_IOV_SCH_1__DATA__SHIFT 0x0
++#define RLC_GPU_IOV_SCH_1__DATA_MASK 0xFFFFFFFFL
++//RLC_GPU_IOV_SCH_2
++#define RLC_GPU_IOV_SCH_2__DATA__SHIFT 0x0
++#define RLC_GPU_IOV_SCH_2__DATA_MASK 0xFFFFFFFFL
++//RLC_GPU_IOV_UCODE_ADDR
++#define RLC_GPU_IOV_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
++#define RLC_GPU_IOV_UCODE_ADDR__RESERVED__SHIFT 0xc
++#define RLC_GPU_IOV_UCODE_ADDR__UCODE_ADDR_MASK 0x00000FFFL
++#define RLC_GPU_IOV_UCODE_ADDR__RESERVED_MASK 0xFFFFF000L
++//RLC_GPU_IOV_UCODE_DATA
++#define RLC_GPU_IOV_UCODE_DATA__UCODE_DATA__SHIFT 0x0
++#define RLC_GPU_IOV_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
++//RLC_GPU_IOV_SCRATCH_ADDR
++#define RLC_GPU_IOV_SCRATCH_ADDR__ADDR__SHIFT 0x0
++#define RLC_GPU_IOV_SCRATCH_ADDR__RESERVED__SHIFT 0x9
++#define RLC_GPU_IOV_SCRATCH_ADDR__ADDR_MASK 0x000001FFL
++#define RLC_GPU_IOV_SCRATCH_ADDR__RESERVED_MASK 0xFFFFFE00L
++//RLC_GPU_IOV_SCRATCH_DATA
++#define RLC_GPU_IOV_SCRATCH_DATA__DATA__SHIFT 0x0
++#define RLC_GPU_IOV_SCRATCH_DATA__DATA_MASK 0xFFFFFFFFL
++//RLC_GPU_IOV_F32_CNTL
++#define RLC_GPU_IOV_F32_CNTL__ENABLE__SHIFT 0x0
++#define RLC_GPU_IOV_F32_CNTL__RESERVED__SHIFT 0x1
++#define RLC_GPU_IOV_F32_CNTL__ENABLE_MASK 0x00000001L
++#define RLC_GPU_IOV_F32_CNTL__RESERVED_MASK 0xFFFFFFFEL
++//RLC_GPU_IOV_F32_RESET
++#define RLC_GPU_IOV_F32_RESET__RESET__SHIFT 0x0
++#define RLC_GPU_IOV_F32_RESET__RESERVED__SHIFT 0x1
++#define RLC_GPU_IOV_F32_RESET__RESET_MASK 0x00000001L
++#define RLC_GPU_IOV_F32_RESET__RESERVED_MASK 0xFFFFFFFEL
++//RLC_GPU_IOV_SDMA0_STATUS
++#define RLC_GPU_IOV_SDMA0_STATUS__PREEMPTED__SHIFT 0x0
++#define RLC_GPU_IOV_SDMA0_STATUS__RESERVED__SHIFT 0x1
++#define RLC_GPU_IOV_SDMA0_STATUS__SAVED__SHIFT 0x8
++#define RLC_GPU_IOV_SDMA0_STATUS__RESERVED1__SHIFT 0x9
++#define RLC_GPU_IOV_SDMA0_STATUS__RESTORED__SHIFT 0xc
++#define RLC_GPU_IOV_SDMA0_STATUS__RESERVED2__SHIFT 0xd
++#define RLC_GPU_IOV_SDMA0_STATUS__PREEMPTED_MASK 0x00000001L
++#define RLC_GPU_IOV_SDMA0_STATUS__RESERVED_MASK 0x000000FEL
++#define RLC_GPU_IOV_SDMA0_STATUS__SAVED_MASK 0x00000100L
++#define RLC_GPU_IOV_SDMA0_STATUS__RESERVED1_MASK 0x00000E00L
++#define RLC_GPU_IOV_SDMA0_STATUS__RESTORED_MASK 0x00001000L
++#define RLC_GPU_IOV_SDMA0_STATUS__RESERVED2_MASK 0xFFFFE000L
++//RLC_GPU_IOV_SDMA1_STATUS
++#define RLC_GPU_IOV_SDMA1_STATUS__PREEMPTED__SHIFT 0x0
++#define RLC_GPU_IOV_SDMA1_STATUS__RESERVED__SHIFT 0x1
++#define RLC_GPU_IOV_SDMA1_STATUS__SAVED__SHIFT 0x8
++#define RLC_GPU_IOV_SDMA1_STATUS__RESERVED1__SHIFT 0x9
++#define RLC_GPU_IOV_SDMA1_STATUS__RESTORED__SHIFT 0xc
++#define RLC_GPU_IOV_SDMA1_STATUS__RESERVED2__SHIFT 0xd
++#define RLC_GPU_IOV_SDMA1_STATUS__PREEMPTED_MASK 0x00000001L
++#define RLC_GPU_IOV_SDMA1_STATUS__RESERVED_MASK 0x000000FEL
++#define RLC_GPU_IOV_SDMA1_STATUS__SAVED_MASK 0x00000100L
++#define RLC_GPU_IOV_SDMA1_STATUS__RESERVED1_MASK 0x00000E00L
++#define RLC_GPU_IOV_SDMA1_STATUS__RESTORED_MASK 0x00001000L
++#define RLC_GPU_IOV_SDMA1_STATUS__RESERVED2_MASK 0xFFFFE000L
++//RLC_GPU_IOV_SMU_RESPONSE
++#define RLC_GPU_IOV_SMU_RESPONSE__RESP__SHIFT 0x0
++#define RLC_GPU_IOV_SMU_RESPONSE__RESP_MASK 0xFFFFFFFFL
++//RLC_GPU_IOV_VIRT_RESET_REQ
++#define RLC_GPU_IOV_VIRT_RESET_REQ__VF_FLR__SHIFT 0x0
++#define RLC_GPU_IOV_VIRT_RESET_REQ__RESERVED__SHIFT 0x10
++#define RLC_GPU_IOV_VIRT_RESET_REQ__SOFT_PF_FLR__SHIFT 0x1f
++#define RLC_GPU_IOV_VIRT_RESET_REQ__VF_FLR_MASK 0x0000FFFFL
++#define RLC_GPU_IOV_VIRT_RESET_REQ__RESERVED_MASK 0x7FFF0000L
++#define RLC_GPU_IOV_VIRT_RESET_REQ__SOFT_PF_FLR_MASK 0x80000000L
++//RLC_GPU_IOV_RLC_RESPONSE
++#define RLC_GPU_IOV_RLC_RESPONSE__RESP__SHIFT 0x0
++#define RLC_GPU_IOV_RLC_RESPONSE__RESP_MASK 0xFFFFFFFFL
++//RLC_GPU_IOV_INT_DISABLE
++#define RLC_GPU_IOV_INT_DISABLE__DISABLE__SHIFT 0x0
++#define RLC_GPU_IOV_INT_DISABLE__DISABLE_MASK 0xFFFFFFFFL
++//RLC_GPU_IOV_INT_FORCE
++#define RLC_GPU_IOV_INT_FORCE__FORCE__SHIFT 0x0
++#define RLC_GPU_IOV_INT_FORCE__FORCE_MASK 0xFFFFFFFFL
++//RLC_GPU_IOV_SDMA0_BUSY_STATUS
++#define RLC_GPU_IOV_SDMA0_BUSY_STATUS__VM_BUSY_STATUS__SHIFT 0x0
++#define RLC_GPU_IOV_SDMA0_BUSY_STATUS__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
++//RLC_GPU_IOV_SDMA1_BUSY_STATUS
++#define RLC_GPU_IOV_SDMA1_BUSY_STATUS__VM_BUSY_STATUS__SHIFT 0x0
++#define RLC_GPU_IOV_SDMA1_BUSY_STATUS__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
++
++
++// addressBlock: gccacind
++//GC_CAC_CNTL
++#define GC_CAC_CNTL__CAC_ENABLE__SHIFT 0x0
++#define GC_CAC_CNTL__CAC_THRESHOLD__SHIFT 0x1
++#define GC_CAC_CNTL__CAC_BLOCK_ID__SHIFT 0x11
++#define GC_CAC_CNTL__CAC_SIGNAL_ID__SHIFT 0x17
++#define GC_CAC_CNTL__UNUSED_0__SHIFT 0x1f
++#define GC_CAC_CNTL__CAC_ENABLE_MASK 0x00000001L
++#define GC_CAC_CNTL__CAC_THRESHOLD_MASK 0x0001FFFEL
++#define GC_CAC_CNTL__CAC_BLOCK_ID_MASK 0x007E0000L
++#define GC_CAC_CNTL__CAC_SIGNAL_ID_MASK 0x7F800000L
++#define GC_CAC_CNTL__UNUSED_0_MASK 0x80000000L
++//GC_CAC_OVR_SEL
++#define GC_CAC_OVR_SEL__CAC_OVR_SEL__SHIFT 0x0
++#define GC_CAC_OVR_SEL__CAC_OVR_SEL_MASK 0xFFFFFFFFL
++//GC_CAC_OVR_VAL
++#define GC_CAC_OVR_VAL__CAC_OVR_VAL__SHIFT 0x0
++#define GC_CAC_OVR_VAL__CAC_OVR_VAL_MASK 0xFFFFFFFFL
++//GC_CAC_WEIGHT_BCI_0
++#define GC_CAC_WEIGHT_BCI_0__WEIGHT_BCI_SIG0__SHIFT 0x0
++#define GC_CAC_WEIGHT_BCI_0__WEIGHT_BCI_SIG1__SHIFT 0x10
++#define GC_CAC_WEIGHT_BCI_0__WEIGHT_BCI_SIG0_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_BCI_0__WEIGHT_BCI_SIG1_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_CB_0
++#define GC_CAC_WEIGHT_CB_0__WEIGHT_CB_SIG0__SHIFT 0x0
++#define GC_CAC_WEIGHT_CB_0__WEIGHT_CB_SIG1__SHIFT 0x10
++#define GC_CAC_WEIGHT_CB_0__WEIGHT_CB_SIG0_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_CB_0__WEIGHT_CB_SIG1_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_CB_1
++#define GC_CAC_WEIGHT_CB_1__WEIGHT_CB_SIG2__SHIFT 0x0
++#define GC_CAC_WEIGHT_CB_1__WEIGHT_CB_SIG3__SHIFT 0x10
++#define GC_CAC_WEIGHT_CB_1__WEIGHT_CB_SIG2_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_CB_1__WEIGHT_CB_SIG3_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_CP_0
++#define GC_CAC_WEIGHT_CP_0__WEIGHT_CP_SIG0__SHIFT 0x0
++#define GC_CAC_WEIGHT_CP_0__WEIGHT_CP_SIG1__SHIFT 0x10
++#define GC_CAC_WEIGHT_CP_0__WEIGHT_CP_SIG0_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_CP_0__WEIGHT_CP_SIG1_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_CP_1
++#define GC_CAC_WEIGHT_CP_1__WEIGHT_CP_SIG2__SHIFT 0x0
++#define GC_CAC_WEIGHT_CP_1__UNUSED_0__SHIFT 0x10
++#define GC_CAC_WEIGHT_CP_1__WEIGHT_CP_SIG2_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_CP_1__UNUSED_0_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_DB_0
++#define GC_CAC_WEIGHT_DB_0__WEIGHT_DB_SIG0__SHIFT 0x0
++#define GC_CAC_WEIGHT_DB_0__WEIGHT_DB_SIG1__SHIFT 0x10
++#define GC_CAC_WEIGHT_DB_0__WEIGHT_DB_SIG0_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_DB_0__WEIGHT_DB_SIG1_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_DB_1
++#define GC_CAC_WEIGHT_DB_1__WEIGHT_DB_SIG2__SHIFT 0x0
++#define GC_CAC_WEIGHT_DB_1__WEIGHT_DB_SIG3__SHIFT 0x10
++#define GC_CAC_WEIGHT_DB_1__WEIGHT_DB_SIG2_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_DB_1__WEIGHT_DB_SIG3_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_GDS_0
++#define GC_CAC_WEIGHT_GDS_0__WEIGHT_GDS_SIG0__SHIFT 0x0
++#define GC_CAC_WEIGHT_GDS_0__WEIGHT_GDS_SIG1__SHIFT 0x10
++#define GC_CAC_WEIGHT_GDS_0__WEIGHT_GDS_SIG0_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_GDS_0__WEIGHT_GDS_SIG1_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_GDS_1
++#define GC_CAC_WEIGHT_GDS_1__WEIGHT_GDS_SIG2__SHIFT 0x0
++#define GC_CAC_WEIGHT_GDS_1__WEIGHT_GDS_SIG3__SHIFT 0x10
++#define GC_CAC_WEIGHT_GDS_1__WEIGHT_GDS_SIG2_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_GDS_1__WEIGHT_GDS_SIG3_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_IA_0
++#define GC_CAC_WEIGHT_IA_0__WEIGHT_IA_SIG0__SHIFT 0x0
++#define GC_CAC_WEIGHT_IA_0__UNUSED_0__SHIFT 0x10
++#define GC_CAC_WEIGHT_IA_0__WEIGHT_IA_SIG0_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_IA_0__UNUSED_0_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_LDS_0
++#define GC_CAC_WEIGHT_LDS_0__WEIGHT_LDS_SIG0__SHIFT 0x0
++#define GC_CAC_WEIGHT_LDS_0__WEIGHT_LDS_SIG1__SHIFT 0x10
++#define GC_CAC_WEIGHT_LDS_0__WEIGHT_LDS_SIG0_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_LDS_0__WEIGHT_LDS_SIG1_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_LDS_1
++#define GC_CAC_WEIGHT_LDS_1__WEIGHT_LDS_SIG2__SHIFT 0x0
++#define GC_CAC_WEIGHT_LDS_1__WEIGHT_LDS_SIG3__SHIFT 0x10
++#define GC_CAC_WEIGHT_LDS_1__WEIGHT_LDS_SIG2_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_LDS_1__WEIGHT_LDS_SIG3_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_PA_0
++#define GC_CAC_WEIGHT_PA_0__WEIGHT_PA_SIG0__SHIFT 0x0
++#define GC_CAC_WEIGHT_PA_0__WEIGHT_PA_SIG1__SHIFT 0x10
++#define GC_CAC_WEIGHT_PA_0__WEIGHT_PA_SIG0_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_PA_0__WEIGHT_PA_SIG1_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_PC_0
++#define GC_CAC_WEIGHT_PC_0__WEIGHT_PC_SIG0__SHIFT 0x0
++#define GC_CAC_WEIGHT_PC_0__UNUSED_0__SHIFT 0x10
++#define GC_CAC_WEIGHT_PC_0__WEIGHT_PC_SIG0_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_PC_0__UNUSED_0_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_SC_0
++#define GC_CAC_WEIGHT_SC_0__WEIGHT_SC_SIG0__SHIFT 0x0
++#define GC_CAC_WEIGHT_SC_0__UNUSED_0__SHIFT 0x10
++#define GC_CAC_WEIGHT_SC_0__WEIGHT_SC_SIG0_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_SC_0__UNUSED_0_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_SPI_0
++#define GC_CAC_WEIGHT_SPI_0__WEIGHT_SPI_SIG0__SHIFT 0x0
++#define GC_CAC_WEIGHT_SPI_0__WEIGHT_SPI_SIG1__SHIFT 0x10
++#define GC_CAC_WEIGHT_SPI_0__WEIGHT_SPI_SIG0_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_SPI_0__WEIGHT_SPI_SIG1_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_SPI_1
++#define GC_CAC_WEIGHT_SPI_1__WEIGHT_SPI_SIG2__SHIFT 0x0
++#define GC_CAC_WEIGHT_SPI_1__WEIGHT_SPI_SIG3__SHIFT 0x10
++#define GC_CAC_WEIGHT_SPI_1__WEIGHT_SPI_SIG2_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_SPI_1__WEIGHT_SPI_SIG3_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_SPI_2
++#define GC_CAC_WEIGHT_SPI_2__WEIGHT_SPI_SIG4__SHIFT 0x0
++#define GC_CAC_WEIGHT_SPI_2__WEIGHT_SPI_SIG5__SHIFT 0x10
++#define GC_CAC_WEIGHT_SPI_2__WEIGHT_SPI_SIG4_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_SPI_2__WEIGHT_SPI_SIG5_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_SQ_0
++#define GC_CAC_WEIGHT_SQ_0__WEIGHT_SQ_SIG0__SHIFT 0x0
++#define GC_CAC_WEIGHT_SQ_0__WEIGHT_SQ_SIG1__SHIFT 0x10
++#define GC_CAC_WEIGHT_SQ_0__WEIGHT_SQ_SIG0_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_SQ_0__WEIGHT_SQ_SIG1_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_SQ_1
++#define GC_CAC_WEIGHT_SQ_1__WEIGHT_SQ_SIG2__SHIFT 0x0
++#define GC_CAC_WEIGHT_SQ_1__WEIGHT_SQ_SIG3__SHIFT 0x10
++#define GC_CAC_WEIGHT_SQ_1__WEIGHT_SQ_SIG2_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_SQ_1__WEIGHT_SQ_SIG3_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_SQ_2
++#define GC_CAC_WEIGHT_SQ_2__WEIGHT_SQ_SIG4__SHIFT 0x0
++#define GC_CAC_WEIGHT_SQ_2__WEIGHT_SQ_SIG5__SHIFT 0x10
++#define GC_CAC_WEIGHT_SQ_2__WEIGHT_SQ_SIG4_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_SQ_2__WEIGHT_SQ_SIG5_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_SQ_3
++#define GC_CAC_WEIGHT_SQ_3__WEIGHT_SQ_SIG6__SHIFT 0x0
++#define GC_CAC_WEIGHT_SQ_3__WEIGHT_SQ_SIG7__SHIFT 0x10
++#define GC_CAC_WEIGHT_SQ_3__WEIGHT_SQ_SIG6_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_SQ_3__WEIGHT_SQ_SIG7_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_SQ_4
++#define GC_CAC_WEIGHT_SQ_4__WEIGHT_SQ_SIG8__SHIFT 0x0
++#define GC_CAC_WEIGHT_SQ_4__UNUSED_0__SHIFT 0x10
++#define GC_CAC_WEIGHT_SQ_4__WEIGHT_SQ_SIG8_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_SQ_4__UNUSED_0_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_SX_0
++#define GC_CAC_WEIGHT_SX_0__WEIGHT_SX_SIG0__SHIFT 0x0
++#define GC_CAC_WEIGHT_SX_0__UNUSED_0__SHIFT 0x10
++#define GC_CAC_WEIGHT_SX_0__WEIGHT_SX_SIG0_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_SX_0__UNUSED_0_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_SXRB_0
++#define GC_CAC_WEIGHT_SXRB_0__WEIGHT_SXRB_SIG0__SHIFT 0x0
++#define GC_CAC_WEIGHT_SXRB_0__WEIGHT_SXRB_SIG1__SHIFT 0x10
++#define GC_CAC_WEIGHT_SXRB_0__WEIGHT_SXRB_SIG0_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_SXRB_0__WEIGHT_SXRB_SIG1_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_TA_0
++#define GC_CAC_WEIGHT_TA_0__WEIGHT_TA_SIG0__SHIFT 0x0
++#define GC_CAC_WEIGHT_TA_0__UNUSED_0__SHIFT 0x10
++#define GC_CAC_WEIGHT_TA_0__WEIGHT_TA_SIG0_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_TA_0__UNUSED_0_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_TCC_0
++#define GC_CAC_WEIGHT_TCC_0__WEIGHT_TCC_SIG0__SHIFT 0x0
++#define GC_CAC_WEIGHT_TCC_0__WEIGHT_TCC_SIG1__SHIFT 0x10
++#define GC_CAC_WEIGHT_TCC_0__WEIGHT_TCC_SIG0_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_TCC_0__WEIGHT_TCC_SIG1_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_TCC_1
++#define GC_CAC_WEIGHT_TCC_1__WEIGHT_TCC_SIG2__SHIFT 0x0
++#define GC_CAC_WEIGHT_TCC_1__WEIGHT_TCC_SIG3__SHIFT 0x10
++#define GC_CAC_WEIGHT_TCC_1__WEIGHT_TCC_SIG2_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_TCC_1__WEIGHT_TCC_SIG3_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_TCC_2
++#define GC_CAC_WEIGHT_TCC_2__WEIGHT_TCC_SIG4__SHIFT 0x0
++#define GC_CAC_WEIGHT_TCC_2__UNUSED_0__SHIFT 0x10
++#define GC_CAC_WEIGHT_TCC_2__WEIGHT_TCC_SIG4_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_TCC_2__UNUSED_0_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_TCP_0
++#define GC_CAC_WEIGHT_TCP_0__WEIGHT_TCP_SIG0__SHIFT 0x0
++#define GC_CAC_WEIGHT_TCP_0__WEIGHT_TCP_SIG1__SHIFT 0x10
++#define GC_CAC_WEIGHT_TCP_0__WEIGHT_TCP_SIG0_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_TCP_0__WEIGHT_TCP_SIG1_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_TCP_1
++#define GC_CAC_WEIGHT_TCP_1__WEIGHT_TCP_SIG2__SHIFT 0x0
++#define GC_CAC_WEIGHT_TCP_1__WEIGHT_TCP_SIG3__SHIFT 0x10
++#define GC_CAC_WEIGHT_TCP_1__WEIGHT_TCP_SIG2_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_TCP_1__WEIGHT_TCP_SIG3_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_TCP_2
++#define GC_CAC_WEIGHT_TCP_2__WEIGHT_TCP_SIG4__SHIFT 0x0
++#define GC_CAC_WEIGHT_TCP_2__UNUSED_0__SHIFT 0x10
++#define GC_CAC_WEIGHT_TCP_2__WEIGHT_TCP_SIG4_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_TCP_2__UNUSED_0_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_TD_0
++#define GC_CAC_WEIGHT_TD_0__WEIGHT_TD_SIG0__SHIFT 0x0
++#define GC_CAC_WEIGHT_TD_0__WEIGHT_TD_SIG1__SHIFT 0x10
++#define GC_CAC_WEIGHT_TD_0__WEIGHT_TD_SIG0_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_TD_0__WEIGHT_TD_SIG1_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_TD_1
++#define GC_CAC_WEIGHT_TD_1__WEIGHT_TD_SIG2__SHIFT 0x0
++#define GC_CAC_WEIGHT_TD_1__WEIGHT_TD_SIG3__SHIFT 0x10
++#define GC_CAC_WEIGHT_TD_1__WEIGHT_TD_SIG2_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_TD_1__WEIGHT_TD_SIG3_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_TD_2
++#define GC_CAC_WEIGHT_TD_2__WEIGHT_TD_SIG4__SHIFT 0x0
++#define GC_CAC_WEIGHT_TD_2__WEIGHT_TD_SIG5__SHIFT 0x10
++#define GC_CAC_WEIGHT_TD_2__WEIGHT_TD_SIG4_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_TD_2__WEIGHT_TD_SIG5_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_VGT_0
++#define GC_CAC_WEIGHT_VGT_0__WEIGHT_VGT_SIG0__SHIFT 0x0
++#define GC_CAC_WEIGHT_VGT_0__WEIGHT_VGT_SIG1__SHIFT 0x10
++#define GC_CAC_WEIGHT_VGT_0__WEIGHT_VGT_SIG0_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_VGT_0__WEIGHT_VGT_SIG1_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_VGT_1
++#define GC_CAC_WEIGHT_VGT_1__WEIGHT_VGT_SIG2__SHIFT 0x0
++#define GC_CAC_WEIGHT_VGT_1__UNUSED_0__SHIFT 0x10
++#define GC_CAC_WEIGHT_VGT_1__WEIGHT_VGT_SIG2_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_VGT_1__UNUSED_0_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_WD_0
++#define GC_CAC_WEIGHT_WD_0__WEIGHT_WD_SIG0__SHIFT 0x0
++#define GC_CAC_WEIGHT_WD_0__UNUSED_0__SHIFT 0x10
++#define GC_CAC_WEIGHT_WD_0__WEIGHT_WD_SIG0_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_WD_0__UNUSED_0_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_CU_0
++#define GC_CAC_WEIGHT_CU_0__WEIGHT_CU_SIG0__SHIFT 0x0
++#define GC_CAC_WEIGHT_CU_0__WEIGHT_CU_SIG1__SHIFT 0x10
++#define GC_CAC_WEIGHT_CU_0__WEIGHT_CU_SIG0_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_CU_0__WEIGHT_CU_SIG1_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_CU_1
++#define GC_CAC_WEIGHT_CU_1__WEIGHT_CU_SIG2__SHIFT 0x0
++#define GC_CAC_WEIGHT_CU_1__WEIGHT_CU_SIG3__SHIFT 0x10
++#define GC_CAC_WEIGHT_CU_1__WEIGHT_CU_SIG2_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_CU_1__WEIGHT_CU_SIG3_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_CU_2
++#define GC_CAC_WEIGHT_CU_2__WEIGHT_CU_SIG4__SHIFT 0x0
++#define GC_CAC_WEIGHT_CU_2__WEIGHT_CU_SIG5__SHIFT 0x10
++#define GC_CAC_WEIGHT_CU_2__WEIGHT_CU_SIG4_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_CU_2__WEIGHT_CU_SIG5_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_CU_3
++#define GC_CAC_WEIGHT_CU_3__WEIGHT_CU_SIG6__SHIFT 0x0
++#define GC_CAC_WEIGHT_CU_3__WEIGHT_CU_SIG7__SHIFT 0x10
++#define GC_CAC_WEIGHT_CU_3__WEIGHT_CU_SIG6_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_CU_3__WEIGHT_CU_SIG7_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_CU_4
++#define GC_CAC_WEIGHT_CU_4__WEIGHT_CU_SIG8__SHIFT 0x0
++#define GC_CAC_WEIGHT_CU_4__WEIGHT_CU_SIG9__SHIFT 0x10
++#define GC_CAC_WEIGHT_CU_4__WEIGHT_CU_SIG8_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_CU_4__WEIGHT_CU_SIG9_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_CU_5
++#define GC_CAC_WEIGHT_CU_5__WEIGHT_CU_SIG10__SHIFT 0x0
++#define GC_CAC_WEIGHT_CU_5__WEIGHT_CU_SIG11__SHIFT 0x10
++#define GC_CAC_WEIGHT_CU_5__WEIGHT_CU_SIG10_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_CU_5__WEIGHT_CU_SIG11_MASK 0xFFFF0000L
++//GC_CAC_ACC_BCI0
++#define GC_CAC_ACC_BCI0__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_BCI0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_CB0
++#define GC_CAC_ACC_CB0__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_CB0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_CB1
++#define GC_CAC_ACC_CB1__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_CB1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_CB2
++#define GC_CAC_ACC_CB2__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_CB2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_CB3
++#define GC_CAC_ACC_CB3__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_CB3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_CP0
++#define GC_CAC_ACC_CP0__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_CP0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_CP1
++#define GC_CAC_ACC_CP1__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_CP1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_CP2
++#define GC_CAC_ACC_CP2__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_CP2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_DB0
++#define GC_CAC_ACC_DB0__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_DB0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_DB1
++#define GC_CAC_ACC_DB1__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_DB1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_DB2
++#define GC_CAC_ACC_DB2__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_DB2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_DB3
++#define GC_CAC_ACC_DB3__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_DB3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_GDS0
++#define GC_CAC_ACC_GDS0__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_GDS0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_GDS1
++#define GC_CAC_ACC_GDS1__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_GDS1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_GDS2
++#define GC_CAC_ACC_GDS2__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_GDS2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_GDS3
++#define GC_CAC_ACC_GDS3__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_GDS3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_IA0
++#define GC_CAC_ACC_IA0__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_IA0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_LDS0
++#define GC_CAC_ACC_LDS0__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_LDS0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_LDS1
++#define GC_CAC_ACC_LDS1__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_LDS1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_LDS2
++#define GC_CAC_ACC_LDS2__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_LDS2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_LDS3
++#define GC_CAC_ACC_LDS3__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_LDS3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_PA0
++#define GC_CAC_ACC_PA0__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_PA0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_PA1
++#define GC_CAC_ACC_PA1__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_PA1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_PC0
++#define GC_CAC_ACC_PC0__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_PC0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_SC0
++#define GC_CAC_ACC_SC0__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_SC0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_SPI0
++#define GC_CAC_ACC_SPI0__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_SPI0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_SPI1
++#define GC_CAC_ACC_SPI1__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_SPI1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_SPI2
++#define GC_CAC_ACC_SPI2__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_SPI2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_SPI3
++#define GC_CAC_ACC_SPI3__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_SPI3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_SPI4
++#define GC_CAC_ACC_SPI4__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_SPI4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_SPI5
++#define GC_CAC_ACC_SPI5__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_SPI5__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_WEIGHT_PG_0
++#define GC_CAC_WEIGHT_PG_0__WEIGHT_PG_SIG0__SHIFT 0x0
++#define GC_CAC_WEIGHT_PG_0__unused__SHIFT 0x10
++#define GC_CAC_WEIGHT_PG_0__WEIGHT_PG_SIG0_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_PG_0__unused_MASK 0xFFFF0000L
++//GC_CAC_ACC_PG0
++#define GC_CAC_ACC_PG0__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_PG0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_OVRD_PG
++#define GC_CAC_OVRD_PG__OVRRD_SELECT__SHIFT 0x0
++#define GC_CAC_OVRD_PG__OVRRD_VALUE__SHIFT 0x10
++#define GC_CAC_OVRD_PG__OVRRD_SELECT_MASK 0x0000FFFFL
++#define GC_CAC_OVRD_PG__OVRRD_VALUE_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_UTCL2_ATCL2_0
++#define GC_CAC_WEIGHT_UTCL2_ATCL2_0__WEIGHT_UTCL2_ATCL2_SIG0__SHIFT 0x0
++#define GC_CAC_WEIGHT_UTCL2_ATCL2_0__WEIGHT_UTCL2_ATCL2_SIG1__SHIFT 0x10
++#define GC_CAC_WEIGHT_UTCL2_ATCL2_0__WEIGHT_UTCL2_ATCL2_SIG0_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_UTCL2_ATCL2_0__WEIGHT_UTCL2_ATCL2_SIG1_MASK 0xFFFF0000L
++//GC_CAC_ACC_EA0
++#define GC_CAC_ACC_EA0__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_EA0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_EA1
++#define GC_CAC_ACC_EA1__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_EA1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_EA2
++#define GC_CAC_ACC_EA2__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_EA2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_EA3
++#define GC_CAC_ACC_EA3__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_EA3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_UTCL2_ATCL20
++#define GC_CAC_ACC_UTCL2_ATCL20__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_UTCL2_ATCL20__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_OVRD_EA
++#define GC_CAC_OVRD_EA__OVRRD_SELECT__SHIFT 0x0
++#define GC_CAC_OVRD_EA__OVRRD_VALUE__SHIFT 0x6
++#define GC_CAC_OVRD_EA__OVRRD_SELECT_MASK 0x0000003FL
++#define GC_CAC_OVRD_EA__OVRRD_VALUE_MASK 0x00000FC0L
++//GC_CAC_OVRD_UTCL2_ATCL2
++#define GC_CAC_OVRD_UTCL2_ATCL2__OVRRD_SELECT__SHIFT 0x0
++#define GC_CAC_OVRD_UTCL2_ATCL2__OVRRD_VALUE__SHIFT 0x5
++#define GC_CAC_OVRD_UTCL2_ATCL2__OVRRD_SELECT_MASK 0x0000001FL
++#define GC_CAC_OVRD_UTCL2_ATCL2__OVRRD_VALUE_MASK 0x000003E0L
++//GC_CAC_WEIGHT_EA_0
++#define GC_CAC_WEIGHT_EA_0__WEIGHT_EA_SIG0__SHIFT 0x0
++#define GC_CAC_WEIGHT_EA_0__WEIGHT_EA_SIG1__SHIFT 0x10
++#define GC_CAC_WEIGHT_EA_0__WEIGHT_EA_SIG0_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_EA_0__WEIGHT_EA_SIG1_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_EA_1
++#define GC_CAC_WEIGHT_EA_1__WEIGHT_EA_SIG2__SHIFT 0x0
++#define GC_CAC_WEIGHT_EA_1__WEIGHT_EA_SIG3__SHIFT 0x10
++#define GC_CAC_WEIGHT_EA_1__WEIGHT_EA_SIG2_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_EA_1__WEIGHT_EA_SIG3_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_RMI_0
++#define GC_CAC_WEIGHT_RMI_0__WEIGHT_RMI_SIG0__SHIFT 0x0
++#define GC_CAC_WEIGHT_RMI_0__UNUSED__SHIFT 0x10
++#define GC_CAC_WEIGHT_RMI_0__WEIGHT_RMI_SIG0_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_RMI_0__UNUSED_MASK 0xFFFF0000L
++//GC_CAC_ACC_RMI0
++#define GC_CAC_ACC_RMI0__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_RMI0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_OVRD_RMI
++#define GC_CAC_OVRD_RMI__OVRRD_SELECT__SHIFT 0x0
++#define GC_CAC_OVRD_RMI__OVRRD_VALUE__SHIFT 0x1
++#define GC_CAC_OVRD_RMI__OVRRD_SELECT_MASK 0x00000001L
++#define GC_CAC_OVRD_RMI__OVRRD_VALUE_MASK 0x00000002L
++//GC_CAC_WEIGHT_UTCL2_ATCL2_1
++#define GC_CAC_WEIGHT_UTCL2_ATCL2_1__WEIGHT_UTCL2_ATCL2_SIG2__SHIFT 0x0
++#define GC_CAC_WEIGHT_UTCL2_ATCL2_1__WEIGHT_UTCL2_ATCL2_SIG3__SHIFT 0x10
++#define GC_CAC_WEIGHT_UTCL2_ATCL2_1__WEIGHT_UTCL2_ATCL2_SIG2_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_UTCL2_ATCL2_1__WEIGHT_UTCL2_ATCL2_SIG3_MASK 0xFFFF0000L
++//GC_CAC_ACC_UTCL2_ATCL21
++#define GC_CAC_ACC_UTCL2_ATCL21__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_UTCL2_ATCL21__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_UTCL2_ATCL22
++#define GC_CAC_ACC_UTCL2_ATCL22__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_UTCL2_ATCL22__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_UTCL2_ATCL23
++#define GC_CAC_ACC_UTCL2_ATCL23__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_UTCL2_ATCL23__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_EA4
++#define GC_CAC_ACC_EA4__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_EA4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_EA5
++#define GC_CAC_ACC_EA5__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_EA5__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_WEIGHT_EA_2
++#define GC_CAC_WEIGHT_EA_2__WEIGHT_EA_SIG4__SHIFT 0x0
++#define GC_CAC_WEIGHT_EA_2__WEIGHT_EA_SIG5__SHIFT 0x10
++#define GC_CAC_WEIGHT_EA_2__WEIGHT_EA_SIG4_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_EA_2__WEIGHT_EA_SIG5_MASK 0xFFFF0000L
++//GC_CAC_ACC_SQ0_LOWER
++#define GC_CAC_ACC_SQ0_LOWER__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_SQ0_LOWER__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_SQ0_UPPER
++#define GC_CAC_ACC_SQ0_UPPER__ACCUMULATOR_39_32__SHIFT 0x0
++#define GC_CAC_ACC_SQ0_UPPER__UNUSED_0__SHIFT 0x8
++#define GC_CAC_ACC_SQ0_UPPER__ACCUMULATOR_39_32_MASK 0x000000FFL
++#define GC_CAC_ACC_SQ0_UPPER__UNUSED_0_MASK 0xFFFFFF00L
++//GC_CAC_ACC_SQ1_LOWER
++#define GC_CAC_ACC_SQ1_LOWER__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_SQ1_LOWER__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_SQ1_UPPER
++#define GC_CAC_ACC_SQ1_UPPER__ACCUMULATOR_39_32__SHIFT 0x0
++#define GC_CAC_ACC_SQ1_UPPER__UNUSED_0__SHIFT 0x8
++#define GC_CAC_ACC_SQ1_UPPER__ACCUMULATOR_39_32_MASK 0x000000FFL
++#define GC_CAC_ACC_SQ1_UPPER__UNUSED_0_MASK 0xFFFFFF00L
++//GC_CAC_ACC_SQ2_LOWER
++#define GC_CAC_ACC_SQ2_LOWER__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_SQ2_LOWER__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_SQ2_UPPER
++#define GC_CAC_ACC_SQ2_UPPER__ACCUMULATOR_39_32__SHIFT 0x0
++#define GC_CAC_ACC_SQ2_UPPER__UNUSED_0__SHIFT 0x8
++#define GC_CAC_ACC_SQ2_UPPER__ACCUMULATOR_39_32_MASK 0x000000FFL
++#define GC_CAC_ACC_SQ2_UPPER__UNUSED_0_MASK 0xFFFFFF00L
++//GC_CAC_ACC_SQ3_LOWER
++#define GC_CAC_ACC_SQ3_LOWER__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_SQ3_LOWER__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_SQ3_UPPER
++#define GC_CAC_ACC_SQ3_UPPER__ACCUMULATOR_39_32__SHIFT 0x0
++#define GC_CAC_ACC_SQ3_UPPER__UNUSED_0__SHIFT 0x8
++#define GC_CAC_ACC_SQ3_UPPER__ACCUMULATOR_39_32_MASK 0x000000FFL
++#define GC_CAC_ACC_SQ3_UPPER__UNUSED_0_MASK 0xFFFFFF00L
++//GC_CAC_ACC_SQ4_LOWER
++#define GC_CAC_ACC_SQ4_LOWER__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_SQ4_LOWER__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_SQ4_UPPER
++#define GC_CAC_ACC_SQ4_UPPER__ACCUMULATOR_39_32__SHIFT 0x0
++#define GC_CAC_ACC_SQ4_UPPER__UNUSED_0__SHIFT 0x8
++#define GC_CAC_ACC_SQ4_UPPER__ACCUMULATOR_39_32_MASK 0x000000FFL
++#define GC_CAC_ACC_SQ4_UPPER__UNUSED_0_MASK 0xFFFFFF00L
++//GC_CAC_ACC_SQ5_LOWER
++#define GC_CAC_ACC_SQ5_LOWER__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_SQ5_LOWER__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_SQ5_UPPER
++#define GC_CAC_ACC_SQ5_UPPER__ACCUMULATOR_39_32__SHIFT 0x0
++#define GC_CAC_ACC_SQ5_UPPER__UNUSED_0__SHIFT 0x8
++#define GC_CAC_ACC_SQ5_UPPER__ACCUMULATOR_39_32_MASK 0x000000FFL
++#define GC_CAC_ACC_SQ5_UPPER__UNUSED_0_MASK 0xFFFFFF00L
++//GC_CAC_ACC_SQ6_LOWER
++#define GC_CAC_ACC_SQ6_LOWER__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_SQ6_LOWER__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_SQ6_UPPER
++#define GC_CAC_ACC_SQ6_UPPER__ACCUMULATOR_39_32__SHIFT 0x0
++#define GC_CAC_ACC_SQ6_UPPER__UNUSED_0__SHIFT 0x8
++#define GC_CAC_ACC_SQ6_UPPER__ACCUMULATOR_39_32_MASK 0x000000FFL
++#define GC_CAC_ACC_SQ6_UPPER__UNUSED_0_MASK 0xFFFFFF00L
++//GC_CAC_ACC_SQ7_LOWER
++#define GC_CAC_ACC_SQ7_LOWER__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_SQ7_LOWER__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_SQ7_UPPER
++#define GC_CAC_ACC_SQ7_UPPER__ACCUMULATOR_39_32__SHIFT 0x0
++#define GC_CAC_ACC_SQ7_UPPER__UNUSED_0__SHIFT 0x8
++#define GC_CAC_ACC_SQ7_UPPER__ACCUMULATOR_39_32_MASK 0x000000FFL
++#define GC_CAC_ACC_SQ7_UPPER__UNUSED_0_MASK 0xFFFFFF00L
++//GC_CAC_ACC_SQ8_LOWER
++#define GC_CAC_ACC_SQ8_LOWER__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_SQ8_LOWER__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_SQ8_UPPER
++#define GC_CAC_ACC_SQ8_UPPER__ACCUMULATOR_39_32__SHIFT 0x0
++#define GC_CAC_ACC_SQ8_UPPER__UNUSED_0__SHIFT 0x8
++#define GC_CAC_ACC_SQ8_UPPER__ACCUMULATOR_39_32_MASK 0x000000FFL
++#define GC_CAC_ACC_SQ8_UPPER__UNUSED_0_MASK 0xFFFFFF00L
++//GC_CAC_ACC_SX0
++#define GC_CAC_ACC_SX0__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_SX0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_SXRB0
++#define GC_CAC_ACC_SXRB0__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_SXRB0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_SXRB1
++#define GC_CAC_ACC_SXRB1__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_SXRB1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_TA0
++#define GC_CAC_ACC_TA0__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_TA0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_TCC0
++#define GC_CAC_ACC_TCC0__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_TCC0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_TCC1
++#define GC_CAC_ACC_TCC1__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_TCC1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_TCC2
++#define GC_CAC_ACC_TCC2__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_TCC2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_TCC3
++#define GC_CAC_ACC_TCC3__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_TCC3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_TCC4
++#define GC_CAC_ACC_TCC4__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_TCC4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_TCP0
++#define GC_CAC_ACC_TCP0__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_TCP0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_TCP1
++#define GC_CAC_ACC_TCP1__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_TCP1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_TCP2
++#define GC_CAC_ACC_TCP2__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_TCP2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_TCP3
++#define GC_CAC_ACC_TCP3__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_TCP3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_TCP4
++#define GC_CAC_ACC_TCP4__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_TCP4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_TD0
++#define GC_CAC_ACC_TD0__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_TD0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_TD1
++#define GC_CAC_ACC_TD1__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_TD1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_TD2
++#define GC_CAC_ACC_TD2__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_TD2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_TD3
++#define GC_CAC_ACC_TD3__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_TD3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_TD4
++#define GC_CAC_ACC_TD4__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_TD4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_TD5
++#define GC_CAC_ACC_TD5__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_TD5__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_VGT0
++#define GC_CAC_ACC_VGT0__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_VGT0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_VGT1
++#define GC_CAC_ACC_VGT1__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_VGT1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_VGT2
++#define GC_CAC_ACC_VGT2__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_VGT2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_WD0
++#define GC_CAC_ACC_WD0__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_WD0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_CU0
++#define GC_CAC_ACC_CU0__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_CU0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_CU1
++#define GC_CAC_ACC_CU1__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_CU1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_CU2
++#define GC_CAC_ACC_CU2__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_CU2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_CU3
++#define GC_CAC_ACC_CU3__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_CU3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_CU4
++#define GC_CAC_ACC_CU4__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_CU4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_CU5
++#define GC_CAC_ACC_CU5__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_CU5__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_CU6
++#define GC_CAC_ACC_CU6__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_CU6__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_CU7
++#define GC_CAC_ACC_CU7__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_CU7__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_CU8
++#define GC_CAC_ACC_CU8__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_CU8__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_CU9
++#define GC_CAC_ACC_CU9__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_CU9__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_CU10
++#define GC_CAC_ACC_CU10__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_CU10__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_OVRD_BCI
++#define GC_CAC_OVRD_BCI__OVRRD_SELECT__SHIFT 0x0
++#define GC_CAC_OVRD_BCI__OVRRD_VALUE__SHIFT 0x2
++#define GC_CAC_OVRD_BCI__OVRRD_SELECT_MASK 0x00000003L
++#define GC_CAC_OVRD_BCI__OVRRD_VALUE_MASK 0x0000000CL
++//GC_CAC_OVRD_CB
++#define GC_CAC_OVRD_CB__OVRRD_SELECT__SHIFT 0x0
++#define GC_CAC_OVRD_CB__OVRRD_VALUE__SHIFT 0x4
++#define GC_CAC_OVRD_CB__OVRRD_SELECT_MASK 0x0000000FL
++#define GC_CAC_OVRD_CB__OVRRD_VALUE_MASK 0x000000F0L
++//GC_CAC_OVRD_CP
++#define GC_CAC_OVRD_CP__OVRRD_SELECT__SHIFT 0x0
++#define GC_CAC_OVRD_CP__OVRRD_VALUE__SHIFT 0x3
++#define GC_CAC_OVRD_CP__OVRRD_SELECT_MASK 0x00000007L
++#define GC_CAC_OVRD_CP__OVRRD_VALUE_MASK 0x00000038L
++//GC_CAC_OVRD_DB
++#define GC_CAC_OVRD_DB__OVRRD_SELECT__SHIFT 0x0
++#define GC_CAC_OVRD_DB__OVRRD_VALUE__SHIFT 0x4
++#define GC_CAC_OVRD_DB__OVRRD_SELECT_MASK 0x0000000FL
++#define GC_CAC_OVRD_DB__OVRRD_VALUE_MASK 0x000000F0L
++//GC_CAC_OVRD_GDS
++#define GC_CAC_OVRD_GDS__OVRRD_SELECT__SHIFT 0x0
++#define GC_CAC_OVRD_GDS__OVRRD_VALUE__SHIFT 0x4
++#define GC_CAC_OVRD_GDS__OVRRD_SELECT_MASK 0x0000000FL
++#define GC_CAC_OVRD_GDS__OVRRD_VALUE_MASK 0x000000F0L
++//GC_CAC_OVRD_IA
++#define GC_CAC_OVRD_IA__OVRRD_SELECT__SHIFT 0x0
++#define GC_CAC_OVRD_IA__OVRRD_VALUE__SHIFT 0x1
++#define GC_CAC_OVRD_IA__OVRRD_SELECT_MASK 0x00000001L
++#define GC_CAC_OVRD_IA__OVRRD_VALUE_MASK 0x00000002L
++//GC_CAC_OVRD_LDS
++#define GC_CAC_OVRD_LDS__OVRRD_SELECT__SHIFT 0x0
++#define GC_CAC_OVRD_LDS__OVRRD_VALUE__SHIFT 0x4
++#define GC_CAC_OVRD_LDS__OVRRD_SELECT_MASK 0x0000000FL
++#define GC_CAC_OVRD_LDS__OVRRD_VALUE_MASK 0x000000F0L
++//GC_CAC_OVRD_PA
++#define GC_CAC_OVRD_PA__OVRRD_SELECT__SHIFT 0x0
++#define GC_CAC_OVRD_PA__OVRRD_VALUE__SHIFT 0x2
++#define GC_CAC_OVRD_PA__OVRRD_SELECT_MASK 0x00000003L
++#define GC_CAC_OVRD_PA__OVRRD_VALUE_MASK 0x0000000CL
++//GC_CAC_OVRD_PC
++#define GC_CAC_OVRD_PC__OVRRD_SELECT__SHIFT 0x0
++#define GC_CAC_OVRD_PC__OVRRD_VALUE__SHIFT 0x1
++#define GC_CAC_OVRD_PC__OVRRD_SELECT_MASK 0x00000001L
++#define GC_CAC_OVRD_PC__OVRRD_VALUE_MASK 0x00000002L
++//GC_CAC_OVRD_SC
++#define GC_CAC_OVRD_SC__OVRRD_SELECT__SHIFT 0x0
++#define GC_CAC_OVRD_SC__OVRRD_VALUE__SHIFT 0x1
++#define GC_CAC_OVRD_SC__OVRRD_SELECT_MASK 0x00000001L
++#define GC_CAC_OVRD_SC__OVRRD_VALUE_MASK 0x00000002L
++//GC_CAC_OVRD_SPI
++#define GC_CAC_OVRD_SPI__OVRRD_SELECT__SHIFT 0x0
++#define GC_CAC_OVRD_SPI__OVRRD_VALUE__SHIFT 0x6
++#define GC_CAC_OVRD_SPI__OVRRD_SELECT_MASK 0x0000003FL
++#define GC_CAC_OVRD_SPI__OVRRD_VALUE_MASK 0x00000FC0L
++//GC_CAC_OVRD_CU
++#define GC_CAC_OVRD_CU__OVRRD_SELECT__SHIFT 0x0
++#define GC_CAC_OVRD_CU__OVRRD_VALUE__SHIFT 0x1
++#define GC_CAC_OVRD_CU__OVRRD_SELECT_MASK 0x00000001L
++#define GC_CAC_OVRD_CU__OVRRD_VALUE_MASK 0x00000002L
++//GC_CAC_OVRD_SQ
++#define GC_CAC_OVRD_SQ__OVRRD_SELECT__SHIFT 0x0
++#define GC_CAC_OVRD_SQ__OVRRD_VALUE__SHIFT 0x9
++#define GC_CAC_OVRD_SQ__OVRRD_SELECT_MASK 0x000001FFL
++#define GC_CAC_OVRD_SQ__OVRRD_VALUE_MASK 0x0003FE00L
++//GC_CAC_OVRD_SX
++#define GC_CAC_OVRD_SX__OVRRD_SELECT__SHIFT 0x0
++#define GC_CAC_OVRD_SX__OVRRD_VALUE__SHIFT 0x1
++#define GC_CAC_OVRD_SX__OVRRD_SELECT_MASK 0x00000001L
++#define GC_CAC_OVRD_SX__OVRRD_VALUE_MASK 0x00000002L
++//GC_CAC_OVRD_SXRB
++#define GC_CAC_OVRD_SXRB__OVRRD_SELECT__SHIFT 0x0
++#define GC_CAC_OVRD_SXRB__OVRRD_VALUE__SHIFT 0x1
++#define GC_CAC_OVRD_SXRB__OVRRD_SELECT_MASK 0x00000001L
++#define GC_CAC_OVRD_SXRB__OVRRD_VALUE_MASK 0x00000002L
++//GC_CAC_OVRD_TA
++#define GC_CAC_OVRD_TA__OVRRD_SELECT__SHIFT 0x0
++#define GC_CAC_OVRD_TA__OVRRD_VALUE__SHIFT 0x1
++#define GC_CAC_OVRD_TA__OVRRD_SELECT_MASK 0x00000001L
++#define GC_CAC_OVRD_TA__OVRRD_VALUE_MASK 0x00000002L
++//GC_CAC_OVRD_TCC
++#define GC_CAC_OVRD_TCC__OVRRD_SELECT__SHIFT 0x0
++#define GC_CAC_OVRD_TCC__OVRRD_VALUE__SHIFT 0x5
++#define GC_CAC_OVRD_TCC__OVRRD_SELECT_MASK 0x0000001FL
++#define GC_CAC_OVRD_TCC__OVRRD_VALUE_MASK 0x000003E0L
++//GC_CAC_OVRD_TCP
++#define GC_CAC_OVRD_TCP__OVRRD_SELECT__SHIFT 0x0
++#define GC_CAC_OVRD_TCP__OVRRD_VALUE__SHIFT 0x5
++#define GC_CAC_OVRD_TCP__OVRRD_SELECT_MASK 0x0000001FL
++#define GC_CAC_OVRD_TCP__OVRRD_VALUE_MASK 0x000003E0L
++//GC_CAC_OVRD_TD
++#define GC_CAC_OVRD_TD__OVRRD_SELECT__SHIFT 0x0
++#define GC_CAC_OVRD_TD__OVRRD_VALUE__SHIFT 0x6
++#define GC_CAC_OVRD_TD__OVRRD_SELECT_MASK 0x0000003FL
++#define GC_CAC_OVRD_TD__OVRRD_VALUE_MASK 0x00000FC0L
++//GC_CAC_OVRD_VGT
++#define GC_CAC_OVRD_VGT__OVRRD_SELECT__SHIFT 0x0
++#define GC_CAC_OVRD_VGT__OVRRD_VALUE__SHIFT 0x3
++#define GC_CAC_OVRD_VGT__OVRRD_SELECT_MASK 0x00000007L
++#define GC_CAC_OVRD_VGT__OVRRD_VALUE_MASK 0x00000038L
++//GC_CAC_OVRD_WD
++#define GC_CAC_OVRD_WD__OVRRD_SELECT__SHIFT 0x0
++#define GC_CAC_OVRD_WD__OVRRD_VALUE__SHIFT 0x1
++#define GC_CAC_OVRD_WD__OVRRD_SELECT_MASK 0x00000001L
++#define GC_CAC_OVRD_WD__OVRRD_VALUE_MASK 0x00000002L
++//GC_CAC_ACC_BCI1
++#define GC_CAC_ACC_BCI1__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_BCI1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_WEIGHT_UTCL2_ATCL2_2
++#define GC_CAC_WEIGHT_UTCL2_ATCL2_2__WEIGHT_UTCL2_ATCL2_SIG4__SHIFT 0x0
++#define GC_CAC_WEIGHT_UTCL2_ATCL2_2__WEIGHT_UTCL2_ATCL2_SIG5__SHIFT 0x10
++#define GC_CAC_WEIGHT_UTCL2_ATCL2_2__WEIGHT_UTCL2_ATCL2_SIG4_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_UTCL2_ATCL2_2__WEIGHT_UTCL2_ATCL2_SIG5_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_UTCL2_ROUTER_0
++#define GC_CAC_WEIGHT_UTCL2_ROUTER_0__WEIGHT_UTCL2_ROUTER_SIG0__SHIFT 0x0
++#define GC_CAC_WEIGHT_UTCL2_ROUTER_0__WEIGHT_UTCL2_ROUTER_SIG1__SHIFT 0x10
++#define GC_CAC_WEIGHT_UTCL2_ROUTER_0__WEIGHT_UTCL2_ROUTER_SIG0_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_UTCL2_ROUTER_0__WEIGHT_UTCL2_ROUTER_SIG1_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_UTCL2_ROUTER_1
++#define GC_CAC_WEIGHT_UTCL2_ROUTER_1__WEIGHT_UTCL2_ROUTER_SIG2__SHIFT 0x0
++#define GC_CAC_WEIGHT_UTCL2_ROUTER_1__WEIGHT_UTCL2_ROUTER_SIG3__SHIFT 0x10
++#define GC_CAC_WEIGHT_UTCL2_ROUTER_1__WEIGHT_UTCL2_ROUTER_SIG2_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_UTCL2_ROUTER_1__WEIGHT_UTCL2_ROUTER_SIG3_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_UTCL2_ROUTER_2
++#define GC_CAC_WEIGHT_UTCL2_ROUTER_2__WEIGHT_UTCL2_ROUTER_SIG4__SHIFT 0x0
++#define GC_CAC_WEIGHT_UTCL2_ROUTER_2__WEIGHT_UTCL2_ROUTER_SIG5__SHIFT 0x10
++#define GC_CAC_WEIGHT_UTCL2_ROUTER_2__WEIGHT_UTCL2_ROUTER_SIG4_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_UTCL2_ROUTER_2__WEIGHT_UTCL2_ROUTER_SIG5_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_UTCL2_ROUTER_3
++#define GC_CAC_WEIGHT_UTCL2_ROUTER_3__WEIGHT_UTCL2_ROUTER_SIG6__SHIFT 0x0
++#define GC_CAC_WEIGHT_UTCL2_ROUTER_3__WEIGHT_UTCL2_ROUTER_SIG7__SHIFT 0x10
++#define GC_CAC_WEIGHT_UTCL2_ROUTER_3__WEIGHT_UTCL2_ROUTER_SIG6_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_UTCL2_ROUTER_3__WEIGHT_UTCL2_ROUTER_SIG7_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_UTCL2_ROUTER_4
++#define GC_CAC_WEIGHT_UTCL2_ROUTER_4__WEIGHT_UTCL2_ROUTER_SIG8__SHIFT 0x0
++#define GC_CAC_WEIGHT_UTCL2_ROUTER_4__WEIGHT_UTCL2_ROUTER_SIG9__SHIFT 0x10
++#define GC_CAC_WEIGHT_UTCL2_ROUTER_4__WEIGHT_UTCL2_ROUTER_SIG8_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_UTCL2_ROUTER_4__WEIGHT_UTCL2_ROUTER_SIG9_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_UTCL2_VML2_0
++#define GC_CAC_WEIGHT_UTCL2_VML2_0__WEIGHT_UTCL2_VML2_SIG0__SHIFT 0x0
++#define GC_CAC_WEIGHT_UTCL2_VML2_0__WEIGHT_UTCL2_VML2_SIG1__SHIFT 0x10
++#define GC_CAC_WEIGHT_UTCL2_VML2_0__WEIGHT_UTCL2_VML2_SIG0_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_UTCL2_VML2_0__WEIGHT_UTCL2_VML2_SIG1_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_UTCL2_VML2_1
++#define GC_CAC_WEIGHT_UTCL2_VML2_1__WEIGHT_UTCL2_VML2_SIG2__SHIFT 0x0
++#define GC_CAC_WEIGHT_UTCL2_VML2_1__WEIGHT_UTCL2_VML2_SIG3__SHIFT 0x10
++#define GC_CAC_WEIGHT_UTCL2_VML2_1__WEIGHT_UTCL2_VML2_SIG2_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_UTCL2_VML2_1__WEIGHT_UTCL2_VML2_SIG3_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_UTCL2_VML2_2
++#define GC_CAC_WEIGHT_UTCL2_VML2_2__WEIGHT_UTCL2_VML2_SIG4__SHIFT 0x0
++#define GC_CAC_WEIGHT_UTCL2_VML2_2__WEIGHT_UTCL2_VML2_SIG5__SHIFT 0x10
++#define GC_CAC_WEIGHT_UTCL2_VML2_2__WEIGHT_UTCL2_VML2_SIG4_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_UTCL2_VML2_2__WEIGHT_UTCL2_VML2_SIG5_MASK 0xFFFF0000L
++//GC_CAC_ACC_UTCL2_ATCL24
++#define GC_CAC_ACC_UTCL2_ATCL24__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_UTCL2_ATCL24__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_UTCL2_ROUTER0
++#define GC_CAC_ACC_UTCL2_ROUTER0__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_UTCL2_ROUTER0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_UTCL2_ROUTER1
++#define GC_CAC_ACC_UTCL2_ROUTER1__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_UTCL2_ROUTER1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_UTCL2_ROUTER2
++#define GC_CAC_ACC_UTCL2_ROUTER2__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_UTCL2_ROUTER2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_UTCL2_ROUTER3
++#define GC_CAC_ACC_UTCL2_ROUTER3__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_UTCL2_ROUTER3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_UTCL2_ROUTER4
++#define GC_CAC_ACC_UTCL2_ROUTER4__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_UTCL2_ROUTER4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_UTCL2_ROUTER5
++#define GC_CAC_ACC_UTCL2_ROUTER5__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_UTCL2_ROUTER5__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_UTCL2_ROUTER6
++#define GC_CAC_ACC_UTCL2_ROUTER6__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_UTCL2_ROUTER6__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_UTCL2_ROUTER7
++#define GC_CAC_ACC_UTCL2_ROUTER7__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_UTCL2_ROUTER7__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_UTCL2_ROUTER8
++#define GC_CAC_ACC_UTCL2_ROUTER8__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_UTCL2_ROUTER8__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_UTCL2_ROUTER9
++#define GC_CAC_ACC_UTCL2_ROUTER9__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_UTCL2_ROUTER9__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_UTCL2_VML20
++#define GC_CAC_ACC_UTCL2_VML20__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_UTCL2_VML20__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_UTCL2_VML21
++#define GC_CAC_ACC_UTCL2_VML21__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_UTCL2_VML21__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_UTCL2_VML22
++#define GC_CAC_ACC_UTCL2_VML22__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_UTCL2_VML22__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_UTCL2_VML23
++#define GC_CAC_ACC_UTCL2_VML23__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_UTCL2_VML23__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_UTCL2_VML24
++#define GC_CAC_ACC_UTCL2_VML24__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_UTCL2_VML24__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_OVRD_UTCL2_ROUTER
++#define GC_CAC_OVRD_UTCL2_ROUTER__OVRRD_SELECT__SHIFT 0x0
++#define GC_CAC_OVRD_UTCL2_ROUTER__OVRRD_VALUE__SHIFT 0xa
++#define GC_CAC_OVRD_UTCL2_ROUTER__OVRRD_SELECT_MASK 0x000003FFL
++#define GC_CAC_OVRD_UTCL2_ROUTER__OVRRD_VALUE_MASK 0x000FFC00L
++//GC_CAC_OVRD_UTCL2_VML2
++#define GC_CAC_OVRD_UTCL2_VML2__OVRRD_SELECT__SHIFT 0x0
++#define GC_CAC_OVRD_UTCL2_VML2__OVRRD_VALUE__SHIFT 0x5
++#define GC_CAC_OVRD_UTCL2_VML2__OVRRD_SELECT_MASK 0x0000001FL
++#define GC_CAC_OVRD_UTCL2_VML2__OVRRD_VALUE_MASK 0x000003E0L
++//GC_CAC_WEIGHT_UTCL2_WALKER_0
++#define GC_CAC_WEIGHT_UTCL2_WALKER_0__WEIGHT_UTCL2_WALKER_SIG0__SHIFT 0x0
++#define GC_CAC_WEIGHT_UTCL2_WALKER_0__WEIGHT_UTCL2_WALKER_SIG1__SHIFT 0x10
++#define GC_CAC_WEIGHT_UTCL2_WALKER_0__WEIGHT_UTCL2_WALKER_SIG0_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_UTCL2_WALKER_0__WEIGHT_UTCL2_WALKER_SIG1_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_UTCL2_WALKER_1
++#define GC_CAC_WEIGHT_UTCL2_WALKER_1__WEIGHT_UTCL2_WALKER_SIG2__SHIFT 0x0
++#define GC_CAC_WEIGHT_UTCL2_WALKER_1__WEIGHT_UTCL2_WALKER_SIG3__SHIFT 0x10
++#define GC_CAC_WEIGHT_UTCL2_WALKER_1__WEIGHT_UTCL2_WALKER_SIG2_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_UTCL2_WALKER_1__WEIGHT_UTCL2_WALKER_SIG3_MASK 0xFFFF0000L
++//GC_CAC_WEIGHT_UTCL2_WALKER_2
++#define GC_CAC_WEIGHT_UTCL2_WALKER_2__WEIGHT_UTCL2_WALKER_SIG4__SHIFT 0x0
++#define GC_CAC_WEIGHT_UTCL2_WALKER_2__WEIGHT_UTCL2_WALKER_SIG5__SHIFT 0x10
++#define GC_CAC_WEIGHT_UTCL2_WALKER_2__WEIGHT_UTCL2_WALKER_SIG4_MASK 0x0000FFFFL
++#define GC_CAC_WEIGHT_UTCL2_WALKER_2__WEIGHT_UTCL2_WALKER_SIG5_MASK 0xFFFF0000L
++//GC_CAC_ACC_UTCL2_WALKER0
++#define GC_CAC_ACC_UTCL2_WALKER0__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_UTCL2_WALKER0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_UTCL2_WALKER1
++#define GC_CAC_ACC_UTCL2_WALKER1__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_UTCL2_WALKER1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_UTCL2_WALKER2
++#define GC_CAC_ACC_UTCL2_WALKER2__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_UTCL2_WALKER2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_UTCL2_WALKER3
++#define GC_CAC_ACC_UTCL2_WALKER3__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_UTCL2_WALKER3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_ACC_UTCL2_WALKER4
++#define GC_CAC_ACC_UTCL2_WALKER4__ACCUMULATOR_31_0__SHIFT 0x0
++#define GC_CAC_ACC_UTCL2_WALKER4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
++//GC_CAC_OVRD_UTCL2_WALKER
++#define GC_CAC_OVRD_UTCL2_WALKER__OVRRD_SELECT__SHIFT 0x0
++#define GC_CAC_OVRD_UTCL2_WALKER__OVRRD_VALUE__SHIFT 0x5
++#define GC_CAC_OVRD_UTCL2_WALKER__OVRRD_SELECT_MASK 0x0000001FL
++#define GC_CAC_OVRD_UTCL2_WALKER__OVRRD_VALUE_MASK 0x000003E0L
++
++
++// addressBlock: secacind
++//SE_CAC_CNTL
++#define SE_CAC_CNTL__CAC_ENABLE__SHIFT 0x0
++#define SE_CAC_CNTL__CAC_THRESHOLD__SHIFT 0x1
++#define SE_CAC_CNTL__CAC_BLOCK_ID__SHIFT 0x11
++#define SE_CAC_CNTL__CAC_SIGNAL_ID__SHIFT 0x17
++#define SE_CAC_CNTL__UNUSED_0__SHIFT 0x1f
++#define SE_CAC_CNTL__CAC_ENABLE_MASK 0x00000001L
++#define SE_CAC_CNTL__CAC_THRESHOLD_MASK 0x0001FFFEL
++#define SE_CAC_CNTL__CAC_BLOCK_ID_MASK 0x007E0000L
++#define SE_CAC_CNTL__CAC_SIGNAL_ID_MASK 0x7F800000L
++#define SE_CAC_CNTL__UNUSED_0_MASK 0x80000000L
++//SE_CAC_OVR_SEL
++#define SE_CAC_OVR_SEL__CAC_OVR_SEL__SHIFT 0x0
++#define SE_CAC_OVR_SEL__CAC_OVR_SEL_MASK 0xFFFFFFFFL
++//SE_CAC_OVR_VAL
++#define SE_CAC_OVR_VAL__CAC_OVR_VAL__SHIFT 0x0
++#define SE_CAC_OVR_VAL__CAC_OVR_VAL_MASK 0xFFFFFFFFL
++
++
++// addressBlock: sqind
++//SQ_WAVE_MODE
++#define SQ_WAVE_MODE__FP_ROUND__SHIFT 0x0
++#define SQ_WAVE_MODE__FP_DENORM__SHIFT 0x4
++#define SQ_WAVE_MODE__DX10_CLAMP__SHIFT 0x8
++#define SQ_WAVE_MODE__IEEE__SHIFT 0x9
++#define SQ_WAVE_MODE__LOD_CLAMPED__SHIFT 0xa
++#define SQ_WAVE_MODE__EXCP_EN__SHIFT 0xc
++#define SQ_WAVE_MODE__FP16_OVFL__SHIFT 0x17
++#define SQ_WAVE_MODE__POPS_PACKER0__SHIFT 0x18
++#define SQ_WAVE_MODE__POPS_PACKER1__SHIFT 0x19
++#define SQ_WAVE_MODE__DISABLE_PERF__SHIFT 0x1a
++#define SQ_WAVE_MODE__GPR_IDX_EN__SHIFT 0x1b
++#define SQ_WAVE_MODE__VSKIP__SHIFT 0x1c
++#define SQ_WAVE_MODE__CSP__SHIFT 0x1d
++#define SQ_WAVE_MODE__FP_ROUND_MASK 0x0000000FL
++#define SQ_WAVE_MODE__FP_DENORM_MASK 0x000000F0L
++#define SQ_WAVE_MODE__DX10_CLAMP_MASK 0x00000100L
++#define SQ_WAVE_MODE__IEEE_MASK 0x00000200L
++#define SQ_WAVE_MODE__LOD_CLAMPED_MASK 0x00000400L
++#define SQ_WAVE_MODE__EXCP_EN_MASK 0x001FF000L
++#define SQ_WAVE_MODE__FP16_OVFL_MASK 0x00800000L
++#define SQ_WAVE_MODE__POPS_PACKER0_MASK 0x01000000L
++#define SQ_WAVE_MODE__POPS_PACKER1_MASK 0x02000000L
++#define SQ_WAVE_MODE__DISABLE_PERF_MASK 0x04000000L
++#define SQ_WAVE_MODE__GPR_IDX_EN_MASK 0x08000000L
++#define SQ_WAVE_MODE__VSKIP_MASK 0x10000000L
++#define SQ_WAVE_MODE__CSP_MASK 0xE0000000L
++//SQ_WAVE_STATUS
++#define SQ_WAVE_STATUS__SCC__SHIFT 0x0
++#define SQ_WAVE_STATUS__SPI_PRIO__SHIFT 0x1
++#define SQ_WAVE_STATUS__USER_PRIO__SHIFT 0x3
++#define SQ_WAVE_STATUS__PRIV__SHIFT 0x5
++#define SQ_WAVE_STATUS__TRAP_EN__SHIFT 0x6
++#define SQ_WAVE_STATUS__TTRACE_EN__SHIFT 0x7
++#define SQ_WAVE_STATUS__EXPORT_RDY__SHIFT 0x8
++#define SQ_WAVE_STATUS__EXECZ__SHIFT 0x9
++#define SQ_WAVE_STATUS__VCCZ__SHIFT 0xa
++#define SQ_WAVE_STATUS__IN_TG__SHIFT 0xb
++#define SQ_WAVE_STATUS__IN_BARRIER__SHIFT 0xc
++#define SQ_WAVE_STATUS__HALT__SHIFT 0xd
++#define SQ_WAVE_STATUS__TRAP__SHIFT 0xe
++#define SQ_WAVE_STATUS__TTRACE_CU_EN__SHIFT 0xf
++#define SQ_WAVE_STATUS__VALID__SHIFT 0x10
++#define SQ_WAVE_STATUS__ECC_ERR__SHIFT 0x11
++#define SQ_WAVE_STATUS__SKIP_EXPORT__SHIFT 0x12
++#define SQ_WAVE_STATUS__PERF_EN__SHIFT 0x13
++#define SQ_WAVE_STATUS__ALLOW_REPLAY__SHIFT 0x16
++#define SQ_WAVE_STATUS__FATAL_HALT__SHIFT 0x17
++#define SQ_WAVE_STATUS__MUST_EXPORT__SHIFT 0x1b
++#define SQ_WAVE_STATUS__SCC_MASK 0x00000001L
++#define SQ_WAVE_STATUS__SPI_PRIO_MASK 0x00000006L
++#define SQ_WAVE_STATUS__USER_PRIO_MASK 0x00000018L
++#define SQ_WAVE_STATUS__PRIV_MASK 0x00000020L
++#define SQ_WAVE_STATUS__TRAP_EN_MASK 0x00000040L
++#define SQ_WAVE_STATUS__TTRACE_EN_MASK 0x00000080L
++#define SQ_WAVE_STATUS__EXPORT_RDY_MASK 0x00000100L
++#define SQ_WAVE_STATUS__EXECZ_MASK 0x00000200L
++#define SQ_WAVE_STATUS__VCCZ_MASK 0x00000400L
++#define SQ_WAVE_STATUS__IN_TG_MASK 0x00000800L
++#define SQ_WAVE_STATUS__IN_BARRIER_MASK 0x00001000L
++#define SQ_WAVE_STATUS__HALT_MASK 0x00002000L
++#define SQ_WAVE_STATUS__TRAP_MASK 0x00004000L
++#define SQ_WAVE_STATUS__TTRACE_CU_EN_MASK 0x00008000L
++#define SQ_WAVE_STATUS__VALID_MASK 0x00010000L
++#define SQ_WAVE_STATUS__ECC_ERR_MASK 0x00020000L
++#define SQ_WAVE_STATUS__SKIP_EXPORT_MASK 0x00040000L
++#define SQ_WAVE_STATUS__PERF_EN_MASK 0x00080000L
++#define SQ_WAVE_STATUS__ALLOW_REPLAY_MASK 0x00400000L
++#define SQ_WAVE_STATUS__FATAL_HALT_MASK 0x00800000L
++#define SQ_WAVE_STATUS__MUST_EXPORT_MASK 0x08000000L
++//SQ_WAVE_TRAPSTS
++#define SQ_WAVE_TRAPSTS__EXCP__SHIFT 0x0
++#define SQ_WAVE_TRAPSTS__SAVECTX__SHIFT 0xa
++#define SQ_WAVE_TRAPSTS__ILLEGAL_INST__SHIFT 0xb
++#define SQ_WAVE_TRAPSTS__EXCP_HI__SHIFT 0xc
++#define SQ_WAVE_TRAPSTS__EXCP_CYCLE__SHIFT 0x10
++#define SQ_WAVE_TRAPSTS__XNACK_ERROR__SHIFT 0x1c
++#define SQ_WAVE_TRAPSTS__DP_RATE__SHIFT 0x1d
++#define SQ_WAVE_TRAPSTS__EXCP_MASK 0x000001FFL
++#define SQ_WAVE_TRAPSTS__SAVECTX_MASK 0x00000400L
++#define SQ_WAVE_TRAPSTS__ILLEGAL_INST_MASK 0x00000800L
++#define SQ_WAVE_TRAPSTS__EXCP_HI_MASK 0x00007000L
++#define SQ_WAVE_TRAPSTS__EXCP_CYCLE_MASK 0x003F0000L
++#define SQ_WAVE_TRAPSTS__XNACK_ERROR_MASK 0x10000000L
++#define SQ_WAVE_TRAPSTS__DP_RATE_MASK 0xE0000000L
++//SQ_WAVE_HW_ID
++#define SQ_WAVE_HW_ID__WAVE_ID__SHIFT 0x0
++#define SQ_WAVE_HW_ID__SIMD_ID__SHIFT 0x4
++#define SQ_WAVE_HW_ID__PIPE_ID__SHIFT 0x6
++#define SQ_WAVE_HW_ID__CU_ID__SHIFT 0x8
++#define SQ_WAVE_HW_ID__SH_ID__SHIFT 0xc
++#define SQ_WAVE_HW_ID__SE_ID__SHIFT 0xd
++#define SQ_WAVE_HW_ID__TG_ID__SHIFT 0x10
++#define SQ_WAVE_HW_ID__VM_ID__SHIFT 0x14
++#define SQ_WAVE_HW_ID__QUEUE_ID__SHIFT 0x18
++#define SQ_WAVE_HW_ID__STATE_ID__SHIFT 0x1b
++#define SQ_WAVE_HW_ID__ME_ID__SHIFT 0x1e
++#define SQ_WAVE_HW_ID__WAVE_ID_MASK 0x0000000FL
++#define SQ_WAVE_HW_ID__SIMD_ID_MASK 0x00000030L
++#define SQ_WAVE_HW_ID__PIPE_ID_MASK 0x000000C0L
++#define SQ_WAVE_HW_ID__CU_ID_MASK 0x00000F00L
++#define SQ_WAVE_HW_ID__SH_ID_MASK 0x00001000L
++#define SQ_WAVE_HW_ID__SE_ID_MASK 0x00006000L
++#define SQ_WAVE_HW_ID__TG_ID_MASK 0x000F0000L
++#define SQ_WAVE_HW_ID__VM_ID_MASK 0x00F00000L
++#define SQ_WAVE_HW_ID__QUEUE_ID_MASK 0x07000000L
++#define SQ_WAVE_HW_ID__STATE_ID_MASK 0x38000000L
++#define SQ_WAVE_HW_ID__ME_ID_MASK 0xC0000000L
++//SQ_WAVE_GPR_ALLOC
++#define SQ_WAVE_GPR_ALLOC__VGPR_BASE__SHIFT 0x0
++#define SQ_WAVE_GPR_ALLOC__VGPR_SIZE__SHIFT 0x8
++#define SQ_WAVE_GPR_ALLOC__SGPR_BASE__SHIFT 0x10
++#define SQ_WAVE_GPR_ALLOC__SGPR_SIZE__SHIFT 0x18
++#define SQ_WAVE_GPR_ALLOC__VGPR_BASE_MASK 0x0000003FL
++#define SQ_WAVE_GPR_ALLOC__VGPR_SIZE_MASK 0x00003F00L
++#define SQ_WAVE_GPR_ALLOC__SGPR_BASE_MASK 0x003F0000L
++#define SQ_WAVE_GPR_ALLOC__SGPR_SIZE_MASK 0x0F000000L
++//SQ_WAVE_LDS_ALLOC
++#define SQ_WAVE_LDS_ALLOC__LDS_BASE__SHIFT 0x0
++#define SQ_WAVE_LDS_ALLOC__LDS_SIZE__SHIFT 0xc
++#define SQ_WAVE_LDS_ALLOC__LDS_BASE_MASK 0x000000FFL
++#define SQ_WAVE_LDS_ALLOC__LDS_SIZE_MASK 0x001FF000L
++//SQ_WAVE_IB_STS
++#define SQ_WAVE_IB_STS__VM_CNT__SHIFT 0x0
++#define SQ_WAVE_IB_STS__EXP_CNT__SHIFT 0x4
++#define SQ_WAVE_IB_STS__LGKM_CNT__SHIFT 0x8
++#define SQ_WAVE_IB_STS__VALU_CNT__SHIFT 0xc
++#define SQ_WAVE_IB_STS__FIRST_REPLAY__SHIFT 0xf
++#define SQ_WAVE_IB_STS__RCNT__SHIFT 0x10
++#define SQ_WAVE_IB_STS__VM_CNT_HI__SHIFT 0x16
++#define SQ_WAVE_IB_STS__VM_CNT_MASK 0x0000000FL
++#define SQ_WAVE_IB_STS__EXP_CNT_MASK 0x00000070L
++#define SQ_WAVE_IB_STS__LGKM_CNT_MASK 0x00000F00L
++#define SQ_WAVE_IB_STS__VALU_CNT_MASK 0x00007000L
++#define SQ_WAVE_IB_STS__FIRST_REPLAY_MASK 0x00008000L
++#define SQ_WAVE_IB_STS__RCNT_MASK 0x001F0000L
++#define SQ_WAVE_IB_STS__VM_CNT_HI_MASK 0x00C00000L
++//SQ_WAVE_PC_LO
++#define SQ_WAVE_PC_LO__PC_LO__SHIFT 0x0
++#define SQ_WAVE_PC_LO__PC_LO_MASK 0xFFFFFFFFL
++//SQ_WAVE_PC_HI
++#define SQ_WAVE_PC_HI__PC_HI__SHIFT 0x0
++#define SQ_WAVE_PC_HI__PC_HI_MASK 0x0000FFFFL
++//SQ_WAVE_INST_DW0
++#define SQ_WAVE_INST_DW0__INST_DW0__SHIFT 0x0
++#define SQ_WAVE_INST_DW0__INST_DW0_MASK 0xFFFFFFFFL
++//SQ_WAVE_INST_DW1
++#define SQ_WAVE_INST_DW1__INST_DW1__SHIFT 0x0
++#define SQ_WAVE_INST_DW1__INST_DW1_MASK 0xFFFFFFFFL
++//SQ_WAVE_IB_DBG0
++#define SQ_WAVE_IB_DBG0__IBUF_ST__SHIFT 0x0
++#define SQ_WAVE_IB_DBG0__PC_INVALID__SHIFT 0x3
++#define SQ_WAVE_IB_DBG0__NEED_NEXT_DW__SHIFT 0x4
++#define SQ_WAVE_IB_DBG0__NO_PREFETCH_CNT__SHIFT 0x5
++#define SQ_WAVE_IB_DBG0__IBUF_RPTR__SHIFT 0x8
++#define SQ_WAVE_IB_DBG0__IBUF_WPTR__SHIFT 0xa
++#define SQ_WAVE_IB_DBG0__INST_STR_ST__SHIFT 0x10
++#define SQ_WAVE_IB_DBG0__ECC_ST__SHIFT 0x18
++#define SQ_WAVE_IB_DBG0__IS_HYB__SHIFT 0x1a
++#define SQ_WAVE_IB_DBG0__HYB_CNT__SHIFT 0x1b
++#define SQ_WAVE_IB_DBG0__KILL__SHIFT 0x1d
++#define SQ_WAVE_IB_DBG0__NEED_KILL_IFETCH__SHIFT 0x1e
++#define SQ_WAVE_IB_DBG0__NO_PREFETCH_CNT_HI__SHIFT 0x1f
++#define SQ_WAVE_IB_DBG0__IBUF_ST_MASK 0x00000007L
++#define SQ_WAVE_IB_DBG0__PC_INVALID_MASK 0x00000008L
++#define SQ_WAVE_IB_DBG0__NEED_NEXT_DW_MASK 0x00000010L
++#define SQ_WAVE_IB_DBG0__NO_PREFETCH_CNT_MASK 0x000000E0L
++#define SQ_WAVE_IB_DBG0__IBUF_RPTR_MASK 0x00000300L
++#define SQ_WAVE_IB_DBG0__IBUF_WPTR_MASK 0x00000C00L
++#define SQ_WAVE_IB_DBG0__INST_STR_ST_MASK 0x000F0000L
++#define SQ_WAVE_IB_DBG0__ECC_ST_MASK 0x03000000L
++#define SQ_WAVE_IB_DBG0__IS_HYB_MASK 0x04000000L
++#define SQ_WAVE_IB_DBG0__HYB_CNT_MASK 0x18000000L
++#define SQ_WAVE_IB_DBG0__KILL_MASK 0x20000000L
++#define SQ_WAVE_IB_DBG0__NEED_KILL_IFETCH_MASK 0x40000000L
++#define SQ_WAVE_IB_DBG0__NO_PREFETCH_CNT_HI_MASK 0x80000000L
++//SQ_WAVE_IB_DBG1
++#define SQ_WAVE_IB_DBG1__IXNACK__SHIFT 0x0
++#define SQ_WAVE_IB_DBG1__XNACK__SHIFT 0x1
++#define SQ_WAVE_IB_DBG1__TA_NEED_RESET__SHIFT 0x2
++#define SQ_WAVE_IB_DBG1__XCNT__SHIFT 0x4
++#define SQ_WAVE_IB_DBG1__QCNT__SHIFT 0xb
++#define SQ_WAVE_IB_DBG1__RCNT__SHIFT 0x12
++#define SQ_WAVE_IB_DBG1__MISC_CNT__SHIFT 0x19
++#define SQ_WAVE_IB_DBG1__IXNACK_MASK 0x00000001L
++#define SQ_WAVE_IB_DBG1__XNACK_MASK 0x00000002L
++#define SQ_WAVE_IB_DBG1__TA_NEED_RESET_MASK 0x00000004L
++#define SQ_WAVE_IB_DBG1__XCNT_MASK 0x000001F0L
++#define SQ_WAVE_IB_DBG1__QCNT_MASK 0x0000F800L
++#define SQ_WAVE_IB_DBG1__RCNT_MASK 0x007C0000L
++#define SQ_WAVE_IB_DBG1__MISC_CNT_MASK 0xFE000000L
++//SQ_WAVE_FLUSH_IB
++#define SQ_WAVE_FLUSH_IB__UNUSED__SHIFT 0x0
++#define SQ_WAVE_FLUSH_IB__UNUSED_MASK 0xFFFFFFFFL
++//SQ_WAVE_TTMP0
++#define SQ_WAVE_TTMP0__DATA__SHIFT 0x0
++#define SQ_WAVE_TTMP0__DATA_MASK 0xFFFFFFFFL
++//SQ_WAVE_TTMP1
++#define SQ_WAVE_TTMP1__DATA__SHIFT 0x0
++#define SQ_WAVE_TTMP1__DATA_MASK 0xFFFFFFFFL
++//SQ_WAVE_TTMP2
++#define SQ_WAVE_TTMP2__DATA__SHIFT 0x0
++#define SQ_WAVE_TTMP2__DATA_MASK 0xFFFFFFFFL
++//SQ_WAVE_TTMP3
++#define SQ_WAVE_TTMP3__DATA__SHIFT 0x0
++#define SQ_WAVE_TTMP3__DATA_MASK 0xFFFFFFFFL
++//SQ_WAVE_TTMP4
++#define SQ_WAVE_TTMP4__DATA__SHIFT 0x0
++#define SQ_WAVE_TTMP4__DATA_MASK 0xFFFFFFFFL
++//SQ_WAVE_TTMP5
++#define SQ_WAVE_TTMP5__DATA__SHIFT 0x0
++#define SQ_WAVE_TTMP5__DATA_MASK 0xFFFFFFFFL
++//SQ_WAVE_TTMP6
++#define SQ_WAVE_TTMP6__DATA__SHIFT 0x0
++#define SQ_WAVE_TTMP6__DATA_MASK 0xFFFFFFFFL
++//SQ_WAVE_TTMP7
++#define SQ_WAVE_TTMP7__DATA__SHIFT 0x0
++#define SQ_WAVE_TTMP7__DATA_MASK 0xFFFFFFFFL
++//SQ_WAVE_TTMP8
++#define SQ_WAVE_TTMP8__DATA__SHIFT 0x0
++#define SQ_WAVE_TTMP8__DATA_MASK 0xFFFFFFFFL
++//SQ_WAVE_TTMP9
++#define SQ_WAVE_TTMP9__DATA__SHIFT 0x0
++#define SQ_WAVE_TTMP9__DATA_MASK 0xFFFFFFFFL
++//SQ_WAVE_TTMP10
++#define SQ_WAVE_TTMP10__DATA__SHIFT 0x0
++#define SQ_WAVE_TTMP10__DATA_MASK 0xFFFFFFFFL
++//SQ_WAVE_TTMP11
++#define SQ_WAVE_TTMP11__DATA__SHIFT 0x0
++#define SQ_WAVE_TTMP11__DATA_MASK 0xFFFFFFFFL
++//SQ_WAVE_TTMP12
++#define SQ_WAVE_TTMP12__DATA__SHIFT 0x0
++#define SQ_WAVE_TTMP12__DATA_MASK 0xFFFFFFFFL
++//SQ_WAVE_TTMP13
++#define SQ_WAVE_TTMP13__DATA__SHIFT 0x0
++#define SQ_WAVE_TTMP13__DATA_MASK 0xFFFFFFFFL
++//SQ_WAVE_TTMP14
++#define SQ_WAVE_TTMP14__DATA__SHIFT 0x0
++#define SQ_WAVE_TTMP14__DATA_MASK 0xFFFFFFFFL
++//SQ_WAVE_TTMP15
++#define SQ_WAVE_TTMP15__DATA__SHIFT 0x0
++#define SQ_WAVE_TTMP15__DATA_MASK 0xFFFFFFFFL
++//SQ_WAVE_M0
++#define SQ_WAVE_M0__M0__SHIFT 0x0
++#define SQ_WAVE_M0__M0_MASK 0xFFFFFFFFL
++//SQ_WAVE_EXEC_LO
++#define SQ_WAVE_EXEC_LO__EXEC_LO__SHIFT 0x0
++#define SQ_WAVE_EXEC_LO__EXEC_LO_MASK 0xFFFFFFFFL
++//SQ_WAVE_EXEC_HI
++#define SQ_WAVE_EXEC_HI__EXEC_HI__SHIFT 0x0
++#define SQ_WAVE_EXEC_HI__EXEC_HI_MASK 0xFFFFFFFFL
++//SQ_INTERRUPT_WORD_AUTO_CTXID
++#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE__SHIFT 0x0
++#define SQ_INTERRUPT_WORD_AUTO_CTXID__WLT__SHIFT 0x1
++#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_BUF_FULL__SHIFT 0x2
++#define SQ_INTERRUPT_WORD_AUTO_CTXID__REG_TIMESTAMP__SHIFT 0x3
++#define SQ_INTERRUPT_WORD_AUTO_CTXID__CMD_TIMESTAMP__SHIFT 0x4
++#define SQ_INTERRUPT_WORD_AUTO_CTXID__HOST_CMD_OVERFLOW__SHIFT 0x5
++#define SQ_INTERRUPT_WORD_AUTO_CTXID__HOST_REG_OVERFLOW__SHIFT 0x6
++#define SQ_INTERRUPT_WORD_AUTO_CTXID__IMMED_OVERFLOW__SHIFT 0x7
++#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_UTC_ERROR__SHIFT 0x8
++#define SQ_INTERRUPT_WORD_AUTO_CTXID__SE_ID__SHIFT 0x18
++#define SQ_INTERRUPT_WORD_AUTO_CTXID__ENCODING__SHIFT 0x1a
++#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_MASK 0x0000001L
++#define SQ_INTERRUPT_WORD_AUTO_CTXID__WLT_MASK 0x0000002L
++#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_BUF_FULL_MASK 0x0000004L
++#define SQ_INTERRUPT_WORD_AUTO_CTXID__REG_TIMESTAMP_MASK 0x0000008L
++#define SQ_INTERRUPT_WORD_AUTO_CTXID__CMD_TIMESTAMP_MASK 0x0000010L
++#define SQ_INTERRUPT_WORD_AUTO_CTXID__HOST_CMD_OVERFLOW_MASK 0x0000020L
++#define SQ_INTERRUPT_WORD_AUTO_CTXID__HOST_REG_OVERFLOW_MASK 0x0000040L
++#define SQ_INTERRUPT_WORD_AUTO_CTXID__IMMED_OVERFLOW_MASK 0x0000080L
++#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_UTC_ERROR_MASK 0x0000100L
++#define SQ_INTERRUPT_WORD_AUTO_CTXID__SE_ID_MASK 0x3000000L
++#define SQ_INTERRUPT_WORD_AUTO_CTXID__ENCODING_MASK 0xC000000L
++//SQ_INTERRUPT_WORD_AUTO_HI
++#define SQ_INTERRUPT_WORD_AUTO_HI__SE_ID__SHIFT 0x8
++#define SQ_INTERRUPT_WORD_AUTO_HI__ENCODING__SHIFT 0xa
++#define SQ_INTERRUPT_WORD_AUTO_HI__SE_ID_MASK 0x300L
++#define SQ_INTERRUPT_WORD_AUTO_HI__ENCODING_MASK 0xC00L
++//SQ_INTERRUPT_WORD_AUTO_LO
++#define SQ_INTERRUPT_WORD_AUTO_LO__THREAD_TRACE__SHIFT 0x0
++#define SQ_INTERRUPT_WORD_AUTO_LO__WLT__SHIFT 0x1
++#define SQ_INTERRUPT_WORD_AUTO_LO__THREAD_TRACE_BUF_FULL__SHIFT 0x2
++#define SQ_INTERRUPT_WORD_AUTO_LO__REG_TIMESTAMP__SHIFT 0x3
++#define SQ_INTERRUPT_WORD_AUTO_LO__CMD_TIMESTAMP__SHIFT 0x4
++#define SQ_INTERRUPT_WORD_AUTO_LO__HOST_CMD_OVERFLOW__SHIFT 0x5
++#define SQ_INTERRUPT_WORD_AUTO_LO__HOST_REG_OVERFLOW__SHIFT 0x6
++#define SQ_INTERRUPT_WORD_AUTO_LO__IMMED_OVERFLOW__SHIFT 0x7
++#define SQ_INTERRUPT_WORD_AUTO_LO__THREAD_TRACE_UTC_ERROR__SHIFT 0x8
++#define SQ_INTERRUPT_WORD_AUTO_LO__THREAD_TRACE_MASK 0x001L
++#define SQ_INTERRUPT_WORD_AUTO_LO__WLT_MASK 0x002L
++#define SQ_INTERRUPT_WORD_AUTO_LO__THREAD_TRACE_BUF_FULL_MASK 0x004L
++#define SQ_INTERRUPT_WORD_AUTO_LO__REG_TIMESTAMP_MASK 0x008L
++#define SQ_INTERRUPT_WORD_AUTO_LO__CMD_TIMESTAMP_MASK 0x010L
++#define SQ_INTERRUPT_WORD_AUTO_LO__HOST_CMD_OVERFLOW_MASK 0x020L
++#define SQ_INTERRUPT_WORD_AUTO_LO__HOST_REG_OVERFLOW_MASK 0x040L
++#define SQ_INTERRUPT_WORD_AUTO_LO__IMMED_OVERFLOW_MASK 0x080L
++#define SQ_INTERRUPT_WORD_AUTO_LO__THREAD_TRACE_UTC_ERROR_MASK 0x100L
++//SQ_INTERRUPT_WORD_CMN_CTXID
++#define SQ_INTERRUPT_WORD_CMN_CTXID__SE_ID__SHIFT 0x18
++#define SQ_INTERRUPT_WORD_CMN_CTXID__ENCODING__SHIFT 0x1a
++#define SQ_INTERRUPT_WORD_CMN_CTXID__SE_ID_MASK 0x3000000L
++#define SQ_INTERRUPT_WORD_CMN_CTXID__ENCODING_MASK 0xC000000L
++//SQ_INTERRUPT_WORD_CMN_HI
++#define SQ_INTERRUPT_WORD_CMN_HI__SE_ID__SHIFT 0x8
++#define SQ_INTERRUPT_WORD_CMN_HI__ENCODING__SHIFT 0xa
++#define SQ_INTERRUPT_WORD_CMN_HI__SE_ID_MASK 0x300L
++#define SQ_INTERRUPT_WORD_CMN_HI__ENCODING_MASK 0xC00L
++//SQ_INTERRUPT_WORD_WAVE_CTXID
++#define SQ_INTERRUPT_WORD_WAVE_CTXID__DATA__SHIFT 0x0
++#define SQ_INTERRUPT_WORD_WAVE_CTXID__SH_ID__SHIFT 0xc
++#define SQ_INTERRUPT_WORD_WAVE_CTXID__PRIV__SHIFT 0xd
++#define SQ_INTERRUPT_WORD_WAVE_CTXID__WAVE_ID__SHIFT 0xe
++#define SQ_INTERRUPT_WORD_WAVE_CTXID__SIMD_ID__SHIFT 0x12
++#define SQ_INTERRUPT_WORD_WAVE_CTXID__CU_ID__SHIFT 0x14
++#define SQ_INTERRUPT_WORD_WAVE_CTXID__SE_ID__SHIFT 0x18
++#define SQ_INTERRUPT_WORD_WAVE_CTXID__ENCODING__SHIFT 0x1a
++#define SQ_INTERRUPT_WORD_WAVE_CTXID__DATA_MASK 0x0000FFFL
++#define SQ_INTERRUPT_WORD_WAVE_CTXID__SH_ID_MASK 0x0001000L
++#define SQ_INTERRUPT_WORD_WAVE_CTXID__PRIV_MASK 0x0002000L
++#define SQ_INTERRUPT_WORD_WAVE_CTXID__WAVE_ID_MASK 0x003C000L
++#define SQ_INTERRUPT_WORD_WAVE_CTXID__SIMD_ID_MASK 0x00C0000L
++#define SQ_INTERRUPT_WORD_WAVE_CTXID__CU_ID_MASK 0x0F00000L
++#define SQ_INTERRUPT_WORD_WAVE_CTXID__SE_ID_MASK 0x3000000L
++#define SQ_INTERRUPT_WORD_WAVE_CTXID__ENCODING_MASK 0xC000000L
++//SQ_INTERRUPT_WORD_WAVE_HI
++#define SQ_INTERRUPT_WORD_WAVE_HI__CU_ID__SHIFT 0x0
++#define SQ_INTERRUPT_WORD_WAVE_HI__VM_ID__SHIFT 0x4
++#define SQ_INTERRUPT_WORD_WAVE_HI__SE_ID__SHIFT 0x8
++#define SQ_INTERRUPT_WORD_WAVE_HI__ENCODING__SHIFT 0xa
++#define SQ_INTERRUPT_WORD_WAVE_HI__CU_ID_MASK 0x00FL
++#define SQ_INTERRUPT_WORD_WAVE_HI__VM_ID_MASK 0x0F0L
++#define SQ_INTERRUPT_WORD_WAVE_HI__SE_ID_MASK 0x300L
++#define SQ_INTERRUPT_WORD_WAVE_HI__ENCODING_MASK 0xC00L
++//SQ_INTERRUPT_WORD_WAVE_LO
++#define SQ_INTERRUPT_WORD_WAVE_LO__DATA__SHIFT 0x0
++#define SQ_INTERRUPT_WORD_WAVE_LO__SH_ID__SHIFT 0x18
++#define SQ_INTERRUPT_WORD_WAVE_LO__PRIV__SHIFT 0x19
++#define SQ_INTERRUPT_WORD_WAVE_LO__WAVE_ID__SHIFT 0x1a
++#define SQ_INTERRUPT_WORD_WAVE_LO__SIMD_ID__SHIFT 0x1e
++#define SQ_INTERRUPT_WORD_WAVE_LO__DATA_MASK 0x00FFFFFFL
++#define SQ_INTERRUPT_WORD_WAVE_LO__SH_ID_MASK 0x01000000L
++#define SQ_INTERRUPT_WORD_WAVE_LO__PRIV_MASK 0x02000000L
++#define SQ_INTERRUPT_WORD_WAVE_LO__WAVE_ID_MASK 0x3C000000L
++#define SQ_INTERRUPT_WORD_WAVE_LO__SIMD_ID_MASK 0xC0000000L
++
++
++
++
++
++
++
++
++// addressBlock: didtind
++//DIDT_SQ_CTRL0
++#define DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT 0x0
++#define DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT 0x1
++#define DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT 0x3
++#define DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x4
++#define DIDT_SQ_CTRL0__DIDT_STALL_CTRL_EN__SHIFT 0x5
++#define DIDT_SQ_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT 0x6
++#define DIDT_SQ_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT 0x7
++#define DIDT_SQ_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT 0x8
++#define DIDT_SQ_CTRL0__DIDT_AUTO_MPD_EN__SHIFT 0x18
++#define DIDT_SQ_CTRL0__DIDT_STALL_EVENT_EN__SHIFT 0x19
++#define DIDT_SQ_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT 0x1a
++#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT 0x1b
++#define DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK 0x00000001L
++#define DIDT_SQ_CTRL0__PHASE_OFFSET_MASK 0x00000006L
++#define DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK 0x00000008L
++#define DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x00000010L
++#define DIDT_SQ_CTRL0__DIDT_STALL_CTRL_EN_MASK 0x00000020L
++#define DIDT_SQ_CTRL0__DIDT_TUNING_CTRL_EN_MASK 0x00000040L
++#define DIDT_SQ_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK 0x00000080L
++#define DIDT_SQ_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK 0x00FFFF00L
++#define DIDT_SQ_CTRL0__DIDT_AUTO_MPD_EN_MASK 0x01000000L
++#define DIDT_SQ_CTRL0__DIDT_STALL_EVENT_EN_MASK 0x02000000L
++#define DIDT_SQ_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK 0x04000000L
++#define DIDT_SQ_CTRL0__UNUSED_0_MASK 0xF8000000L
++//DIDT_SQ_CTRL1
++#define DIDT_SQ_CTRL1__MIN_POWER__SHIFT 0x0
++#define DIDT_SQ_CTRL1__MAX_POWER__SHIFT 0x10
++#define DIDT_SQ_CTRL1__MIN_POWER_MASK 0x0000FFFFL
++#define DIDT_SQ_CTRL1__MAX_POWER_MASK 0xFFFF0000L
++//DIDT_SQ_CTRL2
++#define DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT 0x0
++#define DIDT_SQ_CTRL2__UNUSED_0__SHIFT 0xe
++#define DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT 0x10
++#define DIDT_SQ_CTRL2__UNUSED_1__SHIFT 0x1a
++#define DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT 0x1b
++#define DIDT_SQ_CTRL2__UNUSED_2__SHIFT 0x1f
++#define DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK 0x00003FFFL
++#define DIDT_SQ_CTRL2__UNUSED_0_MASK 0x0000C000L
++#define DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK 0x03FF0000L
++#define DIDT_SQ_CTRL2__UNUSED_1_MASK 0x04000000L
++#define DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK 0x78000000L
++#define DIDT_SQ_CTRL2__UNUSED_2_MASK 0x80000000L
++//DIDT_SQ_STALL_CTRL
++#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT 0x0
++#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT 0x6
++#define DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT 0xc
++#define DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT 0x12
++#define DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT 0x18
++#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK 0x0000003FL
++#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK 0x00000FC0L
++#define DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK 0x0003F000L
++#define DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK 0x00FC0000L
++#define DIDT_SQ_STALL_CTRL__UNUSED_0_MASK 0xFF000000L
++//DIDT_SQ_TUNING_CTRL
++#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT 0x0
++#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT 0xe
++#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK 0x00003FFFL
++#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK 0x0FFFC000L
++//DIDT_SQ_STALL_AUTO_RELEASE_CTRL
++#define DIDT_SQ_STALL_AUTO_RELEASE_CTRL__DIDT_STALL_AUTO_RELEASE_TIME__SHIFT 0x0
++#define DIDT_SQ_STALL_AUTO_RELEASE_CTRL__DIDT_STALL_AUTO_RELEASE_TIME_MASK 0x00FFFFFFL
++//DIDT_SQ_CTRL3
++#define DIDT_SQ_CTRL3__GC_DIDT_ENABLE__SHIFT 0x0
++#define DIDT_SQ_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT 0x1
++#define DIDT_SQ_CTRL3__THROTTLE_POLICY__SHIFT 0x2
++#define DIDT_SQ_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT 0x4
++#define DIDT_SQ_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT 0x9
++#define DIDT_SQ_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT 0xe
++#define DIDT_SQ_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT 0x16
++#define DIDT_SQ_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT 0x17
++#define DIDT_SQ_CTRL3__QUALIFY_STALL_EN__SHIFT 0x18
++#define DIDT_SQ_CTRL3__DIDT_STALL_SEL__SHIFT 0x19
++#define DIDT_SQ_CTRL3__DIDT_FORCE_STALL__SHIFT 0x1b
++#define DIDT_SQ_CTRL3__DIDT_STALL_DELAY_EN__SHIFT 0x1c
++#define DIDT_SQ_CTRL3__GC_DIDT_ENABLE_MASK 0x00000001L
++#define DIDT_SQ_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK 0x00000002L
++#define DIDT_SQ_CTRL3__THROTTLE_POLICY_MASK 0x0000000CL
++#define DIDT_SQ_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK 0x000001F0L
++#define DIDT_SQ_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK 0x00003E00L
++#define DIDT_SQ_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK 0x003FC000L
++#define DIDT_SQ_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK 0x00400000L
++#define DIDT_SQ_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK 0x00800000L
++#define DIDT_SQ_CTRL3__QUALIFY_STALL_EN_MASK 0x01000000L
++#define DIDT_SQ_CTRL3__DIDT_STALL_SEL_MASK 0x06000000L
++#define DIDT_SQ_CTRL3__DIDT_FORCE_STALL_MASK 0x08000000L
++#define DIDT_SQ_CTRL3__DIDT_STALL_DELAY_EN_MASK 0x10000000L
++//DIDT_SQ_STALL_PATTERN_1_2
++#define DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT 0x0
++#define DIDT_SQ_STALL_PATTERN_1_2__UNUSED_0__SHIFT 0xf
++#define DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT 0x10
++#define DIDT_SQ_STALL_PATTERN_1_2__UNUSED_1__SHIFT 0x1f
++#define DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK 0x00007FFFL
++#define DIDT_SQ_STALL_PATTERN_1_2__UNUSED_0_MASK 0x00008000L
++#define DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK 0x7FFF0000L
++#define DIDT_SQ_STALL_PATTERN_1_2__UNUSED_1_MASK 0x80000000L
++//DIDT_SQ_STALL_PATTERN_3_4
++#define DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT 0x0
++#define DIDT_SQ_STALL_PATTERN_3_4__UNUSED_0__SHIFT 0xf
++#define DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT 0x10
++#define DIDT_SQ_STALL_PATTERN_3_4__UNUSED_1__SHIFT 0x1f
++#define DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK 0x00007FFFL
++#define DIDT_SQ_STALL_PATTERN_3_4__UNUSED_0_MASK 0x00008000L
++#define DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK 0x7FFF0000L
++#define DIDT_SQ_STALL_PATTERN_3_4__UNUSED_1_MASK 0x80000000L
++//DIDT_SQ_STALL_PATTERN_5_6
++#define DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT 0x0
++#define DIDT_SQ_STALL_PATTERN_5_6__UNUSED_0__SHIFT 0xf
++#define DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT 0x10
++#define DIDT_SQ_STALL_PATTERN_5_6__UNUSED_1__SHIFT 0x1f
++#define DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK 0x00007FFFL
++#define DIDT_SQ_STALL_PATTERN_5_6__UNUSED_0_MASK 0x00008000L
++#define DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK 0x7FFF0000L
++#define DIDT_SQ_STALL_PATTERN_5_6__UNUSED_1_MASK 0x80000000L
++//DIDT_SQ_STALL_PATTERN_7
++#define DIDT_SQ_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT 0x0
++#define DIDT_SQ_STALL_PATTERN_7__UNUSED_0__SHIFT 0xf
++#define DIDT_SQ_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK 0x00007FFFL
++#define DIDT_SQ_STALL_PATTERN_7__UNUSED_0_MASK 0xFFFF8000L
++//DIDT_SQ_WEIGHT0_3
++#define DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT 0x0
++#define DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT 0x8
++#define DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT 0x10
++#define DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT 0x18
++#define DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK 0x000000FFL
++#define DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK 0x0000FF00L
++#define DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK 0x00FF0000L
++#define DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK 0xFF000000L
++//DIDT_SQ_WEIGHT4_7
++#define DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT 0x0
++#define DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT 0x8
++#define DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT 0x10
++#define DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT 0x18
++#define DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK 0x000000FFL
++#define DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK 0x0000FF00L
++#define DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK 0x00FF0000L
++#define DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK 0xFF000000L
++//DIDT_SQ_WEIGHT8_11
++#define DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT 0x0
++#define DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT 0x8
++#define DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT 0x10
++#define DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT 0x18
++#define DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK 0x000000FFL
++#define DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK 0x0000FF00L
++#define DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK 0x00FF0000L
++#define DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK 0xFF000000L
++//DIDT_SQ_EDC_CTRL
++#define DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT 0x0
++#define DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT 0x1
++#define DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT 0x2
++#define DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT 0x3
++#define DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT 0x4
++#define DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT 0x9
++#define DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT 0x11
++#define DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT 0x12
++#define DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT 0x13
++#define DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT 0x15
++#define DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT 0x16
++#define DIDT_SQ_EDC_CTRL__UNUSED_0__SHIFT 0x17
++#define DIDT_SQ_EDC_CTRL__EDC_EN_MASK 0x00000001L
++#define DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK 0x00000002L
++#define DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK 0x00000004L
++#define DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK 0x00000008L
++#define DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK 0x000001F0L
++#define DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK 0x0001FE00L
++#define DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK 0x00020000L
++#define DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK 0x00040000L
++#define DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK 0x00180000L
++#define DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK 0x00200000L
++#define DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK 0x00400000L
++#define DIDT_SQ_EDC_CTRL__UNUSED_0_MASK 0xFF800000L
++//DIDT_SQ_EDC_THRESHOLD
++#define DIDT_SQ_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT 0x0
++#define DIDT_SQ_EDC_THRESHOLD__EDC_THRESHOLD_MASK 0xFFFFFFFFL
++//DIDT_SQ_EDC_STALL_PATTERN_1_2
++#define DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1__SHIFT 0x0
++#define DIDT_SQ_EDC_STALL_PATTERN_1_2__UNUSED_0__SHIFT 0xf
++#define DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2__SHIFT 0x10
++#define DIDT_SQ_EDC_STALL_PATTERN_1_2__UNUSED_1__SHIFT 0x1f
++#define DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1_MASK 0x00007FFFL
++#define DIDT_SQ_EDC_STALL_PATTERN_1_2__UNUSED_0_MASK 0x00008000L
++#define DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2_MASK 0x7FFF0000L
++#define DIDT_SQ_EDC_STALL_PATTERN_1_2__UNUSED_1_MASK 0x80000000L
++//DIDT_SQ_EDC_STALL_PATTERN_3_4
++#define DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3__SHIFT 0x0
++#define DIDT_SQ_EDC_STALL_PATTERN_3_4__UNUSED_0__SHIFT 0xf
++#define DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4__SHIFT 0x10
++#define DIDT_SQ_EDC_STALL_PATTERN_3_4__UNUSED_1__SHIFT 0x1f
++#define DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3_MASK 0x00007FFFL
++#define DIDT_SQ_EDC_STALL_PATTERN_3_4__UNUSED_0_MASK 0x00008000L
++#define DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4_MASK 0x7FFF0000L
++#define DIDT_SQ_EDC_STALL_PATTERN_3_4__UNUSED_1_MASK 0x80000000L
++//DIDT_SQ_EDC_STALL_PATTERN_5_6
++#define DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5__SHIFT 0x0
++#define DIDT_SQ_EDC_STALL_PATTERN_5_6__UNUSED_0__SHIFT 0xf
++#define DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6__SHIFT 0x10
++#define DIDT_SQ_EDC_STALL_PATTERN_5_6__UNUSED_1__SHIFT 0x1f
++#define DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5_MASK 0x00007FFFL
++#define DIDT_SQ_EDC_STALL_PATTERN_5_6__UNUSED_0_MASK 0x00008000L
++#define DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6_MASK 0x7FFF0000L
++#define DIDT_SQ_EDC_STALL_PATTERN_5_6__UNUSED_1_MASK 0x80000000L
++//DIDT_SQ_EDC_STALL_PATTERN_7
++#define DIDT_SQ_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7__SHIFT 0x0
++#define DIDT_SQ_EDC_STALL_PATTERN_7__UNUSED_0__SHIFT 0xf
++#define DIDT_SQ_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7_MASK 0x00007FFFL
++#define DIDT_SQ_EDC_STALL_PATTERN_7__UNUSED_0_MASK 0xFFFF8000L
++//DIDT_SQ_EDC_STATUS
++#define DIDT_SQ_EDC_STATUS__EDC_FSM_STATE__SHIFT 0x0
++#define DIDT_SQ_EDC_STATUS__EDC_THROTTLE_LEVEL__SHIFT 0x1
++#define DIDT_SQ_EDC_STATUS__EDC_FSM_STATE_MASK 0x00000001L
++#define DIDT_SQ_EDC_STATUS__EDC_THROTTLE_LEVEL_MASK 0x0000000EL
++//DIDT_SQ_EDC_STALL_DELAY_1
++#define DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ0__SHIFT 0x0
++#define DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ1__SHIFT 0x6
++#define DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ2__SHIFT 0xc
++#define DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ3__SHIFT 0x12
++#define DIDT_SQ_EDC_STALL_DELAY_1__UNUSED__SHIFT 0x18
++#define DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ0_MASK 0x0000003FL
++#define DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ1_MASK 0x00000FC0L
++#define DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ2_MASK 0x0003F000L
++#define DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ3_MASK 0x00FC0000L
++#define DIDT_SQ_EDC_STALL_DELAY_1__UNUSED_MASK 0xFF000000L
++//DIDT_SQ_EDC_STALL_DELAY_2
++#define DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ4__SHIFT 0x0
++#define DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ5__SHIFT 0x6
++#define DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ6__SHIFT 0xc
++#define DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ7__SHIFT 0x12
++#define DIDT_SQ_EDC_STALL_DELAY_2__UNUSED__SHIFT 0x18
++#define DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ4_MASK 0x0000003FL
++#define DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ5_MASK 0x00000FC0L
++#define DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ6_MASK 0x0003F000L
++#define DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ7_MASK 0x00FC0000L
++#define DIDT_SQ_EDC_STALL_DELAY_2__UNUSED_MASK 0xFF000000L
++//DIDT_SQ_EDC_STALL_DELAY_3
++#define DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ8__SHIFT 0x0
++#define DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ9__SHIFT 0x6
++#define DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ10__SHIFT 0xc
++#define DIDT_SQ_EDC_STALL_DELAY_3__UNUSED__SHIFT 0x12
++#define DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ8_MASK 0x0000003FL
++#define DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ9_MASK 0x00000FC0L
++#define DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ10_MASK 0x0003F000L
++#define DIDT_SQ_EDC_STALL_DELAY_3__UNUSED_MASK 0xFFFC0000L
++//DIDT_SQ_EDC_OVERFLOW
++#define DIDT_SQ_EDC_OVERFLOW__EDC_ROLLING_POWER_DELTA_OVERFLOW__SHIFT 0x0
++#define DIDT_SQ_EDC_OVERFLOW__EDC_THROTTLE_LEVEL_OVERFLOW_COUNTER__SHIFT 0x1
++#define DIDT_SQ_EDC_OVERFLOW__EDC_ROLLING_POWER_DELTA_OVERFLOW_MASK 0x00000001L
++#define DIDT_SQ_EDC_OVERFLOW__EDC_THROTTLE_LEVEL_OVERFLOW_COUNTER_MASK 0x0001FFFEL
++//DIDT_SQ_EDC_ROLLING_POWER_DELTA
++#define DIDT_SQ_EDC_ROLLING_POWER_DELTA__EDC_ROLLING_POWER_DELTA__SHIFT 0x0
++#define DIDT_SQ_EDC_ROLLING_POWER_DELTA__EDC_ROLLING_POWER_DELTA_MASK 0xFFFFFFFFL
++//DIDT_DB_CTRL0
++#define DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT 0x0
++#define DIDT_DB_CTRL0__PHASE_OFFSET__SHIFT 0x1
++#define DIDT_DB_CTRL0__DIDT_CTRL_RST__SHIFT 0x3
++#define DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x4
++#define DIDT_DB_CTRL0__DIDT_STALL_CTRL_EN__SHIFT 0x5
++#define DIDT_DB_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT 0x6
++#define DIDT_DB_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT 0x7
++#define DIDT_DB_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT 0x8
++#define DIDT_DB_CTRL0__DIDT_AUTO_MPD_EN__SHIFT 0x18
++#define DIDT_DB_CTRL0__DIDT_STALL_EVENT_EN__SHIFT 0x19
++#define DIDT_DB_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT 0x1a
++#define DIDT_DB_CTRL0__UNUSED_0__SHIFT 0x1b
++#define DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK 0x00000001L
++#define DIDT_DB_CTRL0__PHASE_OFFSET_MASK 0x00000006L
++#define DIDT_DB_CTRL0__DIDT_CTRL_RST_MASK 0x00000008L
++#define DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x00000010L
++#define DIDT_DB_CTRL0__DIDT_STALL_CTRL_EN_MASK 0x00000020L
++#define DIDT_DB_CTRL0__DIDT_TUNING_CTRL_EN_MASK 0x00000040L
++#define DIDT_DB_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK 0x00000080L
++#define DIDT_DB_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK 0x00FFFF00L
++#define DIDT_DB_CTRL0__DIDT_AUTO_MPD_EN_MASK 0x01000000L
++#define DIDT_DB_CTRL0__DIDT_STALL_EVENT_EN_MASK 0x02000000L
++#define DIDT_DB_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK 0x04000000L
++#define DIDT_DB_CTRL0__UNUSED_0_MASK 0xF8000000L
++//DIDT_DB_CTRL1
++#define DIDT_DB_CTRL1__MIN_POWER__SHIFT 0x0
++#define DIDT_DB_CTRL1__MAX_POWER__SHIFT 0x10
++#define DIDT_DB_CTRL1__MIN_POWER_MASK 0x0000FFFFL
++#define DIDT_DB_CTRL1__MAX_POWER_MASK 0xFFFF0000L
++//DIDT_DB_CTRL2
++#define DIDT_DB_CTRL2__MAX_POWER_DELTA__SHIFT 0x0
++#define DIDT_DB_CTRL2__UNUSED_0__SHIFT 0xe
++#define DIDT_DB_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT 0x10
++#define DIDT_DB_CTRL2__UNUSED_1__SHIFT 0x1a
++#define DIDT_DB_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT 0x1b
++#define DIDT_DB_CTRL2__UNUSED_2__SHIFT 0x1f
++#define DIDT_DB_CTRL2__MAX_POWER_DELTA_MASK 0x00003FFFL
++#define DIDT_DB_CTRL2__UNUSED_0_MASK 0x0000C000L
++#define DIDT_DB_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK 0x03FF0000L
++#define DIDT_DB_CTRL2__UNUSED_1_MASK 0x04000000L
++#define DIDT_DB_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK 0x78000000L
++#define DIDT_DB_CTRL2__UNUSED_2_MASK 0x80000000L
++//DIDT_DB_STALL_CTRL
++#define DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT 0x0
++#define DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT 0x6
++#define DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT 0xc
++#define DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT 0x12
++#define DIDT_DB_STALL_CTRL__UNUSED_0__SHIFT 0x18
++#define DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK 0x0000003FL
++#define DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK 0x00000FC0L
++#define DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK 0x0003F000L
++#define DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK 0x00FC0000L
++#define DIDT_DB_STALL_CTRL__UNUSED_0_MASK 0xFF000000L
++//DIDT_DB_TUNING_CTRL
++#define DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT 0x0
++#define DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT 0xe
++#define DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK 0x00003FFFL
++#define DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK 0x0FFFC000L
++//DIDT_DB_STALL_AUTO_RELEASE_CTRL
++#define DIDT_DB_STALL_AUTO_RELEASE_CTRL__DIDT_STALL_AUTO_RELEASE_TIME__SHIFT 0x0
++#define DIDT_DB_STALL_AUTO_RELEASE_CTRL__DIDT_STALL_AUTO_RELEASE_TIME_MASK 0x00FFFFFFL
++//DIDT_DB_CTRL3
++#define DIDT_DB_CTRL3__GC_DIDT_ENABLE__SHIFT 0x0
++#define DIDT_DB_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT 0x1
++#define DIDT_DB_CTRL3__THROTTLE_POLICY__SHIFT 0x2
++#define DIDT_DB_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT 0x4
++#define DIDT_DB_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT 0x9
++#define DIDT_DB_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT 0xe
++#define DIDT_DB_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT 0x16
++#define DIDT_DB_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT 0x17
++#define DIDT_DB_CTRL3__QUALIFY_STALL_EN__SHIFT 0x18
++#define DIDT_DB_CTRL3__DIDT_STALL_SEL__SHIFT 0x19
++#define DIDT_DB_CTRL3__DIDT_FORCE_STALL__SHIFT 0x1b
++#define DIDT_DB_CTRL3__DIDT_STALL_DELAY_EN__SHIFT 0x1c
++#define DIDT_DB_CTRL3__GC_DIDT_ENABLE_MASK 0x00000001L
++#define DIDT_DB_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK 0x00000002L
++#define DIDT_DB_CTRL3__THROTTLE_POLICY_MASK 0x0000000CL
++#define DIDT_DB_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK 0x000001F0L
++#define DIDT_DB_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK 0x00003E00L
++#define DIDT_DB_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK 0x003FC000L
++#define DIDT_DB_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK 0x00400000L
++#define DIDT_DB_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK 0x00800000L
++#define DIDT_DB_CTRL3__QUALIFY_STALL_EN_MASK 0x01000000L
++#define DIDT_DB_CTRL3__DIDT_STALL_SEL_MASK 0x06000000L
++#define DIDT_DB_CTRL3__DIDT_FORCE_STALL_MASK 0x08000000L
++#define DIDT_DB_CTRL3__DIDT_STALL_DELAY_EN_MASK 0x10000000L
++//DIDT_DB_STALL_PATTERN_1_2
++#define DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT 0x0
++#define DIDT_DB_STALL_PATTERN_1_2__UNUSED_0__SHIFT 0xf
++#define DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT 0x10
++#define DIDT_DB_STALL_PATTERN_1_2__UNUSED_1__SHIFT 0x1f
++#define DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK 0x00007FFFL
++#define DIDT_DB_STALL_PATTERN_1_2__UNUSED_0_MASK 0x00008000L
++#define DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK 0x7FFF0000L
++#define DIDT_DB_STALL_PATTERN_1_2__UNUSED_1_MASK 0x80000000L
++//DIDT_DB_STALL_PATTERN_3_4
++#define DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT 0x0
++#define DIDT_DB_STALL_PATTERN_3_4__UNUSED_0__SHIFT 0xf
++#define DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT 0x10
++#define DIDT_DB_STALL_PATTERN_3_4__UNUSED_1__SHIFT 0x1f
++#define DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK 0x00007FFFL
++#define DIDT_DB_STALL_PATTERN_3_4__UNUSED_0_MASK 0x00008000L
++#define DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK 0x7FFF0000L
++#define DIDT_DB_STALL_PATTERN_3_4__UNUSED_1_MASK 0x80000000L
++//DIDT_DB_STALL_PATTERN_5_6
++#define DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT 0x0
++#define DIDT_DB_STALL_PATTERN_5_6__UNUSED_0__SHIFT 0xf
++#define DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT 0x10
++#define DIDT_DB_STALL_PATTERN_5_6__UNUSED_1__SHIFT 0x1f
++#define DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK 0x00007FFFL
++#define DIDT_DB_STALL_PATTERN_5_6__UNUSED_0_MASK 0x00008000L
++#define DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK 0x7FFF0000L
++#define DIDT_DB_STALL_PATTERN_5_6__UNUSED_1_MASK 0x80000000L
++//DIDT_DB_STALL_PATTERN_7
++#define DIDT_DB_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT 0x0
++#define DIDT_DB_STALL_PATTERN_7__UNUSED_0__SHIFT 0xf
++#define DIDT_DB_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK 0x00007FFFL
++#define DIDT_DB_STALL_PATTERN_7__UNUSED_0_MASK 0xFFFF8000L
++//DIDT_DB_WEIGHT0_3
++#define DIDT_DB_WEIGHT0_3__WEIGHT0__SHIFT 0x0
++#define DIDT_DB_WEIGHT0_3__WEIGHT1__SHIFT 0x8
++#define DIDT_DB_WEIGHT0_3__WEIGHT2__SHIFT 0x10
++#define DIDT_DB_WEIGHT0_3__WEIGHT3__SHIFT 0x18
++#define DIDT_DB_WEIGHT0_3__WEIGHT0_MASK 0x000000FFL
++#define DIDT_DB_WEIGHT0_3__WEIGHT1_MASK 0x0000FF00L
++#define DIDT_DB_WEIGHT0_3__WEIGHT2_MASK 0x00FF0000L
++#define DIDT_DB_WEIGHT0_3__WEIGHT3_MASK 0xFF000000L
++//DIDT_DB_WEIGHT4_7
++#define DIDT_DB_WEIGHT4_7__WEIGHT4__SHIFT 0x0
++#define DIDT_DB_WEIGHT4_7__WEIGHT5__SHIFT 0x8
++#define DIDT_DB_WEIGHT4_7__WEIGHT6__SHIFT 0x10
++#define DIDT_DB_WEIGHT4_7__WEIGHT7__SHIFT 0x18
++#define DIDT_DB_WEIGHT4_7__WEIGHT4_MASK 0x000000FFL
++#define DIDT_DB_WEIGHT4_7__WEIGHT5_MASK 0x0000FF00L
++#define DIDT_DB_WEIGHT4_7__WEIGHT6_MASK 0x00FF0000L
++#define DIDT_DB_WEIGHT4_7__WEIGHT7_MASK 0xFF000000L
++//DIDT_DB_WEIGHT8_11
++#define DIDT_DB_WEIGHT8_11__WEIGHT8__SHIFT 0x0
++#define DIDT_DB_WEIGHT8_11__WEIGHT9__SHIFT 0x8
++#define DIDT_DB_WEIGHT8_11__WEIGHT10__SHIFT 0x10
++#define DIDT_DB_WEIGHT8_11__WEIGHT11__SHIFT 0x18
++#define DIDT_DB_WEIGHT8_11__WEIGHT8_MASK 0x000000FFL
++#define DIDT_DB_WEIGHT8_11__WEIGHT9_MASK 0x0000FF00L
++#define DIDT_DB_WEIGHT8_11__WEIGHT10_MASK 0x00FF0000L
++#define DIDT_DB_WEIGHT8_11__WEIGHT11_MASK 0xFF000000L
++//DIDT_DB_EDC_CTRL
++#define DIDT_DB_EDC_CTRL__EDC_EN__SHIFT 0x0
++#define DIDT_DB_EDC_CTRL__EDC_SW_RST__SHIFT 0x1
++#define DIDT_DB_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT 0x2
++#define DIDT_DB_EDC_CTRL__EDC_FORCE_STALL__SHIFT 0x3
++#define DIDT_DB_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT 0x4
++#define DIDT_DB_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT 0x9
++#define DIDT_DB_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT 0x11
++#define DIDT_DB_EDC_CTRL__GC_EDC_EN__SHIFT 0x12
++#define DIDT_DB_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT 0x13
++#define DIDT_DB_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT 0x15
++#define DIDT_DB_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT 0x16
++#define DIDT_DB_EDC_CTRL__UNUSED_0__SHIFT 0x17
++#define DIDT_DB_EDC_CTRL__EDC_EN_MASK 0x00000001L
++#define DIDT_DB_EDC_CTRL__EDC_SW_RST_MASK 0x00000002L
++#define DIDT_DB_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK 0x00000004L
++#define DIDT_DB_EDC_CTRL__EDC_FORCE_STALL_MASK 0x00000008L
++#define DIDT_DB_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK 0x000001F0L
++#define DIDT_DB_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK 0x0001FE00L
++#define DIDT_DB_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK 0x00020000L
++#define DIDT_DB_EDC_CTRL__GC_EDC_EN_MASK 0x00040000L
++#define DIDT_DB_EDC_CTRL__GC_EDC_STALL_POLICY_MASK 0x00180000L
++#define DIDT_DB_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK 0x00200000L
++#define DIDT_DB_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK 0x00400000L
++#define DIDT_DB_EDC_CTRL__UNUSED_0_MASK 0xFF800000L
++//DIDT_DB_EDC_THRESHOLD
++#define DIDT_DB_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT 0x0
++#define DIDT_DB_EDC_THRESHOLD__EDC_THRESHOLD_MASK 0xFFFFFFFFL
++//DIDT_DB_EDC_STALL_PATTERN_1_2
++#define DIDT_DB_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1__SHIFT 0x0
++#define DIDT_DB_EDC_STALL_PATTERN_1_2__UNUSED_0__SHIFT 0xf
++#define DIDT_DB_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2__SHIFT 0x10
++#define DIDT_DB_EDC_STALL_PATTERN_1_2__UNUSED_1__SHIFT 0x1f
++#define DIDT_DB_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1_MASK 0x00007FFFL
++#define DIDT_DB_EDC_STALL_PATTERN_1_2__UNUSED_0_MASK 0x00008000L
++#define DIDT_DB_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2_MASK 0x7FFF0000L
++#define DIDT_DB_EDC_STALL_PATTERN_1_2__UNUSED_1_MASK 0x80000000L
++//DIDT_DB_EDC_STALL_PATTERN_3_4
++#define DIDT_DB_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3__SHIFT 0x0
++#define DIDT_DB_EDC_STALL_PATTERN_3_4__UNUSED_0__SHIFT 0xf
++#define DIDT_DB_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4__SHIFT 0x10
++#define DIDT_DB_EDC_STALL_PATTERN_3_4__UNUSED_1__SHIFT 0x1f
++#define DIDT_DB_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3_MASK 0x00007FFFL
++#define DIDT_DB_EDC_STALL_PATTERN_3_4__UNUSED_0_MASK 0x00008000L
++#define DIDT_DB_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4_MASK 0x7FFF0000L
++#define DIDT_DB_EDC_STALL_PATTERN_3_4__UNUSED_1_MASK 0x80000000L
++//DIDT_DB_EDC_STALL_PATTERN_5_6
++#define DIDT_DB_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5__SHIFT 0x0
++#define DIDT_DB_EDC_STALL_PATTERN_5_6__UNUSED_0__SHIFT 0xf
++#define DIDT_DB_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6__SHIFT 0x10
++#define DIDT_DB_EDC_STALL_PATTERN_5_6__UNUSED_1__SHIFT 0x1f
++#define DIDT_DB_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5_MASK 0x00007FFFL
++#define DIDT_DB_EDC_STALL_PATTERN_5_6__UNUSED_0_MASK 0x00008000L
++#define DIDT_DB_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6_MASK 0x7FFF0000L
++#define DIDT_DB_EDC_STALL_PATTERN_5_6__UNUSED_1_MASK 0x80000000L
++//DIDT_DB_EDC_STALL_PATTERN_7
++#define DIDT_DB_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7__SHIFT 0x0
++#define DIDT_DB_EDC_STALL_PATTERN_7__UNUSED_0__SHIFT 0xf
++#define DIDT_DB_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7_MASK 0x00007FFFL
++#define DIDT_DB_EDC_STALL_PATTERN_7__UNUSED_0_MASK 0xFFFF8000L
++//DIDT_DB_EDC_STATUS
++#define DIDT_DB_EDC_STATUS__EDC_FSM_STATE__SHIFT 0x0
++#define DIDT_DB_EDC_STATUS__EDC_THROTTLE_LEVEL__SHIFT 0x1
++#define DIDT_DB_EDC_STATUS__EDC_FSM_STATE_MASK 0x00000001L
++#define DIDT_DB_EDC_STATUS__EDC_THROTTLE_LEVEL_MASK 0x0000000EL
++//DIDT_DB_EDC_STALL_DELAY_1
++#define DIDT_DB_EDC_STALL_DELAY_1__EDC_STALL_DELAY_DB0__SHIFT 0x0
++#define DIDT_DB_EDC_STALL_DELAY_1__EDC_STALL_DELAY_DB1__SHIFT 0x3
++#define DIDT_DB_EDC_STALL_DELAY_1__UNUSED__SHIFT 0x6
++#define DIDT_DB_EDC_STALL_DELAY_1__EDC_STALL_DELAY_DB0_MASK 0x00000007L
++#define DIDT_DB_EDC_STALL_DELAY_1__EDC_STALL_DELAY_DB1_MASK 0x00000038L
++#define DIDT_DB_EDC_STALL_DELAY_1__UNUSED_MASK 0xFFFFFFC0L
++//DIDT_DB_EDC_OVERFLOW
++#define DIDT_DB_EDC_OVERFLOW__EDC_ROLLING_POWER_DELTA_OVERFLOW__SHIFT 0x0
++#define DIDT_DB_EDC_OVERFLOW__EDC_THROTTLE_LEVEL_OVERFLOW_COUNTER__SHIFT 0x1
++#define DIDT_DB_EDC_OVERFLOW__EDC_ROLLING_POWER_DELTA_OVERFLOW_MASK 0x00000001L
++#define DIDT_DB_EDC_OVERFLOW__EDC_THROTTLE_LEVEL_OVERFLOW_COUNTER_MASK 0x0001FFFEL
++//DIDT_DB_EDC_ROLLING_POWER_DELTA
++#define DIDT_DB_EDC_ROLLING_POWER_DELTA__EDC_ROLLING_POWER_DELTA__SHIFT 0x0
++#define DIDT_DB_EDC_ROLLING_POWER_DELTA__EDC_ROLLING_POWER_DELTA_MASK 0xFFFFFFFFL
++//DIDT_TD_CTRL0
++#define DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT 0x0
++#define DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT 0x1
++#define DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT 0x3
++#define DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x4
++#define DIDT_TD_CTRL0__DIDT_STALL_CTRL_EN__SHIFT 0x5
++#define DIDT_TD_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT 0x6
++#define DIDT_TD_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT 0x7
++#define DIDT_TD_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT 0x8
++#define DIDT_TD_CTRL0__DIDT_AUTO_MPD_EN__SHIFT 0x18
++#define DIDT_TD_CTRL0__DIDT_STALL_EVENT_EN__SHIFT 0x19
++#define DIDT_TD_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT 0x1a
++#define DIDT_TD_CTRL0__UNUSED_0__SHIFT 0x1b
++#define DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK 0x00000001L
++#define DIDT_TD_CTRL0__PHASE_OFFSET_MASK 0x00000006L
++#define DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK 0x00000008L
++#define DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x00000010L
++#define DIDT_TD_CTRL0__DIDT_STALL_CTRL_EN_MASK 0x00000020L
++#define DIDT_TD_CTRL0__DIDT_TUNING_CTRL_EN_MASK 0x00000040L
++#define DIDT_TD_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK 0x00000080L
++#define DIDT_TD_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK 0x00FFFF00L
++#define DIDT_TD_CTRL0__DIDT_AUTO_MPD_EN_MASK 0x01000000L
++#define DIDT_TD_CTRL0__DIDT_STALL_EVENT_EN_MASK 0x02000000L
++#define DIDT_TD_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK 0x04000000L
++#define DIDT_TD_CTRL0__UNUSED_0_MASK 0xF8000000L
++//DIDT_TD_CTRL1
++#define DIDT_TD_CTRL1__MIN_POWER__SHIFT 0x0
++#define DIDT_TD_CTRL1__MAX_POWER__SHIFT 0x10
++#define DIDT_TD_CTRL1__MIN_POWER_MASK 0x0000FFFFL
++#define DIDT_TD_CTRL1__MAX_POWER_MASK 0xFFFF0000L
++//DIDT_TD_CTRL2
++#define DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT 0x0
++#define DIDT_TD_CTRL2__UNUSED_0__SHIFT 0xe
++#define DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT 0x10
++#define DIDT_TD_CTRL2__UNUSED_1__SHIFT 0x1a
++#define DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT 0x1b
++#define DIDT_TD_CTRL2__UNUSED_2__SHIFT 0x1f
++#define DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK 0x00003FFFL
++#define DIDT_TD_CTRL2__UNUSED_0_MASK 0x0000C000L
++#define DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK 0x03FF0000L
++#define DIDT_TD_CTRL2__UNUSED_1_MASK 0x04000000L
++#define DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK 0x78000000L
++#define DIDT_TD_CTRL2__UNUSED_2_MASK 0x80000000L
++//DIDT_TD_STALL_CTRL
++#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT 0x0
++#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT 0x6
++#define DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT 0xc
++#define DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT 0x12
++#define DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT 0x18
++#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK 0x0000003FL
++#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK 0x00000FC0L
++#define DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK 0x0003F000L
++#define DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK 0x00FC0000L
++#define DIDT_TD_STALL_CTRL__UNUSED_0_MASK 0xFF000000L
++//DIDT_TD_TUNING_CTRL
++#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT 0x0
++#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT 0xe
++#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK 0x00003FFFL
++#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK 0x0FFFC000L
++//DIDT_TD_STALL_AUTO_RELEASE_CTRL
++#define DIDT_TD_STALL_AUTO_RELEASE_CTRL__DIDT_STALL_AUTO_RELEASE_TIME__SHIFT 0x0
++#define DIDT_TD_STALL_AUTO_RELEASE_CTRL__DIDT_STALL_AUTO_RELEASE_TIME_MASK 0x00FFFFFFL
++//DIDT_TD_CTRL3
++#define DIDT_TD_CTRL3__GC_DIDT_ENABLE__SHIFT 0x0
++#define DIDT_TD_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT 0x1
++#define DIDT_TD_CTRL3__THROTTLE_POLICY__SHIFT 0x2
++#define DIDT_TD_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT 0x4
++#define DIDT_TD_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT 0x9
++#define DIDT_TD_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT 0xe
++#define DIDT_TD_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT 0x16
++#define DIDT_TD_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT 0x17
++#define DIDT_TD_CTRL3__QUALIFY_STALL_EN__SHIFT 0x18
++#define DIDT_TD_CTRL3__DIDT_STALL_SEL__SHIFT 0x19
++#define DIDT_TD_CTRL3__DIDT_FORCE_STALL__SHIFT 0x1b
++#define DIDT_TD_CTRL3__DIDT_STALL_DELAY_EN__SHIFT 0x1c
++#define DIDT_TD_CTRL3__GC_DIDT_ENABLE_MASK 0x00000001L
++#define DIDT_TD_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK 0x00000002L
++#define DIDT_TD_CTRL3__THROTTLE_POLICY_MASK 0x0000000CL
++#define DIDT_TD_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK 0x000001F0L
++#define DIDT_TD_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK 0x00003E00L
++#define DIDT_TD_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK 0x003FC000L
++#define DIDT_TD_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK 0x00400000L
++#define DIDT_TD_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK 0x00800000L
++#define DIDT_TD_CTRL3__QUALIFY_STALL_EN_MASK 0x01000000L
++#define DIDT_TD_CTRL3__DIDT_STALL_SEL_MASK 0x06000000L
++#define DIDT_TD_CTRL3__DIDT_FORCE_STALL_MASK 0x08000000L
++#define DIDT_TD_CTRL3__DIDT_STALL_DELAY_EN_MASK 0x10000000L
++//DIDT_TD_STALL_PATTERN_1_2
++#define DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT 0x0
++#define DIDT_TD_STALL_PATTERN_1_2__UNUSED_0__SHIFT 0xf
++#define DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT 0x10
++#define DIDT_TD_STALL_PATTERN_1_2__UNUSED_1__SHIFT 0x1f
++#define DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK 0x00007FFFL
++#define DIDT_TD_STALL_PATTERN_1_2__UNUSED_0_MASK 0x00008000L
++#define DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK 0x7FFF0000L
++#define DIDT_TD_STALL_PATTERN_1_2__UNUSED_1_MASK 0x80000000L
++//DIDT_TD_STALL_PATTERN_3_4
++#define DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT 0x0
++#define DIDT_TD_STALL_PATTERN_3_4__UNUSED_0__SHIFT 0xf
++#define DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT 0x10
++#define DIDT_TD_STALL_PATTERN_3_4__UNUSED_1__SHIFT 0x1f
++#define DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK 0x00007FFFL
++#define DIDT_TD_STALL_PATTERN_3_4__UNUSED_0_MASK 0x00008000L
++#define DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK 0x7FFF0000L
++#define DIDT_TD_STALL_PATTERN_3_4__UNUSED_1_MASK 0x80000000L
++//DIDT_TD_STALL_PATTERN_5_6
++#define DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT 0x0
++#define DIDT_TD_STALL_PATTERN_5_6__UNUSED_0__SHIFT 0xf
++#define DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT 0x10
++#define DIDT_TD_STALL_PATTERN_5_6__UNUSED_1__SHIFT 0x1f
++#define DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK 0x00007FFFL
++#define DIDT_TD_STALL_PATTERN_5_6__UNUSED_0_MASK 0x00008000L
++#define DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK 0x7FFF0000L
++#define DIDT_TD_STALL_PATTERN_5_6__UNUSED_1_MASK 0x80000000L
++//DIDT_TD_STALL_PATTERN_7
++#define DIDT_TD_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT 0x0
++#define DIDT_TD_STALL_PATTERN_7__UNUSED_0__SHIFT 0xf
++#define DIDT_TD_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK 0x00007FFFL
++#define DIDT_TD_STALL_PATTERN_7__UNUSED_0_MASK 0xFFFF8000L
++//DIDT_TD_WEIGHT0_3
++#define DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT 0x0
++#define DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT 0x8
++#define DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT 0x10
++#define DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT 0x18
++#define DIDT_TD_WEIGHT0_3__WEIGHT0_MASK 0x000000FFL
++#define DIDT_TD_WEIGHT0_3__WEIGHT1_MASK 0x0000FF00L
++#define DIDT_TD_WEIGHT0_3__WEIGHT2_MASK 0x00FF0000L
++#define DIDT_TD_WEIGHT0_3__WEIGHT3_MASK 0xFF000000L
++//DIDT_TD_WEIGHT4_7
++#define DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT 0x0
++#define DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT 0x8
++#define DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT 0x10
++#define DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT 0x18
++#define DIDT_TD_WEIGHT4_7__WEIGHT4_MASK 0x000000FFL
++#define DIDT_TD_WEIGHT4_7__WEIGHT5_MASK 0x0000FF00L
++#define DIDT_TD_WEIGHT4_7__WEIGHT6_MASK 0x00FF0000L
++#define DIDT_TD_WEIGHT4_7__WEIGHT7_MASK 0xFF000000L
++//DIDT_TD_WEIGHT8_11
++#define DIDT_TD_WEIGHT8_11__WEIGHT8__SHIFT 0x0
++#define DIDT_TD_WEIGHT8_11__WEIGHT9__SHIFT 0x8
++#define DIDT_TD_WEIGHT8_11__WEIGHT10__SHIFT 0x10
++#define DIDT_TD_WEIGHT8_11__WEIGHT11__SHIFT 0x18
++#define DIDT_TD_WEIGHT8_11__WEIGHT8_MASK 0x000000FFL
++#define DIDT_TD_WEIGHT8_11__WEIGHT9_MASK 0x0000FF00L
++#define DIDT_TD_WEIGHT8_11__WEIGHT10_MASK 0x00FF0000L
++#define DIDT_TD_WEIGHT8_11__WEIGHT11_MASK 0xFF000000L
++//DIDT_TD_EDC_CTRL
++#define DIDT_TD_EDC_CTRL__EDC_EN__SHIFT 0x0
++#define DIDT_TD_EDC_CTRL__EDC_SW_RST__SHIFT 0x1
++#define DIDT_TD_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT 0x2
++#define DIDT_TD_EDC_CTRL__EDC_FORCE_STALL__SHIFT 0x3
++#define DIDT_TD_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT 0x4
++#define DIDT_TD_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT 0x9
++#define DIDT_TD_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT 0x11
++#define DIDT_TD_EDC_CTRL__GC_EDC_EN__SHIFT 0x12
++#define DIDT_TD_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT 0x13
++#define DIDT_TD_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT 0x15
++#define DIDT_TD_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT 0x16
++#define DIDT_TD_EDC_CTRL__UNUSED_0__SHIFT 0x17
++#define DIDT_TD_EDC_CTRL__EDC_EN_MASK 0x00000001L
++#define DIDT_TD_EDC_CTRL__EDC_SW_RST_MASK 0x00000002L
++#define DIDT_TD_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK 0x00000004L
++#define DIDT_TD_EDC_CTRL__EDC_FORCE_STALL_MASK 0x00000008L
++#define DIDT_TD_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK 0x000001F0L
++#define DIDT_TD_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK 0x0001FE00L
++#define DIDT_TD_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK 0x00020000L
++#define DIDT_TD_EDC_CTRL__GC_EDC_EN_MASK 0x00040000L
++#define DIDT_TD_EDC_CTRL__GC_EDC_STALL_POLICY_MASK 0x00180000L
++#define DIDT_TD_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK 0x00200000L
++#define DIDT_TD_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK 0x00400000L
++#define DIDT_TD_EDC_CTRL__UNUSED_0_MASK 0xFF800000L
++//DIDT_TD_EDC_THRESHOLD
++#define DIDT_TD_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT 0x0
++#define DIDT_TD_EDC_THRESHOLD__EDC_THRESHOLD_MASK 0xFFFFFFFFL
++//DIDT_TD_EDC_STALL_PATTERN_1_2
++#define DIDT_TD_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1__SHIFT 0x0
++#define DIDT_TD_EDC_STALL_PATTERN_1_2__UNUSED_0__SHIFT 0xf
++#define DIDT_TD_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2__SHIFT 0x10
++#define DIDT_TD_EDC_STALL_PATTERN_1_2__UNUSED_1__SHIFT 0x1f
++#define DIDT_TD_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1_MASK 0x00007FFFL
++#define DIDT_TD_EDC_STALL_PATTERN_1_2__UNUSED_0_MASK 0x00008000L
++#define DIDT_TD_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2_MASK 0x7FFF0000L
++#define DIDT_TD_EDC_STALL_PATTERN_1_2__UNUSED_1_MASK 0x80000000L
++//DIDT_TD_EDC_STALL_PATTERN_3_4
++#define DIDT_TD_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3__SHIFT 0x0
++#define DIDT_TD_EDC_STALL_PATTERN_3_4__UNUSED_0__SHIFT 0xf
++#define DIDT_TD_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4__SHIFT 0x10
++#define DIDT_TD_EDC_STALL_PATTERN_3_4__UNUSED_1__SHIFT 0x1f
++#define DIDT_TD_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3_MASK 0x00007FFFL
++#define DIDT_TD_EDC_STALL_PATTERN_3_4__UNUSED_0_MASK 0x00008000L
++#define DIDT_TD_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4_MASK 0x7FFF0000L
++#define DIDT_TD_EDC_STALL_PATTERN_3_4__UNUSED_1_MASK 0x80000000L
++//DIDT_TD_EDC_STALL_PATTERN_5_6
++#define DIDT_TD_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5__SHIFT 0x0
++#define DIDT_TD_EDC_STALL_PATTERN_5_6__UNUSED_0__SHIFT 0xf
++#define DIDT_TD_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6__SHIFT 0x10
++#define DIDT_TD_EDC_STALL_PATTERN_5_6__UNUSED_1__SHIFT 0x1f
++#define DIDT_TD_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5_MASK 0x00007FFFL
++#define DIDT_TD_EDC_STALL_PATTERN_5_6__UNUSED_0_MASK 0x00008000L
++#define DIDT_TD_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6_MASK 0x7FFF0000L
++#define DIDT_TD_EDC_STALL_PATTERN_5_6__UNUSED_1_MASK 0x80000000L
++//DIDT_TD_EDC_STALL_PATTERN_7
++#define DIDT_TD_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7__SHIFT 0x0
++#define DIDT_TD_EDC_STALL_PATTERN_7__UNUSED_0__SHIFT 0xf
++#define DIDT_TD_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7_MASK 0x00007FFFL
++#define DIDT_TD_EDC_STALL_PATTERN_7__UNUSED_0_MASK 0xFFFF8000L
++//DIDT_TD_EDC_STATUS
++#define DIDT_TD_EDC_STATUS__EDC_FSM_STATE__SHIFT 0x0
++#define DIDT_TD_EDC_STATUS__EDC_THROTTLE_LEVEL__SHIFT 0x1
++#define DIDT_TD_EDC_STATUS__EDC_FSM_STATE_MASK 0x00000001L
++#define DIDT_TD_EDC_STATUS__EDC_THROTTLE_LEVEL_MASK 0x0000000EL
++//DIDT_TD_EDC_STALL_DELAY_1
++#define DIDT_TD_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TD0__SHIFT 0x0
++#define DIDT_TD_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TD1__SHIFT 0x6
++#define DIDT_TD_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TD2__SHIFT 0xc
++#define DIDT_TD_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TD3__SHIFT 0x12
++#define DIDT_TD_EDC_STALL_DELAY_1__UNUSED__SHIFT 0x18
++#define DIDT_TD_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TD0_MASK 0x0000003FL
++#define DIDT_TD_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TD1_MASK 0x00000FC0L
++#define DIDT_TD_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TD2_MASK 0x0003F000L
++#define DIDT_TD_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TD3_MASK 0x00FC0000L
++#define DIDT_TD_EDC_STALL_DELAY_1__UNUSED_MASK 0xFF000000L
++//DIDT_TD_EDC_STALL_DELAY_2
++#define DIDT_TD_EDC_STALL_DELAY_2__EDC_STALL_DELAY_TD4__SHIFT 0x0
++#define DIDT_TD_EDC_STALL_DELAY_2__EDC_STALL_DELAY_TD5__SHIFT 0x6
++#define DIDT_TD_EDC_STALL_DELAY_2__EDC_STALL_DELAY_TD6__SHIFT 0xc
++#define DIDT_TD_EDC_STALL_DELAY_2__EDC_STALL_DELAY_TD7__SHIFT 0x12
++#define DIDT_TD_EDC_STALL_DELAY_2__UNUSED__SHIFT 0x18
++#define DIDT_TD_EDC_STALL_DELAY_2__EDC_STALL_DELAY_TD4_MASK 0x0000003FL
++#define DIDT_TD_EDC_STALL_DELAY_2__EDC_STALL_DELAY_TD5_MASK 0x00000FC0L
++#define DIDT_TD_EDC_STALL_DELAY_2__EDC_STALL_DELAY_TD6_MASK 0x0003F000L
++#define DIDT_TD_EDC_STALL_DELAY_2__EDC_STALL_DELAY_TD7_MASK 0x00FC0000L
++#define DIDT_TD_EDC_STALL_DELAY_2__UNUSED_MASK 0xFF000000L
++//DIDT_TD_EDC_STALL_DELAY_3
++#define DIDT_TD_EDC_STALL_DELAY_3__EDC_STALL_DELAY_TD8__SHIFT 0x0
++#define DIDT_TD_EDC_STALL_DELAY_3__EDC_STALL_DELAY_TD9__SHIFT 0x6
++#define DIDT_TD_EDC_STALL_DELAY_3__EDC_STALL_DELAY_TD10__SHIFT 0xc
++#define DIDT_TD_EDC_STALL_DELAY_3__UNUSED__SHIFT 0x12
++#define DIDT_TD_EDC_STALL_DELAY_3__EDC_STALL_DELAY_TD8_MASK 0x0000003FL
++#define DIDT_TD_EDC_STALL_DELAY_3__EDC_STALL_DELAY_TD9_MASK 0x00000FC0L
++#define DIDT_TD_EDC_STALL_DELAY_3__EDC_STALL_DELAY_TD10_MASK 0x0003F000L
++#define DIDT_TD_EDC_STALL_DELAY_3__UNUSED_MASK 0xFFFC0000L
++//DIDT_TD_EDC_OVERFLOW
++#define DIDT_TD_EDC_OVERFLOW__EDC_ROLLING_POWER_DELTA_OVERFLOW__SHIFT 0x0
++#define DIDT_TD_EDC_OVERFLOW__EDC_THROTTLE_LEVEL_OVERFLOW_COUNTER__SHIFT 0x1
++#define DIDT_TD_EDC_OVERFLOW__EDC_ROLLING_POWER_DELTA_OVERFLOW_MASK 0x00000001L
++#define DIDT_TD_EDC_OVERFLOW__EDC_THROTTLE_LEVEL_OVERFLOW_COUNTER_MASK 0x0001FFFEL
++//DIDT_TD_EDC_ROLLING_POWER_DELTA
++#define DIDT_TD_EDC_ROLLING_POWER_DELTA__EDC_ROLLING_POWER_DELTA__SHIFT 0x0
++#define DIDT_TD_EDC_ROLLING_POWER_DELTA__EDC_ROLLING_POWER_DELTA_MASK 0xFFFFFFFFL
++//DIDT_TCP_CTRL0
++#define DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT 0x0
++#define DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT 0x1
++#define DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT 0x3
++#define DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x4
++#define DIDT_TCP_CTRL0__DIDT_STALL_CTRL_EN__SHIFT 0x5
++#define DIDT_TCP_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT 0x6
++#define DIDT_TCP_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT 0x7
++#define DIDT_TCP_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT 0x8
++#define DIDT_TCP_CTRL0__DIDT_AUTO_MPD_EN__SHIFT 0x18
++#define DIDT_TCP_CTRL0__DIDT_STALL_EVENT_EN__SHIFT 0x19
++#define DIDT_TCP_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT 0x1a
++#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT 0x1b
++#define DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK 0x00000001L
++#define DIDT_TCP_CTRL0__PHASE_OFFSET_MASK 0x00000006L
++#define DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK 0x00000008L
++#define DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x00000010L
++#define DIDT_TCP_CTRL0__DIDT_STALL_CTRL_EN_MASK 0x00000020L
++#define DIDT_TCP_CTRL0__DIDT_TUNING_CTRL_EN_MASK 0x00000040L
++#define DIDT_TCP_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK 0x00000080L
++#define DIDT_TCP_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK 0x00FFFF00L
++#define DIDT_TCP_CTRL0__DIDT_AUTO_MPD_EN_MASK 0x01000000L
++#define DIDT_TCP_CTRL0__DIDT_STALL_EVENT_EN_MASK 0x02000000L
++#define DIDT_TCP_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK 0x04000000L
++#define DIDT_TCP_CTRL0__UNUSED_0_MASK 0xF8000000L
++//DIDT_TCP_CTRL1
++#define DIDT_TCP_CTRL1__MIN_POWER__SHIFT 0x0
++#define DIDT_TCP_CTRL1__MAX_POWER__SHIFT 0x10
++#define DIDT_TCP_CTRL1__MIN_POWER_MASK 0x0000FFFFL
++#define DIDT_TCP_CTRL1__MAX_POWER_MASK 0xFFFF0000L
++//DIDT_TCP_CTRL2
++#define DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT 0x0
++#define DIDT_TCP_CTRL2__UNUSED_0__SHIFT 0xe
++#define DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT 0x10
++#define DIDT_TCP_CTRL2__UNUSED_1__SHIFT 0x1a
++#define DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT 0x1b
++#define DIDT_TCP_CTRL2__UNUSED_2__SHIFT 0x1f
++#define DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK 0x00003FFFL
++#define DIDT_TCP_CTRL2__UNUSED_0_MASK 0x0000C000L
++#define DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK 0x03FF0000L
++#define DIDT_TCP_CTRL2__UNUSED_1_MASK 0x04000000L
++#define DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK 0x78000000L
++#define DIDT_TCP_CTRL2__UNUSED_2_MASK 0x80000000L
++//DIDT_TCP_STALL_CTRL
++#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT 0x0
++#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT 0x6
++#define DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT 0xc
++#define DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT 0x12
++#define DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT 0x18
++#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK 0x0000003FL
++#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK 0x00000FC0L
++#define DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK 0x0003F000L
++#define DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK 0x00FC0000L
++#define DIDT_TCP_STALL_CTRL__UNUSED_0_MASK 0xFF000000L
++//DIDT_TCP_TUNING_CTRL
++#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT 0x0
++#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT 0xe
++#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK 0x00003FFFL
++#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK 0x0FFFC000L
++//DIDT_TCP_STALL_AUTO_RELEASE_CTRL
++#define DIDT_TCP_STALL_AUTO_RELEASE_CTRL__DIDT_STALL_AUTO_RELEASE_TIME__SHIFT 0x0
++#define DIDT_TCP_STALL_AUTO_RELEASE_CTRL__DIDT_STALL_AUTO_RELEASE_TIME_MASK 0x00FFFFFFL
++//DIDT_TCP_CTRL3
++#define DIDT_TCP_CTRL3__GC_DIDT_ENABLE__SHIFT 0x0
++#define DIDT_TCP_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT 0x1
++#define DIDT_TCP_CTRL3__THROTTLE_POLICY__SHIFT 0x2
++#define DIDT_TCP_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT 0x4
++#define DIDT_TCP_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT 0x9
++#define DIDT_TCP_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT 0xe
++#define DIDT_TCP_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT 0x16
++#define DIDT_TCP_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT 0x17
++#define DIDT_TCP_CTRL3__QUALIFY_STALL_EN__SHIFT 0x18
++#define DIDT_TCP_CTRL3__DIDT_STALL_SEL__SHIFT 0x19
++#define DIDT_TCP_CTRL3__DIDT_FORCE_STALL__SHIFT 0x1b
++#define DIDT_TCP_CTRL3__DIDT_STALL_DELAY_EN__SHIFT 0x1c
++#define DIDT_TCP_CTRL3__GC_DIDT_ENABLE_MASK 0x00000001L
++#define DIDT_TCP_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK 0x00000002L
++#define DIDT_TCP_CTRL3__THROTTLE_POLICY_MASK 0x0000000CL
++#define DIDT_TCP_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK 0x000001F0L
++#define DIDT_TCP_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK 0x00003E00L
++#define DIDT_TCP_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK 0x003FC000L
++#define DIDT_TCP_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK 0x00400000L
++#define DIDT_TCP_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK 0x00800000L
++#define DIDT_TCP_CTRL3__QUALIFY_STALL_EN_MASK 0x01000000L
++#define DIDT_TCP_CTRL3__DIDT_STALL_SEL_MASK 0x06000000L
++#define DIDT_TCP_CTRL3__DIDT_FORCE_STALL_MASK 0x08000000L
++#define DIDT_TCP_CTRL3__DIDT_STALL_DELAY_EN_MASK 0x10000000L
++//DIDT_TCP_STALL_PATTERN_1_2
++#define DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT 0x0
++#define DIDT_TCP_STALL_PATTERN_1_2__UNUSED_0__SHIFT 0xf
++#define DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT 0x10
++#define DIDT_TCP_STALL_PATTERN_1_2__UNUSED_1__SHIFT 0x1f
++#define DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK 0x00007FFFL
++#define DIDT_TCP_STALL_PATTERN_1_2__UNUSED_0_MASK 0x00008000L
++#define DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK 0x7FFF0000L
++#define DIDT_TCP_STALL_PATTERN_1_2__UNUSED_1_MASK 0x80000000L
++//DIDT_TCP_STALL_PATTERN_3_4
++#define DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT 0x0
++#define DIDT_TCP_STALL_PATTERN_3_4__UNUSED_0__SHIFT 0xf
++#define DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT 0x10
++#define DIDT_TCP_STALL_PATTERN_3_4__UNUSED_1__SHIFT 0x1f
++#define DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK 0x00007FFFL
++#define DIDT_TCP_STALL_PATTERN_3_4__UNUSED_0_MASK 0x00008000L
++#define DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK 0x7FFF0000L
++#define DIDT_TCP_STALL_PATTERN_3_4__UNUSED_1_MASK 0x80000000L
++//DIDT_TCP_STALL_PATTERN_5_6
++#define DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT 0x0
++#define DIDT_TCP_STALL_PATTERN_5_6__UNUSED_0__SHIFT 0xf
++#define DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT 0x10
++#define DIDT_TCP_STALL_PATTERN_5_6__UNUSED_1__SHIFT 0x1f
++#define DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK 0x00007FFFL
++#define DIDT_TCP_STALL_PATTERN_5_6__UNUSED_0_MASK 0x00008000L
++#define DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK 0x7FFF0000L
++#define DIDT_TCP_STALL_PATTERN_5_6__UNUSED_1_MASK 0x80000000L
++//DIDT_TCP_STALL_PATTERN_7
++#define DIDT_TCP_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT 0x0
++#define DIDT_TCP_STALL_PATTERN_7__UNUSED_0__SHIFT 0xf
++#define DIDT_TCP_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK 0x00007FFFL
++#define DIDT_TCP_STALL_PATTERN_7__UNUSED_0_MASK 0xFFFF8000L
++//DIDT_TCP_WEIGHT0_3
++#define DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT 0x0
++#define DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT 0x8
++#define DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT 0x10
++#define DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT 0x18
++#define DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK 0x000000FFL
++#define DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK 0x0000FF00L
++#define DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK 0x00FF0000L
++#define DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK 0xFF000000L
++//DIDT_TCP_WEIGHT4_7
++#define DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT 0x0
++#define DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT 0x8
++#define DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT 0x10
++#define DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT 0x18
++#define DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK 0x000000FFL
++#define DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK 0x0000FF00L
++#define DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK 0x00FF0000L
++#define DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK 0xFF000000L
++//DIDT_TCP_WEIGHT8_11
++#define DIDT_TCP_WEIGHT8_11__WEIGHT8__SHIFT 0x0
++#define DIDT_TCP_WEIGHT8_11__WEIGHT9__SHIFT 0x8
++#define DIDT_TCP_WEIGHT8_11__WEIGHT10__SHIFT 0x10
++#define DIDT_TCP_WEIGHT8_11__WEIGHT11__SHIFT 0x18
++#define DIDT_TCP_WEIGHT8_11__WEIGHT8_MASK 0x000000FFL
++#define DIDT_TCP_WEIGHT8_11__WEIGHT9_MASK 0x0000FF00L
++#define DIDT_TCP_WEIGHT8_11__WEIGHT10_MASK 0x00FF0000L
++#define DIDT_TCP_WEIGHT8_11__WEIGHT11_MASK 0xFF000000L
++//DIDT_TCP_EDC_CTRL
++#define DIDT_TCP_EDC_CTRL__EDC_EN__SHIFT 0x0
++#define DIDT_TCP_EDC_CTRL__EDC_SW_RST__SHIFT 0x1
++#define DIDT_TCP_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT 0x2
++#define DIDT_TCP_EDC_CTRL__EDC_FORCE_STALL__SHIFT 0x3
++#define DIDT_TCP_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT 0x4
++#define DIDT_TCP_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT 0x9
++#define DIDT_TCP_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT 0x11
++#define DIDT_TCP_EDC_CTRL__GC_EDC_EN__SHIFT 0x12
++#define DIDT_TCP_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT 0x13
++#define DIDT_TCP_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT 0x15
++#define DIDT_TCP_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT 0x16
++#define DIDT_TCP_EDC_CTRL__UNUSED_0__SHIFT 0x17
++#define DIDT_TCP_EDC_CTRL__EDC_EN_MASK 0x00000001L
++#define DIDT_TCP_EDC_CTRL__EDC_SW_RST_MASK 0x00000002L
++#define DIDT_TCP_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK 0x00000004L
++#define DIDT_TCP_EDC_CTRL__EDC_FORCE_STALL_MASK 0x00000008L
++#define DIDT_TCP_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK 0x000001F0L
++#define DIDT_TCP_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK 0x0001FE00L
++#define DIDT_TCP_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK 0x00020000L
++#define DIDT_TCP_EDC_CTRL__GC_EDC_EN_MASK 0x00040000L
++#define DIDT_TCP_EDC_CTRL__GC_EDC_STALL_POLICY_MASK 0x00180000L
++#define DIDT_TCP_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK 0x00200000L
++#define DIDT_TCP_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK 0x00400000L
++#define DIDT_TCP_EDC_CTRL__UNUSED_0_MASK 0xFF800000L
++//DIDT_TCP_EDC_THRESHOLD
++#define DIDT_TCP_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT 0x0
++#define DIDT_TCP_EDC_THRESHOLD__EDC_THRESHOLD_MASK 0xFFFFFFFFL
++//DIDT_TCP_EDC_STALL_PATTERN_1_2
++#define DIDT_TCP_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1__SHIFT 0x0
++#define DIDT_TCP_EDC_STALL_PATTERN_1_2__UNUSED_0__SHIFT 0xf
++#define DIDT_TCP_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2__SHIFT 0x10
++#define DIDT_TCP_EDC_STALL_PATTERN_1_2__UNUSED_1__SHIFT 0x1f
++#define DIDT_TCP_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1_MASK 0x00007FFFL
++#define DIDT_TCP_EDC_STALL_PATTERN_1_2__UNUSED_0_MASK 0x00008000L
++#define DIDT_TCP_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2_MASK 0x7FFF0000L
++#define DIDT_TCP_EDC_STALL_PATTERN_1_2__UNUSED_1_MASK 0x80000000L
++//DIDT_TCP_EDC_STALL_PATTERN_3_4
++#define DIDT_TCP_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3__SHIFT 0x0
++#define DIDT_TCP_EDC_STALL_PATTERN_3_4__UNUSED_0__SHIFT 0xf
++#define DIDT_TCP_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4__SHIFT 0x10
++#define DIDT_TCP_EDC_STALL_PATTERN_3_4__UNUSED_1__SHIFT 0x1f
++#define DIDT_TCP_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3_MASK 0x00007FFFL
++#define DIDT_TCP_EDC_STALL_PATTERN_3_4__UNUSED_0_MASK 0x00008000L
++#define DIDT_TCP_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4_MASK 0x7FFF0000L
++#define DIDT_TCP_EDC_STALL_PATTERN_3_4__UNUSED_1_MASK 0x80000000L
++//DIDT_TCP_EDC_STALL_PATTERN_5_6
++#define DIDT_TCP_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5__SHIFT 0x0
++#define DIDT_TCP_EDC_STALL_PATTERN_5_6__UNUSED_0__SHIFT 0xf
++#define DIDT_TCP_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6__SHIFT 0x10
++#define DIDT_TCP_EDC_STALL_PATTERN_5_6__UNUSED_1__SHIFT 0x1f
++#define DIDT_TCP_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5_MASK 0x00007FFFL
++#define DIDT_TCP_EDC_STALL_PATTERN_5_6__UNUSED_0_MASK 0x00008000L
++#define DIDT_TCP_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6_MASK 0x7FFF0000L
++#define DIDT_TCP_EDC_STALL_PATTERN_5_6__UNUSED_1_MASK 0x80000000L
++//DIDT_TCP_EDC_STALL_PATTERN_7
++#define DIDT_TCP_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7__SHIFT 0x0
++#define DIDT_TCP_EDC_STALL_PATTERN_7__UNUSED_0__SHIFT 0xf
++#define DIDT_TCP_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7_MASK 0x00007FFFL
++#define DIDT_TCP_EDC_STALL_PATTERN_7__UNUSED_0_MASK 0xFFFF8000L
++//DIDT_TCP_EDC_STATUS
++#define DIDT_TCP_EDC_STATUS__EDC_FSM_STATE__SHIFT 0x0
++#define DIDT_TCP_EDC_STATUS__EDC_THROTTLE_LEVEL__SHIFT 0x1
++#define DIDT_TCP_EDC_STATUS__EDC_FSM_STATE_MASK 0x00000001L
++#define DIDT_TCP_EDC_STATUS__EDC_THROTTLE_LEVEL_MASK 0x0000000EL
++//DIDT_TCP_EDC_STALL_DELAY_1
++#define DIDT_TCP_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TCP0__SHIFT 0x0
++#define DIDT_TCP_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TCP1__SHIFT 0x6
++#define DIDT_TCP_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TCP2__SHIFT 0xc
++#define DIDT_TCP_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TCP3__SHIFT 0x12
++#define DIDT_TCP_EDC_STALL_DELAY_1__UNUSED__SHIFT 0x18
++#define DIDT_TCP_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TCP0_MASK 0x0000003FL
++#define DIDT_TCP_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TCP1_MASK 0x00000FC0L
++#define DIDT_TCP_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TCP2_MASK 0x0003F000L
++#define DIDT_TCP_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TCP3_MASK 0x00FC0000L
++#define DIDT_TCP_EDC_STALL_DELAY_1__UNUSED_MASK 0xFF000000L
++//DIDT_TCP_EDC_STALL_DELAY_2
++#define DIDT_TCP_EDC_STALL_DELAY_2__EDC_STALL_DELAY_TCP4__SHIFT 0x0
++#define DIDT_TCP_EDC_STALL_DELAY_2__EDC_STALL_DELAY_TCP5__SHIFT 0x6
++#define DIDT_TCP_EDC_STALL_DELAY_2__EDC_STALL_DELAY_TCP6__SHIFT 0xc
++#define DIDT_TCP_EDC_STALL_DELAY_2__EDC_STALL_DELAY_TCP7__SHIFT 0x12
++#define DIDT_TCP_EDC_STALL_DELAY_2__UNUSED__SHIFT 0x18
++#define DIDT_TCP_EDC_STALL_DELAY_2__EDC_STALL_DELAY_TCP4_MASK 0x0000003FL
++#define DIDT_TCP_EDC_STALL_DELAY_2__EDC_STALL_DELAY_TCP5_MASK 0x00000FC0L
++#define DIDT_TCP_EDC_STALL_DELAY_2__EDC_STALL_DELAY_TCP6_MASK 0x0003F000L
++#define DIDT_TCP_EDC_STALL_DELAY_2__EDC_STALL_DELAY_TCP7_MASK 0x00FC0000L
++#define DIDT_TCP_EDC_STALL_DELAY_2__UNUSED_MASK 0xFF000000L
++//DIDT_TCP_EDC_STALL_DELAY_3
++#define DIDT_TCP_EDC_STALL_DELAY_3__EDC_STALL_DELAY_TCP8__SHIFT 0x0
++#define DIDT_TCP_EDC_STALL_DELAY_3__EDC_STALL_DELAY_TCP9__SHIFT 0x6
++#define DIDT_TCP_EDC_STALL_DELAY_3__EDC_STALL_DELAY_TCP10__SHIFT 0xc
++#define DIDT_TCP_EDC_STALL_DELAY_3__UNUSED__SHIFT 0x12
++#define DIDT_TCP_EDC_STALL_DELAY_3__EDC_STALL_DELAY_TCP8_MASK 0x0000003FL
++#define DIDT_TCP_EDC_STALL_DELAY_3__EDC_STALL_DELAY_TCP9_MASK 0x00000FC0L
++#define DIDT_TCP_EDC_STALL_DELAY_3__EDC_STALL_DELAY_TCP10_MASK 0x0003F000L
++#define DIDT_TCP_EDC_STALL_DELAY_3__UNUSED_MASK 0xFFFC0000L
++//DIDT_TCP_EDC_OVERFLOW
++#define DIDT_TCP_EDC_OVERFLOW__EDC_ROLLING_POWER_DELTA_OVERFLOW__SHIFT 0x0
++#define DIDT_TCP_EDC_OVERFLOW__EDC_THROTTLE_LEVEL_OVERFLOW_COUNTER__SHIFT 0x1
++#define DIDT_TCP_EDC_OVERFLOW__EDC_ROLLING_POWER_DELTA_OVERFLOW_MASK 0x00000001L
++#define DIDT_TCP_EDC_OVERFLOW__EDC_THROTTLE_LEVEL_OVERFLOW_COUNTER_MASK 0x0001FFFEL
++//DIDT_TCP_EDC_ROLLING_POWER_DELTA
++#define DIDT_TCP_EDC_ROLLING_POWER_DELTA__EDC_ROLLING_POWER_DELTA__SHIFT 0x0
++#define DIDT_TCP_EDC_ROLLING_POWER_DELTA__EDC_ROLLING_POWER_DELTA_MASK 0xFFFFFFFFL
++//DIDT_DBR_CTRL0
++#define DIDT_DBR_CTRL0__DIDT_CTRL_EN__SHIFT 0x0
++#define DIDT_DBR_CTRL0__PHASE_OFFSET__SHIFT 0x1
++#define DIDT_DBR_CTRL0__DIDT_CTRL_RST__SHIFT 0x3
++#define DIDT_DBR_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x4
++#define DIDT_DBR_CTRL0__DIDT_STALL_CTRL_EN__SHIFT 0x5
++#define DIDT_DBR_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT 0x6
++#define DIDT_DBR_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT 0x7
++#define DIDT_DBR_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT 0x8
++#define DIDT_DBR_CTRL0__DIDT_AUTO_MPD_EN__SHIFT 0x18
++#define DIDT_DBR_CTRL0__DIDT_STALL_EVENT_EN__SHIFT 0x19
++#define DIDT_DBR_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT 0x1a
++#define DIDT_DBR_CTRL0__UNUSED_0__SHIFT 0x1b
++#define DIDT_DBR_CTRL0__DIDT_CTRL_EN_MASK 0x00000001L
++#define DIDT_DBR_CTRL0__PHASE_OFFSET_MASK 0x00000006L
++#define DIDT_DBR_CTRL0__DIDT_CTRL_RST_MASK 0x00000008L
++#define DIDT_DBR_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x00000010L
++#define DIDT_DBR_CTRL0__DIDT_STALL_CTRL_EN_MASK 0x00000020L
++#define DIDT_DBR_CTRL0__DIDT_TUNING_CTRL_EN_MASK 0x00000040L
++#define DIDT_DBR_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK 0x00000080L
++#define DIDT_DBR_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK 0x00FFFF00L
++#define DIDT_DBR_CTRL0__DIDT_AUTO_MPD_EN_MASK 0x01000000L
++#define DIDT_DBR_CTRL0__DIDT_STALL_EVENT_EN_MASK 0x02000000L
++#define DIDT_DBR_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK 0x04000000L
++#define DIDT_DBR_CTRL0__UNUSED_0_MASK 0xF8000000L
++//DIDT_DBR_CTRL1
++#define DIDT_DBR_CTRL1__MIN_POWER__SHIFT 0x0
++#define DIDT_DBR_CTRL1__MAX_POWER__SHIFT 0x10
++#define DIDT_DBR_CTRL1__MIN_POWER_MASK 0x0000FFFFL
++#define DIDT_DBR_CTRL1__MAX_POWER_MASK 0xFFFF0000L
++//DIDT_DBR_CTRL2
++#define DIDT_DBR_CTRL2__MAX_POWER_DELTA__SHIFT 0x0
++#define DIDT_DBR_CTRL2__UNUSED_0__SHIFT 0xe
++#define DIDT_DBR_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT 0x10
++#define DIDT_DBR_CTRL2__UNUSED_1__SHIFT 0x1a
++#define DIDT_DBR_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT 0x1b
++#define DIDT_DBR_CTRL2__UNUSED_2__SHIFT 0x1f
++#define DIDT_DBR_CTRL2__MAX_POWER_DELTA_MASK 0x00003FFFL
++#define DIDT_DBR_CTRL2__UNUSED_0_MASK 0x0000C000L
++#define DIDT_DBR_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK 0x03FF0000L
++#define DIDT_DBR_CTRL2__UNUSED_1_MASK 0x04000000L
++#define DIDT_DBR_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK 0x78000000L
++#define DIDT_DBR_CTRL2__UNUSED_2_MASK 0x80000000L
++//DIDT_DBR_STALL_CTRL
++#define DIDT_DBR_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT 0x0
++#define DIDT_DBR_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT 0x6
++#define DIDT_DBR_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT 0xc
++#define DIDT_DBR_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT 0x12
++#define DIDT_DBR_STALL_CTRL__UNUSED_0__SHIFT 0x18
++#define DIDT_DBR_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK 0x0000003FL
++#define DIDT_DBR_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK 0x00000FC0L
++#define DIDT_DBR_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK 0x0003F000L
++#define DIDT_DBR_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK 0x00FC0000L
++#define DIDT_DBR_STALL_CTRL__UNUSED_0_MASK 0xFF000000L
++//DIDT_DBR_TUNING_CTRL
++#define DIDT_DBR_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT 0x0
++#define DIDT_DBR_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT 0xe
++#define DIDT_DBR_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK 0x00003FFFL
++#define DIDT_DBR_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK 0x0FFFC000L
++//DIDT_DBR_STALL_AUTO_RELEASE_CTRL
++#define DIDT_DBR_STALL_AUTO_RELEASE_CTRL__DIDT_STALL_AUTO_RELEASE_TIME__SHIFT 0x0
++#define DIDT_DBR_STALL_AUTO_RELEASE_CTRL__DIDT_STALL_AUTO_RELEASE_TIME_MASK 0x00FFFFFFL
++//DIDT_DBR_CTRL3
++#define DIDT_DBR_CTRL3__GC_DIDT_ENABLE__SHIFT 0x0
++#define DIDT_DBR_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT 0x1
++#define DIDT_DBR_CTRL3__THROTTLE_POLICY__SHIFT 0x2
++#define DIDT_DBR_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT 0x4
++#define DIDT_DBR_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT 0x9
++#define DIDT_DBR_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT 0xe
++#define DIDT_DBR_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT 0x16
++#define DIDT_DBR_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT 0x17
++#define DIDT_DBR_CTRL3__QUALIFY_STALL_EN__SHIFT 0x18
++#define DIDT_DBR_CTRL3__DIDT_STALL_SEL__SHIFT 0x19
++#define DIDT_DBR_CTRL3__DIDT_FORCE_STALL__SHIFT 0x1b
++#define DIDT_DBR_CTRL3__DIDT_STALL_DELAY_EN__SHIFT 0x1c
++#define DIDT_DBR_CTRL3__GC_DIDT_ENABLE_MASK 0x00000001L
++#define DIDT_DBR_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK 0x00000002L
++#define DIDT_DBR_CTRL3__THROTTLE_POLICY_MASK 0x0000000CL
++#define DIDT_DBR_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK 0x000001F0L
++#define DIDT_DBR_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK 0x00003E00L
++#define DIDT_DBR_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK 0x003FC000L
++#define DIDT_DBR_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK 0x00400000L
++#define DIDT_DBR_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK 0x00800000L
++#define DIDT_DBR_CTRL3__QUALIFY_STALL_EN_MASK 0x01000000L
++#define DIDT_DBR_CTRL3__DIDT_STALL_SEL_MASK 0x06000000L
++#define DIDT_DBR_CTRL3__DIDT_FORCE_STALL_MASK 0x08000000L
++#define DIDT_DBR_CTRL3__DIDT_STALL_DELAY_EN_MASK 0x10000000L
++//DIDT_DBR_STALL_PATTERN_1_2
++#define DIDT_DBR_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT 0x0
++#define DIDT_DBR_STALL_PATTERN_1_2__UNUSED_0__SHIFT 0xf
++#define DIDT_DBR_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT 0x10
++#define DIDT_DBR_STALL_PATTERN_1_2__UNUSED_1__SHIFT 0x1f
++#define DIDT_DBR_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK 0x00007FFFL
++#define DIDT_DBR_STALL_PATTERN_1_2__UNUSED_0_MASK 0x00008000L
++#define DIDT_DBR_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK 0x7FFF0000L
++#define DIDT_DBR_STALL_PATTERN_1_2__UNUSED_1_MASK 0x80000000L
++//DIDT_DBR_STALL_PATTERN_3_4
++#define DIDT_DBR_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT 0x0
++#define DIDT_DBR_STALL_PATTERN_3_4__UNUSED_0__SHIFT 0xf
++#define DIDT_DBR_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT 0x10
++#define DIDT_DBR_STALL_PATTERN_3_4__UNUSED_1__SHIFT 0x1f
++#define DIDT_DBR_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK 0x00007FFFL
++#define DIDT_DBR_STALL_PATTERN_3_4__UNUSED_0_MASK 0x00008000L
++#define DIDT_DBR_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK 0x7FFF0000L
++#define DIDT_DBR_STALL_PATTERN_3_4__UNUSED_1_MASK 0x80000000L
++//DIDT_DBR_STALL_PATTERN_5_6
++#define DIDT_DBR_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT 0x0
++#define DIDT_DBR_STALL_PATTERN_5_6__UNUSED_0__SHIFT 0xf
++#define DIDT_DBR_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT 0x10
++#define DIDT_DBR_STALL_PATTERN_5_6__UNUSED_1__SHIFT 0x1f
++#define DIDT_DBR_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK 0x00007FFFL
++#define DIDT_DBR_STALL_PATTERN_5_6__UNUSED_0_MASK 0x00008000L
++#define DIDT_DBR_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK 0x7FFF0000L
++#define DIDT_DBR_STALL_PATTERN_5_6__UNUSED_1_MASK 0x80000000L
++//DIDT_DBR_STALL_PATTERN_7
++#define DIDT_DBR_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT 0x0
++#define DIDT_DBR_STALL_PATTERN_7__UNUSED_0__SHIFT 0xf
++#define DIDT_DBR_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK 0x00007FFFL
++#define DIDT_DBR_STALL_PATTERN_7__UNUSED_0_MASK 0xFFFF8000L
++//DIDT_DBR_WEIGHT0_3
++#define DIDT_DBR_WEIGHT0_3__WEIGHT0__SHIFT 0x0
++#define DIDT_DBR_WEIGHT0_3__WEIGHT1__SHIFT 0x8
++#define DIDT_DBR_WEIGHT0_3__WEIGHT2__SHIFT 0x10
++#define DIDT_DBR_WEIGHT0_3__WEIGHT3__SHIFT 0x18
++#define DIDT_DBR_WEIGHT0_3__WEIGHT0_MASK 0x000000FFL
++#define DIDT_DBR_WEIGHT0_3__WEIGHT1_MASK 0x0000FF00L
++#define DIDT_DBR_WEIGHT0_3__WEIGHT2_MASK 0x00FF0000L
++#define DIDT_DBR_WEIGHT0_3__WEIGHT3_MASK 0xFF000000L
++//DIDT_DBR_WEIGHT4_7
++#define DIDT_DBR_WEIGHT4_7__WEIGHT4__SHIFT 0x0
++#define DIDT_DBR_WEIGHT4_7__WEIGHT5__SHIFT 0x8
++#define DIDT_DBR_WEIGHT4_7__WEIGHT6__SHIFT 0x10
++#define DIDT_DBR_WEIGHT4_7__WEIGHT7__SHIFT 0x18
++#define DIDT_DBR_WEIGHT4_7__WEIGHT4_MASK 0x000000FFL
++#define DIDT_DBR_WEIGHT4_7__WEIGHT5_MASK 0x0000FF00L
++#define DIDT_DBR_WEIGHT4_7__WEIGHT6_MASK 0x00FF0000L
++#define DIDT_DBR_WEIGHT4_7__WEIGHT7_MASK 0xFF000000L
++//DIDT_DBR_WEIGHT8_11
++#define DIDT_DBR_WEIGHT8_11__WEIGHT8__SHIFT 0x0
++#define DIDT_DBR_WEIGHT8_11__WEIGHT9__SHIFT 0x8
++#define DIDT_DBR_WEIGHT8_11__WEIGHT10__SHIFT 0x10
++#define DIDT_DBR_WEIGHT8_11__WEIGHT11__SHIFT 0x18
++#define DIDT_DBR_WEIGHT8_11__WEIGHT8_MASK 0x000000FFL
++#define DIDT_DBR_WEIGHT8_11__WEIGHT9_MASK 0x0000FF00L
++#define DIDT_DBR_WEIGHT8_11__WEIGHT10_MASK 0x00FF0000L
++#define DIDT_DBR_WEIGHT8_11__WEIGHT11_MASK 0xFF000000L
++//DIDT_DBR_EDC_CTRL
++#define DIDT_DBR_EDC_CTRL__EDC_EN__SHIFT 0x0
++#define DIDT_DBR_EDC_CTRL__EDC_SW_RST__SHIFT 0x1
++#define DIDT_DBR_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT 0x2
++#define DIDT_DBR_EDC_CTRL__EDC_FORCE_STALL__SHIFT 0x3
++#define DIDT_DBR_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT 0x4
++#define DIDT_DBR_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT 0x9
++#define DIDT_DBR_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT 0x11
++#define DIDT_DBR_EDC_CTRL__GC_EDC_EN__SHIFT 0x12
++#define DIDT_DBR_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT 0x13
++#define DIDT_DBR_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT 0x15
++#define DIDT_DBR_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT 0x16
++#define DIDT_DBR_EDC_CTRL__UNUSED_0__SHIFT 0x17
++#define DIDT_DBR_EDC_CTRL__EDC_EN_MASK 0x00000001L
++#define DIDT_DBR_EDC_CTRL__EDC_SW_RST_MASK 0x00000002L
++#define DIDT_DBR_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK 0x00000004L
++#define DIDT_DBR_EDC_CTRL__EDC_FORCE_STALL_MASK 0x00000008L
++#define DIDT_DBR_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK 0x000001F0L
++#define DIDT_DBR_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK 0x0001FE00L
++#define DIDT_DBR_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK 0x00020000L
++#define DIDT_DBR_EDC_CTRL__GC_EDC_EN_MASK 0x00040000L
++#define DIDT_DBR_EDC_CTRL__GC_EDC_STALL_POLICY_MASK 0x00180000L
++#define DIDT_DBR_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK 0x00200000L
++#define DIDT_DBR_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK 0x00400000L
++#define DIDT_DBR_EDC_CTRL__UNUSED_0_MASK 0xFF800000L
++//DIDT_DBR_EDC_THRESHOLD
++#define DIDT_DBR_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT 0x0
++#define DIDT_DBR_EDC_THRESHOLD__EDC_THRESHOLD_MASK 0xFFFFFFFFL
++//DIDT_DBR_EDC_STALL_PATTERN_1_2
++#define DIDT_DBR_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1__SHIFT 0x0
++#define DIDT_DBR_EDC_STALL_PATTERN_1_2__UNUSED_0__SHIFT 0xf
++#define DIDT_DBR_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2__SHIFT 0x10
++#define DIDT_DBR_EDC_STALL_PATTERN_1_2__UNUSED_1__SHIFT 0x1f
++#define DIDT_DBR_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1_MASK 0x00007FFFL
++#define DIDT_DBR_EDC_STALL_PATTERN_1_2__UNUSED_0_MASK 0x00008000L
++#define DIDT_DBR_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2_MASK 0x7FFF0000L
++#define DIDT_DBR_EDC_STALL_PATTERN_1_2__UNUSED_1_MASK 0x80000000L
++//DIDT_DBR_EDC_STALL_PATTERN_3_4
++#define DIDT_DBR_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3__SHIFT 0x0
++#define DIDT_DBR_EDC_STALL_PATTERN_3_4__UNUSED_0__SHIFT 0xf
++#define DIDT_DBR_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4__SHIFT 0x10
++#define DIDT_DBR_EDC_STALL_PATTERN_3_4__UNUSED_1__SHIFT 0x1f
++#define DIDT_DBR_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3_MASK 0x00007FFFL
++#define DIDT_DBR_EDC_STALL_PATTERN_3_4__UNUSED_0_MASK 0x00008000L
++#define DIDT_DBR_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4_MASK 0x7FFF0000L
++#define DIDT_DBR_EDC_STALL_PATTERN_3_4__UNUSED_1_MASK 0x80000000L
++//DIDT_DBR_EDC_STALL_PATTERN_5_6
++#define DIDT_DBR_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5__SHIFT 0x0
++#define DIDT_DBR_EDC_STALL_PATTERN_5_6__UNUSED_0__SHIFT 0xf
++#define DIDT_DBR_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6__SHIFT 0x10
++#define DIDT_DBR_EDC_STALL_PATTERN_5_6__UNUSED_1__SHIFT 0x1f
++#define DIDT_DBR_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5_MASK 0x00007FFFL
++#define DIDT_DBR_EDC_STALL_PATTERN_5_6__UNUSED_0_MASK 0x00008000L
++#define DIDT_DBR_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6_MASK 0x7FFF0000L
++#define DIDT_DBR_EDC_STALL_PATTERN_5_6__UNUSED_1_MASK 0x80000000L
++//DIDT_DBR_EDC_STALL_PATTERN_7
++#define DIDT_DBR_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7__SHIFT 0x0
++#define DIDT_DBR_EDC_STALL_PATTERN_7__UNUSED_0__SHIFT 0xf
++#define DIDT_DBR_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7_MASK 0x00007FFFL
++#define DIDT_DBR_EDC_STALL_PATTERN_7__UNUSED_0_MASK 0xFFFF8000L
++//DIDT_DBR_EDC_STATUS
++#define DIDT_DBR_EDC_STATUS__EDC_FSM_STATE__SHIFT 0x0
++#define DIDT_DBR_EDC_STATUS__EDC_THROTTLE_LEVEL__SHIFT 0x1
++#define DIDT_DBR_EDC_STATUS__UNUSED_0__SHIFT 0x4
++#define DIDT_DBR_EDC_STATUS__EDC_FSM_STATE_MASK 0x00000001L
++#define DIDT_DBR_EDC_STATUS__EDC_THROTTLE_LEVEL_MASK 0x0000000EL
++#define DIDT_DBR_EDC_STATUS__UNUSED_0_MASK 0xFFFFFFF0L
++//DIDT_DBR_EDC_STALL_DELAY_1
++#define DIDT_DBR_EDC_STALL_DELAY_1__EDC_STALL_DELAY_DBR0__SHIFT 0x0
++#define DIDT_DBR_EDC_STALL_DELAY_1__UNUSED__SHIFT 0x1
++#define DIDT_DBR_EDC_STALL_DELAY_1__EDC_STALL_DELAY_DBR0_MASK 0x00000001L
++#define DIDT_DBR_EDC_STALL_DELAY_1__UNUSED_MASK 0xFFFFFFFEL
++//DIDT_DBR_EDC_OVERFLOW
++#define DIDT_DBR_EDC_OVERFLOW__EDC_ROLLING_POWER_DELTA_OVERFLOW__SHIFT 0x0
++#define DIDT_DBR_EDC_OVERFLOW__EDC_THROTTLE_LEVEL_OVERFLOW_COUNTER__SHIFT 0x1
++#define DIDT_DBR_EDC_OVERFLOW__EDC_ROLLING_POWER_DELTA_OVERFLOW_MASK 0x00000001L
++#define DIDT_DBR_EDC_OVERFLOW__EDC_THROTTLE_LEVEL_OVERFLOW_COUNTER_MASK 0x0001FFFEL
++//DIDT_DBR_EDC_ROLLING_POWER_DELTA
++#define DIDT_DBR_EDC_ROLLING_POWER_DELTA__EDC_ROLLING_POWER_DELTA__SHIFT 0x0
++#define DIDT_DBR_EDC_ROLLING_POWER_DELTA__EDC_ROLLING_POWER_DELTA_MASK 0xFFFFFFFFL
++//DIDT_SQ_STALL_EVENT_COUNTER
++#define DIDT_SQ_STALL_EVENT_COUNTER__DIDT_STALL_EVENT_COUNTER__SHIFT 0x0
++#define DIDT_SQ_STALL_EVENT_COUNTER__DIDT_STALL_EVENT_COUNTER_MASK 0xFFFFFFFFL
++//DIDT_DB_STALL_EVENT_COUNTER
++#define DIDT_DB_STALL_EVENT_COUNTER__DIDT_STALL_EVENT_COUNTER__SHIFT 0x0
++#define DIDT_DB_STALL_EVENT_COUNTER__DIDT_STALL_EVENT_COUNTER_MASK 0xFFFFFFFFL
++//DIDT_TD_STALL_EVENT_COUNTER
++#define DIDT_TD_STALL_EVENT_COUNTER__DIDT_STALL_EVENT_COUNTER__SHIFT 0x0
++#define DIDT_TD_STALL_EVENT_COUNTER__DIDT_STALL_EVENT_COUNTER_MASK 0xFFFFFFFFL
++//DIDT_TCP_STALL_EVENT_COUNTER
++#define DIDT_TCP_STALL_EVENT_COUNTER__DIDT_STALL_EVENT_COUNTER__SHIFT 0x0
++#define DIDT_TCP_STALL_EVENT_COUNTER__DIDT_STALL_EVENT_COUNTER_MASK 0xFFFFFFFFL
++//DIDT_DBR_STALL_EVENT_COUNTER
++#define DIDT_DBR_STALL_EVENT_COUNTER__DIDT_STALL_EVENT_COUNTER__SHIFT 0x0
++#define DIDT_DBR_STALL_EVENT_COUNTER__DIDT_STALL_EVENT_COUNTER_MASK 0xFFFFFFFFL
++
++
++
++
++
++#endif
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_1_sh_mask.h
+new file mode 100644
+index 0000000..1445bba
+--- /dev/null
++++ b/drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_1_sh_mask.h
+@@ -0,0 +1,1658 @@
++/*
++ * Copyright (C) 2017 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
++ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++#ifndef _sdma0_4_1_SH_MASK_HEADER
++#define _sdma0_4_1_SH_MASK_HEADER
++
++
++// addressBlock: sdma0_sdma0dec
++//SDMA0_UCODE_ADDR
++#define SDMA0_UCODE_ADDR__VALUE__SHIFT 0x0
++#define SDMA0_UCODE_ADDR__VALUE_MASK 0x00001FFFL
++//SDMA0_UCODE_DATA
++#define SDMA0_UCODE_DATA__VALUE__SHIFT 0x0
++#define SDMA0_UCODE_DATA__VALUE_MASK 0xFFFFFFFFL
++//SDMA0_VM_CNTL
++#define SDMA0_VM_CNTL__CMD__SHIFT 0x0
++#define SDMA0_VM_CNTL__CMD_MASK 0x0000000FL
++//SDMA0_VM_CTX_LO
++#define SDMA0_VM_CTX_LO__ADDR__SHIFT 0x2
++#define SDMA0_VM_CTX_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA0_VM_CTX_HI
++#define SDMA0_VM_CTX_HI__ADDR__SHIFT 0x0
++#define SDMA0_VM_CTX_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_ACTIVE_FCN_ID
++#define SDMA0_ACTIVE_FCN_ID__VFID__SHIFT 0x0
++#define SDMA0_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4
++#define SDMA0_ACTIVE_FCN_ID__VF__SHIFT 0x1f
++#define SDMA0_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
++#define SDMA0_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L
++#define SDMA0_ACTIVE_FCN_ID__VF_MASK 0x80000000L
++//SDMA0_VM_CTX_CNTL
++#define SDMA0_VM_CTX_CNTL__PRIV__SHIFT 0x0
++#define SDMA0_VM_CTX_CNTL__VMID__SHIFT 0x4
++#define SDMA0_VM_CTX_CNTL__PRIV_MASK 0x00000001L
++#define SDMA0_VM_CTX_CNTL__VMID_MASK 0x000000F0L
++//SDMA0_VIRT_RESET_REQ
++#define SDMA0_VIRT_RESET_REQ__VF__SHIFT 0x0
++#define SDMA0_VIRT_RESET_REQ__PF__SHIFT 0x1f
++#define SDMA0_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
++#define SDMA0_VIRT_RESET_REQ__PF_MASK 0x80000000L
++//SDMA0_CONTEXT_REG_TYPE0
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_CNTL__SHIFT 0x0
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_BASE__SHIFT 0x1
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_BASE_HI__SHIFT 0x2
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR__SHIFT 0x3
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_HI__SHIFT 0x4
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR__SHIFT 0x5
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR_HI__SHIFT 0x6
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR_POLL_CNTL__SHIFT 0x7
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_ADDR_HI__SHIFT 0x8
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_ADDR_LO__SHIFT 0x9
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_CNTL__SHIFT 0xa
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_RPTR__SHIFT 0xb
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_OFFSET__SHIFT 0xc
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_BASE_LO__SHIFT 0xd
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_BASE_HI__SHIFT 0xe
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_SIZE__SHIFT 0xf
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_SKIP_CNTL__SHIFT 0x10
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_CONTEXT_STATUS__SHIFT 0x11
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_DOORBELL__SHIFT 0x12
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_CONTEXT_CNTL__SHIFT 0x13
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_CNTL_MASK 0x00000001L
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_BASE_MASK 0x00000002L
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_BASE_HI_MASK 0x00000004L
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_MASK 0x00000008L
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_HI_MASK 0x00000010L
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR_MASK 0x00000020L
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR_HI_MASK 0x00000040L
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR_POLL_CNTL_MASK 0x00000080L
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_ADDR_HI_MASK 0x00000100L
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_ADDR_LO_MASK 0x00000200L
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_CNTL_MASK 0x00000400L
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_RPTR_MASK 0x00000800L
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_OFFSET_MASK 0x00001000L
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_BASE_LO_MASK 0x00002000L
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_BASE_HI_MASK 0x00004000L
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_SIZE_MASK 0x00008000L
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_SKIP_CNTL_MASK 0x00010000L
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_CONTEXT_STATUS_MASK 0x00020000L
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_DOORBELL_MASK 0x00040000L
++#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_CONTEXT_CNTL_MASK 0x00080000L
++//SDMA0_CONTEXT_REG_TYPE1
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_STATUS__SHIFT 0x8
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DOORBELL_LOG__SHIFT 0x9
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_WATERMARK__SHIFT 0xa
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DOORBELL_OFFSET__SHIFT 0xb
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_CSA_ADDR_LO__SHIFT 0xc
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_CSA_ADDR_HI__SHIFT 0xd
++#define SDMA0_CONTEXT_REG_TYPE1__VOID_REG2__SHIFT 0xe
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_IB_SUB_REMAIN__SHIFT 0xf
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_PREEMPT__SHIFT 0x10
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DUMMY_REG__SHIFT 0x11
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_WPTR_POLL_ADDR_HI__SHIFT 0x12
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_WPTR_POLL_ADDR_LO__SHIFT 0x13
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_AQL_CNTL__SHIFT 0x14
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_MINOR_PTR_UPDATE__SHIFT 0x15
++#define SDMA0_CONTEXT_REG_TYPE1__RESERVED__SHIFT 0x16
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_STATUS_MASK 0x00000100L
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DOORBELL_LOG_MASK 0x00000200L
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_WATERMARK_MASK 0x00000400L
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DOORBELL_OFFSET_MASK 0x00000800L
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_CSA_ADDR_LO_MASK 0x00001000L
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_CSA_ADDR_HI_MASK 0x00002000L
++#define SDMA0_CONTEXT_REG_TYPE1__VOID_REG2_MASK 0x00004000L
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_IB_SUB_REMAIN_MASK 0x00008000L
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_PREEMPT_MASK 0x00010000L
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DUMMY_REG_MASK 0x00020000L
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_WPTR_POLL_ADDR_HI_MASK 0x00040000L
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_WPTR_POLL_ADDR_LO_MASK 0x00080000L
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_AQL_CNTL_MASK 0x00100000L
++#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_MINOR_PTR_UPDATE_MASK 0x00200000L
++#define SDMA0_CONTEXT_REG_TYPE1__RESERVED_MASK 0xFFC00000L
++//SDMA0_CONTEXT_REG_TYPE2
++#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA0__SHIFT 0x0
++#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA1__SHIFT 0x1
++#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA2__SHIFT 0x2
++#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA3__SHIFT 0x3
++#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA4__SHIFT 0x4
++#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA5__SHIFT 0x5
++#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA6__SHIFT 0x6
++#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA7__SHIFT 0x7
++#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA8__SHIFT 0x8
++#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_CNTL__SHIFT 0x9
++#define SDMA0_CONTEXT_REG_TYPE2__RESERVED__SHIFT 0xa
++#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA0_MASK 0x00000001L
++#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA1_MASK 0x00000002L
++#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA2_MASK 0x00000004L
++#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA3_MASK 0x00000008L
++#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA4_MASK 0x00000010L
++#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA5_MASK 0x00000020L
++#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA6_MASK 0x00000040L
++#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA7_MASK 0x00000080L
++#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA8_MASK 0x00000100L
++#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_CNTL_MASK 0x00000200L
++#define SDMA0_CONTEXT_REG_TYPE2__RESERVED_MASK 0xFFFFFC00L
++//SDMA0_CONTEXT_REG_TYPE3
++#define SDMA0_CONTEXT_REG_TYPE3__RESERVED__SHIFT 0x0
++#define SDMA0_CONTEXT_REG_TYPE3__RESERVED_MASK 0xFFFFFFFFL
++//SDMA0_PUB_REG_TYPE0
++#define SDMA0_PUB_REG_TYPE0__SDMA0_UCODE_ADDR__SHIFT 0x0
++#define SDMA0_PUB_REG_TYPE0__SDMA0_UCODE_DATA__SHIFT 0x1
++#define SDMA0_PUB_REG_TYPE0__RESERVED3__SHIFT 0x3
++#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CNTL__SHIFT 0x4
++#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_LO__SHIFT 0x5
++#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_HI__SHIFT 0x6
++#define SDMA0_PUB_REG_TYPE0__SDMA0_ACTIVE_FCN_ID__SHIFT 0x7
++#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_CNTL__SHIFT 0x8
++#define SDMA0_PUB_REG_TYPE0__SDMA0_VIRT_RESET_REQ__SHIFT 0x9
++#define SDMA0_PUB_REG_TYPE0__RESERVED10__SHIFT 0xa
++#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE0__SHIFT 0xb
++#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE1__SHIFT 0xc
++#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE2__SHIFT 0xd
++#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE3__SHIFT 0xe
++#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE0__SHIFT 0xf
++#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE1__SHIFT 0x10
++#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE2__SHIFT 0x11
++#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE3__SHIFT 0x12
++#define SDMA0_PUB_REG_TYPE0__SDMA0_MMHUB_CNTL__SHIFT 0x13
++#define SDMA0_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY__SHIFT 0x14
++#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_GROUP_BOUNDARY__SHIFT 0x19
++#define SDMA0_PUB_REG_TYPE0__SDMA0_POWER_CNTL__SHIFT 0x1a
++#define SDMA0_PUB_REG_TYPE0__SDMA0_CLK_CTRL__SHIFT 0x1b
++#define SDMA0_PUB_REG_TYPE0__SDMA0_CNTL__SHIFT 0x1c
++#define SDMA0_PUB_REG_TYPE0__SDMA0_CHICKEN_BITS__SHIFT 0x1d
++#define SDMA0_PUB_REG_TYPE0__SDMA0_GB_ADDR_CONFIG__SHIFT 0x1e
++#define SDMA0_PUB_REG_TYPE0__SDMA0_GB_ADDR_CONFIG_READ__SHIFT 0x1f
++#define SDMA0_PUB_REG_TYPE0__SDMA0_UCODE_ADDR_MASK 0x00000001L
++#define SDMA0_PUB_REG_TYPE0__SDMA0_UCODE_DATA_MASK 0x00000002L
++#define SDMA0_PUB_REG_TYPE0__RESERVED3_MASK 0x00000008L
++#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CNTL_MASK 0x00000010L
++#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_LO_MASK 0x00000020L
++#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_HI_MASK 0x00000040L
++#define SDMA0_PUB_REG_TYPE0__SDMA0_ACTIVE_FCN_ID_MASK 0x00000080L
++#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_CNTL_MASK 0x00000100L
++#define SDMA0_PUB_REG_TYPE0__SDMA0_VIRT_RESET_REQ_MASK 0x00000200L
++#define SDMA0_PUB_REG_TYPE0__RESERVED10_MASK 0x00000400L
++#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE0_MASK 0x00000800L
++#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE1_MASK 0x00001000L
++#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE2_MASK 0x00002000L
++#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE3_MASK 0x00004000L
++#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE0_MASK 0x00008000L
++#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE1_MASK 0x00010000L
++#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE2_MASK 0x00020000L
++#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE3_MASK 0x00040000L
++#define SDMA0_PUB_REG_TYPE0__SDMA0_MMHUB_CNTL_MASK 0x00080000L
++#define SDMA0_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY_MASK 0x01F00000L
++#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_GROUP_BOUNDARY_MASK 0x02000000L
++#define SDMA0_PUB_REG_TYPE0__SDMA0_POWER_CNTL_MASK 0x04000000L
++#define SDMA0_PUB_REG_TYPE0__SDMA0_CLK_CTRL_MASK 0x08000000L
++#define SDMA0_PUB_REG_TYPE0__SDMA0_CNTL_MASK 0x10000000L
++#define SDMA0_PUB_REG_TYPE0__SDMA0_CHICKEN_BITS_MASK 0x20000000L
++#define SDMA0_PUB_REG_TYPE0__SDMA0_GB_ADDR_CONFIG_MASK 0x40000000L
++#define SDMA0_PUB_REG_TYPE0__SDMA0_GB_ADDR_CONFIG_READ_MASK 0x80000000L
++//SDMA0_PUB_REG_TYPE1
++#define SDMA0_PUB_REG_TYPE1__SDMA0_RB_RPTR_FETCH_HI__SHIFT 0x0
++#define SDMA0_PUB_REG_TYPE1__SDMA0_SEM_WAIT_FAIL_TIMER_CNTL__SHIFT 0x1
++#define SDMA0_PUB_REG_TYPE1__SDMA0_RB_RPTR_FETCH__SHIFT 0x2
++#define SDMA0_PUB_REG_TYPE1__SDMA0_IB_OFFSET_FETCH__SHIFT 0x3
++#define SDMA0_PUB_REG_TYPE1__SDMA0_PROGRAM__SHIFT 0x4
++#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS_REG__SHIFT 0x5
++#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS1_REG__SHIFT 0x6
++#define SDMA0_PUB_REG_TYPE1__SDMA0_RD_BURST_CNTL__SHIFT 0x7
++#define SDMA0_PUB_REG_TYPE1__SDMA0_HBM_PAGE_CONFIG__SHIFT 0x8
++#define SDMA0_PUB_REG_TYPE1__SDMA0_UCODE_CHECKSUM__SHIFT 0x9
++#define SDMA0_PUB_REG_TYPE1__SDMA0_F32_CNTL__SHIFT 0xa
++#define SDMA0_PUB_REG_TYPE1__SDMA0_FREEZE__SHIFT 0xb
++#define SDMA0_PUB_REG_TYPE1__SDMA0_PHASE0_QUANTUM__SHIFT 0xc
++#define SDMA0_PUB_REG_TYPE1__SDMA0_PHASE1_QUANTUM__SHIFT 0xd
++#define SDMA0_PUB_REG_TYPE1__SDMA_POWER_GATING__SHIFT 0xe
++#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG__SHIFT 0xf
++#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_WRITE__SHIFT 0x10
++#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_READ__SHIFT 0x11
++#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_CONFIG__SHIFT 0x12
++#define SDMA0_PUB_REG_TYPE1__SDMA0_BA_THRESHOLD__SHIFT 0x13
++#define SDMA0_PUB_REG_TYPE1__SDMA0_ID__SHIFT 0x14
++#define SDMA0_PUB_REG_TYPE1__SDMA0_VERSION__SHIFT 0x15
++#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_COUNTER__SHIFT 0x16
++#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_COUNTER_CLEAR__SHIFT 0x17
++#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS2_REG__SHIFT 0x18
++#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_CNTL__SHIFT 0x19
++#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_PREOP_LO__SHIFT 0x1a
++#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_PREOP_HI__SHIFT 0x1b
++#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_CNTL__SHIFT 0x1c
++#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_WATERMK__SHIFT 0x1d
++#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_RD_STATUS__SHIFT 0x1e
++#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_WR_STATUS__SHIFT 0x1f
++#define SDMA0_PUB_REG_TYPE1__SDMA0_RB_RPTR_FETCH_HI_MASK 0x00000001L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_SEM_WAIT_FAIL_TIMER_CNTL_MASK 0x00000002L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_RB_RPTR_FETCH_MASK 0x00000004L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_IB_OFFSET_FETCH_MASK 0x00000008L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_PROGRAM_MASK 0x00000010L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS_REG_MASK 0x00000020L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS1_REG_MASK 0x00000040L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_RD_BURST_CNTL_MASK 0x00000080L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_HBM_PAGE_CONFIG_MASK 0x00000100L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_UCODE_CHECKSUM_MASK 0x00000200L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_F32_CNTL_MASK 0x00000400L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_FREEZE_MASK 0x00000800L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_PHASE0_QUANTUM_MASK 0x00001000L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_PHASE1_QUANTUM_MASK 0x00002000L
++#define SDMA0_PUB_REG_TYPE1__SDMA_POWER_GATING_MASK 0x00004000L
++#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG_MASK 0x00008000L
++#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_WRITE_MASK 0x00010000L
++#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_READ_MASK 0x00020000L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_CONFIG_MASK 0x00040000L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_BA_THRESHOLD_MASK 0x00080000L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_ID_MASK 0x00100000L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_VERSION_MASK 0x00200000L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_COUNTER_MASK 0x00400000L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_COUNTER_CLEAR_MASK 0x00800000L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS2_REG_MASK 0x01000000L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_CNTL_MASK 0x02000000L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_PREOP_LO_MASK 0x04000000L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_PREOP_HI_MASK 0x08000000L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_CNTL_MASK 0x10000000L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_WATERMK_MASK 0x20000000L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_RD_STATUS_MASK 0x40000000L
++#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_WR_STATUS_MASK 0x80000000L
++//SDMA0_PUB_REG_TYPE2
++#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV0__SHIFT 0x0
++#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV1__SHIFT 0x1
++#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV2__SHIFT 0x2
++#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_XNACK0__SHIFT 0x3
++#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_XNACK1__SHIFT 0x4
++#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_XNACK0__SHIFT 0x5
++#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_XNACK1__SHIFT 0x6
++#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_TIMEOUT__SHIFT 0x7
++#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_PAGE__SHIFT 0x8
++#define SDMA0_PUB_REG_TYPE2__SDMA0_POWER_CNTL_IDLE__SHIFT 0x9
++#define SDMA0_PUB_REG_TYPE2__SDMA0_RELAX_ORDERING_LUT__SHIFT 0xa
++#define SDMA0_PUB_REG_TYPE2__SDMA0_CHICKEN_BITS_2__SHIFT 0xb
++#define SDMA0_PUB_REG_TYPE2__SDMA0_STATUS3_REG__SHIFT 0xc
++#define SDMA0_PUB_REG_TYPE2__SDMA0_PHYSICAL_ADDR_LO__SHIFT 0xd
++#define SDMA0_PUB_REG_TYPE2__SDMA0_PHYSICAL_ADDR_HI__SHIFT 0xe
++#define SDMA0_PUB_REG_TYPE2__SDMA0_ERROR_LOG__SHIFT 0x10
++#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG0__SHIFT 0x11
++#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG1__SHIFT 0x12
++#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG2__SHIFT 0x13
++#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG3__SHIFT 0x14
++#define SDMA0_PUB_REG_TYPE2__SDMA0_F32_COUNTER__SHIFT 0x15
++#define SDMA0_PUB_REG_TYPE2__SDMA0_UNBREAKABLE__SHIFT 0x16
++#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFMON_CNTL__SHIFT 0x17
++#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER0_RESULT__SHIFT 0x18
++#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER1_RESULT__SHIFT 0x19
++#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__SHIFT 0x1a
++#define SDMA0_PUB_REG_TYPE2__SDMA0_CRD_CNTL__SHIFT 0x1b
++#define SDMA0_PUB_REG_TYPE2__SDMA0_MMHUB_TRUSTLVL__SHIFT 0x1c
++#define SDMA0_PUB_REG_TYPE2__SDMA0_GPU_IOV_VIOLATION_LOG__SHIFT 0x1d
++#define SDMA0_PUB_REG_TYPE2__SDMA0_ULV_CNTL__SHIFT 0x1e
++#define SDMA0_PUB_REG_TYPE2__RESERVED__SHIFT 0x1f
++#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV0_MASK 0x00000001L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV1_MASK 0x00000002L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV2_MASK 0x00000004L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_XNACK0_MASK 0x00000008L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_XNACK1_MASK 0x00000010L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_XNACK0_MASK 0x00000020L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_XNACK1_MASK 0x00000040L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_TIMEOUT_MASK 0x00000080L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_PAGE_MASK 0x00000100L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_POWER_CNTL_IDLE_MASK 0x00000200L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_RELAX_ORDERING_LUT_MASK 0x00000400L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_CHICKEN_BITS_2_MASK 0x00000800L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_STATUS3_REG_MASK 0x00001000L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_PHYSICAL_ADDR_LO_MASK 0x00002000L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_PHYSICAL_ADDR_HI_MASK 0x00004000L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_ERROR_LOG_MASK 0x00010000L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG0_MASK 0x00020000L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG1_MASK 0x00040000L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG2_MASK 0x00080000L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG3_MASK 0x00100000L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_F32_COUNTER_MASK 0x00200000L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_UNBREAKABLE_MASK 0x00400000L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFMON_CNTL_MASK 0x00800000L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER0_RESULT_MASK 0x01000000L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER1_RESULT_MASK 0x02000000L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER_TAG_DELAY_RANGE_MASK 0x04000000L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_CRD_CNTL_MASK 0x08000000L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_MMHUB_TRUSTLVL_MASK 0x10000000L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_GPU_IOV_VIOLATION_LOG_MASK 0x20000000L
++#define SDMA0_PUB_REG_TYPE2__SDMA0_ULV_CNTL_MASK 0x40000000L
++#define SDMA0_PUB_REG_TYPE2__RESERVED_MASK 0x80000000L
++//SDMA0_PUB_REG_TYPE3
++#define SDMA0_PUB_REG_TYPE3__SDMA0_EA_DBIT_ADDR_DATA__SHIFT 0x0
++#define SDMA0_PUB_REG_TYPE3__SDMA0_EA_DBIT_ADDR_INDEX__SHIFT 0x1
++#define SDMA0_PUB_REG_TYPE3__RESERVED__SHIFT 0x2
++#define SDMA0_PUB_REG_TYPE3__SDMA0_EA_DBIT_ADDR_DATA_MASK 0x00000001L
++#define SDMA0_PUB_REG_TYPE3__SDMA0_EA_DBIT_ADDR_INDEX_MASK 0x00000002L
++#define SDMA0_PUB_REG_TYPE3__RESERVED_MASK 0xFFFFFFFCL
++//SDMA0_MMHUB_CNTL
++#define SDMA0_MMHUB_CNTL__UNIT_ID__SHIFT 0x0
++#define SDMA0_MMHUB_CNTL__UNIT_ID_MASK 0x0000003FL
++//SDMA0_CONTEXT_GROUP_BOUNDARY
++#define SDMA0_CONTEXT_GROUP_BOUNDARY__RESERVED__SHIFT 0x0
++#define SDMA0_CONTEXT_GROUP_BOUNDARY__RESERVED_MASK 0xFFFFFFFFL
++//SDMA0_POWER_CNTL
++#define SDMA0_POWER_CNTL__PG_CNTL_ENABLE__SHIFT 0x0
++#define SDMA0_POWER_CNTL__EXT_PG_POWER_ON_REQ__SHIFT 0x1
++#define SDMA0_POWER_CNTL__EXT_PG_POWER_OFF_REQ__SHIFT 0x2
++#define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME__SHIFT 0x3
++#define SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE__SHIFT 0x8
++#define SDMA0_POWER_CNTL__MEM_POWER_LS_EN__SHIFT 0x9
++#define SDMA0_POWER_CNTL__MEM_POWER_DS_EN__SHIFT 0xa
++#define SDMA0_POWER_CNTL__MEM_POWER_SD_EN__SHIFT 0xb
++#define SDMA0_POWER_CNTL__MEM_POWER_DELAY__SHIFT 0xc
++#define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME__SHIFT 0x1a
++#define SDMA0_POWER_CNTL__PG_CNTL_ENABLE_MASK 0x00000001L
++#define SDMA0_POWER_CNTL__EXT_PG_POWER_ON_REQ_MASK 0x00000002L
++#define SDMA0_POWER_CNTL__EXT_PG_POWER_OFF_REQ_MASK 0x00000004L
++#define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK 0x000000F8L
++#define SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK 0x00000100L
++#define SDMA0_POWER_CNTL__MEM_POWER_LS_EN_MASK 0x00000200L
++#define SDMA0_POWER_CNTL__MEM_POWER_DS_EN_MASK 0x00000400L
++#define SDMA0_POWER_CNTL__MEM_POWER_SD_EN_MASK 0x00000800L
++#define SDMA0_POWER_CNTL__MEM_POWER_DELAY_MASK 0x003FF000L
++#define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK 0xFC000000L
++//SDMA0_CLK_CTRL
++#define SDMA0_CLK_CTRL__ON_DELAY__SHIFT 0x0
++#define SDMA0_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
++#define SDMA0_CLK_CTRL__RESERVED__SHIFT 0xc
++#define SDMA0_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
++#define SDMA0_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
++#define SDMA0_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
++#define SDMA0_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
++#define SDMA0_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
++#define SDMA0_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
++#define SDMA0_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
++#define SDMA0_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
++#define SDMA0_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
++#define SDMA0_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
++#define SDMA0_CLK_CTRL__RESERVED_MASK 0x00FFF000L
++#define SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
++#define SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
++#define SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
++#define SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
++#define SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
++#define SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
++#define SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
++#define SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
++//SDMA0_CNTL
++#define SDMA0_CNTL__TRAP_ENABLE__SHIFT 0x0
++#define SDMA0_CNTL__UTC_L1_ENABLE__SHIFT 0x1
++#define SDMA0_CNTL__SEM_WAIT_INT_ENABLE__SHIFT 0x2
++#define SDMA0_CNTL__DATA_SWAP_ENABLE__SHIFT 0x3
++#define SDMA0_CNTL__FENCE_SWAP_ENABLE__SHIFT 0x4
++#define SDMA0_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x5
++#define SDMA0_CNTL__MIDCMD_WORLDSWITCH_ENABLE__SHIFT 0x11
++#define SDMA0_CNTL__AUTO_CTXSW_ENABLE__SHIFT 0x12
++#define SDMA0_CNTL__CTXEMPTY_INT_ENABLE__SHIFT 0x1c
++#define SDMA0_CNTL__FROZEN_INT_ENABLE__SHIFT 0x1d
++#define SDMA0_CNTL__IB_PREEMPT_INT_ENABLE__SHIFT 0x1e
++#define SDMA0_CNTL__TRAP_ENABLE_MASK 0x00000001L
++#define SDMA0_CNTL__UTC_L1_ENABLE_MASK 0x00000002L
++#define SDMA0_CNTL__SEM_WAIT_INT_ENABLE_MASK 0x00000004L
++#define SDMA0_CNTL__DATA_SWAP_ENABLE_MASK 0x00000008L
++#define SDMA0_CNTL__FENCE_SWAP_ENABLE_MASK 0x00000010L
++#define SDMA0_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00000020L
++#define SDMA0_CNTL__MIDCMD_WORLDSWITCH_ENABLE_MASK 0x00020000L
++#define SDMA0_CNTL__AUTO_CTXSW_ENABLE_MASK 0x00040000L
++#define SDMA0_CNTL__CTXEMPTY_INT_ENABLE_MASK 0x10000000L
++#define SDMA0_CNTL__FROZEN_INT_ENABLE_MASK 0x20000000L
++#define SDMA0_CNTL__IB_PREEMPT_INT_ENABLE_MASK 0x40000000L
++//SDMA0_CHICKEN_BITS
++#define SDMA0_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE__SHIFT 0x0
++#define SDMA0_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE__SHIFT 0x1
++#define SDMA0_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE__SHIFT 0x2
++#define SDMA0_CHICKEN_BITS__WRITE_BURST_LENGTH__SHIFT 0x8
++#define SDMA0_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE__SHIFT 0xa
++#define SDMA0_CHICKEN_BITS__COPY_OVERLAP_ENABLE__SHIFT 0x10
++#define SDMA0_CHICKEN_BITS__RAW_CHECK_ENABLE__SHIFT 0x11
++#define SDMA0_CHICKEN_BITS__SRBM_POLL_RETRYING__SHIFT 0x14
++#define SDMA0_CHICKEN_BITS__CG_STATUS_OUTPUT__SHIFT 0x17
++#define SDMA0_CHICKEN_BITS__TIME_BASED_QOS__SHIFT 0x19
++#define SDMA0_CHICKEN_BITS__CE_AFIFO_WATERMARK__SHIFT 0x1a
++#define SDMA0_CHICKEN_BITS__CE_DFIFO_WATERMARK__SHIFT 0x1c
++#define SDMA0_CHICKEN_BITS__CE_LFIFO_WATERMARK__SHIFT 0x1e
++#define SDMA0_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE_MASK 0x00000001L
++#define SDMA0_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE_MASK 0x00000002L
++#define SDMA0_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE_MASK 0x00000004L
++#define SDMA0_CHICKEN_BITS__WRITE_BURST_LENGTH_MASK 0x00000300L
++#define SDMA0_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE_MASK 0x00001C00L
++#define SDMA0_CHICKEN_BITS__COPY_OVERLAP_ENABLE_MASK 0x00010000L
++#define SDMA0_CHICKEN_BITS__RAW_CHECK_ENABLE_MASK 0x00020000L
++#define SDMA0_CHICKEN_BITS__SRBM_POLL_RETRYING_MASK 0x00100000L
++#define SDMA0_CHICKEN_BITS__CG_STATUS_OUTPUT_MASK 0x00800000L
++#define SDMA0_CHICKEN_BITS__TIME_BASED_QOS_MASK 0x02000000L
++#define SDMA0_CHICKEN_BITS__CE_AFIFO_WATERMARK_MASK 0x0C000000L
++#define SDMA0_CHICKEN_BITS__CE_DFIFO_WATERMARK_MASK 0x30000000L
++#define SDMA0_CHICKEN_BITS__CE_LFIFO_WATERMARK_MASK 0xC0000000L
++//SDMA0_GB_ADDR_CONFIG
++#define SDMA0_GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
++#define SDMA0_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
++#define SDMA0_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x8
++#define SDMA0_GB_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc
++#define SDMA0_GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13
++#define SDMA0_GB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
++#define SDMA0_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
++#define SDMA0_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
++#define SDMA0_GB_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L
++#define SDMA0_GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L
++//SDMA0_GB_ADDR_CONFIG_READ
++#define SDMA0_GB_ADDR_CONFIG_READ__NUM_PIPES__SHIFT 0x0
++#define SDMA0_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
++#define SDMA0_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE__SHIFT 0x8
++#define SDMA0_GB_ADDR_CONFIG_READ__NUM_BANKS__SHIFT 0xc
++#define SDMA0_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES__SHIFT 0x13
++#define SDMA0_GB_ADDR_CONFIG_READ__NUM_PIPES_MASK 0x00000007L
++#define SDMA0_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
++#define SDMA0_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
++#define SDMA0_GB_ADDR_CONFIG_READ__NUM_BANKS_MASK 0x00007000L
++#define SDMA0_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES_MASK 0x00180000L
++//SDMA0_RB_RPTR_FETCH_HI
++#define SDMA0_RB_RPTR_FETCH_HI__OFFSET__SHIFT 0x0
++#define SDMA0_RB_RPTR_FETCH_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_SEM_WAIT_FAIL_TIMER_CNTL
++#define SDMA0_SEM_WAIT_FAIL_TIMER_CNTL__TIMER__SHIFT 0x0
++#define SDMA0_SEM_WAIT_FAIL_TIMER_CNTL__TIMER_MASK 0xFFFFFFFFL
++//SDMA0_RB_RPTR_FETCH
++#define SDMA0_RB_RPTR_FETCH__OFFSET__SHIFT 0x2
++#define SDMA0_RB_RPTR_FETCH__OFFSET_MASK 0xFFFFFFFCL
++//SDMA0_IB_OFFSET_FETCH
++#define SDMA0_IB_OFFSET_FETCH__OFFSET__SHIFT 0x2
++#define SDMA0_IB_OFFSET_FETCH__OFFSET_MASK 0x003FFFFCL
++//SDMA0_PROGRAM
++#define SDMA0_PROGRAM__STREAM__SHIFT 0x0
++#define SDMA0_PROGRAM__STREAM_MASK 0xFFFFFFFFL
++//SDMA0_STATUS_REG
++#define SDMA0_STATUS_REG__IDLE__SHIFT 0x0
++#define SDMA0_STATUS_REG__REG_IDLE__SHIFT 0x1
++#define SDMA0_STATUS_REG__RB_EMPTY__SHIFT 0x2
++#define SDMA0_STATUS_REG__RB_FULL__SHIFT 0x3
++#define SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT 0x4
++#define SDMA0_STATUS_REG__RB_CMD_FULL__SHIFT 0x5
++#define SDMA0_STATUS_REG__IB_CMD_IDLE__SHIFT 0x6
++#define SDMA0_STATUS_REG__IB_CMD_FULL__SHIFT 0x7
++#define SDMA0_STATUS_REG__BLOCK_IDLE__SHIFT 0x8
++#define SDMA0_STATUS_REG__INSIDE_IB__SHIFT 0x9
++#define SDMA0_STATUS_REG__EX_IDLE__SHIFT 0xa
++#define SDMA0_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE__SHIFT 0xb
++#define SDMA0_STATUS_REG__PACKET_READY__SHIFT 0xc
++#define SDMA0_STATUS_REG__MC_WR_IDLE__SHIFT 0xd
++#define SDMA0_STATUS_REG__SRBM_IDLE__SHIFT 0xe
++#define SDMA0_STATUS_REG__CONTEXT_EMPTY__SHIFT 0xf
++#define SDMA0_STATUS_REG__DELTA_RPTR_FULL__SHIFT 0x10
++#define SDMA0_STATUS_REG__RB_MC_RREQ_IDLE__SHIFT 0x11
++#define SDMA0_STATUS_REG__IB_MC_RREQ_IDLE__SHIFT 0x12
++#define SDMA0_STATUS_REG__MC_RD_IDLE__SHIFT 0x13
++#define SDMA0_STATUS_REG__DELTA_RPTR_EMPTY__SHIFT 0x14
++#define SDMA0_STATUS_REG__MC_RD_RET_STALL__SHIFT 0x15
++#define SDMA0_STATUS_REG__MC_RD_NO_POLL_IDLE__SHIFT 0x16
++#define SDMA0_STATUS_REG__PREV_CMD_IDLE__SHIFT 0x19
++#define SDMA0_STATUS_REG__SEM_IDLE__SHIFT 0x1a
++#define SDMA0_STATUS_REG__SEM_REQ_STALL__SHIFT 0x1b
++#define SDMA0_STATUS_REG__SEM_RESP_STATE__SHIFT 0x1c
++#define SDMA0_STATUS_REG__INT_IDLE__SHIFT 0x1e
++#define SDMA0_STATUS_REG__INT_REQ_STALL__SHIFT 0x1f
++#define SDMA0_STATUS_REG__IDLE_MASK 0x00000001L
++#define SDMA0_STATUS_REG__REG_IDLE_MASK 0x00000002L
++#define SDMA0_STATUS_REG__RB_EMPTY_MASK 0x00000004L
++#define SDMA0_STATUS_REG__RB_FULL_MASK 0x00000008L
++#define SDMA0_STATUS_REG__RB_CMD_IDLE_MASK 0x00000010L
++#define SDMA0_STATUS_REG__RB_CMD_FULL_MASK 0x00000020L
++#define SDMA0_STATUS_REG__IB_CMD_IDLE_MASK 0x00000040L
++#define SDMA0_STATUS_REG__IB_CMD_FULL_MASK 0x00000080L
++#define SDMA0_STATUS_REG__BLOCK_IDLE_MASK 0x00000100L
++#define SDMA0_STATUS_REG__INSIDE_IB_MASK 0x00000200L
++#define SDMA0_STATUS_REG__EX_IDLE_MASK 0x00000400L
++#define SDMA0_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE_MASK 0x00000800L
++#define SDMA0_STATUS_REG__PACKET_READY_MASK 0x00001000L
++#define SDMA0_STATUS_REG__MC_WR_IDLE_MASK 0x00002000L
++#define SDMA0_STATUS_REG__SRBM_IDLE_MASK 0x00004000L
++#define SDMA0_STATUS_REG__CONTEXT_EMPTY_MASK 0x00008000L
++#define SDMA0_STATUS_REG__DELTA_RPTR_FULL_MASK 0x00010000L
++#define SDMA0_STATUS_REG__RB_MC_RREQ_IDLE_MASK 0x00020000L
++#define SDMA0_STATUS_REG__IB_MC_RREQ_IDLE_MASK 0x00040000L
++#define SDMA0_STATUS_REG__MC_RD_IDLE_MASK 0x00080000L
++#define SDMA0_STATUS_REG__DELTA_RPTR_EMPTY_MASK 0x00100000L
++#define SDMA0_STATUS_REG__MC_RD_RET_STALL_MASK 0x00200000L
++#define SDMA0_STATUS_REG__MC_RD_NO_POLL_IDLE_MASK 0x00400000L
++#define SDMA0_STATUS_REG__PREV_CMD_IDLE_MASK 0x02000000L
++#define SDMA0_STATUS_REG__SEM_IDLE_MASK 0x04000000L
++#define SDMA0_STATUS_REG__SEM_REQ_STALL_MASK 0x08000000L
++#define SDMA0_STATUS_REG__SEM_RESP_STATE_MASK 0x30000000L
++#define SDMA0_STATUS_REG__INT_IDLE_MASK 0x40000000L
++#define SDMA0_STATUS_REG__INT_REQ_STALL_MASK 0x80000000L
++//SDMA0_STATUS1_REG
++#define SDMA0_STATUS1_REG__CE_WREQ_IDLE__SHIFT 0x0
++#define SDMA0_STATUS1_REG__CE_WR_IDLE__SHIFT 0x1
++#define SDMA0_STATUS1_REG__CE_SPLIT_IDLE__SHIFT 0x2
++#define SDMA0_STATUS1_REG__CE_RREQ_IDLE__SHIFT 0x3
++#define SDMA0_STATUS1_REG__CE_OUT_IDLE__SHIFT 0x4
++#define SDMA0_STATUS1_REG__CE_IN_IDLE__SHIFT 0x5
++#define SDMA0_STATUS1_REG__CE_DST_IDLE__SHIFT 0x6
++#define SDMA0_STATUS1_REG__CE_CMD_IDLE__SHIFT 0x9
++#define SDMA0_STATUS1_REG__CE_AFIFO_FULL__SHIFT 0xa
++#define SDMA0_STATUS1_REG__CE_INFO_FULL__SHIFT 0xd
++#define SDMA0_STATUS1_REG__CE_INFO1_FULL__SHIFT 0xe
++#define SDMA0_STATUS1_REG__EX_START__SHIFT 0xf
++#define SDMA0_STATUS1_REG__CE_RD_STALL__SHIFT 0x11
++#define SDMA0_STATUS1_REG__CE_WR_STALL__SHIFT 0x12
++#define SDMA0_STATUS1_REG__CE_WREQ_IDLE_MASK 0x00000001L
++#define SDMA0_STATUS1_REG__CE_WR_IDLE_MASK 0x00000002L
++#define SDMA0_STATUS1_REG__CE_SPLIT_IDLE_MASK 0x00000004L
++#define SDMA0_STATUS1_REG__CE_RREQ_IDLE_MASK 0x00000008L
++#define SDMA0_STATUS1_REG__CE_OUT_IDLE_MASK 0x00000010L
++#define SDMA0_STATUS1_REG__CE_IN_IDLE_MASK 0x00000020L
++#define SDMA0_STATUS1_REG__CE_DST_IDLE_MASK 0x00000040L
++#define SDMA0_STATUS1_REG__CE_CMD_IDLE_MASK 0x00000200L
++#define SDMA0_STATUS1_REG__CE_AFIFO_FULL_MASK 0x00000400L
++#define SDMA0_STATUS1_REG__CE_INFO_FULL_MASK 0x00002000L
++#define SDMA0_STATUS1_REG__CE_INFO1_FULL_MASK 0x00004000L
++#define SDMA0_STATUS1_REG__EX_START_MASK 0x00008000L
++#define SDMA0_STATUS1_REG__CE_RD_STALL_MASK 0x00020000L
++#define SDMA0_STATUS1_REG__CE_WR_STALL_MASK 0x00040000L
++//SDMA0_RD_BURST_CNTL
++#define SDMA0_RD_BURST_CNTL__RD_BURST__SHIFT 0x0
++#define SDMA0_RD_BURST_CNTL__RD_BURST_MASK 0x00000003L
++//SDMA0_HBM_PAGE_CONFIG
++#define SDMA0_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT__SHIFT 0x0
++#define SDMA0_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT_MASK 0x00000003L
++//SDMA0_UCODE_CHECKSUM
++#define SDMA0_UCODE_CHECKSUM__DATA__SHIFT 0x0
++#define SDMA0_UCODE_CHECKSUM__DATA_MASK 0xFFFFFFFFL
++//SDMA0_F32_CNTL
++#define SDMA0_F32_CNTL__HALT__SHIFT 0x0
++#define SDMA0_F32_CNTL__STEP__SHIFT 0x1
++#define SDMA0_F32_CNTL__HALT_MASK 0x00000001L
++#define SDMA0_F32_CNTL__STEP_MASK 0x00000002L
++//SDMA0_FREEZE
++#define SDMA0_FREEZE__PREEMPT__SHIFT 0x0
++#define SDMA0_FREEZE__FREEZE__SHIFT 0x4
++#define SDMA0_FREEZE__FROZEN__SHIFT 0x5
++#define SDMA0_FREEZE__F32_FREEZE__SHIFT 0x6
++#define SDMA0_FREEZE__PREEMPT_MASK 0x00000001L
++#define SDMA0_FREEZE__FREEZE_MASK 0x00000010L
++#define SDMA0_FREEZE__FROZEN_MASK 0x00000020L
++#define SDMA0_FREEZE__F32_FREEZE_MASK 0x00000040L
++//SDMA0_PHASE0_QUANTUM
++#define SDMA0_PHASE0_QUANTUM__UNIT__SHIFT 0x0
++#define SDMA0_PHASE0_QUANTUM__VALUE__SHIFT 0x8
++#define SDMA0_PHASE0_QUANTUM__PREFER__SHIFT 0x1e
++#define SDMA0_PHASE0_QUANTUM__UNIT_MASK 0x0000000FL
++#define SDMA0_PHASE0_QUANTUM__VALUE_MASK 0x00FFFF00L
++#define SDMA0_PHASE0_QUANTUM__PREFER_MASK 0x40000000L
++//SDMA0_PHASE1_QUANTUM
++#define SDMA0_PHASE1_QUANTUM__UNIT__SHIFT 0x0
++#define SDMA0_PHASE1_QUANTUM__VALUE__SHIFT 0x8
++#define SDMA0_PHASE1_QUANTUM__PREFER__SHIFT 0x1e
++#define SDMA0_PHASE1_QUANTUM__UNIT_MASK 0x0000000FL
++#define SDMA0_PHASE1_QUANTUM__VALUE_MASK 0x00FFFF00L
++#define SDMA0_PHASE1_QUANTUM__PREFER_MASK 0x40000000L
++//SDMA_POWER_GATING
++#define SDMA_POWER_GATING__SDMA0_POWER_OFF_CONDITION__SHIFT 0x0
++#define SDMA_POWER_GATING__SDMA0_POWER_ON_CONDITION__SHIFT 0x1
++#define SDMA_POWER_GATING__SDMA0_POWER_OFF_REQ__SHIFT 0x2
++#define SDMA_POWER_GATING__SDMA0_POWER_ON_REQ__SHIFT 0x3
++#define SDMA_POWER_GATING__PG_CNTL_STATUS__SHIFT 0x4
++#define SDMA_POWER_GATING__SDMA0_POWER_OFF_CONDITION_MASK 0x00000001L
++#define SDMA_POWER_GATING__SDMA0_POWER_ON_CONDITION_MASK 0x00000002L
++#define SDMA_POWER_GATING__SDMA0_POWER_OFF_REQ_MASK 0x00000004L
++#define SDMA_POWER_GATING__SDMA0_POWER_ON_REQ_MASK 0x00000008L
++#define SDMA_POWER_GATING__PG_CNTL_STATUS_MASK 0x00000030L
++//SDMA_PGFSM_CONFIG
++#define SDMA_PGFSM_CONFIG__FSM_ADDR__SHIFT 0x0
++#define SDMA_PGFSM_CONFIG__POWER_DOWN__SHIFT 0x8
++#define SDMA_PGFSM_CONFIG__POWER_UP__SHIFT 0x9
++#define SDMA_PGFSM_CONFIG__P1_SELECT__SHIFT 0xa
++#define SDMA_PGFSM_CONFIG__P2_SELECT__SHIFT 0xb
++#define SDMA_PGFSM_CONFIG__WRITE__SHIFT 0xc
++#define SDMA_PGFSM_CONFIG__READ__SHIFT 0xd
++#define SDMA_PGFSM_CONFIG__SRBM_OVERRIDE__SHIFT 0x1b
++#define SDMA_PGFSM_CONFIG__REG_ADDR__SHIFT 0x1c
++#define SDMA_PGFSM_CONFIG__FSM_ADDR_MASK 0x000000FFL
++#define SDMA_PGFSM_CONFIG__POWER_DOWN_MASK 0x00000100L
++#define SDMA_PGFSM_CONFIG__POWER_UP_MASK 0x00000200L
++#define SDMA_PGFSM_CONFIG__P1_SELECT_MASK 0x00000400L
++#define SDMA_PGFSM_CONFIG__P2_SELECT_MASK 0x00000800L
++#define SDMA_PGFSM_CONFIG__WRITE_MASK 0x00001000L
++#define SDMA_PGFSM_CONFIG__READ_MASK 0x00002000L
++#define SDMA_PGFSM_CONFIG__SRBM_OVERRIDE_MASK 0x08000000L
++#define SDMA_PGFSM_CONFIG__REG_ADDR_MASK 0xF0000000L
++//SDMA_PGFSM_WRITE
++#define SDMA_PGFSM_WRITE__VALUE__SHIFT 0x0
++#define SDMA_PGFSM_WRITE__VALUE_MASK 0xFFFFFFFFL
++//SDMA_PGFSM_READ
++#define SDMA_PGFSM_READ__VALUE__SHIFT 0x0
++#define SDMA_PGFSM_READ__VALUE_MASK 0x00FFFFFFL
++//SDMA0_EDC_CONFIG
++#define SDMA0_EDC_CONFIG__DIS_EDC__SHIFT 0x1
++#define SDMA0_EDC_CONFIG__ECC_INT_ENABLE__SHIFT 0x2
++#define SDMA0_EDC_CONFIG__DIS_EDC_MASK 0x00000002L
++#define SDMA0_EDC_CONFIG__ECC_INT_ENABLE_MASK 0x00000004L
++//SDMA0_BA_THRESHOLD
++#define SDMA0_BA_THRESHOLD__READ_THRES__SHIFT 0x0
++#define SDMA0_BA_THRESHOLD__WRITE_THRES__SHIFT 0x10
++#define SDMA0_BA_THRESHOLD__READ_THRES_MASK 0x000003FFL
++#define SDMA0_BA_THRESHOLD__WRITE_THRES_MASK 0x03FF0000L
++//SDMA0_ID
++#define SDMA0_ID__DEVICE_ID__SHIFT 0x0
++#define SDMA0_ID__DEVICE_ID_MASK 0x000000FFL
++//SDMA0_VERSION
++#define SDMA0_VERSION__MINVER__SHIFT 0x0
++#define SDMA0_VERSION__MAJVER__SHIFT 0x8
++#define SDMA0_VERSION__REV__SHIFT 0x10
++#define SDMA0_VERSION__MINVER_MASK 0x0000007FL
++#define SDMA0_VERSION__MAJVER_MASK 0x00007F00L
++#define SDMA0_VERSION__REV_MASK 0x003F0000L
++//SDMA0_EDC_COUNTER
++#define SDMA0_EDC_COUNTER__SDMA_UCODE_BUF_DED__SHIFT 0x0
++#define SDMA0_EDC_COUNTER__SDMA_UCODE_BUF_SEC__SHIFT 0x1
++#define SDMA0_EDC_COUNTER__SDMA_RB_CMD_BUF_SED__SHIFT 0x2
++#define SDMA0_EDC_COUNTER__SDMA_IB_CMD_BUF_SED__SHIFT 0x3
++#define SDMA0_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED__SHIFT 0x4
++#define SDMA0_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED__SHIFT 0x5
++#define SDMA0_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED__SHIFT 0x6
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED__SHIFT 0x7
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED__SHIFT 0x8
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED__SHIFT 0x9
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED__SHIFT 0xa
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED__SHIFT 0xb
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED__SHIFT 0xc
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED__SHIFT 0xd
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED__SHIFT 0xe
++#define SDMA0_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED__SHIFT 0xf
++#define SDMA0_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED__SHIFT 0x10
++#define SDMA0_EDC_COUNTER__SDMA_UCODE_BUF_DED_MASK 0x00000001L
++#define SDMA0_EDC_COUNTER__SDMA_UCODE_BUF_SEC_MASK 0x00000002L
++#define SDMA0_EDC_COUNTER__SDMA_RB_CMD_BUF_SED_MASK 0x00000004L
++#define SDMA0_EDC_COUNTER__SDMA_IB_CMD_BUF_SED_MASK 0x00000008L
++#define SDMA0_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED_MASK 0x00000010L
++#define SDMA0_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED_MASK 0x00000020L
++#define SDMA0_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED_MASK 0x00000040L
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED_MASK 0x00000080L
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED_MASK 0x00000100L
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED_MASK 0x00000200L
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED_MASK 0x00000400L
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED_MASK 0x00000800L
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED_MASK 0x00001000L
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED_MASK 0x00002000L
++#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED_MASK 0x00004000L
++#define SDMA0_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED_MASK 0x00008000L
++#define SDMA0_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED_MASK 0x00010000L
++//SDMA0_EDC_COUNTER_CLEAR
++#define SDMA0_EDC_COUNTER_CLEAR__DUMMY__SHIFT 0x0
++#define SDMA0_EDC_COUNTER_CLEAR__DUMMY_MASK 0x00000001L
++//SDMA0_STATUS2_REG
++#define SDMA0_STATUS2_REG__ID__SHIFT 0x0
++#define SDMA0_STATUS2_REG__F32_INSTR_PTR__SHIFT 0x2
++#define SDMA0_STATUS2_REG__CMD_OP__SHIFT 0x10
++#define SDMA0_STATUS2_REG__ID_MASK 0x00000003L
++#define SDMA0_STATUS2_REG__F32_INSTR_PTR_MASK 0x00000FFCL
++#define SDMA0_STATUS2_REG__CMD_OP_MASK 0xFFFF0000L
++//SDMA0_ATOMIC_CNTL
++#define SDMA0_ATOMIC_CNTL__LOOP_TIMER__SHIFT 0x0
++#define SDMA0_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE__SHIFT 0x1f
++#define SDMA0_ATOMIC_CNTL__LOOP_TIMER_MASK 0x7FFFFFFFL
++#define SDMA0_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE_MASK 0x80000000L
++//SDMA0_ATOMIC_PREOP_LO
++#define SDMA0_ATOMIC_PREOP_LO__DATA__SHIFT 0x0
++#define SDMA0_ATOMIC_PREOP_LO__DATA_MASK 0xFFFFFFFFL
++//SDMA0_ATOMIC_PREOP_HI
++#define SDMA0_ATOMIC_PREOP_HI__DATA__SHIFT 0x0
++#define SDMA0_ATOMIC_PREOP_HI__DATA_MASK 0xFFFFFFFFL
++//SDMA0_UTCL1_CNTL
++#define SDMA0_UTCL1_CNTL__REDO_ENABLE__SHIFT 0x0
++#define SDMA0_UTCL1_CNTL__REDO_DELAY__SHIFT 0x1
++#define SDMA0_UTCL1_CNTL__REDO_WATERMK__SHIFT 0xb
++#define SDMA0_UTCL1_CNTL__INVACK_DELAY__SHIFT 0xe
++#define SDMA0_UTCL1_CNTL__REQL2_CREDIT__SHIFT 0x18
++#define SDMA0_UTCL1_CNTL__VADDR_WATERMK__SHIFT 0x1d
++#define SDMA0_UTCL1_CNTL__REDO_ENABLE_MASK 0x00000001L
++#define SDMA0_UTCL1_CNTL__REDO_DELAY_MASK 0x000007FEL
++#define SDMA0_UTCL1_CNTL__REDO_WATERMK_MASK 0x00003800L
++#define SDMA0_UTCL1_CNTL__INVACK_DELAY_MASK 0x00FFC000L
++#define SDMA0_UTCL1_CNTL__REQL2_CREDIT_MASK 0x1F000000L
++#define SDMA0_UTCL1_CNTL__VADDR_WATERMK_MASK 0xE0000000L
++//SDMA0_UTCL1_WATERMK
++#define SDMA0_UTCL1_WATERMK__REQMC_WATERMK__SHIFT 0x0
++#define SDMA0_UTCL1_WATERMK__REQPG_WATERMK__SHIFT 0xa
++#define SDMA0_UTCL1_WATERMK__INVREQ_WATERMK__SHIFT 0x12
++#define SDMA0_UTCL1_WATERMK__XNACK_WATERMK__SHIFT 0x1a
++#define SDMA0_UTCL1_WATERMK__REQMC_WATERMK_MASK 0x000003FFL
++#define SDMA0_UTCL1_WATERMK__REQPG_WATERMK_MASK 0x0003FC00L
++#define SDMA0_UTCL1_WATERMK__INVREQ_WATERMK_MASK 0x03FC0000L
++#define SDMA0_UTCL1_WATERMK__XNACK_WATERMK_MASK 0xFC000000L
++//SDMA0_UTCL1_RD_STATUS
++#define SDMA0_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0
++#define SDMA0_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1
++#define SDMA0_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2
++#define SDMA0_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3
++#define SDMA0_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4
++#define SDMA0_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5
++#define SDMA0_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6
++#define SDMA0_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7
++#define SDMA0_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8
++#define SDMA0_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9
++#define SDMA0_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa
++#define SDMA0_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb
++#define SDMA0_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc
++#define SDMA0_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd
++#define SDMA0_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe
++#define SDMA0_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf
++#define SDMA0_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10
++#define SDMA0_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11
++#define SDMA0_UTCL1_RD_STATUS__PAGE_FAULT__SHIFT 0x12
++#define SDMA0_UTCL1_RD_STATUS__PAGE_NULL__SHIFT 0x13
++#define SDMA0_UTCL1_RD_STATUS__REQL2_IDLE__SHIFT 0x14
++#define SDMA0_UTCL1_RD_STATUS__CE_L1_STALL__SHIFT 0x15
++#define SDMA0_UTCL1_RD_STATUS__NEXT_RD_VECTOR__SHIFT 0x16
++#define SDMA0_UTCL1_RD_STATUS__MERGE_STATE__SHIFT 0x1a
++#define SDMA0_UTCL1_RD_STATUS__ADDR_RD_RTR__SHIFT 0x1d
++#define SDMA0_UTCL1_RD_STATUS__WPTR_POLLING__SHIFT 0x1e
++#define SDMA0_UTCL1_RD_STATUS__INVREQ_SIZE__SHIFT 0x1f
++#define SDMA0_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L
++#define SDMA0_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L
++#define SDMA0_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L
++#define SDMA0_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L
++#define SDMA0_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L
++#define SDMA0_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L
++#define SDMA0_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L
++#define SDMA0_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L
++#define SDMA0_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L
++#define SDMA0_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L
++#define SDMA0_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L
++#define SDMA0_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L
++#define SDMA0_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L
++#define SDMA0_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L
++#define SDMA0_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L
++#define SDMA0_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L
++#define SDMA0_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L
++#define SDMA0_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L
++#define SDMA0_UTCL1_RD_STATUS__PAGE_FAULT_MASK 0x00040000L
++#define SDMA0_UTCL1_RD_STATUS__PAGE_NULL_MASK 0x00080000L
++#define SDMA0_UTCL1_RD_STATUS__REQL2_IDLE_MASK 0x00100000L
++#define SDMA0_UTCL1_RD_STATUS__CE_L1_STALL_MASK 0x00200000L
++#define SDMA0_UTCL1_RD_STATUS__NEXT_RD_VECTOR_MASK 0x03C00000L
++#define SDMA0_UTCL1_RD_STATUS__MERGE_STATE_MASK 0x1C000000L
++#define SDMA0_UTCL1_RD_STATUS__ADDR_RD_RTR_MASK 0x20000000L
++#define SDMA0_UTCL1_RD_STATUS__WPTR_POLLING_MASK 0x40000000L
++#define SDMA0_UTCL1_RD_STATUS__INVREQ_SIZE_MASK 0x80000000L
++//SDMA0_UTCL1_WR_STATUS
++#define SDMA0_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0
++#define SDMA0_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1
++#define SDMA0_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2
++#define SDMA0_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3
++#define SDMA0_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4
++#define SDMA0_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5
++#define SDMA0_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6
++#define SDMA0_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7
++#define SDMA0_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8
++#define SDMA0_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9
++#define SDMA0_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa
++#define SDMA0_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb
++#define SDMA0_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc
++#define SDMA0_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd
++#define SDMA0_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe
++#define SDMA0_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf
++#define SDMA0_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10
++#define SDMA0_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11
++#define SDMA0_UTCL1_WR_STATUS__PAGE_FAULT__SHIFT 0x12
++#define SDMA0_UTCL1_WR_STATUS__PAGE_NULL__SHIFT 0x13
++#define SDMA0_UTCL1_WR_STATUS__REQL2_IDLE__SHIFT 0x14
++#define SDMA0_UTCL1_WR_STATUS__F32_WR_RTR__SHIFT 0x15
++#define SDMA0_UTCL1_WR_STATUS__NEXT_WR_VECTOR__SHIFT 0x16
++#define SDMA0_UTCL1_WR_STATUS__MERGE_STATE__SHIFT 0x19
++#define SDMA0_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY__SHIFT 0x1c
++#define SDMA0_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL__SHIFT 0x1d
++#define SDMA0_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY__SHIFT 0x1e
++#define SDMA0_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL__SHIFT 0x1f
++#define SDMA0_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L
++#define SDMA0_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L
++#define SDMA0_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L
++#define SDMA0_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L
++#define SDMA0_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L
++#define SDMA0_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L
++#define SDMA0_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L
++#define SDMA0_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L
++#define SDMA0_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L
++#define SDMA0_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L
++#define SDMA0_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L
++#define SDMA0_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L
++#define SDMA0_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L
++#define SDMA0_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L
++#define SDMA0_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L
++#define SDMA0_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L
++#define SDMA0_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L
++#define SDMA0_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L
++#define SDMA0_UTCL1_WR_STATUS__PAGE_FAULT_MASK 0x00040000L
++#define SDMA0_UTCL1_WR_STATUS__PAGE_NULL_MASK 0x00080000L
++#define SDMA0_UTCL1_WR_STATUS__REQL2_IDLE_MASK 0x00100000L
++#define SDMA0_UTCL1_WR_STATUS__F32_WR_RTR_MASK 0x00200000L
++#define SDMA0_UTCL1_WR_STATUS__NEXT_WR_VECTOR_MASK 0x01C00000L
++#define SDMA0_UTCL1_WR_STATUS__MERGE_STATE_MASK 0x0E000000L
++#define SDMA0_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY_MASK 0x10000000L
++#define SDMA0_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL_MASK 0x20000000L
++#define SDMA0_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY_MASK 0x40000000L
++#define SDMA0_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL_MASK 0x80000000L
++//SDMA0_UTCL1_INV0
++#define SDMA0_UTCL1_INV0__INV_MIDDLE__SHIFT 0x0
++#define SDMA0_UTCL1_INV0__RD_TIMEOUT__SHIFT 0x1
++#define SDMA0_UTCL1_INV0__WR_TIMEOUT__SHIFT 0x2
++#define SDMA0_UTCL1_INV0__RD_IN_INVADR__SHIFT 0x3
++#define SDMA0_UTCL1_INV0__WR_IN_INVADR__SHIFT 0x4
++#define SDMA0_UTCL1_INV0__PAGE_NULL_SW__SHIFT 0x5
++#define SDMA0_UTCL1_INV0__XNACK_IS_INVADR__SHIFT 0x6
++#define SDMA0_UTCL1_INV0__INVREQ_ENABLE__SHIFT 0x7
++#define SDMA0_UTCL1_INV0__NACK_TIMEOUT_SW__SHIFT 0x8
++#define SDMA0_UTCL1_INV0__NFLUSH_INV_IDLE__SHIFT 0x9
++#define SDMA0_UTCL1_INV0__FLUSH_INV_IDLE__SHIFT 0xa
++#define SDMA0_UTCL1_INV0__INV_FLUSHTYPE__SHIFT 0xb
++#define SDMA0_UTCL1_INV0__INV_VMID_VEC__SHIFT 0xc
++#define SDMA0_UTCL1_INV0__INV_ADDR_HI__SHIFT 0x1c
++#define SDMA0_UTCL1_INV0__INV_MIDDLE_MASK 0x00000001L
++#define SDMA0_UTCL1_INV0__RD_TIMEOUT_MASK 0x00000002L
++#define SDMA0_UTCL1_INV0__WR_TIMEOUT_MASK 0x00000004L
++#define SDMA0_UTCL1_INV0__RD_IN_INVADR_MASK 0x00000008L
++#define SDMA0_UTCL1_INV0__WR_IN_INVADR_MASK 0x00000010L
++#define SDMA0_UTCL1_INV0__PAGE_NULL_SW_MASK 0x00000020L
++#define SDMA0_UTCL1_INV0__XNACK_IS_INVADR_MASK 0x00000040L
++#define SDMA0_UTCL1_INV0__INVREQ_ENABLE_MASK 0x00000080L
++#define SDMA0_UTCL1_INV0__NACK_TIMEOUT_SW_MASK 0x00000100L
++#define SDMA0_UTCL1_INV0__NFLUSH_INV_IDLE_MASK 0x00000200L
++#define SDMA0_UTCL1_INV0__FLUSH_INV_IDLE_MASK 0x00000400L
++#define SDMA0_UTCL1_INV0__INV_FLUSHTYPE_MASK 0x00000800L
++#define SDMA0_UTCL1_INV0__INV_VMID_VEC_MASK 0x0FFFF000L
++#define SDMA0_UTCL1_INV0__INV_ADDR_HI_MASK 0xF0000000L
++//SDMA0_UTCL1_INV1
++#define SDMA0_UTCL1_INV1__INV_ADDR_LO__SHIFT 0x0
++#define SDMA0_UTCL1_INV1__INV_ADDR_LO_MASK 0xFFFFFFFFL
++//SDMA0_UTCL1_INV2
++#define SDMA0_UTCL1_INV2__INV_NFLUSH_VMID_VEC__SHIFT 0x0
++#define SDMA0_UTCL1_INV2__INV_NFLUSH_VMID_VEC_MASK 0xFFFFFFFFL
++//SDMA0_UTCL1_RD_XNACK0
++#define SDMA0_UTCL1_RD_XNACK0__XNACK_ADDR_LO__SHIFT 0x0
++#define SDMA0_UTCL1_RD_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL
++//SDMA0_UTCL1_RD_XNACK1
++#define SDMA0_UTCL1_RD_XNACK1__XNACK_ADDR_HI__SHIFT 0x0
++#define SDMA0_UTCL1_RD_XNACK1__XNACK_VMID__SHIFT 0x4
++#define SDMA0_UTCL1_RD_XNACK1__XNACK_VECTOR__SHIFT 0x8
++#define SDMA0_UTCL1_RD_XNACK1__IS_XNACK__SHIFT 0x1a
++#define SDMA0_UTCL1_RD_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL
++#define SDMA0_UTCL1_RD_XNACK1__XNACK_VMID_MASK 0x000000F0L
++#define SDMA0_UTCL1_RD_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L
++#define SDMA0_UTCL1_RD_XNACK1__IS_XNACK_MASK 0x0C000000L
++//SDMA0_UTCL1_WR_XNACK0
++#define SDMA0_UTCL1_WR_XNACK0__XNACK_ADDR_LO__SHIFT 0x0
++#define SDMA0_UTCL1_WR_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL
++//SDMA0_UTCL1_WR_XNACK1
++#define SDMA0_UTCL1_WR_XNACK1__XNACK_ADDR_HI__SHIFT 0x0
++#define SDMA0_UTCL1_WR_XNACK1__XNACK_VMID__SHIFT 0x4
++#define SDMA0_UTCL1_WR_XNACK1__XNACK_VECTOR__SHIFT 0x8
++#define SDMA0_UTCL1_WR_XNACK1__IS_XNACK__SHIFT 0x1a
++#define SDMA0_UTCL1_WR_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL
++#define SDMA0_UTCL1_WR_XNACK1__XNACK_VMID_MASK 0x000000F0L
++#define SDMA0_UTCL1_WR_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L
++#define SDMA0_UTCL1_WR_XNACK1__IS_XNACK_MASK 0x0C000000L
++//SDMA0_UTCL1_TIMEOUT
++#define SDMA0_UTCL1_TIMEOUT__RD_XNACK_LIMIT__SHIFT 0x0
++#define SDMA0_UTCL1_TIMEOUT__WR_XNACK_LIMIT__SHIFT 0x10
++#define SDMA0_UTCL1_TIMEOUT__RD_XNACK_LIMIT_MASK 0x0000FFFFL
++#define SDMA0_UTCL1_TIMEOUT__WR_XNACK_LIMIT_MASK 0xFFFF0000L
++//SDMA0_UTCL1_PAGE
++#define SDMA0_UTCL1_PAGE__VM_HOLE__SHIFT 0x0
++#define SDMA0_UTCL1_PAGE__REQ_TYPE__SHIFT 0x1
++#define SDMA0_UTCL1_PAGE__USE_MTYPE__SHIFT 0x6
++#define SDMA0_UTCL1_PAGE__USE_PT_SNOOP__SHIFT 0x9
++#define SDMA0_UTCL1_PAGE__VM_HOLE_MASK 0x00000001L
++#define SDMA0_UTCL1_PAGE__REQ_TYPE_MASK 0x0000001EL
++#define SDMA0_UTCL1_PAGE__USE_MTYPE_MASK 0x000001C0L
++#define SDMA0_UTCL1_PAGE__USE_PT_SNOOP_MASK 0x00000200L
++//SDMA0_POWER_CNTL_IDLE
++#define SDMA0_POWER_CNTL_IDLE__DELAY0__SHIFT 0x0
++#define SDMA0_POWER_CNTL_IDLE__DELAY1__SHIFT 0x10
++#define SDMA0_POWER_CNTL_IDLE__DELAY2__SHIFT 0x18
++#define SDMA0_POWER_CNTL_IDLE__DELAY0_MASK 0x0000FFFFL
++#define SDMA0_POWER_CNTL_IDLE__DELAY1_MASK 0x00FF0000L
++#define SDMA0_POWER_CNTL_IDLE__DELAY2_MASK 0xFF000000L
++//SDMA0_RELAX_ORDERING_LUT
++#define SDMA0_RELAX_ORDERING_LUT__RESERVED0__SHIFT 0x0
++#define SDMA0_RELAX_ORDERING_LUT__COPY__SHIFT 0x1
++#define SDMA0_RELAX_ORDERING_LUT__WRITE__SHIFT 0x2
++#define SDMA0_RELAX_ORDERING_LUT__RESERVED3__SHIFT 0x3
++#define SDMA0_RELAX_ORDERING_LUT__RESERVED4__SHIFT 0x4
++#define SDMA0_RELAX_ORDERING_LUT__FENCE__SHIFT 0x5
++#define SDMA0_RELAX_ORDERING_LUT__RESERVED76__SHIFT 0x6
++#define SDMA0_RELAX_ORDERING_LUT__POLL_MEM__SHIFT 0x8
++#define SDMA0_RELAX_ORDERING_LUT__COND_EXE__SHIFT 0x9
++#define SDMA0_RELAX_ORDERING_LUT__ATOMIC__SHIFT 0xa
++#define SDMA0_RELAX_ORDERING_LUT__CONST_FILL__SHIFT 0xb
++#define SDMA0_RELAX_ORDERING_LUT__PTEPDE__SHIFT 0xc
++#define SDMA0_RELAX_ORDERING_LUT__TIMESTAMP__SHIFT 0xd
++#define SDMA0_RELAX_ORDERING_LUT__RESERVED__SHIFT 0xe
++#define SDMA0_RELAX_ORDERING_LUT__WORLD_SWITCH__SHIFT 0x1b
++#define SDMA0_RELAX_ORDERING_LUT__RPTR_WRB__SHIFT 0x1c
++#define SDMA0_RELAX_ORDERING_LUT__WPTR_POLL__SHIFT 0x1d
++#define SDMA0_RELAX_ORDERING_LUT__IB_FETCH__SHIFT 0x1e
++#define SDMA0_RELAX_ORDERING_LUT__RB_FETCH__SHIFT 0x1f
++#define SDMA0_RELAX_ORDERING_LUT__RESERVED0_MASK 0x00000001L
++#define SDMA0_RELAX_ORDERING_LUT__COPY_MASK 0x00000002L
++#define SDMA0_RELAX_ORDERING_LUT__WRITE_MASK 0x00000004L
++#define SDMA0_RELAX_ORDERING_LUT__RESERVED3_MASK 0x00000008L
++#define SDMA0_RELAX_ORDERING_LUT__RESERVED4_MASK 0x00000010L
++#define SDMA0_RELAX_ORDERING_LUT__FENCE_MASK 0x00000020L
++#define SDMA0_RELAX_ORDERING_LUT__RESERVED76_MASK 0x000000C0L
++#define SDMA0_RELAX_ORDERING_LUT__POLL_MEM_MASK 0x00000100L
++#define SDMA0_RELAX_ORDERING_LUT__COND_EXE_MASK 0x00000200L
++#define SDMA0_RELAX_ORDERING_LUT__ATOMIC_MASK 0x00000400L
++#define SDMA0_RELAX_ORDERING_LUT__CONST_FILL_MASK 0x00000800L
++#define SDMA0_RELAX_ORDERING_LUT__PTEPDE_MASK 0x00001000L
++#define SDMA0_RELAX_ORDERING_LUT__TIMESTAMP_MASK 0x00002000L
++#define SDMA0_RELAX_ORDERING_LUT__RESERVED_MASK 0x07FFC000L
++#define SDMA0_RELAX_ORDERING_LUT__WORLD_SWITCH_MASK 0x08000000L
++#define SDMA0_RELAX_ORDERING_LUT__RPTR_WRB_MASK 0x10000000L
++#define SDMA0_RELAX_ORDERING_LUT__WPTR_POLL_MASK 0x20000000L
++#define SDMA0_RELAX_ORDERING_LUT__IB_FETCH_MASK 0x40000000L
++#define SDMA0_RELAX_ORDERING_LUT__RB_FETCH_MASK 0x80000000L
++//SDMA0_CHICKEN_BITS_2
++#define SDMA0_CHICKEN_BITS_2__F32_CMD_PROC_DELAY__SHIFT 0x0
++#define SDMA0_CHICKEN_BITS_2__F32_CMD_PROC_DELAY_MASK 0x0000000FL
++//SDMA0_STATUS3_REG
++#define SDMA0_STATUS3_REG__CMD_OP_STATUS__SHIFT 0x0
++#define SDMA0_STATUS3_REG__PREV_VM_CMD__SHIFT 0x10
++#define SDMA0_STATUS3_REG__EXCEPTION_IDLE__SHIFT 0x14
++#define SDMA0_STATUS3_REG__CMD_OP_STATUS_MASK 0x0000FFFFL
++#define SDMA0_STATUS3_REG__PREV_VM_CMD_MASK 0x000F0000L
++#define SDMA0_STATUS3_REG__EXCEPTION_IDLE_MASK 0x00100000L
++//SDMA0_PHYSICAL_ADDR_LO
++#define SDMA0_PHYSICAL_ADDR_LO__D_VALID__SHIFT 0x0
++#define SDMA0_PHYSICAL_ADDR_LO__DIRTY__SHIFT 0x1
++#define SDMA0_PHYSICAL_ADDR_LO__PHY_VALID__SHIFT 0x2
++#define SDMA0_PHYSICAL_ADDR_LO__ADDR__SHIFT 0xc
++#define SDMA0_PHYSICAL_ADDR_LO__D_VALID_MASK 0x00000001L
++#define SDMA0_PHYSICAL_ADDR_LO__DIRTY_MASK 0x00000002L
++#define SDMA0_PHYSICAL_ADDR_LO__PHY_VALID_MASK 0x00000004L
++#define SDMA0_PHYSICAL_ADDR_LO__ADDR_MASK 0xFFFFF000L
++//SDMA0_PHYSICAL_ADDR_HI
++#define SDMA0_PHYSICAL_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA0_PHYSICAL_ADDR_HI__ADDR_MASK 0x0000FFFFL
++//SDMA0_ERROR_LOG
++#define SDMA0_ERROR_LOG__OVERRIDE__SHIFT 0x0
++#define SDMA0_ERROR_LOG__STATUS__SHIFT 0x10
++#define SDMA0_ERROR_LOG__OVERRIDE_MASK 0x0000FFFFL
++#define SDMA0_ERROR_LOG__STATUS_MASK 0xFFFF0000L
++//SDMA0_PUB_DUMMY_REG0
++#define SDMA0_PUB_DUMMY_REG0__VALUE__SHIFT 0x0
++#define SDMA0_PUB_DUMMY_REG0__VALUE_MASK 0xFFFFFFFFL
++//SDMA0_PUB_DUMMY_REG1
++#define SDMA0_PUB_DUMMY_REG1__VALUE__SHIFT 0x0
++#define SDMA0_PUB_DUMMY_REG1__VALUE_MASK 0xFFFFFFFFL
++//SDMA0_PUB_DUMMY_REG2
++#define SDMA0_PUB_DUMMY_REG2__VALUE__SHIFT 0x0
++#define SDMA0_PUB_DUMMY_REG2__VALUE_MASK 0xFFFFFFFFL
++//SDMA0_PUB_DUMMY_REG3
++#define SDMA0_PUB_DUMMY_REG3__VALUE__SHIFT 0x0
++#define SDMA0_PUB_DUMMY_REG3__VALUE_MASK 0xFFFFFFFFL
++//SDMA0_F32_COUNTER
++#define SDMA0_F32_COUNTER__VALUE__SHIFT 0x0
++#define SDMA0_F32_COUNTER__VALUE_MASK 0xFFFFFFFFL
++//SDMA0_UNBREAKABLE
++#define SDMA0_UNBREAKABLE__VALUE__SHIFT 0x0
++#define SDMA0_UNBREAKABLE__VALUE_MASK 0x00000001L
++//SDMA0_PERFMON_CNTL
++#define SDMA0_PERFMON_CNTL__PERF_ENABLE0__SHIFT 0x0
++#define SDMA0_PERFMON_CNTL__PERF_CLEAR0__SHIFT 0x1
++#define SDMA0_PERFMON_CNTL__PERF_SEL0__SHIFT 0x2
++#define SDMA0_PERFMON_CNTL__PERF_ENABLE1__SHIFT 0xa
++#define SDMA0_PERFMON_CNTL__PERF_CLEAR1__SHIFT 0xb
++#define SDMA0_PERFMON_CNTL__PERF_SEL1__SHIFT 0xc
++#define SDMA0_PERFMON_CNTL__PERF_ENABLE0_MASK 0x00000001L
++#define SDMA0_PERFMON_CNTL__PERF_CLEAR0_MASK 0x00000002L
++#define SDMA0_PERFMON_CNTL__PERF_SEL0_MASK 0x000003FCL
++#define SDMA0_PERFMON_CNTL__PERF_ENABLE1_MASK 0x00000400L
++#define SDMA0_PERFMON_CNTL__PERF_CLEAR1_MASK 0x00000800L
++#define SDMA0_PERFMON_CNTL__PERF_SEL1_MASK 0x000FF000L
++//SDMA0_PERFCOUNTER0_RESULT
++#define SDMA0_PERFCOUNTER0_RESULT__PERF_COUNT__SHIFT 0x0
++#define SDMA0_PERFCOUNTER0_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
++//SDMA0_PERFCOUNTER1_RESULT
++#define SDMA0_PERFCOUNTER1_RESULT__PERF_COUNT__SHIFT 0x0
++#define SDMA0_PERFCOUNTER1_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
++//SDMA0_PERFCOUNTER_TAG_DELAY_RANGE
++#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW__SHIFT 0x0
++#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH__SHIFT 0xe
++#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW__SHIFT 0x1c
++#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW_MASK 0x00003FFFL
++#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH_MASK 0x0FFFC000L
++#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW_MASK 0x10000000L
++//SDMA0_CRD_CNTL
++#define SDMA0_CRD_CNTL__MC_WRREQ_CREDIT__SHIFT 0x7
++#define SDMA0_CRD_CNTL__MC_RDREQ_CREDIT__SHIFT 0xd
++#define SDMA0_CRD_CNTL__MC_WRREQ_CREDIT_MASK 0x00001F80L
++#define SDMA0_CRD_CNTL__MC_RDREQ_CREDIT_MASK 0x0007E000L
++//SDMA0_MMHUB_TRUSTLVL
++#define SDMA0_MMHUB_TRUSTLVL__SECFLAG0__SHIFT 0x0
++#define SDMA0_MMHUB_TRUSTLVL__SECFLAG1__SHIFT 0x3
++#define SDMA0_MMHUB_TRUSTLVL__SECFLAG2__SHIFT 0x6
++#define SDMA0_MMHUB_TRUSTLVL__SECFLAG3__SHIFT 0x9
++#define SDMA0_MMHUB_TRUSTLVL__SECFLAG4__SHIFT 0xc
++#define SDMA0_MMHUB_TRUSTLVL__SECFLAG5__SHIFT 0xf
++#define SDMA0_MMHUB_TRUSTLVL__SECFLAG6__SHIFT 0x12
++#define SDMA0_MMHUB_TRUSTLVL__SECFLAG7__SHIFT 0x15
++#define SDMA0_MMHUB_TRUSTLVL__SECFLAG0_MASK 0x00000007L
++#define SDMA0_MMHUB_TRUSTLVL__SECFLAG1_MASK 0x00000038L
++#define SDMA0_MMHUB_TRUSTLVL__SECFLAG2_MASK 0x000001C0L
++#define SDMA0_MMHUB_TRUSTLVL__SECFLAG3_MASK 0x00000E00L
++#define SDMA0_MMHUB_TRUSTLVL__SECFLAG4_MASK 0x00007000L
++#define SDMA0_MMHUB_TRUSTLVL__SECFLAG5_MASK 0x00038000L
++#define SDMA0_MMHUB_TRUSTLVL__SECFLAG6_MASK 0x001C0000L
++#define SDMA0_MMHUB_TRUSTLVL__SECFLAG7_MASK 0x00E00000L
++//SDMA0_GPU_IOV_VIOLATION_LOG
++#define SDMA0_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
++#define SDMA0_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1
++#define SDMA0_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2
++#define SDMA0_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION__SHIFT 0x12
++#define SDMA0_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x13
++#define SDMA0_GPU_IOV_VIOLATION_LOG__VFID__SHIFT 0x14
++#define SDMA0_GPU_IOV_VIOLATION_LOG__INITIATOR_ID__SHIFT 0x18
++#define SDMA0_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
++#define SDMA0_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L
++#define SDMA0_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x0003FFFCL
++#define SDMA0_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION_MASK 0x00040000L
++#define SDMA0_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00080000L
++#define SDMA0_GPU_IOV_VIOLATION_LOG__VFID_MASK 0x00F00000L
++#define SDMA0_GPU_IOV_VIOLATION_LOG__INITIATOR_ID_MASK 0xFF000000L
++//SDMA0_ULV_CNTL
++#define SDMA0_ULV_CNTL__HYSTERESIS__SHIFT 0x0
++#define SDMA0_ULV_CNTL__ENTER_ULV_INT__SHIFT 0x1d
++#define SDMA0_ULV_CNTL__EXIT_ULV_INT__SHIFT 0x1e
++#define SDMA0_ULV_CNTL__ULV_STATUS__SHIFT 0x1f
++#define SDMA0_ULV_CNTL__HYSTERESIS_MASK 0x0000001FL
++#define SDMA0_ULV_CNTL__ENTER_ULV_INT_MASK 0x20000000L
++#define SDMA0_ULV_CNTL__EXIT_ULV_INT_MASK 0x40000000L
++#define SDMA0_ULV_CNTL__ULV_STATUS_MASK 0x80000000L
++//SDMA0_EA_DBIT_ADDR_DATA
++#define SDMA0_EA_DBIT_ADDR_DATA__VALUE__SHIFT 0x0
++#define SDMA0_EA_DBIT_ADDR_DATA__VALUE_MASK 0xFFFFFFFFL
++//SDMA0_EA_DBIT_ADDR_INDEX
++#define SDMA0_EA_DBIT_ADDR_INDEX__VALUE__SHIFT 0x0
++#define SDMA0_EA_DBIT_ADDR_INDEX__VALUE_MASK 0x00000007L
++//SDMA0_GFX_RB_CNTL
++#define SDMA0_GFX_RB_CNTL__RB_ENABLE__SHIFT 0x0
++#define SDMA0_GFX_RB_CNTL__RB_SIZE__SHIFT 0x1
++#define SDMA0_GFX_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
++#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
++#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
++#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
++#define SDMA0_GFX_RB_CNTL__RB_PRIV__SHIFT 0x17
++#define SDMA0_GFX_RB_CNTL__RB_VMID__SHIFT 0x18
++#define SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK 0x00000001L
++#define SDMA0_GFX_RB_CNTL__RB_SIZE_MASK 0x0000007EL
++#define SDMA0_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
++#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
++#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
++#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
++#define SDMA0_GFX_RB_CNTL__RB_PRIV_MASK 0x00800000L
++#define SDMA0_GFX_RB_CNTL__RB_VMID_MASK 0x0F000000L
++//SDMA0_GFX_RB_BASE
++#define SDMA0_GFX_RB_BASE__ADDR__SHIFT 0x0
++#define SDMA0_GFX_RB_BASE__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_GFX_RB_BASE_HI
++#define SDMA0_GFX_RB_BASE_HI__ADDR__SHIFT 0x0
++#define SDMA0_GFX_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
++//SDMA0_GFX_RB_RPTR
++#define SDMA0_GFX_RB_RPTR__OFFSET__SHIFT 0x0
++#define SDMA0_GFX_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_GFX_RB_RPTR_HI
++#define SDMA0_GFX_RB_RPTR_HI__OFFSET__SHIFT 0x0
++#define SDMA0_GFX_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_GFX_RB_WPTR
++#define SDMA0_GFX_RB_WPTR__OFFSET__SHIFT 0x0
++#define SDMA0_GFX_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_GFX_RB_WPTR_HI
++#define SDMA0_GFX_RB_WPTR_HI__OFFSET__SHIFT 0x0
++#define SDMA0_GFX_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_GFX_RB_WPTR_POLL_CNTL
++#define SDMA0_GFX_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
++#define SDMA0_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
++#define SDMA0_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
++#define SDMA0_GFX_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
++#define SDMA0_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
++#define SDMA0_GFX_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
++#define SDMA0_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
++#define SDMA0_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
++#define SDMA0_GFX_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
++#define SDMA0_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
++//SDMA0_GFX_RB_RPTR_ADDR_HI
++#define SDMA0_GFX_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA0_GFX_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_GFX_RB_RPTR_ADDR_LO
++#define SDMA0_GFX_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA0_GFX_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA0_GFX_IB_CNTL
++#define SDMA0_GFX_IB_CNTL__IB_ENABLE__SHIFT 0x0
++#define SDMA0_GFX_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
++#define SDMA0_GFX_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
++#define SDMA0_GFX_IB_CNTL__CMD_VMID__SHIFT 0x10
++#define SDMA0_GFX_IB_CNTL__IB_ENABLE_MASK 0x00000001L
++#define SDMA0_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
++#define SDMA0_GFX_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
++#define SDMA0_GFX_IB_CNTL__CMD_VMID_MASK 0x000F0000L
++//SDMA0_GFX_IB_RPTR
++#define SDMA0_GFX_IB_RPTR__OFFSET__SHIFT 0x2
++#define SDMA0_GFX_IB_RPTR__OFFSET_MASK 0x003FFFFCL
++//SDMA0_GFX_IB_OFFSET
++#define SDMA0_GFX_IB_OFFSET__OFFSET__SHIFT 0x2
++#define SDMA0_GFX_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
++//SDMA0_GFX_IB_BASE_LO
++#define SDMA0_GFX_IB_BASE_LO__ADDR__SHIFT 0x5
++#define SDMA0_GFX_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
++//SDMA0_GFX_IB_BASE_HI
++#define SDMA0_GFX_IB_BASE_HI__ADDR__SHIFT 0x0
++#define SDMA0_GFX_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_GFX_IB_SIZE
++#define SDMA0_GFX_IB_SIZE__SIZE__SHIFT 0x0
++#define SDMA0_GFX_IB_SIZE__SIZE_MASK 0x000FFFFFL
++//SDMA0_GFX_SKIP_CNTL
++#define SDMA0_GFX_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
++#define SDMA0_GFX_SKIP_CNTL__SKIP_COUNT_MASK 0x00003FFFL
++//SDMA0_GFX_CONTEXT_STATUS
++#define SDMA0_GFX_CONTEXT_STATUS__SELECTED__SHIFT 0x0
++#define SDMA0_GFX_CONTEXT_STATUS__IDLE__SHIFT 0x2
++#define SDMA0_GFX_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
++#define SDMA0_GFX_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
++#define SDMA0_GFX_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
++#define SDMA0_GFX_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
++#define SDMA0_GFX_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
++#define SDMA0_GFX_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
++#define SDMA0_GFX_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
++#define SDMA0_GFX_CONTEXT_STATUS__IDLE_MASK 0x00000004L
++#define SDMA0_GFX_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
++#define SDMA0_GFX_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
++#define SDMA0_GFX_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
++#define SDMA0_GFX_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
++#define SDMA0_GFX_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
++#define SDMA0_GFX_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
++//SDMA0_GFX_DOORBELL
++#define SDMA0_GFX_DOORBELL__ENABLE__SHIFT 0x1c
++#define SDMA0_GFX_DOORBELL__CAPTURED__SHIFT 0x1e
++#define SDMA0_GFX_DOORBELL__ENABLE_MASK 0x10000000L
++#define SDMA0_GFX_DOORBELL__CAPTURED_MASK 0x40000000L
++//SDMA0_GFX_CONTEXT_CNTL
++#define SDMA0_GFX_CONTEXT_CNTL__RESUME_CTX__SHIFT 0x10
++#define SDMA0_GFX_CONTEXT_CNTL__RESUME_CTX_MASK 0x00010000L
++//SDMA0_GFX_STATUS
++#define SDMA0_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
++#define SDMA0_GFX_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
++#define SDMA0_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
++#define SDMA0_GFX_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
++//SDMA0_GFX_DOORBELL_LOG
++#define SDMA0_GFX_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
++#define SDMA0_GFX_DOORBELL_LOG__DATA__SHIFT 0x2
++#define SDMA0_GFX_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
++#define SDMA0_GFX_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
++//SDMA0_GFX_WATERMARK
++#define SDMA0_GFX_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
++#define SDMA0_GFX_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
++#define SDMA0_GFX_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
++#define SDMA0_GFX_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
++//SDMA0_GFX_DOORBELL_OFFSET
++#define SDMA0_GFX_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
++#define SDMA0_GFX_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
++//SDMA0_GFX_CSA_ADDR_LO
++#define SDMA0_GFX_CSA_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA0_GFX_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA0_GFX_CSA_ADDR_HI
++#define SDMA0_GFX_CSA_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA0_GFX_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_GFX_IB_SUB_REMAIN
++#define SDMA0_GFX_IB_SUB_REMAIN__SIZE__SHIFT 0x0
++#define SDMA0_GFX_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
++//SDMA0_GFX_PREEMPT
++#define SDMA0_GFX_PREEMPT__IB_PREEMPT__SHIFT 0x0
++#define SDMA0_GFX_PREEMPT__IB_PREEMPT_MASK 0x00000001L
++//SDMA0_GFX_DUMMY_REG
++#define SDMA0_GFX_DUMMY_REG__DUMMY__SHIFT 0x0
++#define SDMA0_GFX_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
++//SDMA0_GFX_RB_WPTR_POLL_ADDR_HI
++#define SDMA0_GFX_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA0_GFX_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_GFX_RB_WPTR_POLL_ADDR_LO
++#define SDMA0_GFX_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA0_GFX_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA0_GFX_RB_AQL_CNTL
++#define SDMA0_GFX_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
++#define SDMA0_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
++#define SDMA0_GFX_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
++#define SDMA0_GFX_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
++#define SDMA0_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
++#define SDMA0_GFX_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
++//SDMA0_GFX_MINOR_PTR_UPDATE
++#define SDMA0_GFX_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
++#define SDMA0_GFX_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
++//SDMA0_GFX_MIDCMD_DATA0
++#define SDMA0_GFX_MIDCMD_DATA0__DATA0__SHIFT 0x0
++#define SDMA0_GFX_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
++//SDMA0_GFX_MIDCMD_DATA1
++#define SDMA0_GFX_MIDCMD_DATA1__DATA1__SHIFT 0x0
++#define SDMA0_GFX_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
++//SDMA0_GFX_MIDCMD_DATA2
++#define SDMA0_GFX_MIDCMD_DATA2__DATA2__SHIFT 0x0
++#define SDMA0_GFX_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
++//SDMA0_GFX_MIDCMD_DATA3
++#define SDMA0_GFX_MIDCMD_DATA3__DATA3__SHIFT 0x0
++#define SDMA0_GFX_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
++//SDMA0_GFX_MIDCMD_DATA4
++#define SDMA0_GFX_MIDCMD_DATA4__DATA4__SHIFT 0x0
++#define SDMA0_GFX_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
++//SDMA0_GFX_MIDCMD_DATA5
++#define SDMA0_GFX_MIDCMD_DATA5__DATA5__SHIFT 0x0
++#define SDMA0_GFX_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
++//SDMA0_GFX_MIDCMD_DATA6
++#define SDMA0_GFX_MIDCMD_DATA6__DATA6__SHIFT 0x0
++#define SDMA0_GFX_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
++//SDMA0_GFX_MIDCMD_DATA7
++#define SDMA0_GFX_MIDCMD_DATA7__DATA7__SHIFT 0x0
++#define SDMA0_GFX_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
++//SDMA0_GFX_MIDCMD_DATA8
++#define SDMA0_GFX_MIDCMD_DATA8__DATA8__SHIFT 0x0
++#define SDMA0_GFX_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
++//SDMA0_GFX_MIDCMD_CNTL
++#define SDMA0_GFX_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
++#define SDMA0_GFX_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
++#define SDMA0_GFX_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
++#define SDMA0_GFX_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
++#define SDMA0_GFX_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
++#define SDMA0_GFX_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
++#define SDMA0_GFX_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
++#define SDMA0_GFX_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
++//SDMA0_RLC0_RB_CNTL
++#define SDMA0_RLC0_RB_CNTL__RB_ENABLE__SHIFT 0x0
++#define SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT 0x1
++#define SDMA0_RLC0_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
++#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
++#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
++#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
++#define SDMA0_RLC0_RB_CNTL__RB_PRIV__SHIFT 0x17
++#define SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT 0x18
++#define SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK 0x00000001L
++#define SDMA0_RLC0_RB_CNTL__RB_SIZE_MASK 0x0000007EL
++#define SDMA0_RLC0_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
++#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
++#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
++#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
++#define SDMA0_RLC0_RB_CNTL__RB_PRIV_MASK 0x00800000L
++#define SDMA0_RLC0_RB_CNTL__RB_VMID_MASK 0x0F000000L
++//SDMA0_RLC0_RB_BASE
++#define SDMA0_RLC0_RB_BASE__ADDR__SHIFT 0x0
++#define SDMA0_RLC0_RB_BASE__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC0_RB_BASE_HI
++#define SDMA0_RLC0_RB_BASE_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC0_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
++//SDMA0_RLC0_RB_RPTR
++#define SDMA0_RLC0_RB_RPTR__OFFSET__SHIFT 0x0
++#define SDMA0_RLC0_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_RLC0_RB_RPTR_HI
++#define SDMA0_RLC0_RB_RPTR_HI__OFFSET__SHIFT 0x0
++#define SDMA0_RLC0_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_RLC0_RB_WPTR
++#define SDMA0_RLC0_RB_WPTR__OFFSET__SHIFT 0x0
++#define SDMA0_RLC0_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_RLC0_RB_WPTR_HI
++#define SDMA0_RLC0_RB_WPTR_HI__OFFSET__SHIFT 0x0
++#define SDMA0_RLC0_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_RLC0_RB_WPTR_POLL_CNTL
++#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
++#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
++#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
++#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
++#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
++#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
++#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
++#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
++#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
++#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
++//SDMA0_RLC0_RB_RPTR_ADDR_HI
++#define SDMA0_RLC0_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC0_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC0_RB_RPTR_ADDR_LO
++#define SDMA0_RLC0_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA0_RLC0_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA0_RLC0_IB_CNTL
++#define SDMA0_RLC0_IB_CNTL__IB_ENABLE__SHIFT 0x0
++#define SDMA0_RLC0_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
++#define SDMA0_RLC0_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
++#define SDMA0_RLC0_IB_CNTL__CMD_VMID__SHIFT 0x10
++#define SDMA0_RLC0_IB_CNTL__IB_ENABLE_MASK 0x00000001L
++#define SDMA0_RLC0_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
++#define SDMA0_RLC0_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
++#define SDMA0_RLC0_IB_CNTL__CMD_VMID_MASK 0x000F0000L
++//SDMA0_RLC0_IB_RPTR
++#define SDMA0_RLC0_IB_RPTR__OFFSET__SHIFT 0x2
++#define SDMA0_RLC0_IB_RPTR__OFFSET_MASK 0x003FFFFCL
++//SDMA0_RLC0_IB_OFFSET
++#define SDMA0_RLC0_IB_OFFSET__OFFSET__SHIFT 0x2
++#define SDMA0_RLC0_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
++//SDMA0_RLC0_IB_BASE_LO
++#define SDMA0_RLC0_IB_BASE_LO__ADDR__SHIFT 0x5
++#define SDMA0_RLC0_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
++//SDMA0_RLC0_IB_BASE_HI
++#define SDMA0_RLC0_IB_BASE_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC0_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC0_IB_SIZE
++#define SDMA0_RLC0_IB_SIZE__SIZE__SHIFT 0x0
++#define SDMA0_RLC0_IB_SIZE__SIZE_MASK 0x000FFFFFL
++//SDMA0_RLC0_SKIP_CNTL
++#define SDMA0_RLC0_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
++#define SDMA0_RLC0_SKIP_CNTL__SKIP_COUNT_MASK 0x00003FFFL
++//SDMA0_RLC0_CONTEXT_STATUS
++#define SDMA0_RLC0_CONTEXT_STATUS__SELECTED__SHIFT 0x0
++#define SDMA0_RLC0_CONTEXT_STATUS__IDLE__SHIFT 0x2
++#define SDMA0_RLC0_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
++#define SDMA0_RLC0_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
++#define SDMA0_RLC0_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
++#define SDMA0_RLC0_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
++#define SDMA0_RLC0_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
++#define SDMA0_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
++#define SDMA0_RLC0_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
++#define SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK 0x00000004L
++#define SDMA0_RLC0_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
++#define SDMA0_RLC0_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
++#define SDMA0_RLC0_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
++#define SDMA0_RLC0_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
++#define SDMA0_RLC0_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
++#define SDMA0_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
++//SDMA0_RLC0_DOORBELL
++#define SDMA0_RLC0_DOORBELL__ENABLE__SHIFT 0x1c
++#define SDMA0_RLC0_DOORBELL__CAPTURED__SHIFT 0x1e
++#define SDMA0_RLC0_DOORBELL__ENABLE_MASK 0x10000000L
++#define SDMA0_RLC0_DOORBELL__CAPTURED_MASK 0x40000000L
++//SDMA0_RLC0_STATUS
++#define SDMA0_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
++#define SDMA0_RLC0_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
++#define SDMA0_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
++#define SDMA0_RLC0_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
++//SDMA0_RLC0_DOORBELL_LOG
++#define SDMA0_RLC0_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
++#define SDMA0_RLC0_DOORBELL_LOG__DATA__SHIFT 0x2
++#define SDMA0_RLC0_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
++#define SDMA0_RLC0_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
++//SDMA0_RLC0_WATERMARK
++#define SDMA0_RLC0_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
++#define SDMA0_RLC0_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
++#define SDMA0_RLC0_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
++#define SDMA0_RLC0_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
++//SDMA0_RLC0_DOORBELL_OFFSET
++#define SDMA0_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
++#define SDMA0_RLC0_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
++//SDMA0_RLC0_CSA_ADDR_LO
++#define SDMA0_RLC0_CSA_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA0_RLC0_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA0_RLC0_CSA_ADDR_HI
++#define SDMA0_RLC0_CSA_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC0_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC0_IB_SUB_REMAIN
++#define SDMA0_RLC0_IB_SUB_REMAIN__SIZE__SHIFT 0x0
++#define SDMA0_RLC0_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
++//SDMA0_RLC0_PREEMPT
++#define SDMA0_RLC0_PREEMPT__IB_PREEMPT__SHIFT 0x0
++#define SDMA0_RLC0_PREEMPT__IB_PREEMPT_MASK 0x00000001L
++//SDMA0_RLC0_DUMMY_REG
++#define SDMA0_RLC0_DUMMY_REG__DUMMY__SHIFT 0x0
++#define SDMA0_RLC0_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
++//SDMA0_RLC0_RB_WPTR_POLL_ADDR_HI
++#define SDMA0_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC0_RB_WPTR_POLL_ADDR_LO
++#define SDMA0_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA0_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA0_RLC0_RB_AQL_CNTL
++#define SDMA0_RLC0_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
++#define SDMA0_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
++#define SDMA0_RLC0_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
++#define SDMA0_RLC0_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
++#define SDMA0_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
++#define SDMA0_RLC0_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
++//SDMA0_RLC0_MINOR_PTR_UPDATE
++#define SDMA0_RLC0_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
++#define SDMA0_RLC0_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
++//SDMA0_RLC0_MIDCMD_DATA0
++#define SDMA0_RLC0_MIDCMD_DATA0__DATA0__SHIFT 0x0
++#define SDMA0_RLC0_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
++//SDMA0_RLC0_MIDCMD_DATA1
++#define SDMA0_RLC0_MIDCMD_DATA1__DATA1__SHIFT 0x0
++#define SDMA0_RLC0_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
++//SDMA0_RLC0_MIDCMD_DATA2
++#define SDMA0_RLC0_MIDCMD_DATA2__DATA2__SHIFT 0x0
++#define SDMA0_RLC0_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
++//SDMA0_RLC0_MIDCMD_DATA3
++#define SDMA0_RLC0_MIDCMD_DATA3__DATA3__SHIFT 0x0
++#define SDMA0_RLC0_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
++//SDMA0_RLC0_MIDCMD_DATA4
++#define SDMA0_RLC0_MIDCMD_DATA4__DATA4__SHIFT 0x0
++#define SDMA0_RLC0_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
++//SDMA0_RLC0_MIDCMD_DATA5
++#define SDMA0_RLC0_MIDCMD_DATA5__DATA5__SHIFT 0x0
++#define SDMA0_RLC0_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
++//SDMA0_RLC0_MIDCMD_DATA6
++#define SDMA0_RLC0_MIDCMD_DATA6__DATA6__SHIFT 0x0
++#define SDMA0_RLC0_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
++//SDMA0_RLC0_MIDCMD_DATA7
++#define SDMA0_RLC0_MIDCMD_DATA7__DATA7__SHIFT 0x0
++#define SDMA0_RLC0_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
++//SDMA0_RLC0_MIDCMD_DATA8
++#define SDMA0_RLC0_MIDCMD_DATA8__DATA8__SHIFT 0x0
++#define SDMA0_RLC0_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
++//SDMA0_RLC0_MIDCMD_CNTL
++#define SDMA0_RLC0_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
++#define SDMA0_RLC0_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
++#define SDMA0_RLC0_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
++#define SDMA0_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
++#define SDMA0_RLC0_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
++#define SDMA0_RLC0_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
++#define SDMA0_RLC0_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
++#define SDMA0_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
++//SDMA0_RLC1_RB_CNTL
++#define SDMA0_RLC1_RB_CNTL__RB_ENABLE__SHIFT 0x0
++#define SDMA0_RLC1_RB_CNTL__RB_SIZE__SHIFT 0x1
++#define SDMA0_RLC1_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
++#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
++#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
++#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
++#define SDMA0_RLC1_RB_CNTL__RB_PRIV__SHIFT 0x17
++#define SDMA0_RLC1_RB_CNTL__RB_VMID__SHIFT 0x18
++#define SDMA0_RLC1_RB_CNTL__RB_ENABLE_MASK 0x00000001L
++#define SDMA0_RLC1_RB_CNTL__RB_SIZE_MASK 0x0000007EL
++#define SDMA0_RLC1_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
++#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
++#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
++#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
++#define SDMA0_RLC1_RB_CNTL__RB_PRIV_MASK 0x00800000L
++#define SDMA0_RLC1_RB_CNTL__RB_VMID_MASK 0x0F000000L
++//SDMA0_RLC1_RB_BASE
++#define SDMA0_RLC1_RB_BASE__ADDR__SHIFT 0x0
++#define SDMA0_RLC1_RB_BASE__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC1_RB_BASE_HI
++#define SDMA0_RLC1_RB_BASE_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC1_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
++//SDMA0_RLC1_RB_RPTR
++#define SDMA0_RLC1_RB_RPTR__OFFSET__SHIFT 0x0
++#define SDMA0_RLC1_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_RLC1_RB_RPTR_HI
++#define SDMA0_RLC1_RB_RPTR_HI__OFFSET__SHIFT 0x0
++#define SDMA0_RLC1_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_RLC1_RB_WPTR
++#define SDMA0_RLC1_RB_WPTR__OFFSET__SHIFT 0x0
++#define SDMA0_RLC1_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_RLC1_RB_WPTR_HI
++#define SDMA0_RLC1_RB_WPTR_HI__OFFSET__SHIFT 0x0
++#define SDMA0_RLC1_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
++//SDMA0_RLC1_RB_WPTR_POLL_CNTL
++#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
++#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
++#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
++#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
++#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
++#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
++#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
++#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
++#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
++#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
++//SDMA0_RLC1_RB_RPTR_ADDR_HI
++#define SDMA0_RLC1_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC1_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC1_RB_RPTR_ADDR_LO
++#define SDMA0_RLC1_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA0_RLC1_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA0_RLC1_IB_CNTL
++#define SDMA0_RLC1_IB_CNTL__IB_ENABLE__SHIFT 0x0
++#define SDMA0_RLC1_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
++#define SDMA0_RLC1_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
++#define SDMA0_RLC1_IB_CNTL__CMD_VMID__SHIFT 0x10
++#define SDMA0_RLC1_IB_CNTL__IB_ENABLE_MASK 0x00000001L
++#define SDMA0_RLC1_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
++#define SDMA0_RLC1_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
++#define SDMA0_RLC1_IB_CNTL__CMD_VMID_MASK 0x000F0000L
++//SDMA0_RLC1_IB_RPTR
++#define SDMA0_RLC1_IB_RPTR__OFFSET__SHIFT 0x2
++#define SDMA0_RLC1_IB_RPTR__OFFSET_MASK 0x003FFFFCL
++//SDMA0_RLC1_IB_OFFSET
++#define SDMA0_RLC1_IB_OFFSET__OFFSET__SHIFT 0x2
++#define SDMA0_RLC1_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
++//SDMA0_RLC1_IB_BASE_LO
++#define SDMA0_RLC1_IB_BASE_LO__ADDR__SHIFT 0x5
++#define SDMA0_RLC1_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
++//SDMA0_RLC1_IB_BASE_HI
++#define SDMA0_RLC1_IB_BASE_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC1_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC1_IB_SIZE
++#define SDMA0_RLC1_IB_SIZE__SIZE__SHIFT 0x0
++#define SDMA0_RLC1_IB_SIZE__SIZE_MASK 0x000FFFFFL
++//SDMA0_RLC1_SKIP_CNTL
++#define SDMA0_RLC1_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
++#define SDMA0_RLC1_SKIP_CNTL__SKIP_COUNT_MASK 0x00003FFFL
++//SDMA0_RLC1_CONTEXT_STATUS
++#define SDMA0_RLC1_CONTEXT_STATUS__SELECTED__SHIFT 0x0
++#define SDMA0_RLC1_CONTEXT_STATUS__IDLE__SHIFT 0x2
++#define SDMA0_RLC1_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
++#define SDMA0_RLC1_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
++#define SDMA0_RLC1_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
++#define SDMA0_RLC1_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
++#define SDMA0_RLC1_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
++#define SDMA0_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
++#define SDMA0_RLC1_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
++#define SDMA0_RLC1_CONTEXT_STATUS__IDLE_MASK 0x00000004L
++#define SDMA0_RLC1_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
++#define SDMA0_RLC1_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
++#define SDMA0_RLC1_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
++#define SDMA0_RLC1_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
++#define SDMA0_RLC1_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
++#define SDMA0_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
++//SDMA0_RLC1_DOORBELL
++#define SDMA0_RLC1_DOORBELL__ENABLE__SHIFT 0x1c
++#define SDMA0_RLC1_DOORBELL__CAPTURED__SHIFT 0x1e
++#define SDMA0_RLC1_DOORBELL__ENABLE_MASK 0x10000000L
++#define SDMA0_RLC1_DOORBELL__CAPTURED_MASK 0x40000000L
++//SDMA0_RLC1_STATUS
++#define SDMA0_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
++#define SDMA0_RLC1_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
++#define SDMA0_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
++#define SDMA0_RLC1_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
++//SDMA0_RLC1_DOORBELL_LOG
++#define SDMA0_RLC1_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
++#define SDMA0_RLC1_DOORBELL_LOG__DATA__SHIFT 0x2
++#define SDMA0_RLC1_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
++#define SDMA0_RLC1_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
++//SDMA0_RLC1_WATERMARK
++#define SDMA0_RLC1_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
++#define SDMA0_RLC1_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
++#define SDMA0_RLC1_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
++#define SDMA0_RLC1_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
++//SDMA0_RLC1_DOORBELL_OFFSET
++#define SDMA0_RLC1_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
++#define SDMA0_RLC1_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
++//SDMA0_RLC1_CSA_ADDR_LO
++#define SDMA0_RLC1_CSA_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA0_RLC1_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA0_RLC1_CSA_ADDR_HI
++#define SDMA0_RLC1_CSA_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC1_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC1_IB_SUB_REMAIN
++#define SDMA0_RLC1_IB_SUB_REMAIN__SIZE__SHIFT 0x0
++#define SDMA0_RLC1_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
++//SDMA0_RLC1_PREEMPT
++#define SDMA0_RLC1_PREEMPT__IB_PREEMPT__SHIFT 0x0
++#define SDMA0_RLC1_PREEMPT__IB_PREEMPT_MASK 0x00000001L
++//SDMA0_RLC1_DUMMY_REG
++#define SDMA0_RLC1_DUMMY_REG__DUMMY__SHIFT 0x0
++#define SDMA0_RLC1_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
++//SDMA0_RLC1_RB_WPTR_POLL_ADDR_HI
++#define SDMA0_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
++#define SDMA0_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
++//SDMA0_RLC1_RB_WPTR_POLL_ADDR_LO
++#define SDMA0_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
++#define SDMA0_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
++//SDMA0_RLC1_RB_AQL_CNTL
++#define SDMA0_RLC1_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
++#define SDMA0_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
++#define SDMA0_RLC1_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
++#define SDMA0_RLC1_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
++#define SDMA0_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
++#define SDMA0_RLC1_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
++//SDMA0_RLC1_MINOR_PTR_UPDATE
++#define SDMA0_RLC1_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
++#define SDMA0_RLC1_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
++//SDMA0_RLC1_MIDCMD_DATA0
++#define SDMA0_RLC1_MIDCMD_DATA0__DATA0__SHIFT 0x0
++#define SDMA0_RLC1_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
++//SDMA0_RLC1_MIDCMD_DATA1
++#define SDMA0_RLC1_MIDCMD_DATA1__DATA1__SHIFT 0x0
++#define SDMA0_RLC1_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
++//SDMA0_RLC1_MIDCMD_DATA2
++#define SDMA0_RLC1_MIDCMD_DATA2__DATA2__SHIFT 0x0
++#define SDMA0_RLC1_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
++//SDMA0_RLC1_MIDCMD_DATA3
++#define SDMA0_RLC1_MIDCMD_DATA3__DATA3__SHIFT 0x0
++#define SDMA0_RLC1_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
++//SDMA0_RLC1_MIDCMD_DATA4
++#define SDMA0_RLC1_MIDCMD_DATA4__DATA4__SHIFT 0x0
++#define SDMA0_RLC1_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
++//SDMA0_RLC1_MIDCMD_DATA5
++#define SDMA0_RLC1_MIDCMD_DATA5__DATA5__SHIFT 0x0
++#define SDMA0_RLC1_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
++//SDMA0_RLC1_MIDCMD_DATA6
++#define SDMA0_RLC1_MIDCMD_DATA6__DATA6__SHIFT 0x0
++#define SDMA0_RLC1_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
++//SDMA0_RLC1_MIDCMD_DATA7
++#define SDMA0_RLC1_MIDCMD_DATA7__DATA7__SHIFT 0x0
++#define SDMA0_RLC1_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
++//SDMA0_RLC1_MIDCMD_DATA8
++#define SDMA0_RLC1_MIDCMD_DATA8__DATA8__SHIFT 0x0
++#define SDMA0_RLC1_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
++//SDMA0_RLC1_MIDCMD_CNTL
++#define SDMA0_RLC1_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
++#define SDMA0_RLC1_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
++#define SDMA0_RLC1_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
++#define SDMA0_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
++#define SDMA0_RLC1_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
++#define SDMA0_RLC1_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
++#define SDMA0_RLC1_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
++#define SDMA0_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
++
++#endif
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5375-vga_switcheroo-Use-device-link-for-HDA-controller.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5375-vga_switcheroo-Use-device-link-for-HDA-controller.patch
new file mode 100644
index 00000000..e10275ac
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5375-vga_switcheroo-Use-device-link-for-HDA-controller.patch
@@ -0,0 +1,141 @@
+From d15ce59ff967cd1764a492faa0c99ed7d00cdd7d Mon Sep 17 00:00:00 2001
+From: Lukas Wunner <lukas@wunner.de>
+Date: Sat, 3 Mar 2018 10:53:24 +0100
+Subject: [PATCH 5375/5725] vga_switcheroo: Use device link for HDA controller
+
+Back in 2013, runtime PM for GPUs with integrated HDA controller was
+introduced with commits 0d69704ae348 ("gpu/vga_switcheroo: add driver
+control power feature. (v3)") and 246efa4a072f ("snd/hda: add runtime
+suspend/resume on optimus support (v4)").
+
+Briefly, the idea was that the HDA controller is forced on and off in
+unison with the GPU.
+
+The original code is mostly still in place even though it was never a
+100% perfect solution: E.g. on access to the HDA controller, the GPU
+is powered up via vga_switcheroo_runtime_resume_hdmi_audio() but there
+are no provisions to keep it resumed until access to the HDA controller
+has ceased: The GPU autosuspends after 5 seconds, rendering the HDA
+controller inaccessible.
+
+Additionally, a kludge is required when hda_intel.c probes: It has to
+check whether the GPU is powered down (check_hdmi_disabled()) and defer
+probing if so.
+
+However in the meantime (in v4.10) the driver core has gained a feature
+called device links which promises to solve such issues in a clean way:
+It allows us to declare a dependency from the HDA controller (consumer)
+to the GPU (supplier). The PM core then automagically ensures that the
+GPU is runtime resumed as long as the HDA controller's ->probe hook is
+executed and whenever the HDA controller is accessed.
+
+By default, the HDA controller has a dependency on its parent, a PCIe
+Root Port. Adding a device link creates another dependency on its
+sibling:
+
+ PCIe Root Port
+ ^ ^
+ | |
+ | |
+ HDA ===> GPU
+
+The device link is not only used for runtime PM, it also guarantees that
+on system sleep, the HDA controller suspends before the GPU and resumes
+after the GPU, and on system shutdown the HDA controller's ->shutdown
+hook is executed before the one of the GPU. It is a complete solution.
+
+Using this functionality is as simple as calling device_link_add(),
+which results in a dmesg entry like this:
+
+ pci 0000:01:00.1: Linked as a consumer to 0000:01:00.0
+
+The code for the GPU-governed audio power management can thus be removed
+(except where it's still needed for legacy manual power control).
+
+The device link is added in a PCI quirk rather than in hda_intel.c.
+It is therefore legal for the GPU to runtime suspend to D3cold even if
+the HDA controller is not bound to a driver or if CONFIG_SND_HDA_INTEL
+is not enabled, for accesses to the HDA controller will cause the GPU to
+wake up regardless if they're occurring outside of hda_intel.c (think
+config space readout via sysfs).
+
+Contrary to the previous implementation, the HDA controller's power
+state is now self-governed, rather than GPU-governed, whereas the GPU's
+power state is no longer fully self-governed. (The HDA controller needs
+to runtime suspend before the GPU can.)
+
+It is thus crucial that runtime PM is always activated on the HDA
+controller even if CONFIG_SND_HDA_POWER_SAVE_DEFAULT is set to 0 (which
+is the default), lest the GPU stays awake. This is achieved by setting
+the auto_runtime_pm flag on every codec and the AZX_DCAPS_PM_RUNTIME
+flag on the HDA controller.
+
+A side effect is that power consumption might be reduced if the GPU is
+in use but the HDA controller is not, because the HDA controller is now
+allowed to go to D3hot. Before, it was forced to stay in D0 as long as
+the GPU was in use. (There is no reduction in power consumption on my
+Nvidia GK107, but there might be on other chips.)
+
+The code paths for legacy manual power control are adjusted such that
+runtime PM is disabled during power off, thereby preventing the PM core
+from resuming the HDA controller.
+
+Note that the device link is not only added on vga_switcheroo capable
+systems, but for *any* GPU with integrated HDA controller. The idea is
+that the HDA controller streams audio via connectors located on the GPU,
+so the GPU needs to be on for the HDA controller to do anything useful.
+
+This commit implicitly fixes an unbalanced runtime PM ref upon unbind of
+hda_intel.c: On ->probe, a runtime PM ref was previously released under
+the condition "azx_has_pm_runtime(chip) || hda->use_vga_switcheroo", but
+on ->remove a runtime PM ref was only acquired under the first of those
+conditions. Thus, binding and unbinding the driver twice on a
+vga_switcheroo capable system caused the runtime PM refcount to drop
+below zero. The issue is resolved because the AZX_DCAPS_PM_RUNTIME flag
+is now always set if use_vga_switcheroo is true.
+
+For more information on device links please refer to:
+https://www.kernel.org/doc/html/latest/driver-api/device_link.html
+Documentation/driver-api/device_link.rst
+
+Cc: Dave Airlie <airlied@redhat.com>
+Cc: Ben Skeggs <bskeggs@redhat.com>
+Cc: Alex Deucher <alexander.deucher@amd.com>
+Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Acked-by: Bjorn Helgaas <bhelgaas@google.com>
+Reviewed-by: Takashi Iwai <tiwai@suse.de>
+Reviewed-by: Peter Wu <peter@lekensteyn.nl>
+Tested-by: Kai Heng Feng <kai.heng.feng@canonical.com> # AMD PowerXpress
+Tested-by: Mike Lothian <mike@fireburn.co.uk> # AMD PowerXpress
+Tested-by: Denis Lisov <dennis.lissov@gmail.com> # Nvidia Optimus
+Tested-by: Peter Wu <peter@lekensteyn.nl> # Nvidia Optimus
+Tested-by: Lukas Wunner <lukas@wunner.de> # MacBook Pro
+Signed-off-by: Lukas Wunner <lukas@wunner.de>
+Link: https://patchwork.freedesktop.org/patch/msgid/51bd38360ff502a8c42b1ebf4405ee1d3f27118d.1520068884.git.lukas@wunner.de
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 2e40e42..2607526 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -988,7 +988,6 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
+
+ drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+ drm_kms_helper_poll_disable(drm_dev);
+- vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
+
+ ret = amdgpu_device_suspend(drm_dev, false, false);
+ pci_save_state(pdev);
+@@ -1025,7 +1024,6 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
+
+ ret = amdgpu_device_resume(drm_dev, false, false);
+ drm_kms_helper_poll_enable(drm_dev);
+- vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
+ drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
+ return 0;
+ }
+--
+2.7.4
+
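The commit message above describes the device-link mechanism in general terms. A minimal sketch of how such a link could be declared from a PCI quirk is shown below; the helper name quirk_gpu_hda_link() and the exact lookup of the GPU function are assumptions for illustration and are not part of the hunk this series actually carries (which only touches amdgpu_drv.c).

    /* Sketch: declare the HDA controller (PCI function 1) as a runtime-PM
     * consumer of the GPU (function 0 of the same device), so the PM core
     * resumes the GPU whenever the HDA controller is accessed.
     */
    #include <linux/pci.h>
    #include <linux/device.h>

    static void quirk_gpu_hda_link(struct pci_dev *hda)
    {
            struct pci_dev *gpu;

            if (PCI_FUNC(hda->devfn) != 1)          /* HDA is function 1 */
                    return;

            gpu = pci_get_domain_bus_and_slot(pci_domain_nr(hda->bus),
                                              hda->bus->number,
                                              hda->devfn & ~7); /* function 0 */
            if (!gpu)
                    return;

            /* consumer = HDA, supplier = GPU */
            if (!device_link_add(&hda->dev, &gpu->dev, DL_FLAG_PM_RUNTIME))
                    dev_err(&hda->dev, "cannot link HDA to GPU %s\n",
                            pci_name(gpu));

            pci_dev_put(gpu);
    }
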
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5376-drm-amd-pp-fix-missing-CONFIG_ACPI.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5376-drm-amd-pp-fix-missing-CONFIG_ACPI.patch
new file mode 100644
index 00000000..02572ef2
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5376-drm-amd-pp-fix-missing-CONFIG_ACPI.patch
@@ -0,0 +1,34 @@
+From 45a7631016a0a97b25e6d226970462e2462d0d44 Mon Sep 17 00:00:00 2001
+From: Dave Airlie <airlied@redhat.com>
+Date: Wed, 14 Mar 2018 10:54:55 +1000
+Subject: [PATCH 5376/5725] drm/amd/pp: fix missing CONFIG_ACPI.
+
+This was stopping me building on ARM after last pull.
+
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+index 263a781..0bfb297 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+@@ -3899,12 +3899,14 @@ static int smu7_notify_link_speed_change_after_state_change(
+ smu7_get_current_pcie_speed(hwmgr) > 0)
+ return 0;
+
++#ifdef CONFIG_ACPI
+ if (amdgpu_acpi_pcie_performance_request(hwmgr->adev, request, false)) {
+ if (PP_PCIEGen2 == target_link_speed)
+ pr_info("PSPP request to switch to Gen2 from Gen3 Failed!");
+ else
+ pr_info("PSPP request to switch to Gen1 from Gen2 Failed!");
+ }
++#endif
+ }
+
+ return 0;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5377-drm-amdgpu-sdma4-use-a-helper-for-SDMA_OP_POLL_REGME.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5377-drm-amdgpu-sdma4-use-a-helper-for-SDMA_OP_POLL_REGME.patch
new file mode 100644
index 00000000..35ae747d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5377-drm-amdgpu-sdma4-use-a-helper-for-SDMA_OP_POLL_REGME.patch
@@ -0,0 +1,116 @@
+From 9b328cc05c7271009511c7bbed89d2666a0253e0 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Tue, 27 Mar 2018 16:37:30 -0500
+Subject: [PATCH 5377/5725] drm/amdgpu/sdma4: use a helper for
+ SDMA_OP_POLL_REGMEM
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Rather than opencoding it in a bunch of functions.
+
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 62 +++++++++++++++++++---------------
+ 1 file changed, 34 insertions(+), 28 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+index ff93ef6..0304797 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+@@ -459,6 +459,31 @@ static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
+
+ }
+
++static void sdma_v4_0_wait_reg_mem(struct amdgpu_ring *ring,
++ int mem_space, int hdp,
++ uint32_t addr0, uint32_t addr1,
++ uint32_t ref, uint32_t mask,
++ uint32_t inv)
++{
++ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
++ SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(hdp) |
++ SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(mem_space) |
++ SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
++ if (mem_space) {
++ /* memory */
++ amdgpu_ring_write(ring, addr0);
++ amdgpu_ring_write(ring, addr1);
++ } else {
++ /* registers */
++ amdgpu_ring_write(ring, addr0 << 2);
++ amdgpu_ring_write(ring, addr1 << 2);
++ }
++ amdgpu_ring_write(ring, ref); /* reference */
++ amdgpu_ring_write(ring, mask); /* mask */
++ amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
++ SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(inv)); /* retry count, poll interval */
++}
++
+ /**
+ * sdma_v4_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
+ *
+@@ -477,15 +502,10 @@ static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+ else
+ ref_and_mask = nbio_hf_reg->ref_and_mask_sdma1;
+
+- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
+- SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
+- SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
+- amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_done_offset(adev)) << 2);
+- amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_req_offset(adev)) << 2);
+- amdgpu_ring_write(ring, ref_and_mask); /* reference */
+- amdgpu_ring_write(ring, ref_and_mask); /* mask */
+- amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
+- SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
++ sdma_v4_0_wait_reg_mem(ring, 0, 1,
++ adev->nbio_funcs->get_hdp_flush_done_offset(adev),
++ adev->nbio_funcs->get_hdp_flush_req_offset(adev),
++ ref_and_mask, ref_and_mask, 10);
+ }
+
+ static void sdma_v4_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+@@ -1223,16 +1243,10 @@ static void sdma_v4_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+ uint64_t addr = ring->fence_drv.gpu_addr;
+
+ /* wait for idle */
+- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
+- SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
+- SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
+- SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
+- amdgpu_ring_write(ring, addr & 0xfffffffc);
+- amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+- amdgpu_ring_write(ring, seq); /* reference */
+- amdgpu_ring_write(ring, 0xffffffff); /* mask */
+- amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
+- SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
++ sdma_v4_0_wait_reg_mem(ring, 1, 0,
++ addr & 0xfffffffc,
++ upper_32_bits(addr) & 0xffffffff,
++ seq, 0xffffffff, 4);
+ }
+
+
+@@ -1263,15 +1277,7 @@ static void sdma_v4_0_ring_emit_wreg(struct amdgpu_ring *ring,
+ static void sdma_v4_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val, uint32_t mask)
+ {
+- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
+- SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
+- SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */
+- amdgpu_ring_write(ring, reg << 2);
+- amdgpu_ring_write(ring, 0);
+- amdgpu_ring_write(ring, val); /* reference */
+- amdgpu_ring_write(ring, mask); /* mask */
+- amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
+- SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
++ sdma_v4_0_wait_reg_mem(ring, 0, 0, reg, 0, val, mask, 10);
+ }
+
+ static int sdma_v4_0_early_init(void *handle)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5378-drm-amdgpu-include-pagemap.h-for-release_pages.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5378-drm-amdgpu-include-pagemap.h-for-release_pages.patch
new file mode 100644
index 00000000..f5b64f38
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5378-drm-amdgpu-include-pagemap.h-for-release_pages.patch
@@ -0,0 +1,29 @@
+From 29be53e8944141d038206179d53514d077f9bad1 Mon Sep 17 00:00:00 2001
+From: Stephen Rothwell <sfr@canb.auug.org.au>
+Date: Wed, 16 May 2018 16:43:34 +1000
+Subject: [PATCH 5378/5725] drm/amdgpu: include pagemap.h for release_pages()
+
+Fixes: 5ae0283e831a ("drm/amdgpu: Add userptr support for KFD")
+Cc: Felix Kuehling <Felix.Kuehling@amd.com>
+Cc: Oded Gabbay <oded.gabbay@gmail.com>
+Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index 0b73606..1390136 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -23,6 +23,7 @@
+ #define pr_fmt(fmt) "kfd2kgd: " fmt
+
+ #include <linux/list.h>
++#include <linux/pagemap.h>
+ #include <linux/sched/mm.h>
+ #include <drm/drmP.h>
+ #include "amdgpu_object.h"
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5379-drm-amdgpu-fix-32-bit-build-warning.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5379-drm-amdgpu-fix-32-bit-build-warning.patch
new file mode 100644
index 00000000..5cd60284
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5379-drm-amdgpu-fix-32-bit-build-warning.patch
@@ -0,0 +1,46 @@
+From fe216fb5fd5cbfcf2f0e88514529d9601ca1aaf2 Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Fri, 25 May 2018 17:50:09 +0200
+Subject: [PATCH 5379/5725] drm/amdgpu: fix 32-bit build warning
+
+Casting a pointer to a 64-bit type causes a warning on 32-bit targets:
+
+drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c:473:24: error: cast from pointer to integer of different size [-Werror=pointer-to-int-cast]
+ lower_32_bits((uint64_t)wptr));
+ ^
+drivers/gpu/drm/amd/amdgpu/amdgpu.h:1701:53: note: in definition of macro 'WREG32'
+ #define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), 0)
+ ^
+drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c:473:10: note: in expansion of macro 'lower_32_bits'
+ lower_32_bits((uint64_t)wptr));
+ ^~~~~~~~~~~~~
+
+The correct method is to cast to 'uintptr_t'.
+
+Fixes: d5a114a6c5f7 ("drm/amdgpu: Add GFXv9 kfd2kgd interface functions")
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
+Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+index b6852a1..c47a75d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+@@ -532,9 +532,9 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI),
+ upper_32_bits(guessed_wptr));
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR),
+- lower_32_bits((uint64_t)wptr));
++ lower_32_bits((uintptr_t)wptr));
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
+- upper_32_bits((uint64_t)wptr));
++ upper_32_bits((uintptr_t)wptr));
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1),
+ get_queue_mask(adev, pipe_id, queue_id));
+ }
+--
+2.7.4
+
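The warning quoted above comes from casting a pointer directly to a 64-bit integer on a 32-bit target. A small stand-alone sketch of the difference follows; the helper names are made up for illustration only.

    /* Sketch: (uint64_t)ptr is a pointer-to-integer cast of a different size
     * on 32-bit builds and triggers -Wpointer-to-int-cast; casting through
     * uintptr_t first keeps the pointer cast the same width as the pointer,
     * and any further widening is an ordinary integer conversion.
     */
    #include <stdint.h>

    static inline uint32_t wptr_lo(void *wptr)
    {
            return (uint32_t)(uintptr_t)wptr;   /* like lower_32_bits((uintptr_t)wptr) */
    }

    static inline uint32_t wptr_hi(void *wptr)
    {
            /* zero on 32-bit targets, upper half of the address on 64-bit */
            return (uint32_t)((uint64_t)(uintptr_t)wptr >> 32);
    }
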
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5380-drm-amdgpu-Add-AMDGPU_GPU_PAGES_IN_CPU_PAGE-define.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5380-drm-amdgpu-Add-AMDGPU_GPU_PAGES_IN_CPU_PAGE-define.patch
new file mode 100644
index 00000000..5146212a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5380-drm-amdgpu-Add-AMDGPU_GPU_PAGES_IN_CPU_PAGE-define.patch
@@ -0,0 +1,116 @@
+From a71470f6af615ad876e437d7e531bf5f674f604e Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Michel=20D=C3=A4nzer?= <michel.daenzer@amd.com>
+Date: Fri, 22 Jun 2018 18:54:03 +0200
+Subject: [PATCH 5380/5725] drm/amdgpu: Add AMDGPU_GPU_PAGES_IN_CPU_PAGE define
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+To hopefully make the code dealing with GPU vs CPU pages a little
+clearer.
+
+Suggested-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 8 ++++----
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h | 2 ++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 8 ++++----
+ 3 files changed, 10 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+index 5586874..5d141ab 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+@@ -231,7 +231,7 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
+ }
+
+ t = offset / AMDGPU_GPU_PAGE_SIZE;
+- p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
++ p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
+ for (i = 0; i < pages; i++, p++) {
+ #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
+ adev->gart.pages[p] = NULL;
+@@ -240,7 +240,7 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
+ if (!adev->gart.ptr)
+ continue;
+
+- for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
++ for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) {
+ amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr,
+ t, page_base, flags);
+ page_base += AMDGPU_GPU_PAGE_SIZE;
+@@ -279,7 +279,7 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
+
+ for (i = 0; i < pages; i++) {
+ page_base = dma_addr[i];
+- for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
++ for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) {
+ amdgpu_gmc_set_pte_pde(adev, dst, t, page_base, flags);
+ page_base += AMDGPU_GPU_PAGE_SIZE;
+ }
+@@ -316,7 +316,7 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
+
+ #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
+ t = offset / AMDGPU_GPU_PAGE_SIZE;
+- p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
++ p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
+ for (i = 0; i < pages; i++, p++)
+ adev->gart.pages[p] = pagelist ? pagelist[i] : NULL;
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
+index 5ee8f20..9ff6288 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
+@@ -37,6 +37,8 @@ struct amdgpu_bo;
+ #define AMDGPU_GPU_PAGE_SHIFT 12
+ #define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)
+
++#define AMDGPU_GPU_PAGES_IN_CPU_PAGE (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE)
++
+ struct amdgpu_gart {
+ struct amdgpu_bo *bo;
+ void *ptr;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 063f0d0..adb61a2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -1574,13 +1574,13 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
+ if (nodes) {
+ addr = nodes->start << PAGE_SHIFT;
+ max_entries = (nodes->size - pfn) *
+- (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
++ AMDGPU_GPU_PAGES_IN_CPU_PAGE;
+ switch (mem->mem_type) {
+ case TTM_PL_TT:
+ max_entries = min(max_entries, 16ull * 1024ull);
+
+ for (count = 1;
+- count < max_entries / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
++ count < max_entries / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
+ ++count) {
+ uint64_t idx = pfn + count;
+
+@@ -1594,7 +1594,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
+ dma_addr = pages_addr;
+ } else {
+ addr = pages_addr[pfn];
+- max_entries = count * (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
++ max_entries = count * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
+ }
+ break;
+ case AMDGPU_PL_DGMA_IMPORT:
+@@ -1630,7 +1630,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
+ if (r)
+ return r;
+
+- pfn += (last - start + 1) / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
++ pfn += (last - start + 1) / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
+ if (nodes && nodes->size == pfn) {
+ pfn = 0;
+ ++nodes;
+--
+2.7.4
+
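The define added above only names a ratio that was previously spelled out at every call site. As a rough worked example (page sizes assumed for illustration, not taken from the patch): with the 4 KiB AMDGPU_GPU_PAGE_SIZE and the common 4 KiB CPU PAGE_SIZE the ratio is 1, while a kernel built with 64 KiB CPU pages maps 16 GPU pages per CPU page.

    /* Sketch of the relationship; the numbers in the comments are examples. */
    #define AMDGPU_GPU_PAGE_SIZE          4096   /* 1 << AMDGPU_GPU_PAGE_SHIFT */
    #define AMDGPU_GPU_PAGES_IN_CPU_PAGE  (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE)
    /* PAGE_SIZE == 4096   ->  1 GPU page  per CPU page
     * PAGE_SIZE == 65536  -> 16 GPU pages per CPU page
     */
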
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5381-drm-amd-display-Use-2-factor-allocator-calls.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5381-drm-amd-display-Use-2-factor-allocator-calls.patch
new file mode 100644
index 00000000..544388a3
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5381-drm-amd-display-Use-2-factor-allocator-calls.patch
@@ -0,0 +1,54 @@
+From bea846ec51341d4256c2cc400aa7dc1d619863f9 Mon Sep 17 00:00:00 2001
+From: Kees Cook <keescook@chromium.org>
+Date: Wed, 4 Jul 2018 10:27:30 -0700
+Subject: [PATCH 5381/5725] drm/amd/display: Use 2-factor allocator calls
+
+As already done treewide, switch from open-coded multiplication to
+2-factor allocation helper.
+
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/display/modules/color/color_gamma.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+index e130aac..0fe9882 100644
+--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
++++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+@@ -32,6 +32,8 @@
+ #define NUM_REGIONS 32
+ #define MAX_HW_POINTS (NUM_PTS_IN_REGION*NUM_REGIONS)
+
++#define kvcalloc(n, size, gfp) kvzalloc(((n)*(size)), gfp)
++
+ static struct hw_x_point coordinates_x[MAX_HW_POINTS + 2];
+
+ static struct fixed31_32 pq_table[MAX_HW_POINTS + 2];
+@@ -1721,8 +1723,8 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
+ kvfree(rgb_regamma);
+ } else if (trans == TRANSFER_FUNCTION_HLG ||
+ trans == TRANSFER_FUNCTION_HLG12) {
+- rgb_regamma = kvzalloc(sizeof(*rgb_regamma) *
+- (MAX_HW_POINTS + _EXTRA_POINTS),
++ rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
++ sizeof(*rgb_regamma),
+ GFP_KERNEL);
+ if (!rgb_regamma)
+ goto rgb_regamma_alloc_fail;
+@@ -1800,8 +1802,8 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
+ kvfree(rgb_degamma);
+ } else if (trans == TRANSFER_FUNCTION_HLG ||
+ trans == TRANSFER_FUNCTION_HLG12) {
+- rgb_degamma = kvzalloc(sizeof(*rgb_degamma) *
+- (MAX_HW_POINTS + _EXTRA_POINTS),
++ rgb_degamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
++ sizeof(*rgb_degamma),
+ GFP_KERNEL);
+ if (!rgb_degamma)
+ goto rgb_degamma_alloc_fail;
+--
+2.7.4
+
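The conversion above follows the treewide pattern of passing the element count and the element size to the allocator as two separate factors instead of open-coding the multiplication. Because this 4.14-based tree has no kvcalloc(), the patch defines a local fallback on top of kvzalloc() that only matches the calling convention; the fallback itself does not overflow-check the product. A small sketch of the before/after shape is below, with the wrapper function assumed for illustration.

    /* Sketch: 2-factor allocation form, mirroring the fallback this patch adds. */
    #ifndef kvcalloc
    #define kvcalloc(n, size, gfp) kvzalloc((n) * (size), gfp)
    #endif

    static struct pwl_float_data_ex *alloc_regamma_points(void)
    {
            struct pwl_float_data_ex *pts;

            /* was: pts = kvzalloc(sizeof(*pts) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL); */
            pts = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*pts), GFP_KERNEL);
            return pts;
    }
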
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5382-drm-amdgpu-move-context-related-stuff-to-amdgpu_ctx..patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5382-drm-amdgpu-move-context-related-stuff-to-amdgpu_ctx..patch
new file mode 100644
index 00000000..bed7eac5
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5382-drm-amdgpu-move-context-related-stuff-to-amdgpu_ctx..patch
@@ -0,0 +1,251 @@
+From 1d42babc16089c12bb60a35c9726619ce7d9d05d Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Wed, 18 Jul 2018 16:34:49 +0200
+Subject: [PATCH 5382/5725] drm/amdgpu: move context related stuff to
+ amdgpu_ctx.h
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Further unmangle amdgpu.h.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 75 +----------------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h | 100 +++++++++++++++++++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 1 +
+ 3 files changed, 103 insertions(+), 73 deletions(-)
+ create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 7a8b8fe..c7736d0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -28,6 +28,8 @@
+ #ifndef __AMDGPU_H__
+ #define __AMDGPU_H__
+
++#include "amdgpu_ctx.h"
++
+ #include <linux/atomic.h>
+ #include <linux/wait.h>
+ #include <linux/list.h>
+@@ -233,8 +235,6 @@ int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
+ bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type);
+
+-#define AMDGPU_MAX_IP_NUM 16
+-
+ struct amdgpu_ip_block_status {
+ bool valid;
+ bool sw;
+@@ -496,20 +496,6 @@ struct amdgpu_ib {
+
+ extern const struct drm_sched_backend_ops amdgpu_sched_ops;
+
+-/*
+- * Queue manager
+- */
+-struct amdgpu_queue_mapper {
+- int hw_ip;
+- struct mutex lock;
+- /* protected by lock */
+- struct amdgpu_ring *queue_map[AMDGPU_MAX_RINGS];
+-};
+-
+-struct amdgpu_queue_mgr {
+- struct amdgpu_queue_mapper mapper[AMDGPU_MAX_IP_NUM];
+-};
+-
+ int amdgpu_queue_mgr_init(struct amdgpu_device *adev,
+ struct amdgpu_queue_mgr *mgr);
+ int amdgpu_queue_mgr_fini(struct amdgpu_device *adev,
+@@ -520,63 +506,6 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
+ struct amdgpu_ring **out_ring);
+
+ /*
+- * context related structures
+- */
+-
+-struct amdgpu_ctx_ring {
+- uint64_t sequence;
+- struct dma_fence **fences;
+- struct drm_sched_entity entity;
+- struct list_head sem_dep_list;
+- struct mutex sem_lock;
+-};
+-
+-struct amdgpu_ctx {
+- struct kref refcount;
+- struct amdgpu_device *adev;
+- struct amdgpu_queue_mgr queue_mgr;
+- unsigned reset_counter;
+- unsigned reset_counter_query;
+- uint32_t vram_lost_counter;
+- spinlock_t ring_lock;
+- struct dma_fence **fences;
+- struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS];
+- bool preamble_presented;
+- enum drm_sched_priority init_priority;
+- enum drm_sched_priority override_priority;
+- struct mutex lock;
+- atomic_t guilty;
+-};
+-
+-struct amdgpu_ctx_mgr {
+- struct amdgpu_device *adev;
+- struct mutex lock;
+- /* protected by lock */
+- struct idr ctx_handles;
+-};
+-
+-struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
+-int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
+-
+-void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
+- struct dma_fence *fence, uint64_t *seq);
+-struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
+- struct amdgpu_ring *ring, uint64_t seq);
+-void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
+- enum drm_sched_priority priority);
+-
+-int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
+- struct drm_file *filp);
+-
+-int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id);
+-
+-void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
+-void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
+-void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr);
+-void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
+-
+-
+-/*
+ * file private structure
+ */
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
+new file mode 100644
+index 0000000..b6eecc4
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
+@@ -0,0 +1,100 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++#ifndef __AMDGPU_CTX_H__
++#define __AMDGPU_CTX_H__
++
++#include "amdgpu_ring.h"
++
++struct drm_device;
++struct drm_file;
++struct amdgpu_fpriv;
++
++#define AMDGPU_MAX_IP_NUM 16
++
++/*
++ * Queue manager
++ */
++struct amdgpu_queue_mapper {
++ int hw_ip;
++ struct mutex lock;
++ /* protected by lock */
++ struct amdgpu_ring *queue_map[AMDGPU_MAX_RINGS];
++};
++
++struct amdgpu_queue_mgr {
++ struct amdgpu_queue_mapper mapper[AMDGPU_MAX_IP_NUM];
++};
++
++struct amdgpu_ctx_ring {
++ uint64_t sequence;
++ struct dma_fence **fences;
++ struct drm_sched_entity entity;
++ struct list_head sem_dep_list;
++ struct mutex sem_lock;
++};
++
++struct amdgpu_ctx {
++ struct kref refcount;
++ struct amdgpu_device *adev;
++ struct amdgpu_queue_mgr queue_mgr;
++ unsigned reset_counter;
++ unsigned reset_counter_query;
++ uint32_t vram_lost_counter;
++ spinlock_t ring_lock;
++ struct dma_fence **fences;
++ struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS];
++ bool preamble_presented;
++ enum drm_sched_priority init_priority;
++ enum drm_sched_priority override_priority;
++ struct mutex lock;
++ atomic_t guilty;
++};
++
++struct amdgpu_ctx_mgr {
++ struct amdgpu_device *adev;
++ struct mutex lock;
++ /* protected by lock */
++ struct idr ctx_handles;
++};
++
++struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
++int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
++
++void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
++ struct dma_fence *fence, uint64_t *seq);
++struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
++ struct amdgpu_ring *ring, uint64_t seq);
++void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
++ enum drm_sched_priority priority);
++
++int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *filp);
++
++int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id);
++
++void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
++void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
++void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr);
++void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
++
++#endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+index 6467c9c..a42130e 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+@@ -27,6 +27,7 @@
+ #include <drm/amdgpu_drm.h>
+ #include <drm/gpu_scheduler.h>
+ #include <drm/drm_print.h>
++#include <drm/drmP.h>
+
+ /* max number of rings */
+ #define AMDGPU_MAX_RINGS 21
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5383-drm-amdgpu-add-status-checking-after-fw-is-loaded.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5383-drm-amdgpu-add-status-checking-after-fw-is-loaded.patch
new file mode 100644
index 00000000..4ae26b1c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5383-drm-amdgpu-add-status-checking-after-fw-is-loaded.patch
@@ -0,0 +1,35 @@
+From 20b45778279695fa3384c3cee60889a8a880febb Mon Sep 17 00:00:00 2001
+From: Huang Rui <ray.huang@amd.com>
+Date: Sun, 5 Aug 2018 12:45:35 +0800
+Subject: [PATCH 5383/5725] drm/amdgpu: add status checking after fw is loaded
+
+The status field must be 0 after FW is loaded.
+
+Signed-off-by: Huang Rui <ray.huang@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+index f74f155..2ca9245 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+@@ -134,6 +134,13 @@ psp_cmd_submit_buf(struct psp_context *psp,
+ msleep(1);
+ }
+
++ /* the status field must be 0 after FW is loaded */
++ if (ucode && psp->cmd_buf_mem->resp.status) {
++ DRM_ERROR("failed loading with status (%d) and ucode id (%d)\n",
++ psp->cmd_buf_mem->resp.status, ucode->ucode_id);
++ return -EINVAL;
++ }
++
+ if (ucode) {
+ ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
+ ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5384-drm-amdgpu-revert-psp-firmware-load-status-check.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5384-drm-amdgpu-revert-psp-firmware-load-status-check.patch
new file mode 100644
index 00000000..45e12397
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5384-drm-amdgpu-revert-psp-firmware-load-status-check.patch
@@ -0,0 +1,32 @@
+From 36ad9fff8be4ff8f2ae9b9823a2098d6bc61df27 Mon Sep 17 00:00:00 2001
+From: Raveendra Talabattula <raveendra.talabattula@amd.com>
+Date: Fri, 28 Dec 2018 15:59:05 +0530
+Subject: [PATCH 5384/5725] drm/amdgpu: revert psp firmware load status check
+
+This check is causing boot failure on R1000 secure silicon.
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 7 -------
+ 1 file changed, 7 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+index 2ca9245..f74f155 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+@@ -134,13 +134,6 @@ psp_cmd_submit_buf(struct psp_context *psp,
+ msleep(1);
+ }
+
+- /* the status field must be 0 after FW is loaded */
+- if (ucode && psp->cmd_buf_mem->resp.status) {
+- DRM_ERROR("failed loading with status (%d) and ucode id (%d)\n",
+- psp->cmd_buf_mem->resp.status, ucode->ucode_id);
+- return -EINVAL;
+- }
+-
+ if (ucode) {
+ ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
+ ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5385-Hybrid-Version-18.50.0.418.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5385-Hybrid-Version-18.50.0.418.patch
new file mode 100644
index 00000000..fb8cc3b2
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5385-Hybrid-Version-18.50.0.418.patch
@@ -0,0 +1,27 @@
+From ff572fd4f00c0a6402ce90896454e755701af169 Mon Sep 17 00:00:00 2001
+From: Junshan Fang <Junshan.Fang@amd.com>
+Date: Thu, 13 Sep 2018 13:36:40 +0800
+Subject: [PATCH 5385/5725] Hybrid Version: 18.50.0.418
+
+Change-Id: I214a8ffc634c184926f23b941f2d8519f5eec9c5
+Signed-off-by: Junshan Fang <Junshan.Fang@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 2607526..626a172 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -76,7 +76,7 @@
+ #define KMS_DRIVER_MINOR 27
+ #define KMS_DRIVER_PATCHLEVEL 0
+
+-#define AMDGPU_VERSION "18.45.0.418"
++#define AMDGPU_VERSION "18.50.0.418"
+
+ int amdgpu_vram_limit = 0;
+ int amdgpu_vis_vram_limit = 0;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5386-drm-amdgpu-improve-VM-state-machine-documentation-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5386-drm-amdgpu-improve-VM-state-machine-documentation-v2.patch
new file mode 100644
index 00000000..7e5cece0
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5386-drm-amdgpu-improve-VM-state-machine-documentation-v2.patch
@@ -0,0 +1,192 @@
+From f09a38bf9995b395d2553e91704b7d4e620287fa Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Thu, 30 Aug 2018 10:27:15 +0200
+Subject: [PATCH 5386/5725] drm/amdgpu: improve VM state machine documentation
+ v2
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Since we have a lot of FAQs about the VM state machine, try to improve the
+documentation by adding functions for each state move.
+
+v2: fix typo in amdgpu_vm_bo_invalidated, use amdgpu_vm_bo_relocated in
+ one more place as well.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 91 +++++++++++++++++++++++++++-------
+ 1 file changed, 72 insertions(+), 19 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index adb61a2..b54870f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -205,6 +205,64 @@ static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
+ }
+
+ /**
++ * amdgpu_vm_bo_evicted - vm_bo is evicted
++ *
++ * @vm_bo: vm_bo which is evicted
++ *
++ * State for PDs/PTs and per VM BOs which are not at the location they should
++ * be.
++ */
++static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
++{
++ struct amdgpu_vm *vm = vm_bo->vm;
++ struct amdgpu_bo *bo = vm_bo->bo;
++
++ vm_bo->moved = true;
++ if (bo->tbo.type == ttm_bo_type_kernel)
++ list_move(&vm_bo->vm_status, &vm->evicted);
++ else
++ list_move_tail(&vm_bo->vm_status, &vm->evicted);
++}
++
++/**
++ * amdgpu_vm_bo_relocated - vm_bo is relocated
++ *
++ * @vm_bo: vm_bo which is relocated
++ *
++ * State for PDs/PTs which need to update their parent PD.
++ */
++static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
++{
++ list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
++}
++
++/**
++ * amdgpu_vm_bo_moved - vm_bo is moved
++ *
++ * @vm_bo: vm_bo which is moved
++ *
++ * State for per VM BOs which are moved, but that change is not yet reflected
++ * in the page tables.
++ */
++static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
++{
++ list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
++}
++
++/**
++ * amdgpu_vm_bo_done - vm_bo is done
++ *
++ * @vm_bo: vm_bo which is now done
++ *
++ * State for normal BOs which are invalidated and that change has been updated
++ * in the PTs.
++ */
++static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
++{
++ list_del_init(&vm_bo->vm_status);
++}
++
++/**
+ * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
+ *
+ * @base: base structure for tracking BO usage in a VM
+@@ -228,7 +286,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
+ list_add_tail(&base->bo_list, &bo->va);
+
+ if (bo->tbo.type == ttm_bo_type_kernel)
+- list_move(&base->vm_status, &vm->relocated);
++ amdgpu_vm_bo_relocated(base);
+
+ if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
+ return;
+@@ -242,8 +300,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
+ * is currently evicted. add the bo to the evicted list to make sure it
+ * is validated on next vm use to avoid fault.
+ * */
+- list_move_tail(&base->vm_status, &vm->evicted);
+- base->moved = true;
++ amdgpu_vm_bo_evicted(base);
+ }
+
+ /**
+@@ -306,7 +363,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+
+ if (bo->tbo.type != ttm_bo_type_kernel) {
+ spin_lock(&vm->moved_lock);
+- list_move(&bo_base->vm_status, &vm->moved);
++ amdgpu_vm_bo_moved(bo_base);
+ spin_unlock(&vm->moved_lock);
+ } else {
+ if (vm->use_cpu_for_update)
+@@ -315,7 +372,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ r = amdgpu_ttm_alloc_gart(&bo->tbo);
+ if (r)
+ break;
+- list_move(&bo_base->vm_status, &vm->relocated);
++ amdgpu_vm_bo_relocated(bo_base);
+ }
+ }
+
+@@ -1031,7 +1088,7 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev,
+ continue;
+
+ if (!entry->base.moved)
+- list_move(&entry->base.vm_status, &vm->relocated);
++ amdgpu_vm_bo_relocated(&entry->base);
+ amdgpu_vm_invalidate_level(adev, vm, entry, level + 1);
+ }
+ }
+@@ -1206,7 +1263,7 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
+ if (entry->huge) {
+ /* Add the entry to the relocated list to update it. */
+ entry->huge = false;
+- list_move(&entry->base.vm_status, &p->vm->relocated);
++ amdgpu_vm_bo_relocated(&entry->base);
+ }
+ return;
+ }
+@@ -1737,7 +1794,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
+ if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv &&
+ !(bo->preferred_domains &
+ amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type)))
+- list_add_tail(&bo_va->base.vm_status, &vm->evicted);
++ amdgpu_vm_bo_evicted(&bo_va->base);
+
+ list_splice_init(&bo_va->invalids, &bo_va->valids);
+ bo_va->cleared = clear;
+@@ -2461,26 +2518,22 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
+
+ list_for_each_entry(bo_base, &bo->va, bo_list) {
+ struct amdgpu_vm *vm = bo_base->vm;
+- bool was_moved = bo_base->moved;
+
+- bo_base->moved = true;
+ if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
+- if (bo->tbo.type == ttm_bo_type_kernel)
+- list_move(&bo_base->vm_status, &vm->evicted);
+- else
+- list_move_tail(&bo_base->vm_status,
+- &vm->evicted);
++ amdgpu_vm_bo_evicted(bo_base);
+ continue;
+ }
+
+- if (was_moved)
+- continue;
++ if (bo_base->moved)
++ continue;
++
++ bo_base->moved = true;
+
+ if (bo->tbo.type == ttm_bo_type_kernel) {
+- list_move(&bo_base->vm_status, &vm->relocated);
++ amdgpu_vm_bo_relocated(bo_base);
+ } else {
+ spin_lock(&bo_base->vm->moved_lock);
+- list_move(&bo_base->vm_status, &vm->moved);
++ amdgpu_vm_bo_moved(bo_base);
+ spin_unlock(&bo_base->vm->moved_lock);
+ }
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5387-drm-amdgpu-Fix-compute-VM-BO-params-after-rebase-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5387-drm-amdgpu-Fix-compute-VM-BO-params-after-rebase-v2.patch
new file mode 100644
index 00000000..964b4e50
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5387-drm-amdgpu-Fix-compute-VM-BO-params-after-rebase-v2.patch
@@ -0,0 +1,56 @@
+From c835eecfb7c3d3f20c1a115b8a8a139ea3095145 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Wed, 5 Sep 2018 20:19:54 -0400
+Subject: [PATCH 5387/5725] drm/amdgpu: Fix compute VM BO params after rebase
+ v2
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The intent of two commits was lost in the last rebase:
+
+810955b drm/amdgpu: Fix acquiring VM on large-BAR systems
+b5d21aa drm/amdgpu: Don't use shadow BO for compute context
+
+This commit restores the original behaviour:
+* Don't set AMDGPU_GEM_CREATE_NO_CPU_ACCESS for page directories
+ to allow them to be reused for compute VMs
+* Don't create shadow BOs for page tables in compute VMs
+
+v2: move more logic into amdgpu_vm_bo_param
+
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Tested-by: Kent Russell <Kent.Russell@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index b54870f..ccc007c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -524,9 +524,8 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+ if (vm->use_cpu_for_update)
+ bp->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+- else
+- bp->flags |= AMDGPU_GEM_CREATE_SHADOW |
+- AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
++ else if (!vm->root.base.bo || vm->root.base.bo->shadow)
++ bp->flags |= AMDGPU_GEM_CREATE_SHADOW;
+ bp->type = ttm_bo_type_kernel;
+ if (vm->root.base.bo)
+ bp->resv = vm->root.base.bo->tbo.resv;
+@@ -2716,6 +2715,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ vm->last_update = NULL;
+
+ amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, &bp);
++ if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
++ bp.flags &= ~AMDGPU_GEM_CREATE_SHADOW;
+ r = amdgpu_bo_create(adev, &bp, &root);
+ if (r)
+ goto error_free_sched_entity;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5388-drm-amdgpu-Fix-warnings-while-make-xmldocs.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5388-drm-amdgpu-Fix-warnings-while-make-xmldocs.patch
new file mode 100644
index 00000000..e3c70a61
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5388-drm-amdgpu-Fix-warnings-while-make-xmldocs.patch
@@ -0,0 +1,44 @@
+From e7918df24cb517a3d92559d80b66e49e5ba078b7 Mon Sep 17 00:00:00 2001
+From: Masanari Iida <standby24x7@gmail.com>
+Date: Thu, 6 Sep 2018 11:10:57 +0900
+Subject: [PATCH 5388/5725] drm/amdgpu: Fix warnings while make xmldocs
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This patch fixes following warnings.
+
+./drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c:3011:
+warning: Excess function parameter 'dev' description
+in 'amdgpu_vm_get_task_info'
+
+./drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c:3012:
+warning: Function parameter or member 'adev' not
+described in 'amdgpu_vm_get_task_info'
+
+./drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c:3012:
+warning: Excess function parameter 'dev' description
+in 'amdgpu_vm_get_task_info'
+
+Signed-off-by: Masanari Iida <standby24x7@gmail.com>
+Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index ccc007c..64cbcff 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -3095,7 +3095,7 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+ /**
+ * amdgpu_vm_get_task_info - Extracts task info for a PASID.
+ *
+- * @dev: drm device pointer
++ * @adev: drm device pointer
+ * @pasid: PASID identifier for VM
+ * @task_info: task_info to fill.
+ */
+--
+2.7.4
+
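Kernel-doc is strict here: every "@name:" line in the comment has to match a real parameter of the function that follows, and the stale @dev line is what produced the warnings quoted in the message above. A rough user-space sketch of the convention, using an invented device struct and function rather than the real amdgpu_vm_get_task_info():

    #include <stdio.h>

    struct demo_device { unsigned int pasid; };

    /**
     * demo_get_task_info - look up task info for a PASID
     * @adev: device pointer (the name must match the parameter below)
     * @pasid: PASID identifier to look up
     *
     * Documenting a non-existent @dev here is exactly what triggers the
     * "Excess function parameter" warnings quoted above.
     */
    static int demo_get_task_info(struct demo_device *adev, unsigned int pasid)
    {
            return (adev->pasid == pasid) ? 0 : -1;
    }

    int main(void)
    {
            struct demo_device dev = { .pasid = 7 };
            printf("%d\n", demo_get_task_info(&dev, 7));
            return 0;
    }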
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5389-drm-amd-powerplay-fix-compile-warning-for-wrong-data.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5389-drm-amd-powerplay-fix-compile-warning-for-wrong-data.patch
new file mode 100644
index 00000000..4c2f473b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5389-drm-amd-powerplay-fix-compile-warning-for-wrong-data.patch
@@ -0,0 +1,43 @@
+From 0aa34248c3b24a8f52e3e5d7320afd779d7d3bcc Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Wed, 5 Sep 2018 10:03:13 +0800
+Subject: [PATCH 5389/5725] drm/amd/powerplay: fix compile warning for wrong
+ data type V2
+
+do_div expects its first argument to be 64-bit rather than 32-bit.
+Drop the usage of do_div as it seems unnecessary.
+
+V2: drop usage of do_div completely
+
+Change-Id: Id2032a43727e7f1fa5163333d3565354d412a561
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+index 7dcfc79..d45cbfe 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+@@ -1307,7 +1307,7 @@ static int vega20_set_sclk_od(
+ int ret = 0;
+
+ od_sclk = golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value * value;
+- do_div(od_sclk, 100);
++ od_sclk /= 100;
+ od_sclk += golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+ ret = vega20_od8_set_settings(hwmgr, OD8_SETTING_GFXCLK_FMAX, od_sclk);
+@@ -1352,7 +1352,7 @@ static int vega20_set_mclk_od(
+ int ret = 0;
+
+ od_mclk = golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value * value;
+- do_div(od_mclk, 100);
++ od_mclk /= 100;
+ od_mclk += golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+ ret = vega20_od8_set_settings(hwmgr, OD8_SETTING_UCLK_FMAX, od_mclk);
+--
+2.7.4
+
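For context, the kernel's do_div() divides a 64-bit dividend in place and therefore needs a 64-bit lvalue as its first argument, which is where the warning came from; once the dividend fits in 32 bits, plain division is enough. A small user-space sketch of the difference, with a stand-in for do_div() and made-up clock values rather than the real hwmgr code:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the kernel's do_div(): divides a 64-bit value in place
     * and returns the remainder.  The real macro modifies its first
     * argument, so that argument must be a 64-bit lvalue. */
    static uint32_t demo_do_div(uint64_t *n, uint32_t base)
    {
            uint32_t rem = (uint32_t)(*n % base);
            *n /= base;
            return rem;
    }

    int main(void)
    {
            uint64_t od_sclk64 = 155000;   /* 64-bit dividend: do_div-style */
            uint32_t od_sclk32 = 155000;   /* 32-bit dividend: plain division */

            demo_do_div(&od_sclk64, 100);
            od_sclk32 /= 100;              /* what the patch switches to */

            printf("%llu %u\n", (unsigned long long)od_sclk64, od_sclk32);
            return 0;
    }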
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5390-drm-amdgpu-move-PSP-init-prior-to-IH-in-gpu-reset.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5390-drm-amdgpu-move-PSP-init-prior-to-IH-in-gpu-reset.patch
new file mode 100644
index 00000000..6f0e82db
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5390-drm-amdgpu-move-PSP-init-prior-to-IH-in-gpu-reset.patch
@@ -0,0 +1,41 @@
+From f5db1e85299aa44e74727f29bb157ea351f1e546 Mon Sep 17 00:00:00 2001
+From: Emily Deng <Emily.Deng@amd.com>
+Date: Mon, 10 Sep 2018 17:51:31 +0800
+Subject: [PATCH 5390/5725] drm/amdgpu: move PSP init prior to IH in gpu reset
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Since we now use the PSP to program the IH registers, PSP must be
+initialized before IH during GPU reset.
+
+Signed-off-by: Monk Liu <Monk.Liu@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Emily Deng <Emily.Deng@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 13efee8..2db51ef 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -2015,6 +2015,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
+ static enum amd_ip_block_type ip_order[] = {
+ AMD_IP_BLOCK_TYPE_GMC,
+ AMD_IP_BLOCK_TYPE_COMMON,
++ AMD_IP_BLOCK_TYPE_PSP,
+ AMD_IP_BLOCK_TYPE_IH,
+ };
+
+@@ -2045,7 +2046,6 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
+
+ static enum amd_ip_block_type ip_order[] = {
+ AMD_IP_BLOCK_TYPE_SMC,
+- AMD_IP_BLOCK_TYPE_PSP,
+ AMD_IP_BLOCK_TYPE_DCE,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_IP_BLOCK_TYPE_SDMA,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5391-drm-amd-include-update-the-bitfield-define-for-PF_MA.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5391-drm-amd-include-update-the-bitfield-define-for-PF_MA.patch
new file mode 100644
index 00000000..a1878745
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5391-drm-amd-include-update-the-bitfield-define-for-PF_MA.patch
@@ -0,0 +1,38 @@
+From 581ee5525d899baf9185a9086264d4761aaf4e38 Mon Sep 17 00:00:00 2001
+From: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Date: Tue, 7 Aug 2018 11:44:26 -0400
+Subject: [PATCH 5391/5725] drm/amd/include: update the bitfield define for
+ PF_MAX_REGION
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Correct the definition based on vega20 register spec
+
+Change-Id: Ifde296134d00423cdf1078c8249d044f5b5cf5a5
+Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_2_1_sh_mask.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_2_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_2_1_sh_mask.h
+index 6626fc2..76ea902 100644
+--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_2_1_sh_mask.h
++++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_2_1_sh_mask.h
+@@ -8241,9 +8241,9 @@
+ #define MC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL__LOCK_MASK 0x00000001L
+ //MC_VM_XGMI_LFB_CNTL
+ #define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION__SHIFT 0x0
+-#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION__SHIFT 0x3
++#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION__SHIFT 0x4
+ #define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION_MASK 0x00000007L
+-#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION_MASK 0x00000038L
++#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION_MASK 0x00000070L
+ //MC_VM_XGMI_LFB_SIZE
+ #define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE__SHIFT 0x0
+ #define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE_MASK 0x0000FFFFL
+--
+2.7.4
+
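The SHIFT/MASK pair simply locates a bit-field inside the 32-bit register; the corrected values place the 3-bit PF_MAX_REGION field at bits 6:4 instead of 5:3. A minimal sketch of how such a field is extracted, with a made-up register value and a plain helper standing in for REG_GET_FIELD():

    #include <stdint.h>
    #include <stdio.h>

    /* Corrected field definition from the patch: a 3-bit field at bits 6:4. */
    #define PF_MAX_REGION__SHIFT 0x4
    #define PF_MAX_REGION_MASK   0x00000070UL

    /* Generic extraction, equivalent in spirit to the REG_GET_FIELD() macro. */
    static uint32_t get_field(uint32_t reg, uint32_t mask, uint32_t shift)
    {
            return (reg & mask) >> shift;
    }

    int main(void)
    {
            uint32_t mc_vm_xgmi_lfb_cntl = 0x31;  /* made-up register value */

            /* With the old 0x3/0x38 definition this would read the wrong bits. */
            printf("PF_MAX_REGION = %u\n",
                   get_field(mc_vm_xgmi_lfb_cntl, PF_MAX_REGION_MASK,
                             PF_MAX_REGION__SHIFT));
            return 0;
    }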
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5392-drm-amdgpu-gmc-add-initial-xgmi-structure-to-amdgpu_.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5392-drm-amdgpu-gmc-add-initial-xgmi-structure-to-amdgpu_.patch
new file mode 100644
index 00000000..34e58d2d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5392-drm-amdgpu-gmc-add-initial-xgmi-structure-to-amdgpu_.patch
@@ -0,0 +1,54 @@
+From aaa24832763377d55bf00adf41b030ef7dfbb2fe Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Tue, 19 Jun 2018 16:00:47 -0500
+Subject: [PATCH 5392/5725] drm/amdgpu/gmc: add initial xgmi structure to
+ amdgpu_gmc structure
+
+Initial pass at a structure to store xgmi info. xgmi is a
+high-speed cross-GPU interconnect.
+
+Change-Id: I8b373bd847c857dd7cbefa55d1ede2a8785deb06
+Acked-by: Huang Rui <ray.huang@amd.com>
+Acked-by: Slava Abramov <slava.abramov@amd.com>
+Reviewed-by: Shaoyun Liu <Shaoyun.liu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+index 2189606..ab767aa 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+@@ -87,6 +87,18 @@ struct amdgpu_gmc_funcs {
+ u64 *dst, u64 *flags);
+ };
+
++struct amdgpu_xgmi {
++ /* from psp */
++ u64 device_id;
++ u64 hive_id;
++ /* fixed per family */
++ u64 node_segment_size;
++ /* physical node (0-3) */
++ unsigned physical_node_id;
++ /* number of nodes (0-4) */
++ unsigned num_physical_nodes;
++};
++
+ struct amdgpu_gmc {
+ resource_size_t aper_size;
+ resource_size_t aper_base;
+@@ -126,6 +138,8 @@ struct amdgpu_gmc {
+ atomic_t vm_fault_info_updated;
+
+ const struct amdgpu_gmc_funcs *gmc_funcs;
++
++ struct amdgpu_xgmi xgmi;
+ };
+
+ #define amdgpu_gmc_flush_gpu_tlb(adev, vmid) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid))
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5393-drm-amdgpu-gmc9-add-a-new-gfxhub-1.1-helper-for-xgmi.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5393-drm-amdgpu-gmc9-add-a-new-gfxhub-1.1-helper-for-xgmi.patch
new file mode 100644
index 00000000..96894755
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5393-drm-amdgpu-gmc9-add-a-new-gfxhub-1.1-helper-for-xgmi.patch
@@ -0,0 +1,162 @@
+From 9d5a19c33b9d29bed81b9c735ff61fdb73074219 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Tue, 19 Jun 2018 17:03:27 -0500
+Subject: [PATCH 5393/5725] drm/amdgpu/gmc9: add a new gfxhub 1.1 helper for
+ xgmi
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Used to populate the xgmi info on vega20.
+
+v2: PF_MAX_REGION is val - 1 (Ray)
+
+Acked-by: Huang Rui <ray.huang@amd.com>
+Acked-by: Slava Abramov <slava.abramov@amd.com>
+Reviewed-by: Shaoyun Liu <Shaoyun.liu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Shaoyun Liu <Shaoyun.liu@amd.com>
+Change-Id: Ia7b7f112880e69cdbcf73a8abf04cd6ef303940c
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/Makefile | 2 +-
+ drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c | 50 ++++++++++++++++++++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.h | 29 ++++++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 7 +++++
+ 4 files changed, 87 insertions(+), 1 deletion(-)
+ create mode 100644 drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c
+ create mode 100644 drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.h
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
+index 0e9fe5e..8c34388 100644
+--- a/drivers/gpu/drm/amd/amdgpu/Makefile
++++ b/drivers/gpu/drm/amd/amdgpu/Makefile
+@@ -73,7 +73,7 @@ amdgpu-y += \
+ amdgpu-y += \
+ gmc_v7_0.o \
+ gmc_v8_0.o \
+- gfxhub_v1_0.o mmhub_v1_0.o gmc_v9_0.o
++ gfxhub_v1_0.o mmhub_v1_0.o gmc_v9_0.o gfxhub_v1_1.o
+
+ # add IH block
+ amdgpu-y += \
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c
+new file mode 100644
+index 0000000..d4170cb
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c
+@@ -0,0 +1,50 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++#include "amdgpu.h"
++#include "gfxhub_v1_1.h"
++
++#include "gc/gc_9_2_1_offset.h"
++#include "gc/gc_9_2_1_sh_mask.h"
++
++#include "soc15_common.h"
++
++int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev)
++{
++ u32 xgmi_lfb_cntl = RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_CNTL);
++ u32 max_region =
++ REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, PF_MAX_REGION);
++
++ /* PF_MAX_REGION=0 means xgmi is disabled */
++ if (max_region) {
++ adev->gmc.xgmi.num_physical_nodes = max_region + 1;
++ if (adev->gmc.xgmi.num_physical_nodes > 4)
++ return -EINVAL;
++
++ adev->gmc.xgmi.physical_node_id =
++ REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, PF_LFB_REGION);
++ if (adev->gmc.xgmi.physical_node_id > 3)
++ return -EINVAL;
++ }
++
++ return 0;
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.h
+new file mode 100644
+index 0000000..d753cf2
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.h
+@@ -0,0 +1,29 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef __GFXHUB_V1_1_H__
++#define __GFXHUB_V1_1_H__
++
++int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev);
++
++#endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index b5d849b..de699ea 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -42,6 +42,7 @@
+
+ #include "gfxhub_v1_0.h"
+ #include "mmhub_v1_0.h"
++#include "gfxhub_v1_1.h"
+
+ #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
+
+@@ -984,6 +985,12 @@ static int gmc_v9_0_sw_init(void *handle)
+ printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
+ }
+
++ if (adev->asic_type == CHIP_VEGA20) {
++ r = gfxhub_v1_1_get_xgmi_info(adev);
++ if (r)
++ return r;
++ }
++
+ r = gmc_v9_0_mc_init(adev);
+ if (r)
+ return r;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5394-drm-amdgpu-gmc9-Adjust-GART-and-AGP-location-with-xg.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5394-drm-amdgpu-gmc9-Adjust-GART-and-AGP-location-with-xg.patch
new file mode 100644
index 00000000..c0e412f1
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5394-drm-amdgpu-gmc9-Adjust-GART-and-AGP-location-with-xg.patch
@@ -0,0 +1,158 @@
+From 0818807c9a7650ed1d8f54f2029aa2b2b300479c Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Tue, 19 Jun 2018 16:11:56 -0500
+Subject: [PATCH 5394/5725] drm/amdgpu/gmc9: Adjust GART and AGP location with
+ xgmi offset
+
+On hives with xgmi enabled, the fb_location aperture defines the
+total framebuffer size of all nodes in the
+hive. Each GPU in the hive has the same view via the fb_location
+aperture. GPU0 starts at offset (0 * segment size),
+GPU1 starts at offset (1 * segment size), etc.
+
+For access to local vram on each GPU, we need to take this offset into
+account. This includes setting up the GPUVM page tables and the GART table.
+
+Change-Id: I9efd510bed68fdb9afdfbdc76e1046792471ee78
+Acked-by: Huang Rui <ray.huang@amd.com>
+Acked-by: Slava Abramov <slava.abramov@amd.com>
+Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Acked-by: Huang Rui <ray.huang@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 20 ++++++++++----------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 8 ++++++++
+ drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c | 3 +++
+ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 6 ++++++
+ drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 7 +++++++
+ 5 files changed, 34 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+index fec88f6..77907e2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+@@ -147,8 +147,8 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
+ /* VCE doesn't like it when BOs cross a 4GB segment, so align
+ * the GART base on a 4GB boundary as well.
+ */
+- size_bf = mc->vram_start;
+- size_af = adev->gmc.mc_mask + 1 - ALIGN(mc->vram_end + 1, four_gb);
++ size_bf = mc->fb_start;
++ size_af = adev->gmc.mc_mask + 1 - ALIGN(mc->fb_end + 1, four_gb);
+
+ if (mc->gart_size > max(size_bf, size_af)) {
+ dev_warn(adev->dev, "limiting GART\n");
+@@ -184,23 +184,23 @@ void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
+ const uint64_t sixteen_gb_mask = ~(sixteen_gb - 1);
+ u64 size_af, size_bf;
+
+- if (mc->vram_start > mc->gart_start) {
+- size_bf = (mc->vram_start & sixteen_gb_mask) -
++ if (mc->fb_start > mc->gart_start) {
++ size_bf = (mc->fb_start & sixteen_gb_mask) -
+ ALIGN(mc->gart_end + 1, sixteen_gb);
+- size_af = mc->mc_mask + 1 - ALIGN(mc->vram_end + 1, sixteen_gb);
++ size_af = mc->mc_mask + 1 - ALIGN(mc->fb_end + 1, sixteen_gb);
+ } else {
+- size_bf = mc->vram_start & sixteen_gb_mask;
++ size_bf = mc->fb_start & sixteen_gb_mask;
+ size_af = (mc->gart_start & sixteen_gb_mask) -
+- ALIGN(mc->vram_end + 1, sixteen_gb);
++ ALIGN(mc->fb_end + 1, sixteen_gb);
+ }
+
+ if (size_bf > size_af) {
+- mc->agp_start = mc->vram_start > mc->gart_start ?
++ mc->agp_start = mc->fb_start > mc->gart_start ?
+ mc->gart_end + 1 : 0;
+ mc->agp_size = size_bf;
+ } else {
+- mc->agp_start = (mc->vram_start > mc->gart_start ?
+- mc->vram_end : mc->gart_end) + 1,
++ mc->agp_start = (mc->fb_start > mc->gart_start ?
++ mc->fb_end : mc->gart_end) + 1,
+ mc->agp_size = size_af;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+index ab767aa..491100f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+@@ -114,6 +114,14 @@ struct amdgpu_gmc {
+ u64 gart_end;
+ u64 vram_start;
+ u64 vram_end;
++ /* FB region: the same as the local vram region on a single GPU.
++ * In an XGMI configuration this region covers all GPUs in the
++ * same hive, and each GPU in the hive has the same view of it.
++ * GPU0's vram starts at offset (0 * segment size),
++ * GPU1 starts at offset (1 * segment size), etc.
++ */
++ u64 fb_start;
++ u64 fb_end;
+ unsigned vram_width;
+ u64 real_vram_size;
+ int vram_mtrr;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c
+index d4170cb..5e9ab8e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c
+@@ -44,6 +44,9 @@ int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev)
+ REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, PF_LFB_REGION);
+ if (adev->gmc.xgmi.physical_node_id > 3)
+ return -EINVAL;
++ adev->gmc.xgmi.node_segment_size = REG_GET_FIELD(
++ RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_SIZE),
++ MC_VM_XGMI_LFB_SIZE, PF_LFB_SIZE) << 24;
+ }
+
+ return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index de699ea..a0fd3a7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -770,12 +770,18 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
+ u64 base = 0;
+ if (!amdgpu_sriov_vf(adev))
+ base = mmhub_v1_0_get_fb_location(adev);
++ /* add the xgmi offset of the physical node */
++ base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
+ amdgpu_gmc_vram_location(adev, &adev->gmc, base);
+ amdgpu_gmc_gart_location(adev, mc);
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_gmc_agp_location(adev, mc);
+ /* base offset of vram pages */
+ adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
++
++ /* XXX: add the xgmi offset of the physical node? */
++ adev->vm_manager.vram_base_offset +=
++ adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
+ }
+
+ /**
+diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+index cca6c1b..14649f8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+@@ -38,10 +38,17 @@
+ u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev)
+ {
+ u64 base = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE);
++ u64 top = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_TOP);
+
+ base &= MC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
+ base <<= 24;
+
++ top &= MC_VM_FB_LOCATION_TOP__FB_TOP_MASK;
++ top <<= 24;
++
++ adev->gmc.fb_start = base;
++ adev->gmc.fb_end = top;
++
+ return base;
+ }
+
+--
+2.7.4
+
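Taken together with the previous patch, the arithmetic is: each node's local VRAM begins at (physical_node_id * node_segment_size) inside the shared fb_location aperture, where the segment size comes from PF_LFB_SIZE shifted left by 24 bits. A rough sketch of that calculation with made-up values (not the driver code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Made-up values for illustration only. */
            uint64_t pf_lfb_size       = 2;                  /* raw PF_LFB_SIZE field */
            uint64_t node_segment_size = pf_lfb_size << 24;  /* units of 16 MiB */
            uint64_t mmhub_fb_base     = 0;                  /* MC_VM_FB_LOCATION_BASE */

            for (unsigned node = 0; node < 4; node++) {
                    /* Same idea as gmc_v9_0_vram_gtt_location(): each node's
                     * local VRAM starts at its own offset inside the shared
                     * fb_location aperture. */
                    uint64_t base = mmhub_fb_base + node * node_segment_size;
                    printf("node %u: vram base 0x%llx\n",
                           node, (unsigned long long)base);
            }
            return 0;
    }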
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5395-drm-amdgpu-Add-psp-function-interfaces-for-XGMI-supp.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5395-drm-amdgpu-Add-psp-function-interfaces-for-XGMI-supp.patch
new file mode 100644
index 00000000..e4b48060
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5395-drm-amdgpu-Add-psp-function-interfaces-for-XGMI-supp.patch
@@ -0,0 +1,88 @@
+From 2343498b7ea882056bdc6fd6f5d47c9f47d1bd75 Mon Sep 17 00:00:00 2001
+From: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Date: Wed, 27 Jun 2018 17:24:46 -0400
+Subject: [PATCH 5395/5725] drm/amdgpu : Add psp function interfaces for XGMI
+ support
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Placeholder for XGMI support
+
+Change-Id: I924fa3693366409de0218009c7f709cb464854cc
+Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 34 +++++++++++++++++++++++++++++++++
+ 1 file changed, 34 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+index 981887c..8b8720e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+@@ -35,6 +35,7 @@
+ #define PSP_TMR_SIZE 0x400000
+
+ struct psp_context;
++struct psp_xgmi_topology_info;
+
+ enum psp_ring_type
+ {
+@@ -79,6 +80,12 @@ struct psp_funcs
+ enum AMDGPU_UCODE_ID ucode_type);
+ bool (*smu_reload_quirk)(struct psp_context *psp);
+ int (*mode1_reset)(struct psp_context *psp);
++ uint64_t (*xgmi_get_device_id)(struct psp_context *psp);
++ uint64_t (*xgmi_get_hive_id)(struct psp_context *psp);
++ int (*xgmi_get_topology_info)(struct psp_context *psp, int number_devices,
++ struct psp_xgmi_topology_info *topology);
++ int (*xgmi_set_topology_info)(struct psp_context *psp, int number_devices,
++ struct psp_xgmi_topology_info *topology);
+ };
+
+ struct psp_context
+@@ -134,6 +141,23 @@ struct amdgpu_psp_funcs {
+ enum AMDGPU_UCODE_ID);
+ };
+
++struct psp_xgmi_topology_info {
++ /* Generated by PSP to identify the GPU instance within xgmi connection */
++ uint64_t device_id;
++ /*
++ * If all bits are set to 0, the driver wants to retrieve the xgmi
++ * connection vector topology but not access-enable the connections.
++ * If some or all bits are set to 1, the driver wants to retrieve the
++ * current xgmi topology and access-enable the link to GPU[i] associated
++ * with the bit position in the vector.
++ * On return, bits indicate which xgmi links are present/active depending
++ * on the value passed in. The relative bit offset for the relative GPU index
++ * within the hive is always marked active.
++ */
++ uint32_t connection_mask;
++ uint32_t reserved; /* must be 0 */
++};
++
+ #define psp_prep_cmd_buf(ucode, type) (psp)->funcs->prep_cmd_buf((ucode), (type))
+ #define psp_ring_init(psp, type) (psp)->funcs->ring_init((psp), (type))
+ #define psp_ring_create(psp, type) (psp)->funcs->ring_create((psp), (type))
+@@ -153,6 +177,16 @@ struct amdgpu_psp_funcs {
+ ((psp)->funcs->smu_reload_quirk ? (psp)->funcs->smu_reload_quirk((psp)) : false)
+ #define psp_mode1_reset(psp) \
+ ((psp)->funcs->mode1_reset ? (psp)->funcs->mode1_reset((psp)) : false)
++#define psp_xgmi_get_device_id(psp) \
++ ((psp)->funcs->xgmi_get_device_id ? (psp)->funcs->xgmi_get_device_id((psp)) : 0)
++#define psp_xgmi_get_hive_id(psp) \
++ ((psp)->funcs->xgmi_get_hive_id ? (psp)->funcs->xgmi_get_hive_id((psp)) : 0)
++#define psp_xgmi_get_topology_info(psp, num_device, topology) \
++ ((psp)->funcs->xgmi_get_topology_info ? \
++ (psp)->funcs->xgmi_get_topology_info((psp), (num_device), (topology)) : -EINVAL)
++#define psp_xgmi_set_topology_info(psp, num_device, topology) \
++ ((psp)->funcs->xgmi_set_topology_info ? \
++ (psp)->funcs->xgmi_set_topology_info((psp), (num_device), (topology)) : -EINVAL)
+
+ #define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i))
+
+--
+2.7.4
+
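The new psp_xgmi_* wrappers reuse the pattern already used by the surrounding macros: check whether the optional function pointer is populated and fall back to a default (0 or -EINVAL) otherwise, so callers need not care whether a given PSP implementation provides XGMI support. A small user-space sketch of that optional-callback pattern, with invented names:

    #include <stdio.h>

    /* Invented stand-ins for the optional-callback-with-fallback pattern. */
    struct demo_funcs {
            unsigned long long (*get_hive_id)(void *ctx);
    };

    struct demo_ctx {
            const struct demo_funcs *funcs;
    };

    /* Fall back to 0 when the callback is not implemented. */
    #define demo_get_hive_id(ctx) \
            ((ctx)->funcs->get_hive_id ? (ctx)->funcs->get_hive_id((ctx)) : 0ULL)

    static unsigned long long real_get_hive_id(void *ctx)
    {
            (void)ctx;
            return 0x123456789abcdefULL;
    }

    int main(void)
    {
            const struct demo_funcs with    = { .get_hive_id = real_get_hive_id };
            const struct demo_funcs without = { .get_hive_id = NULL };
            struct demo_ctx a = { .funcs = &with };
            struct demo_ctx b = { .funcs = &without };

            printf("0x%llx 0x%llx\n", demo_get_hive_id(&a), demo_get_hive_id(&b));
            return 0;
    }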
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5396-drm-amdgpu-Add-place-holder-functions-for-xgmi-topol.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5396-drm-amdgpu-Add-place-holder-functions-for-xgmi-topol.patch
new file mode 100644
index 00000000..bf0efbc7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5396-drm-amdgpu-Add-place-holder-functions-for-xgmi-topol.patch
@@ -0,0 +1,67 @@
+From 83d313b35b447b8efa9e46e2e02bab00427edba3 Mon Sep 17 00:00:00 2001
+From: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Date: Tue, 14 Aug 2018 13:30:00 -0400
+Subject: [PATCH 5396/5725] drm/amdgpu: Add place holder functions for xgmi
+ topology interface with psp
+
+Add dummy functions for the xgmi topology interface with psp
+
+Change-Id: I01f35baf5a4b96e9654d448c9892be3cd72c05b7
+Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 30 ++++++++++++++++++++++++++++++
+ 1 file changed, 30 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+index b70cfa3..9217af0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+@@ -548,6 +548,33 @@ static int psp_v11_0_mode1_reset(struct psp_context *psp)
+ return 0;
+ }
+
++/* TODO: Fill in the following functions once the PSP firmware interface for
++ * XGMI is ready. For now, return success and hack the hive_id so that
++ * high-level code can start testing.
++ */
++static int psp_v11_0_xgmi_get_topology_info(struct psp_context *psp,
++ int number_devices, struct psp_xgmi_topology_info *topology)
++{
++ return 0;
++}
++
++static int psp_v11_0_xgmi_set_topology_info(struct psp_context *psp,
++ int number_devices, struct psp_xgmi_topology_info *topology)
++{
++ return 0;
++}
++
++static u64 psp_v11_0_xgmi_get_hive_id(struct psp_context *psp)
++{
++ u64 hive_id = 0;
++
++ /* Remove me when we can get correct hive_id through PSP */
++ if (psp->adev->gmc.xgmi.num_physical_nodes)
++ hive_id = 0x123456789abcdef;
++
++ return hive_id;
++}
++
+ static const struct psp_funcs psp_v11_0_funcs = {
+ .init_microcode = psp_v11_0_init_microcode,
+ .bootloader_load_sysdrv = psp_v11_0_bootloader_load_sysdrv,
+@@ -560,6 +587,9 @@ static const struct psp_funcs psp_v11_0_funcs = {
+ .cmd_submit = psp_v11_0_cmd_submit,
+ .compare_sram_data = psp_v11_0_compare_sram_data,
+ .mode1_reset = psp_v11_0_mode1_reset,
++ .xgmi_get_topology_info = psp_v11_0_xgmi_get_topology_info,
++ .xgmi_set_topology_info = psp_v11_0_xgmi_set_topology_info,
++ .xgmi_get_hive_id = psp_v11_0_xgmi_get_hive_id,
+ };
+
+ void psp_v11_0_set_psp_funcs(struct psp_context *psp)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5397-drm-amdgpu-Generate-XGMI-topology-info-from-driver-l.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5397-drm-amdgpu-Generate-XGMI-topology-info-from-driver-l.patch
new file mode 100644
index 00000000..3d46acd1
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5397-drm-amdgpu-Generate-XGMI-topology-info-from-driver-l.patch
@@ -0,0 +1,205 @@
+From 3fafa0e86b60212fd6fd117df600aa7096478202 Mon Sep 17 00:00:00 2001
+From: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Date: Wed, 27 Jun 2018 17:25:53 -0400
+Subject: [PATCH 5397/5725] drm/amdgpu : Generate XGMI topology info from
+ driver level
+
+The driver saves an array of XGMI hive info; each hive has a list of devices
+that share the same hive ID.
+
+Change-Id: Ia2934d5b624cffa3283bc0a37679eddbd387cbdd
+Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/Makefile | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 6 ++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 +
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 2 +
+ drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 119 +++++++++++++++++++++++++++++
+ 5 files changed, 129 insertions(+), 1 deletion(-)
+ create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
+index 8c34388..3311402 100644
+--- a/drivers/gpu/drm/amd/amdgpu/Makefile
++++ b/drivers/gpu/drm/amd/amdgpu/Makefile
+@@ -52,7 +52,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
+ amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
+ amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
+ amdgpu_queue_mgr.o amdgpu_vf_error.o amdgpu_sched.o amdgpu_sem.o amdgpu_gmc.o amdgpu_amdkfd_fence.o \
+- amdgpu_debugfs.o amdgpu_ids.o
++ amdgpu_debugfs.o amdgpu_ids.o amdgpu_xgmi.o
+
+ # add asic specific block
+ amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index c7736d0..80fc9b3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -1281,6 +1281,12 @@ void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
+ long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg);
+
++
++/*
++ * functions used by amdgpu_xgmi.c
++ */
++int amdgpu_xgmi_add_device(struct amdgpu_device *adev);
++
+ /*
+ * functions used by amdgpu_encoder.c
+ */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 2db51ef..128ed6c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1645,6 +1645,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
+ adev->ip_blocks[i].status.hw = true;
+ }
+
++ amdgpu_xgmi_add_device(adev);
+ amdgpu_amdkfd_device_init(adev);
+
+ if (amdgpu_sriov_vf(adev))
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+index 491100f..313442e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+@@ -97,6 +97,8 @@ struct amdgpu_xgmi {
+ unsigned physical_node_id;
+ /* number of nodes (0-4) */
+ unsigned num_physical_nodes;
++ /* gpu list in the same hive */
++ struct list_head head;
+ };
+
+ struct amdgpu_gmc {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+new file mode 100644
+index 0000000..897afbb
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+@@ -0,0 +1,119 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ *
++ */
++#include <linux/list.h>
++#include "amdgpu.h"
++#include "amdgpu_psp.h"
++
++
++static DEFINE_MUTEX(xgmi_mutex);
++
++#define AMDGPU_MAX_XGMI_HIVE 8
++#define AMDGPU_MAX_XGMI_DEVICE_PER_HIVE 4
++
++struct amdgpu_hive_info {
++ uint64_t hive_id;
++ struct list_head device_list;
++};
++
++static struct amdgpu_hive_info xgmi_hives[AMDGPU_MAX_XGMI_HIVE];
++static unsigned hive_count = 0;
++
++static struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
++{
++ int i;
++ struct amdgpu_hive_info *tmp;
++
++ if (!adev->gmc.xgmi.hive_id)
++ return NULL;
++ for (i = 0 ; i < hive_count; ++i) {
++ tmp = &xgmi_hives[i];
++ if (tmp->hive_id == adev->gmc.xgmi.hive_id)
++ return tmp;
++ }
++ if (i >= AMDGPU_MAX_XGMI_HIVE)
++ return NULL;
++
++ /* initialize a new hive if one does not exist yet */
++ tmp = &xgmi_hives[hive_count++];
++ tmp->hive_id = adev->gmc.xgmi.hive_id;
++ INIT_LIST_HEAD(&tmp->device_list);
++ return tmp;
++}
++
++int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
++{
++ struct psp_xgmi_topology_info tmp_topology[AMDGPU_MAX_XGMI_DEVICE_PER_HIVE];
++ struct amdgpu_hive_info *hive;
++ struct amdgpu_xgmi *entry;
++ struct amdgpu_device *tmp_adev;
++
++ int count = 0, ret = -EINVAL;
++
++ if ((adev->asic_type < CHIP_VEGA20) ||
++ (adev->flags & AMD_IS_APU) )
++ return 0;
++ adev->gmc.xgmi.device_id = psp_xgmi_get_device_id(&adev->psp);
++ adev->gmc.xgmi.hive_id = psp_xgmi_get_hive_id(&adev->psp);
++
++ memset(&tmp_topology[0], 0, sizeof(tmp_topology));
++ mutex_lock(&xgmi_mutex);
++ hive = amdgpu_get_xgmi_hive(adev);
++ if (!hive)
++ goto exit;
++
++ list_add_tail(&adev->gmc.xgmi.head, &hive->device_list);
++ list_for_each_entry(entry, &hive->device_list, head)
++ tmp_topology[count++].device_id = entry->device_id;
++
++ ret = psp_xgmi_get_topology_info(&adev->psp, count, tmp_topology);
++ if (ret) {
++ dev_err(adev->dev,
++ "XGMI: Get topology failure on device %llx, hive %llx, ret %d",
++ adev->gmc.xgmi.device_id,
++ adev->gmc.xgmi.hive_id, ret);
++ goto exit;
++ }
++ /* Each psp needs to set the latest topology */
++ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
++ ret = psp_xgmi_set_topology_info(&tmp_adev->psp, count, tmp_topology);
++ if (ret) {
++ dev_err(tmp_adev->dev,
++ "XGMI: Set topology failure on device %llx, hive %llx, ret %d",
++ tmp_adev->gmc.xgmi.device_id,
++ tmp_adev->gmc.xgmi.hive_id, ret);
++ /* TODO: either continue with some nodes failed or disable the whole hive */
++ break;
++ }
++ }
++ if (!ret)
++ dev_info(adev->dev, "XGMI: Add node %d to hive 0x%llx.\n",
++ adev->gmc.xgmi.physical_node_id,
++ adev->gmc.xgmi.hive_id);
++
++exit:
++ mutex_unlock(&xgmi_mutex);
++ return ret;
++}
++
++
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5398-drm-amdgpu-Init-correct-fb-region-for-none-XGMI-conf.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5398-drm-amdgpu-Init-correct-fb-region-for-none-XGMI-conf.patch
new file mode 100644
index 00000000..718c5dc8
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5398-drm-amdgpu-Init-correct-fb-region-for-none-XGMI-conf.patch
@@ -0,0 +1,37 @@
+From 60d522653804dfc9ce1eced7176716c031c62562 Mon Sep 17 00:00:00 2001
+From: shaoyunl <Shaoyun.Liu@amd.com>
+Date: Mon, 10 Sep 2018 12:04:27 -0400
+Subject: [PATCH 5398/5725] drm/amdgpu: Init correct fb region for none XGMI
+ configuration
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Fixes: 5c777a5 ("drm/amdgpu/gmc9: Adjust GART and AGP location with xgmi offset")
+
+Change-Id: I2d78024fbe44a37f46a35d34c1e64dbd3937fdf1
+Signed-off-by: shaoyunl <Shaoyun.Liu@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+index 77907e2..9a5b252 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+@@ -121,6 +121,11 @@ void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
+ mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+ if (limit && limit < mc->real_vram_size)
+ mc->real_vram_size = limit;
++
++ if (mc->xgmi.num_physical_nodes == 0) {
++ mc->fb_start = mc->vram_start;
++ mc->fb_end = mc->vram_end;
++ }
+ dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
+ mc->mc_vram_size >> 20, mc->vram_start,
+ mc->vram_end, mc->real_vram_size >> 20);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5399-drm-amdgpu-fix-error-handling-in-amdgpu_cs_user_fenc.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5399-drm-amdgpu-fix-error-handling-in-amdgpu_cs_user_fenc.patch
new file mode 100644
index 00000000..cf6635f8
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5399-drm-amdgpu-fix-error-handling-in-amdgpu_cs_user_fenc.patch
@@ -0,0 +1,69 @@
+From 89d9d72014cafc00f722161e1aa002a4c1083125 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Mon, 10 Sep 2018 15:52:55 +0200
+Subject: [PATCH 5399/5725] drm/amdgpu: fix error handling in
+ amdgpu_cs_user_fence_chunk
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Slowly leaking memory one page at a time :)
+
+Change-Id: Ia4831bb463402e5ae7b851c96f9deefb8ec6b7f6
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 23 +++++++++++++++--------
+ 1 file changed, 15 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index a22d8ce..151eba0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -40,6 +40,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
+ {
+ struct drm_gem_object *gobj;
+ unsigned long size;
++ int r;
+
+ gobj = drm_gem_object_lookup(p->filp, data->handle);
+ if (gobj == NULL)
+@@ -51,20 +52,26 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
+ p->uf_entry.tv.shared = true;
+ p->uf_entry.user_pages = NULL;
+
+- size = amdgpu_bo_size(p->uf_entry.robj);
+- if (size != PAGE_SIZE || (data->offset + 8) > size)
+- return -EINVAL;
+-
+- *offset = data->offset;
+-
+ drm_gem_object_put_unlocked(gobj);
+
++ size = amdgpu_bo_size(p->uf_entry.robj);
++ if (size != PAGE_SIZE || (data->offset + 8) > size) {
++ r = -EINVAL;
++ goto error_unref;
++ }
++
+ if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) {
+- amdgpu_bo_unref(&p->uf_entry.robj);
+- return -EINVAL;
++ r = -EINVAL;
++ goto error_unref;
+ }
+
++ *offset = data->offset;
++
+ return 0;
++
++error_unref:
++ amdgpu_bo_unref(&p->uf_entry.robj);
++ return r;
+ }
+
+ static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
+--
+2.7.4
+
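The restructuring above is the usual kernel goto-unwind idiom: take the reference first, then route every later failure through one label that drops it, so no path can leak the BO reference. A compact user-space illustration of the same shape, where the "resource" is just heap memory:

    #include <stdlib.h>
    #include <string.h>

    /* Sketch of the goto-unwind idiom used by the fix: acquire a resource
     * up front, then make every later failure jump to one label that
     * releases it. */
    static int process(const char *data, size_t size)
    {
            char *buf;
            int r;

            buf = malloc(size);             /* resource acquired up front */
            if (!buf)
                    return -1;

            if (size == 0) {                /* first validation step */
                    r = -2;
                    goto error_free;
            }

            memcpy(buf, data, size);

            if (buf[0] == '\0') {           /* second validation step */
                    r = -3;
                    goto error_free;
            }

            free(buf);
            return 0;

    error_free:
            free(buf);
            return r;
    }

    int main(void)
    {
            return process("x", 1) == 0 ? 0 : 1;
    }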
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5400-drm-amdgpu-add-amdgpu_vm_pt_parent-helper.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5400-drm-amdgpu-add-amdgpu_vm_pt_parent-helper.patch
new file mode 100644
index 00000000..d0b6be66
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5400-drm-amdgpu-add-amdgpu_vm_pt_parent-helper.patch
@@ -0,0 +1,81 @@
+From f09c35c2eef64aa85c791a9781b6510ef3ab2792 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Thu, 30 Aug 2018 15:55:54 +0200
+Subject: [PATCH 5400/5725] drm/amdgpu: add amdgpu_vm_pt_parent helper
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Add a function to get the parent of a PD/PT.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 37 +++++++++++++++++++++-------------
+ 1 file changed, 23 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 64cbcff..90e959d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -304,6 +304,24 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
+ }
+
+ /**
++ * amdgpu_vm_pt_parent - get the parent page directory
++ *
++ * @pt: child page table
++ *
++ * Helper to get the parent entry for the child page table. NULL if we are at
++ * the root page directory.
++ */
++static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt)
++{
++ struct amdgpu_bo *parent = pt->base.bo->parent;
++
++ if (!parent)
++ return NULL;
++
++ return list_first_entry(&parent->va, struct amdgpu_vm_pt, base.bo_list);
++}
++
++/**
+ * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
+ *
+ * @vm: vm providing the BOs
+@@ -1135,25 +1153,16 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
+ }
+
+ while (!list_empty(&vm->relocated)) {
+- struct amdgpu_vm_bo_base *bo_base, *parent;
+ struct amdgpu_vm_pt *pt, *entry;
+- struct amdgpu_bo *bo;
+
+- bo_base = list_first_entry(&vm->relocated,
+- struct amdgpu_vm_bo_base,
+- vm_status);
+- bo_base->moved = false;
+- list_del_init(&bo_base->vm_status);
++ entry = list_first_entry(&vm->relocated, struct amdgpu_vm_pt,
++ base.vm_status);
++ list_del_init(&entry->base.vm_status);
+
+- bo = bo_base->bo->parent;
+- if (!bo)
++ pt = amdgpu_vm_pt_parent(entry);
++ if (!pt)
+ continue;
+
+- parent = list_first_entry(&bo->va, struct amdgpu_vm_bo_base,
+- bo_list);
+- pt = container_of(parent, struct amdgpu_vm_pt, base);
+- entry = container_of(bo_base, struct amdgpu_vm_pt, base);
+-
+ amdgpu_vm_update_pde(&params, vm, pt, entry);
+
+ if (!vm->use_cpu_for_update &&
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5401-drm-amdgpu-add-amdgpu_vm_update_func.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5401-drm-amdgpu-add-amdgpu_vm_update_func.patch
new file mode 100644
index 00000000..bb17d4e6
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5401-drm-amdgpu-add-amdgpu_vm_update_func.patch
@@ -0,0 +1,84 @@
+From c84fa23aef19e5674b09b849e69da16b505d7da3 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Thu, 30 Aug 2018 15:55:54 +0200
+Subject: [PATCH 5401/5725] drm/amdgpu: add amdgpu_vm_update_func
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Add helper to call the update function for both BO and shadow.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 32 +++++++++++++++++++++-----------
+ 1 file changed, 21 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 90e959d..fc1bd7d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -1042,6 +1042,22 @@ static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ return r;
+ }
+
++/**
++ * amdgpu_vm_update_func - helper to call update function
++ *
++ * Calls the update function for both the given BO and its shadow.
++ */
++static void amdgpu_vm_update_func(struct amdgpu_pte_update_params *params,
++ struct amdgpu_bo *bo,
++ uint64_t pe, uint64_t addr,
++ unsigned count, uint32_t incr,
++ uint64_t flags)
++{
++ if (bo->shadow)
++ params->func(params, bo->shadow, pe, addr, count, incr, flags);
++ params->func(params, bo, pe, addr, count, incr, flags);
++}
++
+ /*
+ * amdgpu_vm_update_pde - update a single level in the hierarchy
+ *
+@@ -1071,9 +1087,7 @@ static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
+ level += params->adev->vm_manager.root_level;
+ amdgpu_gmc_get_pde_for_bo(entry->base.bo, level, &pt, &flags);
+ pde = (entry - parent->entries) * 8;
+- if (bo->shadow)
+- params->func(params, bo->shadow, pde, pt, 1, 0, flags);
+- params->func(params, bo, pde, pt, 1, 0, flags);
++ amdgpu_vm_update_func(params, bo, pde, pt, 1, 0, flags);
+ }
+
+ /*
+@@ -1280,9 +1294,7 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
+ amdgpu_gmc_get_vm_pde(p->adev, AMDGPU_VM_PDB0, &dst, &flags);
+
+ pde = (entry - parent->entries) * 8;
+- if (parent->base.bo->shadow)
+- p->func(p, parent->base.bo->shadow, pde, dst, 1, 0, flags);
+- p->func(p, parent->base.bo, pde, dst, 1, 0, flags);
++ amdgpu_vm_update_func(p, parent->base.bo, pde, dst, 1, 0, flags);
+ }
+
+ /**
+@@ -1332,11 +1344,9 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
+
+ pt = entry->base.bo;
+ pe_start = (addr & mask) * 8;
+- if (pt->shadow)
+- params->func(params, pt->shadow, pe_start, dst, nptes,
+- AMDGPU_GPU_PAGE_SIZE, flags);
+- params->func(params, pt, pe_start, dst, nptes,
+- AMDGPU_GPU_PAGE_SIZE, flags);
++ amdgpu_vm_update_func(params, pt, pe_start, dst, nptes,
++ AMDGPU_GPU_PAGE_SIZE, flags);
++
+ }
+
+ return 0;
+--
+2.7.4
+
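The helper simply applies the update callback to the shadow BO (when one exists) and then to the BO itself, so the three call sites no longer duplicate the NULL check. A trimmed-down sketch of that factoring with simplified stand-in types:

    #include <stdio.h>

    /* Simplified stand-ins: a buffer object that may carry a shadow copy,
     * and an update callback that must be applied to both. */
    struct demo_bo {
            const char *name;
            struct demo_bo *shadow;    /* optional shadow copy */
    };

    typedef void (*update_func)(struct demo_bo *bo, unsigned long pe,
                                unsigned long addr);

    static void write_pte(struct demo_bo *bo, unsigned long pe, unsigned long addr)
    {
            printf("update %s: pe=0x%lx addr=0x%lx\n", bo->name, pe, addr);
    }

    /* Same idea as amdgpu_vm_update_func(): update the shadow first if it
     * exists, then the BO itself. */
    static void demo_update(update_func func, struct demo_bo *bo,
                            unsigned long pe, unsigned long addr)
    {
            if (bo->shadow)
                    func(bo->shadow, pe, addr);
            func(bo, pe, addr);
    }

    int main(void)
    {
            struct demo_bo shadow = { .name = "pt-shadow", .shadow = NULL };
            struct demo_bo pt     = { .name = "pt",        .shadow = &shadow };

            demo_update(write_pte, &pt, 0x100, 0x8000);
            return 0;
    }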
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5402-drm-amdgpu-Fix-SDMA-TO-after-GPU-reset-v3.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5402-drm-amdgpu-Fix-SDMA-TO-after-GPU-reset-v3.patch
new file mode 100644
index 00000000..84973c6f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5402-drm-amdgpu-Fix-SDMA-TO-after-GPU-reset-v3.patch
@@ -0,0 +1,55 @@
+From 834c2f1eb23ce8db4720cf957a101195826fd1dd Mon Sep 17 00:00:00 2001
+From: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Date: Mon, 10 Sep 2018 18:43:58 -0400
+Subject: [PATCH 5402/5725] drm/amdgpu: Fix SDMA TO after GPU reset v3
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+After a GPU reset, amdgpu_vm_clear_bo triggers a VM flush,
+but job->vm_pd_addr is not set, causing an SDMA timeout (TO).
+
+v2:
+Per advise by Christian König avoid flushing VM for jobs where
+job->vm_pd_addr wasn't explicitly set.
+
+v3:
+Shortcut vm_flush_needed early.
+
+Fixes: cbd5285 ("drm/amdgpu: move setting the GART addr into TTM")
+Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 1 +
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 3 ++-
+ 2 files changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+index 2d50825..5b2a38a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+@@ -73,6 +73,7 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
+ amdgpu_sync_create(&(*job)->sync);
+ amdgpu_sync_create(&(*job)->sched_sync);
+ (*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
++ (*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET;
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index fc1bd7d..1382b92 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -799,7 +799,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
+ }
+
+ gds_switch_needed &= !!ring->funcs->emit_gds_switch;
+- vm_flush_needed &= !!ring->funcs->emit_vm_flush;
++ vm_flush_needed &= !!ring->funcs->emit_vm_flush &&
++ job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
+ pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
+ ring->funcs->emit_wreg;
+
+--
+2.7.4
+
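The fix hinges on a sentinel: vm_pd_addr now starts out as AMDGPU_BO_INVALID_OFFSET, and a VM flush is only considered once something has explicitly set a real page-directory address. A tiny sketch of that "skip unless explicitly set" pattern; the sentinel value used here is an assumption, not the driver's actual constant:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Assumed sentinel meaning "no page-directory address was set". */
    #define DEMO_INVALID_OFFSET ((uint64_t)-1)

    struct demo_job {
            uint64_t vm_pd_addr;
    };

    static void demo_job_init(struct demo_job *job)
    {
            job->vm_pd_addr = DEMO_INVALID_OFFSET;  /* default: no flush */
    }

    static bool demo_vm_flush_needed(const struct demo_job *job, bool ring_can_flush)
    {
            /* Mirrors the patched condition: only flush when the ring supports
             * it AND the job carries a real page-directory address. */
            return ring_can_flush && job->vm_pd_addr != DEMO_INVALID_OFFSET;
    }

    int main(void)
    {
            struct demo_job job;

            demo_job_init(&job);
            printf("flush (unset): %d\n", demo_vm_flush_needed(&job, true));

            job.vm_pd_addr = 0x200000;      /* explicitly set by the CS path */
            printf("flush (set):   %d\n", demo_vm_flush_needed(&job, true));
            return 0;
    }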
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5403-drm-amdgpu-move-cs-dependencies-front-a-bit.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5403-drm-amdgpu-move-cs-dependencies-front-a-bit.patch
new file mode 100644
index 00000000..9d2495a1
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5403-drm-amdgpu-move-cs-dependencies-front-a-bit.patch
@@ -0,0 +1,49 @@
+From 6136e70250fdb243f944b5f8d64ec220f36dde31 Mon Sep 17 00:00:00 2001
+From: Chunming Zhou <david1.zhou@amd.com>
+Date: Tue, 11 Sep 2018 17:22:40 +0800
+Subject: [PATCH 5403/5725] drm/amdgpu: move cs dependencies front a bit
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+CS dependency handling doesn't need to run inside the VM reservation.
+
+Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index 151eba0..bac68ca 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -1296,6 +1296,12 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+ if (r)
+ goto out;
+
++ r = amdgpu_cs_dependencies(adev, &parser);
++ if (r) {
++ DRM_ERROR("Failed in the dependencies handling %d!\n", r);
++ goto out;
++ }
++
+ r = amdgpu_cs_parser_bos(&parser, data);
+ if (r) {
+ if (r == -ENOMEM)
+@@ -1307,12 +1313,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+
+ reserved_buffers = true;
+
+- r = amdgpu_cs_dependencies(adev, &parser);
+- if (r) {
+- DRM_ERROR("Failed in the dependencies handling %d!\n", r);
+- goto out;
+- }
+-
+ for (i = 0; i < parser.job->num_ibs; i++)
+ trace_amdgpu_cs(&parser, i);
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5404-drm-amdgpu-Move-fault-hash-table-to-amdgpu-vm.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5404-drm-amdgpu-Move-fault-hash-table-to-amdgpu-vm.patch
new file mode 100644
index 00000000..fb6db1a6
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5404-drm-amdgpu-Move-fault-hash-table-to-amdgpu-vm.patch
@@ -0,0 +1,411 @@
+From 5e67ffc6ef54eb3ce216765b6287d6573c4bd243 Mon Sep 17 00:00:00 2001
+From: Oak Zeng <Oak.Zeng@amd.com>
+Date: Wed, 5 Sep 2018 23:51:23 -0400
+Subject: [PATCH 5404/5725] drm/amdgpu: Move fault hash table to amdgpu vm
+
+Instead of sharing one fault hash table per device, make it
+per vm. This avoids an inter-process lock issue when the fault
+hash table is full.
+
+Change-Id: I5d1281b7c41eddc8e26113e010516557588d3708
+Signed-off-by: Oak Zeng <Oak.Zeng@amd.com>
+Suggested-by: Christian Konig <Christian.Koenig@amd.com>
+Suggested-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Reviewed-by: Christian Konig <christian.koenig@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c | 75 ------------------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h | 11 ----
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 102 ++++++++++++++++++++++++++++++++-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 12 ++++
+ drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 40 +++++--------
+ 5 files changed, 128 insertions(+), 112 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+index 06373d4..4ed8621 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+@@ -197,78 +197,3 @@ int amdgpu_ih_process(struct amdgpu_device *adev)
+ return IRQ_HANDLED;
+ }
+
+-/**
+- * amdgpu_ih_add_fault - Add a page fault record
+- *
+- * @adev: amdgpu device pointer
+- * @key: 64-bit encoding of PASID and address
+- *
+- * This should be called when a retry page fault interrupt is
+- * received. If this is a new page fault, it will be added to a hash
+- * table. The return value indicates whether this is a new fault, or
+- * a fault that was already known and is already being handled.
+- *
+- * If there are too many pending page faults, this will fail. Retry
+- * interrupts should be ignored in this case until there is enough
+- * free space.
+- *
+- * Returns 0 if the fault was added, 1 if the fault was already known,
+- * -ENOSPC if there are too many pending faults.
+- */
+-int amdgpu_ih_add_fault(struct amdgpu_device *adev, u64 key)
+-{
+- unsigned long flags;
+- int r = -ENOSPC;
+-
+- if (WARN_ON_ONCE(!adev->irq.ih.faults))
+- /* Should be allocated in <IP>_ih_sw_init on GPUs that
+- * support retry faults and require retry filtering.
+- */
+- return r;
+-
+- spin_lock_irqsave(&adev->irq.ih.faults->lock, flags);
+-
+- /* Only let the hash table fill up to 50% for best performance */
+- if (adev->irq.ih.faults->count >= (1 << (AMDGPU_PAGEFAULT_HASH_BITS-1)))
+- goto unlock_out;
+-
+- r = chash_table_copy_in(&adev->irq.ih.faults->hash, key, NULL);
+- if (!r)
+- adev->irq.ih.faults->count++;
+-
+- /* chash_table_copy_in should never fail unless we're losing count */
+- WARN_ON_ONCE(r < 0);
+-
+-unlock_out:
+- spin_unlock_irqrestore(&adev->irq.ih.faults->lock, flags);
+- return r;
+-}
+-
+-/**
+- * amdgpu_ih_clear_fault - Remove a page fault record
+- *
+- * @adev: amdgpu device pointer
+- * @key: 64-bit encoding of PASID and address
+- *
+- * This should be called when a page fault has been handled. Any
+- * future interrupt with this key will be processed as a new
+- * page fault.
+- */
+-void amdgpu_ih_clear_fault(struct amdgpu_device *adev, u64 key)
+-{
+- unsigned long flags;
+- int r;
+-
+- if (!adev->irq.ih.faults)
+- return;
+-
+- spin_lock_irqsave(&adev->irq.ih.faults->lock, flags);
+-
+- r = chash_table_remove(&adev->irq.ih.faults->hash, key, NULL);
+- if (!WARN_ON_ONCE(r < 0)) {
+- adev->irq.ih.faults->count--;
+- WARN_ON_ONCE(adev->irq.ih.faults->count < 0);
+- }
+-
+- spin_unlock_irqrestore(&adev->irq.ih.faults->lock, flags);
+-}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+index a23e1c0..0d5b3f5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+@@ -24,7 +24,6 @@
+ #ifndef __AMDGPU_IH_H__
+ #define __AMDGPU_IH_H__
+
+-#include <linux/chash.h>
+ #include "soc15_ih_clientid.h"
+
+ struct amdgpu_device;
+@@ -32,13 +31,6 @@ struct amdgpu_device;
+ #define AMDGPU_IH_CLIENTID_LEGACY 0
+ #define AMDGPU_IH_CLIENTID_MAX SOC15_IH_CLIENTID_MAX
+
+-#define AMDGPU_PAGEFAULT_HASH_BITS 8
+-struct amdgpu_retryfault_hashtable {
+- DECLARE_CHASH_TABLE(hash, AMDGPU_PAGEFAULT_HASH_BITS, 8, 0);
+- spinlock_t lock;
+- int count;
+-};
+-
+ /*
+ * R6xx+ IH ring
+ */
+@@ -57,7 +49,6 @@ struct amdgpu_ih_ring {
+ bool use_doorbell;
+ bool use_bus_addr;
+ dma_addr_t rb_dma_addr; /* only used when use_bus_addr = true */
+- struct amdgpu_retryfault_hashtable *faults;
+ };
+
+ #define AMDGPU_IH_SRC_DATA_MAX_SIZE_DW 4
+@@ -95,7 +86,5 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
+ bool use_bus_addr);
+ void amdgpu_ih_ring_fini(struct amdgpu_device *adev);
+ int amdgpu_ih_process(struct amdgpu_device *adev);
+-int amdgpu_ih_add_fault(struct amdgpu_device *adev, u64 key);
+-void amdgpu_ih_clear_fault(struct amdgpu_device *adev, u64 key);
+
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 1382b92..12f132b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -2673,6 +2673,22 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
+ adev->vm_manager.fragment_size);
+ }
+
++static struct amdgpu_retryfault_hashtable *init_fault_hash(void)
++{
++ struct amdgpu_retryfault_hashtable *fault_hash;
++
++ fault_hash = kmalloc(sizeof(*fault_hash), GFP_KERNEL);
++ if (!fault_hash)
++ return fault_hash;
++
++ INIT_CHASH_TABLE(fault_hash->hash,
++ AMDGPU_PAGEFAULT_HASH_BITS, 8, 0);
++ spin_lock_init(&fault_hash->lock);
++ fault_hash->count = 0;
++
++ return fault_hash;
++}
++
+ /**
+ * amdgpu_vm_init - initialize a vm instance
+ *
+@@ -2767,6 +2783,12 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ vm->pasid = pasid;
+ }
+
++ vm->fault_hash = init_fault_hash();
++ if (!vm->fault_hash) {
++ r = -ENOMEM;
++ goto error_free_root;
++ }
++
+ INIT_KFIFO(vm->faults);
+ vm->fault_credit = 16;
+
+@@ -2941,7 +2963,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+
+ /* Clear pending page faults from IH when the VM is destroyed */
+ while (kfifo_get(&vm->faults, &fault))
+- amdgpu_ih_clear_fault(adev, fault);
++ amdgpu_vm_clear_fault(vm->fault_hash, fault);
+
+ if (vm->pasid) {
+ unsigned long flags;
+@@ -2951,6 +2973,9 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+ spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
+ }
+
++ kfree(vm->fault_hash);
++ vm->fault_hash = NULL;
++
+ drm_sched_entity_fini(vm->entity.sched, &vm->entity);
+
+ if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
+@@ -3150,3 +3175,78 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
+ }
+ }
+ }
++
++/**
++ * amdgpu_vm_add_fault - Add a page fault record to fault hash table
++ *
++ * @fault_hash: fault hash table
++ * @key: 64-bit encoding of PASID and address
++ *
++ * This should be called when a retry page fault interrupt is
++ * received. If this is a new page fault, it will be added to a hash
++ * table. The return value indicates whether this is a new fault, or
++ * a fault that was already known and is already being handled.
++ *
++ * If there are too many pending page faults, this will fail. Retry
++ * interrupts should be ignored in this case until there is enough
++ * free space.
++ *
++ * Returns 0 if the fault was added, 1 if the fault was already known,
++ * -ENOSPC if there are too many pending faults.
++ */
++int amdgpu_vm_add_fault(struct amdgpu_retryfault_hashtable *fault_hash, u64 key)
++{
++ unsigned long flags;
++ int r = -ENOSPC;
++
++ if (WARN_ON_ONCE(!fault_hash))
++ /* Should be allocated in amdgpu_vm_init
++ */
++ return r;
++
++ spin_lock_irqsave(&fault_hash->lock, flags);
++
++ /* Only let the hash table fill up to 50% for best performance */
++ if (fault_hash->count >= (1 << (AMDGPU_PAGEFAULT_HASH_BITS-1)))
++ goto unlock_out;
++
++ r = chash_table_copy_in(&fault_hash->hash, key, NULL);
++ if (!r)
++ fault_hash->count++;
++
++ /* chash_table_copy_in should never fail unless we're losing count */
++ WARN_ON_ONCE(r < 0);
++
++unlock_out:
++ spin_unlock_irqrestore(&fault_hash->lock, flags);
++ return r;
++}
++
++/**
++ * amdgpu_vm_clear_fault - Remove a page fault record
++ *
++ * @fault_hash: fault hash table
++ * @key: 64-bit encoding of PASID and address
++ *
++ * This should be called when a page fault has been handled. Any
++ * future interrupt with this key will be processed as a new
++ * page fault.
++ */
++void amdgpu_vm_clear_fault(struct amdgpu_retryfault_hashtable *fault_hash, u64 key)
++{
++ unsigned long flags;
++ int r;
++
++ if (!fault_hash)
++ return;
++
++ spin_lock_irqsave(&fault_hash->lock, flags);
++
++ r = chash_table_remove(&fault_hash->hash, key, NULL);
++ if (!WARN_ON_ONCE(r < 0)) {
++ fault_hash->count--;
++ WARN_ON_ONCE(fault_hash->count < 0);
++ }
++
++ spin_unlock_irqrestore(&fault_hash->lock, flags);
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+index 6adc59b..a869ec8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+@@ -33,6 +33,7 @@
+ #include <linux/rbtree.h>
+ #include <drm/gpu_scheduler.h>
+ #include <drm/drm_file.h>
++#include <linux/chash.h>
+
+ #include "amdgpu_sync.h"
+ #include "amdgpu_ring.h"
+@@ -181,6 +182,13 @@ struct amdgpu_task_info {
+ pid_t tgid;
+ };
+
++#define AMDGPU_PAGEFAULT_HASH_BITS 8
++struct amdgpu_retryfault_hashtable {
++ DECLARE_CHASH_TABLE(hash, AMDGPU_PAGEFAULT_HASH_BITS, 8, 0);
++ spinlock_t lock;
++ int count;
++};
++
+ struct amdgpu_vm {
+ /* tree of virtual addresses mapped */
+ struct rb_root_cached va;
+@@ -232,6 +240,7 @@ struct amdgpu_vm {
+
+ /* Some basic info about the task */
+ struct amdgpu_task_info task_info;
++ struct amdgpu_retryfault_hashtable *fault_hash;
+ };
+
+ struct amdgpu_vm_manager {
+@@ -344,4 +353,7 @@ void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
+
+ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
+
++int amdgpu_vm_add_fault(struct amdgpu_retryfault_hashtable *fault_hash, u64 key);
++
++void amdgpu_vm_clear_fault(struct amdgpu_retryfault_hashtable *fault_hash, u64 key);
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+index 5ae5ed2..2559498 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+@@ -265,35 +265,36 @@ static bool vega10_ih_prescreen_iv(struct amdgpu_device *adev)
+ return true;
+ }
+
+- addr = ((u64)(dw5 & 0xf) << 44) | ((u64)dw4 << 12);
+- key = AMDGPU_VM_FAULT(pasid, addr);
+- r = amdgpu_ih_add_fault(adev, key);
+-
+- /* Hash table is full or the fault is already being processed,
+- * ignore further page faults
+- */
+- if (r != 0)
+- goto ignore_iv;
+-
+ /* Track retry faults in per-VM fault FIFO. */
+ spin_lock(&adev->vm_manager.pasid_lock);
+ vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
++ addr = ((u64)(dw5 & 0xf) << 44) | ((u64)dw4 << 12);
++ key = AMDGPU_VM_FAULT(pasid, addr);
+ if (!vm) {
+ /* VM not found, process it normally */
+ spin_unlock(&adev->vm_manager.pasid_lock);
+- amdgpu_ih_clear_fault(adev, key);
+ return true;
+- }
++ } else {
++ r = amdgpu_vm_add_fault(vm->fault_hash, key);
++
++ /* Hash table is full or the fault is already being processed,
++ * ignore further page faults
++ */
++ if (r != 0) {
++ spin_unlock(&adev->vm_manager.pasid_lock);
++ goto ignore_iv;
++ }
++ }
+ /* No locking required with single writer and single reader */
+ r = kfifo_put(&vm->faults, key);
+ if (!r) {
+ /* FIFO is full. Ignore it until there is space */
++ amdgpu_vm_clear_fault(vm->fault_hash, key);
+ spin_unlock(&adev->vm_manager.pasid_lock);
+- amdgpu_ih_clear_fault(adev, key);
+ goto ignore_iv;
+ }
+- spin_unlock(&adev->vm_manager.pasid_lock);
+
++ spin_unlock(&adev->vm_manager.pasid_lock);
+ /* It's the first fault for this address, process it normally */
+ return true;
+
+@@ -386,14 +387,6 @@ static int vega10_ih_sw_init(void *handle)
+ adev->irq.ih.use_doorbell = true;
+ adev->irq.ih.doorbell_index = AMDGPU_DOORBELL64_IH << 1;
+
+- adev->irq.ih.faults = kmalloc(sizeof(*adev->irq.ih.faults), GFP_KERNEL);
+- if (!adev->irq.ih.faults)
+- return -ENOMEM;
+- INIT_CHASH_TABLE(adev->irq.ih.faults->hash,
+- AMDGPU_PAGEFAULT_HASH_BITS, 8, 0);
+- spin_lock_init(&adev->irq.ih.faults->lock);
+- adev->irq.ih.faults->count = 0;
+-
+ r = amdgpu_irq_init(adev);
+
+ return r;
+@@ -406,9 +399,6 @@ static int vega10_ih_sw_fini(void *handle)
+ amdgpu_irq_fini(adev);
+ amdgpu_ih_ring_fini(adev);
+
+- kfree(adev->irq.ih.faults);
+- adev->irq.ih.faults = NULL;
+-
+ return 0;
+ }
+
+--
+2.7.4
+
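The per-VM retry-fault filter above boils down to a small hash table keyed by a 64-bit encoding of PASID and page address: a fault is recorded on first sight, duplicates are dropped, and the table is never allowed past half occupancy so lookups stay cheap. The standalone C sketch below illustrates that filtering logic only; it uses a toy open-addressing table and no locking, so the names, sizes and helpers are illustrative assumptions rather than the driver's chash-based implementation.

/* Minimal userspace sketch of the per-VM retry-fault filter described in
 * patch 5404. The kernel uses a closed hash table (chash) and a spinlock;
 * a tiny open-addressing table and no locking stand in for them here. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FAULT_HASH_BITS 8
#define FAULT_HASH_SIZE (1u << FAULT_HASH_BITS)

struct fault_filter {
	uint64_t keys[FAULT_HASH_SIZE];
	uint8_t used[FAULT_HASH_SIZE];
	int count;
};

/* Returns 0 if the fault is new, 1 if already pending, -1 if the table is
 * at least half full (mirrors the -ENOSPC case in amdgpu_vm_add_fault). */
static int fault_filter_add(struct fault_filter *f, uint64_t key)
{
	unsigned idx = (unsigned)(key % FAULT_HASH_SIZE);

	if (f->count >= FAULT_HASH_SIZE / 2)
		return -1;

	for (;;) {
		if (!f->used[idx]) {
			f->used[idx] = 1;
			f->keys[idx] = key;
			f->count++;
			return 0;		/* first time we see this fault */
		}
		if (f->keys[idx] == key)
			return 1;		/* duplicate retry fault, ignore */
		idx = (idx + 1) % FAULT_HASH_SIZE;
	}
}

int main(void)
{
	struct fault_filter f;
	uint64_t key = ((uint64_t)42 << 48) | 0x1000; /* fake PASID + address */

	memset(&f, 0, sizeof(f));
	printf("first add:  %d\n", fault_filter_add(&f, key)); /* 0 */
	printf("second add: %d\n", fault_filter_add(&f, key)); /* 1 */
	return 0;
}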
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5405-drm-amd-display-fix-ptr_ret.cocci-warnings.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5405-drm-amd-display-fix-ptr_ret.cocci-warnings.patch
new file mode 100644
index 00000000..d036227b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5405-drm-amd-display-fix-ptr_ret.cocci-warnings.patch
@@ -0,0 +1,36 @@
+From 83fb8cbedc86e3f28b68c2a343d11a78c454a1f7 Mon Sep 17 00:00:00 2001
+From: kbuild test robot <fengguang.wu@intel.com>
+Date: Wed, 12 Sep 2018 08:59:07 +0800
+Subject: [PATCH 5405/5725] drm/amd/display: fix ptr_ret.cocci warnings
+
+drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c:771:1-3: WARNING: PTR_ERR_OR_ZERO can be used
+
+ Use PTR_ERR_OR_ZERO rather than if(IS_ERR(...)) + PTR_ERR
+
+Generated by: scripts/coccinelle/api/ptr_ret.cocci
+
+Fixes: e498eb713604 ("drm/amd/display: Add support for hw_state logging via debugfs")
+CC: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Signed-off-by: kbuild test robot <fengguang.wu@intel.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+index 35ca732..0ef4a40 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+@@ -801,8 +801,5 @@ int dtn_debugfs_init(struct amdgpu_device *adev)
+ adev,
+ &dtn_log_fops);
+
+- if (IS_ERR(ent))
+- return PTR_ERR(ent);
+-
+- return 0;
++ return PTR_ERR_OR_ZERO(ent);
+ }
+--
+2.7.4
+
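For readers unfamiliar with the coccinelle rule, the change above collapses the common "if (IS_ERR(ptr)) return PTR_ERR(ptr); return 0;" tail into a single PTR_ERR_OR_ZERO() call. The sketch below shows the equivalence with simplified stand-ins for the kernel's err.h macros; it is illustrative only, not the kernel headers.

/* Demonstrates that PTR_ERR_OR_ZERO() and the open-coded IS_ERR/PTR_ERR
 * pair produce the same result. The macros are simplified approximations
 * of the kernel's error-pointer encoding. */
#include <stdio.h>
#include <stdint.h>

#define MAX_ERRNO 4095
#define IS_ERR(ptr)  ((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)
#define PTR_ERR(ptr) ((long)(ptr))
#define PTR_ERR_OR_ZERO(ptr) (IS_ERR(ptr) ? PTR_ERR(ptr) : 0)

static long old_style(void *ent)
{
	if (IS_ERR(ent))
		return PTR_ERR(ent);
	return 0;
}

int main(void)
{
	void *ok = &(int){0};			/* a valid pointer */
	void *bad = (void *)(uintptr_t)-12;	/* encodes -ENOMEM */

	printf("%ld %ld\n", old_style(ok), (long)PTR_ERR_OR_ZERO(ok));
	printf("%ld %ld\n", old_style(bad), (long)PTR_ERR_OR_ZERO(bad));
	return 0;
}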
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5406-drm-amdgpu-Add-error-message-when-register-failed-to.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5406-drm-amdgpu-Add-error-message-when-register-failed-to.patch
new file mode 100644
index 00000000..dac38478
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5406-drm-amdgpu-Add-error-message-when-register-failed-to.patch
@@ -0,0 +1,31 @@
+From 3a6bb2a44986255a0c79632c0d8b30ebe60efc0b Mon Sep 17 00:00:00 2001
+From: James Zhu <James.Zhu@amd.com>
+Date: Mon, 10 Sep 2018 12:53:25 -0400
+Subject: [PATCH 5406/5725] drm/amdgpu: Add error message when register failed
+ to reach expected value
+
+Add an error message when a register fails to reach the expected value. It
+will help discover potential issues.
+
+Signed-off-by: James Zhu <James.Zhu@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/soc15_common.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
+index 0942f49..f5d6025 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h
++++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
+@@ -56,6 +56,8 @@
+ tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \
+ loop--; \
+ if (!loop) { \
++ DRM_ERROR("Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n", \
++ inst, #reg, expected_value, (tmp_ & (mask))); \
+ ret = -ETIMEDOUT; \
+ break; \
+ } \
+--
+2.7.4
+
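The macro touched above polls a register until a masked value matches or a retry budget is exhausted; the patch adds a DRM_ERROR print in the timeout branch so the last observed value is visible. A hedged userspace approximation of that loop, with read_reg() standing in for the RREG32 accessor and fprintf standing in for DRM_ERROR, looks roughly like this:

/* Illustrative poll-with-timeout loop: keep reading until the masked value
 * matches the expectation, and report the last value read on timeout. */
#include <stdio.h>
#include <stdint.h>
#include <errno.h>

static uint32_t fake_reg = 0x5;			/* pretend hardware register */

static uint32_t read_reg(void) { return fake_reg; }

static int wait_for_value(uint32_t expected, uint32_t mask, int retries)
{
	uint32_t tmp = 0;

	while (retries--) {
		tmp = read_reg();
		if ((tmp & mask) == expected)
			return 0;
	}
	fprintf(stderr,
		"register failed to reach value 0x%08x != 0x%08x\n",
		expected, tmp & mask);
	return -ETIMEDOUT;
}

int main(void)
{
	printf("match:   %d\n", wait_for_value(0x5, 0xf, 10));
	printf("timeout: %d\n", wait_for_value(0x7, 0xf, 10));
	return 0;
}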
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5407-drm-amdgpu-add-some-VM-PD-PT-iterators-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5407-drm-amdgpu-add-some-VM-PD-PT-iterators-v2.patch
new file mode 100644
index 00000000..76fc4b63
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5407-drm-amdgpu-add-some-VM-PD-PT-iterators-v2.patch
@@ -0,0 +1,258 @@
+From 79666cb9cea0a983e99240bd1a1af571a0a26dab Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Sat, 1 Sep 2018 10:36:48 +0200
+Subject: [PATCH 5407/5725] drm/amdgpu: add some VM PD/PT iterators v2
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Both a leaf and a dfs iterator to walk over all the PDs/PTs.
+
+v2: update comments and fix for_each_amdgpu_vm_pt_dfs_safe
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 224 +++++++++++++++++++++++++++++++++
+ 1 file changed, 224 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 12f132b..dd6d1c5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -322,6 +322,230 @@ static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt)
+ }
+
+ /**
++ * amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt
++ */
++struct amdgpu_vm_pt_cursor {
++ uint64_t pfn;
++ struct amdgpu_vm_pt *parent;
++ struct amdgpu_vm_pt *entry;
++ unsigned level;
++};
++
++/**
++ * amdgpu_vm_pt_start - start PD/PT walk
++ *
++ * @adev: amdgpu_device pointer
++ * @vm: amdgpu_vm structure
++ * @start: start address of the walk
++ * @cursor: state to initialize
++ *
++ * Initialize a amdgpu_vm_pt_cursor to start a walk.
++ */
++static void amdgpu_vm_pt_start(struct amdgpu_device *adev,
++ struct amdgpu_vm *vm, uint64_t start,
++ struct amdgpu_vm_pt_cursor *cursor)
++{
++ cursor->pfn = start;
++ cursor->parent = NULL;
++ cursor->entry = &vm->root;
++ cursor->level = adev->vm_manager.root_level;
++}
++
++/**
++ * amdgpu_vm_pt_descendant - go to child node
++ *
++ * @adev: amdgpu_device pointer
++ * @cursor: current state
++ *
++ * Walk to the child node of the current node.
++ * Returns:
++ * True if the walk was possible, false otherwise.
++ */
++static bool amdgpu_vm_pt_descendant(struct amdgpu_device *adev,
++ struct amdgpu_vm_pt_cursor *cursor)
++{
++ unsigned num_entries, shift, idx;
++
++ if (!cursor->entry->entries)
++ return false;
++
++ BUG_ON(!cursor->entry->base.bo);
++ num_entries = amdgpu_vm_num_entries(adev, cursor->level);
++ shift = amdgpu_vm_level_shift(adev, cursor->level);
++
++ ++cursor->level;
++ idx = (cursor->pfn >> shift) % num_entries;
++ cursor->parent = cursor->entry;
++ cursor->entry = &cursor->entry->entries[idx];
++ return true;
++}
++
++/**
++ * amdgpu_vm_pt_sibling - go to sibling node
++ *
++ * @adev: amdgpu_device pointer
++ * @cursor: current state
++ *
++ * Walk to the sibling node of the current node.
++ * Returns:
++ * True if the walk was possible, false otherwise.
++ */
++static bool amdgpu_vm_pt_sibling(struct amdgpu_device *adev,
++ struct amdgpu_vm_pt_cursor *cursor)
++{
++ unsigned shift, num_entries;
++
++ /* Root doesn't have a sibling */
++ if (!cursor->parent)
++ return false;
++
++ /* Go to our parents and see if we got a sibling */
++ shift = amdgpu_vm_level_shift(adev, cursor->level - 1);
++ num_entries = amdgpu_vm_num_entries(adev, cursor->level - 1);
++
++ if (cursor->entry == &cursor->parent->entries[num_entries - 1])
++ return false;
++
++ cursor->pfn += 1ULL << shift;
++ cursor->pfn &= ~((1ULL << shift) - 1);
++ ++cursor->entry;
++ return true;
++}
++
++/**
++ * amdgpu_vm_pt_ancestor - go to parent node
++ *
++ * @cursor: current state
++ *
++ * Walk to the parent node of the current node.
++ * Returns:
++ * True if the walk was possible, false otherwise.
++ */
++static bool amdgpu_vm_pt_ancestor(struct amdgpu_vm_pt_cursor *cursor)
++{
++ if (!cursor->parent)
++ return false;
++
++ --cursor->level;
++ cursor->entry = cursor->parent;
++ cursor->parent = amdgpu_vm_pt_parent(cursor->parent);
++ return true;
++}
++
++/**
++ * amdgpu_vm_pt_next - get next PD/PT in hierarchy
++ *
++ * @adev: amdgpu_device pointer
++ * @cursor: current state
++ *
++ * Walk the PD/PT tree to the next node.
++ */
++static void amdgpu_vm_pt_next(struct amdgpu_device *adev,
++ struct amdgpu_vm_pt_cursor *cursor)
++{
++ /* First try a newborn child */
++ if (amdgpu_vm_pt_descendant(adev, cursor))
++ return;
++
++ /* If that didn't worked try to find a sibling */
++ while (!amdgpu_vm_pt_sibling(adev, cursor)) {
++ /* No sibling, go to our parents and grandparents */
++ if (!amdgpu_vm_pt_ancestor(cursor)) {
++ cursor->pfn = ~0ll;
++ return;
++ }
++ }
++}
++
++/**
++ * amdgpu_vm_pt_first_leaf - get first leaf PD/PT
++ *
++ * @adev: amdgpu_device pointer
++ * @vm: amdgpu_vm structure
++ * @start: start addr of the walk
++ * @cursor: state to initialize
++ *
++ * Start a walk and go directly to the leaf node.
++ */
++static void amdgpu_vm_pt_first_leaf(struct amdgpu_device *adev,
++ struct amdgpu_vm *vm, uint64_t start,
++ struct amdgpu_vm_pt_cursor *cursor)
++{
++ amdgpu_vm_pt_start(adev, vm, start, cursor);
++ while (amdgpu_vm_pt_descendant(adev, cursor));
++}
++
++/**
++ * amdgpu_vm_pt_next_leaf - get next leaf PD/PT
++ *
++ * @adev: amdgpu_device pointer
++ * @cursor: current state
++ *
++ * Walk the PD/PT tree to the next leaf node.
++ */
++static void amdgpu_vm_pt_next_leaf(struct amdgpu_device *adev,
++ struct amdgpu_vm_pt_cursor *cursor)
++{
++ amdgpu_vm_pt_next(adev, cursor);
++ while (amdgpu_vm_pt_descendant(adev, cursor));
++}
++
++/**
++ * for_each_amdgpu_vm_pt_leaf - walk over all leaf PDs/PTs in the hierarchy
++ */
++#define for_each_amdgpu_vm_pt_leaf(adev, vm, start, end, cursor) \
++ for (amdgpu_vm_pt_first_leaf((adev), (vm), (start), &(cursor)); \
++ (cursor).pfn <= end; amdgpu_vm_pt_next_leaf((adev), &(cursor)))
++
++/**
++ * amdgpu_vm_pt_first_dfs - start a depth first search
++ *
++ * @adev: amdgpu_device structure
++ * @vm: amdgpu_vm structure
++ * @cursor: state to initialize
++ *
++ * Starts a depth first traversal of the PD/PT tree.
++ */
++static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev,
++ struct amdgpu_vm *vm,
++ struct amdgpu_vm_pt_cursor *cursor)
++{
++ amdgpu_vm_pt_start(adev, vm, 0, cursor);
++ while (amdgpu_vm_pt_descendant(adev, cursor));
++}
++
++/**
++ * amdgpu_vm_pt_next_dfs - get the next node for a depth first search
++ *
++ * @adev: amdgpu_device structure
++ * @cursor: current state
++ *
++ * Move the cursor to the next node in a depth first search.
++ */
++static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev,
++ struct amdgpu_vm_pt_cursor *cursor)
++{
++ if (!cursor->entry)
++ return;
++
++ if (!cursor->parent)
++ cursor->entry = NULL;
++ else if (amdgpu_vm_pt_sibling(adev, cursor))
++ while (amdgpu_vm_pt_descendant(adev, cursor));
++ else
++ amdgpu_vm_pt_ancestor(cursor);
++}
++
++/**
++ * for_each_amdgpu_vm_pt_dfs_safe - safe depth first search of all PDs/PTs
++ */
++#define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, cursor, entry) \
++ for (amdgpu_vm_pt_first_dfs((adev), (vm), &(cursor)), \
++ (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor));\
++ (entry); (entry) = (cursor).entry, \
++ amdgpu_vm_pt_next_dfs((adev), &(cursor)))
++
++/**
+ * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
+ *
+ * @vm: vm providing the BOs
+--
+2.7.4
+
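The iterators introduced above replace recursion with an explicit cursor that can step to a child, a sibling, or the parent, and the leaf and DFS walks are built from those three moves. The self-contained C sketch below walks a small fixed-fanout tree with the same primitives; the structures and fanout are illustrative assumptions, not the amdgpu page-table layout.

/* Cursor-style pre-order walk of a fixed-fanout tree: descendant(),
 * sibling() and ancestor() mirror the amdgpu_vm_pt_* helpers, and next()
 * shows how the walk advances without recursion. */
#include <stdio.h>
#include <stdlib.h>

#define FANOUT 2
#define LEVELS 3			/* root + 2 levels below it */

struct node {
	struct node *parent;
	struct node *child[FANOUT];
	int id;
};

struct cursor {
	struct node *entry;
	unsigned level;
};

static int descendant(struct cursor *c)
{
	if (!c->entry->child[0])
		return 0;		/* leaf, can't go deeper */
	c->entry = c->entry->child[0];
	c->level++;
	return 1;
}

static int sibling(struct cursor *c)
{
	struct node *p = c->entry->parent;
	int i;

	if (!p)
		return 0;		/* root has no sibling */
	for (i = 0; i < FANOUT - 1; i++)
		if (p->child[i] == c->entry) {
			c->entry = p->child[i + 1];
			return 1;
		}
	return 0;			/* already the last child */
}

static int ancestor(struct cursor *c)
{
	if (!c->entry->parent)
		return 0;
	c->entry = c->entry->parent;
	c->level--;
	return 1;
}

/* Advance: first child, else a sibling, else climb until one is found. */
static int next(struct cursor *c)
{
	if (descendant(c))
		return 1;
	while (!sibling(c))
		if (!ancestor(c))
			return 0;	/* walk finished */
	return 1;
}

static struct node *build(struct node *parent, int depth, int *id)
{
	struct node *n = calloc(1, sizeof(*n));
	int i;

	n->parent = parent;
	n->id = (*id)++;
	if (depth > 1)
		for (i = 0; i < FANOUT; i++)
			n->child[i] = build(n, depth - 1, id);
	return n;
}

int main(void)
{
	int id = 0;
	struct node *root = build(NULL, LEVELS, &id);
	struct cursor c = { root, 0 };

	do {
		printf("visit node %d at level %u\n", c.entry->id, c.level);
	} while (next(&c));
	return 0;	/* tree intentionally leaked in this tiny demo */
}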
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5408-drm-amdgpu-use-leaf-iterator-for-allocating-PD-PT.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5408-drm-amdgpu-use-leaf-iterator-for-allocating-PD-PT.patch
new file mode 100644
index 00000000..c80708a8
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5408-drm-amdgpu-use-leaf-iterator-for-allocating-PD-PT.patch
@@ -0,0 +1,212 @@
+From 4df7af9cc5ca7dd6876f2a8a7532c89145874e51 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Sat, 1 Sep 2018 12:03:37 +0200
+Subject: [PATCH 5408/5725] drm/amdgpu: use leaf iterator for allocating PD/PT
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Less code and allows for easier error handling.
+
+Change-Id: Iecf14e422b289d440b6d77547f9c3a1025635d55
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 156 ++++++++++++---------------------
+ 1 file changed, 55 insertions(+), 101 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index dd6d1c5..7499956 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -774,103 +774,6 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ }
+
+ /**
+- * amdgpu_vm_alloc_levels - allocate the PD/PT levels
+- *
+- * @adev: amdgpu_device pointer
+- * @vm: requested vm
+- * @parent: parent PT
+- * @saddr: start of the address range
+- * @eaddr: end of the address range
+- * @level: VMPT level
+- * @ats: indicate ATS support from PTE
+- *
+- * Make sure the page directories and page tables are allocated
+- *
+- * Returns:
+- * 0 on success, errno otherwise.
+- */
+-static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
+- struct amdgpu_vm *vm,
+- struct amdgpu_vm_pt *parent,
+- uint64_t saddr, uint64_t eaddr,
+- unsigned level, bool ats)
+-{
+- unsigned shift = amdgpu_vm_level_shift(adev, level);
+- struct amdgpu_bo_param bp;
+- unsigned pt_idx, from, to;
+- int r;
+-
+- if (!parent->entries) {
+- unsigned num_entries = amdgpu_vm_num_entries(adev, level);
+-
+- parent->entries = kvmalloc_array(num_entries,
+- sizeof(struct amdgpu_vm_pt),
+- GFP_KERNEL | __GFP_ZERO);
+- if (!parent->entries)
+- return -ENOMEM;
+- }
+-
+- from = saddr >> shift;
+- to = eaddr >> shift;
+- if (from >= amdgpu_vm_num_entries(adev, level) ||
+- to >= amdgpu_vm_num_entries(adev, level))
+- return -EINVAL;
+-
+- ++level;
+- saddr = saddr & ((1 << shift) - 1);
+- eaddr = eaddr & ((1 << shift) - 1);
+-
+- amdgpu_vm_bo_param(adev, vm, level, &bp);
+-
+- /* walk over the address space and allocate the page tables */
+- for (pt_idx = from; pt_idx <= to; ++pt_idx) {
+- struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
+- struct amdgpu_bo *pt;
+-
+- if (!entry->base.bo) {
+- r = amdgpu_bo_create(adev, &bp, &pt);
+- if (r)
+- return r;
+-
+- r = amdgpu_vm_clear_bo(adev, vm, pt, level, ats);
+- if (r) {
+- amdgpu_bo_unref(&pt->shadow);
+- amdgpu_bo_unref(&pt);
+- return r;
+- }
+-
+- if (vm->use_cpu_for_update) {
+- r = amdgpu_bo_kmap(pt, NULL);
+- if (r) {
+- amdgpu_bo_unref(&pt->shadow);
+- amdgpu_bo_unref(&pt);
+- return r;
+- }
+- }
+-
+- /* Keep a reference to the root directory to avoid
+- * freeing them up in the wrong order.
+- */
+- pt->parent = amdgpu_bo_ref(parent->base.bo);
+-
+- amdgpu_vm_bo_base_init(&entry->base, vm, pt);
+- }
+-
+- if (level < AMDGPU_VM_PTB) {
+- uint64_t sub_saddr = (pt_idx == from) ? saddr : 0;
+- uint64_t sub_eaddr = (pt_idx == to) ? eaddr :
+- ((1 << shift) - 1);
+- r = amdgpu_vm_alloc_levels(adev, vm, entry, sub_saddr,
+- sub_eaddr, level, ats);
+- if (r)
+- return r;
+- }
+- }
+-
+- return 0;
+-}
+-
+-/**
+ * amdgpu_vm_alloc_pts - Allocate page tables.
+ *
+ * @adev: amdgpu_device pointer
+@@ -878,7 +781,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
+ * @saddr: Start address which needs to be allocated
+ * @size: Size from start address we need.
+ *
+- * Make sure the page tables are allocated.
++ * Make sure the page directories and page tables are allocated
+ *
+ * Returns:
+ * 0 on success, errno otherwise.
+@@ -887,8 +790,11 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ uint64_t saddr, uint64_t size)
+ {
+- uint64_t eaddr;
++ struct amdgpu_vm_pt_cursor cursor;
++ struct amdgpu_bo *pt;
+ bool ats = false;
++ uint64_t eaddr;
++ int r;
+
+ /* validate the parameters */
+ if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)
+@@ -908,8 +814,56 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
+ return -EINVAL;
+ }
+
+- return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr,
+- adev->vm_manager.root_level, ats);
++ for_each_amdgpu_vm_pt_leaf(adev, vm, saddr, eaddr, cursor) {
++ struct amdgpu_vm_pt *entry = cursor.entry;
++ struct amdgpu_bo_param bp;
++
++ if (cursor.level < AMDGPU_VM_PTB) {
++ unsigned num_entries;
++
++ num_entries = amdgpu_vm_num_entries(adev, cursor.level);
++ entry->entries = kvmalloc_array(num_entries,
++ sizeof(*entry->entries),
++ GFP_KERNEL |
++ __GFP_ZERO);
++ if (!entry->entries)
++ return -ENOMEM;
++ }
++
++
++ if (entry->base.bo)
++ continue;
++
++ amdgpu_vm_bo_param(adev, vm, cursor.level, &bp);
++
++ r = amdgpu_bo_create(adev, &bp, &pt);
++ if (r)
++ return r;
++
++ r = amdgpu_vm_clear_bo(adev, vm, pt, cursor.level, ats);
++ if (r)
++ goto error_free_pt;
++
++ if (vm->use_cpu_for_update) {
++ r = amdgpu_bo_kmap(pt, NULL);
++ if (r)
++ goto error_free_pt;
++ }
++
++ /* Keep a reference to the root directory to avoid
++ * freeing them up in the wrong order.
++ */
++ pt->parent = amdgpu_bo_ref(cursor.parent->base.bo);
++
++ amdgpu_vm_bo_base_init(&entry->base, vm, pt);
++ }
++
++ return 0;
++
++error_free_pt:
++ amdgpu_bo_unref(&pt->shadow);
++ amdgpu_bo_unref(&pt);
++ return r;
+ }
+
+ /**
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5409-drm-amdgpu-use-dfs-iterator-to-free-PDs-PTs.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5409-drm-amdgpu-use-dfs-iterator-to-free-PDs-PTs.patch
new file mode 100644
index 00000000..106dd8ca
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5409-drm-amdgpu-use-dfs-iterator-to-free-PDs-PTs.patch
@@ -0,0 +1,110 @@
+From a133b900c05d632e06cc7d5a9136473e4f07f846 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Thu, 6 Sep 2018 15:35:13 +0200
+Subject: [PATCH 5409/5725] drm/amdgpu: use dfs iterator to free PDs/PTs
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Allows us to free all PDs/PTs without recursion.
+
+Change-Id: I4d40cc769b5bf29e619fe2886ee785a62e72f961
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 62 ++++++++++++++++------------------
+ 1 file changed, 30 insertions(+), 32 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 7499956..f90fced 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -867,6 +867,35 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
+ }
+
+ /**
++ * amdgpu_vm_free_pts - free PD/PT levels
++ *
++ * @adev: amdgpu device structure
++ * @parent: PD/PT starting level to free
++ * @level: level of parent structure
++ *
++ * Free the page directory or page table level and all sub levels.
++ */
++static void amdgpu_vm_free_pts(struct amdgpu_device *adev,
++ struct amdgpu_vm *vm)
++{
++ struct amdgpu_vm_pt_cursor cursor;
++ struct amdgpu_vm_pt *entry;
++
++ for_each_amdgpu_vm_pt_dfs_safe(adev, vm, cursor, entry) {
++
++ if (entry->base.bo) {
++ list_del(&entry->base.bo_list);
++ list_del(&entry->base.vm_status);
++ amdgpu_bo_unref(&entry->base.bo->shadow);
++ amdgpu_bo_unref(&entry->base.bo);
++ }
++ kvfree(entry->entries);
++ }
++
++ BUG_ON(vm->root.base.bo);
++}
++
++/**
+ * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
+ *
+ * @adev: amdgpu_device pointer
+@@ -3091,36 +3120,6 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, uns
+ }
+
+ /**
+- * amdgpu_vm_free_levels - free PD/PT levels
+- *
+- * @adev: amdgpu device structure
+- * @parent: PD/PT starting level to free
+- * @level: level of parent structure
+- *
+- * Free the page directory or page table level and all sub levels.
+- */
+-static void amdgpu_vm_free_levels(struct amdgpu_device *adev,
+- struct amdgpu_vm_pt *parent,
+- unsigned level)
+-{
+- unsigned i, num_entries = amdgpu_vm_num_entries(adev, level);
+-
+- if (parent->base.bo) {
+- list_del(&parent->base.bo_list);
+- list_del(&parent->base.vm_status);
+- amdgpu_bo_unref(&parent->base.bo->shadow);
+- amdgpu_bo_unref(&parent->base.bo);
+- }
+-
+- if (parent->entries)
+- for (i = 0; i < num_entries; i++)
+- amdgpu_vm_free_levels(adev, &parent->entries[i],
+- level + 1);
+-
+- kvfree(parent->entries);
+-}
+-
+-/**
+ * amdgpu_vm_fini - tear down a vm instance
+ *
+ * @adev: amdgpu_device pointer
+@@ -3179,8 +3178,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+ if (r) {
+ dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
+ } else {
+- amdgpu_vm_free_levels(adev, &vm->root,
+- adev->vm_manager.root_level);
++ amdgpu_vm_free_pts(adev, vm);
+ amdgpu_bo_unreserve(root);
+ }
+ amdgpu_bo_unref(&root);
+--
+2.7.4
+
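The key property of for_each_amdgpu_vm_pt_dfs_safe used above is that the current entry is captured before the cursor advances, so the entry can be torn down while the walk continues. The toy sketch below demonstrates that safe-advance pattern on a singly linked list purely to keep it short; the data structure is a stand-in, the pattern is the point.

/* "Safe" iteration: read the next pointer before freeing the current
 * element, so freeing never invalidates the traversal state. */
#include <stdio.h>
#include <stdlib.h>

struct entry {
	int id;
	struct entry *next;
};

int main(void)
{
	struct entry *head = NULL, *cur, *e;
	int i;

	for (i = 0; i < 4; i++) {		/* build 3 -> 2 -> 1 -> 0 */
		e = malloc(sizeof(*e));
		e->id = i;
		e->next = head;
		head = e;
	}

	/* advance first, then free the element we already stepped past */
	for (e = head, cur = e ? e->next : NULL; e;
	     e = cur, cur = e ? e->next : NULL) {
		printf("freeing entry %d\n", e->id);
		free(e);
	}
	return 0;
}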
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5410-drm-amdgpu-use-the-DFS-iterator-in-amdgpu_vm_invalid.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5410-drm-amdgpu-use-the-DFS-iterator-in-amdgpu_vm_invalid.patch
new file mode 100644
index 00000000..212ef081
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5410-drm-amdgpu-use-the-DFS-iterator-in-amdgpu_vm_invalid.patch
@@ -0,0 +1,84 @@
+From 24b8b95c7e40044418e6052d89aa9485d4c2c7f7 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Sat, 8 Sep 2018 13:05:34 +0200
+Subject: [PATCH 5410/5725] drm/amdgpu: use the DFS iterator in
+ amdgpu_vm_invalidate_pds v2
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Less code and easier to maintain.
+
+v2: rename the function as well
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 32 ++++++++------------------------
+ 1 file changed, 8 insertions(+), 24 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index f90fced..4cffd7e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -1299,37 +1299,22 @@ static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
+ }
+
+ /*
+- * amdgpu_vm_invalidate_level - mark all PD levels as invalid
++ * amdgpu_vm_invalidate_pds - mark all PDs as invalid
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: related vm
+- * @parent: parent PD
+- * @level: VMPT level
+ *
+ * Mark all PD level as invalid after an error.
+ */
+-static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev,
+- struct amdgpu_vm *vm,
+- struct amdgpu_vm_pt *parent,
+- unsigned level)
++static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
++ struct amdgpu_vm *vm)
+ {
+- unsigned pt_idx, num_entries;
+-
+- /*
+- * Recurse into the subdirectories. This recursion is harmless because
+- * we only have a maximum of 5 layers.
+- */
+- num_entries = amdgpu_vm_num_entries(adev, level);
+- for (pt_idx = 0; pt_idx < num_entries; ++pt_idx) {
+- struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
+-
+- if (!entry->base.bo)
+- continue;
++ struct amdgpu_vm_pt_cursor cursor;
++ struct amdgpu_vm_pt *entry;
+
+- if (!entry->base.moved)
++ for_each_amdgpu_vm_pt_dfs_safe(adev, vm, cursor, entry)
++ if (entry->base.bo && !entry->base.moved)
+ amdgpu_vm_bo_relocated(&entry->base);
+- amdgpu_vm_invalidate_level(adev, vm, entry, level + 1);
+- }
+ }
+
+ /*
+@@ -1426,8 +1411,7 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
+ return 0;
+
+ error:
+- amdgpu_vm_invalidate_level(adev, vm, &vm->root,
+- adev->vm_manager.root_level);
++ amdgpu_vm_invalidate_pds(adev, vm);
+ amdgpu_job_free(job);
+ return r;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5411-drm-amdgpu-use-leaf-iterator-for-filling-PTs.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5411-drm-amdgpu-use-leaf-iterator-for-filling-PTs.patch
new file mode 100644
index 00000000..da1c7f48
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5411-drm-amdgpu-use-leaf-iterator-for-filling-PTs.patch
@@ -0,0 +1,115 @@
+From 55feb264cf39b73086e927d581f3e5fc442c877a Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Mon, 3 Sep 2018 14:34:51 +0200
+Subject: [PATCH 5411/5725] drm/amdgpu: use leaf iterator for filling PTs
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Less overhead and is the starting point for further cleanups and
+improvements.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 62 ++++++++--------------------------
+ 1 file changed, 15 insertions(+), 47 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 4cffd7e..1a3d19f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -1417,36 +1417,6 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
+ }
+
+ /**
+- * amdgpu_vm_find_entry - find the entry for an address
+- *
+- * @p: see amdgpu_pte_update_params definition
+- * @addr: virtual address in question
+- * @entry: resulting entry or NULL
+- * @parent: parent entry
+- *
+- * Find the vm_pt entry and it's parent for the given address.
+- */
+-void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr,
+- struct amdgpu_vm_pt **entry,
+- struct amdgpu_vm_pt **parent)
+-{
+- unsigned level = p->adev->vm_manager.root_level;
+-
+- *parent = NULL;
+- *entry = &p->vm->root;
+- while ((*entry)->entries) {
+- unsigned shift = amdgpu_vm_level_shift(p->adev, level++);
+-
+- *parent = *entry;
+- *entry = &(*entry)->entries[addr >> shift];
+- addr &= (1ULL << shift) - 1;
+- }
+-
+- if (level != AMDGPU_VM_PTB)
+- *entry = NULL;
+-}
+-
+-/**
+ * amdgpu_vm_handle_huge_pages - handle updating the PD with huge pages
+ *
+ * @p: see amdgpu_pte_update_params definition
+@@ -1509,36 +1479,34 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
+ {
+ struct amdgpu_device *adev = params->adev;
+ const uint64_t mask = AMDGPU_VM_PTE_COUNT(adev) - 1;
+-
+- uint64_t addr, pe_start;
+- struct amdgpu_bo *pt;
+- unsigned nptes;
++ struct amdgpu_vm_pt_cursor cursor;
+
+ /* walk over the address space and update the page tables */
+- for (addr = start; addr < end; addr += nptes,
+- dst += nptes * AMDGPU_GPU_PAGE_SIZE) {
+- struct amdgpu_vm_pt *entry, *parent;
++ for_each_amdgpu_vm_pt_leaf(adev, params->vm, start, end - 1, cursor) {
++ struct amdgpu_bo *pt = cursor.entry->base.bo;
++ uint64_t pe_start;
++ unsigned nptes;
+
+- amdgpu_vm_get_entry(params, addr, &entry, &parent);
+- if (!entry)
++ if (!pt || cursor.level != AMDGPU_VM_PTB)
+ return -ENOENT;
+
+- if ((addr & ~mask) == (end & ~mask))
+- nptes = end - addr;
++ if ((cursor.pfn & ~mask) == (end & ~mask))
++ nptes = end - cursor.pfn;
+ else
+- nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
++ nptes = AMDGPU_VM_PTE_COUNT(adev) - (cursor.pfn & mask);
+
+- amdgpu_vm_handle_huge_pages(params, entry, parent,
++ amdgpu_vm_handle_huge_pages(params, cursor.entry, cursor.parent,
+ nptes, dst, flags);
+ /* We don't need to update PTEs for huge pages */
+- if (entry->huge)
++ if (cursor.entry->huge) {
++ dst += nptes * AMDGPU_GPU_PAGE_SIZE;
+ continue;
++ }
+
+- pt = entry->base.bo;
+- pe_start = (addr & mask) * 8;
++ pe_start = (cursor.pfn & mask) * 8;
+ amdgpu_vm_update_func(params, pt, pe_start, dst, nptes,
+ AMDGPU_GPU_PAGE_SIZE, flags);
+-
++ dst += nptes * AMDGPU_GPU_PAGE_SIZE;
+ }
+
+ return 0;
+--
+2.7.4
+
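Independent of the iterator change, the hunk above keeps the per-table chunking: within each page table the update covers either the rest of the mapping or the rest of that table, whichever ends first. The small sketch below reproduces that nptes computation with an assumed 512 entries per table; the numbers are illustrative, not taken from a specific ASIC.

/* Chunk a [start, end) range of page frame numbers into per-table updates,
 * the same arithmetic amdgpu_vm_update_ptes uses on cursor.pfn. */
#include <stdio.h>
#include <stdint.h>

#define PTE_COUNT 512ULL		/* assumed entries per page table */

int main(void)
{
	const uint64_t mask = PTE_COUNT - 1;
	uint64_t start = 500, end = 1200;	/* page frame numbers */
	uint64_t pfn = start;

	while (pfn < end) {
		uint64_t nptes;

		if ((pfn & ~mask) == (end & ~mask))
			nptes = end - pfn;		  /* last table */
		else
			nptes = PTE_COUNT - (pfn & mask); /* to table end */

		printf("table %llu: update %llu PTEs starting at slot %llu\n",
		       (unsigned long long)(pfn / PTE_COUNT),
		       (unsigned long long)nptes,
		       (unsigned long long)(pfn & mask));
		pfn += nptes;
	}
	return 0;
}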
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5412-drm-amd-display-Fix-pflip-IRQ-status-after-gpu-reset.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5412-drm-amd-display-Fix-pflip-IRQ-status-after-gpu-reset.patch
new file mode 100644
index 00000000..70bbe402
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5412-drm-amd-display-Fix-pflip-IRQ-status-after-gpu-reset.patch
@@ -0,0 +1,39 @@
+From 057709821d19a5c5a0d4a406e589394cf2f63769 Mon Sep 17 00:00:00 2001
+From: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Date: Wed, 12 Sep 2018 16:38:57 -0400
+Subject: [PATCH 5412/5725] drm/amd/display: Fix pflip IRQ status after gpu
+ reset.
+
+Problem:
+After GPU reset pflip completion IRQ is disabled and hence
+any subsequent mode set or plane update leads to hang.
+
+Fix:
+Unless acrtc->otg_inst is initialized to -1 during display
+block initialization, during resume from GPU reset
+amdgpu_irq_gpu_reset_resume_helper will override the CRTC 0 pflip
+IRQ value with whatever value was on every other unused CRTC, because
+dm_irq_state will do irq_source = dal_irq_type + acrtc->otg_inst,
+where acrtc->otg_inst will be 0 for every unused CRTC.
+
+Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 641f715..0c33419 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -3697,6 +3697,7 @@ static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
+
+ acrtc->crtc_id = crtc_index;
+ acrtc->base.enabled = false;
++ acrtc->otg_inst = -1;
+
+ dm->adev->mode_info.crtcs[crtc_index] = acrtc;
+ drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5413-drm-amdgpu-remove-amdgpu_bo_list_entry.robj.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5413-drm-amdgpu-remove-amdgpu_bo_list_entry.robj.patch
new file mode 100644
index 00000000..36aa3590
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5413-drm-amdgpu-remove-amdgpu_bo_list_entry.robj.patch
@@ -0,0 +1,328 @@
+From 664fc2e9091d08e3d12771d5b230746a17aa061c Mon Sep 17 00:00:00 2001
+From: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+Date: Wed, 9 Jan 2019 21:06:53 +0530
+Subject: [PATCH 5413/5725] drm/amdgpu: remove amdgpu_bo_list_entry.robj
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+We can get that just by casting tv.bo.
+
+Change-Id: I68eb9bfc0048fb5ea093c6f9777bb9260a989235
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+Signed-off-by: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 42 ++++++++++++----------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h | 1 -
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 55 +++++++++++++++++------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 3 +-
+ 4 files changed, 57 insertions(+), 44 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+index 06fa232..1d2e04c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+@@ -49,8 +49,11 @@ static void amdgpu_bo_list_free(struct kref *ref)
+ refcount);
+ struct amdgpu_bo_list_entry *e;
+
+- amdgpu_bo_list_for_each_entry(e, list)
+- amdgpu_bo_unref(&e->robj);
++ amdgpu_bo_list_for_each_entry(e, list) {
++ struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
++
++ amdgpu_bo_unref(&bo);
++ }
+
+ call_rcu(&list->rhead, amdgpu_bo_list_free_rcu);
+ }
+@@ -112,21 +115,20 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
+ entry = &array[last_entry++];
+ }
+
+- entry->robj = bo;
+ entry->priority = min(info[i].bo_priority,
+ AMDGPU_BO_LIST_MAX_PRIORITY);
+- entry->tv.bo = &entry->robj->tbo;
+- entry->tv.shared = !entry->robj->prime_shared_count;
+-
+- if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GDS)
+- list->gds_obj = entry->robj;
+- if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GWS)
+- list->gws_obj = entry->robj;
+- if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_OA)
+- list->oa_obj = entry->robj;
+-
+- total_size += amdgpu_bo_size(entry->robj);
+- trace_amdgpu_bo_list_set(list, entry->robj);
++ entry->tv.bo = &bo->tbo;
++ entry->tv.shared = !bo->prime_shared_count;
++
++ if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GDS)
++ list->gds_obj = bo;
++ if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GWS)
++ list->gws_obj = bo;
++ if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_OA)
++ list->oa_obj = bo;
++
++ total_size += amdgpu_bo_size(bo);
++ trace_amdgpu_bo_list_set(list, bo);
+ }
+
+ list->first_userptr = first_userptr;
+@@ -138,8 +140,11 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
+ return 0;
+
+ error_free:
+- while (i--)
+- amdgpu_bo_unref(&array[i].robj);
++ while (i--) {
++ struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo);
++
++ amdgpu_bo_unref(&bo);
++ }
+ kvfree(list);
+ return r;
+
+@@ -191,9 +196,10 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
+ * with the same priority, i.e. it must be stable.
+ */
+ amdgpu_bo_list_for_each_entry(e, list) {
++ struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
+ unsigned priority = e->priority;
+
+- if (!e->robj->parent)
++ if (!bo->parent)
+ list_add_tail(&e->tv.head, &bucket[priority]);
+
+ e->user_pages = NULL;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
+index 61b0897..7c5f5d1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
+@@ -32,7 +32,6 @@ struct amdgpu_bo_va;
+ struct amdgpu_fpriv;
+
+ struct amdgpu_bo_list_entry {
+- struct amdgpu_bo *robj;
+ struct ttm_validate_buffer tv;
+ struct amdgpu_bo_va *bo_va;
+ uint32_t priority;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index bac68ca..bbfcd76 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -39,6 +39,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
+ uint32_t *offset)
+ {
+ struct drm_gem_object *gobj;
++ struct amdgpu_bo *bo;
+ unsigned long size;
+ int r;
+
+@@ -46,21 +47,21 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
+ if (gobj == NULL)
+ return -EINVAL;
+
+- p->uf_entry.robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
++ bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
+ p->uf_entry.priority = 0;
+- p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
++ p->uf_entry.tv.bo = &bo->tbo;
+ p->uf_entry.tv.shared = true;
+ p->uf_entry.user_pages = NULL;
+
+ drm_gem_object_put_unlocked(gobj);
+
+- size = amdgpu_bo_size(p->uf_entry.robj);
++ size = amdgpu_bo_size(bo);
+ if (size != PAGE_SIZE || (data->offset + 8) > size) {
+ r = -EINVAL;
+ goto error_unref;
+ }
+
+- if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) {
++ if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
+ r = -EINVAL;
+ goto error_unref;
+ }
+@@ -70,7 +71,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
+ return 0;
+
+ error_unref:
+- amdgpu_bo_unref(&p->uf_entry.robj);
++ amdgpu_bo_unref(&bo);
+ return r;
+ }
+
+@@ -229,7 +230,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
+ goto free_all_kdata;
+ }
+
+- if (p->uf_entry.robj)
++ if (p->uf_entry.tv.bo)
+ p->job->uf_addr = uf_offset;
+ kfree(chunk_array);
+
+@@ -458,13 +459,13 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
+ p->evictable = list_prev_entry(p->evictable, tv.head)) {
+
+ struct amdgpu_bo_list_entry *candidate = p->evictable;
+- struct amdgpu_bo *bo = candidate->robj;
++ struct amdgpu_bo *bo = ttm_to_amdgpu_bo(candidate->tv.bo);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ bool update_bytes_moved_vis;
+ uint32_t other;
+
+ /* If we reached our current BO we can forget it */
+- if (candidate->robj == validated)
++ if (bo == validated)
+ break;
+
+ /* We can't move pinned BOs here */
+@@ -529,7 +530,7 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
+ int r;
+
+ list_for_each_entry(lobj, validated, tv.head) {
+- struct amdgpu_bo *bo = lobj->robj;
++ struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
+ bool binding_userptr = false;
+ struct mm_struct *usermm;
+
+@@ -604,7 +605,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
+ INIT_LIST_HEAD(&duplicates);
+ amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
+
+- if (p->uf_entry.robj && !p->uf_entry.robj->parent)
++ if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent)
+ list_add(&p->uf_entry.tv.head, &p->validated);
+
+ while (1) {
+@@ -620,7 +621,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
+
+ INIT_LIST_HEAD(&need_pages);
+ amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+- struct amdgpu_bo *bo = e->robj;
++ struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
+
+ if (amdgpu_ttm_tt_userptr_invalidated(bo->tbo.ttm,
+ &e->user_invalidated) && e->user_pages) {
+@@ -640,7 +641,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
+ list_del(&e->tv.head);
+ list_add(&e->tv.head, &need_pages);
+
+- amdgpu_bo_unreserve(e->robj);
++ amdgpu_bo_unreserve(bo);
+ }
+ }
+
+@@ -659,7 +660,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
+
+ /* Fill the page arrays for all userptrs. */
+ list_for_each_entry(e, &need_pages, tv.head) {
+- struct ttm_tt *ttm = e->robj->tbo.ttm;
++ struct ttm_tt *ttm = e->tv.bo->ttm;
+
+ e->user_pages = kvmalloc_array(ttm->num_pages,
+ sizeof(struct page*),
+@@ -718,7 +719,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
+ oa = p->bo_list->oa_obj;
+
+ amdgpu_bo_list_for_each_entry(e, p->bo_list)
+- e->bo_va = amdgpu_vm_bo_find(vm, e->robj);
++ e->bo_va = amdgpu_vm_bo_find(vm, ttm_to_amdgpu_bo(e->tv.bo));
+
+ if (gds) {
+ p->job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
+@@ -733,8 +734,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
+ p->job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT;
+ }
+
+- if (!r && p->uf_entry.robj) {
+- struct amdgpu_bo *uf = p->uf_entry.robj;
++ if (!r && p->uf_entry.tv.bo) {
++ struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo);
+
+ r = amdgpu_ttm_alloc_gart(&uf->tbo);
+ p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
+@@ -765,9 +766,11 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
+ int r;
+
+ list_for_each_entry(e, &p->validated, tv.head) {
+- struct reservation_object *resv = e->robj->tbo.resv;
++ struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
++ struct reservation_object *resv = bo->tbo.resv;
++
+ r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
+- amdgpu_bo_explicit_sync(e->robj));
++ amdgpu_bo_explicit_sync(bo));
+
+ if (r)
+ return r;
+@@ -810,7 +813,11 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
+ kfree(parser->chunks);
+ if (parser->job)
+ amdgpu_job_free(parser->job);
+- amdgpu_bo_unref(&parser->uf_entry.robj);
++ if (parser->uf_entry.tv.bo) {
++ struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo);
++
++ amdgpu_bo_unref(&uf);
++ }
+ }
+
+ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
+@@ -855,7 +862,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
+ struct dma_fence *f;
+
+ /* ignore duplicates */
+- bo = e->robj;
++ bo = ttm_to_amdgpu_bo(e->tv.bo);
+ if (!bo)
+ continue;
+
+@@ -888,11 +895,13 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
+ if (amdgpu_vm_debug) {
+ /* Invalidate all BOs to test for userspace bugs */
+ amdgpu_bo_list_for_each_entry(e, p->bo_list) {
++ struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
++
+ /* ignore duplicates */
+- if (!e->robj)
++ if (!bo)
+ continue;
+
+- amdgpu_vm_bo_invalidate(adev, e->robj, false);
++ amdgpu_vm_bo_invalidate(adev, bo, false);
+ }
+ }
+
+@@ -1223,7 +1232,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
+ /* No memory allocation is allowed while holding the mn lock */
+ amdgpu_mn_lock(p->mn);
+ amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+- struct amdgpu_bo *bo = e->robj;
++ struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
+
+ if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
+ r = -ERESTARTSYS;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 1a3d19f..d359284 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -559,9 +559,8 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
+ struct list_head *validated,
+ struct amdgpu_bo_list_entry *entry)
+ {
+- entry->robj = vm->root.base.bo;
+ entry->priority = 0;
+- entry->tv.bo = &entry->robj->tbo;
++ entry->tv.bo = &vm->root.base.bo->tbo;
+ entry->tv.shared = true;
+ entry->user_pages = NULL;
+ list_add(&entry->tv.head, validated);
+--
+2.7.4
+
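Dropping the cached robj pointer above works because ttm_to_amdgpu_bo() is simply a container_of() cast: the ttm_buffer_object is embedded in the amdgpu_bo, so the outer object is always recoverable from tv.bo. The trimmed structures in the sketch below exist only to show that cast and are assumptions, not the real driver definitions.

/* Recover the embedding object from a pointer to its embedded member,
 * the pattern behind ttm_to_amdgpu_bo(). */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ttm_buffer_object {		/* inner object, what tv.bo points at */
	unsigned long num_pages;
};

struct amdgpu_bo {			/* outer object, what robj used to cache */
	int prime_shared_count;
	struct ttm_buffer_object tbo;
};

static struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
{
	return container_of(tbo, struct amdgpu_bo, tbo);
}

int main(void)
{
	struct amdgpu_bo bo = { .prime_shared_count = 3 };
	struct ttm_buffer_object *tv_bo = &bo.tbo;	/* like entry->tv.bo */

	printf("recovered shared count: %d\n",
	       ttm_to_amdgpu_bo(tv_bo)->prime_shared_count);
	return 0;
}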
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5414-drm-amdgpu-remove-amdgpu_bo_list_entry.robj-for-rele.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5414-drm-amdgpu-remove-amdgpu_bo_list_entry.robj-for-rele.patch
new file mode 100644
index 00000000..0b8c7c34
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5414-drm-amdgpu-remove-amdgpu_bo_list_entry.robj-for-rele.patch
@@ -0,0 +1,36 @@
+From 71b04857751ff1d8d5b84d68bd776f4b6880d8e6 Mon Sep 17 00:00:00 2001
+From: Prike Liang <Prike.Liang@amd.com>
+Date: Thu, 20 Sep 2018 13:32:10 +0800
+Subject: [PATCH 5414/5725] drm/amdgpu: remove amdgpu_bo_list_entry.robj for
+ release_pages
+
+Change-Id: I7574db0f5e355a400bf023b8bc41775068217fd5
+Signed-off-by: Prike Liang <Prike.Liang@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index bbfcd76..a6e65a5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -748,12 +748,12 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
+ error_free_pages:
+
+ amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
++ struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
+ if (!e->user_pages)
+ continue;
+
+ release_pages(e->user_pages,
+- e->robj->tbo.ttm->num_pages,
+- false);
++ bo->tbo.ttm->num_pages, false);
+ kvfree(e->user_pages);
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5415-drm-amdgpu-fix-compilation-of-amdgpu_amdkfd_gpuvm.c.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5415-drm-amdgpu-fix-compilation-of-amdgpu_amdkfd_gpuvm.c.patch
new file mode 100644
index 00000000..9cd7b8e7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5415-drm-amdgpu-fix-compilation-of-amdgpu_amdkfd_gpuvm.c.patch
@@ -0,0 +1,38 @@
+From 0b8a75965c6a2351ba4e07a74e7268833bffea5c Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 13 Sep 2018 14:07:01 -0500
+Subject: [PATCH 5415/5725] drm/amdgpu: fix compilation of
+ amdgpu_amdkfd_gpuvm.c
+
+Trivial.
+
+Fixes: "drm/amdgpu: remove amdgpu_bo_list_entry.robj"
+
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index 1390136..c3446ef 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -681,7 +681,6 @@ static int reserve_bo_and_vm(struct kgd_mem *mem,
+ if (!ctx->vm_pd)
+ return -ENOMEM;
+
+- ctx->kfd_bo.robj = bo;
+ ctx->kfd_bo.priority = 0;
+ ctx->kfd_bo.tv.bo = &bo->tbo;
+ ctx->kfd_bo.tv.shared = true;
+@@ -746,7 +745,6 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
+ return -ENOMEM;
+ }
+
+- ctx->kfd_bo.robj = bo;
+ ctx->kfd_bo.priority = 0;
+ ctx->kfd_bo.tv.bo = &bo->tbo;
+ ctx->kfd_bo.tv.shared = true;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5416-drm-amdgpu-use-a-single-linked-list-for-amdgpu_vm_bo.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5416-drm-amdgpu-use-a-single-linked-list-for-amdgpu_vm_bo.patch
new file mode 100644
index 00000000..d90d00ad
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5416-drm-amdgpu-use-a-single-linked-list-for-amdgpu_vm_bo.patch
@@ -0,0 +1,201 @@
+From deae27f7a94dbd5009815a3bc62443b3d5db9233 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Mon, 10 Sep 2018 20:02:46 +0200
+Subject: [PATCH 5416/5725] drm/amdgpu: use a single linked list for
+ amdgpu_vm_bo_base
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Use a singly linked list instead of the doubly linked list. This gets the
+size of amdgpu_vm_pt down to 64 bytes again.
+
+We could even reduce it down to 32 bytes, but that would require some
+rather extreme hacks.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 4 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 4 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 72 +++++++++++++++++-------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 2 +-
+ 4 files changed, 47 insertions(+), 35 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index b2947f4..77f0d2e 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -469,10 +469,10 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
+
+ bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
+ if (bo == NULL)
+- return -ENOMEM;
++ return -ENOMEM;
+ drm_gem_private_object_init(adev->ddev, &bo->gem_base, size);
+ INIT_LIST_HEAD(&bo->shadow_list);
+- INIT_LIST_HEAD(&bo->va);
++ bo->vm_bo = NULL;
+ bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
+ bp->domain;
+ bo->allowed_domains = bo->preferred_domains;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+index 3674265..9a5c138 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+@@ -89,8 +89,8 @@ struct amdgpu_bo {
+ void *metadata;
+ u32 metadata_size;
+ unsigned prime_shared_count;
+- /* list of all virtual address to which this bo is associated to */
+- struct list_head va;
++ /* per VM structure for page tables and with virtual addresses */
++ struct amdgpu_vm_bo_base *vm_bo;
+ /* Constant after initialization */
+ struct drm_gem_object gem_base;
+ struct amdgpu_bo *parent;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index d359284..1e09cf3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -273,33 +273,34 @@ static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
+ *
+ */
+ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
+- struct amdgpu_vm *vm,
+- struct amdgpu_bo *bo)
++ struct amdgpu_vm *vm,
++ struct amdgpu_bo *bo)
+ {
+- base->vm = vm;
+- base->bo = bo;
+- INIT_LIST_HEAD(&base->bo_list);
+- INIT_LIST_HEAD(&base->vm_status);
++ base->vm = vm;
++ base->bo = bo;
++ base->next = NULL;
++ INIT_LIST_HEAD(&base->vm_status);
+
+- if (!bo)
+- return;
+- list_add_tail(&base->bo_list, &bo->va);
++ if (!bo)
++ return;
++ base->next = bo->vm_bo;
++ bo->vm_bo = base;
+
+- if (bo->tbo.type == ttm_bo_type_kernel)
+- amdgpu_vm_bo_relocated(base);
++ if (bo->tbo.type == ttm_bo_type_kernel)
++ amdgpu_vm_bo_relocated(base);
+
+- if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
+- return;
++ if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
++ return;
+
+- if (bo->preferred_domains &
+- amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
+- return;
++ if (bo->preferred_domains &
++ amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
++ return;
+
+- /*
+- * we checked all the prerequisites, but it looks like this per vm bo
+- * is currently evicted. add the bo to the evicted list to make sure it
+- * is validated on next vm use to avoid fault.
+- * */
++ /*
++ * we checked all the prerequisites, but it looks like this per vm bo
++ * is currently evicted. add the bo to the evicted list to make sure it
++ * is validated on next vm use to avoid fault.
++ * */
+ amdgpu_vm_bo_evicted(base);
+ }
+
+@@ -318,7 +319,7 @@ static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt)
+ if (!parent)
+ return NULL;
+
+- return list_first_entry(&parent->va, struct amdgpu_vm_pt, base.bo_list);
++ return container_of(parent->vm_bo, struct amdgpu_vm_pt, base);
+ }
+
+ /**
+@@ -883,7 +884,7 @@ static void amdgpu_vm_free_pts(struct amdgpu_device *adev,
+ for_each_amdgpu_vm_pt_dfs_safe(adev, vm, cursor, entry) {
+
+ if (entry->base.bo) {
+- list_del(&entry->base.bo_list);
++ entry->base.bo->vm_bo = NULL;
+ list_del(&entry->base.vm_status);
+ amdgpu_bo_unref(&entry->base.bo->shadow);
+ amdgpu_bo_unref(&entry->base.bo);
+@@ -1091,12 +1092,13 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
+ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo)
+ {
+- struct amdgpu_bo_va *bo_va;
++ struct amdgpu_vm_bo_base *base;
+
+- list_for_each_entry(bo_va, &bo->va, base.bo_list) {
+- if (bo_va->base.vm == vm) {
+- return bo_va;
+- }
++ for (base = bo->vm_bo; base; base = base->next) {
++ if (base->vm != vm)
++ continue;
++
++ return container_of(base, struct amdgpu_bo_va, base);
+ }
+ return NULL;
+ }
+@@ -2649,9 +2651,19 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
+ struct amdgpu_bo_va *bo_va)
+ {
+ struct amdgpu_bo_va_mapping *mapping, *next;
++ struct amdgpu_bo *bo = bo_va->base.bo;
+ struct amdgpu_vm *vm = bo_va->base.vm;
++ struct amdgpu_vm_bo_base **base;
+
+- list_del(&bo_va->base.bo_list);
++ if (bo) {
++ for (base = &bo_va->base.bo->vm_bo; *base;
++ base = &(*base)->next) {
++ if (*base != &bo_va->base)
++ continue;
++ *base = bo_va->base.next;
++ break;
++ }
++ }
+
+ spin_lock(&vm->moved_lock);
+ list_del(&bo_va->base.vm_status);
+@@ -2693,7 +2705,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
+ if (bo->parent && bo->parent->shadow == bo)
+ bo = bo->parent;
+
+- list_for_each_entry(bo_base, &bo->va, bo_list) {
++ for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
+ struct amdgpu_vm *vm = bo_base->vm;
+
+ if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+index a869ec8..ff711a4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+@@ -132,7 +132,7 @@ struct amdgpu_vm_bo_base {
+ struct amdgpu_bo *bo;
+
+ /* protected by bo being reserved */
+- struct list_head bo_list;
++ struct amdgpu_vm_bo_base *next;
+
+ /* protected by spinlock */
+ struct list_head vm_status;
+--
+2.7.4
+
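The conversion above replaces the per-BO list_head with an intrusive singly linked list threaded through a next pointer, which is what shrinks amdgpu_vm_pt back to 64 bytes. A small user-space sketch of the two list operations the patch uses, with simplified struct names (insert at the head, and unlink via a pointer-to-pointer walk as in the new amdgpu_vm_bo_rmv() loop):

    struct vm_bo_base {
        struct vm_bo_base *next;
    };

    struct bo {
        struct vm_bo_base *vm_bo;   /* head of the per-BO list */
    };

    /* Push a new base onto the head of the BO's list. */
    static void base_add(struct bo *bo, struct vm_bo_base *base)
    {
        base->next = bo->vm_bo;
        bo->vm_bo = base;
    }

    /* Unlink without a prev pointer by walking a pointer-to-pointer. */
    static void base_del(struct bo *bo, struct vm_bo_base *base)
    {
        struct vm_bo_base **p;

        for (p = &bo->vm_bo; *p; p = &(*p)->next) {
            if (*p == base) {
                *p = base->next;
                break;
            }
        }
    }
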
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5417-drm-amdgpu-Style-fixes-to-PRIME-code-documentation.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5417-drm-amdgpu-Style-fixes-to-PRIME-code-documentation.patch
new file mode 100644
index 00000000..e1b4a63d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5417-drm-amdgpu-Style-fixes-to-PRIME-code-documentation.patch
@@ -0,0 +1,189 @@
+From 4e963b77c4568c5bf6222c24178112467dae6813 Mon Sep 17 00:00:00 2001
+From: Vijetha Malkai <vijetha.malkai@amd.com>
+Date: Thu, 13 Sep 2018 14:47:39 -0400
+Subject: [PATCH 5417/5725] drm/amdgpu: Style fixes to PRIME code documentation
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+* Use consistent capitalization in the description of function arguments
+* Define and consistently use the BO acronym for buffer objects
+* Some minor wording improvements
+
+Change-Id: I6083b5e56e9e61868c15de1becfe9ad162c85dc1
+Signed-off-by: Vijetha Malkai <vijetha.malkai@amd.com>
+Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 52 +++++++++++++++----------------
+ 1 file changed, 26 insertions(+), 26 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+index 8c93c9e..2a34639 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+@@ -42,10 +42,10 @@
+ /**
+ * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
+ * implementation
+- * @obj: GEM buffer object
++ * @obj: GEM buffer object (BO)
+ *
+ * Returns:
+- * A scatter/gather table for the pinned pages of the buffer object's memory.
++ * A scatter/gather table for the pinned pages of the BO's memory.
+ */
+ struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
+ {
+@@ -57,9 +57,9 @@ struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
+
+ /**
+ * amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation
+- * @obj: GEM buffer object
++ * @obj: GEM BO
+ *
+- * Sets up an in-kernel virtual mapping of the buffer object's memory.
++ * Sets up an in-kernel virtual mapping of the BO's memory.
+ *
+ * Returns:
+ * The virtual address of the mapping or an error pointer.
+@@ -79,10 +79,10 @@ void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
+
+ /**
+ * amdgpu_gem_prime_vunmap - &dma_buf_ops.vunmap implementation
+- * @obj: GEM buffer object
+- * @vaddr: virtual address (unused)
++ * @obj: GEM BO
++ * @vaddr: Virtual address (unused)
+ *
+- * Tears down the in-kernel virtual mapping of the buffer object's memory.
++ * Tears down the in-kernel virtual mapping of the BO's memory.
+ */
+ void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+ {
+@@ -93,14 +93,14 @@ void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+
+ /**
+ * amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation
+- * @obj: GEM buffer object
+- * @vma: virtual memory area
++ * @obj: GEM BO
++ * @vma: Virtual memory area
+ *
+- * Sets up a userspace mapping of the buffer object's memory in the given
++ * Sets up a userspace mapping of the BO's memory in the given
+ * virtual memory area.
+ *
+ * Returns:
+- * 0 on success or negative error code.
++ * 0 on success or a negative error code on failure.
+ */
+ int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+ {
+@@ -143,10 +143,10 @@ int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma
+ * @attach: DMA-buf attachment
+ * @sg: Scatter/gather table
+ *
+- * Import shared DMA buffer memory exported by another device.
++ * Imports shared DMA buffer memory exported by another device.
+ *
+ * Returns:
+- * A new GEM buffer object of the given DRM device, representing the memory
++ * A new GEM BO of the given DRM device, representing the memory
+ * described by the given DMA-buf attachment and scatter/gather table.
+ */
+ struct drm_gem_object *
+@@ -191,7 +191,7 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) || !defined(BUILD_AS_DKMS)
+ /**
+ * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
+- * @dma_buf: shared DMA buffer
++ * @dma_buf: Shared DMA buffer
+ * @attach: DMA-buf attachment
+ *
+ * Makes sure that the shared DMA buffer can be accessed by the target device.
+@@ -199,7 +199,7 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
+ * all DMA devices.
+ *
+ * Returns:
+- * 0 on success or negative error code.
++ * 0 on success or negative error code on failure.
+ */
+ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
+ struct device *target_dev,
+@@ -252,11 +252,11 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
+
+ /**
+ * amdgpu_gem_map_detach - &dma_buf_ops.detach implementation
+- * @dma_buf: shared DMA buffer
++ * @dma_buf: Shared DMA buffer
+ * @attach: DMA-buf attachment
+ *
+ * This is called when a shared DMA buffer no longer needs to be accessible by
+- * the other device. For now, simply unpins the buffer from GTT.
++ * another device. For now, simply unpins the buffer from GTT.
+ */
+ static void amdgpu_gem_map_detach(struct dma_buf *dma_buf,
+ struct dma_buf_attachment *attach)
+@@ -281,10 +281,10 @@ static void amdgpu_gem_map_detach(struct dma_buf *dma_buf,
+
+ /**
+ * amdgpu_gem_prime_res_obj - &drm_driver.gem_prime_res_obj implementation
+- * @obj: GEM buffer object
++ * @obj: GEM BO
+ *
+ * Returns:
+- * The buffer object's reservation object.
++ * The BO's reservation object.
+ */
+ struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
+ {
+@@ -296,15 +296,15 @@ struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) || !defined(BUILD_AS_DKMS)
+ /**
+ * amdgpu_gem_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
+- * @dma_buf: shared DMA buffer
+- * @direction: direction of DMA transfer
++ * @dma_buf: Shared DMA buffer
++ * @direction: Direction of DMA transfer
+ *
+ * This is called before CPU access to the shared DMA buffer's memory. If it's
+ * a read access, the buffer is moved to the GTT domain if possible, for optimal
+ * CPU read performance.
+ *
+ * Returns:
+- * 0 on success or negative error code.
++ * 0 on success or negative error code on failure.
+ */
+ static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
+ enum dma_data_direction direction)
+@@ -354,14 +354,14 @@ const struct dma_buf_ops amdgpu_dmabuf_ops = {
+ /**
+ * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
+ * @dev: DRM device
+- * @gobj: GEM buffer object
+- * @flags: flags like DRM_CLOEXEC and DRM_RDWR
++ * @gobj: GEM BO
++ * @flags: Flags such as DRM_CLOEXEC and DRM_RDWR.
+ *
+ * The main work is done by the &drm_gem_prime_export helper, which in turn
+ * uses &amdgpu_gem_prime_res_obj.
+ *
+ * Returns:
+- * Shared DMA buffer representing the GEM buffer object from the given device.
++ * Shared DMA buffer representing the GEM BO from the given device.
+ */
+ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *gobj,
+@@ -394,7 +394,7 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
+ * uses &amdgpu_gem_prime_import_sg_table.
+ *
+ * Returns:
+- * GEM buffer object representing the shared DMA buffer for the given device.
++ * GEM BO representing the shared DMA buffer for the given device.
+ */
+ struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5418-drm-amd-display-add-aux-i2c-event-log.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5418-drm-amd-display-add-aux-i2c-event-log.patch
new file mode 100644
index 00000000..fba70f00
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5418-drm-amd-display-add-aux-i2c-event-log.patch
@@ -0,0 +1,91 @@
+From 118a723ddf294b6b084744879bc907d3526a7ac0 Mon Sep 17 00:00:00 2001
+From: Chiawen Huang <chiawen.huang@amd.com>
+Date: Wed, 29 Aug 2018 18:39:38 +0800
+Subject: [PATCH 5418/5725] drm/amd/display: add aux i2c event log.
+
+[Why]
+support i2c transition event log
+
+[How]
+Refined the aux REQ and REP events in the aux flow.
+Commented out the REQ and REP events in the i2c flow.
+
+note: i2c event log is currently commented out. more work is required
+to find a protocol parser and to generate events for the parser
+
+Signed-off-by: Chiawen Huang <chiawen.huang@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dm_event_log.h | 5 +++--
+ drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c | 9 +++++----
+ drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c | 4 ++++
+ 3 files changed, 12 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dm_event_log.h b/drivers/gpu/drm/amd/display/dc/dm_event_log.h
+index 00a275d..34a701c 100644
+--- a/drivers/gpu/drm/amd/display/dc/dm_event_log.h
++++ b/drivers/gpu/drm/amd/display/dc/dm_event_log.h
+@@ -31,7 +31,8 @@
+
+ #define __DM_EVENT_LOG_H__
+
+-#define EVENT_LOG_AUX_REQ(dcc, type, action, address, len, data)
+-#define EVENT_LOG_AUX_Reply(dcc, type, swStatus, replyStatus, len, data)
++#define EVENT_LOG_AUX_REQ(ddc, type, action, address, len, data)
++#define EVENT_LOG_AUX_REP(ddc, type, replyStatus, len, data)
+
+ #endif
++
+diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
+index 4a88fc7..8eee8ac 100644
+--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
++++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
+@@ -274,8 +274,8 @@ static void submit_channel_request(
+ REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0,
+ 10, aux110->timeout_period/10);
+ REG_UPDATE(AUX_SW_CONTROL, AUX_SW_GO, 1);
+- EVENT_LOG_AUX_REQ(engine->base.ddc->pin_data->en, Native, request->action,
+- request->address, request->length, request->data);
++ EVENT_LOG_AUX_REQ(engine->base.ddc->pin_data->en, EVENT_LOG_AUX_ORIGIN_NATIVE,
++ request->action, request->address, request->length, request->data);
+ }
+
+ static int read_channel_reply(struct aux_engine *engine, uint32_t size,
+@@ -340,8 +340,9 @@ static void process_channel_reply(
+
+ bytes_replied = read_channel_reply(engine, reply->length, reply->data,
+ &reply_result, &sw_status);
+- EVENT_LOG_AUX_Reply(engine->base.ddc->pin_data->en, Native,
+- sw_status, reply_result, bytes_replied, reply->data);
++ EVENT_LOG_AUX_REP(engine->base.ddc->pin_data->en,
++ EVENT_LOG_AUX_ORIGIN_NATIVE, reply_result,
++ bytes_replied, reply->data);
+
+ /* in case HPD is LOW, exit AUX transaction */
+ if ((sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) {
+diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c
+index c995ef4..1418985 100644
+--- a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c
++++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c
+@@ -121,6 +121,8 @@ bool dal_i2c_hw_engine_submit_request(
+
+ hw_engine->base.funcs->submit_channel_request(
+ &hw_engine->base, &request);
++ /* EVENT_LOG_AUX_REQ(engine->ddc->pin_data->en, EVENT_LOG_AUX_ORIGIN_I2C, */
++ /* request.action, request.address, request.length, request.data); */
+
+ if ((request.status == I2C_CHANNEL_OPERATION_FAILED) ||
+ (request.status == I2C_CHANNEL_OPERATION_ENGINE_BUSY)) {
+@@ -169,6 +171,8 @@ bool dal_i2c_hw_engine_submit_request(
+
+ hw_engine->base.funcs->
+ process_channel_reply(&hw_engine->base, &reply);
++ /* EVENT_LOG_AUX_REP(engine->ddc->pin_data->en, EVENT_LOG_AUX_ORIGIN_I2C, */
++ /* AUX_TRANSACTION_REPLY_I2C_ACK, reply.length, reply.data); */
+ }
+
+
+--
+2.7.4
+
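The aux event-log macros touched above are defined empty in dm_event_log.h, so the new call sites compile to nothing until a platform supplies a real logger. As a purely hypothetical illustration of how such a hook could be filled in for debugging (this is not the driver's actual definition), the request macro could be mapped onto pr_debug:

    /* Hypothetical debug body; the in-tree header leaves the macro empty. */
    #define EVENT_LOG_AUX_REQ(ddc, type, action, address, len, data) \
        pr_debug("aux req: ddc=%d type=%d action=%d addr=0x%x len=%u\n", \
                 (int)(ddc), (int)(type), (int)(action), \
                 (unsigned int)(address), (unsigned int)(len))
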
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5419-drm-amdgpu-fix-parameter-documentation-for-amdgpu_vm.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5419-drm-amdgpu-fix-parameter-documentation-for-amdgpu_vm.patch
new file mode 100644
index 00000000..24307598
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5419-drm-amdgpu-fix-parameter-documentation-for-amdgpu_vm.patch
@@ -0,0 +1,34 @@
+From da5225e6b37767398878ee99236adf5ba94d5c5e Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Sat, 15 Sep 2018 10:04:54 +0200
+Subject: [PATCH 5419/5725] drm/amdgpu: fix parameter documentation for
+ amdgpu_vm_free_pts
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The function was modified without updating the documentation.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 1e09cf3..85d700b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -870,8 +870,7 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
+ * amdgpu_vm_free_pts - free PD/PT levels
+ *
+ * @adev: amdgpu device structure
+- * @parent: PD/PT starting level to free
+- * @level: level of parent structure
++ * @vm: amdgpu vm structure
+ *
+ * Free the page directory or page table level and all sub levels.
+ */
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5420-drm-amdgpu-add-vega20-sriov-capability-detection.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5420-drm-amdgpu-add-vega20-sriov-capability-detection.patch
new file mode 100644
index 00000000..bea16e17
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5420-drm-amdgpu-add-vega20-sriov-capability-detection.patch
@@ -0,0 +1,44 @@
+From 4803f1ac1eec36fa9441e700badd270a091e19ca Mon Sep 17 00:00:00 2001
+From: Frank Min <Frank.Min@amd.com>
+Date: Fri, 27 Apr 2018 03:44:11 +0800
+Subject: [PATCH 5420/5725] drm/amdgpu: add vega20 sriov capability detection
+
+Add sriov capability detection for vega20 so that we can check whether
+the device is a virtual device.
+
+Signed-off-by: Frank Min <Frank.Min@amd.com>
+Signed-off-by: Xiangliang Yu <Xiangliang.Yu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | 15 +++++++++++++--
+ 1 file changed, 13 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+index 2e65447..f8cee95 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+@@ -205,8 +205,19 @@ static const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = {
+
+ static void nbio_v7_4_detect_hw_virt(struct amdgpu_device *adev)
+ {
+- if (is_virtual_machine()) /* passthrough mode exclus sriov mod */
+- adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
++ uint32_t reg;
++
++ reg = RREG32_SOC15(NBIO, 0, mmRCC_IOV_FUNC_IDENTIFIER);
++ if (reg & 1)
++ adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
++
++ if (reg & 0x80000000)
++ adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
++
++ if (!reg) {
++ if (is_virtual_machine()) /* passthrough mode exclus sriov mod */
++ adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
++ }
+ }
+
+ static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
+--
+2.7.4
+
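The detection above reads RCC_IOV_FUNC_IDENTIFIER once and derives the virtualization caps from two bits: bit 0 marks the function as a VF and bit 31 indicates IOV is enabled; only when the register reads back zero does the code fall through to the generic passthrough check. A minimal sketch of that decode with the register value passed in as a plain integer (the cap names here are illustrative, not the driver's):

    #include <stdint.h>
    #include <stdbool.h>

    #define CAP_IS_VF        (1u << 0)
    #define CAP_ENABLE_IOV   (1u << 1)
    #define CAP_PASSTHROUGH  (1u << 2)

    static uint32_t decode_virt_caps(uint32_t reg, bool in_virtual_machine)
    {
        uint32_t caps = 0;

        if (reg & 0x1)                     /* function is a VF */
            caps |= CAP_IS_VF;
        if (reg & 0x80000000u)             /* IOV enabled on the device */
            caps |= CAP_ENABLE_IOV;
        if (!reg && in_virtual_machine)    /* neither bit: plain passthrough */
            caps |= CAP_PASSTHROUGH;

        return caps;
    }
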
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5421-drm-amdgpu-Exclude-MM-engines-for-vega20-virtual-dev.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5421-drm-amdgpu-Exclude-MM-engines-for-vega20-virtual-dev.patch
new file mode 100644
index 00000000..bc00711f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5421-drm-amdgpu-Exclude-MM-engines-for-vega20-virtual-dev.patch
@@ -0,0 +1,35 @@
+From a171659721f2ea8f287043de4aa4395ee4bf6a1d Mon Sep 17 00:00:00 2001
+From: Frank Min <Frank.Min@amd.com>
+Date: Fri, 27 Apr 2018 03:45:50 +0800
+Subject: [PATCH 5421/5725] drm/amdgpu: Exclude MM engines for vega20 virtual
+ device
+
+Temporarily disable the UVD/VCE blocks if this is a virtual device
+
+Signed-off-by: Frank Min <Frank.Min@amd.com>
+Signed-off-by: Xiangliang Yu <Xiangliang.Yu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/soc15.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index 3c4f940..2802c39 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -541,8 +541,10 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
+ #endif
+ amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
+- amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
+- amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
++ if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) {
++ amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
++ amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
++ }
+ break;
+ case CHIP_RAVEN:
+ amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5422-drm-amd-dc-Trigger-set-power-state-task-when-display.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5422-drm-amd-dc-Trigger-set-power-state-task-when-display.patch
new file mode 100644
index 00000000..7abfea68
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5422-drm-amd-dc-Trigger-set-power-state-task-when-display.patch
@@ -0,0 +1,37 @@
+From 4d3b340f8350163912d14b88f007375bcc3b28ea Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Fri, 14 Sep 2018 11:32:52 +0800
+Subject: [PATCH 5422/5725] drm/amd/dc: Trigger set power state task when
+ display configuration changes
+
+Revert "drm/amd/display: Remove call to amdgpu_pm_compute_clocks"
+
+This reverts commit dcd473770e86517543691bdb227103d6c781cd0a.
+
+When the display configuration changes, dc needs to update powerplay
+with the changes and also needs to trigger a power state task.
+amdgpu_pm_compute_clocks is the interface for setting the power state
+task whether dpm or powerplay is enabled.
+
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+index 6d16b4a..0fab64a 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+@@ -105,6 +105,8 @@ bool dm_pp_apply_display_requirements(
+ adev->powerplay.pp_funcs->display_configuration_change(
+ adev->powerplay.pp_handle,
+ &adev->pm.pm_display_cfg);
++
++ amdgpu_pm_compute_clocks(adev);
+ }
+
+ return true;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5423-drm-amd-pp-Honour-DC-s-clock-limits-on-Rv.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5423-drm-amd-pp-Honour-DC-s-clock-limits-on-Rv.patch
new file mode 100644
index 00000000..183875bb
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5423-drm-amd-pp-Honour-DC-s-clock-limits-on-Rv.patch
@@ -0,0 +1,94 @@
+From f3c2df479495db2268c0713ed0237108f162c364 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Tue, 18 Sep 2018 18:07:54 +0800
+Subject: [PATCH 5423/5725] drm/amd/pp: Honour DC's clock limits on Rv
+
+Honour display's request for min engine clock/memory clock.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 25 +++++++++++++++--------
+ 1 file changed, 17 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+index 1e800c1..7a42959 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+@@ -552,6 +552,8 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
+ {
+ struct smu10_hwmgr *data = hwmgr->backend;
+ struct amdgpu_device *adev = hwmgr->adev;
++ uint32_t min_sclk = hwmgr->display_config->min_core_set_clock;
++ uint32_t min_mclk = hwmgr->display_config->min_mem_set_clock/100;
+
+ if (hwmgr->smu_version < 0x1E3700) {
+ pr_info("smu firmware version too old, can not set dpm level\n");
+@@ -563,6 +565,13 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
+ (adev->rev_id >= 8))
+ return 0;
+
++ if (min_sclk < data->gfx_min_freq_limit)
++ min_sclk = data->gfx_min_freq_limit;
++
++ min_sclk /= 100; /* transfer 10KHz to MHz */
++ if (min_mclk < data->clock_table.FClocks[0].Freq)
++ min_mclk = data->clock_table.FClocks[0].Freq;
++
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_HIGH:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+@@ -595,18 +604,18 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinGfxClk,
+- data->gfx_min_freq_limit/100);
++ min_sclk);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxGfxClk,
+- data->gfx_min_freq_limit/100);
++ min_sclk);
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinFclkByFreq,
+- SMU10_UMD_PSTATE_MIN_FCLK);
++ min_mclk);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxFclkByFreq,
+- SMU10_UMD_PSTATE_MIN_FCLK);
++ min_mclk);
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+@@ -638,12 +647,12 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinGfxClk,
+- data->gfx_min_freq_limit/100);
++ min_sclk);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinFclkByFreq,
+ hwmgr->display_config->num_display > 3 ?
+ SMU10_UMD_PSTATE_PEAK_FCLK :
+- SMU10_UMD_PSTATE_MIN_FCLK);
++ min_mclk);
+
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinSocclkByFreq,
+@@ -674,10 +683,10 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
+ data->gfx_min_freq_limit/100);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinFclkByFreq,
+- SMU10_UMD_PSTATE_MIN_FCLK);
++ min_mclk);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxFclkByFreq,
+- SMU10_UMD_PSTATE_MIN_FCLK);
++ min_mclk);
+ break;
+ case AMD_DPM_FORCED_LEVEL_MANUAL:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
+--
+2.7.4
+
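The change above honours the display configuration's minimum clocks by clamping them against the hardware floor before the values are sent to the SMU, instead of always programming the hardware minimum. For the engine clock that boils down to a max() followed by a 10 kHz-to-MHz conversion; a small sketch under simplified names (units follow the sclk path in the patch):

    #include <stdint.h>

    /* Pick the effective minimum SCLK in MHz: honour the display's request,
     * but never drop below the hardware minimum.  Both inputs in 10 kHz. */
    static uint32_t effective_min_sclk_mhz(uint32_t display_min_10khz,
                                           uint32_t hw_min_10khz)
    {
        uint32_t min_10khz = display_min_10khz;

        if (min_10khz < hw_min_10khz)
            min_10khz = hw_min_10khz;

        return min_10khz / 100;   /* 10 kHz -> MHz */
    }
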
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5424-drm-amd-pp-Return-error-immediately-if-load-firmware.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5424-drm-amd-pp-Return-error-immediately-if-load-firmware.patch
new file mode 100644
index 00000000..4e4188e7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5424-drm-amd-pp-Return-error-immediately-if-load-firmware.patch
@@ -0,0 +1,39 @@
+From 006026f528cfd3cbce68dcf06105a9e8fa02a652 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Tue, 18 Sep 2018 20:30:36 +0800
+Subject: [PATCH 5424/5725] drm/amd/pp: Return error immediately if load
+ firmware failed
+
+This avoids a hard hang and is useful for debugging.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
+index f7e3bc2..a74c5be 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
+@@ -724,11 +724,13 @@ static int smu8_start_smu(struct pp_hwmgr *hwmgr)
+ if (hwmgr->chip_id == CHIP_STONEY)
+ fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);
+
+- ret = smu8_request_smu_load_fw(hwmgr);
+- if (ret)
+- pr_err("SMU firmware load failed\n");
++ smu8_request_smu_load_fw(hwmgr);
+
+- smu8_check_fw_load_finish(hwmgr, fw_to_check);
++ ret = smu8_check_fw_load_finish(hwmgr, fw_to_check);
++ if (ret) {
++ pr_err("SMU firmware load failed\n");
++ return ret;
++ }
+
+ ret = smu8_load_mec_firmware(hwmgr);
+ if (ret)
+--
+2.7.4
+
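The fix above stops discarding the firmware-load status: the completion check now feeds ret and the function returns early instead of continuing into MEC firmware loading with a dead SMU. A self-contained sketch of that early-return flow, with placeholder helpers standing in for the real smumgr calls:

    #include <stdio.h>

    /* Placeholder stand-ins for the real smumgr helpers. */
    static void request_fw_load(void)        { }
    static int  check_fw_load_finished(void) { return 0; }
    static int  load_mec_firmware(void)      { return 0; }

    static int start_smu(void)
    {
        int ret;

        request_fw_load();                  /* kick off the load */

        ret = check_fw_load_finished();     /* now checked, not ignored */
        if (ret) {
            fprintf(stderr, "SMU firmware load failed\n");
            return ret;                     /* propagate instead of hanging later */
        }

        return load_mec_firmware();
    }
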
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5425-drm-amd-display-Refactor-FPGA-specific-link-setup.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5425-drm-amd-display-Refactor-FPGA-specific-link-setup.patch
new file mode 100644
index 00000000..5756cbe9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5425-drm-amd-display-Refactor-FPGA-specific-link-setup.patch
@@ -0,0 +1,147 @@
+From 1b5099a67b966ce7d2a8bf4562409e5bc19e48a2 Mon Sep 17 00:00:00 2001
+From: Nikola Cornij <nikola.cornij@amd.com>
+Date: Thu, 16 Aug 2018 14:27:11 -0400
+Subject: [PATCH 5425/5725] drm/amd/display: Refactor FPGA-specific link setup
+
+FPGA doesn't program the backend, so we don't need certain link settings
+(audio stream for example).
+
+Change-Id: I12ebdbefb23a0c53a6c7edc749a5e47fbdbf68a6
+Signed-off-by: Nikola Cornij <nikola.cornij@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_link.c | 56 ++++++++++++----------
+ .../amd/display/dc/dce110/dce110_hw_sequencer.c | 15 +-----
+ .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 1 +
+ 3 files changed, 32 insertions(+), 40 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index 2dfdcc9..bad65c8 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -2559,23 +2559,24 @@ void core_link_enable_stream(
+ pipe_ctx->stream_res.stream_enc,
+ &stream->timing);
+
+- resource_build_info_frame(pipe_ctx);
+- core_dc->hwss.update_info_frame(pipe_ctx);
+-
+- /* eDP lit up by bios already, no need to enable again. */
+- if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
+- pipe_ctx->stream->apply_edp_fast_boot_optimization) {
+- pipe_ctx->stream->apply_edp_fast_boot_optimization = false;
+- pipe_ctx->stream->dpms_off = false;
+- return;
+- }
++ if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
++ resource_build_info_frame(pipe_ctx);
++ core_dc->hwss.update_info_frame(pipe_ctx);
++
++ /* eDP lit up by bios already, no need to enable again. */
++ if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
++ pipe_ctx->stream->apply_edp_fast_boot_optimization) {
++ pipe_ctx->stream->apply_edp_fast_boot_optimization = false;
++ pipe_ctx->stream->dpms_off = false;
++ return;
++ }
+
+- if (pipe_ctx->stream->dpms_off)
+- return;
++ if (pipe_ctx->stream->dpms_off)
++ return;
+
+- status = enable_link(state, pipe_ctx);
++ status = enable_link(state, pipe_ctx);
+
+- if (status != DC_OK) {
++ if (status != DC_OK) {
+ DC_LOG_WARNING("enabling link %u failed: %d\n",
+ pipe_ctx->stream->sink->link->link_index,
+ status);
+@@ -2590,23 +2591,26 @@ void core_link_enable_stream(
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+- }
++ }
+
+- core_dc->hwss.enable_audio_stream(pipe_ctx);
++ core_dc->hwss.enable_audio_stream(pipe_ctx);
+
+- /* turn off otg test pattern if enable */
+- if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
+- pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
+- CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+- COLOR_DEPTH_UNDEFINED);
++ /* turn off otg test pattern if enable */
++ if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
++ pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
++ CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
++ COLOR_DEPTH_UNDEFINED);
+
+- core_dc->hwss.enable_stream(pipe_ctx);
++ core_dc->hwss.enable_stream(pipe_ctx);
+
+- if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+- allocate_mst_payload(pipe_ctx);
++ if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
++ allocate_mst_payload(pipe_ctx);
++
++ core_dc->hwss.unblank_stream(pipe_ctx,
++ &pipe_ctx->stream->sink->link->cur_link_settings);
++
++ }
+
+- core_dc->hwss.unblank_stream(pipe_ctx,
+- &pipe_ctx->stream->sink->link->cur_link_settings);
+ }
+
+ void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option)
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index d1add1b..2d7d13b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -1377,26 +1377,13 @@ static enum dc_status apply_single_controller_ctx_to_hw(
+ /* */
+ dc->hwss.enable_stream_timing(pipe_ctx, context, dc);
+
+- /* FPGA does not program backend */
+- if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+- pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
+- pipe_ctx->stream_res.opp,
+- COLOR_SPACE_YCBCR601,
+- stream->timing.display_color_depth,
+- pipe_ctx->stream->signal);
+-
+- pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
+- pipe_ctx->stream_res.opp,
+- &stream->bit_depth_params,
+- &stream->clamping);
+- return DC_OK;
+- }
+ /* TODO: move to stream encoder */
+ if (pipe_ctx->stream->signal != SIGNAL_TYPE_VIRTUAL)
+ if (DC_OK != bios_parser_crtc_source_select(pipe_ctx)) {
+ BREAK_TO_DEBUGGER();
+ return DC_ERROR_UNEXPECTED;
+ }
++
+ pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
+ pipe_ctx->stream_res.opp,
+ COLOR_SPACE_YCBCR601,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 6bd4ec3..a881ff5 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -44,6 +44,7 @@
+ #include "dcn10_hubp.h"
+ #include "dcn10_hubbub.h"
+ #include "dcn10_cm_common.h"
++#include "dc_link_dp.h"
+
+ #define DC_LOGGER_INIT(logger)
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5426-drm-amd-display-use-proper-pipe_ctx-index.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5426-drm-amd-display-use-proper-pipe_ctx-index.patch
new file mode 100644
index 00000000..74295be6
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5426-drm-amd-display-use-proper-pipe_ctx-index.patch
@@ -0,0 +1,51 @@
+From 10b8bfbdfd0b380c72f20027823a2a43754ea286 Mon Sep 17 00:00:00 2001
+From: Samson Tam <Samson.Tam@amd.com>
+Date: Fri, 7 Sep 2018 10:13:55 -0400
+Subject: [PATCH 5426/5725] drm/amd/display: use proper pipe_ctx index
+
+Use link->link_index as index to pipe_ctx[] to get proper link
+information instead of using index 0 to avoid potential miss matches.
+
+Change-Id: If3f8c5f1e02396949d0a0a0d2e14400ecd52af87
+Signed-off-by: Samson Tam <Samson.Tam@amd.com>
+Reviewed-by: Anthony Koo <Anthony.Koo@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc.c | 20 ++++++++++++++++++--
+ 1 file changed, 18 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 7c5382a..1764137 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -460,9 +460,25 @@ void dc_link_set_preferred_link_settings(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link *link)
+ {
++ int i;
++ struct pipe_ctx *pipe;
++ struct dc_stream_state *link_stream;
+ struct dc_link_settings store_settings = *link_setting;
+- struct dc_stream_state *link_stream =
+- link->dc->current_state->res_ctx.pipe_ctx[0].stream;
++
++ for (i = 0; i < MAX_PIPES; i++) {
++ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
++ if (pipe->stream && pipe->stream->sink
++ && pipe->stream->sink->link) {
++ if (pipe->stream->sink->link == link)
++ break;
++ }
++ }
++
++ /* Stream not found */
++ if (i == MAX_PIPES)
++ return;
++
++ link_stream = link->dc->current_state->res_ctx.pipe_ctx[i].stream;
+
+ link->preferred_link_setting = store_settings;
+ if (link_stream)
+--
+2.7.4
+
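Rather than assuming the stream of interest sits in pipe_ctx[0], the change above scans every pipe for the one whose sink is attached to the given link and bails out if none matches. A compact sketch of that lookup pattern with simplified, illustrative types:

    #include <stddef.h>

    #define MAX_PIPES 6

    struct link;
    struct stream   { struct link *link; };
    struct pipe_ctx { struct stream *stream; };

    /* Return the stream driven by 'link', or NULL if no pipe matches. */
    static struct stream *find_link_stream(struct pipe_ctx pipes[MAX_PIPES],
                                           struct link *link)
    {
        int i;

        for (i = 0; i < MAX_PIPES; i++) {
            if (pipes[i].stream && pipes[i].stream->link == link)
                return pipes[i].stream;
        }
        return NULL;   /* stream not found: caller must handle this */
    }
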
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5427-drm-amd-display-add-pp_smu-NULL-pointer-check.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5427-drm-amd-display-add-pp_smu-NULL-pointer-check.patch
new file mode 100644
index 00000000..8a9c8393
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5427-drm-amd-display-add-pp_smu-NULL-pointer-check.patch
@@ -0,0 +1,31 @@
+From b4fa0de44076517d605dbdbf1bc7b747585bcbed Mon Sep 17 00:00:00 2001
+From: Charlene Liu <charlene.liu@amd.com>
+Date: Fri, 7 Sep 2018 13:31:34 -0400
+Subject: [PATCH 5427/5725] drm/amd/display: add pp_smu NULL pointer check
+
+add pp_smu NULL ptr check
+
+Change-Id: Ib810078b1f4ea29d48e197539f3c24bfe7085b75
+Signed-off-by: Charlene Liu <charlene.liu@amd.com>
+Reviewed-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 1764137..bdb03be 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -1421,7 +1421,7 @@ static void notify_display_count_to_smu(
+ * sent as part of pplib_apply_display_requirements.
+ * So just return.
+ */
+- if (!pp_smu->set_display_count)
++ if (!pp_smu || !pp_smu->set_display_count)
+ return;
+
+ display_count = 0;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5428-drm-amd-display-Add-color-bit-info-to-freesync-infof.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5428-drm-amd-display-Add-color-bit-info-to-freesync-infof.patch
new file mode 100644
index 00000000..d5abf700
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5428-drm-amd-display-Add-color-bit-info-to-freesync-infof.patch
@@ -0,0 +1,331 @@
+From 83ad994fdc8f4891240d3e2af47779335a20f31b Mon Sep 17 00:00:00 2001
+From: SivapiriyanKumarasamy <sivapiriyan.kumarasamy@amd.com>
+Date: Thu, 30 Aug 2018 09:37:22 -0400
+Subject: [PATCH 5428/5725] drm/amd/display: Add color bit info to freesync
+ infoframe
+
+Parse the native color bit and send it to freesync module for future
+use
+
+Change-Id: I75e35194dd1aca45349b6274f85e9cd7472c2363
+Signed-off-by: SivapiriyanKumarasamy <sivapiriyan.kumarasamy@amd.com>
+Reviewed-by: Anthony Koo <Anthony.Koo@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 4 +-
+ .../drm/amd/display/modules/freesync/freesync.c | 164 ++++++++++++++++++---
+ .../gpu/drm/amd/display/modules/inc/mod_freesync.h | 4 +-
+ .../gpu/drm/amd/display/modules/inc/mod_shared.h | 49 ++++++
+ 4 files changed, 199 insertions(+), 22 deletions(-)
+ create mode 100644 drivers/gpu/drm/amd/display/modules/inc/mod_shared.h
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 0c33419..684bac8 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -5073,7 +5073,9 @@ void set_freesync_on_stream(struct amdgpu_display_manager *dm,
+ mod_freesync_build_vrr_infopacket(dm->freesync_module,
+ new_stream,
+ &vrr,
+- &vrr_infopacket);
++ packet_type_fs1,
++ NULL,
++ &vrr_infopacket);
+
+ new_crtc_state->adjust = vrr.adjust;
+ new_crtc_state->vrr_infopacket = vrr_infopacket;
+diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+index e168890..4018c71 100644
+--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
++++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+@@ -480,22 +480,11 @@ bool mod_freesync_get_v_position(struct mod_freesync *mod_freesync,
+ return false;
+ }
+
+-void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
+- const struct dc_stream_state *stream,
+- const struct mod_vrr_params *vrr,
+- struct dc_info_packet *infopacket)
++static void build_vrr_infopacket_header_v1(enum signal_type signal,
++ struct dc_info_packet *infopacket,
++ unsigned int *payload_size)
+ {
+- /* SPD info packet for FreeSync */
+- unsigned char checksum = 0;
+- unsigned int idx, payload_size = 0;
+-
+- /* Check if Freesync is supported. Return if false. If true,
+- * set the corresponding bit in the info packet
+- */
+- if (!vrr->supported || !vrr->send_vsif)
+- return;
+-
+- if (dc_is_hdmi_signal(stream->signal)) {
++ if (dc_is_hdmi_signal(signal)) {
+
+ /* HEADER */
+
+@@ -510,9 +499,9 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
+ /* HB2 = [Bits 7:5 = 0] [Bits 4:0 = Length = 0x08] */
+ infopacket->hb2 = 0x08;
+
+- payload_size = 0x08;
++ *payload_size = 0x08;
+
+- } else if (dc_is_dp_signal(stream->signal)) {
++ } else if (dc_is_dp_signal(signal)) {
+
+ /* HEADER */
+
+@@ -536,9 +525,62 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
+ */
+ infopacket->hb3 = 0x04;
+
+- payload_size = 0x1B;
++ *payload_size = 0x1B;
+ }
++}
++
++static void build_vrr_infopacket_header_v2(enum signal_type signal,
++ struct dc_info_packet *infopacket,
++ unsigned int *payload_size)
++{
++ if (dc_is_hdmi_signal(signal)) {
++
++ /* HEADER */
++
++ /* HB0 = Packet Type = 0x83 (Source Product
++ * Descriptor InfoFrame)
++ */
++ infopacket->hb0 = DC_HDMI_INFOFRAME_TYPE_SPD;
++
++ /* HB1 = Version = 0x02 */
++ infopacket->hb1 = 0x02;
++
++ /* HB2 = [Bits 7:5 = 0] [Bits 4:0 = Length = 0x09] */
++ infopacket->hb2 = 0x09;
++
++ *payload_size = 0x0A;
+
++ } else if (dc_is_dp_signal(signal)) {
++
++ /* HEADER */
++
++ /* HB0 = Secondary-data Packet ID = 0 - Only non-zero
++ * when used to associate audio related info packets
++ */
++ infopacket->hb0 = 0x00;
++
++ /* HB1 = Packet Type = 0x83 (Source Product
++ * Descriptor InfoFrame)
++ */
++ infopacket->hb1 = DC_HDMI_INFOFRAME_TYPE_SPD;
++
++ /* HB2 = [Bits 7:0 = Least significant eight bits -
++ * For INFOFRAME, the value must be 1Bh]
++ */
++ infopacket->hb2 = 0x1B;
++
++ /* HB3 = [Bits 7:2 = INFOFRAME SDP Version Number = 0x2]
++ * [Bits 1:0 = Most significant two bits = 0x00]
++ */
++ infopacket->hb3 = 0x08;
++
++ *payload_size = 0x1B;
++ }
++}
++
++static void build_vrr_infopacket_data(const struct mod_vrr_params *vrr,
++ struct dc_info_packet *infopacket)
++{
+ /* PB1 = 0x1A (24bit AMD IEEE OUI (0x00001A) - Byte 0) */
+ infopacket->sb[1] = 0x1A;
+
+@@ -576,15 +618,39 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
+ */
+ infopacket->sb[8] = (unsigned char)(vrr->max_refresh_in_uhz / 1000000);
+
+- /* PB9 - PB27 = Reserved */
+
++ //FreeSync HDR
++ infopacket->sb[9] = 0;
++ infopacket->sb[10] = 0;
++}
++
++static void build_vrr_infopacket_fs2_data(enum color_transfer_func app_tf,
++ struct dc_info_packet *infopacket)
++{
++ if (app_tf != transfer_func_unknown) {
++ infopacket->valid = true;
++
++ infopacket->sb[6] |= 0x08; // PB6 = [Bit 3 = Native Color Active]
++
++ if (app_tf == transfer_func_gamma_22) {
++ infopacket->sb[9] |= 0x04; // PB6 = [Bit 2 = Gamma 2.2 EOTF Active]
++ }
++ }
++}
++
++static void build_vrr_infopacket_checksum(unsigned int *payload_size,
++ struct dc_info_packet *infopacket)
++{
+ /* Calculate checksum */
++ unsigned int idx = 0;
++ unsigned char checksum = 0;
++
+ checksum += infopacket->hb0;
+ checksum += infopacket->hb1;
+ checksum += infopacket->hb2;
+ checksum += infopacket->hb3;
+
+- for (idx = 1; idx <= payload_size; idx++)
++ for (idx = 1; idx <= *payload_size; idx++)
+ checksum += infopacket->sb[idx];
+
+ /* PB0 = Checksum (one byte complement) */
+@@ -593,6 +659,64 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
+ infopacket->valid = true;
+ }
+
++static void build_vrr_infopacket_v1(enum signal_type signal,
++ const struct mod_vrr_params *vrr,
++ struct dc_info_packet *infopacket)
++{
++ /* SPD info packet for FreeSync */
++ unsigned int payload_size = 0;
++
++ build_vrr_infopacket_header_v1(signal, infopacket, &payload_size);
++ build_vrr_infopacket_data(vrr, infopacket);
++ build_vrr_infopacket_checksum(&payload_size, infopacket);
++
++ infopacket->valid = true;
++}
++
++static void build_vrr_infopacket_v2(enum signal_type signal,
++ const struct mod_vrr_params *vrr,
++ const enum color_transfer_func *app_tf,
++ struct dc_info_packet *infopacket)
++{
++ unsigned int payload_size = 0;
++
++ build_vrr_infopacket_header_v2(signal, infopacket, &payload_size);
++ build_vrr_infopacket_data(vrr, infopacket);
++
++ if (app_tf != NULL)
++ build_vrr_infopacket_fs2_data(*app_tf, infopacket);
++
++ build_vrr_infopacket_checksum(&payload_size, infopacket);
++
++ infopacket->valid = true;
++}
++
++void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
++ const struct dc_stream_state *stream,
++ const struct mod_vrr_params *vrr,
++ enum vrr_packet_type packet_type,
++ const enum color_transfer_func *app_tf,
++ struct dc_info_packet *infopacket)
++{
++ /* SPD info packet for FreeSync */
++
++ /* Check if Freesync is supported. Return if false. If true,
++ * set the corresponding bit in the info packet
++ */
++ if (!vrr->supported || !vrr->send_vsif)
++ return;
++
++ switch (packet_type) {
++ case packet_type_fs2:
++ build_vrr_infopacket_v2(stream->signal, vrr, app_tf, infopacket);
++ break;
++ case packet_type_vrr:
++ case packet_type_fs1:
++ default:
++ build_vrr_infopacket_v1(stream->signal, vrr, infopacket);
++ }
++}
++
+ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
+ const struct dc_stream_state *stream,
+ struct mod_freesync_config *in_config,
+diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
+index a0f32cd..949a8b6 100644
+--- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
++++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
+@@ -54,7 +54,7 @@
+ #ifndef MOD_FREESYNC_H_
+ #define MOD_FREESYNC_H_
+
+-#include "dm_services.h"
++#include "mod_shared.h"
+
+ // Access structures
+ struct mod_freesync {
+@@ -144,6 +144,8 @@ void mod_freesync_get_settings(struct mod_freesync *mod_freesync,
+ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
+ const struct dc_stream_state *stream,
+ const struct mod_vrr_params *vrr,
++ enum vrr_packet_type packet_type,
++ const enum color_transfer_func *app_tf,
+ struct dc_info_packet *infopacket);
+
+ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
+diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h b/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h
+new file mode 100644
+index 0000000..238c431
+--- /dev/null
++++ b/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h
+@@ -0,0 +1,49 @@
++/*
++ * Copyright 2016 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++
++#ifndef MOD_SHARED_H_
++#define MOD_SHARED_H_
++
++enum color_transfer_func {
++ transfer_func_unknown,
++ transfer_func_srgb,
++ transfer_func_bt709,
++ transfer_func_pq2084,
++ transfer_func_pq2084_interim,
++ transfer_func_linear_0_1,
++ transfer_func_linear_0_125,
++ transfer_func_dolbyvision,
++ transfer_func_gamma_22,
++ transfer_func_gamma_26
++};
++
++enum vrr_packet_type {
++ packet_type_vrr,
++ packet_type_fs1,
++ packet_type_fs2
++};
++
++#endif /* MOD_SHARED_H_ */
+--
+2.7.4
+
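The refactor above splits infopacket construction into header, payload and checksum helpers; the checksum is the usual one-byte complement over the four header bytes plus the payload, stored back into PB0. A standalone sketch of that computation, assuming the header and payload are passed in as flat byte arrays:

    #include <stdint.h>

    /* One-byte-complement checksum as used for SPD infoframes: sum the
     * header and payload bytes, then return the value that makes the
     * total wrap to zero (stored in sb[0], i.e. PB0). */
    static uint8_t infoframe_checksum(const uint8_t hb[4],
                                      const uint8_t *sb,
                                      unsigned int payload_size)
    {
        uint8_t sum = 0;
        unsigned int i;

        for (i = 0; i < 4; i++)
            sum += hb[i];
        for (i = 1; i <= payload_size; i++)
            sum += sb[i];

        return (uint8_t)(0x100 - sum);
    }
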
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5429-drm-amd-display-program-v_update-and-v_ready-with-pr.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5429-drm-amd-display-program-v_update-and-v_ready-with-pr.patch
new file mode 100644
index 00000000..860266cf
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5429-drm-amd-display-program-v_update-and-v_ready-with-pr.patch
@@ -0,0 +1,97 @@
+From bf53f5dcd7221d4138bdae96c9640477f4c4df03 Mon Sep 17 00:00:00 2001
+From: Su Sung Chung <su.chung@amd.com>
+Date: Fri, 7 Sep 2018 16:51:42 -0400
+Subject: [PATCH 5429/5725] drm/amd/display: program v_update and v_ready with
+ proper field
+
+[WHY]
+There are two different variables used to calculate v_update and v_ready,
+one for validation and the other for performance parameter calculation.
+Before the variable for validation was used which caused underflow on
+1080edp with vsr enabled
+
+[HOW]
+program v_update and v_ready with the variables for performance parameter
+calculation
+
+Change-Id: I23aaa71289fa6739422fb7c789cf63f2cdbdc6b2
+Signed-off-by: Su Sung Chung <su.chung@amd.com>
+Reviewed-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c | 8 ++++----
+ drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 12 ++++++------
+ drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h | 6 +++---
+ 3 files changed, 13 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c
+index 5e2ea12..d0fc54f 100644
+--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c
++++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c
+@@ -1625,11 +1625,11 @@ void dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performan
+ else {
+ v->dsty_after_scaler = 0.0;
+ }
+- v->v_update_offset_pix =dcn_bw_ceil2(v->htotal[k] / 4.0, 1.0);
++ v->v_update_offset_pix[k] = dcn_bw_ceil2(v->htotal[k] / 4.0, 1.0);
+ v->total_repeater_delay_time = v->max_inter_dcn_tile_repeaters * (2.0 / v->dppclk + 3.0 / v->dispclk);
+- v->v_update_width_pix = (14.0 / v->dcf_clk_deep_sleep + 12.0 / v->dppclk + v->total_repeater_delay_time) * v->pixel_clock[k];
+- v->v_ready_offset_pix =dcn_bw_max2(150.0 / v->dppclk, v->total_repeater_delay_time + 20.0 / v->dcf_clk_deep_sleep + 10.0 / v->dppclk) * v->pixel_clock[k];
+- v->t_setup = (v->v_update_offset_pix + v->v_update_width_pix + v->v_ready_offset_pix) / v->pixel_clock[k];
++ v->v_update_width_pix[k] = (14.0 / v->dcf_clk_deep_sleep + 12.0 / v->dppclk + v->total_repeater_delay_time) * v->pixel_clock[k];
++ v->v_ready_offset_pix[k] = dcn_bw_max2(150.0 / v->dppclk, v->total_repeater_delay_time + 20.0 / v->dcf_clk_deep_sleep + 10.0 / v->dppclk) * v->pixel_clock[k];
++ v->t_setup = (v->v_update_offset_pix[k] + v->v_update_width_pix[k] + v->v_ready_offset_pix[k]) / v->pixel_clock[k];
+ v->v_startup[k] =dcn_bw_min2(v->v_startup_lines, v->max_vstartup_lines[k]);
+ if (v->prefetch_mode == 0.0) {
+ v->t_wait =dcn_bw_max3(v->dram_clock_change_latency + v->urgent_latency, v->sr_enter_plus_exit_time, v->urgent_latency);
+diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+index 80ec09e..3208188 100644
+--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
++++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+@@ -1096,9 +1096,9 @@ bool dcn_validate_bandwidth(
+ if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state)
+ continue;
+
+- pipe->pipe_dlg_param.vupdate_width = v->v_update_width[input_idx][v->dpp_per_plane[input_idx] == 2 ? 1 : 0];
+- pipe->pipe_dlg_param.vupdate_offset = v->v_update_offset[input_idx][v->dpp_per_plane[input_idx] == 2 ? 1 : 0];
+- pipe->pipe_dlg_param.vready_offset = v->v_ready_offset[input_idx][v->dpp_per_plane[input_idx] == 2 ? 1 : 0];
++ pipe->pipe_dlg_param.vupdate_width = v->v_update_width_pix[input_idx];
++ pipe->pipe_dlg_param.vupdate_offset = v->v_update_offset_pix[input_idx];
++ pipe->pipe_dlg_param.vready_offset = v->v_ready_offset_pix[input_idx];
+ pipe->pipe_dlg_param.vstartup_start = v->v_startup[input_idx];
+
+ pipe->pipe_dlg_param.htotal = pipe->stream->timing.h_total;
+@@ -1137,9 +1137,9 @@ bool dcn_validate_bandwidth(
+ TIMING_3D_FORMAT_SIDE_BY_SIDE))) {
+ if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) {
+ /* update previously split pipe */
+- hsplit_pipe->pipe_dlg_param.vupdate_width = v->v_update_width[input_idx][v->dpp_per_plane[input_idx] == 2 ? 1 : 0];
+- hsplit_pipe->pipe_dlg_param.vupdate_offset = v->v_update_offset[input_idx][v->dpp_per_plane[input_idx] == 2 ? 1 : 0];
+- hsplit_pipe->pipe_dlg_param.vready_offset = v->v_ready_offset[input_idx][v->dpp_per_plane[input_idx] == 2 ? 1 : 0];
++ hsplit_pipe->pipe_dlg_param.vupdate_width = v->v_update_width_pix[input_idx];
++ hsplit_pipe->pipe_dlg_param.vupdate_offset = v->v_update_offset_pix[input_idx];
++ hsplit_pipe->pipe_dlg_param.vready_offset = v->v_ready_offset_pix[input_idx];
+ hsplit_pipe->pipe_dlg_param.vstartup_start = v->v_startup[input_idx];
+
+ hsplit_pipe->pipe_dlg_param.htotal = pipe->stream->timing.h_total;
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+index ddbb673..e688eb9 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+@@ -504,10 +504,10 @@ struct dcn_bw_internal_vars {
+ float prefetch_mode;
+ float dstx_after_scaler;
+ float dsty_after_scaler;
+- float v_update_offset_pix;
++ float v_update_offset_pix[number_of_planes_minus_one + 1];
+ float total_repeater_delay_time;
+- float v_update_width_pix;
+- float v_ready_offset_pix;
++ float v_update_width_pix[number_of_planes_minus_one + 1];
++ float v_ready_offset_pix[number_of_planes_minus_one + 1];
+ float t_setup;
+ float t_wait;
+ float bandwidth_available_for_immediate_flip;
+--
+2.7.4
+
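The hunks above convert v_update_offset_pix, v_update_width_pix and v_ready_offset_pix from single floats into per-plane arrays indexed by k, so dcn_validate_bandwidth can read back the value computed for each plane instead of whatever the last loop iteration left behind. A minimal standalone sketch of that difference follows; it uses made-up names and values, not DC code, and only illustrates the last-writer-wins problem a scalar has inside a per-plane loop.

    /* Minimal sketch (hypothetical names, not DC code): why per-plane results
     * need per-plane storage.  A single scalar is overwritten on every loop
     * iteration, so a later consumer only ever sees the last plane's value.
     */
    #include <stdio.h>

    #define NUM_PLANES 3

    int main(void)
    {
        float htotal[NUM_PLANES] = { 2200.0f, 2640.0f, 4400.0f };
        float scalar_offset = 0.0f;   /* old style: one value shared by all planes */
        float offset[NUM_PLANES];     /* new style: one value kept per plane */

        for (int k = 0; k < NUM_PLANES; k++) {
            scalar_offset = htotal[k] / 4.0f;  /* clobbers the previous plane */
            offset[k] = htotal[k] / 4.0f;      /* preserved per plane */
        }

        for (int k = 0; k < NUM_PLANES; k++)
            printf("plane %d: scalar=%.1f per-plane=%.1f\n",
                   k, scalar_offset, offset[k]);
        return 0;
    }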
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5430-drm-amd-display-dc-3.1.67.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5430-drm-amd-display-dc-3.1.67.patch
new file mode 100644
index 00000000..be1166aa
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5430-drm-amd-display-dc-3.1.67.patch
@@ -0,0 +1,29 @@
+From 53dd6a31b6cec1816be0bb1dd1e3452324029b6f Mon Sep 17 00:00:00 2001
+From: Tony Cheng <tony.cheng@amd.com>
+Date: Mon, 10 Sep 2018 11:30:24 -0400
+Subject: [PATCH 5430/5725] drm/amd/display: dc 3.1.67
+
+Change-Id: Ib8a760749e950259214db6bd11167768058e5975
+Signed-off-by: Tony Cheng <tony.cheng@amd.com>
+Reviewed-by: Steven Chiu <Steven.Chiu@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index b56afdb..aa4d996 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -38,7 +38,7 @@
+ #include "inc/compressor.h"
+ #include "dml/display_mode_lib.h"
+
+-#define DC_VER "3.1.66"
++#define DC_VER "3.1.67"
+
+ #define MAX_SURFACES 3
+ #define MAX_STREAMS 6
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5431-drm-amd-display-Stereo-3D-support-in-VSC.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5431-drm-amd-display-Stereo-3D-support-in-VSC.patch
new file mode 100644
index 00000000..38fa84dc
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5431-drm-amd-display-Stereo-3D-support-in-VSC.patch
@@ -0,0 +1,103 @@
+From f18b2e297dc69b3fdc3c4b6a76512b0a584ccc8f Mon Sep 17 00:00:00 2001
+From: Eric Bernstein <eric.bernstein@amd.com>
+Date: Mon, 10 Sep 2018 10:11:01 -0400
+Subject: [PATCH 5431/5725] drm/amd/display: Stereo 3D support in VSC
+
+[Why]
+Need to add stereo 3D information in VSC
+
+[How]
+Update mod_build_vsc_infopacket with stereo info
+
+Change-Id: Ie99f9f3259e10dcf37a92192cd08e2a65299cf9d
+Signed-off-by: Eric Bernstein <eric.bernstein@amd.com>
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+---
+ .../amd/display/modules/info_packet/info_packet.c | 58 ++++++++++++++++++++--
+ 1 file changed, 54 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+index 52378fc..ff8bfb9 100644
+--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
++++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+@@ -48,9 +48,12 @@ static void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
+ unsigned int i;
+ unsigned int pixelEncoding = 0;
+ unsigned int colorimetryFormat = 0;
++ bool stereo3dSupport = false;
+
+- if (stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE && stream->view_format != VIEW_3D_FORMAT_NONE)
++ if (stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE && stream->view_format != VIEW_3D_FORMAT_NONE) {
+ vscPacketRevision = 1;
++ stereo3dSupport = true;
++ }
+
+ /*VSC packet set to 2 when DP revision >= 1.2*/
+ if (stream->psr_version != 0)
+@@ -94,12 +97,59 @@ static void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
+ info_packet->hb2 = 0x01; // 01h = Revision number. VSC SDP supporting 3D stereo only
+ info_packet->hb3 = 0x01; // 01h = VSC SDP supporting 3D stereo only (HB2 = 01h).
+
+- if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_INBAND_FA)
+- info_packet->sb[0] = 0x1;
+-
+ info_packet->valid = true;
+ }
+
++ if (stereo3dSupport) {
++ /* ==============================================================================================================|
++ * A. STEREO 3D
++ * ==============================================================================================================|
++ * VSC Payload (1 byte) From DP1.2 spec
++ *
++ * Bits 3:0 (Stereo Interface Method Code) | Bits 7:4 (Stereo Interface Method Specific Parameter)
++ * -----------------------------------------------------------------------------------------------------
++ * 0 = Non Stereo Video | Must be set to 0x0
++ * -----------------------------------------------------------------------------------------------------
++ * 1 = Frame/Field Sequential | 0x0: L + R view indication based on MISC1 bit 2:1
++ * | 0x1: Right when Stereo Signal = 1
++ * | 0x2: Left when Stereo Signal = 1
++ * | (others reserved)
++ * -----------------------------------------------------------------------------------------------------
++ * 2 = Stacked Frame | 0x0: Left view is on top and right view on bottom
++ * | (others reserved)
++ * -----------------------------------------------------------------------------------------------------
++ * 3 = Pixel Interleaved | 0x0: horiz interleaved, right view pixels on even lines
++ * | 0x1: horiz interleaved, right view pixels on odd lines
++ * | 0x2: checker board, start with left view pixel
++ * | 0x3: vertical interleaved, start with left view pixels
++ * | 0x4: vertical interleaved, start with right view pixels
++ * | (others reserved)
++ * -----------------------------------------------------------------------------------------------------
++ * 4 = Side-by-side | 0x0: left half represents left eye view
++ * | 0x1: left half represents right eye view
++ */
++ switch (stream->timing.timing_3d_format) {
++ case TIMING_3D_FORMAT_HW_FRAME_PACKING:
++ case TIMING_3D_FORMAT_SW_FRAME_PACKING:
++ case TIMING_3D_FORMAT_TOP_AND_BOTTOM:
++ case TIMING_3D_FORMAT_TB_SW_PACKED:
++ info_packet->sb[0] = 0x02; // Stacked Frame, Left view is on top and right view on bottom.
++ break;
++ case TIMING_3D_FORMAT_DP_HDMI_INBAND_FA:
++ case TIMING_3D_FORMAT_INBAND_FA:
++ info_packet->sb[0] = 0x01; // Frame/Field Sequential, L + R view indication based on MISC1 bit 2:1
++ break;
++ case TIMING_3D_FORMAT_SIDE_BY_SIDE:
++ case TIMING_3D_FORMAT_SBS_SW_PACKED:
++ info_packet->sb[0] = 0x04; // Side-by-side
++ break;
++ default:
++ info_packet->sb[0] = 0x00; // No Stereo Video, Shall be cleared to 0x0.
++ break;
++ }
++
++ }
++
+ /* 05h = VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/Colorimetry Format indication.
+ * Added in DP1.3, a DP Source device is allowed to indicate the pixel encoding/colorimetry
+ * format to the DP Sink device with VSC SDP only when the DP Sink device supports it
+--
+2.7.4
+
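The comment table in the hunk above describes the DP 1.2 VSC payload byte as a 4-bit stereo interface method code in bits 3:0 plus a 4-bit method-specific parameter in bits 7:4. The sketch below composes such a byte from those two fields to make the layout concrete; the enum and function names are illustrative only and are not part of the display module.

    /* Minimal sketch of the DP 1.2 VSC stereo payload byte layout described
     * above: bits 3:0 carry the stereo interface method code, bits 7:4 the
     * method-specific parameter.  Names are illustrative, not DC code.
     */
    #include <stdint.h>
    #include <stdio.h>

    enum stereo_method {
        STEREO_NONE         = 0, /* non stereo video */
        STEREO_FRAME_SEQ    = 1, /* frame/field sequential */
        STEREO_STACKED      = 2, /* stacked frame, left view on top */
        STEREO_INTERLEAVED  = 3, /* pixel interleaved */
        STEREO_SIDE_BY_SIDE = 4,
    };

    static uint8_t vsc_stereo_payload(enum stereo_method method, uint8_t param)
    {
        return (uint8_t)(((param & 0xF) << 4) | (method & 0xF));
    }

    int main(void)
    {
        /* 0x02 = stacked frame, left view on top (param 0), as set in sb[0] above */
        printf("stacked:      0x%02x\n", vsc_stereo_payload(STEREO_STACKED, 0));
        /* 0x01 = frame sequential, L/R indication from MISC1 bits 2:1 (param 0) */
        printf("frame seq:    0x%02x\n", vsc_stereo_payload(STEREO_FRAME_SEQ, 0));
        /* 0x04 = side-by-side, left half is the left eye view (param 0) */
        printf("side-by-side: 0x%02x\n", vsc_stereo_payload(STEREO_SIDE_BY_SIDE, 0));
        return 0;
    }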
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5432-drm-amd-display-Guard-against-null-stream-dereferenc.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5432-drm-amd-display-Guard-against-null-stream-dereferenc.patch
new file mode 100644
index 00000000..960c77bc
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5432-drm-amd-display-Guard-against-null-stream-dereferenc.patch
@@ -0,0 +1,61 @@
+From 51970066886573e65ae6cec0c6f3399019709247 Mon Sep 17 00:00:00 2001
+From: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Date: Tue, 11 Sep 2018 14:29:47 -0400
+Subject: [PATCH 5432/5725] drm/amd/display: Guard against null stream
+ dereference in do flip
+
+[Why]
+
+Suspending under some hardware configurations can result in a
+series of atomic commits with a NULL stream status, which
+causes a NULL pointer dereference. This should be guarded against.
+
+[How]
+
+Exit early from the function - if we can't access the stream then
+there isn't anything that can be done here.
+
+Change-Id: Ie846cce9eba7098b77cac87122c41a2ae69bf905
+Signed-off-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 15 ++++++++++++++-
+ 1 file changed, 14 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 684bac8..9c1654d 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -4352,6 +4352,7 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
+ /* TODO eliminate or rename surface_update */
+ struct dc_surface_update surface_updates[1] = { {0} };
+ struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
++ struct dc_stream_status *stream_status;
+
+
+ /* Prepare wait for target vblank early - before the fence-waits */
+@@ -4409,7 +4410,19 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
+
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+- surface_updates->surface = dc_stream_get_status(acrtc_state->stream)->plane_states[0];
++ stream_status = dc_stream_get_status(acrtc_state->stream);
++ if (!stream_status) {
++ DRM_ERROR("No stream status for CRTC: id=%d\n",
++ acrtc->crtc_id);
++ return;
++ }
++
++ surface_updates->surface = stream_status->plane_states[0];
++ if (!surface_updates->surface) {
++ DRM_ERROR("No surface for CRTC: id=%d\n",
++ acrtc->crtc_id);
++ return;
++ }
+ surface_updates->flip_addr = &addr;
+
+ dc_commit_updates_for_stream(adev->dm.dc,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5433-drm-amd-display-Remove-mst_hotplug_work.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5433-drm-amd-display-Remove-mst_hotplug_work.patch
new file mode 100644
index 00000000..24f01c26
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5433-drm-amd-display-Remove-mst_hotplug_work.patch
@@ -0,0 +1,65 @@
+From d3d77c923b97c6060dcb709721d92a5df5fcf58f Mon Sep 17 00:00:00 2001
+From: Leo Li <sunpeng.li@amd.com>
+Date: Wed, 12 Sep 2018 10:58:09 -0400
+Subject: [PATCH 5433/5725] drm/amd/display: Remove mst_hotplug_work
+
+[Why]
+The work struct's schedule call was removed a while ago, making this
+useless.
+
+[How]
+Remove it.
+
+Change-Id: I22139bcba4275ff679b2e054847e6241f660c8fc
+Signed-off-by: Leo Li <sunpeng.li@amd.com>
+Reviewed-by: David Francis <David.Francis@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 10 ----------
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 2 --
+ 2 files changed, 12 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 9c1654d..9fed53f 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -339,14 +339,6 @@ static int dm_set_powergating_state(void *handle,
+ /* Prototypes of private functions */
+ static int dm_early_init(void* handle);
+
+-static void hotplug_notify_work_func(struct work_struct *work)
+-{
+- struct amdgpu_display_manager *dm = container_of(work, struct amdgpu_display_manager, mst_hotplug_work);
+- struct drm_device *dev = dm->ddev;
+-
+- drm_kms_helper_hotplug_event(dev);
+-}
+-
+ /* Allocate memory for FBC compressed data */
+ static void amdgpu_dm_fbc_init(struct drm_connector *connector)
+ {
+@@ -448,8 +440,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
+ goto error;
+ }
+
+- INIT_WORK(&adev->dm.mst_hotplug_work, hotplug_notify_work_func);
+-
+ adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
+ if (!adev->dm.freesync_module) {
+ DRM_ERROR(
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+index 2f94317..ba0182b 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+@@ -132,8 +132,6 @@ struct amdgpu_display_manager {
+
+ const struct dc_link *backlight_link;
+
+- struct work_struct mst_hotplug_work;
+-
+ struct mod_freesync *freesync_module;
+
+ /**
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5434-drm-amd-display-fix-gamma-not-being-applied.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5434-drm-amd-display-fix-gamma-not-being-applied.patch
new file mode 100644
index 00000000..c0411940
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5434-drm-amd-display-fix-gamma-not-being-applied.patch
@@ -0,0 +1,81 @@
+From 8aacd6af0e45ec33a1b85359af8b1138b6cf0afe Mon Sep 17 00:00:00 2001
+From: SivapiriyanKumarasamy <sivapiriyan.kumarasamy@amd.com>
+Date: Wed, 12 Sep 2018 14:15:42 -0400
+Subject: [PATCH 5434/5725] drm/amd/display: fix gamma not being applied
+
+[WHY]
+Previously, night light forced a full update by applying a transfer
+function update regardless of whether it had changed. That logic was
+removed.
+
+Now gamma surface updates are applied only when there is also a plane
+info update, which does not work in cases such as moving the night
+light slider.
+
+[HOW]
+When moving the night light slider, perform a full update if the gamma
+has changed and there is a surface, even when the surface itself has
+not changed. Also get stream updates in setgamma prior to updating
+planes and streams.
+
+Change-Id: I6d960892fb981b7b94919c3060b891f7ce1d091c
+Signed-off-by: SivapiriyanKumarasamy <sivapiriyan.kumarasamy@amd.com>
+Reviewed-by: Anthony Koo <Anthony.Koo@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc.c | 19 ++++++++++++++-----
+ 1 file changed, 14 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index bdb03be..dd8babd 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -1202,9 +1202,6 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa
+ */
+ update_flags->bits.bpp_change = 1;
+
+- if (u->gamma && dce_use_lut(u->plane_info->format))
+- update_flags->bits.gamma_change = 1;
+-
+ if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
+ sizeof(union dc_tiling_info)) != 0) {
+ update_flags->bits.swizzle_change = 1;
+@@ -1221,7 +1218,6 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa
+ if (update_flags->bits.rotation_change
+ || update_flags->bits.stereo_format_change
+ || update_flags->bits.pixel_format_change
+- || update_flags->bits.gamma_change
+ || update_flags->bits.bpp_change
+ || update_flags->bits.bandwidth_change
+ || update_flags->bits.output_tf_change)
+@@ -1311,13 +1307,26 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
+ if (u->coeff_reduction_factor)
+ update_flags->bits.coeff_reduction_change = 1;
+
++ if (u->gamma) {
++ enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;
++
++ if (u->plane_info)
++ format = u->plane_info->format;
++ else if (u->surface)
++ format = u->surface->format;
++
++ if (dce_use_lut(format))
++ update_flags->bits.gamma_change = 1;
++ }
++
+ if (update_flags->bits.in_transfer_func_change) {
+ type = UPDATE_TYPE_MED;
+ elevate_update_type(&overall_type, type);
+ }
+
+ if (update_flags->bits.input_csc_change
+- || update_flags->bits.coeff_reduction_change) {
++ || update_flags->bits.coeff_reduction_change
++ || update_flags->bits.gamma_change) {
+ type = UPDATE_TYPE_FULL;
+ elevate_update_type(&overall_type, type);
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5435-drm-amd-display-Raise-dispclk-value-for-dce120-by-15.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5435-drm-amd-display-Raise-dispclk-value-for-dce120-by-15.patch
new file mode 100644
index 00000000..0043f617
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5435-drm-amd-display-Raise-dispclk-value-for-dce120-by-15.patch
@@ -0,0 +1,48 @@
+From 106310ab4e5a8993c7019461f26c141e40aa8611 Mon Sep 17 00:00:00 2001
+From: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Date: Wed, 12 Sep 2018 08:55:42 -0400
+Subject: [PATCH 5435/5725] drm/amd/display: Raise dispclk value for dce120 by
+ 15%
+
+[Why]
+
+The DISPCLK value was previously requested to be 15% higher for all
+ASICs that went through the dce110 bandwidth code path. As part of a
+refactoring of dce_clocks and the dce110 set bandwidth codepath this
+was removed for power saving considerations.
+
+That change caused display corruption under certain hardware
+configurations with Vega10.
+
+[How]
+
+The 15% DISPCLK increase is brought back, but only on dce120 for now.
+This should be a temporary workaround until the root cause of why this
+occurs on Vega (or other ASICs, if reported) is sorted out.
+
+Change-Id: If3fe9d13baa3a430dd3b40bd6813f741ae451ddb
+Tested-by: Nick Sarnie <sarnex@gentoo.org>
+Signed-off-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+index d52dead..aa6bd41 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+@@ -462,6 +462,9 @@ static void dce12_update_clocks(struct dccg *dccg,
+ {
+ struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
+
++ /* TODO: Investigate why this is needed to fix display corruption. */
++ new_clocks->dispclk_khz = new_clocks->dispclk_khz * 115 / 100;
++
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
+ clock_voltage_req.clocks_in_khz = new_clocks->dispclk_khz;
+--
+2.7.4
+
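The one-line change above scales the requested clock with integer math (dispclk_khz * 115 / 100). A small standalone sketch of that expression follows; it only demonstrates that multiplying before dividing preserves precision and that 32-bit math is sufficient for kHz-range display clocks, and is not driver code.

    /* Minimal sketch of the integer 15% bump used above.  Multiplying before
     * dividing keeps precision; dividing first would truncate each step.
     * Values are in kHz, so 32-bit arithmetic is fine for realistic clocks.
     */
    #include <stdio.h>

    static unsigned int raise_15_percent(unsigned int dispclk_khz)
    {
        return dispclk_khz * 115 / 100;
    }

    int main(void)
    {
        unsigned int clocks[] = { 300000, 600000, 1348000 }; /* sample kHz values */
        unsigned int n = sizeof(clocks) / sizeof(clocks[0]);

        for (unsigned int i = 0; i < n; i++)
            printf("%u kHz -> %u kHz\n", clocks[i], raise_15_percent(clocks[i]));
        return 0;
    }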
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5436-drm-amdgpu-powerplay-add-get_argument-callback-for-v.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5436-drm-amdgpu-powerplay-add-get_argument-callback-for-v.patch
new file mode 100644
index 00000000..7e21cdfa
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5436-drm-amdgpu-powerplay-add-get_argument-callback-for-v.patch
@@ -0,0 +1,185 @@
+From 8b5e5d452bfab04a3af15ad9fe6db62e108528d8 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 20 Sep 2018 20:33:08 -0500
+Subject: [PATCH 5436/5725] drm/amdgpu/powerplay: add get_argument callback for
+ vega20
+
+For consistency with other vega parts.
+
+Reviewed-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 18 ++++++++---------
+ .../gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c | 5 +----
+ .../gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c | 23 +++++-----------------
+ .../gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h | 1 -
+ 4 files changed, 15 insertions(+), 32 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+index d45cbfe..7825c6a 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+@@ -461,7 +461,7 @@ static int vega20_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
+ "[GetNumOfDpmLevel] failed to get dpm levels!",
+ return ret);
+
+- vega20_read_arg_from_smc(hwmgr, num_of_levels);
++ *num_of_levels = smum_get_argument(hwmgr);
+ PP_ASSERT_WITH_CODE(*num_of_levels > 0,
+ "[GetNumOfDpmLevel] number of clk levels is invalid!",
+ return -EINVAL);
+@@ -481,7 +481,7 @@ static int vega20_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
+ "[GetDpmFreqByIndex] failed to get dpm freq by index!",
+ return ret);
+
+- vega20_read_arg_from_smc(hwmgr, clk);
++ *clk = smum_get_argument(hwmgr);
+ PP_ASSERT_WITH_CODE(*clk,
+ "[GetDpmFreqByIndex] clk value is invalid!",
+ return -EINVAL);
+@@ -1044,7 +1044,7 @@ static int vega20_od8_get_gfx_clock_base_voltage(
+ "[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!",
+ return ret);
+
+- vega20_read_arg_from_smc(hwmgr, voltage);
++ *voltage = smum_get_argument(hwmgr);
+ *voltage = *voltage / VOLTAGE_SCALE;
+
+ return 0;
+@@ -1401,7 +1401,7 @@ static int vega20_get_max_sustainable_clock(struct pp_hwmgr *hwmgr,
+ (clock_select << 16))) == 0,
+ "[GetMaxSustainableClock] Failed to get max DC clock from SMC!",
+ return ret);
+- vega20_read_arg_from_smc(hwmgr, clock);
++ *clock = smum_get_argument(hwmgr);
+
+ /* if DC limit is zero, return AC limit */
+ if (*clock == 0) {
+@@ -1410,7 +1410,7 @@ static int vega20_get_max_sustainable_clock(struct pp_hwmgr *hwmgr,
+ (clock_select << 16))) == 0,
+ "[GetMaxSustainableClock] failed to get max AC clock from SMC!",
+ return ret);
+- vega20_read_arg_from_smc(hwmgr, clock);
++ *clock = smum_get_argument(hwmgr);
+ }
+
+ return 0;
+@@ -1770,14 +1770,14 @@ static int vega20_get_clock_ranges(struct pp_hwmgr *hwmgr,
+ PPSMC_MSG_GetMaxDpmFreq, (clock_select << 16))) == 0,
+ "[GetClockRanges] Failed to get max clock from SMC!",
+ return ret);
+- vega20_read_arg_from_smc(hwmgr, clock);
++ *clock = smum_get_argument(hwmgr);
+ } else {
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_GetMinDpmFreq,
+ (clock_select << 16))) == 0,
+ "[GetClockRanges] Failed to get min clock from SMC!",
+ return ret);
+- vega20_read_arg_from_smc(hwmgr, clock);
++ *clock = smum_get_argument(hwmgr);
+ }
+
+ return 0;
+@@ -1862,7 +1862,7 @@ static int vega20_get_current_gfx_clk_freq(struct pp_hwmgr *hwmgr, uint32_t *gfx
+ PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16))) == 0,
+ "[GetCurrentGfxClkFreq] Attempt to get Current GFXCLK Frequency Failed!",
+ return ret);
+- vega20_read_arg_from_smc(hwmgr, &gfx_clk);
++ gfx_clk = smum_get_argument(hwmgr);
+
+ *gfx_freq = gfx_clk * 100;
+
+@@ -1880,7 +1880,7 @@ static int vega20_get_current_mclk_freq(struct pp_hwmgr *hwmgr, uint32_t *mclk_f
+ PPSMC_MSG_GetDpmClockFreq, (PPCLK_UCLK << 16))) == 0,
+ "[GetCurrentMClkFreq] Attempt to get Current MCLK Frequency Failed!",
+ return ret);
+- vega20_read_arg_from_smc(hwmgr, &mem_clk);
++ mem_clk = smum_get_argument(hwmgr);
+
+ *mclk_freq = mem_clk * 100;
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c
+index 2984ddd5..1c951a5 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c
+@@ -37,10 +37,7 @@ static int vega20_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
+ PPSMC_MSG_GetCurrentRpm)) == 0,
+ "Attempt to get current RPM from SMC Failed!",
+ return ret);
+- PP_ASSERT_WITH_CODE((ret = vega20_read_arg_from_smc(hwmgr,
+- current_rpm)) == 0,
+- "Attempt to read current RPM from SMC Failed!",
+- return ret);
++ *current_rpm = smum_get_argument(hwmgr);
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
+index fe7f710..52438f5 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
+@@ -148,19 +148,11 @@ static int vega20_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
+ return (ret == PPSMC_Result_OK) ? 0 : -EIO;
+ }
+
+-/*
+- * Retrieve an argument from SMC.
+- * @param hwmgr the address of the powerplay hardware manager.
+- * @param arg pointer to store the argument from SMC.
+- * @return Always return 0.
+- */
+-int vega20_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg)
++static uint32_t vega20_get_argument(struct pp_hwmgr *hwmgr)
+ {
+ struct amdgpu_device *adev = hwmgr->adev;
+
+- *arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
+-
+- return 0;
++ return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
+ }
+
+ /*
+@@ -345,18 +337,12 @@ int vega20_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
+ PPSMC_MSG_GetEnabledSmuFeaturesLow)) == 0,
+ "[GetEnabledSMCFeatures] Attemp to get SMU features Low failed!",
+ return ret);
+- PP_ASSERT_WITH_CODE((ret = vega20_read_arg_from_smc(hwmgr,
+- &smc_features_low)) == 0,
+- "[GetEnabledSMCFeatures] Attemp to read SMU features Low argument failed!",
+- return ret);
++ smc_features_low = vega20_get_argument(hwmgr);
+ PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetEnabledSmuFeaturesHigh)) == 0,
+ "[GetEnabledSMCFeatures] Attemp to get SMU features High failed!",
+ return ret);
+- PP_ASSERT_WITH_CODE((ret = vega20_read_arg_from_smc(hwmgr,
+- &smc_features_high)) == 0,
+- "[GetEnabledSMCFeatures] Attemp to read SMU features High argument failed!",
+- return ret);
++ smc_features_high = vega20_get_argument(hwmgr);
+
+ *features_enabled = ((((uint64_t)smc_features_low << SMU_FEATURES_LOW_SHIFT) & SMU_FEATURES_LOW_MASK) |
+ (((uint64_t)smc_features_high << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK));
+@@ -584,4 +570,5 @@ const struct pp_smumgr_func vega20_smu_funcs = {
+ .download_pptable_settings = NULL,
+ .upload_pptable_settings = NULL,
+ .is_dpm_running = vega20_is_dpm_running,
++ .get_argument = vega20_get_argument,
+ };
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h
+index 505eb0d..fd17601 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h
+@@ -47,7 +47,6 @@ struct vega20_smumgr {
+ #define SMU_FEATURES_HIGH_MASK 0xFFFFFFFF00000000
+ #define SMU_FEATURES_HIGH_SHIFT 32
+
+-int vega20_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg);
+ int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
+ uint8_t *table, int16_t table_id);
+ int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr,
+--
+2.7.4
+
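The patch above replaces direct calls to vega20_read_arg_from_smc with the generic smum_get_argument helper, which reaches the ASIC-specific reader through the pp_smumgr_func callback table. The sketch below models that callback-table dispatch in standalone C; the structure and function names are stand-ins chosen for illustration, not the powerplay API.

    /* Minimal sketch (hypothetical names) of the callback-table dispatch that
     * a helper like smum_get_argument relies on: common code calls one entry
     * point, and the per-ASIC function is resolved through a function pointer.
     */
    #include <stdint.h>
    #include <stdio.h>

    struct smumgr_funcs {
        uint32_t (*get_argument)(void *ctx);
    };

    struct hwmgr {
        const struct smumgr_funcs *smumgr_funcs;
        void *ctx;
    };

    static uint32_t smum_get_argument(struct hwmgr *hwmgr)
    {
        return hwmgr->smumgr_funcs->get_argument(hwmgr->ctx);
    }

    /* Fake backend; a real ASIC implementation would read a mailbox register. */
    static uint32_t fake_asic_get_argument(void *ctx)
    {
        return *(uint32_t *)ctx;
    }

    static const struct smumgr_funcs fake_funcs = {
        .get_argument = fake_asic_get_argument,
    };

    int main(void)
    {
        uint32_t mailbox = 0xdeadbeef;
        struct hwmgr hw = { .smumgr_funcs = &fake_funcs, .ctx = &mailbox };

        printf("argument = 0x%08x\n", (unsigned int)smum_get_argument(&hw));
        return 0;
    }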
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5437-drm-amdgpu-powerplay-Move-vega10_enable_smc_features.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5437-drm-amdgpu-powerplay-Move-vega10_enable_smc_features.patch
new file mode 100644
index 00000000..d6e3024a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5437-drm-amdgpu-powerplay-Move-vega10_enable_smc_features.patch
@@ -0,0 +1,121 @@
+From 51ed098c787864f247f5b9769a33822a0aa3a196 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 20 Sep 2018 20:50:54 -0500
+Subject: [PATCH 5437/5725] drm/amdgpu/powerplay: Move
+ vega10_enable_smc_features
+
+to vega10_smumgr.c. For consistency with other vega parts.
+
+Reviewed-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 11 +----------
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h | 2 --
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c | 1 +
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c | 1 +
+ drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c | 10 ++++++++++
+ drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h | 2 ++
+ 6 files changed, 15 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+index 25397a3..6246d2c 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+@@ -39,6 +39,7 @@
+ #include "soc15_common.h"
+ #include "pppcielanes.h"
+ #include "vega10_hwmgr.h"
++#include "vega10_smumgr.h"
+ #include "vega10_processpptables.h"
+ #include "vega10_pptable.h"
+ #include "vega10_thermal.h"
+@@ -4937,16 +4938,6 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
+ .get_performance_level = vega10_get_performance_level,
+ };
+
+-int vega10_enable_smc_features(struct pp_hwmgr *hwmgr,
+- bool enable, uint32_t feature_mask)
+-{
+- int msg = enable ? PPSMC_MSG_EnableSmuFeatures :
+- PPSMC_MSG_DisableSmuFeatures;
+-
+- return smum_send_msg_to_smc_with_parameter(hwmgr,
+- msg, feature_mask);
+-}
+-
+ int vega10_hwmgr_init(struct pp_hwmgr *hwmgr)
+ {
+ hwmgr->hwmgr_func = &vega10_hwmgr_funcs;
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
+index 339820d..8987055 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
+@@ -441,7 +441,5 @@ int vega10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
+ int vega10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate);
+ int vega10_update_acp_dpm(struct pp_hwmgr *hwmgr, bool bgate);
+ int vega10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
+-int vega10_enable_smc_features(struct pp_hwmgr *hwmgr,
+- bool enable, uint32_t feature_mask);
+
+ #endif /* _VEGA10_HWMGR_H_ */
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
+index 2236487..2d88abf 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
+@@ -23,6 +23,7 @@
+
+ #include "hwmgr.h"
+ #include "vega10_hwmgr.h"
++#include "vega10_smumgr.h"
+ #include "vega10_powertune.h"
+ #include "vega10_ppsmc.h"
+ #include "vega10_inc.h"
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
+index aa044c1..407762b 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
+@@ -23,6 +23,7 @@
+
+ #include "vega10_thermal.h"
+ #include "vega10_hwmgr.h"
++#include "vega10_smumgr.h"
+ #include "vega10_ppsmc.h"
+ #include "vega10_inc.h"
+ #include "soc15_common.h"
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+index 5d19115..8176d33 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+@@ -88,6 +88,16 @@ static int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
+ return 0;
+ }
+
++int vega10_enable_smc_features(struct pp_hwmgr *hwmgr,
++ bool enable, uint32_t feature_mask)
++{
++ int msg = enable ? PPSMC_MSG_EnableSmuFeatures :
++ PPSMC_MSG_DisableSmuFeatures;
++
++ return smum_send_msg_to_smc_with_parameter(hwmgr,
++ msg, feature_mask);
++}
++
+ static int vega10_get_smc_features(struct pp_hwmgr *hwmgr,
+ uint32_t *features_enabled)
+ {
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h
+index 424e868..630c0ae 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h
+@@ -42,6 +42,8 @@ struct vega10_smumgr {
+ struct smu_table_array smu_tables;
+ };
+
++int vega10_enable_smc_features(struct pp_hwmgr *hwmgr,
++ bool enable, uint32_t feature_mask);
+
+ #endif
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5438-drm-amdgpu-powerplay-add-smu-smc_table_manager-callb.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5438-drm-amdgpu-powerplay-add-smu-smc_table_manager-callb.patch
new file mode 100644
index 00000000..341741a9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5438-drm-amdgpu-powerplay-add-smu-smc_table_manager-callb.patch
@@ -0,0 +1,112 @@
+From 56b4a3866651d8e4a4a95e2ac386c9c21fb368fc Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 20 Sep 2018 21:15:39 -0500
+Subject: [PATCH 5438/5725] drm/amdgpu/powerplay: add smu smc_table_manager
+ callback for vega12
+
+For consistency with other asics.
+
+Reviewed-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 8 ++++----
+ .../gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c | 22 ++++++++++++++++++----
+ .../gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h | 4 ----
+ 3 files changed, 22 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+index 0789d64..de81abf 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+@@ -745,8 +745,8 @@ static int vega12_init_smc_table(struct pp_hwmgr *hwmgr)
+
+ memcpy(pp_table, pptable_information->smc_pptable, sizeof(PPTable_t));
+
+- result = vega12_copy_table_to_smc(hwmgr,
+- (uint8_t *)pp_table, TABLE_PPTABLE);
++ result = smum_smc_table_manager(hwmgr,
++ (uint8_t *)pp_table, TABLE_PPTABLE, false);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to upload PPtable!", return result);
+
+@@ -2103,8 +2103,8 @@ static int vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
+
+ if ((data->water_marks_bitmap & WaterMarksExist) &&
+ !(data->water_marks_bitmap & WaterMarksLoaded)) {
+- result = vega12_copy_table_to_smc(hwmgr,
+- (uint8_t *)wm_table, TABLE_WATERMARKS);
++ result = smum_smc_table_manager(hwmgr,
++ (uint8_t *)wm_table, TABLE_WATERMARKS, false);
+ PP_ASSERT_WITH_CODE(result, "Failed to update WMTABLE!", return EINVAL);
+ data->water_marks_bitmap |= WaterMarksLoaded;
+ }
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
+index 7f0e210..ddb8015 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
+@@ -37,8 +37,8 @@
+ * @param hwmgr the address of the HW manager
+ * @param table_id the driver's table ID to copy from
+ */
+-int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
+- uint8_t *table, int16_t table_id)
++static int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
++ uint8_t *table, int16_t table_id)
+ {
+ struct vega12_smumgr *priv =
+ (struct vega12_smumgr *)(hwmgr->smu_backend);
+@@ -75,8 +75,8 @@ int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
+ * @param hwmgr the address of the HW manager
+ * @param table_id the table to copy from
+ */
+-int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr,
+- uint8_t *table, int16_t table_id)
++static int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr,
++ uint8_t *table, int16_t table_id)
+ {
+ struct vega12_smumgr *priv =
+ (struct vega12_smumgr *)(hwmgr->smu_backend);
+@@ -351,6 +351,19 @@ static int vega12_start_smu(struct pp_hwmgr *hwmgr)
+ return 0;
+ }
+
++static int vega12_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table,
++ uint16_t table_id, bool rw)
++{
++ int ret;
++
++ if (rw)
++ ret = vega12_copy_table_from_smc(hwmgr, table, table_id);
++ else
++ ret = vega12_copy_table_to_smc(hwmgr, table, table_id);
++
++ return ret;
++}
++
+ const struct pp_smumgr_func vega12_smu_funcs = {
+ .smu_init = &vega12_smu_init,
+ .smu_fini = &vega12_smu_fini,
+@@ -362,4 +375,5 @@ const struct pp_smumgr_func vega12_smu_funcs = {
+ .upload_pptable_settings = NULL,
+ .is_dpm_running = vega12_is_dpm_running,
+ .get_argument = smu9_get_argument,
++ .smc_table_manager = vega12_smc_table_manager,
+ };
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h
+index b285cbc..aeec965 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h
+@@ -48,10 +48,6 @@ struct vega12_smumgr {
+ #define SMU_FEATURES_HIGH_MASK 0xFFFFFFFF00000000
+ #define SMU_FEATURES_HIGH_SHIFT 32
+
+-int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
+- uint8_t *table, int16_t table_id);
+-int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr,
+- uint8_t *table, int16_t table_id);
+ int vega12_enable_smc_features(struct pp_hwmgr *hwmgr,
+ bool enable, uint64_t feature_mask);
+ int vega12_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
+--
+2.7.4
+
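In the smc_table_manager callback added above, the boolean rw argument selects the transfer direction: true copies a table from the SMC into the caller's buffer, false uploads the caller's buffer to the SMC. A minimal standalone sketch of that convention follows, with a byte array standing in for SMC-side memory; names and the fake backing store are illustrative, not the driver's implementation.

    /* Minimal sketch (hypothetical names) of the rw-flag convention used by the
     * smc_table_manager callback above: rw == true reads a table from the SMC
     * into the caller's buffer, rw == false writes the caller's buffer out.
     */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define TABLE_SIZE 8

    static uint8_t fake_smc_table[TABLE_SIZE];  /* stands in for SMC-side memory */

    static int copy_table_from_smc(uint8_t *table, int16_t table_id)
    {
        (void)table_id;
        memcpy(table, fake_smc_table, TABLE_SIZE);
        return 0;
    }

    static int copy_table_to_smc(const uint8_t *table, int16_t table_id)
    {
        (void)table_id;
        memcpy(fake_smc_table, table, TABLE_SIZE);
        return 0;
    }

    static int smc_table_manager(uint8_t *table, uint16_t table_id, bool rw)
    {
        return rw ? copy_table_from_smc(table, (int16_t)table_id)
                  : copy_table_to_smc(table, (int16_t)table_id);
    }

    int main(void)
    {
        uint8_t buf[TABLE_SIZE] = { 1, 2, 3, 4, 5, 6, 7, 8 };

        smc_table_manager(buf, 0, false);  /* upload to the "SMC" */
        memset(buf, 0, sizeof(buf));
        smc_table_manager(buf, 0, true);   /* read it back */
        printf("first byte after round trip: %u\n", buf[0]);
        return 0;
    }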
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5439-drm-amdgpu-powerplay-add-smu-smc_table_manager-callb.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5439-drm-amdgpu-powerplay-add-smu-smc_table_manager-callb.patch
new file mode 100644
index 00000000..553be2d2
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5439-drm-amdgpu-powerplay-add-smu-smc_table_manager-callb.patch
@@ -0,0 +1,191 @@
+From 773f9cfb05f9d96cfffc2b2b4706e3446a57855a Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 20 Sep 2018 21:17:17 -0500
+Subject: [PATCH 5439/5725] drm/amdgpu/powerplay: add smu smc_table_manager
+ callback for vega20
+
+For consistency with other asics.
+
+Reviewed-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 32 +++++++++++-----------
+ .../gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c | 22 ++++++++++++---
+ .../gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h | 4 ---
+ 3 files changed, 34 insertions(+), 24 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+index 7825c6a..260e0e4 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+@@ -743,8 +743,8 @@ static int vega20_init_smc_table(struct pp_hwmgr *hwmgr)
+
+ memcpy(pp_table, pptable_information->smc_pptable, sizeof(PPTable_t));
+
+- result = vega20_copy_table_to_smc(hwmgr,
+- (uint8_t *)pp_table, TABLE_PPTABLE);
++ result = smum_smc_table_manager(hwmgr,
++ (uint8_t *)pp_table, TABLE_PPTABLE, false);
+ PP_ASSERT_WITH_CODE(!result,
+ "[InitSMCTable] Failed to upload PPtable!",
+ return result);
+@@ -1067,7 +1067,7 @@ static int vega20_od8_initialize_default_settings(
+ vega20_od8_set_feature_id(hwmgr);
+
+ /* Set default values */
+- ret = vega20_copy_table_from_smc(hwmgr, (uint8_t *)od_table, TABLE_OVERDRIVE);
++ ret = smum_smc_table_manager(hwmgr, (uint8_t *)od_table, TABLE_OVERDRIVE, true);
+ PP_ASSERT_WITH_CODE(!ret,
+ "Failed to export over drive table!",
+ return ret);
+@@ -1195,7 +1195,7 @@ static int vega20_od8_initialize_default_settings(
+ }
+ }
+
+- ret = vega20_copy_table_to_smc(hwmgr, (uint8_t *)od_table, TABLE_OVERDRIVE);
++ ret = smum_smc_table_manager(hwmgr, (uint8_t *)od_table, TABLE_OVERDRIVE, false);
+ PP_ASSERT_WITH_CODE(!ret,
+ "Failed to import over drive table!",
+ return ret);
+@@ -1214,7 +1214,7 @@ static int vega20_od8_set_settings(
+ struct vega20_od8_single_setting *od8_settings =
+ data->od8_settings.od8_settings_array;
+
+- ret = vega20_copy_table_from_smc(hwmgr, (uint8_t *)(&od_table), TABLE_OVERDRIVE);
++ ret = smum_smc_table_manager(hwmgr, (uint8_t *)(&od_table), TABLE_OVERDRIVE, true);
+ PP_ASSERT_WITH_CODE(!ret,
+ "Failed to export over drive table!",
+ return ret);
+@@ -1271,7 +1271,7 @@ static int vega20_od8_set_settings(
+ break;
+ }
+
+- ret = vega20_copy_table_to_smc(hwmgr, (uint8_t *)(&od_table), TABLE_OVERDRIVE);
++ ret = smum_smc_table_manager(hwmgr, (uint8_t *)(&od_table), TABLE_OVERDRIVE, false);
+ PP_ASSERT_WITH_CODE(!ret,
+ "Failed to import over drive table!",
+ return ret);
+@@ -1841,7 +1841,7 @@ static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr,
+ int ret = 0;
+ SmuMetrics_t metrics_table;
+
+- ret = vega20_copy_table_from_smc(hwmgr, (uint8_t *)&metrics_table, TABLE_SMU_METRICS);
++ ret = smum_smc_table_manager(hwmgr, (uint8_t *)&metrics_table, TABLE_SMU_METRICS, true);
+ PP_ASSERT_WITH_CODE(!ret,
+ "Failed to export SMU METRICS table!",
+ return ret);
+@@ -1893,7 +1893,7 @@ static int vega20_get_current_activity_percent(struct pp_hwmgr *hwmgr,
+ int ret = 0;
+ SmuMetrics_t metrics_table;
+
+- ret = vega20_copy_table_from_smc(hwmgr, (uint8_t *)&metrics_table, TABLE_SMU_METRICS);
++ ret = smum_smc_table_manager(hwmgr, (uint8_t *)&metrics_table, TABLE_SMU_METRICS, true);
+ PP_ASSERT_WITH_CODE(!ret,
+ "Failed to export SMU METRICS table!",
+ return ret);
+@@ -2612,18 +2612,18 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
+ data->gfxclk_overdrive = false;
+ data->memclk_overdrive = false;
+
+- ret = vega20_copy_table_from_smc(hwmgr,
+- (uint8_t *)od_table,
+- TABLE_OVERDRIVE);
++ ret = smum_smc_table_manager(hwmgr,
++ (uint8_t *)od_table,
++ TABLE_OVERDRIVE, true);
+ PP_ASSERT_WITH_CODE(!ret,
+ "Failed to export overdrive table!",
+ return ret);
+ break;
+
+ case PP_OD_COMMIT_DPM_TABLE:
+- ret = vega20_copy_table_to_smc(hwmgr,
+- (uint8_t *)od_table,
+- TABLE_OVERDRIVE);
++ ret = smum_smc_table_manager(hwmgr,
++ (uint8_t *)od_table,
++ TABLE_OVERDRIVE, false);
+ PP_ASSERT_WITH_CODE(!ret,
+ "Failed to import overdrive table!",
+ return ret);
+@@ -2847,8 +2847,8 @@ static int vega20_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
+
+ if ((data->water_marks_bitmap & WaterMarksExist) &&
+ !(data->water_marks_bitmap & WaterMarksLoaded)) {
+- result = vega20_copy_table_to_smc(hwmgr,
+- (uint8_t *)wm_table, TABLE_WATERMARKS);
++ result = smum_smc_table_manager(hwmgr,
++ (uint8_t *)wm_table, TABLE_WATERMARKS, false);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to update WMTABLE!",
+ return result);
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
+index 52438f5..b7ff7d4 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
+@@ -160,8 +160,8 @@ static uint32_t vega20_get_argument(struct pp_hwmgr *hwmgr)
+ * @param hwmgr the address of the HW manager
+ * @param table_id the driver's table ID to copy from
+ */
+-int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
+- uint8_t *table, int16_t table_id)
++static int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
++ uint8_t *table, int16_t table_id)
+ {
+ struct vega20_smumgr *priv =
+ (struct vega20_smumgr *)(hwmgr->smu_backend);
+@@ -200,8 +200,8 @@ int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
+ * @param hwmgr the address of the HW manager
+ * @param table_id the table to copy from
+ */
+-int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr,
+- uint8_t *table, int16_t table_id)
++static int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr,
++ uint8_t *table, int16_t table_id)
+ {
+ struct vega20_smumgr *priv =
+ (struct vega20_smumgr *)(hwmgr->smu_backend);
+@@ -560,6 +560,19 @@ static bool vega20_is_dpm_running(struct pp_hwmgr *hwmgr)
+ return false;
+ }
+
++static int vega20_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table,
++ uint16_t table_id, bool rw)
++{
++ int ret;
++
++ if (rw)
++ ret = vega20_copy_table_from_smc(hwmgr, table, table_id);
++ else
++ ret = vega20_copy_table_to_smc(hwmgr, table, table_id);
++
++ return ret;
++}
++
+ const struct pp_smumgr_func vega20_smu_funcs = {
+ .smu_init = &vega20_smu_init,
+ .smu_fini = &vega20_smu_fini,
+@@ -571,4 +584,5 @@ const struct pp_smumgr_func vega20_smu_funcs = {
+ .upload_pptable_settings = NULL,
+ .is_dpm_running = vega20_is_dpm_running,
+ .get_argument = vega20_get_argument,
++ .smc_table_manager = vega20_smc_table_manager,
+ };
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h
+index fd17601..77349c3 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h
+@@ -47,10 +47,6 @@ struct vega20_smumgr {
+ #define SMU_FEATURES_HIGH_MASK 0xFFFFFFFF00000000
+ #define SMU_FEATURES_HIGH_SHIFT 32
+
+-int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
+- uint8_t *table, int16_t table_id);
+-int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr,
+- uint8_t *table, int16_t table_id);
+ int vega20_enable_smc_features(struct pp_hwmgr *hwmgr,
+ bool enable, uint64_t feature_mask);
+ int vega20_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5440-drm-amdgpu-add-new-AMDGPU_PP_SENSOR_ENABLED_SMC_FEAT.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5440-drm-amdgpu-add-new-AMDGPU_PP_SENSOR_ENABLED_SMC_FEAT.patch
new file mode 100644
index 00000000..bf7505f7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5440-drm-amdgpu-add-new-AMDGPU_PP_SENSOR_ENABLED_SMC_FEAT.patch
@@ -0,0 +1,29 @@
+From cb42c984af0dcd5dabdd62ded6f8d6e6e6c981dc Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 20 Sep 2018 22:16:45 -0500
+Subject: [PATCH 5440/5725] drm/amdgpu: add new
+ AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK sensor
+
+For getting the 64 bit enabled smc feature mask from vega parts.
+
+Reviewed-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/include/kgd_pp_interface.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+index 448dee4..bd74045 100644
+--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
++++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+@@ -113,6 +113,7 @@ enum amd_pp_sensors {
+ AMDGPU_PP_SENSOR_GPU_POWER,
+ AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
+ AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK,
++ AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK,
+ };
+
+ enum amd_pp_task {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5441-drm-amdgpu-implement-ENABLED_SMC_FEATURES_MASK-senso.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5441-drm-amdgpu-implement-ENABLED_SMC_FEATURES_MASK-senso.patch
new file mode 100644
index 00000000..62b6da2a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5441-drm-amdgpu-implement-ENABLED_SMC_FEATURES_MASK-senso.patch
@@ -0,0 +1,75 @@
+From 88c0d1440d2e6c94ba36bc992251e15e9503fd59 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 20 Sep 2018 22:34:42 -0500
+Subject: [PATCH 5441/5725] drm/amdgpu: implement ENABLED_SMC_FEATURES_MASK
+ sensor for vega10
+
+So we can query what features are enabled for debugging.
+
+Reviewed-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 5 +++++
+ drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c | 8 ++++----
+ drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h | 2 ++
+ 3 files changed, 11 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+index 6246d2c..2471170 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+@@ -3713,6 +3713,11 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
+ SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID__SHIFT;
+ *((uint32_t *)value) = (uint32_t)convert_to_vddc((uint8_t)val_vid);
+ return 0;
++ case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
++ ret = vega10_get_enabled_smc_features(hwmgr, (uint64_t *)value);
++ if (!ret)
++ *size = 8;
++ break;
+ default:
+ ret = -EINVAL;
+ break;
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+index 8176d33..c81acc3 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+@@ -98,8 +98,8 @@ int vega10_enable_smc_features(struct pp_hwmgr *hwmgr,
+ msg, feature_mask);
+ }
+
+-static int vega10_get_smc_features(struct pp_hwmgr *hwmgr,
+- uint32_t *features_enabled)
++int vega10_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
++ uint64_t *features_enabled)
+ {
+ if (features_enabled == NULL)
+ return -EINVAL;
+@@ -112,9 +112,9 @@ static int vega10_get_smc_features(struct pp_hwmgr *hwmgr,
+
+ static bool vega10_is_dpm_running(struct pp_hwmgr *hwmgr)
+ {
+- uint32_t features_enabled = 0;
++ uint64_t features_enabled = 0;
+
+- vega10_get_smc_features(hwmgr, &features_enabled);
++ vega10_get_enabled_smc_features(hwmgr, &features_enabled);
+
+ if (features_enabled & SMC_DPM_FEATURES)
+ return true;
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h
+index 630c0ae..bad760f 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h
+@@ -44,6 +44,8 @@ struct vega10_smumgr {
+
+ int vega10_enable_smc_features(struct pp_hwmgr *hwmgr,
+ bool enable, uint32_t feature_mask);
++int vega10_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
++ uint64_t *features_enabled);
+
+ #endif
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5442-drm-amdgpu-implement-ENABLED_SMC_FEATURES_MASK-senso.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5442-drm-amdgpu-implement-ENABLED_SMC_FEATURES_MASK-senso.patch
new file mode 100644
index 00000000..775f9095
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5442-drm-amdgpu-implement-ENABLED_SMC_FEATURES_MASK-senso.patch
@@ -0,0 +1,34 @@
+From d6fd029101c4f24fcf65e2aa897d659db265ea0c Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 20 Sep 2018 22:36:23 -0500
+Subject: [PATCH 5442/5725] drm/amdgpu: implement ENABLED_SMC_FEATURES_MASK
+ sensor for vega12
+
+So we can query what features are enabled for debugging.
+
+Reviewed-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+index de81abf..9600e2f 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+@@ -1317,7 +1317,11 @@ static int vega12_read_sensor(struct pp_hwmgr *hwmgr, int idx,
+ break;
+ case AMDGPU_PP_SENSOR_GPU_POWER:
+ ret = vega12_get_gpu_power(hwmgr, (uint32_t *)value);
+-
++ break;
++ case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
++ ret = vega12_get_enabled_smc_features(hwmgr, (uint64_t *)value);
++ if (!ret)
++ *size = 8;
+ break;
+ default:
+ ret = -EINVAL;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5443-drm-amdgpu-implement-ENABLED_SMC_FEATURES_MASK-senso.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5443-drm-amdgpu-implement-ENABLED_SMC_FEATURES_MASK-senso.patch
new file mode 100644
index 00000000..935ef13e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5443-drm-amdgpu-implement-ENABLED_SMC_FEATURES_MASK-senso.patch
@@ -0,0 +1,33 @@
+From 3dc8e34ec7a31e9d5b62a6ca2cd79acd84f689b1 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 20 Sep 2018 22:36:38 -0500
+Subject: [PATCH 5443/5725] drm/amdgpu: implement ENABLED_SMC_FEATURES_MASK
+ sensor for vega20
+
+So we can query what features are enabled for debugging.
+
+Reviewed-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+index 260e0e4..2a554f9 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+@@ -1941,6 +1941,11 @@ static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx,
+ *size = 16;
+ ret = vega20_get_gpu_power(hwmgr, (uint32_t *)value);
+ break;
++ case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
++ ret = vega20_get_enabled_smc_features(hwmgr, (uint64_t *)value);
++ if (!ret)
++ *size = 8;
++ break;
+ default:
+ ret = -EINVAL;
+ break;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5444-drm-amdgpu-print-smc-feature-mask-in-debugfs-amdgpu_.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5444-drm-amdgpu-print-smc-feature-mask-in-debugfs-amdgpu_.patch
new file mode 100644
index 00000000..24885b98
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5444-drm-amdgpu-print-smc-feature-mask-in-debugfs-amdgpu_.patch
@@ -0,0 +1,40 @@
+From 26c0a0dff042d83beab601e0a70a525d493cd0a0 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 20 Sep 2018 22:50:07 -0500
+Subject: [PATCH 5444/5725] drm/amdgpu: print smc feature mask in debugfs
+ amdgpu_pm_info
+
+Print the enabled smc feature mask in amdgpu_pm_info for debugging.
+
+Reviewed-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+index 9c5036f..accf51e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+@@ -1975,6 +1975,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
+ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
+ {
+ uint32_t value;
++ uint64_t value64;
+ uint32_t query = 0;
+ int size;
+
+@@ -2013,6 +2014,10 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
+ seq_printf(m, "GPU Load: %u %%\n", value);
+ seq_printf(m, "\n");
+
++ /* SMC feature mask */
++ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
++ seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);
++
+ /* UVD clocks */
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
+ if (!value) {
+--
+2.7.4
+
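The debugfs hunk above prints the 64-bit enabled-feature mask as a single hex value. When debugging, that value can be decoded bit by bit against the per-ASIC feature IDs; the standalone sketch below shows one way to do so, with a purely illustrative bit-to-name table since the real feature IDs come from the ppsmc headers.

    /* Minimal sketch of decoding the 64-bit SMC feature mask printed above.
     * The bit-to-name mapping is purely illustrative; real feature IDs are
     * defined per ASIC in the powerplay headers.
     */
    #include <stdint.h>
    #include <stdio.h>

    static const char *const fake_feature_names[] = {
        "DPM_PREFETCHER", "DPM_GFXCLK", "DPM_UCLK", "DPM_SOCCLK",
    };

    int main(void)
    {
        uint64_t mask = 0xbULL; /* example value as read from amdgpu_pm_info */
        unsigned int known = sizeof(fake_feature_names) / sizeof(fake_feature_names[0]);

        printf("SMC Feature Mask: 0x%016llx\n", (unsigned long long)mask);
        for (unsigned int bit = 0; bit < 64; bit++) {
            if (!(mask & (1ULL << bit)))
                continue;
            if (bit < known)
                printf("  bit %2u: %s\n", bit, fake_feature_names[bit]);
            else
                printf("  bit %2u: (unknown)\n", bit);
        }
        return 0;
    }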
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5445-drm-amd-display-remove-redundant-null-pointer-check-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5445-drm-amd-display-remove-redundant-null-pointer-check-.patch
new file mode 100644
index 00000000..d25f9e9a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5445-drm-amd-display-remove-redundant-null-pointer-check-.patch
@@ -0,0 +1,37 @@
+From 771455fda938b6dee45a33ca7067da4455547b16 Mon Sep 17 00:00:00 2001
+From: zhong jiang <zhongjiang@huawei.com>
+Date: Fri, 21 Sep 2018 21:12:11 +0800
+Subject: [PATCH 5445/5725] drm/amd/display: remove redundant null pointer
+ check before kfree
+
+kfree already handles a null pointer, hence it is safe to remove the
+redundant null pointer check before calling kfree.
+
+Signed-off-by: zhong jiang <zhongjiang@huawei.com>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/modules/stats/stats.c | 8 ++------
+ 1 file changed, 2 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/modules/stats/stats.c b/drivers/gpu/drm/amd/display/modules/stats/stats.c
+index 480eb2c..deadf51 100644
+--- a/drivers/gpu/drm/amd/display/modules/stats/stats.c
++++ b/drivers/gpu/drm/amd/display/modules/stats/stats.c
+@@ -188,12 +188,8 @@ void mod_stats_destroy(struct mod_stats *mod_stats)
+ if (mod_stats != NULL) {
+ struct core_stats *core_stats = MOD_STATS_TO_CORE(mod_stats);
+
+- if (core_stats->time != NULL)
+- kfree(core_stats->time);
+-
+- if (core_stats->events != NULL)
+- kfree(core_stats->events);
+-
++ kfree(core_stats->time);
++ kfree(core_stats->events);
+ kfree(core_stats);
+ }
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5446-drm-amdgpu-Add-warning-message-for-INT-SW-fallback.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5446-drm-amdgpu-Add-warning-message-for-INT-SW-fallback.patch
new file mode 100644
index 00000000..d7c025b9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5446-drm-amdgpu-Add-warning-message-for-INT-SW-fallback.patch
@@ -0,0 +1,28 @@
+From a994484a4dfadb035e215fb96c5b3f1051b23c07 Mon Sep 17 00:00:00 2001
+From: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Date: Fri, 21 Sep 2018 15:41:52 -0400
+Subject: [PATCH 5446/5725] drm/amdgpu: Add warning message for INT SW
+ fallback.
+
+Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+index e7f6389..a9b4684 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+@@ -274,6 +274,7 @@ static void amdgpu_fence_fallback(struct timer_list *t)
+ struct amdgpu_ring *ring = from_timer(ring, t,
+ fence_drv.fallback_timer);
+
++ DRM_INFO("Fallback to SW interrupt on ring %s due to HW interrupt time out", ring->name);
+ amdgpu_fence_process(ring);
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5447-drm-amdgpu-sriov-Correct-the-setting-about-sdma-door.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5447-drm-amdgpu-sriov-Correct-the-setting-about-sdma-door.patch
new file mode 100644
index 00000000..853ed3fa
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5447-drm-amdgpu-sriov-Correct-the-setting-about-sdma-door.patch
@@ -0,0 +1,38 @@
+From d6ede3a756010bf332c74526bf6bf670d6ef3646 Mon Sep 17 00:00:00 2001
+From: Emily Deng <Emily.Deng@amd.com>
+Date: Thu, 9 Aug 2018 15:05:31 +0800
+Subject: [PATCH 5447/5725] drm/amdgpu/sriov: Correct the setting about sdma
+ doorbell offset of Vega10
+
+Correct the format
+
+For vega10 sriov, the sdma doorbell must be fixed as follows to keep the
+same setting as the host driver, or conflicts will occur.
+
+Change-Id: I0e7480c6516a4fe4c2646f1644f897e21e0f7f7b
+Signed-off-by: Emily Deng <Emily.Deng@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 80fc9b3..7d10f6e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -426,8 +426,9 @@ typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
+ AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE1 = 0xE9,
+
+ /* For vega10 sriov, the sdma doorbell must be fixed as follow
+- * to keep the same setting with host driver, or it will
+- * happen conflicts */
++ * to keep the same setting with host driver, or it will
++ * happen conflicts
++ */
+ AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 = 0xF0,
+ AMDGPU_VEGA10_DOORBELL64_sDMA_HI_PRI_ENGINE0 = 0xF1,
+ AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 = 0xF2,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5448-drm-amdgpu-Deactivate-SW-interrupt-fallback-in-amdgp.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5448-drm-amdgpu-Deactivate-SW-interrupt-fallback-in-amdgp.patch
new file mode 100644
index 00000000..010e3d36
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5448-drm-amdgpu-Deactivate-SW-interrupt-fallback-in-amdgp.patch
@@ -0,0 +1,35 @@
+From a869776027af711358c268d7cd4032528f310321 Mon Sep 17 00:00:00 2001
+From: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Date: Mon, 24 Sep 2018 14:10:22 +0200
+Subject: [PATCH 5448/5725] drm/amdgpu: Deactivate SW interrupt fallback in
+ amdgpu_fence_process v2
+
+Deactivate SW interrupt fallback when all emitted fences are completed.
+Also switch interrupt SW fallback message from INFO to WARN.
+
+v2: shorten the warning message a bit and only re-activate the timer during
+processing if it was already activated before.
+
+Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Suggested-by: Christian Konig <Christian.Koenig@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+index a9b4684..d169840 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+@@ -274,7 +274,7 @@ static void amdgpu_fence_fallback(struct timer_list *t)
+ struct amdgpu_ring *ring = from_timer(ring, t,
+ fence_drv.fallback_timer);
+
+- DRM_INFO("Fallback to SW interrupt on ring %s due to HW interrupt time out", ring->name);
++ DRM_WARN("Fence fallback timer expired on ring %s\n", ring->name);
+ amdgpu_fence_process(ring);
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5449-drm-amdgpu-Refine-function-name.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5449-drm-amdgpu-Refine-function-name.patch
new file mode 100644
index 00000000..df202fd9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5449-drm-amdgpu-Refine-function-name.patch
@@ -0,0 +1,120 @@
+From c83e85880aa1af87e0968a32040d73399d604cfc Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Tue, 28 Aug 2018 18:20:19 +0800
+Subject: [PATCH 5449/5725] drm/amdgpu: Refine function name
+
+Change the function name gfx_v6/7/8/9_0_gpu_init to
+gfx_v6/7/8/9_0_constants_init.
+This function only initializes gfx constants such
+as max pipes, render backends...
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 4 ++--
+ drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 10 +++++-----
+ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 4 ++--
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 4 ++--
+ 4 files changed, 11 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+index 4518021..b8176c2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+@@ -1552,7 +1552,7 @@ static void gfx_v6_0_config_init(struct amdgpu_device *adev)
+ adev->gfx.config.double_offchip_lds_buf = 0;
+ }
+
+-static void gfx_v6_0_gpu_init(struct amdgpu_device *adev)
++static void gfx_v6_0_constants_init(struct amdgpu_device *adev)
+ {
+ u32 gb_addr_config = 0;
+ u32 mc_shared_chmap, mc_arb_ramcfg;
+@@ -3204,7 +3204,7 @@ static int gfx_v6_0_hw_init(void *handle)
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+- gfx_v6_0_gpu_init(adev);
++ gfx_v6_0_constants_init(adev);
+
+ r = gfx_v6_0_rlc_resume(adev);
+ if (r)
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+index 01ca681..169dbf9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+@@ -1886,14 +1886,14 @@ static void gfx_v7_0_config_init(struct amdgpu_device *adev)
+ }
+
+ /**
+- * gfx_v7_0_gpu_init - setup the 3D engine
++ * gfx_v7_0_constants_init - setup the 3D engine
+ *
+ * @adev: amdgpu_device pointer
+ *
+- * Configures the 3D engine and tiling configuration
+- * registers so that the 3D engine is usable.
++ * init the gfx constants such as the 3D engine, tiling configuration
++ * registers, maximum number of quad pipes, render backends...
+ */
+-static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
++static void gfx_v7_0_constants_init(struct amdgpu_device *adev)
+ {
+ u32 sh_mem_cfg, sh_static_mem_cfg, sh_mem_base;
+ u32 tmp;
+@@ -4644,7 +4644,7 @@ static int gfx_v7_0_hw_init(void *handle)
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+- gfx_v7_0_gpu_init(adev);
++ gfx_v7_0_constants_init(adev);
+
+ /* init rlc */
+ r = gfx_v7_0_rlc_resume(adev);
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index 096347a..d1c8311 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -3834,7 +3834,7 @@ static void gfx_v8_0_config_init(struct amdgpu_device *adev)
+ }
+ }
+
+-static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
++static void gfx_v8_0_constants_init(struct amdgpu_device *adev)
+ {
+ u32 tmp, sh_static_mem_cfg;
+ int i;
+@@ -5038,7 +5038,7 @@ static int gfx_v8_0_hw_init(void *handle)
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ gfx_v8_0_init_golden_registers(adev);
+- gfx_v8_0_gpu_init(adev);
++ gfx_v8_0_constants_init(adev);
+
+ r = gfx_v8_0_rlc_resume(adev);
+ if (r)
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 27a5ada..6c44ce1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -1848,7 +1848,7 @@ static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
+ mutex_unlock(&adev->srbm_mutex);
+ }
+
+-static void gfx_v9_0_gpu_init(struct amdgpu_device *adev)
++static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
+ {
+ u32 tmp;
+ int i;
+@@ -3240,7 +3240,7 @@ static int gfx_v9_0_hw_init(void *handle)
+
+ gfx_v9_0_init_golden_registers(adev);
+
+- gfx_v9_0_gpu_init(adev);
++ gfx_v9_0_constants_init(adev);
+
+ r = gfx_v9_0_csb_vram_pin(adev);
+ if (r)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5450-drm-amdgpu-Halt-rlc-cp-in-rlc_safe_mode.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5450-drm-amdgpu-Halt-rlc-cp-in-rlc_safe_mode.patch
new file mode 100644
index 00000000..6e8eec12
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5450-drm-amdgpu-Halt-rlc-cp-in-rlc_safe_mode.patch
@@ -0,0 +1,130 @@
+From c1018454bdefbf260d083e897f846e8b4bdcbc90 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Tue, 11 Sep 2018 10:33:38 +0800
+Subject: [PATCH 5450/5725] drm/amdgpu: Halt rlc/cp in rlc_safe_mode
+
+Before halting rlc/cp, we need to:
+1. enter rlc safe mode
+2. wait for rlc/cp to become idle
+
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Hang Zhou <hang.zhou@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 86 ++++++++++++++++++++++++-----------
+ 1 file changed, 59 insertions(+), 27 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index d1c8311..a632c8d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -5079,6 +5079,55 @@ static int gfx_v8_0_kcq_disable(struct amdgpu_device *adev)
+ return r;
+ }
+
++static bool gfx_v8_0_is_idle(void *handle)
++{
++ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
++
++ if (REG_GET_FIELD(RREG32(mmGRBM_STATUS), GRBM_STATUS, GUI_ACTIVE)
++ || RREG32(mmGRBM_STATUS2) != 0x8)
++ return false;
++ else
++ return true;
++}
++
++static bool gfx_v8_0_rlc_is_idle(void *handle)
++{
++ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
++
++ if (RREG32(mmGRBM_STATUS2) != 0x8)
++ return false;
++ else
++ return true;
++}
++
++static int gfx_v8_0_wait_for_rlc_idle(void *handle)
++{
++ unsigned int i;
++ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
++
++ for (i = 0; i < adev->usec_timeout; i++) {
++ if (gfx_v8_0_rlc_is_idle(handle))
++ return 0;
++
++ udelay(1);
++ }
++ return -ETIMEDOUT;
++}
++
++static int gfx_v8_0_wait_for_idle(void *handle)
++{
++ unsigned int i;
++ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
++
++ for (i = 0; i < adev->usec_timeout; i++) {
++ if (gfx_v8_0_is_idle(handle))
++ return 0;
++
++ udelay(1);
++ }
++ return -ETIMEDOUT;
++}
++
+ static int gfx_v8_0_hw_fini(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+@@ -5097,9 +5146,16 @@ static int gfx_v8_0_hw_fini(void *handle)
+ pr_debug("For SRIOV client, shouldn't do anything.\n");
+ return 0;
+ }
+- gfx_v8_0_cp_enable(adev, false);
+- gfx_v8_0_rlc_stop(adev);
+-
++ adev->gfx.rlc.funcs->enter_safe_mode(adev);
++ if (!gfx_v8_0_wait_for_idle(adev))
++ gfx_v8_0_cp_enable(adev, false);
++ else
++ pr_err("cp is busy, skip halt cp\n");
++ if (!gfx_v8_0_wait_for_rlc_idle(adev))
++ gfx_v8_0_rlc_stop(adev);
++ else
++ pr_err("rlc is busy, skip halt rlc\n");
++ adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ return 0;
+ }
+
+@@ -5120,30 +5176,6 @@ static int gfx_v8_0_resume(void *handle)
+ return r;
+ }
+
+-static bool gfx_v8_0_is_idle(void *handle)
+-{
+- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+-
+- if (REG_GET_FIELD(RREG32(mmGRBM_STATUS), GRBM_STATUS, GUI_ACTIVE))
+- return false;
+- else
+- return true;
+-}
+-
+-static int gfx_v8_0_wait_for_idle(void *handle)
+-{
+- unsigned i;
+- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+-
+- for (i = 0; i < adev->usec_timeout; i++) {
+- if (gfx_v8_0_is_idle(handle))
+- return 0;
+-
+- udelay(1);
+- }
+- return -ETIMEDOUT;
+-}
+-
+ static bool gfx_v8_0_check_soft_reset(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+--
+2.7.4
+
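The sequence in the commit message reduces to a bounded polling loop around a status check. A standalone sketch of that poll-with-timeout pattern, with a hypothetical is_idle() in place of the GRBM_STATUS reads used by the patch:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

#define USEC_TIMEOUT 100000

/* Stand-in for reading a hardware status register. */
static int busy_countdown = 25;

static int is_idle(void)
{
        return busy_countdown-- <= 0;
}

/* Poll until idle or until the timeout expires, like
 * gfx_v8_0_wait_for_idle()/gfx_v8_0_wait_for_rlc_idle() above. */
static int wait_for_idle(void)
{
        unsigned int i;

        for (i = 0; i < USEC_TIMEOUT; i++) {
                if (is_idle())
                        return 0;
                usleep(1);      /* the kernel code uses udelay(1) */
        }
        return -ETIMEDOUT;
}

int main(void)
{
        if (!wait_for_idle())
                printf("idle, safe to halt\n");
        else
                printf("still busy, skip halt\n");
        return 0;
}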
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5451-drm-amdgpu-Remove-redundant-code-in-gfx_v8_0.c.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5451-drm-amdgpu-Remove-redundant-code-in-gfx_v8_0.c.patch
new file mode 100644
index 00000000..7799f593
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5451-drm-amdgpu-Remove-redundant-code-in-gfx_v8_0.c.patch
@@ -0,0 +1,54 @@
+From c12637d9c69a5f16d7251d58e38b24d180386913 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Thu, 20 Sep 2018 17:06:22 +0800
+Subject: [PATCH 5451/5725] drm/amdgpu: Remove redundant code in gfx_v8_0.c
+
+The CG related registers have already been programmed in the golden
+settings, and the PG register default value is 0.
+
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Hang Zhou <hang.zhou@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 20 --------------------
+ 1 file changed, 20 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index a632c8d..6c026d76 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -4207,31 +4207,11 @@ static int gfx_v8_0_rlc_load_microcode(struct amdgpu_device *adev)
+ static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
+ {
+ int r;
+- u32 tmp;
+
+ gfx_v8_0_rlc_stop(adev);
+-
+- /* disable CG */
+- tmp = RREG32(mmRLC_CGCG_CGLS_CTRL);
+- tmp &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK |
+- RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
+- WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);
+- if (adev->asic_type == CHIP_POLARIS11 ||
+- adev->asic_type == CHIP_POLARIS10 ||
+- adev->asic_type == CHIP_POLARIS12 ||
+- adev->asic_type == CHIP_VEGAM) {
+- tmp = RREG32(mmRLC_CGCG_CGLS_CTRL_3D);
+- tmp &= ~0x3;
+- WREG32(mmRLC_CGCG_CGLS_CTRL_3D, tmp);
+- }
+-
+- /* disable PG */
+- WREG32(mmRLC_PG_CNTL, 0);
+-
+ gfx_v8_0_rlc_reset(adev);
+ gfx_v8_0_init_pg(adev);
+
+-
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
+ /* legacy rlc firmware loading */
+ r = gfx_v8_0_rlc_load_microcode(adev);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5452-drm-amd-pp-Disable-dpm-features-on-smu7-8-when-suspe.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5452-drm-amd-pp-Disable-dpm-features-on-smu7-8-when-suspe.patch
new file mode 100644
index 00000000..15a997a5
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5452-drm-amd-pp-Disable-dpm-features-on-smu7-8-when-suspe.patch
@@ -0,0 +1,121 @@
+From 925e268b8a29f03a10560c83d4142a4278314b6e Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Thu, 20 Sep 2018 16:47:06 +0800
+Subject: [PATCH 5452/5725] drm/amd/pp: Disable dpm features on smu7/8 when
+ suspend
+
+Need to disable dpm features before halting the rlc.
+
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 13 +++++++++
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c | 37 ++++++++++++------------
+ 2 files changed, 32 insertions(+), 18 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+index 0bfb297..8118d4e 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+@@ -5037,6 +5037,18 @@ static int smu7_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw
+ return 0;
+ }
+
++static int smu7_power_off_asic(struct pp_hwmgr *hwmgr)
++{
++ int result;
++
++ result = smu7_disable_dpm_tasks(hwmgr);
++ PP_ASSERT_WITH_CODE((0 == result),
++ "[disable_dpm_tasks] Failed to disable DPM!",
++ );
++
++ return result;
++}
++
+ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
+ .backend_init = &smu7_hwmgr_backend_init,
+ .backend_fini = &smu7_hwmgr_backend_fini,
+@@ -5094,6 +5106,7 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
+ .get_power_profile_mode = smu7_get_power_profile_mode,
+ .set_power_profile_mode = smu7_set_power_profile_mode,
+ .get_performance_level = smu7_get_performance_level,
++ .power_off_asic = smu7_power_off_asic,
+ };
+
+ uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
+index b863704..53cf787 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
+@@ -880,7 +880,7 @@ static int smu8_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
+ smu8_update_low_mem_pstate(hwmgr, input);
+
+ return 0;
+-};
++}
+
+
+ static int smu8_setup_asic_task(struct pp_hwmgr *hwmgr)
+@@ -934,14 +934,6 @@ static void smu8_reset_cc6_data(struct pp_hwmgr *hwmgr)
+ hw_data->cc6_settings.cpu_pstate_disable = false;
+ }
+
+-static int smu8_power_off_asic(struct pp_hwmgr *hwmgr)
+-{
+- smu8_power_up_display_clock_sys_pll(hwmgr);
+- smu8_clear_nb_dpm_flag(hwmgr);
+- smu8_reset_cc6_data(hwmgr);
+- return 0;
+-};
+-
+ static void smu8_program_voting_clients(struct pp_hwmgr *hwmgr)
+ {
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+@@ -1011,6 +1003,17 @@ static void smu8_reset_acp_boot_level(struct pp_hwmgr *hwmgr)
+ data->acp_boot_level = 0xff;
+ }
+
++static int smu8_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
++{
++ smu8_program_voting_clients(hwmgr);
++ if (smu8_start_dpm(hwmgr))
++ return -EINVAL;
++ smu8_program_bootup_state(hwmgr);
++ smu8_reset_acp_boot_level(hwmgr);
++
++ return 0;
++}
++
+ static int smu8_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
+ {
+ smu8_disable_nb_dpm(hwmgr);
+@@ -1020,18 +1023,16 @@ static int smu8_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
+ return -EINVAL;
+
+ return 0;
+-};
++}
+
+-static int smu8_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
++static int smu8_power_off_asic(struct pp_hwmgr *hwmgr)
+ {
+- smu8_program_voting_clients(hwmgr);
+- if (smu8_start_dpm(hwmgr))
+- return -EINVAL;
+- smu8_program_bootup_state(hwmgr);
+- smu8_reset_acp_boot_level(hwmgr);
+-
++ smu8_disable_dpm_tasks(hwmgr);
++ smu8_power_up_display_clock_sys_pll(hwmgr);
++ smu8_clear_nb_dpm_flag(hwmgr);
++ smu8_reset_cc6_data(hwmgr);
+ return 0;
+-};
++}
+
+ static int smu8_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
+ struct pp_power_state *prequest_ps,
+--
+2.7.4
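The patch routes the DPM teardown through the hwmgr function table so that it runs at power-off, ahead of the RLC halt. A small sketch of that function-table hook pattern, with hypothetical names rather than the real pp_hwmgr_func layout:

#include <stdio.h>

struct hwmgr;

struct hwmgr_funcs {
        int (*disable_dpm_tasks)(struct hwmgr *hw);
        int (*power_off_asic)(struct hwmgr *hw);
};

struct hwmgr {
        const struct hwmgr_funcs *funcs;
};

static int smu_disable_dpm_tasks(struct hwmgr *hw)
{
        printf("dpm features disabled\n");
        return 0;
}

/* power_off_asic runs the DPM teardown first; in the real driver the
 * suspend path then goes on to halt the RLC, as the patch intends. */
static int smu_power_off_asic(struct hwmgr *hw)
{
        return hw->funcs->disable_dpm_tasks(hw);
}

static const struct hwmgr_funcs smu_funcs = {
        .disable_dpm_tasks = smu_disable_dpm_tasks,
        .power_off_asic = smu_power_off_asic,
};

int main(void)
{
        struct hwmgr hw = { .funcs = &smu_funcs };

        return hw.funcs->power_off_asic(&hw);
}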
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5453-drm-amdgpu-drop-extra-newline-in-amdgpu_iv-trace.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5453-drm-amdgpu-drop-extra-newline-in-amdgpu_iv-trace.patch
new file mode 100644
index 00000000..ad9f3056
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5453-drm-amdgpu-drop-extra-newline-in-amdgpu_iv-trace.patch
@@ -0,0 +1,32 @@
+From 69719a8432c68269ea93ba45111914cd63977cad Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Thu, 20 Sep 2018 13:26:18 +0200
+Subject: [PATCH 5453/5725] drm/amdgpu: drop extra newline in amdgpu_iv trace
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+That is superfluous here.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+index 8c2dab2..aab0e93 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+@@ -103,7 +103,7 @@ TRACE_EVENT(amdgpu_iv,
+ __entry->src_data[2] = iv->src_data[2];
+ __entry->src_data[3] = iv->src_data[3];
+ ),
+- TP_printk("client_id:%u src_id:%u ring:%u vmid:%u timestamp: %llu pasid:%u src_data: %08x %08x %08x %08x\n",
++ TP_printk("client_id:%u src_id:%u ring:%u vmid:%u timestamp: %llu pasid:%u src_data: %08x %08x %08x %08x",
+ __entry->client_id, __entry->src_id,
+ __entry->ring_id, __entry->vmid,
+ __entry->timestamp, __entry->pasid,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5454-drm-amdgpu-make-function-pointers-mandatory.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5454-drm-amdgpu-make-function-pointers-mandatory.patch
new file mode 100644
index 00000000..734f17ef
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5454-drm-amdgpu-make-function-pointers-mandatory.patch
@@ -0,0 +1,434 @@
+From a9413fb0d2ce71dd013c2634a03789f30c74b31c Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Mon, 17 Sep 2018 15:41:45 +0200
+Subject: [PATCH 5454/5725] drm/amdgpu: make function pointers mandatory
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+We always want those to be set up correctly.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/cik_ih.c | 3 +--
+ drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 18 +++++++-----------
+ drivers/gpu/drm/amd/amdgpu/cz_ih.c | 3 +--
+ drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 3 +--
+ drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 3 +--
+ drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 3 +--
+ drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 3 +--
+ drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 3 +--
+ drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 3 +--
+ drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 3 +--
+ drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 3 +--
+ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 3 +--
+ drivers/gpu/drm/amd/amdgpu/iceland_ih.c | 3 +--
+ drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 18 +++++++-----------
+ drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 18 +++++++-----------
+ drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 18 +++++++-----------
+ drivers/gpu/drm/amd/amdgpu/si_dma.c | 18 +++++++-----------
+ drivers/gpu/drm/amd/amdgpu/si_ih.c | 3 +--
+ drivers/gpu/drm/amd/amdgpu/tonga_ih.c | 3 +--
+ drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 3 +--
+ 20 files changed, 50 insertions(+), 85 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+index 44d10c2..e75183e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+@@ -468,8 +468,7 @@ static const struct amdgpu_ih_funcs cik_ih_funcs = {
+
+ static void cik_ih_set_interrupt_funcs(struct amdgpu_device *adev)
+ {
+- if (adev->irq.ih_funcs == NULL)
+- adev->irq.ih_funcs = &cik_ih_funcs;
++ adev->irq.ih_funcs = &cik_ih_funcs;
+ }
+
+ const struct amdgpu_ip_block_version cik_ih_ip_block =
+diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+index e1b56e7..2a0dbac 100644
+--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
++++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+@@ -1378,10 +1378,8 @@ static const struct amdgpu_buffer_funcs cik_sdma_buffer_funcs = {
+
+ static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev)
+ {
+- if (adev->mman.buffer_funcs == NULL) {
+- adev->mman.buffer_funcs = &cik_sdma_buffer_funcs;
+- adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
+- }
++ adev->mman.buffer_funcs = &cik_sdma_buffer_funcs;
++ adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
+ }
+
+ static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
+@@ -1396,14 +1394,12 @@ static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
+ {
+ unsigned i;
+
+- if (adev->vm_manager.vm_pte_funcs == NULL) {
+- adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
+- for (i = 0; i < adev->sdma.num_instances; i++)
+- adev->vm_manager.vm_pte_rings[i] =
+- &adev->sdma.instance[i].ring;
++ adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
++ for (i = 0; i < adev->sdma.num_instances; i++)
++ adev->vm_manager.vm_pte_rings[i] =
++ &adev->sdma.instance[i].ring;
+
+- adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
+- }
++ adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
+ }
+
+ const struct amdgpu_ip_block_version cik_sdma_ip_block =
+diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+index 960c29e..9385da1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+@@ -449,8 +449,7 @@ static const struct amdgpu_ih_funcs cz_ih_funcs = {
+
+ static void cz_ih_set_interrupt_funcs(struct amdgpu_device *adev)
+ {
+- if (adev->irq.ih_funcs == NULL)
+- adev->irq.ih_funcs = &cz_ih_funcs;
++ adev->irq.ih_funcs = &cz_ih_funcs;
+ }
+
+ const struct amdgpu_ip_block_version cz_ih_ip_block =
+diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+index de88444..f4cbe2e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+@@ -3577,8 +3577,7 @@ static const struct amdgpu_display_funcs dce_v10_0_display_funcs = {
+
+ static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev)
+ {
+- if (adev->mode_info.funcs == NULL)
+- adev->mode_info.funcs = &dce_v10_0_display_funcs;
++ adev->mode_info.funcs = &dce_v10_0_display_funcs;
+ }
+
+ static const struct amdgpu_irq_src_funcs dce_v10_0_crtc_irq_funcs = {
+diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+index d45c2d8..0a5a3de 100644
+--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+@@ -3710,8 +3710,7 @@ static const struct amdgpu_display_funcs dce_v11_0_display_funcs = {
+
+ static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev)
+ {
+- if (adev->mode_info.funcs == NULL)
+- adev->mode_info.funcs = &dce_v11_0_display_funcs;
++ adev->mode_info.funcs = &dce_v11_0_display_funcs;
+ }
+
+ static const struct amdgpu_irq_src_funcs dce_v11_0_crtc_irq_funcs = {
+diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+index 3d214bd..2204aad 100644
+--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+@@ -3383,8 +3383,7 @@ static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
+
+ static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev)
+ {
+- if (adev->mode_info.funcs == NULL)
+- adev->mode_info.funcs = &dce_v6_0_display_funcs;
++ adev->mode_info.funcs = &dce_v6_0_display_funcs;
+ }
+
+ static const struct amdgpu_irq_src_funcs dce_v6_0_crtc_irq_funcs = {
+diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+index 4798f45..0f8a372 100644
+--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+@@ -3465,8 +3465,7 @@ static const struct amdgpu_display_funcs dce_v8_0_display_funcs = {
+
+ static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev)
+ {
+- if (adev->mode_info.funcs == NULL)
+- adev->mode_info.funcs = &dce_v8_0_display_funcs;
++ adev->mode_info.funcs = &dce_v8_0_display_funcs;
+ }
+
+ static const struct amdgpu_irq_src_funcs dce_v8_0_crtc_irq_funcs = {
+diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+index 7145e7a..d089b25 100644
+--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
++++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+@@ -659,8 +659,7 @@ static const struct amdgpu_display_funcs dce_virtual_display_funcs = {
+
+ static void dce_virtual_set_display_funcs(struct amdgpu_device *adev)
+ {
+- if (adev->mode_info.funcs == NULL)
+- adev->mode_info.funcs = &dce_virtual_display_funcs;
++ adev->mode_info.funcs = &dce_virtual_display_funcs;
+ }
+
+ static int dce_virtual_pageflip(struct amdgpu_device *adev,
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+index b8335d8..890308e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+@@ -1178,8 +1178,7 @@ static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
+
+ static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev)
+ {
+- if (adev->gmc.gmc_funcs == NULL)
+- adev->gmc.gmc_funcs = &gmc_v6_0_gmc_funcs;
++ adev->gmc.gmc_funcs = &gmc_v6_0_gmc_funcs;
+ }
+
+ static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+index 61ffd5c..d7d5075 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+@@ -1388,8 +1388,7 @@ static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
+
+ static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev)
+ {
+- if (adev->gmc.gmc_funcs == NULL)
+- adev->gmc.gmc_funcs = &gmc_v7_0_gmc_funcs;
++ adev->gmc.gmc_funcs = &gmc_v7_0_gmc_funcs;
+ }
+
+ static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+index 199f1a5..1b3da69 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+@@ -1737,8 +1737,7 @@ static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
+
+ static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev)
+ {
+- if (adev->gmc.gmc_funcs == NULL)
+- adev->gmc.gmc_funcs = &gmc_v8_0_gmc_funcs;
++ adev->gmc.gmc_funcs = &gmc_v8_0_gmc_funcs;
+ }
+
+ static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index a0fd3a7..c804846 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -592,8 +592,7 @@ static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
+
+ static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
+ {
+- if (adev->gmc.gmc_funcs == NULL)
+- adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
++ adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
+ }
+
+ static int gmc_v9_0_early_init(void *handle)
+diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+index 842c4b6..45ef0a8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+@@ -447,8 +447,7 @@ static const struct amdgpu_ih_funcs iceland_ih_funcs = {
+
+ static void iceland_ih_set_interrupt_funcs(struct amdgpu_device *adev)
+ {
+- if (adev->irq.ih_funcs == NULL)
+- adev->irq.ih_funcs = &iceland_ih_funcs;
++ adev->irq.ih_funcs = &iceland_ih_funcs;
+ }
+
+ const struct amdgpu_ip_block_version iceland_ih_ip_block =
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+index cc22269..6db0d7a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+@@ -1304,10 +1304,8 @@ static const struct amdgpu_buffer_funcs sdma_v2_4_buffer_funcs = {
+
+ static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev)
+ {
+- if (adev->mman.buffer_funcs == NULL) {
+- adev->mman.buffer_funcs = &sdma_v2_4_buffer_funcs;
+- adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
+- }
++ adev->mman.buffer_funcs = &sdma_v2_4_buffer_funcs;
++ adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
+ }
+
+ static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = {
+@@ -1322,14 +1320,12 @@ static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
+ {
+ unsigned i;
+
+- if (adev->vm_manager.vm_pte_funcs == NULL) {
+- adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
+- for (i = 0; i < adev->sdma.num_instances; i++)
+- adev->vm_manager.vm_pte_rings[i] =
+- &adev->sdma.instance[i].ring;
++ adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
++ for (i = 0; i < adev->sdma.num_instances; i++)
++ adev->vm_manager.vm_pte_rings[i] =
++ &adev->sdma.instance[i].ring;
+
+- adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
+- }
++ adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
+ }
+
+ const struct amdgpu_ip_block_version sdma_v2_4_ip_block =
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+index 4b7df45..deb16c5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+@@ -1745,10 +1745,8 @@ static const struct amdgpu_buffer_funcs sdma_v3_0_buffer_funcs = {
+
+ static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev)
+ {
+- if (adev->mman.buffer_funcs == NULL) {
+- adev->mman.buffer_funcs = &sdma_v3_0_buffer_funcs;
+- adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
+- }
++ adev->mman.buffer_funcs = &sdma_v3_0_buffer_funcs;
++ adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
+ }
+
+ static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = {
+@@ -1763,14 +1761,12 @@ static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
+ {
+ unsigned i;
+
+- if (adev->vm_manager.vm_pte_funcs == NULL) {
+- adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
+- for (i = 0; i < adev->sdma.num_instances; i++)
+- adev->vm_manager.vm_pte_rings[i] =
+- &adev->sdma.instance[i].ring;
++ adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
++ for (i = 0; i < adev->sdma.num_instances; i++)
++ adev->vm_manager.vm_pte_rings[i] =
++ &adev->sdma.instance[i].ring;
+
+- adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
+- }
++ adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
+ }
+
+ const struct amdgpu_ip_block_version sdma_v3_0_ip_block =
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+index 0304797..f797eaf 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+@@ -1818,10 +1818,8 @@ static const struct amdgpu_buffer_funcs sdma_v4_0_buffer_funcs = {
+
+ static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev)
+ {
+- if (adev->mman.buffer_funcs == NULL) {
+- adev->mman.buffer_funcs = &sdma_v4_0_buffer_funcs;
+- adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
+- }
++ adev->mman.buffer_funcs = &sdma_v4_0_buffer_funcs;
++ adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
+ }
+
+ static const struct amdgpu_vm_pte_funcs sdma_v4_0_vm_pte_funcs = {
+@@ -1836,14 +1834,12 @@ static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)
+ {
+ unsigned i;
+
+- if (adev->vm_manager.vm_pte_funcs == NULL) {
+- adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs;
+- for (i = 0; i < adev->sdma.num_instances; i++)
+- adev->vm_manager.vm_pte_rings[i] =
+- &adev->sdma.instance[i].ring;
++ adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs;
++ for (i = 0; i < adev->sdma.num_instances; i++)
++ adev->vm_manager.vm_pte_rings[i] =
++ &adev->sdma.instance[i].ring;
+
+- adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
+- }
++ adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
+ }
+
+ const struct amdgpu_ip_block_version sdma_v4_0_ip_block = {
+diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
+index 93a7773..f3693dc 100644
+--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
++++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
+@@ -880,10 +880,8 @@ static const struct amdgpu_buffer_funcs si_dma_buffer_funcs = {
+
+ static void si_dma_set_buffer_funcs(struct amdgpu_device *adev)
+ {
+- if (adev->mman.buffer_funcs == NULL) {
+- adev->mman.buffer_funcs = &si_dma_buffer_funcs;
+- adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
+- }
++ adev->mman.buffer_funcs = &si_dma_buffer_funcs;
++ adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
+ }
+
+ static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
+@@ -898,14 +896,12 @@ static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev)
+ {
+ unsigned i;
+
+- if (adev->vm_manager.vm_pte_funcs == NULL) {
+- adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs;
+- for (i = 0; i < adev->sdma.num_instances; i++)
+- adev->vm_manager.vm_pte_rings[i] =
+- &adev->sdma.instance[i].ring;
++ adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs;
++ for (i = 0; i < adev->sdma.num_instances; i++)
++ adev->vm_manager.vm_pte_rings[i] =
++ &adev->sdma.instance[i].ring;
+
+- adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
+- }
++ adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
+ }
+
+ const struct amdgpu_ip_block_version si_dma_ip_block =
+diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
+index 60dad63..97711d3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
+@@ -308,8 +308,7 @@ static const struct amdgpu_ih_funcs si_ih_funcs = {
+
+ static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev)
+ {
+- if (adev->irq.ih_funcs == NULL)
+- adev->irq.ih_funcs = &si_ih_funcs;
++ adev->irq.ih_funcs = &si_ih_funcs;
+ }
+
+ const struct amdgpu_ip_block_version si_ih_ip_block =
+diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+index 52853d8..a79a377 100644
+--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+@@ -513,8 +513,7 @@ static const struct amdgpu_ih_funcs tonga_ih_funcs = {
+
+ static void tonga_ih_set_interrupt_funcs(struct amdgpu_device *adev)
+ {
+- if (adev->irq.ih_funcs == NULL)
+- adev->irq.ih_funcs = &tonga_ih_funcs;
++ adev->irq.ih_funcs = &tonga_ih_funcs;
+ }
+
+ const struct amdgpu_ip_block_version tonga_ih_ip_block =
+diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+index 2559498..d0d23a6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+@@ -494,8 +494,7 @@ static const struct amdgpu_ih_funcs vega10_ih_funcs = {
+
+ static void vega10_ih_set_interrupt_funcs(struct amdgpu_device *adev)
+ {
+- if (adev->irq.ih_funcs == NULL)
+- adev->irq.ih_funcs = &vega10_ih_funcs;
++ adev->irq.ih_funcs = &vega10_ih_funcs;
+ }
+
+ const struct amdgpu_ip_block_version vega10_ih_ip_block =
+--
+2.7.4
+
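With the NULL checks gone, the per-IP code installs its function tables unconditionally and callers may assume the pointers are always valid. A minimal sketch of the resulting pattern, using hypothetical names:

#include <stdio.h>

struct ih_funcs {
        void (*process)(void);
};

static void chip_process(void)
{
        printf("processing IH ring\n");
}

static const struct ih_funcs chip_ih_funcs = {
        .process = chip_process,
};

struct device {
        const struct ih_funcs *ih_funcs;
};

/* After the patch: the per-chip code always installs its function table,
 * so call sites never have to test ih_funcs for NULL. */
static void set_interrupt_funcs(struct device *dev)
{
        dev->ih_funcs = &chip_ih_funcs;
}

int main(void)
{
        struct device dev = { 0 };

        set_interrupt_funcs(&dev);
        dev.ih_funcs->process();        /* safe: pointer is mandatory */
        return 0;
}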
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5455-drm-amdgpu-cleanup-amdgpu_ih.c.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5455-drm-amdgpu-cleanup-amdgpu_ih.c.patch
new file mode 100644
index 00000000..1e8e1999
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5455-drm-amdgpu-cleanup-amdgpu_ih.c.patch
@@ -0,0 +1,435 @@
+From 474676b862cb6bc3c85dd701c6614787c7a6261e Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Sun, 16 Sep 2018 20:13:21 +0200
+Subject: [PATCH 5455/5725] drm/amdgpu: cleanup amdgpu_ih.c
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Clean up amdgpu_ih.c so that it can handle multiple interrupt rings.
+
+Change-Id: I19a1eb3131e4a193d933033708f8f328c78e9ff0
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c | 152 ++++++++++++++------------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h | 8 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/cik_ih.c | 4 +-
+ drivers/gpu/drm/amd/amdgpu/cz_ih.c | 4 +-
+ drivers/gpu/drm/amd/amdgpu/iceland_ih.c | 4 +-
+ drivers/gpu/drm/amd/amdgpu/si_ih.c | 4 +-
+ drivers/gpu/drm/amd/amdgpu/tonga_ih.c | 4 +-
+ drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 4 +-
+ 9 files changed, 84 insertions(+), 102 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+index 4ed8621..15fb0f9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+@@ -27,43 +27,19 @@
+ #include "amdgpu_amdkfd.h"
+
+ /**
+- * amdgpu_ih_ring_alloc - allocate memory for the IH ring
+- *
+- * @adev: amdgpu_device pointer
+- *
+- * Allocate a ring buffer for the interrupt controller.
+- * Returns 0 for success, errors for failure.
+- */
+-static int amdgpu_ih_ring_alloc(struct amdgpu_device *adev)
+-{
+- int r;
+-
+- /* Allocate ring buffer */
+- if (adev->irq.ih.ring_obj == NULL) {
+- r = amdgpu_bo_create_kernel(adev, adev->irq.ih.ring_size,
+- PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
+- &adev->irq.ih.ring_obj,
+- &adev->irq.ih.gpu_addr,
+- (void **)&adev->irq.ih.ring);
+- if (r) {
+- DRM_ERROR("amdgpu: failed to create ih ring buffer (%d).\n", r);
+- return r;
+- }
+- }
+- return 0;
+-}
+-
+-/**
+ * amdgpu_ih_ring_init - initialize the IH state
+ *
+ * @adev: amdgpu_device pointer
++ * @ih: ih ring to initialize
++ * @ring_size: ring size to allocate
++ * @use_bus_addr: true when we can use dma_alloc_coherent
+ *
+ * Initializes the IH state and allocates a buffer
+ * for the IH ring buffer.
+ * Returns 0 for success, errors for failure.
+ */
+-int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
+- bool use_bus_addr)
++int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
++ unsigned ring_size, bool use_bus_addr)
+ {
+ u32 rb_bufsz;
+ int r;
+@@ -71,70 +47,76 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
+ /* Align ring size */
+ rb_bufsz = order_base_2(ring_size / 4);
+ ring_size = (1 << rb_bufsz) * 4;
+- adev->irq.ih.ring_size = ring_size;
+- adev->irq.ih.ptr_mask = adev->irq.ih.ring_size - 1;
+- adev->irq.ih.rptr = 0;
+- adev->irq.ih.use_bus_addr = use_bus_addr;
+-
+- if (adev->irq.ih.use_bus_addr) {
+- if (!adev->irq.ih.ring) {
+- /* add 8 bytes for the rptr/wptr shadows and
+- * add them to the end of the ring allocation.
+- */
+- adev->irq.ih.ring = pci_alloc_consistent(adev->pdev,
+- adev->irq.ih.ring_size + 8,
+- &adev->irq.ih.rb_dma_addr);
+- if (adev->irq.ih.ring == NULL)
+- return -ENOMEM;
+- memset((void *)adev->irq.ih.ring, 0, adev->irq.ih.ring_size + 8);
+- adev->irq.ih.wptr_offs = (adev->irq.ih.ring_size / 4) + 0;
+- adev->irq.ih.rptr_offs = (adev->irq.ih.ring_size / 4) + 1;
+- }
+- return 0;
++ ih->ring_size = ring_size;
++ ih->ptr_mask = ih->ring_size - 1;
++ ih->rptr = 0;
++ ih->use_bus_addr = use_bus_addr;
++
++ if (use_bus_addr) {
++ if (ih->ring)
++ return 0;
++
++ /* add 8 bytes for the rptr/wptr shadows and
++ * add them to the end of the ring allocation.
++ */
++ ih->ring = dma_alloc_coherent(adev->dev, ih->ring_size + 8,
++ &ih->rb_dma_addr, GFP_KERNEL);
++ if (ih->ring == NULL)
++ return -ENOMEM;
++
++ memset((void *)ih->ring, 0, ih->ring_size + 8);
++ ih->wptr_offs = (ih->ring_size / 4) + 0;
++ ih->rptr_offs = (ih->ring_size / 4) + 1;
+ } else {
+- r = amdgpu_device_wb_get(adev, &adev->irq.ih.wptr_offs);
++ r = amdgpu_device_wb_get(adev, &ih->wptr_offs);
++ if (r)
++ return r;
++
++ r = amdgpu_device_wb_get(adev, &ih->rptr_offs);
+ if (r) {
+- dev_err(adev->dev, "(%d) ih wptr_offs wb alloc failed\n", r);
++ amdgpu_device_wb_free(adev, ih->wptr_offs);
+ return r;
+ }
+
+- r = amdgpu_device_wb_get(adev, &adev->irq.ih.rptr_offs);
++ r = amdgpu_bo_create_kernel(adev, ih->ring_size, PAGE_SIZE,
++ AMDGPU_GEM_DOMAIN_GTT,
++ &ih->ring_obj, &ih->gpu_addr,
++ (void **)&ih->ring);
+ if (r) {
+- amdgpu_device_wb_free(adev, adev->irq.ih.wptr_offs);
+- dev_err(adev->dev, "(%d) ih rptr_offs wb alloc failed\n", r);
++ amdgpu_device_wb_free(adev, ih->rptr_offs);
++ amdgpu_device_wb_free(adev, ih->wptr_offs);
+ return r;
+ }
+-
+- return amdgpu_ih_ring_alloc(adev);
+ }
++ return 0;
+ }
+
+ /**
+ * amdgpu_ih_ring_fini - tear down the IH state
+ *
+ * @adev: amdgpu_device pointer
++ * @ih: ih ring to tear down
+ *
+ * Tears down the IH state and frees buffer
+ * used for the IH ring buffer.
+ */
+-void amdgpu_ih_ring_fini(struct amdgpu_device *adev)
++void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
+ {
+- if (adev->irq.ih.use_bus_addr) {
+- if (adev->irq.ih.ring) {
+- /* add 8 bytes for the rptr/wptr shadows and
+- * add them to the end of the ring allocation.
+- */
+- pci_free_consistent(adev->pdev, adev->irq.ih.ring_size + 8,
+- (void *)adev->irq.ih.ring,
+- adev->irq.ih.rb_dma_addr);
+- adev->irq.ih.ring = NULL;
+- }
++ if (ih->use_bus_addr) {
++ if (!ih->ring)
++ return;
++
++ /* add 8 bytes for the rptr/wptr shadows and
++ * add them to the end of the ring allocation.
++ */
++ dma_free_coherent(adev->dev, ih->ring_size + 8,
++ (void *)ih->ring, ih->rb_dma_addr);
++ ih->ring = NULL;
+ } else {
+- amdgpu_bo_free_kernel(&adev->irq.ih.ring_obj,
+- &adev->irq.ih.gpu_addr,
+- (void **)&adev->irq.ih.ring);
+- amdgpu_device_wb_free(adev, adev->irq.ih.wptr_offs);
+- amdgpu_device_wb_free(adev, adev->irq.ih.rptr_offs);
++ amdgpu_bo_free_kernel(&ih->ring_obj, &ih->gpu_addr,
++ (void **)&ih->ring);
++ amdgpu_device_wb_free(adev, ih->wptr_offs);
++ amdgpu_device_wb_free(adev, ih->rptr_offs);
+ }
+ }
+
+@@ -142,56 +124,56 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev)
+ * amdgpu_ih_process - interrupt handler
+ *
+ * @adev: amdgpu_device pointer
++ * @ih: ih ring to process
+ *
+ * Interrupt hander (VI), walk the IH ring.
+ * Returns irq process return code.
+ */
+-int amdgpu_ih_process(struct amdgpu_device *adev)
++int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
+ {
+ struct amdgpu_iv_entry entry;
+ u32 wptr;
+
+- if (!adev->irq.ih.enabled || adev->shutdown)
++ if (!ih->enabled || adev->shutdown)
+ return IRQ_NONE;
+
+ wptr = amdgpu_ih_get_wptr(adev);
+
+ restart_ih:
+ /* is somebody else already processing irqs? */
+- if (atomic_xchg(&adev->irq.ih.lock, 1))
++ if (atomic_xchg(&ih->lock, 1))
+ return IRQ_NONE;
+
+- DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, adev->irq.ih.rptr, wptr);
++ DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, ih->rptr, wptr);
+
+ /* Order reading of wptr vs. reading of IH ring data */
+ rmb();
+
+- while (adev->irq.ih.rptr != wptr) {
+- u32 ring_index = adev->irq.ih.rptr >> 2;
++ while (ih->rptr != wptr) {
++ u32 ring_index = ih->rptr >> 2;
+
+ /* Prescreening of high-frequency interrupts */
+ if (!amdgpu_ih_prescreen_iv(adev)) {
+- adev->irq.ih.rptr &= adev->irq.ih.ptr_mask;
++ ih->rptr &= ih->ptr_mask;
+ continue;
+ }
+
+ /* Before dispatching irq to IP blocks, send it to amdkfd */
+ amdgpu_amdkfd_interrupt(adev,
+- (const void *) &adev->irq.ih.ring[ring_index]);
++ (const void *) &ih->ring[ring_index]);
+
+- entry.iv_entry = (const uint32_t *)
+- &adev->irq.ih.ring[ring_index];
++ entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
+ amdgpu_ih_decode_iv(adev, &entry);
+- adev->irq.ih.rptr &= adev->irq.ih.ptr_mask;
++ ih->rptr &= ih->ptr_mask;
+
+ amdgpu_irq_dispatch(adev, &entry);
+ }
+ amdgpu_ih_set_rptr(adev);
+- atomic_set(&adev->irq.ih.lock, 0);
++ atomic_set(&ih->lock, 0);
+
+ /* make sure wptr hasn't changed while processing */
+ wptr = amdgpu_ih_get_wptr(adev);
+- if (wptr != adev->irq.ih.rptr)
++ if (wptr != ih->rptr)
+ goto restart_ih;
+
+ return IRQ_HANDLED;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+index 0d5b3f5..3e55f98 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+@@ -82,9 +82,9 @@ struct amdgpu_ih_funcs {
+ #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
+ #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
+
+-int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
+- bool use_bus_addr);
+-void amdgpu_ih_ring_fini(struct amdgpu_device *adev);
+-int amdgpu_ih_process(struct amdgpu_device *adev);
++int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
++ unsigned ring_size, bool use_bus_addr);
++void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
++int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
+
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+index 2d29753..ba004fe 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+@@ -163,7 +163,7 @@ irqreturn_t amdgpu_irq_handler(int irq, void *arg)
+ struct amdgpu_device *adev = dev->dev_private;
+ irqreturn_t ret;
+
+- ret = amdgpu_ih_process(adev);
++ ret = amdgpu_ih_process(adev, &adev->irq.ih);
+ if (ret == IRQ_HANDLED)
+ pm_runtime_mark_last_busy(dev->dev);
+ return ret;
+diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+index e75183e..c37c4b7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+@@ -318,7 +318,7 @@ static int cik_ih_sw_init(void *handle)
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+- r = amdgpu_ih_ring_init(adev, 64 * 1024, false);
++ r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false);
+ if (r)
+ return r;
+
+@@ -332,7 +332,7 @@ static int cik_ih_sw_fini(void *handle)
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ amdgpu_irq_fini(adev);
+- amdgpu_ih_ring_fini(adev);
++ amdgpu_ih_ring_fini(adev, &adev->irq.ih);
+ amdgpu_irq_remove_domain(adev);
+
+ return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+index 9385da1..306e0bd 100644
+--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+@@ -297,7 +297,7 @@ static int cz_ih_sw_init(void *handle)
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+- r = amdgpu_ih_ring_init(adev, 64 * 1024, false);
++ r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false);
+ if (r)
+ return r;
+
+@@ -311,7 +311,7 @@ static int cz_ih_sw_fini(void *handle)
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ amdgpu_irq_fini(adev);
+- amdgpu_ih_ring_fini(adev);
++ amdgpu_ih_ring_fini(adev, &adev->irq.ih);
+ amdgpu_irq_remove_domain(adev);
+
+ return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+index 45ef0a8..9005dee 100644
+--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+@@ -297,7 +297,7 @@ static int iceland_ih_sw_init(void *handle)
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+- r = amdgpu_ih_ring_init(adev, 64 * 1024, false);
++ r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false);
+ if (r)
+ return r;
+
+@@ -311,7 +311,7 @@ static int iceland_ih_sw_fini(void *handle)
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ amdgpu_irq_fini(adev);
+- amdgpu_ih_ring_fini(adev);
++ amdgpu_ih_ring_fini(adev, &adev->irq.ih);
+ amdgpu_irq_remove_domain(adev);
+
+ return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
+index 97711d3..acdf607 100644
+--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
+@@ -170,7 +170,7 @@ static int si_ih_sw_init(void *handle)
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+- r = amdgpu_ih_ring_init(adev, 64 * 1024, false);
++ r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false);
+ if (r)
+ return r;
+
+@@ -182,7 +182,7 @@ static int si_ih_sw_fini(void *handle)
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ amdgpu_irq_fini(adev);
+- amdgpu_ih_ring_fini(adev);
++ amdgpu_ih_ring_fini(adev, &adev->irq.ih);
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+index a79a377..83fdf81 100644
+--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+@@ -317,7 +317,7 @@ static int tonga_ih_sw_init(void *handle)
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+- r = amdgpu_ih_ring_init(adev, 64 * 1024, true);
++ r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, true);
+ if (r)
+ return r;
+
+@@ -334,7 +334,7 @@ static int tonga_ih_sw_fini(void *handle)
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ amdgpu_irq_fini(adev);
+- amdgpu_ih_ring_fini(adev);
++ amdgpu_ih_ring_fini(adev, &adev->irq.ih);
+ amdgpu_irq_remove_domain(adev);
+
+ return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+index d0d23a6..2f14516 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+@@ -380,7 +380,7 @@ static int vega10_ih_sw_init(void *handle)
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+- r = amdgpu_ih_ring_init(adev, 256 * 1024, true);
++ r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, true);
+ if (r)
+ return r;
+
+@@ -397,7 +397,7 @@ static int vega10_ih_sw_fini(void *handle)
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ amdgpu_irq_fini(adev);
+- amdgpu_ih_ring_fini(adev);
++ amdgpu_ih_ring_fini(adev, &adev->irq.ih);
+
+ return 0;
+ }
+--
+2.7.4
+
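The core of the cleanup is passing the IH ring explicitly instead of reaching into adev->irq.ih, so the same helpers can later drive several rings. A standalone sketch of that parameterization, with a much-simplified ring struct in place of the real amdgpu_ih_ring:

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for struct amdgpu_ih_ring. */
struct ih_ring {
        unsigned int ring_size;
        unsigned int ptr_mask;
        unsigned int rptr;
        unsigned char *ring;
};

/* Takes the ring to operate on as a parameter, so one helper can manage
 * more than one ring -- the point of the cleanup. ring_size is assumed
 * to be a power of two, as in the kernel code. */
static int ih_ring_init(struct ih_ring *ih, unsigned int ring_size)
{
        ih->ring = calloc(1, ring_size);
        if (!ih->ring)
                return -1;
        ih->ring_size = ring_size;
        ih->ptr_mask = ring_size - 1;
        ih->rptr = 0;
        return 0;
}

static void ih_ring_fini(struct ih_ring *ih)
{
        free(ih->ring);
        ih->ring = NULL;
}

int main(void)
{
        struct ih_ring ih1 = { 0 }, ih2 = { 0 };

        if (ih_ring_init(&ih1, 64 * 1024) || ih_ring_init(&ih2, 256 * 1024))
                return 1;
        printf("ring1 mask 0x%x, ring2 mask 0x%x\n", ih1.ptr_mask, ih2.ptr_mask);
        ih_ring_fini(&ih1);
        ih_ring_fini(&ih2);
        return 0;
}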
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5456-drm-amdgpu-Move-fence-SW-fallback-warning-v3.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5456-drm-amdgpu-Move-fence-SW-fallback-warning-v3.patch
new file mode 100644
index 00000000..75acd3c7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5456-drm-amdgpu-Move-fence-SW-fallback-warning-v3.patch
@@ -0,0 +1,86 @@
+From 0c4faa0700e18d0e07cbe14b7acc89a4182d5447 Mon Sep 17 00:00:00 2001
+From: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Date: Tue, 25 Sep 2018 10:24:16 -0400
+Subject: [PATCH 5456/5725] drm/amdgpu: Move fence SW fallback warning v3
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Only print the warning if there was actually some fence processed
+from the SW fallback timer.
+
+v2: Add return value to amdgpu_fence_process to let
+amdgpu_fence_fallback know fences were actually
+processed and then print the warning.
+
+v3: Always return true if seq != last_seq
+
+Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 12 ++++++++----
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 2 +-
+ 2 files changed, 9 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+index d169840..5952db9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+@@ -216,8 +216,10 @@ static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
+ * Checks the current fence value and calculates the last
+ * signalled fence value. Wakes the fence queue if the
+ * sequence number has increased.
++ *
++ * Returns true if fence was processed
+ */
+-void amdgpu_fence_process(struct amdgpu_ring *ring)
++bool amdgpu_fence_process(struct amdgpu_ring *ring)
+ {
+ struct amdgpu_fence_driver *drv = &ring->fence_drv;
+ uint32_t seq, last_seq;
+@@ -233,7 +235,7 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
+ amdgpu_fence_schedule_fallback(ring);
+
+ if (unlikely(seq == last_seq))
+- return;
++ return false;
+
+ last_seq &= drv->num_fences_mask;
+ seq &= drv->num_fences_mask;
+@@ -260,6 +262,8 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
+
+ dma_fence_put(fence);
+ } while (last_seq != seq);
++
++ return true;
+ }
+
+ /**
+@@ -274,8 +278,8 @@ static void amdgpu_fence_fallback(struct timer_list *t)
+ struct amdgpu_ring *ring = from_timer(ring, t,
+ fence_drv.fallback_timer);
+
+- DRM_WARN("Fence fallback timer expired on ring %s\n", ring->name);
+- amdgpu_fence_process(ring);
++ if (amdgpu_fence_process(ring))
++ DRM_WARN("Fence fallback timer expired on ring %s\n", ring->name);
+ }
+
+ /**
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+index a42130e..53d5168 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+@@ -98,7 +98,7 @@ void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
+ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence,
+ unsigned flags);
+ int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s);
+-void amdgpu_fence_process(struct amdgpu_ring *ring);
++bool amdgpu_fence_process(struct amdgpu_ring *ring);
+ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
+ signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
+ uint32_t wait_seq,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5457-drm-amdgpu-move-more-interrupt-processing-into-amdgp.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5457-drm-amdgpu-move-more-interrupt-processing-into-amdgp.patch
new file mode 100644
index 00000000..61be4ef4
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5457-drm-amdgpu-move-more-interrupt-processing-into-amdgp.patch
@@ -0,0 +1,144 @@
+From c3669eccc5527c52c079c987d09c103c0b2613eb Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Mon, 17 Sep 2018 15:18:37 +0200
+Subject: [PATCH 5457/5725] drm/amdgpu: move more interrupt processing into
+ amdgpu_irq.c
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Add a callback to amdgpu_ih_process to remove most of the IV logic.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Acked-by: Huang Rui <ray.huang@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c | 24 +++++-------------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h | 4 +++-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 31 ++++++++++++++++++++++++++++++-
+ 3 files changed, 38 insertions(+), 21 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+index 15fb0f9..8af67f6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+@@ -24,7 +24,6 @@
+ #include <drm/drmP.h>
+ #include "amdgpu.h"
+ #include "amdgpu_ih.h"
+-#include "amdgpu_amdkfd.h"
+
+ /**
+ * amdgpu_ih_ring_init - initialize the IH state
+@@ -129,9 +128,10 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
+ * Interrupt hander (VI), walk the IH ring.
+ * Returns irq process return code.
+ */
+-int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
++int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
++ void (*callback)(struct amdgpu_device *adev,
++ struct amdgpu_ih_ring *ih))
+ {
+- struct amdgpu_iv_entry entry;
+ u32 wptr;
+
+ if (!ih->enabled || adev->shutdown)
+@@ -150,24 +150,10 @@ int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
+ rmb();
+
+ while (ih->rptr != wptr) {
+- u32 ring_index = ih->rptr >> 2;
+-
+- /* Prescreening of high-frequency interrupts */
+- if (!amdgpu_ih_prescreen_iv(adev)) {
+- ih->rptr &= ih->ptr_mask;
+- continue;
+- }
+-
+- /* Before dispatching irq to IP blocks, send it to amdkfd */
+- amdgpu_amdkfd_interrupt(adev,
+- (const void *) &ih->ring[ring_index]);
+-
+- entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
+- amdgpu_ih_decode_iv(adev, &entry);
++ callback(adev, ih);
+ ih->rptr &= ih->ptr_mask;
+-
+- amdgpu_irq_dispatch(adev, &entry);
+ }
++
+ amdgpu_ih_set_rptr(adev);
+ atomic_set(&ih->lock, 0);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+index 3e55f98..fd2bbaa 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+@@ -85,6 +85,8 @@ struct amdgpu_ih_funcs {
+ int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
+ unsigned ring_size, bool use_bus_addr);
+ void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
+-int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
++int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
++ void (*callback)(struct amdgpu_device *adev,
++ struct amdgpu_ih_ring *ih));
+
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+index ba004fe..d15410b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+@@ -51,6 +51,7 @@
+ #include "atom.h"
+ #include "amdgpu_connectors.h"
+ #include "amdgpu_trace.h"
++#include "amdgpu_amdkfd.h"
+
+ #include <linux/pm_runtime.h>
+
+@@ -147,6 +148,34 @@ void amdgpu_irq_disable_all(struct amdgpu_device *adev)
+ }
+
+ /**
++ * amdgpu_irq_callback - callback from the IH ring
++ *
++ * @adev: amdgpu device pointer
++ * @ih: amdgpu ih ring
++ *
++ * Callback from IH ring processing to handle the entry at the current position
++ * and advance the read pointer.
++ */
++static void amdgpu_irq_callback(struct amdgpu_device *adev,
++ struct amdgpu_ih_ring *ih)
++{
++ u32 ring_index = ih->rptr >> 2;
++ struct amdgpu_iv_entry entry;
++
++ /* Prescreening of high-frequency interrupts */
++ if (!amdgpu_ih_prescreen_iv(adev))
++ return;
++
++ /* Before dispatching irq to IP blocks, send it to amdkfd */
++ amdgpu_amdkfd_interrupt(adev, (const void *) &ih->ring[ring_index]);
++
++ entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
++ amdgpu_ih_decode_iv(adev, &entry);
++
++ amdgpu_irq_dispatch(adev, &entry);
++}
++
++/**
+ * amdgpu_irq_handler - IRQ handler
+ *
+ * @irq: IRQ number (unused)
+@@ -163,7 +192,7 @@ irqreturn_t amdgpu_irq_handler(int irq, void *arg)
+ struct amdgpu_device *adev = dev->dev_private;
+ irqreturn_t ret;
+
+- ret = amdgpu_ih_process(adev, &adev->irq.ih);
++ ret = amdgpu_ih_process(adev, &adev->irq.ih, amdgpu_irq_callback);
+ if (ret == IRQ_HANDLED)
+ pm_runtime_mark_last_busy(dev->dev);
+ return ret;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5458-drm-amdgpu-move-more-defines-into-amdgpu_irq.h.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5458-drm-amdgpu-move-more-defines-into-amdgpu_irq.h.patch
new file mode 100644
index 00000000..20403a6b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5458-drm-amdgpu-move-more-defines-into-amdgpu_irq.h.patch
@@ -0,0 +1,833 @@
+From cf747373985252fcb89a5bcb38c900cf2420c0b8 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Mon, 17 Sep 2018 15:29:28 +0200
+Subject: [PATCH 5458/5725] drm/amdgpu: move more defines into amdgpu_irq.h
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Everything that isn't related to the IH ring.
+
+Change-Id: I0690a6631e10370257833b1ccd4aca7da60181cd
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h | 22 +-----------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 10 ++++-----
+ drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h | 27 +++++++++++++++++++----
+ drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 4 ++--
+ drivers/gpu/drm/amd/amdgpu/cik_ih.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 6 ++---
+ drivers/gpu/drm/amd/amdgpu/cz_ih.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 6 ++---
+ drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 6 ++---
+ drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 6 ++---
+ drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 6 ++---
+ drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 6 ++---
+ drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 6 ++---
+ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 12 +++++-----
+ drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 4 ++--
+ drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 4 ++--
+ drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 4 ++--
+ drivers/gpu/drm/amd/amdgpu/iceland_ih.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 4 ++--
+ drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c | 4 ++--
+ drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 6 ++---
+ drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 6 ++---
+ drivers/gpu/drm/amd/amdgpu/si_dma.c | 4 ++--
+ drivers/gpu/drm/amd/amdgpu/si_dpm.c | 4 ++--
+ drivers/gpu/drm/amd/amdgpu/si_ih.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/tonga_ih.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 4 ++--
+ drivers/gpu/drm/amd/amdgpu/vce_v2_0.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 2 +-
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 +-
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 6 ++---
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c | 2 +-
+ 35 files changed, 95 insertions(+), 96 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+index fd2bbaa..9ce8c93 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+@@ -24,12 +24,8 @@
+ #ifndef __AMDGPU_IH_H__
+ #define __AMDGPU_IH_H__
+
+-#include "soc15_ih_clientid.h"
+-
+ struct amdgpu_device;
+-
+-#define AMDGPU_IH_CLIENTID_LEGACY 0
+-#define AMDGPU_IH_CLIENTID_MAX SOC15_IH_CLIENTID_MAX
++struct amdgpu_iv_entry;
+
+ /*
+ * R6xx+ IH ring
+@@ -51,22 +47,6 @@ struct amdgpu_ih_ring {
+ dma_addr_t rb_dma_addr; /* only used when use_bus_addr = true */
+ };
+
+-#define AMDGPU_IH_SRC_DATA_MAX_SIZE_DW 4
+-
+-struct amdgpu_iv_entry {
+- unsigned client_id;
+- unsigned src_id;
+- unsigned ring_id;
+- unsigned vmid;
+- unsigned vmid_src;
+- uint64_t timestamp;
+- unsigned timestamp_src;
+- unsigned pasid;
+- unsigned pasid_src;
+- unsigned src_data[AMDGPU_IH_SRC_DATA_MAX_SIZE_DW];
+- const uint32_t *iv_entry;
+-};
+-
+ /* provided by the ih block */
+ struct amdgpu_ih_funcs {
+ /* ring read/write ptr handling, called from interrupt context */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+index d15410b..725b156 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+@@ -124,7 +124,7 @@ void amdgpu_irq_disable_all(struct amdgpu_device *adev)
+ int r;
+
+ spin_lock_irqsave(&adev->irq.lock, irqflags);
+- for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
++ for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
+ if (!adev->irq.client[i].sources)
+ continue;
+
+@@ -302,7 +302,7 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
+ cancel_work_sync(&adev->reset_work);
+ }
+
+- for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
++ for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
+ if (!adev->irq.client[i].sources)
+ continue;
+
+@@ -342,7 +342,7 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev,
+ unsigned client_id, unsigned src_id,
+ struct amdgpu_irq_src *source)
+ {
+- if (client_id >= AMDGPU_IH_CLIENTID_MAX)
++ if (client_id >= AMDGPU_IRQ_CLIENTID_MAX)
+ return -EINVAL;
+
+ if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
+@@ -396,7 +396,7 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
+
+ trace_amdgpu_iv(entry);
+
+- if (client_id >= AMDGPU_IH_CLIENTID_MAX) {
++ if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) {
+ DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);
+ return;
+ }
+@@ -469,7 +469,7 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
+ {
+ int i, j, k;
+
+- for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
++ for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
+ if (!adev->irq.client[i].sources)
+ continue;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
+index 3375ad7..de86be3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
+@@ -25,19 +25,38 @@
+ #define __AMDGPU_IRQ_H__
+
+ #include <linux/irqdomain.h>
++#include "soc15_ih_clientid.h"
+ #include "amdgpu_ih.h"
+
+-#define AMDGPU_MAX_IRQ_SRC_ID 0x100
+-#define AMDGPU_MAX_IRQ_CLIENT_ID 0x100
++#define AMDGPU_MAX_IRQ_SRC_ID 0x100
++#define AMDGPU_MAX_IRQ_CLIENT_ID 0x100
++
++#define AMDGPU_IRQ_CLIENTID_LEGACY 0
++#define AMDGPU_IRQ_CLIENTID_MAX SOC15_IH_CLIENTID_MAX
++
++#define AMDGPU_IRQ_SRC_DATA_MAX_SIZE_DW 4
+
+ struct amdgpu_device;
+-struct amdgpu_iv_entry;
+
+ enum amdgpu_interrupt_state {
+ AMDGPU_IRQ_STATE_DISABLE,
+ AMDGPU_IRQ_STATE_ENABLE,
+ };
+
++struct amdgpu_iv_entry {
++ unsigned client_id;
++ unsigned src_id;
++ unsigned ring_id;
++ unsigned vmid;
++ unsigned vmid_src;
++ uint64_t timestamp;
++ unsigned timestamp_src;
++ unsigned pasid;
++ unsigned pasid_src;
++ unsigned src_data[AMDGPU_IRQ_SRC_DATA_MAX_SIZE_DW];
++ const uint32_t *iv_entry;
++};
++
+ struct amdgpu_irq_src {
+ unsigned num_types;
+ atomic_t *enabled_types;
+@@ -63,7 +82,7 @@ struct amdgpu_irq {
+ bool installed;
+ spinlock_t lock;
+ /* interrupt sources */
+- struct amdgpu_irq_client client[AMDGPU_IH_CLIENTID_MAX];
++ struct amdgpu_irq_client client[AMDGPU_IRQ_CLIENTID_MAX];
+
+ /* status, etc. */
+ bool msi_enabled; /* msi enabled */
+diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+index 9bf0b24..ecd69ab 100644
+--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
++++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+@@ -6274,12 +6274,12 @@ static int ci_dpm_sw_init(void *handle)
+ int ret;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+- ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 230,
++ ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230,
+ &adev->pm.dpm.thermal.irq);
+ if (ret)
+ return ret;
+
+- ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 231,
++ ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 231,
+ &adev->pm.dpm.thermal.irq);
+ if (ret)
+ return ret;
+diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+index c37c4b7..b5775c6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+@@ -276,7 +276,7 @@ static void cik_ih_decode_iv(struct amdgpu_device *adev,
+ dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
+ dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
+
+- entry->client_id = AMDGPU_IH_CLIENTID_LEGACY;
++ entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
+ entry->src_id = dw[0] & 0xff;
+ entry->src_data[0] = dw[1] & 0xfffffff;
+ entry->ring_id = dw[2] & 0xff;
+diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+index 2a0dbac..ef27a02 100644
+--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
++++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+@@ -977,19 +977,19 @@ static int cik_sdma_sw_init(void *handle)
+ }
+
+ /* SDMA trap event */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224,
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 224,
+ &adev->sdma.trap_irq);
+ if (r)
+ return r;
+
+ /* SDMA Privileged inst */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 241,
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 241,
+ &adev->sdma.illegal_inst_irq);
+ if (r)
+ return r;
+
+ /* SDMA Privileged inst */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 247,
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 247,
+ &adev->sdma.illegal_inst_irq);
+ if (r)
+ return r;
+diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+index 306e0bd..df5ac4d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+@@ -255,7 +255,7 @@ static void cz_ih_decode_iv(struct amdgpu_device *adev,
+ dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
+ dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
+
+- entry->client_id = AMDGPU_IH_CLIENTID_LEGACY;
++ entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
+ entry->src_id = dw[0] & 0xff;
+ entry->src_data[0] = dw[1] & 0xfffffff;
+ entry->ring_id = dw[2] & 0xff;
+diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+index f4cbe2e..0d99bea 100644
+--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+@@ -2753,19 +2753,19 @@ static int dce_v10_0_sw_init(void *handle)
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
+ if (r)
+ return r;
+ }
+
+ for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i < 20; i += 2) {
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
+ if (r)
+ return r;
+ }
+
+ /* HPD hotplug */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
+ if (r)
+ return r;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+index 0a5a3de..93a4449 100644
+--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+@@ -2875,19 +2875,19 @@ static int dce_v11_0_sw_init(void *handle)
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
+ if (r)
+ return r;
+ }
+
+ for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i < 20; i += 2) {
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
+ if (r)
+ return r;
+ }
+
+ /* HPD hotplug */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
+ if (r)
+ return r;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+index 2204aad..2b9b81c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+@@ -2623,19 +2623,19 @@ static int dce_v6_0_sw_init(void *handle)
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
+ if (r)
+ return r;
+ }
+
+ for (i = 8; i < 20; i += 2) {
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
+ if (r)
+ return r;
+ }
+
+ /* HPD hotplug */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 42, &adev->hpd_irq);
+ if (r)
+ return r;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+index 0f8a372..7dc369d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+@@ -2650,19 +2650,19 @@ static int dce_v8_0_sw_init(void *handle)
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
+ if (r)
+ return r;
+ }
+
+ for (i = 8; i < 20; i += 2) {
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
+ if (r)
+ return r;
+ }
+
+ /* HPD hotplug */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 42, &adev->hpd_irq);
+ if (r)
+ return r;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+index d089b25..6a74baa 100644
+--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
++++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+@@ -382,7 +382,7 @@ static int dce_virtual_sw_init(void *handle)
+ int r, i;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SMU_DISP_TIMER2_TRIGGER, &adev->crtc_irq);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SMU_DISP_TIMER2_TRIGGER, &adev->crtc_irq);
+ if (r)
+ return r;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+index b8176c2..4f8d6a2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+@@ -3123,15 +3123,15 @@ static int gfx_v6_0_sw_init(void *handle)
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, r;
+
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
+ if (r)
+ return r;
+
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 184, &adev->gfx.priv_reg_irq);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 184, &adev->gfx.priv_reg_irq);
+ if (r)
+ return r;
+
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 185, &adev->gfx.priv_inst_irq);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 185, &adev->gfx.priv_inst_irq);
+ if (r)
+ return r;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+index 169dbf9..05f7a29 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+@@ -4536,18 +4536,18 @@ static int gfx_v7_0_sw_init(void *handle)
+ adev->gfx.mec.num_queue_per_pipe = 8;
+
+ /* EOP Event */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
+ if (r)
+ return r;
+
+ /* Privileged reg */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 184,
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 184,
+ &adev->gfx.priv_reg_irq);
+ if (r)
+ return r;
+
+ /* Privileged inst */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 185,
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 185,
+ &adev->gfx.priv_inst_irq);
+ if (r)
+ return r;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index 6c026d76..34aabe7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -2048,35 +2048,35 @@ static int gfx_v8_0_sw_init(void *handle)
+ adev->gfx.mec.num_queue_per_pipe = 8;
+
+ /* KIQ event */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_INT_IB2, &adev->gfx.kiq.irq);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_INT_IB2, &adev->gfx.kiq.irq);
+ if (r)
+ return r;
+
+ /* EOP Event */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_END_OF_PIPE, &adev->gfx.eop_irq);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_END_OF_PIPE, &adev->gfx.eop_irq);
+ if (r)
+ return r;
+
+ /* Privileged reg */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_REG_FAULT,
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_REG_FAULT,
+ &adev->gfx.priv_reg_irq);
+ if (r)
+ return r;
+
+ /* Privileged inst */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_INSTR_FAULT,
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_INSTR_FAULT,
+ &adev->gfx.priv_inst_irq);
+ if (r)
+ return r;
+
+ /* Add CP EDC/ECC irq */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_ECC_ERROR,
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_ECC_ERROR,
+ &adev->gfx.cp_ecc_error_irq);
+ if (r)
+ return r;
+
+ /* SQ interrupts. */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SQ_INTERRUPT_MSG,
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SQ_INTERRUPT_MSG,
+ &adev->gfx.sq_irq);
+ if (r) {
+ DRM_ERROR("amdgpu_irq_add() for SQ failed: %d\n", r);
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+index 890308e..c0b0e86 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+@@ -858,11 +858,11 @@ static int gmc_v6_0_sw_init(void *handle)
+ adev->gmc.vram_type = gmc_v6_0_convert_vram_type(tmp);
+ }
+
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
+ if (r)
+ return r;
+
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
+ if (r)
+ return r;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+index d7d5075..6eabb31 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+@@ -992,11 +992,11 @@ static int gmc_v7_0_sw_init(void *handle)
+ adev->gmc.vram_type = gmc_v7_0_convert_vram_type(tmp);
+ }
+
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
+ if (r)
+ return r;
+
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
+ if (r)
+ return r;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+index 1b3da69..98eead7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+@@ -1100,11 +1100,11 @@ static int gmc_v8_0_sw_init(void *handle)
+ adev->gmc.vram_type = gmc_v8_0_convert_vram_type(tmp);
+ }
+
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
+ if (r)
+ return r;
+
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
+ if (r)
+ return r;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+index 9005dee..cf0fc61 100644
+--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+@@ -255,7 +255,7 @@ static void iceland_ih_decode_iv(struct amdgpu_device *adev,
+ dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
+ dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
+
+- entry->client_id = AMDGPU_IH_CLIENTID_LEGACY;
++ entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
+ entry->src_id = dw[0] & 0xff;
+ entry->src_data[0] = dw[1] & 0xfffffff;
+ entry->ring_id = dw[2] & 0xff;
+diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+index be880c3..faf06fd 100644
+--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
++++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+@@ -2994,12 +2994,12 @@ static int kv_dpm_sw_init(void *handle)
+ int ret;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+- ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 230,
++ ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230,
+ &adev->pm.dpm.thermal.irq);
+ if (ret)
+ return ret;
+
+- ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 231,
++ ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 231,
+ &adev->pm.dpm.thermal.irq);
+ if (ret)
+ return ret;
+diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
+index 842567b..64e875d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
++++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
+@@ -580,11 +580,11 @@ int xgpu_vi_mailbox_add_irq_id(struct amdgpu_device *adev)
+ {
+ int r;
+
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 135, &adev->virt.rcv_irq);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 135, &adev->virt.rcv_irq);
+ if (r)
+ return r;
+
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 138, &adev->virt.ack_irq);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 138, &adev->virt.ack_irq);
+ if (r) {
+ amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
+ return r;
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+index 6db0d7a..9232a8d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+@@ -905,19 +905,19 @@ static int sdma_v2_4_sw_init(void *handle)
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ /* SDMA trap event */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP,
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP,
+ &adev->sdma.trap_irq);
+ if (r)
+ return r;
+
+ /* SDMA Privileged inst */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 241,
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 241,
+ &adev->sdma.illegal_inst_irq);
+ if (r)
+ return r;
+
+ /* SDMA Privileged inst */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE,
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE,
+ &adev->sdma.illegal_inst_irq);
+ if (r)
+ return r;
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+index deb16c5..a0eafe5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+@@ -1185,19 +1185,19 @@ static int sdma_v3_0_sw_init(void *handle)
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ /* SDMA trap event */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP,
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP,
+ &adev->sdma.trap_irq);
+ if (r)
+ return r;
+
+ /* SDMA Privileged inst */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 241,
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 241,
+ &adev->sdma.illegal_inst_irq);
+ if (r)
+ return r;
+
+ /* SDMA Privileged inst */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE,
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE,
+ &adev->sdma.illegal_inst_irq);
+ if (r)
+ return r;
+diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
+index f3693dc..6cff583 100644
+--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
++++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
+@@ -516,12 +516,12 @@ static int si_dma_sw_init(void *handle)
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ /* DMA0 trap event */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224, &adev->sdma.trap_irq);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 224, &adev->sdma.trap_irq);
+ if (r)
+ return r;
+
+ /* DMA1 trap event */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 244, &adev->sdma.trap_irq_1);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 244, &adev->sdma.trap_irq_1);
+ if (r)
+ return r;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+index 9f7e63b..5654806 100644
+--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
++++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+@@ -7684,11 +7684,11 @@ static int si_dpm_sw_init(void *handle)
+ int ret;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+- ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 230, &adev->pm.dpm.thermal.irq);
++ ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230, &adev->pm.dpm.thermal.irq);
+ if (ret)
+ return ret;
+
+- ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 231, &adev->pm.dpm.thermal.irq);
++ ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 231, &adev->pm.dpm.thermal.irq);
+ if (ret)
+ return ret;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
+index acdf607..b3d7d9f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
+@@ -142,7 +142,7 @@ static void si_ih_decode_iv(struct amdgpu_device *adev,
+ dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
+ dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
+
+- entry->client_id = AMDGPU_IH_CLIENTID_LEGACY;
++ entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
+ entry->src_id = dw[0] & 0xff;
+ entry->src_data[0] = dw[1] & 0xfffffff;
+ entry->ring_id = dw[2] & 0xff;
+diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+index 83fdf81..3abffd0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+@@ -266,7 +266,7 @@ static void tonga_ih_decode_iv(struct amdgpu_device *adev,
+ dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
+ dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
+
+- entry->client_id = AMDGPU_IH_CLIENTID_LEGACY;
++ entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
+ entry->src_id = dw[0] & 0xff;
+ entry->src_data[0] = dw[1] & 0xfffffff;
+ entry->ring_id = dw[2] & 0xff;
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+index d2f6caa..af7cacd 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+@@ -108,7 +108,7 @@ static int uvd_v4_2_sw_init(void *handle)
+ int r;
+
+ /* UVD TRAP */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
+ if (r)
+ return r;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+index ab3ad86..4349360 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+@@ -105,7 +105,7 @@ static int uvd_v5_0_sw_init(void *handle)
+ int r;
+
+ /* UVD TRAP */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
+ if (r)
+ return r;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+index f6522bc..0ef7ce4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+@@ -393,14 +393,14 @@ static int uvd_v6_0_sw_init(void *handle)
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ /* UVD TRAP */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
+ if (r)
+ return r;
+
+ /* UVD ENC TRAP */
+ if (uvd_v6_0_enc_support(adev)) {
+ for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP, &adev->uvd.inst->irq);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP, &adev->uvd.inst->irq);
+ if (r)
+ return r;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+index d48e877..d1c3212 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+@@ -417,7 +417,7 @@ static int vce_v2_0_sw_init(void *handle)
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ /* VCE */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 167, &adev->vce.irq);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 167, &adev->vce.irq);
+ if (r)
+ return r;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+index cc6ce6c..bd9a7ff 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+@@ -423,7 +423,7 @@ static int vce_v3_0_sw_init(void *handle)
+ int r, i;
+
+ /* VCE */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_VCE_TRAP, &adev->vce.irq);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_VCE_TRAP, &adev->vce.irq);
+ if (r)
+ return r;
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 9fed53f..9363ca5 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -1319,7 +1319,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
+ struct dc_interrupt_params int_params = {0};
+ int r;
+ int i;
+- unsigned client_id = AMDGPU_IH_CLIENTID_LEGACY;
++ unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
+
+ if (adev->asic_type == CHIP_VEGA10 ||
+ adev->asic_type == CHIP_VEGA12 ||
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+index 8118d4e..ff84a03 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+@@ -4108,17 +4108,17 @@ static int smu7_register_irq_handlers(struct pp_hwmgr *hwmgr)
+ source->funcs = &smu7_irq_funcs;
+
+ amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
+- AMDGPU_IH_CLIENTID_LEGACY,
++ AMDGPU_IRQ_CLIENTID_LEGACY,
+ VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH,
+ source);
+ amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
+- AMDGPU_IH_CLIENTID_LEGACY,
++ AMDGPU_IRQ_CLIENTID_LEGACY,
+ VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW,
+ source);
+
+ /* Register CTF(GPIO_19) interrupt */
+ amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
+- AMDGPU_IH_CLIENTID_LEGACY,
++ AMDGPU_IRQ_CLIENTID_LEGACY,
+ VISLANDS30_IV_SRCID_GPIO_19,
+ source);
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
+index 2aab1b4..8ad4e696 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
+@@ -545,7 +545,7 @@ int phm_irq_process(struct amdgpu_device *adev,
+ uint32_t client_id = entry->client_id;
+ uint32_t src_id = entry->src_id;
+
+- if (client_id == AMDGPU_IH_CLIENTID_LEGACY) {
++ if (client_id == AMDGPU_IRQ_CLIENTID_LEGACY) {
+ if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH)
+ pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
+ PCI_BUS_NUM(adev->pdev->devfn),
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5459-drm-amdgpu-Use-register-UVD_SCRATCH9-for-VCN-ring-ib.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5459-drm-amdgpu-Use-register-UVD_SCRATCH9-for-VCN-ring-ib.patch
new file mode 100644
index 00000000..2acab2af
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5459-drm-amdgpu-Use-register-UVD_SCRATCH9-for-VCN-ring-ib.patch
@@ -0,0 +1,92 @@
+From 0d172de4060e4fa7de0ce4794d893a9f13ae37a6 Mon Sep 17 00:00:00 2001
+From: James Zhu <James.Zhu@amd.com>
+Date: Mon, 10 Sep 2018 14:06:08 -0400
+Subject: [PATCH 5459/5725] drm/amdgpu:Use register UVD_SCRATCH9 for VCN
+ ring/ib test
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Use register UVD_SCRATCH9 for the VCN ring/ib test, since the registers used
+previously can't be directly accessed under DPG (Dynamic Power Gating) mode.
+
+Signed-off-by: James Zhu <James.Zhu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+index 86b1627..064475d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+@@ -264,7 +264,7 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
+ unsigned i;
+ int r;
+
+- WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0xCAFEDEAD);
++ WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0xCAFEDEAD);
+ r = amdgpu_ring_alloc(ring, 3);
+ if (r) {
+ DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
+@@ -272,11 +272,11 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
+ return r;
+ }
+ amdgpu_ring_write(ring,
+- PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
++ PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0));
+ amdgpu_ring_write(ring, 0xDEADBEEF);
+ amdgpu_ring_commit(ring);
+ for (i = 0; i < adev->usec_timeout; i++) {
+- tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID));
++ tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9));
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+@@ -618,7 +618,7 @@ int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
+ unsigned i;
+ int r;
+
+- WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0xCAFEDEAD);
++ WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0xCAFEDEAD);
+ r = amdgpu_ring_alloc(ring, 3);
+
+ if (r) {
+@@ -628,12 +628,12 @@ int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
+ }
+
+ amdgpu_ring_write(ring,
+- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0, 0, 0));
++ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0, 0, 0));
+ amdgpu_ring_write(ring, 0xDEADBEEF);
+ amdgpu_ring_commit(ring);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+- tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID));
++ tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9));
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+@@ -667,7 +667,7 @@ static int amdgpu_vcn_jpeg_set_reg(struct amdgpu_ring *ring, uint32_t handle,
+
+ ib = &job->ibs[0];
+
+- ib->ptr[0] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH), 0, 0, PACKETJ_TYPE0);
++ ib->ptr[0] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0, 0, PACKETJ_TYPE0);
+ ib->ptr[1] = 0xDEADBEEF;
+ for (i = 2; i < 16; i += 2) {
+ ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
+@@ -716,7 +716,7 @@ int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+ r = 0;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+- tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH));
++ tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9));
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5460-drm-amdgpu-Add-new-register-offset-mask-to-support-V.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5460-drm-amdgpu-Add-new-register-offset-mask-to-support-V.patch
new file mode 100644
index 00000000..3216efc7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5460-drm-amdgpu-Add-new-register-offset-mask-to-support-V.patch
@@ -0,0 +1,93 @@
+From b0c3b7d4ab02b982db9a78252f41778e58f58e1d Mon Sep 17 00:00:00 2001
+From: James Zhu <James.Zhu@amd.com>
+Date: Mon, 10 Sep 2018 14:58:16 -0400
+Subject: [PATCH 5460/5725] drm/amdgpu:Add new register offset/mask to support
+ VCN DPG mode
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+New register offsets/masks need to be added to support VCN DPG mode.
+
+Signed-off-by: James Zhu <James.Zhu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+---
+ .../drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h | 8 +++++++
+ .../drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h | 25 ++++++++++++++++++++++
+ 2 files changed, 33 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h
+index 216a401..4b7da58 100644
+--- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h
++++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h
+@@ -33,6 +33,14 @@
+ #define mmUVD_POWER_STATUS_BASE_IDX 1
+ #define mmCC_UVD_HARVESTING 0x00c7
+ #define mmCC_UVD_HARVESTING_BASE_IDX 1
++#define mmUVD_DPG_LMA_CTL 0x00d1
++#define mmUVD_DPG_LMA_CTL_BASE_IDX 1
++#define mmUVD_DPG_LMA_DATA 0x00d2
++#define mmUVD_DPG_LMA_DATA_BASE_IDX 1
++#define mmUVD_DPG_LMA_MASK 0x00d3
++#define mmUVD_DPG_LMA_MASK_BASE_IDX 1
++#define mmUVD_DPG_PAUSE 0x00d4
++#define mmUVD_DPG_PAUSE_BASE_IDX 1
+ #define mmUVD_SCRATCH1 0x00d5
+ #define mmUVD_SCRATCH1_BASE_IDX 1
+ #define mmUVD_SCRATCH2 0x00d6
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h
+index 124383d..26382f5 100644
+--- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h
++++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h
+@@ -87,6 +87,26 @@
+ //CC_UVD_HARVESTING
+ #define CC_UVD_HARVESTING__UVD_DISABLE__SHIFT 0x1
+ #define CC_UVD_HARVESTING__UVD_DISABLE_MASK 0x00000002L
++//UVD_DPG_LMA_CTL
++#define UVD_DPG_LMA_CTL__READ_WRITE__SHIFT 0x0
++#define UVD_DPG_LMA_CTL__MASK_EN__SHIFT 0x1
++#define UVD_DPG_LMA_CTL__ADDR_AUTO_INCREMENT__SHIFT 0x2
++#define UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT 0x4
++#define UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT 0x10
++#define UVD_DPG_LMA_CTL__READ_WRITE_MASK 0x00000001L
++#define UVD_DPG_LMA_CTL__MASK_EN_MASK 0x00000002L
++#define UVD_DPG_LMA_CTL__ADDR_AUTO_INCREMENT_MASK 0x00000004L
++#define UVD_DPG_LMA_CTL__SRAM_SEL_MASK 0x00000010L
++#define UVD_DPG_LMA_CTL__READ_WRITE_ADDR_MASK 0xFFFF0000L
++//UVD_DPG_PAUSE
++#define UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ__SHIFT 0x0
++#define UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK__SHIFT 0x1
++#define UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ__SHIFT 0x2
++#define UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK__SHIFT 0x3
++#define UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK 0x00000001L
++#define UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK 0x00000002L
++#define UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK 0x00000004L
++#define UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK 0x00000008L
+ //UVD_SCRATCH1
+ #define UVD_SCRATCH1__SCRATCH1_DATA__SHIFT 0x0
+ #define UVD_SCRATCH1__SCRATCH1_DATA_MASK 0xFFFFFFFFL
+@@ -983,6 +1003,7 @@
+ #define UVD_MASTINT_EN__SYS_EN_MASK 0x00000004L
+ #define UVD_MASTINT_EN__INT_OVERRUN_MASK 0x007FFFF0L
+ //UVD_SYS_INT_EN
++#define UVD_SYS_INT_EN__UVD_JRBC_EN__SHIFT 0x4
+ #define UVD_SYS_INT_EN__UVD_JRBC_EN_MASK 0x00000010L
+ //JPEG_CGC_CTRL
+ #define JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT 0x0
+@@ -1138,7 +1159,11 @@
+ #define UVD_VCPU_CACHE_SIZE2__CACHE_SIZE2_MASK 0x001FFFFFL
+ //UVD_VCPU_CNTL
+ #define UVD_VCPU_CNTL__CLK_EN__SHIFT 0x9
++#define UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP__SHIFT 0x11
++#define UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT 0x14
+ #define UVD_VCPU_CNTL__CLK_EN_MASK 0x00000200L
++#define UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK 0x00020000L
++#define UVD_VCPU_CNTL__PRB_TIMEOUT_VAL_MASK 0x0FF00000L
+ //UVD_SOFT_RESET
+ #define UVD_SOFT_RESET__RBC_SOFT_RESET__SHIFT 0x0
+ #define UVD_SOFT_RESET__LBSI_SOFT_RESET__SHIFT 0x1
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5461-drm-amdgpu-Add-DPG-support-flag.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5461-drm-amdgpu-Add-DPG-support-flag.patch
new file mode 100644
index 00000000..ab2adfb0
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5461-drm-amdgpu-Add-DPG-support-flag.patch
@@ -0,0 +1,33 @@
+From 209ba2ae43f262bb7cf17bc97bbf4a3e55cd3521 Mon Sep 17 00:00:00 2001
+From: James Zhu <James.Zhu@amd.com>
+Date: Mon, 10 Sep 2018 15:23:40 -0400
+Subject: [PATCH 5461/5725] drm/amdgpu:Add DPG support flag
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Add DPG support flag for VCN DPG mode.
+
+Signed-off-by: James Zhu <James.Zhu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/include/amd_shared.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
+index f7b9453..76a7c1f 100644
+--- a/drivers/gpu/drm/amd/include/amd_shared.h
++++ b/drivers/gpu/drm/amd/include/amd_shared.h
+@@ -109,6 +109,7 @@ enum amd_powergating_state {
+ #define AMD_PG_SUPPORT_GFX_PIPELINE (1 << 12)
+ #define AMD_PG_SUPPORT_MMHUB (1 << 13)
+ #define AMD_PG_SUPPORT_VCN (1 << 14)
++#define AMD_PG_SUPPORT_VCN_DPG (1 << 15)
+
+ enum PP_FEATURE_MASK {
+ PP_SCLK_DPM_MASK = 0x1,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5462-drm-amdgpu-Add-DPG-mode-read-write-macro.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5462-drm-amdgpu-Add-DPG-mode-read-write-macro.patch
new file mode 100644
index 00000000..2fa161a3
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5462-drm-amdgpu-Add-DPG-mode-read-write-macro.patch
@@ -0,0 +1,53 @@
+From f00b3dab37b6f456b7239255765ea7a71ba34d5b Mon Sep 17 00:00:00 2001
+From: James Zhu <James.Zhu@amd.com>
+Date: Mon, 10 Sep 2018 16:00:36 -0400
+Subject: [PATCH 5462/5725] drm/amdgpu:Add DPG mode read/write macro
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Some register reads and writes need to be programmed through the SDRAM pool
+under DPG mode.
+
+Signed-off-by: James Zhu <James.Zhu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/soc15_common.h | 20 ++++++++++++++++++++
+ 1 file changed, 20 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
+index f5d6025..d35fac5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h
++++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
+@@ -64,6 +64,26 @@
+ } \
+ } while (0)
+
++#define RREG32_SOC15_DPG_MODE(ip, inst, reg, mask, sram_sel) \
++ ({ WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_MASK, mask); \
++ WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_CTL, \
++ UVD_DPG_LMA_CTL__MASK_EN_MASK | \
++ ((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) \
++ << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) | \
++ (sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \
++ RREG32_SOC15(ip, inst, mmUVD_DPG_LMA_DATA); })
++
++#define WREG32_SOC15_DPG_MODE(ip, inst, reg, value, mask, sram_sel) \
++ do { \
++ WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_DATA, value); \
++ WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_MASK, mask); \
++ WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_CTL, \
++ UVD_DPG_LMA_CTL__READ_WRITE_MASK | \
++ ((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) \
++ << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) | \
++ (sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \
++ } while (0)
++
+ #endif
+
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5463-drm-amdgpu-Add-DPG-mode-support-for-vcn-1.0.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5463-drm-amdgpu-Add-DPG-mode-support-for-vcn-1.0.patch
new file mode 100644
index 00000000..8e97b6e1
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5463-drm-amdgpu-Add-DPG-mode-support-for-vcn-1.0.patch
@@ -0,0 +1,413 @@
+From f4de45359735eb37d77344c9084041f2475baead Mon Sep 17 00:00:00 2001
+From: James Zhu <James.Zhu@amd.com>
+Date: Fri, 21 Sep 2018 14:35:32 -0400
+Subject: [PATCH 5463/5725] drm/amdgpu:Add DPG mode support for vcn 1.0
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Add DPG mode start/stop/mc_resume/clock_gating functions to
+support VCN 1.0 DPG mode.
+
+Signed-off-by: James Zhu <James.Zhu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 319 +++++++++++++++++++++++++++++++++-
+ 1 file changed, 313 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index 9108230..2eec119 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -198,7 +198,8 @@ static int vcn_v1_0_hw_init(void *handle)
+
+ done:
+ if (!r)
+- DRM_INFO("VCN decode and encode initialized successfully.\n");
++ DRM_INFO("VCN decode and encode initialized successfully(under %s).\n",
++ (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)?"DPG Mode":"SPG Mode");
+
+ return r;
+ }
+@@ -266,13 +267,13 @@ static int vcn_v1_0_resume(void *handle)
+ }
+
+ /**
+- * vcn_v1_0_mc_resume - memory controller programming
++ * vcn_v1_0_mc_resume_spg_mode - memory controller programming
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Let the VCN memory controller know it's offsets
+ */
+-static void vcn_v1_0_mc_resume(struct amdgpu_device *adev)
++static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
+ {
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ uint32_t offset;
+@@ -319,6 +320,65 @@ static void vcn_v1_0_mc_resume(struct amdgpu_device *adev)
+ adev->gfx.config.gb_addr_config);
+ }
+
++static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev)
++{
++ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
++ uint32_t offset;
++
++ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
++ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo),
++ 0xFFFFFFFF, 0);
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
++ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi),
++ 0xFFFFFFFF, 0);
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0,
++ 0xFFFFFFFF, 0);
++ offset = 0;
++ } else {
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
++ lower_32_bits(adev->vcn.gpu_addr), 0xFFFFFFFF, 0);
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
++ upper_32_bits(adev->vcn.gpu_addr), 0xFFFFFFFF, 0);
++ offset = size;
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
++ AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0xFFFFFFFF, 0);
++ }
++
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size, 0xFFFFFFFF, 0);
++
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
++ lower_32_bits(adev->vcn.gpu_addr + offset), 0xFFFFFFFF, 0);
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
++ upper_32_bits(adev->vcn.gpu_addr + offset), 0xFFFFFFFF, 0);
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0,
++ 0xFFFFFFFF, 0);
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_HEAP_SIZE,
++ 0xFFFFFFFF, 0);
++
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
++ lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE),
++ 0xFFFFFFFF, 0);
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
++ upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE),
++ 0xFFFFFFFF, 0);
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0, 0xFFFFFFFF, 0);
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE2,
++ AMDGPU_VCN_STACK_SIZE + (AMDGPU_VCN_SESSION_SIZE * 40),
++ 0xFFFFFFFF, 0);
++
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
++ adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
++ adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
++ adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_JPEG_ADDR_CONFIG,
++ adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_JPEG_UV_ADDR_CONFIG,
++ adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
++}
++
+ /**
+ * vcn_v1_0_disable_clock_gating - disable VCN clock gating
+ *
+@@ -519,6 +579,62 @@ static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev)
+ WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
+ }
+
++static void vcn_v1_0_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel)
++{
++ uint32_t reg_data = 0;
++
++ /* disable JPEG CGC */
++ if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
++ reg_data = 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
++ else
++ reg_data = 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
++ reg_data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
++ reg_data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmJPEG_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel);
++
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmJPEG_CGC_GATE, 0, 0xFFFFFFFF, sram_sel);
++
++ /* enable sw clock gating control */
++ if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
++ reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
++ else
++ reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
++ reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
++ reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel);
++
++ reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
++ UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
++ UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
++ UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
++ UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
++ UVD_CGC_CTRL__SYS_MODE_MASK |
++ UVD_CGC_CTRL__UDEC_MODE_MASK |
++ UVD_CGC_CTRL__MPEG2_MODE_MASK |
++ UVD_CGC_CTRL__REGS_MODE_MASK |
++ UVD_CGC_CTRL__RBC_MODE_MASK |
++ UVD_CGC_CTRL__LMI_MC_MODE_MASK |
++ UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
++ UVD_CGC_CTRL__IDCT_MODE_MASK |
++ UVD_CGC_CTRL__MPRD_MODE_MASK |
++ UVD_CGC_CTRL__MPC_MODE_MASK |
++ UVD_CGC_CTRL__LBSI_MODE_MASK |
++ UVD_CGC_CTRL__LRBBM_MODE_MASK |
++ UVD_CGC_CTRL__WCB_MODE_MASK |
++ UVD_CGC_CTRL__VCPU_MODE_MASK |
++ UVD_CGC_CTRL__SCPU_MODE_MASK);
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel);
++
++ /* turn off clock gating */
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_CGC_GATE, 0, 0xFFFFFFFF, sram_sel);
++
++ /* turn on SUVD clock gating */
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SUVD_CGC_GATE, 1, 0xFFFFFFFF, sram_sel);
++
++ /* turn on sw mode in UVD_SUVD_CGC_CTRL */
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SUVD_CGC_CTRL, 0, 0xFFFFFFFF, sram_sel);
++}
++
+ static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev)
+ {
+ uint32_t data = 0;
+@@ -614,7 +730,7 @@ static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev)
+ *
+ * Setup and start the VCN block
+ */
+-static int vcn_v1_0_start(struct amdgpu_device *adev)
++static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
+ {
+ struct amdgpu_ring *ring = &adev->vcn.ring_dec;
+ uint32_t rb_bufsz, tmp;
+@@ -628,7 +744,7 @@ static int vcn_v1_0_start(struct amdgpu_device *adev)
+ /* disable clock gating */
+ vcn_v1_0_disable_clock_gating(adev);
+
+- vcn_v1_0_mc_resume(adev);
++ vcn_v1_0_mc_resume_spg_mode(adev);
+
+ /* disable interupt */
+ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
+@@ -799,6 +915,170 @@ static int vcn_v1_0_start(struct amdgpu_device *adev)
+ return 0;
+ }
+
++static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
++{
++ struct amdgpu_ring *ring = &adev->vcn.ring_dec;
++ uint32_t rb_bufsz, tmp, reg_data;
++ uint32_t lmi_swap_cntl;
++
++ /* disable byte swapping */
++ lmi_swap_cntl = 0;
++
++ vcn_1_0_enable_static_power_gating(adev);
++
++ /* enable dynamic power gating mode */
++ reg_data = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
++ reg_data |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
++ reg_data |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
++ WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, reg_data);
++
++ /* enable clock gating */
++ vcn_v1_0_clock_gating_dpg_mode(adev, 0);
++
++ /* enable VCPU clock */
++ reg_data = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
++ reg_data |= UVD_VCPU_CNTL__CLK_EN_MASK;
++ reg_data |= UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK;
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CNTL, reg_data, 0xFFFFFFFF, 0);
++
++	/* disable interrupt */
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MASTINT_EN,
++ 0, UVD_MASTINT_EN__VCPU_EN_MASK, 0);
++
++ /* stall UMC and register bus before resetting VCPU */
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL2,
++ UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);
++
++ /* put LMI, VCPU, RBC etc... into reset */
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SOFT_RESET,
++ UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
++ UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
++ UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
++ UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
++ UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
++ UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
++ UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
++ UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK,
++ 0xFFFFFFFF, 0);
++
++ /* initialize VCN memory controller */
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL,
++ (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
++ UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
++ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
++ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
++ UVD_LMI_CTRL__REQ_MODE_MASK |
++ 0x00100000L, 0xFFFFFFFF, 0);
++
++#ifdef __BIG_ENDIAN
++ /* swap (8 in 32) RB and IB */
++ lmi_swap_cntl = 0xa;
++#endif
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl, 0xFFFFFFFF, 0);
++
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUXA0, 0x40c2040, 0xFFFFFFFF, 0);
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUXA1, 0x0, 0xFFFFFFFF, 0);
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUXB0, 0x40c2040, 0xFFFFFFFF, 0);
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUXB1, 0x0, 0xFFFFFFFF, 0);
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_ALU, 0, 0xFFFFFFFF, 0);
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUX, 0x88, 0xFFFFFFFF, 0);
++
++ vcn_v1_0_mc_resume_dpg_mode(adev);
++
++ /* take all subblocks out of reset, except VCPU */
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SOFT_RESET,
++ UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK, 0xFFFFFFFF, 0);
++
++ /* enable VCPU clock */
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CNTL,
++ UVD_VCPU_CNTL__CLK_EN_MASK, 0xFFFFFFFF, 0);
++
++ /* enable UMC */
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL2,
++ 0, UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);
++
++ /* boot up the VCPU */
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SOFT_RESET, 0, 0xFFFFFFFF, 0);
++
++ /* enable master interrupt */
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MASTINT_EN,
++ (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
++ (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK), 0);
++
++ vcn_v1_0_clock_gating_dpg_mode(adev, 1);
++ /* setup mmUVD_LMI_CTRL */
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL,
++ (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
++ UVD_LMI_CTRL__CRC_RESET_MASK |
++ UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
++ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
++ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
++ (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
++ 0x00100000L), 0xFFFFFFFF, 1);
++
++ tmp = adev->gfx.config.gb_addr_config;
++ /* setup VCN global tiling registers */
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_JPEG_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1);
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_JPEG_UV_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1);
++
++ /* enable System Interrupt for JRBC */
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SYS_INT_EN,
++ UVD_SYS_INT_EN__UVD_JRBC_EN_MASK, 0xFFFFFFFF, 1);
++
++ /* force RBC into idle state */
++ rb_bufsz = order_base_2(ring->ring_size);
++ tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
++ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
++ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
++ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
++ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
++ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
++ WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
++
++ /* set the write pointer delay */
++ WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);
++
++ /* set the wb address */
++ WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
++ (upper_32_bits(ring->gpu_addr) >> 2));
++
++	/* program the RB_BASE for ring buffer */
++ WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
++ lower_32_bits(ring->gpu_addr));
++ WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
++ upper_32_bits(ring->gpu_addr));
++
++ /* Initialize the ring buffer's read and write pointers */
++ WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);
++
++ ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
++ WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
++ lower_32_bits(ring->wptr));
++
++ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
++ ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
++
++ /* initialize wptr */
++ ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
++
++ /* copy patch commands to the jpeg ring */
++ vcn_v1_0_jpeg_ring_set_patch_ring(ring,
++ (ring->wptr + ring->max_dw * amdgpu_sched_hw_submission));
++
++ return 0;
++}
++
++static int vcn_v1_0_start(struct amdgpu_device *adev)
++{
++ int r;
++
++ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
++ r = vcn_v1_0_start_dpg_mode(adev);
++ else
++ r = vcn_v1_0_start_spg_mode(adev);
++ return r;
++}
++
+ /**
+ * vcn_v1_0_stop - stop VCN block
+ *
+@@ -806,7 +1086,7 @@ static int vcn_v1_0_start(struct amdgpu_device *adev)
+ *
+ * stop the VCN block
+ */
+-static int vcn_v1_0_stop(struct amdgpu_device *adev)
++static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev)
+ {
+ /* force RBC into idle state */
+ WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, 0x11010101);
+@@ -836,6 +1116,33 @@ static int vcn_v1_0_stop(struct amdgpu_device *adev)
+ return 0;
+ }
+
++static int vcn_v1_0_stop_dpg_mode(struct amdgpu_device *adev)
++{
++ int ret_code;
++
++ /* Wait for power status to be 1 */
++ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 0x1,
++ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
++
++ /* disable dynamic power gating mode */
++ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
++ ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
++
++ return 0;
++}
++
++static int vcn_v1_0_stop(struct amdgpu_device *adev)
++{
++ int r;
++
++ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
++ r = vcn_v1_0_stop_dpg_mode(adev);
++ else
++ r = vcn_v1_0_stop_spg_mode(adev);
++
++ return r;
++}
++
+ static bool vcn_v1_0_is_idle(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5464-drm-amdgpu-Add-DPG-pause-state.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5464-drm-amdgpu-Add-DPG-pause-state.patch
new file mode 100644
index 00000000..de25d933
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5464-drm-amdgpu-Add-DPG-pause-state.patch
@@ -0,0 +1,50 @@
+From a1bb375aa9e523573a84168c24a3afaa7afc06a0 Mon Sep 17 00:00:00 2001
+From: James Zhu <James.Zhu@amd.com>
+Date: Mon, 10 Sep 2018 18:15:11 -0400
+Subject: [PATCH 5464/5725] drm/amdgpu:Add DPG pause state
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Add DPG pause state to support VCN DPG mode.
+
+Signed-off-by: James Zhu <James.Zhu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+index d2219ab..0b88a46 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+@@ -56,6 +56,16 @@ enum engine_status_constants {
+ UVD_STATUS__RBC_BUSY = 0x1,
+ };
+
++enum internal_dpg_state {
++ VCN_DPG_STATE__UNPAUSE = 0,
++ VCN_DPG_STATE__PAUSE,
++};
++
++struct dpg_pause_state {
++ enum internal_dpg_state fw_based;
++ enum internal_dpg_state jpeg;
++};
++
+ struct amdgpu_vcn {
+ struct amdgpu_bo *vcpu_bo;
+ void *cpu_addr;
+@@ -70,6 +80,7 @@ struct amdgpu_vcn {
+ struct amdgpu_irq_src irq;
+ unsigned num_enc_rings;
+ enum amd_powergating_state cur_state;
++ struct dpg_pause_state pause_state;
+ };
+
+ int amdgpu_vcn_sw_init(struct amdgpu_device *adev);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5465-drm-amdgpu-Add-DPG-pause-mode-support.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5465-drm-amdgpu-Add-DPG-pause-mode-support.patch
new file mode 100644
index 00000000..c8ccc360
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5465-drm-amdgpu-Add-DPG-pause-mode-support.patch
@@ -0,0 +1,217 @@
+From f585d8d40fa3f664f7611d3ba37581d56c0e9f68 Mon Sep 17 00:00:00 2001
+From: James Zhu <James.Zhu@amd.com>
+Date: Fri, 21 Sep 2018 14:43:18 -0400
+Subject: [PATCH 5465/5725] drm/amdgpu:Add DPG pause mode support
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Add functions to support VCN DPG pause mode.
+
+Signed-off-by: James Zhu <James.Zhu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 161 +++++++++++++++++++++++++++++++-
+ 1 file changed, 159 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+index 064475d..028839e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+@@ -36,6 +36,7 @@
+ #include "soc15_common.h"
+
+ #include "vcn/vcn_1_0_offset.h"
++#include "vcn/vcn_1_0_sh_mask.h"
+
+ /* 1 second timeout */
+ #define VCN_IDLE_TIMEOUT msecs_to_jiffies(1000)
+@@ -212,18 +213,158 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
+ return 0;
+ }
+
++static int amdgpu_vcn_pause_dpg_mode(struct amdgpu_device *adev,
++ struct dpg_pause_state *new_state)
++{
++ int ret_code;
++ uint32_t reg_data = 0;
++ uint32_t reg_data2 = 0;
++ struct amdgpu_ring *ring;
++
++ /* pause/unpause if state is changed */
++ if (adev->vcn.pause_state.fw_based != new_state->fw_based) {
++ DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
++ adev->vcn.pause_state.fw_based, adev->vcn.pause_state.jpeg,
++ new_state->fw_based, new_state->jpeg);
++
++ reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
++ (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
++
++ if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
++ ret_code = 0;
++
++ if (!(reg_data & UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK))
++ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
++ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
++ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
++
++ if (!ret_code) {
++ /* pause DPG non-jpeg */
++ reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
++ WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
++ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
++ UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
++ UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);
++
++ /* Restore */
++ ring = &adev->vcn.ring_enc[0];
++ WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
++ WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
++ WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
++ WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
++ WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
++
++ ring = &adev->vcn.ring_enc[1];
++ WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
++ WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
++ WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
++ WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
++ WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
++
++ ring = &adev->vcn.ring_dec;
++ WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
++ lower_32_bits(ring->wptr) | 0x80000000);
++ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
++ UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
++ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
++ }
++ } else {
++ /* unpause dpg non-jpeg, no need to wait */
++ reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
++ WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
++ }
++ adev->vcn.pause_state.fw_based = new_state->fw_based;
++ }
++
++ /* pause/unpause if state is changed */
++ if (adev->vcn.pause_state.jpeg != new_state->jpeg) {
++ DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
++ adev->vcn.pause_state.fw_based, adev->vcn.pause_state.jpeg,
++ new_state->fw_based, new_state->jpeg);
++
++ reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
++ (~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK);
++
++ if (new_state->jpeg == VCN_DPG_STATE__PAUSE) {
++ ret_code = 0;
++
++ if (!(reg_data & UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK))
++ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
++ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
++ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
++
++ if (!ret_code) {
++ /* Make sure JPRG Snoop is disabled before sending the pause */
++ reg_data2 = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
++ reg_data2 |= UVD_POWER_STATUS__JRBC_SNOOP_DIS_MASK;
++ WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, reg_data2);
++
++ /* pause DPG jpeg */
++ reg_data |= UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
++ WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
++ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
++ UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK,
++ UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK, ret_code);
++
++ /* Restore */
++ ring = &adev->vcn.ring_jpeg;
++ WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
++ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, 0x00000001L | 0x00000002L);
++ WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
++ lower_32_bits(ring->gpu_addr));
++ WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
++ upper_32_bits(ring->gpu_addr));
++ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, ring->wptr);
++ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, ring->wptr);
++ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, 0x00000002L);
++
++ ring = &adev->vcn.ring_dec;
++ WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
++ lower_32_bits(ring->wptr) | 0x80000000);
++ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
++ UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
++ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
++ }
++ } else {
++ /* unpause dpg jpeg, no need to wait */
++ reg_data &= ~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
++ WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
++ }
++ adev->vcn.pause_state.jpeg = new_state->jpeg;
++ }
++
++ return 0;
++}
++
+ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
+ {
+ struct amdgpu_device *adev =
+ container_of(work, struct amdgpu_device, vcn.idle_work.work);
+- unsigned fences = amdgpu_fence_count_emitted(&adev->vcn.ring_dec);
+- unsigned i;
++ unsigned int fences = 0;
++ unsigned int i;
+
+ for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
+ }
+
++ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
++ struct dpg_pause_state new_state;
++
++ if (fences)
++ new_state.fw_based = VCN_DPG_STATE__PAUSE;
++ else
++ new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
++
++ if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg))
++ new_state.jpeg = VCN_DPG_STATE__PAUSE;
++ else
++ new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
++
++ amdgpu_vcn_pause_dpg_mode(adev, &new_state);
++ }
++
+ fences += amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg);
++ fences += amdgpu_fence_count_emitted(&adev->vcn.ring_dec);
+
+ if (fences == 0) {
+ amdgpu_gfx_off_ctrl(adev, true);
+@@ -250,6 +391,22 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
+ AMD_PG_STATE_UNGATE);
+ }
++
++ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
++ struct dpg_pause_state new_state;
++
++ if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
++ new_state.fw_based = VCN_DPG_STATE__PAUSE;
++ else
++ new_state.fw_based = adev->vcn.pause_state.fw_based;
++
++ if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
++ new_state.jpeg = VCN_DPG_STATE__PAUSE;
++ else
++ new_state.jpeg = adev->vcn.pause_state.jpeg;
++
++ amdgpu_vcn_pause_dpg_mode(adev, &new_state);
++ }
+ }
+
+ void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5466-drm-amdgpu-soc15-fix-warnings-in-register-macro.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5466-drm-amdgpu-soc15-fix-warnings-in-register-macro.patch
new file mode 100644
index 00000000..981b5fca
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5466-drm-amdgpu-soc15-fix-warnings-in-register-macro.patch
@@ -0,0 +1,34 @@
+From f50e82bb267b6472f56cb16af71fc46fd7d24edf Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Wed, 26 Sep 2018 11:18:47 -0500
+Subject: [PATCH 5466/5725] drm/amdgpu/soc15: fix warnings in register macro
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+expects argument of type ‘unsigned int’ has type ‘long int’
+
+Fixes: 52e211c1f04 ("drm/amdgpu:Add error message when register failed to reach expected value")
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: James Zhu <James.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/soc15_common.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
+index d35fac5..958b10a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h
++++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
+@@ -57,7 +57,7 @@
+ loop--; \
+ if (!loop) { \
+ DRM_ERROR("Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n", \
+- inst, #reg, expected_value, (tmp_ & (mask))); \
++ inst, #reg, (unsigned)expected_value, (unsigned)(tmp_ & (mask))); \
+ ret = -ETIMEDOUT; \
+ break; \
+ } \
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5467-drm-amdgpu-vcn-whitespace-cleanup.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5467-drm-amdgpu-vcn-whitespace-cleanup.patch
new file mode 100644
index 00000000..56e1c481
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5467-drm-amdgpu-vcn-whitespace-cleanup.patch
@@ -0,0 +1,129 @@
+From 7de63b3b0f1a5286b677242c313938439b901b90 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Wed, 26 Sep 2018 11:24:25 -0500
+Subject: [PATCH 5467/5725] drm/amdgpu/vcn: whitespace cleanup
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Fix some indentation issues.
+
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: James Zhu <James.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 36 ++++++++++++++++-----------------
+ 1 file changed, 18 insertions(+), 18 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+index 028839e..e1a85f2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+@@ -214,7 +214,7 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
+ }
+
+ static int amdgpu_vcn_pause_dpg_mode(struct amdgpu_device *adev,
+- struct dpg_pause_state *new_state)
++ struct dpg_pause_state *new_state)
+ {
+ int ret_code;
+ uint32_t reg_data = 0;
+@@ -228,23 +228,23 @@ static int amdgpu_vcn_pause_dpg_mode(struct amdgpu_device *adev,
+ new_state->fw_based, new_state->jpeg);
+
+ reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
+- (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
++ (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
+
+ if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
+ ret_code = 0;
+
+ if (!(reg_data & UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK))
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
+- UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
+- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
++ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
++ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+
+ if (!ret_code) {
+ /* pause DPG non-jpeg */
+ reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
+ WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
+- UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
+- UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);
++ UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
++ UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);
+
+ /* Restore */
+ ring = &adev->vcn.ring_enc[0];
+@@ -252,7 +252,7 @@ static int amdgpu_vcn_pause_dpg_mode(struct amdgpu_device *adev,
+ WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
+ WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
+- WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
++ WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+
+ ring = &adev->vcn.ring_enc[1];
+ WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
+@@ -263,10 +263,10 @@ static int amdgpu_vcn_pause_dpg_mode(struct amdgpu_device *adev,
+
+ ring = &adev->vcn.ring_dec;
+ WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
+- lower_32_bits(ring->wptr) | 0x80000000);
++ lower_32_bits(ring->wptr) | 0x80000000);
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
+- UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
+- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
++ UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
++ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+ }
+ } else {
+ /* unpause dpg non-jpeg, no need to wait */
+@@ -283,15 +283,15 @@ static int amdgpu_vcn_pause_dpg_mode(struct amdgpu_device *adev,
+ new_state->fw_based, new_state->jpeg);
+
+ reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
+- (~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK);
++ (~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK);
+
+ if (new_state->jpeg == VCN_DPG_STATE__PAUSE) {
+ ret_code = 0;
+
+ if (!(reg_data & UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK))
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
+- UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
+- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
++ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
++ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+
+ if (!ret_code) {
+ /* Make sure JPRG Snoop is disabled before sending the pause */
+@@ -311,19 +311,19 @@ static int amdgpu_vcn_pause_dpg_mode(struct amdgpu_device *adev,
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
+ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, 0x00000001L | 0x00000002L);
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
+- lower_32_bits(ring->gpu_addr));
++ lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
+- upper_32_bits(ring->gpu_addr));
++ upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, ring->wptr);
+ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, ring->wptr);
+ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, 0x00000002L);
+
+ ring = &adev->vcn.ring_dec;
+ WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
+- lower_32_bits(ring->wptr) | 0x80000000);
++ lower_32_bits(ring->wptr) | 0x80000000);
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
+- UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
+- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
++ UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
++ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+ }
+ } else {
+ /* unpause dpg jpeg, no need to wait */
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5468-drm-amd-powerplay-correct-the-hwmon-interface-ppt-li.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5468-drm-amd-powerplay-correct-the-hwmon-interface-ppt-li.patch
new file mode 100644
index 00000000..37df12c9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5468-drm-amd-powerplay-correct-the-hwmon-interface-ppt-li.patch
@@ -0,0 +1,57 @@
+From eb01badc256c148523f81097a6c4f53d75592107 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Mon, 17 Sep 2018 15:05:54 +0800
+Subject: [PATCH 5468/5725] drm/amd/powerplay: correct the hwmon interface ppt
+ limit output
+
+The ppt limit read out by the hwmon interface is always 0.
+Correct this hwmon interface output.
+
+Change-Id: I2c537cacda04034201120b9c1ea82d113d99ced8
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 8 ++++++++
+ drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h | 6 ++++++
+ 2 files changed, 14 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+index 2a554f9..2926313 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+@@ -1544,6 +1544,14 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+ "[EnableDPMTasks] Failed to populate umdpstate clocks!",
+ return result);
+
++ result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetPptLimit,
++ POWER_SOURCE_AC << 16);
++ PP_ASSERT_WITH_CODE(!result,
++ "[GetPptLimit] get default PPT limit failed!",
++ return result);
++ hwmgr->power_limit =
++ hwmgr->default_power_limit = smum_get_argument(hwmgr);
++
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
+index 71191de..a002021 100644
+--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
+@@ -269,6 +269,12 @@ typedef enum {
+ } PPCLK_e;
+
+ typedef enum {
++ POWER_SOURCE_AC,
++ POWER_SOURCE_DC,
++ POWER_SOURCE_COUNT,
++} POWER_SOURCE_e;
++
++typedef enum {
+ VOLTAGE_MODE_AVFS = 0,
+ VOLTAGE_MODE_AVFS_SS,
+ VOLTAGE_MODE_SS,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5469-drm-amd-powerplay-tell-the-correct-gfx-voltage-V2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5469-drm-amd-powerplay-tell-the-correct-gfx-voltage-V2.patch
new file mode 100644
index 00000000..d27af990
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5469-drm-amd-powerplay-tell-the-correct-gfx-voltage-V2.patch
@@ -0,0 +1,82 @@
+From 2b16be0adfa0bb255ae7fabce50500f57e80cc66 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Mon, 17 Sep 2018 18:41:28 +0800
+Subject: [PATCH 5469/5725] drm/amd/powerplay: tell the correct gfx voltage V2
+
+Export the correct gfx voltage via the hwmon interface.
+
+V2: update the register naming for consistency
+
+Change-Id: I0390cadb2dc3423ffa4f09625e8abba8c4b5e230
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ .../gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_offset.h | 3 +++
+ .../gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_sh_mask.h | 3 +++
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 12 ++++++++++++
+ 3 files changed, 18 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_offset.h
+index efd2704..0d68910 100644
+--- a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_offset.h
++++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_offset.h
+@@ -175,4 +175,7 @@
+ #define mmSMUSVI0_PLANE0_CURRENTVID_BASE_IDX 0
+ #define mmSMUSVI0_PLANE0_CURRENTVID 0x0013
+
++#define mmSMUSVI0_TEL_PLANE0_BASE_IDX 0
++#define mmSMUSVI0_TEL_PLANE0 0x0004
++
+ #endif
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_sh_mask.h
+index 2487ab9..b1d9d8b 100644
+--- a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_sh_mask.h
++++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_sh_mask.h
+@@ -258,4 +258,7 @@
+ #define SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID__SHIFT 0x18
+ #define SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID_MASK 0xFF000000L
+
++#define SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT 0x10
++#define SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK 0x01FF0000L
++
+ #endif
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+index 2926313..6ece7d7 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+@@ -46,6 +46,9 @@
+ #include "ppinterrupt.h"
+ #include "pp_overdriver.h"
+ #include "pp_thermal.h"
++#include "soc15_common.h"
++#include "smuio/smuio_9_0_offset.h"
++#include "smuio/smuio_9_0_sh_mask.h"
+
+ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
+ {
+@@ -1915,6 +1918,8 @@ static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx,
+ void *value, int *size)
+ {
+ struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
++ struct amdgpu_device *adev = hwmgr->adev;
++ uint32_t val_vid;
+ int ret = 0;
+
+ switch (idx) {
+@@ -1949,6 +1954,13 @@ static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx,
+ *size = 16;
+ ret = vega20_get_gpu_power(hwmgr, (uint32_t *)value);
+ break;
++ case AMDGPU_PP_SENSOR_VDDGFX:
++ val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_TEL_PLANE0) &
++ SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >>
++ SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT;
++ *((uint32_t *)value) =
++ (uint32_t)convert_to_vddc((uint8_t)val_vid);
++ break;
+ case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
+ ret = vega20_get_enabled_smc_features(hwmgr, (uint64_t *)value);
+ if (!ret)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5470-drm-amd-powerplay-enable-fan-RPM-and-pwm-settings-V2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5470-drm-amd-powerplay-enable-fan-RPM-and-pwm-settings-V2.patch
new file mode 100644
index 00000000..218f84b9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5470-drm-amd-powerplay-enable-fan-RPM-and-pwm-settings-V2.patch
@@ -0,0 +1,331 @@
+From e9bef44f9178019b2a951f1de4925e80e7da0569 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Tue, 18 Sep 2018 18:04:44 +0800
+Subject: [PATCH 5470/5725] drm/amd/powerplay: enable fan RPM and pwm settings
+ V2
+
+Manual fan RPM and pwm settings on vega20 are
+available now.
+
+V2: correct the register for fan speed setting and
+ avoid divide-by-zero
+
+Change-Id: Iad45a169d6984acc091c4efaf46973619fe43a29
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ .../amd/include/asic_reg/thm/thm_11_0_2_offset.h | 12 ++
+ .../amd/include/asic_reg/thm/thm_11_0_2_sh_mask.h | 10 ++
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 27 ++++
+ .../gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c | 151 ++++++++++++++++++++-
+ .../gpu/drm/amd/powerplay/hwmgr/vega20_thermal.h | 11 +-
+ 5 files changed, 207 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_offset.h
+index 510ec3c..a9eb57a 100644
+--- a/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_offset.h
++++ b/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_offset.h
+@@ -26,6 +26,18 @@
+ #define mmCG_MULT_THERMAL_STATUS 0x005f
+ #define mmCG_MULT_THERMAL_STATUS_BASE_IDX 0
+
++#define mmCG_FDO_CTRL0 0x0067
++#define mmCG_FDO_CTRL0_BASE_IDX 0
++
++#define mmCG_FDO_CTRL1 0x0068
++#define mmCG_FDO_CTRL1_BASE_IDX 0
++
++#define mmCG_FDO_CTRL2 0x0069
++#define mmCG_FDO_CTRL2_BASE_IDX 0
++
++#define mmCG_TACH_CTRL 0x006a
++#define mmCG_TACH_CTRL_BASE_IDX 0
++
+ #define mmTHM_THERMAL_INT_ENA 0x000a
+ #define mmTHM_THERMAL_INT_ENA_BASE_IDX 0
+ #define mmTHM_THERMAL_INT_CTRL 0x000b
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_sh_mask.h
+index f69533f..d130d92 100644
+--- a/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_sh_mask.h
++++ b/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_sh_mask.h
+@@ -28,6 +28,16 @@
+ #define CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT 0x9
+ #define CG_MULT_THERMAL_STATUS__ASIC_MAX_TEMP_MASK 0x000001FFL
+ #define CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK 0x0003FE00L
++#define CG_FDO_CTRL2__TMIN__SHIFT 0x0
++#define CG_FDO_CTRL2__TMIN_MASK 0x000000FFL
++#define CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT 0xb
++#define CG_FDO_CTRL2__FDO_PWM_MODE_MASK 0x00003800L
++#define CG_FDO_CTRL1__FMAX_DUTY100__SHIFT 0x0
++#define CG_FDO_CTRL1__FMAX_DUTY100_MASK 0x000000FFL
++#define CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT 0x0
++#define CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK 0x000000FFL
++#define CG_TACH_CTRL__TARGET_PERIOD__SHIFT 0x3
++#define CG_TACH_CTRL__TARGET_PERIOD_MASK 0xFFFFFFF8L
+
+ //THM_THERMAL_INT_ENA
+ #define THM_THERMAL_INT_ENA__THERM_INTH_SET__SHIFT 0x0
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+index 6ece7d7..ee38ed5 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+@@ -2289,6 +2289,25 @@ static uint32_t vega20_get_fan_control_mode(struct pp_hwmgr *hwmgr)
+ return AMD_FAN_CTRL_AUTO;
+ }
+
++static void vega20_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
++{
++ switch (mode) {
++ case AMD_FAN_CTRL_NONE:
++ vega20_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
++ break;
++ case AMD_FAN_CTRL_MANUAL:
++ if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
++ vega20_fan_ctrl_stop_smc_fan_control(hwmgr);
++ break;
++ case AMD_FAN_CTRL_AUTO:
++ if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
++ vega20_fan_ctrl_start_smc_fan_control(hwmgr);
++ break;
++ default:
++ break;
++ }
++}
++
+ static int vega20_get_dal_power_level(struct pp_hwmgr *hwmgr,
+ struct amd_pp_simple_clock_info *info)
+ {
+@@ -3452,12 +3471,20 @@ static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
+ .disable_smc_firmware_ctf =
+ vega20_thermal_disable_alert,
+ /* fan control related */
++ .get_fan_speed_percent =
++ vega20_fan_ctrl_get_fan_speed_percent,
++ .set_fan_speed_percent =
++ vega20_fan_ctrl_set_fan_speed_percent,
+ .get_fan_speed_info =
+ vega20_fan_ctrl_get_fan_speed_info,
+ .get_fan_speed_rpm =
+ vega20_fan_ctrl_get_fan_speed_rpm,
++ .set_fan_speed_rpm =
++ vega20_fan_ctrl_set_fan_speed_rpm,
+ .get_fan_control_mode =
+ vega20_get_fan_control_mode,
++ .set_fan_control_mode =
++ vega20_set_fan_control_mode,
+ /* smu memory related */
+ .notify_cac_buffer_info =
+ vega20_notify_cac_buffer_info,
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c
+index 1c951a5..ede54e8 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c
+@@ -29,6 +29,78 @@
+ #include "soc15_common.h"
+ #include "pp_debug.h"
+
++static int vega20_disable_fan_control_feature(struct pp_hwmgr *hwmgr)
++{
++ struct vega20_hwmgr *data = hwmgr->backend;
++ int ret = 0;
++
++ if (data->smu_features[GNLD_FAN_CONTROL].supported) {
++ ret = vega20_enable_smc_features(
++ hwmgr, false,
++ data->smu_features[GNLD_FAN_CONTROL].
++ smu_feature_bitmap);
++ PP_ASSERT_WITH_CODE(!ret,
++ "Disable FAN CONTROL feature Failed!",
++ return ret);
++ data->smu_features[GNLD_FAN_CONTROL].enabled = false;
++ }
++
++ return ret;
++}
++
++int vega20_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
++{
++ struct vega20_hwmgr *data = hwmgr->backend;
++
++ if (data->smu_features[GNLD_FAN_CONTROL].supported)
++ return vega20_disable_fan_control_feature(hwmgr);
++
++ return 0;
++}
++
++static int vega20_enable_fan_control_feature(struct pp_hwmgr *hwmgr)
++{
++ struct vega20_hwmgr *data = hwmgr->backend;
++ int ret = 0;
++
++ if (data->smu_features[GNLD_FAN_CONTROL].supported) {
++ ret = vega20_enable_smc_features(
++ hwmgr, true,
++ data->smu_features[GNLD_FAN_CONTROL].
++ smu_feature_bitmap);
++ PP_ASSERT_WITH_CODE(!ret,
++ "Enable FAN CONTROL feature Failed!",
++ return ret);
++ data->smu_features[GNLD_FAN_CONTROL].enabled = true;
++ }
++
++ return ret;
++}
++
++int vega20_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
++{
++ struct vega20_hwmgr *data = hwmgr->backend;
++
++ if (data->smu_features[GNLD_FAN_CONTROL].supported)
++ return vega20_enable_fan_control_feature(hwmgr);
++
++ return 0;
++}
++
++static int vega20_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
++{
++ struct amdgpu_device *adev = hwmgr->adev;
++
++ WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
++ REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
++ CG_FDO_CTRL2, TMIN, 0));
++ WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
++ REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
++ CG_FDO_CTRL2, FDO_PWM_MODE, mode));
++
++ return 0;
++}
++
+ static int vega20_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
+ {
+ int ret = 0;
+@@ -42,12 +114,62 @@ static int vega20_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
+ return 0;
+ }
+
++int vega20_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
++ uint32_t *speed)
++{
++ struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
++ PPTable_t *pp_table = &(data->smc_state_table.pp_table);
++ uint32_t current_rpm, percent = 0;
++ int ret = 0;
++
++ ret = vega20_get_current_rpm(hwmgr, &current_rpm);
++ if (ret)
++ return ret;
++
++ percent = current_rpm * 100 / pp_table->FanMaximumRpm;
++
++ *speed = percent > 100 ? 100 : percent;
++
++ return 0;
++}
++
++int vega20_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
++ uint32_t speed)
++{
++ struct amdgpu_device *adev = hwmgr->adev;
++ uint32_t duty100;
++ uint32_t duty;
++ uint64_t tmp64;
++
++ if (speed > 100)
++ speed = 100;
++
++ if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
++ vega20_fan_ctrl_stop_smc_fan_control(hwmgr);
++
++ duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
++ CG_FDO_CTRL1, FMAX_DUTY100);
++
++ if (duty100 == 0)
++ return -EINVAL;
++
++ tmp64 = (uint64_t)speed * duty100;
++ do_div(tmp64, 100);
++ duty = (uint32_t)tmp64;
++
++ WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
++ REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0),
++ CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));
++
++ return vega20_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
++}
++
+ int vega20_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
+ struct phm_fan_speed_info *fan_speed_info)
+ {
+ memset(fan_speed_info, 0, sizeof(*fan_speed_info));
+- fan_speed_info->supports_percent_read = false;
+- fan_speed_info->supports_percent_write = false;
++ fan_speed_info->supports_percent_read = true;
++ fan_speed_info->supports_percent_write = true;
+ fan_speed_info->supports_rpm_read = true;
+ fan_speed_info->supports_rpm_write = true;
+
+@@ -61,6 +183,31 @@ int vega20_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
+ return vega20_get_current_rpm(hwmgr, speed);
+ }
+
++int vega20_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
++{
++ struct amdgpu_device *adev = hwmgr->adev;
++ uint32_t tach_period, crystal_clock_freq;
++ int result = 0;
++
++ if (!speed)
++ return -EINVAL;
++
++ if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) {
++ result = vega20_fan_ctrl_stop_smc_fan_control(hwmgr);
++ if (result)
++ return result;
++ }
++
++ crystal_clock_freq = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
++ tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
++ WREG32_SOC15(THM, 0, mmCG_TACH_CTRL,
++ REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
++ CG_TACH_CTRL, TARGET_PERIOD,
++ tach_period));
++
++ return vega20_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC_RPM);
++}
++
+ /**
+ * Reads the remote temperature from the SIslands thermal controller.
+ *
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.h
+index 2a6d49f..2d1769b 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.h
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.h
+@@ -50,15 +50,22 @@ struct vega20_temperature {
+ #define FDO_PWM_MODE_STATIC_RPM 5
+
+ extern int vega20_thermal_get_temperature(struct pp_hwmgr *hwmgr);
+-extern int vega20_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
+ extern int vega20_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
+ struct phm_fan_speed_info *fan_speed_info);
+-extern int vega20_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
+ extern int vega20_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr,
+ uint32_t *speed);
++extern int vega20_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr,
++ uint32_t speed);
++extern int vega20_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
++ uint32_t *speed);
++extern int vega20_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
++ uint32_t speed);
++extern int vega20_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
++extern int vega20_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr);
+ extern int vega20_thermal_disable_alert(struct pp_hwmgr *hwmgr);
+ extern int vega20_start_thermal_controller(struct pp_hwmgr *hwmgr,
+ struct PP_TemperatureRange *range);
++extern int vega20_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
+
+ #endif
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5471-drm-amdgpu-added-vega20-LBPW-support.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5471-drm-amdgpu-added-vega20-LBPW-support.patch
new file mode 100644
index 00000000..4b185d33
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5471-drm-amdgpu-added-vega20-LBPW-support.patch
@@ -0,0 +1,154 @@
+From 648bc421123bf628445946bffb57763fa54a28f2 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Fri, 24 Aug 2018 16:40:03 +0800
+Subject: [PATCH 5471/5725] drm/amdgpu: added vega20 LBPW support
+
+Enable LBPW support on vega20.
+
+Change-Id: I9fe3458207f958cb500ca34b8d807a7b96d3df74
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 103 +++++++++++++++++++++++++++++++++-
+ 1 file changed, 102 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 6c44ce1..2c4e595 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -908,6 +908,50 @@ static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev,
+ buffer[count++] = cpu_to_le32(0);
+ }
+
++static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
++{
++ struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
++ uint32_t pg_always_on_cu_num = 2;
++ uint32_t always_on_cu_num;
++ uint32_t i, j, k;
++ uint32_t mask, cu_bitmap, counter;
++
++ if (adev->flags & AMD_IS_APU)
++ always_on_cu_num = 4;
++ else if (adev->asic_type == CHIP_VEGA12)
++ always_on_cu_num = 8;
++ else
++ always_on_cu_num = 12;
++
++ mutex_lock(&adev->grbm_idx_mutex);
++ for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
++ for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
++ mask = 1;
++ cu_bitmap = 0;
++ counter = 0;
++ gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
++
++ for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
++ if (cu_info->bitmap[i][j] & mask) {
++ if (counter == pg_always_on_cu_num)
++ WREG32_SOC15(GC, 0, mmRLC_PG_ALWAYS_ON_CU_MASK, cu_bitmap);
++ if (counter < always_on_cu_num)
++ cu_bitmap |= mask;
++ else
++ break;
++ counter++;
++ }
++ mask <<= 1;
++ }
++
++ WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, cu_bitmap);
++ cu_info->ao_cu_bitmap[i][j] = cu_bitmap;
++ }
++ }
++ gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
++ mutex_unlock(&adev->grbm_idx_mutex);
++}
++
+ static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
+ {
+ uint32_t data;
+@@ -953,6 +997,55 @@ static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
+ mutex_unlock(&adev->grbm_idx_mutex);
+ }
+
++static void gfx_v9_4_init_lbpw(struct amdgpu_device *adev)
++{
++ uint32_t data;
++
++ /* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
++ WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
++ WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x033388F8);
++ WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
++ WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x10 | 0x27 << 8 | 0x02FA << 16));
++
++ /* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
++ WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);
++
++ /* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */
++ WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000800);
++
++ mutex_lock(&adev->grbm_idx_mutex);
++ /* set mmRLC_LB_INIT_CU_MASK thru broadcast mode to enable all SE/SH*/
++ gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
++ WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
++
++ /* set mmRLC_LB_PARAMS = 0x003F_1006 */
++ data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
++ data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
++ data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
++ WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);
++
++ /* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
++ data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
++ data &= 0x0000FFFF;
++ data |= 0x00C00000;
++ WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);
++
++ /*
++ * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xFFF (12 CUs AON),
++ * programmed in gfx_v9_0_init_always_on_cu_mask()
++ */
++
++ /* set RLC_LB_CNTL = 0x8000_0095, 31 bit is reserved,
++ * but used for RLC_LB_CNTL configuration */
++ data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
++ data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
++ data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
++ WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
++ mutex_unlock(&adev->grbm_idx_mutex);
++
++ gfx_v9_0_init_always_on_cu_mask(adev);
++}
++
+ static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
+ {
+ WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
+@@ -1084,8 +1177,15 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
+ rv_init_cp_jump_table(adev);
+ amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
+ amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
++ }
+
++ switch (adev->asic_type) {
++ case CHIP_RAVEN:
+ gfx_v9_0_init_lbpw(adev);
++ break;
++ case CHIP_VEGA20:
++ gfx_v9_4_init_lbpw(adev);
++ break;
+ }
+
+ return 0;
+@@ -2408,7 +2508,8 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
+ return r;
+ }
+
+- if (adev->asic_type == CHIP_RAVEN) {
++ if (adev->asic_type == CHIP_RAVEN ||
++ adev->asic_type == CHIP_VEGA20) {
+ if (amdgpu_lbpw != 0)
+ gfx_v9_0_enable_lbpw(adev, true);
+ else
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5472-drm-amdgpu-change-Raven-always-on-CUs-to-4.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5472-drm-amdgpu-change-Raven-always-on-CUs-to-4.patch
new file mode 100644
index 00000000..ec49de4a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5472-drm-amdgpu-change-Raven-always-on-CUs-to-4.patch
@@ -0,0 +1,44 @@
+From 15825749712c2b0a7f34709a2d7c1f1cd6cdfeaf Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Wed, 19 Sep 2018 19:07:19 +0800
+Subject: [PATCH 5472/5725] drm/amdgpu: change Raven always on CUs to 4
+
+For Vega10 and Vega20, the always on CUs are 12.
+For Raven, it's 4.
+
+Change-Id: I0b864d77f65d72697e65e21be5dc2c1930ed8d53
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 2c4e595..920f84e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -985,8 +985,10 @@ static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
+ data |= 0x00C00000;
+ WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);
+
+- /* set RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xFFF */
+- WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, 0xFFF);
++ /*
++ * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xF (4 CUs AON for Raven),
++ * programmed in gfx_v9_0_init_always_on_cu_mask()
++ */
+
+ /* set RLC_LB_CNTL = 0x8000_0095, 31 bit is reserved,
+ * but used for RLC_LB_CNTL configuration */
+@@ -995,6 +997,8 @@ static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
+ data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
+ WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
+ mutex_unlock(&adev->grbm_idx_mutex);
++
++ gfx_v9_0_init_always_on_cu_mask(adev);
+ }
+
+ static void gfx_v9_4_init_lbpw(struct amdgpu_device *adev)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5473-drm-amdgpu-vega20-make-power-profile-output-more-con.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5473-drm-amdgpu-vega20-make-power-profile-output-more-con.patch
new file mode 100644
index 00000000..afeb7fea
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5473-drm-amdgpu-vega20-make-power-profile-output-more-con.patch
@@ -0,0 +1,35 @@
+From ee6281d670bf4bb0ca6e9a46d4cee1684ac675f0 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Wed, 26 Sep 2018 22:55:47 -0500
+Subject: [PATCH 5473/5725] drm/amdgpu/vega20: make power profile output more
+ consistent
+
+Make the profile name line match previous generations more closely.
+
+E.g.,
+0 3D_FULL_SCREEN :
+vs:
+0(3D_FULL_SCREEN )
+
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+index ee38ed5..7884ae3 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+@@ -3204,7 +3204,7 @@ static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
+ "[GetPowerProfile] Failed to get activity monitor!",
+ return result);
+
+- size += sprintf(buf + size, "%2d(%14s%s)\n",
++ size += sprintf(buf + size, "%2d %14s%s:\n",
+ i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ");
+
+ size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5474-drm-amdgpu-add-default-case-to-switch-statement.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5474-drm-amdgpu-add-default-case-to-switch-statement.patch
new file mode 100644
index 00000000..bd346a00
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5474-drm-amdgpu-add-default-case-to-switch-statement.patch
@@ -0,0 +1,29 @@
+From 334db8a60422b98ecc9b04f67494c7aff9c86490 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 27 Sep 2018 22:33:31 -0500
+Subject: [PATCH 5474/5725] drm/amdgpu: add default case to switch statement
+
+Fixes unhandled case warnings. Trivial.
+
+Fixes: "drm/amdgpu: added vega20 LBPW support"
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 920f84e..17be234 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -1190,6 +1190,8 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
+ case CHIP_VEGA20:
+ gfx_v9_4_init_lbpw(adev);
+ break;
++ default:
++ break;
+ }
+
+ return 0;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5475-drm-amdgpu-added-AMD-GPU-instance-counting-V2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5475-drm-amdgpu-added-AMD-GPU-instance-counting-V2.patch
new file mode 100644
index 00000000..79a3b7d5
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5475-drm-amdgpu-added-AMD-GPU-instance-counting-V2.patch
@@ -0,0 +1,158 @@
+From 3a4bcdefa362f429f7ee5e3d4fbf50a4ae32d370 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Thu, 27 Sep 2018 13:26:58 +0800
+Subject: [PATCH 5475/5725] drm/amdgpu: added AMD GPU instance counting V2
+
+Count all GPU instances from AMD (including iGPUs and
+dGPUs) in the system.
+
+V2: drop unnecessary initialization for other gpu_info
+ members except mutex
+
+Change-Id: If62a0873c64857a3fcdf9785557e24cb3456c12e
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 18 ++++++++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 3 ++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 52 +++++++++++++++++++++++++++++++++
+ 3 files changed, 73 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 7d10f6e..15e12e7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -82,6 +82,23 @@
+ #include "amdgpu_bo_list.h"
+ #include "amdgpu_gem.h"
+
++#define MAX_GPU_INSTANCE 16
++
++struct amdgpu_gpu_instance
++{
++ struct amdgpu_device *adev;
++ int mgpu_fan_enabled;
++};
++
++struct amdgpu_mgpu_info
++{
++ struct amdgpu_gpu_instance gpu_ins[MAX_GPU_INSTANCE];
++ struct mutex mutex;
++ uint32_t num_gpu;
++ uint32_t num_dgpu;
++ uint32_t num_apu;
++};
++
+ /*
+ * Modules parameters.
+ */
+@@ -139,6 +156,7 @@ extern int amdgpu_compute_multipipe;
+ extern int amdgpu_gpu_recovery;
+ extern int amdgpu_emu_mode;
+ extern uint amdgpu_smu_memory_pool_size;
++extern struct amdgpu_mgpu_info mgpu_info;
+
+ #ifdef CONFIG_DRM_AMDGPU_SI
+ extern int amdgpu_si_support;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 626a172..5f59a07 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -133,6 +133,9 @@ int amdgpu_compute_multipipe = -1;
+ int amdgpu_gpu_recovery = -1; /* auto */
+ int amdgpu_emu_mode = 0;
+ uint amdgpu_smu_memory_pool_size = 0;
++struct amdgpu_mgpu_info mgpu_info = {
++ .mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
++};
+
+ /**
+ * DOC: vramlimit (int)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index 42b9d30..625eb29 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -40,6 +40,30 @@
+ #include "amdgpu_gem.h"
+ #include "amdgpu_display.h"
+
++static void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
++{
++ struct amdgpu_gpu_instance *gpu_instance;
++ int i;
++
++ mutex_lock(&mgpu_info.mutex);
++
++ for (i = 0; i < mgpu_info.num_gpu; i++) {
++ gpu_instance = &(mgpu_info.gpu_ins[i]);
++ if (gpu_instance->adev == adev) {
++ mgpu_info.gpu_ins[i] =
++ mgpu_info.gpu_ins[mgpu_info.num_gpu - 1];
++ mgpu_info.num_gpu--;
++ if (adev->flags & AMD_IS_APU)
++ mgpu_info.num_apu--;
++ else
++ mgpu_info.num_dgpu--;
++ break;
++ }
++ }
++
++ mutex_unlock(&mgpu_info.mutex);
++}
++
+ /**
+ * amdgpu_driver_unload_kms - Main unload function for KMS.
+ *
+@@ -55,6 +79,8 @@ void amdgpu_driver_unload_kms(struct drm_device *dev)
+ if (adev == NULL)
+ return;
+
++ amdgpu_unregister_gpu_instance(adev);
++
+ if (adev->rmmio == NULL)
+ goto done_free;
+
+@@ -75,6 +101,31 @@ void amdgpu_driver_unload_kms(struct drm_device *dev)
+ dev->dev_private = NULL;
+ }
+
++static void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
++{
++ struct amdgpu_gpu_instance *gpu_instance;
++
++ mutex_lock(&mgpu_info.mutex);
++
++ if (mgpu_info.num_gpu >= MAX_GPU_INSTANCE) {
++ DRM_ERROR("Cannot register more gpu instance\n");
++ mutex_unlock(&mgpu_info.mutex);
++ return;
++ }
++
++ gpu_instance = &(mgpu_info.gpu_ins[mgpu_info.num_gpu]);
++ gpu_instance->adev = adev;
++ gpu_instance->mgpu_fan_enabled = 0;
++
++ mgpu_info.num_gpu++;
++ if (adev->flags & AMD_IS_APU)
++ mgpu_info.num_apu++;
++ else
++ mgpu_info.num_dgpu++;
++
++ mutex_unlock(&mgpu_info.mutex);
++}
++
+ /**
+ * amdgpu_driver_load_kms - Main load function for KMS.
+ *
+@@ -166,6 +217,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
+ pm_runtime_put_autosuspend(dev->dev);
+ }
+
++ amdgpu_register_gpu_instance(adev);
+ out:
+ if (r) {
+ /* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
+--
+2.7.4
+
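As a usage illustration of the structure added above (not part of the patch itself), other driver code could read the new counters under mgpu_info.mutex; the hypothetical helper below only sketches the locking pattern that the later MGPU fan-boost patch relies on.

    /* Illustrative sketch only: reads the dGPU count added by this patch.
     * Assumes the mgpu_info global and struct amdgpu_mgpu_info defined above. */
    static uint32_t example_count_dgpus(void)
    {
            uint32_t n;

            mutex_lock(&mgpu_info.mutex);
            n = mgpu_info.num_dgpu;  /* kept up to date by register/unregister above */
            mutex_unlock(&mgpu_info.mutex);

            return n;
    }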
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5476-drm-amd-powerplay-helper-interfaces-for-MGPU-fan-boo.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5476-drm-amd-powerplay-helper-interfaces-for-MGPU-fan-boo.patch
new file mode 100644
index 00000000..8e5f26de
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5476-drm-amd-powerplay-helper-interfaces-for-MGPU-fan-boo.patch
@@ -0,0 +1,157 @@
+From c85ac6b963dc6307d0f1757ce3935f29d768a39e Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Thu, 27 Sep 2018 13:43:16 +0800
+Subject: [PATCH 5476/5725] drm/amd/powerplay: helper interfaces for MGPU fan
+ boost feature
+
+The MGPU fan boost feature is enabled only when there are two or
+more dGPUs in the system.
+
+Change-Id: Ie1baebb04c1dfa05a090ad8a59089f072b0b0f32
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 41 ++++++++++++++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h | 4 +++
+ drivers/gpu/drm/amd/include/kgd_pp_interface.h | 1 +
+ drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 19 ++++++++++++
+ drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 1 +
+ 5 files changed, 66 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 128ed6c..b573d9f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1882,6 +1882,43 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
+ return 0;
+ }
+
++static int amdgpu_device_enable_mgpu_fan_boost(void)
++{
++ struct amdgpu_gpu_instance *gpu_ins;
++ struct amdgpu_device *adev;
++ int i, ret = 0;
++
++ mutex_lock(&mgpu_info.mutex);
++
++ /*
++ * MGPU fan boost feature should be enabled
++ * only when there are two or more dGPUs in
++ * the system
++ */
++ if (mgpu_info.num_dgpu < 2)
++ goto out;
++
++ for (i = 0; i < mgpu_info.num_dgpu; i++) {
++ gpu_ins = &(mgpu_info.gpu_ins[i]);
++ adev = gpu_ins->adev;
++ if (!(adev->flags & AMD_IS_APU) &&
++ !gpu_ins->mgpu_fan_enabled &&
++ adev->powerplay.pp_funcs &&
++ adev->powerplay.pp_funcs->enable_mgpu_fan_boost) {
++ ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
++ if (ret)
++ break;
++
++ gpu_ins->mgpu_fan_enabled = 1;
++ }
++ }
++
++out:
++ mutex_unlock(&mgpu_info.mutex);
++
++ return ret;
++}
++
+ /**
+ * amdgpu_device_ip_late_init_func_handler - work handler for ib test
+ *
+@@ -1896,6 +1933,10 @@ static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
+ r = amdgpu_ib_ring_tests(adev);
+ if (r)
+ DRM_ERROR("ib ring test failed (%d).\n", r);
++
++ r = amdgpu_device_enable_mgpu_fan_boost();
++ if (r)
++ DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
+ }
+
+ static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+index ff24e1c..42568ae 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+@@ -357,6 +357,10 @@ enum amdgpu_pcie_gen {
+ ((adev)->powerplay.pp_funcs->odn_edit_dpm_table(\
+ (adev)->powerplay.pp_handle, type, parameter, size))
+
++#define amdgpu_dpm_enable_mgpu_fan_boost(adev) \
++ ((adev)->powerplay.pp_funcs->enable_mgpu_fan_boost(\
++ (adev)->powerplay.pp_handle))
++
+ struct amdgpu_dpm {
+ struct amdgpu_ps *ps;
+ /* number of valid power states */
+diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+index bd74045..8593850 100644
+--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
++++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+@@ -272,6 +272,7 @@ struct amd_pm_funcs {
+ int (*get_display_mode_validation_clocks)(void *handle,
+ struct amd_pp_simple_clock_info *clocks);
+ int (*notify_smu_enable_pwe)(void *handle);
++ int (*enable_mgpu_fan_boost)(void *handle);
+ };
+
+ #endif
+diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+index 2bdef16..147fdbb 100644
+--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
++++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+@@ -1254,6 +1254,24 @@ static int pp_notify_smu_enable_pwe(void *handle)
+ return 0;
+ }
+
++static int pp_enable_mgpu_fan_boost(void *handle)
++{
++ struct pp_hwmgr *hwmgr = handle;
++
++ if (!hwmgr || !hwmgr->pm_en)
++ return -EINVAL;
++
++ if (hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL) {
++ return 0;
++ }
++
++ mutex_lock(&hwmgr->smu_lock);
++ hwmgr->hwmgr_func->enable_mgpu_fan_boost(hwmgr);
++ mutex_unlock(&hwmgr->smu_lock);
++
++ return 0;
++}
++
+ static const struct amd_pm_funcs pp_dpm_funcs = {
+ .load_firmware = pp_dpm_load_fw,
+ .wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
+@@ -1298,5 +1316,6 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
+ .display_clock_voltage_request = pp_display_clock_voltage_request,
+ .get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
+ .notify_smu_enable_pwe = pp_notify_smu_enable_pwe,
++ .enable_mgpu_fan_boost = pp_enable_mgpu_fan_boost,
+ };
+
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+index e0cb7d0..f25f6b7 100644
+--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+@@ -329,6 +329,7 @@ struct pp_hwmgr_func {
+ int (*set_power_limit)(struct pp_hwmgr *hwmgr, uint32_t n);
+ int (*powergate_mmhub)(struct pp_hwmgr *hwmgr);
+ int (*smus_notify_pwe)(struct pp_hwmgr *hwmgr);
++ int (*enable_mgpu_fan_boost)(struct pp_hwmgr *hwmgr);
+ };
+
+ struct pp_table_func {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5477-drm-amd-powerplay-enable-MGPU-fan-boost-feature-on-V.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5477-drm-amd-powerplay-enable-MGPU-fan-boost-feature-on-V.patch
new file mode 100644
index 00000000..57af6574
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5477-drm-amd-powerplay-enable-MGPU-fan-boost-feature-on-V.patch
@@ -0,0 +1,52 @@
+From e3071552df93aefed1c7fdcdad5c5ace47c53e16 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Thu, 27 Sep 2018 13:48:45 +0800
+Subject: [PATCH 5477/5725] drm/amd/powerplay: enable MGPU fan boost feature on
+ Vega20
+
+Added a Vega20-specific implementation of the MGPU fan boost
+feature.
+
+Change-Id: I2521c5b5019261228302c51aa74fc3d9d249c87c
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+index 7884ae3..958af7b 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+@@ -1477,6 +1477,19 @@ static int vega20_init_max_sustainable_clocks(struct pp_hwmgr *hwmgr)
+ return 0;
+ }
+
++static int vega20_enable_mgpu_fan_boost(struct pp_hwmgr *hwmgr)
++{
++ int result;
++
++ result = smum_send_msg_to_smc(hwmgr,
++ PPSMC_MSG_SetMGpuFanBoostLimitRpm);
++ PP_ASSERT_WITH_CODE(!result,
++ "[EnableMgpuFan] Failed to enable mgpu fan boost!",
++ return result);
++
++ return 0;
++}
++
+ static void vega20_init_powergate_state(struct pp_hwmgr *hwmgr)
+ {
+ struct vega20_hwmgr *data =
+@@ -3488,6 +3501,8 @@ static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
+ /* smu memory related */
+ .notify_cac_buffer_info =
+ vega20_notify_cac_buffer_info,
++ .enable_mgpu_fan_boost =
++ vega20_enable_mgpu_fan_boost,
+ };
+
+ int vega20_hwmgr_init(struct pp_hwmgr *hwmgr)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5478-drm-amdgpu-Fix-comments-error-in-sdma_v4_1_update_po.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5478-drm-amdgpu-Fix-comments-error-in-sdma_v4_1_update_po.patch
new file mode 100644
index 00000000..b08b46fe
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5478-drm-amdgpu-Fix-comments-error-in-sdma_v4_1_update_po.patch
@@ -0,0 +1,28 @@
+From 4514875783ccb1bcf1aef0cfe0c56b6a1b80cb38 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Tue, 25 Sep 2018 13:27:00 +0800
+Subject: [PATCH 5478/5725] drm/amdgpu: Fix comments error in
+ sdma_v4_1_update_power_gating
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+index f797eaf..8a99832 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+@@ -828,7 +828,7 @@ sdma_v4_1_update_power_gating(struct amdgpu_device *adev, bool enable)
+ uint32_t def, data;
+
+ if (enable && (adev->pg_flags & AMD_PG_SUPPORT_SDMA)) {
+- /* disable idle interrupt */
++ /* enable idle interrupt */
+ def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL));
+ data |= SDMA0_CNTL__CTXEMPTY_INT_ENABLE_MASK;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5479-drm-amd-pp-Fix-fan-s-RPM-setting-not-work-on-VI-Vega.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5479-drm-amd-pp-Fix-fan-s-RPM-setting-not-work-on-VI-Vega.patch
new file mode 100644
index 00000000..a5d2275a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5479-drm-amd-pp-Fix-fan-s-RPM-setting-not-work-on-VI-Vega.patch
@@ -0,0 +1,48 @@
+From c086d59d8a6165a5cc5298af314ce3274476b5ae Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Thu, 20 Sep 2018 15:11:08 +0800
+Subject: [PATCH 5479/5725] drm/amd/pp: Fix fan's RPM setting not work on
+ VI/Vega10
+
+The target RPM value was being written to the wrong register.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c | 2 +-
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c | 6 +++---
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
+index 44527755..eef086c 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
+@@ -272,7 +272,7 @@ int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
+ tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
+
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+- CG_TACH_STATUS, TACH_PERIOD, tach_period);
++ CG_TACH_CTRL, TARGET_PERIOD, tach_period);
+
+ return smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC_RPM);
+ }
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
+index 407762b..538de6c 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
+@@ -322,9 +322,9 @@ int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
+ if (!result) {
+ crystal_clock_freq = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
+ tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
+- WREG32_SOC15(THM, 0, mmCG_TACH_STATUS,
+- REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_STATUS),
+- CG_TACH_STATUS, TACH_PERIOD,
++ WREG32_SOC15(THM, 0, mmCG_TACH_CTRL,
++ REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
++ CG_TACH_CTRL, TARGET_PERIOD,
+ tach_period));
+ }
+ return vega10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC_RPM);
+--
+2.7.4
+
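To make the tach_period computation above concrete, here is a worked example with assumed numbers; the real crystal_clock_freq comes from amdgpu_asic_get_xclk() and varies per ASIC, so the values below are illustrative only.

    /* Worked example of tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed).
     * Both input values are assumptions for illustration. */
    uint32_t crystal_clock_freq = 2500;  /* assumed xclk, taken here to be in 10 kHz units */
    uint32_t speed = 3000;               /* requested fan speed in RPM */
    uint32_t tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
    /* 1,500,000,000 / 24,000 = 62500, written to CG_TACH_CTRL.TARGET_PERIOD */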
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5480-drm-amd-pp-Avoid-divide-by-zero-in-fan_ctrl_set_fan_.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5480-drm-amd-pp-Avoid-divide-by-zero-in-fan_ctrl_set_fan_.patch
new file mode 100644
index 00000000..21108c47
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5480-drm-amd-pp-Avoid-divide-by-zero-in-fan_ctrl_set_fan_.patch
@@ -0,0 +1,43 @@
+From f1d64f3280a69b994c6295c374d1ae22c2764c92 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Thu, 20 Sep 2018 11:50:26 +0800
+Subject: [PATCH 5480/5725] drm/amd/pp: Avoid divide-by-zero in
+ fan_ctrl_set_fan_speed_rpm
+
+The minRPM speed may be equal to zero, so the input RPM must also be
+checked against 0; otherwise a divide-by-zero crashes the driver.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c | 1 +
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c | 1 +
+ 2 files changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
+index eef086c..5bdc0df 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
+@@ -260,6 +260,7 @@ int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
+ if (hwmgr->thermal_controller.fanInfo.bNoFan ||
+ (hwmgr->thermal_controller.fanInfo.
+ ucTachometerPulsesPerRevolution == 0) ||
++ speed == 0 ||
+ (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) ||
+ (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
+ return 0;
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
+index 538de6c..3f807d6 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
+@@ -312,6 +312,7 @@ int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
+ int result = 0;
+
+ if (hwmgr->thermal_controller.fanInfo.bNoFan ||
++ speed == 0 ||
+ (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) ||
+ (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
+ return -1;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5481-drm-amd-pp-Expose-the-smu-support-for-SDMA-PG-cntl.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5481-drm-amd-pp-Expose-the-smu-support-for-SDMA-PG-cntl.patch
new file mode 100644
index 00000000..f6c2c73a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5481-drm-amd-pp-Expose-the-smu-support-for-SDMA-PG-cntl.patch
@@ -0,0 +1,85 @@
+From fe8cf7a311c4869a33a2922423e510cb2e7eaec6 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Tue, 25 Sep 2018 19:45:46 +0800
+Subject: [PATCH 5481/5725] drm/amd/pp: Expose the smu support for SDMA PG cntl
+
+The SDMA IP can be powered up/down via an SMU message.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 18 ++++++++++++++++++
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 8 ++++++++
+ drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 1 +
+ 3 files changed, 27 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+index 147fdbb..d38ba0f 100644
+--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
++++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+@@ -1207,6 +1207,21 @@ static void pp_dpm_powergate_acp(void *handle, bool gate)
+ hwmgr->hwmgr_func->powergate_acp(hwmgr, gate);
+ }
+
++static void pp_dpm_powergate_sdma(void *handle, bool gate)
++{
++ struct pp_hwmgr *hwmgr = handle;
++
++ if (!hwmgr)
++ return;
++
++ if (hwmgr->hwmgr_func->powergate_sdma == NULL) {
++ pr_info("%s was not implemented.\n", __func__);
++ return;
++ }
++
++ hwmgr->hwmgr_func->powergate_sdma(hwmgr, gate);
++}
++
+ static int pp_set_powergating_by_smu(void *handle,
+ uint32_t block_type, bool gate)
+ {
+@@ -1229,6 +1244,9 @@ static int pp_set_powergating_by_smu(void *handle,
+ case AMD_IP_BLOCK_TYPE_ACP:
+ pp_dpm_powergate_acp(handle, gate);
+ break;
++ case AMD_IP_BLOCK_TYPE_SDMA:
++ pp_dpm_powergate_sdma(handle, gate);
++ break;
+ default:
+ break;
+ }
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+index 7a42959..fa59be3 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+@@ -1153,6 +1153,14 @@ static int smu10_powergate_mmhub(struct pp_hwmgr *hwmgr)
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub);
+ }
+
++static int smu10_powergate_sdma(struct pp_hwmgr *hwmgr, bool gate)
++{
++ if (gate)
++ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerDownSdma);
++ else
++ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerUpSdma);
++}
++
+ static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate)
+ {
+ if (bgate) {
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+index f25f6b7..f07d93e 100644
+--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+@@ -329,6 +329,7 @@ struct pp_hwmgr_func {
+ int (*set_power_limit)(struct pp_hwmgr *hwmgr, uint32_t n);
+ int (*powergate_mmhub)(struct pp_hwmgr *hwmgr);
+ int (*smus_notify_pwe)(struct pp_hwmgr *hwmgr);
++ int (*powergate_sdma)(struct pp_hwmgr *hwmgr, bool bgate);
+ int (*enable_mgpu_fan_boost)(struct pp_hwmgr *hwmgr);
+ };
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5482-drm-amdgpu-Move-out-power-up-down-sdma-out-of-smu.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5482-drm-amdgpu-Move-out-power-up-down-sdma-out-of-smu.patch
new file mode 100644
index 00000000..11a0cc51
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5482-drm-amdgpu-Move-out-power-up-down-sdma-out-of-smu.patch
@@ -0,0 +1,96 @@
+From b5fde60c3bb2d968aa921c6098a758ad2e624bc4 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Tue, 25 Sep 2018 19:53:30 +0800
+Subject: [PATCH 5482/5725] drm/amdgpu: Move out power up/down sdma out of smu
+
+The SMU should only expose an interface to the other IP blocks,
+in order to reduce the dependence between the SMU and the other IP blocks.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 6 ++++++
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 1 +
+ drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c | 15 ---------------
+ 3 files changed, 7 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+index 8a99832..d05951f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+@@ -1374,6 +1374,9 @@ static int sdma_v4_0_hw_init(void *handle)
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
++ if (adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs->set_powergating_by_smu)
++ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false);
++
+ sdma_v4_0_init_golden_registers(adev);
+
+ r = sdma_v4_0_start(adev);
+@@ -1391,6 +1394,9 @@ static int sdma_v4_0_hw_fini(void *handle)
+ sdma_v4_0_ctx_switch_enable(adev, false);
+ sdma_v4_0_enable(adev, false);
+
++ if (adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs->set_powergating_by_smu)
++ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, true);
++
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+index fa59be3..7100c74 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+@@ -1217,6 +1217,7 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
+ .gfx_off_control = smu10_gfx_off_control,
+ .display_clock_voltage_request = smu10_display_clock_voltage_request,
+ .powergate_gfx = smu10_gfx_off_control,
++ .powergate_sdma = smu10_powergate_sdma,
+ };
+
+ int smu10_init_function_pointers(struct pp_hwmgr *hwmgr)
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
+index 6f961de..d78d864 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
+@@ -186,19 +186,6 @@ static int smu10_verify_smc_interface(struct pp_hwmgr *hwmgr)
+ return 0;
+ }
+
+-/* sdma is disabled by default in vbios, need to re-enable in driver */
+-static void smu10_smc_enable_sdma(struct pp_hwmgr *hwmgr)
+-{
+- smu10_send_msg_to_smc(hwmgr,
+- PPSMC_MSG_PowerUpSdma);
+-}
+-
+-static void smu10_smc_disable_sdma(struct pp_hwmgr *hwmgr)
+-{
+- smu10_send_msg_to_smc(hwmgr,
+- PPSMC_MSG_PowerDownSdma);
+-}
+-
+ /* vcn is disabled by default in vbios, need to re-enable in driver */
+ static void smu10_smc_enable_vcn(struct pp_hwmgr *hwmgr)
+ {
+@@ -218,7 +205,6 @@ static int smu10_smu_fini(struct pp_hwmgr *hwmgr)
+ (struct smu10_smumgr *)(hwmgr->smu_backend);
+
+ if (priv) {
+- smu10_smc_disable_sdma(hwmgr);
+ smu10_smc_disable_vcn(hwmgr);
+ amdgpu_bo_free_kernel(&priv->smu_tables.entry[SMU10_WMTABLE].handle,
+ &priv->smu_tables.entry[SMU10_WMTABLE].mc_addr,
+@@ -243,7 +229,6 @@ static int smu10_start_smu(struct pp_hwmgr *hwmgr)
+
+ if (smu10_verify_smc_interface(hwmgr))
+ return -EINVAL;
+- smu10_smc_enable_sdma(hwmgr);
+ smu10_smc_enable_vcn(hwmgr);
+ return 0;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5483-drm-amd-pp-Remove-uncessary-extra-vcn-pg-cntl-in-smu.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5483-drm-amd-pp-Remove-uncessary-extra-vcn-pg-cntl-in-smu.patch
new file mode 100644
index 00000000..2d4ccd7c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5483-drm-amd-pp-Remove-uncessary-extra-vcn-pg-cntl-in-smu.patch
@@ -0,0 +1,57 @@
+From 3d821a20d973174032f63283b6cfe7183af03d40 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Fri, 28 Sep 2018 16:57:34 +0800
+Subject: [PATCH 5483/5725] drm/amd/pp: Remove uncessary extra vcn pg cntl in
+ smu
+
+VCN power will now be controlled by the VCN block itself.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c | 16 +---------------
+ 1 file changed, 1 insertion(+), 15 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
+index d78d864..d0eb8ab 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
+@@ -186,26 +186,12 @@ static int smu10_verify_smc_interface(struct pp_hwmgr *hwmgr)
+ return 0;
+ }
+
+-/* vcn is disabled by default in vbios, need to re-enable in driver */
+-static void smu10_smc_enable_vcn(struct pp_hwmgr *hwmgr)
+-{
+- smu10_send_msg_to_smc_with_parameter(hwmgr,
+- PPSMC_MSG_PowerUpVcn, 0);
+-}
+-
+-static void smu10_smc_disable_vcn(struct pp_hwmgr *hwmgr)
+-{
+- smu10_send_msg_to_smc_with_parameter(hwmgr,
+- PPSMC_MSG_PowerDownVcn, 0);
+-}
+-
+ static int smu10_smu_fini(struct pp_hwmgr *hwmgr)
+ {
+ struct smu10_smumgr *priv =
+ (struct smu10_smumgr *)(hwmgr->smu_backend);
+
+ if (priv) {
+- smu10_smc_disable_vcn(hwmgr);
+ amdgpu_bo_free_kernel(&priv->smu_tables.entry[SMU10_WMTABLE].handle,
+ &priv->smu_tables.entry[SMU10_WMTABLE].mc_addr,
+ &priv->smu_tables.entry[SMU10_WMTABLE].table);
+@@ -229,7 +215,7 @@ static int smu10_start_smu(struct pp_hwmgr *hwmgr)
+
+ if (smu10_verify_smc_interface(hwmgr))
+ return -EINVAL;
+- smu10_smc_enable_vcn(hwmgr);
++
+ return 0;
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5484-drm-amd-pp-Remove-wrong-code-in-fiji_start_smu.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5484-drm-amd-pp-Remove-wrong-code-in-fiji_start_smu.patch
new file mode 100644
index 00000000..906a043c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5484-drm-amd-pp-Remove-wrong-code-in-fiji_start_smu.patch
@@ -0,0 +1,37 @@
+From 6ed98a6ce4d1a41c02016b41f71a7700438eb436 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Wed, 26 Sep 2018 12:17:52 +0800
+Subject: [PATCH 5484/5725] drm/amd/pp: Remove wrong code in fiji_start_smu
+
+The HW clock-gating feature will be enabled after the HW IPs are initialized.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c | 10 ----------
+ 1 file changed, 10 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+index ec14798..b6b62a7 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+@@ -302,16 +302,6 @@ static int fiji_start_smu(struct pp_hwmgr *hwmgr)
+ hwmgr->avfs_supported = false;
+ }
+
+- /* To initialize all clock gating before RLC loaded and running.*/
+- amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
+- AMD_IP_BLOCK_TYPE_GFX, AMD_CG_STATE_GATE);
+- amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
+- AMD_IP_BLOCK_TYPE_GMC, AMD_CG_STATE_GATE);
+- amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
+- AMD_IP_BLOCK_TYPE_SDMA, AMD_CG_STATE_GATE);
+- amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
+- AMD_IP_BLOCK_TYPE_COMMON, AMD_CG_STATE_GATE);
+-
+ /* Setup SoftRegsStart here for register lookup in case
+ * DummyBackEnd is used and ProcessFirmwareHeader is not executed
+ */
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5485-drm-amd-powerplay-Enable-Disable-NBPSTATE-on-On-OFF-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5485-drm-amd-powerplay-Enable-Disable-NBPSTATE-on-On-OFF-.patch
new file mode 100644
index 00000000..7db568e0
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5485-drm-amd-powerplay-Enable-Disable-NBPSTATE-on-On-OFF-.patch
@@ -0,0 +1,44 @@
+From af55bdc563626770696074c47acf0b5bee61447d Mon Sep 17 00:00:00 2001
+From: Akshu Agrawal <akshu.agrawal@amd.com>
+Date: Mon, 24 Sep 2018 15:48:02 +0530
+Subject: [PATCH 5485/5725] drm/amd/powerplay: Enable/Disable NBPSTATE on
+ On/OFF of UVD
+
+We observe black lines (underflow) on the display when playing a
+4K video with UVD. With the low-memory P-state disabled, this issue
+is not seen.
+Multiple runs of power measurement show no impact.
+
+Signed-off-by: Akshu Agrawal <akshu.agrawal@amd.com>
+Signed-off-by: Satyajit Sahu <satyajit.sahu@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
+index 53cf787..fef111d 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
+@@ -1228,14 +1228,17 @@ static int smu8_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
+
+ static int smu8_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr)
+ {
+- if (PP_CAP(PHM_PlatformCaps_UVDPowerGating))
++ if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) {
++ smu8_nbdpm_pstate_enable_disable(hwmgr, true, true);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF);
++ }
+ return 0;
+ }
+
+ static int smu8_dpm_powerup_uvd(struct pp_hwmgr *hwmgr)
+ {
+ if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) {
++ smu8_nbdpm_pstate_enable_disable(hwmgr, false, true);
+ return smum_send_msg_to_smc_with_parameter(
+ hwmgr,
+ PPSMC_MSG_UVDPowerON,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5486-drm-amd-display-Add-DC-build_id-to-determine-build-t.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5486-drm-amd-display-Add-DC-build_id-to-determine-build-t.patch
new file mode 100644
index 00000000..953eb911
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5486-drm-amd-display-Add-DC-build_id-to-determine-build-t.patch
@@ -0,0 +1,61 @@
+From 764c7c6ecd746b9c91bc62c435af7e340ea07120 Mon Sep 17 00:00:00 2001
+From: Jun Lei <Jun.Lei@amd.com>
+Date: Thu, 13 Sep 2018 09:32:26 -0400
+Subject: [PATCH 5486/5725] drm/amd/display: Add DC build_id to determine build
+ type
+
+[why]
+Sometimes there are indications that the incorrect driver is being
+loaded in automated tests. This change adds the ability for builds to
+be tagged with a string, and picked up by the test infrastructure.
+
+[how]
+dc.c allocates a const string for the build id, initialized with a
+default value indicating a production build. For test builds, the build
+server will find/replace this value, and the test machine will then verify it.
+
+Signed-off-by: Jun Lei <Jun.Lei@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc.c | 3 +++
+ drivers/gpu/drm/amd/display/dc/dc.h | 2 ++
+ 2 files changed, 5 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index dd8babd..080b9cb 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -60,6 +60,7 @@
+ #define DC_LOGGER \
+ dc->ctx->logger
+
++const static char DC_BUILD_ID[] = "production-build";
+
+ /*******************************************************************************
+ * Private functions
+@@ -758,6 +759,8 @@ struct dc *dc_create(const struct dc_init_data *init_params)
+
+ dc->config = init_params->flags;
+
++ dc->build_id = DC_BUILD_ID;
++
+ DC_LOG_DC("Display Core initialized\n");
+
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index aa4d996..748f484 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -315,6 +315,8 @@ struct dc {
+ struct compressor *fbc_compressor;
+
+ struct dc_debug_data debug_data;
++
++ const char *build_id;
+ };
+
+ enum frame_buffer_mode {
+--
+2.7.4
+
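As a hedged sketch of the [how] above, driver-side test code could compare the exposed build id against an expected tag; the tag string and the warning below are hypothetical and not part of the patch.

    /* Illustrative check only; "test-build-1234" is a hypothetical tag a build server might inject. */
    if (strcmp(dc->build_id, "test-build-1234") != 0)
            DRM_WARN("unexpected DC build id: %s\n", dc->build_id);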
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5487-drm-amd-display-fix-4K-stereo-screen-flash-issue.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5487-drm-amd-display-fix-4K-stereo-screen-flash-issue.patch
new file mode 100644
index 00000000..7d823ccd
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5487-drm-amd-display-fix-4K-stereo-screen-flash-issue.patch
@@ -0,0 +1,34 @@
+From 4356c9c65a29b834d5cdb40ff3b5367e2189c643 Mon Sep 17 00:00:00 2001
+From: Charlene Liu <charlene.liu@amd.com>
+Date: Wed, 12 Sep 2018 18:22:16 -0400
+Subject: [PATCH 5487/5725] drm/amd/display: fix 4K stereo screen flash issue
+
+[Why]
+The HDMI scrambler is not enabled for pixel rates > 340 MHz.
+[How]
+Calculate the phy clock to include the HW frame-packing factor.
+
+Signed-off-by: Charlene Liu <charlene.liu@amd.com>
+Reviewed-by: Chris Park <Chris.Park@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index 6d27db6..28a9b24 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -1956,6 +1956,9 @@ static void calculate_phy_pix_clks(struct dc_stream_state *stream)
+ else
+ stream->phy_pix_clk =
+ stream->timing.pix_clk_khz;
++
++ if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
++ stream->phy_pix_clk *= 2;
+ }
+
+ enum dc_status resource_map_pool_resources(
+--
+2.7.4
+
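A rough worked example of the [How] above, using an assumed 4K frame-packed timing (the numbers are not taken from the patch):

    /* Illustration only: HDMI 3D frame packing doubles the effective phy clock. */
    uint32_t pix_clk_khz = 297000;           /* assumed base pixel clock, 297 MHz */
    uint32_t phy_pix_clk = pix_clk_khz * 2;  /* 594 MHz once the HW frame-packing factor is applied */
    /* 594 MHz > 340 MHz, so the HDMI scrambler / high TMDS clock path is now selected. */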
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5488-drm-amd-display-Add-a-check-function-for-virtual-sig.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5488-drm-amd-display-Add-a-check-function-for-virtual-sig.patch
new file mode 100644
index 00000000..ee42314c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5488-drm-amd-display-Add-a-check-function-for-virtual-sig.patch
@@ -0,0 +1,35 @@
+From 39b7969a617d0efa3a08f5428bd0f21d1d8fbd5f Mon Sep 17 00:00:00 2001
+From: Nikola Cornij <nikola.cornij@amd.com>
+Date: Wed, 12 Sep 2018 15:17:51 -0400
+Subject: [PATCH 5488/5725] drm/amd/display: Add a check-function for virtual
+ signal type
+
+[why]
+The same check functions exist for all other signal types.
+
+[how]
+Add a function that checks for the virtual signal type.
+
+Signed-off-by: Nikola Cornij <nikola.cornij@amd.com>
+Reviewed-by: Leo Li <sunpeng.li@amd.com>
+---
+ drivers/gpu/drm/amd/display/include/signal_types.h | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/include/signal_types.h b/drivers/gpu/drm/amd/display/include/signal_types.h
+index 03476b1..f56d289 100644
+--- a/drivers/gpu/drm/amd/display/include/signal_types.h
++++ b/drivers/gpu/drm/amd/display/include/signal_types.h
+@@ -102,4 +102,9 @@ static inline bool dc_is_audio_capable_signal(enum signal_type signal)
+ dc_is_hdmi_signal(signal));
+ }
+
++static inline bool dc_is_virtual_signal(enum signal_type signal)
++{
++ return (signal == SIGNAL_TYPE_VIRTUAL);
++}
++
+ #endif
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5489-drm-amd-display-Calculate-swizzle-mode-using-bpp-dur.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5489-drm-amd-display-Calculate-swizzle-mode-using-bpp-dur.patch
new file mode 100644
index 00000000..da2be7dc
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5489-drm-amd-display-Calculate-swizzle-mode-using-bpp-dur.patch
@@ -0,0 +1,213 @@
+From d65fce193c443c6d29ac5252941de442203a633a Mon Sep 17 00:00:00 2001
+From: Su Sung Chung <Su.Chung@amd.com>
+Date: Thu, 13 Sep 2018 15:26:08 -0400
+Subject: [PATCH 5489/5725] drm/amd/display: Calculate swizzle mode using bpp
+ during validation
+
+[Why]
+Previously bandwidth validation was failing because swizzle mode was not
+initialized during plane_state allocation. The swizzle mode was
+calculated using pixel format, which is how swizzle mode is initially
+calculated in addrlib.
+
+[How]
+* Set default swizzle mode for validation to DC_SW_UNKNOWN
+* Created a new function, dcn10_get_default_swizzle_mode, which sets the
+  plane swizzle mode based on the selected pixel format
+* Added the call of get_default_swizzle_mode into dc_validate_global_state
+* Set failsafe swizzle mode back to DC_SW_LINEAR
+
+Signed-off-by: Su Sung Chung <Su.Chung@amd.com>
+Reviewed-by: Eric Yang <eric.yang2@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc.c | 30 ++----------------
+ drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 37 ++++++++++++++++++++++
+ drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 3 +-
+ .../gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 21 +++++++++++-
+ drivers/gpu/drm/amd/display/dc/inc/core_types.h | 3 ++
+ drivers/gpu/drm/amd/display/dc/inc/resource.h | 3 ++
+ 6 files changed, 67 insertions(+), 30 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 080b9cb..2d1e774 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -1139,32 +1139,6 @@ static bool is_surface_in_context(
+ return false;
+ }
+
+-static unsigned int pixel_format_to_bpp(enum surface_pixel_format format)
+-{
+- switch (format) {
+- case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+- case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+- return 12;
+- case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+- case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+- case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+- case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+- return 16;
+- case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+- case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+- case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+- case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+- return 32;
+- case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+- case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+- case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+- return 64;
+- default:
+- ASSERT_CRITICAL(false);
+- return -1;
+- }
+-}
+-
+ static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
+ {
+ union surface_update_flags *update_flags = &u->surface->update_flags;
+@@ -1198,8 +1172,8 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa
+ || u->plane_info->dcc.grph.meta_pitch != u->surface->dcc.grph.meta_pitch)
+ update_flags->bits.dcc_change = 1;
+
+- if (pixel_format_to_bpp(u->plane_info->format) !=
+- pixel_format_to_bpp(u->surface->format))
++ if (resource_pixel_format_to_bpp(u->plane_info->format) !=
++ resource_pixel_format_to_bpp(u->surface->format))
+ /* different bytes per element will require full bandwidth
+ * and DML calculation
+ */
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index 28a9b24..4942810 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -2082,6 +2082,14 @@ enum dc_status dc_validate_global_state(
+ if (pipe_ctx->stream != stream)
+ continue;
+
++ if (dc->res_pool->funcs->get_default_swizzle_mode &&
++ pipe_ctx->plane_state &&
++ pipe_ctx->plane_state->tiling_info.gfx9.swizzle == DC_SW_UNKNOWN) {
++ result = dc->res_pool->funcs->get_default_swizzle_mode(pipe_ctx->plane_state);
++ if (result != DC_OK)
++ return result;
++ }
++
+ /* Switch to dp clock source only if there is
+ * no non dp stream that shares the same timing
+ * with the dp stream.
+@@ -2871,3 +2879,32 @@ enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *pla
+
+ return res;
+ }
++
++unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format)
++{
++ switch (format) {
++ case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS:
++ return 8;
++ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
++ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
++ return 12;
++ case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
++ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
++ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
++ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
++ return 16;
++ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
++ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
++ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
++ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
++ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
++ return 32;
++ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
++ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
++ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
++ return 64;
++ default:
++ ASSERT_CRITICAL(false);
++ return -1;
++ }
++}
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+index 57f57cf..7825e4b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+@@ -289,7 +289,8 @@ enum swizzle_mode_values {
+ DC_SW_VAR_S_X = 29,
+ DC_SW_VAR_D_X = 30,
+ DC_SW_VAR_R_X = 31,
+- DC_SW_MAX
++ DC_SW_MAX = 32,
++ DC_SW_UNKNOWN = DC_SW_MAX
+ };
+
+ union dc_tiling_info {
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+index e148f70..910f0b4 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+@@ -1131,6 +1131,24 @@ static enum dc_status dcn10_validate_plane(const struct dc_plane_state *plane_st
+ return DC_OK;
+ }
+
++static enum dc_status dcn10_get_default_swizzle_mode(struct dc_plane_state *plane_state)
++{
++ enum dc_status result = DC_OK;
++
++ enum surface_pixel_format surf_pix_format = plane_state->format;
++ unsigned int bpp = resource_pixel_format_to_bpp(surf_pix_format);
++
++ enum swizzle_mode_values swizzle = DC_SW_LINEAR;
++
++ if (bpp == 64)
++ swizzle = DC_SW_64KB_D;
++ else
++ swizzle = DC_SW_64KB_S;
++
++ plane_state->tiling_info.gfx9.swizzle = swizzle;
++ return result;
++}
++
+ static const struct dc_cap_funcs cap_funcs = {
+ .get_dcc_compression_cap = dcn10_get_dcc_compression_cap
+ };
+@@ -1141,7 +1159,8 @@ static const struct resource_funcs dcn10_res_pool_funcs = {
+ .validate_bandwidth = dcn_validate_bandwidth,
+ .acquire_idle_pipe_for_layer = dcn10_acquire_idle_pipe_for_layer,
+ .validate_plane = dcn10_validate_plane,
+- .add_stream_to_ctx = dcn10_add_stream_to_ctx
++ .add_stream_to_ctx = dcn10_add_stream_to_ctx,
++ .get_default_swizzle_mode = dcn10_get_default_swizzle_mode
+ };
+
+ static uint32_t read_pipe_fuses(struct dc_context *ctx)
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+index d7dadfd..879c34e 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+@@ -119,6 +119,9 @@ struct resource_funcs {
+ struct dc *dc,
+ struct dc_state *new_ctx,
+ struct dc_stream_state *stream);
++ enum dc_status (*get_default_swizzle_mode)(
++ struct dc_plane_state *plane_state);
++
+ };
+
+ struct audio_support{
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
+index 76d00c6..33b99e3 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
+@@ -172,4 +172,7 @@ void update_audio_usage(
+ const struct resource_pool *pool,
+ struct audio *audio,
+ bool acquired);
++
++unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format);
++
+ #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
+--
+2.7.4
+
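For illustration, the default swizzle selection added above behaves as sketched below. In the driver it is reached through res_pool->funcs->get_default_swizzle_mode from dc_validate_global_state; calling it directly here is only to show the bpp-to-swizzle mapping.

    /* Illustrative only; assumes a dc_plane_state where just .format matters here. */
    struct dc_plane_state ps = { .format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888 };  /* 32 bpp */
    dcn10_get_default_swizzle_mode(&ps);  /* sets ps.tiling_info.gfx9.swizzle = DC_SW_64KB_S */

    ps.format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;                          /* 64 bpp */
    dcn10_get_default_swizzle_mode(&ps);  /* sets ps.tiling_info.gfx9.swizzle = DC_SW_64KB_D */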
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5490-drm-amd-display-Add-function-to-fetch-clock-requirem.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5490-drm-amd-display-Add-function-to-fetch-clock-requirem.patch
new file mode 100644
index 00000000..4e041eb7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5490-drm-amd-display-Add-function-to-fetch-clock-requirem.patch
@@ -0,0 +1,92 @@
+From b47834e8cdc08bac7a352af0fb857ae093857c4b Mon Sep 17 00:00:00 2001
+From: Eryk Brol <eryk.brol@amd.com>
+Date: Fri, 7 Sep 2018 13:24:28 -0400
+Subject: [PATCH 5490/5725] drm/amd/display: Add function to fetch clock
+ requirements
+
+Also add the dram clock to the clocks struct, for systems that use it.
+
+Signed-off-by: Eryk Brol <eryk.brol@amd.com>
+Reviewed-by: Jun Lei <Jun.Lei@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc.c | 13 +++++++++++++
+ drivers/gpu/drm/amd/display/dc/dc.h | 4 +++-
+ drivers/gpu/drm/amd/display/dc/dc_types.h | 12 ++++++++++++
+ 3 files changed, 28 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 2d1e774..fc77cbf 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -1887,3 +1887,16 @@ void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
+ }
+ }
+ }
++
++void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
++{
++ info->displayClock = (unsigned int)state->bw.dcn.clk.dispclk_khz;
++ info->engineClock = (unsigned int)state->bw.dcn.clk.dcfclk_khz;
++ info->memoryClock = (unsigned int)state->bw.dcn.clk.dramclk_khz;
++ info->maxSupportedDppClock = (unsigned int)state->bw.dcn.clk.max_supported_dppclk_khz;
++ info->dppClock = (unsigned int)state->bw.dcn.clk.dppclk_khz;
++ info->socClock = (unsigned int)state->bw.dcn.clk.socclk_khz;
++ info->dcfClockDeepSleep = (unsigned int)state->bw.dcn.clk.dcfclk_deep_sleep_khz;
++ info->fClock = (unsigned int)state->bw.dcn.clk.fclk_khz;
++ info->phyClock = (unsigned int)state->bw.dcn.clk.phyclk_khz;
++}
+\ No newline at end of file
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 748f484..7185cfa 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -44,7 +44,6 @@
+ #define MAX_STREAMS 6
+ #define MAX_SINKS_PER_LINK 4
+
+-
+ /*******************************************************************************
+ * Display Core Interfaces
+ ******************************************************************************/
+@@ -208,6 +207,7 @@ struct dc_clocks {
+ int dcfclk_deep_sleep_khz;
+ int fclk_khz;
+ int phyclk_khz;
++ int dramclk_khz;
+ };
+
+ struct dc_debug_options {
+@@ -613,6 +613,8 @@ struct dc_validation_set {
+
+ enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *plane_state);
+
++void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info);
++
+ enum dc_status dc_validate_global_state(
+ struct dc *dc,
+ struct dc_state *new_ctx);
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
+index 4fb6278..6e12d64 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
+@@ -659,4 +659,16 @@ enum i2c_mot_mode {
+ I2C_MOT_FALSE
+ };
+
++struct AsicStateEx {
++ unsigned int memoryClock;
++ unsigned int displayClock;
++ unsigned int engineClock;
++ unsigned int maxSupportedDppClock;
++ unsigned int dppClock;
++ unsigned int socClock;
++ unsigned int dcfClockDeepSleep;
++ unsigned int fClock;
++ unsigned int phyClock;
++};
++
+ #endif /* DC_TYPES_H_ */
+--
+2.7.4
+
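A minimal usage sketch of the new interface; the calling context (dc->current_state) is an assumption and not part of the patch.

    /* Illustrative caller: fetch the clocks computed for a validated state. */
    struct AsicStateEx clocks = {0};

    get_clock_requirements_for_state(dc->current_state, &clocks);
    /* clocks.displayClock, clocks.memoryClock, clocks.fClock, ... now hold the
     * per-state clock requirements in kHz, as filled in by the function above. */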
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5491-drm-amd-display-block-DP-YCbCr420-modes.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5491-drm-amd-display-block-DP-YCbCr420-modes.patch
new file mode 100644
index 00000000..eb1f1440
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5491-drm-amd-display-block-DP-YCbCr420-modes.patch
@@ -0,0 +1,50 @@
+From 35024217fc7222f67364cb82ab5847115afd19de Mon Sep 17 00:00:00 2001
+From: Eric Yang <Eric.Yang2@amd.com>
+Date: Fri, 14 Sep 2018 13:53:14 -0400
+Subject: [PATCH 5491/5725] drm/amd/display: block DP YCbCr420 modes
+
+[why]
+DP YCbCr 4:2:0 is currently not supported and results in a black screen when set.
+
+[How]
+Fail the validate-timing helper for those modes.
+
+Signed-off-by: Eric Yang <Eric.Yang2@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c | 3 +++
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c | 3 +++
+ 2 files changed, 6 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+index 4942590..70eb9472 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+@@ -662,6 +662,9 @@ bool dce110_link_encoder_validate_dp_output(
+ const struct dce110_link_encoder *enc110,
+ const struct dc_crtc_timing *crtc_timing)
+ {
++ if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
++ return false;
++
+ /* default RGB only */
+ if (crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB)
+ return true;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+index 6f67520..bef0011 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+@@ -606,6 +606,9 @@ bool dcn10_link_encoder_validate_dp_output(
+ const struct dcn10_link_encoder *enc10,
+ const struct dc_crtc_timing *crtc_timing)
+ {
++ if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
++ return false;
++
+ /* default RGB only */
+ if (crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB)
+ return true;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5492-drm-amd-display-clean-up-encoding-checks.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5492-drm-amd-display-clean-up-encoding-checks.patch
new file mode 100644
index 00000000..d42138a7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5492-drm-amd-display-clean-up-encoding-checks.patch
@@ -0,0 +1,178 @@
+From 3f1f482713a1c19fb678d484c609bebacfc808f3 Mon Sep 17 00:00:00 2001
+From: Eric Yang <Eric.Yang2@amd.com>
+Date: Fri, 14 Sep 2018 15:55:01 -0400
+Subject: [PATCH 5492/5725] drm/amd/display: clean up encoding checks
+
+[Why]
+All ASICs we support have YCbCr support, so
+the check is unnecessary; the current logic
+in validate output also returns true all
+the time, so the unnecessary logic is removed.
+
+Signed-off-by: Eric Yang <Eric.Yang2@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c | 16 +---------------
+ drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c | 3 +--
+ drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c | 3 +--
+ drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c | 3 +--
+ drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c | 1 -
+ drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c | 3 +--
+ .../gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c | 17 +----------------
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 3 +--
+ drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h | 1 -
+ 9 files changed, 7 insertions(+), 43 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+index 70eb9472..366bc8c 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+@@ -665,21 +665,7 @@ bool dce110_link_encoder_validate_dp_output(
+ if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ return false;
+
+- /* default RGB only */
+- if (crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB)
+- return true;
+-
+- if (enc110->base.features.flags.bits.IS_YCBCR_CAPABLE)
+- return true;
+-
+- /* for DCE 8.x or later DP Y-only feature,
+- * we need ASIC cap + FeatureSupportDPYonly, not support 666 */
+- if (crtc_timing->flags.Y_ONLY &&
+- enc110->base.features.flags.bits.IS_YCBCR_CAPABLE &&
+- crtc_timing->display_color_depth != COLOR_DEPTH_666)
+- return true;
+-
+- return false;
++ return true;
+ }
+
+ void dce110_link_encoder_construct(
+diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+index b1cc388..5b75460 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+@@ -551,8 +551,7 @@ static const struct encoder_feature_support link_enc_feature = {
+ .max_hdmi_deep_color = COLOR_DEPTH_121212,
+ .max_hdmi_pixel_clock = 300000,
+ .flags.bits.IS_HBR2_CAPABLE = true,
+- .flags.bits.IS_TPS3_CAPABLE = true,
+- .flags.bits.IS_YCBCR_CAPABLE = true
++ .flags.bits.IS_TPS3_CAPABLE = true
+ };
+
+ struct link_encoder *dce100_link_encoder_create(
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+index b44cc70..4607a6a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+@@ -570,8 +570,7 @@ static const struct encoder_feature_support link_enc_feature = {
+ .max_hdmi_deep_color = COLOR_DEPTH_121212,
+ .max_hdmi_pixel_clock = 594000,
+ .flags.bits.IS_HBR2_CAPABLE = true,
+- .flags.bits.IS_TPS3_CAPABLE = true,
+- .flags.bits.IS_YCBCR_CAPABLE = true
++ .flags.bits.IS_TPS3_CAPABLE = true
+ };
+
+ static struct link_encoder *dce110_link_encoder_create(
+diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+index 0f8332e..8b5a269 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+@@ -555,8 +555,7 @@ static const struct encoder_feature_support link_enc_feature = {
+ .flags.bits.IS_HBR2_CAPABLE = true,
+ .flags.bits.IS_HBR3_CAPABLE = true,
+ .flags.bits.IS_TPS3_CAPABLE = true,
+- .flags.bits.IS_TPS4_CAPABLE = true,
+- .flags.bits.IS_YCBCR_CAPABLE = true
++ .flags.bits.IS_TPS4_CAPABLE = true
+ };
+
+ struct link_encoder *dce112_link_encoder_create(
+diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+index 5905580..53a7a2f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+@@ -609,7 +609,6 @@ static const struct encoder_feature_support link_enc_feature = {
+ .flags.bits.IS_HBR3_CAPABLE = true,
+ .flags.bits.IS_TPS3_CAPABLE = true,
+ .flags.bits.IS_TPS4_CAPABLE = true,
+- .flags.bits.IS_YCBCR_CAPABLE = true
+ };
+
+ static struct link_encoder *dce120_link_encoder_create(
+diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+index 1dc590c..79e5c5c 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+@@ -650,8 +650,7 @@ static const struct encoder_feature_support link_enc_feature = {
+ .max_hdmi_deep_color = COLOR_DEPTH_121212,
+ .max_hdmi_pixel_clock = 297000,
+ .flags.bits.IS_HBR2_CAPABLE = true,
+- .flags.bits.IS_TPS3_CAPABLE = true,
+- .flags.bits.IS_YCBCR_CAPABLE = true
++ .flags.bits.IS_TPS3_CAPABLE = true
+ };
+
+ struct link_encoder *dce80_link_encoder_create(
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+index bef0011..ba6a8686 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+@@ -609,22 +609,7 @@ bool dcn10_link_encoder_validate_dp_output(
+ if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ return false;
+
+- /* default RGB only */
+- if (crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB)
+- return true;
+-
+- if (enc10->base.features.flags.bits.IS_YCBCR_CAPABLE)
+- return true;
+-
+- /* for DCE 8.x or later DP Y-only feature,
+- * we need ASIC cap + FeatureSupportDPYonly, not support 666
+- */
+- if (crtc_timing->flags.Y_ONLY &&
+- enc10->base.features.flags.bits.IS_YCBCR_CAPABLE &&
+- crtc_timing->display_color_depth != COLOR_DEPTH_666)
+- return true;
+-
+- return false;
++ return true;
+ }
+
+ void dcn10_link_encoder_construct(
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+index 910f0b4..d58fbb2 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+@@ -723,8 +723,7 @@ static const struct encoder_feature_support link_enc_feature = {
+ .flags.bits.IS_HBR2_CAPABLE = true,
+ .flags.bits.IS_HBR3_CAPABLE = true,
+ .flags.bits.IS_TPS3_CAPABLE = true,
+- .flags.bits.IS_TPS4_CAPABLE = true,
+- .flags.bits.IS_YCBCR_CAPABLE = true
++ .flags.bits.IS_TPS4_CAPABLE = true
+ };
+
+ struct link_encoder *dcn10_link_encoder_create(
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+index 5881892..e28e977 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+@@ -58,7 +58,6 @@ struct encoder_feature_support {
+ uint32_t IS_HBR3_CAPABLE:1;
+ uint32_t IS_TPS3_CAPABLE:1;
+ uint32_t IS_TPS4_CAPABLE:1;
+- uint32_t IS_YCBCR_CAPABLE:1;
+ uint32_t HDMI_6GB_EN:1;
+ } bits;
+ uint32_t raw;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5493-drm-amd-display-WA-for-DF-keeps-awake-after-S0i3.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5493-drm-amd-display-WA-for-DF-keeps-awake-after-S0i3.patch
new file mode 100644
index 00000000..74be82d5
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5493-drm-amd-display-WA-for-DF-keeps-awake-after-S0i3.patch
@@ -0,0 +1,95 @@
+From 81dad169a27bc975b282f480585a0cc1abc12a9a Mon Sep 17 00:00:00 2001
+From: Yongqiang Sun <yongqiang.sun@amd.com>
+Date: Mon, 17 Sep 2018 10:05:51 -0400
+Subject: [PATCH 5493/5725] drm/amd/display: WA for DF keeps awake after S0i3.
+
+[Why]
+DF keeps awake after S0i3 resume because DRAM_STATE_CNTL is
+set by the BIOS command table during dcn init_hw.
+
+[How]
+As a workaround, check the STATE_CNTL status before init_hw;
+if it is 0 before init_hw and set to 1 after init_hw,
+change it back to 0.
+
+Signed-off-by: Yongqiang Sun <yongqiang.sun@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c | 17 +++++++++++++++++
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h | 4 ++++
+ .../gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 14 ++++++++++++++
+ 3 files changed, 35 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+index 297e1e5..4254e7e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+@@ -87,6 +87,23 @@ void hubbub1_wm_read_state(struct hubbub *hubbub,
+ s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D);
+ }
+
++void hubbub1_disable_allow_self_refresh(struct hubbub *hubbub)
++{
++ REG_UPDATE(DCHUBBUB_ARB_DRAM_STATE_CNTL,
++ DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, 0);
++}
++
++bool hububu1_is_allow_self_refresh_enabled(struct hubbub *hubbub)
++{
++ uint32_t enable = 0;
++
++ REG_GET(DCHUBBUB_ARB_DRAM_STATE_CNTL,
++ DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, &enable);
++
++ return enable ? true : false;
++}
++
++
+ bool hubbub1_verify_allow_pstate_change_high(
+ struct hubbub *hubbub)
+ {
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
+index d6e596e..d0f03d1 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
+@@ -203,6 +203,10 @@ void hubbub1_program_watermarks(
+ unsigned int refclk_mhz,
+ bool safe_to_lower);
+
++void hubbub1_disable_allow_self_refresh(struct hubbub *hubbub);
++
++bool hububu1_is_allow_self_refresh_enabled(struct hubbub *hubub);
++
+ void hubbub1_toggle_watermark_change_req(
+ struct hubbub *hubbub);
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index a881ff5..193184a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -997,7 +997,21 @@ static void dcn10_init_hw(struct dc *dc)
+ } else {
+
+ if (!dcb->funcs->is_accelerated_mode(dcb)) {
++ bool allow_self_fresh_force_enable =
++ hububu1_is_allow_self_refresh_enabled(dc->res_pool->hubbub);
++
+ bios_golden_init(dc);
++
++ /* WA for making DF sleep when idle after resume from S0i3.
++ * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by
++ * command table, if DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0
++ * before calling command table and it changed to 1 after,
++ * it should be set back to 0.
++ */
++ if (allow_self_fresh_force_enable == false &&
++ hububu1_is_allow_self_refresh_enabled(dc->res_pool->hubbub))
++ hubbub1_disable_allow_self_refresh(dc->res_pool->hubbub);
++
+ disable_vga(dc->hwseq);
+ }
+
+--
+2.7.4
+
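The hunk above is the whole workaround, but the save/compare/restore idea is worth spelling out. Below is a minimal userspace sketch of the same pattern, assuming a fake register word and a stand-in bios_golden_init(); it illustrates the flow only and is not the real DCN code path.

#include <stdbool.h>
#include <stdio.h>

static unsigned int dram_state_cntl;              /* stand-in for the MMIO register */
#define ALLOW_SELF_REFRESH_FORCE_ENABLE (1u << 0)

static bool self_refresh_force_enabled(void)
{
    return (dram_state_cntl & ALLOW_SELF_REFRESH_FORCE_ENABLE) != 0;
}

static void bios_golden_init(void)
{
    /* the BIOS command table side effect the workaround guards against */
    dram_state_cntl |= ALLOW_SELF_REFRESH_FORCE_ENABLE;
}

int main(void)
{
    bool was_enabled = self_refresh_force_enabled();  /* sample before init */

    bios_golden_init();

    /* only clear the bit if init flipped it from 0 to 1 */
    if (!was_enabled && self_refresh_force_enabled())
        dram_state_cntl &= ~ALLOW_SELF_REFRESH_FORCE_ENABLE;

    printf("force enable after WA: %d\n", self_refresh_force_enabled());
    return 0;
}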
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5494-drm-amd-display-dc-3.1.68.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5494-drm-amd-display-dc-3.1.68.patch
new file mode 100644
index 00000000..3a087354
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5494-drm-amd-display-dc-3.1.68.patch
@@ -0,0 +1,28 @@
+From 2f3361580a5c9363abb4e562098710bac8a7b113 Mon Sep 17 00:00:00 2001
+From: Tony Cheng <tony.cheng@amd.com>
+Date: Mon, 10 Sep 2018 11:30:52 -0400
+Subject: [PATCH 5494/5725] drm/amd/display: dc 3.1.68
+
+Signed-off-by: Tony Cheng <tony.cheng@amd.com>
+Reviewed-by: Steven Chiu <Steven.Chiu@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 7185cfa..b2fd563 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -38,7 +38,7 @@
+ #include "inc/compressor.h"
+ #include "dml/display_mode_lib.h"
+
+-#define DC_VER "3.1.67"
++#define DC_VER "3.1.68"
+
+ #define MAX_SURFACES 3
+ #define MAX_STREAMS 6
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5495-drm-amd-display-fix-memory-leak-in-resource-pools.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5495-drm-amd-display-fix-memory-leak-in-resource-pools.patch
new file mode 100644
index 00000000..2ee5d22f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5495-drm-amd-display-fix-memory-leak-in-resource-pools.patch
@@ -0,0 +1,126 @@
+From 9f9c10ebe22771d2ff803f39589146aafdda4b23 Mon Sep 17 00:00:00 2001
+From: Jun Lei <Jun.Lei@amd.com>
+Date: Tue, 18 Sep 2018 09:38:20 -0400
+Subject: [PATCH 5495/5725] drm/amd/display: fix memory leak in resource pools
+
+[why]
+DDC engines were recently changed to be tracked independently
+of the pipe count. The change was reflected in the resource
+constructors but not in the destructors. This manifests as a
+memory leak when pipe harvesting is enabled, since not all
+constructed DDC engines are freed.
+
+[how]
+Make the destructors symmetric with the constructors for all dcX_resource files.
+
+Signed-off-by: Jun Lei <Jun.Lei@amd.com>
+Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c | 2 ++
+ drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c | 2 ++
+ drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c | 8 +++++---
+ drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c | 2 ++
+ drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c | 2 ++
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 2 ++
+ 6 files changed, 15 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+index 5b75460..14754a8 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+@@ -689,7 +689,9 @@ static void destruct(struct dce110_resource_pool *pool)
+ kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
+ pool->base.timing_generators[i] = NULL;
+ }
++ }
+
++ for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
+ if (pool->base.engines[i] != NULL)
+ dce110_engine_destroy(&pool->base.engines[i]);
+ if (pool->base.hw_i2cs[i] != NULL) {
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+index 4607a6a..de19093 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+@@ -719,7 +719,9 @@ static void destruct(struct dce110_resource_pool *pool)
+ kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
+ pool->base.timing_generators[i] = NULL;
+ }
++ }
+
++ for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
+ if (pool->base.engines[i] != NULL)
+ dce110_engine_destroy(&pool->base.engines[i]);
+ if (pool->base.hw_i2cs[i] != NULL) {
+diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+index 8b5a269..3ce79c2 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+@@ -693,9 +693,6 @@ static void destruct(struct dce110_resource_pool *pool)
+ if (pool->base.opps[i] != NULL)
+ dce110_opp_destroy(&pool->base.opps[i]);
+
+- if (pool->base.engines[i] != NULL)
+- dce110_engine_destroy(&pool->base.engines[i]);
+-
+ if (pool->base.transforms[i] != NULL)
+ dce112_transform_destroy(&pool->base.transforms[i]);
+
+@@ -711,6 +708,11 @@ static void destruct(struct dce110_resource_pool *pool)
+ kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
+ pool->base.timing_generators[i] = NULL;
+ }
++ }
++
++ for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
++ if (pool->base.engines[i] != NULL)
++ dce110_engine_destroy(&pool->base.engines[i]);
+ if (pool->base.hw_i2cs[i] != NULL) {
+ kfree(pool->base.hw_i2cs[i]);
+ pool->base.hw_i2cs[i] = NULL;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+index 53a7a2f..79ab5f9 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+@@ -533,7 +533,9 @@ static void destruct(struct dce110_resource_pool *pool)
+ kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
+ pool->base.timing_generators[i] = NULL;
+ }
++ }
+
++ for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
+ if (pool->base.engines[i] != NULL)
+ dce110_engine_destroy(&pool->base.engines[i]);
+ if (pool->base.hw_i2cs[i] != NULL) {
+diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+index 79e5c5c..d68f951 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+@@ -738,7 +738,9 @@ static void destruct(struct dce110_resource_pool *pool)
+ kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
+ pool->base.timing_generators[i] = NULL;
+ }
++ }
+
++ for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
+ if (pool->base.engines[i] != NULL)
+ dce110_engine_destroy(&pool->base.engines[i]);
+ if (pool->base.hw_i2cs[i] != NULL) {
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+index d58fbb2..a71453a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+@@ -908,7 +908,9 @@ static void destruct(struct dcn10_resource_pool *pool)
+ kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
+ pool->base.timing_generators[i] = NULL;
+ }
++ }
+
++ for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
+ if (pool->base.engines[i] != NULL)
+ pool->base.engines[i]->funcs->destroy_engine(&pool->base.engines[i]);
+ if (pool->base.hw_i2cs[i] != NULL) {
+--
+2.7.4
+
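To make the leak above concrete: the engines array is sized and filled per num_ddc, so it has to be torn down by a loop with the same bound; freeing it inside the pipe_count loop leaks entries whenever pipe harvesting leaves pipe_count smaller than num_ddc. A standalone sketch of the symmetric pattern, with illustrative types and counts rather than the real resource-pool structures:

#include <stdio.h>
#include <stdlib.h>

struct pool {
    int pipe_count;
    int num_ddc;
    void *timing_generators[8];
    void *engines[8];
};

static void construct(struct pool *p, int pipes, int ddc)
{
    p->pipe_count = pipes;
    p->num_ddc = ddc;
    for (int i = 0; i < p->pipe_count; i++)
        p->timing_generators[i] = malloc(64);
    for (int i = 0; i < p->num_ddc; i++)
        p->engines[i] = malloc(64);
}

static void destruct(struct pool *p)
{
    for (int i = 0; i < p->pipe_count; i++)
        free(p->timing_generators[i]);

    /* separate loop, same bound as the allocation: a pipe_count bound here
     * would leak (num_ddc - pipe_count) engines on harvested parts */
    for (int i = 0; i < p->num_ddc; i++)
        free(p->engines[i]);
}

int main(void)
{
    struct pool p;

    construct(&p, 2 /* harvested down from 4 pipes */, 4 /* ddc engines */);
    destruct(&p);
    printf("every allocation has a matching free\n");
    return 0;
}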
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5496-drm-amd-display-Flatten-irq-handler-data-struct.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5496-drm-amd-display-Flatten-irq-handler-data-struct.patch
new file mode 100644
index 00000000..35779a9c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5496-drm-amd-display-Flatten-irq-handler-data-struct.patch
@@ -0,0 +1,130 @@
+From fbe7a9daea04a0b735d742dc179f0a7935ca25c0 Mon Sep 17 00:00:00 2001
+From: Leo Li <sunpeng.li@amd.com>
+Date: Tue, 18 Sep 2018 10:21:35 -0400
+Subject: [PATCH 5496/5725] drm/amd/display: Flatten irq handler data struct
+
+[Why]
+There is no reason why the common data needs to be kept separate.
+
+[How]
+Flatten the struct by moving common data into the DM IRQ struct.
+
+Signed-off-by: Leo Li <sunpeng.li@amd.com>
+Reviewed-by: David Francis <David.Francis@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+---
+ .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c | 37 ++++++++--------------
+ 1 file changed, 14 insertions(+), 23 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+index 034aa76..e23bd93 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+@@ -36,17 +36,13 @@
+ * Private declarations.
+ *****************************************************************************/
+
+-struct handler_common_data {
++struct amdgpu_dm_irq_handler_data {
+ struct list_head list;
+ interrupt_handler handler;
+ void *handler_arg;
+
+ /* DM which this handler belongs to */
+ struct amdgpu_display_manager *dm;
+-};
+-
+-struct amdgpu_dm_irq_handler_data {
+- struct handler_common_data hcd;
+ /* DAL irq source which registered for this interrupt. */
+ enum dc_irq_source irq_source;
+ };
+@@ -61,7 +57,7 @@ struct amdgpu_dm_irq_handler_data {
+ * Private functions.
+ *****************************************************************************/
+
+-static void init_handler_common_data(struct handler_common_data *hcd,
++static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd,
+ void (*ih)(void *),
+ void *args,
+ struct amdgpu_display_manager *dm)
+@@ -85,11 +81,9 @@ static void dm_irq_work_func(struct work_struct *work)
+ struct amdgpu_dm_irq_handler_data *handler_data;
+
+ list_for_each(entry, handler_list) {
+- handler_data =
+- list_entry(
+- entry,
+- struct amdgpu_dm_irq_handler_data,
+- hcd.list);
++ handler_data = list_entry(entry,
++ struct amdgpu_dm_irq_handler_data,
++ list);
+
+ DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n",
+ handler_data->irq_source);
+@@ -97,7 +91,7 @@ static void dm_irq_work_func(struct work_struct *work)
+ DRM_DEBUG_KMS("DM_IRQ: schedule_work: for dal_src=%d\n",
+ handler_data->irq_source);
+
+- handler_data->hcd.handler(handler_data->hcd.handler_arg);
++ handler_data->handler(handler_data->handler_arg);
+ }
+
+ /* Call a DAL subcomponent which registered for interrupt notification
+@@ -137,11 +131,11 @@ static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
+ list_for_each_safe(entry, tmp, hnd_list) {
+
+ handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
+- hcd.list);
++ list);
+
+ if (ih == handler) {
+ /* Found our handler. Remove it from the list. */
+- list_del(&handler->hcd.list);
++ list_del(&handler->list);
+ handler_removed = true;
+ break;
+ }
+@@ -230,8 +224,7 @@ void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
+
+ memset(handler_data, 0, sizeof(*handler_data));
+
+- init_handler_common_data(&handler_data->hcd, ih, handler_args,
+- &adev->dm);
++ init_handler_common_data(handler_data, ih, handler_args, &adev->dm);
+
+ irq_source = int_params->irq_source;
+
+@@ -250,7 +243,7 @@ void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
+ break;
+ }
+
+- list_add_tail(&handler_data->hcd.list, hnd_list);
++ list_add_tail(&handler_data->list, hnd_list);
+
+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
+@@ -462,15 +455,13 @@ static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev,
+ entry,
+ &adev->dm.irq_handler_list_high_tab[irq_source]) {
+
+- handler_data =
+- list_entry(
+- entry,
+- struct amdgpu_dm_irq_handler_data,
+- hcd.list);
++ handler_data = list_entry(entry,
++ struct amdgpu_dm_irq_handler_data,
++ list);
+
+ /* Call a subcomponent which registered for immediate
+ * interrupt notification */
+- handler_data->hcd.handler(handler_data->hcd.handler_arg);
++ handler_data->handler(handler_data->handler_arg);
+ }
+
+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+--
+2.7.4
+
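The flattening works because list_entry() is nothing more than container_of() pointer arithmetic on the embedded list head; once the list head and handler fields live directly in amdgpu_dm_irq_handler_data, a list walk recovers the handler data in one step instead of going through hcd. A small userspace demo of that idea, using a minimal re-implementation of container_of instead of the kernel headers (names are illustrative):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct list_head { struct list_head *next; };

struct irq_handler_data {
    struct list_head list;        /* was hcd.list before the flattening */
    void (*handler)(void *arg);   /* was hcd.handler */
    void *handler_arg;            /* was hcd.handler_arg */
    int irq_source;
};

static void demo_handler(void *arg)
{
    printf("handler called for %s\n", (const char *)arg);
}

int main(void)
{
    struct irq_handler_data data = {
        .handler = demo_handler,
        .handler_arg = "vblank",
        .irq_source = 7,
    };
    struct list_head *entry = &data.list;   /* what a list walk hands back */

    /* equivalent of list_entry(entry, struct amdgpu_dm_irq_handler_data, list) */
    struct irq_handler_data *h =
        container_of(entry, struct irq_handler_data, list);

    h->handler(h->handler_arg);
    return 0;
}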
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5497-drm-amd-display-fix-Interlace-video-timing.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5497-drm-amd-display-fix-Interlace-video-timing.patch
new file mode 100644
index 00000000..50c3745f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5497-drm-amd-display-fix-Interlace-video-timing.patch
@@ -0,0 +1,132 @@
+From 090fa0865f4be923cd817efb84633f5fae31d23d Mon Sep 17 00:00:00 2001
+From: Charlene Liu <charlene.liu@amd.com>
+Date: Tue, 18 Sep 2018 13:23:42 -0400
+Subject: [PATCH 5497/5725] drm/amd/display: fix Interlace video timing.
+
+[Description] Interlace mode shows the wrong vertical timing.
+Interlace timing in the EDID is half the vertical timing of the progressive timing.
+The driver already doubles the vertical timing in the EDID parser,
+so there is no need to double it again in OPTC.
+
+Signed-off-by: Charlene Liu <charlene.liu@amd.com>
+Reviewed-by: Chris Park <Chris.Park@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 32 +++++------------------
+ 1 file changed, 7 insertions(+), 25 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+index 411f892..ad46294 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+@@ -98,7 +98,6 @@ static uint32_t get_start_vline(struct timing_generator *optc, const struct dc_c
+ struct dc_crtc_timing patched_crtc_timing;
+ int vesa_sync_start;
+ int asic_blank_end;
+- int interlace_factor;
+ int vertical_line_start;
+
+ patched_crtc_timing = *dc_crtc_timing;
+@@ -112,16 +111,13 @@ static uint32_t get_start_vline(struct timing_generator *optc, const struct dc_c
+ vesa_sync_start -
+ patched_crtc_timing.h_border_left;
+
+- interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
+-
+ vesa_sync_start = patched_crtc_timing.v_addressable +
+ patched_crtc_timing.v_border_bottom +
+ patched_crtc_timing.v_front_porch;
+
+ asic_blank_end = (patched_crtc_timing.v_total -
+ vesa_sync_start -
+- patched_crtc_timing.v_border_top)
+- * interlace_factor;
++ patched_crtc_timing.v_border_top);
+
+ vertical_line_start = asic_blank_end - optc->dlg_otg_param.vstartup_start + 1;
+ if (vertical_line_start < 0) {
+@@ -186,7 +182,6 @@ void optc1_program_timing(
+ uint32_t v_sync_end;
+ uint32_t v_init, v_fp2;
+ uint32_t h_sync_polarity, v_sync_polarity;
+- uint32_t interlace_factor;
+ uint32_t start_point = 0;
+ uint32_t field_num = 0;
+ uint32_t h_div_2;
+@@ -237,16 +232,8 @@ void optc1_program_timing(
+ REG_UPDATE(OTG_H_SYNC_A_CNTL,
+ OTG_H_SYNC_A_POL, h_sync_polarity);
+
+- /* Load vertical timing */
++ v_total = patched_crtc_timing.v_total - 1;
+
+- /* CRTC_V_TOTAL = v_total - 1 */
+- if (patched_crtc_timing.flags.INTERLACE) {
+- interlace_factor = 2;
+- v_total = 2 * patched_crtc_timing.v_total;
+- } else {
+- interlace_factor = 1;
+- v_total = patched_crtc_timing.v_total - 1;
+- }
+ REG_SET(OTG_V_TOTAL, 0,
+ OTG_V_TOTAL, v_total);
+
+@@ -259,7 +246,7 @@ void optc1_program_timing(
+ OTG_V_TOTAL_MIN, v_total);
+
+ /* v_sync_start = 0, v_sync_end = v_sync_width */
+- v_sync_end = patched_crtc_timing.v_sync_width * interlace_factor;
++ v_sync_end = patched_crtc_timing.v_sync_width;
+
+ REG_UPDATE_2(OTG_V_SYNC_A,
+ OTG_V_SYNC_A_START, 0,
+@@ -271,15 +258,13 @@ void optc1_program_timing(
+
+ asic_blank_end = (patched_crtc_timing.v_total -
+ vesa_sync_start -
+- patched_crtc_timing.v_border_top)
+- * interlace_factor;
++ patched_crtc_timing.v_border_top);
+
+ /* v_blank_start = v_blank_end + v_active */
+ asic_blank_start = asic_blank_end +
+ (patched_crtc_timing.v_border_top +
+ patched_crtc_timing.v_addressable +
+- patched_crtc_timing.v_border_bottom)
+- * interlace_factor;
++ patched_crtc_timing.v_border_bottom);
+
+ REG_UPDATE_2(OTG_V_BLANK_START_END,
+ OTG_V_BLANK_START, asic_blank_start,
+@@ -301,7 +286,7 @@ void optc1_program_timing(
+ 0 : 1;
+
+ REG_UPDATE(OTG_V_SYNC_A_CNTL,
+- OTG_V_SYNC_A_POL, v_sync_polarity);
++ OTG_V_SYNC_A_POL, v_sync_polarity);
+
+ v_init = asic_blank_start;
+ if (optc->dlg_otg_param.signal == SIGNAL_TYPE_DISPLAY_PORT ||
+@@ -532,7 +517,6 @@ bool optc1_validate_timing(
+ struct timing_generator *optc,
+ const struct dc_crtc_timing *timing)
+ {
+- uint32_t interlace_factor;
+ uint32_t v_blank;
+ uint32_t h_blank;
+ uint32_t min_v_blank;
+@@ -540,10 +524,8 @@ bool optc1_validate_timing(
+
+ ASSERT(timing != NULL);
+
+- interlace_factor = timing->flags.INTERLACE ? 2 : 1;
+ v_blank = (timing->v_total - timing->v_addressable -
+- timing->v_border_top - timing->v_border_bottom) *
+- interlace_factor;
++ timing->v_border_top - timing->v_border_bottom);
+
+ h_blank = (timing->h_total - timing->h_addressable -
+ timing->h_border_right -
+--
+2.7.4
+
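A quick worked example of the double count this patch removes, using assumed 1080i-style numbers (illustrative values, not taken from the patch):

#include <stdio.h>

int main(void)
{
    int v_total_per_field = 562;                      /* what the EDID stores */
    int v_total_from_parser = v_total_per_field * 2;  /* parser already doubles it */
    int interlace_factor = 2;

    printf("old OPTC programming: %d (doubled a second time, wrong)\n",
           v_total_from_parser * interlace_factor);
    printf("fixed OPTC programming: %d\n", v_total_from_parser);
    return 0;
}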
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5498-drm-amd-display-HLK-Periodic-Frame-Notification-test.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5498-drm-amd-display-HLK-Periodic-Frame-Notification-test.patch
new file mode 100644
index 00000000..11c1ecc2
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5498-drm-amd-display-HLK-Periodic-Frame-Notification-test.patch
@@ -0,0 +1,36 @@
+From 5d99868cb5063852ccfe8d0e9f249baded91dc4c Mon Sep 17 00:00:00 2001
+From: Murton Liu <murton.liu@amd.com>
+Date: Wed, 19 Sep 2018 14:31:12 -0400
+Subject: [PATCH 5498/5725] drm/amd/display: HLK Periodic Frame Notification
+ test failed
+
+[Why]
+Due to a small pre-fetch window, the active vline timing is a couple
+of lines off when compared to what it should be.
+
+[How]
+Changed the calculation for the start vline to account for this window.
+
+Signed-off-by: Murton Liu <murton.liu@amd.com>
+Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+index ad46294..5462668 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+@@ -150,7 +150,7 @@ void optc1_program_vline_interrupt(
+ req_delta_lines--;
+
+ if (req_delta_lines > vsync_line)
+- start_line = dc_crtc_timing->v_total - (req_delta_lines - vsync_line) - 1;
++ start_line = dc_crtc_timing->v_total - (req_delta_lines - vsync_line) + 2;
+ else
+ start_line = vsync_line - req_delta_lines;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5499-drm-amd-display-Fix-Vega10-lightup-on-S3-resume.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5499-drm-amd-display-Fix-Vega10-lightup-on-S3-resume.patch
new file mode 100644
index 00000000..c5641457
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5499-drm-amd-display-Fix-Vega10-lightup-on-S3-resume.patch
@@ -0,0 +1,88 @@
+From f559629353208259508efaad6fa36d1796383eeb Mon Sep 17 00:00:00 2001
+From: Roman Li <Roman.Li@amd.com>
+Date: Wed, 19 Sep 2018 15:46:53 -0400
+Subject: [PATCH 5499/5725] drm/amd/display: Fix Vega10 lightup on S3 resume
+
+[Why]
+There have been a few reports of Vega10 display remaining blank
+after S3 resume. The regression is caused by workaround for mode
+change on Vega10 - skip set_bandwidth if stream count is 0.
+As a result we skipped dispclk reset on suspend, thus on resume
+we may skip the clock update assuming it hasn't been changed.
+On some systems it causes display blank or 'out of range'.
+
+[How]
+Revert "drm/amd/display: Fix Vega10 black screen after mode change"
+Verified that it hadn't cause mode change regression.
+
+Signed-off-by: Roman Li <Roman.Li@amd.com>
+Reviewed-by: Sun peng Li <Sunpeng.Li@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 2 +-
+ drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h | 5 -----
+ drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c | 12 ------------
+ 3 files changed, 1 insertion(+), 18 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index 2d7d13b..419d0e4 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -2534,7 +2534,7 @@ static void pplib_apply_display_requirements(
+ dc->prev_display_config = *pp_display_cfg;
+ }
+
+-void dce110_set_bandwidth(
++static void dce110_set_bandwidth(
+ struct dc *dc,
+ struct dc_state *context,
+ bool decrease_allowed)
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+index a226a3d..d6db3db 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+@@ -68,11 +68,6 @@ void dce110_fill_display_configs(
+ const struct dc_state *context,
+ struct dm_pp_display_configuration *pp_display_cfg);
+
+-void dce110_set_bandwidth(
+- struct dc *dc,
+- struct dc_state *context,
+- bool decrease_allowed);
+-
+ uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context);
+
+ void dp_receiver_power_ctrl(struct dc_link *link, bool on);
+diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
+index 5853522..eb0f5f9 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
+@@ -244,17 +244,6 @@ static void dce120_update_dchub(
+ dh_data->dchub_info_valid = false;
+ }
+
+-static void dce120_set_bandwidth(
+- struct dc *dc,
+- struct dc_state *context,
+- bool decrease_allowed)
+-{
+- if (context->stream_count <= 0)
+- return;
+-
+- dce110_set_bandwidth(dc, context, decrease_allowed);
+-}
+-
+ void dce120_hw_sequencer_construct(struct dc *dc)
+ {
+ /* All registers used by dce11.2 match those in dce11 in offset and
+@@ -263,6 +252,5 @@ void dce120_hw_sequencer_construct(struct dc *dc)
+ dce110_hw_sequencer_construct(dc);
+ dc->hwss.enable_display_power_gating = dce120_enable_display_power_gating;
+ dc->hwss.update_dchub = dce120_update_dchub;
+- dc->hwss.set_bandwidth = dce120_set_bandwidth;
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5500-drm-amd-display-Raise-dispclk-value-for-dce_update_c.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5500-drm-amd-display-Raise-dispclk-value-for-dce_update_c.patch
new file mode 100644
index 00000000..42eea237
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5500-drm-amd-display-Raise-dispclk-value-for-dce_update_c.patch
@@ -0,0 +1,50 @@
+From d408874575d5a406bbbfa8f7e5706fd6891f73b3 Mon Sep 17 00:00:00 2001
+From: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Date: Fri, 21 Sep 2018 09:35:24 -0400
+Subject: [PATCH 5500/5725] drm/amd/display: Raise dispclk value for
+ dce_update_clocks
+
+[Why]
+
+The DISPCLK value was previously requested to be 15% higher for all
+ASICs that went through the dce110 bandwidth code path. As part of a
+refactoring of dce_clocks and dce110 set_bandwidth this was removed
+for power saving considerations.
+
+This change caused corruption under certain display configurations.
+Originally thought to be Vega specific, it was also observed on Polaris.
+
+[How]
+
+The 15% is brought back but its placement differs from the original
+patch. This boost should only be enabled while DFS bypass is inactive.
+
+This (like the Vega patch) is also a workaround that should be
+removed after the root cause is identified.
+
+Signed-off-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+index aa6bd41..fb962503 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+@@ -658,6 +658,11 @@ static void dce_update_clocks(struct dccg *dccg,
+ bool safe_to_lower)
+ {
+ struct dm_pp_power_level_change_request level_change_req;
++ struct dce_dccg *clk_dce = TO_DCE_CLOCKS(dccg);
++
++ /* TODO: Investigate why this is needed to fix display corruption. */
++ if (!clk_dce->dfs_bypass_active)
++ new_clocks->dispclk_khz = new_clocks->dispclk_khz * 115 / 100;
+
+ level_change_req.power_level = dce_get_required_clocks_state(dccg, new_clocks);
+ /* get max clock state from PPLIB */
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5501-drm-amd-display-Signal-hw_done-after-waiting-for-fli.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5501-drm-amd-display-Signal-hw_done-after-waiting-for-fli.patch
new file mode 100644
index 00000000..11817c29
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5501-drm-amd-display-Signal-hw_done-after-waiting-for-fli.patch
@@ -0,0 +1,113 @@
+From 6b4a08f22320f335167ff37f462a8125d440cffc Mon Sep 17 00:00:00 2001
+From: Shirish S <shirish.s@amd.com>
+Date: Mon, 24 Sep 2018 19:01:47 +0530
+Subject: [PATCH 5501/5725] drm/amd/display: Signal hw_done() after waiting for
+ flip_done()
+
+In amdgpu_dm_commit_tail(), wait until flip_done() is signaled before
+we signal hw_done().
+
+[Why]
+
+This is to temporarily address a paging error that occurs when a
+nonblocking commit contends with another commit, particularly in a
+mirrored display configuration where at least 2 CRTCs are updated.
+The error occurs in drm_atomic_helper_wait_for_flip_done(), when we
+attempt to access the contents of new_crtc_state->commit.
+
+Here's the sequence for a mirrored 2 display setup (irrelevant steps
+left out for clarity):
+
+**THREAD 1** | **THREAD 2**
+ |
+Initialize atomic state for flip |
+ |
+Queue worker |
+ ...
+
+ | Do work for flip
+ |
+ | Signal hw_done() on CRTC 1
+ | Signal hw_done() on CRTC 2
+ |
+ | Wait for flip_done() on CRTC 1
+
+ <---- **PREEMPTED BY THREAD 1**
+
+Initialize atomic state for cursor |
+update (1) |
+ |
+Do cursor update work on both CRTCs |
+ |
+Clear atomic state (2) |
+**DONE** |
+ ...
+ |
+ | Wait for flip_done() on CRTC 2
+ | *ERROR*
+ |
+
+The issue starts with (1). When the atomic state is initialized, the
+current CRTC states are duplicated to be the new_crtc_states, and
+referenced to be the old_crtc_states. (The new_crtc_states are to be
+filled with update data.)
+
+Some things to note:
+
+* Due to the mirrored configuration, the cursor updates on both CRTCs.
+
+* At this point, the pflip IRQ has already been handled, and flip_done
+ signaled on all CRTCs. The cursor commit can therefore continue.
+
+* The old_crtc_states used by the cursor update are the **same states**
+ as the new_crtc_states used by the flip worker.
+
+At (2), the old_crtc_state is freed (*), and the cursor commit
+completes. We then context switch back to the flip worker, where we
+attempt to access the new_crtc_state->commit object. This is
+problematic, as this state has already been freed.
+
+(*) Technically, 'state->crtcs[i].state' is freed, which was made to
+ reference old_crtc_state in drm_atomic_helper_swap_state()
+
+[How]
+
+By moving hw_done() after wait_for_flip_done(), we're guaranteed that
+the new_crtc_state (from the flip worker's perspective) still exists.
+This is because any other commit will be blocked, waiting for the
+hw_done() signal.
+
+Note that both the i915 and imx drivers have this sequence flipped
+already, masking this problem.
+
+Change-Id: Ib064b1009936c5048baf041d6809c92283cba08d
+Signed-off-by: Shirish S <shirish.s@amd.com>
+Signed-off-by: Leo Li <sunpeng.li@amd.com>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 9363ca5..a488601 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -4893,6 +4893,14 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+ if (wait_for_vblank)
+ drm_atomic_helper_wait_for_flip_done(dev, state);
+
++ /*
++ * FIXME:
++ * Delay hw_done() until flip_done() is signaled. This is to block
++ * another commit from freeing the CRTC state while we're still
++ * waiting on flip_done.
++ */
++ drm_atomic_helper_commit_hw_done(state);
++
+ drm_atomic_helper_cleanup_planes(dev, state);
+
+ /*
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5502-drm-amdgpu-Refine-uvd_v6-7_0_enc_get_destroy_msg.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5502-drm-amdgpu-Refine-uvd_v6-7_0_enc_get_destroy_msg.patch
new file mode 100644
index 00000000..dab5e373
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5502-drm-amdgpu-Refine-uvd_v6-7_0_enc_get_destroy_msg.patch
@@ -0,0 +1,90 @@
+From 1fc9dbf19e4dc263df0509c2045d1d83beac864a Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Fri, 28 Sep 2018 15:25:06 +0800
+Subject: [PATCH 5502/5725] drm/amdgpu: Refine uvd_v6/7_0_enc_get_destroy_msg
+
+1. make uvd_v7_0_enc_get_destroy_msg static
+2. drop a function parameter that is always true
+
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 10 +++-------
+ drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 12 ++++--------
+ 2 files changed, 7 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+index 0ef7ce4..2fcafb4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+@@ -274,7 +274,7 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
+ */
+ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
+ uint32_t handle,
+- bool direct, struct dma_fence **fence)
++ struct dma_fence **fence)
+ {
+ const unsigned ib_size_dw = 16;
+ struct amdgpu_job *job;
+@@ -310,11 +310,7 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
+ for (i = ib->length_dw; i < ib_size_dw; ++i)
+ ib->ptr[i] = 0x0;
+
+- if (direct)
+- r = amdgpu_job_submit_direct(job, ring, &f);
+- else
+- r = amdgpu_job_submit(job, &ring->adev->vce.entity,
+- AMDGPU_FENCE_OWNER_UNDEFINED, &f);
++ r = amdgpu_job_submit_direct(job, ring, &f);
+ if (r)
+ goto err;
+
+@@ -345,7 +341,7 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+ goto error;
+ }
+
+- r = uvd_v6_0_enc_get_destroy_msg(ring, 1, true, &fence);
++ r = uvd_v6_0_enc_get_destroy_msg(ring, 1, &fence);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
+ goto error;
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+index cc705b4..07cb92e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+@@ -280,8 +280,8 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
+ *
+ * Close up a stream for HW test or if userspace failed to do so
+ */
+-int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+- bool direct, struct dma_fence **fence)
++static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
++ struct dma_fence **fence)
+ {
+ const unsigned ib_size_dw = 16;
+ struct amdgpu_job *job;
+@@ -317,11 +317,7 @@ int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+ for (i = ib->length_dw; i < ib_size_dw; ++i)
+ ib->ptr[i] = 0x0;
+
+- if (direct)
+- r = amdgpu_job_submit_direct(job, ring, &f);
+- else
+- r = amdgpu_job_submit(job, &ring->adev->vce.entity,
+- AMDGPU_FENCE_OWNER_UNDEFINED, &f);
++ r = amdgpu_job_submit_direct(job, ring, &f);
+ if (r)
+ goto err;
+
+@@ -352,7 +348,7 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+ goto error;
+ }
+
+- r = uvd_v7_0_enc_get_destroy_msg(ring, 1, true, &fence);
++ r = uvd_v7_0_enc_get_destroy_msg(ring, 1, &fence);
+ if (r) {
+ DRM_ERROR("amdgpu: (%d)failed to get destroy ib (%ld).\n", ring->me, r);
+ goto error;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5503-drm-amdgpu-Add-new-AMDGPU_PP_SENSOR_MIN-MAX_FAN_RPM-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5503-drm-amdgpu-Add-new-AMDGPU_PP_SENSOR_MIN-MAX_FAN_RPM-.patch
new file mode 100644
index 00000000..1aa373eb
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5503-drm-amdgpu-Add-new-AMDGPU_PP_SENSOR_MIN-MAX_FAN_RPM-.patch
@@ -0,0 +1,30 @@
+From 8eaa9abe3b522075f8b8a5918f048a6b8fff058a Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Sun, 30 Sep 2018 13:18:17 +0800
+Subject: [PATCH 5503/5725] drm/amdgpu: Add new
+ AMDGPU_PP_SENSOR_MIN/MAX_FAN_RPM sensor
+
+For getting the min/max fan speed in RPM units.
+
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/include/kgd_pp_interface.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+index 8593850..97001a6 100644
+--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
++++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+@@ -114,6 +114,8 @@ enum amd_pp_sensors {
+ AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
+ AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK,
+ AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK,
++ AMDGPU_PP_SENSOR_MIN_FAN_RPM,
++ AMDGPU_PP_SENSOR_MAX_FAN_RPM,
+ };
+
+ enum amd_pp_task {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5504-drm-amd-pp-Implement-AMDGPU_PP_SENSOR_MIN-MAX_FAN_RP.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5504-drm-amd-pp-Implement-AMDGPU_PP_SENSOR_MIN-MAX_FAN_RP.patch
new file mode 100644
index 00000000..bcc5586a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5504-drm-amd-pp-Implement-AMDGPU_PP_SENSOR_MIN-MAX_FAN_RP.patch
@@ -0,0 +1,48 @@
+From 05fadfcd17af1ab6bffe816163409aeef84098cd Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Sun, 30 Sep 2018 13:19:00 +0800
+Subject: [PATCH 5504/5725] drm/amd/pp: Implement
+ AMDGPU_PP_SENSOR_MIN/MAX_FAN_RPM
+
+so the user can query the RPM range
+
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 6 ++++++
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c | 2 ++
+ 2 files changed, 8 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+index d38ba0f..a92d765 100644
+--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
++++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+@@ -823,6 +823,12 @@ static int pp_dpm_read_sensor(void *handle, int idx,
+ case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
+ *((uint32_t *)value) = hwmgr->pstate_mclk;
+ return 0;
++ case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
++ *((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMinRPM;
++ return 0;
++ case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
++ *((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
++ return 0;
+ default:
+ mutex_lock(&hwmgr->smu_lock);
+ ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
+index 5f1f7a3..c9b93e6 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
+@@ -834,6 +834,8 @@ static int init_powerplay_table_information(
+
+ hwmgr->thermal_controller.ucType = powerplay_table->ucThermalControllerType;
+ pptable_information->uc_thermal_controller_type = powerplay_table->ucThermalControllerType;
++ hwmgr->thermal_controller.fanInfo.ulMinRPM = 0;
++ hwmgr->thermal_controller.fanInfo.ulMaxRPM = powerplay_table->smcPPTable.FanMaximumRpm;
+
+ set_hw_cap(hwmgr,
+ ATOM_VEGA20_PP_THERMALCONTROLLER_NONE != hwmgr->thermal_controller.ucType,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5505-drm-amdgpu-Add-fan-RPM-setting-via-sysfs.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5505-drm-amdgpu-Add-fan-RPM-setting-via-sysfs.patch
new file mode 100644
index 00000000..2d2c314c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5505-drm-amdgpu-Add-fan-RPM-setting-via-sysfs.patch
@@ -0,0 +1,349 @@
+From 00aacfc2768c650722569be5e4068bc12f17e534 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Thu, 20 Sep 2018 14:30:55 +0800
+Subject: [PATCH 5505/5725] drm/amdgpu: Add fan RPM setting via sysfs
+
+Add fan1_target to get/set the fan speed in RPM
+Add fan1_min/fan1_max to get the min/max fan speed in RPM
+Add fan1_enable to enable/disable the fan1 sensor
+
+v3: drop the hardcoded value of min/max RPM in the comments, pointed
+ out by Alex.
+v2: query the min/max RPM the GPU supports instead of hardcoded values.
+
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h | 3 +
+ drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 190 ++++++++++++++++++++++++-
+ drivers/gpu/drm/amd/include/kgd_pp_interface.h | 1 +
+ drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 19 +++
+ 4 files changed, 210 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+index 42568ae..f972cd1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+@@ -278,6 +278,9 @@ enum amdgpu_pcie_gen {
+ #define amdgpu_dpm_get_fan_speed_rpm(adev, s) \
+ ((adev)->powerplay.pp_funcs->get_fan_speed_rpm)((adev)->powerplay.pp_handle, (s))
+
++#define amdgpu_dpm_set_fan_speed_rpm(adev, s) \
++ ((adev)->powerplay.pp_funcs->set_fan_speed_rpm)((adev)->powerplay.pp_handle, (s))
++
+ #define amdgpu_dpm_get_sclk(adev, l) \
+ ((adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)))
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+index accf51e..486ac6e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+@@ -1172,6 +1172,11 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
+ struct amdgpu_device *adev = dev_get_drvdata(dev);
+ int err;
+ u32 speed = 0;
++ u32 pwm_mode;
++
++ pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
++ if (pwm_mode != AMD_FAN_CTRL_MANUAL)
++ return -ENODATA;
+
+ /* Can't adjust fan when the card is off */
+ if ((adev->flags & AMD_IS_PX) &&
+@@ -1187,6 +1192,153 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
+ return sprintf(buf, "%i\n", speed);
+ }
+
++static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ struct amdgpu_device *adev = dev_get_drvdata(dev);
++ u32 min_rpm = 0;
++ u32 size = sizeof(min_rpm);
++ int r;
++
++ if (!adev->powerplay.pp_funcs->read_sensor)
++ return -EINVAL;
++
++ r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
++ (void *)&min_rpm, &size);
++ if (r)
++ return r;
++
++ return snprintf(buf, PAGE_SIZE, "%d\n", min_rpm);
++}
++
++static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ struct amdgpu_device *adev = dev_get_drvdata(dev);
++ u32 max_rpm = 0;
++ u32 size = sizeof(max_rpm);
++ int r;
++
++ if (!adev->powerplay.pp_funcs->read_sensor)
++ return -EINVAL;
++
++ r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
++ (void *)&max_rpm, &size);
++ if (r)
++ return r;
++
++ return snprintf(buf, PAGE_SIZE, "%d\n", max_rpm);
++}
++
++static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ struct amdgpu_device *adev = dev_get_drvdata(dev);
++ int err;
++ u32 rpm = 0;
++ u32 pwm_mode;
++
++ pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
++ if (pwm_mode != AMD_FAN_CTRL_MANUAL)
++ return -ENODATA;
++
++ /* Can't adjust fan when the card is off */
++ if ((adev->flags & AMD_IS_PX) &&
++ (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
++ return -EINVAL;
++
++ if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
++ err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
++ if (err)
++ return err;
++ }
++
++ return sprintf(buf, "%i\n", rpm);
++}
++
++static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct amdgpu_device *adev = dev_get_drvdata(dev);
++ int err;
++ u32 value;
++ u32 pwm_mode;
++
++ pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
++ if (pwm_mode != AMD_FAN_CTRL_MANUAL)
++ return -ENODATA;
++
++ /* Can't adjust fan when the card is off */
++ if ((adev->flags & AMD_IS_PX) &&
++ (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
++ return -EINVAL;
++
++ err = kstrtou32(buf, 10, &value);
++ if (err)
++ return err;
++
++ if (adev->powerplay.pp_funcs->set_fan_speed_rpm) {
++ err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
++ if (err)
++ return err;
++ }
++
++ return count;
++}
++
++static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ struct amdgpu_device *adev = dev_get_drvdata(dev);
++ u32 pwm_mode = 0;
++
++ if (!adev->powerplay.pp_funcs->get_fan_control_mode)
++ return -EINVAL;
++
++ pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
++
++ return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
++}
++
++static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf,
++ size_t count)
++{
++ struct amdgpu_device *adev = dev_get_drvdata(dev);
++ int err;
++ int value;
++ u32 pwm_mode;
++
++ /* Can't adjust fan when the card is off */
++ if ((adev->flags & AMD_IS_PX) &&
++ (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
++ return -EINVAL;
++
++ if (!adev->powerplay.pp_funcs->set_fan_control_mode)
++ return -EINVAL;
++
++ err = kstrtoint(buf, 10, &value);
++ if (err)
++ return err;
++
++ if (value == 0)
++ pwm_mode = AMD_FAN_CTRL_AUTO;
++ else if (value == 1)
++ pwm_mode = AMD_FAN_CTRL_MANUAL;
++ else
++ return -EINVAL;
++
++ amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
++
++ return count;
++}
++
+ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+@@ -1406,8 +1558,16 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
+ *
+ * - pwm1_max: pulse width modulation fan control maximum level (255)
+ *
++ * - fan1_min: the minimum fan speed. Unit: revolution/min (RPM)
++ *
++ * - fan1_max: the maximum fan speed. Unit: revolution/min (RPM)
++ *
+ * - fan1_input: fan speed in RPM
+ *
++ * - fan[1-*]_target: Desired fan speed Unit: revolution/min (RPM)
++ *
++ * - fan[1-*]_enable: Enable or disable the sensors. 1: Enable, 0: Disable
++ *
+ * You can use hwmon tools like sensors to view this information on your system.
+ *
+ */
+@@ -1420,6 +1580,10 @@ static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_
+ static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
+ static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
+ static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
++static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0);
++static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0);
++static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0);
++static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0);
+ static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
+ static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
+ static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
+@@ -1438,6 +1602,10 @@ static struct attribute *hwmon_attributes[] = {
+ &sensor_dev_attr_pwm1_min.dev_attr.attr,
+ &sensor_dev_attr_pwm1_max.dev_attr.attr,
+ &sensor_dev_attr_fan1_input.dev_attr.attr,
++ &sensor_dev_attr_fan1_min.dev_attr.attr,
++ &sensor_dev_attr_fan1_max.dev_attr.attr,
++ &sensor_dev_attr_fan1_target.dev_attr.attr,
++ &sensor_dev_attr_fan1_enable.dev_attr.attr,
+ &sensor_dev_attr_in0_input.dev_attr.attr,
+ &sensor_dev_attr_in0_label.dev_attr.attr,
+ &sensor_dev_attr_in1_input.dev_attr.attr,
+@@ -1456,13 +1624,16 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
+ struct amdgpu_device *adev = dev_get_drvdata(dev);
+ umode_t effective_mode = attr->mode;
+
+-
+ /* Skip fan attributes if fan is not present */
+ if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
+ attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
+ attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
+- attr == &sensor_dev_attr_fan1_input.dev_attr.attr))
++ attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
++ attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
++ attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
++ attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
++ attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
+ return 0;
+
+ /* Skip limit attributes if DPM is not enabled */
+@@ -1472,7 +1643,12 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
+ attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
+ attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
+ attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
+- attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
++ attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
++ attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
++ attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
++ attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
++ attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
++ attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
+ return 0;
+
+ /* mask fan attributes if we have no bindings for this asic to expose */
+@@ -1497,10 +1673,18 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
+ /* hide max/min values if we can't both query and manage the fan */
+ if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
+ !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
++ (!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
++ !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
+ (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
+ return 0;
+
++ if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
++ !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
++ (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
++ attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
++ return 0;
++
+ /* only APUs have vddnb */
+ if (!(adev->flags & AMD_IS_APU) &&
+ (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
+diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+index 97001a6..980e696 100644
+--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
++++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+@@ -230,6 +230,7 @@ struct amd_pm_funcs {
+ enum amd_dpm_forced_level (*get_performance_level)(void *handle);
+ enum amd_pm_state_type (*get_current_power_state)(void *handle);
+ int (*get_fan_speed_rpm)(void *handle, uint32_t *rpm);
++ int (*set_fan_speed_rpm)(void *handle, uint32_t rpm);
+ int (*get_pp_num_states)(void *handle, struct pp_states_info *data);
+ int (*get_pp_table)(void *handle, char **table);
+ int (*set_pp_table)(void *handle, const char *buf, size_t size);
+diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+index a92d765..e9bf118 100644
+--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
++++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+@@ -586,6 +586,24 @@ static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
+ return ret;
+ }
+
++static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm)
++{
++ struct pp_hwmgr *hwmgr = handle;
++ int ret = 0;
++
++ if (!hwmgr || !hwmgr->pm_en)
++ return -EINVAL;
++
++ if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL) {
++ pr_info("%s was not implemented.\n", __func__);
++ return 0;
++ }
++ mutex_lock(&hwmgr->smu_lock);
++ ret = hwmgr->hwmgr_func->set_fan_speed_rpm(hwmgr, rpm);
++ mutex_unlock(&hwmgr->smu_lock);
++ return ret;
++}
++
+ static int pp_dpm_get_pp_num_states(void *handle,
+ struct pp_states_info *data)
+ {
+@@ -1308,6 +1326,7 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
+ .set_fan_speed_percent = pp_dpm_set_fan_speed_percent,
+ .get_fan_speed_percent = pp_dpm_get_fan_speed_percent,
+ .get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
++ .set_fan_speed_rpm = pp_dpm_set_fan_speed_rpm,
+ .get_pp_num_states = pp_dpm_get_pp_num_states,
+ .get_pp_table = pp_dpm_get_pp_table,
+ .set_pp_table = pp_dpm_set_pp_table,
+--
+2.7.4
+
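A hypothetical userspace sketch of how the new fan1_* files could be exercised once this patch is applied. The hwmon index, the file layout under /sys/class/hwmon, and the choice of a mid-range target are assumptions for the example, not part of the patch:

#include <stdio.h>

#define HWMON "/sys/class/hwmon/hwmon0"   /* assumption: amdgpu registered as hwmon0 */

static long read_val(const char *name)
{
    char path[256];
    long v = -1;
    FILE *f;

    snprintf(path, sizeof(path), HWMON "/%s", name);
    f = fopen(path, "r");
    if (f) {
        if (fscanf(f, "%ld", &v) != 1)
            v = -1;
        fclose(f);
    }
    return v;
}

static void write_val(const char *name, long v)
{
    char path[256];
    FILE *f;

    snprintf(path, sizeof(path), HWMON "/%s", name);
    f = fopen(path, "w");
    if (f) {
        fprintf(f, "%ld\n", v);
        fclose(f);
    }
}

int main(void)
{
    long min = read_val("fan1_min");
    long max = read_val("fan1_max");

    printf("fan RPM range: %ld..%ld\n", min, max);

    write_val("fan1_enable", 1);                     /* 1 = manual control */
    if (min >= 0 && max > min)
        write_val("fan1_target", (min + max) / 2);   /* request a mid-range RPM */

    printf("current: %ld rpm, target: %ld rpm\n",
           read_val("fan1_input"), read_val("fan1_target"));
    return 0;
}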
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5506-drm-amdgpu-Disable-sysfs-pwm1-if-not-in-manual-fan-c.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5506-drm-amdgpu-Disable-sysfs-pwm1-if-not-in-manual-fan-c.patch
new file mode 100644
index 00000000..9646674b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5506-drm-amdgpu-Disable-sysfs-pwm1-if-not-in-manual-fan-c.patch
@@ -0,0 +1,43 @@
+From c9f834e307c51b24706f80e4d9d4c4d6c98247f6 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Fri, 28 Sep 2018 16:01:48 +0800
+Subject: [PATCH 5506/5725] drm/amdgpu: Disable sysfs pwm1 if not in manual fan
+ control
+
+Following lm-sensors 3.0.0,
+only enable the pwm1 sysfs interface when the fan control
+mode (pwm1_enable) is manual.
+
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+index 486ac6e..9a487d2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+@@ -1120,12 +1120,19 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
+ struct amdgpu_device *adev = dev_get_drvdata(dev);
+ int err;
+ u32 value;
++ u32 pwm_mode;
+
+ /* Can't adjust fan when the card is off */
+ if ((adev->flags & AMD_IS_PX) &&
+ (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+ return -EINVAL;
+
++ pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
++ if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
++ pr_info("manual fan speed control should be enabled first\n");
++ return -EINVAL;
++ }
++
+ err = kstrtou32(buf, 10, &value);
+ if (err)
+ return err;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5507-drm-amdgpu-Drop-dead-define-in-amdgpu.h.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5507-drm-amdgpu-Drop-dead-define-in-amdgpu.h.patch
new file mode 100644
index 00000000..9edc9cb1
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5507-drm-amdgpu-Drop-dead-define-in-amdgpu.h.patch
@@ -0,0 +1,63 @@
+From a4601651f4de7bc96728b4202b1c816680bbd496 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Sat, 29 Sep 2018 17:07:48 +0800
+Subject: [PATCH 5507/5725] drm/amdgpu: Drop dead define in amdgpu.h
+
+The struct is not in use any more.
+
+Change-Id: I9c45b405c6cbf69a3c95d5fcd59e99bf3b489486
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 27 ---------------------------
+ 1 file changed, 27 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 15e12e7..662f265 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -633,30 +633,6 @@ void amdgpu_benchmark(struct amdgpu_device *adev, int test_number);
+ void amdgpu_test_moves(struct amdgpu_device *adev);
+
+ /*
+- * amdgpu smumgr functions
+- */
+-struct amdgpu_smumgr_funcs {
+- int (*check_fw_load_finish)(struct amdgpu_device *adev, uint32_t fwtype);
+- int (*request_smu_load_fw)(struct amdgpu_device *adev);
+- int (*request_smu_specific_fw)(struct amdgpu_device *adev, uint32_t fwtype);
+-};
+-
+-/*
+- * amdgpu smumgr
+- */
+-struct amdgpu_smumgr {
+- struct amdgpu_bo *toc_buf;
+- struct amdgpu_bo *smu_buf;
+- /* asic priv smu data */
+- void *priv;
+- spinlock_t smu_lock;
+- /* smumgr functions */
+- const struct amdgpu_smumgr_funcs *smumgr_funcs;
+- /* ucode loading complete flag */
+- uint32_t fw_flags;
+-};
+-
+-/*
+ * ASIC specific register table accessible by UMD
+ */
+ struct amdgpu_allowed_register_entry {
+@@ -1032,9 +1008,6 @@ struct amdgpu_device {
+ u32 cg_flags;
+ u32 pg_flags;
+
+- /* amdgpu smumgr */
+- struct amdgpu_smumgr smu;
+-
+ /* gfx */
+ struct amdgpu_gfx gfx;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5508-drm-amd-pp-Fix-memory-leak-on-CI-AI.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5508-drm-amd-pp-Fix-memory-leak-on-CI-AI.patch
new file mode 100644
index 00000000..f8f54ee4
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5508-drm-amd-pp-Fix-memory-leak-on-CI-AI.patch
@@ -0,0 +1,38 @@
+From 1c8c19ed5de4c19381755c4af40fa8b32d92009e Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Sat, 29 Sep 2018 14:52:31 +0800
+Subject: [PATCH 5508/5725] drm/amd/pp: Fix memory leak on CI/AI
+
+On CI/AI the firmware is not loaded by the SMU, but
+the SMU's firmware still needs to be released
+when the driver is finalized.
+
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+index e9bf118..3bfe0cf 100644
+--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
++++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+@@ -109,11 +109,11 @@ static int pp_sw_fini(void *handle)
+
+ hwmgr_sw_fini(hwmgr);
+
+- if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) {
+- release_firmware(adev->pm.fw);
+- adev->pm.fw = NULL;
++ if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
+ amdgpu_ucode_fini_bo(adev);
+- }
++
++ release_firmware(adev->pm.fw);
++ adev->pm.fw = NULL;
+
+ return 0;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5509-drm-amdgpu-Move-gfx-flag-in_suspend-to-adev.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5509-drm-amdgpu-Move-gfx-flag-in_suspend-to-adev.patch
new file mode 100644
index 00000000..cfcd4e02
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5509-drm-amdgpu-Move-gfx-flag-in_suspend-to-adev.patch
@@ -0,0 +1,152 @@
+From bdc18c8fcd9c4a5a53a7ab65691919b9e85f9fb0 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Sat, 29 Sep 2018 15:27:02 +0800
+Subject: [PATCH 5509/5725] drm/amdgpu: Move gfx flag in_suspend to adev
+
+Move the in_suspend flag from gfx to adev so it
+can be used in other ip blocks, and keep it
+consistent with the in_gpu_reset flag.
+
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 3 +++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 +++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 3 +--
+ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 13 +++----------
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 16 ++++------------
+ 5 files changed, 14 insertions(+), 24 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 662f265..5e6df72 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -1073,6 +1073,9 @@ struct amdgpu_device {
+
+ u8 reset_magic[AMDGPU_RESET_MAGIC_NUM];
+
++ /* s3/s4 mask */
++ bool in_suspend;
++
+ /* record last mm index being written through WREG32*/
+ unsigned long last_mm_index;
+ bool in_gpu_reset;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index b573d9f..1de2728 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -2701,6 +2701,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
++ adev->in_suspend = true;
+ drm_kms_helper_poll_disable(dev);
+
+ if (fbcon)
+@@ -2887,6 +2888,8 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
+ #ifdef CONFIG_PM
+ dev->dev->power.disable_depth--;
+ #endif
++ adev->in_suspend = false;
++
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+index f172e92..b61b5c1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+@@ -297,8 +297,7 @@ struct amdgpu_gfx {
+ /* reset mask */
+ uint32_t grbm_soft_reset;
+ uint32_t srbm_soft_reset;
+- /* s3/s4 mask */
+- bool in_suspend;
++
+ /* NGG */
+ struct amdgpu_ngg ngg;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index 34aabe7..09eab2b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -4871,7 +4871,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
+ struct vi_mqd *mqd = ring->mqd_ptr;
+ int mqd_idx = ring - &adev->gfx.compute_ring[0];
+
+- if (!adev->in_gpu_reset && !adev->gfx.in_suspend) {
++ if (!adev->in_gpu_reset && !adev->in_suspend) {
+ memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation));
+ ((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
+ ((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
+@@ -5141,19 +5141,12 @@ static int gfx_v8_0_hw_fini(void *handle)
+
+ static int gfx_v8_0_suspend(void *handle)
+ {
+- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+- adev->gfx.in_suspend = true;
+- return gfx_v8_0_hw_fini(adev);
++ return gfx_v8_0_hw_fini(handle);
+ }
+
+ static int gfx_v8_0_resume(void *handle)
+ {
+- int r;
+- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+-
+- r = gfx_v8_0_hw_init(adev);
+- adev->gfx.in_suspend = false;
+- return r;
++ return gfx_v8_0_hw_init(handle);
+ }
+
+ static bool gfx_v8_0_check_soft_reset(void *handle)
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 17be234..a71c27e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -3203,7 +3203,7 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
+ struct v9_mqd *mqd = ring->mqd_ptr;
+ int mqd_idx = ring - &adev->gfx.compute_ring[0];
+
+- if (!adev->in_gpu_reset && !adev->gfx.in_suspend) {
++ if (!adev->in_gpu_reset && !adev->in_suspend) {
+ memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
+ ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
+ ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
+@@ -3422,7 +3422,7 @@ static int gfx_v9_0_hw_fini(void *handle)
+ /* Use deinitialize sequence from CAIL when unbinding device from driver,
+ * otherwise KIQ is hanging when binding back
+ */
+- if (!adev->in_gpu_reset && !adev->gfx.in_suspend) {
++ if (!adev->in_gpu_reset && !adev->in_suspend) {
+ mutex_lock(&adev->srbm_mutex);
+ soc15_grbm_select(adev, adev->gfx.kiq.ring.me,
+ adev->gfx.kiq.ring.pipe,
+@@ -3442,20 +3442,12 @@ static int gfx_v9_0_hw_fini(void *handle)
+
+ static int gfx_v9_0_suspend(void *handle)
+ {
+- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+-
+- adev->gfx.in_suspend = true;
+- return gfx_v9_0_hw_fini(adev);
++ return gfx_v9_0_hw_fini(handle);
+ }
+
+ static int gfx_v9_0_resume(void *handle)
+ {
+- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+- int r;
+-
+- r = gfx_v9_0_hw_init(adev);
+- adev->gfx.in_suspend = false;
+- return r;
++ return gfx_v9_0_hw_init(handle);
+ }
+
+ static bool gfx_v9_0_is_idle(void *handle)
+--
+2.7.4
+
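A condensed sketch (illustrative, not from the patch) of the pattern this move enables: any IP block can now tell a cold init apart from a resume or a GPU reset by checking the two adev flags together, exactly as the kcq and hw_fini hunks above do; the function name is hypothetical.

    /* Hypothetical consumer of the relocated flag. */
    static void example_hw_init_step(struct amdgpu_device *adev)
    {
            if (!adev->in_gpu_reset && !adev->in_suspend) {
                    /* cold init: build state from scratch */
            } else {
                    /* resume or reset: state from the previous init is reused */
            }
    }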
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5510-drm-amd-pp-Refine-function-iceland_start_smu.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5510-drm-amd-pp-Refine-function-iceland_start_smu.patch
new file mode 100644
index 00000000..c3d78edf
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5510-drm-amd-pp-Refine-function-iceland_start_smu.patch
@@ -0,0 +1,46 @@
+From 584f1d962d639abcc17d0f8321894e3028d5c2c6 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Sat, 29 Sep 2018 13:28:20 +0800
+Subject: [PATCH 5510/5725] drm/amd/pp: Refine function iceland_start_smu
+
+If uploading the firmware fails, the same error will be
+encountered no matter how many times the function runs again,
+so remove the duplicated code.
+
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c | 12 +-----------
+ 1 file changed, 1 insertion(+), 11 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
+index 73aa368..c712d93 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
+@@ -234,22 +234,12 @@ static int iceland_start_smu(struct pp_hwmgr *hwmgr)
+ {
+ int result;
+
+- result = iceland_smu_upload_firmware_image(hwmgr);
+- if (result)
+- return result;
+- result = iceland_smu_start_smc(hwmgr);
+- if (result)
+- return result;
+-
+ if (!smu7_is_smc_ram_running(hwmgr)) {
+- pr_info("smu not running, upload firmware again \n");
+ result = iceland_smu_upload_firmware_image(hwmgr);
+ if (result)
+ return result;
+
+- result = iceland_smu_start_smc(hwmgr);
+- if (result)
+- return result;
++ iceland_smu_start_smc(hwmgr);
+ }
+
+ result = smu7_request_smu_load_fw(hwmgr);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5511-drm-amd-pp-Setup-SoftRegsStart-before-request-smu-lo.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5511-drm-amd-pp-Setup-SoftRegsStart-before-request-smu-lo.patch
new file mode 100644
index 00000000..58f4c6f3
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5511-drm-amd-pp-Setup-SoftRegsStart-before-request-smu-lo.patch
@@ -0,0 +1,82 @@
+From 265b030d561898e35b89d0929afdee56536f971f Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Sat, 29 Sep 2018 14:32:47 +0800
+Subject: [PATCH 5511/5725] drm/amd/pp: Setup SoftRegsStart before request smu
+ load fw
+
+The SoftRegsStart value is needed to access the
+UcodeLoadStatus register and check the firmware loading state.
+
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c | 11 ++++++++++-
+ drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c | 9 +++++++++
+ 2 files changed, 19 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
+index c712d93..374aa4a 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
+@@ -232,6 +232,7 @@ static int iceland_request_smu_load_specific_fw(struct pp_hwmgr *hwmgr,
+
+ static int iceland_start_smu(struct pp_hwmgr *hwmgr)
+ {
++ struct iceland_smumgr *priv = hwmgr->smu_backend;
+ int result;
+
+ if (!smu7_is_smc_ram_running(hwmgr)) {
+@@ -242,6 +243,14 @@ static int iceland_start_smu(struct pp_hwmgr *hwmgr)
+ iceland_smu_start_smc(hwmgr);
+ }
+
++ /* Setup SoftRegsStart here to visit the register UcodeLoadStatus
++ * to check fw loading state
++ */
++ smu7_read_smc_sram_dword(hwmgr,
++ SMU71_FIRMWARE_HEADER_LOCATION +
++ offsetof(SMU71_Firmware_Header, SoftRegisters),
++ &(priv->smu7_data.soft_regs_start), 0x40000);
++
+ result = smu7_request_smu_load_fw(hwmgr);
+
+ return result;
+@@ -2652,7 +2661,7 @@ const struct pp_smumgr_func iceland_smu_funcs = {
+ .smu_fini = &smu7_smu_fini,
+ .start_smu = &iceland_start_smu,
+ .check_fw_load_finish = &smu7_check_fw_load_finish,
+- .request_smu_load_fw = &smu7_reload_firmware,
++ .request_smu_load_fw = &smu7_request_smu_load_fw,
+ .request_smu_load_specific_fw = &iceland_request_smu_load_specific_fw,
+ .send_msg_to_smc = &smu7_send_msg_to_smc,
+ .send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter,
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+index ae8378e..1f366c0 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+@@ -192,6 +192,7 @@ static int tonga_start_in_non_protection_mode(struct pp_hwmgr *hwmgr)
+
+ static int tonga_start_smu(struct pp_hwmgr *hwmgr)
+ {
++ struct tonga_smumgr *priv = hwmgr->smu_backend;
+ int result;
+
+ /* Only start SMC if SMC RAM is not running */
+@@ -209,6 +210,14 @@ static int tonga_start_smu(struct pp_hwmgr *hwmgr)
+ }
+ }
+
++ /* Setup SoftRegsStart here to visit the register UcodeLoadStatus
++ * to check fw loading state
++ */
++ smu7_read_smc_sram_dword(hwmgr,
++ SMU72_FIRMWARE_HEADER_LOCATION +
++ offsetof(SMU72_Firmware_Header, SoftRegisters),
++ &(priv->smu7_data.soft_regs_start), 0x40000);
++
+ result = smu7_request_smu_load_fw(hwmgr);
+
+ return result;
+--
+2.7.4
+
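For orientation, a sketch of how the value cached here is consumed, pieced together from this patch and the smu7_check_fw_load_finish() path touched later in the series; it is illustrative rather than a verbatim copy of the driver code.

    /* 1) start_smu(): read SoftRegsStart out of the SMU firmware header. */
    smu7_read_smc_sram_dword(hwmgr,
                    SMU71_FIRMWARE_HEADER_LOCATION +
                    offsetof(SMU71_Firmware_Header, SoftRegisters),
                    &priv->smu7_data.soft_regs_start, 0x40000);

    /* 2) smu7_check_fw_load_finish(): poll UcodeLoadStatus relative to it. */
    phm_wait_on_indirect_register(hwmgr, mmSMC_IND_INDEX_11,
                    smu_data->soft_regs_start +
                    smum_get_offsetof(hwmgr, SMU_SoftRegisters, UcodeLoadStatus),
                    fw_type, fw_type);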
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5512-drm-amd-pp-Refine-smu7-8-request_smu_load_fw-callbac.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5512-drm-amd-pp-Refine-smu7-8-request_smu_load_fw-callbac.patch
new file mode 100644
index 00000000..a5933b00
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5512-drm-amd-pp-Refine-smu7-8-request_smu_load_fw-callbac.patch
@@ -0,0 +1,228 @@
+From 9b5db2f7d3c0c3e2408b9a4999721b242fe36f4f Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Sat, 29 Sep 2018 13:54:33 +0800
+Subject: [PATCH 5512/5725] drm/amd/pp: Refine smu7/8 request_smu_load_fw
+ callback function
+
+On VI, the request_smu_load_fw callback is used to load the
+gfx/sdma ip firmware.
+
+Check whether the gfx/sdma firmware has been loaded successfully
+in this callback function.
+If it failed, the driver can bail out to avoid a GPU hard hang.
+If it succeeded, clear the reload_fw flag to avoid a duplicated fw load.
+Across suspend/resume the driver needs to reload the fw,
+so in suspend, reset the reload_fw flag to true so the fw is loaded
+again on resume.
+
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 1 +
+ drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | 55 ++++-----------------
+ drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c | 56 +++++++++++-----------
+ 3 files changed, 39 insertions(+), 73 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+index 7500a3e..d552af2 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+@@ -301,6 +301,7 @@ int hwmgr_suspend(struct pp_hwmgr *hwmgr)
+ if (!hwmgr || !hwmgr->pm_en)
+ return 0;
+
++ hwmgr->reload_fw = true;
+ phm_disable_smc_firmware_ctf(hwmgr);
+ ret = psm_set_boot_states(hwmgr);
+ if (ret)
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+index 1234400..80a567a 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+@@ -304,44 +304,6 @@ int smu7_write_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_
+ return 0;
+ }
+
+-/* Convert the firmware type to SMU type mask. For MEC, we need to check all MEC related type */
+-
+-static uint32_t smu7_get_mask_for_firmware_type(uint32_t fw_type)
+-{
+- uint32_t result = 0;
+-
+- switch (fw_type) {
+- case UCODE_ID_SDMA0:
+- result = UCODE_ID_SDMA0_MASK;
+- break;
+- case UCODE_ID_SDMA1:
+- result = UCODE_ID_SDMA1_MASK;
+- break;
+- case UCODE_ID_CP_CE:
+- result = UCODE_ID_CP_CE_MASK;
+- break;
+- case UCODE_ID_CP_PFP:
+- result = UCODE_ID_CP_PFP_MASK;
+- break;
+- case UCODE_ID_CP_ME:
+- result = UCODE_ID_CP_ME_MASK;
+- break;
+- case UCODE_ID_CP_MEC:
+- case UCODE_ID_CP_MEC_JT1:
+- case UCODE_ID_CP_MEC_JT2:
+- result = UCODE_ID_CP_MEC_MASK;
+- break;
+- case UCODE_ID_RLC_G:
+- result = UCODE_ID_RLC_G_MASK;
+- break;
+- default:
+- pr_info("UCode type is out of range! \n");
+- result = 0;
+- }
+-
+- return result;
+-}
+-
+ static int smu7_populate_single_firmware_entry(struct pp_hwmgr *hwmgr,
+ uint32_t fw_type,
+ struct SMU_Entry *entry)
+@@ -383,10 +345,8 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
+ uint32_t fw_to_load;
+ int r = 0;
+
+- if (!hwmgr->reload_fw) {
+- pr_info("skip reloading...\n");
++ if (!hwmgr->reload_fw)
+ return 0;
+- }
+
+ if (smu_data->soft_regs_start)
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+@@ -469,10 +429,14 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
+ smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, upper_32_bits(smu_data->header_buffer.mc_addr));
+ smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, lower_32_bits(smu_data->header_buffer.mc_addr));
+
+- if (smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load))
+- pr_err("Fail to Request SMU Load uCode");
++ smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load);
+
+- return r;
++ r = smu7_check_fw_load_finish(hwmgr, fw_to_load);
++ if (!r) {
++ hwmgr->reload_fw = 0;
++ return 0;
++ }
++ pr_err("SMU load firmware failed\n");
+
+ failed:
+ kfree(smu_data->toc);
+@@ -484,13 +448,12 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
+ int smu7_check_fw_load_finish(struct pp_hwmgr *hwmgr, uint32_t fw_type)
+ {
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
+- uint32_t fw_mask = smu7_get_mask_for_firmware_type(fw_type);
+ uint32_t ret;
+
+ ret = phm_wait_on_indirect_register(hwmgr, mmSMC_IND_INDEX_11,
+ smu_data->soft_regs_start + smum_get_offsetof(hwmgr,
+ SMU_SoftRegisters, UcodeLoadStatus),
+- fw_mask, fw_mask);
++ fw_type, fw_type);
+ return ret;
+ }
+
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
+index a74c5be..7b3b66d 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
+@@ -658,11 +658,11 @@ static int smu8_request_smu_load_fw(struct pp_hwmgr *hwmgr)
+ {
+ struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
+ uint32_t smc_address;
++ uint32_t fw_to_check = 0;
++ int ret;
+
+- if (!hwmgr->reload_fw) {
+- pr_info("skip reloading...\n");
++ if (!hwmgr->reload_fw)
+ return 0;
+- }
+
+ smu8_smu_populate_firmware_entries(hwmgr);
+
+@@ -689,28 +689,9 @@ static int smu8_request_smu_load_fw(struct pp_hwmgr *hwmgr)
+ smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
+ smu8_smu->toc_entry_power_profiling_index);
+
+- return smu8_send_msg_to_smc_with_parameter(hwmgr,
++ smu8_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_ExecuteJob,
+ smu8_smu->toc_entry_initialize_index);
+-}
+-
+-static int smu8_start_smu(struct pp_hwmgr *hwmgr)
+-{
+- int ret = 0;
+- uint32_t fw_to_check = 0;
+- struct amdgpu_device *adev = hwmgr->adev;
+-
+- uint32_t index = SMN_MP1_SRAM_START_ADDR +
+- SMU8_FIRMWARE_HEADER_LOCATION +
+- offsetof(struct SMU8_Firmware_Header, Version);
+-
+-
+- if (hwmgr == NULL || hwmgr->device == NULL)
+- return -EINVAL;
+-
+- cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index);
+- hwmgr->smu_version = cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA);
+- adev->pm.fw_version = hwmgr->smu_version >> 8;
+
+ fw_to_check = UCODE_ID_RLC_G_MASK |
+ UCODE_ID_SDMA0_MASK |
+@@ -724,8 +705,6 @@ static int smu8_start_smu(struct pp_hwmgr *hwmgr)
+ if (hwmgr->chip_id == CHIP_STONEY)
+ fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);
+
+- smu8_request_smu_load_fw(hwmgr);
+-
+ ret = smu8_check_fw_load_finish(hwmgr, fw_to_check);
+ if (ret) {
+ pr_err("SMU firmware load failed\n");
+@@ -733,10 +712,33 @@ static int smu8_start_smu(struct pp_hwmgr *hwmgr)
+ }
+
+ ret = smu8_load_mec_firmware(hwmgr);
+- if (ret)
++ if (ret) {
+ pr_err("Mec Firmware load failed\n");
++ return ret;
++ }
+
+- return ret;
++ hwmgr->reload_fw = 0;
++
++ return 0;
++}
++
++static int smu8_start_smu(struct pp_hwmgr *hwmgr)
++{
++ struct amdgpu_device *adev = hwmgr->adev;
++
++ uint32_t index = SMN_MP1_SRAM_START_ADDR +
++ SMU8_FIRMWARE_HEADER_LOCATION +
++ offsetof(struct SMU8_Firmware_Header, Version);
++
++
++ if (hwmgr == NULL || hwmgr->device == NULL)
++ return -EINVAL;
++
++ cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index);
++ hwmgr->smu_version = cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA);
++ adev->pm.fw_version = hwmgr->smu_version >> 8;
++
++ return smu8_request_smu_load_fw(hwmgr);
+ }
+
+ static int smu8_smu_init(struct pp_hwmgr *hwmgr)
+--
+2.7.4
+
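To summarize the flag handling introduced here, a compressed view of the reload_fw life cycle as it reads after this patch (condensed from the hunks above, not literal function bodies):

    /* hwmgr_suspend(): force a reload on the next resume. */
    hwmgr->reload_fw = true;

    /* smu7_request_smu_load_fw(), simplified: */
    if (!hwmgr->reload_fw)
            return 0;               /* firmware already live, nothing to do */

    /* ...build the TOC and ask the SMU to load the gfx/sdma ucode... */
    smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load);

    if (!smu7_check_fw_load_finish(hwmgr, fw_to_load))
            hwmgr->reload_fw = 0;   /* success: skip duplicate loads */
    else
            pr_err("SMU load firmware failed\n");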
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5513-drm-amdgpu-Remove-FW_LOAD_DIRECT-type-support-on-VI.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5513-drm-amdgpu-Remove-FW_LOAD_DIRECT-type-support-on-VI.patch
new file mode 100644
index 00000000..6c3896ac
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5513-drm-amdgpu-Remove-FW_LOAD_DIRECT-type-support-on-VI.patch
@@ -0,0 +1,422 @@
+From 0934c4c0a6d3ca034edbb1481656773c64aba774 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Sat, 29 Sep 2018 20:09:00 +0800
+Subject: [PATCH 5513/5725] drm/amdgpu: Remove FW_LOAD_DIRECT type support on
+ VI
+
+AMDGPU_FW_LOAD_DIRECT was used for bring-up.
+It no longer works, so remove the support.
+
+v2: Add warning message if user select
+ AMDGPU_FW_LOAD_DIRECT/AMDGPU_FW_LOAD_PSP on VI.
+
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 7 +-
+ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 249 ++++++------------------------
+ drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 57 +------
+ 3 files changed, 59 insertions(+), 254 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+index 59fe359..ccffc1d4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+@@ -297,10 +297,9 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
+ case CHIP_VEGAM:
+- if (!load_type)
+- return AMDGPU_FW_LOAD_DIRECT;
+- else
+- return AMDGPU_FW_LOAD_SMU;
++ if (load_type != AMDGPU_FW_LOAD_SMU)
++ pr_warning("%d is not supported on VI\n", load_type);
++ return AMDGPU_FW_LOAD_SMU;
+ case CHIP_VEGA10:
+ case CHIP_RAVEN:
+ case CHIP_VEGA12:
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index 09eab2b..7a8e211 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -1173,64 +1173,61 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
+ }
+ }
+
+- if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) {
+- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
+- info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
+- info->fw = adev->gfx.pfp_fw;
+- header = (const struct common_firmware_header *)info->fw->data;
+- adev->firmware.fw_size +=
+- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+-
+- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
+- info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
+- info->fw = adev->gfx.me_fw;
+- header = (const struct common_firmware_header *)info->fw->data;
+- adev->firmware.fw_size +=
+- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+-
+- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
+- info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
+- info->fw = adev->gfx.ce_fw;
+- header = (const struct common_firmware_header *)info->fw->data;
+- adev->firmware.fw_size +=
+- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
++ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
++ info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
++ info->fw = adev->gfx.pfp_fw;
++ header = (const struct common_firmware_header *)info->fw->data;
++ adev->firmware.fw_size +=
++ ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
++
++ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
++ info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
++ info->fw = adev->gfx.me_fw;
++ header = (const struct common_firmware_header *)info->fw->data;
++ adev->firmware.fw_size +=
++ ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
++
++ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
++ info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
++ info->fw = adev->gfx.ce_fw;
++ header = (const struct common_firmware_header *)info->fw->data;
++ adev->firmware.fw_size +=
++ ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
++
++ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
++ info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
++ info->fw = adev->gfx.rlc_fw;
++ header = (const struct common_firmware_header *)info->fw->data;
++ adev->firmware.fw_size +=
++ ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
++
++ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
++ info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
++ info->fw = adev->gfx.mec_fw;
++ header = (const struct common_firmware_header *)info->fw->data;
++ adev->firmware.fw_size +=
++ ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
++
++ /* we need account JT in */
++ cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
++ adev->firmware.fw_size +=
++ ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
+
+- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
+- info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
+- info->fw = adev->gfx.rlc_fw;
+- header = (const struct common_firmware_header *)info->fw->data;
++ if (amdgpu_sriov_vf(adev)) {
++ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_STORAGE];
++ info->ucode_id = AMDGPU_UCODE_ID_STORAGE;
++ info->fw = adev->gfx.mec_fw;
+ adev->firmware.fw_size +=
+- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
++ ALIGN(le32_to_cpu(64 * PAGE_SIZE), PAGE_SIZE);
++ }
+
+- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
+- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
+- info->fw = adev->gfx.mec_fw;
++ if (adev->gfx.mec2_fw) {
++ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
++ info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
++ info->fw = adev->gfx.mec2_fw;
+ header = (const struct common_firmware_header *)info->fw->data;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+-
+- /* we need account JT in */
+- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+- adev->firmware.fw_size +=
+- ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
+-
+- if (amdgpu_sriov_vf(adev)) {
+- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_STORAGE];
+- info->ucode_id = AMDGPU_UCODE_ID_STORAGE;
+- info->fw = adev->gfx.mec_fw;
+- adev->firmware.fw_size +=
+- ALIGN(le32_to_cpu(64 * PAGE_SIZE), PAGE_SIZE);
+- }
+-
+- if (adev->gfx.mec2_fw) {
+- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
+- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
+- info->fw = adev->gfx.mec2_fw;
+- header = (const struct common_firmware_header *)info->fw->data;
+- adev->firmware.fw_size +=
+- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+- }
+-
+ }
+
+ out:
+@@ -4180,45 +4177,11 @@ static void gfx_v8_0_rlc_start(struct amdgpu_device *adev)
+ udelay(50);
+ }
+
+-static int gfx_v8_0_rlc_load_microcode(struct amdgpu_device *adev)
+-{
+- const struct rlc_firmware_header_v2_0 *hdr;
+- const __le32 *fw_data;
+- unsigned i, fw_size;
+-
+- if (!adev->gfx.rlc_fw)
+- return -EINVAL;
+-
+- hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
+- amdgpu_ucode_print_rlc_hdr(&hdr->header);
+-
+- fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
+- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+- fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+-
+- WREG32(mmRLC_GPM_UCODE_ADDR, 0);
+- for (i = 0; i < fw_size; i++)
+- WREG32(mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
+- WREG32(mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
+-
+- return 0;
+-}
+-
+ static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
+ {
+- int r;
+-
+ gfx_v8_0_rlc_stop(adev);
+ gfx_v8_0_rlc_reset(adev);
+ gfx_v8_0_init_pg(adev);
+-
+- if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
+- /* legacy rlc firmware loading */
+- r = gfx_v8_0_rlc_load_microcode(adev);
+- if (r)
+- return r;
+- }
+-
+ gfx_v8_0_rlc_start(adev);
+
+ return 0;
+@@ -4244,63 +4207,6 @@ static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
+ udelay(50);
+ }
+
+-static int gfx_v8_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
+-{
+- const struct gfx_firmware_header_v1_0 *pfp_hdr;
+- const struct gfx_firmware_header_v1_0 *ce_hdr;
+- const struct gfx_firmware_header_v1_0 *me_hdr;
+- const __le32 *fw_data;
+- unsigned i, fw_size;
+-
+- if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
+- return -EINVAL;
+-
+- pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
+- adev->gfx.pfp_fw->data;
+- ce_hdr = (const struct gfx_firmware_header_v1_0 *)
+- adev->gfx.ce_fw->data;
+- me_hdr = (const struct gfx_firmware_header_v1_0 *)
+- adev->gfx.me_fw->data;
+-
+- amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
+- amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
+- amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
+-
+- gfx_v8_0_cp_gfx_enable(adev, false);
+-
+- /* PFP */
+- fw_data = (const __le32 *)
+- (adev->gfx.pfp_fw->data +
+- le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
+- fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
+- WREG32(mmCP_PFP_UCODE_ADDR, 0);
+- for (i = 0; i < fw_size; i++)
+- WREG32(mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
+- WREG32(mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
+-
+- /* CE */
+- fw_data = (const __le32 *)
+- (adev->gfx.ce_fw->data +
+- le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
+- fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
+- WREG32(mmCP_CE_UCODE_ADDR, 0);
+- for (i = 0; i < fw_size; i++)
+- WREG32(mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
+- WREG32(mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
+-
+- /* ME */
+- fw_data = (const __le32 *)
+- (adev->gfx.me_fw->data +
+- le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
+- fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
+- WREG32(mmCP_ME_RAM_WADDR, 0);
+- for (i = 0; i < fw_size; i++)
+- WREG32(mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
+- WREG32(mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
+-
+- return 0;
+-}
+-
+ static u32 gfx_v8_0_get_csb_size(struct amdgpu_device *adev)
+ {
+ u32 count = 0;
+@@ -4500,52 +4406,6 @@ static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
+ udelay(50);
+ }
+
+-static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev)
+-{
+- const struct gfx_firmware_header_v1_0 *mec_hdr;
+- const __le32 *fw_data;
+- unsigned i, fw_size;
+-
+- if (!adev->gfx.mec_fw)
+- return -EINVAL;
+-
+- gfx_v8_0_cp_compute_enable(adev, false);
+-
+- mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+- amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
+-
+- fw_data = (const __le32 *)
+- (adev->gfx.mec_fw->data +
+- le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
+- fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;
+-
+- /* MEC1 */
+- WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0);
+- for (i = 0; i < fw_size; i++)
+- WREG32(mmCP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data+i));
+- WREG32(mmCP_MEC_ME1_UCODE_ADDR, adev->gfx.mec_fw_version);
+-
+- /* Loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */
+- if (adev->gfx.mec2_fw) {
+- const struct gfx_firmware_header_v1_0 *mec2_hdr;
+-
+- mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
+- amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header);
+-
+- fw_data = (const __le32 *)
+- (adev->gfx.mec2_fw->data +
+- le32_to_cpu(mec2_hdr->header.ucode_array_offset_bytes));
+- fw_size = le32_to_cpu(mec2_hdr->header.ucode_size_bytes) / 4;
+-
+- WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0);
+- for (i = 0; i < fw_size; i++)
+- WREG32(mmCP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data+i));
+- WREG32(mmCP_MEC_ME2_UCODE_ADDR, adev->gfx.mec2_fw_version);
+- }
+-
+- return 0;
+-}
+-
+ /* KIQ functions */
+ static void gfx_v8_0_kiq_setting(struct amdgpu_ring *ring)
+ {
+@@ -4979,17 +4839,6 @@ static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
+ if (!(adev->flags & AMD_IS_APU))
+ gfx_v8_0_enable_gui_idle_interrupt(adev, false);
+
+- if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
+- /* legacy firmware loading */
+- r = gfx_v8_0_cp_gfx_load_microcode(adev);
+- if (r)
+- return r;
+-
+- r = gfx_v8_0_cp_compute_load_microcode(adev);
+- if (r)
+- return r;
+- }
+-
+ r = gfx_v8_0_kiq_resume(adev);
+ if (r)
+ return r;
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+index a0eafe5..24b5252 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+@@ -318,14 +318,13 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
+ if (adev->sdma.instance[i].feature_version >= 20)
+ adev->sdma.instance[i].burst_nop = true;
+
+- if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) {
+- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
+- info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
+- info->fw = adev->sdma.instance[i].fw;
+- header = (const struct common_firmware_header *)info->fw->data;
+- adev->firmware.fw_size +=
+- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+- }
++ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
++ info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
++ info->fw = adev->sdma.instance[i].fw;
++ header = (const struct common_firmware_header *)info->fw->data;
++ adev->firmware.fw_size +=
++ ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
++
+ }
+ out:
+ if (err) {
+@@ -786,42 +785,6 @@ static int sdma_v3_0_rlc_resume(struct amdgpu_device *adev)
+ }
+
+ /**
+- * sdma_v3_0_load_microcode - load the sDMA ME ucode
+- *
+- * @adev: amdgpu_device pointer
+- *
+- * Loads the sDMA0/1 ucode.
+- * Returns 0 for success, -EINVAL if the ucode is not available.
+- */
+-static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
+-{
+- const struct sdma_firmware_header_v1_0 *hdr;
+- const __le32 *fw_data;
+- u32 fw_size;
+- int i, j;
+-
+- /* halt the MEs */
+- sdma_v3_0_enable(adev, false);
+-
+- for (i = 0; i < adev->sdma.num_instances; i++) {
+- if (!adev->sdma.instance[i].fw)
+- return -EINVAL;
+- hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
+- amdgpu_ucode_print_sdma_hdr(&hdr->header);
+- fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+- fw_data = (const __le32 *)
+- (adev->sdma.instance[i].fw->data +
+- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+- WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
+- for (j = 0; j < fw_size; j++)
+- WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
+- WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
+- }
+-
+- return 0;
+-}
+-
+-/**
+ * sdma_v3_0_start - setup and start the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+@@ -833,12 +796,6 @@ static int sdma_v3_0_start(struct amdgpu_device *adev)
+ {
+ int r;
+
+- if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
+- r = sdma_v3_0_load_microcode(adev);
+- if (r)
+- return r;
+- }
+-
+ /* disable sdma engine before programing it */
+ sdma_v3_0_ctx_switch_enable(adev, false);
+ sdma_v3_0_enable(adev, false);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5514-drm-amdgpu-Don-t-reallocate-ucode-bo-when-suspend.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5514-drm-amdgpu-Don-t-reallocate-ucode-bo-when-suspend.patch
new file mode 100644
index 00000000..6aa529b0
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5514-drm-amdgpu-Don-t-reallocate-ucode-bo-when-suspend.patch
@@ -0,0 +1,30 @@
+From 77cffd6bacdf0dc19383cf5184f223777052c960 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Sat, 29 Sep 2018 15:30:11 +0800
+Subject: [PATCH 5514/5725] drm/amdgpu: Don't reallocate ucode bo when suspend
+
+The driver does not release the ucode memory on suspend, so
+there is no need to allocate the bo again on resume.
+
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+index ccffc1d4..ce4044a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+@@ -434,7 +434,7 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
+ return 0;
+ }
+
+- if (!adev->in_gpu_reset) {
++ if (!adev->in_gpu_reset && !adev->in_suspend) {
+ err = amdgpu_bo_create_kernel(adev, adev->firmware.fw_size, PAGE_SIZE,
+ amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
+ &adev->firmware.fw_buf,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5515-drm-amd-pp-Allocate-ucode-bo-in-request_smu_load_fw.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5515-drm-amd-pp-Allocate-ucode-bo-in-request_smu_load_fw.patch
new file mode 100644
index 00000000..50c912a5
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5515-drm-amd-pp-Allocate-ucode-bo-in-request_smu_load_fw.patch
@@ -0,0 +1,63 @@
+From 86b262c92458eccad955790d3002ad9b88f847d2 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Sat, 29 Sep 2018 15:42:52 +0800
+Subject: [PATCH 5515/5725] drm/amd/pp: Allocate ucode bo in
+ request_smu_load_fw
+
+The ucode bo is needed by request_smu_load_fw, and
+request_smu_load_fw may be called by gfx/sdma
+before smu hw init.
+So move amdgpu_ucode_init_bo from smu hw init
+to request_smu_load_fw.
+
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 3 ---
+ drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | 2 ++
+ drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c | 2 ++
+ 3 files changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+index 3bfe0cf..b5893d29 100644
+--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
++++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+@@ -124,9 +124,6 @@ static int pp_hw_init(void *handle)
+ struct amdgpu_device *adev = handle;
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+
+- if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
+- amdgpu_ucode_init_bo(adev);
+-
+ ret = hwmgr_hw_init(hwmgr);
+
+ if (ret)
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+index 80a567a..5db9215 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+@@ -348,6 +348,8 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
+ if (!hwmgr->reload_fw)
+ return 0;
+
++ amdgpu_ucode_init_bo(hwmgr->adev);
++
+ if (smu_data->soft_regs_start)
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ smu_data->soft_regs_start + smum_get_offsetof(hwmgr,
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
+index 7b3b66d..abbf2f2 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
+@@ -664,6 +664,8 @@ static int smu8_request_smu_load_fw(struct pp_hwmgr *hwmgr)
+ if (!hwmgr->reload_fw)
+ return 0;
+
++ amdgpu_ucode_init_bo(hwmgr->adev);
++
+ smu8_smu_populate_firmware_entries(hwmgr);
+
+ smu8_smu_construct_toc(hwmgr);
+--
+2.7.4
+
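Putting patches 5514 and 5515 together, the ucode bo management now looks roughly like this (a sketch, not the verbatim function bodies):

    /* smu7/smu8 request_smu_load_fw(): make sure the ucode bo exists first. */
    if (!hwmgr->reload_fw)
            return 0;
    amdgpu_ucode_init_bo(hwmgr->adev);

    /* amdgpu_ucode_init_bo(): allocate only on a cold init; on a GPU reset
     * or a resume the buffer created at first init is reused.
     */
    if (!adev->in_gpu_reset && !adev->in_suspend) {
            /* amdgpu_bo_create_kernel(..., &adev->firmware.fw_buf, ...) */
    }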
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5516-drm-amd-pp-Implement-load_firmware-interface.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5516-drm-amd-pp-Implement-load_firmware-interface.patch
new file mode 100644
index 00000000..0959adb0
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5516-drm-amd-pp-Implement-load_firmware-interface.patch
@@ -0,0 +1,45 @@
+From ea713cff5b773eb6d691379fc4e9c267be6e19ab Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Sat, 29 Sep 2018 20:28:14 +0800
+Subject: [PATCH 5516/5725] drm/amd/pp: Implement load_firmware interface
+
+With this interface, gfx/sdma can be initialized
+before the smu.
+
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+index b5893d29..8ca308e 100644
+--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
++++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+@@ -280,8 +280,23 @@ const struct amdgpu_ip_block_version pp_smu_ip_block =
+ .funcs = &pp_ip_funcs,
+ };
+
++/* This interface only be supported On Vi,
++ * because only smu7/8 can help to load gfx/sdma fw,
++ * smu need to be enabled before load other ip's fw.
++ * so call start smu to load smu7 fw and other ip's fw
++ */
+ static int pp_dpm_load_fw(void *handle)
+ {
++ struct pp_hwmgr *hwmgr = handle;
++
++ if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->start_smu)
++ return -EINVAL;
++
++ if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
++ pr_err("fw load failed\n");
++ return -EINVAL;
++ }
++
+ return 0;
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5517-drm-amdgpu-Add-fw-load-in-gfx_v8-and-sdma_v3.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5517-drm-amdgpu-Add-fw-load-in-gfx_v8-and-sdma_v3.patch
new file mode 100644
index 00000000..5a183d41
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5517-drm-amdgpu-Add-fw-load-in-gfx_v8-and-sdma_v3.patch
@@ -0,0 +1,61 @@
+From 5d8d5b66b50ce307973db42a622b5239b49add24 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Sat, 29 Sep 2018 15:57:31 +0800
+Subject: [PATCH 5517/5725] drm/amdgpu: Add fw load in gfx_v8 and sdma_v3
+
+gfx and sdma can now be initialized before the smu.
+
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 11 +++++++++++
+ drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 8 ++++++++
+ 2 files changed, 19 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index 7a8e211..f9e5990 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -4179,9 +4179,20 @@ static void gfx_v8_0_rlc_start(struct amdgpu_device *adev)
+
+ static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
+ {
++ int r;
++
+ gfx_v8_0_rlc_stop(adev);
+ gfx_v8_0_rlc_reset(adev);
+ gfx_v8_0_init_pg(adev);
++
++ if (adev->powerplay.pp_funcs->load_firmware) {
++ r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
++ if (r) {
++ pr_err("firmware loading failed\n");
++ return r;
++ }
++ }
++
+ gfx_v8_0_rlc_start(adev);
+
+ return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+index 24b5252..573284f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+@@ -796,6 +796,14 @@ static int sdma_v3_0_start(struct amdgpu_device *adev)
+ {
+ int r;
+
++ if (adev->powerplay.pp_funcs->load_firmware) {
++ r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
++ if (r) {
++ pr_err("firmware loading failed\n");
++ return r;
++ }
++ }
++
+ /* disable sdma engine before programing it */
+ sdma_v3_0_ctx_switch_enable(adev, false);
+ sdma_v3_0_enable(adev, false);
+--
+2.7.4
+
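Taken together with the load_firmware interface from patch 5516, the firmware load path on VI now runs roughly as below (an illustrative call chain, not literal code):

    /* gfx_v8_0_rlc_resume() / sdma_v3_0_start() */
    adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
            /* -> pp_dpm_load_fw()                                    */
            /* -> hwmgr->smumgr_funcs->start_smu()                    */
            /* -> smu7_request_smu_load_fw(): allocates the ucode bo, */
            /*    builds the TOC, asks the SMU to load the gfx/sdma   */
            /*    ucode and verifies UcodeLoadStatus                  */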
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5518-drm-amdgpu-Change-VI-gfx-sdma-smu-init-sequence.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5518-drm-amdgpu-Change-VI-gfx-sdma-smu-init-sequence.patch
new file mode 100644
index 00000000..01a930b3
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5518-drm-amdgpu-Change-VI-gfx-sdma-smu-init-sequence.patch
@@ -0,0 +1,122 @@
+From 3310542a4df2ba6ec036c96ee167b601a0304b3b Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Sun, 30 Sep 2018 17:35:12 +0800
+Subject: [PATCH 5518/5725] drm/amdgpu: Change VI gfx/sdma/smu init sequence
+
+Initialize gfx/sdma before the dpm features are enabled.
+
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/vi.c | 24 ++++++++++++------------
+ 1 file changed, 12 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
+index f9c62a7..1bda26a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vi.c
++++ b/drivers/gpu/drm/amd/amdgpu/vi.c
+@@ -1601,16 +1601,18 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
+ amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block);
+ amdgpu_device_ip_block_add(adev, &iceland_ih_ip_block);
++ amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
++ amdgpu_device_ip_block_add(adev, &sdma_v2_4_ip_block);
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+- amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
+- amdgpu_device_ip_block_add(adev, &sdma_v2_4_ip_block);
+ break;
+ case CHIP_FIJI:
+ amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block);
+ amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
++ amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
++ amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+ if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+@@ -1620,8 +1622,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
+ #endif
+ else
+ amdgpu_device_ip_block_add(adev, &dce_v10_1_ip_block);
+- amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
+- amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
+ if (!amdgpu_sriov_vf(adev)) {
+ amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
+@@ -1631,6 +1631,8 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
+ amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
++ amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
++ amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+ if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+@@ -1640,8 +1642,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
+ #endif
+ else
+ amdgpu_device_ip_block_add(adev, &dce_v10_0_ip_block);
+- amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
+- amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
+ if (!amdgpu_sriov_vf(adev)) {
+ amdgpu_device_ip_block_add(adev, &uvd_v5_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
+@@ -1654,6 +1654,8 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
+ amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block);
+ amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
++ amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
++ amdgpu_device_ip_block_add(adev, &sdma_v3_1_ip_block);
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+@@ -1663,8 +1665,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
+ #endif
+ else
+ amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block);
+- amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
+- amdgpu_device_ip_block_add(adev, &sdma_v3_1_ip_block);
+ amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block);
+ amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
+ break;
+@@ -1672,6 +1672,8 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
+ amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
++ amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
++ amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+@@ -1681,8 +1683,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
+ #endif
+ else
+ amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
+- amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
+- amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vce_v3_1_ip_block);
+ #if defined(CONFIG_DRM_AMD_ACP)
+@@ -1693,6 +1693,8 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
+ amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
++ amdgpu_device_ip_block_add(adev, &gfx_v8_1_ip_block);
++ amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+@@ -1702,8 +1704,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
+ #endif
+ else
+ amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
+- amdgpu_device_ip_block_add(adev, &gfx_v8_1_ip_block);
+- amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &uvd_v6_2_ip_block);
+ amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
+ #if defined(CONFIG_DRM_AMD_ACP)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5519-drm-amdgpu-skip-IB-tests-for-KIQ-in-general.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5519-drm-amdgpu-skip-IB-tests-for-KIQ-in-general.patch
new file mode 100644
index 00000000..e2890c63
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5519-drm-amdgpu-skip-IB-tests-for-KIQ-in-general.patch
@@ -0,0 +1,43 @@
+From a295d107e3c3f20796832bca017558b4b9fecbde Mon Sep 17 00:00:00 2001
+From: Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>
+Date: Wed, 3 Oct 2018 20:45:11 +0530
+Subject: [PATCH 5519/5725] drm/amdgpu: skip IB tests for KIQ in general
+
+[Why]
+1. We never submit IBs to the KIQ.
+2. The ring test also passes without the KIQ ring.
+3. Skipping it improves amdgpu's resume time by
+   around 500ms.
+
+[How]
+Skip IB tests for the KIQ ring type.
+
+Signed-off-by: Shirish S <shirish.s@amd.com>
+Signed-off-by: Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+index 1d86c3b..47c8257 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+@@ -361,6 +361,14 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
+ if (!ring || !ring->ready)
+ continue;
+
++ /* skip IB tests for KIQ in general for the below reasons:
++ * 1. We never submit IBs to the KIQ
++ * 2. KIQ doesn't use the EOP interrupts,
++ * we use some other CP interrupt.
++ */
++ if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
++ continue;
++
+ /* MM engine need more time */
+ if (ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
+ ring->funcs->type == AMDGPU_RING_TYPE_VCE ||
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5520-drm-amdgpu-Always-enable-fan-sensors-for-read.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5520-drm-amdgpu-Always-enable-fan-sensors-for-read.patch
new file mode 100644
index 00000000..0ed788e6
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5520-drm-amdgpu-Always-enable-fan-sensors-for-read.patch
@@ -0,0 +1,44 @@
+From 5f926114b36c3955224cdd57da8125559f8d3d8a Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Sat, 6 Oct 2018 00:11:25 +0800
+Subject: [PATCH 5520/5725] drm/amdgpu: Always enable fan sensors for read
+
+There is no need to set fan1_enable in order to read the fan sensors.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 10 ----------
+ 1 file changed, 10 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+index 9a487d2..811e09f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+@@ -1179,11 +1179,6 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
+ struct amdgpu_device *adev = dev_get_drvdata(dev);
+ int err;
+ u32 speed = 0;
+- u32 pwm_mode;
+-
+- pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+- if (pwm_mode != AMD_FAN_CTRL_MANUAL)
+- return -ENODATA;
+
+ /* Can't adjust fan when the card is off */
+ if ((adev->flags & AMD_IS_PX) &&
+@@ -1246,11 +1241,6 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
+ struct amdgpu_device *adev = dev_get_drvdata(dev);
+ int err;
+ u32 rpm = 0;
+- u32 pwm_mode;
+-
+- pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+- if (pwm_mode != AMD_FAN_CTRL_MANUAL)
+- return -ENODATA;
+
+ /* Can't adjust fan when the card is off */
+ if ((adev->flags & AMD_IS_PX) &&
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5521-drm-amdgpu-remove-the-intterupt-handling-for-the-KIQ.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5521-drm-amdgpu-remove-the-intterupt-handling-for-the-KIQ.patch
new file mode 100644
index 00000000..c43d5d4e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5521-drm-amdgpu-remove-the-intterupt-handling-for-the-KIQ.patch
@@ -0,0 +1,225 @@
+From 7c822d6891b6c67215e93282efc5f912333a0025 Mon Sep 17 00:00:00 2001
+From: Shirish S <shirish.s@amd.com>
+Date: Fri, 5 Oct 2018 10:54:21 +0530
+Subject: [PATCH 5521/5725] drm/amdgpu: remove the intterupt handling for the
+ KIQ events
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+[Why]
+1. We never submit IBs to the KIQ.
+2. There is a ~500ms delay spent in the KIQ during amdgpu resume,
+   which points toward the interrupts not working correctly.
+
+[How]
+Remove the interrupt handling for the KIQ.
+
+Signed-off-by: Shirish S <shirish.s@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 59 ---------------------------
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 75 -----------------------------------
+ 2 files changed, 134 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index f9e5990..575f363 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -2044,11 +2044,6 @@ static int gfx_v8_0_sw_init(void *handle)
+ adev->gfx.mec.num_pipe_per_mec = 4;
+ adev->gfx.mec.num_queue_per_pipe = 8;
+
+- /* KIQ event */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_INT_IB2, &adev->gfx.kiq.irq);
+- if (r)
+- return r;
+-
+ /* EOP Event */
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_END_OF_PIPE, &adev->gfx.eop_irq);
+ if (r)
+@@ -6890,52 +6885,6 @@ static int gfx_v8_0_sq_irq(struct amdgpu_device *adev,
+ return 0;
+ }
+
+-static int gfx_v8_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
+- struct amdgpu_irq_src *src,
+- unsigned int type,
+- enum amdgpu_interrupt_state state)
+-{
+- struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
+-
+- switch (type) {
+- case AMDGPU_CP_KIQ_IRQ_DRIVER0:
+- WREG32_FIELD(CPC_INT_CNTL, GENERIC2_INT_ENABLE,
+- state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
+- if (ring->me == 1)
+- WREG32_FIELD_OFFSET(CP_ME1_PIPE0_INT_CNTL,
+- ring->pipe,
+- GENERIC2_INT_ENABLE,
+- state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
+- else
+- WREG32_FIELD_OFFSET(CP_ME2_PIPE0_INT_CNTL,
+- ring->pipe,
+- GENERIC2_INT_ENABLE,
+- state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
+- break;
+- default:
+- BUG(); /* kiq only support GENERIC2_INT now */
+- break;
+- }
+- return 0;
+-}
+-
+-static int gfx_v8_0_kiq_irq(struct amdgpu_device *adev,
+- struct amdgpu_irq_src *source,
+- struct amdgpu_iv_entry *entry)
+-{
+- u8 me_id, pipe_id, queue_id;
+- struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
+-
+- me_id = (entry->ring_id & 0x0c) >> 2;
+- pipe_id = (entry->ring_id & 0x03) >> 0;
+- queue_id = (entry->ring_id & 0x70) >> 4;
+- DRM_DEBUG("IH: CPC GENERIC2_INT, me:%d, pipe:%d, queue:%d\n",
+- me_id, pipe_id, queue_id);
+-
+- amdgpu_fence_process(ring);
+- return 0;
+-}
+-
+ static const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
+ .name = "gfx_v8_0",
+ .early_init = gfx_v8_0_early_init,
+@@ -7088,11 +7037,6 @@ static const struct amdgpu_irq_src_funcs gfx_v8_0_priv_inst_irq_funcs = {
+ .process = gfx_v8_0_priv_inst_irq,
+ };
+
+-static const struct amdgpu_irq_src_funcs gfx_v8_0_kiq_irq_funcs = {
+- .set = gfx_v8_0_kiq_set_interrupt_state,
+- .process = gfx_v8_0_kiq_irq,
+-};
+-
+ static const struct amdgpu_irq_src_funcs gfx_v8_0_cp_ecc_error_irq_funcs = {
+ .set = gfx_v8_0_set_cp_ecc_int_state,
+ .process = gfx_v8_0_cp_ecc_error_irq,
+@@ -7114,9 +7058,6 @@ static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev)
+ adev->gfx.priv_inst_irq.num_types = 1;
+ adev->gfx.priv_inst_irq.funcs = &gfx_v8_0_priv_inst_irq_funcs;
+
+- adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST;
+- adev->gfx.kiq.irq.funcs = &gfx_v8_0_kiq_irq_funcs;
+-
+ adev->gfx.cp_ecc_error_irq.num_types = 1;
+ adev->gfx.cp_ecc_error_irq.funcs = &gfx_v8_0_cp_ecc_error_irq_funcs;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index a71c27e..ea6c1a6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -1712,11 +1712,6 @@ static int gfx_v9_0_sw_init(void *handle)
+ adev->gfx.mec.num_pipe_per_mec = 4;
+ adev->gfx.mec.num_queue_per_pipe = 8;
+
+- /* KIQ event */
+- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_IB2_INTERRUPT_PKT, &adev->gfx.kiq.irq);
+- if (r)
+- return r;
+-
+ /* EOP Event */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
+ if (r)
+@@ -4721,68 +4716,6 @@ static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
+ return 0;
+ }
+
+-static int gfx_v9_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
+- struct amdgpu_irq_src *src,
+- unsigned int type,
+- enum amdgpu_interrupt_state state)
+-{
+- uint32_t tmp, target;
+- struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
+-
+- if (ring->me == 1)
+- target = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
+- else
+- target = SOC15_REG_OFFSET(GC, 0, mmCP_ME2_PIPE0_INT_CNTL);
+- target += ring->pipe;
+-
+- switch (type) {
+- case AMDGPU_CP_KIQ_IRQ_DRIVER0:
+- if (state == AMDGPU_IRQ_STATE_DISABLE) {
+- tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
+- tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
+- GENERIC2_INT_ENABLE, 0);
+- WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);
+-
+- tmp = RREG32(target);
+- tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
+- GENERIC2_INT_ENABLE, 0);
+- WREG32(target, tmp);
+- } else {
+- tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
+- tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
+- GENERIC2_INT_ENABLE, 1);
+- WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);
+-
+- tmp = RREG32(target);
+- tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
+- GENERIC2_INT_ENABLE, 1);
+- WREG32(target, tmp);
+- }
+- break;
+- default:
+- BUG(); /* kiq only support GENERIC2_INT now */
+- break;
+- }
+- return 0;
+-}
+-
+-static int gfx_v9_0_kiq_irq(struct amdgpu_device *adev,
+- struct amdgpu_irq_src *source,
+- struct amdgpu_iv_entry *entry)
+-{
+- u8 me_id, pipe_id, queue_id;
+- struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
+-
+- me_id = (entry->ring_id & 0x0c) >> 2;
+- pipe_id = (entry->ring_id & 0x03) >> 0;
+- queue_id = (entry->ring_id & 0x70) >> 4;
+- DRM_DEBUG("IH: CPC GENERIC2_INT, me:%d, pipe:%d, queue:%d\n",
+- me_id, pipe_id, queue_id);
+-
+- amdgpu_fence_process(ring);
+- return 0;
+-}
+-
+ static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
+ .name = "gfx_v9_0",
+ .early_init = gfx_v9_0_early_init,
+@@ -4933,11 +4866,6 @@ static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
+ adev->gfx.compute_ring[i].funcs = &gfx_v9_0_ring_funcs_compute;
+ }
+
+-static const struct amdgpu_irq_src_funcs gfx_v9_0_kiq_irq_funcs = {
+- .set = gfx_v9_0_kiq_set_interrupt_state,
+- .process = gfx_v9_0_kiq_irq,
+-};
+-
+ static const struct amdgpu_irq_src_funcs gfx_v9_0_eop_irq_funcs = {
+ .set = gfx_v9_0_set_eop_interrupt_state,
+ .process = gfx_v9_0_eop_irq,
+@@ -4963,9 +4891,6 @@ static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
+
+ adev->gfx.priv_inst_irq.num_types = 1;
+ adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs;
+-
+- adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST;
+- adev->gfx.kiq.irq.funcs = &gfx_v9_0_kiq_irq_funcs;
+ }
+
+ static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5522-drm-amdgpu-fix-AGP-location-with-VRAM-at-0x0.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5522-drm-amdgpu-fix-AGP-location-with-VRAM-at-0x0.patch
new file mode 100644
index 00000000..288f7c55
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5522-drm-amdgpu-fix-AGP-location-with-VRAM-at-0x0.patch
@@ -0,0 +1,43 @@
+From 70945135305d8980b95b3c848b7ac480946e2c99 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Thu, 4 Oct 2018 10:27:48 +0200
+Subject: [PATCH 5522/5725] drm/amdgpu: fix AGP location with VRAM at 0x0
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+That also simplifies handling quite a bit.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 7 ++-----
+ 1 file changed, 2 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+index 9a5b252..999e159 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+@@ -200,16 +200,13 @@ void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
+ }
+
+ if (size_bf > size_af) {
+- mc->agp_start = mc->fb_start > mc->gart_start ?
+- mc->gart_end + 1 : 0;
++ mc->agp_start = (mc->fb_start - size_bf) & sixteen_gb_mask;
+ mc->agp_size = size_bf;
+ } else {
+- mc->agp_start = (mc->fb_start > mc->gart_start ?
+- mc->fb_end : mc->gart_end) + 1,
++ mc->agp_start = ALIGN(mc->fb_end + 1, sixteen_gb);
+ mc->agp_size = size_af;
+ }
+
+- mc->agp_start = ALIGN(mc->agp_start, sixteen_gb);
+ mc->agp_end = mc->agp_start + mc->agp_size - 1;
+ dev_info(adev->dev, "AGP: %lluM 0x%016llX - 0x%016llX\n",
+ mc->agp_size >> 20, mc->agp_start, mc->agp_end);
+--
+2.7.4
+
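A minimal standalone sketch of the placement rule introduced above, using purely illustrative numbers (8 GB of VRAM starting at 0x0); it is not part of the patch and only mirrors the ALIGN-to-16GB behaviour of the new else branch:

/* illustrative sketch only, not the driver code */
#include <stdint.h>
#include <stdio.h>

#define SIXTEEN_GB	(1ULL << 34)
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	uint64_t fb_start = 0x0;             /* VRAM at 0x0 (assumed)        */
	uint64_t fb_end = 0x1FFFFFFFFULL;    /* 8 GB framebuffer (assumed)   */
	uint64_t agp_start = ALIGN_UP(fb_end + 1, SIXTEEN_GB);

	/* the AGP window now starts right above the framebuffer, 16 GB aligned */
	printf("fb_start 0x%llX, agp_start 0x%llX\n",
	       (unsigned long long)fb_start, (unsigned long long)agp_start);
	return 0;
}

As far as the removed hunk shows, the old code in this case derived agp_start from gart_end and aligned it afterwards, which no longer matched the gap between fb_end and the GART in which size_af had been measured.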
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5523-drm-amdgpu-fix-incorrect-use-of-amdgpu_irq_add_id-in.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5523-drm-amdgpu-fix-incorrect-use-of-amdgpu_irq_add_id-in.patch
new file mode 100644
index 00000000..93c13ef3
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5523-drm-amdgpu-fix-incorrect-use-of-amdgpu_irq_add_id-in.patch
@@ -0,0 +1,100 @@
+From aeb1595cc719f5cced013633123dee1c0be7dda5 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Wed, 26 Sep 2018 16:15:44 +0200
+Subject: [PATCH 5523/5725] drm/amdgpu: fix incorrect use of amdgpu_irq_add_id
+ in si_dma.c
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Adding a second irq source because of a different src_id is actually a
+bug.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h | 4 ----
+ drivers/gpu/drm/amd/amdgpu/si_dma.c | 27 ++++++++-------------------
+ 2 files changed, 8 insertions(+), 23 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+index d17503f..500113e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+@@ -46,10 +46,6 @@ struct amdgpu_sdma_instance {
+
+ struct amdgpu_sdma {
+ struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
+-#ifdef CONFIG_DRM_AMDGPU_SI
+- //SI DMA has a difference trap irq number for the second engine
+- struct amdgpu_irq_src trap_irq_1;
+-#endif
+ struct amdgpu_irq_src trap_irq;
+ struct amdgpu_irq_src illegal_inst_irq;
+ int num_instances;
+diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
+index 6cff583..47e5dde 100644
+--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
++++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
+@@ -516,12 +516,14 @@ static int si_dma_sw_init(void *handle)
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ /* DMA0 trap event */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 224, &adev->sdma.trap_irq);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 224,
++ &adev->sdma.trap_irq);
+ if (r)
+ return r;
+
+ /* DMA1 trap event */
+- r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 244, &adev->sdma.trap_irq_1);
++ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 244,
++ &adev->sdma.trap_irq);
+ if (r)
+ return r;
+
+@@ -663,17 +665,10 @@ static int si_dma_process_trap_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+ {
+- amdgpu_fence_process(&adev->sdma.instance[0].ring);
+-
+- return 0;
+-}
+-
+-static int si_dma_process_trap_irq_1(struct amdgpu_device *adev,
+- struct amdgpu_irq_src *source,
+- struct amdgpu_iv_entry *entry)
+-{
+- amdgpu_fence_process(&adev->sdma.instance[1].ring);
+-
++ if (entry->src_id == 224)
++ amdgpu_fence_process(&adev->sdma.instance[0].ring);
++ else
++ amdgpu_fence_process(&adev->sdma.instance[1].ring);
+ return 0;
+ }
+
+@@ -803,11 +798,6 @@ static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs = {
+ .process = si_dma_process_trap_irq,
+ };
+
+-static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs_1 = {
+- .set = si_dma_set_trap_irq_state,
+- .process = si_dma_process_trap_irq_1,
+-};
+-
+ static const struct amdgpu_irq_src_funcs si_dma_illegal_inst_irq_funcs = {
+ .process = si_dma_process_illegal_inst_irq,
+ };
+@@ -816,7 +806,6 @@ static void si_dma_set_irq_funcs(struct amdgpu_device *adev)
+ {
+ adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
+ adev->sdma.trap_irq.funcs = &si_dma_trap_irq_funcs;
+- adev->sdma.trap_irq_1.funcs = &si_dma_trap_irq_funcs_1;
+ adev->sdma.illegal_inst_irq.funcs = &si_dma_illegal_inst_irq_funcs;
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5524-drm-amdgpu-vcn-Remove-unused-code.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5524-drm-amdgpu-vcn-Remove-unused-code.patch
new file mode 100644
index 00000000..7c13ff4b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5524-drm-amdgpu-vcn-Remove-unused-code.patch
@@ -0,0 +1,30 @@
+From 76cb14f0d10e25bcccb421b3bef873c3257e8dec Mon Sep 17 00:00:00 2001
+From: James Zhu <James.Zhu@amd.com>
+Date: Mon, 1 Oct 2018 18:18:59 -0400
+Subject: [PATCH 5524/5725] drm/amdgpu/vcn: Remove unused code
+
+A following WREG32_SOC15_DPG_MODE call overwrites register
+mmUVD_CGC_CTRL anyway, so this write can be removed.
+
+Signed-off-by: James Zhu <James.Zhu@amd.com>
+Reviewed-by: Leo Liu <leo.liu@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index 2eec119..9441ec4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -601,8 +601,6 @@ static void vcn_v1_0_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t s
+ reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+ reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
+ reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
+- WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel);
+-
+ reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
+ UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
+ UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5525-drm-amdgpu-vcn-fix-dpg-pause-mode-hang-issue.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5525-drm-amdgpu-vcn-fix-dpg-pause-mode-hang-issue.patch
new file mode 100644
index 00000000..808eb6a3
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5525-drm-amdgpu-vcn-fix-dpg-pause-mode-hang-issue.patch
@@ -0,0 +1,73 @@
+From 45d3b650600862799fab1e5c0f9badb35611bbf6 Mon Sep 17 00:00:00 2001
+From: James Zhu <James.Zhu@amd.com>
+Date: Tue, 2 Oct 2018 11:44:50 -0400
+Subject: [PATCH 5525/5725] drm/amdgpu/vcn: fix dpg pause mode hang issue
+
+Use mmUVD_SCRATCH2 to track the decode ring write pointer.
+This helps avoid a hang in DPG pause mode.
+
+Signed-off-by: James Zhu <James.Zhu@amd.com>
+Acked-by: Leo Liu <leo.liu@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 4 ++--
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 8 ++++++++
+ 2 files changed, 10 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+index e1a85f2..7fda071 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+@@ -263,7 +263,7 @@ static int amdgpu_vcn_pause_dpg_mode(struct amdgpu_device *adev,
+
+ ring = &adev->vcn.ring_dec;
+ WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
+- lower_32_bits(ring->wptr) | 0x80000000);
++ RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2));
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
+ UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+@@ -320,7 +320,7 @@ static int amdgpu_vcn_pause_dpg_mode(struct amdgpu_device *adev,
+
+ ring = &adev->vcn.ring_dec;
+ WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
+- lower_32_bits(ring->wptr) | 0x80000000);
++ RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2));
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
+ UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index 9441ec4..0ff3ff6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -873,6 +873,8 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);
+
++ WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0);
++
+ ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
+ WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
+ lower_32_bits(ring->wptr));
+@@ -1049,6 +1051,8 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);
+
++ WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0);
++
+ ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
+ WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
+ lower_32_bits(ring->wptr));
+@@ -1215,6 +1219,10 @@ static void vcn_v1_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
+ {
+ struct amdgpu_device *adev = ring->adev;
+
++ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
++ WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2,
++ lower_32_bits(ring->wptr) | 0x80000000);
++
+ WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5526-drm-amdgpu-vcn-Replace-value-with-defined-macro.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5526-drm-amdgpu-vcn-Replace-value-with-defined-macro.patch
new file mode 100644
index 00000000..bd72c8b9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5526-drm-amdgpu-vcn-Replace-value-with-defined-macro.patch
@@ -0,0 +1,110 @@
+From 346fdf489332d380d284fcd37926c0c09e19908c Mon Sep 17 00:00:00 2001
+From: James Zhu <James.Zhu@amd.com>
+Date: Tue, 2 Oct 2018 12:56:32 -0400
+Subject: [PATCH 5526/5725] drm/amdgpu/vcn: Replace value with defined macro
+
+Replace hard-coded values with defined macros to make
+the code more readable.
+
+Signed-off-by: James Zhu <James.Zhu@amd.com>
+Acked-by: Leo Liu <leo.liu@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 11 +++++++----
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 19 +++++++++++--------
+ 2 files changed, 18 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+index 7fda071..b608f85 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+@@ -309,14 +309,17 @@ static int amdgpu_vcn_pause_dpg_mode(struct amdgpu_device *adev,
+ /* Restore */
+ ring = &adev->vcn.ring_jpeg;
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
+- WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, 0x00000001L | 0x00000002L);
++ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
++ UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK |
++ UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
+- lower_32_bits(ring->gpu_addr));
++ lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
+- upper_32_bits(ring->gpu_addr));
++ upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, ring->wptr);
+ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, ring->wptr);
+- WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, 0x00000002L);
++ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
++ UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
+
+ ring = &adev->vcn.ring_dec;
+ WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index 0ff3ff6..a62f0ce 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -810,12 +810,12 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
+
+ for (j = 0; j < 100; ++j) {
+ status = RREG32_SOC15(UVD, 0, mmUVD_STATUS);
+- if (status & 2)
++ if (status & UVD_STATUS__IDLE)
+ break;
+ mdelay(10);
+ }
+ r = 0;
+- if (status & 2)
++ if (status & UVD_STATUS__IDLE)
+ break;
+
+ DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
+@@ -898,12 +898,13 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
+
+ ring = &adev->vcn.ring_jpeg;
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
+- WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
++ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK |
++ UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, 0);
+ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, 0);
+- WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, 0x00000002L);
++ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
+
+ /* initialize wptr */
+ ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
+@@ -1122,8 +1123,9 @@ static int vcn_v1_0_stop_dpg_mode(struct amdgpu_device *adev)
+ {
+ int ret_code;
+
+- /* Wait for power status to be 1 */
+- SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 0x1,
++ /* Wait for power status to be UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF */
++ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
++ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+
+ /* disable dynamic power gating mode */
+@@ -1149,7 +1151,7 @@ static bool vcn_v1_0_is_idle(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+- return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == 0x2);
++ return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE);
+ }
+
+ static int vcn_v1_0_wait_for_idle(void *handle)
+@@ -1157,7 +1159,8 @@ static int vcn_v1_0_wait_for_idle(void *handle)
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int ret = 0;
+
+- SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, 0x2, 0x2, ret);
++ SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE,
++ UVD_STATUS__IDLE, ret);
+
+ return ret;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5527-drm-amdgpu-vcn-Correct-VCN-cache-window-definition.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5527-drm-amdgpu-vcn-Correct-VCN-cache-window-definition.patch
new file mode 100644
index 00000000..e3b2bcb2
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5527-drm-amdgpu-vcn-Correct-VCN-cache-window-definition.patch
@@ -0,0 +1,132 @@
+From 8c4c8f386001c2e4cda7d1768540119982d45122 Mon Sep 17 00:00:00 2001
+From: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+Date: Wed, 9 Jan 2019 21:15:31 +0530
+Subject: [PATCH 5527/5725] drm/amdgpu/vcn: Correct VCN cache window definition
+
+Correct the VCN cache window definition. The old one
+was reused from UVD and is not fully correct.
+
+Signed-off-by: James Zhu <James.Zhu@amd.com>
+Acked-by: Leo Liu <leo.liu@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+Signed-off-by: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 3 +--
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 6 +++---
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 24 ++++++++++++++----------
+ 3 files changed, 18 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+index b608f85..9aba3af 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+@@ -121,8 +121,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
+ version_major, version_minor, family_id);
+ }
+
+- bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
+- + AMDGPU_VCN_SESSION_SIZE * 40;
++ bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
+ bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
+ r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+index 0b88a46..a0ad19a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+@@ -24,9 +24,9 @@
+ #ifndef __AMDGPU_VCN_H__
+ #define __AMDGPU_VCN_H__
+
+-#define AMDGPU_VCN_STACK_SIZE (200*1024)
+-#define AMDGPU_VCN_HEAP_SIZE (256*1024)
+-#define AMDGPU_VCN_SESSION_SIZE (50*1024)
++#define AMDGPU_VCN_STACK_SIZE (128*1024)
++#define AMDGPU_VCN_CONTEXT_SIZE (512*1024)
++
+ #define AMDGPU_VCN_FIRMWARE_OFFSET 256
+ #define AMDGPU_VCN_MAX_ENC_RINGS 3
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index a62f0ce..9cf544d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -278,6 +278,7 @@ static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ uint32_t offset;
+
++ /* cache window 0: fw */
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
+@@ -295,6 +296,7 @@ static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
+ AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+ }
+
++ /* cache window 1: stack */
+ WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);
+
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
+@@ -302,15 +304,15 @@ static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.gpu_addr + offset));
+ WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
+- WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_HEAP_SIZE);
++ WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
+
++ /* cache window 2: context */
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
+- lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE));
++ lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
+- upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE));
++ upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
+ WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
+- WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2,
+- AMDGPU_VCN_STACK_SIZE + (AMDGPU_VCN_SESSION_SIZE * 40));
++ WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
+
+ WREG32_SOC15(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+@@ -325,6 +327,7 @@ static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev)
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ uint32_t offset;
+
++ /* cache window 0: fw */
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo),
+@@ -347,24 +350,25 @@ static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev)
+
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size, 0xFFFFFFFF, 0);
+
++ /* cache window 1: stack */
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.gpu_addr + offset), 0xFFFFFFFF, 0);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.gpu_addr + offset), 0xFFFFFFFF, 0);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0,
+ 0xFFFFFFFF, 0);
+- WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_HEAP_SIZE,
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE,
+ 0xFFFFFFFF, 0);
+
++ /* cache window 2: context */
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
+- lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE),
++ lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
+ 0xFFFFFFFF, 0);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
+- upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE),
++ upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
+ 0xFFFFFFFF, 0);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0, 0xFFFFFFFF, 0);
+- WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE2,
+- AMDGPU_VCN_STACK_SIZE + (AMDGPU_VCN_SESSION_SIZE * 40),
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE,
+ 0xFFFFFFFF, 0);
+
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5528-drm-amdgpu-add-CP_DEBUG-register-definition-for-GC9..patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5528-drm-amdgpu-add-CP_DEBUG-register-definition-for-GC9..patch
new file mode 100644
index 00000000..d790d887
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5528-drm-amdgpu-add-CP_DEBUG-register-definition-for-GC9..patch
@@ -0,0 +1,34 @@
+From 5bb7aa8a7789356b29e4420a7fe7eddb8651362c Mon Sep 17 00:00:00 2001
+From: Tao Zhou <tao.zhou1@amd.com>
+Date: Tue, 9 Oct 2018 11:30:36 +0800
+Subject: [PATCH 5528/5725] drm/amdgpu: add CP_DEBUG register definition for
+ GC9.0
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Add CP_DEBUG register definition.
+
+Change-Id: I38b0e5accc9ed2f516f409f1ffd88a9690356083
+Signed-off-by: Tao Zhou <tao.zhou1@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h
+index 4ce090d..529b37d 100644
+--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h
++++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h
+@@ -2449,6 +2449,8 @@
+ #define mmCP_ECC_FIRSTOCCURRENCE_RING2_BASE_IDX 0
+ #define mmGB_EDC_MODE 0x107e
+ #define mmGB_EDC_MODE_BASE_IDX 0
++#define mmCP_DEBUG 0x107f
++#define mmCP_DEBUG_BASE_IDX 0
+ #define mmCP_CPF_DEBUG 0x1080
+ #define mmCP_PQ_WPTR_POLL_CNTL 0x1083
+ #define mmCP_PQ_WPTR_POLL_CNTL_BASE_IDX 0
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5529-drm-amdgpu-fix-CPDMA-hang-in-PRT-mode.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5529-drm-amdgpu-fix-CPDMA-hang-in-PRT-mode.patch
new file mode 100644
index 00000000..09640d11
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5529-drm-amdgpu-fix-CPDMA-hang-in-PRT-mode.patch
@@ -0,0 +1,64 @@
+From 2639667f7ab04e19fb8e831cfb3de0b3f52df971 Mon Sep 17 00:00:00 2001
+From: Tao Zhou <tao.zhou1@amd.com>
+Date: Tue, 9 Oct 2018 11:40:31 +0800
+Subject: [PATCH 5529/5725] drm/amdgpu: fix CPDMA hang in PRT mode
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Fix a CPDMA hang in PRT mode: set CPF_INT_DMA in reg CP_MECx_F32_INT_DIS for compute, and set DISABLE_GFX_HALT_ON_UTCL1_ERROR in reg CP_DEBUG for GFX.
+
+Affected ASICs: Vega10, Vega12, Raven.
+
+Change-Id: I1029c9cf39c82f8415af77012cb289b565ba996b
+Signed-off-by: Tao Zhou <tao.zhou1@amd.com>
+Tested-by: Yukun.Li <yukun1.li@amd.com>
+Tested-by: Maciej.Jesionowski <maciej.jesionowski@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 15 ++++++++++++---
+ 1 file changed, 12 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index ea6c1a6..178b375 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -133,7 +133,10 @@ static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800),
+- SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
+ };
+
+ static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
+@@ -173,7 +176,10 @@ static const struct soc15_reg_golden golden_settings_gc_9_1[] =
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
+- SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
+ };
+
+ static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] =
+@@ -247,7 +253,10 @@ static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] =
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff03ff, 0x01000107),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410),
+- SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000)
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
+ };
+
+ static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5530-drm-amdgpu-Limit-the-max-mc-address-to-hole-start.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5530-drm-amdgpu-Limit-the-max-mc-address-to-hole-start.patch
new file mode 100644
index 00000000..a35a0bbf
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5530-drm-amdgpu-Limit-the-max-mc-address-to-hole-start.patch
@@ -0,0 +1,53 @@
+From 85969a172fdad2bc4260ee3b7c0566772b9ab617 Mon Sep 17 00:00:00 2001
+From: Emily Deng <Emily.Deng@amd.com>
+Date: Wed, 10 Oct 2018 15:43:47 +0800
+Subject: [PATCH 5530/5725] drm/amdgpu: Limit the max mc address to hole start
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+When vram_start is 0, the GART range will be from 0x0000FFFF00000000
+to 0x0000FFFF1FFFFFFF, which will cause the engine to hang.
+
+So to avoid the hole, limit the max MC address to AMDGPU_GMC_HOLE_START.
+
+Signed-off-by: Emily Deng <Emily.Deng@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+index 999e159..d73367c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+@@ -146,6 +146,8 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
+ {
+ const uint64_t four_gb = 0x100000000ULL;
+ u64 size_af, size_bf;
++ /*To avoid the hole, limit the max mc address to AMDGPU_GMC_HOLE_START*/
++ u64 max_mc_address = min(adev->gmc.mc_mask, AMDGPU_GMC_HOLE_START - 1);
+
+ mc->gart_size += adev->pm.smu_prv_buffer_size;
+
+@@ -153,7 +155,7 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
+ * the GART base on a 4GB boundary as well.
+ */
+ size_bf = mc->fb_start;
+- size_af = adev->gmc.mc_mask + 1 - ALIGN(mc->fb_end + 1, four_gb);
++ size_af = max_mc_address + 1 - ALIGN(mc->fb_end + 1, four_gb);
+
+ if (mc->gart_size > max(size_bf, size_af)) {
+ dev_warn(adev->dev, "limiting GART\n");
+@@ -164,7 +166,7 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
+ (size_af < mc->gart_size))
+ mc->gart_start = 0;
+ else
+- mc->gart_start = mc->mc_mask - mc->gart_size + 1;
++ mc->gart_start = max_mc_address - mc->gart_size + 1;
+
+ mc->gart_start &= ~(four_gb - 1);
+ mc->gart_end = mc->gart_start + mc->gart_size - 1;
+--
+2.7.4
+
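A small self-contained sketch of the arithmetic behind the commit message above, assuming a 48-bit MC address mask, a 512 MB GART and AMDGPU_GMC_HOLE_START = 0x0000800000000000; it only reproduces the numbers quoted in the message and is not part of the patch:

/* illustrative sketch only, not the driver code */
#include <stdint.h>
#include <stdio.h>

#define FOUR_GB     0x100000000ULL
#define HOLE_START  0x0000800000000000ULL

int main(void)
{
	uint64_t mc_mask   = 0x0000FFFFFFFFFFFFULL; /* 48-bit MC space (assumed) */
	uint64_t gart_size = 512ULL << 20;          /* 512 MB GART (assumed)     */

	/* before the patch: GART placed at the top of the full MC mask */
	uint64_t old_start = (mc_mask - gart_size + 1) & ~(FOUR_GB - 1);
	/* after the patch: the top is capped at the start of the GMC hole */
	uint64_t max_mc    = mc_mask < HOLE_START - 1 ? mc_mask : HOLE_START - 1;
	uint64_t new_start = (max_mc - gart_size + 1) & ~(FOUR_GB - 1);

	printf("old: 0x%016llX - 0x%016llX (inside the hole)\n",
	       (unsigned long long)old_start,
	       (unsigned long long)(old_start + gart_size - 1));
	printf("new: 0x%016llX - 0x%016llX\n",
	       (unsigned long long)new_start,
	       (unsigned long long)(new_start + gart_size - 1));
	return 0;
}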
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5531-drm-amdgpu-Change-SI-CI-gfx-sdma-smu-init-sequence.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5531-drm-amdgpu-Change-SI-CI-gfx-sdma-smu-init-sequence.patch
new file mode 100644
index 00000000..cae65538
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5531-drm-amdgpu-Change-SI-CI-gfx-sdma-smu-init-sequence.patch
@@ -0,0 +1,145 @@
+From c5a7b2d2d5f980619f2747e5aadd0eae667d88ee Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Sun, 30 Sep 2018 17:32:36 +0800
+Subject: [PATCH 5531/5725] drm/amdgpu: Change SI/CI gfx/sdma/smu init sequence
+
+Initialize gfx/sdma before the DPM features are enabled.
+
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/cik.c | 17 +++++++++--------
+ drivers/gpu/drm/amd/amdgpu/si.c | 13 +++++++------
+ 2 files changed, 16 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
+index 78ab939..f41f5f5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/cik.c
++++ b/drivers/gpu/drm/amd/amdgpu/cik.c
+@@ -2002,6 +2002,8 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
+ amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
++ amdgpu_device_ip_block_add(adev, &gfx_v7_2_ip_block);
++ amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
+ if (amdgpu_dpm == -1)
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+ else
+@@ -2014,8 +2016,6 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
+ #endif
+ else
+ amdgpu_device_ip_block_add(adev, &dce_v8_2_ip_block);
+- amdgpu_device_ip_block_add(adev, &gfx_v7_2_ip_block);
+- amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
+ amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block);
+ amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block);
+ break;
+@@ -2023,6 +2023,8 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
+ amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
++ amdgpu_device_ip_block_add(adev, &gfx_v7_3_ip_block);
++ amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
+ if (amdgpu_dpm == -1)
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+ else
+@@ -2035,8 +2037,6 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
+ #endif
+ else
+ amdgpu_device_ip_block_add(adev, &dce_v8_5_ip_block);
+- amdgpu_device_ip_block_add(adev, &gfx_v7_3_ip_block);
+- amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
+ amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block);
+ amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block);
+ break;
+@@ -2044,6 +2044,8 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
+ amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
++ amdgpu_device_ip_block_add(adev, &gfx_v7_1_ip_block);
++ amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
+ amdgpu_device_ip_block_add(adev, &kv_smu_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+@@ -2053,8 +2055,7 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
+ #endif
+ else
+ amdgpu_device_ip_block_add(adev, &dce_v8_1_ip_block);
+- amdgpu_device_ip_block_add(adev, &gfx_v7_1_ip_block);
+- amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
++
+ amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block);
+ amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block);
+ break;
+@@ -2063,6 +2064,8 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
+ amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
++ amdgpu_device_ip_block_add(adev, &gfx_v7_2_ip_block);
++ amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
+ amdgpu_device_ip_block_add(adev, &kv_smu_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+@@ -2072,8 +2075,6 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
+ #endif
+ else
+ amdgpu_device_ip_block_add(adev, &dce_v8_3_ip_block);
+- amdgpu_device_ip_block_add(adev, &gfx_v7_2_ip_block);
+- amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
+ amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block);
+ amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block);
+ break;
+diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
+index feb86ee..1980e43 100644
+--- a/drivers/gpu/drm/amd/amdgpu/si.c
++++ b/drivers/gpu/drm/amd/amdgpu/si.c
+@@ -2057,13 +2057,13 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
+ amdgpu_device_ip_block_add(adev, &si_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &si_ih_ip_block);
++ amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block);
++ amdgpu_device_ip_block_add(adev, &si_dma_ip_block);
+ amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_device_ip_block_add(adev, &dce_v6_0_ip_block);
+- amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block);
+- amdgpu_device_ip_block_add(adev, &si_dma_ip_block);
+ /* amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block); */
+ /* amdgpu_device_ip_block_add(adev, &vce_v1_0_ip_block); */
+ break;
+@@ -2071,13 +2071,14 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
+ amdgpu_device_ip_block_add(adev, &si_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &si_ih_ip_block);
++ amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block);
++ amdgpu_device_ip_block_add(adev, &si_dma_ip_block);
+ amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_device_ip_block_add(adev, &dce_v6_4_ip_block);
+- amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block);
+- amdgpu_device_ip_block_add(adev, &si_dma_ip_block);
++
+ /* amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block); */
+ /* amdgpu_device_ip_block_add(adev, &vce_v1_0_ip_block); */
+ break;
+@@ -2085,11 +2086,11 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
+ amdgpu_device_ip_block_add(adev, &si_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &si_ih_ip_block);
++ amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block);
++ amdgpu_device_ip_block_add(adev, &si_dma_ip_block);
+ amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+- amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block);
+- amdgpu_device_ip_block_add(adev, &si_dma_ip_block);
+ break;
+ default:
+ BUG();
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5532-drm-amdgpu-Change-AI-gfx-sdma-smu-init-sequence.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5532-drm-amdgpu-Change-AI-gfx-sdma-smu-init-sequence.patch
new file mode 100644
index 00000000..93698fe9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5532-drm-amdgpu-Change-AI-gfx-sdma-smu-init-sequence.patch
@@ -0,0 +1,56 @@
+From b16527c5452d5ed2b782bede47d79bb772b76e0c Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Sun, 30 Sep 2018 17:37:27 +0800
+Subject: [PATCH 5532/5725] drm/amdgpu: Change AI gfx/sdma/smu init sequence
+
+Initialize gfx/sdma before the DPM features are enabled.
+
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/soc15.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index 2802c39..5614c2b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -529,6 +529,8 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
+ amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
+ else
+ amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
++ amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
++ amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+ if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
+@@ -539,8 +541,6 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
+ #else
+ # warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
+ #endif
+- amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
+- amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
+ if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) {
+ amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
+@@ -551,6 +551,8 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
+ amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
++ amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
++ amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+ if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+@@ -560,8 +562,6 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
+ #else
+ # warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
+ #endif
+- amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
+- amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
+ break;
+ default:
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5533-drm-amdgpu-Refine-function-amdgpu_device_ip_late_ini.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5533-drm-amdgpu-Refine-function-amdgpu_device_ip_late_ini.patch
new file mode 100644
index 00000000..19de4b5f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5533-drm-amdgpu-Refine-function-amdgpu_device_ip_late_ini.patch
@@ -0,0 +1,42 @@
+From 7119e74a762cdfed1f0d7b839f038650effdbec0 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Wed, 3 Oct 2018 16:10:45 +0800
+Subject: [PATCH 5533/5725] drm/amdgpu: Refine function
+ amdgpu_device_ip_late_init
+
+1. Only call late_init when hw_init was successful,
+   so check status.hw instead of status.valid in late_init.
+2. Set status.late_initialized to true even when late_init is not implemented.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 1de2728..9230cdb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1773,7 +1773,7 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
+ int i = 0, r;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+- if (!adev->ip_blocks[i].status.valid)
++ if (!adev->ip_blocks[i].status.hw)
+ continue;
+ if (adev->ip_blocks[i].version->funcs->late_init) {
+ r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
+@@ -1782,8 +1782,8 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
+ adev->ip_blocks[i].version->funcs->name, r);
+ return r;
+ }
+- adev->ip_blocks[i].status.late_initialized = true;
+ }
++ adev->ip_blocks[i].status.late_initialized = true;
+ }
+
+ amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5534-drm-amdgpu-Check-late_init-status-before-set-cg-pg-s.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5534-drm-amdgpu-Check-late_init-status-before-set-cg-pg-s.patch
new file mode 100644
index 00000000..b5d9d8b7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5534-drm-amdgpu-Check-late_init-status-before-set-cg-pg-s.patch
@@ -0,0 +1,39 @@
+From 8b57a6d371949e570b1a0444f8c8e1abbd791ad4 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Wed, 3 Oct 2018 16:19:50 +0800
+Subject: [PATCH 5534/5725] drm/amdgpu: Check late_init status before set cg/pg
+ state
+
+Avoid setting the CG/PG state unexpectedly when hw init fails.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 9230cdb..0cce3fa 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1706,7 +1706,7 @@ static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
+
+ for (j = 0; j < adev->num_ip_blocks; j++) {
+ i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
+- if (!adev->ip_blocks[i].status.valid)
++ if (!adev->ip_blocks[i].status.late_initialized)
+ continue;
+ /* skip CG for VCE/UVD, it's handled specially */
+ if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
+@@ -1736,7 +1736,7 @@ static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_power
+
+ for (j = 0; j < adev->num_ip_blocks; j++) {
+ i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
+- if (!adev->ip_blocks[i].status.valid)
++ if (!adev->ip_blocks[i].status.late_initialized)
+ continue;
+ /* skip CG for VCE/UVD, it's handled specially */
+ if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5535-drm-amdgpu-Split-amdgpu_ucode_init-fini_bo-into-two-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5535-drm-amdgpu-Split-amdgpu_ucode_init-fini_bo-into-two-.patch
new file mode 100644
index 00000000..944aa75a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5535-drm-amdgpu-Split-amdgpu_ucode_init-fini_bo-into-two-.patch
@@ -0,0 +1,155 @@
+From 856c717f3a3c0d982d8a8bdfc41527b1cefca8bf Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Tue, 9 Oct 2018 13:55:49 +0800
+Subject: [PATCH 5535/5725] drm/amdgpu: Split amdgpu_ucode_init/fini_bo into
+ two functions
+
+1. One creates/frees the BO at init/fini time.
+2. The other fills the BO before firmware loading.
+
+The ucode BO only needs to be created at driver load
+and freed at driver unload.
+
+On resume/reset, the driver only needs to re-fill the BO
+if the BO is allocated in VRAM.
+
+Suggested by Christian.
+
+v2: Return error when bo create failed.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 4 +++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 58 +++++++++++++++---------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 3 ++
+ 3 files changed, 36 insertions(+), 29 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 0cce3fa..0aa2ca1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1631,6 +1631,9 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
+ }
+ }
+
++ r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
++ if (r)
++ return r;
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_blocks[i].status.sw)
+ continue;
+@@ -1852,6 +1855,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
+ continue;
+
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
++ amdgpu_ucode_free_bo(adev);
+ amdgpu_free_static_csa(adev);
+ amdgpu_device_wb_fini(adev);
+ amdgpu_device_vram_scratch_fini(adev);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+index ce4044a..e15dfa6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+@@ -422,32 +422,42 @@ static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode,
+ return 0;
+ }
+
++int amdgpu_ucode_create_bo(struct amdgpu_device *adev)
++{
++ if (adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT) {
++ amdgpu_bo_create_kernel(adev, adev->firmware.fw_size, PAGE_SIZE,
++ amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
++ &adev->firmware.fw_buf,
++ &adev->firmware.fw_buf_mc,
++ &adev->firmware.fw_buf_ptr);
++ if (!adev->firmware.fw_buf) {
++ dev_err(adev->dev, "failed to create kernel buffer for firmware.fw_buf\n");
++ return -ENOMEM;
++ } else if (amdgpu_sriov_vf(adev)) {
++ memset(adev->firmware.fw_buf_ptr, 0, adev->firmware.fw_size);
++ }
++ }
++ return 0;
++}
++
++void amdgpu_ucode_free_bo(struct amdgpu_device *adev)
++{
++ if (adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT)
++ amdgpu_bo_free_kernel(&adev->firmware.fw_buf,
++ &adev->firmware.fw_buf_mc,
++ &adev->firmware.fw_buf_ptr);
++}
++
+ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
+ {
+ uint64_t fw_offset = 0;
+- int i, err;
++ int i;
+ struct amdgpu_firmware_info *ucode = NULL;
+ const struct common_firmware_header *header = NULL;
+
+- if (!adev->firmware.fw_size) {
+- dev_warn(adev->dev, "No ip firmware need to load\n");
++ /* for baremetal, the ucode is allocated in gtt, so don't need to fill the bo when reset/suspend */
++ if (!amdgpu_sriov_vf(adev) && (adev->in_gpu_reset || adev->in_suspend))
+ return 0;
+- }
+-
+- if (!adev->in_gpu_reset && !adev->in_suspend) {
+- err = amdgpu_bo_create_kernel(adev, adev->firmware.fw_size, PAGE_SIZE,
+- amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
+- &adev->firmware.fw_buf,
+- &adev->firmware.fw_buf_mc,
+- &adev->firmware.fw_buf_ptr);
+- if (err) {
+- dev_err(adev->dev, "failed to create kernel buffer for firmware.fw_buf\n");
+- goto failed;
+- }
+- }
+-
+- memset(adev->firmware.fw_buf_ptr, 0, adev->firmware.fw_size);
+-
+ /*
+ * if SMU loaded firmware, it needn't add SMC, UVD, and VCE
+ * ucode info here
+@@ -479,12 +489,6 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
+ }
+ }
+ return 0;
+-
+-failed:
+- if (err)
+- adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
+-
+- return err;
+ }
+
+ int amdgpu_ucode_fini_bo(struct amdgpu_device *adev)
+@@ -503,9 +507,5 @@ int amdgpu_ucode_fini_bo(struct amdgpu_device *adev)
+ }
+ }
+
+- amdgpu_bo_free_kernel(&adev->firmware.fw_buf,
+- &adev->firmware.fw_buf_mc,
+- &adev->firmware.fw_buf_ptr);
+-
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+index 15791af..cbd69fe 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+@@ -280,6 +280,9 @@ bool amdgpu_ucode_hdr_version(union amdgpu_firmware_header *hdr,
+ int amdgpu_ucode_init_bo(struct amdgpu_device *adev);
+ int amdgpu_ucode_fini_bo(struct amdgpu_device *adev);
+
++int amdgpu_ucode_create_bo(struct amdgpu_device *adev);
++void amdgpu_ucode_free_bo(struct amdgpu_device *adev);
++
+ enum amdgpu_firmware_load_type
+ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type);
+
+--
+2.7.4
+
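A simplified, self-contained sketch of the lifetime split this patch describes (assumed stand-in helper names, not the real driver code): allocate the firmware BO once, fill it whenever firmware is (re)loaded, and free it once at teardown:

/* illustrative sketch only, not the driver code */
#include <stdbool.h>
#include <stdio.h>

/* assumed stand-ins for amdgpu_ucode_create_bo/init_bo/free_bo */
static void ucode_create_bo(void) { puts("alloc fw_buf (GTT, or VRAM for SR-IOV)"); }
static void ucode_free_bo(void)   { puts("free fw_buf"); }

static void ucode_init_bo(bool in_reset, bool bo_in_vram)
{
	/* bare metal keeps the BO in GTT, so reset/suspend needs no re-fill */
	if (in_reset && !bo_in_vram) {
		puts("skip re-fill");
		return;
	}
	puts("fill fw_buf with ucode");
}

int main(void)
{
	ucode_create_bo();           /* once, at driver load (after sw_init) */
	ucode_init_bo(false, false); /* initial fill, before fw loading      */
	ucode_init_bo(true, false);  /* GPU reset on bare metal: skipped     */
	ucode_free_bo();             /* once, at driver unload               */
	return 0;
}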
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5536-drm-amdgpu-Remove-amdgpu_ucode_fini_bo.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5536-drm-amdgpu-Remove-amdgpu_ucode_fini_bo.patch
new file mode 100644
index 00000000..36a98959
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5536-drm-amdgpu-Remove-amdgpu_ucode_fini_bo.patch
@@ -0,0 +1,88 @@
+From f698389435a51b85d5fc98e186f5114526caf40b Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Tue, 9 Oct 2018 14:22:04 +0800
+Subject: [PATCH 5536/5725] drm/amdgpu: Remove amdgpu_ucode_fini_bo
+
+The variable cleanup it performed is unnecessary.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 2 --
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 19 -------------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 3 +--
+ drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 3 ---
+ 4 files changed, 1 insertion(+), 26 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+index f74f155..a70657d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+@@ -444,8 +444,6 @@ static int psp_hw_fini(void *handle)
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
+ return 0;
+
+- amdgpu_ucode_fini_bo(adev);
+-
+ psp_ring_destroy(psp, PSP_RING_TYPE__KM);
+
+ amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+index e15dfa6..b41bba4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+@@ -490,22 +490,3 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
+ }
+ return 0;
+ }
+-
+-int amdgpu_ucode_fini_bo(struct amdgpu_device *adev)
+-{
+- int i;
+- struct amdgpu_firmware_info *ucode = NULL;
+-
+- if (!adev->firmware.fw_size)
+- return 0;
+-
+- for (i = 0; i < adev->firmware.max_ucodes; i++) {
+- ucode = &adev->firmware.ucode[i];
+- if (ucode->fw) {
+- ucode->mc_addr = 0;
+- ucode->kaddr = NULL;
+- }
+- }
+-
+- return 0;
+-}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+index cbd69fe..3b194e8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+@@ -277,9 +277,8 @@ void amdgpu_ucode_print_gpu_info_hdr(const struct common_firmware_header *hdr);
+ int amdgpu_ucode_validate(const struct firmware *fw);
+ bool amdgpu_ucode_hdr_version(union amdgpu_firmware_header *hdr,
+ uint16_t hdr_major, uint16_t hdr_minor);
+-int amdgpu_ucode_init_bo(struct amdgpu_device *adev);
+-int amdgpu_ucode_fini_bo(struct amdgpu_device *adev);
+
++int amdgpu_ucode_init_bo(struct amdgpu_device *adev);
+ int amdgpu_ucode_create_bo(struct amdgpu_device *adev);
+ void amdgpu_ucode_free_bo(struct amdgpu_device *adev);
+
+diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+index 8ca308e..f2cb6e1 100644
+--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
++++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+@@ -109,9 +109,6 @@ static int pp_sw_fini(void *handle)
+
+ hwmgr_sw_fini(hwmgr);
+
+- if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
+- amdgpu_ucode_fini_bo(adev);
+-
+ release_firmware(adev->pm.fw);
+ adev->pm.fw = NULL;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5537-drm-amdgpu-split-ip-hw_init-into-2-phases.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5537-drm-amdgpu-split-ip-hw_init-into-2-phases.patch
new file mode 100644
index 00000000..2c163c5c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5537-drm-amdgpu-split-ip-hw_init-into-2-phases.patch
@@ -0,0 +1,103 @@
+From 19d2598b199a09123f9d0826fdd1f2b72deea95e Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Wed, 10 Oct 2018 19:28:30 +0800
+Subject: [PATCH 5537/5725] drm/amdgpu: split ip hw_init into 2 phases
+
+We need to initialize some IP blocks earlier to deal with ordering issues,
+similar to how resume is split into two phases.
+
+Will do fw loading via smu/psp between the two phases.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 66 ++++++++++++++++++++++++------
+ 1 file changed, 53 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 0aa2ca1..a8b95cd 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1575,6 +1575,51 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
+ return 0;
+ }
+
++static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
++{
++ int i, r;
++
++ for (i = 0; i < adev->num_ip_blocks; i++) {
++ if (!adev->ip_blocks[i].status.sw)
++ continue;
++ if (adev->ip_blocks[i].status.hw)
++ continue;
++ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
++ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
++ r = adev->ip_blocks[i].version->funcs->hw_init(adev);
++ if (r) {
++ DRM_ERROR("hw_init of IP block <%s> failed %d\n",
++ adev->ip_blocks[i].version->funcs->name, r);
++ return r;
++ }
++ adev->ip_blocks[i].status.hw = true;
++ }
++ }
++
++ return 0;
++}
++
++static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
++{
++ int i, r;
++
++ for (i = 0; i < adev->num_ip_blocks; i++) {
++ if (!adev->ip_blocks[i].status.sw)
++ continue;
++ if (adev->ip_blocks[i].status.hw)
++ continue;
++ r = adev->ip_blocks[i].version->funcs->hw_init(adev);
++ if (r) {
++ DRM_ERROR("hw_init of IP block <%s> failed %d\n",
++ adev->ip_blocks[i].version->funcs->name, r);
++ return r;
++ }
++ adev->ip_blocks[i].status.hw = true;
++ }
++
++ return 0;
++}
++
+ /**
+ * amdgpu_device_ip_init - run init for hardware IPs
+ *
+@@ -1634,19 +1679,14 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
+ r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
+ if (r)
+ return r;
+- for (i = 0; i < adev->num_ip_blocks; i++) {
+- if (!adev->ip_blocks[i].status.sw)
+- continue;
+- if (adev->ip_blocks[i].status.hw)
+- continue;
+- r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
+- if (r) {
+- DRM_ERROR("hw_init of IP block <%s> failed %d\n",
+- adev->ip_blocks[i].version->funcs->name, r);
+- return r;
+- }
+- adev->ip_blocks[i].status.hw = true;
+- }
++
++ r = amdgpu_device_ip_hw_init_phase1(adev);
++ if (r)
++ return r;
++
++ r = amdgpu_device_ip_hw_init_phase2(adev);
++ if (r)
++ return r;
+
+ amdgpu_xgmi_add_device(adev);
+ amdgpu_amdkfd_device_init(adev);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5538-drm-amdgpu-Load-fw-between-hw_init-resume_phase1-and.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5538-drm-amdgpu-Load-fw-between-hw_init-resume_phase1-and.patch
new file mode 100644
index 00000000..ac25aedc
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5538-drm-amdgpu-Load-fw-between-hw_init-resume_phase1-and.patch
@@ -0,0 +1,290 @@
+From ed92a4ecd991b2e62b49f9d607c0fa1e3d163546 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Wed, 10 Oct 2018 20:41:32 +0800
+Subject: [PATCH 5538/5725] drm/amdgpu: Load fw between hw_init/resume_phase1
+ and phase2
+
+Extract the fw loading function out of powerplay.
+Do fw loading between hw_init/resume_phase1 and phase2.
+
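+A minimal sketch of the resulting init order (helpers added by this and
+the previous patch; error handling omitted):
+
+    amdgpu_ucode_create_bo(adev);            /* allocate the ucode bo        */
+    amdgpu_device_ip_hw_init_phase1(adev);   /* COMMON and IH blocks only    */
+    amdgpu_device_fw_loading(adev);          /* PSP hw_init / SMU fw loading */
+    amdgpu_device_ip_hw_init_phase2(adev);   /* remaining IP blocks          */
+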
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 61 +++++++++++++++++++++-
+ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 11 ----
+ drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 8 ---
+ drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 20 -------
+ drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 1 -
+ drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | 8 +--
+ drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c | 5 --
+ 7 files changed, 62 insertions(+), 52 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index a8b95cd..6260307 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1620,6 +1620,47 @@ static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
+ return 0;
+ }
+
++static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
++{
++ int r = 0;
++ int i;
++
++ if (adev->asic_type >= CHIP_VEGA10) {
++ for (i = 0; i < adev->num_ip_blocks; i++) {
++ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
++ if (adev->in_gpu_reset || adev->in_suspend) {
++ if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset)
++ break; /* sriov gpu reset, psp need to do hw_init before IH because of hw limit */
++ r = adev->ip_blocks[i].version->funcs->resume(adev);
++ if (r) {
++ DRM_ERROR("resume of IP block <%s> failed %d\n",
++ adev->ip_blocks[i].version->funcs->name, r);
++ return r;
++ }
++ } else {
++ r = adev->ip_blocks[i].version->funcs->hw_init(adev);
++ if (r) {
++ DRM_ERROR("hw_init of IP block <%s> failed %d\n",
++ adev->ip_blocks[i].version->funcs->name, r);
++ return r;
++ }
++ }
++ adev->ip_blocks[i].status.hw = true;
++ }
++ }
++ }
++
++ if (adev->powerplay.pp_funcs->load_firmware) {
++ r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
++ if (r) {
++ pr_err("firmware loading failed\n");
++ return r;
++ }
++ }
++
++ return 0;
++}
++
+ /**
+ * amdgpu_device_ip_init - run init for hardware IPs
+ *
+@@ -1684,6 +1725,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
+ if (r)
+ return r;
+
++ r = amdgpu_device_fw_loading(adev);
++ if (r)
++ return r;
++
+ r = amdgpu_device_ip_hw_init_phase2(adev);
+ if (r)
+ return r;
+@@ -2216,7 +2261,8 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
+ continue;
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
+- adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)
++ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
++ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
+ continue;
+ r = adev->ip_blocks[i].version->funcs->resume(adev);
+ if (r) {
+@@ -2248,6 +2294,11 @@ static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
+ r = amdgpu_device_ip_resume_phase1(adev);
+ if (r)
+ return r;
++
++ r = amdgpu_device_fw_loading(adev);
++ if (r)
++ return r;
++
+ r = amdgpu_device_ip_resume_phase2(adev);
+
+ return r;
+@@ -3204,6 +3255,10 @@ static int amdgpu_device_reset(struct amdgpu_device *adev)
+ if (r)
+ goto out;
+
++ r = amdgpu_device_fw_loading(adev);
++ if (r)
++ return r;
++
+ r = amdgpu_device_ip_resume_phase2(adev);
+ if (r)
+ goto out;
+@@ -3264,6 +3319,10 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
+ /* we need recover gart prior to run SMC/CP/SDMA resume */
+ amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
+
++ r = amdgpu_device_fw_loading(adev);
++ if (r)
++ return r;
++
+ /* now we are okay to resume SMC/CP/SDMA */
+ r = amdgpu_device_ip_reinit_late_sriov(adev);
+ if (r)
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index 575f363..a0fc2c2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -4174,20 +4174,9 @@ static void gfx_v8_0_rlc_start(struct amdgpu_device *adev)
+
+ static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
+ {
+- int r;
+-
+ gfx_v8_0_rlc_stop(adev);
+ gfx_v8_0_rlc_reset(adev);
+ gfx_v8_0_init_pg(adev);
+-
+- if (adev->powerplay.pp_funcs->load_firmware) {
+- r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
+- if (r) {
+- pr_err("firmware loading failed\n");
+- return r;
+- }
+- }
+-
+ gfx_v8_0_rlc_start(adev);
+
+ return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+index 573284f..24b5252 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+@@ -796,14 +796,6 @@ static int sdma_v3_0_start(struct amdgpu_device *adev)
+ {
+ int r;
+
+- if (adev->powerplay.pp_funcs->load_firmware) {
+- r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
+- if (r) {
+- pr_err("firmware loading failed\n");
+- return r;
+- }
+- }
+-
+ /* disable sdma engine before programing it */
+ sdma_v3_0_ctx_switch_enable(adev, false);
+ sdma_v3_0_enable(adev, false);
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+index d552af2..47ac923 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+@@ -89,7 +89,6 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
+ hwmgr_init_default_caps(hwmgr);
+ hwmgr_set_user_specify_caps(hwmgr);
+ hwmgr->fan_ctrl_is_in_default_mode = true;
+- hwmgr->reload_fw = 1;
+ hwmgr_init_workload_prority(hwmgr);
+
+ switch (hwmgr->chip_family) {
+@@ -209,17 +208,6 @@ int hwmgr_hw_init(struct pp_hwmgr *hwmgr)
+ {
+ int ret = 0;
+
+- if (!hwmgr || !hwmgr->smumgr_funcs)
+- return -EINVAL;
+-
+- if (hwmgr->smumgr_funcs->start_smu) {
+- ret = hwmgr->smumgr_funcs->start_smu(hwmgr);
+- if (ret) {
+- pr_err("smc start failed\n");
+- return -EINVAL;
+- }
+- }
+-
+ if (!hwmgr->pm_en)
+ return 0;
+
+@@ -301,7 +289,6 @@ int hwmgr_suspend(struct pp_hwmgr *hwmgr)
+ if (!hwmgr || !hwmgr->pm_en)
+ return 0;
+
+- hwmgr->reload_fw = true;
+ phm_disable_smc_firmware_ctf(hwmgr);
+ ret = psm_set_boot_states(hwmgr);
+ if (ret)
+@@ -321,13 +308,6 @@ int hwmgr_resume(struct pp_hwmgr *hwmgr)
+ if (!hwmgr)
+ return -EINVAL;
+
+- if (hwmgr->smumgr_funcs && hwmgr->smumgr_funcs->start_smu) {
+- if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
+- pr_err("smc start failed\n");
+- return -EINVAL;
+- }
+- }
+-
+ if (!hwmgr->pm_en)
+ return 0;
+
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+index f07d93e..0d00dc3 100644
+--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+@@ -735,7 +735,6 @@ struct pp_hwmgr {
+ void *smu_backend;
+ const struct pp_smumgr_func *smumgr_funcs;
+ bool is_kicker;
+- bool reload_fw;
+
+ enum PP_DAL_POWERLEVEL dal_power_level;
+ struct phm_dynamic_state_info dyn_state;
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+index 5db9215..f189c40 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+@@ -345,9 +345,6 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
+ uint32_t fw_to_load;
+ int r = 0;
+
+- if (!hwmgr->reload_fw)
+- return 0;
+-
+ amdgpu_ucode_init_bo(hwmgr->adev);
+
+ if (smu_data->soft_regs_start)
+@@ -434,10 +431,9 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
+ smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load);
+
+ r = smu7_check_fw_load_finish(hwmgr, fw_to_load);
+- if (!r) {
+- hwmgr->reload_fw = 0;
++ if (!r)
+ return 0;
+- }
++
+ pr_err("SMU load firmware failed\n");
+
+ failed:
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
+index abbf2f2..f836d30 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
+@@ -661,9 +661,6 @@ static int smu8_request_smu_load_fw(struct pp_hwmgr *hwmgr)
+ uint32_t fw_to_check = 0;
+ int ret;
+
+- if (!hwmgr->reload_fw)
+- return 0;
+-
+ amdgpu_ucode_init_bo(hwmgr->adev);
+
+ smu8_smu_populate_firmware_entries(hwmgr);
+@@ -719,8 +716,6 @@ static int smu8_request_smu_load_fw(struct pp_hwmgr *hwmgr)
+ return ret;
+ }
+
+- hwmgr->reload_fw = 0;
+-
+ return 0;
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5539-drm-amdgpu-Remove-wrong-fw-loading-type-warning.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5539-drm-amdgpu-Remove-wrong-fw-loading-type-warning.patch
new file mode 100644
index 00000000..51957d0a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5539-drm-amdgpu-Remove-wrong-fw-loading-type-warning.patch
@@ -0,0 +1,31 @@
+From ac392c813afb5917d9bceb75f4281f9e57a90804 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Wed, 10 Oct 2018 21:04:14 +0800
+Subject: [PATCH 5539/5725] drm/amdgpu: Remove wrong fw loading type warning
+
+Remove the warning message:
+"-1 is not supported on VI"
+-1 is the default fw load type, meaning auto.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+index b41bba4..29b8b82 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+@@ -297,8 +297,6 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
+ case CHIP_VEGAM:
+- if (load_type != AMDGPU_FW_LOAD_SMU)
+- pr_warning("%d is not supported on VI\n", load_type);
+ return AMDGPU_FW_LOAD_SMU;
+ case CHIP_VEGA10:
+ case CHIP_RAVEN:
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5540-drm-amdgpu-Remove-the-direct-fw-loading-support-for-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5540-drm-amdgpu-Remove-the-direct-fw-loading-support-for-.patch
new file mode 100644
index 00000000..8259ad3e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5540-drm-amdgpu-Remove-the-direct-fw-loading-support-for-.patch
@@ -0,0 +1,78 @@
+From 7ff4a9d6b2ae52a803b377cd6234055be5259a80 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Tue, 9 Oct 2018 18:46:12 +0800
+Subject: [PATCH 5540/5725] drm/amdgpu: Remove the direct fw loading support
+ for sdma2.4
+
+sdma2.4 is only for Iceland. For VI, we don't maintain the
+direct fw loading.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 42 ----------------------------------
+ 1 file changed, 42 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+index 9232a8d..7939dc6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+@@ -511,41 +511,6 @@ static int sdma_v2_4_rlc_resume(struct amdgpu_device *adev)
+ return 0;
+ }
+
+-/**
+- * sdma_v2_4_load_microcode - load the sDMA ME ucode
+- *
+- * @adev: amdgpu_device pointer
+- *
+- * Loads the sDMA0/1 ucode.
+- * Returns 0 for success, -EINVAL if the ucode is not available.
+- */
+-static int sdma_v2_4_load_microcode(struct amdgpu_device *adev)
+-{
+- const struct sdma_firmware_header_v1_0 *hdr;
+- const __le32 *fw_data;
+- u32 fw_size;
+- int i, j;
+-
+- /* halt the MEs */
+- sdma_v2_4_enable(adev, false);
+-
+- for (i = 0; i < adev->sdma.num_instances; i++) {
+- if (!adev->sdma.instance[i].fw)
+- return -EINVAL;
+- hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
+- amdgpu_ucode_print_sdma_hdr(&hdr->header);
+- fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+- fw_data = (const __le32 *)
+- (adev->sdma.instance[i].fw->data +
+- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+- WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
+- for (j = 0; j < fw_size; j++)
+- WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
+- WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
+- }
+-
+- return 0;
+-}
+
+ /**
+ * sdma_v2_4_start - setup and start the async dma engines
+@@ -559,13 +524,6 @@ static int sdma_v2_4_start(struct amdgpu_device *adev)
+ {
+ int r;
+
+-
+- if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
+- r = sdma_v2_4_load_microcode(adev);
+- if (r)
+- return r;
+- }
+-
+ /* halt the engine before programing */
+ sdma_v2_4_enable(adev, false);
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5541-drm-amdgpu-powerplay-endian-fixes-for-vega10_process.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5541-drm-amdgpu-powerplay-endian-fixes-for-vega10_process.patch
new file mode 100644
index 00000000..98b0863c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5541-drm-amdgpu-powerplay-endian-fixes-for-vega10_process.patch
@@ -0,0 +1,60 @@
+From 2b43c799762efd00e9e582b73169f4282b6f7f75 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Tue, 9 Oct 2018 15:23:15 -0500
+Subject: [PATCH 5541/5725] drm/amdgpu/powerplay: endian fixes for
+ vega10_processpptables.c
+
+Properly swap data from vbios.
+
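+The fields are stored little-endian in the vbios table, so each read now
+goes through the byte-swap helpers (le16_to_cpu/le32_to_cpu); one line
+from the change below as an example:
+
+    tdp_table->usTDC = le16_to_cpu(power_tune_table_v3->usTdcLimit);
+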
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../amd/powerplay/hwmgr/vega10_processpptables.c | 30 +++++++++++-----------
+ 1 file changed, 15 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
+index 16b1a9c..b8747a5 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
+@@ -451,23 +451,23 @@ static int get_tdp_table(
+ le16_to_cpu(power_tune_table_v2->usLoadLineResistance);
+ } else {
+ power_tune_table_v3 = (ATOM_Vega10_PowerTune_Table_V3 *)table;
+- tdp_table->usMaximumPowerDeliveryLimit = power_tune_table_v3->usSocketPowerLimit;
+- tdp_table->usTDC = power_tune_table_v3->usTdcLimit;
+- tdp_table->usEDCLimit = power_tune_table_v3->usEdcLimit;
+- tdp_table->usSoftwareShutdownTemp = power_tune_table_v3->usSoftwareShutdownTemp;
+- tdp_table->usTemperatureLimitTedge = power_tune_table_v3->usTemperatureLimitTedge;
+- tdp_table->usTemperatureLimitHotspot = power_tune_table_v3->usTemperatureLimitHotSpot;
+- tdp_table->usTemperatureLimitLiquid1 = power_tune_table_v3->usTemperatureLimitLiquid1;
+- tdp_table->usTemperatureLimitLiquid2 = power_tune_table_v3->usTemperatureLimitLiquid2;
+- tdp_table->usTemperatureLimitHBM = power_tune_table_v3->usTemperatureLimitHBM;
+- tdp_table->usTemperatureLimitVrVddc = power_tune_table_v3->usTemperatureLimitVrSoc;
+- tdp_table->usTemperatureLimitVrMvdd = power_tune_table_v3->usTemperatureLimitVrMem;
+- tdp_table->usTemperatureLimitPlx = power_tune_table_v3->usTemperatureLimitPlx;
++ tdp_table->usMaximumPowerDeliveryLimit = le16_to_cpu(power_tune_table_v3->usSocketPowerLimit);
++ tdp_table->usTDC = le16_to_cpu(power_tune_table_v3->usTdcLimit);
++ tdp_table->usEDCLimit = le16_to_cpu(power_tune_table_v3->usEdcLimit);
++ tdp_table->usSoftwareShutdownTemp = le16_to_cpu(power_tune_table_v3->usSoftwareShutdownTemp);
++ tdp_table->usTemperatureLimitTedge = le16_to_cpu(power_tune_table_v3->usTemperatureLimitTedge);
++ tdp_table->usTemperatureLimitHotspot = le16_to_cpu(power_tune_table_v3->usTemperatureLimitHotSpot);
++ tdp_table->usTemperatureLimitLiquid1 = le16_to_cpu(power_tune_table_v3->usTemperatureLimitLiquid1);
++ tdp_table->usTemperatureLimitLiquid2 = le16_to_cpu(power_tune_table_v3->usTemperatureLimitLiquid2);
++ tdp_table->usTemperatureLimitHBM = le16_to_cpu(power_tune_table_v3->usTemperatureLimitHBM);
++ tdp_table->usTemperatureLimitVrVddc = le16_to_cpu(power_tune_table_v3->usTemperatureLimitVrSoc);
++ tdp_table->usTemperatureLimitVrMvdd = le16_to_cpu(power_tune_table_v3->usTemperatureLimitVrMem);
++ tdp_table->usTemperatureLimitPlx = le16_to_cpu(power_tune_table_v3->usTemperatureLimitPlx);
+ tdp_table->ucLiquid1_I2C_address = power_tune_table_v3->ucLiquid1_I2C_address;
+ tdp_table->ucLiquid2_I2C_address = power_tune_table_v3->ucLiquid2_I2C_address;
+- tdp_table->usBoostStartTemperature = power_tune_table_v3->usBoostStartTemperature;
+- tdp_table->usBoostStopTemperature = power_tune_table_v3->usBoostStopTemperature;
+- tdp_table->ulBoostClock = power_tune_table_v3->ulBoostClock;
++ tdp_table->usBoostStartTemperature = le16_to_cpu(power_tune_table_v3->usBoostStartTemperature);
++ tdp_table->usBoostStopTemperature = le16_to_cpu(power_tune_table_v3->usBoostStopTemperature);
++ tdp_table->ulBoostClock = le32_to_cpu(power_tune_table_v3->ulBoostClock);
+
+ get_scl_sda_value(power_tune_table_v3->ucLiquid_I2C_Line, &scl, &sda);
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5542-drm-amdgpu-powerplay-endian-fixes-for-vega12_process.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5542-drm-amdgpu-powerplay-endian-fixes-for-vega12_process.patch
new file mode 100644
index 00000000..4adab90a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5542-drm-amdgpu-powerplay-endian-fixes-for-vega12_process.patch
@@ -0,0 +1,79 @@
+From bc6c353b2134921090d7bd85d9c40d3171eeb051 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Tue, 9 Oct 2018 15:33:16 -0500
+Subject: [PATCH 5542/5725] drm/amdgpu/powerplay: endian fixes for
+ vega12_processpptables.c
+
+Properly swap data from vbios.
+
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../amd/powerplay/hwmgr/vega12_processpptables.c | 26 ++++++++++++----------
+ 1 file changed, 14 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
+index f4f366b..e330058 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
+@@ -114,7 +114,7 @@ static int copy_clock_limits_array(
+ return -ENOMEM;
+
+ for (i = 0; i < ATOM_VEGA12_PPCLOCK_COUNT; i++)
+- table[i] = pptable_array[i];
++ table[i] = le32_to_cpu(pptable_array[i]);
+
+ *pptable_info_array = table;
+
+@@ -136,7 +136,7 @@ static int copy_overdrive_settings_limits_array(
+ return -ENOMEM;
+
+ for (i = 0; i < ATOM_VEGA12_ODSETTING_COUNT; i++)
+- table[i] = pptable_array[i];
++ table[i] = le32_to_cpu(pptable_array[i]);
+
+ *pptable_info_array = table;
+
+@@ -248,11 +248,13 @@ static int init_powerplay_table_information(
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
+
+- if (powerplay_table->ODSettingsMax[ATOM_VEGA12_ODSETTING_GFXCLKFMAX] > VEGA12_ENGINECLOCK_HARDMAX)
++ if (le32_to_cpu(powerplay_table->ODSettingsMax[ATOM_VEGA12_ODSETTING_GFXCLKFMAX]) > VEGA12_ENGINECLOCK_HARDMAX)
+ hwmgr->platform_descriptor.overdriveLimit.engineClock = VEGA12_ENGINECLOCK_HARDMAX;
+ else
+- hwmgr->platform_descriptor.overdriveLimit.engineClock = powerplay_table->ODSettingsMax[ATOM_VEGA12_ODSETTING_GFXCLKFMAX];
+- hwmgr->platform_descriptor.overdriveLimit.memoryClock = powerplay_table->ODSettingsMax[ATOM_VEGA12_ODSETTING_UCLKFMAX];
++ hwmgr->platform_descriptor.overdriveLimit.engineClock =
++ le32_to_cpu(powerplay_table->ODSettingsMax[ATOM_VEGA12_ODSETTING_GFXCLKFMAX]);
++ hwmgr->platform_descriptor.overdriveLimit.memoryClock =
++ le32_to_cpu(powerplay_table->ODSettingsMax[ATOM_VEGA12_ODSETTING_UCLKFMAX]);
+
+ copy_overdrive_settings_limits_array(hwmgr, &pptable_information->od_settings_max, powerplay_table->ODSettingsMax);
+ copy_overdrive_settings_limits_array(hwmgr, &pptable_information->od_settings_min, powerplay_table->ODSettingsMin);
+@@ -265,15 +267,15 @@ static int init_powerplay_table_information(
+ && hwmgr->platform_descriptor.overdriveLimit.memoryClock > 0)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ACOverdriveSupport);
+
+- pptable_information->us_small_power_limit1 = powerplay_table->usSmallPowerLimit1;
+- pptable_information->us_small_power_limit2 = powerplay_table->usSmallPowerLimit2;
+- pptable_information->us_boost_power_limit = powerplay_table->usBoostPowerLimit;
+- pptable_information->us_od_turbo_power_limit = powerplay_table->usODTurboPowerLimit;
+- pptable_information->us_od_powersave_power_limit = powerplay_table->usODPowerSavePowerLimit;
++ pptable_information->us_small_power_limit1 = le16_to_cpu(powerplay_table->usSmallPowerLimit1);
++ pptable_information->us_small_power_limit2 = le16_to_cpu(powerplay_table->usSmallPowerLimit2);
++ pptable_information->us_boost_power_limit = le16_to_cpu(powerplay_table->usBoostPowerLimit);
++ pptable_information->us_od_turbo_power_limit = le16_to_cpu(powerplay_table->usODTurboPowerLimit);
++ pptable_information->us_od_powersave_power_limit = le16_to_cpu(powerplay_table->usODPowerSavePowerLimit);
+
+- pptable_information->us_software_shutdown_temp = powerplay_table->usSoftwareShutdownTemp;
++ pptable_information->us_software_shutdown_temp = le16_to_cpu(powerplay_table->usSoftwareShutdownTemp);
+
+- hwmgr->platform_descriptor.TDPODLimit = (uint16_t)powerplay_table->ODSettingsMax[ATOM_VEGA12_ODSETTING_POWERPERCENTAGE];
++ hwmgr->platform_descriptor.TDPODLimit = le32_to_cpu(powerplay_table->ODSettingsMax[ATOM_VEGA12_ODSETTING_POWERPERCENTAGE]);
+
+ disable_power_control = 0;
+ if (!disable_power_control) {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5543-drm-amdgpu-powerplay-endian-fixes-for-vega20_process.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5543-drm-amdgpu-powerplay-endian-fixes-for-vega20_process.patch
new file mode 100644
index 00000000..505fb1ff
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5543-drm-amdgpu-powerplay-endian-fixes-for-vega20_process.patch
@@ -0,0 +1,106 @@
+From 3df3f9de37fd855b84d76c5958ea02ea96291994 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Tue, 9 Oct 2018 15:50:38 -0500
+Subject: [PATCH 5543/5725] drm/amdgpu/powerplay: endian fixes for
+ vega20_processpptables.c
+
+Properly swap data from vbios.
+
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../amd/powerplay/hwmgr/vega20_processpptables.c | 41 +++++++++++++---------
+ 1 file changed, 25 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
+index c9b93e6..956aa6a 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
+@@ -676,7 +676,7 @@ static int copy_clock_limits_array(
+ return -ENOMEM;
+
+ for (i = 0; i < power_saving_clock_count; i++)
+- table[i] = pptable_array[i];
++ table[i] = le32_to_cpu(pptable_array[i]);
+
+ *pptable_info_array = table;
+
+@@ -698,7 +698,7 @@ static int copy_overdrive_settings_limits_array(
+ return -ENOMEM;
+
+ for (i = 0; i < od_setting_count; i++)
+- table[i] = pptable_array[i];
++ table[i] = le32_to_cpu(pptable_array[i]);
+
+ *pptable_info_array = table;
+
+@@ -721,7 +721,7 @@ static int copy_overdrive_feature_capabilities_array(
+ return -ENOMEM;
+
+ for (i = 0; i < od_feature_count; i++) {
+- table[i] = pptable_array[i];
++ table[i] = le32_to_cpu(pptable_array[i]);
+ if (table[i])
+ od_supported = true;
+ }
+@@ -844,10 +844,16 @@ static int init_powerplay_table_information(
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
+
+ if (powerplay_table->OverDrive8Table.ucODTableRevision == 1) {
+- od_feature_count = (powerplay_table->OverDrive8Table.ODFeatureCount > ATOM_VEGA20_ODFEATURE_COUNT) ?
+- ATOM_VEGA20_ODFEATURE_COUNT : powerplay_table->OverDrive8Table.ODFeatureCount;
+- od_setting_count = (powerplay_table->OverDrive8Table.ODSettingCount > ATOM_VEGA20_ODSETTING_COUNT) ?
+- ATOM_VEGA20_ODSETTING_COUNT : powerplay_table->OverDrive8Table.ODSettingCount;
++ od_feature_count =
++ (le32_to_cpu(powerplay_table->OverDrive8Table.ODFeatureCount) >
++ ATOM_VEGA20_ODFEATURE_COUNT) ?
++ ATOM_VEGA20_ODFEATURE_COUNT :
++ le32_to_cpu(powerplay_table->OverDrive8Table.ODFeatureCount);
++ od_setting_count =
++ (le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingCount) >
++ ATOM_VEGA20_ODSETTING_COUNT) ?
++ ATOM_VEGA20_ODSETTING_COUNT :
++ le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingCount);
+
+ copy_overdrive_feature_capabilities_array(hwmgr,
+ &pptable_information->od_feature_capabilities,
+@@ -863,15 +869,15 @@ static int init_powerplay_table_information(
+ od_setting_count);
+ }
+
+- pptable_information->us_small_power_limit1 = powerplay_table->usSmallPowerLimit1;
+- pptable_information->us_small_power_limit2 = powerplay_table->usSmallPowerLimit2;
+- pptable_information->us_boost_power_limit = powerplay_table->usBoostPowerLimit;
+- pptable_information->us_od_turbo_power_limit = powerplay_table->usODTurboPowerLimit;
+- pptable_information->us_od_powersave_power_limit = powerplay_table->usODPowerSavePowerLimit;
++ pptable_information->us_small_power_limit1 = le16_to_cpu(powerplay_table->usSmallPowerLimit1);
++ pptable_information->us_small_power_limit2 = le16_to_cpu(powerplay_table->usSmallPowerLimit2);
++ pptable_information->us_boost_power_limit = le16_to_cpu(powerplay_table->usBoostPowerLimit);
++ pptable_information->us_od_turbo_power_limit = le16_to_cpu(powerplay_table->usODTurboPowerLimit);
++ pptable_information->us_od_powersave_power_limit = le16_to_cpu(powerplay_table->usODPowerSavePowerLimit);
+
+- pptable_information->us_software_shutdown_temp = powerplay_table->usSoftwareShutdownTemp;
++ pptable_information->us_software_shutdown_temp = le16_to_cpu(powerplay_table->usSoftwareShutdownTemp);
+
+- hwmgr->platform_descriptor.TDPODLimit = (uint16_t)powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE];
++ hwmgr->platform_descriptor.TDPODLimit = le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE]);
+
+ disable_power_control = 0;
+ if (!disable_power_control && hwmgr->platform_descriptor.TDPODLimit)
+@@ -879,8 +885,11 @@ static int init_powerplay_table_information(
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PowerControl);
+
+ if (powerplay_table->PowerSavingClockTable.ucTableRevision == 1) {
+- power_saving_clock_count = (powerplay_table->PowerSavingClockTable.PowerSavingClockCount >= ATOM_VEGA20_PPCLOCK_COUNT) ?
+- ATOM_VEGA20_PPCLOCK_COUNT : powerplay_table->PowerSavingClockTable.PowerSavingClockCount;
++ power_saving_clock_count =
++ (le32_to_cpu(powerplay_table->PowerSavingClockTable.PowerSavingClockCount) >=
++ ATOM_VEGA20_PPCLOCK_COUNT) ?
++ ATOM_VEGA20_PPCLOCK_COUNT :
++ le32_to_cpu(powerplay_table->PowerSavingClockTable.PowerSavingClockCount);
+ copy_clock_limits_array(hwmgr,
+ &pptable_information->power_saving_clock_max,
+ powerplay_table->PowerSavingClockTable.PowerSavingClockMax,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5544-drm-amdgpu-powerplay-factor-out-some-pptable-helpers.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5544-drm-amdgpu-powerplay-factor-out-some-pptable-helpers.patch
new file mode 100644
index 00000000..b3cbf5ef
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5544-drm-amdgpu-powerplay-factor-out-some-pptable-helpers.patch
@@ -0,0 +1,265 @@
+From 787c5a404019f291420d8dfe62ae5753c8d71b32 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Tue, 9 Oct 2018 16:03:53 -0500
+Subject: [PATCH 5544/5725] drm/amdgpu/powerplay: factor out some pptable
+ helpers
+
+Move copy_array helpers to smu_helper.c and share between
+vega12 and vega20.
+
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c | 44 ++++++++++++++++
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h | 12 +++++
+ .../amd/powerplay/hwmgr/vega12_processpptables.c | 58 ++++------------------
+ .../amd/powerplay/hwmgr/vega20_processpptables.c | 52 ++-----------------
+ 4 files changed, 70 insertions(+), 96 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
+index 8ad4e696..4714b5b 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
+@@ -39,6 +39,50 @@ uint16_t convert_to_vddc(uint8_t vid)
+ return (uint16_t) ((6200 - (vid * 25)) / VOLTAGE_SCALE);
+ }
+
++int phm_copy_clock_limits_array(
++ struct pp_hwmgr *hwmgr,
++ uint32_t **pptable_info_array,
++ const uint32_t *pptable_array,
++ uint32_t power_saving_clock_count)
++{
++ uint32_t array_size, i;
++ uint32_t *table;
++
++ array_size = sizeof(uint32_t) * power_saving_clock_count;
++ table = kzalloc(array_size, GFP_KERNEL);
++ if (NULL == table)
++ return -ENOMEM;
++
++ for (i = 0; i < power_saving_clock_count; i++)
++ table[i] = le32_to_cpu(pptable_array[i]);
++
++ *pptable_info_array = table;
++
++ return 0;
++}
++
++int phm_copy_overdrive_settings_limits_array(
++ struct pp_hwmgr *hwmgr,
++ uint32_t **pptable_info_array,
++ const uint32_t *pptable_array,
++ uint32_t od_setting_count)
++{
++ uint32_t array_size, i;
++ uint32_t *table;
++
++ array_size = sizeof(uint32_t) * od_setting_count;
++ table = kzalloc(array_size, GFP_KERNEL);
++ if (NULL == table)
++ return -ENOMEM;
++
++ for (i = 0; i < od_setting_count; i++)
++ table[i] = le32_to_cpu(pptable_array[i]);
++
++ *pptable_info_array = table;
++
++ return 0;
++}
++
+ uint32_t phm_set_field_to_u32(u32 offset, u32 original_data, u32 field, u32 size)
+ {
+ u32 mask = 0;
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
+index 5454289..ad33983 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
+@@ -47,6 +47,18 @@ struct watermarks {
+ uint32_t padding[7];
+ };
+
++int phm_copy_clock_limits_array(
++ struct pp_hwmgr *hwmgr,
++ uint32_t **pptable_info_array,
++ const uint32_t *pptable_array,
++ uint32_t power_saving_clock_count);
++
++int phm_copy_overdrive_settings_limits_array(
++ struct pp_hwmgr *hwmgr,
++ uint32_t **pptable_info_array,
++ const uint32_t *pptable_array,
++ uint32_t od_setting_count);
++
+ extern int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
+ uint32_t index,
+ uint32_t value, uint32_t mask);
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
+index e330058..a4ab78e 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
+@@ -99,50 +99,6 @@ static int set_platform_caps(struct pp_hwmgr *hwmgr, uint32_t powerplay_caps)
+ return 0;
+ }
+
+-static int copy_clock_limits_array(
+- struct pp_hwmgr *hwmgr,
+- uint32_t **pptable_info_array,
+- const uint32_t *pptable_array)
+-{
+- uint32_t array_size, i;
+- uint32_t *table;
+-
+- array_size = sizeof(uint32_t) * ATOM_VEGA12_PPCLOCK_COUNT;
+-
+- table = kzalloc(array_size, GFP_KERNEL);
+- if (NULL == table)
+- return -ENOMEM;
+-
+- for (i = 0; i < ATOM_VEGA12_PPCLOCK_COUNT; i++)
+- table[i] = le32_to_cpu(pptable_array[i]);
+-
+- *pptable_info_array = table;
+-
+- return 0;
+-}
+-
+-static int copy_overdrive_settings_limits_array(
+- struct pp_hwmgr *hwmgr,
+- uint32_t **pptable_info_array,
+- const uint32_t *pptable_array)
+-{
+- uint32_t array_size, i;
+- uint32_t *table;
+-
+- array_size = sizeof(uint32_t) * ATOM_VEGA12_ODSETTING_COUNT;
+-
+- table = kzalloc(array_size, GFP_KERNEL);
+- if (NULL == table)
+- return -ENOMEM;
+-
+- for (i = 0; i < ATOM_VEGA12_ODSETTING_COUNT; i++)
+- table[i] = le32_to_cpu(pptable_array[i]);
+-
+- *pptable_info_array = table;
+-
+- return 0;
+-}
+-
+ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable)
+ {
+ struct pp_atomfwctrl_smc_dpm_parameters smc_dpm_table;
+@@ -256,8 +212,14 @@ static int init_powerplay_table_information(
+ hwmgr->platform_descriptor.overdriveLimit.memoryClock =
+ le32_to_cpu(powerplay_table->ODSettingsMax[ATOM_VEGA12_ODSETTING_UCLKFMAX]);
+
+- copy_overdrive_settings_limits_array(hwmgr, &pptable_information->od_settings_max, powerplay_table->ODSettingsMax);
+- copy_overdrive_settings_limits_array(hwmgr, &pptable_information->od_settings_min, powerplay_table->ODSettingsMin);
++ phm_copy_overdrive_settings_limits_array(hwmgr,
++ &pptable_information->od_settings_max,
++ powerplay_table->ODSettingsMax,
++ ATOM_VEGA12_ODSETTING_COUNT);
++ phm_copy_overdrive_settings_limits_array(hwmgr,
++ &pptable_information->od_settings_min,
++ powerplay_table->ODSettingsMin,
++ ATOM_VEGA12_ODSETTING_COUNT);
+
+ /* hwmgr->platformDescriptor.minOverdriveVDDC = 0;
+ hwmgr->platformDescriptor.maxOverdriveVDDC = 0;
+@@ -285,8 +247,8 @@ static int init_powerplay_table_information(
+ PHM_PlatformCaps_PowerControl);
+ }
+
+- copy_clock_limits_array(hwmgr, &pptable_information->power_saving_clock_max, powerplay_table->PowerSavingClockMax);
+- copy_clock_limits_array(hwmgr, &pptable_information->power_saving_clock_min, powerplay_table->PowerSavingClockMin);
++ phm_copy_clock_limits_array(hwmgr, &pptable_information->power_saving_clock_max, powerplay_table->PowerSavingClockMax, ATOM_VEGA12_PPCLOCK_COUNT);
++ phm_copy_clock_limits_array(hwmgr, &pptable_information->power_saving_clock_min, powerplay_table->PowerSavingClockMin, ATOM_VEGA12_PPCLOCK_COUNT);
+
+ pptable_information->smc_pptable = (PPTable_t *)kmalloc(sizeof(PPTable_t), GFP_KERNEL);
+ if (pptable_information->smc_pptable == NULL)
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
+index 956aa6a..32fe384 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
+@@ -661,50 +661,6 @@ static int set_platform_caps(struct pp_hwmgr *hwmgr, uint32_t powerplay_caps)
+ return 0;
+ }
+
+-static int copy_clock_limits_array(
+- struct pp_hwmgr *hwmgr,
+- uint32_t **pptable_info_array,
+- const uint32_t *pptable_array,
+- uint32_t power_saving_clock_count)
+-{
+- uint32_t array_size, i;
+- uint32_t *table;
+-
+- array_size = sizeof(uint32_t) * power_saving_clock_count;
+- table = kzalloc(array_size, GFP_KERNEL);
+- if (NULL == table)
+- return -ENOMEM;
+-
+- for (i = 0; i < power_saving_clock_count; i++)
+- table[i] = le32_to_cpu(pptable_array[i]);
+-
+- *pptable_info_array = table;
+-
+- return 0;
+-}
+-
+-static int copy_overdrive_settings_limits_array(
+- struct pp_hwmgr *hwmgr,
+- uint32_t **pptable_info_array,
+- const uint32_t *pptable_array,
+- uint32_t od_setting_count)
+-{
+- uint32_t array_size, i;
+- uint32_t *table;
+-
+- array_size = sizeof(uint32_t) * od_setting_count;
+- table = kzalloc(array_size, GFP_KERNEL);
+- if (NULL == table)
+- return -ENOMEM;
+-
+- for (i = 0; i < od_setting_count; i++)
+- table[i] = le32_to_cpu(pptable_array[i]);
+-
+- *pptable_info_array = table;
+-
+- return 0;
+-}
+-
+ static int copy_overdrive_feature_capabilities_array(
+ struct pp_hwmgr *hwmgr,
+ uint8_t **pptable_info_array,
+@@ -859,11 +815,11 @@ static int init_powerplay_table_information(
+ &pptable_information->od_feature_capabilities,
+ powerplay_table->OverDrive8Table.ODFeatureCapabilities,
+ od_feature_count);
+- copy_overdrive_settings_limits_array(hwmgr,
++ phm_copy_overdrive_settings_limits_array(hwmgr,
+ &pptable_information->od_settings_max,
+ powerplay_table->OverDrive8Table.ODSettingsMax,
+ od_setting_count);
+- copy_overdrive_settings_limits_array(hwmgr,
++ phm_copy_overdrive_settings_limits_array(hwmgr,
+ &pptable_information->od_settings_min,
+ powerplay_table->OverDrive8Table.ODSettingsMin,
+ od_setting_count);
+@@ -890,11 +846,11 @@ static int init_powerplay_table_information(
+ ATOM_VEGA20_PPCLOCK_COUNT) ?
+ ATOM_VEGA20_PPCLOCK_COUNT :
+ le32_to_cpu(powerplay_table->PowerSavingClockTable.PowerSavingClockCount);
+- copy_clock_limits_array(hwmgr,
++ phm_copy_clock_limits_array(hwmgr,
+ &pptable_information->power_saving_clock_max,
+ powerplay_table->PowerSavingClockTable.PowerSavingClockMax,
+ power_saving_clock_count);
+- copy_clock_limits_array(hwmgr,
++ phm_copy_clock_limits_array(hwmgr,
+ &pptable_information->power_saving_clock_min,
+ powerplay_table->PowerSavingClockTable.PowerSavingClockMin,
+ power_saving_clock_count);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5545-drm-amdgpu-Suppress-keypresses-from-ACPI_VIDEO-event.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5545-drm-amdgpu-Suppress-keypresses-from-ACPI_VIDEO-event.patch
new file mode 100644
index 00000000..39bad758
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5545-drm-amdgpu-Suppress-keypresses-from-ACPI_VIDEO-event.patch
@@ -0,0 +1,71 @@
+From 3eea51edb250242164611dfe50154dc10c810998 Mon Sep 17 00:00:00 2001
+From: Lyude Paul <lyude@redhat.com>
+Date: Fri, 21 Sep 2018 20:43:44 -0400
+Subject: [PATCH 5545/5725] drm/amdgpu: Suppress keypresses from ACPI_VIDEO
+ events
+
+Currently we return NOTIFY_DONE for any event which we don't think is
+ours. However, many laptops will send more than just an ATIF event; they
+will also send an ACPI_VIDEO_NOTIFY_PROBE event. Since we don't
+check for this, we return NOTIFY_DONE which causes a keypress for the
+ACPI event to be propagated to userspace. This is the equivalent of
+someone pressing the display key on a laptop every time there's a
+hotplug event.
+
+So, check for ACPI_VIDEO_NOTIFY_PROBE events and suppress keypresses
+from them.
+
+Signed-off-by: Lyude Paul <lyude@redhat.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 17 ++++++++++++-----
+ 1 file changed, 12 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+index 6488e90..7f0afc5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+@@ -359,7 +359,9 @@ static int amdgpu_atif_get_sbios_requests(struct amdgpu_atif *atif,
+ *
+ * Checks the acpi event and if it matches an atif event,
+ * handles it.
+- * Returns NOTIFY code
++ *
++ * Returns:
++ * NOTIFY_BAD or NOTIFY_DONE, depending on the event.
+ */
+ static int amdgpu_atif_handler(struct amdgpu_device *adev,
+ struct acpi_bus_event *event)
+@@ -373,11 +375,16 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
+ if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
+ return NOTIFY_DONE;
+
++ /* Is this actually our event? */
+ if (!atif ||
+ !atif->notification_cfg.enabled ||
+- event->type != atif->notification_cfg.command_code)
+- /* Not our event */
+- return NOTIFY_DONE;
++ event->type != atif->notification_cfg.command_code) {
++ /* These events will generate keypresses otherwise */
++ if (event->type == ACPI_VIDEO_NOTIFY_PROBE)
++ return NOTIFY_BAD;
++ else
++ return NOTIFY_DONE;
++ }
+
+ if (atif->functions.sbios_requests) {
+ struct atif_sbios_requests req;
+@@ -386,7 +393,7 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
+ count = amdgpu_atif_get_sbios_requests(atif, &req);
+
+ if (count <= 0)
+- return NOTIFY_DONE;
++ return NOTIFY_BAD;
+
+ DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5546-drm-amdgpu-powerplay-fix-missing-break-in-switch-sta.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5546-drm-amdgpu-powerplay-fix-missing-break-in-switch-sta.patch
new file mode 100644
index 00000000..cf9d3a70
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5546-drm-amdgpu-powerplay-fix-missing-break-in-switch-sta.patch
@@ -0,0 +1,122 @@
+From 52ef6795eeeaef4314fdc5c0dc9034cc6598d7d9 Mon Sep 17 00:00:00 2001
+From: Colin Ian King <colin.king@canonical.com>
+Date: Mon, 8 Oct 2018 17:22:28 +0100
+Subject: [PATCH 5546/5725] drm/amdgpu/powerplay: fix missing break in switch
+ statements
+
+There are several switch statements that are missing break statements.
+Add the missing breaks to handle these fall-through corner cases.
+
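+Without those breaks an unmatched inner case falls through into the next
+outer case label and can return the wrong offset; condensed from the
+ci_get_offsetof() hunk below:
+
+    switch (type) {
+    case SMU_SoftRegisters:
+        switch (member) {
+        case DRAM_LOG_BUFF_SIZE:
+            return offsetof(SMU7_SoftRegisters, DRAM_LOG_BUFF_SIZE);
+        }
+        break;  /* previously missing */
+    case SMU_Discrete_DpmTable:
+        switch (member) {
+        case LowSclkInterruptThreshold:
+            return offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT);
+        }
+        break;  /* previously missing */
+    }
+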
+Detected by CoverityScan, CID#1457175 ("Missing break in switch")
+
+Fixes: 18aafc59b106 ("drm/amd/powerplay: implement fw related smu interface for iceland.")
+Acked-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Colin Ian King <colin.king@canonical.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c | 2 ++
+ drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c | 2 ++
+ drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c | 2 ++
+ drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c | 2 ++
+ drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c | 2 ++
+ 5 files changed, 10 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
+index 18643e0..669bd0c 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
+@@ -2269,11 +2269,13 @@ static uint32_t ci_get_offsetof(uint32_t type, uint32_t member)
+ case DRAM_LOG_BUFF_SIZE:
+ return offsetof(SMU7_SoftRegisters, DRAM_LOG_BUFF_SIZE);
+ }
++ break;
+ case SMU_Discrete_DpmTable:
+ switch (member) {
+ case LowSclkInterruptThreshold:
+ return offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT);
+ }
++ break;
+ }
+ pr_debug("can't get the offset of type %x member %x\n", type, member);
+ return 0;
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+index b6b62a7..bc8375c 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+@@ -2321,6 +2321,7 @@ static uint32_t fiji_get_offsetof(uint32_t type, uint32_t member)
+ case DRAM_LOG_BUFF_SIZE:
+ return offsetof(SMU73_SoftRegisters, DRAM_LOG_BUFF_SIZE);
+ }
++ break;
+ case SMU_Discrete_DpmTable:
+ switch (member) {
+ case UvdBootLevel:
+@@ -2330,6 +2331,7 @@ static uint32_t fiji_get_offsetof(uint32_t type, uint32_t member)
+ case LowSclkInterruptThreshold:
+ return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold);
+ }
++ break;
+ }
+ pr_warn("can't get the offset of type %x member %x\n", type, member);
+ return 0;
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
+index 374aa4a..375ccf6 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
+@@ -2236,11 +2236,13 @@ static uint32_t iceland_get_offsetof(uint32_t type, uint32_t member)
+ case DRAM_LOG_BUFF_SIZE:
+ return offsetof(SMU71_SoftRegisters, DRAM_LOG_BUFF_SIZE);
+ }
++ break;
+ case SMU_Discrete_DpmTable:
+ switch (member) {
+ case LowSclkInterruptThreshold:
+ return offsetof(SMU71_Discrete_DpmTable, LowSclkInterruptThreshold);
+ }
++ break;
+ }
+ pr_warn("can't get the offset of type %x member %x\n", type, member);
+ return 0;
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+index 1f366c0..3ed6c5f 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+@@ -2628,6 +2628,7 @@ static uint32_t tonga_get_offsetof(uint32_t type, uint32_t member)
+ case DRAM_LOG_BUFF_SIZE:
+ return offsetof(SMU72_SoftRegisters, DRAM_LOG_BUFF_SIZE);
+ }
++ break;
+ case SMU_Discrete_DpmTable:
+ switch (member) {
+ case UvdBootLevel:
+@@ -2637,6 +2638,7 @@ static uint32_t tonga_get_offsetof(uint32_t type, uint32_t member)
+ case LowSclkInterruptThreshold:
+ return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold);
+ }
++ break;
+ }
+ pr_warn("can't get the offset of type %x member %x\n", type, member);
+ return 0;
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
+index 3d415fa..9f71512 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
+@@ -2185,6 +2185,7 @@ static uint32_t vegam_get_offsetof(uint32_t type, uint32_t member)
+ case DRAM_LOG_BUFF_SIZE:
+ return offsetof(SMU75_SoftRegisters, DRAM_LOG_BUFF_SIZE);
+ }
++ break;
+ case SMU_Discrete_DpmTable:
+ switch (member) {
+ case UvdBootLevel:
+@@ -2194,6 +2195,7 @@ static uint32_t vegam_get_offsetof(uint32_t type, uint32_t member)
+ case LowSclkInterruptThreshold:
+ return offsetof(SMU75_Discrete_DpmTable, LowSclkInterruptThreshold);
+ }
++ break;
+ }
+ pr_warn("can't get the offset of type %x member %x\n", type, member);
+ return 0;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5547-drm-amdgpu-remove-set-but-not-used-variable-ring-in-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5547-drm-amdgpu-remove-set-but-not-used-variable-ring-in-.patch
new file mode 100644
index 00000000..d1109fbc
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5547-drm-amdgpu-remove-set-but-not-used-variable-ring-in-.patch
@@ -0,0 +1,37 @@
+From 9ef2f2d94bda0f783eb142a2b55b4715af83a570 Mon Sep 17 00:00:00 2001
+From: YueHaibing <yuehaibing@huawei.com>
+Date: Sat, 29 Sep 2018 11:39:14 +0000
+Subject: [PATCH 5547/5725] drm/amdgpu: remove set but not used variable 'ring'
+ in psp_v11_0_ring_stop
+
+Fixes gcc '-Wunused-but-set-variable' warning:
+
+drivers/gpu/drm/amd/amdgpu/psp_v11_0.c: In function 'psp_v11_0_ring_stop':
+drivers/gpu/drm/amd/amdgpu/psp_v11_0.c:309:19: warning:
+ variable 'ring' set but not used [-Wunused-but-set-variable]
+
+Signed-off-by: YueHaibing <yuehaibing@huawei.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+index 9217af0..3f3fac2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+@@ -306,11 +306,8 @@ static int psp_v11_0_ring_stop(struct psp_context *psp,
+ enum psp_ring_type ring_type)
+ {
+ int ret = 0;
+- struct psp_ring *ring;
+ struct amdgpu_device *adev = psp->adev;
+
+- ring = &psp->km_ring;
+-
+ /* Write the ring destroy command to C2PMSG_64 */
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_DESTROY_RINGS);
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5548-drm-amdgpu-remove-set-but-not-used-variable-header.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5548-drm-amdgpu-remove-set-but-not-used-variable-header.patch
new file mode 100644
index 00000000..4fc19059
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5548-drm-amdgpu-remove-set-but-not-used-variable-header.patch
@@ -0,0 +1,41 @@
+From ece9ca999f04d15ee05d2985f7cfcdc5a30126f9 Mon Sep 17 00:00:00 2001
+From: YueHaibing <yuehaibing@huawei.com>
+Date: Wed, 26 Sep 2018 14:15:34 +0000
+Subject: [PATCH 5548/5725] drm/amdgpu: remove set but not used variable
+ 'header'
+
+Fixes gcc '-Wunused-but-set-variable' warning:
+
+drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c: In function 'amdgpu_ucode_init_bo':
+drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c:431:39: warning:
+ variable 'header' set but not used [-Wunused-but-set-variable]
+
+Signed-off-by: YueHaibing <yuehaibing@huawei.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+index 29b8b82..67fc281 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+@@ -451,7 +451,6 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
+ uint64_t fw_offset = 0;
+ int i;
+ struct amdgpu_firmware_info *ucode = NULL;
+- const struct common_firmware_header *header = NULL;
+
+ /* for baremetal, the ucode is allocated in gtt, so don't need to fill the bo when reset/suspend */
+ if (!amdgpu_sriov_vf(adev) && (adev->in_gpu_reset || adev->in_suspend))
+@@ -472,7 +471,6 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
+ for (i = 0; i < adev->firmware.max_ucodes; i++) {
+ ucode = &adev->firmware.ucode[i];
+ if (ucode->fw) {
+- header = (const struct common_firmware_header *)ucode->fw->data;
+ amdgpu_ucode_init_single_fw(adev, ucode, adev->firmware.fw_buf_mc + fw_offset,
+ adev->firmware.fw_buf_ptr + fw_offset);
+ if (i == AMDGPU_UCODE_ID_CP_MEC1 &&
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5549-drm-amd-powerplay-translate-power_profile-mode-to-pp.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5549-drm-amd-powerplay-translate-power_profile-mode-to-pp.patch
new file mode 100644
index 00000000..2c262aeb
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5549-drm-amd-powerplay-translate-power_profile-mode-to-pp.patch
@@ -0,0 +1,97 @@
+From fe06020354ac862136b95365cb4aa9a5a7233eb4 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Wed, 10 Oct 2018 15:00:28 +0800
+Subject: [PATCH 5549/5725] drm/amd/powerplay: translate power_profile mode to
+ pplib workload type
+
+Correctly translate the power profile specified by the user to the
+workload type accepted by the SMU firmware.
+
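+A sketch of the mask now sent to the SMU (helper and message names as
+used in this patch):
+
+    workload_type =
+        conv_power_profile_to_pplib_workload(hwmgr->power_profile_mode);
+    smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
+                                        1 << workload_type);
+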
+Change-Id: I4de525d6a84a80c2fcfc1a6de2a465a7a07868a4
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 42 ++++++++++++++++++++--
+ 1 file changed, 39 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+index 958af7b..b4dbbb7 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+@@ -3175,6 +3175,34 @@ static int vega20_power_off_asic(struct pp_hwmgr *hwmgr)
+ return result;
+ }
+
++static int conv_power_profile_to_pplib_workload(int power_profile)
++{
++ int pplib_workload = 0;
++
++ switch (power_profile) {
++ case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
++ pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
++ break;
++ case PP_SMC_POWER_PROFILE_POWERSAVING:
++ pplib_workload = WORKLOAD_PPLIB_POWER_SAVING_BIT;
++ break;
++ case PP_SMC_POWER_PROFILE_VIDEO:
++ pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT;
++ break;
++ case PP_SMC_POWER_PROFILE_VR:
++ pplib_workload = WORKLOAD_PPLIB_VR_BIT;
++ break;
++ case PP_SMC_POWER_PROFILE_COMPUTE:
++ pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT;
++ break;
++ case PP_SMC_POWER_PROFILE_CUSTOM:
++ pplib_workload = WORKLOAD_PPLIB_CUSTOM_BIT;
++ break;
++ }
++
++ return pplib_workload;
++}
++
+ static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
+ {
+ DpmActivityMonitorCoeffInt_t activity_monitor;
+@@ -3210,7 +3238,7 @@ static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
+
+ for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
+ /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+- workload_type = i + 1;
++ workload_type = conv_power_profile_to_pplib_workload(i);
+ result = vega20_get_activity_monitor_coeff(hwmgr,
+ (uint8_t *)(&activity_monitor), workload_type);
+ PP_ASSERT_WITH_CODE(!result,
+@@ -3283,10 +3311,15 @@ static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
+ static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
+ {
+ DpmActivityMonitorCoeffInt_t activity_monitor;
+- int result = 0;
++ int workload_type, result = 0;
+
+ hwmgr->power_profile_mode = input[size];
+
++ if (hwmgr->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
++ pr_err("Invalid power profile mode %d\n", hwmgr->power_profile_mode);
++ return -EINVAL;
++ }
++
+ if (hwmgr->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
+ if (size < 10)
+ return -EINVAL;
+@@ -3353,8 +3386,11 @@ static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
+ return result);
+ }
+
++ /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
++ workload_type =
++ conv_power_profile_to_pplib_workload(hwmgr->power_profile_mode);
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
+- 1 << hwmgr->power_profile_mode);
++ 1 << workload_type);
+
+ return 0;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5550-drm-amd-powerplay-hint-when-power-profile-setting-is.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5550-drm-amd-powerplay-hint-when-power-profile-setting-is.patch
new file mode 100644
index 00000000..69b9b33b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5550-drm-amd-powerplay-hint-when-power-profile-setting-is.patch
@@ -0,0 +1,39 @@
+From be628927eb7def6162fbc201756acecfbe269c74 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Wed, 10 Oct 2018 15:24:59 +0800
+Subject: [PATCH 5550/5725] drm/amd/powerplay: hint when power profile setting
+ is not supported
+
+Give user some hints when the power profile setting is not supported.
+
+Change-Id: Iba2b938d02a039ccdee32f9aca185f79fd818796
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+index f2cb6e1..d6f2e05 100644
+--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
++++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+@@ -904,9 +904,14 @@ static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
+ pr_info("%s was not implemented.\n", __func__);
+ return ret;
+ }
++
++ if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
++ pr_info("power profile setting is for manual dpm mode only.\n");
++ return ret;
++ }
++
+ mutex_lock(&hwmgr->smu_lock);
+- if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
+- ret = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
++ ret = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
+ mutex_unlock(&hwmgr->smu_lock);
+ return ret;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5551-drm-amdgpu-Set-the-default-value-about-gds-vmid0-siz.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5551-drm-amdgpu-Set-the-default-value-about-gds-vmid0-siz.patch
new file mode 100644
index 00000000..01c65730
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5551-drm-amdgpu-Set-the-default-value-about-gds-vmid0-siz.patch
@@ -0,0 +1,51 @@
+From 0e43ca7630a631e18fddc5c1b9a9a1be079b48f7 Mon Sep 17 00:00:00 2001
+From: Emily Deng <Emily.Deng@amd.com>
+Date: Fri, 12 Oct 2018 18:14:32 +0800
+Subject: [PATCH 5551/5725] drm/amdgpu: Set the default value about gds vmid0
+ size
+
+For SR-IOV, when a Windows guest runs first and a Linux guest runs next, the gds
+vmid0 size will have been reset to 0 by the Windows guest. So if the value has
+been reset to 0, set it back to the default value in the Linux guest.
+
+v2:
+Fixed value instead of reading mmGDS_VMID0_SIZE.
+
+v3:
+Set the default value of the switch.
+
+Signed-off-by: Emily Deng <Emily.Deng@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 15 ++++++++++++++-
+ 1 file changed, 14 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 178b375..60b1e11 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -4919,7 +4919,20 @@ static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
+ static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
+ {
+ /* init asci gds info */
+- adev->gds.mem.total_size = RREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE);
++ switch (adev->asic_type) {
++ case CHIP_VEGA10:
++ case CHIP_VEGA12:
++ case CHIP_VEGA20:
++ adev->gds.mem.total_size = 0x10000;
++ break;
++ case CHIP_RAVEN:
++ adev->gds.mem.total_size = 0x1000;
++ break;
++ default:
++ adev->gds.mem.total_size = 0x10000;
++ break;
++ }
++
+ adev->gds.gws.total_size = 64;
+ adev->gds.oa.total_size = 16;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5552-drm-amdgpu-vcn-Add-new-register-offset-mask-for-VCN.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5552-drm-amdgpu-vcn-Add-new-register-offset-mask-for-VCN.patch
new file mode 100644
index 00000000..5a8e8d1d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5552-drm-amdgpu-vcn-Add-new-register-offset-mask-for-VCN.patch
@@ -0,0 +1,101 @@
+From 75272cc31f41c912385622cfc5fcf1b9765252fb Mon Sep 17 00:00:00 2001
+From: James Zhu <James.Zhu@amd.com>
+Date: Tue, 2 Oct 2018 14:38:18 -0400
+Subject: [PATCH 5552/5725] drm/amdgpu/vcn:Add new register offset/mask for VCN
+
+Add new register offset/mask for VCN to support
+latest VCN implementation.
+
+Signed-off-by: James Zhu <James.Zhu@amd.com>
+Acked-by: Leo Liu <leo.liu@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h | 14 ++++++++++++++
+ .../gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h | 18 ++++++++++++++++++
+ 2 files changed, 32 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h
+index 4b7da58..442ca7c 100644
+--- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h
++++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h
+@@ -82,6 +82,18 @@
+ #define mmUVD_LCM_CGC_CNTRL 0x0123
+ #define mmUVD_LCM_CGC_CNTRL_BASE_IDX 1
+
++#define mmUVD_MIF_CURR_UV_ADDR_CONFIG 0x0184
++#define mmUVD_MIF_CURR_UV_ADDR_CONFIG_BASE_IDX 1
++#define mmUVD_MIF_REF_UV_ADDR_CONFIG 0x0185
++#define mmUVD_MIF_REF_UV_ADDR_CONFIG_BASE_IDX 1
++#define mmUVD_MIF_RECON1_UV_ADDR_CONFIG 0x0186
++#define mmUVD_MIF_RECON1_UV_ADDR_CONFIG_BASE_IDX 1
++#define mmUVD_MIF_CURR_ADDR_CONFIG 0x0192
++#define mmUVD_MIF_CURR_ADDR_CONFIG_BASE_IDX 1
++#define mmUVD_MIF_REF_ADDR_CONFIG 0x0193
++#define mmUVD_MIF_REF_ADDR_CONFIG_BASE_IDX 1
++#define mmUVD_MIF_RECON1_ADDR_CONFIG 0x01c5
++#define mmUVD_MIF_RECON1_ADDR_CONFIG_BASE_IDX 1
+
+ // addressBlock: uvd_uvdnpdec
+ // base address: 0x20000
+@@ -327,6 +339,8 @@
+ #define mmUVD_LMI_VM_CTRL_BASE_IDX 1
+ #define mmUVD_LMI_SWAP_CNTL 0x056d
+ #define mmUVD_LMI_SWAP_CNTL_BASE_IDX 1
++#define mmUVD_MPC_CNTL 0x0577
++#define mmUVD_MPC_CNTL_BASE_IDX 1
+ #define mmUVD_MPC_SET_MUXA0 0x0579
+ #define mmUVD_MPC_SET_MUXA0_BASE_IDX 1
+ #define mmUVD_MPC_SET_MUXA1 0x057a
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h
+index 26382f5..63457f9 100644
+--- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h
++++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h
+@@ -985,6 +985,7 @@
+ #define UVD_LMI_CTRL2__STALL_ARB_UMC__SHIFT 0x8
+ #define UVD_LMI_CTRL2__MC_READ_ID_SEL__SHIFT 0x9
+ #define UVD_LMI_CTRL2__MC_WRITE_ID_SEL__SHIFT 0xb
++#define UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT 0x11
+ #define UVD_LMI_CTRL2__SPH_DIS_MASK 0x00000001L
+ #define UVD_LMI_CTRL2__STALL_ARB_MASK 0x00000002L
+ #define UVD_LMI_CTRL2__ASSERT_UMC_URGENT_MASK 0x00000004L
+@@ -993,6 +994,7 @@
+ #define UVD_LMI_CTRL2__STALL_ARB_UMC_MASK 0x00000100L
+ #define UVD_LMI_CTRL2__MC_READ_ID_SEL_MASK 0x00000600L
+ #define UVD_LMI_CTRL2__MC_WRITE_ID_SEL_MASK 0x00001800L
++#define UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM_MASK 0x01FE0000L
+ //UVD_MASTINT_EN
+ #define UVD_MASTINT_EN__OVERRUN_RST__SHIFT 0x0
+ #define UVD_MASTINT_EN__VCPU_EN__SHIFT 0x1
+@@ -1045,6 +1047,19 @@
+ #define UVD_LMI_CTRL__DB_IT_DATA_COHERENCY_EN_MASK 0x01000000L
+ #define UVD_LMI_CTRL__IT_IT_DATA_COHERENCY_EN_MASK 0x02000000L
+ #define UVD_LMI_CTRL__RFU_MASK 0xF8000000L
++//UVD_LMI_STATUS
++#define UVD_LMI_STATUS__READ_CLEAN__SHIFT 0x0
++#define UVD_LMI_STATUS__WRITE_CLEAN__SHIFT 0x1
++#define UVD_LMI_STATUS__WRITE_CLEAN_RAW__SHIFT 0x2
++#define UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN__SHIFT 0x3
++#define UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW__SHIFT 0x6
++#define UVD_LMI_STATUS__UMC_READ_CLEAN_RAW__SHIFT 0x9
++#define UVD_LMI_STATUS__READ_CLEAN_MASK 0x00000001L
++#define UVD_LMI_STATUS__WRITE_CLEAN_MASK 0x00000002L
++#define UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK 0x00000004L
++#define UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK 0x00000008L
++#define UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK 0x00000040L
++#define UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK 0x00000200L
+ //UVD_LMI_SWAP_CNTL
+ #define UVD_LMI_SWAP_CNTL__RB_MC_SWAP__SHIFT 0x0
+ #define UVD_LMI_SWAP_CNTL__IB_MC_SWAP__SHIFT 0x2
+@@ -1078,6 +1093,9 @@
+ #define UVD_LMI_SWAP_CNTL__RB_WR_MC_SWAP_MASK 0x0C000000L
+ #define UVD_LMI_SWAP_CNTL__RE_MC_SWAP_MASK 0x30000000L
+ #define UVD_LMI_SWAP_CNTL__MP_MC_SWAP_MASK 0xC0000000L
++//UVD_MPC_CNTL
++#define UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT 0x3
++#define UVD_MPC_CNTL__REPLACEMENT_MODE_MASK 0x00000038L
+ //UVD_MPC_SET_MUXA0
+ #define UVD_MPC_SET_MUXA0__VARA_0__SHIFT 0x0
+ #define UVD_MPC_SET_MUXA0__VARA_1__SHIFT 0x6
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5553-drm-amdgpu-vcn-Update-latest-UVD_MPC-register-for-VC.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5553-drm-amdgpu-vcn-Update-latest-UVD_MPC-register-for-VC.patch
new file mode 100644
index 00000000..39f59202
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5553-drm-amdgpu-vcn-Update-latest-UVD_MPC-register-for-VC.patch
@@ -0,0 +1,89 @@
+From d4b9bec1d6b4c89cd1b220849de0dde8795aad51 Mon Sep 17 00:00:00 2001
+From: James Zhu <James.Zhu@amd.com>
+Date: Tue, 2 Oct 2018 14:55:46 -0400
+Subject: [PATCH 5553/5725] drm/amdgpu/vcn:Update latest UVD_MPC register for
+ VCN
+
+Update latest UVD_MPC register for VCN. Use defined
+macros instead of raw values for readability.
+
+Signed-off-by: James Zhu <James.Zhu@amd.com>
+Acked-by: Leo Liu <leo.liu@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 52 +++++++++++++++++++++++++++--------
+ 1 file changed, 40 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index 9cf544d..6796a82 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -785,12 +785,27 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
+ #endif
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
+
+- WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0, 0x40c2040);
+- WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA1, 0x0);
+- WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0, 0x40c2040);
+- WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB1, 0x0);
+- WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_ALU, 0);
+- WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX, 0x88);
++ tmp = RREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL);
++ tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
++ tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
++ WREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL, tmp);
++
++ WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0,
++ ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
++ (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
++ (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
++ (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
++
++ WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0,
++ ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
++ (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
++ (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
++ (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
++
++ WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX,
++ ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
++ (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
++ (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
+
+ /* take all subblocks out of reset, except VCPU */
+ WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
+@@ -981,12 +996,25 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
+ #endif
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl, 0xFFFFFFFF, 0);
+
+- WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUXA0, 0x40c2040, 0xFFFFFFFF, 0);
+- WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUXA1, 0x0, 0xFFFFFFFF, 0);
+- WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUXB0, 0x40c2040, 0xFFFFFFFF, 0);
+- WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUXB1, 0x0, 0xFFFFFFFF, 0);
+- WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_ALU, 0, 0xFFFFFFFF, 0);
+- WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUX, 0x88, 0xFFFFFFFF, 0);
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_CNTL,
++ 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0xFFFFFFFF, 0);
++
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUXA0,
++ ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
++ (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
++ (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
++ (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0xFFFFFFFF, 0);
++
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUXB0,
++ ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
++ (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
++ (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
++ (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0xFFFFFFFF, 0);
++
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUX,
++ ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
++ (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
++ (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0xFFFFFFFF, 0);
+
+ vcn_v1_0_mc_resume_dpg_mode(adev);
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5554-drm-amdgpu-vcn-Update-latest-spg-mode-stop-for-VCN.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5554-drm-amdgpu-vcn-Update-latest-spg-mode-stop-for-VCN.patch
new file mode 100644
index 00000000..e36ea39f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5554-drm-amdgpu-vcn-Update-latest-spg-mode-stop-for-VCN.patch
@@ -0,0 +1,76 @@
+From b98960548b4802edbdfd1d8ff01bb266f66c15c0 Mon Sep 17 00:00:00 2001
+From: James Zhu <James.Zhu@amd.com>
+Date: Wed, 3 Oct 2018 10:24:43 -0400
+Subject: [PATCH 5554/5725] drm/amdgpu/vcn:Update latest spg mode stop for VCN
+
+Update latest static power gate mode stop function for VCN
+
+Signed-off-by: James Zhu <James.Zhu@amd.com>
+Acked-by: Leo Liu <leo.liu@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 41 ++++++++++++++++++++++-------------
+ 1 file changed, 26 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index 6796a82..015b97f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -1123,28 +1123,39 @@ static int vcn_v1_0_start(struct amdgpu_device *adev)
+ */
+ static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev)
+ {
+- /* force RBC into idle state */
+- WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, 0x11010101);
++ int ret_code, tmp;
+
+- /* Stall UMC and register bus before resetting VCPU */
+- WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
+- UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
+- ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+- mdelay(1);
++ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, ret_code);
++
++ tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
++ UVD_LMI_STATUS__READ_CLEAN_MASK |
++ UVD_LMI_STATUS__WRITE_CLEAN_MASK |
++ UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
++ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp, ret_code);
+
+ /* put VCPU into reset */
+- WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
+- UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+- mdelay(5);
++ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
++ UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
++ ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
++
++ tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
++ UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
++ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp, ret_code);
+
+ /* disable VCPU clock */
+- WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, 0x0);
++ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0,
++ ~UVD_VCPU_CNTL__CLK_EN_MASK);
+
+- /* Unstall UMC and register bus */
+- WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
+- ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
++ /* reset LMI UMC/LMI */
++ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
++ UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK,
++ ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
++
++ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
++ UVD_SOFT_RESET__LMI_SOFT_RESET_MASK,
++ ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);
+
+- WREG32_SOC15(VCN, 0, mmUVD_STATUS, 0);
++ WREG32_SOC15(UVD, 0, mmUVD_STATUS, 0);
+
+ vcn_v1_0_enable_clock_gating(adev);
+ vcn_1_0_enable_static_power_gating(adev);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5555-drm-amdgpu-vcn-Add-ring-W-R-PTR-check-for-VCN-DPG-mo.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5555-drm-amdgpu-vcn-Add-ring-W-R-PTR-check-for-VCN-DPG-mo.patch
new file mode 100644
index 00000000..2c3123bd
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5555-drm-amdgpu-vcn-Add-ring-W-R-PTR-check-for-VCN-DPG-mo.patch
@@ -0,0 +1,40 @@
+From 2878fdbb87fda3bc0734772aaf7e4769046cd0b8 Mon Sep 17 00:00:00 2001
+From: James Zhu <James.Zhu@amd.com>
+Date: Wed, 3 Oct 2018 17:36:58 -0400
+Subject: [PATCH 5555/5725] drm/amdgpu/vcn:Add ring W/R PTR check for VCN DPG
+ mode stop
+
+Add a ring write/read pointer check for VCN dynamic power gate mode
+stop, to make sure that no job is left in the ring before DPG mode is turned off.
+
+Signed-off-by: James Zhu <James.Zhu@amd.com>
+Acked-by: Leo Liu <leo.liu@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index 015b97f..7a226d0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -1171,6 +1171,16 @@ static int vcn_v1_0_stop_dpg_mode(struct amdgpu_device *adev)
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+
++ if (ret_code) {
++ int tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
++ /* wait for read ptr to be equal to write ptr */
++ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
++
++ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
++ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
++ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
++ }
++
+ /* disable dynamic power gating mode */
+ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
+ ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5556-drm-amdgpu-vcn-Reduce-unnecessary-local-variable.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5556-drm-amdgpu-vcn-Reduce-unnecessary-local-variable.patch
new file mode 100644
index 00000000..6dfe6069
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5556-drm-amdgpu-vcn-Reduce-unnecessary-local-variable.patch
@@ -0,0 +1,58 @@
+From a6a1fb11d6b8e0a3750d69bf97d83c5be4b492f4 Mon Sep 17 00:00:00 2001
+From: James Zhu <James.Zhu@amd.com>
+Date: Thu, 4 Oct 2018 09:29:22 -0400
+Subject: [PATCH 5556/5725] drm/amdgpu/vcn:Reduce unnecessary local variable
+
+Reduce unnecessary local variable.
+
+Signed-off-by: James Zhu <James.Zhu@amd.com>
+Acked-by: Leo Liu <leo.liu@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 18 +++++++++---------
+ 1 file changed, 9 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index 7a226d0..dd99e88 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -938,7 +938,7 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
+ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
+ {
+ struct amdgpu_ring *ring = &adev->vcn.ring_dec;
+- uint32_t rb_bufsz, tmp, reg_data;
++ uint32_t rb_bufsz, tmp;
+ uint32_t lmi_swap_cntl;
+
+ /* disable byte swapping */
+@@ -947,19 +947,19 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
+ vcn_1_0_enable_static_power_gating(adev);
+
+ /* enable dynamic power gating mode */
+- reg_data = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
+- reg_data |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
+- reg_data |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
+- WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, reg_data);
++ tmp = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
++ tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
++ tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
++ WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, tmp);
+
+ /* enable clock gating */
+ vcn_v1_0_clock_gating_dpg_mode(adev, 0);
+
+ /* enable VCPU clock */
+- reg_data = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
+- reg_data |= UVD_VCPU_CNTL__CLK_EN_MASK;
+- reg_data |= UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK;
+- WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CNTL, reg_data, 0xFFFFFFFF, 0);
++ tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
++ tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
++ tmp |= UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK;
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CNTL, tmp, 0xFFFFFFFF, 0);
+
+ /* disable interupt */
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MASTINT_EN,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5557-drm-amdgpu-vcn-Update-DPG-mode-VCN-memory-control.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5557-drm-amdgpu-vcn-Update-DPG-mode-VCN-memory-control.patch
new file mode 100644
index 00000000..b9ec375d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5557-drm-amdgpu-vcn-Update-DPG-mode-VCN-memory-control.patch
@@ -0,0 +1,58 @@
+From b339780ba40c96eb43c52c6064be76e90970a1b9 Mon Sep 17 00:00:00 2001
+From: James Zhu <James.Zhu@amd.com>
+Date: Thu, 4 Oct 2018 15:10:52 -0400
+Subject: [PATCH 5557/5725] drm/amdgpu/vcn:Update DPG mode VCN memory control
+
+Update Dynamic Power Gate mode VCN memory control
+
+Signed-off-by: James Zhu <James.Zhu@amd.com>
+Acked-by: Leo Liu <leo.liu@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 19 +++++++++++--------
+ 1 file changed, 11 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index dd99e88..e147b3e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -983,11 +983,13 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
+
+ /* initialize VCN memory controller */
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL,
+- (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
++ (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
+ UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__REQ_MODE_MASK |
++ UVD_LMI_CTRL__CRC_RESET_MASK |
++ UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
+ 0x00100000L, 0xFFFFFFFF, 0);
+
+ #ifdef __BIG_ENDIAN
+@@ -1041,13 +1043,14 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
+ vcn_v1_0_clock_gating_dpg_mode(adev, 1);
+ /* setup mmUVD_LMI_CTRL */
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL,
+- (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+- UVD_LMI_CTRL__CRC_RESET_MASK |
+- UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
+- UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+- UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
+- (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
+- 0x00100000L), 0xFFFFFFFF, 1);
++ (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
++ UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
++ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
++ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
++ UVD_LMI_CTRL__REQ_MODE_MASK |
++ UVD_LMI_CTRL__CRC_RESET_MASK |
++ UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
++ 0x00100000L, 0xFFFFFFFF, 1);
+
+ tmp = adev->gfx.config.gb_addr_config;
+ /* setup VCN global tiling registers */
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5558-drm-amdgpu-vcn-Update-DPG-mode-VCN-global-tiling-reg.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5558-drm-amdgpu-vcn-Update-DPG-mode-VCN-global-tiling-reg.patch
new file mode 100644
index 00000000..d88ac239
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5558-drm-amdgpu-vcn-Update-DPG-mode-VCN-global-tiling-reg.patch
@@ -0,0 +1,54 @@
+From 0af9e3444ceaa0a56685be421eac1f71b8b4cb0c Mon Sep 17 00:00:00 2001
+From: James Zhu <James.Zhu@amd.com>
+Date: Thu, 4 Oct 2018 15:42:51 -0400
+Subject: [PATCH 5558/5725] drm/amdgpu/vcn:Update DPG mode VCN global tiling
+ registers
+
+Update Dynamic Power Gate mode VCN global tiling registers
+
+Signed-off-by: James Zhu <James.Zhu@amd.com>
+Acked-by: Leo Liu <leo.liu@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 19 +++++++++++++++----
+ 1 file changed, 15 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index e147b3e..10e0b19 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -371,16 +371,27 @@ static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev)
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE,
+ 0xFFFFFFFF, 0);
+
++ /* VCN global tiling registers */
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
+- WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_JPEG_ADDR_CONFIG,
+- adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
+- WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_JPEG_UV_ADDR_CONFIG,
+- adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_DBW_UV_ADDR_CONFIG,
++ adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_CURR_ADDR_CONFIG,
++ adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_CURR_UV_ADDR_CONFIG,
++ adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_RECON1_ADDR_CONFIG,
++ adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_RECON1_UV_ADDR_CONFIG,
++ adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_REF_ADDR_CONFIG,
++ adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_REF_UV_ADDR_CONFIG,
++ adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
+ }
+
+ /**
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5559-drm-amdgpu-vcn-Add-DPG-mode-Register-XX-check.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5559-drm-amdgpu-vcn-Add-DPG-mode-Register-XX-check.patch
new file mode 100644
index 00000000..ec9bfeb6
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5559-drm-amdgpu-vcn-Add-DPG-mode-Register-XX-check.patch
@@ -0,0 +1,43 @@
+From bc096e2aedaf0ced0b8371d217541c757701b74e Mon Sep 17 00:00:00 2001
+From: James Zhu <James.Zhu@amd.com>
+Date: Thu, 4 Oct 2018 16:02:51 -0400
+Subject: [PATCH 5559/5725] drm/amdgpu/vcn:Add DPG mode Register XX check
+
+Add Dynamic Power Gate mode Register XX check
+
+Signed-off-by: James Zhu <James.Zhu@amd.com>
+Acked-by: Leo Liu <leo.liu@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index 10e0b19..86bb57c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -37,6 +37,11 @@
+
+ #include "ivsrcid/vcn/irqsrcs_vcn_1_0.h"
+
++#define mmUVD_RBC_XX_IB_REG_CHECK 0x05ab
++#define mmUVD_RBC_XX_IB_REG_CHECK_BASE_IDX 1
++#define mmUVD_REG_XX_MASK 0x05ac
++#define mmUVD_REG_XX_MASK_BASE_IDX 1
++
+ static int vcn_v1_0_stop(struct amdgpu_device *adev);
+ static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
+ static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
+@@ -1031,6 +1036,9 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
+
+ vcn_v1_0_mc_resume_dpg_mode(adev);
+
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_REG_XX_MASK, 0x10, 0xFFFFFFFF, 0);
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK, 0x3, 0xFFFFFFFF, 0);
++
+ /* take all subblocks out of reset, except VCPU */
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SOFT_RESET,
+ UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK, 0xFFFFFFFF, 0);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5560-drm-amdgpu-vcn-Remove-DPG-mode-unused-steps-during-v.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5560-drm-amdgpu-vcn-Remove-DPG-mode-unused-steps-during-v.patch
new file mode 100644
index 00000000..39a79a8f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5560-drm-amdgpu-vcn-Remove-DPG-mode-unused-steps-during-v.patch
@@ -0,0 +1,78 @@
+From 1afaff692799e1bdc080b413fec9d9076499745b Mon Sep 17 00:00:00 2001
+From: James Zhu <James.Zhu@amd.com>
+Date: Thu, 4 Oct 2018 16:09:33 -0400
+Subject: [PATCH 5560/5725] drm/amdgpu/vcn:Remove DPG mode unused steps during
+ vcn start
+
+Remove Dynamic Power Gate mode unused steps during VCN start
+
+Signed-off-by: James Zhu <James.Zhu@amd.com>
+Acked-by: Leo Liu <leo.liu@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 28 +---------------------------
+ 1 file changed, 1 insertion(+), 27 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index 86bb57c..bc58658 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -981,22 +981,6 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MASTINT_EN,
+ 0, UVD_MASTINT_EN__VCPU_EN_MASK, 0);
+
+- /* stall UMC and register bus before resetting VCPU */
+- WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL2,
+- UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);
+-
+- /* put LMI, VCPU, RBC etc... into reset */
+- WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SOFT_RESET,
+- UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
+- UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
+- UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
+- UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
+- UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
+- UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
+- UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
+- UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK,
+- 0xFFFFFFFF, 0);
+-
+ /* initialize VCN memory controller */
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL,
+ (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
+@@ -1039,14 +1023,6 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_REG_XX_MASK, 0x10, 0xFFFFFFFF, 0);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK, 0x3, 0xFFFFFFFF, 0);
+
+- /* take all subblocks out of reset, except VCPU */
+- WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SOFT_RESET,
+- UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK, 0xFFFFFFFF, 0);
+-
+- /* enable VCPU clock */
+- WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CNTL,
+- UVD_VCPU_CNTL__CLK_EN_MASK, 0xFFFFFFFF, 0);
+-
+ /* enable UMC */
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL2,
+ 0, UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);
+@@ -1056,8 +1032,7 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
+
+ /* enable master interrupt */
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MASTINT_EN,
+- (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
+- (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK), 0);
++ UVD_MASTINT_EN__VCPU_EN_MASK, UVD_MASTINT_EN__VCPU_EN_MASK, 0);
+
+ vcn_v1_0_clock_gating_dpg_mode(adev, 1);
+ /* setup mmUVD_LMI_CTRL */
+@@ -1085,7 +1060,6 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
+ tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
+- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
+ WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5561-drm-amdgpu-vcn-Apply-new-UMC-enable-for-VNC-DPG-mode.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5561-drm-amdgpu-vcn-Apply-new-UMC-enable-for-VNC-DPG-mode.patch
new file mode 100644
index 00000000..50475516
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5561-drm-amdgpu-vcn-Apply-new-UMC-enable-for-VNC-DPG-mode.patch
@@ -0,0 +1,41 @@
+From 9f2690300ba457c44fca9f38b6fa8a89cd1e2751 Mon Sep 17 00:00:00 2001
+From: James Zhu <James.Zhu@amd.com>
+Date: Tue, 9 Oct 2018 13:05:15 -0400
+Subject: [PATCH 5561/5725] drm/amdgpu/vcn:Apply new UMC enable for VNC DPG
+ mode start
+
+Apply new UMC enable for VCN Dynamic Power Gate mode start
+
+Signed-off-by: James Zhu <James.Zhu@amd.com>
+Acked-by: Leo Liu <leo.liu@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index bc58658..0371b67 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -1023,13 +1023,14 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_REG_XX_MASK, 0x10, 0xFFFFFFFF, 0);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK, 0x3, 0xFFFFFFFF, 0);
+
+- /* enable UMC */
+- WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL2,
+- 0, UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);
+-
+ /* boot up the VCPU */
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SOFT_RESET, 0, 0xFFFFFFFF, 0);
+
++ /* enable UMC */
++ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL2,
++ 0x1F << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT,
++ 0xFFFFFFFF, 0);
++
+ /* enable master interrupt */
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MASTINT_EN,
+ UVD_MASTINT_EN__VCPU_EN_MASK, UVD_MASTINT_EN__VCPU_EN_MASK, 0);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5562-drm-amdgpu-vcn-Update-SPG-mode-VCN-memory-control.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5562-drm-amdgpu-vcn-Update-SPG-mode-VCN-memory-control.patch
new file mode 100644
index 00000000..ec66d0da
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5562-drm-amdgpu-vcn-Update-SPG-mode-VCN-memory-control.patch
@@ -0,0 +1,41 @@
+From 341ac73f0f5d693ef6797a6ab1c795abef03e2b7 Mon Sep 17 00:00:00 2001
+From: James Zhu <James.Zhu@amd.com>
+Date: Tue, 9 Oct 2018 16:40:56 -0400
+Subject: [PATCH 5562/5725] drm/amdgpu/vcn:Update SPG mode VCN memory control
+
+Update Static Power Gate mode VCN memory control
+
+Signed-off-by: James Zhu <James.Zhu@amd.com>
+Acked-by: Leo Liu <leo.liu@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 13 ++++++-------
+ 1 file changed, 6 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index 0371b67..e1df632 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -787,13 +787,12 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
+ mdelay(5);
+
+ /* initialize VCN memory controller */
+- WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL,
+- (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
+- UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+- UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+- UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
+- UVD_LMI_CTRL__REQ_MODE_MASK |
+- 0x00100000L);
++ tmp = RREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL);
++ WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL, tmp |
++ UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
++ UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
++ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
++ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
+
+ #ifdef __BIG_ENDIAN
+ /* swap (8 in 32) RB and IB */
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5563-drm-amdgpu-vcn-Update-SPG-mode-VCN-global-tiling.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5563-drm-amdgpu-vcn-Update-SPG-mode-VCN-global-tiling.patch
new file mode 100644
index 00000000..df6d8ab6
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5563-drm-amdgpu-vcn-Update-SPG-mode-VCN-global-tiling.patch
@@ -0,0 +1,46 @@
+From 7d915a79568896e3f9a9d53a98a9f43571124a87 Mon Sep 17 00:00:00 2001
+From: James Zhu <James.Zhu@amd.com>
+Date: Tue, 9 Oct 2018 16:43:32 -0400
+Subject: [PATCH 5563/5725] drm/amdgpu/vcn:Update SPG mode VCN global tiling
+
+Update Static Power Gate mode VCN global tiling
+
+Signed-off-by: James Zhu <James.Zhu@amd.com>
+Acked-by: Leo Liu <leo.liu@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 18 ++++++++++++++++++
+ 1 file changed, 18 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index e1df632..d21c242 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -325,6 +325,24 @@ static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
+ adev->gfx.config.gb_addr_config);
+ WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
++ WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_UV_ADDR_CONFIG,
++ adev->gfx.config.gb_addr_config);
++ WREG32_SOC15(UVD, 0, mmUVD_MIF_CURR_ADDR_CONFIG,
++ adev->gfx.config.gb_addr_config);
++ WREG32_SOC15(UVD, 0, mmUVD_MIF_CURR_UV_ADDR_CONFIG,
++ adev->gfx.config.gb_addr_config);
++ WREG32_SOC15(UVD, 0, mmUVD_MIF_RECON1_ADDR_CONFIG,
++ adev->gfx.config.gb_addr_config);
++ WREG32_SOC15(UVD, 0, mmUVD_MIF_RECON1_UV_ADDR_CONFIG,
++ adev->gfx.config.gb_addr_config);
++ WREG32_SOC15(UVD, 0, mmUVD_MIF_REF_ADDR_CONFIG,
++ adev->gfx.config.gb_addr_config);
++ WREG32_SOC15(UVD, 0, mmUVD_MIF_REF_UV_ADDR_CONFIG,
++ adev->gfx.config.gb_addr_config);
++ WREG32_SOC15(UVD, 0, mmUVD_JPEG_ADDR_CONFIG,
++ adev->gfx.config.gb_addr_config);
++ WREG32_SOC15(UVD, 0, mmUVD_JPEG_UV_ADDR_CONFIG,
++ adev->gfx.config.gb_addr_config);
+ }
+
+ static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5564-drm-amdgpu-vcn-Move-SPG-mode-mc-resume-after-MPC-con.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5564-drm-amdgpu-vcn-Move-SPG-mode-mc-resume-after-MPC-con.patch
new file mode 100644
index 00000000..bf4f3a42
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5564-drm-amdgpu-vcn-Move-SPG-mode-mc-resume-after-MPC-con.patch
@@ -0,0 +1,40 @@
+From 86e1bc245fcf02167966b187fca9694bc798fb74 Mon Sep 17 00:00:00 2001
+From: James Zhu <James.Zhu@amd.com>
+Date: Tue, 9 Oct 2018 16:46:53 -0400
+Subject: [PATCH 5564/5725] drm/amdgpu/vcn:Move SPG mode mc resume after MPC
+ control
+
+Move Static Power Gate mode mc resume after MPC control
+
+Signed-off-by: James Zhu <James.Zhu@amd.com>
+Acked-by: Leo Liu <leo.liu@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index d21c242..b8b2974 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -780,8 +780,6 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
+ /* disable clock gating */
+ vcn_v1_0_disable_clock_gating(adev);
+
+- vcn_v1_0_mc_resume_spg_mode(adev);
+-
+ /* disable interupt */
+ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
+@@ -840,6 +838,8 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
+ (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
+
++ vcn_v1_0_mc_resume_spg_mode(adev);
++
+ /* take all subblocks out of reset, except VCPU */
+ WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
+ UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5565-drm-amdgpu-vcn-Add-SPG-mode-Register-XX-check.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5565-drm-amdgpu-vcn-Add-SPG-mode-Register-XX-check.patch
new file mode 100644
index 00000000..d8c52134
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5565-drm-amdgpu-vcn-Add-SPG-mode-Register-XX-check.patch
@@ -0,0 +1,32 @@
+From 0979c49841895490016e54d4f58ab9116fe171a6 Mon Sep 17 00:00:00 2001
+From: James Zhu <James.Zhu@amd.com>
+Date: Tue, 9 Oct 2018 16:48:29 -0400
+Subject: [PATCH 5565/5725] drm/amdgpu/vcn:Add SPG mode Register XX check
+
+Add Static Power Gate mode Register XX check
+
+Signed-off-by: James Zhu <James.Zhu@amd.com>
+Acked-by: Leo Liu <leo.liu@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index b8b2974..78860f4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -840,6 +840,10 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
+
+ vcn_v1_0_mc_resume_spg_mode(adev);
+
++ WREG32_SOC15(UVD, 0, mmUVD_REG_XX_MASK, 0x10);
++ WREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK,
++ RREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK) | 0x3);
++
+ /* take all subblocks out of reset, except VCPU */
+ WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
+ UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5566-drm-amdgpu-vcn-Remove-SPG-mode-unused-steps-during-v.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5566-drm-amdgpu-vcn-Remove-SPG-mode-unused-steps-during-v.patch
new file mode 100644
index 00000000..8a04b9bb
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5566-drm-amdgpu-vcn-Remove-SPG-mode-unused-steps-during-v.patch
@@ -0,0 +1,81 @@
+From 2f0a97ffe6e4ce024dc3a4bfc6b07c027feae5cf Mon Sep 17 00:00:00 2001
+From: James Zhu <James.Zhu@amd.com>
+Date: Tue, 9 Oct 2018 16:53:42 -0400
+Subject: [PATCH 5566/5725] drm/amdgpu/vcn:Remove SPG mode unused steps during
+ vcn start
+
+Remove Static Power Gate mode unused steps during vcn start
+
+Signed-off-by: James Zhu <James.Zhu@amd.com>
+Acked-by: Leo Liu <leo.liu@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 30 ++----------------------------
+ 1 file changed, 2 insertions(+), 28 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index 78860f4..158541c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -784,24 +784,6 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
+ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
+
+- /* stall UMC and register bus before resetting VCPU */
+- WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
+- UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
+- ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+- mdelay(1);
+-
+- /* put LMI, VCPU, RBC etc... into reset */
+- WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
+- UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
+- UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
+- UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
+- UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
+- UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
+- UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
+- UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
+- UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
+- mdelay(5);
+-
+ /* initialize VCN memory controller */
+ tmp = RREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL);
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL, tmp |
+@@ -844,14 +826,8 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
+ WREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK,
+ RREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK) | 0x3);
+
+- /* take all subblocks out of reset, except VCPU */
+- WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
+- UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+- mdelay(5);
+-
+ /* enable VCPU clock */
+- WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL,
+- UVD_VCPU_CNTL__CLK_EN_MASK);
++ WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);
+
+ /* enable UMC */
+ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
+@@ -891,8 +867,7 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
+ }
+ /* enable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
+- (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
+- ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
++ UVD_MASTINT_EN__VCPU_EN_MASK, ~UVD_MASTINT_EN__VCPU_EN_MASK);
+
+ /* enable system interrupt for JRBC, TODO: move to set interrupt*/
+ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SYS_INT_EN),
+@@ -908,7 +883,6 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
+ tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
+- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
+ WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5567-drm-amdgpu-vcn-Apply-new-UMC-enable-for-VNC-DPG-mode.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5567-drm-amdgpu-vcn-Apply-new-UMC-enable-for-VNC-DPG-mode.patch
new file mode 100644
index 00000000..523730e4
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5567-drm-amdgpu-vcn-Apply-new-UMC-enable-for-VNC-DPG-mode.patch
@@ -0,0 +1,44 @@
+From 14a0c02792b0591b8f0e83dedceecc7ebc2829af Mon Sep 17 00:00:00 2001
+From: James Zhu <James.Zhu@amd.com>
+Date: Tue, 9 Oct 2018 16:57:26 -0400
+Subject: [PATCH 5567/5725] drm/amdgpu/vcn:Apply new UMC enable for VNC DPG
+ mode
+
+Apply new UMC enable for VCN Dynamic Power Gate mode
+
+Signed-off-by: James Zhu <James.Zhu@amd.com>
+Acked-by: Leo Liu <leo.liu@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index 158541c..a513518 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -829,13 +829,18 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
+ /* enable VCPU clock */
+ WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);
+
++ /* boot up the VCPU */
++ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
++ ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
++
+ /* enable UMC */
+ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
+ ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+
+- /* boot up the VCPU */
+- WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, 0);
+- mdelay(10);
++ tmp = RREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET);
++ tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
++ tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
++ WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, tmp);
+
+ for (i = 0; i < 10; ++i) {
+ uint32_t status;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5568-drm-amdgpu-vcn-Set-VCPU-busy-after-gate-power-during.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5568-drm-amdgpu-vcn-Set-VCPU-busy-after-gate-power-during.patch
new file mode 100644
index 00000000..44b0db3a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5568-drm-amdgpu-vcn-Set-VCPU-busy-after-gate-power-during.patch
@@ -0,0 +1,33 @@
+From 91d597931959f494b800222a34ba6539fcdcf6ee Mon Sep 17 00:00:00 2001
+From: James Zhu <James.Zhu@amd.com>
+Date: Tue, 9 Oct 2018 16:59:57 -0400
+Subject: [PATCH 5568/5725] drm/amdgpu/vcn:Set VCPU busy after gate power
+ during vcn SPG start
+
+Set VCPU busy after gate power during vcn Static Power Gate start
+
+Signed-off-by: James Zhu <James.Zhu@amd.com>
+Acked-by: Leo Liu <leo.liu@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index a513518..9f650a3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -777,6 +777,10 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
+ lmi_swap_cntl = 0;
+
+ vcn_1_0_disable_static_power_gating(adev);
++
++ tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
++ WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);
++
+ /* disable clock gating */
+ vcn_v1_0_disable_clock_gating(adev);
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5569-drm-amdgpu-vcn-Update-SPG-mode-UVD-status-clear.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5569-drm-amdgpu-vcn-Update-SPG-mode-UVD-status-clear.patch
new file mode 100644
index 00000000..fc29f3d5
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5569-drm-amdgpu-vcn-Update-SPG-mode-UVD-status-clear.patch
@@ -0,0 +1,34 @@
+From 4f99d8407a9a0e04bae6aa32acf97635667f4f31 Mon Sep 17 00:00:00 2001
+From: James Zhu <James.Zhu@amd.com>
+Date: Tue, 9 Oct 2018 17:06:56 -0400
+Subject: [PATCH 5569/5725] drm/amdgpu/vcn:Update SPG mode UVD status clear
+
+Update Static Power Gate mode UVD status clear
+
+Signed-off-by: James Zhu <James.Zhu@amd.com>
+Acked-by: Leo Liu <leo.liu@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index 9f650a3..0d80b44 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -883,9 +883,9 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
+ UVD_SYS_INT_EN__UVD_JRBC_EN_MASK,
+ ~UVD_SYS_INT_EN__UVD_JRBC_EN_MASK);
+
+- /* clear the bit 4 of VCN_STATUS */
+- WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0,
+- ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
++ /* clear the busy bit of UVD_STATUS */
++ tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) & ~UVD_STATUS__UVD_BUSY;
++ WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);
+
+ /* force RBC into idle state */
+ rb_bufsz = order_base_2(ring->ring_size);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5570-drm-amdgpu-display-dm-amdgpu-make-dp-phy-debugfs-for.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5570-drm-amdgpu-display-dm-amdgpu-make-dp-phy-debugfs-for.patch
new file mode 100644
index 00000000..be0447c6
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5570-drm-amdgpu-display-dm-amdgpu-make-dp-phy-debugfs-for.patch
@@ -0,0 +1,35 @@
+From 82c11dd37118a7e60b882e0b978786ed627b31d5 Mon Sep 17 00:00:00 2001
+From: hersen wu <hersenxs.wu@amd.com>
+Date: Thu, 4 Oct 2018 09:28:20 -0400
+Subject: [PATCH 5570/5725] drm/amdgpu/display: dm/amdgpu: make dp phy debugfs
+ for eDP
+
+[WHY] dp debugfs file does not exist for eDP under
+ /sys/kernel/debug/dri/0/eDP-1. The root cause is that the phy
+ debugfs entry is created for the DP connector only.
+[HOW] For the eDP connector, create the phy debugfs entry too.
+
+Signed-off-by: Hersen Wu <hersenxs.wu@amd.com>
+Reviewed-by: David Francis <David.Francis@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+index 0ef4a40..9a7ac58 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+@@ -705,7 +705,8 @@ int connector_debugfs_init(struct amdgpu_dm_connector *connector)
+ int i;
+ struct dentry *ent, *dir = connector->base.debugfs_entry;
+
+- if (connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
++ if (connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
++ connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) {
+ for (i = 0; i < ARRAY_SIZE(dp_debugfs_entries); i++) {
+ ent = debugfs_create_file(dp_debugfs_entries[i].name,
+ 0644,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5571-drm-amdgpu-update-Vega20-SDMA-golden-setting.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5571-drm-amdgpu-update-Vega20-SDMA-golden-setting.patch
new file mode 100644
index 00000000..685a4feb
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5571-drm-amdgpu-update-Vega20-SDMA-golden-setting.patch
@@ -0,0 +1,37 @@
+From a9a10398aa412a077d2aaaedbbffe6d9e4b4b617 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Fri, 12 Oct 2018 09:37:29 +0800
+Subject: [PATCH 5571/5725] drm/amdgpu: update Vega20 SDMA golden setting
+
+Update SDMA golden settings.
+
+Change-Id: Icffa86e1a2c057466e1280fb195070c769d51eab
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Feifei Xu <Feifei.Xu@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+index d05951f..fa76e9a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+@@ -148,6 +148,7 @@ static const struct soc15_reg_golden golden_settings_sdma0_4_2[] =
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
++ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xFE000000, 0x00000000),
+ };
+
+ static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = {
+@@ -177,6 +178,7 @@ static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = {
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0),
++ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_WATERMK, 0xFE000000, 0x00000000),
+ };
+
+ static const struct soc15_reg_golden golden_settings_sdma_rv1[] =
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5572-drm-amd-powerplay-added-I2C-controller-configuration.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5572-drm-amd-powerplay-added-I2C-controller-configuration.patch
new file mode 100644
index 00000000..7dfa1c43
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5572-drm-amd-powerplay-added-I2C-controller-configuration.patch
@@ -0,0 +1,440 @@
+From adcc6ab72c857d9a1f55110e61ea72daa13dc4d0 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Fri, 28 Sep 2018 16:19:08 +0800
+Subject: [PATCH 5572/5725] drm/amd/powerplay: added I2C controller
+ configuration
+
+The PPTABLE structure is extended to add the I2C controller
+configuration. Hold off on the PPTABLE_V20_SMU_VERSION bump
+until the VBIOS is ready.
+
+Change-Id: I079154b36e4bddba9fa40ce3abc6517ad9e9b5f1
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Feifei Xu <Feifei.Xu@amd.com>
+---
+ drivers/gpu/drm/amd/include/atomfirmware.h | 88 +++++++++++++++++
+ .../amd/powerplay/hwmgr/vega20_processpptables.c | 94 +++++++++++-------
+ .../gpu/drm/amd/powerplay/inc/smu11_driver_if.h | 108 +++++++++++++++------
+ 3 files changed, 227 insertions(+), 63 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
+index 8ae7adb..d2e7c0f 100644
+--- a/drivers/gpu/drm/amd/include/atomfirmware.h
++++ b/drivers/gpu/drm/amd/include/atomfirmware.h
+@@ -1532,6 +1532,94 @@ struct atom_smc_dpm_info_v4_3
+ uint32_t boardreserved[10];
+ };
+
++struct smudpm_i2ccontrollerconfig_t {
++ uint32_t enabled;
++ uint32_t slaveaddress;
++ uint32_t controllerport;
++ uint32_t controllername;
++ uint32_t thermalthrottler;
++ uint32_t i2cprotocol;
++ uint32_t i2cspeed;
++};
++
++struct atom_smc_dpm_info_v4_4
++{
++ struct atom_common_table_header table_header;
++ uint32_t i2c_padding[3];
++
++ uint16_t maxvoltagestepgfx;
++ uint16_t maxvoltagestepsoc;
++
++ uint8_t vddgfxvrmapping;
++ uint8_t vddsocvrmapping;
++ uint8_t vddmem0vrmapping;
++ uint8_t vddmem1vrmapping;
++
++ uint8_t gfxulvphasesheddingmask;
++ uint8_t soculvphasesheddingmask;
++ uint8_t externalsensorpresent;
++ uint8_t padding8_v;
++
++ uint16_t gfxmaxcurrent;
++ uint8_t gfxoffset;
++ uint8_t padding_telemetrygfx;
++
++ uint16_t socmaxcurrent;
++ uint8_t socoffset;
++ uint8_t padding_telemetrysoc;
++
++ uint16_t mem0maxcurrent;
++ uint8_t mem0offset;
++ uint8_t padding_telemetrymem0;
++
++ uint16_t mem1maxcurrent;
++ uint8_t mem1offset;
++ uint8_t padding_telemetrymem1;
++
++
++ uint8_t acdcgpio;
++ uint8_t acdcpolarity;
++ uint8_t vr0hotgpio;
++ uint8_t vr0hotpolarity;
++
++ uint8_t vr1hotgpio;
++ uint8_t vr1hotpolarity;
++ uint8_t padding1;
++ uint8_t padding2;
++
++
++ uint8_t ledpin0;
++ uint8_t ledpin1;
++ uint8_t ledpin2;
++ uint8_t padding8_4;
++
++
++ uint8_t pllgfxclkspreadenabled;
++ uint8_t pllgfxclkspreadpercent;
++ uint16_t pllgfxclkspreadfreq;
++
++
++ uint8_t uclkspreadenabled;
++ uint8_t uclkspreadpercent;
++ uint16_t uclkspreadfreq;
++
++
++ uint8_t fclkspreadenabled;
++ uint8_t fclkspreadpercent;
++ uint16_t fclkspreadfreq;
++
++
++ uint8_t fllgfxclkspreadenabled;
++ uint8_t fllgfxclkspreadpercent;
++ uint16_t fllgfxclkspreadfreq;
++
++
++ struct smudpm_i2ccontrollerconfig_t i2ccontrollers[7];
++
++
++ uint32_t boardreserved[10];
++};
++
+ /*
+ ***************************************************************************
+ Data Table asic_profiling_info structure
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
+index 32fe384..e717404 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
+@@ -417,8 +417,8 @@ static void dump_pptable(PPTable_t *pptable)
+ pr_info("FanGainEdge = %d\n", pptable->FanGainEdge);
+ pr_info("FanGainHotspot = %d\n", pptable->FanGainHotspot);
+ pr_info("FanGainLiquid = %d\n", pptable->FanGainLiquid);
+- pr_info("FanGainVrVddc = %d\n", pptable->FanGainVrVddc);
+- pr_info("FanGainVrMvdd = %d\n", pptable->FanGainVrMvdd);
++ pr_info("FanGainVrGfx = %d\n", pptable->FanGainVrGfx);
++ pr_info("FanGainVrSoc = %d\n", pptable->FanGainVrSoc);
+ pr_info("FanGainPlx = %d\n", pptable->FanGainPlx);
+ pr_info("FanGainHbm = %d\n", pptable->FanGainHbm);
+ pr_info("FanPwmMin = %d\n", pptable->FanPwmMin);
+@@ -533,23 +533,17 @@ static void dump_pptable(PPTable_t *pptable)
+ pr_info("MinVoltageUlvGfx = %d\n", pptable->MinVoltageUlvGfx);
+ pr_info("MinVoltageUlvSoc = %d\n", pptable->MinVoltageUlvSoc);
+
+- for (i = 0; i < 14; i++)
+- pr_info("Reserved[%d] = 0x%x\n", i, pptable->Reserved[i]);
++ pr_info("MGpuFanBoostLimitRpm = %d\n", pptable->MGpuFanBoostLimitRpm);
++ pr_info("padding16_Fan = %d\n", pptable->padding16_Fan);
+
+- pr_info("Liquid1_I2C_address = 0x%x\n", pptable->Liquid1_I2C_address);
+- pr_info("Liquid2_I2C_address = 0x%x\n", pptable->Liquid2_I2C_address);
+- pr_info("Vr_I2C_address = 0x%x\n", pptable->Vr_I2C_address);
+- pr_info("Plx_I2C_address = 0x%x\n", pptable->Plx_I2C_address);
++ pr_info("FanGainVrMem0 = %d\n", pptable->FanGainVrMem0);
++ pr_info("FanGainVrMem0 = %d\n", pptable->FanGainVrMem0);
+
+- pr_info("Liquid_I2C_LineSCL = 0x%x\n", pptable->Liquid_I2C_LineSCL);
+- pr_info("Liquid_I2C_LineSDA = 0x%x\n", pptable->Liquid_I2C_LineSDA);
+- pr_info("Vr_I2C_LineSCL = 0x%x\n", pptable->Vr_I2C_LineSCL);
+- pr_info("Vr_I2C_LineSDA = 0x%x\n", pptable->Vr_I2C_LineSDA);
++ for (i = 0; i < 12; i++)
++ pr_info("Reserved[%d] = 0x%x\n", i, pptable->Reserved[i]);
+
+- pr_info("Plx_I2C_LineSCL = 0x%x\n", pptable->Plx_I2C_LineSCL);
+- pr_info("Plx_I2C_LineSDA = 0x%x\n", pptable->Plx_I2C_LineSDA);
+- pr_info("VrSensorPresent = 0x%x\n", pptable->VrSensorPresent);
+- pr_info("LiquidSensorPresent = 0x%x\n", pptable->LiquidSensorPresent);
++ for (i = 0; i < 3; i++)
++ pr_info("Padding32[%d] = 0x%x\n", i, pptable->Padding32[i]);
+
+ pr_info("MaxVoltageStepGfx = 0x%x\n", pptable->MaxVoltageStepGfx);
+ pr_info("MaxVoltageStepSoc = 0x%x\n", pptable->MaxVoltageStepSoc);
+@@ -611,6 +605,24 @@ static void dump_pptable(PPTable_t *pptable)
+ pr_info("FllGfxclkSpreadPercent = %d\n", pptable->FllGfxclkSpreadPercent);
+ pr_info("FllGfxclkSpreadFreq = %d\n", pptable->FllGfxclkSpreadFreq);
+
++ for (i = 0; i < I2C_CONTROLLER_NAME_COUNT; i++) {
++ pr_info("I2cControllers[%d]:\n", i);
++ pr_info(" .Enabled = %d\n",
++ pptable->I2cControllers[i].Enabled);
++ pr_info(" .SlaveAddress = 0x%x\n",
++ pptable->I2cControllers[i].SlaveAddress);
++ pr_info(" .ControllerPort = %d\n",
++ pptable->I2cControllers[i].ControllerPort);
++ pr_info(" .ControllerName = %d\n",
++ pptable->I2cControllers[i].ControllerName);
++ pr_info(" .ThermalThrottler = %d\n",
++ pptable->I2cControllers[i].ThermalThrottler);
++ pr_info(" .I2cProtocol = %d\n",
++ pptable->I2cControllers[i].I2cProtocol);
++ pr_info(" .I2cSpeed = %d\n",
++ pptable->I2cControllers[i].I2cSpeed);
++ }
++
+ for (i = 0; i < 10; i++)
+ pr_info("BoardReserved[%d] = 0x%x\n", i, pptable->BoardReserved[i]);
+
+@@ -693,29 +705,19 @@ static int copy_overdrive_feature_capabilities_array(
+
+ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable)
+ {
+- struct atom_smc_dpm_info_v4_3 *smc_dpm_table;
++ struct atom_smc_dpm_info_v4_4 *smc_dpm_table;
+ int index = GetIndexIntoMasterDataTable(smc_dpm_info);
++ int i;
+
+ PP_ASSERT_WITH_CODE(
+ smc_dpm_table = smu_atom_get_data_table(hwmgr->adev, index, NULL, NULL, NULL),
+ "[appendVbiosPPTable] Failed to retrieve Smc Dpm Table from VBIOS!",
+ return -1);
+
+- ppsmc_pptable->Liquid1_I2C_address = smc_dpm_table->liquid1_i2c_address;
+- ppsmc_pptable->Liquid2_I2C_address = smc_dpm_table->liquid2_i2c_address;
+- ppsmc_pptable->Vr_I2C_address = smc_dpm_table->vr_i2c_address;
+- ppsmc_pptable->Plx_I2C_address = smc_dpm_table->plx_i2c_address;
+-
+- ppsmc_pptable->Liquid_I2C_LineSCL = smc_dpm_table->liquid_i2c_linescl;
+- ppsmc_pptable->Liquid_I2C_LineSDA = smc_dpm_table->liquid_i2c_linesda;
+- ppsmc_pptable->Vr_I2C_LineSCL = smc_dpm_table->vr_i2c_linescl;
+- ppsmc_pptable->Vr_I2C_LineSDA = smc_dpm_table->vr_i2c_linesda;
+-
+- ppsmc_pptable->Plx_I2C_LineSCL = smc_dpm_table->plx_i2c_linescl;
+- ppsmc_pptable->Plx_I2C_LineSDA = smc_dpm_table->plx_i2c_linesda;
+- ppsmc_pptable->VrSensorPresent = smc_dpm_table->vrsensorpresent;
+- ppsmc_pptable->LiquidSensorPresent = smc_dpm_table->liquidsensorpresent;
+-
++ memset(ppsmc_pptable->Padding32,
++ 0,
++ sizeof(struct atom_smc_dpm_info_v4_4) -
++ sizeof(struct atom_common_table_header));
+ ppsmc_pptable->MaxVoltageStepGfx = smc_dpm_table->maxvoltagestepgfx;
+ ppsmc_pptable->MaxVoltageStepSoc = smc_dpm_table->maxvoltagestepsoc;
+
+@@ -774,6 +776,24 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable
+ ppsmc_pptable->FllGfxclkSpreadPercent = smc_dpm_table->fllgfxclkspreadpercent;
+ ppsmc_pptable->FllGfxclkSpreadFreq = smc_dpm_table->fllgfxclkspreadfreq;
+
++ if ((smc_dpm_table->table_header.format_revision == 4) &&
++ (smc_dpm_table->table_header.content_revision == 4)) {
++ for (i = 0; i < I2C_CONTROLLER_NAME_COUNT; i++) {
++ ppsmc_pptable->I2cControllers[i].Enabled =
++ smc_dpm_table->i2ccontrollers[i].enabled;
++ ppsmc_pptable->I2cControllers[i].SlaveAddress =
++ smc_dpm_table->i2ccontrollers[i].slaveaddress;
++ ppsmc_pptable->I2cControllers[i].ControllerPort =
++ smc_dpm_table->i2ccontrollers[i].controllerport;
++ ppsmc_pptable->I2cControllers[i].ThermalThrottler =
++ smc_dpm_table->i2ccontrollers[i].thermalthrottler;
++ ppsmc_pptable->I2cControllers[i].I2cProtocol =
++ smc_dpm_table->i2ccontrollers[i].i2cprotocol;
++ ppsmc_pptable->I2cControllers[i].I2cSpeed =
++ smc_dpm_table->i2ccontrollers[i].i2cspeed;
++ }
++ }
++
+ return 0;
+ }
+
+@@ -860,7 +880,15 @@ static int init_powerplay_table_information(
+ if (pptable_information->smc_pptable == NULL)
+ return -ENOMEM;
+
+- memcpy(pptable_information->smc_pptable, &(powerplay_table->smcPPTable), sizeof(PPTable_t));
++ if (powerplay_table->smcPPTable.Version <= 2)
++ memcpy(pptable_information->smc_pptable,
++ &(powerplay_table->smcPPTable),
++ sizeof(PPTable_t) -
++ sizeof(I2cControllerConfig_t) * I2C_CONTROLLER_NAME_COUNT);
++ else
++ memcpy(pptable_information->smc_pptable,
++ &(powerplay_table->smcPPTable),
++ sizeof(PPTable_t));
+
+ result = append_vbios_pptable(hwmgr, (pptable_information->smc_pptable));
+
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
+index a002021..c72cfab 100644
+--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
+@@ -27,7 +27,7 @@
+ // *** IMPORTANT ***
+ // SMU TEAM: Always increment the interface version if
+ // any structure is changed in this file
+-#define SMU11_DRIVER_IF_VERSION 0x11
++#define SMU11_DRIVER_IF_VERSION 0x12
+
+ #define PPTABLE_V20_SMU_VERSION 2
+
+@@ -186,6 +186,9 @@
+ #define DPM_OVERRIDE_ENABLE_GFXOFF_UCLK_SWITCH 0x00010000
+ #define DPM_OVERRIDE_ENABLE_GFXOFF_FCLK_SWITCH 0x00020000
+
++#define I2C_CONTROLLER_ENABLED 1
++#define I2C_CONTROLLER_DISABLED 0
++
+ #define VR_MAPPING_VR_SELECT_MASK 0x01
+ #define VR_MAPPING_VR_SELECT_SHIFT 0x00
+
+@@ -208,15 +211,17 @@
+ #define THROTTLER_STATUS_TEMP_HOTSPOT_BIT 2
+ #define THROTTLER_STATUS_TEMP_HBM_BIT 3
+ #define THROTTLER_STATUS_TEMP_VR_GFX_BIT 4
+-#define THROTTLER_STATUS_TEMP_VR_MEM_BIT 5
+-#define THROTTLER_STATUS_TEMP_LIQUID_BIT 6
+-#define THROTTLER_STATUS_TEMP_PLX_BIT 7
+-#define THROTTLER_STATUS_TEMP_SKIN_BIT 8
+-#define THROTTLER_STATUS_TDC_GFX_BIT 9
+-#define THROTTLER_STATUS_TDC_SOC_BIT 10
+-#define THROTTLER_STATUS_PPT_BIT 11
+-#define THROTTLER_STATUS_FIT_BIT 12
+-#define THROTTLER_STATUS_PPM_BIT 13
++#define THROTTLER_STATUS_TEMP_VR_SOC_BIT 5
++#define THROTTLER_STATUS_TEMP_VR_MEM0_BIT 6
++#define THROTTLER_STATUS_TEMP_VR_MEM1_BIT 7
++#define THROTTLER_STATUS_TEMP_LIQUID_BIT 8
++#define THROTTLER_STATUS_TEMP_PLX_BIT 9
++#define THROTTLER_STATUS_TEMP_SKIN_BIT 10
++#define THROTTLER_STATUS_TDC_GFX_BIT 11
++#define THROTTLER_STATUS_TDC_SOC_BIT 12
++#define THROTTLER_STATUS_PPT_BIT 13
++#define THROTTLER_STATUS_FIT_BIT 14
++#define THROTTLER_STATUS_PPM_BIT 15
+
+
+ #define TABLE_TRANSFER_OK 0x0
+@@ -236,6 +241,58 @@
+ #define XGMI_STATE_D0 1
+ #define XGMI_STATE_D3 0
+
++typedef enum {
++ I2C_CONTROLLER_PORT_0 = 0,
++ I2C_CONTROLLER_PORT_1 = 1,
++} I2cControllerPort_e;
++
++typedef enum {
++ I2C_CONTROLLER_NAME_VR_GFX = 0,
++ I2C_CONTROLLER_NAME_VR_SOC,
++ I2C_CONTROLLER_NAME_VR_VDDCI,
++ I2C_CONTROLLER_NAME_VR_HBM,
++ I2C_CONTROLLER_NAME_LIQUID_0,
++ I2C_CONTROLLER_NAME_LIQUID_1,
++ I2C_CONTROLLER_NAME_PLX,
++ I2C_CONTROLLER_NAME_COUNT,
++} I2cControllerName_e;
++
++typedef enum {
++ I2C_CONTROLLER_THROTTLER_TYPE_NONE = 0,
++ I2C_CONTROLLER_THROTTLER_VR_GFX,
++ I2C_CONTROLLER_THROTTLER_VR_SOC,
++ I2C_CONTROLLER_THROTTLER_VR_VDDCI,
++ I2C_CONTROLLER_THROTTLER_VR_HBM,
++ I2C_CONTROLLER_THROTTLER_LIQUID_0,
++ I2C_CONTROLLER_THROTTLER_LIQUID_1,
++ I2C_CONTROLLER_THROTTLER_PLX,
++} I2cControllerThrottler_e;
++
++typedef enum {
++ I2C_CONTROLLER_PROTOCOL_VR_XPDE132G5,
++ I2C_CONTROLLER_PROTOCOL_VR_IR35217,
++ I2C_CONTROLLER_PROTOCOL_TMP_TMP102A,
++ I2C_CONTROLLER_PROTOCOL_SPARE_0,
++ I2C_CONTROLLER_PROTOCOL_SPARE_1,
++ I2C_CONTROLLER_PROTOCOL_SPARE_2,
++} I2cControllerProtocol_e;
++
++typedef enum {
++ I2C_CONTROLLER_SPEED_SLOW = 0,
++ I2C_CONTROLLER_SPEED_FAST = 1,
++} I2cControllerSpeed_e;
++
++typedef struct {
++ uint32_t Enabled;
++ uint32_t SlaveAddress;
++ uint32_t ControllerPort;
++ uint32_t ControllerName;
++
++ uint32_t ThermalThrottler;
++ uint32_t I2cProtocol;
++ uint32_t I2cSpeed;
++} I2cControllerConfig_t;
++
+ typedef struct {
+ uint32_t a;
+ uint32_t b;
+@@ -406,8 +463,8 @@ typedef struct {
+ uint16_t FanGainEdge;
+ uint16_t FanGainHotspot;
+ uint16_t FanGainLiquid;
+- uint16_t FanGainVrVddc;
+- uint16_t FanGainVrMvdd;
++ uint16_t FanGainVrGfx;
++ uint16_t FanGainVrSoc;
+ uint16_t FanGainPlx;
+ uint16_t FanGainHbm;
+ uint16_t FanPwmMin;
+@@ -467,24 +524,11 @@ typedef struct {
+ uint16_t MGpuFanBoostLimitRpm;
+ uint16_t padding16_Fan;
+
+- uint32_t Reserved[13];
+-
++ uint16_t FanGainVrMem0;
++ uint16_t FanGainVrMem1;
++ uint32_t Reserved[12];
+
+-
+- uint8_t Liquid1_I2C_address;
+- uint8_t Liquid2_I2C_address;
+- uint8_t Vr_I2C_address;
+- uint8_t Plx_I2C_address;
+-
+- uint8_t Liquid_I2C_LineSCL;
+- uint8_t Liquid_I2C_LineSDA;
+- uint8_t Vr_I2C_LineSCL;
+- uint8_t Vr_I2C_LineSDA;
+-
+- uint8_t Plx_I2C_LineSCL;
+- uint8_t Plx_I2C_LineSDA;
+- uint8_t VrSensorPresent;
+- uint8_t LiquidSensorPresent;
++ uint32_t Padding32[3];
+
+ uint16_t MaxVoltageStepGfx;
+ uint16_t MaxVoltageStepSoc;
+@@ -551,6 +595,8 @@ typedef struct {
+ uint8_t FllGfxclkSpreadPercent;
+ uint16_t FllGfxclkSpreadFreq;
+
++ I2cControllerConfig_t I2cControllers[I2C_CONTROLLER_NAME_COUNT];
++
+ uint32_t BoardReserved[10];
+
+
+@@ -607,7 +653,9 @@ typedef struct {
+ uint16_t TemperatureHotspot ;
+ uint16_t TemperatureHBM ;
+ uint16_t TemperatureVrGfx ;
+- uint16_t TemperatureVrMem ;
++ uint16_t TemperatureVrSoc ;
++ uint16_t TemperatureVrMem0 ;
++ uint16_t TemperatureVrMem1 ;
+ uint16_t TemperatureLiquid ;
+ uint16_t TemperaturePlx ;
+ uint32_t ThrottlerStatus ;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5573-drm-amd-powerplay-update-PPtable-with-DC-BTC-and-Tvr.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5573-drm-amd-powerplay-update-PPtable-with-DC-BTC-and-Tvr.patch
new file mode 100644
index 00000000..bd77ef09
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5573-drm-amd-powerplay-update-PPtable-with-DC-BTC-and-Tvr.patch
@@ -0,0 +1,92 @@
+From 6792074f8f9c1a9704c0ec618b7218283fed9a3e Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Mon, 8 Oct 2018 12:41:19 +0800
+Subject: [PATCH 5573/5725] drm/amd/powerplay: update PPtable with DC BTC and
+ Tvr SocLimit fields
+
+Update the PPtable structure to fit the latest SMC firmware.
+
+Change-Id: I97db5955085efa1ecf44ae23d26fdcc70ec2fc9a
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Feifei Xu <Feifei.Xu@amd.com>
+---
+ .../gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c | 10 ++++++----
+ drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h | 13 ++++++++-----
+ 2 files changed, 14 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
+index e717404..e5f7f82 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
+@@ -100,9 +100,8 @@ static void dump_pptable(PPTable_t *pptable)
+ pr_info("PpmTemperatureThreshold = %d\n", pptable->PpmTemperatureThreshold);
+
+ pr_info("MemoryOnPackage = 0x%02x\n", pptable->MemoryOnPackage);
+- pr_info("padding8_limits[0] = 0x%02x\n", pptable->padding8_limits[0]);
+- pr_info("padding8_limits[1] = 0x%02x\n", pptable->padding8_limits[1]);
+- pr_info("padding8_limits[2] = 0x%02x\n", pptable->padding8_limits[2]);
++ pr_info("padding8_limits = 0x%02x\n", pptable->padding8_limits);
++ pr_info("Tvr_SocLimit = %d\n", pptable->Tvr_SocLimit);
+
+ pr_info("UlvVoltageOffsetSoc = %d\n", pptable->UlvVoltageOffsetSoc);
+ pr_info("UlvVoltageOffsetGfx = %d\n", pptable->UlvVoltageOffsetGfx);
+@@ -539,7 +538,10 @@ static void dump_pptable(PPTable_t *pptable)
+ pr_info("FanGainVrMem0 = %d\n", pptable->FanGainVrMem0);
+ pr_info("FanGainVrMem0 = %d\n", pptable->FanGainVrMem0);
+
+- for (i = 0; i < 12; i++)
++ pr_info("DcBtcGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_GFX]);
++ pr_info("DcBtcGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_SOC]);
++
++ for (i = 0; i < 11; i++)
+ pr_info("Reserved[%d] = 0x%x\n", i, pptable->Reserved[i]);
+
+ for (i = 0; i < 3; i++)
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
+index c72cfab..2998a49 100644
+--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
+@@ -165,7 +165,7 @@
+ #define FEATURE_DS_FCLK_MASK (1 << FEATURE_DS_FCLK_BIT )
+ #define FEATURE_DS_MP1CLK_MASK (1 << FEATURE_DS_MP1CLK_BIT )
+ #define FEATURE_DS_MP0CLK_MASK (1 << FEATURE_DS_MP0CLK_BIT )
+-
++#define FEATURE_XGMI_MASK (1 << FEATURE_XGMI_BIT )
+
+ #define DPM_OVERRIDE_DISABLE_SOCCLK_PID 0x00000001
+ #define DPM_OVERRIDE_DISABLE_UCLK_PID 0x00000002
+@@ -391,8 +391,8 @@ typedef struct {
+ uint16_t PpmTemperatureThreshold;
+
+ uint8_t MemoryOnPackage;
+- uint8_t padding8_limits[3];
+-
++ uint8_t padding8_limits;
++ uint16_t Tvr_SocLimit;
+
+ uint16_t UlvVoltageOffsetSoc;
+ uint16_t UlvVoltageOffsetGfx;
+@@ -501,7 +501,7 @@ typedef struct {
+ uint8_t DcBtcEnabled[AVFS_VOLTAGE_COUNT];
+ uint8_t Padding8_GfxBtc[2];
+
+- uint16_t DcBtcMin[AVFS_VOLTAGE_COUNT];
++ int16_t DcBtcMin[AVFS_VOLTAGE_COUNT];
+ uint16_t DcBtcMax[AVFS_VOLTAGE_COUNT];
+
+
+@@ -526,7 +526,10 @@ typedef struct {
+
+ uint16_t FanGainVrMem0;
+ uint16_t FanGainVrMem1;
+- uint32_t Reserved[12];
++
++ uint16_t DcBtcGb[AVFS_VOLTAGE_COUNT];
++
++ uint32_t Reserved[11];
+
+ uint32_t Padding32[3];
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5574-drm-amdgpu-Update-gc_9_0-golden-settings.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5574-drm-amdgpu-Update-gc_9_0-golden-settings.patch
new file mode 100644
index 00000000..722ea47f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5574-drm-amdgpu-Update-gc_9_0-golden-settings.patch
@@ -0,0 +1,29 @@
+From 490a57f735e369ee81b5fa912abb32457b848841 Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Tue, 16 Oct 2018 14:54:46 +0800
+Subject: [PATCH 5574/5725] drm/amdgpu: Update gc_9_0 golden settings.
+
+Add mmDB_DEBUG3 settings.
+
+Change-Id: I5797f8973673d94c090f1c4c3a1baf082b8ecba4
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 60b1e11..7acf02d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -97,6 +97,7 @@ MODULE_FIRMWARE("amdgpu/raven2_rlc.bin");
+ static const struct soc15_reg_golden golden_settings_gc_9_0[] =
+ {
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x80000000, 0x80000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5575-drm-amdgpu-fix-sdma-doorbell-comments-typo.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5575-drm-amdgpu-fix-sdma-doorbell-comments-typo.patch
new file mode 100644
index 00000000..c23861e9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5575-drm-amdgpu-fix-sdma-doorbell-comments-typo.patch
@@ -0,0 +1,28 @@
+From 92c42fccdc78d6f3f383a572c7358fb162c7e147 Mon Sep 17 00:00:00 2001
+From: "Frank.Min" <Frank.Min@amd.com>
+Date: Tue, 2 Oct 2018 15:02:09 +0800
+Subject: [PATCH 5575/5725] drm/amdgpu: fix sdma doorbell comments typo
+
+Change-Id: I0a3dff9f01a90717e0c32b7fa81a5e891bd1d52d
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Frank.Min <Frank.Min@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 5e6df72..ef666f8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -437,7 +437,7 @@ typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
+ * default non-graphics QWORD index is 0xe0 - 0xFF inclusive
+ */
+
+- /* sDMA engines reserved from 0xe0 -oxef */
++ /* sDMA engines reserved from 0xe0 -0xef */
+ AMDGPU_DOORBELL64_sDMA_ENGINE0 = 0xE0,
+ AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE0 = 0xE1,
+ AMDGPU_DOORBELL64_sDMA_ENGINE1 = 0xE8,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5576-drm-amd-display-fix-bug-of-accessing-invalid-memory.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5576-drm-amd-display-fix-bug-of-accessing-invalid-memory.patch
new file mode 100644
index 00000000..5528f1b6
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5576-drm-amd-display-fix-bug-of-accessing-invalid-memory.patch
@@ -0,0 +1,55 @@
+From 558a81423955eb43a8e73ad01e46d0fee589c44b Mon Sep 17 00:00:00 2001
+From: Su Sung Chung <Su.Chung@amd.com>
+Date: Thu, 20 Sep 2018 15:03:27 -0400
+Subject: [PATCH 5576/5725] drm/amd/display: fix bug of accessing invalid
+ memory
+
+[Why]
+A loop inside the build_evenly_distributed_points function that traverses
+the array of points becomes an infinite loop when m_GammaUpdates is never
+assigned a value.
+
+[How]
+In DMColor, clear the m_gammaIsValid bit just before writing Zeromem for
+m_GammaUpdates, to prevent calling build_evenly_distributed_points
+before m_GammaUpdates is assigned a value.
+
+Signed-off-by: Su Sung Chung <Su.Chung@amd.com>
+Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+---
+ drivers/gpu/drm/amd/display/modules/color/color_gamma.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+index 0fe9882..16f7ea8 100644
+--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
++++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+@@ -1071,10 +1071,14 @@ static void build_evenly_distributed_points(
+ struct dividers dividers)
+ {
+ struct gamma_pixel *p = points;
+- struct gamma_pixel *p_last = p + numberof_points - 1;
++ struct gamma_pixel *p_last;
+
+ uint32_t i = 0;
+
++ // This function should not get called with 0 as a parameter
++ ASSERT(numberof_points > 0);
++ p_last = p + numberof_points - 1;
++
+ do {
+ struct fixed31_32 value = dc_fixpt_from_fraction(i,
+ numberof_points - 1);
+@@ -1085,7 +1089,7 @@ static void build_evenly_distributed_points(
+
+ ++p;
+ ++i;
+- } while (i != numberof_points);
++ } while (i < numberof_points);
+
+ p->r = dc_fixpt_div(p_last->r, dividers.divider1);
+ p->g = dc_fixpt_div(p_last->g, dividers.divider1);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5577-drm-amd-display-dc-3.2.01.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5577-drm-amd-display-dc-3.2.01.patch
new file mode 100644
index 00000000..bf1b73c3
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5577-drm-amd-display-dc-3.2.01.patch
@@ -0,0 +1,28 @@
+From 29b861f79ea8d534959ee631ce8f7ab9ee2205a3 Mon Sep 17 00:00:00 2001
+From: Fatemeh Darbehani <fatemeh.darbehani@amd.com>
+Date: Mon, 24 Sep 2018 15:50:37 -0400
+Subject: [PATCH 5577/5725] drm/amd/display: dc 3.2.01
+
+Signed-off-by: Fatemeh Darbehani <fatemeh.darbehani@amd.com>
+Reviewed-by: Steven Chiu <Steven.Chiu@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index b2fd563..4c08a99 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -38,7 +38,7 @@
+ #include "inc/compressor.h"
+ #include "dml/display_mode_lib.h"
+
+-#define DC_VER "3.1.68"
++#define DC_VER "3.2.01"
+
+ #define MAX_SURFACES 3
+ #define MAX_STREAMS 6
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5578-drm-amd-display-handle-max_vstartup-larger-than-vbla.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5578-drm-amd-display-handle-max_vstartup-larger-than-vbla.patch
new file mode 100644
index 00000000..2129c459
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5578-drm-amd-display-handle-max_vstartup-larger-than-vbla.patch
@@ -0,0 +1,49 @@
+From d93cd7318470428c5d4d1f0d306c4347dc2ac2ad Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Fri, 14 Sep 2018 15:32:33 -0400
+Subject: [PATCH 5578/5725] drm/amd/display: handle max_vstartup larger than
+ vblank_end
+
+When vstartup is larger than vblank_end, we need to set v_fp2
+to allow for this early start.
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 11 +++++------
+ 1 file changed, 5 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+index 5462668..47f80e0 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+@@ -274,10 +274,12 @@ void optc1_program_timing(
+ * program the reg for interrupt postition.
+ */
+ vertical_line_start = asic_blank_end - optc->dlg_otg_param.vstartup_start + 1;
+- if (vertical_line_start < 0) {
+- ASSERT(0);
++ v_fp2 = 0;
++ if (vertical_line_start < 0)
++ v_fp2 = -vertical_line_start;
++ if (vertical_line_start < 0)
+ vertical_line_start = 0;
+- }
++
+ REG_SET(OTG_VERTICAL_INTERRUPT2_POSITION, 0,
+ OTG_VERTICAL_INTERRUPT2_LINE_START, vertical_line_start);
+
+@@ -296,9 +298,6 @@ void optc1_program_timing(
+ if (patched_crtc_timing.flags.INTERLACE == 1)
+ field_num = 1;
+ }
+- v_fp2 = 0;
+- if (optc->dlg_otg_param.vstartup_start > asic_blank_end)
+- v_fp2 = optc->dlg_otg_param.vstartup_start > asic_blank_end;
+
+ /* Interlace */
+ if (patched_crtc_timing.flags.INTERLACE == 1) {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5579-drm-amd-display-move-pplib-smu-notification-to-dccg-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5579-drm-amd-display-move-pplib-smu-notification-to-dccg-.patch
new file mode 100644
index 00000000..2d7476b1
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5579-drm-amd-display-move-pplib-smu-notification-to-dccg-.patch
@@ -0,0 +1,1973 @@
+From 96393f2207edd26b723a7e78d473178673577ef9 Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Thu, 13 Sep 2018 17:42:14 -0400
+Subject: [PATCH 5579/5725] drm/amd/display: move pplib/smu notification to
+ dccg block
+
+This is done to clean up the clock programming sequence,
+since the only time we need to notify pplib is after a
+clock update.
+
+This also renames the clk block to dccg; at the moment
+this block contains both clock management and dccg
+functionality.
+
+Change-Id: I2cb2ac583a5fac038618ee75608c48a76b343ce7
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc.c | 8 -
+ drivers/gpu/drm/amd/display/dc/core/dc_link.c | 21 +-
+ drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 2 +-
+ drivers/gpu/drm/amd/display/dc/dc.h | 5 -
+ drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c | 661 +++++++++++++--------
+ drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h | 6 +-
+ .../amd/display/dc/dce100/dce100_hw_sequencer.c | 60 +-
+ .../drm/amd/display/dc/dce100/dce100_resource.c | 4 +-
+ .../amd/display/dc/dce110/dce110_hw_sequencer.c | 212 +------
+ .../amd/display/dc/dce110/dce110_hw_sequencer.h | 10 +-
+ .../drm/amd/display/dc/dce110/dce110_resource.c | 8 +-
+ .../drm/amd/display/dc/dce112/dce112_resource.c | 14 +-
+ .../drm/amd/display/dc/dce120/dce120_resource.c | 9 +-
+ .../gpu/drm/amd/display/dc/dce80/dce80_resource.c | 4 +-
+ .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 51 +-
+ .../gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 2 +-
+ drivers/gpu/drm/amd/display/dc/inc/core_types.h | 2 +-
+ .../gpu/drm/amd/display/dc/inc/hw/display_clock.h | 13 +-
+ drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 5 -
+ drivers/gpu/drm/amd/display/dc/inc/resource.h | 3 -
+ 20 files changed, 476 insertions(+), 624 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index fc77cbf..289d12f 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -983,8 +983,6 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
+ }
+
+ /* Program hardware */
+- dc->hwss.ready_shared_resources(dc, context);
+-
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
+@@ -1046,8 +1044,6 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
+
+ dc_retain_state(dc->current_state);
+
+- dc->hwss.optimize_shared_resources(dc);
+-
+ return result;
+ }
+
+@@ -1474,12 +1470,8 @@ static void commit_planes_do_stream_update(struct dc *dc,
+ if (stream_update->dpms_off) {
+ if (*stream_update->dpms_off) {
+ core_link_disable_stream(pipe_ctx, KEEP_ACQUIRED_RESOURCE);
+- dc->hwss.pplib_apply_display_requirements(
+- dc, dc->current_state);
+ notify_display_count_to_smu(dc, dc->current_state);
+ } else {
+- dc->hwss.pplib_apply_display_requirements(
+- dc, dc->current_state);
+ notify_display_count_to_smu(dc, dc->current_state);
+ core_link_enable_stream(dc->current_state, pipe_ctx);
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index bad65c8..e3157018 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -1357,28 +1357,13 @@ static enum dc_status enable_link_dp(
+ struct dc_link *link = stream->sink->link;
+ struct dc_link_settings link_settings = {0};
+ enum dp_panel_mode panel_mode;
+- enum dc_link_rate max_link_rate = LINK_RATE_HIGH2;
+
+ /* get link settings for video mode timing */
+ decide_link_settings(stream, &link_settings);
+
+- /* raise clock state for HBR3 if required. Confirmed with HW DCE/DPCS
+- * logic for HBR3 still needs Nominal (0.8V) on VDDC rail
+- */
+- if (link->link_enc->features.flags.bits.IS_HBR3_CAPABLE)
+- max_link_rate = LINK_RATE_HIGH3;
+-
+- if (link_settings.link_rate == max_link_rate) {
+- struct dc_clocks clocks = state->bw.dcn.clk;
+-
+- /* dce/dcn compat, do not update dispclk */
+- clocks.dispclk_khz = 0;
+- /* 27mhz = 27000000hz= 27000khz */
+- clocks.phyclk_khz = link_settings.link_rate * 27000;
+-
+- state->dis_clk->funcs->update_clocks(
+- state->dis_clk, &clocks, false);
+- }
++ pipe_ctx->stream_res.pix_clk_params.requested_sym_clk =
++ link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
++ state->dccg->funcs->update_clocks(state->dccg, state, false);
+
+ dp_enable_link_phy(
+ link,
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index 4942810..69bb5b0 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -2054,7 +2054,7 @@ void dc_resource_state_construct(
+ const struct dc *dc,
+ struct dc_state *dst_ctx)
+ {
+- dst_ctx->dis_clk = dc->res_pool->dccg;
++ dst_ctx->dccg = dc->res_pool->dccg;
+ }
+
+ enum dc_status dc_validate_global_state(
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 4c08a99..1e26623 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -304,11 +304,6 @@ struct dc {
+ struct hw_sequencer_funcs hwss;
+ struct dce_hwseq *hwseq;
+
+- /* temp store of dm_pp_display_configuration
+- * to compare to see if display config changed
+- */
+- struct dm_pp_display_configuration prev_display_config;
+-
+ bool optimized_required;
+
+ /* FBC compressor */
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+index fb962503..e633bc0 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+@@ -23,34 +23,28 @@
+ *
+ */
+
+-#include "dce_clocks.h"
+-#include "dm_services.h"
+ #include "reg_helper.h"
+-#include "fixed31_32.h"
+ #include "bios_parser_interface.h"
+ #include "dc.h"
++#include "dce_clocks.h"
+ #include "dmcu.h"
+-#ifdef CONFIG_X86
+-#include "dcn_calcs.h"
+-#endif
+ #include "core_types.h"
+-#include "dc_types.h"
+ #include "dal_asic_id.h"
+
+-#define TO_DCE_CLOCKS(clocks)\
++#define TO_DCE_DCCG(clocks)\
+ container_of(clocks, struct dce_dccg, base)
+
+ #define REG(reg) \
+- (clk_dce->regs->reg)
++ (dccg_dce->regs->reg)
+
+ #undef FN
+ #define FN(reg_name, field_name) \
+- clk_dce->clk_shift->field_name, clk_dce->clk_mask->field_name
++ dccg_dce->dccg_shift->field_name, dccg_dce->dccg_mask->field_name
+
+ #define CTX \
+- clk_dce->base.ctx
++ dccg_dce->base.ctx
+ #define DC_LOGGER \
+- clk->ctx->logger
++ dccg->ctx->logger
+
+ /* Max clock values for each state indexed by "enum clocks_state": */
+ static const struct state_dependent_clocks dce80_max_clks_by_state[] = {
+@@ -151,12 +145,12 @@ static int dentist_get_divider_from_did(int did)
+ (should not be case with CIK) then SW should program all rates
+ generated according to average value (case as with previous ASICs)
+ */
+-static int dccg_adjust_dp_ref_freq_for_ss(struct dce_dccg *clk_dce, int dp_ref_clk_khz)
++static int dccg_adjust_dp_ref_freq_for_ss(struct dce_dccg *dccg_dce, int dp_ref_clk_khz)
+ {
+- if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) {
++ if (dccg_dce->ss_on_dprefclk && dccg_dce->dprefclk_ss_divider != 0) {
+ struct fixed31_32 ss_percentage = dc_fixpt_div_int(
+- dc_fixpt_from_fraction(clk_dce->dprefclk_ss_percentage,
+- clk_dce->dprefclk_ss_divider), 200);
++ dc_fixpt_from_fraction(dccg_dce->dprefclk_ss_percentage,
++ dccg_dce->dprefclk_ss_divider), 200);
+ struct fixed31_32 adj_dp_ref_clk_khz;
+
+ ss_percentage = dc_fixpt_sub(dc_fixpt_one, ss_percentage);
+@@ -166,9 +160,9 @@ static int dccg_adjust_dp_ref_freq_for_ss(struct dce_dccg *clk_dce, int dp_ref_c
+ return dp_ref_clk_khz;
+ }
+
+-static int dce_get_dp_ref_freq_khz(struct dccg *clk)
++static int dce_get_dp_ref_freq_khz(struct dccg *dccg)
+ {
+- struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
++ struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
+ int dprefclk_wdivider;
+ int dprefclk_src_sel;
+ int dp_ref_clk_khz = 600000;
+@@ -187,76 +181,110 @@ static int dce_get_dp_ref_freq_khz(struct dccg *clk)
+
+ /* Calculate the current DFS clock, in kHz.*/
+ dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
+- * clk_dce->dentist_vco_freq_khz) / target_div;
++ * dccg_dce->dentist_vco_freq_khz) / target_div;
+
+- return dccg_adjust_dp_ref_freq_for_ss(clk_dce, dp_ref_clk_khz);
++ return dccg_adjust_dp_ref_freq_for_ss(dccg_dce, dp_ref_clk_khz);
+ }
+
+-static int dce12_get_dp_ref_freq_khz(struct dccg *clk)
++static int dce12_get_dp_ref_freq_khz(struct dccg *dccg)
+ {
+- struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
++ struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
++
++ return dccg_adjust_dp_ref_freq_for_ss(dccg_dce, dccg_dce->dprefclk_khz);
++}
++
++/* unit: in_khz before mode set, get pixel clock from context. ASIC register
++ * may not be programmed yet
++ */
++static uint32_t get_max_pixel_clock_for_all_paths(struct dc_state *context)
++{
++ uint32_t max_pix_clk = 0;
++ int i;
++
++ for (i = 0; i < MAX_PIPES; i++) {
++ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
++
++ if (pipe_ctx->stream == NULL)
++ continue;
++
++ /* do not check under lay */
++ if (pipe_ctx->top_pipe)
++ continue;
+
+- return dccg_adjust_dp_ref_freq_for_ss(clk_dce, clk_dce->dprefclk_khz);
++ if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk > max_pix_clk)
++ max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
++
++ /* raise clock state for HBR3/2 if required. Confirmed with HW DCE/DPCS
++ * logic for HBR3 still needs Nominal (0.8V) on VDDC rail
++ */
++ if (dc_is_dp_signal(pipe_ctx->stream->signal) &&
++ pipe_ctx->stream_res.pix_clk_params.requested_sym_clk > max_pix_clk)
++ max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_sym_clk;
++ }
++
++ return max_pix_clk;
+ }
+
+ static enum dm_pp_clocks_state dce_get_required_clocks_state(
+- struct dccg *clk,
+- struct dc_clocks *req_clocks)
++ struct dccg *dccg,
++ struct dc_state *context)
+ {
+- struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
++ struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
+ int i;
+ enum dm_pp_clocks_state low_req_clk;
++ int max_pix_clk = get_max_pixel_clock_for_all_paths(context);
+
+ /* Iterate from highest supported to lowest valid state, and update
+ * lowest RequiredState with the lowest state that satisfies
+ * all required clocks
+ */
+- for (i = clk->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--)
+- if (req_clocks->dispclk_khz >
+- clk_dce->max_clks_by_state[i].display_clk_khz
+- || req_clocks->phyclk_khz >
+- clk_dce->max_clks_by_state[i].pixel_clk_khz)
++ for (i = dccg->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--)
++ if (context->bw.dce.dispclk_khz >
++ dccg_dce->max_clks_by_state[i].display_clk_khz
++ || max_pix_clk >
++ dccg_dce->max_clks_by_state[i].pixel_clk_khz)
+ break;
+
+ low_req_clk = i + 1;
+- if (low_req_clk > clk->max_clks_state) {
++ if (low_req_clk > dccg->max_clks_state) {
+ /* set max clock state for high phyclock, invalid on exceeding display clock */
+- if (clk_dce->max_clks_by_state[clk->max_clks_state].display_clk_khz
+- < req_clocks->dispclk_khz)
++ if (dccg_dce->max_clks_by_state[dccg->max_clks_state].display_clk_khz
++ < context->bw.dce.dispclk_khz)
+ low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
+ else
+- low_req_clk = clk->max_clks_state;
++ low_req_clk = dccg->max_clks_state;
+ }
+
+ return low_req_clk;
+ }
+
+ static int dce_set_clock(
+- struct dccg *clk,
++ struct dccg *dccg,
+ int requested_clk_khz)
+ {
+- struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
++ struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
+ struct bp_pixel_clock_parameters pxl_clk_params = { 0 };
+- struct dc_bios *bp = clk->ctx->dc_bios;
++ struct dc_bios *bp = dccg->ctx->dc_bios;
+ int actual_clock = requested_clk_khz;
++ struct dmcu *dmcu = dccg_dce->base.ctx->dc->res_pool->dmcu;
+
+ /* Make sure requested clock isn't lower than minimum threshold*/
+ if (requested_clk_khz > 0)
+ requested_clk_khz = max(requested_clk_khz,
+- clk_dce->dentist_vco_freq_khz / 64);
++ dccg_dce->dentist_vco_freq_khz / 64);
+
+ /* Prepare to program display clock*/
+ pxl_clk_params.target_pixel_clock = requested_clk_khz;
+ pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
+
+- if (clk_dce->dfs_bypass_active)
++ if (dccg_dce->dfs_bypass_active)
+ pxl_clk_params.flags.SET_DISPCLK_DFS_BYPASS = true;
+
+ bp->funcs->program_display_engine_pll(bp, &pxl_clk_params);
+
+- if (clk_dce->dfs_bypass_active) {
++ if (dccg_dce->dfs_bypass_active) {
+ /* Cache the fixed display clock*/
+- clk_dce->dfs_bypass_disp_clk =
++ dccg_dce->dfs_bypass_disp_clk =
+ pxl_clk_params.dfs_bypass_display_clock;
+ actual_clock = pxl_clk_params.dfs_bypass_display_clock;
+ }
+@@ -264,34 +292,21 @@ static int dce_set_clock(
+ /* from power down, we need mark the clock state as ClocksStateNominal
+ * from HWReset, so when resume we will call pplib voltage regulator.*/
+ if (requested_clk_khz == 0)
+- clk->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
+- return actual_clock;
+-}
++ dccg->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
+
+-static int dce_psr_set_clock(
+- struct dccg *clk,
+- int requested_clk_khz)
+-{
+- struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
+- struct dc_context *ctx = clk_dce->base.ctx;
+- struct dc *core_dc = ctx->dc;
+- struct dmcu *dmcu = core_dc->res_pool->dmcu;
+- int actual_clk_khz = requested_clk_khz;
+-
+- actual_clk_khz = dce_set_clock(clk, requested_clk_khz);
++ dmcu->funcs->set_psr_wait_loop(dmcu, actual_clock / 1000 / 7);
+
+- dmcu->funcs->set_psr_wait_loop(dmcu, actual_clk_khz / 1000 / 7);
+- return actual_clk_khz;
++ return actual_clock;
+ }
+
+ static int dce112_set_clock(
+- struct dccg *clk,
++ struct dccg *dccg,
+ int requested_clk_khz)
+ {
+- struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
++ struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
+ struct bp_set_dce_clock_parameters dce_clk_params;
+- struct dc_bios *bp = clk->ctx->dc_bios;
+- struct dc *core_dc = clk->ctx->dc;
++ struct dc_bios *bp = dccg->ctx->dc_bios;
++ struct dc *core_dc = dccg->ctx->dc;
+ struct dmcu *dmcu = core_dc->res_pool->dmcu;
+ int actual_clock = requested_clk_khz;
+ /* Prepare to program display clock*/
+@@ -300,7 +315,7 @@ static int dce112_set_clock(
+ /* Make sure requested clock isn't lower than minimum threshold*/
+ if (requested_clk_khz > 0)
+ requested_clk_khz = max(requested_clk_khz,
+- clk_dce->dentist_vco_freq_khz / 62);
++ dccg_dce->dentist_vco_freq_khz / 62);
+
+ dce_clk_params.target_clock_frequency = requested_clk_khz;
+ dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
+@@ -312,13 +327,13 @@ static int dce112_set_clock(
+ /* from power down, we need mark the clock state as ClocksStateNominal
+ * from HWReset, so when resume we will call pplib voltage regulator.*/
+ if (requested_clk_khz == 0)
+- clk->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
++ dccg->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
+
+ /*Program DP ref Clock*/
+ /*VBIOS will determine DPREFCLK frequency, so we don't set it*/
+ dce_clk_params.target_clock_frequency = 0;
+ dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK;
+- if (!ASICREV_IS_VEGA20_P(clk->ctx->asic_id.hw_internal_rev))
++ if (!ASICREV_IS_VEGA20_P(dccg->ctx->asic_id.hw_internal_rev))
+ dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK =
+ (dce_clk_params.pll_id ==
+ CLOCK_SOURCE_COMBO_DISPLAY_PLL0);
+@@ -328,19 +343,19 @@ static int dce112_set_clock(
+ bp->funcs->set_dce_clock(bp, &dce_clk_params);
+
+ if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
+- if (clk_dce->dfs_bypass_disp_clk != actual_clock)
++ if (dccg_dce->dfs_bypass_disp_clk != actual_clock)
+ dmcu->funcs->set_psr_wait_loop(dmcu,
+ actual_clock / 1000 / 7);
+ }
+
+- clk_dce->dfs_bypass_disp_clk = actual_clock;
++ dccg_dce->dfs_bypass_disp_clk = actual_clock;
+ return actual_clock;
+ }
+
+-static void dce_clock_read_integrated_info(struct dce_dccg *clk_dce)
++static void dce_clock_read_integrated_info(struct dce_dccg *dccg_dce)
+ {
+- struct dc_debug_options *debug = &clk_dce->base.ctx->dc->debug;
+- struct dc_bios *bp = clk_dce->base.ctx->dc_bios;
++ struct dc_debug_options *debug = &dccg_dce->base.ctx->dc->debug;
++ struct dc_bios *bp = dccg_dce->base.ctx->dc_bios;
+ struct integrated_info info = { { { 0 } } };
+ struct dc_firmware_info fw_info = { { 0 } };
+ int i;
+@@ -348,13 +363,13 @@ static void dce_clock_read_integrated_info(struct dce_dccg *clk_dce)
+ if (bp->integrated_info)
+ info = *bp->integrated_info;
+
+- clk_dce->dentist_vco_freq_khz = info.dentist_vco_freq;
+- if (clk_dce->dentist_vco_freq_khz == 0) {
++ dccg_dce->dentist_vco_freq_khz = info.dentist_vco_freq;
++ if (dccg_dce->dentist_vco_freq_khz == 0) {
+ bp->funcs->get_firmware_info(bp, &fw_info);
+- clk_dce->dentist_vco_freq_khz =
++ dccg_dce->dentist_vco_freq_khz =
+ fw_info.smu_gpu_pll_output_freq;
+- if (clk_dce->dentist_vco_freq_khz == 0)
+- clk_dce->dentist_vco_freq_khz = 3600000;
++ if (dccg_dce->dentist_vco_freq_khz == 0)
++ dccg_dce->dentist_vco_freq_khz = 3600000;
+ }
+
+ /*update the maximum display clock for each power state*/
+@@ -386,18 +401,18 @@ static void dce_clock_read_integrated_info(struct dce_dccg *clk_dce)
+ /*Do not allow bad VBIOS/SBIOS to override with invalid values,
+ * check for > 100MHz*/
+ if (info.disp_clk_voltage[i].max_supported_clk >= 100000)
+- clk_dce->max_clks_by_state[clk_state].display_clk_khz =
++ dccg_dce->max_clks_by_state[clk_state].display_clk_khz =
+ info.disp_clk_voltage[i].max_supported_clk;
+ }
+
+ if (!debug->disable_dfs_bypass && bp->integrated_info)
+ if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
+- clk_dce->dfs_bypass_enabled = true;
++ dccg_dce->dfs_bypass_enabled = true;
+ }
+
+-static void dce_clock_read_ss_info(struct dce_dccg *clk_dce)
++static void dce_clock_read_ss_info(struct dce_dccg *dccg_dce)
+ {
+- struct dc_bios *bp = clk_dce->base.ctx->dc_bios;
++ struct dc_bios *bp = dccg_dce->base.ctx->dc_bios;
+ int ss_info_num = bp->funcs->get_ss_entry_number(
+ bp, AS_SIGNAL_TYPE_GPU_PLL);
+
+@@ -413,14 +428,14 @@ static void dce_clock_read_ss_info(struct dce_dccg *clk_dce)
+ */
+ if (result == BP_RESULT_OK &&
+ info.spread_spectrum_percentage != 0) {
+- clk_dce->ss_on_dprefclk = true;
+- clk_dce->dprefclk_ss_divider = info.spread_percentage_divider;
++ dccg_dce->ss_on_dprefclk = true;
++ dccg_dce->dprefclk_ss_divider = info.spread_percentage_divider;
+
+ if (info.type.CENTER_MODE == 0) {
+ /* TODO: Currently for DP Reference clock we
+ * need only SS percentage for
+ * downspread */
+- clk_dce->dprefclk_ss_percentage =
++ dccg_dce->dprefclk_ss_percentage =
+ info.spread_spectrum_percentage;
+ }
+
+@@ -437,14 +452,14 @@ static void dce_clock_read_ss_info(struct dce_dccg *clk_dce)
+ */
+ if (result == BP_RESULT_OK &&
+ info.spread_spectrum_percentage != 0) {
+- clk_dce->ss_on_dprefclk = true;
+- clk_dce->dprefclk_ss_divider = info.spread_percentage_divider;
++ dccg_dce->ss_on_dprefclk = true;
++ dccg_dce->dprefclk_ss_divider = info.spread_percentage_divider;
+
+ if (info.type.CENTER_MODE == 0) {
+ /* Currently for DP Reference clock we
+ * need only SS percentage for
+ * downspread */
+- clk_dce->dprefclk_ss_percentage =
++ dccg_dce->dprefclk_ss_percentage =
+ info.spread_spectrum_percentage;
+ }
+ }
+@@ -456,31 +471,189 @@ static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_cl
+ return ((safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk);
+ }
+
+-static void dce12_update_clocks(struct dccg *dccg,
+- struct dc_clocks *new_clocks,
+- bool safe_to_lower)
++static void dce110_fill_display_configs(
++ const struct dc_state *context,
++ struct dm_pp_display_configuration *pp_display_cfg)
+ {
+- struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
++ int j;
++ int num_cfgs = 0;
+
+- /* TODO: Investigate why this is needed to fix display corruption. */
+- new_clocks->dispclk_khz = new_clocks->dispclk_khz * 115 / 100;
++ for (j = 0; j < context->stream_count; j++) {
++ int k;
+
+- if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
+- clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
+- clock_voltage_req.clocks_in_khz = new_clocks->dispclk_khz;
+- new_clocks->dispclk_khz = dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
+- dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
++ const struct dc_stream_state *stream = context->streams[j];
++ struct dm_pp_single_disp_config *cfg =
++ &pp_display_cfg->disp_configs[num_cfgs];
++ const struct pipe_ctx *pipe_ctx = NULL;
+
+- dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
++ for (k = 0; k < MAX_PIPES; k++)
++ if (stream == context->res_ctx.pipe_ctx[k].stream) {
++ pipe_ctx = &context->res_ctx.pipe_ctx[k];
++ break;
++ }
++
++ ASSERT(pipe_ctx != NULL);
++
++ /* only notify active stream */
++ if (stream->dpms_off)
++ continue;
++
++ num_cfgs++;
++ cfg->signal = pipe_ctx->stream->signal;
++ cfg->pipe_idx = pipe_ctx->stream_res.tg->inst;
++ cfg->src_height = stream->src.height;
++ cfg->src_width = stream->src.width;
++ cfg->ddi_channel_mapping =
++ stream->sink->link->ddi_channel_mapping.raw;
++ cfg->transmitter =
++ stream->sink->link->link_enc->transmitter;
++ cfg->link_settings.lane_count =
++ stream->sink->link->cur_link_settings.lane_count;
++ cfg->link_settings.link_rate =
++ stream->sink->link->cur_link_settings.link_rate;
++ cfg->link_settings.link_spread =
++ stream->sink->link->cur_link_settings.link_spread;
++ cfg->sym_clock = stream->phy_pix_clk;
++ /* Round v_refresh*/
++ cfg->v_refresh = stream->timing.pix_clk_khz * 1000;
++ cfg->v_refresh /= stream->timing.h_total;
++ cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2)
++ / stream->timing.v_total;
+ }
+
+- if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, dccg->clks.phyclk_khz)) {
+- clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK;
+- clock_voltage_req.clocks_in_khz = new_clocks->phyclk_khz;
+- dccg->clks.phyclk_khz = new_clocks->phyclk_khz;
++ pp_display_cfg->display_count = num_cfgs;
++}
+
+- dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
++static uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context)
++{
++ uint8_t j;
++ uint32_t min_vertical_blank_time = -1;
++
++ for (j = 0; j < context->stream_count; j++) {
++ struct dc_stream_state *stream = context->streams[j];
++ uint32_t vertical_blank_in_pixels = 0;
++ uint32_t vertical_blank_time = 0;
++
++ vertical_blank_in_pixels = stream->timing.h_total *
++ (stream->timing.v_total
++ - stream->timing.v_addressable);
++
++ vertical_blank_time = vertical_blank_in_pixels
++ * 1000 / stream->timing.pix_clk_khz;
++
++ if (min_vertical_blank_time > vertical_blank_time)
++ min_vertical_blank_time = vertical_blank_time;
++ }
++
++ return min_vertical_blank_time;
++}
++
++static int determine_sclk_from_bounding_box(
++ const struct dc *dc,
++ int required_sclk)
++{
++ int i;
++
++ /*
++ * Some asics do not give us sclk levels, so we just report the actual
++ * required sclk
++ */
++ if (dc->sclk_lvls.num_levels == 0)
++ return required_sclk;
++
++ for (i = 0; i < dc->sclk_lvls.num_levels; i++) {
++ if (dc->sclk_lvls.clocks_in_khz[i] >= required_sclk)
++ return dc->sclk_lvls.clocks_in_khz[i];
++ }
++ /*
++ * even maximum level could not satisfy requirement, this
++ * is unexpected at this stage, should have been caught at
++ * validation time
++ */
++ ASSERT(0);
++ return dc->sclk_lvls.clocks_in_khz[dc->sclk_lvls.num_levels - 1];
++}
++
++static void dce_pplib_apply_display_requirements(
++ struct dc *dc,
++ struct dc_state *context)
++{
++ struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
++
++ pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context);
++
++ dce110_fill_display_configs(context, pp_display_cfg);
++
++ if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
++ dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
++}
++
++static void dce11_pplib_apply_display_requirements(
++ struct dc *dc,
++ struct dc_state *context)
++{
++ struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
++
++ pp_display_cfg->all_displays_in_sync =
++ context->bw.dce.all_displays_in_sync;
++ pp_display_cfg->nb_pstate_switch_disable =
++ context->bw.dce.nbp_state_change_enable == false;
++ pp_display_cfg->cpu_cc6_disable =
++ context->bw.dce.cpuc_state_change_enable == false;
++ pp_display_cfg->cpu_pstate_disable =
++ context->bw.dce.cpup_state_change_enable == false;
++ pp_display_cfg->cpu_pstate_separation_time =
++ context->bw.dce.blackout_recovery_time_us;
++
++ pp_display_cfg->min_memory_clock_khz = context->bw.dce.yclk_khz
++ / MEMORY_TYPE_MULTIPLIER_CZ;
++
++ pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box(
++ dc,
++ context->bw.dce.sclk_khz);
++
++ pp_display_cfg->min_engine_clock_deep_sleep_khz
++ = context->bw.dce.sclk_deep_sleep_khz;
++
++ pp_display_cfg->avail_mclk_switch_time_us =
++ dce110_get_min_vblank_time_us(context);
++ /* TODO: dce11.2*/
++ pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
++
++ pp_display_cfg->disp_clk_khz = dc->res_pool->dccg->clks.dispclk_khz;
++
++ dce110_fill_display_configs(context, pp_display_cfg);
++
++ /* TODO: is this still applicable?*/
++ if (pp_display_cfg->display_count == 1) {
++ const struct dc_crtc_timing *timing =
++ &context->streams[0]->timing;
++
++ pp_display_cfg->crtc_index =
++ pp_display_cfg->disp_configs[0].pipe_idx;
++ pp_display_cfg->line_time_in_us = timing->h_total * 1000 / timing->pix_clk_khz;
+ }
++
++ if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
++ dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
++}
++
++static void dcn1_pplib_apply_display_requirements(
++ struct dc *dc,
++ struct dc_state *context)
++{
++ struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
++
++ pp_display_cfg->min_engine_clock_khz = dc->res_pool->dccg->clks.dcfclk_khz;
++ pp_display_cfg->min_memory_clock_khz = dc->res_pool->dccg->clks.fclk_khz;
++ pp_display_cfg->min_engine_clock_deep_sleep_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz;
++ pp_display_cfg->min_dcfc_deep_sleep_clock_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz;
++ pp_display_cfg->min_dcfclock_khz = dc->res_pool->dccg->clks.dcfclk_khz;
++ pp_display_cfg->disp_clk_khz = dc->res_pool->dccg->clks.dispclk_khz;
++ dce110_fill_display_configs(context, pp_display_cfg);
++
++ if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
++ dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
+ }
+
+ #ifdef CONFIG_X86
+@@ -538,7 +711,7 @@ static void dcn1_ramp_up_dispclk_with_dpp(struct dccg *dccg, struct dc_clocks *n
+ int i;
+
+ /* set disp clk to dpp clk threshold */
+- dccg->funcs->set_dispclk(dccg, dispclk_to_dpp_threshold);
++ dce112_set_clock(dccg, dispclk_to_dpp_threshold);
+
+ /* update request dpp clk division option */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+@@ -555,7 +728,7 @@ static void dcn1_ramp_up_dispclk_with_dpp(struct dccg *dccg, struct dc_clocks *n
+
+ /* If target clk not same as dppclk threshold, set to target clock */
+ if (dispclk_to_dpp_threshold != new_clocks->dispclk_khz)
+- dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
++ dce112_set_clock(dccg, new_clocks->dispclk_khz);
+
+ dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
+ dccg->clks.dppclk_khz = new_clocks->dppclk_khz;
+@@ -563,10 +736,11 @@ static void dcn1_ramp_up_dispclk_with_dpp(struct dccg *dccg, struct dc_clocks *n
+ }
+
+ static void dcn1_update_clocks(struct dccg *dccg,
+- struct dc_clocks *new_clocks,
++ struct dc_state *context,
+ bool safe_to_lower)
+ {
+ struct dc *dc = dccg->ctx->dc;
++ struct dc_clocks *new_clocks = &context->bw.dcn.clk;
+ struct pp_smu_display_requirement_rv *smu_req_cur =
+ &dc->res_pool->pp_smu_req;
+ struct pp_smu_display_requirement_rv smu_req = *smu_req_cur;
+@@ -627,6 +801,7 @@ static void dcn1_update_clocks(struct dccg *dccg,
+ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+ if (pp_smu->set_display_requirement)
+ pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
++ dcn1_pplib_apply_display_requirements(dc, context);
+ }
+
+ /* dcn1 dppclk is tied to dispclk */
+@@ -646,6 +821,7 @@ static void dcn1_update_clocks(struct dccg *dccg,
+ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+ if (pp_smu->set_display_requirement)
+ pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
++ dcn1_pplib_apply_display_requirements(dc, context);
+ }
+
+
+@@ -654,17 +830,18 @@ static void dcn1_update_clocks(struct dccg *dccg,
+ #endif
+
+ static void dce_update_clocks(struct dccg *dccg,
+- struct dc_clocks *new_clocks,
++ struct dc_state *context,
+ bool safe_to_lower)
+ {
+ struct dm_pp_power_level_change_request level_change_req;
+- struct dce_dccg *clk_dce = TO_DCE_CLOCKS(dccg);
++ int unpatched_disp_clk = context->bw.dce.dispclk_khz;
++ struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
+
+- /* TODO: Investigate why this is needed to fix display corruption. */
+- if (!clk_dce->dfs_bypass_active)
+- new_clocks->dispclk_khz = new_clocks->dispclk_khz * 115 / 100;
++ /*TODO: W/A for dal3 linux, investigate why this works */
++ if (!dccg_dce->dfs_bypass_active)
++ context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
+
+- level_change_req.power_level = dce_get_required_clocks_state(dccg, new_clocks);
++ level_change_req.power_level = dce_get_required_clocks_state(dccg, context);
+ /* get max clock state from PPLIB */
+ if ((level_change_req.power_level < dccg->cur_min_clks_state && safe_to_lower)
+ || level_change_req.power_level > dccg->cur_min_clks_state) {
+@@ -672,127 +849,143 @@ static void dce_update_clocks(struct dccg *dccg,
+ dccg->cur_min_clks_state = level_change_req.power_level;
+ }
+
+- if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
+- new_clocks->dispclk_khz = dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
+- dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
++ if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, dccg->clks.dispclk_khz)) {
++ context->bw.dce.dispclk_khz = dce_set_clock(dccg, context->bw.dce.dispclk_khz);
++ dccg->clks.dispclk_khz = context->bw.dce.dispclk_khz;
+ }
++ dce_pplib_apply_display_requirements(dccg->ctx->dc, context);
++
++ context->bw.dce.dispclk_khz = unpatched_disp_clk;
+ }
+
+-static bool dce_update_dfs_bypass(
+- struct dccg *dccg,
+- struct dc *dc,
+- struct dc_state *context,
+- int requested_clock_khz)
++static void dce11_update_clocks(struct dccg *dccg,
++ struct dc_state *context,
++ bool safe_to_lower)
+ {
+- struct dce_dccg *clk_dce = TO_DCE_CLOCKS(dccg);
+- struct resource_context *res_ctx = &context->res_ctx;
+- enum signal_type signal_type = SIGNAL_TYPE_NONE;
+- bool was_active = clk_dce->dfs_bypass_active;
+- int i;
+-
+- /* Disable DFS bypass by default. */
+- clk_dce->dfs_bypass_active = false;
+-
+- /* Check that DFS bypass is available. */
+- if (!clk_dce->dfs_bypass_enabled)
+- goto update;
+-
+- /* Check if the requested display clock is below the threshold. */
+- if (requested_clock_khz >= 400000)
+- goto update;
+-
+- /* DFS-bypass should only be enabled on single stream setups */
+- if (context->stream_count != 1)
+- goto update;
+-
+- /* Check that the stream's signal type is an embedded panel */
+- for (i = 0; i < dc->res_pool->pipe_count; i++) {
+- if (res_ctx->pipe_ctx[i].stream) {
+- struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+-
+- signal_type = pipe_ctx->stream->sink->link->connector_signal;
+- break;
+- }
+- }
+-
+- if (signal_type == SIGNAL_TYPE_EDP ||
+- signal_type == SIGNAL_TYPE_LVDS)
+- clk_dce->dfs_bypass_active = true;
+-
+-update:
+- /* Update the clock state. We don't need to respect safe_to_lower
+- * because DFS bypass should always be greater than the current
+- * display clock frequency.
+- */
+- if (was_active != clk_dce->dfs_bypass_active) {
+- dccg->clks.dispclk_khz =
+- dccg->funcs->set_dispclk(dccg, dccg->clks.dispclk_khz);
+- return true;
+- }
+-
+- return false;
++ struct dm_pp_power_level_change_request level_change_req;
++
++ level_change_req.power_level = dce_get_required_clocks_state(dccg, context);
++ /* get max clock state from PPLIB */
++ if ((level_change_req.power_level < dccg->cur_min_clks_state && safe_to_lower)
++ || level_change_req.power_level > dccg->cur_min_clks_state) {
++ if (dm_pp_apply_power_level_change_request(dccg->ctx, &level_change_req))
++ dccg->cur_min_clks_state = level_change_req.power_level;
++ }
++
++ if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, dccg->clks.dispclk_khz)) {
++ context->bw.dce.dispclk_khz = dce_set_clock(dccg, context->bw.dce.dispclk_khz);
++ dccg->clks.dispclk_khz = context->bw.dce.dispclk_khz;
++ }
++ dce11_pplib_apply_display_requirements(dccg->ctx->dc, context);
+ }
+
+-#ifdef CONFIG_X86
+-static const struct display_clock_funcs dcn1_funcs = {
++static void dce112_update_clocks(struct dccg *dccg,
++ struct dc_state *context,
++ bool safe_to_lower)
++{
++ struct dm_pp_power_level_change_request level_change_req;
++
++ level_change_req.power_level = dce_get_required_clocks_state(dccg, context);
++ /* get max clock state from PPLIB */
++ if ((level_change_req.power_level < dccg->cur_min_clks_state && safe_to_lower)
++ || level_change_req.power_level > dccg->cur_min_clks_state) {
++ if (dm_pp_apply_power_level_change_request(dccg->ctx, &level_change_req))
++ dccg->cur_min_clks_state = level_change_req.power_level;
++ }
++
++ if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, dccg->clks.dispclk_khz)) {
++ context->bw.dce.dispclk_khz = dce112_set_clock(dccg, context->bw.dce.dispclk_khz);
++ dccg->clks.dispclk_khz = context->bw.dce.dispclk_khz;
++ }
++ dce11_pplib_apply_display_requirements(dccg->ctx->dc, context);
++}
++
++static void dce12_update_clocks(struct dccg *dccg,
++ struct dc_state *context,
++ bool safe_to_lower)
++{
++ struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
++ int max_pix_clk = get_max_pixel_clock_for_all_paths(context);
++ int unpatched_disp_clk = context->bw.dce.dispclk_khz;
++
++ /* W/A for dal3 linux */
++ context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
++
++ if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, dccg->clks.dispclk_khz)) {
++ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
++ clock_voltage_req.clocks_in_khz = context->bw.dce.dispclk_khz;
++ context->bw.dce.dispclk_khz = dce112_set_clock(dccg, context->bw.dce.dispclk_khz);
++ dccg->clks.dispclk_khz = context->bw.dce.dispclk_khz;
++
++ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
++ }
++
++ if (should_set_clock(safe_to_lower, max_pix_clk, dccg->clks.phyclk_khz)) {
++ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK;
++ clock_voltage_req.clocks_in_khz = max_pix_clk;
++ dccg->clks.phyclk_khz = max_pix_clk;
++
++ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
++ }
++ dce11_pplib_apply_display_requirements(dccg->ctx->dc, context);
++
++ context->bw.dce.dispclk_khz = unpatched_disp_clk;
++}
++
++#ifdef CONFIG_DRM_AMD_DC_DCN1_0
++static const struct dccg_funcs dcn1_funcs = {
+ .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+- .set_dispclk = dce112_set_clock,
+ .update_clocks = dcn1_update_clocks
+ };
+ #endif
+
+-static const struct display_clock_funcs dce120_funcs = {
++static const struct dccg_funcs dce120_funcs = {
+ .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+- .set_dispclk = dce112_set_clock,
+ .update_clocks = dce12_update_clocks
+ };
+
+-static const struct display_clock_funcs dce112_funcs = {
++static const struct dccg_funcs dce112_funcs = {
+ .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
+- .set_dispclk = dce112_set_clock,
+- .update_clocks = dce_update_clocks
++ .update_clocks = dce112_update_clocks
+ };
+
+-static const struct display_clock_funcs dce110_funcs = {
++static const struct dccg_funcs dce110_funcs = {
+ .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
+- .set_dispclk = dce_psr_set_clock,
+- .update_clocks = dce_update_clocks,
+- .update_dfs_bypass = dce_update_dfs_bypass
++ .update_clocks = dce11_update_clocks,
+ };
+
+-static const struct display_clock_funcs dce_funcs = {
++static const struct dccg_funcs dce_funcs = {
+ .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
+- .set_dispclk = dce_set_clock,
+ .update_clocks = dce_update_clocks
+ };
+
+ static void dce_dccg_construct(
+- struct dce_dccg *clk_dce,
++ struct dce_dccg *dccg_dce,
+ struct dc_context *ctx,
+ const struct dccg_registers *regs,
+ const struct dccg_shift *clk_shift,
+ const struct dccg_mask *clk_mask)
+ {
+- struct dccg *base = &clk_dce->base;
++ struct dccg *base = &dccg_dce->base;
+
+ base->ctx = ctx;
+ base->funcs = &dce_funcs;
+
+- clk_dce->regs = regs;
+- clk_dce->clk_shift = clk_shift;
+- clk_dce->clk_mask = clk_mask;
++ dccg_dce->regs = regs;
++ dccg_dce->dccg_shift = clk_shift;
++ dccg_dce->dccg_mask = clk_mask;
+
+- clk_dce->dfs_bypass_disp_clk = 0;
++ dccg_dce->dfs_bypass_disp_clk = 0;
+
+- clk_dce->dprefclk_ss_percentage = 0;
+- clk_dce->dprefclk_ss_divider = 1000;
+- clk_dce->ss_on_dprefclk = false;
++ dccg_dce->dprefclk_ss_percentage = 0;
++ dccg_dce->dprefclk_ss_divider = 1000;
++ dccg_dce->ss_on_dprefclk = false;
+
+ base->max_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
+ base->cur_min_clks_state = DM_PP_CLOCKS_STATE_INVALID;
+
+- dce_clock_read_integrated_info(clk_dce);
+- dce_clock_read_ss_info(clk_dce);
++ dce_clock_read_integrated_info(dccg_dce);
++ dce_clock_read_ss_info(dccg_dce);
+ }
+
+ struct dccg *dce_dccg_create(
+@@ -801,21 +994,21 @@ struct dccg *dce_dccg_create(
+ const struct dccg_shift *clk_shift,
+ const struct dccg_mask *clk_mask)
+ {
+- struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
++ struct dce_dccg *dccg_dce = kzalloc(sizeof(*dccg_dce), GFP_KERNEL);
+
+- if (clk_dce == NULL) {
++ if (dccg_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+- memcpy(clk_dce->max_clks_by_state,
++ memcpy(dccg_dce->max_clks_by_state,
+ dce80_max_clks_by_state,
+ sizeof(dce80_max_clks_by_state));
+
+ dce_dccg_construct(
+- clk_dce, ctx, regs, clk_shift, clk_mask);
++ dccg_dce, ctx, regs, clk_shift, clk_mask);
+
+- return &clk_dce->base;
++ return &dccg_dce->base;
+ }
+
+ struct dccg *dce110_dccg_create(
+@@ -824,23 +1017,23 @@ struct dccg *dce110_dccg_create(
+ const struct dccg_shift *clk_shift,
+ const struct dccg_mask *clk_mask)
+ {
+- struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
++ struct dce_dccg *dccg_dce = kzalloc(sizeof(*dccg_dce), GFP_KERNEL);
+
+- if (clk_dce == NULL) {
++ if (dccg_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+- memcpy(clk_dce->max_clks_by_state,
++ memcpy(dccg_dce->max_clks_by_state,
+ dce110_max_clks_by_state,
+ sizeof(dce110_max_clks_by_state));
+
+ dce_dccg_construct(
+- clk_dce, ctx, regs, clk_shift, clk_mask);
++ dccg_dce, ctx, regs, clk_shift, clk_mask);
+
+- clk_dce->base.funcs = &dce110_funcs;
++ dccg_dce->base.funcs = &dce110_funcs;
+
+- return &clk_dce->base;
++ return &dccg_dce->base;
+ }
+
+ struct dccg *dce112_dccg_create(
+@@ -849,45 +1042,45 @@ struct dccg *dce112_dccg_create(
+ const struct dccg_shift *clk_shift,
+ const struct dccg_mask *clk_mask)
+ {
+- struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
++ struct dce_dccg *dccg_dce = kzalloc(sizeof(*dccg_dce), GFP_KERNEL);
+
+- if (clk_dce == NULL) {
++ if (dccg_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+- memcpy(clk_dce->max_clks_by_state,
++ memcpy(dccg_dce->max_clks_by_state,
+ dce112_max_clks_by_state,
+ sizeof(dce112_max_clks_by_state));
+
+ dce_dccg_construct(
+- clk_dce, ctx, regs, clk_shift, clk_mask);
++ dccg_dce, ctx, regs, clk_shift, clk_mask);
+
+- clk_dce->base.funcs = &dce112_funcs;
++ dccg_dce->base.funcs = &dce112_funcs;
+
+- return &clk_dce->base;
++ return &dccg_dce->base;
+ }
+
+ struct dccg *dce120_dccg_create(struct dc_context *ctx)
+ {
+- struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
++ struct dce_dccg *dccg_dce = kzalloc(sizeof(*dccg_dce), GFP_KERNEL);
+
+- if (clk_dce == NULL) {
++ if (dccg_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+- memcpy(clk_dce->max_clks_by_state,
++ memcpy(dccg_dce->max_clks_by_state,
+ dce120_max_clks_by_state,
+ sizeof(dce120_max_clks_by_state));
+
+ dce_dccg_construct(
+- clk_dce, ctx, NULL, NULL, NULL);
++ dccg_dce, ctx, NULL, NULL, NULL);
+
+- clk_dce->dprefclk_khz = 600000;
+- clk_dce->base.funcs = &dce120_funcs;
++ dccg_dce->dprefclk_khz = 600000;
++ dccg_dce->base.funcs = &dce120_funcs;
+
+- return &clk_dce->base;
++ return &dccg_dce->base;
+ }
+
+ #ifdef CONFIG_X86
+@@ -896,46 +1089,46 @@ struct dccg *dcn1_dccg_create(struct dc_context *ctx)
+ struct dc_debug_options *debug = &ctx->dc->debug;
+ struct dc_bios *bp = ctx->dc_bios;
+ struct dc_firmware_info fw_info = { { 0 } };
+- struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
++ struct dce_dccg *dccg_dce = kzalloc(sizeof(*dccg_dce), GFP_KERNEL);
+
+- if (clk_dce == NULL) {
++ if (dccg_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+- clk_dce->base.ctx = ctx;
+- clk_dce->base.funcs = &dcn1_funcs;
++ dccg_dce->base.ctx = ctx;
++ dccg_dce->base.funcs = &dcn1_funcs;
+
+- clk_dce->dfs_bypass_disp_clk = 0;
++ dccg_dce->dfs_bypass_disp_clk = 0;
+
+- clk_dce->dprefclk_ss_percentage = 0;
+- clk_dce->dprefclk_ss_divider = 1000;
+- clk_dce->ss_on_dprefclk = false;
++ dccg_dce->dprefclk_ss_percentage = 0;
++ dccg_dce->dprefclk_ss_divider = 1000;
++ dccg_dce->ss_on_dprefclk = false;
+
+- clk_dce->dprefclk_khz = 600000;
++ dccg_dce->dprefclk_khz = 600000;
+ if (bp->integrated_info)
+- clk_dce->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
+- if (clk_dce->dentist_vco_freq_khz == 0) {
++ dccg_dce->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
++ if (dccg_dce->dentist_vco_freq_khz == 0) {
+ bp->funcs->get_firmware_info(bp, &fw_info);
+- clk_dce->dentist_vco_freq_khz = fw_info.smu_gpu_pll_output_freq;
+- if (clk_dce->dentist_vco_freq_khz == 0)
+- clk_dce->dentist_vco_freq_khz = 3600000;
++ dccg_dce->dentist_vco_freq_khz = fw_info.smu_gpu_pll_output_freq;
++ if (dccg_dce->dentist_vco_freq_khz == 0)
++ dccg_dce->dentist_vco_freq_khz = 3600000;
+ }
+
+ if (!debug->disable_dfs_bypass && bp->integrated_info)
+ if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
+- clk_dce->dfs_bypass_enabled = true;
++ dccg_dce->dfs_bypass_enabled = true;
+
+- dce_clock_read_ss_info(clk_dce);
++ dce_clock_read_ss_info(dccg_dce);
+
+- return &clk_dce->base;
++ return &dccg_dce->base;
+ }
+ #endif
+
+ void dce_dccg_destroy(struct dccg **dccg)
+ {
+- struct dce_dccg *clk_dce = TO_DCE_CLOCKS(*dccg);
++ struct dce_dccg *dccg_dce = TO_DCE_DCCG(*dccg);
+
+- kfree(clk_dce);
++ kfree(dccg_dce);
+ *dccg = NULL;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
+index 9179173..104145f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
+@@ -29,6 +29,8 @@
+
+ #include "display_clock.h"
+
++#define MEMORY_TYPE_MULTIPLIER_CZ 4
++
+ #define CLK_COMMON_REG_LIST_DCE_BASE() \
+ .DPREFCLK_CNTL = mmDPREFCLK_CNTL, \
+ .DENTIST_DISPCLK_CNTL = mmDENTIST_DISPCLK_CNTL
+@@ -69,8 +71,8 @@ struct dccg_registers {
+ struct dce_dccg {
+ struct dccg base;
+ const struct dccg_registers *regs;
+- const struct dccg_shift *clk_shift;
+- const struct dccg_mask *clk_mask;
++ const struct dccg_shift *dccg_shift;
++ const struct dccg_mask *dccg_mask;
+
+ struct state_dependent_clocks max_clks_by_state[DM_PP_CLOCKS_MAX_STATES];
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
+index 74c05e8..2725eac 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
+@@ -105,74 +105,24 @@ bool dce100_enable_display_power_gating(
+ return false;
+ }
+
+-static void dce100_pplib_apply_display_requirements(
+- struct dc *dc,
+- struct dc_state *context)
+-{
+- struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
+-
+- pp_display_cfg->avail_mclk_switch_time_us =
+- dce110_get_min_vblank_time_us(context);
+- /*pp_display_cfg->min_memory_clock_khz = context->bw.dce.yclk_khz
+- / MEMORY_TYPE_MULTIPLIER;*/
+-
+- dce110_fill_display_configs(context, pp_display_cfg);
+-
+- if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof(
+- struct dm_pp_display_configuration)) != 0)
+- dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
+-
+- dc->prev_display_config = *pp_display_cfg;
+-}
+-
+-/* unit: in_khz before mode set, get pixel clock from context. ASIC register
+- * may not be programmed yet
+- */
+-static uint32_t get_max_pixel_clock_for_all_paths(
+- struct dc *dc,
+- struct dc_state *context)
+-{
+- uint32_t max_pix_clk = 0;
+- int i;
+-
+- for (i = 0; i < MAX_PIPES; i++) {
+- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+-
+- if (pipe_ctx->stream == NULL)
+- continue;
+-
+- /* do not check under lay */
+- if (pipe_ctx->top_pipe)
+- continue;
+-
+- if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk > max_pix_clk)
+- max_pix_clk =
+- pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
+- }
+- return max_pix_clk;
+-}
+-
+ void dce100_set_bandwidth(
+ struct dc *dc,
+ struct dc_state *context,
+ bool decrease_allowed)
+ {
+- struct dc_clocks req_clks;
++ int dispclk_khz = context->bw.dce.dispclk_khz;
+
+- req_clks.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
+- req_clks.phyclk_khz = get_max_pixel_clock_for_all_paths(dc, context);
++ context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
+
+ dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
+
+ dc->res_pool->dccg->funcs->update_clocks(
+ dc->res_pool->dccg,
+- &req_clks,
++ context,
+ decrease_allowed);
+-
+- dce100_pplib_apply_display_requirements(dc, context);
++ context->bw.dce.dispclk_khz = dispclk_khz;
+ }
+
+-
+ /**************************************************************************/
+
+ void dce100_hw_sequencer_construct(struct dc *dc)
+@@ -181,7 +131,5 @@ void dce100_hw_sequencer_construct(struct dc *dc)
+
+ dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating;
+ dc->hwss.set_bandwidth = dce100_set_bandwidth;
+- dc->hwss.pplib_apply_display_requirements =
+- dce100_pplib_apply_display_requirements;
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+index 14754a8..ae70004 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+@@ -22,6 +22,7 @@
+ * Authors: AMD
+ *
+ */
++#include "../dce/dce_clocks.h"
+ #include "dm_services.h"
+
+ #include "link_encoder.h"
+@@ -40,7 +41,6 @@
+ #include "dce/dce_ipp.h"
+ #include "dce/dce_transform.h"
+ #include "dce/dce_opp.h"
+-#include "dce/dce_clocks.h"
+ #include "dce/dce_clock_source.h"
+ #include "dce/dce_audio.h"
+ #include "dce/dce_hwseq.h"
+@@ -767,7 +767,7 @@ bool dce100_validate_bandwidth(
+ if (at_least_one_pipe) {
+ /* TODO implement when needed but for now hardcode max value*/
+ context->bw.dce.dispclk_khz = 681000;
+- context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER;
++ context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
+ } else {
+ context->bw.dce.dispclk_khz = 0;
+ context->bw.dce.yclk_khz = 0;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index 419d0e4..6a9b4c2 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -1192,8 +1192,8 @@ static void build_audio_output(
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ audio_output->pll_info.dp_dto_source_clock_in_khz =
+- state->dis_clk->funcs->get_dp_ref_clk_frequency(
+- state->dis_clk);
++ state->dccg->funcs->get_dp_ref_clk_frequency(
++ state->dccg);
+ }
+
+ audio_output->pll_info.feed_back_divider =
+@@ -1743,34 +1743,6 @@ static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
+ set_static_screen_control(pipe_ctx[i]->stream_res.tg, value);
+ }
+
+-/* unit: in_khz before mode set, get pixel clock from context. ASIC register
+- * may not be programmed yet
+- */
+-static uint32_t get_max_pixel_clock_for_all_paths(
+- struct dc *dc,
+- struct dc_state *context)
+-{
+- uint32_t max_pix_clk = 0;
+- int i;
+-
+- for (i = 0; i < MAX_PIPES; i++) {
+- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+-
+- if (pipe_ctx->stream == NULL)
+- continue;
+-
+- /* do not check under lay */
+- if (pipe_ctx->top_pipe)
+- continue;
+-
+- if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk > max_pix_clk)
+- max_pix_clk =
+- pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
+- }
+-
+- return max_pix_clk;
+-}
+-
+ /*
+ * Check if FBC can be enabled
+ */
+@@ -2377,191 +2349,22 @@ static void init_hw(struct dc *dc)
+
+ }
+
+-void dce110_fill_display_configs(
+- const struct dc_state *context,
+- struct dm_pp_display_configuration *pp_display_cfg)
+-{
+- int j;
+- int num_cfgs = 0;
+-
+- for (j = 0; j < context->stream_count; j++) {
+- int k;
+-
+- const struct dc_stream_state *stream = context->streams[j];
+- struct dm_pp_single_disp_config *cfg =
+- &pp_display_cfg->disp_configs[num_cfgs];
+- const struct pipe_ctx *pipe_ctx = NULL;
+-
+- for (k = 0; k < MAX_PIPES; k++)
+- if (stream == context->res_ctx.pipe_ctx[k].stream) {
+- pipe_ctx = &context->res_ctx.pipe_ctx[k];
+- break;
+- }
+-
+- ASSERT(pipe_ctx != NULL);
+-
+- /* only notify active stream */
+- if (stream->dpms_off)
+- continue;
+-
+- num_cfgs++;
+- cfg->signal = pipe_ctx->stream->signal;
+- cfg->pipe_idx = pipe_ctx->stream_res.tg->inst;
+- cfg->src_height = stream->src.height;
+- cfg->src_width = stream->src.width;
+- cfg->ddi_channel_mapping =
+- stream->sink->link->ddi_channel_mapping.raw;
+- cfg->transmitter =
+- stream->sink->link->link_enc->transmitter;
+- cfg->link_settings.lane_count =
+- stream->sink->link->cur_link_settings.lane_count;
+- cfg->link_settings.link_rate =
+- stream->sink->link->cur_link_settings.link_rate;
+- cfg->link_settings.link_spread =
+- stream->sink->link->cur_link_settings.link_spread;
+- cfg->sym_clock = stream->phy_pix_clk;
+- /* Round v_refresh*/
+- cfg->v_refresh = stream->timing.pix_clk_khz * 1000;
+- cfg->v_refresh /= stream->timing.h_total;
+- cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2)
+- / stream->timing.v_total;
+- }
+-
+- pp_display_cfg->display_count = num_cfgs;
+-}
+-
+-uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context)
+-{
+- uint8_t j;
+- uint32_t min_vertical_blank_time = -1;
+-
+- for (j = 0; j < context->stream_count; j++) {
+- struct dc_stream_state *stream = context->streams[j];
+- uint32_t vertical_blank_in_pixels = 0;
+- uint32_t vertical_blank_time = 0;
+-
+- vertical_blank_in_pixels = stream->timing.h_total *
+- (stream->timing.v_total
+- - stream->timing.v_addressable);
+-
+- vertical_blank_time = vertical_blank_in_pixels
+- * 1000 / stream->timing.pix_clk_khz;
+-
+- if (min_vertical_blank_time > vertical_blank_time)
+- min_vertical_blank_time = vertical_blank_time;
+- }
+-
+- return min_vertical_blank_time;
+-}
+-
+-static int determine_sclk_from_bounding_box(
+- const struct dc *dc,
+- int required_sclk)
+-{
+- int i;
+-
+- /*
+- * Some asics do not give us sclk levels, so we just report the actual
+- * required sclk
+- */
+- if (dc->sclk_lvls.num_levels == 0)
+- return required_sclk;
+-
+- for (i = 0; i < dc->sclk_lvls.num_levels; i++) {
+- if (dc->sclk_lvls.clocks_in_khz[i] >= required_sclk)
+- return dc->sclk_lvls.clocks_in_khz[i];
+- }
+- /*
+- * even maximum level could not satisfy requirement, this
+- * is unexpected at this stage, should have been caught at
+- * validation time
+- */
+- ASSERT(0);
+- return dc->sclk_lvls.clocks_in_khz[dc->sclk_lvls.num_levels - 1];
+-}
+-
+-static void pplib_apply_display_requirements(
+- struct dc *dc,
+- struct dc_state *context)
+-{
+- struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
+-
+- pp_display_cfg->all_displays_in_sync =
+- context->bw.dce.all_displays_in_sync;
+- pp_display_cfg->nb_pstate_switch_disable =
+- context->bw.dce.nbp_state_change_enable == false;
+- pp_display_cfg->cpu_cc6_disable =
+- context->bw.dce.cpuc_state_change_enable == false;
+- pp_display_cfg->cpu_pstate_disable =
+- context->bw.dce.cpup_state_change_enable == false;
+- pp_display_cfg->cpu_pstate_separation_time =
+- context->bw.dce.blackout_recovery_time_us;
+-
+- pp_display_cfg->min_memory_clock_khz = context->bw.dce.yclk_khz
+- / MEMORY_TYPE_MULTIPLIER;
+-
+- pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box(
+- dc,
+- context->bw.dce.sclk_khz);
+-
+- pp_display_cfg->min_engine_clock_deep_sleep_khz
+- = context->bw.dce.sclk_deep_sleep_khz;
+-
+- pp_display_cfg->avail_mclk_switch_time_us =
+- dce110_get_min_vblank_time_us(context);
+- /* TODO: dce11.2*/
+- pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
+-
+- pp_display_cfg->disp_clk_khz = dc->res_pool->dccg->clks.dispclk_khz;
+-
+- dce110_fill_display_configs(context, pp_display_cfg);
+-
+- /* TODO: is this still applicable?*/
+- if (pp_display_cfg->display_count == 1) {
+- const struct dc_crtc_timing *timing =
+- &context->streams[0]->timing;
+-
+- pp_display_cfg->crtc_index =
+- pp_display_cfg->disp_configs[0].pipe_idx;
+- pp_display_cfg->line_time_in_us = timing->h_total * 1000
+- / timing->pix_clk_khz;
+- }
+-
+- if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof(
+- struct dm_pp_display_configuration)) != 0)
+- dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
+-
+- dc->prev_display_config = *pp_display_cfg;
+-}
+-
+-static void dce110_set_bandwidth(
++void dce110_set_bandwidth(
+ struct dc *dc,
+ struct dc_state *context,
+ bool decrease_allowed)
+ {
+- struct dc_clocks req_clks;
+ struct dccg *dccg = dc->res_pool->dccg;
+
+- req_clks.dispclk_khz = context->bw.dce.dispclk_khz;
+- req_clks.phyclk_khz = get_max_pixel_clock_for_all_paths(dc, context);
+-
+ if (decrease_allowed)
+ dce110_set_displaymarks(dc, context);
+ else
+ dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
+
+- if (dccg->funcs->update_dfs_bypass)
+- dccg->funcs->update_dfs_bypass(
+- dccg,
+- dc,
+- context,
+- req_clks.dispclk_khz);
+-
+ dccg->funcs->update_clocks(
+ dccg,
+- &req_clks,
++ context,
+ decrease_allowed);
+- pplib_apply_display_requirements(dc, context);
+ }
+
+ static void dce110_program_front_end_for_pipe(
+@@ -2836,10 +2639,6 @@ void dce110_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
+ pipe_ctx->plane_res.xfm, attributes);
+ }
+
+-static void ready_shared_resources(struct dc *dc, struct dc_state *context) {}
+-
+-static void optimize_shared_resources(struct dc *dc) {}
+-
+ static const struct hw_sequencer_funcs dce110_funcs = {
+ .program_gamut_remap = program_gamut_remap,
+ .program_csc_matrix = program_csc_matrix,
+@@ -2874,9 +2673,6 @@ static const struct hw_sequencer_funcs dce110_funcs = {
+ .setup_stereo = NULL,
+ .set_avmute = dce110_set_avmute,
+ .wait_for_mpcc_disconnect = dce110_wait_for_mpcc_disconnect,
+- .ready_shared_resources = ready_shared_resources,
+- .optimize_shared_resources = optimize_shared_resources,
+- .pplib_apply_display_requirements = pplib_apply_display_requirements,
+ .edp_backlight_control = hwss_edp_backlight_control,
+ .edp_power_control = hwss_edp_power_control,
+ .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+index d6db3db..c5e04f8 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+@@ -40,7 +40,6 @@ enum dc_status dce110_apply_ctx_to_hw(
+ struct dc_state *context);
+
+
+-
+ void dce110_enable_stream(struct pipe_ctx *pipe_ctx);
+
+ void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option);
+@@ -64,11 +63,10 @@ void dce110_set_safe_displaymarks(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool);
+
+-void dce110_fill_display_configs(
+- const struct dc_state *context,
+- struct dm_pp_display_configuration *pp_display_cfg);
+-
+-uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context);
++void dce110_set_bandwidth(
++ struct dc *dc,
++ struct dc_state *context,
++ bool decrease_allowed);
+
+ void dp_receiver_power_ctrl(struct dc_link *link, bool on);
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+index de19093..ea8a8bb 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+@@ -31,6 +31,7 @@
+ #include "resource.h"
+ #include "dce110/dce110_resource.h"
+
++#include "../dce/dce_clocks.h"
+ #include "include/irq_service_interface.h"
+ #include "dce/dce_audio.h"
+ #include "dce110/dce110_timing_generator.h"
+@@ -45,7 +46,6 @@
+ #include "dce110/dce110_transform_v.h"
+ #include "dce/dce_opp.h"
+ #include "dce110/dce110_opp_v.h"
+-#include "dce/dce_clocks.h"
+ #include "dce/dce_clock_source.h"
+ #include "dce/dce_hwseq.h"
+ #include "dce110/dce110_hw_sequencer.h"
+@@ -1173,12 +1173,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
+ &clks);
+
+ dc->bw_vbios->low_yclk = bw_frc_to_fixed(
+- clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER, 1000);
++ clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER_CZ, 1000);
+ dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
+- clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER,
++ clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER_CZ,
+ 1000);
+ dc->bw_vbios->high_yclk = bw_frc_to_fixed(
+- clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER,
++ clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER_CZ,
+ 1000);
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+index 3ce79c2..c7e2189 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+@@ -23,6 +23,7 @@
+ *
+ */
+
++#include "../dce/dce_clocks.h"
+ #include "dm_services.h"
+
+ #include "link_encoder.h"
+@@ -42,7 +43,6 @@
+ #include "dce/dce_audio.h"
+ #include "dce/dce_opp.h"
+ #include "dce/dce_ipp.h"
+-#include "dce/dce_clocks.h"
+ #include "dce/dce_clock_source.h"
+
+ #include "dce/dce_hwseq.h"
+@@ -1015,12 +1015,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
+ &clks);
+
+ dc->bw_vbios->low_yclk = bw_frc_to_fixed(
+- clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER, 1000);
++ clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER_CZ, 1000);
+ dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
+- clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER,
++ clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER_CZ,
+ 1000);
+ dc->bw_vbios->high_yclk = bw_frc_to_fixed(
+- clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER,
++ clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER_CZ,
+ 1000);
+
+ return;
+@@ -1056,12 +1056,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
+ * YCLK = UMACLK*m_memoryTypeMultiplier
+ */
+ dc->bw_vbios->low_yclk = bw_frc_to_fixed(
+- mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER, 1000);
++ mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, 1000);
+ dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
+- mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER,
++ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ,
+ 1000);
+ dc->bw_vbios->high_yclk = bw_frc_to_fixed(
+- mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER,
++ mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ,
+ 1000);
+
+ /* Now notify PPLib/SMU about which Watermarks sets they should select
+diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+index 79ab5f9..da2d50d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+@@ -31,6 +31,8 @@
+ #include "resource.h"
+ #include "include/irq_service_interface.h"
+ #include "dce120_resource.h"
++
++#include "../dce/dce_clocks.h"
+ #include "dce112/dce112_resource.h"
+
+ #include "dce110/dce110_resource.h"
+@@ -39,7 +41,6 @@
+ #include "irq/dce120/irq_service_dce120.h"
+ #include "dce/dce_opp.h"
+ #include "dce/dce_clock_source.h"
+-#include "dce/dce_clocks.h"
+ #include "dce/dce_ipp.h"
+ #include "dce/dce_mem_input.h"
+
+@@ -834,12 +835,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
+ * YCLK = UMACLK*m_memoryTypeMultiplier
+ */
+ dc->bw_vbios->low_yclk = bw_frc_to_fixed(
+- mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER, 1000);
++ mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, 1000);
+ dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
+- mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER,
++ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ,
+ 1000);
+ dc->bw_vbios->high_yclk = bw_frc_to_fixed(
+- mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER,
++ mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ,
+ 1000);
+
+ /* Now notify PPLib/SMU about which Watermarks sets they should select
+diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+index d68f951..76f58c6 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+@@ -23,6 +23,7 @@
+ *
+ */
+
++#include "../dce/dce_clocks.h"
+ #include "dce/dce_8_0_d.h"
+ #include "dce/dce_8_0_sh_mask.h"
+
+@@ -44,7 +45,6 @@
+ #include "dce/dce_ipp.h"
+ #include "dce/dce_transform.h"
+ #include "dce/dce_opp.h"
+-#include "dce/dce_clocks.h"
+ #include "dce/dce_clock_source.h"
+ #include "dce/dce_audio.h"
+ #include "dce/dce_hwseq.h"
+@@ -793,7 +793,7 @@ bool dce80_validate_bandwidth(
+ {
+ /* TODO implement when needed but for now hardcode max value*/
+ context->bw.dce.dispclk_khz = 681000;
+- context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER;
++ context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
+
+ return true;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 193184a..4976230 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -2257,46 +2257,6 @@ static void program_all_pipe_in_tree(
+ }
+ }
+
+-static void dcn10_pplib_apply_display_requirements(
+- struct dc *dc,
+- struct dc_state *context)
+-{
+- struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
+-
+- pp_display_cfg->min_engine_clock_khz = dc->res_pool->dccg->clks.dcfclk_khz;
+- pp_display_cfg->min_memory_clock_khz = dc->res_pool->dccg->clks.fclk_khz;
+- pp_display_cfg->min_engine_clock_deep_sleep_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz;
+- pp_display_cfg->min_dcfc_deep_sleep_clock_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz;
+- pp_display_cfg->min_dcfclock_khz = dc->res_pool->dccg->clks.dcfclk_khz;
+- pp_display_cfg->disp_clk_khz = dc->res_pool->dccg->clks.dispclk_khz;
+- dce110_fill_display_configs(context, pp_display_cfg);
+-
+- if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof(
+- struct dm_pp_display_configuration)) != 0)
+- dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
+-
+- dc->prev_display_config = *pp_display_cfg;
+-}
+-
+-static void optimize_shared_resources(struct dc *dc)
+-{
+- if (dc->current_state->stream_count == 0) {
+- /* S0i2 message */
+- dcn10_pplib_apply_display_requirements(dc, dc->current_state);
+- }
+-
+- if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
+- dcn_bw_notify_pplib_of_wm_ranges(dc);
+-}
+-
+-static void ready_shared_resources(struct dc *dc, struct dc_state *context)
+-{
+- /* S0i2 message */
+- if (dc->current_state->stream_count == 0 &&
+- context->stream_count != 0)
+- dcn10_pplib_apply_display_requirements(dc, context);
+-}
+-
+ static struct pipe_ctx *find_top_pipe_for_stream(
+ struct dc *dc,
+ struct dc_state *context,
+@@ -2412,10 +2372,8 @@ static void dcn10_set_bandwidth(
+
+ dc->res_pool->dccg->funcs->update_clocks(
+ dc->res_pool->dccg,
+- &context->bw.dcn.clk,
++ context,
+ safe_to_lower);
+-
+- dcn10_pplib_apply_display_requirements(dc, context);
+ }
+
+ hubbub1_program_watermarks(dc->res_pool->hubbub,
+@@ -2423,6 +2381,9 @@ static void dcn10_set_bandwidth(
+ dc->res_pool->ref_clock_inKhz / 1000,
+ true);
+
++ if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
++ dcn_bw_notify_pplib_of_wm_ranges(dc);
++
+ if (dc->debug.sanity_checks)
+ dcn10_verify_allow_pstate_change_high(dc);
+ }
+@@ -2732,10 +2693,6 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
+ .log_hw_state = dcn10_log_hw_state,
+ .get_hw_state = dcn10_get_hw_state,
+ .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+- .ready_shared_resources = ready_shared_resources,
+- .optimize_shared_resources = optimize_shared_resources,
+- .pplib_apply_display_requirements =
+- dcn10_pplib_apply_display_requirements,
+ .edp_backlight_control = hwss_edp_backlight_control,
+ .edp_power_control = hwss_edp_power_control,
+ .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+index a71453a..6227db6 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+@@ -40,7 +40,7 @@
+ #include "dcn10/dcn10_opp.h"
+ #include "dcn10/dcn10_link_encoder.h"
+ #include "dcn10/dcn10_stream_encoder.h"
+-#include "dce/dce_clocks.h"
++#include "../dce/dce_clocks.h"
+ #include "dce/dce_clock_source.h"
+ #include "dce/dce_audio.h"
+ #include "dce/dce_hwseq.h"
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+index 879c34e..99e2868 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+@@ -287,7 +287,7 @@ struct dc_state {
+ struct dcn_bw_internal_vars dcn_bw_vars;
+ #endif
+
+- struct dccg *dis_clk;
++ struct dccg *dccg;
+
+ struct kref refcount;
+ };
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h b/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
+index 689faa1..14eb0e4 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
+@@ -38,26 +38,19 @@ struct state_dependent_clocks {
+
+ struct dccg {
+ struct dc_context *ctx;
+- const struct display_clock_funcs *funcs;
++ const struct dccg_funcs *funcs;
+
+ enum dm_pp_clocks_state max_clks_state;
+ enum dm_pp_clocks_state cur_min_clks_state;
+ struct dc_clocks clks;
+ };
+
+-struct display_clock_funcs {
++struct dccg_funcs {
+ void (*update_clocks)(struct dccg *dccg,
+- struct dc_clocks *new_clocks,
++ struct dc_state *context,
+ bool safe_to_lower);
+- int (*set_dispclk)(struct dccg *dccg,
+- int requested_clock_khz);
+
+ int (*get_dp_ref_clk_frequency)(struct dccg *dccg);
+-
+- bool (*update_dfs_bypass)(struct dccg *dccg,
+- struct dc *dc,
+- struct dc_state *context,
+- int requested_clock_khz);
+ };
+
+ #endif /* __DISPLAY_CLOCK_H__ */
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+index 26f29d5..c673d3e 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+@@ -210,11 +210,6 @@ struct hw_sequencer_funcs {
+ struct resource_pool *res_pool,
+ struct pipe_ctx *pipe_ctx);
+
+- void (*ready_shared_resources)(struct dc *dc, struct dc_state *context);
+- void (*optimize_shared_resources)(struct dc *dc);
+- void (*pplib_apply_display_requirements)(
+- struct dc *dc,
+- struct dc_state *context);
+ void (*edp_power_control)(
+ struct dc_link *link,
+ bool enable);
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
+index 33b99e3..0086a2f 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
+@@ -30,9 +30,6 @@
+ #include "dal_asic_id.h"
+ #include "dm_pp_smu.h"
+
+-/* TODO unhardcode, 4 for CZ*/
+-#define MEMORY_TYPE_MULTIPLIER 4
+-
+ enum dce_version resource_parse_asic_id(
+ struct hw_asic_id asic_id);
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5580-drm-amd-display-remove-safe_to_lower-flag-from-dc-us.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5580-drm-amd-display-remove-safe_to_lower-flag-from-dc-us.patch
new file mode 100644
index 00000000..d131a59d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5580-drm-amd-display-remove-safe_to_lower-flag-from-dc-us.patch
@@ -0,0 +1,297 @@
+From 3ab294894c23bf30a57dd7df2fceb30e6c1b27ce Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Tue, 18 Sep 2018 15:00:49 -0400
+Subject: [PATCH 5580/5725] drm/amd/display: remove safe_to_lower flag from dc,
+ use 2 functions instead
+
+This is done to keep things more readable and avoids a true/false flag
+in the dc interface layer.
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc.c | 8 ++---
+ .../amd/display/dc/dce100/dce100_hw_sequencer.c | 15 +++------
+ .../amd/display/dc/dce100/dce100_hw_sequencer.h | 5 ++-
+ .../amd/display/dc/dce110/dce110_hw_sequencer.c | 30 ++++++++++++-----
+ .../amd/display/dc/dce110/dce110_hw_sequencer.h | 9 +++--
+ .../drm/amd/display/dc/dce80/dce80_hw_sequencer.c | 3 +-
+ .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 39 +++++++++++++++++++---
+ drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 8 +++--
+ 8 files changed, 79 insertions(+), 38 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 289d12f..3842541 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -967,7 +967,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
+ if (!dcb->funcs->is_accelerated_mode(dcb))
+ dc->hwss.enable_accelerated_mode(dc, context);
+
+- dc->hwss.set_bandwidth(dc, context, false);
++ dc->hwss.prepare_bandwidth(dc, context);
+
+ /* re-program planes for existing stream, in case we need to
+ * free up plane resource for later use
+@@ -1036,7 +1036,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
+ dc_enable_stereo(dc, context, dc_streams, context->stream_count);
+
+ /* pplib is notified if disp_num changed */
+- dc->hwss.set_bandwidth(dc, context, true);
++ dc->hwss.optimize_bandwidth(dc, context);
+
+ dc_release_state(dc->current_state);
+
+@@ -1085,7 +1085,7 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
+
+ dc->optimized_required = false;
+
+- dc->hwss.set_bandwidth(dc, context, true);
++ dc->hwss.optimize_bandwidth(dc, context);
+ return true;
+ }
+
+@@ -1505,7 +1505,7 @@ static void commit_planes_for_stream(struct dc *dc,
+ struct pipe_ctx *top_pipe_to_program = NULL;
+
+ if (update_type == UPDATE_TYPE_FULL) {
+- dc->hwss.set_bandwidth(dc, context, false);
++ dc->hwss.prepare_bandwidth(dc, context);
+ context_clock_trace(dc, context);
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
+index 2725eac..5055026 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
+@@ -105,22 +105,16 @@ bool dce100_enable_display_power_gating(
+ return false;
+ }
+
+-void dce100_set_bandwidth(
++void dce100_prepare_bandwidth(
+ struct dc *dc,
+- struct dc_state *context,
+- bool decrease_allowed)
++ struct dc_state *context)
+ {
+- int dispclk_khz = context->bw.dce.dispclk_khz;
+-
+- context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
+-
+ dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
+
+ dc->res_pool->dccg->funcs->update_clocks(
+ dc->res_pool->dccg,
+ context,
+- decrease_allowed);
+- context->bw.dce.dispclk_khz = dispclk_khz;
++ false);
+ }
+
+ /**************************************************************************/
+@@ -130,6 +124,7 @@ void dce100_hw_sequencer_construct(struct dc *dc)
+ dce110_hw_sequencer_construct(dc);
+
+ dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating;
+- dc->hwss.set_bandwidth = dce100_set_bandwidth;
++ dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth;
++ dc->hwss.optimize_bandwidth = dce100_prepare_bandwidth;
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
+index c6ec0ed..acd4185 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
++++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
+@@ -33,10 +33,9 @@ struct dc_state;
+
+ void dce100_hw_sequencer_construct(struct dc *dc);
+
+-void dce100_set_bandwidth(
++void dce100_prepare_bandwidth(
+ struct dc *dc,
+- struct dc_state *context,
+- bool decrease_allowed);
++ struct dc_state *context);
+
+ bool dce100_enable_display_power_gating(struct dc *dc, uint8_t controller_id,
+ struct dc_bios *dcb,
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index 6a9b4c2..e58a34e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -2349,22 +2349,33 @@ static void init_hw(struct dc *dc)
+
+ }
+
+-void dce110_set_bandwidth(
++
++void dce110_prepare_bandwidth(
+ struct dc *dc,
+- struct dc_state *context,
+- bool decrease_allowed)
++ struct dc_state *context)
+ {
+ struct dccg *dccg = dc->res_pool->dccg;
+
+- if (decrease_allowed)
+- dce110_set_displaymarks(dc, context);
+- else
+- dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
++ dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
+
+ dccg->funcs->update_clocks(
+ dccg,
+ context,
+- decrease_allowed);
++ false);
++}
++
++void dce110_optimize_bandwidth(
++ struct dc *dc,
++ struct dc_state *context)
++{
++ struct dccg *dccg = dc->res_pool->dccg;
++
++ dce110_set_displaymarks(dc, context);
++
++ dccg->funcs->update_clocks(
++ dccg,
++ context,
++ true);
+ }
+
+ static void dce110_program_front_end_for_pipe(
+@@ -2664,7 +2675,8 @@ static const struct hw_sequencer_funcs dce110_funcs = {
+ .enable_display_power_gating = dce110_enable_display_power_gating,
+ .disable_plane = dce110_power_down_fe,
+ .pipe_control_lock = dce_pipe_control_lock,
+- .set_bandwidth = dce110_set_bandwidth,
++ .prepare_bandwidth = dce110_prepare_bandwidth,
++ .optimize_bandwidth = dce110_optimize_bandwidth,
+ .set_drr = set_drr,
+ .get_position = get_position,
+ .set_static_screen_control = set_static_screen_control,
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+index c5e04f8..cd3e36d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+@@ -63,10 +63,13 @@ void dce110_set_safe_displaymarks(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool);
+
+-void dce110_set_bandwidth(
++void dce110_prepare_bandwidth(
+ struct dc *dc,
+- struct dc_state *context,
+- bool decrease_allowed);
++ struct dc_state *context);
++
++void dce110_optimize_bandwidth(
++ struct dc *dc,
++ struct dc_state *context);
+
+ void dp_receiver_power_ctrl(struct dc_link *link, bool on);
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
+index 6c6a1a1..a60a90e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
+@@ -76,6 +76,7 @@ void dce80_hw_sequencer_construct(struct dc *dc)
+
+ dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating;
+ dc->hwss.pipe_control_lock = dce_pipe_control_lock;
+- dc->hwss.set_bandwidth = dce100_set_bandwidth;
++ dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth;
++ dc->hwss.optimize_bandwidth = dce100_prepare_bandwidth;
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 4976230..6375241 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -2358,10 +2358,9 @@ static void dcn10_apply_ctx_for_surface(
+ hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
+ }
+
+-static void dcn10_set_bandwidth(
++static void dcn10_prepare_bandwidth(
+ struct dc *dc,
+- struct dc_state *context,
+- bool safe_to_lower)
++ struct dc_state *context)
+ {
+ if (dc->debug.sanity_checks)
+ dcn10_verify_allow_pstate_change_high(dc);
+@@ -2373,7 +2372,36 @@ static void dcn10_set_bandwidth(
+ dc->res_pool->dccg->funcs->update_clocks(
+ dc->res_pool->dccg,
+ context,
+- safe_to_lower);
++ false);
++ }
++
++ hubbub1_program_watermarks(dc->res_pool->hubbub,
++ &context->bw.dcn.watermarks,
++ dc->res_pool->ref_clock_inKhz / 1000,
++ true);
++
++ if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
++ dcn_bw_notify_pplib_of_wm_ranges(dc);
++
++ if (dc->debug.sanity_checks)
++ dcn10_verify_allow_pstate_change_high(dc);
++}
++
++static void dcn10_optimize_bandwidth(
++ struct dc *dc,
++ struct dc_state *context)
++{
++ if (dc->debug.sanity_checks)
++ dcn10_verify_allow_pstate_change_high(dc);
++
++ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
++ if (context->stream_count == 0)
++ context->bw.dcn.clk.phyclk_khz = 0;
++
++ dc->res_pool->dccg->funcs->update_clocks(
++ dc->res_pool->dccg,
++ context,
++ true);
+ }
+
+ hubbub1_program_watermarks(dc->res_pool->hubbub,
+@@ -2682,7 +2710,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
+ .disable_plane = dcn10_disable_plane,
+ .blank_pixel_data = dcn10_blank_pixel_data,
+ .pipe_control_lock = dcn10_pipe_control_lock,
+- .set_bandwidth = dcn10_set_bandwidth,
++ .prepare_bandwidth = dcn10_prepare_bandwidth,
++ .optimize_bandwidth = dcn10_optimize_bandwidth,
+ .reset_hw_ctx_wrap = reset_hw_ctx_wrap,
+ .enable_stream_timing = dcn10_enable_stream_timing,
+ .set_drr = set_drr,
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+index c673d3e..75de1d8 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+@@ -177,10 +177,12 @@ struct hw_sequencer_funcs {
+ struct pipe_ctx *pipe_ctx,
+ bool blank);
+
+- void (*set_bandwidth)(
++ void (*prepare_bandwidth)(
+ struct dc *dc,
+- struct dc_state *context,
+- bool safe_to_lower);
++ struct dc_state *context);
++ void (*optimize_bandwidth)(
++ struct dc *dc,
++ struct dc_state *context);
+
+ void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes,
+ int vmin, int vmax);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5581-drm-amd-display-Freesync-does-not-engage-on-some-dis.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5581-drm-amd-display-Freesync-does-not-engage-on-some-dis.patch
new file mode 100644
index 00000000..43054462
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5581-drm-amd-display-Freesync-does-not-engage-on-some-dis.patch
@@ -0,0 +1,94 @@
+From c7af9b5d8fc3742d8185d2ac49cfe870c096c36b Mon Sep 17 00:00:00 2001
+From: Harmanprit Tatla <htatla@amd.com>
+Date: Mon, 24 Sep 2018 16:46:38 -0400
+Subject: [PATCH 5581/5725] drm/amd/display: Freesync does not engage on some
+ displays
+
+[Why]
+Current render margin time is not sufficient to compute exit frame
+time for most monitors.
+
+[How]
+Declared render margin in FPS to compute an exit frame rate that is
+4 FPS above the minimum FPS required to engage FreeSync.
+ Also did code clean-up to remove redundancies.
+
+Signed-off-by: Harmanprit Tatla <htatla@amd.com>
+Reviewed-by: Anthony Koo <Anthony.Koo@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+---
+ .../drm/amd/display/modules/freesync/freesync.c | 37 ++++++----------------
+ 1 file changed, 9 insertions(+), 28 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+index 4018c71..620a171 100644
+--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
++++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+@@ -37,6 +37,8 @@
+ #define RENDER_TIMES_MAX_COUNT 10
+ /* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */
+ #define BTR_EXIT_MARGIN 2000
++/*Threshold to exit fixed refresh rate*/
++#define FIXED_REFRESH_EXIT_MARGIN_IN_HZ 4
+ /* Number of consecutive frames to check before entering/exiting fixed refresh*/
+ #define FIXED_REFRESH_ENTER_FRAME_COUNT 5
+ #define FIXED_REFRESH_EXIT_FRAME_COUNT 5
+@@ -257,40 +259,14 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
+ if (in_out_vrr->btr.btr_active) {
+ in_out_vrr->btr.frame_counter = 0;
+ in_out_vrr->btr.btr_active = false;
+-
+- /* Exit Fixed Refresh mode */
+- } else if (in_out_vrr->fixed.fixed_active) {
+-
+- in_out_vrr->fixed.frame_counter++;
+-
+- if (in_out_vrr->fixed.frame_counter >
+- FIXED_REFRESH_EXIT_FRAME_COUNT) {
+- in_out_vrr->fixed.frame_counter = 0;
+- in_out_vrr->fixed.fixed_active = false;
+- }
+ }
+ } else if (last_render_time_in_us > max_render_time_in_us) {
+ /* Enter Below the Range */
+- if (!in_out_vrr->btr.btr_active &&
+- in_out_vrr->btr.btr_enabled) {
+- in_out_vrr->btr.btr_active = true;
+-
+- /* Enter Fixed Refresh mode */
+- } else if (!in_out_vrr->fixed.fixed_active &&
+- !in_out_vrr->btr.btr_enabled) {
+- in_out_vrr->fixed.frame_counter++;
+-
+- if (in_out_vrr->fixed.frame_counter >
+- FIXED_REFRESH_ENTER_FRAME_COUNT) {
+- in_out_vrr->fixed.frame_counter = 0;
+- in_out_vrr->fixed.fixed_active = true;
+- }
+- }
++ in_out_vrr->btr.btr_active = true;
+ }
+
+ /* BTR set to "not active" so disengage */
+ if (!in_out_vrr->btr.btr_active) {
+- in_out_vrr->btr.btr_active = false;
+ in_out_vrr->btr.inserted_duration_in_us = 0;
+ in_out_vrr->btr.frames_to_insert = 0;
+ in_out_vrr->btr.frame_counter = 0;
+@@ -375,7 +351,12 @@ static void apply_fixed_refresh(struct core_freesync *core_freesync,
+ bool update = false;
+ unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us;
+
+- if (last_render_time_in_us + BTR_EXIT_MARGIN < max_render_time_in_us) {
++ //Compute the exit refresh rate and exit frame duration
++ unsigned int exit_refresh_rate_in_milli_hz = ((1000000000/max_render_time_in_us)
++ + (1000*FIXED_REFRESH_EXIT_MARGIN_IN_HZ));
++ unsigned int exit_frame_duration_in_us = 1000000000/exit_refresh_rate_in_milli_hz;
++
++ if (last_render_time_in_us < exit_frame_duration_in_us) {
+ /* Exit Fixed Refresh mode */
+ if (in_out_vrr->fixed.fixed_active) {
+ in_out_vrr->fixed.frame_counter++;
+--
+2.7.4
+
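Editorial note: the [How] section of patch 5581 above replaces the fixed BTR_EXIT_MARGIN check with an exit frame duration derived from the panel's minimum refresh rate plus a 4 Hz margin. The following is a minimal standalone sketch of that arithmetic only — the values and the main() wrapper are illustrative and are not part of the patch or of the driver.

/* Illustrative sketch of the exit-frame-duration math added to
 * apply_fixed_refresh(); names mirror the patch, values are examples. */
#include <stdio.h>

#define FIXED_REFRESH_EXIT_MARGIN_IN_HZ 4

int main(void)
{
	unsigned int max_render_time_in_us = 33333;	/* ~30 Hz minimum refresh (example) */

	/* Minimum refresh rate in millihertz, plus the 4 Hz exit margin. */
	unsigned int exit_refresh_rate_in_milli_hz =
		1000000000 / max_render_time_in_us +
		1000 * FIXED_REFRESH_EXIT_MARGIN_IN_HZ;	/* ~34000 mHz */

	/* Frame duration corresponding to the exit refresh rate. */
	unsigned int exit_frame_duration_in_us =
		1000000000 / exit_refresh_rate_in_milli_hz;	/* ~29411 us */

	/* Frames rendered faster than this duration exit fixed refresh mode. */
	printf("exit rate: %u mHz, exit frame duration: %u us\n",
	       exit_refresh_rate_in_milli_hz, exit_frame_duration_in_us);
	return 0;
}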
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5582-drm-amd-display-clean-up-base-dccg-struct.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5582-drm-amd-display-clean-up-base-dccg-struct.patch
new file mode 100644
index 00000000..60c1dc23
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5582-drm-amd-display-clean-up-base-dccg-struct.patch
@@ -0,0 +1,363 @@
+From b5044afa43ded060c542bcc7d88006c507aeaef4 Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Tue, 18 Sep 2018 15:37:36 -0400
+Subject: [PATCH 5582/5725] drm/amd/display: clean up base dccg struct
+
+Move things not accessed outside dccg block into dce specific
+struct
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c | 49 +++++++++++++---------
+ drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h | 8 ++++
+ .../drm/amd/display/dc/dce100/dce100_resource.c | 7 ----
+ .../drm/amd/display/dc/dce110/dce110_resource.c | 8 ----
+ .../drm/amd/display/dc/dce112/dce112_resource.c | 8 ----
+ .../gpu/drm/amd/display/dc/dce80/dce80_resource.c | 15 -------
+ .../gpu/drm/amd/display/dc/inc/hw/display_clock.h | 9 ----
+ 7 files changed, 37 insertions(+), 67 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+index e633bc0..6f4a10b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+@@ -238,7 +238,7 @@ static enum dm_pp_clocks_state dce_get_required_clocks_state(
+ * lowest RequiredState with the lowest state that satisfies
+ * all required clocks
+ */
+- for (i = dccg->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--)
++ for (i = dccg_dce->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--)
+ if (context->bw.dce.dispclk_khz >
+ dccg_dce->max_clks_by_state[i].display_clk_khz
+ || max_pix_clk >
+@@ -246,13 +246,13 @@ static enum dm_pp_clocks_state dce_get_required_clocks_state(
+ break;
+
+ low_req_clk = i + 1;
+- if (low_req_clk > dccg->max_clks_state) {
++ if (low_req_clk > dccg_dce->max_clks_state) {
+ /* set max clock state for high phyclock, invalid on exceeding display clock */
+- if (dccg_dce->max_clks_by_state[dccg->max_clks_state].display_clk_khz
++ if (dccg_dce->max_clks_by_state[dccg_dce->max_clks_state].display_clk_khz
+ < context->bw.dce.dispclk_khz)
+ low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
+ else
+- low_req_clk = dccg->max_clks_state;
++ low_req_clk = dccg_dce->max_clks_state;
+ }
+
+ return low_req_clk;
+@@ -292,7 +292,7 @@ static int dce_set_clock(
+ /* from power down, we need mark the clock state as ClocksStateNominal
+ * from HWReset, so when resume we will call pplib voltage regulator.*/
+ if (requested_clk_khz == 0)
+- dccg->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
++ dccg_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
+
+ dmcu->funcs->set_psr_wait_loop(dmcu, actual_clock / 1000 / 7);
+
+@@ -327,7 +327,7 @@ static int dce112_set_clock(
+ /* from power down, we need mark the clock state as ClocksStateNominal
+ * from HWReset, so when resume we will call pplib voltage regulator.*/
+ if (requested_clk_khz == 0)
+- dccg->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
++ dccg_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
+
+ /*Program DP ref Clock*/
+ /*VBIOS will determine DPREFCLK frequency, so we don't set it*/
+@@ -833,9 +833,9 @@ static void dce_update_clocks(struct dccg *dccg,
+ struct dc_state *context,
+ bool safe_to_lower)
+ {
++ struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
+ struct dm_pp_power_level_change_request level_change_req;
+ int unpatched_disp_clk = context->bw.dce.dispclk_khz;
+- struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
+
+ /*TODO: W/A for dal3 linux, investigate why this works */
+ if (!dccg_dce->dfs_bypass_active)
+@@ -843,10 +843,10 @@ static void dce_update_clocks(struct dccg *dccg,
+
+ level_change_req.power_level = dce_get_required_clocks_state(dccg, context);
+ /* get max clock state from PPLIB */
+- if ((level_change_req.power_level < dccg->cur_min_clks_state && safe_to_lower)
+- || level_change_req.power_level > dccg->cur_min_clks_state) {
++ if ((level_change_req.power_level < dccg_dce->cur_min_clks_state && safe_to_lower)
++ || level_change_req.power_level > dccg_dce->cur_min_clks_state) {
+ if (dm_pp_apply_power_level_change_request(dccg->ctx, &level_change_req))
+- dccg->cur_min_clks_state = level_change_req.power_level;
++ dccg_dce->cur_min_clks_state = level_change_req.power_level;
+ }
+
+ if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, dccg->clks.dispclk_khz)) {
+@@ -862,14 +862,15 @@ static void dce11_update_clocks(struct dccg *dccg,
+ struct dc_state *context,
+ bool safe_to_lower)
+ {
++ struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
+ struct dm_pp_power_level_change_request level_change_req;
+
+ level_change_req.power_level = dce_get_required_clocks_state(dccg, context);
+ /* get max clock state from PPLIB */
+- if ((level_change_req.power_level < dccg->cur_min_clks_state && safe_to_lower)
+- || level_change_req.power_level > dccg->cur_min_clks_state) {
++ if ((level_change_req.power_level < dccg_dce->cur_min_clks_state && safe_to_lower)
++ || level_change_req.power_level > dccg_dce->cur_min_clks_state) {
+ if (dm_pp_apply_power_level_change_request(dccg->ctx, &level_change_req))
+- dccg->cur_min_clks_state = level_change_req.power_level;
++ dccg_dce->cur_min_clks_state = level_change_req.power_level;
+ }
+
+ if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, dccg->clks.dispclk_khz)) {
+@@ -883,14 +884,15 @@ static void dce112_update_clocks(struct dccg *dccg,
+ struct dc_state *context,
+ bool safe_to_lower)
+ {
++ struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
+ struct dm_pp_power_level_change_request level_change_req;
+
+ level_change_req.power_level = dce_get_required_clocks_state(dccg, context);
+ /* get max clock state from PPLIB */
+- if ((level_change_req.power_level < dccg->cur_min_clks_state && safe_to_lower)
+- || level_change_req.power_level > dccg->cur_min_clks_state) {
++ if ((level_change_req.power_level < dccg_dce->cur_min_clks_state && safe_to_lower)
++ || level_change_req.power_level > dccg_dce->cur_min_clks_state) {
+ if (dm_pp_apply_power_level_change_request(dccg->ctx, &level_change_req))
+- dccg->cur_min_clks_state = level_change_req.power_level;
++ dccg_dce->cur_min_clks_state = level_change_req.power_level;
+ }
+
+ if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, dccg->clks.dispclk_khz)) {
+@@ -904,12 +906,14 @@ static void dce12_update_clocks(struct dccg *dccg,
+ struct dc_state *context,
+ bool safe_to_lower)
+ {
++ struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
+ struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
+ int max_pix_clk = get_max_pixel_clock_for_all_paths(context);
+ int unpatched_disp_clk = context->bw.dce.dispclk_khz;
+
+- /* W/A for dal3 linux */
+- context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
++ /*TODO: W/A for dal3 linux, investigate why this works */
++ if (!dccg_dce->dfs_bypass_active)
++ context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
+
+ if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, dccg->clks.dispclk_khz)) {
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
+@@ -967,6 +971,7 @@ static void dce_dccg_construct(
+ const struct dccg_mask *clk_mask)
+ {
+ struct dccg *base = &dccg_dce->base;
++ struct dm_pp_static_clock_info static_clk_info = {0};
+
+ base->ctx = ctx;
+ base->funcs = &dce_funcs;
+@@ -981,8 +986,12 @@ static void dce_dccg_construct(
+ dccg_dce->dprefclk_ss_divider = 1000;
+ dccg_dce->ss_on_dprefclk = false;
+
+- base->max_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
+- base->cur_min_clks_state = DM_PP_CLOCKS_STATE_INVALID;
++
++ if (dm_pp_get_static_clocks(ctx, &static_clk_info))
++ dccg_dce->max_clks_state = static_clk_info.max_clocks_state;
++ else
++ dccg_dce->max_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
++ dccg_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_INVALID;
+
+ dce_clock_read_integrated_info(dccg_dce);
+ dce_clock_read_ss_info(dccg_dce);
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
+index 104145f..8f902f2 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
+@@ -68,6 +68,11 @@ struct dccg_registers {
+ uint32_t DENTIST_DISPCLK_CNTL;
+ };
+
++struct state_dependent_clocks {
++ int display_clk_khz;
++ int pixel_clk_khz;
++};
++
+ struct dce_dccg {
+ struct dccg base;
+ const struct dccg_registers *regs;
+@@ -93,6 +98,9 @@ struct dce_dccg {
+ /* DPREFCLK SS percentage Divider (100 or 1000) */
+ int dprefclk_ss_divider;
+ int dprefclk_khz;
++
++ enum dm_pp_clocks_state max_clks_state;
++ enum dm_pp_clocks_state cur_min_clks_state;
+ };
+
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+index ae70004..5d62561 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+@@ -860,7 +860,6 @@ static bool construct(
+ struct dc_context *ctx = dc->ctx;
+ struct dc_firmware_info info;
+ struct dc_bios *bp;
+- struct dm_pp_static_clock_info static_clk_info = {0};
+
+ ctx->dc_bios->regs = &bios_regs;
+
+@@ -938,12 +937,6 @@ static bool construct(
+ goto res_create_fail;
+ }
+
+- /* get static clock information for PPLIB or firmware, save
+- * max_clock_state
+- */
+- if (dm_pp_get_static_clocks(ctx, &static_clk_info))
+- pool->base.dccg->max_clks_state =
+- static_clk_info.max_clocks_state;
+ {
+ struct irq_service_init_data init_data;
+ init_data.ctx = dc->ctx;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+index ea8a8bb..18f9135 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+@@ -1201,7 +1201,6 @@ static bool construct(
+ struct dc_context *ctx = dc->ctx;
+ struct dc_firmware_info info;
+ struct dc_bios *bp;
+- struct dm_pp_static_clock_info static_clk_info = {0};
+
+ ctx->dc_bios->regs = &bios_regs;
+
+@@ -1287,13 +1286,6 @@ static bool construct(
+ goto res_create_fail;
+ }
+
+- /* get static clock information for PPLIB or firmware, save
+- * max_clock_state
+- */
+- if (dm_pp_get_static_clocks(ctx, &static_clk_info))
+- pool->base.dccg->max_clks_state =
+- static_clk_info.max_clocks_state;
+-
+ {
+ struct irq_service_init_data init_data;
+ init_data.ctx = dc->ctx;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+index c7e2189..cc48a87 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+@@ -1131,7 +1131,6 @@ static bool construct(
+ {
+ unsigned int i;
+ struct dc_context *ctx = dc->ctx;
+- struct dm_pp_static_clock_info static_clk_info = {0};
+
+ ctx->dc_bios->regs = &bios_regs;
+
+@@ -1229,13 +1228,6 @@ static bool construct(
+ goto res_create_fail;
+ }
+
+- /* get static clock information for PPLIB or firmware, save
+- * max_clock_state
+- */
+- if (dm_pp_get_static_clocks(ctx, &static_clk_info))
+- pool->base.dccg->max_clks_state =
+- static_clk_info.max_clocks_state;
+-
+ {
+ struct irq_service_init_data init_data;
+ init_data.ctx = dc->ctx;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+index 76f58c6..313141b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+@@ -855,7 +855,6 @@ static bool dce80_construct(
+ struct dc_context *ctx = dc->ctx;
+ struct dc_firmware_info info;
+ struct dc_bios *bp;
+- struct dm_pp_static_clock_info static_clk_info = {0};
+
+ ctx->dc_bios->regs = &bios_regs;
+
+@@ -948,10 +947,6 @@ static bool dce80_construct(
+ goto res_create_fail;
+ }
+
+- if (dm_pp_get_static_clocks(ctx, &static_clk_info))
+- pool->base.dccg->max_clks_state =
+- static_clk_info.max_clocks_state;
+-
+ {
+ struct irq_service_init_data init_data;
+ init_data.ctx = dc->ctx;
+@@ -1065,7 +1060,6 @@ static bool dce81_construct(
+ struct dc_context *ctx = dc->ctx;
+ struct dc_firmware_info info;
+ struct dc_bios *bp;
+- struct dm_pp_static_clock_info static_clk_info = {0};
+
+ ctx->dc_bios->regs = &bios_regs;
+
+@@ -1158,10 +1152,6 @@ static bool dce81_construct(
+ goto res_create_fail;
+ }
+
+- if (dm_pp_get_static_clocks(ctx, &static_clk_info))
+- pool->base.dccg->max_clks_state =
+- static_clk_info.max_clocks_state;
+-
+ {
+ struct irq_service_init_data init_data;
+ init_data.ctx = dc->ctx;
+@@ -1275,7 +1265,6 @@ static bool dce83_construct(
+ struct dc_context *ctx = dc->ctx;
+ struct dc_firmware_info info;
+ struct dc_bios *bp;
+- struct dm_pp_static_clock_info static_clk_info = {0};
+
+ ctx->dc_bios->regs = &bios_regs;
+
+@@ -1364,10 +1353,6 @@ static bool dce83_construct(
+ goto res_create_fail;
+ }
+
+- if (dm_pp_get_static_clocks(ctx, &static_clk_info))
+- pool->base.dccg->max_clks_state =
+- static_clk_info.max_clocks_state;
+-
+ {
+ struct irq_service_init_data init_data;
+ init_data.ctx = dc->ctx;
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h b/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
+index 14eb0e4..e1ec42b 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
+@@ -29,19 +29,10 @@
+ #include "dm_services_types.h"
+ #include "dc.h"
+
+-/* Structure containing all state-dependent clocks
+- * (dependent on "enum clocks_state") */
+-struct state_dependent_clocks {
+- int display_clk_khz;
+- int pixel_clk_khz;
+-};
+-
+ struct dccg {
+ struct dc_context *ctx;
+ const struct dccg_funcs *funcs;
+
+- enum dm_pp_clocks_state max_clks_state;
+- enum dm_pp_clocks_state cur_min_clks_state;
+ struct dc_clocks clks;
+ };
+
+--
+2.7.4
+
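Editorial note: the clean-up in patch 5582 above moves DCE-only clock-state fields (max_clks_state, cur_min_clks_state) out of the shared struct dccg and into struct dce_dccg, which embeds the base struct and recovers it with a container_of() downcast. Below is a minimal, self-contained sketch of that base/derived pattern; the struct layouts and field types are simplified stand-ins, not the driver's actual definitions.

/* Illustrative sketch of the embedded-base + container_of() pattern used
 * by TO_DCE_DCCG(); simplified types, not the real DC structures. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct dccg {			/* shared base: only what every ASIC needs */
	int dispclk_khz;
};

struct dce_dccg {		/* DCE-specific wrapper owns the extra state */
	struct dccg base;
	int max_clks_state;
	int cur_min_clks_state;
};

#define TO_DCE_DCCG(clocks) container_of(clocks, struct dce_dccg, base)

static void dce_use(struct dccg *dccg)
{
	struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);

	/* DCE code downcasts once, then uses its private fields directly. */
	dccg_dce->cur_min_clks_state = dccg_dce->max_clks_state;
}

int main(void)
{
	struct dce_dccg dce = {
		.base = { .dispclk_khz = 600000 },
		.max_clks_state = 3,
	};

	dce_use(&dce.base);
	printf("cur_min_clks_state = %d\n", dce.cur_min_clks_state);
	return 0;
}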
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5583-drm-amd-display-split-dccg-clock-manager-into-asic-f.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5583-drm-amd-display-split-dccg-clock-manager-into-asic-f.patch
new file mode 100644
index 00000000..ba779bd8
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5583-drm-amd-display-split-dccg-clock-manager-into-asic-f.patch
@@ -0,0 +1,2952 @@
+From 74fed69c0dc1c52a43b8fb15271ab8a7ee93780f Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Mon, 24 Sep 2018 15:28:00 -0400
+Subject: [PATCH 5583/5725] drm/amd/display: split dccg clock manager into asic
+ folders
+
+Currently dccg contains code related to every dcn revision in
+a single file.
+
+This change splits out the dcn parts of code into correct folders
+
+Change-Id: I603351bdf753312e6b28e09320318d2afe7db852
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce/Makefile | 2 +-
+ drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c | 1143 --------------------
+ drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h | 133 ---
+ drivers/gpu/drm/amd/display/dc/dce/dce_dccg.c | 876 +++++++++++++++
+ drivers/gpu/drm/amd/display/dc/dce/dce_dccg.h | 165 +++
+ .../drm/amd/display/dc/dce100/dce100_resource.c | 2 +-
+ .../drm/amd/display/dc/dce110/dce110_resource.c | 2 +-
+ .../drm/amd/display/dc/dce112/dce112_resource.c | 2 +-
+ .../drm/amd/display/dc/dce120/dce120_resource.c | 2 +-
+ .../gpu/drm/amd/display/dc/dce80/dce80_resource.c | 2 +-
+ drivers/gpu/drm/amd/display/dc/dcn10/Makefile | 2 +-
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dccg.c | 278 +++++
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dccg.h | 37 +
+ .../gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 2 +-
+ drivers/gpu/drm/amd/display/dc/inc/core_types.h | 2 +-
+ drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h | 2 +-
+ drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h | 47 +
+ .../gpu/drm/amd/display/dc/inc/hw/display_clock.h | 47 -
+ 18 files changed, 1413 insertions(+), 1333 deletions(-)
+ delete mode 100644 drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+ delete mode 100644 drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
+ create mode 100644 drivers/gpu/drm/amd/display/dc/dce/dce_dccg.c
+ create mode 100644 drivers/gpu/drm/amd/display/dc/dce/dce_dccg.h
+ create mode 100644 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dccg.c
+ create mode 100644 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dccg.h
+ create mode 100644 drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+ delete mode 100644 drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile b/drivers/gpu/drm/amd/display/dc/dce/Makefile
+index 8f7f0e8..f4ce7f5 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/dce/Makefile
+@@ -28,7 +28,7 @@
+
+ DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \
+ dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \
+-dce_clocks.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \
++dce_dccg.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \
+ dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o
+
+ AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE))
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+deleted file mode 100644
+index 6f4a10b..0000000
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
++++ /dev/null
+@@ -1,1143 +0,0 @@
+-/*
+- * Copyright 2012-16 Advanced Micro Devices, Inc.
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included in
+- * all copies or substantial portions of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+- * OTHER DEALINGS IN THE SOFTWARE.
+- *
+- * Authors: AMD
+- *
+- */
+-
+-#include "reg_helper.h"
+-#include "bios_parser_interface.h"
+-#include "dc.h"
+-#include "dce_clocks.h"
+-#include "dmcu.h"
+-#include "core_types.h"
+-#include "dal_asic_id.h"
+-
+-#define TO_DCE_DCCG(clocks)\
+- container_of(clocks, struct dce_dccg, base)
+-
+-#define REG(reg) \
+- (dccg_dce->regs->reg)
+-
+-#undef FN
+-#define FN(reg_name, field_name) \
+- dccg_dce->dccg_shift->field_name, dccg_dce->dccg_mask->field_name
+-
+-#define CTX \
+- dccg_dce->base.ctx
+-#define DC_LOGGER \
+- dccg->ctx->logger
+-
+-/* Max clock values for each state indexed by "enum clocks_state": */
+-static const struct state_dependent_clocks dce80_max_clks_by_state[] = {
+-/* ClocksStateInvalid - should not be used */
+-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+-/* ClocksStateUltraLow - not expected to be used for DCE 8.0 */
+-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+-/* ClocksStateLow */
+-{ .display_clk_khz = 352000, .pixel_clk_khz = 330000},
+-/* ClocksStateNominal */
+-{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 },
+-/* ClocksStatePerformance */
+-{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } };
+-
+-static const struct state_dependent_clocks dce110_max_clks_by_state[] = {
+-/*ClocksStateInvalid - should not be used*/
+-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+-/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
+-{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
+-/*ClocksStateLow*/
+-{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
+-/*ClocksStateNominal*/
+-{ .display_clk_khz = 467000, .pixel_clk_khz = 400000 },
+-/*ClocksStatePerformance*/
+-{ .display_clk_khz = 643000, .pixel_clk_khz = 400000 } };
+-
+-static const struct state_dependent_clocks dce112_max_clks_by_state[] = {
+-/*ClocksStateInvalid - should not be used*/
+-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+-/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
+-{ .display_clk_khz = 389189, .pixel_clk_khz = 346672 },
+-/*ClocksStateLow*/
+-{ .display_clk_khz = 459000, .pixel_clk_khz = 400000 },
+-/*ClocksStateNominal*/
+-{ .display_clk_khz = 667000, .pixel_clk_khz = 600000 },
+-/*ClocksStatePerformance*/
+-{ .display_clk_khz = 1132000, .pixel_clk_khz = 600000 } };
+-
+-static const struct state_dependent_clocks dce120_max_clks_by_state[] = {
+-/*ClocksStateInvalid - should not be used*/
+-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+-/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
+-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+-/*ClocksStateLow*/
+-{ .display_clk_khz = 460000, .pixel_clk_khz = 400000 },
+-/*ClocksStateNominal*/
+-{ .display_clk_khz = 670000, .pixel_clk_khz = 600000 },
+-/*ClocksStatePerformance*/
+-{ .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } };
+-
+-/* Starting DID for each range */
+-enum dentist_base_divider_id {
+- DENTIST_BASE_DID_1 = 0x08,
+- DENTIST_BASE_DID_2 = 0x40,
+- DENTIST_BASE_DID_3 = 0x60,
+- DENTIST_MAX_DID = 0x80
+-};
+-
+-/* Starting point and step size for each divider range.*/
+-enum dentist_divider_range {
+- DENTIST_DIVIDER_RANGE_1_START = 8, /* 2.00 */
+- DENTIST_DIVIDER_RANGE_1_STEP = 1, /* 0.25 */
+- DENTIST_DIVIDER_RANGE_2_START = 64, /* 16.00 */
+- DENTIST_DIVIDER_RANGE_2_STEP = 2, /* 0.50 */
+- DENTIST_DIVIDER_RANGE_3_START = 128, /* 32.00 */
+- DENTIST_DIVIDER_RANGE_3_STEP = 4, /* 1.00 */
+- DENTIST_DIVIDER_RANGE_SCALE_FACTOR = 4
+-};
+-
+-static int dentist_get_divider_from_did(int did)
+-{
+- if (did < DENTIST_BASE_DID_1)
+- did = DENTIST_BASE_DID_1;
+- if (did > DENTIST_MAX_DID)
+- did = DENTIST_MAX_DID;
+-
+- if (did < DENTIST_BASE_DID_2) {
+- return DENTIST_DIVIDER_RANGE_1_START + DENTIST_DIVIDER_RANGE_1_STEP
+- * (did - DENTIST_BASE_DID_1);
+- } else if (did < DENTIST_BASE_DID_3) {
+- return DENTIST_DIVIDER_RANGE_2_START + DENTIST_DIVIDER_RANGE_2_STEP
+- * (did - DENTIST_BASE_DID_2);
+- } else {
+- return DENTIST_DIVIDER_RANGE_3_START + DENTIST_DIVIDER_RANGE_3_STEP
+- * (did - DENTIST_BASE_DID_3);
+- }
+-}
+-
+-/* SW will adjust DP REF Clock average value for all purposes
+- * (DP DTO / DP Audio DTO and DP GTC)
+- if clock is spread for all cases:
+- -if SS enabled on DP Ref clock and HW de-spreading enabled with SW
+- calculations for DS_INCR/DS_MODULO (this is planned to be default case)
+- -if SS enabled on DP Ref clock and HW de-spreading enabled with HW
+- calculations (not planned to be used, but average clock should still
+- be valid)
+- -if SS enabled on DP Ref clock and HW de-spreading disabled
+- (should not be case with CIK) then SW should program all rates
+- generated according to average value (case as with previous ASICs)
+- */
+-static int dccg_adjust_dp_ref_freq_for_ss(struct dce_dccg *dccg_dce, int dp_ref_clk_khz)
+-{
+- if (dccg_dce->ss_on_dprefclk && dccg_dce->dprefclk_ss_divider != 0) {
+- struct fixed31_32 ss_percentage = dc_fixpt_div_int(
+- dc_fixpt_from_fraction(dccg_dce->dprefclk_ss_percentage,
+- dccg_dce->dprefclk_ss_divider), 200);
+- struct fixed31_32 adj_dp_ref_clk_khz;
+-
+- ss_percentage = dc_fixpt_sub(dc_fixpt_one, ss_percentage);
+- adj_dp_ref_clk_khz = dc_fixpt_mul_int(ss_percentage, dp_ref_clk_khz);
+- dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
+- }
+- return dp_ref_clk_khz;
+-}
+-
+-static int dce_get_dp_ref_freq_khz(struct dccg *dccg)
+-{
+- struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
+- int dprefclk_wdivider;
+- int dprefclk_src_sel;
+- int dp_ref_clk_khz = 600000;
+- int target_div;
+-
+- /* ASSERT DP Reference Clock source is from DFS*/
+- REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel);
+- ASSERT(dprefclk_src_sel == 0);
+-
+- /* Read the mmDENTIST_DISPCLK_CNTL to get the currently
+- * programmed DID DENTIST_DPREFCLK_WDIVIDER*/
+- REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider);
+-
+- /* Convert DENTIST_DPREFCLK_WDIVIDERto actual divider*/
+- target_div = dentist_get_divider_from_did(dprefclk_wdivider);
+-
+- /* Calculate the current DFS clock, in kHz.*/
+- dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
+- * dccg_dce->dentist_vco_freq_khz) / target_div;
+-
+- return dccg_adjust_dp_ref_freq_for_ss(dccg_dce, dp_ref_clk_khz);
+-}
+-
+-static int dce12_get_dp_ref_freq_khz(struct dccg *dccg)
+-{
+- struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
+-
+- return dccg_adjust_dp_ref_freq_for_ss(dccg_dce, dccg_dce->dprefclk_khz);
+-}
+-
+-/* unit: in_khz before mode set, get pixel clock from context. ASIC register
+- * may not be programmed yet
+- */
+-static uint32_t get_max_pixel_clock_for_all_paths(struct dc_state *context)
+-{
+- uint32_t max_pix_clk = 0;
+- int i;
+-
+- for (i = 0; i < MAX_PIPES; i++) {
+- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+-
+- if (pipe_ctx->stream == NULL)
+- continue;
+-
+- /* do not check under lay */
+- if (pipe_ctx->top_pipe)
+- continue;
+-
+- if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk > max_pix_clk)
+- max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
+-
+- /* raise clock state for HBR3/2 if required. Confirmed with HW DCE/DPCS
+- * logic for HBR3 still needs Nominal (0.8V) on VDDC rail
+- */
+- if (dc_is_dp_signal(pipe_ctx->stream->signal) &&
+- pipe_ctx->stream_res.pix_clk_params.requested_sym_clk > max_pix_clk)
+- max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_sym_clk;
+- }
+-
+- return max_pix_clk;
+-}
+-
+-static enum dm_pp_clocks_state dce_get_required_clocks_state(
+- struct dccg *dccg,
+- struct dc_state *context)
+-{
+- struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
+- int i;
+- enum dm_pp_clocks_state low_req_clk;
+- int max_pix_clk = get_max_pixel_clock_for_all_paths(context);
+-
+- /* Iterate from highest supported to lowest valid state, and update
+- * lowest RequiredState with the lowest state that satisfies
+- * all required clocks
+- */
+- for (i = dccg_dce->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--)
+- if (context->bw.dce.dispclk_khz >
+- dccg_dce->max_clks_by_state[i].display_clk_khz
+- || max_pix_clk >
+- dccg_dce->max_clks_by_state[i].pixel_clk_khz)
+- break;
+-
+- low_req_clk = i + 1;
+- if (low_req_clk > dccg_dce->max_clks_state) {
+- /* set max clock state for high phyclock, invalid on exceeding display clock */
+- if (dccg_dce->max_clks_by_state[dccg_dce->max_clks_state].display_clk_khz
+- < context->bw.dce.dispclk_khz)
+- low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
+- else
+- low_req_clk = dccg_dce->max_clks_state;
+- }
+-
+- return low_req_clk;
+-}
+-
+-static int dce_set_clock(
+- struct dccg *dccg,
+- int requested_clk_khz)
+-{
+- struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
+- struct bp_pixel_clock_parameters pxl_clk_params = { 0 };
+- struct dc_bios *bp = dccg->ctx->dc_bios;
+- int actual_clock = requested_clk_khz;
+- struct dmcu *dmcu = dccg_dce->base.ctx->dc->res_pool->dmcu;
+-
+- /* Make sure requested clock isn't lower than minimum threshold*/
+- if (requested_clk_khz > 0)
+- requested_clk_khz = max(requested_clk_khz,
+- dccg_dce->dentist_vco_freq_khz / 64);
+-
+- /* Prepare to program display clock*/
+- pxl_clk_params.target_pixel_clock = requested_clk_khz;
+- pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
+-
+- if (dccg_dce->dfs_bypass_active)
+- pxl_clk_params.flags.SET_DISPCLK_DFS_BYPASS = true;
+-
+- bp->funcs->program_display_engine_pll(bp, &pxl_clk_params);
+-
+- if (dccg_dce->dfs_bypass_active) {
+- /* Cache the fixed display clock*/
+- dccg_dce->dfs_bypass_disp_clk =
+- pxl_clk_params.dfs_bypass_display_clock;
+- actual_clock = pxl_clk_params.dfs_bypass_display_clock;
+- }
+-
+- /* from power down, we need mark the clock state as ClocksStateNominal
+- * from HWReset, so when resume we will call pplib voltage regulator.*/
+- if (requested_clk_khz == 0)
+- dccg_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
+-
+- dmcu->funcs->set_psr_wait_loop(dmcu, actual_clock / 1000 / 7);
+-
+- return actual_clock;
+-}
+-
+-static int dce112_set_clock(
+- struct dccg *dccg,
+- int requested_clk_khz)
+-{
+- struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
+- struct bp_set_dce_clock_parameters dce_clk_params;
+- struct dc_bios *bp = dccg->ctx->dc_bios;
+- struct dc *core_dc = dccg->ctx->dc;
+- struct dmcu *dmcu = core_dc->res_pool->dmcu;
+- int actual_clock = requested_clk_khz;
+- /* Prepare to program display clock*/
+- memset(&dce_clk_params, 0, sizeof(dce_clk_params));
+-
+- /* Make sure requested clock isn't lower than minimum threshold*/
+- if (requested_clk_khz > 0)
+- requested_clk_khz = max(requested_clk_khz,
+- dccg_dce->dentist_vco_freq_khz / 62);
+-
+- dce_clk_params.target_clock_frequency = requested_clk_khz;
+- dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
+- dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK;
+-
+- bp->funcs->set_dce_clock(bp, &dce_clk_params);
+- actual_clock = dce_clk_params.target_clock_frequency;
+-
+- /* from power down, we need mark the clock state as ClocksStateNominal
+- * from HWReset, so when resume we will call pplib voltage regulator.*/
+- if (requested_clk_khz == 0)
+- dccg_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
+-
+- /*Program DP ref Clock*/
+- /*VBIOS will determine DPREFCLK frequency, so we don't set it*/
+- dce_clk_params.target_clock_frequency = 0;
+- dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK;
+- if (!ASICREV_IS_VEGA20_P(dccg->ctx->asic_id.hw_internal_rev))
+- dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK =
+- (dce_clk_params.pll_id ==
+- CLOCK_SOURCE_COMBO_DISPLAY_PLL0);
+- else
+- dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = false;
+-
+- bp->funcs->set_dce_clock(bp, &dce_clk_params);
+-
+- if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
+- if (dccg_dce->dfs_bypass_disp_clk != actual_clock)
+- dmcu->funcs->set_psr_wait_loop(dmcu,
+- actual_clock / 1000 / 7);
+- }
+-
+- dccg_dce->dfs_bypass_disp_clk = actual_clock;
+- return actual_clock;
+-}
+-
+-static void dce_clock_read_integrated_info(struct dce_dccg *dccg_dce)
+-{
+- struct dc_debug_options *debug = &dccg_dce->base.ctx->dc->debug;
+- struct dc_bios *bp = dccg_dce->base.ctx->dc_bios;
+- struct integrated_info info = { { { 0 } } };
+- struct dc_firmware_info fw_info = { { 0 } };
+- int i;
+-
+- if (bp->integrated_info)
+- info = *bp->integrated_info;
+-
+- dccg_dce->dentist_vco_freq_khz = info.dentist_vco_freq;
+- if (dccg_dce->dentist_vco_freq_khz == 0) {
+- bp->funcs->get_firmware_info(bp, &fw_info);
+- dccg_dce->dentist_vco_freq_khz =
+- fw_info.smu_gpu_pll_output_freq;
+- if (dccg_dce->dentist_vco_freq_khz == 0)
+- dccg_dce->dentist_vco_freq_khz = 3600000;
+- }
+-
+- /*update the maximum display clock for each power state*/
+- for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
+- enum dm_pp_clocks_state clk_state = DM_PP_CLOCKS_STATE_INVALID;
+-
+- switch (i) {
+- case 0:
+- clk_state = DM_PP_CLOCKS_STATE_ULTRA_LOW;
+- break;
+-
+- case 1:
+- clk_state = DM_PP_CLOCKS_STATE_LOW;
+- break;
+-
+- case 2:
+- clk_state = DM_PP_CLOCKS_STATE_NOMINAL;
+- break;
+-
+- case 3:
+- clk_state = DM_PP_CLOCKS_STATE_PERFORMANCE;
+- break;
+-
+- default:
+- clk_state = DM_PP_CLOCKS_STATE_INVALID;
+- break;
+- }
+-
+- /*Do not allow bad VBIOS/SBIOS to override with invalid values,
+- * check for > 100MHz*/
+- if (info.disp_clk_voltage[i].max_supported_clk >= 100000)
+- dccg_dce->max_clks_by_state[clk_state].display_clk_khz =
+- info.disp_clk_voltage[i].max_supported_clk;
+- }
+-
+- if (!debug->disable_dfs_bypass && bp->integrated_info)
+- if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
+- dccg_dce->dfs_bypass_enabled = true;
+-}
+-
+-static void dce_clock_read_ss_info(struct dce_dccg *dccg_dce)
+-{
+- struct dc_bios *bp = dccg_dce->base.ctx->dc_bios;
+- int ss_info_num = bp->funcs->get_ss_entry_number(
+- bp, AS_SIGNAL_TYPE_GPU_PLL);
+-
+- if (ss_info_num) {
+- struct spread_spectrum_info info = { { 0 } };
+- enum bp_result result = bp->funcs->get_spread_spectrum_info(
+- bp, AS_SIGNAL_TYPE_GPU_PLL, 0, &info);
+-
+- /* Based on VBIOS, VBIOS will keep entry for GPU PLL SS
+- * even if SS not enabled and in that case
+- * SSInfo.spreadSpectrumPercentage !=0 would be sign
+- * that SS is enabled
+- */
+- if (result == BP_RESULT_OK &&
+- info.spread_spectrum_percentage != 0) {
+- dccg_dce->ss_on_dprefclk = true;
+- dccg_dce->dprefclk_ss_divider = info.spread_percentage_divider;
+-
+- if (info.type.CENTER_MODE == 0) {
+- /* TODO: Currently for DP Reference clock we
+- * need only SS percentage for
+- * downspread */
+- dccg_dce->dprefclk_ss_percentage =
+- info.spread_spectrum_percentage;
+- }
+-
+- return;
+- }
+-
+- result = bp->funcs->get_spread_spectrum_info(
+- bp, AS_SIGNAL_TYPE_DISPLAY_PORT, 0, &info);
+-
+- /* Based on VBIOS, VBIOS will keep entry for DPREFCLK SS
+- * even if SS not enabled and in that case
+- * SSInfo.spreadSpectrumPercentage !=0 would be sign
+- * that SS is enabled
+- */
+- if (result == BP_RESULT_OK &&
+- info.spread_spectrum_percentage != 0) {
+- dccg_dce->ss_on_dprefclk = true;
+- dccg_dce->dprefclk_ss_divider = info.spread_percentage_divider;
+-
+- if (info.type.CENTER_MODE == 0) {
+- /* Currently for DP Reference clock we
+- * need only SS percentage for
+- * downspread */
+- dccg_dce->dprefclk_ss_percentage =
+- info.spread_spectrum_percentage;
+- }
+- }
+- }
+-}
+-
+-static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
+-{
+- return ((safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk);
+-}
+-
+-static void dce110_fill_display_configs(
+- const struct dc_state *context,
+- struct dm_pp_display_configuration *pp_display_cfg)
+-{
+- int j;
+- int num_cfgs = 0;
+-
+- for (j = 0; j < context->stream_count; j++) {
+- int k;
+-
+- const struct dc_stream_state *stream = context->streams[j];
+- struct dm_pp_single_disp_config *cfg =
+- &pp_display_cfg->disp_configs[num_cfgs];
+- const struct pipe_ctx *pipe_ctx = NULL;
+-
+- for (k = 0; k < MAX_PIPES; k++)
+- if (stream == context->res_ctx.pipe_ctx[k].stream) {
+- pipe_ctx = &context->res_ctx.pipe_ctx[k];
+- break;
+- }
+-
+- ASSERT(pipe_ctx != NULL);
+-
+- /* only notify active stream */
+- if (stream->dpms_off)
+- continue;
+-
+- num_cfgs++;
+- cfg->signal = pipe_ctx->stream->signal;
+- cfg->pipe_idx = pipe_ctx->stream_res.tg->inst;
+- cfg->src_height = stream->src.height;
+- cfg->src_width = stream->src.width;
+- cfg->ddi_channel_mapping =
+- stream->sink->link->ddi_channel_mapping.raw;
+- cfg->transmitter =
+- stream->sink->link->link_enc->transmitter;
+- cfg->link_settings.lane_count =
+- stream->sink->link->cur_link_settings.lane_count;
+- cfg->link_settings.link_rate =
+- stream->sink->link->cur_link_settings.link_rate;
+- cfg->link_settings.link_spread =
+- stream->sink->link->cur_link_settings.link_spread;
+- cfg->sym_clock = stream->phy_pix_clk;
+- /* Round v_refresh*/
+- cfg->v_refresh = stream->timing.pix_clk_khz * 1000;
+- cfg->v_refresh /= stream->timing.h_total;
+- cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2)
+- / stream->timing.v_total;
+- }
+-
+- pp_display_cfg->display_count = num_cfgs;
+-}
+-
+-static uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context)
+-{
+- uint8_t j;
+- uint32_t min_vertical_blank_time = -1;
+-
+- for (j = 0; j < context->stream_count; j++) {
+- struct dc_stream_state *stream = context->streams[j];
+- uint32_t vertical_blank_in_pixels = 0;
+- uint32_t vertical_blank_time = 0;
+-
+- vertical_blank_in_pixels = stream->timing.h_total *
+- (stream->timing.v_total
+- - stream->timing.v_addressable);
+-
+- vertical_blank_time = vertical_blank_in_pixels
+- * 1000 / stream->timing.pix_clk_khz;
+-
+- if (min_vertical_blank_time > vertical_blank_time)
+- min_vertical_blank_time = vertical_blank_time;
+- }
+-
+- return min_vertical_blank_time;
+-}
+-
+-static int determine_sclk_from_bounding_box(
+- const struct dc *dc,
+- int required_sclk)
+-{
+- int i;
+-
+- /*
+- * Some asics do not give us sclk levels, so we just report the actual
+- * required sclk
+- */
+- if (dc->sclk_lvls.num_levels == 0)
+- return required_sclk;
+-
+- for (i = 0; i < dc->sclk_lvls.num_levels; i++) {
+- if (dc->sclk_lvls.clocks_in_khz[i] >= required_sclk)
+- return dc->sclk_lvls.clocks_in_khz[i];
+- }
+- /*
+- * even maximum level could not satisfy requirement, this
+- * is unexpected at this stage, should have been caught at
+- * validation time
+- */
+- ASSERT(0);
+- return dc->sclk_lvls.clocks_in_khz[dc->sclk_lvls.num_levels - 1];
+-}
+-
+-static void dce_pplib_apply_display_requirements(
+- struct dc *dc,
+- struct dc_state *context)
+-{
+- struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
+-
+- pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context);
+-
+- dce110_fill_display_configs(context, pp_display_cfg);
+-
+- if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
+- dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
+-}
+-
+-static void dce11_pplib_apply_display_requirements(
+- struct dc *dc,
+- struct dc_state *context)
+-{
+- struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
+-
+- pp_display_cfg->all_displays_in_sync =
+- context->bw.dce.all_displays_in_sync;
+- pp_display_cfg->nb_pstate_switch_disable =
+- context->bw.dce.nbp_state_change_enable == false;
+- pp_display_cfg->cpu_cc6_disable =
+- context->bw.dce.cpuc_state_change_enable == false;
+- pp_display_cfg->cpu_pstate_disable =
+- context->bw.dce.cpup_state_change_enable == false;
+- pp_display_cfg->cpu_pstate_separation_time =
+- context->bw.dce.blackout_recovery_time_us;
+-
+- pp_display_cfg->min_memory_clock_khz = context->bw.dce.yclk_khz
+- / MEMORY_TYPE_MULTIPLIER_CZ;
+-
+- pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box(
+- dc,
+- context->bw.dce.sclk_khz);
+-
+- pp_display_cfg->min_engine_clock_deep_sleep_khz
+- = context->bw.dce.sclk_deep_sleep_khz;
+-
+- pp_display_cfg->avail_mclk_switch_time_us =
+- dce110_get_min_vblank_time_us(context);
+- /* TODO: dce11.2*/
+- pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
+-
+- pp_display_cfg->disp_clk_khz = dc->res_pool->dccg->clks.dispclk_khz;
+-
+- dce110_fill_display_configs(context, pp_display_cfg);
+-
+- /* TODO: is this still applicable?*/
+- if (pp_display_cfg->display_count == 1) {
+- const struct dc_crtc_timing *timing =
+- &context->streams[0]->timing;
+-
+- pp_display_cfg->crtc_index =
+- pp_display_cfg->disp_configs[0].pipe_idx;
+- pp_display_cfg->line_time_in_us = timing->h_total * 1000 / timing->pix_clk_khz;
+- }
+-
+- if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
+- dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
+-}
+-
+-static void dcn1_pplib_apply_display_requirements(
+- struct dc *dc,
+- struct dc_state *context)
+-{
+- struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
+-
+- pp_display_cfg->min_engine_clock_khz = dc->res_pool->dccg->clks.dcfclk_khz;
+- pp_display_cfg->min_memory_clock_khz = dc->res_pool->dccg->clks.fclk_khz;
+- pp_display_cfg->min_engine_clock_deep_sleep_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz;
+- pp_display_cfg->min_dcfc_deep_sleep_clock_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz;
+- pp_display_cfg->min_dcfclock_khz = dc->res_pool->dccg->clks.dcfclk_khz;
+- pp_display_cfg->disp_clk_khz = dc->res_pool->dccg->clks.dispclk_khz;
+- dce110_fill_display_configs(context, pp_display_cfg);
+-
+- if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
+- dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
+-}
+-
+-#ifdef CONFIG_X86
+-static int dcn1_determine_dppclk_threshold(struct dccg *dccg, struct dc_clocks *new_clocks)
+-{
+- bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
+- bool dispclk_increase = new_clocks->dispclk_khz > dccg->clks.dispclk_khz;
+- int disp_clk_threshold = new_clocks->max_supported_dppclk_khz;
+- bool cur_dpp_div = dccg->clks.dispclk_khz > dccg->clks.dppclk_khz;
+-
+- /* increase clock, looking for div is 0 for current, request div is 1*/
+- if (dispclk_increase) {
+- /* already divided by 2, no need to reach target clk with 2 steps*/
+- if (cur_dpp_div)
+- return new_clocks->dispclk_khz;
+-
+- /* request disp clk is lower than maximum supported dpp clk,
+- * no need to reach target clk with two steps.
+- */
+- if (new_clocks->dispclk_khz <= disp_clk_threshold)
+- return new_clocks->dispclk_khz;
+-
+- /* target dpp clk not request divided by 2, still within threshold */
+- if (!request_dpp_div)
+- return new_clocks->dispclk_khz;
+-
+- } else {
+- /* decrease clock, looking for current dppclk divided by 2,
+- * request dppclk not divided by 2.
+- */
+-
+- /* current dpp clk not divided by 2, no need to ramp*/
+- if (!cur_dpp_div)
+- return new_clocks->dispclk_khz;
+-
+- /* current disp clk is lower than current maximum dpp clk,
+- * no need to ramp
+- */
+- if (dccg->clks.dispclk_khz <= disp_clk_threshold)
+- return new_clocks->dispclk_khz;
+-
+- /* request dpp clk need to be divided by 2 */
+- if (request_dpp_div)
+- return new_clocks->dispclk_khz;
+- }
+-
+- return disp_clk_threshold;
+-}
+-
+-static void dcn1_ramp_up_dispclk_with_dpp(struct dccg *dccg, struct dc_clocks *new_clocks)
+-{
+- struct dc *dc = dccg->ctx->dc;
+- int dispclk_to_dpp_threshold = dcn1_determine_dppclk_threshold(dccg, new_clocks);
+- bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
+- int i;
+-
+- /* set disp clk to dpp clk threshold */
+- dce112_set_clock(dccg, dispclk_to_dpp_threshold);
+-
+- /* update request dpp clk division option */
+- for (i = 0; i < dc->res_pool->pipe_count; i++) {
+- struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+-
+- if (!pipe_ctx->plane_state)
+- continue;
+-
+- pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control(
+- pipe_ctx->plane_res.dpp,
+- request_dpp_div,
+- true);
+- }
+-
+- /* If target clk not same as dppclk threshold, set to target clock */
+- if (dispclk_to_dpp_threshold != new_clocks->dispclk_khz)
+- dce112_set_clock(dccg, new_clocks->dispclk_khz);
+-
+- dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
+- dccg->clks.dppclk_khz = new_clocks->dppclk_khz;
+- dccg->clks.max_supported_dppclk_khz = new_clocks->max_supported_dppclk_khz;
+-}
+-
+-static void dcn1_update_clocks(struct dccg *dccg,
+- struct dc_state *context,
+- bool safe_to_lower)
+-{
+- struct dc *dc = dccg->ctx->dc;
+- struct dc_clocks *new_clocks = &context->bw.dcn.clk;
+- struct pp_smu_display_requirement_rv *smu_req_cur =
+- &dc->res_pool->pp_smu_req;
+- struct pp_smu_display_requirement_rv smu_req = *smu_req_cur;
+- struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
+- struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
+- bool send_request_to_increase = false;
+- bool send_request_to_lower = false;
+-
+- if (new_clocks->phyclk_khz)
+- smu_req.display_count = 1;
+- else
+- smu_req.display_count = 0;
+-
+- if (new_clocks->dispclk_khz > dccg->clks.dispclk_khz
+- || new_clocks->phyclk_khz > dccg->clks.phyclk_khz
+- || new_clocks->fclk_khz > dccg->clks.fclk_khz
+- || new_clocks->dcfclk_khz > dccg->clks.dcfclk_khz)
+- send_request_to_increase = true;
+-
+- if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, dccg->clks.phyclk_khz)) {
+- dccg->clks.phyclk_khz = new_clocks->phyclk_khz;
+-
+- send_request_to_lower = true;
+- }
+-
+- if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, dccg->clks.fclk_khz)) {
+- dccg->clks.fclk_khz = new_clocks->fclk_khz;
+- clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_FCLK;
+- clock_voltage_req.clocks_in_khz = new_clocks->fclk_khz;
+- smu_req.hard_min_fclk_khz = new_clocks->fclk_khz;
+-
+- dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+- send_request_to_lower = true;
+- }
+-
+- if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, dccg->clks.dcfclk_khz)) {
+- dccg->clks.dcfclk_khz = new_clocks->dcfclk_khz;
+- smu_req.hard_min_dcefclk_khz = new_clocks->dcfclk_khz;
+-
+- send_request_to_lower = true;
+- }
+-
+- if (should_set_clock(safe_to_lower,
+- new_clocks->dcfclk_deep_sleep_khz, dccg->clks.dcfclk_deep_sleep_khz)) {
+- dccg->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
+- smu_req.min_deep_sleep_dcefclk_mhz = new_clocks->dcfclk_deep_sleep_khz;
+-
+- send_request_to_lower = true;
+- }
+-
+- /* make sure dcf clk is before dpp clk to
+- * make sure we have enough voltage to run dpp clk
+- */
+- if (send_request_to_increase) {
+- /*use dcfclk to request voltage*/
+- clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
+- clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
+- dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+- if (pp_smu->set_display_requirement)
+- pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
+- dcn1_pplib_apply_display_requirements(dc, context);
+- }
+-
+- /* dcn1 dppclk is tied to dispclk */
+- /* program dispclk on = as a w/a for sleep resume clock ramping issues */
+- if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)
+- || new_clocks->dispclk_khz == dccg->clks.dispclk_khz) {
+- dcn1_ramp_up_dispclk_with_dpp(dccg, new_clocks);
+- dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
+-
+- send_request_to_lower = true;
+- }
+-
+- if (!send_request_to_increase && send_request_to_lower) {
+- /*use dcfclk to request voltage*/
+- clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
+- clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
+- dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+- if (pp_smu->set_display_requirement)
+- pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
+- dcn1_pplib_apply_display_requirements(dc, context);
+- }
+-
+-
+- *smu_req_cur = smu_req;
+-}
+-#endif
+-
+-static void dce_update_clocks(struct dccg *dccg,
+- struct dc_state *context,
+- bool safe_to_lower)
+-{
+- struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
+- struct dm_pp_power_level_change_request level_change_req;
+- int unpatched_disp_clk = context->bw.dce.dispclk_khz;
+-
+- /*TODO: W/A for dal3 linux, investigate why this works */
+- if (!dccg_dce->dfs_bypass_active)
+- context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
+-
+- level_change_req.power_level = dce_get_required_clocks_state(dccg, context);
+- /* get max clock state from PPLIB */
+- if ((level_change_req.power_level < dccg_dce->cur_min_clks_state && safe_to_lower)
+- || level_change_req.power_level > dccg_dce->cur_min_clks_state) {
+- if (dm_pp_apply_power_level_change_request(dccg->ctx, &level_change_req))
+- dccg_dce->cur_min_clks_state = level_change_req.power_level;
+- }
+-
+- if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, dccg->clks.dispclk_khz)) {
+- context->bw.dce.dispclk_khz = dce_set_clock(dccg, context->bw.dce.dispclk_khz);
+- dccg->clks.dispclk_khz = context->bw.dce.dispclk_khz;
+- }
+- dce_pplib_apply_display_requirements(dccg->ctx->dc, context);
+-
+- context->bw.dce.dispclk_khz = unpatched_disp_clk;
+-}
+-
+-static void dce11_update_clocks(struct dccg *dccg,
+- struct dc_state *context,
+- bool safe_to_lower)
+-{
+- struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
+- struct dm_pp_power_level_change_request level_change_req;
+-
+- level_change_req.power_level = dce_get_required_clocks_state(dccg, context);
+- /* get max clock state from PPLIB */
+- if ((level_change_req.power_level < dccg_dce->cur_min_clks_state && safe_to_lower)
+- || level_change_req.power_level > dccg_dce->cur_min_clks_state) {
+- if (dm_pp_apply_power_level_change_request(dccg->ctx, &level_change_req))
+- dccg_dce->cur_min_clks_state = level_change_req.power_level;
+- }
+-
+- if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, dccg->clks.dispclk_khz)) {
+- context->bw.dce.dispclk_khz = dce_set_clock(dccg, context->bw.dce.dispclk_khz);
+- dccg->clks.dispclk_khz = context->bw.dce.dispclk_khz;
+- }
+- dce11_pplib_apply_display_requirements(dccg->ctx->dc, context);
+-}
+-
+-static void dce112_update_clocks(struct dccg *dccg,
+- struct dc_state *context,
+- bool safe_to_lower)
+-{
+- struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
+- struct dm_pp_power_level_change_request level_change_req;
+-
+- level_change_req.power_level = dce_get_required_clocks_state(dccg, context);
+- /* get max clock state from PPLIB */
+- if ((level_change_req.power_level < dccg_dce->cur_min_clks_state && safe_to_lower)
+- || level_change_req.power_level > dccg_dce->cur_min_clks_state) {
+- if (dm_pp_apply_power_level_change_request(dccg->ctx, &level_change_req))
+- dccg_dce->cur_min_clks_state = level_change_req.power_level;
+- }
+-
+- if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, dccg->clks.dispclk_khz)) {
+- context->bw.dce.dispclk_khz = dce112_set_clock(dccg, context->bw.dce.dispclk_khz);
+- dccg->clks.dispclk_khz = context->bw.dce.dispclk_khz;
+- }
+- dce11_pplib_apply_display_requirements(dccg->ctx->dc, context);
+-}
+-
+-static void dce12_update_clocks(struct dccg *dccg,
+- struct dc_state *context,
+- bool safe_to_lower)
+-{
+- struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
+- struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
+- int max_pix_clk = get_max_pixel_clock_for_all_paths(context);
+- int unpatched_disp_clk = context->bw.dce.dispclk_khz;
+-
+- /*TODO: W/A for dal3 linux, investigate why this works */
+- if (!dccg_dce->dfs_bypass_active)
+- context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
+-
+- if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, dccg->clks.dispclk_khz)) {
+- clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
+- clock_voltage_req.clocks_in_khz = context->bw.dce.dispclk_khz;
+- context->bw.dce.dispclk_khz = dce112_set_clock(dccg, context->bw.dce.dispclk_khz);
+- dccg->clks.dispclk_khz = context->bw.dce.dispclk_khz;
+-
+- dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+- }
+-
+- if (should_set_clock(safe_to_lower, max_pix_clk, dccg->clks.phyclk_khz)) {
+- clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK;
+- clock_voltage_req.clocks_in_khz = max_pix_clk;
+- dccg->clks.phyclk_khz = max_pix_clk;
+-
+- dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+- }
+- dce11_pplib_apply_display_requirements(dccg->ctx->dc, context);
+-
+- context->bw.dce.dispclk_khz = unpatched_disp_clk;
+-}
+-
+-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+-static const struct dccg_funcs dcn1_funcs = {
+- .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+- .update_clocks = dcn1_update_clocks
+-};
+-#endif
+-
+-static const struct dccg_funcs dce120_funcs = {
+- .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+- .update_clocks = dce12_update_clocks
+-};
+-
+-static const struct dccg_funcs dce112_funcs = {
+- .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
+- .update_clocks = dce112_update_clocks
+-};
+-
+-static const struct dccg_funcs dce110_funcs = {
+- .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
+- .update_clocks = dce11_update_clocks,
+-};
+-
+-static const struct dccg_funcs dce_funcs = {
+- .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
+- .update_clocks = dce_update_clocks
+-};
+-
+-static void dce_dccg_construct(
+- struct dce_dccg *dccg_dce,
+- struct dc_context *ctx,
+- const struct dccg_registers *regs,
+- const struct dccg_shift *clk_shift,
+- const struct dccg_mask *clk_mask)
+-{
+- struct dccg *base = &dccg_dce->base;
+- struct dm_pp_static_clock_info static_clk_info = {0};
+-
+- base->ctx = ctx;
+- base->funcs = &dce_funcs;
+-
+- dccg_dce->regs = regs;
+- dccg_dce->dccg_shift = clk_shift;
+- dccg_dce->dccg_mask = clk_mask;
+-
+- dccg_dce->dfs_bypass_disp_clk = 0;
+-
+- dccg_dce->dprefclk_ss_percentage = 0;
+- dccg_dce->dprefclk_ss_divider = 1000;
+- dccg_dce->ss_on_dprefclk = false;
+-
+-
+- if (dm_pp_get_static_clocks(ctx, &static_clk_info))
+- dccg_dce->max_clks_state = static_clk_info.max_clocks_state;
+- else
+- dccg_dce->max_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
+- dccg_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_INVALID;
+-
+- dce_clock_read_integrated_info(dccg_dce);
+- dce_clock_read_ss_info(dccg_dce);
+-}
+-
+-struct dccg *dce_dccg_create(
+- struct dc_context *ctx,
+- const struct dccg_registers *regs,
+- const struct dccg_shift *clk_shift,
+- const struct dccg_mask *clk_mask)
+-{
+- struct dce_dccg *dccg_dce = kzalloc(sizeof(*dccg_dce), GFP_KERNEL);
+-
+- if (dccg_dce == NULL) {
+- BREAK_TO_DEBUGGER();
+- return NULL;
+- }
+-
+- memcpy(dccg_dce->max_clks_by_state,
+- dce80_max_clks_by_state,
+- sizeof(dce80_max_clks_by_state));
+-
+- dce_dccg_construct(
+- dccg_dce, ctx, regs, clk_shift, clk_mask);
+-
+- return &dccg_dce->base;
+-}
+-
+-struct dccg *dce110_dccg_create(
+- struct dc_context *ctx,
+- const struct dccg_registers *regs,
+- const struct dccg_shift *clk_shift,
+- const struct dccg_mask *clk_mask)
+-{
+- struct dce_dccg *dccg_dce = kzalloc(sizeof(*dccg_dce), GFP_KERNEL);
+-
+- if (dccg_dce == NULL) {
+- BREAK_TO_DEBUGGER();
+- return NULL;
+- }
+-
+- memcpy(dccg_dce->max_clks_by_state,
+- dce110_max_clks_by_state,
+- sizeof(dce110_max_clks_by_state));
+-
+- dce_dccg_construct(
+- dccg_dce, ctx, regs, clk_shift, clk_mask);
+-
+- dccg_dce->base.funcs = &dce110_funcs;
+-
+- return &dccg_dce->base;
+-}
+-
+-struct dccg *dce112_dccg_create(
+- struct dc_context *ctx,
+- const struct dccg_registers *regs,
+- const struct dccg_shift *clk_shift,
+- const struct dccg_mask *clk_mask)
+-{
+- struct dce_dccg *dccg_dce = kzalloc(sizeof(*dccg_dce), GFP_KERNEL);
+-
+- if (dccg_dce == NULL) {
+- BREAK_TO_DEBUGGER();
+- return NULL;
+- }
+-
+- memcpy(dccg_dce->max_clks_by_state,
+- dce112_max_clks_by_state,
+- sizeof(dce112_max_clks_by_state));
+-
+- dce_dccg_construct(
+- dccg_dce, ctx, regs, clk_shift, clk_mask);
+-
+- dccg_dce->base.funcs = &dce112_funcs;
+-
+- return &dccg_dce->base;
+-}
+-
+-struct dccg *dce120_dccg_create(struct dc_context *ctx)
+-{
+- struct dce_dccg *dccg_dce = kzalloc(sizeof(*dccg_dce), GFP_KERNEL);
+-
+- if (dccg_dce == NULL) {
+- BREAK_TO_DEBUGGER();
+- return NULL;
+- }
+-
+- memcpy(dccg_dce->max_clks_by_state,
+- dce120_max_clks_by_state,
+- sizeof(dce120_max_clks_by_state));
+-
+- dce_dccg_construct(
+- dccg_dce, ctx, NULL, NULL, NULL);
+-
+- dccg_dce->dprefclk_khz = 600000;
+- dccg_dce->base.funcs = &dce120_funcs;
+-
+- return &dccg_dce->base;
+-}
+-
+-#ifdef CONFIG_X86
+-struct dccg *dcn1_dccg_create(struct dc_context *ctx)
+-{
+- struct dc_debug_options *debug = &ctx->dc->debug;
+- struct dc_bios *bp = ctx->dc_bios;
+- struct dc_firmware_info fw_info = { { 0 } };
+- struct dce_dccg *dccg_dce = kzalloc(sizeof(*dccg_dce), GFP_KERNEL);
+-
+- if (dccg_dce == NULL) {
+- BREAK_TO_DEBUGGER();
+- return NULL;
+- }
+-
+- dccg_dce->base.ctx = ctx;
+- dccg_dce->base.funcs = &dcn1_funcs;
+-
+- dccg_dce->dfs_bypass_disp_clk = 0;
+-
+- dccg_dce->dprefclk_ss_percentage = 0;
+- dccg_dce->dprefclk_ss_divider = 1000;
+- dccg_dce->ss_on_dprefclk = false;
+-
+- dccg_dce->dprefclk_khz = 600000;
+- if (bp->integrated_info)
+- dccg_dce->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
+- if (dccg_dce->dentist_vco_freq_khz == 0) {
+- bp->funcs->get_firmware_info(bp, &fw_info);
+- dccg_dce->dentist_vco_freq_khz = fw_info.smu_gpu_pll_output_freq;
+- if (dccg_dce->dentist_vco_freq_khz == 0)
+- dccg_dce->dentist_vco_freq_khz = 3600000;
+- }
+-
+- if (!debug->disable_dfs_bypass && bp->integrated_info)
+- if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
+- dccg_dce->dfs_bypass_enabled = true;
+-
+- dce_clock_read_ss_info(dccg_dce);
+-
+- return &dccg_dce->base;
+-}
+-#endif
+-
+-void dce_dccg_destroy(struct dccg **dccg)
+-{
+- struct dce_dccg *dccg_dce = TO_DCE_DCCG(*dccg);
+-
+- kfree(dccg_dce);
+- *dccg = NULL;
+-}
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
+deleted file mode 100644
+index 8f902f2..0000000
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
++++ /dev/null
+@@ -1,133 +0,0 @@
+-/*
+- * Copyright 2012-16 Advanced Micro Devices, Inc.
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included in
+- * all copies or substantial portions of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+- * OTHER DEALINGS IN THE SOFTWARE.
+- *
+- * Authors: AMD
+- *
+- */
+-
+-
+-#ifndef _DCE_CLOCKS_H_
+-#define _DCE_CLOCKS_H_
+-
+-#include "display_clock.h"
+-
+-#define MEMORY_TYPE_MULTIPLIER_CZ 4
+-
+-#define CLK_COMMON_REG_LIST_DCE_BASE() \
+- .DPREFCLK_CNTL = mmDPREFCLK_CNTL, \
+- .DENTIST_DISPCLK_CNTL = mmDENTIST_DISPCLK_CNTL
+-
+-#define CLK_COMMON_REG_LIST_DCN_BASE() \
+- SR(DENTIST_DISPCLK_CNTL)
+-
+-#define CLK_SF(reg_name, field_name, post_fix)\
+- .field_name = reg_name ## __ ## field_name ## post_fix
+-
+-#define CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh) \
+- CLK_SF(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, mask_sh), \
+- CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, mask_sh)
+-
+-#define CLK_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(mask_sh) \
+- CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, mask_sh),\
+- CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, mask_sh)
+-
+-#define CLK_REG_FIELD_LIST(type) \
+- type DPREFCLK_SRC_SEL; \
+- type DENTIST_DPREFCLK_WDIVIDER; \
+- type DENTIST_DISPCLK_WDIVIDER; \
+- type DENTIST_DISPCLK_CHG_DONE;
+-
+-struct dccg_shift {
+- CLK_REG_FIELD_LIST(uint8_t)
+-};
+-
+-struct dccg_mask {
+- CLK_REG_FIELD_LIST(uint32_t)
+-};
+-
+-struct dccg_registers {
+- uint32_t DPREFCLK_CNTL;
+- uint32_t DENTIST_DISPCLK_CNTL;
+-};
+-
+-struct state_dependent_clocks {
+- int display_clk_khz;
+- int pixel_clk_khz;
+-};
+-
+-struct dce_dccg {
+- struct dccg base;
+- const struct dccg_registers *regs;
+- const struct dccg_shift *dccg_shift;
+- const struct dccg_mask *dccg_mask;
+-
+- struct state_dependent_clocks max_clks_by_state[DM_PP_CLOCKS_MAX_STATES];
+-
+- int dentist_vco_freq_khz;
+-
+- /* Cache the status of DFS-bypass feature*/
+- bool dfs_bypass_enabled;
+- /* True if the DFS-bypass feature is enabled and active. */
+- bool dfs_bypass_active;
+- /* Cache the display clock returned by VBIOS if DFS-bypass is enabled.
+- * This is basically "Crystal Frequency In KHz" (XTALIN) frequency */
+- int dfs_bypass_disp_clk;
+-
+- /* Flag for Enabled SS on DPREFCLK */
+- bool ss_on_dprefclk;
+- /* DPREFCLK SS percentage (if down-spread enabled) */
+- int dprefclk_ss_percentage;
+- /* DPREFCLK SS percentage Divider (100 or 1000) */
+- int dprefclk_ss_divider;
+- int dprefclk_khz;
+-
+- enum dm_pp_clocks_state max_clks_state;
+- enum dm_pp_clocks_state cur_min_clks_state;
+-};
+-
+-
+-struct dccg *dce_dccg_create(
+- struct dc_context *ctx,
+- const struct dccg_registers *regs,
+- const struct dccg_shift *clk_shift,
+- const struct dccg_mask *clk_mask);
+-
+-struct dccg *dce110_dccg_create(
+- struct dc_context *ctx,
+- const struct dccg_registers *regs,
+- const struct dccg_shift *clk_shift,
+- const struct dccg_mask *clk_mask);
+-
+-struct dccg *dce112_dccg_create(
+- struct dc_context *ctx,
+- const struct dccg_registers *regs,
+- const struct dccg_shift *clk_shift,
+- const struct dccg_mask *clk_mask);
+-
+-struct dccg *dce120_dccg_create(struct dc_context *ctx);
+-
+-#ifdef CONFIG_X86
+-struct dccg *dcn1_dccg_create(struct dc_context *ctx);
+-#endif
+-
+-void dce_dccg_destroy(struct dccg **dccg);
+-
+-#endif /* _DCE_CLOCKS_H_ */
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dccg.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dccg.c
+new file mode 100644
+index 0000000..97c143b
+--- /dev/null
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dccg.c
+@@ -0,0 +1,876 @@
++/*
++ * Copyright 2012-16 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++#include "dce_dccg.h"
++
++#include "reg_helper.h"
++#include "dmcu.h"
++#include "core_types.h"
++#include "dal_asic_id.h"
++
++#define TO_DCE_DCCG(clocks)\
++ container_of(clocks, struct dce_dccg, base)
++
++#define REG(reg) \
++ (dccg_dce->regs->reg)
++
++#undef FN
++#define FN(reg_name, field_name) \
++ dccg_dce->dccg_shift->field_name, dccg_dce->dccg_mask->field_name
++
++#define CTX \
++ dccg_dce->base.ctx
++#define DC_LOGGER \
++ dccg->ctx->logger
++
++/* Max clock values for each state indexed by "enum clocks_state": */
++static const struct state_dependent_clocks dce80_max_clks_by_state[] = {
++/* ClocksStateInvalid - should not be used */
++{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
++/* ClocksStateUltraLow - not expected to be used for DCE 8.0 */
++{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
++/* ClocksStateLow */
++{ .display_clk_khz = 352000, .pixel_clk_khz = 330000},
++/* ClocksStateNominal */
++{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 },
++/* ClocksStatePerformance */
++{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } };
++
++static const struct state_dependent_clocks dce110_max_clks_by_state[] = {
++/*ClocksStateInvalid - should not be used*/
++{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
++/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
++{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
++/*ClocksStateLow*/
++{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
++/*ClocksStateNominal*/
++{ .display_clk_khz = 467000, .pixel_clk_khz = 400000 },
++/*ClocksStatePerformance*/
++{ .display_clk_khz = 643000, .pixel_clk_khz = 400000 } };
++
++static const struct state_dependent_clocks dce112_max_clks_by_state[] = {
++/*ClocksStateInvalid - should not be used*/
++{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
++/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
++{ .display_clk_khz = 389189, .pixel_clk_khz = 346672 },
++/*ClocksStateLow*/
++{ .display_clk_khz = 459000, .pixel_clk_khz = 400000 },
++/*ClocksStateNominal*/
++{ .display_clk_khz = 667000, .pixel_clk_khz = 600000 },
++/*ClocksStatePerformance*/
++{ .display_clk_khz = 1132000, .pixel_clk_khz = 600000 } };
++
++static const struct state_dependent_clocks dce120_max_clks_by_state[] = {
++/*ClocksStateInvalid - should not be used*/
++{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
++/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
++{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
++/*ClocksStateLow*/
++{ .display_clk_khz = 460000, .pixel_clk_khz = 400000 },
++/*ClocksStateNominal*/
++{ .display_clk_khz = 670000, .pixel_clk_khz = 600000 },
++/*ClocksStatePerformance*/
++{ .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } };
++
++static int dentist_get_divider_from_did(int did)
++{
++ if (did < DENTIST_BASE_DID_1)
++ did = DENTIST_BASE_DID_1;
++ if (did > DENTIST_MAX_DID)
++ did = DENTIST_MAX_DID;
++
++ if (did < DENTIST_BASE_DID_2) {
++ return DENTIST_DIVIDER_RANGE_1_START + DENTIST_DIVIDER_RANGE_1_STEP
++ * (did - DENTIST_BASE_DID_1);
++ } else if (did < DENTIST_BASE_DID_3) {
++ return DENTIST_DIVIDER_RANGE_2_START + DENTIST_DIVIDER_RANGE_2_STEP
++ * (did - DENTIST_BASE_DID_2);
++ } else {
++ return DENTIST_DIVIDER_RANGE_3_START + DENTIST_DIVIDER_RANGE_3_STEP
++ * (did - DENTIST_BASE_DID_3);
++ }
++}
++
++/* SW will adjust DP REF Clock average value for all purposes
++ * (DP DTO / DP Audio DTO and DP GTC) if clock is spread, for all cases:
++ * - if SS enabled on DP Ref clock and HW de-spreading enabled with SW
++ *   calculations for DS_INCR/DS_MODULO (this is planned to be default case)
++ * - if SS enabled on DP Ref clock and HW de-spreading enabled with HW
++ *   calculations (not planned to be used, but average clock should still
++ *   be valid)
++ * - if SS enabled on DP Ref clock and HW de-spreading disabled
++ *   (should not be case with CIK) then SW should program all rates
++ *   generated according to average value (case as with previous ASICs)
++ */
++static int dccg_adjust_dp_ref_freq_for_ss(struct dce_dccg *dccg_dce, int dp_ref_clk_khz)
++{
++ if (dccg_dce->ss_on_dprefclk && dccg_dce->dprefclk_ss_divider != 0) {
++ struct fixed31_32 ss_percentage = dc_fixpt_div_int(
++ dc_fixpt_from_fraction(dccg_dce->dprefclk_ss_percentage,
++ dccg_dce->dprefclk_ss_divider), 200);
++ struct fixed31_32 adj_dp_ref_clk_khz;
++
++ ss_percentage = dc_fixpt_sub(dc_fixpt_one, ss_percentage);
++ adj_dp_ref_clk_khz = dc_fixpt_mul_int(ss_percentage, dp_ref_clk_khz);
++ dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
++ }
++ return dp_ref_clk_khz;
++}
++
++static int dce_get_dp_ref_freq_khz(struct dccg *dccg)
++{
++ struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
++ int dprefclk_wdivider;
++ int dprefclk_src_sel;
++ int dp_ref_clk_khz = 600000;
++ int target_div;
++
++ /* ASSERT DP Reference Clock source is from DFS*/
++ REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel);
++ ASSERT(dprefclk_src_sel == 0);
++
++ /* Read the mmDENTIST_DISPCLK_CNTL to get the currently
++ * programmed DID DENTIST_DPREFCLK_WDIVIDER*/
++ REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider);
++
++ /* Convert DENTIST_DPREFCLK_WDIVIDER to actual divider*/
++ target_div = dentist_get_divider_from_did(dprefclk_wdivider);
++
++ /* Calculate the current DFS clock, in kHz.*/
++ dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
++ * dccg_dce->dentist_vco_freq_khz) / target_div;
++
++ return dccg_adjust_dp_ref_freq_for_ss(dccg_dce, dp_ref_clk_khz);
++}
++
++int dce12_get_dp_ref_freq_khz(struct dccg *dccg)
++{
++ struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
++
++ return dccg_adjust_dp_ref_freq_for_ss(dccg_dce, dccg_dce->dprefclk_khz);
++}
++
++/* Unit: kHz. Before mode set, get the pixel clock from the context; the ASIC
++ * registers may not be programmed yet.
++ */
++static uint32_t get_max_pixel_clock_for_all_paths(struct dc_state *context)
++{
++ uint32_t max_pix_clk = 0;
++ int i;
++
++ for (i = 0; i < MAX_PIPES; i++) {
++ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
++
++ if (pipe_ctx->stream == NULL)
++ continue;
++
++ /* do not check underlay */
++ if (pipe_ctx->top_pipe)
++ continue;
++
++ if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk > max_pix_clk)
++ max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
++
++ /* Raise the clock state for HBR3/2 if required. Confirmed with HW: DCE/DPCS
++ * logic for HBR3 still needs Nominal (0.8V) on the VDDC rail.
++ */
++ if (dc_is_dp_signal(pipe_ctx->stream->signal) &&
++ pipe_ctx->stream_res.pix_clk_params.requested_sym_clk > max_pix_clk)
++ max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_sym_clk;
++ }
++
++ return max_pix_clk;
++}
++
++static enum dm_pp_clocks_state dce_get_required_clocks_state(
++ struct dccg *dccg,
++ struct dc_state *context)
++{
++ struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
++ int i;
++ enum dm_pp_clocks_state low_req_clk;
++ int max_pix_clk = get_max_pixel_clock_for_all_paths(context);
++
++ /* Iterate from highest supported to lowest valid state, and update
++ * lowest RequiredState with the lowest state that satisfies
++ * all required clocks
++ */
++ for (i = dccg_dce->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--)
++ if (context->bw.dce.dispclk_khz >
++ dccg_dce->max_clks_by_state[i].display_clk_khz
++ || max_pix_clk >
++ dccg_dce->max_clks_by_state[i].pixel_clk_khz)
++ break;
++
++ low_req_clk = i + 1;
++ if (low_req_clk > dccg_dce->max_clks_state) {
++ /* set max clock state for high phyclock, invalid on exceeding display clock */
++ if (dccg_dce->max_clks_by_state[dccg_dce->max_clks_state].display_clk_khz
++ < context->bw.dce.dispclk_khz)
++ low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
++ else
++ low_req_clk = dccg_dce->max_clks_state;
++ }
++
++ return low_req_clk;
++}
++
++static int dce_set_clock(
++ struct dccg *dccg,
++ int requested_clk_khz)
++{
++ struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
++ struct bp_pixel_clock_parameters pxl_clk_params = { 0 };
++ struct dc_bios *bp = dccg->ctx->dc_bios;
++ int actual_clock = requested_clk_khz;
++ struct dmcu *dmcu = dccg_dce->base.ctx->dc->res_pool->dmcu;
++
++ /* Make sure requested clock isn't lower than minimum threshold*/
++ if (requested_clk_khz > 0)
++ requested_clk_khz = max(requested_clk_khz,
++ dccg_dce->dentist_vco_freq_khz / 64);
++
++ /* Prepare to program display clock*/
++ pxl_clk_params.target_pixel_clock = requested_clk_khz;
++ pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
++
++ if (dccg_dce->dfs_bypass_active)
++ pxl_clk_params.flags.SET_DISPCLK_DFS_BYPASS = true;
++
++ bp->funcs->program_display_engine_pll(bp, &pxl_clk_params);
++
++ if (dccg_dce->dfs_bypass_active) {
++ /* Cache the fixed display clock*/
++ dccg_dce->dfs_bypass_disp_clk =
++ pxl_clk_params.dfs_bypass_display_clock;
++ actual_clock = pxl_clk_params.dfs_bypass_display_clock;
++ }
++
++ /* Coming from power down (HW reset) we need to mark the clock state as
++ * ClocksStateNominal, so that on resume we will call the pplib voltage regulator.*/
++ if (requested_clk_khz == 0)
++ dccg_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
++
++ dmcu->funcs->set_psr_wait_loop(dmcu, actual_clock / 1000 / 7);
++
++ return actual_clock;
++}
++
++int dce112_set_clock(struct dccg *dccg, int requested_clk_khz)
++{
++ struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
++ struct bp_set_dce_clock_parameters dce_clk_params;
++ struct dc_bios *bp = dccg->ctx->dc_bios;
++ struct dc *core_dc = dccg->ctx->dc;
++ struct dmcu *dmcu = core_dc->res_pool->dmcu;
++ int actual_clock = requested_clk_khz;
++ /* Prepare to program display clock*/
++ memset(&dce_clk_params, 0, sizeof(dce_clk_params));
++
++ /* Make sure requested clock isn't lower than minimum threshold*/
++ if (requested_clk_khz > 0)
++ requested_clk_khz = max(requested_clk_khz,
++ dccg_dce->dentist_vco_freq_khz / 62);
++
++ dce_clk_params.target_clock_frequency = requested_clk_khz;
++ dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
++ dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK;
++
++ bp->funcs->set_dce_clock(bp, &dce_clk_params);
++ actual_clock = dce_clk_params.target_clock_frequency;
++
++ /* Coming from power down (HW reset) we need to mark the clock state as
++ * ClocksStateNominal, so that on resume we will call the pplib voltage regulator.*/
++ if (requested_clk_khz == 0)
++ dccg_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
++
++ /*Program DP ref Clock*/
++ /*VBIOS will determine DPREFCLK frequency, so we don't set it*/
++ dce_clk_params.target_clock_frequency = 0;
++ dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK;
++ if (!ASICREV_IS_VEGA20_P(dccg->ctx->asic_id.hw_internal_rev))
++ dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK =
++ (dce_clk_params.pll_id ==
++ CLOCK_SOURCE_COMBO_DISPLAY_PLL0);
++ else
++ dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = false;
++
++ bp->funcs->set_dce_clock(bp, &dce_clk_params);
++
++ if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
++ if (dccg_dce->dfs_bypass_disp_clk != actual_clock)
++ dmcu->funcs->set_psr_wait_loop(dmcu,
++ actual_clock / 1000 / 7);
++ }
++
++ dccg_dce->dfs_bypass_disp_clk = actual_clock;
++ return actual_clock;
++}
++
++static void dce_clock_read_integrated_info(struct dce_dccg *dccg_dce)
++{
++ struct dc_debug_options *debug = &dccg_dce->base.ctx->dc->debug;
++ struct dc_bios *bp = dccg_dce->base.ctx->dc_bios;
++ struct integrated_info info = { { { 0 } } };
++ struct dc_firmware_info fw_info = { { 0 } };
++ int i;
++
++ if (bp->integrated_info)
++ info = *bp->integrated_info;
++
++ dccg_dce->dentist_vco_freq_khz = info.dentist_vco_freq;
++ if (dccg_dce->dentist_vco_freq_khz == 0) {
++ bp->funcs->get_firmware_info(bp, &fw_info);
++ dccg_dce->dentist_vco_freq_khz =
++ fw_info.smu_gpu_pll_output_freq;
++ if (dccg_dce->dentist_vco_freq_khz == 0)
++ dccg_dce->dentist_vco_freq_khz = 3600000;
++ }
++
++ /*update the maximum display clock for each power state*/
++ for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
++ enum dm_pp_clocks_state clk_state = DM_PP_CLOCKS_STATE_INVALID;
++
++ switch (i) {
++ case 0:
++ clk_state = DM_PP_CLOCKS_STATE_ULTRA_LOW;
++ break;
++
++ case 1:
++ clk_state = DM_PP_CLOCKS_STATE_LOW;
++ break;
++
++ case 2:
++ clk_state = DM_PP_CLOCKS_STATE_NOMINAL;
++ break;
++
++ case 3:
++ clk_state = DM_PP_CLOCKS_STATE_PERFORMANCE;
++ break;
++
++ default:
++ clk_state = DM_PP_CLOCKS_STATE_INVALID;
++ break;
++ }
++
++ /*Do not allow bad VBIOS/SBIOS to override with invalid values,
++ * check for > 100MHz*/
++ if (info.disp_clk_voltage[i].max_supported_clk >= 100000)
++ dccg_dce->max_clks_by_state[clk_state].display_clk_khz =
++ info.disp_clk_voltage[i].max_supported_clk;
++ }
++
++ if (!debug->disable_dfs_bypass && bp->integrated_info)
++ if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
++ dccg_dce->dfs_bypass_enabled = true;
++}
++
++void dce_clock_read_ss_info(struct dce_dccg *dccg_dce)
++{
++ struct dc_bios *bp = dccg_dce->base.ctx->dc_bios;
++ int ss_info_num = bp->funcs->get_ss_entry_number(
++ bp, AS_SIGNAL_TYPE_GPU_PLL);
++
++ if (ss_info_num) {
++ struct spread_spectrum_info info = { { 0 } };
++ enum bp_result result = bp->funcs->get_spread_spectrum_info(
++ bp, AS_SIGNAL_TYPE_GPU_PLL, 0, &info);
++
++ /* Based on VBIOS, VBIOS will keep entry for GPU PLL SS
++ * even if SS not enabled and in that case
++ * SSInfo.spreadSpectrumPercentage !=0 would be sign
++ * that SS is enabled
++ */
++ if (result == BP_RESULT_OK &&
++ info.spread_spectrum_percentage != 0) {
++ dccg_dce->ss_on_dprefclk = true;
++ dccg_dce->dprefclk_ss_divider = info.spread_percentage_divider;
++
++ if (info.type.CENTER_MODE == 0) {
++ /* TODO: Currently for DP Reference clock we
++ * need only SS percentage for
++ * downspread */
++ dccg_dce->dprefclk_ss_percentage =
++ info.spread_spectrum_percentage;
++ }
++
++ return;
++ }
++
++ result = bp->funcs->get_spread_spectrum_info(
++ bp, AS_SIGNAL_TYPE_DISPLAY_PORT, 0, &info);
++
++ /* Based on VBIOS, VBIOS will keep entry for DPREFCLK SS
++ * even if SS not enabled and in that case
++ * SSInfo.spreadSpectrumPercentage !=0 would be sign
++ * that SS is enabled
++ */
++ if (result == BP_RESULT_OK &&
++ info.spread_spectrum_percentage != 0) {
++ dccg_dce->ss_on_dprefclk = true;
++ dccg_dce->dprefclk_ss_divider = info.spread_percentage_divider;
++
++ if (info.type.CENTER_MODE == 0) {
++ /* Currently for DP Reference clock we
++ * need only SS percentage for
++ * downspread */
++ dccg_dce->dprefclk_ss_percentage =
++ info.spread_spectrum_percentage;
++ }
++ }
++ }
++}
++
++void dce110_fill_display_configs(
++ const struct dc_state *context,
++ struct dm_pp_display_configuration *pp_display_cfg)
++{
++ int j;
++ int num_cfgs = 0;
++
++ for (j = 0; j < context->stream_count; j++) {
++ int k;
++
++ const struct dc_stream_state *stream = context->streams[j];
++ struct dm_pp_single_disp_config *cfg =
++ &pp_display_cfg->disp_configs[num_cfgs];
++ const struct pipe_ctx *pipe_ctx = NULL;
++
++ for (k = 0; k < MAX_PIPES; k++)
++ if (stream == context->res_ctx.pipe_ctx[k].stream) {
++ pipe_ctx = &context->res_ctx.pipe_ctx[k];
++ break;
++ }
++
++ ASSERT(pipe_ctx != NULL);
++
++ /* only notify active stream */
++ if (stream->dpms_off)
++ continue;
++
++ num_cfgs++;
++ cfg->signal = pipe_ctx->stream->signal;
++ cfg->pipe_idx = pipe_ctx->stream_res.tg->inst;
++ cfg->src_height = stream->src.height;
++ cfg->src_width = stream->src.width;
++ cfg->ddi_channel_mapping =
++ stream->sink->link->ddi_channel_mapping.raw;
++ cfg->transmitter =
++ stream->sink->link->link_enc->transmitter;
++ cfg->link_settings.lane_count =
++ stream->sink->link->cur_link_settings.lane_count;
++ cfg->link_settings.link_rate =
++ stream->sink->link->cur_link_settings.link_rate;
++ cfg->link_settings.link_spread =
++ stream->sink->link->cur_link_settings.link_spread;
++ cfg->sym_clock = stream->phy_pix_clk;
++ /* Round v_refresh*/
++ cfg->v_refresh = stream->timing.pix_clk_khz * 1000;
++ cfg->v_refresh /= stream->timing.h_total;
++ cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2)
++ / stream->timing.v_total;
++ }
++
++ pp_display_cfg->display_count = num_cfgs;
++}
++
++static uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context)
++{
++ uint8_t j;
++ uint32_t min_vertical_blank_time = -1;
++
++ for (j = 0; j < context->stream_count; j++) {
++ struct dc_stream_state *stream = context->streams[j];
++ uint32_t vertical_blank_in_pixels = 0;
++ uint32_t vertical_blank_time = 0;
++
++ vertical_blank_in_pixels = stream->timing.h_total *
++ (stream->timing.v_total
++ - stream->timing.v_addressable);
++
++ vertical_blank_time = vertical_blank_in_pixels
++ * 1000 / stream->timing.pix_clk_khz;
++
++ if (min_vertical_blank_time > vertical_blank_time)
++ min_vertical_blank_time = vertical_blank_time;
++ }
++
++ return min_vertical_blank_time;
++}
++
++static int determine_sclk_from_bounding_box(
++ const struct dc *dc,
++ int required_sclk)
++{
++ int i;
++
++ /*
++ * Some asics do not give us sclk levels, so we just report the actual
++ * required sclk
++ */
++ if (dc->sclk_lvls.num_levels == 0)
++ return required_sclk;
++
++ for (i = 0; i < dc->sclk_lvls.num_levels; i++) {
++ if (dc->sclk_lvls.clocks_in_khz[i] >= required_sclk)
++ return dc->sclk_lvls.clocks_in_khz[i];
++ }
++ /*
++ * Even the maximum level could not satisfy the requirement; this is
++ * unexpected at this stage and should have been caught at validation time.
++ */
++ ASSERT(0);
++ return dc->sclk_lvls.clocks_in_khz[dc->sclk_lvls.num_levels - 1];
++}
++
++static void dce_pplib_apply_display_requirements(
++ struct dc *dc,
++ struct dc_state *context)
++{
++ struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
++
++ pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context);
++
++ dce110_fill_display_configs(context, pp_display_cfg);
++
++ if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
++ dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
++}
++
++static void dce11_pplib_apply_display_requirements(
++ struct dc *dc,
++ struct dc_state *context)
++{
++ struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
++
++ pp_display_cfg->all_displays_in_sync =
++ context->bw.dce.all_displays_in_sync;
++ pp_display_cfg->nb_pstate_switch_disable =
++ context->bw.dce.nbp_state_change_enable == false;
++ pp_display_cfg->cpu_cc6_disable =
++ context->bw.dce.cpuc_state_change_enable == false;
++ pp_display_cfg->cpu_pstate_disable =
++ context->bw.dce.cpup_state_change_enable == false;
++ pp_display_cfg->cpu_pstate_separation_time =
++ context->bw.dce.blackout_recovery_time_us;
++
++ pp_display_cfg->min_memory_clock_khz = context->bw.dce.yclk_khz
++ / MEMORY_TYPE_MULTIPLIER_CZ;
++
++ pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box(
++ dc,
++ context->bw.dce.sclk_khz);
++
++ pp_display_cfg->min_engine_clock_deep_sleep_khz
++ = context->bw.dce.sclk_deep_sleep_khz;
++
++ pp_display_cfg->avail_mclk_switch_time_us =
++ dce110_get_min_vblank_time_us(context);
++ /* TODO: dce11.2*/
++ pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
++
++ pp_display_cfg->disp_clk_khz = dc->res_pool->dccg->clks.dispclk_khz;
++
++ dce110_fill_display_configs(context, pp_display_cfg);
++
++ /* TODO: is this still applicable?*/
++ if (pp_display_cfg->display_count == 1) {
++ const struct dc_crtc_timing *timing =
++ &context->streams[0]->timing;
++
++ pp_display_cfg->crtc_index =
++ pp_display_cfg->disp_configs[0].pipe_idx;
++ pp_display_cfg->line_time_in_us = timing->h_total * 1000 / timing->pix_clk_khz;
++ }
++
++ if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
++ dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
++}
++
++static void dce_update_clocks(struct dccg *dccg,
++ struct dc_state *context,
++ bool safe_to_lower)
++{
++ struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
++ struct dm_pp_power_level_change_request level_change_req;
++ int unpatched_disp_clk = context->bw.dce.dispclk_khz;
++
++ /*TODO: W/A for dal3 linux, investigate why this works */
++ if (!dccg_dce->dfs_bypass_active)
++ context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
++
++ level_change_req.power_level = dce_get_required_clocks_state(dccg, context);
++ /* get max clock state from PPLIB */
++ if ((level_change_req.power_level < dccg_dce->cur_min_clks_state && safe_to_lower)
++ || level_change_req.power_level > dccg_dce->cur_min_clks_state) {
++ if (dm_pp_apply_power_level_change_request(dccg->ctx, &level_change_req))
++ dccg_dce->cur_min_clks_state = level_change_req.power_level;
++ }
++
++ if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, dccg->clks.dispclk_khz)) {
++ context->bw.dce.dispclk_khz = dce_set_clock(dccg, context->bw.dce.dispclk_khz);
++ dccg->clks.dispclk_khz = context->bw.dce.dispclk_khz;
++ }
++ dce_pplib_apply_display_requirements(dccg->ctx->dc, context);
++
++ context->bw.dce.dispclk_khz = unpatched_disp_clk;
++}
++
++static void dce11_update_clocks(struct dccg *dccg,
++ struct dc_state *context,
++ bool safe_to_lower)
++{
++ struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
++ struct dm_pp_power_level_change_request level_change_req;
++
++ level_change_req.power_level = dce_get_required_clocks_state(dccg, context);
++ /* get max clock state from PPLIB */
++ if ((level_change_req.power_level < dccg_dce->cur_min_clks_state && safe_to_lower)
++ || level_change_req.power_level > dccg_dce->cur_min_clks_state) {
++ if (dm_pp_apply_power_level_change_request(dccg->ctx, &level_change_req))
++ dccg_dce->cur_min_clks_state = level_change_req.power_level;
++ }
++
++ if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, dccg->clks.dispclk_khz)) {
++ context->bw.dce.dispclk_khz = dce_set_clock(dccg, context->bw.dce.dispclk_khz);
++ dccg->clks.dispclk_khz = context->bw.dce.dispclk_khz;
++ }
++ dce11_pplib_apply_display_requirements(dccg->ctx->dc, context);
++}
++
++static void dce112_update_clocks(struct dccg *dccg,
++ struct dc_state *context,
++ bool safe_to_lower)
++{
++ struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
++ struct dm_pp_power_level_change_request level_change_req;
++
++ level_change_req.power_level = dce_get_required_clocks_state(dccg, context);
++ /* get max clock state from PPLIB */
++ if ((level_change_req.power_level < dccg_dce->cur_min_clks_state && safe_to_lower)
++ || level_change_req.power_level > dccg_dce->cur_min_clks_state) {
++ if (dm_pp_apply_power_level_change_request(dccg->ctx, &level_change_req))
++ dccg_dce->cur_min_clks_state = level_change_req.power_level;
++ }
++
++ if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, dccg->clks.dispclk_khz)) {
++ context->bw.dce.dispclk_khz = dce112_set_clock(dccg, context->bw.dce.dispclk_khz);
++ dccg->clks.dispclk_khz = context->bw.dce.dispclk_khz;
++ }
++ dce11_pplib_apply_display_requirements(dccg->ctx->dc, context);
++}
++
++static void dce12_update_clocks(struct dccg *dccg,
++ struct dc_state *context,
++ bool safe_to_lower)
++{
++ struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
++ struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
++ int max_pix_clk = get_max_pixel_clock_for_all_paths(context);
++ int unpatched_disp_clk = context->bw.dce.dispclk_khz;
++
++ /*TODO: W/A for dal3 linux, investigate why this works */
++ if (!dccg_dce->dfs_bypass_active)
++ context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
++
++ if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, dccg->clks.dispclk_khz)) {
++ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
++ clock_voltage_req.clocks_in_khz = context->bw.dce.dispclk_khz;
++ context->bw.dce.dispclk_khz = dce112_set_clock(dccg, context->bw.dce.dispclk_khz);
++ dccg->clks.dispclk_khz = context->bw.dce.dispclk_khz;
++
++ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
++ }
++
++ if (should_set_clock(safe_to_lower, max_pix_clk, dccg->clks.phyclk_khz)) {
++ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK;
++ clock_voltage_req.clocks_in_khz = max_pix_clk;
++ dccg->clks.phyclk_khz = max_pix_clk;
++
++ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
++ }
++ dce11_pplib_apply_display_requirements(dccg->ctx->dc, context);
++
++ context->bw.dce.dispclk_khz = unpatched_disp_clk;
++}
++
++static const struct dccg_funcs dce120_funcs = {
++ .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
++ .update_clocks = dce12_update_clocks
++};
++
++static const struct dccg_funcs dce112_funcs = {
++ .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
++ .update_clocks = dce112_update_clocks
++};
++
++static const struct dccg_funcs dce110_funcs = {
++ .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
++ .update_clocks = dce11_update_clocks,
++};
++
++static const struct dccg_funcs dce_funcs = {
++ .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
++ .update_clocks = dce_update_clocks
++};
++
++static void dce_dccg_construct(
++ struct dce_dccg *dccg_dce,
++ struct dc_context *ctx,
++ const struct dccg_registers *regs,
++ const struct dccg_shift *clk_shift,
++ const struct dccg_mask *clk_mask)
++{
++ struct dccg *base = &dccg_dce->base;
++ struct dm_pp_static_clock_info static_clk_info = {0};
++
++ base->ctx = ctx;
++ base->funcs = &dce_funcs;
++
++ dccg_dce->regs = regs;
++ dccg_dce->dccg_shift = clk_shift;
++ dccg_dce->dccg_mask = clk_mask;
++
++ dccg_dce->dfs_bypass_disp_clk = 0;
++
++ dccg_dce->dprefclk_ss_percentage = 0;
++ dccg_dce->dprefclk_ss_divider = 1000;
++ dccg_dce->ss_on_dprefclk = false;
++
++
++ if (dm_pp_get_static_clocks(ctx, &static_clk_info))
++ dccg_dce->max_clks_state = static_clk_info.max_clocks_state;
++ else
++ dccg_dce->max_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
++ dccg_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_INVALID;
++
++ dce_clock_read_integrated_info(dccg_dce);
++ dce_clock_read_ss_info(dccg_dce);
++}
++
++struct dccg *dce_dccg_create(
++ struct dc_context *ctx,
++ const struct dccg_registers *regs,
++ const struct dccg_shift *clk_shift,
++ const struct dccg_mask *clk_mask)
++{
++ struct dce_dccg *dccg_dce = kzalloc(sizeof(*dccg_dce), GFP_KERNEL);
++
++ if (dccg_dce == NULL) {
++ BREAK_TO_DEBUGGER();
++ return NULL;
++ }
++
++ memcpy(dccg_dce->max_clks_by_state,
++ dce80_max_clks_by_state,
++ sizeof(dce80_max_clks_by_state));
++
++ dce_dccg_construct(
++ dccg_dce, ctx, regs, clk_shift, clk_mask);
++
++ return &dccg_dce->base;
++}
++
++struct dccg *dce110_dccg_create(
++ struct dc_context *ctx,
++ const struct dccg_registers *regs,
++ const struct dccg_shift *clk_shift,
++ const struct dccg_mask *clk_mask)
++{
++ struct dce_dccg *dccg_dce = kzalloc(sizeof(*dccg_dce), GFP_KERNEL);
++
++ if (dccg_dce == NULL) {
++ BREAK_TO_DEBUGGER();
++ return NULL;
++ }
++
++ memcpy(dccg_dce->max_clks_by_state,
++ dce110_max_clks_by_state,
++ sizeof(dce110_max_clks_by_state));
++
++ dce_dccg_construct(
++ dccg_dce, ctx, regs, clk_shift, clk_mask);
++
++ dccg_dce->base.funcs = &dce110_funcs;
++
++ return &dccg_dce->base;
++}
++
++struct dccg *dce112_dccg_create(
++ struct dc_context *ctx,
++ const struct dccg_registers *regs,
++ const struct dccg_shift *clk_shift,
++ const struct dccg_mask *clk_mask)
++{
++ struct dce_dccg *dccg_dce = kzalloc(sizeof(*dccg_dce), GFP_KERNEL);
++
++ if (dccg_dce == NULL) {
++ BREAK_TO_DEBUGGER();
++ return NULL;
++ }
++
++ memcpy(dccg_dce->max_clks_by_state,
++ dce112_max_clks_by_state,
++ sizeof(dce112_max_clks_by_state));
++
++ dce_dccg_construct(
++ dccg_dce, ctx, regs, clk_shift, clk_mask);
++
++ dccg_dce->base.funcs = &dce112_funcs;
++
++ return &dccg_dce->base;
++}
++
++struct dccg *dce120_dccg_create(struct dc_context *ctx)
++{
++ struct dce_dccg *dccg_dce = kzalloc(sizeof(*dccg_dce), GFP_KERNEL);
++
++ if (dccg_dce == NULL) {
++ BREAK_TO_DEBUGGER();
++ return NULL;
++ }
++
++ memcpy(dccg_dce->max_clks_by_state,
++ dce120_max_clks_by_state,
++ sizeof(dce120_max_clks_by_state));
++
++ dce_dccg_construct(
++ dccg_dce, ctx, NULL, NULL, NULL);
++
++ dccg_dce->dprefclk_khz = 600000;
++ dccg_dce->base.funcs = &dce120_funcs;
++
++ return &dccg_dce->base;
++}
++
++void dce_dccg_destroy(struct dccg **dccg)
++{
++ struct dce_dccg *dccg_dce = TO_DCE_DCCG(*dccg);
++
++ kfree(dccg_dce);
++ *dccg = NULL;
++}
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dccg.h b/drivers/gpu/drm/amd/display/dc/dce/dce_dccg.h
+new file mode 100644
+index 0000000..786d963
+--- /dev/null
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dccg.h
+@@ -0,0 +1,165 @@
++/*
++ * Copyright 2012-16 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++
++#ifndef _DCE_DCCG_H_
++#define _DCE_DCCG_H_
++
++#include "dccg.h"
++
++#define MEMORY_TYPE_MULTIPLIER_CZ 4
++
++#define CLK_COMMON_REG_LIST_DCE_BASE() \
++ .DPREFCLK_CNTL = mmDPREFCLK_CNTL, \
++ .DENTIST_DISPCLK_CNTL = mmDENTIST_DISPCLK_CNTL
++
++#define CLK_COMMON_REG_LIST_DCN_BASE() \
++ SR(DENTIST_DISPCLK_CNTL)
++
++#define CLK_SF(reg_name, field_name, post_fix)\
++ .field_name = reg_name ## __ ## field_name ## post_fix
++
++#define CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh) \
++ CLK_SF(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, mask_sh), \
++ CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, mask_sh)
++
++#define CLK_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(mask_sh) \
++ CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, mask_sh),\
++ CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, mask_sh)
++
++#define CLK_REG_FIELD_LIST(type) \
++ type DPREFCLK_SRC_SEL; \
++ type DENTIST_DPREFCLK_WDIVIDER; \
++ type DENTIST_DISPCLK_WDIVIDER; \
++ type DENTIST_DISPCLK_CHG_DONE;
++
++struct dccg_shift {
++ CLK_REG_FIELD_LIST(uint8_t)
++};
++
++struct dccg_mask {
++ CLK_REG_FIELD_LIST(uint32_t)
++};
++
++struct dccg_registers {
++ uint32_t DPREFCLK_CNTL;
++ uint32_t DENTIST_DISPCLK_CNTL;
++};
++
++struct state_dependent_clocks {
++ int display_clk_khz;
++ int pixel_clk_khz;
++};
++
++struct dce_dccg {
++ struct dccg base;
++ const struct dccg_registers *regs;
++ const struct dccg_shift *dccg_shift;
++ const struct dccg_mask *dccg_mask;
++
++ struct state_dependent_clocks max_clks_by_state[DM_PP_CLOCKS_MAX_STATES];
++
++ int dentist_vco_freq_khz;
++
++ /* Cache the status of DFS-bypass feature*/
++ bool dfs_bypass_enabled;
++ /* True if the DFS-bypass feature is enabled and active. */
++ bool dfs_bypass_active;
++ /* Cache the display clock returned by VBIOS if DFS-bypass is enabled.
++ * This is basically "Crystal Frequency In KHz" (XTALIN) frequency */
++ int dfs_bypass_disp_clk;
++
++ /* Flag for Enabled SS on DPREFCLK */
++ bool ss_on_dprefclk;
++ /* DPREFCLK SS percentage (if down-spread enabled) */
++ int dprefclk_ss_percentage;
++ /* DPREFCLK SS percentage Divider (100 or 1000) */
++ int dprefclk_ss_divider;
++ int dprefclk_khz;
++
++ enum dm_pp_clocks_state max_clks_state;
++ enum dm_pp_clocks_state cur_min_clks_state;
++};
++
++/* Starting DID for each range */
++enum dentist_base_divider_id {
++ DENTIST_BASE_DID_1 = 0x08,
++ DENTIST_BASE_DID_2 = 0x40,
++ DENTIST_BASE_DID_3 = 0x60,
++ DENTIST_BASE_DID_4 = 0x7e,
++ DENTIST_MAX_DID = 0x7f
++};
++
++/* Starting point and step size for each divider range.*/
++enum dentist_divider_range {
++ DENTIST_DIVIDER_RANGE_1_START = 8, /* 2.00 */
++ DENTIST_DIVIDER_RANGE_1_STEP = 1, /* 0.25 */
++ DENTIST_DIVIDER_RANGE_2_START = 64, /* 16.00 */
++ DENTIST_DIVIDER_RANGE_2_STEP = 2, /* 0.50 */
++ DENTIST_DIVIDER_RANGE_3_START = 128, /* 32.00 */
++ DENTIST_DIVIDER_RANGE_3_STEP = 4, /* 1.00 */
++ DENTIST_DIVIDER_RANGE_4_START = 248, /* 62.00 */
++ DENTIST_DIVIDER_RANGE_4_STEP = 264, /* 66.00 */
++ DENTIST_DIVIDER_RANGE_SCALE_FACTOR = 4
++};
++
++static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
++{
++ return ((safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk);
++}
++
++void dce_clock_read_ss_info(struct dce_dccg *dccg_dce);
++
++int dce12_get_dp_ref_freq_khz(struct dccg *dccg);
++
++void dce110_fill_display_configs(
++ const struct dc_state *context,
++ struct dm_pp_display_configuration *pp_display_cfg);
++
++int dce112_set_clock(struct dccg *dccg, int requested_clk_khz);
++
++struct dccg *dce_dccg_create(
++ struct dc_context *ctx,
++ const struct dccg_registers *regs,
++ const struct dccg_shift *clk_shift,
++ const struct dccg_mask *clk_mask);
++
++struct dccg *dce110_dccg_create(
++ struct dc_context *ctx,
++ const struct dccg_registers *regs,
++ const struct dccg_shift *clk_shift,
++ const struct dccg_mask *clk_mask);
++
++struct dccg *dce112_dccg_create(
++ struct dc_context *ctx,
++ const struct dccg_registers *regs,
++ const struct dccg_shift *clk_shift,
++ const struct dccg_mask *clk_mask);
++
++struct dccg *dce120_dccg_create(struct dc_context *ctx);
++
++void dce_dccg_destroy(struct dccg **dccg);
++
++#endif /* _DCE_DCCG_H_ */
+diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+index 5d62561..36015f7 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+@@ -22,7 +22,7 @@
+ * Authors: AMD
+ *
+ */
+-#include "../dce/dce_clocks.h"
++#include "../dce/dce_dccg.h"
+ #include "dm_services.h"
+
+ #include "link_encoder.h"
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+index 18f9135..d78b064 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+@@ -31,7 +31,7 @@
+ #include "resource.h"
+ #include "dce110/dce110_resource.h"
+
+-#include "../dce/dce_clocks.h"
++#include "../dce/dce_dccg.h"
+ #include "include/irq_service_interface.h"
+ #include "dce/dce_audio.h"
+ #include "dce110/dce110_timing_generator.h"
+diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+index cc48a87..b3d00d7 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+@@ -23,7 +23,7 @@
+ *
+ */
+
+-#include "../dce/dce_clocks.h"
++#include "../dce/dce_dccg.h"
+ #include "dm_services.h"
+
+ #include "link_encoder.h"
+diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+index da2d50d..512a22d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+@@ -32,7 +32,7 @@
+ #include "include/irq_service_interface.h"
+ #include "dce120_resource.h"
+
+-#include "../dce/dce_clocks.h"
++#include "../dce/dce_dccg.h"
+ #include "dce112/dce112_resource.h"
+
+ #include "dce110/dce110_resource.h"
+diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+index 313141b..1fccb52 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+@@ -23,7 +23,7 @@
+ *
+ */
+
+-#include "../dce/dce_clocks.h"
++#include "../dce/dce_dccg.h"
+ #include "dce/dce_8_0_d.h"
+ #include "dce/dce_8_0_sh_mask.h"
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
+index 032f872..e13ab66 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
+@@ -24,7 +24,7 @@
+
+ DCN10 = dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o dcn10_hw_sequencer_debug.o \
+ dcn10_dpp.o dcn10_opp.o dcn10_optc.o \
+- dcn10_hubp.o dcn10_mpc.o \
++ dcn10_hubp.o dcn10_mpc.o dcn10_dccg.o \
+ dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \
+ dcn10_hubbub.o dcn10_stream_encoder.o dcn10_link_encoder.o
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dccg.c
+new file mode 100644
+index 0000000..abfe82f
+--- /dev/null
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dccg.c
+@@ -0,0 +1,278 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++#include "dcn10_dccg.h"
++
++#include "reg_helper.h"
++#include "core_types.h"
++
++#define TO_DCE_DCCG(clocks)\
++ container_of(clocks, struct dce_dccg, base)
++
++#define REG(reg) \
++ (dccg_dce->regs->reg)
++
++#undef FN
++#define FN(reg_name, field_name) \
++ dccg_dce->dccg_shift->field_name, dccg_dce->dccg_mask->field_name
++
++#define CTX \
++ dccg_dce->base.ctx
++#define DC_LOGGER \
++ dccg->ctx->logger
++
++void dcn1_pplib_apply_display_requirements(
++ struct dc *dc,
++ struct dc_state *context)
++{
++ struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
++
++ pp_display_cfg->min_engine_clock_khz = dc->res_pool->dccg->clks.dcfclk_khz;
++ pp_display_cfg->min_memory_clock_khz = dc->res_pool->dccg->clks.fclk_khz;
++ pp_display_cfg->min_engine_clock_deep_sleep_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz;
++ pp_display_cfg->min_dcfc_deep_sleep_clock_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz;
++ pp_display_cfg->min_dcfclock_khz = dc->res_pool->dccg->clks.dcfclk_khz;
++ pp_display_cfg->disp_clk_khz = dc->res_pool->dccg->clks.dispclk_khz;
++ dce110_fill_display_configs(context, pp_display_cfg);
++
++ if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
++ dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
++}
++
++static int dcn1_determine_dppclk_threshold(struct dccg *dccg, struct dc_clocks *new_clocks)
++{
++ bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
++ bool dispclk_increase = new_clocks->dispclk_khz > dccg->clks.dispclk_khz;
++ int disp_clk_threshold = new_clocks->max_supported_dppclk_khz;
++ bool cur_dpp_div = dccg->clks.dispclk_khz > dccg->clks.dppclk_khz;
++
++ /* Increasing the clock: look for the case where the current div is 0 and the requested div is 1 */
++ if (dispclk_increase) {
++ /* already divided by 2, no need to reach target clk with 2 steps*/
++ if (cur_dpp_div)
++ return new_clocks->dispclk_khz;
++
++ /* request disp clk is lower than maximum supported dpp clk,
++ * no need to reach target clk with two steps.
++ */
++ if (new_clocks->dispclk_khz <= disp_clk_threshold)
++ return new_clocks->dispclk_khz;
++
++ /* target dpp clk not request divided by 2, still within threshold */
++ if (!request_dpp_div)
++ return new_clocks->dispclk_khz;
++
++ } else {
++ /* decrease clock, looking for current dppclk divided by 2,
++ * request dppclk not divided by 2.
++ */
++
++ /* current dpp clk not divided by 2, no need to ramp*/
++ if (!cur_dpp_div)
++ return new_clocks->dispclk_khz;
++
++ /* current disp clk is lower than current maximum dpp clk,
++ * no need to ramp
++ */
++ if (dccg->clks.dispclk_khz <= disp_clk_threshold)
++ return new_clocks->dispclk_khz;
++
++ /* request dpp clk need to be divided by 2 */
++ if (request_dpp_div)
++ return new_clocks->dispclk_khz;
++ }
++
++ return disp_clk_threshold;
++}
++
++static void dcn1_ramp_up_dispclk_with_dpp(struct dccg *dccg, struct dc_clocks *new_clocks)
++{
++ struct dc *dc = dccg->ctx->dc;
++ int dispclk_to_dpp_threshold = dcn1_determine_dppclk_threshold(dccg, new_clocks);
++ bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
++ int i;
++
++ /* set disp clk to dpp clk threshold */
++ dce112_set_clock(dccg, dispclk_to_dpp_threshold);
++
++ /* update request dpp clk division option */
++ for (i = 0; i < dc->res_pool->pipe_count; i++) {
++ struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
++
++ if (!pipe_ctx->plane_state)
++ continue;
++
++ pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control(
++ pipe_ctx->plane_res.dpp,
++ request_dpp_div,
++ true);
++ }
++
++ /* If target clk not same as dppclk threshold, set to target clock */
++ if (dispclk_to_dpp_threshold != new_clocks->dispclk_khz)
++ dce112_set_clock(dccg, new_clocks->dispclk_khz);
++
++ dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
++ dccg->clks.dppclk_khz = new_clocks->dppclk_khz;
++ dccg->clks.max_supported_dppclk_khz = new_clocks->max_supported_dppclk_khz;
++}
++
++static void dcn1_update_clocks(struct dccg *dccg,
++ struct dc_state *context,
++ bool safe_to_lower)
++{
++ struct dc *dc = dccg->ctx->dc;
++ struct dc_clocks *new_clocks = &context->bw.dcn.clk;
++ struct pp_smu_display_requirement_rv *smu_req_cur =
++ &dc->res_pool->pp_smu_req;
++ struct pp_smu_display_requirement_rv smu_req = *smu_req_cur;
++ struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
++ struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
++ bool send_request_to_increase = false;
++ bool send_request_to_lower = false;
++
++ if (new_clocks->phyclk_khz)
++ smu_req.display_count = 1;
++ else
++ smu_req.display_count = 0;
++
++ if (new_clocks->dispclk_khz > dccg->clks.dispclk_khz
++ || new_clocks->phyclk_khz > dccg->clks.phyclk_khz
++ || new_clocks->fclk_khz > dccg->clks.fclk_khz
++ || new_clocks->dcfclk_khz > dccg->clks.dcfclk_khz)
++ send_request_to_increase = true;
++
++ if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, dccg->clks.phyclk_khz)) {
++ dccg->clks.phyclk_khz = new_clocks->phyclk_khz;
++
++ send_request_to_lower = true;
++ }
++
++ if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, dccg->clks.fclk_khz)) {
++ dccg->clks.fclk_khz = new_clocks->fclk_khz;
++ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_FCLK;
++ clock_voltage_req.clocks_in_khz = new_clocks->fclk_khz;
++ smu_req.hard_min_fclk_khz = new_clocks->fclk_khz;
++
++ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
++ send_request_to_lower = true;
++ }
++
++ if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, dccg->clks.dcfclk_khz)) {
++ dccg->clks.dcfclk_khz = new_clocks->dcfclk_khz;
++ smu_req.hard_min_dcefclk_khz = new_clocks->dcfclk_khz;
++
++ send_request_to_lower = true;
++ }
++
++ if (should_set_clock(safe_to_lower,
++ new_clocks->dcfclk_deep_sleep_khz, dccg->clks.dcfclk_deep_sleep_khz)) {
++ dccg->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
++ smu_req.min_deep_sleep_dcefclk_mhz = new_clocks->dcfclk_deep_sleep_khz;
++
++ send_request_to_lower = true;
++ }
++
++ /* make sure dcf clk is before dpp clk to
++ * make sure we have enough voltage to run dpp clk
++ */
++ if (send_request_to_increase) {
++ /*use dcfclk to request voltage*/
++ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
++ clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
++ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
++ if (pp_smu->set_display_requirement)
++ pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
++ dcn1_pplib_apply_display_requirements(dc, context);
++ }
++
++ /* dcn1 dppclk is tied to dispclk */
++ /* Program dispclk even when unchanged (==) as a w/a for sleep/resume clock ramping issues */
++ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)
++ || new_clocks->dispclk_khz == dccg->clks.dispclk_khz) {
++ dcn1_ramp_up_dispclk_with_dpp(dccg, new_clocks);
++ dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
++
++ send_request_to_lower = true;
++ }
++
++ if (!send_request_to_increase && send_request_to_lower) {
++ /*use dcfclk to request voltage*/
++ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
++ clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
++ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
++ if (pp_smu->set_display_requirement)
++ pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
++ dcn1_pplib_apply_display_requirements(dc, context);
++ }
++
++
++ *smu_req_cur = smu_req;
++}
++
++static const struct dccg_funcs dcn1_funcs = {
++ .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
++ .update_clocks = dcn1_update_clocks
++};
++
++struct dccg *dcn1_dccg_create(struct dc_context *ctx)
++{
++ struct dc_debug_options *debug = &ctx->dc->debug;
++ struct dc_bios *bp = ctx->dc_bios;
++ struct dc_firmware_info fw_info = { { 0 } };
++ struct dce_dccg *dccg_dce = kzalloc(sizeof(*dccg_dce), GFP_KERNEL);
++
++ if (dccg_dce == NULL) {
++ BREAK_TO_DEBUGGER();
++ return NULL;
++ }
++
++ dccg_dce->base.ctx = ctx;
++ dccg_dce->base.funcs = &dcn1_funcs;
++
++ dccg_dce->dfs_bypass_disp_clk = 0;
++
++ dccg_dce->dprefclk_ss_percentage = 0;
++ dccg_dce->dprefclk_ss_divider = 1000;
++ dccg_dce->ss_on_dprefclk = false;
++
++ dccg_dce->dprefclk_khz = 600000;
++ if (bp->integrated_info)
++ dccg_dce->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
++ if (dccg_dce->dentist_vco_freq_khz == 0) {
++ bp->funcs->get_firmware_info(bp, &fw_info);
++ dccg_dce->dentist_vco_freq_khz = fw_info.smu_gpu_pll_output_freq;
++ if (dccg_dce->dentist_vco_freq_khz == 0)
++ dccg_dce->dentist_vco_freq_khz = 3600000;
++ }
++
++ if (!debug->disable_dfs_bypass && bp->integrated_info)
++ if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
++ dccg_dce->dfs_bypass_enabled = true;
++
++ dce_clock_read_ss_info(dccg_dce);
++
++ return &dccg_dce->base;
++}
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dccg.h
+new file mode 100644
+index 0000000..7f3dd84
+--- /dev/null
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dccg.h
+@@ -0,0 +1,37 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++#ifndef __DCN10_DCCG_H__
++#define __DCN10_DCCG_H__
++
++#include "../dce/dce_dccg.h"
++
++void dcn1_pplib_apply_display_requirements(
++ struct dc *dc,
++ struct dc_state *context);
++
++struct dccg *dcn1_dccg_create(struct dc_context *ctx);
++
++#endif //__DCN10_DCCG_H__
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+index 6227db6..55d2d17 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+@@ -40,7 +40,7 @@
+ #include "dcn10/dcn10_opp.h"
+ #include "dcn10/dcn10_link_encoder.h"
+ #include "dcn10/dcn10_stream_encoder.h"
+-#include "../dce/dce_clocks.h"
++#include "dcn10/dcn10_dccg.h"
+ #include "dce/dce_clock_source.h"
+ #include "dce/dce_audio.h"
+ #include "dce/dce_hwseq.h"
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+index 99e2868..d4eaf7f 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+@@ -82,7 +82,7 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option);
+
+ void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable);
+ /********** DAL Core*********************/
+-#include "display_clock.h"
++#include "hw/dccg.h"
+ #include "transform.h"
+ #include "dpp.h"
+
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+index e688eb9..ac9b4906 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+@@ -31,8 +31,8 @@
+ #define __DCN_CALCS_H__
+
+ #include "bw_fixed.h"
+-#include "display_clock.h"
+ #include "../dml/display_mode_lib.h"
++#include "hw/dccg.h"
+
+ struct dc;
+ struct dc_state;
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+new file mode 100644
+index 0000000..6fd923d
+--- /dev/null
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+@@ -0,0 +1,47 @@
++/*
++ * Copyright 2012-16 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++#ifndef __DAL_DCCG_H__
++#define __DAL_DCCG_H__
++
++#include "dm_services_types.h"
++#include "dc.h"
++
++struct dccg {
++ struct dc_context *ctx;
++ const struct dccg_funcs *funcs;
++
++ struct dc_clocks clks;
++};
++
++struct dccg_funcs {
++ void (*update_clocks)(struct dccg *dccg,
++ struct dc_state *context,
++ bool safe_to_lower);
++
++ int (*get_dp_ref_clk_frequency)(struct dccg *dccg);
++};
++
++#endif /* __DAL_DCCG_H__ */
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h b/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
+deleted file mode 100644
+index e1ec42b..0000000
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
++++ /dev/null
+@@ -1,47 +0,0 @@
+-/*
+- * Copyright 2012-16 Advanced Micro Devices, Inc.
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included in
+- * all copies or substantial portions of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+- * OTHER DEALINGS IN THE SOFTWARE.
+- *
+- * Authors: AMD
+- *
+- */
+-
+-#ifndef __DISPLAY_CLOCK_H__
+-#define __DISPLAY_CLOCK_H__
+-
+-#include "dm_services_types.h"
+-#include "dc.h"
+-
+-struct dccg {
+- struct dc_context *ctx;
+- const struct dccg_funcs *funcs;
+-
+- struct dc_clocks clks;
+-};
+-
+-struct dccg_funcs {
+- void (*update_clocks)(struct dccg *dccg,
+- struct dc_state *context,
+- bool safe_to_lower);
+-
+- int (*get_dp_ref_clk_frequency)(struct dccg *dccg);
+-};
+-
+-#endif /* __DISPLAY_CLOCK_H__ */
+--
+2.7.4
+
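The new inc/hw/dccg.h above reduces the clock manager to a two-entry function table. As a rough illustration (not part of any patch in this series), a caller might drive that interface as sketched below; dcn1_dccg_create(), update_clocks() and get_dp_ref_clk_frequency() come from the hunks above, while the wrapper function, its arguments and the dc->ctx field are assumptions made for the sketch.

/* Sketch only: drives the dccg function table declared in dccg.h above. */
static int example_program_clocks(struct dc *dc, struct dc_state *context)
{
	/* constructor declared in dcn10_dccg.h; dc->ctx is assumed here */
	struct dccg *dccg = dcn1_dccg_create(dc->ctx);

	if (dccg == NULL)
		return -1;

	/* Apply the clocks required by the new state; passing true for
	 * safe_to_lower also allows clocks to be ramped down. */
	dccg->funcs->update_clocks(dccg, context, true);

	/* Ask the clock manager for the current DP reference clock (kHz). */
	return dccg->funcs->get_dp_ref_clk_frequency(dccg);
}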
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5584-drm-amd-display-Add-support-for-Freesync-2-HDR-and-C.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5584-drm-amd-display-Add-support-for-Freesync-2-HDR-and-C.patch
new file mode 100644
index 00000000..7b38b974
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5584-drm-amd-display-Add-support-for-Freesync-2-HDR-and-C.patch
@@ -0,0 +1,290 @@
+From 7f1a0453dd8fef5ca6a1679f628abc21e6d38f1b Mon Sep 17 00:00:00 2001
+From: SivapiriyanKumarasamy <sivapiriyan.kumarasamy@amd.com>
+Date: Tue, 11 Sep 2018 17:48:07 -0400
+Subject: [PATCH 5584/5725] drm/amd/display: Add support for Freesync 2 HDR and
+ Content to Display Mapping
+
+[Why]
+Freesync 2 HDR and support for HDR content
+outside the range of the HDR display
+require an implementation in DAL3 to better
+match DAL2.
+
+[How]
+Add support for Freesync 2 HDR and mapping
+of source content to the display's range for a better
+representation of HDR content.
+
+Signed-off-by: SivapiriyanKumarasamy <sivapiriyan.kumarasamy@amd.com>
+Reviewed-by: Anthony Koo <Anthony.Koo@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+---
+ .../drm/amd/display/amdgpu_dm/amdgpu_dm_color.c | 2 +-
+ .../gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c | 2 +-
+ .../drm/amd/display/modules/color/color_gamma.c | 175 ++++++++++++++++++++-
+ .../drm/amd/display/modules/color/color_gamma.h | 11 +-
+ 4 files changed, 186 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+index be19e68..216e48c 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+@@ -164,7 +164,7 @@ int amdgpu_dm_set_regamma_lut(struct dm_crtc_state *crtc)
+ */
+ stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS;
+ ret = mod_color_calculate_regamma_params(stream->out_transfer_func,
+- gamma, true, adev->asic_type <= CHIP_RAVEN);
++ gamma, true, adev->asic_type <= CHIP_RAVEN, NULL);
+ dc_gamma_release(&gamma);
+ if (!ret) {
+ stream->out_transfer_func->type = old_type;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+index 5d95a99..97c05993 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+@@ -268,7 +268,7 @@ bool cm_helper_translate_curve_to_hw_format(
+ memset(lut_params, 0, sizeof(struct pwl_params));
+ memset(seg_distr, 0, sizeof(seg_distr));
+
+- if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
++ if (output_tf->tf == TRANSFER_FUNCTION_PQ || output_tf->tf == TRANSFER_FUNCTION_GAMMA22) {
+ /* 32 segments
+ * segments are from 2^-25 to 2^7
+ */
+diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+index 16f7ea8..423e8c1 100644
+--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
++++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+@@ -308,6 +308,18 @@ static struct fixed31_32 translate_from_linear_space(
+ a1);
+ }
+
++static struct fixed31_32 calculate_gamma22(struct fixed31_32 arg)
++{
++ struct fixed31_32 gamma = dc_fixpt_from_fraction(22, 10);
++
++ return translate_from_linear_space(arg,
++ dc_fixpt_zero,
++ dc_fixpt_zero,
++ dc_fixpt_zero,
++ dc_fixpt_zero,
++ gamma);
++}
++
+ static struct fixed31_32 translate_to_linear_space(
+ struct fixed31_32 arg,
+ struct fixed31_32 a0,
+@@ -711,6 +723,160 @@ static void build_regamma(struct pwl_float_data_ex *rgb_regamma,
+ }
+ }
+
++static void hermite_spline_eetf(struct fixed31_32 input_x,
++ struct fixed31_32 max_display,
++ struct fixed31_32 min_display,
++ struct fixed31_32 max_content,
++ struct fixed31_32 *out_x)
++{
++ struct fixed31_32 min_lum_pq;
++ struct fixed31_32 max_lum_pq;
++ struct fixed31_32 max_content_pq;
++ struct fixed31_32 ks;
++ struct fixed31_32 E1;
++ struct fixed31_32 E2;
++ struct fixed31_32 E3;
++ struct fixed31_32 t;
++ struct fixed31_32 t2;
++ struct fixed31_32 t3;
++ struct fixed31_32 two;
++ struct fixed31_32 three;
++ struct fixed31_32 temp1;
++ struct fixed31_32 temp2;
++ struct fixed31_32 a = dc_fixpt_from_fraction(15, 10);
++ struct fixed31_32 b = dc_fixpt_from_fraction(5, 10);
++ struct fixed31_32 epsilon = dc_fixpt_from_fraction(1, 1000000); // dc_fixpt_epsilon is a bit too small
++
++ if (dc_fixpt_eq(max_content, dc_fixpt_zero)) {
++ *out_x = dc_fixpt_zero;
++ return;
++ }
++
++ compute_pq(input_x, &E1);
++ compute_pq(dc_fixpt_div(min_display, max_content), &min_lum_pq);
++ compute_pq(dc_fixpt_div(max_display, max_content), &max_lum_pq);
++ compute_pq(dc_fixpt_one, &max_content_pq); // always 1? DAL2 code is weird
++ a = dc_fixpt_div(dc_fixpt_add(dc_fixpt_one, b), max_content_pq); // (1+b)/maxContent
++ ks = dc_fixpt_sub(dc_fixpt_mul(a, max_lum_pq), b); // a * max_lum_pq - b
++
++ if (dc_fixpt_lt(E1, ks))
++ E2 = E1;
++ else if (dc_fixpt_le(ks, E1) && dc_fixpt_le(E1, dc_fixpt_one)) {
++ if (dc_fixpt_lt(epsilon, dc_fixpt_sub(dc_fixpt_one, ks)))
++ // t = (E1 - ks) / (1 - ks)
++ t = dc_fixpt_div(dc_fixpt_sub(E1, ks),
++ dc_fixpt_sub(dc_fixpt_one, ks));
++ else
++ t = dc_fixpt_zero;
++
++ two = dc_fixpt_from_int(2);
++ three = dc_fixpt_from_int(3);
++
++ t2 = dc_fixpt_mul(t, t);
++ t3 = dc_fixpt_mul(t2, t);
++ temp1 = dc_fixpt_mul(two, t3);
++ temp2 = dc_fixpt_mul(three, t2);
++
++ // (2t^3 - 3t^2 + 1) * ks
++ E2 = dc_fixpt_mul(ks, dc_fixpt_add(dc_fixpt_one,
++ dc_fixpt_sub(temp1, temp2)));
++
++ // (-2t^3 + 3t^2) * max_lum_pq
++ E2 = dc_fixpt_add(E2, dc_fixpt_mul(max_lum_pq,
++ dc_fixpt_sub(temp2, temp1)));
++
++ temp1 = dc_fixpt_mul(two, t2);
++ temp2 = dc_fixpt_sub(dc_fixpt_one, ks);
++
++ // (t^3 - 2t^2 + t) * (1-ks)
++ E2 = dc_fixpt_add(E2, dc_fixpt_mul(temp2,
++ dc_fixpt_add(t, dc_fixpt_sub(t3, temp1))));
++ }
++
++ temp1 = dc_fixpt_sub(dc_fixpt_one, E2);
++ temp2 = dc_fixpt_mul(temp1, temp1);
++ temp2 = dc_fixpt_mul(temp2, temp2);
++ // temp2 = (1-E2)^4
++
++ E3 = dc_fixpt_add(E2, dc_fixpt_mul(min_lum_pq, temp2));
++ compute_de_pq(E3, out_x);
++
++ *out_x = dc_fixpt_div(*out_x, dc_fixpt_div(max_display, max_content));
++}
++
++static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma,
++ uint32_t hw_points_num,
++ const struct hw_x_point *coordinate_x,
++ const struct freesync_hdr_tf_params *fs_params)
++{
++ uint32_t i;
++ struct pwl_float_data_ex *rgb = rgb_regamma;
++ const struct hw_x_point *coord_x = coordinate_x;
++ struct fixed31_32 scaledX = dc_fixpt_zero;
++ struct fixed31_32 scaledX1 = dc_fixpt_zero;
++ struct fixed31_32 max_display = dc_fixpt_from_int(fs_params->max_display);
++ struct fixed31_32 min_display = dc_fixpt_from_fraction(fs_params->min_display, 10000);
++ struct fixed31_32 max_content = dc_fixpt_from_int(fs_params->max_content);
++ struct fixed31_32 min_content = dc_fixpt_from_fraction(fs_params->min_content, 10000);
++ struct fixed31_32 clip = dc_fixpt_one;
++ struct fixed31_32 output;
++ bool use_eetf = false;
++ struct fixed31_32 sdr_white_level = dc_fixpt_from_int(fs_params->sdr_white_level);
++
++ if (fs_params == NULL || fs_params->max_content == 0 ||
++ fs_params->max_display == 0)
++ return false;
++
++ if (fs_params->min_display > 1000) // cap at 0.1 at the bottom
++ min_display = dc_fixpt_from_fraction(1, 10);
++ if (fs_params->max_display < 100) // cap at 100 at the top
++ max_display = dc_fixpt_from_int(100);
++
++ if (fs_params->min_content < fs_params->min_display)
++ use_eetf = true;
++ else
++ min_content = min_display;
++
++ if (fs_params->max_content > fs_params->max_display)
++ use_eetf = true;
++ else
++ max_content = max_display;
++
++ rgb += 32; // first 32 points have problems with fixed point, too small
++ coord_x += 32;
++ for (i = 32; i <= hw_points_num; i++) {
++ if (use_eetf) {
++ /*max content is equal 1 */
++ scaledX1 = dc_fixpt_div(coord_x->x,
++ dc_fixpt_div(max_content, sdr_white_level));
++ hermite_spline_eetf(scaledX1, max_display, min_display,
++ max_content, &scaledX);
++ } else
++ scaledX = dc_fixpt_div(coord_x->x,
++ dc_fixpt_div(max_display, sdr_white_level));
++
++ if (dc_fixpt_lt(scaledX, clip)) {
++ if (dc_fixpt_lt(scaledX, dc_fixpt_zero))
++ output = dc_fixpt_zero;
++ else
++ output = calculate_gamma22(scaledX);
++
++ rgb->r = output;
++ rgb->g = output;
++ rgb->b = output;
++ } else {
++ rgb->r = clip;
++ rgb->g = clip;
++ rgb->b = clip;
++ }
++
++ ++coord_x;
++ ++rgb;
++ }
++
++ return true;
++}
++
+ static void build_degamma(struct pwl_float_data_ex *curve,
+ uint32_t hw_points_num,
+ const struct hw_x_point *coordinate_x, bool is_2_4)
+@@ -1358,7 +1524,8 @@ static bool map_regamma_hw_to_x_user(
+ #define _EXTRA_POINTS 3
+
+ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
+- const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed)
++ const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed,
++ const struct freesync_hdr_tf_params *fs_params)
+ {
+ struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts;
+ struct dividers dividers;
+@@ -1423,6 +1590,12 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
+ MAX_HW_POINTS,
+ coordinates_x,
+ output_tf->sdr_ref_white_level);
++ } else if (tf == TRANSFER_FUNCTION_GAMMA22 &&
++ fs_params != NULL) {
++ build_freesync_hdr(rgb_regamma,
++ MAX_HW_POINTS,
++ coordinates_x,
++ fs_params);
+ } else {
+ tf_pts->end_exponent = 0;
+ tf_pts->x_point_at_y1_red = 1;
+diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
+index 63ccb9c..a6e164d 100644
+--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
++++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
+@@ -73,12 +73,21 @@ struct regamma_lut {
+ };
+ };
+
++struct freesync_hdr_tf_params {
++ unsigned int sdr_white_level;
++ unsigned int min_content; // luminance in 1/10000 nits
++ unsigned int max_content; // luminance in nits
++ unsigned int min_display; // luminance in 1/10000 nits
++ unsigned int max_display; // luminance in nits
++};
++
+ void setup_x_points_distribution(void);
+ void precompute_pq(void);
+ void precompute_de_pq(void);
+
+ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
+- const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed);
++ const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed,
++ const struct freesync_hdr_tf_params *fs_params);
+
+ bool mod_color_calculate_degamma_params(struct dc_transfer_func *output_tf,
+ const struct dc_gamma *ramp, bool mapUserRamp);
+--
+2.7.4
+
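The fixed-point arithmetic in hermite_spline_eetf() above is easier to follow restated in plain math (a summary of the hunk, not part of the patch). With E_1 = PQ(in), maxLumPQ = PQ(Lmax_display / Lmax_content), minLumPQ = PQ(Lmin_display / Lmax_content) and b = 1/2, the code appears to implement a BT.2390-style Hermite-spline roll-off:

K_S = \frac{1 + b}{\mathrm{PQ}(1)} \cdot \mathit{maxLumPQ} - b

t = \frac{E_1 - K_S}{1 - K_S} \quad (K_S \le E_1 \le 1)

E_2 = K_S\,(2t^3 - 3t^2 + 1) + \mathit{maxLumPQ}\,(-2t^3 + 3t^2) + (1 - K_S)\,(t^3 - 2t^2 + t)

E_3 = E_2 + \mathit{minLumPQ}\,(1 - E_2)^4

For E_1 < K_S the input passes through unchanged (E_2 = E_1). The result is then linearized with the inverse PQ and divided by Lmax_display / Lmax_content before build_freesync_hdr() applies the 2.2 regamma from calculate_gamma22().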
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5585-drm-amd-display-initialize-dc_transfer_func-ctx.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5585-drm-amd-display-initialize-dc_transfer_func-ctx.patch
new file mode 100644
index 00000000..bc65f2f6
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5585-drm-amd-display-initialize-dc_transfer_func-ctx.patch
@@ -0,0 +1,47 @@
+From 8930929914d3b1ea34564fa6db967effc25a300a Mon Sep 17 00:00:00 2001
+From: David Francis <David.Francis@amd.com>
+Date: Fri, 21 Sep 2018 14:36:17 -0400
+Subject: [PATCH 5585/5725] drm/amd/display: initialize dc_transfer_func->ctx
+
+[Why]
+dc_transfer_func structs were being passed around with a null ctx
+pointer, waiting for unsuspecting programmers to dereference it.
+
+[How]
+Initialize the ctx pointer when the transfer function is created.
+
+Signed-off-by: David Francis <David.Francis@amd.com>
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 1 +
+ drivers/gpu/drm/amd/display/dc/core/dc_surface.c | 1 +
+ 2 files changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+index 2ac848a1..e113439 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+@@ -106,6 +106,7 @@ static void construct(struct dc_stream_state *stream,
+
+ stream->out_transfer_func = dc_create_transfer_func();
+ stream->out_transfer_func->type = TF_TYPE_BYPASS;
++ stream->out_transfer_func->ctx = stream->ctx;
+ }
+
+ static void destruct(struct dc_stream_state *stream)
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+index 8fb3aef..c60c9b4 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+@@ -44,6 +44,7 @@ static void construct(struct dc_context *ctx, struct dc_plane_state *plane_state
+
+ plane_state->in_transfer_func = dc_create_transfer_func();
+ plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
++ plane_state->in_transfer_func->ctx = ctx;
+ }
+
+ static void destruct(struct dc_plane_state *plane_state)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5586-drm-amd-display-expose-hwseq-functions-and-add-regis.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5586-drm-amd-display-expose-hwseq-functions-and-add-regis.patch
new file mode 100644
index 00000000..d77df950
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5586-drm-amd-display-expose-hwseq-functions-and-add-regis.patch
@@ -0,0 +1,220 @@
+From 70c664c4a187fa5b9a985cbd3dec4dad48c4e1d2 Mon Sep 17 00:00:00 2001
+From: Nevenko Stupar <Nevenko.Stupar@amd.com>
+Date: Tue, 25 Sep 2018 18:18:33 -0400
+Subject: [PATCH 5586/5725] drm/amd/display: expose hwseq functions and add
+ registers
+
+Make these functions non-static and define additional power-gating
+domain registers for future use:
+
+ is_lower_pipe_tree_visible()
+ is_upper_pipe_tree_visible()
+ is_pipe_tree_visible()
+ dcn10_program_pte_vm()
+ set_hdr_multiplier()
+ update_dchubp_dpp()
+ find_top_pipe_for_stream()
+
+Signed-off-by: Nevenko Stupar <Nevenko.Stupar@amd.com>
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc.h | 2 -
+ drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h | 50 ++++++++++++++++++++++
+ .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 14 +++---
+ .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h | 20 +++++++++
+ 4 files changed, 77 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 1e26623..f4db4d7 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -249,8 +249,6 @@ struct dc_debug_options {
+ bool disable_dmcu;
+ bool disable_psr;
+ bool force_abm_enable;
+- bool disable_hbup_pg;
+- bool disable_dpp_pg;
+ bool disable_stereo_support;
+ bool vsr_support;
+ bool performance_trace;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+index 64dc753..7d97787 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+@@ -233,6 +233,16 @@ struct dce_hwseq_registers {
+ uint32_t DOMAIN5_PG_CONFIG;
+ uint32_t DOMAIN6_PG_CONFIG;
+ uint32_t DOMAIN7_PG_CONFIG;
++ uint32_t DOMAIN8_PG_CONFIG;
++ uint32_t DOMAIN9_PG_CONFIG;
++ uint32_t DOMAIN10_PG_CONFIG;
++ uint32_t DOMAIN11_PG_CONFIG;
++ uint32_t DOMAIN16_PG_CONFIG;
++ uint32_t DOMAIN17_PG_CONFIG;
++ uint32_t DOMAIN18_PG_CONFIG;
++ uint32_t DOMAIN19_PG_CONFIG;
++ uint32_t DOMAIN20_PG_CONFIG;
++ uint32_t DOMAIN21_PG_CONFIG;
+ uint32_t DOMAIN0_PG_STATUS;
+ uint32_t DOMAIN1_PG_STATUS;
+ uint32_t DOMAIN2_PG_STATUS;
+@@ -241,6 +251,16 @@ struct dce_hwseq_registers {
+ uint32_t DOMAIN5_PG_STATUS;
+ uint32_t DOMAIN6_PG_STATUS;
+ uint32_t DOMAIN7_PG_STATUS;
++ uint32_t DOMAIN8_PG_STATUS;
++ uint32_t DOMAIN9_PG_STATUS;
++ uint32_t DOMAIN10_PG_STATUS;
++ uint32_t DOMAIN11_PG_STATUS;
++ uint32_t DOMAIN16_PG_STATUS;
++ uint32_t DOMAIN17_PG_STATUS;
++ uint32_t DOMAIN18_PG_STATUS;
++ uint32_t DOMAIN19_PG_STATUS;
++ uint32_t DOMAIN20_PG_STATUS;
++ uint32_t DOMAIN21_PG_STATUS;
+ uint32_t DIO_MEM_PWR_CTRL;
+ uint32_t DCCG_GATE_DISABLE_CNTL;
+ uint32_t DCCG_GATE_DISABLE_CNTL2;
+@@ -489,6 +509,26 @@ struct dce_hwseq_registers {
+ type DOMAIN6_POWER_GATE; \
+ type DOMAIN7_POWER_FORCEON; \
+ type DOMAIN7_POWER_GATE; \
++ type DOMAIN8_POWER_FORCEON; \
++ type DOMAIN8_POWER_GATE; \
++ type DOMAIN9_POWER_FORCEON; \
++ type DOMAIN9_POWER_GATE; \
++ type DOMAIN10_POWER_FORCEON; \
++ type DOMAIN10_POWER_GATE; \
++ type DOMAIN11_POWER_FORCEON; \
++ type DOMAIN11_POWER_GATE; \
++ type DOMAIN16_POWER_FORCEON; \
++ type DOMAIN16_POWER_GATE; \
++ type DOMAIN17_POWER_FORCEON; \
++ type DOMAIN17_POWER_GATE; \
++ type DOMAIN18_POWER_FORCEON; \
++ type DOMAIN18_POWER_GATE; \
++ type DOMAIN19_POWER_FORCEON; \
++ type DOMAIN19_POWER_GATE; \
++ type DOMAIN20_POWER_FORCEON; \
++ type DOMAIN20_POWER_GATE; \
++ type DOMAIN21_POWER_FORCEON; \
++ type DOMAIN21_POWER_GATE; \
+ type DOMAIN0_PGFSM_PWR_STATUS; \
+ type DOMAIN1_PGFSM_PWR_STATUS; \
+ type DOMAIN2_PGFSM_PWR_STATUS; \
+@@ -497,6 +537,16 @@ struct dce_hwseq_registers {
+ type DOMAIN5_PGFSM_PWR_STATUS; \
+ type DOMAIN6_PGFSM_PWR_STATUS; \
+ type DOMAIN7_PGFSM_PWR_STATUS; \
++ type DOMAIN8_PGFSM_PWR_STATUS; \
++ type DOMAIN9_PGFSM_PWR_STATUS; \
++ type DOMAIN10_PGFSM_PWR_STATUS; \
++ type DOMAIN11_PGFSM_PWR_STATUS; \
++ type DOMAIN16_PGFSM_PWR_STATUS; \
++ type DOMAIN17_PGFSM_PWR_STATUS; \
++ type DOMAIN18_PGFSM_PWR_STATUS; \
++ type DOMAIN19_PGFSM_PWR_STATUS; \
++ type DOMAIN20_PGFSM_PWR_STATUS; \
++ type DOMAIN21_PGFSM_PWR_STATUS; \
+ type DCFCLK_GATE_DIS; \
+ type DCHUBBUB_GLOBAL_TIMER_REFDIV; \
+ type VGA_TEST_ENABLE; \
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 6375241..d1f8c8e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -1603,7 +1603,7 @@ static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
+ }
+
+
+-static void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
++void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
+ {
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+ struct vm_system_aperture_param apt = { {{ 0 } } };
+@@ -1729,7 +1729,7 @@ static void dcn10_program_output_csc(struct dc *dc,
+ matrix);
+ }
+
+-static bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
++bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+ {
+ if (pipe_ctx->plane_state->visible)
+ return true;
+@@ -1738,7 +1738,7 @@ static bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+ return false;
+ }
+
+-static bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
++bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+ {
+ if (pipe_ctx->plane_state->visible)
+ return true;
+@@ -1747,7 +1747,7 @@ static bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+ return false;
+ }
+
+-static bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
++bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+ {
+ if (pipe_ctx->plane_state->visible)
+ return true;
+@@ -2035,7 +2035,7 @@ static void update_scaler(struct pipe_ctx *pipe_ctx)
+ pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
+ }
+
+-static void update_dchubp_dpp(
++void update_dchubp_dpp(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context)
+@@ -2182,7 +2182,7 @@ static void dcn10_blank_pixel_data(
+ }
+ }
+
+-static void set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
++void set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
+ {
+ struct fixed31_32 multiplier = dc_fixpt_from_fraction(
+ pipe_ctx->plane_state->sdr_white_level, 80);
+@@ -2257,7 +2257,7 @@ static void program_all_pipe_in_tree(
+ }
+ }
+
+-static struct pipe_ctx *find_top_pipe_for_stream(
++struct pipe_ctx *find_top_pipe_for_stream(
+ struct dc *dc,
+ struct dc_state *context,
+ const struct dc_stream_state *stream)
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+index 84d461e..5e5610c 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+@@ -51,4 +51,24 @@ void dcn10_get_hw_state(
+ char *pBuf, unsigned int bufSize,
+ unsigned int mask);
+
++bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
++
++bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
++
++bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
++
++void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp);
++
++void set_hdr_multiplier(struct pipe_ctx *pipe_ctx);
++
++void update_dchubp_dpp(
++ struct dc *dc,
++ struct pipe_ctx *pipe_ctx,
++ struct dc_state *context);
++
++struct pipe_ctx *find_top_pipe_for_stream(
++ struct dc *dc,
++ struct dc_state *context,
++ const struct dc_stream_state *stream);
++
+ #endif /* __DC_HWSS_DCN10_H__ */
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5587-drm-amd-display-fix-report-display-count-logic.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5587-drm-amd-display-fix-report-display-count-logic.patch
new file mode 100644
index 00000000..ead169a8
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5587-drm-amd-display-fix-report-display-count-logic.patch
@@ -0,0 +1,148 @@
+From 27063ce35b2acbf08520558666cb881e85139316 Mon Sep 17 00:00:00 2001
+From: Eric Yang <Eric.Yang2@amd.com>
+Date: Wed, 26 Sep 2018 15:52:19 -0400
+Subject: [PATCH 5587/5725] drm/amd/display: fix report display count logic
+
+[Why]
+The previous logic that updated the display count in
+commit_planes_do_stream_update doesn't cover all cases.
+
+[How]
+Update the display count as part of the clock updates. Count virtual
+streams as active to work around the headless situation.
+
+Signed-off-by: Eric Yang <Eric.Yang2@amd.com>
+Reviewed-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc.c | 35 +----------------
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dccg.c | 47 +++++++++++++++++++++--
+ 2 files changed, 45 insertions(+), 37 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 3842541..e7be6bf 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -1391,35 +1391,6 @@ static struct dc_stream_status *stream_get_status(
+
+ static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
+
+-static void notify_display_count_to_smu(
+- struct dc *dc,
+- struct dc_state *context)
+-{
+- int i, display_count;
+- struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
+-
+- /*
+- * if function pointer not set up, this message is
+- * sent as part of pplib_apply_display_requirements.
+- * So just return.
+- */
+- if (!pp_smu || !pp_smu->set_display_count)
+- return;
+-
+- display_count = 0;
+- for (i = 0; i < context->stream_count; i++) {
+- const struct dc_stream_state *stream = context->streams[i];
+-
+- /* only notify active stream */
+- if (stream->dpms_off)
+- continue;
+-
+- display_count++;
+- }
+-
+- pp_smu->set_display_count(&pp_smu->pp_smu, display_count);
+-}
+-
+ static void commit_planes_do_stream_update(struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update,
+@@ -1470,15 +1441,13 @@ static void commit_planes_do_stream_update(struct dc *dc,
+ if (stream_update->dpms_off) {
+ if (*stream_update->dpms_off) {
+ core_link_disable_stream(pipe_ctx, KEEP_ACQUIRED_RESOURCE);
+- notify_display_count_to_smu(dc, dc->current_state);
++ dc->hwss.optimize_bandwidth(dc, dc->current_state);
+ } else {
+- notify_display_count_to_smu(dc, dc->current_state);
++ dc->hwss.prepare_bandwidth(dc, dc->current_state);
+ core_link_enable_stream(dc->current_state, pipe_ctx);
+ }
+ }
+
+-
+-
+ if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
+ if (pipe_ctx->stream_res.tg->funcs->is_blanked) {
+ // if otg funcs defined check if blanked before programming
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dccg.c
+index abfe82f..0a9f944 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dccg.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dccg.c
+@@ -139,6 +139,29 @@ static void dcn1_ramp_up_dispclk_with_dpp(struct dccg *dccg, struct dc_clocks *n
+ dccg->clks.max_supported_dppclk_khz = new_clocks->max_supported_dppclk_khz;
+ }
+
++static int get_active_display_cnt(
++ struct dc *dc,
++ struct dc_state *context)
++{
++ int i, display_count;
++
++ display_count = 0;
++ for (i = 0; i < context->stream_count; i++) {
++ const struct dc_stream_state *stream = context->streams[i];
++
++ /*
++ * Only notify active stream or virtual stream.
++ * Need to notify virtual stream to work around
++ * headless case. HPD does not fire when system is in
++ * S0i2.
++ */
++ if (!stream->dpms_off || stream->signal == SIGNAL_TYPE_VIRTUAL)
++ display_count++;
++ }
++
++ return display_count;
++}
++
+ static void dcn1_update_clocks(struct dccg *dccg,
+ struct dc_state *context,
+ bool safe_to_lower)
+@@ -152,11 +175,27 @@ static void dcn1_update_clocks(struct dccg *dccg,
+ struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
+ bool send_request_to_increase = false;
+ bool send_request_to_lower = false;
++ int display_count;
++
++ bool enter_display_off = false;
++
++ display_count = get_active_display_cnt(dc, context);
++
++ if (display_count == 0)
++ enter_display_off = true;
+
+- if (new_clocks->phyclk_khz)
+- smu_req.display_count = 1;
+- else
+- smu_req.display_count = 0;
++ if (enter_display_off == safe_to_lower) {
++ /*
++ * Notify SMU active displays
++ * if function pointer not set up, this message is
++ * sent as part of pplib_apply_display_requirements.
++ */
++ if (pp_smu->set_display_count)
++ pp_smu->set_display_count(&pp_smu->pp_smu, display_count);
++ else
++ smu_req.display_count = display_count;
++
++ }
+
+ if (new_clocks->dispclk_khz > dccg->clks.dispclk_khz
+ || new_clocks->phyclk_khz > dccg->clks.phyclk_khz
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5588-drm-amd-display-Add-link-encoder-dp_ycbcr420_support.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5588-drm-amd-display-Add-link-encoder-dp_ycbcr420_support.patch
new file mode 100644
index 00000000..d69ffa29
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5588-drm-amd-display-Add-link-encoder-dp_ycbcr420_support.patch
@@ -0,0 +1,145 @@
+From 47eb73de936323ae1bb9d9d33bbed3c41dea3b8c Mon Sep 17 00:00:00 2001
+From: Eric Bernstein <eric.bernstein@amd.com>
+Date: Tue, 25 Sep 2018 15:56:41 -0400
+Subject: [PATCH 5588/5725] drm/amd/display: Add link encoder
+ dp_ycbcr420_supported feature flag
+
+[Why]
+A separate feature flag is needed for DP 4:2:0 support, since the
+existing flag is used for HDMI.
+
+[How]
+Add dp_ycbcr420_supported to struct encoder_feature_support and rename
+the existing flag to hdmi_ycbcr420_supported.
+
+Change-Id: Ied533de8fd0778beb88deed20121acd0a331d6e2
+Signed-off-by: Eric Bernstein <eric.bernstein@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 4 ++--
+ drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c | 2 +-
+ drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c | 3 ++-
+ drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c | 3 ++-
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c | 8 +++++---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 3 ++-
+ drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h | 3 ++-
+ 7 files changed, 16 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index a488601..a50cfa2 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -3910,12 +3910,12 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
+ case DRM_MODE_CONNECTOR_HDMIA:
+ aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+ aconnector->base.ycbcr_420_allowed =
+- link->link_enc->features.ycbcr420_supported ? true : false;
++ link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
+ break;
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+ aconnector->base.ycbcr_420_allowed =
+- link->link_enc->features.ycbcr420_supported ? true : false;
++ link->link_enc->features.dp_ycbcr420_supported ? true : false;
+ break;
+ case DRM_MODE_CONNECTOR_DVID:
+ aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+index 366bc8c..3e18ea8 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+@@ -645,7 +645,7 @@ static bool dce110_link_encoder_validate_hdmi_output(
+ return false;
+
+ /* DCE11 HW does not support 420 */
+- if (!enc110->base.features.ycbcr420_supported &&
++ if (!enc110->base.features.hdmi_ycbcr420_supported &&
+ crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ return false;
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+index b3d00d7..e73b139 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+@@ -551,7 +551,8 @@ static struct transform *dce112_transform_create(
+ static const struct encoder_feature_support link_enc_feature = {
+ .max_hdmi_deep_color = COLOR_DEPTH_121212,
+ .max_hdmi_pixel_clock = 600000,
+- .ycbcr420_supported = true,
++ .hdmi_ycbcr420_supported = true,
++ .dp_ycbcr420_supported = false,
+ .flags.bits.IS_HBR2_CAPABLE = true,
+ .flags.bits.IS_HBR3_CAPABLE = true,
+ .flags.bits.IS_TPS3_CAPABLE = true,
+diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+index 512a22d..a69e89f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+@@ -607,7 +607,8 @@ static struct audio *create_audio(
+ static const struct encoder_feature_support link_enc_feature = {
+ .max_hdmi_deep_color = COLOR_DEPTH_121212,
+ .max_hdmi_pixel_clock = 600000,
+- .ycbcr420_supported = true,
++ .hdmi_ycbcr420_supported = true,
++ .dp_ycbcr420_supported = false,
+ .flags.bits.IS_HBR2_CAPABLE = true,
+ .flags.bits.IS_HBR3_CAPABLE = true,
+ .flags.bits.IS_TPS3_CAPABLE = true,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+index ba6a8686..477ab92 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+@@ -589,7 +589,7 @@ static bool dcn10_link_encoder_validate_hdmi_output(
+ return false;
+
+ /* DCE11 HW does not support 420 */
+- if (!enc10->base.features.ycbcr420_supported &&
++ if (!enc10->base.features.hdmi_ycbcr420_supported &&
+ crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ return false;
+
+@@ -606,8 +606,10 @@ bool dcn10_link_encoder_validate_dp_output(
+ const struct dcn10_link_encoder *enc10,
+ const struct dc_crtc_timing *crtc_timing)
+ {
+- if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+- return false;
++ if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) {
++ if (!enc10->base.features.dp_ycbcr420_supported)
++ return false;
++ }
+
+ return true;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+index 55d2d17..3d9118e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+@@ -719,7 +719,8 @@ static struct timing_generator *dcn10_timing_generator_create(
+ static const struct encoder_feature_support link_enc_feature = {
+ .max_hdmi_deep_color = COLOR_DEPTH_121212,
+ .max_hdmi_pixel_clock = 600000,
+- .ycbcr420_supported = true,
++ .hdmi_ycbcr420_supported = true,
++ .dp_ycbcr420_supported = false,
+ .flags.bits.IS_HBR2_CAPABLE = true,
+ .flags.bits.IS_HBR3_CAPABLE = true,
+ .flags.bits.IS_TPS3_CAPABLE = true,
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+index e28e977..c20fdca 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+@@ -65,7 +65,8 @@ struct encoder_feature_support {
+
+ enum dc_color_depth max_hdmi_deep_color;
+ unsigned int max_hdmi_pixel_clock;
+- bool ycbcr420_supported;
++ bool hdmi_ycbcr420_supported;
++ bool dp_ycbcr420_supported;
+ };
+
+ union dpcd_psr_configuration {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5589-drm-amd-display-Retiring-set_display_requirements-in.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5589-drm-amd-display-Retiring-set_display_requirements-in.patch
new file mode 100644
index 00000000..75df7bba
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5589-drm-amd-display-Retiring-set_display_requirements-in.patch
@@ -0,0 +1,74 @@
+From 2d731a5977cdd94dfa8935c9d405ebc6001350c2 Mon Sep 17 00:00:00 2001
+From: Fatemeh Darbehani <fatemeh.darbehani@amd.com>
+Date: Wed, 26 Sep 2018 19:12:26 -0400
+Subject: [PATCH 5589/5725] drm/amd/display: Retiring set_display_requirements
+ in dm_pp_smu.h - part1
+
+[Why]
+In DCN we want direct DAL-to-SMU calls, with as little interference
+from pplib as possible. Mapping each pp_smu interface to one SMU
+message lets DAL own the sequencing of the different SMU messages and
+share it across operating systems. This also simplifies debugging,
+since DAL owns this interaction and there is no confusion about the
+division of ownership.
+
+[How]
+Part 1: Separate the set_min_deep_sleep_dcfclk message from the SMU
+messages that are sent as part of dcn10_pplib_apply_display_requirements.
+Notify the deep sleep dcfclk to the SMU directly.
+
+Signed-off-by: Fatemeh Darbehani <fatemeh.darbehani@amd.com>
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dccg.c | 20 ++++++++++++++++++++
+ 1 file changed, 20 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dccg.c
+index 0a9f944..5159a7e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dccg.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dccg.c
+@@ -162,6 +162,22 @@ static int get_active_display_cnt(
+ return display_count;
+ }
+
++static void notify_deep_sleep_dcfclk_to_smu(
++ struct pp_smu_funcs_rv *pp_smu, int min_dcef_deep_sleep_clk_khz)
++{
++ int min_dcef_deep_sleep_clk_mhz; //minimum required DCEF Deep Sleep clock in mhz
++ /*
++ * if function pointer not set up, this message is
++ * sent as part of pplib_apply_display_requirements.
++ * So just return.
++ */
++ if (!pp_smu || !pp_smu->set_min_deep_sleep_dcfclk)
++ return;
++
++ min_dcef_deep_sleep_clk_mhz = (min_dcef_deep_sleep_clk_khz + 999) / 1000; //Round up
++ pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, min_dcef_deep_sleep_clk_mhz);
++}
++
+ static void dcn1_update_clocks(struct dccg *dccg,
+ struct dc_state *context,
+ bool safe_to_lower)
+@@ -244,6 +260,8 @@ static void dcn1_update_clocks(struct dccg *dccg,
+ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+ if (pp_smu->set_display_requirement)
+ pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
++
++ notify_deep_sleep_dcfclk_to_smu(pp_smu, dccg->clks.dcfclk_deep_sleep_khz);
+ dcn1_pplib_apply_display_requirements(dc, context);
+ }
+
+@@ -264,6 +282,8 @@ static void dcn1_update_clocks(struct dccg *dccg,
+ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+ if (pp_smu->set_display_requirement)
+ pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
++
++ notify_deep_sleep_dcfclk_to_smu(pp_smu, dccg->clks.dcfclk_deep_sleep_khz);
+ dcn1_pplib_apply_display_requirements(dc, context);
+ }
+
+--
+2.7.4
+
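notify_deep_sleep_dcfclk_to_smu() above reports the requested floor in MHz, rounding the kHz value up so the requirement is never understated. The snippet below is a minimal standalone illustration of the same arithmetic, not part of the patch, with made-up values.

#include <stdio.h>

/* Same round-up as (min_dcef_deep_sleep_clk_khz + 999) / 1000 above. */
static int khz_to_mhz_round_up(int khz)
{
	return (khz + 999) / 1000;
}

int main(void)
{
	/* 300000 kHz -> 300 MHz, 300001 kHz -> 301 MHz */
	printf("%d %d\n",
	       khz_to_mhz_round_up(300000),
	       khz_to_mhz_round_up(300001));
	return 0;
}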
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5590-drm-amd-display-Retiring-set_display_requirements-in.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5590-drm-amd-display-Retiring-set_display_requirements-in.patch
new file mode 100644
index 00000000..f4f45cac
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5590-drm-amd-display-Retiring-set_display_requirements-in.patch
@@ -0,0 +1,125 @@
+From 0550aba51eb20ffa07b107bd2bd639c152d47577 Mon Sep 17 00:00:00 2001
+From: Fatemeh Darbehani <fatemeh.darbehani@amd.com>
+Date: Thu, 27 Sep 2018 17:06:15 -0400
+Subject: [PATCH 5590/5725] drm/amd/display: Retiring set_display_requirements
+ in dm_pp_smu.h - part2
+
+[Why]
+In DCN we want direct DAL-to-SMU calls, with as little interference
+from pplib as possible. Mapping each pp_smu interface to one SMU
+message lets DAL own the sequencing of the different SMU messages and
+share it across operating systems. This also simplifies debugging,
+since DAL owns this interaction and there is no confusion about the
+division of ownership.
+
+[How]
+Part 2: Separate the hard-min dcfclk message from the SMU messages
+that are sent as part of dm_pp_apply_clock_for_voltage_request.
+Directly notify the min dcfclk to the SMU.
+
+Signed-off-by: Fatemeh Darbehani <fatemeh.darbehani@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dccg.c | 28 +++++++++++++++++++++--
+ drivers/gpu/drm/amd/display/dc/dm_pp_smu.h | 6 ++---
+ 2 files changed, 29 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dccg.c
+index 5159a7e..5ffc367 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dccg.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dccg.c
+@@ -178,6 +178,24 @@ static void notify_deep_sleep_dcfclk_to_smu(
+ pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, min_dcef_deep_sleep_clk_mhz);
+ }
+
++static void notify_hard_min_dcfclk_to_smu(
++ struct pp_smu_funcs_rv *pp_smu, int min_dcf_clk_khz)
++{
++ int min_dcf_clk_mhz; //minimum required DCF clock in mhz
++
++ /*
++ * if function pointer not set up, this message is
++ * sent as part of pplib_apply_display_requirements.
++ * So just return.
++ */
++ if (!pp_smu || !pp_smu->set_hard_min_dcfclk_by_freq)
++ return;
++
++ min_dcf_clk_mhz = min_dcf_clk_khz / 1000;
++
++ pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, min_dcf_clk_mhz);
++}
++
+ static void dcn1_update_clocks(struct dccg *dccg,
+ struct dc_state *context,
+ bool safe_to_lower)
+@@ -225,6 +243,7 @@ static void dcn1_update_clocks(struct dccg *dccg,
+ send_request_to_lower = true;
+ }
+
++ // F Clock
+ if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, dccg->clks.fclk_khz)) {
+ dccg->clks.fclk_khz = new_clocks->fclk_khz;
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_FCLK;
+@@ -235,6 +254,7 @@ static void dcn1_update_clocks(struct dccg *dccg,
+ send_request_to_lower = true;
+ }
+
++ //DCF Clock
+ if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, dccg->clks.dcfclk_khz)) {
+ dccg->clks.dcfclk_khz = new_clocks->dcfclk_khz;
+ smu_req.hard_min_dcefclk_khz = new_clocks->dcfclk_khz;
+@@ -257,7 +277,9 @@ static void dcn1_update_clocks(struct dccg *dccg,
+ /*use dcfclk to request voltage*/
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
+ clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
+- dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
++
++ notify_hard_min_dcfclk_to_smu(pp_smu, clock_voltage_req.clocks_in_khz);
++
+ if (pp_smu->set_display_requirement)
+ pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
+
+@@ -279,7 +301,9 @@ static void dcn1_update_clocks(struct dccg *dccg,
+ /*use dcfclk to request voltage*/
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
+ clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
+- dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
++
++ notify_hard_min_dcfclk_to_smu(pp_smu, clock_voltage_req.clocks_in_khz);
++
+ if (pp_smu->set_display_requirement)
+ pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
+index f2ea845..e955029 100644
+--- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
++++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
+@@ -109,7 +109,7 @@ struct pp_smu_funcs_rv {
+ /* PPSMC_MSG_SetHardMinDcfclkByFreq
+ * fixed clock at requested freq, either from FCH bypass or DFS
+ */
+- void (*set_hard_min_dcfclk_by_freq)(struct pp_smu *pp, int khz);
++ void (*set_hard_min_dcfclk_by_freq)(struct pp_smu *pp, int mhz);
+
+ /* PPSMC_MSG_SetMinDeepSleepDcfclk
+ * when DF is in cstate, dcf clock is further divided down
+@@ -120,12 +120,12 @@ struct pp_smu_funcs_rv {
+ /* PPSMC_MSG_SetHardMinFclkByFreq
+ * FCLK will vary with DPM, but never below requested hard min
+ */
+- void (*set_hard_min_fclk_by_freq)(struct pp_smu *pp, int khz);
++ void (*set_hard_min_fclk_by_freq)(struct pp_smu *pp, int mhz);
+
+ /* PPSMC_MSG_SetHardMinSocclkByFreq
+ * Needed for DWB support
+ */
+- void (*set_hard_min_socclk_by_freq)(struct pp_smu *pp, int khz);
++ void (*set_hard_min_socclk_by_freq)(struct pp_smu *pp, int mhz);
+
+ /* PME w/a */
+ void (*set_pme_wa_enable)(struct pp_smu *pp);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5591-drm-amd-display-rename-dccg-to-clk_mgr.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5591-drm-amd-display-rename-dccg-to-clk_mgr.patch
new file mode 100644
index 00000000..1dc90f77
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5591-drm-amd-display-rename-dccg-to-clk_mgr.patch
@@ -0,0 +1,3638 @@
+From 699ae1c6ed5317a39cc550294bf67b9bb636c25e Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Fri, 28 Sep 2018 07:46:42 -0400
+Subject: [PATCH 5591/5725] drm/amd/display: rename dccg to clk_mgr
+
+In preparation for adding the actual dccg block, since the
+current implementation of dccg is more of a clock manager
+than a hw block.
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 2 +-
+ drivers/gpu/drm/amd/display/dc/dce/Makefile | 2 +-
+ drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c | 876 +++++++++++++++++++++
+ drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h | 165 ++++
+ drivers/gpu/drm/amd/display/dc/dce/dce_dccg.c | 876 ---------------------
+ drivers/gpu/drm/amd/display/dc/dce/dce_dccg.h | 165 ----
+ .../amd/display/dc/dce100/dce100_hw_sequencer.c | 4 +-
+ .../drm/amd/display/dc/dce100/dce100_resource.c | 16 +-
+ .../amd/display/dc/dce110/dce110_hw_sequencer.c | 4 +-
+ .../drm/amd/display/dc/dce110/dce110_resource.c | 16 +-
+ .../drm/amd/display/dc/dce112/dce112_resource.c | 16 +-
+ .../drm/amd/display/dc/dce120/dce120_resource.c | 10 +-
+ .../gpu/drm/amd/display/dc/dce80/dce80_resource.c | 24 +-
+ drivers/gpu/drm/amd/display/dc/dcn10/Makefile | 2 +-
+ .../gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c | 360 +++++++++
+ .../gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h | 37 +
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dccg.c | 361 ---------
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dccg.h | 37 -
+ .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 18 +-
+ .../gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 28 +-
+ drivers/gpu/drm/amd/display/dc/inc/core_types.h | 6 +-
+ drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h | 2 +-
+ drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h | 47 ++
+ drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h | 47 --
+ 24 files changed, 1560 insertions(+), 1561 deletions(-)
+ create mode 100644 drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
+ create mode 100644 drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h
+ delete mode 100644 drivers/gpu/drm/amd/display/dc/dce/dce_dccg.c
+ delete mode 100644 drivers/gpu/drm/amd/display/dc/dce/dce_dccg.h
+ create mode 100644 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c
+ create mode 100644 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h
+ delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dccg.c
+ delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dccg.h
+ create mode 100644 drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+ delete mode 100644 drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index 69bb5b0..4cfc20d 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -2054,7 +2054,7 @@ void dc_resource_state_construct(
+ const struct dc *dc,
+ struct dc_state *dst_ctx)
+ {
+- dst_ctx->dccg = dc->res_pool->dccg;
++ dst_ctx->dccg = dc->res_pool->clk_mgr;
+ }
+
+ enum dc_status dc_validate_global_state(
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile b/drivers/gpu/drm/amd/display/dc/dce/Makefile
+index f4ce7f5..6d7b64a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/dce/Makefile
+@@ -28,7 +28,7 @@
+
+ DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \
+ dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \
+-dce_dccg.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \
++dce_clk_mgr.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \
+ dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o
+
+ AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE))
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
+new file mode 100644
+index 0000000..02ddc94
+--- /dev/null
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
+@@ -0,0 +1,876 @@
++/*
++ * Copyright 2012-16 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++#include "dce_clk_mgr.h"
++
++#include "reg_helper.h"
++#include "dmcu.h"
++#include "core_types.h"
++#include "dal_asic_id.h"
++
++#define TO_DCE_CLK_MGR(clocks)\
++ container_of(clocks, struct dce_clk_mgr, base)
++
++#define REG(reg) \
++ (clk_mgr_dce->regs->reg)
++
++#undef FN
++#define FN(reg_name, field_name) \
++ clk_mgr_dce->clk_mgr_shift->field_name, clk_mgr_dce->clk_mgr_mask->field_name
++
++#define CTX \
++ clk_mgr_dce->base.ctx
++#define DC_LOGGER \
++ clk_mgr->ctx->logger
++
++/* Max clock values for each state indexed by "enum clocks_state": */
++static const struct state_dependent_clocks dce80_max_clks_by_state[] = {
++/* ClocksStateInvalid - should not be used */
++{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
++/* ClocksStateUltraLow - not expected to be used for DCE 8.0 */
++{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
++/* ClocksStateLow */
++{ .display_clk_khz = 352000, .pixel_clk_khz = 330000},
++/* ClocksStateNominal */
++{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 },
++/* ClocksStatePerformance */
++{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } };
++
++static const struct state_dependent_clocks dce110_max_clks_by_state[] = {
++/*ClocksStateInvalid - should not be used*/
++{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
++/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
++{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
++/*ClocksStateLow*/
++{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
++/*ClocksStateNominal*/
++{ .display_clk_khz = 467000, .pixel_clk_khz = 400000 },
++/*ClocksStatePerformance*/
++{ .display_clk_khz = 643000, .pixel_clk_khz = 400000 } };
++
++static const struct state_dependent_clocks dce112_max_clks_by_state[] = {
++/*ClocksStateInvalid - should not be used*/
++{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
++/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
++{ .display_clk_khz = 389189, .pixel_clk_khz = 346672 },
++/*ClocksStateLow*/
++{ .display_clk_khz = 459000, .pixel_clk_khz = 400000 },
++/*ClocksStateNominal*/
++{ .display_clk_khz = 667000, .pixel_clk_khz = 600000 },
++/*ClocksStatePerformance*/
++{ .display_clk_khz = 1132000, .pixel_clk_khz = 600000 } };
++
++static const struct state_dependent_clocks dce120_max_clks_by_state[] = {
++/*ClocksStateInvalid - should not be used*/
++{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
++/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
++{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
++/*ClocksStateLow*/
++{ .display_clk_khz = 460000, .pixel_clk_khz = 400000 },
++/*ClocksStateNominal*/
++{ .display_clk_khz = 670000, .pixel_clk_khz = 600000 },
++/*ClocksStatePerformance*/
++{ .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } };
++
++static int dentist_get_divider_from_did(int did)
++{
++ if (did < DENTIST_BASE_DID_1)
++ did = DENTIST_BASE_DID_1;
++ if (did > DENTIST_MAX_DID)
++ did = DENTIST_MAX_DID;
++
++ if (did < DENTIST_BASE_DID_2) {
++ return DENTIST_DIVIDER_RANGE_1_START + DENTIST_DIVIDER_RANGE_1_STEP
++ * (did - DENTIST_BASE_DID_1);
++ } else if (did < DENTIST_BASE_DID_3) {
++ return DENTIST_DIVIDER_RANGE_2_START + DENTIST_DIVIDER_RANGE_2_STEP
++ * (did - DENTIST_BASE_DID_2);
++ } else {
++ return DENTIST_DIVIDER_RANGE_3_START + DENTIST_DIVIDER_RANGE_3_STEP
++ * (did - DENTIST_BASE_DID_3);
++ }
++}
++
++/* SW will adjust DP REF Clock average value for all purposes
++ * (DP DTO / DP Audio DTO and DP GTC)
++ if clock is spread for all cases:
++ -if SS enabled on DP Ref clock and HW de-spreading enabled with SW
++ calculations for DS_INCR/DS_MODULO (this is planned to be default case)
++ -if SS enabled on DP Ref clock and HW de-spreading enabled with HW
++ calculations (not planned to be used, but average clock should still
++ be valid)
++ -if SS enabled on DP Ref clock and HW de-spreading disabled
++ (should not be case with CIK) then SW should program all rates
++ generated according to average value (case as with previous ASICs)
++ */
++static int clk_mgr_adjust_dp_ref_freq_for_ss(struct dce_clk_mgr *clk_mgr_dce, int dp_ref_clk_khz)
++{
++ if (clk_mgr_dce->ss_on_dprefclk && clk_mgr_dce->dprefclk_ss_divider != 0) {
++ struct fixed31_32 ss_percentage = dc_fixpt_div_int(
++ dc_fixpt_from_fraction(clk_mgr_dce->dprefclk_ss_percentage,
++ clk_mgr_dce->dprefclk_ss_divider), 200);
++ struct fixed31_32 adj_dp_ref_clk_khz;
++
++ ss_percentage = dc_fixpt_sub(dc_fixpt_one, ss_percentage);
++ adj_dp_ref_clk_khz = dc_fixpt_mul_int(ss_percentage, dp_ref_clk_khz);
++ dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
++ }
++ return dp_ref_clk_khz;
++}
++
++static int dce_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr)
++{
++ struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
++ int dprefclk_wdivider;
++ int dprefclk_src_sel;
++ int dp_ref_clk_khz = 600000;
++ int target_div;
++
++ /* ASSERT DP Reference Clock source is from DFS*/
++ REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel);
++ ASSERT(dprefclk_src_sel == 0);
++
++ /* Read the mmDENTIST_DISPCLK_CNTL to get the currently
++ * programmed DID DENTIST_DPREFCLK_WDIVIDER*/
++ REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider);
++
++ /* Convert DENTIST_DPREFCLK_WDIVIDERto actual divider*/
++ target_div = dentist_get_divider_from_did(dprefclk_wdivider);
++
++ /* Calculate the current DFS clock, in kHz.*/
++ dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
++ * clk_mgr_dce->dentist_vco_freq_khz) / target_div;
++
++ return clk_mgr_adjust_dp_ref_freq_for_ss(clk_mgr_dce, dp_ref_clk_khz);
++}
++
++int dce12_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr)
++{
++ struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
++
++ return clk_mgr_adjust_dp_ref_freq_for_ss(clk_mgr_dce, clk_mgr_dce->dprefclk_khz);
++}
++
++/* Unit: kHz. Before mode set, get the pixel clock from the context; the ASIC
++ * registers may not be programmed yet.
++ */
++static uint32_t get_max_pixel_clock_for_all_paths(struct dc_state *context)
++{
++ uint32_t max_pix_clk = 0;
++ int i;
++
++ for (i = 0; i < MAX_PIPES; i++) {
++ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
++
++ if (pipe_ctx->stream == NULL)
++ continue;
++
++ /* do not check the underlay */
++ if (pipe_ctx->top_pipe)
++ continue;
++
++ if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk > max_pix_clk)
++ max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
++
++ /* Raise clock state for HBR3/2 if required. Confirmed with HW: DCE/DPCS
++ * logic for HBR3 still needs Nominal (0.8V) on the VDDC rail.
++ */
++ if (dc_is_dp_signal(pipe_ctx->stream->signal) &&
++ pipe_ctx->stream_res.pix_clk_params.requested_sym_clk > max_pix_clk)
++ max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_sym_clk;
++ }
++
++ return max_pix_clk;
++}
++
++static enum dm_pp_clocks_state dce_get_required_clocks_state(
++ struct clk_mgr *clk_mgr,
++ struct dc_state *context)
++{
++ struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
++ int i;
++ enum dm_pp_clocks_state low_req_clk;
++ int max_pix_clk = get_max_pixel_clock_for_all_paths(context);
++
++ /* Iterate from highest supported to lowest valid state, and update
++ * lowest RequiredState with the lowest state that satisfies
++ * all required clocks
++ */
++ for (i = clk_mgr_dce->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--)
++ if (context->bw.dce.dispclk_khz >
++ clk_mgr_dce->max_clks_by_state[i].display_clk_khz
++ || max_pix_clk >
++ clk_mgr_dce->max_clks_by_state[i].pixel_clk_khz)
++ break;
++
++ low_req_clk = i + 1;
++ if (low_req_clk > clk_mgr_dce->max_clks_state) {
++ /* set max clock state for high phyclock, invalid on exceeding display clock */
++ if (clk_mgr_dce->max_clks_by_state[clk_mgr_dce->max_clks_state].display_clk_khz
++ < context->bw.dce.dispclk_khz)
++ low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
++ else
++ low_req_clk = clk_mgr_dce->max_clks_state;
++ }
++
++ return low_req_clk;
++}
++
++static int dce_set_clock(
++ struct clk_mgr *clk_mgr,
++ int requested_clk_khz)
++{
++ struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
++ struct bp_pixel_clock_parameters pxl_clk_params = { 0 };
++ struct dc_bios *bp = clk_mgr->ctx->dc_bios;
++ int actual_clock = requested_clk_khz;
++ struct dmcu *dmcu = clk_mgr_dce->base.ctx->dc->res_pool->dmcu;
++
++ /* Make sure requested clock isn't lower than minimum threshold*/
++ if (requested_clk_khz > 0)
++ requested_clk_khz = max(requested_clk_khz,
++ clk_mgr_dce->dentist_vco_freq_khz / 64);
++
++ /* Prepare to program display clock*/
++ pxl_clk_params.target_pixel_clock = requested_clk_khz;
++ pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
++
++ if (clk_mgr_dce->dfs_bypass_active)
++ pxl_clk_params.flags.SET_DISPCLK_DFS_BYPASS = true;
++
++ bp->funcs->program_display_engine_pll(bp, &pxl_clk_params);
++
++ if (clk_mgr_dce->dfs_bypass_active) {
++ /* Cache the fixed display clock*/
++ clk_mgr_dce->dfs_bypass_disp_clk =
++ pxl_clk_params.dfs_bypass_display_clock;
++ actual_clock = pxl_clk_params.dfs_bypass_display_clock;
++ }
++
++ /* On power down (HW reset), mark the clock state as ClocksStateNominal
++ * so that on resume we will call the pplib voltage regulator. */
++ if (requested_clk_khz == 0)
++ clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
++
++ dmcu->funcs->set_psr_wait_loop(dmcu, actual_clock / 1000 / 7);
++
++ return actual_clock;
++}
++
++int dce112_set_clock(struct clk_mgr *clk_mgr, int requested_clk_khz)
++{
++ struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
++ struct bp_set_dce_clock_parameters dce_clk_params;
++ struct dc_bios *bp = clk_mgr->ctx->dc_bios;
++ struct dc *core_dc = clk_mgr->ctx->dc;
++ struct dmcu *dmcu = core_dc->res_pool->dmcu;
++ int actual_clock = requested_clk_khz;
++ /* Prepare to program display clock*/
++ memset(&dce_clk_params, 0, sizeof(dce_clk_params));
++
++ /* Make sure requested clock isn't lower than minimum threshold*/
++ if (requested_clk_khz > 0)
++ requested_clk_khz = max(requested_clk_khz,
++ clk_mgr_dce->dentist_vco_freq_khz / 62);
++
++ dce_clk_params.target_clock_frequency = requested_clk_khz;
++ dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
++ dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK;
++
++ bp->funcs->set_dce_clock(bp, &dce_clk_params);
++ actual_clock = dce_clk_params.target_clock_frequency;
++
++ /* On power down (HW reset), mark the clock state as ClocksStateNominal
++ * so that on resume we will call the pplib voltage regulator. */
++ if (requested_clk_khz == 0)
++ clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
++
++ /*Program DP ref Clock*/
++ /*VBIOS will determine DPREFCLK frequency, so we don't set it*/
++ dce_clk_params.target_clock_frequency = 0;
++ dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK;
++ if (!ASICREV_IS_VEGA20_P(clk_mgr->ctx->asic_id.hw_internal_rev))
++ dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK =
++ (dce_clk_params.pll_id ==
++ CLOCK_SOURCE_COMBO_DISPLAY_PLL0);
++ else
++ dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = false;
++
++ bp->funcs->set_dce_clock(bp, &dce_clk_params);
++
++ if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
++ if (clk_mgr_dce->dfs_bypass_disp_clk != actual_clock)
++ dmcu->funcs->set_psr_wait_loop(dmcu,
++ actual_clock / 1000 / 7);
++ }
++
++ clk_mgr_dce->dfs_bypass_disp_clk = actual_clock;
++ return actual_clock;
++}
++
++static void dce_clock_read_integrated_info(struct dce_clk_mgr *clk_mgr_dce)
++{
++ struct dc_debug_options *debug = &clk_mgr_dce->base.ctx->dc->debug;
++ struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios;
++ struct integrated_info info = { { { 0 } } };
++ struct dc_firmware_info fw_info = { { 0 } };
++ int i;
++
++ if (bp->integrated_info)
++ info = *bp->integrated_info;
++
++ clk_mgr_dce->dentist_vco_freq_khz = info.dentist_vco_freq;
++ if (clk_mgr_dce->dentist_vco_freq_khz == 0) {
++ bp->funcs->get_firmware_info(bp, &fw_info);
++ clk_mgr_dce->dentist_vco_freq_khz =
++ fw_info.smu_gpu_pll_output_freq;
++ if (clk_mgr_dce->dentist_vco_freq_khz == 0)
++ clk_mgr_dce->dentist_vco_freq_khz = 3600000;
++ }
++
++ /*update the maximum display clock for each power state*/
++ for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
++ enum dm_pp_clocks_state clk_state = DM_PP_CLOCKS_STATE_INVALID;
++
++ switch (i) {
++ case 0:
++ clk_state = DM_PP_CLOCKS_STATE_ULTRA_LOW;
++ break;
++
++ case 1:
++ clk_state = DM_PP_CLOCKS_STATE_LOW;
++ break;
++
++ case 2:
++ clk_state = DM_PP_CLOCKS_STATE_NOMINAL;
++ break;
++
++ case 3:
++ clk_state = DM_PP_CLOCKS_STATE_PERFORMANCE;
++ break;
++
++ default:
++ clk_state = DM_PP_CLOCKS_STATE_INVALID;
++ break;
++ }
++
++ /*Do not allow bad VBIOS/SBIOS to override with invalid values,
++ * check for > 100MHz*/
++ if (info.disp_clk_voltage[i].max_supported_clk >= 100000)
++ clk_mgr_dce->max_clks_by_state[clk_state].display_clk_khz =
++ info.disp_clk_voltage[i].max_supported_clk;
++ }
++
++ if (!debug->disable_dfs_bypass && bp->integrated_info)
++ if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
++ clk_mgr_dce->dfs_bypass_enabled = true;
++}
++
++void dce_clock_read_ss_info(struct dce_clk_mgr *clk_mgr_dce)
++{
++ struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios;
++ int ss_info_num = bp->funcs->get_ss_entry_number(
++ bp, AS_SIGNAL_TYPE_GPU_PLL);
++
++ if (ss_info_num) {
++ struct spread_spectrum_info info = { { 0 } };
++ enum bp_result result = bp->funcs->get_spread_spectrum_info(
++ bp, AS_SIGNAL_TYPE_GPU_PLL, 0, &info);
++
++ /* VBIOS keeps an entry for GPU PLL SS even if SS is
++ * not enabled; in that case
++ * SSInfo.spreadSpectrumPercentage != 0 is the sign
++ * that SS is enabled
++ */
++ if (result == BP_RESULT_OK &&
++ info.spread_spectrum_percentage != 0) {
++ clk_mgr_dce->ss_on_dprefclk = true;
++ clk_mgr_dce->dprefclk_ss_divider = info.spread_percentage_divider;
++
++ if (info.type.CENTER_MODE == 0) {
++ /* TODO: Currently for DP Reference clock we
++ * need only SS percentage for
++ * downspread */
++ clk_mgr_dce->dprefclk_ss_percentage =
++ info.spread_spectrum_percentage;
++ }
++
++ return;
++ }
++
++ result = bp->funcs->get_spread_spectrum_info(
++ bp, AS_SIGNAL_TYPE_DISPLAY_PORT, 0, &info);
++
++ /* VBIOS keeps an entry for DPREFCLK SS even if SS is
++ * not enabled; in that case
++ * SSInfo.spreadSpectrumPercentage != 0 is the sign
++ * that SS is enabled
++ */
++ if (result == BP_RESULT_OK &&
++ info.spread_spectrum_percentage != 0) {
++ clk_mgr_dce->ss_on_dprefclk = true;
++ clk_mgr_dce->dprefclk_ss_divider = info.spread_percentage_divider;
++
++ if (info.type.CENTER_MODE == 0) {
++ /* Currently for DP Reference clock we
++ * need only SS percentage for
++ * downspread */
++ clk_mgr_dce->dprefclk_ss_percentage =
++ info.spread_spectrum_percentage;
++ }
++ }
++ }
++}
++
++void dce110_fill_display_configs(
++ const struct dc_state *context,
++ struct dm_pp_display_configuration *pp_display_cfg)
++{
++ int j;
++ int num_cfgs = 0;
++
++ for (j = 0; j < context->stream_count; j++) {
++ int k;
++
++ const struct dc_stream_state *stream = context->streams[j];
++ struct dm_pp_single_disp_config *cfg =
++ &pp_display_cfg->disp_configs[num_cfgs];
++ const struct pipe_ctx *pipe_ctx = NULL;
++
++ for (k = 0; k < MAX_PIPES; k++)
++ if (stream == context->res_ctx.pipe_ctx[k].stream) {
++ pipe_ctx = &context->res_ctx.pipe_ctx[k];
++ break;
++ }
++
++ ASSERT(pipe_ctx != NULL);
++
++ /* only notify active stream */
++ if (stream->dpms_off)
++ continue;
++
++ num_cfgs++;
++ cfg->signal = pipe_ctx->stream->signal;
++ cfg->pipe_idx = pipe_ctx->stream_res.tg->inst;
++ cfg->src_height = stream->src.height;
++ cfg->src_width = stream->src.width;
++ cfg->ddi_channel_mapping =
++ stream->sink->link->ddi_channel_mapping.raw;
++ cfg->transmitter =
++ stream->sink->link->link_enc->transmitter;
++ cfg->link_settings.lane_count =
++ stream->sink->link->cur_link_settings.lane_count;
++ cfg->link_settings.link_rate =
++ stream->sink->link->cur_link_settings.link_rate;
++ cfg->link_settings.link_spread =
++ stream->sink->link->cur_link_settings.link_spread;
++ cfg->sym_clock = stream->phy_pix_clk;
++ /* Round v_refresh*/
++ cfg->v_refresh = stream->timing.pix_clk_khz * 1000;
++ cfg->v_refresh /= stream->timing.h_total;
++ cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2)
++ / stream->timing.v_total;
++ }
++
++ pp_display_cfg->display_count = num_cfgs;
++}
++
++static uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context)
++{
++ uint8_t j;
++ uint32_t min_vertical_blank_time = -1;
++
++ for (j = 0; j < context->stream_count; j++) {
++ struct dc_stream_state *stream = context->streams[j];
++ uint32_t vertical_blank_in_pixels = 0;
++ uint32_t vertical_blank_time = 0;
++
++ vertical_blank_in_pixels = stream->timing.h_total *
++ (stream->timing.v_total
++ - stream->timing.v_addressable);
++
++ vertical_blank_time = vertical_blank_in_pixels
++ * 1000 / stream->timing.pix_clk_khz;
++
++ if (min_vertical_blank_time > vertical_blank_time)
++ min_vertical_blank_time = vertical_blank_time;
++ }
++
++ return min_vertical_blank_time;
++}
++
++static int determine_sclk_from_bounding_box(
++ const struct dc *dc,
++ int required_sclk)
++{
++ int i;
++
++ /*
++ * Some asics do not give us sclk levels, so we just report the actual
++ * required sclk
++ */
++ if (dc->sclk_lvls.num_levels == 0)
++ return required_sclk;
++
++ for (i = 0; i < dc->sclk_lvls.num_levels; i++) {
++ if (dc->sclk_lvls.clocks_in_khz[i] >= required_sclk)
++ return dc->sclk_lvls.clocks_in_khz[i];
++ }
++ /*
++ * even maximum level could not satisfy requirement, this
++ * is unexpected at this stage, should have been caught at
++ * validation time
++ */
++ ASSERT(0);
++ return dc->sclk_lvls.clocks_in_khz[dc->sclk_lvls.num_levels - 1];
++}
++
++static void dce_pplib_apply_display_requirements(
++ struct dc *dc,
++ struct dc_state *context)
++{
++ struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
++
++ pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context);
++
++ dce110_fill_display_configs(context, pp_display_cfg);
++
++ if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
++ dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
++}
++
++static void dce11_pplib_apply_display_requirements(
++ struct dc *dc,
++ struct dc_state *context)
++{
++ struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
++
++ pp_display_cfg->all_displays_in_sync =
++ context->bw.dce.all_displays_in_sync;
++ pp_display_cfg->nb_pstate_switch_disable =
++ context->bw.dce.nbp_state_change_enable == false;
++ pp_display_cfg->cpu_cc6_disable =
++ context->bw.dce.cpuc_state_change_enable == false;
++ pp_display_cfg->cpu_pstate_disable =
++ context->bw.dce.cpup_state_change_enable == false;
++ pp_display_cfg->cpu_pstate_separation_time =
++ context->bw.dce.blackout_recovery_time_us;
++
++ pp_display_cfg->min_memory_clock_khz = context->bw.dce.yclk_khz
++ / MEMORY_TYPE_MULTIPLIER_CZ;
++
++ pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box(
++ dc,
++ context->bw.dce.sclk_khz);
++
++ pp_display_cfg->min_engine_clock_deep_sleep_khz
++ = context->bw.dce.sclk_deep_sleep_khz;
++
++ pp_display_cfg->avail_mclk_switch_time_us =
++ dce110_get_min_vblank_time_us(context);
++ /* TODO: dce11.2*/
++ pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
++
++ pp_display_cfg->disp_clk_khz = dc->res_pool->clk_mgr->clks.dispclk_khz;
++
++ dce110_fill_display_configs(context, pp_display_cfg);
++
++ /* TODO: is this still applicable?*/
++ if (pp_display_cfg->display_count == 1) {
++ const struct dc_crtc_timing *timing =
++ &context->streams[0]->timing;
++
++ pp_display_cfg->crtc_index =
++ pp_display_cfg->disp_configs[0].pipe_idx;
++ pp_display_cfg->line_time_in_us = timing->h_total * 1000 / timing->pix_clk_khz;
++ }
++
++ if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
++ dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
++}
++
++static void dce_update_clocks(struct clk_mgr *clk_mgr,
++ struct dc_state *context,
++ bool safe_to_lower)
++{
++ struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
++ struct dm_pp_power_level_change_request level_change_req;
++ int unpatched_disp_clk = context->bw.dce.dispclk_khz;
++
++ /*TODO: W/A for dal3 linux, investigate why this works */
++ if (!clk_mgr_dce->dfs_bypass_active)
++ context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
++
++ level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
++ /* get max clock state from PPLIB */
++ if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower)
++ || level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) {
++ if (dm_pp_apply_power_level_change_request(clk_mgr->ctx, &level_change_req))
++ clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
++ }
++
++ if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) {
++ context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, context->bw.dce.dispclk_khz);
++ clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz;
++ }
++ dce_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
++
++ context->bw.dce.dispclk_khz = unpatched_disp_clk;
++}
++
++static void dce11_update_clocks(struct clk_mgr *clk_mgr,
++ struct dc_state *context,
++ bool safe_to_lower)
++{
++ struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
++ struct dm_pp_power_level_change_request level_change_req;
++
++ level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
++ /* get max clock state from PPLIB */
++ if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower)
++ || level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) {
++ if (dm_pp_apply_power_level_change_request(clk_mgr->ctx, &level_change_req))
++ clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
++ }
++
++ if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) {
++ context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, context->bw.dce.dispclk_khz);
++ clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz;
++ }
++ dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
++}
++
++static void dce112_update_clocks(struct clk_mgr *clk_mgr,
++ struct dc_state *context,
++ bool safe_to_lower)
++{
++ struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
++ struct dm_pp_power_level_change_request level_change_req;
++
++ level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
++ /* get max clock state from PPLIB */
++ if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower)
++ || level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) {
++ if (dm_pp_apply_power_level_change_request(clk_mgr->ctx, &level_change_req))
++ clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
++ }
++
++ if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) {
++ context->bw.dce.dispclk_khz = dce112_set_clock(clk_mgr, context->bw.dce.dispclk_khz);
++ clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz;
++ }
++ dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
++}
++
++static void dce12_update_clocks(struct clk_mgr *clk_mgr,
++ struct dc_state *context,
++ bool safe_to_lower)
++{
++ struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
++ struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
++ int max_pix_clk = get_max_pixel_clock_for_all_paths(context);
++ int unpatched_disp_clk = context->bw.dce.dispclk_khz;
++
++ /*TODO: W/A for dal3 linux, investigate why this works */
++ if (!clk_mgr_dce->dfs_bypass_active)
++ context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
++
++ if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) {
++ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
++ clock_voltage_req.clocks_in_khz = context->bw.dce.dispclk_khz;
++ context->bw.dce.dispclk_khz = dce112_set_clock(clk_mgr, context->bw.dce.dispclk_khz);
++ clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz;
++
++ dm_pp_apply_clock_for_voltage_request(clk_mgr->ctx, &clock_voltage_req);
++ }
++
++ if (should_set_clock(safe_to_lower, max_pix_clk, clk_mgr->clks.phyclk_khz)) {
++ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK;
++ clock_voltage_req.clocks_in_khz = max_pix_clk;
++ clk_mgr->clks.phyclk_khz = max_pix_clk;
++
++ dm_pp_apply_clock_for_voltage_request(clk_mgr->ctx, &clock_voltage_req);
++ }
++ dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
++
++ context->bw.dce.dispclk_khz = unpatched_disp_clk;
++}
++
++static const struct clk_mgr_funcs dce120_funcs = {
++ .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
++ .update_clocks = dce12_update_clocks
++};
++
++static const struct clk_mgr_funcs dce112_funcs = {
++ .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
++ .update_clocks = dce112_update_clocks
++};
++
++static const struct clk_mgr_funcs dce110_funcs = {
++ .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
++ .update_clocks = dce11_update_clocks,
++};
++
++static const struct clk_mgr_funcs dce_funcs = {
++ .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
++ .update_clocks = dce_update_clocks
++};
++
++static void dce_clk_mgr_construct(
++ struct dce_clk_mgr *clk_mgr_dce,
++ struct dc_context *ctx,
++ const struct clk_mgr_registers *regs,
++ const struct clk_mgr_shift *clk_shift,
++ const struct clk_mgr_mask *clk_mask)
++{
++ struct clk_mgr *base = &clk_mgr_dce->base;
++ struct dm_pp_static_clock_info static_clk_info = {0};
++
++ base->ctx = ctx;
++ base->funcs = &dce_funcs;
++
++ clk_mgr_dce->regs = regs;
++ clk_mgr_dce->clk_mgr_shift = clk_shift;
++ clk_mgr_dce->clk_mgr_mask = clk_mask;
++
++ clk_mgr_dce->dfs_bypass_disp_clk = 0;
++
++ clk_mgr_dce->dprefclk_ss_percentage = 0;
++ clk_mgr_dce->dprefclk_ss_divider = 1000;
++ clk_mgr_dce->ss_on_dprefclk = false;
++
++
++ if (dm_pp_get_static_clocks(ctx, &static_clk_info))
++ clk_mgr_dce->max_clks_state = static_clk_info.max_clocks_state;
++ else
++ clk_mgr_dce->max_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
++ clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_INVALID;
++
++ dce_clock_read_integrated_info(clk_mgr_dce);
++ dce_clock_read_ss_info(clk_mgr_dce);
++}
++
++struct clk_mgr *dce_clk_mgr_create(
++ struct dc_context *ctx,
++ const struct clk_mgr_registers *regs,
++ const struct clk_mgr_shift *clk_shift,
++ const struct clk_mgr_mask *clk_mask)
++{
++ struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);
++
++ if (clk_mgr_dce == NULL) {
++ BREAK_TO_DEBUGGER();
++ return NULL;
++ }
++
++ memcpy(clk_mgr_dce->max_clks_by_state,
++ dce80_max_clks_by_state,
++ sizeof(dce80_max_clks_by_state));
++
++ dce_clk_mgr_construct(
++ clk_mgr_dce, ctx, regs, clk_shift, clk_mask);
++
++ return &clk_mgr_dce->base;
++}
++
++struct clk_mgr *dce110_clk_mgr_create(
++ struct dc_context *ctx,
++ const struct clk_mgr_registers *regs,
++ const struct clk_mgr_shift *clk_shift,
++ const struct clk_mgr_mask *clk_mask)
++{
++ struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);
++
++ if (clk_mgr_dce == NULL) {
++ BREAK_TO_DEBUGGER();
++ return NULL;
++ }
++
++ memcpy(clk_mgr_dce->max_clks_by_state,
++ dce110_max_clks_by_state,
++ sizeof(dce110_max_clks_by_state));
++
++ dce_clk_mgr_construct(
++ clk_mgr_dce, ctx, regs, clk_shift, clk_mask);
++
++ clk_mgr_dce->base.funcs = &dce110_funcs;
++
++ return &clk_mgr_dce->base;
++}
++
++struct clk_mgr *dce112_clk_mgr_create(
++ struct dc_context *ctx,
++ const struct clk_mgr_registers *regs,
++ const struct clk_mgr_shift *clk_shift,
++ const struct clk_mgr_mask *clk_mask)
++{
++ struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);
++
++ if (clk_mgr_dce == NULL) {
++ BREAK_TO_DEBUGGER();
++ return NULL;
++ }
++
++ memcpy(clk_mgr_dce->max_clks_by_state,
++ dce112_max_clks_by_state,
++ sizeof(dce112_max_clks_by_state));
++
++ dce_clk_mgr_construct(
++ clk_mgr_dce, ctx, regs, clk_shift, clk_mask);
++
++ clk_mgr_dce->base.funcs = &dce112_funcs;
++
++ return &clk_mgr_dce->base;
++}
++
++struct clk_mgr *dce120_clk_mgr_create(struct dc_context *ctx)
++{
++ struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);
++
++ if (clk_mgr_dce == NULL) {
++ BREAK_TO_DEBUGGER();
++ return NULL;
++ }
++
++ memcpy(clk_mgr_dce->max_clks_by_state,
++ dce120_max_clks_by_state,
++ sizeof(dce120_max_clks_by_state));
++
++ dce_clk_mgr_construct(
++ clk_mgr_dce, ctx, NULL, NULL, NULL);
++
++ clk_mgr_dce->dprefclk_khz = 600000;
++ clk_mgr_dce->base.funcs = &dce120_funcs;
++
++ return &clk_mgr_dce->base;
++}
++
++void dce_clk_mgr_destroy(struct clk_mgr **clk_mgr)
++{
++ struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(*clk_mgr);
++
++ kfree(clk_mgr_dce);
++ *clk_mgr = NULL;
++}
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h
+new file mode 100644
+index 0000000..2668d56
+--- /dev/null
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h
+@@ -0,0 +1,165 @@
++/*
++ * Copyright 2012-16 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++
++#ifndef _DCE_CLK_MGR_H_
++#define _DCE_CLK_MGR_H_
++
++#include "../inc/hw/clk_mgr.h"
++
++#define MEMORY_TYPE_MULTIPLIER_CZ 4
++
++#define CLK_COMMON_REG_LIST_DCE_BASE() \
++ .DPREFCLK_CNTL = mmDPREFCLK_CNTL, \
++ .DENTIST_DISPCLK_CNTL = mmDENTIST_DISPCLK_CNTL
++
++#define CLK_COMMON_REG_LIST_DCN_BASE() \
++ SR(DENTIST_DISPCLK_CNTL)
++
++#define CLK_SF(reg_name, field_name, post_fix)\
++ .field_name = reg_name ## __ ## field_name ## post_fix
++
++#define CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh) \
++ CLK_SF(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, mask_sh), \
++ CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, mask_sh)
++
++#define CLK_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(mask_sh) \
++ CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, mask_sh),\
++ CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, mask_sh)
++
++#define CLK_REG_FIELD_LIST(type) \
++ type DPREFCLK_SRC_SEL; \
++ type DENTIST_DPREFCLK_WDIVIDER; \
++ type DENTIST_DISPCLK_WDIVIDER; \
++ type DENTIST_DISPCLK_CHG_DONE;
++
++struct clk_mgr_shift {
++ CLK_REG_FIELD_LIST(uint8_t)
++};
++
++struct clk_mgr_mask {
++ CLK_REG_FIELD_LIST(uint32_t)
++};
++
++struct clk_mgr_registers {
++ uint32_t DPREFCLK_CNTL;
++ uint32_t DENTIST_DISPCLK_CNTL;
++};
++
++struct state_dependent_clocks {
++ int display_clk_khz;
++ int pixel_clk_khz;
++};
++
++struct dce_clk_mgr {
++ struct clk_mgr base;
++ const struct clk_mgr_registers *regs;
++ const struct clk_mgr_shift *clk_mgr_shift;
++ const struct clk_mgr_mask *clk_mgr_mask;
++
++ struct state_dependent_clocks max_clks_by_state[DM_PP_CLOCKS_MAX_STATES];
++
++ int dentist_vco_freq_khz;
++
++ /* Cache the status of DFS-bypass feature*/
++ bool dfs_bypass_enabled;
++ /* True if the DFS-bypass feature is enabled and active. */
++ bool dfs_bypass_active;
++ /* Cache the display clock returned by VBIOS if DFS-bypass is enabled.
++ * This is basically "Crystal Frequency In KHz" (XTALIN) frequency */
++ int dfs_bypass_disp_clk;
++
++ /* Flag for Enabled SS on DPREFCLK */
++ bool ss_on_dprefclk;
++ /* DPREFCLK SS percentage (if down-spread enabled) */
++ int dprefclk_ss_percentage;
++ /* DPREFCLK SS percentage Divider (100 or 1000) */
++ int dprefclk_ss_divider;
++ int dprefclk_khz;
++
++ enum dm_pp_clocks_state max_clks_state;
++ enum dm_pp_clocks_state cur_min_clks_state;
++};
++
++/* Starting DID for each range */
++enum dentist_base_divider_id {
++ DENTIST_BASE_DID_1 = 0x08,
++ DENTIST_BASE_DID_2 = 0x40,
++ DENTIST_BASE_DID_3 = 0x60,
++ DENTIST_BASE_DID_4 = 0x7e,
++ DENTIST_MAX_DID = 0x7f
++};
++
++/* Starting point and step size for each divider range.*/
++enum dentist_divider_range {
++ DENTIST_DIVIDER_RANGE_1_START = 8, /* 2.00 */
++ DENTIST_DIVIDER_RANGE_1_STEP = 1, /* 0.25 */
++ DENTIST_DIVIDER_RANGE_2_START = 64, /* 16.00 */
++ DENTIST_DIVIDER_RANGE_2_STEP = 2, /* 0.50 */
++ DENTIST_DIVIDER_RANGE_3_START = 128, /* 32.00 */
++ DENTIST_DIVIDER_RANGE_3_STEP = 4, /* 1.00 */
++ DENTIST_DIVIDER_RANGE_4_START = 248, /* 62.00 */
++ DENTIST_DIVIDER_RANGE_4_STEP = 264, /* 66.00 */
++ DENTIST_DIVIDER_RANGE_SCALE_FACTOR = 4
++};
++
++static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
++{
++ return ((safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk);
++}
++
++void dce_clock_read_ss_info(struct dce_clk_mgr *dccg_dce);
++
++int dce12_get_dp_ref_freq_khz(struct clk_mgr *dccg);
++
++void dce110_fill_display_configs(
++ const struct dc_state *context,
++ struct dm_pp_display_configuration *pp_display_cfg);
++
++int dce112_set_clock(struct clk_mgr *dccg, int requested_clk_khz);
++
++struct clk_mgr *dce_clk_mgr_create(
++ struct dc_context *ctx,
++ const struct clk_mgr_registers *regs,
++ const struct clk_mgr_shift *clk_shift,
++ const struct clk_mgr_mask *clk_mask);
++
++struct clk_mgr *dce110_clk_mgr_create(
++ struct dc_context *ctx,
++ const struct clk_mgr_registers *regs,
++ const struct clk_mgr_shift *clk_shift,
++ const struct clk_mgr_mask *clk_mask);
++
++struct clk_mgr *dce112_clk_mgr_create(
++ struct dc_context *ctx,
++ const struct clk_mgr_registers *regs,
++ const struct clk_mgr_shift *clk_shift,
++ const struct clk_mgr_mask *clk_mask);
++
++struct clk_mgr *dce120_clk_mgr_create(struct dc_context *ctx);
++
++void dce_clk_mgr_destroy(struct clk_mgr **dccg);
++
++#endif /* _DCE_CLK_MGR_H_ */
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dccg.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dccg.c
+deleted file mode 100644
+index 97c143b..0000000
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dccg.c
++++ /dev/null
+@@ -1,876 +0,0 @@
+-/*
+- * Copyright 2012-16 Advanced Micro Devices, Inc.
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included in
+- * all copies or substantial portions of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+- * OTHER DEALINGS IN THE SOFTWARE.
+- *
+- * Authors: AMD
+- *
+- */
+-
+-#include "dce_dccg.h"
+-
+-#include "reg_helper.h"
+-#include "dmcu.h"
+-#include "core_types.h"
+-#include "dal_asic_id.h"
+-
+-#define TO_DCE_DCCG(clocks)\
+- container_of(clocks, struct dce_dccg, base)
+-
+-#define REG(reg) \
+- (dccg_dce->regs->reg)
+-
+-#undef FN
+-#define FN(reg_name, field_name) \
+- dccg_dce->dccg_shift->field_name, dccg_dce->dccg_mask->field_name
+-
+-#define CTX \
+- dccg_dce->base.ctx
+-#define DC_LOGGER \
+- dccg->ctx->logger
+-
+-/* Max clock values for each state indexed by "enum clocks_state": */
+-static const struct state_dependent_clocks dce80_max_clks_by_state[] = {
+-/* ClocksStateInvalid - should not be used */
+-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+-/* ClocksStateUltraLow - not expected to be used for DCE 8.0 */
+-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+-/* ClocksStateLow */
+-{ .display_clk_khz = 352000, .pixel_clk_khz = 330000},
+-/* ClocksStateNominal */
+-{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 },
+-/* ClocksStatePerformance */
+-{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } };
+-
+-static const struct state_dependent_clocks dce110_max_clks_by_state[] = {
+-/*ClocksStateInvalid - should not be used*/
+-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+-/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
+-{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
+-/*ClocksStateLow*/
+-{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
+-/*ClocksStateNominal*/
+-{ .display_clk_khz = 467000, .pixel_clk_khz = 400000 },
+-/*ClocksStatePerformance*/
+-{ .display_clk_khz = 643000, .pixel_clk_khz = 400000 } };
+-
+-static const struct state_dependent_clocks dce112_max_clks_by_state[] = {
+-/*ClocksStateInvalid - should not be used*/
+-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+-/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
+-{ .display_clk_khz = 389189, .pixel_clk_khz = 346672 },
+-/*ClocksStateLow*/
+-{ .display_clk_khz = 459000, .pixel_clk_khz = 400000 },
+-/*ClocksStateNominal*/
+-{ .display_clk_khz = 667000, .pixel_clk_khz = 600000 },
+-/*ClocksStatePerformance*/
+-{ .display_clk_khz = 1132000, .pixel_clk_khz = 600000 } };
+-
+-static const struct state_dependent_clocks dce120_max_clks_by_state[] = {
+-/*ClocksStateInvalid - should not be used*/
+-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+-/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
+-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+-/*ClocksStateLow*/
+-{ .display_clk_khz = 460000, .pixel_clk_khz = 400000 },
+-/*ClocksStateNominal*/
+-{ .display_clk_khz = 670000, .pixel_clk_khz = 600000 },
+-/*ClocksStatePerformance*/
+-{ .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } };
+-
+-static int dentist_get_divider_from_did(int did)
+-{
+- if (did < DENTIST_BASE_DID_1)
+- did = DENTIST_BASE_DID_1;
+- if (did > DENTIST_MAX_DID)
+- did = DENTIST_MAX_DID;
+-
+- if (did < DENTIST_BASE_DID_2) {
+- return DENTIST_DIVIDER_RANGE_1_START + DENTIST_DIVIDER_RANGE_1_STEP
+- * (did - DENTIST_BASE_DID_1);
+- } else if (did < DENTIST_BASE_DID_3) {
+- return DENTIST_DIVIDER_RANGE_2_START + DENTIST_DIVIDER_RANGE_2_STEP
+- * (did - DENTIST_BASE_DID_2);
+- } else {
+- return DENTIST_DIVIDER_RANGE_3_START + DENTIST_DIVIDER_RANGE_3_STEP
+- * (did - DENTIST_BASE_DID_3);
+- }
+-}
+-
+-/* SW will adjust DP REF Clock average value for all purposes
+- * (DP DTO / DP Audio DTO and DP GTC)
+- if clock is spread for all cases:
+- -if SS enabled on DP Ref clock and HW de-spreading enabled with SW
+- calculations for DS_INCR/DS_MODULO (this is planned to be default case)
+- -if SS enabled on DP Ref clock and HW de-spreading enabled with HW
+- calculations (not planned to be used, but average clock should still
+- be valid)
+- -if SS enabled on DP Ref clock and HW de-spreading disabled
+- (should not be case with CIK) then SW should program all rates
+- generated according to average value (case as with previous ASICs)
+- */
+-static int dccg_adjust_dp_ref_freq_for_ss(struct dce_dccg *dccg_dce, int dp_ref_clk_khz)
+-{
+- if (dccg_dce->ss_on_dprefclk && dccg_dce->dprefclk_ss_divider != 0) {
+- struct fixed31_32 ss_percentage = dc_fixpt_div_int(
+- dc_fixpt_from_fraction(dccg_dce->dprefclk_ss_percentage,
+- dccg_dce->dprefclk_ss_divider), 200);
+- struct fixed31_32 adj_dp_ref_clk_khz;
+-
+- ss_percentage = dc_fixpt_sub(dc_fixpt_one, ss_percentage);
+- adj_dp_ref_clk_khz = dc_fixpt_mul_int(ss_percentage, dp_ref_clk_khz);
+- dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
+- }
+- return dp_ref_clk_khz;
+-}
+-
+-static int dce_get_dp_ref_freq_khz(struct dccg *dccg)
+-{
+- struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
+- int dprefclk_wdivider;
+- int dprefclk_src_sel;
+- int dp_ref_clk_khz = 600000;
+- int target_div;
+-
+- /* ASSERT DP Reference Clock source is from DFS*/
+- REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel);
+- ASSERT(dprefclk_src_sel == 0);
+-
+- /* Read the mmDENTIST_DISPCLK_CNTL to get the currently
+- * programmed DID DENTIST_DPREFCLK_WDIVIDER*/
+- REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider);
+-
+- /* Convert DENTIST_DPREFCLK_WDIVIDERto actual divider*/
+- target_div = dentist_get_divider_from_did(dprefclk_wdivider);
+-
+- /* Calculate the current DFS clock, in kHz.*/
+- dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
+- * dccg_dce->dentist_vco_freq_khz) / target_div;
+-
+- return dccg_adjust_dp_ref_freq_for_ss(dccg_dce, dp_ref_clk_khz);
+-}
+-
+-int dce12_get_dp_ref_freq_khz(struct dccg *dccg)
+-{
+- struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
+-
+- return dccg_adjust_dp_ref_freq_for_ss(dccg_dce, dccg_dce->dprefclk_khz);
+-}
+-
+-/* unit: in_khz before mode set, get pixel clock from context. ASIC register
+- * may not be programmed yet
+- */
+-static uint32_t get_max_pixel_clock_for_all_paths(struct dc_state *context)
+-{
+- uint32_t max_pix_clk = 0;
+- int i;
+-
+- for (i = 0; i < MAX_PIPES; i++) {
+- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+-
+- if (pipe_ctx->stream == NULL)
+- continue;
+-
+- /* do not check under lay */
+- if (pipe_ctx->top_pipe)
+- continue;
+-
+- if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk > max_pix_clk)
+- max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
+-
+- /* raise clock state for HBR3/2 if required. Confirmed with HW DCE/DPCS
+- * logic for HBR3 still needs Nominal (0.8V) on VDDC rail
+- */
+- if (dc_is_dp_signal(pipe_ctx->stream->signal) &&
+- pipe_ctx->stream_res.pix_clk_params.requested_sym_clk > max_pix_clk)
+- max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_sym_clk;
+- }
+-
+- return max_pix_clk;
+-}
+-
+-static enum dm_pp_clocks_state dce_get_required_clocks_state(
+- struct dccg *dccg,
+- struct dc_state *context)
+-{
+- struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
+- int i;
+- enum dm_pp_clocks_state low_req_clk;
+- int max_pix_clk = get_max_pixel_clock_for_all_paths(context);
+-
+- /* Iterate from highest supported to lowest valid state, and update
+- * lowest RequiredState with the lowest state that satisfies
+- * all required clocks
+- */
+- for (i = dccg_dce->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--)
+- if (context->bw.dce.dispclk_khz >
+- dccg_dce->max_clks_by_state[i].display_clk_khz
+- || max_pix_clk >
+- dccg_dce->max_clks_by_state[i].pixel_clk_khz)
+- break;
+-
+- low_req_clk = i + 1;
+- if (low_req_clk > dccg_dce->max_clks_state) {
+- /* set max clock state for high phyclock, invalid on exceeding display clock */
+- if (dccg_dce->max_clks_by_state[dccg_dce->max_clks_state].display_clk_khz
+- < context->bw.dce.dispclk_khz)
+- low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
+- else
+- low_req_clk = dccg_dce->max_clks_state;
+- }
+-
+- return low_req_clk;
+-}
+-
+-static int dce_set_clock(
+- struct dccg *dccg,
+- int requested_clk_khz)
+-{
+- struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
+- struct bp_pixel_clock_parameters pxl_clk_params = { 0 };
+- struct dc_bios *bp = dccg->ctx->dc_bios;
+- int actual_clock = requested_clk_khz;
+- struct dmcu *dmcu = dccg_dce->base.ctx->dc->res_pool->dmcu;
+-
+- /* Make sure requested clock isn't lower than minimum threshold*/
+- if (requested_clk_khz > 0)
+- requested_clk_khz = max(requested_clk_khz,
+- dccg_dce->dentist_vco_freq_khz / 64);
+-
+- /* Prepare to program display clock*/
+- pxl_clk_params.target_pixel_clock = requested_clk_khz;
+- pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
+-
+- if (dccg_dce->dfs_bypass_active)
+- pxl_clk_params.flags.SET_DISPCLK_DFS_BYPASS = true;
+-
+- bp->funcs->program_display_engine_pll(bp, &pxl_clk_params);
+-
+- if (dccg_dce->dfs_bypass_active) {
+- /* Cache the fixed display clock*/
+- dccg_dce->dfs_bypass_disp_clk =
+- pxl_clk_params.dfs_bypass_display_clock;
+- actual_clock = pxl_clk_params.dfs_bypass_display_clock;
+- }
+-
+- /* from power down, we need mark the clock state as ClocksStateNominal
+- * from HWReset, so when resume we will call pplib voltage regulator.*/
+- if (requested_clk_khz == 0)
+- dccg_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
+-
+- dmcu->funcs->set_psr_wait_loop(dmcu, actual_clock / 1000 / 7);
+-
+- return actual_clock;
+-}
+-
+-int dce112_set_clock(struct dccg *dccg, int requested_clk_khz)
+-{
+- struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
+- struct bp_set_dce_clock_parameters dce_clk_params;
+- struct dc_bios *bp = dccg->ctx->dc_bios;
+- struct dc *core_dc = dccg->ctx->dc;
+- struct dmcu *dmcu = core_dc->res_pool->dmcu;
+- int actual_clock = requested_clk_khz;
+- /* Prepare to program display clock*/
+- memset(&dce_clk_params, 0, sizeof(dce_clk_params));
+-
+- /* Make sure requested clock isn't lower than minimum threshold*/
+- if (requested_clk_khz > 0)
+- requested_clk_khz = max(requested_clk_khz,
+- dccg_dce->dentist_vco_freq_khz / 62);
+-
+- dce_clk_params.target_clock_frequency = requested_clk_khz;
+- dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
+- dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK;
+-
+- bp->funcs->set_dce_clock(bp, &dce_clk_params);
+- actual_clock = dce_clk_params.target_clock_frequency;
+-
+- /* from power down, we need mark the clock state as ClocksStateNominal
+- * from HWReset, so when resume we will call pplib voltage regulator.*/
+- if (requested_clk_khz == 0)
+- dccg_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
+-
+- /*Program DP ref Clock*/
+- /*VBIOS will determine DPREFCLK frequency, so we don't set it*/
+- dce_clk_params.target_clock_frequency = 0;
+- dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK;
+- if (!ASICREV_IS_VEGA20_P(dccg->ctx->asic_id.hw_internal_rev))
+- dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK =
+- (dce_clk_params.pll_id ==
+- CLOCK_SOURCE_COMBO_DISPLAY_PLL0);
+- else
+- dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = false;
+-
+- bp->funcs->set_dce_clock(bp, &dce_clk_params);
+-
+- if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
+- if (dccg_dce->dfs_bypass_disp_clk != actual_clock)
+- dmcu->funcs->set_psr_wait_loop(dmcu,
+- actual_clock / 1000 / 7);
+- }
+-
+- dccg_dce->dfs_bypass_disp_clk = actual_clock;
+- return actual_clock;
+-}
+-
+-static void dce_clock_read_integrated_info(struct dce_dccg *dccg_dce)
+-{
+- struct dc_debug_options *debug = &dccg_dce->base.ctx->dc->debug;
+- struct dc_bios *bp = dccg_dce->base.ctx->dc_bios;
+- struct integrated_info info = { { { 0 } } };
+- struct dc_firmware_info fw_info = { { 0 } };
+- int i;
+-
+- if (bp->integrated_info)
+- info = *bp->integrated_info;
+-
+- dccg_dce->dentist_vco_freq_khz = info.dentist_vco_freq;
+- if (dccg_dce->dentist_vco_freq_khz == 0) {
+- bp->funcs->get_firmware_info(bp, &fw_info);
+- dccg_dce->dentist_vco_freq_khz =
+- fw_info.smu_gpu_pll_output_freq;
+- if (dccg_dce->dentist_vco_freq_khz == 0)
+- dccg_dce->dentist_vco_freq_khz = 3600000;
+- }
+-
+- /*update the maximum display clock for each power state*/
+- for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
+- enum dm_pp_clocks_state clk_state = DM_PP_CLOCKS_STATE_INVALID;
+-
+- switch (i) {
+- case 0:
+- clk_state = DM_PP_CLOCKS_STATE_ULTRA_LOW;
+- break;
+-
+- case 1:
+- clk_state = DM_PP_CLOCKS_STATE_LOW;
+- break;
+-
+- case 2:
+- clk_state = DM_PP_CLOCKS_STATE_NOMINAL;
+- break;
+-
+- case 3:
+- clk_state = DM_PP_CLOCKS_STATE_PERFORMANCE;
+- break;
+-
+- default:
+- clk_state = DM_PP_CLOCKS_STATE_INVALID;
+- break;
+- }
+-
+- /*Do not allow bad VBIOS/SBIOS to override with invalid values,
+- * check for > 100MHz*/
+- if (info.disp_clk_voltage[i].max_supported_clk >= 100000)
+- dccg_dce->max_clks_by_state[clk_state].display_clk_khz =
+- info.disp_clk_voltage[i].max_supported_clk;
+- }
+-
+- if (!debug->disable_dfs_bypass && bp->integrated_info)
+- if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
+- dccg_dce->dfs_bypass_enabled = true;
+-}
+-
+-void dce_clock_read_ss_info(struct dce_dccg *dccg_dce)
+-{
+- struct dc_bios *bp = dccg_dce->base.ctx->dc_bios;
+- int ss_info_num = bp->funcs->get_ss_entry_number(
+- bp, AS_SIGNAL_TYPE_GPU_PLL);
+-
+- if (ss_info_num) {
+- struct spread_spectrum_info info = { { 0 } };
+- enum bp_result result = bp->funcs->get_spread_spectrum_info(
+- bp, AS_SIGNAL_TYPE_GPU_PLL, 0, &info);
+-
+- /* Based on VBIOS, VBIOS will keep entry for GPU PLL SS
+- * even if SS not enabled and in that case
+- * SSInfo.spreadSpectrumPercentage !=0 would be sign
+- * that SS is enabled
+- */
+- if (result == BP_RESULT_OK &&
+- info.spread_spectrum_percentage != 0) {
+- dccg_dce->ss_on_dprefclk = true;
+- dccg_dce->dprefclk_ss_divider = info.spread_percentage_divider;
+-
+- if (info.type.CENTER_MODE == 0) {
+- /* TODO: Currently for DP Reference clock we
+- * need only SS percentage for
+- * downspread */
+- dccg_dce->dprefclk_ss_percentage =
+- info.spread_spectrum_percentage;
+- }
+-
+- return;
+- }
+-
+- result = bp->funcs->get_spread_spectrum_info(
+- bp, AS_SIGNAL_TYPE_DISPLAY_PORT, 0, &info);
+-
+- /* Based on VBIOS, VBIOS will keep entry for DPREFCLK SS
+- * even if SS not enabled and in that case
+- * SSInfo.spreadSpectrumPercentage !=0 would be sign
+- * that SS is enabled
+- */
+- if (result == BP_RESULT_OK &&
+- info.spread_spectrum_percentage != 0) {
+- dccg_dce->ss_on_dprefclk = true;
+- dccg_dce->dprefclk_ss_divider = info.spread_percentage_divider;
+-
+- if (info.type.CENTER_MODE == 0) {
+- /* Currently for DP Reference clock we
+- * need only SS percentage for
+- * downspread */
+- dccg_dce->dprefclk_ss_percentage =
+- info.spread_spectrum_percentage;
+- }
+- }
+- }
+-}
+-
+-void dce110_fill_display_configs(
+- const struct dc_state *context,
+- struct dm_pp_display_configuration *pp_display_cfg)
+-{
+- int j;
+- int num_cfgs = 0;
+-
+- for (j = 0; j < context->stream_count; j++) {
+- int k;
+-
+- const struct dc_stream_state *stream = context->streams[j];
+- struct dm_pp_single_disp_config *cfg =
+- &pp_display_cfg->disp_configs[num_cfgs];
+- const struct pipe_ctx *pipe_ctx = NULL;
+-
+- for (k = 0; k < MAX_PIPES; k++)
+- if (stream == context->res_ctx.pipe_ctx[k].stream) {
+- pipe_ctx = &context->res_ctx.pipe_ctx[k];
+- break;
+- }
+-
+- ASSERT(pipe_ctx != NULL);
+-
+- /* only notify active stream */
+- if (stream->dpms_off)
+- continue;
+-
+- num_cfgs++;
+- cfg->signal = pipe_ctx->stream->signal;
+- cfg->pipe_idx = pipe_ctx->stream_res.tg->inst;
+- cfg->src_height = stream->src.height;
+- cfg->src_width = stream->src.width;
+- cfg->ddi_channel_mapping =
+- stream->sink->link->ddi_channel_mapping.raw;
+- cfg->transmitter =
+- stream->sink->link->link_enc->transmitter;
+- cfg->link_settings.lane_count =
+- stream->sink->link->cur_link_settings.lane_count;
+- cfg->link_settings.link_rate =
+- stream->sink->link->cur_link_settings.link_rate;
+- cfg->link_settings.link_spread =
+- stream->sink->link->cur_link_settings.link_spread;
+- cfg->sym_clock = stream->phy_pix_clk;
+- /* Round v_refresh*/
+- cfg->v_refresh = stream->timing.pix_clk_khz * 1000;
+- cfg->v_refresh /= stream->timing.h_total;
+- cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2)
+- / stream->timing.v_total;
+- }
+-
+- pp_display_cfg->display_count = num_cfgs;
+-}
+-
+-static uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context)
+-{
+- uint8_t j;
+- uint32_t min_vertical_blank_time = -1;
+-
+- for (j = 0; j < context->stream_count; j++) {
+- struct dc_stream_state *stream = context->streams[j];
+- uint32_t vertical_blank_in_pixels = 0;
+- uint32_t vertical_blank_time = 0;
+-
+- vertical_blank_in_pixels = stream->timing.h_total *
+- (stream->timing.v_total
+- - stream->timing.v_addressable);
+-
+- vertical_blank_time = vertical_blank_in_pixels
+- * 1000 / stream->timing.pix_clk_khz;
+-
+- if (min_vertical_blank_time > vertical_blank_time)
+- min_vertical_blank_time = vertical_blank_time;
+- }
+-
+- return min_vertical_blank_time;
+-}
+-
+-static int determine_sclk_from_bounding_box(
+- const struct dc *dc,
+- int required_sclk)
+-{
+- int i;
+-
+- /*
+- * Some asics do not give us sclk levels, so we just report the actual
+- * required sclk
+- */
+- if (dc->sclk_lvls.num_levels == 0)
+- return required_sclk;
+-
+- for (i = 0; i < dc->sclk_lvls.num_levels; i++) {
+- if (dc->sclk_lvls.clocks_in_khz[i] >= required_sclk)
+- return dc->sclk_lvls.clocks_in_khz[i];
+- }
+- /*
+- * even maximum level could not satisfy requirement, this
+- * is unexpected at this stage, should have been caught at
+- * validation time
+- */
+- ASSERT(0);
+- return dc->sclk_lvls.clocks_in_khz[dc->sclk_lvls.num_levels - 1];
+-}
+-
+-static void dce_pplib_apply_display_requirements(
+- struct dc *dc,
+- struct dc_state *context)
+-{
+- struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
+-
+- pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context);
+-
+- dce110_fill_display_configs(context, pp_display_cfg);
+-
+- if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
+- dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
+-}
+-
+-static void dce11_pplib_apply_display_requirements(
+- struct dc *dc,
+- struct dc_state *context)
+-{
+- struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
+-
+- pp_display_cfg->all_displays_in_sync =
+- context->bw.dce.all_displays_in_sync;
+- pp_display_cfg->nb_pstate_switch_disable =
+- context->bw.dce.nbp_state_change_enable == false;
+- pp_display_cfg->cpu_cc6_disable =
+- context->bw.dce.cpuc_state_change_enable == false;
+- pp_display_cfg->cpu_pstate_disable =
+- context->bw.dce.cpup_state_change_enable == false;
+- pp_display_cfg->cpu_pstate_separation_time =
+- context->bw.dce.blackout_recovery_time_us;
+-
+- pp_display_cfg->min_memory_clock_khz = context->bw.dce.yclk_khz
+- / MEMORY_TYPE_MULTIPLIER_CZ;
+-
+- pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box(
+- dc,
+- context->bw.dce.sclk_khz);
+-
+- pp_display_cfg->min_engine_clock_deep_sleep_khz
+- = context->bw.dce.sclk_deep_sleep_khz;
+-
+- pp_display_cfg->avail_mclk_switch_time_us =
+- dce110_get_min_vblank_time_us(context);
+- /* TODO: dce11.2*/
+- pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
+-
+- pp_display_cfg->disp_clk_khz = dc->res_pool->dccg->clks.dispclk_khz;
+-
+- dce110_fill_display_configs(context, pp_display_cfg);
+-
+- /* TODO: is this still applicable?*/
+- if (pp_display_cfg->display_count == 1) {
+- const struct dc_crtc_timing *timing =
+- &context->streams[0]->timing;
+-
+- pp_display_cfg->crtc_index =
+- pp_display_cfg->disp_configs[0].pipe_idx;
+- pp_display_cfg->line_time_in_us = timing->h_total * 1000 / timing->pix_clk_khz;
+- }
+-
+- if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
+- dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
+-}
+-
+-static void dce_update_clocks(struct dccg *dccg,
+- struct dc_state *context,
+- bool safe_to_lower)
+-{
+- struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
+- struct dm_pp_power_level_change_request level_change_req;
+- int unpatched_disp_clk = context->bw.dce.dispclk_khz;
+-
+- /*TODO: W/A for dal3 linux, investigate why this works */
+- if (!dccg_dce->dfs_bypass_active)
+- context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
+-
+- level_change_req.power_level = dce_get_required_clocks_state(dccg, context);
+- /* get max clock state from PPLIB */
+- if ((level_change_req.power_level < dccg_dce->cur_min_clks_state && safe_to_lower)
+- || level_change_req.power_level > dccg_dce->cur_min_clks_state) {
+- if (dm_pp_apply_power_level_change_request(dccg->ctx, &level_change_req))
+- dccg_dce->cur_min_clks_state = level_change_req.power_level;
+- }
+-
+- if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, dccg->clks.dispclk_khz)) {
+- context->bw.dce.dispclk_khz = dce_set_clock(dccg, context->bw.dce.dispclk_khz);
+- dccg->clks.dispclk_khz = context->bw.dce.dispclk_khz;
+- }
+- dce_pplib_apply_display_requirements(dccg->ctx->dc, context);
+-
+- context->bw.dce.dispclk_khz = unpatched_disp_clk;
+-}
+-
+-static void dce11_update_clocks(struct dccg *dccg,
+- struct dc_state *context,
+- bool safe_to_lower)
+-{
+- struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
+- struct dm_pp_power_level_change_request level_change_req;
+-
+- level_change_req.power_level = dce_get_required_clocks_state(dccg, context);
+- /* get max clock state from PPLIB */
+- if ((level_change_req.power_level < dccg_dce->cur_min_clks_state && safe_to_lower)
+- || level_change_req.power_level > dccg_dce->cur_min_clks_state) {
+- if (dm_pp_apply_power_level_change_request(dccg->ctx, &level_change_req))
+- dccg_dce->cur_min_clks_state = level_change_req.power_level;
+- }
+-
+- if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, dccg->clks.dispclk_khz)) {
+- context->bw.dce.dispclk_khz = dce_set_clock(dccg, context->bw.dce.dispclk_khz);
+- dccg->clks.dispclk_khz = context->bw.dce.dispclk_khz;
+- }
+- dce11_pplib_apply_display_requirements(dccg->ctx->dc, context);
+-}
+-
+-static void dce112_update_clocks(struct dccg *dccg,
+- struct dc_state *context,
+- bool safe_to_lower)
+-{
+- struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
+- struct dm_pp_power_level_change_request level_change_req;
+-
+- level_change_req.power_level = dce_get_required_clocks_state(dccg, context);
+- /* get max clock state from PPLIB */
+- if ((level_change_req.power_level < dccg_dce->cur_min_clks_state && safe_to_lower)
+- || level_change_req.power_level > dccg_dce->cur_min_clks_state) {
+- if (dm_pp_apply_power_level_change_request(dccg->ctx, &level_change_req))
+- dccg_dce->cur_min_clks_state = level_change_req.power_level;
+- }
+-
+- if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, dccg->clks.dispclk_khz)) {
+- context->bw.dce.dispclk_khz = dce112_set_clock(dccg, context->bw.dce.dispclk_khz);
+- dccg->clks.dispclk_khz = context->bw.dce.dispclk_khz;
+- }
+- dce11_pplib_apply_display_requirements(dccg->ctx->dc, context);
+-}
+-
+-static void dce12_update_clocks(struct dccg *dccg,
+- struct dc_state *context,
+- bool safe_to_lower)
+-{
+- struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
+- struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
+- int max_pix_clk = get_max_pixel_clock_for_all_paths(context);
+- int unpatched_disp_clk = context->bw.dce.dispclk_khz;
+-
+- /*TODO: W/A for dal3 linux, investigate why this works */
+- if (!dccg_dce->dfs_bypass_active)
+- context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
+-
+- if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, dccg->clks.dispclk_khz)) {
+- clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
+- clock_voltage_req.clocks_in_khz = context->bw.dce.dispclk_khz;
+- context->bw.dce.dispclk_khz = dce112_set_clock(dccg, context->bw.dce.dispclk_khz);
+- dccg->clks.dispclk_khz = context->bw.dce.dispclk_khz;
+-
+- dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+- }
+-
+- if (should_set_clock(safe_to_lower, max_pix_clk, dccg->clks.phyclk_khz)) {
+- clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK;
+- clock_voltage_req.clocks_in_khz = max_pix_clk;
+- dccg->clks.phyclk_khz = max_pix_clk;
+-
+- dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+- }
+- dce11_pplib_apply_display_requirements(dccg->ctx->dc, context);
+-
+- context->bw.dce.dispclk_khz = unpatched_disp_clk;
+-}
+-
+-static const struct dccg_funcs dce120_funcs = {
+- .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+- .update_clocks = dce12_update_clocks
+-};
+-
+-static const struct dccg_funcs dce112_funcs = {
+- .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
+- .update_clocks = dce112_update_clocks
+-};
+-
+-static const struct dccg_funcs dce110_funcs = {
+- .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
+- .update_clocks = dce11_update_clocks,
+-};
+-
+-static const struct dccg_funcs dce_funcs = {
+- .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
+- .update_clocks = dce_update_clocks
+-};
+-
+-static void dce_dccg_construct(
+- struct dce_dccg *dccg_dce,
+- struct dc_context *ctx,
+- const struct dccg_registers *regs,
+- const struct dccg_shift *clk_shift,
+- const struct dccg_mask *clk_mask)
+-{
+- struct dccg *base = &dccg_dce->base;
+- struct dm_pp_static_clock_info static_clk_info = {0};
+-
+- base->ctx = ctx;
+- base->funcs = &dce_funcs;
+-
+- dccg_dce->regs = regs;
+- dccg_dce->dccg_shift = clk_shift;
+- dccg_dce->dccg_mask = clk_mask;
+-
+- dccg_dce->dfs_bypass_disp_clk = 0;
+-
+- dccg_dce->dprefclk_ss_percentage = 0;
+- dccg_dce->dprefclk_ss_divider = 1000;
+- dccg_dce->ss_on_dprefclk = false;
+-
+-
+- if (dm_pp_get_static_clocks(ctx, &static_clk_info))
+- dccg_dce->max_clks_state = static_clk_info.max_clocks_state;
+- else
+- dccg_dce->max_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
+- dccg_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_INVALID;
+-
+- dce_clock_read_integrated_info(dccg_dce);
+- dce_clock_read_ss_info(dccg_dce);
+-}
+-
+-struct dccg *dce_dccg_create(
+- struct dc_context *ctx,
+- const struct dccg_registers *regs,
+- const struct dccg_shift *clk_shift,
+- const struct dccg_mask *clk_mask)
+-{
+- struct dce_dccg *dccg_dce = kzalloc(sizeof(*dccg_dce), GFP_KERNEL);
+-
+- if (dccg_dce == NULL) {
+- BREAK_TO_DEBUGGER();
+- return NULL;
+- }
+-
+- memcpy(dccg_dce->max_clks_by_state,
+- dce80_max_clks_by_state,
+- sizeof(dce80_max_clks_by_state));
+-
+- dce_dccg_construct(
+- dccg_dce, ctx, regs, clk_shift, clk_mask);
+-
+- return &dccg_dce->base;
+-}
+-
+-struct dccg *dce110_dccg_create(
+- struct dc_context *ctx,
+- const struct dccg_registers *regs,
+- const struct dccg_shift *clk_shift,
+- const struct dccg_mask *clk_mask)
+-{
+- struct dce_dccg *dccg_dce = kzalloc(sizeof(*dccg_dce), GFP_KERNEL);
+-
+- if (dccg_dce == NULL) {
+- BREAK_TO_DEBUGGER();
+- return NULL;
+- }
+-
+- memcpy(dccg_dce->max_clks_by_state,
+- dce110_max_clks_by_state,
+- sizeof(dce110_max_clks_by_state));
+-
+- dce_dccg_construct(
+- dccg_dce, ctx, regs, clk_shift, clk_mask);
+-
+- dccg_dce->base.funcs = &dce110_funcs;
+-
+- return &dccg_dce->base;
+-}
+-
+-struct dccg *dce112_dccg_create(
+- struct dc_context *ctx,
+- const struct dccg_registers *regs,
+- const struct dccg_shift *clk_shift,
+- const struct dccg_mask *clk_mask)
+-{
+- struct dce_dccg *dccg_dce = kzalloc(sizeof(*dccg_dce), GFP_KERNEL);
+-
+- if (dccg_dce == NULL) {
+- BREAK_TO_DEBUGGER();
+- return NULL;
+- }
+-
+- memcpy(dccg_dce->max_clks_by_state,
+- dce112_max_clks_by_state,
+- sizeof(dce112_max_clks_by_state));
+-
+- dce_dccg_construct(
+- dccg_dce, ctx, regs, clk_shift, clk_mask);
+-
+- dccg_dce->base.funcs = &dce112_funcs;
+-
+- return &dccg_dce->base;
+-}
+-
+-struct dccg *dce120_dccg_create(struct dc_context *ctx)
+-{
+- struct dce_dccg *dccg_dce = kzalloc(sizeof(*dccg_dce), GFP_KERNEL);
+-
+- if (dccg_dce == NULL) {
+- BREAK_TO_DEBUGGER();
+- return NULL;
+- }
+-
+- memcpy(dccg_dce->max_clks_by_state,
+- dce120_max_clks_by_state,
+- sizeof(dce120_max_clks_by_state));
+-
+- dce_dccg_construct(
+- dccg_dce, ctx, NULL, NULL, NULL);
+-
+- dccg_dce->dprefclk_khz = 600000;
+- dccg_dce->base.funcs = &dce120_funcs;
+-
+- return &dccg_dce->base;
+-}
+-
+-void dce_dccg_destroy(struct dccg **dccg)
+-{
+- struct dce_dccg *dccg_dce = TO_DCE_DCCG(*dccg);
+-
+- kfree(dccg_dce);
+- *dccg = NULL;
+-}
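
The update_clocks paths removed here all gate their register writes on should_set_clock(), declared in the header deleted just below: a clock is raised as soon as the calculated value exceeds the current one, but it is only lowered when the caller passes safe_to_lower. A minimal sketch of that contract, with hypothetical clock values, for illustration only:

/*
 * Policy used by the dce*_update_clocks paths above (illustrative values):
 *
 *   prepare_bandwidth passes safe_to_lower = false:
 *     should_set_clock(false, 300000, 400000) -> false  (never lower early)
 *     should_set_clock(false, 500000, 400000) -> true   (raise immediately)
 *
 *   optimize_bandwidth passes safe_to_lower = true:
 *     should_set_clock(true, 300000, 400000)  -> true   (safe to drop now)
 */
static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
{
	return ((safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk);
}
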
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dccg.h b/drivers/gpu/drm/amd/display/dc/dce/dce_dccg.h
+deleted file mode 100644
+index 786d963..0000000
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dccg.h
++++ /dev/null
+@@ -1,165 +0,0 @@
+-/*
+- * Copyright 2012-16 Advanced Micro Devices, Inc.
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included in
+- * all copies or substantial portions of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+- * OTHER DEALINGS IN THE SOFTWARE.
+- *
+- * Authors: AMD
+- *
+- */
+-
+-
+-#ifndef _DCE_DCCG_H_
+-#define _DCE_DCCG_H_
+-
+-#include "dccg.h"
+-
+-#define MEMORY_TYPE_MULTIPLIER_CZ 4
+-
+-#define CLK_COMMON_REG_LIST_DCE_BASE() \
+- .DPREFCLK_CNTL = mmDPREFCLK_CNTL, \
+- .DENTIST_DISPCLK_CNTL = mmDENTIST_DISPCLK_CNTL
+-
+-#define CLK_COMMON_REG_LIST_DCN_BASE() \
+- SR(DENTIST_DISPCLK_CNTL)
+-
+-#define CLK_SF(reg_name, field_name, post_fix)\
+- .field_name = reg_name ## __ ## field_name ## post_fix
+-
+-#define CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh) \
+- CLK_SF(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, mask_sh), \
+- CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, mask_sh)
+-
+-#define CLK_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(mask_sh) \
+- CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, mask_sh),\
+- CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, mask_sh)
+-
+-#define CLK_REG_FIELD_LIST(type) \
+- type DPREFCLK_SRC_SEL; \
+- type DENTIST_DPREFCLK_WDIVIDER; \
+- type DENTIST_DISPCLK_WDIVIDER; \
+- type DENTIST_DISPCLK_CHG_DONE;
+-
+-struct dccg_shift {
+- CLK_REG_FIELD_LIST(uint8_t)
+-};
+-
+-struct dccg_mask {
+- CLK_REG_FIELD_LIST(uint32_t)
+-};
+-
+-struct dccg_registers {
+- uint32_t DPREFCLK_CNTL;
+- uint32_t DENTIST_DISPCLK_CNTL;
+-};
+-
+-struct state_dependent_clocks {
+- int display_clk_khz;
+- int pixel_clk_khz;
+-};
+-
+-struct dce_dccg {
+- struct dccg base;
+- const struct dccg_registers *regs;
+- const struct dccg_shift *dccg_shift;
+- const struct dccg_mask *dccg_mask;
+-
+- struct state_dependent_clocks max_clks_by_state[DM_PP_CLOCKS_MAX_STATES];
+-
+- int dentist_vco_freq_khz;
+-
+- /* Cache the status of DFS-bypass feature*/
+- bool dfs_bypass_enabled;
+- /* True if the DFS-bypass feature is enabled and active. */
+- bool dfs_bypass_active;
+- /* Cache the display clock returned by VBIOS if DFS-bypass is enabled.
+- * This is basically "Crystal Frequency In KHz" (XTALIN) frequency */
+- int dfs_bypass_disp_clk;
+-
+- /* Flag for Enabled SS on DPREFCLK */
+- bool ss_on_dprefclk;
+- /* DPREFCLK SS percentage (if down-spread enabled) */
+- int dprefclk_ss_percentage;
+- /* DPREFCLK SS percentage Divider (100 or 1000) */
+- int dprefclk_ss_divider;
+- int dprefclk_khz;
+-
+- enum dm_pp_clocks_state max_clks_state;
+- enum dm_pp_clocks_state cur_min_clks_state;
+-};
+-
+-/* Starting DID for each range */
+-enum dentist_base_divider_id {
+- DENTIST_BASE_DID_1 = 0x08,
+- DENTIST_BASE_DID_2 = 0x40,
+- DENTIST_BASE_DID_3 = 0x60,
+- DENTIST_BASE_DID_4 = 0x7e,
+- DENTIST_MAX_DID = 0x7f
+-};
+-
+-/* Starting point and step size for each divider range.*/
+-enum dentist_divider_range {
+- DENTIST_DIVIDER_RANGE_1_START = 8, /* 2.00 */
+- DENTIST_DIVIDER_RANGE_1_STEP = 1, /* 0.25 */
+- DENTIST_DIVIDER_RANGE_2_START = 64, /* 16.00 */
+- DENTIST_DIVIDER_RANGE_2_STEP = 2, /* 0.50 */
+- DENTIST_DIVIDER_RANGE_3_START = 128, /* 32.00 */
+- DENTIST_DIVIDER_RANGE_3_STEP = 4, /* 1.00 */
+- DENTIST_DIVIDER_RANGE_4_START = 248, /* 62.00 */
+- DENTIST_DIVIDER_RANGE_4_STEP = 264, /* 66.00 */
+- DENTIST_DIVIDER_RANGE_SCALE_FACTOR = 4
+-};
+-
+-static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
+-{
+- return ((safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk);
+-}
+-
+-void dce_clock_read_ss_info(struct dce_dccg *dccg_dce);
+-
+-int dce12_get_dp_ref_freq_khz(struct dccg *dccg);
+-
+-void dce110_fill_display_configs(
+- const struct dc_state *context,
+- struct dm_pp_display_configuration *pp_display_cfg);
+-
+-int dce112_set_clock(struct dccg *dccg, int requested_clk_khz);
+-
+-struct dccg *dce_dccg_create(
+- struct dc_context *ctx,
+- const struct dccg_registers *regs,
+- const struct dccg_shift *clk_shift,
+- const struct dccg_mask *clk_mask);
+-
+-struct dccg *dce110_dccg_create(
+- struct dc_context *ctx,
+- const struct dccg_registers *regs,
+- const struct dccg_shift *clk_shift,
+- const struct dccg_mask *clk_mask);
+-
+-struct dccg *dce112_dccg_create(
+- struct dc_context *ctx,
+- const struct dccg_registers *regs,
+- const struct dccg_shift *clk_shift,
+- const struct dccg_mask *clk_mask);
+-
+-struct dccg *dce120_dccg_create(struct dc_context *ctx);
+-
+-void dce_dccg_destroy(struct dccg **dccg);
+-
+-#endif /* _DCE_DCCG_H_ */
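
The dentist_base_divider_id and dentist_divider_range enums deleted above encode a piecewise-linear mapping from a DENTIST divider ID (DID) to a divider value scaled by DENTIST_DIVIDER_RANGE_SCALE_FACTOR (4), so a scaled value of 8 means a real divider of 2.00. The helper that walks these ranges lives in the clock-manager C file and is not shown here; the sketch below is an illustrative reconstruction from the enum comments, and the DP reference clock formula at the end is an assumption about how callers such as dce12_get_dp_ref_freq_khz typically use it.

/* Illustrative reconstruction, not the driver's exact helper. */
static int dentist_divider_from_did(int did)
{
	if (did < DENTIST_BASE_DID_1)
		did = DENTIST_BASE_DID_1;
	if (did > DENTIST_MAX_DID)
		did = DENTIST_MAX_DID;

	if (did < DENTIST_BASE_DID_2)	/* 2.00 .. 15.75 in 0.25 steps */
		return DENTIST_DIVIDER_RANGE_1_START +
			DENTIST_DIVIDER_RANGE_1_STEP * (did - DENTIST_BASE_DID_1);
	if (did < DENTIST_BASE_DID_3)	/* 16.00 .. 31.50 in 0.50 steps */
		return DENTIST_DIVIDER_RANGE_2_START +
			DENTIST_DIVIDER_RANGE_2_STEP * (did - DENTIST_BASE_DID_2);
	if (did < DENTIST_BASE_DID_4)	/* 32.00 .. 61.00 in 1.00 steps */
		return DENTIST_DIVIDER_RANGE_3_START +
			DENTIST_DIVIDER_RANGE_3_STEP * (did - DENTIST_BASE_DID_3);
	return DENTIST_DIVIDER_RANGE_4_START +	/* final range */
		DENTIST_DIVIDER_RANGE_4_STEP * (did - DENTIST_BASE_DID_4);
}

/*
 * Assumed usage (example numbers): with a 3600000 kHz (3.6 GHz) DENTIST VCO
 * and DID 0x18 the scaled divider is 24 (6.00), giving
 *   dp_ref_khz = 4 * 3600000 / 24 = 600000 kHz,
 * which matches the 600 MHz dprefclk default seen in the constructors above.
 */
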
+diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
+index 5055026..bc50a8e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
+@@ -111,8 +111,8 @@ void dce100_prepare_bandwidth(
+ {
+ dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
+
+- dc->res_pool->dccg->funcs->update_clocks(
+- dc->res_pool->dccg,
++ dc->res_pool->clk_mgr->funcs->update_clocks(
++ dc->res_pool->clk_mgr,
+ context,
+ false);
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+index 36015f7..6ae51a5 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+@@ -22,7 +22,6 @@
+ * Authors: AMD
+ *
+ */
+-#include "../dce/dce_dccg.h"
+ #include "dm_services.h"
+
+ #include "link_encoder.h"
+@@ -37,6 +36,7 @@
+ #include "dce/dce_link_encoder.h"
+ #include "dce/dce_stream_encoder.h"
+
++#include "dce/dce_clk_mgr.h"
+ #include "dce/dce_mem_input.h"
+ #include "dce/dce_ipp.h"
+ #include "dce/dce_transform.h"
+@@ -137,15 +137,15 @@ static const struct dce110_timing_generator_offsets dce100_tg_offsets[] = {
+ .reg_name = mm ## block ## id ## _ ## reg_name
+
+
+-static const struct dccg_registers disp_clk_regs = {
++static const struct clk_mgr_registers disp_clk_regs = {
+ CLK_COMMON_REG_LIST_DCE_BASE()
+ };
+
+-static const struct dccg_shift disp_clk_shift = {
++static const struct clk_mgr_shift disp_clk_shift = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+ };
+
+-static const struct dccg_mask disp_clk_mask = {
++static const struct clk_mgr_mask disp_clk_mask = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+ };
+
+@@ -722,8 +722,8 @@ static void destruct(struct dce110_resource_pool *pool)
+ dce_aud_destroy(&pool->base.audios[i]);
+ }
+
+- if (pool->base.dccg != NULL)
+- dce_dccg_destroy(&pool->base.dccg);
++ if (pool->base.clk_mgr != NULL)
++ dce_clk_mgr_destroy(&pool->base.clk_mgr);
+
+ if (pool->base.abm != NULL)
+ dce_abm_destroy(&pool->base.abm);
+@@ -907,11 +907,11 @@ static bool construct(
+ }
+ }
+
+- pool->base.dccg = dce_dccg_create(ctx,
++ pool->base.clk_mgr = dce_clk_mgr_create(ctx,
+ &disp_clk_regs,
+ &disp_clk_shift,
+ &disp_clk_mask);
+- if (pool->base.dccg == NULL) {
++ if (pool->base.clk_mgr == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index e58a34e..d8b0533 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -2354,7 +2354,7 @@ void dce110_prepare_bandwidth(
+ struct dc *dc,
+ struct dc_state *context)
+ {
+- struct dccg *dccg = dc->res_pool->dccg;
++ struct clk_mgr *dccg = dc->res_pool->clk_mgr;
+
+ dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
+
+@@ -2368,7 +2368,7 @@ void dce110_optimize_bandwidth(
+ struct dc *dc,
+ struct dc_state *context)
+ {
+- struct dccg *dccg = dc->res_pool->dccg;
++ struct clk_mgr *dccg = dc->res_pool->clk_mgr;
+
+ dce110_set_displaymarks(dc, context);
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+index d78b064..c5714eb 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+@@ -31,7 +31,7 @@
+ #include "resource.h"
+ #include "dce110/dce110_resource.h"
+
+-#include "../dce/dce_dccg.h"
++#include "dce/dce_clk_mgr.h"
+ #include "include/irq_service_interface.h"
+ #include "dce/dce_audio.h"
+ #include "dce110/dce110_timing_generator.h"
+@@ -148,15 +148,15 @@ static const struct dce110_timing_generator_offsets dce110_tg_offsets[] = {
+ #define SRI(reg_name, block, id)\
+ .reg_name = mm ## block ## id ## _ ## reg_name
+
+-static const struct dccg_registers disp_clk_regs = {
++static const struct clk_mgr_registers disp_clk_regs = {
+ CLK_COMMON_REG_LIST_DCE_BASE()
+ };
+
+-static const struct dccg_shift disp_clk_shift = {
++static const struct clk_mgr_shift disp_clk_shift = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+ };
+
+-static const struct dccg_mask disp_clk_mask = {
++static const struct clk_mgr_mask disp_clk_mask = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+ };
+
+@@ -760,8 +760,8 @@ static void destruct(struct dce110_resource_pool *pool)
+ if (pool->base.dmcu != NULL)
+ dce_dmcu_destroy(&pool->base.dmcu);
+
+- if (pool->base.dccg != NULL)
+- dce_dccg_destroy(&pool->base.dccg);
++ if (pool->base.clk_mgr != NULL)
++ dce_clk_mgr_destroy(&pool->base.clk_mgr);
+
+ if (pool->base.irqs != NULL) {
+ dal_irq_service_destroy(&pool->base.irqs);
+@@ -1256,11 +1256,11 @@ static bool construct(
+ }
+ }
+
+- pool->base.dccg = dce110_dccg_create(ctx,
++ pool->base.clk_mgr = dce110_clk_mgr_create(ctx,
+ &disp_clk_regs,
+ &disp_clk_shift,
+ &disp_clk_mask);
+- if (pool->base.dccg == NULL) {
++ if (pool->base.clk_mgr == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+index e73b139..969d4e7 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+@@ -23,7 +23,6 @@
+ *
+ */
+
+-#include "../dce/dce_dccg.h"
+ #include "dm_services.h"
+
+ #include "link_encoder.h"
+@@ -36,6 +35,7 @@
+
+ #include "irq/dce110/irq_service_dce110.h"
+
++#include "dce/dce_clk_mgr.h"
+ #include "dce/dce_mem_input.h"
+ #include "dce/dce_transform.h"
+ #include "dce/dce_link_encoder.h"
+@@ -148,15 +148,15 @@ static const struct dce110_timing_generator_offsets dce112_tg_offsets[] = {
+ .reg_name = mm ## block ## id ## _ ## reg_name
+
+
+-static const struct dccg_registers disp_clk_regs = {
++static const struct clk_mgr_registers disp_clk_regs = {
+ CLK_COMMON_REG_LIST_DCE_BASE()
+ };
+
+-static const struct dccg_shift disp_clk_shift = {
++static const struct clk_mgr_shift disp_clk_shift = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+ };
+
+-static const struct dccg_mask disp_clk_mask = {
++static const struct clk_mgr_mask disp_clk_mask = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+ };
+
+@@ -750,8 +750,8 @@ static void destruct(struct dce110_resource_pool *pool)
+ if (pool->base.dmcu != NULL)
+ dce_dmcu_destroy(&pool->base.dmcu);
+
+- if (pool->base.dccg != NULL)
+- dce_dccg_destroy(&pool->base.dccg);
++ if (pool->base.clk_mgr != NULL)
++ dce_clk_mgr_destroy(&pool->base.clk_mgr);
+
+ if (pool->base.irqs != NULL) {
+ dal_irq_service_destroy(&pool->base.irqs);
+@@ -1199,11 +1199,11 @@ static bool construct(
+ }
+ }
+
+- pool->base.dccg = dce112_dccg_create(ctx,
++ pool->base.clk_mgr = dce112_clk_mgr_create(ctx,
+ &disp_clk_regs,
+ &disp_clk_shift,
+ &disp_clk_mask);
+- if (pool->base.dccg == NULL) {
++ if (pool->base.clk_mgr == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+index a69e89f..f126966 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+@@ -32,7 +32,6 @@
+ #include "include/irq_service_interface.h"
+ #include "dce120_resource.h"
+
+-#include "../dce/dce_dccg.h"
+ #include "dce112/dce112_resource.h"
+
+ #include "dce110/dce110_resource.h"
+@@ -48,6 +47,7 @@
+ #include "dce120/dce120_hw_sequencer.h"
+ #include "dce/dce_transform.h"
+
++#include "dce/dce_clk_mgr.h"
+ #include "dce/dce_audio.h"
+ #include "dce/dce_link_encoder.h"
+ #include "dce/dce_stream_encoder.h"
+@@ -574,8 +574,8 @@ static void destruct(struct dce110_resource_pool *pool)
+ if (pool->base.dmcu != NULL)
+ dce_dmcu_destroy(&pool->base.dmcu);
+
+- if (pool->base.dccg != NULL)
+- dce_dccg_destroy(&pool->base.dccg);
++ if (pool->base.clk_mgr != NULL)
++ dce_clk_mgr_destroy(&pool->base.clk_mgr);
+ }
+
+ static void read_dce_straps(
+@@ -975,8 +975,8 @@ static bool construct(
+ }
+ }
+
+- pool->base.dccg = dce120_dccg_create(ctx);
+- if (pool->base.dccg == NULL) {
++ pool->base.clk_mgr = dce120_clk_mgr_create(ctx);
++ if (pool->base.clk_mgr == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto dccg_create_fail;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+index 1fccb52..6d40b3d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+@@ -23,7 +23,6 @@
+ *
+ */
+
+-#include "../dce/dce_dccg.h"
+ #include "dce/dce_8_0_d.h"
+ #include "dce/dce_8_0_sh_mask.h"
+
+@@ -38,6 +37,7 @@
+ #include "dce110/dce110_timing_generator.h"
+ #include "dce110/dce110_resource.h"
+ #include "dce80/dce80_timing_generator.h"
++#include "dce/dce_clk_mgr.h"
+ #include "dce/dce_mem_input.h"
+ #include "dce/dce_link_encoder.h"
+ #include "dce/dce_stream_encoder.h"
+@@ -155,15 +155,15 @@ static const struct dce110_timing_generator_offsets dce80_tg_offsets[] = {
+ .reg_name = mm ## block ## id ## _ ## reg_name
+
+
+-static const struct dccg_registers disp_clk_regs = {
++static const struct clk_mgr_registers disp_clk_regs = {
+ CLK_COMMON_REG_LIST_DCE_BASE()
+ };
+
+-static const struct dccg_shift disp_clk_shift = {
++static const struct clk_mgr_shift disp_clk_shift = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+ };
+
+-static const struct dccg_mask disp_clk_mask = {
++static const struct clk_mgr_mask disp_clk_mask = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+ };
+
+@@ -779,8 +779,8 @@ static void destruct(struct dce110_resource_pool *pool)
+ }
+ }
+
+- if (pool->base.dccg != NULL)
+- dce_dccg_destroy(&pool->base.dccg);
++ if (pool->base.clk_mgr != NULL)
++ dce_clk_mgr_destroy(&pool->base.clk_mgr);
+
+ if (pool->base.irqs != NULL) {
+ dal_irq_service_destroy(&pool->base.irqs);
+@@ -917,11 +917,11 @@ static bool dce80_construct(
+ }
+ }
+
+- pool->base.dccg = dce_dccg_create(ctx,
++ pool->base.clk_mgr = dce_clk_mgr_create(ctx,
+ &disp_clk_regs,
+ &disp_clk_shift,
+ &disp_clk_mask);
+- if (pool->base.dccg == NULL) {
++ if (pool->base.clk_mgr == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+@@ -1122,11 +1122,11 @@ static bool dce81_construct(
+ }
+ }
+
+- pool->base.dccg = dce_dccg_create(ctx,
++ pool->base.clk_mgr = dce_clk_mgr_create(ctx,
+ &disp_clk_regs,
+ &disp_clk_shift,
+ &disp_clk_mask);
+- if (pool->base.dccg == NULL) {
++ if (pool->base.clk_mgr == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+@@ -1323,11 +1323,11 @@ static bool dce83_construct(
+ }
+ }
+
+- pool->base.dccg = dce_dccg_create(ctx,
++ pool->base.clk_mgr = dce_clk_mgr_create(ctx,
+ &disp_clk_regs,
+ &disp_clk_shift,
+ &disp_clk_mask);
+- if (pool->base.dccg == NULL) {
++ if (pool->base.clk_mgr == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
+index e13ab66..55f293c 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
+@@ -24,7 +24,7 @@
+
+ DCN10 = dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o dcn10_hw_sequencer_debug.o \
+ dcn10_dpp.o dcn10_opp.o dcn10_optc.o \
+- dcn10_hubp.o dcn10_mpc.o dcn10_dccg.o \
++ dcn10_hubp.o dcn10_mpc.o dcn10_clk_mgr.o \
+ dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \
+ dcn10_hubbub.o dcn10_stream_encoder.o dcn10_link_encoder.o
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c
+new file mode 100644
+index 0000000..6f329d1
+--- /dev/null
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c
+@@ -0,0 +1,360 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++#include "dcn10_clk_mgr.h"
++
++#include "reg_helper.h"
++#include "core_types.h"
++
++#define TO_DCE_CLK_MGR(clocks)\
++ container_of(clocks, struct dce_clk_mgr, base)
++
++#define REG(reg) \
++ (clk_mgr_dce->regs->reg)
++
++#undef FN
++#define FN(reg_name, field_name) \
++ clk_mgr_dce->clk_mgr_shift->field_name, clk_mgr_dce->clk_mgr_mask->field_name
++
++#define CTX \
++ clk_mgr_dce->base.ctx
++#define DC_LOGGER \
++ clk_mgr->ctx->logger
++
++void dcn1_pplib_apply_display_requirements(
++ struct dc *dc,
++ struct dc_state *context)
++{
++ struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
++
++ pp_display_cfg->min_engine_clock_khz = dc->res_pool->clk_mgr->clks.dcfclk_khz;
++ pp_display_cfg->min_memory_clock_khz = dc->res_pool->clk_mgr->clks.fclk_khz;
++ pp_display_cfg->min_engine_clock_deep_sleep_khz = dc->res_pool->clk_mgr->clks.dcfclk_deep_sleep_khz;
++ pp_display_cfg->min_dcfc_deep_sleep_clock_khz = dc->res_pool->clk_mgr->clks.dcfclk_deep_sleep_khz;
++ pp_display_cfg->min_dcfclock_khz = dc->res_pool->clk_mgr->clks.dcfclk_khz;
++ pp_display_cfg->disp_clk_khz = dc->res_pool->clk_mgr->clks.dispclk_khz;
++ dce110_fill_display_configs(context, pp_display_cfg);
++
++ if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
++ dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
++}
++
++static int dcn1_determine_dppclk_threshold(struct clk_mgr *clk_mgr, struct dc_clocks *new_clocks)
++{
++ bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
++ bool dispclk_increase = new_clocks->dispclk_khz > clk_mgr->clks.dispclk_khz;
++ int disp_clk_threshold = new_clocks->max_supported_dppclk_khz;
++ bool cur_dpp_div = clk_mgr->clks.dispclk_khz > clk_mgr->clks.dppclk_khz;
++
++	/* increasing clock: current div is 0, requested div is 1 */
++ if (dispclk_increase) {
++ /* already divided by 2, no need to reach target clk with 2 steps*/
++ if (cur_dpp_div)
++ return new_clocks->dispclk_khz;
++
++ /* request disp clk is lower than maximum supported dpp clk,
++ * no need to reach target clk with two steps.
++ */
++ if (new_clocks->dispclk_khz <= disp_clk_threshold)
++ return new_clocks->dispclk_khz;
++
++ /* target dpp clk not request divided by 2, still within threshold */
++ if (!request_dpp_div)
++ return new_clocks->dispclk_khz;
++
++ } else {
++ /* decrease clock, looking for current dppclk divided by 2,
++ * request dppclk not divided by 2.
++ */
++
++ /* current dpp clk not divided by 2, no need to ramp*/
++ if (!cur_dpp_div)
++ return new_clocks->dispclk_khz;
++
++ /* current disp clk is lower than current maximum dpp clk,
++ * no need to ramp
++ */
++ if (clk_mgr->clks.dispclk_khz <= disp_clk_threshold)
++ return new_clocks->dispclk_khz;
++
++ /* request dpp clk need to be divided by 2 */
++ if (request_dpp_div)
++ return new_clocks->dispclk_khz;
++ }
++
++ return disp_clk_threshold;
++}
++
++static void dcn1_ramp_up_dispclk_with_dpp(struct clk_mgr *clk_mgr, struct dc_clocks *new_clocks)
++{
++ struct dc *dc = clk_mgr->ctx->dc;
++ int dispclk_to_dpp_threshold = dcn1_determine_dppclk_threshold(clk_mgr, new_clocks);
++ bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
++ int i;
++
++ /* set disp clk to dpp clk threshold */
++ dce112_set_clock(clk_mgr, dispclk_to_dpp_threshold);
++
++ /* update request dpp clk division option */
++ for (i = 0; i < dc->res_pool->pipe_count; i++) {
++ struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
++
++ if (!pipe_ctx->plane_state)
++ continue;
++
++ pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control(
++ pipe_ctx->plane_res.dpp,
++ request_dpp_div,
++ true);
++ }
++
++ /* If target clk not same as dppclk threshold, set to target clock */
++ if (dispclk_to_dpp_threshold != new_clocks->dispclk_khz)
++ dce112_set_clock(clk_mgr, new_clocks->dispclk_khz);
++
++ clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz;
++ clk_mgr->clks.dppclk_khz = new_clocks->dppclk_khz;
++ clk_mgr->clks.max_supported_dppclk_khz = new_clocks->max_supported_dppclk_khz;
++}
++
++static int get_active_display_cnt(
++ struct dc *dc,
++ struct dc_state *context)
++{
++ int i, display_count;
++
++ display_count = 0;
++ for (i = 0; i < context->stream_count; i++) {
++ const struct dc_stream_state *stream = context->streams[i];
++
++ /*
++ * Only notify active stream or virtual stream.
++ * Need to notify virtual stream to work around
++ * headless case. HPD does not fire when system is in
++ * S0i2.
++ */
++ if (!stream->dpms_off || stream->signal == SIGNAL_TYPE_VIRTUAL)
++ display_count++;
++ }
++
++ return display_count;
++}
++
++static void notify_deep_sleep_dcfclk_to_smu(
++ struct pp_smu_funcs_rv *pp_smu, int min_dcef_deep_sleep_clk_khz)
++{
++ int min_dcef_deep_sleep_clk_mhz; //minimum required DCEF Deep Sleep clock in mhz
++ /*
++ * if function pointer not set up, this message is
++ * sent as part of pplib_apply_display_requirements.
++ * So just return.
++ */
++ if (!pp_smu || !pp_smu->set_min_deep_sleep_dcfclk)
++ return;
++
++ min_dcef_deep_sleep_clk_mhz = (min_dcef_deep_sleep_clk_khz + 999) / 1000; //Round up
++ pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, min_dcef_deep_sleep_clk_mhz);
++}
++
++static void notify_hard_min_dcfclk_to_smu(
++ struct pp_smu_funcs_rv *pp_smu, int min_dcf_clk_khz)
++{
++ int min_dcf_clk_mhz; //minimum required DCF clock in mhz
++
++ /*
++ * if function pointer not set up, this message is
++ * sent as part of pplib_apply_display_requirements.
++ * So just return.
++ */
++ if (!pp_smu || !pp_smu->set_hard_min_dcfclk_by_freq)
++ return;
++
++ min_dcf_clk_mhz = min_dcf_clk_khz / 1000;
++
++ pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, min_dcf_clk_mhz);
++}
++
++static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
++ struct dc_state *context,
++ bool safe_to_lower)
++{
++ struct dc *dc = clk_mgr->ctx->dc;
++ struct dc_clocks *new_clocks = &context->bw.dcn.clk;
++ struct pp_smu_display_requirement_rv *smu_req_cur =
++ &dc->res_pool->pp_smu_req;
++ struct pp_smu_display_requirement_rv smu_req = *smu_req_cur;
++ struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
++ struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
++ bool send_request_to_increase = false;
++ bool send_request_to_lower = false;
++ int display_count;
++
++ bool enter_display_off = false;
++
++ display_count = get_active_display_cnt(dc, context);
++
++ if (display_count == 0)
++ enter_display_off = true;
++
++ if (enter_display_off == safe_to_lower) {
++ /*
++ * Notify SMU active displays
++ * if function pointer not set up, this message is
++ * sent as part of pplib_apply_display_requirements.
++ */
++ if (pp_smu->set_display_count)
++ pp_smu->set_display_count(&pp_smu->pp_smu, display_count);
++ else
++ smu_req.display_count = display_count;
++
++ }
++
++ if (new_clocks->dispclk_khz > clk_mgr->clks.dispclk_khz
++ || new_clocks->phyclk_khz > clk_mgr->clks.phyclk_khz
++ || new_clocks->fclk_khz > clk_mgr->clks.fclk_khz
++ || new_clocks->dcfclk_khz > clk_mgr->clks.dcfclk_khz)
++ send_request_to_increase = true;
++
++ if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr->clks.phyclk_khz)) {
++ clk_mgr->clks.phyclk_khz = new_clocks->phyclk_khz;
++
++ send_request_to_lower = true;
++ }
++
++ // F Clock
++ if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, clk_mgr->clks.fclk_khz)) {
++ clk_mgr->clks.fclk_khz = new_clocks->fclk_khz;
++ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_FCLK;
++ clock_voltage_req.clocks_in_khz = new_clocks->fclk_khz;
++ smu_req.hard_min_fclk_khz = new_clocks->fclk_khz;
++
++ dm_pp_apply_clock_for_voltage_request(clk_mgr->ctx, &clock_voltage_req);
++ send_request_to_lower = true;
++ }
++
++ //DCF Clock
++ if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr->clks.dcfclk_khz)) {
++ clk_mgr->clks.dcfclk_khz = new_clocks->dcfclk_khz;
++ smu_req.hard_min_dcefclk_khz = new_clocks->dcfclk_khz;
++
++ send_request_to_lower = true;
++ }
++
++ if (should_set_clock(safe_to_lower,
++ new_clocks->dcfclk_deep_sleep_khz, clk_mgr->clks.dcfclk_deep_sleep_khz)) {
++ clk_mgr->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
++ smu_req.min_deep_sleep_dcefclk_mhz = new_clocks->dcfclk_deep_sleep_khz;
++
++ send_request_to_lower = true;
++ }
++
++ /* make sure dcf clk is before dpp clk to
++ * make sure we have enough voltage to run dpp clk
++ */
++ if (send_request_to_increase) {
++ /*use dcfclk to request voltage*/
++ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
++ clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
++
++ notify_hard_min_dcfclk_to_smu(pp_smu, clock_voltage_req.clocks_in_khz);
++ if (pp_smu->set_display_requirement)
++ pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
++
++ notify_deep_sleep_dcfclk_to_smu(pp_smu, clk_mgr->clks.dcfclk_deep_sleep_khz);
++ dcn1_pplib_apply_display_requirements(dc, context);
++ }
++
++ /* dcn1 dppclk is tied to dispclk */
++	/* program dispclk even on == as a w/a for sleep/resume clock ramping issues */
++ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr->clks.dispclk_khz)
++ || new_clocks->dispclk_khz == clk_mgr->clks.dispclk_khz) {
++ dcn1_ramp_up_dispclk_with_dpp(clk_mgr, new_clocks);
++ clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz;
++
++ send_request_to_lower = true;
++ }
++
++ if (!send_request_to_increase && send_request_to_lower) {
++ /*use dcfclk to request voltage*/
++ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
++ clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
++
++ notify_hard_min_dcfclk_to_smu(pp_smu, clock_voltage_req.clocks_in_khz);
++
++ if (pp_smu->set_display_requirement)
++ pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
++
++ notify_deep_sleep_dcfclk_to_smu(pp_smu, clk_mgr->clks.dcfclk_deep_sleep_khz);
++ dcn1_pplib_apply_display_requirements(dc, context);
++ }
++
++
++ *smu_req_cur = smu_req;
++}
++
++static const struct clk_mgr_funcs dcn1_funcs = {
++ .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
++ .update_clocks = dcn1_update_clocks
++};
++
++struct clk_mgr *dcn1_clk_mgr_create(struct dc_context *ctx)
++{
++ struct dc_debug_options *debug = &ctx->dc->debug;
++ struct dc_bios *bp = ctx->dc_bios;
++ struct dc_firmware_info fw_info = { { 0 } };
++ struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);
++
++ if (clk_mgr_dce == NULL) {
++ BREAK_TO_DEBUGGER();
++ return NULL;
++ }
++
++ clk_mgr_dce->base.ctx = ctx;
++ clk_mgr_dce->base.funcs = &dcn1_funcs;
++
++ clk_mgr_dce->dfs_bypass_disp_clk = 0;
++
++ clk_mgr_dce->dprefclk_ss_percentage = 0;
++ clk_mgr_dce->dprefclk_ss_divider = 1000;
++ clk_mgr_dce->ss_on_dprefclk = false;
++
++ clk_mgr_dce->dprefclk_khz = 600000;
++ if (bp->integrated_info)
++ clk_mgr_dce->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
++ if (clk_mgr_dce->dentist_vco_freq_khz == 0) {
++ bp->funcs->get_firmware_info(bp, &fw_info);
++ clk_mgr_dce->dentist_vco_freq_khz = fw_info.smu_gpu_pll_output_freq;
++ if (clk_mgr_dce->dentist_vco_freq_khz == 0)
++ clk_mgr_dce->dentist_vco_freq_khz = 3600000;
++ }
++
++ if (!debug->disable_dfs_bypass && bp->integrated_info)
++ if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
++ clk_mgr_dce->dfs_bypass_enabled = true;
++
++ dce_clock_read_ss_info(clk_mgr_dce);
++
++ return &clk_mgr_dce->base;
++}
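
dcn1_determine_dppclk_threshold() above decides whether dispclk must be moved in two steps so the DPP clock never exceeds max_supported_dppclk_khz while its divider is being switched. A hypothetical walk-through of the increase path (all values made up for illustration):

/*
 * current:  dispclk = 300000 kHz, dppclk = 300000 kHz  (no /2 divider engaged)
 * request:  dispclk = 800000 kHz, dppclk = 400000 kHz  (/2 divider requested)
 * max_supported_dppclk_khz = 600000
 *
 * dcn1_determine_dppclk_threshold() returns 600000, so
 * dcn1_ramp_up_dispclk_with_dpp() does:
 *   dce112_set_clock(clk_mgr, 600000);       step dispclk to the threshold
 *   dpp_dppclk_control(dpp, true, true);     engage the /2 divider on each pipe
 *   dce112_set_clock(clk_mgr, 800000);       finish the ramp to the target
 *
 * The SMU notifications round kHz up to whole MHz, e.g.
 *   (310001 + 999) / 1000 = 311 MHz in notify_deep_sleep_dcfclk_to_smu().
 */
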
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h
+new file mode 100644
+index 0000000..9dbaf65
+--- /dev/null
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h
+@@ -0,0 +1,37 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++#ifndef __DCN10_CLK_MGR_H__
++#define __DCN10_CLK_MGR_H__
++
++#include "../dce/dce_clk_mgr.h"
++
++void dcn1_pplib_apply_display_requirements(
++ struct dc *dc,
++ struct dc_state *context);
++
++struct clk_mgr *dcn1_clk_mgr_create(struct dc_context *ctx);
++
++#endif //__DCN10_CLK_MGR_H__
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dccg.c
+deleted file mode 100644
+index 5ffc367..0000000
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dccg.c
++++ /dev/null
+@@ -1,361 +0,0 @@
+-/*
+- * Copyright 2018 Advanced Micro Devices, Inc.
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included in
+- * all copies or substantial portions of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+- * OTHER DEALINGS IN THE SOFTWARE.
+- *
+- * Authors: AMD
+- *
+- */
+-
+-#include "dcn10_dccg.h"
+-
+-#include "reg_helper.h"
+-#include "core_types.h"
+-
+-#define TO_DCE_DCCG(clocks)\
+- container_of(clocks, struct dce_dccg, base)
+-
+-#define REG(reg) \
+- (dccg_dce->regs->reg)
+-
+-#undef FN
+-#define FN(reg_name, field_name) \
+- dccg_dce->dccg_shift->field_name, dccg_dce->dccg_mask->field_name
+-
+-#define CTX \
+- dccg_dce->base.ctx
+-#define DC_LOGGER \
+- dccg->ctx->logger
+-
+-void dcn1_pplib_apply_display_requirements(
+- struct dc *dc,
+- struct dc_state *context)
+-{
+- struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
+-
+- pp_display_cfg->min_engine_clock_khz = dc->res_pool->dccg->clks.dcfclk_khz;
+- pp_display_cfg->min_memory_clock_khz = dc->res_pool->dccg->clks.fclk_khz;
+- pp_display_cfg->min_engine_clock_deep_sleep_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz;
+- pp_display_cfg->min_dcfc_deep_sleep_clock_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz;
+- pp_display_cfg->min_dcfclock_khz = dc->res_pool->dccg->clks.dcfclk_khz;
+- pp_display_cfg->disp_clk_khz = dc->res_pool->dccg->clks.dispclk_khz;
+- dce110_fill_display_configs(context, pp_display_cfg);
+-
+- if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
+- dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
+-}
+-
+-static int dcn1_determine_dppclk_threshold(struct dccg *dccg, struct dc_clocks *new_clocks)
+-{
+- bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
+- bool dispclk_increase = new_clocks->dispclk_khz > dccg->clks.dispclk_khz;
+- int disp_clk_threshold = new_clocks->max_supported_dppclk_khz;
+- bool cur_dpp_div = dccg->clks.dispclk_khz > dccg->clks.dppclk_khz;
+-
+- /* increase clock, looking for div is 0 for current, request div is 1*/
+- if (dispclk_increase) {
+- /* already divided by 2, no need to reach target clk with 2 steps*/
+- if (cur_dpp_div)
+- return new_clocks->dispclk_khz;
+-
+- /* request disp clk is lower than maximum supported dpp clk,
+- * no need to reach target clk with two steps.
+- */
+- if (new_clocks->dispclk_khz <= disp_clk_threshold)
+- return new_clocks->dispclk_khz;
+-
+- /* target dpp clk not request divided by 2, still within threshold */
+- if (!request_dpp_div)
+- return new_clocks->dispclk_khz;
+-
+- } else {
+- /* decrease clock, looking for current dppclk divided by 2,
+- * request dppclk not divided by 2.
+- */
+-
+- /* current dpp clk not divided by 2, no need to ramp*/
+- if (!cur_dpp_div)
+- return new_clocks->dispclk_khz;
+-
+- /* current disp clk is lower than current maximum dpp clk,
+- * no need to ramp
+- */
+- if (dccg->clks.dispclk_khz <= disp_clk_threshold)
+- return new_clocks->dispclk_khz;
+-
+- /* request dpp clk need to be divided by 2 */
+- if (request_dpp_div)
+- return new_clocks->dispclk_khz;
+- }
+-
+- return disp_clk_threshold;
+-}
+-
+-static void dcn1_ramp_up_dispclk_with_dpp(struct dccg *dccg, struct dc_clocks *new_clocks)
+-{
+- struct dc *dc = dccg->ctx->dc;
+- int dispclk_to_dpp_threshold = dcn1_determine_dppclk_threshold(dccg, new_clocks);
+- bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
+- int i;
+-
+- /* set disp clk to dpp clk threshold */
+- dce112_set_clock(dccg, dispclk_to_dpp_threshold);
+-
+- /* update request dpp clk division option */
+- for (i = 0; i < dc->res_pool->pipe_count; i++) {
+- struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+-
+- if (!pipe_ctx->plane_state)
+- continue;
+-
+- pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control(
+- pipe_ctx->plane_res.dpp,
+- request_dpp_div,
+- true);
+- }
+-
+- /* If target clk not same as dppclk threshold, set to target clock */
+- if (dispclk_to_dpp_threshold != new_clocks->dispclk_khz)
+- dce112_set_clock(dccg, new_clocks->dispclk_khz);
+-
+- dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
+- dccg->clks.dppclk_khz = new_clocks->dppclk_khz;
+- dccg->clks.max_supported_dppclk_khz = new_clocks->max_supported_dppclk_khz;
+-}
+-
+-static int get_active_display_cnt(
+- struct dc *dc,
+- struct dc_state *context)
+-{
+- int i, display_count;
+-
+- display_count = 0;
+- for (i = 0; i < context->stream_count; i++) {
+- const struct dc_stream_state *stream = context->streams[i];
+-
+- /*
+- * Only notify active stream or virtual stream.
+- * Need to notify virtual stream to work around
+- * headless case. HPD does not fire when system is in
+- * S0i2.
+- */
+- if (!stream->dpms_off || stream->signal == SIGNAL_TYPE_VIRTUAL)
+- display_count++;
+- }
+-
+- return display_count;
+-}
+-
+-static void notify_deep_sleep_dcfclk_to_smu(
+- struct pp_smu_funcs_rv *pp_smu, int min_dcef_deep_sleep_clk_khz)
+-{
+- int min_dcef_deep_sleep_clk_mhz; //minimum required DCEF Deep Sleep clock in mhz
+- /*
+- * if function pointer not set up, this message is
+- * sent as part of pplib_apply_display_requirements.
+- * So just return.
+- */
+- if (!pp_smu || !pp_smu->set_min_deep_sleep_dcfclk)
+- return;
+-
+- min_dcef_deep_sleep_clk_mhz = (min_dcef_deep_sleep_clk_khz + 999) / 1000; //Round up
+- pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, min_dcef_deep_sleep_clk_mhz);
+-}
+-
+-static void notify_hard_min_dcfclk_to_smu(
+- struct pp_smu_funcs_rv *pp_smu, int min_dcf_clk_khz)
+-{
+- int min_dcf_clk_mhz; //minimum required DCF clock in mhz
+-
+- /*
+- * if function pointer not set up, this message is
+- * sent as part of pplib_apply_display_requirements.
+- * So just return.
+- */
+- if (!pp_smu || !pp_smu->set_hard_min_dcfclk_by_freq)
+- return;
+-
+- min_dcf_clk_mhz = min_dcf_clk_khz / 1000;
+-
+- pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, min_dcf_clk_mhz);
+-}
+-
+-static void dcn1_update_clocks(struct dccg *dccg,
+- struct dc_state *context,
+- bool safe_to_lower)
+-{
+- struct dc *dc = dccg->ctx->dc;
+- struct dc_clocks *new_clocks = &context->bw.dcn.clk;
+- struct pp_smu_display_requirement_rv *smu_req_cur =
+- &dc->res_pool->pp_smu_req;
+- struct pp_smu_display_requirement_rv smu_req = *smu_req_cur;
+- struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
+- struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
+- bool send_request_to_increase = false;
+- bool send_request_to_lower = false;
+- int display_count;
+-
+- bool enter_display_off = false;
+-
+- display_count = get_active_display_cnt(dc, context);
+-
+- if (display_count == 0)
+- enter_display_off = true;
+-
+- if (enter_display_off == safe_to_lower) {
+- /*
+- * Notify SMU active displays
+- * if function pointer not set up, this message is
+- * sent as part of pplib_apply_display_requirements.
+- */
+- if (pp_smu->set_display_count)
+- pp_smu->set_display_count(&pp_smu->pp_smu, display_count);
+- else
+- smu_req.display_count = display_count;
+-
+- }
+-
+- if (new_clocks->dispclk_khz > dccg->clks.dispclk_khz
+- || new_clocks->phyclk_khz > dccg->clks.phyclk_khz
+- || new_clocks->fclk_khz > dccg->clks.fclk_khz
+- || new_clocks->dcfclk_khz > dccg->clks.dcfclk_khz)
+- send_request_to_increase = true;
+-
+- if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, dccg->clks.phyclk_khz)) {
+- dccg->clks.phyclk_khz = new_clocks->phyclk_khz;
+-
+- send_request_to_lower = true;
+- }
+-
+- // F Clock
+- if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, dccg->clks.fclk_khz)) {
+- dccg->clks.fclk_khz = new_clocks->fclk_khz;
+- clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_FCLK;
+- clock_voltage_req.clocks_in_khz = new_clocks->fclk_khz;
+- smu_req.hard_min_fclk_khz = new_clocks->fclk_khz;
+-
+- dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+- send_request_to_lower = true;
+- }
+-
+- //DCF Clock
+- if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, dccg->clks.dcfclk_khz)) {
+- dccg->clks.dcfclk_khz = new_clocks->dcfclk_khz;
+- smu_req.hard_min_dcefclk_khz = new_clocks->dcfclk_khz;
+-
+- send_request_to_lower = true;
+- }
+-
+- if (should_set_clock(safe_to_lower,
+- new_clocks->dcfclk_deep_sleep_khz, dccg->clks.dcfclk_deep_sleep_khz)) {
+- dccg->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
+- smu_req.min_deep_sleep_dcefclk_mhz = new_clocks->dcfclk_deep_sleep_khz;
+-
+- send_request_to_lower = true;
+- }
+-
+- /* make sure dcf clk is before dpp clk to
+- * make sure we have enough voltage to run dpp clk
+- */
+- if (send_request_to_increase) {
+- /*use dcfclk to request voltage*/
+- clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
+- clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
+-
+- notify_hard_min_dcfclk_to_smu(pp_smu, clock_voltage_req.clocks_in_khz);
+-
+- if (pp_smu->set_display_requirement)
+- pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
+-
+- notify_deep_sleep_dcfclk_to_smu(pp_smu, dccg->clks.dcfclk_deep_sleep_khz);
+- dcn1_pplib_apply_display_requirements(dc, context);
+- }
+-
+- /* dcn1 dppclk is tied to dispclk */
+- /* program dispclk on = as a w/a for sleep resume clock ramping issues */
+- if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)
+- || new_clocks->dispclk_khz == dccg->clks.dispclk_khz) {
+- dcn1_ramp_up_dispclk_with_dpp(dccg, new_clocks);
+- dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
+-
+- send_request_to_lower = true;
+- }
+-
+- if (!send_request_to_increase && send_request_to_lower) {
+- /*use dcfclk to request voltage*/
+- clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
+- clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
+-
+- notify_hard_min_dcfclk_to_smu(pp_smu, clock_voltage_req.clocks_in_khz);
+-
+- if (pp_smu->set_display_requirement)
+- pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
+-
+- notify_deep_sleep_dcfclk_to_smu(pp_smu, dccg->clks.dcfclk_deep_sleep_khz);
+- dcn1_pplib_apply_display_requirements(dc, context);
+- }
+-
+-
+- *smu_req_cur = smu_req;
+-}
+-
+-static const struct dccg_funcs dcn1_funcs = {
+- .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+- .update_clocks = dcn1_update_clocks
+-};
+-
+-struct dccg *dcn1_dccg_create(struct dc_context *ctx)
+-{
+- struct dc_debug_options *debug = &ctx->dc->debug;
+- struct dc_bios *bp = ctx->dc_bios;
+- struct dc_firmware_info fw_info = { { 0 } };
+- struct dce_dccg *dccg_dce = kzalloc(sizeof(*dccg_dce), GFP_KERNEL);
+-
+- if (dccg_dce == NULL) {
+- BREAK_TO_DEBUGGER();
+- return NULL;
+- }
+-
+- dccg_dce->base.ctx = ctx;
+- dccg_dce->base.funcs = &dcn1_funcs;
+-
+- dccg_dce->dfs_bypass_disp_clk = 0;
+-
+- dccg_dce->dprefclk_ss_percentage = 0;
+- dccg_dce->dprefclk_ss_divider = 1000;
+- dccg_dce->ss_on_dprefclk = false;
+-
+- dccg_dce->dprefclk_khz = 600000;
+- if (bp->integrated_info)
+- dccg_dce->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
+- if (dccg_dce->dentist_vco_freq_khz == 0) {
+- bp->funcs->get_firmware_info(bp, &fw_info);
+- dccg_dce->dentist_vco_freq_khz = fw_info.smu_gpu_pll_output_freq;
+- if (dccg_dce->dentist_vco_freq_khz == 0)
+- dccg_dce->dentist_vco_freq_khz = 3600000;
+- }
+-
+- if (!debug->disable_dfs_bypass && bp->integrated_info)
+- if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
+- dccg_dce->dfs_bypass_enabled = true;
+-
+- dce_clock_read_ss_info(dccg_dce);
+-
+- return &dccg_dce->base;
+-}
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dccg.h
+deleted file mode 100644
+index 7f3dd84..0000000
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dccg.h
++++ /dev/null
+@@ -1,37 +0,0 @@
+-/*
+- * Copyright 2018 Advanced Micro Devices, Inc.
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included in
+- * all copies or substantial portions of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+- * OTHER DEALINGS IN THE SOFTWARE.
+- *
+- * Authors: AMD
+- *
+- */
+-
+-#ifndef __DCN10_DCCG_H__
+-#define __DCN10_DCCG_H__
+-
+-#include "../dce/dce_dccg.h"
+-
+-void dcn1_pplib_apply_display_requirements(
+- struct dc *dc,
+- struct dc_state *context);
+-
+-struct dccg *dcn1_dccg_create(struct dc_context *ctx);
+-
+-#endif //__DCN10_DCCG_H__
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index d1f8c8e..5c4a4f6 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -1126,7 +1126,7 @@ static void dcn10_init_hw(struct dc *dc)
+
+ enable_power_gating_plane(dc->hwseq, true);
+
+- memset(&dc->res_pool->dccg->clks, 0, sizeof(dc->res_pool->dccg->clks));
++ memset(&dc->res_pool->clk_mgr->clks, 0, sizeof(dc->res_pool->clk_mgr->clks));
+ }
+
+ static void reset_hw_ctx_wrap(
+@@ -2052,16 +2052,16 @@ void update_dchubp_dpp(
+ */
+ if (plane_state->update_flags.bits.full_update) {
+ bool should_divided_by_2 = context->bw.dcn.clk.dppclk_khz <=
+- dc->res_pool->dccg->clks.dispclk_khz / 2;
++ dc->res_pool->clk_mgr->clks.dispclk_khz / 2;
+
+ dpp->funcs->dpp_dppclk_control(
+ dpp,
+ should_divided_by_2,
+ true);
+
+- dc->res_pool->dccg->clks.dppclk_khz = should_divided_by_2 ?
+- dc->res_pool->dccg->clks.dispclk_khz / 2 :
+- dc->res_pool->dccg->clks.dispclk_khz;
++ dc->res_pool->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
++ dc->res_pool->clk_mgr->clks.dispclk_khz / 2 :
++ dc->res_pool->clk_mgr->clks.dispclk_khz;
+ }
+
+ /* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
+@@ -2369,8 +2369,8 @@ static void dcn10_prepare_bandwidth(
+ if (context->stream_count == 0)
+ context->bw.dcn.clk.phyclk_khz = 0;
+
+- dc->res_pool->dccg->funcs->update_clocks(
+- dc->res_pool->dccg,
++ dc->res_pool->clk_mgr->funcs->update_clocks(
++ dc->res_pool->clk_mgr,
+ context,
+ false);
+ }
+@@ -2398,8 +2398,8 @@ static void dcn10_optimize_bandwidth(
+ if (context->stream_count == 0)
+ context->bw.dcn.clk.phyclk_khz = 0;
+
+- dc->res_pool->dccg->funcs->update_clocks(
+- dc->res_pool->dccg,
++ dc->res_pool->clk_mgr->funcs->update_clocks(
++ dc->res_pool->clk_mgr,
+ context,
+ true);
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+index 3d9118e..acb917d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+@@ -28,23 +28,23 @@
+
+ #include "resource.h"
+ #include "include/irq_service_interface.h"
+-#include "dcn10/dcn10_resource.h"
++#include "dcn10_resource.h"
+
+-#include "dcn10/dcn10_ipp.h"
+-#include "dcn10/dcn10_mpc.h"
++#include "dcn10_ipp.h"
++#include "dcn10_mpc.h"
+ #include "irq/dcn10/irq_service_dcn10.h"
+-#include "dcn10/dcn10_dpp.h"
++#include "dcn10_dpp.h"
+ #include "dcn10_optc.h"
+-#include "dcn10/dcn10_hw_sequencer.h"
++#include "dcn10_hw_sequencer.h"
+ #include "dce110/dce110_hw_sequencer.h"
+-#include "dcn10/dcn10_opp.h"
+-#include "dcn10/dcn10_link_encoder.h"
+-#include "dcn10/dcn10_stream_encoder.h"
+-#include "dcn10/dcn10_dccg.h"
++#include "dcn10_opp.h"
++#include "dcn10_link_encoder.h"
++#include "dcn10_stream_encoder.h"
++#include "dcn10_clk_mgr.h"
+ #include "dce/dce_clock_source.h"
+ #include "dce/dce_audio.h"
+ #include "dce/dce_hwseq.h"
+-#include "../virtual/virtual_stream_encoder.h"
++#include "virtual/virtual_stream_encoder.h"
+ #include "dce110/dce110_resource.h"
+ #include "dce112/dce112_resource.h"
+ #include "dcn10_hubp.h"
+@@ -950,8 +950,8 @@ static void destruct(struct dcn10_resource_pool *pool)
+ if (pool->base.dmcu != NULL)
+ dce_dmcu_destroy(&pool->base.dmcu);
+
+- if (pool->base.dccg != NULL)
+- dce_dccg_destroy(&pool->base.dccg);
++ if (pool->base.clk_mgr != NULL)
++ dce_clk_mgr_destroy(&pool->base.clk_mgr);
+
+ kfree(pool->base.pp_smu);
+ }
+@@ -1277,8 +1277,8 @@ static bool construct(
+ }
+ }
+
+- pool->base.dccg = dcn1_dccg_create(ctx);
+- if (pool->base.dccg == NULL) {
++ pool->base.clk_mgr = dcn1_clk_mgr_create(ctx);
++ if (pool->base.clk_mgr == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto fail;
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+index d4eaf7f..4ef56ea 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+@@ -82,7 +82,7 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option);
+
+ void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable);
+ /********** DAL Core*********************/
+-#include "hw/dccg.h"
++#include "hw/clk_mgr.h"
+ #include "transform.h"
+ #include "dpp.h"
+
+@@ -168,7 +168,7 @@ struct resource_pool {
+ unsigned int audio_count;
+ struct audio_support audio_support;
+
+- struct dccg *dccg;
++ struct clk_mgr *clk_mgr;
+ struct irq_service *irqs;
+
+ struct abm *abm;
+@@ -287,7 +287,7 @@ struct dc_state {
+ struct dcn_bw_internal_vars dcn_bw_vars;
+ #endif
+
+- struct dccg *dccg;
++ struct clk_mgr *dccg;
+
+ struct kref refcount;
+ };
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+index ac9b4906..ece954a 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+@@ -32,7 +32,7 @@
+
+ #include "bw_fixed.h"
+ #include "../dml/display_mode_lib.h"
+-#include "hw/dccg.h"
++#include "hw/clk_mgr.h"
+
+ struct dc;
+ struct dc_state;
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+new file mode 100644
+index 0000000..23a4b18
+--- /dev/null
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+@@ -0,0 +1,47 @@
++/*
++ * Copyright 2012-16 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++#ifndef __DAL_CLK_MGR_H__
++#define __DAL_CLK_MGR_H__
++
++#include "dm_services_types.h"
++#include "dc.h"
++
++struct clk_mgr {
++ struct dc_context *ctx;
++ const struct clk_mgr_funcs *funcs;
++
++ struct dc_clocks clks;
++};
++
++struct clk_mgr_funcs {
++ void (*update_clocks)(struct clk_mgr *clk_mgr,
++ struct dc_state *context,
++ bool safe_to_lower);
++
++ int (*get_dp_ref_clk_frequency)(struct clk_mgr *clk_mgr);
++};
++
++#endif /* __DAL_CLK_MGR_H__ */
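
The new struct clk_mgr interface above is deliberately minimal: the hardware sequencers only need update_clocks() with the safe_to_lower contract plus the DP reference clock query. A short sketch of how the sequencer hunks in this patch drive it (the example_* wrappers are hypothetical; the real callers are the dce100/dce110/dcn10 prepare/optimize_bandwidth changes shown earlier):

/* Hypothetical wrappers mirroring the hw-sequencer hunks in this patch. */
static void example_prepare_bandwidth(struct dc *dc, struct dc_state *context)
{
	struct clk_mgr *clk_mgr = dc->res_pool->clk_mgr;

	/* Before programming the new state: allow raising clocks only. */
	clk_mgr->funcs->update_clocks(clk_mgr, context, false);
}

static void example_optimize_bandwidth(struct dc *dc, struct dc_state *context)
{
	struct clk_mgr *clk_mgr = dc->res_pool->clk_mgr;

	/* After the new state is committed: it is now safe to lower clocks. */
	clk_mgr->funcs->update_clocks(clk_mgr, context, true);
}
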
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+deleted file mode 100644
+index 6fd923d..0000000
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
++++ /dev/null
+@@ -1,47 +0,0 @@
+-/*
+- * Copyright 2012-16 Advanced Micro Devices, Inc.
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included in
+- * all copies or substantial portions of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+- * OTHER DEALINGS IN THE SOFTWARE.
+- *
+- * Authors: AMD
+- *
+- */
+-
+-#ifndef __DAL_DCCG_H__
+-#define __DAL_DCCG_H__
+-
+-#include "dm_services_types.h"
+-#include "dc.h"
+-
+-struct dccg {
+- struct dc_context *ctx;
+- const struct dccg_funcs *funcs;
+-
+- struct dc_clocks clks;
+-};
+-
+-struct dccg_funcs {
+- void (*update_clocks)(struct dccg *dccg,
+- struct dc_state *context,
+- bool safe_to_lower);
+-
+- int (*get_dp_ref_clk_frequency)(struct dccg *dccg);
+-};
+-
+-#endif /* __DAL_DCCG_H__ */
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5592-drm-amd-display-add-dccg-block.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5592-drm-amd-display-add-dccg-block.patch
new file mode 100644
index 00000000..5852c5be
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5592-drm-amd-display-add-dccg-block.patch
@@ -0,0 +1,142 @@
+From 3ba5cd1ccaed0f7ecff398a767c42bad63737550 Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Fri, 28 Sep 2018 08:42:52 -0400
+Subject: [PATCH 5592/5725] drm/amd/display: add dccg block
+
+This adds the hw block and hooks up dppclk dto
+programming.
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h | 7 +++-
+ .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 9 ++++-
+ drivers/gpu/drm/amd/display/dc/inc/core_types.h | 1 +
+ drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h | 44 ++++++++++++++++++++++
+ 4 files changed, 58 insertions(+), 3 deletions(-)
+ create mode 100644 drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h
+index 2668d56..0460777 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h
+@@ -27,7 +27,8 @@
+ #ifndef _DCE_CLK_MGR_H_
+ #define _DCE_CLK_MGR_H_
+
+-#include "../inc/hw/clk_mgr.h"
++#include "clk_mgr.h"
++#include "dccg.h"
+
+ #define MEMORY_TYPE_MULTIPLIER_CZ 4
+
+@@ -79,6 +80,8 @@ struct dce_clk_mgr {
+ const struct clk_mgr_shift *clk_mgr_shift;
+ const struct clk_mgr_mask *clk_mgr_mask;
+
++ struct dccg *dccg;
++
+ struct state_dependent_clocks max_clks_by_state[DM_PP_CLOCKS_MAX_STATES];
+
+ int dentist_vco_freq_khz;
+@@ -160,6 +163,6 @@ struct clk_mgr *dce112_clk_mgr_create(
+
+ struct clk_mgr *dce120_clk_mgr_create(struct dc_context *ctx);
+
+-void dce_clk_mgr_destroy(struct clk_mgr **dccg);
++void dce_clk_mgr_destroy(struct clk_mgr **clk_mgr);
+
+ #endif /* _DCE_CLK_MGR_H_ */
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 5c4a4f6..f88c440 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -45,6 +45,7 @@
+ #include "dcn10_hubbub.h"
+ #include "dcn10_cm_common.h"
+ #include "dc_link_dp.h"
++#include "dccg.h"
+
+ #define DC_LOGGER_INIT(logger)
+
+@@ -2059,7 +2060,13 @@ void update_dchubp_dpp(
+ should_divided_by_2,
+ true);
+
+- dc->res_pool->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
++ if (dc->res_pool->dccg)
++ dc->res_pool->dccg->funcs->update_dpp_dto(
++ dc->res_pool->dccg,
++ dpp->inst,
++ pipe_ctx->plane_res.bw.calc.dppclk_khz);
++ else
++ dc->res_pool->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
+ dc->res_pool->clk_mgr->clks.dispclk_khz / 2 :
+ dc->res_pool->clk_mgr->clks.dispclk_khz;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+index 4ef56ea..4e2cc7d 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+@@ -169,6 +169,7 @@ struct resource_pool {
+ struct audio_support audio_support;
+
+ struct clk_mgr *clk_mgr;
++ struct dccg *dccg;
+ struct irq_service *irqs;
+
+ struct abm *abm;
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+new file mode 100644
+index 0000000..95a56d0
+--- /dev/null
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+@@ -0,0 +1,44 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++#ifndef __DAL_DCCG_H__
++#define __DAL_DCCG_H__
++
++#include "dc_types.h"
++
++struct dccg {
++ struct dc_context *ctx;
++ const struct dccg_funcs *funcs;
++
++ int ref_dppclk;
++};
++
++struct dccg_funcs {
++ void (*update_dpp_dto)(struct dccg *dccg,
++ int dpp_inst,
++ int req_dppclk);
++};
++
++#endif //__DAL_DCCG_H__
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5593-drm-amd-display-dc-3.2.02.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5593-drm-amd-display-dc-3.2.02.patch
new file mode 100644
index 00000000..2c8c9748
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5593-drm-amd-display-dc-3.2.02.patch
@@ -0,0 +1,28 @@
+From 26942a38cff6381ebb868d8a7889b28506dc2049 Mon Sep 17 00:00:00 2001
+From: SivapiriyanKumarasamy <sivapiriyan.kumarasamy@amd.com>
+Date: Mon, 1 Oct 2018 14:45:05 -0400
+Subject: [PATCH 5593/5725] drm/amd/display: dc 3.2.02
+
+Signed-off-by: SivapiriyanKumarasamy <sivapiriyan.kumarasamy@amd.com>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index f4db4d7..a3049f3 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -38,7 +38,7 @@
+ #include "inc/compressor.h"
+ #include "dml/display_mode_lib.h"
+
+-#define DC_VER "3.2.01"
++#define DC_VER "3.2.02"
+
+ #define MAX_SURFACES 3
+ #define MAX_STREAMS 6
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5594-drm-amd-display-explicit-uint64_t-casting.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5594-drm-amd-display-explicit-uint64_t-casting.patch
new file mode 100644
index 00000000..faccc867
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5594-drm-amd-display-explicit-uint64_t-casting.patch
@@ -0,0 +1,31 @@
+From 898aa860e82e25e9ea481a0d5b7efd9e13273590 Mon Sep 17 00:00:00 2001
+From: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Date: Thu, 4 Oct 2018 12:11:28 -0400
+Subject: [PATCH 5594/5725] drm/amd/display: explicit uint64_t casting
+
+explicitly cast uint64_t in div64_u64_rem()
+
+Signed-off-by: vikrant mhaske <vikrant.mhaske@amd.com>
+Signed-off-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h b/drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h
+index 39ee8eba3..d1656c9 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h
+@@ -126,7 +126,7 @@ static inline struct bw_fixed bw_div(const struct bw_fixed arg1, const struct bw
+ static inline struct bw_fixed bw_mod(const struct bw_fixed arg1, const struct bw_fixed arg2)
+ {
+ struct bw_fixed res;
+- div64_u64_rem(arg1.value, arg2.value, &res.value);
++ div64_u64_rem(arg1.value, arg2.value, (uint64_t *)&res.value);
+ return res;
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5595-drm-amd-display-rename-cstate_pstate_watermarks_st1.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5595-drm-amd-display-rename-cstate_pstate_watermarks_st1.patch
new file mode 100644
index 00000000..1f7508ec
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5595-drm-amd-display-rename-cstate_pstate_watermarks_st1.patch
@@ -0,0 +1,41 @@
+From 92afd579eca0ee3c8f354c7019256545d86f018b Mon Sep 17 00:00:00 2001
+From: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Date: Thu, 4 Oct 2018 12:13:53 -0400
+Subject: [PATCH 5595/5725] drm/amd/display: rename
+ cstate_pstate_watermarks_st1
+
+cstate_pstate_watermarks_st1 -> cstate_pstate_watermarks_st
+
+Signed-off-by: vikrant mhaske <vikrant.mhaske@amd.com>
+Signed-off-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
+index da89c2e..06df02d 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
+@@ -31,7 +31,7 @@
+ #include "dml/display_mode_structs.h"
+
+ struct dchub_init_data;
+-struct cstate_pstate_watermarks_st {
++struct cstate_pstate_watermarks_st1 {
+ uint32_t cstate_exit_ns;
+ uint32_t cstate_enter_plus_exit_ns;
+ uint32_t pstate_change_ns;
+@@ -40,7 +40,7 @@ struct cstate_pstate_watermarks_st {
+ struct dcn_watermarks {
+ uint32_t pte_meta_urgent_ns;
+ uint32_t urgent_ns;
+- struct cstate_pstate_watermarks_st cstate_pstate;
++ struct cstate_pstate_watermarks_st1 cstate_pstate;
+ };
+
+ struct dcn_watermark_set {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5596-drm-amd-display-Fix-incorrect-end-slope-of-EETF.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5596-drm-amd-display-Fix-incorrect-end-slope-of-EETF.patch
new file mode 100644
index 00000000..f7aa2847
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5596-drm-amd-display-Fix-incorrect-end-slope-of-EETF.patch
@@ -0,0 +1,32 @@
+From 74bc5d8dfd1da9b570159b1708c42273ef913e0f Mon Sep 17 00:00:00 2001
+From: SivapiriyanKumarasamy <sivapiriyan.kumarasamy@amd.com>
+Date: Tue, 2 Oct 2018 08:44:04 -0400
+Subject: [PATCH 5596/5725] drm/amd/display: Fix incorrect end slope of EETF
+
+Force the E2 to dc_fixpt_one when E1 exceeds that value. This is the
+correct thing to do to avoid corruption.
+
+Signed-off-by: SivapiriyanKumarasamy <sivapiriyan.kumarasamy@amd.com>
+Reviewed-by: Krunoslav Kovac <Krunoslav.Kovac@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+---
+ drivers/gpu/drm/amd/display/modules/color/color_gamma.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+index 423e8c1..7b10c74 100644
+--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
++++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+@@ -791,7 +791,8 @@ static void hermite_spline_eetf(struct fixed31_32 input_x,
+ // (t^3 - 2t^2 + t) * (1-ks)
+ E2 = dc_fixpt_add(E2, dc_fixpt_mul(temp2,
+ dc_fixpt_add(t, dc_fixpt_sub(t3, temp1))));
+- }
++ } else
++ E2 = dc_fixpt_one;
+
+ temp1 = dc_fixpt_sub(dc_fixpt_one, E2);
+ temp2 = dc_fixpt_mul(temp1, temp1);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5597-drm-amdgpu-correct-SPDX-identifier-in-amdgpu_trace_p.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5597-drm-amdgpu-correct-SPDX-identifier-in-amdgpu_trace_p.patch
new file mode 100644
index 00000000..91c0bdec
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5597-drm-amdgpu-correct-SPDX-identifier-in-amdgpu_trace_p.patch
@@ -0,0 +1,30 @@
+From 17ce1faf68e7c8bec33be3a3bfeeec37f8db81a9 Mon Sep 17 00:00:00 2001
+From: Jonathan Gray <jsg@jsg.id.au>
+Date: Mon, 15 Oct 2018 15:45:49 +1100
+Subject: [PATCH 5597/5725] drm/amdgpu: correct SPDX identifier in
+ amdgpu_trace_points.c
+
+Commit b24413180f5600bcb3bb70fbed5cf186b60864bd
+'License cleanup: add SPDX GPL-2.0 license identifier to files with no license'
+incorrectly added "SPDX-License-Identifier: GPL-2.0" to a file with MIT
+license text. Change the SPDX identifier to match the license text.
+
+Signed-off-by: Jonathan Gray <jsg@jsg.id.au>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c
+index df5e0f0..2fbe6ac 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c
+@@ -1,4 +1,4 @@
+-// SPDX-License-Identifier: GPL-2.0
++// SPDX-License-Identifier: MIT
+ /* Copyright Red Hat Inc 2010.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5598-drm-amd-powerplay-bump-the-PPtable-version-supported.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5598-drm-amd-powerplay-bump-the-PPtable-version-supported.patch
new file mode 100644
index 00000000..0873cd45
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5598-drm-amd-powerplay-bump-the-PPtable-version-supported.patch
@@ -0,0 +1,104 @@
+From 10d85908c382374b351d1cbb84f34aebd1dbef8e Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Fri, 19 Oct 2018 15:41:20 +0800
+Subject: [PATCH 5598/5725] drm/amd/powerplay: bump the PPtable version
+ supported
+
+The matching VBIOS is already available. Also drop the
+temporary workarounds applied before.
+
+Change-Id: If5b78298bc0817b06e11aba49d390fa341d714b4
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Feifei Xu <Feifei.Xu@amd.com>
+---
+ .../amd/powerplay/hwmgr/vega20_processpptables.c | 46 ++++++++--------------
+ .../gpu/drm/amd/powerplay/inc/smu11_driver_if.h | 2 +-
+ 2 files changed, 18 insertions(+), 30 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
+index e5f7f82..f7e8bbd 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
+@@ -716,10 +716,6 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable
+ "[appendVbiosPPTable] Failed to retrieve Smc Dpm Table from VBIOS!",
+ return -1);
+
+- memset(ppsmc_pptable->Padding32,
+- 0,
+- sizeof(struct atom_smc_dpm_info_v4_4) -
+- sizeof(struct atom_common_table_header));
+ ppsmc_pptable->MaxVoltageStepGfx = smc_dpm_table->maxvoltagestepgfx;
+ ppsmc_pptable->MaxVoltageStepSoc = smc_dpm_table->maxvoltagestepsoc;
+
+@@ -778,22 +774,19 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable
+ ppsmc_pptable->FllGfxclkSpreadPercent = smc_dpm_table->fllgfxclkspreadpercent;
+ ppsmc_pptable->FllGfxclkSpreadFreq = smc_dpm_table->fllgfxclkspreadfreq;
+
+- if ((smc_dpm_table->table_header.format_revision == 4) &&
+- (smc_dpm_table->table_header.content_revision == 4)) {
+- for (i = 0; i < I2C_CONTROLLER_NAME_COUNT; i++) {
+- ppsmc_pptable->I2cControllers[i].Enabled =
+- smc_dpm_table->i2ccontrollers[i].enabled;
+- ppsmc_pptable->I2cControllers[i].SlaveAddress =
+- smc_dpm_table->i2ccontrollers[i].slaveaddress;
+- ppsmc_pptable->I2cControllers[i].ControllerPort =
+- smc_dpm_table->i2ccontrollers[i].controllerport;
+- ppsmc_pptable->I2cControllers[i].ThermalThrottler =
+- smc_dpm_table->i2ccontrollers[i].thermalthrottler;
+- ppsmc_pptable->I2cControllers[i].I2cProtocol =
+- smc_dpm_table->i2ccontrollers[i].i2cprotocol;
+- ppsmc_pptable->I2cControllers[i].I2cSpeed =
+- smc_dpm_table->i2ccontrollers[i].i2cspeed;
+- }
++ for (i = 0; i < I2C_CONTROLLER_NAME_COUNT; i++) {
++ ppsmc_pptable->I2cControllers[i].Enabled =
++ smc_dpm_table->i2ccontrollers[i].enabled;
++ ppsmc_pptable->I2cControllers[i].SlaveAddress =
++ smc_dpm_table->i2ccontrollers[i].slaveaddress;
++ ppsmc_pptable->I2cControllers[i].ControllerPort =
++ smc_dpm_table->i2ccontrollers[i].controllerport;
++ ppsmc_pptable->I2cControllers[i].ThermalThrottler =
++ smc_dpm_table->i2ccontrollers[i].thermalthrottler;
++ ppsmc_pptable->I2cControllers[i].I2cProtocol =
++ smc_dpm_table->i2ccontrollers[i].i2cprotocol;
++ ppsmc_pptable->I2cControllers[i].I2cSpeed =
++ smc_dpm_table->i2ccontrollers[i].i2cspeed;
+ }
+
+ return 0;
+@@ -882,15 +875,10 @@ static int init_powerplay_table_information(
+ if (pptable_information->smc_pptable == NULL)
+ return -ENOMEM;
+
+- if (powerplay_table->smcPPTable.Version <= 2)
+- memcpy(pptable_information->smc_pptable,
+- &(powerplay_table->smcPPTable),
+- sizeof(PPTable_t) -
+- sizeof(I2cControllerConfig_t) * I2C_CONTROLLER_NAME_COUNT);
+- else
+- memcpy(pptable_information->smc_pptable,
+- &(powerplay_table->smcPPTable),
+- sizeof(PPTable_t));
++ memcpy(pptable_information->smc_pptable,
++ &(powerplay_table->smcPPTable),
++ sizeof(PPTable_t));
++
+
+ result = append_vbios_pptable(hwmgr, (pptable_information->smc_pptable));
+
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
+index 2998a49..63d5cf6 100644
+--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
+@@ -29,7 +29,7 @@
+ // any structure is changed in this file
+ #define SMU11_DRIVER_IF_VERSION 0x12
+
+-#define PPTABLE_V20_SMU_VERSION 2
++#define PPTABLE_V20_SMU_VERSION 3
+
+ #define NUM_GFXCLK_DPM_LEVELS 16
+ #define NUM_VCLK_DPM_LEVELS 8
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5599-drm-amd-powerplay-correct-the-clocks-for-DAL-to-be-K.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5599-drm-amd-powerplay-correct-the-clocks-for-DAL-to-be-K.patch
new file mode 100644
index 00000000..2f96c681
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5599-drm-amd-powerplay-correct-the-clocks-for-DAL-to-be-K.patch
@@ -0,0 +1,118 @@
+From 68a7016ad9c8231d3fd2d2a1d106ecf21a5d6205 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Tue, 23 Oct 2018 14:31:38 +0800
+Subject: [PATCH 5599/5725] drm/amd/powerplay: correct the clocks for DAL to be
+ Khz unit
+
+Currently the clocks are reported in units of 10 kHz. Correct them
+to kHz as DAL expects.
+
+Change-Id: I91e9f4b460efbdc0ba223901b6c40e576523686d
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Feifei Xu<Feifei.Xu@amd.com>
+Reviewed-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 21 ++++++++++-----------
+ 1 file changed, 10 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+index b4dbbb7..810c609 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+@@ -2012,7 +2012,6 @@ int vega20_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
+ if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
+ switch (clk_type) {
+ case amd_pp_dcef_clock:
+- clk_freq = clock_req->clock_freq_in_khz / 100;
+ clk_select = PPCLK_DCEFCLK;
+ break;
+ case amd_pp_disp_clock:
+@@ -2063,7 +2062,7 @@ static int vega20_notify_smc_display_config_after_ps_adjustment(
+
+ if (data->smu_features[GNLD_DPM_DCEFCLK].supported) {
+ clock_req.clock_type = amd_pp_dcef_clock;
+- clock_req.clock_freq_in_khz = min_clocks.dcefClock;
++ clock_req.clock_freq_in_khz = min_clocks.dcefClock * 10;
+ if (!vega20_display_clock_voltage_request(hwmgr, &clock_req)) {
+ if (data->smu_features[GNLD_DS_DCEFCLK].supported)
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(
+@@ -2353,7 +2352,7 @@ static int vega20_get_sclks(struct pp_hwmgr *hwmgr,
+
+ for (i = 0; i < count; i++) {
+ clocks->data[i].clocks_in_khz =
+- dpm_table->dpm_levels[i].value * 100;
++ dpm_table->dpm_levels[i].value * 1000;
+ clocks->data[i].latency_in_us = 0;
+ }
+
+@@ -2383,7 +2382,7 @@ static int vega20_get_memclocks(struct pp_hwmgr *hwmgr,
+ for (i = 0; i < count; i++) {
+ clocks->data[i].clocks_in_khz =
+ data->mclk_latency_table.entries[i].frequency =
+- dpm_table->dpm_levels[i].value * 100;
++ dpm_table->dpm_levels[i].value * 1000;
+ clocks->data[i].latency_in_us =
+ data->mclk_latency_table.entries[i].latency =
+ vega20_get_mem_latency(hwmgr, dpm_table->dpm_levels[i].value);
+@@ -2408,7 +2407,7 @@ static int vega20_get_dcefclocks(struct pp_hwmgr *hwmgr,
+
+ for (i = 0; i < count; i++) {
+ clocks->data[i].clocks_in_khz =
+- dpm_table->dpm_levels[i].value * 100;
++ dpm_table->dpm_levels[i].value * 1000;
+ clocks->data[i].latency_in_us = 0;
+ }
+
+@@ -2431,7 +2430,7 @@ static int vega20_get_socclocks(struct pp_hwmgr *hwmgr,
+
+ for (i = 0; i < count; i++) {
+ clocks->data[i].clocks_in_khz =
+- dpm_table->dpm_levels[i].value * 100;
++ dpm_table->dpm_levels[i].value * 1000;
+ clocks->data[i].latency_in_us = 0;
+ }
+
+@@ -2582,11 +2581,11 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
+ return -EINVAL;
+ }
+
+- if (input_clk < clocks.data[0].clocks_in_khz / 100 ||
++ if (input_clk < clocks.data[0].clocks_in_khz / 1000 ||
+ input_clk > od8_settings[OD8_SETTING_UCLK_FMAX].max_value) {
+ pr_info("clock freq %d is not within allowed range [%d - %d]\n",
+ input_clk,
+- clocks.data[0].clocks_in_khz / 100,
++ clocks.data[0].clocks_in_khz / 1000,
+ od8_settings[OD8_SETTING_UCLK_FMAX].max_value);
+ return -EINVAL;
+ }
+@@ -2738,7 +2737,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
+
+ for (i = 0; i < clocks.num_levels; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+- i, clocks.data[i].clocks_in_khz / 100,
++ i, clocks.data[i].clocks_in_khz / 1000,
+ (clocks.data[i].clocks_in_khz == now) ? "*" : "");
+ break;
+
+@@ -2755,7 +2754,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
+
+ for (i = 0; i < clocks.num_levels; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+- i, clocks.data[i].clocks_in_khz / 100,
++ i, clocks.data[i].clocks_in_khz / 1000,
+ (clocks.data[i].clocks_in_khz == now) ? "*" : "");
+ break;
+
+@@ -2820,7 +2819,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
+ return ret);
+
+ size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
+- clocks.data[0].clocks_in_khz / 100,
++ clocks.data[0].clocks_in_khz / 1000,
+ od8_settings[OD8_SETTING_UCLK_FMAX].max_value);
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5600-drm-amd-powerplay-revise-Vega20-pptable-version-chec.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5600-drm-amd-powerplay-revise-Vega20-pptable-version-chec.patch
new file mode 100644
index 00000000..2e8beb0b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5600-drm-amd-powerplay-revise-Vega20-pptable-version-chec.patch
@@ -0,0 +1,39 @@
+From 45f4ddfb98317f053585271b01c816c143262eb6 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Tue, 30 Oct 2018 09:12:22 +0800
+Subject: [PATCH 5600/5725] drm/amd/powerplay: revise Vega20 pptable version
+ check
+
+Report the version numbers when the pptable versions do not match.
+
+Change-Id: I3ea8aac7493927281b14d28866fa87690621f0f0
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
+index f7e8bbd..97f8a1a 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
+@@ -642,8 +642,14 @@ static int check_powerplay_tables(
+ "Unsupported PPTable format!", return -1);
+ PP_ASSERT_WITH_CODE(powerplay_table->sHeader.structuresize > 0,
+ "Invalid PowerPlay Table!", return -1);
+- PP_ASSERT_WITH_CODE(powerplay_table->smcPPTable.Version == PPTABLE_V20_SMU_VERSION,
+- "Unmatch PPTable version, vbios update may be needed!", return -1);
++
++ if (powerplay_table->smcPPTable.Version != PPTABLE_V20_SMU_VERSION) {
++ pr_info("Unmatch PPTable version: "
++ "pptable from VBIOS is V%d while driver supported is V%d!",
++ powerplay_table->smcPPTable.Version,
++ PPTABLE_V20_SMU_VERSION);
++ return -EINVAL;
++ }
+
+ //dump_pptable(&powerplay_table->smcPPTable);
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5601-drm-amdgpu-support-Vega20-A1-ASICs.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5601-drm-amdgpu-support-Vega20-A1-ASICs.patch
new file mode 100644
index 00000000..67a01863
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5601-drm-amdgpu-support-Vega20-A1-ASICs.patch
@@ -0,0 +1,60 @@
+From 98afeff8d1bfb86fe256e0320e641e606418e4e0 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Tue, 30 Oct 2018 10:38:41 +0800
+Subject: [PATCH 5601/5725] drm/amdgpu: support Vega20 A1 ASICs
+
+They use different PSP firmwares from A0 ASICs for now.
+This will be dropped after they are unified to share the same
+PSP firmwares.
+
+Change-Id: I3bc8956dac62607e8771757858b1286a87f76cf3
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+index 3f3fac2..0a3f774 100644
+--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+@@ -34,10 +34,13 @@
+ #include "nbio/nbio_7_4_offset.h"
+
+ MODULE_FIRMWARE("amdgpu/vega20_sos.bin");
++MODULE_FIRMWARE("amdgpu/vega20_sos_old.bin");
+
+ /* address block */
+ #define smnMP1_FIRMWARE_FLAGS 0x3010024
+
++#define VEGA20_BL_VERSION_VAR_NEW 0xA1
++
+ static int
+ psp_v11_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *type)
+ {
+@@ -99,6 +102,7 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
+ char fw_name[30];
+ int err = 0;
+ const struct psp_firmware_header_v1_0 *hdr;
++ uint32_t bl_version;
+
+ DRM_DEBUG("\n");
+
+@@ -110,7 +114,13 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
+ BUG();
+ }
+
+- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name);
++ bl_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_100);
++ bl_version = (bl_version & 0xFF0000) >> 16;
++
++ if (bl_version == VEGA20_BL_VERSION_VAR_NEW)
++ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name);
++ else
++ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos_old.bin", chip_name);
+ err = request_firmware(&adev->psp.sos_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5602-Revert-drm-amdgpu-add-amdgpu_gmc_get_pde_for_bo-help.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5602-Revert-drm-amdgpu-add-amdgpu_gmc_get_pde_for_bo-help.patch
new file mode 100644
index 00000000..28775f34
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5602-Revert-drm-amdgpu-add-amdgpu_gmc_get_pde_for_bo-help.patch
@@ -0,0 +1,33 @@
+From 9780cb36039355a6e502849c838534c66cfcc21e Mon Sep 17 00:00:00 2001
+From: Prike Liang <Prike.Liang@amd.com>
+Date: Fri, 16 Nov 2018 14:05:34 +0800
+Subject: [PATCH 5602/5725] Revert "drm/amdgpu: add amdgpu_gmc_get_pde_for_bo
+ helper v2"
+
+This reverts commit d278082be7312ae62cd54cb1a984027171726ab5.
+
+For the jira SWDEV-167318
+
+Change-Id: I4a18fd3a159dacba793a57dc0f5fd2c45cefbb9b
+Signed-off-by: Prike Liang <Prike.Liang@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index f70687d..94b28af 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -1496,6 +1496,9 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
+ flags |= AMDGPU_PTE_SNOOPED;
+ }
+
++ if (mem && mem->mem_type == AMDGPU_PL_DGMA_IMPORT)
++ flags |= AMDGPU_PTE_SYSTEM;
++
+ return flags;
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5603-drm-amdgpu-update-smu-firmware-images-for-VI-variant.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5603-drm-amdgpu-update-smu-firmware-images-for-VI-variant.patch
new file mode 100644
index 00000000..a829c99f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5603-drm-amdgpu-update-smu-firmware-images-for-VI-variant.patch
@@ -0,0 +1,114 @@
+From 21aae6887c7514ecf1e82ca73f7692394ff4576e Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Wed, 17 Oct 2018 11:24:26 -0500
+Subject: [PATCH 5603/5725] drm/amdgpu: update smu firmware images for VI
+ variants (v2)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Some new variants require updated firmware.
+
+V2: add MODULE_FIRMWARE for new firmwares
+
+Reviewed-by: Huang Rui <ray.huang@amd.com> (v1)
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 33 +++++++++++++++++++++++----
+ drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | 3 +++
+ 2 files changed, 31 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+index 8816c69..ceadeea 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+@@ -330,7 +330,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
+ case CHIP_TOPAZ:
+ if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
+ ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
+- ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87))) {
++ ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87)) ||
++ ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD1)) ||
++ ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD3))) {
+ info->is_kicker = true;
+ strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
+ } else
+@@ -351,7 +353,6 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
+ if (type == CGS_UCODE_ID_SMU) {
+ if (((adev->pdev->device == 0x67ef) &&
+ ((adev->pdev->revision == 0xe0) ||
+- (adev->pdev->revision == 0xe2) ||
+ (adev->pdev->revision == 0xe5))) ||
+ ((adev->pdev->device == 0x67ff) &&
+ ((adev->pdev->revision == 0xcf) ||
+@@ -359,8 +360,13 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
+ (adev->pdev->revision == 0xff)))) {
+ info->is_kicker = true;
+ strcpy(fw_name, "amdgpu/polaris11_k_smc.bin");
+- } else
++ } else if ((adev->pdev->device == 0x67ef) &&
++ (adev->pdev->revision == 0xe2)) {
++ info->is_kicker = true;
++ strcpy(fw_name, "amdgpu/polaris11_k2_smc.bin");
++ } else {
+ strcpy(fw_name, "amdgpu/polaris11_smc.bin");
++ }
+ } else if (type == CGS_UCODE_ID_SMU_SK) {
+ strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
+ }
+@@ -378,14 +384,31 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
+ (adev->pdev->revision == 0xef))) {
+ info->is_kicker = true;
+ strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
+- } else
++ } else if ((adev->pdev->device == 0x67df) &&
++ ((adev->pdev->revision == 0xe1) ||
++ (adev->pdev->revision == 0xf7))) {
++ info->is_kicker = true;
++ strcpy(fw_name, "amdgpu/polaris10_k2_smc.bin");
++ } else {
+ strcpy(fw_name, "amdgpu/polaris10_smc.bin");
++ }
+ } else if (type == CGS_UCODE_ID_SMU_SK) {
+ strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
+ }
+ break;
+ case CHIP_POLARIS12:
+- strcpy(fw_name, "amdgpu/polaris12_smc.bin");
++ if (((adev->pdev->device == 0x6987) &&
++ ((adev->pdev->revision == 0xc0) ||
++ (adev->pdev->revision == 0xc3))) ||
++ ((adev->pdev->device == 0x6981) &&
++ ((adev->pdev->revision == 0x00) ||
++ (adev->pdev->revision == 0x01) ||
++ (adev->pdev->revision == 0x10)))) {
++ info->is_kicker = true;
++ strcpy(fw_name, "amdgpu/polaris12_k_smc.bin");
++ } else {
++ strcpy(fw_name, "amdgpu/polaris12_smc.bin");
++ }
+ break;
+ case CHIP_VEGAM:
+ strcpy(fw_name, "amdgpu/vegam_smc.bin");
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+index 99d5e4f..a6edd5d 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+@@ -37,10 +37,13 @@ MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
+ MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
+ MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
+ MODULE_FIRMWARE("amdgpu/polaris10_k_smc.bin");
++MODULE_FIRMWARE("amdgpu/polaris10_k2_smc.bin");
+ MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
+ MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");
+ MODULE_FIRMWARE("amdgpu/polaris11_k_smc.bin");
++MODULE_FIRMWARE("amdgpu/polaris11_k2_smc.bin");
+ MODULE_FIRMWARE("amdgpu/polaris12_smc.bin");
++MODULE_FIRMWARE("amdgpu/polaris12_k_smc.bin");
+ MODULE_FIRMWARE("amdgpu/vegam_smc.bin");
+ MODULE_FIRMWARE("amdgpu/vega10_smc.bin");
+ MODULE_FIRMWARE("amdgpu/vega10_acg_smc.bin");
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5604-drm-amd-display-Raise-dispclk-value-for-Polaris.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5604-drm-amd-display-Raise-dispclk-value-for-Polaris.patch
new file mode 100644
index 00000000..5e2ecd04
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5604-drm-amd-display-Raise-dispclk-value-for-Polaris.patch
@@ -0,0 +1,47 @@
+From 74b4b9a2b34d3f34b509427e16c4ede8ea5f22e2 Mon Sep 17 00:00:00 2001
+From: Roman Li <Roman.Li@amd.com>
+Date: Tue, 20 Nov 2018 16:50:29 -0500
+Subject: [PATCH 5604/5725] drm/amd/display: Raise dispclk value for Polaris
+
+[Why]
+The visual corruption due to low display clock value.
+Observed on RHEL7.6/Polaris at 2K@120Hz.
+
+[How]
+There was an earlier patch for dispclk:
+'drm/amd/display: Raise dispclk value for dce_update_clocks'
+Add the +15% workaround also to dce112_update_clocks.
+
+Signed-off-by: Roman Li <Roman.Li@amd.com>
+Reviewed-by: Nicholas Kazlauskas <Nicholas.Kazlauskas@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
+index 02ddc94..f1e71a8 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
+@@ -668,6 +668,10 @@ static void dce112_update_clocks(struct clk_mgr *clk_mgr,
+ {
+ struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+ struct dm_pp_power_level_change_request level_change_req;
++ int unpatched_disp_clk = context->bw.dce.dispclk_khz;
++
++ if (!clk_mgr_dce->dfs_bypass_active)
++ context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
+
+ level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
+ /* get max clock state from PPLIB */
+@@ -682,6 +686,8 @@ static void dce112_update_clocks(struct clk_mgr *clk_mgr,
+ clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz;
+ }
+ dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
++
++ context->bw.dce.dispclk_khz = unpatched_disp_clk;
+ }
+
+ static void dce12_update_clocks(struct clk_mgr *clk_mgr,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5605-drm-amdgpu-update-mc-firmware-image-for-polaris12-va.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5605-drm-amdgpu-update-mc-firmware-image-for-polaris12-va.patch
new file mode 100644
index 00000000..75a43015
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5605-drm-amdgpu-update-mc-firmware-image-for-polaris12-va.patch
@@ -0,0 +1,45 @@
+From 3e2cc1223ec77ddf2c061bc6dbc1be52c1ddd87c Mon Sep 17 00:00:00 2001
+From: Junwei Zhang <Jerry.Zhang@amd.com>
+Date: Thu, 22 Nov 2018 17:53:00 +0800
+Subject: [PATCH 5605/5725] drm/amdgpu: update mc firmware image for polaris12
+ variants
+
+Some new variants require updated firmware.
+
+Signed-off-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+index 98eead7..7fbede6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+@@ -55,6 +55,7 @@ MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
+ MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
+ MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
+ MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");
++MODULE_FIRMWARE("amdgpu/polaris12_k_mc.bin");
+
+ static const u32 golden_settings_tonga_a11[] =
+ {
+@@ -230,6 +231,15 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
+ break;
+ case CHIP_POLARIS12:
+ chip_name = "polaris12";
++ if (((adev->pdev->device == 0x6987) &&
++ ((adev->pdev->revision == 0xc0) ||
++ (adev->pdev->revision == 0xc3))) ||
++ ((adev->pdev->device == 0x6981) &&
++ ((adev->pdev->revision == 0x00) ||
++ (adev->pdev->revision == 0x01) ||
++ (adev->pdev->revision == 0x10)))) {
++ chip_name = "polaris12_k";
++ }
+ break;
+ case CHIP_FIJI:
+ case CHIP_CARRIZO:
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5606-drm-amd-display-Fix-6x4K-displays-light-up-on-Vega20.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5606-drm-amd-display-Fix-6x4K-displays-light-up-on-Vega20.patch
new file mode 100644
index 00000000..c2107cb1
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5606-drm-amd-display-Fix-6x4K-displays-light-up-on-Vega20.patch
@@ -0,0 +1,39 @@
+From bd03930cf94ffab0bcf85092dbfcabbfe233a034 Mon Sep 17 00:00:00 2001
+From: Roman Li <Roman.Li@amd.com>
+Date: Tue, 27 Nov 2018 17:16:37 -0500
+Subject: [PATCH 5606/5725] drm/amd/display: Fix 6x4K displays light-up on
+ Vega20
+
+[Why]
+More than 4x4K displays didn't light up on Vega20 due to a low dcfclk value.
+Powerplay expects a valid minimum dcfclk requirement from DC.
+
+[How]
+Update min_dcfclock_khz based on min_engine_clock value.
+
+Change-Id: I123f5f98cb02fc8cb5e3c9ea619efc8aa5aa4463
+Reviewed-by: Hersen Wu <hersenxs.wu@amd.com>
+Reviewed-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Roman Li <Roman.Li@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
+index f1e71a8..493e2f4 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
+@@ -585,6 +585,8 @@ static void dce11_pplib_apply_display_requirements(
+ dc,
+ context->bw.dce.sclk_khz);
+
++ pp_display_cfg->min_dcfclock_khz = pp_display_cfg->min_engine_clock_khz;
++
+ pp_display_cfg->min_engine_clock_deep_sleep_khz
+ = context->bw.dce.sclk_deep_sleep_khz;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5607-drm-amdgpu-gmc8-update-MC-firmware-for-polaris.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5607-drm-amdgpu-gmc8-update-MC-firmware-for-polaris.patch
new file mode 100644
index 00000000..0a083126
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5607-drm-amdgpu-gmc8-update-MC-firmware-for-polaris.patch
@@ -0,0 +1,74 @@
+From a309caf9fef6647f29093e6ac557eef539cf9a5b Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Wed, 28 Nov 2018 23:25:41 -0500
+Subject: [PATCH 5607/5725] drm/amdgpu/gmc8: update MC firmware for polaris
+
+Some variants require different MC firmware images.
+
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 29 ++++++++++++++++++++++++-----
+ 1 file changed, 24 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+index 7fbede6..e729f38 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+@@ -55,6 +55,8 @@ MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
+ MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
+ MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
+ MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");
++MODULE_FIRMWARE("amdgpu/polaris11_k_mc.bin");
++MODULE_FIRMWARE("amdgpu/polaris10_k_mc.bin");
+ MODULE_FIRMWARE("amdgpu/polaris12_k_mc.bin");
+
+ static const u32 golden_settings_tonga_a11[] =
+@@ -224,22 +226,39 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
+ chip_name = "tonga";
+ break;
+ case CHIP_POLARIS11:
+- chip_name = "polaris11";
++ if (((adev->pdev->device == 0x67ef) &&
++ ((adev->pdev->revision == 0xe0) ||
++ (adev->pdev->revision == 0xe5))) ||
++ ((adev->pdev->device == 0x67ff) &&
++ ((adev->pdev->revision == 0xcf) ||
++ (adev->pdev->revision == 0xef) ||
++ (adev->pdev->revision == 0xff))))
++ chip_name = "polaris11_k";
++ else if ((adev->pdev->device == 0x67ef) &&
++ (adev->pdev->revision == 0xe2))
++ chip_name = "polaris11_k";
++ else
++ chip_name = "polaris11";
+ break;
+ case CHIP_POLARIS10:
+- chip_name = "polaris10";
++ if ((adev->pdev->device == 0x67df) &&
++ ((adev->pdev->revision == 0xe1) ||
++ (adev->pdev->revision == 0xf7)))
++ chip_name = "polaris10_k";
++ else
++ chip_name = "polaris10";
+ break;
+ case CHIP_POLARIS12:
+- chip_name = "polaris12";
+ if (((adev->pdev->device == 0x6987) &&
+ ((adev->pdev->revision == 0xc0) ||
+ (adev->pdev->revision == 0xc3))) ||
+ ((adev->pdev->device == 0x6981) &&
+ ((adev->pdev->revision == 0x00) ||
+ (adev->pdev->revision == 0x01) ||
+- (adev->pdev->revision == 0x10)))) {
++ (adev->pdev->revision == 0x10))))
+ chip_name = "polaris12_k";
+- }
++ else
++ chip_name = "polaris12";
+ break;
+ case CHIP_FIJI:
+ case CHIP_CARRIZO:
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5608-drm-amdgpu-gmc8-always-load-MC-firmware-in-the-drive.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5608-drm-amdgpu-gmc8-always-load-MC-firmware-in-the-drive.patch
new file mode 100644
index 00000000..e461abec
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5608-drm-amdgpu-gmc8-always-load-MC-firmware-in-the-drive.patch
@@ -0,0 +1,46 @@
+From b7a5a8408361ba9967e10e227a4d007b5926a9e4 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Wed, 28 Nov 2018 23:28:17 -0500
+Subject: [PATCH 5608/5725] drm/amdgpu/gmc8: always load MC firmware in the
+ driver
+
+Some power features rely on the driver loaded version so always
+load the MC firmware from the driver even if the vbios loaded
+a version already.
+
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 9 +--------
+ 1 file changed, 1 insertion(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+index e729f38..9c1b2d3a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+@@ -365,7 +365,7 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
+ const struct mc_firmware_header_v1_0 *hdr;
+ const __le32 *fw_data = NULL;
+ const __le32 *io_mc_regs = NULL;
+- u32 data, vbios_version;
++ u32 data;
+ int i, ucode_size, regs_size;
+
+ /* Skip MC ucode loading on SR-IOV capable boards.
+@@ -376,13 +376,6 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
+ if (amdgpu_sriov_bios(adev))
+ return 0;
+
+- WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
+- data = RREG32(mmMC_SEQ_IO_DEBUG_DATA);
+- vbios_version = data & 0xf;
+-
+- if (vbios_version == 0)
+- return 0;
+-
+ if (!adev->gmc.fw)
+ return -EINVAL;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5609-drm-amdgpu-both-support-PCO-FP5-AM4-rlc-fw.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5609-drm-amdgpu-both-support-PCO-FP5-AM4-rlc-fw.patch
new file mode 100644
index 00000000..81559706
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5609-drm-amdgpu-both-support-PCO-FP5-AM4-rlc-fw.patch
@@ -0,0 +1,58 @@
+From 034fa5b0c714971b86a6be60c0ab4b91b5165335 Mon Sep 17 00:00:00 2001
+From: Aaron Liu <aaron.liu@amd.com>
+Date: Wed, 5 Dec 2018 11:07:55 +0800
+Subject: [PATCH 5609/5725] drm/amdgpu: both support PCO FP5/AM4 rlc fw
+
+For Picasso && AM4 SOCKET board, we use picasso_rlc_am4.bin
+For Picasso && FP5 SOCKET board, we use picasso_rlc.bin
+
+Judgment method:
+PCO AM4: revision >= 0xC8 && revision <= 0xCF
+ or revision >= 0xD8 && revision <= 0xDF
+otherwise is PCO FP5
+
+Change-Id: I359f0a3d1bc7d4d49c871cb3fb82797c7b91b259
+Signed-off-by: Aaron Liu <aaron.liu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher at amd.com>
+Reviewed-by: Huang Rui <ray.huang at amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 16 +++++++++++++++-
+ 1 file changed, 15 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 7acf02d..0481e21 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -86,6 +86,7 @@ MODULE_FIRMWARE("amdgpu/picasso_me.bin");
+ MODULE_FIRMWARE("amdgpu/picasso_mec.bin");
+ MODULE_FIRMWARE("amdgpu/picasso_mec2.bin");
+ MODULE_FIRMWARE("amdgpu/picasso_rlc.bin");
++MODULE_FIRMWARE("amdgpu/picasso_rlc_am4.bin");
+
+ MODULE_FIRMWARE("amdgpu/raven2_ce.bin");
+ MODULE_FIRMWARE("amdgpu/raven2_pfp.bin");
+@@ -660,7 +661,20 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
+ adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+
+- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
++ /*
++ * For Picasso && AM4 SOCKET board, we use picasso_rlc_am4.bin
++ * instead of picasso_rlc.bin.
++ * Judgment method:
++ * PCO AM4: revision >= 0xC8 && revision <= 0xCF
++ * or revision >= 0xD8 && revision <= 0xDF
++ * otherwise is PCO FP5
++ */
++ if (!strcmp(chip_name, "picasso") &&
++ (((adev->pdev->revision >= 0xC8) && (adev->pdev->revision <= 0xCF)) ||
++ ((adev->pdev->revision >= 0xD8) && (adev->pdev->revision <= 0xDF))))
++ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc_am4.bin", chip_name);
++ else
++ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
+ err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5610-drm-amdgpu-update-SMC-firmware-image-for-polaris10-v.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5610-drm-amdgpu-update-SMC-firmware-image-for-polaris10-v.patch
new file mode 100644
index 00000000..79b710b5
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5610-drm-amdgpu-update-SMC-firmware-image-for-polaris10-v.patch
@@ -0,0 +1,31 @@
+From e07f346525aeefa91f9e9511f6bf729a0e862da1 Mon Sep 17 00:00:00 2001
+From: Junwei Zhang <Jerry.Zhang@amd.com>
+Date: Fri, 7 Dec 2018 15:15:03 +0800
+Subject: [PATCH 5610/5725] drm/amdgpu: update SMC firmware image for polaris10
+ variants
+
+Some new variants require different firmwares.
+
+Signed-off-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+index ceadeea..387f1cf 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+@@ -381,7 +381,8 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
+ (adev->pdev->revision == 0xe7) ||
+ (adev->pdev->revision == 0xef))) ||
+ ((adev->pdev->device == 0x6fdf) &&
+- (adev->pdev->revision == 0xef))) {
++ ((adev->pdev->revision == 0xef) ||
++ (adev->pdev->revision == 0xff)))) {
+ info->is_kicker = true;
+ strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
+ } else if ((adev->pdev->device == 0x67df) &&
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5611-drm-amdgpu-powerplay-fix-mclk-switch-limit-on-polari.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5611-drm-amdgpu-powerplay-fix-mclk-switch-limit-on-polari.patch
new file mode 100644
index 00000000..a7ef68f6
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5611-drm-amdgpu-powerplay-fix-mclk-switch-limit-on-polari.patch
@@ -0,0 +1,34 @@
+From 11258361667f6b968b2ba4708bc0d54bf8063bf7 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 29 Nov 2018 19:20:28 -0500
+Subject: [PATCH 5611/5725] drm/amdgpu/powerplay: fix mclk switch limit on
+ polaris
+
+Update the switch limit on newer polaris variants. This may fix
+flickering at high refresh rates when mclk switching is enabled.
+
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+index ff84a03..0364018 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+@@ -2859,7 +2859,10 @@ static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr,
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
+- switch_limit_us = data->is_memory_gddr5 ? 190 : 150;
++ if (hwmgr->is_kicker)
++ switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
++ else
++ switch_limit_us = data->is_memory_gddr5 ? 190 : 150;
+ break;
+ case CHIP_VEGAM:
+ switch_limit_us = 30;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5612-drm-amdgpu-powerplay-fix-clock-stretcher-limits-on-p.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5612-drm-amdgpu-powerplay-fix-clock-stretcher-limits-on-p.patch
new file mode 100644
index 00000000..2a9fa6b7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5612-drm-amdgpu-powerplay-fix-clock-stretcher-limits-on-p.patch
@@ -0,0 +1,47 @@
+From 9364c81c25cf72d25fbe2d0281e12ba4b8f76dfc Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 29 Nov 2018 19:22:07 -0500
+Subject: [PATCH 5612/5725] drm/amdgpu/powerplay: fix clock stretcher limits on
+ polaris (v2)
+
+Adjust limits for newer polaris variants.
+
+v2: fix polaris11 kicker (Jerry)
+
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | 17 +++++++++++++++--
+ 1 file changed, 15 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+index 872d382..51892cc 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+@@ -1529,8 +1529,21 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
+ efuse = efuse >> 24;
+
+ if (hwmgr->chip_id == CHIP_POLARIS10) {
+- min = 1000;
+- max = 2300;
++ if (hwmgr->is_kicker) {
++ min = 1200;
++ max = 2500;
++ } else {
++ min = 1000;
++ max = 2300;
++ }
++ } else if (hwmgr->chip_id == CHIP_POLARIS11) {
++ if (hwmgr->is_kicker) {
++ min = 900;
++ max = 2100;
++ } else {
++ min = 1100;
++ max = 2100;
++ }
+ } else {
+ min = 1100;
+ max = 2100;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5613-drm-amdgpu-powerplay-Apply-avfs-cks-off-voltages-on-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5613-drm-amdgpu-powerplay-Apply-avfs-cks-off-voltages-on-.patch
new file mode 100644
index 00000000..314c9b67
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5613-drm-amdgpu-powerplay-Apply-avfs-cks-off-voltages-on-.patch
@@ -0,0 +1,49 @@
+From e991787e85a908ca0139bdf202b6174584c08272 Mon Sep 17 00:00:00 2001
+From: Kenneth Feng <kenneth.feng@amd.com>
+Date: Thu, 6 Dec 2018 11:56:14 +0800
+Subject: [PATCH 5613/5725] drm/amdgpu/powerplay: Apply avfs cks-off voltages
+ on VI
+
+Using avfs cks-off voltages instead of EVV cks-off voltages avoids
+voltage overshoot when switching sclk.
+
+Signed-off-by: Kenneth Feng <kenneth.feng@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h | 2 ++
+ drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | 6 ++++++
+ 2 files changed, 8 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h
+index 62f36ba..c1a99df 100644
+--- a/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h
+@@ -386,6 +386,8 @@ typedef uint16_t PPSMC_Result;
+ #define PPSMC_MSG_AgmResetPsm ((uint16_t) 0x403)
+ #define PPSMC_MSG_ReadVftCell ((uint16_t) 0x404)
+
++#define PPSMC_MSG_ApplyAvfsCksOffVoltage ((uint16_t) 0x415)
++
+ #define PPSMC_MSG_GFX_CU_PG_ENABLE ((uint16_t) 0x280)
+ #define PPSMC_MSG_GFX_CU_PG_DISABLE ((uint16_t) 0x281)
+ #define PPSMC_MSG_GetCurrPkgPwr ((uint16_t) 0x282)
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+index 51892cc..90c1215 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+@@ -1998,6 +1998,12 @@ int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
+
++ /* Apply avfs cks-off voltages to avoid the overshoot
++ * when switching to the highest sclk frequency
++ */
++ if (data->apply_avfs_cks_off_voltage)
++ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ApplyAvfsCksOffVoltage);
++
+ return 0;
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5614-drm-amdgpu-revert-the-commit-interim-disable-RV2-GFX.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5614-drm-amdgpu-revert-the-commit-interim-disable-RV2-GFX.patch
new file mode 100644
index 00000000..cbed6f2f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5614-drm-amdgpu-revert-the-commit-interim-disable-RV2-GFX.patch
@@ -0,0 +1,32 @@
+From 250b27fcfedb6c5cb2dd677bf38269a57e3c81ae Mon Sep 17 00:00:00 2001
+From: Raveendra Talabattula <raveendra.talabattula@amd.com>
+Date: Fri, 4 Jan 2019 12:39:53 +0530
+Subject: [PATCH 5614/5725] drm/amdgpu: revert the commit interim disable RV2
+ GFX CG flag
+
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/soc15.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index 5614c2b..a741913 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -705,9 +705,12 @@ static int soc15_common_early_init(void *handle)
+ adev->external_rev_id = 0x1;
+
+ if (adev->rev_id >= 0x8) {
+- adev->cg_flags = AMD_CG_SUPPORT_GFX_MGLS |
++ adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
++ AMD_CG_SUPPORT_GFX_MGLS |
+ AMD_CG_SUPPORT_GFX_CP_LS |
++ AMD_CG_SUPPORT_GFX_3D_CGCG |
+ AMD_CG_SUPPORT_GFX_3D_CGLS |
++ AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_BIF_LS |
+ AMD_CG_SUPPORT_HDP_LS |
+--
+2.7.4
+
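The clock-gating flags restored above matter because the gating code paths test them as plain bitmask checks at runtime (the RLC safe-mode helpers added later in this series do exactly that against AMD_CG_SUPPORT_GFX_CGCG, _MGCG and _3D_CGCG). A minimal, hypothetical illustration of that consumer pattern; example_wants_cgcg() is a placeholder name, not a symbol from the patch set:

static bool example_wants_cgcg(struct amdgpu_device *adev, bool enable)
{
        /* Coarse-grain clock gating is only programmed when the support
         * bit restored by this patch is present in adev->cg_flags.
         */
        return enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG);
}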
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5615-drm-amdgpu-separate-amdgpu_rlc-into-a-single-file.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5615-drm-amdgpu-separate-amdgpu_rlc-into-a-single-file.patch
new file mode 100644
index 00000000..431ba4f8
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5615-drm-amdgpu-separate-amdgpu_rlc-into-a-single-file.patch
@@ -0,0 +1,463 @@
+From e0b15fefb0d56698110950ac61cef6fe24eb056c Mon Sep 17 00:00:00 2001
+From: Likun Gao <Likun.Gao@amd.com>
+Date: Thu, 8 Nov 2018 13:43:46 +0800
+Subject: [PATCH 5615/5725] drm/amdgpu: separate amdgpu_rlc into a single file
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Separate the function and struct of RLC from the file of GFX.
+Abstract the function of amdgpu_gfx_rlc_fini.
+
+Signed-off-by: Likun Gao <Likun.Gao@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/Makefile | 1 +
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 1 +
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 54 +-------------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c | 57 +++++++++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h | 89 +++++++++++++++++++++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 13 ++---
+ drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 15 ++----
+ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 10 +---
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 17 +------
+ 9 files changed, 160 insertions(+), 97 deletions(-)
+ create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
+ create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
+index 3311402..5edff50 100644
+--- a/drivers/gpu/drm/amd/amdgpu/Makefile
++++ b/drivers/gpu/drm/amd/amdgpu/Makefile
+@@ -104,6 +104,7 @@ amdgpu-y += \
+ # add GFX block
+ amdgpu-y += \
+ amdgpu_gfx.o \
++ amdgpu_rlc.o \
+ gfx_v8_0.o \
+ gfx_v9_0.o
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+index a750242..54ee584 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+@@ -25,6 +25,7 @@
+ #include <drm/drmP.h>
+ #include "amdgpu.h"
+ #include "amdgpu_gfx.h"
++#include "amdgpu_rlc.h"
+
+ /* delay 0.1 second to enable gfx off feature */
+ #define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+index b61b5c1..f790e15 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+@@ -29,6 +29,7 @@
+ */
+ #include "clearstate_defs.h"
+ #include "amdgpu_ring.h"
++#include "amdgpu_rlc.h"
+
+ /* GFX current status */
+ #define AMDGPU_GFX_NORMAL_MODE 0x00000000L
+@@ -37,59 +38,6 @@
+ #define AMDGPU_GFX_CG_DISABLED_MODE 0x00000004L
+ #define AMDGPU_GFX_LBPW_DISABLED_MODE 0x00000008L
+
+-
+-struct amdgpu_rlc_funcs {
+- void (*enter_safe_mode)(struct amdgpu_device *adev);
+- void (*exit_safe_mode)(struct amdgpu_device *adev);
+-};
+-
+-struct amdgpu_rlc {
+- /* for power gating */
+- struct amdgpu_bo *save_restore_obj;
+- uint64_t save_restore_gpu_addr;
+- volatile uint32_t *sr_ptr;
+- const u32 *reg_list;
+- u32 reg_list_size;
+- /* for clear state */
+- struct amdgpu_bo *clear_state_obj;
+- uint64_t clear_state_gpu_addr;
+- volatile uint32_t *cs_ptr;
+- const struct cs_section_def *cs_data;
+- u32 clear_state_size;
+- /* for cp tables */
+- struct amdgpu_bo *cp_table_obj;
+- uint64_t cp_table_gpu_addr;
+- volatile uint32_t *cp_table_ptr;
+- u32 cp_table_size;
+-
+- /* safe mode for updating CG/PG state */
+- bool in_safe_mode;
+- const struct amdgpu_rlc_funcs *funcs;
+-
+- /* for firmware data */
+- u32 save_and_restore_offset;
+- u32 clear_state_descriptor_offset;
+- u32 avail_scratch_ram_locations;
+- u32 reg_restore_list_size;
+- u32 reg_list_format_start;
+- u32 reg_list_format_separate_start;
+- u32 starting_offsets_start;
+- u32 reg_list_format_size_bytes;
+- u32 reg_list_size_bytes;
+- u32 reg_list_format_direct_reg_list_length;
+- u32 save_restore_list_cntl_size_bytes;
+- u32 save_restore_list_gpm_size_bytes;
+- u32 save_restore_list_srm_size_bytes;
+-
+- u32 *register_list_format;
+- u32 *register_restore;
+- u8 *save_restore_list_cntl;
+- u8 *save_restore_list_gpm;
+- u8 *save_restore_list_srm;
+-
+- bool is_rlc_v2_1;
+-};
+-
+ #define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES
+
+ struct amdgpu_mec {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
+new file mode 100644
+index 0000000..c5459ab
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
+@@ -0,0 +1,57 @@
++
++/*
++ * Copyright 2014 Advanced Micro Devices, Inc.
++ * Copyright 2008 Red Hat Inc.
++ * Copyright 2009 Jerome Glisse.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "amdgpu.h"
++#include "amdgpu_gfx.h"
++#include "amdgpu_rlc.h"
++
++/**
++ * amdgpu_gfx_rlc_fini - Free the BOs used by the RLC
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Free the three BOs used for rlc_save_restore_block, rlc_clear_state_block
++ * and rlc_jump_table_block.
++ */
++void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev)
++{
++ /* save restore block */
++ if (adev->gfx.rlc.save_restore_obj) {
++ amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj,
++ &adev->gfx.rlc.save_restore_gpu_addr,
++ (void **)&adev->gfx.rlc.sr_ptr);
++ }
++
++ /* clear state block */
++ amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
++ &adev->gfx.rlc.clear_state_gpu_addr,
++ (void **)&adev->gfx.rlc.cs_ptr);
++
++ /* jump table block */
++ amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
++ &adev->gfx.rlc.cp_table_gpu_addr,
++ (void **)&adev->gfx.rlc.cp_table_ptr);
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
+new file mode 100644
+index 0000000..b3b0920
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
+@@ -0,0 +1,89 @@
++
++/*
++ * Copyright 2014 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef __AMDGPU_RLC_H__
++#define __AMDGPU_RLC_H__
++
++#include "clearstate_defs.h"
++
++struct amdgpu_rlc_funcs {
++ void (*enter_safe_mode)(struct amdgpu_device *adev);
++ void (*exit_safe_mode)(struct amdgpu_device *adev);
++ int (*init)(struct amdgpu_device *adev);
++ int (*resume)(struct amdgpu_device *adev);
++ void (*stop)(struct amdgpu_device *adev);
++ void (*reset)(struct amdgpu_device *adev);
++ void (*start)(struct amdgpu_device *adev);
++};
++
++struct amdgpu_rlc {
++ /* for power gating */
++ struct amdgpu_bo *save_restore_obj;
++ uint64_t save_restore_gpu_addr;
++ volatile uint32_t *sr_ptr;
++ const u32 *reg_list;
++ u32 reg_list_size;
++ /* for clear state */
++ struct amdgpu_bo *clear_state_obj;
++ uint64_t clear_state_gpu_addr;
++ volatile uint32_t *cs_ptr;
++ const struct cs_section_def *cs_data;
++ u32 clear_state_size;
++ /* for cp tables */
++ struct amdgpu_bo *cp_table_obj;
++ uint64_t cp_table_gpu_addr;
++ volatile uint32_t *cp_table_ptr;
++ u32 cp_table_size;
++
++ /* safe mode for updating CG/PG state */
++ bool in_safe_mode;
++ const struct amdgpu_rlc_funcs *funcs;
++
++ /* for firmware data */
++ u32 save_and_restore_offset;
++ u32 clear_state_descriptor_offset;
++ u32 avail_scratch_ram_locations;
++ u32 reg_restore_list_size;
++ u32 reg_list_format_start;
++ u32 reg_list_format_separate_start;
++ u32 starting_offsets_start;
++ u32 reg_list_format_size_bytes;
++ u32 reg_list_size_bytes;
++ u32 reg_list_format_direct_reg_list_length;
++ u32 save_restore_list_cntl_size_bytes;
++ u32 save_restore_list_gpm_size_bytes;
++ u32 save_restore_list_srm_size_bytes;
++
++ u32 *register_list_format;
++ u32 *register_restore;
++ u8 *save_restore_list_cntl;
++ u8 *save_restore_list_gpm;
++ u8 *save_restore_list_srm;
++
++ bool is_rlc_v2_1;
++};
++
++void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev);
++
++#endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+index 4f8d6a2..abc8ec6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+@@ -2397,13 +2397,6 @@ static void gfx_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
+ amdgpu_ring_write(ring, val);
+ }
+
+-static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev)
+-{
+- amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj, NULL, NULL);
+- amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL);
+- amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL);
+-}
+-
+ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
+ {
+ const u32 *src_ptr;
+@@ -2432,7 +2425,7 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
+ if (r) {
+ dev_warn(adev->dev, "(%d) create RLC sr bo failed\n",
+ r);
+- gfx_v6_0_rlc_fini(adev);
++ amdgpu_gfx_rlc_fini(adev);
+ return r;
+ }
+
+@@ -2457,7 +2450,7 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
+ (void **)&adev->gfx.rlc.cs_ptr);
+ if (r) {
+ dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
+- gfx_v6_0_rlc_fini(adev);
++ amdgpu_gfx_rlc_fini(adev);
+ return r;
+ }
+
+@@ -3194,7 +3187,7 @@ static int gfx_v6_0_sw_fini(void *handle)
+ for (i = 0; i < adev->gfx.num_compute_rings; i++)
+ amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
+
+- gfx_v6_0_rlc_fini(adev);
++ amdgpu_gfx_rlc_fini(adev);
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+index 05f7a29..19a0e4f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+@@ -3288,13 +3288,6 @@ static void gfx_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
+ * The RLC is a multi-purpose microengine that handles a
+ * variety of functions.
+ */
+-static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev)
+-{
+- amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj, NULL, NULL);
+- amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL);
+- amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL);
+-}
+-
+ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
+ {
+ const u32 *src_ptr;
+@@ -3334,7 +3327,7 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
+ (void **)&adev->gfx.rlc.sr_ptr);
+ if (r) {
+ dev_warn(adev->dev, "(%d) create, pin or map of RLC sr bo failed\n", r);
+- gfx_v7_0_rlc_fini(adev);
++ amdgpu_gfx_rlc_fini(adev);
+ return r;
+ }
+
+@@ -3357,7 +3350,7 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
+ (void **)&adev->gfx.rlc.cs_ptr);
+ if (r) {
+ dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
+- gfx_v7_0_rlc_fini(adev);
++ amdgpu_gfx_rlc_fini(adev);
+ return r;
+ }
+
+@@ -3377,7 +3370,7 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
+ (void **)&adev->gfx.rlc.cp_table_ptr);
+ if (r) {
+ dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
+- gfx_v7_0_rlc_fini(adev);
++ amdgpu_gfx_rlc_fini(adev);
+ return r;
+ }
+
+@@ -4624,7 +4617,7 @@ static int gfx_v7_0_sw_fini(void *handle)
+ amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
+
+ gfx_v7_0_cp_compute_fini(adev);
+- gfx_v7_0_rlc_fini(adev);
++ amdgpu_gfx_rlc_fini(adev);
+ gfx_v7_0_mec_fini(adev);
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
+ &adev->gfx.rlc.clear_state_gpu_addr,
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index a0fc2c2..bdfa613 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -1363,12 +1363,6 @@ static void cz_init_cp_jump_table(struct amdgpu_device *adev)
+ }
+ }
+
+-static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
+-{
+- amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL);
+- amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL);
+-}
+-
+ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
+ {
+ volatile u32 *dst_ptr;
+@@ -1391,7 +1385,7 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
+ (void **)&adev->gfx.rlc.cs_ptr);
+ if (r) {
+ dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
+- gfx_v8_0_rlc_fini(adev);
++ amdgpu_gfx_rlc_fini(adev);
+ return r;
+ }
+
+@@ -2180,7 +2174,7 @@ static int gfx_v8_0_sw_fini(void *handle)
+ amdgpu_gfx_kiq_fini(adev);
+
+ gfx_v8_0_mec_fini(adev);
+- gfx_v8_0_rlc_fini(adev);
++ amdgpu_gfx_rlc_fini(adev);
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
+ &adev->gfx.rlc.clear_state_gpu_addr,
+ (void **)&adev->gfx.rlc.cs_ptr);
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 0481e21..69fcc77 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -1141,19 +1141,6 @@ static void rv_init_cp_jump_table(struct amdgpu_device *adev)
+ }
+ }
+
+-static void gfx_v9_0_rlc_fini(struct amdgpu_device *adev)
+-{
+- /* clear state block */
+- amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
+- &adev->gfx.rlc.clear_state_gpu_addr,
+- (void **)&adev->gfx.rlc.cs_ptr);
+-
+- /* jump table block */
+- amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
+- &adev->gfx.rlc.cp_table_gpu_addr,
+- (void **)&adev->gfx.rlc.cp_table_ptr);
+-}
+-
+ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
+ {
+ volatile u32 *dst_ptr;
+@@ -1176,7 +1163,7 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to create rlc csb bo\n",
+ r);
+- gfx_v9_0_rlc_fini(adev);
++ amdgpu_gfx_rlc_fini(adev);
+ return r;
+ }
+ /* set up the cs buffer */
+@@ -1198,7 +1185,7 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
+ if (r) {
+ dev_err(adev->dev,
+ "(%d) failed to create cp table bo\n", r);
+- gfx_v9_0_rlc_fini(adev);
++ amdgpu_gfx_rlc_fini(adev);
+ return r;
+ }
+
+--
+2.7.4
+
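With the RLC code split out, each gfx IP version is expected to keep its own amdgpu_rlc_funcs table, point adev->gfx.rlc.funcs at it, and free the RLC buffer objects through the shared helper instead of a per-ASIC *_rlc_fini(). A rough sketch of that glue, under those assumptions; every gfx_vN_0_* name below is a placeholder, not a symbol from the patch set:

static void gfx_vN_0_enter_rlc_safe_mode(struct amdgpu_device *adev) { /* ASIC-specific */ }
static void gfx_vN_0_exit_rlc_safe_mode(struct amdgpu_device *adev) { /* ASIC-specific */ }

static const struct amdgpu_rlc_funcs gfx_vN_0_rlc_funcs = {
        .enter_safe_mode = gfx_vN_0_enter_rlc_safe_mode,
        .exit_safe_mode  = gfx_vN_0_exit_rlc_safe_mode,
};

static void gfx_vN_0_set_rlc_funcs(struct amdgpu_device *adev)
{
        adev->gfx.rlc.funcs = &gfx_vN_0_rlc_funcs;
}

static int gfx_vN_0_sw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* one shared teardown path for the save/restore, clear-state and
         * cp-table BOs, replacing the per-ASIC copies removed above
         */
        amdgpu_gfx_rlc_fini(adev);
        return 0;
}

The next patch in the series (5616) builds on this by moving the safe-mode enter/exit logic behind the same function table.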
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5616-drm-amdgpu-abstract-the-function-of-enter-exit-safe-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5616-drm-amdgpu-abstract-the-function-of-enter-exit-safe-.patch
new file mode 100644
index 00000000..f394779a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5616-drm-amdgpu-abstract-the-function-of-enter-exit-safe-.patch
@@ -0,0 +1,1456 @@
+From 989d6d01b64562ed579cc2ea1cadd01de0d99c35 Mon Sep 17 00:00:00 2001
+From: Likun Gao <Likun.Gao@amd.com>
+Date: Thu, 8 Nov 2018 20:19:54 +0800
+Subject: [PATCH 5616/5725] drm/amdgpu: abstract the function of enter/exit
+ safe mode for RLC
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Abstract the function of amdgpu_gfx_rlc_enter/exit_safe_mode and some part of
+rlc_init to improve the reusability of RLC.
+
+Signed-off-by: Likun Gao <Likun.Gao@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c | 229 ++++++++++++++++++++-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h | 33 +--
+ drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 6 +-
+ drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 24 +--
+ drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 148 +++----------
+ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 201 ++++++------------
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 183 ++++------------
+ drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 6 +-
+ .../gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c | 12 +-
+ .../gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c | 36 ++--
+ 10 files changed, 408 insertions(+), 470 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
+index c5459ab..c8793e6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
+@@ -1,4 +1,3 @@
+-
+ /*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+@@ -23,12 +22,238 @@
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+-
++#include <linux/firmware.h>
+ #include "amdgpu.h"
+ #include "amdgpu_gfx.h"
+ #include "amdgpu_rlc.h"
+
+ /**
++ * amdgpu_gfx_rlc_enter_safe_mode - Set RLC into safe mode
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Put the RLC into safe mode if it is enabled and not already in safe mode.
++ */
++void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev)
++{
++ if (adev->gfx.rlc.in_safe_mode)
++ return;
++
++ /* if RLC is not enabled, do nothing */
++ if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
++ return;
++
++ if (adev->cg_flags &
++ (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
++ AMD_CG_SUPPORT_GFX_3D_CGCG)) {
++ adev->gfx.rlc.funcs->set_safe_mode(adev);
++ adev->gfx.rlc.in_safe_mode = true;
++ }
++}
++
++/**
++ * amdgpu_gfx_rlc_exit_safe_mode - Set RLC out of safe mode
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Take the RLC out of safe mode if it is enabled and currently in safe mode.
++ */
++void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev)
++{
++ if (!(adev->gfx.rlc.in_safe_mode))
++ return;
++
++ /* if RLC is not enabled, do nothing */
++ if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
++ return;
++
++ if (adev->cg_flags &
++ (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
++ AMD_CG_SUPPORT_GFX_3D_CGCG)) {
++ adev->gfx.rlc.funcs->unset_safe_mode(adev);
++ adev->gfx.rlc.in_safe_mode = false;
++ }
++}
++
++/**
++ * amdgpu_gfx_rlc_init_sr - Init save restore block
++ *
++ * @adev: amdgpu_device pointer
++ * @dws: the size of save restore block
++ *
++ * Allocate and set up the RLC save/restore block.
++ * Returns 0 on success or a negative error code if allocation failed.
++ */
++int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws)
++{
++ const u32 *src_ptr;
++ volatile u32 *dst_ptr;
++ u32 i;
++ int r;
++
++ /* allocate save restore block */
++ r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
++ AMDGPU_GEM_DOMAIN_VRAM,
++ &adev->gfx.rlc.save_restore_obj,
++ &adev->gfx.rlc.save_restore_gpu_addr,
++ (void **)&adev->gfx.rlc.sr_ptr);
++ if (r) {
++ dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
++ amdgpu_gfx_rlc_fini(adev);
++ return r;
++ }
++
++ /* write the sr buffer */
++ src_ptr = adev->gfx.rlc.reg_list;
++ dst_ptr = adev->gfx.rlc.sr_ptr;
++ for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
++ dst_ptr[i] = cpu_to_le32(src_ptr[i]);
++ amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
++ amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
++
++ return 0;
++}
++
++/**
++ * amdgpu_gfx_rlc_init_csb - Init clear state block
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Allocate and set up the RLC clear state block.
++ * Returns 0 on success or a negative error code if allocation failed.
++ */
++int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev)
++{
++ volatile u32 *dst_ptr;
++ u32 dws;
++ int r;
++
++ /* allocate clear state block */
++ adev->gfx.rlc.clear_state_size = dws = adev->gfx.rlc.funcs->get_csb_size(adev);
++ r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
++ AMDGPU_GEM_DOMAIN_VRAM,
++ &adev->gfx.rlc.clear_state_obj,
++ &adev->gfx.rlc.clear_state_gpu_addr,
++ (void **)&adev->gfx.rlc.cs_ptr);
++ if (r) {
++ dev_err(adev->dev, "(%d) failed to create rlc csb bo\n", r);
++ amdgpu_gfx_rlc_fini(adev);
++ return r;
++ }
++
++ /* set up the cs buffer */
++ dst_ptr = adev->gfx.rlc.cs_ptr;
++ adev->gfx.rlc.funcs->get_csb_buffer(adev, dst_ptr);
++ amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
++ amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
++ amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
++
++ return 0;
++}
++
++/**
++ * amdgpu_gfx_rlc_init_cpt - Init cp table
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Allocate and set up the RLC cp table.
++ * Returns 0 on success or a negative error code if allocation failed.
++ */
++int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev)
++{
++ int r;
++
++ r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
++ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
++ &adev->gfx.rlc.cp_table_obj,
++ &adev->gfx.rlc.cp_table_gpu_addr,
++ (void **)&adev->gfx.rlc.cp_table_ptr);
++ if (r) {
++ dev_err(adev->dev, "(%d) failed to create cp table bo\n", r);
++ amdgpu_gfx_rlc_fini(adev);
++ return r;
++ }
++
++ /* set up the cp table */
++ amdgpu_gfx_rlc_setup_cp_table(adev);
++ amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
++ amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
++
++ return 0;
++}
++
++/**
++ * amdgpu_gfx_rlc_setup_cp_table - set up the buffer of the cp table
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Write cp firmware data into cp table.
++ */
++void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev)
++{
++ const __le32 *fw_data;
++ volatile u32 *dst_ptr;
++ int me, i, max_me;
++ u32 bo_offset = 0;
++ u32 table_offset, table_size;
++
++ max_me = adev->gfx.rlc.funcs->get_cp_table_num(adev);
++
++ /* write the cp table buffer */
++ dst_ptr = adev->gfx.rlc.cp_table_ptr;
++ for (me = 0; me < max_me; me++) {
++ if (me == 0) {
++ const struct gfx_firmware_header_v1_0 *hdr =
++ (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
++ fw_data = (const __le32 *)
++ (adev->gfx.ce_fw->data +
++ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
++ table_offset = le32_to_cpu(hdr->jt_offset);
++ table_size = le32_to_cpu(hdr->jt_size);
++ } else if (me == 1) {
++ const struct gfx_firmware_header_v1_0 *hdr =
++ (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
++ fw_data = (const __le32 *)
++ (adev->gfx.pfp_fw->data +
++ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
++ table_offset = le32_to_cpu(hdr->jt_offset);
++ table_size = le32_to_cpu(hdr->jt_size);
++ } else if (me == 2) {
++ const struct gfx_firmware_header_v1_0 *hdr =
++ (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
++ fw_data = (const __le32 *)
++ (adev->gfx.me_fw->data +
++ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
++ table_offset = le32_to_cpu(hdr->jt_offset);
++ table_size = le32_to_cpu(hdr->jt_size);
++ } else if (me == 3) {
++ const struct gfx_firmware_header_v1_0 *hdr =
++ (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
++ fw_data = (const __le32 *)
++ (adev->gfx.mec_fw->data +
++ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
++ table_offset = le32_to_cpu(hdr->jt_offset);
++ table_size = le32_to_cpu(hdr->jt_size);
++ } else if (me == 4) {
++ const struct gfx_firmware_header_v1_0 *hdr =
++ (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
++ fw_data = (const __le32 *)
++ (adev->gfx.mec2_fw->data +
++ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
++ table_offset = le32_to_cpu(hdr->jt_offset);
++ table_size = le32_to_cpu(hdr->jt_size);
++ }
++
++ for (i = 0; i < table_size; i ++) {
++ dst_ptr[bo_offset + i] =
++ cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
++ }
++
++ bo_offset += table_size;
++ }
++}
++
++/**
+ * amdgpu_gfx_rlc_fini - Free the BOs used by the RLC
+ *
+ * @adev: amdgpu_device pointer
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
+index b3b0920..49a8ab5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
+@@ -1,4 +1,3 @@
+-
+ /*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+@@ -28,9 +27,13 @@
+ #include "clearstate_defs.h"
+
+ struct amdgpu_rlc_funcs {
+- void (*enter_safe_mode)(struct amdgpu_device *adev);
+- void (*exit_safe_mode)(struct amdgpu_device *adev);
++ bool (*is_rlc_enabled)(struct amdgpu_device *adev);
++ void (*set_safe_mode)(struct amdgpu_device *adev);
++ void (*unset_safe_mode)(struct amdgpu_device *adev);
+ int (*init)(struct amdgpu_device *adev);
++ u32 (*get_csb_size)(struct amdgpu_device *adev);
++ void (*get_csb_buffer)(struct amdgpu_device *adev, volatile u32 *buffer);
++ int (*get_cp_table_num)(struct amdgpu_device *adev);
+ int (*resume)(struct amdgpu_device *adev);
+ void (*stop)(struct amdgpu_device *adev);
+ void (*reset)(struct amdgpu_device *adev);
+@@ -39,21 +42,21 @@ struct amdgpu_rlc_funcs {
+
+ struct amdgpu_rlc {
+ /* for power gating */
+- struct amdgpu_bo *save_restore_obj;
+- uint64_t save_restore_gpu_addr;
+- volatile uint32_t *sr_ptr;
++ struct amdgpu_bo *save_restore_obj;
++ uint64_t save_restore_gpu_addr;
++ volatile uint32_t *sr_ptr;
+ const u32 *reg_list;
+ u32 reg_list_size;
+ /* for clear state */
+- struct amdgpu_bo *clear_state_obj;
+- uint64_t clear_state_gpu_addr;
+- volatile uint32_t *cs_ptr;
++ struct amdgpu_bo *clear_state_obj;
++ uint64_t clear_state_gpu_addr;
++ volatile uint32_t *cs_ptr;
+ const struct cs_section_def *cs_data;
+ u32 clear_state_size;
+ /* for cp tables */
+- struct amdgpu_bo *cp_table_obj;
+- uint64_t cp_table_gpu_addr;
+- volatile uint32_t *cp_table_ptr;
++ struct amdgpu_bo *cp_table_obj;
++ uint64_t cp_table_gpu_addr;
++ volatile uint32_t *cp_table_ptr;
+ u32 cp_table_size;
+
+ /* safe mode for updating CG/PG state */
+@@ -84,6 +87,12 @@ struct amdgpu_rlc {
+ bool is_rlc_v2_1;
+ };
+
++void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev);
++void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev);
++int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws);
++int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev);
++int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev);
++void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev);
+ void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev);
+
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+index ecd69ab..e02631d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
++++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+@@ -743,19 +743,19 @@ static int ci_enable_didt(struct amdgpu_device *adev, bool enable)
+
+ if (pi->caps_sq_ramping || pi->caps_db_ramping ||
+ pi->caps_td_ramping || pi->caps_tcp_ramping) {
+- adev->gfx.rlc.funcs->enter_safe_mode(adev);
++ amdgpu_gfx_rlc_enter_safe_mode(adev);
+
+ if (enable) {
+ ret = ci_program_pt_config_registers(adev, didt_config_ci);
+ if (ret) {
+- adev->gfx.rlc.funcs->exit_safe_mode(adev);
++ amdgpu_gfx_rlc_exit_safe_mode(adev);
+ return ret;
+ }
+ }
+
+ ci_do_enable_didt(adev, enable);
+
+- adev->gfx.rlc.funcs->exit_safe_mode(adev);
++ amdgpu_gfx_rlc_exit_safe_mode(adev);
+ }
+
+ return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+index abc8ec6..075407e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+@@ -2401,7 +2401,7 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
+ {
+ const u32 *src_ptr;
+ volatile u32 *dst_ptr;
+- u32 dws, i;
++ u32 dws;
+ u64 reg_list_mc_addr;
+ const struct cs_section_def *cs_data;
+ int r;
+@@ -2416,26 +2416,10 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
+ cs_data = adev->gfx.rlc.cs_data;
+
+ if (src_ptr) {
+- /* save restore block */
+- r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+- AMDGPU_GEM_DOMAIN_VRAM,
+- &adev->gfx.rlc.save_restore_obj,
+- &adev->gfx.rlc.save_restore_gpu_addr,
+- (void **)&adev->gfx.rlc.sr_ptr);
+- if (r) {
+- dev_warn(adev->dev, "(%d) create RLC sr bo failed\n",
+- r);
+- amdgpu_gfx_rlc_fini(adev);
++ /* init save restore block */
++ r = amdgpu_gfx_rlc_init_sr(adev, dws);
++ if (r)
+ return r;
+- }
+-
+- /* write the sr buffer */
+- dst_ptr = adev->gfx.rlc.sr_ptr;
+- for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
+- dst_ptr[i] = cpu_to_le32(src_ptr[i]);
+-
+- amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
+- amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
+ }
+
+ if (cs_data) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+index 19a0e4f..6815153 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+@@ -882,7 +882,6 @@ static const u32 kalindi_rlc_save_restore_register_list[] =
+
+ static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev);
+ static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
+-static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev);
+ static void gfx_v7_0_init_pg(struct amdgpu_device *adev);
+ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev);
+
+@@ -3291,8 +3290,7 @@ static void gfx_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
+ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
+ {
+ const u32 *src_ptr;
+- volatile u32 *dst_ptr;
+- u32 dws, i;
++ u32 dws;
+ const struct cs_section_def *cs_data;
+ int r;
+
+@@ -3319,66 +3317,23 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
+ cs_data = adev->gfx.rlc.cs_data;
+
+ if (src_ptr) {
+- /* save restore block */
+- r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+- AMDGPU_GEM_DOMAIN_VRAM,
+- &adev->gfx.rlc.save_restore_obj,
+- &adev->gfx.rlc.save_restore_gpu_addr,
+- (void **)&adev->gfx.rlc.sr_ptr);
+- if (r) {
+- dev_warn(adev->dev, "(%d) create, pin or map of RLC sr bo failed\n", r);
+- amdgpu_gfx_rlc_fini(adev);
++ /* init save restore block */
++ r = amdgpu_gfx_rlc_init_sr(adev, dws);
++ if (r)
+ return r;
+- }
+-
+- /* write the sr buffer */
+- dst_ptr = adev->gfx.rlc.sr_ptr;
+- for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
+- dst_ptr[i] = cpu_to_le32(src_ptr[i]);
+- amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
+- amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
+ }
+
+ if (cs_data) {
+- /* clear state block */
+- adev->gfx.rlc.clear_state_size = dws = gfx_v7_0_get_csb_size(adev);
+-
+- r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+- AMDGPU_GEM_DOMAIN_VRAM,
+- &adev->gfx.rlc.clear_state_obj,
+- &adev->gfx.rlc.clear_state_gpu_addr,
+- (void **)&adev->gfx.rlc.cs_ptr);
+- if (r) {
+- dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
+- amdgpu_gfx_rlc_fini(adev);
++ /* init clear state block */
++ r = amdgpu_gfx_rlc_init_csb(adev);
++ if (r)
+ return r;
+- }
+-
+- /* set up the cs buffer */
+- dst_ptr = adev->gfx.rlc.cs_ptr;
+- gfx_v7_0_get_csb_buffer(adev, dst_ptr);
+- amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
+- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+ }
+
+ if (adev->gfx.rlc.cp_table_size) {
+-
+- r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
+- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+- &adev->gfx.rlc.cp_table_obj,
+- &adev->gfx.rlc.cp_table_gpu_addr,
+- (void **)&adev->gfx.rlc.cp_table_ptr);
+- if (r) {
+- dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
+- amdgpu_gfx_rlc_fini(adev);
++ r = amdgpu_gfx_rlc_init_cpt(adev);
++ if (r)
+ return r;
+- }
+-
+- gfx_v7_0_init_cp_pg_table(adev);
+-
+- amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
+- amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
+-
+ }
+
+ return 0;
+@@ -3459,7 +3414,12 @@ static u32 gfx_v7_0_halt_rlc(struct amdgpu_device *adev)
+ return orig;
+ }
+
+-static void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
++static bool gfx_v7_0_is_rlc_enabled(struct amdgpu_device *adev)
++{
++ return true;
++}
++
++static void gfx_v7_0_set_safe_mode(struct amdgpu_device *adev)
+ {
+ u32 tmp, i, mask;
+
+@@ -3481,7 +3441,7 @@ static void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
+ }
+ }
+
+-static void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
++static void gfx_v7_0_unset_safe_mode(struct amdgpu_device *adev)
+ {
+ u32 tmp;
+
+@@ -3797,72 +3757,12 @@ static void gfx_v7_0_enable_gds_pg(struct amdgpu_device *adev, bool enable)
+ WREG32(mmRLC_PG_CNTL, data);
+ }
+
+-static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev)
++static int gfx_v7_0_cp_pg_table_num(struct amdgpu_device *adev)
+ {
+- const __le32 *fw_data;
+- volatile u32 *dst_ptr;
+- int me, i, max_me = 4;
+- u32 bo_offset = 0;
+- u32 table_offset, table_size;
+-
+ if (adev->asic_type == CHIP_KAVERI)
+- max_me = 5;
+-
+- if (adev->gfx.rlc.cp_table_ptr == NULL)
+- return;
+-
+- /* write the cp table buffer */
+- dst_ptr = adev->gfx.rlc.cp_table_ptr;
+- for (me = 0; me < max_me; me++) {
+- if (me == 0) {
+- const struct gfx_firmware_header_v1_0 *hdr =
+- (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
+- fw_data = (const __le32 *)
+- (adev->gfx.ce_fw->data +
+- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+- table_offset = le32_to_cpu(hdr->jt_offset);
+- table_size = le32_to_cpu(hdr->jt_size);
+- } else if (me == 1) {
+- const struct gfx_firmware_header_v1_0 *hdr =
+- (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
+- fw_data = (const __le32 *)
+- (adev->gfx.pfp_fw->data +
+- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+- table_offset = le32_to_cpu(hdr->jt_offset);
+- table_size = le32_to_cpu(hdr->jt_size);
+- } else if (me == 2) {
+- const struct gfx_firmware_header_v1_0 *hdr =
+- (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
+- fw_data = (const __le32 *)
+- (adev->gfx.me_fw->data +
+- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+- table_offset = le32_to_cpu(hdr->jt_offset);
+- table_size = le32_to_cpu(hdr->jt_size);
+- } else if (me == 3) {
+- const struct gfx_firmware_header_v1_0 *hdr =
+- (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+- fw_data = (const __le32 *)
+- (adev->gfx.mec_fw->data +
+- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+- table_offset = le32_to_cpu(hdr->jt_offset);
+- table_size = le32_to_cpu(hdr->jt_size);
+- } else {
+- const struct gfx_firmware_header_v1_0 *hdr =
+- (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
+- fw_data = (const __le32 *)
+- (adev->gfx.mec2_fw->data +
+- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+- table_offset = le32_to_cpu(hdr->jt_offset);
+- table_size = le32_to_cpu(hdr->jt_size);
+- }
+-
+- for (i = 0; i < table_size; i ++) {
+- dst_ptr[bo_offset + i] =
+- cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
+- }
+-
+- bo_offset += table_size;
+- }
++ return 5;
++ else
++ return 4;
+ }
+
+ static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev,
+@@ -4301,8 +4201,12 @@ static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
+ };
+
+ static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
+- .enter_safe_mode = gfx_v7_0_enter_rlc_safe_mode,
+- .exit_safe_mode = gfx_v7_0_exit_rlc_safe_mode
++ .is_rlc_enabled = gfx_v7_0_is_rlc_enabled,
++ .set_safe_mode = gfx_v7_0_set_safe_mode,
++ .unset_safe_mode = gfx_v7_0_unset_safe_mode,
++ .get_csb_size = gfx_v7_0_get_csb_size,
++ .get_csb_buffer = gfx_v7_0_get_csb_buffer,
++ .get_cp_table_num = gfx_v7_0_cp_pg_table_num,
+ };
+
+ static int gfx_v7_0_early_init(void *handle)
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index bdfa613..90cbf66 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -1298,75 +1298,16 @@ static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
+ buffer[count++] = cpu_to_le32(0);
+ }
+
+-static void cz_init_cp_jump_table(struct amdgpu_device *adev)
++static int gfx_v8_0_cp_jump_table_num(struct amdgpu_device *adev)
+ {
+- const __le32 *fw_data;
+- volatile u32 *dst_ptr;
+- int me, i, max_me = 4;
+- u32 bo_offset = 0;
+- u32 table_offset, table_size;
+-
+ if (adev->asic_type == CHIP_CARRIZO)
+- max_me = 5;
+-
+- /* write the cp table buffer */
+- dst_ptr = adev->gfx.rlc.cp_table_ptr;
+- for (me = 0; me < max_me; me++) {
+- if (me == 0) {
+- const struct gfx_firmware_header_v1_0 *hdr =
+- (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
+- fw_data = (const __le32 *)
+- (adev->gfx.ce_fw->data +
+- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+- table_offset = le32_to_cpu(hdr->jt_offset);
+- table_size = le32_to_cpu(hdr->jt_size);
+- } else if (me == 1) {
+- const struct gfx_firmware_header_v1_0 *hdr =
+- (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
+- fw_data = (const __le32 *)
+- (adev->gfx.pfp_fw->data +
+- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+- table_offset = le32_to_cpu(hdr->jt_offset);
+- table_size = le32_to_cpu(hdr->jt_size);
+- } else if (me == 2) {
+- const struct gfx_firmware_header_v1_0 *hdr =
+- (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
+- fw_data = (const __le32 *)
+- (adev->gfx.me_fw->data +
+- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+- table_offset = le32_to_cpu(hdr->jt_offset);
+- table_size = le32_to_cpu(hdr->jt_size);
+- } else if (me == 3) {
+- const struct gfx_firmware_header_v1_0 *hdr =
+- (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+- fw_data = (const __le32 *)
+- (adev->gfx.mec_fw->data +
+- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+- table_offset = le32_to_cpu(hdr->jt_offset);
+- table_size = le32_to_cpu(hdr->jt_size);
+- } else if (me == 4) {
+- const struct gfx_firmware_header_v1_0 *hdr =
+- (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
+- fw_data = (const __le32 *)
+- (adev->gfx.mec2_fw->data +
+- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+- table_offset = le32_to_cpu(hdr->jt_offset);
+- table_size = le32_to_cpu(hdr->jt_size);
+- }
+-
+- for (i = 0; i < table_size; i ++) {
+- dst_ptr[bo_offset + i] =
+- cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
+- }
+-
+- bo_offset += table_size;
+- }
++ return 5;
++ else
++ return 4;
+ }
+
+ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
+ {
+- volatile u32 *dst_ptr;
+- u32 dws;
+ const struct cs_section_def *cs_data;
+ int r;
+
+@@ -1375,44 +1316,18 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
+ cs_data = adev->gfx.rlc.cs_data;
+
+ if (cs_data) {
+- /* clear state block */
+- adev->gfx.rlc.clear_state_size = dws = gfx_v8_0_get_csb_size(adev);
+-
+- r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+- AMDGPU_GEM_DOMAIN_VRAM,
+- &adev->gfx.rlc.clear_state_obj,
+- &adev->gfx.rlc.clear_state_gpu_addr,
+- (void **)&adev->gfx.rlc.cs_ptr);
+- if (r) {
+- dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
+- amdgpu_gfx_rlc_fini(adev);
++ /* init clear state block */
++ r = amdgpu_gfx_rlc_init_csb(adev);
++ if (r)
+ return r;
+- }
+-
+- /* set up the cs buffer */
+- dst_ptr = adev->gfx.rlc.cs_ptr;
+- gfx_v8_0_get_csb_buffer(adev, dst_ptr);
+- amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
+- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+ }
+
+ if ((adev->asic_type == CHIP_CARRIZO) ||
+ (adev->asic_type == CHIP_STONEY)) {
+ adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
+- r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
+- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+- &adev->gfx.rlc.cp_table_obj,
+- &adev->gfx.rlc.cp_table_gpu_addr,
+- (void **)&adev->gfx.rlc.cp_table_ptr);
+- if (r) {
+- dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
++ r = amdgpu_gfx_rlc_init_cpt(adev);
++ if (r)
+ return r;
+- }
+-
+- cz_init_cp_jump_table(adev);
+-
+- amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
+- amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
+ }
+
+ return 0;
+@@ -4964,7 +4879,7 @@ static int gfx_v8_0_hw_fini(void *handle)
+ pr_debug("For SRIOV client, shouldn't do anything.\n");
+ return 0;
+ }
+- adev->gfx.rlc.funcs->enter_safe_mode(adev);
++ amdgpu_gfx_rlc_enter_safe_mode(adev);
+ if (!gfx_v8_0_wait_for_idle(adev))
+ gfx_v8_0_cp_enable(adev, false);
+ else
+@@ -4973,7 +4888,7 @@ static int gfx_v8_0_hw_fini(void *handle)
+ gfx_v8_0_rlc_stop(adev);
+ else
+ pr_err("rlc is busy, skip halt rlc\n");
+- adev->gfx.rlc.funcs->exit_safe_mode(adev);
++ amdgpu_gfx_rlc_exit_safe_mode(adev);
+ return 0;
+ }
+
+@@ -5436,7 +5351,7 @@ static int gfx_v8_0_set_powergating_state(void *handle,
+ AMD_PG_SUPPORT_RLC_SMU_HS |
+ AMD_PG_SUPPORT_CP |
+ AMD_PG_SUPPORT_GFX_DMG))
+- adev->gfx.rlc.funcs->enter_safe_mode(adev);
++ amdgpu_gfx_rlc_enter_safe_mode(adev);
+ switch (adev->asic_type) {
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+@@ -5490,7 +5405,7 @@ static int gfx_v8_0_set_powergating_state(void *handle,
+ AMD_PG_SUPPORT_RLC_SMU_HS |
+ AMD_PG_SUPPORT_CP |
+ AMD_PG_SUPPORT_GFX_DMG))
+- adev->gfx.rlc.funcs->exit_safe_mode(adev);
++ amdgpu_gfx_rlc_exit_safe_mode(adev);
+ return 0;
+ }
+
+@@ -5584,57 +5499,53 @@ static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev,
+ #define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001
+ #define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e
+
+-static void iceland_enter_rlc_safe_mode(struct amdgpu_device *adev)
++static bool gfx_v8_0_is_rlc_enabled(struct amdgpu_device *adev)
+ {
+- u32 data;
+- unsigned i;
++ uint32_t rlc_setting;
+
+- data = RREG32(mmRLC_CNTL);
+- if (!(data & RLC_CNTL__RLC_ENABLE_F32_MASK))
+- return;
++ rlc_setting = RREG32(mmRLC_CNTL);
++ if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
++ return false;
+
+- if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
+- data |= RLC_SAFE_MODE__CMD_MASK;
+- data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
+- data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
+- WREG32(mmRLC_SAFE_MODE, data);
++ return true;
++}
+
+- for (i = 0; i < adev->usec_timeout; i++) {
+- if ((RREG32(mmRLC_GPM_STAT) &
+- (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
+- RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) ==
+- (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
+- RLC_GPM_STAT__GFX_POWER_STATUS_MASK))
+- break;
+- udelay(1);
+- }
++static void gfx_v8_0_set_safe_mode(struct amdgpu_device *adev)
++{
++ uint32_t data;
++ unsigned i;
++ data = RREG32(mmRLC_CNTL);
++ data |= RLC_SAFE_MODE__CMD_MASK;
++ data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
++ data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
++ WREG32(mmRLC_SAFE_MODE, data);
+
+- for (i = 0; i < adev->usec_timeout; i++) {
+- if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
+- break;
+- udelay(1);
+- }
+- adev->gfx.rlc.in_safe_mode = true;
++ /* wait for RLC_SAFE_MODE */
++ for (i = 0; i < adev->usec_timeout; i++) {
++ if ((RREG32(mmRLC_GPM_STAT) &
++ (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
++ RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) ==
++ (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
++ RLC_GPM_STAT__GFX_POWER_STATUS_MASK))
++ break;
++ udelay(1);
++ }
++ for (i = 0; i < adev->usec_timeout; i++) {
++ if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
++ break;
++ udelay(1);
+ }
+ }
+
+-static void iceland_exit_rlc_safe_mode(struct amdgpu_device *adev)
++static void gfx_v8_0_unset_safe_mode(struct amdgpu_device *adev)
+ {
+- u32 data = 0;
++ uint32_t data;
+ unsigned i;
+
+ data = RREG32(mmRLC_CNTL);
+- if (!(data & RLC_CNTL__RLC_ENABLE_F32_MASK))
+- return;
+-
+- if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
+- if (adev->gfx.rlc.in_safe_mode) {
+- data |= RLC_SAFE_MODE__CMD_MASK;
+- data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
+- WREG32(mmRLC_SAFE_MODE, data);
+- adev->gfx.rlc.in_safe_mode = false;
+- }
+- }
++ data |= RLC_SAFE_MODE__CMD_MASK;
++ data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
++ WREG32(mmRLC_SAFE_MODE, data);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
+@@ -5644,8 +5555,12 @@ static void iceland_exit_rlc_safe_mode(struct amdgpu_device *adev)
+ }
+
+ static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {
+- .enter_safe_mode = iceland_enter_rlc_safe_mode,
+- .exit_safe_mode = iceland_exit_rlc_safe_mode
++ .is_rlc_enabled = gfx_v8_0_is_rlc_enabled,
++ .set_safe_mode = gfx_v8_0_set_safe_mode,
++ .unset_safe_mode = gfx_v8_0_unset_safe_mode,
++ .get_csb_size = gfx_v8_0_get_csb_size,
++ .get_csb_buffer = gfx_v8_0_get_csb_buffer,
++ .get_cp_table_num = gfx_v8_0_cp_jump_table_num,
+ };
+
+ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+@@ -5653,7 +5568,7 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
+ {
+ uint32_t temp, data;
+
+- adev->gfx.rlc.funcs->enter_safe_mode(adev);
++ amdgpu_gfx_rlc_enter_safe_mode(adev);
+
+ /* It is disabled by HW by default */
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
+@@ -5749,7 +5664,7 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
+ gfx_v8_0_wait_for_rlc_serdes(adev);
+ }
+
+- adev->gfx.rlc.funcs->exit_safe_mode(adev);
++ amdgpu_gfx_rlc_exit_safe_mode(adev);
+ }
+
+ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
+@@ -5759,7 +5674,7 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
+
+ temp = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
+
+- adev->gfx.rlc.funcs->enter_safe_mode(adev);
++ amdgpu_gfx_rlc_enter_safe_mode(adev);
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
+ temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
+@@ -5842,7 +5757,7 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
+
+ gfx_v8_0_wait_for_rlc_serdes(adev);
+
+- adev->gfx.rlc.funcs->exit_safe_mode(adev);
++ amdgpu_gfx_rlc_exit_safe_mode(adev);
+ }
+ static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
+ bool enable)
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 69fcc77..ac2a843 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -1079,72 +1079,13 @@ static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
+ WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
+ }
+
+-static void rv_init_cp_jump_table(struct amdgpu_device *adev)
++static int gfx_v9_0_cp_jump_table_num(struct amdgpu_device *adev)
+ {
+- const __le32 *fw_data;
+- volatile u32 *dst_ptr;
+- int me, i, max_me = 5;
+- u32 bo_offset = 0;
+- u32 table_offset, table_size;
+-
+- /* write the cp table buffer */
+- dst_ptr = adev->gfx.rlc.cp_table_ptr;
+- for (me = 0; me < max_me; me++) {
+- if (me == 0) {
+- const struct gfx_firmware_header_v1_0 *hdr =
+- (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
+- fw_data = (const __le32 *)
+- (adev->gfx.ce_fw->data +
+- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+- table_offset = le32_to_cpu(hdr->jt_offset);
+- table_size = le32_to_cpu(hdr->jt_size);
+- } else if (me == 1) {
+- const struct gfx_firmware_header_v1_0 *hdr =
+- (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
+- fw_data = (const __le32 *)
+- (adev->gfx.pfp_fw->data +
+- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+- table_offset = le32_to_cpu(hdr->jt_offset);
+- table_size = le32_to_cpu(hdr->jt_size);
+- } else if (me == 2) {
+- const struct gfx_firmware_header_v1_0 *hdr =
+- (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
+- fw_data = (const __le32 *)
+- (adev->gfx.me_fw->data +
+- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+- table_offset = le32_to_cpu(hdr->jt_offset);
+- table_size = le32_to_cpu(hdr->jt_size);
+- } else if (me == 3) {
+- const struct gfx_firmware_header_v1_0 *hdr =
+- (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+- fw_data = (const __le32 *)
+- (adev->gfx.mec_fw->data +
+- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+- table_offset = le32_to_cpu(hdr->jt_offset);
+- table_size = le32_to_cpu(hdr->jt_size);
+- } else if (me == 4) {
+- const struct gfx_firmware_header_v1_0 *hdr =
+- (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
+- fw_data = (const __le32 *)
+- (adev->gfx.mec2_fw->data +
+- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+- table_offset = le32_to_cpu(hdr->jt_offset);
+- table_size = le32_to_cpu(hdr->jt_size);
+- }
+-
+- for (i = 0; i < table_size; i ++) {
+- dst_ptr[bo_offset + i] =
+- cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
+- }
+-
+- bo_offset += table_size;
+- }
++ return 5;
+ }
+
+ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
+ {
+- volatile u32 *dst_ptr;
+- u32 dws;
+ const struct cs_section_def *cs_data;
+ int r;
+
+@@ -1153,45 +1094,18 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
+ cs_data = adev->gfx.rlc.cs_data;
+
+ if (cs_data) {
+- /* clear state block */
+- adev->gfx.rlc.clear_state_size = dws = gfx_v9_0_get_csb_size(adev);
+- r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+- AMDGPU_GEM_DOMAIN_VRAM,
+- &adev->gfx.rlc.clear_state_obj,
+- &adev->gfx.rlc.clear_state_gpu_addr,
+- (void **)&adev->gfx.rlc.cs_ptr);
+- if (r) {
+- dev_err(adev->dev, "(%d) failed to create rlc csb bo\n",
+- r);
+- amdgpu_gfx_rlc_fini(adev);
++ /* init clear state block */
++ r = amdgpu_gfx_rlc_init_csb(adev);
++ if (r)
+ return r;
+- }
+- /* set up the cs buffer */
+- dst_ptr = adev->gfx.rlc.cs_ptr;
+- gfx_v9_0_get_csb_buffer(adev, dst_ptr);
+- amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
+- amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
+- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+ }
+
+ if (adev->asic_type == CHIP_RAVEN) {
+ /* TODO: double check the cp_table_size for RV */
+ adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
+- r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
+- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+- &adev->gfx.rlc.cp_table_obj,
+- &adev->gfx.rlc.cp_table_gpu_addr,
+- (void **)&adev->gfx.rlc.cp_table_ptr);
+- if (r) {
+- dev_err(adev->dev,
+- "(%d) failed to create cp table bo\n", r);
+- amdgpu_gfx_rlc_fini(adev);
++ r = amdgpu_gfx_rlc_init_cpt(adev);
++ if (r)
+ return r;
+- }
+-
+- rv_init_cp_jump_table(adev);
+- amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
+- amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
+ }
+
+ switch (adev->asic_type) {
+@@ -3613,64 +3527,47 @@ static int gfx_v9_0_late_init(void *handle)
+ return 0;
+ }
+
+-static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
++static bool gfx_v9_0_is_rlc_enabled(struct amdgpu_device *adev)
+ {
+- uint32_t rlc_setting, data;
+- unsigned i;
+-
+- if (adev->gfx.rlc.in_safe_mode)
+- return;
++ uint32_t rlc_setting;
+
+ /* if RLC is not enabled, do nothing */
+ rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
+ if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
+- return;
+-
+- if (adev->cg_flags &
+- (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
+- AMD_CG_SUPPORT_GFX_3D_CGCG)) {
+- data = RLC_SAFE_MODE__CMD_MASK;
+- data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
+- WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
++ return false;
+
+- /* wait for RLC_SAFE_MODE */
+- for (i = 0; i < adev->usec_timeout; i++) {
+- if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
+- break;
+- udelay(1);
+- }
+- adev->gfx.rlc.in_safe_mode = true;
+- }
++ return true;
+ }
+
+-static void gfx_v9_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
++static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev)
+ {
+- uint32_t rlc_setting, data;
+-
+- if (!adev->gfx.rlc.in_safe_mode)
+- return;
++ uint32_t data;
++ unsigned i;
+
+- /* if RLC is not enabled, do nothing */
+- rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
+- if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
+- return;
++ data = RLC_SAFE_MODE__CMD_MASK;
++ data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
++ WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
+
+- if (adev->cg_flags &
+- (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
+- /*
+- * Try to exit safe mode only if it is already in safe
+- * mode.
+- */
+- data = RLC_SAFE_MODE__CMD_MASK;
+- WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
+- adev->gfx.rlc.in_safe_mode = false;
++ /* wait for RLC_SAFE_MODE */
++ for (i = 0; i < adev->usec_timeout; i++) {
++ if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
++ break;
++ udelay(1);
+ }
+ }
+
++static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev)
++{
++ uint32_t data;
++
++ data = RLC_SAFE_MODE__CMD_MASK;
++ WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
++}
++
+ static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
+ bool enable)
+ {
+- gfx_v9_0_enter_rlc_safe_mode(adev);
++ amdgpu_gfx_rlc_enter_safe_mode(adev);
+
+ if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
+ gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
+@@ -3681,7 +3578,7 @@ static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
+ gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
+ }
+
+- gfx_v9_0_exit_rlc_safe_mode(adev);
++ amdgpu_gfx_rlc_exit_safe_mode(adev);
+ }
+
+ static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
+@@ -3779,7 +3676,7 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
+ {
+ uint32_t data, def;
+
+- adev->gfx.rlc.funcs->enter_safe_mode(adev);
++ amdgpu_gfx_rlc_enter_safe_mode(adev);
+
+ /* Enable 3D CGCG/CGLS */
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
+@@ -3819,7 +3716,7 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
+ WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
+ }
+
+- adev->gfx.rlc.funcs->exit_safe_mode(adev);
++ amdgpu_gfx_rlc_exit_safe_mode(adev);
+ }
+
+ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
+@@ -3827,7 +3724,7 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
+ {
+ uint32_t def, data;
+
+- adev->gfx.rlc.funcs->enter_safe_mode(adev);
++ amdgpu_gfx_rlc_enter_safe_mode(adev);
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
+ def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
+@@ -3867,7 +3764,7 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
+ WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
+ }
+
+- adev->gfx.rlc.funcs->exit_safe_mode(adev);
++ amdgpu_gfx_rlc_exit_safe_mode(adev);
+ }
+
+ static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
+@@ -3896,8 +3793,12 @@ static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
+ }
+
+ static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
+- .enter_safe_mode = gfx_v9_0_enter_rlc_safe_mode,
+- .exit_safe_mode = gfx_v9_0_exit_rlc_safe_mode
++ .is_rlc_enabled = gfx_v9_0_is_rlc_enabled,
++ .set_safe_mode = gfx_v9_0_set_safe_mode,
++ .unset_safe_mode = gfx_v9_0_unset_safe_mode,
++ .get_csb_size = gfx_v9_0_get_csb_size,
++ .get_csb_buffer = gfx_v9_0_get_csb_buffer,
++ .get_cp_table_num = gfx_v9_0_cp_jump_table_num,
+ };
+
+ static int gfx_v9_0_set_powergating_state(void *handle,
+diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+index faf06fd..36bcba96 100644
+--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
++++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+@@ -508,19 +508,19 @@ static int kv_enable_didt(struct amdgpu_device *adev, bool enable)
+ pi->caps_db_ramping ||
+ pi->caps_td_ramping ||
+ pi->caps_tcp_ramping) {
+- adev->gfx.rlc.funcs->enter_safe_mode(adev);
++ amdgpu_gfx_rlc_enter_safe_mode(adev);
+
+ if (enable) {
+ ret = kv_program_pt_config_registers(adev, didt_config_kv);
+ if (ret) {
+- adev->gfx.rlc.funcs->exit_safe_mode(adev);
++ amdgpu_gfx_rlc_exit_safe_mode(adev);
+ return ret;
+ }
+ }
+
+ kv_do_enable_didt(adev, enable);
+
+- adev->gfx.rlc.funcs->exit_safe_mode(adev);
++ amdgpu_gfx_rlc_exit_safe_mode(adev);
+ }
+
+ return 0;
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+index 5e19f59..d138ddae 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+@@ -967,7 +967,7 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
+ PP_CAP(PHM_PlatformCaps_TDRamping) ||
+ PP_CAP(PHM_PlatformCaps_TCPRamping)) {
+
+- adev->gfx.rlc.funcs->enter_safe_mode(adev);
++ amdgpu_gfx_rlc_enter_safe_mode(adev);
+ mutex_lock(&adev->grbm_idx_mutex);
+ value = 0;
+ value2 = cgs_read_register(hwmgr->device, mmGRBM_GFX_INDEX);
+@@ -1014,13 +1014,13 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
+ "Failed to enable DPM DIDT.", goto error);
+ }
+ mutex_unlock(&adev->grbm_idx_mutex);
+- adev->gfx.rlc.funcs->exit_safe_mode(adev);
++ amdgpu_gfx_rlc_exit_safe_mode(adev);
+ }
+
+ return 0;
+ error:
+ mutex_unlock(&adev->grbm_idx_mutex);
+- adev->gfx.rlc.funcs->exit_safe_mode(adev);
++ amdgpu_gfx_rlc_exit_safe_mode(adev);
+ return result;
+ }
+
+@@ -1034,7 +1034,7 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
+ PP_CAP(PHM_PlatformCaps_TDRamping) ||
+ PP_CAP(PHM_PlatformCaps_TCPRamping)) {
+
+- adev->gfx.rlc.funcs->enter_safe_mode(adev);
++ amdgpu_gfx_rlc_enter_safe_mode(adev);
+
+ result = smu7_enable_didt(hwmgr, false);
+ PP_ASSERT_WITH_CODE((result == 0),
+@@ -1046,12 +1046,12 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Failed to disable DPM DIDT.", goto error);
+ }
+- adev->gfx.rlc.funcs->exit_safe_mode(adev);
++ amdgpu_gfx_rlc_exit_safe_mode(adev);
+ }
+
+ return 0;
+ error:
+- adev->gfx.rlc.funcs->exit_safe_mode(adev);
++ amdgpu_gfx_rlc_exit_safe_mode(adev);
+ return result;
+ }
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
+index 2d88abf..6f26cb2 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
+@@ -937,7 +937,7 @@ static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
+
+ num_se = adev->gfx.config.max_shader_engines;
+
+- adev->gfx.rlc.funcs->enter_safe_mode(adev);
++ amdgpu_gfx_rlc_enter_safe_mode(adev);
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (count = 0; count < num_se; count++) {
+@@ -962,7 +962,7 @@ static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
+
+ vega10_didt_set_mask(hwmgr, true);
+
+- adev->gfx.rlc.funcs->exit_safe_mode(adev);
++ amdgpu_gfx_rlc_exit_safe_mode(adev);
+
+ return 0;
+ }
+@@ -971,11 +971,11 @@ static int vega10_disable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
+ {
+ struct amdgpu_device *adev = hwmgr->adev;
+
+- adev->gfx.rlc.funcs->enter_safe_mode(adev);
++ amdgpu_gfx_rlc_enter_safe_mode(adev);
+
+ vega10_didt_set_mask(hwmgr, false);
+
+- adev->gfx.rlc.funcs->exit_safe_mode(adev);
++ amdgpu_gfx_rlc_exit_safe_mode(adev);
+
+ return 0;
+ }
+@@ -988,7 +988,7 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
+
+ num_se = adev->gfx.config.max_shader_engines;
+
+- adev->gfx.rlc.funcs->enter_safe_mode(adev);
++ amdgpu_gfx_rlc_enter_safe_mode(adev);
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (count = 0; count < num_se; count++) {
+@@ -1007,7 +1007,7 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
+
+ vega10_didt_set_mask(hwmgr, true);
+
+- adev->gfx.rlc.funcs->exit_safe_mode(adev);
++ amdgpu_gfx_rlc_exit_safe_mode(adev);
+
+ vega10_program_gc_didt_config_registers(hwmgr, GCDiDtDroopCtrlConfig_vega10);
+ if (PP_CAP(PHM_PlatformCaps_GCEDC))
+@@ -1024,11 +1024,11 @@ static int vega10_disable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
+ struct amdgpu_device *adev = hwmgr->adev;
+ uint32_t data;
+
+- adev->gfx.rlc.funcs->enter_safe_mode(adev);
++ amdgpu_gfx_rlc_enter_safe_mode(adev);
+
+ vega10_didt_set_mask(hwmgr, false);
+
+- adev->gfx.rlc.funcs->exit_safe_mode(adev);
++ amdgpu_gfx_rlc_exit_safe_mode(adev);
+
+ if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
+ data = 0x00000000;
+@@ -1049,7 +1049,7 @@ static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr)
+
+ num_se = adev->gfx.config.max_shader_engines;
+
+- adev->gfx.rlc.funcs->enter_safe_mode(adev);
++ amdgpu_gfx_rlc_enter_safe_mode(adev);
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (count = 0; count < num_se; count++) {
+@@ -1070,7 +1070,7 @@ static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr)
+
+ vega10_didt_set_mask(hwmgr, true);
+
+- adev->gfx.rlc.funcs->exit_safe_mode(adev);
++ amdgpu_gfx_rlc_exit_safe_mode(adev);
+
+ return 0;
+ }
+@@ -1079,11 +1079,11 @@ static int vega10_disable_se_edc_config(struct pp_hwmgr *hwmgr)
+ {
+ struct amdgpu_device *adev = hwmgr->adev;
+
+- adev->gfx.rlc.funcs->enter_safe_mode(adev);
++ amdgpu_gfx_rlc_enter_safe_mode(adev);
+
+ vega10_didt_set_mask(hwmgr, false);
+
+- adev->gfx.rlc.funcs->exit_safe_mode(adev);
++ amdgpu_gfx_rlc_exit_safe_mode(adev);
+
+ return 0;
+ }
+@@ -1097,7 +1097,7 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
+
+ num_se = adev->gfx.config.max_shader_engines;
+
+- adev->gfx.rlc.funcs->enter_safe_mode(adev);
++ amdgpu_gfx_rlc_enter_safe_mode(adev);
+
+ vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10);
+
+@@ -1118,7 +1118,7 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
+
+ vega10_didt_set_mask(hwmgr, true);
+
+- adev->gfx.rlc.funcs->exit_safe_mode(adev);
++ amdgpu_gfx_rlc_exit_safe_mode(adev);
+
+ vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCDroopCtrlConfig_vega10);
+
+@@ -1138,11 +1138,11 @@ static int vega10_disable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
+ struct amdgpu_device *adev = hwmgr->adev;
+ uint32_t data;
+
+- adev->gfx.rlc.funcs->enter_safe_mode(adev);
++ amdgpu_gfx_rlc_enter_safe_mode(adev);
+
+ vega10_didt_set_mask(hwmgr, false);
+
+- adev->gfx.rlc.funcs->exit_safe_mode(adev);
++ amdgpu_gfx_rlc_exit_safe_mode(adev);
+
+ if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
+ data = 0x00000000;
+@@ -1160,7 +1160,7 @@ static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
+ struct amdgpu_device *adev = hwmgr->adev;
+ int result;
+
+- adev->gfx.rlc.funcs->enter_safe_mode(adev);
++ amdgpu_gfx_rlc_enter_safe_mode(adev);
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000);
+@@ -1173,7 +1173,7 @@ static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
+
+ vega10_didt_set_mask(hwmgr, false);
+
+- adev->gfx.rlc.funcs->exit_safe_mode(adev);
++ amdgpu_gfx_rlc_exit_safe_mode(adev);
+
+ return 0;
+ }
+--
+2.7.4
+
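Note: the refactor in the hunks above replaces the per-ASIC enter/exit callbacks with three smaller ones (is_rlc_enabled, set_safe_mode, unset_safe_mode) and routes every caller through the common amdgpu_gfx_rlc_enter_safe_mode()/amdgpu_gfx_rlc_exit_safe_mode() helpers. As a rough sketch of what the shared enter helper could look like, reconstructed from the per-ASIC code removed above (the helper body itself is not part of this patch, so the exact field and flag checks are assumptions):

    /* Illustrative sketch only, not the literal helper added elsewhere in the series. */
    void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev)
    {
            if (adev->gfx.rlc.in_safe_mode)
                    return;

            /* Nothing to hand-shake with if the RLC firmware is not running. */
            if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
                    return;

            if (adev->cg_flags &
                (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
                    adev->gfx.rlc.funcs->set_safe_mode(adev);
                    adev->gfx.rlc.in_safe_mode = true;
            }
    }
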
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5617-drm-amdgpu-make-gfx9-enter-into-rlc-safe-mode-when-s.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5617-drm-amdgpu-make-gfx9-enter-into-rlc-safe-mode-when-s.patch
new file mode 100644
index 00000000..bdf144a2
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5617-drm-amdgpu-make-gfx9-enter-into-rlc-safe-mode-when-s.patch
@@ -0,0 +1,39 @@
+From 823211258f318d331f52028f04a1238b7a2ba400 Mon Sep 17 00:00:00 2001
+From: Likun Gao <Likun.Gao@amd.com>
+Date: Wed, 2 Jan 2019 12:20:12 +0800
+Subject: [PATCH 5617/5725] drm/amdgpu: make gfx9 enter into rlc safe mode when
+ set MGCG
+
+Setting MGCG requires the RLC to enter safe mode first.
+
+Signed-off-by: Likun Gao <Likun.Gao@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index ac2a843..ce4bb14 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -3605,6 +3605,8 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
+ {
+ uint32_t data, def;
+
++ amdgpu_gfx_rlc_enter_safe_mode(adev);
++
+ /* It is disabled by HW by default */
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
+ /* 1 - RLC_CGTT_MGCG_OVERRIDE */
+@@ -3669,6 +3671,8 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
+ WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
+ }
+ }
++
++ amdgpu_gfx_rlc_exit_safe_mode(adev);
+ }
+
+ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
+--
+2.7.4
+
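The MGCG fix above brackets gfx_v9_0_update_medium_grain_clock_gating() with the same enter/exit helper pair already used by the other clock-gating update paths. The exit side would mirror the enter sketch shown earlier (again an assumption inferred from the removed per-ASIC code, not text taken from this patch):

    /* Illustrative counterpart of the enter-safe-mode sketch above. */
    void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev)
    {
            if (!adev->gfx.rlc.in_safe_mode)
                    return;

            if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
                    return;

            if (adev->cg_flags &
                (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
                    adev->gfx.rlc.funcs->unset_safe_mode(adev);
                    adev->gfx.rlc.in_safe_mode = false;
            }
    }
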
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5618-drm-amdkfd-Roll-back-all-q4-amdkfd-patches-added-by-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5618-drm-amdkfd-Roll-back-all-q4-amdkfd-patches-added-by-.patch
new file mode 100644
index 00000000..7798330d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5618-drm-amdkfd-Roll-back-all-q4-amdkfd-patches-added-by-.patch
@@ -0,0 +1,8070 @@
+From f00599ff354b3f061df8ce41217562f7c1bfcc2d Mon Sep 17 00:00:00 2001
+From: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+Date: Wed, 9 Jan 2019 21:21:38 +0530
+Subject: [PATCH 5618/5725] drm/amdkfd: Roll back all q4 amdkfd patches added
+ by Kalyan.
+
+Signed-off-by: Ravi Kumar <ravi1.kumar@amd.com>
+Signed-off-by: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/Makefile | 4 +-
+ drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c | 78 +-
+ drivers/gpu/drm/amd/amdkfd/cik_int.h | 25 +-
+ drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h | 568 ----------
+ .../gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm | 298 +++++-
+ .../gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm | 439 +++++---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 1090 ++++++--------------
+ drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 60 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_crat.h | 48 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c | 50 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_device.c | 230 ++---
+ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 213 ++--
+ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 16 +-
+ .../drm/amd/amdkfd/kfd_device_queue_manager_v9.c | 6 +-
+ .../drm/amd/amdkfd/kfd_device_queue_manager_vi.c | 29 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c | 22 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_events.c | 129 +--
+ drivers/gpu/drm/amd/amdkfd/kfd_events.h | 1 -
+ drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c | 61 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c | 81 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_iommu.c | 3 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_ipc.c | 2 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 26 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h | 2 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c | 119 +++
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c | 78 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c | 180 ++--
+ drivers/gpu/drm/amd/amdkfd/kfd_module.c | 21 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c | 1 -
+ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h | 2 -
+ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c | 28 -
+ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 63 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c | 47 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 102 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_peerdirect.c | 8 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 178 ++--
+ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 212 ++--
+ .../gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 26 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_queue.c | 8 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_rdma.c | 2 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 94 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_topology.h | 13 +-
+ drivers/gpu/drm/amd/amdkfd/soc15_int.h | 41 +-
+ 43 files changed, 1930 insertions(+), 2774 deletions(-)
+ delete mode 100644 drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+ mode change 100644 => 100755 drivers/gpu/drm/amd/amdkfd/kfd_device.c
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile
+index 4804f9c..b65537a 100644
+--- a/drivers/gpu/drm/amd/amdkfd/Makefile
++++ b/drivers/gpu/drm/amd/amdkfd/Makefile
+@@ -24,7 +24,9 @@
+ #
+
+ FULL_AMD_PATH=$(src)/..
+-ccflags-y := -I$(FULL_AMD_PATH)/include \
++
++ccflags-y := -Iinclude/drm \
++ -I$(FULL_AMD_PATH)/include/ \
+ -I$(FULL_AMD_PATH)/include/asic_reg
+
+ amdkfd-y := kfd_module.o kfd_device.o kfd_chardev.o kfd_topology.o \
+diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
+index 5d2475d..751c004 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
++++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
+@@ -24,6 +24,20 @@
+ #include "kfd_events.h"
+ #include "cik_int.h"
+
++static bool is_cpc_vm_fault(struct kfd_dev *dev,
++ const uint32_t *ih_ring_entry)
++{
++ const struct cik_ih_ring_entry *ihre =
++ (const struct cik_ih_ring_entry *)ih_ring_entry;
++
++ if ((ihre->source_id == CIK_INTSRC_GFX_PAGE_INV_FAULT ||
++ ihre->source_id == CIK_INTSRC_GFX_MEM_PROT_FAULT) &&
++ ihre->vmid >= dev->vm_info.first_vmid_kfd &&
++ ihre->vmid <= dev->vm_info.last_vmid_kfd)
++ return true;
++ return false;
++}
++
+ static bool cik_event_interrupt_isr(struct kfd_dev *dev,
+ const uint32_t *ih_ring_entry,
+ uint32_t *patched_ihre,
+@@ -32,7 +46,8 @@ static bool cik_event_interrupt_isr(struct kfd_dev *dev,
+ const struct cik_ih_ring_entry *ihre =
+ (const struct cik_ih_ring_entry *)ih_ring_entry;
+ const struct kfd2kgd_calls *f2g = dev->kfd2kgd;
+- unsigned int vmid, pasid;
++ struct cik_ih_ring_entry *tmp_ihre =
++ (struct cik_ih_ring_entry *) patched_ihre;
+
+ /* This workaround is due to HW/FW limitation on Hawaii that
+ * VMID and PASID are not written into ih_ring_entry
+@@ -40,44 +55,23 @@ static bool cik_event_interrupt_isr(struct kfd_dev *dev,
+ if ((ihre->source_id == CIK_INTSRC_GFX_PAGE_INV_FAULT ||
+ ihre->source_id == CIK_INTSRC_GFX_MEM_PROT_FAULT) &&
+ dev->device_info->asic_family == CHIP_HAWAII) {
+- struct cik_ih_ring_entry *tmp_ihre =
+- (struct cik_ih_ring_entry *)patched_ihre;
+-
+ *patched_flag = true;
+ *tmp_ihre = *ihre;
+
+- vmid = f2g->read_vmid_from_vmfault_reg(dev->kgd);
+- pasid = f2g->get_atc_vmid_pasid_mapping_pasid(dev->kgd, vmid);
+-
+- tmp_ihre->ring_id &= 0x000000ff;
+- tmp_ihre->ring_id |= vmid << 8;
+- tmp_ihre->ring_id |= pasid << 16;
+-
+- return (pasid != 0) &&
+- vmid >= dev->vm_info.first_vmid_kfd &&
+- vmid <= dev->vm_info.last_vmid_kfd;
++ tmp_ihre->vmid = f2g->read_vmid_from_vmfault_reg(dev->kgd);
++ tmp_ihre->pasid = f2g->get_atc_vmid_pasid_mapping_pasid(
++ dev->kgd, tmp_ihre->vmid);
++ return (tmp_ihre->pasid != 0) &&
++ tmp_ihre->vmid >= dev->vm_info.first_vmid_kfd &&
++ tmp_ihre->vmid <= dev->vm_info.last_vmid_kfd;
+ }
+-
+- /* Only handle interrupts from KFD VMIDs */
+- vmid = (ihre->ring_id & 0x0000ff00) >> 8;
+- if (vmid < dev->vm_info.first_vmid_kfd ||
+- vmid > dev->vm_info.last_vmid_kfd)
+- return 0;
+-
+- /* If there is no valid PASID, it's likely a firmware bug */
+- pasid = (ihre->ring_id & 0xffff0000) >> 16;
+- if (WARN_ONCE(pasid == 0, "FW bug: No PASID in KFD interrupt"))
+- return 0;
+-
+- /* Interrupt types we care about: various signals and faults.
+- * They will be forwarded to a work queue (see below).
+- */
+- return ihre->source_id == CIK_INTSRC_CP_END_OF_PIPE ||
++ /* Do not process in ISR, just request it to be forwarded to WQ. */
++ return (ihre->pasid != 0) &&
++ (ihre->source_id == CIK_INTSRC_CP_END_OF_PIPE ||
+ ihre->source_id == CIK_INTSRC_SDMA_TRAP ||
+ ihre->source_id == CIK_INTSRC_SQ_INTERRUPT_MSG ||
+ ihre->source_id == CIK_INTSRC_CP_BAD_OPCODE ||
+- ihre->source_id == CIK_INTSRC_GFX_PAGE_INV_FAULT ||
+- ihre->source_id == CIK_INTSRC_GFX_MEM_PROT_FAULT;
++ is_cpc_vm_fault(dev, ih_ring_entry));
+ }
+
+ static void cik_event_interrupt_wq(struct kfd_dev *dev,
+@@ -86,35 +80,33 @@ static void cik_event_interrupt_wq(struct kfd_dev *dev,
+ const struct cik_ih_ring_entry *ihre =
+ (const struct cik_ih_ring_entry *)ih_ring_entry;
+ uint32_t context_id = ihre->data & 0xfffffff;
+- unsigned int vmid = (ihre->ring_id & 0x0000ff00) >> 8;
+- unsigned int pasid = (ihre->ring_id & 0xffff0000) >> 16;
+
+- if (pasid == 0)
++ if (ihre->pasid == 0)
+ return;
+
+ if (ihre->source_id == CIK_INTSRC_CP_END_OF_PIPE)
+- kfd_signal_event_interrupt(pasid, context_id, 28);
++ kfd_signal_event_interrupt(ihre->pasid, context_id, 28);
+ else if (ihre->source_id == CIK_INTSRC_SDMA_TRAP)
+- kfd_signal_event_interrupt(pasid, context_id, 28);
++ kfd_signal_event_interrupt(ihre->pasid, context_id, 28);
+ else if (ihre->source_id == CIK_INTSRC_SQ_INTERRUPT_MSG)
+- kfd_signal_event_interrupt(pasid, context_id & 0xff, 8);
++ kfd_signal_event_interrupt(ihre->pasid, context_id & 0xff, 8);
+ else if (ihre->source_id == CIK_INTSRC_CP_BAD_OPCODE)
+- kfd_signal_hw_exception_event(pasid);
++ kfd_signal_hw_exception_event(ihre->pasid);
+ else if (ihre->source_id == CIK_INTSRC_GFX_PAGE_INV_FAULT ||
+ ihre->source_id == CIK_INTSRC_GFX_MEM_PROT_FAULT) {
+ struct kfd_vm_fault_info info;
+
+- kfd_process_vm_fault(dev->dqm, pasid);
++ kfd_process_vm_fault(dev->dqm, ihre->pasid);
+
+ memset(&info, 0, sizeof(info));
+ dev->kfd2kgd->get_vm_fault_info(dev->kgd, &info);
+ if (!info.page_addr && !info.status)
+ return;
+
+- if (info.vmid == vmid)
+- kfd_signal_vm_fault_event(dev, pasid, &info);
++ if (info.vmid == ihre->vmid)
++ kfd_signal_vm_fault_event(dev, ihre->pasid, &info);
+ else
+- kfd_signal_vm_fault_event(dev, pasid, NULL);
++ kfd_signal_vm_fault_event(dev, ihre->pasid, NULL);
+ }
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/cik_int.h b/drivers/gpu/drm/amd/amdkfd/cik_int.h
+index a2079a0..ff8255d 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cik_int.h
++++ b/drivers/gpu/drm/amd/amdkfd/cik_int.h
+@@ -26,19 +26,32 @@
+ #include <linux/types.h>
+
+ struct cik_ih_ring_entry {
+- uint32_t source_id;
+- uint32_t data;
+- uint32_t ring_id;
+- uint32_t reserved;
++ uint32_t source_id:8;
++ uint32_t reserved1:8;
++ uint32_t reserved2:16;
++
++ uint32_t data:28;
++ uint32_t reserved3:4;
++
++ /* pipeid, meid and unused3 are officially called RINGID,
++ * but for our purposes, they always decode into pipe and ME.
++ */
++ uint32_t pipeid:2;
++ uint32_t meid:2;
++ uint32_t reserved4:4;
++ uint32_t vmid:8;
++ uint32_t pasid:16;
++
++ uint32_t reserved5;
+ };
+
++#define CIK_INTSRC_DEQUEUE_COMPLETE 0xC6
+ #define CIK_INTSRC_CP_END_OF_PIPE 0xB5
+ #define CIK_INTSRC_CP_BAD_OPCODE 0xB7
+-#define CIK_INTSRC_DEQUEUE_COMPLETE 0xC6
+-#define CIK_INTSRC_SDMA_TRAP 0xE0
+ #define CIK_INTSRC_SQ_INTERRUPT_MSG 0xEF
+ #define CIK_INTSRC_GFX_PAGE_INV_FAULT 0x92
+ #define CIK_INTSRC_GFX_MEM_PROT_FAULT 0x93
++#define CIK_INTSRC_SDMA_TRAP 0xE0
+
+ #endif
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+deleted file mode 100644
+index 3621efb..0000000
+--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
++++ /dev/null
+@@ -1,568 +0,0 @@
+-/*
+- * Copyright 2018 Advanced Micro Devices, Inc.
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included in
+- * all copies or substantial portions of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+- * OTHER DEALINGS IN THE SOFTWARE.
+- */
+-
+-static const uint32_t cwsr_trap_gfx8_hex[] = {
+- 0xbf820001, 0xbf82012b,
+- 0xb8f4f802, 0x89748674,
+- 0xb8f5f803, 0x8675ff75,
+- 0x00000400, 0xbf850017,
+- 0xc00a1e37, 0x00000000,
+- 0xbf8c007f, 0x87777978,
+- 0xbf840005, 0x8f728374,
+- 0xb972e0c2, 0xbf800002,
+- 0xb9740002, 0xbe801d78,
+- 0xb8f5f803, 0x8675ff75,
+- 0x000001ff, 0xbf850002,
+- 0x80708470, 0x82718071,
+- 0x8671ff71, 0x0000ffff,
+- 0x8f728374, 0xb972e0c2,
+- 0xbf800002, 0xb9740002,
+- 0xbe801f70, 0xb8f5f803,
+- 0x8675ff75, 0x00000100,
+- 0xbf840006, 0xbefa0080,
+- 0xb97a0203, 0x8671ff71,
+- 0x0000ffff, 0x80f08870,
+- 0x82f18071, 0xbefa0080,
+- 0xb97a0283, 0xbef60068,
+- 0xbef70069, 0xb8fa1c07,
+- 0x8e7a9c7a, 0x87717a71,
+- 0xb8fa03c7, 0x8e7a9b7a,
+- 0x87717a71, 0xb8faf807,
+- 0x867aff7a, 0x00007fff,
+- 0xb97af807, 0xbef2007e,
+- 0xbef3007f, 0xbefe0180,
+- 0xbf900004, 0x877a8474,
+- 0xb97af802, 0xbf8e0002,
+- 0xbf88fffe, 0xbef8007e,
+- 0x8679ff7f, 0x0000ffff,
+- 0x8779ff79, 0x00040000,
+- 0xbefa0080, 0xbefb00ff,
+- 0x00807fac, 0x867aff7f,
+- 0x08000000, 0x8f7a837a,
+- 0x877b7a7b, 0x867aff7f,
+- 0x70000000, 0x8f7a817a,
+- 0x877b7a7b, 0xbeef007c,
+- 0xbeee0080, 0xb8ee2a05,
+- 0x806e816e, 0x8e6e8a6e,
+- 0xb8fa1605, 0x807a817a,
+- 0x8e7a867a, 0x806e7a6e,
+- 0xbefa0084, 0xbefa00ff,
+- 0x01000000, 0xbefe007c,
+- 0xbefc006e, 0xc0611bfc,
+- 0x0000007c, 0x806e846e,
+- 0xbefc007e, 0xbefe007c,
+- 0xbefc006e, 0xc0611c3c,
+- 0x0000007c, 0x806e846e,
+- 0xbefc007e, 0xbefe007c,
+- 0xbefc006e, 0xc0611c7c,
+- 0x0000007c, 0x806e846e,
+- 0xbefc007e, 0xbefe007c,
+- 0xbefc006e, 0xc0611cbc,
+- 0x0000007c, 0x806e846e,
+- 0xbefc007e, 0xbefe007c,
+- 0xbefc006e, 0xc0611cfc,
+- 0x0000007c, 0x806e846e,
+- 0xbefc007e, 0xbefe007c,
+- 0xbefc006e, 0xc0611d3c,
+- 0x0000007c, 0x806e846e,
+- 0xbefc007e, 0xb8f5f803,
+- 0xbefe007c, 0xbefc006e,
+- 0xc0611d7c, 0x0000007c,
+- 0x806e846e, 0xbefc007e,
+- 0xbefe007c, 0xbefc006e,
+- 0xc0611dbc, 0x0000007c,
+- 0x806e846e, 0xbefc007e,
+- 0xbefe007c, 0xbefc006e,
+- 0xc0611dfc, 0x0000007c,
+- 0x806e846e, 0xbefc007e,
+- 0xb8eff801, 0xbefe007c,
+- 0xbefc006e, 0xc0611bfc,
+- 0x0000007c, 0x806e846e,
+- 0xbefc007e, 0xbefe007c,
+- 0xbefc006e, 0xc0611b3c,
+- 0x0000007c, 0x806e846e,
+- 0xbefc007e, 0xbefe007c,
+- 0xbefc006e, 0xc0611b7c,
+- 0x0000007c, 0x806e846e,
+- 0xbefc007e, 0x867aff7f,
+- 0x04000000, 0xbef30080,
+- 0x8773737a, 0xb8ee2a05,
+- 0x806e816e, 0x8e6e8a6e,
+- 0xb8f51605, 0x80758175,
+- 0x8e758475, 0x8e7a8275,
+- 0xbefa00ff, 0x01000000,
+- 0xbef60178, 0x80786e78,
+- 0x82798079, 0xbefc0080,
+- 0xbe802b00, 0xbe822b02,
+- 0xbe842b04, 0xbe862b06,
+- 0xbe882b08, 0xbe8a2b0a,
+- 0xbe8c2b0c, 0xbe8e2b0e,
+- 0xc06b003c, 0x00000000,
+- 0xc06b013c, 0x00000010,
+- 0xc06b023c, 0x00000020,
+- 0xc06b033c, 0x00000030,
+- 0x8078c078, 0x82798079,
+- 0x807c907c, 0xbf0a757c,
+- 0xbf85ffeb, 0xbef80176,
+- 0xbeee0080, 0xbefe00c1,
+- 0xbeff00c1, 0xbefa00ff,
+- 0x01000000, 0xe0724000,
+- 0x6e1e0000, 0xe0724100,
+- 0x6e1e0100, 0xe0724200,
+- 0x6e1e0200, 0xe0724300,
+- 0x6e1e0300, 0xbefe00c1,
+- 0xbeff00c1, 0xb8f54306,
+- 0x8675c175, 0xbf84002c,
+- 0xbf8a0000, 0x867aff73,
+- 0x04000000, 0xbf840028,
+- 0x8e758675, 0x8e758275,
+- 0xbefa0075, 0xb8ee2a05,
+- 0x806e816e, 0x8e6e8a6e,
+- 0xb8fa1605, 0x807a817a,
+- 0x8e7a867a, 0x806e7a6e,
+- 0x806eff6e, 0x00000080,
+- 0xbefa00ff, 0x01000000,
+- 0xbefc0080, 0xd28c0002,
+- 0x000100c1, 0xd28d0003,
+- 0x000204c1, 0xd1060002,
+- 0x00011103, 0x7e0602ff,
+- 0x00000200, 0xbefc00ff,
+- 0x00010000, 0xbe80007b,
+- 0x867bff7b, 0xff7fffff,
+- 0x877bff7b, 0x00058000,
+- 0xd8ec0000, 0x00000002,
+- 0xbf8c007f, 0xe0765000,
+- 0x6e1e0002, 0x32040702,
+- 0xd0c9006a, 0x0000eb02,
+- 0xbf87fff7, 0xbefb0000,
+- 0xbeee00ff, 0x00000400,
+- 0xbefe00c1, 0xbeff00c1,
+- 0xb8f52a05, 0x80758175,
+- 0x8e758275, 0x8e7a8875,
+- 0xbefa00ff, 0x01000000,
+- 0xbefc0084, 0xbf0a757c,
+- 0xbf840015, 0xbf11017c,
+- 0x8075ff75, 0x00001000,
+- 0x7e000300, 0x7e020301,
+- 0x7e040302, 0x7e060303,
+- 0xe0724000, 0x6e1e0000,
+- 0xe0724100, 0x6e1e0100,
+- 0xe0724200, 0x6e1e0200,
+- 0xe0724300, 0x6e1e0300,
+- 0x807c847c, 0x806eff6e,
+- 0x00000400, 0xbf0a757c,
+- 0xbf85ffef, 0xbf9c0000,
+- 0xbf8200cd, 0xbef8007e,
+- 0x8679ff7f, 0x0000ffff,
+- 0x8779ff79, 0x00040000,
+- 0xbefa0080, 0xbefb00ff,
+- 0x00807fac, 0x8676ff7f,
+- 0x08000000, 0x8f768376,
+- 0x877b767b, 0x8676ff7f,
+- 0x70000000, 0x8f768176,
+- 0x877b767b, 0x8676ff7f,
+- 0x04000000, 0xbf84001e,
+- 0xbefe00c1, 0xbeff00c1,
+- 0xb8f34306, 0x8673c173,
+- 0xbf840019, 0x8e738673,
+- 0x8e738273, 0xbefa0073,
+- 0xb8f22a05, 0x80728172,
+- 0x8e728a72, 0xb8f61605,
+- 0x80768176, 0x8e768676,
+- 0x80727672, 0x8072ff72,
+- 0x00000080, 0xbefa00ff,
+- 0x01000000, 0xbefc0080,
+- 0xe0510000, 0x721e0000,
+- 0xe0510100, 0x721e0000,
+- 0x807cff7c, 0x00000200,
+- 0x8072ff72, 0x00000200,
+- 0xbf0a737c, 0xbf85fff6,
+- 0xbef20080, 0xbefe00c1,
+- 0xbeff00c1, 0xb8f32a05,
+- 0x80738173, 0x8e738273,
+- 0x8e7a8873, 0xbefa00ff,
+- 0x01000000, 0xbef60072,
+- 0x8072ff72, 0x00000400,
+- 0xbefc0084, 0xbf11087c,
+- 0x8073ff73, 0x00008000,
+- 0xe0524000, 0x721e0000,
+- 0xe0524100, 0x721e0100,
+- 0xe0524200, 0x721e0200,
+- 0xe0524300, 0x721e0300,
+- 0xbf8c0f70, 0x7e000300,
+- 0x7e020301, 0x7e040302,
+- 0x7e060303, 0x807c847c,
+- 0x8072ff72, 0x00000400,
+- 0xbf0a737c, 0xbf85ffee,
+- 0xbf9c0000, 0xe0524000,
+- 0x761e0000, 0xe0524100,
+- 0x761e0100, 0xe0524200,
+- 0x761e0200, 0xe0524300,
+- 0x761e0300, 0xb8f22a05,
+- 0x80728172, 0x8e728a72,
+- 0xb8f61605, 0x80768176,
+- 0x8e768676, 0x80727672,
+- 0x80f2c072, 0xb8f31605,
+- 0x80738173, 0x8e738473,
+- 0x8e7a8273, 0xbefa00ff,
+- 0x01000000, 0xbefc0073,
+- 0xc031003c, 0x00000072,
+- 0x80f2c072, 0xbf8c007f,
+- 0x80fc907c, 0xbe802d00,
+- 0xbe822d02, 0xbe842d04,
+- 0xbe862d06, 0xbe882d08,
+- 0xbe8a2d0a, 0xbe8c2d0c,
+- 0xbe8e2d0e, 0xbf06807c,
+- 0xbf84fff1, 0xb8f22a05,
+- 0x80728172, 0x8e728a72,
+- 0xb8f61605, 0x80768176,
+- 0x8e768676, 0x80727672,
+- 0xbefa0084, 0xbefa00ff,
+- 0x01000000, 0xc0211cfc,
+- 0x00000072, 0x80728472,
+- 0xc0211c3c, 0x00000072,
+- 0x80728472, 0xc0211c7c,
+- 0x00000072, 0x80728472,
+- 0xc0211bbc, 0x00000072,
+- 0x80728472, 0xc0211bfc,
+- 0x00000072, 0x80728472,
+- 0xc0211d3c, 0x00000072,
+- 0x80728472, 0xc0211d7c,
+- 0x00000072, 0x80728472,
+- 0xc0211a3c, 0x00000072,
+- 0x80728472, 0xc0211a7c,
+- 0x00000072, 0x80728472,
+- 0xc0211dfc, 0x00000072,
+- 0x80728472, 0xc0211b3c,
+- 0x00000072, 0x80728472,
+- 0xc0211b7c, 0x00000072,
+- 0x80728472, 0xbf8c007f,
+- 0xbefc0073, 0xbefe006e,
+- 0xbeff006f, 0x867375ff,
+- 0x000003ff, 0xb9734803,
+- 0x867375ff, 0xfffff800,
+- 0x8f738b73, 0xb973a2c3,
+- 0xb977f801, 0x8673ff71,
+- 0xf0000000, 0x8f739c73,
+- 0x8e739073, 0xbef60080,
+- 0x87767376, 0x8673ff71,
+- 0x08000000, 0x8f739b73,
+- 0x8e738f73, 0x87767376,
+- 0x8673ff74, 0x00800000,
+- 0x8f739773, 0xb976f807,
+- 0x8671ff71, 0x0000ffff,
+- 0x86fe7e7e, 0x86ea6a6a,
+- 0x8f768374, 0xb976e0c2,
+- 0xbf800002, 0xb9740002,
+- 0xbf8a0000, 0x95807370,
+- 0xbf810000, 0x00000000,
+-};
+-
+-
+-static const uint32_t cwsr_trap_gfx9_hex[] = {
+- 0xbf820001, 0xbf82015d,
+- 0xb8f8f802, 0x89788678,
+- 0xb8f1f803, 0x866eff71,
+- 0x00000400, 0xbf850037,
+- 0x866eff71, 0x00000800,
+- 0xbf850003, 0x866eff71,
+- 0x00000100, 0xbf840008,
+- 0x866eff78, 0x00002000,
+- 0xbf840001, 0xbf810000,
+- 0x8778ff78, 0x00002000,
+- 0x80ec886c, 0x82ed806d,
+- 0xb8eef807, 0x866fff6e,
+- 0x001f8000, 0x8e6f8b6f,
+- 0x8977ff77, 0xfc000000,
+- 0x87776f77, 0x896eff6e,
+- 0x001f8000, 0xb96ef807,
+- 0xb8f0f812, 0xb8f1f813,
+- 0x8ef08870, 0xc0071bb8,
+- 0x00000000, 0xbf8cc07f,
+- 0xc0071c38, 0x00000008,
+- 0xbf8cc07f, 0x86ee6e6e,
+- 0xbf840001, 0xbe801d6e,
+- 0xb8f1f803, 0x8671ff71,
+- 0x000001ff, 0xbf850002,
+- 0x806c846c, 0x826d806d,
+- 0x866dff6d, 0x0000ffff,
+- 0x8f6e8b77, 0x866eff6e,
+- 0x001f8000, 0xb96ef807,
+- 0x86fe7e7e, 0x86ea6a6a,
+- 0x8f6e8378, 0xb96ee0c2,
+- 0xbf800002, 0xb9780002,
+- 0xbe801f6c, 0x866dff6d,
+- 0x0000ffff, 0xbef00080,
+- 0xb9700283, 0xb8f02407,
+- 0x8e709c70, 0x876d706d,
+- 0xb8f003c7, 0x8e709b70,
+- 0x876d706d, 0xb8f0f807,
+- 0x8670ff70, 0x00007fff,
+- 0xb970f807, 0xbeee007e,
+- 0xbeef007f, 0xbefe0180,
+- 0xbf900004, 0x87708478,
+- 0xb970f802, 0xbf8e0002,
+- 0xbf88fffe, 0xb8f02a05,
+- 0x80708170, 0x8e708a70,
+- 0xb8f11605, 0x80718171,
+- 0x8e718671, 0x80707170,
+- 0x80707e70, 0x8271807f,
+- 0x8671ff71, 0x0000ffff,
+- 0xc0471cb8, 0x00000040,
+- 0xbf8cc07f, 0xc04b1d38,
+- 0x00000048, 0xbf8cc07f,
+- 0xc0431e78, 0x00000058,
+- 0xbf8cc07f, 0xc0471eb8,
+- 0x0000005c, 0xbf8cc07f,
+- 0xbef4007e, 0x8675ff7f,
+- 0x0000ffff, 0x8775ff75,
+- 0x00040000, 0xbef60080,
+- 0xbef700ff, 0x00807fac,
+- 0x8670ff7f, 0x08000000,
+- 0x8f708370, 0x87777077,
+- 0x8670ff7f, 0x70000000,
+- 0x8f708170, 0x87777077,
+- 0xbefb007c, 0xbefa0080,
+- 0xb8fa2a05, 0x807a817a,
+- 0x8e7a8a7a, 0xb8f01605,
+- 0x80708170, 0x8e708670,
+- 0x807a707a, 0xbef60084,
+- 0xbef600ff, 0x01000000,
+- 0xbefe007c, 0xbefc007a,
+- 0xc0611efa, 0x0000007c,
+- 0xbf8cc07f, 0x807a847a,
+- 0xbefc007e, 0xbefe007c,
+- 0xbefc007a, 0xc0611b3a,
+- 0x0000007c, 0xbf8cc07f,
+- 0x807a847a, 0xbefc007e,
+- 0xbefe007c, 0xbefc007a,
+- 0xc0611b7a, 0x0000007c,
+- 0xbf8cc07f, 0x807a847a,
+- 0xbefc007e, 0xbefe007c,
+- 0xbefc007a, 0xc0611bba,
+- 0x0000007c, 0xbf8cc07f,
+- 0x807a847a, 0xbefc007e,
+- 0xbefe007c, 0xbefc007a,
+- 0xc0611bfa, 0x0000007c,
+- 0xbf8cc07f, 0x807a847a,
+- 0xbefc007e, 0xbefe007c,
+- 0xbefc007a, 0xc0611e3a,
+- 0x0000007c, 0xbf8cc07f,
+- 0x807a847a, 0xbefc007e,
+- 0xb8f1f803, 0xbefe007c,
+- 0xbefc007a, 0xc0611c7a,
+- 0x0000007c, 0xbf8cc07f,
+- 0x807a847a, 0xbefc007e,
+- 0xbefe007c, 0xbefc007a,
+- 0xc0611a3a, 0x0000007c,
+- 0xbf8cc07f, 0x807a847a,
+- 0xbefc007e, 0xbefe007c,
+- 0xbefc007a, 0xc0611a7a,
+- 0x0000007c, 0xbf8cc07f,
+- 0x807a847a, 0xbefc007e,
+- 0xb8fbf801, 0xbefe007c,
+- 0xbefc007a, 0xc0611efa,
+- 0x0000007c, 0xbf8cc07f,
+- 0x807a847a, 0xbefc007e,
+- 0x8670ff7f, 0x04000000,
+- 0xbeef0080, 0x876f6f70,
+- 0xb8fa2a05, 0x807a817a,
+- 0x8e7a8a7a, 0xb8f11605,
+- 0x80718171, 0x8e718471,
+- 0x8e768271, 0xbef600ff,
+- 0x01000000, 0xbef20174,
+- 0x80747a74, 0x82758075,
+- 0xbefc0080, 0xbf800000,
+- 0xbe802b00, 0xbe822b02,
+- 0xbe842b04, 0xbe862b06,
+- 0xbe882b08, 0xbe8a2b0a,
+- 0xbe8c2b0c, 0xbe8e2b0e,
+- 0xc06b003a, 0x00000000,
+- 0xbf8cc07f, 0xc06b013a,
+- 0x00000010, 0xbf8cc07f,
+- 0xc06b023a, 0x00000020,
+- 0xbf8cc07f, 0xc06b033a,
+- 0x00000030, 0xbf8cc07f,
+- 0x8074c074, 0x82758075,
+- 0x807c907c, 0xbf0a717c,
+- 0xbf85ffe7, 0xbef40172,
+- 0xbefa0080, 0xbefe00c1,
+- 0xbeff00c1, 0xbee80080,
+- 0xbee90080, 0xbef600ff,
+- 0x01000000, 0xe0724000,
+- 0x7a1d0000, 0xe0724100,
+- 0x7a1d0100, 0xe0724200,
+- 0x7a1d0200, 0xe0724300,
+- 0x7a1d0300, 0xbefe00c1,
+- 0xbeff00c1, 0xb8f14306,
+- 0x8671c171, 0xbf84002c,
+- 0xbf8a0000, 0x8670ff6f,
+- 0x04000000, 0xbf840028,
+- 0x8e718671, 0x8e718271,
+- 0xbef60071, 0xb8fa2a05,
+- 0x807a817a, 0x8e7a8a7a,
+- 0xb8f01605, 0x80708170,
+- 0x8e708670, 0x807a707a,
+- 0x807aff7a, 0x00000080,
+- 0xbef600ff, 0x01000000,
+- 0xbefc0080, 0xd28c0002,
+- 0x000100c1, 0xd28d0003,
+- 0x000204c1, 0xd1060002,
+- 0x00011103, 0x7e0602ff,
+- 0x00000200, 0xbefc00ff,
+- 0x00010000, 0xbe800077,
+- 0x8677ff77, 0xff7fffff,
+- 0x8777ff77, 0x00058000,
+- 0xd8ec0000, 0x00000002,
+- 0xbf8cc07f, 0xe0765000,
+- 0x7a1d0002, 0x68040702,
+- 0xd0c9006a, 0x0000e302,
+- 0xbf87fff7, 0xbef70000,
+- 0xbefa00ff, 0x00000400,
+- 0xbefe00c1, 0xbeff00c1,
+- 0xb8f12a05, 0x80718171,
+- 0x8e718271, 0x8e768871,
+- 0xbef600ff, 0x01000000,
+- 0xbefc0084, 0xbf0a717c,
+- 0xbf840015, 0xbf11017c,
+- 0x8071ff71, 0x00001000,
+- 0x7e000300, 0x7e020301,
+- 0x7e040302, 0x7e060303,
+- 0xe0724000, 0x7a1d0000,
+- 0xe0724100, 0x7a1d0100,
+- 0xe0724200, 0x7a1d0200,
+- 0xe0724300, 0x7a1d0300,
+- 0x807c847c, 0x807aff7a,
+- 0x00000400, 0xbf0a717c,
+- 0xbf85ffef, 0xbf9c0000,
+- 0xbf8200dc, 0xbef4007e,
+- 0x8675ff7f, 0x0000ffff,
+- 0x8775ff75, 0x00040000,
+- 0xbef60080, 0xbef700ff,
+- 0x00807fac, 0x866eff7f,
+- 0x08000000, 0x8f6e836e,
+- 0x87776e77, 0x866eff7f,
+- 0x70000000, 0x8f6e816e,
+- 0x87776e77, 0x866eff7f,
+- 0x04000000, 0xbf84001e,
+- 0xbefe00c1, 0xbeff00c1,
+- 0xb8ef4306, 0x866fc16f,
+- 0xbf840019, 0x8e6f866f,
+- 0x8e6f826f, 0xbef6006f,
+- 0xb8f82a05, 0x80788178,
+- 0x8e788a78, 0xb8ee1605,
+- 0x806e816e, 0x8e6e866e,
+- 0x80786e78, 0x8078ff78,
+- 0x00000080, 0xbef600ff,
+- 0x01000000, 0xbefc0080,
+- 0xe0510000, 0x781d0000,
+- 0xe0510100, 0x781d0000,
+- 0x807cff7c, 0x00000200,
+- 0x8078ff78, 0x00000200,
+- 0xbf0a6f7c, 0xbf85fff6,
+- 0xbef80080, 0xbefe00c1,
+- 0xbeff00c1, 0xb8ef2a05,
+- 0x806f816f, 0x8e6f826f,
+- 0x8e76886f, 0xbef600ff,
+- 0x01000000, 0xbeee0078,
+- 0x8078ff78, 0x00000400,
+- 0xbefc0084, 0xbf11087c,
+- 0x806fff6f, 0x00008000,
+- 0xe0524000, 0x781d0000,
+- 0xe0524100, 0x781d0100,
+- 0xe0524200, 0x781d0200,
+- 0xe0524300, 0x781d0300,
+- 0xbf8c0f70, 0x7e000300,
+- 0x7e020301, 0x7e040302,
+- 0x7e060303, 0x807c847c,
+- 0x8078ff78, 0x00000400,
+- 0xbf0a6f7c, 0xbf85ffee,
+- 0xbf9c0000, 0xe0524000,
+- 0x6e1d0000, 0xe0524100,
+- 0x6e1d0100, 0xe0524200,
+- 0x6e1d0200, 0xe0524300,
+- 0x6e1d0300, 0xb8f82a05,
+- 0x80788178, 0x8e788a78,
+- 0xb8ee1605, 0x806e816e,
+- 0x8e6e866e, 0x80786e78,
+- 0x80f8c078, 0xb8ef1605,
+- 0x806f816f, 0x8e6f846f,
+- 0x8e76826f, 0xbef600ff,
+- 0x01000000, 0xbefc006f,
+- 0xc031003a, 0x00000078,
+- 0x80f8c078, 0xbf8cc07f,
+- 0x80fc907c, 0xbf800000,
+- 0xbe802d00, 0xbe822d02,
+- 0xbe842d04, 0xbe862d06,
+- 0xbe882d08, 0xbe8a2d0a,
+- 0xbe8c2d0c, 0xbe8e2d0e,
+- 0xbf06807c, 0xbf84fff0,
+- 0xb8f82a05, 0x80788178,
+- 0x8e788a78, 0xb8ee1605,
+- 0x806e816e, 0x8e6e866e,
+- 0x80786e78, 0xbef60084,
+- 0xbef600ff, 0x01000000,
+- 0xc0211bfa, 0x00000078,
+- 0x80788478, 0xc0211b3a,
+- 0x00000078, 0x80788478,
+- 0xc0211b7a, 0x00000078,
+- 0x80788478, 0xc0211eba,
+- 0x00000078, 0x80788478,
+- 0xc0211efa, 0x00000078,
+- 0x80788478, 0xc0211c3a,
+- 0x00000078, 0x80788478,
+- 0xc0211c7a, 0x00000078,
+- 0x80788478, 0xc0211a3a,
+- 0x00000078, 0x80788478,
+- 0xc0211a7a, 0x00000078,
+- 0x80788478, 0xc0211cfa,
+- 0x00000078, 0x80788478,
+- 0xbf8cc07f, 0xbefc006f,
+- 0xbefe007a, 0xbeff007b,
+- 0x866f71ff, 0x000003ff,
+- 0xb96f4803, 0x866f71ff,
+- 0xfffff800, 0x8f6f8b6f,
+- 0xb96fa2c3, 0xb973f801,
+- 0xb8ee2a05, 0x806e816e,
+- 0x8e6e8a6e, 0xb8ef1605,
+- 0x806f816f, 0x8e6f866f,
+- 0x806e6f6e, 0x806e746e,
+- 0x826f8075, 0x866fff6f,
+- 0x0000ffff, 0xc0071cb7,
+- 0x00000040, 0xc00b1d37,
+- 0x00000048, 0xc0031e77,
+- 0x00000058, 0xc0071eb7,
+- 0x0000005c, 0xbf8cc07f,
+- 0x866fff6d, 0xf0000000,
+- 0x8f6f9c6f, 0x8e6f906f,
+- 0xbeee0080, 0x876e6f6e,
+- 0x866fff6d, 0x08000000,
+- 0x8f6f9b6f, 0x8e6f8f6f,
+- 0x876e6f6e, 0x866fff70,
+- 0x00800000, 0x8f6f976f,
+- 0xb96ef807, 0x866dff6d,
+- 0x0000ffff, 0x86fe7e7e,
+- 0x86ea6a6a, 0x8f6e8370,
+- 0xb96ee0c2, 0xbf800002,
+- 0xb9700002, 0xbf8a0000,
+- 0x95806f6c, 0xbf810000,
+-};
+diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
+index abe1a5d..751cc2e 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
+@@ -20,12 +20,9 @@
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-/* To compile this assembly code:
+- * PROJECT=vi ./sp3 cwsr_trap_handler_gfx8.asm -hex tmp.hex
+- */
+-
+-/* HW (VI) source code for CWSR trap handler */
+-/* Version 18 + multiple trap handler */
++#if 0
++HW (VI) source code for CWSR trap handler
++#Version 18 + multiple trap handler
+
+ // this performance-optimal version was originally from Seven Xu at SRDC
+
+@@ -77,7 +74,7 @@ var G8SR_RESTORE_BUF_RSRC_WORD1_STRIDE_DWx4 = G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_D
+ /*************************************************************************/
+ /* control on how to run the shader */
+ /*************************************************************************/
+-//any hack that needs to be made to run this code in EMU (either because various EMU code are not ready or no compute save & restore in EMU run)
++//any hack that needs to be made to run this code in EMU (either becasue various EMU code are not ready or no compute save & restore in EMU run)
+ var EMU_RUN_HACK = 0
+ var EMU_RUN_HACK_RESTORE_NORMAL = 0
+ var EMU_RUN_HACK_SAVE_NORMAL_EXIT = 0
+@@ -91,9 +88,9 @@ var WG_BASE_ADDR_HI = 0x0
+ var WAVE_SPACE = 0x5000 //memory size that each wave occupies in workgroup state mem
+ var CTX_SAVE_CONTROL = 0x0
+ var CTX_RESTORE_CONTROL = CTX_SAVE_CONTROL
+-var SIM_RUN_HACK = 0 //any hack that needs to be made to run this code in SIM (either because various RTL code are not ready or no compute save & restore in RTL run)
++var SIM_RUN_HACK = 0 //any hack that needs to be made to run this code in SIM (either becasue various RTL code are not ready or no compute save & restore in RTL run)
+ var SGPR_SAVE_USE_SQC = 1 //use SQC D$ to do the write
+-var USE_MTBUF_INSTEAD_OF_MUBUF = 0 //because TC EMU currently asserts on 0 of // overload DFMT field to carry 4 more bits of stride for MUBUF opcodes
++var USE_MTBUF_INSTEAD_OF_MUBUF = 0 //becasue TC EMU curently asserts on 0 of // overload DFMT field to carry 4 more bits of stride for MUBUF opcodes
+ var SWIZZLE_EN = 0 //whether we use swizzled buffer addressing
+
+ /**************************************************************************/
+@@ -101,12 +98,7 @@ var SWIZZLE_EN = 0 //whether we use swi
+ /**************************************************************************/
+ var SQ_WAVE_STATUS_INST_ATC_SHIFT = 23
+ var SQ_WAVE_STATUS_INST_ATC_MASK = 0x00800000
+-var SQ_WAVE_STATUS_SPI_PRIO_SHIFT = 1
+ var SQ_WAVE_STATUS_SPI_PRIO_MASK = 0x00000006
+-var SQ_WAVE_STATUS_PRE_SPI_PRIO_SHIFT = 0
+-var SQ_WAVE_STATUS_PRE_SPI_PRIO_SIZE = 1
+-var SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT = 3
+-var SQ_WAVE_STATUS_POST_SPI_PRIO_SIZE = 29
+
+ var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12
+ var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 9
+@@ -157,7 +149,7 @@ var s_save_spi_init_lo = exec_lo
+ var s_save_spi_init_hi = exec_hi
+
+ //tba_lo and tba_hi need to be saved/restored
+-var s_save_pc_lo = ttmp0 //{TTMP1, TTMP0} = {3'h0,pc_rewind[3:0], HT[0],trapID[7:0], PC[47:0]}
++var s_save_pc_lo = ttmp0 //{TTMP1, TTMP0} = {3¡¯h0,pc_rewind[3:0], HT[0],trapID[7:0], PC[47:0]}
+ var s_save_pc_hi = ttmp1
+ var s_save_exec_lo = ttmp2
+ var s_save_exec_hi = ttmp3
+@@ -255,7 +247,7 @@ if (!EMU_RUN_HACK)
+ s_waitcnt lgkmcnt(0)
+ s_or_b32 ttmp7, ttmp8, ttmp9
+ s_cbranch_scc0 L_NO_NEXT_TRAP //next level trap handler not been set
+- set_status_without_spi_prio(s_save_status, ttmp2) //restore HW status(SCC)
++ s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status //restore HW status(SCC)
+ s_setpc_b64 [ttmp8,ttmp9] //jump to next level trap handler
+
+ L_NO_NEXT_TRAP:
+@@ -266,7 +258,7 @@ L_NO_NEXT_TRAP:
+ s_addc_u32 ttmp1, ttmp1, 0
+ L_EXCP_CASE:
+ s_and_b32 ttmp1, ttmp1, 0xFFFF
+- set_status_without_spi_prio(s_save_status, ttmp2) //restore HW status(SCC)
++ s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status //restore HW status(SCC)
+ s_rfe_b64 [ttmp0, ttmp1]
+ end
+ // ********* End handling of non-CWSR traps *******************
+@@ -327,10 +319,6 @@ end
+ s_sendmsg sendmsg(MSG_SAVEWAVE) //send SPI a message and wait for SPI's write to EXEC
+ end
+
+- // Set SPI_PRIO=2 to avoid starving instruction fetch in the waves we're waiting for.
+- s_or_b32 s_save_tmp, s_save_status, (2 << SQ_WAVE_STATUS_SPI_PRIO_SHIFT)
+- s_setreg_b32 hwreg(HW_REG_STATUS), s_save_tmp
+-
+ L_SLEEP:
+ s_sleep 0x2 // sleep 1 (64clk) is not enough for 8 waves per SIMD, which will cause SQ hang, since the 7,8th wave could not get arbit to exec inst, while other waves are stuck into the sleep-loop and waiting for wrexec!=0
+
+@@ -1019,6 +1007,8 @@ end
+
+ s_waitcnt lgkmcnt(0) //from now on, it is safe to restore STATUS and IB_STS
+
++ s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS
++
+ //for normal save & restore, the saved PC points to the next inst to execute, no adjustment needs to be made, otherwise:
+ if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL))
+ s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 8 //pc[31:0]+8 //two back-to-back s_trap are used (first for save and second for restore)
+@@ -1054,12 +1044,11 @@ end
+ s_lshr_b32 s_restore_m0, s_restore_m0, SQ_WAVE_STATUS_INST_ATC_SHIFT
+ s_setreg_b32 hwreg(HW_REG_IB_STS), s_restore_tmp
+
+- s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS
+ s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
+ s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
+- set_status_without_spi_prio(s_restore_status, s_restore_tmp) // SCC is included, which is changed by previous salu
++ s_setreg_b32 hwreg(HW_REG_STATUS), s_restore_status // SCC is included, which is changed by previous salu
+
+- s_barrier //barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG //FIXME not performance-optimal at this time
++ s_barrier //barrier to ensure the readiness of LDS before access attemps from any other wave in the same TG //FIXME not performance-optimal at this time
+
+ if G8SR_DEBUG_TIMESTAMP
+ s_memrealtime s_g8sr_ts_restore_d
+@@ -1139,10 +1128,257 @@ function get_hwreg_size_bytes
+ return 128 //HWREG size 128 bytes
+ end
+
+-function set_status_without_spi_prio(status, tmp)
+- // Do not restore STATUS.SPI_PRIO since scheduler may have raised it.
+- s_lshr_b32 tmp, status, SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT
+- s_setreg_b32 hwreg(HW_REG_STATUS, SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT, SQ_WAVE_STATUS_POST_SPI_PRIO_SIZE), tmp
+- s_nop 0x2 // avoid S_SETREG => S_SETREG hazard
+- s_setreg_b32 hwreg(HW_REG_STATUS, SQ_WAVE_STATUS_PRE_SPI_PRIO_SHIFT, SQ_WAVE_STATUS_PRE_SPI_PRIO_SIZE), status
+-end
++
++#endif
++
++static const uint32_t cwsr_trap_gfx8_hex[] = {
++ 0xbf820001, 0xbf820123,
++ 0xb8f4f802, 0x89748674,
++ 0xb8f5f803, 0x8675ff75,
++ 0x00000400, 0xbf850011,
++ 0xc00a1e37, 0x00000000,
++ 0xbf8c007f, 0x87777978,
++ 0xbf840002, 0xb974f802,
++ 0xbe801d78, 0xb8f5f803,
++ 0x8675ff75, 0x000001ff,
++ 0xbf850002, 0x80708470,
++ 0x82718071, 0x8671ff71,
++ 0x0000ffff, 0xb974f802,
++ 0xbe801f70, 0xb8f5f803,
++ 0x8675ff75, 0x00000100,
++ 0xbf840006, 0xbefa0080,
++ 0xb97a0203, 0x8671ff71,
++ 0x0000ffff, 0x80f08870,
++ 0x82f18071, 0xbefa0080,
++ 0xb97a0283, 0xbef60068,
++ 0xbef70069, 0xb8fa1c07,
++ 0x8e7a9c7a, 0x87717a71,
++ 0xb8fa03c7, 0x8e7a9b7a,
++ 0x87717a71, 0xb8faf807,
++ 0x867aff7a, 0x00007fff,
++ 0xb97af807, 0xbef2007e,
++ 0xbef3007f, 0xbefe0180,
++ 0xbf900004, 0xbf8e0002,
++ 0xbf88fffe, 0xbef8007e,
++ 0x8679ff7f, 0x0000ffff,
++ 0x8779ff79, 0x00040000,
++ 0xbefa0080, 0xbefb00ff,
++ 0x00807fac, 0x867aff7f,
++ 0x08000000, 0x8f7a837a,
++ 0x877b7a7b, 0x867aff7f,
++ 0x70000000, 0x8f7a817a,
++ 0x877b7a7b, 0xbeef007c,
++ 0xbeee0080, 0xb8ee2a05,
++ 0x806e816e, 0x8e6e8a6e,
++ 0xb8fa1605, 0x807a817a,
++ 0x8e7a867a, 0x806e7a6e,
++ 0xbefa0084, 0xbefa00ff,
++ 0x01000000, 0xbefe007c,
++ 0xbefc006e, 0xc0611bfc,
++ 0x0000007c, 0x806e846e,
++ 0xbefc007e, 0xbefe007c,
++ 0xbefc006e, 0xc0611c3c,
++ 0x0000007c, 0x806e846e,
++ 0xbefc007e, 0xbefe007c,
++ 0xbefc006e, 0xc0611c7c,
++ 0x0000007c, 0x806e846e,
++ 0xbefc007e, 0xbefe007c,
++ 0xbefc006e, 0xc0611cbc,
++ 0x0000007c, 0x806e846e,
++ 0xbefc007e, 0xbefe007c,
++ 0xbefc006e, 0xc0611cfc,
++ 0x0000007c, 0x806e846e,
++ 0xbefc007e, 0xbefe007c,
++ 0xbefc006e, 0xc0611d3c,
++ 0x0000007c, 0x806e846e,
++ 0xbefc007e, 0xb8f5f803,
++ 0xbefe007c, 0xbefc006e,
++ 0xc0611d7c, 0x0000007c,
++ 0x806e846e, 0xbefc007e,
++ 0xbefe007c, 0xbefc006e,
++ 0xc0611dbc, 0x0000007c,
++ 0x806e846e, 0xbefc007e,
++ 0xbefe007c, 0xbefc006e,
++ 0xc0611dfc, 0x0000007c,
++ 0x806e846e, 0xbefc007e,
++ 0xb8eff801, 0xbefe007c,
++ 0xbefc006e, 0xc0611bfc,
++ 0x0000007c, 0x806e846e,
++ 0xbefc007e, 0xbefe007c,
++ 0xbefc006e, 0xc0611b3c,
++ 0x0000007c, 0x806e846e,
++ 0xbefc007e, 0xbefe007c,
++ 0xbefc006e, 0xc0611b7c,
++ 0x0000007c, 0x806e846e,
++ 0xbefc007e, 0x867aff7f,
++ 0x04000000, 0xbef30080,
++ 0x8773737a, 0xb8ee2a05,
++ 0x806e816e, 0x8e6e8a6e,
++ 0xb8f51605, 0x80758175,
++ 0x8e758475, 0x8e7a8275,
++ 0xbefa00ff, 0x01000000,
++ 0xbef60178, 0x80786e78,
++ 0x82798079, 0xbefc0080,
++ 0xbe802b00, 0xbe822b02,
++ 0xbe842b04, 0xbe862b06,
++ 0xbe882b08, 0xbe8a2b0a,
++ 0xbe8c2b0c, 0xbe8e2b0e,
++ 0xc06b003c, 0x00000000,
++ 0xc06b013c, 0x00000010,
++ 0xc06b023c, 0x00000020,
++ 0xc06b033c, 0x00000030,
++ 0x8078c078, 0x82798079,
++ 0x807c907c, 0xbf0a757c,
++ 0xbf85ffeb, 0xbef80176,
++ 0xbeee0080, 0xbefe00c1,
++ 0xbeff00c1, 0xbefa00ff,
++ 0x01000000, 0xe0724000,
++ 0x6e1e0000, 0xe0724100,
++ 0x6e1e0100, 0xe0724200,
++ 0x6e1e0200, 0xe0724300,
++ 0x6e1e0300, 0xbefe00c1,
++ 0xbeff00c1, 0xb8f54306,
++ 0x8675c175, 0xbf84002c,
++ 0xbf8a0000, 0x867aff73,
++ 0x04000000, 0xbf840028,
++ 0x8e758675, 0x8e758275,
++ 0xbefa0075, 0xb8ee2a05,
++ 0x806e816e, 0x8e6e8a6e,
++ 0xb8fa1605, 0x807a817a,
++ 0x8e7a867a, 0x806e7a6e,
++ 0x806eff6e, 0x00000080,
++ 0xbefa00ff, 0x01000000,
++ 0xbefc0080, 0xd28c0002,
++ 0x000100c1, 0xd28d0003,
++ 0x000204c1, 0xd1060002,
++ 0x00011103, 0x7e0602ff,
++ 0x00000200, 0xbefc00ff,
++ 0x00010000, 0xbe80007b,
++ 0x867bff7b, 0xff7fffff,
++ 0x877bff7b, 0x00058000,
++ 0xd8ec0000, 0x00000002,
++ 0xbf8c007f, 0xe0765000,
++ 0x6e1e0002, 0x32040702,
++ 0xd0c9006a, 0x0000eb02,
++ 0xbf87fff7, 0xbefb0000,
++ 0xbeee00ff, 0x00000400,
++ 0xbefe00c1, 0xbeff00c1,
++ 0xb8f52a05, 0x80758175,
++ 0x8e758275, 0x8e7a8875,
++ 0xbefa00ff, 0x01000000,
++ 0xbefc0084, 0xbf0a757c,
++ 0xbf840015, 0xbf11017c,
++ 0x8075ff75, 0x00001000,
++ 0x7e000300, 0x7e020301,
++ 0x7e040302, 0x7e060303,
++ 0xe0724000, 0x6e1e0000,
++ 0xe0724100, 0x6e1e0100,
++ 0xe0724200, 0x6e1e0200,
++ 0xe0724300, 0x6e1e0300,
++ 0x807c847c, 0x806eff6e,
++ 0x00000400, 0xbf0a757c,
++ 0xbf85ffef, 0xbf9c0000,
++ 0xbf8200ca, 0xbef8007e,
++ 0x8679ff7f, 0x0000ffff,
++ 0x8779ff79, 0x00040000,
++ 0xbefa0080, 0xbefb00ff,
++ 0x00807fac, 0x8676ff7f,
++ 0x08000000, 0x8f768376,
++ 0x877b767b, 0x8676ff7f,
++ 0x70000000, 0x8f768176,
++ 0x877b767b, 0x8676ff7f,
++ 0x04000000, 0xbf84001e,
++ 0xbefe00c1, 0xbeff00c1,
++ 0xb8f34306, 0x8673c173,
++ 0xbf840019, 0x8e738673,
++ 0x8e738273, 0xbefa0073,
++ 0xb8f22a05, 0x80728172,
++ 0x8e728a72, 0xb8f61605,
++ 0x80768176, 0x8e768676,
++ 0x80727672, 0x8072ff72,
++ 0x00000080, 0xbefa00ff,
++ 0x01000000, 0xbefc0080,
++ 0xe0510000, 0x721e0000,
++ 0xe0510100, 0x721e0000,
++ 0x807cff7c, 0x00000200,
++ 0x8072ff72, 0x00000200,
++ 0xbf0a737c, 0xbf85fff6,
++ 0xbef20080, 0xbefe00c1,
++ 0xbeff00c1, 0xb8f32a05,
++ 0x80738173, 0x8e738273,
++ 0x8e7a8873, 0xbefa00ff,
++ 0x01000000, 0xbef60072,
++ 0x8072ff72, 0x00000400,
++ 0xbefc0084, 0xbf11087c,
++ 0x8073ff73, 0x00008000,
++ 0xe0524000, 0x721e0000,
++ 0xe0524100, 0x721e0100,
++ 0xe0524200, 0x721e0200,
++ 0xe0524300, 0x721e0300,
++ 0xbf8c0f70, 0x7e000300,
++ 0x7e020301, 0x7e040302,
++ 0x7e060303, 0x807c847c,
++ 0x8072ff72, 0x00000400,
++ 0xbf0a737c, 0xbf85ffee,
++ 0xbf9c0000, 0xe0524000,
++ 0x761e0000, 0xe0524100,
++ 0x761e0100, 0xe0524200,
++ 0x761e0200, 0xe0524300,
++ 0x761e0300, 0xb8f22a05,
++ 0x80728172, 0x8e728a72,
++ 0xb8f61605, 0x80768176,
++ 0x8e768676, 0x80727672,
++ 0x80f2c072, 0xb8f31605,
++ 0x80738173, 0x8e738473,
++ 0x8e7a8273, 0xbefa00ff,
++ 0x01000000, 0xbefc0073,
++ 0xc031003c, 0x00000072,
++ 0x80f2c072, 0xbf8c007f,
++ 0x80fc907c, 0xbe802d00,
++ 0xbe822d02, 0xbe842d04,
++ 0xbe862d06, 0xbe882d08,
++ 0xbe8a2d0a, 0xbe8c2d0c,
++ 0xbe8e2d0e, 0xbf06807c,
++ 0xbf84fff1, 0xb8f22a05,
++ 0x80728172, 0x8e728a72,
++ 0xb8f61605, 0x80768176,
++ 0x8e768676, 0x80727672,
++ 0xbefa0084, 0xbefa00ff,
++ 0x01000000, 0xc0211cfc,
++ 0x00000072, 0x80728472,
++ 0xc0211c3c, 0x00000072,
++ 0x80728472, 0xc0211c7c,
++ 0x00000072, 0x80728472,
++ 0xc0211bbc, 0x00000072,
++ 0x80728472, 0xc0211bfc,
++ 0x00000072, 0x80728472,
++ 0xc0211d3c, 0x00000072,
++ 0x80728472, 0xc0211d7c,
++ 0x00000072, 0x80728472,
++ 0xc0211a3c, 0x00000072,
++ 0x80728472, 0xc0211a7c,
++ 0x00000072, 0x80728472,
++ 0xc0211dfc, 0x00000072,
++ 0x80728472, 0xc0211b3c,
++ 0x00000072, 0x80728472,
++ 0xc0211b7c, 0x00000072,
++ 0x80728472, 0xbf8c007f,
++ 0x8671ff71, 0x0000ffff,
++ 0xbefc0073, 0xbefe006e,
++ 0xbeff006f, 0x867375ff,
++ 0x000003ff, 0xb9734803,
++ 0x867375ff, 0xfffff800,
++ 0x8f738b73, 0xb973a2c3,
++ 0xb977f801, 0x8673ff71,
++ 0xf0000000, 0x8f739c73,
++ 0x8e739073, 0xbef60080,
++ 0x87767376, 0x8673ff71,
++ 0x08000000, 0x8f739b73,
++ 0x8e738f73, 0x87767376,
++ 0x8673ff74, 0x00800000,
++ 0x8f739773, 0xb976f807,
++ 0x86fe7e7e, 0x86ea6a6a,
++ 0xb974f802, 0xbf8a0000,
++ 0x95807370, 0xbf810000,
++};
++
+diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+index 0bb9c57..bd2957c 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+@@ -20,12 +20,9 @@
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-/* To compile this assembly code:
+- * PROJECT=greenland ./sp3 cwsr_trap_handler_gfx9.asm -hex tmp.hex
+- */
+-
+-/* HW (GFX9) source code for CWSR trap handler */
+-/* Version 18 + multiple trap handler */
++#if 0
++HW (GFX9) source code for CWSR trap handler
++#Version 18 + multiple trap handler
+
+ // this performance-optimal version was originally from Seven Xu at SRDC
+
+@@ -77,7 +74,7 @@ var G8SR_RESTORE_BUF_RSRC_WORD1_STRIDE_DWx4 = G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_D
+ /*************************************************************************/
+ /* control on how to run the shader */
+ /*************************************************************************/
+-//any hack that needs to be made to run this code in EMU (either because various EMU code are not ready or no compute save & restore in EMU run)
++//any hack that needs to be made to run this code in EMU (either becasue various EMU code are not ready or no compute save & restore in EMU run)
+ var EMU_RUN_HACK = 0
+ var EMU_RUN_HACK_RESTORE_NORMAL = 0
+ var EMU_RUN_HACK_SAVE_NORMAL_EXIT = 0
+@@ -89,9 +86,9 @@ var WG_BASE_ADDR_HI = 0x0
+ var WAVE_SPACE = 0x5000 //memory size that each wave occupies in workgroup state mem
+ var CTX_SAVE_CONTROL = 0x0
+ var CTX_RESTORE_CONTROL = CTX_SAVE_CONTROL
+-var SIM_RUN_HACK = 0 //any hack that needs to be made to run this code in SIM (either because various RTL code are not ready or no compute save & restore in RTL run)
++var SIM_RUN_HACK = 0 //any hack that needs to be made to run this code in SIM (either becasue various RTL code are not ready or no compute save & restore in RTL run)
+ var SGPR_SAVE_USE_SQC = 1 //use SQC D$ to do the write
+-var USE_MTBUF_INSTEAD_OF_MUBUF = 0 //because TC EMU currently asserts on 0 of // overload DFMT field to carry 4 more bits of stride for MUBUF opcodes
++var USE_MTBUF_INSTEAD_OF_MUBUF = 0 //becasue TC EMU curently asserts on 0 of // overload DFMT field to carry 4 more bits of stride for MUBUF opcodes
+ var SWIZZLE_EN = 0 //whether we use swizzled buffer addressing
+ var ACK_SQC_STORE = 1 //workaround for suspected SQC store bug causing incorrect stores under concurrency
+
+@@ -100,13 +97,8 @@ var ACK_SQC_STORE = 1 //workaround for suspected SQC store bug causing
+ /**************************************************************************/
+ var SQ_WAVE_STATUS_INST_ATC_SHIFT = 23
+ var SQ_WAVE_STATUS_INST_ATC_MASK = 0x00800000
+-var SQ_WAVE_STATUS_SPI_PRIO_SHIFT = 1
+ var SQ_WAVE_STATUS_SPI_PRIO_MASK = 0x00000006
+ var SQ_WAVE_STATUS_HALT_MASK = 0x2000
+-var SQ_WAVE_STATUS_PRE_SPI_PRIO_SHIFT = 0
+-var SQ_WAVE_STATUS_PRE_SPI_PRIO_SIZE = 1
+-var SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT = 3
+-var SQ_WAVE_STATUS_POST_SPI_PRIO_SIZE = 29
+
+ var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12
+ var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 9
+@@ -130,14 +122,11 @@ var SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK = 0x800
+
+ var SQ_WAVE_IB_STS_RCNT_SHIFT = 16 //FIXME
+ var SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT = 15 //FIXME
+-var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK = 0x1F8000
+ var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK_NEG = 0x00007FFF //FIXME
+
+ var SQ_BUF_RSRC_WORD1_ATC_SHIFT = 24
+ var SQ_BUF_RSRC_WORD3_MTYPE_SHIFT = 27
+
+-var TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT = 26 // bits [31:26] unused by SPI debug data
+-var TTMP11_SAVE_RCNT_FIRST_REPLAY_MASK = 0xFC000000
+
+ /* Save */
+ var S_SAVE_BUF_RSRC_WORD1_STRIDE = 0x00040000 //stride is 4 bytes
+@@ -158,11 +147,11 @@ var S_SAVE_PC_HI_FIRST_REPLAY_MASK = 0x08000000 //FIXME
+ var s_save_spi_init_lo = exec_lo
+ var s_save_spi_init_hi = exec_hi
+
+-var s_save_pc_lo = ttmp0 //{TTMP1, TTMP0} = {3'h0,pc_rewind[3:0], HT[0],trapID[7:0], PC[47:0]}
++var s_save_pc_lo = ttmp0 //{TTMP1, TTMP0} = {3¡¯h0,pc_rewind[3:0], HT[0],trapID[7:0], PC[47:0]}
+ var s_save_pc_hi = ttmp1
+ var s_save_exec_lo = ttmp2
+ var s_save_exec_hi = ttmp3
+-var s_save_tmp = ttmp4
++var s_save_status = ttmp4
+ var s_save_trapsts = ttmp5 //not really used until the end of the SAVE routine
+ var s_save_xnack_mask_lo = ttmp6
+ var s_save_xnack_mask_hi = ttmp7
+@@ -170,12 +159,11 @@ var s_save_buf_rsrc0 = ttmp8
+ var s_save_buf_rsrc1 = ttmp9
+ var s_save_buf_rsrc2 = ttmp10
+ var s_save_buf_rsrc3 = ttmp11
+-var s_save_status = ttmp12
++
+ var s_save_mem_offset = ttmp14
+ var s_save_alloc_size = s_save_trapsts //conflict
++var s_save_tmp = s_save_buf_rsrc2 //shared with s_save_buf_rsrc2 (conflict: should not use mem access with s_save_tmp at the same time)
+ var s_save_m0 = ttmp15
+-var s_save_ttmps_lo = s_save_tmp //no conflict
+-var s_save_ttmps_hi = s_save_trapsts //no conflict
+
+ /* Restore */
+ var S_RESTORE_BUF_RSRC_WORD1_STRIDE = S_SAVE_BUF_RSRC_WORD1_STRIDE
+@@ -198,7 +186,7 @@ var s_restore_spi_init_hi = exec_hi
+
+ var s_restore_mem_offset = ttmp12
+ var s_restore_alloc_size = ttmp3
+-var s_restore_tmp = ttmp2
++var s_restore_tmp = ttmp6
+ var s_restore_mem_offset_save = s_restore_tmp //no conflict
+
+ var s_restore_m0 = s_restore_alloc_size //no conflict
+@@ -217,8 +205,6 @@ var s_restore_buf_rsrc0 = ttmp8
+ var s_restore_buf_rsrc1 = ttmp9
+ var s_restore_buf_rsrc2 = ttmp10
+ var s_restore_buf_rsrc3 = ttmp11
+-var s_restore_ttmps_lo = s_restore_tmp //no conflict
+-var s_restore_ttmps_hi = s_restore_alloc_size //no conflict
+
+ /**************************************************************************/
+ /* trap handler entry points */
+@@ -249,25 +235,25 @@ L_SKIP_RESTORE:
+ s_getreg_b32 s_save_status, hwreg(HW_REG_STATUS) //save STATUS since we will change SCC
+ s_andn2_b32 s_save_status, s_save_status, SQ_WAVE_STATUS_SPI_PRIO_MASK //check whether this is for save
+ s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
+- s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_SAVECTX_MASK //check whether this is for save
++ s_and_b32 ttmp8, s_save_trapsts, SQ_WAVE_TRAPSTS_SAVECTX_MASK //check whether this is for save
+ s_cbranch_scc1 L_SAVE //this is the operation for save
+
+ // ********* Handle non-CWSR traps *******************
+ if (!EMU_RUN_HACK)
+ // Illegal instruction is a non-maskable exception which blocks context save.
+ // Halt the wavefront and return from the trap.
+- s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK
++ s_and_b32 ttmp8, s_save_trapsts, SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK
+ s_cbranch_scc1 L_HALT_WAVE
+
+ // If STATUS.MEM_VIOL is asserted then we cannot fetch from the TMA.
+ // Instead, halt the wavefront and return from the trap.
+- s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_MEM_VIOL_MASK
+- s_cbranch_scc0 L_FETCH_2ND_TRAP
++ s_and_b32 ttmp8, s_save_trapsts, SQ_WAVE_TRAPSTS_MEM_VIOL_MASK
++ s_cbranch_scc0 L_NO_MEM_VIOL
+
+ L_HALT_WAVE:
+ // If STATUS.HALT is set then this fault must come from SQC instruction fetch.
+ // We cannot prevent further faults so just terminate the wavefront.
+- s_and_b32 ttmp2, s_save_status, SQ_WAVE_STATUS_HALT_MASK
++ s_and_b32 ttmp8, s_save_status, SQ_WAVE_STATUS_HALT_MASK
+ s_cbranch_scc0 L_NOT_ALREADY_HALTED
+ s_endpgm
+ L_NOT_ALREADY_HALTED:
+@@ -278,31 +264,19 @@ L_NOT_ALREADY_HALTED:
+ s_sub_u32 ttmp0, ttmp0, 0x8
+ s_subb_u32 ttmp1, ttmp1, 0x0
+
+-L_FETCH_2ND_TRAP:
+- // Preserve and clear scalar XNACK state before issuing scalar reads.
+- // Save IB_STS.FIRST_REPLAY[15] and IB_STS.RCNT[20:16] into unused space ttmp11[31:26].
+- s_getreg_b32 ttmp2, hwreg(HW_REG_IB_STS)
+- s_and_b32 ttmp3, ttmp2, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
+- s_lshl_b32 ttmp3, ttmp3, (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT)
+- s_andn2_b32 ttmp11, ttmp11, TTMP11_SAVE_RCNT_FIRST_REPLAY_MASK
+- s_or_b32 ttmp11, ttmp11, ttmp3
+-
+- s_andn2_b32 ttmp2, ttmp2, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
+- s_setreg_b32 hwreg(HW_REG_IB_STS), ttmp2
+-
+- // Read second-level TBA/TMA from first-level TMA and jump if available.
+- // ttmp[2:5] and ttmp12 can be used (others hold SPI-initialized debug data)
+- // ttmp12 holds SQ_WAVE_STATUS
+- s_getreg_b32 ttmp4, hwreg(HW_REG_SQ_SHADER_TMA_LO)
+- s_getreg_b32 ttmp5, hwreg(HW_REG_SQ_SHADER_TMA_HI)
+- s_lshl_b64 [ttmp4, ttmp5], [ttmp4, ttmp5], 0x8
+- s_load_dwordx2 [ttmp2, ttmp3], [ttmp4, ttmp5], 0x0 glc:1 // second-level TBA
+- s_waitcnt lgkmcnt(0)
+- s_load_dwordx2 [ttmp4, ttmp5], [ttmp4, ttmp5], 0x8 glc:1 // second-level TMA
+- s_waitcnt lgkmcnt(0)
+- s_and_b64 [ttmp2, ttmp3], [ttmp2, ttmp3], [ttmp2, ttmp3]
+- s_cbranch_scc0 L_NO_NEXT_TRAP // second-level trap handler not been set
+- s_setpc_b64 [ttmp2, ttmp3] // jump to second-level trap handler
++ s_branch L_EXCP_CASE
++
++L_NO_MEM_VIOL:
++ /* read tba and tma for next level trap handler, ttmp4 is used as s_save_status */
++ s_getreg_b32 ttmp14,hwreg(HW_REG_SQ_SHADER_TMA_LO)
++ s_getreg_b32 ttmp15,hwreg(HW_REG_SQ_SHADER_TMA_HI)
++ s_lshl_b64 [ttmp14, ttmp15], [ttmp14, ttmp15], 0x8
++ s_load_dwordx4 [ttmp8, ttmp9, ttmp10, ttmp11], [ttmp14, ttmp15], 0
++ s_waitcnt lgkmcnt(0)
++ s_or_b32 ttmp7, ttmp8, ttmp9
++ s_cbranch_scc0 L_NO_NEXT_TRAP //next level trap handler not been set
++ s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status //restore HW status(SCC)
++ s_setpc_b64 [ttmp8,ttmp9] //jump to next level trap handler
+
+ L_NO_NEXT_TRAP:
+ s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
+@@ -312,18 +286,8 @@ L_NO_NEXT_TRAP:
+ s_addc_u32 ttmp1, ttmp1, 0
+ L_EXCP_CASE:
+ s_and_b32 ttmp1, ttmp1, 0xFFFF
+-
+- // Restore SQ_WAVE_IB_STS.
+- s_lshr_b32 ttmp2, ttmp11, (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT)
+- s_and_b32 ttmp2, ttmp2, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
+- s_setreg_b32 hwreg(HW_REG_IB_STS), ttmp2
+-
+- // Restore SQ_WAVE_STATUS.
+- s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
+- s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
+- set_status_without_spi_prio(s_save_status, ttmp2)
+-
+- s_rfe_b64 [ttmp0, ttmp1]
++ s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status //restore HW status(SCC)
++ s_rfe_b64 [ttmp0, ttmp1]
+ end
+ // ********* End handling of non-CWSR traps *******************
+
+@@ -343,6 +307,8 @@ end
+ s_mov_b32 s_save_tmp, 0 //clear saveCtx bit
+ s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_SAVECTX_SHIFT, 1), s_save_tmp //clear saveCtx bit
+
++ s_mov_b32 s_save_xnack_mask_lo, xnack_mask_lo //save XNACK_MASK
++ s_mov_b32 s_save_xnack_mask_hi, xnack_mask_hi //save XNACK must be done before any memory operation
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_RCNT_SHIFT, SQ_WAVE_IB_STS_RCNT_SIZE) //save RCNT
+ s_lshl_b32 s_save_tmp, s_save_tmp, S_SAVE_PC_HI_RCNT_SHIFT
+ s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp
+@@ -370,10 +336,6 @@ end
+ s_sendmsg sendmsg(MSG_SAVEWAVE) //send SPI a message and wait for SPI's write to EXEC
+ end
+
+- // Set SPI_PRIO=2 to avoid starving instruction fetch in the waves we're waiting for.
+- s_or_b32 s_save_tmp, s_save_status, (2 << SQ_WAVE_STATUS_SPI_PRIO_SHIFT)
+- s_setreg_b32 hwreg(HW_REG_STATUS), s_save_tmp
+-
+ L_SLEEP:
+ s_sleep 0x2 // sleep 1 (64clk) is not enough for 8 waves per SIMD, which will cause SQ hang, since the 7,8th wave could not get arbit to exec inst, while other waves are stuck into the sleep-loop and waiting for wrexec!=0
+
+@@ -388,6 +350,7 @@ if G8SR_DEBUG_TIMESTAMP
+ s_waitcnt lgkmcnt(0)
+ end
+
++ /* setup Resource Constants */
+ if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_SAVE_SINGLE_WAVE))
+ //calculate wd_addr using absolute thread id
+ v_readlane_b32 s_save_tmp, v9, 0
+@@ -405,24 +368,7 @@ end
+ else
+ end
+
+- // Save trap temporaries 6-11, 13-15 initialized by SPI debug dispatch logic
+- // ttmp SR memory offset : size(VGPR)+size(SGPR)+0x40
+- get_vgpr_size_bytes(s_save_ttmps_lo)
+- get_sgpr_size_bytes(s_save_ttmps_hi)
+- s_add_u32 s_save_ttmps_lo, s_save_ttmps_lo, s_save_ttmps_hi
+- s_add_u32 s_save_ttmps_lo, s_save_ttmps_lo, s_save_spi_init_lo
+- s_addc_u32 s_save_ttmps_hi, s_save_spi_init_hi, 0x0
+- s_and_b32 s_save_ttmps_hi, s_save_ttmps_hi, 0xFFFF
+- s_store_dwordx2 [ttmp6, ttmp7], [s_save_ttmps_lo, s_save_ttmps_hi], 0x40 glc:1
+- ack_sqc_store_workaround()
+- s_store_dwordx4 [ttmp8, ttmp9, ttmp10, ttmp11], [s_save_ttmps_lo, s_save_ttmps_hi], 0x48 glc:1
+- ack_sqc_store_workaround()
+- s_store_dword ttmp13, [s_save_ttmps_lo, s_save_ttmps_hi], 0x58 glc:1
+- ack_sqc_store_workaround()
+- s_store_dwordx2 [ttmp14, ttmp15], [s_save_ttmps_lo, s_save_ttmps_hi], 0x5C glc:1
+- ack_sqc_store_workaround()
+
+- /* setup Resource Contants */
+ s_mov_b32 s_save_buf_rsrc0, s_save_spi_init_lo //base_addr_lo
+ s_and_b32 s_save_buf_rsrc1, s_save_spi_init_hi, 0x0000FFFF //base_addr_hi
+ s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE
+@@ -479,8 +425,8 @@ end
+ s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
+ write_hwreg_to_mem(s_save_trapsts, s_save_buf_rsrc0, s_save_mem_offset) //TRAPSTS
+
+- write_hwreg_to_mem(xnack_mask_lo, s_save_buf_rsrc0, s_save_mem_offset) //XNACK_MASK_LO
+- write_hwreg_to_mem(xnack_mask_hi, s_save_buf_rsrc0, s_save_mem_offset) //XNACK_MASK_HI
++ write_hwreg_to_mem(s_save_xnack_mask_lo, s_save_buf_rsrc0, s_save_mem_offset) //XNACK_MASK_LO
++ write_hwreg_to_mem(s_save_xnack_mask_hi, s_save_buf_rsrc0, s_save_mem_offset) //XNACK_MASK_HI
+
+ //use s_save_tmp would introduce conflict here between s_save_tmp and s_save_buf_rsrc2
+ s_getreg_b32 s_save_m0, hwreg(HW_REG_MODE) //MODE
+@@ -556,8 +502,6 @@ end
+ s_mov_b32 s_save_mem_offset, 0
+ s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+- s_mov_b32 xnack_mask_lo, 0x0
+- s_mov_b32 xnack_mask_hi, 0x0
+
+ if (SWIZZLE_EN)
+ s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
+@@ -1071,6 +1015,8 @@ end
+
+ s_waitcnt lgkmcnt(0) //from now on, it is safe to restore STATUS and IB_STS
+
++ s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS
++
+ //for normal save & restore, the saved PC points to the next inst to execute, no adjustment needs to be made, otherwise:
+ if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL))
+ s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 8 //pc[31:0]+8 //two back-to-back s_trap are used (first for save and second for restore)
+@@ -1092,21 +1038,6 @@ end
+ s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT, SQ_WAVE_TRAPSTS_POST_SAVECTX_SIZE), s_restore_m0
+ //s_setreg_b32 hwreg(HW_REG_TRAPSTS), s_restore_trapsts //don't overwrite SAVECTX bit as it may be set through external SAVECTX during restore
+ s_setreg_b32 hwreg(HW_REG_MODE), s_restore_mode
+-
+- // Restore trap temporaries 6-11, 13-15 initialized by SPI debug dispatch logic
+- // ttmp SR memory offset : size(VGPR)+size(SGPR)+0x40
+- get_vgpr_size_bytes(s_restore_ttmps_lo)
+- get_sgpr_size_bytes(s_restore_ttmps_hi)
+- s_add_u32 s_restore_ttmps_lo, s_restore_ttmps_lo, s_restore_ttmps_hi
+- s_add_u32 s_restore_ttmps_lo, s_restore_ttmps_lo, s_restore_buf_rsrc0
+- s_addc_u32 s_restore_ttmps_hi, s_restore_buf_rsrc1, 0x0
+- s_and_b32 s_restore_ttmps_hi, s_restore_ttmps_hi, 0xFFFF
+- s_load_dwordx2 [ttmp6, ttmp7], [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x40 glc:1
+- s_load_dwordx4 [ttmp8, ttmp9, ttmp10, ttmp11], [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x48 glc:1
+- s_load_dword ttmp13, [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x58 glc:1
+- s_load_dwordx2 [ttmp14, ttmp15], [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x5C glc:1
+- s_waitcnt lgkmcnt(0)
+-
+ //reuse s_restore_m0 as a temp register
+ s_and_b32 s_restore_m0, s_restore_pc_hi, S_SAVE_PC_HI_RCNT_MASK
+ s_lshr_b32 s_restore_m0, s_restore_m0, S_SAVE_PC_HI_RCNT_SHIFT
+@@ -1121,12 +1052,11 @@ end
+ s_lshr_b32 s_restore_m0, s_restore_m0, SQ_WAVE_STATUS_INST_ATC_SHIFT
+ s_setreg_b32 hwreg(HW_REG_IB_STS), s_restore_tmp
+
+- s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS
+ s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
+ s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
+- set_status_without_spi_prio(s_restore_status, s_restore_tmp) // SCC is included, which is changed by previous salu
++ s_setreg_b32 hwreg(HW_REG_STATUS), s_restore_status // SCC is included, which is changed by previous salu
+
+- s_barrier //barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG //FIXME not performance-optimal at this time
++ s_barrier //barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG //FIXME not performance-optimal at this time
+
+ if G8SR_DEBUG_TIMESTAMP
+ s_memrealtime s_g8sr_ts_restore_d
+@@ -1155,7 +1085,9 @@ function write_hwreg_to_mem(s, s_rsrc, s_mem_offset)
+ s_mov_b32 exec_lo, m0 //assuming exec_lo is not needed anymore from this point on
+ s_mov_b32 m0, s_mem_offset
+ s_buffer_store_dword s, s_rsrc, m0 glc:1
+- ack_sqc_store_workaround()
++if ACK_SQC_STORE
++ s_waitcnt lgkmcnt(0)
++end
+ s_add_u32 s_mem_offset, s_mem_offset, 4
+ s_mov_b32 m0, exec_lo
+ end
+@@ -1165,13 +1097,21 @@ end
+ function write_16sgpr_to_mem(s, s_rsrc, s_mem_offset)
+
+ s_buffer_store_dwordx4 s[0], s_rsrc, 0 glc:1
+- ack_sqc_store_workaround()
++if ACK_SQC_STORE
++ s_waitcnt lgkmcnt(0)
++end
+ s_buffer_store_dwordx4 s[4], s_rsrc, 16 glc:1
+- ack_sqc_store_workaround()
++if ACK_SQC_STORE
++ s_waitcnt lgkmcnt(0)
++end
+ s_buffer_store_dwordx4 s[8], s_rsrc, 32 glc:1
+- ack_sqc_store_workaround()
++if ACK_SQC_STORE
++ s_waitcnt lgkmcnt(0)
++end
+ s_buffer_store_dwordx4 s[12], s_rsrc, 48 glc:1
+- ack_sqc_store_workaround()
++if ACK_SQC_STORE
++ s_waitcnt lgkmcnt(0)
++end
+ s_add_u32 s_rsrc[0], s_rsrc[0], 4*16
+ s_addc_u32 s_rsrc[1], s_rsrc[1], 0x0 // +scc
+ end
+@@ -1211,16 +1151,261 @@ function get_hwreg_size_bytes
+ return 128 //HWREG size 128 bytes
+ end
+
+-function ack_sqc_store_workaround
+- if ACK_SQC_STORE
+- s_waitcnt lgkmcnt(0)
+- end
+-end
+
+-function set_status_without_spi_prio(status, tmp)
+- // Do not restore STATUS.SPI_PRIO since scheduler may have raised it.
+- s_lshr_b32 tmp, status, SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT
+- s_setreg_b32 hwreg(HW_REG_STATUS, SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT, SQ_WAVE_STATUS_POST_SPI_PRIO_SIZE), tmp
+- s_nop 0x2 // avoid S_SETREG => S_SETREG hazard
+- s_setreg_b32 hwreg(HW_REG_STATUS, SQ_WAVE_STATUS_PRE_SPI_PRIO_SHIFT, SQ_WAVE_STATUS_PRE_SPI_PRIO_SIZE), status
+-end
++
++#endif
++
++static const uint32_t cwsr_trap_gfx9_hex[] = {
++ 0xbf820001, 0xbf820130,
++ 0xb8f0f802, 0x89708670,
++ 0xb8f1f803, 0x8674ff71,
++ 0x00000400, 0xbf850023,
++ 0x8674ff71, 0x00000800,
++ 0xbf850003, 0x8674ff71,
++ 0x00000100, 0xbf840009,
++ 0x8674ff70, 0x00002000,
++ 0xbf840001, 0xbf810000,
++ 0x8770ff70, 0x00002000,
++ 0x80ec886c, 0x82ed806d,
++ 0xbf820010, 0xb8faf812,
++ 0xb8fbf813, 0x8efa887a,
++ 0xc00a1d3d, 0x00000000,
++ 0xbf8cc07f, 0x87737574,
++ 0xbf840002, 0xb970f802,
++ 0xbe801d74, 0xb8f1f803,
++ 0x8671ff71, 0x000001ff,
++ 0xbf850002, 0x806c846c,
++ 0x826d806d, 0x866dff6d,
++ 0x0000ffff, 0xb970f802,
++ 0xbe801f6c, 0x866dff6d,
++ 0x0000ffff, 0xbef60080,
++ 0xb9760283, 0xbef20068,
++ 0xbef30069, 0xb8f62407,
++ 0x8e769c76, 0x876d766d,
++ 0xb8f603c7, 0x8e769b76,
++ 0x876d766d, 0xb8f6f807,
++ 0x8676ff76, 0x00007fff,
++ 0xb976f807, 0xbeee007e,
++ 0xbeef007f, 0xbefe0180,
++ 0xbf900004, 0xbf8e0002,
++ 0xbf88fffe, 0xbef4007e,
++ 0x8675ff7f, 0x0000ffff,
++ 0x8775ff75, 0x00040000,
++ 0xbef60080, 0xbef700ff,
++ 0x00807fac, 0x8676ff7f,
++ 0x08000000, 0x8f768376,
++ 0x87777677, 0x8676ff7f,
++ 0x70000000, 0x8f768176,
++ 0x87777677, 0xbefb007c,
++ 0xbefa0080, 0xb8fa2a05,
++ 0x807a817a, 0x8e7a8a7a,
++ 0xb8f61605, 0x80768176,
++ 0x8e768676, 0x807a767a,
++ 0xbef60084, 0xbef600ff,
++ 0x01000000, 0xbefe007c,
++ 0xbefc007a, 0xc0611efa,
++ 0x0000007c, 0xbf8cc07f,
++ 0x807a847a, 0xbefc007e,
++ 0xbefe007c, 0xbefc007a,
++ 0xc0611b3a, 0x0000007c,
++ 0xbf8cc07f, 0x807a847a,
++ 0xbefc007e, 0xbefe007c,
++ 0xbefc007a, 0xc0611b7a,
++ 0x0000007c, 0xbf8cc07f,
++ 0x807a847a, 0xbefc007e,
++ 0xbefe007c, 0xbefc007a,
++ 0xc0611bba, 0x0000007c,
++ 0xbf8cc07f, 0x807a847a,
++ 0xbefc007e, 0xbefe007c,
++ 0xbefc007a, 0xc0611bfa,
++ 0x0000007c, 0xbf8cc07f,
++ 0x807a847a, 0xbefc007e,
++ 0xbefe007c, 0xbefc007a,
++ 0xc0611c3a, 0x0000007c,
++ 0xbf8cc07f, 0x807a847a,
++ 0xbefc007e, 0xb8f1f803,
++ 0xbefe007c, 0xbefc007a,
++ 0xc0611c7a, 0x0000007c,
++ 0xbf8cc07f, 0x807a847a,
++ 0xbefc007e, 0xbefe007c,
++ 0xbefc007a, 0xc0611cba,
++ 0x0000007c, 0xbf8cc07f,
++ 0x807a847a, 0xbefc007e,
++ 0xbefe007c, 0xbefc007a,
++ 0xc0611cfa, 0x0000007c,
++ 0xbf8cc07f, 0x807a847a,
++ 0xbefc007e, 0xb8fbf801,
++ 0xbefe007c, 0xbefc007a,
++ 0xc0611efa, 0x0000007c,
++ 0xbf8cc07f, 0x807a847a,
++ 0xbefc007e, 0x8676ff7f,
++ 0x04000000, 0xbeef0080,
++ 0x876f6f76, 0xb8fa2a05,
++ 0x807a817a, 0x8e7a8a7a,
++ 0xb8f11605, 0x80718171,
++ 0x8e718471, 0x8e768271,
++ 0xbef600ff, 0x01000000,
++ 0xbef20174, 0x80747a74,
++ 0x82758075, 0xbefc0080,
++ 0xbf800000, 0xbe802b00,
++ 0xbe822b02, 0xbe842b04,
++ 0xbe862b06, 0xbe882b08,
++ 0xbe8a2b0a, 0xbe8c2b0c,
++ 0xbe8e2b0e, 0xc06b003a,
++ 0x00000000, 0xbf8cc07f,
++ 0xc06b013a, 0x00000010,
++ 0xbf8cc07f, 0xc06b023a,
++ 0x00000020, 0xbf8cc07f,
++ 0xc06b033a, 0x00000030,
++ 0xbf8cc07f, 0x8074c074,
++ 0x82758075, 0x807c907c,
++ 0xbf0a717c, 0xbf85ffe7,
++ 0xbef40172, 0xbefa0080,
++ 0xbefe00c1, 0xbeff00c1,
++ 0xbef600ff, 0x01000000,
++ 0xe0724000, 0x7a1d0000,
++ 0xe0724100, 0x7a1d0100,
++ 0xe0724200, 0x7a1d0200,
++ 0xe0724300, 0x7a1d0300,
++ 0xbefe00c1, 0xbeff00c1,
++ 0xb8f14306, 0x8671c171,
++ 0xbf84002c, 0xbf8a0000,
++ 0x8676ff6f, 0x04000000,
++ 0xbf840028, 0x8e718671,
++ 0x8e718271, 0xbef60071,
++ 0xb8fa2a05, 0x807a817a,
++ 0x8e7a8a7a, 0xb8f61605,
++ 0x80768176, 0x8e768676,
++ 0x807a767a, 0x807aff7a,
++ 0x00000080, 0xbef600ff,
++ 0x01000000, 0xbefc0080,
++ 0xd28c0002, 0x000100c1,
++ 0xd28d0003, 0x000204c1,
++ 0xd1060002, 0x00011103,
++ 0x7e0602ff, 0x00000200,
++ 0xbefc00ff, 0x00010000,
++ 0xbe800077, 0x8677ff77,
++ 0xff7fffff, 0x8777ff77,
++ 0x00058000, 0xd8ec0000,
++ 0x00000002, 0xbf8cc07f,
++ 0xe0765000, 0x7a1d0002,
++ 0x68040702, 0xd0c9006a,
++ 0x0000e302, 0xbf87fff7,
++ 0xbef70000, 0xbefa00ff,
++ 0x00000400, 0xbefe00c1,
++ 0xbeff00c1, 0xb8f12a05,
++ 0x80718171, 0x8e718271,
++ 0x8e768871, 0xbef600ff,
++ 0x01000000, 0xbefc0084,
++ 0xbf0a717c, 0xbf840015,
++ 0xbf11017c, 0x8071ff71,
++ 0x00001000, 0x7e000300,
++ 0x7e020301, 0x7e040302,
++ 0x7e060303, 0xe0724000,
++ 0x7a1d0000, 0xe0724100,
++ 0x7a1d0100, 0xe0724200,
++ 0x7a1d0200, 0xe0724300,
++ 0x7a1d0300, 0x807c847c,
++ 0x807aff7a, 0x00000400,
++ 0xbf0a717c, 0xbf85ffef,
++ 0xbf9c0000, 0xbf8200c5,
++ 0xbef4007e, 0x8675ff7f,
++ 0x0000ffff, 0x8775ff75,
++ 0x00040000, 0xbef60080,
++ 0xbef700ff, 0x00807fac,
++ 0x8672ff7f, 0x08000000,
++ 0x8f728372, 0x87777277,
++ 0x8672ff7f, 0x70000000,
++ 0x8f728172, 0x87777277,
++ 0x8672ff7f, 0x04000000,
++ 0xbf84001e, 0xbefe00c1,
++ 0xbeff00c1, 0xb8ef4306,
++ 0x866fc16f, 0xbf840019,
++ 0x8e6f866f, 0x8e6f826f,
++ 0xbef6006f, 0xb8f82a05,
++ 0x80788178, 0x8e788a78,
++ 0xb8f21605, 0x80728172,
++ 0x8e728672, 0x80787278,
++ 0x8078ff78, 0x00000080,
++ 0xbef600ff, 0x01000000,
++ 0xbefc0080, 0xe0510000,
++ 0x781d0000, 0xe0510100,
++ 0x781d0000, 0x807cff7c,
++ 0x00000200, 0x8078ff78,
++ 0x00000200, 0xbf0a6f7c,
++ 0xbf85fff6, 0xbef80080,
++ 0xbefe00c1, 0xbeff00c1,
++ 0xb8ef2a05, 0x806f816f,
++ 0x8e6f826f, 0x8e76886f,
++ 0xbef600ff, 0x01000000,
++ 0xbef20078, 0x8078ff78,
++ 0x00000400, 0xbefc0084,
++ 0xbf11087c, 0x806fff6f,
++ 0x00008000, 0xe0524000,
++ 0x781d0000, 0xe0524100,
++ 0x781d0100, 0xe0524200,
++ 0x781d0200, 0xe0524300,
++ 0x781d0300, 0xbf8c0f70,
++ 0x7e000300, 0x7e020301,
++ 0x7e040302, 0x7e060303,
++ 0x807c847c, 0x8078ff78,
++ 0x00000400, 0xbf0a6f7c,
++ 0xbf85ffee, 0xbf9c0000,
++ 0xe0524000, 0x721d0000,
++ 0xe0524100, 0x721d0100,
++ 0xe0524200, 0x721d0200,
++ 0xe0524300, 0x721d0300,
++ 0xb8f82a05, 0x80788178,
++ 0x8e788a78, 0xb8f21605,
++ 0x80728172, 0x8e728672,
++ 0x80787278, 0x80f8c078,
++ 0xb8ef1605, 0x806f816f,
++ 0x8e6f846f, 0x8e76826f,
++ 0xbef600ff, 0x01000000,
++ 0xbefc006f, 0xc031003a,
++ 0x00000078, 0x80f8c078,
++ 0xbf8cc07f, 0x80fc907c,
++ 0xbf800000, 0xbe802d00,
++ 0xbe822d02, 0xbe842d04,
++ 0xbe862d06, 0xbe882d08,
++ 0xbe8a2d0a, 0xbe8c2d0c,
++ 0xbe8e2d0e, 0xbf06807c,
++ 0xbf84fff0, 0xb8f82a05,
++ 0x80788178, 0x8e788a78,
++ 0xb8f21605, 0x80728172,
++ 0x8e728672, 0x80787278,
++ 0xbef60084, 0xbef600ff,
++ 0x01000000, 0xc0211bfa,
++ 0x00000078, 0x80788478,
++ 0xc0211b3a, 0x00000078,
++ 0x80788478, 0xc0211b7a,
++ 0x00000078, 0x80788478,
++ 0xc0211eba, 0x00000078,
++ 0x80788478, 0xc0211efa,
++ 0x00000078, 0x80788478,
++ 0xc0211c3a, 0x00000078,
++ 0x80788478, 0xc0211c7a,
++ 0x00000078, 0x80788478,
++ 0xc0211a3a, 0x00000078,
++ 0x80788478, 0xc0211a7a,
++ 0x00000078, 0x80788478,
++ 0xc0211cfa, 0x00000078,
++ 0x80788478, 0xbf8cc07f,
++ 0x866dff6d, 0x0000ffff,
++ 0xbefc006f, 0xbefe007a,
++ 0xbeff007b, 0x866f71ff,
++ 0x000003ff, 0xb96f4803,
++ 0x866f71ff, 0xfffff800,
++ 0x8f6f8b6f, 0xb96fa2c3,
++ 0xb973f801, 0x866fff6d,
++ 0xf0000000, 0x8f6f9c6f,
++ 0x8e6f906f, 0xbef20080,
++ 0x87726f72, 0x866fff6d,
++ 0x08000000, 0x8f6f9b6f,
++ 0x8e6f8f6f, 0x87726f72,
++ 0x866fff70, 0x00800000,
++ 0x8f6f976f, 0xb972f807,
++ 0x86fe7e7e, 0x86ea6a6a,
++ 0xb970f802, 0xbf8a0000,
++ 0x95806f6c, 0xbf810000,
++};
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index 56c1230..01c8b19 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -24,7 +24,6 @@
+ #include <linux/export.h>
+ #include <linux/err.h>
+ #include <linux/fs.h>
+-#include <linux/file.h>
+ #include <linux/sched.h>
+ #include <linux/sched/mm.h>
+ #include <linux/slab.h>
+@@ -36,7 +35,6 @@
+ #include <linux/mman.h>
+ #include <asm/processor.h>
+ #include <linux/ptrace.h>
+-#include <linux/pagemap.h>
+
+ #include "kfd_priv.h"
+ #include "kfd_device_queue_manager.h"
+@@ -46,6 +44,7 @@
+ static long kfd_ioctl(struct file *, unsigned int, unsigned long);
+ static int kfd_open(struct inode *, struct file *);
+ static int kfd_mmap(struct file *, struct vm_area_struct *);
++static bool kfd_dev_is_large_bar(struct kfd_dev *dev);
+
+ static const char kfd_dev_name[] = "kfd";
+
+@@ -137,9 +136,6 @@ static int kfd_open(struct inode *inode, struct file *filep)
+ if (IS_ERR(process))
+ return PTR_ERR(process);
+
+- if (kfd_is_locked())
+- return -EAGAIN;
+-
+ dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
+ process->pasid, process->is_32bit_user_mode);
+
+@@ -251,7 +247,7 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
+ pr_debug("Queue Size: 0x%llX, %u\n",
+ q_properties->queue_size, args->ring_size);
+
+- pr_debug("Queue r/w Pointers: %px, %px\n",
++ pr_debug("Queue r/w Pointers: %p, %p\n",
+ q_properties->read_ptr,
+ q_properties->write_ptr);
+
+@@ -903,7 +899,7 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
+ mutex_lock(&p->mutex);
+
+ if (!kfd_has_process_device_data(p))
+- goto out_unlock;
++ goto out_upwrite;
+
+ /* Run over all pdd of the process */
+ pdd = kfd_get_first_process_device_data(p);
+@@ -912,7 +908,7 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
+ pdd = kfd_get_next_process_device_data(p, pdd);
+ } while (pdd);
+
+- goto out_unlock;
++ goto out_upwrite;
+ }
+
+ /* Fill in process-aperture information for all available
+@@ -929,7 +925,7 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
+ if (!kfd_has_process_device_data(p)) {
+ args->num_of_nodes = 0;
+ kfree(pa);
+- goto out_unlock;
++ goto out_upwrite;
+ }
+
+ /* Run over all pdd of the process */
+@@ -971,7 +967,7 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
+ kfree(pa);
+ return ret ? -EFAULT : 0;
+
+-out_unlock:
++out_upwrite:
+ mutex_unlock(&p->mutex);
+ return 0;
+ }
+@@ -980,70 +976,55 @@ static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
+ void *data)
+ {
+ struct kfd_ioctl_create_event_args *args = data;
+- int err;
+-
+- /* For dGPUs the event page is allocated in user mode. The
+- * handle is passed to KFD with the first call to this IOCTL
+- * through the event_page_offset field.
+- */
+- if (args->event_page_offset) {
+- struct kfd_dev *kfd;
+- struct kfd_process_device *pdd;
+- void *mem, *kern_addr;
+- uint64_t size;
++ struct kfd_dev *kfd;
++ struct kfd_process_device *pdd;
++ int err = -EINVAL;
++ void *mem, *kern_addr = NULL;
+
+- if (p->signal_page) {
+- pr_err("Event page is already set\n");
+- return -EINVAL;
+- }
++ pr_debug("Event page offset 0x%llx\n", args->event_page_offset);
+
++ if (args->event_page_offset) {
+ kfd = kfd_device_by_id(GET_GPU_ID(args->event_page_offset));
+ if (!kfd) {
+ pr_err("Getting device by id failed in %s\n", __func__);
+- return -EINVAL;
++ return -EFAULT;
+ }
+-
+- mutex_lock(&p->mutex);
+- pdd = kfd_bind_process_to_device(kfd, p);
+- if (IS_ERR(pdd)) {
+- err = PTR_ERR(pdd);
+- goto out_unlock;
+- }
+-
+- mem = kfd_process_device_translate_handle(pdd,
++ if (!kfd->device_info->needs_iommu_device) {
++ mutex_lock(&p->mutex);
++ pdd = kfd_bind_process_to_device(kfd, p);
++ if (IS_ERR(pdd)) {
++ err = PTR_ERR(pdd);
++ goto out_upwrite;
++ }
++ mem = kfd_process_device_translate_handle(pdd,
+ GET_IDR_HANDLE(args->event_page_offset));
+- if (!mem) {
+- pr_err("Can't find BO, offset is 0x%llx\n",
+- args->event_page_offset);
+- err = -EINVAL;
+- goto out_unlock;
+- }
+- mutex_unlock(&p->mutex);
+-
+- err = kfd->kfd2kgd->map_gtt_bo_to_kernel(kfd->kgd,
+- mem, &kern_addr, &size);
+- if (err) {
+- pr_err("Failed to map event page to kernel\n");
+- return err;
+- }
++ if (!mem) {
++ pr_err("Can't find BO, offset is 0x%llx\n",
++ args->event_page_offset);
++ err = -EFAULT;
++ goto out_upwrite;
++ }
++ mutex_unlock(&p->mutex);
+
+- err = kfd_event_page_set(p, kern_addr, size);
+- if (err) {
+- pr_err("Failed to set event page\n");
+- return err;
++ /* Map dGPU gtt BO to kernel */
++ kfd->kfd2kgd->map_gtt_bo_to_kernel(kfd->kgd,
++ mem, &kern_addr, NULL);
+ }
+ }
+
+-
+- err = kfd_event_create(filp, p, args->event_type,
+- args->auto_reset != 0, args->node_id,
+- &args->event_id, &args->event_trigger_data,
+- &args->event_page_offset,
+- &args->event_slot_index);
++ err = kfd_event_create(filp, p,
++ args->event_type,
++ args->auto_reset != 0,
++ args->node_id,
++ &args->event_id,
++ &args->event_trigger_data,
++ &args->event_page_offset,
++ &args->event_slot_index,
++ kern_addr);
+
+ return err;
+
+-out_unlock:
++out_upwrite:
+ mutex_unlock(&p->mutex);
+ return err;
+ }
+@@ -1085,14 +1066,17 @@ static int kfd_ioctl_wait_events(struct file *filp, struct kfd_process *p,
+
+ return err;
+ }
+-static int kfd_ioctl_set_scratch_backing_va(struct file *filep,
++static int kfd_ioctl_alloc_scratch_memory(struct file *filep,
+ struct kfd_process *p, void *data)
+ {
+- struct kfd_ioctl_set_scratch_backing_va_args *args = data;
++ struct kfd_ioctl_alloc_memory_of_scratch_args *args = data;
+ struct kfd_process_device *pdd;
+ struct kfd_dev *dev;
+ long err;
+
++ if (args->size == 0)
++ return -EINVAL;
++
+ dev = kfd_device_by_id(args->gpu_id);
+ if (!dev)
+ return -EINVAL;
+@@ -1242,8 +1226,6 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
+ uint64_t offset = args->mmap_offset;
+ uint32_t flags = args->flags;
+ struct vm_area_struct *vma;
+- uint64_t cpuva = 0;
+- unsigned int mem_type = 0;
+
+ if (args->size == 0)
+ return -EINVAL;
+@@ -1273,13 +1255,6 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
+ flags |= KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL;
+ flags &= ~KFD_IOC_ALLOC_MEM_FLAGS_USERPTR;
+ offset = (pfn << PAGE_SHIFT);
+- } else {
+- if (offset & (PAGE_SIZE - 1)) {
+- pr_debug("Unaligned userptr address:%llx\n",
+- offset);
+- return -EINVAL;
+- }
+- cpuva = offset;
+ }
+ } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
+ if (args->size != kfd_doorbell_process_slice(dev))
+@@ -1297,18 +1272,14 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
+
+ err = dev->kfd2kgd->alloc_memory_of_gpu(
+ dev->kgd, args->va_addr, args->size,
+- pdd->vm, NULL, (struct kgd_mem **) &mem, &offset,
++ pdd->vm, (struct kgd_mem **) &mem, &offset,
+ flags);
+
+ if (err)
+ goto err_unlock;
+
+- mem_type = flags & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM |
+- KFD_IOC_ALLOC_MEM_FLAGS_GTT |
+- KFD_IOC_ALLOC_MEM_FLAGS_USERPTR |
+- KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL);
+ idr_handle = kfd_process_device_create_obj_handle(pdd, mem,
+- args->va_addr, args->size, cpuva, mem_type, NULL);
++ args->va_addr, args->size, NULL);
+ if (idr_handle < 0) {
+ err = -EFAULT;
+ goto err_free;
+@@ -1322,7 +1293,8 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
+ return 0;
+
+ err_free:
+- dev->kfd2kgd->free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
++ dev->kfd2kgd->free_memory_of_gpu(dev->kgd,
++ (struct kgd_mem *) mem);
+ err_unlock:
+ mutex_unlock(&p->mutex);
+ return err;
+@@ -1363,7 +1335,7 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
+ /* If freeing the buffer failed, leave the handle in place for
+ * clean-up during process tear-down.
+ */
+- if (!ret)
++ if (ret == 0)
+ kfd_process_device_remove_obj_handle(
+ pdd, GET_IDR_HANDLE(args->handle));
+
+@@ -1380,30 +1352,31 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
+ void *mem;
+ struct kfd_dev *dev, *peer;
+ long err = 0;
+- int i;
++ int i, num_dev = 0;
+ uint32_t *devices_arr = NULL;
+
+ dev = kfd_device_by_id(GET_GPU_ID(args->handle));
+ if (!dev)
+ return -EINVAL;
+
+- if (!args->n_devices) {
+- pr_debug("Device IDs array empty\n");
++ if (args->device_ids_array_size == 0) {
++ pr_debug("Device ID array size is 0\n");
+ return -EINVAL;
+ }
+- if (args->n_success > args->n_devices) {
+- pr_debug("n_success exceeds n_devices\n");
++
++ if (args->device_ids_array_size % sizeof(uint32_t)) {
++ pr_debug("Node IDs array size %u\n",
++ args->device_ids_array_size);
+ return -EINVAL;
+ }
+
+- devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr),
+- GFP_KERNEL);
++ devices_arr = kmalloc(args->device_ids_array_size, GFP_KERNEL);
+ if (!devices_arr)
+ return -ENOMEM;
+
+ err = copy_from_user(devices_arr,
+- (void __user *)args->device_ids_array_ptr,
+- args->n_devices * sizeof(*devices_arr));
++ (void __user *)args->device_ids_array_ptr,
++ args->device_ids_array_size);
+ if (err != 0) {
+ err = -EFAULT;
+ goto copy_from_user_failed;
+@@ -1424,11 +1397,12 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
+ goto get_mem_obj_from_handle_failed;
+ }
+
+- for (i = args->n_success; i < args->n_devices; i++) {
++ num_dev = args->device_ids_array_size / sizeof(uint32_t);
++ for (i = 0 ; i < num_dev; i++) {
+ peer = kfd_device_by_id(devices_arr[i]);
+ if (!peer) {
+ pr_debug("Getting device by id failed for 0x%x\n",
+- devices_arr[i]);
++ devices_arr[i]);
+ err = -EINVAL;
+ goto get_mem_obj_from_handle_failed;
+ }
+@@ -1439,13 +1413,12 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
+ goto get_mem_obj_from_handle_failed;
+ }
+ err = peer->kfd2kgd->map_memory_to_gpu(
+- peer->kgd, (struct kgd_mem *)mem, peer_pdd->vm);
+- if (err) {
+- pr_err("Failed to map to gpu %d/%d\n",
+- i, args->n_devices);
++ peer->kgd, (struct kgd_mem *)mem, peer_pdd->vm);
++ if (err != 0) {
++ pr_err("Failed to map to gpu %d, num_dev=%d\n",
++ i, num_dev);
+ goto map_memory_to_gpu_failed;
+ }
+- args->n_success = i+1;
+ }
+
+ mutex_unlock(&p->mutex);
+@@ -1457,7 +1430,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
+ }
+
+ /* Flush TLBs after waiting for the page table updates to complete */
+- for (i = 0; i < args->n_devices; i++) {
++ for (i = 0; i < num_dev; i++) {
+ peer = kfd_device_by_id(devices_arr[i]);
+ if (WARN_ON_ONCE(!peer))
+ continue;
+@@ -1490,29 +1463,30 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
+ void *mem;
+ struct kfd_dev *dev, *peer;
+ long err = 0;
+- uint32_t *devices_arr = NULL, i;
++ uint32_t *devices_arr = NULL, num_dev, i;
+
+ dev = kfd_device_by_id(GET_GPU_ID(args->handle));
+ if (!dev)
+ return -EINVAL;
+
+- if (!args->n_devices) {
+- pr_debug("Device IDs array empty\n");
++ if (args->device_ids_array_size == 0) {
++ pr_debug("Device ID array size is 0\n");
+ return -EINVAL;
+ }
+- if (args->n_success > args->n_devices) {
+- pr_debug("n_success exceeds n_devices\n");
++
++ if (args->device_ids_array_size % sizeof(uint32_t)) {
++ pr_debug("Node IDs array size %u\n",
++ args->device_ids_array_size);
+ return -EINVAL;
+ }
+
+- devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr),
+- GFP_KERNEL);
++ devices_arr = kmalloc(args->device_ids_array_size, GFP_KERNEL);
+ if (!devices_arr)
+ return -ENOMEM;
+
+ err = copy_from_user(devices_arr,
+- (void __user *)args->device_ids_array_ptr,
+- args->n_devices * sizeof(*devices_arr));
++ (void __user *)args->device_ids_array_ptr,
++ args->device_ids_array_size);
+ if (err != 0) {
+ err = -EFAULT;
+ goto copy_from_user_failed;
+@@ -1522,7 +1496,8 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
+
+ pdd = kfd_get_process_device_data(dev, p);
+ if (!pdd) {
+- err = -EINVAL;
++ pr_debug("Process device data doesn't exist\n");
++ err = -ENODEV;
+ goto bind_process_to_device_failed;
+ }
+
+@@ -1533,7 +1508,8 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
+ goto get_mem_obj_from_handle_failed;
+ }
+
+- for (i = args->n_success; i < args->n_devices; i++) {
++ num_dev = args->device_ids_array_size / sizeof(uint32_t);
++ for (i = 0 ; i < num_dev; i++) {
+ peer = kfd_device_by_id(devices_arr[i]);
+ if (!peer) {
+ err = -EINVAL;
+@@ -1549,10 +1525,9 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
+ peer->kgd, (struct kgd_mem *)mem, peer_pdd->vm);
+ if (err) {
+ pr_err("Failed to unmap from gpu %d/%d\n",
+- i, args->n_devices);
++ i, num_dev);
+ goto unmap_memory_from_gpu_failed;
+ }
+- args->n_success = i+1;
+ }
+ kfree(devices_arr);
+
+@@ -1569,6 +1544,34 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
+ return err;
+ }
+
++static int kfd_ioctl_set_process_dgpu_aperture(struct file *filep,
++ struct kfd_process *p, void *data)
++{
++ struct kfd_ioctl_set_process_dgpu_aperture_args *args = data;
++ struct kfd_dev *dev;
++ struct kfd_process_device *pdd;
++ long err;
++
++ dev = kfd_device_by_id(args->gpu_id);
++ if (!dev)
++ return -EINVAL;
++
++ mutex_lock(&p->mutex);
++
++ pdd = kfd_bind_process_to_device(dev, p);
++ if (IS_ERR(pdd)) {
++ err = PTR_ERR(pdd);
++ goto exit;
++ }
++
++ err = kfd_set_process_dgpu_aperture(pdd, args->dgpu_base,
++ args->dgpu_limit);
++
++exit:
++ mutex_unlock(&p->mutex);
++ return err;
++}
++
+ static int kfd_ioctl_get_dmabuf_info(struct file *filep,
+ struct kfd_process *p, void *data)
+ {
+@@ -1683,636 +1686,22 @@ static int kfd_ioctl_ipc_import_handle(struct file *filep,
+ return r;
+ }
+
+-/* Maximum number of entries for process pages array which lives on stack */
+-#define MAX_PP_STACK_COUNT 16
+-/* Maximum number of pages kmalloc'd to hold struct page's during copy */
+-#define MAX_KMALLOC_PAGES (PAGE_SIZE * 2)
+-#define MAX_PP_KMALLOC_COUNT (MAX_KMALLOC_PAGES/sizeof(struct page *))
+-
+-static void kfd_put_sg_table(struct sg_table *sg)
+-{
+- unsigned int i;
+- struct scatterlist *s;
+-
+- for_each_sg(sg->sgl, s, sg->nents, i)
+- put_page(sg_page(s));
+-}
+-
+-
+-/* Create a sg table for the given userptr BO by pinning its system pages
+- * @bo: userptr BO
+- * @offset: Offset into BO
+- * @mm/@task: mm_struct & task_struct of the process that holds the BO
+- * @size: in/out: desired size / actual size which could be smaller
+- * @sg_size: out: Size of sg table. This is ALIGN_UP(@size)
+- * @ret_sg: out sg table
+- */
+-static int kfd_create_sg_table_from_userptr_bo(struct kfd_bo *bo,
+- int64_t offset, int cma_write,
+- struct mm_struct *mm,
+- struct task_struct *task,
+- uint64_t *size,
+- uint64_t *sg_size,
+- struct sg_table **ret_sg)
+-{
+- int ret, locked = 1;
+- struct sg_table *sg = NULL;
+- unsigned int i, offset_in_page, flags = 0;
+- unsigned long nents, n;
+- unsigned long pa = (bo->cpuva + offset) & PAGE_MASK;
+- unsigned int cur_page = 0;
+- struct scatterlist *s;
+- uint64_t sz = *size;
+- struct page **process_pages;
+-
+- *sg_size = 0;
+- sg = kmalloc(sizeof(*sg), GFP_KERNEL);
+- if (!sg)
+- return -ENOMEM;
+-
+- offset_in_page = offset & (PAGE_SIZE - 1);
+- nents = (sz + offset_in_page + PAGE_SIZE - 1) / PAGE_SIZE;
+-
+- ret = sg_alloc_table(sg, nents, GFP_KERNEL);
+- if (unlikely(ret)) {
+- ret = -ENOMEM;
+- goto sg_alloc_fail;
+- }
+- process_pages = kmalloc_array(nents, sizeof(struct pages *),
+- GFP_KERNEL);
+- if (!process_pages) {
+- ret = -ENOMEM;
+- goto page_alloc_fail;
+- }
+-
+- if (cma_write)
+- flags = FOLL_WRITE;
+- locked = 1;
+- down_read(&mm->mmap_sem);
+- n = get_user_pages_remote(task, mm, pa, nents, flags, process_pages,
+- NULL, &locked);
+- if (locked)
+- up_read(&mm->mmap_sem);
+- if (n <= 0) {
+- pr_err("CMA: Invalid virtual address 0x%lx\n", pa);
+- ret = -EFAULT;
+- goto get_user_fail;
+- }
+- if (n != nents) {
+- /* Pages pinned < requested. Set the size accordingly */
+- *size = (n * PAGE_SIZE) - offset_in_page;
+- pr_debug("Requested %lx but pinned %lx\n", nents, n);
+- }
+-
+- sz = 0;
+- for_each_sg(sg->sgl, s, n, i) {
+- sg_set_page(s, process_pages[cur_page], PAGE_SIZE,
+- offset_in_page);
+- sg_dma_address(s) = page_to_phys(process_pages[cur_page]);
+- offset_in_page = 0;
+- cur_page++;
+- sz += PAGE_SIZE;
+- }
+- *ret_sg = sg;
+- *sg_size = sz;
+-
+- kfree(process_pages);
+- return 0;
+-
+-get_user_fail:
+- kfree(process_pages);
+-page_alloc_fail:
+- sg_free_table(sg);
+-sg_alloc_fail:
+- kfree(sg);
+- return ret;
+-}
+-
+-static void kfd_free_cma_bos(struct cma_iter *ci)
+-{
+- struct cma_system_bo *cma_bo, *tmp;
+-
+- list_for_each_entry_safe(cma_bo, tmp, &ci->cma_list, list) {
+- struct kfd_dev *dev = cma_bo->dev;
+-
+- /* sg table is deleted by free_memory_of_gpu */
+- if (cma_bo->sg)
+- kfd_put_sg_table(cma_bo->sg);
+- dev->kfd2kgd->free_memory_of_gpu(dev->kgd, cma_bo->mem);
+- list_del(&cma_bo->list);
+- kfree(cma_bo);
+- }
+-}
+-
+-/* 1 second timeout */
+-#define CMA_WAIT_TIMEOUT msecs_to_jiffies(1000)
+-
+-static int kfd_cma_fence_wait(struct dma_fence *f)
+-{
+- int ret;
+-
+- ret = dma_fence_wait_timeout(f, false, CMA_WAIT_TIMEOUT);
+- if (likely(ret > 0))
+- return 0;
+- if (!ret)
+- ret = -ETIME;
+- return ret;
+-}
+-
+-/* Put previous (old) fence @pf but it waits for @pf to signal if the context
+- * of the current fence @cf is different.
+- */
+-static int kfd_fence_put_wait_if_diff_context(struct dma_fence *cf,
+- struct dma_fence *pf)
+-{
+- int ret = 0;
+-
+- if (pf && cf && cf->context != pf->context)
+- ret = kfd_cma_fence_wait(pf);
+- dma_fence_put(pf);
+- return ret;
+-}
+-
+-#define MAX_SYSTEM_BO_SIZE (512*PAGE_SIZE)
+-
+-/* Create an equivalent system BO for the given @bo. If @bo is a userptr then
+- * create a new system BO by pinning underlying system pages of the given
+- * userptr BO. If @bo is in Local Memory then create an empty system BO and
+- * then copy @bo into this new BO.
+- * @bo: Userptr BO or Local Memory BO
+- * @offset: Offset into bo
+- * @size: in/out: The size of the new BO could be less than requested if all
+- * the pages couldn't be pinned or size > MAX_SYSTEM_BO_SIZE. This would
+- * be reflected in @size
+- * @mm/@task: mm/task to which @bo belongs to
+- * @cma_bo: out: new system BO
+- */
+-static int kfd_create_cma_system_bo(struct kfd_dev *kdev, struct kfd_bo *bo,
+- uint64_t *size, uint64_t offset,
+- int cma_write, struct kfd_process *p,
+- struct mm_struct *mm,
+- struct task_struct *task,
+- struct cma_system_bo **cma_bo)
+-{
+- int ret;
+- struct kfd_process_device *pdd = NULL;
+- struct cma_system_bo *cbo;
+- uint64_t bo_size = 0;
+- struct dma_fence *f;
+-
+- uint32_t flags = ALLOC_MEM_FLAGS_GTT | ALLOC_MEM_FLAGS_WRITABLE |
+- ALLOC_MEM_FLAGS_NO_SUBSTITUTE;
+-
+- *cma_bo = NULL;
+- cbo = kzalloc(sizeof(**cma_bo), GFP_KERNEL);
+- if (!cbo)
+- return -ENOMEM;
+-
+- INIT_LIST_HEAD(&cbo->list);
+- if (bo->mem_type == KFD_IOC_ALLOC_MEM_FLAGS_VRAM)
+- bo_size = min_t(uint64_t, *size, MAX_SYSTEM_BO_SIZE);
+- else if (bo->cpuva) {
+- ret = kfd_create_sg_table_from_userptr_bo(bo, offset,
+- cma_write, mm, task,
+- size, &bo_size,
+- &cbo->sg);
+- if (ret) {
+- pr_err("CMA: BO create with sg failed %d\n", ret);
+- goto sg_fail;
+- }
+- } else {
+- WARN_ON(1);
+- ret = -EINVAL;
+- goto sg_fail;
+- }
+- mutex_lock(&p->mutex);
+- pdd = kfd_get_process_device_data(kdev, p);
+- if (!pdd) {
+- mutex_unlock(&p->mutex);
+- pr_err("Process device data doesn't exist\n");
+- ret = -EINVAL;
+- goto pdd_fail;
+- }
+-
+- ret = kdev->kfd2kgd->alloc_memory_of_gpu(kdev->kgd, 0ULL, bo_size,
+- pdd->vm, cbo->sg,
+- &cbo->mem, NULL, flags);
+- mutex_unlock(&p->mutex);
+- if (ret) {
+- pr_err("Failed to create shadow system BO %d\n", ret);
+- goto pdd_fail;
+- }
+-
+- if (bo->mem_type == KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
+- ret = kdev->kfd2kgd->copy_mem_to_mem(kdev->kgd, bo->mem,
+- offset, cbo->mem, 0,
+- bo_size, &f, size);
+- if (ret) {
+- pr_err("CMA: Intermediate copy failed %d\n", ret);
+- goto copy_fail;
+- }
+-
+- /* Wait for the copy to finish as subsequent copy will be done
+- * by different device
+- */
+- ret = kfd_cma_fence_wait(f);
+- dma_fence_put(f);
+- if (ret) {
+- pr_err("CMA: Intermediate copy timed out %d\n", ret);
+- goto copy_fail;
+- }
+- }
+-
+- cbo->dev = kdev;
+- *cma_bo = cbo;
+-
+- return ret;
+-
+-copy_fail:
+- kdev->kfd2kgd->free_memory_of_gpu(kdev->kgd, bo->mem);
+-pdd_fail:
+- if (cbo->sg) {
+- kfd_put_sg_table(cbo->sg);
+- sg_free_table(cbo->sg);
+- kfree(cbo->sg);
+- }
+-sg_fail:
+- kfree(cbo);
+- return ret;
+-}
+-
+-/* Update cma_iter.cur_bo with KFD BO that is assocaited with
+- * cma_iter.array.va_addr
+- */
+-static int kfd_cma_iter_update_bo(struct cma_iter *ci)
+-{
+- struct kfd_memory_range *arr = ci->array;
+- uint64_t va_end = arr->va_addr + arr->size - 1;
+-
+- mutex_lock(&ci->p->mutex);
+- ci->cur_bo = kfd_process_find_bo_from_interval(ci->p, arr->va_addr,
+- va_end);
+- mutex_unlock(&ci->p->mutex);
+-
+- if (!ci->cur_bo || va_end > ci->cur_bo->it.last) {
+- pr_err("CMA failed. Range out of bounds\n");
+- return -EFAULT;
+- }
+- return 0;
+-}
+-
+-/* Advance iter by @size bytes. */
+-static int kfd_cma_iter_advance(struct cma_iter *ci, unsigned long size)
+-{
+- int ret = 0;
+-
+- ci->offset += size;
+- if (WARN_ON(size > ci->total || ci->offset > ci->array->size))
+- return -EFAULT;
+- ci->total -= size;
+- /* If current range is copied, move to next range if available. */
+- if (ci->offset == ci->array->size) {
+-
+- /* End of all ranges */
+- if (!(--ci->nr_segs))
+- return 0;
+-
+- ci->array++;
+- ci->offset = 0;
+- ret = kfd_cma_iter_update_bo(ci);
+- if (ret)
+- return ret;
+- }
+- ci->bo_offset = (ci->array->va_addr + ci->offset) -
+- ci->cur_bo->it.start;
+- return ret;
+-}
+-
+-static int kfd_cma_iter_init(struct kfd_memory_range *arr, unsigned long segs,
+- struct kfd_process *p, struct mm_struct *mm,
+- struct task_struct *task, struct cma_iter *ci)
+-{
+- int ret;
+- int nr;
+-
+- if (!arr || !segs)
+- return -EINVAL;
+-
+- memset(ci, 0, sizeof(*ci));
+- INIT_LIST_HEAD(&ci->cma_list);
+- ci->array = arr;
+- ci->nr_segs = segs;
+- ci->p = p;
+- ci->offset = 0;
+- ci->mm = mm;
+- ci->task = task;
+- for (nr = 0; nr < segs; nr++)
+- ci->total += arr[nr].size;
+-
+- /* Valid but size is 0. So copied will also be 0 */
+- if (!ci->total)
+- return 0;
+-
+- ret = kfd_cma_iter_update_bo(ci);
+- if (!ret)
+- ci->bo_offset = arr->va_addr - ci->cur_bo->it.start;
+- return ret;
+-}
+-
+-static bool kfd_cma_iter_end(struct cma_iter *ci)
+-{
+- if (!(ci->nr_segs) || !(ci->total))
+- return true;
+- return false;
+-}
+-
+-/* Copies @size bytes from si->cur_bo to di->cur_bo BO. The function assumes
+- * both source and dest. BOs are userptr BOs. Both BOs can either belong to
+- * current process or one of the BOs can belong to a differnt
+- * process. @Returns 0 on success, -ve on failure
+- *
+- * @si: Source iter
+- * @di: Dest. iter
+- * @cma_write: Indicates if it is write to remote or read from remote
+- * @size: amount of bytes to be copied
+- * @copied: Return number of bytes actually copied.
+- */
+-static int kfd_copy_userptr_bos(struct cma_iter *si, struct cma_iter *di,
+- bool cma_write, uint64_t size,
+- uint64_t *copied)
+-{
+- int i, ret = 0, locked;
+- unsigned int nents, nl;
+- unsigned int offset_in_page;
+- struct page *pp_stack[MAX_PP_STACK_COUNT];
+- struct page **process_pages = pp_stack;
+- unsigned long rva, lva = 0, flags = 0;
+- uint64_t copy_size, to_copy = size;
+- struct cma_iter *li, *ri;
+-
+- if (cma_write) {
+- ri = di;
+- li = si;
+- flags |= FOLL_WRITE;
+- } else {
+- li = di;
+- ri = si;
+- }
+- /* rva: remote virtual address. Page aligned to start page.
+- * rva + offset_in_page: Points to remote start address
+- * lva: local virtual address. Points to the start address.
+- * nents: computes number of remote pages to request
+- */
+- offset_in_page = ri->bo_offset & (PAGE_SIZE - 1);
+- rva = (ri->cur_bo->cpuva + ri->bo_offset) & PAGE_MASK;
+- lva = li->cur_bo->cpuva + li->bo_offset;
+-
+- nents = (size + offset_in_page + PAGE_SIZE - 1) / PAGE_SIZE;
+-
+- copy_size = min_t(uint64_t, size, PAGE_SIZE - offset_in_page);
+- *copied = 0;
+-
+- if (nents > MAX_PP_STACK_COUNT) {
+- /* For reliability kmalloc only 2 pages worth */
+- process_pages = kmalloc(min_t(size_t, MAX_KMALLOC_PAGES,
+- sizeof(struct pages *)*nents),
+- GFP_KERNEL);
+-
+- if (!process_pages)
+- return -ENOMEM;
+- }
+-
+- while (nents && to_copy) {
+- nl = min_t(unsigned int, MAX_PP_KMALLOC_COUNT, nents);
+- locked = 1;
+- down_read(&ri->mm->mmap_sem);
+- nl = get_user_pages_remote(ri->task, ri->mm, rva, nl,
+- flags, process_pages, NULL,
+- &locked);
+- if (locked)
+- up_read(&ri->mm->mmap_sem);
+- if (nl <= 0) {
+- pr_err("CMA: Invalid virtual address 0x%lx\n", rva);
+- ret = -EFAULT;
+- break;
+- }
+-
+- for (i = 0; i < nl; i++) {
+- unsigned int n;
+- void *kaddr = kmap(process_pages[i]);
+-
+- if (cma_write) {
+- n = copy_from_user(kaddr+offset_in_page,
+- (void *)lva, copy_size);
+- set_page_dirty(process_pages[i]);
+- } else {
+- n = copy_to_user((void *)lva,
+- kaddr+offset_in_page,
+- copy_size);
+- }
+- kunmap(kaddr);
+- if (n) {
+- ret = -EFAULT;
+- break;
+- }
+- to_copy -= copy_size;
+- if (!to_copy)
+- break;
+- lva += copy_size;
+- rva += (copy_size + offset_in_page);
+- WARN_ONCE(rva & (PAGE_SIZE - 1),
+- "CMA: Error in remote VA computation");
+- offset_in_page = 0;
+- copy_size = min_t(uint64_t, to_copy, PAGE_SIZE);
+- }
+-
+- for (i = 0; i < nl; i++)
+- put_page(process_pages[i]);
+-
+- if (ret)
+- break;
+- nents -= nl;
+- }
+-
+- if (process_pages != pp_stack)
+- kfree(process_pages);
+-
+- *copied = (size - to_copy);
+- return ret;
+-
+-}
+-
+-/* Copies @size bytes from si->cur_bo to di->cur_bo starting at their
+- * respective offset.
+- * @si: Source iter
+- * @di: Dest. iter
+- * @cma_write: Indicates if it is write to remote or read from remote
+- * @size: amount of bytes to be copied
+- * @f: Return the last fence if any
+- * @copied: Return number of bytes actually copied.
+- */
+-static int kfd_copy_bos(struct cma_iter *si, struct cma_iter *di,
+- int cma_write, uint64_t size,
+- struct dma_fence **f, uint64_t *copied)
+-{
+- int err = 0;
+- struct kfd_bo *dst_bo = di->cur_bo, *src_bo = si->cur_bo;
+- uint64_t src_offset = si->bo_offset, dst_offset = di->bo_offset;
+- struct kgd_mem *src_mem = src_bo->mem, *dst_mem = dst_bo->mem;
+- struct kfd_dev *dev = dst_bo->dev;
+- struct cma_system_bo *tmp_bo = NULL;
+-
+- *copied = 0;
+- if (f)
+- *f = NULL;
+- if (src_bo->cpuva && dst_bo->cpuva)
+- return kfd_copy_userptr_bos(si, di, cma_write, size, copied);
+-
+- /* If either source or dest. is userptr, create a shadow system BO
+- * by using the underlying userptr BO pages. Then use this shadow
+- * BO for copy. src_offset & dst_offset are adjusted because the new BO
+- * is only created for the window (offset, size) requested.
+- * The shadow BO is created on the other device. This means if the
+- * other BO is a device memory, the copy will be using that device.
+- * The BOs are stored in cma_list for deferred cleanup. This minimizes
+- * fence waiting just to the last fence.
+- */
+- if (src_bo->cpuva) {
+- dev = dst_bo->dev;
+- err = kfd_create_cma_system_bo(dev, src_bo, &size,
+- si->bo_offset, cma_write,
+- si->p, si->mm, si->task,
+- &si->cma_bo);
+- src_mem = si->cma_bo->mem;
+- src_offset = si->bo_offset & (PAGE_SIZE - 1);
+- list_add_tail(&si->cma_bo->list, &si->cma_list);
+- } else if (dst_bo->cpuva) {
+- dev = src_bo->dev;
+- err = kfd_create_cma_system_bo(dev, dst_bo, &size,
+- di->bo_offset, cma_write,
+- di->p, di->mm, di->task,
+- &di->cma_bo);
+- dst_mem = di->cma_bo->mem;
+- dst_offset = di->bo_offset & (PAGE_SIZE - 1);
+- list_add_tail(&di->cma_bo->list, &di->cma_list);
+- } else if (src_bo->dev->kgd != dst_bo->dev->kgd) {
+- /* This indicates that atleast on of the BO is in local mem.
+- * If both are in local mem of different devices then create an
+- * intermediate System BO and do a double copy
+- * [VRAM]--gpu1-->[System BO]--gpu2-->[VRAM].
+- * If only one BO is in VRAM then use that GPU to do the copy
+- */
+- if (src_bo->mem_type == KFD_IOC_ALLOC_MEM_FLAGS_VRAM &&
+- dst_bo->mem_type == KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
+- dev = dst_bo->dev;
+- err = kfd_create_cma_system_bo(src_bo->dev, src_bo,
+- &size, si->bo_offset,
+- cma_write, si->p,
+- si->mm, si->task,
+- &tmp_bo);
+- src_mem = tmp_bo->mem;
+- src_offset = 0;
+- } else if (src_bo->mem_type == KFD_IOC_ALLOC_MEM_FLAGS_VRAM)
+- dev = src_bo->dev;
+- /* else already set to dst_bo->dev */
+- }
+-
+- if (err) {
+- pr_err("Failed to create system BO %d", err);
+- return -EINVAL;
+- }
+-
+- err = dev->kfd2kgd->copy_mem_to_mem(dev->kgd, src_mem, src_offset,
+- dst_mem, dst_offset, size, f,
+- copied);
+- /* The tmp_bo allocates additional memory. So it is better to wait and
+- * delete. Also since multiple GPUs are involved the copies are
+- * currently not pipelined.
+- */
+- if (tmp_bo) {
+- if (!err) {
+- kfd_cma_fence_wait(*f);
+- dma_fence_put(*f);
+- *f = NULL;
+- }
+- dev->kfd2kgd->free_memory_of_gpu(dev->kgd, tmp_bo->mem);
+- kfree(tmp_bo);
+- }
+- return err;
+-}
+-
+-/* Copy single range from source iterator @si to destination iterator @di.
+- * @si will move to next range and @di will move by bytes copied.
+- * @return : 0 for success or -ve for failure
+- * @f: The last fence if any
+- * @copied: out: number of bytes copied
+- */
+-static int kfd_copy_single_range(struct cma_iter *si, struct cma_iter *di,
+- bool cma_write, struct dma_fence **f,
+- uint64_t *copied)
+-{
+- int err = 0;
+- uint64_t copy_size, n;
+- uint64_t size = si->array->size;
+- struct kfd_bo *src_bo = si->cur_bo;
+- struct dma_fence *lfence = NULL;
+-
+- if (!src_bo || !di || !copied)
+- return -EINVAL;
+- *copied = 0;
+- if (f)
+- *f = NULL;
+-
+- while (size && !kfd_cma_iter_end(di)) {
+- struct dma_fence *fence = NULL;
+-
+- copy_size = min(size, (di->array->size - di->offset));
+-
+- err = kfd_copy_bos(si, di, cma_write, copy_size, &fence, &n);
+- if (err) {
+- pr_err("CMA %d failed\n", err);
+- break;
+- }
+-
+- if (fence) {
+- err = kfd_fence_put_wait_if_diff_context(fence,
+- lfence);
+- lfence = fence;
+- if (err)
+- break;
+- }
+-
+- size -= n;
+- *copied += n;
+- err = kfd_cma_iter_advance(si, n);
+- if (err)
+- break;
+- err = kfd_cma_iter_advance(di, n);
+- if (err)
+- break;
+- }
+-
+- if (f)
+- *f = dma_fence_get(lfence);
+- dma_fence_put(lfence);
+-
+- return err;
+-}
+-
+ static int kfd_ioctl_cross_memory_copy(struct file *filep,
+ struct kfd_process *local_p, void *data)
+ {
+ struct kfd_ioctl_cross_memory_copy_args *args = data;
+ struct kfd_memory_range *src_array, *dst_array;
+- struct kfd_process *remote_p;
++ struct kfd_bo *src_bo, *dst_bo;
++ struct kfd_process *remote_p, *src_p, *dst_p;
+ struct task_struct *remote_task;
+ struct mm_struct *remote_mm;
+ struct pid *remote_pid;
+- struct dma_fence *lfence = NULL;
+- uint64_t copied = 0, total_copied = 0;
+- struct cma_iter di, si;
++ struct dma_fence *fence = NULL, *lfence = NULL;
++ uint64_t dst_va_addr;
++ uint64_t copied, total_copied = 0;
++ uint64_t src_offset, dst_offset, dst_va_addr_end;
+ const char *cma_op;
+- int err = 0;
++ int i, j = 0, err = 0;
+
+ /* Check parameters */
+ if (args->src_mem_range_array == 0 || args->dst_mem_range_array == 0 ||
+@@ -2372,76 +1761,169 @@ static int kfd_ioctl_cross_memory_copy(struct file *filep,
+ }
+
+ remote_p = kfd_get_process(remote_task);
+- if (IS_ERR(remote_p)) {
++ if (!remote_p) {
+ pr_err("Cross mem copy failed. Invalid kfd process %d\n",
+ args->pid);
+ err = -EINVAL;
+ goto kfd_process_fail;
+ }
+- /* Initialise cma_iter si & @di with source & destination range. */
++
+ if (KFD_IS_CROSS_MEMORY_WRITE(args->flags)) {
++ src_p = local_p;
++ dst_p = remote_p;
+ cma_op = "WRITE";
+ pr_debug("CMA WRITE: local -> remote\n");
+- err = kfd_cma_iter_init(dst_array, args->dst_mem_array_size,
+- remote_p, remote_mm, remote_task, &di);
+- if (err)
+- goto kfd_process_fail;
+- err = kfd_cma_iter_init(src_array, args->src_mem_array_size,
+- local_p, current->mm, current, &si);
+- if (err)
+- goto kfd_process_fail;
+ } else {
++ src_p = remote_p;
++ dst_p = local_p;
+ cma_op = "READ";
+ pr_debug("CMA READ: remote -> local\n");
+-
+- err = kfd_cma_iter_init(dst_array, args->dst_mem_array_size,
+- local_p, current->mm, current, &di);
+- if (err)
+- goto kfd_process_fail;
+- err = kfd_cma_iter_init(src_array, args->src_mem_array_size,
+- remote_p, remote_mm, remote_task, &si);
+- if (err)
+- goto kfd_process_fail;
+ }
+
+- /* Copy one si range at a time into di. After each call to
+- * kfd_copy_single_range() si will move to next range. di will be
+- * incremented by bytes copied
+- */
+- while (!kfd_cma_iter_end(&si) && !kfd_cma_iter_end(&di)) {
+- struct dma_fence *fence = NULL;
+-
+- err = kfd_copy_single_range(&si, &di,
+- KFD_IS_CROSS_MEMORY_WRITE(args->flags),
+- &fence, &copied);
+- total_copied += copied;
+
+- if (err)
++ /* For each source kfd_range:
++ * - Find the BO. Each range has to be within the same BO.
++ * - Copy this range to single or multiple destination BOs.
++ * - dst_va_addr - will point to next va address into which data will
++ * be copied.
++ * - dst_bo & src_bo - the current destination and source BOs
++ * - src_offset & dst_offset - offset into the respective BOs from
++ * data will be sourced or copied
++ */
++ dst_va_addr = dst_array[0].va_addr;
++ dst_va_addr_end = dst_va_addr + dst_array[0].size - 1;
++ mutex_lock(&dst_p->mutex);
++ dst_bo = kfd_process_find_bo_from_interval(dst_p,
++ dst_va_addr,
++ dst_va_addr_end);
++ mutex_unlock(&dst_p->mutex);
++ if (!dst_bo || dst_va_addr_end > dst_bo->it.last) {
++ pr_err("CMA %s failed. Invalid dst range\n", cma_op);
++ err = -EFAULT;
++ goto kfd_process_fail;
++ }
++ dst_offset = dst_va_addr - dst_bo->it.start;
++
++ for (i = 0; i < args->src_mem_array_size; i++) {
++ uint64_t src_va_addr_end = src_array[i].va_addr +
++ src_array[i].size - 1;
++ uint64_t src_size_to_copy = src_array[i].size;
++
++ mutex_lock(&src_p->mutex);
++ src_bo = kfd_process_find_bo_from_interval(src_p,
++ src_array[i].va_addr,
++ src_va_addr_end);
++ mutex_unlock(&src_p->mutex);
++ if (!src_bo || src_va_addr_end > src_bo->it.last) {
++ pr_err("CMA %s failed. Invalid src range\n", cma_op);
++ err = -EFAULT;
+ break;
++ }
++
++ src_offset = src_array[i].va_addr - src_bo->it.start;
+
+- /* Release old fence if a later fence is created. If no
+- * new fence is created, then keep the preivous fence
++ /* Copy src_bo to one or multiple dst_bo(s) based on size and
++ * current copy location.
+ */
+- if (fence) {
+- err = kfd_fence_put_wait_if_diff_context(fence,
+- lfence);
++ while (j < args->dst_mem_array_size) {
++ uint64_t copy_size;
++ int64_t space_left;
++
++ /* Find the current copy_size. This will be smaller of
++ * the following
++ * - space left in the current dest memory range
++ * - data left to copy from source range
++ */
++ space_left = (dst_array[j].va_addr + dst_array[j].size)
++ - dst_va_addr;
++ copy_size = (src_size_to_copy < space_left) ?
++ src_size_to_copy : space_left;
++
++ /* Check both BOs belong to same device */
++ if (src_bo->dev->kgd != dst_bo->dev->kgd) {
++ pr_err("CMA %s fail. Not same dev\n", cma_op);
++ err = -EINVAL;
++ break;
++ }
++
++ /* Store prev fence. Release it when a later fence is
++ * created
++ */
+ lfence = fence;
+- if (err)
++ fence = NULL;
++
++ err = dst_bo->dev->kfd2kgd->copy_mem_to_mem(
++ src_bo->dev->kgd,
++ src_bo->mem, src_offset,
++ dst_bo->mem, dst_offset,
++ copy_size,
++ &fence, &copied);
++
++ if (err) {
++ pr_err("GPU CMA %s failed\n", cma_op);
++ break;
++ }
++
++ /* Later fence available. Release old fence */
++ if (fence && lfence) {
++ dma_fence_put(lfence);
++ lfence = NULL;
++ }
++
++ total_copied += copied;
++ src_size_to_copy -= copied;
++ space_left -= copied;
++ dst_va_addr += copied;
++ dst_offset += copied;
++ src_offset += copied;
++ if (dst_va_addr > dst_bo->it.last + 1) {
++ pr_err("CMA %s fail. Mem overflow\n", cma_op);
++ err = -EFAULT;
++ break;
++ }
++
++ /* If the cur dest range is full move to next one */
++ if (space_left <= 0) {
++ if (++j >= args->dst_mem_array_size)
++ break;
++
++ dst_va_addr = dst_array[j].va_addr;
++ dst_va_addr_end = dst_va_addr +
++ dst_array[j].size - 1;
++ dst_bo = kfd_process_find_bo_from_interval(
++ dst_p,
++ dst_va_addr,
++ dst_va_addr_end);
++ if (!dst_bo ||
++ dst_va_addr_end > dst_bo->it.last) {
++ pr_err("CMA %s failed. Invalid dst range\n",
++ cma_op);
++ err = -EFAULT;
++ break;
++ }
++ dst_offset = dst_va_addr - dst_bo->it.start;
++ }
++
++ /* If the cur src range is done, move to next one */
++ if (src_size_to_copy <= 0)
+ break;
+ }
++ if (err)
++ break;
+ }
+
+ /* Wait for the last fence irrespective of error condition */
+- if (lfence) {
+- err = kfd_cma_fence_wait(lfence);
+- dma_fence_put(lfence);
+- if (err)
++ if (fence) {
++ if (dma_fence_wait_timeout(fence, false, msecs_to_jiffies(1000))
++ < 0)
+ pr_err("CMA %s failed. BO timed out\n", cma_op);
++ dma_fence_put(fence);
++ } else if (lfence) {
++ pr_debug("GPU copy fail. But wait for prev DMA to finish\n");
++ dma_fence_wait_timeout(lfence, true, msecs_to_jiffies(1000));
++ dma_fence_put(lfence);
+ }
+
+- kfd_free_cma_bos(&si);
+- kfd_free_cma_bos(&di);
+-
+ kfd_process_fail:
+ mmput(remote_mm);
+ mm_access_fail:
+@@ -2530,21 +2012,6 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_WAVE_CONTROL,
+ kfd_ioctl_dbg_wave_control, 0),
+
+- AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_SCRATCH_BACKING_VA,
+- kfd_ioctl_set_scratch_backing_va, 0),
+-
+- AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_TILE_CONFIG,
+- kfd_ioctl_get_tile_config, 0),
+-
+- AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_TRAP_HANDLER,
+- kfd_ioctl_set_trap_handler, 0),
+-
+- AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES_NEW,
+- kfd_ioctl_get_process_apertures_new, 0),
+-
+- AMDKFD_IOCTL_DEF(AMDKFD_IOC_ACQUIRE_VM,
+- kfd_ioctl_acquire_vm, 0),
+-
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_MEMORY_OF_GPU,
+ kfd_ioctl_alloc_memory_of_gpu, 0),
+
+@@ -2557,15 +2024,30 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU,
+ kfd_ioctl_unmap_memory_from_gpu, 0),
+
++ AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_MEMORY_OF_SCRATCH,
++ kfd_ioctl_alloc_scratch_memory, 0),
++
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_CU_MASK,
+ kfd_ioctl_set_cu_mask, 0),
+
++ AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_PROCESS_DGPU_APERTURE,
++ kfd_ioctl_set_process_dgpu_aperture, 0),
++
++ AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_TRAP_HANDLER,
++ kfd_ioctl_set_trap_handler, 0),
++
++ AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES_NEW,
++ kfd_ioctl_get_process_apertures_new, 0),
++
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_DMABUF_INFO,
+ kfd_ioctl_get_dmabuf_info, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_IMPORT_DMABUF,
+ kfd_ioctl_import_dmabuf, 0),
+
++ AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_TILE_CONFIG,
++ kfd_ioctl_get_tile_config, 0),
++
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_IPC_IMPORT_HANDLE,
+ kfd_ioctl_ipc_import_handle, 0),
+
+@@ -2578,6 +2060,9 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_QUEUE_WAVE_STATE,
+ kfd_ioctl_get_queue_wave_state, 0),
+
++ AMDKFD_IOCTL_DEF(AMDKFD_IOC_ACQUIRE_VM,
++ kfd_ioctl_acquire_vm, 0)
++
+ };
+
+ #define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls)
+@@ -2673,33 +2158,34 @@ static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
+ static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
+ {
+ struct kfd_process *process;
+- struct kfd_dev *dev = NULL;
++ struct kfd_dev *kfd;
+ unsigned long vm_pgoff;
+- unsigned int gpu_id;
++ unsigned long long mmap_type;
+
+ process = kfd_get_process(current);
+ if (IS_ERR(process))
+ return PTR_ERR(process);
+
+ vm_pgoff = vma->vm_pgoff;
+- vma->vm_pgoff = KFD_MMAP_OFFSET_VALUE_GET(vm_pgoff);
+- gpu_id = KFD_MMAP_GPU_ID_GET(vm_pgoff);
+- if (gpu_id)
+- dev = kfd_device_by_id(gpu_id);
++ vma->vm_pgoff = KFD_MMAP_OFFSET_VALUE_GET(vma->vm_pgoff);
++ mmap_type = vm_pgoff & KFD_MMAP_TYPE_MASK;
+
+- switch (vm_pgoff & KFD_MMAP_TYPE_MASK) {
++ switch (mmap_type) {
+ case KFD_MMAP_TYPE_DOORBELL:
+- if (!dev)
+- return -ENODEV;
+- return kfd_doorbell_mmap(dev, process, vma);
++ kfd = kfd_device_by_id(KFD_MMAP_GPU_ID_GET(vm_pgoff));
++ if (!kfd)
++ return -EFAULT;
++ return kfd_doorbell_mmap(kfd, process, vma);
+
+ case KFD_MMAP_TYPE_EVENTS:
+ return kfd_event_mmap(process, vma);
+
+ case KFD_MMAP_TYPE_RESERVED_MEM:
+- if (!dev)
+- return -ENODEV;
+- return kfd_reserved_mem_mmap(dev, process, vma);
++ return kfd_reserved_mem_mmap(process, vma);
++
++ default:
++ pr_err("Unsupported kfd mmap type %llx\n", mmap_type);
++ break;
+ }
+
+ return -EFAULT;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+index c540b65..24d0634 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+@@ -1,27 +1,7 @@
+-/*
+- * Copyright 2015-2017 Advanced Micro Devices, Inc.
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included in
+- * all copies or substantial portions of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+- * OTHER DEALINGS IN THE SOFTWARE.
+- */
+-
+-#include <linux/pci.h>
++#include <linux/kernel.h>
+ #include <linux/acpi.h>
++#include <linux/mm.h>
++#include <linux/pci.h>
+ #include "kfd_crat.h"
+ #include "kfd_priv.h"
+ #include "kfd_topology.h"
+@@ -286,7 +266,6 @@ static int kfd_parse_subtype_cache(struct crat_subtype_cache *cache,
+
+ id = cache->processor_id_low;
+
+- pr_debug("Found cache entry in CRAT table with processor_id=%d\n", id);
+ list_for_each_entry(dev, device_list, list) {
+ total_num_of_cu = (dev->node_props.array_count *
+ dev->node_props.cu_per_simd_array);
+@@ -436,15 +415,11 @@ static int kfd_parse_subtype(struct crat_subtype_generic *sub_type_hdr,
+ ret = kfd_parse_subtype_cache(cache, device_list);
+ break;
+ case CRAT_SUBTYPE_TLB_AFFINITY:
+- /*
+- * For now, nothing to do here
+- */
++ /* For now, nothing to do here */
+ pr_debug("Found TLB entry in CRAT table (not processing)\n");
+ break;
+ case CRAT_SUBTYPE_CCOMPUTE_AFFINITY:
+- /*
+- * For now, nothing to do here
+- */
++ /* For now, nothing to do here */
+ pr_debug("Found CCOMPUTE entry in CRAT table (not processing)\n");
+ break;
+ case CRAT_SUBTYPE_IOLINK_AFFINITY:
+@@ -469,8 +444,9 @@ static int kfd_parse_subtype(struct crat_subtype_generic *sub_type_hdr,
+ *
+ * Return - 0 if successful else -ve value
+ */
+-int kfd_parse_crat_table(void *crat_image, struct list_head *device_list,
+- uint32_t proximity_domain)
++int kfd_parse_crat_table(void *crat_image,
++ struct list_head *device_list,
++ uint32_t proximity_domain)
+ {
+ struct kfd_topology_device *top_dev = NULL;
+ struct crat_subtype_generic *sub_type_hdr;
+@@ -642,7 +618,6 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
+ num_of_cache_types = ARRAY_SIZE(polaris11_cache_info);
+ break;
+ case CHIP_VEGA10:
+- case CHIP_VEGA20:
+ pcache_info = vega10_cache_info;
+ num_of_cache_types = ARRAY_SIZE(vega10_cache_info);
+ break;
+@@ -718,7 +693,7 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
+ * crat_image will be NULL
+ * @size: [OUT] size of crat_image
+ *
+- * Return 0 if successful else return error code
++ * Return 0 if successful else return -ve value
+ */
+ #ifdef CONFIG_ACPI
+ int kfd_create_crat_image_acpi(void **crat_image, size_t *size)
+@@ -750,8 +725,10 @@ int kfd_create_crat_image_acpi(void **crat_image, size_t *size)
+ }
+
+ pcrat_image = kmalloc(crat_table->length, GFP_KERNEL);
+- if (!pcrat_image)
++ if (!pcrat_image) {
++ pr_err("No memory for allocating CRAT image\n");
+ return -ENOMEM;
++ }
+
+ memcpy(pcrat_image, crat_table, crat_table->length);
+
+@@ -938,7 +915,7 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
+
+ #ifdef CONFIG_ACPI
+ status = acpi_get_table("DSDT", 0, &acpi_table);
+- if (status != AE_OK)
++ if (status == AE_NOT_FOUND)
+ pr_warn("DSDT table not found for OEM information\n");
+ else {
+ crat_table->oem_revision = acpi_table->revision;
+@@ -1095,8 +1072,8 @@ static int kfd_fill_gpu_direct_io_link(int *avail_size,
+ * [OUT] actual size of data filled in crat_image
+ */
+ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
+- size_t *size, struct kfd_dev *kdev,
+- uint32_t proximity_domain)
++ size_t *size, struct kfd_dev *kdev,
++ uint32_t proximity_domain)
+ {
+ struct crat_header *crat_table = (struct crat_header *)pcrat_image;
+ struct crat_subtype_generic *sub_type_hdr;
+@@ -1264,8 +1241,7 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
+ * Return 0 if successful else return -ve value
+ */
+ int kfd_create_crat_image_virtual(void **crat_image, size_t *size,
+- int flags, struct kfd_dev *kdev,
+- uint32_t proximity_domain)
++ int flags, struct kfd_dev *kdev, uint32_t proximity_domain)
+ {
+ void *pcrat_image = NULL;
+ int ret = 0;
+@@ -1295,8 +1271,8 @@ int kfd_create_crat_image_virtual(void **crat_image, size_t *size,
+ if (!pcrat_image)
+ return -ENOMEM;
+ *size = VCRAT_SIZE_FOR_GPU;
+- ret = kfd_create_vcrat_image_gpu(pcrat_image, size, kdev,
+- proximity_domain);
++ ret = kfd_create_vcrat_image_gpu(pcrat_image, size,
++ kdev, proximity_domain);
+ break;
+ case (COMPUTE_UNIT_CPU | COMPUTE_UNIT_GPU):
+ /* TODO: */
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
+index cd7ee6d..00de41f 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
+@@ -24,6 +24,7 @@
+ #define KFD_CRAT_H_INCLUDED
+
+ #include <linux/types.h>
++#include "kfd_priv.h"
+
+ #pragma pack(1)
+
+@@ -227,12 +228,12 @@ struct crat_subtype_ccompute {
+ /*
+ * HSA IO Link Affinity structure and definitions
+ */
+-#define CRAT_IOLINK_FLAGS_ENABLED (1 << 0)
+-#define CRAT_IOLINK_FLAGS_NON_COHERENT (1 << 1)
+-#define CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT (1 << 2)
+-#define CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT (1 << 3)
+-#define CRAT_IOLINK_FLAGS_NO_PEER_TO_PEER_DMA (1 << 4)
+-#define CRAT_IOLINK_FLAGS_RESERVED_MASK 0xffffffe0
++#define CRAT_IOLINK_FLAGS_ENABLED (1 << 0)
++#define CRAT_IOLINK_FLAGS_NON_COHERENT (1 << 1)
++#define CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT (1 << 2)
++#define CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT (1 << 3)
++#define CRAT_IOLINK_FLAGS_NO_PEER_TO_PEER_DMA (1 << 4)
++#define CRAT_IOLINK_FLAGS_RESERVED_MASK 0xffffffe0
+
+ /*
+ * IO interface types
+@@ -240,18 +241,18 @@ struct crat_subtype_ccompute {
+ #define CRAT_IOLINK_TYPE_UNDEFINED 0
+ #define CRAT_IOLINK_TYPE_HYPERTRANSPORT 1
+ #define CRAT_IOLINK_TYPE_PCIEXPRESS 2
+-#define CRAT_IOLINK_TYPE_AMBA 3
+-#define CRAT_IOLINK_TYPE_MIPI 4
+-#define CRAT_IOLINK_TYPE_QPI_1_1 5
+-#define CRAT_IOLINK_TYPE_RESERVED1 6
+-#define CRAT_IOLINK_TYPE_RESERVED2 7
+-#define CRAT_IOLINK_TYPE_RAPID_IO 8
+-#define CRAT_IOLINK_TYPE_INFINIBAND 9
+-#define CRAT_IOLINK_TYPE_RESERVED3 10
+-#define CRAT_IOLINK_TYPE_OTHER 11
+-#define CRAT_IOLINK_TYPE_MAX 255
+-
+-#define CRAT_IOLINK_RESERVED_LENGTH 24
++#define CRAT_IOLINK_TYPE_AMBA 3
++#define CRAT_IOLINK_TYPE_MIPI 4
++#define CRAT_IOLINK_TYPE_QPI_1_1 5
++#define CRAT_IOLINK_TYPE_RESERVED1 6
++#define CRAT_IOLINK_TYPE_RESERVED2 7
++#define CRAT_IOLINK_TYPE_RAPID_IO 8
++#define CRAT_IOLINK_TYPE_INFINIBAND 9
++#define CRAT_IOLINK_TYPE_RESERVED3 10
++#define CRAT_IOLINK_TYPE_OTHER 11
++#define CRAT_IOLINK_TYPE_MAX 255
++
++#define CRAT_IOLINK_RESERVED_LENGTH 24
+
+ struct crat_subtype_iolink {
+ uint8_t type;
+@@ -307,16 +308,13 @@ struct cdit_header {
+
+ #pragma pack()
+
+-struct kfd_dev;
+-
+ #ifdef CONFIG_ACPI
+ int kfd_create_crat_image_acpi(void **crat_image, size_t *size);
+ #endif
+ void kfd_destroy_crat_image(void *crat_image);
+-int kfd_parse_crat_table(void *crat_image, struct list_head *device_list,
+- uint32_t proximity_domain);
++int kfd_parse_crat_table(void *crat_image,
++ struct list_head *device_list,
++ uint32_t proximity_domain);
+ int kfd_create_crat_image_virtual(void **crat_image, size_t *size,
+- int flags, struct kfd_dev *kdev,
+- uint32_t proximity_domain);
+-
++ int flags, struct kfd_dev *kdev, uint32_t proximity_domain);
+ #endif /* KFD_CRAT_H_INCLUDED */
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
+index ab37d36..232e28f 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2016-2017 Advanced Micro Devices, Inc.
++ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+@@ -21,8 +21,6 @@
+ */
+
+ #include <linux/debugfs.h>
+-#include <linux/uaccess.h>
+-
+ #include "kfd_priv.h"
+
+ static struct dentry *debugfs_root;
+@@ -34,38 +32,6 @@ static int kfd_debugfs_open(struct inode *inode, struct file *file)
+ return single_open(file, show, NULL);
+ }
+
+-static ssize_t kfd_debugfs_hang_hws_write(struct file *file,
+- const char __user *user_buf, size_t size, loff_t *ppos)
+-{
+- struct kfd_dev *dev;
+- char tmp[16];
+- uint32_t gpu_id;
+- int ret = -EINVAL;
+-
+- memset(tmp, 0, 16);
+- if (size >= 16) {
+- pr_err("Invalid input for gpu id.\n");
+- goto out;
+- }
+- if (copy_from_user(tmp, user_buf, size)) {
+- ret = -EFAULT;
+- goto out;
+- }
+- if (kstrtoint(tmp, 10, &gpu_id)) {
+- pr_err("Invalid input for gpu id.\n");
+- goto out;
+- }
+- dev = kfd_device_by_id(gpu_id);
+- if (dev) {
+- kfd_debugfs_hang_hws(dev);
+- ret = size;
+- } else
+- pr_err("Cannot find device %d.\n", gpu_id);
+-
+-out:
+- return ret;
+-}
+-
+ static const struct file_operations kfd_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = kfd_debugfs_open,
+@@ -74,15 +40,6 @@ static const struct file_operations kfd_debugfs_fops = {
+ .release = single_release,
+ };
+
+-static const struct file_operations kfd_debugfs_hang_hws_fops = {
+- .owner = THIS_MODULE,
+- .open = kfd_debugfs_open,
+- .read = seq_read,
+- .write = kfd_debugfs_hang_hws_write,
+- .llseek = seq_lseek,
+- .release = single_release,
+-};
+-
+ void kfd_debugfs_init(void)
+ {
+ struct dentry *ent;
+@@ -108,11 +65,6 @@ void kfd_debugfs_init(void)
+ ent = debugfs_create_file("rls", S_IFREG | 0444, debugfs_root,
+ kfd_debugfs_rls_by_device,
+ &kfd_debugfs_fops);
+-
+- ent = debugfs_create_file("hang_hws", S_IFREG | 0644, debugfs_root,
+- NULL,
+- &kfd_debugfs_hang_hws_fops);
+-
+ if (!ent)
+ pr_warn("Failed to create rls in kfd debugfs\n");
+ }
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+old mode 100644
+new mode 100755
+index 10095087..a9ad2a8
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -27,17 +27,12 @@
+ #include "kfd_priv.h"
+ #include "kfd_device_queue_manager.h"
+ #include "kfd_pm4_headers_vi.h"
+-#include "cwsr_trap_handler.h"
++#include "cwsr_trap_handler_gfx8.asm"
++#include "cwsr_trap_handler_gfx9.asm"
+ #include "kfd_iommu.h"
+
+ #define MQD_SIZE_ALIGNED 768
+-
+-/*
+- * kfd_locked is used to lock the kfd driver during suspend or reset
+- * once locked, kfd driver will stop any further GPU execution.
+- * create process (open) will return -EAGAIN.
+- */
+-static atomic_t kfd_locked = ATOMIC_INIT(0);
++static atomic_t kfd_device_suspended = ATOMIC_INIT(0);
+
+ #ifdef KFD_SUPPORT_IOMMU_V2
+ static const struct kfd_device_info kaveri_device_info = {
+@@ -54,7 +49,6 @@ static const struct kfd_device_info kaveri_device_info = {
+ .needs_iommu_device = true,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 2,
+- .num_sdma_queues_per_engine = 2,
+ };
+
+ static const struct kfd_device_info carrizo_device_info = {
+@@ -71,7 +65,6 @@ static const struct kfd_device_info carrizo_device_info = {
+ .needs_iommu_device = true,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 2,
+- .num_sdma_queues_per_engine = 2,
+ };
+
+ static const struct kfd_device_info raven_device_info = {
+@@ -87,7 +80,6 @@ static const struct kfd_device_info raven_device_info = {
+ .needs_iommu_device = true,
+ .needs_pci_atomics = true,
+ .num_sdma_engines = 1,
+- .num_sdma_queues_per_engine = 2,
+ };
+ #endif
+
+@@ -105,7 +97,6 @@ static const struct kfd_device_info hawaii_device_info = {
+ .needs_iommu_device = false,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 2,
+- .num_sdma_queues_per_engine = 2,
+ };
+
+ static const struct kfd_device_info tonga_device_info = {
+@@ -121,7 +112,6 @@ static const struct kfd_device_info tonga_device_info = {
+ .needs_iommu_device = false,
+ .needs_pci_atomics = true,
+ .num_sdma_engines = 2,
+- .num_sdma_queues_per_engine = 2,
+ };
+
+ static const struct kfd_device_info tonga_vf_device_info = {
+@@ -137,7 +127,6 @@ static const struct kfd_device_info tonga_vf_device_info = {
+ .needs_iommu_device = false,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 2,
+- .num_sdma_queues_per_engine = 2,
+ };
+
+ static const struct kfd_device_info fiji_device_info = {
+@@ -153,7 +142,6 @@ static const struct kfd_device_info fiji_device_info = {
+ .needs_iommu_device = false,
+ .needs_pci_atomics = true,
+ .num_sdma_engines = 2,
+- .num_sdma_queues_per_engine = 2,
+ };
+
+ static const struct kfd_device_info fiji_vf_device_info = {
+@@ -169,7 +157,6 @@ static const struct kfd_device_info fiji_vf_device_info = {
+ .needs_iommu_device = false,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 2,
+- .num_sdma_queues_per_engine = 2,
+ };
+
+
+@@ -186,7 +173,6 @@ static const struct kfd_device_info polaris10_device_info = {
+ .needs_iommu_device = false,
+ .needs_pci_atomics = true,
+ .num_sdma_engines = 2,
+- .num_sdma_queues_per_engine = 2,
+ };
+
+ static const struct kfd_device_info polaris10_vf_device_info = {
+@@ -202,7 +188,6 @@ static const struct kfd_device_info polaris10_vf_device_info = {
+ .needs_iommu_device = false,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 2,
+- .num_sdma_queues_per_engine = 2,
+ };
+
+ static const struct kfd_device_info polaris11_device_info = {
+@@ -218,7 +203,6 @@ static const struct kfd_device_info polaris11_device_info = {
+ .needs_iommu_device = false,
+ .needs_pci_atomics = true,
+ .num_sdma_engines = 2,
+- .num_sdma_queues_per_engine = 2,
+ };
+
+ static const struct kfd_device_info vega10_device_info = {
+@@ -232,9 +216,8 @@ static const struct kfd_device_info vega10_device_info = {
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .supports_cwsr = true,
+ .needs_iommu_device = false,
+- .needs_pci_atomics = false,
++ .needs_pci_atomics = true,
+ .num_sdma_engines = 2,
+- .num_sdma_queues_per_engine = 2,
+ };
+
+ static const struct kfd_device_info vega10_vf_device_info = {
+@@ -250,23 +233,6 @@ static const struct kfd_device_info vega10_vf_device_info = {
+ .needs_iommu_device = false,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 2,
+- .num_sdma_queues_per_engine = 2,
+-};
+-
+-static const struct kfd_device_info vega20_device_info = {
+- .asic_family = CHIP_VEGA20,
+- .max_pasid_bits = 16,
+- .max_no_of_hqd = 24,
+- .doorbell_size = 8,
+- .ih_ring_entry_size = 8 * sizeof(uint32_t),
+- .event_interrupt_class = &event_interrupt_class_v9,
+- .num_of_watch_points = 4,
+- .mqd_size_aligned = MQD_SIZE_ALIGNED,
+- .supports_cwsr = true,
+- .needs_iommu_device = false,
+- .needs_pci_atomics = true,
+- .num_sdma_engines = 2,
+- .num_sdma_queues_per_engine = 8,
+ };
+
+ struct kfd_deviceid {
+@@ -317,35 +283,35 @@ static const struct kfd_deviceid supported_devices[] = {
+ { 0x67B9, &hawaii_device_info }, /* Hawaii */
+ { 0x67BA, &hawaii_device_info }, /* Hawaii */
+ { 0x67BE, &hawaii_device_info }, /* Hawaii */
+- { 0x6920, &tonga_device_info }, /* Tonga */
+- { 0x6921, &tonga_device_info }, /* Tonga */
+- { 0x6928, &tonga_device_info }, /* Tonga */
+- { 0x6929, &tonga_device_info }, /* Tonga */
+- { 0x692B, &tonga_device_info }, /* Tonga */
+- { 0x692F, &tonga_vf_device_info }, /* Tonga vf */
+- { 0x6938, &tonga_device_info }, /* Tonga */
+- { 0x6939, &tonga_device_info }, /* Tonga */
+- { 0x7300, &fiji_device_info }, /* Fiji */
+- { 0x730F, &fiji_vf_device_info }, /* Fiji vf*/
+- { 0x67C0, &polaris10_device_info }, /* Polaris10 */
+- { 0x67C1, &polaris10_device_info }, /* Polaris10 */
+- { 0x67C2, &polaris10_device_info }, /* Polaris10 */
++ { 0x6920, &tonga_device_info }, /* Tonga */
++ { 0x6921, &tonga_device_info }, /* Tonga */
++ { 0x6928, &tonga_device_info }, /* Tonga */
++ { 0x6929, &tonga_device_info }, /* Tonga */
++ { 0x692B, &tonga_device_info }, /* Tonga */
++ { 0x692F, &tonga_vf_device_info }, /* Tonga vf */
++ { 0x6938, &tonga_device_info }, /* Tonga */
++ { 0x6939, &tonga_device_info }, /* Tonga */
++ { 0x7300, &fiji_device_info }, /* Fiji */
++ { 0x730F, &fiji_vf_device_info }, /* Fiji vf*/
++ { 0x67C0, &polaris10_device_info }, /* Polaris10 */
++ { 0x67C1, &polaris10_device_info }, /* Polaris10 */
++ { 0x67C2, &polaris10_device_info }, /* Polaris10 */
+ { 0x67C4, &polaris10_device_info }, /* Polaris10 */
+ { 0x67C7, &polaris10_device_info }, /* Polaris10 */
+- { 0x67C8, &polaris10_device_info }, /* Polaris10 */
+- { 0x67C9, &polaris10_device_info }, /* Polaris10 */
+- { 0x67CA, &polaris10_device_info }, /* Polaris10 */
+- { 0x67CC, &polaris10_device_info }, /* Polaris10 */
+- { 0x67CF, &polaris10_device_info }, /* Polaris10 */
+- { 0x67D0, &polaris10_vf_device_info }, /* Polaris10 vf*/
++ { 0x67C8, &polaris10_device_info }, /* Polaris10 */
++ { 0x67C9, &polaris10_device_info }, /* Polaris10 */
++ { 0x67CA, &polaris10_device_info }, /* Polaris10 */
++ { 0x67CC, &polaris10_device_info }, /* Polaris10 */
++ { 0x67CF, &polaris10_device_info }, /* Polaris10 */
++ { 0x67D0, &polaris10_vf_device_info }, /* Polaris10 vf*/
+ { 0x67DF, &polaris10_device_info }, /* Polaris10 */
+- { 0x67E0, &polaris11_device_info }, /* Polaris11 */
+- { 0x67E1, &polaris11_device_info }, /* Polaris11 */
++ { 0x67E0, &polaris11_device_info }, /* Polaris11 */
++ { 0x67E1, &polaris11_device_info }, /* Polaris11 */
+ { 0x67E3, &polaris11_device_info }, /* Polaris11 */
+- { 0x67E7, &polaris11_device_info }, /* Polaris11 */
+- { 0x67E8, &polaris11_device_info }, /* Polaris11 */
+- { 0x67E9, &polaris11_device_info }, /* Polaris11 */
+- { 0x67EB, &polaris11_device_info }, /* Polaris11 */
++ { 0x67E7, &polaris11_device_info }, /* Polaris11 */
++ { 0x67E8, &polaris11_device_info }, /* Polaris11 */
++ { 0x67E9, &polaris11_device_info }, /* Polaris11 */
++ { 0x67EB, &polaris11_device_info }, /* Polaris11 */
+ { 0x67EF, &polaris11_device_info }, /* Polaris11 */
+ { 0x67FF, &polaris11_device_info }, /* Polaris11 */
+ { 0x6860, &vega10_device_info }, /* Vega10 */
+@@ -357,12 +323,6 @@ static const struct kfd_deviceid supported_devices[] = {
+ { 0x6868, &vega10_device_info }, /* Vega10 */
+ { 0x686C, &vega10_vf_device_info }, /* Vega10 vf*/
+ { 0x687F, &vega10_device_info }, /* Vega10 */
+- { 0x66a0, &vega20_device_info }, /* Vega20 */
+- { 0x66a1, &vega20_device_info }, /* Vega20 */
+- { 0x66a2, &vega20_device_info }, /* Vega20 */
+- { 0x66a3, &vega20_device_info }, /* Vega20 */
+- { 0x66a7, &vega20_device_info }, /* Vega20 */
+- { 0x66af, &vega20_device_info } /* Vega20 */
+ };
+
+ static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
+@@ -392,7 +352,7 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
+ struct pci_dev *pdev, const struct kfd2kgd_calls *f2g)
+ {
+ struct kfd_dev *kfd;
+- int ret;
++
+ const struct kfd_device_info *device_info =
+ lookup_device_info(pdev->device);
+
+@@ -400,27 +360,24 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
+ dev_err(kfd_device, "kgd2kfd_probe failed\n");
+ return NULL;
+ }
+-
++
++ if (device_info->needs_pci_atomics) {
++ /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
++ * 32 and 64-bit requests are possible and must be
++ * supported.
++ */
++ if (pci_enable_atomic_ops_to_root(pdev) < 0) {
++ dev_info(kfd_device,
++ "skipped device %x:%x, PCI rejects atomics",
++ pdev->vendor, pdev->device);
++ return NULL;
++ }
++ }
++
+ kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
+ if (!kfd)
+ return NULL;
+
+- /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
+- * 32 and 64-bit requests are possible and must be
+- * supported.
+- */
+- ret = pci_enable_atomic_ops_to_root(pdev,
+- PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
+- PCI_EXP_DEVCAP2_ATOMIC_COMP64);
+- if (device_info->needs_pci_atomics && ret < 0) {
+- dev_info(kfd_device,
+- "skipped device %x:%x, PCI rejects atomics",
+- pdev->vendor, pdev->device);
+- kfree(kfd);
+- return NULL;
+- } else if (!ret)
+- kfd->pci_atomic_requested = true;
+-
+ kfd->kgd = kgd;
+ kfd->device_info = device_info;
+ kfd->pdev = pdev;
+@@ -462,6 +419,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
+ KGD_ENGINE_SDMA1);
+ kfd->shared_resources = *gpu_resources;
+
++ /* Usually first_vmid_kfd = 8, last_vmid_kfd = 15 */
+ kfd->vm_info.first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1;
+ kfd->vm_info.last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1;
+ kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd
+@@ -498,8 +456,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
+
+ if (kfd->kfd2kgd->init_gtt_mem_allocation(
+ kfd->kgd, size, &kfd->gtt_mem,
+- &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr,
+- false)) {
++ &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr)){
+ dev_err(kfd_device, "Could not allocate %d bytes\n", size);
+ goto out;
+ }
+@@ -592,52 +549,21 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
+
+ int kgd2kfd_pre_reset(struct kfd_dev *kfd)
+ {
+- if (!kfd->init_complete)
+- return 0;
+- kgd2kfd_suspend(kfd);
+-
+- /* hold dqm->lock to prevent further execution*/
+- mutex_lock(&kfd->dqm->lock);
+-
+- kfd_signal_reset_event(kfd);
+ return 0;
+ }
+
+-/*
+- * Fix me. KFD won't be able to resume existing process for now.
+- * We will keep all existing process in a evicted state and
+- * wait the process to be terminated.
+- */
+-
+ int kgd2kfd_post_reset(struct kfd_dev *kfd)
+ {
+- int ret, count;
+-
+- if (!kfd->init_complete)
+- return 0;
+-
+- mutex_unlock(&kfd->dqm->lock);
+-
+- ret = kfd_resume(kfd);
+- if (ret)
+- return ret;
+- count = atomic_dec_return(&kfd_locked);
+- WARN_ONCE(count != 0, "KFD reset ref. error");
+ return 0;
+ }
+
+-bool kfd_is_locked(void)
+-{
+- return (atomic_read(&kfd_locked) > 0);
+-}
+-
+ void kgd2kfd_suspend(struct kfd_dev *kfd)
+ {
+ if (!kfd->init_complete)
+ return;
+
+ /* For first KFD device suspend all the KFD processes */
+- if (atomic_inc_return(&kfd_locked) == 1)
++ if (atomic_inc_return(&kfd_device_suspended) == 1)
+ kfd_suspend_all_processes();
+
+ kfd->dqm->ops.stop(kfd->dqm);
+@@ -656,7 +582,7 @@ int kgd2kfd_resume(struct kfd_dev *kfd)
+ if (ret)
+ return ret;
+
+- count = atomic_dec_return(&kfd_locked);
++ count = atomic_dec_return(&kfd_device_suspended);
+ WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
+ if (count == 0)
+ ret = kfd_resume_all_processes();
+@@ -704,19 +630,19 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
+
+ spin_lock(&kfd->interrupt_lock);
+
+- if (kfd->interrupts_active
+- && interrupt_is_wanted(kfd, ih_ring_entry,
+- patched_ihre, &is_patched)
++ if (kfd->interrupts_active && interrupt_is_wanted(kfd, ih_ring_entry,
++ patched_ihre, &is_patched)
+ && enqueue_ih_ring_entry(kfd,
+- is_patched ? patched_ihre : ih_ring_entry))
++ is_patched ? patched_ihre : ih_ring_entry))
+ queue_work(kfd->ih_wq, &kfd->interrupt_work);
+
+ spin_unlock(&kfd->interrupt_lock);
+ }
+
+-int kgd2kfd_quiesce_mm(struct mm_struct *mm)
++int kgd2kfd_quiesce_mm(struct kfd_dev *kfd, struct mm_struct *mm)
+ {
+ struct kfd_process *p;
++ struct kfd_process_device *pdd;
+ int r;
+
+ /* Because we are called from arbitrary context (workqueue) as opposed
+@@ -725,17 +651,26 @@ int kgd2kfd_quiesce_mm(struct mm_struct *mm)
+ */
+ p = kfd_lookup_process_by_mm(mm);
+ if (!p)
+- return -ESRCH;
++ return -ENODEV;
+
+- r = kfd_process_evict_queues(p);
++ if (kfd) {
++ r = -ENODEV;
++ pdd = kfd_get_process_device_data(kfd, p);
++ if (pdd)
++ r = kfd->dqm->ops.evict_process_queues(kfd->dqm,
++ &pdd->qpd);
++ } else {
++ r = kfd_process_evict_queues(p);
++ }
+
+ kfd_unref_process(p);
+ return r;
+ }
+
+-int kgd2kfd_resume_mm(struct mm_struct *mm)
++int kgd2kfd_resume_mm(struct kfd_dev *kfd, struct mm_struct *mm)
+ {
+ struct kfd_process *p;
++ struct kfd_process_device *pdd;
+ int r;
+
+ /* Because we are called from arbitrary context (workqueue) as opposed
+@@ -744,9 +679,17 @@ int kgd2kfd_resume_mm(struct mm_struct *mm)
+ */
+ p = kfd_lookup_process_by_mm(mm);
+ if (!p)
+- return -ESRCH;
++ return -ENODEV;
+
+- r = kfd_process_restore_queues(p);
++ if (kfd) {
++ r = -ENODEV;
++ pdd = kfd_get_process_device_data(kfd, p);
++ if (pdd)
++ r = kfd->dqm->ops.restore_process_queues(kfd->dqm,
++ &pdd->qpd);
++ } else {
++ r = kfd_process_restore_queues(p);
++ }
+
+ kfd_unref_process(p);
+ return r;
+@@ -981,26 +924,3 @@ int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
+ kfree(mem_obj);
+ return 0;
+ }
+-
+-#if defined(CONFIG_DEBUG_FS)
+-
+-/* This function will send a package to HIQ to hang the HWS
+- * which will trigger a GPU reset and bring the HWS back to normal state
+- */
+-int kfd_debugfs_hang_hws(struct kfd_dev *dev)
+-{
+- int r = 0;
+-
+- if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) {
+- pr_err("HWS is not enabled");
+- return -EINVAL;
+- }
+-
+- r = pm_debugfs_hang_hws(&dev->dqm->packets);
+- if (!r)
+- r = dqm_debugfs_execute_queues(dev->dqm);
+-
+- return r;
+-}
+-
+-#endif
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index ae6f7d8..8c04f7a2 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -21,11 +21,10 @@
+ *
+ */
+
+-#include <linux/ratelimit.h>
+-#include <linux/printk.h>
+ #include <linux/slab.h>
+ #include <linux/list.h>
+ #include <linux/types.h>
++#include <linux/printk.h>
+ #include <linux/bitops.h>
+ #include <linux/sched.h>
+ #include "kfd_priv.h"
+@@ -61,8 +60,6 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
+ static void deallocate_sdma_queue(struct device_queue_manager *dqm,
+ unsigned int sdma_queue_id);
+
+-static void kfd_process_hw_exception(struct work_struct *work);
+-
+ static inline
+ enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
+ {
+@@ -109,7 +106,7 @@ static unsigned int get_num_sdma_engines(struct device_queue_manager *dqm)
+ unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
+ {
+ return dqm->dev->device_info->num_sdma_engines
+- * dqm->dev->device_info->num_sdma_queues_per_engine;
++ * KFD_SDMA_QUEUES_PER_ENGINE;
+ }
+
+ void program_sh_mem_settings(struct device_queue_manager *dqm,
+@@ -200,7 +197,7 @@ static int allocate_vmid(struct device_queue_manager *dqm,
+ dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->kgd,
+ qpd->vmid,
+ qpd->page_table_base);
+- /* invalidate the VM context after pasid and vmid mapping is set up */
++ /*invalidate the VM context after pasid and vmid mapping is set up*/
+ kfd_flush_tlb(qpd_to_pdd(qpd));
+
+ return 0;
+@@ -209,19 +206,16 @@ static int allocate_vmid(struct device_queue_manager *dqm,
+ static int flush_texture_cache_nocpsch(struct kfd_dev *kdev,
+ struct qcm_process_device *qpd)
+ {
+- const struct packet_manager_funcs *pmf = qpd->dqm->packets.pmf;
+- int ret;
++ uint32_t len;
+
+ if (!qpd->ib_kaddr)
+ return -ENOMEM;
+
+- ret = pmf->release_mem(qpd->ib_base, (uint32_t *)qpd->ib_kaddr);
+- if (ret)
+- return ret;
++ len = qpd->dqm->packets.pmf->release_mem(qpd->ib_base,
++ (uint32_t *)qpd->ib_kaddr);
+
+ return kdev->kfd2kgd->submit_ib(kdev->kgd, KGD_ENGINE_MEC1, qpd->vmid,
+- qpd->ib_base, (uint32_t *)qpd->ib_kaddr,
+- pmf->release_mem_size / sizeof(uint32_t));
++ qpd->ib_base, (uint32_t *)qpd->ib_kaddr, len);
+ }
+
+ static void deallocate_vmid(struct device_queue_manager *dqm,
+@@ -290,6 +284,7 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
+ if (retval) {
+ if (list_empty(&qpd->queues_list))
+ deallocate_vmid(dqm, qpd, q);
++
+ goto out_unlock;
+ }
+
+@@ -359,10 +354,10 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+ {
+ int retval;
+- struct mqd_manager *mqd_mgr;
++ struct mqd_manager *mqd;
+
+- mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
+- if (!mqd_mgr)
++ mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
++ if (!mqd)
+ return -ENOMEM;
+
+ retval = allocate_hqd(dqm, q);
+@@ -373,7 +368,7 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
+ if (retval)
+ goto out_deallocate_hqd;
+
+- retval = mqd_mgr->init_mqd(mqd_mgr, &q->mqd, &q->mqd_mem_obj,
++ retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
+ &q->gart_mqd_addr, &q->properties);
+ if (retval)
+ goto out_deallocate_doorbell;
+@@ -387,15 +382,15 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
+ if (!q->properties.is_active)
+ return 0;
+
+- retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
+- &q->properties, q->process->mm);
++ retval = mqd->load_mqd(mqd, q->mqd, q->pipe, q->queue, &q->properties,
++ q->process->mm);
+ if (retval)
+ goto out_uninit_mqd;
+
+ return 0;
+
+ out_uninit_mqd:
+- mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
++ mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+ out_deallocate_doorbell:
+ deallocate_doorbell(qpd, q);
+ out_deallocate_hqd:
+@@ -412,11 +407,11 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
+ struct queue *q)
+ {
+ int retval;
+- struct mqd_manager *mqd_mgr;
++ struct mqd_manager *mqd;
+
+- mqd_mgr = dqm->ops.get_mqd_manager(dqm,
++ mqd = dqm->ops.get_mqd_manager(dqm,
+ get_mqd_type_from_queue_type(q->properties.type));
+- if (!mqd_mgr)
++ if (!mqd)
+ return -ENOMEM;
+
+ if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
+@@ -433,14 +428,14 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
+
+ deallocate_doorbell(qpd, q);
+
+- retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
++ retval = mqd->destroy_mqd(mqd, q->mqd,
+ KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
+ KFD_UNMAP_LATENCY_MS,
+ q->pipe, q->queue);
+ if (retval == -ETIME)
+ qpd->reset_wavefronts = true;
+
+- mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
++ mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+
+ list_del(&q->list);
+ if (list_empty(&qpd->queues_list)) {
+@@ -480,19 +475,21 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
+ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
+ {
+ int retval;
+- struct mqd_manager *mqd_mgr;
++ struct mqd_manager *mqd;
+ struct kfd_process_device *pdd;
++
+ bool prev_active = false;
+
+ mutex_lock(&dqm->lock);
++
+ pdd = kfd_get_process_device_data(q->device, q->process);
+ if (!pdd) {
+ retval = -ENODEV;
+ goto out_unlock;
+ }
+- mqd_mgr = dqm->ops.get_mqd_manager(dqm,
++ mqd = dqm->ops.get_mqd_manager(dqm,
+ get_mqd_type_from_queue_type(q->properties.type));
+- if (!mqd_mgr) {
++ if (!mqd) {
+ retval = -ENOMEM;
+ goto out_unlock;
+ }
+@@ -500,7 +497,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
+ * Eviction state logic: we only mark active queues as evicted
+ * to avoid the overhead of restoring inactive queues later
+ */
+- if (pdd->qpd.evicted)
++ if (pdd->qpd.evicted > 0)
+ q->properties.is_evicted = (q->properties.queue_size > 0 &&
+ q->properties.queue_percent > 0 &&
+ q->properties.queue_address != 0);
+@@ -519,7 +516,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
+ } else if (prev_active &&
+ (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
+ q->properties.type == KFD_QUEUE_TYPE_SDMA)) {
+- retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
++ retval = mqd->destroy_mqd(mqd, q->mqd,
+ KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
+ KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
+ if (retval) {
+@@ -528,7 +525,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
+ }
+ }
+
+- retval = mqd_mgr->update_mqd(mqd_mgr, q->mqd, &q->properties);
++ retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
+
+ /*
+ * check active state vs. the previous state and modify
+@@ -546,7 +543,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
+ else if (q->properties.is_active &&
+ (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
+ q->properties.type == KFD_QUEUE_TYPE_SDMA))
+- retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
++ retval = mqd->load_mqd(mqd, q->mqd, q->pipe, q->queue,
+ &q->properties, q->process->mm);
+
+ out_unlock:
+@@ -557,29 +554,29 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
+ static struct mqd_manager *get_mqd_manager(
+ struct device_queue_manager *dqm, enum KFD_MQD_TYPE type)
+ {
+- struct mqd_manager *mqd_mgr;
++ struct mqd_manager *mqd;
+
+ if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
+ return NULL;
+
+ pr_debug("mqd type %d\n", type);
+
+- mqd_mgr = dqm->mqd_mgrs[type];
+- if (!mqd_mgr) {
+- mqd_mgr = mqd_manager_init(type, dqm->dev);
+- if (!mqd_mgr)
++ mqd = dqm->mqds[type];
++ if (!mqd) {
++ mqd = mqd_manager_init(type, dqm->dev);
++ if (!mqd)
+ pr_err("mqd manager is NULL");
+- dqm->mqd_mgrs[type] = mqd_mgr;
++ dqm->mqds[type] = mqd;
+ }
+
+- return mqd_mgr;
++ return mqd;
+ }
+
+ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+ {
+ struct queue *q;
+- struct mqd_manager *mqd_mgr;
++ struct mqd_manager *mqd;
+ struct kfd_process_device *pdd;
+ int retval = 0;
+
+@@ -595,16 +592,16 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
+ list_for_each_entry(q, &qpd->queues_list, list) {
+ if (!q->properties.is_active)
+ continue;
+- mqd_mgr = dqm->ops.get_mqd_manager(dqm,
++ mqd = dqm->ops.get_mqd_manager(dqm,
+ get_mqd_type_from_queue_type(q->properties.type));
+- if (!mqd_mgr) { /* should not be here */
++ if (!mqd) { /* should not be here */
+ pr_err("Cannot evict queue, mqd mgr is NULL\n");
+ retval = -ENOMEM;
+ goto out;
+ }
+ q->properties.is_evicted = true;
+ q->properties.is_active = false;
+- retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
++ retval = mqd->destroy_mqd(mqd, q->mqd,
+ KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
+ KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
+ if (retval)
+@@ -654,9 +651,9 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+ {
+ struct queue *q;
+- struct mqd_manager *mqd_mgr;
++ struct mqd_manager *mqd;
+ struct kfd_process_device *pdd;
+- uint64_t pd_base;
++ uint32_t pd_base;
+ int retval = 0;
+
+ pdd = qpd_to_pdd(qpd);
+@@ -676,7 +673,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
+
+ /* Update PD Base in QPD */
+ qpd->page_table_base = pd_base;
+- pr_debug("Updated PD address to 0x%llx\n", pd_base);
++ pr_debug("Updated PD address to 0x%08x\n", pd_base);
+
+ if (!list_empty(&qpd->queues_list)) {
+ dqm->dev->kfd2kgd->set_vm_context_page_table_base(
+@@ -690,16 +687,16 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
+ list_for_each_entry(q, &qpd->queues_list, list) {
+ if (!q->properties.is_evicted)
+ continue;
+- mqd_mgr = dqm->ops.get_mqd_manager(dqm,
++ mqd = dqm->ops.get_mqd_manager(dqm,
+ get_mqd_type_from_queue_type(q->properties.type));
+- if (!mqd_mgr) { /* should not be here */
++ if (!mqd) { /* should not be here */
+ pr_err("Cannot restore queue, mqd mgr is NULL\n");
+ retval = -ENOMEM;
+ goto out;
+ }
+ q->properties.is_evicted = false;
+ q->properties.is_active = true;
+- retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
++ retval = mqd->load_mqd(mqd, q->mqd, q->pipe,
+ q->queue, &q->properties,
+ q->process->mm);
+ if (retval)
+@@ -717,7 +714,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
+ {
+ struct queue *q;
+ struct kfd_process_device *pdd;
+- uint64_t pd_base;
++ uint32_t pd_base;
+ int retval = 0;
+
+ pdd = qpd_to_pdd(qpd);
+@@ -737,7 +734,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
+
+ /* Update PD Base in QPD */
+ qpd->page_table_base = pd_base;
+- pr_debug("Updated PD address to 0x%llx\n", pd_base);
++ pr_debug("Updated PD address to 0x%08x\n", pd_base);
+
+ /* activate all active queues on the qpd */
+ list_for_each_entry(q, &qpd->queues_list, list) {
+@@ -760,9 +757,9 @@ static int register_process(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+ {
+ struct device_process_node *n;
+- struct kfd_process_device *pdd;
+- uint64_t pd_base;
+ int retval;
++ struct kfd_process_device *pdd;
++ uint32_t pd_base;
+
+ n = kzalloc(sizeof(*n), GFP_KERNEL);
+ if (!n)
+@@ -779,7 +776,7 @@ static int register_process(struct device_queue_manager *dqm,
+
+ /* Update PD Base in QPD */
+ qpd->page_table_base = pd_base;
+- pr_debug("Updated PD address to 0x%llx\n", pd_base);
++ pr_debug("Updated PD address to 0x%08x\n", pd_base);
+
+ retval = dqm->asic_ops.update_qpd(dqm, qpd);
+
+@@ -880,7 +877,7 @@ static void uninitialize(struct device_queue_manager *dqm)
+
+ kfree(dqm->allocated_queues);
+ for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
+- kfree(dqm->mqd_mgrs[i]);
++ kfree(dqm->mqds[i]);
+ mutex_destroy(&dqm->lock);
+ kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
+ }
+@@ -888,7 +885,7 @@ static void uninitialize(struct device_queue_manager *dqm)
+ static int start_nocpsch(struct device_queue_manager *dqm)
+ {
+ init_interrupts(dqm);
+- return pm_init(&dqm->packets, dqm);
++ return pm_init(&dqm->packets, dqm, dqm->dev->mec_fw_version);
+ }
+
+ static int stop_nocpsch(struct device_queue_manager *dqm)
+@@ -924,11 +921,11 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
+ struct queue *q,
+ struct qcm_process_device *qpd)
+ {
+- struct mqd_manager *mqd_mgr;
++ struct mqd_manager *mqd;
+ int retval;
+
+- mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
+- if (!mqd_mgr)
++ mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
++ if (!mqd)
+ return -ENOMEM;
+
+ retval = allocate_sdma_queue(dqm, &q->sdma_id);
+@@ -947,20 +944,19 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
+ pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
+
+ dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
+- retval = mqd_mgr->init_mqd(mqd_mgr, &q->mqd, &q->mqd_mem_obj,
++ retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
+ &q->gart_mqd_addr, &q->properties);
+ if (retval)
+ goto out_deallocate_doorbell;
+
+- retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, 0, 0, &q->properties,
+- NULL);
++ retval = mqd->load_mqd(mqd, q->mqd, 0, 0, &q->properties, NULL);
+ if (retval)
+ goto out_uninit_mqd;
+
+ return 0;
+
+ out_uninit_mqd:
+- mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
++ mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+ out_deallocate_doorbell:
+ deallocate_doorbell(qpd, q);
+ out_deallocate_sdma_queue:
+@@ -1025,8 +1021,6 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
+ dqm->active_runlist = false;
+ dqm->sdma_bitmap = (1 << get_num_sdma_queues(dqm)) - 1;
+
+- INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
+-
+ return 0;
+ }
+
+@@ -1036,7 +1030,7 @@ static int start_cpsch(struct device_queue_manager *dqm)
+
+ retval = 0;
+
+- retval = pm_init(&dqm->packets, dqm);
++ retval = pm_init(&dqm->packets, dqm, dqm->dev->mec_fw_version);
+ if (retval)
+ goto fail_packet_manager_init;
+
+@@ -1059,8 +1053,6 @@ static int start_cpsch(struct device_queue_manager *dqm)
+ init_interrupts(dqm);
+
+ mutex_lock(&dqm->lock);
+- /* clear hang status when driver try to start the hw scheduler */
+- dqm->is_hws_hang = false;
+ execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
+ mutex_unlock(&dqm->lock);
+
+@@ -1075,7 +1067,9 @@ static int start_cpsch(struct device_queue_manager *dqm)
+ static int stop_cpsch(struct device_queue_manager *dqm)
+ {
+ mutex_lock(&dqm->lock);
++
+ unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
++
+ mutex_unlock(&dqm->lock);
+
+ kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
+@@ -1136,7 +1130,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
+ struct qcm_process_device *qpd)
+ {
+ int retval;
+- struct mqd_manager *mqd_mgr;
++ struct mqd_manager *mqd;
+
+ retval = 0;
+
+@@ -1163,10 +1157,10 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
+ if (retval)
+ goto out_deallocate_sdma_queue;
+
+- mqd_mgr = dqm->ops.get_mqd_manager(dqm,
++ mqd = dqm->ops.get_mqd_manager(dqm,
+ get_mqd_type_from_queue_type(q->properties.type));
+
+- if (!mqd_mgr) {
++ if (!mqd) {
+ retval = -ENOMEM;
+ goto out_deallocate_doorbell;
+ }
+@@ -1183,7 +1177,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
+
+ q->properties.tba_addr = qpd->tba_addr;
+ q->properties.tma_addr = qpd->tma_addr;
+- retval = mqd_mgr->init_mqd(mqd_mgr, &q->mqd, &q->mqd_mem_obj,
++ retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
+ &q->gart_mqd_addr, &q->properties);
+ if (retval)
+ goto out_deallocate_doorbell;
+@@ -1230,13 +1224,6 @@ int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
+ while (*fence_addr != fence_value) {
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("qcm fence wait loop timeout expired\n");
+- /* In HWS case, this is used to halt the driver thread
+- * in order not to mess up CP states before doing
+- * scandumps for FW debugging.
+- */
+- while (halt_if_hws_hang)
+- schedule();
+-
+ return -ETIME;
+ }
+ schedule();
+@@ -1281,8 +1268,6 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
+ {
+ int retval = 0;
+
+- if (dqm->is_hws_hang)
+- return -EIO;
+ if (!dqm->active_runlist)
+ return retval;
+
+@@ -1321,13 +1306,9 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm,
+ {
+ int retval;
+
+- if (dqm->is_hws_hang)
+- return -EIO;
+ retval = unmap_queues_cpsch(dqm, filter, filter_param);
+ if (retval) {
+ pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
+- dqm->is_hws_hang = true;
+- schedule_work(&dqm->hw_exception_work);
+ return retval;
+ }
+
+@@ -1339,7 +1320,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
+ struct queue *q)
+ {
+ int retval;
+- struct mqd_manager *mqd_mgr;
++ struct mqd_manager *mqd;
+ bool preempt_all_queues;
+
+ preempt_all_queues = false;
+@@ -1359,9 +1340,9 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
+
+ }
+
+- mqd_mgr = dqm->ops.get_mqd_manager(dqm,
++ mqd = dqm->ops.get_mqd_manager(dqm,
+ get_mqd_type_from_queue_type(q->properties.type));
+- if (!mqd_mgr) {
++ if (!mqd) {
+ retval = -ENOMEM;
+ goto failed;
+ }
+@@ -1382,7 +1363,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
+ if (retval == -ETIME)
+ qpd->reset_wavefronts = true;
+
+- mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
++ mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+
+ /*
+ * Unconditionally decrement this counter, regardless of the queue's
+@@ -1531,7 +1512,7 @@ static int get_wave_state(struct device_queue_manager *dqm,
+ u32 *ctl_stack_used_size,
+ u32 *save_area_used_size)
+ {
+- struct mqd_manager *mqd_mgr;
++ struct mqd_manager *mqd;
+ int r;
+
+ mutex_lock(&dqm->lock);
+@@ -1542,19 +1523,19 @@ static int get_wave_state(struct device_queue_manager *dqm,
+ goto dqm_unlock;
+ }
+
+- mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
+- if (!mqd_mgr) {
++ mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
++ if (!mqd) {
+ r = -ENOMEM;
+ goto dqm_unlock;
+ }
+
+- if (!mqd_mgr->get_wave_state) {
++ if (!mqd->get_wave_state) {
+ r = -EINVAL;
+ goto dqm_unlock;
+ }
+
+- r = mqd_mgr->get_wave_state(mqd_mgr, q->mqd, ctl_stack,
+- ctl_stack_used_size, save_area_used_size);
++ r = mqd->get_wave_state(mqd, q->mqd, ctl_stack, ctl_stack_used_size,
++ save_area_used_size);
+
+ dqm_unlock:
+ mutex_unlock(&dqm->lock);
+@@ -1567,7 +1548,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
+ int retval;
+ struct queue *q, *next;
+ struct kernel_queue *kq, *kq_next;
+- struct mqd_manager *mqd_mgr;
++ struct mqd_manager *mqd;
+ struct device_process_node *cur, *next_dpn;
+ enum kfd_unmap_queues_filter filter =
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;
+@@ -1609,7 +1590,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
+ }
+
+ retval = execute_queues_cpsch(dqm, filter, 0);
+- if ((!dqm->is_hws_hang) && (retval || qpd->reset_wavefronts)) {
++ if (retval || qpd->reset_wavefronts) {
+ pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev);
+ dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process);
+ qpd->reset_wavefronts = false;
+@@ -1617,15 +1598,15 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
+
+ /* lastly, free mqd resources */
+ list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
+- mqd_mgr = dqm->ops.get_mqd_manager(dqm,
++ mqd = dqm->ops.get_mqd_manager(dqm,
+ get_mqd_type_from_queue_type(q->properties.type));
+- if (!mqd_mgr) {
++ if (!mqd) {
+ retval = -ENOMEM;
+ goto out;
+ }
+ list_del(&q->list);
+ qpd->queue_count--;
+- mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
++ mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+ }
+
+ out:
+@@ -1644,13 +1625,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
+ return NULL;
+
+ switch (dev->device_info->asic_family) {
+- /* HWS is not available on Hawaii. */
+ case CHIP_HAWAII:
+- /* HWS depends on CWSR for timely dequeue. CWSR is not
+- * available on Tonga.
+- *
+- * FIXME: This argument also applies to Kaveri.
+- */
+ case CHIP_TONGA:
+ dqm->sched_policy = KFD_SCHED_POLICY_NO_HWS;
+ break;
+@@ -1729,9 +1704,8 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
+ break;
+
+ case CHIP_VEGA10:
+- case CHIP_VEGA20:
+ case CHIP_RAVEN:
+- device_queue_manager_init_v9(&dqm->asic_ops);
++ device_queue_manager_init_v9_vega10(&dqm->asic_ops);
+ break;
+ default:
+ WARN(1, "Unexpected ASIC family %u",
+@@ -1770,13 +1744,6 @@ int kfd_process_vm_fault(struct device_queue_manager *dqm,
+ return ret;
+ }
+
+-static void kfd_process_hw_exception(struct work_struct *work)
+-{
+- struct device_queue_manager *dqm = container_of(work,
+- struct device_queue_manager, hw_exception_work);
+- dqm->dev->kfd2kgd->gpu_recover(dqm->dev->kgd);
+-}
+-
+ #if defined(CONFIG_DEBUG_FS)
+
+ static void seq_reg_dump(struct seq_file *m,
+@@ -1841,9 +1808,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
+ }
+
+ for (pipe = 0; pipe < get_num_sdma_engines(dqm); pipe++) {
+- for (queue = 0;
+- queue < dqm->dev->device_info->num_sdma_queues_per_engine;
+- queue++) {
++ for (queue = 0; queue < KFD_SDMA_QUEUES_PER_ENGINE; queue++) {
+ r = dqm->dev->kfd2kgd->hqd_sdma_dump(
+ dqm->dev->kgd, pipe, queue, &dump, &n_regs);
+ if (r)
+@@ -1860,16 +1825,4 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
+ return r;
+ }
+
+-int dqm_debugfs_execute_queues(struct device_queue_manager *dqm)
+-{
+- int r = 0;
+-
+- mutex_lock(&dqm->lock);
+- dqm->active_runlist = true;
+- r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
+- mutex_unlock(&dqm->lock);
+-
+- return r;
+-}
+-
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+index 1c4ef00..978458a 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+@@ -31,6 +31,7 @@
+
+ #define KFD_UNMAP_LATENCY_MS (4000)
+ #define QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS (2 * KFD_UNMAP_LATENCY_MS + 1000)
++#define KFD_SDMA_QUEUES_PER_ENGINE (2)
+
+ struct device_process_node {
+ struct qcm_process_device *qpd;
+@@ -174,7 +175,7 @@ struct device_queue_manager {
+ struct device_queue_manager_ops ops;
+ struct device_queue_manager_asic_ops asic_ops;
+
+- struct mqd_manager *mqd_mgrs[KFD_MQD_TYPE_MAX];
++ struct mqd_manager *mqds[KFD_MQD_TYPE_MAX];
+ struct packet_manager packets;
+ struct kfd_dev *dev;
+ struct mutex lock;
+@@ -194,10 +195,6 @@ struct device_queue_manager {
+ struct kfd_mem_obj *fence_mem;
+ bool active_runlist;
+ int sched_policy;
+-
+- /* hw exception */
+- bool is_hws_hang;
+- struct work_struct hw_exception_work;
+ };
+
+ void device_queue_manager_init_cik(
+@@ -208,7 +205,7 @@ void device_queue_manager_init_vi(
+ struct device_queue_manager_asic_ops *asic_ops);
+ void device_queue_manager_init_vi_tonga(
+ struct device_queue_manager_asic_ops *asic_ops);
+-void device_queue_manager_init_v9(
++void device_queue_manager_init_v9_vega10(
+ struct device_queue_manager_asic_ops *asic_ops);
+ void program_sh_mem_settings(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd);
+@@ -217,11 +214,18 @@ unsigned int get_queues_per_pipe(struct device_queue_manager *dqm);
+ unsigned int get_pipes_per_mec(struct device_queue_manager *dqm);
+ unsigned int get_num_sdma_queues(struct device_queue_manager *dqm);
+
++int process_evict_queues(struct device_queue_manager *dqm,
++ struct qcm_process_device *qpd);
++int process_restore_queues(struct device_queue_manager *dqm,
++ struct qcm_process_device *qpd);
++
++
+ static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
+ {
+ return (pdd->lds_base >> 16) & 0xFF;
+ }
+
++/* This function is only useful for GFXv7 and v8 */
+ static inline unsigned int
+ get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
+ {
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
+index 4175153..6198bf2 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2016-2018 Advanced Micro Devices, Inc.
++ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+@@ -32,7 +32,7 @@ static int update_qpd_v9(struct device_queue_manager *dqm,
+ static void init_sdma_vm_v9(struct device_queue_manager *dqm, struct queue *q,
+ struct qcm_process_device *qpd);
+
+-void device_queue_manager_init_v9(
++void device_queue_manager_init_v9_vega10(
+ struct device_queue_manager_asic_ops *asic_ops)
+ {
+ asic_ops->update_qpd = update_qpd_v9;
+@@ -60,7 +60,7 @@ static int update_qpd_v9(struct device_queue_manager *dqm,
+ qpd->sh_mem_config =
+ SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+ SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
+- if (noretry &&
++ if (vega10_noretry &&
+ !dqm->dev->device_info->needs_iommu_device)
+ qpd->sh_mem_config |=
+ 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
+index fd60a11..030b014 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
+@@ -33,30 +33,26 @@ static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size);
++static int update_qpd_vi(struct device_queue_manager *dqm,
++ struct qcm_process_device *qpd);
++static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
++ struct qcm_process_device *qpd);
++
++/*
++ * Tonga device queue manager functions
++ */
+ static bool set_cache_memory_policy_vi_tonga(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size);
+-static int update_qpd_vi(struct device_queue_manager *dqm,
+- struct qcm_process_device *qpd);
+ static int update_qpd_vi_tonga(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd);
+-static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
+- struct qcm_process_device *qpd);
+ static void init_sdma_vm_tonga(struct device_queue_manager *dqm,
+ struct queue *q,
+ struct qcm_process_device *qpd);
+
+-void device_queue_manager_init_vi(
+- struct device_queue_manager_asic_ops *asic_ops)
+-{
+- asic_ops->set_cache_memory_policy = set_cache_memory_policy_vi;
+- asic_ops->update_qpd = update_qpd_vi;
+- asic_ops->init_sdma_vm = init_sdma_vm;
+-}
+-
+ void device_queue_manager_init_vi_tonga(
+ struct device_queue_manager_asic_ops *asic_ops)
+ {
+@@ -65,6 +61,15 @@ void device_queue_manager_init_vi_tonga(
+ asic_ops->init_sdma_vm = init_sdma_vm_tonga;
+ }
+
++
++void device_queue_manager_init_vi(
++ struct device_queue_manager_asic_ops *asic_ops)
++{
++ asic_ops->set_cache_memory_policy = set_cache_memory_policy_vi;
++ asic_ops->update_qpd = update_qpd_vi;
++ asic_ops->init_sdma_vm = init_sdma_vm;
++}
++
+ static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
+ {
+ /* In 64-bit mode, we can only control the top 3 bits of the LDS,
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+index ebe79bf..fc41689 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+@@ -115,7 +115,7 @@ int kfd_doorbell_init(struct kfd_dev *kfd)
+ pr_debug("doorbell aperture size == 0x%08lX\n",
+ kfd->shared_resources.doorbell_aperture_size);
+
+- pr_debug("doorbell kernel address == %p\n", kfd->doorbell_kernel_ptr);
++ pr_debug("doorbell kernel address == 0x%p\n", kfd->doorbell_kernel_ptr);
+
+ return 0;
+ }
+@@ -188,9 +188,9 @@ void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
+ *doorbell_off = kfd->doorbell_id_offset + inx;
+
+ pr_debug("Get kernel queue doorbell\n"
+- " doorbell offset == 0x%08X\n"
+- " doorbell index == 0x%x\n",
+- *doorbell_off, inx);
++ " doorbell offset == 0x%08X\n"
++ " kernel address == 0x%p\n",
++ *doorbell_off, (kfd->doorbell_kernel_ptr + inx));
+
+ return kfd->doorbell_kernel_ptr + inx;
+ }
+@@ -199,8 +199,7 @@ void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr)
+ {
+ unsigned int inx;
+
+- inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr)
+- * sizeof(u32) / kfd->device_info->doorbell_size;
++ inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr);
+
+ mutex_lock(&kfd->doorbell_mutex);
+ __clear_bit(inx, kfd->doorbell_available_index);
+@@ -211,7 +210,7 @@ void write_kernel_doorbell(void __iomem *db, u32 value)
+ {
+ if (db) {
+ writel(value, db);
+- pr_debug("Writing %d to doorbell address %p\n", value, db);
++ pr_debug("Writing %d to doorbell address 0x%p\n", value, db);
+ }
+ }
+
+@@ -221,10 +220,14 @@ void write_kernel_doorbell64(void __iomem *db, u64 value)
+ WARN(((unsigned long)db & 7) != 0,
+ "Unaligned 64-bit doorbell");
+ writeq(value, (u64 __iomem *)db);
+- pr_debug("writing %llu to doorbell address %p\n", value, db);
++ pr_debug("writing %llu to doorbell address 0x%p\n", value, db);
+ }
+ }
+
++/*
++ * queue_ids are in the range [0,MAX_PROCESS_QUEUES) and are mapped 1:1
++ * to doorbells within the process's doorbell page
++ */
+ unsigned int kfd_doorbell_id_to_offset(struct kfd_dev *kfd,
+ struct kfd_process *process,
+ unsigned int doorbell_id)
+@@ -236,8 +239,7 @@ unsigned int kfd_doorbell_id_to_offset(struct kfd_dev *kfd,
+ * units regardless of the ASIC-dependent doorbell size.
+ */
+ return kfd->doorbell_id_offset +
+- process->doorbell_index
+- * kfd_doorbell_process_slice(kfd) / sizeof(u32) +
++ process->doorbell_index * (kfd_doorbell_process_slice(kfd)/sizeof(u32)) +
+ doorbell_id * kfd->device_info->doorbell_size / sizeof(u32);
+ }
+
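For reference, the offset arithmetic used by kfd_doorbell_id_to_offset() above can be sketched standalone; the device constants below are hypothetical stand-ins for kfd->doorbell_id_offset, kfd_doorbell_process_slice() and device_info->doorbell_size:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical device constants, for illustration only. */
    #define DOORBELL_ID_OFFSET      0x200  /* first doorbell usable by KFD */
    #define DOORBELL_PROCESS_SLICE  4096   /* bytes of doorbell space per process */
    #define DOORBELL_SIZE           8      /* bytes per doorbell */

    /* Mirrors the patched return statement: the result is in u32 units. */
    static unsigned int doorbell_id_to_offset(unsigned int process_index,
                                              unsigned int doorbell_id)
    {
        return DOORBELL_ID_OFFSET +
               process_index * (DOORBELL_PROCESS_SLICE / sizeof(uint32_t)) +
               doorbell_id * DOORBELL_SIZE / sizeof(uint32_t);
    }

    int main(void)
    {
        /* process 2, doorbell 5 -> 0x200 + 2 * 1024 + 5 * 2 = 0xA0A */
        printf("offset = 0x%X\n", doorbell_id_to_offset(2, 5));
        return 0;
    }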
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+index 1dc1584..a92ca78 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+@@ -51,8 +51,8 @@ struct kfd_event_waiter {
+ */
+ struct kfd_signal_page {
+ uint64_t *kernel_address;
++ uint64_t handle;
+ uint64_t __user *user_address;
+- bool need_to_free_pages;
+ };
+
+
+@@ -80,7 +80,6 @@ static struct kfd_signal_page *allocate_signal_page(struct kfd_process *p)
+ KFD_SIGNAL_EVENT_LIMIT * 8);
+
+ page->kernel_address = backing_store;
+- page->need_to_free_pages = true;
+ pr_debug("Allocated new event signal page at %p, for process %p\n",
+ page, p);
+
+@@ -100,17 +99,9 @@ static int allocate_event_notification_slot(struct kfd_process *p,
+ p->signal_page = allocate_signal_page(p);
+ if (!p->signal_page)
+ return -ENOMEM;
+- /* Oldest user mode expects 256 event slots */
+- p->signal_mapped_size = 256*8;
+ }
+
+- /*
+- * Compatibility with old user mode: Only use signal slots
+- * user mode has mapped, may be less than
+- * KFD_SIGNAL_EVENT_LIMIT. This also allows future increase
+- * of the event limit without breaking user mode.
+- */
+- id = idr_alloc(&p->event_idr, ev, 0, p->signal_mapped_size / 8,
++ id = idr_alloc(&p->event_idr, ev, 0, KFD_SIGNAL_EVENT_LIMIT,
+ GFP_KERNEL);
+ if (id < 0)
+ return id;
+@@ -121,6 +112,29 @@ static int allocate_event_notification_slot(struct kfd_process *p,
+ return 0;
+ }
+
++static struct kfd_signal_page *allocate_signal_page_dgpu(
++ struct kfd_process *p, uint64_t *kernel_address, uint64_t handle)
++{
++ struct kfd_signal_page *my_page;
++
++ my_page = kzalloc(sizeof(*my_page), GFP_KERNEL);
++ if (!my_page)
++ return NULL;
++
++ /* Initialize all events to unsignaled */
++ memset(kernel_address, (uint8_t) UNSIGNALED_EVENT_SLOT,
++ KFD_SIGNAL_EVENT_LIMIT * 8);
++
++ my_page->kernel_address = kernel_address;
++ my_page->handle = handle;
++ my_page->user_address = NULL;
++
++ pr_debug("Allocated new event signal page at %p, for process %p\n",
++ my_page, p);
++
++ return my_page;
++}
++
+ /*
+ * Assumes that p->event_mutex is held and of course that p is not going
+ * away (current or locked).
+@@ -184,8 +198,7 @@ static int create_signal_event(struct file *devkfd,
+ {
+ int ret;
+
+- if (p->signal_mapped_size &&
+- p->signal_event_count == p->signal_mapped_size / 8) {
++ if (p->signal_event_count == KFD_SIGNAL_EVENT_LIMIT) {
+ if (!p->signal_event_limit_reached) {
+ pr_warn("Signal event wasn't created because limit was reached\n");
+ p->signal_event_limit_reached = true;
+@@ -271,9 +284,9 @@ static void shutdown_signal_page(struct kfd_process *p)
+ struct kfd_signal_page *page = p->signal_page;
+
+ if (page) {
+- if (page->need_to_free_pages)
++ if (page->user_address)
+ free_pages((unsigned long)page->kernel_address,
+- get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
++ get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
+ kfree(page);
+ }
+ }
+@@ -295,34 +308,11 @@ static bool event_can_be_cpu_signaled(const struct kfd_event *ev)
+ return ev->type == KFD_EVENT_TYPE_SIGNAL;
+ }
+
+-int kfd_event_page_set(struct kfd_process *p, void *kernel_address,
+- uint64_t size)
+-{
+- struct kfd_signal_page *page;
+-
+- if (p->signal_page)
+- return -EBUSY;
+-
+- page = kzalloc(sizeof(*page), GFP_KERNEL);
+- if (!page)
+- return -ENOMEM;
+-
+- /* Initialize all events to unsignaled */
+- memset(kernel_address, (uint8_t) UNSIGNALED_EVENT_SLOT,
+- KFD_SIGNAL_EVENT_LIMIT * 8);
+-
+- page->kernel_address = kernel_address;
+-
+- p->signal_page = page;
+- p->signal_mapped_size = size;
+-
+- return 0;
+-}
+-
+ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
+ uint32_t event_type, bool auto_reset, uint32_t node_id,
+ uint32_t *event_id, uint32_t *event_trigger_data,
+- uint64_t *event_page_offset, uint32_t *event_slot_index)
++ uint64_t *event_page_offset, uint32_t *event_slot_index,
++ void *kern_addr)
+ {
+ int ret = 0;
+ struct kfd_event *ev = kzalloc(sizeof(*ev), GFP_KERNEL);
+@@ -336,10 +326,19 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
+
+ init_waitqueue_head(&ev->wq);
+
+- *event_page_offset = 0;
+-
+ mutex_lock(&p->event_mutex);
+
++ if (kern_addr && !p->signal_page) {
++ p->signal_page = allocate_signal_page_dgpu(p, kern_addr,
++ *event_page_offset);
++ if (!p->signal_page) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ }
++
++ *event_page_offset = 0;
++
+ switch (event_type) {
+ case KFD_EVENT_TYPE_SIGNAL:
+ case KFD_EVENT_TYPE_DEBUG:
+@@ -362,6 +361,7 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
+ kfree(ev);
+ }
+
++out:
+ mutex_unlock(&p->event_mutex);
+
+ return ret;
+@@ -390,11 +390,7 @@ static void set_event(struct kfd_event *ev)
+ {
+ struct kfd_event_waiter *waiter;
+
+- /* Auto reset if the list is non-empty and we're waking
+- * someone. waitqueue_active is safe here because we're
+- * protected by the p->event_mutex, which is also held when
+- * updating the wait queues in kfd_wait_on_events.
+- */
++ /* Auto reset if the list is non-empty and we're waking someone. */
+ ev->signaled = !ev->auto_reset || !waitqueue_active(&ev->wq);
+
+ list_for_each_entry(waiter, &ev->wq.head, wait.entry)
+@@ -781,12 +777,12 @@ int kfd_wait_on_events(struct kfd_process *p,
+
+ int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
+ {
++
+ unsigned long pfn;
+ struct kfd_signal_page *page;
+- int ret;
+
+- /* check required size doesn't exceed the allocated size */
+- if (get_order(KFD_SIGNAL_EVENT_LIMIT * 8) <
++ /* check that the requested size matches the signal page size */
++ if (get_order(KFD_SIGNAL_EVENT_LIMIT * 8) !=
+ get_order(vma->vm_end - vma->vm_start)) {
+ pr_err("Event page mmap requested illegal size\n");
+ return -EINVAL;
+@@ -816,12 +812,8 @@ int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
+ page->user_address = (uint64_t __user *)vma->vm_start;
+
+ /* mapping the page to user process */
+- ret = remap_pfn_range(vma, vma->vm_start, pfn,
++ return remap_pfn_range(vma, vma->vm_start, pfn,
+ vma->vm_end - vma->vm_start, vma->vm_page_prot);
+- if (!ret)
+- p->signal_mapped_size = vma->vm_end - vma->vm_start;
+-
+- return ret;
+ }
+
+ /*
+@@ -1012,30 +1004,3 @@ void kfd_signal_vm_fault_event(struct kfd_dev *dev, unsigned int pasid,
+ mutex_unlock(&p->event_mutex);
+ kfd_unref_process(p);
+ }
+-
+-void kfd_signal_reset_event(struct kfd_dev *dev)
+-{
+- struct kfd_hsa_hw_exception_data hw_exception_data;
+- struct kfd_process *p;
+- struct kfd_event *ev;
+- unsigned int temp;
+- uint32_t id, idx;
+-
+- /* Whole gpu reset caused by GPU hang , and memory is lost */
+- memset(&hw_exception_data, 0, sizeof(hw_exception_data));
+- hw_exception_data.gpu_id = dev->id;
+- hw_exception_data.memory_lost = 1;
+-
+- idx = srcu_read_lock(&kfd_processes_srcu);
+- hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
+- mutex_lock(&p->event_mutex);
+- id = KFD_FIRST_NONSIGNAL_EVENT_ID;
+- idr_for_each_entry_continue(&p->event_idr, ev, id)
+- if (ev->type == KFD_EVENT_TYPE_HW_EXCEPTION) {
+- ev->hw_exception_data = hw_exception_data;
+- set_event(ev);
+- }
+- mutex_unlock(&p->event_mutex);
+- }
+- srcu_read_unlock(&kfd_processes_srcu, idx);
+-}
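The signal page handled above is an array of 8-byte slots, one per signal event, indexed by the event ID that idr_alloc() returns; a minimal sketch of that layout, with hypothetical stand-in constants:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical stand-ins for KFD_SIGNAL_EVENT_LIMIT and the unsignaled marker. */
    #define SIGNAL_EVENT_LIMIT    4096
    #define UNSIGNALED_EVENT_SLOT ((uint64_t)-1)

    int main(void)
    {
        uint64_t *slots = malloc(SIGNAL_EVENT_LIMIT * sizeof(*slots));
        unsigned int event_id = 42;  /* would come from idr_alloc() */

        if (!slots)
            return 1;

        /* Initialize all events to unsignaled, as allocate_signal_page() does. */
        memset(slots, (uint8_t)UNSIGNALED_EVENT_SLOT,
               SIGNAL_EVENT_LIMIT * sizeof(*slots));

        /* A signaling write targets the slot that belongs to this event ID. */
        slots[event_id] = 1;
        printf("slot %u signaled: %s\n", event_id,
               slots[event_id] != UNSIGNALED_EVENT_SLOT ? "yes" : "no");

        free(slots);
        return 0;
    }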
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.h b/drivers/gpu/drm/amd/amdkfd/kfd_events.h
+index c7ac6c7..abca5bf 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.h
+@@ -66,7 +66,6 @@ struct kfd_event {
+ /* type specific data */
+ union {
+ struct kfd_hsa_memory_exception_data memory_exception_data;
+- struct kfd_hsa_hw_exception_data hw_exception_data;
+ };
+ };
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+index 8f123a2..2c00711 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+@@ -289,6 +289,7 @@
+
+ #define MAKE_LDS_APP_BASE_VI() \
+ (((uint64_t)(0x1UL) << 61) + 0x0)
++
+ #define MAKE_LDS_APP_LIMIT(base) \
+ (((uint64_t)(base) & 0xFFFFFFFF00000000UL) | 0xFFFFFFFF)
+
+@@ -312,7 +313,17 @@
+ #define SVM_CWSR_BASE (SVM_USER_BASE - KFD_CWSR_TBA_TMA_SIZE)
+ #define SVM_IB_BASE (SVM_CWSR_BASE - PAGE_SIZE)
+
+-static void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id)
++int kfd_set_process_dgpu_aperture(struct kfd_process_device *pdd,
++ uint64_t base, uint64_t limit)
++{
++ if (base < SVM_USER_BASE) {
++ pr_err("Set dgpu vm base 0x%llx failed.\n", base);
++ return -EINVAL;
++ }
++ return 0;
++}
++
++void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id)
+ {
+ /*
+ * node id couldn't be 0 - the three MSB bits of
+@@ -321,42 +332,19 @@ static void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id)
+ pdd->lds_base = MAKE_LDS_APP_BASE_VI();
+ pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base);
+
+- if (!pdd->dev->device_info->needs_iommu_device) {
+- /* dGPUs: SVM aperture starting at 0
+- * with small reserved space for kernel.
+- * Set them to CANONICAL addresses.
+- */
+- pdd->gpuvm_base = SVM_USER_BASE;
+- pdd->gpuvm_limit =
+- pdd->dev->shared_resources.gpuvm_size - 1;
+- } else {
+- /* set them to non CANONICAL addresses, and no SVM is
+- * allocated.
+- */
+- pdd->gpuvm_base = MAKE_GPUVM_APP_BASE_VI(id + 1);
+- pdd->gpuvm_limit = MAKE_GPUVM_APP_LIMIT(pdd->gpuvm_base,
+- pdd->dev->shared_resources.gpuvm_size);
+- }
++ pdd->gpuvm_base = MAKE_GPUVM_APP_BASE_VI(id + 1);
++ pdd->gpuvm_limit = MAKE_GPUVM_APP_LIMIT(
++ pdd->gpuvm_base, pdd->dev->shared_resources.gpuvm_size);
+
+ pdd->scratch_base = MAKE_SCRATCH_APP_BASE_VI();
+ pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base);
+ }
+
+-static void kfd_init_apertures_v9(struct kfd_process_device *pdd, uint8_t id)
++void kfd_init_apertures_v9(struct kfd_process_device *pdd, uint8_t id)
+ {
+ pdd->lds_base = MAKE_LDS_APP_BASE_V9();
+ pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base);
+
+- /* Raven needs SVM to support graphic handle, etc. Leave the small
+- * reserved space before SVM on Raven as well, even though we don't
+- * have to.
+- * Set gpuvm_base and gpuvm_limit to CANONICAL addresses so that they
+- * are used in Thunk to reserve SVM.
+- */
+- pdd->gpuvm_base = SVM_USER_BASE;
+- pdd->gpuvm_limit =
+- pdd->dev->shared_resources.gpuvm_size - 1;
+-
+ pdd->scratch_base = MAKE_SCRATCH_APP_BASE_V9();
+ pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base);
+ }
+@@ -377,10 +365,10 @@ int kfd_init_apertures(struct kfd_process *process)
+ pdd = kfd_create_process_device_data(dev, process);
+ if (!pdd) {
+ pr_err("Failed to create process device data\n");
+- return -ENOMEM;
++ return -1;
+ }
+ /*
+- * For 64 bit process apertures will be statically reserved in
++ * For a 64 bit process the aperture will be statically reserved in
+ * the x86_64 non canonical process address space
+ * amdkfd doesn't currently support apertures for 32 bit process
+ */
+@@ -400,20 +388,21 @@ int kfd_init_apertures(struct kfd_process *process)
+ kfd_init_apertures_vi(pdd, id);
+ break;
+ case CHIP_VEGA10:
+- case CHIP_VEGA20:
+ case CHIP_RAVEN:
+ kfd_init_apertures_v9(pdd, id);
+ break;
+ default:
+- WARN(1, "Unexpected ASIC family %u",
+- dev->device_info->asic_family);
+- return -EINVAL;
++ pr_err("Unknown chip in kfd_init_apertures\n");
++ return -1;
+ }
+
+ if (!dev->device_info->needs_iommu_device) {
+- /* dGPUs: the reserved space for kernel
+- * before SVM
++ /* dGPUs: SVM aperture starting at 0
++ * with small reserved space for kernel
+ */
++ pdd->gpuvm_base = SVM_USER_BASE;
++ pdd->gpuvm_limit =
++ dev->shared_resources.gpuvm_size - 1;
+ pdd->qpd.cwsr_base = SVM_CWSR_BASE;
+ pdd->qpd.ib_base = SVM_IB_BASE;
+ }
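The dGPU aperture layout set up above follows the SVM_* macro relations from earlier in this file; a small sketch of the arithmetic, with hypothetical numeric values in place of the driver constants:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical values; only the relations mirror the SVM_* macros above. */
    #define PAGE_SIZE             4096ULL
    #define KFD_CWSR_TBA_TMA_SIZE (PAGE_SIZE * 2)
    #define SVM_USER_BASE         0x1000000ULL
    #define GPUVM_SIZE            (1ULL << 40)  /* shared_resources.gpuvm_size */

    int main(void)
    {
        uint64_t svm_cwsr_base = SVM_USER_BASE - KFD_CWSR_TBA_TMA_SIZE;
        uint64_t svm_ib_base   = svm_cwsr_base - PAGE_SIZE;

        /* dGPU branch of kfd_init_apertures(): SVM aperture with a small
         * reserved region for CWSR and IB just below it.
         */
        uint64_t gpuvm_base  = SVM_USER_BASE;
        uint64_t gpuvm_limit = GPUVM_SIZE - 1;

        printf("gpuvm 0x%llx-0x%llx, cwsr 0x%llx, ib 0x%llx\n",
               (unsigned long long)gpuvm_base, (unsigned long long)gpuvm_limit,
               (unsigned long long)svm_cwsr_base, (unsigned long long)svm_ib_base);
        return 0;
    }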
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+index f836897..009d6f4 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2016-2018 Advanced Micro Devices, Inc.
++ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+@@ -25,43 +25,70 @@
+ #include "soc15_int.h"
+
+
++static uint32_t kfd_get_pasid_from_vmid(struct kfd_dev *dev, uint8_t vmid)
++{
++ uint32_t pasid = 0;
++ const struct kfd2kgd_calls *f2g = dev->kfd2kgd;
++
++ if (f2g->get_atc_vmid_pasid_mapping_valid(dev->kgd, vmid))
++ pasid = f2g->get_atc_vmid_pasid_mapping_pasid(dev->kgd, vmid);
++
++ return pasid;
++}
++
+ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
+ const uint32_t *ih_ring_entry,
+ uint32_t *patched_ihre,
+ bool *patched_flag)
+ {
+ uint16_t source_id, client_id, pasid, vmid;
+- const uint32_t *data = ih_ring_entry;
++ bool result = false;
+
+- /* Only handle interrupts from KFD VMIDs */
++ source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry);
++ client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
++ pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);
+ vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
+- if (vmid < dev->vm_info.first_vmid_kfd ||
+- vmid > dev->vm_info.last_vmid_kfd)
+- return 0;
+
+- /* If there is no valid PASID, it's likely a firmware bug */
+- pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);
+- if (WARN_ONCE(pasid == 0, "FW bug: No PASID in KFD interrupt"))
+- return 0;
++ if (pasid) {
++ const uint32_t *data = ih_ring_entry;
+
+- source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry);
+- client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
++ pr_debug("client id 0x%x, source id %d, pasid 0x%x. raw data:\n",
++ client_id, source_id, pasid);
++ pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
++ data[0], data[1], data[2], data[3],
++ data[4], data[5], data[6], data[7]);
++ }
++
++ if ((vmid >= dev->vm_info.first_vmid_kfd &&
++ vmid <= dev->vm_info.last_vmid_kfd) &&
++ (source_id == SOC15_INTSRC_CP_END_OF_PIPE ||
++ source_id == SOC15_INTSRC_SDMA_TRAP ||
++ source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG ||
++ source_id == SOC15_INTSRC_CP_BAD_OPCODE ||
++ client_id == SOC15_IH_CLIENTID_VMC ||
++ client_id == SOC15_IH_CLIENTID_UTCL2)) {
++
++ /*
++ * KFD wants to handle this INT, but MEC firmware did
++ * not send a pasid. Try to get it from the vmid mapping
++ * and patch the ih entry. It's a temporary workaround.
++ */
++ WARN_ONCE((!pasid), "Fix me.\n");
++ if (!pasid) {
++ uint32_t temp = le32_to_cpu(ih_ring_entry[3]);
++
++ pasid = kfd_get_pasid_from_vmid(dev, vmid);
++ memcpy(patched_ihre, ih_ring_entry,
++ dev->device_info->ih_ring_entry_size);
++ patched_ihre[3] = cpu_to_le32(temp | pasid);
++ *patched_flag = true;
++ }
++ result = pasid ? true : false;
++ }
++
++ /* Do not process in ISR, just request it to be forwarded to WQ. */
++ return result;
+
+- pr_debug("client id 0x%x, source id %d, pasid 0x%x. raw data:\n",
+- client_id, source_id, pasid);
+- pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
+- data[0], data[1], data[2], data[3],
+- data[4], data[5], data[6], data[7]);
+-
+- /* Interrupt types we care about: various signals and faults.
+- * They will be forwarded to a work queue (see below).
+- */
+- return source_id == SOC15_INTSRC_CP_END_OF_PIPE ||
+- source_id == SOC15_INTSRC_SDMA_TRAP ||
+- source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG ||
+- source_id == SOC15_INTSRC_CP_BAD_OPCODE ||
+- client_id == SOC15_IH_CLIENTID_VMC ||
+- client_id == SOC15_IH_CLIENTID_UTCL2;
+ }
+
+ static void event_interrupt_wq_v9(struct kfd_dev *dev,
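The PASID workaround above copies the IH ring entry and folds the PASID recovered from the VMID mapping into dword 3 of the copy; a standalone sketch of that patching step (entry size and PASID value are hypothetical, endian conversion omitted):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define IH_RING_ENTRY_DWORDS 8  /* hypothetical ih_ring_entry_size / 4 */

    static void patch_ih_entry_pasid(const uint32_t *entry, uint32_t *patched,
                                     uint32_t pasid_from_vmid)
    {
        /* Copy the whole entry, then OR the recovered PASID into dword 3. */
        memcpy(patched, entry, IH_RING_ENTRY_DWORDS * sizeof(uint32_t));
        patched[3] |= pasid_from_vmid;
    }

    int main(void)
    {
        uint32_t entry[IH_RING_ENTRY_DWORDS] = { 0 };  /* PASID field empty */
        uint32_t patched[IH_RING_ENTRY_DWORDS];

        patch_ih_entry_pasid(entry, patched, 0x8001);  /* hypothetical PASID */
        printf("patched dword 3 = 0x%08X\n", patched[3]);
        return 0;
    }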
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
+index 7a61f38..5b798f9 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
+@@ -75,8 +75,7 @@ int kfd_iommu_device_init(struct kfd_dev *kfd)
+ }
+
+ if ((iommu_info.flags & required_iommu_flags) != required_iommu_flags) {
+- dev_err(kfd_device,
+- "error required iommu flags ats %i, pri %i, pasid %i\n",
++ dev_err(kfd_device, "error required iommu flags ats %i, pri %i, pasid %i\n",
+ (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP) != 0,
+ (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) != 0,
+ (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP)
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_ipc.c b/drivers/gpu/drm/amd/amdkfd/kfd_ipc.c
+index a53d954..97806ed 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_ipc.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_ipc.c
+@@ -140,7 +140,7 @@ static int kfd_import_dmabuf_create_kfd_bo(struct kfd_dev *dev,
+ goto err_unlock;
+
+ idr_handle = kfd_process_device_create_obj_handle(pdd, mem,
+- va_addr, size, 0, 0,
++ va_addr, size,
+ ipc_obj);
+ if (idr_handle < 0) {
+ r = -EFAULT;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+index e78445d..8cf9d44 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+@@ -59,7 +59,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
+ switch (type) {
+ case KFD_QUEUE_TYPE_DIQ:
+ case KFD_QUEUE_TYPE_HIQ:
+- kq->mqd_mgr = dev->dqm->ops.get_mqd_manager(dev->dqm,
++ kq->mqd = dev->dqm->ops.get_mqd_manager(dev->dqm,
+ KFD_MQD_TYPE_HIQ);
+ break;
+ default:
+@@ -67,7 +67,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
+ return false;
+ }
+
+- if (!kq->mqd_mgr)
++ if (!kq->mqd)
+ return false;
+
+ prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off);
+@@ -131,7 +131,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
+ kq->queue->device = dev;
+ kq->queue->process = kfd_get_process(current);
+
+- retval = kq->mqd_mgr->init_mqd(kq->mqd_mgr, &kq->queue->mqd,
++ retval = kq->mqd->init_mqd(kq->mqd, &kq->queue->mqd,
+ &kq->queue->mqd_mem_obj,
+ &kq->queue->gart_mqd_addr,
+ &kq->queue->properties);
+@@ -143,9 +143,9 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
+ pr_debug("Assigning hiq to hqd\n");
+ kq->queue->pipe = KFD_CIK_HIQ_PIPE;
+ kq->queue->queue = KFD_CIK_HIQ_QUEUE;
+- kq->mqd_mgr->load_mqd(kq->mqd_mgr, kq->queue->mqd,
+- kq->queue->pipe, kq->queue->queue,
+- &kq->queue->properties, NULL);
++ kq->mqd->load_mqd(kq->mqd, kq->queue->mqd, kq->queue->pipe,
++ kq->queue->queue, &kq->queue->properties,
++ NULL);
+ } else {
+ /* allocate fence for DIQ */
+
+@@ -183,7 +183,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
+ static void uninitialize(struct kernel_queue *kq)
+ {
+ if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ)
+- kq->mqd_mgr->destroy_mqd(kq->mqd_mgr,
++ kq->mqd->destroy_mqd(kq->mqd,
+ kq->queue->mqd,
+ KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
+ KFD_UNMAP_LATENCY_MS,
+@@ -192,8 +192,7 @@ static void uninitialize(struct kernel_queue *kq)
+ else if (kq->queue->properties.type == KFD_QUEUE_TYPE_DIQ)
+ kfd_gtt_sa_free(kq->dev, kq->fence_mem_obj);
+
+- kq->mqd_mgr->uninit_mqd(kq->mqd_mgr, kq->queue->mqd,
+- kq->queue->mqd_mem_obj);
++ kq->mqd->uninit_mqd(kq->mqd, kq->queue->mqd, kq->queue->mqd_mem_obj);
+
+ kfd_gtt_sa_free(kq->dev, kq->rptr_mem);
+ kfd_gtt_sa_free(kq->dev, kq->wptr_mem);
+@@ -316,13 +315,7 @@ static void submit_packet(struct kernel_queue *kq)
+
+ static void rollback_packet(struct kernel_queue *kq)
+ {
+- if (kq->dev->device_info->doorbell_size == 8) {
+- kq->pending_wptr64 = *kq->wptr64_kernel;
+- kq->pending_wptr = *kq->wptr_kernel %
+- (kq->queue->properties.queue_size / 4);
+- } else {
+- kq->pending_wptr = *kq->wptr_kernel;
+- }
++ kq->pending_wptr = *kq->queue->properties.write_ptr;
+ }
+
+ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
+@@ -356,7 +349,6 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
+ break;
+
+ case CHIP_VEGA10:
+- case CHIP_VEGA20:
+ case CHIP_RAVEN:
+ kernel_queue_init_v9(&kq->ops_asic_specific);
+ break;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
+index 384d7a3..82c94a6 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
+@@ -80,7 +80,7 @@ struct kernel_queue {
+
+ /* data */
+ struct kfd_dev *dev;
+- struct mqd_manager *mqd_mgr;
++ struct mqd_manager *mqd;
+ struct queue *queue;
+ uint64_t pending_wptr64;
+ uint32_t pending_wptr;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c
+index 19e54ac..2808422 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c
+@@ -22,6 +22,8 @@
+ */
+
+ #include "kfd_kernel_queue.h"
++#include "kfd_pm4_headers.h"
++#include "kfd_pm4_opcodes.h"
+
+ static bool initialize_cik(struct kernel_queue *kq, struct kfd_dev *dev,
+ enum kfd_queue_type type, unsigned int queue_size);
+@@ -51,3 +53,120 @@ static void submit_packet_cik(struct kernel_queue *kq)
+ write_kernel_doorbell(kq->queue->properties.doorbell_ptr,
+ kq->pending_wptr);
+ }
++
++static int pm_map_process_cik(struct packet_manager *pm, uint32_t *buffer,
++ struct qcm_process_device *qpd)
++{
++ struct pm4_map_process *packet;
++
++ packet = (struct pm4_map_process *)buffer;
++
++ memset(buffer, 0, sizeof(struct pm4_map_process));
++
++ packet->header.u32all = pm_build_pm4_header(IT_MAP_PROCESS,
++ sizeof(struct pm4_map_process));
++ packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
++ packet->bitfields2.process_quantum = 1;
++ packet->bitfields2.pasid = qpd->pqm->process->pasid;
++ packet->bitfields3.page_table_base = qpd->page_table_base;
++ packet->bitfields10.gds_size = qpd->gds_size;
++ packet->bitfields10.num_gws = qpd->num_gws;
++ packet->bitfields10.num_oac = qpd->num_oac;
++ packet->bitfields10.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;
++
++ packet->sh_mem_config = qpd->sh_mem_config;
++ packet->sh_mem_bases = qpd->sh_mem_bases;
++ packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base;
++ packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit;
++
++ packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
++ packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);
++
++ return 0;
++}
++
++static int pm_map_process_scratch_cik(struct packet_manager *pm,
++ uint32_t *buffer, struct qcm_process_device *qpd)
++{
++ struct pm4_map_process_scratch_kv *packet;
++
++ packet = (struct pm4_map_process_scratch_kv *)buffer;
++
++ memset(buffer, 0, sizeof(struct pm4_map_process_scratch_kv));
++
++ packet->header.u32all = pm_build_pm4_header(IT_MAP_PROCESS,
++ sizeof(struct pm4_map_process_scratch_kv));
++ packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
++ packet->bitfields2.process_quantum = 1;
++ packet->bitfields2.pasid = qpd->pqm->process->pasid;
++ packet->bitfields3.page_table_base = qpd->page_table_base;
++ packet->bitfields14.gds_size = qpd->gds_size;
++ packet->bitfields14.num_gws = qpd->num_gws;
++ packet->bitfields14.num_oac = qpd->num_oac;
++ packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;
++
++ packet->sh_mem_config = qpd->sh_mem_config;
++ packet->sh_mem_bases = qpd->sh_mem_bases;
++ packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base;
++ packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit;
++
++ packet->sh_hidden_private_base_vmid = qpd->sh_hidden_private_base;
++
++ packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
++ packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);
++
++ return 0;
++}
++
++static uint32_t pm_get_map_process_packet_size_cik(void)
++{
++ return sizeof(struct pm4_map_process);
++}
++static uint32_t pm_get_map_process_scratch_packet_size_cik(void)
++{
++ return sizeof(struct pm4_map_process_scratch_kv);
++}
++
++
++static struct packet_manager_funcs kfd_cik_pm_funcs = {
++ .map_process = pm_map_process_cik,
++ .runlist = pm_runlist_vi,
++ .set_resources = pm_set_resources_vi,
++ .map_queues = pm_map_queues_vi,
++ .unmap_queues = pm_unmap_queues_vi,
++ .query_status = pm_query_status_vi,
++ .release_mem = pm_release_mem_vi,
++ .get_map_process_packet_size = pm_get_map_process_packet_size_cik,
++ .get_runlist_packet_size = pm_get_runlist_packet_size_vi,
++ .get_set_resources_packet_size = pm_get_set_resources_packet_size_vi,
++ .get_map_queues_packet_size = pm_get_map_queues_packet_size_vi,
++ .get_unmap_queues_packet_size = pm_get_unmap_queues_packet_size_vi,
++ .get_query_status_packet_size = pm_get_query_status_packet_size_vi,
++ .get_release_mem_packet_size = pm_get_release_mem_packet_size_vi,
++};
++
++static struct packet_manager_funcs kfd_cik_scratch_pm_funcs = {
++ .map_process = pm_map_process_scratch_cik,
++ .runlist = pm_runlist_vi,
++ .set_resources = pm_set_resources_vi,
++ .map_queues = pm_map_queues_vi,
++ .unmap_queues = pm_unmap_queues_vi,
++ .query_status = pm_query_status_vi,
++ .release_mem = pm_release_mem_vi,
++ .get_map_process_packet_size =
++ pm_get_map_process_scratch_packet_size_cik,
++ .get_runlist_packet_size = pm_get_runlist_packet_size_vi,
++ .get_set_resources_packet_size = pm_get_set_resources_packet_size_vi,
++ .get_map_queues_packet_size = pm_get_map_queues_packet_size_vi,
++ .get_unmap_queues_packet_size = pm_get_unmap_queues_packet_size_vi,
++ .get_query_status_packet_size = pm_get_query_status_packet_size_vi,
++ .get_release_mem_packet_size = pm_get_release_mem_packet_size_vi,
++};
++
++void kfd_pm_func_init_cik(struct packet_manager *pm, uint16_t fw_ver)
++{
++ if (fw_ver >= KFD_SCRATCH_KV_FW_VER)
++ pm->pmf = &kfd_cik_scratch_pm_funcs;
++ else
++ pm->pmf = &kfd_cik_pm_funcs;
++}
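The CIK map_process packets above size their headers through pm_build_pm4_header(), which this patch moves into kfd_kernel_queue_vi.c further down; the packing can be sketched standalone, with the opcode value and exact field layout treated as illustrative assumptions:

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    #define PM4_TYPE_3     3
    #define IT_MAP_PROCESS 0x2A  /* illustrative opcode value only */

    /* Mirrors pm_build_pm4_header(): count is packet_size / 4 - 2, i.e. the
     * number of body dwords minus one, with the header dword excluded.
     */
    static uint32_t build_pm4_header(unsigned int opcode, size_t packet_size)
    {
        uint32_t count = (uint32_t)(packet_size / 4 - 2);

        return ((uint32_t)PM4_TYPE_3 << 30) |
               ((count & 0x3FFF) << 16) |
               ((uint32_t)opcode << 8);
    }

    int main(void)
    {
        /* e.g. a 60-byte MAP_PROCESS packet -> count = 13 */
        printf("header = 0x%08X\n", build_pm4_header(IT_MAP_PROCESS, 60));
        return 0;
    }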
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
+index 33830b1..5fe4f60 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2016-2018 Advanced Micro Devices, Inc.
++ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+@@ -44,7 +44,7 @@ static bool initialize_v9(struct kernel_queue *kq, struct kfd_dev *dev,
+ int retval;
+
+ retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem);
+- if (retval)
++ if (retval != 0)
+ return false;
+
+ kq->eop_gpu_addr = kq->eop_mem->gpu_addr;
+@@ -71,7 +71,8 @@ static int pm_map_process_v9(struct packet_manager *pm,
+ uint32_t *buffer, struct qcm_process_device *qpd)
+ {
+ struct pm4_mes_map_process *packet;
+- uint64_t vm_page_table_base_addr = qpd->page_table_base;
++ uint64_t vm_page_table_base_addr =
++ (uint64_t)(qpd->page_table_base) << 12;
+
+ packet = (struct pm4_mes_map_process *)buffer;
+ memset(buffer, 0, sizeof(struct pm4_mes_map_process));
+@@ -125,6 +126,7 @@ static int pm_runlist_v9(struct packet_manager *pm, uint32_t *buffer,
+ concurrent_proc_cnt = min(pm->dqm->processes_count,
+ kfd->max_proc_per_quantum);
+
++
+ packet = (struct pm4_mes_runlist *)buffer;
+
+ memset(buffer, 0, sizeof(struct pm4_mes_runlist));
+@@ -293,7 +295,7 @@ static int pm_query_status_v9(struct packet_manager *pm, uint32_t *buffer,
+ }
+
+
+-static int pm_release_mem_v9(uint64_t gpu_addr, uint32_t *buffer)
++static uint32_t pm_release_mem_v9(uint64_t gpu_addr, uint32_t *buffer)
+ {
+ struct pm4_mec_release_mem *packet;
+
+@@ -318,22 +320,58 @@ static int pm_release_mem_v9(uint64_t gpu_addr, uint32_t *buffer)
+
+ packet->data_lo = 0;
+
+- return 0;
++ return sizeof(struct pm4_mec_release_mem) / sizeof(unsigned int);
++}
++
++static uint32_t pm_get_map_process_packet_size_v9(void)
++{
++ return sizeof(struct pm4_mes_map_process);
++}
++
++static uint32_t pm_get_runlist_packet_size_v9(void)
++{
++ return sizeof(struct pm4_mes_runlist);
++}
++
++static uint32_t pm_get_map_queues_packet_size_v9(void)
++{
++ return sizeof(struct pm4_mes_map_queues);
++}
++
++static uint32_t pm_get_unmap_queues_packet_size_v9(void)
++{
++ return sizeof(struct pm4_mes_unmap_queues);
++}
++
++static uint32_t pm_get_query_status_packet_size_v9(void)
++{
++ return sizeof(struct pm4_mes_query_status);
++}
++
++static uint32_t pm_get_release_mem_packet_size_v9(void)
++{
++ return sizeof(struct pm4_mec_release_mem);
+ }
+
+-const struct packet_manager_funcs kfd_v9_pm_funcs = {
+- .map_process = pm_map_process_v9,
+- .runlist = pm_runlist_v9,
+- .set_resources = pm_set_resources_vi,
+- .map_queues = pm_map_queues_v9,
+- .unmap_queues = pm_unmap_queues_v9,
+- .query_status = pm_query_status_v9,
+- .release_mem = pm_release_mem_v9,
+- .map_process_size = sizeof(struct pm4_mes_map_process),
+- .runlist_size = sizeof(struct pm4_mes_runlist),
+- .set_resources_size = sizeof(struct pm4_mes_set_resources),
+- .map_queues_size = sizeof(struct pm4_mes_map_queues),
+- .unmap_queues_size = sizeof(struct pm4_mes_unmap_queues),
+- .query_status_size = sizeof(struct pm4_mes_query_status),
+- .release_mem_size = sizeof(struct pm4_mec_release_mem)
++static struct packet_manager_funcs kfd_v9_pm_funcs = {
++ .map_process = pm_map_process_v9,
++ .runlist = pm_runlist_v9,
++ .set_resources = pm_set_resources_vi,
++ .map_queues = pm_map_queues_v9,
++ .unmap_queues = pm_unmap_queues_v9,
++ .query_status = pm_query_status_v9,
++ .release_mem = pm_release_mem_v9,
++ .get_map_process_packet_size = pm_get_map_process_packet_size_v9,
++ .get_runlist_packet_size = pm_get_runlist_packet_size_v9,
++ .get_set_resources_packet_size = pm_get_set_resources_packet_size_vi,
++ .get_map_queues_packet_size = pm_get_map_queues_packet_size_v9,
++ .get_unmap_queues_packet_size = pm_get_unmap_queues_packet_size_v9,
++ .get_query_status_packet_size = pm_get_query_status_packet_size_v9,
++ .get_release_mem_packet_size = pm_get_release_mem_packet_size_v9,
+ };
++
++void kfd_pm_func_init_v9(struct packet_manager *pm, uint16_t fw_ver)
++{
++ pm->pmf = &kfd_v9_pm_funcs;
++}
++
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
+index bf20c6d..9022ecb 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
+@@ -67,25 +67,12 @@ static void submit_packet_vi(struct kernel_queue *kq)
+ kq->pending_wptr);
+ }
+
+-unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size)
+-{
+- union PM4_MES_TYPE_3_HEADER header;
+-
+- header.u32All = 0;
+- header.opcode = opcode;
+- header.count = packet_size / 4 - 2;
+- header.type = PM4_TYPE_3;
+-
+- return header.u32All;
+-}
+-
+-static int pm_map_process_vi(struct packet_manager *pm, uint32_t *buffer,
+- struct qcm_process_device *qpd)
++static int pm_map_process_vi(struct packet_manager *pm,
++ uint32_t *buffer, struct qcm_process_device *qpd)
+ {
+ struct pm4_mes_map_process *packet;
+
+ packet = (struct pm4_mes_map_process *)buffer;
+-
+ memset(buffer, 0, sizeof(struct pm4_mes_map_process));
+
+ packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS,
+@@ -112,16 +99,27 @@ static int pm_map_process_vi(struct packet_manager *pm, uint32_t *buffer,
+ return 0;
+ }
+
+-static int pm_runlist_vi(struct packet_manager *pm, uint32_t *buffer,
++
++unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size)
++{
++ union PM4_MES_TYPE_3_HEADER header;
++
++ header.u32All = 0;
++ header.opcode = opcode;
++ header.count = packet_size / 4 - 2;
++ header.type = PM4_TYPE_3;
++
++ return header.u32All;
++}
++
++int pm_runlist_vi(struct packet_manager *pm, uint32_t *buffer,
+ uint64_t ib, size_t ib_size_in_dwords, bool chain)
+ {
+ struct pm4_mes_runlist *packet;
++
+ int concurrent_proc_cnt = 0;
+ struct kfd_dev *kfd = pm->dqm->dev;
+
+- if (WARN_ON(!ib))
+- return -EFAULT;
+-
+ /* Determine the number of processes to map together to HW:
+ * it can not exceed the number of VMIDs available to the
+ * scheduler, and it is determined by the smaller of the number
+@@ -134,6 +132,7 @@ static int pm_runlist_vi(struct packet_manager *pm, uint32_t *buffer,
+ concurrent_proc_cnt = min(pm->dqm->processes_count,
+ kfd->max_proc_per_quantum);
+
++
+ packet = (struct pm4_mes_runlist *)buffer;
+
+ memset(buffer, 0, sizeof(struct pm4_mes_runlist));
+@@ -151,35 +150,7 @@ static int pm_runlist_vi(struct packet_manager *pm, uint32_t *buffer,
+ return 0;
+ }
+
+-int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer,
+- struct scheduling_resources *res)
+-{
+- struct pm4_mes_set_resources *packet;
+-
+- packet = (struct pm4_mes_set_resources *)buffer;
+- memset(buffer, 0, sizeof(struct pm4_mes_set_resources));
+-
+- packet->header.u32All = pm_build_pm4_header(IT_SET_RESOURCES,
+- sizeof(struct pm4_mes_set_resources));
+-
+- packet->bitfields2.queue_type =
+- queue_type__mes_set_resources__hsa_interface_queue_hiq;
+- packet->bitfields2.vmid_mask = res->vmid_mask;
+- packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY_MS / 100;
+- packet->bitfields7.oac_mask = res->oac_mask;
+- packet->bitfields8.gds_heap_base = res->gds_heap_base;
+- packet->bitfields8.gds_heap_size = res->gds_heap_size;
+-
+- packet->gws_mask_lo = lower_32_bits(res->gws_mask);
+- packet->gws_mask_hi = upper_32_bits(res->gws_mask);
+-
+- packet->queue_mask_lo = lower_32_bits(res->queue_mask);
+- packet->queue_mask_hi = upper_32_bits(res->queue_mask);
+-
+- return 0;
+-}
+-
+-static int pm_map_queues_vi(struct packet_manager *pm, uint32_t *buffer,
++int pm_map_queues_vi(struct packet_manager *pm, uint32_t *buffer,
+ struct queue *q, bool is_static)
+ {
+ struct pm4_mes_map_queues *packet;
+@@ -238,7 +209,35 @@ static int pm_map_queues_vi(struct packet_manager *pm, uint32_t *buffer,
+ return 0;
+ }
+
+-static int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer,
++int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer,
++ struct scheduling_resources *res)
++{
++ struct pm4_mes_set_resources *packet;
++
++ packet = (struct pm4_mes_set_resources *)buffer;
++ memset(buffer, 0, sizeof(struct pm4_mes_set_resources));
++
++ packet->header.u32All = pm_build_pm4_header(IT_SET_RESOURCES,
++ sizeof(struct pm4_mes_set_resources));
++
++ packet->bitfields2.queue_type =
++ queue_type__mes_set_resources__hsa_interface_queue_hiq;
++ packet->bitfields2.vmid_mask = res->vmid_mask;
++ packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY_MS / 100;
++ packet->bitfields7.oac_mask = res->oac_mask;
++ packet->bitfields8.gds_heap_base = res->gds_heap_base;
++ packet->bitfields8.gds_heap_size = res->gds_heap_size;
++
++ packet->gws_mask_lo = lower_32_bits(res->gws_mask);
++ packet->gws_mask_hi = upper_32_bits(res->gws_mask);
++
++ packet->queue_mask_lo = lower_32_bits(res->queue_mask);
++ packet->queue_mask_hi = upper_32_bits(res->queue_mask);
++
++ return 0;
++}
++
++int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer,
+ enum kfd_queue_type type,
+ enum kfd_unmap_queues_filter filter,
+ uint32_t filter_param, bool reset,
+@@ -303,7 +302,7 @@ static int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer,
+
+ }
+
+-static int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer,
++int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer,
+ uint64_t fence_address, uint32_t fence_value)
+ {
+ struct pm4_mes_query_status *packet;
+@@ -311,6 +310,7 @@ static int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer,
+ packet = (struct pm4_mes_query_status *)buffer;
+ memset(buffer, 0, sizeof(struct pm4_mes_query_status));
+
++
+ packet->header.u32All = pm_build_pm4_header(IT_QUERY_STATUS,
+ sizeof(struct pm4_mes_query_status));
+
+@@ -328,15 +328,16 @@ static int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer,
+ return 0;
+ }
+
+-static int pm_release_mem_vi(uint64_t gpu_addr, uint32_t *buffer)
++
++uint32_t pm_release_mem_vi(uint64_t gpu_addr, uint32_t *buffer)
+ {
+ struct pm4_mec_release_mem *packet;
+
+ packet = (struct pm4_mec_release_mem *)buffer;
+- memset(buffer, 0, sizeof(*packet));
++ memset(buffer, 0, sizeof(struct pm4_mec_release_mem));
+
+ packet->header.u32All = pm_build_pm4_header(IT_RELEASE_MEM,
+- sizeof(*packet));
++ sizeof(struct pm4_mec_release_mem));
+
+ packet->bitfields2.event_type = CACHE_FLUSH_AND_INV_TS_EVENT;
+ packet->bitfields2.event_index = event_index___release_mem__end_of_pipe;
+@@ -354,22 +355,63 @@ static int pm_release_mem_vi(uint64_t gpu_addr, uint32_t *buffer)
+
+ packet->data_lo = 0;
+
+- return 0;
++ return sizeof(struct pm4_mec_release_mem) / sizeof(unsigned int);
++}
++
++uint32_t pm_get_map_process_packet_size_vi(void)
++{
++ return sizeof(struct pm4_mes_map_process);
++}
++
++uint32_t pm_get_runlist_packet_size_vi(void)
++{
++ return sizeof(struct pm4_mes_runlist);
++}
++
++uint32_t pm_get_set_resources_packet_size_vi(void)
++{
++ return sizeof(struct pm4_mes_set_resources);
++}
++
++uint32_t pm_get_map_queues_packet_size_vi(void)
++{
++ return sizeof(struct pm4_mes_map_queues);
++}
++
++uint32_t pm_get_unmap_queues_packet_size_vi(void)
++{
++ return sizeof(struct pm4_mes_unmap_queues);
++}
++
++uint32_t pm_get_query_status_packet_size_vi(void)
++{
++ return sizeof(struct pm4_mes_query_status);
+ }
+
+-const struct packet_manager_funcs kfd_vi_pm_funcs = {
+- .map_process = pm_map_process_vi,
+- .runlist = pm_runlist_vi,
+- .set_resources = pm_set_resources_vi,
+- .map_queues = pm_map_queues_vi,
+- .unmap_queues = pm_unmap_queues_vi,
+- .query_status = pm_query_status_vi,
+- .release_mem = pm_release_mem_vi,
+- .map_process_size = sizeof(struct pm4_mes_map_process),
+- .runlist_size = sizeof(struct pm4_mes_runlist),
+- .set_resources_size = sizeof(struct pm4_mes_set_resources),
+- .map_queues_size = sizeof(struct pm4_mes_map_queues),
+- .unmap_queues_size = sizeof(struct pm4_mes_unmap_queues),
+- .query_status_size = sizeof(struct pm4_mes_query_status),
+- .release_mem_size = sizeof(struct pm4_mec_release_mem)
++uint32_t pm_get_release_mem_packet_size_vi(void)
++{
++ return sizeof(struct pm4_mec_release_mem);
++}
++
++
++static struct packet_manager_funcs kfd_vi_pm_funcs = {
++ .map_process = pm_map_process_vi,
++ .runlist = pm_runlist_vi,
++ .set_resources = pm_set_resources_vi,
++ .map_queues = pm_map_queues_vi,
++ .unmap_queues = pm_unmap_queues_vi,
++ .query_status = pm_query_status_vi,
++ .release_mem = pm_release_mem_vi,
++ .get_map_process_packet_size = pm_get_map_process_packet_size_vi,
++ .get_runlist_packet_size = pm_get_runlist_packet_size_vi,
++ .get_set_resources_packet_size = pm_get_set_resources_packet_size_vi,
++ .get_map_queues_packet_size = pm_get_map_queues_packet_size_vi,
++ .get_unmap_queues_packet_size = pm_get_unmap_queues_packet_size_vi,
++ .get_query_status_packet_size = pm_get_query_status_packet_size_vi,
++ .get_release_mem_packet_size = pm_get_release_mem_packet_size_vi,
+ };
++
++void kfd_pm_func_init_vi(struct packet_manager *pm, uint16_t fw_ver)
++{
++ pm->pmf = &kfd_vi_pm_funcs;
++}
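As a usage note, the get_*_packet_size() callbacks registered above feed the runlist IB sizing in kfd_packet_manager.c later in this patch; a minimal sketch of that calculation with hypothetical packet sizes:

    #include <stdio.h>

    /* Hypothetical sizes; the driver queries pm->pmf->get_*_packet_size(). */
    #define MAP_PROCESS_PACKET_SIZE 60
    #define MAP_QUEUES_PACKET_SIZE  56
    #define RUNLIST_PACKET_SIZE     20

    /* Sketch of pm_calc_rlib_size(): one map_process packet per process,
     * one map_queues packet per queue, plus a chained runlist packet when
     * the runlist is over-subscribed.
     */
    static unsigned int calc_rlib_size(unsigned int process_count,
                                       unsigned int queue_count,
                                       int over_subscription)
    {
        unsigned int size = process_count * MAP_PROCESS_PACKET_SIZE +
                            queue_count * MAP_QUEUES_PACKET_SIZE;

        if (over_subscription)
            size += RUNLIST_PACKET_SIZE;

        return size;
    }

    int main(void)
    {
        printf("rlib size = %u bytes\n", calc_rlib_size(2, 8, 0));
        return 0;
    }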
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+index 261657f..34d44ff 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+@@ -63,7 +63,7 @@ MODULE_PARM_DESC(hws_max_conc_proc,
+
+ int cwsr_enable = 1;
+ module_param(cwsr_enable, int, 0444);
+-MODULE_PARM_DESC(cwsr_enable, "CWSR enable (0 = off, 1 = on (default))");
++MODULE_PARM_DESC(cwsr_enable, "CWSR enable (0 = Off, 1 = On (Default))");
+
+ int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT;
+ module_param(max_num_of_queues_per_device, int, 0444);
+@@ -75,6 +75,8 @@ module_param(send_sigterm, int, 0444);
+ MODULE_PARM_DESC(send_sigterm,
+ "Send sigterm to HSA process on unhandled exception (0 = disable, 1 = enable)");
+
++static int amdkfd_init_completed;
++
+ int debug_largebar;
+ module_param(debug_largebar, int, 0444);
+ MODULE_PARM_DESC(debug_largebar,
+@@ -85,23 +87,16 @@ module_param(ignore_crat, int, 0444);
+ MODULE_PARM_DESC(ignore_crat,
+ "Ignore CRAT table during KFD initialization (0 = use CRAT (default), 1 = ignore CRAT)");
+
+-int noretry = 1;
+-module_param(noretry, int, 0644);
++int vega10_noretry = 1;
++module_param_named(noretry, vega10_noretry, int, 0644);
+ MODULE_PARM_DESC(noretry,
+- "Set sh_mem_config.retry_disable on GFXv9+ dGPUs (0 = retry enabled, 1 = retry disabled (default))");
++ "Set sh_mem_config.retry_disable on Vega10 (0 = retry enabled, 1 = retry disabled (default))");
+
+ int priv_cp_queues;
+ module_param(priv_cp_queues, int, 0644);
+ MODULE_PARM_DESC(priv_cp_queues,
+ "Enable privileged mode for CP queues (0 = off (default), 1 = on)");
+
+-int halt_if_hws_hang;
+-module_param(halt_if_hws_hang, int, 0644);
+-MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (default), 1 = on)");
+-
+-
+-static int amdkfd_init_completed;
+-
+ int kgd2kfd_init(unsigned int interface_version,
+ const struct kgd2kfd_calls **g2f)
+ {
+@@ -154,7 +149,7 @@ static int __init kfd_module_init(void)
+
+ err = kfd_ipc_init();
+ if (err < 0)
+- goto err_ipc;
++ goto err_topology;
+
+ err = kfd_process_create_wq();
+ if (err < 0)
+@@ -171,8 +166,6 @@ static int __init kfd_module_init(void)
+ return 0;
+
+ err_create_wq:
+-err_ipc:
+- kfd_topology_shutdown();
+ err_topology:
+ kfd_chardev_exit();
+ err_ioctl:
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+index d39e81c..8279b74 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+@@ -81,7 +81,6 @@ struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
+ case CHIP_POLARIS11:
+ return mqd_manager_init_vi_tonga(type, dev);
+ case CHIP_VEGA10:
+- case CHIP_VEGA20:
+ case CHIP_RAVEN:
+ return mqd_manager_init_v9(type, dev);
+ default:
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
+index 336ea9c..dcaeda8 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
+@@ -94,8 +94,6 @@ struct mqd_manager {
+ u32 *ctl_stack_used_size,
+ u32 *save_area_used_size);
+
+- bool (*check_queue_active)(struct queue *q);
+-
+ #if defined(CONFIG_DEBUG_FS)
+ int (*debugfs_show_mqd)(struct seq_file *m, void *data);
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+index 2441834..bd44a23 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+@@ -42,31 +42,6 @@ static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
+ return (struct cik_sdma_rlc_registers *)mqd;
+ }
+
+-static bool check_sdma_queue_active(struct queue *q)
+-{
+- uint32_t rptr, wptr;
+- struct cik_sdma_rlc_registers *m = get_sdma_mqd(q->mqd);
+-
+- rptr = m->sdma_rlc_rb_rptr;
+- wptr = m->sdma_rlc_rb_wptr;
+- pr_debug("rptr=%d, wptr=%d\n", rptr, wptr);
+-
+- return (rptr != wptr);
+-}
+-
+-static bool check_queue_active(struct queue *q)
+-{
+- uint32_t rptr, wptr;
+- struct cik_mqd *m = get_mqd(q->mqd);
+-
+- rptr = m->cp_hqd_pq_rptr;
+- wptr = m->cp_hqd_pq_wptr;
+-
+- pr_debug("rptr=%d, wptr=%d\n", rptr, wptr);
+-
+- return (rptr != wptr);
+-}
+-
+ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q)
+ {
+@@ -516,7 +491,6 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
+ mqd->update_mqd = update_mqd;
+ mqd->destroy_mqd = destroy_mqd;
+ mqd->is_occupied = is_occupied;
+- mqd->check_queue_active = check_queue_active;
+ #if defined(CONFIG_DEBUG_FS)
+ mqd->debugfs_show_mqd = debugfs_show_mqd;
+ #endif
+@@ -528,7 +502,6 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
+ mqd->update_mqd = update_mqd_hiq;
+ mqd->destroy_mqd = destroy_mqd;
+ mqd->is_occupied = is_occupied;
+- mqd->check_queue_active = check_queue_active;
+ #if defined(CONFIG_DEBUG_FS)
+ mqd->debugfs_show_mqd = debugfs_show_mqd;
+ #endif
+@@ -540,7 +513,6 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
+ mqd->update_mqd = update_mqd_sdma;
+ mqd->destroy_mqd = destroy_mqd_sdma;
+ mqd->is_occupied = is_occupied_sdma;
+- mqd->check_queue_active = check_sdma_queue_active;
+ #if defined(CONFIG_DEBUG_FS)
+ mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+index dcd24c4..f4e8efc 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2016-2018 Advanced Micro Devices, Inc.
++ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+@@ -41,49 +41,6 @@ static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
+ return (struct v9_sdma_mqd *)mqd;
+ }
+
+-static bool check_sdma_queue_active(struct queue *q)
+-{
+- uint32_t rptr, wptr;
+- uint32_t rptr_hi, wptr_hi;
+- struct v9_sdma_mqd *m = get_sdma_mqd(q->mqd);
+-
+- rptr = m->sdmax_rlcx_rb_rptr;
+- wptr = m->sdmax_rlcx_rb_wptr;
+- rptr_hi = m->sdmax_rlcx_rb_rptr_hi;
+- wptr_hi = m->sdmax_rlcx_rb_wptr_hi;
+- pr_debug("rptr=%d, wptr=%d\n", rptr, wptr);
+- pr_debug("rptr_hi=%d, wptr_hi=%d\n", rptr_hi, wptr_hi);
+-
+- return (rptr != wptr || rptr_hi != wptr_hi);
+-}
+-
+-static bool check_queue_active(struct queue *q)
+-{
+- uint32_t rptr, wptr;
+- uint32_t cntl_stack_offset, cntl_stack_size;
+- struct v9_mqd *m = get_mqd(q->mqd);
+-
+- rptr = m->cp_hqd_pq_rptr;
+- wptr = m->cp_hqd_pq_wptr_lo % q->properties.queue_size;
+- cntl_stack_offset = m->cp_hqd_cntl_stack_offset;
+- cntl_stack_size = m->cp_hqd_cntl_stack_size;
+-
+- pr_debug("rptr=%d, wptr=%d\n", rptr, wptr);
+- pr_debug("m->cp_hqd_cntl_stack_offset=0x%08x\n", cntl_stack_offset);
+- pr_debug("m->cp_hqd_cntl_stack_size=0x%08x\n", cntl_stack_size);
+-
+- if ((rptr == 0 && wptr == 0) ||
+- cntl_stack_offset == 0xffffffff ||
+- cntl_stack_size > 0x5000)
+- return false;
+-
+- /* Process is idle if both conditions are meet:
+- * queue's rptr equals to wptr
+- * control stack is empty, cntl_stack_offset = cntl_stack_size
+- */
+- return (rptr != wptr || cntl_stack_offset != cntl_stack_size);
+-}
+-
+ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q)
+ {
+@@ -158,7 +115,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
+ ALIGN(sizeof(struct v9_mqd), PAGE_SIZE),
+ &((*mqd_mem_obj)->gtt_mem),
+ &((*mqd_mem_obj)->gpu_addr),
+- (void *)&((*mqd_mem_obj)->cpu_ptr), true);
++ (void *)&((*mqd_mem_obj)->cpu_ptr));
+ } else
+ retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct v9_mqd),
+ mqd_mem_obj);
+@@ -202,7 +159,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
+ (1 << COMPUTE_PGM_RSRC2__TRAP_PRESENT__SHIFT);
+ }
+
+- if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address) {
++ if (mm->dev->cwsr_enabled) {
+ m->cp_hqd_persistent_state |=
+ (1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT);
+ m->cp_hqd_ctx_save_base_addr_lo =
+@@ -260,9 +217,8 @@ static int update_mqd(struct mqd_manager *mm, void *mqd,
+ pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
+ m->cp_hqd_pq_doorbell_control);
+
+- m->cp_hqd_ib_control =
+- 3 << CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT |
+- 1 << CP_HQD_IB_CONTROL__IB_EXE_DISABLE__SHIFT;
++ m->cp_hqd_ib_control = 3 << CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT |
++ 1 << CP_HQD_IB_CONTROL__IB_EXE_DISABLE__SHIFT;
+
+ /*
+ * HW does not clamp this field correctly. Maximum EOP queue size
+@@ -287,13 +243,13 @@ static int update_mqd(struct mqd_manager *mm, void *mqd,
+ 2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT |
+ 1 << CP_HQD_PQ_CONTROL__QUEUE_FULL_EN__SHIFT |
+ 1 << CP_HQD_PQ_CONTROL__WPP_CLAMP_EN__SHIFT;
+- m->cp_hqd_pq_doorbell_control |= 1 <<
+- CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT;
++ m->cp_hqd_pq_doorbell_control |=
++ 1 << CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT;
+ }
+ if (priv_cp_queues)
+ m->cp_hqd_pq_control |=
+ 1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT;
+- if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address)
++ if (mm->dev->cwsr_enabled)
+ m->cp_hqd_ctx_save_control = 0;
+
+ update_cu_mask(mm, mqd, q);
+@@ -532,7 +488,6 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
+ mqd->destroy_mqd = destroy_mqd;
+ mqd->is_occupied = is_occupied;
+ mqd->get_wave_state = get_wave_state;
+- mqd->check_queue_active = check_queue_active;
+ #if defined(CONFIG_DEBUG_FS)
+ mqd->debugfs_show_mqd = debugfs_show_mqd;
+ #endif
+@@ -544,7 +499,6 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
+ mqd->update_mqd = update_mqd_hiq;
+ mqd->destroy_mqd = destroy_mqd;
+ mqd->is_occupied = is_occupied;
+- mqd->check_queue_active = check_queue_active;
+ #if defined(CONFIG_DEBUG_FS)
+ mqd->debugfs_show_mqd = debugfs_show_mqd;
+ #endif
+@@ -556,7 +510,6 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
+ mqd->update_mqd = update_mqd_sdma;
+ mqd->destroy_mqd = destroy_mqd_sdma;
+ mqd->is_occupied = is_occupied_sdma;
+- mqd->check_queue_active = check_sdma_queue_active;
+ #if defined(CONFIG_DEBUG_FS)
+ mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+index 246fe6c..eff7580 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+@@ -44,45 +44,6 @@ static inline struct vi_sdma_mqd *get_sdma_mqd(void *mqd)
+ return (struct vi_sdma_mqd *)mqd;
+ }
+
+-static bool check_sdma_queue_active(struct queue *q)
+-{
+- uint32_t rptr, wptr;
+- struct vi_sdma_mqd *m = get_sdma_mqd(q->mqd);
+-
+- rptr = m->sdmax_rlcx_rb_rptr;
+- wptr = m->sdmax_rlcx_rb_wptr;
+- pr_debug("rptr=%d, wptr=%d\n", rptr, wptr);
+-
+- return (rptr != wptr);
+-}
+-
+-static bool check_queue_active(struct queue *q)
+-{
+- uint32_t rptr, wptr;
+- uint32_t cntl_stack_offset, cntl_stack_size;
+- struct vi_mqd *m = get_mqd(q->mqd);
+-
+- rptr = m->cp_hqd_pq_rptr;
+- wptr = m->cp_hqd_pq_wptr;
+- cntl_stack_offset = m->cp_hqd_cntl_stack_offset;
+- cntl_stack_size = m->cp_hqd_cntl_stack_size;
+-
+- pr_debug("rptr=%d, wptr=%d\n", rptr, wptr);
+- pr_debug("m->cp_hqd_cntl_stack_offset=0x%08x\n", cntl_stack_offset);
+- pr_debug("m->cp_hqd_cntl_stack_size=0x%08x\n", cntl_stack_size);
+-
+- if ((rptr == 0 && wptr == 0) ||
+- cntl_stack_offset == 0xffffffff ||
+- cntl_stack_size > 0x5000)
+- return false;
+-
+- /* Process is idle if both conditions are meet:
+- * queue's rptr equals to wptr
+- * control stack is empty, cntl_stack_offset = cntl_stack_size
+- */
+- return (rptr != wptr || cntl_stack_offset != cntl_stack_size);
+-}
+-
+ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q)
+ {
+@@ -198,7 +159,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
+ (1 << COMPUTE_PGM_RSRC2__TRAP_PRESENT__SHIFT);
+ }
+
+- if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address) {
++ if (mm->dev->cwsr_enabled) {
+ m->cp_hqd_persistent_state |=
+ (1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT);
+ m->cp_hqd_ctx_save_base_addr_lo =
+@@ -293,7 +254,7 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
+ if (priv_cp_queues)
+ m->cp_hqd_pq_control |=
+ 1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT;
+- if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address)
++ if (mm->dev->cwsr_enabled)
+ m->cp_hqd_ctx_save_control =
+ atc_bit << CP_HQD_CTX_SAVE_CONTROL__ATC__SHIFT |
+ mtype << CP_HQD_CTX_SAVE_CONTROL__MTYPE__SHIFT;
+@@ -537,7 +498,6 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
+ mqd->destroy_mqd = destroy_mqd;
+ mqd->is_occupied = is_occupied;
+ mqd->get_wave_state = get_wave_state;
+- mqd->check_queue_active = check_queue_active;
+ #if defined(CONFIG_DEBUG_FS)
+ mqd->debugfs_show_mqd = debugfs_show_mqd;
+ #endif
+@@ -549,7 +509,6 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
+ mqd->update_mqd = update_mqd_hiq;
+ mqd->destroy_mqd = destroy_mqd;
+ mqd->is_occupied = is_occupied;
+- mqd->check_queue_active = check_queue_active;
+ #if defined(CONFIG_DEBUG_FS)
+ mqd->debugfs_show_mqd = debugfs_show_mqd;
+ #endif
+@@ -561,7 +520,6 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
+ mqd->update_mqd = update_mqd_sdma;
+ mqd->destroy_mqd = destroy_mqd_sdma;
+ mqd->is_occupied = is_occupied_sdma;
+- mqd->check_queue_active = check_sdma_queue_active;
+ #if defined(CONFIG_DEBUG_FS)
+ mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
+ #endif
+@@ -586,3 +544,4 @@ struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type,
+ mqd->update_mqd = update_mqd_tonga;
+ return mqd;
+ }
++
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+index c6080ed3..98c89d2 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+@@ -26,6 +26,7 @@
+ #include "kfd_device_queue_manager.h"
+ #include "kfd_kernel_queue.h"
+ #include "kfd_priv.h"
++#include "kfd_pm4_opcodes.h"
+
+ static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
+ unsigned int buffer_size_bytes)
+@@ -44,7 +45,8 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
+ unsigned int process_count, queue_count, compute_queue_count;
+ unsigned int map_queue_size;
+ unsigned int max_proc_per_quantum = 1;
+- struct kfd_dev *dev = pm->dqm->dev;
++
++ struct kfd_dev *dev = pm->dqm->dev;
+
+ process_count = pm->dqm->processes_count;
+ queue_count = pm->dqm->queue_count;
+@@ -55,20 +57,21 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
+ * hws_max_conc_proc has been done in
+ * kgd2kfd_device_init().
+ */
++
+ *over_subscription = false;
+
+ if (dev->max_proc_per_quantum > 1)
+ max_proc_per_quantum = dev->max_proc_per_quantum;
+
+ if ((process_count > max_proc_per_quantum) ||
+- compute_queue_count > get_queues_num(pm->dqm)) {
++ compute_queue_count > get_queues_num(pm->dqm)) {
+ *over_subscription = true;
+ pr_debug("Over subscribed runlist\n");
+ }
+
+- map_queue_size = pm->pmf->map_queues_size;
++ map_queue_size = pm->pmf->get_map_queues_packet_size();
+ /* calculate run list ib allocation size */
+- *rlib_size = process_count * pm->pmf->map_process_size +
++ *rlib_size = process_count * pm->pmf->get_map_process_packet_size() +
+ queue_count * map_queue_size;
+
+ /*
+@@ -76,7 +79,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
+ * when over subscription
+ */
+ if (*over_subscription)
+- *rlib_size += pm->pmf->runlist_size;
++ *rlib_size += pm->pmf->get_runlist_packet_size();
+
+ pr_debug("runlist ib size %d\n", *rlib_size);
+ }
+@@ -157,7 +160,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
+ return retval;
+
+ proccesses_mapped++;
+- inc_wptr(&rl_wptr, pm->pmf->map_process_size,
++ inc_wptr(&rl_wptr, pm->pmf->get_map_process_packet_size(),
+ alloc_size_bytes);
+
+ list_for_each_entry(kq, &qpd->priv_queue_list, list) {
+@@ -175,7 +178,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
+ return retval;
+
+ inc_wptr(&rl_wptr,
+- pm->pmf->map_queues_size,
++ pm->pmf->get_map_queues_packet_size(),
+ alloc_size_bytes);
+ }
+
+@@ -190,12 +193,11 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
+ &rl_buffer[rl_wptr],
+ q,
+ qpd->is_debug);
+-
+ if (retval)
+ return retval;
+
+ inc_wptr(&rl_wptr,
+- pm->pmf->map_queues_size,
++ pm->pmf->get_map_queues_packet_size(),
+ alloc_size_bytes);
+ }
+ }
+@@ -215,38 +217,37 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
+ return retval;
+ }
+
+-int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
++int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm,
++ uint16_t fw_ver)
+ {
+- switch (dqm->dev->device_info->asic_family) {
++ pm->dqm = dqm;
++ mutex_init(&pm->lock);
++ pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
++ if (!pm->priv_queue) {
++ mutex_destroy(&pm->lock);
++ return -ENOMEM;
++ }
++ pm->allocated = false;
++
++ switch (pm->dqm->dev->device_info->asic_family) {
+ case CHIP_KAVERI:
+ case CHIP_HAWAII:
+- /* PM4 packet structures on CIK are the same as on VI */
++ kfd_pm_func_init_cik(pm, fw_ver);
++ break;
+ case CHIP_CARRIZO:
+ case CHIP_TONGA:
+ case CHIP_FIJI:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+- pm->pmf = &kfd_vi_pm_funcs;
++ kfd_pm_func_init_vi(pm, fw_ver);
+ break;
+ case CHIP_VEGA10:
+- case CHIP_VEGA20:
+ case CHIP_RAVEN:
+- pm->pmf = &kfd_v9_pm_funcs;
++ kfd_pm_func_init_v9(pm, fw_ver);
+ break;
+ default:
+- WARN(1, "Unexpected ASIC family %u",
+- dqm->dev->device_info->asic_family);
+- return -EINVAL;
+- }
+-
+- pm->dqm = dqm;
+- mutex_init(&pm->lock);
+- pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
+- if (!pm->priv_queue) {
+- mutex_destroy(&pm->lock);
+- return -ENOMEM;
++ BUG();
+ }
+- pm->allocated = false;
+
+ return 0;
+ }
+@@ -263,7 +264,7 @@ int pm_send_set_resources(struct packet_manager *pm,
+ uint32_t *buffer, size;
+ int retval = 0;
+
+- size = pm->pmf->set_resources_size;
++ size = pm->pmf->get_set_resources_packet_size();
+ mutex_lock(&pm->lock);
+ pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
+ size / sizeof(uint32_t),
+@@ -300,7 +301,8 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
+
+ pr_debug("runlist IB address: 0x%llX\n", rl_gpu_ib_addr);
+
+- packet_size_dwords = pm->pmf->runlist_size / sizeof(uint32_t);
++ packet_size_dwords = pm->pmf->get_runlist_packet_size() /
++ sizeof(uint32_t);
+ mutex_lock(&pm->lock);
+
+ retval = pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
+@@ -309,7 +311,7 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
+ goto fail_acquire_packet_buffer;
+
+ retval = pm->pmf->runlist(pm, rl_buffer, rl_gpu_ib_addr,
+- rl_ib_size / sizeof(uint32_t), false);
++ rl_ib_size / sizeof(uint32_t), false);
+ if (retval)
+ goto fail_create_runlist;
+
+@@ -337,7 +339,7 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
+ if (WARN_ON(!fence_address))
+ return -EFAULT;
+
+- size = pm->pmf->query_status_size;
++ size = pm->pmf->get_query_status_packet_size();
+ mutex_lock(&pm->lock);
+ pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
+ size / sizeof(uint32_t), (unsigned int **)&buffer);
+@@ -366,7 +368,7 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
+ uint32_t *buffer, size;
+ int retval = 0;
+
+- size = pm->pmf->unmap_queues_size;
++ size = pm->pmf->get_unmap_queues_packet_size();
+ mutex_lock(&pm->lock);
+ pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
+ size / sizeof(uint32_t), (unsigned int **)&buffer);
+@@ -398,51 +400,17 @@ void pm_release_ib(struct packet_manager *pm)
+ mutex_unlock(&pm->lock);
+ }
+
+-#if defined(CONFIG_DEBUG_FS)
+-
+ int pm_debugfs_runlist(struct seq_file *m, void *data)
+ {
+ struct packet_manager *pm = data;
+
+- mutex_lock(&pm->lock);
+-
+ if (!pm->allocated) {
+ seq_puts(m, " No active runlist\n");
+- goto out;
++ return 0;
+ }
+
+ seq_hex_dump(m, " ", DUMP_PREFIX_OFFSET, 32, 4,
+ pm->ib_buffer_obj->cpu_ptr, pm->ib_size_bytes, false);
+
+-out:
+- mutex_unlock(&pm->lock);
+ return 0;
+ }
+-
+-int pm_debugfs_hang_hws(struct packet_manager *pm)
+-{
+- uint32_t *buffer, size;
+- int r = 0;
+-
+- size = pm->pmf->query_status_size;
+- mutex_lock(&pm->lock);
+- pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
+- size / sizeof(uint32_t), (unsigned int **)&buffer);
+- if (!buffer) {
+- pr_err("Failed to allocate buffer on kernel queue\n");
+- r = -ENOMEM;
+- goto out;
+- }
+- memset(buffer, 0x55, size);
+- pm->priv_queue->ops.submit_packet(pm->priv_queue);
+-
+- pr_info("Submitting %x %x %x %x %x %x %x to HIQ to hang the HWS.",
+- buffer[0], buffer[1], buffer[2], buffer[3],
+- buffer[4], buffer[5], buffer[6]);
+-out:
+- mutex_unlock(&pm->lock);
+- return r;
+-}
+-
+-
+-#endif
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_peerdirect.c b/drivers/gpu/drm/amd/amdkfd/kfd_peerdirect.c
+index 87344cc..fae8e8c 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_peerdirect.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_peerdirect.c
+@@ -49,9 +49,9 @@
+ #include <linux/slab.h>
+ #include <linux/scatterlist.h>
+ #include <linux/module.h>
+-#include <drm/amd_rdma.h>
+
+ #include "kfd_priv.h"
++#include "amd_rdma.h"
+
+
+
+@@ -137,6 +137,7 @@ static void (*pfn_ib_unregister_peer_memory_client)(void *reg_handle);
+
+ static const struct amd_rdma_interface *rdma_interface;
+
++static invalidate_peer_memory ib_invalidate_callback;
+ static void *ib_reg_handle;
+
+ struct amd_mem_context {
+@@ -168,6 +169,9 @@ static void free_callback(void *client_priv)
+
+ pr_debug("mem_context->core_context 0x%p\n", mem_context->core_context);
+
++ /* Call back IB stack asking to invalidate memory */
++ (*ib_invalidate_callback) (ib_reg_handle, mem_context->core_context);
++
+ /* amdkfd will free resources when we return from this callback.
+ * Set flag to inform that there is nothing to do on "put_pages", etc.
+ */
+@@ -474,7 +478,7 @@ void kfd_init_peer_direct(void)
+ strcpy(amd_mem_client.version, AMD_PEER_BRIDGE_DRIVER_VERSION);
+
+ ib_reg_handle = pfn_ib_register_peer_memory_client(&amd_mem_client,
+- NULL);
++ &ib_invalidate_callback);
+
+ if (!ib_reg_handle) {
+ pr_err("Cannot register peer memory client\n");
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index 7869a9d..b2ef0f5 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -30,15 +30,16 @@
+ #include <linux/atomic.h>
+ #include <linux/workqueue.h>
+ #include <linux/spinlock.h>
+-#include <linux/kfd_ioctl.h>
+ #include <linux/idr.h>
++#include <linux/kfd_ioctl.h>
++#include <linux/pid.h>
++#include <linux/interval_tree.h>
+ #include <linux/seq_file.h>
+ #include <linux/kref.h>
+ #include <linux/kfifo.h>
+-#include <linux/pid.h>
+-#include <linux/interval_tree.h>
+ #include <kgd_kfd_interface.h>
+
++#include "amd_rdma.h"
+ #include "amd_shared.h"
+
+ #define KFD_SYSFS_FILE_MODE 0444
+@@ -49,7 +50,8 @@
+ /* Use upper bits of mmap offset to store KFD driver specific information.
+ * BITS[63:62] - Encode MMAP type
+ * BITS[61:46] - Encode gpu_id. To identify to which GPU the offset belongs to
+- * BITS[45:0] - MMAP offset value
++ * BITS[45:40] - Reserved. Not Used.
++ * BITS[39:0] - MMAP offset value. Used by TTM.
+ *
+ * NOTE: struct vm_area_struct.vm_pgoff uses offset in pages. Hence, these
+ * defines are w.r.t to PAGE_SIZE
+@@ -68,7 +70,7 @@
+ #define KFD_MMAP_GPU_ID_GET(offset) ((offset & KFD_MMAP_GPU_ID_MASK) \
+ >> KFD_MMAP_GPU_ID_SHIFT)
+
+-#define KFD_MMAP_OFFSET_VALUE_MASK (0x3FFFFFFFFFFFULL >> PAGE_SHIFT)
++#define KFD_MMAP_OFFSET_VALUE_MASK (0xFFFFFFFFFFULL >> PAGE_SHIFT)
+ #define KFD_MMAP_OFFSET_VALUE_GET(offset) (offset & KFD_MMAP_OFFSET_VALUE_MASK)
+
+ /*
+@@ -81,6 +83,7 @@
+ #define KFD_CIK_HIQ_PIPE 4
+ #define KFD_CIK_HIQ_QUEUE 0
+
++
+ /* Macro for allocating structures */
+ #define kfd_alloc_struct(ptr_to_struct) \
+ ((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))
+@@ -113,14 +116,14 @@ extern int max_num_of_queues_per_device;
+ /* Kernel module parameter to specify the scheduling policy */
+ extern int sched_policy;
+
++extern int cwsr_enable;
++
+ /*
+ * Kernel module parameter to specify the maximum process
+ * number per HW scheduler
+ */
+ extern int hws_max_conc_proc;
+
+-extern int cwsr_enable;
+-
+ /*
+ * Kernel module parameter to specify whether to send sigterm to HSA process on
+ * unhandled exception
+@@ -142,18 +145,13 @@ extern int ignore_crat;
+ /*
+ * Set sh_mem_config.retry_disable on Vega10
+ */
+-extern int noretry;
++extern int vega10_noretry;
+
+ /*
+ * Enable privileged mode for all CP queues including user queues
+ */
+ extern int priv_cp_queues;
+
+-/*
+- * Halt if HWS hang is detected
+- */
+-extern int halt_if_hws_hang;
+-
+ /**
+ * enum kfd_sched_policy
+ *
+@@ -210,7 +208,6 @@ struct kfd_device_info {
+ bool needs_pci_atomics;
+ /* obtain from adev->sdma.num_instances */
+ unsigned int num_sdma_engines;
+- unsigned int num_sdma_queues_per_engine;
+ };
+
+ struct kfd_mem_obj {
+@@ -294,8 +291,6 @@ struct kfd_dev {
+ bool cwsr_enabled;
+ const void *cwsr_isa;
+ unsigned int cwsr_isa_size;
+-
+- bool pci_atomic_requested;
+ };
+
+ struct kfd_ipc_obj;
+@@ -306,41 +301,6 @@ struct kfd_bo {
+ struct kfd_dev *dev;
+ struct list_head cb_data_head;
+ struct kfd_ipc_obj *kfd_ipc_obj;
+- /* page-aligned VA address */
+- uint64_t cpuva;
+- unsigned int mem_type;
+-};
+-
+-struct cma_system_bo {
+- struct kgd_mem *mem;
+- struct sg_table *sg;
+- struct kfd_dev *dev;
+- struct list_head list;
+-};
+-
+-/* Similar to iov_iter */
+-struct cma_iter {
+- /* points to current entry of range array */
+- struct kfd_memory_range *array;
+- /* total number of entries in the initial array */
+- unsigned long nr_segs;
+- /* total amount of data pointed by kfd array*/
+- unsigned long total;
+- /* offset into the entry pointed by cma_iter.array */
+- unsigned long offset;
+- struct kfd_process *p;
+- struct mm_struct *mm;
+- struct task_struct *task;
+- /* current kfd_bo associated with cma_iter.array.va_addr */
+- struct kfd_bo *cur_bo;
+- /* offset w.r.t cur_bo */
+- unsigned long bo_offset;
+- /* If cur_bo is a userptr BO, then a shadow system BO is created
+- * using its underlying pages. cma_bo holds this BO. cma_list is a
+- * list cma_bos created in one session
+- */
+- struct cma_system_bo *cma_bo;
+- struct list_head cma_list;
+ };
+
+ /* KGD2KFD callbacks */
+@@ -444,11 +404,7 @@ enum KFD_QUEUE_PRIORITY {
+ * @is_interop: Defines if this is a interop queue. Interop queue means that
+ * the queue can access both graphics and compute resources.
+ *
+- * @is_evicted: Defines if the queue is evicted. Only active queues
+- * are evicted, rendering them inactive.
+- *
+- * @is_active: Defines if the queue is active or not. @is_active and
+- * @is_evicted are protected by the DQM lock.
++ * @is_active: Defines if the queue is active or not.
+ *
+ * @vmid: If the scheduling mode is no cp scheduling the field defines the vmid
+ * of the queue.
+@@ -470,7 +426,7 @@ struct queue_properties {
+ void __iomem *doorbell_ptr;
+ uint32_t doorbell_off;
+ bool is_interop;
+- bool is_evicted;
++ bool is_evicted; /* true -> queue is evicted */
+ bool is_active;
+ /* Not relevant for user mode queues in cp scheduling */
+ unsigned int vmid;
+@@ -589,6 +545,7 @@ struct qcm_process_device {
+ struct list_head priv_queue_list;
+
+ unsigned int queue_count;
++ /* a data field only meaningful for non-HWS case */
+ unsigned int vmid;
+ bool is_debug;
+ unsigned int evicted; /* eviction counter, 0=active */
+@@ -602,11 +559,11 @@ struct qcm_process_device {
+ * All the memory management data should be here too
+ */
+ uint64_t gds_context_area;
+- uint64_t page_table_base;
+ uint32_t sh_mem_config;
+ uint32_t sh_mem_bases;
+ uint32_t sh_mem_ape1_base;
+ uint32_t sh_mem_ape1_limit;
++ uint32_t page_table_base;
+ uint32_t gds_size;
+ uint32_t num_gws;
+ uint32_t num_oac;
+@@ -619,11 +576,11 @@ struct qcm_process_device {
+ uint64_t tma_addr;
+
+ /* IB memory */
+- uint64_t ib_base;
++ uint64_t ib_base; /* ib_base+ib_size must be below cwsr_base */
+ void *ib_kaddr;
+
+ /*doorbell resources per process per device*/
+- unsigned long *doorbell_bitmap;
++ unsigned long *doorbell_bitmap;
+ };
+
+ /* KFD Memory Eviction */
+@@ -635,10 +592,11 @@ struct qcm_process_device {
+ /* Approx. time before evicting the process again */
+ #define PROCESS_ACTIVE_TIME_MS 10
+
+-int kgd2kfd_quiesce_mm(struct mm_struct *mm);
+-int kgd2kfd_resume_mm(struct mm_struct *mm);
+ int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
+ struct dma_fence *fence);
++int kfd_process_evict_queues(struct kfd_process *p);
++int kfd_process_restore_queues(struct kfd_process *p);
++
+
+ /* 8 byte handle containing GPU ID in the most significant 4 bytes and
+ * idr_handle in the least significant 4 bytes
+@@ -754,14 +712,13 @@ struct kfd_process {
+ struct idr event_idr;
+ /* Event page */
+ struct kfd_signal_page *signal_page;
+- size_t signal_mapped_size;
+ size_t signal_event_count;
+ bool signal_event_limit_reached;
+
+ struct rb_root_cached bo_interval_tree;
+
+ /* Information used for memory eviction */
+- void *kgd_process_info;
++ void *process_info;
+ /* Eviction fence that is attached to all the BOs of this process. The
+ * fence will be triggered during eviction and new one will be created
+ * during restore
+@@ -804,32 +761,29 @@ struct amdkfd_ioctl_desc {
+ int kfd_process_create_wq(void);
+ void kfd_process_destroy_wq(void);
+ struct kfd_process *kfd_create_process(struct file *filep);
+-struct kfd_process *kfd_get_process(const struct task_struct *);
++struct kfd_process *kfd_get_process(const struct task_struct *task);
+ struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid);
+ struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm);
+ void kfd_unref_process(struct kfd_process *p);
+-int kfd_process_evict_queues(struct kfd_process *p);
+-int kfd_process_restore_queues(struct kfd_process *p);
+ void kfd_suspend_all_processes(void);
+ int kfd_resume_all_processes(void);
+
+ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
+ struct file *drm_file);
+ struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
+- struct kfd_process *p);
++ struct kfd_process *p);
+ struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
+ struct kfd_process *p);
+ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
+ struct kfd_process *p);
+
+-int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
+- struct vm_area_struct *vma);
++int kfd_reserved_mem_mmap(struct kfd_process *process,
++ struct vm_area_struct *vma);
+
+ /* KFD process API for creating and translating handles */
+ int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
+ void *mem, uint64_t start,
+- uint64_t length, uint64_t cpuva,
+- unsigned int mem_type,
++ uint64_t length,
+ struct kfd_ipc_obj *ipc_obj);
+ void *kfd_process_device_translate_handle(struct kfd_process_device *p,
+ int handle);
+@@ -864,7 +818,7 @@ void kfd_pasid_free(unsigned int pasid);
+ size_t kfd_doorbell_process_slice(struct kfd_dev *kfd);
+ int kfd_doorbell_init(struct kfd_dev *kfd);
+ void kfd_doorbell_fini(struct kfd_dev *kfd);
+-int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process,
++int kfd_doorbell_mmap(struct kfd_dev *kfd, struct kfd_process *process,
+ struct vm_area_struct *vma);
+ void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
+ unsigned int *doorbell_off);
+@@ -921,6 +875,8 @@ int kgd2kfd_post_reset(struct kfd_dev *kfd);
+
+ /* amdkfd Apertures */
+ int kfd_init_apertures(struct kfd_process *process);
++int kfd_set_process_dgpu_aperture(struct kfd_process_device *pdd,
++ uint64_t base, uint64_t limit);
+
+ /* Queue Context Management */
+ int init_queue(struct queue **q, const struct queue_properties *properties);
+@@ -975,6 +931,8 @@ int pqm_get_wave_state(struct process_queue_manager *pqm,
+ void __user *ctl_stack,
+ u32 *ctl_stack_used_size,
+ u32 *save_area_used_size);
++int kgd2kfd_quiesce_mm(struct kfd_dev *kfd, struct mm_struct *mm);
++int kgd2kfd_resume_mm(struct kfd_dev *kfd, struct mm_struct *mm);
+
+ int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
+ unsigned int fence_value,
+@@ -985,6 +943,8 @@ int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
+ #define KFD_FENCE_COMPLETED (100)
+ #define KFD_FENCE_INIT (10)
+
++struct packet_manager_func;
++
+ struct packet_manager {
+ struct device_queue_manager *dqm;
+ struct kernel_queue *priv_queue;
+@@ -993,11 +953,11 @@ struct packet_manager {
+ struct kfd_mem_obj *ib_buffer_obj;
+ unsigned int ib_size_bytes;
+
+- const struct packet_manager_funcs *pmf;
++ struct packet_manager_funcs *pmf;
+ };
+
+ struct packet_manager_funcs {
+- /* Support ASIC-specific packet formats for PM4 packets */
++ /* Support different firmware versions for PM4 packets */
+ int (*map_process)(struct packet_manager *pm, uint32_t *buffer,
+ struct qcm_process_device *qpd);
+ int (*runlist)(struct packet_manager *pm, uint32_t *buffer,
+@@ -1013,22 +973,20 @@ struct packet_manager_funcs {
+ unsigned int sdma_engine);
+ int (*query_status)(struct packet_manager *pm, uint32_t *buffer,
+ uint64_t fence_address, uint32_t fence_value);
+- int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);
+-
+- /* Packet sizes */
+- int map_process_size;
+- int runlist_size;
+- int set_resources_size;
+- int map_queues_size;
+- int unmap_queues_size;
+- int query_status_size;
+- int release_mem_size;
+-};
++ uint32_t (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);
+
+-extern const struct packet_manager_funcs kfd_vi_pm_funcs;
+-extern const struct packet_manager_funcs kfd_v9_pm_funcs;
++ uint32_t (*get_map_process_packet_size)(void);
++ uint32_t (*get_runlist_packet_size)(void);
++ uint32_t (*get_set_resources_packet_size)(void);
++ uint32_t (*get_map_queues_packet_size)(void);
++ uint32_t (*get_unmap_queues_packet_size)(void);
++ uint32_t (*get_query_status_packet_size)(void);
++ uint32_t (*get_release_mem_packet_size)(void);
+
+-int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
++};
++
++int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm,
++ uint16_t fw_ver);
+ void pm_uninit(struct packet_manager *pm);
+ int pm_send_set_resources(struct packet_manager *pm,
+ struct scheduling_resources *res);
+@@ -1043,10 +1001,37 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
+
+ void pm_release_ib(struct packet_manager *pm);
+
+-/* Following PM funcs can be shared among VI and AI */
++/* Following PM funcs can be shared among CIK and VI */
+ unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size);
++int pm_runlist_vi(struct packet_manager *pm, uint32_t *buffer,
++ uint64_t ib, size_t ib_size_in_dwords, bool chain);
++int pm_map_queues_vi(struct packet_manager *pm, uint32_t *buffer,
++ struct queue *q, bool is_static);
+ int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer,
+ struct scheduling_resources *res);
++int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer,
++ enum kfd_queue_type type,
++ enum kfd_unmap_queues_filter filter,
++ uint32_t filter_param, bool reset,
++ unsigned int sdma_engine);
++int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer,
++ uint64_t fence_address, uint32_t fence_value);
++uint32_t pm_release_mem_vi(uint64_t gpu_addr, uint32_t *buffer);
++
++uint32_t pm_get_map_process_packet_size_vi(void);
++uint32_t pm_get_runlist_packet_size_vi(void);
++uint32_t pm_get_set_resources_packet_size_vi(void);
++uint32_t pm_get_map_queues_packet_size_vi(void);
++uint32_t pm_get_unmap_queues_packet_size_vi(void);
++uint32_t pm_get_query_status_packet_size_vi(void);
++uint32_t pm_get_release_mem_packet_size_vi(void);
++
++
++void kfd_pm_func_init_vi(struct packet_manager *pm, uint16_t fw_ver);
++void kfd_pm_func_init_cik(struct packet_manager *pm, uint16_t fw_ver);
++
++void kfd_pm_func_init_v9(struct packet_manager *pm, uint16_t fw_ver);
++
+
+ uint64_t kfd_get_number_elems(struct kfd_dev *kfd);
+
+@@ -1071,24 +1056,21 @@ void kfd_signal_iommu_event(struct kfd_dev *dev,
+ void kfd_signal_hw_exception_event(unsigned int pasid);
+ int kfd_set_event(struct kfd_process *p, uint32_t event_id);
+ int kfd_reset_event(struct kfd_process *p, uint32_t event_id);
+-int kfd_event_page_set(struct kfd_process *p, void *kernel_address,
+- uint64_t size);
+ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
+ uint32_t event_type, bool auto_reset, uint32_t node_id,
+ uint32_t *event_id, uint32_t *event_trigger_data,
+- uint64_t *event_page_offset, uint32_t *event_slot_index);
++ uint64_t *event_page_offset, uint32_t *event_slot_index,
++ void *kern_addr);
+ int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);
+
+ void kfd_signal_vm_fault_event(struct kfd_dev *dev, unsigned int pasid,
+ struct kfd_vm_fault_info *info);
+
+-void kfd_signal_reset_event(struct kfd_dev *dev);
+-
+ void kfd_flush_tlb(struct kfd_process_device *pdd);
+
+ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p);
+
+-bool kfd_is_locked(void);
++#define KFD_SCRATCH_KV_FW_VER 413
+
+ /* PeerDirect support */
+ void kfd_init_peer_direct(void);
+@@ -1109,10 +1091,6 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data);
+ int kfd_debugfs_rls_by_device(struct seq_file *m, void *data);
+ int pm_debugfs_runlist(struct seq_file *m, void *data);
+
+-int kfd_debugfs_hang_hws(struct kfd_dev *dev);
+-int pm_debugfs_hang_hws(struct packet_manager *pm);
+-int dqm_debugfs_execute_queues(struct device_queue_manager *dqm);
+-
+ #else
+
+ static inline void kfd_debugfs_init(void) {}
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index da67302..c627b63 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -30,7 +30,6 @@
+ #include <linux/notifier.h>
+ #include <linux/compat.h>
+ #include <linux/mman.h>
+-#include <linux/file.h>
+ #include <asm/page.h>
+ #include "kfd_ipc.h"
+
+@@ -61,6 +60,9 @@ static struct workqueue_struct *kfd_process_wq;
+ */
+ static struct workqueue_struct *kfd_restore_wq;
+
++#define MIN_IDR_ID 1
++#define MAX_IDR_ID 0 /*0 - for unlimited*/
++
+ static struct kfd_process *find_process(const struct task_struct *thread,
+ bool ref);
+ static void kfd_process_ref_release(struct kref *ref);
+@@ -78,12 +80,7 @@ int kfd_process_create_wq(void)
+ if (!kfd_restore_wq)
+ kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq", 0);
+
+- if (!kfd_process_wq || !kfd_restore_wq) {
+- kfd_process_destroy_wq();
+- return -ENOMEM;
+- }
+-
+- return 0;
++ return kfd_process_wq && kfd_restore_wq ? 0 : -ENOMEM;
+ }
+
+ void kfd_process_destroy_wq(void)
+@@ -121,11 +118,9 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
+ struct kgd_mem *mem = NULL;
+ int handle;
+ int err;
+- unsigned int mem_type;
+
+ err = kdev->kfd2kgd->alloc_memory_of_gpu(kdev->kgd, gpu_va, size,
+- pdd->vm, NULL, &mem, NULL,
+- flags);
++ pdd->vm, &mem, NULL, flags);
+ if (err)
+ goto err_alloc_mem;
+
+@@ -139,18 +134,13 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
+ goto sync_memory_failed;
+ }
+
+- mem_type = flags & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM |
+- KFD_IOC_ALLOC_MEM_FLAGS_GTT |
+- KFD_IOC_ALLOC_MEM_FLAGS_USERPTR |
+- KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL);
+-
+ /* Create an obj handle so kfd_process_device_remove_obj_handle
+ * will take care of the bo removal when the process finishes.
+ * We do not need to take p->mutex, because the process is just
+ * created and the ioctls have not had the chance to run.
+ */
+ handle = kfd_process_device_create_obj_handle(
+- pdd, mem, gpu_va, size, 0, mem_type, NULL);
++ pdd, mem, gpu_va, size, NULL);
+
+ if (handle < 0) {
+ err = handle;
+@@ -185,16 +175,14 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
+ /* kfd_process_device_reserve_ib_mem - Reserve memory inside the
+ * process for IB usage The memory reserved is for KFD to submit
+ * IB to AMDGPU from kernel. If the memory is reserved
+- * successfully, ib_kaddr will have the CPU/kernel
+- * address. Check ib_kaddr before accessing the memory.
++ * successfully, ib_kaddr_assigned will have the CPU/kernel
++ * address. Check ib_kaddr_assigned before accessing the memory.
+ */
+ static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
+ {
+ struct qcm_process_device *qpd = &pdd->qpd;
+- uint32_t flags = ALLOC_MEM_FLAGS_GTT |
+- ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
+- ALLOC_MEM_FLAGS_WRITABLE |
+- ALLOC_MEM_FLAGS_EXECUTABLE;
++ uint32_t flags = ALLOC_MEM_FLAGS_GTT | ALLOC_MEM_FLAGS_NONPAGED |
++ ALLOC_MEM_FLAGS_NO_SUBSTITUTE | ALLOC_MEM_FLAGS_EXECUTE_ACCESS;
+ void *kaddr;
+ int ret;
+
+@@ -215,6 +203,7 @@ static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
+ struct kfd_process *kfd_create_process(struct file *filep)
+ {
+ struct kfd_process *process;
++
+ struct task_struct *thread = current;
+
+ if (!thread->mm)
+@@ -255,8 +244,6 @@ struct kfd_process *kfd_get_process(const struct task_struct *thread)
+ return ERR_PTR(-EINVAL);
+
+ process = find_process(thread, false);
+- if (!process)
+- return ERR_PTR(-EINVAL);
+
+ return process;
+ }
+@@ -352,9 +339,7 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
+
+ list_for_each_entry_safe(pdd, temp, &p->per_device_data,
+ per_device_list) {
+- pr_debug("Releasing pdd (topology id %d) for process (pasid %d)\n",
+- pdd->dev->id, p->pasid);
+-
++ /* Destroy the GPUVM VM context */
+ if (pdd->drm_file)
+ fput(pdd->drm_file);
+ else if (pdd->vm)
+@@ -407,6 +392,9 @@ static void kfd_process_ref_release(struct kref *ref)
+ {
+ struct kfd_process *p = container_of(ref, struct kfd_process, ref);
+
++ if (WARN_ON(!kfd_process_wq))
++ return;
++
+ INIT_WORK(&p->release_work, kfd_process_wq_release);
+ queue_work(kfd_process_wq, &p->release_work);
+ }
+@@ -487,19 +475,17 @@ static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
+ if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
+ continue;
+
+- offset = (KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id))
+- << PAGE_SHIFT;
+- qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
+- KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
+- MAP_SHARED, offset);
++ offset = (dev->id | KFD_MMAP_TYPE_RESERVED_MEM) << PAGE_SHIFT;
++ qpd->tba_addr = (uint64_t)vm_mmap(filep, 0,
++ KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
++ MAP_SHARED, offset);
+
+ if (IS_ERR_VALUE(qpd->tba_addr)) {
+- int err = qpd->tba_addr;
+-
+- pr_err("Failure to set tba address. error %d.\n", err);
++ pr_err("Failure to set tba address. error -%d.\n",
++ (int)qpd->tba_addr);
+ qpd->tba_addr = 0;
+ qpd->cwsr_kaddr = NULL;
+- return err;
++ return -ENOMEM;
+ }
+
+ memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);
+@@ -516,8 +502,9 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
+ {
+ struct kfd_dev *dev = pdd->dev;
+ struct qcm_process_device *qpd = &pdd->qpd;
+- uint32_t flags = ALLOC_MEM_FLAGS_GTT |
+- ALLOC_MEM_FLAGS_NO_SUBSTITUTE | ALLOC_MEM_FLAGS_EXECUTABLE;
++ uint32_t flags = ALLOC_MEM_FLAGS_GTT | ALLOC_MEM_FLAGS_NONPAGED |
++ ALLOC_MEM_FLAGS_NO_SUBSTITUTE | ALLOC_MEM_FLAGS_READONLY |
++ ALLOC_MEM_FLAGS_EXECUTE_ACCESS;
+ void *kaddr;
+ int ret;
+
+@@ -675,12 +662,6 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
+ if (!pdd)
+ return NULL;
+
+- if (init_doorbell_bitmap(&pdd->qpd, dev)) {
+- pr_err("Failed to init doorbell for process\n");
+- kfree(pdd);
+- return NULL;
+- }
+-
+ pdd->dev = dev;
+ INIT_LIST_HEAD(&pdd->qpd.queues_list);
+ INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
+@@ -694,8 +675,19 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
+
+ /* Init idr used for memory handle translation */
+ idr_init(&pdd->alloc_idr);
++ if (init_doorbell_bitmap(&pdd->qpd, dev)) {
++ pr_err("Failed to init doorbell for process\n");
++ goto err_create_pdd;
++ }
+
+ return pdd;
++
++err_create_pdd:
++ kfree(pdd->qpd.doorbell_bitmap);
++ idr_destroy(&pdd->alloc_idr);
++ list_del(&pdd->per_device_list);
++ kfree(pdd);
++ return NULL;
+ }
+
+ /**
+@@ -720,18 +712,17 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
+ int ret;
+
+ if (pdd->vm)
+- return drm_file ? -EBUSY : 0;
++ return 0;
+
+ p = pdd->process;
+ dev = pdd->dev;
+
+ if (drm_file)
+ ret = dev->kfd2kgd->acquire_process_vm(
+- dev->kgd, drm_file, p->pasid,
+- &pdd->vm, &p->kgd_process_info, &p->ef);
++ dev->kgd, drm_file, &pdd->vm, &p->process_info, &p->ef);
+ else
+ ret = dev->kfd2kgd->create_process_vm(
+- dev->kgd, p->pasid, &pdd->vm, &p->kgd_process_info, &p->ef);
++ dev->kgd, &pdd->vm, &p->process_info, &p->ef);
+ if (ret) {
+ pr_err("Failed to create process VM object\n");
+ return ret;
+@@ -815,8 +806,7 @@ bool kfd_has_process_device_data(struct kfd_process *p)
+ */
+ int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
+ void *mem, uint64_t start,
+- uint64_t length, uint64_t cpuva,
+- unsigned int mem_type,
++ uint64_t length,
+ struct kfd_ipc_obj *ipc_obj)
+ {
+ int handle;
+@@ -837,12 +827,15 @@ int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
+ buf_obj->mem = mem;
+ buf_obj->dev = pdd->dev;
+ buf_obj->kfd_ipc_obj = ipc_obj;
+- buf_obj->cpuva = cpuva;
+- buf_obj->mem_type = mem_type;
+
+ INIT_LIST_HEAD(&buf_obj->cb_data_head);
+
+- handle = idr_alloc(&pdd->alloc_idr, buf_obj, 0, 0, GFP_KERNEL);
++ idr_preload(GFP_KERNEL);
++
++ handle = idr_alloc(&pdd->alloc_idr, buf_obj, MIN_IDR_ID, MAX_IDR_ID,
++ GFP_NOWAIT);
++
++ idr_preload_end();
+
+ if (handle < 0)
+ kfree(buf_obj);
+@@ -945,6 +938,42 @@ struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid)
+ return ret_p;
+ }
+
++void kfd_suspend_all_processes(void)
++{
++ struct kfd_process *p;
++ unsigned int temp;
++ int idx = srcu_read_lock(&kfd_processes_srcu);
++
++ hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
++ cancel_delayed_work_sync(&p->eviction_work);
++ cancel_delayed_work_sync(&p->restore_work);
++
++ if (kfd_process_evict_queues(p))
++ pr_err("Failed to suspend process %d\n", p->pasid);
++ dma_fence_signal(p->ef);
++ dma_fence_put(p->ef);
++ p->ef = NULL;
++ }
++ srcu_read_unlock(&kfd_processes_srcu, idx);
++}
++
++int kfd_resume_all_processes(void)
++{
++ struct kfd_process *p;
++ unsigned int temp;
++ int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu);
++
++ hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
++ if (!queue_delayed_work(kfd_restore_wq, &p->restore_work, 0)) {
++ pr_err("Restore process %d failed during resume\n",
++ p->pasid);
++ ret = -EFAULT;
++ }
++ }
++ srcu_read_unlock(&kfd_processes_srcu, idx);
++ return ret;
++}
++
+ /* This increments the process->ref counter. */
+ struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
+ {
+@@ -1036,14 +1065,15 @@ static void evict_process_worker(struct work_struct *work)
+ "Eviction fence mismatch\n");
+
+ /* Narrow window of overlap between restore and evict work
+- * item is possible. Once amdgpu_amdkfd_gpuvm_restore_process_bos
+- * unreserves KFD BOs, it is possible to evicted again. But
+- * restore has few more steps of finish. So lets wait for any
+- * previous restore work to complete
++ * item is possible. Once
++ * amdgpu_amdkfd_gpuvm_restore_process_bos unreserves KFD BOs,
++ * it is possible to evicted again. But restore has few more
++ * steps of finish. So lets wait for any previous restore work
++ * to complete
+ */
+ flush_delayed_work(&p->restore_work);
+
+- pr_info("Started evicting pasid %d\n", p->pasid);
++ pr_info("Started evicting process of pasid %d\n", p->pasid);
+ ret = kfd_process_evict_queues(p);
+ if (!ret) {
+ dma_fence_signal(p->ef);
+@@ -1052,9 +1082,10 @@ static void evict_process_worker(struct work_struct *work)
+ queue_delayed_work(kfd_restore_wq, &p->restore_work,
+ msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));
+
+- pr_info("Finished evicting pasid %d\n", p->pasid);
++ pr_info("Finished evicting process of pasid %d\n", p->pasid);
+ } else
+- pr_err("Failed to evict queues of pasid %d\n", p->pasid);
++ pr_err("Failed to quiesce user queues. Cannot evict pasid %d\n",
++ p->pasid);
+ }
+
+ static void restore_process_worker(struct work_struct *work)
+@@ -1080,7 +1111,7 @@ static void restore_process_worker(struct work_struct *work)
+ struct kfd_process_device,
+ per_device_list);
+
+- pr_info("Started restoring pasid %d\n", p->pasid);
++ pr_info("Started restoring process of pasid %d\n", p->pasid);
+
+ /* Setting last_restore_timestamp before successful restoration.
+ * Otherwise this would have to be set by KGD (restore_process_bos)
+@@ -1093,11 +1124,10 @@ static void restore_process_worker(struct work_struct *work)
+ */
+
+ p->last_restore_timestamp = get_jiffies_64();
+- ret = pdd->dev->kfd2kgd->restore_process_bos(p->kgd_process_info,
+- &p->ef);
++ ret = pdd->dev->kfd2kgd->restore_process_bos(p->process_info, &p->ef);
+ if (ret) {
+- pr_info("Failed to restore BOs of pasid %d, retry after %d ms\n",
+- p->pasid, PROCESS_BACK_OFF_TIME_MS);
++ pr_info("Restore failed, try again after %d ms\n",
++ PROCESS_BACK_OFF_TIME_MS);
+ ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
+ msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
+ WARN(!ret, "reschedule restore work failed\n");
+@@ -1105,54 +1135,21 @@ static void restore_process_worker(struct work_struct *work)
+ }
+
+ ret = kfd_process_restore_queues(p);
+- if (!ret)
+- pr_info("Finished restoring pasid %d\n", p->pasid);
+- else
+- pr_err("Failed to restore queues of pasid %d\n", p->pasid);
+-}
+-
+-void kfd_suspend_all_processes(void)
+-{
+- struct kfd_process *p;
+- unsigned int temp;
+- int idx = srcu_read_lock(&kfd_processes_srcu);
+-
+- hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
+- cancel_delayed_work_sync(&p->eviction_work);
+- cancel_delayed_work_sync(&p->restore_work);
+-
+- if (kfd_process_evict_queues(p))
+- pr_err("Failed to suspend process %d\n", p->pasid);
+- dma_fence_signal(p->ef);
+- dma_fence_put(p->ef);
+- p->ef = NULL;
+- }
+- srcu_read_unlock(&kfd_processes_srcu, idx);
+-}
+-
+-int kfd_resume_all_processes(void)
+-{
+- struct kfd_process *p;
+- unsigned int temp;
+- int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu);
++ if (ret)
++ pr_err("Failed to resume user queues\n");
+
+- hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
+- if (!queue_delayed_work(kfd_restore_wq, &p->restore_work, 0)) {
+- pr_err("Restore process %d failed during resume\n",
+- p->pasid);
+- ret = -EFAULT;
+- }
+- }
+- srcu_read_unlock(&kfd_processes_srcu, idx);
+- return ret;
++ pr_info("Finished restoring process of pasid %d\n", p->pasid);
+ }
+
+-int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
+- struct vm_area_struct *vma)
++int kfd_reserved_mem_mmap(struct kfd_process *process,
++ struct vm_area_struct *vma)
+ {
++ struct kfd_dev *dev = kfd_device_by_id(vma->vm_pgoff);
+ struct kfd_process_device *pdd;
+ struct qcm_process_device *qpd;
+
++ if (!dev)
++ return -EINVAL;
+ if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
+ pr_err("Incorrect CWSR mapping size.\n");
+ return -EINVAL;
+@@ -1178,6 +1175,7 @@ int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
+ KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
+ }
+
++
+ void kfd_flush_tlb(struct kfd_process_device *pdd)
+ {
+ struct kfd_dev *dev = pdd->dev;
+@@ -1212,7 +1210,7 @@ int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
+ r = pqm_debugfs_mqds(m, &p->pqm);
+ mutex_unlock(&p->mutex);
+
+- if (r)
++ if (r != 0)
+ break;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+index 8933323..52882e0 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+@@ -188,7 +188,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
+ case KFD_QUEUE_TYPE_SDMA:
+ if (dev->dqm->sdma_queue_count
+ >= get_num_sdma_queues(dev->dqm)) {
+- pr_debug("Over-subscription is not allowed for SDMA.\n");
++ pr_debug("Over-subscription is not allowed for SDMA\n");
+ retval = -EPERM;
+ goto err_create_queue;
+ }
+@@ -206,7 +206,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
+ case KFD_QUEUE_TYPE_COMPUTE:
+ /* check if there is over subscription */
+ if ((dev->dqm->sched_policy ==
+- KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
++ KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
+ ((dev->dqm->processes_count >= dev->vm_info.vmid_num_kfd) ||
+ (dev->dqm->queue_count >= get_queues_num(dev->dqm)))) {
+ pr_debug("Over-subscription is not allowed in radeon_kfd.sched_policy == 1\n");
+@@ -241,8 +241,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
+ }
+
+ if (retval != 0) {
+- pr_err("Pasid %d DQM create queue %d failed. ret %d\n",
+- pqm->process->pasid, type, retval);
++ pr_err("DQM create queue failed\n");
+ goto err_create_queue;
+ }
+
+@@ -318,16 +317,13 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
+
+ if (pqn->q) {
+ dqm = pqn->q->device->dqm;
++ kfree(pqn->q->properties.cu_mask);
++ pqn->q->properties.cu_mask = NULL;
+ retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
+ if (retval) {
+- pr_err("Pasid %d destroy queue %d failed, ret %d\n",
+- pqm->process->pasid,
+- pqn->q->properties.queue_id, retval);
+- if (retval != -ETIME)
+- goto err_destroy_queue;
++ pr_debug("Destroy queue failed, returned %d\n", retval);
++ goto err_destroy_queue;
+ }
+- kfree(pqn->q->properties.cu_mask);
+- pqn->q->properties.cu_mask = NULL;
+ uninit_queue(pqn->q);
+ }
+
+@@ -439,7 +435,7 @@ int pqm_debugfs_mqds(struct seq_file *m, void *data)
+ struct process_queue_node *pqn;
+ struct queue *q;
+ enum KFD_MQD_TYPE mqd_type;
+- struct mqd_manager *mqd_mgr;
++ struct mqd_manager *mqd_manager;
+ int r = 0;
+
+ list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
+@@ -462,11 +458,11 @@ int pqm_debugfs_mqds(struct seq_file *m, void *data)
+ q->properties.type, q->device->id);
+ continue;
+ }
+- mqd_mgr = q->device->dqm->ops.get_mqd_manager(
++ mqd_manager = q->device->dqm->ops.get_mqd_manager(
+ q->device->dqm, mqd_type);
+ } else if (pqn->kq) {
+ q = pqn->kq->queue;
+- mqd_mgr = pqn->kq->mqd_mgr;
++ mqd_manager = pqn->kq->mqd;
+ switch (q->properties.type) {
+ case KFD_QUEUE_TYPE_DIQ:
+ seq_printf(m, " DIQ on device %x\n",
+@@ -486,7 +482,7 @@ int pqm_debugfs_mqds(struct seq_file *m, void *data)
+ continue;
+ }
+
+- r = mqd_mgr->debugfs_show_mqd(m, q->mqd);
++ r = mqd_manager->debugfs_show_mqd(m, q->mqd);
+ if (r != 0)
+ break;
+ }
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
+index 6dcd621..a5315d4 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
+@@ -36,8 +36,8 @@ void print_queue_properties(struct queue_properties *q)
+ pr_debug("Queue Address: 0x%llX\n", q->queue_address);
+ pr_debug("Queue Id: %u\n", q->queue_id);
+ pr_debug("Queue Process Vmid: %u\n", q->vmid);
+- pr_debug("Queue Read Pointer: 0x%px\n", q->read_ptr);
+- pr_debug("Queue Write Pointer: 0x%px\n", q->write_ptr);
++ pr_debug("Queue Read Pointer: 0x%p\n", q->read_ptr);
++ pr_debug("Queue Write Pointer: 0x%p\n", q->write_ptr);
+ pr_debug("Queue Doorbell Pointer: 0x%p\n", q->doorbell_ptr);
+ pr_debug("Queue Doorbell Offset: %u\n", q->doorbell_off);
+ }
+@@ -53,8 +53,8 @@ void print_queue(struct queue *q)
+ pr_debug("Queue Address: 0x%llX\n", q->properties.queue_address);
+ pr_debug("Queue Id: %u\n", q->properties.queue_id);
+ pr_debug("Queue Process Vmid: %u\n", q->properties.vmid);
+- pr_debug("Queue Read Pointer: 0x%px\n", q->properties.read_ptr);
+- pr_debug("Queue Write Pointer: 0x%px\n", q->properties.write_ptr);
++ pr_debug("Queue Read Pointer: 0x%p\n", q->properties.read_ptr);
++ pr_debug("Queue Write Pointer: 0x%p\n", q->properties.write_ptr);
+ pr_debug("Queue Doorbell Pointer: 0x%p\n", q->properties.doorbell_ptr);
+ pr_debug("Queue Doorbell Offset: %u\n", q->properties.doorbell_off);
+ pr_debug("Queue MQD Address: 0x%p\n", q->mqd);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_rdma.c b/drivers/gpu/drm/amd/amdkfd/kfd_rdma.c
+index 3454514..985855f 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_rdma.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_rdma.c
+@@ -25,7 +25,7 @@
+ #include <linux/pid.h>
+ #include <linux/err.h>
+ #include <linux/slab.h>
+-#include <drm/amd_rdma.h>
++#include "amd_rdma.h"
+ #include "kfd_priv.h"
+
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+index 7702156..320c8d3 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+@@ -196,7 +196,6 @@ struct kfd_topology_device *kfd_create_topology_device(
+ return dev;
+ }
+
+-
+ #define sysfs_show_gen_prop(buffer, fmt, ...) \
+ snprintf(buffer, PAGE_SIZE, "%s"fmt, buffer, __VA_ARGS__)
+ #define sysfs_show_32bit_prop(buffer, name, value) \
+@@ -740,7 +739,7 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
+ }
+
+ /* All hardware blocks have the same number of attributes. */
+- num_attrs = ARRAY_SIZE(perf_attr_iommu);
++ num_attrs = sizeof(perf_attr_iommu)/sizeof(struct kfd_perf_attr);
+ list_for_each_entry(perf, &dev->perf_props, list) {
+ perf->attr_group = kzalloc(sizeof(struct kfd_perf_attr)
+ * num_attrs + sizeof(struct attribute_group),
+@@ -891,8 +890,7 @@ static void kfd_debug_print_topology(void)
+ up_read(&topology_lock);
+ }
+
+-/* Helper function for intializing platform_xx members of
+- * kfd_system_properties. Uses OEM info from the last CPU/APU node.
++/* Helper function for intializing platform_xx members of kfd_system_properties
+ */
+ static void kfd_update_system_properties(void)
+ {
+@@ -1015,12 +1013,13 @@ int kfd_topology_init(void)
+ */
+ #ifdef CONFIG_ACPI
+ ret = kfd_create_crat_image_acpi(&crat_image, &image_size);
+- if (!ret) {
++ if (ret == 0) {
+ ret = kfd_parse_crat_table(crat_image,
+ &temp_topology_device_list,
+ proximity_domain);
+ if (ret ||
+- kfd_is_acpi_crat_invalid(&temp_topology_device_list)) {
++ kfd_is_acpi_crat_invalid(&temp_topology_device_list)) {
++
+ kfd_release_topology_device_list(
+ &temp_topology_device_list);
+ kfd_destroy_crat_image(crat_image);
+@@ -1030,8 +1029,8 @@ int kfd_topology_init(void)
+ #endif
+ if (!crat_image) {
+ ret = kfd_create_crat_image_virtual(&crat_image, &image_size,
+- COMPUTE_UNIT_CPU, NULL,
+- proximity_domain);
++ COMPUTE_UNIT_CPU, NULL,
++ proximity_domain);
+ cpu_only_node = 1;
+ if (ret) {
+ pr_err("Error creating VCRAT table for CPU\n");
+@@ -1039,8 +1038,8 @@ int kfd_topology_init(void)
+ }
+
+ ret = kfd_parse_crat_table(crat_image,
+- &temp_topology_device_list,
+- proximity_domain);
++ &temp_topology_device_list,
++ proximity_domain);
+ if (ret) {
+ pr_err("Error parsing VCRAT table for CPU\n");
+ goto err;
+@@ -1052,12 +1051,12 @@ int kfd_topology_init(void)
+
+ down_write(&topology_lock);
+ kfd_topology_update_device_list(&temp_topology_device_list,
+- &topology_device_list);
++ &topology_device_list);
+ atomic_set(&topology_crat_proximity_domain, sys_props.num_devices-1);
+ ret = kfd_topology_update_sysfs();
+ up_write(&topology_lock);
+
+- if (!ret) {
++ if (ret == 0) {
+ sys_props.generation_count++;
+ kfd_update_system_properties();
+ kfd_debug_print_topology();
+@@ -1145,6 +1144,7 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
+ break;
+ }
+ up_write(&topology_lock);
++
+ return out_dev;
+ }
+
+@@ -1182,40 +1182,17 @@ static void kfd_fill_mem_clk_max_info(struct kfd_topology_device *dev)
+
+ static void kfd_fill_iolink_non_crat_info(struct kfd_topology_device *dev)
+ {
+- struct kfd_iolink_properties *link, *cpu_link;
+- struct kfd_topology_device *cpu_dev;
+- uint32_t cap;
+- uint32_t cpu_flag = CRAT_IOLINK_FLAGS_ENABLED;
+- uint32_t flag = CRAT_IOLINK_FLAGS_ENABLED;
++ struct kfd_iolink_properties *link;
+
+ if (!dev || !dev->gpu)
+ return;
+
+- pcie_capability_read_dword(dev->gpu->pdev,
+- PCI_EXP_DEVCAP2, &cap);
+-
+- if (!(cap & (PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
+- PCI_EXP_DEVCAP2_ATOMIC_COMP64)))
+- cpu_flag |= CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT |
+- CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT;
+-
+- if (!dev->gpu->pci_atomic_requested ||
+- dev->gpu->device_info->asic_family == CHIP_HAWAII)
+- flag |= CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT |
++	/* GPU only creates direct links so apply flags setting to all */
++ if (dev->gpu->device_info->asic_family == CHIP_HAWAII)
++ list_for_each_entry(link, &dev->io_link_props, list)
++ link->flags = CRAT_IOLINK_FLAGS_ENABLED |
++ CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT |
+ CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT;
+-
+- /* GPU only creates direct links so apply flags setting to all */
+- list_for_each_entry(link, &dev->io_link_props, list) {
+- link->flags = flag;
+- cpu_dev = kfd_topology_device_by_proximity_domain(
+- link->node_to);
+- if (cpu_dev) {
+- list_for_each_entry(cpu_link,
+- &cpu_dev->io_link_props, list)
+- if (cpu_link->node_to == link->node_from)
+- cpu_link->flags = cpu_flag;
+- }
+- }
+ }
+
+ int kfd_topology_add_device(struct kfd_dev *gpu)
+@@ -1235,7 +1212,8 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
+
+ pr_debug("Adding new GPU (ID: 0x%x) to topology\n", gpu_id);
+
+- proximity_domain = atomic_inc_return(&topology_crat_proximity_domain);
++ proximity_domain = atomic_inc_return(&
++ topology_crat_proximity_domain);
+
+ /* Check to see if this gpu device exists in the topology_device_list.
+ * If so, assign the gpu to that device,
+@@ -1246,16 +1224,15 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
+ dev = kfd_assign_gpu(gpu);
+ if (!dev) {
+ res = kfd_create_crat_image_virtual(&crat_image, &image_size,
+- COMPUTE_UNIT_GPU, gpu,
+- proximity_domain);
++ COMPUTE_UNIT_GPU,
++ gpu, proximity_domain);
+ if (res) {
+ pr_err("Error creating VCRAT for GPU (ID: 0x%x)\n",
+ gpu_id);
+ return res;
+ }
+ res = kfd_parse_crat_table(crat_image,
+- &temp_topology_device_list,
+- proximity_domain);
++ &temp_topology_device_list, proximity_domain);
+ if (res) {
+ pr_err("Error parsing VCRAT for GPU (ID: 0x%x)\n",
+ gpu_id);
+@@ -1272,13 +1249,14 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
+ res = kfd_topology_update_sysfs();
+ up_write(&topology_lock);
+
+- if (!res)
++ if (res == 0)
+ sys_props.generation_count++;
+ else
+ pr_err("Failed to update GPU (ID: 0x%x) to sysfs topology. res=%d\n",
+ gpu_id, res);
+ dev = kfd_assign_gpu(gpu);
+- if (WARN_ON(!dev)) {
++ if (!dev) {
++ pr_err("Could not assign GPU\n");
+ res = -ENODEV;
+ goto err;
+ }
+@@ -1331,22 +1309,20 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
+ HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
+ break;
+ case CHIP_VEGA10:
+- case CHIP_VEGA20:
+ case CHIP_RAVEN:
+ dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 <<
+ HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
+ HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
+ break;
+ default:
+- WARN(1, "Unexpected ASIC family %u",
+- dev->gpu->device_info->asic_family);
++ BUG();
+ }
+
+ /* Fix errors in CZ CRAT.
+- * simd_count: Carrizo CRAT reports wrong simd_count, probably
+- * because it doesn't consider masked out CUs
+- * max_waves_per_simd: Carrizo reports wrong max_waves_per_simd
+- * capability flag: Carrizo CRAT doesn't report IOMMU flags
++ * simd_count: Carrizo CRAT reports wrong simd_count, probably because
++ * it doesn't consider masked out CUs
++ * max_waves_per_simd: Carrizo reports wrong max_waves_per_simd.
++ * capability flag: Carrizo CRAT doesn't report IOMMU flags.
+ */
+ if (dev->gpu->device_info->asic_family == CHIP_CARRIZO) {
+ dev->node_props.simd_count =
+@@ -1386,7 +1362,7 @@ int kfd_topology_remove_device(struct kfd_dev *gpu)
+
+ up_write(&topology_lock);
+
+- if (!res)
++ if (res == 0)
+ kfd_notify_gpu_change(gpu_id, 0);
+
+ return res;
+@@ -1427,7 +1403,7 @@ static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask)
+ {
+ int first_cpu_of_numa_node;
+
+- if (!cpumask || cpumask == cpu_none_mask)
++ if (!cpumask || (cpumask == cpu_none_mask))
+ return -1;
+ first_cpu_of_numa_node = cpumask_first(cpumask);
+ if (first_cpu_of_numa_node >= nr_cpu_ids)
+@@ -1470,7 +1446,7 @@ int kfd_debugfs_hqds_by_device(struct seq_file *m, void *data)
+
+ seq_printf(m, "Node %u, gpu_id %x:\n", i++, dev->gpu->id);
+ r = dqm_debugfs_hqds(m, dev->gpu->dqm);
+- if (r)
++ if (r != 0)
+ break;
+ }
+
+@@ -1495,7 +1471,7 @@ int kfd_debugfs_rls_by_device(struct seq_file *m, void *data)
+
+ seq_printf(m, "Node %u, gpu_id %x:\n", i++, dev->gpu->id);
+ r = pm_debugfs_runlist(m, &dev->gpu->dqm->packets);
+- if (r)
++ if (r != 0)
+ break;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+index 2b36baf..f4d29c4 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+@@ -46,6 +46,9 @@
+ #define HSA_CAP_DOORBELL_TYPE_PRE_1_0 0x0
+ #define HSA_CAP_DOORBELL_TYPE_1_0 0x1
+ #define HSA_CAP_DOORBELL_TYPE_2_0 0x2
++#define HSA_CAP_WATCH_POINTS_TOTALBITS_MASK 0x00000f00
++#define HSA_CAP_WATCH_POINTS_TOTALBITS_SHIFT 8
++#define HSA_CAP_DOORBELL_PACKET_TYPE 0x00001000
+ #define HSA_CAP_AQL_QUEUE_DOUBLE_MAP 0x00004000
+
+ struct kfd_node_properties {
+@@ -166,9 +169,9 @@ struct kfd_topology_device {
+ struct attribute attr_gpuid;
+ struct attribute attr_name;
+ struct attribute attr_props;
+- uint8_t oem_id[CRAT_OEMID_LENGTH];
+- uint8_t oem_table_id[CRAT_OEMTABLEID_LENGTH];
+- uint32_t oem_revision;
++ uint8_t oem_id[CRAT_OEMID_LENGTH];
++ uint8_t oem_table_id[CRAT_OEMTABLEID_LENGTH];
++ uint32_t oem_revision;
+ };
+
+ struct kfd_system_properties {
+@@ -187,8 +190,4 @@ struct kfd_topology_device *kfd_create_topology_device(
+ struct list_head *device_list);
+ void kfd_release_topology_device_list(struct list_head *device_list);
+
+-extern bool amd_iommu_pc_supported(void);
+-extern u8 amd_iommu_pc_get_max_banks(u16 devid);
+-extern u8 amd_iommu_pc_get_max_counters(u16 devid);
+-
+ #endif /* __KFD_TOPOLOGY_H__ */
+diff --git a/drivers/gpu/drm/amd/amdkfd/soc15_int.h b/drivers/gpu/drm/amd/amdkfd/soc15_int.h
+index 0bc0b25..e00d03d 100644
+--- a/drivers/gpu/drm/amd/amdkfd/soc15_int.h
++++ b/drivers/gpu/drm/amd/amdkfd/soc15_int.h
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2016-2018 Advanced Micro Devices, Inc.
++ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+@@ -22,8 +22,45 @@
+
+ #ifndef HSA_SOC15_INT_H_INCLUDED
+ #define HSA_SOC15_INT_H_INCLUDED
++/*
++ * vega10+ IH clients
++ */
++enum soc15_ih_client_id {
++ SOC15_IH_CLIENTID_IH = 0x00,
++ SOC15_IH_CLIENTID_ACP = 0x01,
++ SOC15_IH_CLIENTID_ATHUB = 0x02,
++ SOC15_IH_CLIENTID_BIF = 0x03,
++ SOC15_IH_CLIENTID_DCE = 0x04,
++ SOC15_IH_CLIENTID_ISP = 0x05,
++ SOC15_IH_CLIENTID_PCIE0 = 0x06,
++ SOC15_IH_CLIENTID_RLC = 0x07,
++ SOC15_IH_CLIENTID_SDMA0 = 0x08,
++ SOC15_IH_CLIENTID_SDMA1 = 0x09,
++ SOC15_IH_CLIENTID_SE0SH = 0x0a,
++ SOC15_IH_CLIENTID_SE1SH = 0x0b,
++ SOC15_IH_CLIENTID_SE2SH = 0x0c,
++ SOC15_IH_CLIENTID_SE3SH = 0x0d,
++ SOC15_IH_CLIENTID_SYSHUB = 0x0e,
++ SOC15_IH_CLIENTID_THM = 0x0f,
++ SOC15_IH_CLIENTID_UVD = 0x10,
++ SOC15_IH_CLIENTID_VCE0 = 0x11,
++ SOC15_IH_CLIENTID_VMC = 0x12,
++ SOC15_IH_CLIENTID_XDMA = 0x13,
++ SOC15_IH_CLIENTID_GRBM_CP = 0x14,
++ SOC15_IH_CLIENTID_ATS = 0x15,
++ SOC15_IH_CLIENTID_ROM_SMUIO = 0x16,
++ SOC15_IH_CLIENTID_DF = 0x17,
++ SOC15_IH_CLIENTID_VCE1 = 0x18,
++ SOC15_IH_CLIENTID_PWR = 0x19,
++ SOC15_IH_CLIENTID_UTCL2 = 0x1b,
++ SOC15_IH_CLIENTID_EA = 0x1c,
++ SOC15_IH_CLIENTID_UTCL2LOG = 0x1d,
++ SOC15_IH_CLIENTID_MP0 = 0x1e,
++ SOC15_IH_CLIENTID_MP1 = 0x1f,
++
++ SOC15_IH_CLIENTID_MAX
++};
+
+-#include "soc15_ih_clientid.h"
+
+ #define SOC15_INTSRC_CP_END_OF_PIPE 181
+ #define SOC15_INTSRC_CP_BAD_OPCODE 183
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5619-drm-amdkfd-Change-the-control-stack-mtype-from-UC-to.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5619-drm-amdkfd-Change-the-control-stack-mtype-from-UC-to.patch
new file mode 100644
index 00000000..d715ee6f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5619-drm-amdkfd-Change-the-control-stack-mtype-from-UC-to.patch
@@ -0,0 +1,51 @@
+From 5efc2a33758376e129f341ace63f5015af7f370d Mon Sep 17 00:00:00 2001
+From: Yong Zhao <yong.zhao@amd.com>
+Date: Mon, 14 May 2018 12:19:22 -0400
+Subject: [PATCH 5619/5725] drm/amdkfd: Change the control stack mtype from UC
+ to NC on GFX9
+
+Due to a HW bug on GFX9, the mtype of control stack buffers, which are
+allocated in MQD BOs in VMID 0 GART and sit one page beyond the MQD
+start addresses, should be set to NC rather than the default GART
+mtype, UC.
+
+Fix: KFD-381
+
+Change-Id: I865756efb038512ecb5d4071b2e3d3784db5d4ff
+Signed-off-by: Yong Zhao <yong.zhao@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_device.c | 3 ++-
+ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 2 +-
+ 2 files changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+index a9ad2a8..ad01a983 100755
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -456,7 +456,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
+
+ if (kfd->kfd2kgd->init_gtt_mem_allocation(
+ kfd->kgd, size, &kfd->gtt_mem,
+- &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr)){
++ &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr,
++ false)) {
+ dev_err(kfd_device, "Could not allocate %d bytes\n", size);
+ goto out;
+ }
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+index f4e8efc..5118995 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+@@ -115,7 +115,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
+ ALIGN(sizeof(struct v9_mqd), PAGE_SIZE),
+ &((*mqd_mem_obj)->gtt_mem),
+ &((*mqd_mem_obj)->gpu_addr),
+- (void *)&((*mqd_mem_obj)->cpu_ptr));
++ (void *)&((*mqd_mem_obj)->cpu_ptr), true);
+ } else
+ retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct v9_mqd),
+ mqd_mem_obj);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5620-drm-amdkfd-remove-check-for-PCIe-upstream-bridge.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5620-drm-amdkfd-remove-check-for-PCIe-upstream-bridge.patch
new file mode 100644
index 00000000..43d864cc
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5620-drm-amdkfd-remove-check-for-PCIe-upstream-bridge.patch
@@ -0,0 +1,69 @@
+From 38ec2766a26d04c4b5f559bb6e2dc128c7cd1925 Mon Sep 17 00:00:00 2001
+From: welu <Wei.Lu2@amd.com>
+Date: Wed, 4 Apr 2018 11:44:04 -0400
+Subject: [PATCH 5620/5725] drm/amdkfd: remove check for PCIe upstream bridge
+
+This enables atomic support for GFX9 GPUs.
+1. Set vega10 needs_pci_atomics to false because Vega10 does not need
+PCI atomics.
+2. First try to enable atomics in pci_enable_atomic_ops_to_root();
+if this call fails and needs_pci_atomics is true,
+report the error and return NULL.
+
+Bug:SWDEV-149359
+
+Change-Id: I71cbbe63cb1f03f606f8f4b5e4b8c796e164e0d1
+Signed-off-by: welu <Wei.Lu2@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_device.c | 17 ++++++++++-------
+ 1 file changed, 10 insertions(+), 7 deletions(-)
+ mode change 100755 => 100644 drivers/gpu/drm/amd/amdkfd/kfd_device.c
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+old mode 100755
+new mode 100644
+index ad01a983..6f9a8e5
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -216,7 +216,7 @@ static const struct kfd_device_info vega10_device_info = {
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .supports_cwsr = true,
+ .needs_iommu_device = false,
+- .needs_pci_atomics = true,
++ .needs_pci_atomics = false,
+ .num_sdma_engines = 2,
+ };
+
+@@ -352,7 +352,7 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
+ struct pci_dev *pdev, const struct kfd2kgd_calls *f2g)
+ {
+ struct kfd_dev *kfd;
+-
++ int ret;
+ const struct kfd_device_info *device_info =
+ lookup_device_info(pdev->device);
+
+@@ -366,11 +366,14 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
+ * 32 and 64-bit requests are possible and must be
+ * supported.
+ */
+- if (pci_enable_atomic_ops_to_root(pdev) < 0) {
+- dev_info(kfd_device,
+- "skipped device %x:%x, PCI rejects atomics",
+- pdev->vendor, pdev->device);
+- return NULL;
++ ret = pci_enable_atomic_ops_to_root(pdev,
++ PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
++ PCI_EXP_DEVCAP2_ATOMIC_COMP64);
++ if (device_info->needs_pci_atomics && ret < 0) {
++ dev_info(kfd_device,
++ "skipped device %x:%x, PCI rejects atomics",
++ pdev->vendor, pdev->device);
++ return NULL;
+ }
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5621-drm-amdgpu-kfd2kgd-Support-BO-create-from-sg.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5621-drm-amdgpu-kfd2kgd-Support-BO-create-from-sg.patch
new file mode 100644
index 00000000..0dc0007b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5621-drm-amdgpu-kfd2kgd-Support-BO-create-from-sg.patch
@@ -0,0 +1,42 @@
+From 0936adc809c9d0d1781edce5a3f723fc43d187ed Mon Sep 17 00:00:00 2001
+From: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+Date: Tue, 27 Mar 2018 11:28:55 -0400
+Subject: [PATCH 5621/5725] drm/amdgpu: kfd2kgd: Support BO create from sg
+
+Change-Id: I3d50a285f6c5645995dcd45b66129fb8837f2bd4
+Signed-off-by: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 2 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 3 ++-
+ 2 files changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index 01c8b19..33a5793 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -1272,7 +1272,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
+
+ err = dev->kfd2kgd->alloc_memory_of_gpu(
+ dev->kgd, args->va_addr, args->size,
+- pdd->vm, (struct kgd_mem **) &mem, &offset,
++ pdd->vm, NULL, (struct kgd_mem **) &mem, &offset,
+ flags);
+
+ if (err)
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index c627b63..5fa3559 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -120,7 +120,8 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
+ int err;
+
+ err = kdev->kfd2kgd->alloc_memory_of_gpu(kdev->kgd, gpu_va, size,
+- pdd->vm, &mem, NULL, flags);
++ pdd->vm, NULL, &mem, NULL,
++ flags);
+ if (err)
+ goto err_alloc_mem;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5622-drm-amd-Update-KFD-Thunk-ioctl-ABI-to-match-upstream.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5622-drm-amd-Update-KFD-Thunk-ioctl-ABI-to-match-upstream.patch
new file mode 100644
index 00000000..001a560b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5622-drm-amd-Update-KFD-Thunk-ioctl-ABI-to-match-upstream.patch
@@ -0,0 +1,374 @@
+From bd5bcbb17667e86b7225710b8bb4018ae942205d Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Thu, 24 May 2018 14:53:40 -0400
+Subject: [PATCH 5622/5725] drm/amd: Update KFD-Thunk ioctl ABI to match
+ upstream
+
+- Clean up and renumber scratch memory ioctl
+- Renumber get_tile_config ioctl
+- Renumber set_trap_handler ioctl
+- Update KFD_IOC_ALLOC_MEM_FLAGS
+- Renumber GPUVM memory management ioctls
+- Remove unused SEP_PROCESS_DGPU_APERTURE ioctl
+- Update memory management ioctls
+ Replace device_ids_array_size (in bytes) with n_devices. Fix error
+ handling and use n_success to update device_id arrays in objects.
+
+This commit breaks the ABI and requires a corresponding Thunk change.
+
+Change-Id: I62149841f1603ec36143836d2eb5ab0fcaf37cf5
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 131 ++++++++++-----------------
+ drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c | 10 --
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 2 -
+ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 11 ++-
+ 4 files changed, 53 insertions(+), 101 deletions(-)
+ mode change 100644 => 100755 drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index 33a5793..1b96337 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -1066,17 +1066,14 @@ static int kfd_ioctl_wait_events(struct file *filp, struct kfd_process *p,
+
+ return err;
+ }
+-static int kfd_ioctl_alloc_scratch_memory(struct file *filep,
++static int kfd_ioctl_set_scratch_backing_va(struct file *filep,
+ struct kfd_process *p, void *data)
+ {
+- struct kfd_ioctl_alloc_memory_of_scratch_args *args = data;
++ struct kfd_ioctl_set_scratch_backing_va_args *args = data;
+ struct kfd_process_device *pdd;
+ struct kfd_dev *dev;
+ long err;
+
+- if (args->size == 0)
+- return -EINVAL;
+-
+ dev = kfd_device_by_id(args->gpu_id);
+ if (!dev)
+ return -EINVAL;
+@@ -1352,31 +1349,30 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
+ void *mem;
+ struct kfd_dev *dev, *peer;
+ long err = 0;
+- int i, num_dev = 0;
++ int i;
+ uint32_t *devices_arr = NULL;
+
+ dev = kfd_device_by_id(GET_GPU_ID(args->handle));
+ if (!dev)
+ return -EINVAL;
+
+- if (args->device_ids_array_size == 0) {
+- pr_debug("Device ID array size is 0\n");
++ if (!args->n_devices) {
++ pr_debug("Device IDs array empty\n");
+ return -EINVAL;
+ }
+-
+- if (args->device_ids_array_size % sizeof(uint32_t)) {
+- pr_debug("Node IDs array size %u\n",
+- args->device_ids_array_size);
++ if (args->n_success > args->n_devices) {
++ pr_debug("n_success exceeds n_devices\n");
+ return -EINVAL;
+ }
+
+- devices_arr = kmalloc(args->device_ids_array_size, GFP_KERNEL);
++ devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr),
++ GFP_KERNEL);
+ if (!devices_arr)
+ return -ENOMEM;
+
+ err = copy_from_user(devices_arr,
+- (void __user *)args->device_ids_array_ptr,
+- args->device_ids_array_size);
++ (void __user *)args->device_ids_array_ptr,
++ args->n_devices * sizeof(*devices_arr));
+ if (err != 0) {
+ err = -EFAULT;
+ goto copy_from_user_failed;
+@@ -1397,12 +1393,11 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
+ goto get_mem_obj_from_handle_failed;
+ }
+
+- num_dev = args->device_ids_array_size / sizeof(uint32_t);
+- for (i = 0 ; i < num_dev; i++) {
++ for (i = args->n_success; i < args->n_devices; i++) {
+ peer = kfd_device_by_id(devices_arr[i]);
+ if (!peer) {
+ pr_debug("Getting device by id failed for 0x%x\n",
+- devices_arr[i]);
++ devices_arr[i]);
+ err = -EINVAL;
+ goto get_mem_obj_from_handle_failed;
+ }
+@@ -1413,12 +1408,13 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
+ goto get_mem_obj_from_handle_failed;
+ }
+ err = peer->kfd2kgd->map_memory_to_gpu(
+- peer->kgd, (struct kgd_mem *)mem, peer_pdd->vm);
+- if (err != 0) {
+- pr_err("Failed to map to gpu %d, num_dev=%d\n",
+- i, num_dev);
++ peer->kgd, (struct kgd_mem *)mem, peer_pdd->vm);
++ if (err) {
++ pr_err("Failed to map to gpu %d/%d\n",
++ i, args->n_devices);
+ goto map_memory_to_gpu_failed;
+ }
++ args->n_success = i+1;
+ }
+
+ mutex_unlock(&p->mutex);
+@@ -1430,7 +1426,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
+ }
+
+ /* Flush TLBs after waiting for the page table updates to complete */
+- for (i = 0; i < num_dev; i++) {
++ for (i = 0; i < args->n_devices; i++) {
+ peer = kfd_device_by_id(devices_arr[i]);
+ if (WARN_ON_ONCE(!peer))
+ continue;
+@@ -1463,30 +1459,29 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
+ void *mem;
+ struct kfd_dev *dev, *peer;
+ long err = 0;
+- uint32_t *devices_arr = NULL, num_dev, i;
++ uint32_t *devices_arr = NULL, i;
+
+ dev = kfd_device_by_id(GET_GPU_ID(args->handle));
+ if (!dev)
+ return -EINVAL;
+
+- if (args->device_ids_array_size == 0) {
+- pr_debug("Device ID array size is 0\n");
++ if (!args->n_devices) {
++ pr_debug("Device IDs array empty\n");
+ return -EINVAL;
+ }
+-
+- if (args->device_ids_array_size % sizeof(uint32_t)) {
+- pr_debug("Node IDs array size %u\n",
+- args->device_ids_array_size);
++ if (args->n_success > args->n_devices) {
++ pr_debug("n_success exceeds n_devices\n");
+ return -EINVAL;
+ }
+
+- devices_arr = kmalloc(args->device_ids_array_size, GFP_KERNEL);
++ devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr),
++ GFP_KERNEL);
+ if (!devices_arr)
+ return -ENOMEM;
+
+ err = copy_from_user(devices_arr,
+- (void __user *)args->device_ids_array_ptr,
+- args->device_ids_array_size);
++ (void __user *)args->device_ids_array_ptr,
++ args->n_devices * sizeof(*devices_arr));
+ if (err != 0) {
+ err = -EFAULT;
+ goto copy_from_user_failed;
+@@ -1496,8 +1491,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
+
+ pdd = kfd_get_process_device_data(dev, p);
+ if (!pdd) {
+- pr_debug("Process device data doesn't exist\n");
+- err = -ENODEV;
++ err = -EINVAL;
+ goto bind_process_to_device_failed;
+ }
+
+@@ -1508,8 +1502,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
+ goto get_mem_obj_from_handle_failed;
+ }
+
+- num_dev = args->device_ids_array_size / sizeof(uint32_t);
+- for (i = 0 ; i < num_dev; i++) {
++ for (i = args->n_success; i < args->n_devices; i++) {
+ peer = kfd_device_by_id(devices_arr[i]);
+ if (!peer) {
+ err = -EINVAL;
+@@ -1525,9 +1518,10 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
+ peer->kgd, (struct kgd_mem *)mem, peer_pdd->vm);
+ if (err) {
+ pr_err("Failed to unmap from gpu %d/%d\n",
+- i, num_dev);
++ i, args->n_devices);
+ goto unmap_memory_from_gpu_failed;
+ }
++ args->n_success = i+1;
+ }
+ kfree(devices_arr);
+
+@@ -1544,34 +1538,6 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
+ return err;
+ }
+
+-static int kfd_ioctl_set_process_dgpu_aperture(struct file *filep,
+- struct kfd_process *p, void *data)
+-{
+- struct kfd_ioctl_set_process_dgpu_aperture_args *args = data;
+- struct kfd_dev *dev;
+- struct kfd_process_device *pdd;
+- long err;
+-
+- dev = kfd_device_by_id(args->gpu_id);
+- if (!dev)
+- return -EINVAL;
+-
+- mutex_lock(&p->mutex);
+-
+- pdd = kfd_bind_process_to_device(dev, p);
+- if (IS_ERR(pdd)) {
+- err = PTR_ERR(pdd);
+- goto exit;
+- }
+-
+- err = kfd_set_process_dgpu_aperture(pdd, args->dgpu_base,
+- args->dgpu_limit);
+-
+-exit:
+- mutex_unlock(&p->mutex);
+- return err;
+-}
+-
+ static int kfd_ioctl_get_dmabuf_info(struct file *filep,
+ struct kfd_process *p, void *data)
+ {
+@@ -2012,6 +1978,21 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_WAVE_CONTROL,
+ kfd_ioctl_dbg_wave_control, 0),
+
++ AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_SCRATCH_BACKING_VA,
++ kfd_ioctl_set_scratch_backing_va, 0),
++
++ AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_TILE_CONFIG,
++ kfd_ioctl_get_tile_config, 0),
++
++ AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_TRAP_HANDLER,
++ kfd_ioctl_set_trap_handler, 0),
++
++ AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES_NEW,
++ kfd_ioctl_get_process_apertures_new, 0),
++
++ AMDKFD_IOCTL_DEF(AMDKFD_IOC_ACQUIRE_VM,
++ kfd_ioctl_acquire_vm, 0),
++
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_MEMORY_OF_GPU,
+ kfd_ioctl_alloc_memory_of_gpu, 0),
+
+@@ -2024,30 +2005,15 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU,
+ kfd_ioctl_unmap_memory_from_gpu, 0),
+
+- AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_MEMORY_OF_SCRATCH,
+- kfd_ioctl_alloc_scratch_memory, 0),
+-
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_CU_MASK,
+ kfd_ioctl_set_cu_mask, 0),
+
+- AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_PROCESS_DGPU_APERTURE,
+- kfd_ioctl_set_process_dgpu_aperture, 0),
+-
+- AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_TRAP_HANDLER,
+- kfd_ioctl_set_trap_handler, 0),
+-
+- AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES_NEW,
+- kfd_ioctl_get_process_apertures_new, 0),
+-
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_DMABUF_INFO,
+ kfd_ioctl_get_dmabuf_info, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_IMPORT_DMABUF,
+ kfd_ioctl_import_dmabuf, 0),
+
+- AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_TILE_CONFIG,
+- kfd_ioctl_get_tile_config, 0),
+-
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_IPC_IMPORT_HANDLE,
+ kfd_ioctl_ipc_import_handle, 0),
+
+@@ -2060,9 +2026,6 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_QUEUE_WAVE_STATE,
+ kfd_ioctl_get_queue_wave_state, 0),
+
+- AMDKFD_IOCTL_DEF(AMDKFD_IOC_ACQUIRE_VM,
+- kfd_ioctl_acquire_vm, 0)
+-
+ };
+
+ #define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls)
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+old mode 100644
+new mode 100755
+index 2c00711..be376d93
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+@@ -313,16 +313,6 @@
+ #define SVM_CWSR_BASE (SVM_USER_BASE - KFD_CWSR_TBA_TMA_SIZE)
+ #define SVM_IB_BASE (SVM_CWSR_BASE - PAGE_SIZE)
+
+-int kfd_set_process_dgpu_aperture(struct kfd_process_device *pdd,
+- uint64_t base, uint64_t limit)
+-{
+- if (base < SVM_USER_BASE) {
+- pr_err("Set dgpu vm base 0x%llx failed.\n", base);
+- return -EINVAL;
+- }
+- return 0;
+-}
+-
+ void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id)
+ {
+ /*
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index b2ef0f5..be66eae 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -875,8 +875,6 @@ int kgd2kfd_post_reset(struct kfd_dev *kfd);
+
+ /* amdkfd Apertures */
+ int kfd_init_apertures(struct kfd_process *process);
+-int kfd_set_process_dgpu_aperture(struct kfd_process_device *pdd,
+- uint64_t base, uint64_t limit);
+
+ /* Queue Context Management */
+ int init_queue(struct queue **q, const struct queue_properties *properties);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index 5fa3559..2b4c5bd 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -182,8 +182,10 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
+ static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
+ {
+ struct qcm_process_device *qpd = &pdd->qpd;
+- uint32_t flags = ALLOC_MEM_FLAGS_GTT | ALLOC_MEM_FLAGS_NONPAGED |
+- ALLOC_MEM_FLAGS_NO_SUBSTITUTE | ALLOC_MEM_FLAGS_EXECUTE_ACCESS;
++ uint32_t flags = ALLOC_MEM_FLAGS_GTT |
++ ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
++ ALLOC_MEM_FLAGS_WRITABLE |
++ ALLOC_MEM_FLAGS_EXECUTABLE;
+ void *kaddr;
+ int ret;
+
+@@ -503,9 +505,8 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
+ {
+ struct kfd_dev *dev = pdd->dev;
+ struct qcm_process_device *qpd = &pdd->qpd;
+- uint32_t flags = ALLOC_MEM_FLAGS_GTT | ALLOC_MEM_FLAGS_NONPAGED |
+- ALLOC_MEM_FLAGS_NO_SUBSTITUTE | ALLOC_MEM_FLAGS_READONLY |
+- ALLOC_MEM_FLAGS_EXECUTE_ACCESS;
++ uint32_t flags = ALLOC_MEM_FLAGS_GTT |
++ ALLOC_MEM_FLAGS_NO_SUBSTITUTE | ALLOC_MEM_FLAGS_EXECUTABLE;
+ void *kaddr;
+ int ret;
+
+--
+2.7.4
+
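The ioctl rework above replaces the byte-sized device_ids_array_size with an explicit n_devices count and resumes partially completed map/unmap operations from args->n_success. A small user-space model of that contract is sketched below; map_one() and the retry loop in main() are illustrative assumptions, only the n_success bookkeeping mirrors the kernel loop.

#include <stdint.h>
#include <stdio.h>

#define N_DEVICES 4

/* Hypothetical per-device mapping that fails once on device 2. */
static int map_one(uint32_t dev, int *failed_once)
{
	if (dev == 2 && !*failed_once) {
		*failed_once = 1;
		return -1;
	}
	return 0;
}

/*
 * Mirrors the kernel loop: start at n_success and bump it after every
 * device that mapped, so a repeated call does not redo earlier work.
 */
static int map_memory(uint32_t n_devices, uint32_t *n_success, int *failed_once)
{
	for (uint32_t i = *n_success; i < n_devices; i++) {
		if (map_one(i, failed_once))
			return -1;	/* caller may retry with updated n_success */
		*n_success = i + 1;
	}
	return 0;
}

int main(void)
{
	uint32_t n_success = 0;
	int failed_once = 0;

	while (map_memory(N_DEVICES, &n_success, &failed_once))
		printf("retrying from device %u\n", n_success);
	printf("mapped %u devices\n", n_success);
	return 0;
}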
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5623-drm-amdkfd-Fixing-compilation-issues.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5623-drm-amdkfd-Fixing-compilation-issues.patch
new file mode 100644
index 00000000..f4168e1a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5623-drm-amdkfd-Fixing-compilation-issues.patch
@@ -0,0 +1,57 @@
+From 7b42a01a2719991ab56e3dc990430d317adfd60b Mon Sep 17 00:00:00 2001
+From: Ravi Kumar <ravi1.kumar@amd.com>
+Date: Fri, 2 Nov 2018 18:43:17 +0530
+Subject: [PATCH 5623/5725] drm/amdkfd: Fixing compilation issues.
+
+Signed-off-by: Ravi Kumar <ravi1.kumar@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 4 ++--
+ drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 4 ++--
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+ mode change 100644 => 100755 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+ mode change 100644 => 100755 drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+old mode 100644
+new mode 100755
+index c3446ef..1fd2b33
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -1941,7 +1941,7 @@ int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
+ evicted_bos = atomic_inc_return(&process_info->evicted_bos);
+ if (evicted_bos == 1) {
+ /* First eviction, stop the queues */
+- r = kgd2kfd->quiesce_mm(mm);
++ r = kgd2kfd->quiesce_mm(NULL, mm);
+ if (r)
+ pr_err("Failed to quiesce KFD\n");
+ schedule_delayed_work(&process_info->work, 1);
+@@ -2231,7 +2231,7 @@ static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
+ evicted_bos)
+ goto unlock_out;
+ evicted_bos = 0;
+- if (kgd2kfd->resume_mm(mm)) {
++ if (kgd2kfd->resume_mm(NULL, mm)) {
+ pr_err("%s: Failed to resume KFD\n", __func__);
+ /* No recovery from this failure. Probably the CP is
+ * hanging. No point trying again.
+diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+old mode 100644
+new mode 100755
+index da7c6f5..ad6ee1b
+--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
++++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+@@ -445,8 +445,8 @@ struct kgd2kfd_calls {
+ void (*interrupt)(struct kfd_dev *kfd, const void *ih_ring_entry);
+ void (*suspend)(struct kfd_dev *kfd);
+ int (*resume)(struct kfd_dev *kfd);
+- int (*quiesce_mm)(struct mm_struct *mm);
+- int (*resume_mm)(struct mm_struct *mm);
++ int (*quiesce_mm)(struct kfd_dev *kfd, struct mm_struct *mm);
++ int (*resume_mm)(struct kfd_dev *kfd, struct mm_struct *mm);
+ int (*schedule_evict_and_restore_process)(struct mm_struct *mm,
+ struct dma_fence *fence);
+ int (*pre_reset)(struct kfd_dev *kfd);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5624-drm-amdkfd-Disable-the-perf-counters-for-old-kernels.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5624-drm-amdkfd-Disable-the-perf-counters-for-old-kernels.patch
new file mode 100644
index 00000000..d27dc967
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5624-drm-amdkfd-Disable-the-perf-counters-for-old-kernels.patch
@@ -0,0 +1,32 @@
+From d9d090893b22e3bb4703986813ff20ddca34b3a9 Mon Sep 17 00:00:00 2001
+From: Yong Zhao <Yong.Zhao@amd.com>
+Date: Fri, 28 Apr 2017 18:08:09 -0400
+Subject: [PATCH 5624/5725] drm/amdkfd: Disable the perf counters for old
+ kernels
+
+Because IOMMU functions are missing for old kernels such as 3.10 on
+Red Hat 7.3, we choose to disable the performance counter feature on
+those kernels.
+
+Change-Id: Ie159d61a9b36cc38bd306b5e28fa5a3b83646d09
+Signed-off-by: Yong Zhao <Yong.Zhao@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_topology.h | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+index f4d29c4..4c518fe8 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+@@ -190,4 +190,8 @@ struct kfd_topology_device *kfd_create_topology_device(
+ struct list_head *device_list);
+ void kfd_release_topology_device_list(struct list_head *device_list);
+
++extern bool amd_iommu_pc_supported(void);
++extern u8 amd_iommu_pc_get_max_banks(u16 devid);
++extern u8 amd_iommu_pc_get_max_counters(u16 devid);
++
+ #endif /* __KFD_TOPOLOGY_H__ */
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5625-drm-amdkfd-use-px-to-print-user-space-address-instea.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5625-drm-amdkfd-use-px-to-print-user-space-address-instea.patch
new file mode 100644
index 00000000..219c22d1
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5625-drm-amdkfd-use-px-to-print-user-space-address-instea.patch
@@ -0,0 +1,55 @@
+From 5672120c5010f0b5a41e6a9e5c8203735f78750a Mon Sep 17 00:00:00 2001
+From: Philip Yang <Philip.Yang@amd.com>
+Date: Tue, 20 Mar 2018 10:45:26 -0400
+Subject: [PATCH 5625/5725] drm/amdkfd: use %px to print user space address
+ instead of %p
+
+Change-Id: I003ad6f543ca472dafb67ad986ff36a56a225494
+Signed-off-by: Philip Yang <Philip.Yang@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 2 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_queue.c | 8 ++++----
+ 2 files changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index 1b96337..e0d1577 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -247,7 +247,7 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
+ pr_debug("Queue Size: 0x%llX, %u\n",
+ q_properties->queue_size, args->ring_size);
+
+- pr_debug("Queue r/w Pointers: %p, %p\n",
++ pr_debug("Queue r/w Pointers: %px, %px\n",
+ q_properties->read_ptr,
+ q_properties->write_ptr);
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
+index a5315d4..6dcd621 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
+@@ -36,8 +36,8 @@ void print_queue_properties(struct queue_properties *q)
+ pr_debug("Queue Address: 0x%llX\n", q->queue_address);
+ pr_debug("Queue Id: %u\n", q->queue_id);
+ pr_debug("Queue Process Vmid: %u\n", q->vmid);
+- pr_debug("Queue Read Pointer: 0x%p\n", q->read_ptr);
+- pr_debug("Queue Write Pointer: 0x%p\n", q->write_ptr);
++ pr_debug("Queue Read Pointer: 0x%px\n", q->read_ptr);
++ pr_debug("Queue Write Pointer: 0x%px\n", q->write_ptr);
+ pr_debug("Queue Doorbell Pointer: 0x%p\n", q->doorbell_ptr);
+ pr_debug("Queue Doorbell Offset: %u\n", q->doorbell_off);
+ }
+@@ -53,8 +53,8 @@ void print_queue(struct queue *q)
+ pr_debug("Queue Address: 0x%llX\n", q->properties.queue_address);
+ pr_debug("Queue Id: %u\n", q->properties.queue_id);
+ pr_debug("Queue Process Vmid: %u\n", q->properties.vmid);
+- pr_debug("Queue Read Pointer: 0x%p\n", q->properties.read_ptr);
+- pr_debug("Queue Write Pointer: 0x%p\n", q->properties.write_ptr);
++ pr_debug("Queue Read Pointer: 0x%px\n", q->properties.read_ptr);
++ pr_debug("Queue Write Pointer: 0x%px\n", q->properties.write_ptr);
+ pr_debug("Queue Doorbell Pointer: 0x%p\n", q->properties.doorbell_ptr);
+ pr_debug("Queue Doorbell Offset: %u\n", q->properties.doorbell_off);
+ pr_debug("Queue MQD Address: 0x%p\n", q->mqd);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5626-drm-amdkfd-Simplify-dGPU-event-page-allocation.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5626-drm-amdkfd-Simplify-dGPU-event-page-allocation.patch
new file mode 100644
index 00000000..f0e2bf41
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5626-drm-amdkfd-Simplify-dGPU-event-page-allocation.patch
@@ -0,0 +1,284 @@
+From e3f7969f17cb2d91a977887af3dcd87c39f388ff Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Mon, 19 Mar 2018 18:02:07 -0400
+Subject: [PATCH 5626/5725] drm/amdkfd: Simplify dGPU event page allocation
+
+Deal with all the events page allocation in kfd_chardev.c and remove
+unnecessary checks for APU. This will also potentially allow mixed
+configurations of dGPUs with APUs.
+
+Explicitly set the events page in the ioctl instead of doing it
+implicitly in kfd_event_create. This also fixes a potential memory
+leak when the events page was already set by a previous call; such
+a repeated call now fails instead.
+
+Explicitly remember how the events page was allocated so it can be
+freed correctly.
+
+Change-Id: I77ecd0b699c20d2e9a1ff7226e387df143ad6a5b
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 84 +++++++++++++++++++-------------
+ drivers/gpu/drm/amd/amdkfd/kfd_events.c | 69 +++++++++++---------------
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 4 +-
+ 3 files changed, 80 insertions(+), 77 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index e0d1577..978f329 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -976,55 +976,69 @@ static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
+ void *data)
+ {
+ struct kfd_ioctl_create_event_args *args = data;
+- struct kfd_dev *kfd;
+- struct kfd_process_device *pdd;
+- int err = -EINVAL;
+- void *mem, *kern_addr = NULL;
+-
+- pr_debug("Event page offset 0x%llx\n", args->event_page_offset);
++ int err;
+
++ /* For dGPUs the event page is allocated in user mode. The
++ * handle is passed to KFD with the first call to this IOCTL
++ * through the event_page_offset field.
++ */
+ if (args->event_page_offset) {
++ struct kfd_dev *kfd;
++ struct kfd_process_device *pdd;
++ void *mem, *kern_addr;
++
++ if (p->signal_page) {
++ pr_err("Event page is already set\n");
++ return -EINVAL;
++ }
++
+ kfd = kfd_device_by_id(GET_GPU_ID(args->event_page_offset));
+ if (!kfd) {
+ pr_err("Getting device by id failed in %s\n", __func__);
+- return -EFAULT;
++ return -EINVAL;
+ }
+- if (!kfd->device_info->needs_iommu_device) {
+- mutex_lock(&p->mutex);
+- pdd = kfd_bind_process_to_device(kfd, p);
+- if (IS_ERR(pdd)) {
+- err = PTR_ERR(pdd);
+- goto out_upwrite;
+- }
+- mem = kfd_process_device_translate_handle(pdd,
++
++ mutex_lock(&p->mutex);
++ pdd = kfd_bind_process_to_device(kfd, p);
++ if (IS_ERR(pdd)) {
++ err = PTR_ERR(pdd);
++ goto out_unlock;
++ }
++
++ mem = kfd_process_device_translate_handle(pdd,
+ GET_IDR_HANDLE(args->event_page_offset));
+- if (!mem) {
+- pr_err("Can't find BO, offset is 0x%llx\n",
+- args->event_page_offset);
+- err = -EFAULT;
+- goto out_upwrite;
+- }
+- mutex_unlock(&p->mutex);
++ if (!mem) {
++ pr_err("Can't find BO, offset is 0x%llx\n",
++ args->event_page_offset);
++ err = -EINVAL;
++ goto out_unlock;
++ }
++ mutex_unlock(&p->mutex);
+
+- /* Map dGPU gtt BO to kernel */
+- kfd->kfd2kgd->map_gtt_bo_to_kernel(kfd->kgd,
+- mem, &kern_addr, NULL);
++ err = kfd->kfd2kgd->map_gtt_bo_to_kernel(kfd->kgd,
++ mem, &kern_addr, NULL);
++ if (err) {
++ pr_err("Failed to map event page to kernel\n");
++ return err;
++ }
++
++ err = kfd_event_page_set(p, kern_addr);
++ if (err) {
++ pr_err("Failed to set event page\n");
++ return err;
+ }
+ }
+
+- err = kfd_event_create(filp, p,
+- args->event_type,
+- args->auto_reset != 0,
+- args->node_id,
+- &args->event_id,
+- &args->event_trigger_data,
+- &args->event_page_offset,
+- &args->event_slot_index,
+- kern_addr);
++
++ err = kfd_event_create(filp, p, args->event_type,
++ args->auto_reset != 0, args->node_id,
++ &args->event_id, &args->event_trigger_data,
++ &args->event_page_offset,
++ &args->event_slot_index);
+
+ return err;
+
+-out_upwrite:
++out_unlock:
+ mutex_unlock(&p->mutex);
+ return err;
+ }
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+index a92ca78..d002016 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+@@ -51,8 +51,8 @@ struct kfd_event_waiter {
+ */
+ struct kfd_signal_page {
+ uint64_t *kernel_address;
+- uint64_t handle;
+ uint64_t __user *user_address;
++ bool need_to_free_pages;
+ };
+
+
+@@ -80,6 +80,7 @@ static struct kfd_signal_page *allocate_signal_page(struct kfd_process *p)
+ KFD_SIGNAL_EVENT_LIMIT * 8);
+
+ page->kernel_address = backing_store;
++ page->need_to_free_pages = true;
+ pr_debug("Allocated new event signal page at %p, for process %p\n",
+ page, p);
+
+@@ -112,29 +113,6 @@ static int allocate_event_notification_slot(struct kfd_process *p,
+ return 0;
+ }
+
+-static struct kfd_signal_page *allocate_signal_page_dgpu(
+- struct kfd_process *p, uint64_t *kernel_address, uint64_t handle)
+-{
+- struct kfd_signal_page *my_page;
+-
+- my_page = kzalloc(sizeof(*my_page), GFP_KERNEL);
+- if (!my_page)
+- return NULL;
+-
+- /* Initialize all events to unsignaled */
+- memset(kernel_address, (uint8_t) UNSIGNALED_EVENT_SLOT,
+- KFD_SIGNAL_EVENT_LIMIT * 8);
+-
+- my_page->kernel_address = kernel_address;
+- my_page->handle = handle;
+- my_page->user_address = NULL;
+-
+- pr_debug("Allocated new event signal page at %p, for process %p\n",
+- my_page, p);
+-
+- return my_page;
+-}
+-
+ /*
+ * Assumes that p->event_mutex is held and of course that p is not going
+ * away (current or locked).
+@@ -284,9 +262,9 @@ static void shutdown_signal_page(struct kfd_process *p)
+ struct kfd_signal_page *page = p->signal_page;
+
+ if (page) {
+- if (page->user_address)
++ if (page->need_to_free_pages)
+ free_pages((unsigned long)page->kernel_address,
+- get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
++ get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
+ kfree(page);
+ }
+ }
+@@ -308,11 +286,32 @@ static bool event_can_be_cpu_signaled(const struct kfd_event *ev)
+ return ev->type == KFD_EVENT_TYPE_SIGNAL;
+ }
+
++int kfd_event_page_set(struct kfd_process *p, void *kernel_address)
++{
++ struct kfd_signal_page *page;
++
++ if (p->signal_page)
++ return -EBUSY;
++
++ page = kzalloc(sizeof(*page), GFP_KERNEL);
++ if (!page)
++ return -ENOMEM;
++
++ /* Initialize all events to unsignaled */
++ memset(kernel_address, (uint8_t) UNSIGNALED_EVENT_SLOT,
++ KFD_SIGNAL_EVENT_LIMIT * 8);
++
++ page->kernel_address = kernel_address;
++
++ p->signal_page = page;
++
++ return 0;
++}
++
+ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
+ uint32_t event_type, bool auto_reset, uint32_t node_id,
+ uint32_t *event_id, uint32_t *event_trigger_data,
+- uint64_t *event_page_offset, uint32_t *event_slot_index,
+- void *kern_addr)
++ uint64_t *event_page_offset, uint32_t *event_slot_index)
+ {
+ int ret = 0;
+ struct kfd_event *ev = kzalloc(sizeof(*ev), GFP_KERNEL);
+@@ -326,19 +325,10 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
+
+ init_waitqueue_head(&ev->wq);
+
+- mutex_lock(&p->event_mutex);
+-
+- if (kern_addr && !p->signal_page) {
+- p->signal_page = allocate_signal_page_dgpu(p, kern_addr,
+- *event_page_offset);
+- if (!p->signal_page) {
+- ret = -ENOMEM;
+- goto out;
+- }
+- }
+-
+ *event_page_offset = 0;
+
++ mutex_lock(&p->event_mutex);
++
+ switch (event_type) {
+ case KFD_EVENT_TYPE_SIGNAL:
+ case KFD_EVENT_TYPE_DEBUG:
+@@ -361,7 +351,6 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
+ kfree(ev);
+ }
+
+-out:
+ mutex_unlock(&p->event_mutex);
+
+ return ret;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index be66eae..38aa868 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -1054,11 +1054,11 @@ void kfd_signal_iommu_event(struct kfd_dev *dev,
+ void kfd_signal_hw_exception_event(unsigned int pasid);
+ int kfd_set_event(struct kfd_process *p, uint32_t event_id);
+ int kfd_reset_event(struct kfd_process *p, uint32_t event_id);
++int kfd_event_page_set(struct kfd_process *p, void *kernel_address);
+ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
+ uint32_t event_type, bool auto_reset, uint32_t node_id,
+ uint32_t *event_id, uint32_t *event_trigger_data,
+- uint64_t *event_page_offset, uint32_t *event_slot_index,
+- void *kern_addr);
++ uint64_t *event_page_offset, uint32_t *event_slot_index);
+ int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);
+
+ void kfd_signal_vm_fault_event(struct kfd_dev *dev, unsigned int pasid,
+--
+2.7.4
+
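After the refactor above, kfd_event_page_set() owns the dGPU signal page and rejects a second registration with -EBUSY, which is what turns the old silent leak into an explicit failure. Below is a compact stand-alone sketch of that single-registration rule; plain malloc stands in for the GTT kernel mapping, and the 0xff fill is a placeholder for the patch's UNSIGNALED_EVENT_SLOT value.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_BYTES 4096	/* illustrative event page size */

struct process_model {
	void *signal_page;	/* only one page may ever be registered */
};

static int event_page_set(struct process_model *p, void *kernel_address)
{
	if (p->signal_page)
		return -EBUSY;	/* a second registration is refused */

	/* Mark every slot "unsignaled"; 0xff is a stand-in value here. */
	memset(kernel_address, 0xff, PAGE_BYTES);
	p->signal_page = kernel_address;
	return 0;
}

int main(void)
{
	struct process_model p = { 0 };
	void *page = malloc(PAGE_BYTES);

	printf("first  set: %d\n", event_page_set(&p, page));
	printf("second set: %d\n", event_page_set(&p, page));	/* -EBUSY */
	free(page);
	return 0;
}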
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5627-drm-amdkfd-Backwards-compatibility-with-old-Thunk.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5627-drm-amdkfd-Backwards-compatibility-with-old-Thunk.patch
new file mode 100644
index 00000000..ac7f3781
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5627-drm-amdkfd-Backwards-compatibility-with-old-Thunk.patch
@@ -0,0 +1,148 @@
+From 625cdf6c7a014b8d93b8a9bea0acf529d45ee161 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Mon, 19 Mar 2018 18:35:35 -0400
+Subject: [PATCH 5627/5725] drm/amdkfd: Backwards compatibility with old Thunk
+
+Don't assume a fixed events page size. Old upstream KFD versions and
+corresponding Thunk builds used a smaller size. Instead use the size
+of the actual allocation or mapping to determine the event limit.
+
+Change-Id: I759095f15c2d5cd9414dc9c292fd1e2889ef45a0
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 5 +++--
+ drivers/gpu/drm/amd/amdkfd/kfd_events.c | 28 ++++++++++++++++++++++------
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 4 +++-
+ 3 files changed, 28 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index 978f329..1fbde9b 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -986,6 +986,7 @@ static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
+ struct kfd_dev *kfd;
+ struct kfd_process_device *pdd;
+ void *mem, *kern_addr;
++ uint64_t size;
+
+ if (p->signal_page) {
+ pr_err("Event page is already set\n");
+@@ -1016,13 +1017,13 @@ static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
+ mutex_unlock(&p->mutex);
+
+ err = kfd->kfd2kgd->map_gtt_bo_to_kernel(kfd->kgd,
+- mem, &kern_addr, NULL);
++ mem, &kern_addr, &size);
+ if (err) {
+ pr_err("Failed to map event page to kernel\n");
+ return err;
+ }
+
+- err = kfd_event_page_set(p, kern_addr);
++ err = kfd_event_page_set(p, kern_addr, size);
+ if (err) {
+ pr_err("Failed to set event page\n");
+ return err;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+index d002016..644ce9d 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+@@ -100,9 +100,17 @@ static int allocate_event_notification_slot(struct kfd_process *p,
+ p->signal_page = allocate_signal_page(p);
+ if (!p->signal_page)
+ return -ENOMEM;
++ /* Oldest user mode expects 256 event slots */
++ p->signal_mapped_size = 256*8;
+ }
+
+- id = idr_alloc(&p->event_idr, ev, 0, KFD_SIGNAL_EVENT_LIMIT,
++ /*
++ * Compatibility with old user mode: Only use signal slots
++ * user mode has mapped, may be less than
++ * KFD_SIGNAL_EVENT_LIMIT. This also allows future increase
++ * of the event limit without breaking user mode.
++ */
++ id = idr_alloc(&p->event_idr, ev, 0, p->signal_mapped_size / 8,
+ GFP_KERNEL);
+ if (id < 0)
+ return id;
+@@ -176,7 +184,8 @@ static int create_signal_event(struct file *devkfd,
+ {
+ int ret;
+
+- if (p->signal_event_count == KFD_SIGNAL_EVENT_LIMIT) {
++ if (p->signal_mapped_size &&
++ p->signal_event_count == p->signal_mapped_size / 8) {
+ if (!p->signal_event_limit_reached) {
+ pr_warn("Signal event wasn't created because limit was reached\n");
+ p->signal_event_limit_reached = true;
+@@ -286,7 +295,8 @@ static bool event_can_be_cpu_signaled(const struct kfd_event *ev)
+ return ev->type == KFD_EVENT_TYPE_SIGNAL;
+ }
+
+-int kfd_event_page_set(struct kfd_process *p, void *kernel_address)
++int kfd_event_page_set(struct kfd_process *p, void *kernel_address,
++ uint64_t size)
+ {
+ struct kfd_signal_page *page;
+
+@@ -304,6 +314,7 @@ int kfd_event_page_set(struct kfd_process *p, void *kernel_address)
+ page->kernel_address = kernel_address;
+
+ p->signal_page = page;
++ p->signal_mapped_size = size;
+
+ return 0;
+ }
+@@ -769,9 +780,10 @@ int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
+
+ unsigned long pfn;
+ struct kfd_signal_page *page;
++ int ret;
+
+- /* check required size is logical */
+- if (get_order(KFD_SIGNAL_EVENT_LIMIT * 8) !=
++ /* check required size doesn't exceed the allocated size */
++ if (get_order(KFD_SIGNAL_EVENT_LIMIT * 8) <
+ get_order(vma->vm_end - vma->vm_start)) {
+ pr_err("Event page mmap requested illegal size\n");
+ return -EINVAL;
+@@ -801,8 +813,12 @@ int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
+ page->user_address = (uint64_t __user *)vma->vm_start;
+
+ /* mapping the page to user process */
+- return remap_pfn_range(vma, vma->vm_start, pfn,
++ ret = remap_pfn_range(vma, vma->vm_start, pfn,
+ vma->vm_end - vma->vm_start, vma->vm_page_prot);
++ if (!ret)
++ p->signal_mapped_size = vma->vm_end - vma->vm_start;
++
++ return ret;
+ }
+
+ /*
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index 38aa868..25a227d 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -712,6 +712,7 @@ struct kfd_process {
+ struct idr event_idr;
+ /* Event page */
+ struct kfd_signal_page *signal_page;
++ size_t signal_mapped_size;
+ size_t signal_event_count;
+ bool signal_event_limit_reached;
+
+@@ -1054,7 +1055,8 @@ void kfd_signal_iommu_event(struct kfd_dev *dev,
+ void kfd_signal_hw_exception_event(unsigned int pasid);
+ int kfd_set_event(struct kfd_process *p, uint32_t event_id);
+ int kfd_reset_event(struct kfd_process *p, uint32_t event_id);
+-int kfd_event_page_set(struct kfd_process *p, void *kernel_address);
++int kfd_event_page_set(struct kfd_process *p, void *kernel_address,
++ uint64_t size);
+ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
+ uint32_t event_type, bool auto_reset, uint32_t node_id,
+ uint32_t *event_id, uint32_t *event_trigger_data,
+--
+2.7.4
+
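The compatibility fix above derives the usable number of signal slots from signal_mapped_size rather than the compile-time KFD_SIGNAL_EVENT_LIMIT, so an old Thunk that only mapped 256 slots keeps working. A short sketch of that calculation follows; the 8-byte slot size and the 256-slot legacy mapping come from the patch, while the larger mapping size in main() is purely illustrative.

#include <stdint.h>
#include <stdio.h>

#define SLOT_SIZE 8	/* bytes per signal slot, as in the patch */

/*
 * Usable event slots are whatever user mode actually mapped, which may
 * be smaller than the kernel's KFD_SIGNAL_EVENT_LIMIT.
 */
static uint32_t event_limit(size_t signal_mapped_size)
{
	return (uint32_t)(signal_mapped_size / SLOT_SIZE);
}

int main(void)
{
	/* Oldest user mode maps only 256 slots (256 * 8 bytes). */
	printf("old thunk: %u events\n", event_limit(256 * SLOT_SIZE));
	/* A newer mapping covering a larger event page (illustrative size). */
	printf("new thunk: %u events\n", event_limit(32768));
	return 0;
}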
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5628-drm-amdkfd-Remove-pm_map_process_scratch_cik.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5628-drm-amdkfd-Remove-pm_map_process_scratch_cik.patch
new file mode 100644
index 00000000..834e415b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5628-drm-amdkfd-Remove-pm_map_process_scratch_cik.patch
@@ -0,0 +1,115 @@
+From 2e2d68fe8a658305478d2e96b076f8d86c163f5d Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Tue, 27 Mar 2018 15:07:49 -0400
+Subject: [PATCH 5628/5725] drm/amdkfd: Remove pm_map_process_scratch_cik
+
+The packet structure is identical to the VI packet, so we can use
+pm_map_process_vi instead.
+
+Change-Id: Ifff68999017d86f91869ab40435b9f973e37dd3b
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c | 42 ++---------------------
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c | 2 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 2 ++
+ 3 files changed, 5 insertions(+), 41 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c
+index 2808422..b8a7c4a 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c
+@@ -85,47 +85,10 @@ static int pm_map_process_cik(struct packet_manager *pm, uint32_t *buffer,
+ return 0;
+ }
+
+-static int pm_map_process_scratch_cik(struct packet_manager *pm,
+- uint32_t *buffer, struct qcm_process_device *qpd)
+-{
+- struct pm4_map_process_scratch_kv *packet;
+-
+- packet = (struct pm4_map_process_scratch_kv *)buffer;
+-
+- memset(buffer, 0, sizeof(struct pm4_map_process_scratch_kv));
+-
+- packet->header.u32all = pm_build_pm4_header(IT_MAP_PROCESS,
+- sizeof(struct pm4_map_process_scratch_kv));
+- packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
+- packet->bitfields2.process_quantum = 1;
+- packet->bitfields2.pasid = qpd->pqm->process->pasid;
+- packet->bitfields3.page_table_base = qpd->page_table_base;
+- packet->bitfields14.gds_size = qpd->gds_size;
+- packet->bitfields14.num_gws = qpd->num_gws;
+- packet->bitfields14.num_oac = qpd->num_oac;
+- packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;
+-
+- packet->sh_mem_config = qpd->sh_mem_config;
+- packet->sh_mem_bases = qpd->sh_mem_bases;
+- packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base;
+- packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit;
+-
+- packet->sh_hidden_private_base_vmid = qpd->sh_hidden_private_base;
+-
+- packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
+- packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);
+-
+- return 0;
+-}
+-
+ static uint32_t pm_get_map_process_packet_size_cik(void)
+ {
+ return sizeof(struct pm4_map_process);
+ }
+-static uint32_t pm_get_map_process_scratch_packet_size_cik(void)
+-{
+- return sizeof(struct pm4_map_process_scratch_kv);
+-}
+
+
+ static struct packet_manager_funcs kfd_cik_pm_funcs = {
+@@ -146,15 +109,14 @@ static struct packet_manager_funcs kfd_cik_pm_funcs = {
+ };
+
+ static struct packet_manager_funcs kfd_cik_scratch_pm_funcs = {
+- .map_process = pm_map_process_scratch_cik,
++ .map_process = pm_map_process_vi,
+ .runlist = pm_runlist_vi,
+ .set_resources = pm_set_resources_vi,
+ .map_queues = pm_map_queues_vi,
+ .unmap_queues = pm_unmap_queues_vi,
+ .query_status = pm_query_status_vi,
+ .release_mem = pm_release_mem_vi,
+- .get_map_process_packet_size =
+- pm_get_map_process_scratch_packet_size_cik,
++ .get_map_process_packet_size = pm_get_map_process_packet_size_vi,
+ .get_runlist_packet_size = pm_get_runlist_packet_size_vi,
+ .get_set_resources_packet_size = pm_get_set_resources_packet_size_vi,
+ .get_map_queues_packet_size = pm_get_map_queues_packet_size_vi,
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
+index 9022ecb..13ff604d 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
+@@ -67,7 +67,7 @@ static void submit_packet_vi(struct kernel_queue *kq)
+ kq->pending_wptr);
+ }
+
+-static int pm_map_process_vi(struct packet_manager *pm,
++int pm_map_process_vi(struct packet_manager *pm,
+ uint32_t *buffer, struct qcm_process_device *qpd)
+ {
+ struct pm4_mes_map_process *packet;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index 25a227d..bca1322 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -1002,6 +1002,8 @@ void pm_release_ib(struct packet_manager *pm);
+
+ /* Following PM funcs can be shared among CIK and VI */
+ unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size);
++int pm_map_process_vi(struct packet_manager *pm,
++ uint32_t *buffer, struct qcm_process_device *qpd);
+ int pm_runlist_vi(struct packet_manager *pm, uint32_t *buffer,
+ uint64_t ib, size_t ib_size_in_dwords, bool chain);
+ int pm_map_queues_vi(struct packet_manager *pm, uint32_t *buffer,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5629-drm-amdgpu-Remove-pm_map_process_cik.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5629-drm-amdgpu-Remove-pm_map_process_cik.patch
new file mode 100644
index 00000000..858d5d85
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5629-drm-amdgpu-Remove-pm_map_process_cik.patch
@@ -0,0 +1,347 @@
+From 893a19c112bcae9c2118041ed5432d69a371fc0b Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Tue, 27 Mar 2018 15:23:19 -0400
+Subject: [PATCH 5629/5725] drm/amdgpu: Remove pm_map_process_cik
+
+This deprecated packet format does not support scratch memory, which
+has long been required by the runtime. It was not upstreamed and can
+be removed.
+
+Now CIK and VI use the same packets across the board, so there is no
+more need to maintain a separate function table for CIK. The FW
+version check is also no longer needed.
+
+Change-Id: Icb2d9fb0e83eb0dc1547fd85bf4cd971b4b08fec
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 4 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c | 79 ----------------------
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c | 2 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c | 26 +++----
+ drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 10 ++-
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 31 +--------
+ 6 files changed, 23 insertions(+), 129 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index 8c04f7a2..b0c159a 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -885,7 +885,7 @@ static void uninitialize(struct device_queue_manager *dqm)
+ static int start_nocpsch(struct device_queue_manager *dqm)
+ {
+ init_interrupts(dqm);
+- return pm_init(&dqm->packets, dqm, dqm->dev->mec_fw_version);
++ return pm_init(&dqm->packets, dqm);
+ }
+
+ static int stop_nocpsch(struct device_queue_manager *dqm)
+@@ -1030,7 +1030,7 @@ static int start_cpsch(struct device_queue_manager *dqm)
+
+ retval = 0;
+
+- retval = pm_init(&dqm->packets, dqm, dqm->dev->mec_fw_version);
++ retval = pm_init(&dqm->packets, dqm);
+ if (retval)
+ goto fail_packet_manager_init;
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c
+index b8a7c4a..b48c29f 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c
+@@ -53,82 +53,3 @@ static void submit_packet_cik(struct kernel_queue *kq)
+ write_kernel_doorbell(kq->queue->properties.doorbell_ptr,
+ kq->pending_wptr);
+ }
+-
+-static int pm_map_process_cik(struct packet_manager *pm, uint32_t *buffer,
+- struct qcm_process_device *qpd)
+-{
+- struct pm4_map_process *packet;
+-
+- packet = (struct pm4_map_process *)buffer;
+-
+- memset(buffer, 0, sizeof(struct pm4_map_process));
+-
+- packet->header.u32all = pm_build_pm4_header(IT_MAP_PROCESS,
+- sizeof(struct pm4_map_process));
+- packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
+- packet->bitfields2.process_quantum = 1;
+- packet->bitfields2.pasid = qpd->pqm->process->pasid;
+- packet->bitfields3.page_table_base = qpd->page_table_base;
+- packet->bitfields10.gds_size = qpd->gds_size;
+- packet->bitfields10.num_gws = qpd->num_gws;
+- packet->bitfields10.num_oac = qpd->num_oac;
+- packet->bitfields10.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;
+-
+- packet->sh_mem_config = qpd->sh_mem_config;
+- packet->sh_mem_bases = qpd->sh_mem_bases;
+- packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base;
+- packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit;
+-
+- packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
+- packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);
+-
+- return 0;
+-}
+-
+-static uint32_t pm_get_map_process_packet_size_cik(void)
+-{
+- return sizeof(struct pm4_map_process);
+-}
+-
+-
+-static struct packet_manager_funcs kfd_cik_pm_funcs = {
+- .map_process = pm_map_process_cik,
+- .runlist = pm_runlist_vi,
+- .set_resources = pm_set_resources_vi,
+- .map_queues = pm_map_queues_vi,
+- .unmap_queues = pm_unmap_queues_vi,
+- .query_status = pm_query_status_vi,
+- .release_mem = pm_release_mem_vi,
+- .get_map_process_packet_size = pm_get_map_process_packet_size_cik,
+- .get_runlist_packet_size = pm_get_runlist_packet_size_vi,
+- .get_set_resources_packet_size = pm_get_set_resources_packet_size_vi,
+- .get_map_queues_packet_size = pm_get_map_queues_packet_size_vi,
+- .get_unmap_queues_packet_size = pm_get_unmap_queues_packet_size_vi,
+- .get_query_status_packet_size = pm_get_query_status_packet_size_vi,
+- .get_release_mem_packet_size = pm_get_release_mem_packet_size_vi,
+-};
+-
+-static struct packet_manager_funcs kfd_cik_scratch_pm_funcs = {
+- .map_process = pm_map_process_vi,
+- .runlist = pm_runlist_vi,
+- .set_resources = pm_set_resources_vi,
+- .map_queues = pm_map_queues_vi,
+- .unmap_queues = pm_unmap_queues_vi,
+- .query_status = pm_query_status_vi,
+- .release_mem = pm_release_mem_vi,
+- .get_map_process_packet_size = pm_get_map_process_packet_size_vi,
+- .get_runlist_packet_size = pm_get_runlist_packet_size_vi,
+- .get_set_resources_packet_size = pm_get_set_resources_packet_size_vi,
+- .get_map_queues_packet_size = pm_get_map_queues_packet_size_vi,
+- .get_unmap_queues_packet_size = pm_get_unmap_queues_packet_size_vi,
+- .get_query_status_packet_size = pm_get_query_status_packet_size_vi,
+- .get_release_mem_packet_size = pm_get_release_mem_packet_size_vi,
+-};
+-
+-void kfd_pm_func_init_cik(struct packet_manager *pm, uint16_t fw_ver)
+-{
+- if (fw_ver >= KFD_SCRATCH_KV_FW_VER)
+- pm->pmf = &kfd_cik_scratch_pm_funcs;
+- else
+- pm->pmf = &kfd_cik_pm_funcs;
+-}
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
+index 5fe4f60..b53e5ee 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
+@@ -370,7 +370,7 @@ static struct packet_manager_funcs kfd_v9_pm_funcs = {
+ .get_release_mem_packet_size = pm_get_release_mem_packet_size_v9,
+ };
+
+-void kfd_pm_func_init_v9(struct packet_manager *pm, uint16_t fw_ver)
++void kfd_pm_func_init_v9(struct packet_manager *pm)
+ {
+ pm->pmf = &kfd_v9_pm_funcs;
+ }
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
+index 13ff604d..e798873 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
+@@ -67,7 +67,7 @@ static void submit_packet_vi(struct kernel_queue *kq)
+ kq->pending_wptr);
+ }
+
+-int pm_map_process_vi(struct packet_manager *pm,
++static int pm_map_process_vi(struct packet_manager *pm,
+ uint32_t *buffer, struct qcm_process_device *qpd)
+ {
+ struct pm4_mes_map_process *packet;
+@@ -112,7 +112,7 @@ unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size)
+ return header.u32All;
+ }
+
+-int pm_runlist_vi(struct packet_manager *pm, uint32_t *buffer,
++static int pm_runlist_vi(struct packet_manager *pm, uint32_t *buffer,
+ uint64_t ib, size_t ib_size_in_dwords, bool chain)
+ {
+ struct pm4_mes_runlist *packet;
+@@ -150,7 +150,7 @@ int pm_runlist_vi(struct packet_manager *pm, uint32_t *buffer,
+ return 0;
+ }
+
+-int pm_map_queues_vi(struct packet_manager *pm, uint32_t *buffer,
++static int pm_map_queues_vi(struct packet_manager *pm, uint32_t *buffer,
+ struct queue *q, bool is_static)
+ {
+ struct pm4_mes_map_queues *packet;
+@@ -237,7 +237,7 @@ int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer,
+ return 0;
+ }
+
+-int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer,
++static int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer,
+ enum kfd_queue_type type,
+ enum kfd_unmap_queues_filter filter,
+ uint32_t filter_param, bool reset,
+@@ -302,7 +302,7 @@ int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer,
+
+ }
+
+-int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer,
++static int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer,
+ uint64_t fence_address, uint32_t fence_value)
+ {
+ struct pm4_mes_query_status *packet;
+@@ -329,7 +329,7 @@ int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer,
+ }
+
+
+-uint32_t pm_release_mem_vi(uint64_t gpu_addr, uint32_t *buffer)
++static uint32_t pm_release_mem_vi(uint64_t gpu_addr, uint32_t *buffer)
+ {
+ struct pm4_mec_release_mem *packet;
+
+@@ -358,12 +358,12 @@ uint32_t pm_release_mem_vi(uint64_t gpu_addr, uint32_t *buffer)
+ return sizeof(struct pm4_mec_release_mem) / sizeof(unsigned int);
+ }
+
+-uint32_t pm_get_map_process_packet_size_vi(void)
++static uint32_t pm_get_map_process_packet_size_vi(void)
+ {
+ return sizeof(struct pm4_mes_map_process);
+ }
+
+-uint32_t pm_get_runlist_packet_size_vi(void)
++static uint32_t pm_get_runlist_packet_size_vi(void)
+ {
+ return sizeof(struct pm4_mes_runlist);
+ }
+@@ -373,22 +373,22 @@ uint32_t pm_get_set_resources_packet_size_vi(void)
+ return sizeof(struct pm4_mes_set_resources);
+ }
+
+-uint32_t pm_get_map_queues_packet_size_vi(void)
++static uint32_t pm_get_map_queues_packet_size_vi(void)
+ {
+ return sizeof(struct pm4_mes_map_queues);
+ }
+
+-uint32_t pm_get_unmap_queues_packet_size_vi(void)
++static uint32_t pm_get_unmap_queues_packet_size_vi(void)
+ {
+ return sizeof(struct pm4_mes_unmap_queues);
+ }
+
+-uint32_t pm_get_query_status_packet_size_vi(void)
++static uint32_t pm_get_query_status_packet_size_vi(void)
+ {
+ return sizeof(struct pm4_mes_query_status);
+ }
+
+-uint32_t pm_get_release_mem_packet_size_vi(void)
++static uint32_t pm_get_release_mem_packet_size_vi(void)
+ {
+ return sizeof(struct pm4_mec_release_mem);
+ }
+@@ -411,7 +411,7 @@ static struct packet_manager_funcs kfd_vi_pm_funcs = {
+ .get_release_mem_packet_size = pm_get_release_mem_packet_size_vi,
+ };
+
+-void kfd_pm_func_init_vi(struct packet_manager *pm, uint16_t fw_ver)
++void kfd_pm_func_init_vi(struct packet_manager *pm)
+ {
+ pm->pmf = &kfd_vi_pm_funcs;
+ }
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+index 98c89d2..8abefd7 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+@@ -217,8 +217,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
+ return retval;
+ }
+
+-int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm,
+- uint16_t fw_ver)
++int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
+ {
+ pm->dqm = dqm;
+ mutex_init(&pm->lock);
+@@ -232,18 +231,17 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm,
+ switch (pm->dqm->dev->device_info->asic_family) {
+ case CHIP_KAVERI:
+ case CHIP_HAWAII:
+- kfd_pm_func_init_cik(pm, fw_ver);
+- break;
++ /* PM4 packet structures on CIK are the same as on VI */
+ case CHIP_CARRIZO:
+ case CHIP_TONGA:
+ case CHIP_FIJI:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+- kfd_pm_func_init_vi(pm, fw_ver);
++ kfd_pm_func_init_vi(pm);
+ break;
+ case CHIP_VEGA10:
+ case CHIP_RAVEN:
+- kfd_pm_func_init_v9(pm, fw_ver);
++ kfd_pm_func_init_v9(pm);
+ break;
+ default:
+ BUG();
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index bca1322..fe25058 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -984,8 +984,7 @@ struct packet_manager_funcs {
+
+ };
+
+-int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm,
+- uint16_t fw_ver);
++int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
+ void pm_uninit(struct packet_manager *pm);
+ int pm_send_set_resources(struct packet_manager *pm,
+ struct scheduling_resources *res);
+@@ -1002,36 +1001,12 @@ void pm_release_ib(struct packet_manager *pm);
+
+ /* Following PM funcs can be shared among CIK and VI */
+ unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size);
+-int pm_map_process_vi(struct packet_manager *pm,
+- uint32_t *buffer, struct qcm_process_device *qpd);
+-int pm_runlist_vi(struct packet_manager *pm, uint32_t *buffer,
+- uint64_t ib, size_t ib_size_in_dwords, bool chain);
+-int pm_map_queues_vi(struct packet_manager *pm, uint32_t *buffer,
+- struct queue *q, bool is_static);
+ int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer,
+ struct scheduling_resources *res);
+-int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer,
+- enum kfd_queue_type type,
+- enum kfd_unmap_queues_filter filter,
+- uint32_t filter_param, bool reset,
+- unsigned int sdma_engine);
+-int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer,
+- uint64_t fence_address, uint32_t fence_value);
+-uint32_t pm_release_mem_vi(uint64_t gpu_addr, uint32_t *buffer);
+-
+-uint32_t pm_get_map_process_packet_size_vi(void);
+-uint32_t pm_get_runlist_packet_size_vi(void);
+ uint32_t pm_get_set_resources_packet_size_vi(void);
+-uint32_t pm_get_map_queues_packet_size_vi(void);
+-uint32_t pm_get_unmap_queues_packet_size_vi(void);
+-uint32_t pm_get_query_status_packet_size_vi(void);
+-uint32_t pm_get_release_mem_packet_size_vi(void);
+-
+-
+-void kfd_pm_func_init_vi(struct packet_manager *pm, uint16_t fw_ver);
+-void kfd_pm_func_init_cik(struct packet_manager *pm, uint16_t fw_ver);
+
+-void kfd_pm_func_init_v9(struct packet_manager *pm, uint16_t fw_ver);
++void kfd_pm_func_init_vi(struct packet_manager *pm);
++void kfd_pm_func_init_v9(struct packet_manager *pm);
+
+
+ uint64_t kfd_get_number_elems(struct kfd_dev *kfd);
+--
+2.7.4
+
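With the CIK-specific table gone, pm_init() above selects the packet-manager functions purely by ASIC family, letting CHIP_KAVERI and CHIP_HAWAII fall through to the VI initializer. A trimmed model of that dispatch is sketched below; the enum values and returned names are stand-ins for the kernel's asic_family constants and function tables.

#include <stdio.h>

/* Hypothetical subset of the ASIC families handled by pm_init(). */
enum asic_family { KAVERI, HAWAII, CARRIZO, TONGA, VEGA10, RAVEN };

static const char *pm_funcs_for(enum asic_family family)
{
	switch (family) {
	case KAVERI:
	case HAWAII:
		/* PM4 packet structures on CIK are the same as on VI */
	case CARRIZO:
	case TONGA:
		return "kfd_vi_pm_funcs";
	case VEGA10:
	case RAVEN:
		return "kfd_v9_pm_funcs";
	}
	return "unknown";
}

int main(void)
{
	printf("HAWAII -> %s\n", pm_funcs_for(HAWAII));
	printf("VEGA10 -> %s\n", pm_funcs_for(VEGA10));
	return 0;
}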
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5630-drm-amdkfd-Put-packet-sizes-directly-into-packet_man.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5630-drm-amdkfd-Put-packet-sizes-directly-into-packet_man.patch
new file mode 100644
index 00000000..db935237
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5630-drm-amdkfd-Put-packet-sizes-directly-into-packet_man.patch
@@ -0,0 +1,290 @@
+From ac3a7f1f0b4f8163bf43a6b97c29aa310edb8192 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Tue, 27 Mar 2018 15:50:08 -0400
+Subject: [PATCH 5630/5725] drm/amdkfd: Put packet sizes directly into
+ packet_manager_funcs
+
+This is more efficient than indirectly calling a size query function
+that just returns the constant size.
+
+Change-Id: Ifbab7d7ea74b66e7de56e061a2c8fa78cfc0db47
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c | 58 ++++++---------------
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c | 64 ++++++------------------
+ drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 20 ++++----
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 17 +++----
+ 4 files changed, 46 insertions(+), 113 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
+index b53e5ee..f311f13 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
+@@ -323,51 +323,21 @@ static uint32_t pm_release_mem_v9(uint64_t gpu_addr, uint32_t *buffer)
+ return sizeof(struct pm4_mec_release_mem) / sizeof(unsigned int);
+ }
+
+-static uint32_t pm_get_map_process_packet_size_v9(void)
+-{
+- return sizeof(struct pm4_mes_map_process);
+-}
+-
+-static uint32_t pm_get_runlist_packet_size_v9(void)
+-{
+- return sizeof(struct pm4_mes_runlist);
+-}
+-
+-static uint32_t pm_get_map_queues_packet_size_v9(void)
+-{
+- return sizeof(struct pm4_mes_map_queues);
+-}
+-
+-static uint32_t pm_get_unmap_queues_packet_size_v9(void)
+-{
+- return sizeof(struct pm4_mes_unmap_queues);
+-}
+-
+-static uint32_t pm_get_query_status_packet_size_v9(void)
+-{
+- return sizeof(struct pm4_mes_query_status);
+-}
+-
+-static uint32_t pm_get_release_mem_packet_size_v9(void)
+-{
+- return sizeof(struct pm4_mec_release_mem);
+-}
+-
+ static struct packet_manager_funcs kfd_v9_pm_funcs = {
+- .map_process = pm_map_process_v9,
+- .runlist = pm_runlist_v9,
+- .set_resources = pm_set_resources_vi,
+- .map_queues = pm_map_queues_v9,
+- .unmap_queues = pm_unmap_queues_v9,
+- .query_status = pm_query_status_v9,
+- .release_mem = pm_release_mem_v9,
+- .get_map_process_packet_size = pm_get_map_process_packet_size_v9,
+- .get_runlist_packet_size = pm_get_runlist_packet_size_v9,
+- .get_set_resources_packet_size = pm_get_set_resources_packet_size_vi,
+- .get_map_queues_packet_size = pm_get_map_queues_packet_size_v9,
+- .get_unmap_queues_packet_size = pm_get_unmap_queues_packet_size_v9,
+- .get_query_status_packet_size = pm_get_query_status_packet_size_v9,
+- .get_release_mem_packet_size = pm_get_release_mem_packet_size_v9,
++ .map_process = pm_map_process_v9,
++ .runlist = pm_runlist_v9,
++ .set_resources = pm_set_resources_vi,
++ .map_queues = pm_map_queues_v9,
++ .unmap_queues = pm_unmap_queues_v9,
++ .query_status = pm_query_status_v9,
++ .release_mem = pm_release_mem_v9,
++ .map_process_size = sizeof(struct pm4_mes_map_process),
++ .runlist_size = sizeof(struct pm4_mes_runlist),
++ .set_resources_size = sizeof(struct pm4_mes_set_resources),
++ .map_queues_size = sizeof(struct pm4_mes_map_queues),
++ .unmap_queues_size = sizeof(struct pm4_mes_unmap_queues),
++ .query_status_size = sizeof(struct pm4_mes_query_status),
++ .release_mem_size = sizeof(struct pm4_mec_release_mem)
+ };
+
+ void kfd_pm_func_init_v9(struct packet_manager *pm)
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
+index e798873..178c5d0 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
+@@ -358,57 +358,21 @@ static uint32_t pm_release_mem_vi(uint64_t gpu_addr, uint32_t *buffer)
+ return sizeof(struct pm4_mec_release_mem) / sizeof(unsigned int);
+ }
+
+-static uint32_t pm_get_map_process_packet_size_vi(void)
+-{
+- return sizeof(struct pm4_mes_map_process);
+-}
+-
+-static uint32_t pm_get_runlist_packet_size_vi(void)
+-{
+- return sizeof(struct pm4_mes_runlist);
+-}
+-
+-uint32_t pm_get_set_resources_packet_size_vi(void)
+-{
+- return sizeof(struct pm4_mes_set_resources);
+-}
+-
+-static uint32_t pm_get_map_queues_packet_size_vi(void)
+-{
+- return sizeof(struct pm4_mes_map_queues);
+-}
+-
+-static uint32_t pm_get_unmap_queues_packet_size_vi(void)
+-{
+- return sizeof(struct pm4_mes_unmap_queues);
+-}
+-
+-static uint32_t pm_get_query_status_packet_size_vi(void)
+-{
+- return sizeof(struct pm4_mes_query_status);
+-}
+-
+-static uint32_t pm_get_release_mem_packet_size_vi(void)
+-{
+- return sizeof(struct pm4_mec_release_mem);
+-}
+-
+-
+ static struct packet_manager_funcs kfd_vi_pm_funcs = {
+- .map_process = pm_map_process_vi,
+- .runlist = pm_runlist_vi,
+- .set_resources = pm_set_resources_vi,
+- .map_queues = pm_map_queues_vi,
+- .unmap_queues = pm_unmap_queues_vi,
+- .query_status = pm_query_status_vi,
+- .release_mem = pm_release_mem_vi,
+- .get_map_process_packet_size = pm_get_map_process_packet_size_vi,
+- .get_runlist_packet_size = pm_get_runlist_packet_size_vi,
+- .get_set_resources_packet_size = pm_get_set_resources_packet_size_vi,
+- .get_map_queues_packet_size = pm_get_map_queues_packet_size_vi,
+- .get_unmap_queues_packet_size = pm_get_unmap_queues_packet_size_vi,
+- .get_query_status_packet_size = pm_get_query_status_packet_size_vi,
+- .get_release_mem_packet_size = pm_get_release_mem_packet_size_vi,
++ .map_process = pm_map_process_vi,
++ .runlist = pm_runlist_vi,
++ .set_resources = pm_set_resources_vi,
++ .map_queues = pm_map_queues_vi,
++ .unmap_queues = pm_unmap_queues_vi,
++ .query_status = pm_query_status_vi,
++ .release_mem = pm_release_mem_vi,
++ .map_process_size = sizeof(struct pm4_mes_map_process),
++ .runlist_size = sizeof(struct pm4_mes_runlist),
++ .set_resources_size = sizeof(struct pm4_mes_set_resources),
++ .map_queues_size = sizeof(struct pm4_mes_map_queues),
++ .unmap_queues_size = sizeof(struct pm4_mes_unmap_queues),
++ .query_status_size = sizeof(struct pm4_mes_query_status),
++ .release_mem_size = sizeof(struct pm4_mec_release_mem)
+ };
+
+ void kfd_pm_func_init_vi(struct packet_manager *pm)
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+index 8abefd7..699352b 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+@@ -69,9 +69,9 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
+ pr_debug("Over subscribed runlist\n");
+ }
+
+- map_queue_size = pm->pmf->get_map_queues_packet_size();
++ map_queue_size = pm->pmf->map_queues_size;
+ /* calculate run list ib allocation size */
+- *rlib_size = process_count * pm->pmf->get_map_process_packet_size() +
++ *rlib_size = process_count * pm->pmf->map_process_size +
+ queue_count * map_queue_size;
+
+ /*
+@@ -79,7 +79,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
+ * when over subscription
+ */
+ if (*over_subscription)
+- *rlib_size += pm->pmf->get_runlist_packet_size();
++ *rlib_size += pm->pmf->runlist_size;
+
+ pr_debug("runlist ib size %d\n", *rlib_size);
+ }
+@@ -160,7 +160,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
+ return retval;
+
+ proccesses_mapped++;
+- inc_wptr(&rl_wptr, pm->pmf->get_map_process_packet_size(),
++ inc_wptr(&rl_wptr, pm->pmf->map_process_size,
+ alloc_size_bytes);
+
+ list_for_each_entry(kq, &qpd->priv_queue_list, list) {
+@@ -178,7 +178,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
+ return retval;
+
+ inc_wptr(&rl_wptr,
+- pm->pmf->get_map_queues_packet_size(),
++ pm->pmf->map_queues_size,
+ alloc_size_bytes);
+ }
+
+@@ -197,7 +197,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
+ return retval;
+
+ inc_wptr(&rl_wptr,
+- pm->pmf->get_map_queues_packet_size(),
++ pm->pmf->map_queues_size,
+ alloc_size_bytes);
+ }
+ }
+@@ -262,7 +262,7 @@ int pm_send_set_resources(struct packet_manager *pm,
+ uint32_t *buffer, size;
+ int retval = 0;
+
+- size = pm->pmf->get_set_resources_packet_size();
++ size = pm->pmf->set_resources_size;
+ mutex_lock(&pm->lock);
+ pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
+ size / sizeof(uint32_t),
+@@ -299,7 +299,7 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
+
+ pr_debug("runlist IB address: 0x%llX\n", rl_gpu_ib_addr);
+
+- packet_size_dwords = pm->pmf->get_runlist_packet_size() /
++ packet_size_dwords = pm->pmf->runlist_size /
+ sizeof(uint32_t);
+ mutex_lock(&pm->lock);
+
+@@ -337,7 +337,7 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
+ if (WARN_ON(!fence_address))
+ return -EFAULT;
+
+- size = pm->pmf->get_query_status_packet_size();
++ size = pm->pmf->query_status_size;
+ mutex_lock(&pm->lock);
+ pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
+ size / sizeof(uint32_t), (unsigned int **)&buffer);
+@@ -366,7 +366,7 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
+ uint32_t *buffer, size;
+ int retval = 0;
+
+- size = pm->pmf->get_unmap_queues_packet_size();
++ size = pm->pmf->unmap_queues_size;
+ mutex_lock(&pm->lock);
+ pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
+ size / sizeof(uint32_t), (unsigned int **)&buffer);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index fe25058..319a8b7 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -974,14 +974,14 @@ struct packet_manager_funcs {
+ uint64_t fence_address, uint32_t fence_value);
+ uint32_t (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);
+
+- uint32_t (*get_map_process_packet_size)(void);
+- uint32_t (*get_runlist_packet_size)(void);
+- uint32_t (*get_set_resources_packet_size)(void);
+- uint32_t (*get_map_queues_packet_size)(void);
+- uint32_t (*get_unmap_queues_packet_size)(void);
+- uint32_t (*get_query_status_packet_size)(void);
+- uint32_t (*get_release_mem_packet_size)(void);
+-
++ /* Packet sizes */
++ int map_process_size;
++ int runlist_size;
++ int set_resources_size;
++ int map_queues_size;
++ int unmap_queues_size;
++ int query_status_size;
++ int release_mem_size;
+ };
+
+ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
+@@ -1003,7 +1003,6 @@ void pm_release_ib(struct packet_manager *pm);
+ unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size);
+ int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer,
+ struct scheduling_resources *res);
+-uint32_t pm_get_set_resources_packet_size_vi(void);
+
+ void kfd_pm_func_init_vi(struct packet_manager *pm);
+ void kfd_pm_func_init_v9(struct packet_manager *pm);
+--
+2.7.4
+
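
For readers skimming the series, the patch above amounts to replacing one layer of indirection with a constant. A minimal sketch of the before/after shape (struct and field names mirror the patch; everything else is omitted):

#include <stdint.h>

/* Old scheme: every packet-size lookup was an indirect call. */
struct pm_funcs_old {
	uint32_t (*get_map_queues_packet_size)(void);
};

/* New scheme: the size is a constant stored directly in the table. */
struct pm_funcs_new {
	int map_queues_size;	/* e.g. sizeof(struct pm4_mes_map_queues) */
};

/* Call sites change from
 *	size = pm->pmf->get_map_queues_packet_size();
 * to
 *	size = pm->pmf->map_queues_size;
 */
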
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5631-drm-amdkfd-GPU-recovery-support-from-KFD-step-1.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5631-drm-amdkfd-GPU-recovery-support-from-KFD-step-1.patch
new file mode 100644
index 00000000..8c51e51d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5631-drm-amdkfd-GPU-recovery-support-from-KFD-step-1.patch
@@ -0,0 +1,150 @@
+From 04955c40500e36a943a4b91a00126c3cc6eb42a6 Mon Sep 17 00:00:00 2001
+From: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Date: Wed, 28 Feb 2018 11:46:32 -0500
+Subject: [PATCH 5631/5725] drm/amdkfd: GPU recovery support from KFD (step 1)
+
+Lock KFD and evict existing queues on reset
+
+Change-Id: I0f0526b5beac68bd7a96ead58b95a57d4f7f8b13
+Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 5 ++++
+ drivers/gpu/drm/amd/amdkfd/kfd_device.c | 43 +++++++++++++++++++++++++++++---
+ drivers/gpu/drm/amd/amdkfd/kfd_events.c | 5 ++++
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 4 +++
+ 4 files changed, 54 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index 1fbde9b..98b000b 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -136,6 +136,11 @@ static int kfd_open(struct inode *inode, struct file *filep)
+ if (IS_ERR(process))
+ return PTR_ERR(process);
+
++ if (kfd_is_locked()) {
++ kfd_unref_process(process);
++ return -EAGAIN;
++ }
++
+ dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
+ process->pasid, process->is_32bit_user_mode);
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+index 6f9a8e5..26c6163 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -32,7 +32,13 @@
+ #include "kfd_iommu.h"
+
+ #define MQD_SIZE_ALIGNED 768
+-static atomic_t kfd_device_suspended = ATOMIC_INIT(0);
++
++/*
++ * kfd_locked is used to lock the kfd driver during suspend or reset.
++ * Once locked, the kfd driver will stop any further GPU execution;
++ * process creation (open) will return -EAGAIN.
++ */
++static atomic_t kfd_locked = ATOMIC_INIT(0);
+
+ #ifdef KFD_SUPPORT_IOMMU_V2
+ static const struct kfd_device_info kaveri_device_info = {
+@@ -553,21 +559,52 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
+
+ int kgd2kfd_pre_reset(struct kfd_dev *kfd)
+ {
++ if (!kfd->init_complete)
++ return 0;
++ kgd2kfd_suspend(kfd);
++
++ /* hold dqm->lock to prevent further execution*/
++ mutex_lock(&kfd->dqm->lock);
++
++ kfd_signal_reset_event(kfd);
+ return 0;
+ }
+
++/*
++ * FIXME: KFD won't be able to resume existing processes for now.
++ * We will keep all existing processes in an evicted state and
++ * wait for them to be terminated.
++ */
++
+ int kgd2kfd_post_reset(struct kfd_dev *kfd)
+ {
++ int ret, count;
++
++ if (!kfd->init_complete)
++ return 0;
++
++ mutex_unlock(&kfd->dqm->lock);
++
++ ret = kfd_resume(kfd);
++ if (ret)
++ return ret;
++ count = atomic_dec_return(&kfd_locked);
++ WARN_ONCE(count != 0, "KFD reset ref. error");
+ return 0;
+ }
+
++bool kfd_is_locked(void)
++{
++ return (atomic_read(&kfd_locked) > 0);
++}
++
+ void kgd2kfd_suspend(struct kfd_dev *kfd)
+ {
+ if (!kfd->init_complete)
+ return;
+
+ /* For first KFD device suspend all the KFD processes */
+- if (atomic_inc_return(&kfd_device_suspended) == 1)
++ if (atomic_inc_return(&kfd_locked) == 1)
+ kfd_suspend_all_processes();
+
+ kfd->dqm->ops.stop(kfd->dqm);
+@@ -586,7 +623,7 @@ int kgd2kfd_resume(struct kfd_dev *kfd)
+ if (ret)
+ return ret;
+
+- count = atomic_dec_return(&kfd_device_suspended);
++ count = atomic_dec_return(&kfd_locked);
+ WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
+ if (count == 0)
+ ret = kfd_resume_all_processes();
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+index 644ce9d..09c1c31 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+@@ -1009,3 +1009,8 @@ void kfd_signal_vm_fault_event(struct kfd_dev *dev, unsigned int pasid,
+ mutex_unlock(&p->event_mutex);
+ kfd_unref_process(p);
+ }
++
++void kfd_signal_reset_event(struct kfd_dev *dev)
++{
++ /*todo*/
++}
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index 319a8b7..97f729c 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -1042,10 +1042,14 @@ int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);
+ void kfd_signal_vm_fault_event(struct kfd_dev *dev, unsigned int pasid,
+ struct kfd_vm_fault_info *info);
+
++void kfd_signal_reset_event(struct kfd_dev *dev);
++
+ void kfd_flush_tlb(struct kfd_process_device *pdd);
+
+ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p);
+
++bool kfd_is_locked(void);
++
+ #define KFD_SCRATCH_KV_FW_VER 413
+
+ /* PeerDirect support */
+--
+2.7.4
+
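
A hedged sketch of the reset sequence the two new entry points are meant to support on the amdgpu side; only kgd2kfd_pre_reset(), kgd2kfd_post_reset() and the kfd_locked behaviour come from the patch, the other names are assumptions:

/* Sketch only -- not part of the patch. */
static void amdgpu_gpu_reset_sketch(struct kfd_dev *kfd)
{
	/* Suspends KFD (kfd_locked goes up, new kfd_open() calls get
	 * -EAGAIN), takes dqm->lock and signals the reset event
	 * (the event itself is filled in two patches later). */
	kgd2kfd_pre_reset(kfd);

	do_asic_reset();	/* assumed: the actual hardware reset */

	/* Releases dqm->lock, resumes the device and drops kfd_locked;
	 * opens succeed again once the count reaches zero. */
	kgd2kfd_post_reset(kfd);
}
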
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5632-drm-amd-Add-kfd-ioctl-defines-for-hw_exception-event.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5632-drm-amd-Add-kfd-ioctl-defines-for-hw_exception-event.patch
new file mode 100644
index 00000000..f9d4f0fd
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5632-drm-amd-Add-kfd-ioctl-defines-for-hw_exception-event.patch
@@ -0,0 +1,49 @@
+From 018cc8b6204b0cce3be79c30706179d841953e2c Mon Sep 17 00:00:00 2001
+From: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Date: Wed, 11 Jul 2018 22:32:53 -0400
+Subject: [PATCH 5632/5725] drm/amd: Add kfd ioctl defines for hw_exception
+ event
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
+---
+ include/uapi/linux/kfd_ioctl.h | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
+index 7a6bb85..1ec5289 100644
+--- a/include/uapi/linux/kfd_ioctl.h
++++ b/include/uapi/linux/kfd_ioctl.h
+@@ -210,6 +210,15 @@ struct kfd_ioctl_dbg_wave_control_args {
+
+ #define KFD_SIGNAL_EVENT_LIMIT 4096
+
++/* For kfd_event_data.hw_exception_data.reset_type. */
++#define KFD_HW_EXCEPTION_WHOLE_GPU_RESET 0
++#define KFD_HW_EXCEPTION_PER_ENGINE_RESET 1
++
++/* For kfd_event_data.hw_exception_data.reset_cause. */
++#define KFD_HW_EXCEPTION_GPU_HANG 0
++#define KFD_HW_EXCEPTION_ECC 1
++
++
+ struct kfd_ioctl_create_event_args {
+ uint64_t event_page_offset; /* from KFD */
+ uint32_t event_trigger_data; /* from KFD - signal events only */
+@@ -263,6 +272,7 @@ struct kfd_hsa_hw_exception_data {
+ struct kfd_event_data {
+ union {
+ struct kfd_hsa_memory_exception_data memory_exception_data;
++ struct kfd_hsa_hw_exception_data hw_exception_data;
+ }; /* From KFD */
+ uint64_t kfd_event_data_ext; /* pointer to an extension structure
+ for future exception types */
+--
+2.7.4
+
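
A hedged userspace-side sketch of how the new union member and defines might be consumed once a HW_EXCEPTION event has been received (the wait ioctl itself is elided; only types and defines from the header above are used):

#include <linux/kfd_ioctl.h>
#include <stdio.h>

static void report_hw_exception(const struct kfd_event_data *ev)
{
	const struct kfd_hsa_hw_exception_data *hx = &ev->hw_exception_data;

	printf("gpu_id %u, memory_lost %u, reset type %s, cause %s\n",
	       hx->gpu_id, hx->memory_lost,
	       hx->reset_type == KFD_HW_EXCEPTION_WHOLE_GPU_RESET ?
			"whole GPU" : "per engine",
	       hx->reset_cause == KFD_HW_EXCEPTION_ECC ? "ECC" : "GPU hang");
}
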
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5633-drm-amdkfd-signal-hw_exception-event-on-GPU-reset.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5633-drm-amdkfd-signal-hw_exception-event-on-GPU-reset.patch
new file mode 100644
index 00000000..21b7f84f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5633-drm-amdkfd-signal-hw_exception-event-on-GPU-reset.patch
@@ -0,0 +1,62 @@
+From 50f70911de48a5002f7aa4bb871bb7417fd9bdf9 Mon Sep 17 00:00:00 2001
+From: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Date: Tue, 3 Apr 2018 16:11:00 -0400
+Subject: [PATCH 5633/5725] drm/amdkfd: signal hw_exception event on GPU reset
+
+Change-Id: I8fae18208103920796f81858f359a9cec563125c
+Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_events.c | 24 +++++++++++++++++++++++-
+ drivers/gpu/drm/amd/amdkfd/kfd_events.h | 1 +
+ 2 files changed, 24 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+index 09c1c31..24d8a21 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+@@ -1012,5 +1012,27 @@ void kfd_signal_vm_fault_event(struct kfd_dev *dev, unsigned int pasid,
+
+ void kfd_signal_reset_event(struct kfd_dev *dev)
+ {
+- /*todo*/
++ struct kfd_hsa_hw_exception_data hw_exception_data;
++ struct kfd_process *p;
++ struct kfd_event *ev;
++ unsigned int temp;
++ uint32_t id, idx;
++
++ /* Whole gpu reset caused by GPU hang , and memory is lost */
++ memset(&hw_exception_data, 0, sizeof(hw_exception_data));
++ hw_exception_data.gpu_id = dev->id;
++ hw_exception_data.memory_lost = 1;
++
++ idx = srcu_read_lock(&kfd_processes_srcu);
++ hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
++ mutex_lock(&p->event_mutex);
++ id = KFD_FIRST_NONSIGNAL_EVENT_ID;
++ idr_for_each_entry_continue(&p->event_idr, ev, id)
++ if (ev->type == KFD_EVENT_TYPE_HW_EXCEPTION) {
++ ev->hw_exception_data = hw_exception_data;
++ set_event(ev);
++ }
++ mutex_unlock(&p->event_mutex);
++ }
++ srcu_read_unlock(&kfd_processes_srcu, idx);
+ }
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.h b/drivers/gpu/drm/amd/amdkfd/kfd_events.h
+index abca5bf..c7ac6c7 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.h
+@@ -66,6 +66,7 @@ struct kfd_event {
+ /* type specific data */
+ union {
+ struct kfd_hsa_memory_exception_data memory_exception_data;
++ struct kfd_hsa_hw_exception_data hw_exception_data;
+ };
+ };
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5634-drm-amdkfd-remove-check-for-PCIe-upstream-bridge.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5634-drm-amdkfd-remove-check-for-PCIe-upstream-bridge.patch
new file mode 100644
index 00000000..d603bcca
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5634-drm-amdkfd-remove-check-for-PCIe-upstream-bridge.patch
@@ -0,0 +1,37 @@
+From 3944f92b3180fc83bf15a96bb3ec4d1b979a58ad Mon Sep 17 00:00:00 2001
+From: welu <Wei.Lu2@amd.com>
+Date: Wed, 4 Apr 2018 11:44:04 -0400
+Subject: [PATCH 5634/5725] drm/amdkfd: remove check for PCIe upstream bridge
+
+Atomic support for GFX9 GPUs:
+1. Set vega10 needs_pci_atomics to false because Vega10 does not need
+PCI atomics.
+2. First try to enable atomics in pci_enable_atomic_ops_to_root();
+if this call fails and needs_pci_atomics is true,
+report the error and return NULL.
+
+Bug:SWDEV-149359
+
+Change-Id: I71cbbe63cb1f03f606f8f4b5e4b8c796e164e0d1
+Signed-off-by: welu <Wei.Lu2@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_device.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+index 26c6163..088f5db 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -207,7 +207,7 @@ static const struct kfd_device_info polaris11_device_info = {
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .supports_cwsr = true,
+ .needs_iommu_device = false,
+- .needs_pci_atomics = true,
++ .needs_pci_atomics = false,
+ .num_sdma_engines = 2,
+ };
+
+--
+2.7.4
+
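
The commit message above describes the intended probe-time flow rather than the one-line diff. A minimal sketch of that flow, under the assumption that it lives in the KFD probe path (pci_enable_atomic_ops_to_root() and needs_pci_atomics are real names; the surrounding helper is hypothetical):

/* Sketch only. */
struct kfd_dev *kfd_probe_sketch(struct pci_dev *pdev,
				 const struct kfd_device_info *info)
{
	int ret = pci_enable_atomic_ops_to_root(pdev,
			PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
			PCI_EXP_DEVCAP2_ATOMIC_COMP64);

	if (ret < 0 && info->needs_pci_atomics) {
		/* The GPU needs PCIe atomics but the root port can't
		 * provide them: report it and bail out. */
		dev_err(&pdev->dev, "PCIe atomics not supported, skipping KFD\n");
		return NULL;
	}
	/* ... continue normal device initialisation (assumed) ... */
	return kfd_alloc_dev(pdev, info);	/* hypothetical helper */
}
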
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5635-drm-amdkfd-CMA-Refactor-CMA-code.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5635-drm-amdkfd-CMA-Refactor-CMA-code.patch
new file mode 100644
index 00000000..aef93aa8
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5635-drm-amdkfd-CMA-Refactor-CMA-code.patch
@@ -0,0 +1,430 @@
+From d4bee1f1396310283c73937b00a0b5e8997ffd59 Mon Sep 17 00:00:00 2001
+From: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+Date: Thu, 22 Mar 2018 17:25:54 -0400
+Subject: [PATCH 5635/5725] drm/amdkfd: CMA: Refactor CMA code
+
+This is similar to the process_vm_rw() functions. This refactoring is also
+helpful for the special handling of userptr BOs (upcoming commits).
+
+This commit does not change any functionality.
+
+v2: Fix potential fence leak
+
+Change-Id: Ic8f9c6a7599d2beac54d768831618df0207f10e9
+Signed-off-by: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+
+Conflicts:
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 337 +++++++++++++++++--------------
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 17 ++
+ 2 files changed, 206 insertions(+), 148 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index 98b000b..a7f0bdc 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -1672,22 +1672,165 @@ static int kfd_ioctl_ipc_import_handle(struct file *filep,
+ return r;
+ }
+
++
++/* Update cma_iter.cur_bo with KFD BO that is associated with
++ * cma_iter.array.va_addr
++ */
++static int kfd_cma_iter_update_bo(struct cma_iter *ci)
++{
++ struct kfd_memory_range *arr = ci->array;
++ uint64_t va_end = arr->va_addr + arr->size - 1;
++
++ mutex_lock(&ci->p->mutex);
++ ci->cur_bo = kfd_process_find_bo_from_interval(ci->p, arr->va_addr,
++ va_end);
++ mutex_unlock(&ci->p->mutex);
++
++ if (!ci->cur_bo || va_end > ci->cur_bo->it.last) {
++ pr_err("CMA failed. Range out of bounds\n");
++ return -EFAULT;
++ }
++ return 0;
++}
++
++/* Advance iter by @size bytes. */
++static int kfd_cma_iter_advance(struct cma_iter *ci, unsigned long size)
++{
++ int ret = 0;
++
++ ci->offset += size;
++ if (WARN_ON(size > ci->total || ci->offset > ci->array->size))
++ return -EFAULT;
++ ci->total -= size;
++ /* If current range is copied, move to next range if available. */
++ if (ci->offset == ci->array->size) {
++
++ /* End of all ranges */
++ if (!(--ci->nr_segs))
++ return 0;
++
++ ci->array++;
++ ci->offset = 0;
++ ret = kfd_cma_iter_update_bo(ci);
++ if (ret)
++ return ret;
++ }
++ ci->bo_offset = (ci->array->va_addr + ci->offset) -
++ ci->cur_bo->it.start;
++ return ret;
++}
++
++static int kfd_cma_iter_init(struct kfd_memory_range *arr, unsigned long segs,
++ struct kfd_process *p, struct cma_iter *ci)
++{
++ int ret;
++ int nr;
++
++ if (!arr || !segs)
++ return -EINVAL;
++
++ memset(ci, 0, sizeof(*ci));
++ ci->array = arr;
++ ci->nr_segs = segs;
++ ci->p = p;
++ ci->offset = 0;
++ for (nr = 0; nr < segs; nr++)
++ ci->total += arr[nr].size;
++
++ /* Valid but size is 0. So copied will also be 0 */
++ if (!ci->total)
++ return 0;
++
++ ret = kfd_cma_iter_update_bo(ci);
++ if (!ret)
++ ci->bo_offset = arr->va_addr - ci->cur_bo->it.start;
++ return ret;
++}
++
++static bool kfd_cma_iter_end(struct cma_iter *ci)
++{
++ if (!(ci->nr_segs) || !(ci->total))
++ return true;
++ return false;
++}
++
++/* Copy single range from source iterator @si to destination iterator @di.
++ * @si will move to next range and @di will move by bytes copied.
++ * @return : 0 for success or -ve for failure
++ * @f: The last fence if any
++ * @copied: out: number of bytes copied
++ */
++static int kfd_copy_single_range(struct cma_iter *si, struct cma_iter *di,
++ bool cma_write, struct dma_fence **f,
++ uint64_t *copied)
++{
++ int err = 0;
++ uint64_t copy_size, n;
++ uint64_t size = si->array->size;
++ struct kfd_bo *src_bo = si->cur_bo;
++ struct dma_fence *lfence = NULL;
++
++ if (!src_bo || !di || !copied)
++ return -EINVAL;
++ *copied = 0;
++ if (f)
++ *f = NULL;
++
++ while (size && !kfd_cma_iter_end(di)) {
++ struct dma_fence *fence = NULL;
++ struct kfd_bo *dst_bo = di->cur_bo;
++
++ copy_size = min(size, (di->array->size - di->offset));
++
++ /* Check both BOs belong to same device */
++ if (src_bo->dev->kgd != dst_bo->dev->kgd) {
++ pr_err("CMA fail. Not same dev\n");
++ return -EINVAL;
++ }
++
++ err = dst_bo->dev->kfd2kgd->copy_mem_to_mem(src_bo->dev->kgd,
++ src_bo->mem, si->bo_offset, dst_bo->mem, di->bo_offset,
++ copy_size, &fence, &n);
++ if (err) {
++ pr_err("GPU CMA %d failed\n", err);
++ break;
++ }
++
++ if (fence) {
++ dma_fence_put(lfence);
++ lfence = fence;
++ }
++ size -= n;
++ *copied += n;
++ err = kfd_cma_iter_advance(si, n);
++ if (err)
++ break;
++ err = kfd_cma_iter_advance(di, n);
++ if (err)
++ break;
++ }
++
++ if (f)
++ *f = dma_fence_get(lfence);
++ dma_fence_put(lfence);
++
++ return err;
++}
++
+ static int kfd_ioctl_cross_memory_copy(struct file *filep,
+ struct kfd_process *local_p, void *data)
+ {
+ struct kfd_ioctl_cross_memory_copy_args *args = data;
+ struct kfd_memory_range *src_array, *dst_array;
+- struct kfd_bo *src_bo, *dst_bo;
+- struct kfd_process *remote_p, *src_p, *dst_p;
++ struct kfd_process *remote_p;
+ struct task_struct *remote_task;
+ struct mm_struct *remote_mm;
+ struct pid *remote_pid;
+- struct dma_fence *fence = NULL, *lfence = NULL;
+- uint64_t dst_va_addr;
+- uint64_t copied, total_copied = 0;
+- uint64_t src_offset, dst_offset, dst_va_addr_end;
++ struct dma_fence *lfence = NULL;
++ uint64_t copied = 0, total_copied = 0;
++ struct cma_iter di, si;
+ const char *cma_op;
+- int i, j = 0, err = 0;
++ int err = 0;
+
+ /* Check parameters */
+ if (args->src_mem_range_array == 0 || args->dst_mem_range_array == 0 ||
+@@ -1754,159 +1897,57 @@ static int kfd_ioctl_cross_memory_copy(struct file *filep,
+ goto kfd_process_fail;
+ }
+
++ /* Initialise cma_iter si & @di with source & destination range. */
+ if (KFD_IS_CROSS_MEMORY_WRITE(args->flags)) {
+- src_p = local_p;
+- dst_p = remote_p;
+ cma_op = "WRITE";
+ pr_debug("CMA WRITE: local -> remote\n");
++ err = kfd_cma_iter_init(dst_array, args->dst_mem_array_size,
++ remote_p, &di);
++ if (err)
++ goto kfd_process_fail;
++ err = kfd_cma_iter_init(src_array, args->src_mem_array_size,
++ local_p, &si);
++ if (err)
++ goto kfd_process_fail;
+ } else {
+- src_p = remote_p;
+- dst_p = local_p;
+ cma_op = "READ";
+ pr_debug("CMA READ: remote -> local\n");
+- }
+-
+-
+- /* For each source kfd_range:
+- * - Find the BO. Each range has to be within the same BO.
+- * - Copy this range to single or multiple destination BOs.
+- * - dst_va_addr - will point to next va address into which data will
+- * be copied.
+- * - dst_bo & src_bo - the current destination and source BOs
+- * - src_offset & dst_offset - offset into the respective BOs from
+- * data will be sourced or copied
++ err = kfd_cma_iter_init(dst_array, args->dst_mem_array_size,
++ local_p, &di);
++ if (err)
++ goto kfd_process_fail;
++ err = kfd_cma_iter_init(src_array, args->src_mem_array_size,
++ remote_p, &si);
++ if (err)
++ goto kfd_process_fail;
++ }
++
++ /* Copy one si range at a time into di. After each call to
++ * kfd_copy_single_range() si will move to next range. di will be
++ * incremented by bytes copied
+ */
+- dst_va_addr = dst_array[0].va_addr;
+- dst_va_addr_end = dst_va_addr + dst_array[0].size - 1;
+- mutex_lock(&dst_p->mutex);
+- dst_bo = kfd_process_find_bo_from_interval(dst_p,
+- dst_va_addr,
+- dst_va_addr_end);
+- mutex_unlock(&dst_p->mutex);
+- if (!dst_bo || dst_va_addr_end > dst_bo->it.last) {
+- pr_err("CMA %s failed. Invalid dst range\n", cma_op);
+- err = -EFAULT;
+- goto kfd_process_fail;
+- }
+- dst_offset = dst_va_addr - dst_bo->it.start;
+-
+- for (i = 0; i < args->src_mem_array_size; i++) {
+- uint64_t src_va_addr_end = src_array[i].va_addr +
+- src_array[i].size - 1;
+- uint64_t src_size_to_copy = src_array[i].size;
+-
+- mutex_lock(&src_p->mutex);
+- src_bo = kfd_process_find_bo_from_interval(src_p,
+- src_array[i].va_addr,
+- src_va_addr_end);
+- mutex_unlock(&src_p->mutex);
+- if (!src_bo || src_va_addr_end > src_bo->it.last) {
+- pr_err("CMA %s failed. Invalid src range\n", cma_op);
+- err = -EFAULT;
+- break;
+- }
+-
+- src_offset = src_array[i].va_addr - src_bo->it.start;
+-
+- /* Copy src_bo to one or multiple dst_bo(s) based on size and
+- * and current copy location.
+- */
+- while (j < args->dst_mem_array_size) {
+- uint64_t copy_size;
+- int64_t space_left;
+-
+- /* Find the current copy_size. This will be smaller of
+- * the following
+- * - space left in the current dest memory range
+- * - data left to copy from source range
+- */
+- space_left = (dst_array[j].va_addr + dst_array[j].size)
+- - dst_va_addr;
+- copy_size = (src_size_to_copy < space_left) ?
+- src_size_to_copy : space_left;
+-
+- /* Check both BOs belong to same device */
+- if (src_bo->dev->kgd != dst_bo->dev->kgd) {
+- pr_err("CMA %s fail. Not same dev\n", cma_op);
+- err = -EINVAL;
+- break;
+- }
+-
+- /* Store prev fence. Release it when a later fence is
+- * created
+- */
++ while (!kfd_cma_iter_end(&si) && !kfd_cma_iter_end(&di)) {
++ struct dma_fence *fence = NULL;
++ err = kfd_copy_single_range(&si, &di,
++ KFD_IS_CROSS_MEMORY_WRITE(args->flags),
++ &fence, &copied);
++ total_copied += copied;
++ if (err)
++ break;
++ /* Release old fence if a later fence is created. If no
++ * new fence is created, then keep the preivous fence
++ */
++ if (fence) {
++ dma_fence_put(lfence);
+ lfence = fence;
+- fence = NULL;
+-
+- err = dst_bo->dev->kfd2kgd->copy_mem_to_mem(
+- src_bo->dev->kgd,
+- src_bo->mem, src_offset,
+- dst_bo->mem, dst_offset,
+- copy_size,
+- &fence, &copied);
+-
+- if (err) {
+- pr_err("GPU CMA %s failed\n", cma_op);
+- break;
+- }
+-
+- /* Later fence available. Release old fence */
+- if (fence && lfence) {
+- dma_fence_put(lfence);
+- lfence = NULL;
+- }
+-
+- total_copied += copied;
+- src_size_to_copy -= copied;
+- space_left -= copied;
+- dst_va_addr += copied;
+- dst_offset += copied;
+- src_offset += copied;
+- if (dst_va_addr > dst_bo->it.last + 1) {
+- pr_err("CMA %s fail. Mem overflow\n", cma_op);
+- err = -EFAULT;
+- break;
+- }
+-
+- /* If the cur dest range is full move to next one */
+- if (space_left <= 0) {
+- if (++j >= args->dst_mem_array_size)
+- break;
+-
+- dst_va_addr = dst_array[j].va_addr;
+- dst_va_addr_end = dst_va_addr +
+- dst_array[j].size - 1;
+- dst_bo = kfd_process_find_bo_from_interval(
+- dst_p,
+- dst_va_addr,
+- dst_va_addr_end);
+- if (!dst_bo ||
+- dst_va_addr_end > dst_bo->it.last) {
+- pr_err("CMA %s failed. Invalid dst range\n",
+- cma_op);
+- err = -EFAULT;
+- break;
+- }
+- dst_offset = dst_va_addr - dst_bo->it.start;
+- }
+-
+- /* If the cur src range is done, move to next one */
+- if (src_size_to_copy <= 0)
+- break;
+ }
+- if (err)
+- break;
+ }
+
+ /* Wait for the last fence irrespective of error condition */
+- if (fence) {
+- if (dma_fence_wait_timeout(fence, false, msecs_to_jiffies(1000))
+- < 0)
+- pr_err("CMA %s failed. BO timed out\n", cma_op);
+- dma_fence_put(fence);
+- } else if (lfence) {
+- pr_debug("GPU copy fail. But wait for prev DMA to finish\n");
+- dma_fence_wait_timeout(lfence, true, msecs_to_jiffies(1000));
++ if (lfence) {
++ if (dma_fence_wait_timeout(lfence, false,
++ msecs_to_jiffies(1000)) < 0)
++ pr_err("CMA %s failed. BO timed out\n", cma_op);
+ dma_fence_put(lfence);
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index 97f729c..a74cfbc 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -303,6 +303,23 @@ struct kfd_bo {
+ struct kfd_ipc_obj *kfd_ipc_obj;
+ };
+
++/* Similar to iov_iter */
++struct cma_iter {
++ /* points to current entry of range array */
++ struct kfd_memory_range *array;
++ /* total number of entries in the initial array */
++ unsigned long nr_segs;
++ /* total amount of data pointed by kfd array*/
++ unsigned long total;
++ /* offset into the entry pointed by cma_iter.array */
++ unsigned long offset;
++ struct kfd_process *p;
++ /* current kfd_bo associated with cma_iter.array.va_addr */
++ struct kfd_bo *cur_bo;
++ /* offset w.r.t cur_bo */
++ unsigned long bo_offset;
++};
++
+ /* KGD2KFD callbacks */
+ void kgd2kfd_exit(void);
+ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
+--
+2.7.4
+
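
The new cma_iter behaves much like an iov_iter over the user-supplied kfd_memory_range array. A standalone toy model of the advance step (not the kernel code, but the same bookkeeping as kfd_cma_iter_advance() above) may help when reading the refactored loop:

struct range { unsigned long va_addr, size; };

struct iter {
	struct range *array;	/* current range */
	unsigned long nr_segs;	/* ranges not yet exhausted */
	unsigned long offset;	/* offset into the current range */
	unsigned long total;	/* bytes left across all ranges */
};

/* Move forward by n bytes, hopping to the next range once the
 * current one is fully consumed. */
static void iter_advance(struct iter *it, unsigned long n)
{
	it->offset += n;
	it->total -= n;
	if (it->offset == it->array->size && --it->nr_segs) {
		it->array++;
		it->offset = 0;
	}
}
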
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5636-drm-amdkfd-CMA-Store-cpuva-in-KFD-BO.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5636-drm-amdkfd-CMA-Store-cpuva-in-KFD-BO.patch
new file mode 100644
index 00000000..dcc91fb2
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5636-drm-amdkfd-CMA-Store-cpuva-in-KFD-BO.patch
@@ -0,0 +1,120 @@
+From ee57215dd055c532f9bc28056ba79af005d11b12 Mon Sep 17 00:00:00 2001
+From: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+Date: Thu, 22 Mar 2018 17:43:59 -0400
+Subject: [PATCH 5636/5725] drm/amdkfd: CMA: Store cpuva in KFD BO
+
+For userptr BOs store cpu VA in KFD BO. This is needed for supporting
+CMA operations on userptr
+
+Change-Id: I95e96f487fbc64957ceaf3f2875bd773d2bf9970
+Signed-off-by: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 10 +++++++++-
+ drivers/gpu/drm/amd/amdkfd/kfd_ipc.c | 2 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 4 +++-
+ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 5 +++--
+ 4 files changed, 16 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index a7f0bdc..91223e2 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -1243,6 +1243,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
+ uint64_t offset = args->mmap_offset;
+ uint32_t flags = args->flags;
+ struct vm_area_struct *vma;
++ uint64_t cpuva = 0;
+
+ if (args->size == 0)
+ return -EINVAL;
+@@ -1272,6 +1273,13 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
+ flags |= KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL;
+ flags &= ~KFD_IOC_ALLOC_MEM_FLAGS_USERPTR;
+ offset = (pfn << PAGE_SHIFT);
++ } else {
++ if (offset & (PAGE_SIZE - 1)) {
++ pr_debug("Unaligned userptr address:%llx\n",
++ offset);
++ return -EINVAL;
++ }
++ cpuva = offset;
+ }
+ } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
+ if (args->size != kfd_doorbell_process_slice(dev))
+@@ -1296,7 +1304,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
+ goto err_unlock;
+
+ idr_handle = kfd_process_device_create_obj_handle(pdd, mem,
+- args->va_addr, args->size, NULL);
++ args->va_addr, args->size, cpuva, NULL);
+ if (idr_handle < 0) {
+ err = -EFAULT;
+ goto err_free;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_ipc.c b/drivers/gpu/drm/amd/amdkfd/kfd_ipc.c
+index 97806ed..845dbf7 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_ipc.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_ipc.c
+@@ -140,7 +140,7 @@ static int kfd_import_dmabuf_create_kfd_bo(struct kfd_dev *dev,
+ goto err_unlock;
+
+ idr_handle = kfd_process_device_create_obj_handle(pdd, mem,
+- va_addr, size,
++ va_addr, size, 0,
+ ipc_obj);
+ if (idr_handle < 0) {
+ r = -EFAULT;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index a74cfbc..8adfe21 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -301,6 +301,8 @@ struct kfd_bo {
+ struct kfd_dev *dev;
+ struct list_head cb_data_head;
+ struct kfd_ipc_obj *kfd_ipc_obj;
++ /* page-aligned VA address */
++ uint64_t cpuva;
+ };
+
+ /* Similar to iov_iter */
+@@ -801,7 +803,7 @@ int kfd_reserved_mem_mmap(struct kfd_process *process,
+ /* KFD process API for creating and translating handles */
+ int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
+ void *mem, uint64_t start,
+- uint64_t length,
++ uint64_t length, uint64_t cpuva,
+ struct kfd_ipc_obj *ipc_obj);
+ void *kfd_process_device_translate_handle(struct kfd_process_device *p,
+ int handle);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index 2b4c5bd..bba2d78 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -141,7 +141,7 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
+ * created and the ioctls have not had the chance to run.
+ */
+ handle = kfd_process_device_create_obj_handle(
+- pdd, mem, gpu_va, size, NULL);
++ pdd, mem, gpu_va, size, 0, NULL);
+
+ if (handle < 0) {
+ err = handle;
+@@ -808,7 +808,7 @@ bool kfd_has_process_device_data(struct kfd_process *p)
+ */
+ int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
+ void *mem, uint64_t start,
+- uint64_t length,
++ uint64_t length, uint64_t cpuva,
+ struct kfd_ipc_obj *ipc_obj)
+ {
+ int handle;
+@@ -829,6 +829,7 @@ int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
+ buf_obj->mem = mem;
+ buf_obj->dev = pdd->dev;
+ buf_obj->kfd_ipc_obj = ipc_obj;
++ buf_obj->cpuva = cpuva;
+
+ INIT_LIST_HEAD(&buf_obj->cb_data_head);
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5637-drm-amdkfd-CMA-Handle-userptr-to-userptr-BO-copy.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5637-drm-amdkfd-CMA-Handle-userptr-to-userptr-BO-copy.patch
new file mode 100644
index 00000000..71bba1fa
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5637-drm-amdkfd-CMA-Handle-userptr-to-userptr-BO-copy.patch
@@ -0,0 +1,399 @@
+From de7edd2adbdcbd3a34f3d1df96884b4a59904b29 Mon Sep 17 00:00:00 2001
+From: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+Date: Mon, 26 Mar 2018 16:45:06 -0400
+Subject: [PATCH 5637/5725] drm/amdkfd: CMA: Handle userptr to userptr BO copy
+
+The CMA userptr implementation is incomplete because it doesn't properly
+handle the case where the BO is evicted. This patch handles the case where
+both source and destination BOs are userptr. It is more efficient to use
+the CPU to do the copy in this case, very similar to the
+process_vm_read/write() functions.
+
+Change-Id: I5d01d906f04190d71e8663785718060411dede4e
+Signed-off-by: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+
+Conflicts:
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 273 ++++++++++++++++++++++++-------
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 2 +
+ mm/gup.c | 11 ++
+ 3 files changed, 230 insertions(+), 56 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index 91223e2..8941312 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -35,6 +35,7 @@
+ #include <linux/mman.h>
+ #include <asm/processor.h>
+ #include <linux/ptrace.h>
++#include <linux/pagemap.h>
+
+ #include "kfd_priv.h"
+ #include "kfd_device_queue_manager.h"
+@@ -1681,6 +1682,12 @@ static int kfd_ioctl_ipc_import_handle(struct file *filep,
+ }
+
+
++/* Maximum number of entries for process pages array which lives on stack */
++#define MAX_PP_STACK_COUNT 16
++/* Maximum number of pages kmalloc'd to hold struct page's during copy */
++#define MAX_KMALLOC_PAGES (PAGE_SIZE * 2)
++#define MAX_PP_KMALLOC_COUNT (MAX_KMALLOC_PAGES/sizeof(struct page *))
++
+ /* Update cma_iter.cur_bo with KFD BO that is assocaited with
+ * cma_iter.array.va_addr
+ */
+@@ -1729,7 +1736,8 @@ static int kfd_cma_iter_advance(struct cma_iter *ci, unsigned long size)
+ }
+
+ static int kfd_cma_iter_init(struct kfd_memory_range *arr, unsigned long segs,
+- struct kfd_process *p, struct cma_iter *ci)
++ struct kfd_process *p, struct mm_struct *mm,
++ struct task_struct *task, struct cma_iter *ci)
+ {
+ int ret;
+ int nr;
+@@ -1742,6 +1750,8 @@ static int kfd_cma_iter_init(struct kfd_memory_range *arr, unsigned long segs,
+ ci->nr_segs = segs;
+ ci->p = p;
+ ci->offset = 0;
++ ci->mm = mm;
++ ci->task = task;
+ for (nr = 0; nr < segs; nr++)
+ ci->total += arr[nr].size;
+
+@@ -1762,6 +1772,159 @@ static bool kfd_cma_iter_end(struct cma_iter *ci)
+ return false;
+ }
+
++/* Copies @size bytes from si->cur_bo to di->cur_bo BO. The function assumes
++ * both source and dest. BOs are userptr BOs. Both BOs can either belong to
++ * current process or one of the BOs can belong to a differnt
++ * process. @Returns 0 on success, -ve on failure
++ *
++ * @si: Source iter
++ * @di: Dest. iter
++ * @cma_write: Indicates if it is write to remote or read from remote
++ * @size: amount of bytes to be copied
++ * @copied: Return number of bytes actually copied.
++ */
++static int kfd_copy_userptr_bos(struct cma_iter *si, struct cma_iter *di,
++ bool cma_write, uint64_t size,
++ uint64_t *copied)
++{
++ int i, ret = 0, locked;
++ unsigned int nents, nl;
++ unsigned int offset_in_page;
++ struct page *pp_stack[MAX_PP_STACK_COUNT];
++ struct page **process_pages = pp_stack;
++ unsigned long rva, lva = 0, flags = 0;
++ uint64_t copy_size, to_copy = size;
++ struct cma_iter *li, *ri;
++
++ if (cma_write) {
++ ri = di;
++ li = si;
++ flags |= FOLL_WRITE;
++ } else {
++ li = di;
++ ri = si;
++ }
++ /* rva: remote virtual address. Page aligned to start page.
++ * rva + offset_in_page: Points to remote start address
++ * lva: local virtual address. Points to the start address.
++ * nents: computes number of remote pages to request
++ */
++ offset_in_page = ri->bo_offset & (PAGE_SIZE - 1);
++ rva = (ri->cur_bo->cpuva + ri->bo_offset) & PAGE_MASK;
++ lva = li->cur_bo->cpuva + li->bo_offset;
++
++ nents = (size + offset_in_page + PAGE_SIZE - 1) / PAGE_SIZE;
++
++ copy_size = min_t(uint64_t, size, PAGE_SIZE - offset_in_page);
++ *copied = 0;
++
++ if (nents > MAX_PP_STACK_COUNT) {
++ /* For reliability kmalloc only 2 pages worth */
++ process_pages = kmalloc(min_t(size_t, MAX_KMALLOC_PAGES,
++ sizeof(struct pages *)*nents),
++ GFP_KERNEL);
++
++ if (!process_pages)
++ return -ENOMEM;
++ }
++
++ while (nents && to_copy) {
++ nl = min_t(unsigned int, MAX_PP_KMALLOC_COUNT, nents);
++ locked = 1;
++ down_read(&ri->mm->mmap_sem);
++ nl = get_user_pages_remote(ri->task, ri->mm, rva, nl,
++ flags, process_pages, NULL,
++ &locked);
++ if (locked)
++ up_read(&ri->mm->mmap_sem);
++ if (nl <= 0) {
++ pr_err("CMA: Invalid virtual address 0x%lx\n", rva);
++ ret = -EFAULT;
++ break;
++ }
++
++ for (i = 0; i < nl; i++) {
++ unsigned int n;
++ void *kaddr = kmap_atomic(process_pages[i]);
++
++ if (cma_write) {
++ n = copy_from_user(kaddr+offset_in_page,
++ (void *)lva, copy_size);
++ set_page_dirty(process_pages[i]);
++ } else {
++ n = copy_to_user((void *)lva,
++ kaddr+offset_in_page,
++ copy_size);
++ }
++ kunmap_atomic(kaddr);
++ if (n) {
++ ret = -EFAULT;
++ break;
++ }
++ to_copy -= copy_size;
++ if (!to_copy)
++ break;
++ lva += copy_size;
++ rva += (copy_size + offset_in_page);
++ WARN_ONCE(rva & (PAGE_SIZE - 1),
++ "CMA: Error in remote VA computation");
++ offset_in_page = 0;
++ copy_size = min_t(uint64_t, to_copy, PAGE_SIZE);
++ }
++
++ for (i = 0; i < nl; i++)
++ put_page(process_pages[i]);
++
++ if (ret)
++ break;
++ nents -= nl;
++ }
++
++ if (process_pages != pp_stack)
++ kfree(process_pages);
++
++ *copied = (size - to_copy);
++ return ret;
++
++}
++
++/* Copies @size bytes from si->cur_bo to di->cur_bo starting at their
++ * respective offset.
++ * @si: Source iter
++ * @di: Dest. iter
++ * @cma_write: Indicates if it is write to remote or read from remote
++ * @size: amount of bytes to be copied
++ * @f: Return the last fence if any
++ * @copied: Return number of bytes actually copied.
++ */
++static int kfd_copy_bos(struct cma_iter *si, struct cma_iter *di,
++ int cma_write, uint64_t size,
++ struct dma_fence **f, uint64_t *copied)
++{
++ int err = 0;
++ struct kfd_bo *dst_bo = di->cur_bo, *src_bo = si->cur_bo;
++ uint64_t src_offset = si->bo_offset, dst_offset = di->bo_offset;
++ struct kgd_mem *src_mem = src_bo->mem, *dst_mem = dst_bo->mem;
++
++ *copied = 0;
++ if (f)
++ *f = NULL;
++ if (src_bo->cpuva && dst_bo->cpuva)
++ return kfd_copy_userptr_bos(si, di, cma_write, size, copied);
++
++ if (src_bo->dev->kgd != dst_bo->dev->kgd) {
++ pr_err("CMA %d fail. Not same dev\n", cma_write);
++ err = -EINVAL;
++ }
++
++ err = dst_bo->dev->kfd2kgd->copy_mem_to_mem(src_bo->dev->kgd, src_mem,
++ src_offset, dst_mem,
++ dst_offset, size, f,
++ copied);
++
++ return err;
++}
++
+ /* Copy single range from source iterator @si to destination iterator @di.
+ * @si will move to next range and @di will move by bytes copied.
+ * @return : 0 for success or -ve for failure
+@@ -1772,57 +1935,55 @@ static int kfd_copy_single_range(struct cma_iter *si, struct cma_iter *di,
+ bool cma_write, struct dma_fence **f,
+ uint64_t *copied)
+ {
+- int err = 0;
+- uint64_t copy_size, n;
+- uint64_t size = si->array->size;
+- struct kfd_bo *src_bo = si->cur_bo;
+- struct dma_fence *lfence = NULL;
+-
+- if (!src_bo || !di || !copied)
+- return -EINVAL;
+- *copied = 0;
+- if (f)
+- *f = NULL;
+-
+- while (size && !kfd_cma_iter_end(di)) {
+- struct dma_fence *fence = NULL;
+- struct kfd_bo *dst_bo = di->cur_bo;
+-
+- copy_size = min(size, (di->array->size - di->offset));
+-
+- /* Check both BOs belong to same device */
+- if (src_bo->dev->kgd != dst_bo->dev->kgd) {
+- pr_err("CMA fail. Not same dev\n");
+- return -EINVAL;
+- }
+-
+- err = dst_bo->dev->kfd2kgd->copy_mem_to_mem(src_bo->dev->kgd,
+- src_bo->mem, si->bo_offset, dst_bo->mem, di->bo_offset,
+- copy_size, &fence, &n);
+- if (err) {
+- pr_err("GPU CMA %d failed\n", err);
+- break;
+- }
+-
+- if (fence) {
+- dma_fence_put(lfence);
+- lfence = fence;
+- }
+- size -= n;
+- *copied += n;
+- err = kfd_cma_iter_advance(si, n);
+- if (err)
+- break;
+- err = kfd_cma_iter_advance(di, n);
+- if (err)
+- break;
+- }
+-
+- if (f)
+- *f = dma_fence_get(lfence);
+- dma_fence_put(lfence);
+-
+- return err;
++ int err = 0;
++ uint64_t copy_size, n;
++ uint64_t size = si->array->size;
++ struct kfd_bo *src_bo = si->cur_bo;
++ struct dma_fence *lfence = NULL;
++
++ if (!src_bo || !di || !copied)
++ return -EINVAL;
++ *copied = 0;
++ if (f)
++ *f = NULL;
++
++ while (size && !kfd_cma_iter_end(di)) {
++ struct dma_fence *fence = NULL;
++ struct kfd_bo *dst_bo = di->cur_bo;
++
++ copy_size = min(size, (di->array->size - di->offset));
++
++ /* Check both BOs belong to same device */
++ if (src_bo->dev->kgd != dst_bo->dev->kgd) {
++ pr_err("CMA fail. Not same dev\n");
++ return -EINVAL;
++ }
++
++ err = kfd_copy_bos(si, di, cma_write, copy_size, &fence, &n);
++ if (err) {
++ pr_err("CMA %d failed\n", err);
++ break;
++ }
++
++ if (fence) {
++ dma_fence_put(lfence);
++ lfence = fence;
++ }
++ size -= n;
++ *copied += n;
++ err = kfd_cma_iter_advance(si, n);
++ if (err)
++ break;
++ err = kfd_cma_iter_advance(di, n);
++ if (err)
++ break;
++ }
++
++ if (f)
++ *f = dma_fence_get(lfence);
++ dma_fence_put(lfence);
++
++ return err;
+ }
+
+ static int kfd_ioctl_cross_memory_copy(struct file *filep,
+@@ -1910,22 +2071,22 @@ static int kfd_ioctl_cross_memory_copy(struct file *filep,
+ cma_op = "WRITE";
+ pr_debug("CMA WRITE: local -> remote\n");
+ err = kfd_cma_iter_init(dst_array, args->dst_mem_array_size,
+- remote_p, &di);
++ remote_p, remote_mm, remote_task, &di);
+ if (err)
+ goto kfd_process_fail;
+ err = kfd_cma_iter_init(src_array, args->src_mem_array_size,
+- local_p, &si);
++ local_p, current->mm, current, &si);
+ if (err)
+ goto kfd_process_fail;
+ } else {
+ cma_op = "READ";
+ pr_debug("CMA READ: remote -> local\n");
+ err = kfd_cma_iter_init(dst_array, args->dst_mem_array_size,
+- local_p, &di);
++ local_p, current->mm, current, &di);
+ if (err)
+ goto kfd_process_fail;
+ err = kfd_cma_iter_init(src_array, args->src_mem_array_size,
+- remote_p, &si);
++ remote_p, remote_mm, remote_task, &si);
+ if (err)
+ goto kfd_process_fail;
+ }
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index 8adfe21..93462fa 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -316,6 +316,8 @@ struct cma_iter {
+ /* offset into the entry pointed by cma_iter.array */
+ unsigned long offset;
+ struct kfd_process *p;
++ struct mm_struct *mm;
++ struct task_struct *task;
+ /* current kfd_bo associated with cma_iter.array.va_addr */
+ struct kfd_bo *cur_bo;
+ /* offset w.r.t cur_bo */
+diff --git a/mm/gup.c b/mm/gup.c
+index 4cc8a6f..35c9f0b 100644
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -1081,6 +1081,17 @@ long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
+ }
+ EXPORT_SYMBOL(get_user_pages_remote);
+
++long get_user_pages_remote_locked(struct task_struct *tsk, struct mm_struct *mm,
++ unsigned long start, unsigned long nr_pages,
++ unsigned int gup_flags, struct page **pages,
++ struct vm_area_struct **vmas, int *locked)
++{
++ return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
++ locked, false,
++ gup_flags | FOLL_TOUCH | FOLL_REMOTE);
++}
++EXPORT_SYMBOL(get_user_pages_remote_locked);
++
+ /*
+ * This is the same as get_user_pages_remote(), just with a
+ * less-flexible calling convention where we assume that the task
+--
+2.7.4
+
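
For orientation, the userptr-to-userptr path above boils down to pinning the remote pages and copying through a temporary kernel mapping. A simplified single-page sketch (batching, the bo_offset bookkeeping and most error handling from the patch are omitted; 'write' means local to remote):

/* Sketch only. */
static int cma_copy_one_page_sketch(struct task_struct *rtask,
				    struct mm_struct *rmm, unsigned long rva,
				    void __user *lva, unsigned int off,
				    size_t len, bool write)
{
	struct page *page;
	int locked = 1, pinned, ret;
	void *kaddr;

	down_read(&rmm->mmap_sem);
	pinned = get_user_pages_remote(rtask, rmm, rva & PAGE_MASK, 1,
				       write ? FOLL_WRITE : 0, &page, NULL,
				       &locked);
	if (locked)
		up_read(&rmm->mmap_sem);
	if (pinned <= 0)
		return -EFAULT;

	kaddr = kmap_atomic(page);
	if (write)
		ret = copy_from_user(kaddr + off, lva, len) ? -EFAULT : 0;
	else
		ret = copy_to_user(lva, kaddr + off, len) ? -EFAULT : 0;
	kunmap_atomic(kaddr);

	if (write)
		set_page_dirty(page);
	put_page(page);
	return ret;
}
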
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5638-drm-amdkfd-CMA-Use-shadow-system-BO-for-userptr.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5638-drm-amdkfd-CMA-Use-shadow-system-BO-for-userptr.patch
new file mode 100644
index 00000000..27461e6c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5638-drm-amdkfd-CMA-Use-shadow-system-BO-for-userptr.patch
@@ -0,0 +1,359 @@
+From 9559d8e4c93ecb9e2ed7f98cd1116947fc243aa4 Mon Sep 17 00:00:00 2001
+From: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+Date: Tue, 27 Mar 2018 14:36:18 -0400
+Subject: [PATCH 5638/5725] drm/amdkfd: CMA: Use shadow system BO for userptr
+
+Userptr BOs could be evicted during CMA operations. If one of the BOs
+involved is a userptr, a shadow BO is created from its underlying pages:
+an sg table is built by pinning the backing system pages and a system BO
+is created from this sg table. This temporary BO is used for the copy
+operation.
+
+v2: get_user_pages() could return fewer pages than requested. Handle
+this condition.
+
+Change-Id: Ied26bb481bfa8bb5b488f46f94451477b45746e0
+Signed-off-by: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 264 ++++++++++++++++++++++++++++---
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 13 ++
+ 2 files changed, 251 insertions(+), 26 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index 8941312..4953374 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -1688,6 +1688,187 @@ static int kfd_ioctl_ipc_import_handle(struct file *filep,
+ #define MAX_KMALLOC_PAGES (PAGE_SIZE * 2)
+ #define MAX_PP_KMALLOC_COUNT (MAX_KMALLOC_PAGES/sizeof(struct page *))
+
++static void kfd_put_sg_table(struct sg_table *sg)
++{
++ unsigned int i;
++ struct scatterlist *s;
++
++ for_each_sg(sg->sgl, s, sg->nents, i)
++ put_page(sg_page(s));
++}
++
++
++/* Create a sg table for the given userptr BO by pinning its system pages
++ * @bo: userptr BO
++ * @offset: Offset into BO
++ * @mm/@task: mm_struct & task_struct of the process that holds the BO
++ * @size: in/out: desired size / actual size which could be smaller
++ * @sg_size: out: Size of sg table. This is ALIGN_UP(@size)
++ * @ret_sg: out sg table
++ */
++static int kfd_create_sg_table_from_userptr_bo(struct kfd_bo *bo,
++ int64_t offset, int cma_write,
++ struct mm_struct *mm,
++ struct task_struct *task,
++ uint64_t *size,
++ uint64_t *sg_size,
++ struct sg_table **ret_sg)
++{
++ int ret, locked = 1;
++ struct sg_table *sg = NULL;
++ unsigned int i, offset_in_page, flags = 0;
++ unsigned long nents, n;
++ unsigned long pa = (bo->cpuva + offset) & PAGE_MASK;
++ unsigned int cur_page = 0;
++ struct scatterlist *s;
++ uint64_t sz = *size;
++ struct page **process_pages;
++
++ *sg_size = 0;
++ sg = kmalloc(sizeof(*sg), GFP_KERNEL);
++ if (!sg)
++ return -ENOMEM;
++
++ offset_in_page = offset & (PAGE_SIZE - 1);
++ nents = (sz + offset_in_page + PAGE_SIZE - 1) / PAGE_SIZE;
++
++ ret = sg_alloc_table(sg, nents, GFP_KERNEL);
++ if (unlikely(ret)) {
++ ret = -ENOMEM;
++ goto sg_alloc_fail;
++ }
++ process_pages = kmalloc_array(nents, sizeof(struct pages *),
++ GFP_KERNEL);
++ if (!process_pages) {
++ ret = -ENOMEM;
++ goto page_alloc_fail;
++ }
++
++ if (cma_write)
++ flags = FOLL_WRITE;
++ locked = 1;
++ down_read(&mm->mmap_sem);
++ n = get_user_pages_remote(task, mm, pa, nents, flags, process_pages,
++ NULL, &locked);
++ if (locked)
++ up_read(&mm->mmap_sem);
++ if (n <= 0) {
++ pr_err("CMA: Invalid virtual address 0x%lx\n", pa);
++ ret = -EFAULT;
++ goto get_user_fail;
++ }
++ if (n != nents) {
++ /* Pages pinned < requested. Set the size accordingly */
++ *size = (n * PAGE_SIZE) - offset_in_page;
++ pr_debug("Requested %lx but pinned %lx\n", nents, n);
++ }
++
++ sz = 0;
++ for_each_sg(sg->sgl, s, n, i) {
++ sg_set_page(s, process_pages[cur_page], PAGE_SIZE,
++ offset_in_page);
++ sg_dma_address(s) = page_to_phys(process_pages[cur_page]);
++ offset_in_page = 0;
++ cur_page++;
++ sz += PAGE_SIZE;
++ }
++ *ret_sg = sg;
++ *sg_size = sz;
++
++ kfree(process_pages);
++ return 0;
++
++get_user_fail:
++ kfree(process_pages);
++page_alloc_fail:
++ sg_free_table(sg);
++sg_alloc_fail:
++ kfree(sg);
++ return ret;
++}
++
++static void kfd_free_cma_bos(struct cma_iter *ci)
++{
++ struct cma_system_bo *cma_bo, *tmp;
++
++ list_for_each_entry_safe(cma_bo, tmp, &ci->cma_list, list) {
++ struct kfd_dev *dev = cma_bo->dev;
++
++ /* sg table is deleted by free_memory_of_gpu */
++ kfd_put_sg_table(cma_bo->sg);
++ dev->kfd2kgd->free_memory_of_gpu(dev->kgd, cma_bo->mem);
++ list_del(&cma_bo->list);
++ kfree(cma_bo);
++ }
++}
++
++/* Create a system BO by pinning underlying system pages of the given userptr
++ * BO @ubo
++ * @ubo: Userptr BO
++ * @offset: Offset into ubo
++ * @size: in/out: The size of the new BO could be less than requested if all
++ * the pages couldn't be pinned. This would be reflected in @size
++ * @mm/@task: mm/task to which @ubo belongs to
++ * @cma_bo: out: new system BO
++ */
++static int kfd_create_cma_system_bo(struct kfd_dev *kdev, struct kfd_bo *ubo,
++ uint64_t *size, uint64_t offset,
++ int cma_write, struct kfd_process *p,
++ struct mm_struct *mm,
++ struct task_struct *task,
++ struct cma_system_bo **cma_bo)
++{
++ int ret;
++ struct kfd_process_device *pdd = NULL;
++ struct cma_system_bo *cbo;
++ uint64_t sg_size;
++
++ uint32_t flags = ALLOC_MEM_FLAGS_GTT | ALLOC_MEM_FLAGS_WRITABLE |
++ ALLOC_MEM_FLAGS_NO_SUBSTITUTE;
++
++ *cma_bo = NULL;
++ cbo = kzalloc(sizeof(**cma_bo), GFP_KERNEL);
++ if (!cbo)
++ return -ENOMEM;
++
++ INIT_LIST_HEAD(&cbo->list);
++ ret = kfd_create_sg_table_from_userptr_bo(ubo, offset, cma_write, mm,
++ task, size, &sg_size,
++ &cbo->sg);
++ if (ret) {
++ pr_err("Failed to create system BO. sg table error %d\n", ret);
++ return ret;
++ }
++
++ mutex_lock(&p->mutex);
++ pdd = kfd_get_process_device_data(kdev, p);
++ if (!pdd) {
++ pr_err("Process device data doesn't exist\n");
++ ret = -EINVAL;
++ goto pdd_fail;
++ }
++
++ ret = kdev->kfd2kgd->alloc_memory_of_gpu(kdev->kgd, 0ULL, sg_size,
++ pdd->vm, cbo->sg,
++ &cbo->mem, NULL, flags);
++ if (ret) {
++ pr_err("Failed to create shadow system BO %d\n", ret);
++ goto pdd_fail;
++ }
++ mutex_unlock(&p->mutex);
++ cbo->dev = kdev;
++ *cma_bo = cbo;
++
++ return ret;
++
++pdd_fail:
++ mutex_unlock(&p->mutex);
++ kfd_put_sg_table(cbo->sg);
++ sg_free_table(cbo->sg);
++ kfree(cbo->sg);
++ return ret;
++}
++
+ /* Update cma_iter.cur_bo with KFD BO that is assocaited with
+ * cma_iter.array.va_addr
+ */
+@@ -1739,30 +1920,31 @@ static int kfd_cma_iter_init(struct kfd_memory_range *arr, unsigned long segs,
+ struct kfd_process *p, struct mm_struct *mm,
+ struct task_struct *task, struct cma_iter *ci)
+ {
+- int ret;
+- int nr;
+-
+- if (!arr || !segs)
+- return -EINVAL;
+-
+- memset(ci, 0, sizeof(*ci));
+- ci->array = arr;
+- ci->nr_segs = segs;
+- ci->p = p;
+- ci->offset = 0;
+- ci->mm = mm;
+- ci->task = task;
+- for (nr = 0; nr < segs; nr++)
+- ci->total += arr[nr].size;
+-
+- /* Valid but size is 0. So copied will also be 0 */
+- if (!ci->total)
+- return 0;
+-
+- ret = kfd_cma_iter_update_bo(ci);
+- if (!ret)
+- ci->bo_offset = arr->va_addr - ci->cur_bo->it.start;
+- return ret;
++ int ret;
++ int nr;
++
++ if (!arr || !segs)
++ return -EINVAL;
++
++ memset(ci, 0, sizeof(*ci));
++ INIT_LIST_HEAD(&ci->cma_list);
++ ci->array = arr;
++ ci->nr_segs = segs;
++ ci->p = p;
++ ci->offset = 0;
++ ci->mm = mm;
++ ci->task = task;
++ for (nr = 0; nr < segs; nr++)
++ ci->total += arr[nr].size;
++
++ /* Valid but size is 0. So copied will also be 0 */
++ if (!ci->total)
++ return 0;
++
++ ret = kfd_cma_iter_update_bo(ci);
++ if (!ret)
++ ci->bo_offset = arr->va_addr - ci->cur_bo->it.start;
++ return ret;
+ }
+
+ static bool kfd_cma_iter_end(struct cma_iter *ci)
+@@ -1912,16 +2094,43 @@ static int kfd_copy_bos(struct cma_iter *si, struct cma_iter *di,
+ if (src_bo->cpuva && dst_bo->cpuva)
+ return kfd_copy_userptr_bos(si, di, cma_write, size, copied);
+
+- if (src_bo->dev->kgd != dst_bo->dev->kgd) {
++ /* If either source or dest. is userptr, create a shadow system BO
++ * by using the underlying userptr BO pages. Then use this shadow
++ * BO for copy. src_offset & dst_offset are adjusted because the new BO
++ * is only created for the window (offset, size) requested.
++ * The BOs are stored in cma_list for deferred cleanup. This minimizes
++ * fence waiting just to the last fence.
++ */
++ if (src_bo->cpuva) {
++ err = kfd_create_cma_system_bo(dst_bo->dev, src_bo, &size,
++ si->bo_offset, cma_write,
++ si->p, si->mm, si->task,
++ &si->cma_bo);
++ src_mem = si->cma_bo->mem;
++ src_offset = si->bo_offset & (PAGE_SIZE - 1);
++ list_add_tail(&si->cma_bo->list, &si->cma_list);
++ } else if (dst_bo->cpuva) {
++ err = kfd_create_cma_system_bo(src_bo->dev, dst_bo, &size,
++ di->bo_offset, cma_write,
++ di->p, di->mm, di->task,
++ &di->cma_bo);
++ dst_mem = di->cma_bo->mem;
++ dst_offset = di->bo_offset & (PAGE_SIZE - 1);
++ list_add_tail(&di->cma_bo->list, &di->cma_list);
++ } else if (src_bo->dev->kgd != dst_bo->dev->kgd) {
+ pr_err("CMA %d fail. Not same dev\n", cma_write);
+ err = -EINVAL;
+ }
+
++ if (err) {
++ pr_err("Failed to create system BO %d", err);
++ err = -EINVAL;
++ }
++
+ err = dst_bo->dev->kfd2kgd->copy_mem_to_mem(src_bo->dev->kgd, src_mem,
+ src_offset, dst_mem,
+ dst_offset, size, f,
+ copied);
+-
+ return err;
+ }
+
+@@ -2120,6 +2329,9 @@ static int kfd_ioctl_cross_memory_copy(struct file *filep,
+ dma_fence_put(lfence);
+ }
+
++ kfd_free_cma_bos(&si);
++ kfd_free_cma_bos(&di);
++
+ kfd_process_fail:
+ mmput(remote_mm);
+ mm_access_fail:
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index 93462fa..d4b802e 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -305,6 +305,13 @@ struct kfd_bo {
+ uint64_t cpuva;
+ };
+
++struct cma_system_bo {
++ struct kgd_mem *mem;
++ struct sg_table *sg;
++ struct kfd_dev *dev;
++ struct list_head list;
++};
++
+ /* Similar to iov_iter */
+ struct cma_iter {
+ /* points to current entry of range array */
+@@ -322,6 +329,12 @@ struct cma_iter {
+ struct kfd_bo *cur_bo;
+ /* offset w.r.t cur_bo */
+ unsigned long bo_offset;
++ /* If cur_bo is a userptr BO, then a shadow system BO is created
++ * using its underlying pages. cma_bo holds this BO. cma_list is a
++ * list of cma_bos created in one session
++ */
++ struct cma_system_bo *cma_bo;
++ struct list_head cma_list;
+ };
+
+ /* KGD2KFD callbacks */
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5639-Fix-SVM-missing-on-Raven.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5639-Fix-SVM-missing-on-Raven.patch
new file mode 100644
index 00000000..dd85d031
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5639-Fix-SVM-missing-on-Raven.patch
@@ -0,0 +1,85 @@
+From 83027299004492ab1997f774b21c421399e251a5 Mon Sep 17 00:00:00 2001
+From: Yong Zhao <yong.zhao@amd.com>
+Date: Thu, 5 Apr 2018 15:37:09 -0400
+Subject: [PATCH 5639/5725] Fix SVM missing on Raven
+
+gpuvm_base and gpuvm_limit are used in Thunk to reserve SVM, but we
+accidentally set them to 0, resulting in no SVM on Raven. To fix that,
+we set both values the same as on Vega10.
+
+As part of the fix, we moved GPUVM aperture initialization into
+ASIC-specific kfd_init_apertures_* functions for all ASICs.
+
+Fix: SWDEV-149576
+
+Change-Id: I76ab262900ed8880944b755080f93dca5c8ea8bb
+Signed-off-by: Yong Zhao <yong.zhao@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c | 36 +++++++++++++++++++++-------
+ 1 file changed, 28 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+index be376d93..df81e59 100755
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+@@ -322,9 +322,22 @@ void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id)
+ pdd->lds_base = MAKE_LDS_APP_BASE_VI();
+ pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base);
+
+- pdd->gpuvm_base = MAKE_GPUVM_APP_BASE_VI(id + 1);
+- pdd->gpuvm_limit = MAKE_GPUVM_APP_LIMIT(
+- pdd->gpuvm_base, pdd->dev->shared_resources.gpuvm_size);
++ if (!pdd->dev->device_info->needs_iommu_device) {
++ /* dGPUs: SVM aperture starting at 0
++ * with small reserved space for kernel.
++ * Set them to CANONICAL addresses.
++ */
++ pdd->gpuvm_base = SVM_USER_BASE;
++ pdd->gpuvm_limit =
++ pdd->dev->shared_resources.gpuvm_size - 1;
++ } else {
++ /* set them to non CANONICAL addresses, and no SVM is
++ * allocated.
++ */
++ pdd->gpuvm_base = MAKE_GPUVM_APP_BASE_VI(id + 1);
++ pdd->gpuvm_limit = MAKE_GPUVM_APP_LIMIT(pdd->gpuvm_base,
++ pdd->dev->shared_resources.gpuvm_size);
++ }
+
+ pdd->scratch_base = MAKE_SCRATCH_APP_BASE_VI();
+ pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base);
+@@ -335,6 +348,16 @@ void kfd_init_apertures_v9(struct kfd_process_device *pdd, uint8_t id)
+ pdd->lds_base = MAKE_LDS_APP_BASE_V9();
+ pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base);
+
++ /* Raven needs SVM to support graphic handle, etc. Leave the small
++ * reserved space before SVM on Raven as well, even though we don't
++ * have to.
++ * Set gpuvm_base and gpuvm_limit to CANONICAL addresses so that they
++ * are used in Thunk to reserve SVM.
++ */
++ pdd->gpuvm_base = SVM_USER_BASE;
++ pdd->gpuvm_limit =
++ pdd->dev->shared_resources.gpuvm_size - 1;
++
+ pdd->scratch_base = MAKE_SCRATCH_APP_BASE_V9();
+ pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base);
+ }
+@@ -387,12 +410,9 @@ int kfd_init_apertures(struct kfd_process *process)
+ }
+
+ if (!dev->device_info->needs_iommu_device) {
+- /* dGPUs: SVM aperture starting at 0
+- * with small reserved space for kernel
++ /* dGPUs: the reserved space for kernel
++ * before SVM
+ */
+- pdd->gpuvm_base = SVM_USER_BASE;
+- pdd->gpuvm_limit =
+- dev->shared_resources.gpuvm_size - 1;
+ pdd->qpd.cwsr_base = SVM_CWSR_BASE;
+ pdd->qpd.ib_base = SVM_IB_BASE;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5640-drm-amdkfd-Implement-SPI-debug-and-exception-support.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5640-drm-amdkfd-Implement-SPI-debug-and-exception-support.patch
new file mode 100644
index 00000000..e5eb54e8
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5640-drm-amdkfd-Implement-SPI-debug-and-exception-support.patch
@@ -0,0 +1,587 @@
+From 7b49f168ec466f382964775737605d0d44afef30 Mon Sep 17 00:00:00 2001
+From: Jay Cornwall <Jay.Cornwall@amd.com>
+Date: Tue, 3 Apr 2018 18:41:50 -0500
+Subject: [PATCH 5640/5725] drm/amdkfd: Implement SPI debug and exception
+ support in gfx9 trap handler
+
+The SPI can be configured to populate trap temporary SGPRs with data
+specific to individual wavefronts. These SGPRs are currently trashed
+by the context save/restore handler and trap/exception handler.
+
+- Shuffle some ttmp register usage to preserve SPI debug data
+- Save/restore SPI debug ttmps 6-11 and 13-15 in context save area
+- Propagate exceptions to second-level trap handler
+- Modify second-level jump protocol to preserve SPI debug ttmps
+- Defer VGPR XNACK mask save until VGPR save, clear mask before using
+- Save/restore scalar XNACK state
+
+Change-Id: I7699ea7a0e61b32c532e50c26a3e24976660960f
+Signed-off-by: Jay Cornwall <Jay.Cornwall@amd.com>
+---
+ .../gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm | 312 +++++++++++++--------
+ 1 file changed, 198 insertions(+), 114 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+index bd2957c..8ef6b44 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+@@ -122,11 +122,14 @@ var SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK = 0x800
+
+ var SQ_WAVE_IB_STS_RCNT_SHIFT = 16 //FIXME
+ var SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT = 15 //FIXME
++var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK = 0x1F8000
+ var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK_NEG = 0x00007FFF //FIXME
+
+ var SQ_BUF_RSRC_WORD1_ATC_SHIFT = 24
+ var SQ_BUF_RSRC_WORD3_MTYPE_SHIFT = 27
+
++var TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT = 26 // bits [31:26] unused by SPI debug data
++var TTMP11_SAVE_RCNT_FIRST_REPLAY_MASK = 0xFC000000
+
+ /* Save */
+ var S_SAVE_BUF_RSRC_WORD1_STRIDE = 0x00040000 //stride is 4 bytes
+@@ -151,7 +154,7 @@ var s_save_pc_lo = ttmp0 //{TTMP1, TTMP0} = {3
+ var s_save_pc_hi = ttmp1
+ var s_save_exec_lo = ttmp2
+ var s_save_exec_hi = ttmp3
+-var s_save_status = ttmp4
++var s_save_tmp = ttmp4
+ var s_save_trapsts = ttmp5 //not really used until the end of the SAVE routine
+ var s_save_xnack_mask_lo = ttmp6
+ var s_save_xnack_mask_hi = ttmp7
+@@ -159,11 +162,12 @@ var s_save_buf_rsrc0 = ttmp8
+ var s_save_buf_rsrc1 = ttmp9
+ var s_save_buf_rsrc2 = ttmp10
+ var s_save_buf_rsrc3 = ttmp11
+-
++var s_save_status = ttmp12
+ var s_save_mem_offset = ttmp14
+ var s_save_alloc_size = s_save_trapsts //conflict
+-var s_save_tmp = s_save_buf_rsrc2 //shared with s_save_buf_rsrc2 (conflict: should not use mem access with s_save_tmp at the same time)
+ var s_save_m0 = ttmp15
++var s_save_ttmps_lo = s_save_tmp //no conflict
++var s_save_ttmps_hi = s_save_trapsts //no conflict
+
+ /* Restore */
+ var S_RESTORE_BUF_RSRC_WORD1_STRIDE = S_SAVE_BUF_RSRC_WORD1_STRIDE
+@@ -186,7 +190,7 @@ var s_restore_spi_init_hi = exec_hi
+
+ var s_restore_mem_offset = ttmp12
+ var s_restore_alloc_size = ttmp3
+-var s_restore_tmp = ttmp6
++var s_restore_tmp = ttmp2
+ var s_restore_mem_offset_save = s_restore_tmp //no conflict
+
+ var s_restore_m0 = s_restore_alloc_size //no conflict
+@@ -205,6 +209,8 @@ var s_restore_buf_rsrc0 = ttmp8
+ var s_restore_buf_rsrc1 = ttmp9
+ var s_restore_buf_rsrc2 = ttmp10
+ var s_restore_buf_rsrc3 = ttmp11
++var s_restore_ttmps_lo = s_restore_tmp //no conflict
++var s_restore_ttmps_hi = s_restore_alloc_size //no conflict
+
+ /**************************************************************************/
+ /* trap handler entry points */
+@@ -235,25 +241,25 @@ L_SKIP_RESTORE:
+ s_getreg_b32 s_save_status, hwreg(HW_REG_STATUS) //save STATUS since we will change SCC
+ s_andn2_b32 s_save_status, s_save_status, SQ_WAVE_STATUS_SPI_PRIO_MASK //check whether this is for save
+ s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
+- s_and_b32 ttmp8, s_save_trapsts, SQ_WAVE_TRAPSTS_SAVECTX_MASK //check whether this is for save
++ s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_SAVECTX_MASK //check whether this is for save
+ s_cbranch_scc1 L_SAVE //this is the operation for save
+
+ // ********* Handle non-CWSR traps *******************
+ if (!EMU_RUN_HACK)
+ // Illegal instruction is a non-maskable exception which blocks context save.
+ // Halt the wavefront and return from the trap.
+- s_and_b32 ttmp8, s_save_trapsts, SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK
++ s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK
+ s_cbranch_scc1 L_HALT_WAVE
+
+ // If STATUS.MEM_VIOL is asserted then we cannot fetch from the TMA.
+ // Instead, halt the wavefront and return from the trap.
+- s_and_b32 ttmp8, s_save_trapsts, SQ_WAVE_TRAPSTS_MEM_VIOL_MASK
+- s_cbranch_scc0 L_NO_MEM_VIOL
++ s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_MEM_VIOL_MASK
++ s_cbranch_scc0 L_FETCH_2ND_TRAP
+
+ L_HALT_WAVE:
+ // If STATUS.HALT is set then this fault must come from SQC instruction fetch.
+ // We cannot prevent further faults so just terminate the wavefront.
+- s_and_b32 ttmp8, s_save_status, SQ_WAVE_STATUS_HALT_MASK
++ s_and_b32 ttmp2, s_save_status, SQ_WAVE_STATUS_HALT_MASK
+ s_cbranch_scc0 L_NOT_ALREADY_HALTED
+ s_endpgm
+ L_NOT_ALREADY_HALTED:
+@@ -264,19 +270,31 @@ L_NOT_ALREADY_HALTED:
+ s_sub_u32 ttmp0, ttmp0, 0x8
+ s_subb_u32 ttmp1, ttmp1, 0x0
+
+- s_branch L_EXCP_CASE
+-
+-L_NO_MEM_VIOL:
+- /* read tba and tma for next level trap handler, ttmp4 is used as s_save_status */
+- s_getreg_b32 ttmp14,hwreg(HW_REG_SQ_SHADER_TMA_LO)
+- s_getreg_b32 ttmp15,hwreg(HW_REG_SQ_SHADER_TMA_HI)
+- s_lshl_b64 [ttmp14, ttmp15], [ttmp14, ttmp15], 0x8
+- s_load_dwordx4 [ttmp8, ttmp9, ttmp10, ttmp11], [ttmp14, ttmp15], 0
+- s_waitcnt lgkmcnt(0)
+- s_or_b32 ttmp7, ttmp8, ttmp9
+- s_cbranch_scc0 L_NO_NEXT_TRAP //next level trap handler not been set
+- s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status //restore HW status(SCC)
+- s_setpc_b64 [ttmp8,ttmp9] //jump to next level trap handler
++L_FETCH_2ND_TRAP:
++ // Preserve and clear scalar XNACK state before issuing scalar reads.
++ // Save IB_STS.FIRST_REPLAY[15] and IB_STS.RCNT[20:16] into unused space ttmp11[31:26].
++ s_getreg_b32 ttmp2, hwreg(HW_REG_IB_STS)
++ s_and_b32 ttmp3, ttmp2, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
++ s_lshl_b32 ttmp3, ttmp3, (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT)
++ s_andn2_b32 ttmp11, ttmp11, TTMP11_SAVE_RCNT_FIRST_REPLAY_MASK
++ s_or_b32 ttmp11, ttmp11, ttmp3
++
++ s_andn2_b32 ttmp2, ttmp2, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
++ s_setreg_b32 hwreg(HW_REG_IB_STS), ttmp2
++
++ // Read second-level TBA/TMA from first-level TMA and jump if available.
++ // ttmp[2:5] and ttmp12 can be used (others hold SPI-initialized debug data)
++ // ttmp12 holds SQ_WAVE_STATUS
++ s_getreg_b32 ttmp4, hwreg(HW_REG_SQ_SHADER_TMA_LO)
++ s_getreg_b32 ttmp5, hwreg(HW_REG_SQ_SHADER_TMA_HI)
++ s_lshl_b64 [ttmp4, ttmp5], [ttmp4, ttmp5], 0x8
++ s_load_dwordx2 [ttmp2, ttmp3], [ttmp4, ttmp5], 0x0 glc:1 // second-level TBA
++ s_waitcnt lgkmcnt(0)
++ s_load_dwordx2 [ttmp4, ttmp5], [ttmp4, ttmp5], 0x8 glc:1 // second-level TMA
++ s_waitcnt lgkmcnt(0)
++ s_and_b64 [ttmp2, ttmp3], [ttmp2, ttmp3], [ttmp2, ttmp3]
++ s_cbranch_scc0 L_NO_NEXT_TRAP // second-level trap handler not been set
++ s_setpc_b64 [ttmp2, ttmp3] // jump to second-level trap handler
+
+ L_NO_NEXT_TRAP:
+ s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
+@@ -286,8 +304,18 @@ L_NO_NEXT_TRAP:
+ s_addc_u32 ttmp1, ttmp1, 0
+ L_EXCP_CASE:
+ s_and_b32 ttmp1, ttmp1, 0xFFFF
+- s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status //restore HW status(SCC)
+- s_rfe_b64 [ttmp0, ttmp1]
++
++ // Restore SQ_WAVE_IB_STS.
++ s_lshr_b32 ttmp2, ttmp11, (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT)
++ s_and_b32 ttmp2, ttmp2, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
++ s_setreg_b32 hwreg(HW_REG_IB_STS), ttmp2
++
++ // Restore SQ_WAVE_STATUS.
++ s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
++ s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
++ s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status
++
++ s_rfe_b64 [ttmp0, ttmp1]
+ end
+ // ********* End handling of non-CWSR traps *******************
+
+@@ -307,8 +335,6 @@ end
+ s_mov_b32 s_save_tmp, 0 //clear saveCtx bit
+ s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_SAVECTX_SHIFT, 1), s_save_tmp //clear saveCtx bit
+
+- s_mov_b32 s_save_xnack_mask_lo, xnack_mask_lo //save XNACK_MASK
+- s_mov_b32 s_save_xnack_mask_hi, xnack_mask_hi //save XNACK must before any memory operation
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_RCNT_SHIFT, SQ_WAVE_IB_STS_RCNT_SIZE) //save RCNT
+ s_lshl_b32 s_save_tmp, s_save_tmp, S_SAVE_PC_HI_RCNT_SHIFT
+ s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp
+@@ -350,7 +376,6 @@ if G8SR_DEBUG_TIMESTAMP
+ s_waitcnt lgkmcnt(0)
+ end
+
+- /* setup Resource Contants */
+ if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_SAVE_SINGLE_WAVE))
+ //calculate wd_addr using absolute thread id
+ v_readlane_b32 s_save_tmp, v9, 0
+@@ -368,7 +393,24 @@ end
+ else
+ end
+
++ // Save trap temporaries 6-11, 13-15 initialized by SPI debug dispatch logic
++ // ttmp SR memory offset : size(VGPR)+size(SGPR)+0x40
++ get_vgpr_size_bytes(s_save_ttmps_lo)
++ get_sgpr_size_bytes(s_save_ttmps_hi)
++ s_add_u32 s_save_ttmps_lo, s_save_ttmps_lo, s_save_ttmps_hi
++ s_add_u32 s_save_ttmps_lo, s_save_ttmps_lo, s_save_spi_init_lo
++ s_addc_u32 s_save_ttmps_hi, s_save_spi_init_hi, 0x0
++ s_and_b32 s_save_ttmps_hi, s_save_ttmps_hi, 0xFFFF
++ s_store_dwordx2 [ttmp6, ttmp7], [s_save_ttmps_lo, s_save_ttmps_hi], 0x40 glc:1
++ ack_sqc_store_workaround()
++ s_store_dwordx4 [ttmp8, ttmp9, ttmp10, ttmp11], [s_save_ttmps_lo, s_save_ttmps_hi], 0x48 glc:1
++ ack_sqc_store_workaround()
++ s_store_dword ttmp13, [s_save_ttmps_lo, s_save_ttmps_hi], 0x58 glc:1
++ ack_sqc_store_workaround()
++ s_store_dwordx2 [ttmp14, ttmp15], [s_save_ttmps_lo, s_save_ttmps_hi], 0x5C glc:1
++ ack_sqc_store_workaround()
+
++ /* setup Resource Contants */
+ s_mov_b32 s_save_buf_rsrc0, s_save_spi_init_lo //base_addr_lo
+ s_and_b32 s_save_buf_rsrc1, s_save_spi_init_hi, 0x0000FFFF //base_addr_hi
+ s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE
+@@ -425,8 +467,8 @@ end
+ s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
+ write_hwreg_to_mem(s_save_trapsts, s_save_buf_rsrc0, s_save_mem_offset) //TRAPSTS
+
+- write_hwreg_to_mem(s_save_xnack_mask_lo, s_save_buf_rsrc0, s_save_mem_offset) //XNACK_MASK_LO
+- write_hwreg_to_mem(s_save_xnack_mask_hi, s_save_buf_rsrc0, s_save_mem_offset) //XNACK_MASK_HI
++ write_hwreg_to_mem(xnack_mask_lo, s_save_buf_rsrc0, s_save_mem_offset) //XNACK_MASK_LO
++ write_hwreg_to_mem(xnack_mask_hi, s_save_buf_rsrc0, s_save_mem_offset) //XNACK_MASK_HI
+
+ //use s_save_tmp would introduce conflict here between s_save_tmp and s_save_buf_rsrc2
+ s_getreg_b32 s_save_m0, hwreg(HW_REG_MODE) //MODE
+@@ -502,6 +544,8 @@ end
+ s_mov_b32 s_save_mem_offset, 0
+ s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on
+ s_mov_b32 exec_hi, 0xFFFFFFFF
++ s_mov_b32 xnack_mask_lo, 0x0
++ s_mov_b32 xnack_mask_hi, 0x0
+
+ if (SWIZZLE_EN)
+ s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
+@@ -1038,6 +1082,21 @@ end
+ s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT, SQ_WAVE_TRAPSTS_POST_SAVECTX_SIZE), s_restore_m0
+ //s_setreg_b32 hwreg(HW_REG_TRAPSTS), s_restore_trapsts //don't overwrite SAVECTX bit as it may be set through external SAVECTX during restore
+ s_setreg_b32 hwreg(HW_REG_MODE), s_restore_mode
++
++ // Restore trap temporaries 6-11, 13-15 initialized by SPI debug dispatch logic
++ // ttmp SR memory offset : size(VGPR)+size(SGPR)+0x40
++ get_vgpr_size_bytes(s_restore_ttmps_lo)
++ get_sgpr_size_bytes(s_restore_ttmps_hi)
++ s_add_u32 s_restore_ttmps_lo, s_restore_ttmps_lo, s_restore_ttmps_hi
++ s_add_u32 s_restore_ttmps_lo, s_restore_ttmps_lo, s_restore_buf_rsrc0
++ s_addc_u32 s_restore_ttmps_hi, s_restore_buf_rsrc1, 0x0
++ s_and_b32 s_restore_ttmps_hi, s_restore_ttmps_hi, 0xFFFF
++ s_load_dwordx2 [ttmp6, ttmp7], [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x40 glc:1
++ s_load_dwordx4 [ttmp8, ttmp9, ttmp10, ttmp11], [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x48 glc:1
++ s_load_dword ttmp13, [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x58 glc:1
++ s_load_dwordx2 [ttmp14, ttmp15], [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x5C glc:1
++ s_waitcnt lgkmcnt(0)
++
+ //reuse s_restore_m0 as a temp register
+ s_and_b32 s_restore_m0, s_restore_pc_hi, S_SAVE_PC_HI_RCNT_MASK
+ s_lshr_b32 s_restore_m0, s_restore_m0, S_SAVE_PC_HI_RCNT_SHIFT
+@@ -1085,9 +1144,7 @@ function write_hwreg_to_mem(s, s_rsrc, s_mem_offset)
+ s_mov_b32 exec_lo, m0 //assuming exec_lo is not needed anymore from this point on
+ s_mov_b32 m0, s_mem_offset
+ s_buffer_store_dword s, s_rsrc, m0 glc:1
+-if ACK_SQC_STORE
+- s_waitcnt lgkmcnt(0)
+-end
++ ack_sqc_store_workaround()
+ s_add_u32 s_mem_offset, s_mem_offset, 4
+ s_mov_b32 m0, exec_lo
+ end
+@@ -1097,21 +1154,13 @@ end
+ function write_16sgpr_to_mem(s, s_rsrc, s_mem_offset)
+
+ s_buffer_store_dwordx4 s[0], s_rsrc, 0 glc:1
+-if ACK_SQC_STORE
+- s_waitcnt lgkmcnt(0)
+-end
++ ack_sqc_store_workaround()
+ s_buffer_store_dwordx4 s[4], s_rsrc, 16 glc:1
+-if ACK_SQC_STORE
+- s_waitcnt lgkmcnt(0)
+-end
++ ack_sqc_store_workaround()
+ s_buffer_store_dwordx4 s[8], s_rsrc, 32 glc:1
+-if ACK_SQC_STORE
+- s_waitcnt lgkmcnt(0)
+-end
++ ack_sqc_store_workaround()
+ s_buffer_store_dwordx4 s[12], s_rsrc, 48 glc:1
+-if ACK_SQC_STORE
+- s_waitcnt lgkmcnt(0)
+-end
++ ack_sqc_store_workaround()
+ s_add_u32 s_rsrc[0], s_rsrc[0], 4*16
+ s_addc_u32 s_rsrc[1], s_rsrc[1], 0x0 // +scc
+ end
+@@ -1151,56 +1200,80 @@ function get_hwreg_size_bytes
+ return 128 //HWREG size 128 bytes
+ end
+
++function ack_sqc_store_workaround
++ if ACK_SQC_STORE
++ s_waitcnt lgkmcnt(0)
++ end
++end
+
+
+ #endif
+
+ static const uint32_t cwsr_trap_gfx9_hex[] = {
+- 0xbf820001, 0xbf820130,
+- 0xb8f0f802, 0x89708670,
+- 0xb8f1f803, 0x8674ff71,
+- 0x00000400, 0xbf850023,
+- 0x8674ff71, 0x00000800,
+- 0xbf850003, 0x8674ff71,
+- 0x00000100, 0xbf840009,
+- 0x8674ff70, 0x00002000,
++ 0xbf820001, 0xbf820158,
++ 0xb8f8f802, 0x89788678,
++ 0xb8f1f803, 0x866eff71,
++ 0x00000400, 0xbf850034,
++ 0x866eff71, 0x00000800,
++ 0xbf850003, 0x866eff71,
++ 0x00000100, 0xbf840008,
++ 0x866eff78, 0x00002000,
+ 0xbf840001, 0xbf810000,
+- 0x8770ff70, 0x00002000,
++ 0x8778ff78, 0x00002000,
+ 0x80ec886c, 0x82ed806d,
+- 0xbf820010, 0xb8faf812,
+- 0xb8fbf813, 0x8efa887a,
+- 0xc00a1d3d, 0x00000000,
+- 0xbf8cc07f, 0x87737574,
+- 0xbf840002, 0xb970f802,
+- 0xbe801d74, 0xb8f1f803,
+- 0x8671ff71, 0x000001ff,
+- 0xbf850002, 0x806c846c,
+- 0x826d806d, 0x866dff6d,
+- 0x0000ffff, 0xb970f802,
+- 0xbe801f6c, 0x866dff6d,
+- 0x0000ffff, 0xbef60080,
+- 0xb9760283, 0xbef20068,
+- 0xbef30069, 0xb8f62407,
+- 0x8e769c76, 0x876d766d,
+- 0xb8f603c7, 0x8e769b76,
+- 0x876d766d, 0xb8f6f807,
+- 0x8676ff76, 0x00007fff,
+- 0xb976f807, 0xbeee007e,
+- 0xbeef007f, 0xbefe0180,
+- 0xbf900004, 0xbf8e0002,
+- 0xbf88fffe, 0xbef4007e,
++ 0xb8eef807, 0x866fff6e,
++ 0x001f8000, 0x8e6f8b6f,
++ 0x8977ff77, 0xfc000000,
++ 0x87776f77, 0x896eff6e,
++ 0x001f8000, 0xb96ef807,
++ 0xb8f0f812, 0xb8f1f813,
++ 0x8ef08870, 0xc0071bb8,
++ 0x00000000, 0xbf8cc07f,
++ 0xc0071c38, 0x00000008,
++ 0xbf8cc07f, 0x86ee6e6e,
++ 0xbf840001, 0xbe801d6e,
++ 0xb8f1f803, 0x8671ff71,
++ 0x000001ff, 0xbf850002,
++ 0x806c846c, 0x826d806d,
++ 0x866dff6d, 0x0000ffff,
++ 0x8f6e8b77, 0x866eff6e,
++ 0x001f8000, 0xb96ef807,
++ 0x86fe7e7e, 0x86ea6a6a,
++ 0xb978f802, 0xbe801f6c,
++ 0x866dff6d, 0x0000ffff,
++ 0xbef00080, 0xb9700283,
++ 0xb8f02407, 0x8e709c70,
++ 0x876d706d, 0xb8f003c7,
++ 0x8e709b70, 0x876d706d,
++ 0xb8f0f807, 0x8670ff70,
++ 0x00007fff, 0xb970f807,
++ 0xbeee007e, 0xbeef007f,
++ 0xbefe0180, 0xbf900004,
++ 0xbf8e0002, 0xbf88fffe,
++ 0xb8f02a05, 0x80708170,
++ 0x8e708a70, 0xb8f11605,
++ 0x80718171, 0x8e718671,
++ 0x80707170, 0x80707e70,
++ 0x8271807f, 0x8671ff71,
++ 0x0000ffff, 0xc0471cb8,
++ 0x00000040, 0xbf8cc07f,
++ 0xc04b1d38, 0x00000048,
++ 0xbf8cc07f, 0xc0431e78,
++ 0x00000058, 0xbf8cc07f,
++ 0xc0471eb8, 0x0000005c,
++ 0xbf8cc07f, 0xbef4007e,
+ 0x8675ff7f, 0x0000ffff,
+ 0x8775ff75, 0x00040000,
+ 0xbef60080, 0xbef700ff,
+- 0x00807fac, 0x8676ff7f,
+- 0x08000000, 0x8f768376,
+- 0x87777677, 0x8676ff7f,
+- 0x70000000, 0x8f768176,
+- 0x87777677, 0xbefb007c,
++ 0x00807fac, 0x8670ff7f,
++ 0x08000000, 0x8f708370,
++ 0x87777077, 0x8670ff7f,
++ 0x70000000, 0x8f708170,
++ 0x87777077, 0xbefb007c,
+ 0xbefa0080, 0xb8fa2a05,
+ 0x807a817a, 0x8e7a8a7a,
+- 0xb8f61605, 0x80768176,
+- 0x8e768676, 0x807a767a,
++ 0xb8f01605, 0x80708170,
++ 0x8e708670, 0x807a707a,
+ 0xbef60084, 0xbef600ff,
+ 0x01000000, 0xbefe007c,
+ 0xbefc007a, 0xc0611efa,
+@@ -1221,26 +1294,26 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
+ 0x0000007c, 0xbf8cc07f,
+ 0x807a847a, 0xbefc007e,
+ 0xbefe007c, 0xbefc007a,
+- 0xc0611c3a, 0x0000007c,
++ 0xc0611e3a, 0x0000007c,
+ 0xbf8cc07f, 0x807a847a,
+ 0xbefc007e, 0xb8f1f803,
+ 0xbefe007c, 0xbefc007a,
+ 0xc0611c7a, 0x0000007c,
+ 0xbf8cc07f, 0x807a847a,
+ 0xbefc007e, 0xbefe007c,
+- 0xbefc007a, 0xc0611cba,
++ 0xbefc007a, 0xc0611a3a,
+ 0x0000007c, 0xbf8cc07f,
+ 0x807a847a, 0xbefc007e,
+ 0xbefe007c, 0xbefc007a,
+- 0xc0611cfa, 0x0000007c,
++ 0xc0611a7a, 0x0000007c,
+ 0xbf8cc07f, 0x807a847a,
+ 0xbefc007e, 0xb8fbf801,
+ 0xbefe007c, 0xbefc007a,
+ 0xc0611efa, 0x0000007c,
+ 0xbf8cc07f, 0x807a847a,
+- 0xbefc007e, 0x8676ff7f,
++ 0xbefc007e, 0x8670ff7f,
+ 0x04000000, 0xbeef0080,
+- 0x876f6f76, 0xb8fa2a05,
++ 0x876f6f70, 0xb8fa2a05,
+ 0x807a817a, 0x8e7a8a7a,
+ 0xb8f11605, 0x80718171,
+ 0x8e718471, 0x8e768271,
+@@ -1262,6 +1335,7 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
+ 0xbf0a717c, 0xbf85ffe7,
+ 0xbef40172, 0xbefa0080,
+ 0xbefe00c1, 0xbeff00c1,
++ 0xbee80080, 0xbee90080,
+ 0xbef600ff, 0x01000000,
+ 0xe0724000, 0x7a1d0000,
+ 0xe0724100, 0x7a1d0100,
+@@ -1270,13 +1344,13 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
+ 0xbefe00c1, 0xbeff00c1,
+ 0xb8f14306, 0x8671c171,
+ 0xbf84002c, 0xbf8a0000,
+- 0x8676ff6f, 0x04000000,
++ 0x8670ff6f, 0x04000000,
+ 0xbf840028, 0x8e718671,
+ 0x8e718271, 0xbef60071,
+ 0xb8fa2a05, 0x807a817a,
+- 0x8e7a8a7a, 0xb8f61605,
+- 0x80768176, 0x8e768676,
+- 0x807a767a, 0x807aff7a,
++ 0x8e7a8a7a, 0xb8f01605,
++ 0x80708170, 0x8e708670,
++ 0x807a707a, 0x807aff7a,
+ 0x00000080, 0xbef600ff,
+ 0x01000000, 0xbefc0080,
+ 0xd28c0002, 0x000100c1,
+@@ -1308,24 +1382,24 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
+ 0x7a1d0300, 0x807c847c,
+ 0x807aff7a, 0x00000400,
+ 0xbf0a717c, 0xbf85ffef,
+- 0xbf9c0000, 0xbf8200c5,
++ 0xbf9c0000, 0xbf8200d9,
+ 0xbef4007e, 0x8675ff7f,
+ 0x0000ffff, 0x8775ff75,
+ 0x00040000, 0xbef60080,
+ 0xbef700ff, 0x00807fac,
+- 0x8672ff7f, 0x08000000,
+- 0x8f728372, 0x87777277,
+- 0x8672ff7f, 0x70000000,
+- 0x8f728172, 0x87777277,
+- 0x8672ff7f, 0x04000000,
++ 0x866eff7f, 0x08000000,
++ 0x8f6e836e, 0x87776e77,
++ 0x866eff7f, 0x70000000,
++ 0x8f6e816e, 0x87776e77,
++ 0x866eff7f, 0x04000000,
+ 0xbf84001e, 0xbefe00c1,
+ 0xbeff00c1, 0xb8ef4306,
+ 0x866fc16f, 0xbf840019,
+ 0x8e6f866f, 0x8e6f826f,
+ 0xbef6006f, 0xb8f82a05,
+ 0x80788178, 0x8e788a78,
+- 0xb8f21605, 0x80728172,
+- 0x8e728672, 0x80787278,
++ 0xb8ee1605, 0x806e816e,
++ 0x8e6e866e, 0x80786e78,
+ 0x8078ff78, 0x00000080,
+ 0xbef600ff, 0x01000000,
+ 0xbefc0080, 0xe0510000,
+@@ -1338,7 +1412,7 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
+ 0xb8ef2a05, 0x806f816f,
+ 0x8e6f826f, 0x8e76886f,
+ 0xbef600ff, 0x01000000,
+- 0xbef20078, 0x8078ff78,
++ 0xbeee0078, 0x8078ff78,
+ 0x00000400, 0xbefc0084,
+ 0xbf11087c, 0x806fff6f,
+ 0x00008000, 0xe0524000,
+@@ -1351,14 +1425,14 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
+ 0x807c847c, 0x8078ff78,
+ 0x00000400, 0xbf0a6f7c,
+ 0xbf85ffee, 0xbf9c0000,
+- 0xe0524000, 0x721d0000,
+- 0xe0524100, 0x721d0100,
+- 0xe0524200, 0x721d0200,
+- 0xe0524300, 0x721d0300,
++ 0xe0524000, 0x6e1d0000,
++ 0xe0524100, 0x6e1d0100,
++ 0xe0524200, 0x6e1d0200,
++ 0xe0524300, 0x6e1d0300,
+ 0xb8f82a05, 0x80788178,
+- 0x8e788a78, 0xb8f21605,
+- 0x80728172, 0x8e728672,
+- 0x80787278, 0x80f8c078,
++ 0x8e788a78, 0xb8ee1605,
++ 0x806e816e, 0x8e6e866e,
++ 0x80786e78, 0x80f8c078,
+ 0xb8ef1605, 0x806f816f,
+ 0x8e6f846f, 0x8e76826f,
+ 0xbef600ff, 0x01000000,
+@@ -1372,8 +1446,8 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
+ 0xbe8e2d0e, 0xbf06807c,
+ 0xbf84fff0, 0xb8f82a05,
+ 0x80788178, 0x8e788a78,
+- 0xb8f21605, 0x80728172,
+- 0x8e728672, 0x80787278,
++ 0xb8ee1605, 0x806e816e,
++ 0x8e6e866e, 0x80786e78,
+ 0xbef60084, 0xbef600ff,
+ 0x01000000, 0xc0211bfa,
+ 0x00000078, 0x80788478,
+@@ -1397,14 +1471,24 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
+ 0x000003ff, 0xb96f4803,
+ 0x866f71ff, 0xfffff800,
+ 0x8f6f8b6f, 0xb96fa2c3,
+- 0xb973f801, 0x866fff6d,
++ 0xb973f801, 0xb8ee2a05,
++ 0x806e816e, 0x8e6e8a6e,
++ 0xb8ef1605, 0x806f816f,
++ 0x8e6f866f, 0x806e6f6e,
++ 0x806e746e, 0x826f8075,
++ 0x866fff6f, 0x0000ffff,
++ 0xc0071cb7, 0x00000040,
++ 0xc00b1d37, 0x00000048,
++ 0xc0031e77, 0x00000058,
++ 0xc0071eb7, 0x0000005c,
++ 0xbf8cc07f, 0x866fff6d,
+ 0xf0000000, 0x8f6f9c6f,
+- 0x8e6f906f, 0xbef20080,
+- 0x87726f72, 0x866fff6d,
++ 0x8e6f906f, 0xbeee0080,
++ 0x876e6f6e, 0x866fff6d,
+ 0x08000000, 0x8f6f9b6f,
+- 0x8e6f8f6f, 0x87726f72,
++ 0x8e6f8f6f, 0x876e6f6e,
+ 0x866fff70, 0x00800000,
+- 0x8f6f976f, 0xb972f807,
++ 0x8f6f976f, 0xb96ef807,
+ 0x86fe7e7e, 0x86ea6a6a,
+ 0xb970f802, 0xbf8a0000,
+ 0x95806f6c, 0xbf810000,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5641-drm-amdkfd-Implement-hw_exception-work-thread-to-han.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5641-drm-amdkfd-Implement-hw_exception-work-thread-to-han.patch
new file mode 100644
index 00000000..4ef12c5c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5641-drm-amdkfd-Implement-hw_exception-work-thread-to-han.patch
@@ -0,0 +1,134 @@
+From 020476b4cb6f58460cee35a7cc946776ba396a9d Mon Sep 17 00:00:00 2001
+From: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Date: Thu, 5 Apr 2018 15:01:40 -0400
+Subject: [PATCH 5641/5725] drm/amdkfd: Implement hw_exception work thread to
+ handle hws hang
+
+Change-Id: I021fe1e875baa4242c5347e02559a414937dfa96
+Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 4 +---
+ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 22 +++++++++++++++++++++-
+ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 4 ++++
+ 3 files changed, 26 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index 4953374..17ca06d 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -137,10 +137,8 @@ static int kfd_open(struct inode *inode, struct file *filep)
+ if (IS_ERR(process))
+ return PTR_ERR(process);
+
+- if (kfd_is_locked()) {
+- kfd_unref_process(process);
++ if (kfd_is_locked())
+ return -EAGAIN;
+- }
+
+ dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
+ process->pasid, process->is_32bit_user_mode);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index b0c159a..82c7dbe 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -60,6 +60,8 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
+ static void deallocate_sdma_queue(struct device_queue_manager *dqm,
+ unsigned int sdma_queue_id);
+
++static void kfd_process_hw_exception(struct work_struct *work);
++
+ static inline
+ enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
+ {
+@@ -1021,6 +1023,8 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
+ dqm->active_runlist = false;
+ dqm->sdma_bitmap = (1 << get_num_sdma_queues(dqm)) - 1;
+
++ INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
++
+ return 0;
+ }
+
+@@ -1053,6 +1057,8 @@ static int start_cpsch(struct device_queue_manager *dqm)
+ init_interrupts(dqm);
+
+ mutex_lock(&dqm->lock);
++ /* clear hang status when the driver tries to start the hw scheduler */
++ dqm->is_hws_hang = false;
+ execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
+ mutex_unlock(&dqm->lock);
+
+@@ -1268,6 +1274,8 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
+ {
+ int retval = 0;
+
++ if (dqm->is_hws_hang)
++ return -EIO;
+ if (!dqm->active_runlist)
+ return retval;
+
+@@ -1306,9 +1314,13 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm,
+ {
+ int retval;
+
++ if (dqm->is_hws_hang)
++ return -EIO;
+ retval = unmap_queues_cpsch(dqm, filter, filter_param);
+ if (retval) {
+ pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
++ dqm->is_hws_hang = true;
++ schedule_work(&dqm->hw_exception_work);
+ return retval;
+ }
+
+@@ -1590,7 +1602,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
+ }
+
+ retval = execute_queues_cpsch(dqm, filter, 0);
+- if (retval || qpd->reset_wavefronts) {
++ if ((!dqm->is_hws_hang) && (retval || qpd->reset_wavefronts)) {
+ pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev);
+ dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process);
+ qpd->reset_wavefronts = false;
+@@ -1611,6 +1623,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
+
+ out:
+ mutex_unlock(&dqm->lock);
++
+ return retval;
+ }
+
+@@ -1744,6 +1757,13 @@ int kfd_process_vm_fault(struct device_queue_manager *dqm,
+ return ret;
+ }
+
++static void kfd_process_hw_exception(struct work_struct *work)
++{
++ struct device_queue_manager *dqm = container_of(work,
++ struct device_queue_manager, hw_exception_work);
++ dqm->dev->kfd2kgd->gpu_recover(dqm->dev->kgd);
++}
++
+ #if defined(CONFIG_DEBUG_FS)
+
+ static void seq_reg_dump(struct seq_file *m,
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+index 978458a..3f17e5e 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+@@ -195,6 +195,10 @@ struct device_queue_manager {
+ struct kfd_mem_obj *fence_mem;
+ bool active_runlist;
+ int sched_policy;
++
++ /* hw exception */
++ bool is_hws_hang;
++ struct work_struct hw_exception_work;
+ };
+
+ void device_queue_manager_init_cik(
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5642-drm-amdkfd-CMA-Remove-diff.-device-restriction.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5642-drm-amdkfd-CMA-Remove-diff.-device-restriction.patch
new file mode 100644
index 00000000..341feb77
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5642-drm-amdkfd-CMA-Remove-diff.-device-restriction.patch
@@ -0,0 +1,102 @@
+From d6ffb77f6e17e41e55f2b8bb66b8dc4288f764ce Mon Sep 17 00:00:00 2001
+From: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+Date: Fri, 6 Apr 2018 18:07:25 -0400
+Subject: [PATCH 5642/5725] drm/amdkfd: CMA: Remove diff. device restriction
+
+CMA is supported in certain situations even if the BOs are registered to
+different devices. They are:
+
+a) If both source and destination are userptrs, then the device doesn't
+matter as the CPU is used to copy.
+b) If one of them is a userptr, then the shadow system BO will be created
+on the other device, so the copy will be done by that device.
+
+The unsupported cases are:
+
+a) System BOs are always registered to the first device. So if one BO is
+a system BO and the other BO is local memory on a different device, it is
+not supported currently.
+b) If both BOs are in local memory of different devices, it is not
+supported. (The resulting device-selection rule is sketched below.)
+
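+For illustration, a hedged sketch of the device-selection rule described
+above (the helper name cma_copy_dev() is hypothetical; the real logic lives
+in kfd_copy_bos()):
+
+	static struct kfd_dev *cma_copy_dev(struct kfd_bo *src, struct kfd_bo *dst)
+	{
+		if (src->cpuva && dst->cpuva)
+			return NULL;		/* both userptr: copied by the CPU */
+		if (src->cpuva)
+			return dst->dev;	/* shadow BO created on dst's device */
+		if (dst->cpuva)
+			return src->dev;	/* shadow BO created on src's device */
+		return dst->dev;		/* same device: plain GPU copy */
+	}
+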
+BUG:SWDEV-146559
+
+Change-Id: I0ff5426402c147dd19ec15abafd18807ecca25fe
+Signed-off-by: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 21 ++++++++++-----------
+ 1 file changed, 10 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index 17ca06d..3b4465a 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -2085,6 +2085,7 @@ static int kfd_copy_bos(struct cma_iter *si, struct cma_iter *di,
+ struct kfd_bo *dst_bo = di->cur_bo, *src_bo = si->cur_bo;
+ uint64_t src_offset = si->bo_offset, dst_offset = di->bo_offset;
+ struct kgd_mem *src_mem = src_bo->mem, *dst_mem = dst_bo->mem;
++ struct kfd_dev *dev = dst_bo->dev;
+
+ *copied = 0;
+ if (f)
+@@ -2096,11 +2097,14 @@ static int kfd_copy_bos(struct cma_iter *si, struct cma_iter *di,
+ * by using the underlying userptr BO pages. Then use this shadow
+ * BO for copy. src_offset & dst_offset are adjusted because the new BO
+ * is only created for the window (offset, size) requested.
++ * The shadow BO is created on the other device. This means if the
++ * other BO is a device memory, the copy will be using that device.
+ * The BOs are stored in cma_list for deferred cleanup. This minimizes
+ * fence waiting just to the last fence.
+ */
+ if (src_bo->cpuva) {
+- err = kfd_create_cma_system_bo(dst_bo->dev, src_bo, &size,
++ dev = dst_bo->dev;
++ err = kfd_create_cma_system_bo(dev, src_bo, &size,
+ si->bo_offset, cma_write,
+ si->p, si->mm, si->task,
+ &si->cma_bo);
+@@ -2108,7 +2112,8 @@ static int kfd_copy_bos(struct cma_iter *si, struct cma_iter *di,
+ src_offset = si->bo_offset & (PAGE_SIZE - 1);
+ list_add_tail(&si->cma_bo->list, &si->cma_list);
+ } else if (dst_bo->cpuva) {
+- err = kfd_create_cma_system_bo(src_bo->dev, dst_bo, &size,
++ dev = src_bo->dev;
++ err = kfd_create_cma_system_bo(dev, dst_bo, &size,
+ di->bo_offset, cma_write,
+ di->p, di->mm, di->task,
+ &di->cma_bo);
+@@ -2117,15 +2122,15 @@ static int kfd_copy_bos(struct cma_iter *si, struct cma_iter *di,
+ list_add_tail(&di->cma_bo->list, &di->cma_list);
+ } else if (src_bo->dev->kgd != dst_bo->dev->kgd) {
+ pr_err("CMA %d fail. Not same dev\n", cma_write);
+- err = -EINVAL;
++ return -EINVAL;
+ }
+
+ if (err) {
+ pr_err("Failed to create system BO %d", err);
+- err = -EINVAL;
++ return -EINVAL;
+ }
+
+- err = dst_bo->dev->kfd2kgd->copy_mem_to_mem(src_bo->dev->kgd, src_mem,
++ err = dst_bo->dev->kfd2kgd->copy_mem_to_mem(dev->kgd, src_mem,
+ src_offset, dst_mem,
+ dst_offset, size, f,
+ copied);
+@@ -2160,12 +2165,6 @@ static int kfd_copy_single_range(struct cma_iter *si, struct cma_iter *di,
+
+ copy_size = min(size, (di->array->size - di->offset));
+
+- /* Check both BOs belong to same device */
+- if (src_bo->dev->kgd != dst_bo->dev->kgd) {
+- pr_err("CMA fail. Not same dev\n");
+- return -EINVAL;
+- }
+-
+ err = kfd_copy_bos(si, di, cma_write, copy_size, &fence, &n);
+ if (err) {
+ pr_err("CMA %d failed\n", err);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5643-drm-amdkfd-CMA-Store-mem_type-in-KFD-BO.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5643-drm-amdkfd-CMA-Store-mem_type-in-KFD-BO.patch
new file mode 100644
index 00000000..bf3fece3
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5643-drm-amdkfd-CMA-Store-mem_type-in-KFD-BO.patch
@@ -0,0 +1,127 @@
+From 03dce550e91d0e15ba750a54f596f352911d2deb Mon Sep 17 00:00:00 2001
+From: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+Date: Mon, 9 Apr 2018 16:03:21 -0400
+Subject: [PATCH 5643/5725] drm/amdkfd: CMA: Store mem_type in KFD BO
+
+It is needed for supporting CMA when the BOs belong to different
+devices.
+
+Change-Id: I9acc5595e574141d8955e36ff0a98e5bac9b6fc1
+Signed-off-by: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 7 ++++++-
+ drivers/gpu/drm/amd/amdkfd/kfd_ipc.c | 2 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 2 ++
+ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 10 +++++++++-
+ 4 files changed, 18 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index 3b4465a..f480453 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -1243,6 +1243,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
+ uint32_t flags = args->flags;
+ struct vm_area_struct *vma;
+ uint64_t cpuva = 0;
++ unsigned int mem_type = 0;
+
+ if (args->size == 0)
+ return -EINVAL;
+@@ -1302,8 +1303,12 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
+ if (err)
+ goto err_unlock;
+
++ mem_type = flags & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM |
++ KFD_IOC_ALLOC_MEM_FLAGS_GTT |
++ KFD_IOC_ALLOC_MEM_FLAGS_USERPTR |
++ KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL);
+ idr_handle = kfd_process_device_create_obj_handle(pdd, mem,
+- args->va_addr, args->size, cpuva, NULL);
++ args->va_addr, args->size, cpuva, mem_type, NULL);
+ if (idr_handle < 0) {
+ err = -EFAULT;
+ goto err_free;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_ipc.c b/drivers/gpu/drm/amd/amdkfd/kfd_ipc.c
+index 845dbf7..a53d954 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_ipc.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_ipc.c
+@@ -140,7 +140,7 @@ static int kfd_import_dmabuf_create_kfd_bo(struct kfd_dev *dev,
+ goto err_unlock;
+
+ idr_handle = kfd_process_device_create_obj_handle(pdd, mem,
+- va_addr, size, 0,
++ va_addr, size, 0, 0,
+ ipc_obj);
+ if (idr_handle < 0) {
+ r = -EFAULT;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index d4b802e..846765e 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -303,6 +303,7 @@ struct kfd_bo {
+ struct kfd_ipc_obj *kfd_ipc_obj;
+ /* page-aligned VA address */
+ uint64_t cpuva;
++ unsigned int mem_type;
+ };
+
+ struct cma_system_bo {
+@@ -819,6 +820,7 @@ int kfd_reserved_mem_mmap(struct kfd_process *process,
+ int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
+ void *mem, uint64_t start,
+ uint64_t length, uint64_t cpuva,
++ unsigned int mem_type,
+ struct kfd_ipc_obj *ipc_obj);
+ void *kfd_process_device_translate_handle(struct kfd_process_device *p,
+ int handle);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index bba2d78..78ccac0 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -118,6 +118,7 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
+ struct kgd_mem *mem = NULL;
+ int handle;
+ int err;
++ unsigned int mem_type;
+
+ err = kdev->kfd2kgd->alloc_memory_of_gpu(kdev->kgd, gpu_va, size,
+ pdd->vm, NULL, &mem, NULL,
+@@ -135,13 +136,18 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
+ goto sync_memory_failed;
+ }
+
++ mem_type = flags & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM |
++ KFD_IOC_ALLOC_MEM_FLAGS_GTT |
++ KFD_IOC_ALLOC_MEM_FLAGS_USERPTR |
++ KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL);
++
+ /* Create an obj handle so kfd_process_device_remove_obj_handle
+ * will take care of the bo removal when the process finishes.
+ * We do not need to take p->mutex, because the process is just
+ * created and the ioctls have not had the chance to run.
+ */
+ handle = kfd_process_device_create_obj_handle(
+- pdd, mem, gpu_va, size, 0, NULL);
++ pdd, mem, gpu_va, size, 0, mem_type, NULL);
+
+ if (handle < 0) {
+ err = handle;
+@@ -809,6 +815,7 @@ bool kfd_has_process_device_data(struct kfd_process *p)
+ int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
+ void *mem, uint64_t start,
+ uint64_t length, uint64_t cpuva,
++ unsigned int mem_type,
+ struct kfd_ipc_obj *ipc_obj)
+ {
+ int handle;
+@@ -830,6 +837,7 @@ int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
+ buf_obj->dev = pdd->dev;
+ buf_obj->kfd_ipc_obj = ipc_obj;
+ buf_obj->cpuva = cpuva;
++ buf_obj->mem_type = mem_type;
+
+ INIT_LIST_HEAD(&buf_obj->cb_data_head);
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5644-drm-amdkfd-CMA-Support-for-diff.-devices.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5644-drm-amdkfd-CMA-Support-for-diff.-devices.patch
new file mode 100644
index 00000000..42a0b39e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5644-drm-amdkfd-CMA-Support-for-diff.-devices.patch
@@ -0,0 +1,40 @@
+From f0f6de00e634304b1cfffdc3a4525141bcaebdb0 Mon Sep 17 00:00:00 2001
+From: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+Date: Mon, 9 Apr 2018 16:27:07 -0400
+Subject: [PATCH 5644/5725] drm/amdkfd: CMA: Support for diff. devices
+
+Support CMA between System Memory BO and Local Memory BO even if they
+are registered to separate devices. The copy will be done by the device
+to which the Local Memory BO belongs. The system memory BO will be
+temporarily mapped into this device's gart.
+
+Change-Id: Ief4af0db8b5f6af1a2fa1ed0596cf9e2fd953841
+Signed-off-by: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index f480453..1bdbb36 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -2126,8 +2126,14 @@ static int kfd_copy_bos(struct cma_iter *si, struct cma_iter *di,
+ dst_offset = di->bo_offset & (PAGE_SIZE - 1);
+ list_add_tail(&di->cma_bo->list, &di->cma_list);
+ } else if (src_bo->dev->kgd != dst_bo->dev->kgd) {
+- pr_err("CMA %d fail. Not same dev\n", cma_write);
+- return -EINVAL;
++ /* This indicates that either or/both BOs are in local mem. */
++ if (src_bo->mem_type == KFD_IOC_ALLOC_MEM_FLAGS_VRAM &&
++ dst_bo->mem_type == KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
++ pr_err("CMA fail. Local mem & not in same dev\n");
++ return -EINVAL;
++ } else if (src_bo->mem_type == KFD_IOC_ALLOC_MEM_FLAGS_VRAM)
++ dev = src_bo->dev;
++ /* else already set to dst_bo->dev */
+ }
+
+ if (err) {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5645-drm-amdkfd-Remove-unused-variable.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5645-drm-amdkfd-Remove-unused-variable.patch
new file mode 100644
index 00000000..eb70f30a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5645-drm-amdkfd-Remove-unused-variable.patch
@@ -0,0 +1,26 @@
+From fe0d76fccda51e2977add5dd9699b6943d7141b2 Mon Sep 17 00:00:00 2001
+From: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+Date: Mon, 9 Apr 2018 16:37:04 -0400
+Subject: [PATCH 5645/5725] drm/amdkfd: Remove unused variable
+
+Change-Id: Ic3d7beda97308b09b8765ce1dc69970814943dfe
+Signed-off-by: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index 1bdbb36..548775e 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -2172,7 +2172,6 @@ static int kfd_copy_single_range(struct cma_iter *si, struct cma_iter *di,
+
+ while (size && !kfd_cma_iter_end(di)) {
+ struct dma_fence *fence = NULL;
+- struct kfd_bo *dst_bo = di->cur_bo;
+
+ copy_size = min(size, (di->array->size - di->offset));
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5646-drm-amdfd-Don-t-hard-code-wait-time.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5646-drm-amdfd-Don-t-hard-code-wait-time.patch
new file mode 100644
index 00000000..ccf5a962
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5646-drm-amdfd-Don-t-hard-code-wait-time.patch
@@ -0,0 +1,56 @@
+From 7b94aa6477399501eea2ddd3293277a540706253 Mon Sep 17 00:00:00 2001
+From: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+Date: Fri, 13 Apr 2018 14:48:10 -0400
+Subject: [PATCH 5646/5725] drm/amdfd: Don't hard code wait time
+
+Also dma_fence_wait_timeout() returns 0 if the fence timed out. Handle that.
+
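+For reference, the dma_fence_wait_timeout() return convention this relies
+on (a hedged summary):
+
+	long r = dma_fence_wait_timeout(f, false, msecs_to_jiffies(1000));
+	/* r > 0  : fence signaled, r is the remaining timeout in jiffies
+	 * r == 0 : wait timed out without the fence signaling
+	 * r < 0  : error (e.g. -ERESTARTSYS for an interruptible wait)
+	 */
+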
+Change-Id: Ia5f4f97f35d3dac0b5263449a366d9a051664598
+Signed-off-by: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 21 ++++++++++++++++++---
+ 1 file changed, 18 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index 548775e..035f10b 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -1805,6 +1805,21 @@ static void kfd_free_cma_bos(struct cma_iter *ci)
+ }
+ }
+
++/* 1 second timeout */
++#define CMA_WAIT_TIMEOUT msecs_to_jiffies(1000)
++
++static int kfd_cma_fence_wait(struct dma_fence *f)
++{
++ int ret;
++
++ ret = dma_fence_wait_timeout(f, false, CMA_WAIT_TIMEOUT);
++ if (likely(ret > 0))
++ return 0;
++ if (!ret)
++ ret = -ETIME;
++ return ret;
++}
++
+ /* Create a system BO by pinning underlying system pages of the given userptr
+ * BO @ubo
+ * @ubo: Userptr BO
+@@ -2330,10 +2345,10 @@ static int kfd_ioctl_cross_memory_copy(struct file *filep,
+
+ /* Wait for the last fence irrespective of error condition */
+ if (lfence) {
+- if (dma_fence_wait_timeout(lfence, false,
+- msecs_to_jiffies(1000)) < 0)
+- pr_err("CMA %s failed. BO timed out\n", cma_op);
++ err = kfd_cma_fence_wait(lfence);
+ dma_fence_put(lfence);
++ if (err)
++ pr_err("CMA %s failed. BO timed out\n", cma_op);
+ }
+
+ kfd_free_cma_bos(&si);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5647-drm-amdkfd-CMA-Add-intermediate-wait-if-mGPU.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5647-drm-amdkfd-CMA-Add-intermediate-wait-if-mGPU.patch
new file mode 100644
index 00000000..a86b4512
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5647-drm-amdkfd-CMA-Add-intermediate-wait-if-mGPU.patch
@@ -0,0 +1,74 @@
+From 1cd6781342f4471a93a7f0b82607afbaaf9d63d7 Mon Sep 17 00:00:00 2001
+From: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+Date: Thu, 12 Apr 2018 14:24:22 -0400
+Subject: [PATCH 5647/5725] drm/amdkfd: CMA: Add intermediate wait if mGPU
+
+CMA can happen on multiple GPUs. The current approach of keeping track
+of only the latest fence is not sufficient. Before throwing away the old
+fence, check if it belongs to the same context. If not, wait before
+releasing it, as sketched below.
+
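+A minimal sketch of that fence-tracking pattern (variable names are
+illustrative; the patch implements it in
+kfd_fence_put_wait_if_diff_context()):
+
+	/* Keep only the newest fence; if the previous one came from a
+	 * different context (a different GPU queue), wait for it first.
+	 */
+	if (old_fence && new_fence && new_fence->context != old_fence->context)
+		err = dma_fence_wait(old_fence, false);
+	dma_fence_put(old_fence);
+	old_fence = new_fence;
+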
+The current approach will be suboptimal in an mGPU (> 2) system if the CMA
+ioctl is called with a long list of memory ranges where potentially each
+range copy could be done by a different GPU. In this situation, the better
+approach would be to call the ioctl repeatedly with a shorter list.
+
+Change-Id: Icf522cf8bfa648e24900745622600f920c0de320
+Signed-off-by: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 23 +++++++++++++++++++++--
+ 1 file changed, 21 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index 035f10b..6154dc8 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -1820,6 +1820,20 @@ static int kfd_cma_fence_wait(struct dma_fence *f)
+ return ret;
+ }
+
++/* Put the previous (old) fence @pf, but wait for @pf to signal first if the
++ * context of the current fence @cf is different.
++ */
++static int kfd_fence_put_wait_if_diff_context(struct dma_fence *cf,
++ struct dma_fence *pf)
++{
++ int ret = 0;
++
++ if (pf && cf && cf->context != pf->context)
++ ret = kfd_cma_fence_wait(pf);
++ dma_fence_put(pf);
++ return ret;
++}
++
+ /* Create a system BO by pinning underlying system pages of the given userptr
+ * BO @ubo
+ * @ubo: Userptr BO
+@@ -2197,8 +2211,10 @@ static int kfd_copy_single_range(struct cma_iter *si, struct cma_iter *di,
+ }
+
+ if (fence) {
+- dma_fence_put(lfence);
++ err = kfd_fence_put_wait_if_diff_context(fence, lfence);
+ lfence = fence;
++ if (err)
++ break;
+ }
+ size -= n;
+ *copied += n;
+@@ -2338,8 +2354,11 @@ static int kfd_ioctl_cross_memory_copy(struct file *filep,
+ * new fence is created, then keep the preivous fence
+ */
+ if (fence) {
+- dma_fence_put(lfence);
++ err = kfd_fence_put_wait_if_diff_context(fence,
++ lfence);
+ lfence = fence;
++ if (err)
++ break;
+ }
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5648-drm-amdkfd-CMA-Support-multi-device-VRAM-copy.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5648-drm-amdkfd-CMA-Support-multi-device-VRAM-copy.patch
new file mode 100644
index 00000000..d5ce5ceb
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5648-drm-amdkfd-CMA-Support-multi-device-VRAM-copy.patch
@@ -0,0 +1,223 @@
+From 53f369299263a855a88aae9c68bfa4ee9aca921e Mon Sep 17 00:00:00 2001
+From: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+Date: Thu, 12 Apr 2018 14:56:17 -0400
+Subject: [PATCH 5648/5725] drm/amdkfd: CMA: Support multi device VRAM copy
+
+Support copy from VRAM on device1 to VRAM on device2. This is done using
+an intermediate System BO and double copy.
+ [VRAM]--gpu1-->[System BO]--gpu2-->[VRAM]
+
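+A hedged sketch of that flow using the kfd2kgd copy_mem_to_mem() interface
+already used in this file (error handling and fence bookkeeping omitted;
+gpu1/gpu2, sys_mem and the offsets are placeholders):
+
+	/* hop 1: src VRAM -> intermediate system BO, executed by gpu1 */
+	gpu1->kfd2kgd->copy_mem_to_mem(gpu1->kgd, src_mem, src_offset,
+				       sys_mem, 0, size, &f, &copied);
+	dma_fence_wait(f, false);	/* gpu2 must not read before this finishes */
+	dma_fence_put(f);
+
+	/* hop 2: intermediate system BO -> dst VRAM, executed by gpu2 */
+	gpu2->kfd2kgd->copy_mem_to_mem(gpu2->kgd, sys_mem, 0,
+				       dst_mem, dst_offset, size, &f, &copied);
+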
+BUG: SWDEV-150755
+
+Change-Id: I7edf2df3cc1688c1ebd1fa0ea8fa82d39cbf50d1
+Signed-off-by: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 124 +++++++++++++++++++++++--------
+ 1 file changed, 95 insertions(+), 29 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index 6154dc8..0190734 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -1798,7 +1798,8 @@ static void kfd_free_cma_bos(struct cma_iter *ci)
+ struct kfd_dev *dev = cma_bo->dev;
+
+ /* sg table is deleted by free_memory_of_gpu */
+- kfd_put_sg_table(cma_bo->sg);
++ if (cma_bo->sg)
++ kfd_put_sg_table(cma_bo->sg);
+ dev->kfd2kgd->free_memory_of_gpu(dev->kgd, cma_bo->mem);
+ list_del(&cma_bo->list);
+ kfree(cma_bo);
+@@ -1834,16 +1835,21 @@ static int kfd_fence_put_wait_if_diff_context(struct dma_fence *cf,
+ return ret;
+ }
+
+-/* Create a system BO by pinning underlying system pages of the given userptr
+- * BO @ubo
+- * @ubo: Userptr BO
+- * @offset: Offset into ubo
++#define MAX_SYSTEM_BO_SIZE (512*PAGE_SIZE)
++
++/* Create an equivalent system BO for the given @bo. If @bo is a userptr then
++ * create a new system BO by pinning underlying system pages of the given
++ * userptr BO. If @bo is in Local Memory then create an empty system BO and
++ * then copy @bo into this new BO.
++ * @bo: Userptr BO or Local Memory BO
++ * @offset: Offset into bo
+ * @size: in/out: The size of the new BO could be less than requested if all
+- * the pages couldn't be pinned. This would be reflected in @size
+- * @mm/@task: mm/task to which @ubo belongs to
++ * the pages couldn't be pinned or size > MAX_SYSTEM_BO_SIZE. This would
++ * be reflected in @size
++ * @mm/@task: mm/task to which @bo belongs to
+ * @cma_bo: out: new system BO
+ */
+-static int kfd_create_cma_system_bo(struct kfd_dev *kdev, struct kfd_bo *ubo,
++static int kfd_create_cma_system_bo(struct kfd_dev *kdev, struct kfd_bo *bo,
+ uint64_t *size, uint64_t offset,
+ int cma_write, struct kfd_process *p,
+ struct mm_struct *mm,
+@@ -1853,7 +1859,8 @@ static int kfd_create_cma_system_bo(struct kfd_dev *kdev, struct kfd_bo *ubo,
+ int ret;
+ struct kfd_process_device *pdd = NULL;
+ struct cma_system_bo *cbo;
+- uint64_t sg_size;
++ uint64_t bo_size = 0;
++ struct dma_fence *f;
+
+ uint32_t flags = ALLOC_MEM_FLAGS_GTT | ALLOC_MEM_FLAGS_WRITABLE |
+ ALLOC_MEM_FLAGS_NO_SUBSTITUTE;
+@@ -1864,40 +1871,75 @@ static int kfd_create_cma_system_bo(struct kfd_dev *kdev, struct kfd_bo *ubo,
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&cbo->list);
+- ret = kfd_create_sg_table_from_userptr_bo(ubo, offset, cma_write, mm,
+- task, size, &sg_size,
+- &cbo->sg);
+- if (ret) {
+- pr_err("Failed to create system BO. sg table error %d\n", ret);
+- return ret;
++ if (bo->mem_type == KFD_IOC_ALLOC_MEM_FLAGS_VRAM)
++ bo_size = min(*size, MAX_SYSTEM_BO_SIZE);
++ else if (bo->cpuva) {
++ ret = kfd_create_sg_table_from_userptr_bo(bo, offset,
++ cma_write, mm, task,
++ size, &bo_size,
++ &cbo->sg);
++ if (ret) {
++ pr_err("CMA: BO create with sg failed %d\n", ret);
++ goto sg_fail;
++ }
++ } else {
++ WARN_ON(1);
++ ret = -EINVAL;
++ goto sg_fail;
+ }
+-
+ mutex_lock(&p->mutex);
+ pdd = kfd_get_process_device_data(kdev, p);
+ if (!pdd) {
++ mutex_unlock(&p->mutex);
+ pr_err("Process device data doesn't exist\n");
+ ret = -EINVAL;
+ goto pdd_fail;
+ }
+
+- ret = kdev->kfd2kgd->alloc_memory_of_gpu(kdev->kgd, 0ULL, sg_size,
++ ret = kdev->kfd2kgd->alloc_memory_of_gpu(kdev->kgd, 0ULL, bo_size,
+ pdd->vm, cbo->sg,
+ &cbo->mem, NULL, flags);
++ mutex_unlock(&p->mutex);
+ if (ret) {
+ pr_err("Failed to create shadow system BO %d\n", ret);
+ goto pdd_fail;
+ }
+- mutex_unlock(&p->mutex);
++
++ if (bo->mem_type == KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
++ ret = kdev->kfd2kgd->copy_mem_to_mem(kdev->kgd, bo->mem,
++ offset, cbo->mem, 0,
++ bo_size, &f, size);
++ if (ret) {
++ pr_err("CMA: Intermediate copy failed %d\n", ret);
++ goto copy_fail;
++ }
++
++		/* Wait for the copy to finish as the subsequent copy will be done
++		 * by a different device
++ */
++ ret = kfd_cma_fence_wait(f);
++ dma_fence_put(f);
++ if (ret) {
++ pr_err("CMA: Intermediate copy timed out %d\n", ret);
++ goto copy_fail;
++ }
++ }
++
+ cbo->dev = kdev;
+ *cma_bo = cbo;
+
+ return ret;
+
++copy_fail:
++ kdev->kfd2kgd->free_memory_of_gpu(kdev->kgd, bo->mem);
+ pdd_fail:
+- mutex_unlock(&p->mutex);
+- kfd_put_sg_table(cbo->sg);
+- sg_free_table(cbo->sg);
+- kfree(cbo->sg);
++ if (cbo->sg) {
++ kfd_put_sg_table(cbo->sg);
++ sg_free_table(cbo->sg);
++ kfree(cbo->sg);
++ }
++sg_fail:
++ kfree(cbo);
+ return ret;
+ }
+
+@@ -2120,6 +2162,7 @@ static int kfd_copy_bos(struct cma_iter *si, struct cma_iter *di,
+ uint64_t src_offset = si->bo_offset, dst_offset = di->bo_offset;
+ struct kgd_mem *src_mem = src_bo->mem, *dst_mem = dst_bo->mem;
+ struct kfd_dev *dev = dst_bo->dev;
++ struct cma_system_bo *tmp_bo = NULL;
+
+ *copied = 0;
+ if (f)
+@@ -2155,11 +2198,22 @@ static int kfd_copy_bos(struct cma_iter *si, struct cma_iter *di,
+ dst_offset = di->bo_offset & (PAGE_SIZE - 1);
+ list_add_tail(&di->cma_bo->list, &di->cma_list);
+ } else if (src_bo->dev->kgd != dst_bo->dev->kgd) {
+- /* This indicates that either or/both BOs are in local mem. */
++		/* This indicates that at least one of the BOs is in local mem.
++ * If both are in local mem of different devices then create an
++ * intermediate System BO and do a double copy
++ * [VRAM]--gpu1-->[System BO]--gpu2-->[VRAM].
++ * If only one BO is in VRAM then use that GPU to do the copy
++ */
+ if (src_bo->mem_type == KFD_IOC_ALLOC_MEM_FLAGS_VRAM &&
+ dst_bo->mem_type == KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
+- pr_err("CMA fail. Local mem & not in same dev\n");
+- return -EINVAL;
++ dev = dst_bo->dev;
++ err = kfd_create_cma_system_bo(src_bo->dev, src_bo,
++ &size, si->bo_offset,
++ cma_write, si->p,
++ si->mm, si->task,
++ &tmp_bo);
++ src_mem = tmp_bo->mem;
++ src_offset = 0;
+ } else if (src_bo->mem_type == KFD_IOC_ALLOC_MEM_FLAGS_VRAM)
+ dev = src_bo->dev;
+ /* else already set to dst_bo->dev */
+@@ -2170,10 +2224,22 @@ static int kfd_copy_bos(struct cma_iter *si, struct cma_iter *di,
+ return -EINVAL;
+ }
+
+- err = dst_bo->dev->kfd2kgd->copy_mem_to_mem(dev->kgd, src_mem,
+- src_offset, dst_mem,
+- dst_offset, size, f,
+- copied);
++ err = dev->kfd2kgd->copy_mem_to_mem(dev->kgd, src_mem, src_offset,
++ dst_mem, dst_offset, size, f,
++ copied);
++ /* The tmp_bo allocates additional memory. So it is better to wait and
++ * delete. Also since multiple GPUs are involved the copies are
++ * currently not pipelined.
++ */
++ if (tmp_bo) {
++ if (!err) {
++ kfd_cma_fence_wait(*f);
++ dma_fence_put(*f);
++ *f = NULL;
++ }
++ dev->kfd2kgd->free_memory_of_gpu(dev->kgd, tmp_bo->mem);
++ kfree(tmp_bo);
++ }
+ return err;
+ }
+
+--
+2.7.4
+
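
The double copy described in this patch is easiest to see as a host-side analogy. The sketch below is illustrative only and is not kernel code: memcpy() stands in for the per-GPU copy_mem_to_mem() calls, CHUNK stands in for the MAX_SYSTEM_BO_SIZE limit on the intermediate BO, and the fence wait the real code performs between the two copies appears only as a comment.

/* Illustrative host-side analogy of the double copy:
 *   [VRAM]--gpu1-->[System BO]--gpu2-->[VRAM]
 * memcpy() stands in for the per-GPU copy_mem_to_mem() calls; the real
 * code also waits on a fence between the two copies because they are
 * issued on different devices. Not kernel code.
 */
#include <stdio.h>
#include <string.h>

#define CHUNK 4096	/* stands in for the MAX_SYSTEM_BO_SIZE limit */

static void vram_to_vram(const char *src, char *dst, size_t size)
{
	static char system_bo[CHUNK];	/* intermediate system BO */
	size_t done = 0;

	while (done < size) {
		size_t n = (size - done > CHUNK) ? CHUNK : (size - done);

		memcpy(system_bo, src + done, n);	/* gpu1: VRAM -> system */
		/* real code: kfd_cma_fence_wait() before handing off to gpu2 */
		memcpy(dst + done, system_bo, n);	/* gpu2: system -> VRAM */
		done += n;
	}
}

int main(void)
{
	static char src[3 * CHUNK + 100], dst[3 * CHUNK + 100];

	memset(src, 0xAB, sizeof(src));
	vram_to_vram(src, dst, sizeof(src));
	printf("copies match: %d\n", !memcmp(src, dst, sizeof(dst)));
	return 0;
}
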
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5649-drm-amdkfd-Reduce-priority-of-context-saving-waves-b.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5649-drm-amdkfd-Reduce-priority-of-context-saving-waves-b.patch
new file mode 100644
index 00000000..f1c85328
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5649-drm-amdkfd-Reduce-priority-of-context-saving-waves-b.patch
@@ -0,0 +1,107 @@
+From 5a75eead6d6f40c82fdb59a445e7d1aac8072cda Mon Sep 17 00:00:00 2001
+From: Jay Cornwall <Jay.Cornwall@amd.com>
+Date: Mon, 16 Apr 2018 18:39:08 -0500
+Subject: [PATCH 5649/5725] drm/amdkfd: Reduce priority of context-saving waves
+ before spin-wait
+
+Synchronization between context-saving wavefronts is achieved by
+sending a SAVEWAVE message to the SPI and then spin-waiting for a
+response. These spin-waiting wavefronts may inhibit the progress
+of other wavefronts in the context save handler, leading to the
+synchronization condition never being achieved.
+
+Before spin-waiting, reduce the priority of each wavefront to
+guarantee forward progress in the others.
+
+Change-Id: Ibd10aa30f7d836a6c4890f68887c9b62b676aabc
+Signed-off-by: Jay Cornwall <Jay.Cornwall@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm | 10 ++++++++--
+ drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm | 8 +++++++-
+ 2 files changed, 15 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
+index 751cc2e..dec5ea4 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
+@@ -98,6 +98,7 @@ var SWIZZLE_EN = 0 //whether we use swi
+ /**************************************************************************/
+ var SQ_WAVE_STATUS_INST_ATC_SHIFT = 23
+ var SQ_WAVE_STATUS_INST_ATC_MASK = 0x00800000
++var SQ_WAVE_STATUS_SPI_PRIO_SHIFT = 1
+ var SQ_WAVE_STATUS_SPI_PRIO_MASK = 0x00000006
+
+ var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12
+@@ -319,6 +320,10 @@ end
+ s_sendmsg sendmsg(MSG_SAVEWAVE) //send SPI a message and wait for SPI's write to EXEC
+ end
+
++ // Set SPI_PRIO=2 to avoid starving instruction fetch in the waves we're waiting for.
++ s_or_b32 s_save_tmp, s_save_status, (2 << SQ_WAVE_STATUS_SPI_PRIO_SHIFT)
++ s_setreg_b32 hwreg(HW_REG_STATUS), s_save_tmp
++
+ L_SLEEP:
+ s_sleep 0x2 // sleep 1 (64clk) is not enough for 8 waves per SIMD, which will cause SQ hang, since the 7,8th wave could not get arbit to exec inst, while other waves are stuck into the sleep-loop and waiting for wrexec!=0
+
+@@ -1132,7 +1137,7 @@ end
+ #endif
+
+ static const uint32_t cwsr_trap_gfx8_hex[] = {
+- 0xbf820001, 0xbf820123,
++ 0xbf820001, 0xbf820125,
+ 0xb8f4f802, 0x89748674,
+ 0xb8f5f803, 0x8675ff75,
+ 0x00000400, 0xbf850011,
+@@ -1158,7 +1163,8 @@ static const uint32_t cwsr_trap_gfx8_hex[] = {
+ 0x867aff7a, 0x00007fff,
+ 0xb97af807, 0xbef2007e,
+ 0xbef3007f, 0xbefe0180,
+- 0xbf900004, 0xbf8e0002,
++ 0xbf900004, 0x877a8474,
++ 0xb97af802, 0xbf8e0002,
+ 0xbf88fffe, 0xbef8007e,
+ 0x8679ff7f, 0x0000ffff,
+ 0x8779ff79, 0x00040000,
+diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+index 8ef6b44..adb3308 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+@@ -97,6 +97,7 @@ var ACK_SQC_STORE = 1 //workaround for suspected SQC store bug causing
+ /**************************************************************************/
+ var SQ_WAVE_STATUS_INST_ATC_SHIFT = 23
+ var SQ_WAVE_STATUS_INST_ATC_MASK = 0x00800000
++var SQ_WAVE_STATUS_SPI_PRIO_SHIFT = 1
+ var SQ_WAVE_STATUS_SPI_PRIO_MASK = 0x00000006
+ var SQ_WAVE_STATUS_HALT_MASK = 0x2000
+
+@@ -362,6 +363,10 @@ end
+ s_sendmsg sendmsg(MSG_SAVEWAVE) //send SPI a message and wait for SPI's write to EXEC
+ end
+
++ // Set SPI_PRIO=2 to avoid starving instruction fetch in the waves we're waiting for.
++ s_or_b32 s_save_tmp, s_save_status, (2 << SQ_WAVE_STATUS_SPI_PRIO_SHIFT)
++ s_setreg_b32 hwreg(HW_REG_STATUS), s_save_tmp
++
+ L_SLEEP:
+ s_sleep 0x2 // sleep 1 (64clk) is not enough for 8 waves per SIMD, which will cause SQ hang, since the 7,8th wave could not get arbit to exec inst, while other waves are stuck into the sleep-loop and waiting for wrexec!=0
+
+@@ -1210,7 +1215,7 @@ end
+ #endif
+
+ static const uint32_t cwsr_trap_gfx9_hex[] = {
+- 0xbf820001, 0xbf820158,
++ 0xbf820001, 0xbf82015a,
+ 0xb8f8f802, 0x89788678,
+ 0xb8f1f803, 0x866eff71,
+ 0x00000400, 0xbf850034,
+@@ -1249,6 +1254,7 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
+ 0x00007fff, 0xb970f807,
+ 0xbeee007e, 0xbeef007f,
+ 0xbefe0180, 0xbf900004,
++ 0x87708478, 0xb970f802,
+ 0xbf8e0002, 0xbf88fffe,
+ 0xb8f02a05, 0x80708170,
+ 0x8e708a70, 0xb8f11605,
+--
+2.7.4
+
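
The STATUS update added by this patch is plain bit arithmetic: SPI_PRIO is the two-bit field at bits [2:1] of SQ_WAVE_STATUS, and OR-ing in (2 << SQ_WAVE_STATUS_SPI_PRIO_SHIFT) = 0x4 sets the upper bit of that field before the value is written back with s_setreg_b32. The small host-side check below mirrors the s_or_b32 in the patch (an OR into the saved status, not a masked field write); it is illustrative only, not kernel code.

/* Illustrative check of the SPI_PRIO arithmetic; not kernel code. */
#include <stdio.h>
#include <stdint.h>

#define SQ_WAVE_STATUS_SPI_PRIO_SHIFT	1
#define SQ_WAVE_STATUS_SPI_PRIO_MASK	0x00000006

int main(void)
{
	uint32_t s_save_status = 0x00800001;	/* arbitrary saved STATUS value */
	/* mirrors: s_or_b32 s_save_tmp, s_save_status, (2 << SPI_PRIO_SHIFT) */
	uint32_t s_save_tmp = s_save_status | (2 << SQ_WAVE_STATUS_SPI_PRIO_SHIFT);

	printf("SPI_PRIO before: %u, after: %u\n",
	       (unsigned)((s_save_status & SQ_WAVE_STATUS_SPI_PRIO_MASK)
			  >> SQ_WAVE_STATUS_SPI_PRIO_SHIFT),
	       (unsigned)((s_save_tmp & SQ_WAVE_STATUS_SPI_PRIO_MASK)
			  >> SQ_WAVE_STATUS_SPI_PRIO_SHIFT));
	return 0;
}
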
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5650-drm-amdkfd-Introduce-kfd-kernel-module-parameter-hal.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5650-drm-amdkfd-Introduce-kfd-kernel-module-parameter-hal.patch
new file mode 100644
index 00000000..db11153b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5650-drm-amdkfd-Introduce-kfd-kernel-module-parameter-hal.patch
@@ -0,0 +1,75 @@
+From 10042641c3c9dc26281189919009f65f373309d7 Mon Sep 17 00:00:00 2001
+From: Yong Zhao <yong.zhao@amd.com>
+Date: Fri, 20 Apr 2018 15:45:02 -0400
+Subject: [PATCH 5650/5725] drm/amdkfd: Introduce kfd kernel module parameter
+ halt_if_hws_hang
+
+The parameter will enable developers to do scandumps without the need to
+change, rebuild and redeploy the kernel.
+
+Signed-off-by: Yong Zhao <yong.zhao@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+
+Conflicts:
+ drivers/gpu/drm/amd/amdkfd/kfd_module.c
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+
+Change-Id: Iefe34cbaafb3831c3f008ca81bcbd0a3304e692a
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 7 +++++++
+ drivers/gpu/drm/amd/amdkfd/kfd_module.c | 4 ++++
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 5 +++++
+ 3 files changed, 16 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index 82c7dbe..e60aaf8 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -1230,6 +1230,13 @@ int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
+ while (*fence_addr != fence_value) {
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("qcm fence wait loop timeout expired\n");
++ /* In HWS case, this is used to halt the driver thread
++ * in order not to mess up CP states before doing
++ * scandumps for FW debugging.
++ */
++ while (halt_if_hws_hang)
++ schedule();
++
+ return -ETIME;
+ }
+ schedule();
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+index 34d44ff..ab0bb2d 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+@@ -97,6 +97,10 @@ module_param(priv_cp_queues, int, 0644);
+ MODULE_PARM_DESC(priv_cp_queues,
+ "Enable privileged mode for CP queues (0 = off (default), 1 = on)");
+
++int halt_if_hws_hang;
++module_param(halt_if_hws_hang, int, 0644);
++MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (default), 1 = on)");
++
+ int kgd2kfd_init(unsigned int interface_version,
+ const struct kgd2kfd_calls **g2f)
+ {
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index 846765e..efaf1e9 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -152,6 +152,11 @@ extern int vega10_noretry;
+ */
+ extern int priv_cp_queues;
+
++/*
++ * Halt if HWS hang is detected
++ */
++extern int halt_if_hws_hang;
++
+ /**
+ * enum kfd_sched_policy
+ *
+--
+2.7.4
+
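
A practical note on this switch: because the parameter is registered with mode 0644, it can be given at load time or flipped at runtime through sysfs (for example /sys/module/amdkfd/parameters/halt_if_hws_hang, assuming the driver is built as the amdkfd module). When a HWS fence wait then times out, the driver thread spins in schedule() instead of returning -ETIME, leaving the CP state untouched for a firmware scandump.
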
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5651-drm-amdkfd-Use-module-parameters-noretry-as-the-inte.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5651-drm-amdkfd-Use-module-parameters-noretry-as-the-inte.patch
new file mode 100644
index 00000000..594993b1
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5651-drm-amdkfd-Use-module-parameters-noretry-as-the-inte.patch
@@ -0,0 +1,93 @@
+From 140e359292b5e925b9c922968393d9c49194b143 Mon Sep 17 00:00:00 2001
+From: Yong Zhao <yong.zhao@amd.com>
+Date: Fri, 20 Apr 2018 15:50:28 -0400
+Subject: [PATCH 5651/5725] drm/amdkfd: Use module parameters noretry as the
+ internal variable name
+
+This makes all module parameters use the same form. Meanwhile, clean up
+the surrounding code.
+
+Change-Id: I0f6d8db10e66256f3971cc4da4c1328a63b0101c
+Signed-off-by: Yong Zhao <yong.zhao@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c | 2 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_module.c | 14 ++++++++------
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 2 +-
+ 3 files changed, 10 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
+index 6198bf2..cc27190 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
+@@ -60,7 +60,7 @@ static int update_qpd_v9(struct device_queue_manager *dqm,
+ qpd->sh_mem_config =
+ SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+ SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
+- if (vega10_noretry &&
++ if (noretry &&
+ !dqm->dev->device_info->needs_iommu_device)
+ qpd->sh_mem_config |=
+ 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+index ab0bb2d..a05f734 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+@@ -63,7 +63,7 @@ MODULE_PARM_DESC(hws_max_conc_proc,
+
+ int cwsr_enable = 1;
+ module_param(cwsr_enable, int, 0444);
+-MODULE_PARM_DESC(cwsr_enable, "CWSR enable (0 = Off, 1 = On (Default))");
++MODULE_PARM_DESC(cwsr_enable, "CWSR enable (0 = off, 1 = on (default))");
+
+ int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT;
+ module_param(max_num_of_queues_per_device, int, 0444);
+@@ -75,8 +75,6 @@ module_param(send_sigterm, int, 0444);
+ MODULE_PARM_DESC(send_sigterm,
+ "Send sigterm to HSA process on unhandled exception (0 = disable, 1 = enable)");
+
+-static int amdkfd_init_completed;
+-
+ int debug_largebar;
+ module_param(debug_largebar, int, 0444);
+ MODULE_PARM_DESC(debug_largebar,
+@@ -87,10 +85,10 @@ module_param(ignore_crat, int, 0444);
+ MODULE_PARM_DESC(ignore_crat,
+ "Ignore CRAT table during KFD initialization (0 = use CRAT (default), 1 = ignore CRAT)");
+
+-int vega10_noretry = 1;
+-module_param_named(noretry, vega10_noretry, int, 0644);
++int noretry = 1;
++module_param(noretry, int, 0644);
+ MODULE_PARM_DESC(noretry,
+- "Set sh_mem_config.retry_disable on Vega10 (0 = retry enabled, 1 = retry disabled (default))");
++ "Set sh_mem_config.retry_disable on GFXv9+ dGPUs (0 = retry enabled, 1 = retry disabled (default))");
+
+ int priv_cp_queues;
+ module_param(priv_cp_queues, int, 0644);
+@@ -101,6 +99,10 @@ int halt_if_hws_hang;
+ module_param(halt_if_hws_hang, int, 0644);
+ MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (default), 1 = on)");
+
++
++static int amdkfd_init_completed;
++
++
+ int kgd2kfd_init(unsigned int interface_version,
+ const struct kgd2kfd_calls **g2f)
+ {
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index efaf1e9..da27bb9 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -145,7 +145,7 @@ extern int ignore_crat;
+ /*
+ * Set sh_mem_config.retry_disable on Vega10
+ */
+-extern int vega10_noretry;
++extern int noretry;
+
+ /*
+ * Enable privileged mode for all CP queues including user queues
+--
+2.7.4
+
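
Note that the user-visible parameter name does not change here: module_param_named(noretry, vega10_noretry, ...) already exposed it as noretry, so an existing setting such as amdkfd.noretry=0 on the kernel command line (assuming the driver is loaded as the amdkfd module) keeps working; only the internal variable name and the description text are updated.
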
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5652-drm-amdkfd-Separate-trap-handler-assembly-code-and-i.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5652-drm-amdkfd-Separate-trap-handler-assembly-code-and-i.patch
new file mode 100644
index 00000000..ea109a82
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5652-drm-amdkfd-Separate-trap-handler-assembly-code-and-i.patch
@@ -0,0 +1,1220 @@
+From ca75dfe5349dd9f67d121552699e4f0e7dd85fea Mon Sep 17 00:00:00 2001
+From: Yong Zhao <yong.zhao@amd.com>
+Date: Fri, 20 Apr 2018 14:57:04 -0400
+Subject: [PATCH 5652/5725] drm/amdkfd: Separate trap handler assembly code and
+ its hex values
+
+Since the assembly code is inside "#if 0", it has no effect. Despite that,
+during debugging we need to change the assembly code, extract it into
+a separate file and compile the new file into hex values using sp3.
+That process also requires removing the "#if 0" and modifying lines starting
+with "#" so that sp3 can successfully compile the new file.
+
+With this change, none of the above chores are needed any more, and
+cwsr_trap_handler_gfx*.asm can be used directly by sp3 to generate the
+hex values.
+
+Change-Id: Iadcff11fc21beecfed215c12ff257d5a1d0f7486
+Signed-off-by: Yong Zhao <yong.zhao@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h | 560 +++++++++++++++++++++
+ .../gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm | 267 +---------
+ .../gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm | 300 +----------
+ drivers/gpu/drm/amd/amdkfd/kfd_device.c | 3 +-
+ 4 files changed, 575 insertions(+), 555 deletions(-)
+ create mode 100644 drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+new file mode 100644
+index 0000000..a546a21
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+@@ -0,0 +1,560 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++static const uint32_t cwsr_trap_gfx8_hex[] = {
++ 0xbf820001, 0xbf820125,
++ 0xb8f4f802, 0x89748674,
++ 0xb8f5f803, 0x8675ff75,
++ 0x00000400, 0xbf850011,
++ 0xc00a1e37, 0x00000000,
++ 0xbf8c007f, 0x87777978,
++ 0xbf840002, 0xb974f802,
++ 0xbe801d78, 0xb8f5f803,
++ 0x8675ff75, 0x000001ff,
++ 0xbf850002, 0x80708470,
++ 0x82718071, 0x8671ff71,
++ 0x0000ffff, 0xb974f802,
++ 0xbe801f70, 0xb8f5f803,
++ 0x8675ff75, 0x00000100,
++ 0xbf840006, 0xbefa0080,
++ 0xb97a0203, 0x8671ff71,
++ 0x0000ffff, 0x80f08870,
++ 0x82f18071, 0xbefa0080,
++ 0xb97a0283, 0xbef60068,
++ 0xbef70069, 0xb8fa1c07,
++ 0x8e7a9c7a, 0x87717a71,
++ 0xb8fa03c7, 0x8e7a9b7a,
++ 0x87717a71, 0xb8faf807,
++ 0x867aff7a, 0x00007fff,
++ 0xb97af807, 0xbef2007e,
++ 0xbef3007f, 0xbefe0180,
++ 0xbf900004, 0x877a8474,
++ 0xb97af802, 0xbf8e0002,
++ 0xbf88fffe, 0xbef8007e,
++ 0x8679ff7f, 0x0000ffff,
++ 0x8779ff79, 0x00040000,
++ 0xbefa0080, 0xbefb00ff,
++ 0x00807fac, 0x867aff7f,
++ 0x08000000, 0x8f7a837a,
++ 0x877b7a7b, 0x867aff7f,
++ 0x70000000, 0x8f7a817a,
++ 0x877b7a7b, 0xbeef007c,
++ 0xbeee0080, 0xb8ee2a05,
++ 0x806e816e, 0x8e6e8a6e,
++ 0xb8fa1605, 0x807a817a,
++ 0x8e7a867a, 0x806e7a6e,
++ 0xbefa0084, 0xbefa00ff,
++ 0x01000000, 0xbefe007c,
++ 0xbefc006e, 0xc0611bfc,
++ 0x0000007c, 0x806e846e,
++ 0xbefc007e, 0xbefe007c,
++ 0xbefc006e, 0xc0611c3c,
++ 0x0000007c, 0x806e846e,
++ 0xbefc007e, 0xbefe007c,
++ 0xbefc006e, 0xc0611c7c,
++ 0x0000007c, 0x806e846e,
++ 0xbefc007e, 0xbefe007c,
++ 0xbefc006e, 0xc0611cbc,
++ 0x0000007c, 0x806e846e,
++ 0xbefc007e, 0xbefe007c,
++ 0xbefc006e, 0xc0611cfc,
++ 0x0000007c, 0x806e846e,
++ 0xbefc007e, 0xbefe007c,
++ 0xbefc006e, 0xc0611d3c,
++ 0x0000007c, 0x806e846e,
++ 0xbefc007e, 0xb8f5f803,
++ 0xbefe007c, 0xbefc006e,
++ 0xc0611d7c, 0x0000007c,
++ 0x806e846e, 0xbefc007e,
++ 0xbefe007c, 0xbefc006e,
++ 0xc0611dbc, 0x0000007c,
++ 0x806e846e, 0xbefc007e,
++ 0xbefe007c, 0xbefc006e,
++ 0xc0611dfc, 0x0000007c,
++ 0x806e846e, 0xbefc007e,
++ 0xb8eff801, 0xbefe007c,
++ 0xbefc006e, 0xc0611bfc,
++ 0x0000007c, 0x806e846e,
++ 0xbefc007e, 0xbefe007c,
++ 0xbefc006e, 0xc0611b3c,
++ 0x0000007c, 0x806e846e,
++ 0xbefc007e, 0xbefe007c,
++ 0xbefc006e, 0xc0611b7c,
++ 0x0000007c, 0x806e846e,
++ 0xbefc007e, 0x867aff7f,
++ 0x04000000, 0xbef30080,
++ 0x8773737a, 0xb8ee2a05,
++ 0x806e816e, 0x8e6e8a6e,
++ 0xb8f51605, 0x80758175,
++ 0x8e758475, 0x8e7a8275,
++ 0xbefa00ff, 0x01000000,
++ 0xbef60178, 0x80786e78,
++ 0x82798079, 0xbefc0080,
++ 0xbe802b00, 0xbe822b02,
++ 0xbe842b04, 0xbe862b06,
++ 0xbe882b08, 0xbe8a2b0a,
++ 0xbe8c2b0c, 0xbe8e2b0e,
++ 0xc06b003c, 0x00000000,
++ 0xc06b013c, 0x00000010,
++ 0xc06b023c, 0x00000020,
++ 0xc06b033c, 0x00000030,
++ 0x8078c078, 0x82798079,
++ 0x807c907c, 0xbf0a757c,
++ 0xbf85ffeb, 0xbef80176,
++ 0xbeee0080, 0xbefe00c1,
++ 0xbeff00c1, 0xbefa00ff,
++ 0x01000000, 0xe0724000,
++ 0x6e1e0000, 0xe0724100,
++ 0x6e1e0100, 0xe0724200,
++ 0x6e1e0200, 0xe0724300,
++ 0x6e1e0300, 0xbefe00c1,
++ 0xbeff00c1, 0xb8f54306,
++ 0x8675c175, 0xbf84002c,
++ 0xbf8a0000, 0x867aff73,
++ 0x04000000, 0xbf840028,
++ 0x8e758675, 0x8e758275,
++ 0xbefa0075, 0xb8ee2a05,
++ 0x806e816e, 0x8e6e8a6e,
++ 0xb8fa1605, 0x807a817a,
++ 0x8e7a867a, 0x806e7a6e,
++ 0x806eff6e, 0x00000080,
++ 0xbefa00ff, 0x01000000,
++ 0xbefc0080, 0xd28c0002,
++ 0x000100c1, 0xd28d0003,
++ 0x000204c1, 0xd1060002,
++ 0x00011103, 0x7e0602ff,
++ 0x00000200, 0xbefc00ff,
++ 0x00010000, 0xbe80007b,
++ 0x867bff7b, 0xff7fffff,
++ 0x877bff7b, 0x00058000,
++ 0xd8ec0000, 0x00000002,
++ 0xbf8c007f, 0xe0765000,
++ 0x6e1e0002, 0x32040702,
++ 0xd0c9006a, 0x0000eb02,
++ 0xbf87fff7, 0xbefb0000,
++ 0xbeee00ff, 0x00000400,
++ 0xbefe00c1, 0xbeff00c1,
++ 0xb8f52a05, 0x80758175,
++ 0x8e758275, 0x8e7a8875,
++ 0xbefa00ff, 0x01000000,
++ 0xbefc0084, 0xbf0a757c,
++ 0xbf840015, 0xbf11017c,
++ 0x8075ff75, 0x00001000,
++ 0x7e000300, 0x7e020301,
++ 0x7e040302, 0x7e060303,
++ 0xe0724000, 0x6e1e0000,
++ 0xe0724100, 0x6e1e0100,
++ 0xe0724200, 0x6e1e0200,
++ 0xe0724300, 0x6e1e0300,
++ 0x807c847c, 0x806eff6e,
++ 0x00000400, 0xbf0a757c,
++ 0xbf85ffef, 0xbf9c0000,
++ 0xbf8200ca, 0xbef8007e,
++ 0x8679ff7f, 0x0000ffff,
++ 0x8779ff79, 0x00040000,
++ 0xbefa0080, 0xbefb00ff,
++ 0x00807fac, 0x8676ff7f,
++ 0x08000000, 0x8f768376,
++ 0x877b767b, 0x8676ff7f,
++ 0x70000000, 0x8f768176,
++ 0x877b767b, 0x8676ff7f,
++ 0x04000000, 0xbf84001e,
++ 0xbefe00c1, 0xbeff00c1,
++ 0xb8f34306, 0x8673c173,
++ 0xbf840019, 0x8e738673,
++ 0x8e738273, 0xbefa0073,
++ 0xb8f22a05, 0x80728172,
++ 0x8e728a72, 0xb8f61605,
++ 0x80768176, 0x8e768676,
++ 0x80727672, 0x8072ff72,
++ 0x00000080, 0xbefa00ff,
++ 0x01000000, 0xbefc0080,
++ 0xe0510000, 0x721e0000,
++ 0xe0510100, 0x721e0000,
++ 0x807cff7c, 0x00000200,
++ 0x8072ff72, 0x00000200,
++ 0xbf0a737c, 0xbf85fff6,
++ 0xbef20080, 0xbefe00c1,
++ 0xbeff00c1, 0xb8f32a05,
++ 0x80738173, 0x8e738273,
++ 0x8e7a8873, 0xbefa00ff,
++ 0x01000000, 0xbef60072,
++ 0x8072ff72, 0x00000400,
++ 0xbefc0084, 0xbf11087c,
++ 0x8073ff73, 0x00008000,
++ 0xe0524000, 0x721e0000,
++ 0xe0524100, 0x721e0100,
++ 0xe0524200, 0x721e0200,
++ 0xe0524300, 0x721e0300,
++ 0xbf8c0f70, 0x7e000300,
++ 0x7e020301, 0x7e040302,
++ 0x7e060303, 0x807c847c,
++ 0x8072ff72, 0x00000400,
++ 0xbf0a737c, 0xbf85ffee,
++ 0xbf9c0000, 0xe0524000,
++ 0x761e0000, 0xe0524100,
++ 0x761e0100, 0xe0524200,
++ 0x761e0200, 0xe0524300,
++ 0x761e0300, 0xb8f22a05,
++ 0x80728172, 0x8e728a72,
++ 0xb8f61605, 0x80768176,
++ 0x8e768676, 0x80727672,
++ 0x80f2c072, 0xb8f31605,
++ 0x80738173, 0x8e738473,
++ 0x8e7a8273, 0xbefa00ff,
++ 0x01000000, 0xbefc0073,
++ 0xc031003c, 0x00000072,
++ 0x80f2c072, 0xbf8c007f,
++ 0x80fc907c, 0xbe802d00,
++ 0xbe822d02, 0xbe842d04,
++ 0xbe862d06, 0xbe882d08,
++ 0xbe8a2d0a, 0xbe8c2d0c,
++ 0xbe8e2d0e, 0xbf06807c,
++ 0xbf84fff1, 0xb8f22a05,
++ 0x80728172, 0x8e728a72,
++ 0xb8f61605, 0x80768176,
++ 0x8e768676, 0x80727672,
++ 0xbefa0084, 0xbefa00ff,
++ 0x01000000, 0xc0211cfc,
++ 0x00000072, 0x80728472,
++ 0xc0211c3c, 0x00000072,
++ 0x80728472, 0xc0211c7c,
++ 0x00000072, 0x80728472,
++ 0xc0211bbc, 0x00000072,
++ 0x80728472, 0xc0211bfc,
++ 0x00000072, 0x80728472,
++ 0xc0211d3c, 0x00000072,
++ 0x80728472, 0xc0211d7c,
++ 0x00000072, 0x80728472,
++ 0xc0211a3c, 0x00000072,
++ 0x80728472, 0xc0211a7c,
++ 0x00000072, 0x80728472,
++ 0xc0211dfc, 0x00000072,
++ 0x80728472, 0xc0211b3c,
++ 0x00000072, 0x80728472,
++ 0xc0211b7c, 0x00000072,
++ 0x80728472, 0xbf8c007f,
++ 0x8671ff71, 0x0000ffff,
++ 0xbefc0073, 0xbefe006e,
++ 0xbeff006f, 0x867375ff,
++ 0x000003ff, 0xb9734803,
++ 0x867375ff, 0xfffff800,
++ 0x8f738b73, 0xb973a2c3,
++ 0xb977f801, 0x8673ff71,
++ 0xf0000000, 0x8f739c73,
++ 0x8e739073, 0xbef60080,
++ 0x87767376, 0x8673ff71,
++ 0x08000000, 0x8f739b73,
++ 0x8e738f73, 0x87767376,
++ 0x8673ff74, 0x00800000,
++ 0x8f739773, 0xb976f807,
++ 0x86fe7e7e, 0x86ea6a6a,
++ 0xb974f802, 0xbf8a0000,
++ 0x95807370, 0xbf810000,
++};
++
++
++static const uint32_t cwsr_trap_gfx9_hex[] = {
++ 0xbf820001, 0xbf82015a,
++ 0xb8f8f802, 0x89788678,
++ 0xb8f1f803, 0x866eff71,
++ 0x00000400, 0xbf850034,
++ 0x866eff71, 0x00000800,
++ 0xbf850003, 0x866eff71,
++ 0x00000100, 0xbf840008,
++ 0x866eff78, 0x00002000,
++ 0xbf840001, 0xbf810000,
++ 0x8778ff78, 0x00002000,
++ 0x80ec886c, 0x82ed806d,
++ 0xb8eef807, 0x866fff6e,
++ 0x001f8000, 0x8e6f8b6f,
++ 0x8977ff77, 0xfc000000,
++ 0x87776f77, 0x896eff6e,
++ 0x001f8000, 0xb96ef807,
++ 0xb8f0f812, 0xb8f1f813,
++ 0x8ef08870, 0xc0071bb8,
++ 0x00000000, 0xbf8cc07f,
++ 0xc0071c38, 0x00000008,
++ 0xbf8cc07f, 0x86ee6e6e,
++ 0xbf840001, 0xbe801d6e,
++ 0xb8f1f803, 0x8671ff71,
++ 0x000001ff, 0xbf850002,
++ 0x806c846c, 0x826d806d,
++ 0x866dff6d, 0x0000ffff,
++ 0x8f6e8b77, 0x866eff6e,
++ 0x001f8000, 0xb96ef807,
++ 0x86fe7e7e, 0x86ea6a6a,
++ 0xb978f802, 0xbe801f6c,
++ 0x866dff6d, 0x0000ffff,
++ 0xbef00080, 0xb9700283,
++ 0xb8f02407, 0x8e709c70,
++ 0x876d706d, 0xb8f003c7,
++ 0x8e709b70, 0x876d706d,
++ 0xb8f0f807, 0x8670ff70,
++ 0x00007fff, 0xb970f807,
++ 0xbeee007e, 0xbeef007f,
++ 0xbefe0180, 0xbf900004,
++ 0x87708478, 0xb970f802,
++ 0xbf8e0002, 0xbf88fffe,
++ 0xb8f02a05, 0x80708170,
++ 0x8e708a70, 0xb8f11605,
++ 0x80718171, 0x8e718671,
++ 0x80707170, 0x80707e70,
++ 0x8271807f, 0x8671ff71,
++ 0x0000ffff, 0xc0471cb8,
++ 0x00000040, 0xbf8cc07f,
++ 0xc04b1d38, 0x00000048,
++ 0xbf8cc07f, 0xc0431e78,
++ 0x00000058, 0xbf8cc07f,
++ 0xc0471eb8, 0x0000005c,
++ 0xbf8cc07f, 0xbef4007e,
++ 0x8675ff7f, 0x0000ffff,
++ 0x8775ff75, 0x00040000,
++ 0xbef60080, 0xbef700ff,
++ 0x00807fac, 0x8670ff7f,
++ 0x08000000, 0x8f708370,
++ 0x87777077, 0x8670ff7f,
++ 0x70000000, 0x8f708170,
++ 0x87777077, 0xbefb007c,
++ 0xbefa0080, 0xb8fa2a05,
++ 0x807a817a, 0x8e7a8a7a,
++ 0xb8f01605, 0x80708170,
++ 0x8e708670, 0x807a707a,
++ 0xbef60084, 0xbef600ff,
++ 0x01000000, 0xbefe007c,
++ 0xbefc007a, 0xc0611efa,
++ 0x0000007c, 0xbf8cc07f,
++ 0x807a847a, 0xbefc007e,
++ 0xbefe007c, 0xbefc007a,
++ 0xc0611b3a, 0x0000007c,
++ 0xbf8cc07f, 0x807a847a,
++ 0xbefc007e, 0xbefe007c,
++ 0xbefc007a, 0xc0611b7a,
++ 0x0000007c, 0xbf8cc07f,
++ 0x807a847a, 0xbefc007e,
++ 0xbefe007c, 0xbefc007a,
++ 0xc0611bba, 0x0000007c,
++ 0xbf8cc07f, 0x807a847a,
++ 0xbefc007e, 0xbefe007c,
++ 0xbefc007a, 0xc0611bfa,
++ 0x0000007c, 0xbf8cc07f,
++ 0x807a847a, 0xbefc007e,
++ 0xbefe007c, 0xbefc007a,
++ 0xc0611e3a, 0x0000007c,
++ 0xbf8cc07f, 0x807a847a,
++ 0xbefc007e, 0xb8f1f803,
++ 0xbefe007c, 0xbefc007a,
++ 0xc0611c7a, 0x0000007c,
++ 0xbf8cc07f, 0x807a847a,
++ 0xbefc007e, 0xbefe007c,
++ 0xbefc007a, 0xc0611a3a,
++ 0x0000007c, 0xbf8cc07f,
++ 0x807a847a, 0xbefc007e,
++ 0xbefe007c, 0xbefc007a,
++ 0xc0611a7a, 0x0000007c,
++ 0xbf8cc07f, 0x807a847a,
++ 0xbefc007e, 0xb8fbf801,
++ 0xbefe007c, 0xbefc007a,
++ 0xc0611efa, 0x0000007c,
++ 0xbf8cc07f, 0x807a847a,
++ 0xbefc007e, 0x8670ff7f,
++ 0x04000000, 0xbeef0080,
++ 0x876f6f70, 0xb8fa2a05,
++ 0x807a817a, 0x8e7a8a7a,
++ 0xb8f11605, 0x80718171,
++ 0x8e718471, 0x8e768271,
++ 0xbef600ff, 0x01000000,
++ 0xbef20174, 0x80747a74,
++ 0x82758075, 0xbefc0080,
++ 0xbf800000, 0xbe802b00,
++ 0xbe822b02, 0xbe842b04,
++ 0xbe862b06, 0xbe882b08,
++ 0xbe8a2b0a, 0xbe8c2b0c,
++ 0xbe8e2b0e, 0xc06b003a,
++ 0x00000000, 0xbf8cc07f,
++ 0xc06b013a, 0x00000010,
++ 0xbf8cc07f, 0xc06b023a,
++ 0x00000020, 0xbf8cc07f,
++ 0xc06b033a, 0x00000030,
++ 0xbf8cc07f, 0x8074c074,
++ 0x82758075, 0x807c907c,
++ 0xbf0a717c, 0xbf85ffe7,
++ 0xbef40172, 0xbefa0080,
++ 0xbefe00c1, 0xbeff00c1,
++ 0xbee80080, 0xbee90080,
++ 0xbef600ff, 0x01000000,
++ 0xe0724000, 0x7a1d0000,
++ 0xe0724100, 0x7a1d0100,
++ 0xe0724200, 0x7a1d0200,
++ 0xe0724300, 0x7a1d0300,
++ 0xbefe00c1, 0xbeff00c1,
++ 0xb8f14306, 0x8671c171,
++ 0xbf84002c, 0xbf8a0000,
++ 0x8670ff6f, 0x04000000,
++ 0xbf840028, 0x8e718671,
++ 0x8e718271, 0xbef60071,
++ 0xb8fa2a05, 0x807a817a,
++ 0x8e7a8a7a, 0xb8f01605,
++ 0x80708170, 0x8e708670,
++ 0x807a707a, 0x807aff7a,
++ 0x00000080, 0xbef600ff,
++ 0x01000000, 0xbefc0080,
++ 0xd28c0002, 0x000100c1,
++ 0xd28d0003, 0x000204c1,
++ 0xd1060002, 0x00011103,
++ 0x7e0602ff, 0x00000200,
++ 0xbefc00ff, 0x00010000,
++ 0xbe800077, 0x8677ff77,
++ 0xff7fffff, 0x8777ff77,
++ 0x00058000, 0xd8ec0000,
++ 0x00000002, 0xbf8cc07f,
++ 0xe0765000, 0x7a1d0002,
++ 0x68040702, 0xd0c9006a,
++ 0x0000e302, 0xbf87fff7,
++ 0xbef70000, 0xbefa00ff,
++ 0x00000400, 0xbefe00c1,
++ 0xbeff00c1, 0xb8f12a05,
++ 0x80718171, 0x8e718271,
++ 0x8e768871, 0xbef600ff,
++ 0x01000000, 0xbefc0084,
++ 0xbf0a717c, 0xbf840015,
++ 0xbf11017c, 0x8071ff71,
++ 0x00001000, 0x7e000300,
++ 0x7e020301, 0x7e040302,
++ 0x7e060303, 0xe0724000,
++ 0x7a1d0000, 0xe0724100,
++ 0x7a1d0100, 0xe0724200,
++ 0x7a1d0200, 0xe0724300,
++ 0x7a1d0300, 0x807c847c,
++ 0x807aff7a, 0x00000400,
++ 0xbf0a717c, 0xbf85ffef,
++ 0xbf9c0000, 0xbf8200d9,
++ 0xbef4007e, 0x8675ff7f,
++ 0x0000ffff, 0x8775ff75,
++ 0x00040000, 0xbef60080,
++ 0xbef700ff, 0x00807fac,
++ 0x866eff7f, 0x08000000,
++ 0x8f6e836e, 0x87776e77,
++ 0x866eff7f, 0x70000000,
++ 0x8f6e816e, 0x87776e77,
++ 0x866eff7f, 0x04000000,
++ 0xbf84001e, 0xbefe00c1,
++ 0xbeff00c1, 0xb8ef4306,
++ 0x866fc16f, 0xbf840019,
++ 0x8e6f866f, 0x8e6f826f,
++ 0xbef6006f, 0xb8f82a05,
++ 0x80788178, 0x8e788a78,
++ 0xb8ee1605, 0x806e816e,
++ 0x8e6e866e, 0x80786e78,
++ 0x8078ff78, 0x00000080,
++ 0xbef600ff, 0x01000000,
++ 0xbefc0080, 0xe0510000,
++ 0x781d0000, 0xe0510100,
++ 0x781d0000, 0x807cff7c,
++ 0x00000200, 0x8078ff78,
++ 0x00000200, 0xbf0a6f7c,
++ 0xbf85fff6, 0xbef80080,
++ 0xbefe00c1, 0xbeff00c1,
++ 0xb8ef2a05, 0x806f816f,
++ 0x8e6f826f, 0x8e76886f,
++ 0xbef600ff, 0x01000000,
++ 0xbeee0078, 0x8078ff78,
++ 0x00000400, 0xbefc0084,
++ 0xbf11087c, 0x806fff6f,
++ 0x00008000, 0xe0524000,
++ 0x781d0000, 0xe0524100,
++ 0x781d0100, 0xe0524200,
++ 0x781d0200, 0xe0524300,
++ 0x781d0300, 0xbf8c0f70,
++ 0x7e000300, 0x7e020301,
++ 0x7e040302, 0x7e060303,
++ 0x807c847c, 0x8078ff78,
++ 0x00000400, 0xbf0a6f7c,
++ 0xbf85ffee, 0xbf9c0000,
++ 0xe0524000, 0x6e1d0000,
++ 0xe0524100, 0x6e1d0100,
++ 0xe0524200, 0x6e1d0200,
++ 0xe0524300, 0x6e1d0300,
++ 0xb8f82a05, 0x80788178,
++ 0x8e788a78, 0xb8ee1605,
++ 0x806e816e, 0x8e6e866e,
++ 0x80786e78, 0x80f8c078,
++ 0xb8ef1605, 0x806f816f,
++ 0x8e6f846f, 0x8e76826f,
++ 0xbef600ff, 0x01000000,
++ 0xbefc006f, 0xc031003a,
++ 0x00000078, 0x80f8c078,
++ 0xbf8cc07f, 0x80fc907c,
++ 0xbf800000, 0xbe802d00,
++ 0xbe822d02, 0xbe842d04,
++ 0xbe862d06, 0xbe882d08,
++ 0xbe8a2d0a, 0xbe8c2d0c,
++ 0xbe8e2d0e, 0xbf06807c,
++ 0xbf84fff0, 0xb8f82a05,
++ 0x80788178, 0x8e788a78,
++ 0xb8ee1605, 0x806e816e,
++ 0x8e6e866e, 0x80786e78,
++ 0xbef60084, 0xbef600ff,
++ 0x01000000, 0xc0211bfa,
++ 0x00000078, 0x80788478,
++ 0xc0211b3a, 0x00000078,
++ 0x80788478, 0xc0211b7a,
++ 0x00000078, 0x80788478,
++ 0xc0211eba, 0x00000078,
++ 0x80788478, 0xc0211efa,
++ 0x00000078, 0x80788478,
++ 0xc0211c3a, 0x00000078,
++ 0x80788478, 0xc0211c7a,
++ 0x00000078, 0x80788478,
++ 0xc0211a3a, 0x00000078,
++ 0x80788478, 0xc0211a7a,
++ 0x00000078, 0x80788478,
++ 0xc0211cfa, 0x00000078,
++ 0x80788478, 0xbf8cc07f,
++ 0x866dff6d, 0x0000ffff,
++ 0xbefc006f, 0xbefe007a,
++ 0xbeff007b, 0x866f71ff,
++ 0x000003ff, 0xb96f4803,
++ 0x866f71ff, 0xfffff800,
++ 0x8f6f8b6f, 0xb96fa2c3,
++ 0xb973f801, 0xb8ee2a05,
++ 0x806e816e, 0x8e6e8a6e,
++ 0xb8ef1605, 0x806f816f,
++ 0x8e6f866f, 0x806e6f6e,
++ 0x806e746e, 0x826f8075,
++ 0x866fff6f, 0x0000ffff,
++ 0xc0071cb7, 0x00000040,
++ 0xc00b1d37, 0x00000048,
++ 0xc0031e77, 0x00000058,
++ 0xc0071eb7, 0x0000005c,
++ 0xbf8cc07f, 0x866fff6d,
++ 0xf0000000, 0x8f6f9c6f,
++ 0x8e6f906f, 0xbeee0080,
++ 0x876e6f6e, 0x866fff6d,
++ 0x08000000, 0x8f6f9b6f,
++ 0x8e6f8f6f, 0x876e6f6e,
++ 0x866fff70, 0x00800000,
++ 0x8f6f976f, 0xb96ef807,
++ 0x86fe7e7e, 0x86ea6a6a,
++ 0xb970f802, 0xbf8a0000,
++ 0x95806f6c, 0xbf810000,
++};
+diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
+index dec5ea4..6641348 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
+@@ -20,9 +20,12 @@
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-#if 0
+-HW (VI) source code for CWSR trap handler
+-#Version 18 + multiple trap handler
++/* To compile this assembly code:
++ * PROJECT=vi ./sp3 cwsr_trap_handler_gfx8.asm -hex tmp.hex
++ */
++
++/* HW (VI) source code for CWSR trap handler */
++/* Version 18 + multiple trap handler */
+
+ // this performance-optimal version was originally from Seven Xu at SRDC
+
+@@ -150,7 +153,7 @@ var s_save_spi_init_lo = exec_lo
+ var s_save_spi_init_hi = exec_hi
+
+ //tba_lo and tba_hi need to be saved/restored
+-var s_save_pc_lo = ttmp0 //{TTMP1, TTMP0} = {3¡¯h0,pc_rewind[3:0], HT[0],trapID[7:0], PC[47:0]}
++var s_save_pc_lo = ttmp0 //{TTMP1, TTMP0} = {3'h0,pc_rewind[3:0], HT[0],trapID[7:0], PC[47:0]}
+ var s_save_pc_hi = ttmp1
+ var s_save_exec_lo = ttmp2
+ var s_save_exec_hi = ttmp3
+@@ -1132,259 +1135,3 @@ end
+ function get_hwreg_size_bytes
+ return 128 //HWREG size 128 bytes
+ end
+-
+-
+-#endif
+-
+-static const uint32_t cwsr_trap_gfx8_hex[] = {
+- 0xbf820001, 0xbf820125,
+- 0xb8f4f802, 0x89748674,
+- 0xb8f5f803, 0x8675ff75,
+- 0x00000400, 0xbf850011,
+- 0xc00a1e37, 0x00000000,
+- 0xbf8c007f, 0x87777978,
+- 0xbf840002, 0xb974f802,
+- 0xbe801d78, 0xb8f5f803,
+- 0x8675ff75, 0x000001ff,
+- 0xbf850002, 0x80708470,
+- 0x82718071, 0x8671ff71,
+- 0x0000ffff, 0xb974f802,
+- 0xbe801f70, 0xb8f5f803,
+- 0x8675ff75, 0x00000100,
+- 0xbf840006, 0xbefa0080,
+- 0xb97a0203, 0x8671ff71,
+- 0x0000ffff, 0x80f08870,
+- 0x82f18071, 0xbefa0080,
+- 0xb97a0283, 0xbef60068,
+- 0xbef70069, 0xb8fa1c07,
+- 0x8e7a9c7a, 0x87717a71,
+- 0xb8fa03c7, 0x8e7a9b7a,
+- 0x87717a71, 0xb8faf807,
+- 0x867aff7a, 0x00007fff,
+- 0xb97af807, 0xbef2007e,
+- 0xbef3007f, 0xbefe0180,
+- 0xbf900004, 0x877a8474,
+- 0xb97af802, 0xbf8e0002,
+- 0xbf88fffe, 0xbef8007e,
+- 0x8679ff7f, 0x0000ffff,
+- 0x8779ff79, 0x00040000,
+- 0xbefa0080, 0xbefb00ff,
+- 0x00807fac, 0x867aff7f,
+- 0x08000000, 0x8f7a837a,
+- 0x877b7a7b, 0x867aff7f,
+- 0x70000000, 0x8f7a817a,
+- 0x877b7a7b, 0xbeef007c,
+- 0xbeee0080, 0xb8ee2a05,
+- 0x806e816e, 0x8e6e8a6e,
+- 0xb8fa1605, 0x807a817a,
+- 0x8e7a867a, 0x806e7a6e,
+- 0xbefa0084, 0xbefa00ff,
+- 0x01000000, 0xbefe007c,
+- 0xbefc006e, 0xc0611bfc,
+- 0x0000007c, 0x806e846e,
+- 0xbefc007e, 0xbefe007c,
+- 0xbefc006e, 0xc0611c3c,
+- 0x0000007c, 0x806e846e,
+- 0xbefc007e, 0xbefe007c,
+- 0xbefc006e, 0xc0611c7c,
+- 0x0000007c, 0x806e846e,
+- 0xbefc007e, 0xbefe007c,
+- 0xbefc006e, 0xc0611cbc,
+- 0x0000007c, 0x806e846e,
+- 0xbefc007e, 0xbefe007c,
+- 0xbefc006e, 0xc0611cfc,
+- 0x0000007c, 0x806e846e,
+- 0xbefc007e, 0xbefe007c,
+- 0xbefc006e, 0xc0611d3c,
+- 0x0000007c, 0x806e846e,
+- 0xbefc007e, 0xb8f5f803,
+- 0xbefe007c, 0xbefc006e,
+- 0xc0611d7c, 0x0000007c,
+- 0x806e846e, 0xbefc007e,
+- 0xbefe007c, 0xbefc006e,
+- 0xc0611dbc, 0x0000007c,
+- 0x806e846e, 0xbefc007e,
+- 0xbefe007c, 0xbefc006e,
+- 0xc0611dfc, 0x0000007c,
+- 0x806e846e, 0xbefc007e,
+- 0xb8eff801, 0xbefe007c,
+- 0xbefc006e, 0xc0611bfc,
+- 0x0000007c, 0x806e846e,
+- 0xbefc007e, 0xbefe007c,
+- 0xbefc006e, 0xc0611b3c,
+- 0x0000007c, 0x806e846e,
+- 0xbefc007e, 0xbefe007c,
+- 0xbefc006e, 0xc0611b7c,
+- 0x0000007c, 0x806e846e,
+- 0xbefc007e, 0x867aff7f,
+- 0x04000000, 0xbef30080,
+- 0x8773737a, 0xb8ee2a05,
+- 0x806e816e, 0x8e6e8a6e,
+- 0xb8f51605, 0x80758175,
+- 0x8e758475, 0x8e7a8275,
+- 0xbefa00ff, 0x01000000,
+- 0xbef60178, 0x80786e78,
+- 0x82798079, 0xbefc0080,
+- 0xbe802b00, 0xbe822b02,
+- 0xbe842b04, 0xbe862b06,
+- 0xbe882b08, 0xbe8a2b0a,
+- 0xbe8c2b0c, 0xbe8e2b0e,
+- 0xc06b003c, 0x00000000,
+- 0xc06b013c, 0x00000010,
+- 0xc06b023c, 0x00000020,
+- 0xc06b033c, 0x00000030,
+- 0x8078c078, 0x82798079,
+- 0x807c907c, 0xbf0a757c,
+- 0xbf85ffeb, 0xbef80176,
+- 0xbeee0080, 0xbefe00c1,
+- 0xbeff00c1, 0xbefa00ff,
+- 0x01000000, 0xe0724000,
+- 0x6e1e0000, 0xe0724100,
+- 0x6e1e0100, 0xe0724200,
+- 0x6e1e0200, 0xe0724300,
+- 0x6e1e0300, 0xbefe00c1,
+- 0xbeff00c1, 0xb8f54306,
+- 0x8675c175, 0xbf84002c,
+- 0xbf8a0000, 0x867aff73,
+- 0x04000000, 0xbf840028,
+- 0x8e758675, 0x8e758275,
+- 0xbefa0075, 0xb8ee2a05,
+- 0x806e816e, 0x8e6e8a6e,
+- 0xb8fa1605, 0x807a817a,
+- 0x8e7a867a, 0x806e7a6e,
+- 0x806eff6e, 0x00000080,
+- 0xbefa00ff, 0x01000000,
+- 0xbefc0080, 0xd28c0002,
+- 0x000100c1, 0xd28d0003,
+- 0x000204c1, 0xd1060002,
+- 0x00011103, 0x7e0602ff,
+- 0x00000200, 0xbefc00ff,
+- 0x00010000, 0xbe80007b,
+- 0x867bff7b, 0xff7fffff,
+- 0x877bff7b, 0x00058000,
+- 0xd8ec0000, 0x00000002,
+- 0xbf8c007f, 0xe0765000,
+- 0x6e1e0002, 0x32040702,
+- 0xd0c9006a, 0x0000eb02,
+- 0xbf87fff7, 0xbefb0000,
+- 0xbeee00ff, 0x00000400,
+- 0xbefe00c1, 0xbeff00c1,
+- 0xb8f52a05, 0x80758175,
+- 0x8e758275, 0x8e7a8875,
+- 0xbefa00ff, 0x01000000,
+- 0xbefc0084, 0xbf0a757c,
+- 0xbf840015, 0xbf11017c,
+- 0x8075ff75, 0x00001000,
+- 0x7e000300, 0x7e020301,
+- 0x7e040302, 0x7e060303,
+- 0xe0724000, 0x6e1e0000,
+- 0xe0724100, 0x6e1e0100,
+- 0xe0724200, 0x6e1e0200,
+- 0xe0724300, 0x6e1e0300,
+- 0x807c847c, 0x806eff6e,
+- 0x00000400, 0xbf0a757c,
+- 0xbf85ffef, 0xbf9c0000,
+- 0xbf8200ca, 0xbef8007e,
+- 0x8679ff7f, 0x0000ffff,
+- 0x8779ff79, 0x00040000,
+- 0xbefa0080, 0xbefb00ff,
+- 0x00807fac, 0x8676ff7f,
+- 0x08000000, 0x8f768376,
+- 0x877b767b, 0x8676ff7f,
+- 0x70000000, 0x8f768176,
+- 0x877b767b, 0x8676ff7f,
+- 0x04000000, 0xbf84001e,
+- 0xbefe00c1, 0xbeff00c1,
+- 0xb8f34306, 0x8673c173,
+- 0xbf840019, 0x8e738673,
+- 0x8e738273, 0xbefa0073,
+- 0xb8f22a05, 0x80728172,
+- 0x8e728a72, 0xb8f61605,
+- 0x80768176, 0x8e768676,
+- 0x80727672, 0x8072ff72,
+- 0x00000080, 0xbefa00ff,
+- 0x01000000, 0xbefc0080,
+- 0xe0510000, 0x721e0000,
+- 0xe0510100, 0x721e0000,
+- 0x807cff7c, 0x00000200,
+- 0x8072ff72, 0x00000200,
+- 0xbf0a737c, 0xbf85fff6,
+- 0xbef20080, 0xbefe00c1,
+- 0xbeff00c1, 0xb8f32a05,
+- 0x80738173, 0x8e738273,
+- 0x8e7a8873, 0xbefa00ff,
+- 0x01000000, 0xbef60072,
+- 0x8072ff72, 0x00000400,
+- 0xbefc0084, 0xbf11087c,
+- 0x8073ff73, 0x00008000,
+- 0xe0524000, 0x721e0000,
+- 0xe0524100, 0x721e0100,
+- 0xe0524200, 0x721e0200,
+- 0xe0524300, 0x721e0300,
+- 0xbf8c0f70, 0x7e000300,
+- 0x7e020301, 0x7e040302,
+- 0x7e060303, 0x807c847c,
+- 0x8072ff72, 0x00000400,
+- 0xbf0a737c, 0xbf85ffee,
+- 0xbf9c0000, 0xe0524000,
+- 0x761e0000, 0xe0524100,
+- 0x761e0100, 0xe0524200,
+- 0x761e0200, 0xe0524300,
+- 0x761e0300, 0xb8f22a05,
+- 0x80728172, 0x8e728a72,
+- 0xb8f61605, 0x80768176,
+- 0x8e768676, 0x80727672,
+- 0x80f2c072, 0xb8f31605,
+- 0x80738173, 0x8e738473,
+- 0x8e7a8273, 0xbefa00ff,
+- 0x01000000, 0xbefc0073,
+- 0xc031003c, 0x00000072,
+- 0x80f2c072, 0xbf8c007f,
+- 0x80fc907c, 0xbe802d00,
+- 0xbe822d02, 0xbe842d04,
+- 0xbe862d06, 0xbe882d08,
+- 0xbe8a2d0a, 0xbe8c2d0c,
+- 0xbe8e2d0e, 0xbf06807c,
+- 0xbf84fff1, 0xb8f22a05,
+- 0x80728172, 0x8e728a72,
+- 0xb8f61605, 0x80768176,
+- 0x8e768676, 0x80727672,
+- 0xbefa0084, 0xbefa00ff,
+- 0x01000000, 0xc0211cfc,
+- 0x00000072, 0x80728472,
+- 0xc0211c3c, 0x00000072,
+- 0x80728472, 0xc0211c7c,
+- 0x00000072, 0x80728472,
+- 0xc0211bbc, 0x00000072,
+- 0x80728472, 0xc0211bfc,
+- 0x00000072, 0x80728472,
+- 0xc0211d3c, 0x00000072,
+- 0x80728472, 0xc0211d7c,
+- 0x00000072, 0x80728472,
+- 0xc0211a3c, 0x00000072,
+- 0x80728472, 0xc0211a7c,
+- 0x00000072, 0x80728472,
+- 0xc0211dfc, 0x00000072,
+- 0x80728472, 0xc0211b3c,
+- 0x00000072, 0x80728472,
+- 0xc0211b7c, 0x00000072,
+- 0x80728472, 0xbf8c007f,
+- 0x8671ff71, 0x0000ffff,
+- 0xbefc0073, 0xbefe006e,
+- 0xbeff006f, 0x867375ff,
+- 0x000003ff, 0xb9734803,
+- 0x867375ff, 0xfffff800,
+- 0x8f738b73, 0xb973a2c3,
+- 0xb977f801, 0x8673ff71,
+- 0xf0000000, 0x8f739c73,
+- 0x8e739073, 0xbef60080,
+- 0x87767376, 0x8673ff71,
+- 0x08000000, 0x8f739b73,
+- 0x8e738f73, 0x87767376,
+- 0x8673ff74, 0x00800000,
+- 0x8f739773, 0xb976f807,
+- 0x86fe7e7e, 0x86ea6a6a,
+- 0xb974f802, 0xbf8a0000,
+- 0x95807370, 0xbf810000,
+-};
+-
+diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+index adb3308..e4e7c1d 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+@@ -20,9 +20,12 @@
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-#if 0
+-HW (GFX9) source code for CWSR trap handler
+-#Version 18 + multiple trap handler
++/* To compile this assembly code:
++ * PROJECT=greenland ./sp3 cwsr_trap_handler_gfx9.asm -hex tmp.hex
++ */
++
++/* HW (GFX9) source code for CWSR trap handler */
++/* Version 18 + multiple trap handler */
+
+ // this performance-optimal version was originally from Seven Xu at SRDC
+
+@@ -151,7 +154,7 @@ var S_SAVE_PC_HI_FIRST_REPLAY_MASK = 0x08000000 //FIXME
+ var s_save_spi_init_lo = exec_lo
+ var s_save_spi_init_hi = exec_hi
+
+-var s_save_pc_lo = ttmp0 //{TTMP1, TTMP0} = {3¡¯h0,pc_rewind[3:0], HT[0],trapID[7:0], PC[47:0]}
++var s_save_pc_lo = ttmp0 //{TTMP1, TTMP0} = {3'h0,pc_rewind[3:0], HT[0],trapID[7:0], PC[47:0]}
+ var s_save_pc_hi = ttmp1
+ var s_save_exec_lo = ttmp2
+ var s_save_exec_hi = ttmp3
+@@ -1210,292 +1213,3 @@ function ack_sqc_store_workaround
+ s_waitcnt lgkmcnt(0)
+ end
+ end
+-
+-
+-#endif
+-
+-static const uint32_t cwsr_trap_gfx9_hex[] = {
+- 0xbf820001, 0xbf82015a,
+- 0xb8f8f802, 0x89788678,
+- 0xb8f1f803, 0x866eff71,
+- 0x00000400, 0xbf850034,
+- 0x866eff71, 0x00000800,
+- 0xbf850003, 0x866eff71,
+- 0x00000100, 0xbf840008,
+- 0x866eff78, 0x00002000,
+- 0xbf840001, 0xbf810000,
+- 0x8778ff78, 0x00002000,
+- 0x80ec886c, 0x82ed806d,
+- 0xb8eef807, 0x866fff6e,
+- 0x001f8000, 0x8e6f8b6f,
+- 0x8977ff77, 0xfc000000,
+- 0x87776f77, 0x896eff6e,
+- 0x001f8000, 0xb96ef807,
+- 0xb8f0f812, 0xb8f1f813,
+- 0x8ef08870, 0xc0071bb8,
+- 0x00000000, 0xbf8cc07f,
+- 0xc0071c38, 0x00000008,
+- 0xbf8cc07f, 0x86ee6e6e,
+- 0xbf840001, 0xbe801d6e,
+- 0xb8f1f803, 0x8671ff71,
+- 0x000001ff, 0xbf850002,
+- 0x806c846c, 0x826d806d,
+- 0x866dff6d, 0x0000ffff,
+- 0x8f6e8b77, 0x866eff6e,
+- 0x001f8000, 0xb96ef807,
+- 0x86fe7e7e, 0x86ea6a6a,
+- 0xb978f802, 0xbe801f6c,
+- 0x866dff6d, 0x0000ffff,
+- 0xbef00080, 0xb9700283,
+- 0xb8f02407, 0x8e709c70,
+- 0x876d706d, 0xb8f003c7,
+- 0x8e709b70, 0x876d706d,
+- 0xb8f0f807, 0x8670ff70,
+- 0x00007fff, 0xb970f807,
+- 0xbeee007e, 0xbeef007f,
+- 0xbefe0180, 0xbf900004,
+- 0x87708478, 0xb970f802,
+- 0xbf8e0002, 0xbf88fffe,
+- 0xb8f02a05, 0x80708170,
+- 0x8e708a70, 0xb8f11605,
+- 0x80718171, 0x8e718671,
+- 0x80707170, 0x80707e70,
+- 0x8271807f, 0x8671ff71,
+- 0x0000ffff, 0xc0471cb8,
+- 0x00000040, 0xbf8cc07f,
+- 0xc04b1d38, 0x00000048,
+- 0xbf8cc07f, 0xc0431e78,
+- 0x00000058, 0xbf8cc07f,
+- 0xc0471eb8, 0x0000005c,
+- 0xbf8cc07f, 0xbef4007e,
+- 0x8675ff7f, 0x0000ffff,
+- 0x8775ff75, 0x00040000,
+- 0xbef60080, 0xbef700ff,
+- 0x00807fac, 0x8670ff7f,
+- 0x08000000, 0x8f708370,
+- 0x87777077, 0x8670ff7f,
+- 0x70000000, 0x8f708170,
+- 0x87777077, 0xbefb007c,
+- 0xbefa0080, 0xb8fa2a05,
+- 0x807a817a, 0x8e7a8a7a,
+- 0xb8f01605, 0x80708170,
+- 0x8e708670, 0x807a707a,
+- 0xbef60084, 0xbef600ff,
+- 0x01000000, 0xbefe007c,
+- 0xbefc007a, 0xc0611efa,
+- 0x0000007c, 0xbf8cc07f,
+- 0x807a847a, 0xbefc007e,
+- 0xbefe007c, 0xbefc007a,
+- 0xc0611b3a, 0x0000007c,
+- 0xbf8cc07f, 0x807a847a,
+- 0xbefc007e, 0xbefe007c,
+- 0xbefc007a, 0xc0611b7a,
+- 0x0000007c, 0xbf8cc07f,
+- 0x807a847a, 0xbefc007e,
+- 0xbefe007c, 0xbefc007a,
+- 0xc0611bba, 0x0000007c,
+- 0xbf8cc07f, 0x807a847a,
+- 0xbefc007e, 0xbefe007c,
+- 0xbefc007a, 0xc0611bfa,
+- 0x0000007c, 0xbf8cc07f,
+- 0x807a847a, 0xbefc007e,
+- 0xbefe007c, 0xbefc007a,
+- 0xc0611e3a, 0x0000007c,
+- 0xbf8cc07f, 0x807a847a,
+- 0xbefc007e, 0xb8f1f803,
+- 0xbefe007c, 0xbefc007a,
+- 0xc0611c7a, 0x0000007c,
+- 0xbf8cc07f, 0x807a847a,
+- 0xbefc007e, 0xbefe007c,
+- 0xbefc007a, 0xc0611a3a,
+- 0x0000007c, 0xbf8cc07f,
+- 0x807a847a, 0xbefc007e,
+- 0xbefe007c, 0xbefc007a,
+- 0xc0611a7a, 0x0000007c,
+- 0xbf8cc07f, 0x807a847a,
+- 0xbefc007e, 0xb8fbf801,
+- 0xbefe007c, 0xbefc007a,
+- 0xc0611efa, 0x0000007c,
+- 0xbf8cc07f, 0x807a847a,
+- 0xbefc007e, 0x8670ff7f,
+- 0x04000000, 0xbeef0080,
+- 0x876f6f70, 0xb8fa2a05,
+- 0x807a817a, 0x8e7a8a7a,
+- 0xb8f11605, 0x80718171,
+- 0x8e718471, 0x8e768271,
+- 0xbef600ff, 0x01000000,
+- 0xbef20174, 0x80747a74,
+- 0x82758075, 0xbefc0080,
+- 0xbf800000, 0xbe802b00,
+- 0xbe822b02, 0xbe842b04,
+- 0xbe862b06, 0xbe882b08,
+- 0xbe8a2b0a, 0xbe8c2b0c,
+- 0xbe8e2b0e, 0xc06b003a,
+- 0x00000000, 0xbf8cc07f,
+- 0xc06b013a, 0x00000010,
+- 0xbf8cc07f, 0xc06b023a,
+- 0x00000020, 0xbf8cc07f,
+- 0xc06b033a, 0x00000030,
+- 0xbf8cc07f, 0x8074c074,
+- 0x82758075, 0x807c907c,
+- 0xbf0a717c, 0xbf85ffe7,
+- 0xbef40172, 0xbefa0080,
+- 0xbefe00c1, 0xbeff00c1,
+- 0xbee80080, 0xbee90080,
+- 0xbef600ff, 0x01000000,
+- 0xe0724000, 0x7a1d0000,
+- 0xe0724100, 0x7a1d0100,
+- 0xe0724200, 0x7a1d0200,
+- 0xe0724300, 0x7a1d0300,
+- 0xbefe00c1, 0xbeff00c1,
+- 0xb8f14306, 0x8671c171,
+- 0xbf84002c, 0xbf8a0000,
+- 0x8670ff6f, 0x04000000,
+- 0xbf840028, 0x8e718671,
+- 0x8e718271, 0xbef60071,
+- 0xb8fa2a05, 0x807a817a,
+- 0x8e7a8a7a, 0xb8f01605,
+- 0x80708170, 0x8e708670,
+- 0x807a707a, 0x807aff7a,
+- 0x00000080, 0xbef600ff,
+- 0x01000000, 0xbefc0080,
+- 0xd28c0002, 0x000100c1,
+- 0xd28d0003, 0x000204c1,
+- 0xd1060002, 0x00011103,
+- 0x7e0602ff, 0x00000200,
+- 0xbefc00ff, 0x00010000,
+- 0xbe800077, 0x8677ff77,
+- 0xff7fffff, 0x8777ff77,
+- 0x00058000, 0xd8ec0000,
+- 0x00000002, 0xbf8cc07f,
+- 0xe0765000, 0x7a1d0002,
+- 0x68040702, 0xd0c9006a,
+- 0x0000e302, 0xbf87fff7,
+- 0xbef70000, 0xbefa00ff,
+- 0x00000400, 0xbefe00c1,
+- 0xbeff00c1, 0xb8f12a05,
+- 0x80718171, 0x8e718271,
+- 0x8e768871, 0xbef600ff,
+- 0x01000000, 0xbefc0084,
+- 0xbf0a717c, 0xbf840015,
+- 0xbf11017c, 0x8071ff71,
+- 0x00001000, 0x7e000300,
+- 0x7e020301, 0x7e040302,
+- 0x7e060303, 0xe0724000,
+- 0x7a1d0000, 0xe0724100,
+- 0x7a1d0100, 0xe0724200,
+- 0x7a1d0200, 0xe0724300,
+- 0x7a1d0300, 0x807c847c,
+- 0x807aff7a, 0x00000400,
+- 0xbf0a717c, 0xbf85ffef,
+- 0xbf9c0000, 0xbf8200d9,
+- 0xbef4007e, 0x8675ff7f,
+- 0x0000ffff, 0x8775ff75,
+- 0x00040000, 0xbef60080,
+- 0xbef700ff, 0x00807fac,
+- 0x866eff7f, 0x08000000,
+- 0x8f6e836e, 0x87776e77,
+- 0x866eff7f, 0x70000000,
+- 0x8f6e816e, 0x87776e77,
+- 0x866eff7f, 0x04000000,
+- 0xbf84001e, 0xbefe00c1,
+- 0xbeff00c1, 0xb8ef4306,
+- 0x866fc16f, 0xbf840019,
+- 0x8e6f866f, 0x8e6f826f,
+- 0xbef6006f, 0xb8f82a05,
+- 0x80788178, 0x8e788a78,
+- 0xb8ee1605, 0x806e816e,
+- 0x8e6e866e, 0x80786e78,
+- 0x8078ff78, 0x00000080,
+- 0xbef600ff, 0x01000000,
+- 0xbefc0080, 0xe0510000,
+- 0x781d0000, 0xe0510100,
+- 0x781d0000, 0x807cff7c,
+- 0x00000200, 0x8078ff78,
+- 0x00000200, 0xbf0a6f7c,
+- 0xbf85fff6, 0xbef80080,
+- 0xbefe00c1, 0xbeff00c1,
+- 0xb8ef2a05, 0x806f816f,
+- 0x8e6f826f, 0x8e76886f,
+- 0xbef600ff, 0x01000000,
+- 0xbeee0078, 0x8078ff78,
+- 0x00000400, 0xbefc0084,
+- 0xbf11087c, 0x806fff6f,
+- 0x00008000, 0xe0524000,
+- 0x781d0000, 0xe0524100,
+- 0x781d0100, 0xe0524200,
+- 0x781d0200, 0xe0524300,
+- 0x781d0300, 0xbf8c0f70,
+- 0x7e000300, 0x7e020301,
+- 0x7e040302, 0x7e060303,
+- 0x807c847c, 0x8078ff78,
+- 0x00000400, 0xbf0a6f7c,
+- 0xbf85ffee, 0xbf9c0000,
+- 0xe0524000, 0x6e1d0000,
+- 0xe0524100, 0x6e1d0100,
+- 0xe0524200, 0x6e1d0200,
+- 0xe0524300, 0x6e1d0300,
+- 0xb8f82a05, 0x80788178,
+- 0x8e788a78, 0xb8ee1605,
+- 0x806e816e, 0x8e6e866e,
+- 0x80786e78, 0x80f8c078,
+- 0xb8ef1605, 0x806f816f,
+- 0x8e6f846f, 0x8e76826f,
+- 0xbef600ff, 0x01000000,
+- 0xbefc006f, 0xc031003a,
+- 0x00000078, 0x80f8c078,
+- 0xbf8cc07f, 0x80fc907c,
+- 0xbf800000, 0xbe802d00,
+- 0xbe822d02, 0xbe842d04,
+- 0xbe862d06, 0xbe882d08,
+- 0xbe8a2d0a, 0xbe8c2d0c,
+- 0xbe8e2d0e, 0xbf06807c,
+- 0xbf84fff0, 0xb8f82a05,
+- 0x80788178, 0x8e788a78,
+- 0xb8ee1605, 0x806e816e,
+- 0x8e6e866e, 0x80786e78,
+- 0xbef60084, 0xbef600ff,
+- 0x01000000, 0xc0211bfa,
+- 0x00000078, 0x80788478,
+- 0xc0211b3a, 0x00000078,
+- 0x80788478, 0xc0211b7a,
+- 0x00000078, 0x80788478,
+- 0xc0211eba, 0x00000078,
+- 0x80788478, 0xc0211efa,
+- 0x00000078, 0x80788478,
+- 0xc0211c3a, 0x00000078,
+- 0x80788478, 0xc0211c7a,
+- 0x00000078, 0x80788478,
+- 0xc0211a3a, 0x00000078,
+- 0x80788478, 0xc0211a7a,
+- 0x00000078, 0x80788478,
+- 0xc0211cfa, 0x00000078,
+- 0x80788478, 0xbf8cc07f,
+- 0x866dff6d, 0x0000ffff,
+- 0xbefc006f, 0xbefe007a,
+- 0xbeff007b, 0x866f71ff,
+- 0x000003ff, 0xb96f4803,
+- 0x866f71ff, 0xfffff800,
+- 0x8f6f8b6f, 0xb96fa2c3,
+- 0xb973f801, 0xb8ee2a05,
+- 0x806e816e, 0x8e6e8a6e,
+- 0xb8ef1605, 0x806f816f,
+- 0x8e6f866f, 0x806e6f6e,
+- 0x806e746e, 0x826f8075,
+- 0x866fff6f, 0x0000ffff,
+- 0xc0071cb7, 0x00000040,
+- 0xc00b1d37, 0x00000048,
+- 0xc0031e77, 0x00000058,
+- 0xc0071eb7, 0x0000005c,
+- 0xbf8cc07f, 0x866fff6d,
+- 0xf0000000, 0x8f6f9c6f,
+- 0x8e6f906f, 0xbeee0080,
+- 0x876e6f6e, 0x866fff6d,
+- 0x08000000, 0x8f6f9b6f,
+- 0x8e6f8f6f, 0x876e6f6e,
+- 0x866fff70, 0x00800000,
+- 0x8f6f976f, 0xb96ef807,
+- 0x86fe7e7e, 0x86ea6a6a,
+- 0xb970f802, 0xbf8a0000,
+- 0x95806f6c, 0xbf810000,
+-};
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+index 088f5db..bc5c642 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -27,8 +27,7 @@
+ #include "kfd_priv.h"
+ #include "kfd_device_queue_manager.h"
+ #include "kfd_pm4_headers_vi.h"
+-#include "cwsr_trap_handler_gfx8.asm"
+-#include "cwsr_trap_handler_gfx9.asm"
++#include "cwsr_trap_handler.h"
+ #include "kfd_iommu.h"
+
+ #define MQD_SIZE_ALIGNED 768
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5653-drm-amdkfd-Mellanox-Support-PeerSync-interface.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5653-drm-amdkfd-Mellanox-Support-PeerSync-interface.patch
new file mode 100644
index 00000000..8a8bceb9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5653-drm-amdkfd-Mellanox-Support-PeerSync-interface.patch
@@ -0,0 +1,57 @@
+From cc6d04d756b873efbb147ffc2c52d56f91c31baf Mon Sep 17 00:00:00 2001
+From: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+Date: Tue, 24 Apr 2018 17:57:53 -0400
+Subject: [PATCH 5653/5725] drm/amdkfd: Mellanox: Support PeerSync interface
+
+The Mellanox driver doesn't support memory invalidation for its
+new PeerSync interface. If a non-NULL pointer is passed into
+ib_register_peer_memory_client(), the Mellanox driver assumes the peer
+device (AMD) requires invalidation. This would end in the AMD device
+being ignored.
+
+The current kfd implementation of rdma doesn't use the invalidate callback
+and keeps the memory pinned for the entire duration. So passing NULL doesn't
+change the current behaviour. However, for a robust and secure solution
+this needs to be revisited.
+
+BUG: SWDEV-149064
+
+Change-Id: I6737331d65b1d2e63c2ebb970c40fe61d32f8d22
+Signed-off-by: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_peerdirect.c | 6 +-----
+ 1 file changed, 1 insertion(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_peerdirect.c b/drivers/gpu/drm/amd/amdkfd/kfd_peerdirect.c
+index fae8e8c..1b1a0ca 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_peerdirect.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_peerdirect.c
+@@ -137,7 +137,6 @@ static void (*pfn_ib_unregister_peer_memory_client)(void *reg_handle);
+
+ static const struct amd_rdma_interface *rdma_interface;
+
+-static invalidate_peer_memory ib_invalidate_callback;
+ static void *ib_reg_handle;
+
+ struct amd_mem_context {
+@@ -169,9 +168,6 @@ static void free_callback(void *client_priv)
+
+ pr_debug("mem_context->core_context 0x%p\n", mem_context->core_context);
+
+- /* Call back IB stack asking to invalidate memory */
+- (*ib_invalidate_callback) (ib_reg_handle, mem_context->core_context);
+-
+ /* amdkfd will free resources when we return from this callback.
+ * Set flag to inform that there is nothing to do on "put_pages", etc.
+ */
+@@ -478,7 +474,7 @@ void kfd_init_peer_direct(void)
+ strcpy(amd_mem_client.version, AMD_PEER_BRIDGE_DRIVER_VERSION);
+
+ ib_reg_handle = pfn_ib_register_peer_memory_client(&amd_mem_client,
+- &ib_invalidate_callback);
++ NULL);
+
+ if (!ib_reg_handle) {
+ pr_err("Cannot register peer memory client\n");
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5654-drm-amdkfd-Fix-CP-soft-hang-on-APUs.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5654-drm-amdkfd-Fix-CP-soft-hang-on-APUs.patch
new file mode 100644
index 00000000..1c082bb1
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5654-drm-amdkfd-Fix-CP-soft-hang-on-APUs.patch
@@ -0,0 +1,103 @@
+From cc7a16bf7e858e0d90bb25e4f3ab59983df60bb0 Mon Sep 17 00:00:00 2001
+From: Yong Zhao <yong.zhao@amd.com>
+Date: Wed, 25 Apr 2018 11:56:55 -0400
+Subject: [PATCH 5654/5725] drm/amdkfd: Fix CP soft hang on APUs
+
+The problem happens on Raven and Carrizo. The context save handler
+should not clear the high bits of PC_HI before extracting the bits
+of IB_STS.
+
+The bug is not relevant to VEGA10 until we enable demand paging.
+
+Fix: KFD-381
+
+Change-Id: I85615c9dad965972cc039074bfcd4c18e370ad34
+Signed-off-by: Jay Cornwall <Jay.Cornwall@amd.com>
+Signed-off-by: Yong Zhao <yong.zhao@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h | 4 ++--
+ drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm | 3 +--
+ drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm | 3 +--
+ 3 files changed, 4 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+index a546a21..f68aef0 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+@@ -253,7 +253,6 @@ static const uint32_t cwsr_trap_gfx8_hex[] = {
+ 0x00000072, 0x80728472,
+ 0xc0211b7c, 0x00000072,
+ 0x80728472, 0xbf8c007f,
+- 0x8671ff71, 0x0000ffff,
+ 0xbefc0073, 0xbefe006e,
+ 0xbeff006f, 0x867375ff,
+ 0x000003ff, 0xb9734803,
+@@ -267,6 +266,7 @@ static const uint32_t cwsr_trap_gfx8_hex[] = {
+ 0x8e738f73, 0x87767376,
+ 0x8673ff74, 0x00800000,
+ 0x8f739773, 0xb976f807,
++ 0x8671ff71, 0x0000ffff,
+ 0x86fe7e7e, 0x86ea6a6a,
+ 0xb974f802, 0xbf8a0000,
+ 0x95807370, 0xbf810000,
+@@ -530,7 +530,6 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
+ 0x00000078, 0x80788478,
+ 0xc0211cfa, 0x00000078,
+ 0x80788478, 0xbf8cc07f,
+- 0x866dff6d, 0x0000ffff,
+ 0xbefc006f, 0xbefe007a,
+ 0xbeff007b, 0x866f71ff,
+ 0x000003ff, 0xb96f4803,
+@@ -554,6 +553,7 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
+ 0x8e6f8f6f, 0x876e6f6e,
+ 0x866fff70, 0x00800000,
+ 0x8f6f976f, 0xb96ef807,
++ 0x866dff6d, 0x0000ffff,
+ 0x86fe7e7e, 0x86ea6a6a,
+ 0xb970f802, 0xbf8a0000,
+ 0x95806f6c, 0xbf810000,
+diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
+index 6641348..6302402 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
+@@ -1015,8 +1015,6 @@ end
+
+ s_waitcnt lgkmcnt(0) //from now on, it is safe to restore STATUS and IB_STS
+
+- s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS
+-
+ //for normal save & restore, the saved PC points to the next inst to execute, no adjustment needs to be made, otherwise:
+ if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL))
+ s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 8 //pc[31:0]+8 //two back-to-back s_trap are used (first for save and second for restore)
+@@ -1052,6 +1050,7 @@ end
+ s_lshr_b32 s_restore_m0, s_restore_m0, SQ_WAVE_STATUS_INST_ATC_SHIFT
+ s_setreg_b32 hwreg(HW_REG_IB_STS), s_restore_tmp
+
++ s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS
+ s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
+ s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
+ s_setreg_b32 hwreg(HW_REG_STATUS), s_restore_status // SCC is included, which is changed by previous salu
+diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+index e4e7c1d..fc62fb8 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+@@ -1067,8 +1067,6 @@ end
+
+ s_waitcnt lgkmcnt(0) //from now on, it is safe to restore STATUS and IB_STS
+
+- s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS
+-
+ //for normal save & restore, the saved PC points to the next inst to execute, no adjustment needs to be made, otherwise:
+ if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL))
+ s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 8 //pc[31:0]+8 //two back-to-back s_trap are used (first for save and second for restore)
+@@ -1119,6 +1117,7 @@ end
+ s_lshr_b32 s_restore_m0, s_restore_m0, SQ_WAVE_STATUS_INST_ATC_SHIFT
+ s_setreg_b32 hwreg(HW_REG_IB_STS), s_restore_tmp
+
++ s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS
+ s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
+ s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
+ s_setreg_b32 hwreg(HW_REG_STATUS), s_restore_status // SCC is included, which is changed by previous salu
+--
+2.7.4
+
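The fix above is purely about ordering: the saved PC_HI still carries IB_STS-related state in its upper bits, so masking it down to pc[47:32] must only happen after that state has been extracted. A minimal C sketch of the principle, with hypothetical field positions (PC_HI_ADDR_MASK matches the 0x0000ffff mask in the hunks, but the IB_STS position is illustrative, not the real SQ_WAVE encoding):

    #include <stdint.h>

    #define PC_HI_ADDR_MASK  0x0000ffffu  /* pc[47:32] lives in the low 16 bits */
    #define IB_STS_SHIFT     25           /* hypothetical position of the saved bits */
    #define IB_STS_MASK      0x7fu

    static void restore_pc_and_ib_sts(uint32_t saved_pc_hi,
                                      uint32_t *pc_hi, uint32_t *ib_sts)
    {
            /* Extract first: the upper bits of the saved PC_HI still hold state. */
            *ib_sts = (saved_pc_hi >> IB_STS_SHIFT) & IB_STS_MASK;

            /* Mask last: clearing the upper bits earlier would lose IB_STS. */
            *pc_hi = saved_pc_hi & PC_HI_ADDR_MASK;
    }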
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5655-drm-amdkfd-Fix-typos-in-trap-handler-comments.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5655-drm-amdkfd-Fix-typos-in-trap-handler-comments.patch
new file mode 100644
index 00000000..d5302495
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5655-drm-amdkfd-Fix-typos-in-trap-handler-comments.patch
@@ -0,0 +1,85 @@
+From 5959173de5a69d8412edc7adc7c4215c1015b9a8 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Tue, 17 Apr 2018 18:30:24 -0400
+Subject: [PATCH 5655/5725] drm/amdkfd: Fix typos in trap handler comments
+
+Fixed for upstreaming to avoid checkpatch warnings.
+
+Change-Id: Id0e78f1be11c3ee03b2d99bcb9c77cf1221c77fa
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm | 8 ++++----
+ drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm | 8 ++++----
+ 2 files changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
+index 6302402..a2a04bb 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
+@@ -77,7 +77,7 @@ var G8SR_RESTORE_BUF_RSRC_WORD1_STRIDE_DWx4 = G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_D
+ /*************************************************************************/
+ /* control on how to run the shader */
+ /*************************************************************************/
+-//any hack that needs to be made to run this code in EMU (either becasue various EMU code are not ready or no compute save & restore in EMU run)
++//any hack that needs to be made to run this code in EMU (either because various EMU code are not ready or no compute save & restore in EMU run)
+ var EMU_RUN_HACK = 0
+ var EMU_RUN_HACK_RESTORE_NORMAL = 0
+ var EMU_RUN_HACK_SAVE_NORMAL_EXIT = 0
+@@ -91,9 +91,9 @@ var WG_BASE_ADDR_HI = 0x0
+ var WAVE_SPACE = 0x5000 //memory size that each wave occupies in workgroup state mem
+ var CTX_SAVE_CONTROL = 0x0
+ var CTX_RESTORE_CONTROL = CTX_SAVE_CONTROL
+-var SIM_RUN_HACK = 0 //any hack that needs to be made to run this code in SIM (either becasue various RTL code are not ready or no compute save & restore in RTL run)
++var SIM_RUN_HACK = 0 //any hack that needs to be made to run this code in SIM (either because various RTL code are not ready or no compute save & restore in RTL run)
+ var SGPR_SAVE_USE_SQC = 1 //use SQC D$ to do the write
+-var USE_MTBUF_INSTEAD_OF_MUBUF = 0 //becasue TC EMU curently asserts on 0 of // overload DFMT field to carry 4 more bits of stride for MUBUF opcodes
++var USE_MTBUF_INSTEAD_OF_MUBUF = 0 //because TC EMU currently asserts on 0 of // overload DFMT field to carry 4 more bits of stride for MUBUF opcodes
+ var SWIZZLE_EN = 0 //whether we use swizzled buffer addressing
+
+ /**************************************************************************/
+@@ -1055,7 +1055,7 @@ end
+ s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
+ s_setreg_b32 hwreg(HW_REG_STATUS), s_restore_status // SCC is included, which is changed by previous salu
+
+- s_barrier //barrier to ensure the readiness of LDS before access attemps from any other wave in the same TG //FIXME not performance-optimal at this time
++ s_barrier //barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG //FIXME not performance-optimal at this time
+
+ if G8SR_DEBUG_TIMESTAMP
+ s_memrealtime s_g8sr_ts_restore_d
+diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+index fc62fb8..998be96 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+@@ -77,7 +77,7 @@ var G8SR_RESTORE_BUF_RSRC_WORD1_STRIDE_DWx4 = G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_D
+ /*************************************************************************/
+ /* control on how to run the shader */
+ /*************************************************************************/
+-//any hack that needs to be made to run this code in EMU (either becasue various EMU code are not ready or no compute save & restore in EMU run)
++//any hack that needs to be made to run this code in EMU (either because various EMU code are not ready or no compute save & restore in EMU run)
+ var EMU_RUN_HACK = 0
+ var EMU_RUN_HACK_RESTORE_NORMAL = 0
+ var EMU_RUN_HACK_SAVE_NORMAL_EXIT = 0
+@@ -89,9 +89,9 @@ var WG_BASE_ADDR_HI = 0x0
+ var WAVE_SPACE = 0x5000 //memory size that each wave occupies in workgroup state mem
+ var CTX_SAVE_CONTROL = 0x0
+ var CTX_RESTORE_CONTROL = CTX_SAVE_CONTROL
+-var SIM_RUN_HACK = 0 //any hack that needs to be made to run this code in SIM (either becasue various RTL code are not ready or no compute save & restore in RTL run)
++var SIM_RUN_HACK = 0 //any hack that needs to be made to run this code in SIM (either because various RTL code are not ready or no compute save & restore in RTL run)
+ var SGPR_SAVE_USE_SQC = 1 //use SQC D$ to do the write
+-var USE_MTBUF_INSTEAD_OF_MUBUF = 0 //becasue TC EMU curently asserts on 0 of // overload DFMT field to carry 4 more bits of stride for MUBUF opcodes
++var USE_MTBUF_INSTEAD_OF_MUBUF = 0 //because TC EMU currently asserts on 0 of // overload DFMT field to carry 4 more bits of stride for MUBUF opcodes
+ var SWIZZLE_EN = 0 //whether we use swizzled buffer addressing
+ var ACK_SQC_STORE = 1 //workaround for suspected SQC store bug causing incorrect stores under concurrency
+
+@@ -1122,7 +1122,7 @@ end
+ s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
+ s_setreg_b32 hwreg(HW_REG_STATUS), s_restore_status // SCC is included, which is changed by previous salu
+
+- s_barrier //barrier to ensure the readiness of LDS before access attemps from any other wave in the same TG //FIXME not performance-optimal at this time
++ s_barrier //barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG //FIXME not performance-optimal at this time
+
+ if G8SR_DEBUG_TIMESTAMP
+ s_memrealtime s_g8sr_ts_restore_d
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5656-drm-amdkfd-Align-Makefile-with-upstream.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5656-drm-amdkfd-Align-Makefile-with-upstream.patch
new file mode 100644
index 00000000..f122ff9a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5656-drm-amdkfd-Align-Makefile-with-upstream.patch
@@ -0,0 +1,79 @@
+From 082c224b6b5915ac31c2574f3341195334b8db63 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Mon, 23 Apr 2018 21:16:21 -0400
+Subject: [PATCH 5656/5725] drm/amdkfd: Align Makefile with upstream
+
+Changed includes of amd_rdma.h to work with the upstream include paths
+and removed unnecessary inclusion in kfd_priv.h.
+
+Change-Id: Id624bf0e358bd416348986f756be7b51e242fc3c
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/Makefile | 7 ++-----
+ drivers/gpu/drm/amd/amdkfd/kfd_peerdirect.c | 2 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 1 -
+ drivers/gpu/drm/amd/amdkfd/kfd_rdma.c | 2 +-
+ 4 files changed, 4 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile
+index b65537a..66f1921 100644
+--- a/drivers/gpu/drm/amd/amdkfd/Makefile
++++ b/drivers/gpu/drm/amd/amdkfd/Makefile
+@@ -23,11 +23,8 @@
+ # Makefile for Heterogenous System Architecture support for AMD GPU devices
+ #
+
+-FULL_AMD_PATH=$(src)/..
+-
+-ccflags-y := -Iinclude/drm \
+- -I$(FULL_AMD_PATH)/include/ \
+- -I$(FULL_AMD_PATH)/include/asic_reg
++ccflags-y := -Idrivers/gpu/drm/amd/include/ \
++ -Idrivers/gpu/drm/amd/include/asic_reg
+
+ amdkfd-y := kfd_module.o kfd_device.o kfd_chardev.o kfd_topology.o \
+ kfd_pasid.o kfd_doorbell.o kfd_flat_memory.o \
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_peerdirect.c b/drivers/gpu/drm/amd/amdkfd/kfd_peerdirect.c
+index 1b1a0ca..87344cc 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_peerdirect.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_peerdirect.c
+@@ -49,9 +49,9 @@
+ #include <linux/slab.h>
+ #include <linux/scatterlist.h>
+ #include <linux/module.h>
++#include <drm/amd_rdma.h>
+
+ #include "kfd_priv.h"
+-#include "amd_rdma.h"
+
+
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index da27bb9..bb3b020 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -39,7 +39,6 @@
+ #include <linux/kfifo.h>
+ #include <kgd_kfd_interface.h>
+
+-#include "amd_rdma.h"
+ #include "amd_shared.h"
+
+ #define KFD_SYSFS_FILE_MODE 0444
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_rdma.c b/drivers/gpu/drm/amd/amdkfd/kfd_rdma.c
+index 985855f..3454514 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_rdma.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_rdma.c
+@@ -25,7 +25,7 @@
+ #include <linux/pid.h>
+ #include <linux/err.h>
+ #include <linux/slab.h>
+-#include "amd_rdma.h"
++#include <drm/amd_rdma.h>
+ #include "kfd_priv.h"
+
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5657-drm-amdkfd-Align-CIK-interrupt-processing-with-upstr.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5657-drm-amdkfd-Align-CIK-interrupt-processing-with-upstr.patch
new file mode 100644
index 00000000..f9324020
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5657-drm-amdkfd-Align-CIK-interrupt-processing-with-upstr.patch
@@ -0,0 +1,194 @@
+From dc143f18f00607ab184654bf48c60456587ce209 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Mon, 23 Apr 2018 21:59:05 -0400
+Subject: [PATCH 5657/5725] drm/amdkfd: Align CIK interrupt processing with
+ upstream
+
+Remove bitfields from struct cik_ih_ring_entry and use shifts and
+masks instead. Reorder the INTSRC definitions to match upstream.
+Minor clean-up and simplification of VM-fault related code that
+hasn't been upstreamed yet.
+
+Change-Id: I23ded8d8b3b2731bf28517bb84023fa8d1d893cf
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c | 73 +++++++++++++-----------
+ drivers/gpu/drm/amd/amdkfd/cik_int.h | 25 ++------
+ 2 files changed, 46 insertions(+), 52 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
+index 751c004..1261432 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
++++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
+@@ -24,18 +24,13 @@
+ #include "kfd_events.h"
+ #include "cik_int.h"
+
+-static bool is_cpc_vm_fault(struct kfd_dev *dev,
+- const uint32_t *ih_ring_entry)
++static bool is_cpc_vm_fault(struct kfd_dev *dev, uint32_t source_id,
++ unsigned int vmid)
+ {
+- const struct cik_ih_ring_entry *ihre =
+- (const struct cik_ih_ring_entry *)ih_ring_entry;
+-
+- if ((ihre->source_id == CIK_INTSRC_GFX_PAGE_INV_FAULT ||
+- ihre->source_id == CIK_INTSRC_GFX_MEM_PROT_FAULT) &&
+- ihre->vmid >= dev->vm_info.first_vmid_kfd &&
+- ihre->vmid <= dev->vm_info.last_vmid_kfd)
+- return true;
+- return false;
++ return (source_id == CIK_INTSRC_GFX_PAGE_INV_FAULT ||
++ source_id == CIK_INTSRC_GFX_MEM_PROT_FAULT) &&
++ vmid >= dev->vm_info.first_vmid_kfd &&
++ vmid <= dev->vm_info.last_vmid_kfd;
+ }
+
+ static bool cik_event_interrupt_isr(struct kfd_dev *dev,
+@@ -46,8 +41,7 @@ static bool cik_event_interrupt_isr(struct kfd_dev *dev,
+ const struct cik_ih_ring_entry *ihre =
+ (const struct cik_ih_ring_entry *)ih_ring_entry;
+ const struct kfd2kgd_calls *f2g = dev->kfd2kgd;
+- struct cik_ih_ring_entry *tmp_ihre =
+- (struct cik_ih_ring_entry *) patched_ihre;
++ unsigned int vmid, pasid;
+
+ /* This workaround is due to HW/FW limitation on Hawaii that
+ * VMID and PASID are not written into ih_ring_entry
+@@ -55,23 +49,34 @@ static bool cik_event_interrupt_isr(struct kfd_dev *dev,
+ if ((ihre->source_id == CIK_INTSRC_GFX_PAGE_INV_FAULT ||
+ ihre->source_id == CIK_INTSRC_GFX_MEM_PROT_FAULT) &&
+ dev->device_info->asic_family == CHIP_HAWAII) {
++ struct cik_ih_ring_entry *tmp_ihre =
++ (struct cik_ih_ring_entry *)patched_ihre;
++
+ *patched_flag = true;
+ *tmp_ihre = *ihre;
+
+- tmp_ihre->vmid = f2g->read_vmid_from_vmfault_reg(dev->kgd);
+- tmp_ihre->pasid = f2g->get_atc_vmid_pasid_mapping_pasid(
+- dev->kgd, tmp_ihre->vmid);
+- return (tmp_ihre->pasid != 0) &&
+- tmp_ihre->vmid >= dev->vm_info.first_vmid_kfd &&
+- tmp_ihre->vmid <= dev->vm_info.last_vmid_kfd;
++ vmid = f2g->read_vmid_from_vmfault_reg(dev->kgd);
++ pasid = f2g->get_atc_vmid_pasid_mapping_pasid(dev->kgd, vmid);
++
++ tmp_ihre->ring_id &= 0x000000ff;
++ tmp_ihre->ring_id |= vmid << 8;
++ tmp_ihre->ring_id |= pasid << 16;
++
++ return (pasid != 0) &&
++ vmid >= dev->vm_info.first_vmid_kfd &&
++ vmid <= dev->vm_info.last_vmid_kfd;
+ }
++
++ vmid = (ihre->ring_id & 0x0000ff00) >> 8;
++ pasid = (ihre->ring_id & 0xffff0000) >> 16;
++
+ /* Do not process in ISR, just request it to be forwarded to WQ. */
+- return (ihre->pasid != 0) &&
++ return (pasid != 0) &&
+ (ihre->source_id == CIK_INTSRC_CP_END_OF_PIPE ||
+- ihre->source_id == CIK_INTSRC_SDMA_TRAP ||
+- ihre->source_id == CIK_INTSRC_SQ_INTERRUPT_MSG ||
+- ihre->source_id == CIK_INTSRC_CP_BAD_OPCODE ||
+- is_cpc_vm_fault(dev, ih_ring_entry));
++ ihre->source_id == CIK_INTSRC_SDMA_TRAP ||
++ ihre->source_id == CIK_INTSRC_SQ_INTERRUPT_MSG ||
++ ihre->source_id == CIK_INTSRC_CP_BAD_OPCODE ||
++ is_cpc_vm_fault(dev, ihre->source_id, vmid));
+ }
+
+ static void cik_event_interrupt_wq(struct kfd_dev *dev,
+@@ -80,33 +85,35 @@ static void cik_event_interrupt_wq(struct kfd_dev *dev,
+ const struct cik_ih_ring_entry *ihre =
+ (const struct cik_ih_ring_entry *)ih_ring_entry;
+ uint32_t context_id = ihre->data & 0xfffffff;
++ unsigned int vmid = (ihre->ring_id & 0x0000ff00) >> 8;
++ unsigned int pasid = (ihre->ring_id & 0xffff0000) >> 16;
+
+- if (ihre->pasid == 0)
++ if (pasid == 0)
+ return;
+
+ if (ihre->source_id == CIK_INTSRC_CP_END_OF_PIPE)
+- kfd_signal_event_interrupt(ihre->pasid, context_id, 28);
++ kfd_signal_event_interrupt(pasid, context_id, 28);
+ else if (ihre->source_id == CIK_INTSRC_SDMA_TRAP)
+- kfd_signal_event_interrupt(ihre->pasid, context_id, 28);
++ kfd_signal_event_interrupt(pasid, context_id, 28);
+ else if (ihre->source_id == CIK_INTSRC_SQ_INTERRUPT_MSG)
+- kfd_signal_event_interrupt(ihre->pasid, context_id & 0xff, 8);
++ kfd_signal_event_interrupt(pasid, context_id & 0xff, 8);
+ else if (ihre->source_id == CIK_INTSRC_CP_BAD_OPCODE)
+- kfd_signal_hw_exception_event(ihre->pasid);
++ kfd_signal_hw_exception_event(pasid);
+ else if (ihre->source_id == CIK_INTSRC_GFX_PAGE_INV_FAULT ||
+ ihre->source_id == CIK_INTSRC_GFX_MEM_PROT_FAULT) {
+ struct kfd_vm_fault_info info;
+
+- kfd_process_vm_fault(dev->dqm, ihre->pasid);
++ kfd_process_vm_fault(dev->dqm, pasid);
+
+ memset(&info, 0, sizeof(info));
+ dev->kfd2kgd->get_vm_fault_info(dev->kgd, &info);
+ if (!info.page_addr && !info.status)
+ return;
+
+- if (info.vmid == ihre->vmid)
+- kfd_signal_vm_fault_event(dev, ihre->pasid, &info);
++ if (info.vmid == vmid)
++ kfd_signal_vm_fault_event(dev, pasid, &info);
+ else
+- kfd_signal_vm_fault_event(dev, ihre->pasid, NULL);
++ kfd_signal_vm_fault_event(dev, pasid, NULL);
+ }
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/cik_int.h b/drivers/gpu/drm/amd/amdkfd/cik_int.h
+index ff8255d..a2079a0 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cik_int.h
++++ b/drivers/gpu/drm/amd/amdkfd/cik_int.h
+@@ -26,32 +26,19 @@
+ #include <linux/types.h>
+
+ struct cik_ih_ring_entry {
+- uint32_t source_id:8;
+- uint32_t reserved1:8;
+- uint32_t reserved2:16;
+-
+- uint32_t data:28;
+- uint32_t reserved3:4;
+-
+- /* pipeid, meid and unused3 are officially called RINGID,
+- * but for our purposes, they always decode into pipe and ME.
+- */
+- uint32_t pipeid:2;
+- uint32_t meid:2;
+- uint32_t reserved4:4;
+- uint32_t vmid:8;
+- uint32_t pasid:16;
+-
+- uint32_t reserved5;
++ uint32_t source_id;
++ uint32_t data;
++ uint32_t ring_id;
++ uint32_t reserved;
+ };
+
+-#define CIK_INTSRC_DEQUEUE_COMPLETE 0xC6
+ #define CIK_INTSRC_CP_END_OF_PIPE 0xB5
+ #define CIK_INTSRC_CP_BAD_OPCODE 0xB7
++#define CIK_INTSRC_DEQUEUE_COMPLETE 0xC6
++#define CIK_INTSRC_SDMA_TRAP 0xE0
+ #define CIK_INTSRC_SQ_INTERRUPT_MSG 0xEF
+ #define CIK_INTSRC_GFX_PAGE_INV_FAULT 0x92
+ #define CIK_INTSRC_GFX_MEM_PROT_FAULT 0x93
+-#define CIK_INTSRC_SDMA_TRAP 0xE0
+
+ #endif
+
+--
+2.7.4
+
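With the bitfields gone, VMID and PASID are decoded from the RINGID word with plain shifts and masks, as the hunks above do in both the ISR and the worker. A standalone sketch of that decode (the helper name is ours; the patch open-codes it, and the masks are taken verbatim from the hunks):

    #include <linux/types.h>

    struct cik_ih_ring_entry {
            uint32_t source_id;
            uint32_t data;
            uint32_t ring_id;
            uint32_t reserved;
    };

    static void cik_ih_decode(const struct cik_ih_ring_entry *ihre,
                              unsigned int *vmid, unsigned int *pasid,
                              uint32_t *context_id)
    {
            *vmid       = (ihre->ring_id & 0x0000ff00) >> 8;
            *pasid      = (ihre->ring_id & 0xffff0000) >> 16;
            *context_id =  ihre->data    & 0x0fffffff;
    }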
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5658-drm-amdkfd-Remove-IH-patching-workaround-for-Vega10.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5658-drm-amdkfd-Remove-IH-patching-workaround-for-Vega10.patch
new file mode 100644
index 00000000..205e57e5
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5658-drm-amdkfd-Remove-IH-patching-workaround-for-Vega10.patch
@@ -0,0 +1,92 @@
+From 713f0d3793452f4fccf30e32847b20e2c6ba2cf1 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Wed, 25 Apr 2018 17:06:33 -0400
+Subject: [PATCH 5658/5725] drm/amdkfd: Remove IH patching workaround for
+ Vega10
+
+Early CP firmware during bring-up failed to set the pasid in the IH
+ring entries. We had a racy driver workaround at the time. Current
+production firmware no longer requires this hack.
+
+Change-Id: Iccd0a4412918645e0b985be9eb9bb2aaeb486d37
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c | 49 ++++---------------------
+ 1 file changed, 7 insertions(+), 42 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+index 009d6f4..728aaad 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+@@ -25,24 +25,12 @@
+ #include "soc15_int.h"
+
+
+-static uint32_t kfd_get_pasid_from_vmid(struct kfd_dev *dev, uint8_t vmid)
+-{
+- uint32_t pasid = 0;
+- const struct kfd2kgd_calls *f2g = dev->kfd2kgd;
+-
+- if (f2g->get_atc_vmid_pasid_mapping_valid(dev->kgd, vmid))
+- pasid = f2g->get_atc_vmid_pasid_mapping_pasid(dev->kgd, vmid);
+-
+- return pasid;
+-}
+-
+ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
+ const uint32_t *ih_ring_entry,
+ uint32_t *patched_ihre,
+ bool *patched_flag)
+ {
+ uint16_t source_id, client_id, pasid, vmid;
+- bool result = false;
+
+ source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry);
+ client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
+@@ -59,36 +47,13 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
+ data[4], data[5], data[6], data[7]);
+ }
+
+- if ((vmid >= dev->vm_info.first_vmid_kfd &&
+- vmid <= dev->vm_info.last_vmid_kfd) &&
+- (source_id == SOC15_INTSRC_CP_END_OF_PIPE ||
+- source_id == SOC15_INTSRC_SDMA_TRAP ||
+- source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG ||
+- source_id == SOC15_INTSRC_CP_BAD_OPCODE ||
+- client_id == SOC15_IH_CLIENTID_VMC ||
+- client_id == SOC15_IH_CLIENTID_UTCL2)) {
+-
+- /*
+- * KFD want to handle this INT, but MEC firmware did
+- * not send pasid. Try to get it from vmid mapping
+- * and patch the ih entry. It's a temp workaround.
+- */
+- WARN_ONCE((!pasid), "Fix me.\n");
+- if (!pasid) {
+- uint32_t temp = le32_to_cpu(ih_ring_entry[3]);
+-
+- pasid = kfd_get_pasid_from_vmid(dev, vmid);
+- memcpy(patched_ihre, ih_ring_entry,
+- dev->device_info->ih_ring_entry_size);
+- patched_ihre[3] = cpu_to_le32(temp | pasid);
+- *patched_flag = true;
+- }
+- result = pasid ? true : false;
+- }
+-
+- /* Do not process in ISR, just request it to be forwarded to WQ. */
+- return result;
+-
++ return (pasid != 0) &&
++ (source_id == SOC15_INTSRC_CP_END_OF_PIPE ||
++ source_id == SOC15_INTSRC_SDMA_TRAP ||
++ source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG ||
++ source_id == SOC15_INTSRC_CP_BAD_OPCODE ||
++ client_id == SOC15_IH_CLIENTID_VMC ||
++ client_id == SOC15_IH_CLIENTID_UTCL2);
+ }
+
+ static void event_interrupt_wq_v9(struct kfd_dev *dev,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5659-drm-amdkfd-Clean-up-mmap-handling.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5659-drm-amdkfd-Clean-up-mmap-handling.patch
new file mode 100644
index 00000000..99acdcfa
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5659-drm-amdkfd-Clean-up-mmap-handling.patch
@@ -0,0 +1,142 @@
+From ab152eae9ef649cd118b265e9230d127ce061b2f Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Mon, 23 Apr 2018 22:36:47 -0400
+Subject: [PATCH 5659/5725] drm/amdkfd: Clean up mmap handling
+
+Remove reserved bits in mmap addresses. The mmap offset is no longer
+used for TTM/DRM mappings. So it makes no sense to encode TTM/DRM
+address space limitations.
+
+Centralize encoding and parsing of the GPU ID in the mmap offset.
+
+Cosmetic changes to match upstream.
+
+Change-Id: I5edb10d257006ee915534756d9b0e28381c889ef
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 27 +++++++++++++--------------
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 9 ++++-----
+ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 10 ++++------
+ 3 files changed, 21 insertions(+), 25 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index 0190734..99a29f7 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -2670,34 +2670,33 @@ static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
+ static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
+ {
+ struct kfd_process *process;
+- struct kfd_dev *kfd;
++ struct kfd_dev *dev = NULL;
+ unsigned long vm_pgoff;
+- unsigned long long mmap_type;
++ unsigned int gpu_id;
+
+ process = kfd_get_process(current);
+ if (IS_ERR(process))
+ return PTR_ERR(process);
+
+ vm_pgoff = vma->vm_pgoff;
+- vma->vm_pgoff = KFD_MMAP_OFFSET_VALUE_GET(vma->vm_pgoff);
+- mmap_type = vm_pgoff & KFD_MMAP_TYPE_MASK;
++ vma->vm_pgoff = KFD_MMAP_OFFSET_VALUE_GET(vm_pgoff);
++ gpu_id = KFD_MMAP_GPU_ID_GET(vm_pgoff);
++ if (gpu_id)
++ dev = kfd_device_by_id(gpu_id);
+
+- switch (mmap_type) {
++ switch (vm_pgoff & KFD_MMAP_TYPE_MASK) {
+ case KFD_MMAP_TYPE_DOORBELL:
+- kfd = kfd_device_by_id(KFD_MMAP_GPU_ID_GET(vm_pgoff));
+- if (!kfd)
+- return -EFAULT;
+- return kfd_doorbell_mmap(kfd, process, vma);
++ if (!dev)
++ return -ENODEV;
++ return kfd_doorbell_mmap(dev, process, vma);
+
+ case KFD_MMAP_TYPE_EVENTS:
+ return kfd_event_mmap(process, vma);
+
+ case KFD_MMAP_TYPE_RESERVED_MEM:
+- return kfd_reserved_mem_mmap(process, vma);
+-
+- default:
+- pr_err("Unsupported kfd mmap type %llx\n", mmap_type);
+- break;
++ if (!dev)
++ return -ENODEV;
++ return kfd_reserved_mem_mmap(dev, process, vma);
+ }
+
+ return -EFAULT;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index bb3b020..e9c64ef 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -49,8 +49,7 @@
+ /* Use upper bits of mmap offset to store KFD driver specific information.
+ * BITS[63:62] - Encode MMAP type
+ * BITS[61:46] - Encode gpu_id. To identify to which GPU the offset belongs to
+- * BITS[45:40] - Reserved. Not Used.
+- * BITS[39:0] - MMAP offset value. Used by TTM.
++ * BITS[45:0] - MMAP offset value
+ *
+ * NOTE: struct vm_area_struct.vm_pgoff uses offset in pages. Hence, these
+ * defines are w.r.t to PAGE_SIZE
+@@ -69,7 +68,7 @@
+ #define KFD_MMAP_GPU_ID_GET(offset) ((offset & KFD_MMAP_GPU_ID_MASK) \
+ >> KFD_MMAP_GPU_ID_SHIFT)
+
+-#define KFD_MMAP_OFFSET_VALUE_MASK (0xFFFFFFFFFFULL >> PAGE_SHIFT)
++#define KFD_MMAP_OFFSET_VALUE_MASK (0x3FFFFFFFFFFFULL >> PAGE_SHIFT)
+ #define KFD_MMAP_OFFSET_VALUE_GET(offset) (offset & KFD_MMAP_OFFSET_VALUE_MASK)
+
+ /*
+@@ -817,8 +816,8 @@ struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
+ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
+ struct kfd_process *p);
+
+-int kfd_reserved_mem_mmap(struct kfd_process *process,
+- struct vm_area_struct *vma);
++int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
++ struct vm_area_struct *vma);
+
+ /* KFD process API for creating and translating handles */
+ int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index 78ccac0..7459d39 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -484,7 +484,8 @@ static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
+ if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
+ continue;
+
+- offset = (dev->id | KFD_MMAP_TYPE_RESERVED_MEM) << PAGE_SHIFT;
++ offset = (KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id))
++ << PAGE_SHIFT;
+ qpd->tba_addr = (uint64_t)vm_mmap(filep, 0,
+ KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
+ MAP_SHARED, offset);
+@@ -1152,15 +1153,12 @@ static void restore_process_worker(struct work_struct *work)
+ pr_info("Finished restoring process of pasid %d\n", p->pasid);
+ }
+
+-int kfd_reserved_mem_mmap(struct kfd_process *process,
+- struct vm_area_struct *vma)
++int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
++ struct vm_area_struct *vma)
+ {
+- struct kfd_dev *dev = kfd_device_by_id(vma->vm_pgoff);
+ struct kfd_process_device *pdd;
+ struct qcm_process_device *qpd;
+
+- if (!dev)
+- return -EINVAL;
+ if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
+ pr_err("Incorrect CWSR mapping size.\n");
+ return -EINVAL;
+--
+2.7.4
+
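After this clean-up the mmap offset carries only a 2-bit type, the gpu_id and a 46-bit value, and both directions go through the kfd_priv.h macros shown above. A sketch of the round trip under that assumption (the encode helper is ours for illustration; the driver builds the offset inline in kfd_process_init_cwsr_apu and decodes it in kfd_mmap):

    /* Encode: byte offset handed to vm_mmap() for a CWSR reserved-mem mapping. */
    static loff_t kfd_cwsr_mmap_offset(struct kfd_dev *dev)
    {
            /* Type and gpu_id are defined in page units, hence the PAGE_SHIFT. */
            return (KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id))
                    << PAGE_SHIFT;
    }

    /* Decode: recover the driver bits from vma->vm_pgoff inside kfd_mmap(). */
    static void kfd_mmap_decode(struct vm_area_struct *vma,
                                unsigned int *gpu_id, unsigned long *type)
    {
            unsigned long vm_pgoff = vma->vm_pgoff;

            *gpu_id = KFD_MMAP_GPU_ID_GET(vm_pgoff);
            *type   = vm_pgoff & KFD_MMAP_TYPE_MASK;

            /* Leave only the plain offset value for the per-type handlers. */
            vma->vm_pgoff = KFD_MMAP_OFFSET_VALUE_GET(vm_pgoff);
    }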
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5660-drm-amdkfd-fix-uninitialized-variable-use.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5660-drm-amdkfd-fix-uninitialized-variable-use.patch
new file mode 100644
index 00000000..6bdbc5ad
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5660-drm-amdkfd-fix-uninitialized-variable-use.patch
@@ -0,0 +1,42 @@
+From c91b2d182335283fb57db60089656ceabbe9064a Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Thu, 15 Mar 2018 17:49:40 +0100
+Subject: [PATCH 5660/5725] drm/amdkfd: fix uninitialized variable use
+
+When CONFIG_ACPI is disabled, we never initialize the acpi_table
+structure in kfd_create_crat_image_virtual:
+
+drivers/gpu/drm/amd/amdkfd/kfd_crat.c: In function 'kfd_create_crat_image_virtual':
+drivers/gpu/drm/amd/amdkfd/kfd_crat.c:888:40: error: 'acpi_table' may be used uninitialized in this function [-Werror=maybe-uninitialized]
+
+The undefined behavior also happens for any other acpi_get_table()
+failure, but then the compiler can't warn about it.
+
+This adds an error check that prevents the structure from
+being used in error, avoiding both the undefined behavior and
+the warning about it.
+
+Change-Id: Ib90f712f6964f4c94f65f8c3a9153ac51f65504c
+Fixes: 520b8fb755cc ("drm/amdkfd: Add topology support for CPUs")
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+index 24d0634..a803898 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+@@ -915,7 +915,7 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
+
+ #ifdef CONFIG_ACPI
+ status = acpi_get_table("DSDT", 0, &acpi_table);
+- if (status == AE_NOT_FOUND)
++ if (status != AE_OK)
+ pr_warn("DSDT table not found for OEM information\n");
+ else {
+ crat_table->oem_revision = acpi_table->revision;
+--
+2.7.4
+
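The fix is the standard "check the status before touching the output argument" pattern for acpi_get_table(): anything other than AE_OK means acpi_table was never written. The shape of the call site after the change (OEM fields other than the revision are elided, as in the hunk):

    #ifdef CONFIG_ACPI
            status = acpi_get_table("DSDT", 0, &acpi_table);
            if (status != AE_OK) {
                    /* acpi_table is uninitialized on any failure; don't use it. */
                    pr_warn("DSDT table not found for OEM information\n");
            } else {
                    crat_table->oem_revision = acpi_table->revision;
                    /* ... remaining OEM fields copied from acpi_table ... */
            }
    #endif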
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5661-drm-amdkfd-Fix-kernel-queue-rollback-for-64-bit-wptr.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5661-drm-amdkfd-Fix-kernel-queue-rollback-for-64-bit-wptr.patch
new file mode 100644
index 00000000..a98a21d0
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5661-drm-amdkfd-Fix-kernel-queue-rollback-for-64-bit-wptr.patch
@@ -0,0 +1,34 @@
+From 1b679043b6299098208d8d4f08120ab5ed5d2cbf Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Tue, 24 Apr 2018 00:05:49 -0400
+Subject: [PATCH 5661/5725] drm/amdkfd: Fix kernel queue rollback for 64-bit
+ wptr
+
+Change-Id: I41afaa534cf23ba77e522729bc113d258b402b46
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+index 8cf9d44..51b976d 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+@@ -315,7 +315,13 @@ static void submit_packet(struct kernel_queue *kq)
+
+ static void rollback_packet(struct kernel_queue *kq)
+ {
+- kq->pending_wptr = *kq->queue->properties.write_ptr;
++ if (kq->dev->device_info->doorbell_size == 8) {
++ kq->pending_wptr64 = *kq->wptr64_kernel;
++ kq->pending_wptr = *kq->wptr_kernel %
++ (kq->queue->properties.queue_size / 4);
++ } else {
++ kq->pending_wptr = *kq->wptr_kernel;
++ }
+ }
+
+ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
+--
+2.7.4
+
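On ASICs with 8-byte doorbells the kernel queue tracks both a free-running 64-bit write pointer and a 32-bit shadow wrapped to the ring size, so a rollback has to restore both. The same code as the hunk, with comments spelling that out:

    static void rollback_packet(struct kernel_queue *kq)
    {
            if (kq->dev->device_info->doorbell_size == 8) {
                    /* 64-bit doorbell: the 64-bit wptr never wraps, while the
                     * 32-bit shadow is kept modulo the ring size in dwords.
                     */
                    kq->pending_wptr64 = *kq->wptr64_kernel;
                    kq->pending_wptr = *kq->wptr_kernel %
                            (kq->queue->properties.queue_size / 4);
            } else {
                    /* 32-bit doorbell: a single wrapped write pointer suffices. */
                    kq->pending_wptr = *kq->wptr_kernel;
            }
    }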
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5662-drm-amdkfd-Match-release_mem-interface-with-other-PM.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5662-drm-amdkfd-Match-release_mem-interface-with-other-PM.patch
new file mode 100644
index 00000000..0bbeeeac
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5662-drm-amdkfd-Match-release_mem-interface-with-other-PM.patch
@@ -0,0 +1,107 @@
+From 470406effaeb6612042eb4584c6ba5fecd55d221 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Tue, 24 Apr 2018 17:59:05 -0400
+Subject: [PATCH 5662/5725] drm/amdkfd: Match release_mem interface with other
+ PM functions
+
+Return an error status instead of the length of the packet. The
+packet size can be read from pmf->release_mem_size. This makes the
+interface consistent with the other packet manager functions.
+
+Change-Id: I1980dc11738b9233aa549044afd5f3c6c564ff60
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 11 +++++++----
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c | 4 ++--
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c | 4 ++--
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 2 +-
+ 4 files changed, 12 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index e60aaf8..8067092 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -208,16 +208,19 @@ static int allocate_vmid(struct device_queue_manager *dqm,
+ static int flush_texture_cache_nocpsch(struct kfd_dev *kdev,
+ struct qcm_process_device *qpd)
+ {
+- uint32_t len;
++ const struct packet_manager_funcs *pmf = qpd->dqm->packets.pmf;
++ int ret;
+
+ if (!qpd->ib_kaddr)
+ return -ENOMEM;
+
+- len = qpd->dqm->packets.pmf->release_mem(qpd->ib_base,
+- (uint32_t *)qpd->ib_kaddr);
++ ret = pmf->release_mem(qpd->ib_base, (uint32_t *)qpd->ib_kaddr);
++ if (ret)
++ return ret;
+
+ return kdev->kfd2kgd->submit_ib(kdev->kgd, KGD_ENGINE_MEC1, qpd->vmid,
+- qpd->ib_base, (uint32_t *)qpd->ib_kaddr, len);
++ qpd->ib_base, (uint32_t *)qpd->ib_kaddr,
++ pmf->release_mem_size / sizeof(uint32_t));
+ }
+
+ static void deallocate_vmid(struct device_queue_manager *dqm,
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
+index f311f13..c6d5a33 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
+@@ -295,7 +295,7 @@ static int pm_query_status_v9(struct packet_manager *pm, uint32_t *buffer,
+ }
+
+
+-static uint32_t pm_release_mem_v9(uint64_t gpu_addr, uint32_t *buffer)
++static int pm_release_mem_v9(uint64_t gpu_addr, uint32_t *buffer)
+ {
+ struct pm4_mec_release_mem *packet;
+
+@@ -320,7 +320,7 @@ static uint32_t pm_release_mem_v9(uint64_t gpu_addr, uint32_t *buffer)
+
+ packet->data_lo = 0;
+
+- return sizeof(struct pm4_mec_release_mem) / sizeof(unsigned int);
++ return 0;
+ }
+
+ static struct packet_manager_funcs kfd_v9_pm_funcs = {
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
+index 178c5d0..a1a2e7b 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
+@@ -329,7 +329,7 @@ static int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer,
+ }
+
+
+-static uint32_t pm_release_mem_vi(uint64_t gpu_addr, uint32_t *buffer)
++static int pm_release_mem_vi(uint64_t gpu_addr, uint32_t *buffer)
+ {
+ struct pm4_mec_release_mem *packet;
+
+@@ -355,7 +355,7 @@ static uint32_t pm_release_mem_vi(uint64_t gpu_addr, uint32_t *buffer)
+
+ packet->data_lo = 0;
+
+- return sizeof(struct pm4_mec_release_mem) / sizeof(unsigned int);
++ return 0;
+ }
+
+ static struct packet_manager_funcs kfd_vi_pm_funcs = {
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index e9c64ef..52740ae 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -1011,7 +1011,7 @@ struct packet_manager_funcs {
+ unsigned int sdma_engine);
+ int (*query_status)(struct packet_manager *pm, uint32_t *buffer,
+ uint64_t fence_address, uint32_t fence_value);
+- uint32_t (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);
++ int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);
+
+ /* Packet sizes */
+ int map_process_size;
+--
+2.7.4
+
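Callers now learn the packet length from pmf->release_mem_size rather than from the return value, which keeps release_mem() consistent with the other packet_manager_funcs entries. The caller-side pattern, as in flush_texture_cache_nocpsch() above:

    ret = pmf->release_mem(qpd->ib_base, (uint32_t *)qpd->ib_kaddr);
    if (ret)
            return ret;

    /* Packet length in dwords comes from the table, not the return value. */
    return kdev->kfd2kgd->submit_ib(kdev->kgd, KGD_ENGINE_MEC1, qpd->vmid,
                                    qpd->ib_base, (uint32_t *)qpd->ib_kaddr,
                                    pmf->release_mem_size / sizeof(uint32_t));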
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5663-drm-amdkfd-Simplify-packet-manager-initialization.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5663-drm-amdkfd-Simplify-packet-manager-initialization.patch
new file mode 100644
index 00000000..81745ccd
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5663-drm-amdkfd-Simplify-packet-manager-initialization.patch
@@ -0,0 +1,117 @@
+From ec949130a270007e1ed2ebbd91b40134fbce8352 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Tue, 24 Apr 2018 18:05:50 -0400
+Subject: [PATCH 5663/5725] drm/amdkfd: Simplify packet manager initialization
+
+Assign the function tables directly instead of doing it in a one-line
+function. Also making the tables const while I'm at it.
+
+Change-Id: If03fe1f89fc5badf15f3e9dd356b44601152bd7b
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c | 8 +-------
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c | 7 +------
+ drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 4 ++--
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 9 ++++-----
+ 4 files changed, 8 insertions(+), 20 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
+index c6d5a33..6724b1a 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
+@@ -323,7 +323,7 @@ static int pm_release_mem_v9(uint64_t gpu_addr, uint32_t *buffer)
+ return 0;
+ }
+
+-static struct packet_manager_funcs kfd_v9_pm_funcs = {
++const struct packet_manager_funcs kfd_v9_pm_funcs = {
+ .map_process = pm_map_process_v9,
+ .runlist = pm_runlist_v9,
+ .set_resources = pm_set_resources_vi,
+@@ -339,9 +339,3 @@ static struct packet_manager_funcs kfd_v9_pm_funcs = {
+ .query_status_size = sizeof(struct pm4_mes_query_status),
+ .release_mem_size = sizeof(struct pm4_mec_release_mem)
+ };
+-
+-void kfd_pm_func_init_v9(struct packet_manager *pm)
+-{
+- pm->pmf = &kfd_v9_pm_funcs;
+-}
+-
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
+index a1a2e7b..357478f 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
+@@ -358,7 +358,7 @@ static int pm_release_mem_vi(uint64_t gpu_addr, uint32_t *buffer)
+ return 0;
+ }
+
+-static struct packet_manager_funcs kfd_vi_pm_funcs = {
++const struct packet_manager_funcs kfd_vi_pm_funcs = {
+ .map_process = pm_map_process_vi,
+ .runlist = pm_runlist_vi,
+ .set_resources = pm_set_resources_vi,
+@@ -374,8 +374,3 @@ static struct packet_manager_funcs kfd_vi_pm_funcs = {
+ .query_status_size = sizeof(struct pm4_mes_query_status),
+ .release_mem_size = sizeof(struct pm4_mec_release_mem)
+ };
+-
+-void kfd_pm_func_init_vi(struct packet_manager *pm)
+-{
+- pm->pmf = &kfd_vi_pm_funcs;
+-}
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+index 699352b..bc6e854 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+@@ -237,11 +237,11 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
+ case CHIP_FIJI:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+- kfd_pm_func_init_vi(pm);
++ pm->pmf = &kfd_vi_pm_funcs;
+ break;
+ case CHIP_VEGA10:
+ case CHIP_RAVEN:
+- kfd_pm_func_init_v9(pm);
++ pm->pmf = &kfd_v9_pm_funcs;
+ break;
+ default:
+ BUG();
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index 52740ae..8513abf 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -991,7 +991,7 @@ struct packet_manager {
+ struct kfd_mem_obj *ib_buffer_obj;
+ unsigned int ib_size_bytes;
+
+- struct packet_manager_funcs *pmf;
++ const struct packet_manager_funcs *pmf;
+ };
+
+ struct packet_manager_funcs {
+@@ -1023,6 +1023,9 @@ struct packet_manager_funcs {
+ int release_mem_size;
+ };
+
++extern const struct packet_manager_funcs kfd_vi_pm_funcs;
++extern const struct packet_manager_funcs kfd_v9_pm_funcs;
++
+ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
+ void pm_uninit(struct packet_manager *pm);
+ int pm_send_set_resources(struct packet_manager *pm,
+@@ -1043,10 +1046,6 @@ unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size);
+ int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer,
+ struct scheduling_resources *res);
+
+-void kfd_pm_func_init_vi(struct packet_manager *pm);
+-void kfd_pm_func_init_v9(struct packet_manager *pm);
+-
+-
+ uint64_t kfd_get_number_elems(struct kfd_dev *kfd);
+
+ /* Events */
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5664-drm-amdkfd-Fix-error-handling-in-pm_init.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5664-drm-amdkfd-Fix-error-handling-in-pm_init.patch
new file mode 100644
index 00000000..8a71793e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5664-drm-amdkfd-Fix-error-handling-in-pm_init.patch
@@ -0,0 +1,61 @@
+From 7c12ed786fd6619c009a95ea5e9601805ec5d380 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Wed, 25 Apr 2018 17:50:18 -0400
+Subject: [PATCH 5664/5725] drm/amdkfd: Fix error handling in pm_init
+
+Avoid BUG_ON. To avoid the need for cleaning up the kernel queue in
+case of an error, do the pm->pmf initialization first.
+
+Change-Id: I8260eacbff5101205aeab26d28a6f106eff5b00b
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 24 +++++++++++++-----------
+ 1 file changed, 13 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+index bc6e854..c6f3218 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+@@ -219,16 +219,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
+
+ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
+ {
+- pm->dqm = dqm;
+- mutex_init(&pm->lock);
+- pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
+- if (!pm->priv_queue) {
+- mutex_destroy(&pm->lock);
+- return -ENOMEM;
+- }
+- pm->allocated = false;
+-
+- switch (pm->dqm->dev->device_info->asic_family) {
++ switch (dqm->dev->device_info->asic_family) {
+ case CHIP_KAVERI:
+ case CHIP_HAWAII:
+ /* PM4 packet structures on CIK are the same as on VI */
+@@ -244,9 +235,20 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
+ pm->pmf = &kfd_v9_pm_funcs;
+ break;
+ default:
+- BUG();
++ WARN(1, "Unexpected ASIC family %u",
++ dqm->dev->device_info->asic_family);
++ return -EINVAL;
+ }
+
++ pm->dqm = dqm;
++ mutex_init(&pm->lock);
++ pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
++ if (!pm->priv_queue) {
++ mutex_destroy(&pm->lock);
++ return -ENOMEM;
++ }
++ pm->allocated = false;
++
+ return 0;
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5665-drm-amdkfd-Fix-pm_debugfs_runlist.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5665-drm-amdkfd-Fix-pm_debugfs_runlist.patch
new file mode 100644
index 00000000..3a06de62
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5665-drm-amdkfd-Fix-pm_debugfs_runlist.patch
@@ -0,0 +1,49 @@
+From d3e8bb3cdfdc8d79fd2bad788dbfd4de0dec09c6 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Wed, 25 Apr 2018 17:53:13 -0400
+Subject: [PATCH 5665/5725] drm/amdkfd: Fix pm_debugfs_runlist
+
+Guard it with #if defined(CONFIG_DEBUG_FS) and take the pm->lock while
+dumping the runlist IB.
+
+Change-Id: If52078d2003c34b44e8f19996c0263d6211dea13
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+index c6f3218..cd380ad 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+@@ -400,17 +400,25 @@ void pm_release_ib(struct packet_manager *pm)
+ mutex_unlock(&pm->lock);
+ }
+
++#if defined(CONFIG_DEBUG_FS)
++
+ int pm_debugfs_runlist(struct seq_file *m, void *data)
+ {
+ struct packet_manager *pm = data;
+
++ mutex_lock(&pm->lock);
++
+ if (!pm->allocated) {
+ seq_puts(m, " No active runlist\n");
+- return 0;
++ goto out;
+ }
+
+ seq_hex_dump(m, " ", DUMP_PREFIX_OFFSET, 32, 4,
+ pm->ib_buffer_obj->cpu_ptr, pm->ib_size_bytes, false);
+
++out:
++ mutex_unlock(&pm->lock);
+ return 0;
+ }
++
++#endif
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5666-drm-amdkfd-Check-ctx_save_restore_area_address.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5666-drm-amdkfd-Check-ctx_save_restore_area_address.patch
new file mode 100644
index 00000000..20cdd0d4
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5666-drm-amdkfd-Check-ctx_save_restore_area_address.patch
@@ -0,0 +1,63 @@
+From b71eb443cca193b451af611cf2951d1ec925abfd Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Wed, 25 Apr 2018 17:34:07 -0400
+Subject: [PATCH 5666/5725] drm/amdkfd: Check ctx_save_restore_area_address
+
+Only program cp_hqd_ctx_save_control if the save restore area has a
+valid virtual address. Otherwise save restore cannot be safely
+enabled for a queue.
+
+Change-Id: Ibcf19713068c5733988f8a4472755d56d2e72d8b
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 4 ++--
+ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c | 4 ++--
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+index 5118995..f4eced5 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+@@ -159,7 +159,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
+ (1 << COMPUTE_PGM_RSRC2__TRAP_PRESENT__SHIFT);
+ }
+
+- if (mm->dev->cwsr_enabled) {
++ if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address) {
+ m->cp_hqd_persistent_state |=
+ (1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT);
+ m->cp_hqd_ctx_save_base_addr_lo =
+@@ -249,7 +249,7 @@ static int update_mqd(struct mqd_manager *mm, void *mqd,
+ if (priv_cp_queues)
+ m->cp_hqd_pq_control |=
+ 1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT;
+- if (mm->dev->cwsr_enabled)
++ if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address)
+ m->cp_hqd_ctx_save_control = 0;
+
+ update_cu_mask(mm, mqd, q);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+index eff7580..c537f37 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+@@ -159,7 +159,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
+ (1 << COMPUTE_PGM_RSRC2__TRAP_PRESENT__SHIFT);
+ }
+
+- if (mm->dev->cwsr_enabled) {
++ if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address) {
+ m->cp_hqd_persistent_state |=
+ (1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT);
+ m->cp_hqd_ctx_save_base_addr_lo =
+@@ -254,7 +254,7 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
+ if (priv_cp_queues)
+ m->cp_hqd_pq_control |=
+ 1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT;
+- if (mm->dev->cwsr_enabled)
++ if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address)
+ m->cp_hqd_ctx_save_control =
+ atc_bit << CP_HQD_CTX_SAVE_CONTROL__ATC__SHIFT |
+ mtype << CP_HQD_CTX_SAVE_CONTROL__MTYPE__SHIFT;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5667-drm-amdkfd-Fix-error-handling-around-kfd_process_cre.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5667-drm-amdkfd-Fix-error-handling-around-kfd_process_cre.patch
new file mode 100644
index 00000000..f0cadd3a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5667-drm-amdkfd-Fix-error-handling-around-kfd_process_cre.patch
@@ -0,0 +1,64 @@
+From f8a6cd4103937c915212ef74e85167d09b3c513c Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Tue, 24 Apr 2018 18:26:51 -0400
+Subject: [PATCH 5667/5725] drm/amdkfd: Fix error handling around
+ kfd_process_create_wq
+
+Change-Id: Ic4c2b210db0cd248e82916f7f4b04b6c2071ed69
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_module.c | 5 +++--
+ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 7 ++++++-
+ 2 files changed, 9 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+index a05f734..261657f 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+@@ -102,7 +102,6 @@ MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (defau
+
+ static int amdkfd_init_completed;
+
+-
+ int kgd2kfd_init(unsigned int interface_version,
+ const struct kgd2kfd_calls **g2f)
+ {
+@@ -155,7 +154,7 @@ static int __init kfd_module_init(void)
+
+ err = kfd_ipc_init();
+ if (err < 0)
+- goto err_topology;
++ goto err_ipc;
+
+ err = kfd_process_create_wq();
+ if (err < 0)
+@@ -172,6 +171,8 @@ static int __init kfd_module_init(void)
+ return 0;
+
+ err_create_wq:
++err_ipc:
++ kfd_topology_shutdown();
+ err_topology:
+ kfd_chardev_exit();
+ err_ioctl:
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index 7459d39..4f2f285 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -80,7 +80,12 @@ int kfd_process_create_wq(void)
+ if (!kfd_restore_wq)
+ kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq", 0);
+
+- return kfd_process_wq && kfd_restore_wq ? 0 : -ENOMEM;
++ if (!kfd_process_wq || !kfd_restore_wq) {
++ kfd_process_destroy_wq();
++ return -ENOMEM;
++ }
++
++ return 0;
+ }
+
+ void kfd_process_destroy_wq(void)
+--
+2.7.4
+
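With both allocations checked together, a partial failure (one workqueue allocated, the other not) is unwound through the destroy helper instead of being leaked. The shape of the fixed function, with the first allocation left as unchanged context just like in the hunk:

    int kfd_process_create_wq(void)
    {
            /* ... kfd_process_wq allocated above (unchanged context, not shown) ... */
            if (!kfd_restore_wq)
                    kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq", 0);

            if (!kfd_process_wq || !kfd_restore_wq) {
                    /* Tear down whichever allocation did succeed. */
                    kfd_process_destroy_wq();
                    return -ENOMEM;
            }

            return 0;
    }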
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5668-drm-amdkfd-Fix-error-handling-in-APU-CWSR-mapping.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5668-drm-amdkfd-Fix-error-handling-in-APU-CWSR-mapping.patch
new file mode 100644
index 00000000..e1bdfedd
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5668-drm-amdkfd-Fix-error-handling-in-APU-CWSR-mapping.patch
@@ -0,0 +1,34 @@
+From a165a7269599f8b0cab75796ba2b83951591c075 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Wed, 25 Apr 2018 18:21:26 -0400
+Subject: [PATCH 5668/5725] drm/amdkfd: Fix error handling in APU CWSR mapping
+
+Change-Id: Id808e7e2161be85ae771440d7fbcff087ba6154b
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index 4f2f285..b80018e 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -496,11 +496,12 @@ static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
+ MAP_SHARED, offset);
+
+ if (IS_ERR_VALUE(qpd->tba_addr)) {
+- pr_err("Failure to set tba address. error -%d.\n",
+- (int)qpd->tba_addr);
++ int err = qpd->tba_addr;
++
++ pr_err("Failure to set tba address. error %d.\n", err);
+ qpd->tba_addr = 0;
+ qpd->cwsr_kaddr = NULL;
+- return -ENOMEM;
++ return err;
+ }
+
+ memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);
+--
+2.7.4
+
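vm_mmap() returns the mapped address as an unsigned long with errors encoded as small negative values in the same integer, so the check uses IS_ERR_VALUE() and the real errno is recovered by a cast instead of being replaced with a blanket -ENOMEM. The error path with comments:

    if (IS_ERR_VALUE(qpd->tba_addr)) {
            int err = qpd->tba_addr;        /* recover the negative errno */

            pr_err("Failure to set tba address. error %d.\n", err);
            qpd->tba_addr = 0;
            qpd->cwsr_kaddr = NULL;
            return err;                     /* propagate it instead of -ENOMEM */
    }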
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5669-drm-amdkfd-Simplify-error-handling-in-kfd_create_pro.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5669-drm-amdkfd-Simplify-error-handling-in-kfd_create_pro.patch
new file mode 100644
index 00000000..ebb8ce6a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5669-drm-amdkfd-Simplify-error-handling-in-kfd_create_pro.patch
@@ -0,0 +1,54 @@
+From 7d50d2ad1878b9a7ff483323ff6c77ab7a732bb8 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Wed, 25 Apr 2018 18:23:39 -0400
+Subject: [PATCH 5669/5725] drm/amdkfd: Simplify error handling in
+ kfd_create_process_device_data
+
+Call init_doorbell_bitmap early to avoid excessive cleanup on failure.
+
+Change-Id: I59667a6313b0fb8192761a3287461f0a3d438928
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 17 ++++++-----------
+ 1 file changed, 6 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index b80018e..13fd54a 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -677,6 +677,12 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
+ if (!pdd)
+ return NULL;
+
++ if (init_doorbell_bitmap(&pdd->qpd, dev)) {
++ pr_err("Failed to init doorbell for process\n");
++ kfree(pdd);
++ return NULL;
++ }
++
+ pdd->dev = dev;
+ INIT_LIST_HEAD(&pdd->qpd.queues_list);
+ INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
+@@ -690,19 +696,8 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
+
+ /* Init idr used for memory handle translation */
+ idr_init(&pdd->alloc_idr);
+- if (init_doorbell_bitmap(&pdd->qpd, dev)) {
+- pr_err("Failed to init doorbell for process\n");
+- goto err_create_pdd;
+- }
+
+ return pdd;
+-
+-err_create_pdd:
+- kfree(pdd->qpd.doorbell_bitmap);
+- idr_destroy(&pdd->alloc_idr);
+- list_del(&pdd->per_device_list);
+- kfree(pdd);
+- return NULL;
+ }
+
+ /**
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5670-drm-amdkfd-Simplify-obj-handle-allocation.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5670-drm-amdkfd-Simplify-obj-handle-allocation.patch
new file mode 100644
index 00000000..17c107b7
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5670-drm-amdkfd-Simplify-obj-handle-allocation.patch
@@ -0,0 +1,51 @@
+From 7f516b0ee139edb309654861d744e063a33c9331 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Thu, 26 Apr 2018 15:11:48 -0400
+Subject: [PATCH 5670/5725] drm/amdkfd: Simplify obj handle allocation
+
+MIN and MAX_IDR_IDs aren't necessary because the entire ID range is
+fine for this purpose, including 0.
+
+Don't use the idr_preload functionality. This is meant for situations
+where an ID allocation is done in a place where it cannot fail. Then
+idr_preload can be done in a place where it's still OK to fail. Here
+both are in the same place, so it's not necessary to preload the memory
+allocation.
+
+Change-Id: I484657cd43904b546a5c605b766955925068ff99
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 10 +---------
+ 1 file changed, 1 insertion(+), 9 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index 13fd54a..182bf1c 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -60,9 +60,6 @@ static struct workqueue_struct *kfd_process_wq;
+ */
+ static struct workqueue_struct *kfd_restore_wq;
+
+-#define MIN_IDR_ID 1
+-#define MAX_IDR_ID 0 /*0 - for unlimited*/
+-
+ static struct kfd_process *find_process(const struct task_struct *thread,
+ bool ref);
+ static void kfd_process_ref_release(struct kref *ref);
+@@ -843,12 +840,7 @@ int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
+
+ INIT_LIST_HEAD(&buf_obj->cb_data_head);
+
+- idr_preload(GFP_KERNEL);
+-
+- handle = idr_alloc(&pdd->alloc_idr, buf_obj, MIN_IDR_ID, MAX_IDR_ID,
+- GFP_NOWAIT);
+-
+- idr_preload_end();
++ handle = idr_alloc(&pdd->alloc_idr, buf_obj, 0, 0, GFP_KERNEL);
+
+ if (handle < 0)
+ kfree(buf_obj);
+--
+2.7.4
+
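Because the handle is allocated in sleepable context, idr_alloc() can take GFP_KERNEL directly and the idr_preload()/GFP_NOWAIT pairing is unnecessary; passing 0 for both start and end lets the IDR use its whole ID range, 0 included. The allocation after the change, with comments:

    /* start = 0, end = 0: no artificial MIN/MAX limits on the handle range. */
    handle = idr_alloc(&pdd->alloc_idr, buf_obj, 0, 0, GFP_KERNEL);
    if (handle < 0)
            kfree(buf_obj);         /* ID allocation failed, drop the object */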
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5671-drm-amdkfd-Error-if-trying-to-acquire-VM-for-a-PDD-t.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5671-drm-amdkfd-Error-if-trying-to-acquire-VM-for-a-PDD-t.patch
new file mode 100644
index 00000000..e642e7d2
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5671-drm-amdkfd-Error-if-trying-to-acquire-VM-for-a-PDD-t.patch
@@ -0,0 +1,37 @@
+From ddaefc39a8a975b84b43dad9875fc7ce4ce3f208 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Thu, 26 Apr 2018 15:22:46 -0400
+Subject: [PATCH 5671/5725] drm/amdkfd: Error if trying to acquire VM for a PDD
+ twice
+
+Return an error in kfd_process_device_init_vm if an attempt is made
+to acquire a VM for a PDD that already has a VM. This could happen
+if kfd_ioctl_acquire_vm is called multiple times for the same device
+and process, or if it is called too late, after the process has
+already been bound to the device by another ioctl.
+
+Returning an error here can help detect potential future problems in
+user mode code instead of silently masking them.
+
+Change-Id: I55e46e2654e4d761ae4b43c194bd9a7f1dd3eefa
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index 182bf1c..3ecaad5 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -719,7 +719,7 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
+ int ret;
+
+ if (pdd->vm)
+- return 0;
++ return drm_file ? -EBUSY : 0;
+
+ p = pdd->process;
+ dev = pdd->dev;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5672-drm-amdkfd-Cosmetic-changes-to-match-upstream.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5672-drm-amdkfd-Cosmetic-changes-to-match-upstream.patch
new file mode 100644
index 00000000..c589b8e0
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5672-drm-amdkfd-Cosmetic-changes-to-match-upstream.patch
@@ -0,0 +1,1825 @@
+From 1e7b9f1f1656fb95a3003a21ba45f30e46b69e0d Mon Sep 17 00:00:00 2001
+From: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+Date: Wed, 9 Jan 2019 21:27:13 +0530
+Subject: [PATCH 5672/5725] drm/amdkfd: Cosmetic changes to match upstream
+
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+Signed-off-by: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 15 ++-
+ drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 57 ++++++---
+ drivers/gpu/drm/amd/amdkfd/kfd_crat.h | 48 ++++----
+ drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c | 2 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_device.c | 69 ++++++-----
+ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 24 ++--
+ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 9 +-
+ .../drm/amd/amdkfd/kfd_device_queue_manager_v9.c | 4 +-
+ .../drm/amd/amdkfd/kfd_device_queue_manager_vi.c | 29 ++---
+ drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c | 15 +--
+ drivers/gpu/drm/amd/amdkfd/kfd_events.c | 7 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c | 14 +--
+ drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c | 2 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_iommu.c | 3 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c | 2 -
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c | 5 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c | 97 ++++++++-------
+ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 11 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c | 1 -
+ drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 13 +--
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 40 +++----
+ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 130 ++++++++++-----------
+ .../gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 4 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 58 ++++-----
+ drivers/gpu/drm/amd/amdkfd/kfd_topology.h | 9 +-
+ drivers/gpu/drm/amd/amdkfd/soc15_int.h | 2 +-
+ 26 files changed, 334 insertions(+), 336 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index 99a29f7..73338f0 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -24,6 +24,7 @@
+ #include <linux/export.h>
+ #include <linux/err.h>
+ #include <linux/fs.h>
++#include <linux/file.h>
+ #include <linux/sched.h>
+ #include <linux/sched/mm.h>
+ #include <linux/slab.h>
+@@ -45,7 +46,6 @@
+ static long kfd_ioctl(struct file *, unsigned int, unsigned long);
+ static int kfd_open(struct inode *, struct file *);
+ static int kfd_mmap(struct file *, struct vm_area_struct *);
+-static bool kfd_dev_is_large_bar(struct kfd_dev *dev);
+
+ static const char kfd_dev_name[] = "kfd";
+
+@@ -903,7 +903,7 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
+ mutex_lock(&p->mutex);
+
+ if (!kfd_has_process_device_data(p))
+- goto out_upwrite;
++ goto out_unlock;
+
+ /* Run over all pdd of the process */
+ pdd = kfd_get_first_process_device_data(p);
+@@ -912,7 +912,7 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
+ pdd = kfd_get_next_process_device_data(p, pdd);
+ } while (pdd);
+
+- goto out_upwrite;
++ goto out_unlock;
+ }
+
+ /* Fill in process-aperture information for all available
+@@ -929,7 +929,7 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
+ if (!kfd_has_process_device_data(p)) {
+ args->num_of_nodes = 0;
+ kfree(pa);
+- goto out_upwrite;
++ goto out_unlock;
+ }
+
+ /* Run over all pdd of the process */
+@@ -971,7 +971,7 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
+ kfree(pa);
+ return ret ? -EFAULT : 0;
+
+-out_upwrite:
++out_unlock:
+ mutex_unlock(&p->mutex);
+ return 0;
+ }
+@@ -1322,8 +1322,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
+ return 0;
+
+ err_free:
+- dev->kfd2kgd->free_memory_of_gpu(dev->kgd,
+- (struct kgd_mem *) mem);
++ dev->kfd2kgd->free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
+ err_unlock:
+ mutex_unlock(&p->mutex);
+ return err;
+@@ -1364,7 +1363,7 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
+ /* If freeing the buffer failed, leave the handle in place for
+ * clean-up during process tear-down.
+ */
+- if (ret == 0)
++ if (!ret)
+ kfd_process_device_remove_obj_handle(
+ pdd, GET_IDR_HANDLE(args->handle));
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+index a803898..6688882 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+@@ -1,7 +1,27 @@
+-#include <linux/kernel.h>
+-#include <linux/acpi.h>
+-#include <linux/mm.h>
++/*
++ * Copyright 2015-2017 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
+ #include <linux/pci.h>
++#include <linux/acpi.h>
+ #include "kfd_crat.h"
+ #include "kfd_priv.h"
+ #include "kfd_topology.h"
+@@ -266,6 +286,7 @@ static int kfd_parse_subtype_cache(struct crat_subtype_cache *cache,
+
+ id = cache->processor_id_low;
+
++ pr_debug("Found cache entry in CRAT table with processor_id=%d\n", id);
+ list_for_each_entry(dev, device_list, list) {
+ total_num_of_cu = (dev->node_props.array_count *
+ dev->node_props.cu_per_simd_array);
+@@ -415,11 +436,15 @@ static int kfd_parse_subtype(struct crat_subtype_generic *sub_type_hdr,
+ ret = kfd_parse_subtype_cache(cache, device_list);
+ break;
+ case CRAT_SUBTYPE_TLB_AFFINITY:
+- /* For now, nothing to do here */
++ /*
++ * For now, nothing to do here
++ */
+ pr_debug("Found TLB entry in CRAT table (not processing)\n");
+ break;
+ case CRAT_SUBTYPE_CCOMPUTE_AFFINITY:
+- /* For now, nothing to do here */
++ /*
++ * For now, nothing to do here
++ */
+ pr_debug("Found CCOMPUTE entry in CRAT table (not processing)\n");
+ break;
+ case CRAT_SUBTYPE_IOLINK_AFFINITY:
+@@ -444,9 +469,8 @@ static int kfd_parse_subtype(struct crat_subtype_generic *sub_type_hdr,
+ *
+ * Return - 0 if successful else -ve value
+ */
+-int kfd_parse_crat_table(void *crat_image,
+- struct list_head *device_list,
+- uint32_t proximity_domain)
++int kfd_parse_crat_table(void *crat_image, struct list_head *device_list,
++ uint32_t proximity_domain)
+ {
+ struct kfd_topology_device *top_dev = NULL;
+ struct crat_subtype_generic *sub_type_hdr;
+@@ -693,7 +717,7 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
+ * crat_image will be NULL
+ * @size: [OUT] size of crat_image
+ *
+- * Return 0 if successful else return -ve value
++ * Return 0 if successful else return error code
+ */
+ #ifdef CONFIG_ACPI
+ int kfd_create_crat_image_acpi(void **crat_image, size_t *size)
+@@ -725,10 +749,8 @@ int kfd_create_crat_image_acpi(void **crat_image, size_t *size)
+ }
+
+ pcrat_image = kmalloc(crat_table->length, GFP_KERNEL);
+- if (!pcrat_image) {
+- pr_err("No memory for allocating CRAT image\n");
++ if (!pcrat_image)
+ return -ENOMEM;
+- }
+
+ memcpy(pcrat_image, crat_table, crat_table->length);
+
+@@ -1072,8 +1094,8 @@ static int kfd_fill_gpu_direct_io_link(int *avail_size,
+ * [OUT] actual size of data filled in crat_image
+ */
+ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
+- size_t *size, struct kfd_dev *kdev,
+- uint32_t proximity_domain)
++ size_t *size, struct kfd_dev *kdev,
++ uint32_t proximity_domain)
+ {
+ struct crat_header *crat_table = (struct crat_header *)pcrat_image;
+ struct crat_subtype_generic *sub_type_hdr;
+@@ -1241,7 +1263,8 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
+ * Return 0 if successful else return -ve value
+ */
+ int kfd_create_crat_image_virtual(void **crat_image, size_t *size,
+- int flags, struct kfd_dev *kdev, uint32_t proximity_domain)
++ int flags, struct kfd_dev *kdev,
++ uint32_t proximity_domain)
+ {
+ void *pcrat_image = NULL;
+ int ret = 0;
+@@ -1271,8 +1294,8 @@ int kfd_create_crat_image_virtual(void **crat_image, size_t *size,
+ if (!pcrat_image)
+ return -ENOMEM;
+ *size = VCRAT_SIZE_FOR_GPU;
+- ret = kfd_create_vcrat_image_gpu(pcrat_image, size,
+- kdev, proximity_domain);
++ ret = kfd_create_vcrat_image_gpu(pcrat_image, size, kdev,
++ proximity_domain);
+ break;
+ case (COMPUTE_UNIT_CPU | COMPUTE_UNIT_GPU):
+ /* TODO: */
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
+index 00de41f..cd7ee6d 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
+@@ -24,7 +24,6 @@
+ #define KFD_CRAT_H_INCLUDED
+
+ #include <linux/types.h>
+-#include "kfd_priv.h"
+
+ #pragma pack(1)
+
+@@ -228,12 +227,12 @@ struct crat_subtype_ccompute {
+ /*
+ * HSA IO Link Affinity structure and definitions
+ */
+-#define CRAT_IOLINK_FLAGS_ENABLED (1 << 0)
+-#define CRAT_IOLINK_FLAGS_NON_COHERENT (1 << 1)
+-#define CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT (1 << 2)
+-#define CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT (1 << 3)
+-#define CRAT_IOLINK_FLAGS_NO_PEER_TO_PEER_DMA (1 << 4)
+-#define CRAT_IOLINK_FLAGS_RESERVED_MASK 0xffffffe0
++#define CRAT_IOLINK_FLAGS_ENABLED (1 << 0)
++#define CRAT_IOLINK_FLAGS_NON_COHERENT (1 << 1)
++#define CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT (1 << 2)
++#define CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT (1 << 3)
++#define CRAT_IOLINK_FLAGS_NO_PEER_TO_PEER_DMA (1 << 4)
++#define CRAT_IOLINK_FLAGS_RESERVED_MASK 0xffffffe0
+
+ /*
+ * IO interface types
+@@ -241,18 +240,18 @@ struct crat_subtype_ccompute {
+ #define CRAT_IOLINK_TYPE_UNDEFINED 0
+ #define CRAT_IOLINK_TYPE_HYPERTRANSPORT 1
+ #define CRAT_IOLINK_TYPE_PCIEXPRESS 2
+-#define CRAT_IOLINK_TYPE_AMBA 3
+-#define CRAT_IOLINK_TYPE_MIPI 4
+-#define CRAT_IOLINK_TYPE_QPI_1_1 5
+-#define CRAT_IOLINK_TYPE_RESERVED1 6
+-#define CRAT_IOLINK_TYPE_RESERVED2 7
+-#define CRAT_IOLINK_TYPE_RAPID_IO 8
+-#define CRAT_IOLINK_TYPE_INFINIBAND 9
+-#define CRAT_IOLINK_TYPE_RESERVED3 10
+-#define CRAT_IOLINK_TYPE_OTHER 11
+-#define CRAT_IOLINK_TYPE_MAX 255
+-
+-#define CRAT_IOLINK_RESERVED_LENGTH 24
++#define CRAT_IOLINK_TYPE_AMBA 3
++#define CRAT_IOLINK_TYPE_MIPI 4
++#define CRAT_IOLINK_TYPE_QPI_1_1 5
++#define CRAT_IOLINK_TYPE_RESERVED1 6
++#define CRAT_IOLINK_TYPE_RESERVED2 7
++#define CRAT_IOLINK_TYPE_RAPID_IO 8
++#define CRAT_IOLINK_TYPE_INFINIBAND 9
++#define CRAT_IOLINK_TYPE_RESERVED3 10
++#define CRAT_IOLINK_TYPE_OTHER 11
++#define CRAT_IOLINK_TYPE_MAX 255
++
++#define CRAT_IOLINK_RESERVED_LENGTH 24
+
+ struct crat_subtype_iolink {
+ uint8_t type;
+@@ -308,13 +307,16 @@ struct cdit_header {
+
+ #pragma pack()
+
++struct kfd_dev;
++
+ #ifdef CONFIG_ACPI
+ int kfd_create_crat_image_acpi(void **crat_image, size_t *size);
+ #endif
+ void kfd_destroy_crat_image(void *crat_image);
+-int kfd_parse_crat_table(void *crat_image,
+- struct list_head *device_list,
+- uint32_t proximity_domain);
++int kfd_parse_crat_table(void *crat_image, struct list_head *device_list,
++ uint32_t proximity_domain);
+ int kfd_create_crat_image_virtual(void **crat_image, size_t *size,
+- int flags, struct kfd_dev *kdev, uint32_t proximity_domain);
++ int flags, struct kfd_dev *kdev,
++ uint32_t proximity_domain);
++
+ #endif /* KFD_CRAT_H_INCLUDED */
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
+index 232e28f..4bd6ebf 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2014 Advanced Micro Devices, Inc.
++ * Copyright 2016-2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+index bc5c642..9ce20da 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -240,6 +240,7 @@ static const struct kfd_device_info vega10_vf_device_info = {
+ .num_sdma_engines = 2,
+ };
+
++
+ struct kfd_deviceid {
+ unsigned short did;
+ const struct kfd_device_info *device_info;
+@@ -288,35 +289,35 @@ static const struct kfd_deviceid supported_devices[] = {
+ { 0x67B9, &hawaii_device_info }, /* Hawaii */
+ { 0x67BA, &hawaii_device_info }, /* Hawaii */
+ { 0x67BE, &hawaii_device_info }, /* Hawaii */
+- { 0x6920, &tonga_device_info }, /* Tonga */
+- { 0x6921, &tonga_device_info }, /* Tonga */
+- { 0x6928, &tonga_device_info }, /* Tonga */
+- { 0x6929, &tonga_device_info }, /* Tonga */
+- { 0x692B, &tonga_device_info }, /* Tonga */
+- { 0x692F, &tonga_vf_device_info }, /* Tonga vf */
+- { 0x6938, &tonga_device_info }, /* Tonga */
+- { 0x6939, &tonga_device_info }, /* Tonga */
+- { 0x7300, &fiji_device_info }, /* Fiji */
+- { 0x730F, &fiji_vf_device_info }, /* Fiji vf*/
+- { 0x67C0, &polaris10_device_info }, /* Polaris10 */
+- { 0x67C1, &polaris10_device_info }, /* Polaris10 */
+- { 0x67C2, &polaris10_device_info }, /* Polaris10 */
++ { 0x6920, &tonga_device_info }, /* Tonga */
++ { 0x6921, &tonga_device_info }, /* Tonga */
++ { 0x6928, &tonga_device_info }, /* Tonga */
++ { 0x6929, &tonga_device_info }, /* Tonga */
++ { 0x692B, &tonga_device_info }, /* Tonga */
++ { 0x692F, &tonga_vf_device_info }, /* Tonga vf */
++ { 0x6938, &tonga_device_info }, /* Tonga */
++ { 0x6939, &tonga_device_info }, /* Tonga */
++ { 0x7300, &fiji_device_info }, /* Fiji */
++ { 0x730F, &fiji_vf_device_info }, /* Fiji vf*/
++ { 0x67C0, &polaris10_device_info }, /* Polaris10 */
++ { 0x67C1, &polaris10_device_info }, /* Polaris10 */
++ { 0x67C2, &polaris10_device_info }, /* Polaris10 */
+ { 0x67C4, &polaris10_device_info }, /* Polaris10 */
+ { 0x67C7, &polaris10_device_info }, /* Polaris10 */
+- { 0x67C8, &polaris10_device_info }, /* Polaris10 */
+- { 0x67C9, &polaris10_device_info }, /* Polaris10 */
+- { 0x67CA, &polaris10_device_info }, /* Polaris10 */
+- { 0x67CC, &polaris10_device_info }, /* Polaris10 */
+- { 0x67CF, &polaris10_device_info }, /* Polaris10 */
+- { 0x67D0, &polaris10_vf_device_info }, /* Polaris10 vf*/
++ { 0x67C8, &polaris10_device_info }, /* Polaris10 */
++ { 0x67C9, &polaris10_device_info }, /* Polaris10 */
++ { 0x67CA, &polaris10_device_info }, /* Polaris10 */
++ { 0x67CC, &polaris10_device_info }, /* Polaris10 */
++ { 0x67CF, &polaris10_device_info }, /* Polaris10 */
++ { 0x67D0, &polaris10_vf_device_info }, /* Polaris10 vf*/
+ { 0x67DF, &polaris10_device_info }, /* Polaris10 */
+- { 0x67E0, &polaris11_device_info }, /* Polaris11 */
+- { 0x67E1, &polaris11_device_info }, /* Polaris11 */
++ { 0x67E0, &polaris11_device_info }, /* Polaris11 */
++ { 0x67E1, &polaris11_device_info }, /* Polaris11 */
+ { 0x67E3, &polaris11_device_info }, /* Polaris11 */
+- { 0x67E7, &polaris11_device_info }, /* Polaris11 */
+- { 0x67E8, &polaris11_device_info }, /* Polaris11 */
+- { 0x67E9, &polaris11_device_info }, /* Polaris11 */
+- { 0x67EB, &polaris11_device_info }, /* Polaris11 */
++ { 0x67E7, &polaris11_device_info }, /* Polaris11 */
++ { 0x67E8, &polaris11_device_info }, /* Polaris11 */
++ { 0x67E9, &polaris11_device_info }, /* Polaris11 */
++ { 0x67EB, &polaris11_device_info }, /* Polaris11 */
+ { 0x67EF, &polaris11_device_info }, /* Polaris11 */
+ { 0x67FF, &polaris11_device_info }, /* Polaris11 */
+ { 0x6860, &vega10_device_info }, /* Vega10 */
+@@ -366,11 +367,10 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
+ return NULL;
+ }
+
+- if (device_info->needs_pci_atomics) {
+- /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
+- * 32 and 64-bit requests are possible and must be
+- * supported.
+- */
++ /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
++ * 32 and 64-bit requests are possible and must be
++ * supported.
++ */
+ ret = pci_enable_atomic_ops_to_root(pdev,
+ PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
+ PCI_EXP_DEVCAP2_ATOMIC_COMP64);
+@@ -379,7 +379,6 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
+ "skipped device %x:%x, PCI rejects atomics",
+ pdev->vendor, pdev->device);
+ return NULL;
+- }
+ }
+
+ kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
+@@ -427,7 +426,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
+ KGD_ENGINE_SDMA1);
+ kfd->shared_resources = *gpu_resources;
+
+- /* Usually first_vmid_kfd = 8, last_vmid_kfd = 15 */
+ kfd->vm_info.first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1;
+ kfd->vm_info.last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1;
+ kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd
+@@ -670,10 +668,11 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
+
+ spin_lock(&kfd->interrupt_lock);
+
+- if (kfd->interrupts_active && interrupt_is_wanted(kfd, ih_ring_entry,
+- patched_ihre, &is_patched)
++ if (kfd->interrupts_active
++ && interrupt_is_wanted(kfd, ih_ring_entry,
++ patched_ihre, &is_patched)
+ && enqueue_ih_ring_entry(kfd,
+- is_patched ? patched_ihre : ih_ring_entry))
++ is_patched ? patched_ihre : ih_ring_entry))
+ queue_work(kfd->ih_wq, &kfd->interrupt_work);
+
+ spin_unlock(&kfd->interrupt_lock);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index 8067092..d7822e2 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -21,10 +21,11 @@
+ *
+ */
+
++#include <linux/ratelimit.h>
++#include <linux/printk.h>
+ #include <linux/slab.h>
+ #include <linux/list.h>
+ #include <linux/types.h>
+-#include <linux/printk.h>
+ #include <linux/bitops.h>
+ #include <linux/sched.h>
+ #include "kfd_priv.h"
+@@ -199,7 +200,7 @@ static int allocate_vmid(struct device_queue_manager *dqm,
+ dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->kgd,
+ qpd->vmid,
+ qpd->page_table_base);
+- /*invalidate the VM context after pasid and vmid mapping is set up*/
++ /* invalidate the VM context after pasid and vmid mapping is set up */
+ kfd_flush_tlb(qpd_to_pdd(qpd));
+
+ return 0;
+@@ -289,7 +290,6 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
+ if (retval) {
+ if (list_empty(&qpd->queues_list))
+ deallocate_vmid(dqm, qpd, q);
+-
+ goto out_unlock;
+ }
+
+@@ -482,11 +482,9 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
+ int retval;
+ struct mqd_manager *mqd;
+ struct kfd_process_device *pdd;
+-
+ bool prev_active = false;
+
+ mutex_lock(&dqm->lock);
+-
+ pdd = kfd_get_process_device_data(q->device, q->process);
+ if (!pdd) {
+ retval = -ENODEV;
+@@ -502,7 +500,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
+ * Eviction state logic: we only mark active queues as evicted
+ * to avoid the overhead of restoring inactive queues later
+ */
+- if (pdd->qpd.evicted > 0)
++ if (pdd->qpd.evicted)
+ q->properties.is_evicted = (q->properties.queue_size > 0 &&
+ q->properties.queue_percent > 0 &&
+ q->properties.queue_address != 0);
+@@ -762,9 +760,9 @@ static int register_process(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+ {
+ struct device_process_node *n;
+- int retval;
+ struct kfd_process_device *pdd;
+ uint32_t pd_base;
++ int retval;
+
+ n = kzalloc(sizeof(*n), GFP_KERNEL);
+ if (!n)
+@@ -781,7 +779,6 @@ static int register_process(struct device_queue_manager *dqm,
+
+ /* Update PD Base in QPD */
+ qpd->page_table_base = pd_base;
+- pr_debug("Updated PD address to 0x%08x\n", pd_base);
+
+ retval = dqm->asic_ops.update_qpd(dqm, qpd);
+
+@@ -1076,9 +1073,7 @@ static int start_cpsch(struct device_queue_manager *dqm)
+ static int stop_cpsch(struct device_queue_manager *dqm)
+ {
+ mutex_lock(&dqm->lock);
+-
+ unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
+-
+ mutex_unlock(&dqm->lock);
+
+ kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
+@@ -1633,7 +1628,6 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
+
+ out:
+ mutex_unlock(&dqm->lock);
+-
+ return retval;
+ }
+
+@@ -1648,7 +1642,13 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
+ return NULL;
+
+ switch (dev->device_info->asic_family) {
++ /* HWS is not available on Hawaii. */
+ case CHIP_HAWAII:
++ /* HWS depends on CWSR for timely dequeue. CWSR is not
++ * available on Tonga.
++ *
++ * FIXME: This argument also applies to Kaveri.
++ */
+ case CHIP_TONGA:
+ dqm->sched_policy = KFD_SCHED_POLICY_NO_HWS;
+ break;
+@@ -1728,7 +1728,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
+
+ case CHIP_VEGA10:
+ case CHIP_RAVEN:
+- device_queue_manager_init_v9_vega10(&dqm->asic_ops);
++ device_queue_manager_init_v9(&dqm->asic_ops);
+ break;
+ default:
+ WARN(1, "Unexpected ASIC family %u",
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+index 3f17e5e..82fafd0 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+@@ -209,7 +209,7 @@ void device_queue_manager_init_vi(
+ struct device_queue_manager_asic_ops *asic_ops);
+ void device_queue_manager_init_vi_tonga(
+ struct device_queue_manager_asic_ops *asic_ops);
+-void device_queue_manager_init_v9_vega10(
++void device_queue_manager_init_v9(
+ struct device_queue_manager_asic_ops *asic_ops);
+ void program_sh_mem_settings(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd);
+@@ -218,18 +218,11 @@ unsigned int get_queues_per_pipe(struct device_queue_manager *dqm);
+ unsigned int get_pipes_per_mec(struct device_queue_manager *dqm);
+ unsigned int get_num_sdma_queues(struct device_queue_manager *dqm);
+
+-int process_evict_queues(struct device_queue_manager *dqm,
+- struct qcm_process_device *qpd);
+-int process_restore_queues(struct device_queue_manager *dqm,
+- struct qcm_process_device *qpd);
+-
+-
+ static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
+ {
+ return (pdd->lds_base >> 16) & 0xFF;
+ }
+
+-/* This function is only useful for GFXv7 and v8 */
+ static inline unsigned int
+ get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
+ {
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
+index cc27190..4175153 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2016 Advanced Micro Devices, Inc.
++ * Copyright 2016-2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+@@ -32,7 +32,7 @@ static int update_qpd_v9(struct device_queue_manager *dqm,
+ static void init_sdma_vm_v9(struct device_queue_manager *dqm, struct queue *q,
+ struct qcm_process_device *qpd);
+
+-void device_queue_manager_init_v9_vega10(
++void device_queue_manager_init_v9(
+ struct device_queue_manager_asic_ops *asic_ops)
+ {
+ asic_ops->update_qpd = update_qpd_v9;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
+index 030b014..fd60a11 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
+@@ -33,35 +33,22 @@ static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size);
+-static int update_qpd_vi(struct device_queue_manager *dqm,
+- struct qcm_process_device *qpd);
+-static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
+- struct qcm_process_device *qpd);
+-
+-/*
+- * Tonga device queue manager functions
+- */
+ static bool set_cache_memory_policy_vi_tonga(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size);
++static int update_qpd_vi(struct device_queue_manager *dqm,
++ struct qcm_process_device *qpd);
+ static int update_qpd_vi_tonga(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd);
++static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
++ struct qcm_process_device *qpd);
+ static void init_sdma_vm_tonga(struct device_queue_manager *dqm,
+ struct queue *q,
+ struct qcm_process_device *qpd);
+
+-void device_queue_manager_init_vi_tonga(
+- struct device_queue_manager_asic_ops *asic_ops)
+-{
+- asic_ops->set_cache_memory_policy = set_cache_memory_policy_vi_tonga;
+- asic_ops->update_qpd = update_qpd_vi_tonga;
+- asic_ops->init_sdma_vm = init_sdma_vm_tonga;
+-}
+-
+-
+ void device_queue_manager_init_vi(
+ struct device_queue_manager_asic_ops *asic_ops)
+ {
+@@ -70,6 +57,14 @@ void device_queue_manager_init_vi(
+ asic_ops->init_sdma_vm = init_sdma_vm;
+ }
+
++void device_queue_manager_init_vi_tonga(
++ struct device_queue_manager_asic_ops *asic_ops)
++{
++ asic_ops->set_cache_memory_policy = set_cache_memory_policy_vi_tonga;
++ asic_ops->update_qpd = update_qpd_vi_tonga;
++ asic_ops->init_sdma_vm = init_sdma_vm_tonga;
++}
++
+ static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
+ {
+ /* In 64-bit mode, we can only control the top 3 bits of the LDS,
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+index fc41689..c3744d8 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+@@ -115,7 +115,7 @@ int kfd_doorbell_init(struct kfd_dev *kfd)
+ pr_debug("doorbell aperture size == 0x%08lX\n",
+ kfd->shared_resources.doorbell_aperture_size);
+
+- pr_debug("doorbell kernel address == 0x%p\n", kfd->doorbell_kernel_ptr);
++ pr_debug("doorbell kernel address == %p\n", kfd->doorbell_kernel_ptr);
+
+ return 0;
+ }
+@@ -189,7 +189,7 @@ void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
+
+ pr_debug("Get kernel queue doorbell\n"
+ " doorbell offset == 0x%08X\n"
+- " kernel address == 0x%p\n",
++ " kernel address == %p\n",
+ *doorbell_off, (kfd->doorbell_kernel_ptr + inx));
+
+ return kfd->doorbell_kernel_ptr + inx;
+@@ -210,7 +210,7 @@ void write_kernel_doorbell(void __iomem *db, u32 value)
+ {
+ if (db) {
+ writel(value, db);
+- pr_debug("Writing %d to doorbell address 0x%p\n", value, db);
++ pr_debug("Writing %d to doorbell address %p\n", value, db);
+ }
+ }
+
+@@ -220,14 +220,10 @@ void write_kernel_doorbell64(void __iomem *db, u64 value)
+ WARN(((unsigned long)db & 7) != 0,
+ "Unaligned 64-bit doorbell");
+ writeq(value, (u64 __iomem *)db);
+- pr_debug("writing %llu to doorbell address 0x%p\n", value, db);
++ pr_debug("writing %llu to doorbell address %p\n", value, db);
+ }
+ }
+
+-/*
+- * queue_ids are in the range [0,MAX_PROCESS_QUEUES) and are mapped 1:1
+- * to doorbells with the process's doorbell page
+- */
+ unsigned int kfd_doorbell_id_to_offset(struct kfd_dev *kfd,
+ struct kfd_process *process,
+ unsigned int doorbell_id)
+@@ -239,7 +235,8 @@ unsigned int kfd_doorbell_id_to_offset(struct kfd_dev *kfd,
+ * units regardless of the ASIC-dependent doorbell size.
+ */
+ return kfd->doorbell_id_offset +
+- process->doorbell_index * (kfd_doorbell_process_slice(kfd)/sizeof(u32)) +
++ process->doorbell_index
++ * kfd_doorbell_process_slice(kfd) / sizeof(u32) +
+ doorbell_id * kfd->device_info->doorbell_size / sizeof(u32);
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+index 24d8a21..1dc1584 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+@@ -390,7 +390,11 @@ static void set_event(struct kfd_event *ev)
+ {
+ struct kfd_event_waiter *waiter;
+
+- /* Auto reset if the list is non-empty and we're waking someone. */
++ /* Auto reset if the list is non-empty and we're waking
++ * someone. waitqueue_active is safe here because we're
++ * protected by the p->event_mutex, which is also held when
++ * updating the wait queues in kfd_wait_on_events.
++ */
+ ev->signaled = !ev->auto_reset || !waitqueue_active(&ev->wq);
+
+ list_for_each_entry(waiter, &ev->wq.head, wait.entry)
+@@ -777,7 +781,6 @@ int kfd_wait_on_events(struct kfd_process *p,
+
+ int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
+ {
+-
+ unsigned long pfn;
+ struct kfd_signal_page *page;
+ int ret;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+index df81e59..f7de732 100755
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+@@ -289,7 +289,6 @@
+
+ #define MAKE_LDS_APP_BASE_VI() \
+ (((uint64_t)(0x1UL) << 61) + 0x0)
+-
+ #define MAKE_LDS_APP_LIMIT(base) \
+ (((uint64_t)(base) & 0xFFFFFFFF00000000UL) | 0xFFFFFFFF)
+
+@@ -313,7 +312,7 @@
+ #define SVM_CWSR_BASE (SVM_USER_BASE - KFD_CWSR_TBA_TMA_SIZE)
+ #define SVM_IB_BASE (SVM_CWSR_BASE - PAGE_SIZE)
+
+-void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id)
++static void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id)
+ {
+ /*
+ * node id couldn't be 0 - the three MSB bits of
+@@ -343,7 +342,7 @@ void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id)
+ pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base);
+ }
+
+-void kfd_init_apertures_v9(struct kfd_process_device *pdd, uint8_t id)
++static void kfd_init_apertures_v9(struct kfd_process_device *pdd, uint8_t id)
+ {
+ pdd->lds_base = MAKE_LDS_APP_BASE_V9();
+ pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base);
+@@ -378,10 +377,10 @@ int kfd_init_apertures(struct kfd_process *process)
+ pdd = kfd_create_process_device_data(dev, process);
+ if (!pdd) {
+ pr_err("Failed to create process device data\n");
+- return -1;
++ return -ENOMEM;
+ }
+ /*
+- * For 64 bit process aperture will be statically reserved in
++ * For 64 bit process apertures will be statically reserved in
+ * the x86_64 non canonical process address space
+ * amdkfd doesn't currently support apertures for 32 bit process
+ */
+@@ -405,8 +404,9 @@ int kfd_init_apertures(struct kfd_process *process)
+ kfd_init_apertures_v9(pdd, id);
+ break;
+ default:
+- pr_err("Unknown chip in kfd_init_apertures\n");
+- return -1;
++ WARN(1, "Unexpected ASIC family %u",
++ dev->device_info->asic_family);
++ return -EINVAL;
+ }
+
+ if (!dev->device_info->needs_iommu_device) {
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+index 728aaad..5217e51 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2016 Advanced Micro Devices, Inc.
++ * Copyright 2016-2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
+index 5b798f9..7a61f38 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
+@@ -75,7 +75,8 @@ int kfd_iommu_device_init(struct kfd_dev *kfd)
+ }
+
+ if ((iommu_info.flags & required_iommu_flags) != required_iommu_flags) {
+- dev_err(kfd_device, "error required iommu flags ats %i, pri %i, pasid %i\n",
++ dev_err(kfd_device,
++ "error required iommu flags ats %i, pri %i, pasid %i\n",
+ (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP) != 0,
+ (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) != 0,
+ (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP)
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c
+index b48c29f..19e54ac 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c
+@@ -22,8 +22,6 @@
+ */
+
+ #include "kfd_kernel_queue.h"
+-#include "kfd_pm4_headers.h"
+-#include "kfd_pm4_opcodes.h"
+
+ static bool initialize_cik(struct kernel_queue *kq, struct kfd_dev *dev,
+ enum kfd_queue_type type, unsigned int queue_size);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
+index 6724b1a..684a3bf 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2016 Advanced Micro Devices, Inc.
++ * Copyright 2016-2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+@@ -44,7 +44,7 @@ static bool initialize_v9(struct kernel_queue *kq, struct kfd_dev *dev,
+ int retval;
+
+ retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem);
+- if (retval != 0)
++ if (retval)
+ return false;
+
+ kq->eop_gpu_addr = kq->eop_mem->gpu_addr;
+@@ -126,7 +126,6 @@ static int pm_runlist_v9(struct packet_manager *pm, uint32_t *buffer,
+ concurrent_proc_cnt = min(pm->dqm->processes_count,
+ kfd->max_proc_per_quantum);
+
+-
+ packet = (struct pm4_mes_runlist *)buffer;
+
+ memset(buffer, 0, sizeof(struct pm4_mes_runlist));
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
+index 357478f..bf20c6d 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
+@@ -67,12 +67,25 @@ static void submit_packet_vi(struct kernel_queue *kq)
+ kq->pending_wptr);
+ }
+
+-static int pm_map_process_vi(struct packet_manager *pm,
+- uint32_t *buffer, struct qcm_process_device *qpd)
++unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size)
++{
++ union PM4_MES_TYPE_3_HEADER header;
++
++ header.u32All = 0;
++ header.opcode = opcode;
++ header.count = packet_size / 4 - 2;
++ header.type = PM4_TYPE_3;
++
++ return header.u32All;
++}
++
++static int pm_map_process_vi(struct packet_manager *pm, uint32_t *buffer,
++ struct qcm_process_device *qpd)
+ {
+ struct pm4_mes_map_process *packet;
+
+ packet = (struct pm4_mes_map_process *)buffer;
++
+ memset(buffer, 0, sizeof(struct pm4_mes_map_process));
+
+ packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS,
+@@ -99,27 +112,16 @@ static int pm_map_process_vi(struct packet_manager *pm,
+ return 0;
+ }
+
+-
+-unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size)
+-{
+- union PM4_MES_TYPE_3_HEADER header;
+-
+- header.u32All = 0;
+- header.opcode = opcode;
+- header.count = packet_size / 4 - 2;
+- header.type = PM4_TYPE_3;
+-
+- return header.u32All;
+-}
+-
+ static int pm_runlist_vi(struct packet_manager *pm, uint32_t *buffer,
+ uint64_t ib, size_t ib_size_in_dwords, bool chain)
+ {
+ struct pm4_mes_runlist *packet;
+-
+ int concurrent_proc_cnt = 0;
+ struct kfd_dev *kfd = pm->dqm->dev;
+
++ if (WARN_ON(!ib))
++ return -EFAULT;
++
+ /* Determine the number of processes to map together to HW:
+ * it can not exceed the number of VMIDs available to the
+ * scheduler, and it is determined by the smaller of the number
+@@ -132,7 +134,6 @@ static int pm_runlist_vi(struct packet_manager *pm, uint32_t *buffer,
+ concurrent_proc_cnt = min(pm->dqm->processes_count,
+ kfd->max_proc_per_quantum);
+
+-
+ packet = (struct pm4_mes_runlist *)buffer;
+
+ memset(buffer, 0, sizeof(struct pm4_mes_runlist));
+@@ -150,6 +151,34 @@ static int pm_runlist_vi(struct packet_manager *pm, uint32_t *buffer,
+ return 0;
+ }
+
++int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer,
++ struct scheduling_resources *res)
++{
++ struct pm4_mes_set_resources *packet;
++
++ packet = (struct pm4_mes_set_resources *)buffer;
++ memset(buffer, 0, sizeof(struct pm4_mes_set_resources));
++
++ packet->header.u32All = pm_build_pm4_header(IT_SET_RESOURCES,
++ sizeof(struct pm4_mes_set_resources));
++
++ packet->bitfields2.queue_type =
++ queue_type__mes_set_resources__hsa_interface_queue_hiq;
++ packet->bitfields2.vmid_mask = res->vmid_mask;
++ packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY_MS / 100;
++ packet->bitfields7.oac_mask = res->oac_mask;
++ packet->bitfields8.gds_heap_base = res->gds_heap_base;
++ packet->bitfields8.gds_heap_size = res->gds_heap_size;
++
++ packet->gws_mask_lo = lower_32_bits(res->gws_mask);
++ packet->gws_mask_hi = upper_32_bits(res->gws_mask);
++
++ packet->queue_mask_lo = lower_32_bits(res->queue_mask);
++ packet->queue_mask_hi = upper_32_bits(res->queue_mask);
++
++ return 0;
++}
++
+ static int pm_map_queues_vi(struct packet_manager *pm, uint32_t *buffer,
+ struct queue *q, bool is_static)
+ {
+@@ -209,34 +238,6 @@ static int pm_map_queues_vi(struct packet_manager *pm, uint32_t *buffer,
+ return 0;
+ }
+
+-int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer,
+- struct scheduling_resources *res)
+-{
+- struct pm4_mes_set_resources *packet;
+-
+- packet = (struct pm4_mes_set_resources *)buffer;
+- memset(buffer, 0, sizeof(struct pm4_mes_set_resources));
+-
+- packet->header.u32All = pm_build_pm4_header(IT_SET_RESOURCES,
+- sizeof(struct pm4_mes_set_resources));
+-
+- packet->bitfields2.queue_type =
+- queue_type__mes_set_resources__hsa_interface_queue_hiq;
+- packet->bitfields2.vmid_mask = res->vmid_mask;
+- packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY_MS / 100;
+- packet->bitfields7.oac_mask = res->oac_mask;
+- packet->bitfields8.gds_heap_base = res->gds_heap_base;
+- packet->bitfields8.gds_heap_size = res->gds_heap_size;
+-
+- packet->gws_mask_lo = lower_32_bits(res->gws_mask);
+- packet->gws_mask_hi = upper_32_bits(res->gws_mask);
+-
+- packet->queue_mask_lo = lower_32_bits(res->queue_mask);
+- packet->queue_mask_hi = upper_32_bits(res->queue_mask);
+-
+- return 0;
+-}
+-
+ static int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer,
+ enum kfd_queue_type type,
+ enum kfd_unmap_queues_filter filter,
+@@ -310,7 +311,6 @@ static int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer,
+ packet = (struct pm4_mes_query_status *)buffer;
+ memset(buffer, 0, sizeof(struct pm4_mes_query_status));
+
+-
+ packet->header.u32All = pm_build_pm4_header(IT_QUERY_STATUS,
+ sizeof(struct pm4_mes_query_status));
+
+@@ -328,16 +328,15 @@ static int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer,
+ return 0;
+ }
+
+-
+ static int pm_release_mem_vi(uint64_t gpu_addr, uint32_t *buffer)
+ {
+ struct pm4_mec_release_mem *packet;
+
+ packet = (struct pm4_mec_release_mem *)buffer;
+- memset(buffer, 0, sizeof(struct pm4_mec_release_mem));
++ memset(buffer, 0, sizeof(*packet));
+
+ packet->header.u32All = pm_build_pm4_header(IT_RELEASE_MEM,
+- sizeof(struct pm4_mec_release_mem));
++ sizeof(*packet));
+
+ packet->bitfields2.event_type = CACHE_FLUSH_AND_INV_TS_EVENT;
+ packet->bitfields2.event_index = event_index___release_mem__end_of_pipe;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+index f4eced5..58ea1fe 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2016 Advanced Micro Devices, Inc.
++ * Copyright 2016-2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+@@ -217,8 +217,9 @@ static int update_mqd(struct mqd_manager *mm, void *mqd,
+ pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
+ m->cp_hqd_pq_doorbell_control);
+
+- m->cp_hqd_ib_control = 3 << CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT |
+- 1 << CP_HQD_IB_CONTROL__IB_EXE_DISABLE__SHIFT;
++ m->cp_hqd_ib_control =
++ 3 << CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT |
++ 1 << CP_HQD_IB_CONTROL__IB_EXE_DISABLE__SHIFT;
+
+ /*
+ * HW does not clamp this field correctly. Maximum EOP queue size
+@@ -243,8 +244,8 @@ static int update_mqd(struct mqd_manager *mm, void *mqd,
+ 2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT |
+ 1 << CP_HQD_PQ_CONTROL__QUEUE_FULL_EN__SHIFT |
+ 1 << CP_HQD_PQ_CONTROL__WPP_CLAMP_EN__SHIFT;
+- m->cp_hqd_pq_doorbell_control |=
+- 1 << CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT;
++ m->cp_hqd_pq_doorbell_control |= 1 <<
++ CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT;
+ }
+ if (priv_cp_queues)
+ m->cp_hqd_pq_control |=
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+index c537f37..e3ae2d4 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+@@ -544,4 +544,3 @@ struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type,
+ mqd->update_mqd = update_mqd_tonga;
+ return mqd;
+ }
+-
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+index cd380ad..c317feb4 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+@@ -26,7 +26,6 @@
+ #include "kfd_device_queue_manager.h"
+ #include "kfd_kernel_queue.h"
+ #include "kfd_priv.h"
+-#include "kfd_pm4_opcodes.h"
+
+ static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
+ unsigned int buffer_size_bytes)
+@@ -45,8 +44,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
+ unsigned int process_count, queue_count, compute_queue_count;
+ unsigned int map_queue_size;
+ unsigned int max_proc_per_quantum = 1;
+-
+- struct kfd_dev *dev = pm->dqm->dev;
++ struct kfd_dev *dev = pm->dqm->dev;
+
+ process_count = pm->dqm->processes_count;
+ queue_count = pm->dqm->queue_count;
+@@ -57,14 +55,13 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
+ * hws_max_conc_proc has been done in
+ * kgd2kfd_device_init().
+ */
+-
+ *over_subscription = false;
+
+ if (dev->max_proc_per_quantum > 1)
+ max_proc_per_quantum = dev->max_proc_per_quantum;
+
+ if ((process_count > max_proc_per_quantum) ||
+- compute_queue_count > get_queues_num(pm->dqm)) {
++ compute_queue_count > get_queues_num(pm->dqm)) {
+ *over_subscription = true;
+ pr_debug("Over subscribed runlist\n");
+ }
+@@ -193,6 +190,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
+ &rl_buffer[rl_wptr],
+ q,
+ qpd->is_debug);
++
+ if (retval)
+ return retval;
+
+@@ -301,8 +299,7 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
+
+ pr_debug("runlist IB address: 0x%llX\n", rl_gpu_ib_addr);
+
+- packet_size_dwords = pm->pmf->runlist_size /
+- sizeof(uint32_t);
++ packet_size_dwords = pm->pmf->runlist_size / sizeof(uint32_t);
+ mutex_lock(&pm->lock);
+
+ retval = pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
+@@ -311,7 +308,7 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
+ goto fail_acquire_packet_buffer;
+
+ retval = pm->pmf->runlist(pm, rl_buffer, rl_gpu_ib_addr,
+- rl_ib_size / sizeof(uint32_t), false);
++ rl_ib_size / sizeof(uint32_t), false);
+ if (retval)
+ goto fail_create_runlist;
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index 8513abf..969dac2 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -30,13 +30,13 @@
+ #include <linux/atomic.h>
+ #include <linux/workqueue.h>
+ #include <linux/spinlock.h>
+-#include <linux/idr.h>
+ #include <linux/kfd_ioctl.h>
+-#include <linux/pid.h>
+-#include <linux/interval_tree.h>
++#include <linux/idr.h>
+ #include <linux/seq_file.h>
+ #include <linux/kref.h>
+ #include <linux/kfifo.h>
++#include <linux/pid.h>
++#include <linux/interval_tree.h>
+ #include <kgd_kfd_interface.h>
+
+ #include "amd_shared.h"
+@@ -81,7 +81,6 @@
+ #define KFD_CIK_HIQ_PIPE 4
+ #define KFD_CIK_HIQ_QUEUE 0
+
+-
+ /* Macro for allocating structures */
+ #define kfd_alloc_struct(ptr_to_struct) \
+ ((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))
+@@ -114,14 +113,14 @@ extern int max_num_of_queues_per_device;
+ /* Kernel module parameter to specify the scheduling policy */
+ extern int sched_policy;
+
+-extern int cwsr_enable;
+-
+ /*
+ * Kernel module parameter to specify the maximum process
+ * number per HW scheduler
+ */
+ extern int hws_max_conc_proc;
+
++extern int cwsr_enable;
++
+ /*
+ * Kernel module parameter to specify whether to send sigterm to HSA process on
+ * unhandled exception
+@@ -442,7 +441,11 @@ enum KFD_QUEUE_PRIORITY {
+ * @is_interop: Defines if this is a interop queue. Interop queue means that
+ * the queue can access both graphics and compute resources.
+ *
+- * @is_active: Defines if the queue is active or not.
++ * @is_evicted: Defines if the queue is evicted. Only active queues
++ * are evicted, rendering them inactive.
++ *
++ * @is_active: Defines if the queue is active or not. @is_active and
++ * @is_evicted are protected by the DQM lock.
+ *
+ * @vmid: If the scheduling mode is no cp scheduling the field defines the vmid
+ * of the queue.
+@@ -464,7 +467,7 @@ struct queue_properties {
+ void __iomem *doorbell_ptr;
+ uint32_t doorbell_off;
+ bool is_interop;
+- bool is_evicted; /* true -> queue is evicted */
++ bool is_evicted;
+ bool is_active;
+ /* Not relevant for user mode queues in cp scheduling */
+ unsigned int vmid;
+@@ -583,7 +586,6 @@ struct qcm_process_device {
+ struct list_head priv_queue_list;
+
+ unsigned int queue_count;
+- /* a data field only meaningful for non-HWS case */
+ unsigned int vmid;
+ bool is_debug;
+ unsigned int evicted; /* eviction counter, 0=active */
+@@ -614,11 +616,11 @@ struct qcm_process_device {
+ uint64_t tma_addr;
+
+ /* IB memory */
+- uint64_t ib_base; /* ib_base+ib_size must be below cwsr_base */
++ uint64_t ib_base;
+ void *ib_kaddr;
+
+ /*doorbell resources per process per device*/
+- unsigned long *doorbell_bitmap;
++ unsigned long *doorbell_bitmap;
+ };
+
+ /* KFD Memory Eviction */
+@@ -757,7 +759,7 @@ struct kfd_process {
+ struct rb_root_cached bo_interval_tree;
+
+ /* Information used for memory eviction */
+- void *process_info;
++ void *kgd_process_info;
+ /* Eviction fence that is attached to all the BOs of this process. The
+ * fence will be triggered during eviction and new one will be created
+ * during restore
+@@ -800,7 +802,7 @@ struct amdkfd_ioctl_desc {
+ int kfd_process_create_wq(void);
+ void kfd_process_destroy_wq(void);
+ struct kfd_process *kfd_create_process(struct file *filep);
+-struct kfd_process *kfd_get_process(const struct task_struct *task);
++struct kfd_process *kfd_get_process(const struct task_struct *);
+ struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid);
+ struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm);
+ void kfd_unref_process(struct kfd_process *p);
+@@ -810,7 +812,7 @@ int kfd_resume_all_processes(void);
+ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
+ struct file *drm_file);
+ struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
+- struct kfd_process *p);
++ struct kfd_process *p);
+ struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
+ struct kfd_process *p);
+ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
+@@ -858,7 +860,7 @@ void kfd_pasid_free(unsigned int pasid);
+ size_t kfd_doorbell_process_slice(struct kfd_dev *kfd);
+ int kfd_doorbell_init(struct kfd_dev *kfd);
+ void kfd_doorbell_fini(struct kfd_dev *kfd);
+-int kfd_doorbell_mmap(struct kfd_dev *kfd, struct kfd_process *process,
++int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process,
+ struct vm_area_struct *vma);
+ void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
+ unsigned int *doorbell_off);
+@@ -981,8 +983,6 @@ int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
+ #define KFD_FENCE_COMPLETED (100)
+ #define KFD_FENCE_INIT (10)
+
+-struct packet_manager_func;
+-
+ struct packet_manager {
+ struct device_queue_manager *dqm;
+ struct kernel_queue *priv_queue;
+@@ -995,7 +995,7 @@ struct packet_manager {
+ };
+
+ struct packet_manager_funcs {
+- /* Support different firmware versions for PM4 packets */
++ /* Support ASIC-specific packet formats for PM4 packets */
+ int (*map_process)(struct packet_manager *pm, uint32_t *buffer,
+ struct qcm_process_device *qpd);
+ int (*runlist)(struct packet_manager *pm, uint32_t *buffer,
+@@ -1041,7 +1041,7 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
+
+ void pm_release_ib(struct packet_manager *pm);
+
+-/* Following PM funcs can be shared among CIK and VI */
++/* Following PM funcs can be shared among VI and AI */
+ unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size);
+ int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer,
+ struct scheduling_resources *res);
+@@ -1088,8 +1088,6 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p);
+
+ bool kfd_is_locked(void);
+
+-#define KFD_SCRATCH_KV_FW_VER 413
+-
+ /* PeerDirect support */
+ void kfd_init_peer_direct(void);
+ void kfd_close_peer_direct(void);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index 3ecaad5..e79479b 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -30,6 +30,7 @@
+ #include <linux/notifier.h>
+ #include <linux/compat.h>
+ #include <linux/mman.h>
++#include <linux/file.h>
+ #include <asm/page.h>
+ #include "kfd_ipc.h"
+
+@@ -184,8 +185,8 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
+ /* kfd_process_device_reserve_ib_mem - Reserve memory inside the
+ * process for IB usage The memory reserved is for KFD to submit
+ * IB to AMDGPU from kernel. If the memory is reserved
+- * successfully, ib_kaddr_assigned will have the CPU/kernel
+- * address. Check ib_kaddr_assigned before accessing the memory.
++ * successfully, ib_kaddr will have the CPU/kernel
++ * address. Check ib_kaddr before accessing the memory.
+ */
+ static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
+ {
+@@ -214,7 +215,6 @@ static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
+ struct kfd_process *kfd_create_process(struct file *filep)
+ {
+ struct kfd_process *process;
+-
+ struct task_struct *thread = current;
+
+ if (!thread->mm)
+@@ -350,7 +350,9 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
+
+ list_for_each_entry_safe(pdd, temp, &p->per_device_data,
+ per_device_list) {
+- /* Destroy the GPUVM VM context */
++ pr_debug("Releasing pdd (topology id %d) for process (pasid %d)\n",
++ pdd->dev->id, p->pasid);
++
+ if (pdd->drm_file)
+ fput(pdd->drm_file);
+ else if (pdd->vm)
+@@ -403,9 +405,6 @@ static void kfd_process_ref_release(struct kref *ref)
+ {
+ struct kfd_process *p = container_of(ref, struct kfd_process, ref);
+
+- if (WARN_ON(!kfd_process_wq))
+- return;
+-
+ INIT_WORK(&p->release_work, kfd_process_wq_release);
+ queue_work(kfd_process_wq, &p->release_work);
+ }
+@@ -488,9 +487,9 @@ static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
+
+ offset = (KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id))
+ << PAGE_SHIFT;
+- qpd->tba_addr = (uint64_t)vm_mmap(filep, 0,
+- KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
+- MAP_SHARED, offset);
++ qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
++ KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
++ MAP_SHARED, offset);
+
+ if (IS_ERR_VALUE(qpd->tba_addr)) {
+ int err = qpd->tba_addr;
+@@ -726,10 +725,11 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
+
+ if (drm_file)
+ ret = dev->kfd2kgd->acquire_process_vm(
+- dev->kgd, drm_file, &pdd->vm, &p->process_info, &p->ef);
++ dev->kgd, drm_file,
++ &pdd->vm, &p->kgd_process_info, &p->ef);
+ else
+ ret = dev->kfd2kgd->create_process_vm(
+- dev->kgd, &pdd->vm, &p->process_info, &p->ef);
++ dev->kgd, &pdd->vm, &p->kgd_process_info, &p->ef);
+ if (ret) {
+ pr_err("Failed to create process VM object\n");
+ return ret;
+@@ -943,42 +943,6 @@ struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid)
+ return ret_p;
+ }
+
+-void kfd_suspend_all_processes(void)
+-{
+- struct kfd_process *p;
+- unsigned int temp;
+- int idx = srcu_read_lock(&kfd_processes_srcu);
+-
+- hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
+- cancel_delayed_work_sync(&p->eviction_work);
+- cancel_delayed_work_sync(&p->restore_work);
+-
+- if (kfd_process_evict_queues(p))
+- pr_err("Failed to suspend process %d\n", p->pasid);
+- dma_fence_signal(p->ef);
+- dma_fence_put(p->ef);
+- p->ef = NULL;
+- }
+- srcu_read_unlock(&kfd_processes_srcu, idx);
+-}
+-
+-int kfd_resume_all_processes(void)
+-{
+- struct kfd_process *p;
+- unsigned int temp;
+- int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu);
+-
+- hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
+- if (!queue_delayed_work(kfd_restore_wq, &p->restore_work, 0)) {
+- pr_err("Restore process %d failed during resume\n",
+- p->pasid);
+- ret = -EFAULT;
+- }
+- }
+- srcu_read_unlock(&kfd_processes_srcu, idx);
+- return ret;
+-}
+-
+ /* This increments the process->ref counter. */
+ struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
+ {
+@@ -1070,15 +1034,14 @@ static void evict_process_worker(struct work_struct *work)
+ "Eviction fence mismatch\n");
+
+ /* Narrow window of overlap between restore and evict work
+- * item is possible. Once
+- * amdgpu_amdkfd_gpuvm_restore_process_bos unreserves KFD BOs,
+- * it is possible to evicted again. But restore has few more
+- * steps of finish. So lets wait for any previous restore work
+- * to complete
++ * item is possible. Once amdgpu_amdkfd_gpuvm_restore_process_bos
++ * unreserves KFD BOs, it is possible to evicted again. But
++ * restore has few more steps of finish. So lets wait for any
++ * previous restore work to complete
+ */
+ flush_delayed_work(&p->restore_work);
+
+- pr_info("Started evicting process of pasid %d\n", p->pasid);
++ pr_info("Started evicting pasid %d\n", p->pasid);
+ ret = kfd_process_evict_queues(p);
+ if (!ret) {
+ dma_fence_signal(p->ef);
+@@ -1087,10 +1050,9 @@ static void evict_process_worker(struct work_struct *work)
+ queue_delayed_work(kfd_restore_wq, &p->restore_work,
+ msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));
+
+- pr_info("Finished evicting process of pasid %d\n", p->pasid);
++ pr_info("Finished evicting pasid %d\n", p->pasid);
+ } else
+- pr_err("Failed to quiesce user queues. Cannot evict pasid %d\n",
+- p->pasid);
++ pr_err("Failed to evict queues of pasid %d\n", p->pasid);
+ }
+
+ static void restore_process_worker(struct work_struct *work)
+@@ -1116,7 +1078,7 @@ static void restore_process_worker(struct work_struct *work)
+ struct kfd_process_device,
+ per_device_list);
+
+- pr_info("Started restoring process of pasid %d\n", p->pasid);
++ pr_info("Started restoring pasid %d\n", p->pasid);
+
+ /* Setting last_restore_timestamp before successful restoration.
+ * Otherwise this would have to be set by KGD (restore_process_bos)
+@@ -1129,10 +1091,11 @@ static void restore_process_worker(struct work_struct *work)
+ */
+
+ p->last_restore_timestamp = get_jiffies_64();
+- ret = pdd->dev->kfd2kgd->restore_process_bos(p->process_info, &p->ef);
++ ret = pdd->dev->kfd2kgd->restore_process_bos(p->kgd_process_info,
++ &p->ef);
+ if (ret) {
+- pr_info("Restore failed, try again after %d ms\n",
+- PROCESS_BACK_OFF_TIME_MS);
++ pr_info("Failed to restore BOs of pasid %d, retry after %d ms\n",
++ p->pasid, PROCESS_BACK_OFF_TIME_MS);
+ ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
+ msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
+ WARN(!ret, "reschedule restore work failed\n");
+@@ -1140,10 +1103,46 @@ static void restore_process_worker(struct work_struct *work)
+ }
+
+ ret = kfd_process_restore_queues(p);
+- if (ret)
+- pr_err("Failed to resume user queues\n");
++ if (!ret)
++ pr_info("Finished restoring pasid %d\n", p->pasid);
++ else
++ pr_err("Failed to restore queues of pasid %d\n", p->pasid);
++}
++
++void kfd_suspend_all_processes(void)
++{
++ struct kfd_process *p;
++ unsigned int temp;
++ int idx = srcu_read_lock(&kfd_processes_srcu);
+
+- pr_info("Finished restoring process of pasid %d\n", p->pasid);
++ hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
++ cancel_delayed_work_sync(&p->eviction_work);
++ cancel_delayed_work_sync(&p->restore_work);
++
++ if (kfd_process_evict_queues(p))
++ pr_err("Failed to suspend process %d\n", p->pasid);
++ dma_fence_signal(p->ef);
++ dma_fence_put(p->ef);
++ p->ef = NULL;
++ }
++ srcu_read_unlock(&kfd_processes_srcu, idx);
++}
++
++int kfd_resume_all_processes(void)
++{
++ struct kfd_process *p;
++ unsigned int temp;
++ int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu);
++
++ hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
++ if (!queue_delayed_work(kfd_restore_wq, &p->restore_work, 0)) {
++ pr_err("Restore process %d failed during resume\n",
++ p->pasid);
++ ret = -EFAULT;
++ }
++ }
++ srcu_read_unlock(&kfd_processes_srcu, idx);
++ return ret;
+ }
+
+ int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
+@@ -1177,7 +1176,6 @@ int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
+ KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
+ }
+
+-
+ void kfd_flush_tlb(struct kfd_process_device *pdd)
+ {
+ struct kfd_dev *dev = pdd->dev;
+@@ -1212,7 +1210,7 @@ int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
+ r = pqm_debugfs_mqds(m, &p->pqm);
+ mutex_unlock(&p->mutex);
+
+- if (r != 0)
++ if (r)
+ break;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+index 52882e0..fbaca3b 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+@@ -188,7 +188,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
+ case KFD_QUEUE_TYPE_SDMA:
+ if (dev->dqm->sdma_queue_count
+ >= get_num_sdma_queues(dev->dqm)) {
+- pr_debug("Over-subscription is not allowed for SDMA\n");
++ pr_debug("Over-subscription is not allowed for SDMA.\n");
+ retval = -EPERM;
+ goto err_create_queue;
+ }
+@@ -206,7 +206,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
+ case KFD_QUEUE_TYPE_COMPUTE:
+ /* check if there is over subscription */
+ if ((dev->dqm->sched_policy ==
+- KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
++ KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
+ ((dev->dqm->processes_count >= dev->vm_info.vmid_num_kfd) ||
+ (dev->dqm->queue_count >= get_queues_num(dev->dqm)))) {
+ pr_debug("Over-subscription is not allowed in radeon_kfd.sched_policy == 1\n");
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+index 320c8d3..82cff10 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+@@ -196,6 +196,7 @@ struct kfd_topology_device *kfd_create_topology_device(
+ return dev;
+ }
+
++
+ #define sysfs_show_gen_prop(buffer, fmt, ...) \
+ snprintf(buffer, PAGE_SIZE, "%s"fmt, buffer, __VA_ARGS__)
+ #define sysfs_show_32bit_prop(buffer, name, value) \
+@@ -739,7 +740,7 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
+ }
+
+ /* All hardware blocks have the same number of attributes. */
+- num_attrs = sizeof(perf_attr_iommu)/sizeof(struct kfd_perf_attr);
++ num_attrs = ARRAY_SIZE(perf_attr_iommu);
+ list_for_each_entry(perf, &dev->perf_props, list) {
+ perf->attr_group = kzalloc(sizeof(struct kfd_perf_attr)
+ * num_attrs + sizeof(struct attribute_group),
+@@ -890,7 +891,8 @@ static void kfd_debug_print_topology(void)
+ up_read(&topology_lock);
+ }
+
+-/* Helper function for intializing platform_xx members of kfd_system_properties
++/* Helper function for intializing platform_xx members of
++ * kfd_system_properties. Uses OEM info from the last CPU/APU node.
+ */
+ static void kfd_update_system_properties(void)
+ {
+@@ -1013,13 +1015,12 @@ int kfd_topology_init(void)
+ */
+ #ifdef CONFIG_ACPI
+ ret = kfd_create_crat_image_acpi(&crat_image, &image_size);
+- if (ret == 0) {
++ if (!ret) {
+ ret = kfd_parse_crat_table(crat_image,
+ &temp_topology_device_list,
+ proximity_domain);
+ if (ret ||
+- kfd_is_acpi_crat_invalid(&temp_topology_device_list)) {
+-
++ kfd_is_acpi_crat_invalid(&temp_topology_device_list)) {
+ kfd_release_topology_device_list(
+ &temp_topology_device_list);
+ kfd_destroy_crat_image(crat_image);
+@@ -1029,8 +1030,8 @@ int kfd_topology_init(void)
+ #endif
+ if (!crat_image) {
+ ret = kfd_create_crat_image_virtual(&crat_image, &image_size,
+- COMPUTE_UNIT_CPU, NULL,
+- proximity_domain);
++ COMPUTE_UNIT_CPU, NULL,
++ proximity_domain);
+ cpu_only_node = 1;
+ if (ret) {
+ pr_err("Error creating VCRAT table for CPU\n");
+@@ -1038,8 +1039,8 @@ int kfd_topology_init(void)
+ }
+
+ ret = kfd_parse_crat_table(crat_image,
+- &temp_topology_device_list,
+- proximity_domain);
++ &temp_topology_device_list,
++ proximity_domain);
+ if (ret) {
+ pr_err("Error parsing VCRAT table for CPU\n");
+ goto err;
+@@ -1051,12 +1052,12 @@ int kfd_topology_init(void)
+
+ down_write(&topology_lock);
+ kfd_topology_update_device_list(&temp_topology_device_list,
+- &topology_device_list);
++ &topology_device_list);
+ atomic_set(&topology_crat_proximity_domain, sys_props.num_devices-1);
+ ret = kfd_topology_update_sysfs();
+ up_write(&topology_lock);
+
+- if (ret == 0) {
++ if (!ret) {
+ sys_props.generation_count++;
+ kfd_update_system_properties();
+ kfd_debug_print_topology();
+@@ -1144,7 +1145,6 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
+ break;
+ }
+ up_write(&topology_lock);
+-
+ return out_dev;
+ }
+
+@@ -1212,8 +1212,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
+
+ pr_debug("Adding new GPU (ID: 0x%x) to topology\n", gpu_id);
+
+- proximity_domain = atomic_inc_return(&
+- topology_crat_proximity_domain);
++ proximity_domain = atomic_inc_return(&topology_crat_proximity_domain);
+
+ /* Check to see if this gpu device exists in the topology_device_list.
+ * If so, assign the gpu to that device,
+@@ -1224,15 +1223,16 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
+ dev = kfd_assign_gpu(gpu);
+ if (!dev) {
+ res = kfd_create_crat_image_virtual(&crat_image, &image_size,
+- COMPUTE_UNIT_GPU,
+- gpu, proximity_domain);
++ COMPUTE_UNIT_GPU, gpu,
++ proximity_domain);
+ if (res) {
+ pr_err("Error creating VCRAT for GPU (ID: 0x%x)\n",
+ gpu_id);
+ return res;
+ }
+ res = kfd_parse_crat_table(crat_image,
+- &temp_topology_device_list, proximity_domain);
++ &temp_topology_device_list,
++ proximity_domain);
+ if (res) {
+ pr_err("Error parsing VCRAT for GPU (ID: 0x%x)\n",
+ gpu_id);
+@@ -1249,14 +1249,13 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
+ res = kfd_topology_update_sysfs();
+ up_write(&topology_lock);
+
+- if (res == 0)
++ if (!res)
+ sys_props.generation_count++;
+ else
+ pr_err("Failed to update GPU (ID: 0x%x) to sysfs topology. res=%d\n",
+ gpu_id, res);
+ dev = kfd_assign_gpu(gpu);
+- if (!dev) {
+- pr_err("Could not assign GPU\n");
++ if (WARN_ON(!dev)) {
+ res = -ENODEV;
+ goto err;
+ }
+@@ -1315,14 +1314,15 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
+ HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
+ break;
+ default:
+- BUG();
++ WARN(1, "Unexpected ASIC family %u",
++ dev->gpu->device_info->asic_family);
+ }
+
+ /* Fix errors in CZ CRAT.
+- * simd_count: Carrizo CRAT reports wrong simd_count, probably because
+- * it doesn't consider masked out CUs
+- * max_waves_per_simd: Carrizo reports wrong max_waves_per_simd.
+- * capability flag: Carrizo CRAT doesn't report IOMMU flags.
++ * simd_count: Carrizo CRAT reports wrong simd_count, probably
++ * because it doesn't consider masked out CUs
++ * max_waves_per_simd: Carrizo reports wrong max_waves_per_simd
++ * capability flag: Carrizo CRAT doesn't report IOMMU flags
+ */
+ if (dev->gpu->device_info->asic_family == CHIP_CARRIZO) {
+ dev->node_props.simd_count =
+@@ -1362,7 +1362,7 @@ int kfd_topology_remove_device(struct kfd_dev *gpu)
+
+ up_write(&topology_lock);
+
+- if (res == 0)
++ if (!res)
+ kfd_notify_gpu_change(gpu_id, 0);
+
+ return res;
+@@ -1403,7 +1403,7 @@ static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask)
+ {
+ int first_cpu_of_numa_node;
+
+- if (!cpumask || (cpumask == cpu_none_mask))
++ if (!cpumask || cpumask == cpu_none_mask)
+ return -1;
+ first_cpu_of_numa_node = cpumask_first(cpumask);
+ if (first_cpu_of_numa_node >= nr_cpu_ids)
+@@ -1446,7 +1446,7 @@ int kfd_debugfs_hqds_by_device(struct seq_file *m, void *data)
+
+ seq_printf(m, "Node %u, gpu_id %x:\n", i++, dev->gpu->id);
+ r = dqm_debugfs_hqds(m, dev->gpu->dqm);
+- if (r != 0)
++ if (r)
+ break;
+ }
+
+@@ -1471,7 +1471,7 @@ int kfd_debugfs_rls_by_device(struct seq_file *m, void *data)
+
+ seq_printf(m, "Node %u, gpu_id %x:\n", i++, dev->gpu->id);
+ r = pm_debugfs_runlist(m, &dev->gpu->dqm->packets);
+- if (r != 0)
++ if (r)
+ break;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+index 4c518fe8..2b36baf 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+@@ -46,9 +46,6 @@
+ #define HSA_CAP_DOORBELL_TYPE_PRE_1_0 0x0
+ #define HSA_CAP_DOORBELL_TYPE_1_0 0x1
+ #define HSA_CAP_DOORBELL_TYPE_2_0 0x2
+-#define HSA_CAP_WATCH_POINTS_TOTALBITS_MASK 0x00000f00
+-#define HSA_CAP_WATCH_POINTS_TOTALBITS_SHIFT 8
+-#define HSA_CAP_DOORBELL_PACKET_TYPE 0x00001000
+ #define HSA_CAP_AQL_QUEUE_DOUBLE_MAP 0x00004000
+
+ struct kfd_node_properties {
+@@ -169,9 +166,9 @@ struct kfd_topology_device {
+ struct attribute attr_gpuid;
+ struct attribute attr_name;
+ struct attribute attr_props;
+- uint8_t oem_id[CRAT_OEMID_LENGTH];
+- uint8_t oem_table_id[CRAT_OEMTABLEID_LENGTH];
+- uint32_t oem_revision;
++ uint8_t oem_id[CRAT_OEMID_LENGTH];
++ uint8_t oem_table_id[CRAT_OEMTABLEID_LENGTH];
++ uint32_t oem_revision;
+ };
+
+ struct kfd_system_properties {
+diff --git a/drivers/gpu/drm/amd/amdkfd/soc15_int.h b/drivers/gpu/drm/amd/amdkfd/soc15_int.h
+index e00d03d..d581d1a 100644
+--- a/drivers/gpu/drm/amd/amdkfd/soc15_int.h
++++ b/drivers/gpu/drm/amd/amdkfd/soc15_int.h
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2016 Advanced Micro Devices, Inc.
++ * Copyright 2016-2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+--
+2.7.4
+
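For illustration only (not part of any patch above): the worker rework in the hunks above follows a common delayed-work retry shape — when restore fails, the worker re-queues itself with a back-off delay rather than giving up. A simplified kernel-context sketch of that pattern, with hypothetical names and timeouts, not the actual kfd functions:

    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    #define RESTORE_BACKOFF_MS 100    /* made-up back-off, for illustration */

    static struct workqueue_struct *restore_wq;

    struct tracked_process {
            struct delayed_work restore_work;
    };

    static int restore_resources(struct tracked_process *p)
    {
            /* stand-in for restoring BOs and resuming queues */
            return -EAGAIN;
    }

    static void restore_worker(struct work_struct *work)
    {
            struct tracked_process *p = container_of(work, struct tracked_process,
                                                     restore_work.work);

            if (restore_resources(p))
                    /* Failed: schedule another attempt instead of giving up. */
                    queue_delayed_work(restore_wq, &p->restore_work,
                                       msecs_to_jiffies(RESTORE_BACKOFF_MS));
    }
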
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5673-drm-amdkfd-Add-sanity-checks-in-IRQ-handlers.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5673-drm-amdkfd-Add-sanity-checks-in-IRQ-handlers.patch
new file mode 100644
index 00000000..106a6358
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5673-drm-amdkfd-Add-sanity-checks-in-IRQ-handlers.patch
@@ -0,0 +1,137 @@
+From 8042b50efc0c528f43bfff0400c0d7323a93fe26 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Mon, 30 Apr 2018 19:22:49 -0400
+Subject: [PATCH 5673/5725] drm/amdkfd: Add sanity checks in IRQ handlers
+
+Only accept interrupts from KFD VMIDs. Just checking for a PASID may
+not be enough because amdgpu started using PASIDs to map VM faults
+to processes.
+
+Warn if an IRQ doesn't have a valid PASID (indicating a firmware bug).
+
+Change-Id: I34ca5b4b03ffe51a23d03490fc65b6c946bbbf51
+Suggested-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Suggested-by: Oak Zeng <Oak.Zeng@amd.com>
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c | 33 +++++++++---------
+ drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c | 44 ++++++++++++++----------
+ 2 files changed, 43 insertions(+), 34 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
+index 1261432..5d2475d 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
++++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
+@@ -24,15 +24,6 @@
+ #include "kfd_events.h"
+ #include "cik_int.h"
+
+-static bool is_cpc_vm_fault(struct kfd_dev *dev, uint32_t source_id,
+- unsigned int vmid)
+-{
+- return (source_id == CIK_INTSRC_GFX_PAGE_INV_FAULT ||
+- source_id == CIK_INTSRC_GFX_MEM_PROT_FAULT) &&
+- vmid >= dev->vm_info.first_vmid_kfd &&
+- vmid <= dev->vm_info.last_vmid_kfd;
+-}
+-
+ static bool cik_event_interrupt_isr(struct kfd_dev *dev,
+ const uint32_t *ih_ring_entry,
+ uint32_t *patched_ihre,
+@@ -67,16 +58,26 @@ static bool cik_event_interrupt_isr(struct kfd_dev *dev,
+ vmid <= dev->vm_info.last_vmid_kfd;
+ }
+
++ /* Only handle interrupts from KFD VMIDs */
+ vmid = (ihre->ring_id & 0x0000ff00) >> 8;
++ if (vmid < dev->vm_info.first_vmid_kfd ||
++ vmid > dev->vm_info.last_vmid_kfd)
++ return 0;
++
++ /* If there is no valid PASID, it's likely a firmware bug */
+ pasid = (ihre->ring_id & 0xffff0000) >> 16;
++ if (WARN_ONCE(pasid == 0, "FW bug: No PASID in KFD interrupt"))
++ return 0;
+
+- /* Do not process in ISR, just request it to be forwarded to WQ. */
+- return (pasid != 0) &&
+- (ihre->source_id == CIK_INTSRC_CP_END_OF_PIPE ||
+- ihre->source_id == CIK_INTSRC_SDMA_TRAP ||
+- ihre->source_id == CIK_INTSRC_SQ_INTERRUPT_MSG ||
+- ihre->source_id == CIK_INTSRC_CP_BAD_OPCODE ||
+- is_cpc_vm_fault(dev, ihre->source_id, vmid));
++ /* Interrupt types we care about: various signals and faults.
++ * They will be forwarded to a work queue (see below).
++ */
++ return ihre->source_id == CIK_INTSRC_CP_END_OF_PIPE ||
++ ihre->source_id == CIK_INTSRC_SDMA_TRAP ||
++ ihre->source_id == CIK_INTSRC_SQ_INTERRUPT_MSG ||
++ ihre->source_id == CIK_INTSRC_CP_BAD_OPCODE ||
++ ihre->source_id == CIK_INTSRC_GFX_PAGE_INV_FAULT ||
++ ihre->source_id == CIK_INTSRC_GFX_MEM_PROT_FAULT;
+ }
+
+ static void cik_event_interrupt_wq(struct kfd_dev *dev,
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+index 5217e51..f836897 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+@@ -31,29 +31,37 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
+ bool *patched_flag)
+ {
+ uint16_t source_id, client_id, pasid, vmid;
++ const uint32_t *data = ih_ring_entry;
+
+- source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry);
+- client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
+- pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);
++ /* Only handle interrupts from KFD VMIDs */
+ vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
++ if (vmid < dev->vm_info.first_vmid_kfd ||
++ vmid > dev->vm_info.last_vmid_kfd)
++ return 0;
++
++ /* If there is no valid PASID, it's likely a firmware bug */
++ pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);
++ if (WARN_ONCE(pasid == 0, "FW bug: No PASID in KFD interrupt"))
++ return 0;
+
+- if (pasid) {
+- const uint32_t *data = ih_ring_entry;
++ source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry);
++ client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
+
+- pr_debug("client id 0x%x, source id %d, pasid 0x%x. raw data:\n",
+- client_id, source_id, pasid);
+- pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
+- data[0], data[1], data[2], data[3],
+- data[4], data[5], data[6], data[7]);
+- }
++ pr_debug("client id 0x%x, source id %d, pasid 0x%x. raw data:\n",
++ client_id, source_id, pasid);
++ pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
++ data[0], data[1], data[2], data[3],
++ data[4], data[5], data[6], data[7]);
+
+- return (pasid != 0) &&
+- (source_id == SOC15_INTSRC_CP_END_OF_PIPE ||
+- source_id == SOC15_INTSRC_SDMA_TRAP ||
+- source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG ||
+- source_id == SOC15_INTSRC_CP_BAD_OPCODE ||
+- client_id == SOC15_IH_CLIENTID_VMC ||
+- client_id == SOC15_IH_CLIENTID_UTCL2);
++ /* Interrupt types we care about: various signals and faults.
++ * They will be forwarded to a work queue (see below).
++ */
++ return source_id == SOC15_INTSRC_CP_END_OF_PIPE ||
++ source_id == SOC15_INTSRC_SDMA_TRAP ||
++ source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG ||
++ source_id == SOC15_INTSRC_CP_BAD_OPCODE ||
++ client_id == SOC15_IH_CLIENTID_VMC ||
++ client_id == SOC15_IH_CLIENTID_UTCL2;
+ }
+
+ static void event_interrupt_wq_v9(struct kfd_dev *dev,
+--
+2.7.4
+
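For illustration only (not part of the patch): the ISR changes above reduce to a three-step filter — drop entries whose VMID is outside the KFD range, warn about and drop entries with no PASID, then forward only a short whitelist of source IDs. A minimal standalone C sketch of that filter, using made-up constants and a simplified entry layout rather than the real IH ring format:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical values; the real ranges and source IDs come from the
     * device VM info and the cik_int.h / soc15 headers. */
    #define FIRST_VMID_KFD          8
    #define LAST_VMID_KFD           15
    #define INTSRC_CP_END_OF_PIPE   181
    #define INTSRC_SDMA_TRAP        224

    struct ih_entry {
            uint16_t vmid;
            uint16_t pasid;
            uint16_t source_id;
    };

    static bool kfd_wants_interrupt(const struct ih_entry *e)
    {
            /* Only handle interrupts from KFD VMIDs. */
            if (e->vmid < FIRST_VMID_KFD || e->vmid > LAST_VMID_KFD)
                    return false;
            /* A zero PASID at this point indicates a firmware bug. */
            if (e->pasid == 0) {
                    fprintf(stderr, "FW bug: no PASID in KFD interrupt\n");
                    return false;
            }
            /* Forward only the signal/fault sources we care about. */
            return e->source_id == INTSRC_CP_END_OF_PIPE ||
                   e->source_id == INTSRC_SDMA_TRAP;
    }

    int main(void)
    {
            struct ih_entry good = { 9, 1, INTSRC_SDMA_TRAP };
            struct ih_entry bad  = { 2, 1, INTSRC_SDMA_TRAP };

            printf("good: %d, bad: %d\n", kfd_wants_interrupt(&good),
                   kfd_wants_interrupt(&bad));
            return 0;
    }
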
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5674-drm-amdkfd-Don-t-use-kmap_atomic.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5674-drm-amdkfd-Don-t-use-kmap_atomic.patch
new file mode 100644
index 00000000..1e5d8b68
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5674-drm-amdkfd-Don-t-use-kmap_atomic.patch
@@ -0,0 +1,52 @@
+From 8d0304033d057267bdd902ae3bc2d05619b8f123 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Fri, 4 May 2018 18:59:38 -0400
+Subject: [PATCH 5674/5725] drm/amdkfd: Don't use kmap_atomic
+
+kmap_atomic is an optimization that's only useful for CONFIG_HIGHMEM
+which isn't applicable on x86_64. It also requires a lot more care
+because it disables page faults. This causes problems with
+copy_from_user in the atomic section unless pages are faulted in
+explicitly.
+
+Since KFD only supports 64-bit kernels, we don't need to handle the
+complication of HIGHMEM and kmap_atomic. Use plain kmap instead.
+
+Bug: SWDEV-138474
+
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+
+Conflicts:
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+
+Change-Id: I6ecbd7c10fb8b589dc1ab8af8795ff3e6d416db1
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index 73338f0..9ec224c 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -2100,7 +2100,7 @@ static int kfd_copy_userptr_bos(struct cma_iter *si, struct cma_iter *di,
+
+ for (i = 0; i < nl; i++) {
+ unsigned int n;
+- void *kaddr = kmap_atomic(process_pages[i]);
++ void *kaddr = kmap(process_pages[i]);
+
+ if (cma_write) {
+ n = copy_from_user(kaddr+offset_in_page,
+@@ -2111,7 +2111,7 @@ static int kfd_copy_userptr_bos(struct cma_iter *si, struct cma_iter *di,
+ kaddr+offset_in_page,
+ copy_size);
+ }
+- kunmap_atomic(kaddr);
++ kunmap(kaddr);
+ if (n) {
+ ret = -EFAULT;
+ break;
+--
+2.7.4
+
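For illustration only (not part of the patch): the pattern the patch moves to is the sleepable kmap()/kunmap() pair around copy_from_user(), which is safe because nothing in the loop needs to run with page faults disabled. A simplified kernel-context sketch of such a copy loop — a hypothetical helper, not the actual kfd_copy_userptr_bos() code, and only buildable inside a kernel module:

    #include <linux/errno.h>
    #include <linux/highmem.h>
    #include <linux/kernel.h>
    #include <linux/mm.h>
    #include <linux/uaccess.h>

    /* Copy 'size' bytes from a user buffer into an array of pinned pages.
     * kmap() may sleep, so copy_from_user() can fault user pages in as needed. */
    static int copy_user_to_pages(struct page **pages, unsigned int npages,
                                  const void __user *src, size_t size)
    {
            size_t done = 0;
            unsigned int i;

            for (i = 0; i < npages && done < size; i++) {
                    size_t chunk = size - done;
                    void *kaddr;
                    unsigned long left;

                    if (chunk > PAGE_SIZE)
                            chunk = PAGE_SIZE;

                    kaddr = kmap(pages[i]);         /* sleepable mapping */
                    left = copy_from_user(kaddr, src + done, chunk);
                    kunmap(pages[i]);
                    if (left)
                            return -EFAULT;
                    done += chunk;
            }
            return 0;
    }
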
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5675-drm-amdkcl-fixed-can-t-find-kgd_kfd_interface.h-head.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5675-drm-amdkcl-fixed-can-t-find-kgd_kfd_interface.h-head.patch
new file mode 100644
index 00000000..37eefc70
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5675-drm-amdkcl-fixed-can-t-find-kgd_kfd_interface.h-head.patch
@@ -0,0 +1,32 @@
+From d986ae58523ec91ee9a6b59370f54f5ec1e57323 Mon Sep 17 00:00:00 2001
+From: Kevin Wang <Kevin1.Wang@amd.com>
+Date: Wed, 9 May 2018 10:12:21 +0800
+Subject: [PATCH 5675/5725] drm/amdkcl: fixed can't find kgd_kfd_interface.h
+ header error
+
+Change-Id: Ic38c5e605ba98183f4efaf68e0523dfa8aa22d8c
+Signed-off-by: Kevin Wang <Kevin1.Wang@amd.com>
+Reviewed-by: Le Ma <Le.Ma@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/Makefile | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile
+index 66f1921..4804f9c 100644
+--- a/drivers/gpu/drm/amd/amdkfd/Makefile
++++ b/drivers/gpu/drm/amd/amdkfd/Makefile
+@@ -23,8 +23,9 @@
+ # Makefile for Heterogenous System Architecture support for AMD GPU devices
+ #
+
+-ccflags-y := -Idrivers/gpu/drm/amd/include/ \
+- -Idrivers/gpu/drm/amd/include/asic_reg
++FULL_AMD_PATH=$(src)/..
++ccflags-y := -I$(FULL_AMD_PATH)/include \
++ -I$(FULL_AMD_PATH)/include/asic_reg
+
+ amdkfd-y := kfd_module.o kfd_device.o kfd_chardev.o kfd_topology.o \
+ kfd_pasid.o kfd_doorbell.o kfd_flat_memory.o \
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5676-drm-amdkfd-Fix-kernel-queue-64-bit-doorbell-offset-c.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5676-drm-amdkfd-Fix-kernel-queue-64-bit-doorbell-offset-c.patch
new file mode 100644
index 00000000..475b2701
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5676-drm-amdkfd-Fix-kernel-queue-64-bit-doorbell-offset-c.patch
@@ -0,0 +1,45 @@
+From ee746a476c690b3778c5f46ea9128387772b16cf Mon Sep 17 00:00:00 2001
+From: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Date: Mon, 7 May 2018 16:50:26 -0400
+Subject: [PATCH 5676/5725] drm/amdkfd: Fix kernel queue 64 bit doorbell offset
+ calculation
+
+The bitmap index calculation should reverse the logic used on allocation,
+so that it clears the same bit that was set on allocation.
+
+Change-Id: Idae2b7df4eef7f51f61294223e883916ded619ed
+Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+index c3744d8..ebe79bf 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+@@ -188,9 +188,9 @@ void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
+ *doorbell_off = kfd->doorbell_id_offset + inx;
+
+ pr_debug("Get kernel queue doorbell\n"
+- " doorbell offset == 0x%08X\n"
+- " kernel address == %p\n",
+- *doorbell_off, (kfd->doorbell_kernel_ptr + inx));
++ " doorbell offset == 0x%08X\n"
++ " doorbell index == 0x%x\n",
++ *doorbell_off, inx);
+
+ return kfd->doorbell_kernel_ptr + inx;
+ }
+@@ -199,7 +199,8 @@ void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr)
+ {
+ unsigned int inx;
+
+- inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr);
++ inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr)
++ * sizeof(u32) / kfd->device_info->doorbell_size;
+
+ mutex_lock(&kfd->doorbell_mutex);
+ __clear_bit(inx, kfd->doorbell_available_index);
+--
+2.7.4
+
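For illustration only (not part of the patch): the fix is pure arithmetic. Allocation turns a bitmap index into a u32-granular pointer offset (scaled by doorbell_size / sizeof(u32), per the commit message's "logic used on allocation"), so release has to apply the inverse — multiply by sizeof(u32) and divide by doorbell_size — to recover the same bit. A standalone C illustration with hypothetical 64-bit doorbells:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            const unsigned int doorbell_size = 8;   /* bytes, e.g. 64-bit doorbells */
            uint32_t doorbells[64] = { 0 };         /* stand-in for doorbell_kernel_ptr */
            unsigned int bit = 5;                   /* index allocated from the bitmap */

            /* Allocation: scale the bitmap index to a u32 offset. */
            uint32_t *db_addr = doorbells + bit * (doorbell_size / sizeof(uint32_t));

            /* Release: reverse the scaling to recover the bitmap index. */
            unsigned int inx = (unsigned int)(db_addr - doorbells)
                               * sizeof(uint32_t) / doorbell_size;

            printf("allocated bit %u, recovered bit %u\n", bit, inx);
            return 0;
    }
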
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5677-drm-amdkfd-Fix-race-between-scheduler-and-context-re.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5677-drm-amdkfd-Fix-race-between-scheduler-and-context-re.patch
new file mode 100644
index 00000000..d00fbc53
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5677-drm-amdkfd-Fix-race-between-scheduler-and-context-re.patch
@@ -0,0 +1,665 @@
+From cd4d0d2c2eeacf28f278ca64c36072f31b293080 Mon Sep 17 00:00:00 2001
+From: Jay Cornwall <Jay.Cornwall@amd.com>
+Date: Fri, 11 May 2018 10:58:12 -0500
+Subject: [PATCH 5677/5725] drm/amdkfd: Fix race between scheduler and context
+ restore
+
+The scheduler may raise SQ_WAVE_STATUS.SPI_PRIO via SQ_CMD before
+context restore has completed. Restoring SPI_PRIO=0 after this point
+may cause context save to fail as the lower priority wavefronts
+are not selected for execution among spin-waiting wavefronts.
+
+Leave SPI_PRIO at its SPI-initialized or scheduler-raised value.
+
+v2: Also fix race with exception handler
+
+Change-Id: I82ad581824d956587f4110cdebc39e06c438d62f
+Signed-off-by: Jay Cornwall <Jay.Cornwall@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h | 458 +++++++++++----------
+ .../gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm | 18 +-
+ .../gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm | 16 +-
+ 3 files changed, 262 insertions(+), 230 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+index f68aef0..3621efb 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+@@ -21,18 +21,21 @@
+ */
+
+ static const uint32_t cwsr_trap_gfx8_hex[] = {
+- 0xbf820001, 0xbf820125,
++ 0xbf820001, 0xbf82012b,
+ 0xb8f4f802, 0x89748674,
+ 0xb8f5f803, 0x8675ff75,
+- 0x00000400, 0xbf850011,
++ 0x00000400, 0xbf850017,
+ 0xc00a1e37, 0x00000000,
+ 0xbf8c007f, 0x87777978,
+- 0xbf840002, 0xb974f802,
+- 0xbe801d78, 0xb8f5f803,
+- 0x8675ff75, 0x000001ff,
+- 0xbf850002, 0x80708470,
+- 0x82718071, 0x8671ff71,
+- 0x0000ffff, 0xb974f802,
++ 0xbf840005, 0x8f728374,
++ 0xb972e0c2, 0xbf800002,
++ 0xb9740002, 0xbe801d78,
++ 0xb8f5f803, 0x8675ff75,
++ 0x000001ff, 0xbf850002,
++ 0x80708470, 0x82718071,
++ 0x8671ff71, 0x0000ffff,
++ 0x8f728374, 0xb972e0c2,
++ 0xbf800002, 0xb9740002,
+ 0xbe801f70, 0xb8f5f803,
+ 0x8675ff75, 0x00000100,
+ 0xbf840006, 0xbefa0080,
+@@ -168,7 +171,7 @@ static const uint32_t cwsr_trap_gfx8_hex[] = {
+ 0x807c847c, 0x806eff6e,
+ 0x00000400, 0xbf0a757c,
+ 0xbf85ffef, 0xbf9c0000,
+- 0xbf8200ca, 0xbef8007e,
++ 0xbf8200cd, 0xbef8007e,
+ 0x8679ff7f, 0x0000ffff,
+ 0x8779ff79, 0x00040000,
+ 0xbefa0080, 0xbefb00ff,
+@@ -268,16 +271,18 @@ static const uint32_t cwsr_trap_gfx8_hex[] = {
+ 0x8f739773, 0xb976f807,
+ 0x8671ff71, 0x0000ffff,
+ 0x86fe7e7e, 0x86ea6a6a,
+- 0xb974f802, 0xbf8a0000,
+- 0x95807370, 0xbf810000,
++ 0x8f768374, 0xb976e0c2,
++ 0xbf800002, 0xb9740002,
++ 0xbf8a0000, 0x95807370,
++ 0xbf810000, 0x00000000,
+ };
+
+
+ static const uint32_t cwsr_trap_gfx9_hex[] = {
+- 0xbf820001, 0xbf82015a,
++ 0xbf820001, 0xbf82015d,
+ 0xb8f8f802, 0x89788678,
+ 0xb8f1f803, 0x866eff71,
+- 0x00000400, 0xbf850034,
++ 0x00000400, 0xbf850037,
+ 0x866eff71, 0x00000800,
+ 0xbf850003, 0x866eff71,
+ 0x00000100, 0xbf840008,
+@@ -303,258 +308,261 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
+ 0x8f6e8b77, 0x866eff6e,
+ 0x001f8000, 0xb96ef807,
+ 0x86fe7e7e, 0x86ea6a6a,
+- 0xb978f802, 0xbe801f6c,
+- 0x866dff6d, 0x0000ffff,
+- 0xbef00080, 0xb9700283,
+- 0xb8f02407, 0x8e709c70,
+- 0x876d706d, 0xb8f003c7,
+- 0x8e709b70, 0x876d706d,
+- 0xb8f0f807, 0x8670ff70,
+- 0x00007fff, 0xb970f807,
+- 0xbeee007e, 0xbeef007f,
+- 0xbefe0180, 0xbf900004,
+- 0x87708478, 0xb970f802,
+- 0xbf8e0002, 0xbf88fffe,
+- 0xb8f02a05, 0x80708170,
+- 0x8e708a70, 0xb8f11605,
+- 0x80718171, 0x8e718671,
+- 0x80707170, 0x80707e70,
+- 0x8271807f, 0x8671ff71,
+- 0x0000ffff, 0xc0471cb8,
+- 0x00000040, 0xbf8cc07f,
+- 0xc04b1d38, 0x00000048,
+- 0xbf8cc07f, 0xc0431e78,
+- 0x00000058, 0xbf8cc07f,
+- 0xc0471eb8, 0x0000005c,
+- 0xbf8cc07f, 0xbef4007e,
+- 0x8675ff7f, 0x0000ffff,
+- 0x8775ff75, 0x00040000,
+- 0xbef60080, 0xbef700ff,
+- 0x00807fac, 0x8670ff7f,
+- 0x08000000, 0x8f708370,
+- 0x87777077, 0x8670ff7f,
+- 0x70000000, 0x8f708170,
+- 0x87777077, 0xbefb007c,
+- 0xbefa0080, 0xb8fa2a05,
+- 0x807a817a, 0x8e7a8a7a,
+- 0xb8f01605, 0x80708170,
+- 0x8e708670, 0x807a707a,
+- 0xbef60084, 0xbef600ff,
+- 0x01000000, 0xbefe007c,
+- 0xbefc007a, 0xc0611efa,
+- 0x0000007c, 0xbf8cc07f,
+- 0x807a847a, 0xbefc007e,
++ 0x8f6e8378, 0xb96ee0c2,
++ 0xbf800002, 0xb9780002,
++ 0xbe801f6c, 0x866dff6d,
++ 0x0000ffff, 0xbef00080,
++ 0xb9700283, 0xb8f02407,
++ 0x8e709c70, 0x876d706d,
++ 0xb8f003c7, 0x8e709b70,
++ 0x876d706d, 0xb8f0f807,
++ 0x8670ff70, 0x00007fff,
++ 0xb970f807, 0xbeee007e,
++ 0xbeef007f, 0xbefe0180,
++ 0xbf900004, 0x87708478,
++ 0xb970f802, 0xbf8e0002,
++ 0xbf88fffe, 0xb8f02a05,
++ 0x80708170, 0x8e708a70,
++ 0xb8f11605, 0x80718171,
++ 0x8e718671, 0x80707170,
++ 0x80707e70, 0x8271807f,
++ 0x8671ff71, 0x0000ffff,
++ 0xc0471cb8, 0x00000040,
++ 0xbf8cc07f, 0xc04b1d38,
++ 0x00000048, 0xbf8cc07f,
++ 0xc0431e78, 0x00000058,
++ 0xbf8cc07f, 0xc0471eb8,
++ 0x0000005c, 0xbf8cc07f,
++ 0xbef4007e, 0x8675ff7f,
++ 0x0000ffff, 0x8775ff75,
++ 0x00040000, 0xbef60080,
++ 0xbef700ff, 0x00807fac,
++ 0x8670ff7f, 0x08000000,
++ 0x8f708370, 0x87777077,
++ 0x8670ff7f, 0x70000000,
++ 0x8f708170, 0x87777077,
++ 0xbefb007c, 0xbefa0080,
++ 0xb8fa2a05, 0x807a817a,
++ 0x8e7a8a7a, 0xb8f01605,
++ 0x80708170, 0x8e708670,
++ 0x807a707a, 0xbef60084,
++ 0xbef600ff, 0x01000000,
+ 0xbefe007c, 0xbefc007a,
+- 0xc0611b3a, 0x0000007c,
++ 0xc0611efa, 0x0000007c,
+ 0xbf8cc07f, 0x807a847a,
+ 0xbefc007e, 0xbefe007c,
+- 0xbefc007a, 0xc0611b7a,
++ 0xbefc007a, 0xc0611b3a,
+ 0x0000007c, 0xbf8cc07f,
+ 0x807a847a, 0xbefc007e,
+ 0xbefe007c, 0xbefc007a,
+- 0xc0611bba, 0x0000007c,
++ 0xc0611b7a, 0x0000007c,
+ 0xbf8cc07f, 0x807a847a,
+ 0xbefc007e, 0xbefe007c,
+- 0xbefc007a, 0xc0611bfa,
++ 0xbefc007a, 0xc0611bba,
+ 0x0000007c, 0xbf8cc07f,
+ 0x807a847a, 0xbefc007e,
+ 0xbefe007c, 0xbefc007a,
+- 0xc0611e3a, 0x0000007c,
+- 0xbf8cc07f, 0x807a847a,
+- 0xbefc007e, 0xb8f1f803,
+- 0xbefe007c, 0xbefc007a,
+- 0xc0611c7a, 0x0000007c,
++ 0xc0611bfa, 0x0000007c,
+ 0xbf8cc07f, 0x807a847a,
+ 0xbefc007e, 0xbefe007c,
+- 0xbefc007a, 0xc0611a3a,
++ 0xbefc007a, 0xc0611e3a,
++ 0x0000007c, 0xbf8cc07f,
++ 0x807a847a, 0xbefc007e,
++ 0xb8f1f803, 0xbefe007c,
++ 0xbefc007a, 0xc0611c7a,
+ 0x0000007c, 0xbf8cc07f,
+ 0x807a847a, 0xbefc007e,
+ 0xbefe007c, 0xbefc007a,
+- 0xc0611a7a, 0x0000007c,
+- 0xbf8cc07f, 0x807a847a,
+- 0xbefc007e, 0xb8fbf801,
+- 0xbefe007c, 0xbefc007a,
+- 0xc0611efa, 0x0000007c,
++ 0xc0611a3a, 0x0000007c,
+ 0xbf8cc07f, 0x807a847a,
+- 0xbefc007e, 0x8670ff7f,
+- 0x04000000, 0xbeef0080,
+- 0x876f6f70, 0xb8fa2a05,
++ 0xbefc007e, 0xbefe007c,
++ 0xbefc007a, 0xc0611a7a,
++ 0x0000007c, 0xbf8cc07f,
++ 0x807a847a, 0xbefc007e,
++ 0xb8fbf801, 0xbefe007c,
++ 0xbefc007a, 0xc0611efa,
++ 0x0000007c, 0xbf8cc07f,
++ 0x807a847a, 0xbefc007e,
++ 0x8670ff7f, 0x04000000,
++ 0xbeef0080, 0x876f6f70,
++ 0xb8fa2a05, 0x807a817a,
++ 0x8e7a8a7a, 0xb8f11605,
++ 0x80718171, 0x8e718471,
++ 0x8e768271, 0xbef600ff,
++ 0x01000000, 0xbef20174,
++ 0x80747a74, 0x82758075,
++ 0xbefc0080, 0xbf800000,
++ 0xbe802b00, 0xbe822b02,
++ 0xbe842b04, 0xbe862b06,
++ 0xbe882b08, 0xbe8a2b0a,
++ 0xbe8c2b0c, 0xbe8e2b0e,
++ 0xc06b003a, 0x00000000,
++ 0xbf8cc07f, 0xc06b013a,
++ 0x00000010, 0xbf8cc07f,
++ 0xc06b023a, 0x00000020,
++ 0xbf8cc07f, 0xc06b033a,
++ 0x00000030, 0xbf8cc07f,
++ 0x8074c074, 0x82758075,
++ 0x807c907c, 0xbf0a717c,
++ 0xbf85ffe7, 0xbef40172,
++ 0xbefa0080, 0xbefe00c1,
++ 0xbeff00c1, 0xbee80080,
++ 0xbee90080, 0xbef600ff,
++ 0x01000000, 0xe0724000,
++ 0x7a1d0000, 0xe0724100,
++ 0x7a1d0100, 0xe0724200,
++ 0x7a1d0200, 0xe0724300,
++ 0x7a1d0300, 0xbefe00c1,
++ 0xbeff00c1, 0xb8f14306,
++ 0x8671c171, 0xbf84002c,
++ 0xbf8a0000, 0x8670ff6f,
++ 0x04000000, 0xbf840028,
++ 0x8e718671, 0x8e718271,
++ 0xbef60071, 0xb8fa2a05,
+ 0x807a817a, 0x8e7a8a7a,
+- 0xb8f11605, 0x80718171,
+- 0x8e718471, 0x8e768271,
++ 0xb8f01605, 0x80708170,
++ 0x8e708670, 0x807a707a,
++ 0x807aff7a, 0x00000080,
+ 0xbef600ff, 0x01000000,
+- 0xbef20174, 0x80747a74,
+- 0x82758075, 0xbefc0080,
+- 0xbf800000, 0xbe802b00,
+- 0xbe822b02, 0xbe842b04,
+- 0xbe862b06, 0xbe882b08,
+- 0xbe8a2b0a, 0xbe8c2b0c,
+- 0xbe8e2b0e, 0xc06b003a,
+- 0x00000000, 0xbf8cc07f,
+- 0xc06b013a, 0x00000010,
+- 0xbf8cc07f, 0xc06b023a,
+- 0x00000020, 0xbf8cc07f,
+- 0xc06b033a, 0x00000030,
+- 0xbf8cc07f, 0x8074c074,
+- 0x82758075, 0x807c907c,
+- 0xbf0a717c, 0xbf85ffe7,
+- 0xbef40172, 0xbefa0080,
++ 0xbefc0080, 0xd28c0002,
++ 0x000100c1, 0xd28d0003,
++ 0x000204c1, 0xd1060002,
++ 0x00011103, 0x7e0602ff,
++ 0x00000200, 0xbefc00ff,
++ 0x00010000, 0xbe800077,
++ 0x8677ff77, 0xff7fffff,
++ 0x8777ff77, 0x00058000,
++ 0xd8ec0000, 0x00000002,
++ 0xbf8cc07f, 0xe0765000,
++ 0x7a1d0002, 0x68040702,
++ 0xd0c9006a, 0x0000e302,
++ 0xbf87fff7, 0xbef70000,
++ 0xbefa00ff, 0x00000400,
+ 0xbefe00c1, 0xbeff00c1,
+- 0xbee80080, 0xbee90080,
++ 0xb8f12a05, 0x80718171,
++ 0x8e718271, 0x8e768871,
+ 0xbef600ff, 0x01000000,
++ 0xbefc0084, 0xbf0a717c,
++ 0xbf840015, 0xbf11017c,
++ 0x8071ff71, 0x00001000,
++ 0x7e000300, 0x7e020301,
++ 0x7e040302, 0x7e060303,
+ 0xe0724000, 0x7a1d0000,
+ 0xe0724100, 0x7a1d0100,
+ 0xe0724200, 0x7a1d0200,
+ 0xe0724300, 0x7a1d0300,
++ 0x807c847c, 0x807aff7a,
++ 0x00000400, 0xbf0a717c,
++ 0xbf85ffef, 0xbf9c0000,
++ 0xbf8200dc, 0xbef4007e,
++ 0x8675ff7f, 0x0000ffff,
++ 0x8775ff75, 0x00040000,
++ 0xbef60080, 0xbef700ff,
++ 0x00807fac, 0x866eff7f,
++ 0x08000000, 0x8f6e836e,
++ 0x87776e77, 0x866eff7f,
++ 0x70000000, 0x8f6e816e,
++ 0x87776e77, 0x866eff7f,
++ 0x04000000, 0xbf84001e,
+ 0xbefe00c1, 0xbeff00c1,
+- 0xb8f14306, 0x8671c171,
+- 0xbf84002c, 0xbf8a0000,
+- 0x8670ff6f, 0x04000000,
+- 0xbf840028, 0x8e718671,
+- 0x8e718271, 0xbef60071,
+- 0xb8fa2a05, 0x807a817a,
+- 0x8e7a8a7a, 0xb8f01605,
+- 0x80708170, 0x8e708670,
+- 0x807a707a, 0x807aff7a,
++ 0xb8ef4306, 0x866fc16f,
++ 0xbf840019, 0x8e6f866f,
++ 0x8e6f826f, 0xbef6006f,
++ 0xb8f82a05, 0x80788178,
++ 0x8e788a78, 0xb8ee1605,
++ 0x806e816e, 0x8e6e866e,
++ 0x80786e78, 0x8078ff78,
+ 0x00000080, 0xbef600ff,
+ 0x01000000, 0xbefc0080,
+- 0xd28c0002, 0x000100c1,
+- 0xd28d0003, 0x000204c1,
+- 0xd1060002, 0x00011103,
+- 0x7e0602ff, 0x00000200,
+- 0xbefc00ff, 0x00010000,
+- 0xbe800077, 0x8677ff77,
+- 0xff7fffff, 0x8777ff77,
+- 0x00058000, 0xd8ec0000,
+- 0x00000002, 0xbf8cc07f,
+- 0xe0765000, 0x7a1d0002,
+- 0x68040702, 0xd0c9006a,
+- 0x0000e302, 0xbf87fff7,
+- 0xbef70000, 0xbefa00ff,
+- 0x00000400, 0xbefe00c1,
+- 0xbeff00c1, 0xb8f12a05,
+- 0x80718171, 0x8e718271,
+- 0x8e768871, 0xbef600ff,
+- 0x01000000, 0xbefc0084,
+- 0xbf0a717c, 0xbf840015,
+- 0xbf11017c, 0x8071ff71,
+- 0x00001000, 0x7e000300,
++ 0xe0510000, 0x781d0000,
++ 0xe0510100, 0x781d0000,
++ 0x807cff7c, 0x00000200,
++ 0x8078ff78, 0x00000200,
++ 0xbf0a6f7c, 0xbf85fff6,
++ 0xbef80080, 0xbefe00c1,
++ 0xbeff00c1, 0xb8ef2a05,
++ 0x806f816f, 0x8e6f826f,
++ 0x8e76886f, 0xbef600ff,
++ 0x01000000, 0xbeee0078,
++ 0x8078ff78, 0x00000400,
++ 0xbefc0084, 0xbf11087c,
++ 0x806fff6f, 0x00008000,
++ 0xe0524000, 0x781d0000,
++ 0xe0524100, 0x781d0100,
++ 0xe0524200, 0x781d0200,
++ 0xe0524300, 0x781d0300,
++ 0xbf8c0f70, 0x7e000300,
+ 0x7e020301, 0x7e040302,
+- 0x7e060303, 0xe0724000,
+- 0x7a1d0000, 0xe0724100,
+- 0x7a1d0100, 0xe0724200,
+- 0x7a1d0200, 0xe0724300,
+- 0x7a1d0300, 0x807c847c,
+- 0x807aff7a, 0x00000400,
+- 0xbf0a717c, 0xbf85ffef,
+- 0xbf9c0000, 0xbf8200d9,
+- 0xbef4007e, 0x8675ff7f,
+- 0x0000ffff, 0x8775ff75,
+- 0x00040000, 0xbef60080,
+- 0xbef700ff, 0x00807fac,
+- 0x866eff7f, 0x08000000,
+- 0x8f6e836e, 0x87776e77,
+- 0x866eff7f, 0x70000000,
+- 0x8f6e816e, 0x87776e77,
+- 0x866eff7f, 0x04000000,
+- 0xbf84001e, 0xbefe00c1,
+- 0xbeff00c1, 0xb8ef4306,
+- 0x866fc16f, 0xbf840019,
+- 0x8e6f866f, 0x8e6f826f,
+- 0xbef6006f, 0xb8f82a05,
++ 0x7e060303, 0x807c847c,
++ 0x8078ff78, 0x00000400,
++ 0xbf0a6f7c, 0xbf85ffee,
++ 0xbf9c0000, 0xe0524000,
++ 0x6e1d0000, 0xe0524100,
++ 0x6e1d0100, 0xe0524200,
++ 0x6e1d0200, 0xe0524300,
++ 0x6e1d0300, 0xb8f82a05,
+ 0x80788178, 0x8e788a78,
+ 0xb8ee1605, 0x806e816e,
+ 0x8e6e866e, 0x80786e78,
+- 0x8078ff78, 0x00000080,
+- 0xbef600ff, 0x01000000,
+- 0xbefc0080, 0xe0510000,
+- 0x781d0000, 0xe0510100,
+- 0x781d0000, 0x807cff7c,
+- 0x00000200, 0x8078ff78,
+- 0x00000200, 0xbf0a6f7c,
+- 0xbf85fff6, 0xbef80080,
+- 0xbefe00c1, 0xbeff00c1,
+- 0xb8ef2a05, 0x806f816f,
+- 0x8e6f826f, 0x8e76886f,
+- 0xbef600ff, 0x01000000,
+- 0xbeee0078, 0x8078ff78,
+- 0x00000400, 0xbefc0084,
+- 0xbf11087c, 0x806fff6f,
+- 0x00008000, 0xe0524000,
+- 0x781d0000, 0xe0524100,
+- 0x781d0100, 0xe0524200,
+- 0x781d0200, 0xe0524300,
+- 0x781d0300, 0xbf8c0f70,
+- 0x7e000300, 0x7e020301,
+- 0x7e040302, 0x7e060303,
+- 0x807c847c, 0x8078ff78,
+- 0x00000400, 0xbf0a6f7c,
+- 0xbf85ffee, 0xbf9c0000,
+- 0xe0524000, 0x6e1d0000,
+- 0xe0524100, 0x6e1d0100,
+- 0xe0524200, 0x6e1d0200,
+- 0xe0524300, 0x6e1d0300,
++ 0x80f8c078, 0xb8ef1605,
++ 0x806f816f, 0x8e6f846f,
++ 0x8e76826f, 0xbef600ff,
++ 0x01000000, 0xbefc006f,
++ 0xc031003a, 0x00000078,
++ 0x80f8c078, 0xbf8cc07f,
++ 0x80fc907c, 0xbf800000,
++ 0xbe802d00, 0xbe822d02,
++ 0xbe842d04, 0xbe862d06,
++ 0xbe882d08, 0xbe8a2d0a,
++ 0xbe8c2d0c, 0xbe8e2d0e,
++ 0xbf06807c, 0xbf84fff0,
+ 0xb8f82a05, 0x80788178,
+ 0x8e788a78, 0xb8ee1605,
+ 0x806e816e, 0x8e6e866e,
+- 0x80786e78, 0x80f8c078,
+- 0xb8ef1605, 0x806f816f,
+- 0x8e6f846f, 0x8e76826f,
++ 0x80786e78, 0xbef60084,
+ 0xbef600ff, 0x01000000,
+- 0xbefc006f, 0xc031003a,
+- 0x00000078, 0x80f8c078,
+- 0xbf8cc07f, 0x80fc907c,
+- 0xbf800000, 0xbe802d00,
+- 0xbe822d02, 0xbe842d04,
+- 0xbe862d06, 0xbe882d08,
+- 0xbe8a2d0a, 0xbe8c2d0c,
+- 0xbe8e2d0e, 0xbf06807c,
+- 0xbf84fff0, 0xb8f82a05,
+- 0x80788178, 0x8e788a78,
+- 0xb8ee1605, 0x806e816e,
+- 0x8e6e866e, 0x80786e78,
+- 0xbef60084, 0xbef600ff,
+- 0x01000000, 0xc0211bfa,
++ 0xc0211bfa, 0x00000078,
++ 0x80788478, 0xc0211b3a,
+ 0x00000078, 0x80788478,
+- 0xc0211b3a, 0x00000078,
+- 0x80788478, 0xc0211b7a,
++ 0xc0211b7a, 0x00000078,
++ 0x80788478, 0xc0211eba,
+ 0x00000078, 0x80788478,
+- 0xc0211eba, 0x00000078,
+- 0x80788478, 0xc0211efa,
++ 0xc0211efa, 0x00000078,
++ 0x80788478, 0xc0211c3a,
+ 0x00000078, 0x80788478,
+- 0xc0211c3a, 0x00000078,
+- 0x80788478, 0xc0211c7a,
++ 0xc0211c7a, 0x00000078,
++ 0x80788478, 0xc0211a3a,
+ 0x00000078, 0x80788478,
+- 0xc0211a3a, 0x00000078,
+- 0x80788478, 0xc0211a7a,
++ 0xc0211a7a, 0x00000078,
++ 0x80788478, 0xc0211cfa,
+ 0x00000078, 0x80788478,
+- 0xc0211cfa, 0x00000078,
+- 0x80788478, 0xbf8cc07f,
+- 0xbefc006f, 0xbefe007a,
+- 0xbeff007b, 0x866f71ff,
+- 0x000003ff, 0xb96f4803,
+- 0x866f71ff, 0xfffff800,
+- 0x8f6f8b6f, 0xb96fa2c3,
+- 0xb973f801, 0xb8ee2a05,
+- 0x806e816e, 0x8e6e8a6e,
+- 0xb8ef1605, 0x806f816f,
+- 0x8e6f866f, 0x806e6f6e,
+- 0x806e746e, 0x826f8075,
+- 0x866fff6f, 0x0000ffff,
+- 0xc0071cb7, 0x00000040,
+- 0xc00b1d37, 0x00000048,
+- 0xc0031e77, 0x00000058,
+- 0xc0071eb7, 0x0000005c,
+- 0xbf8cc07f, 0x866fff6d,
+- 0xf0000000, 0x8f6f9c6f,
+- 0x8e6f906f, 0xbeee0080,
+- 0x876e6f6e, 0x866fff6d,
+- 0x08000000, 0x8f6f9b6f,
+- 0x8e6f8f6f, 0x876e6f6e,
+- 0x866fff70, 0x00800000,
+- 0x8f6f976f, 0xb96ef807,
+- 0x866dff6d, 0x0000ffff,
+- 0x86fe7e7e, 0x86ea6a6a,
+- 0xb970f802, 0xbf8a0000,
++ 0xbf8cc07f, 0xbefc006f,
++ 0xbefe007a, 0xbeff007b,
++ 0x866f71ff, 0x000003ff,
++ 0xb96f4803, 0x866f71ff,
++ 0xfffff800, 0x8f6f8b6f,
++ 0xb96fa2c3, 0xb973f801,
++ 0xb8ee2a05, 0x806e816e,
++ 0x8e6e8a6e, 0xb8ef1605,
++ 0x806f816f, 0x8e6f866f,
++ 0x806e6f6e, 0x806e746e,
++ 0x826f8075, 0x866fff6f,
++ 0x0000ffff, 0xc0071cb7,
++ 0x00000040, 0xc00b1d37,
++ 0x00000048, 0xc0031e77,
++ 0x00000058, 0xc0071eb7,
++ 0x0000005c, 0xbf8cc07f,
++ 0x866fff6d, 0xf0000000,
++ 0x8f6f9c6f, 0x8e6f906f,
++ 0xbeee0080, 0x876e6f6e,
++ 0x866fff6d, 0x08000000,
++ 0x8f6f9b6f, 0x8e6f8f6f,
++ 0x876e6f6e, 0x866fff70,
++ 0x00800000, 0x8f6f976f,
++ 0xb96ef807, 0x866dff6d,
++ 0x0000ffff, 0x86fe7e7e,
++ 0x86ea6a6a, 0x8f6e8370,
++ 0xb96ee0c2, 0xbf800002,
++ 0xb9700002, 0xbf8a0000,
+ 0x95806f6c, 0xbf810000,
+ };
+diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
+index a2a04bb..abe1a5d 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
+@@ -103,6 +103,10 @@ var SQ_WAVE_STATUS_INST_ATC_SHIFT = 23
+ var SQ_WAVE_STATUS_INST_ATC_MASK = 0x00800000
+ var SQ_WAVE_STATUS_SPI_PRIO_SHIFT = 1
+ var SQ_WAVE_STATUS_SPI_PRIO_MASK = 0x00000006
++var SQ_WAVE_STATUS_PRE_SPI_PRIO_SHIFT = 0
++var SQ_WAVE_STATUS_PRE_SPI_PRIO_SIZE = 1
++var SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT = 3
++var SQ_WAVE_STATUS_POST_SPI_PRIO_SIZE = 29
+
+ var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12
+ var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 9
+@@ -251,7 +255,7 @@ if (!EMU_RUN_HACK)
+ s_waitcnt lgkmcnt(0)
+ s_or_b32 ttmp7, ttmp8, ttmp9
+ s_cbranch_scc0 L_NO_NEXT_TRAP //next level trap handler not been set
+- s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status //restore HW status(SCC)
++ set_status_without_spi_prio(s_save_status, ttmp2) //restore HW status(SCC)
+ s_setpc_b64 [ttmp8,ttmp9] //jump to next level trap handler
+
+ L_NO_NEXT_TRAP:
+@@ -262,7 +266,7 @@ L_NO_NEXT_TRAP:
+ s_addc_u32 ttmp1, ttmp1, 0
+ L_EXCP_CASE:
+ s_and_b32 ttmp1, ttmp1, 0xFFFF
+- s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status //restore HW status(SCC)
++ set_status_without_spi_prio(s_save_status, ttmp2) //restore HW status(SCC)
+ s_rfe_b64 [ttmp0, ttmp1]
+ end
+ // ********* End handling of non-CWSR traps *******************
+@@ -1053,7 +1057,7 @@ end
+ s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS
+ s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
+ s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
+- s_setreg_b32 hwreg(HW_REG_STATUS), s_restore_status // SCC is included, which is changed by previous salu
++ set_status_without_spi_prio(s_restore_status, s_restore_tmp) // SCC is included, which is changed by previous salu
+
+ s_barrier //barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG //FIXME not performance-optimal at this time
+
+@@ -1134,3 +1138,11 @@ end
+ function get_hwreg_size_bytes
+ return 128 //HWREG size 128 bytes
+ end
++
++function set_status_without_spi_prio(status, tmp)
++ // Do not restore STATUS.SPI_PRIO since scheduler may have raised it.
++ s_lshr_b32 tmp, status, SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT
++ s_setreg_b32 hwreg(HW_REG_STATUS, SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT, SQ_WAVE_STATUS_POST_SPI_PRIO_SIZE), tmp
++ s_nop 0x2 // avoid S_SETREG => S_SETREG hazard
++ s_setreg_b32 hwreg(HW_REG_STATUS, SQ_WAVE_STATUS_PRE_SPI_PRIO_SHIFT, SQ_WAVE_STATUS_PRE_SPI_PRIO_SIZE), status
++end
+diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+index 998be96..0bb9c57 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+@@ -103,6 +103,10 @@ var SQ_WAVE_STATUS_INST_ATC_MASK = 0x00800000
+ var SQ_WAVE_STATUS_SPI_PRIO_SHIFT = 1
+ var SQ_WAVE_STATUS_SPI_PRIO_MASK = 0x00000006
+ var SQ_WAVE_STATUS_HALT_MASK = 0x2000
++var SQ_WAVE_STATUS_PRE_SPI_PRIO_SHIFT = 0
++var SQ_WAVE_STATUS_PRE_SPI_PRIO_SIZE = 1
++var SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT = 3
++var SQ_WAVE_STATUS_POST_SPI_PRIO_SIZE = 29
+
+ var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12
+ var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 9
+@@ -317,7 +321,7 @@ L_EXCP_CASE:
+ // Restore SQ_WAVE_STATUS.
+ s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
+ s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
+- s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status
++ set_status_without_spi_prio(s_save_status, ttmp2)
+
+ s_rfe_b64 [ttmp0, ttmp1]
+ end
+@@ -1120,7 +1124,7 @@ end
+ s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS
+ s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
+ s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
+- s_setreg_b32 hwreg(HW_REG_STATUS), s_restore_status // SCC is included, which is changed by previous salu
++ set_status_without_spi_prio(s_restore_status, s_restore_tmp) // SCC is included, which is changed by previous salu
+
+ s_barrier //barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG //FIXME not performance-optimal at this time
+
+@@ -1212,3 +1216,11 @@ function ack_sqc_store_workaround
+ s_waitcnt lgkmcnt(0)
+ end
+ end
++
++function set_status_without_spi_prio(status, tmp)
++ // Do not restore STATUS.SPI_PRIO since scheduler may have raised it.
++ s_lshr_b32 tmp, status, SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT
++ s_setreg_b32 hwreg(HW_REG_STATUS, SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT, SQ_WAVE_STATUS_POST_SPI_PRIO_SIZE), tmp
++ s_nop 0x2 // avoid S_SETREG => S_SETREG hazard
++ s_setreg_b32 hwreg(HW_REG_STATUS, SQ_WAVE_STATUS_PRE_SPI_PRIO_SHIFT, SQ_WAVE_STATUS_PRE_SPI_PRIO_SIZE), status
++end
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5678-drm-amdkfd-Add-debugfs-interface-to-trigger-HWS-hang.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5678-drm-amdkfd-Add-debugfs-interface-to-trigger-HWS-hang.patch
new file mode 100644
index 00000000..db026a15
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5678-drm-amdkfd-Add-debugfs-interface-to-trigger-HWS-hang.patch
@@ -0,0 +1,201 @@
+From 39af7ed025c18487e8759afd404582f2b029b59c Mon Sep 17 00:00:00 2001
+From: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Date: Tue, 8 May 2018 18:30:56 -0400
+Subject: [PATCH 5678/5725] drm/amdkfd: Add debugfs interface to trigger HWS
+ hang
+
+Change-Id: I7c08975b93a734d3075654edecd716db3a8ee7ea
+Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c | 48 ++++++++++++++++++++++
+ drivers/gpu/drm/amd/amdkfd/kfd_device.c | 23 +++++++++++
+ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 12 ++++++
+ drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 26 ++++++++++++
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 4 ++
+ 5 files changed, 113 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
+index 4bd6ebf..ab37d36 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
+@@ -21,6 +21,8 @@
+ */
+
+ #include <linux/debugfs.h>
++#include <linux/uaccess.h>
++
+ #include "kfd_priv.h"
+
+ static struct dentry *debugfs_root;
+@@ -32,6 +34,38 @@ static int kfd_debugfs_open(struct inode *inode, struct file *file)
+ return single_open(file, show, NULL);
+ }
+
++static ssize_t kfd_debugfs_hang_hws_write(struct file *file,
++ const char __user *user_buf, size_t size, loff_t *ppos)
++{
++ struct kfd_dev *dev;
++ char tmp[16];
++ uint32_t gpu_id;
++ int ret = -EINVAL;
++
++ memset(tmp, 0, 16);
++ if (size >= 16) {
++ pr_err("Invalid input for gpu id.\n");
++ goto out;
++ }
++ if (copy_from_user(tmp, user_buf, size)) {
++ ret = -EFAULT;
++ goto out;
++ }
++ if (kstrtoint(tmp, 10, &gpu_id)) {
++ pr_err("Invalid input for gpu id.\n");
++ goto out;
++ }
++ dev = kfd_device_by_id(gpu_id);
++ if (dev) {
++ kfd_debugfs_hang_hws(dev);
++ ret = size;
++ } else
++ pr_err("Cannot find device %d.\n", gpu_id);
++
++out:
++ return ret;
++}
++
+ static const struct file_operations kfd_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = kfd_debugfs_open,
+@@ -40,6 +74,15 @@ static const struct file_operations kfd_debugfs_fops = {
+ .release = single_release,
+ };
+
++static const struct file_operations kfd_debugfs_hang_hws_fops = {
++ .owner = THIS_MODULE,
++ .open = kfd_debugfs_open,
++ .read = seq_read,
++ .write = kfd_debugfs_hang_hws_write,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
+ void kfd_debugfs_init(void)
+ {
+ struct dentry *ent;
+@@ -65,6 +108,11 @@ void kfd_debugfs_init(void)
+ ent = debugfs_create_file("rls", S_IFREG | 0444, debugfs_root,
+ kfd_debugfs_rls_by_device,
+ &kfd_debugfs_fops);
++
++ ent = debugfs_create_file("hang_hws", S_IFREG | 0644, debugfs_root,
++ NULL,
++ &kfd_debugfs_hang_hws_fops);
++
+ if (!ent)
+ pr_warn("Failed to create rls in kfd debugfs\n");
+ }
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+index 9ce20da..f78ab7c 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -963,3 +963,26 @@ int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
+ kfree(mem_obj);
+ return 0;
+ }
++
++#if defined(CONFIG_DEBUG_FS)
++
++/* This function will send a package to HIQ to hang the HWS
++ * which will trigger a GPU reset and bring the HWS back to normal state
++ */
++int kfd_debugfs_hang_hws(struct kfd_dev *dev)
++{
++ int r = 0;
++
++ if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) {
++ pr_err("HWS is not enabled");
++ return -EINVAL;
++ }
++
++ r = pm_debugfs_hang_hws(&dev->dqm->packets);
++ if (!r)
++ r = dqm_debugfs_execute_queues(dev->dqm);
++
++ return r;
++}
++
++#endif
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index d7822e2..2c5d330 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -1855,4 +1855,16 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
+ return r;
+ }
+
++int dqm_debugfs_execute_queues(struct device_queue_manager *dqm)
++{
++ int r = 0;
++
++ mutex_lock(&dqm->lock);
++ dqm->active_runlist = true;
++ r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
++ mutex_unlock(&dqm->lock);
++
++ return r;
++}
++
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+index c317feb4..1092631 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+@@ -418,4 +418,30 @@ int pm_debugfs_runlist(struct seq_file *m, void *data)
+ return 0;
+ }
+
++int pm_debugfs_hang_hws(struct packet_manager *pm)
++{
++ uint32_t *buffer, size;
++ int r = 0;
++
++ size = pm->pmf->query_status_size;
++ mutex_lock(&pm->lock);
++ pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
++ size / sizeof(uint32_t), (unsigned int **)&buffer);
++ if (!buffer) {
++ pr_err("Failed to allocate buffer on kernel queue\n");
++ r = -ENOMEM;
++ goto out;
++ }
++ memset(buffer, 0x55, size);
++ pm->priv_queue->ops.submit_packet(pm->priv_queue);
++
++ pr_info("Submitting %x %x %x %x %x %x %x to HIQ to hang the HWS.",
++ buffer[0], buffer[1], buffer[2], buffer[3],
++ buffer[4], buffer[5], buffer[6]);
++out:
++ mutex_unlock(&pm->lock);
++ return r;
++}
++
++
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index 969dac2..ae12ab7 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -1107,6 +1107,10 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data);
+ int kfd_debugfs_rls_by_device(struct seq_file *m, void *data);
+ int pm_debugfs_runlist(struct seq_file *m, void *data);
+
++int kfd_debugfs_hang_hws(struct kfd_dev *dev);
++int pm_debugfs_hang_hws(struct packet_manager *pm);
++int dqm_debugfs_execute_queues(struct device_queue_manager *dqm);
++
+ #else
+
+ static inline void kfd_debugfs_init(void) {}
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5679-drm-amdkfd-Make-the-number-of-SDMA-queues-variable.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5679-drm-amdkfd-Make-the-number-of-SDMA-queues-variable.patch
new file mode 100644
index 00000000..9f5b00c3
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5679-drm-amdkfd-Make-the-number-of-SDMA-queues-variable.patch
@@ -0,0 +1,180 @@
+From 1916ada4c970a27f36efa1bbd56e3749ef70280e Mon Sep 17 00:00:00 2001
+From: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Date: Fri, 9 Feb 2018 16:29:14 -0500
+Subject: [PATCH 5679/5725] drm/amdkfd: Make the number of SDMA queues variable
+
+Vega20 supports 8 SDMA queues per engine
+
+Change-Id: I0df3b0c1a4df253e7b25aa4df3746334d39c8848
+Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_device.c | 14 +++++++++++++-
+ drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 6 ++++--
+ drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 1 -
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 1 +
+ 4 files changed, 18 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+index f78ab7c..4db30d2 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -54,6 +54,7 @@ static const struct kfd_device_info kaveri_device_info = {
+ .needs_iommu_device = true,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 2,
++ .num_sdma_queues_per_engine = 2,
+ };
+
+ static const struct kfd_device_info carrizo_device_info = {
+@@ -70,6 +71,7 @@ static const struct kfd_device_info carrizo_device_info = {
+ .needs_iommu_device = true,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 2,
++ .num_sdma_queues_per_engine = 2,
+ };
+
+ static const struct kfd_device_info raven_device_info = {
+@@ -85,6 +87,7 @@ static const struct kfd_device_info raven_device_info = {
+ .needs_iommu_device = true,
+ .needs_pci_atomics = true,
+ .num_sdma_engines = 1,
++ .num_sdma_queues_per_engine = 2,
+ };
+ #endif
+
+@@ -102,6 +105,7 @@ static const struct kfd_device_info hawaii_device_info = {
+ .needs_iommu_device = false,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 2,
++ .num_sdma_queues_per_engine = 2,
+ };
+
+ static const struct kfd_device_info tonga_device_info = {
+@@ -117,6 +121,7 @@ static const struct kfd_device_info tonga_device_info = {
+ .needs_iommu_device = false,
+ .needs_pci_atomics = true,
+ .num_sdma_engines = 2,
++ .num_sdma_queues_per_engine = 2,
+ };
+
+ static const struct kfd_device_info tonga_vf_device_info = {
+@@ -132,6 +137,7 @@ static const struct kfd_device_info tonga_vf_device_info = {
+ .needs_iommu_device = false,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 2,
++ .num_sdma_queues_per_engine = 2,
+ };
+
+ static const struct kfd_device_info fiji_device_info = {
+@@ -147,6 +153,7 @@ static const struct kfd_device_info fiji_device_info = {
+ .needs_iommu_device = false,
+ .needs_pci_atomics = true,
+ .num_sdma_engines = 2,
++ .num_sdma_queues_per_engine = 2,
+ };
+
+ static const struct kfd_device_info fiji_vf_device_info = {
+@@ -162,6 +169,7 @@ static const struct kfd_device_info fiji_vf_device_info = {
+ .needs_iommu_device = false,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 2,
++ .num_sdma_queues_per_engine = 2,
+ };
+
+
+@@ -178,6 +186,7 @@ static const struct kfd_device_info polaris10_device_info = {
+ .needs_iommu_device = false,
+ .needs_pci_atomics = true,
+ .num_sdma_engines = 2,
++ .num_sdma_queues_per_engine = 2,
+ };
+
+ static const struct kfd_device_info polaris10_vf_device_info = {
+@@ -193,6 +202,7 @@ static const struct kfd_device_info polaris10_vf_device_info = {
+ .needs_iommu_device = false,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 2,
++ .num_sdma_queues_per_engine = 2,
+ };
+
+ static const struct kfd_device_info polaris11_device_info = {
+@@ -208,6 +218,7 @@ static const struct kfd_device_info polaris11_device_info = {
+ .needs_iommu_device = false,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 2,
++ .num_sdma_queues_per_engine = 2,
+ };
+
+ static const struct kfd_device_info vega10_device_info = {
+@@ -223,6 +234,7 @@ static const struct kfd_device_info vega10_device_info = {
+ .needs_iommu_device = false,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 2,
++ .num_sdma_queues_per_engine = 2,
+ };
+
+ static const struct kfd_device_info vega10_vf_device_info = {
+@@ -238,9 +250,9 @@ static const struct kfd_device_info vega10_vf_device_info = {
+ .needs_iommu_device = false,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 2,
++ .num_sdma_queues_per_engine = 2,
+ };
+
+-
+ struct kfd_deviceid {
+ unsigned short did;
+ const struct kfd_device_info *device_info;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index 2c5d330..91b88d2 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -109,7 +109,7 @@ static unsigned int get_num_sdma_engines(struct device_queue_manager *dqm)
+ unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
+ {
+ return dqm->dev->device_info->num_sdma_engines
+- * KFD_SDMA_QUEUES_PER_ENGINE;
++ * dqm->dev->device_info->num_sdma_queues_per_engine;
+ }
+
+ void program_sh_mem_settings(struct device_queue_manager *dqm,
+@@ -1838,7 +1838,9 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
+ }
+
+ for (pipe = 0; pipe < get_num_sdma_engines(dqm); pipe++) {
+- for (queue = 0; queue < KFD_SDMA_QUEUES_PER_ENGINE; queue++) {
++ for (queue = 0;
++ queue < dqm->dev->device_info->num_sdma_queues_per_engine;
++ queue++) {
+ r = dqm->dev->kfd2kgd->hqd_sdma_dump(
+ dqm->dev->kgd, pipe, queue, &dump, &n_regs);
+ if (r)
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+index 82fafd0..ad5c449 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+@@ -31,7 +31,6 @@
+
+ #define KFD_UNMAP_LATENCY_MS (4000)
+ #define QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS (2 * KFD_UNMAP_LATENCY_MS + 1000)
+-#define KFD_SDMA_QUEUES_PER_ENGINE (2)
+
+ struct device_process_node {
+ struct qcm_process_device *qpd;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index ae12ab7..62e8def 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -210,6 +210,7 @@ struct kfd_device_info {
+ bool needs_pci_atomics;
+ /* obtain from adev->sdma.num_instances */
+ unsigned int num_sdma_engines;
++ unsigned int num_sdma_queues_per_engine;
+ };
+
+ struct kfd_mem_obj {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5680-drm-amdkfd-Vega20-bring-up-on-amdkfd-side.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5680-drm-amdkfd-Vega20-bring-up-on-amdkfd-side.patch
new file mode 100644
index 00000000..9d81a54b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5680-drm-amdkfd-Vega20-bring-up-on-amdkfd-side.patch
@@ -0,0 +1,148 @@
+From 640741018c6037ba919804ef9ee61688c1755f99 Mon Sep 17 00:00:00 2001
+From: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Date: Tue, 31 Oct 2017 13:32:53 -0400
+Subject: [PATCH 5680/5725] drm/amdkfd: Vega20 bring up on amdkfd side
+
+Change-Id: I6a2572ad6caf92e3feaf29a7b14fd4f0daa16dbc
+Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 1 +
+ drivers/gpu/drm/amd/amdkfd/kfd_device.c | 22 ++++++++++++++++++++++
+ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 1 +
+ drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c | 1 +
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 1 +
+ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c | 1 +
+ drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 1 +
+ drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 1 +
+ 8 files changed, 29 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+index 6688882..c540b65 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+@@ -642,6 +642,7 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
+ num_of_cache_types = ARRAY_SIZE(polaris11_cache_info);
+ break;
+ case CHIP_VEGA10:
++ case CHIP_VEGA20:
+ pcache_info = vega10_cache_info;
+ num_of_cache_types = ARRAY_SIZE(vega10_cache_info);
+ break;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+index 4db30d2..895f82f 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -253,6 +253,22 @@ static const struct kfd_device_info vega10_vf_device_info = {
+ .num_sdma_queues_per_engine = 2,
+ };
+
++static const struct kfd_device_info vega20_device_info = {
++ .asic_family = CHIP_VEGA20,
++ .max_pasid_bits = 16,
++ .max_no_of_hqd = 24,
++ .doorbell_size = 8,
++ .ih_ring_entry_size = 8 * sizeof(uint32_t),
++ .event_interrupt_class = &event_interrupt_class_v9,
++ .num_of_watch_points = 4,
++ .mqd_size_aligned = MQD_SIZE_ALIGNED,
++ .supports_cwsr = true,
++ .needs_iommu_device = false,
++ .needs_pci_atomics = true,
++ .num_sdma_engines = 2,
++ .num_sdma_queues_per_engine = 8,
++};
++
+ struct kfd_deviceid {
+ unsigned short did;
+ const struct kfd_device_info *device_info;
+@@ -341,6 +357,12 @@ static const struct kfd_deviceid supported_devices[] = {
+ { 0x6868, &vega10_device_info }, /* Vega10 */
+ { 0x686C, &vega10_vf_device_info }, /* Vega10 vf*/
+ { 0x687F, &vega10_device_info }, /* Vega10 */
++ { 0x66a0, &vega20_device_info }, /* Vega20 */
++ { 0x66a1, &vega20_device_info }, /* Vega20 */
++ { 0x66a2, &vega20_device_info }, /* Vega20 */
++ { 0x66a3, &vega20_device_info }, /* Vega20 */
++ { 0x66a7, &vega20_device_info }, /* Vega20 */
++ { 0x66af, &vega20_device_info } /* Vega20 */
+ };
+
+ static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index 91b88d2..8ea9a29 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -1727,6 +1727,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
+ break;
+
+ case CHIP_VEGA10:
++ case CHIP_VEGA20:
+ case CHIP_RAVEN:
+ device_queue_manager_init_v9(&dqm->asic_ops);
+ break;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+index f7de732..8f123a2 100755
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+@@ -400,6 +400,7 @@ int kfd_init_apertures(struct kfd_process *process)
+ kfd_init_apertures_vi(pdd, id);
+ break;
+ case CHIP_VEGA10:
++ case CHIP_VEGA20:
+ case CHIP_RAVEN:
+ kfd_init_apertures_v9(pdd, id);
+ break;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+index 51b976d..be038c5 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+@@ -355,6 +355,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
+ break;
+
+ case CHIP_VEGA10:
++ case CHIP_VEGA20:
+ case CHIP_RAVEN:
+ kernel_queue_init_v9(&kq->ops_asic_specific);
+ break;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+index 8279b74..d39e81c 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+@@ -81,6 +81,7 @@ struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
+ case CHIP_POLARIS11:
+ return mqd_manager_init_vi_tonga(type, dev);
+ case CHIP_VEGA10:
++ case CHIP_VEGA20:
+ case CHIP_RAVEN:
+ return mqd_manager_init_v9(type, dev);
+ default:
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+index 1092631..c6080ed3 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+@@ -229,6 +229,7 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
+ pm->pmf = &kfd_vi_pm_funcs;
+ break;
+ case CHIP_VEGA10:
++ case CHIP_VEGA20:
+ case CHIP_RAVEN:
+ pm->pmf = &kfd_v9_pm_funcs;
+ break;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+index 82cff10..4fe5ebc 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+@@ -1308,6 +1308,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
+ HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
+ break;
+ case CHIP_VEGA10:
++ case CHIP_VEGA20:
+ case CHIP_RAVEN:
+ dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 <<
+ HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5681-drm-amdkfd-reflect-atomic-support-in-IO-link-propert.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5681-drm-amdkfd-reflect-atomic-support-in-IO-link-propert.patch
new file mode 100644
index 00000000..27ea953d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5681-drm-amdkfd-reflect-atomic-support-in-IO-link-propert.patch
@@ -0,0 +1,118 @@
+From 604ce8ee842bd925ef1b69312368aaf950873772 Mon Sep 17 00:00:00 2001
+From: Eric Huang <JinHuiEric.Huang@amd.com>
+Date: Mon, 4 Jun 2018 15:22:24 -0400
+Subject: [PATCH 5681/5725] drm/amdkfd: reflect atomic support in IO link
+ properties
+
+Add the flags of properties according to ASIC type and PCIe
+capabilities.
+
+BUG: KFD-386
+
+Change-Id: I64c670d86c6a3992203948547eb87c5466662dfc
+Signed-off-by: Eric Huang <JinHuiEric.Huang@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_device.c | 12 ++++++-----
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 2 ++
+ drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 35 +++++++++++++++++++++++++------
+ 3 files changed, 38 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+index 895f82f..d9505d2 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -400,6 +400,10 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
+ dev_err(kfd_device, "kgd2kfd_probe failed\n");
+ return NULL;
+ }
++
++ kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
++ if (!kfd)
++ return NULL;
+
+ /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
+ * 32 and 64-bit requests are possible and must be
+@@ -412,12 +416,10 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
+ dev_info(kfd_device,
+ "skipped device %x:%x, PCI rejects atomics",
+ pdev->vendor, pdev->device);
++ kfree(kfd);
+ return NULL;
+- }
+-
+- kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
+- if (!kfd)
+- return NULL;
++ } else if (!ret)
++ kfd->pci_atomic_requested = true;
+
+ kfd->kgd = kgd;
+ kfd->device_info = device_info;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index 62e8def..be46966 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -294,6 +294,8 @@ struct kfd_dev {
+ bool cwsr_enabled;
+ const void *cwsr_isa;
+ unsigned int cwsr_isa_size;
++
++ bool pci_atomic_requested;
+ };
+
+ struct kfd_ipc_obj;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+index 4fe5ebc..7702156 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+@@ -1182,17 +1182,40 @@ static void kfd_fill_mem_clk_max_info(struct kfd_topology_device *dev)
+
+ static void kfd_fill_iolink_non_crat_info(struct kfd_topology_device *dev)
+ {
+- struct kfd_iolink_properties *link;
++ struct kfd_iolink_properties *link, *cpu_link;
++ struct kfd_topology_device *cpu_dev;
++ uint32_t cap;
++ uint32_t cpu_flag = CRAT_IOLINK_FLAGS_ENABLED;
++ uint32_t flag = CRAT_IOLINK_FLAGS_ENABLED;
+
+ if (!dev || !dev->gpu)
+ return;
+
+- /* GPU only creates direck links so apply flags setting to all */
+- if (dev->gpu->device_info->asic_family == CHIP_HAWAII)
+- list_for_each_entry(link, &dev->io_link_props, list)
+- link->flags = CRAT_IOLINK_FLAGS_ENABLED |
+- CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT |
++ pcie_capability_read_dword(dev->gpu->pdev,
++ PCI_EXP_DEVCAP2, &cap);
++
++ if (!(cap & (PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
++ PCI_EXP_DEVCAP2_ATOMIC_COMP64)))
++ cpu_flag |= CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT |
++ CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT;
++
++ if (!dev->gpu->pci_atomic_requested ||
++ dev->gpu->device_info->asic_family == CHIP_HAWAII)
++ flag |= CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT |
+ CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT;
++
++ /* GPU only creates direct links so apply flags setting to all */
++ list_for_each_entry(link, &dev->io_link_props, list) {
++ link->flags = flag;
++ cpu_dev = kfd_topology_device_by_proximity_domain(
++ link->node_to);
++ if (cpu_dev) {
++ list_for_each_entry(cpu_link,
++ &cpu_dev->io_link_props, list)
++ if (cpu_link->node_to == link->node_from)
++ cpu_link->flags = cpu_flag;
++ }
++ }
+ }
+
+ int kfd_topology_add_device(struct kfd_dev *gpu)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5682-drm-amdkfd-Add-check-user-queue-busy-interface.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5682-drm-amdkfd-Add-check-user-queue-busy-interface.patch
new file mode 100644
index 00000000..20492f5d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5682-drm-amdkfd-Add-check-user-queue-busy-interface.patch
@@ -0,0 +1,246 @@
+From aea46ffaaafb1fa4bb41a3626217bbf165f4f0e7 Mon Sep 17 00:00:00 2001
+From: Philip Yang <Philip.Yang@amd.com>
+Date: Mon, 28 May 2018 16:22:24 -0400
+Subject: [PATCH 5682/5725] drm/amdkfd: Add check user queue busy interface
+
+Process is idle if both conditions are met:
+ queue's rptr equals wptr
+ control stack is empty, cntl_stack_offset = cntl_stack_size
+
+Change-Id: I316341eeea8ada302d216d1df36d2d8a6951c573
+Signed-off-by: Philip Yang <Philip.Yang@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h | 2 ++
+ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c | 28 +++++++++++++++
+ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 46 ++++++++++++++++++++++++
+ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c | 42 ++++++++++++++++++++++
+ 4 files changed, 118 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
+index dcaeda8..336ea9c 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
+@@ -94,6 +94,8 @@ struct mqd_manager {
+ u32 *ctl_stack_used_size,
+ u32 *save_area_used_size);
+
++ bool (*check_queue_active)(struct queue *q);
++
+ #if defined(CONFIG_DEBUG_FS)
+ int (*debugfs_show_mqd)(struct seq_file *m, void *data);
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+index bd44a23..2441834 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+@@ -42,6 +42,31 @@ static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
+ return (struct cik_sdma_rlc_registers *)mqd;
+ }
+
++static bool check_sdma_queue_active(struct queue *q)
++{
++ uint32_t rptr, wptr;
++ struct cik_sdma_rlc_registers *m = get_sdma_mqd(q->mqd);
++
++ rptr = m->sdma_rlc_rb_rptr;
++ wptr = m->sdma_rlc_rb_wptr;
++ pr_debug("rptr=%d, wptr=%d\n", rptr, wptr);
++
++ return (rptr != wptr);
++}
++
++static bool check_queue_active(struct queue *q)
++{
++ uint32_t rptr, wptr;
++ struct cik_mqd *m = get_mqd(q->mqd);
++
++ rptr = m->cp_hqd_pq_rptr;
++ wptr = m->cp_hqd_pq_wptr;
++
++ pr_debug("rptr=%d, wptr=%d\n", rptr, wptr);
++
++ return (rptr != wptr);
++}
++
+ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q)
+ {
+@@ -491,6 +516,7 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
+ mqd->update_mqd = update_mqd;
+ mqd->destroy_mqd = destroy_mqd;
+ mqd->is_occupied = is_occupied;
++ mqd->check_queue_active = check_queue_active;
+ #if defined(CONFIG_DEBUG_FS)
+ mqd->debugfs_show_mqd = debugfs_show_mqd;
+ #endif
+@@ -502,6 +528,7 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
+ mqd->update_mqd = update_mqd_hiq;
+ mqd->destroy_mqd = destroy_mqd;
+ mqd->is_occupied = is_occupied;
++ mqd->check_queue_active = check_queue_active;
+ #if defined(CONFIG_DEBUG_FS)
+ mqd->debugfs_show_mqd = debugfs_show_mqd;
+ #endif
+@@ -513,6 +540,7 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
+ mqd->update_mqd = update_mqd_sdma;
+ mqd->destroy_mqd = destroy_mqd_sdma;
+ mqd->is_occupied = is_occupied_sdma;
++ mqd->check_queue_active = check_sdma_queue_active;
+ #if defined(CONFIG_DEBUG_FS)
+ mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+index 58ea1fe..dcd24c4 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+@@ -41,6 +41,49 @@ static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
+ return (struct v9_sdma_mqd *)mqd;
+ }
+
++static bool check_sdma_queue_active(struct queue *q)
++{
++ uint32_t rptr, wptr;
++ uint32_t rptr_hi, wptr_hi;
++ struct v9_sdma_mqd *m = get_sdma_mqd(q->mqd);
++
++ rptr = m->sdmax_rlcx_rb_rptr;
++ wptr = m->sdmax_rlcx_rb_wptr;
++ rptr_hi = m->sdmax_rlcx_rb_rptr_hi;
++ wptr_hi = m->sdmax_rlcx_rb_wptr_hi;
++ pr_debug("rptr=%d, wptr=%d\n", rptr, wptr);
++ pr_debug("rptr_hi=%d, wptr_hi=%d\n", rptr_hi, wptr_hi);
++
++ return (rptr != wptr || rptr_hi != wptr_hi);
++}
++
++static bool check_queue_active(struct queue *q)
++{
++ uint32_t rptr, wptr;
++ uint32_t cntl_stack_offset, cntl_stack_size;
++ struct v9_mqd *m = get_mqd(q->mqd);
++
++ rptr = m->cp_hqd_pq_rptr;
++ wptr = m->cp_hqd_pq_wptr_lo % q->properties.queue_size;
++ cntl_stack_offset = m->cp_hqd_cntl_stack_offset;
++ cntl_stack_size = m->cp_hqd_cntl_stack_size;
++
++ pr_debug("rptr=%d, wptr=%d\n", rptr, wptr);
++ pr_debug("m->cp_hqd_cntl_stack_offset=0x%08x\n", cntl_stack_offset);
++ pr_debug("m->cp_hqd_cntl_stack_size=0x%08x\n", cntl_stack_size);
++
++ if ((rptr == 0 && wptr == 0) ||
++ cntl_stack_offset == 0xffffffff ||
++ cntl_stack_size > 0x5000)
++ return false;
++
++ /* Process is idle if both conditions are met:
++ * queue's rptr equals wptr
++ * control stack is empty, cntl_stack_offset = cntl_stack_size
++ */
++ return (rptr != wptr || cntl_stack_offset != cntl_stack_size);
++}
++
+ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q)
+ {
+@@ -489,6 +532,7 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
+ mqd->destroy_mqd = destroy_mqd;
+ mqd->is_occupied = is_occupied;
+ mqd->get_wave_state = get_wave_state;
++ mqd->check_queue_active = check_queue_active;
+ #if defined(CONFIG_DEBUG_FS)
+ mqd->debugfs_show_mqd = debugfs_show_mqd;
+ #endif
+@@ -500,6 +544,7 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
+ mqd->update_mqd = update_mqd_hiq;
+ mqd->destroy_mqd = destroy_mqd;
+ mqd->is_occupied = is_occupied;
++ mqd->check_queue_active = check_queue_active;
+ #if defined(CONFIG_DEBUG_FS)
+ mqd->debugfs_show_mqd = debugfs_show_mqd;
+ #endif
+@@ -511,6 +556,7 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
+ mqd->update_mqd = update_mqd_sdma;
+ mqd->destroy_mqd = destroy_mqd_sdma;
+ mqd->is_occupied = is_occupied_sdma;
++ mqd->check_queue_active = check_sdma_queue_active;
+ #if defined(CONFIG_DEBUG_FS)
+ mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+index e3ae2d4..246fe6c 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+@@ -44,6 +44,45 @@ static inline struct vi_sdma_mqd *get_sdma_mqd(void *mqd)
+ return (struct vi_sdma_mqd *)mqd;
+ }
+
++static bool check_sdma_queue_active(struct queue *q)
++{
++ uint32_t rptr, wptr;
++ struct vi_sdma_mqd *m = get_sdma_mqd(q->mqd);
++
++ rptr = m->sdmax_rlcx_rb_rptr;
++ wptr = m->sdmax_rlcx_rb_wptr;
++ pr_debug("rptr=%d, wptr=%d\n", rptr, wptr);
++
++ return (rptr != wptr);
++}
++
++static bool check_queue_active(struct queue *q)
++{
++ uint32_t rptr, wptr;
++ uint32_t cntl_stack_offset, cntl_stack_size;
++ struct vi_mqd *m = get_mqd(q->mqd);
++
++ rptr = m->cp_hqd_pq_rptr;
++ wptr = m->cp_hqd_pq_wptr;
++ cntl_stack_offset = m->cp_hqd_cntl_stack_offset;
++ cntl_stack_size = m->cp_hqd_cntl_stack_size;
++
++ pr_debug("rptr=%d, wptr=%d\n", rptr, wptr);
++ pr_debug("m->cp_hqd_cntl_stack_offset=0x%08x\n", cntl_stack_offset);
++ pr_debug("m->cp_hqd_cntl_stack_size=0x%08x\n", cntl_stack_size);
++
++ if ((rptr == 0 && wptr == 0) ||
++ cntl_stack_offset == 0xffffffff ||
++ cntl_stack_size > 0x5000)
++ return false;
++
++ /* Process is idle if both conditions are met:
++ * queue's rptr equals wptr
++ * control stack is empty, cntl_stack_offset = cntl_stack_size
++ */
++ return (rptr != wptr || cntl_stack_offset != cntl_stack_size);
++}
++
+ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q)
+ {
+@@ -498,6 +537,7 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
+ mqd->destroy_mqd = destroy_mqd;
+ mqd->is_occupied = is_occupied;
+ mqd->get_wave_state = get_wave_state;
++ mqd->check_queue_active = check_queue_active;
+ #if defined(CONFIG_DEBUG_FS)
+ mqd->debugfs_show_mqd = debugfs_show_mqd;
+ #endif
+@@ -509,6 +549,7 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
+ mqd->update_mqd = update_mqd_hiq;
+ mqd->destroy_mqd = destroy_mqd;
+ mqd->is_occupied = is_occupied;
++ mqd->check_queue_active = check_queue_active;
+ #if defined(CONFIG_DEBUG_FS)
+ mqd->debugfs_show_mqd = debugfs_show_mqd;
+ #endif
+@@ -520,6 +561,7 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
+ mqd->update_mqd = update_mqd_sdma;
+ mqd->destroy_mqd = destroy_mqd_sdma;
+ mqd->is_occupied = is_occupied_sdma;
++ mqd->check_queue_active = check_sdma_queue_active;
+ #if defined(CONFIG_DEBUG_FS)
+ mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
+ #endif
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5683-drm-amdkfd-Replace-mqd-with-mqd_mgr-as-the-variable-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5683-drm-amdkfd-Replace-mqd-with-mqd_mgr-as-the-variable-.patch
new file mode 100644
index 00000000..3f69d9b6
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5683-drm-amdkfd-Replace-mqd-with-mqd_mgr-as-the-variable-.patch
@@ -0,0 +1,531 @@
+From 027157f741ee75b0ba9122b65a48dab71aba9f9f Mon Sep 17 00:00:00 2001
+From: Yong Zhao <yong.zhao@amd.com>
+Date: Mon, 4 Jun 2018 14:33:13 -0400
+Subject: [PATCH 5683/5725] drm/amdkfd: Replace mqd with mqd_mgr as the
+ variable name for mqd_manager
+
+This will make reading code much easier.
+
+Change-Id: If57ec96c8b22d3e0c6dd0ff04a17dcb8ff3a27c4
+Signed-off-by: Yong Zhao <yong.zhao@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 117 +++++++++++----------
+ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 2 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 17 +--
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h | 2 +-
+ .../gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 8 +-
+ 5 files changed, 74 insertions(+), 72 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index 8ea9a29..3bd8f96cb 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -359,10 +359,10 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+ {
+ int retval;
+- struct mqd_manager *mqd;
++ struct mqd_manager *mqd_mgr;
+
+- mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
+- if (!mqd)
++ mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
++ if (!mqd_mgr)
+ return -ENOMEM;
+
+ retval = allocate_hqd(dqm, q);
+@@ -373,7 +373,7 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
+ if (retval)
+ goto out_deallocate_hqd;
+
+- retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
++ retval = mqd_mgr->init_mqd(mqd_mgr, &q->mqd, &q->mqd_mem_obj,
+ &q->gart_mqd_addr, &q->properties);
+ if (retval)
+ goto out_deallocate_doorbell;
+@@ -387,15 +387,15 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
+ if (!q->properties.is_active)
+ return 0;
+
+- retval = mqd->load_mqd(mqd, q->mqd, q->pipe, q->queue, &q->properties,
+- q->process->mm);
++ retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
++ &q->properties, q->process->mm);
+ if (retval)
+ goto out_uninit_mqd;
+
+ return 0;
+
+ out_uninit_mqd:
+- mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
++ mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
+ out_deallocate_doorbell:
+ deallocate_doorbell(qpd, q);
+ out_deallocate_hqd:
+@@ -412,11 +412,11 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
+ struct queue *q)
+ {
+ int retval;
+- struct mqd_manager *mqd;
++ struct mqd_manager *mqd_mgr;
+
+- mqd = dqm->ops.get_mqd_manager(dqm,
++ mqd_mgr = dqm->ops.get_mqd_manager(dqm,
+ get_mqd_type_from_queue_type(q->properties.type));
+- if (!mqd)
++ if (!mqd_mgr)
+ return -ENOMEM;
+
+ if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
+@@ -433,14 +433,14 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
+
+ deallocate_doorbell(qpd, q);
+
+- retval = mqd->destroy_mqd(mqd, q->mqd,
++ retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
+ KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
+ KFD_UNMAP_LATENCY_MS,
+ q->pipe, q->queue);
+ if (retval == -ETIME)
+ qpd->reset_wavefronts = true;
+
+- mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
++ mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
+
+ list_del(&q->list);
+ if (list_empty(&qpd->queues_list)) {
+@@ -480,7 +480,7 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
+ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
+ {
+ int retval;
+- struct mqd_manager *mqd;
++ struct mqd_manager *mqd_mgr;
+ struct kfd_process_device *pdd;
+ bool prev_active = false;
+
+@@ -490,9 +490,9 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
+ retval = -ENODEV;
+ goto out_unlock;
+ }
+- mqd = dqm->ops.get_mqd_manager(dqm,
++ mqd_mgr = dqm->ops.get_mqd_manager(dqm,
+ get_mqd_type_from_queue_type(q->properties.type));
+- if (!mqd) {
++ if (!mqd_mgr) {
+ retval = -ENOMEM;
+ goto out_unlock;
+ }
+@@ -519,7 +519,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
+ } else if (prev_active &&
+ (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
+ q->properties.type == KFD_QUEUE_TYPE_SDMA)) {
+- retval = mqd->destroy_mqd(mqd, q->mqd,
++ retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
+ KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
+ KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
+ if (retval) {
+@@ -528,7 +528,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
+ }
+ }
+
+- retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
++ retval = mqd_mgr->update_mqd(mqd_mgr, q->mqd, &q->properties);
+
+ /*
+ * check active state vs. the previous state and modify
+@@ -546,7 +546,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
+ else if (q->properties.is_active &&
+ (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
+ q->properties.type == KFD_QUEUE_TYPE_SDMA))
+- retval = mqd->load_mqd(mqd, q->mqd, q->pipe, q->queue,
++ retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
+ &q->properties, q->process->mm);
+
+ out_unlock:
+@@ -557,29 +557,29 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
+ static struct mqd_manager *get_mqd_manager(
+ struct device_queue_manager *dqm, enum KFD_MQD_TYPE type)
+ {
+- struct mqd_manager *mqd;
++ struct mqd_manager *mqd_mgr;
+
+ if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
+ return NULL;
+
+ pr_debug("mqd type %d\n", type);
+
+- mqd = dqm->mqds[type];
+- if (!mqd) {
+- mqd = mqd_manager_init(type, dqm->dev);
+- if (!mqd)
++ mqd_mgr = dqm->mqd_mgrs[type];
++ if (!mqd_mgr) {
++ mqd_mgr = mqd_manager_init(type, dqm->dev);
++ if (!mqd_mgr)
+ pr_err("mqd manager is NULL");
+- dqm->mqds[type] = mqd;
++ dqm->mqd_mgrs[type] = mqd_mgr;
+ }
+
+- return mqd;
++ return mqd_mgr;
+ }
+
+ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+ {
+ struct queue *q;
+- struct mqd_manager *mqd;
++ struct mqd_manager *mqd_mgr;
+ struct kfd_process_device *pdd;
+ int retval = 0;
+
+@@ -595,16 +595,16 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
+ list_for_each_entry(q, &qpd->queues_list, list) {
+ if (!q->properties.is_active)
+ continue;
+- mqd = dqm->ops.get_mqd_manager(dqm,
++ mqd_mgr = dqm->ops.get_mqd_manager(dqm,
+ get_mqd_type_from_queue_type(q->properties.type));
+- if (!mqd) { /* should not be here */
++ if (!mqd_mgr) { /* should not be here */
+ pr_err("Cannot evict queue, mqd mgr is NULL\n");
+ retval = -ENOMEM;
+ goto out;
+ }
+ q->properties.is_evicted = true;
+ q->properties.is_active = false;
+- retval = mqd->destroy_mqd(mqd, q->mqd,
++ retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
+ KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
+ KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
+ if (retval)
+@@ -654,7 +654,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+ {
+ struct queue *q;
+- struct mqd_manager *mqd;
++ struct mqd_manager *mqd_mgr;
+ struct kfd_process_device *pdd;
+ uint32_t pd_base;
+ int retval = 0;
+@@ -690,16 +690,16 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
+ list_for_each_entry(q, &qpd->queues_list, list) {
+ if (!q->properties.is_evicted)
+ continue;
+- mqd = dqm->ops.get_mqd_manager(dqm,
++ mqd_mgr = dqm->ops.get_mqd_manager(dqm,
+ get_mqd_type_from_queue_type(q->properties.type));
+- if (!mqd) { /* should not be here */
++ if (!mqd_mgr) { /* should not be here */
+ pr_err("Cannot restore queue, mqd mgr is NULL\n");
+ retval = -ENOMEM;
+ goto out;
+ }
+ q->properties.is_evicted = false;
+ q->properties.is_active = true;
+- retval = mqd->load_mqd(mqd, q->mqd, q->pipe,
++ retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
+ q->queue, &q->properties,
+ q->process->mm);
+ if (retval)
+@@ -879,7 +879,7 @@ static void uninitialize(struct device_queue_manager *dqm)
+
+ kfree(dqm->allocated_queues);
+ for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
+- kfree(dqm->mqds[i]);
++ kfree(dqm->mqd_mgrs[i]);
+ mutex_destroy(&dqm->lock);
+ kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
+ }
+@@ -923,11 +923,11 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
+ struct queue *q,
+ struct qcm_process_device *qpd)
+ {
+- struct mqd_manager *mqd;
++ struct mqd_manager *mqd_mgr;
+ int retval;
+
+- mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
+- if (!mqd)
++ mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
++ if (!mqd_mgr)
+ return -ENOMEM;
+
+ retval = allocate_sdma_queue(dqm, &q->sdma_id);
+@@ -946,19 +946,20 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
+ pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
+
+ dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
+- retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
++ retval = mqd_mgr->init_mqd(mqd_mgr, &q->mqd, &q->mqd_mem_obj,
+ &q->gart_mqd_addr, &q->properties);
+ if (retval)
+ goto out_deallocate_doorbell;
+
+- retval = mqd->load_mqd(mqd, q->mqd, 0, 0, &q->properties, NULL);
++ retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, 0, 0, &q->properties,
++ NULL);
+ if (retval)
+ goto out_uninit_mqd;
+
+ return 0;
+
+ out_uninit_mqd:
+- mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
++ mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
+ out_deallocate_doorbell:
+ deallocate_doorbell(qpd, q);
+ out_deallocate_sdma_queue:
+@@ -1134,7 +1135,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
+ struct qcm_process_device *qpd)
+ {
+ int retval;
+- struct mqd_manager *mqd;
++ struct mqd_manager *mqd_mgr;
+
+ retval = 0;
+
+@@ -1161,10 +1162,10 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
+ if (retval)
+ goto out_deallocate_sdma_queue;
+
+- mqd = dqm->ops.get_mqd_manager(dqm,
++ mqd_mgr = dqm->ops.get_mqd_manager(dqm,
+ get_mqd_type_from_queue_type(q->properties.type));
+
+- if (!mqd) {
++ if (!mqd_mgr) {
+ retval = -ENOMEM;
+ goto out_deallocate_doorbell;
+ }
+@@ -1181,7 +1182,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
+
+ q->properties.tba_addr = qpd->tba_addr;
+ q->properties.tma_addr = qpd->tma_addr;
+- retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
++ retval = mqd_mgr->init_mqd(mqd_mgr, &q->mqd, &q->mqd_mem_obj,
+ &q->gart_mqd_addr, &q->properties);
+ if (retval)
+ goto out_deallocate_doorbell;
+@@ -1337,7 +1338,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
+ struct queue *q)
+ {
+ int retval;
+- struct mqd_manager *mqd;
++ struct mqd_manager *mqd_mgr;
+ bool preempt_all_queues;
+
+ preempt_all_queues = false;
+@@ -1357,9 +1358,9 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
+
+ }
+
+- mqd = dqm->ops.get_mqd_manager(dqm,
++ mqd_mgr = dqm->ops.get_mqd_manager(dqm,
+ get_mqd_type_from_queue_type(q->properties.type));
+- if (!mqd) {
++ if (!mqd_mgr) {
+ retval = -ENOMEM;
+ goto failed;
+ }
+@@ -1380,7 +1381,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
+ if (retval == -ETIME)
+ qpd->reset_wavefronts = true;
+
+- mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
++ mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
+
+ /*
+ * Unconditionally decrement this counter, regardless of the queue's
+@@ -1529,7 +1530,7 @@ static int get_wave_state(struct device_queue_manager *dqm,
+ u32 *ctl_stack_used_size,
+ u32 *save_area_used_size)
+ {
+- struct mqd_manager *mqd;
++ struct mqd_manager *mqd_mgr;
+ int r;
+
+ mutex_lock(&dqm->lock);
+@@ -1540,19 +1541,19 @@ static int get_wave_state(struct device_queue_manager *dqm,
+ goto dqm_unlock;
+ }
+
+- mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
+- if (!mqd) {
++ mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
++ if (!mqd_mgr) {
+ r = -ENOMEM;
+ goto dqm_unlock;
+ }
+
+- if (!mqd->get_wave_state) {
++ if (!mqd_mgr->get_wave_state) {
+ r = -EINVAL;
+ goto dqm_unlock;
+ }
+
+- r = mqd->get_wave_state(mqd, q->mqd, ctl_stack, ctl_stack_used_size,
+- save_area_used_size);
++ r = mqd_mgr->get_wave_state(mqd_mgr, q->mqd, ctl_stack,
++ ctl_stack_used_size, save_area_used_size);
+
+ dqm_unlock:
+ mutex_unlock(&dqm->lock);
+@@ -1565,7 +1566,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
+ int retval;
+ struct queue *q, *next;
+ struct kernel_queue *kq, *kq_next;
+- struct mqd_manager *mqd;
++ struct mqd_manager *mqd_mgr;
+ struct device_process_node *cur, *next_dpn;
+ enum kfd_unmap_queues_filter filter =
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;
+@@ -1615,15 +1616,15 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
+
+ /* lastly, free mqd resources */
+ list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
+- mqd = dqm->ops.get_mqd_manager(dqm,
++ mqd_mgr = dqm->ops.get_mqd_manager(dqm,
+ get_mqd_type_from_queue_type(q->properties.type));
+- if (!mqd) {
++ if (!mqd_mgr) {
+ retval = -ENOMEM;
+ goto out;
+ }
+ list_del(&q->list);
+ qpd->queue_count--;
+- mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
++ mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
+ }
+
+ out:
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+index ad5c449..1c4ef00 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+@@ -174,7 +174,7 @@ struct device_queue_manager {
+ struct device_queue_manager_ops ops;
+ struct device_queue_manager_asic_ops asic_ops;
+
+- struct mqd_manager *mqds[KFD_MQD_TYPE_MAX];
++ struct mqd_manager *mqd_mgrs[KFD_MQD_TYPE_MAX];
+ struct packet_manager packets;
+ struct kfd_dev *dev;
+ struct mutex lock;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+index be038c5..e78445d 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+@@ -59,7 +59,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
+ switch (type) {
+ case KFD_QUEUE_TYPE_DIQ:
+ case KFD_QUEUE_TYPE_HIQ:
+- kq->mqd = dev->dqm->ops.get_mqd_manager(dev->dqm,
++ kq->mqd_mgr = dev->dqm->ops.get_mqd_manager(dev->dqm,
+ KFD_MQD_TYPE_HIQ);
+ break;
+ default:
+@@ -67,7 +67,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
+ return false;
+ }
+
+- if (!kq->mqd)
++ if (!kq->mqd_mgr)
+ return false;
+
+ prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off);
+@@ -131,7 +131,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
+ kq->queue->device = dev;
+ kq->queue->process = kfd_get_process(current);
+
+- retval = kq->mqd->init_mqd(kq->mqd, &kq->queue->mqd,
++ retval = kq->mqd_mgr->init_mqd(kq->mqd_mgr, &kq->queue->mqd,
+ &kq->queue->mqd_mem_obj,
+ &kq->queue->gart_mqd_addr,
+ &kq->queue->properties);
+@@ -143,9 +143,9 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
+ pr_debug("Assigning hiq to hqd\n");
+ kq->queue->pipe = KFD_CIK_HIQ_PIPE;
+ kq->queue->queue = KFD_CIK_HIQ_QUEUE;
+- kq->mqd->load_mqd(kq->mqd, kq->queue->mqd, kq->queue->pipe,
+- kq->queue->queue, &kq->queue->properties,
+- NULL);
++ kq->mqd_mgr->load_mqd(kq->mqd_mgr, kq->queue->mqd,
++ kq->queue->pipe, kq->queue->queue,
++ &kq->queue->properties, NULL);
+ } else {
+ /* allocate fence for DIQ */
+
+@@ -183,7 +183,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
+ static void uninitialize(struct kernel_queue *kq)
+ {
+ if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ)
+- kq->mqd->destroy_mqd(kq->mqd,
++ kq->mqd_mgr->destroy_mqd(kq->mqd_mgr,
+ kq->queue->mqd,
+ KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
+ KFD_UNMAP_LATENCY_MS,
+@@ -192,7 +192,8 @@ static void uninitialize(struct kernel_queue *kq)
+ else if (kq->queue->properties.type == KFD_QUEUE_TYPE_DIQ)
+ kfd_gtt_sa_free(kq->dev, kq->fence_mem_obj);
+
+- kq->mqd->uninit_mqd(kq->mqd, kq->queue->mqd, kq->queue->mqd_mem_obj);
++ kq->mqd_mgr->uninit_mqd(kq->mqd_mgr, kq->queue->mqd,
++ kq->queue->mqd_mem_obj);
+
+ kfd_gtt_sa_free(kq->dev, kq->rptr_mem);
+ kfd_gtt_sa_free(kq->dev, kq->wptr_mem);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
+index 82c94a6..384d7a3 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
+@@ -80,7 +80,7 @@ struct kernel_queue {
+
+ /* data */
+ struct kfd_dev *dev;
+- struct mqd_manager *mqd;
++ struct mqd_manager *mqd_mgr;
+ struct queue *queue;
+ uint64_t pending_wptr64;
+ uint32_t pending_wptr;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+index fbaca3b..a3c22a3 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+@@ -435,7 +435,7 @@ int pqm_debugfs_mqds(struct seq_file *m, void *data)
+ struct process_queue_node *pqn;
+ struct queue *q;
+ enum KFD_MQD_TYPE mqd_type;
+- struct mqd_manager *mqd_manager;
++ struct mqd_manager *mqd_mgr;
+ int r = 0;
+
+ list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
+@@ -458,11 +458,11 @@ int pqm_debugfs_mqds(struct seq_file *m, void *data)
+ q->properties.type, q->device->id);
+ continue;
+ }
+- mqd_manager = q->device->dqm->ops.get_mqd_manager(
++ mqd_mgr = q->device->dqm->ops.get_mqd_manager(
+ q->device->dqm, mqd_type);
+ } else if (pqn->kq) {
+ q = pqn->kq->queue;
+- mqd_manager = pqn->kq->mqd;
++ mqd_mgr = pqn->kq->mqd_mgr;
+ switch (q->properties.type) {
+ case KFD_QUEUE_TYPE_DIQ:
+ seq_printf(m, " DIQ on device %x\n",
+@@ -482,7 +482,7 @@ int pqm_debugfs_mqds(struct seq_file *m, void *data)
+ continue;
+ }
+
+- r = mqd_manager->debugfs_show_mqd(m, q->mqd);
++ r = mqd_mgr->debugfs_show_mqd(m, q->mqd);
+ if (r != 0)
+ break;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5684-drm-amd-amdgpu-Removing-unwanted-code-from-the-below.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5684-drm-amd-amdgpu-Removing-unwanted-code-from-the-below.patch
new file mode 100644
index 00000000..28532f44
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5684-drm-amd-amdgpu-Removing-unwanted-code-from-the-below.patch
@@ -0,0 +1,32 @@
+From 2e6c51aad5fd03aa9045720b68056ca77f3ab5c7 Mon Sep 17 00:00:00 2001
+From: Kalyan Alle <kalyan.alle@amd.com>
+Date: Wed, 26 Sep 2018 18:27:52 +0530
+Subject: [PATCH 5684/5725] drm/amd/amdgpu: Removing unwanted code from the
+ below files
+
+Removing the unwanted code:
+1. Some of the removed structure members are not used in the code
+2. amdgpu_gem_prime_foreign_bo is not used anywhere in the amdgpu driver code
+3. amdgpu_ttm_bind is not used in the code.
+
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index 9ec224c..ba5fe07 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -1871,7 +1871,7 @@ static int kfd_create_cma_system_bo(struct kfd_dev *kdev, struct kfd_bo *bo,
+
+ INIT_LIST_HEAD(&cbo->list);
+ if (bo->mem_type == KFD_IOC_ALLOC_MEM_FLAGS_VRAM)
+- bo_size = min(*size, MAX_SYSTEM_BO_SIZE);
++ bo_size = min_t(uint64_t, *size, MAX_SYSTEM_BO_SIZE);
+ else if (bo->cpuva) {
+ ret = kfd_create_sg_table_from_userptr_bo(bo, offset,
+ cma_write, mm, task,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5685-drm-amdkfd-Conditionally-enable-PCIe-atomics.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5685-drm-amdkfd-Conditionally-enable-PCIe-atomics.patch
new file mode 100644
index 00000000..d8f66233
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5685-drm-amdkfd-Conditionally-enable-PCIe-atomics.patch
@@ -0,0 +1,43 @@
+From 0c9943d2bd46c230091568c08411de879426ea4c Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Thu, 4 Jan 2018 17:17:41 -0500
+Subject: [PATCH 5685/5725] drm/amdkfd: Conditionally enable PCIe atomics
+
+This will be needed for most dGPUs.
+
+CC: linux-pci@vger.kernel.org
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
+Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_device.c | 14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+index d9505d2..20a20ed 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -400,6 +400,20 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
+ dev_err(kfd_device, "kgd2kfd_probe failed\n");
+ return NULL;
+ }
++ if (device_info->needs_pci_atomics) {
++ /* Allow BIF to recode atomics to PCIe 3.0
++ * AtomicOps. 32 and 64-bit requests are possible and
++ * must be supported.
++ */
++ if (pci_enable_atomic_ops_to_root(pdev,
++ PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
++ PCI_EXP_DEVCAP2_ATOMIC_COMP64) < 0) {
++ dev_info(kfd_device,
++ "skipped device %x:%x, PCI rejects atomics",
++ pdev->vendor, pdev->device);
++ return NULL;
++ }
++ }
+
+ kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
+ if (!kfd)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5686-drm-amdkfd-Fix-return-value-0-when-execute_queues_cp.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5686-drm-amdkfd-Fix-return-value-0-when-execute_queues_cp.patch
new file mode 100644
index 00000000..d89d0aea
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5686-drm-amdkfd-Fix-return-value-0-when-execute_queues_cp.patch
@@ -0,0 +1,29 @@
+From b27bb1e0d5421b33cdea50f4e28e6ae1ee1d7f01 Mon Sep 17 00:00:00 2001
+From: Yong Zhao <yong.zhao@amd.com>
+Date: Tue, 2 Jan 2018 13:10:49 -0500
+Subject: [PATCH 5686/5725] drm/amdkfd: Fix return value 0 when
+ execute_queues_cpsch fails
+
+Signed-off-by: Yong Zhao <yong.zhao@amd.com>
+Reviewed-by: Oak Zeng <oak.zeng@amd.com>
+Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index 3bd8f96cb..9befdb8 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -1393,7 +1393,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
+
+ mutex_unlock(&dqm->lock);
+
+- return 0;
++ return retval;
+
+ failed:
+ failed_try_destroy_debugged_queue:
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5687-drm-amdkfd-don-t-always-call-execute_queues_cpsch.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5687-drm-amdkfd-don-t-always-call-execute_queues_cpsch.patch
new file mode 100644
index 00000000..cd6392ec
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5687-drm-amdkfd-don-t-always-call-execute_queues_cpsch.patch
@@ -0,0 +1,39 @@
+From 793b1e6781101025173666788ca5bf3a013259bd Mon Sep 17 00:00:00 2001
+From: Yong Zhao <yong.zhao@amd.com>
+Date: Tue, 2 Jan 2018 13:10:50 -0500
+Subject: [PATCH 5687/5725] drm/amdkfd: don't always call
+ execute_queues_cpsch()
+
+When destroying an inactive queue, we don't need to call
+execute_queues_cpsch.
+
+Signed-off-by: Yong Zhao <yong.zhao@amd.com>
+Reviewed-by: Oak Zeng <oak.zeng@amd.com>
+Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index 9befdb8..d976f1c 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -1374,13 +1374,13 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
+
+ list_del(&q->list);
+ qpd->queue_count--;
+- if (q->properties.is_active)
++ if (q->properties.is_active) {
+ dqm->queue_count--;
+ retval = execute_queues_cpsch(dqm,
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
+ if (retval == -ETIME)
+ qpd->reset_wavefronts = true;
+-
++ }
+ mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
+
+ /*
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5688-drm-amdkfd-kfd_dev_is_large_bar-can-be-static.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5688-drm-amdkfd-kfd_dev_is_large_bar-can-be-static.patch
new file mode 100644
index 00000000..e2f5a9cc
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5688-drm-amdkfd-kfd_dev_is_large_bar-can-be-static.patch
@@ -0,0 +1,28 @@
+From 8b1690d4d9861b22a4029f106465736e463b0fb1 Mon Sep 17 00:00:00 2001
+From: kbuild test robot <fengguang.wu@intel.com>
+Date: Wed, 28 Mar 2018 00:55:26 +0800
+Subject: [PATCH 5688/5725] drm/amdkfd: kfd_dev_is_large_bar() can be static
+
+Fixes: 5ec7e02854b3 ("drm/amdkfd: Add ioctls for GPUVM memory management")
+Signed-off-by: Fengguang Wu <fengguang.wu@intel.com>
+Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index ba5fe07..3b38253 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -1211,7 +1211,7 @@ static int kfd_ioctl_acquire_vm(struct file *filep, struct kfd_process *p,
+ return ret;
+ }
+
+-bool kfd_dev_is_large_bar(struct kfd_dev *dev)
++static bool kfd_dev_is_large_bar(struct kfd_dev *dev)
+ {
+ struct kfd_local_mem_info mem_info;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5689-drm-amdkfd-fix-build-select-MMU_NOTIFIER.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5689-drm-amdkfd-fix-build-select-MMU_NOTIFIER.patch
new file mode 100644
index 00000000..20a13275
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5689-drm-amdkfd-fix-build-select-MMU_NOTIFIER.patch
@@ -0,0 +1,41 @@
+From 642bf4e1b54e0b6cdc73ea0b33df3f2ce22ec847 Mon Sep 17 00:00:00 2001
+From: Randy Dunlap <rdunlap@infradead.org>
+Date: Fri, 13 Apr 2018 19:49:28 -0700
+Subject: [PATCH 5689/5725] drm/amdkfd: fix build, select MMU_NOTIFIER
+
+When CONFIG_MMU_NOTIFIER is not enabled, struct mmu_notifier has an
+incomplete type definition, which causes build errors.
+
+../drivers/gpu/drm/amd/amdkfd/kfd_priv.h:607:22: error: field 'mmu_notifier' has incomplete type
+../include/linux/kernel.h:979:32: error: dereferencing pointer to incomplete type
+../include/linux/kernel.h:980:18: error: dereferencing pointer to incomplete type
+../drivers/gpu/drm/amd/amdkfd/kfd_process.c:434:2: error: implicit declaration of function 'mmu_notifier_unregister_no_release' [-Werror=implicit-function-declaration]
+../drivers/gpu/drm/amd/amdkfd/kfd_process.c:435:2: error: implicit declaration of function 'mmu_notifier_call_srcu' [-Werror=implicit-function-declaration]
+../drivers/gpu/drm/amd/amdkfd/kfd_process.c:438:21: error: variable 'kfd_process_mmu_notifier_ops' has initializer but incomplete type
+../drivers/gpu/drm/amd/amdkfd/kfd_process.c:439:2: error: unknown field 'release' specified in initializer
+../drivers/gpu/drm/amd/amdkfd/kfd_process.c:439:2: warning: excess elements in struct initializer [enabled by default]
+../drivers/gpu/drm/amd/amdkfd/kfd_process.c:439:2: warning: (near initialization for 'kfd_process_mmu_notifier_ops') [enabled by default]
+../drivers/gpu/drm/amd/amdkfd/kfd_process.c:534:2: error: implicit declaration of function 'mmu_notifier_register' [-Werror=implicit-function-declaration]
+
+Signed-off-by: Randy Dunlap <rdunlap@infradead.org>
+Tested-by: Anders Roxell <anders.roxell@linaro.org>
+Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
+Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
+---
+ drivers/gpu/drm/amd/amdkfd/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig
+index 50b8b56..bc8d8f7 100644
+--- a/drivers/gpu/drm/amd/amdkfd/Kconfig
++++ b/drivers/gpu/drm/amd/amdkfd/Kconfig
+@@ -7,5 +7,6 @@ config HSA_AMD
+ depends on (DRM_RADEON || DRM_AMDGPU) && (X86_64 || PPC64 || ARM64)
+ select DRM_AMDGPU_USERPTR
+ imply AMD_IOMMU_V2
++ select MMU_NOTIFIER
+ help
+ Enable this if you want to use HSA features on AMD GPU devices.
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5690-drm-amdkfd-Try-to-enable-atomics-for-all-GPUs.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5690-drm-amdkfd-Try-to-enable-atomics-for-all-GPUs.patch
new file mode 100644
index 00000000..d11b5556
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5690-drm-amdkfd-Try-to-enable-atomics-for-all-GPUs.patch
@@ -0,0 +1,60 @@
+From 447658699284a95a9ce3458415c954206dd3882c Mon Sep 17 00:00:00 2001
+From: welu <Wei.Lu2@amd.com>
+Date: Tue, 10 Apr 2018 17:33:17 -0400
+Subject: [PATCH 5690/5725] drm/amdkfd: Try to enable atomics for all GPUs
+
+Report failure to enable atomics only on GPUs that require them.
+This allows GPUs that don't require atomics to function, but can
+benefit if they are available. This is the case for Vega10, which
+doesn't use atomics for basic functioning of the MEC, AQL and HWS
+microcode. So it can work without atomics. But shader programs can
+still use atomic instructions on systems that support PCIe atomics.
+
+Signed-off-by: welu <Wei.Lu2@amd.com>
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
+Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_device.c | 26 +++++++++++++-------------
+ 1 file changed, 13 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+index 20a20ed..fc7eb66 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -400,19 +400,19 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
+ dev_err(kfd_device, "kgd2kfd_probe failed\n");
+ return NULL;
+ }
+- if (device_info->needs_pci_atomics) {
+- /* Allow BIF to recode atomics to PCIe 3.0
+- * AtomicOps. 32 and 64-bit requests are possible and
+- * must be supported.
+- */
+- if (pci_enable_atomic_ops_to_root(pdev,
+- PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
+- PCI_EXP_DEVCAP2_ATOMIC_COMP64) < 0) {
+- dev_info(kfd_device,
+- "skipped device %x:%x, PCI rejects atomics",
+- pdev->vendor, pdev->device);
+- return NULL;
+- }
++
++ /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
++ * 32 and 64-bit requests are possible and must be
++ * supported.
++ */
++ ret = pci_enable_atomic_ops_to_root(pdev,
++ PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
++ PCI_EXP_DEVCAP2_ATOMIC_COMP64);
++ if (device_info->needs_pci_atomics && ret < 0) {
++ dev_info(kfd_device,
++ "skipped device %x:%x, PCI rejects atomics\n",
++ pdev->vendor, pdev->device);
++ return NULL;
+ }
+
+ kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5691-drm-amdkfd-Remove-queue-node-when-destroy-queue-fail.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5691-drm-amdkfd-Remove-queue-node-when-destroy-queue-fail.patch
new file mode 100644
index 00000000..1a54e5c3
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5691-drm-amdkfd-Remove-queue-node-when-destroy-queue-fail.patch
@@ -0,0 +1,48 @@
+From 4ba7777945d81d93eb326dd3fa5702eb8121366e Mon Sep 17 00:00:00 2001
+From: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Date: Tue, 1 May 2018 17:56:11 -0400
+Subject: [PATCH 5691/5725] drm/amdkfd: Remove queue node when destroy queue
+ failed
+
+HWS may hang in the middle of destroying a queue. Remove the queue from the
+process queue list so it won't be freed again in the future.
+
+Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+index a3c22a3..566e205a 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+@@ -241,7 +241,8 @@ int pqm_create_queue(struct process_queue_manager *pqm,
+ }
+
+ if (retval != 0) {
+- pr_err("DQM create queue failed\n");
++ pr_err("Pasid %d DQM create queue %d failed. ret %d\n",
++ pqm->process->pasid, type, retval);
+ goto err_create_queue;
+ }
+
+@@ -321,8 +322,11 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
+ pqn->q->properties.cu_mask = NULL;
+ retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
+ if (retval) {
+- pr_debug("Destroy queue failed, returned %d\n", retval);
+- goto err_destroy_queue;
++ pr_err("Pasid %d destroy queue %d failed, ret %d\n",
++ pqm->process->pasid,
++ pqn->q->properties.queue_id, retval);
++ if (retval != -ETIME)
++ goto err_destroy_queue;
+ }
+ uninit_queue(pqn->q);
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5692-drm-amdkfd-Remove-vla.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5692-drm-amdkfd-Remove-vla.patch
new file mode 100644
index 00000000..2a512dd9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5692-drm-amdkfd-Remove-vla.patch
@@ -0,0 +1,58 @@
+From f41ca13059858dd5f06c3e0563ca08bf72116276 Mon Sep 17 00:00:00 2001
+From: Laura Abbott <labbott@redhat.com>
+Date: Fri, 13 Apr 2018 14:24:12 -0700
+Subject: [PATCH 5692/5725] drm/amdkfd: Remove vla
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+There's an ongoing effort to remove VLAs[1] from the kernel to eventually
+turn on -Wvla. Switch to a constant value that covers all hardware.
+
+[1] https://lkml.org/lkml/2018/3/7/621
+
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Laura Abbott <labbott@redhat.com>
+Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c | 8 +++++---
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 2 ++
+ 2 files changed, 7 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
+index cda36c8..c56ac47 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
+@@ -139,10 +139,12 @@ static void interrupt_wq(struct work_struct *work)
+ {
+ struct kfd_dev *dev = container_of(work, struct kfd_dev,
+ interrupt_work);
++ uint32_t ih_ring_entry[KFD_MAX_RING_ENTRY_SIZE];
+
+- uint32_t ih_ring_entry[DIV_ROUND_UP(
+- dev->device_info->ih_ring_entry_size,
+- sizeof(uint32_t))];
++ if (dev->device_info->ih_ring_entry_size > sizeof(ih_ring_entry)) {
++ dev_err_once(kfd_chardev(), "Ring entry too small\n");
++ return;
++ }
+
+ while (dequeue_ih_ring_entry(dev, ih_ring_entry))
+ dev->device_info->event_interrupt_class->interrupt_wq(dev,
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index be46966..a21bab4 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -41,6 +41,8 @@
+
+ #include "amd_shared.h"
+
++#define KFD_MAX_RING_ENTRY_SIZE 8
++
+ #define KFD_SYSFS_FILE_MODE 0444
+
+ /* GPU ID hash width in bits */
+--
+2.7.4
+
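
As a quick aside on the VLA removal above, the same pattern can be restated as a minimal stand-alone C sketch: size the buffer for the worst case at compile time and reject anything larger at run time. MAX_ENTRY_SIZE below is an assumed stand-in for KFD_MAX_RING_ENTRY_SIZE; none of this is taken from the patch itself.

    /*
     * Minimal sketch of the VLA-removal pattern: fixed worst-case buffer
     * plus a run-time size check. MAX_ENTRY_SIZE is an assumed stand-in
     * for KFD_MAX_RING_ENTRY_SIZE.
     */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define MAX_ENTRY_SIZE 8                     /* worst case, in dwords */

    static int copy_entry(const uint32_t *src, unsigned int dwords)
    {
            uint32_t entry[MAX_ENTRY_SIZE];      /* fixed size, no VLA */

            if (dwords > MAX_ENTRY_SIZE) {
                    fprintf(stderr, "Ring entry too small\n");
                    return -1;
            }
            memcpy(entry, src, dwords * sizeof(uint32_t));
            return (int)entry[0];
    }

    int main(void)
    {
            uint32_t ring[4] = { 0xb5, 1, 2, 3 };

            printf("first dword: 0x%x\n", (unsigned int)copy_entry(ring, 4));
            return 0;
    }
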
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5693-drm-admkfd-use-modern-ktime-accessors.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5693-drm-admkfd-use-modern-ktime-accessors.patch
new file mode 100644
index 00000000..2086c5ff
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5693-drm-admkfd-use-modern-ktime-accessors.patch
@@ -0,0 +1,47 @@
+From c56b97c38e22e14d2ba8a5f21cc2e9fe1b81f941 Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Wed, 11 Jul 2018 14:41:00 +0200
+Subject: [PATCH 5693/5725] drm/admkfd use modern ktime accessors
+
+getrawmonotonic64() and get_monotonic_boottime64() are deprecated
+because of the nonstandard naming.
+
+The replacement functions ktime_get_raw_ns() and ktime_get_boot_ns()
+also simplify the callers.
+
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>.
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 8 ++------
+ 1 file changed, 2 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index 3b38253..759d59e 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -804,7 +804,6 @@ static int kfd_ioctl_get_clock_counters(struct file *filep,
+ {
+ struct kfd_ioctl_get_clock_counters_args *args = data;
+ struct kfd_dev *dev;
+- struct timespec64 time;
+
+ dev = kfd_device_by_id(args->gpu_id);
+ if (dev)
+@@ -816,11 +815,8 @@ static int kfd_ioctl_get_clock_counters(struct file *filep,
+ args->gpu_clock_counter = 0;
+
+ /* No access to rdtsc. Using raw monotonic time */
+- getrawmonotonic64(&time);
+- args->cpu_clock_counter = (uint64_t)timespec64_to_ns(&time);
+-
+- get_monotonic_boottime64(&time);
+- args->system_clock_counter = (uint64_t)timespec64_to_ns(&time);
++ args->cpu_clock_counter = ktime_get_raw_ns();
++ args->system_clock_counter = ktime_get_boot_ns();
+
+ /* Since the counter is in nano-seconds we use 1GHz frequency */
+ args->system_clock_freq = 1000000000;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5694-drm-amdkfd-Stop-using-GFP_NOIO-explicitly.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5694-drm-amdkfd-Stop-using-GFP_NOIO-explicitly.patch
new file mode 100644
index 00000000..9ba8033e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5694-drm-amdkfd-Stop-using-GFP_NOIO-explicitly.patch
@@ -0,0 +1,85 @@
+From 5fba7085f47f5cbb0b753d3e7874bc86ec1fc38e Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Wed, 11 Jul 2018 22:32:45 -0400
+Subject: [PATCH 5694/5725] drm/amdkfd: Stop using GFP_NOIO explicitly
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This is no longer needed with the memalloc_nofs_save/restore in
+dqm_lock/unlock.
+
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_device.c | 2 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c | 2 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 4 ++--
+ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c | 2 +-
+ 4 files changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+index fc7eb66..5b0738f 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -895,7 +895,7 @@ int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
+ if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size)
+ return -ENOMEM;
+
+- *mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_NOIO);
++ *mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
+ if (!(*mem_obj))
+ return -ENOMEM;
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+index 2441834..6611c7a 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+@@ -501,7 +501,7 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
+ if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
+ return NULL;
+
+- mqd = kzalloc(sizeof(*mqd), GFP_NOIO);
++ mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
+ if (!mqd)
+ return NULL;
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+index dcd24c4..ea1d01d 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+@@ -150,7 +150,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
+ * instead of sub-allocation function.
+ */
+ if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {
+- *mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_NOIO);
++ *mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
+ if (!*mqd_mem_obj)
+ return -ENOMEM;
+ retval = kfd->kfd2kgd->init_gtt_mem_allocation(kfd->kgd,
+@@ -516,7 +516,7 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
+ if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
+ return NULL;
+
+- mqd = kzalloc(sizeof(*mqd), GFP_NOIO);
++ mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
+ if (!mqd)
+ return NULL;
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+index 246fe6c..9b1eca7 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+@@ -521,7 +521,7 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
+ if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
+ return NULL;
+
+- mqd = kzalloc(sizeof(*mqd), GFP_NOIO);
++ mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
+ if (!mqd)
+ return NULL;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5695-drm-amdkfd-fix-zero-reading-of-VMID-and-PASID-for-Ha.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5695-drm-amdkfd-fix-zero-reading-of-VMID-and-PASID-for-Ha.patch
new file mode 100644
index 00000000..d79eee30
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5695-drm-amdkfd-fix-zero-reading-of-VMID-and-PASID-for-Ha.patch
@@ -0,0 +1,49 @@
+From c47bbfb389df61f0f5b4fff8bab7ae261c0d77c3 Mon Sep 17 00:00:00 2001
+From: Lan Xiao <Lan.Xiao@amd.com>
+Date: Wed, 11 Jul 2018 22:32:51 -0400
+Subject: [PATCH 5695/5725] drm/amdkfd: fix zero reading of VMID and PASID for
+ Hawaii
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Upon VM Fault, the VMID and PASID written by HW are zeros in
+Hawaii. Instead of reading from ih_ring_entry, read directly
+from the registers. This workaround fixes the soft hang issues
+caused by mishandled VM faults in Hawaii.
+
+Signed-off-by: Lan Xiao <Lan.Xiao@amd.com>
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_device.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+index 5b0738f..daa35c5 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -716,13 +716,18 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
+ if (!kfd->init_complete)
+ return;
+
++ if (kfd->device_info->ih_ring_entry_size > sizeof(patched_ihre)) {
++ dev_err_once(kfd_device, "Ring entry too small\n");
++ return;
++ }
++
+ spin_lock(&kfd->interrupt_lock);
+
+ if (kfd->interrupts_active
+ && interrupt_is_wanted(kfd, ih_ring_entry,
+ patched_ihre, &is_patched)
+ && enqueue_ih_ring_entry(kfd,
+- is_patched ? patched_ihre : ih_ring_entry))
++ is_patched ? patched_ihre : ih_ring_entry))
+ queue_work(kfd->ih_wq, &kfd->interrupt_work);
+
+ spin_unlock(&kfd->interrupt_lock);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5696-drm-amdkfd-Clean-up-reference-of-radeon.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5696-drm-amdkfd-Clean-up-reference-of-radeon.patch
new file mode 100644
index 00000000..d8a4a2df
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5696-drm-amdkfd-Clean-up-reference-of-radeon.patch
@@ -0,0 +1,124 @@
+From 13a312a8fab28d683de537f751b51f1e817e72da Mon Sep 17 00:00:00 2001
+From: Yong Zhao <yong.zhao@amd.com>
+Date: Wed, 11 Jul 2018 22:33:08 -0400
+Subject: [PATCH 5696/5725] drm/amdkfd: Clean up reference of radeon
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Signed-off-by: Yong Zhao <yong.zhao@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
+---
+ drivers/gpu/drm/amd/amdkfd/cik_int.h | 5 ++---
+ drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c | 1 -
+ drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.h | 37 +++++++++++++++++++++++++++++++++
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 2 +-
+ 4 files changed, 40 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/cik_int.h b/drivers/gpu/drm/amd/amdkfd/cik_int.h
+index a2079a0..76f8677 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cik_int.h
++++ b/drivers/gpu/drm/amd/amdkfd/cik_int.h
+@@ -20,8 +20,8 @@
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-#ifndef HSA_RADEON_CIK_INT_H_INCLUDED
+-#define HSA_RADEON_CIK_INT_H_INCLUDED
++#ifndef CIK_INT_H_INCLUDED
++#define CIK_INT_H_INCLUDED
+
+ #include <linux/types.h>
+
+@@ -34,7 +34,6 @@ struct cik_ih_ring_entry {
+
+ #define CIK_INTSRC_CP_END_OF_PIPE 0xB5
+ #define CIK_INTSRC_CP_BAD_OPCODE 0xB7
+-#define CIK_INTSRC_DEQUEUE_COMPLETE 0xC6
+ #define CIK_INTSRC_SDMA_TRAP 0xE0
+ #define CIK_INTSRC_SQ_INTERRUPT_MSG 0xEF
+ #define CIK_INTSRC_GFX_PAGE_INV_FAULT 0x92
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+index 8d85e28..c411090 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+@@ -38,7 +38,6 @@
+ #include "kfd_dbgmgr.h"
+ #include "kfd_dbgdev.h"
+ #include "kfd_device_queue_manager.h"
+-#include "../../radeon/cik_reg.h"
+
+ static void dbgdev_address_watch_disable_nodiq(struct kfd_dev *dev)
+ {
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.h b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.h
+index 583aaa9..dde7bfb 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.h
+@@ -78,6 +78,9 @@ enum SQ_IND_CMD_NEW {
+
+ };
+
++/* SQ_CMD definitions */
++#define SQ_CMD 0x8DEC
++
+ enum SQ_IND_CMD_CMD {
+ SQ_IND_CMD_CMD_NULL = 0x00000000,
+ SQ_IND_CMD_CMD_HALT = 0x00000001,
+@@ -222,4 +225,38 @@ union ULARGE_INTEGER {
+ void kfd_dbgdev_init(struct kfd_dbgdev *pdbgdev, struct kfd_dev *pdev,
+ enum DBGDEV_TYPE type);
+
++union TCP_WATCH_CNTL_BITS {
++ struct {
++ uint32_t mask:24;
++ uint32_t vmid:4;
++ uint32_t atc:1;
++ uint32_t mode:2;
++ uint32_t valid:1;
++ } bitfields, bits;
++ uint32_t u32All;
++ signed int i32All;
++ float f32All;
++};
++
++enum {
++ ADDRESS_WATCH_REG_CNTL_ATC_BIT = 0x10000000UL,
++ ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK = 0x00FFFFFF,
++ ADDRESS_WATCH_REG_ADDLOW_MASK_EXTENSION = 0x03000000,
++ /* extend the mask to 26 bits in order to match the low address field */
++ ADDRESS_WATCH_REG_ADDLOW_SHIFT = 6,
++ ADDRESS_WATCH_REG_ADDHIGH_MASK = 0xFFFF
++};
++
++enum {
++ MAX_TRAPID = 8, /* 3 bits in the bitfield. */
++ MAX_WATCH_ADDRESSES = 4
++};
++
++enum {
++ ADDRESS_WATCH_REG_ADDR_HI = 0,
++ ADDRESS_WATCH_REG_ADDR_LO,
++ ADDRESS_WATCH_REG_CNTL,
++ ADDRESS_WATCH_REG_MAX
++};
++
+ #endif /* KFD_DBGDEV_H_ */
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index a21bab4..21dfc72 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -75,7 +75,7 @@
+
+ /*
+ * When working with cp scheduler we should assign the HIQ manually or via
+- * the radeon driver to a fixed hqd slot, here are the fixed HIQ hqd slot
++ * the amdgpu driver to a fixed hqd slot, here are the fixed HIQ hqd slot
+ * definitions for Kaveri. In Kaveri only the first ME queues participates
+ * in the cp scheduling taking that in mind we set the HIQ slot in the
+ * second ME.
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5697-drm-amdkfd-Optimize-out-some-duplicated-code-in-kfd_.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5697-drm-amdkfd-Optimize-out-some-duplicated-code-in-kfd_.patch
new file mode 100644
index 00000000..0cbc703b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5697-drm-amdkfd-Optimize-out-some-duplicated-code-in-kfd_.patch
@@ -0,0 +1,59 @@
+From 029d9b0aadf9aa3491b2f6f16b894d5f17d5d45b Mon Sep 17 00:00:00 2001
+From: Yong Zhao <yong.zhao@amd.com>
+Date: Fri, 13 Jul 2018 16:17:47 -0400
+Subject: [PATCH 5697/5725] drm/amdkfd: Optimize out some duplicated code in
+ kfd_signal_iommu_event()
+
+memory_exception_data is already initialized for not-present faults.
+It only needs to be overridden for permission faults.
+
+Signed-off-by: Yong Zhao <yong.zhao@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_events.c | 26 +++++++++++---------------
+ 1 file changed, 11 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+index 1dc1584..a4cb7b0 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+@@ -911,22 +911,18 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
+ memory_exception_data.failure.NotPresent = 1;
+ memory_exception_data.failure.NoExecute = 0;
+ memory_exception_data.failure.ReadOnly = 0;
+- if (vma) {
+- if (vma->vm_start > address) {
+- memory_exception_data.failure.NotPresent = 1;
+- memory_exception_data.failure.NoExecute = 0;
++ if (vma && address >= vma->vm_start) {
++ memory_exception_data.failure.NotPresent = 0;
++
++ if (is_write_requested && !(vma->vm_flags & VM_WRITE))
++ memory_exception_data.failure.ReadOnly = 1;
++ else
+ memory_exception_data.failure.ReadOnly = 0;
+- } else {
+- memory_exception_data.failure.NotPresent = 0;
+- if (is_write_requested && !(vma->vm_flags & VM_WRITE))
+- memory_exception_data.failure.ReadOnly = 1;
+- else
+- memory_exception_data.failure.ReadOnly = 0;
+- if (is_execute_requested && !(vma->vm_flags & VM_EXEC))
+- memory_exception_data.failure.NoExecute = 1;
+- else
+- memory_exception_data.failure.NoExecute = 0;
+- }
++
++ if (is_execute_requested && !(vma->vm_flags & VM_EXEC))
++ memory_exception_data.failure.NoExecute = 1;
++ else
++ memory_exception_data.failure.NoExecute = 0;
+ }
+
+ up_read(&mm->mmap_sem);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5698-drm-amdkfd-Add-CU-masking-ioctl-to-KFD.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5698-drm-amdkfd-Add-CU-masking-ioctl-to-KFD.patch
new file mode 100644
index 00000000..8697b2ca
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5698-drm-amdkfd-Add-CU-masking-ioctl-to-KFD.patch
@@ -0,0 +1,162 @@
+From 1b59abe4ac41fc09e657c6d33b0202bb0a8fa186 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Sat, 14 Jul 2018 19:05:59 -0400
+Subject: [PATCH 5698/5725] drm/amdkfd: Add CU-masking ioctl to KFD
+
+CU-masking allows a KFD client to control the set of CUs used by a
+user mode queue for executing compute dispatches. This can be used
+for optimizing the partitioning of the GPU and minimize conflicts
+between concurrent tasks.
+
+Signed-off-by: Flora Cui <flora.cui@amd.com>
+Signed-off-by: Kent Russell <kent.russell@amd.com>
+Signed-off-by: Eric Huang <JinHuiEric.Huang@amd.com>
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Acked-by: Oded Gabbay <oded.gabbay@gmail.com>
+Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 1 -
+ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c | 41 +++++++++++++++++++++-
+ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h | 4 +++
+ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c | 2 ++
+ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 2 ++
+ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c | 2 ++
+ .../gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 2 ++
+ 7 files changed, 52 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index 759d59e..8d56004 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -2569,7 +2569,6 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_QUEUE_WAVE_STATE,
+ kfd_ioctl_get_queue_wave_state, 0),
+-
+ };
+
+ #define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls)
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+index d39e81c..85b5954 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+@@ -21,7 +21,7 @@
+ *
+ */
+
+-#include "kfd_priv.h"
++#include "kfd_mqd_manager.h"
+
+ /* Mapping queue priority to pipe priority, indexed by queue priority */
+ int pipe_priority_map[] = {
+@@ -91,3 +91,42 @@ struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
+
+ return NULL;
+ }
++
++void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
++ const uint32_t *cu_mask, uint32_t cu_mask_count,
++ uint32_t *se_mask)
++{
++ struct kfd_cu_info cu_info;
++ uint32_t cu_per_sh[4] = {0};
++ int i, se, cu = 0;
++
++ mm->dev->kfd2kgd->get_cu_info(mm->dev->kgd, &cu_info);
++
++ if (cu_mask_count > cu_info.cu_active_number)
++ cu_mask_count = cu_info.cu_active_number;
++
++ for (se = 0; se < cu_info.num_shader_engines; se++)
++ for (i = 0; i < 4; i++)
++ cu_per_sh[se] += hweight32(cu_info.cu_bitmap[se][i]);
++
++ /* Symmetrically map cu_mask to all SEs:
++ * cu_mask[0] bit0 -> se_mask[0] bit0;
++ * cu_mask[0] bit1 -> se_mask[1] bit0;
++ * ... (if # SE is 4)
++ * cu_mask[0] bit4 -> se_mask[0] bit1;
++ * ...
++ */
++ se = 0;
++ for (i = 0; i < cu_mask_count; i++) {
++ if (cu_mask[i / 32] & (1 << (i % 32)))
++ se_mask[se] |= 1 << cu;
++
++ do {
++ se++;
++ if (se == cu_info.num_shader_engines) {
++ se = 0;
++ cu++;
++ }
++ } while (cu >= cu_per_sh[se] && cu < 32);
++ }
++}
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
+index 336ea9c..5fd379d 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
+@@ -104,4 +104,8 @@ struct mqd_manager {
+ struct kfd_dev *dev;
+ };
+
++void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
++ const uint32_t *cu_mask, uint32_t cu_mask_count,
++ uint32_t *se_mask);
++
+ #endif /* KFD_MQD_MANAGER_H_ */
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+index 6611c7a..2de16bd 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+@@ -289,6 +289,8 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
+ update_cu_mask(mm, mqd, q);
+ set_priority(m, q);
+
++ update_cu_mask(mm, mqd, q);
++
+ q->is_active = (q->queue_size > 0 &&
+ q->queue_address != 0 &&
+ q->queue_percent > 0 &&
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+index ea1d01d..1fcf0c4 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+@@ -298,6 +298,8 @@ static int update_mqd(struct mqd_manager *mm, void *mqd,
+
+ update_cu_mask(mm, mqd, q);
+
++ update_cu_mask(mm, mqd, q);
++
+ q->is_active = (q->queue_size > 0 &&
+ q->queue_address != 0 &&
+ q->queue_percent > 0 &&
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+index 9b1eca7..5b800d2 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+@@ -301,6 +301,8 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
+ update_cu_mask(mm, mqd, q);
+ set_priority(m, q);
+
++ update_cu_mask(mm, mqd, q);
++
+ q->is_active = (q->queue_size > 0 &&
+ q->queue_address != 0 &&
+ q->queue_percent > 0 &&
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+index 566e205a..6940db1 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+@@ -328,6 +328,8 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
+ if (retval != -ETIME)
+ goto err_destroy_queue;
+ }
++ kfree(pqn->q->properties.cu_mask);
++ pqn->q->properties.cu_mask = NULL;
+ uninit_queue(pqn->q);
+ }
+
+--
+2.7.4
+
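
A minimal user-space sketch of the symmetric cu_mask-to-se_mask distribution described in the comment above, with assumed hardware counts (4 shader engines, 9 CUs per engine) standing in for what get_cu_info() would report:

    /*
     * Illustration only: restates the round-robin mapping done by
     * mqd_symmetrically_map_cu_mask() in the hunk above. NUM_SE and
     * CU_PER_SE are assumed values, not queried from hardware.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define NUM_SE    4
    #define CU_PER_SE 9

    static void map_cu_mask(const uint32_t *cu_mask, uint32_t cu_mask_count,
                            uint32_t *se_mask)
    {
            uint32_t i, se = 0, cu = 0;

            for (i = 0; i < cu_mask_count; i++) {
                    if (cu_mask[i / 32] & (1u << (i % 32)))
                            se_mask[se] |= 1u << cu;

                    /* next SE; wrap to the next CU slot after the last SE */
                    do {
                            se++;
                            if (se == NUM_SE) {
                                    se = 0;
                                    cu++;
                            }
                    } while (cu >= CU_PER_SE && cu < 32);
            }
    }

    int main(void)
    {
            uint32_t cu_mask[1] = { 0xff };      /* enable the first 8 CUs */
            uint32_t se_mask[NUM_SE] = { 0 };
            int i;

            map_cu_mask(cu_mask, 8, se_mask);
            for (i = 0; i < NUM_SE; i++)         /* expect 0x3 on every SE */
                    printf("se_mask[%d] = 0x%x\n", i, (unsigned int)se_mask[i]);
            return 0;
    }
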
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5699-drm-amdkfd-Call-kfd2kgd.set_compute_idle.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5699-drm-amdkfd-Call-kfd2kgd.set_compute_idle.patch
new file mode 100644
index 00000000..aa84f48b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5699-drm-amdkfd-Call-kfd2kgd.set_compute_idle.patch
@@ -0,0 +1,153 @@
+From cde4316001fa528d4c9dd7d5a3d79a2ac696ac16 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Mon, 16 Jul 2018 19:10:37 -0400
+Subject: [PATCH 5699/5725] drm/amdkfd: Call kfd2kgd.set_compute_idle
+
+User mode queue submissions don't go through KFD. Therefore we don't
+know exactly when compute is idle or not idle. We use the existence
+of user mode queues on a device as an approximation.
+
+register_process is called when the first queue of a process is
+created. Conversely unregister_process is called when the last queue
+is destroyed. The first process that is registered takes compute
+out of idle. The last process that unregisters sets compute back
+to idle.
+
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Reviewed-by: Eric Huang <JinHuiEric.Huang@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 8 ++++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 1 +
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 3 ++-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 3 ++-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 3 ++-
+ drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 7 +++++--
+ drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 5 +++++
+ 7 files changed, 25 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+index 9ff80a5..f27bcd0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+@@ -583,6 +583,14 @@ int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
+ return ret;
+ }
+
++void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
++{
++ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
++
++ amdgpu_dpm_switch_power_profile(adev,
++ PP_SMC_POWER_PROFILE_COMPUTE, !idle);
++}
++
+ bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev,
+ u32 vmid)
+ {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+index 23ac8a6..f9d21a6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+@@ -117,6 +117,7 @@ int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm);
+ int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
+ uint32_t vmid, uint64_t gpu_addr,
+ uint32_t *ib_cmd, uint32_t ib_len);
++void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle);
+ struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void);
+ struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void);
+ struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+index d2702b0..ef482bc 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+@@ -229,7 +229,8 @@ static const struct kfd2kgd_calls kfd2kgd = {
+ .restore_process_bos = amdgpu_amdkfd_gpuvm_restore_process_bos,
+ .copy_mem_to_mem = amdgpu_amdkfd_copy_mem_to_mem,
+ .get_vram_usage = amdgpu_amdkfd_get_vram_usage,
+- .gpu_recover = amdgpu_amdkfd_gpu_reset
++ .gpu_recover = amdgpu_amdkfd_gpu_reset,
++ .set_compute_idle = amdgpu_amdkfd_set_compute_idle
+ };
+
+ struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions()
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+index 69ac7be..e6cfa22 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+@@ -202,7 +202,8 @@ static const struct kfd2kgd_calls kfd2kgd = {
+ .restore_process_bos = amdgpu_amdkfd_gpuvm_restore_process_bos,
+ .copy_mem_to_mem = amdgpu_amdkfd_copy_mem_to_mem,
+ .get_vram_usage = amdgpu_amdkfd_get_vram_usage,
+- .gpu_recover = amdgpu_amdkfd_gpu_reset
++ .gpu_recover = amdgpu_amdkfd_gpu_reset,
++ .set_compute_idle = amdgpu_amdkfd_set_compute_idle
+ };
+
+ struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions()
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+index c47a75d..eee3a3e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+@@ -252,7 +252,8 @@ static const struct kfd2kgd_calls kfd2kgd = {
+ .restore_process_bos = amdgpu_amdkfd_gpuvm_restore_process_bos,
+ .copy_mem_to_mem = amdgpu_amdkfd_copy_mem_to_mem,
+ .get_vram_usage = amdgpu_amdkfd_get_vram_usage,
+- .gpu_recover = amdgpu_amdkfd_gpu_reset
++ .gpu_recover = amdgpu_amdkfd_gpu_reset,
++ .set_compute_idle = amdgpu_amdkfd_set_compute_idle
+ };
+
+ struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions()
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index d976f1c..147dc01 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -782,7 +782,8 @@ static int register_process(struct device_queue_manager *dqm,
+
+ retval = dqm->asic_ops.update_qpd(dqm, qpd);
+
+- dqm->processes_count++;
++ if (dqm->processes_count++ == 0)
++ dqm->dev->kfd2kgd->set_compute_idle(dqm->dev->kgd, false);
+
+ mutex_unlock(&dqm->lock);
+
+@@ -805,7 +806,9 @@ static int unregister_process(struct device_queue_manager *dqm,
+ if (qpd == cur->qpd) {
+ list_del(&cur->list);
+ kfree(cur);
+- dqm->processes_count--;
++ if (--dqm->processes_count == 0)
++ dqm->dev->kfd2kgd->set_compute_idle(
++ dqm->dev->kgd, true);
+ goto out;
+ }
+ }
+diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+index ad6ee1b..88dbade 100755
+--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
++++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+@@ -250,6 +250,9 @@ struct tile_config {
+ *
+ * @gpu_recover: let kgd reset gpu after kfd detect CPC hang
+ *
++ * @set_compute_idle: Indicates that compute is idle on a device. This
++ * can be used to change power profiles depending on compute activity.
++ *
+ * This structure contains function pointers to services that the kgd driver
+ * provides to amdkfd driver.
+ *
+@@ -402,6 +405,8 @@ struct kfd2kgd_calls {
+ uint64_t (*get_vram_usage)(struct kgd_dev *kgd);
+
+ void (*gpu_recover)(struct kgd_dev *kgd);
++
++ void (*set_compute_idle)(struct kgd_dev *kgd, bool idle);
+ };
+
+ /**
+--
+2.7.4
+
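
The first-in/last-out toggle described above reduces to a short sketch; set_compute_idle() here is a local stand-in for the new kfd2kgd->set_compute_idle() hook, and the locking that dqm_lock provides in the real code is omitted:

    /* Illustration only: power profile follows the process count. */
    #include <stdbool.h>
    #include <stdio.h>

    static int processes_count;

    static void set_compute_idle(bool idle)
    {
            printf("compute power profile %s\n", idle ? "released" : "requested");
    }

    static void register_process(void)
    {
            if (processes_count++ == 0)          /* first process with queues */
                    set_compute_idle(false);
    }

    static void unregister_process(void)
    {
            if (--processes_count == 0)          /* last process went away */
                    set_compute_idle(true);
    }

    int main(void)
    {
            register_process();
            register_process();                  /* no second profile switch */
            unregister_process();
            unregister_process();                /* back to idle only here */
            return 0;
    }
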
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5700-Removed-DKMS-installed-KFD-check-for-kernel-version.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5700-Removed-DKMS-installed-KFD-check-for-kernel-version.patch
new file mode 100644
index 00000000..75c9f46e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5700-Removed-DKMS-installed-KFD-check-for-kernel-version.patch
@@ -0,0 +1,34 @@
+From 52a5d19741832052f7549c0d06acc7262f5b9f2f Mon Sep 17 00:00:00 2001
+From: Ravi Kumar <ravi1.kumar@amd.com>
+Date: Sun, 4 Nov 2018 13:37:53 +0530
+Subject: [PATCH 5700/5725] Removed DKMS installed KFD check for kernel
+ version.
+
+Signed-off-by: Ravi Kumar <ravi1.kumar@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 6 ------
+ 1 file changed, 6 deletions(-)
+ mode change 100644 => 100755 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+old mode 100644
+new mode 100755
+index f27bcd0..1aff1f6
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+@@ -96,12 +96,6 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
+ case CHIP_VEGA10:
+ case CHIP_VEGA20:
+ case CHIP_RAVEN:
+-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)
+- if (adev->asic_type == CHIP_RAVEN) {
+- dev_dbg(adev->dev, "DKMS installed kfd does not support Raven for kernel < 4.16\n");
+- return;
+- }
+-#endif
+ kfd2kgd = amdgpu_amdkfd_gfx_9_0_get_functions();
+ break;
+ default:
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5701-drm-amdgpu-Merge-amdkfd-into-amdgpu.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5701-drm-amdgpu-Merge-amdkfd-into-amdgpu.patch
new file mode 100644
index 00000000..4da9a675
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5701-drm-amdgpu-Merge-amdkfd-into-amdgpu.patch
@@ -0,0 +1,291 @@
+From b9e93608c6e89441477a797c2c8196d3d7984755 Mon Sep 17 00:00:00 2001
+From: Amber Lin <Amber.Lin@amd.com>
+Date: Wed, 22 Aug 2018 16:48:50 -0400
+Subject: [PATCH 5701/5725] drm/amdgpu: Merge amdkfd into amdgpu
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Since KFD is only supported by a single GPU driver, it makes sense to merge
+amdgpu and amdkfd into one module. This patch is the initial step: merge
+Kconfig and Makefile.
+
+v2: also remove kfd from drm Kconfig
+
+Signed-off-by: Amber Lin <Amber.Lin@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/Kconfig | 2 --
+ drivers/gpu/drm/amd/amdgpu/Kconfig | 1 +
+ drivers/gpu/drm/amd/amdgpu/Makefile | 9 +++--
+ drivers/gpu/drm/amd/amdkfd/Kconfig | 2 +-
+ drivers/gpu/drm/amd/amdkfd/Makefile | 57 ++++++++++++++++++-----------
+ drivers/gpu/drm/amd/amdkfd/kfd_module.c | 64 +++++++++++----------------------
+ 6 files changed, 66 insertions(+), 69 deletions(-)
+
+diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
+index 1a4bda3..1745b8a 100644
+--- a/drivers/gpu/drm/Kconfig
++++ b/drivers/gpu/drm/Kconfig
+@@ -262,8 +262,6 @@ source "drivers/gpu/drm/bridge/Kconfig"
+
+ source "drivers/gpu/drm/sti/Kconfig"
+
+-source "drivers/gpu/drm/amd/amdkfd/Kconfig"
+-
+ source "drivers/gpu/drm/imx/Kconfig"
+
+ source "drivers/gpu/drm/vc4/Kconfig"
+diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
+index 468a19b..d2ea710 100644
+--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
++++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
+@@ -42,3 +42,4 @@ config DRM_AMDGPU_GART_DEBUGFS
+
+ source "drivers/gpu/drm/amd/acp/Kconfig"
+ source "drivers/gpu/drm/amd/display/Kconfig"
++source "drivers/gpu/drm/amd/amdkfd/Kconfig"
+diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
+index 5edff50..e351f2d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/Makefile
++++ b/drivers/gpu/drm/amd/amdgpu/Makefile
+@@ -35,7 +35,8 @@ ccflags-y := -Iinclude/drm -I$(FULL_AMD_PATH)/include/asic_reg \
+ -I$(FULL_AMD_DISPLAY_PATH) \
+ -I$(FULL_AMD_DISPLAY_PATH)/include \
+ -I$(FULL_AMD_DISPLAY_PATH)/dc \
+- -I$(FULL_AMD_DISPLAY_PATH)/amdgpu_dm
++ -I$(FULL_AMD_DISPLAY_PATH)/amdgpu_dm \
++ -I$(FULL_AMD_PATH)/amdkfd
+
+ amdgpu-y := amdgpu_drv.o
+
+@@ -134,13 +135,17 @@ amdgpu-y += \
+ vcn_v1_0.o
+
+ # add amdkfd interfaces
++ifneq ($(CONFIG_HSA_AMD),)
++AMDKFD_PATH := ../amdkfd
++include $(FULL_AMD_PATH)/amdkfd/Makefile
++amdgpu-y += $(AMDKFD_FILES)
+ amdgpu-y += \
+ amdgpu_amdkfd.o \
+ amdgpu_amdkfd_gfx_v7.o \
+ amdgpu_amdkfd_gfx_v8.o \
+ amdgpu_amdkfd_gfx_v9.o \
+ amdgpu_amdkfd_gpuvm.o
+-
++endif
+ # add cgs
+ amdgpu-y += amdgpu_cgs.o
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig
+index bc8d8f7..94cbfbc 100644
+--- a/drivers/gpu/drm/amd/amdkfd/Kconfig
++++ b/drivers/gpu/drm/amd/amdkfd/Kconfig
+@@ -3,7 +3,7 @@
+ #
+
+ config HSA_AMD
+- tristate "HSA kernel driver for AMD GPU devices"
++ bool "HSA kernel driver for AMD GPU devices"
+ depends on (DRM_RADEON || DRM_AMDGPU) && (X86_64 || PPC64 || ARM64)
+ select DRM_AMDGPU_USERPTR
+ imply AMD_IOMMU_V2
+diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile
+index 4804f9c..911e67d 100644
+--- a/drivers/gpu/drm/amd/amdkfd/Makefile
++++ b/drivers/gpu/drm/amd/amdkfd/Makefile
+@@ -23,28 +23,43 @@
+ # Makefile for Heterogenous System Architecture support for AMD GPU devices
+ #
+
+-FULL_AMD_PATH=$(src)/..
+-ccflags-y := -I$(FULL_AMD_PATH)/include \
+- -I$(FULL_AMD_PATH)/include/asic_reg
+-
+-amdkfd-y := kfd_module.o kfd_device.o kfd_chardev.o kfd_topology.o \
+- kfd_pasid.o kfd_doorbell.o kfd_flat_memory.o \
+- kfd_process.o kfd_queue.o kfd_mqd_manager.o \
+- kfd_mqd_manager_cik.o kfd_mqd_manager_vi.o \
+- kfd_mqd_manager_v9.o \
+- kfd_kernel_queue.o kfd_kernel_queue_cik.o \
+- kfd_kernel_queue_vi.o kfd_kernel_queue_v9.o \
+- kfd_packet_manager.o kfd_process_queue_manager.o \
+- kfd_device_queue_manager.o kfd_device_queue_manager_cik.o \
+- kfd_device_queue_manager_vi.o kfd_device_queue_manager_v9.o \
+- kfd_interrupt.o kfd_events.o cik_event_interrupt.o kfd_int_process_v9.o \
+- kfd_dbgdev.o kfd_dbgmgr.o kfd_flat_memory.o kfd_crat.o kfd_rdma.o \
+- kfd_peerdirect.o kfd_ipc.o
++AMDKFD_FILES := $(AMDKFD_PATH)/kfd_module.o \
++ $(AMDKFD_PATH)/kfd_device.o \
++ $(AMDKFD_PATH)/kfd_chardev.o \
++ $(AMDKFD_PATH)/kfd_topology.o \
++ $(AMDKFD_PATH)/kfd_pasid.o \
++ $(AMDKFD_PATH)/kfd_doorbell.o \
++ $(AMDKFD_PATH)/kfd_flat_memory.o \
++ $(AMDKFD_PATH)/kfd_process.o \
++ $(AMDKFD_PATH)/kfd_queue.o \
++ $(AMDKFD_PATH)/kfd_mqd_manager.o \
++ $(AMDKFD_PATH)/kfd_mqd_manager_cik.o \
++ $(AMDKFD_PATH)/kfd_mqd_manager_vi.o \
++ $(AMDKFD_PATH)/kfd_mqd_manager_v9.o \
++ $(AMDKFD_PATH)/kfd_kernel_queue.o \
++ $(AMDKFD_PATH)/kfd_kernel_queue_cik.o \
++ $(AMDKFD_PATH)/kfd_kernel_queue_vi.o \
++ $(AMDKFD_PATH)/kfd_kernel_queue_v9.o \
++ $(AMDKFD_PATH)/kfd_packet_manager.o \
++ $(AMDKFD_PATH)/kfd_process_queue_manager.o \
++ $(AMDKFD_PATH)/kfd_device_queue_manager.o \
++ $(AMDKFD_PATH)/kfd_device_queue_manager_cik.o \
++ $(AMDKFD_PATH)/kfd_device_queue_manager_vi.o \
++ $(AMDKFD_PATH)/kfd_device_queue_manager_v9.o \
++ $(AMDKFD_PATH)/kfd_interrupt.o \
++ $(AMDKFD_PATH)/kfd_events.o \
++ $(AMDKFD_PATH)/cik_event_interrupt.o \
++ $(AMDKFD_PATH)/kfd_int_process_v9.o \
++ $(AMDKFD_PATH)/kfd_dbgdev.o \
++ $(AMDKFD_PATH)/kfd_dbgmgr.o \
++ $(AMDKFD_PATH)/kfd_flat_memory.o \
++ $(AMDKFD_PATH)/kfd_rdma.o \
++ $(AMDKFD_PATH)/kfd_crat.o
+
+ ifneq ($(CONFIG_AMD_IOMMU_V2),)
+-amdkfd-y += kfd_iommu.o
++AMDKFD_FILES += $(AMDKFD_PATH)/kfd_iommu.o
+ endif
+
+-amdkfd-$(CONFIG_DEBUG_FS) += kfd_debugfs.o
+-
+-obj-$(CONFIG_HSA_AMD) += amdkfd.o
++ifneq ($(CONFIG_DEBUG_FS),)
++AMDKFD_FILES += $(AMDKFD_PATH)/kfd_debugfs.o
++endif
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+index 261657f..8675222 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+@@ -20,15 +20,11 @@
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-#include <linux/module.h>
+ #include <linux/sched.h>
+ #include <linux/moduleparam.h>
+ #include <linux/device.h>
+-#include <linux/printk.h>
+ #include "kfd_priv.h"
+
+-#define KFD_DRIVER_AUTHOR "AMD Inc. and others"
+-
+ #define KFD_DRIVER_DESC "Standalone HSA driver for AMD's GPUs"
+ #define KFD_DRIVER_DATE "20160408"
+ #define KFD_DRIVER_MAJOR 2
+@@ -99,33 +95,7 @@ int halt_if_hws_hang;
+ module_param(halt_if_hws_hang, int, 0644);
+ MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (default), 1 = on)");
+
+-
+-static int amdkfd_init_completed;
+-
+-int kgd2kfd_init(unsigned int interface_version,
+- const struct kgd2kfd_calls **g2f)
+-{
+- if (!amdkfd_init_completed)
+- return -EPROBE_DEFER;
+-
+- /*
+- * Only one interface version is supported,
+- * no kfd/kgd version skew allowed.
+- */
+- if (interface_version != KFD_INTERFACE_VERSION)
+- return -EINVAL;
+-
+- *g2f = &kgd2kfd;
+-
+- return 0;
+-}
+-EXPORT_SYMBOL(kgd2kfd_init);
+-
+-void kgd2kfd_exit(void)
+-{
+-}
+-
+-static int __init kfd_module_init(void)
++static int kfd_init(void)
+ {
+ int err;
+
+@@ -133,7 +103,7 @@ static int __init kfd_module_init(void)
+ if ((sched_policy < KFD_SCHED_POLICY_HWS) ||
+ (sched_policy > KFD_SCHED_POLICY_NO_HWS)) {
+ pr_err("sched_policy has invalid value\n");
+- return -1;
++ return -EINVAL;
+ }
+
+ /* Verify module parameters */
+@@ -141,7 +111,7 @@ static int __init kfd_module_init(void)
+ (max_num_of_queues_per_device >
+ KFD_MAX_NUM_OF_QUEUES_PER_DEVICE)) {
+ pr_err("max_num_of_queues_per_device must be between 1 to KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n");
+- return -1;
++ return -EINVAL;
+ }
+
+ err = kfd_chardev_init();
+@@ -164,10 +134,6 @@ static int __init kfd_module_init(void)
+
+ kfd_debugfs_init();
+
+- amdkfd_init_completed = 1;
+-
+- dev_info(kfd_device, "Initialized module\n");
+-
+ return 0;
+
+ err_create_wq:
+@@ -179,21 +145,33 @@ static int __init kfd_module_init(void)
+ return err;
+ }
+
+-static void __exit kfd_module_exit(void)
++static void kfd_exit(void)
+ {
+- amdkfd_init_completed = 0;
+-
+ kfd_debugfs_fini();
+ kfd_close_peer_direct();
+ kfd_process_destroy_wq();
+ kfd_topology_shutdown();
+ kfd_chardev_exit();
+- pr_info("amdkfd: Removed module\n");
+ }
+
+-module_init(kfd_module_init);
+-module_exit(kfd_module_exit);
++int kgd2kfd_init(unsigned int interface_version,
++ const struct kgd2kfd_calls **g2f)
++{
++ int err;
+
++ err = kfd_init();
++ if (err)
++ return err;
++ *g2f = &kgd2kfd;
++
++ return 0;
++}
++EXPORT_SYMBOL(kgd2kfd_init);
++
++void kgd2kfd_exit(void)
++{
++ kfd_exit();
++}
+ MODULE_AUTHOR(KFD_DRIVER_AUTHOR);
+ MODULE_DESCRIPTION(KFD_DRIVER_DESC);
+ MODULE_LICENSE("GPL and additional rights");
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5702-drm-amdgpu-Move-KFD-parameters-to-amdgpu-v3.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5702-drm-amdgpu-Move-KFD-parameters-to-amdgpu-v3.patch
new file mode 100644
index 00000000..fcbc846e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5702-drm-amdgpu-Move-KFD-parameters-to-amdgpu-v3.patch
@@ -0,0 +1,348 @@
+From 0c1308516ee7898e840e5fdfd60f0ded405b7268 Mon Sep 17 00:00:00 2001
+From: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+Date: Mon, 7 Jan 2019 16:26:05 +0530
+Subject: [PATCH 5702/5725] drm/amdgpu: Move KFD parameters to amdgpu (v3)
+
+After merging KFD into amdgpu, move module parameters defined in KFD to
+amdgpu_drv.c, where other module parameters are declared.
+
+v2: add kernel-doc comments
+v3: rebase and fix parameter variable name (Alex)
+
+Signed-off-by: Amber Lin <Amber.Lin@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/Makefile | 3 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 26 ++------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 97 ++++++++++++++++++++++++++++++
+ drivers/gpu/drm/amd/amdkfd/kfd_module.c | 69 ---------------------
+ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 5 --
+ 5 files changed, 104 insertions(+), 96 deletions(-)
+ mode change 100644 => 100755 drivers/gpu/drm/amd/amdkfd/kfd_module.c
+ mode change 100644 => 100755 drivers/gpu/drm/amd/amdkfd/kfd_process.c
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
+index e351f2d..467125d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/Makefile
++++ b/drivers/gpu/drm/amd/amdgpu/Makefile
+@@ -135,7 +135,7 @@ amdgpu-y += \
+ vcn_v1_0.o
+
+ # add amdkfd interfaces
+-ifneq ($(CONFIG_HSA_AMD),)
++# ifneq ($(CONFIG_HSA_AMD),)
+ AMDKFD_PATH := ../amdkfd
+ include $(FULL_AMD_PATH)/amdkfd/Makefile
+ amdgpu-y += $(AMDKFD_FILES)
+@@ -145,7 +145,6 @@ amdgpu-y += \
+ amdgpu_amdkfd_gfx_v8.o \
+ amdgpu_amdkfd_gfx_v9.o \
+ amdgpu_amdkfd_gpuvm.o
+-endif
+ # add cgs
+ amdgpu-y += amdgpu_cgs.o
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+index 1aff1f6..68f3891 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+@@ -38,29 +38,15 @@ int amdgpu_amdkfd_init(void)
+ {
+ int ret;
+
+-#if defined(CONFIG_HSA_AMD_MODULE)
+- int (*kgd2kfd_init_p)(unsigned int, const struct kgd2kfd_calls**);
+-
+- kgd2kfd_init_p = symbol_request(kgd2kfd_init);
+-
+- if (kgd2kfd_init_p == NULL)
+- return -ENOENT;
+-
+- ret = kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd);
+- if (ret) {
+- symbol_put(kgd2kfd_init);
+- kgd2kfd = NULL;
+- }
+-
+-#elif defined(CONFIG_HSA_AMD)
+- ret = kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd);
+- if (ret)
+- kgd2kfd = NULL;
+-
++#ifdef CONFIG_HSA_AMD
++ ret = kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd);
++ if (ret)
++ kgd2kfd = NULL;
++ amdgpu_amdkfd_gpuvm_init_mem_limits();
+ #else
++ kgd2kfd = NULL;
+ ret = -ENOENT;
+ #endif
+- amdgpu_amdkfd_gpuvm_init_mem_limits();
+ return ret;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 5f59a07..7753af9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -39,6 +39,7 @@
+ #include "amdgpu_gem.h"
+
+ #include "amdgpu_amdkfd.h"
++#include "kfd_priv.h"
+
+ /*
+ * KMS wrapper.
+@@ -137,6 +138,17 @@ struct amdgpu_mgpu_info mgpu_info = {
+ .mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
+ };
+
++/* KFD parameters */
++int sched_policy = KFD_SCHED_POLICY_HWS;
++int hws_max_conc_proc = 8;
++int cwsr_enable = 1;
++int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT;
++int send_sigterm;
++int debug_largebar;
++int ignore_crat;
++int noretry;
++int halt_if_hws_hang;
++
+ /**
+ * DOC: vramlimit (int)
+ * Restrict the total amount of VRAM in MiB for testing. The default is 0 (Use full VRAM).
+@@ -551,6 +563,91 @@ MODULE_PARM_DESC(smu_memory_pool_size,
+ "0x1 = 256Mbyte, 0x2 = 512Mbyte, 0x4 = 1 Gbyte, 0x8 = 2GByte");
+ module_param_named(smu_memory_pool_size, amdgpu_smu_memory_pool_size, uint, 0444);
+
++/**
++ * DOC: sched_policy (int)
++ * Set scheduling policy. Default is HWS(hardware scheduling) with over-subscription.
++ * Setting 1 disables over-subscription. Setting 2 disables HWS and statically
++ * assigns queues to HQDs.
++ */
++module_param(sched_policy, int, 0444);
++MODULE_PARM_DESC(sched_policy,
++ "Scheduling policy (0 = HWS (Default), 1 = HWS without over-subscription, 2 = Non-HWS (Used for debugging only)");
++
++/**
++ * DOC: hws_max_conc_proc (int)
++ * Maximum number of processes that HWS can schedule concurrently. The maximum is the
++ * number of VMIDs assigned to the HWS, which is also the default.
++ */
++module_param(hws_max_conc_proc, int, 0444);
++MODULE_PARM_DESC(hws_max_conc_proc,
++ "Max # processes HWS can execute concurrently when sched_policy=0 (0 = no concurrency, #VMIDs for KFD = Maximum(default))");
++
++/**
++ * DOC: cwsr_enable (int)
++ * CWSR(compute wave store and resume) allows the GPU to preempt shader execution in
++ * the middle of a compute wave. Default is 1 to enable this feature. Setting 0
++ * disables it.
++ */
++module_param(cwsr_enable, int, 0444);
++MODULE_PARM_DESC(cwsr_enable, "CWSR enable (0 = Off, 1 = On (Default))");
++
++/**
++ * DOC: max_num_of_queues_per_device (int)
++ * Maximum number of queues per device. Valid setting is between 1 and 4096. Default
++ * is 4096.
++ */
++module_param(max_num_of_queues_per_device, int, 0444);
++MODULE_PARM_DESC(max_num_of_queues_per_device,
++ "Maximum number of supported queues per device (1 = Minimum, 4096 = default)");
++
++/**
++ * DOC: send_sigterm (int)
++ * Send sigterm to HSA process on unhandled exceptions. Default is not to send sigterm
++ * but just print errors on dmesg. Setting 1 enables sending sigterm.
++ */
++module_param(send_sigterm, int, 0444);
++MODULE_PARM_DESC(send_sigterm,
++ "Send sigterm to HSA process on unhandled exception (0 = disable, 1 = enable)");
++
++/**
++ * DOC: debug_largebar (int)
++ * Set debug_largebar as 1 to enable simulating large-bar capability on non-large bar
++ * system. This limits the VRAM size reported to ROCm applications to the visible
++ * size, usually 256MB.
++ * Default value is 0, disabled.
++ */
++module_param(debug_largebar, int, 0444);
++MODULE_PARM_DESC(debug_largebar,
++ "Debug large-bar flag used to simulate large-bar capability on non-large bar machine (0 = disable, 1 = enable)");
++
++/**
++ * DOC: ignore_crat (int)
++ * Ignore CRAT table during KFD initialization. By default, KFD uses the ACPI CRAT
++ * table to get information about AMD APUs. This option can serve as a workaround on
++ * systems with a broken CRAT table.
++ */
++module_param(ignore_crat, int, 0444);
++MODULE_PARM_DESC(ignore_crat,
++ "Ignore CRAT table during KFD initialization (0 = use CRAT (default), 1 = ignore CRAT)");
++
++/**
++ * DOC: noretry (int)
++ * This parameter sets sh_mem_config.retry_disable. Default value, 0, enables retry.
++ * Setting 1 disables retry.
++ * Retry is needed for recoverable page faults.
++ */
++module_param(noretry, int, 0644);
++MODULE_PARM_DESC(noretry,
++ "Set sh_mem_config.retry_disable on Vega10 (0 = retry enabled (default), 1 = retry disabled)");
++
++/**
++ * DOC: halt_if_hws_hang (int)
++ * Halt if HWS hang is detected. Default value, 0, disables the halt on hang.
++ * Setting 1 enables halt on hang.
++ */
++module_param(halt_if_hws_hang, int, 0644);
++MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (default), 1 = on)");
++
+ static const struct pci_device_id pciidlist[] = {
+ #ifdef CONFIG_DRM_AMDGPU_SI
+ {0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+old mode 100644
+new mode 100755
+index 8675222..43cecf2
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+@@ -21,16 +21,9 @@
+ */
+
+ #include <linux/sched.h>
+-#include <linux/moduleparam.h>
+ #include <linux/device.h>
+ #include "kfd_priv.h"
+
+-#define KFD_DRIVER_DESC "Standalone HSA driver for AMD's GPUs"
+-#define KFD_DRIVER_DATE "20160408"
+-#define KFD_DRIVER_MAJOR 2
+-#define KFD_DRIVER_MINOR 0
+-#define KFD_DRIVER_PATCHLEVEL 0
+-
+ static const struct kgd2kfd_calls kgd2kfd = {
+ .exit = kgd2kfd_exit,
+ .probe = kgd2kfd_probe,
+@@ -47,54 +40,6 @@ static const struct kgd2kfd_calls kgd2kfd = {
+ .post_reset = kgd2kfd_post_reset,
+ };
+
+-int sched_policy = KFD_SCHED_POLICY_HWS;
+-module_param(sched_policy, int, 0444);
+-MODULE_PARM_DESC(sched_policy,
+- "Scheduling policy (0 = HWS (Default), 1 = HWS without over-subscription, 2 = Non-HWS (Used for debugging only)");
+-
+-int hws_max_conc_proc = 8;
+-module_param(hws_max_conc_proc, int, 0444);
+-MODULE_PARM_DESC(hws_max_conc_proc,
+- "Max # processes HWS can execute concurrently when sched_policy=0 (0 = no concurrency, #VMIDs for KFD = Maximum(default))");
+-
+-int cwsr_enable = 1;
+-module_param(cwsr_enable, int, 0444);
+-MODULE_PARM_DESC(cwsr_enable, "CWSR enable (0 = off, 1 = on (default))");
+-
+-int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT;
+-module_param(max_num_of_queues_per_device, int, 0444);
+-MODULE_PARM_DESC(max_num_of_queues_per_device,
+- "Maximum number of supported queues per device (1 = Minimum, 4096 = default)");
+-
+-int send_sigterm;
+-module_param(send_sigterm, int, 0444);
+-MODULE_PARM_DESC(send_sigterm,
+- "Send sigterm to HSA process on unhandled exception (0 = disable, 1 = enable)");
+-
+-int debug_largebar;
+-module_param(debug_largebar, int, 0444);
+-MODULE_PARM_DESC(debug_largebar,
+- "Debug large-bar flag used to simulate large-bar capability on non-large bar machine (0 = disable, 1 = enable)");
+-
+-int ignore_crat;
+-module_param(ignore_crat, int, 0444);
+-MODULE_PARM_DESC(ignore_crat,
+- "Ignore CRAT table during KFD initialization (0 = use CRAT (default), 1 = ignore CRAT)");
+-
+-int noretry = 1;
+-module_param(noretry, int, 0644);
+-MODULE_PARM_DESC(noretry,
+- "Set sh_mem_config.retry_disable on GFXv9+ dGPUs (0 = retry enabled, 1 = retry disabled (default))");
+-
+-int priv_cp_queues;
+-module_param(priv_cp_queues, int, 0644);
+-MODULE_PARM_DESC(priv_cp_queues,
+- "Enable privileged mode for CP queues (0 = off (default), 1 = on)");
+-
+-int halt_if_hws_hang;
+-module_param(halt_if_hws_hang, int, 0644);
+-MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (default), 1 = on)");
+-
+ static int kfd_init(void)
+ {
+ int err;
+@@ -122,22 +67,15 @@ static int kfd_init(void)
+ if (err < 0)
+ goto err_topology;
+
+- err = kfd_ipc_init();
+- if (err < 0)
+- goto err_ipc;
+-
+ err = kfd_process_create_wq();
+ if (err < 0)
+ goto err_create_wq;
+
+- kfd_init_peer_direct();
+-
+ kfd_debugfs_init();
+
+ return 0;
+
+ err_create_wq:
+-err_ipc:
+ kfd_topology_shutdown();
+ err_topology:
+ kfd_chardev_exit();
+@@ -148,7 +86,6 @@ static int kfd_init(void)
+ static void kfd_exit(void)
+ {
+ kfd_debugfs_fini();
+- kfd_close_peer_direct();
+ kfd_process_destroy_wq();
+ kfd_topology_shutdown();
+ kfd_chardev_exit();
+@@ -172,9 +109,3 @@ void kgd2kfd_exit(void)
+ {
+ kfd_exit();
+ }
+-MODULE_AUTHOR(KFD_DRIVER_AUTHOR);
+-MODULE_DESCRIPTION(KFD_DRIVER_DESC);
+-MODULE_LICENSE("GPL and additional rights");
+-MODULE_VERSION(__stringify(KFD_DRIVER_MAJOR) "."
+- __stringify(KFD_DRIVER_MINOR) "."
+- __stringify(KFD_DRIVER_PATCHLEVEL));
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+old mode 100644
+new mode 100755
+index e79479b..73a1adc
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -600,11 +600,6 @@ static struct kfd_process *create_process(const struct task_struct *thread,
+ if (err)
+ goto err_init_cwsr;
+
+- /* If PeerDirect interface was not detected try to detect it again
+- * in case if network driver was loaded later.
+- */
+- kfd_init_peer_direct();
+-
+ return process;
+
+ err_init_cwsr:
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5703-kbuild-create-built-in.o-automatically-if-parent-dir.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5703-kbuild-create-built-in.o-automatically-if-parent-dir.patch
new file mode 100644
index 00000000..5e3b33e2
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5703-kbuild-create-built-in.o-automatically-if-parent-dir.patch
@@ -0,0 +1,91 @@
+From 17664764f28baa4968970151f11e358b1a5be05a Mon Sep 17 00:00:00 2001
+From: Masahiro Yamada <yamada.masahiro@socionext.com>
+Date: Wed, 8 Nov 2017 01:31:46 +0900
+Subject: [PATCH 5703/5725] kbuild: create built-in.o automatically if parent
+ directory wants it
+
+"obj-y += foo/" syntax requires Kbuild to visit the "foo" subdirectory
+and link built-in.o from that directory. This means foo/Makefile is
+responsible for creating built-in.o even if there is no object to
+link (in this case, built-in.o is an empty archive).
+
+We have had several fixups like commit 4b024242e8a4 ("kbuild: Fix
+linking error built-in.o no such file or directory"), then ended up
+with a complex condition as follows:
+
+ ifneq ($(strip $(obj-y) $(obj-m) $(obj-) $(subdir-m) $(lib-target)),)
+ builtin-target := $(obj)/built-in.o
+ endif
+
+We still have more cases not covered by the above, so we need to add
+ obj- := dummy.o
+in several places just for creating empty built-in.o.
+
+A key point is, the parent Makefile knows whether built-in.o is needed
+or not. If a subdirectory needs to create built-in.o, its parent can
+tell the fact when descending.
+
+If a non-empty $(need-builtin) flag is passed from the parent, built-in.o
+should be created. $(obj-y) should still be checked to support the
+single target "%/". All of ugly tricks will go away.
+
+Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>
+Reviewed-by: Sam Ravnborg <sam@ravnborg.org>
+---
+ Makefile | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 +++
+ scripts/Makefile.build | 4 ++--
+ 3 files changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/Makefile b/Makefile
+index e91a096..2dc724e 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1036,7 +1036,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
+
+ PHONY += $(vmlinux-dirs)
+ $(vmlinux-dirs): prepare scripts
+- $(Q)$(MAKE) $(build)=$@
++ $(Q)$(MAKE) $(build)=$@ need-builtin=1
+
+ define filechk_kernel.release
+ echo "$(KERNELVERSION)$$($(CONFIG_SHELL) $(srctree)/scripts/setlocalversion $(srctree))"
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 6260307..bf7fa00 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1394,6 +1394,9 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
++
++ printk(">>Ravi: fw_name: %s adev->dev: %d adev->firmware.gpuinfo_fw: %d\n", fw_name, adev->dev, amdgpu_device_parse_gpu_info_fw);
++
+ err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
+ if (err) {
+ dev_err(adev->dev,
+diff --git a/scripts/Makefile.build b/scripts/Makefile.build
+index 7143da0..08de8ba 100644
+--- a/scripts/Makefile.build
++++ b/scripts/Makefile.build
+@@ -85,7 +85,7 @@ lib-target := $(obj)/lib.a
+ obj-y += $(obj)/lib-ksyms.o
+ endif
+
+-ifneq ($(strip $(obj-y) $(obj-m) $(obj-) $(subdir-m) $(lib-target)),)
++ifneq ($(strip $(obj-y) $(need-builtin)),)
+ builtin-target := $(obj)/built-in.o
+ endif
+
+@@ -584,7 +584,7 @@ targets += $(multi-used-y) $(multi-used-m)
+
+ PHONY += $(subdir-ym)
+ $(subdir-ym):
+- $(Q)$(MAKE) $(build)=$@
++ $(Q)$(MAKE) $(build)=$@ need-builtin=$(if $(findstring $@,$(subdir-obj-y)),1)
+
+ # Add FORCE to the prequisites of a target to force it to be always rebuilt.
+ # ---------------------------------------------------------------------------
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5704-kbuild-remove-incremental-linking-option.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5704-kbuild-remove-incremental-linking-option.patch
new file mode 100644
index 00000000..51cc7500
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5704-kbuild-remove-incremental-linking-option.patch
@@ -0,0 +1,233 @@
+From 70031d535f39c6f112d1f0b2e5ab0122108a8eb3 Mon Sep 17 00:00:00 2001
+From: Nicholas Piggin <npiggin@gmail.com>
+Date: Sun, 11 Feb 2018 00:25:03 +1000
+Subject: [PATCH 5704/5725] kbuild: remove incremental linking option
+
+This removes the old `ld -r` incremental link option, which has not
+been selected by any architecture since June 2017.
+
+Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
+Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>
+---
+ Documentation/kbuild/makefiles.txt | 10 ++++-
+ arch/Kconfig | 6 ---
+ scripts/Makefile.build | 32 +++++---------
+ scripts/link-vmlinux.sh | 91 ++++++++++++++------------------------
+ 4 files changed, 51 insertions(+), 88 deletions(-)
+
+diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
+index f6f8038..a933c71 100644
+--- a/Documentation/kbuild/makefiles.txt
++++ b/Documentation/kbuild/makefiles.txt
+@@ -153,8 +153,14 @@ more details, with real examples.
+ configuration.
+
+ Kbuild compiles all the $(obj-y) files. It then calls
+- "$(LD) -r" to merge these files into one built-in.o file.
+- built-in.o is later linked into vmlinux by the parent Makefile.
++ "$(AR) rcSTP" to merge these files into one built-in.o file.
++ This is a thin archive without a symbol table, which makes it
++ unsuitable as a linker input.
++
++ The scripts/link-vmlinux.sh script later makes an aggregate
++ built-in.o with "${AR} rcsTP", which creates the thin archive
++ with a symbol table and an index, making it a valid input for
++ the final vmlinux link passes.
+
+ The order of files in $(obj-y) is significant. Duplicates in
+ the lists are allowed: the first instance will be linked into
+diff --git a/arch/Kconfig b/arch/Kconfig
+index 40dc31f..139f5a3 100644
+--- a/arch/Kconfig
++++ b/arch/Kconfig
+@@ -590,12 +590,6 @@ config CC_STACKPROTECTOR_STRONG
+
+ endchoice
+
+-config THIN_ARCHIVES
+- def_bool y
+- help
+- Select this if the architecture wants to use thin archives
+- instead of ld -r to create the built-in.o files.
+-
+ config LD_DEAD_CODE_DATA_ELIMINATION
+ bool
+ help
+diff --git a/scripts/Makefile.build b/scripts/Makefile.build
+index 08de8ba..e1bd7c2 100644
+--- a/scripts/Makefile.build
++++ b/scripts/Makefile.build
+@@ -462,15 +462,13 @@ $(sort $(subdir-obj-y)): $(subdir-ym) ;
+ #
+ ifdef builtin-target
+
+-ifdef CONFIG_THIN_ARCHIVES
+- cmd_make_builtin = rm -f $@; $(AR) rcSTP$(KBUILD_ARFLAGS)
+- cmd_make_empty_builtin = rm -f $@; $(AR) rcSTP$(KBUILD_ARFLAGS)
+- quiet_cmd_link_o_target = AR $@
+-else
+- cmd_make_builtin = $(LD) $(ld_flags) -r -o
+- cmd_make_empty_builtin = rm -f $@; $(AR) rcs$(KBUILD_ARFLAGS)
+- quiet_cmd_link_o_target = LD $@
+-endif
++# built-in.o archives are made with no symbol table or index which
++# makes them small and fast, but unable to be used by the linker.
++# scripts/link-vmlinux.sh builds an aggregate built-in.o with a symbol
++# table and index.
++cmd_make_builtin = rm -f $@; $(AR) rcSTP$(KBUILD_ARFLAGS)
++cmd_make_empty_builtin = rm -f $@; $(AR) rcSTP$(KBUILD_ARFLAGS)
++quiet_cmd_link_o_target = AR $@
+
+ # If the list of objects to link is empty, just create an empty built-in.o
+ cmd_link_o_target = $(if $(strip $(obj-y)),\
+@@ -503,11 +501,8 @@ $(modorder-target): $(subdir-ym) FORCE
+ ifdef lib-target
+ quiet_cmd_link_l_target = AR $@
+
+-ifdef CONFIG_THIN_ARCHIVES
+- cmd_link_l_target = rm -f $@; $(AR) rcsTP$(KBUILD_ARFLAGS) $@ $(lib-y)
+-else
+- cmd_link_l_target = rm -f $@; $(AR) rcs$(KBUILD_ARFLAGS) $@ $(lib-y)
+-endif
++# lib target archives do get a symbol table and index
++cmd_link_l_target = rm -f $@; $(AR) rcsTP$(KBUILD_ARFLAGS) $@ $(lib-y)
+
+ $(lib-target): $(lib-y) FORCE
+ $(call if_changed,link_l_target)
+@@ -555,13 +550,8 @@ $($(subst $(obj)/,,$(@:.o=-m)))), $^)
+
+ cmd_link_multi-link = $(LD) $(ld_flags) -r -o $@ $(link_multi_deps) $(cmd_secanalysis)
+
+-ifdef CONFIG_THIN_ARCHIVES
+- quiet_cmd_link_multi-y = AR $@
+- cmd_link_multi-y = rm -f $@; $(AR) rcSTP$(KBUILD_ARFLAGS) $@ $(link_multi_deps)
+-else
+- quiet_cmd_link_multi-y = LD $@
+- cmd_link_multi-y = $(cmd_link_multi-link)
+-endif
++quiet_cmd_link_multi-y = AR $@
++cmd_link_multi-y = rm -f $@; $(AR) rcSTP$(KBUILD_ARFLAGS) $@ $(link_multi_deps)
+
+ quiet_cmd_link_multi-m = LD [M] $@
+ cmd_link_multi-m = $(cmd_link_multi-link)
+diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
+index e6818b8e..50ede7b 100755
+--- a/scripts/link-vmlinux.sh
++++ b/scripts/link-vmlinux.sh
+@@ -55,13 +55,11 @@ info()
+ #
+ archive_builtin()
+ {
+- if [ -n "${CONFIG_THIN_ARCHIVES}" ]; then
+- info AR built-in.o
+- rm -f built-in.o;
+- ${AR} rcsTP${KBUILD_ARFLAGS} built-in.o \
+- ${KBUILD_VMLINUX_INIT} \
+- ${KBUILD_VMLINUX_MAIN}
+- fi
++ info AR built-in.o
++ rm -f built-in.o;
++ ${AR} rcsTP${KBUILD_ARFLAGS} built-in.o \
++ ${KBUILD_VMLINUX_INIT} \
++ ${KBUILD_VMLINUX_MAIN}
+ }
+
+ # Link of vmlinux.o used for section mismatch analysis
+@@ -70,20 +68,13 @@ modpost_link()
+ {
+ local objects
+
+- if [ -n "${CONFIG_THIN_ARCHIVES}" ]; then
+- objects="--whole-archive \
+- built-in.o \
+- --no-whole-archive \
+- --start-group \
+- ${KBUILD_VMLINUX_LIBS} \
+- --end-group"
+- else
+- objects="${KBUILD_VMLINUX_INIT} \
+- --start-group \
+- ${KBUILD_VMLINUX_MAIN} \
+- ${KBUILD_VMLINUX_LIBS} \
+- --end-group"
+- fi
++ objects="--whole-archive \
++ built-in.o \
++ --no-whole-archive \
++ --start-group \
++ ${KBUILD_VMLINUX_LIBS} \
++ --end-group"
++
+ ${LD} ${LDFLAGS} -r -o ${1} ${objects}
+ }
+
+@@ -96,46 +87,28 @@ vmlinux_link()
+ local objects
+
+ if [ "${SRCARCH}" != "um" ]; then
+- if [ -n "${CONFIG_THIN_ARCHIVES}" ]; then
+- objects="--whole-archive \
+- built-in.o \
+- --no-whole-archive \
+- --start-group \
+- ${KBUILD_VMLINUX_LIBS} \
+- --end-group \
+- ${1}"
+- else
+- objects="${KBUILD_VMLINUX_INIT} \
+- --start-group \
+- ${KBUILD_VMLINUX_MAIN} \
+- ${KBUILD_VMLINUX_LIBS} \
+- --end-group \
+- ${1}"
+- fi
+-
+- ${LD} ${LDFLAGS} ${LDFLAGS_vmlinux} -o ${2} \
++ objects="--whole-archive \
++ built-in.o \
++ --no-whole-archive \
++ --start-group \
++ ${KBUILD_VMLINUX_LIBS} \
++ --end-group \
++ ${1}"
++
++ ${LD} ${LDFLAGS} ${LDFLAGS_vmlinux} -o ${2} \
+ -T ${lds} ${objects}
+ else
+- if [ -n "${CONFIG_THIN_ARCHIVES}" ]; then
+- objects="-Wl,--whole-archive \
+- built-in.o \
+- -Wl,--no-whole-archive \
+- -Wl,--start-group \
+- ${KBUILD_VMLINUX_LIBS} \
+- -Wl,--end-group \
+- ${1}"
+- else
+- objects="${KBUILD_VMLINUX_INIT} \
+- -Wl,--start-group \
+- ${KBUILD_VMLINUX_MAIN} \
+- ${KBUILD_VMLINUX_LIBS} \
+- -Wl,--end-group \
+- ${1}"
+- fi
+-
+- ${CC} ${CFLAGS_vmlinux} -o ${2} \
+- -Wl,-T,${lds} \
+- ${objects} \
++ objects="-Wl,--whole-archive \
++ built-in.o \
++ -Wl,--no-whole-archive \
++ -Wl,--start-group \
++ ${KBUILD_VMLINUX_LIBS} \
++ -Wl,--end-group \
++ ${1}"
++
++ ${CC} ${CFLAGS_vmlinux} -o ${2} \
++ -Wl,-T,${lds} \
++ ${objects} \
+ -lutil -lrt -lpthread
+ rm -f linux
+ fi
+--
+2.7.4
+
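For reference, the difference between the two archive commands kept above can be reproduced by hand with GNU binutils (2.20 or newer, per Documentation/process/changes.rst). A standalone sketch in a scratch directory; the file names are invented and nothing below is part of the patch:

cd "$(mktemp -d)"
printf 'int a(void) { return 1; }\n' > a.c
printf 'int b(void) { return 2; }\n' > b.c
cc -c a.c b.c

# Per-directory step (cmd_make_builtin): thin archive, no symbol table ('S').
rm -f built-in.o; ar rcSTP built-in.o a.o b.o
nm --print-armap built-in.o          # member symbols, but no archive index

# link-vmlinux.sh step (archive_builtin): thin archive *with* an index ('s'),
# which is what makes the aggregate usable in the final vmlinux link.
rm -f vmlinux-builtin.o; ar rcsTP vmlinux-builtin.o built-in.o
nm --print-armap vmlinux-builtin.o   # now starts with an archive index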
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5705-kbuild-rename-built-in.o-to-built-in.a.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5705-kbuild-rename-built-in.o-to-built-in.a.patch
new file mode 100644
index 00000000..e29bc186
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5705-kbuild-rename-built-in.o-to-built-in.a.patch
@@ -0,0 +1,387 @@
+From 4c22123ac68afac89aeb11697c9483dd4201a9d3 Mon Sep 17 00:00:00 2001
+From: Nicholas Piggin <npiggin@gmail.com>
+Date: Sun, 11 Feb 2018 00:25:04 +1000
+Subject: [PATCH 5705/5725] kbuild: rename built-in.o to built-in.a
+
+Incremental linking is gone, so rename built-in.o to built-in.a, which
+is the usual extension for archive files.
+
+This patch does two things, first is a simple search/replace:
+
+git grep -l 'built-in\.o' | xargs sed -i 's/built-in\.o/built-in\.a/g'
+
+The second is to invert the nesting of the text manipulations to avoid
+filtering built-in.a out of libs-y2:
+
+-libs-y2 := $(filter-out %.a, $(patsubst %/, %/built-in.a, $(libs-y)))
++libs-y2 := $(patsubst %/, %/built-in.a, $(filter-out %.a, $(libs-y)))
+
+Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
+Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>
+---
+ Documentation/kbuild/makefiles.txt | 16 ++++++++--------
+ Documentation/process/changes.rst | 2 +-
+ Makefile | 14 +++++++-------
+ arch/blackfin/kernel/bfin_ksyms.c | 2 +-
+ arch/powerpc/kernel/Makefile | 2 +-
+ drivers/s390/Makefile | 2 +-
+ lib/Kconfig.debug | 4 ++--
+ scripts/Makefile.build | 10 +++++-----
+ scripts/Makefile.lib | 6 +++---
+ scripts/link-vmlinux.sh | 20 ++++++++++----------
+ scripts/namespace.pl | 2 +-
+ usr/initramfs_data.S | 2 +-
+ 12 files changed, 41 insertions(+), 41 deletions(-)
+ mode change 100644 => 100755 scripts/Makefile.build
+
+diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
+index a933c71..cb95620 100644
+--- a/Documentation/kbuild/makefiles.txt
++++ b/Documentation/kbuild/makefiles.txt
+@@ -153,18 +153,18 @@ more details, with real examples.
+ configuration.
+
+ Kbuild compiles all the $(obj-y) files. It then calls
+- "$(AR) rcSTP" to merge these files into one built-in.o file.
++ "$(AR) rcSTP" to merge these files into one built-in.a file.
+ This is a thin archive without a symbol table, which makes it
+ unsuitable as a linker input.
+
+ The scripts/link-vmlinux.sh script later makes an aggregate
+- built-in.o with "${AR} rcsTP", which creates the thin archive
++ built-in.a with "${AR} rcsTP", which creates the thin archive
+ with a symbol table and an index, making it a valid input for
+ the final vmlinux link passes.
+
+ The order of files in $(obj-y) is significant. Duplicates in
+ the lists are allowed: the first instance will be linked into
+- built-in.o and succeeding instances will be ignored.
++ built-in.a and succeeding instances will be ignored.
+
+ Link order is significant, because certain functions
+ (module_init() / __initcall) will be called during boot in the
+@@ -228,7 +228,7 @@ more details, with real examples.
+ Note: Of course, when you are building objects into the kernel,
+ the syntax above will also work. So, if you have CONFIG_EXT2_FS=y,
+ kbuild will build an ext2.o file for you out of the individual
+- parts and then link this into built-in.o, as you would expect.
++ parts and then link this into built-in.a, as you would expect.
+
+ --- 3.4 Objects which export symbols
+
+@@ -238,7 +238,7 @@ more details, with real examples.
+ --- 3.5 Library file goals - lib-y
+
+ Objects listed with obj-* are used for modules, or
+- combined in a built-in.o for that specific directory.
++ combined in a built-in.a for that specific directory.
+ There is also the possibility to list objects that will
+ be included in a library, lib.a.
+ All objects listed with lib-y are combined in a single
+@@ -250,7 +250,7 @@ more details, with real examples.
+
+ Note that the same kbuild makefile may list files to be built-in
+ and to be part of a library. Therefore the same directory
+- may contain both a built-in.o and a lib.a file.
++ may contain both a built-in.a and a lib.a file.
+
+ Example:
+ #arch/x86/lib/Makefile
+@@ -992,7 +992,7 @@ When kbuild executes, the following steps are followed (roughly):
+
+ $(head-y) lists objects to be linked first in vmlinux.
+ $(libs-y) lists directories where a lib.a archive can be located.
+- The rest list directories where a built-in.o object file can be
++ The rest list directories where a built-in.a object file can be
+ located.
+
+ $(init-y) objects will be located after $(head-y).
+@@ -1077,7 +1077,7 @@ When kbuild executes, the following steps are followed (roughly):
+ extra-y := head.o init_task.o
+
+ In this example, extra-y is used to list object files that
+- shall be built, but shall not be linked as part of built-in.o.
++ shall be built, but shall not be linked as part of built-in.a.
+
+
+ --- 6.7 Commands useful for building a boot image
+diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst
+index 73fcdcd..ceeeda8 100644
+--- a/Documentation/process/changes.rst
++++ b/Documentation/process/changes.rst
+@@ -76,7 +76,7 @@ Binutils
+ --------
+
+ The build system has, as of 4.13, switched to using thin archives (`ar T`)
+-rather than incremental linking (`ld -r`) for built-in.o intermediate steps.
++rather than incremental linking (`ld -r`) for built-in.a intermediate steps.
+ This requires binutils 2.20 or newer.
+
+ Perl
+diff --git a/Makefile b/Makefile
+index 2dc724e..abba7b3 100644
+--- a/Makefile
++++ b/Makefile
+@@ -35,7 +35,7 @@ unexport GREP_OPTIONS
+ # Most importantly: sub-Makefiles should only ever modify files in
+ # their own directory. If in some directory we have a dependency on
+ # a file in another dir (which doesn't happen often, but it's often
+-# unavoidable when linking the built-in.o targets which finally
++# unavoidable when linking the built-in.a targets which finally
+ # turn into vmlinux), we will call a sub make in that other dir, and
+ # after that we are sure that everything which is in that other dir
+ # is now up to date.
+@@ -970,13 +970,13 @@ vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
+ vmlinux-alldirs := $(sort $(vmlinux-dirs) $(patsubst %/,%,$(filter %/, \
+ $(init-) $(core-) $(drivers-) $(net-) $(libs-) $(virt-))))
+
+-init-y := $(patsubst %/, %/built-in.o, $(init-y))
+-core-y := $(patsubst %/, %/built-in.o, $(core-y))
+-drivers-y := $(patsubst %/, %/built-in.o, $(drivers-y))
+-net-y := $(patsubst %/, %/built-in.o, $(net-y))
++init-y := $(patsubst %/, %/built-in.a, $(init-y))
++core-y := $(patsubst %/, %/built-in.a, $(core-y))
++drivers-y := $(patsubst %/, %/built-in.a, $(drivers-y))
++net-y := $(patsubst %/, %/built-in.a, $(net-y))
+ libs-y1 := $(patsubst %/, %/lib.a, $(libs-y))
+-libs-y2 := $(filter-out %.a, $(patsubst %/, %/built-in.o, $(libs-y)))
+-virt-y := $(patsubst %/, %/built-in.o, $(virt-y))
++libs-y2 := $(patsubst %/, %/built-in.a, $(filter-out %.a, $(libs-y)))
++virt-y := $(patsubst %/, %/built-in.a, $(virt-y))
+
+ # Externally visible symbols (used by link-vmlinux.sh)
+ export KBUILD_VMLINUX_INIT := $(head-y) $(init-y)
+diff --git a/arch/blackfin/kernel/bfin_ksyms.c b/arch/blackfin/kernel/bfin_ksyms.c
+index 68096e8..c0038ee 100644
+--- a/arch/blackfin/kernel/bfin_ksyms.c
++++ b/arch/blackfin/kernel/bfin_ksyms.c
+@@ -36,7 +36,7 @@ EXPORT_SYMBOL(memchr);
+ /*
+ * Because string functions are both inline and exported functions and
+ * folder arch/blackfin/lib is configured as a library path in Makefile,
+- * symbols exported in folder lib is not linked into built-in.o but
++ * symbols exported in folder lib is not linked into built-in.a but
+ * inlined only. In order to export string symbols to kernel module
+ * properly, they should be exported here.
+ */
+diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
+index 21bd995..bf191e6 100644
+--- a/arch/powerpc/kernel/Makefile
++++ b/arch/powerpc/kernel/Makefile
+@@ -167,7 +167,7 @@ systbl_chk: $(src)/systbl_chk.sh $(obj)/systbl_chk.i
+ $(call cmd,systbl_chk)
+
+ ifeq ($(CONFIG_PPC_OF_BOOT_TRAMPOLINE),y)
+-$(obj)/built-in.o: prom_init_check
++$(obj)/built-in.a: prom_init_check
+
+ quiet_cmd_prom_init_check = CALL $<
+ cmd_prom_init_check = $(CONFIG_SHELL) $< "$(NM)" "$(obj)/prom_init.o"
+diff --git a/drivers/s390/Makefile b/drivers/s390/Makefile
+index e5225ad..f6df691 100644
+--- a/drivers/s390/Makefile
++++ b/drivers/s390/Makefile
+@@ -4,5 +4,5 @@
+
+ obj-y += cio/ block/ char/ crypto/ net/ scsi/ virtio/
+
+-drivers-y += drivers/s390/built-in.o
++drivers-y += drivers/s390/built-in.a
+
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index 62d0e25..62a59ee 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -325,11 +325,11 @@ config DEBUG_SECTION_MISMATCH
+ the analysis would not catch the illegal reference.
+ This option tells gcc to inline less (but it does result in
+ a larger kernel).
+- - Run the section mismatch analysis for each module/built-in.o file.
++ - Run the section mismatch analysis for each module/built-in.a file.
+ When we run the section mismatch analysis on vmlinux.o, we
+ lose valuable information about where the mismatch was
+ introduced.
+- Running the analysis for each module/built-in.o file
++ Running the analysis for each module/built-in.a file
+ tells where the mismatch happens much closer to the
+ source. The drawback is that the same mismatch is
+ reported at least twice.
+diff --git a/scripts/Makefile.build b/scripts/Makefile.build
+old mode 100644
+new mode 100755
+index e1bd7c2..4a2529f
+--- a/scripts/Makefile.build
++++ b/scripts/Makefile.build
+@@ -86,7 +86,7 @@ obj-y += $(obj)/lib-ksyms.o
+ endif
+
+ ifneq ($(strip $(obj-y) $(need-builtin)),)
+-builtin-target := $(obj)/built-in.o
++builtin-target := $(obj)/built-in.a
+ endif
+
+ modorder-target := $(obj)/modules.order
+@@ -109,7 +109,7 @@ ifneq ($(KBUILD_CHECKSRC),0)
+ endif
+ endif
+
+-# Do section mismatch analysis for each module/built-in.o
++# Do section mismatch analysis for each module/built-in.a
+ ifdef CONFIG_DEBUG_SECTION_MISMATCH
+ cmd_secanalysis = ; scripts/mod/modpost $@
+ endif
+@@ -462,15 +462,15 @@ $(sort $(subdir-obj-y)): $(subdir-ym) ;
+ #
+ ifdef builtin-target
+
+-# built-in.o archives are made with no symbol table or index which
++# built-in.a archives are made with no symbol table or index which
+ # makes them small and fast, but unable to be used by the linker.
+-# scripts/link-vmlinux.sh builds an aggregate built-in.o with a symbol
++# scripts/link-vmlinux.sh builds an aggregate built-in.a with a symbol
+ # table and index.
+ cmd_make_builtin = rm -f $@; $(AR) rcSTP$(KBUILD_ARFLAGS)
+ cmd_make_empty_builtin = rm -f $@; $(AR) rcSTP$(KBUILD_ARFLAGS)
+ quiet_cmd_link_o_target = AR $@
+
+-# If the list of objects to link is empty, just create an empty built-in.o
++# If the list of objects to link is empty, just create an empty built-in.a
+ cmd_link_o_target = $(if $(strip $(obj-y)),\
+ $(cmd_make_builtin) $@ $(filter $(obj-y), $^) \
+ $(cmd_secanalysis),\
+diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
+index aac94d9..3219687 100644
+--- a/scripts/Makefile.lib
++++ b/scripts/Makefile.lib
+@@ -26,7 +26,7 @@ lib-y := $(filter-out $(obj-y), $(sort $(lib-y) $(lib-m)))
+
+ # Handle objects in subdirs
+ # ---------------------------------------------------------------------------
+-# o if we encounter foo/ in $(obj-y), replace it by foo/built-in.o
++# o if we encounter foo/ in $(obj-y), replace it by foo/built-in.a
+ # and add the directory to the list of dirs to descend into: $(subdir-y)
+ # o if we encounter foo/ in $(obj-m), remove it from $(obj-m)
+ # and add the directory to the list of dirs to descend into: $(subdir-m)
+@@ -40,7 +40,7 @@ __subdir-y := $(patsubst %/,%,$(filter %/, $(obj-y)))
+ subdir-y += $(__subdir-y)
+ __subdir-m := $(patsubst %/,%,$(filter %/, $(obj-m)))
+ subdir-m += $(__subdir-m)
+-obj-y := $(patsubst %/, %/built-in.o, $(obj-y))
++obj-y := $(patsubst %/, %/built-in.a, $(obj-y))
+ obj-m := $(filter-out %/, $(obj-m))
+
+ # Subdirectories we need to descend into
+@@ -61,7 +61,7 @@ multi-objs := $(multi-objs-y) $(multi-objs-m)
+
+ # $(subdir-obj-y) is the list of objects in $(obj-y) which uses dir/ to
+ # tell kbuild to descend
+-subdir-obj-y := $(filter %/built-in.o, $(obj-y))
++subdir-obj-y := $(filter %/built-in.a, $(obj-y))
+
+ # $(obj-dirs) is a list of directories that contain object files
+ obj-dirs := $(dir $(multi-objs) $(obj-y))
+diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
+index 50ede7b..6fba5ab 100755
+--- a/scripts/link-vmlinux.sh
++++ b/scripts/link-vmlinux.sh
+@@ -4,7 +4,7 @@
+ # link vmlinux
+ #
+ # vmlinux is linked from the objects selected by $(KBUILD_VMLINUX_INIT) and
+-# $(KBUILD_VMLINUX_MAIN) and $(KBUILD_VMLINUX_LIBS). Most are built-in.o files
++# $(KBUILD_VMLINUX_MAIN) and $(KBUILD_VMLINUX_LIBS). Most are built-in.a files
+ # from top-level directories in the kernel tree, others are specified in
+ # arch/$(ARCH)/Makefile. Ordering when linking is important, and
+ # $(KBUILD_VMLINUX_INIT) must be first. $(KBUILD_VMLINUX_LIBS) are archives
+@@ -18,7 +18,7 @@
+ # | +--< init/version.o + more
+ # |
+ # +--< $(KBUILD_VMLINUX_MAIN)
+-# | +--< drivers/built-in.o mm/built-in.o + more
++# | +--< drivers/built-in.a mm/built-in.a + more
+ # |
+ # +--< $(KBUILD_VMLINUX_LIBS)
+ # | +--< lib/lib.a + more
+@@ -51,13 +51,13 @@ info()
+ #
+ # Traditional incremental style of link does not require this step
+ #
+-# built-in.o output file
++# built-in.a output file
+ #
+ archive_builtin()
+ {
+- info AR built-in.o
+- rm -f built-in.o;
+- ${AR} rcsTP${KBUILD_ARFLAGS} built-in.o \
++ info AR built-in.a
++ rm -f built-in.a;
++ ${AR} rcsTP${KBUILD_ARFLAGS} built-in.a \
+ ${KBUILD_VMLINUX_INIT} \
+ ${KBUILD_VMLINUX_MAIN}
+ }
+@@ -69,7 +69,7 @@ modpost_link()
+ local objects
+
+ objects="--whole-archive \
+- built-in.o \
++ built-in.a \
+ --no-whole-archive \
+ --start-group \
+ ${KBUILD_VMLINUX_LIBS} \
+@@ -88,7 +88,7 @@ vmlinux_link()
+
+ if [ "${SRCARCH}" != "um" ]; then
+ objects="--whole-archive \
+- built-in.o \
++ built-in.a \
+ --no-whole-archive \
+ --start-group \
+ ${KBUILD_VMLINUX_LIBS} \
+@@ -99,7 +99,7 @@ vmlinux_link()
+ -T ${lds} ${objects}
+ else
+ objects="-Wl,--whole-archive \
+- built-in.o \
++ built-in.a \
+ -Wl,--no-whole-archive \
+ -Wl,--start-group \
+ ${KBUILD_VMLINUX_LIBS} \
+@@ -166,7 +166,7 @@ cleanup()
+ rm -f .tmp_kallsyms*
+ rm -f .tmp_version
+ rm -f .tmp_vmlinux*
+- rm -f built-in.o
++ rm -f built-in.a
+ rm -f System.map
+ rm -f vmlinux
+ rm -f vmlinux.o
+diff --git a/scripts/namespace.pl b/scripts/namespace.pl
+index 729c547..6135574 100755
+--- a/scripts/namespace.pl
++++ b/scripts/namespace.pl
+@@ -164,7 +164,7 @@ sub linux_objects
+ s:^\./::;
+ if (/.*\.o$/ &&
+ ! (
+- m:/built-in.o$:
++ m:/built-in.a$:
+ || m:arch/x86/vdso/:
+ || m:arch/x86/boot/:
+ || m:arch/ia64/ia32/ia32.o$:
+diff --git a/usr/initramfs_data.S b/usr/initramfs_data.S
+index 10d325e..b28da79 100644
+--- a/usr/initramfs_data.S
++++ b/usr/initramfs_data.S
+@@ -10,7 +10,7 @@
+
+ ld -m elf_i386 --format binary --oformat elf32-i386 -r \
+ -T initramfs_data.scr initramfs_data.cpio.gz -o initramfs_data.o
+- ld -m elf_i386 -r -o built-in.o initramfs_data.o
++ ld -m elf_i386 -r -o built-in.a initramfs_data.o
+
+ For including the .init.ramfs sections, see include/asm-generic/vmlinux.lds.
+
+--
+2.7.4
+
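The libs-y2 inversion called out in the commit message is easy to check in isolation with GNU make; the entries below are invented for the demo and unrelated to the real tree:

cat > /tmp/libs-demo.mk <<'EOF'
libs-y := lib/ arch/x86/lib/ some.a
old := $(filter-out %.a, $(patsubst %/, %/built-in.a, $(libs-y)))
new := $(patsubst %/, %/built-in.a, $(filter-out %.a, $(libs-y)))
$(info old order: [$(old)])  # empty: the fresh %/built-in.a names match %.a and are dropped
$(info new order: [$(new)])  # lib/built-in.a arch/x86/lib/built-in.a
all: ;
EOF
make -f /tmp/libs-demo.mk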
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5706-drm-amdgpu-Need-to-set-moved-to-true-when-evict-bo.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5706-drm-amdgpu-Need-to-set-moved-to-true-when-evict-bo.patch
new file mode 100644
index 00000000..d2a82f6b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5706-drm-amdgpu-Need-to-set-moved-to-true-when-evict-bo.patch
@@ -0,0 +1,46 @@
+From 90189f809eba2f5a88ae234177a8f65320f12f26 Mon Sep 17 00:00:00 2001
+From: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+Date: Mon, 7 Jan 2019 16:31:53 +0530
+Subject: [PATCH 5706/5725] drm/amdgpu: Need to set moved to true when evict bo
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Fix the VMC page fault that occurs with the following sequence:
+1. amdgpu_gem_create_ioctl
+2. ttm_bo_swapout->amdgpu_vm_bo_invalidate: amdgpu_vm_bo_base_init has not
+been called yet, so list_add_tail(&base->bo_list, &bo->va) is never
+reached. Even though the bo is evicted, bo_base->moved is not set.
+3. drm_gem_open_ioctl->amdgpu_vm_bo_base_init: this only calls
+list_move_tail(&base->vm_status, &vm->evicted) but does not set
+bo_base->moved.
+4. amdgpu_vm_bo_map->amdgpu_vm_bo_insert_map: because bo_base->moved is
+not set, amdgpu_vm_bo_insert_map calls
+list_move(&bo_va->base.vm_status, &vm->moved).
+5. amdgpu_cs_ioctl does not validate the swapped-out bo, because it is
+only on the moved list, not the evicted list, so a VMC page fault occurs.
+
+Signed-off-by: Emily Deng <Emily.Deng@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 85d700b..50d5fa3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -302,6 +302,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
+ * is validated on next vm use to avoid fault.
+ * */
+ amdgpu_vm_bo_evicted(base);
++ base->moved = true;
+ }
+
+ /**
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5707-Fix-compilation-error.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5707-Fix-compilation-error.patch
new file mode 100644
index 00000000..dfde752d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5707-Fix-compilation-error.patch
@@ -0,0 +1,54 @@
+From c4a7ff25c5a2c7a0b37a53f8866db83b94704947 Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Thu, 1 Nov 2018 14:04:34 +0530
+Subject: [PATCH 5707/5725] Fix compilation error
+
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 16 ++++++++++++++++
+ drivers/gpu/drm/amd/amdkfd/Makefile | 3 ++-
+ 2 files changed, 18 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 7753af9..795d306 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -648,6 +648,22 @@ MODULE_PARM_DESC(noretry,
+ module_param(halt_if_hws_hang, int, 0644);
+ MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (default), 1 = on)");
+
++/**
++ * DOC: priv_cp_queues (int)
++ * Enable privileged mode for CP queues. Default value: 0 (off)
++ */
++int priv_cp_queues;
++module_param(priv_cp_queues, int, 0644);
++MODULE_PARM_DESC(priv_cp_queues, "Enable privileged mode for CP queues (0 = off (default), 1 = on)");
++
++/**
++ * DOC: keep_idle_process_evicted (bool)
++ * Keep an evicted process evicted if it is idle. Default value: false (off)
++ */
++bool keep_idle_process_evicted;
++module_param(keep_idle_process_evicted, bool, 0444);
++MODULE_PARM_DESC(keep_idle_process_evicted, "Restore evicted process only if queues are active (N = off (default), Y = on)");
++
+ static const struct pci_device_id pciidlist[] = {
+ #ifdef CONFIG_DRM_AMDGPU_SI
+ {0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile
+index 911e67d..9ef632a 100644
+--- a/drivers/gpu/drm/amd/amdkfd/Makefile
++++ b/drivers/gpu/drm/amd/amdkfd/Makefile
+@@ -54,7 +54,8 @@ AMDKFD_FILES := $(AMDKFD_PATH)/kfd_module.o \
+ $(AMDKFD_PATH)/kfd_dbgmgr.o \
+ $(AMDKFD_PATH)/kfd_flat_memory.o \
+ $(AMDKFD_PATH)/kfd_rdma.o \
+- $(AMDKFD_PATH)/kfd_crat.o
++ $(AMDKFD_PATH)/kfd_crat.o \
++ $(AMDKFD_PATH)/kfd_ipc.o
+
+ ifneq ($(CONFIG_AMD_IOMMU_V2),)
+ AMDKFD_FILES += $(AMDKFD_PATH)/kfd_iommu.o
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5708-drm-amdkfd-Release-an-acquired-process-vm.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5708-drm-amdkfd-Release-an-acquired-process-vm.patch
new file mode 100644
index 00000000..680a3531
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5708-drm-amdkfd-Release-an-acquired-process-vm.patch
@@ -0,0 +1,177 @@
+From f7766c20130673e6490a69c7ca3b76dda524f201 Mon Sep 17 00:00:00 2001
+From: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+Date: Mon, 7 Jan 2019 16:39:15 +0530
+Subject: [PATCH 5708/5725] drm/amdkfd: Release an acquired process vm
+
+For a compute vm acquired from amdgpu, vm.pasid is managed
+by kfd. Decouple the pasid from such a vm on process destroy
+to avoid a duplicate pasid release.
+
+Signed-off-by: Oak Zeng <Oak.Zeng@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 1 +
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 1 +
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 1 +
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 1 +
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 19 +++++++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 20 ++++++++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 1 +
+ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 4 +++-
+ drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 1 +
+ 9 files changed, 48 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+index f9d21a6..7ffeb38 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+@@ -179,6 +179,7 @@ int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
+ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm);
+ void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm);
++void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm);
+ uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm);
+ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
+ struct kgd_dev *kgd, uint64_t va, uint64_t size,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+index ef482bc..29de7a7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+@@ -182,6 +182,7 @@ static const struct kfd2kgd_calls kfd2kgd = {
+ .create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm,
+ .acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm,
+ .destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm,
++ .release_process_vm = amdgpu_amdkfd_gpuvm_release_process_vm,
+ .get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir,
+ .alloc_pasid = amdgpu_pasid_alloc,
+ .free_pasid = amdgpu_pasid_free,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+index e6cfa22..9388f6a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+@@ -154,6 +154,7 @@ static const struct kfd2kgd_calls kfd2kgd = {
+ .create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm,
+ .acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm,
+ .destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm,
++ .release_process_vm = amdgpu_amdkfd_gpuvm_release_process_vm,
+ .create_process_gpumem = create_process_gpumem,
+ .destroy_process_gpumem = destroy_process_gpumem,
+ .get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+index eee3a3e..0409867 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+@@ -204,6 +204,7 @@ static const struct kfd2kgd_calls kfd2kgd = {
+ .create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm,
+ .acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm,
+ .destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm,
++ .release_process_vm = amdgpu_amdkfd_gpuvm_release_process_vm,
+ .create_process_gpumem = create_process_gpumem,
+ .destroy_process_gpumem = destroy_process_gpumem,
+ .get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index 1fd2b33..c8c00b4 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -1160,6 +1160,25 @@ void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
+ kfree(vm);
+ }
+
++void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
++{
++ struct amdgpu_device *adev = get_amdgpu_device(kgd);
++ struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
++
++ if (WARN_ON(!kgd || !vm))
++ return;
++
++ pr_debug("Releasing process vm %p\n", vm);
++
++	/* The original pasid of the amdgpu vm was already
++	 * released when the vm was converted to a compute vm.
++	 * The current pasid is managed by kfd and will be
++	 * released on kfd process destroy. Set the amdgpu pasid
++	 * to 0 to avoid a duplicate release.
++	 */
++ amdgpu_vm_release_compute(adev, avm);
++}
++
+ uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
+ {
+ struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 50d5fa3..ef4f121 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -868,6 +868,26 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
+ }
+
+ /**
++ * amdgpu_vm_release_compute - release a compute vm
++ * @adev: amdgpu_device pointer
++ * @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute
++ *
++ * This is the counterpart of amdgpu_vm_make_compute. It decouples the
++ * compute pasid from the vm. Compute should stop using the vm after this call.
++ */
++void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
++{
++ if (vm->pasid) {
++ unsigned long flags;
++
++ spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
++ idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
++ spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
++ }
++ vm->pasid = 0;
++}
++
++/**
+ * amdgpu_vm_free_pts - free PD/PT levels
+ *
+ * @adev: amdgpu device structure
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+index ff711a4..233741f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+@@ -290,6 +290,7 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
+ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ int vm_context, unsigned int pasid);
+ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid);
++void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+ bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
+ unsigned int pasid);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index 73a1adc..304c116 100755
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -353,8 +353,10 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
+ pr_debug("Releasing pdd (topology id %d) for process (pasid %d)\n",
+ pdd->dev->id, p->pasid);
+
+- if (pdd->drm_file)
++ if (pdd->drm_file) {
++ pdd->dev->kfd2kgd->release_process_vm(pdd->dev->kgd, pdd->vm);
+ fput(pdd->drm_file);
++ }
+ else if (pdd->vm)
+ pdd->dev->kfd2kgd->destroy_process_vm(
+ pdd->dev->kgd, pdd->vm);
+diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+index 88dbade..20b403a 100755
+--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
++++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+@@ -276,6 +276,7 @@ struct kfd2kgd_calls {
+ unsigned int pasid, void **vm, void **process_info,
+ struct dma_fence **ef);
+ void (*destroy_process_vm)(struct kgd_dev *kgd, void *vm);
++ void (*release_process_vm)(struct kgd_dev *kgd, void *vm);
+
+ int (*create_process_gpumem)(struct kgd_dev *kgd, uint64_t va, size_t size, void *vm, struct kgd_mem **mem);
+ void (*destroy_process_gpumem)(struct kgd_dev *kgd, struct kgd_mem *mem);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5709-drm-amdgpu-Relocate-some-definitions-v2.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5709-drm-amdgpu-Relocate-some-definitions-v2.patch
new file mode 100644
index 00000000..88fee7af
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5709-drm-amdgpu-Relocate-some-definitions-v2.patch
@@ -0,0 +1,232 @@
+From 303990a34bbd20a051272f32853604e679eaee52 Mon Sep 17 00:00:00 2001
+From: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+Date: Mon, 7 Jan 2019 17:31:08 +0530
+Subject: [PATCH 5709/5725] drm/amdgpu: Relocate some definitions v2
+
+Move some KFD-related (but used in amdgpu_drv.c) definitions from
+kfd_priv.h to kgd_kfd_interface.h so we don't need to include kfd_priv.h
+in amdgpu_drv.c. This fixes a build failure when AMDGPU is enabled but
+MMU_NOTIFIER is not.
+This patch also disables KFD-related module options when HSA_AMD is not
+enabled.
+
+v2: rebase (Alex)
+
+Signed-off-by: Amber Lin <Amber.Lin@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+
+Signed-off-by: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 23 ++++++++++----------
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 28 -------------------------
+ drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 28 +++++++++++++++++++++++++
+ 3 files changed, 39 insertions(+), 40 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 795d306..bc8f35a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -39,7 +39,6 @@
+ #include "amdgpu_gem.h"
+
+ #include "amdgpu_amdkfd.h"
+-#include "kfd_priv.h"
+
+ /*
+ * KMS wrapper.
+@@ -138,17 +137,6 @@ struct amdgpu_mgpu_info mgpu_info = {
+ .mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
+ };
+
+-/* KFD parameters */
+-int sched_policy = KFD_SCHED_POLICY_HWS;
+-int hws_max_conc_proc = 8;
+-int cwsr_enable = 1;
+-int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT;
+-int send_sigterm;
+-int debug_largebar;
+-int ignore_crat;
+-int noretry;
+-int halt_if_hws_hang;
+-
+ /**
+ * DOC: vramlimit (int)
+ * Restrict the total amount of VRAM in MiB for testing. The default is 0 (Use full VRAM).
+@@ -563,12 +551,14 @@ MODULE_PARM_DESC(smu_memory_pool_size,
+ "0x1 = 256Mbyte, 0x2 = 512Mbyte, 0x4 = 1 Gbyte, 0x8 = 2GByte");
+ module_param_named(smu_memory_pool_size, amdgpu_smu_memory_pool_size, uint, 0444);
+
++#ifdef CONFIG_HSA_AMD
+ /**
+ * DOC: sched_policy (int)
+ * Set scheduling policy. Default is HWS(hardware scheduling) with over-subscription.
+ * Setting 1 disables over-subscription. Setting 2 disables HWS and statically
+ * assigns queues to HQDs.
+ */
++int sched_policy = KFD_SCHED_POLICY_HWS;
+ module_param(sched_policy, int, 0444);
+ MODULE_PARM_DESC(sched_policy,
+ "Scheduling policy (0 = HWS (Default), 1 = HWS without over-subscription, 2 = Non-HWS (Used for debugging only)");
+@@ -578,6 +568,7 @@ MODULE_PARM_DESC(sched_policy,
+ * Maximum number of processes that HWS can schedule concurrently. The maximum is the
+ * number of VMIDs assigned to the HWS, which is also the default.
+ */
++int hws_max_conc_proc = 8;
+ module_param(hws_max_conc_proc, int, 0444);
+ MODULE_PARM_DESC(hws_max_conc_proc,
+ "Max # processes HWS can execute concurrently when sched_policy=0 (0 = no concurrency, #VMIDs for KFD = Maximum(default))");
+@@ -588,6 +579,7 @@ MODULE_PARM_DESC(hws_max_conc_proc,
+ * the middle of a compute wave. Default is 1 to enable this feature. Setting 0
+ * disables it.
+ */
++int cwsr_enable = 1;
+ module_param(cwsr_enable, int, 0444);
+ MODULE_PARM_DESC(cwsr_enable, "CWSR enable (0 = Off, 1 = On (Default))");
+
+@@ -596,6 +588,7 @@ MODULE_PARM_DESC(cwsr_enable, "CWSR enable (0 = Off, 1 = On (Default))");
+ * Maximum number of queues per device. Valid setting is between 1 and 4096. Default
+ * is 4096.
+ */
++int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT;
+ module_param(max_num_of_queues_per_device, int, 0444);
+ MODULE_PARM_DESC(max_num_of_queues_per_device,
+ "Maximum number of supported queues per device (1 = Minimum, 4096 = default)");
+@@ -605,6 +598,7 @@ MODULE_PARM_DESC(max_num_of_queues_per_device,
+ * Send sigterm to HSA process on unhandled exceptions. Default is not to send sigterm
+ * but just print errors on dmesg. Setting 1 enables sending sigterm.
+ */
++int send_sigterm;
+ module_param(send_sigterm, int, 0444);
+ MODULE_PARM_DESC(send_sigterm,
+ "Send sigterm to HSA process on unhandled exception (0 = disable, 1 = enable)");
+@@ -616,6 +610,7 @@ MODULE_PARM_DESC(send_sigterm,
+ * size, usually 256MB.
+ * Default value is 0, diabled.
+ */
++int debug_largebar;
+ module_param(debug_largebar, int, 0444);
+ MODULE_PARM_DESC(debug_largebar,
+ "Debug large-bar flag used to simulate large-bar capability on non-large bar machine (0 = disable, 1 = enable)");
+@@ -626,6 +621,7 @@ MODULE_PARM_DESC(debug_largebar,
+ * table to get information about AMD APUs. This option can serve as a workaround on
+ * systems with a broken CRAT table.
+ */
++int ignore_crat;
+ module_param(ignore_crat, int, 0444);
+ MODULE_PARM_DESC(ignore_crat,
+ "Ignore CRAT table during KFD initialization (0 = use CRAT (default), 1 = ignore CRAT)");
+@@ -636,6 +632,7 @@ MODULE_PARM_DESC(ignore_crat,
+ * Setting 1 disables retry.
+ * Retry is needed for recoverable page faults.
+ */
++int noretry;
+ module_param(noretry, int, 0644);
+ MODULE_PARM_DESC(noretry,
+ "Set sh_mem_config.retry_disable on Vega10 (0 = retry enabled (default), 1 = retry disabled)");
+@@ -645,8 +642,10 @@ MODULE_PARM_DESC(noretry,
+ * Halt if HWS hang is detected. Default value, 0, disables the halt on hang.
+ * Setting 1 enables halt on hang.
+ */
++int halt_if_hws_hang;
+ module_param(halt_if_hws_hang, int, 0644);
+ MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (default), 1 = on)");
++#endif
+
+ /**
+ * DOC: priv_cp_queues (int)
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index 21dfc72..b3f3e86 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -105,7 +105,6 @@
+ */
+ extern int max_num_of_queues_per_device;
+
+-#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096
+ #define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE \
+ (KFD_MAX_NUM_OF_PROCESSES * \
+ KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
+@@ -156,33 +155,6 @@ extern int priv_cp_queues;
+ */
+ extern int halt_if_hws_hang;
+
+-/**
+- * enum kfd_sched_policy
+- *
+- * @KFD_SCHED_POLICY_HWS: H/W scheduling policy known as command processor (cp)
+- * scheduling. In this scheduling mode we're using the firmware code to
+- * schedule the user mode queues and kernel queues such as HIQ and DIQ.
+- * the HIQ queue is used as a special queue that dispatches the configuration
+- * to the cp and the user mode queues list that are currently running.
+- * the DIQ queue is a debugging queue that dispatches debugging commands to the
+- * firmware.
+- * in this scheduling mode user mode queues over subscription feature is
+- * enabled.
+- *
+- * @KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION: The same as above but the over
+- * subscription feature disabled.
+- *
+- * @KFD_SCHED_POLICY_NO_HWS: no H/W scheduling policy is a mode which directly
+- * set the command processor registers and sets the queues "manually". This
+- * mode is used *ONLY* for debugging proposes.
+- *
+- */
+-enum kfd_sched_policy {
+- KFD_SCHED_POLICY_HWS = 0,
+- KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION,
+- KFD_SCHED_POLICY_NO_HWS
+-};
+-
+ enum cache_policy {
+ cache_policy_coherent,
+ cache_policy_noncoherent
+diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+index 20b403a..9595d40 100755
+--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
++++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+@@ -103,6 +103,33 @@ enum kgd_engine_type {
+ KGD_ENGINE_MAX
+ };
+
++/**
++ * enum kfd_sched_policy
++ *
++ * @KFD_SCHED_POLICY_HWS: H/W scheduling policy known as command processor (cp)
++ * scheduling. In this scheduling mode we're using the firmware code to
++ * schedule the user mode queues and kernel queues such as HIQ and DIQ.
++ * the HIQ queue is used as a special queue that dispatches the configuration
++ * to the cp and the user mode queues list that are currently running.
++ * the DIQ queue is a debugging queue that dispatches debugging commands to the
++ * firmware.
++ * in this scheduling mode user mode queues over subscription feature is
++ * enabled.
++ *
++ * @KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION: The same as above but the over
++ * subscription feature disabled.
++ *
++ * @KFD_SCHED_POLICY_NO_HWS: no H/W scheduling policy is a mode which directly
++ * set the command processor registers and sets the queues "manually". This
++ * mode is used *ONLY* for debugging purposes.
++ *
++ */
++enum kfd_sched_policy {
++ KFD_SCHED_POLICY_HWS = 0,
++ KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION,
++ KFD_SCHED_POLICY_NO_HWS
++};
++
+ struct kgd2kfd_shared_resources {
+ /* Bit n == 1 means VMID n is available for KFD. */
+ unsigned int compute_vmid_bitmap;
+@@ -156,6 +183,7 @@ struct tile_config {
+ uint32_t num_ranks;
+ };
+
++#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096
+
+ /*
+ * Allocation flag domains
+--
+2.7.4
+
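On a kernel built with this series, the KFD options touched by the last two patches are ordinary amdgpu module parameters, so their presence (and the effect of the CONFIG_HSA_AMD guard) can be sanity-checked from userspace. Purely illustrative; output depends on the local build:

# Parameters registered by the module (names taken from the patches above).
modinfo -p amdgpu | grep -E 'sched_policy|hws_max_conc_proc|cwsr_enable|noretry|halt_if_hws_hang|priv_cp_queues|keep_idle_process_evicted'

# Current values once the module is loaded.
grep -r . /sys/module/amdgpu/parameters/ 2>/dev/null | grep -E 'sched_policy|noretry'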
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5710-drm-amdkfd-Copy-in-KFD-related-files.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5710-drm-amdkfd-Copy-in-KFD-related-files.patch
new file mode 100644
index 00000000..20a682da
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5710-drm-amdkfd-Copy-in-KFD-related-files.patch
@@ -0,0 +1,28459 @@
+From 49741ab925ddf71f1640329fd24b6d271e14c544 Mon Sep 17 00:00:00 2001
+From: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+Date: Wed, 9 Jan 2019 21:31:05 +0530
+Subject: [PATCH 5710/5725] drm/amdkfd: Copy in KFD-related files
+
+This includes kernel configs, README.md, includes, and
+amdgpu/*kfd* files
+
+Change-Id: I3c75ebbe9c248ae8b8d73fe8236f43cfd6ba7c95
+Signed-off-by: Kent Russell <kent.russell@amd.com>
+Signed-off-by: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+---
+ README.md | 78 +
+ arch/arm64/configs/rock-dbg_defconfig | 4479 ++++++++++
+ arch/powerpc/configs/rock-dbg_defconfig | 7821 +++++++++++++++++
+ arch/x86/configs/rock-dbg_defconfig | 4293 +++++++++
+ arch/x86/configs/rock-rel_defconfig | 9244 ++++++++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 9 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 11 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 44 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 163 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 171 +-
+ drivers/gpu/drm/amd/amdkfd/Makefile | 4 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 231 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.h | 14 +
+ drivers/gpu/drm/amd/amdkfd/kfd_device.c | 40 +-
+ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 155 +-
+ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 5 +
+ drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c | 116 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_iommu.c | 13 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h | 6 +
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c | 6 +
+ drivers/gpu/drm/amd/amdkfd/kfd_module.c | 8 +
+ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 19 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h | 11 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 66 +
+ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 120 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 31 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_topology.h | 9 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_trace.c | 26 +
+ drivers/gpu/drm/amd/amdkfd/kfd_trace.h | 151 +
+ drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 27 +
+ include/uapi/linux/kfd_ioctl.h | 95 +-
+ 31 files changed, 27300 insertions(+), 166 deletions(-)
+ create mode 100644 README.md
+ create mode 100644 arch/arm64/configs/rock-dbg_defconfig
+ create mode 100644 arch/powerpc/configs/rock-dbg_defconfig
+ create mode 100644 arch/x86/configs/rock-dbg_defconfig
+ create mode 100644 arch/x86/configs/rock-rel_defconfig
+ create mode 100644 drivers/gpu/drm/amd/amdkfd/kfd_trace.c
+ create mode 100644 drivers/gpu/drm/amd/amdkfd/kfd_trace.h
+
+diff --git a/README.md b/README.md
+new file mode 100644
+index 0000000..bb68b27
+--- /dev/null
++++ b/README.md
+@@ -0,0 +1,78 @@
++### AMD Radeon Open Compute Kernel driver
++
++#### What's New in this tree ?
++
++* dGPU support for Fiji
++* device and host memory support
++* multiple GPU support
++* host memory allocations are shared between GPUs
++
++#### Known Issues
++
++* On consumer grade products (Nano, Fury, Fury X), thermal control is not
++ working correctly. As a workaround, fans are hardcoded to 100% to prevent
++ overheating.
++
++#### Package Contents
++
++The kernel image is built from a source tree based on the 4.17 upstream
++release plus:
++
++* Features in the HSA kernel driver ("amdkfd") that are not yet
++ upstreamed to the mainline Linux kernel.
++* Changes in the AMDGPU kernel driver ("amdgpu") that may not yet be
++ upstreamed to the mainline Linux kernel.
++
++##### Note regarding libhsakmt compatibility
++Please note that the libhsakmt library in this repository is NOT compatible
++with amdkfd that is distributed as part of the mainline Linux kernel
++from 3.19 and onward.
++
++#### Target Platform
++
++This release is intended for use with any hardware configuration that
++contains only a Carrizo APU, or configurations which contain
++an Intel Haswell or newer CPU plus Fiji dGPUs.
++
++APU motherboards must run the latest BIOS version and have the IOMMU
++enabled in the BIOS.
++
++The following is a reference hardware configuration that was used for
++testing purposes:
++
++
++
++dGPU Config:
++* CPU: Intel i7-4790
++* Motherboard: ASUS Z97-PRO
++* Memory: G.Skill Ripjaws 4 32GB RAM (4 x 8GB)
++* OS: Ubuntu 14.04.03 64-bit edition
++* dGPU: ASUS R9 Nano
++
++#### Installing and configuring the kernel
++
++> **NOTE:** Binary packages are no longer part of this git repository. Please
++> refer to the [ROCm project](https://github.com/RadeonOpenCompute/ROCm/wiki)
++> for instructions on configuring the AMD apt/yum package server
++
++#### Config files for building the kernel
++
++The configuration used to build our kernel can be re-created by running:
++`make rock-rel_defconfig`
++
++This config is based on the Ubuntu 14.04 build patches by Canonical.
++
++##### Obtaining kernel and libhsakmt source code
++
++* Source code used to build the kernel is in this repo. Source code to
++ build libhsakmt is in the
++ [ROCT-Thunk-Interface](https://github.com/RadeonOpenCompute/ROCT-Thunk-Interface)
++ repository
++
++### LICENSE
++
++The following lists the different licenses that apply to the different
++components in this repository:
++
++* the Linux kernel images are covered by the modified GPL license in COPYING
++* the firmware image is covered by the license in LICENSE.ucode
+diff --git a/arch/arm64/configs/rock-dbg_defconfig b/arch/arm64/configs/rock-dbg_defconfig
+new file mode 100644
+index 0000000..d86c7cb
+--- /dev/null
++++ b/arch/arm64/configs/rock-dbg_defconfig
+@@ -0,0 +1,4479 @@
++#
++# Automatically generated file; DO NOT EDIT.
++# Linux/arm64 4.6.0 Kernel Configuration
++#
++CONFIG_ARM64=y
++CONFIG_64BIT=y
++CONFIG_ARCH_PHYS_ADDR_T_64BIT=y
++CONFIG_MMU=y
++CONFIG_ARCH_MMAP_RND_BITS_MIN=18
++CONFIG_ARCH_MMAP_RND_BITS_MAX=33
++CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=11
++CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16
++CONFIG_STACKTRACE_SUPPORT=y
++CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000
++CONFIG_LOCKDEP_SUPPORT=y
++CONFIG_TRACE_IRQFLAGS_SUPPORT=y
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_GENERIC_BUG=y
++CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_GENERIC_CSUM=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_ZONE_DMA=y
++CONFIG_HAVE_GENERIC_RCU_GUP=y
++CONFIG_ARCH_DMA_ADDR_T_64BIT=y
++CONFIG_NEED_DMA_MAP_STATE=y
++CONFIG_NEED_SG_DMA_LENGTH=y
++CONFIG_SMP=y
++CONFIG_SWIOTLB=y
++CONFIG_IOMMU_HELPER=y
++CONFIG_KERNEL_MODE_NEON=y
++CONFIG_FIX_EARLYCON_MEM=y
++CONFIG_PGTABLE_LEVELS=4
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++CONFIG_IRQ_WORK=y
++CONFIG_BUILDTIME_EXTABLE_SORT=y
++
++#
++# General setup
++#
++CONFIG_INIT_ENV_ARG_LIMIT=32
++CONFIG_CROSS_COMPILE=""
++# CONFIG_COMPILE_TEST is not set
++CONFIG_LOCALVERSION="-kfd"
++# CONFIG_LOCALVERSION_AUTO is not set
++CONFIG_DEFAULT_HOSTNAME="(none)"
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++CONFIG_SYSVIPC_SYSCTL=y
++CONFIG_POSIX_MQUEUE=y
++CONFIG_POSIX_MQUEUE_SYSCTL=y
++CONFIG_CROSS_MEMORY_ATTACH=y
++CONFIG_FHANDLE=y
++CONFIG_USELIB=y
++CONFIG_AUDIT=y
++CONFIG_HAVE_ARCH_AUDITSYSCALL=y
++CONFIG_AUDITSYSCALL=y
++CONFIG_AUDIT_WATCH=y
++CONFIG_AUDIT_TREE=y
++
++#
++# IRQ subsystem
++#
++CONFIG_GENERIC_IRQ_PROBE=y
++CONFIG_GENERIC_IRQ_SHOW=y
++CONFIG_GENERIC_IRQ_SHOW_LEVEL=y
++CONFIG_GENERIC_IRQ_MIGRATION=y
++CONFIG_HARDIRQS_SW_RESEND=y
++CONFIG_GENERIC_IRQ_CHIP=y
++CONFIG_IRQ_DOMAIN=y
++CONFIG_IRQ_DOMAIN_HIERARCHY=y
++CONFIG_GENERIC_MSI_IRQ=y
++CONFIG_GENERIC_MSI_IRQ_DOMAIN=y
++CONFIG_HANDLE_DOMAIN_IRQ=y
++# CONFIG_IRQ_DOMAIN_DEBUG is not set
++CONFIG_IRQ_FORCED_THREADING=y
++CONFIG_SPARSE_IRQ=y
++CONFIG_GENERIC_TIME_VSYSCALL=y
++CONFIG_GENERIC_CLOCKEVENTS=y
++CONFIG_ARCH_HAS_TICK_BROADCAST=y
++CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
++
++#
++# Timers subsystem
++#
++CONFIG_TICK_ONESHOT=y
++CONFIG_NO_HZ_COMMON=y
++# CONFIG_HZ_PERIODIC is not set
++CONFIG_NO_HZ_IDLE=y
++# CONFIG_NO_HZ_FULL is not set
++CONFIG_NO_HZ=y
++CONFIG_HIGH_RES_TIMERS=y
++
++#
++# CPU/Task time and stats accounting
++#
++CONFIG_TICK_CPU_ACCOUNTING=y
++# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set
++# CONFIG_IRQ_TIME_ACCOUNTING is not set
++CONFIG_BSD_PROCESS_ACCT=y
++CONFIG_BSD_PROCESS_ACCT_V3=y
++CONFIG_TASKSTATS=y
++CONFIG_TASK_DELAY_ACCT=y
++CONFIG_TASK_XACCT=y
++CONFIG_TASK_IO_ACCOUNTING=y
++
++#
++# RCU Subsystem
++#
++CONFIG_TREE_RCU=y
++# CONFIG_RCU_EXPERT is not set
++CONFIG_SRCU=y
++# CONFIG_TASKS_RCU is not set
++CONFIG_RCU_STALL_COMMON=y
++# CONFIG_TREE_RCU_TRACE is not set
++# CONFIG_RCU_EXPEDITE_BOOT is not set
++CONFIG_BUILD_BIN2C=y
++CONFIG_IKCONFIG=m
++# CONFIG_IKCONFIG_PROC is not set
++CONFIG_LOG_BUF_SHIFT=14
++CONFIG_LOG_CPU_MAX_BUF_SHIFT=12
++CONFIG_GENERIC_SCHED_CLOCK=y
++CONFIG_CGROUPS=y
++CONFIG_PAGE_COUNTER=y
++CONFIG_MEMCG=y
++CONFIG_MEMCG_SWAP=y
++# CONFIG_MEMCG_SWAP_ENABLED is not set
++CONFIG_BLK_CGROUP=y
++# CONFIG_DEBUG_BLK_CGROUP is not set
++CONFIG_CGROUP_WRITEBACK=y
++CONFIG_CGROUP_SCHED=y
++CONFIG_FAIR_GROUP_SCHED=y
++CONFIG_CFS_BANDWIDTH=y
++# CONFIG_RT_GROUP_SCHED is not set
++CONFIG_CGROUP_PIDS=y
++CONFIG_CGROUP_FREEZER=y
++CONFIG_CGROUP_HUGETLB=y
++CONFIG_CPUSETS=y
++CONFIG_PROC_PID_CPUSET=y
++CONFIG_CGROUP_DEVICE=y
++CONFIG_CGROUP_CPUACCT=y
++CONFIG_CGROUP_PERF=y
++# CONFIG_CGROUP_DEBUG is not set
++CONFIG_CHECKPOINT_RESTORE=y
++CONFIG_NAMESPACES=y
++CONFIG_UTS_NS=y
++CONFIG_IPC_NS=y
++CONFIG_USER_NS=y
++CONFIG_PID_NS=y
++CONFIG_NET_NS=y
++CONFIG_SCHED_AUTOGROUP=y
++# CONFIG_SYSFS_DEPRECATED is not set
++CONFIG_RELAY=y
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_INITRAMFS_SOURCE=""
++CONFIG_RD_GZIP=y
++CONFIG_RD_BZIP2=y
++CONFIG_RD_LZMA=y
++CONFIG_RD_XZ=y
++CONFIG_RD_LZO=y
++CONFIG_RD_LZ4=y
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++CONFIG_SYSCTL=y
++CONFIG_ANON_INODES=y
++CONFIG_HAVE_UID16=y
++CONFIG_SYSCTL_EXCEPTION_TRACE=y
++CONFIG_BPF=y
++CONFIG_EXPERT=y
++CONFIG_UID16=y
++CONFIG_MULTIUSER=y
++CONFIG_SGETMASK_SYSCALL=y
++CONFIG_SYSFS_SYSCALL=y
++CONFIG_SYSCTL_SYSCALL=y
++CONFIG_KALLSYMS=y
++CONFIG_KALLSYMS_ALL=y
++# CONFIG_KALLSYMS_ABSOLUTE_PERCPU is not set
++CONFIG_KALLSYMS_BASE_RELATIVE=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_EPOLL=y
++CONFIG_SIGNALFD=y
++CONFIG_TIMERFD=y
++CONFIG_EVENTFD=y
++CONFIG_BPF_SYSCALL=y
++CONFIG_SHMEM=y
++CONFIG_AIO=y
++CONFIG_ADVISE_SYSCALLS=y
++CONFIG_USERFAULTFD=y
++CONFIG_PCI_QUIRKS=y
++CONFIG_MEMBARRIER=y
++# CONFIG_EMBEDDED is not set
++CONFIG_HAVE_PERF_EVENTS=y
++CONFIG_PERF_USE_VMALLOC=y
++
++#
++# Kernel Performance Events And Counters
++#
++CONFIG_PERF_EVENTS=y
++# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_SLUB_DEBUG=y
++# CONFIG_COMPAT_BRK is not set
++# CONFIG_SLAB is not set
++CONFIG_SLUB=y
++# CONFIG_SLOB is not set
++CONFIG_SLUB_CPU_PARTIAL=y
++CONFIG_SYSTEM_DATA_VERIFICATION=y
++CONFIG_PROFILING=y
++CONFIG_TRACEPOINTS=y
++CONFIG_JUMP_LABEL=y
++# CONFIG_STATIC_KEYS_SELFTEST is not set
++# CONFIG_UPROBES is not set
++# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set
++CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
++CONFIG_HAVE_ARCH_TRACEHOOK=y
++CONFIG_HAVE_DMA_CONTIGUOUS=y
++CONFIG_GENERIC_SMP_IDLE_THREAD=y
++CONFIG_GENERIC_IDLE_POLL_SETUP=y
++CONFIG_HAVE_CLK=y
++CONFIG_HAVE_DMA_API_DEBUG=y
++CONFIG_HAVE_HW_BREAKPOINT=y
++CONFIG_HAVE_PERF_REGS=y
++CONFIG_HAVE_PERF_USER_STACK_DUMP=y
++CONFIG_HAVE_ARCH_JUMP_LABEL=y
++CONFIG_HAVE_RCU_TABLE_FREE=y
++CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y
++CONFIG_HAVE_CMPXCHG_LOCAL=y
++CONFIG_HAVE_CMPXCHG_DOUBLE=y
++CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y
++CONFIG_HAVE_ARCH_SECCOMP_FILTER=y
++CONFIG_SECCOMP_FILTER=y
++CONFIG_HAVE_CC_STACKPROTECTOR=y
++CONFIG_CC_STACKPROTECTOR=y
++# CONFIG_CC_STACKPROTECTOR_NONE is not set
++# CONFIG_CC_STACKPROTECTOR_REGULAR is not set
++CONFIG_CC_STACKPROTECTOR_STRONG=y
++CONFIG_HAVE_CONTEXT_TRACKING=y
++CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y
++CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y
++CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y
++CONFIG_HAVE_ARCH_HUGE_VMAP=y
++CONFIG_MODULES_USE_ELF_RELA=y
++CONFIG_ARCH_HAS_ELF_RANDOMIZE=y
++CONFIG_HAVE_ARCH_MMAP_RND_BITS=y
++CONFIG_ARCH_MMAP_RND_BITS=18
++CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y
++CONFIG_ARCH_MMAP_RND_COMPAT_BITS=11
++CONFIG_CLONE_BACKWARDS=y
++CONFIG_OLD_SIGSUSPEND3=y
++CONFIG_COMPAT_OLD_SIGACTION=y
++
++#
++# GCOV-based kernel profiling
++#
++# CONFIG_GCOV_KERNEL is not set
++CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y
++CONFIG_HAVE_GENERIC_DMA_COHERENT=y
++CONFIG_SLABINFO=y
++CONFIG_RT_MUTEXES=y
++CONFIG_BASE_SMALL=0
++CONFIG_MODULES=y
++# CONFIG_MODULE_FORCE_LOAD is not set
++CONFIG_MODULE_UNLOAD=y
++# CONFIG_MODULE_FORCE_UNLOAD is not set
++CONFIG_MODVERSIONS=y
++CONFIG_MODULE_SRCVERSION_ALL=y
++CONFIG_MODULE_SIG=y
++# CONFIG_MODULE_SIG_FORCE is not set
++CONFIG_MODULE_SIG_ALL=y
++# CONFIG_MODULE_SIG_SHA1 is not set
++# CONFIG_MODULE_SIG_SHA224 is not set
++# CONFIG_MODULE_SIG_SHA256 is not set
++# CONFIG_MODULE_SIG_SHA384 is not set
++CONFIG_MODULE_SIG_SHA512=y
++CONFIG_MODULE_SIG_HASH="sha512"
++# CONFIG_MODULE_COMPRESS is not set
++CONFIG_MODULES_TREE_LOOKUP=y
++CONFIG_BLOCK=y
++CONFIG_BLK_DEV_BSG=y
++CONFIG_BLK_DEV_BSGLIB=y
++CONFIG_BLK_DEV_INTEGRITY=y
++CONFIG_BLK_DEV_THROTTLING=y
++CONFIG_BLK_CMDLINE_PARSER=y
++
++#
++# Partition Types
++#
++CONFIG_PARTITION_ADVANCED=y
++# CONFIG_ACORN_PARTITION is not set
++CONFIG_AIX_PARTITION=y
++CONFIG_OSF_PARTITION=y
++CONFIG_AMIGA_PARTITION=y
++CONFIG_ATARI_PARTITION=y
++CONFIG_MAC_PARTITION=y
++CONFIG_MSDOS_PARTITION=y
++CONFIG_BSD_DISKLABEL=y
++CONFIG_MINIX_SUBPARTITION=y
++CONFIG_SOLARIS_X86_PARTITION=y
++CONFIG_UNIXWARE_DISKLABEL=y
++CONFIG_LDM_PARTITION=y
++# CONFIG_LDM_DEBUG is not set
++CONFIG_SGI_PARTITION=y
++CONFIG_ULTRIX_PARTITION=y
++CONFIG_SUN_PARTITION=y
++CONFIG_KARMA_PARTITION=y
++CONFIG_EFI_PARTITION=y
++CONFIG_SYSV68_PARTITION=y
++CONFIG_CMDLINE_PARTITION=y
++CONFIG_BLOCK_COMPAT=y
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++CONFIG_CFQ_GROUP_IOSCHED=y
++CONFIG_DEFAULT_DEADLINE=y
++# CONFIG_DEFAULT_CFQ is not set
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="deadline"
++CONFIG_PREEMPT_NOTIFIERS=y
++CONFIG_ASN1=y
++CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
++CONFIG_INLINE_READ_UNLOCK=y
++CONFIG_INLINE_READ_UNLOCK_IRQ=y
++CONFIG_INLINE_WRITE_UNLOCK=y
++CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
++CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y
++CONFIG_MUTEX_SPIN_ON_OWNER=y
++CONFIG_RWSEM_SPIN_ON_OWNER=y
++CONFIG_LOCK_SPIN_ON_OWNER=y
++CONFIG_FREEZER=y
++
++#
++# Platform selection
++#
++# CONFIG_ARCH_SUNXI is not set
++# CONFIG_ARCH_ALPINE is not set
++CONFIG_ARCH_BCM_IPROC=y
++CONFIG_ARCH_BERLIN=y
++# CONFIG_ARCH_EXYNOS is not set
++CONFIG_ARCH_LAYERSCAPE=y
++CONFIG_ARCH_HISI=y
++CONFIG_ARCH_MEDIATEK=y
++# CONFIG_ARCH_MESON is not set
++# CONFIG_ARCH_MVEBU is not set
++CONFIG_ARCH_QCOM=y
++# CONFIG_ARCH_ROCKCHIP is not set
++# CONFIG_ARCH_SEATTLE is not set
++# CONFIG_ARCH_RENESAS is not set
++CONFIG_ARCH_STRATIX10=y
++# CONFIG_ARCH_TEGRA is not set
++CONFIG_ARCH_SPRD=y
++CONFIG_ARCH_THUNDER=y
++# CONFIG_ARCH_UNIPHIER is not set
++CONFIG_ARCH_VEXPRESS=y
++# CONFIG_ARCH_VULCAN is not set
++CONFIG_ARCH_XGENE=y
++CONFIG_ARCH_ZYNQMP=y
++
++#
++# Bus support
++#
++CONFIG_PCI=y
++CONFIG_PCI_DOMAINS=y
++CONFIG_PCI_DOMAINS_GENERIC=y
++CONFIG_PCI_SYSCALL=y
++CONFIG_PCIEPORTBUS=y
++CONFIG_HOTPLUG_PCI_PCIE=y
++CONFIG_PCIEAER=y
++# CONFIG_PCIE_ECRC is not set
++# CONFIG_PCIEAER_INJECT is not set
++CONFIG_PCIEASPM=y
++CONFIG_PCIEASPM_DEBUG=y
++CONFIG_PCIEASPM_DEFAULT=y
++# CONFIG_PCIEASPM_POWERSAVE is not set
++# CONFIG_PCIEASPM_PERFORMANCE is not set
++CONFIG_PCIE_PME=y
++CONFIG_PCI_BUS_ADDR_T_64BIT=y
++CONFIG_PCI_MSI=y
++CONFIG_PCI_MSI_IRQ_DOMAIN=y
++# CONFIG_PCI_DEBUG is not set
++CONFIG_PCI_REALLOC_ENABLE_AUTO=y
++# CONFIG_PCI_STUB is not set
++CONFIG_PCI_ATS=y
++CONFIG_PCI_IOV=y
++CONFIG_PCI_PRI=y
++CONFIG_PCI_PASID=y
++CONFIG_PCI_LABEL=y
++CONFIG_HOTPLUG_PCI=y
++CONFIG_HOTPLUG_PCI_ACPI=y
++# CONFIG_HOTPLUG_PCI_ACPI_IBM is not set
++CONFIG_HOTPLUG_PCI_CPCI=y
++CONFIG_HOTPLUG_PCI_SHPC=m
++
++#
++# PCI host controller drivers
++#
++# CONFIG_PCIE_XILINX_NWL is not set
++# CONFIG_PCIE_DW_PLAT is not set
++CONFIG_PCIE_DW=y
++CONFIG_PCI_HOST_COMMON=y
++CONFIG_PCI_HOST_GENERIC=y
++CONFIG_PCI_XGENE=y
++CONFIG_PCI_XGENE_MSI=y
++# CONFIG_PCI_LAYERSCAPE is not set
++# CONFIG_PCIE_IPROC_PLATFORM is not set
++CONFIG_PCI_HISI=y
++# CONFIG_PCIE_QCOM is not set
++CONFIG_PCI_HOST_THUNDER_PEM=y
++CONFIG_PCI_HOST_THUNDER_ECAM=y
++
++#
++# Kernel Features
++#
++
++#
++# ARM errata workarounds via the alternatives framework
++#
++CONFIG_ARM64_ERRATUM_826319=y
++CONFIG_ARM64_ERRATUM_827319=y
++CONFIG_ARM64_ERRATUM_824069=y
++CONFIG_ARM64_ERRATUM_819472=y
++CONFIG_ARM64_ERRATUM_832075=y
++CONFIG_ARM64_ERRATUM_834220=y
++CONFIG_ARM64_ERRATUM_845719=y
++CONFIG_ARM64_ERRATUM_843419=y
++CONFIG_CAVIUM_ERRATUM_22375=y
++CONFIG_CAVIUM_ERRATUM_23154=y
++CONFIG_CAVIUM_ERRATUM_27456=y
++CONFIG_ARM64_4K_PAGES=y
++# CONFIG_ARM64_16K_PAGES is not set
++# CONFIG_ARM64_64K_PAGES is not set
++# CONFIG_ARM64_VA_BITS_39 is not set
++CONFIG_ARM64_VA_BITS_48=y
++CONFIG_ARM64_VA_BITS=48
++# CONFIG_CPU_BIG_ENDIAN is not set
++CONFIG_SCHED_MC=y
++CONFIG_SCHED_SMT=y
++CONFIG_NR_CPUS=128
++CONFIG_HOTPLUG_CPU=y
++# CONFIG_PREEMPT_NONE is not set
++CONFIG_PREEMPT_VOLUNTARY=y
++# CONFIG_PREEMPT is not set
++# CONFIG_HZ_100 is not set
++CONFIG_HZ_250=y
++# CONFIG_HZ_300 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=250
++CONFIG_SCHED_HRTICK=y
++CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
++CONFIG_ARCH_HAS_HOLES_MEMORYMODEL=y
++CONFIG_ARCH_SPARSEMEM_ENABLE=y
++CONFIG_ARCH_SPARSEMEM_DEFAULT=y
++CONFIG_ARCH_SELECT_MEMORY_MODEL=y
++CONFIG_HAVE_ARCH_PFN_VALID=y
++CONFIG_HW_PERF_EVENTS=y
++CONFIG_SYS_SUPPORTS_HUGETLBFS=y
++CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y
++CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
++CONFIG_SELECT_MEMORY_MODEL=y
++CONFIG_SPARSEMEM_MANUAL=y
++CONFIG_SPARSEMEM=y
++CONFIG_HAVE_MEMORY_PRESENT=y
++CONFIG_SPARSEMEM_EXTREME=y
++CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
++CONFIG_SPARSEMEM_VMEMMAP=y
++CONFIG_HAVE_MEMBLOCK=y
++CONFIG_NO_BOOTMEM=y
++CONFIG_MEMORY_ISOLATION=y
++# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set
++CONFIG_SPLIT_PTLOCK_CPUS=4
++CONFIG_MEMORY_BALLOON=y
++CONFIG_BALLOON_COMPACTION=y
++CONFIG_COMPACTION=y
++CONFIG_MIGRATION=y
++CONFIG_PHYS_ADDR_T_64BIT=y
++CONFIG_ZONE_DMA_FLAG=1
++CONFIG_BOUNCE=y
++CONFIG_MMU_NOTIFIER=y
++CONFIG_KSM=y
++CONFIG_DEFAULT_MMAP_MIN_ADDR=32768
++CONFIG_TRANSPARENT_HUGEPAGE=y
++CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y
++# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set
++CONFIG_CLEANCACHE=y
++CONFIG_FRONTSWAP=y
++CONFIG_CMA=y
++# CONFIG_CMA_DEBUG is not set
++# CONFIG_CMA_DEBUGFS is not set
++CONFIG_CMA_AREAS=7
++CONFIG_ZSWAP=y
++CONFIG_ZPOOL=y
++CONFIG_ZBUD=y
++CONFIG_ZSMALLOC=y
++CONFIG_PGTABLE_MAPPING=y
++# CONFIG_ZSMALLOC_STAT is not set
++CONFIG_GENERIC_EARLY_IOREMAP=y
++CONFIG_IDLE_PAGE_TRACKING=y
++CONFIG_SECCOMP=y
++CONFIG_PARAVIRT=y
++# CONFIG_PARAVIRT_TIME_ACCOUNTING is not set
++CONFIG_XEN_DOM0=y
++CONFIG_XEN=y
++CONFIG_FORCE_MAX_ZONEORDER=11
++CONFIG_ARMV8_DEPRECATED=y
++CONFIG_SWP_EMULATION=y
++CONFIG_CP15_BARRIER_EMULATION=y
++CONFIG_SETEND_EMULATION=y
++
++#
++# ARMv8.1 architectural features
++#
++CONFIG_ARM64_HW_AFDBM=y
++CONFIG_ARM64_PAN=y
++# CONFIG_ARM64_LSE_ATOMICS is not set
++CONFIG_ARM64_VHE=y
++
++#
++# ARMv8.2 architectural features
++#
++CONFIG_ARM64_UAO=y
++CONFIG_ARM64_MODULE_CMODEL_LARGE=y
++# CONFIG_RANDOMIZE_BASE is not set
++
++#
++# Boot options
++#
++CONFIG_ARM64_ACPI_PARKING_PROTOCOL=y
++CONFIG_CMDLINE="console=ttyAMA0"
++# CONFIG_CMDLINE_FORCE is not set
++CONFIG_EFI_STUB=y
++CONFIG_EFI=y
++CONFIG_DMI=y
++
++#
++# Userspace binary formats
++#
++CONFIG_BINFMT_ELF=y
++CONFIG_COMPAT_BINFMT_ELF=y
++CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
++CONFIG_BINFMT_SCRIPT=y
++# CONFIG_HAVE_AOUT is not set
++# CONFIG_BINFMT_MISC is not set
++CONFIG_COREDUMP=y
++CONFIG_COMPAT=y
++CONFIG_SYSVIPC_COMPAT=y
++
++#
++# Power management options
++#
++CONFIG_SUSPEND=y
++CONFIG_SUSPEND_FREEZER=y
++# CONFIG_SUSPEND_SKIP_SYNC is not set
++CONFIG_PM_SLEEP=y
++CONFIG_PM_SLEEP_SMP=y
++# CONFIG_PM_AUTOSLEEP is not set
++CONFIG_PM_WAKELOCKS=y
++CONFIG_PM_WAKELOCKS_LIMIT=100
++CONFIG_PM_WAKELOCKS_GC=y
++CONFIG_PM=y
++CONFIG_PM_DEBUG=y
++CONFIG_PM_ADVANCED_DEBUG=y
++# CONFIG_PM_TEST_SUSPEND is not set
++CONFIG_PM_SLEEP_DEBUG=y
++# CONFIG_DPM_WATCHDOG is not set
++CONFIG_PM_OPP=y
++CONFIG_PM_CLK=y
++CONFIG_PM_GENERIC_DOMAINS=y
++CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y
++CONFIG_PM_GENERIC_DOMAINS_SLEEP=y
++CONFIG_PM_GENERIC_DOMAINS_OF=y
++CONFIG_CPU_PM=y
++CONFIG_ARCH_SUSPEND_POSSIBLE=y
++
++#
++# CPU Power Management
++#
++
++#
++# CPU Idle
++#
++CONFIG_CPU_IDLE=y
++CONFIG_CPU_IDLE_GOV_LADDER=y
++CONFIG_CPU_IDLE_GOV_MENU=y
++CONFIG_DT_IDLE_STATES=y
++
++#
++# ARM CPU Idle Drivers
++#
++CONFIG_ARM_CPUIDLE=y
++# CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is not set
++
++#
++# CPU Frequency scaling
++#
++CONFIG_CPU_FREQ=y
++CONFIG_CPU_FREQ_GOV_COMMON=y
++CONFIG_CPU_FREQ_STAT=y
++CONFIG_CPU_FREQ_STAT_DETAILS=y
++CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
++# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
++# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
++# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
++# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
++CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
++CONFIG_CPU_FREQ_GOV_POWERSAVE=y
++CONFIG_CPU_FREQ_GOV_USERSPACE=y
++CONFIG_CPU_FREQ_GOV_ONDEMAND=y
++CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
++
++#
++# CPU frequency scaling drivers
++#
++CONFIG_CPUFREQ_DT=y
++# CONFIG_ARM_BIG_LITTLE_CPUFREQ is not set
++# CONFIG_ARM_HISI_ACPU_CPUFREQ is not set
++# CONFIG_ARM_KIRKWOOD_CPUFREQ is not set
++CONFIG_ARM_MT8173_CPUFREQ=y
++# CONFIG_ACPI_CPPC_CPUFREQ is not set
++CONFIG_NET=y
++CONFIG_NET_INGRESS=y
++
++#
++# Networking options
++#
++CONFIG_PACKET=y
++# CONFIG_PACKET_DIAG is not set
++CONFIG_UNIX=y
++# CONFIG_UNIX_DIAG is not set
++# CONFIG_XFRM_USER is not set
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++CONFIG_IP_ADVANCED_ROUTER=y
++CONFIG_IP_FIB_TRIE_STATS=y
++CONFIG_IP_MULTIPLE_TABLES=y
++CONFIG_IP_ROUTE_MULTIPATH=y
++CONFIG_IP_ROUTE_VERBOSE=y
++# CONFIG_IP_PNP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE_DEMUX is not set
++# CONFIG_NET_IP_TUNNEL is not set
++CONFIG_IP_MROUTE=y
++# CONFIG_IP_MROUTE_MULTIPLE_TABLES is not set
++CONFIG_IP_PIMSM_V1=y
++CONFIG_IP_PIMSM_V2=y
++CONFIG_SYN_COOKIES=y
++# CONFIG_NET_UDP_TUNNEL is not set
++# CONFIG_NET_FOU is not set
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++# CONFIG_INET_TUNNEL is not set
++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
++# CONFIG_INET_XFRM_MODE_TUNNEL is not set
++# CONFIG_INET_XFRM_MODE_BEET is not set
++# CONFIG_INET_DIAG is not set
++CONFIG_TCP_CONG_ADVANCED=y
++# CONFIG_TCP_CONG_BIC is not set
++CONFIG_TCP_CONG_CUBIC=y
++# CONFIG_TCP_CONG_WESTWOOD is not set
++# CONFIG_TCP_CONG_HTCP is not set
++# CONFIG_TCP_CONG_HSTCP is not set
++# CONFIG_TCP_CONG_HYBLA is not set
++# CONFIG_TCP_CONG_VEGAS is not set
++# CONFIG_TCP_CONG_SCALABLE is not set
++# CONFIG_TCP_CONG_LP is not set
++# CONFIG_TCP_CONG_VENO is not set
++# CONFIG_TCP_CONG_YEAH is not set
++# CONFIG_TCP_CONG_ILLINOIS is not set
++# CONFIG_TCP_CONG_DCTCP is not set
++# CONFIG_TCP_CONG_CDG is not set
++CONFIG_DEFAULT_CUBIC=y
++# CONFIG_DEFAULT_RENO is not set
++CONFIG_DEFAULT_TCP_CONG="cubic"
++CONFIG_TCP_MD5SIG=y
++CONFIG_IPV6=y
++CONFIG_IPV6_ROUTER_PREF=y
++CONFIG_IPV6_ROUTE_INFO=y
++# CONFIG_IPV6_OPTIMISTIC_DAD is not set
++# CONFIG_INET6_AH is not set
++# CONFIG_INET6_ESP is not set
++# CONFIG_INET6_IPCOMP is not set
++# CONFIG_IPV6_MIP6 is not set
++# CONFIG_IPV6_ILA is not set
++# CONFIG_INET6_XFRM_TUNNEL is not set
++# CONFIG_INET6_TUNNEL is not set
++# CONFIG_INET6_XFRM_MODE_TRANSPORT is not set
++# CONFIG_INET6_XFRM_MODE_TUNNEL is not set
++# CONFIG_INET6_XFRM_MODE_BEET is not set
++# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
++# CONFIG_IPV6_SIT is not set
++# CONFIG_IPV6_TUNNEL is not set
++# CONFIG_IPV6_GRE is not set
++CONFIG_IPV6_MULTIPLE_TABLES=y
++CONFIG_IPV6_SUBTREES=y
++CONFIG_IPV6_MROUTE=y
++CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
++CONFIG_IPV6_PIMSM_V2=y
++CONFIG_NETLABEL=y
++CONFIG_NETWORK_SECMARK=y
++CONFIG_NET_PTP_CLASSIFY=y
++# CONFIG_NETWORK_PHY_TIMESTAMPING is not set
++CONFIG_NETFILTER=y
++# CONFIG_NETFILTER_DEBUG is not set
++CONFIG_NETFILTER_ADVANCED=y
++
++#
++# Core Netfilter Configuration
++#
++CONFIG_NETFILTER_INGRESS=y
++# CONFIG_NETFILTER_NETLINK_ACCT is not set
++# CONFIG_NETFILTER_NETLINK_QUEUE is not set
++# CONFIG_NETFILTER_NETLINK_LOG is not set
++# CONFIG_NF_CONNTRACK is not set
++# CONFIG_NF_TABLES is not set
++# CONFIG_NETFILTER_XTABLES is not set
++# CONFIG_IP_SET is not set
++# CONFIG_IP_VS is not set
++
++#
++# IP: Netfilter Configuration
++#
++# CONFIG_NF_DEFRAG_IPV4 is not set
++# CONFIG_NF_DUP_IPV4 is not set
++# CONFIG_NF_LOG_ARP is not set
++# CONFIG_NF_LOG_IPV4 is not set
++# CONFIG_NF_REJECT_IPV4 is not set
++# CONFIG_IP_NF_IPTABLES is not set
++# CONFIG_IP_NF_ARPTABLES is not set
++
++#
++# IPv6: Netfilter Configuration
++#
++# CONFIG_NF_DEFRAG_IPV6 is not set
++# CONFIG_NF_DUP_IPV6 is not set
++# CONFIG_NF_REJECT_IPV6 is not set
++# CONFIG_NF_LOG_IPV6 is not set
++# CONFIG_IP6_NF_IPTABLES is not set
++# CONFIG_IP_DCCP is not set
++# CONFIG_IP_SCTP is not set
++# CONFIG_RDS is not set
++# CONFIG_TIPC is not set
++# CONFIG_ATM is not set
++# CONFIG_L2TP is not set
++# CONFIG_BRIDGE is not set
++CONFIG_HAVE_NET_DSA=y
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_PHONET is not set
++# CONFIG_6LOWPAN is not set
++# CONFIG_IEEE802154 is not set
++CONFIG_NET_SCHED=y
++
++#
++# Queueing/Scheduling
++#
++# CONFIG_NET_SCH_CBQ is not set
++# CONFIG_NET_SCH_HTB is not set
++# CONFIG_NET_SCH_HFSC is not set
++# CONFIG_NET_SCH_PRIO is not set
++# CONFIG_NET_SCH_MULTIQ is not set
++# CONFIG_NET_SCH_RED is not set
++# CONFIG_NET_SCH_SFB is not set
++# CONFIG_NET_SCH_SFQ is not set
++# CONFIG_NET_SCH_TEQL is not set
++# CONFIG_NET_SCH_TBF is not set
++# CONFIG_NET_SCH_GRED is not set
++# CONFIG_NET_SCH_DSMARK is not set
++# CONFIG_NET_SCH_NETEM is not set
++# CONFIG_NET_SCH_DRR is not set
++# CONFIG_NET_SCH_MQPRIO is not set
++# CONFIG_NET_SCH_CHOKE is not set
++# CONFIG_NET_SCH_QFQ is not set
++# CONFIG_NET_SCH_CODEL is not set
++# CONFIG_NET_SCH_FQ_CODEL is not set
++# CONFIG_NET_SCH_FQ is not set
++# CONFIG_NET_SCH_HHF is not set
++# CONFIG_NET_SCH_PIE is not set
++# CONFIG_NET_SCH_INGRESS is not set
++# CONFIG_NET_SCH_PLUG is not set
++
++#
++# Classification
++#
++CONFIG_NET_CLS=y
++# CONFIG_NET_CLS_BASIC is not set
++# CONFIG_NET_CLS_TCINDEX is not set
++# CONFIG_NET_CLS_ROUTE4 is not set
++# CONFIG_NET_CLS_FW is not set
++# CONFIG_NET_CLS_U32 is not set
++# CONFIG_NET_CLS_RSVP is not set
++# CONFIG_NET_CLS_RSVP6 is not set
++# CONFIG_NET_CLS_FLOW is not set
++# CONFIG_NET_CLS_CGROUP is not set
++# CONFIG_NET_CLS_BPF is not set
++# CONFIG_NET_CLS_FLOWER is not set
++CONFIG_NET_EMATCH=y
++CONFIG_NET_EMATCH_STACK=32
++# CONFIG_NET_EMATCH_CMP is not set
++# CONFIG_NET_EMATCH_NBYTE is not set
++# CONFIG_NET_EMATCH_U32 is not set
++# CONFIG_NET_EMATCH_META is not set
++# CONFIG_NET_EMATCH_TEXT is not set
++CONFIG_NET_CLS_ACT=y
++# CONFIG_NET_ACT_POLICE is not set
++# CONFIG_NET_ACT_GACT is not set
++# CONFIG_NET_ACT_MIRRED is not set
++# CONFIG_NET_ACT_NAT is not set
++# CONFIG_NET_ACT_PEDIT is not set
++# CONFIG_NET_ACT_SIMP is not set
++# CONFIG_NET_ACT_SKBEDIT is not set
++# CONFIG_NET_ACT_CSUM is not set
++# CONFIG_NET_ACT_VLAN is not set
++# CONFIG_NET_ACT_BPF is not set
++# CONFIG_NET_ACT_IFE is not set
++CONFIG_NET_SCH_FIFO=y
++CONFIG_DCB=y
++CONFIG_DNS_RESOLVER=y
++# CONFIG_BATMAN_ADV is not set
++# CONFIG_OPENVSWITCH is not set
++# CONFIG_VSOCKETS is not set
++# CONFIG_NETLINK_DIAG is not set
++CONFIG_MPLS=y
++# CONFIG_NET_MPLS_GSO is not set
++# CONFIG_MPLS_ROUTING is not set
++# CONFIG_HSR is not set
++# CONFIG_NET_SWITCHDEV is not set
++CONFIG_NET_L3_MASTER_DEV=y
++CONFIG_RPS=y
++CONFIG_RFS_ACCEL=y
++CONFIG_XPS=y
++CONFIG_SOCK_CGROUP_DATA=y
++CONFIG_CGROUP_NET_PRIO=y
++CONFIG_CGROUP_NET_CLASSID=y
++CONFIG_NET_RX_BUSY_POLL=y
++CONFIG_BQL=y
++CONFIG_BPF_JIT=y
++CONFIG_NET_FLOW_LIMIT=y
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_NET_DROP_MONITOR is not set
++CONFIG_HAMRADIO=y
++
++#
++# Packet Radio protocols
++#
++# CONFIG_AX25 is not set
++# CONFIG_CAN is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++# CONFIG_AF_RXRPC is not set
++# CONFIG_AF_KCM is not set
++CONFIG_FIB_RULES=y
++CONFIG_WIRELESS=y
++# CONFIG_CFG80211 is not set
++# CONFIG_LIB80211 is not set
++
++#
++# CFG80211 needs to be enabled for MAC80211
++#
++CONFIG_MAC80211_STA_HASH_MAX_SIZE=0
++# CONFIG_WIMAX is not set
++CONFIG_RFKILL=y
++CONFIG_RFKILL_LEDS=y
++CONFIG_RFKILL_INPUT=y
++# CONFIG_RFKILL_REGULATOR is not set
++# CONFIG_RFKILL_GPIO is not set
++# CONFIG_NET_9P is not set
++# CONFIG_CAIF is not set
++# CONFIG_CEPH_LIB is not set
++# CONFIG_NFC is not set
++CONFIG_LWTUNNEL=y
++# CONFIG_DST_CACHE is not set
++# CONFIG_NET_DEVLINK is not set
++CONFIG_MAY_USE_DEVLINK=y
++CONFIG_HAVE_BPF_JIT=y
++
++#
++# Device Drivers
++#
++CONFIG_ARM_AMBA=y
++
++#
++# Generic Driver Options
++#
++CONFIG_UEVENT_HELPER=y
++CONFIG_UEVENT_HELPER_PATH=""
++CONFIG_DEVTMPFS=y
++CONFIG_DEVTMPFS_MOUNT=y
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++CONFIG_FW_LOADER=y
++CONFIG_FIRMWARE_IN_KERNEL=y
++CONFIG_EXTRA_FIRMWARE=""
++# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set
++CONFIG_ALLOW_DEV_COREDUMP=y
++# CONFIG_DEBUG_DRIVER is not set
++# CONFIG_DEBUG_DEVRES is not set
++CONFIG_SYS_HYPERVISOR=y
++# CONFIG_GENERIC_CPU_DEVICES is not set
++CONFIG_GENERIC_CPU_AUTOPROBE=y
++CONFIG_REGMAP=y
++CONFIG_REGMAP_I2C=y
++CONFIG_REGMAP_SPI=y
++CONFIG_REGMAP_MMIO=y
++CONFIG_REGMAP_IRQ=y
++CONFIG_DMA_SHARED_BUFFER=y
++# CONFIG_FENCE_TRACE is not set
++# CONFIG_DMA_CMA is not set
++
++#
++# Bus devices
++#
++CONFIG_ARM_CCI=y
++CONFIG_ARM_CCI_PMU=y
++CONFIG_ARM_CCI400_COMMON=y
++CONFIG_ARM_CCI400_PMU=y
++# CONFIG_ARM_CCI5xx_PMU is not set
++CONFIG_ARM_CCN=y
++CONFIG_VEXPRESS_CONFIG=y
++CONFIG_CONNECTOR=y
++CONFIG_PROC_EVENTS=y
++# CONFIG_MTD is not set
++CONFIG_DTC=y
++CONFIG_OF=y
++# CONFIG_OF_UNITTEST is not set
++CONFIG_OF_FLATTREE=y
++CONFIG_OF_EARLY_FLATTREE=y
++CONFIG_OF_DYNAMIC=y
++CONFIG_OF_ADDRESS=y
++CONFIG_OF_ADDRESS_PCI=y
++CONFIG_OF_IRQ=y
++CONFIG_OF_NET=y
++CONFIG_OF_MDIO=y
++CONFIG_OF_PCI=y
++CONFIG_OF_PCI_IRQ=y
++CONFIG_OF_RESERVED_MEM=y
++CONFIG_OF_RESOLVE=y
++CONFIG_OF_OVERLAY=y
++# CONFIG_PARPORT is not set
++CONFIG_PNP=y
++# CONFIG_PNP_DEBUG_MESSAGES is not set
++
++#
++# Protocols
++#
++CONFIG_PNPACPI=y
++CONFIG_BLK_DEV=y
++# CONFIG_BLK_DEV_NULL_BLK is not set
++# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set
++# CONFIG_ZRAM is not set
++# CONFIG_BLK_CPQ_CISS_DA is not set
++# CONFIG_BLK_DEV_DAC960 is not set
++# CONFIG_BLK_DEV_UMEM is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=y
++CONFIG_BLK_DEV_LOOP_MIN_COUNT=8
++# CONFIG_BLK_DEV_CRYPTOLOOP is not set
++# CONFIG_BLK_DEV_DRBD is not set
++# CONFIG_BLK_DEV_NBD is not set
++# CONFIG_BLK_DEV_SKD is not set
++# CONFIG_BLK_DEV_SX8 is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=65536
++CONFIG_BLK_DEV_RAM_DAX=y
++# CONFIG_CDROM_PKTCDVD is not set
++# CONFIG_ATA_OVER_ETH is not set
++CONFIG_XEN_BLKDEV_FRONTEND=y
++# CONFIG_XEN_BLKDEV_BACKEND is not set
++CONFIG_VIRTIO_BLK=y
++# CONFIG_BLK_DEV_RBD is not set
++# CONFIG_BLK_DEV_RSXX is not set
++# CONFIG_BLK_DEV_NVME is not set
++
++#
++# Misc devices
++#
++# CONFIG_SENSORS_LIS3LV02D is not set
++# CONFIG_AD525X_DPOT is not set
++# CONFIG_DUMMY_IRQ is not set
++# CONFIG_PHANTOM is not set
++# CONFIG_SGI_IOC4 is not set
++# CONFIG_TIFM_CORE is not set
++# CONFIG_ICS932S401 is not set
++# CONFIG_ENCLOSURE_SERVICES is not set
++# CONFIG_HP_ILO is not set
++# CONFIG_APDS9802ALS is not set
++# CONFIG_ISL29003 is not set
++# CONFIG_ISL29020 is not set
++# CONFIG_SENSORS_TSL2550 is not set
++# CONFIG_SENSORS_BH1780 is not set
++# CONFIG_SENSORS_BH1770 is not set
++# CONFIG_SENSORS_APDS990X is not set
++# CONFIG_HMC6352 is not set
++# CONFIG_DS1682 is not set
++# CONFIG_TI_DAC7512 is not set
++# CONFIG_BMP085_I2C is not set
++# CONFIG_BMP085_SPI is not set
++# CONFIG_USB_SWITCH_FSA9480 is not set
++# CONFIG_LATTICE_ECP3_CONFIG is not set
++CONFIG_SRAM=y
++CONFIG_VEXPRESS_SYSCFG=y
++# CONFIG_C2PORT is not set
++
++#
++# EEPROM support
++#
++# CONFIG_EEPROM_AT24 is not set
++# CONFIG_EEPROM_AT25 is not set
++# CONFIG_EEPROM_LEGACY is not set
++# CONFIG_EEPROM_MAX6875 is not set
++# CONFIG_EEPROM_93CX6 is not set
++# CONFIG_EEPROM_93XX46 is not set
++# CONFIG_CB710_CORE is not set
++
++#
++# Texas Instruments shared transport line discipline
++#
++# CONFIG_TI_ST is not set
++# CONFIG_SENSORS_LIS3_I2C is not set
++
++#
++# Altera FPGA firmware download module
++#
++# CONFIG_ALTERA_STAPL is not set
++
++#
++# Intel MIC Bus Driver
++#
++
++#
++# SCIF Bus Driver
++#
++
++#
++# VOP Bus Driver
++#
++
++#
++# Intel MIC Host Driver
++#
++
++#
++# Intel MIC Card Driver
++#
++
++#
++# SCIF Driver
++#
++
++#
++# Intel MIC Coprocessor State Management (COSM) Drivers
++#
++
++#
++# VOP Driver
++#
++# CONFIG_GENWQE is not set
++# CONFIG_ECHO is not set
++# CONFIG_CXL_BASE is not set
++# CONFIG_CXL_KERNEL_API is not set
++# CONFIG_CXL_EEH is not set
++
++#
++# SCSI device support
++#
++CONFIG_SCSI_MOD=y
++# CONFIG_RAID_ATTRS is not set
++CONFIG_SCSI=y
++CONFIG_SCSI_DMA=y
++# CONFIG_SCSI_NETLINK is not set
++# CONFIG_SCSI_MQ_DEFAULT is not set
++CONFIG_SCSI_PROC_FS=y
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=y
++# CONFIG_CHR_DEV_ST is not set
++# CONFIG_CHR_DEV_OSST is not set
++CONFIG_BLK_DEV_SR=y
++# CONFIG_BLK_DEV_SR_VENDOR is not set
++CONFIG_CHR_DEV_SG=y
++# CONFIG_CHR_DEV_SCH is not set
++CONFIG_SCSI_CONSTANTS=y
++CONFIG_SCSI_LOGGING=y
++CONFIG_SCSI_SCAN_ASYNC=y
++
++#
++# SCSI Transports
++#
++# CONFIG_SCSI_SPI_ATTRS is not set
++# CONFIG_SCSI_FC_ATTRS is not set
++# CONFIG_SCSI_ISCSI_ATTRS is not set
++# CONFIG_SCSI_SAS_ATTRS is not set
++# CONFIG_SCSI_SAS_LIBSAS is not set
++# CONFIG_SCSI_SRP_ATTRS is not set
++CONFIG_SCSI_LOWLEVEL=y
++# CONFIG_ISCSI_TCP is not set
++# CONFIG_ISCSI_BOOT_SYSFS is not set
++# CONFIG_SCSI_CXGB3_ISCSI is not set
++# CONFIG_SCSI_CXGB4_ISCSI is not set
++# CONFIG_SCSI_BNX2_ISCSI is not set
++# CONFIG_BE2ISCSI is not set
++# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
++# CONFIG_SCSI_HPSA is not set
++# CONFIG_SCSI_3W_9XXX is not set
++# CONFIG_SCSI_3W_SAS is not set
++# CONFIG_SCSI_ACARD is not set
++# CONFIG_SCSI_AACRAID is not set
++# CONFIG_SCSI_AIC7XXX is not set
++# CONFIG_SCSI_AIC79XX is not set
++# CONFIG_SCSI_AIC94XX is not set
++# CONFIG_SCSI_HISI_SAS is not set
++# CONFIG_SCSI_MVSAS is not set
++# CONFIG_SCSI_MVUMI is not set
++# CONFIG_SCSI_ADVANSYS is not set
++# CONFIG_SCSI_ARCMSR is not set
++# CONFIG_SCSI_ESAS2R is not set
++CONFIG_MEGARAID_NEWGEN=y
++# CONFIG_MEGARAID_MM is not set
++# CONFIG_MEGARAID_LEGACY is not set
++# CONFIG_MEGARAID_SAS is not set
++# CONFIG_SCSI_MPT3SAS is not set
++# CONFIG_SCSI_MPT2SAS is not set
++# CONFIG_SCSI_UFSHCD is not set
++# CONFIG_SCSI_HPTIOP is not set
++# CONFIG_XEN_SCSI_FRONTEND is not set
++# CONFIG_SCSI_SNIC is not set
++# CONFIG_SCSI_DMX3191D is not set
++# CONFIG_SCSI_FUTURE_DOMAIN is not set
++# CONFIG_SCSI_IPS is not set
++# CONFIG_SCSI_INITIO is not set
++# CONFIG_SCSI_INIA100 is not set
++# CONFIG_SCSI_STEX is not set
++# CONFIG_SCSI_SYM53C8XX_2 is not set
++# CONFIG_SCSI_IPR is not set
++# CONFIG_SCSI_QLOGIC_1280 is not set
++# CONFIG_SCSI_QLA_ISCSI is not set
++# CONFIG_SCSI_DC395x is not set
++# CONFIG_SCSI_AM53C974 is not set
++# CONFIG_SCSI_WD719X is not set
++# CONFIG_SCSI_DEBUG is not set
++# CONFIG_SCSI_PMCRAID is not set
++# CONFIG_SCSI_PM8001 is not set
++# CONFIG_SCSI_VIRTIO is not set
++CONFIG_SCSI_LOWLEVEL_PCMCIA=y
++CONFIG_SCSI_DH=y
++# CONFIG_SCSI_DH_RDAC is not set
++# CONFIG_SCSI_DH_HP_SW is not set
++# CONFIG_SCSI_DH_EMC is not set
++# CONFIG_SCSI_DH_ALUA is not set
++# CONFIG_SCSI_OSD_INITIATOR is not set
++CONFIG_HAVE_PATA_PLATFORM=y
++CONFIG_ATA=y
++# CONFIG_ATA_NONSTANDARD is not set
++CONFIG_ATA_VERBOSE_ERROR=y
++CONFIG_ATA_ACPI=y
++CONFIG_SATA_ZPODD=y
++CONFIG_SATA_PMP=y
++
++#
++# Controllers with non-SFF native interface
++#
++CONFIG_SATA_AHCI=m
++CONFIG_SATA_AHCI_PLATFORM=m
++CONFIG_AHCI_CEVA=m
++CONFIG_AHCI_XGENE=m
++CONFIG_AHCI_QORIQ=m
++# CONFIG_SATA_INIC162X is not set
++CONFIG_SATA_ACARD_AHCI=m
++# CONFIG_SATA_SIL24 is not set
++CONFIG_ATA_SFF=y
++
++#
++# SFF controllers with custom DMA interface
++#
++# CONFIG_PDC_ADMA is not set
++# CONFIG_SATA_QSTOR is not set
++# CONFIG_SATA_SX4 is not set
++CONFIG_ATA_BMDMA=y
++
++#
++# SATA SFF controllers with BMDMA
++#
++# CONFIG_ATA_PIIX is not set
++# CONFIG_SATA_MV is not set
++# CONFIG_SATA_NV is not set
++# CONFIG_SATA_PROMISE is not set
++# CONFIG_SATA_SIL is not set
++# CONFIG_SATA_SIS is not set
++# CONFIG_SATA_SVW is not set
++# CONFIG_SATA_ULI is not set
++# CONFIG_SATA_VIA is not set
++# CONFIG_SATA_VITESSE is not set
++
++#
++# PATA SFF controllers with BMDMA
++#
++# CONFIG_PATA_ALI is not set
++# CONFIG_PATA_AMD is not set
++# CONFIG_PATA_ARTOP is not set
++# CONFIG_PATA_ATIIXP is not set
++# CONFIG_PATA_ATP867X is not set
++# CONFIG_PATA_CMD64X is not set
++# CONFIG_PATA_CYPRESS is not set
++# CONFIG_PATA_EFAR is not set
++# CONFIG_PATA_HPT366 is not set
++# CONFIG_PATA_HPT37X is not set
++# CONFIG_PATA_HPT3X2N is not set
++# CONFIG_PATA_HPT3X3 is not set
++# CONFIG_PATA_IT8213 is not set
++# CONFIG_PATA_IT821X is not set
++# CONFIG_PATA_JMICRON is not set
++# CONFIG_PATA_MARVELL is not set
++# CONFIG_PATA_NETCELL is not set
++# CONFIG_PATA_NINJA32 is not set
++# CONFIG_PATA_NS87415 is not set
++# CONFIG_PATA_OLDPIIX is not set
++# CONFIG_PATA_OPTIDMA is not set
++# CONFIG_PATA_PDC2027X is not set
++# CONFIG_PATA_PDC_OLD is not set
++# CONFIG_PATA_RADISYS is not set
++# CONFIG_PATA_RDC is not set
++# CONFIG_PATA_SCH is not set
++# CONFIG_PATA_SERVERWORKS is not set
++# CONFIG_PATA_SIL680 is not set
++# CONFIG_PATA_SIS is not set
++# CONFIG_PATA_TOSHIBA is not set
++# CONFIG_PATA_TRIFLEX is not set
++# CONFIG_PATA_VIA is not set
++# CONFIG_PATA_WINBOND is not set
++
++#
++# PIO-only SFF controllers
++#
++# CONFIG_PATA_CMD640_PCI is not set
++# CONFIG_PATA_MPIIX is not set
++# CONFIG_PATA_NS87410 is not set
++# CONFIG_PATA_OPTI is not set
++# CONFIG_PATA_PLATFORM is not set
++# CONFIG_PATA_RZ1000 is not set
++
++#
++# Generic fallback / legacy drivers
++#
++# CONFIG_PATA_ACPI is not set
++# CONFIG_ATA_GENERIC is not set
++# CONFIG_PATA_LEGACY is not set
++CONFIG_MD=y
++CONFIG_BLK_DEV_MD=y
++CONFIG_MD_AUTODETECT=y
++# CONFIG_MD_LINEAR is not set
++# CONFIG_MD_RAID0 is not set
++# CONFIG_MD_RAID1 is not set
++# CONFIG_MD_RAID10 is not set
++# CONFIG_MD_RAID456 is not set
++# CONFIG_MD_MULTIPATH is not set
++# CONFIG_MD_FAULTY is not set
++# CONFIG_BCACHE is not set
++CONFIG_BLK_DEV_DM_BUILTIN=y
++CONFIG_BLK_DEV_DM=y
++# CONFIG_DM_MQ_DEFAULT is not set
++# CONFIG_DM_DEBUG is not set
++# CONFIG_DM_CRYPT is not set
++# CONFIG_DM_SNAPSHOT is not set
++# CONFIG_DM_THIN_PROVISIONING is not set
++# CONFIG_DM_CACHE is not set
++# CONFIG_DM_ERA is not set
++# CONFIG_DM_MIRROR is not set
++# CONFIG_DM_RAID is not set
++# CONFIG_DM_ZERO is not set
++# CONFIG_DM_MULTIPATH is not set
++# CONFIG_DM_DELAY is not set
++CONFIG_DM_UEVENT=y
++# CONFIG_DM_FLAKEY is not set
++# CONFIG_DM_VERITY is not set
++# CONFIG_DM_SWITCH is not set
++# CONFIG_DM_LOG_WRITES is not set
++# CONFIG_TARGET_CORE is not set
++CONFIG_FUSION=y
++# CONFIG_FUSION_SPI is not set
++# CONFIG_FUSION_SAS is not set
++CONFIG_FUSION_MAX_SGE=128
++CONFIG_FUSION_LOGGING=y
++
++#
++# IEEE 1394 (FireWire) support
++#
++# CONFIG_FIREWIRE is not set
++# CONFIG_FIREWIRE_NOSY is not set
++CONFIG_NETDEVICES=y
++CONFIG_MII=y
++CONFIG_NET_CORE=y
++# CONFIG_BONDING is not set
++# CONFIG_DUMMY is not set
++# CONFIG_EQUALIZER is not set
++CONFIG_NET_FC=y
++# CONFIG_IFB is not set
++# CONFIG_NET_TEAM is not set
++# CONFIG_MACVLAN is not set
++# CONFIG_IPVLAN is not set
++# CONFIG_VXLAN is not set
++# CONFIG_MACSEC is not set
++CONFIG_NETCONSOLE=y
++CONFIG_NETPOLL=y
++CONFIG_NET_POLL_CONTROLLER=y
++CONFIG_TUN=y
++# CONFIG_TUN_VNET_CROSS_LE is not set
++# CONFIG_VETH is not set
++CONFIG_VIRTIO_NET=y
++# CONFIG_NLMON is not set
++# CONFIG_NET_VRF is not set
++# CONFIG_ARCNET is not set
++
++#
++# CAIF transport drivers
++#
++# CONFIG_VHOST_NET is not set
++# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set
++
++#
++# Distributed Switch Architecture drivers
++#
++# CONFIG_NET_DSA_MV88E6XXX is not set
++# CONFIG_NET_DSA_MV88E6XXX_NEED_PPU is not set
++CONFIG_ETHERNET=y
++CONFIG_MDIO=m
++CONFIG_NET_VENDOR_3COM=y
++# CONFIG_VORTEX is not set
++# CONFIG_TYPHOON is not set
++CONFIG_NET_VENDOR_ADAPTEC=y
++# CONFIG_ADAPTEC_STARFIRE is not set
++CONFIG_NET_VENDOR_AGERE=y
++# CONFIG_ET131X is not set
++CONFIG_NET_VENDOR_ALTEON=y
++# CONFIG_ACENIC is not set
++# CONFIG_ALTERA_TSE is not set
++CONFIG_NET_VENDOR_AMD=y
++# CONFIG_AMD8111_ETH is not set
++# CONFIG_PCNET32 is not set
++# CONFIG_AMD_XGBE is not set
++# CONFIG_NET_XGENE is not set
++CONFIG_NET_VENDOR_ARC=y
++# CONFIG_ARC_EMAC is not set
++# CONFIG_EMAC_ROCKCHIP is not set
++CONFIG_NET_VENDOR_ATHEROS=y
++# CONFIG_ATL2 is not set
++# CONFIG_ATL1 is not set
++# CONFIG_ATL1E is not set
++# CONFIG_ATL1C is not set
++# CONFIG_ALX is not set
++CONFIG_NET_VENDOR_AURORA=y
++# CONFIG_AURORA_NB8800 is not set
++CONFIG_NET_CADENCE=y
++# CONFIG_MACB is not set
++CONFIG_NET_VENDOR_BROADCOM=y
++# CONFIG_B44 is not set
++# CONFIG_BCMGENET is not set
++# CONFIG_BNX2 is not set
++# CONFIG_CNIC is not set
++# CONFIG_TIGON3 is not set
++# CONFIG_BNX2X is not set
++# CONFIG_SYSTEMPORT is not set
++# CONFIG_BNXT is not set
++CONFIG_NET_VENDOR_BROCADE=y
++# CONFIG_BNA is not set
++CONFIG_NET_VENDOR_CAVIUM=y
++CONFIG_THUNDER_NIC_PF=m
++CONFIG_THUNDER_NIC_VF=m
++CONFIG_THUNDER_NIC_BGX=m
++# CONFIG_LIQUIDIO is not set
++CONFIG_NET_VENDOR_CHELSIO=y
++# CONFIG_CHELSIO_T1 is not set
++# CONFIG_CHELSIO_T3 is not set
++# CONFIG_CHELSIO_T4 is not set
++# CONFIG_CHELSIO_T4VF is not set
++CONFIG_NET_VENDOR_CISCO=y
++# CONFIG_ENIC is not set
++# CONFIG_DNET is not set
++CONFIG_NET_VENDOR_DEC=y
++CONFIG_NET_TULIP=y
++# CONFIG_DE2104X is not set
++# CONFIG_TULIP is not set
++# CONFIG_WINBOND_840 is not set
++# CONFIG_DM9102 is not set
++# CONFIG_ULI526X is not set
++CONFIG_NET_VENDOR_DLINK=y
++# CONFIG_DL2K is not set
++# CONFIG_SUNDANCE is not set
++CONFIG_NET_VENDOR_EMULEX=y
++# CONFIG_BE2NET is not set
++CONFIG_NET_VENDOR_EZCHIP=y
++# CONFIG_EZCHIP_NPS_MANAGEMENT_ENET is not set
++CONFIG_NET_VENDOR_EXAR=y
++# CONFIG_S2IO is not set
++# CONFIG_VXGE is not set
++CONFIG_NET_VENDOR_FREESCALE=y
++# CONFIG_FSL_PQ_MDIO is not set
++CONFIG_FSL_XGMAC_MDIO=y
++# CONFIG_GIANFAR is not set
++CONFIG_NET_VENDOR_HISILICON=y
++# CONFIG_HIX5HD2_GMAC is not set
++# CONFIG_HIP04_ETH is not set
++# CONFIG_HNS is not set
++# CONFIG_HNS_DSAF is not set
++# CONFIG_HNS_ENET is not set
++CONFIG_NET_VENDOR_HP=y
++# CONFIG_HP100 is not set
++CONFIG_NET_VENDOR_INTEL=y
++# CONFIG_E100 is not set
++# CONFIG_E1000 is not set
++# CONFIG_E1000E is not set
++# CONFIG_IGB is not set
++# CONFIG_IGBVF is not set
++# CONFIG_IXGB is not set
++CONFIG_IXGBE=m
++CONFIG_IXGBE_HWMON=y
++CONFIG_IXGBE_DCB=y
++# CONFIG_IXGBEVF is not set
++# CONFIG_I40E is not set
++# CONFIG_I40EVF is not set
++# CONFIG_FM10K is not set
++CONFIG_NET_VENDOR_I825XX=y
++# CONFIG_JME is not set
++CONFIG_NET_VENDOR_MARVELL=y
++# CONFIG_MVMDIO is not set
++# CONFIG_MVNETA_BM is not set
++# CONFIG_PXA168_ETH is not set
++# CONFIG_SKGE is not set
++# CONFIG_SKY2 is not set
++# CONFIG_NET_VENDOR_MEDIATEK is not set
++CONFIG_NET_VENDOR_MELLANOX=y
++# CONFIG_MLX4_EN is not set
++# CONFIG_MLX4_CORE is not set
++# CONFIG_MLX5_CORE is not set
++# CONFIG_MLXSW_CORE is not set
++CONFIG_NET_VENDOR_MICREL=y
++# CONFIG_KS8842 is not set
++# CONFIG_KS8851 is not set
++# CONFIG_KS8851_MLL is not set
++# CONFIG_KSZ884X_PCI is not set
++CONFIG_NET_VENDOR_MICROCHIP=y
++# CONFIG_ENC28J60 is not set
++# CONFIG_ENCX24J600 is not set
++CONFIG_NET_VENDOR_MYRI=y
++# CONFIG_MYRI10GE is not set
++# CONFIG_FEALNX is not set
++CONFIG_NET_VENDOR_NATSEMI=y
++# CONFIG_NATSEMI is not set
++# CONFIG_NS83820 is not set
++CONFIG_NET_VENDOR_NETRONOME=y
++# CONFIG_NFP_NETVF is not set
++CONFIG_NET_VENDOR_8390=y
++# CONFIG_NE2K_PCI is not set
++CONFIG_NET_VENDOR_NVIDIA=y
++# CONFIG_FORCEDETH is not set
++CONFIG_NET_VENDOR_OKI=y
++# CONFIG_ETHOC is not set
++CONFIG_NET_PACKET_ENGINE=y
++# CONFIG_HAMACHI is not set
++# CONFIG_YELLOWFIN is not set
++CONFIG_NET_VENDOR_QLOGIC=y
++# CONFIG_QLA3XXX is not set
++# CONFIG_QLCNIC is not set
++# CONFIG_QLGE is not set
++# CONFIG_NETXEN_NIC is not set
++# CONFIG_QED is not set
++CONFIG_NET_VENDOR_QUALCOMM=y
++# CONFIG_QCA7000 is not set
++CONFIG_NET_VENDOR_REALTEK=y
++# CONFIG_8139CP is not set
++# CONFIG_8139TOO is not set
++# CONFIG_R8169 is not set
++CONFIG_NET_VENDOR_RENESAS=y
++CONFIG_NET_VENDOR_RDC=y
++# CONFIG_R6040 is not set
++CONFIG_NET_VENDOR_ROCKER=y
++CONFIG_NET_VENDOR_SAMSUNG=y
++# CONFIG_SXGBE_ETH is not set
++CONFIG_NET_VENDOR_SEEQ=y
++CONFIG_NET_VENDOR_SILAN=y
++# CONFIG_SC92031 is not set
++CONFIG_NET_VENDOR_SIS=y
++# CONFIG_SIS900 is not set
++# CONFIG_SIS190 is not set
++# CONFIG_SFC is not set
++CONFIG_NET_VENDOR_SMSC=y
++CONFIG_SMC91X=y
++# CONFIG_EPIC100 is not set
++# CONFIG_SMSC911X is not set
++# CONFIG_SMSC9420 is not set
++CONFIG_NET_VENDOR_STMICRO=y
++# CONFIG_STMMAC_ETH is not set
++CONFIG_NET_VENDOR_SUN=y
++# CONFIG_HAPPYMEAL is not set
++# CONFIG_SUNGEM is not set
++# CONFIG_CASSINI is not set
++# CONFIG_NIU is not set
++CONFIG_NET_VENDOR_SYNOPSYS=y
++# CONFIG_SYNOPSYS_DWC_ETH_QOS is not set
++CONFIG_NET_VENDOR_TEHUTI=y
++# CONFIG_TEHUTI is not set
++CONFIG_NET_VENDOR_TI=y
++# CONFIG_TI_CPSW_ALE is not set
++# CONFIG_TLAN is not set
++CONFIG_NET_VENDOR_VIA=y
++# CONFIG_VIA_RHINE is not set
++# CONFIG_VIA_VELOCITY is not set
++CONFIG_NET_VENDOR_WIZNET=y
++# CONFIG_WIZNET_W5100 is not set
++# CONFIG_WIZNET_W5300 is not set
++CONFIG_FDDI=y
++# CONFIG_DEFXX is not set
++# CONFIG_SKFP is not set
++# CONFIG_HIPPI is not set
++# CONFIG_NET_SB1000 is not set
++CONFIG_PHYLIB=y
++
++#
++# MII PHY device drivers
++#
++# CONFIG_AQUANTIA_PHY is not set
++# CONFIG_AT803X_PHY is not set
++# CONFIG_AMD_PHY is not set
++# CONFIG_MARVELL_PHY is not set
++# CONFIG_DAVICOM_PHY is not set
++# CONFIG_QSEMI_PHY is not set
++# CONFIG_LXT_PHY is not set
++# CONFIG_CICADA_PHY is not set
++# CONFIG_VITESSE_PHY is not set
++# CONFIG_TERANETICS_PHY is not set
++# CONFIG_SMSC_PHY is not set
++# CONFIG_BROADCOM_PHY is not set
++# CONFIG_BCM7XXX_PHY is not set
++# CONFIG_BCM87XX_PHY is not set
++# CONFIG_ICPLUS_PHY is not set
++# CONFIG_REALTEK_PHY is not set
++# CONFIG_NATIONAL_PHY is not set
++# CONFIG_STE10XP is not set
++# CONFIG_LSI_ET1011C_PHY is not set
++# CONFIG_MICREL_PHY is not set
++# CONFIG_DP83848_PHY is not set
++# CONFIG_DP83867_PHY is not set
++# CONFIG_MICROCHIP_PHY is not set
++CONFIG_FIXED_PHY=y
++# CONFIG_MDIO_BITBANG is not set
++CONFIG_MDIO_CAVIUM=m
++# CONFIG_MDIO_OCTEON is not set
++CONFIG_MDIO_THUNDER=m
++# CONFIG_MDIO_BUS_MUX_GPIO is not set
++# CONFIG_MDIO_BUS_MUX_MMIOREG is not set
++# CONFIG_MDIO_BCM_UNIMAC is not set
++# CONFIG_MDIO_BCM_IPROC is not set
++# CONFIG_MICREL_KS8995MA is not set
++CONFIG_PPP=y
++# CONFIG_PPP_BSDCOMP is not set
++# CONFIG_PPP_DEFLATE is not set
++CONFIG_PPP_FILTER=y
++# CONFIG_PPP_MPPE is not set
++CONFIG_PPP_MULTILINK=y
++# CONFIG_PPPOE is not set
++# CONFIG_PPP_ASYNC is not set
++# CONFIG_PPP_SYNC_TTY is not set
++# CONFIG_SLIP is not set
++CONFIG_SLHC=y
++CONFIG_USB_NET_DRIVERS=m
++# CONFIG_USB_CATC is not set
++# CONFIG_USB_KAWETH is not set
++# CONFIG_USB_PEGASUS is not set
++# CONFIG_USB_RTL8150 is not set
++# CONFIG_USB_RTL8152 is not set
++# CONFIG_USB_LAN78XX is not set
++CONFIG_USB_USBNET=m
++CONFIG_USB_NET_AX8817X=m
++# CONFIG_USB_NET_AX88179_178A is not set
++# CONFIG_USB_NET_CDCETHER is not set
++# CONFIG_USB_NET_CDC_EEM is not set
++# CONFIG_USB_NET_CDC_NCM is not set
++# CONFIG_USB_NET_HUAWEI_CDC_NCM is not set
++# CONFIG_USB_NET_CDC_MBIM is not set
++# CONFIG_USB_NET_DM9601 is not set
++# CONFIG_USB_NET_SR9700 is not set
++# CONFIG_USB_NET_SR9800 is not set
++# CONFIG_USB_NET_SMSC75XX is not set
++# CONFIG_USB_NET_SMSC95XX is not set
++# CONFIG_USB_NET_GL620A is not set
++# CONFIG_USB_NET_NET1080 is not set
++# CONFIG_USB_NET_PLUSB is not set
++# CONFIG_USB_NET_MCS7830 is not set
++# CONFIG_USB_NET_RNDIS_HOST is not set
++# CONFIG_USB_NET_CDC_SUBSET is not set
++# CONFIG_USB_NET_ZAURUS is not set
++# CONFIG_USB_NET_CX82310_ETH is not set
++# CONFIG_USB_NET_KALMIA is not set
++# CONFIG_USB_NET_QMI_WWAN is not set
++# CONFIG_USB_HSO is not set
++# CONFIG_USB_NET_INT51X1 is not set
++# CONFIG_USB_IPHETH is not set
++# CONFIG_USB_SIERRA_NET is not set
++# CONFIG_USB_NET_CH9200 is not set
++CONFIG_WLAN=y
++CONFIG_WLAN_VENDOR_ADMTEK=y
++CONFIG_WLAN_VENDOR_ATH=y
++# CONFIG_ATH_DEBUG is not set
++CONFIG_ATH5K_PCI=y
++CONFIG_WLAN_VENDOR_ATMEL=y
++CONFIG_WLAN_VENDOR_BROADCOM=y
++CONFIG_WLAN_VENDOR_CISCO=y
++CONFIG_WLAN_VENDOR_INTEL=y
++CONFIG_WLAN_VENDOR_INTERSIL=y
++# CONFIG_HOSTAP is not set
++# CONFIG_PRISM54 is not set
++CONFIG_WLAN_VENDOR_MARVELL=y
++CONFIG_WLAN_VENDOR_MEDIATEK=y
++CONFIG_WLAN_VENDOR_RALINK=y
++CONFIG_WLAN_VENDOR_REALTEK=y
++CONFIG_WLAN_VENDOR_RSI=y
++CONFIG_WLAN_VENDOR_ST=y
++CONFIG_WLAN_VENDOR_TI=y
++CONFIG_WLAN_VENDOR_ZYDAS=y
++
++#
++# Enable WiMAX (Networking options) to see the WiMAX drivers
++#
++CONFIG_WAN=y
++# CONFIG_HDLC is not set
++# CONFIG_DLCI is not set
++CONFIG_XEN_NETDEV_FRONTEND=y
++# CONFIG_XEN_NETDEV_BACKEND is not set
++# CONFIG_VMXNET3 is not set
++# CONFIG_FUJITSU_ES is not set
++CONFIG_ISDN=y
++# CONFIG_ISDN_I4L is not set
++# CONFIG_ISDN_CAPI is not set
++# CONFIG_ISDN_DRV_GIGASET is not set
++# CONFIG_HYSDN is not set
++# CONFIG_MISDN is not set
++CONFIG_NVM=y
++# CONFIG_NVM_DEBUG is not set
++# CONFIG_NVM_GENNVM is not set
++# CONFIG_NVM_RRPC is not set
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++CONFIG_INPUT_LEDS=m
++# CONFIG_INPUT_FF_MEMLESS is not set
++# CONFIG_INPUT_POLLDEV is not set
++# CONFIG_INPUT_SPARSEKMAP is not set
++# CONFIG_INPUT_MATRIXKMAP is not set
++
++#
++# Userland interfaces
++#
++CONFIG_INPUT_MOUSEDEV=y
++CONFIG_INPUT_MOUSEDEV_PSAUX=y
++CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
++CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
++CONFIG_INPUT_JOYDEV=m
++CONFIG_INPUT_EVDEV=y
++# CONFIG_INPUT_EVBUG is not set
++
++#
++# Input Device Drivers
++#
++CONFIG_INPUT_KEYBOARD=y
++# CONFIG_KEYBOARD_ADP5520 is not set
++# CONFIG_KEYBOARD_ADP5588 is not set
++# CONFIG_KEYBOARD_ADP5589 is not set
++CONFIG_KEYBOARD_ATKBD=y
++# CONFIG_KEYBOARD_QT1070 is not set
++# CONFIG_KEYBOARD_QT2160 is not set
++# CONFIG_KEYBOARD_LKKBD is not set
++CONFIG_KEYBOARD_GPIO=m
++# CONFIG_KEYBOARD_GPIO_POLLED is not set
++# CONFIG_KEYBOARD_TCA6416 is not set
++# CONFIG_KEYBOARD_TCA8418 is not set
++# CONFIG_KEYBOARD_MATRIX is not set
++# CONFIG_KEYBOARD_LM8323 is not set
++# CONFIG_KEYBOARD_LM8333 is not set
++# CONFIG_KEYBOARD_MAX7359 is not set
++# CONFIG_KEYBOARD_MCS is not set
++# CONFIG_KEYBOARD_MPR121 is not set
++# CONFIG_KEYBOARD_NEWTON is not set
++# CONFIG_KEYBOARD_OPENCORES is not set
++# CONFIG_KEYBOARD_SAMSUNG is not set
++# CONFIG_KEYBOARD_STOWAWAY is not set
++# CONFIG_KEYBOARD_SUNKBD is not set
++# CONFIG_KEYBOARD_STMPE is not set
++# CONFIG_KEYBOARD_OMAP4 is not set
++# CONFIG_KEYBOARD_TC3589X is not set
++# CONFIG_KEYBOARD_TWL4030 is not set
++# CONFIG_KEYBOARD_XTKBD is not set
++# CONFIG_KEYBOARD_CAP11XX is not set
++# CONFIG_KEYBOARD_BCM is not set
++CONFIG_INPUT_MOUSE=y
++# CONFIG_MOUSE_PS2 is not set
++# CONFIG_MOUSE_SERIAL is not set
++# CONFIG_MOUSE_APPLETOUCH is not set
++# CONFIG_MOUSE_BCM5974 is not set
++# CONFIG_MOUSE_CYAPA is not set
++# CONFIG_MOUSE_ELAN_I2C is not set
++# CONFIG_MOUSE_VSXXXAA is not set
++# CONFIG_MOUSE_GPIO is not set
++# CONFIG_MOUSE_SYNAPTICS_I2C is not set
++# CONFIG_MOUSE_SYNAPTICS_USB is not set
++CONFIG_INPUT_JOYSTICK=y
++# CONFIG_JOYSTICK_ANALOG is not set
++# CONFIG_JOYSTICK_A3D is not set
++# CONFIG_JOYSTICK_ADI is not set
++# CONFIG_JOYSTICK_COBRA is not set
++# CONFIG_JOYSTICK_GF2K is not set
++# CONFIG_JOYSTICK_GRIP is not set
++# CONFIG_JOYSTICK_GRIP_MP is not set
++# CONFIG_JOYSTICK_GUILLEMOT is not set
++# CONFIG_JOYSTICK_INTERACT is not set
++# CONFIG_JOYSTICK_SIDEWINDER is not set
++# CONFIG_JOYSTICK_TMDC is not set
++# CONFIG_JOYSTICK_IFORCE is not set
++# CONFIG_JOYSTICK_WARRIOR is not set
++# CONFIG_JOYSTICK_MAGELLAN is not set
++# CONFIG_JOYSTICK_SPACEORB is not set
++# CONFIG_JOYSTICK_SPACEBALL is not set
++# CONFIG_JOYSTICK_STINGER is not set
++# CONFIG_JOYSTICK_TWIDJOY is not set
++# CONFIG_JOYSTICK_ZHENHUA is not set
++# CONFIG_JOYSTICK_AS5011 is not set
++# CONFIG_JOYSTICK_JOYDUMP is not set
++# CONFIG_JOYSTICK_XPAD is not set
++CONFIG_INPUT_TABLET=y
++# CONFIG_TABLET_USB_ACECAD is not set
++# CONFIG_TABLET_USB_AIPTEK is not set
++# CONFIG_TABLET_USB_GTCO is not set
++# CONFIG_TABLET_USB_HANWANG is not set
++# CONFIG_TABLET_USB_KBTAB is not set
++# CONFIG_TABLET_SERIAL_WACOM4 is not set
++CONFIG_INPUT_TOUCHSCREEN=y
++CONFIG_TOUCHSCREEN_PROPERTIES=y
++# CONFIG_TOUCHSCREEN_88PM860X is not set
++# CONFIG_TOUCHSCREEN_ADS7846 is not set
++# CONFIG_TOUCHSCREEN_AD7877 is not set
++# CONFIG_TOUCHSCREEN_AD7879 is not set
++# CONFIG_TOUCHSCREEN_AR1021_I2C is not set
++# CONFIG_TOUCHSCREEN_ATMEL_MXT is not set
++# CONFIG_TOUCHSCREEN_AUO_PIXCIR is not set
++# CONFIG_TOUCHSCREEN_BU21013 is not set
++# CONFIG_TOUCHSCREEN_CHIPONE_ICN8318 is not set
++# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set
++# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set
++# CONFIG_TOUCHSCREEN_CYTTSP4_CORE is not set
++# CONFIG_TOUCHSCREEN_DA9034 is not set
++# CONFIG_TOUCHSCREEN_DA9052 is not set
++# CONFIG_TOUCHSCREEN_DYNAPRO is not set
++# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set
++# CONFIG_TOUCHSCREEN_EETI is not set
++# CONFIG_TOUCHSCREEN_EGALAX is not set
++# CONFIG_TOUCHSCREEN_EGALAX_SERIAL is not set
++# CONFIG_TOUCHSCREEN_FT6236 is not set
++# CONFIG_TOUCHSCREEN_FUJITSU is not set
++# CONFIG_TOUCHSCREEN_GOODIX is not set
++# CONFIG_TOUCHSCREEN_ILI210X is not set
++# CONFIG_TOUCHSCREEN_IPROC is not set
++# CONFIG_TOUCHSCREEN_GUNZE is not set
++# CONFIG_TOUCHSCREEN_ELAN is not set
++# CONFIG_TOUCHSCREEN_ELO is not set
++# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
++# CONFIG_TOUCHSCREEN_WACOM_I2C is not set
++# CONFIG_TOUCHSCREEN_MAX11801 is not set
++# CONFIG_TOUCHSCREEN_MCS5000 is not set
++# CONFIG_TOUCHSCREEN_MMS114 is not set
++# CONFIG_TOUCHSCREEN_MELFAS_MIP4 is not set
++# CONFIG_TOUCHSCREEN_MTOUCH is not set
++# CONFIG_TOUCHSCREEN_IMX6UL_TSC is not set
++# CONFIG_TOUCHSCREEN_INEXIO is not set
++# CONFIG_TOUCHSCREEN_MK712 is not set
++# CONFIG_TOUCHSCREEN_PENMOUNT is not set
++# CONFIG_TOUCHSCREEN_EDT_FT5X06 is not set
++# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
++# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
++# CONFIG_TOUCHSCREEN_PIXCIR is not set
++# CONFIG_TOUCHSCREEN_WDT87XX_I2C is not set
++# CONFIG_TOUCHSCREEN_WM831X is not set
++# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
++# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
++# CONFIG_TOUCHSCREEN_TSC_SERIO is not set
++# CONFIG_TOUCHSCREEN_TSC2004 is not set
++# CONFIG_TOUCHSCREEN_TSC2005 is not set
++# CONFIG_TOUCHSCREEN_TSC2007 is not set
++# CONFIG_TOUCHSCREEN_PCAP is not set
++# CONFIG_TOUCHSCREEN_ST1232 is not set
++# CONFIG_TOUCHSCREEN_STMPE is not set
++# CONFIG_TOUCHSCREEN_SX8654 is not set
++# CONFIG_TOUCHSCREEN_TPS6507X is not set
++# CONFIG_TOUCHSCREEN_ZFORCE is not set
++# CONFIG_TOUCHSCREEN_ROHM_BU21023 is not set
++CONFIG_INPUT_MISC=y
++# CONFIG_INPUT_88PM860X_ONKEY is not set
++# CONFIG_INPUT_AD714X is not set
++# CONFIG_INPUT_BMA150 is not set
++# CONFIG_INPUT_E3X0_BUTTON is not set
++# CONFIG_INPUT_MAX77693_HAPTIC is not set
++# CONFIG_INPUT_MAX8925_ONKEY is not set
++# CONFIG_INPUT_MAX8997_HAPTIC is not set
++# CONFIG_INPUT_MMA8450 is not set
++# CONFIG_INPUT_MPU3050 is not set
++# CONFIG_INPUT_GP2A is not set
++# CONFIG_INPUT_GPIO_BEEPER is not set
++# CONFIG_INPUT_GPIO_TILT_POLLED is not set
++# CONFIG_INPUT_ATI_REMOTE2 is not set
++# CONFIG_INPUT_KEYSPAN_REMOTE is not set
++# CONFIG_INPUT_KXTJ9 is not set
++# CONFIG_INPUT_POWERMATE is not set
++# CONFIG_INPUT_YEALINK is not set
++# CONFIG_INPUT_CM109 is not set
++# CONFIG_INPUT_REGULATOR_HAPTIC is not set
++# CONFIG_INPUT_TWL4030_PWRBUTTON is not set
++# CONFIG_INPUT_TWL4030_VIBRA is not set
++# CONFIG_INPUT_TWL6040_VIBRA is not set
++CONFIG_INPUT_UINPUT=y
++# CONFIG_INPUT_PALMAS_PWRBUTTON is not set
++# CONFIG_INPUT_PCF8574 is not set
++# CONFIG_INPUT_PWM_BEEPER is not set
++# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set
++# CONFIG_INPUT_DA9052_ONKEY is not set
++# CONFIG_INPUT_DA9055_ONKEY is not set
++# CONFIG_INPUT_DA9063_ONKEY is not set
++# CONFIG_INPUT_WM831X_ON is not set
++# CONFIG_INPUT_PCAP is not set
++# CONFIG_INPUT_ADXL34X is not set
++# CONFIG_INPUT_IMS_PCU is not set
++# CONFIG_INPUT_CMA3000 is not set
++# CONFIG_INPUT_XEN_KBDDEV_FRONTEND is not set
++# CONFIG_INPUT_SOC_BUTTON_ARRAY is not set
++# CONFIG_INPUT_DRV260X_HAPTICS is not set
++# CONFIG_INPUT_DRV2665_HAPTICS is not set
++# CONFIG_INPUT_DRV2667_HAPTICS is not set
++# CONFIG_RMI4_CORE is not set
++
++#
++# Hardware I/O ports
++#
++CONFIG_SERIO=y
++# CONFIG_SERIO_SERPORT is not set
++# CONFIG_SERIO_AMBAKMI is not set
++# CONFIG_SERIO_PCIPS2 is not set
++CONFIG_SERIO_LIBPS2=y
++# CONFIG_SERIO_RAW is not set
++# CONFIG_SERIO_ALTERA_PS2 is not set
++# CONFIG_SERIO_PS2MULT is not set
++# CONFIG_SERIO_ARC_PS2 is not set
++# CONFIG_SERIO_APBPS2 is not set
++# CONFIG_USERIO is not set
++# CONFIG_GAMEPORT is not set
++
++#
++# Character devices
++#
++CONFIG_TTY=y
++CONFIG_VT=y
++CONFIG_CONSOLE_TRANSLATIONS=y
++CONFIG_VT_CONSOLE=y
++CONFIG_VT_CONSOLE_SLEEP=y
++CONFIG_HW_CONSOLE=y
++CONFIG_VT_HW_CONSOLE_BINDING=y
++CONFIG_UNIX98_PTYS=y
++# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=0
++CONFIG_SERIAL_NONSTANDARD=y
++# CONFIG_ROCKETPORT is not set
++# CONFIG_CYCLADES is not set
++# CONFIG_MOXA_INTELLIO is not set
++# CONFIG_MOXA_SMARTIO is not set
++# CONFIG_SYNCLINKMP is not set
++# CONFIG_SYNCLINK_GT is not set
++# CONFIG_NOZOMI is not set
++# CONFIG_ISI is not set
++# CONFIG_N_HDLC is not set
++# CONFIG_N_GSM is not set
++# CONFIG_TRACE_SINK is not set
++CONFIG_DEVMEM=y
++# CONFIG_DEVKMEM is not set
++
++#
++# Serial drivers
++#
++CONFIG_SERIAL_EARLYCON=y
++CONFIG_SERIAL_8250=y
++# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
++CONFIG_SERIAL_8250_PNP=y
++CONFIG_SERIAL_8250_CONSOLE=y
++CONFIG_SERIAL_8250_DMA=y
++CONFIG_SERIAL_8250_PCI=y
++CONFIG_SERIAL_8250_NR_UARTS=48
++CONFIG_SERIAL_8250_RUNTIME_UARTS=32
++CONFIG_SERIAL_8250_EXTENDED=y
++CONFIG_SERIAL_8250_MANY_PORTS=y
++CONFIG_SERIAL_8250_SHARE_IRQ=y
++# CONFIG_SERIAL_8250_DETECT_IRQ is not set
++CONFIG_SERIAL_8250_RSA=y
++CONFIG_SERIAL_8250_FSL=y
++CONFIG_SERIAL_8250_DW=y
++CONFIG_SERIAL_8250_RT288X=y
++# CONFIG_SERIAL_8250_FINTEK is not set
++CONFIG_SERIAL_8250_MT6577=y
++# CONFIG_SERIAL_8250_MID is not set
++# CONFIG_SERIAL_8250_MOXA is not set
++CONFIG_SERIAL_OF_PLATFORM=y
++
++#
++# Non-8250 serial port support
++#
++# CONFIG_SERIAL_AMBA_PL010 is not set
++CONFIG_SERIAL_AMBA_PL011=y
++CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
++CONFIG_SERIAL_EARLYCON_ARM_SEMIHOST=y
++CONFIG_SERIAL_KGDB_NMI=y
++# CONFIG_SERIAL_MAX3100 is not set
++CONFIG_SERIAL_MAX310X=y
++# CONFIG_SERIAL_UARTLITE is not set
++CONFIG_SERIAL_CORE=y
++CONFIG_SERIAL_CORE_CONSOLE=y
++CONFIG_CONSOLE_POLL=y
++# CONFIG_SERIAL_JSM is not set
++CONFIG_SERIAL_MSM=y
++CONFIG_SERIAL_MSM_CONSOLE=y
++CONFIG_SERIAL_SCCNXP=y
++CONFIG_SERIAL_SCCNXP_CONSOLE=y
++# CONFIG_SERIAL_SC16IS7XX is not set
++# CONFIG_SERIAL_ALTERA_JTAGUART is not set
++# CONFIG_SERIAL_ALTERA_UART is not set
++# CONFIG_SERIAL_IFX6X60 is not set
++# CONFIG_SERIAL_XILINX_PS_UART is not set
++# CONFIG_SERIAL_ARC is not set
++# CONFIG_SERIAL_RP2 is not set
++# CONFIG_SERIAL_FSL_LPUART is not set
++# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set
++# CONFIG_SERIAL_SPRD is not set
++# CONFIG_SERIAL_MVEBU_UART is not set
++CONFIG_TTY_PRINTK=y
++CONFIG_HVC_DRIVER=y
++CONFIG_HVC_IRQ=y
++CONFIG_HVC_XEN=y
++CONFIG_HVC_XEN_FRONTEND=y
++# CONFIG_HVC_DCC is not set
++CONFIG_VIRTIO_CONSOLE=y
++CONFIG_IPMI_HANDLER=m
++# CONFIG_IPMI_PANIC_EVENT is not set
++# CONFIG_IPMI_DEVICE_INTERFACE is not set
++# CONFIG_IPMI_SI is not set
++# CONFIG_IPMI_SSIF is not set
++# CONFIG_IPMI_WATCHDOG is not set
++# CONFIG_IPMI_POWEROFF is not set
++CONFIG_HW_RANDOM=y
++# CONFIG_HW_RANDOM_TIMERIOMEM is not set
++# CONFIG_HW_RANDOM_IPROC_RNG200 is not set
++# CONFIG_HW_RANDOM_VIRTIO is not set
++# CONFIG_HW_RANDOM_TPM is not set
++# CONFIG_HW_RANDOM_MSM is not set
++# CONFIG_HW_RANDOM_XGENE is not set
++# CONFIG_R3964 is not set
++# CONFIG_APPLICOM is not set
++
++#
++# PCMCIA character devices
++#
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_HPET is not set
++CONFIG_TCG_TPM=y
++# CONFIG_TCG_TIS_I2C_ATMEL is not set
++# CONFIG_TCG_TIS_I2C_INFINEON is not set
++# CONFIG_TCG_TIS_I2C_NUVOTON is not set
++# CONFIG_TCG_ATMEL is not set
++# CONFIG_TCG_INFINEON is not set
++# CONFIG_TCG_XEN is not set
++# CONFIG_TCG_TIS_ST33ZP24 is not set
++CONFIG_DEVPORT=y
++# CONFIG_XILLYBUS is not set
++
++#
++# I2C support
++#
++CONFIG_I2C=y
++CONFIG_ACPI_I2C_OPREGION=y
++CONFIG_I2C_BOARDINFO=y
++CONFIG_I2C_COMPAT=y
++CONFIG_I2C_CHARDEV=y
++# CONFIG_I2C_MUX is not set
++CONFIG_I2C_HELPER_AUTO=y
++CONFIG_I2C_ALGOBIT=m
++
++#
++# I2C Hardware Bus support
++#
++
++#
++# PC SMBus host controller drivers
++#
++# CONFIG_I2C_ALI1535 is not set
++# CONFIG_I2C_ALI1563 is not set
++# CONFIG_I2C_ALI15X3 is not set
++# CONFIG_I2C_AMD756 is not set
++# CONFIG_I2C_AMD8111 is not set
++# CONFIG_I2C_I801 is not set
++# CONFIG_I2C_ISCH is not set
++# CONFIG_I2C_PIIX4 is not set
++# CONFIG_I2C_NFORCE2 is not set
++# CONFIG_I2C_SIS5595 is not set
++# CONFIG_I2C_SIS630 is not set
++# CONFIG_I2C_SIS96X is not set
++# CONFIG_I2C_VIA is not set
++# CONFIG_I2C_VIAPRO is not set
++
++#
++# ACPI drivers
++#
++# CONFIG_I2C_SCMI is not set
++
++#
++# I2C system bus drivers (mostly embedded / system-on-chip)
++#
++# CONFIG_I2C_BCM_IPROC is not set
++# CONFIG_I2C_CADENCE is not set
++# CONFIG_I2C_CBUS_GPIO is not set
++# CONFIG_I2C_DESIGNWARE_PLATFORM is not set
++# CONFIG_I2C_DESIGNWARE_PCI is not set
++# CONFIG_I2C_EMEV2 is not set
++# CONFIG_I2C_GPIO is not set
++CONFIG_I2C_IMX=y
++# CONFIG_I2C_MT65XX is not set
++# CONFIG_I2C_NOMADIK is not set
++# CONFIG_I2C_OCORES is not set
++# CONFIG_I2C_PCA_PLATFORM is not set
++# CONFIG_I2C_PXA_PCI is not set
++# CONFIG_I2C_QUP is not set
++# CONFIG_I2C_RK3X is not set
++# CONFIG_I2C_SIMTEC is not set
++# CONFIG_I2C_VERSATILE is not set
++# CONFIG_I2C_XILINX is not set
++
++#
++# External I2C/SMBus adapter drivers
++#
++# CONFIG_I2C_DIOLAN_U2C is not set
++# CONFIG_I2C_PARPORT_LIGHT is not set
++# CONFIG_I2C_ROBOTFUZZ_OSIF is not set
++# CONFIG_I2C_TAOS_EVM is not set
++# CONFIG_I2C_TINY_USB is not set
++
++#
++# Other I2C/SMBus bus drivers
++#
++# CONFIG_I2C_XGENE_SLIMPRO is not set
++# CONFIG_I2C_STUB is not set
++CONFIG_I2C_SLAVE=y
++# CONFIG_I2C_SLAVE_EEPROM is not set
++# CONFIG_I2C_DEBUG_CORE is not set
++# CONFIG_I2C_DEBUG_ALGO is not set
++# CONFIG_I2C_DEBUG_BUS is not set
++CONFIG_SPI=y
++# CONFIG_SPI_DEBUG is not set
++CONFIG_SPI_MASTER=y
++
++#
++# SPI Master Controller Drivers
++#
++# CONFIG_SPI_ALTERA is not set
++# CONFIG_SPI_AXI_SPI_ENGINE is not set
++# CONFIG_SPI_BITBANG is not set
++# CONFIG_SPI_CADENCE is not set
++# CONFIG_SPI_DESIGNWARE is not set
++# CONFIG_SPI_GPIO is not set
++CONFIG_SPI_FSL_LIB=y
++CONFIG_SPI_FSL_SPI=y
++# CONFIG_SPI_FSL_DSPI is not set
++# CONFIG_SPI_MT65XX is not set
++# CONFIG_SPI_OC_TINY is not set
++# CONFIG_SPI_PL022 is not set
++# CONFIG_SPI_PXA2XX is not set
++# CONFIG_SPI_PXA2XX_PCI is not set
++# CONFIG_SPI_ROCKCHIP is not set
++# CONFIG_SPI_QUP is not set
++# CONFIG_SPI_SC18IS602 is not set
++# CONFIG_SPI_XCOMM is not set
++# CONFIG_SPI_XILINX is not set
++# CONFIG_SPI_ZYNQMP_GQSPI is not set
++
++#
++# SPI Protocol Masters
++#
++# CONFIG_SPI_SPIDEV is not set
++# CONFIG_SPI_LOOPBACK_TEST is not set
++# CONFIG_SPI_TLE62X0 is not set
++# CONFIG_SPMI is not set
++# CONFIG_HSI is not set
++
++#
++# PPS support
++#
++CONFIG_PPS=m
++# CONFIG_PPS_DEBUG is not set
++
++#
++# PPS clients support
++#
++# CONFIG_PPS_CLIENT_KTIMER is not set
++# CONFIG_PPS_CLIENT_LDISC is not set
++# CONFIG_PPS_CLIENT_GPIO is not set
++
++#
++# PPS generators support
++#
++
++#
++# PTP clock support
++#
++CONFIG_PTP_1588_CLOCK=m
++
++#
++# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks.
++#
++CONFIG_PINCTRL=y
++
++#
++# Pin controllers
++#
++CONFIG_PINMUX=y
++CONFIG_PINCONF=y
++CONFIG_GENERIC_PINCONF=y
++# CONFIG_DEBUG_PINCTRL is not set
++CONFIG_PINCTRL_AS3722=y
++CONFIG_PINCTRL_AMD=y
++CONFIG_PINCTRL_SINGLE=y
++CONFIG_PINCTRL_PALMAS=y
++CONFIG_PINCTRL_IPROC_GPIO=y
++CONFIG_PINCTRL_BERLIN=y
++# CONFIG_PINCTRL_BERLIN_BG2 is not set
++# CONFIG_PINCTRL_BERLIN_BG2CD is not set
++# CONFIG_PINCTRL_BERLIN_BG2Q is not set
++CONFIG_PINCTRL_BERLIN_BG4CT=y
++CONFIG_PINCTRL_BAYTRAIL=y
++# CONFIG_PINCTRL_CHERRYVIEW is not set
++# CONFIG_PINCTRL_BROXTON is not set
++# CONFIG_PINCTRL_SUNRISEPOINT is not set
++# CONFIG_PINCTRL_APQ8064 is not set
++# CONFIG_PINCTRL_APQ8084 is not set
++# CONFIG_PINCTRL_IPQ4019 is not set
++# CONFIG_PINCTRL_IPQ8064 is not set
++# CONFIG_PINCTRL_MSM8660 is not set
++# CONFIG_PINCTRL_MSM8960 is not set
++# CONFIG_PINCTRL_MSM8X74 is not set
++# CONFIG_PINCTRL_MSM8916 is not set
++# CONFIG_PINCTRL_MSM8996 is not set
++# CONFIG_PINCTRL_QDF2XXX is not set
++# CONFIG_PINCTRL_QCOM_SSBI_PMIC is not set
++CONFIG_PINCTRL_MTK=y
++# CONFIG_PINCTRL_MT2701 is not set
++# CONFIG_PINCTRL_MT7623 is not set
++# CONFIG_PINCTRL_MT8135 is not set
++# CONFIG_PINCTRL_MT8127 is not set
++CONFIG_PINCTRL_MT8173=y
++# CONFIG_PINCTRL_MT6397 is not set
++CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
++CONFIG_ARCH_REQUIRE_GPIOLIB=y
++CONFIG_GPIOLIB=y
++CONFIG_GPIO_DEVRES=y
++CONFIG_OF_GPIO=y
++CONFIG_GPIO_ACPI=y
++CONFIG_GPIOLIB_IRQCHIP=y
++# CONFIG_DEBUG_GPIO is not set
++CONFIG_GPIO_SYSFS=y
++CONFIG_GPIO_GENERIC=y
++
++#
++# Memory mapped GPIO drivers
++#
++# CONFIG_GPIO_74XX_MMIO is not set
++# CONFIG_GPIO_ALTERA is not set
++# CONFIG_GPIO_AMDPT is not set
++# CONFIG_GPIO_DWAPB is not set
++CONFIG_GPIO_GENERIC_PLATFORM=y
++# CONFIG_GPIO_GRGPIO is not set
++# CONFIG_GPIO_MPC8XXX is not set
++CONFIG_GPIO_PL061=y
++# CONFIG_GPIO_SYSCON is not set
++# CONFIG_GPIO_VX855 is not set
++CONFIG_GPIO_XGENE=y
++# CONFIG_GPIO_XGENE_SB is not set
++CONFIG_GPIO_XILINX=y
++# CONFIG_GPIO_ZYNQ is not set
++CONFIG_GPIO_ZX=y
++
++#
++# I2C GPIO expanders
++#
++# CONFIG_GPIO_ADP5588 is not set
++# CONFIG_GPIO_ADNP is not set
++# CONFIG_GPIO_MAX7300 is not set
++# CONFIG_GPIO_MAX732X is not set
++# CONFIG_GPIO_PCA953X is not set
++# CONFIG_GPIO_PCF857X is not set
++CONFIG_GPIO_SX150X=y
++# CONFIG_GPIO_TPIC2810 is not set
++
++#
++# MFD GPIO expanders
++#
++# CONFIG_GPIO_ADP5520 is not set
++# CONFIG_GPIO_CRYSTAL_COVE is not set
++# CONFIG_GPIO_DA9052 is not set
++# CONFIG_GPIO_DA9055 is not set
++CONFIG_GPIO_PALMAS=y
++CONFIG_GPIO_RC5T583=y
++CONFIG_GPIO_STMPE=y
++CONFIG_GPIO_TC3589X=y
++CONFIG_GPIO_TPS6586X=y
++CONFIG_GPIO_TPS65910=y
++# CONFIG_GPIO_TPS65912 is not set
++# CONFIG_GPIO_TWL4030 is not set
++# CONFIG_GPIO_TWL6040 is not set
++# CONFIG_GPIO_WM831X is not set
++# CONFIG_GPIO_WM8350 is not set
++
++#
++# PCI GPIO expanders
++#
++# CONFIG_GPIO_AMD8111 is not set
++# CONFIG_GPIO_BT8XX is not set
++# CONFIG_GPIO_ML_IOH is not set
++# CONFIG_GPIO_RDC321X is not set
++
++#
++# SPI GPIO expanders
++#
++# CONFIG_GPIO_74X164 is not set
++# CONFIG_GPIO_MAX7301 is not set
++# CONFIG_GPIO_MC33880 is not set
++# CONFIG_GPIO_PISOSR is not set
++
++#
++# SPI or I2C GPIO expanders
++#
++# CONFIG_GPIO_MCP23S08 is not set
++
++#
++# USB GPIO expanders
++#
++# CONFIG_W1 is not set
++CONFIG_POWER_SUPPLY=y
++# CONFIG_POWER_SUPPLY_DEBUG is not set
++# CONFIG_PDA_POWER is not set
++# CONFIG_MAX8925_POWER is not set
++# CONFIG_WM831X_BACKUP is not set
++# CONFIG_WM831X_POWER is not set
++# CONFIG_WM8350_POWER is not set
++# CONFIG_TEST_POWER is not set
++# CONFIG_BATTERY_88PM860X is not set
++# CONFIG_BATTERY_DS2780 is not set
++# CONFIG_BATTERY_DS2781 is not set
++# CONFIG_BATTERY_DS2782 is not set
++# CONFIG_BATTERY_SBS is not set
++# CONFIG_BATTERY_BQ27XXX is not set
++# CONFIG_BATTERY_DA9030 is not set
++# CONFIG_BATTERY_DA9052 is not set
++# CONFIG_BATTERY_MAX17040 is not set
++# CONFIG_BATTERY_MAX17042 is not set
++# CONFIG_CHARGER_MAX8903 is not set
++# CONFIG_CHARGER_LP8727 is not set
++# CONFIG_CHARGER_GPIO is not set
++CONFIG_CHARGER_MANAGER=y
++# CONFIG_CHARGER_MAX14577 is not set
++# CONFIG_CHARGER_MAX77693 is not set
++# CONFIG_CHARGER_BQ2415X is not set
++# CONFIG_CHARGER_BQ24190 is not set
++# CONFIG_CHARGER_BQ24257 is not set
++# CONFIG_CHARGER_BQ24735 is not set
++# CONFIG_CHARGER_BQ25890 is not set
++# CONFIG_CHARGER_SMB347 is not set
++# CONFIG_CHARGER_TPS65090 is not set
++# CONFIG_CHARGER_TPS65217 is not set
++# CONFIG_BATTERY_GAUGE_LTC2941 is not set
++# CONFIG_CHARGER_RT9455 is not set
++CONFIG_POWER_RESET=y
++CONFIG_POWER_RESET_AS3722=y
++CONFIG_POWER_RESET_GPIO=y
++CONFIG_POWER_RESET_GPIO_RESTART=y
++CONFIG_POWER_RESET_HISI=y
++CONFIG_POWER_RESET_MSM=y
++CONFIG_POWER_RESET_LTC2952=y
++CONFIG_POWER_RESET_RESTART=y
++CONFIG_POWER_RESET_VEXPRESS=y
++# CONFIG_POWER_RESET_XGENE is not set
++CONFIG_POWER_RESET_SYSCON=y
++CONFIG_POWER_RESET_SYSCON_POWEROFF=y
++CONFIG_POWER_AVS=y
++CONFIG_HWMON=y
++# CONFIG_HWMON_VID is not set
++# CONFIG_HWMON_DEBUG_CHIP is not set
++
++#
++# Native drivers
++#
++# CONFIG_SENSORS_AD7314 is not set
++# CONFIG_SENSORS_AD7414 is not set
++# CONFIG_SENSORS_AD7418 is not set
++# CONFIG_SENSORS_ADM1021 is not set
++# CONFIG_SENSORS_ADM1025 is not set
++# CONFIG_SENSORS_ADM1026 is not set
++# CONFIG_SENSORS_ADM1029 is not set
++# CONFIG_SENSORS_ADM1031 is not set
++# CONFIG_SENSORS_ADM9240 is not set
++# CONFIG_SENSORS_ADT7310 is not set
++# CONFIG_SENSORS_ADT7410 is not set
++# CONFIG_SENSORS_ADT7411 is not set
++# CONFIG_SENSORS_ADT7462 is not set
++# CONFIG_SENSORS_ADT7470 is not set
++# CONFIG_SENSORS_ADT7475 is not set
++# CONFIG_SENSORS_ASC7621 is not set
++# CONFIG_SENSORS_ATXP1 is not set
++# CONFIG_SENSORS_DS620 is not set
++# CONFIG_SENSORS_DS1621 is not set
++# CONFIG_SENSORS_DA9052_ADC is not set
++# CONFIG_SENSORS_DA9055 is not set
++# CONFIG_SENSORS_I5K_AMB is not set
++# CONFIG_SENSORS_F71805F is not set
++# CONFIG_SENSORS_F71882FG is not set
++# CONFIG_SENSORS_F75375S is not set
++# CONFIG_SENSORS_GL518SM is not set
++# CONFIG_SENSORS_GL520SM is not set
++# CONFIG_SENSORS_G760A is not set
++# CONFIG_SENSORS_G762 is not set
++# CONFIG_SENSORS_GPIO_FAN is not set
++# CONFIG_SENSORS_HIH6130 is not set
++# CONFIG_SENSORS_IBMAEM is not set
++# CONFIG_SENSORS_IBMPEX is not set
++# CONFIG_SENSORS_IT87 is not set
++# CONFIG_SENSORS_JC42 is not set
++# CONFIG_SENSORS_POWR1220 is not set
++# CONFIG_SENSORS_LINEAGE is not set
++# CONFIG_SENSORS_LTC2945 is not set
++# CONFIG_SENSORS_LTC2990 is not set
++# CONFIG_SENSORS_LTC4151 is not set
++# CONFIG_SENSORS_LTC4215 is not set
++# CONFIG_SENSORS_LTC4222 is not set
++# CONFIG_SENSORS_LTC4245 is not set
++# CONFIG_SENSORS_LTC4260 is not set
++# CONFIG_SENSORS_LTC4261 is not set
++# CONFIG_SENSORS_MAX1111 is not set
++# CONFIG_SENSORS_MAX16065 is not set
++# CONFIG_SENSORS_MAX1619 is not set
++# CONFIG_SENSORS_MAX1668 is not set
++# CONFIG_SENSORS_MAX197 is not set
++# CONFIG_SENSORS_MAX6639 is not set
++# CONFIG_SENSORS_MAX6642 is not set
++# CONFIG_SENSORS_MAX6650 is not set
++# CONFIG_SENSORS_MAX6697 is not set
++# CONFIG_SENSORS_MAX31790 is not set
++# CONFIG_SENSORS_MCP3021 is not set
++# CONFIG_SENSORS_ADCXX is not set
++# CONFIG_SENSORS_LM63 is not set
++# CONFIG_SENSORS_LM70 is not set
++# CONFIG_SENSORS_LM73 is not set
++# CONFIG_SENSORS_LM75 is not set
++# CONFIG_SENSORS_LM77 is not set
++# CONFIG_SENSORS_LM78 is not set
++# CONFIG_SENSORS_LM80 is not set
++# CONFIG_SENSORS_LM83 is not set
++# CONFIG_SENSORS_LM85 is not set
++# CONFIG_SENSORS_LM87 is not set
++# CONFIG_SENSORS_LM90 is not set
++# CONFIG_SENSORS_LM92 is not set
++# CONFIG_SENSORS_LM93 is not set
++# CONFIG_SENSORS_LM95234 is not set
++# CONFIG_SENSORS_LM95241 is not set
++# CONFIG_SENSORS_LM95245 is not set
++# CONFIG_SENSORS_PC87360 is not set
++# CONFIG_SENSORS_PC87427 is not set
++# CONFIG_SENSORS_NTC_THERMISTOR is not set
++# CONFIG_SENSORS_NCT6683 is not set
++# CONFIG_SENSORS_NCT6775 is not set
++# CONFIG_SENSORS_NCT7802 is not set
++# CONFIG_SENSORS_NCT7904 is not set
++# CONFIG_SENSORS_PCF8591 is not set
++# CONFIG_PMBUS is not set
++# CONFIG_SENSORS_PWM_FAN is not set
++# CONFIG_SENSORS_SHT15 is not set
++# CONFIG_SENSORS_SHT21 is not set
++# CONFIG_SENSORS_SHTC1 is not set
++# CONFIG_SENSORS_SIS5595 is not set
++# CONFIG_SENSORS_DME1737 is not set
++# CONFIG_SENSORS_EMC1403 is not set
++# CONFIG_SENSORS_EMC2103 is not set
++# CONFIG_SENSORS_EMC6W201 is not set
++# CONFIG_SENSORS_SMSC47M1 is not set
++# CONFIG_SENSORS_SMSC47M192 is not set
++# CONFIG_SENSORS_SMSC47B397 is not set
++# CONFIG_SENSORS_SCH56XX_COMMON is not set
++# CONFIG_SENSORS_SCH5627 is not set
++# CONFIG_SENSORS_SCH5636 is not set
++# CONFIG_SENSORS_SMM665 is not set
++# CONFIG_SENSORS_ADC128D818 is not set
++# CONFIG_SENSORS_ADS1015 is not set
++# CONFIG_SENSORS_ADS7828 is not set
++# CONFIG_SENSORS_ADS7871 is not set
++# CONFIG_SENSORS_AMC6821 is not set
++# CONFIG_SENSORS_INA209 is not set
++# CONFIG_SENSORS_INA2XX is not set
++# CONFIG_SENSORS_TC74 is not set
++# CONFIG_SENSORS_THMC50 is not set
++# CONFIG_SENSORS_TMP102 is not set
++# CONFIG_SENSORS_TMP103 is not set
++# CONFIG_SENSORS_TMP401 is not set
++# CONFIG_SENSORS_TMP421 is not set
++# CONFIG_SENSORS_VEXPRESS is not set
++# CONFIG_SENSORS_VIA686A is not set
++# CONFIG_SENSORS_VT1211 is not set
++# CONFIG_SENSORS_VT8231 is not set
++# CONFIG_SENSORS_W83781D is not set
++# CONFIG_SENSORS_W83791D is not set
++# CONFIG_SENSORS_W83792D is not set
++# CONFIG_SENSORS_W83793 is not set
++# CONFIG_SENSORS_W83795 is not set
++# CONFIG_SENSORS_W83L785TS is not set
++# CONFIG_SENSORS_W83L786NG is not set
++# CONFIG_SENSORS_W83627HF is not set
++# CONFIG_SENSORS_W83627EHF is not set
++# CONFIG_SENSORS_WM831X is not set
++# CONFIG_SENSORS_WM8350 is not set
++
++#
++# ACPI drivers
++#
++# CONFIG_SENSORS_ACPI_POWER is not set
++CONFIG_THERMAL=y
++CONFIG_THERMAL_HWMON=y
++CONFIG_THERMAL_OF=y
++CONFIG_THERMAL_WRITABLE_TRIPS=y
++CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
++# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set
++# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set
++# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set
++CONFIG_THERMAL_GOV_FAIR_SHARE=y
++CONFIG_THERMAL_GOV_STEP_WISE=y
++CONFIG_THERMAL_GOV_BANG_BANG=y
++CONFIG_THERMAL_GOV_USER_SPACE=y
++CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
++CONFIG_CPU_THERMAL=y
++# CONFIG_CLOCK_THERMAL is not set
++CONFIG_DEVFREQ_THERMAL=y
++CONFIG_THERMAL_EMULATION=y
++# CONFIG_HISI_THERMAL is not set
++# CONFIG_IMX_THERMAL is not set
++# CONFIG_MTK_THERMAL is not set
++CONFIG_WATCHDOG=y
++CONFIG_WATCHDOG_CORE=y
++# CONFIG_WATCHDOG_NOWAYOUT is not set
++# CONFIG_WATCHDOG_SYSFS is not set
++
++#
++# Watchdog Device Drivers
++#
++# CONFIG_SOFT_WATCHDOG is not set
++# CONFIG_DA9052_WATCHDOG is not set
++# CONFIG_DA9055_WATCHDOG is not set
++# CONFIG_DA9063_WATCHDOG is not set
++# CONFIG_GPIO_WATCHDOG is not set
++# CONFIG_WM831X_WATCHDOG is not set
++# CONFIG_WM8350_WATCHDOG is not set
++# CONFIG_XILINX_WATCHDOG is not set
++# CONFIG_ZIIRAVE_WATCHDOG is not set
++# CONFIG_ARM_SP805_WATCHDOG is not set
++# CONFIG_ARM_SBSA_WATCHDOG is not set
++# CONFIG_CADENCE_WATCHDOG is not set
++# CONFIG_DW_WATCHDOG is not set
++# CONFIG_TWL4030_WATCHDOG is not set
++# CONFIG_MAX63XX_WATCHDOG is not set
++# CONFIG_IMX2_WDT is not set
++# CONFIG_QCOM_WDT is not set
++# CONFIG_MEDIATEK_WATCHDOG is not set
++# CONFIG_ALIM7101_WDT is not set
++# CONFIG_I6300ESB_WDT is not set
++# CONFIG_MEN_A21_WDT is not set
++# CONFIG_XEN_WDT is not set
++
++#
++# PCI-based Watchdog Cards
++#
++# CONFIG_PCIPCWATCHDOG is not set
++# CONFIG_WDTPCI is not set
++
++#
++# USB-based Watchdog Cards
++#
++# CONFIG_USBPCWATCHDOG is not set
++CONFIG_SSB_POSSIBLE=y
++
++#
++# Sonics Silicon Backplane
++#
++# CONFIG_SSB is not set
++CONFIG_BCMA_POSSIBLE=y
++
++#
++# Broadcom specific AMBA
++#
++# CONFIG_BCMA is not set
++
++#
++# Multifunction device drivers
++#
++CONFIG_MFD_CORE=y
++# CONFIG_MFD_ACT8945A is not set
++CONFIG_MFD_AS3711=y
++CONFIG_MFD_AS3722=y
++CONFIG_PMIC_ADP5520=y
++CONFIG_MFD_AAT2870_CORE=y
++# CONFIG_MFD_ATMEL_FLEXCOM is not set
++# CONFIG_MFD_ATMEL_HLCDC is not set
++# CONFIG_MFD_BCM590XX is not set
++# CONFIG_MFD_AXP20X_I2C is not set
++CONFIG_PMIC_DA903X=y
++CONFIG_PMIC_DA9052=y
++CONFIG_MFD_DA9052_SPI=y
++CONFIG_MFD_DA9052_I2C=y
++CONFIG_MFD_DA9055=y
++# CONFIG_MFD_DA9062 is not set
++CONFIG_MFD_DA9063=y
++# CONFIG_MFD_DA9150 is not set
++# CONFIG_MFD_DLN2 is not set
++# CONFIG_MFD_MC13XXX_SPI is not set
++# CONFIG_MFD_MC13XXX_I2C is not set
++# CONFIG_MFD_HI6421_PMIC is not set
++# CONFIG_HTC_PASIC3 is not set
++CONFIG_HTC_I2CPLD=y
++# CONFIG_LPC_ICH is not set
++# CONFIG_LPC_SCH is not set
++CONFIG_INTEL_SOC_PMIC=y
++# CONFIG_MFD_JANZ_CMODIO is not set
++# CONFIG_MFD_KEMPLD is not set
++# CONFIG_MFD_88PM800 is not set
++# CONFIG_MFD_88PM805 is not set
++CONFIG_MFD_88PM860X=y
++CONFIG_MFD_MAX14577=y
++CONFIG_MFD_MAX77686=y
++CONFIG_MFD_MAX77693=y
++CONFIG_MFD_MAX77843=y
++# CONFIG_MFD_MAX8907 is not set
++CONFIG_MFD_MAX8925=y
++CONFIG_MFD_MAX8997=y
++CONFIG_MFD_MAX8998=y
++# CONFIG_MFD_MT6397 is not set
++# CONFIG_MFD_MENF21BMC is not set
++CONFIG_EZX_PCAP=y
++# CONFIG_MFD_VIPERBOARD is not set
++# CONFIG_MFD_RETU is not set
++# CONFIG_MFD_PCF50633 is not set
++# CONFIG_MFD_QCOM_RPM is not set
++# CONFIG_MFD_RDC321X is not set
++# CONFIG_MFD_RTSX_PCI is not set
++# CONFIG_MFD_RT5033 is not set
++# CONFIG_MFD_RTSX_USB is not set
++CONFIG_MFD_RC5T583=y
++# CONFIG_MFD_RK808 is not set
++# CONFIG_MFD_RN5T618 is not set
++CONFIG_MFD_SEC_CORE=y
++# CONFIG_MFD_SI476X_CORE is not set
++# CONFIG_MFD_SM501 is not set
++# CONFIG_MFD_SKY81452 is not set
++CONFIG_MFD_SMSC=y
++CONFIG_ABX500_CORE=y
++CONFIG_AB3100_CORE=y
++# CONFIG_AB3100_OTP is not set
++CONFIG_MFD_STMPE=y
++
++#
++# STMicroelectronics STMPE Interface Drivers
++#
++CONFIG_STMPE_I2C=y
++CONFIG_STMPE_SPI=y
++CONFIG_MFD_SYSCON=y
++# CONFIG_MFD_TI_AM335X_TSCADC is not set
++# CONFIG_MFD_LP3943 is not set
++CONFIG_MFD_LP8788=y
++CONFIG_MFD_PALMAS=y
++# CONFIG_TPS6105X is not set
++# CONFIG_TPS65010 is not set
++# CONFIG_TPS6507X is not set
++# CONFIG_MFD_TPS65086 is not set
++CONFIG_MFD_TPS65090=y
++CONFIG_MFD_TPS65217=y
++# CONFIG_MFD_TPS65218 is not set
++CONFIG_MFD_TPS6586X=y
++CONFIG_MFD_TPS65910=y
++CONFIG_MFD_TPS65912=y
++CONFIG_MFD_TPS65912_I2C=y
++CONFIG_MFD_TPS65912_SPI=y
++CONFIG_MFD_TPS80031=y
++CONFIG_TWL4030_CORE=y
++CONFIG_MFD_TWL4030_AUDIO=y
++CONFIG_TWL6040_CORE=y
++# CONFIG_MFD_WL1273_CORE is not set
++# CONFIG_MFD_LM3533 is not set
++CONFIG_MFD_TC3589X=y
++# CONFIG_MFD_TMIO is not set
++# CONFIG_MFD_VX855 is not set
++# CONFIG_MFD_ARIZONA_I2C is not set
++# CONFIG_MFD_ARIZONA_SPI is not set
++CONFIG_MFD_WM8400=y
++CONFIG_MFD_WM831X=y
++CONFIG_MFD_WM831X_I2C=y
++CONFIG_MFD_WM831X_SPI=y
++CONFIG_MFD_WM8350=y
++CONFIG_MFD_WM8350_I2C=y
++# CONFIG_MFD_WM8994 is not set
++CONFIG_MFD_VEXPRESS_SYSREG=y
++CONFIG_REGULATOR=y
++# CONFIG_REGULATOR_DEBUG is not set
++# CONFIG_REGULATOR_FIXED_VOLTAGE is not set
++# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set
++# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set
++# CONFIG_REGULATOR_88PM8607 is not set
++# CONFIG_REGULATOR_ACT8865 is not set
++# CONFIG_REGULATOR_AD5398 is not set
++# CONFIG_REGULATOR_ANATOP is not set
++# CONFIG_REGULATOR_AAT2870 is not set
++# CONFIG_REGULATOR_AB3100 is not set
++# CONFIG_REGULATOR_AS3711 is not set
++# CONFIG_REGULATOR_AS3722 is not set
++# CONFIG_REGULATOR_DA903X is not set
++# CONFIG_REGULATOR_DA9052 is not set
++# CONFIG_REGULATOR_DA9055 is not set
++# CONFIG_REGULATOR_DA9063 is not set
++# CONFIG_REGULATOR_DA9210 is not set
++# CONFIG_REGULATOR_DA9211 is not set
++# CONFIG_REGULATOR_FAN53555 is not set
++# CONFIG_REGULATOR_GPIO is not set
++# CONFIG_REGULATOR_ISL9305 is not set
++# CONFIG_REGULATOR_ISL6271A is not set
++# CONFIG_REGULATOR_LP3971 is not set
++# CONFIG_REGULATOR_LP3972 is not set
++# CONFIG_REGULATOR_LP872X is not set
++# CONFIG_REGULATOR_LP8755 is not set
++# CONFIG_REGULATOR_LP8788 is not set
++# CONFIG_REGULATOR_LTC3589 is not set
++# CONFIG_REGULATOR_MAX14577 is not set
++# CONFIG_REGULATOR_MAX1586 is not set
++# CONFIG_REGULATOR_MAX8649 is not set
++# CONFIG_REGULATOR_MAX8660 is not set
++# CONFIG_REGULATOR_MAX8925 is not set
++# CONFIG_REGULATOR_MAX8952 is not set
++# CONFIG_REGULATOR_MAX8973 is not set
++# CONFIG_REGULATOR_MAX8997 is not set
++# CONFIG_REGULATOR_MAX8998 is not set
++# CONFIG_REGULATOR_MAX77686 is not set
++# CONFIG_REGULATOR_MAX77693 is not set
++# CONFIG_REGULATOR_MAX77802 is not set
++# CONFIG_REGULATOR_MT6311 is not set
++# CONFIG_REGULATOR_PALMAS is not set
++# CONFIG_REGULATOR_PCAP is not set
++# CONFIG_REGULATOR_PFUZE100 is not set
++# CONFIG_REGULATOR_PV88060 is not set
++# CONFIG_REGULATOR_PV88090 is not set
++# CONFIG_REGULATOR_PWM is not set
++# CONFIG_REGULATOR_RC5T583 is not set
++# CONFIG_REGULATOR_S2MPA01 is not set
++# CONFIG_REGULATOR_S2MPS11 is not set
++# CONFIG_REGULATOR_S5M8767 is not set
++# CONFIG_REGULATOR_TPS51632 is not set
++# CONFIG_REGULATOR_TPS62360 is not set
++# CONFIG_REGULATOR_TPS65023 is not set
++# CONFIG_REGULATOR_TPS6507X is not set
++# CONFIG_REGULATOR_TPS65090 is not set
++CONFIG_REGULATOR_TPS65217=y
++# CONFIG_REGULATOR_TPS6524X is not set
++# CONFIG_REGULATOR_TPS6586X is not set
++# CONFIG_REGULATOR_TPS65910 is not set
++# CONFIG_REGULATOR_TPS65912 is not set
++# CONFIG_REGULATOR_TPS80031 is not set
++# CONFIG_REGULATOR_TWL4030 is not set
++# CONFIG_REGULATOR_VEXPRESS is not set
++# CONFIG_REGULATOR_WM831X is not set
++# CONFIG_REGULATOR_WM8350 is not set
++# CONFIG_REGULATOR_WM8400 is not set
++# CONFIG_MEDIA_SUPPORT is not set
++
++#
++# Graphics support
++#
++CONFIG_VGA_ARB=y
++CONFIG_VGA_ARB_MAX_GPUS=16
++CONFIG_DRM=m
++# CONFIG_DRM_DP_AUX_CHARDEV is not set
++CONFIG_DRM_KMS_HELPER=m
++CONFIG_DRM_KMS_FB_HELPER=y
++CONFIG_DRM_FBDEV_EMULATION=y
++CONFIG_DRM_LOAD_EDID_FIRMWARE=y
++CONFIG_DRM_TTM=m
++
++#
++# I2C encoder or helper chips
++#
++# CONFIG_DRM_I2C_ADV7511 is not set
++# CONFIG_DRM_I2C_CH7006 is not set
++# CONFIG_DRM_I2C_SIL164 is not set
++# CONFIG_DRM_I2C_NXP_TDA998X is not set
++# CONFIG_DRM_TDFX is not set
++# CONFIG_DRM_HDLCD is not set
++# CONFIG_DRM_R128 is not set
++CONFIG_DRM_RADEON=m
++# CONFIG_DRM_RADEON_USERPTR is not set
++CONFIG_DRM_AMDGPU=m
++# CONFIG_DRM_AMDGPU_CIK is not set
++CONFIG_DRM_AMDGPU_USERPTR=y
++# CONFIG_DRM_AMDGPU_GART_DEBUGFS is not set
++CONFIG_DRM_AMD_POWERPLAY=y
++
++#
++# ACP (Audio CoProcessor) Configuration
++#
++# CONFIG_DRM_AMD_ACP is not set
++
++#
++# Display Engine Configuration
++#
++# CONFIG_DRM_AMD_DAL is not set
++# CONFIG_DRM_NOUVEAU is not set
++# CONFIG_DRM_MGA is not set
++# CONFIG_DRM_VIA is not set
++# CONFIG_DRM_SAVAGE is not set
++# CONFIG_DRM_VGEM is not set
++# CONFIG_DRM_UDL is not set
++# CONFIG_DRM_AST is not set
++# CONFIG_DRM_MGAG200 is not set
++# CONFIG_DRM_CIRRUS_QEMU is not set
++# CONFIG_DRM_QXL is not set
++# CONFIG_DRM_BOCHS is not set
++# CONFIG_DRM_VIRTIO_GPU is not set
++# CONFIG_DRM_MSM is not set
++CONFIG_DRM_BRIDGE=y
++
++#
++# Display Interface Bridges
++#
++# CONFIG_DRM_NXP_PTN3460 is not set
++# CONFIG_DRM_PARADE_PS8622 is not set
++CONFIG_HSA_AMD=m
++
++#
++# Frame buffer Devices
++#
++CONFIG_FB=y
++CONFIG_FIRMWARE_EDID=y
++CONFIG_FB_CMDLINE=y
++CONFIG_FB_NOTIFY=y
++# CONFIG_FB_DDC is not set
++# CONFIG_FB_BOOT_VESA_SUPPORT is not set
++CONFIG_FB_CFB_FILLRECT=y
++CONFIG_FB_CFB_COPYAREA=y
++CONFIG_FB_CFB_IMAGEBLIT=y
++# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
++CONFIG_FB_SYS_FILLRECT=m
++CONFIG_FB_SYS_COPYAREA=m
++CONFIG_FB_SYS_IMAGEBLIT=m
++# CONFIG_FB_FOREIGN_ENDIAN is not set
++CONFIG_FB_SYS_FOPS=m
++# CONFIG_FB_SVGALIB is not set
++# CONFIG_FB_MACMODES is not set
++# CONFIG_FB_BACKLIGHT is not set
++CONFIG_FB_MODE_HELPERS=y
++CONFIG_FB_TILEBLITTING=y
++
++#
++# Frame buffer hardware drivers
++#
++# CONFIG_FB_CIRRUS is not set
++# CONFIG_FB_PM2 is not set
++CONFIG_FB_ARMCLCD=y
++# CONFIG_FB_CYBER2000 is not set
++CONFIG_FB_ASILIANT=y
++CONFIG_FB_IMSTT=y
++# CONFIG_FB_UVESA is not set
++# CONFIG_FB_OPENCORES is not set
++# CONFIG_FB_S1D13XXX is not set
++# CONFIG_FB_NVIDIA is not set
++# CONFIG_FB_RIVA is not set
++# CONFIG_FB_I740 is not set
++# CONFIG_FB_MATROX is not set
++# CONFIG_FB_RADEON is not set
++# CONFIG_FB_ATY128 is not set
++# CONFIG_FB_ATY is not set
++# CONFIG_FB_S3 is not set
++# CONFIG_FB_SAVAGE is not set
++# CONFIG_FB_SIS is not set
++# CONFIG_FB_NEOMAGIC is not set
++# CONFIG_FB_KYRO is not set
++# CONFIG_FB_3DFX is not set
++# CONFIG_FB_VOODOO1 is not set
++# CONFIG_FB_VT8623 is not set
++# CONFIG_FB_TRIDENT is not set
++# CONFIG_FB_ARK is not set
++# CONFIG_FB_PM3 is not set
++# CONFIG_FB_CARMINE is not set
++# CONFIG_FB_SMSCUFX is not set
++# CONFIG_FB_UDL is not set
++# CONFIG_FB_IBM_GXT4500 is not set
++# CONFIG_FB_VIRTUAL is not set
++# CONFIG_XEN_FBDEV_FRONTEND is not set
++# CONFIG_FB_METRONOME is not set
++# CONFIG_FB_MB862XX is not set
++# CONFIG_FB_BROADSHEET is not set
++# CONFIG_FB_AUO_K190X is not set
++CONFIG_FB_SIMPLE=y
++# CONFIG_FB_SSD1307 is not set
++# CONFIG_FB_SM712 is not set
++CONFIG_BACKLIGHT_LCD_SUPPORT=y
++# CONFIG_LCD_CLASS_DEVICE is not set
++CONFIG_BACKLIGHT_CLASS_DEVICE=y
++# CONFIG_BACKLIGHT_GENERIC is not set
++# CONFIG_BACKLIGHT_PWM is not set
++# CONFIG_BACKLIGHT_DA903X is not set
++# CONFIG_BACKLIGHT_DA9052 is not set
++# CONFIG_BACKLIGHT_MAX8925 is not set
++# CONFIG_BACKLIGHT_PM8941_WLED is not set
++# CONFIG_BACKLIGHT_WM831X is not set
++# CONFIG_BACKLIGHT_ADP5520 is not set
++# CONFIG_BACKLIGHT_ADP8860 is not set
++# CONFIG_BACKLIGHT_ADP8870 is not set
++# CONFIG_BACKLIGHT_88PM860X is not set
++# CONFIG_BACKLIGHT_AAT2870 is not set
++# CONFIG_BACKLIGHT_LM3630A is not set
++# CONFIG_BACKLIGHT_LM3639 is not set
++# CONFIG_BACKLIGHT_LP855X is not set
++# CONFIG_BACKLIGHT_LP8788 is not set
++# CONFIG_BACKLIGHT_PANDORA is not set
++# CONFIG_BACKLIGHT_TPS65217 is not set
++# CONFIG_BACKLIGHT_AS3711 is not set
++# CONFIG_BACKLIGHT_GPIO is not set
++# CONFIG_BACKLIGHT_LV5207LP is not set
++# CONFIG_BACKLIGHT_BD6107 is not set
++# CONFIG_VGASTATE is not set
++CONFIG_VIDEOMODE_HELPERS=y
++CONFIG_HDMI=y
++
++#
++# Console display driver support
++#
++CONFIG_DUMMY_CONSOLE=y
++CONFIG_DUMMY_CONSOLE_COLUMNS=80
++CONFIG_DUMMY_CONSOLE_ROWS=25
++CONFIG_FRAMEBUFFER_CONSOLE=y
++CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
++CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
++# CONFIG_LOGO is not set
++CONFIG_SOUND=m
++# CONFIG_SOUND_OSS_CORE is not set
++CONFIG_SND=m
++CONFIG_SND_TIMER=m
++CONFIG_SND_PCM=m
++CONFIG_SND_HWDEP=m
++CONFIG_SND_JACK=y
++CONFIG_SND_JACK_INPUT_DEV=y
++# CONFIG_SND_SEQUENCER is not set
++# CONFIG_SND_MIXER_OSS is not set
++# CONFIG_SND_PCM_OSS is not set
++CONFIG_SND_PCM_TIMER=y
++# CONFIG_SND_HRTIMER is not set
++CONFIG_SND_DYNAMIC_MINORS=y
++CONFIG_SND_MAX_CARDS=32
++CONFIG_SND_SUPPORT_OLD_API=y
++CONFIG_SND_PROC_FS=y
++CONFIG_SND_VERBOSE_PROCFS=y
++# CONFIG_SND_VERBOSE_PRINTK is not set
++# CONFIG_SND_DEBUG is not set
++CONFIG_SND_VMASTER=y
++# CONFIG_SND_RAWMIDI_SEQ is not set
++# CONFIG_SND_OPL3_LIB_SEQ is not set
++# CONFIG_SND_OPL4_LIB_SEQ is not set
++# CONFIG_SND_SBAWE_SEQ is not set
++# CONFIG_SND_EMU10K1_SEQ is not set
++CONFIG_SND_DRIVERS=y
++# CONFIG_SND_DUMMY is not set
++# CONFIG_SND_ALOOP is not set
++# CONFIG_SND_MTPAV is not set
++# CONFIG_SND_SERIAL_U16550 is not set
++# CONFIG_SND_MPU401 is not set
++CONFIG_SND_PCI=y
++# CONFIG_SND_AD1889 is not set
++# CONFIG_SND_ALS300 is not set
++# CONFIG_SND_ALI5451 is not set
++# CONFIG_SND_ATIIXP is not set
++# CONFIG_SND_ATIIXP_MODEM is not set
++# CONFIG_SND_AU8810 is not set
++# CONFIG_SND_AU8820 is not set
++# CONFIG_SND_AU8830 is not set
++# CONFIG_SND_AW2 is not set
++# CONFIG_SND_AZT3328 is not set
++# CONFIG_SND_BT87X is not set
++# CONFIG_SND_CA0106 is not set
++# CONFIG_SND_CMIPCI is not set
++# CONFIG_SND_OXYGEN is not set
++# CONFIG_SND_CS4281 is not set
++# CONFIG_SND_CS46XX is not set
++# CONFIG_SND_CTXFI is not set
++# CONFIG_SND_DARLA20 is not set
++# CONFIG_SND_GINA20 is not set
++# CONFIG_SND_LAYLA20 is not set
++# CONFIG_SND_DARLA24 is not set
++# CONFIG_SND_GINA24 is not set
++# CONFIG_SND_LAYLA24 is not set
++# CONFIG_SND_MONA is not set
++# CONFIG_SND_MIA is not set
++# CONFIG_SND_ECHO3G is not set
++# CONFIG_SND_INDIGO is not set
++# CONFIG_SND_INDIGOIO is not set
++# CONFIG_SND_INDIGODJ is not set
++# CONFIG_SND_INDIGOIOX is not set
++# CONFIG_SND_INDIGODJX is not set
++# CONFIG_SND_EMU10K1 is not set
++# CONFIG_SND_EMU10K1X is not set
++# CONFIG_SND_ENS1370 is not set
++# CONFIG_SND_ENS1371 is not set
++# CONFIG_SND_ES1938 is not set
++# CONFIG_SND_ES1968 is not set
++# CONFIG_SND_FM801 is not set
++# CONFIG_SND_HDSP is not set
++# CONFIG_SND_HDSPM is not set
++# CONFIG_SND_ICE1712 is not set
++# CONFIG_SND_ICE1724 is not set
++# CONFIG_SND_INTEL8X0 is not set
++# CONFIG_SND_INTEL8X0M is not set
++# CONFIG_SND_KORG1212 is not set
++# CONFIG_SND_LOLA is not set
++# CONFIG_SND_LX6464ES is not set
++# CONFIG_SND_MAESTRO3 is not set
++# CONFIG_SND_MIXART is not set
++# CONFIG_SND_NM256 is not set
++# CONFIG_SND_PCXHR is not set
++# CONFIG_SND_RIPTIDE is not set
++# CONFIG_SND_RME32 is not set
++# CONFIG_SND_RME96 is not set
++# CONFIG_SND_RME9652 is not set
++# CONFIG_SND_SE6X is not set
++# CONFIG_SND_SONICVIBES is not set
++# CONFIG_SND_TRIDENT is not set
++# CONFIG_SND_VIA82XX is not set
++# CONFIG_SND_VIA82XX_MODEM is not set
++# CONFIG_SND_VIRTUOSO is not set
++# CONFIG_SND_VX222 is not set
++# CONFIG_SND_YMFPCI is not set
++
++#
++# HD-Audio
++#
++CONFIG_SND_HDA=m
++CONFIG_SND_HDA_INTEL=m
++CONFIG_SND_HDA_HWDEP=y
++CONFIG_SND_HDA_RECONFIG=y
++CONFIG_SND_HDA_INPUT_BEEP=y
++CONFIG_SND_HDA_INPUT_BEEP_MODE=0
++CONFIG_SND_HDA_PATCH_LOADER=y
++# CONFIG_SND_HDA_CODEC_REALTEK is not set
++# CONFIG_SND_HDA_CODEC_ANALOG is not set
++# CONFIG_SND_HDA_CODEC_SIGMATEL is not set
++# CONFIG_SND_HDA_CODEC_VIA is not set
++CONFIG_SND_HDA_CODEC_HDMI=m
++# CONFIG_SND_HDA_CODEC_CIRRUS is not set
++# CONFIG_SND_HDA_CODEC_CONEXANT is not set
++# CONFIG_SND_HDA_CODEC_CA0110 is not set
++# CONFIG_SND_HDA_CODEC_CA0132 is not set
++# CONFIG_SND_HDA_CODEC_CMEDIA is not set
++# CONFIG_SND_HDA_CODEC_SI3054 is not set
++# CONFIG_SND_HDA_GENERIC is not set
++CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0
++CONFIG_SND_HDA_CORE=m
++CONFIG_SND_HDA_PREALLOC_SIZE=64
++CONFIG_SND_SPI=y
++CONFIG_SND_USB=y
++# CONFIG_SND_USB_AUDIO is not set
++# CONFIG_SND_USB_UA101 is not set
++# CONFIG_SND_USB_CAIAQ is not set
++# CONFIG_SND_USB_6FIRE is not set
++# CONFIG_SND_USB_HIFACE is not set
++# CONFIG_SND_BCD2000 is not set
++# CONFIG_SND_USB_POD is not set
++# CONFIG_SND_USB_PODHD is not set
++# CONFIG_SND_USB_TONEPORT is not set
++# CONFIG_SND_USB_VARIAX is not set
++# CONFIG_SND_SOC is not set
++# CONFIG_SOUND_PRIME is not set
++
++#
++# HID support
++#
++CONFIG_HID=m
++CONFIG_HID_BATTERY_STRENGTH=y
++CONFIG_HIDRAW=y
++# CONFIG_UHID is not set
++CONFIG_HID_GENERIC=m
++
++#
++# Special HID drivers
++#
++# CONFIG_HID_A4TECH is not set
++# CONFIG_HID_ACRUX is not set
++# CONFIG_HID_APPLE is not set
++# CONFIG_HID_APPLEIR is not set
++# CONFIG_HID_AUREAL is not set
++# CONFIG_HID_BELKIN is not set
++# CONFIG_HID_BETOP_FF is not set
++# CONFIG_HID_CHERRY is not set
++# CONFIG_HID_CHICONY is not set
++# CONFIG_HID_CORSAIR is not set
++# CONFIG_HID_PRODIKEYS is not set
++# CONFIG_HID_CMEDIA is not set
++# CONFIG_HID_CP2112 is not set
++# CONFIG_HID_CYPRESS is not set
++# CONFIG_HID_DRAGONRISE is not set
++# CONFIG_HID_EMS_FF is not set
++# CONFIG_HID_ELECOM is not set
++# CONFIG_HID_ELO is not set
++# CONFIG_HID_EZKEY is not set
++# CONFIG_HID_GEMBIRD is not set
++# CONFIG_HID_GFRM is not set
++# CONFIG_HID_HOLTEK is not set
++# CONFIG_HID_GT683R is not set
++# CONFIG_HID_KEYTOUCH is not set
++# CONFIG_HID_KYE is not set
++# CONFIG_HID_UCLOGIC is not set
++# CONFIG_HID_WALTOP is not set
++# CONFIG_HID_GYRATION is not set
++# CONFIG_HID_ICADE is not set
++# CONFIG_HID_TWINHAN is not set
++# CONFIG_HID_KENSINGTON is not set
++# CONFIG_HID_LCPOWER is not set
++# CONFIG_HID_LENOVO is not set
++# CONFIG_HID_LOGITECH is not set
++# CONFIG_HID_MAGICMOUSE is not set
++# CONFIG_HID_MICROSOFT is not set
++# CONFIG_HID_MONTEREY is not set
++# CONFIG_HID_MULTITOUCH is not set
++# CONFIG_HID_NTRIG is not set
++# CONFIG_HID_ORTEK is not set
++# CONFIG_HID_PANTHERLORD is not set
++# CONFIG_HID_PENMOUNT is not set
++# CONFIG_HID_PETALYNX is not set
++# CONFIG_HID_PICOLCD is not set
++# CONFIG_HID_PLANTRONICS is not set
++# CONFIG_HID_PRIMAX is not set
++# CONFIG_HID_ROCCAT is not set
++# CONFIG_HID_SAITEK is not set
++# CONFIG_HID_SAMSUNG is not set
++# CONFIG_HID_SONY is not set
++# CONFIG_HID_SPEEDLINK is not set
++# CONFIG_HID_STEELSERIES is not set
++# CONFIG_HID_SUNPLUS is not set
++# CONFIG_HID_RMI is not set
++# CONFIG_HID_GREENASIA is not set
++# CONFIG_HID_SMARTJOYPLUS is not set
++# CONFIG_HID_TIVO is not set
++# CONFIG_HID_TOPSEED is not set
++# CONFIG_HID_THINGM is not set
++# CONFIG_HID_THRUSTMASTER is not set
++# CONFIG_HID_WACOM is not set
++# CONFIG_HID_WIIMOTE is not set
++# CONFIG_HID_XINMO is not set
++# CONFIG_HID_ZEROPLUS is not set
++# CONFIG_HID_ZYDACRON is not set
++# CONFIG_HID_SENSOR_HUB is not set
++
++#
++# USB HID support
++#
++CONFIG_USB_HID=m
++CONFIG_HID_PID=y
++CONFIG_USB_HIDDEV=y
++
++#
++# USB HID Boot Protocol drivers
++#
++# CONFIG_USB_KBD is not set
++# CONFIG_USB_MOUSE is not set
++
++#
++# I2C HID support
++#
++# CONFIG_I2C_HID is not set
++CONFIG_USB_OHCI_LITTLE_ENDIAN=y
++CONFIG_USB_SUPPORT=y
++CONFIG_USB_COMMON=y
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB=y
++CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
++
++#
++# Miscellaneous USB options
++#
++CONFIG_USB_DEFAULT_PERSIST=y
++CONFIG_USB_DYNAMIC_MINORS=y
++# CONFIG_USB_OTG is not set
++# CONFIG_USB_OTG_WHITELIST is not set
++# CONFIG_USB_OTG_BLACKLIST_HUB is not set
++# CONFIG_USB_ULPI_BUS is not set
++# CONFIG_USB_MON is not set
++# CONFIG_USB_WUSB_CBAF is not set
++
++#
++# USB Host Controller Drivers
++#
++# CONFIG_USB_C67X00_HCD is not set
++CONFIG_USB_XHCI_HCD=y
++CONFIG_USB_XHCI_PCI=y
++# CONFIG_USB_XHCI_PLATFORM is not set
++# CONFIG_USB_XHCI_MTK is not set
++CONFIG_USB_EHCI_HCD=y
++CONFIG_USB_EHCI_ROOT_HUB_TT=y
++CONFIG_USB_EHCI_TT_NEWSCHED=y
++CONFIG_USB_EHCI_PCI=y
++# CONFIG_USB_EHCI_MSM is not set
++# CONFIG_USB_EHCI_HCD_PLATFORM is not set
++# CONFIG_USB_OXU210HP_HCD is not set
++# CONFIG_USB_ISP116X_HCD is not set
++# CONFIG_USB_ISP1362_HCD is not set
++# CONFIG_USB_FOTG210_HCD is not set
++# CONFIG_USB_MAX3421_HCD is not set
++CONFIG_USB_OHCI_HCD=y
++CONFIG_USB_OHCI_HCD_PCI=y
++# CONFIG_USB_OHCI_HCD_PLATFORM is not set
++CONFIG_USB_UHCI_HCD=y
++# CONFIG_USB_SL811_HCD is not set
++# CONFIG_USB_R8A66597_HCD is not set
++# CONFIG_USB_HCD_TEST_MODE is not set
++
++#
++# USB Device Class drivers
++#
++# CONFIG_USB_ACM is not set
++# CONFIG_USB_PRINTER is not set
++# CONFIG_USB_WDM is not set
++# CONFIG_USB_TMC is not set
++
++#
++# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
++#
++
++#
++# also be needed; see USB_STORAGE Help for more info
++#
++# CONFIG_USB_STORAGE is not set
++
++#
++# USB Imaging devices
++#
++# CONFIG_USB_MDC800 is not set
++# CONFIG_USB_MICROTEK is not set
++# CONFIG_USBIP_CORE is not set
++# CONFIG_USB_MUSB_HDRC is not set
++# CONFIG_USB_DWC3 is not set
++CONFIG_USB_DWC2=y
++CONFIG_USB_DWC2_HOST=y
++
++#
++# Gadget/Dual-role mode requires USB Gadget support to be enabled
++#
++# CONFIG_USB_DWC2_PCI is not set
++# CONFIG_USB_DWC2_DEBUG is not set
++# CONFIG_USB_DWC2_TRACK_MISSED_SOFS is not set
++# CONFIG_USB_CHIPIDEA is not set
++# CONFIG_USB_ISP1760 is not set
++
++#
++# USB port drivers
++#
++# CONFIG_USB_SERIAL is not set
++
++#
++# USB Miscellaneous drivers
++#
++# CONFIG_USB_EMI62 is not set
++# CONFIG_USB_EMI26 is not set
++# CONFIG_USB_ADUTUX is not set
++# CONFIG_USB_SEVSEG is not set
++# CONFIG_USB_RIO500 is not set
++# CONFIG_USB_LEGOTOWER is not set
++# CONFIG_USB_LCD is not set
++# CONFIG_USB_LED is not set
++# CONFIG_USB_CYPRESS_CY7C63 is not set
++# CONFIG_USB_CYTHERM is not set
++# CONFIG_USB_IDMOUSE is not set
++# CONFIG_USB_FTDI_ELAN is not set
++# CONFIG_USB_APPLEDISPLAY is not set
++# CONFIG_USB_SISUSBVGA is not set
++# CONFIG_USB_LD is not set
++# CONFIG_USB_TRANCEVIBRATOR is not set
++# CONFIG_USB_IOWARRIOR is not set
++# CONFIG_USB_TEST is not set
++# CONFIG_USB_EHSET_TEST_FIXTURE is not set
++# CONFIG_USB_ISIGHTFW is not set
++# CONFIG_USB_YUREX is not set
++# CONFIG_USB_EZUSB_FX2 is not set
++# CONFIG_USB_HSIC_USB3503 is not set
++# CONFIG_USB_LINK_LAYER_TEST is not set
++# CONFIG_USB_CHAOSKEY is not set
++
++#
++# USB Physical Layer drivers
++#
++# CONFIG_USB_PHY is not set
++# CONFIG_NOP_USB_XCEIV is not set
++# CONFIG_USB_GPIO_VBUS is not set
++# CONFIG_USB_ISP1301 is not set
++# CONFIG_USB_MSM_OTG is not set
++# CONFIG_USB_QCOM_8X16_PHY is not set
++CONFIG_USB_ULPI=y
++CONFIG_USB_ULPI_VIEWPORT=y
++# CONFIG_USB_GADGET is not set
++CONFIG_USB_LED_TRIG=y
++# CONFIG_UWB is not set
++CONFIG_MMC=y
++# CONFIG_MMC_DEBUG is not set
++
++#
++# MMC/SD/SDIO Card Drivers
++#
++CONFIG_MMC_BLOCK=y
++CONFIG_MMC_BLOCK_MINORS=8
++CONFIG_MMC_BLOCK_BOUNCE=y
++# CONFIG_SDIO_UART is not set
++# CONFIG_MMC_TEST is not set
++
++#
++# MMC/SD/SDIO Host Controller Drivers
++#
++CONFIG_MMC_ARMMMCI=y
++# CONFIG_MMC_SDHCI is not set
++# CONFIG_MMC_TIFM_SD is not set
++# CONFIG_MMC_SPI is not set
++# CONFIG_MMC_CB710 is not set
++# CONFIG_MMC_VIA_SDMMC is not set
++# CONFIG_MMC_DW is not set
++# CONFIG_MMC_VUB300 is not set
++# CONFIG_MMC_USHC is not set
++# CONFIG_MMC_USDHI6ROL0 is not set
++# CONFIG_MMC_TOSHIBA_PCI is not set
++# CONFIG_MMC_MTK is not set
++# CONFIG_MEMSTICK is not set
++CONFIG_NEW_LEDS=y
++CONFIG_LEDS_CLASS=y
++# CONFIG_LEDS_CLASS_FLASH is not set
++
++#
++# LED drivers
++#
++# CONFIG_LEDS_88PM860X is not set
++# CONFIG_LEDS_BCM6328 is not set
++# CONFIG_LEDS_BCM6358 is not set
++# CONFIG_LEDS_LM3530 is not set
++# CONFIG_LEDS_LM3642 is not set
++# CONFIG_LEDS_PCA9532 is not set
++# CONFIG_LEDS_GPIO is not set
++# CONFIG_LEDS_LP3944 is not set
++# CONFIG_LEDS_LP5521 is not set
++# CONFIG_LEDS_LP5523 is not set
++# CONFIG_LEDS_LP5562 is not set
++# CONFIG_LEDS_LP8501 is not set
++# CONFIG_LEDS_LP8788 is not set
++# CONFIG_LEDS_LP8860 is not set
++# CONFIG_LEDS_PCA955X is not set
++# CONFIG_LEDS_PCA963X is not set
++# CONFIG_LEDS_WM831X_STATUS is not set
++# CONFIG_LEDS_WM8350 is not set
++# CONFIG_LEDS_DA903X is not set
++# CONFIG_LEDS_DA9052 is not set
++# CONFIG_LEDS_DAC124S085 is not set
++# CONFIG_LEDS_PWM is not set
++# CONFIG_LEDS_REGULATOR is not set
++# CONFIG_LEDS_BD2802 is not set
++# CONFIG_LEDS_INTEL_SS4200 is not set
++# CONFIG_LEDS_LT3593 is not set
++# CONFIG_LEDS_ADP5520 is not set
++# CONFIG_LEDS_TCA6507 is not set
++# CONFIG_LEDS_TLC591XX is not set
++# CONFIG_LEDS_MAX8997 is not set
++# CONFIG_LEDS_LM355x is not set
++# CONFIG_LEDS_IS31FL32XX is not set
++
++#
++# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM)
++#
++# CONFIG_LEDS_BLINKM is not set
++CONFIG_LEDS_SYSCON=y
++
++#
++# LED Triggers
++#
++CONFIG_LEDS_TRIGGERS=y
++# CONFIG_LEDS_TRIGGER_TIMER is not set
++# CONFIG_LEDS_TRIGGER_ONESHOT is not set
++# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set
++# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
++CONFIG_LEDS_TRIGGER_CPU=y
++# CONFIG_LEDS_TRIGGER_GPIO is not set
++# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
++
++#
++# iptables trigger is under Netfilter config (LED target)
++#
++# CONFIG_LEDS_TRIGGER_TRANSIENT is not set
++# CONFIG_LEDS_TRIGGER_CAMERA is not set
++# CONFIG_ACCESSIBILITY is not set
++# CONFIG_INFINIBAND is not set
++CONFIG_EDAC_SUPPORT=y
++CONFIG_EDAC=y
++# CONFIG_EDAC_LEGACY_SYSFS is not set
++# CONFIG_EDAC_DEBUG is not set
++# CONFIG_EDAC_MM_EDAC is not set
++CONFIG_RTC_LIB=y
++CONFIG_RTC_CLASS=y
++CONFIG_RTC_HCTOSYS=y
++CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
++CONFIG_RTC_SYSTOHC=y
++CONFIG_RTC_SYSTOHC_DEVICE="rtc0"
++# CONFIG_RTC_DEBUG is not set
++
++#
++# RTC interfaces
++#
++CONFIG_RTC_INTF_SYSFS=y
++CONFIG_RTC_INTF_PROC=y
++CONFIG_RTC_INTF_DEV=y
++# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
++# CONFIG_RTC_DRV_TEST is not set
++
++#
++# I2C RTC drivers
++#
++# CONFIG_RTC_DRV_88PM860X is not set
++# CONFIG_RTC_DRV_ABB5ZES3 is not set
++# CONFIG_RTC_DRV_ABX80X is not set
++# CONFIG_RTC_DRV_AS3722 is not set
++# CONFIG_RTC_DRV_DS1307 is not set
++# CONFIG_RTC_DRV_DS1374 is not set
++# CONFIG_RTC_DRV_DS1672 is not set
++# CONFIG_RTC_DRV_HYM8563 is not set
++# CONFIG_RTC_DRV_LP8788 is not set
++# CONFIG_RTC_DRV_MAX6900 is not set
++# CONFIG_RTC_DRV_MAX8925 is not set
++# CONFIG_RTC_DRV_MAX8998 is not set
++# CONFIG_RTC_DRV_MAX8997 is not set
++# CONFIG_RTC_DRV_MAX77686 is not set
++# CONFIG_RTC_DRV_RS5C372 is not set
++# CONFIG_RTC_DRV_ISL1208 is not set
++# CONFIG_RTC_DRV_ISL12022 is not set
++# CONFIG_RTC_DRV_ISL12057 is not set
++# CONFIG_RTC_DRV_X1205 is not set
++# CONFIG_RTC_DRV_PCF8523 is not set
++# CONFIG_RTC_DRV_PCF85063 is not set
++# CONFIG_RTC_DRV_PCF8563 is not set
++# CONFIG_RTC_DRV_PCF8583 is not set
++# CONFIG_RTC_DRV_M41T80 is not set
++# CONFIG_RTC_DRV_BQ32K is not set
++# CONFIG_RTC_DRV_TWL4030 is not set
++# CONFIG_RTC_DRV_PALMAS is not set
++# CONFIG_RTC_DRV_TPS6586X is not set
++# CONFIG_RTC_DRV_TPS65910 is not set
++# CONFIG_RTC_DRV_TPS80031 is not set
++# CONFIG_RTC_DRV_RC5T583 is not set
++# CONFIG_RTC_DRV_S35390A is not set
++# CONFIG_RTC_DRV_FM3130 is not set
++# CONFIG_RTC_DRV_RX8010 is not set
++# CONFIG_RTC_DRV_RX8581 is not set
++# CONFIG_RTC_DRV_RX8025 is not set
++# CONFIG_RTC_DRV_EM3027 is not set
++# CONFIG_RTC_DRV_RV3029C2 is not set
++# CONFIG_RTC_DRV_RV8803 is not set
++# CONFIG_RTC_DRV_S5M is not set
++
++#
++# SPI RTC drivers
++#
++# CONFIG_RTC_DRV_M41T93 is not set
++# CONFIG_RTC_DRV_M41T94 is not set
++# CONFIG_RTC_DRV_DS1305 is not set
++# CONFIG_RTC_DRV_DS1343 is not set
++# CONFIG_RTC_DRV_DS1347 is not set
++# CONFIG_RTC_DRV_DS1390 is not set
++# CONFIG_RTC_DRV_R9701 is not set
++# CONFIG_RTC_DRV_RX4581 is not set
++# CONFIG_RTC_DRV_RX6110 is not set
++# CONFIG_RTC_DRV_RS5C348 is not set
++# CONFIG_RTC_DRV_MAX6902 is not set
++# CONFIG_RTC_DRV_PCF2123 is not set
++# CONFIG_RTC_DRV_MCP795 is not set
++CONFIG_RTC_I2C_AND_SPI=y
++
++#
++# SPI and I2C RTC drivers
++#
++# CONFIG_RTC_DRV_DS3232 is not set
++# CONFIG_RTC_DRV_PCF2127 is not set
++
++#
++# Platform RTC drivers
++#
++# CONFIG_RTC_DRV_DS1286 is not set
++# CONFIG_RTC_DRV_DS1511 is not set
++# CONFIG_RTC_DRV_DS1553 is not set
++# CONFIG_RTC_DRV_DS1685_FAMILY is not set
++# CONFIG_RTC_DRV_DS1742 is not set
++# CONFIG_RTC_DRV_DS2404 is not set
++# CONFIG_RTC_DRV_DA9052 is not set
++# CONFIG_RTC_DRV_DA9055 is not set
++# CONFIG_RTC_DRV_DA9063 is not set
++CONFIG_RTC_DRV_EFI=m
++# CONFIG_RTC_DRV_STK17TA8 is not set
++# CONFIG_RTC_DRV_M48T86 is not set
++# CONFIG_RTC_DRV_M48T35 is not set
++# CONFIG_RTC_DRV_M48T59 is not set
++# CONFIG_RTC_DRV_MSM6242 is not set
++# CONFIG_RTC_DRV_BQ4802 is not set
++# CONFIG_RTC_DRV_RP5C01 is not set
++# CONFIG_RTC_DRV_V3020 is not set
++# CONFIG_RTC_DRV_WM831X is not set
++# CONFIG_RTC_DRV_WM8350 is not set
++# CONFIG_RTC_DRV_AB3100 is not set
++# CONFIG_RTC_DRV_ZYNQMP is not set
++
++#
++# on-CPU RTC drivers
++#
++# CONFIG_RTC_DRV_PL030 is not set
++# CONFIG_RTC_DRV_PL031 is not set
++# CONFIG_RTC_DRV_PCAP is not set
++# CONFIG_RTC_DRV_SNVS is not set
++CONFIG_RTC_DRV_XGENE=y
++
++#
++# HID Sensor RTC drivers
++#
++# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set
++CONFIG_DMADEVICES=y
++# CONFIG_DMADEVICES_DEBUG is not set
++
++#
++# DMA Devices
++#
++CONFIG_DMA_ENGINE=y
++CONFIG_DMA_VIRTUAL_CHANNELS=y
++CONFIG_DMA_ACPI=y
++CONFIG_DMA_OF=y
++CONFIG_AMBA_PL08X=y
++# CONFIG_FSL_EDMA is not set
++# CONFIG_INTEL_IDMA64 is not set
++# CONFIG_PL330_DMA is not set
++# CONFIG_XGENE_DMA is not set
++# CONFIG_QCOM_BAM_DMA is not set
++# CONFIG_QCOM_HIDMA_MGMT is not set
++# CONFIG_QCOM_HIDMA is not set
++# CONFIG_DW_DMAC is not set
++# CONFIG_DW_DMAC_PCI is not set
++
++#
++# DMA Clients
++#
++CONFIG_ASYNC_TX_DMA=y
++# CONFIG_DMATEST is not set
++CONFIG_AUXDISPLAY=y
++# CONFIG_UIO is not set
++# CONFIG_VFIO is not set
++CONFIG_VIRT_DRIVERS=y
++CONFIG_VIRTIO=y
++
++#
++# Virtio drivers
++#
++CONFIG_VIRTIO_PCI=y
++CONFIG_VIRTIO_PCI_LEGACY=y
++CONFIG_VIRTIO_BALLOON=y
++# CONFIG_VIRTIO_INPUT is not set
++CONFIG_VIRTIO_MMIO=y
++CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
++
++#
++# Microsoft Hyper-V guest support
++#
++
++#
++# Xen driver support
++#
++CONFIG_XEN_BALLOON=y
++CONFIG_XEN_SCRUB_PAGES=y
++# CONFIG_XEN_DEV_EVTCHN is not set
++CONFIG_XEN_BACKEND=y
++# CONFIG_XENFS is not set
++CONFIG_XEN_SYS_HYPERVISOR=y
++CONFIG_XEN_XENBUS_FRONTEND=y
++# CONFIG_XEN_GNTDEV is not set
++# CONFIG_XEN_GRANT_DEV_ALLOC is not set
++CONFIG_SWIOTLB_XEN=y
++CONFIG_XEN_PRIVCMD=m
++CONFIG_XEN_AUTO_XLATE=y
++CONFIG_STAGING=y
++# CONFIG_COMEDI is not set
++# CONFIG_RTL8192U is not set
++# CONFIG_RTLLIB is not set
++# CONFIG_R8712U is not set
++# CONFIG_R8723AU is not set
++# CONFIG_RTS5208 is not set
++# CONFIG_FB_SM750 is not set
++# CONFIG_FB_XGI is not set
++
++#
++# Speakup console speech
++#
++# CONFIG_SPEAKUP is not set
++CONFIG_STAGING_MEDIA=y
++
++#
++# Android
++#
++# CONFIG_STAGING_BOARD is not set
++# CONFIG_LTE_GDM724X is not set
++# CONFIG_LNET is not set
++# CONFIG_DGNC is not set
++# CONFIG_GS_FPGABOOT is not set
++# CONFIG_COMMON_CLK_XLNX_CLKWZRD is not set
++# CONFIG_FB_TFT is not set
++# CONFIG_FSL_MC_BUS is not set
++# CONFIG_MOST is not set
++# CONFIG_GOLDFISH is not set
++CONFIG_CHROME_PLATFORMS=y
++CONFIG_CLKDEV_LOOKUP=y
++CONFIG_HAVE_CLK_PREPARE=y
++CONFIG_COMMON_CLK=y
++
++#
++# Common Clock Framework
++#
++# CONFIG_COMMON_CLK_WM831X is not set
++CONFIG_COMMON_CLK_VERSATILE=y
++CONFIG_CLK_SP810=y
++CONFIG_CLK_VEXPRESS_OSC=y
++# CONFIG_COMMON_CLK_MAX77686 is not set
++# CONFIG_COMMON_CLK_MAX77802 is not set
++# CONFIG_COMMON_CLK_SI5351 is not set
++# CONFIG_COMMON_CLK_SI514 is not set
++# CONFIG_COMMON_CLK_SI570 is not set
++# CONFIG_COMMON_CLK_CDCE706 is not set
++# CONFIG_COMMON_CLK_CDCE925 is not set
++# CONFIG_COMMON_CLK_CS2000_CP is not set
++# CONFIG_COMMON_CLK_S2MPS11 is not set
++# CONFIG_CLK_TWL6040 is not set
++CONFIG_CLK_QORIQ=y
++CONFIG_COMMON_CLK_XGENE=y
++# CONFIG_COMMON_CLK_NXP is not set
++# CONFIG_COMMON_CLK_PALMAS is not set
++# CONFIG_COMMON_CLK_PWM is not set
++# CONFIG_COMMON_CLK_PXA is not set
++CONFIG_COMMON_CLK_IPROC=y
++CONFIG_COMMON_CLK_HI6220=y
++CONFIG_STUB_CLK_HI6220=y
++# CONFIG_COMMON_CLK_QCOM is not set
++
++#
++# Hardware Spinlock drivers
++#
++# CONFIG_HWSPINLOCK_QCOM is not set
++
++#
++# Clock Source drivers
++#
++CONFIG_CLKSRC_OF=y
++CONFIG_CLKSRC_ACPI=y
++CONFIG_CLKSRC_PROBE=y
++CONFIG_CLKSRC_MMIO=y
++CONFIG_ARM_ARCH_TIMER=y
++CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y
++CONFIG_ARM_TIMER_SP804=y
++# CONFIG_ATMEL_PIT is not set
++CONFIG_MTK_TIMER=y
++# CONFIG_SH_TIMER_CMT is not set
++# CONFIG_SH_TIMER_MTU2 is not set
++# CONFIG_SH_TIMER_TMU is not set
++# CONFIG_EM_TIMER_STI is not set
++CONFIG_MAILBOX=y
++# CONFIG_ARM_MHU is not set
++CONFIG_PL320_MBOX=y
++CONFIG_PCC=y
++# CONFIG_ALTERA_MBOX is not set
++# CONFIG_HI6220_MBOX is not set
++# CONFIG_MAILBOX_TEST is not set
++# CONFIG_XGENE_SLIMPRO_MBOX is not set
++CONFIG_IOMMU_API=y
++CONFIG_IOMMU_SUPPORT=y
++
++#
++# Generic IOMMU Pagetable Support
++#
++CONFIG_IOMMU_IO_PGTABLE=y
++CONFIG_IOMMU_IO_PGTABLE_LPAE=y
++# CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST is not set
++# CONFIG_IOMMU_IO_PGTABLE_ARMV7S is not set
++CONFIG_IOMMU_IOVA=y
++CONFIG_OF_IOMMU=y
++CONFIG_IOMMU_DMA=y
++CONFIG_ARM_SMMU=y
++CONFIG_ARM_SMMU_V3=y
++# CONFIG_MTK_IOMMU is not set
++
++#
++# Remoteproc drivers
++#
++# CONFIG_STE_MODEM_RPROC is not set
++
++#
++# Rpmsg drivers
++#
++
++#
++# SOC (System On Chip) specific Drivers
++#
++CONFIG_MTK_INFRACFG=y
++# CONFIG_MTK_PMIC_WRAP is not set
++CONFIG_MTK_SCPSYS=y
++# CONFIG_QCOM_GSBI is not set
++# CONFIG_SUNXI_SRAM is not set
++CONFIG_SOC_TI=y
++CONFIG_PM_DEVFREQ=y
++
++#
++# DEVFREQ Governors
++#
++CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
++CONFIG_DEVFREQ_GOV_PERFORMANCE=y
++CONFIG_DEVFREQ_GOV_POWERSAVE=y
++CONFIG_DEVFREQ_GOV_USERSPACE=y
++
++#
++# DEVFREQ Drivers
++#
++CONFIG_PM_DEVFREQ_EVENT=y
++CONFIG_EXTCON=y
++
++#
++# Extcon Device Drivers
++#
++# CONFIG_EXTCON_GPIO is not set
++# CONFIG_EXTCON_MAX14577 is not set
++# CONFIG_EXTCON_MAX3355 is not set
++# CONFIG_EXTCON_MAX77693 is not set
++# CONFIG_EXTCON_MAX77843 is not set
++# CONFIG_EXTCON_MAX8997 is not set
++# CONFIG_EXTCON_PALMAS is not set
++# CONFIG_EXTCON_RT8973A is not set
++# CONFIG_EXTCON_SM5502 is not set
++# CONFIG_EXTCON_USB_GPIO is not set
++CONFIG_MEMORY=y
++# CONFIG_ARM_PL172_MPMC is not set
++# CONFIG_IIO is not set
++# CONFIG_NTB is not set
++CONFIG_VME_BUS=y
++
++#
++# VME Bridge Drivers
++#
++# CONFIG_VME_TSI148 is not set
++
++#
++# VME Board Drivers
++#
++# CONFIG_VMIVME_7805 is not set
++
++#
++# VME Device Drivers
++#
++# CONFIG_VME_USER is not set
++# CONFIG_VME_PIO2 is not set
++CONFIG_PWM=y
++CONFIG_PWM_SYSFS=y
++# CONFIG_PWM_BERLIN is not set
++# CONFIG_PWM_FSL_FTM is not set
++# CONFIG_PWM_MTK_DISP is not set
++# CONFIG_PWM_PCA9685 is not set
++# CONFIG_PWM_TWL is not set
++# CONFIG_PWM_TWL_LED is not set
++CONFIG_IRQCHIP=y
++CONFIG_ARM_GIC=y
++CONFIG_ARM_GIC_MAX_NR=1
++CONFIG_ARM_GIC_V2M=y
++CONFIG_ARM_GIC_V3=y
++CONFIG_ARM_GIC_V3_ITS=y
++CONFIG_DW_APB_ICTL=y
++CONFIG_HISILICON_IRQ_MBIGEN=y
++# CONFIG_IPACK_BUS is not set
++CONFIG_RESET_CONTROLLER=y
++CONFIG_COMMON_RESET_HI6220=y
++# CONFIG_FMC is not set
++
++#
++# PHY Subsystem
++#
++CONFIG_GENERIC_PHY=y
++# CONFIG_PHY_BERLIN_USB is not set
++# CONFIG_PHY_BERLIN_SATA is not set
++# CONFIG_PHY_PXA_28NM_HSIC is not set
++# CONFIG_PHY_PXA_28NM_USB2 is not set
++# CONFIG_BCM_KONA_USB2_PHY is not set
++# CONFIG_PHY_MT65XX_USB3 is not set
++# CONFIG_PHY_HI6220_USB is not set
++# CONFIG_PHY_SAMSUNG_USB2 is not set
++# CONFIG_PHY_QCOM_APQ8064_SATA is not set
++# CONFIG_PHY_QCOM_IPQ806X_SATA is not set
++CONFIG_PHY_XGENE=y
++# CONFIG_PHY_QCOM_UFS is not set
++CONFIG_POWERCAP=y
++# CONFIG_MCB is not set
++
++#
++# Performance monitor support
++#
++CONFIG_ARM_PMU=y
++CONFIG_RAS=y
++# CONFIG_THUNDERBOLT is not set
++
++#
++# Android
++#
++# CONFIG_ANDROID is not set
++CONFIG_LIBNVDIMM=y
++# CONFIG_BLK_DEV_PMEM is not set
++# CONFIG_ND_BLK is not set
++CONFIG_ND_CLAIM=y
++CONFIG_BTT=y
++# CONFIG_NVMEM is not set
++# CONFIG_STM is not set
++# CONFIG_INTEL_TH is not set
++
++#
++# FPGA Configuration Support
++#
++# CONFIG_FPGA is not set
++
++#
++# Firmware Drivers
++#
++CONFIG_ARM_PSCI_FW=y
++CONFIG_FIRMWARE_MEMMAP=y
++CONFIG_DMIID=y
++# CONFIG_DMI_SYSFS is not set
++# CONFIG_FW_CFG_SYSFS is not set
++CONFIG_HAVE_ARM_SMCCC=y
++
++#
++# EFI (Extensible Firmware Interface) Support
++#
++CONFIG_EFI_VARS=y
++CONFIG_EFI_ESRT=y
++# CONFIG_EFI_VARS_PSTORE is not set
++CONFIG_EFI_PARAMS_FROM_FDT=y
++CONFIG_EFI_RUNTIME_WRAPPERS=y
++CONFIG_EFI_ARMSTUB=y
++CONFIG_ACPI=y
++CONFIG_ACPI_GENERIC_GSI=y
++CONFIG_ACPI_CCA_REQUIRED=y
++# CONFIG_ACPI_DEBUGGER is not set
++# CONFIG_ACPI_EC_DEBUGFS is not set
++CONFIG_ACPI_BUTTON=y
++CONFIG_ACPI_FAN=y
++CONFIG_ACPI_DOCK=y
++CONFIG_ACPI_PROCESSOR=y
++CONFIG_ACPI_HOTPLUG_CPU=y
++CONFIG_ACPI_THERMAL=y
++# CONFIG_ACPI_CUSTOM_DSDT is not set
++# CONFIG_ACPI_DEBUG is not set
++CONFIG_ACPI_PCI_SLOT=y
++CONFIG_ACPI_CONTAINER=y
++CONFIG_ACPI_HED=y
++# CONFIG_ACPI_CUSTOM_METHOD is not set
++CONFIG_ACPI_REDUCED_HARDWARE_ONLY=y
++# CONFIG_PMIC_OPREGION is not set
++
++#
++# File systems
++#
++CONFIG_DCACHE_WORD_ACCESS=y
++# CONFIG_EXT2_FS is not set
++# CONFIG_EXT3_FS is not set
++CONFIG_EXT4_FS=y
++CONFIG_EXT4_USE_FOR_EXT2=y
++CONFIG_EXT4_FS_POSIX_ACL=y
++CONFIG_EXT4_FS_SECURITY=y
++# CONFIG_EXT4_ENCRYPTION is not set
++# CONFIG_EXT4_DEBUG is not set
++CONFIG_JBD2=y
++# CONFIG_JBD2_DEBUG is not set
++CONFIG_FS_MBCACHE=y
++# CONFIG_REISERFS_FS is not set
++# CONFIG_JFS_FS is not set
++# CONFIG_XFS_FS is not set
++# CONFIG_GFS2_FS is not set
++# CONFIG_BTRFS_FS is not set
++# CONFIG_NILFS2_FS is not set
++# CONFIG_F2FS_FS is not set
++CONFIG_FS_DAX=y
++CONFIG_FS_POSIX_ACL=y
++CONFIG_EXPORTFS=y
++CONFIG_FILE_LOCKING=y
++CONFIG_MANDATORY_FILE_LOCKING=y
++# CONFIG_FS_ENCRYPTION is not set
++CONFIG_FSNOTIFY=y
++CONFIG_DNOTIFY=y
++CONFIG_INOTIFY_USER=y
++CONFIG_FANOTIFY=y
++CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
++CONFIG_QUOTA=y
++CONFIG_QUOTA_NETLINK_INTERFACE=y
++# CONFIG_PRINT_QUOTA_WARNING is not set
++# CONFIG_QUOTA_DEBUG is not set
++# CONFIG_QFMT_V1 is not set
++# CONFIG_QFMT_V2 is not set
++CONFIG_QUOTACTL=y
++CONFIG_AUTOFS4_FS=m
++CONFIG_FUSE_FS=y
++# CONFIG_CUSE is not set
++# CONFIG_OVERLAY_FS is not set
++
++#
++# Caches
++#
++# CONFIG_FSCACHE is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++# CONFIG_ISO9660_FS is not set
++# CONFIG_UDF_FS is not set
++
++#
++# DOS/FAT/NT Filesystems
++#
++CONFIG_FAT_FS=y
++# CONFIG_MSDOS_FS is not set
++CONFIG_VFAT_FS=y
++CONFIG_FAT_DEFAULT_CODEPAGE=437
++CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
++# CONFIG_FAT_DEFAULT_UTF8 is not set
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_PROC_SYSCTL=y
++CONFIG_PROC_PAGE_MONITOR=y
++CONFIG_PROC_CHILDREN=y
++CONFIG_KERNFS=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++CONFIG_TMPFS_POSIX_ACL=y
++CONFIG_TMPFS_XATTR=y
++CONFIG_HUGETLBFS=y
++CONFIG_HUGETLB_PAGE=y
++# CONFIG_CONFIGFS_FS is not set
++CONFIG_EFIVAR_FS=y
++CONFIG_MISC_FILESYSTEMS=y
++# CONFIG_ORANGEFS_FS is not set
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++CONFIG_ECRYPT_FS=y
++CONFIG_ECRYPT_FS_MESSAGING=y
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++# CONFIG_LOGFS is not set
++# CONFIG_CRAMFS is not set
++# CONFIG_SQUASHFS is not set
++# CONFIG_VXFS_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_OMFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_QNX6FS_FS is not set
++# CONFIG_ROMFS_FS is not set
++CONFIG_PSTORE=y
++# CONFIG_PSTORE_CONSOLE is not set
++# CONFIG_PSTORE_PMSG is not set
++# CONFIG_PSTORE_FTRACE is not set
++# CONFIG_PSTORE_RAM is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++CONFIG_NETWORK_FILESYSTEMS=y
++# CONFIG_NFS_FS is not set
++# CONFIG_NFSD is not set
++# CONFIG_CEPH_FS is not set
++# CONFIG_CIFS is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++CONFIG_NLS=y
++CONFIG_NLS_DEFAULT="utf8"
++CONFIG_NLS_CODEPAGE_437=y
++# CONFIG_NLS_CODEPAGE_737 is not set
++# CONFIG_NLS_CODEPAGE_775 is not set
++# CONFIG_NLS_CODEPAGE_850 is not set
++# CONFIG_NLS_CODEPAGE_852 is not set
++# CONFIG_NLS_CODEPAGE_855 is not set
++# CONFIG_NLS_CODEPAGE_857 is not set
++# CONFIG_NLS_CODEPAGE_860 is not set
++# CONFIG_NLS_CODEPAGE_861 is not set
++# CONFIG_NLS_CODEPAGE_862 is not set
++# CONFIG_NLS_CODEPAGE_863 is not set
++# CONFIG_NLS_CODEPAGE_864 is not set
++# CONFIG_NLS_CODEPAGE_865 is not set
++# CONFIG_NLS_CODEPAGE_866 is not set
++# CONFIG_NLS_CODEPAGE_869 is not set
++# CONFIG_NLS_CODEPAGE_936 is not set
++# CONFIG_NLS_CODEPAGE_950 is not set
++# CONFIG_NLS_CODEPAGE_932 is not set
++# CONFIG_NLS_CODEPAGE_949 is not set
++# CONFIG_NLS_CODEPAGE_874 is not set
++# CONFIG_NLS_ISO8859_8 is not set
++# CONFIG_NLS_CODEPAGE_1250 is not set
++# CONFIG_NLS_CODEPAGE_1251 is not set
++# CONFIG_NLS_ASCII is not set
++CONFIG_NLS_ISO8859_1=m
++# CONFIG_NLS_ISO8859_2 is not set
++# CONFIG_NLS_ISO8859_3 is not set
++# CONFIG_NLS_ISO8859_4 is not set
++# CONFIG_NLS_ISO8859_5 is not set
++# CONFIG_NLS_ISO8859_6 is not set
++# CONFIG_NLS_ISO8859_7 is not set
++# CONFIG_NLS_ISO8859_9 is not set
++# CONFIG_NLS_ISO8859_13 is not set
++# CONFIG_NLS_ISO8859_14 is not set
++# CONFIG_NLS_ISO8859_15 is not set
++# CONFIG_NLS_KOI8_R is not set
++# CONFIG_NLS_KOI8_U is not set
++# CONFIG_NLS_MAC_ROMAN is not set
++# CONFIG_NLS_MAC_CELTIC is not set
++# CONFIG_NLS_MAC_CENTEURO is not set
++# CONFIG_NLS_MAC_CROATIAN is not set
++# CONFIG_NLS_MAC_CYRILLIC is not set
++# CONFIG_NLS_MAC_GAELIC is not set
++# CONFIG_NLS_MAC_GREEK is not set
++# CONFIG_NLS_MAC_ICELAND is not set
++# CONFIG_NLS_MAC_INUIT is not set
++# CONFIG_NLS_MAC_ROMANIAN is not set
++# CONFIG_NLS_MAC_TURKISH is not set
++# CONFIG_NLS_UTF8 is not set
++CONFIG_HAVE_KVM_IRQFD=y
++CONFIG_HAVE_KVM_EVENTFD=y
++CONFIG_KVM_MMIO=y
++CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y
++CONFIG_KVM_VFIO=y
++CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL=y
++CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y
++CONFIG_KVM_COMPAT=y
++CONFIG_VIRTUALIZATION=y
++CONFIG_KVM_ARM_VGIC_V3=y
++CONFIG_KVM=y
++CONFIG_KVM_ARM_HOST=y
++CONFIG_KVM_ARM_PMU=y
++
++#
++# Kernel hacking
++#
++
++#
++# printk and dmesg options
++#
++CONFIG_PRINTK_TIME=y
++CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4
++CONFIG_BOOT_PRINTK_DELAY=y
++CONFIG_DYNAMIC_DEBUG=y
++
++#
++# Compile-time checks and compiler options
++#
++CONFIG_DEBUG_INFO=y
++# CONFIG_DEBUG_INFO_REDUCED is not set
++# CONFIG_DEBUG_INFO_SPLIT is not set
++CONFIG_DEBUG_INFO_DWARF4=y
++CONFIG_GDB_SCRIPTS=y
++# CONFIG_ENABLE_WARN_DEPRECATED is not set
++# CONFIG_ENABLE_MUST_CHECK is not set
++CONFIG_FRAME_WARN=1024
++# CONFIG_STRIP_ASM_SYMS is not set
++# CONFIG_READABLE_ASM is not set
++CONFIG_UNUSED_SYMBOLS=y
++# CONFIG_PAGE_OWNER is not set
++CONFIG_DEBUG_FS=y
++# CONFIG_HEADERS_CHECK is not set
++# CONFIG_DEBUG_SECTION_MISMATCH is not set
++CONFIG_SECTION_MISMATCH_WARN_ONLY=y
++CONFIG_ARCH_WANT_FRAME_POINTERS=y
++CONFIG_FRAME_POINTER=y
++# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
++CONFIG_MAGIC_SYSRQ=y
++CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1
++CONFIG_DEBUG_KERNEL=y
++
++#
++# Memory Debugging
++#
++# CONFIG_PAGE_EXTENSION is not set
++# CONFIG_DEBUG_PAGEALLOC is not set
++# CONFIG_PAGE_POISONING is not set
++# CONFIG_DEBUG_PAGE_REF is not set
++# CONFIG_DEBUG_OBJECTS is not set
++# CONFIG_SLUB_DEBUG_ON is not set
++# CONFIG_SLUB_STATS is not set
++CONFIG_HAVE_DEBUG_KMEMLEAK=y
++# CONFIG_DEBUG_KMEMLEAK is not set
++# CONFIG_DEBUG_STACK_USAGE is not set
++# CONFIG_DEBUG_VM is not set
++# CONFIG_DEBUG_MEMORY_INIT is not set
++# CONFIG_DEBUG_PER_CPU_MAPS is not set
++CONFIG_HAVE_ARCH_KASAN=y
++# CONFIG_KASAN is not set
++# CONFIG_DEBUG_SHIRQ is not set
++
++#
++# Debug Lockups and Hangs
++#
++CONFIG_LOCKUP_DETECTOR=y
++# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
++CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
++CONFIG_DETECT_HUNG_TASK=y
++CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120
++# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
++CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
++# CONFIG_WQ_WATCHDOG is not set
++# CONFIG_PANIC_ON_OOPS is not set
++CONFIG_PANIC_ON_OOPS_VALUE=0
++CONFIG_PANIC_TIMEOUT=0
++CONFIG_SCHED_DEBUG=y
++CONFIG_SCHED_INFO=y
++CONFIG_SCHEDSTATS=y
++CONFIG_SCHED_STACK_END_CHECK=y
++# CONFIG_DEBUG_TIMEKEEPING is not set
++CONFIG_TIMER_STATS=y
++
++#
++# Lock Debugging (spinlocks, mutexes, etc...)
++#
++# CONFIG_DEBUG_RT_MUTEXES is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++# CONFIG_DEBUG_MUTEXES is not set
++# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set
++# CONFIG_DEBUG_LOCK_ALLOC is not set
++# CONFIG_PROVE_LOCKING is not set
++# CONFIG_LOCK_STAT is not set
++# CONFIG_DEBUG_ATOMIC_SLEEP is not set
++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
++# CONFIG_LOCK_TORTURE_TEST is not set
++CONFIG_STACKTRACE=y
++# CONFIG_DEBUG_KOBJECT is not set
++CONFIG_HAVE_DEBUG_BUGVERBOSE=y
++CONFIG_DEBUG_BUGVERBOSE=y
++# CONFIG_DEBUG_LIST is not set
++# CONFIG_DEBUG_PI_LIST is not set
++# CONFIG_DEBUG_SG is not set
++# CONFIG_DEBUG_NOTIFIERS is not set
++# CONFIG_DEBUG_CREDENTIALS is not set
++
++#
++# RCU Debugging
++#
++# CONFIG_PROVE_RCU is not set
++# CONFIG_SPARSE_RCU_POINTER is not set
++# CONFIG_TORTURE_TEST is not set
++# CONFIG_RCU_TORTURE_TEST is not set
++CONFIG_RCU_CPU_STALL_TIMEOUT=60
++# CONFIG_RCU_TRACE is not set
++# CONFIG_RCU_EQS_DEBUG is not set
++# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set
++# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
++# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set
++# CONFIG_NOTIFIER_ERROR_INJECTION is not set
++# CONFIG_FAULT_INJECTION is not set
++# CONFIG_LATENCYTOP is not set
++CONFIG_NOP_TRACER=y
++CONFIG_HAVE_FUNCTION_TRACER=y
++CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
++CONFIG_HAVE_DYNAMIC_FTRACE=y
++CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
++CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
++CONFIG_HAVE_C_RECORDMCOUNT=y
++CONFIG_TRACER_MAX_TRACE=y
++CONFIG_TRACE_CLOCK=y
++CONFIG_RING_BUFFER=y
++CONFIG_EVENT_TRACING=y
++CONFIG_CONTEXT_SWITCH_TRACER=y
++CONFIG_TRACING=y
++CONFIG_GENERIC_TRACER=y
++CONFIG_TRACING_SUPPORT=y
++CONFIG_FTRACE=y
++CONFIG_FUNCTION_TRACER=y
++CONFIG_FUNCTION_GRAPH_TRACER=y
++# CONFIG_IRQSOFF_TRACER is not set
++CONFIG_SCHED_TRACER=y
++CONFIG_FTRACE_SYSCALLS=y
++CONFIG_TRACER_SNAPSHOT=y
++# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set
++CONFIG_BRANCH_PROFILE_NONE=y
++# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
++# CONFIG_PROFILE_ALL_BRANCHES is not set
++CONFIG_STACK_TRACER=y
++CONFIG_BLK_DEV_IO_TRACE=y
++# CONFIG_PROBE_EVENTS is not set
++CONFIG_DYNAMIC_FTRACE=y
++CONFIG_FUNCTION_PROFILER=y
++CONFIG_FTRACE_MCOUNT_RECORD=y
++# CONFIG_FTRACE_STARTUP_TEST is not set
++# CONFIG_TRACEPOINT_BENCHMARK is not set
++# CONFIG_RING_BUFFER_BENCHMARK is not set
++# CONFIG_RING_BUFFER_STARTUP_TEST is not set
++# CONFIG_TRACE_ENUM_MAP_FILE is not set
++CONFIG_TRACING_EVENTS_GPIO=y
++
++#
++# Runtime Testing
++#
++# CONFIG_LKDTM is not set
++# CONFIG_TEST_LIST_SORT is not set
++# CONFIG_BACKTRACE_SELF_TEST is not set
++# CONFIG_RBTREE_TEST is not set
++# CONFIG_INTERVAL_TREE_TEST is not set
++# CONFIG_PERCPU_TEST is not set
++# CONFIG_ATOMIC64_SELFTEST is not set
++# CONFIG_TEST_HEXDUMP is not set
++# CONFIG_TEST_STRING_HELPERS is not set
++# CONFIG_TEST_KSTRTOX is not set
++# CONFIG_TEST_PRINTF is not set
++# CONFIG_TEST_BITMAP is not set
++# CONFIG_TEST_RHASHTABLE is not set
++# CONFIG_DMA_API_DEBUG is not set
++# CONFIG_TEST_LKM is not set
++# CONFIG_TEST_USER_COPY is not set
++# CONFIG_TEST_BPF is not set
++# CONFIG_TEST_FIRMWARE is not set
++# CONFIG_TEST_UDELAY is not set
++CONFIG_MEMTEST=y
++# CONFIG_TEST_STATIC_KEYS is not set
++# CONFIG_SAMPLES is not set
++CONFIG_HAVE_ARCH_KGDB=y
++CONFIG_KGDB=y
++CONFIG_KGDB_SERIAL_CONSOLE=y
++# CONFIG_KGDB_TESTS is not set
++CONFIG_KGDB_KDB=y
++CONFIG_KDB_DEFAULT_ENABLE=0x1
++CONFIG_KDB_KEYBOARD=y
++CONFIG_KDB_CONTINUE_CATASTROPHIC=0
++CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y
++# CONFIG_UBSAN is not set
++CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y
++# CONFIG_STRICT_DEVMEM is not set
++CONFIG_ARM64_PTDUMP=y
++# CONFIG_PID_IN_CONTEXTIDR is not set
++# CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET is not set
++CONFIG_DEBUG_SET_MODULE_RONX=y
++CONFIG_DEBUG_RODATA=y
++# CONFIG_DEBUG_ALIGN_RODATA is not set
++# CONFIG_CORESIGHT is not set
++
++#
++# Security options
++#
++CONFIG_KEYS=y
++CONFIG_PERSISTENT_KEYRINGS=y
++CONFIG_BIG_KEYS=y
++CONFIG_TRUSTED_KEYS=y
++CONFIG_ENCRYPTED_KEYS=y
++# CONFIG_SECURITY_DMESG_RESTRICT is not set
++CONFIG_SECURITY=y
++CONFIG_SECURITYFS=y
++CONFIG_SECURITY_NETWORK=y
++CONFIG_SECURITY_PATH=y
++CONFIG_LSM_MMAP_MIN_ADDR=0
++CONFIG_SECURITY_SELINUX=y
++CONFIG_SECURITY_SELINUX_BOOTPARAM=y
++CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
++CONFIG_SECURITY_SELINUX_DISABLE=y
++CONFIG_SECURITY_SELINUX_DEVELOP=y
++CONFIG_SECURITY_SELINUX_AVC_STATS=y
++CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
++# CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX is not set
++CONFIG_SECURITY_SMACK=y
++# CONFIG_SECURITY_SMACK_BRINGUP is not set
++CONFIG_SECURITY_SMACK_NETFILTER=y
++CONFIG_SECURITY_TOMOYO=y
++CONFIG_SECURITY_TOMOYO_MAX_ACCEPT_ENTRY=2048
++CONFIG_SECURITY_TOMOYO_MAX_AUDIT_LOG=1024
++# CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER is not set
++CONFIG_SECURITY_TOMOYO_POLICY_LOADER="/sbin/tomoyo-init"
++CONFIG_SECURITY_TOMOYO_ACTIVATION_TRIGGER="/sbin/init"
++CONFIG_SECURITY_APPARMOR=y
++CONFIG_SECURITY_APPARMOR_BOOTPARAM_VALUE=1
++CONFIG_SECURITY_APPARMOR_HASH=y
++CONFIG_SECURITY_YAMA=y
++CONFIG_INTEGRITY=y
++CONFIG_INTEGRITY_SIGNATURE=y
++CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
++CONFIG_INTEGRITY_TRUSTED_KEYRING=y
++CONFIG_INTEGRITY_AUDIT=y
++CONFIG_IMA=y
++CONFIG_IMA_MEASURE_PCR_IDX=10
++CONFIG_IMA_LSM_RULES=y
++# CONFIG_IMA_TEMPLATE is not set
++CONFIG_IMA_NG_TEMPLATE=y
++# CONFIG_IMA_SIG_TEMPLATE is not set
++CONFIG_IMA_DEFAULT_TEMPLATE="ima-ng"
++CONFIG_IMA_DEFAULT_HASH_SHA1=y
++# CONFIG_IMA_DEFAULT_HASH_SHA256 is not set
++# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set
++CONFIG_IMA_DEFAULT_HASH="sha1"
++# CONFIG_IMA_WRITE_POLICY is not set
++# CONFIG_IMA_READ_POLICY is not set
++CONFIG_IMA_APPRAISE=y
++CONFIG_IMA_TRUSTED_KEYRING=y
++# CONFIG_IMA_MOK_KEYRING is not set
++# CONFIG_IMA_LOAD_X509 is not set
++CONFIG_EVM=y
++CONFIG_EVM_ATTR_FSUUID=y
++CONFIG_EVM_EXTRA_SMACK_XATTRS=y
++# CONFIG_EVM_LOAD_X509 is not set
++# CONFIG_DEFAULT_SECURITY_SELINUX is not set
++# CONFIG_DEFAULT_SECURITY_SMACK is not set
++# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
++CONFIG_DEFAULT_SECURITY_APPARMOR=y
++# CONFIG_DEFAULT_SECURITY_DAC is not set
++CONFIG_DEFAULT_SECURITY="apparmor"
++CONFIG_CRYPTO=y
++
++#
++# Crypto core or helper
++#
++CONFIG_CRYPTO_ALGAPI=y
++CONFIG_CRYPTO_ALGAPI2=y
++CONFIG_CRYPTO_AEAD2=y
++CONFIG_CRYPTO_BLKCIPHER=y
++CONFIG_CRYPTO_BLKCIPHER2=y
++CONFIG_CRYPTO_HASH=y
++CONFIG_CRYPTO_HASH2=y
++CONFIG_CRYPTO_RNG=y
++CONFIG_CRYPTO_RNG2=y
++CONFIG_CRYPTO_AKCIPHER2=y
++CONFIG_CRYPTO_AKCIPHER=y
++CONFIG_CRYPTO_RSA=y
++CONFIG_CRYPTO_MANAGER=y
++CONFIG_CRYPTO_MANAGER2=y
++# CONFIG_CRYPTO_USER is not set
++CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
++# CONFIG_CRYPTO_GF128MUL is not set
++# CONFIG_CRYPTO_NULL is not set
++CONFIG_CRYPTO_NULL2=y
++# CONFIG_CRYPTO_PCRYPT is not set
++CONFIG_CRYPTO_WORKQUEUE=y
++# CONFIG_CRYPTO_CRYPTD is not set
++# CONFIG_CRYPTO_MCRYPTD is not set
++# CONFIG_CRYPTO_AUTHENC is not set
++# CONFIG_CRYPTO_TEST is not set
++
++#
++# Authenticated Encryption with Associated Data
++#
++# CONFIG_CRYPTO_CCM is not set
++# CONFIG_CRYPTO_GCM is not set
++# CONFIG_CRYPTO_CHACHA20POLY1305 is not set
++# CONFIG_CRYPTO_SEQIV is not set
++# CONFIG_CRYPTO_ECHAINIV is not set
++
++#
++# Block modes
++#
++CONFIG_CRYPTO_CBC=y
++# CONFIG_CRYPTO_CTR is not set
++# CONFIG_CRYPTO_CTS is not set
++CONFIG_CRYPTO_ECB=y
++# CONFIG_CRYPTO_LRW is not set
++# CONFIG_CRYPTO_PCBC is not set
++# CONFIG_CRYPTO_XTS is not set
++# CONFIG_CRYPTO_KEYWRAP is not set
++
++#
++# Hash modes
++#
++# CONFIG_CRYPTO_CMAC is not set
++CONFIG_CRYPTO_HMAC=y
++# CONFIG_CRYPTO_XCBC is not set
++# CONFIG_CRYPTO_VMAC is not set
++
++#
++# Digest
++#
++CONFIG_CRYPTO_CRC32C=y
++# CONFIG_CRYPTO_CRC32 is not set
++CONFIG_CRYPTO_CRCT10DIF=y
++# CONFIG_CRYPTO_GHASH is not set
++# CONFIG_CRYPTO_POLY1305 is not set
++# CONFIG_CRYPTO_MD4 is not set
++CONFIG_CRYPTO_MD5=y
++# CONFIG_CRYPTO_MICHAEL_MIC is not set
++# CONFIG_CRYPTO_RMD128 is not set
++# CONFIG_CRYPTO_RMD160 is not set
++# CONFIG_CRYPTO_RMD256 is not set
++# CONFIG_CRYPTO_RMD320 is not set
++CONFIG_CRYPTO_SHA1=y
++CONFIG_CRYPTO_SHA256=y
++CONFIG_CRYPTO_SHA512=y
++# CONFIG_CRYPTO_TGR192 is not set
++# CONFIG_CRYPTO_WP512 is not set
++
++#
++# Ciphers
++#
++CONFIG_CRYPTO_AES=y
++# CONFIG_CRYPTO_ANUBIS is not set
++# CONFIG_CRYPTO_ARC4 is not set
++# CONFIG_CRYPTO_BLOWFISH is not set
++# CONFIG_CRYPTO_CAMELLIA is not set
++# CONFIG_CRYPTO_CAST5 is not set
++# CONFIG_CRYPTO_CAST6 is not set
++# CONFIG_CRYPTO_DES is not set
++# CONFIG_CRYPTO_FCRYPT is not set
++# CONFIG_CRYPTO_KHAZAD is not set
++# CONFIG_CRYPTO_SALSA20 is not set
++# CONFIG_CRYPTO_CHACHA20 is not set
++# CONFIG_CRYPTO_SEED is not set
++# CONFIG_CRYPTO_SERPENT is not set
++# CONFIG_CRYPTO_TEA is not set
++# CONFIG_CRYPTO_TWOFISH is not set
++
++#
++# Compression
++#
++# CONFIG_CRYPTO_DEFLATE is not set
++CONFIG_CRYPTO_LZO=y
++# CONFIG_CRYPTO_842 is not set
++# CONFIG_CRYPTO_LZ4 is not set
++# CONFIG_CRYPTO_LZ4HC is not set
++
++#
++# Random Number Generation
++#
++# CONFIG_CRYPTO_ANSI_CPRNG is not set
++# CONFIG_CRYPTO_DRBG_MENU is not set
++# CONFIG_CRYPTO_JITTERENTROPY is not set
++# CONFIG_CRYPTO_USER_API_HASH is not set
++# CONFIG_CRYPTO_USER_API_SKCIPHER is not set
++# CONFIG_CRYPTO_USER_API_RNG is not set
++# CONFIG_CRYPTO_USER_API_AEAD is not set
++CONFIG_CRYPTO_HASH_INFO=y
++CONFIG_CRYPTO_HW=y
++CONFIG_CRYPTO_DEV_CCP=y
++# CONFIG_CRYPTO_DEV_CCP_DD is not set
++# CONFIG_CRYPTO_DEV_QCE is not set
++CONFIG_ASYMMETRIC_KEY_TYPE=y
++CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
++CONFIG_X509_CERTIFICATE_PARSER=y
++CONFIG_PKCS7_MESSAGE_PARSER=y
++# CONFIG_PKCS7_TEST_KEY is not set
++CONFIG_SIGNED_PE_FILE_VERIFICATION=y
++
++#
++# Certificates for signature checking
++#
++CONFIG_MODULE_SIG_KEY="certs/signing_key.pem"
++CONFIG_SYSTEM_TRUSTED_KEYRING=y
++CONFIG_SYSTEM_TRUSTED_KEYS=""
++CONFIG_SYSTEM_EXTRA_CERTIFICATE=y
++CONFIG_SYSTEM_EXTRA_CERTIFICATE_SIZE=4096
++CONFIG_ARM64_CRYPTO=y
++CONFIG_CRYPTO_SHA1_ARM64_CE=m
++CONFIG_CRYPTO_SHA2_ARM64_CE=m
++CONFIG_CRYPTO_GHASH_ARM64_CE=m
++# CONFIG_CRYPTO_AES_ARM64_CE is not set
++# CONFIG_CRYPTO_AES_ARM64_CE_CCM is not set
++# CONFIG_CRYPTO_AES_ARM64_CE_BLK is not set
++# CONFIG_CRYPTO_AES_ARM64_NEON_BLK is not set
++# CONFIG_CRYPTO_CRC32_ARM64 is not set
++CONFIG_BINARY_PRINTF=y
++
++#
++# Library routines
++#
++CONFIG_BITREVERSE=y
++CONFIG_HAVE_ARCH_BITREVERSE=y
++CONFIG_RATIONAL=y
++CONFIG_GENERIC_STRNCPY_FROM_USER=y
++CONFIG_GENERIC_STRNLEN_USER=y
++CONFIG_GENERIC_NET_UTILS=y
++CONFIG_GENERIC_PCI_IOMAP=y
++CONFIG_GENERIC_IO=y
++CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y
++CONFIG_CRC_CCITT=y
++CONFIG_CRC16=y
++CONFIG_CRC_T10DIF=y
++# CONFIG_CRC_ITU_T is not set
++CONFIG_CRC32=y
++# CONFIG_CRC32_SELFTEST is not set
++CONFIG_CRC32_SLICEBY8=y
++# CONFIG_CRC32_SLICEBY4 is not set
++# CONFIG_CRC32_SARWATE is not set
++# CONFIG_CRC32_BIT is not set
++# CONFIG_CRC7 is not set
++# CONFIG_LIBCRC32C is not set
++# CONFIG_CRC8 is not set
++CONFIG_AUDIT_GENERIC=y
++CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y
++CONFIG_AUDIT_COMPAT_GENERIC=y
++# CONFIG_RANDOM32_SELFTEST is not set
++CONFIG_ZLIB_INFLATE=y
++CONFIG_ZLIB_DEFLATE=y
++CONFIG_LZO_COMPRESS=y
++CONFIG_LZO_DECOMPRESS=y
++CONFIG_LZ4_DECOMPRESS=y
++CONFIG_XZ_DEC=y
++CONFIG_XZ_DEC_X86=y
++CONFIG_XZ_DEC_POWERPC=y
++CONFIG_XZ_DEC_IA64=y
++CONFIG_XZ_DEC_ARM=y
++CONFIG_XZ_DEC_ARMTHUMB=y
++CONFIG_XZ_DEC_SPARC=y
++CONFIG_XZ_DEC_BCJ=y
++# CONFIG_XZ_DEC_TEST is not set
++CONFIG_DECOMPRESS_GZIP=y
++CONFIG_DECOMPRESS_BZIP2=y
++CONFIG_DECOMPRESS_LZMA=y
++CONFIG_DECOMPRESS_XZ=y
++CONFIG_DECOMPRESS_LZO=y
++CONFIG_DECOMPRESS_LZ4=y
++CONFIG_GENERIC_ALLOCATOR=y
++CONFIG_INTERVAL_TREE=y
++CONFIG_ASSOCIATIVE_ARRAY=y
++CONFIG_HAS_IOMEM=y
++CONFIG_HAS_IOPORT_MAP=y
++CONFIG_HAS_DMA=y
++CONFIG_CPU_RMAP=y
++CONFIG_DQL=y
++CONFIG_GLOB=y
++# CONFIG_GLOB_SELFTEST is not set
++CONFIG_NLATTR=y
++CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE=y
++CONFIG_CLZ_TAB=y
++# CONFIG_CORDIC is not set
++CONFIG_DDR=y
++CONFIG_IRQ_POLL=y
++CONFIG_MPILIB=y
++CONFIG_SIGNATURE=y
++CONFIG_LIBFDT=y
++CONFIG_OID_REGISTRY=y
++CONFIG_UCS2_STRING=y
++CONFIG_FONT_SUPPORT=y
++# CONFIG_FONTS is not set
++CONFIG_FONT_8x8=y
++CONFIG_FONT_8x16=y
++# CONFIG_SG_SPLIT is not set
++CONFIG_ARCH_HAS_SG_CHAIN=y
+diff --git a/arch/powerpc/configs/rock-dbg_defconfig b/arch/powerpc/configs/rock-dbg_defconfig
+new file mode 100644
+index 0000000..c15490d
+--- /dev/null
++++ b/arch/powerpc/configs/rock-dbg_defconfig
+@@ -0,0 +1,7821 @@
++#
++# Automatically generated file; DO NOT EDIT.
++# Linux/powerpc 4.4.0 Kernel Configuration
++#
++CONFIG_PPC64=y
++
++#
++# Processor support
++#
++CONFIG_PPC_BOOK3S_64=y
++# CONFIG_PPC_BOOK3E_64 is not set
++CONFIG_POWER7_CPU=y
++# CONFIG_POWER8_CPU is not set
++CONFIG_PPC_BOOK3S=y
++CONFIG_PPC_FPU=y
++CONFIG_ALTIVEC=y
++CONFIG_VSX=y
++# CONFIG_PPC_ICSWX is not set
++CONFIG_PPC_STD_MMU=y
++CONFIG_PPC_STD_MMU_64=y
++CONFIG_PPC_MM_SLICES=y
++CONFIG_PPC_HAVE_PMU_SUPPORT=y
++CONFIG_PPC_PERF_CTRS=y
++CONFIG_SMP=y
++CONFIG_NR_CPUS=2048
++CONFIG_PPC_DOORBELL=y
++# CONFIG_CPU_BIG_ENDIAN is not set
++CONFIG_CPU_LITTLE_ENDIAN=y
++CONFIG_PPC64_BOOT_WRAPPER=y
++CONFIG_64BIT=y
++CONFIG_WORD_SIZE=64
++CONFIG_ARCH_PHYS_ADDR_T_64BIT=y
++CONFIG_ARCH_DMA_ADDR_T_64BIT=y
++CONFIG_MMU=y
++CONFIG_HAVE_SETUP_PER_CPU_AREA=y
++CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y
++CONFIG_NR_IRQS=512
++CONFIG_STACKTRACE_SUPPORT=y
++CONFIG_HAVE_LATENCYTOP_SUPPORT=y
++CONFIG_TRACE_IRQFLAGS_SUPPORT=y
++CONFIG_LOCKDEP_SUPPORT=y
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_ARCH_HAS_ILOG2_U32=y
++CONFIG_ARCH_HAS_ILOG2_U64=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK=y
++CONFIG_PPC=y
++CONFIG_GENERIC_CSUM=y
++CONFIG_EARLY_PRINTK=y
++CONFIG_PANIC_TIMEOUT=0
++CONFIG_COMPAT=y
++CONFIG_SYSVIPC_COMPAT=y
++CONFIG_SCHED_OMIT_FRAME_POINTER=y
++CONFIG_ARCH_MAY_HAVE_PC_FDC=y
++CONFIG_PPC_UDBG_16550=y
++# CONFIG_GENERIC_TBSYNC is not set
++CONFIG_AUDIT_ARCH=y
++CONFIG_GENERIC_BUG=y
++CONFIG_EPAPR_BOOT=y
++# CONFIG_DEFAULT_UIMAGE is not set
++CONFIG_ARCH_HIBERNATION_POSSIBLE=y
++CONFIG_ARCH_SUSPEND_POSSIBLE=y
++# CONFIG_PPC_DCR_NATIVE is not set
++# CONFIG_PPC_DCR_MMIO is not set
++# CONFIG_PPC_OF_PLATFORM_PCI is not set
++CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
++CONFIG_ARCH_SUPPORTS_UPROBES=y
++CONFIG_PPC_EMULATE_SSTEP=y
++CONFIG_ZONE_DMA32=y
++CONFIG_PGTABLE_LEVELS=3
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++CONFIG_IRQ_WORK=y
++
++#
++# General setup
++#
++CONFIG_INIT_ENV_ARG_LIMIT=32
++CONFIG_CROSS_COMPILE=""
++# CONFIG_COMPILE_TEST is not set
++CONFIG_LOCALVERSION="-kfd"
++# CONFIG_LOCALVERSION_AUTO is not set
++CONFIG_DEFAULT_HOSTNAME="(none)"
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++CONFIG_SYSVIPC_SYSCTL=y
++CONFIG_POSIX_MQUEUE=y
++CONFIG_POSIX_MQUEUE_SYSCTL=y
++CONFIG_CROSS_MEMORY_ATTACH=y
++CONFIG_FHANDLE=y
++CONFIG_USELIB=y
++CONFIG_AUDIT=y
++CONFIG_HAVE_ARCH_AUDITSYSCALL=y
++CONFIG_AUDITSYSCALL=y
++CONFIG_AUDIT_WATCH=y
++CONFIG_AUDIT_TREE=y
++
++#
++# IRQ subsystem
++#
++CONFIG_GENERIC_IRQ_SHOW=y
++CONFIG_GENERIC_IRQ_SHOW_LEVEL=y
++CONFIG_GENERIC_IRQ_CHIP=y
++CONFIG_IRQ_DOMAIN=y
++CONFIG_GENERIC_MSI_IRQ=y
++# CONFIG_IRQ_DOMAIN_DEBUG is not set
++CONFIG_IRQ_FORCED_THREADING=y
++CONFIG_SPARSE_IRQ=y
++CONFIG_GENERIC_TIME_VSYSCALL_OLD=y
++CONFIG_GENERIC_CLOCKEVENTS=y
++CONFIG_ARCH_HAS_TICK_BROADCAST=y
++CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
++CONFIG_GENERIC_CMOS_UPDATE=y
++
++#
++# Timers subsystem
++#
++CONFIG_TICK_ONESHOT=y
++CONFIG_NO_HZ_COMMON=y
++# CONFIG_HZ_PERIODIC is not set
++CONFIG_NO_HZ_IDLE=y
++# CONFIG_NO_HZ_FULL is not set
++CONFIG_NO_HZ=y
++CONFIG_HIGH_RES_TIMERS=y
++
++#
++# CPU/Task time and stats accounting
++#
++CONFIG_TICK_CPU_ACCOUNTING=y
++# CONFIG_VIRT_CPU_ACCOUNTING_NATIVE is not set
++# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set
++CONFIG_BSD_PROCESS_ACCT=y
++CONFIG_BSD_PROCESS_ACCT_V3=y
++CONFIG_TASKSTATS=y
++CONFIG_TASK_DELAY_ACCT=y
++CONFIG_TASK_XACCT=y
++CONFIG_TASK_IO_ACCOUNTING=y
++
++#
++# RCU Subsystem
++#
++CONFIG_TREE_RCU=y
++# CONFIG_RCU_EXPERT is not set
++CONFIG_SRCU=y
++# CONFIG_TASKS_RCU is not set
++CONFIG_RCU_STALL_COMMON=y
++# CONFIG_TREE_RCU_TRACE is not set
++# CONFIG_RCU_EXPEDITE_BOOT is not set
++CONFIG_BUILD_BIN2C=y
++# CONFIG_IKCONFIG is not set
++CONFIG_LOG_BUF_SHIFT=17
++CONFIG_LOG_CPU_MAX_BUF_SHIFT=12
++CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y
++CONFIG_NUMA_BALANCING=y
++CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y
++CONFIG_CGROUPS=y
++# CONFIG_CGROUP_DEBUG is not set
++CONFIG_CGROUP_FREEZER=y
++CONFIG_CGROUP_PIDS=y
++CONFIG_CGROUP_DEVICE=y
++CONFIG_CPUSETS=y
++CONFIG_PROC_PID_CPUSET=y
++CONFIG_CGROUP_CPUACCT=y
++CONFIG_PAGE_COUNTER=y
++CONFIG_MEMCG=y
++CONFIG_MEMCG_SWAP=y
++# CONFIG_MEMCG_SWAP_ENABLED is not set
++CONFIG_MEMCG_KMEM=y
++CONFIG_CGROUP_HUGETLB=y
++CONFIG_CGROUP_PERF=y
++CONFIG_CGROUP_SCHED=y
++CONFIG_FAIR_GROUP_SCHED=y
++CONFIG_CFS_BANDWIDTH=y
++# CONFIG_RT_GROUP_SCHED is not set
++CONFIG_BLK_CGROUP=y
++# CONFIG_DEBUG_BLK_CGROUP is not set
++CONFIG_CGROUP_WRITEBACK=y
++CONFIG_CHECKPOINT_RESTORE=y
++CONFIG_NAMESPACES=y
++CONFIG_UTS_NS=y
++CONFIG_IPC_NS=y
++CONFIG_USER_NS=y
++CONFIG_PID_NS=y
++CONFIG_NET_NS=y
++CONFIG_SCHED_AUTOGROUP=y
++# CONFIG_SYSFS_DEPRECATED is not set
++CONFIG_RELAY=y
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_INITRAMFS_SOURCE=""
++CONFIG_RD_GZIP=y
++CONFIG_RD_BZIP2=y
++CONFIG_RD_LZMA=y
++CONFIG_RD_XZ=y
++CONFIG_RD_LZO=y
++CONFIG_RD_LZ4=y
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++CONFIG_SYSCTL=y
++CONFIG_ANON_INODES=y
++CONFIG_SYSCTL_EXCEPTION_TRACE=y
++CONFIG_HAVE_PCSPKR_PLATFORM=y
++CONFIG_BPF=y
++CONFIG_EXPERT=y
++CONFIG_MULTIUSER=y
++CONFIG_SGETMASK_SYSCALL=y
++CONFIG_SYSFS_SYSCALL=y
++CONFIG_SYSCTL_SYSCALL=y
++CONFIG_KALLSYMS=y
++CONFIG_KALLSYMS_ALL=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_PCSPKR_PLATFORM=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_EPOLL=y
++CONFIG_SIGNALFD=y
++CONFIG_TIMERFD=y
++CONFIG_EVENTFD=y
++CONFIG_BPF_SYSCALL=y
++CONFIG_SHMEM=y
++CONFIG_AIO=y
++CONFIG_ADVISE_SYSCALLS=y
++CONFIG_USERFAULTFD=y
++CONFIG_PCI_QUIRKS=y
++CONFIG_MEMBARRIER=y
++# CONFIG_EMBEDDED is not set
++CONFIG_HAVE_PERF_EVENTS=y
++
++#
++# Kernel Performance Events And Counters
++#
++CONFIG_PERF_EVENTS=y
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_SLUB_DEBUG=y
++# CONFIG_COMPAT_BRK is not set
++# CONFIG_SLAB is not set
++CONFIG_SLUB=y
++# CONFIG_SLOB is not set
++CONFIG_SLUB_CPU_PARTIAL=y
++CONFIG_SYSTEM_DATA_VERIFICATION=y
++CONFIG_PROFILING=y
++CONFIG_TRACEPOINTS=y
++CONFIG_KEXEC_CORE=y
++CONFIG_OPROFILE=m
++CONFIG_HAVE_OPROFILE=y
++CONFIG_KPROBES=y
++CONFIG_JUMP_LABEL=y
++# CONFIG_STATIC_KEYS_SELFTEST is not set
++CONFIG_UPROBES=y
++CONFIG_HAVE_64BIT_ALIGNED_ACCESS=y
++CONFIG_ARCH_USE_BUILTIN_BSWAP=y
++CONFIG_KRETPROBES=y
++CONFIG_HAVE_IOREMAP_PROT=y
++CONFIG_HAVE_KPROBES=y
++CONFIG_HAVE_KRETPROBES=y
++CONFIG_HAVE_ARCH_TRACEHOOK=y
++CONFIG_HAVE_DMA_ATTRS=y
++CONFIG_GENERIC_SMP_IDLE_THREAD=y
++CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
++CONFIG_HAVE_DMA_API_DEBUG=y
++CONFIG_HAVE_HW_BREAKPOINT=y
++CONFIG_HAVE_PERF_EVENTS_NMI=y
++CONFIG_HAVE_ARCH_JUMP_LABEL=y
++CONFIG_HAVE_RCU_TABLE_FREE=y
++CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y
++CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y
++CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y
++CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y
++CONFIG_HAVE_ARCH_SECCOMP_FILTER=y
++CONFIG_SECCOMP_FILTER=y
++# CONFIG_CC_STACKPROTECTOR is not set
++CONFIG_HAVE_CONTEXT_TRACKING=y
++CONFIG_HAVE_VIRT_CPU_ACCOUNTING=y
++CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y
++CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y
++CONFIG_HAVE_MOD_ARCH_SPECIFIC=y
++CONFIG_MODULES_USE_ELF_RELA=y
++CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y
++CONFIG_ARCH_HAS_ELF_RANDOMIZE=y
++CONFIG_CLONE_BACKWARDS=y
++CONFIG_OLD_SIGSUSPEND=y
++CONFIG_COMPAT_OLD_SIGACTION=y
++
++#
++# GCOV-based kernel profiling
++#
++# CONFIG_GCOV_KERNEL is not set
++CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y
++# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
++CONFIG_SLABINFO=y
++CONFIG_RT_MUTEXES=y
++CONFIG_BASE_SMALL=0
++CONFIG_MODULES=y
++# CONFIG_MODULE_FORCE_LOAD is not set
++CONFIG_MODULE_UNLOAD=y
++# CONFIG_MODULE_FORCE_UNLOAD is not set
++CONFIG_MODVERSIONS=y
++CONFIG_MODULE_SRCVERSION_ALL=y
++CONFIG_MODULE_SIG=y
++# CONFIG_MODULE_SIG_FORCE is not set
++CONFIG_MODULE_SIG_ALL=y
++# CONFIG_MODULE_SIG_SHA1 is not set
++# CONFIG_MODULE_SIG_SHA224 is not set
++# CONFIG_MODULE_SIG_SHA256 is not set
++# CONFIG_MODULE_SIG_SHA384 is not set
++CONFIG_MODULE_SIG_SHA512=y
++CONFIG_MODULE_SIG_HASH="sha512"
++# CONFIG_MODULE_COMPRESS is not set
++CONFIG_MODULES_TREE_LOOKUP=y
++CONFIG_BLOCK=y
++CONFIG_BLK_DEV_BSG=y
++CONFIG_BLK_DEV_BSGLIB=y
++CONFIG_BLK_DEV_INTEGRITY=y
++CONFIG_BLK_DEV_THROTTLING=y
++CONFIG_BLK_CMDLINE_PARSER=y
++
++#
++# Partition Types
++#
++CONFIG_PARTITION_ADVANCED=y
++# CONFIG_ACORN_PARTITION is not set
++CONFIG_AIX_PARTITION=y
++CONFIG_OSF_PARTITION=y
++CONFIG_AMIGA_PARTITION=y
++CONFIG_ATARI_PARTITION=y
++CONFIG_MAC_PARTITION=y
++CONFIG_MSDOS_PARTITION=y
++CONFIG_BSD_DISKLABEL=y
++CONFIG_MINIX_SUBPARTITION=y
++CONFIG_SOLARIS_X86_PARTITION=y
++CONFIG_UNIXWARE_DISKLABEL=y
++CONFIG_LDM_PARTITION=y
++# CONFIG_LDM_DEBUG is not set
++CONFIG_SGI_PARTITION=y
++CONFIG_ULTRIX_PARTITION=y
++CONFIG_SUN_PARTITION=y
++CONFIG_KARMA_PARTITION=y
++CONFIG_EFI_PARTITION=y
++CONFIG_SYSV68_PARTITION=y
++CONFIG_CMDLINE_PARTITION=y
++CONFIG_BLOCK_COMPAT=y
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++CONFIG_CFQ_GROUP_IOSCHED=y
++CONFIG_DEFAULT_DEADLINE=y
++# CONFIG_DEFAULT_CFQ is not set
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="deadline"
++CONFIG_PREEMPT_NOTIFIERS=y
++CONFIG_PADATA=y
++CONFIG_ASN1=y
++CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
++CONFIG_INLINE_READ_UNLOCK=y
++CONFIG_INLINE_READ_UNLOCK_IRQ=y
++CONFIG_INLINE_WRITE_UNLOCK=y
++CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
++CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y
++CONFIG_MUTEX_SPIN_ON_OWNER=y
++CONFIG_RWSEM_SPIN_ON_OWNER=y
++CONFIG_LOCK_SPIN_ON_OWNER=y
++CONFIG_FREEZER=y
++CONFIG_PPC_MSI_BITMAP=y
++CONFIG_PPC_XICS=y
++CONFIG_PPC_ICP_NATIVE=y
++CONFIG_PPC_ICP_HV=y
++CONFIG_PPC_ICS_RTAS=y
++CONFIG_PPC_SCOM=y
++CONFIG_SCOM_DEBUGFS=y
++# CONFIG_GE_FPGA is not set
++
++#
++# Platform support
++#
++CONFIG_PPC_POWERNV=y
++CONFIG_OPAL_PRD=m
++CONFIG_PPC_PSERIES=y
++CONFIG_PPC_SPLPAR=y
++CONFIG_DTL=y
++CONFIG_PSERIES_ENERGY=m
++CONFIG_SCANLOG=m
++CONFIG_IO_EVENT_IRQ=y
++CONFIG_LPARCFG=y
++CONFIG_PPC_SMLPAR=y
++CONFIG_CMM=m
++CONFIG_HV_PERF_CTRS=y
++# CONFIG_PPC_CELL is not set
++# CONFIG_PPC_CELL_NATIVE is not set
++# CONFIG_PQ2ADS is not set
++CONFIG_KVM_GUEST=y
++CONFIG_EPAPR_PARAVIRT=y
++CONFIG_PPC_NATIVE=y
++CONFIG_PPC_OF_BOOT_TRAMPOLINE=y
++# CONFIG_UDBG_RTAS_CONSOLE is not set
++CONFIG_PPC_SMP_MUXED_IPI=y
++# CONFIG_IPIC is not set
++CONFIG_MPIC=y
++# CONFIG_PPC_EPAPR_HV_PIC is not set
++# CONFIG_MPIC_WEIRD is not set
++# CONFIG_MPIC_MSGR is not set
++CONFIG_PPC_I8259=y
++# CONFIG_U3_DART is not set
++CONFIG_PPC_RTAS=y
++CONFIG_RTAS_ERROR_LOGGING=y
++CONFIG_PPC_RTAS_DAEMON=y
++CONFIG_RTAS_PROC=y
++CONFIG_RTAS_FLASH=m
++# CONFIG_MMIO_NVRAM is not set
++# CONFIG_MPIC_U3_HT_IRQS is not set
++CONFIG_IBMVIO=y
++CONFIG_IBMEBUS=y
++CONFIG_EEH=y
++# CONFIG_PPC_MPC106 is not set
++# CONFIG_PPC_970_NAP is not set
++CONFIG_PPC_P7_NAP=y
++CONFIG_PPC_INDIRECT_PIO=y
++
++#
++# CPU Frequency scaling
++#
++CONFIG_CPU_FREQ=y
++CONFIG_CPU_FREQ_GOV_COMMON=y
++CONFIG_CPU_FREQ_STAT=y
++CONFIG_CPU_FREQ_STAT_DETAILS=y
++CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
++# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
++# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
++# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
++# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
++CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
++CONFIG_CPU_FREQ_GOV_POWERSAVE=y
++CONFIG_CPU_FREQ_GOV_USERSPACE=y
++CONFIG_CPU_FREQ_GOV_ONDEMAND=y
++CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
++
++#
++# CPU frequency scaling drivers
++#
++CONFIG_POWERNV_CPUFREQ=y
++
++#
++# CPUIdle driver
++#
++
++#
++# CPU Idle
++#
++CONFIG_CPU_IDLE=y
++CONFIG_CPU_IDLE_GOV_LADDER=y
++CONFIG_CPU_IDLE_GOV_MENU=y
++
++#
++# POWERPC CPU Idle Drivers
++#
++CONFIG_PSERIES_CPUIDLE=y
++CONFIG_POWERNV_CPUIDLE=y
++# CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is not set
++# CONFIG_FSL_ULI1575 is not set
++# CONFIG_SIMPLE_GPIO is not set
++
++#
++# Kernel options
++#
++# CONFIG_HZ_100 is not set
++CONFIG_HZ_250=y
++# CONFIG_HZ_300 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=250
++CONFIG_SCHED_HRTICK=y
++# CONFIG_PREEMPT_NONE is not set
++CONFIG_PREEMPT_VOLUNTARY=y
++# CONFIG_PREEMPT is not set
++CONFIG_BINFMT_ELF=y
++CONFIG_COMPAT_BINFMT_ELF=y
++CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
++CONFIG_BINFMT_SCRIPT=y
++# CONFIG_HAVE_AOUT is not set
++CONFIG_BINFMT_MISC=m
++CONFIG_COREDUMP=y
++CONFIG_HUGETLB_PAGE_SIZE_VARIABLE=y
++CONFIG_PPC_TRANSACTIONAL_MEM=y
++CONFIG_IOMMU_HELPER=y
++CONFIG_SWIOTLB=y
++CONFIG_HOTPLUG_CPU=y
++CONFIG_ARCH_CPU_PROBE_RELEASE=y
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
++CONFIG_ARCH_HAS_WALK_MEMORY=y
++CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
++CONFIG_PPC64_SUPPORTS_MEMORY_FAILURE=y
++CONFIG_KEXEC=y
++CONFIG_CRASH_DUMP=y
++CONFIG_FA_DUMP=y
++CONFIG_IRQ_ALL_CPUS=y
++CONFIG_NUMA=y
++CONFIG_NODES_SHIFT=8
++CONFIG_USE_PERCPU_NUMA_NODE_ID=y
++CONFIG_HAVE_MEMORYLESS_NODES=y
++CONFIG_ARCH_SELECT_MEMORY_MODEL=y
++CONFIG_ARCH_SPARSEMEM_ENABLE=y
++CONFIG_ARCH_SPARSEMEM_DEFAULT=y
++CONFIG_SYS_SUPPORTS_HUGETLBFS=y
++CONFIG_SELECT_MEMORY_MODEL=y
++CONFIG_SPARSEMEM_MANUAL=y
++CONFIG_SPARSEMEM=y
++CONFIG_NEED_MULTIPLE_NODES=y
++CONFIG_HAVE_MEMORY_PRESENT=y
++CONFIG_SPARSEMEM_EXTREME=y
++CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
++CONFIG_SPARSEMEM_VMEMMAP=y
++CONFIG_HAVE_MEMBLOCK=y
++CONFIG_HAVE_MEMBLOCK_NODE_MAP=y
++CONFIG_HAVE_GENERIC_RCU_GUP=y
++CONFIG_NO_BOOTMEM=y
++CONFIG_MEMORY_ISOLATION=y
++CONFIG_HAVE_BOOTMEM_INFO_NODE=y
++CONFIG_MEMORY_HOTPLUG=y
++CONFIG_MEMORY_HOTPLUG_SPARSE=y
++CONFIG_MEMORY_HOTREMOVE=y
++CONFIG_SPLIT_PTLOCK_CPUS=4
++CONFIG_MEMORY_BALLOON=y
++CONFIG_BALLOON_COMPACTION=y
++CONFIG_COMPACTION=y
++CONFIG_MIGRATION=y
++CONFIG_PHYS_ADDR_T_64BIT=y
++CONFIG_ZONE_DMA_FLAG=1
++CONFIG_BOUNCE=y
++CONFIG_MMU_NOTIFIER=y
++CONFIG_KSM=y
++CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
++CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y
++CONFIG_MEMORY_FAILURE=y
++CONFIG_HWPOISON_INJECT=m
++CONFIG_TRANSPARENT_HUGEPAGE=y
++CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y
++# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set
++CONFIG_CLEANCACHE=y
++CONFIG_FRONTSWAP=y
++CONFIG_CMA=y
++# CONFIG_CMA_DEBUG is not set
++# CONFIG_CMA_DEBUGFS is not set
++CONFIG_CMA_AREAS=7
++CONFIG_ZSWAP=y
++CONFIG_ZPOOL=y
++CONFIG_ZBUD=y
++CONFIG_ZSMALLOC=y
++CONFIG_PGTABLE_MAPPING=y
++# CONFIG_ZSMALLOC_STAT is not set
++CONFIG_IDLE_PAGE_TRACKING=y
++CONFIG_FRAME_VECTOR=y
++CONFIG_ARCH_MEMORY_PROBE=y
++CONFIG_NODES_SPAN_OTHER_NODES=y
++# CONFIG_PPC_4K_PAGES is not set
++CONFIG_PPC_64K_PAGES=y
++CONFIG_FORCE_MAX_ZONEORDER=9
++CONFIG_PPC_SUBPAGE_PROT=y
++CONFIG_PPC_COPRO_BASE=y
++CONFIG_SCHED_SMT=y
++CONFIG_PPC_DENORMALISATION=y
++# CONFIG_CMDLINE_BOOL is not set
++CONFIG_EXTRA_TARGETS=""
++CONFIG_SUSPEND=y
++CONFIG_SUSPEND_FREEZER=y
++# CONFIG_SUSPEND_SKIP_SYNC is not set
++# CONFIG_HIBERNATION is not set
++CONFIG_PM_SLEEP=y
++CONFIG_PM_SLEEP_SMP=y
++# CONFIG_PM_AUTOSLEEP is not set
++CONFIG_PM_WAKELOCKS=y
++CONFIG_PM_WAKELOCKS_LIMIT=100
++CONFIG_PM_WAKELOCKS_GC=y
++CONFIG_PM=y
++CONFIG_PM_DEBUG=y
++CONFIG_PM_ADVANCED_DEBUG=y
++# CONFIG_PM_TEST_SUSPEND is not set
++CONFIG_PM_SLEEP_DEBUG=y
++# CONFIG_DPM_WATCHDOG is not set
++CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y
++CONFIG_SECCOMP=y
++CONFIG_ISA_DMA_API=y
++
++#
++# Bus options
++#
++CONFIG_ZONE_DMA=y
++CONFIG_NEED_DMA_MAP_STATE=y
++CONFIG_NEED_SG_DMA_LENGTH=y
++CONFIG_GENERIC_ISA_DMA=y
++# CONFIG_PPC_INDIRECT_PCI is not set
++CONFIG_FSL_LBC=y
++CONFIG_PCI=y
++CONFIG_PCI_DOMAINS=y
++CONFIG_PCI_SYSCALL=y
++CONFIG_PCIEPORTBUS=y
++# CONFIG_HOTPLUG_PCI_PCIE is not set
++CONFIG_PCIEAER=y
++# CONFIG_PCIE_ECRC is not set
++# CONFIG_PCIEAER_INJECT is not set
++CONFIG_PCIEASPM=y
++CONFIG_PCIEASPM_DEBUG=y
++CONFIG_PCIEASPM_DEFAULT=y
++# CONFIG_PCIEASPM_POWERSAVE is not set
++# CONFIG_PCIEASPM_PERFORMANCE is not set
++CONFIG_PCIE_PME=y
++CONFIG_PCI_BUS_ADDR_T_64BIT=y
++CONFIG_PCI_MSI=y
++# CONFIG_PCI_DEBUG is not set
++CONFIG_PCI_REALLOC_ENABLE_AUTO=y
++CONFIG_PCI_STUB=m
++CONFIG_PCI_ATS=y
++CONFIG_PCI_IOV=y
++CONFIG_PCI_PRI=y
++CONFIG_PCI_PASID=y
++
++#
++# PCI host controller drivers
++#
++# CONFIG_PCCARD is not set
++CONFIG_HOTPLUG_PCI=y
++CONFIG_HOTPLUG_PCI_CPCI=y
++# CONFIG_HOTPLUG_PCI_SHPC is not set
++CONFIG_HOTPLUG_PCI_RPA=m
++CONFIG_HOTPLUG_PCI_RPA_DLPAR=m
++# CONFIG_HAS_RAPIDIO is not set
++CONFIG_RAPIDIO=y
++CONFIG_RAPIDIO_TSI721=m
++CONFIG_RAPIDIO_DISC_TIMEOUT=30
++# CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS is not set
++CONFIG_RAPIDIO_DMA_ENGINE=y
++# CONFIG_RAPIDIO_DEBUG is not set
++CONFIG_RAPIDIO_ENUM_BASIC=m
++
++#
++# RapidIO Switch drivers
++#
++CONFIG_RAPIDIO_TSI57X=m
++CONFIG_RAPIDIO_CPS_XX=m
++CONFIG_RAPIDIO_TSI568=m
++CONFIG_RAPIDIO_CPS_GEN2=m
++CONFIG_NONSTATIC_KERNEL=y
++CONFIG_RELOCATABLE=y
++CONFIG_PAGE_OFFSET=0xc000000000000000
++CONFIG_KERNEL_START=0xc000000000000000
++CONFIG_PHYSICAL_START=0x00000000
++CONFIG_ARCH_RANDOM=y
++CONFIG_NET=y
++CONFIG_COMPAT_NETLINK_MESSAGES=y
++CONFIG_NET_INGRESS=y
++
++#
++# Networking options
++#
++CONFIG_PACKET=y
++CONFIG_PACKET_DIAG=m
++CONFIG_UNIX=y
++CONFIG_UNIX_DIAG=m
++CONFIG_XFRM=y
++CONFIG_XFRM_ALGO=m
++CONFIG_XFRM_USER=m
++# CONFIG_XFRM_SUB_POLICY is not set
++# CONFIG_XFRM_MIGRATE is not set
++CONFIG_XFRM_STATISTICS=y
++CONFIG_XFRM_IPCOMP=m
++CONFIG_NET_KEY=m
++# CONFIG_NET_KEY_MIGRATE is not set
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++CONFIG_IP_ADVANCED_ROUTER=y
++CONFIG_IP_FIB_TRIE_STATS=y
++CONFIG_IP_MULTIPLE_TABLES=y
++CONFIG_IP_ROUTE_MULTIPATH=y
++CONFIG_IP_ROUTE_VERBOSE=y
++CONFIG_IP_ROUTE_CLASSID=y
++# CONFIG_IP_PNP is not set
++CONFIG_NET_IPIP=m
++CONFIG_NET_IPGRE_DEMUX=m
++CONFIG_NET_IP_TUNNEL=m
++CONFIG_NET_IPGRE=m
++CONFIG_NET_IPGRE_BROADCAST=y
++CONFIG_IP_MROUTE=y
++# CONFIG_IP_MROUTE_MULTIPLE_TABLES is not set
++CONFIG_IP_PIMSM_V1=y
++CONFIG_IP_PIMSM_V2=y
++CONFIG_SYN_COOKIES=y
++CONFIG_NET_IPVTI=m
++CONFIG_NET_UDP_TUNNEL=m
++CONFIG_NET_FOU=m
++CONFIG_NET_FOU_IP_TUNNELS=y
++CONFIG_INET_AH=m
++CONFIG_INET_ESP=m
++CONFIG_INET_IPCOMP=m
++CONFIG_INET_XFRM_TUNNEL=m
++CONFIG_INET_TUNNEL=m
++CONFIG_INET_XFRM_MODE_TRANSPORT=m
++CONFIG_INET_XFRM_MODE_TUNNEL=m
++CONFIG_INET_XFRM_MODE_BEET=m
++CONFIG_INET_LRO=y
++CONFIG_INET_DIAG=m
++CONFIG_INET_TCP_DIAG=m
++CONFIG_INET_UDP_DIAG=m
++CONFIG_TCP_CONG_ADVANCED=y
++CONFIG_TCP_CONG_BIC=m
++CONFIG_TCP_CONG_CUBIC=y
++CONFIG_TCP_CONG_WESTWOOD=m
++CONFIG_TCP_CONG_HTCP=m
++CONFIG_TCP_CONG_HSTCP=m
++CONFIG_TCP_CONG_HYBLA=m
++CONFIG_TCP_CONG_VEGAS=m
++CONFIG_TCP_CONG_SCALABLE=m
++CONFIG_TCP_CONG_LP=m
++CONFIG_TCP_CONG_VENO=m
++CONFIG_TCP_CONG_YEAH=m
++CONFIG_TCP_CONG_ILLINOIS=m
++CONFIG_TCP_CONG_DCTCP=m
++CONFIG_TCP_CONG_CDG=m
++CONFIG_DEFAULT_CUBIC=y
++# CONFIG_DEFAULT_RENO is not set
++CONFIG_DEFAULT_TCP_CONG="cubic"
++CONFIG_TCP_MD5SIG=y
++CONFIG_IPV6=y
++CONFIG_IPV6_ROUTER_PREF=y
++CONFIG_IPV6_ROUTE_INFO=y
++# CONFIG_IPV6_OPTIMISTIC_DAD is not set
++CONFIG_INET6_AH=m
++CONFIG_INET6_ESP=m
++CONFIG_INET6_IPCOMP=m
++CONFIG_IPV6_MIP6=m
++CONFIG_IPV6_ILA=m
++CONFIG_INET6_XFRM_TUNNEL=m
++CONFIG_INET6_TUNNEL=m
++CONFIG_INET6_XFRM_MODE_TRANSPORT=m
++CONFIG_INET6_XFRM_MODE_TUNNEL=m
++CONFIG_INET6_XFRM_MODE_BEET=m
++CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
++CONFIG_IPV6_VTI=m
++CONFIG_IPV6_SIT=m
++CONFIG_IPV6_SIT_6RD=y
++CONFIG_IPV6_NDISC_NODETYPE=y
++CONFIG_IPV6_TUNNEL=m
++CONFIG_IPV6_GRE=m
++CONFIG_IPV6_MULTIPLE_TABLES=y
++CONFIG_IPV6_SUBTREES=y
++CONFIG_IPV6_MROUTE=y
++CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
++CONFIG_IPV6_PIMSM_V2=y
++CONFIG_NETLABEL=y
++CONFIG_NETWORK_SECMARK=y
++CONFIG_NET_PTP_CLASSIFY=y
++# CONFIG_NETWORK_PHY_TIMESTAMPING is not set
++CONFIG_NETFILTER=y
++# CONFIG_NETFILTER_DEBUG is not set
++CONFIG_NETFILTER_ADVANCED=y
++CONFIG_BRIDGE_NETFILTER=m
++
++#
++# Core Netfilter Configuration
++#
++CONFIG_NETFILTER_INGRESS=y
++CONFIG_NETFILTER_NETLINK=m
++CONFIG_NETFILTER_NETLINK_ACCT=m
++CONFIG_NETFILTER_NETLINK_QUEUE=m
++CONFIG_NETFILTER_NETLINK_LOG=m
++CONFIG_NF_CONNTRACK=m
++CONFIG_NF_LOG_COMMON=m
++CONFIG_NF_CONNTRACK_MARK=y
++CONFIG_NF_CONNTRACK_SECMARK=y
++CONFIG_NF_CONNTRACK_ZONES=y
++# CONFIG_NF_CONNTRACK_PROCFS is not set
++CONFIG_NF_CONNTRACK_EVENTS=y
++CONFIG_NF_CONNTRACK_TIMEOUT=y
++CONFIG_NF_CONNTRACK_TIMESTAMP=y
++CONFIG_NF_CONNTRACK_LABELS=y
++CONFIG_NF_CT_PROTO_DCCP=m
++CONFIG_NF_CT_PROTO_GRE=m
++CONFIG_NF_CT_PROTO_SCTP=m
++CONFIG_NF_CT_PROTO_UDPLITE=m
++CONFIG_NF_CONNTRACK_AMANDA=m
++CONFIG_NF_CONNTRACK_FTP=m
++CONFIG_NF_CONNTRACK_H323=m
++CONFIG_NF_CONNTRACK_IRC=m
++CONFIG_NF_CONNTRACK_BROADCAST=m
++CONFIG_NF_CONNTRACK_NETBIOS_NS=m
++CONFIG_NF_CONNTRACK_SNMP=m
++CONFIG_NF_CONNTRACK_PPTP=m
++CONFIG_NF_CONNTRACK_SANE=m
++CONFIG_NF_CONNTRACK_SIP=m
++CONFIG_NF_CONNTRACK_TFTP=m
++CONFIG_NF_CT_NETLINK=m
++CONFIG_NF_CT_NETLINK_TIMEOUT=m
++CONFIG_NF_CT_NETLINK_HELPER=m
++CONFIG_NETFILTER_NETLINK_GLUE_CT=y
++CONFIG_NF_NAT=m
++CONFIG_NF_NAT_NEEDED=y
++CONFIG_NF_NAT_PROTO_DCCP=m
++CONFIG_NF_NAT_PROTO_UDPLITE=m
++CONFIG_NF_NAT_PROTO_SCTP=m
++CONFIG_NF_NAT_AMANDA=m
++CONFIG_NF_NAT_FTP=m
++CONFIG_NF_NAT_IRC=m
++CONFIG_NF_NAT_SIP=m
++CONFIG_NF_NAT_TFTP=m
++CONFIG_NF_NAT_REDIRECT=m
++CONFIG_NETFILTER_SYNPROXY=m
++CONFIG_NF_TABLES=m
++CONFIG_NF_TABLES_INET=m
++CONFIG_NF_TABLES_NETDEV=m
++CONFIG_NFT_EXTHDR=m
++CONFIG_NFT_META=m
++CONFIG_NFT_CT=m
++CONFIG_NFT_RBTREE=m
++CONFIG_NFT_HASH=m
++CONFIG_NFT_COUNTER=m
++CONFIG_NFT_LOG=m
++CONFIG_NFT_LIMIT=m
++CONFIG_NFT_MASQ=m
++CONFIG_NFT_REDIR=m
++CONFIG_NFT_NAT=m
++CONFIG_NFT_QUEUE=m
++CONFIG_NFT_REJECT=m
++CONFIG_NFT_REJECT_INET=m
++CONFIG_NFT_COMPAT=m
++CONFIG_NETFILTER_XTABLES=m
++
++#
++# Xtables combined modules
++#
++CONFIG_NETFILTER_XT_MARK=m
++CONFIG_NETFILTER_XT_CONNMARK=m
++CONFIG_NETFILTER_XT_SET=m
++
++#
++# Xtables targets
++#
++CONFIG_NETFILTER_XT_TARGET_AUDIT=m
++CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
++CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
++CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
++CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
++CONFIG_NETFILTER_XT_TARGET_CT=m
++CONFIG_NETFILTER_XT_TARGET_DSCP=m
++CONFIG_NETFILTER_XT_TARGET_HL=m
++CONFIG_NETFILTER_XT_TARGET_HMARK=m
++CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
++CONFIG_NETFILTER_XT_TARGET_LED=m
++CONFIG_NETFILTER_XT_TARGET_LOG=m
++CONFIG_NETFILTER_XT_TARGET_MARK=m
++CONFIG_NETFILTER_XT_NAT=m
++CONFIG_NETFILTER_XT_TARGET_NETMAP=m
++CONFIG_NETFILTER_XT_TARGET_NFLOG=m
++CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
++# CONFIG_NETFILTER_XT_TARGET_NOTRACK is not set
++CONFIG_NETFILTER_XT_TARGET_RATEEST=m
++CONFIG_NETFILTER_XT_TARGET_REDIRECT=m
++CONFIG_NETFILTER_XT_TARGET_TEE=m
++CONFIG_NETFILTER_XT_TARGET_TPROXY=m
++CONFIG_NETFILTER_XT_TARGET_TRACE=m
++CONFIG_NETFILTER_XT_TARGET_SECMARK=m
++CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
++CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
++
++#
++# Xtables matches
++#
++CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
++CONFIG_NETFILTER_XT_MATCH_BPF=m
++CONFIG_NETFILTER_XT_MATCH_CGROUP=m
++CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
++CONFIG_NETFILTER_XT_MATCH_COMMENT=m
++CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
++CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
++CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
++CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
++CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
++CONFIG_NETFILTER_XT_MATCH_CPU=m
++CONFIG_NETFILTER_XT_MATCH_DCCP=m
++CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
++CONFIG_NETFILTER_XT_MATCH_DSCP=m
++CONFIG_NETFILTER_XT_MATCH_ECN=m
++CONFIG_NETFILTER_XT_MATCH_ESP=m
++CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
++CONFIG_NETFILTER_XT_MATCH_HELPER=m
++CONFIG_NETFILTER_XT_MATCH_HL=m
++CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
++CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
++CONFIG_NETFILTER_XT_MATCH_IPVS=m
++CONFIG_NETFILTER_XT_MATCH_L2TP=m
++CONFIG_NETFILTER_XT_MATCH_LENGTH=m
++CONFIG_NETFILTER_XT_MATCH_LIMIT=m
++CONFIG_NETFILTER_XT_MATCH_MAC=m
++CONFIG_NETFILTER_XT_MATCH_MARK=m
++CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
++CONFIG_NETFILTER_XT_MATCH_NFACCT=m
++CONFIG_NETFILTER_XT_MATCH_OSF=m
++CONFIG_NETFILTER_XT_MATCH_OWNER=m
++CONFIG_NETFILTER_XT_MATCH_POLICY=m
++CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
++CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
++CONFIG_NETFILTER_XT_MATCH_QUOTA=m
++CONFIG_NETFILTER_XT_MATCH_RATEEST=m
++CONFIG_NETFILTER_XT_MATCH_REALM=m
++CONFIG_NETFILTER_XT_MATCH_RECENT=m
++CONFIG_NETFILTER_XT_MATCH_SCTP=m
++CONFIG_NETFILTER_XT_MATCH_SOCKET=m
++CONFIG_NETFILTER_XT_MATCH_STATE=m
++CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
++CONFIG_NETFILTER_XT_MATCH_STRING=m
++CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
++CONFIG_NETFILTER_XT_MATCH_TIME=m
++CONFIG_NETFILTER_XT_MATCH_U32=m
++CONFIG_IP_SET=m
++CONFIG_IP_SET_MAX=256
++CONFIG_IP_SET_BITMAP_IP=m
++CONFIG_IP_SET_BITMAP_IPMAC=m
++CONFIG_IP_SET_BITMAP_PORT=m
++CONFIG_IP_SET_HASH_IP=m
++CONFIG_IP_SET_HASH_IPMARK=m
++CONFIG_IP_SET_HASH_IPPORT=m
++CONFIG_IP_SET_HASH_IPPORTIP=m
++CONFIG_IP_SET_HASH_IPPORTNET=m
++CONFIG_IP_SET_HASH_MAC=m
++CONFIG_IP_SET_HASH_NETPORTNET=m
++CONFIG_IP_SET_HASH_NET=m
++CONFIG_IP_SET_HASH_NETNET=m
++CONFIG_IP_SET_HASH_NETPORT=m
++CONFIG_IP_SET_HASH_NETIFACE=m
++CONFIG_IP_SET_LIST_SET=m
++CONFIG_IP_VS=m
++CONFIG_IP_VS_IPV6=y
++# CONFIG_IP_VS_DEBUG is not set
++CONFIG_IP_VS_TAB_BITS=12
++
++#
++# IPVS transport protocol load balancing support
++#
++CONFIG_IP_VS_PROTO_TCP=y
++CONFIG_IP_VS_PROTO_UDP=y
++CONFIG_IP_VS_PROTO_AH_ESP=y
++CONFIG_IP_VS_PROTO_ESP=y
++CONFIG_IP_VS_PROTO_AH=y
++CONFIG_IP_VS_PROTO_SCTP=y
++
++#
++# IPVS scheduler
++#
++CONFIG_IP_VS_RR=m
++CONFIG_IP_VS_WRR=m
++CONFIG_IP_VS_LC=m
++CONFIG_IP_VS_WLC=m
++CONFIG_IP_VS_FO=m
++CONFIG_IP_VS_OVF=m
++CONFIG_IP_VS_LBLC=m
++CONFIG_IP_VS_LBLCR=m
++CONFIG_IP_VS_DH=m
++CONFIG_IP_VS_SH=m
++CONFIG_IP_VS_SED=m
++CONFIG_IP_VS_NQ=m
++
++#
++# IPVS SH scheduler
++#
++CONFIG_IP_VS_SH_TAB_BITS=8
++
++#
++# IPVS application helper
++#
++CONFIG_IP_VS_FTP=m
++CONFIG_IP_VS_NFCT=y
++CONFIG_IP_VS_PE_SIP=m
++
++#
++# IP: Netfilter Configuration
++#
++CONFIG_NF_DEFRAG_IPV4=m
++CONFIG_NF_CONNTRACK_IPV4=m
++CONFIG_NF_TABLES_IPV4=m
++CONFIG_NFT_CHAIN_ROUTE_IPV4=m
++CONFIG_NFT_REJECT_IPV4=m
++CONFIG_NFT_DUP_IPV4=m
++CONFIG_NF_TABLES_ARP=m
++CONFIG_NF_DUP_IPV4=m
++CONFIG_NF_LOG_ARP=m
++CONFIG_NF_LOG_IPV4=m
++CONFIG_NF_REJECT_IPV4=m
++CONFIG_NF_NAT_IPV4=m
++CONFIG_NFT_CHAIN_NAT_IPV4=m
++CONFIG_NF_NAT_MASQUERADE_IPV4=m
++CONFIG_NFT_MASQ_IPV4=m
++CONFIG_NFT_REDIR_IPV4=m
++CONFIG_NF_NAT_SNMP_BASIC=m
++CONFIG_NF_NAT_PROTO_GRE=m
++CONFIG_NF_NAT_PPTP=m
++CONFIG_NF_NAT_H323=m
++CONFIG_IP_NF_IPTABLES=m
++CONFIG_IP_NF_MATCH_AH=m
++CONFIG_IP_NF_MATCH_ECN=m
++CONFIG_IP_NF_MATCH_RPFILTER=m
++CONFIG_IP_NF_MATCH_TTL=m
++CONFIG_IP_NF_FILTER=m
++CONFIG_IP_NF_TARGET_REJECT=m
++CONFIG_IP_NF_TARGET_SYNPROXY=m
++CONFIG_IP_NF_NAT=m
++CONFIG_IP_NF_TARGET_MASQUERADE=m
++CONFIG_IP_NF_TARGET_NETMAP=m
++CONFIG_IP_NF_TARGET_REDIRECT=m
++CONFIG_IP_NF_MANGLE=m
++CONFIG_IP_NF_TARGET_CLUSTERIP=m
++CONFIG_IP_NF_TARGET_ECN=m
++CONFIG_IP_NF_TARGET_TTL=m
++CONFIG_IP_NF_RAW=m
++CONFIG_IP_NF_SECURITY=m
++CONFIG_IP_NF_ARPTABLES=m
++CONFIG_IP_NF_ARPFILTER=m
++CONFIG_IP_NF_ARP_MANGLE=m
++
++#
++# IPv6: Netfilter Configuration
++#
++CONFIG_NF_DEFRAG_IPV6=m
++CONFIG_NF_CONNTRACK_IPV6=m
++CONFIG_NF_TABLES_IPV6=m
++CONFIG_NFT_CHAIN_ROUTE_IPV6=m
++CONFIG_NFT_REJECT_IPV6=m
++CONFIG_NFT_DUP_IPV6=m
++CONFIG_NF_DUP_IPV6=m
++CONFIG_NF_REJECT_IPV6=m
++CONFIG_NF_LOG_IPV6=m
++CONFIG_NF_NAT_IPV6=m
++CONFIG_NFT_CHAIN_NAT_IPV6=m
++CONFIG_NF_NAT_MASQUERADE_IPV6=m
++CONFIG_NFT_MASQ_IPV6=m
++CONFIG_NFT_REDIR_IPV6=m
++CONFIG_IP6_NF_IPTABLES=m
++CONFIG_IP6_NF_MATCH_AH=m
++CONFIG_IP6_NF_MATCH_EUI64=m
++CONFIG_IP6_NF_MATCH_FRAG=m
++CONFIG_IP6_NF_MATCH_OPTS=m
++CONFIG_IP6_NF_MATCH_HL=m
++CONFIG_IP6_NF_MATCH_IPV6HEADER=m
++CONFIG_IP6_NF_MATCH_MH=m
++CONFIG_IP6_NF_MATCH_RPFILTER=m
++CONFIG_IP6_NF_MATCH_RT=m
++CONFIG_IP6_NF_TARGET_HL=m
++CONFIG_IP6_NF_FILTER=m
++CONFIG_IP6_NF_TARGET_REJECT=m
++CONFIG_IP6_NF_TARGET_SYNPROXY=m
++CONFIG_IP6_NF_MANGLE=m
++CONFIG_IP6_NF_RAW=m
++CONFIG_IP6_NF_SECURITY=m
++CONFIG_IP6_NF_NAT=m
++CONFIG_IP6_NF_TARGET_MASQUERADE=m
++CONFIG_IP6_NF_TARGET_NPT=m
++
++#
++# DECnet: Netfilter Configuration
++#
++CONFIG_DECNET_NF_GRABULATOR=m
++CONFIG_NF_TABLES_BRIDGE=m
++CONFIG_NFT_BRIDGE_META=m
++CONFIG_NFT_BRIDGE_REJECT=m
++CONFIG_NF_LOG_BRIDGE=m
++CONFIG_BRIDGE_NF_EBTABLES=m
++CONFIG_BRIDGE_EBT_BROUTE=m
++CONFIG_BRIDGE_EBT_T_FILTER=m
++CONFIG_BRIDGE_EBT_T_NAT=m
++CONFIG_BRIDGE_EBT_802_3=m
++CONFIG_BRIDGE_EBT_AMONG=m
++CONFIG_BRIDGE_EBT_ARP=m
++CONFIG_BRIDGE_EBT_IP=m
++CONFIG_BRIDGE_EBT_IP6=m
++CONFIG_BRIDGE_EBT_LIMIT=m
++CONFIG_BRIDGE_EBT_MARK=m
++CONFIG_BRIDGE_EBT_PKTTYPE=m
++CONFIG_BRIDGE_EBT_STP=m
++CONFIG_BRIDGE_EBT_VLAN=m
++CONFIG_BRIDGE_EBT_ARPREPLY=m
++CONFIG_BRIDGE_EBT_DNAT=m
++CONFIG_BRIDGE_EBT_MARK_T=m
++CONFIG_BRIDGE_EBT_REDIRECT=m
++CONFIG_BRIDGE_EBT_SNAT=m
++CONFIG_BRIDGE_EBT_LOG=m
++CONFIG_BRIDGE_EBT_NFLOG=m
++CONFIG_IP_DCCP=m
++CONFIG_INET_DCCP_DIAG=m
++
++#
++# DCCP CCIDs Configuration
++#
++# CONFIG_IP_DCCP_CCID2_DEBUG is not set
++# CONFIG_IP_DCCP_CCID3 is not set
++
++#
++# DCCP Kernel Hacking
++#
++# CONFIG_IP_DCCP_DEBUG is not set
++CONFIG_NET_DCCPPROBE=m
++CONFIG_IP_SCTP=m
++CONFIG_NET_SCTPPROBE=m
++# CONFIG_SCTP_DBG_OBJCNT is not set
++# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set
++CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y
++# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set
++CONFIG_SCTP_COOKIE_HMAC_MD5=y
++CONFIG_SCTP_COOKIE_HMAC_SHA1=y
++CONFIG_RDS=m
++CONFIG_RDS_RDMA=m
++CONFIG_RDS_TCP=m
++# CONFIG_RDS_DEBUG is not set
++CONFIG_TIPC=m
++CONFIG_TIPC_MEDIA_IB=y
++CONFIG_TIPC_MEDIA_UDP=y
++CONFIG_ATM=m
++CONFIG_ATM_CLIP=m
++# CONFIG_ATM_CLIP_NO_ICMP is not set
++CONFIG_ATM_LANE=m
++CONFIG_ATM_MPOA=m
++CONFIG_ATM_BR2684=m
++# CONFIG_ATM_BR2684_IPFILTER is not set
++CONFIG_L2TP=m
++CONFIG_L2TP_DEBUGFS=m
++CONFIG_L2TP_V3=y
++CONFIG_L2TP_IP=m
++CONFIG_L2TP_ETH=m
++CONFIG_STP=m
++CONFIG_GARP=m
++CONFIG_MRP=m
++CONFIG_BRIDGE=m
++CONFIG_BRIDGE_IGMP_SNOOPING=y
++CONFIG_BRIDGE_VLAN_FILTERING=y
++CONFIG_HAVE_NET_DSA=y
++CONFIG_VLAN_8021Q=m
++CONFIG_VLAN_8021Q_GVRP=y
++CONFIG_VLAN_8021Q_MVRP=y
++CONFIG_DECNET=m
++# CONFIG_DECNET_ROUTER is not set
++CONFIG_LLC=m
++CONFIG_LLC2=m
++CONFIG_IPX=m
++# CONFIG_IPX_INTERN is not set
++CONFIG_ATALK=m
++CONFIG_DEV_APPLETALK=m
++CONFIG_IPDDP=m
++CONFIG_IPDDP_ENCAP=y
++CONFIG_X25=m
++CONFIG_LAPB=m
++CONFIG_PHONET=m
++CONFIG_6LOWPAN=m
++CONFIG_6LOWPAN_NHC=m
++CONFIG_6LOWPAN_NHC_DEST=m
++CONFIG_6LOWPAN_NHC_FRAGMENT=m
++CONFIG_6LOWPAN_NHC_HOP=m
++CONFIG_6LOWPAN_NHC_IPV6=m
++CONFIG_6LOWPAN_NHC_MOBILITY=m
++CONFIG_6LOWPAN_NHC_ROUTING=m
++CONFIG_6LOWPAN_NHC_UDP=m
++CONFIG_IEEE802154=m
++CONFIG_IEEE802154_NL802154_EXPERIMENTAL=y
++CONFIG_IEEE802154_SOCKET=m
++CONFIG_IEEE802154_6LOWPAN=m
++CONFIG_MAC802154=m
++CONFIG_NET_SCHED=y
++
++#
++# Queueing/Scheduling
++#
++CONFIG_NET_SCH_CBQ=m
++CONFIG_NET_SCH_HTB=m
++CONFIG_NET_SCH_HFSC=m
++CONFIG_NET_SCH_ATM=m
++CONFIG_NET_SCH_PRIO=m
++CONFIG_NET_SCH_MULTIQ=m
++CONFIG_NET_SCH_RED=m
++CONFIG_NET_SCH_SFB=m
++CONFIG_NET_SCH_SFQ=m
++CONFIG_NET_SCH_TEQL=m
++CONFIG_NET_SCH_TBF=m
++CONFIG_NET_SCH_GRED=m
++CONFIG_NET_SCH_DSMARK=m
++CONFIG_NET_SCH_NETEM=m
++CONFIG_NET_SCH_DRR=m
++CONFIG_NET_SCH_MQPRIO=m
++CONFIG_NET_SCH_CHOKE=m
++CONFIG_NET_SCH_QFQ=m
++CONFIG_NET_SCH_CODEL=m
++CONFIG_NET_SCH_FQ_CODEL=m
++CONFIG_NET_SCH_FQ=m
++CONFIG_NET_SCH_HHF=m
++CONFIG_NET_SCH_PIE=m
++CONFIG_NET_SCH_INGRESS=m
++CONFIG_NET_SCH_PLUG=m
++
++#
++# Classification
++#
++CONFIG_NET_CLS=y
++CONFIG_NET_CLS_BASIC=m
++CONFIG_NET_CLS_TCINDEX=m
++CONFIG_NET_CLS_ROUTE4=m
++CONFIG_NET_CLS_FW=m
++CONFIG_NET_CLS_U32=m
++# CONFIG_CLS_U32_PERF is not set
++CONFIG_CLS_U32_MARK=y
++CONFIG_NET_CLS_RSVP=m
++CONFIG_NET_CLS_RSVP6=m
++CONFIG_NET_CLS_FLOW=m
++CONFIG_NET_CLS_CGROUP=m
++CONFIG_NET_CLS_BPF=m
++CONFIG_NET_CLS_FLOWER=m
++CONFIG_NET_EMATCH=y
++CONFIG_NET_EMATCH_STACK=32
++CONFIG_NET_EMATCH_CMP=m
++CONFIG_NET_EMATCH_NBYTE=m
++CONFIG_NET_EMATCH_U32=m
++CONFIG_NET_EMATCH_META=m
++CONFIG_NET_EMATCH_TEXT=m
++CONFIG_NET_EMATCH_CANID=m
++CONFIG_NET_EMATCH_IPSET=m
++CONFIG_NET_CLS_ACT=y
++CONFIG_NET_ACT_POLICE=m
++CONFIG_NET_ACT_GACT=m
++CONFIG_GACT_PROB=y
++CONFIG_NET_ACT_MIRRED=m
++CONFIG_NET_ACT_IPT=m
++CONFIG_NET_ACT_NAT=m
++CONFIG_NET_ACT_PEDIT=m
++CONFIG_NET_ACT_SIMP=m
++CONFIG_NET_ACT_SKBEDIT=m
++CONFIG_NET_ACT_CSUM=m
++CONFIG_NET_ACT_VLAN=m
++CONFIG_NET_ACT_BPF=m
++CONFIG_NET_ACT_CONNMARK=m
++# CONFIG_NET_CLS_IND is not set
++CONFIG_NET_SCH_FIFO=y
++CONFIG_DCB=y
++CONFIG_DNS_RESOLVER=y
++CONFIG_BATMAN_ADV=m
++CONFIG_BATMAN_ADV_BLA=y
++CONFIG_BATMAN_ADV_DAT=y
++CONFIG_BATMAN_ADV_NC=y
++CONFIG_BATMAN_ADV_MCAST=y
++# CONFIG_BATMAN_ADV_DEBUG is not set
++CONFIG_OPENVSWITCH=m
++CONFIG_OPENVSWITCH_GRE=m
++CONFIG_OPENVSWITCH_VXLAN=m
++CONFIG_OPENVSWITCH_GENEVE=m
++CONFIG_VSOCKETS=m
++CONFIG_NETLINK_MMAP=y
++CONFIG_NETLINK_DIAG=m
++CONFIG_MPLS=y
++CONFIG_NET_MPLS_GSO=m
++CONFIG_MPLS_ROUTING=m
++CONFIG_MPLS_IPTUNNEL=m
++CONFIG_HSR=m
++# CONFIG_NET_SWITCHDEV is not set
++CONFIG_NET_L3_MASTER_DEV=y
++CONFIG_RPS=y
++CONFIG_RFS_ACCEL=y
++CONFIG_XPS=y
++CONFIG_CGROUP_NET_PRIO=y
++CONFIG_CGROUP_NET_CLASSID=y
++CONFIG_NET_RX_BUSY_POLL=y
++CONFIG_BQL=y
++CONFIG_BPF_JIT=y
++CONFIG_NET_FLOW_LIMIT=y
++
++#
++# Network testing
++#
++CONFIG_NET_PKTGEN=m
++CONFIG_NET_TCPPROBE=m
++# CONFIG_NET_DROP_MONITOR is not set
++CONFIG_HAMRADIO=y
++
++#
++# Packet Radio protocols
++#
++CONFIG_AX25=m
++CONFIG_AX25_DAMA_SLAVE=y
++CONFIG_NETROM=m
++CONFIG_ROSE=m
++
++#
++# AX.25 network device drivers
++#
++CONFIG_MKISS=m
++CONFIG_6PACK=m
++CONFIG_BPQETHER=m
++CONFIG_BAYCOM_SER_FDX=m
++CONFIG_BAYCOM_SER_HDX=m
++CONFIG_BAYCOM_PAR=m
++CONFIG_YAM=m
++CONFIG_CAN=m
++CONFIG_CAN_RAW=m
++CONFIG_CAN_BCM=m
++CONFIG_CAN_GW=m
++
++#
++# CAN Device Drivers
++#
++CONFIG_CAN_VCAN=m
++CONFIG_CAN_SLCAN=m
++CONFIG_CAN_DEV=m
++CONFIG_CAN_CALC_BITTIMING=y
++CONFIG_CAN_LEDS=y
++CONFIG_CAN_JANZ_ICAN3=m
++CONFIG_CAN_FLEXCAN=m
++CONFIG_CAN_GRCAN=m
++CONFIG_CAN_MSCAN=m
++CONFIG_CAN_SJA1000=m
++CONFIG_CAN_SJA1000_ISA=m
++CONFIG_CAN_SJA1000_PLATFORM=m
++CONFIG_CAN_EMS_PCI=m
++CONFIG_CAN_PEAK_PCI=m
++CONFIG_CAN_PEAK_PCIEC=y
++CONFIG_CAN_KVASER_PCI=m
++CONFIG_CAN_PLX_PCI=m
++CONFIG_CAN_C_CAN=m
++CONFIG_CAN_C_CAN_PLATFORM=m
++CONFIG_CAN_C_CAN_PCI=m
++CONFIG_CAN_M_CAN=m
++CONFIG_CAN_CC770=m
++CONFIG_CAN_CC770_ISA=m
++CONFIG_CAN_CC770_PLATFORM=m
++
++#
++# CAN SPI interfaces
++#
++CONFIG_CAN_MCP251X=m
++
++#
++# CAN USB interfaces
++#
++CONFIG_CAN_EMS_USB=m
++CONFIG_CAN_ESD_USB2=m
++CONFIG_CAN_GS_USB=m
++CONFIG_CAN_KVASER_USB=m
++CONFIG_CAN_PEAK_USB=m
++CONFIG_CAN_8DEV_USB=m
++CONFIG_CAN_SOFTING=m
++# CONFIG_CAN_DEBUG_DEVICES is not set
++CONFIG_IRDA=m
++
++#
++# IrDA protocols
++#
++CONFIG_IRLAN=m
++CONFIG_IRNET=m
++CONFIG_IRCOMM=m
++CONFIG_IRDA_ULTRA=y
++
++#
++# IrDA options
++#
++CONFIG_IRDA_CACHE_LAST_LSAP=y
++CONFIG_IRDA_FAST_RR=y
++# CONFIG_IRDA_DEBUG is not set
++
++#
++# Infrared-port device drivers
++#
++
++#
++# SIR device drivers
++#
++CONFIG_IRTTY_SIR=m
++
++#
++# Dongle support
++#
++CONFIG_DONGLE=y
++CONFIG_ESI_DONGLE=m
++CONFIG_ACTISYS_DONGLE=m
++CONFIG_TEKRAM_DONGLE=m
++CONFIG_TOIM3232_DONGLE=m
++CONFIG_LITELINK_DONGLE=m
++CONFIG_MA600_DONGLE=m
++CONFIG_GIRBIL_DONGLE=m
++CONFIG_MCP2120_DONGLE=m
++CONFIG_OLD_BELKIN_DONGLE=m
++CONFIG_ACT200L_DONGLE=m
++CONFIG_KINGSUN_DONGLE=m
++CONFIG_KSDAZZLE_DONGLE=m
++CONFIG_KS959_DONGLE=m
++
++#
++# FIR device drivers
++#
++CONFIG_USB_IRDA=m
++CONFIG_SIGMATEL_FIR=m
++CONFIG_NSC_FIR=m
++CONFIG_WINBOND_FIR=m
++CONFIG_SMC_IRCC_FIR=m
++CONFIG_ALI_FIR=m
++CONFIG_VLSI_FIR=m
++CONFIG_VIA_FIR=m
++CONFIG_MCS_FIR=m
++CONFIG_BT=m
++CONFIG_BT_BREDR=y
++CONFIG_BT_RFCOMM=m
++CONFIG_BT_RFCOMM_TTY=y
++CONFIG_BT_BNEP=m
++CONFIG_BT_BNEP_MC_FILTER=y
++CONFIG_BT_BNEP_PROTO_FILTER=y
++CONFIG_BT_CMTP=m
++CONFIG_BT_HIDP=m
++CONFIG_BT_HS=y
++CONFIG_BT_LE=y
++CONFIG_BT_6LOWPAN=m
++# CONFIG_BT_SELFTEST is not set
++CONFIG_BT_DEBUGFS=y
++
++#
++# Bluetooth device drivers
++#
++CONFIG_BT_INTEL=m
++CONFIG_BT_BCM=m
++CONFIG_BT_RTL=m
++CONFIG_BT_QCA=m
++CONFIG_BT_HCIBTUSB=m
++CONFIG_BT_HCIBTUSB_BCM=y
++CONFIG_BT_HCIBTUSB_RTL=y
++CONFIG_BT_HCIBTSDIO=m
++CONFIG_BT_HCIUART=m
++CONFIG_BT_HCIUART_H4=y
++CONFIG_BT_HCIUART_BCSP=y
++CONFIG_BT_HCIUART_ATH3K=y
++CONFIG_BT_HCIUART_LL=y
++CONFIG_BT_HCIUART_3WIRE=y
++CONFIG_BT_HCIUART_INTEL=y
++CONFIG_BT_HCIUART_BCM=y
++CONFIG_BT_HCIUART_QCA=y
++CONFIG_BT_HCIBCM203X=m
++CONFIG_BT_HCIBPA10X=m
++CONFIG_BT_HCIBFUSB=m
++CONFIG_BT_HCIVHCI=m
++CONFIG_BT_MRVL=m
++CONFIG_BT_MRVL_SDIO=m
++CONFIG_BT_ATH3K=m
++CONFIG_BT_WILINK=m
++CONFIG_AF_RXRPC=m
++# CONFIG_AF_RXRPC_DEBUG is not set
++CONFIG_RXKAD=m
++CONFIG_FIB_RULES=y
++CONFIG_WIRELESS=y
++CONFIG_WIRELESS_EXT=y
++CONFIG_WEXT_CORE=y
++CONFIG_WEXT_PROC=y
++CONFIG_WEXT_SPY=y
++CONFIG_WEXT_PRIV=y
++CONFIG_CFG80211=m
++# CONFIG_NL80211_TESTMODE is not set
++# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
++# CONFIG_CFG80211_REG_DEBUG is not set
++# CONFIG_CFG80211_CERTIFICATION_ONUS is not set
++CONFIG_CFG80211_DEFAULT_PS=y
++CONFIG_CFG80211_DEBUGFS=y
++# CONFIG_CFG80211_INTERNAL_REGDB is not set
++CONFIG_CFG80211_CRDA_SUPPORT=y
++CONFIG_CFG80211_WEXT=y
++CONFIG_CFG80211_WEXT_EXPORT=y
++CONFIG_LIB80211=m
++CONFIG_LIB80211_CRYPT_WEP=m
++CONFIG_LIB80211_CRYPT_CCMP=m
++CONFIG_LIB80211_CRYPT_TKIP=m
++# CONFIG_LIB80211_DEBUG is not set
++CONFIG_MAC80211=m
++CONFIG_MAC80211_HAS_RC=y
++CONFIG_MAC80211_RC_MINSTREL=y
++CONFIG_MAC80211_RC_MINSTREL_HT=y
++CONFIG_MAC80211_RC_MINSTREL_VHT=y
++CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
++CONFIG_MAC80211_RC_DEFAULT="minstrel_ht"
++CONFIG_MAC80211_MESH=y
++CONFIG_MAC80211_LEDS=y
++CONFIG_MAC80211_DEBUGFS=y
++CONFIG_MAC80211_MESSAGE_TRACING=y
++# CONFIG_MAC80211_DEBUG_MENU is not set
++CONFIG_MAC80211_STA_HASH_MAX_SIZE=0
++CONFIG_WIMAX=m
++CONFIG_WIMAX_DEBUG_LEVEL=8
++CONFIG_RFKILL=y
++CONFIG_RFKILL_LEDS=y
++CONFIG_RFKILL_INPUT=y
++CONFIG_RFKILL_REGULATOR=m
++CONFIG_RFKILL_GPIO=m
++CONFIG_NET_9P=m
++CONFIG_NET_9P_VIRTIO=m
++CONFIG_NET_9P_RDMA=m
++# CONFIG_NET_9P_DEBUG is not set
++CONFIG_CAIF=m
++# CONFIG_CAIF_DEBUG is not set
++CONFIG_CAIF_NETDEV=m
++CONFIG_CAIF_USB=m
++CONFIG_CEPH_LIB=m
++# CONFIG_CEPH_LIB_PRETTYDEBUG is not set
++CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y
++CONFIG_NFC=m
++CONFIG_NFC_DIGITAL=m
++CONFIG_NFC_NCI=m
++CONFIG_NFC_NCI_SPI=m
++CONFIG_NFC_NCI_UART=m
++CONFIG_NFC_HCI=m
++CONFIG_NFC_SHDLC=y
++
++#
++# Near Field Communication (NFC) devices
++#
++CONFIG_NFC_PN533=m
++CONFIG_NFC_WILINK=m
++CONFIG_NFC_TRF7970A=m
++CONFIG_NFC_SIM=m
++CONFIG_NFC_PORT100=m
++CONFIG_NFC_FDP=m
++CONFIG_NFC_FDP_I2C=m
++CONFIG_NFC_PN544=m
++CONFIG_NFC_PN544_I2C=m
++CONFIG_NFC_MICROREAD=m
++CONFIG_NFC_MICROREAD_I2C=m
++CONFIG_NFC_MRVL=m
++CONFIG_NFC_MRVL_USB=m
++CONFIG_NFC_MRVL_UART=m
++CONFIG_NFC_MRVL_I2C=m
++CONFIG_NFC_MRVL_SPI=m
++CONFIG_NFC_ST21NFCA=m
++CONFIG_NFC_ST21NFCA_I2C=m
++CONFIG_NFC_ST_NCI=m
++CONFIG_NFC_ST_NCI_I2C=m
++CONFIG_NFC_ST_NCI_SPI=m
++CONFIG_NFC_NXP_NCI=m
++CONFIG_NFC_NXP_NCI_I2C=m
++CONFIG_NFC_S3FWRN5=m
++CONFIG_NFC_S3FWRN5_I2C=m
++CONFIG_LWTUNNEL=y
++CONFIG_HAVE_BPF_JIT=y
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_UEVENT_HELPER=y
++CONFIG_UEVENT_HELPER_PATH=""
++CONFIG_DEVTMPFS=y
++CONFIG_DEVTMPFS_MOUNT=y
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++CONFIG_FW_LOADER=y
++CONFIG_FIRMWARE_IN_KERNEL=y
++CONFIG_EXTRA_FIRMWARE=""
++CONFIG_FW_LOADER_USER_HELPER=y
++# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set
++CONFIG_WANT_DEV_COREDUMP=y
++CONFIG_ALLOW_DEV_COREDUMP=y
++CONFIG_DEV_COREDUMP=y
++# CONFIG_DEBUG_DRIVER is not set
++# CONFIG_DEBUG_DEVRES is not set
++# CONFIG_SYS_HYPERVISOR is not set
++# CONFIG_GENERIC_CPU_DEVICES is not set
++CONFIG_REGMAP=y
++CONFIG_REGMAP_I2C=y
++CONFIG_REGMAP_SPI=y
++CONFIG_REGMAP_SPMI=m
++CONFIG_REGMAP_MMIO=y
++CONFIG_REGMAP_IRQ=y
++CONFIG_DMA_SHARED_BUFFER=y
++# CONFIG_FENCE_TRACE is not set
++
++#
++# Bus devices
++#
++CONFIG_CONNECTOR=y
++CONFIG_PROC_EVENTS=y
++CONFIG_MTD=m
++# CONFIG_MTD_TESTS is not set
++CONFIG_MTD_REDBOOT_PARTS=m
++CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
++# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
++# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
++CONFIG_MTD_CMDLINE_PARTS=m
++CONFIG_MTD_OF_PARTS=m
++CONFIG_MTD_AR7_PARTS=m
++
++#
++# User Modules And Translation Layers
++#
++CONFIG_MTD_BLKDEVS=m
++CONFIG_MTD_BLOCK=m
++CONFIG_MTD_BLOCK_RO=m
++CONFIG_FTL=m
++CONFIG_NFTL=m
++CONFIG_NFTL_RW=y
++CONFIG_INFTL=m
++CONFIG_RFD_FTL=m
++CONFIG_SSFDC=m
++CONFIG_SM_FTL=m
++CONFIG_MTD_OOPS=m
++CONFIG_MTD_SWAP=m
++# CONFIG_MTD_PARTITIONED_MASTER is not set
++
++#
++# RAM/ROM/Flash chip drivers
++#
++CONFIG_MTD_CFI=m
++CONFIG_MTD_JEDECPROBE=m
++CONFIG_MTD_GEN_PROBE=m
++# CONFIG_MTD_CFI_ADV_OPTIONS is not set
++CONFIG_MTD_MAP_BANK_WIDTH_1=y
++CONFIG_MTD_MAP_BANK_WIDTH_2=y
++CONFIG_MTD_MAP_BANK_WIDTH_4=y
++# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
++CONFIG_MTD_CFI_I1=y
++CONFIG_MTD_CFI_I2=y
++# CONFIG_MTD_CFI_I4 is not set
++# CONFIG_MTD_CFI_I8 is not set
++CONFIG_MTD_CFI_INTELEXT=m
++CONFIG_MTD_CFI_AMDSTD=m
++CONFIG_MTD_CFI_STAA=m
++CONFIG_MTD_CFI_UTIL=m
++CONFIG_MTD_RAM=m
++CONFIG_MTD_ROM=m
++CONFIG_MTD_ABSENT=m
++
++#
++# Mapping drivers for chip access
++#
++CONFIG_MTD_COMPLEX_MAPPINGS=y
++CONFIG_MTD_PHYSMAP=m
++# CONFIG_MTD_PHYSMAP_COMPAT is not set
++CONFIG_MTD_PHYSMAP_OF=m
++CONFIG_MTD_PCI=m
++CONFIG_MTD_GPIO_ADDR=m
++CONFIG_MTD_INTEL_VR_NOR=m
++CONFIG_MTD_PLATRAM=m
++CONFIG_MTD_LATCH_ADDR=m
++
++#
++# Self-contained MTD device drivers
++#
++CONFIG_MTD_PMC551=m
++# CONFIG_MTD_PMC551_BUGFIX is not set
++# CONFIG_MTD_PMC551_DEBUG is not set
++CONFIG_MTD_DATAFLASH=m
++# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set
++CONFIG_MTD_DATAFLASH_OTP=y
++CONFIG_MTD_M25P80=m
++CONFIG_MTD_SST25L=m
++CONFIG_MTD_SLRAM=m
++CONFIG_MTD_PHRAM=m
++CONFIG_MTD_MTDRAM=m
++CONFIG_MTDRAM_TOTAL_SIZE=4096
++CONFIG_MTDRAM_ERASE_SIZE=128
++CONFIG_MTD_BLOCK2MTD=m
++CONFIG_MTD_POWERNV_FLASH=m
++
++#
++# Disk-On-Chip Device Drivers
++#
++CONFIG_MTD_DOCG3=m
++CONFIG_BCH_CONST_M=14
++CONFIG_BCH_CONST_T=4
++CONFIG_MTD_NAND_ECC=m
++# CONFIG_MTD_NAND_ECC_SMC is not set
++CONFIG_MTD_NAND=m
++CONFIG_MTD_NAND_BCH=m
++CONFIG_MTD_NAND_ECC_BCH=y
++CONFIG_MTD_SM_COMMON=m
++CONFIG_MTD_NAND_DENALI=m
++CONFIG_MTD_NAND_DENALI_PCI=m
++CONFIG_MTD_NAND_DENALI_SCRATCH_REG_ADDR=0xFF108018
++CONFIG_MTD_NAND_GPIO=m
++# CONFIG_MTD_NAND_OMAP_BCH_BUILD is not set
++CONFIG_MTD_NAND_IDS=m
++CONFIG_MTD_NAND_RICOH=m
++CONFIG_MTD_NAND_DISKONCHIP=m
++# CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADVANCED is not set
++CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS=0
++# CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE is not set
++CONFIG_MTD_NAND_DOCG4=m
++CONFIG_MTD_NAND_CAFE=m
++CONFIG_MTD_NAND_NANDSIM=m
++CONFIG_MTD_NAND_PLATFORM=m
++CONFIG_MTD_NAND_FSL_ELBC=m
++CONFIG_MTD_NAND_HISI504=m
++CONFIG_MTD_ONENAND=m
++CONFIG_MTD_ONENAND_VERIFY_WRITE=y
++CONFIG_MTD_ONENAND_GENERIC=m
++# CONFIG_MTD_ONENAND_OTP is not set
++CONFIG_MTD_ONENAND_2X_PROGRAM=y
++
++#
++# LPDDR & LPDDR2 PCM memory drivers
++#
++CONFIG_MTD_LPDDR=m
++CONFIG_MTD_QINFO_PROBE=m
++CONFIG_MTD_SPI_NOR=m
++CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y
++CONFIG_MTD_UBI=m
++CONFIG_MTD_UBI_WL_THRESHOLD=4096
++CONFIG_MTD_UBI_BEB_LIMIT=20
++CONFIG_MTD_UBI_FASTMAP=y
++CONFIG_MTD_UBI_GLUEBI=m
++CONFIG_MTD_UBI_BLOCK=y
++CONFIG_DTC=y
++CONFIG_OF=y
++# CONFIG_OF_UNITTEST is not set
++CONFIG_OF_FLATTREE=y
++CONFIG_OF_EARLY_FLATTREE=y
++CONFIG_OF_DYNAMIC=y
++CONFIG_OF_ADDRESS=y
++CONFIG_OF_ADDRESS_PCI=y
++CONFIG_OF_IRQ=y
++CONFIG_OF_NET=y
++CONFIG_OF_MDIO=y
++CONFIG_OF_PCI=y
++CONFIG_OF_PCI_IRQ=y
++CONFIG_OF_MTD=y
++CONFIG_OF_RESERVED_MEM=y
++CONFIG_OF_RESOLVE=y
++CONFIG_OF_OVERLAY=y
++CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y
++CONFIG_PARPORT=m
++CONFIG_PARPORT_PC=m
++CONFIG_PARPORT_SERIAL=m
++CONFIG_PARPORT_PC_FIFO=y
++# CONFIG_PARPORT_PC_SUPERIO is not set
++# CONFIG_PARPORT_GSC is not set
++CONFIG_PARPORT_AX88796=m
++CONFIG_PARPORT_1284=y
++CONFIG_PARPORT_NOT_PC=y
++CONFIG_BLK_DEV=y
++CONFIG_BLK_DEV_NULL_BLK=m
++CONFIG_BLK_DEV_FD=m
++CONFIG_PARIDE=m
++
++#
++# Parallel IDE high-level drivers
++#
++CONFIG_PARIDE_PD=m
++CONFIG_PARIDE_PCD=m
++CONFIG_PARIDE_PF=m
++CONFIG_PARIDE_PT=m
++CONFIG_PARIDE_PG=m
++
++#
++# Parallel IDE protocol modules
++#
++CONFIG_PARIDE_ATEN=m
++CONFIG_PARIDE_BPCK=m
++CONFIG_PARIDE_COMM=m
++CONFIG_PARIDE_DSTR=m
++CONFIG_PARIDE_FIT2=m
++CONFIG_PARIDE_FIT3=m
++CONFIG_PARIDE_EPAT=m
++CONFIG_PARIDE_EPATC8=y
++CONFIG_PARIDE_EPIA=m
++CONFIG_PARIDE_FRIQ=m
++CONFIG_PARIDE_FRPW=m
++CONFIG_PARIDE_KBIC=m
++CONFIG_PARIDE_KTTI=m
++CONFIG_PARIDE_ON20=m
++CONFIG_PARIDE_ON26=m
++CONFIG_BLK_DEV_PCIESSD_MTIP32XX=m
++CONFIG_ZRAM=m
++CONFIG_ZRAM_LZ4_COMPRESS=y
++CONFIG_BLK_CPQ_CISS_DA=m
++CONFIG_CISS_SCSI_TAPE=y
++CONFIG_BLK_DEV_DAC960=m
++CONFIG_BLK_DEV_UMEM=m
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=y
++CONFIG_BLK_DEV_LOOP_MIN_COUNT=8
++CONFIG_BLK_DEV_CRYPTOLOOP=m
++CONFIG_BLK_DEV_DRBD=m
++# CONFIG_DRBD_FAULT_INJECTION is not set
++CONFIG_BLK_DEV_NBD=m
++CONFIG_BLK_DEV_SKD=m
++CONFIG_BLK_DEV_OSD=m
++CONFIG_BLK_DEV_SX8=m
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=65536
++CONFIG_BLK_DEV_RAM_DAX=y
++CONFIG_CDROM_PKTCDVD=m
++CONFIG_CDROM_PKTCDVD_BUFFERS=8
++# CONFIG_CDROM_PKTCDVD_WCACHE is not set
++CONFIG_ATA_OVER_ETH=m
++CONFIG_VIRTIO_BLK=y
++# CONFIG_BLK_DEV_HD is not set
++CONFIG_BLK_DEV_RBD=m
++CONFIG_BLK_DEV_RSXX=m
++CONFIG_BLK_DEV_NVME=m
++
++#
++# Misc devices
++#
++CONFIG_SENSORS_LIS3LV02D=m
++CONFIG_AD525X_DPOT=m
++CONFIG_AD525X_DPOT_I2C=m
++CONFIG_AD525X_DPOT_SPI=m
++CONFIG_DUMMY_IRQ=m
++CONFIG_PHANTOM=m
++CONFIG_SGI_IOC4=m
++CONFIG_TIFM_CORE=m
++CONFIG_TIFM_7XX1=m
++CONFIG_ICS932S401=m
++CONFIG_ENCLOSURE_SERVICES=m
++CONFIG_HP_ILO=m
++CONFIG_APDS9802ALS=m
++CONFIG_ISL29003=m
++CONFIG_ISL29020=m
++CONFIG_SENSORS_TSL2550=m
++CONFIG_SENSORS_BH1780=m
++CONFIG_SENSORS_BH1770=m
++CONFIG_SENSORS_APDS990X=m
++CONFIG_HMC6352=m
++CONFIG_DS1682=m
++CONFIG_TI_DAC7512=m
++CONFIG_BMP085=y
++CONFIG_BMP085_I2C=m
++CONFIG_BMP085_SPI=m
++CONFIG_USB_SWITCH_FSA9480=m
++CONFIG_LATTICE_ECP3_CONFIG=m
++CONFIG_SRAM=y
++CONFIG_C2PORT=m
++
++#
++# EEPROM support
++#
++CONFIG_EEPROM_AT24=m
++CONFIG_EEPROM_AT25=m
++CONFIG_EEPROM_LEGACY=m
++CONFIG_EEPROM_MAX6875=m
++CONFIG_EEPROM_93CX6=m
++CONFIG_EEPROM_93XX46=m
++CONFIG_CB710_CORE=m
++# CONFIG_CB710_DEBUG is not set
++CONFIG_CB710_DEBUG_ASSUMPTIONS=y
++
++#
++# Texas Instruments shared transport line discipline
++#
++CONFIG_TI_ST=m
++CONFIG_SENSORS_LIS3_SPI=m
++CONFIG_SENSORS_LIS3_I2C=m
++
++#
++# Altera FPGA firmware download module
++#
++CONFIG_ALTERA_STAPL=m
++
++#
++# Intel MIC Bus Driver
++#
++
++#
++# SCIF Bus Driver
++#
++
++#
++# Intel MIC Host Driver
++#
++
++#
++# Intel MIC Card Driver
++#
++
++#
++# SCIF Driver
++#
++
++#
++# Intel MIC Coprocessor State Management (COSM) Drivers
++#
++CONFIG_GENWQE=m
++CONFIG_GENWQE_PLATFORM_ERROR_RECOVERY=0
++CONFIG_ECHO=m
++CONFIG_CXL_BASE=y
++CONFIG_CXL_KERNEL_API=y
++CONFIG_CXL_EEH=y
++CONFIG_CXL=m
++CONFIG_HAVE_IDE=y
++# CONFIG_IDE is not set
++
++#
++# SCSI device support
++#
++CONFIG_SCSI_MOD=y
++CONFIG_RAID_ATTRS=m
++CONFIG_SCSI=y
++CONFIG_SCSI_DMA=y
++CONFIG_SCSI_NETLINK=y
++# CONFIG_SCSI_MQ_DEFAULT is not set
++CONFIG_SCSI_PROC_FS=y
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=y
++CONFIG_CHR_DEV_ST=m
++CONFIG_CHR_DEV_OSST=m
++CONFIG_BLK_DEV_SR=y
++# CONFIG_BLK_DEV_SR_VENDOR is not set
++CONFIG_CHR_DEV_SG=y
++CONFIG_CHR_DEV_SCH=m
++CONFIG_SCSI_ENCLOSURE=m
++CONFIG_SCSI_CONSTANTS=y
++CONFIG_SCSI_LOGGING=y
++CONFIG_SCSI_SCAN_ASYNC=y
++
++#
++# SCSI Transports
++#
++CONFIG_SCSI_SPI_ATTRS=m
++CONFIG_SCSI_FC_ATTRS=m
++CONFIG_SCSI_ISCSI_ATTRS=m
++CONFIG_SCSI_SAS_ATTRS=m
++CONFIG_SCSI_SAS_LIBSAS=m
++CONFIG_SCSI_SAS_ATA=y
++CONFIG_SCSI_SAS_HOST_SMP=y
++CONFIG_SCSI_SRP_ATTRS=y
++CONFIG_SCSI_LOWLEVEL=y
++CONFIG_ISCSI_TCP=m
++CONFIG_ISCSI_BOOT_SYSFS=m
++CONFIG_SCSI_CXGB3_ISCSI=m
++CONFIG_SCSI_CXGB4_ISCSI=m
++CONFIG_SCSI_BNX2_ISCSI=m
++CONFIG_SCSI_BNX2X_FCOE=m
++CONFIG_BE2ISCSI=m
++CONFIG_CXLFLASH=m
++CONFIG_BLK_DEV_3W_XXXX_RAID=m
++CONFIG_SCSI_HPSA=m
++CONFIG_SCSI_3W_9XXX=m
++CONFIG_SCSI_3W_SAS=m
++CONFIG_SCSI_ACARD=m
++CONFIG_SCSI_AACRAID=m
++CONFIG_SCSI_AIC7XXX=m
++CONFIG_AIC7XXX_CMDS_PER_DEVICE=8
++CONFIG_AIC7XXX_RESET_DELAY_MS=5000
++# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
++CONFIG_AIC7XXX_DEBUG_MASK=0
++CONFIG_AIC7XXX_REG_PRETTY_PRINT=y
++CONFIG_SCSI_AIC79XX=m
++CONFIG_AIC79XX_CMDS_PER_DEVICE=32
++CONFIG_AIC79XX_RESET_DELAY_MS=5000
++# CONFIG_AIC79XX_DEBUG_ENABLE is not set
++CONFIG_AIC79XX_DEBUG_MASK=0
++CONFIG_AIC79XX_REG_PRETTY_PRINT=y
++CONFIG_SCSI_AIC94XX=m
++# CONFIG_AIC94XX_DEBUG is not set
++CONFIG_SCSI_MVSAS=m
++# CONFIG_SCSI_MVSAS_DEBUG is not set
++# CONFIG_SCSI_MVSAS_TASKLET is not set
++CONFIG_SCSI_MVUMI=m
++CONFIG_SCSI_ADVANSYS=m
++CONFIG_SCSI_ARCMSR=m
++CONFIG_SCSI_ESAS2R=m
++CONFIG_MEGARAID_NEWGEN=y
++CONFIG_MEGARAID_MM=m
++CONFIG_MEGARAID_MAILBOX=m
++CONFIG_MEGARAID_LEGACY=m
++CONFIG_MEGARAID_SAS=m
++CONFIG_SCSI_MPT3SAS=m
++CONFIG_SCSI_MPT2SAS_MAX_SGE=128
++CONFIG_SCSI_MPT3SAS_MAX_SGE=128
++CONFIG_SCSI_MPT2SAS=m
++CONFIG_SCSI_UFSHCD=m
++CONFIG_SCSI_UFSHCD_PCI=m
++CONFIG_SCSI_UFSHCD_PLATFORM=m
++CONFIG_SCSI_HPTIOP=m
++CONFIG_LIBFC=m
++CONFIG_LIBFCOE=m
++CONFIG_FCOE=m
++CONFIG_SCSI_SNIC=m
++# CONFIG_SCSI_SNIC_DEBUG_FS is not set
++CONFIG_SCSI_DMX3191D=m
++CONFIG_SCSI_EATA=m
++CONFIG_SCSI_EATA_TAGGED_QUEUE=y
++CONFIG_SCSI_EATA_LINKED_COMMANDS=y
++CONFIG_SCSI_EATA_MAX_TAGS=16
++CONFIG_SCSI_FUTURE_DOMAIN=m
++CONFIG_SCSI_GDTH=m
++CONFIG_SCSI_IPS=m
++CONFIG_SCSI_IBMVSCSI=m
++CONFIG_SCSI_IBMVFC=m
++CONFIG_SCSI_IBMVFC_TRACE=y
++CONFIG_SCSI_INITIO=m
++CONFIG_SCSI_INIA100=m
++CONFIG_SCSI_PPA=m
++CONFIG_SCSI_IMM=m
++# CONFIG_SCSI_IZIP_EPP16 is not set
++# CONFIG_SCSI_IZIP_SLOW_CTR is not set
++CONFIG_SCSI_STEX=m
++CONFIG_SCSI_SYM53C8XX_2=m
++CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
++CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
++CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
++CONFIG_SCSI_SYM53C8XX_MMIO=y
++CONFIG_SCSI_IPR=m
++CONFIG_SCSI_IPR_TRACE=y
++CONFIG_SCSI_IPR_DUMP=y
++CONFIG_SCSI_QLOGIC_1280=m
++CONFIG_SCSI_QLA_FC=m
++CONFIG_TCM_QLA2XXX=m
++CONFIG_SCSI_QLA_ISCSI=m
++CONFIG_SCSI_LPFC=m
++# CONFIG_SCSI_LPFC_DEBUG_FS is not set
++CONFIG_SCSI_DC395x=m
++CONFIG_SCSI_AM53C974=m
++CONFIG_SCSI_WD719X=m
++CONFIG_SCSI_DEBUG=m
++CONFIG_SCSI_PMCRAID=m
++CONFIG_SCSI_PM8001=m
++CONFIG_SCSI_BFA_FC=m
++CONFIG_SCSI_VIRTIO=m
++CONFIG_SCSI_CHELSIO_FCOE=m
++CONFIG_SCSI_DH=y
++CONFIG_SCSI_DH_RDAC=m
++CONFIG_SCSI_DH_HP_SW=m
++CONFIG_SCSI_DH_EMC=m
++CONFIG_SCSI_DH_ALUA=m
++CONFIG_SCSI_OSD_INITIATOR=m
++CONFIG_SCSI_OSD_ULD=m
++CONFIG_SCSI_OSD_DPRINT_SENSE=1
++# CONFIG_SCSI_OSD_DEBUG is not set
++CONFIG_ATA=y
++# CONFIG_ATA_NONSTANDARD is not set
++CONFIG_ATA_VERBOSE_ERROR=y
++CONFIG_SATA_PMP=y
++
++#
++# Controllers with non-SFF native interface
++#
++CONFIG_SATA_AHCI=m
++CONFIG_SATA_AHCI_PLATFORM=m
++CONFIG_AHCI_CEVA=m
++CONFIG_AHCI_QORIQ=m
++CONFIG_SATA_INIC162X=m
++CONFIG_SATA_ACARD_AHCI=m
++CONFIG_SATA_SIL24=m
++CONFIG_ATA_SFF=y
++
++#
++# SFF controllers with custom DMA interface
++#
++CONFIG_PDC_ADMA=m
++CONFIG_SATA_QSTOR=m
++CONFIG_SATA_SX4=m
++CONFIG_ATA_BMDMA=y
++
++#
++# SATA SFF controllers with BMDMA
++#
++CONFIG_ATA_PIIX=m
++CONFIG_SATA_MV=m
++CONFIG_SATA_NV=m
++CONFIG_SATA_PROMISE=m
++CONFIG_SATA_SIL=m
++CONFIG_SATA_SIS=m
++CONFIG_SATA_SVW=m
++CONFIG_SATA_ULI=m
++CONFIG_SATA_VIA=m
++CONFIG_SATA_VITESSE=m
++
++#
++# PATA SFF controllers with BMDMA
++#
++CONFIG_PATA_ALI=m
++CONFIG_PATA_AMD=m
++CONFIG_PATA_ARTOP=m
++CONFIG_PATA_ATIIXP=m
++CONFIG_PATA_ATP867X=m
++CONFIG_PATA_CMD64X=m
++CONFIG_PATA_CYPRESS=m
++CONFIG_PATA_EFAR=m
++CONFIG_PATA_HPT366=m
++CONFIG_PATA_HPT37X=m
++CONFIG_PATA_HPT3X2N=m
++CONFIG_PATA_HPT3X3=m
++# CONFIG_PATA_HPT3X3_DMA is not set
++CONFIG_PATA_IT8213=m
++CONFIG_PATA_IT821X=m
++CONFIG_PATA_JMICRON=m
++CONFIG_PATA_MARVELL=m
++CONFIG_PATA_NETCELL=m
++CONFIG_PATA_NINJA32=m
++CONFIG_PATA_NS87415=m
++CONFIG_PATA_OLDPIIX=m
++CONFIG_PATA_OPTIDMA=m
++CONFIG_PATA_PDC2027X=m
++CONFIG_PATA_PDC_OLD=m
++CONFIG_PATA_RADISYS=m
++CONFIG_PATA_RDC=m
++CONFIG_PATA_SCH=m
++CONFIG_PATA_SERVERWORKS=m
++CONFIG_PATA_SIL680=m
++CONFIG_PATA_SIS=m
++CONFIG_PATA_TOSHIBA=m
++CONFIG_PATA_TRIFLEX=m
++CONFIG_PATA_VIA=m
++CONFIG_PATA_WINBOND=m
++
++#
++# PIO-only SFF controllers
++#
++CONFIG_PATA_CMD640_PCI=m
++CONFIG_PATA_MPIIX=m
++CONFIG_PATA_NS87410=m
++CONFIG_PATA_OPTI=m
++CONFIG_PATA_PLATFORM=m
++CONFIG_PATA_OF_PLATFORM=m
++CONFIG_PATA_RZ1000=m
++
++#
++# Generic fallback / legacy drivers
++#
++CONFIG_ATA_GENERIC=m
++CONFIG_PATA_LEGACY=m
++CONFIG_MD=y
++CONFIG_BLK_DEV_MD=y
++CONFIG_MD_AUTODETECT=y
++CONFIG_MD_LINEAR=m
++CONFIG_MD_RAID0=m
++CONFIG_MD_RAID1=m
++CONFIG_MD_RAID10=m
++CONFIG_MD_RAID456=m
++CONFIG_MD_MULTIPATH=m
++CONFIG_MD_FAULTY=m
++CONFIG_MD_CLUSTER=m
++CONFIG_BCACHE=m
++# CONFIG_BCACHE_DEBUG is not set
++# CONFIG_BCACHE_CLOSURES_DEBUG is not set
++CONFIG_BLK_DEV_DM_BUILTIN=y
++CONFIG_BLK_DEV_DM=y
++# CONFIG_DM_MQ_DEFAULT is not set
++# CONFIG_DM_DEBUG is not set
++CONFIG_DM_BUFIO=m
++CONFIG_DM_BIO_PRISON=m
++CONFIG_DM_PERSISTENT_DATA=m
++# CONFIG_DM_DEBUG_BLOCK_STACK_TRACING is not set
++CONFIG_DM_CRYPT=m
++CONFIG_DM_SNAPSHOT=m
++CONFIG_DM_THIN_PROVISIONING=m
++CONFIG_DM_CACHE=m
++CONFIG_DM_CACHE_MQ=m
++CONFIG_DM_CACHE_SMQ=m
++CONFIG_DM_CACHE_CLEANER=m
++CONFIG_DM_ERA=m
++CONFIG_DM_MIRROR=m
++CONFIG_DM_LOG_USERSPACE=m
++CONFIG_DM_RAID=m
++CONFIG_DM_ZERO=m
++CONFIG_DM_MULTIPATH=m
++CONFIG_DM_MULTIPATH_QL=m
++CONFIG_DM_MULTIPATH_ST=m
++CONFIG_DM_DELAY=m
++CONFIG_DM_UEVENT=y
++CONFIG_DM_FLAKEY=m
++CONFIG_DM_VERITY=m
++CONFIG_DM_SWITCH=m
++CONFIG_DM_LOG_WRITES=m
++CONFIG_TARGET_CORE=m
++CONFIG_TCM_IBLOCK=m
++CONFIG_TCM_FILEIO=m
++CONFIG_TCM_PSCSI=m
++CONFIG_TCM_USER2=m
++CONFIG_LOOPBACK_TARGET=m
++CONFIG_TCM_FC=m
++CONFIG_ISCSI_TARGET=m
++CONFIG_SBP_TARGET=m
++CONFIG_FUSION=y
++CONFIG_FUSION_SPI=m
++CONFIG_FUSION_FC=m
++CONFIG_FUSION_SAS=m
++CONFIG_FUSION_MAX_SGE=128
++CONFIG_FUSION_CTL=m
++CONFIG_FUSION_LAN=m
++CONFIG_FUSION_LOGGING=y
++
++#
++# IEEE 1394 (FireWire) support
++#
++CONFIG_FIREWIRE=m
++CONFIG_FIREWIRE_OHCI=m
++CONFIG_FIREWIRE_SBP2=m
++CONFIG_FIREWIRE_NET=m
++CONFIG_FIREWIRE_NOSY=m
++CONFIG_MACINTOSH_DRIVERS=y
++CONFIG_MAC_EMUMOUSEBTN=m
++CONFIG_WINDFARM=m
++CONFIG_NETDEVICES=y
++CONFIG_MII=m
++CONFIG_NET_CORE=y
++CONFIG_BONDING=m
++CONFIG_DUMMY=m
++CONFIG_EQUALIZER=m
++CONFIG_NET_FC=y
++CONFIG_IFB=m
++CONFIG_NET_TEAM=m
++CONFIG_NET_TEAM_MODE_BROADCAST=m
++CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
++CONFIG_NET_TEAM_MODE_RANDOM=m
++CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
++CONFIG_NET_TEAM_MODE_LOADBALANCE=m
++CONFIG_MACVLAN=m
++CONFIG_MACVTAP=m
++CONFIG_IPVLAN=m
++CONFIG_VXLAN=m
++CONFIG_GENEVE=m
++CONFIG_NETCONSOLE=m
++CONFIG_NETCONSOLE_DYNAMIC=y
++CONFIG_NETPOLL=y
++CONFIG_NET_POLL_CONTROLLER=y
++CONFIG_NTB_NETDEV=m
++CONFIG_RIONET=m
++CONFIG_RIONET_TX_SIZE=128
++CONFIG_RIONET_RX_SIZE=128
++CONFIG_TUN=y
++# CONFIG_TUN_VNET_CROSS_LE is not set
++CONFIG_VETH=m
++CONFIG_VIRTIO_NET=y
++CONFIG_NLMON=m
++CONFIG_NET_VRF=m
++CONFIG_SUNGEM_PHY=m
++CONFIG_ARCNET=m
++CONFIG_ARCNET_1201=m
++CONFIG_ARCNET_1051=m
++CONFIG_ARCNET_RAW=m
++CONFIG_ARCNET_CAP=m
++CONFIG_ARCNET_COM90xx=m
++CONFIG_ARCNET_COM90xxIO=m
++CONFIG_ARCNET_RIM_I=m
++CONFIG_ARCNET_COM20020=m
++CONFIG_ARCNET_COM20020_PCI=m
++CONFIG_ATM_DRIVERS=y
++CONFIG_ATM_DUMMY=m
++CONFIG_ATM_TCP=m
++CONFIG_ATM_LANAI=m
++CONFIG_ATM_ENI=m
++# CONFIG_ATM_ENI_DEBUG is not set
++# CONFIG_ATM_ENI_TUNE_BURST is not set
++CONFIG_ATM_NICSTAR=m
++# CONFIG_ATM_NICSTAR_USE_SUNI is not set
++# CONFIG_ATM_NICSTAR_USE_IDT77105 is not set
++CONFIG_ATM_IDT77252=m
++# CONFIG_ATM_IDT77252_DEBUG is not set
++# CONFIG_ATM_IDT77252_RCV_ALL is not set
++CONFIG_ATM_IDT77252_USE_SUNI=y
++CONFIG_ATM_IA=m
++# CONFIG_ATM_IA_DEBUG is not set
++CONFIG_ATM_FORE200E=m
++# CONFIG_ATM_FORE200E_USE_TASKLET is not set
++CONFIG_ATM_FORE200E_TX_RETRY=16
++CONFIG_ATM_FORE200E_DEBUG=0
++CONFIG_ATM_HE=m
++CONFIG_ATM_HE_USE_SUNI=y
++CONFIG_ATM_SOLOS=m
++
++#
++# CAIF transport drivers
++#
++CONFIG_CAIF_TTY=m
++CONFIG_CAIF_SPI_SLAVE=m
++# CONFIG_CAIF_SPI_SYNC is not set
++CONFIG_CAIF_HSI=m
++CONFIG_CAIF_VIRTIO=m
++CONFIG_VHOST_NET=m
++CONFIG_VHOST_SCSI=m
++CONFIG_VHOST_RING=m
++CONFIG_VHOST=m
++# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set
++
++#
++# Distributed Switch Architecture drivers
++#
++# CONFIG_NET_DSA_MV88E6XXX is not set
++# CONFIG_NET_DSA_MV88E6XXX_NEED_PPU is not set
++CONFIG_ETHERNET=y
++CONFIG_MDIO=m
++CONFIG_NET_VENDOR_3COM=y
++CONFIG_VORTEX=m
++CONFIG_TYPHOON=m
++CONFIG_NET_VENDOR_ADAPTEC=y
++CONFIG_ADAPTEC_STARFIRE=m
++CONFIG_NET_VENDOR_AGERE=y
++CONFIG_ET131X=m
++CONFIG_NET_VENDOR_ALTEON=y
++CONFIG_ACENIC=m
++# CONFIG_ACENIC_OMIT_TIGON_I is not set
++CONFIG_ALTERA_TSE=m
++CONFIG_NET_VENDOR_AMD=y
++CONFIG_AMD8111_ETH=m
++CONFIG_PCNET32=m
++CONFIG_NET_VENDOR_ARC=y
++CONFIG_ARC_EMAC_CORE=m
++CONFIG_ARC_EMAC=m
++CONFIG_EMAC_ROCKCHIP=m
++CONFIG_NET_VENDOR_ATHEROS=y
++CONFIG_ATL2=m
++CONFIG_ATL1=m
++CONFIG_ATL1E=m
++CONFIG_ATL1C=m
++CONFIG_ALX=m
++CONFIG_NET_VENDOR_AURORA=y
++CONFIG_AURORA_NB8800=m
++CONFIG_NET_CADENCE=y
++CONFIG_MACB=m
++CONFIG_NET_VENDOR_BROADCOM=y
++CONFIG_B44=m
++CONFIG_B44_PCI_AUTOSELECT=y
++CONFIG_B44_PCICORE_AUTOSELECT=y
++CONFIG_B44_PCI=y
++CONFIG_BCMGENET=m
++CONFIG_BNX2=m
++CONFIG_CNIC=m
++CONFIG_TIGON3=y
++CONFIG_BNX2X=m
++CONFIG_BNX2X_SRIOV=y
++CONFIG_BNX2X_VXLAN=y
++CONFIG_SYSTEMPORT=m
++CONFIG_BNXT=m
++CONFIG_BNXT_SRIOV=y
++CONFIG_NET_VENDOR_BROCADE=y
++CONFIG_BNA=m
++CONFIG_NET_VENDOR_CAVIUM=y
++CONFIG_THUNDER_NIC_PF=m
++CONFIG_THUNDER_NIC_VF=m
++CONFIG_THUNDER_NIC_BGX=m
++CONFIG_LIQUIDIO=m
++CONFIG_NET_VENDOR_CHELSIO=y
++CONFIG_CHELSIO_T1=m
++CONFIG_CHELSIO_T1_1G=y
++CONFIG_CHELSIO_T3=m
++CONFIG_CHELSIO_T4=m
++CONFIG_CHELSIO_T4_DCB=y
++CONFIG_CHELSIO_T4_FCOE=y
++CONFIG_CHELSIO_T4VF=m
++CONFIG_NET_VENDOR_CISCO=y
++CONFIG_ENIC=m
++CONFIG_DNET=m
++CONFIG_NET_VENDOR_DEC=y
++CONFIG_NET_TULIP=y
++CONFIG_DE2104X=m
++CONFIG_DE2104X_DSL=0
++CONFIG_TULIP=m
++# CONFIG_TULIP_MWI is not set
++# CONFIG_TULIP_MMIO is not set
++# CONFIG_TULIP_NAPI is not set
++CONFIG_DE4X5=m
++CONFIG_WINBOND_840=m
++CONFIG_DM9102=m
++CONFIG_ULI526X=m
++CONFIG_NET_VENDOR_DLINK=y
++CONFIG_DL2K=m
++CONFIG_SUNDANCE=m
++# CONFIG_SUNDANCE_MMIO is not set
++CONFIG_NET_VENDOR_EMULEX=y
++CONFIG_BE2NET=m
++CONFIG_BE2NET_HWMON=y
++CONFIG_BE2NET_VXLAN=y
++CONFIG_NET_VENDOR_EZCHIP=y
++CONFIG_EZCHIP_NPS_MANAGEMENT_ENET=m
++CONFIG_NET_VENDOR_EXAR=y
++CONFIG_S2IO=m
++CONFIG_VXGE=m
++# CONFIG_VXGE_DEBUG_TRACE_ALL is not set
++CONFIG_NET_VENDOR_HP=y
++CONFIG_HP100=m
++CONFIG_NET_VENDOR_IBM=y
++CONFIG_IBMVETH=m
++# CONFIG_IBM_EMAC_ZMII is not set
++# CONFIG_IBM_EMAC_RGMII is not set
++# CONFIG_IBM_EMAC_TAH is not set
++# CONFIG_IBM_EMAC_EMAC4 is not set
++# CONFIG_IBM_EMAC_NO_FLOW_CTRL is not set
++# CONFIG_IBM_EMAC_MAL_CLR_ICINTSTAT is not set
++# CONFIG_IBM_EMAC_MAL_COMMON_ERR is not set
++CONFIG_EHEA=y
++CONFIG_NET_VENDOR_INTEL=y
++CONFIG_E100=m
++CONFIG_E1000=m
++CONFIG_E1000E=m
++CONFIG_IGB=m
++CONFIG_IGB_HWMON=y
++CONFIG_IGBVF=m
++CONFIG_IXGB=m
++CONFIG_IXGBE=m
++CONFIG_IXGBE_VXLAN=y
++CONFIG_IXGBE_HWMON=y
++CONFIG_IXGBE_DCB=y
++CONFIG_IXGBEVF=m
++CONFIG_I40E=m
++CONFIG_I40E_VXLAN=y
++CONFIG_I40E_DCB=y
++CONFIG_I40E_FCOE=y
++CONFIG_I40EVF=m
++CONFIG_FM10K=m
++CONFIG_FM10K_VXLAN=y
++CONFIG_NET_VENDOR_I825XX=y
++CONFIG_JME=m
++CONFIG_NET_VENDOR_MARVELL=y
++CONFIG_MVMDIO=m
++CONFIG_SKGE=m
++# CONFIG_SKGE_DEBUG is not set
++CONFIG_SKGE_GENESIS=y
++CONFIG_SKY2=m
++# CONFIG_SKY2_DEBUG is not set
++CONFIG_NET_VENDOR_MELLANOX=y
++CONFIG_MLX4_EN=m
++CONFIG_MLX4_EN_DCB=y
++CONFIG_MLX4_EN_VXLAN=y
++CONFIG_MLX4_CORE=m
++CONFIG_MLX4_DEBUG=y
++CONFIG_MLX5_CORE=m
++CONFIG_MLX5_CORE_EN=y
++CONFIG_MLXSW_CORE=m
++CONFIG_MLXSW_PCI=m
++CONFIG_NET_VENDOR_MICREL=y
++CONFIG_KS8842=m
++CONFIG_KS8851=m
++CONFIG_KS8851_MLL=m
++CONFIG_KSZ884X_PCI=m
++CONFIG_NET_VENDOR_MICROCHIP=y
++CONFIG_ENC28J60=m
++# CONFIG_ENC28J60_WRITEVERIFY is not set
++CONFIG_ENCX24J600=m
++CONFIG_NET_VENDOR_MYRI=y
++CONFIG_MYRI10GE=m
++CONFIG_FEALNX=m
++CONFIG_NET_VENDOR_NATSEMI=y
++CONFIG_NATSEMI=m
++CONFIG_NS83820=m
++CONFIG_NET_VENDOR_8390=y
++CONFIG_NE2K_PCI=m
++CONFIG_NET_VENDOR_NVIDIA=y
++CONFIG_FORCEDETH=m
++CONFIG_NET_VENDOR_OKI=y
++CONFIG_ETHOC=m
++CONFIG_NET_PACKET_ENGINE=y
++CONFIG_HAMACHI=m
++CONFIG_YELLOWFIN=m
++CONFIG_NET_VENDOR_QLOGIC=y
++CONFIG_QLA3XXX=m
++CONFIG_QLCNIC=m
++CONFIG_QLCNIC_SRIOV=y
++CONFIG_QLCNIC_DCB=y
++CONFIG_QLCNIC_VXLAN=y
++CONFIG_QLCNIC_HWMON=y
++CONFIG_QLGE=m
++CONFIG_NETXEN_NIC=m
++CONFIG_QED=m
++CONFIG_QEDE=m
++CONFIG_NET_VENDOR_QUALCOMM=y
++CONFIG_QCA7000=m
++CONFIG_NET_VENDOR_REALTEK=y
++CONFIG_8139CP=m
++CONFIG_8139TOO=m
++CONFIG_8139TOO_PIO=y
++# CONFIG_8139TOO_TUNE_TWISTER is not set
++CONFIG_8139TOO_8129=y
++# CONFIG_8139_OLD_RX_RESET is not set
++CONFIG_R8169=m
++CONFIG_NET_VENDOR_RENESAS=y
++CONFIG_NET_VENDOR_RDC=y
++CONFIG_R6040=m
++CONFIG_NET_VENDOR_ROCKER=y
++CONFIG_NET_VENDOR_SAMSUNG=y
++CONFIG_SXGBE_ETH=m
++CONFIG_NET_VENDOR_SEEQ=y
++CONFIG_NET_VENDOR_SILAN=y
++CONFIG_SC92031=m
++CONFIG_NET_VENDOR_SIS=y
++CONFIG_SIS900=m
++CONFIG_SIS190=m
++CONFIG_SFC=m
++CONFIG_SFC_MTD=y
++CONFIG_SFC_MCDI_MON=y
++CONFIG_SFC_SRIOV=y
++CONFIG_SFC_MCDI_LOGGING=y
++CONFIG_NET_VENDOR_SMSC=y
++CONFIG_EPIC100=m
++CONFIG_SMSC911X=m
++# CONFIG_SMSC911X_ARCH_HOOKS is not set
++CONFIG_SMSC9420=m
++CONFIG_NET_VENDOR_STMICRO=y
++CONFIG_STMMAC_ETH=m
++CONFIG_STMMAC_PLATFORM=m
++CONFIG_DWMAC_GENERIC=m
++CONFIG_DWMAC_IPQ806X=m
++CONFIG_DWMAC_LPC18XX=m
++CONFIG_DWMAC_MESON=m
++CONFIG_DWMAC_ROCKCHIP=m
++CONFIG_DWMAC_SOCFPGA=m
++CONFIG_DWMAC_STI=m
++CONFIG_DWMAC_SUNXI=m
++# CONFIG_STMMAC_PCI is not set
++CONFIG_NET_VENDOR_SUN=y
++CONFIG_HAPPYMEAL=m
++CONFIG_SUNGEM=m
++CONFIG_CASSINI=m
++CONFIG_NIU=m
++CONFIG_NET_VENDOR_SYNOPSYS=y
++CONFIG_SYNOPSYS_DWC_ETH_QOS=m
++CONFIG_NET_VENDOR_TEHUTI=y
++CONFIG_TEHUTI=m
++CONFIG_NET_VENDOR_TI=y
++CONFIG_TI_CPSW_ALE=m
++CONFIG_TLAN=m
++CONFIG_NET_VENDOR_VIA=y
++CONFIG_VIA_RHINE=m
++CONFIG_VIA_RHINE_MMIO=y
++CONFIG_VIA_VELOCITY=m
++CONFIG_NET_VENDOR_WIZNET=y
++CONFIG_WIZNET_W5100=m
++CONFIG_WIZNET_W5300=m
++# CONFIG_WIZNET_BUS_DIRECT is not set
++# CONFIG_WIZNET_BUS_INDIRECT is not set
++CONFIG_WIZNET_BUS_ANY=y
++CONFIG_NET_VENDOR_XILINX=y
++CONFIG_XILINX_LL_TEMAC=m
++CONFIG_FDDI=y
++CONFIG_DEFXX=m
++# CONFIG_DEFXX_MMIO is not set
++CONFIG_SKFP=m
++# CONFIG_HIPPI is not set
++CONFIG_PHYLIB=y
++
++#
++# MII PHY device drivers
++#
++CONFIG_AQUANTIA_PHY=m
++CONFIG_AT803X_PHY=m
++CONFIG_AMD_PHY=m
++CONFIG_MARVELL_PHY=m
++CONFIG_DAVICOM_PHY=m
++CONFIG_QSEMI_PHY=m
++CONFIG_LXT_PHY=m
++CONFIG_CICADA_PHY=m
++CONFIG_VITESSE_PHY=m
++CONFIG_TERANETICS_PHY=m
++CONFIG_SMSC_PHY=m
++CONFIG_BCM_NET_PHYLIB=m
++CONFIG_BROADCOM_PHY=m
++CONFIG_BCM7XXX_PHY=m
++CONFIG_BCM87XX_PHY=m
++CONFIG_ICPLUS_PHY=m
++CONFIG_REALTEK_PHY=m
++CONFIG_NATIONAL_PHY=m
++CONFIG_STE10XP=m
++CONFIG_LSI_ET1011C_PHY=m
++CONFIG_MICREL_PHY=m
++CONFIG_DP83848_PHY=m
++CONFIG_DP83867_PHY=m
++CONFIG_MICROCHIP_PHY=m
++CONFIG_FIXED_PHY=y
++CONFIG_MDIO_BITBANG=m
++CONFIG_MDIO_GPIO=m
++CONFIG_MDIO_OCTEON=m
++CONFIG_MDIO_BUS_MUX=m
++CONFIG_MDIO_BUS_MUX_GPIO=m
++CONFIG_MDIO_BUS_MUX_MMIOREG=m
++CONFIG_MDIO_BCM_UNIMAC=m
++CONFIG_MICREL_KS8995MA=m
++CONFIG_PLIP=m
++CONFIG_PPP=y
++CONFIG_PPP_BSDCOMP=m
++CONFIG_PPP_DEFLATE=m
++CONFIG_PPP_FILTER=y
++CONFIG_PPP_MPPE=m
++CONFIG_PPP_MULTILINK=y
++CONFIG_PPPOATM=m
++CONFIG_PPPOE=m
++CONFIG_PPTP=m
++CONFIG_PPPOL2TP=m
++CONFIG_PPP_ASYNC=m
++CONFIG_PPP_SYNC_TTY=m
++CONFIG_SLIP=m
++CONFIG_SLHC=y
++CONFIG_SLIP_COMPRESSED=y
++CONFIG_SLIP_SMART=y
++CONFIG_SLIP_MODE_SLIP6=y
++CONFIG_USB_NET_DRIVERS=m
++CONFIG_USB_CATC=m
++CONFIG_USB_KAWETH=m
++CONFIG_USB_PEGASUS=m
++CONFIG_USB_RTL8150=m
++CONFIG_USB_RTL8152=m
++CONFIG_USB_LAN78XX=m
++CONFIG_USB_USBNET=m
++CONFIG_USB_NET_AX8817X=m
++CONFIG_USB_NET_AX88179_178A=m
++CONFIG_USB_NET_CDCETHER=m
++CONFIG_USB_NET_CDC_EEM=m
++CONFIG_USB_NET_CDC_NCM=m
++CONFIG_USB_NET_HUAWEI_CDC_NCM=m
++CONFIG_USB_NET_CDC_MBIM=m
++CONFIG_USB_NET_DM9601=m
++CONFIG_USB_NET_SR9700=m
++CONFIG_USB_NET_SR9800=m
++CONFIG_USB_NET_SMSC75XX=m
++CONFIG_USB_NET_SMSC95XX=m
++CONFIG_USB_NET_GL620A=m
++CONFIG_USB_NET_NET1080=m
++CONFIG_USB_NET_PLUSB=m
++CONFIG_USB_NET_MCS7830=m
++CONFIG_USB_NET_RNDIS_HOST=m
++CONFIG_USB_NET_CDC_SUBSET=m
++CONFIG_USB_ALI_M5632=y
++CONFIG_USB_AN2720=y
++CONFIG_USB_BELKIN=y
++CONFIG_USB_ARMLINUX=y
++CONFIG_USB_EPSON2888=y
++CONFIG_USB_KC2190=y
++CONFIG_USB_NET_ZAURUS=m
++CONFIG_USB_NET_CX82310_ETH=m
++CONFIG_USB_NET_KALMIA=m
++CONFIG_USB_NET_QMI_WWAN=m
++CONFIG_USB_HSO=m
++CONFIG_USB_NET_INT51X1=m
++CONFIG_USB_CDC_PHONET=m
++CONFIG_USB_IPHETH=m
++CONFIG_USB_SIERRA_NET=m
++CONFIG_USB_VL600=m
++CONFIG_USB_NET_CH9200=m
++CONFIG_WLAN=y
++CONFIG_LIBERTAS_THINFIRM=m
++# CONFIG_LIBERTAS_THINFIRM_DEBUG is not set
++CONFIG_LIBERTAS_THINFIRM_USB=m
++CONFIG_AIRO=m
++CONFIG_ATMEL=m
++CONFIG_PCI_ATMEL=m
++CONFIG_AT76C50X_USB=m
++# CONFIG_PRISM54 is not set
++CONFIG_USB_ZD1201=m
++CONFIG_USB_NET_RNDIS_WLAN=m
++CONFIG_ADM8211=m
++CONFIG_RTL8180=m
++CONFIG_RTL8187=m
++CONFIG_RTL8187_LEDS=y
++CONFIG_MAC80211_HWSIM=m
++CONFIG_MWL8K=m
++CONFIG_ATH_COMMON=m
++CONFIG_ATH_CARDS=m
++# CONFIG_ATH_DEBUG is not set
++CONFIG_ATH5K=m
++# CONFIG_ATH5K_DEBUG is not set
++# CONFIG_ATH5K_TRACER is not set
++CONFIG_ATH5K_PCI=y
++CONFIG_ATH9K_HW=m
++CONFIG_ATH9K_COMMON=m
++CONFIG_ATH9K_BTCOEX_SUPPORT=y
++CONFIG_ATH9K=m
++CONFIG_ATH9K_PCI=y
++CONFIG_ATH9K_AHB=y
++CONFIG_ATH9K_DEBUGFS=y
++CONFIG_ATH9K_STATION_STATISTICS=y
++# CONFIG_ATH9K_DYNACK is not set
++CONFIG_ATH9K_WOW=y
++CONFIG_ATH9K_RFKILL=y
++CONFIG_ATH9K_CHANNEL_CONTEXT=y
++CONFIG_ATH9K_PCOEM=y
++CONFIG_ATH9K_HTC=m
++CONFIG_ATH9K_HTC_DEBUGFS=y
++CONFIG_CARL9170=m
++CONFIG_CARL9170_LEDS=y
++# CONFIG_CARL9170_DEBUGFS is not set
++CONFIG_CARL9170_WPC=y
++CONFIG_CARL9170_HWRNG=y
++CONFIG_ATH6KL=m
++CONFIG_ATH6KL_SDIO=m
++CONFIG_ATH6KL_USB=m
++# CONFIG_ATH6KL_DEBUG is not set
++# CONFIG_ATH6KL_TRACING is not set
++CONFIG_AR5523=m
++CONFIG_WIL6210=m
++CONFIG_WIL6210_ISR_COR=y
++CONFIG_WIL6210_TRACING=y
++CONFIG_ATH10K=m
++CONFIG_ATH10K_PCI=m
++# CONFIG_ATH10K_DEBUG is not set
++CONFIG_ATH10K_DEBUGFS=y
++CONFIG_ATH10K_TRACING=y
++CONFIG_WCN36XX=m
++# CONFIG_WCN36XX_DEBUGFS is not set
++CONFIG_B43=m
++CONFIG_B43_BCMA=y
++CONFIG_B43_SSB=y
++CONFIG_B43_BUSES_BCMA_AND_SSB=y
++# CONFIG_B43_BUSES_BCMA is not set
++# CONFIG_B43_BUSES_SSB is not set
++CONFIG_B43_PCI_AUTOSELECT=y
++CONFIG_B43_PCICORE_AUTOSELECT=y
++# CONFIG_B43_SDIO is not set
++CONFIG_B43_BCMA_PIO=y
++CONFIG_B43_PIO=y
++CONFIG_B43_PHY_G=y
++CONFIG_B43_PHY_N=y
++CONFIG_B43_PHY_LP=y
++CONFIG_B43_PHY_HT=y
++CONFIG_B43_LEDS=y
++CONFIG_B43_HWRNG=y
++# CONFIG_B43_DEBUG is not set
++CONFIG_B43LEGACY=m
++CONFIG_B43LEGACY_PCI_AUTOSELECT=y
++CONFIG_B43LEGACY_PCICORE_AUTOSELECT=y
++CONFIG_B43LEGACY_LEDS=y
++CONFIG_B43LEGACY_HWRNG=y
++# CONFIG_B43LEGACY_DEBUG is not set
++CONFIG_B43LEGACY_DMA=y
++CONFIG_B43LEGACY_PIO=y
++CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y
++# CONFIG_B43LEGACY_DMA_MODE is not set
++# CONFIG_B43LEGACY_PIO_MODE is not set
++CONFIG_BRCMUTIL=m
++CONFIG_BRCMSMAC=m
++CONFIG_BRCMFMAC=m
++CONFIG_BRCMFMAC_PROTO_BCDC=y
++CONFIG_BRCMFMAC_PROTO_MSGBUF=y
++CONFIG_BRCMFMAC_SDIO=y
++CONFIG_BRCMFMAC_USB=y
++CONFIG_BRCMFMAC_PCIE=y
++CONFIG_BRCM_TRACING=y
++# CONFIG_BRCMDBG is not set
++CONFIG_HOSTAP=m
++CONFIG_HOSTAP_FIRMWARE=y
++CONFIG_HOSTAP_FIRMWARE_NVRAM=y
++CONFIG_HOSTAP_PLX=m
++CONFIG_HOSTAP_PCI=m
++CONFIG_IPW2100=m
++CONFIG_IPW2100_MONITOR=y
++# CONFIG_IPW2100_DEBUG is not set
++CONFIG_IPW2200=m
++CONFIG_IPW2200_MONITOR=y
++CONFIG_IPW2200_RADIOTAP=y
++CONFIG_IPW2200_PROMISCUOUS=y
++CONFIG_IPW2200_QOS=y
++# CONFIG_IPW2200_DEBUG is not set
++CONFIG_LIBIPW=m
++# CONFIG_LIBIPW_DEBUG is not set
++CONFIG_IWLWIFI=m
++CONFIG_IWLWIFI_LEDS=y
++CONFIG_IWLDVM=m
++CONFIG_IWLMVM=m
++CONFIG_IWLWIFI_OPMODE_MODULAR=y
++# CONFIG_IWLWIFI_BCAST_FILTERING is not set
++# CONFIG_IWLWIFI_UAPSD is not set
++
++#
++# Debugging Options
++#
++# CONFIG_IWLWIFI_DEBUG is not set
++CONFIG_IWLWIFI_DEBUGFS=y
++CONFIG_IWLWIFI_DEVICE_TRACING=y
++CONFIG_IWLEGACY=m
++CONFIG_IWL4965=m
++CONFIG_IWL3945=m
++
++#
++# iwl3945 / iwl4965 Debugging Options
++#
++# CONFIG_IWLEGACY_DEBUG is not set
++CONFIG_IWLEGACY_DEBUGFS=y
++CONFIG_LIBERTAS=m
++CONFIG_LIBERTAS_USB=m
++CONFIG_LIBERTAS_SDIO=m
++CONFIG_LIBERTAS_SPI=m
++# CONFIG_LIBERTAS_DEBUG is not set
++CONFIG_LIBERTAS_MESH=y
++CONFIG_HERMES=m
++# CONFIG_HERMES_PRISM is not set
++CONFIG_HERMES_CACHE_FW_ON_INIT=y
++CONFIG_PLX_HERMES=m
++CONFIG_TMD_HERMES=m
++CONFIG_NORTEL_HERMES=m
++CONFIG_ORINOCO_USB=m
++CONFIG_P54_COMMON=m
++CONFIG_P54_USB=m
++CONFIG_P54_PCI=m
++CONFIG_P54_SPI=m
++# CONFIG_P54_SPI_DEFAULT_EEPROM is not set
++CONFIG_P54_LEDS=y
++CONFIG_RT2X00=m
++CONFIG_RT2400PCI=m
++CONFIG_RT2500PCI=m
++CONFIG_RT61PCI=m
++CONFIG_RT2800PCI=m
++CONFIG_RT2800PCI_RT33XX=y
++CONFIG_RT2800PCI_RT35XX=y
++CONFIG_RT2800PCI_RT53XX=y
++CONFIG_RT2800PCI_RT3290=y
++CONFIG_RT2500USB=m
++CONFIG_RT73USB=m
++CONFIG_RT2800USB=m
++CONFIG_RT2800USB_RT33XX=y
++CONFIG_RT2800USB_RT35XX=y
++CONFIG_RT2800USB_RT3573=y
++CONFIG_RT2800USB_RT53XX=y
++CONFIG_RT2800USB_RT55XX=y
++CONFIG_RT2800USB_UNKNOWN=y
++CONFIG_RT2800_LIB=m
++CONFIG_RT2800_LIB_MMIO=m
++CONFIG_RT2X00_LIB_MMIO=m
++CONFIG_RT2X00_LIB_PCI=m
++CONFIG_RT2X00_LIB_USB=m
++CONFIG_RT2X00_LIB=m
++CONFIG_RT2X00_LIB_FIRMWARE=y
++CONFIG_RT2X00_LIB_CRYPTO=y
++CONFIG_RT2X00_LIB_LEDS=y
++# CONFIG_RT2X00_LIB_DEBUGFS is not set
++# CONFIG_RT2X00_DEBUG is not set
++CONFIG_WL_MEDIATEK=y
++CONFIG_MT7601U=m
++CONFIG_RTL_CARDS=m
++CONFIG_RTL8192CE=m
++CONFIG_RTL8192SE=m
++CONFIG_RTL8192DE=m
++CONFIG_RTL8723AE=m
++CONFIG_RTL8723BE=m
++CONFIG_RTL8188EE=m
++CONFIG_RTL8192EE=m
++CONFIG_RTL8821AE=m
++CONFIG_RTL8192CU=m
++CONFIG_RTLWIFI=m
++CONFIG_RTLWIFI_PCI=m
++CONFIG_RTLWIFI_USB=m
++# CONFIG_RTLWIFI_DEBUG is not set
++CONFIG_RTL8192C_COMMON=m
++CONFIG_RTL8723_COMMON=m
++CONFIG_RTLBTCOEXIST=m
++CONFIG_RTL8XXXU=m
++CONFIG_RTL8XXXU_UNTESTED=y
++CONFIG_WL_TI=y
++CONFIG_WL1251=m
++CONFIG_WL1251_SPI=m
++CONFIG_WL1251_SDIO=m
++CONFIG_WL12XX=m
++CONFIG_WL18XX=m
++CONFIG_WLCORE=m
++CONFIG_WLCORE_SPI=m
++CONFIG_WLCORE_SDIO=m
++CONFIG_WILINK_PLATFORM_DATA=y
++CONFIG_ZD1211RW=m
++# CONFIG_ZD1211RW_DEBUG is not set
++CONFIG_MWIFIEX=m
++CONFIG_MWIFIEX_SDIO=m
++CONFIG_MWIFIEX_PCIE=m
++CONFIG_MWIFIEX_USB=m
++CONFIG_CW1200=m
++CONFIG_CW1200_WLAN_SDIO=m
++CONFIG_CW1200_WLAN_SPI=m
++CONFIG_RSI_91X=m
++# CONFIG_RSI_DEBUGFS is not set
++CONFIG_RSI_SDIO=m
++CONFIG_RSI_USB=m
++
++#
++# WiMAX Wireless Broadband devices
++#
++CONFIG_WIMAX_I2400M=m
++CONFIG_WIMAX_I2400M_USB=m
++CONFIG_WIMAX_I2400M_DEBUG_LEVEL=8
++CONFIG_WAN=y
++CONFIG_HDLC=m
++CONFIG_HDLC_RAW=m
++CONFIG_HDLC_RAW_ETH=m
++CONFIG_HDLC_CISCO=m
++CONFIG_HDLC_FR=m
++CONFIG_HDLC_PPP=m
++CONFIG_HDLC_X25=m
++CONFIG_PCI200SYN=m
++CONFIG_WANXL=m
++CONFIG_PC300TOO=m
++CONFIG_FARSYNC=m
++CONFIG_DSCC4=m
++CONFIG_DSCC4_PCISYNC=y
++CONFIG_DSCC4_PCI_RST=y
++CONFIG_DLCI=m
++CONFIG_DLCI_MAX=8
++CONFIG_LAPBETHER=m
++CONFIG_X25_ASY=m
++CONFIG_IEEE802154_DRIVERS=m
++CONFIG_IEEE802154_FAKELB=m
++CONFIG_IEEE802154_AT86RF230=m
++CONFIG_IEEE802154_AT86RF230_DEBUGFS=y
++CONFIG_IEEE802154_MRF24J40=m
++CONFIG_IEEE802154_CC2520=m
++CONFIG_IEEE802154_ATUSB=m
++CONFIG_VMXNET3=m
++CONFIG_ISDN=y
++CONFIG_ISDN_I4L=m
++CONFIG_ISDN_PPP=y
++CONFIG_ISDN_PPP_VJ=y
++CONFIG_ISDN_MPP=y
++CONFIG_IPPP_FILTER=y
++CONFIG_ISDN_PPP_BSDCOMP=m
++CONFIG_ISDN_AUDIO=y
++CONFIG_ISDN_TTY_FAX=y
++CONFIG_ISDN_X25=y
++
++#
++# ISDN feature submodules
++#
++CONFIG_ISDN_DIVERSION=m
++
++#
++# ISDN4Linux hardware drivers
++#
++
++#
++# Passive cards
++#
++CONFIG_ISDN_DRV_HISAX=m
++
++#
++# D-channel protocol features
++#
++CONFIG_HISAX_EURO=y
++CONFIG_DE_AOC=y
++# CONFIG_HISAX_NO_SENDCOMPLETE is not set
++# CONFIG_HISAX_NO_LLC is not set
++# CONFIG_HISAX_NO_KEYPAD is not set
++CONFIG_HISAX_1TR6=y
++CONFIG_HISAX_NI1=y
++CONFIG_HISAX_MAX_CARDS=8
++
++#
++# HiSax supported cards
++#
++CONFIG_HISAX_16_3=y
++CONFIG_HISAX_S0BOX=y
++CONFIG_HISAX_AVM_A1_PCMCIA=y
++CONFIG_HISAX_ELSA=y
++CONFIG_HISAX_DIEHLDIVA=y
++CONFIG_HISAX_SEDLBAUER=y
++CONFIG_HISAX_NICCY=y
++CONFIG_HISAX_BKM_A4T=y
++CONFIG_HISAX_SCT_QUADRO=y
++CONFIG_HISAX_GAZEL=y
++CONFIG_HISAX_W6692=y
++CONFIG_HISAX_HFC_SX=y
++# CONFIG_HISAX_DEBUG is not set
++
++#
++# HiSax PCMCIA card service modules
++#
++
++#
++# HiSax sub driver modules
++#
++CONFIG_HISAX_ST5481=m
++CONFIG_HISAX_HFCUSB=m
++CONFIG_HISAX_HFC4S8S=m
++CONFIG_HISAX_FRITZ_PCIPNP=m
++
++#
++# Active cards
++#
++CONFIG_ISDN_CAPI=m
++CONFIG_CAPI_TRACE=y
++CONFIG_ISDN_CAPI_CAPI20=m
++CONFIG_ISDN_CAPI_MIDDLEWARE=y
++CONFIG_ISDN_CAPI_CAPIDRV=m
++# CONFIG_ISDN_CAPI_CAPIDRV_VERBOSE is not set
++
++#
++# CAPI hardware drivers
++#
++CONFIG_CAPI_AVM=y
++CONFIG_ISDN_DRV_AVMB1_B1PCI=m
++CONFIG_ISDN_DRV_AVMB1_B1PCIV4=y
++CONFIG_ISDN_DRV_AVMB1_T1PCI=m
++CONFIG_ISDN_DRV_AVMB1_C4=m
++CONFIG_CAPI_EICON=y
++CONFIG_ISDN_DIVAS=m
++CONFIG_ISDN_DIVAS_BRIPCI=y
++CONFIG_ISDN_DIVAS_PRIPCI=y
++CONFIG_ISDN_DIVAS_DIVACAPI=m
++CONFIG_ISDN_DIVAS_USERIDI=m
++CONFIG_ISDN_DIVAS_MAINT=m
++CONFIG_ISDN_DRV_GIGASET=m
++# CONFIG_GIGASET_CAPI is not set
++CONFIG_GIGASET_I4L=y
++# CONFIG_GIGASET_DUMMYLL is not set
++CONFIG_GIGASET_BASE=m
++CONFIG_GIGASET_M105=m
++CONFIG_GIGASET_M101=m
++# CONFIG_GIGASET_DEBUG is not set
++CONFIG_HYSDN=m
++CONFIG_HYSDN_CAPI=y
++CONFIG_MISDN=m
++CONFIG_MISDN_DSP=m
++CONFIG_MISDN_L1OIP=m
++
++#
++# mISDN hardware drivers
++#
++CONFIG_MISDN_HFCPCI=m
++CONFIG_MISDN_HFCMULTI=m
++CONFIG_MISDN_HFCUSB=m
++CONFIG_MISDN_AVMFRITZ=m
++CONFIG_MISDN_SPEEDFAX=m
++CONFIG_MISDN_INFINEON=m
++CONFIG_MISDN_W6692=m
++CONFIG_MISDN_NETJET=m
++CONFIG_MISDN_IPAC=m
++CONFIG_MISDN_ISAR=m
++CONFIG_ISDN_HDLC=m
++CONFIG_NVM=y
++# CONFIG_NVM_DEBUG is not set
++CONFIG_NVM_GENNVM=m
++CONFIG_NVM_RRPC=m
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++CONFIG_INPUT_LEDS=m
++CONFIG_INPUT_FF_MEMLESS=m
++CONFIG_INPUT_POLLDEV=m
++CONFIG_INPUT_SPARSEKMAP=m
++CONFIG_INPUT_MATRIXKMAP=m
++
++#
++# Userland interfaces
++#
++CONFIG_INPUT_MOUSEDEV=y
++CONFIG_INPUT_MOUSEDEV_PSAUX=y
++CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
++CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
++CONFIG_INPUT_JOYDEV=m
++CONFIG_INPUT_EVDEV=y
++CONFIG_INPUT_EVBUG=m
++
++#
++# Input Device Drivers
++#
++CONFIG_INPUT_KEYBOARD=y
++CONFIG_KEYBOARD_ADP5520=m
++CONFIG_KEYBOARD_ADP5588=m
++CONFIG_KEYBOARD_ADP5589=m
++CONFIG_KEYBOARD_ATKBD=y
++CONFIG_KEYBOARD_QT1070=m
++CONFIG_KEYBOARD_QT2160=m
++CONFIG_KEYBOARD_LKKBD=m
++CONFIG_KEYBOARD_GPIO=m
++CONFIG_KEYBOARD_GPIO_POLLED=m
++CONFIG_KEYBOARD_TCA6416=m
++CONFIG_KEYBOARD_TCA8418=m
++CONFIG_KEYBOARD_MATRIX=m
++CONFIG_KEYBOARD_LM8323=m
++CONFIG_KEYBOARD_LM8333=m
++CONFIG_KEYBOARD_MAX7359=m
++CONFIG_KEYBOARD_MCS=m
++CONFIG_KEYBOARD_MPR121=m
++CONFIG_KEYBOARD_NEWTON=m
++CONFIG_KEYBOARD_OPENCORES=m
++CONFIG_KEYBOARD_STOWAWAY=m
++CONFIG_KEYBOARD_SUNKBD=m
++CONFIG_KEYBOARD_STMPE=m
++CONFIG_KEYBOARD_OMAP4=m
++CONFIG_KEYBOARD_TC3589X=m
++CONFIG_KEYBOARD_TWL4030=m
++CONFIG_KEYBOARD_XTKBD=m
++CONFIG_KEYBOARD_CAP11XX=m
++CONFIG_INPUT_MOUSE=y
++CONFIG_MOUSE_PS2=m
++CONFIG_MOUSE_PS2_ALPS=y
++CONFIG_MOUSE_PS2_LOGIPS2PP=y
++CONFIG_MOUSE_PS2_SYNAPTICS=y
++CONFIG_MOUSE_PS2_CYPRESS=y
++CONFIG_MOUSE_PS2_TRACKPOINT=y
++CONFIG_MOUSE_PS2_ELANTECH=y
++CONFIG_MOUSE_PS2_SENTELIC=y
++CONFIG_MOUSE_PS2_TOUCHKIT=y
++CONFIG_MOUSE_PS2_FOCALTECH=y
++CONFIG_MOUSE_SERIAL=m
++CONFIG_MOUSE_APPLETOUCH=m
++CONFIG_MOUSE_BCM5974=m
++CONFIG_MOUSE_CYAPA=m
++CONFIG_MOUSE_ELAN_I2C=m
++CONFIG_MOUSE_ELAN_I2C_I2C=y
++CONFIG_MOUSE_ELAN_I2C_SMBUS=y
++CONFIG_MOUSE_VSXXXAA=m
++CONFIG_MOUSE_GPIO=m
++CONFIG_MOUSE_SYNAPTICS_I2C=m
++CONFIG_MOUSE_SYNAPTICS_USB=m
++CONFIG_INPUT_JOYSTICK=y
++CONFIG_JOYSTICK_ANALOG=m
++CONFIG_JOYSTICK_A3D=m
++CONFIG_JOYSTICK_ADI=m
++CONFIG_JOYSTICK_COBRA=m
++CONFIG_JOYSTICK_GF2K=m
++CONFIG_JOYSTICK_GRIP=m
++CONFIG_JOYSTICK_GRIP_MP=m
++CONFIG_JOYSTICK_GUILLEMOT=m
++CONFIG_JOYSTICK_INTERACT=m
++CONFIG_JOYSTICK_SIDEWINDER=m
++CONFIG_JOYSTICK_TMDC=m
++CONFIG_JOYSTICK_IFORCE=m
++CONFIG_JOYSTICK_IFORCE_USB=y
++CONFIG_JOYSTICK_IFORCE_232=y
++CONFIG_JOYSTICK_WARRIOR=m
++CONFIG_JOYSTICK_MAGELLAN=m
++CONFIG_JOYSTICK_SPACEORB=m
++CONFIG_JOYSTICK_SPACEBALL=m
++CONFIG_JOYSTICK_STINGER=m
++CONFIG_JOYSTICK_TWIDJOY=m
++CONFIG_JOYSTICK_ZHENHUA=m
++CONFIG_JOYSTICK_DB9=m
++CONFIG_JOYSTICK_GAMECON=m
++CONFIG_JOYSTICK_TURBOGRAFX=m
++CONFIG_JOYSTICK_AS5011=m
++CONFIG_JOYSTICK_JOYDUMP=m
++CONFIG_JOYSTICK_XPAD=m
++CONFIG_JOYSTICK_XPAD_FF=y
++CONFIG_JOYSTICK_XPAD_LEDS=y
++CONFIG_JOYSTICK_WALKERA0701=m
++CONFIG_INPUT_TABLET=y
++CONFIG_TABLET_USB_ACECAD=m
++CONFIG_TABLET_USB_AIPTEK=m
++CONFIG_TABLET_USB_GTCO=m
++CONFIG_TABLET_USB_HANWANG=m
++CONFIG_TABLET_USB_KBTAB=m
++CONFIG_TABLET_SERIAL_WACOM4=m
++CONFIG_INPUT_TOUCHSCREEN=y
++CONFIG_TOUCHSCREEN_PROPERTIES=y
++CONFIG_TOUCHSCREEN_88PM860X=m
++CONFIG_TOUCHSCREEN_ADS7846=m
++CONFIG_TOUCHSCREEN_AD7877=m
++CONFIG_TOUCHSCREEN_AD7879=m
++CONFIG_TOUCHSCREEN_AD7879_I2C=m
++CONFIG_TOUCHSCREEN_AD7879_SPI=m
++CONFIG_TOUCHSCREEN_AR1021_I2C=m
++CONFIG_TOUCHSCREEN_ATMEL_MXT=m
++CONFIG_TOUCHSCREEN_AUO_PIXCIR=m
++CONFIG_TOUCHSCREEN_BU21013=m
++CONFIG_TOUCHSCREEN_CHIPONE_ICN8318=m
++CONFIG_TOUCHSCREEN_CY8CTMG110=m
++CONFIG_TOUCHSCREEN_CYTTSP_CORE=m
++CONFIG_TOUCHSCREEN_CYTTSP_I2C=m
++CONFIG_TOUCHSCREEN_CYTTSP_SPI=m
++CONFIG_TOUCHSCREEN_CYTTSP4_CORE=m
++CONFIG_TOUCHSCREEN_CYTTSP4_I2C=m
++CONFIG_TOUCHSCREEN_CYTTSP4_SPI=m
++CONFIG_TOUCHSCREEN_DA9034=m
++CONFIG_TOUCHSCREEN_DA9052=m
++CONFIG_TOUCHSCREEN_DYNAPRO=m
++CONFIG_TOUCHSCREEN_HAMPSHIRE=m
++CONFIG_TOUCHSCREEN_EETI=m
++CONFIG_TOUCHSCREEN_EGALAX=m
++CONFIG_TOUCHSCREEN_FT6236=m
++CONFIG_TOUCHSCREEN_FUJITSU=m
++CONFIG_TOUCHSCREEN_GOODIX=m
++CONFIG_TOUCHSCREEN_ILI210X=m
++CONFIG_TOUCHSCREEN_GUNZE=m
++CONFIG_TOUCHSCREEN_ELAN=m
++CONFIG_TOUCHSCREEN_ELO=m
++CONFIG_TOUCHSCREEN_WACOM_W8001=m
++CONFIG_TOUCHSCREEN_WACOM_I2C=m
++CONFIG_TOUCHSCREEN_MAX11801=m
++CONFIG_TOUCHSCREEN_MCS5000=m
++CONFIG_TOUCHSCREEN_MMS114=m
++CONFIG_TOUCHSCREEN_MTOUCH=m
++CONFIG_TOUCHSCREEN_IMX6UL_TSC=m
++CONFIG_TOUCHSCREEN_INEXIO=m
++CONFIG_TOUCHSCREEN_MK712=m
++CONFIG_TOUCHSCREEN_PENMOUNT=m
++CONFIG_TOUCHSCREEN_EDT_FT5X06=m
++CONFIG_TOUCHSCREEN_TOUCHRIGHT=m
++CONFIG_TOUCHSCREEN_TOUCHWIN=m
++CONFIG_TOUCHSCREEN_TI_AM335X_TSC=m
++CONFIG_TOUCHSCREEN_UCB1400=m
++CONFIG_TOUCHSCREEN_PIXCIR=m
++CONFIG_TOUCHSCREEN_WDT87XX_I2C=m
++CONFIG_TOUCHSCREEN_WM831X=m
++CONFIG_TOUCHSCREEN_WM97XX=m
++CONFIG_TOUCHSCREEN_WM9705=y
++CONFIG_TOUCHSCREEN_WM9712=y
++CONFIG_TOUCHSCREEN_WM9713=y
++CONFIG_TOUCHSCREEN_USB_COMPOSITE=m
++CONFIG_TOUCHSCREEN_MC13783=m
++CONFIG_TOUCHSCREEN_USB_EGALAX=y
++CONFIG_TOUCHSCREEN_USB_PANJIT=y
++CONFIG_TOUCHSCREEN_USB_3M=y
++CONFIG_TOUCHSCREEN_USB_ITM=y
++CONFIG_TOUCHSCREEN_USB_ETURBO=y
++CONFIG_TOUCHSCREEN_USB_GUNZE=y
++CONFIG_TOUCHSCREEN_USB_DMC_TSC10=y
++CONFIG_TOUCHSCREEN_USB_IRTOUCH=y
++CONFIG_TOUCHSCREEN_USB_IDEALTEK=y
++CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH=y
++CONFIG_TOUCHSCREEN_USB_GOTOP=y
++CONFIG_TOUCHSCREEN_USB_JASTEC=y
++CONFIG_TOUCHSCREEN_USB_ELO=y
++CONFIG_TOUCHSCREEN_USB_E2I=y
++CONFIG_TOUCHSCREEN_USB_ZYTRONIC=y
++CONFIG_TOUCHSCREEN_USB_ETT_TC45USB=y
++CONFIG_TOUCHSCREEN_USB_NEXIO=y
++CONFIG_TOUCHSCREEN_USB_EASYTOUCH=y
++CONFIG_TOUCHSCREEN_TOUCHIT213=m
++CONFIG_TOUCHSCREEN_TSC_SERIO=m
++CONFIG_TOUCHSCREEN_TSC200X_CORE=m
++CONFIG_TOUCHSCREEN_TSC2004=m
++CONFIG_TOUCHSCREEN_TSC2005=m
++CONFIG_TOUCHSCREEN_TSC2007=m
++CONFIG_TOUCHSCREEN_PCAP=m
++CONFIG_TOUCHSCREEN_ST1232=m
++CONFIG_TOUCHSCREEN_STMPE=m
++CONFIG_TOUCHSCREEN_SUR40=m
++CONFIG_TOUCHSCREEN_SX8654=m
++CONFIG_TOUCHSCREEN_TPS6507X=m
++CONFIG_TOUCHSCREEN_ZFORCE=m
++CONFIG_TOUCHSCREEN_COLIBRI_VF50=m
++CONFIG_TOUCHSCREEN_ROHM_BU21023=m
++CONFIG_INPUT_MISC=y
++CONFIG_INPUT_88PM860X_ONKEY=m
++CONFIG_INPUT_88PM80X_ONKEY=m
++CONFIG_INPUT_AD714X=m
++CONFIG_INPUT_AD714X_I2C=m
++CONFIG_INPUT_AD714X_SPI=m
++CONFIG_INPUT_ARIZONA_HAPTICS=m
++CONFIG_INPUT_BMA150=m
++CONFIG_INPUT_E3X0_BUTTON=m
++CONFIG_INPUT_PCSPKR=m
++CONFIG_INPUT_MAX77693_HAPTIC=m
++CONFIG_INPUT_MAX8925_ONKEY=m
++CONFIG_INPUT_MAX8997_HAPTIC=m
++CONFIG_INPUT_MC13783_PWRBUTTON=m
++CONFIG_INPUT_MMA8450=m
++CONFIG_INPUT_MPU3050=m
++CONFIG_INPUT_GP2A=m
++CONFIG_INPUT_GPIO_BEEPER=m
++CONFIG_INPUT_GPIO_TILT_POLLED=m
++CONFIG_INPUT_ATI_REMOTE2=m
++CONFIG_INPUT_KEYSPAN_REMOTE=m
++CONFIG_INPUT_KXTJ9=m
++# CONFIG_INPUT_KXTJ9_POLLED_MODE is not set
++CONFIG_INPUT_POWERMATE=m
++CONFIG_INPUT_YEALINK=m
++CONFIG_INPUT_CM109=m
++CONFIG_INPUT_REGULATOR_HAPTIC=m
++CONFIG_INPUT_RETU_PWRBUTTON=m
++CONFIG_INPUT_TPS65218_PWRBUTTON=m
++CONFIG_INPUT_AXP20X_PEK=m
++CONFIG_INPUT_TWL4030_PWRBUTTON=m
++CONFIG_INPUT_TWL4030_VIBRA=m
++CONFIG_INPUT_TWL6040_VIBRA=m
++CONFIG_INPUT_UINPUT=y
++CONFIG_INPUT_PALMAS_PWRBUTTON=m
++CONFIG_INPUT_PCF50633_PMU=m
++CONFIG_INPUT_PCF8574=m
++CONFIG_INPUT_PWM_BEEPER=m
++CONFIG_INPUT_GPIO_ROTARY_ENCODER=m
++CONFIG_INPUT_DA9052_ONKEY=m
++CONFIG_INPUT_DA9055_ONKEY=m
++CONFIG_INPUT_DA9063_ONKEY=m
++CONFIG_INPUT_WM831X_ON=m
++CONFIG_INPUT_PCAP=m
++CONFIG_INPUT_ADXL34X=m
++CONFIG_INPUT_ADXL34X_I2C=m
++CONFIG_INPUT_ADXL34X_SPI=m
++CONFIG_INPUT_IMS_PCU=m
++CONFIG_INPUT_CMA3000=m
++CONFIG_INPUT_CMA3000_I2C=m
++CONFIG_INPUT_IDEAPAD_SLIDEBAR=m
++CONFIG_INPUT_SOC_BUTTON_ARRAY=m
++CONFIG_INPUT_DRV260X_HAPTICS=m
++CONFIG_INPUT_DRV2665_HAPTICS=m
++CONFIG_INPUT_DRV2667_HAPTICS=m
++
++#
++# Hardware I/O ports
++#
++CONFIG_SERIO=y
++CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y
++CONFIG_SERIO_I8042=y
++CONFIG_SERIO_SERPORT=m
++CONFIG_SERIO_PARKBD=m
++CONFIG_SERIO_PCIPS2=m
++CONFIG_SERIO_LIBPS2=y
++CONFIG_SERIO_RAW=m
++CONFIG_SERIO_XILINX_XPS_PS2=m
++CONFIG_SERIO_ALTERA_PS2=m
++CONFIG_SERIO_PS2MULT=m
++CONFIG_SERIO_ARC_PS2=m
++CONFIG_SERIO_APBPS2=m
++CONFIG_USERIO=m
++CONFIG_GAMEPORT=m
++CONFIG_GAMEPORT_NS558=m
++CONFIG_GAMEPORT_L4=m
++CONFIG_GAMEPORT_EMU10K1=m
++CONFIG_GAMEPORT_FM801=m
++
++#
++# Character devices
++#
++CONFIG_TTY=y
++CONFIG_VT=y
++CONFIG_CONSOLE_TRANSLATIONS=y
++CONFIG_VT_CONSOLE=y
++CONFIG_VT_CONSOLE_SLEEP=y
++CONFIG_HW_CONSOLE=y
++CONFIG_VT_HW_CONSOLE_BINDING=y
++CONFIG_UNIX98_PTYS=y
++CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=0
++CONFIG_SERIAL_NONSTANDARD=y
++CONFIG_ROCKETPORT=m
++CONFIG_CYCLADES=m
++# CONFIG_CYZ_INTR is not set
++CONFIG_MOXA_INTELLIO=m
++CONFIG_MOXA_SMARTIO=m
++CONFIG_SYNCLINK=m
++CONFIG_SYNCLINKMP=m
++CONFIG_SYNCLINK_GT=m
++CONFIG_NOZOMI=m
++CONFIG_ISI=m
++CONFIG_N_HDLC=m
++CONFIG_N_GSM=m
++CONFIG_TRACE_ROUTER=m
++CONFIG_TRACE_SINK=m
++# CONFIG_PPC_EPAPR_HV_BYTECHAN is not set
++CONFIG_DEVMEM=y
++# CONFIG_DEVKMEM is not set
++
++#
++# Serial drivers
++#
++CONFIG_SERIAL_EARLYCON=y
++CONFIG_SERIAL_8250=y
++# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
++CONFIG_SERIAL_8250_CONSOLE=y
++CONFIG_SERIAL_8250_DMA=y
++CONFIG_SERIAL_8250_PCI=y
++CONFIG_SERIAL_8250_NR_UARTS=48
++CONFIG_SERIAL_8250_RUNTIME_UARTS=32
++CONFIG_SERIAL_8250_EXTENDED=y
++CONFIG_SERIAL_8250_MANY_PORTS=y
++CONFIG_SERIAL_8250_SHARE_IRQ=y
++# CONFIG_SERIAL_8250_DETECT_IRQ is not set
++CONFIG_SERIAL_8250_RSA=y
++CONFIG_SERIAL_8250_FSL=y
++CONFIG_SERIAL_8250_DW=m
++CONFIG_SERIAL_8250_RT288X=y
++CONFIG_SERIAL_8250_INGENIC=y
++CONFIG_SERIAL_8250_MID=m
++
++#
++# Non-8250 serial port support
++#
++CONFIG_SERIAL_KGDB_NMI=y
++CONFIG_SERIAL_MAX3100=m
++CONFIG_SERIAL_MAX310X=y
++CONFIG_SERIAL_UARTLITE=m
++CONFIG_SERIAL_CORE=y
++CONFIG_SERIAL_CORE_CONSOLE=y
++CONFIG_CONSOLE_POLL=y
++CONFIG_SERIAL_ICOM=m
++CONFIG_SERIAL_JSM=m
++CONFIG_SERIAL_OF_PLATFORM=y
++CONFIG_SERIAL_SCCNXP=y
++CONFIG_SERIAL_SCCNXP_CONSOLE=y
++CONFIG_SERIAL_SC16IS7XX_CORE=m
++CONFIG_SERIAL_SC16IS7XX=m
++CONFIG_SERIAL_SC16IS7XX_I2C=y
++CONFIG_SERIAL_SC16IS7XX_SPI=y
++CONFIG_SERIAL_ALTERA_JTAGUART=m
++CONFIG_SERIAL_ALTERA_UART=m
++CONFIG_SERIAL_ALTERA_UART_MAXPORTS=4
++CONFIG_SERIAL_ALTERA_UART_BAUDRATE=115200
++# CONFIG_SERIAL_IFX6X60 is not set
++CONFIG_SERIAL_XILINX_PS_UART=m
++CONFIG_SERIAL_ARC=m
++CONFIG_SERIAL_ARC_NR_PORTS=1
++CONFIG_SERIAL_RP2=m
++CONFIG_SERIAL_RP2_NR_UARTS=32
++CONFIG_SERIAL_FSL_LPUART=m
++CONFIG_SERIAL_CONEXANT_DIGICOLOR=m
++CONFIG_SERIAL_MEN_Z135=m
++CONFIG_TTY_PRINTK=y
++CONFIG_PRINTER=m
++# CONFIG_LP_CONSOLE is not set
++CONFIG_PPDEV=m
++CONFIG_HVC_DRIVER=y
++CONFIG_HVC_IRQ=y
++CONFIG_HVC_CONSOLE=y
++CONFIG_HVC_OLD_HVSI=y
++CONFIG_HVC_OPAL=y
++CONFIG_HVC_RTAS=y
++CONFIG_HVC_UDBG=y
++CONFIG_HVCS=m
++CONFIG_VIRTIO_CONSOLE=y
++CONFIG_IBM_BSR=m
++CONFIG_IPMI_HANDLER=m
++# CONFIG_IPMI_PANIC_EVENT is not set
++CONFIG_IPMI_DEVICE_INTERFACE=m
++CONFIG_IPMI_SI=m
++CONFIG_IPMI_SI_PROBE_DEFAULTS=y
++CONFIG_IPMI_SSIF=m
++CONFIG_IPMI_POWERNV=m
++CONFIG_IPMI_WATCHDOG=m
++CONFIG_IPMI_POWEROFF=m
++CONFIG_HW_RANDOM=y
++CONFIG_HW_RANDOM_TIMERIOMEM=m
++CONFIG_HW_RANDOM_VIRTIO=m
++CONFIG_HW_RANDOM_PSERIES=m
++CONFIG_HW_RANDOM_POWERNV=m
++CONFIG_HW_RANDOM_TPM=m
++CONFIG_R3964=m
++CONFIG_APPLICOM=m
++CONFIG_RAW_DRIVER=m
++CONFIG_MAX_RAW_DEVS=256
++CONFIG_HANGCHECK_TIMER=m
++CONFIG_TCG_TPM=y
++CONFIG_TCG_TIS_I2C_ATMEL=m
++CONFIG_TCG_TIS_I2C_INFINEON=m
++CONFIG_TCG_TIS_I2C_NUVOTON=m
++CONFIG_TCG_ATMEL=m
++CONFIG_TCG_IBMVTPM=y
++CONFIG_TCG_TIS_ST33ZP24=m
++CONFIG_TCG_TIS_ST33ZP24_I2C=m
++CONFIG_TCG_TIS_ST33ZP24_SPI=m
++CONFIG_DEVPORT=y
++CONFIG_XILLYBUS=m
++CONFIG_XILLYBUS_PCIE=m
++CONFIG_XILLYBUS_OF=m
++
++#
++# I2C support
++#
++CONFIG_I2C=y
++CONFIG_I2C_BOARDINFO=y
++CONFIG_I2C_COMPAT=y
++CONFIG_I2C_CHARDEV=y
++CONFIG_I2C_MUX=m
++
++#
++# Multiplexer I2C Chip support
++#
++CONFIG_I2C_ARB_GPIO_CHALLENGE=m
++CONFIG_I2C_MUX_GPIO=m
++CONFIG_I2C_MUX_PCA9541=m
++CONFIG_I2C_MUX_PCA954x=m
++CONFIG_I2C_MUX_REG=m
++CONFIG_I2C_HELPER_AUTO=y
++CONFIG_I2C_SMBUS=m
++CONFIG_I2C_ALGOBIT=m
++CONFIG_I2C_ALGOPCA=m
++
++#
++# I2C Hardware Bus support
++#
++
++#
++# PC SMBus host controller drivers
++#
++CONFIG_I2C_ALI1535=m
++CONFIG_I2C_ALI1563=m
++CONFIG_I2C_ALI15X3=m
++CONFIG_I2C_AMD756=m
++CONFIG_I2C_AMD8111=m
++CONFIG_I2C_I801=m
++CONFIG_I2C_ISCH=m
++CONFIG_I2C_PIIX4=m
++CONFIG_I2C_NFORCE2=m
++CONFIG_I2C_SIS5595=m
++CONFIG_I2C_SIS630=m
++CONFIG_I2C_SIS96X=m
++CONFIG_I2C_VIA=m
++CONFIG_I2C_VIAPRO=m
++
++#
++# I2C system bus drivers (mostly embedded / system-on-chip)
++#
++CONFIG_I2C_CBUS_GPIO=m
++CONFIG_I2C_DESIGNWARE_CORE=m
++CONFIG_I2C_DESIGNWARE_PLATFORM=m
++CONFIG_I2C_DESIGNWARE_PCI=m
++CONFIG_I2C_GPIO=m
++CONFIG_I2C_KEMPLD=m
++CONFIG_I2C_MPC=m
++CONFIG_I2C_OCORES=m
++CONFIG_I2C_PCA_PLATFORM=m
++# CONFIG_I2C_PXA_PCI is not set
++CONFIG_I2C_SIMTEC=m
++CONFIG_I2C_XILINX=m
++
++#
++# External I2C/SMBus adapter drivers
++#
++CONFIG_I2C_DIOLAN_U2C=m
++CONFIG_I2C_DLN2=m
++CONFIG_I2C_PARPORT=m
++CONFIG_I2C_PARPORT_LIGHT=m
++CONFIG_I2C_ROBOTFUZZ_OSIF=m
++CONFIG_I2C_TAOS_EVM=m
++CONFIG_I2C_TINY_USB=m
++CONFIG_I2C_VIPERBOARD=m
++
++#
++# Other I2C/SMBus bus drivers
++#
++CONFIG_I2C_OPAL=y
++CONFIG_I2C_STUB=m
++# CONFIG_I2C_SLAVE is not set
++# CONFIG_I2C_DEBUG_CORE is not set
++# CONFIG_I2C_DEBUG_ALGO is not set
++# CONFIG_I2C_DEBUG_BUS is not set
++CONFIG_SPI=y
++# CONFIG_SPI_DEBUG is not set
++CONFIG_SPI_MASTER=y
++
++#
++# SPI Master Controller Drivers
++#
++CONFIG_SPI_ALTERA=m
++CONFIG_SPI_BITBANG=m
++CONFIG_SPI_BUTTERFLY=m
++CONFIG_SPI_CADENCE=m
++CONFIG_SPI_DLN2=m
++CONFIG_SPI_GPIO=m
++CONFIG_SPI_LM70_LLP=m
++CONFIG_SPI_FSL_LIB=y
++CONFIG_SPI_FSL_SPI=y
++CONFIG_SPI_OC_TINY=m
++CONFIG_SPI_PXA2XX_DMA=y
++CONFIG_SPI_PXA2XX=m
++# CONFIG_SPI_PXA2XX_PCI is not set
++CONFIG_SPI_SC18IS602=m
++CONFIG_SPI_XCOMM=m
++# CONFIG_SPI_XILINX is not set
++CONFIG_SPI_ZYNQMP_GQSPI=m
++CONFIG_SPI_DESIGNWARE=m
++CONFIG_SPI_DW_PCI=m
++CONFIG_SPI_DW_MID_DMA=y
++CONFIG_SPI_DW_MMIO=m
++
++#
++# SPI Protocol Masters
++#
++CONFIG_SPI_SPIDEV=m
++CONFIG_SPI_TLE62X0=m
++CONFIG_SPMI=m
++CONFIG_HSI=m
++CONFIG_HSI_BOARDINFO=y
++
++#
++# HSI controllers
++#
++
++#
++# HSI clients
++#
++CONFIG_HSI_CHAR=m
++
++#
++# PPS support
++#
++CONFIG_PPS=y
++# CONFIG_PPS_DEBUG is not set
++
++#
++# PPS clients support
++#
++# CONFIG_PPS_CLIENT_KTIMER is not set
++CONFIG_PPS_CLIENT_LDISC=m
++CONFIG_PPS_CLIENT_PARPORT=m
++CONFIG_PPS_CLIENT_GPIO=m
++
++#
++# PPS generators support
++#
++
++#
++# PTP clock support
++#
++CONFIG_PTP_1588_CLOCK=y
++
++#
++# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks.
++#
++CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
++CONFIG_GPIOLIB=y
++CONFIG_GPIO_DEVRES=y
++CONFIG_OF_GPIO=y
++CONFIG_GPIOLIB_IRQCHIP=y
++# CONFIG_DEBUG_GPIO is not set
++CONFIG_GPIO_SYSFS=y
++CONFIG_GPIO_GENERIC=m
++CONFIG_GPIO_MAX730X=m
++
++#
++# Memory mapped GPIO drivers
++#
++CONFIG_GPIO_74XX_MMIO=m
++CONFIG_GPIO_ALTERA=m
++CONFIG_GPIO_DWAPB=m
++CONFIG_GPIO_GENERIC_PLATFORM=m
++CONFIG_GPIO_GRGPIO=m
++CONFIG_GPIO_SYSCON=m
++CONFIG_GPIO_VX855=m
++CONFIG_GPIO_XILINX=y
++CONFIG_GPIO_ZX=y
++
++#
++# I2C GPIO expanders
++#
++CONFIG_GPIO_ADP5588=m
++CONFIG_GPIO_ADNP=m
++CONFIG_GPIO_MAX7300=m
++CONFIG_GPIO_MAX732X=m
++CONFIG_GPIO_PCA953X=m
++CONFIG_GPIO_PCF857X=m
++CONFIG_GPIO_SX150X=y
++
++#
++# MFD GPIO expanders
++#
++CONFIG_GPIO_ADP5520=m
++CONFIG_GPIO_ARIZONA=m
++CONFIG_GPIO_CRYSTAL_COVE=m
++CONFIG_GPIO_DA9052=m
++CONFIG_GPIO_DA9055=m
++CONFIG_GPIO_DLN2=m
++CONFIG_GPIO_JANZ_TTL=m
++CONFIG_GPIO_KEMPLD=m
++CONFIG_GPIO_LP3943=m
++CONFIG_GPIO_PALMAS=y
++CONFIG_GPIO_RC5T583=y
++CONFIG_GPIO_STMPE=y
++CONFIG_GPIO_TC3589X=y
++CONFIG_GPIO_TPS6586X=y
++CONFIG_GPIO_TPS65910=y
++CONFIG_GPIO_TPS65912=m
++CONFIG_GPIO_TWL4030=m
++CONFIG_GPIO_TWL6040=m
++CONFIG_GPIO_UCB1400=m
++CONFIG_GPIO_WM831X=m
++CONFIG_GPIO_WM8350=m
++CONFIG_GPIO_WM8994=m
++
++#
++# PCI GPIO expanders
++#
++CONFIG_GPIO_AMD8111=m
++CONFIG_GPIO_ML_IOH=m
++CONFIG_GPIO_RDC321X=m
++
++#
++# SPI GPIO expanders
++#
++CONFIG_GPIO_74X164=m
++CONFIG_GPIO_MAX7301=m
++CONFIG_GPIO_MC33880=m
++
++#
++# SPI or I2C GPIO expanders
++#
++CONFIG_GPIO_MCP23S08=m
++
++#
++# USB GPIO expanders
++#
++CONFIG_GPIO_VIPERBOARD=m
++CONFIG_W1=m
++CONFIG_W1_CON=y
++
++#
++# 1-wire Bus Masters
++#
++CONFIG_W1_MASTER_MATROX=m
++CONFIG_W1_MASTER_DS2490=m
++CONFIG_W1_MASTER_DS2482=m
++CONFIG_W1_MASTER_DS1WM=m
++CONFIG_W1_MASTER_GPIO=m
++
++#
++# 1-wire Slaves
++#
++CONFIG_W1_SLAVE_THERM=m
++CONFIG_W1_SLAVE_SMEM=m
++CONFIG_W1_SLAVE_DS2408=m
++CONFIG_W1_SLAVE_DS2408_READBACK=y
++CONFIG_W1_SLAVE_DS2413=m
++CONFIG_W1_SLAVE_DS2406=m
++CONFIG_W1_SLAVE_DS2423=m
++CONFIG_W1_SLAVE_DS2431=m
++CONFIG_W1_SLAVE_DS2433=m
++# CONFIG_W1_SLAVE_DS2433_CRC is not set
++CONFIG_W1_SLAVE_DS2760=m
++CONFIG_W1_SLAVE_DS2780=m
++CONFIG_W1_SLAVE_DS2781=m
++CONFIG_W1_SLAVE_DS28E04=m
++CONFIG_W1_SLAVE_BQ27000=m
++CONFIG_POWER_SUPPLY=y
++# CONFIG_POWER_SUPPLY_DEBUG is not set
++CONFIG_PDA_POWER=m
++CONFIG_GENERIC_ADC_BATTERY=m
++CONFIG_MAX8925_POWER=m
++CONFIG_WM831X_BACKUP=m
++CONFIG_WM831X_POWER=m
++CONFIG_WM8350_POWER=m
++CONFIG_TEST_POWER=m
++CONFIG_BATTERY_88PM860X=m
++CONFIG_BATTERY_DS2760=m
++CONFIG_BATTERY_DS2780=m
++CONFIG_BATTERY_DS2781=m
++CONFIG_BATTERY_DS2782=m
++CONFIG_BATTERY_SBS=m
++CONFIG_BATTERY_BQ27XXX=m
++CONFIG_BATTERY_BQ27XXX_I2C=y
++CONFIG_BATTERY_BQ27XXX_PLATFORM=y
++CONFIG_BATTERY_DA9030=m
++CONFIG_BATTERY_DA9052=m
++CONFIG_CHARGER_DA9150=m
++CONFIG_BATTERY_DA9150=m
++CONFIG_AXP288_CHARGER=m
++CONFIG_AXP288_FUEL_GAUGE=m
++CONFIG_BATTERY_MAX17040=m
++CONFIG_BATTERY_MAX17042=m
++CONFIG_BATTERY_TWL4030_MADC=m
++CONFIG_CHARGER_88PM860X=m
++CONFIG_CHARGER_PCF50633=m
++CONFIG_BATTERY_RX51=m
++CONFIG_CHARGER_ISP1704=m
++CONFIG_CHARGER_MAX8903=m
++CONFIG_CHARGER_TWL4030=m
++CONFIG_CHARGER_LP8727=m
++CONFIG_CHARGER_LP8788=m
++CONFIG_CHARGER_GPIO=m
++CONFIG_CHARGER_MANAGER=y
++CONFIG_CHARGER_MAX14577=m
++CONFIG_CHARGER_MAX77693=m
++CONFIG_CHARGER_MAX8997=m
++CONFIG_CHARGER_MAX8998=m
++CONFIG_CHARGER_BQ2415X=m
++CONFIG_CHARGER_BQ24190=m
++CONFIG_CHARGER_BQ24257=m
++CONFIG_CHARGER_BQ24735=m
++CONFIG_CHARGER_BQ25890=m
++CONFIG_CHARGER_SMB347=m
++CONFIG_CHARGER_TPS65090=m
++CONFIG_CHARGER_TPS65217=m
++CONFIG_BATTERY_GAUGE_LTC2941=m
++CONFIG_BATTERY_RT5033=m
++CONFIG_CHARGER_RT9455=m
++CONFIG_AXP20X_POWER=m
++CONFIG_POWER_RESET=y
++CONFIG_POWER_RESET_AS3722=y
++CONFIG_POWER_RESET_GPIO=y
++CONFIG_POWER_RESET_GPIO_RESTART=y
++CONFIG_POWER_RESET_LTC2952=y
++CONFIG_POWER_RESET_RESTART=y
++CONFIG_POWER_RESET_SYSCON=y
++CONFIG_POWER_RESET_SYSCON_POWEROFF=y
++CONFIG_POWER_AVS=y
++CONFIG_HWMON=y
++CONFIG_HWMON_VID=m
++# CONFIG_HWMON_DEBUG_CHIP is not set
++
++#
++# Native drivers
++#
++CONFIG_SENSORS_AD7314=m
++CONFIG_SENSORS_AD7414=m
++CONFIG_SENSORS_AD7418=m
++CONFIG_SENSORS_ADM1021=m
++CONFIG_SENSORS_ADM1025=m
++CONFIG_SENSORS_ADM1026=m
++CONFIG_SENSORS_ADM1029=m
++CONFIG_SENSORS_ADM1031=m
++CONFIG_SENSORS_ADM9240=m
++CONFIG_SENSORS_ADT7X10=m
++CONFIG_SENSORS_ADT7310=m
++CONFIG_SENSORS_ADT7410=m
++CONFIG_SENSORS_ADT7411=m
++CONFIG_SENSORS_ADT7462=m
++CONFIG_SENSORS_ADT7470=m
++CONFIG_SENSORS_ADT7475=m
++CONFIG_SENSORS_ASC7621=m
++CONFIG_SENSORS_ATXP1=m
++CONFIG_SENSORS_DS620=m
++CONFIG_SENSORS_DS1621=m
++CONFIG_SENSORS_DA9052_ADC=m
++CONFIG_SENSORS_DA9055=m
++CONFIG_SENSORS_I5K_AMB=m
++CONFIG_SENSORS_F75375S=m
++CONFIG_SENSORS_MC13783_ADC=m
++CONFIG_SENSORS_GL518SM=m
++CONFIG_SENSORS_GL520SM=m
++CONFIG_SENSORS_G760A=m
++CONFIG_SENSORS_G762=m
++CONFIG_SENSORS_GPIO_FAN=m
++CONFIG_SENSORS_HIH6130=m
++CONFIG_SENSORS_IBMAEM=m
++CONFIG_SENSORS_IBMPEX=m
++CONFIG_SENSORS_IBMPOWERNV=m
++CONFIG_SENSORS_IIO_HWMON=m
++CONFIG_SENSORS_JC42=m
++CONFIG_SENSORS_POWR1220=m
++CONFIG_SENSORS_LINEAGE=m
++CONFIG_SENSORS_LTC2945=m
++CONFIG_SENSORS_LTC4151=m
++CONFIG_SENSORS_LTC4215=m
++CONFIG_SENSORS_LTC4222=m
++CONFIG_SENSORS_LTC4245=m
++CONFIG_SENSORS_LTC4260=m
++CONFIG_SENSORS_LTC4261=m
++CONFIG_SENSORS_MAX1111=m
++CONFIG_SENSORS_MAX16065=m
++CONFIG_SENSORS_MAX1619=m
++CONFIG_SENSORS_MAX1668=m
++CONFIG_SENSORS_MAX197=m
++CONFIG_SENSORS_MAX6639=m
++CONFIG_SENSORS_MAX6642=m
++CONFIG_SENSORS_MAX6650=m
++CONFIG_SENSORS_MAX6697=m
++CONFIG_SENSORS_MAX31790=m
++CONFIG_SENSORS_HTU21=m
++CONFIG_SENSORS_MCP3021=m
++CONFIG_SENSORS_MENF21BMC_HWMON=m
++CONFIG_SENSORS_ADCXX=m
++CONFIG_SENSORS_LM63=m
++CONFIG_SENSORS_LM70=m
++CONFIG_SENSORS_LM73=m
++CONFIG_SENSORS_LM75=m
++CONFIG_SENSORS_LM77=m
++CONFIG_SENSORS_LM78=m
++CONFIG_SENSORS_LM80=m
++CONFIG_SENSORS_LM83=m
++CONFIG_SENSORS_LM85=m
++CONFIG_SENSORS_LM87=m
++CONFIG_SENSORS_LM90=m
++CONFIG_SENSORS_LM92=m
++CONFIG_SENSORS_LM93=m
++CONFIG_SENSORS_LM95234=m
++CONFIG_SENSORS_LM95241=m
++CONFIG_SENSORS_LM95245=m
++CONFIG_SENSORS_NTC_THERMISTOR=m
++CONFIG_SENSORS_NCT7802=m
++CONFIG_SENSORS_NCT7904=m
++CONFIG_SENSORS_PCF8591=m
++CONFIG_PMBUS=m
++CONFIG_SENSORS_PMBUS=m
++CONFIG_SENSORS_ADM1275=m
++CONFIG_SENSORS_LM25066=m
++CONFIG_SENSORS_LTC2978=m
++CONFIG_SENSORS_LTC2978_REGULATOR=y
++CONFIG_SENSORS_MAX16064=m
++CONFIG_SENSORS_MAX20751=m
++CONFIG_SENSORS_MAX34440=m
++CONFIG_SENSORS_MAX8688=m
++CONFIG_SENSORS_TPS40422=m
++CONFIG_SENSORS_UCD9000=m
++CONFIG_SENSORS_UCD9200=m
++CONFIG_SENSORS_ZL6100=m
++CONFIG_SENSORS_PWM_FAN=m
++CONFIG_SENSORS_SHT15=m
++CONFIG_SENSORS_SHT21=m
++CONFIG_SENSORS_SHTC1=m
++CONFIG_SENSORS_SIS5595=m
++CONFIG_SENSORS_EMC1403=m
++CONFIG_SENSORS_EMC2103=m
++CONFIG_SENSORS_EMC6W201=m
++CONFIG_SENSORS_SMSC47M192=m
++# CONFIG_SENSORS_SCH56XX_COMMON is not set
++CONFIG_SENSORS_SMM665=m
++CONFIG_SENSORS_ADC128D818=m
++CONFIG_SENSORS_ADS1015=m
++CONFIG_SENSORS_ADS7828=m
++CONFIG_SENSORS_ADS7871=m
++CONFIG_SENSORS_AMC6821=m
++CONFIG_SENSORS_INA209=m
++CONFIG_SENSORS_INA2XX=m
++CONFIG_SENSORS_TC74=m
++CONFIG_SENSORS_THMC50=m
++CONFIG_SENSORS_TMP102=m
++CONFIG_SENSORS_TMP103=m
++CONFIG_SENSORS_TMP401=m
++CONFIG_SENSORS_TMP421=m
++CONFIG_SENSORS_TWL4030_MADC=m
++CONFIG_SENSORS_VIA686A=m
++CONFIG_SENSORS_VT8231=m
++CONFIG_SENSORS_W83781D=m
++CONFIG_SENSORS_W83791D=m
++CONFIG_SENSORS_W83792D=m
++CONFIG_SENSORS_W83793=m
++CONFIG_SENSORS_W83795=m
++# CONFIG_SENSORS_W83795_FANCTRL is not set
++CONFIG_SENSORS_W83L785TS=m
++CONFIG_SENSORS_W83L786NG=m
++CONFIG_SENSORS_WM831X=m
++CONFIG_SENSORS_WM8350=m
++CONFIG_THERMAL=y
++CONFIG_THERMAL_HWMON=y
++CONFIG_THERMAL_OF=y
++CONFIG_THERMAL_WRITABLE_TRIPS=y
++CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
++# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set
++# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set
++# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set
++CONFIG_THERMAL_GOV_FAIR_SHARE=y
++CONFIG_THERMAL_GOV_STEP_WISE=y
++CONFIG_THERMAL_GOV_BANG_BANG=y
++CONFIG_THERMAL_GOV_USER_SPACE=y
++CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
++CONFIG_CPU_THERMAL=y
++CONFIG_THERMAL_EMULATION=y
++CONFIG_IMX_THERMAL=m
++CONFIG_QCOM_SPMI_TEMP_ALARM=m
++CONFIG_WATCHDOG=y
++CONFIG_WATCHDOG_CORE=y
++# CONFIG_WATCHDOG_NOWAYOUT is not set
++
++#
++# Watchdog Device Drivers
++#
++CONFIG_SOFT_WATCHDOG=m
++CONFIG_DA9052_WATCHDOG=m
++CONFIG_DA9055_WATCHDOG=m
++CONFIG_DA9063_WATCHDOG=m
++CONFIG_DA9062_WATCHDOG=m
++CONFIG_GPIO_WATCHDOG=m
++CONFIG_MENF21BMC_WATCHDOG=m
++CONFIG_WM831X_WATCHDOG=m
++CONFIG_WM8350_WATCHDOG=m
++CONFIG_XILINX_WATCHDOG=m
++CONFIG_CADENCE_WATCHDOG=m
++CONFIG_DW_WATCHDOG=m
++CONFIG_RN5T618_WATCHDOG=m
++CONFIG_TWL4030_WATCHDOG=m
++CONFIG_MAX63XX_WATCHDOG=m
++CONFIG_RETU_WATCHDOG=m
++CONFIG_ALIM7101_WDT=m
++CONFIG_I6300ESB_WDT=m
++CONFIG_KEMPLD_WDT=m
++CONFIG_BCM7038_WDT=m
++CONFIG_MEN_A21_WDT=m
++CONFIG_WATCHDOG_RTAS=m
++
++#
++# PCI-based Watchdog Cards
++#
++CONFIG_PCIPCWATCHDOG=m
++CONFIG_WDTPCI=m
++
++#
++# USB-based Watchdog Cards
++#
++CONFIG_USBPCWATCHDOG=m
++CONFIG_SSB_POSSIBLE=y
++
++#
++# Sonics Silicon Backplane
++#
++CONFIG_SSB=m
++CONFIG_SSB_SPROM=y
++CONFIG_SSB_BLOCKIO=y
++CONFIG_SSB_PCIHOST_POSSIBLE=y
++CONFIG_SSB_PCIHOST=y
++CONFIG_SSB_B43_PCI_BRIDGE=y
++CONFIG_SSB_SDIOHOST_POSSIBLE=y
++CONFIG_SSB_SDIOHOST=y
++CONFIG_SSB_HOST_SOC=y
++# CONFIG_SSB_SILENT is not set
++# CONFIG_SSB_DEBUG is not set
++CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y
++CONFIG_SSB_DRIVER_PCICORE=y
++CONFIG_SSB_DRIVER_GPIO=y
++CONFIG_BCMA_POSSIBLE=y
++
++#
++# Broadcom specific AMBA
++#
++CONFIG_BCMA=m
++CONFIG_BCMA_BLOCKIO=y
++CONFIG_BCMA_HOST_PCI_POSSIBLE=y
++CONFIG_BCMA_HOST_PCI=y
++CONFIG_BCMA_HOST_SOC=y
++CONFIG_BCMA_DRIVER_PCI=y
++CONFIG_BCMA_DRIVER_GMAC_CMN=y
++CONFIG_BCMA_DRIVER_GPIO=y
++# CONFIG_BCMA_DEBUG is not set
++
++#
++# Multifunction device drivers
++#
++CONFIG_MFD_CORE=y
++CONFIG_MFD_AS3711=y
++CONFIG_MFD_AS3722=y
++CONFIG_PMIC_ADP5520=y
++CONFIG_MFD_AAT2870_CORE=y
++CONFIG_MFD_ATMEL_FLEXCOM=m
++CONFIG_MFD_ATMEL_HLCDC=m
++CONFIG_MFD_BCM590XX=m
++CONFIG_MFD_AXP20X=y
++CONFIG_PMIC_DA903X=y
++CONFIG_PMIC_DA9052=y
++CONFIG_MFD_DA9052_SPI=y
++CONFIG_MFD_DA9052_I2C=y
++CONFIG_MFD_DA9055=y
++CONFIG_MFD_DA9062=m
++CONFIG_MFD_DA9063=y
++CONFIG_MFD_DA9150=m
++CONFIG_MFD_DLN2=m
++CONFIG_MFD_MC13XXX=m
++CONFIG_MFD_MC13XXX_SPI=m
++CONFIG_MFD_MC13XXX_I2C=m
++CONFIG_MFD_HI6421_PMIC=m
++CONFIG_HTC_PASIC3=m
++CONFIG_HTC_I2CPLD=y
++CONFIG_LPC_ICH=m
++CONFIG_LPC_SCH=m
++CONFIG_INTEL_SOC_PMIC=y
++CONFIG_MFD_JANZ_CMODIO=m
++CONFIG_MFD_KEMPLD=m
++CONFIG_MFD_88PM800=m
++CONFIG_MFD_88PM805=m
++CONFIG_MFD_88PM860X=y
++CONFIG_MFD_MAX14577=y
++CONFIG_MFD_MAX77686=y
++CONFIG_MFD_MAX77693=y
++CONFIG_MFD_MAX77843=y
++CONFIG_MFD_MAX8907=m
++CONFIG_MFD_MAX8925=y
++CONFIG_MFD_MAX8997=y
++CONFIG_MFD_MAX8998=y
++CONFIG_MFD_MT6397=m
++CONFIG_MFD_MENF21BMC=m
++CONFIG_EZX_PCAP=y
++CONFIG_MFD_VIPERBOARD=m
++CONFIG_MFD_RETU=m
++CONFIG_MFD_PCF50633=m
++CONFIG_PCF50633_ADC=m
++CONFIG_PCF50633_GPIO=m
++CONFIG_UCB1400_CORE=m
++CONFIG_MFD_RDC321X=m
++CONFIG_MFD_RTSX_PCI=m
++CONFIG_MFD_RT5033=m
++CONFIG_MFD_RTSX_USB=m
++CONFIG_MFD_RC5T583=y
++CONFIG_MFD_RK808=m
++CONFIG_MFD_RN5T618=m
++CONFIG_MFD_SEC_CORE=y
++CONFIG_MFD_SI476X_CORE=m
++CONFIG_MFD_SM501=m
++CONFIG_MFD_SM501_GPIO=y
++CONFIG_MFD_SKY81452=m
++CONFIG_MFD_SMSC=y
++CONFIG_ABX500_CORE=y
++CONFIG_AB3100_CORE=y
++CONFIG_AB3100_OTP=m
++CONFIG_MFD_STMPE=y
++
++#
++# STMicroelectronics STMPE Interface Drivers
++#
++CONFIG_STMPE_I2C=y
++CONFIG_STMPE_SPI=y
++CONFIG_MFD_SYSCON=y
++CONFIG_MFD_TI_AM335X_TSCADC=m
++CONFIG_MFD_LP3943=m
++CONFIG_MFD_LP8788=y
++CONFIG_MFD_PALMAS=y
++CONFIG_TPS6105X=m
++CONFIG_TPS65010=m
++CONFIG_TPS6507X=m
++CONFIG_MFD_TPS65090=y
++CONFIG_MFD_TPS65217=y
++CONFIG_MFD_TPS65218=m
++CONFIG_MFD_TPS6586X=y
++CONFIG_MFD_TPS65910=y
++CONFIG_MFD_TPS65912=y
++CONFIG_MFD_TPS65912_I2C=y
++CONFIG_MFD_TPS65912_SPI=y
++CONFIG_MFD_TPS80031=y
++CONFIG_TWL4030_CORE=y
++CONFIG_MFD_TWL4030_AUDIO=y
++CONFIG_TWL6040_CORE=y
++CONFIG_MFD_WL1273_CORE=m
++CONFIG_MFD_LM3533=m
++CONFIG_MFD_TC3589X=y
++# CONFIG_MFD_TMIO is not set
++CONFIG_MFD_VX855=m
++CONFIG_MFD_ARIZONA=y
++CONFIG_MFD_ARIZONA_I2C=m
++CONFIG_MFD_ARIZONA_SPI=m
++CONFIG_MFD_WM5102=y
++CONFIG_MFD_WM5110=y
++CONFIG_MFD_WM8997=y
++CONFIG_MFD_WM8998=y
++CONFIG_MFD_WM8400=y
++CONFIG_MFD_WM831X=y
++CONFIG_MFD_WM831X_I2C=y
++CONFIG_MFD_WM831X_SPI=y
++CONFIG_MFD_WM8350=y
++CONFIG_MFD_WM8350_I2C=y
++CONFIG_MFD_WM8994=m
++CONFIG_REGULATOR=y
++# CONFIG_REGULATOR_DEBUG is not set
++CONFIG_REGULATOR_FIXED_VOLTAGE=m
++CONFIG_REGULATOR_VIRTUAL_CONSUMER=m
++CONFIG_REGULATOR_USERSPACE_CONSUMER=m
++CONFIG_REGULATOR_88PM800=m
++CONFIG_REGULATOR_88PM8607=m
++CONFIG_REGULATOR_ACT8865=m
++CONFIG_REGULATOR_AD5398=m
++CONFIG_REGULATOR_ANATOP=m
++CONFIG_REGULATOR_AAT2870=m
++CONFIG_REGULATOR_AB3100=m
++CONFIG_REGULATOR_ARIZONA=m
++CONFIG_REGULATOR_AS3711=m
++CONFIG_REGULATOR_AS3722=m
++CONFIG_REGULATOR_AXP20X=m
++CONFIG_REGULATOR_BCM590XX=m
++CONFIG_REGULATOR_DA903X=m
++CONFIG_REGULATOR_DA9052=m
++CONFIG_REGULATOR_DA9055=m
++CONFIG_REGULATOR_DA9062=m
++CONFIG_REGULATOR_DA9063=m
++CONFIG_REGULATOR_DA9210=m
++CONFIG_REGULATOR_DA9211=m
++CONFIG_REGULATOR_FAN53555=m
++CONFIG_REGULATOR_GPIO=m
++CONFIG_REGULATOR_HI6421=m
++CONFIG_REGULATOR_ISL9305=m
++CONFIG_REGULATOR_ISL6271A=m
++CONFIG_REGULATOR_LP3971=m
++CONFIG_REGULATOR_LP3972=m
++CONFIG_REGULATOR_LP872X=m
++CONFIG_REGULATOR_LP8755=m
++CONFIG_REGULATOR_LP8788=m
++CONFIG_REGULATOR_LTC3589=m
++CONFIG_REGULATOR_MAX14577=m
++CONFIG_REGULATOR_MAX1586=m
++CONFIG_REGULATOR_MAX8649=m
++CONFIG_REGULATOR_MAX8660=m
++CONFIG_REGULATOR_MAX8907=m
++CONFIG_REGULATOR_MAX8925=m
++CONFIG_REGULATOR_MAX8952=m
++CONFIG_REGULATOR_MAX8973=m
++CONFIG_REGULATOR_MAX8997=m
++CONFIG_REGULATOR_MAX8998=m
++CONFIG_REGULATOR_MAX77686=m
++CONFIG_REGULATOR_MAX77693=m
++CONFIG_REGULATOR_MAX77802=m
++CONFIG_REGULATOR_MC13XXX_CORE=m
++CONFIG_REGULATOR_MC13783=m
++CONFIG_REGULATOR_MC13892=m
++CONFIG_REGULATOR_MT6311=m
++CONFIG_REGULATOR_MT6397=m
++CONFIG_REGULATOR_PALMAS=m
++CONFIG_REGULATOR_PCAP=m
++CONFIG_REGULATOR_PCF50633=m
++CONFIG_REGULATOR_PFUZE100=m
++CONFIG_REGULATOR_PWM=m
++CONFIG_REGULATOR_QCOM_SPMI=m
++CONFIG_REGULATOR_RC5T583=m
++CONFIG_REGULATOR_RK808=m
++CONFIG_REGULATOR_RN5T618=m
++CONFIG_REGULATOR_RT5033=m
++CONFIG_REGULATOR_S2MPA01=m
++CONFIG_REGULATOR_S2MPS11=m
++CONFIG_REGULATOR_S5M8767=m
++CONFIG_REGULATOR_SKY81452=m
++CONFIG_REGULATOR_TPS51632=m
++CONFIG_REGULATOR_TPS6105X=m
++CONFIG_REGULATOR_TPS62360=m
++CONFIG_REGULATOR_TPS65023=m
++CONFIG_REGULATOR_TPS6507X=m
++CONFIG_REGULATOR_TPS65090=m
++CONFIG_REGULATOR_TPS65217=y
++CONFIG_REGULATOR_TPS65218=m
++CONFIG_REGULATOR_TPS6524X=m
++CONFIG_REGULATOR_TPS6586X=m
++CONFIG_REGULATOR_TPS65910=m
++CONFIG_REGULATOR_TPS65912=m
++CONFIG_REGULATOR_TPS80031=m
++CONFIG_REGULATOR_TWL4030=m
++CONFIG_REGULATOR_WM831X=m
++CONFIG_REGULATOR_WM8350=m
++CONFIG_REGULATOR_WM8400=m
++CONFIG_REGULATOR_WM8994=m
++CONFIG_MEDIA_SUPPORT=m
++
++#
++# Multimedia core support
++#
++CONFIG_MEDIA_CAMERA_SUPPORT=y
++CONFIG_MEDIA_ANALOG_TV_SUPPORT=y
++CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
++CONFIG_MEDIA_RADIO_SUPPORT=y
++CONFIG_MEDIA_SDR_SUPPORT=y
++CONFIG_MEDIA_RC_SUPPORT=y
++CONFIG_MEDIA_CONTROLLER=y
++CONFIG_VIDEO_DEV=m
++CONFIG_VIDEO_V4L2_SUBDEV_API=y
++CONFIG_VIDEO_V4L2=m
++# CONFIG_VIDEO_ADV_DEBUG is not set
++# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
++CONFIG_VIDEO_TUNER=m
++CONFIG_V4L2_MEM2MEM_DEV=m
++CONFIG_V4L2_FLASH_LED_CLASS=m
++CONFIG_VIDEOBUF_GEN=m
++CONFIG_VIDEOBUF_DMA_SG=m
++CONFIG_VIDEOBUF_VMALLOC=m
++CONFIG_VIDEOBUF_DVB=m
++CONFIG_VIDEOBUF2_CORE=m
++CONFIG_VIDEOBUF2_MEMOPS=m
++CONFIG_VIDEOBUF2_DMA_CONTIG=m
++CONFIG_VIDEOBUF2_VMALLOC=m
++CONFIG_VIDEOBUF2_DMA_SG=m
++CONFIG_VIDEOBUF2_DVB=m
++CONFIG_DVB_CORE=m
++CONFIG_DVB_NET=y
++CONFIG_TTPCI_EEPROM=m
++CONFIG_DVB_MAX_ADAPTERS=8
++CONFIG_DVB_DYNAMIC_MINORS=y
++
++#
++# Media drivers
++#
++CONFIG_RC_CORE=m
++CONFIG_RC_MAP=m
++CONFIG_RC_DECODERS=y
++CONFIG_LIRC=m
++CONFIG_IR_LIRC_CODEC=m
++CONFIG_IR_NEC_DECODER=m
++CONFIG_IR_RC5_DECODER=m
++CONFIG_IR_RC6_DECODER=m
++CONFIG_IR_JVC_DECODER=m
++CONFIG_IR_SONY_DECODER=m
++CONFIG_IR_SANYO_DECODER=m
++CONFIG_IR_SHARP_DECODER=m
++CONFIG_IR_MCE_KBD_DECODER=m
++CONFIG_IR_XMP_DECODER=m
++CONFIG_RC_DEVICES=y
++CONFIG_RC_ATI_REMOTE=m
++CONFIG_IR_HIX5HD2=m
++CONFIG_IR_IMON=m
++CONFIG_IR_MCEUSB=m
++CONFIG_IR_REDRAT3=m
++CONFIG_IR_STREAMZAP=m
++CONFIG_IR_IGORPLUGUSB=m
++CONFIG_IR_IGUANA=m
++CONFIG_IR_TTUSBIR=m
++CONFIG_RC_LOOPBACK=m
++CONFIG_IR_GPIO_CIR=m
++CONFIG_MEDIA_USB_SUPPORT=y
++
++#
++# Webcam devices
++#
++CONFIG_USB_VIDEO_CLASS=m
++CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
++CONFIG_USB_GSPCA=m
++CONFIG_USB_M5602=m
++CONFIG_USB_STV06XX=m
++CONFIG_USB_GL860=m
++CONFIG_USB_GSPCA_BENQ=m
++CONFIG_USB_GSPCA_CONEX=m
++CONFIG_USB_GSPCA_CPIA1=m
++CONFIG_USB_GSPCA_DTCS033=m
++CONFIG_USB_GSPCA_ETOMS=m
++CONFIG_USB_GSPCA_FINEPIX=m
++CONFIG_USB_GSPCA_JEILINJ=m
++CONFIG_USB_GSPCA_JL2005BCD=m
++CONFIG_USB_GSPCA_KINECT=m
++CONFIG_USB_GSPCA_KONICA=m
++CONFIG_USB_GSPCA_MARS=m
++CONFIG_USB_GSPCA_MR97310A=m
++CONFIG_USB_GSPCA_NW80X=m
++CONFIG_USB_GSPCA_OV519=m
++CONFIG_USB_GSPCA_OV534=m
++CONFIG_USB_GSPCA_OV534_9=m
++CONFIG_USB_GSPCA_PAC207=m
++CONFIG_USB_GSPCA_PAC7302=m
++CONFIG_USB_GSPCA_PAC7311=m
++CONFIG_USB_GSPCA_SE401=m
++CONFIG_USB_GSPCA_SN9C2028=m
++CONFIG_USB_GSPCA_SN9C20X=m
++CONFIG_USB_GSPCA_SONIXB=m
++CONFIG_USB_GSPCA_SONIXJ=m
++CONFIG_USB_GSPCA_SPCA500=m
++CONFIG_USB_GSPCA_SPCA501=m
++CONFIG_USB_GSPCA_SPCA505=m
++CONFIG_USB_GSPCA_SPCA506=m
++CONFIG_USB_GSPCA_SPCA508=m
++CONFIG_USB_GSPCA_SPCA561=m
++CONFIG_USB_GSPCA_SPCA1528=m
++CONFIG_USB_GSPCA_SQ905=m
++CONFIG_USB_GSPCA_SQ905C=m
++CONFIG_USB_GSPCA_SQ930X=m
++CONFIG_USB_GSPCA_STK014=m
++CONFIG_USB_GSPCA_STK1135=m
++CONFIG_USB_GSPCA_STV0680=m
++CONFIG_USB_GSPCA_SUNPLUS=m
++CONFIG_USB_GSPCA_T613=m
++CONFIG_USB_GSPCA_TOPRO=m
++CONFIG_USB_GSPCA_TOUPTEK=m
++CONFIG_USB_GSPCA_TV8532=m
++CONFIG_USB_GSPCA_VC032X=m
++CONFIG_USB_GSPCA_VICAM=m
++CONFIG_USB_GSPCA_XIRLINK_CIT=m
++CONFIG_USB_GSPCA_ZC3XX=m
++CONFIG_USB_PWC=m
++# CONFIG_USB_PWC_DEBUG is not set
++CONFIG_USB_PWC_INPUT_EVDEV=y
++CONFIG_VIDEO_CPIA2=m
++CONFIG_USB_ZR364XX=m
++CONFIG_USB_STKWEBCAM=m
++CONFIG_USB_S2255=m
++CONFIG_VIDEO_USBTV=m
++
++#
++# Analog TV USB devices
++#
++CONFIG_VIDEO_PVRUSB2=m
++CONFIG_VIDEO_PVRUSB2_SYSFS=y
++CONFIG_VIDEO_PVRUSB2_DVB=y
++# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set
++CONFIG_VIDEO_HDPVR=m
++CONFIG_VIDEO_USBVISION=m
++CONFIG_VIDEO_STK1160_COMMON=m
++CONFIG_VIDEO_STK1160_AC97=y
++CONFIG_VIDEO_STK1160=m
++CONFIG_VIDEO_GO7007=m
++CONFIG_VIDEO_GO7007_USB=m
++CONFIG_VIDEO_GO7007_LOADER=m
++CONFIG_VIDEO_GO7007_USB_S2250_BOARD=m
++
++#
++# Analog/digital TV USB devices
++#
++CONFIG_VIDEO_AU0828=m
++CONFIG_VIDEO_AU0828_V4L2=y
++CONFIG_VIDEO_AU0828_RC=y
++CONFIG_VIDEO_CX231XX=m
++CONFIG_VIDEO_CX231XX_RC=y
++CONFIG_VIDEO_CX231XX_ALSA=m
++CONFIG_VIDEO_CX231XX_DVB=m
++CONFIG_VIDEO_TM6000=m
++CONFIG_VIDEO_TM6000_ALSA=m
++CONFIG_VIDEO_TM6000_DVB=m
++
++#
++# Digital TV USB devices
++#
++CONFIG_DVB_USB=m
++# CONFIG_DVB_USB_DEBUG is not set
++CONFIG_DVB_USB_A800=m
++CONFIG_DVB_USB_DIBUSB_MB=m
++# CONFIG_DVB_USB_DIBUSB_MB_FAULTY is not set
++CONFIG_DVB_USB_DIBUSB_MC=m
++CONFIG_DVB_USB_DIB0700=m
++CONFIG_DVB_USB_UMT_010=m
++CONFIG_DVB_USB_CXUSB=m
++CONFIG_DVB_USB_M920X=m
++CONFIG_DVB_USB_DIGITV=m
++CONFIG_DVB_USB_VP7045=m
++CONFIG_DVB_USB_VP702X=m
++CONFIG_DVB_USB_GP8PSK=m
++CONFIG_DVB_USB_NOVA_T_USB2=m
++CONFIG_DVB_USB_TTUSB2=m
++CONFIG_DVB_USB_DTT200U=m
++CONFIG_DVB_USB_OPERA1=m
++CONFIG_DVB_USB_AF9005=m
++CONFIG_DVB_USB_AF9005_REMOTE=m
++CONFIG_DVB_USB_PCTV452E=m
++CONFIG_DVB_USB_DW2102=m
++CONFIG_DVB_USB_CINERGY_T2=m
++CONFIG_DVB_USB_DTV5100=m
++CONFIG_DVB_USB_FRIIO=m
++CONFIG_DVB_USB_AZ6027=m
++CONFIG_DVB_USB_TECHNISAT_USB2=m
++CONFIG_DVB_USB_V2=m
++CONFIG_DVB_USB_AF9015=m
++CONFIG_DVB_USB_AF9035=m
++CONFIG_DVB_USB_ANYSEE=m
++CONFIG_DVB_USB_AU6610=m
++CONFIG_DVB_USB_AZ6007=m
++CONFIG_DVB_USB_CE6230=m
++CONFIG_DVB_USB_EC168=m
++CONFIG_DVB_USB_GL861=m
++CONFIG_DVB_USB_LME2510=m
++CONFIG_DVB_USB_MXL111SF=m
++CONFIG_DVB_USB_RTL28XXU=m
++CONFIG_DVB_USB_DVBSKY=m
++CONFIG_DVB_TTUSB_BUDGET=m
++CONFIG_DVB_TTUSB_DEC=m
++CONFIG_SMS_USB_DRV=m
++CONFIG_DVB_B2C2_FLEXCOP_USB=m
++# CONFIG_DVB_B2C2_FLEXCOP_USB_DEBUG is not set
++CONFIG_DVB_AS102=m
++
++#
++# Webcam, TV (analog/digital) USB devices
++#
++CONFIG_VIDEO_EM28XX=m
++CONFIG_VIDEO_EM28XX_V4L2=m
++CONFIG_VIDEO_EM28XX_ALSA=m
++CONFIG_VIDEO_EM28XX_DVB=m
++CONFIG_VIDEO_EM28XX_RC=m
++
++#
++# Software defined radio USB devices
++#
++CONFIG_USB_AIRSPY=m
++CONFIG_USB_HACKRF=m
++CONFIG_USB_MSI2500=m
++CONFIG_MEDIA_PCI_SUPPORT=y
++
++#
++# Media capture support
++#
++CONFIG_VIDEO_SOLO6X10=m
++CONFIG_VIDEO_TW68=m
++
++#
++# Media capture/analog TV support
++#
++CONFIG_VIDEO_IVTV=m
++CONFIG_VIDEO_IVTV_ALSA=m
++CONFIG_VIDEO_FB_IVTV=m
++CONFIG_VIDEO_HEXIUM_GEMINI=m
++CONFIG_VIDEO_HEXIUM_ORION=m
++CONFIG_VIDEO_MXB=m
++CONFIG_VIDEO_DT3155=m
++
++#
++# Media capture/analog/hybrid TV support
++#
++CONFIG_VIDEO_CX18=m
++CONFIG_VIDEO_CX18_ALSA=m
++CONFIG_VIDEO_CX23885=m
++CONFIG_MEDIA_ALTERA_CI=m
++CONFIG_VIDEO_CX25821=m
++CONFIG_VIDEO_CX25821_ALSA=m
++CONFIG_VIDEO_CX88=m
++CONFIG_VIDEO_CX88_ALSA=m
++CONFIG_VIDEO_CX88_BLACKBIRD=m
++CONFIG_VIDEO_CX88_DVB=m
++CONFIG_VIDEO_CX88_ENABLE_VP3054=y
++CONFIG_VIDEO_CX88_VP3054=m
++CONFIG_VIDEO_CX88_MPEG=m
++CONFIG_VIDEO_BT848=m
++CONFIG_DVB_BT8XX=m
++CONFIG_VIDEO_SAA7134=m
++CONFIG_VIDEO_SAA7134_ALSA=m
++CONFIG_VIDEO_SAA7134_RC=y
++CONFIG_VIDEO_SAA7134_DVB=m
++CONFIG_VIDEO_SAA7134_GO7007=m
++CONFIG_VIDEO_SAA7164=m
++CONFIG_VIDEO_COBALT=m
++
++#
++# Media digital TV PCI Adapters
++#
++CONFIG_DVB_AV7110_IR=y
++CONFIG_DVB_AV7110=m
++CONFIG_DVB_AV7110_OSD=y
++CONFIG_DVB_BUDGET_CORE=m
++CONFIG_DVB_BUDGET=m
++CONFIG_DVB_BUDGET_CI=m
++CONFIG_DVB_BUDGET_AV=m
++CONFIG_DVB_BUDGET_PATCH=m
++CONFIG_DVB_B2C2_FLEXCOP_PCI=m
++# CONFIG_DVB_B2C2_FLEXCOP_PCI_DEBUG is not set
++CONFIG_DVB_PLUTO2=m
++CONFIG_DVB_DM1105=m
++CONFIG_DVB_PT1=m
++CONFIG_DVB_PT3=m
++CONFIG_MANTIS_CORE=m
++CONFIG_DVB_MANTIS=m
++CONFIG_DVB_HOPPER=m
++CONFIG_DVB_NGENE=m
++CONFIG_DVB_DDBRIDGE=m
++CONFIG_DVB_SMIPCIE=m
++CONFIG_DVB_NETUP_UNIDVB=m
++CONFIG_V4L_PLATFORM_DRIVERS=y
++CONFIG_VIDEO_CAFE_CCIC=m
++CONFIG_SOC_CAMERA=m
++CONFIG_SOC_CAMERA_PLATFORM=m
++CONFIG_VIDEO_XILINX=m
++CONFIG_VIDEO_XILINX_TPG=m
++CONFIG_VIDEO_XILINX_VTC=m
++CONFIG_V4L_MEM2MEM_DRIVERS=y
++CONFIG_VIDEO_MEM2MEM_DEINTERLACE=m
++CONFIG_VIDEO_SH_VEU=m
++CONFIG_V4L_TEST_DRIVERS=y
++CONFIG_VIDEO_VIVID=m
++CONFIG_VIDEO_VIVID_MAX_DEVS=64
++CONFIG_VIDEO_VIM2M=m
++CONFIG_DVB_PLATFORM_DRIVERS=y
++
++#
++# Supported MMC/SDIO adapters
++#
++CONFIG_SMS_SDIO_DRV=m
++CONFIG_RADIO_ADAPTERS=y
++CONFIG_RADIO_TEA575X=m
++CONFIG_RADIO_SI470X=y
++CONFIG_USB_SI470X=m
++CONFIG_I2C_SI470X=m
++CONFIG_RADIO_SI4713=m
++CONFIG_USB_SI4713=m
++CONFIG_PLATFORM_SI4713=m
++CONFIG_I2C_SI4713=m
++CONFIG_RADIO_SI476X=m
++CONFIG_USB_MR800=m
++CONFIG_USB_DSBR=m
++CONFIG_RADIO_MAXIRADIO=m
++CONFIG_RADIO_SHARK=m
++CONFIG_RADIO_SHARK2=m
++CONFIG_USB_KEENE=m
++CONFIG_USB_RAREMONO=m
++CONFIG_USB_MA901=m
++CONFIG_RADIO_TEA5764=m
++CONFIG_RADIO_SAA7706H=m
++CONFIG_RADIO_TEF6862=m
++CONFIG_RADIO_WL1273=m
++
++#
++# Texas Instruments WL128x FM driver (ST based)
++#
++CONFIG_RADIO_WL128X=m
++
++#
++# Supported FireWire (IEEE 1394) Adapters
++#
++CONFIG_DVB_FIREDTV=m
++CONFIG_DVB_FIREDTV_INPUT=y
++CONFIG_MEDIA_COMMON_OPTIONS=y
++
++#
++# common driver options
++#
++CONFIG_VIDEO_CX2341X=m
++CONFIG_VIDEO_TVEEPROM=m
++CONFIG_CYPRESS_FIRMWARE=m
++CONFIG_DVB_B2C2_FLEXCOP=m
++CONFIG_VIDEO_SAA7146=m
++CONFIG_VIDEO_SAA7146_VV=m
++CONFIG_SMS_SIANO_MDTV=m
++CONFIG_SMS_SIANO_RC=y
++CONFIG_SMS_SIANO_DEBUGFS=y
++
++#
++# Media ancillary drivers (tuners, sensors, i2c, frontends)
++#
++CONFIG_MEDIA_SUBDRV_AUTOSELECT=y
++CONFIG_MEDIA_ATTACH=y
++CONFIG_VIDEO_IR_I2C=m
++
++#
++# Audio decoders, processors and mixers
++#
++CONFIG_VIDEO_TVAUDIO=m
++CONFIG_VIDEO_TDA7432=m
++CONFIG_VIDEO_TDA9840=m
++CONFIG_VIDEO_TEA6415C=m
++CONFIG_VIDEO_TEA6420=m
++CONFIG_VIDEO_MSP3400=m
++CONFIG_VIDEO_CS5345=m
++CONFIG_VIDEO_CS53L32A=m
++CONFIG_VIDEO_UDA1342=m
++CONFIG_VIDEO_WM8775=m
++CONFIG_VIDEO_WM8739=m
++CONFIG_VIDEO_VP27SMPX=m
++CONFIG_VIDEO_SONY_BTF_MPX=m
++
++#
++# RDS decoders
++#
++CONFIG_VIDEO_SAA6588=m
++
++#
++# Video decoders
++#
++CONFIG_VIDEO_ADV7604=m
++CONFIG_VIDEO_ADV7842=m
++CONFIG_VIDEO_SAA711X=m
++CONFIG_VIDEO_TVP5150=m
++CONFIG_VIDEO_TW2804=m
++CONFIG_VIDEO_TW9903=m
++CONFIG_VIDEO_TW9906=m
++
++#
++# Video and audio decoders
++#
++CONFIG_VIDEO_SAA717X=m
++CONFIG_VIDEO_CX25840=m
++
++#
++# Video encoders
++#
++CONFIG_VIDEO_SAA7127=m
++CONFIG_VIDEO_ADV7511=m
++
++#
++# Camera sensor devices
++#
++CONFIG_VIDEO_OV7640=m
++CONFIG_VIDEO_OV7670=m
++CONFIG_VIDEO_MT9V011=m
++
++#
++# Flash devices
++#
++
++#
++# Video improvement chips
++#
++CONFIG_VIDEO_UPD64031A=m
++CONFIG_VIDEO_UPD64083=m
++
++#
++# Audio/Video compression chips
++#
++CONFIG_VIDEO_SAA6752HS=m
++
++#
++# Miscellaneous helper chips
++#
++CONFIG_VIDEO_M52790=m
++
++#
++# Sensors used on soc_camera driver
++#
++
++#
++# soc_camera sensor drivers
++#
++CONFIG_SOC_CAMERA_IMX074=m
++CONFIG_SOC_CAMERA_MT9M001=m
++CONFIG_SOC_CAMERA_MT9M111=m
++CONFIG_SOC_CAMERA_MT9T031=m
++CONFIG_SOC_CAMERA_MT9T112=m
++CONFIG_SOC_CAMERA_MT9V022=m
++CONFIG_SOC_CAMERA_OV2640=m
++CONFIG_SOC_CAMERA_OV5642=m
++CONFIG_SOC_CAMERA_OV6650=m
++CONFIG_SOC_CAMERA_OV772X=m
++CONFIG_SOC_CAMERA_OV9640=m
++CONFIG_SOC_CAMERA_OV9740=m
++CONFIG_SOC_CAMERA_RJ54N1=m
++CONFIG_SOC_CAMERA_TW9910=m
++CONFIG_MEDIA_TUNER=m
++CONFIG_MEDIA_TUNER_SIMPLE=m
++CONFIG_MEDIA_TUNER_TDA8290=m
++CONFIG_MEDIA_TUNER_TDA827X=m
++CONFIG_MEDIA_TUNER_TDA18271=m
++CONFIG_MEDIA_TUNER_TDA9887=m
++CONFIG_MEDIA_TUNER_TEA5761=m
++CONFIG_MEDIA_TUNER_TEA5767=m
++CONFIG_MEDIA_TUNER_MSI001=m
++CONFIG_MEDIA_TUNER_MT20XX=m
++CONFIG_MEDIA_TUNER_MT2060=m
++CONFIG_MEDIA_TUNER_MT2063=m
++CONFIG_MEDIA_TUNER_MT2266=m
++CONFIG_MEDIA_TUNER_MT2131=m
++CONFIG_MEDIA_TUNER_QT1010=m
++CONFIG_MEDIA_TUNER_XC2028=m
++CONFIG_MEDIA_TUNER_XC5000=m
++CONFIG_MEDIA_TUNER_XC4000=m
++CONFIG_MEDIA_TUNER_MXL5005S=m
++CONFIG_MEDIA_TUNER_MXL5007T=m
++CONFIG_MEDIA_TUNER_MC44S803=m
++CONFIG_MEDIA_TUNER_MAX2165=m
++CONFIG_MEDIA_TUNER_TDA18218=m
++CONFIG_MEDIA_TUNER_FC0011=m
++CONFIG_MEDIA_TUNER_FC0012=m
++CONFIG_MEDIA_TUNER_FC0013=m
++CONFIG_MEDIA_TUNER_TDA18212=m
++CONFIG_MEDIA_TUNER_E4000=m
++CONFIG_MEDIA_TUNER_FC2580=m
++CONFIG_MEDIA_TUNER_M88RS6000T=m
++CONFIG_MEDIA_TUNER_TUA9001=m
++CONFIG_MEDIA_TUNER_SI2157=m
++CONFIG_MEDIA_TUNER_IT913X=m
++CONFIG_MEDIA_TUNER_R820T=m
++CONFIG_MEDIA_TUNER_MXL301RF=m
++CONFIG_MEDIA_TUNER_QM1D1C0042=m
++
++#
++# Multistandard (satellite) frontends
++#
++CONFIG_DVB_STB0899=m
++CONFIG_DVB_STB6100=m
++CONFIG_DVB_STV090x=m
++CONFIG_DVB_STV6110x=m
++CONFIG_DVB_M88DS3103=m
++
++#
++# Multistandard (cable + terrestrial) frontends
++#
++CONFIG_DVB_DRXK=m
++CONFIG_DVB_TDA18271C2DD=m
++CONFIG_DVB_SI2165=m
++
++#
++# DVB-S (satellite) frontends
++#
++CONFIG_DVB_CX24110=m
++CONFIG_DVB_CX24123=m
++CONFIG_DVB_MT312=m
++CONFIG_DVB_ZL10036=m
++CONFIG_DVB_ZL10039=m
++CONFIG_DVB_S5H1420=m
++CONFIG_DVB_STV0288=m
++CONFIG_DVB_STB6000=m
++CONFIG_DVB_STV0299=m
++CONFIG_DVB_STV6110=m
++CONFIG_DVB_STV0900=m
++CONFIG_DVB_TDA8083=m
++CONFIG_DVB_TDA10086=m
++CONFIG_DVB_TDA8261=m
++CONFIG_DVB_VES1X93=m
++CONFIG_DVB_TUNER_ITD1000=m
++CONFIG_DVB_TUNER_CX24113=m
++CONFIG_DVB_TDA826X=m
++CONFIG_DVB_TUA6100=m
++CONFIG_DVB_CX24116=m
++CONFIG_DVB_CX24117=m
++CONFIG_DVB_CX24120=m
++CONFIG_DVB_SI21XX=m
++CONFIG_DVB_TS2020=m
++CONFIG_DVB_DS3000=m
++CONFIG_DVB_MB86A16=m
++CONFIG_DVB_TDA10071=m
++
++#
++# DVB-T (terrestrial) frontends
++#
++CONFIG_DVB_SP8870=m
++CONFIG_DVB_SP887X=m
++CONFIG_DVB_CX22700=m
++CONFIG_DVB_CX22702=m
++CONFIG_DVB_DRXD=m
++CONFIG_DVB_L64781=m
++CONFIG_DVB_TDA1004X=m
++CONFIG_DVB_NXT6000=m
++CONFIG_DVB_MT352=m
++CONFIG_DVB_ZL10353=m
++CONFIG_DVB_DIB3000MB=m
++CONFIG_DVB_DIB3000MC=m
++CONFIG_DVB_DIB7000M=m
++CONFIG_DVB_DIB7000P=m
++CONFIG_DVB_TDA10048=m
++CONFIG_DVB_AF9013=m
++CONFIG_DVB_EC100=m
++CONFIG_DVB_STV0367=m
++CONFIG_DVB_CXD2820R=m
++CONFIG_DVB_CXD2841ER=m
++CONFIG_DVB_RTL2830=m
++CONFIG_DVB_RTL2832=m
++CONFIG_DVB_RTL2832_SDR=m
++CONFIG_DVB_SI2168=m
++CONFIG_DVB_AS102_FE=m
++
++#
++# DVB-C (cable) frontends
++#
++CONFIG_DVB_VES1820=m
++CONFIG_DVB_TDA10021=m
++CONFIG_DVB_TDA10023=m
++CONFIG_DVB_STV0297=m
++
++#
++# ATSC (North American/Korean Terrestrial/Cable DTV) frontends
++#
++CONFIG_DVB_NXT200X=m
++CONFIG_DVB_OR51211=m
++CONFIG_DVB_OR51132=m
++CONFIG_DVB_BCM3510=m
++CONFIG_DVB_LGDT330X=m
++CONFIG_DVB_LGDT3305=m
++CONFIG_DVB_LGDT3306A=m
++CONFIG_DVB_LG2160=m
++CONFIG_DVB_S5H1409=m
++CONFIG_DVB_AU8522=m
++CONFIG_DVB_AU8522_DTV=m
++CONFIG_DVB_AU8522_V4L=m
++CONFIG_DVB_S5H1411=m
++
++#
++# ISDB-T (terrestrial) frontends
++#
++CONFIG_DVB_S921=m
++CONFIG_DVB_DIB8000=m
++CONFIG_DVB_MB86A20S=m
++
++#
++# ISDB-S (satellite) & ISDB-T (terrestrial) frontends
++#
++CONFIG_DVB_TC90522=m
++
++#
++# Digital terrestrial only tuners/PLL
++#
++CONFIG_DVB_PLL=m
++CONFIG_DVB_TUNER_DIB0070=m
++CONFIG_DVB_TUNER_DIB0090=m
++
++#
++# SEC control devices for DVB-S
++#
++CONFIG_DVB_DRX39XYJ=m
++CONFIG_DVB_LNBH25=m
++CONFIG_DVB_LNBP21=m
++CONFIG_DVB_LNBP22=m
++CONFIG_DVB_ISL6405=m
++CONFIG_DVB_ISL6421=m
++CONFIG_DVB_ISL6423=m
++CONFIG_DVB_A8293=m
++CONFIG_DVB_SP2=m
++CONFIG_DVB_LGS8GXX=m
++CONFIG_DVB_ATBM8830=m
++CONFIG_DVB_TDA665x=m
++CONFIG_DVB_IX2505V=m
++CONFIG_DVB_M88RS2000=m
++CONFIG_DVB_AF9033=m
++CONFIG_DVB_HORUS3A=m
++CONFIG_DVB_ASCOT2E=m
++
++#
++# Tools to develop new frontends
++#
++# CONFIG_DVB_DUMMY_FE is not set
++
++#
++# Graphics support
++#
++CONFIG_AGP=y
++CONFIG_VGA_ARB=y
++CONFIG_VGA_ARB_MAX_GPUS=16
++CONFIG_DRM=m
++CONFIG_DRM_MIPI_DSI=y
++CONFIG_DRM_KMS_HELPER=m
++CONFIG_DRM_KMS_FB_HELPER=y
++CONFIG_DRM_FBDEV_EMULATION=y
++CONFIG_DRM_LOAD_EDID_FIRMWARE=y
++CONFIG_DRM_TTM=m
++
++#
++# I2C encoder or helper chips
++#
++CONFIG_DRM_I2C_ADV7511=m
++CONFIG_DRM_I2C_CH7006=m
++CONFIG_DRM_I2C_SIL164=m
++CONFIG_DRM_I2C_NXP_TDA998X=m
++CONFIG_DRM_TDFX=m
++CONFIG_DRM_R128=m
++CONFIG_DRM_RADEON=m
++# CONFIG_DRM_RADEON_USERPTR is not set
++CONFIG_DRM_AMDGPU=m
++CONFIG_DRM_AMDGPU_CIK=y
++CONFIG_DRM_AMDGPU_USERPTR=y
++# CONFIG_DRM_AMDGPU_GART_DEBUGFS is not set
++CONFIG_DRM_AMD_POWERPLAY=y
++
++#
++# ACP (Audio CoProcessor) Configuration
++#
++# CONFIG_DRM_AMD_ACP is not set
++
++#
++# Display Engine Configuration
++#
++# CONFIG_DRM_AMD_DAL is not set
++CONFIG_DRM_NOUVEAU=m
++CONFIG_NOUVEAU_DEBUG=5
++CONFIG_NOUVEAU_DEBUG_DEFAULT=3
++CONFIG_DRM_NOUVEAU_BACKLIGHT=y
++CONFIG_DRM_MGA=m
++CONFIG_DRM_SIS=m
++CONFIG_DRM_VIA=m
++CONFIG_DRM_SAVAGE=m
++CONFIG_DRM_VGEM=m
++CONFIG_DRM_UDL=m
++CONFIG_DRM_AST=m
++# CONFIG_DRM_MGAG200 is not set
++CONFIG_DRM_CIRRUS_QEMU=m
++CONFIG_DRM_QXL=m
++# CONFIG_DRM_BOCHS is not set
++CONFIG_DRM_VIRTIO_GPU=m
++CONFIG_DRM_PANEL=y
++
++#
++# Display Panels
++#
++CONFIG_DRM_PANEL_SIMPLE=m
++CONFIG_DRM_PANEL_SAMSUNG_LD9040=m
++CONFIG_DRM_PANEL_LG_LG4573=m
++CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0=m
++CONFIG_DRM_PANEL_SHARP_LQ101R1SX01=m
++CONFIG_DRM_BRIDGE=y
++
++#
++# Display Interface Bridges
++#
++CONFIG_DRM_NXP_PTN3460=m
++CONFIG_DRM_PARADE_PS8622=m
++CONFIG_HSA_AMD=m
++
++#
++# Frame buffer Devices
++#
++CONFIG_FB=y
++CONFIG_FIRMWARE_EDID=y
++CONFIG_FB_CMDLINE=y
++CONFIG_FB_DDC=m
++# CONFIG_FB_BOOT_VESA_SUPPORT is not set
++CONFIG_FB_CFB_FILLRECT=y
++CONFIG_FB_CFB_COPYAREA=y
++CONFIG_FB_CFB_IMAGEBLIT=y
++# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
++CONFIG_FB_SYS_FILLRECT=m
++CONFIG_FB_SYS_COPYAREA=m
++CONFIG_FB_SYS_IMAGEBLIT=m
++# CONFIG_FB_FOREIGN_ENDIAN is not set
++CONFIG_FB_SYS_FOPS=m
++CONFIG_FB_DEFERRED_IO=y
++CONFIG_FB_SVGALIB=m
++CONFIG_FB_MACMODES=y
++CONFIG_FB_BACKLIGHT=y
++CONFIG_FB_MODE_HELPERS=y
++CONFIG_FB_TILEBLITTING=y
++
++#
++# Frame buffer hardware drivers
++#
++CONFIG_FB_CIRRUS=m
++CONFIG_FB_PM2=m
++CONFIG_FB_PM2_FIFO_DISCONNECT=y
++CONFIG_FB_CYBER2000=m
++CONFIG_FB_CYBER2000_DDC=y
++CONFIG_FB_OF=y
++CONFIG_FB_ASILIANT=y
++CONFIG_FB_IMSTT=y
++CONFIG_FB_VGA16=m
++CONFIG_FB_UVESA=m
++CONFIG_FB_OPENCORES=m
++CONFIG_FB_S1D13XXX=m
++CONFIG_FB_NVIDIA=m
++CONFIG_FB_NVIDIA_I2C=y
++# CONFIG_FB_NVIDIA_DEBUG is not set
++CONFIG_FB_NVIDIA_BACKLIGHT=y
++CONFIG_FB_RIVA=m
++CONFIG_FB_RIVA_I2C=y
++# CONFIG_FB_RIVA_DEBUG is not set
++CONFIG_FB_RIVA_BACKLIGHT=y
++CONFIG_FB_I740=m
++CONFIG_FB_MATROX=m
++CONFIG_FB_MATROX_MILLENIUM=y
++CONFIG_FB_MATROX_MYSTIQUE=y
++CONFIG_FB_MATROX_G=y
++CONFIG_FB_MATROX_I2C=m
++CONFIG_FB_MATROX_MAVEN=m
++CONFIG_FB_RADEON=m
++CONFIG_FB_RADEON_I2C=y
++CONFIG_FB_RADEON_BACKLIGHT=y
++# CONFIG_FB_RADEON_DEBUG is not set
++CONFIG_FB_ATY128=m
++CONFIG_FB_ATY128_BACKLIGHT=y
++CONFIG_FB_ATY=m
++CONFIG_FB_ATY_CT=y
++# CONFIG_FB_ATY_GENERIC_LCD is not set
++CONFIG_FB_ATY_GX=y
++CONFIG_FB_ATY_BACKLIGHT=y
++CONFIG_FB_S3=m
++CONFIG_FB_S3_DDC=y
++CONFIG_FB_SAVAGE=m
++CONFIG_FB_SAVAGE_I2C=y
++# CONFIG_FB_SAVAGE_ACCEL is not set
++CONFIG_FB_SIS=m
++CONFIG_FB_SIS_300=y
++CONFIG_FB_SIS_315=y
++CONFIG_FB_NEOMAGIC=m
++CONFIG_FB_KYRO=m
++CONFIG_FB_3DFX=m
++# CONFIG_FB_3DFX_ACCEL is not set
++# CONFIG_FB_3DFX_I2C is not set
++CONFIG_FB_VOODOO1=m
++CONFIG_FB_VT8623=m
++CONFIG_FB_TRIDENT=m
++CONFIG_FB_ARK=m
++CONFIG_FB_PM3=m
++CONFIG_FB_CARMINE=m
++CONFIG_FB_CARMINE_DRAM_EVAL=y
++# CONFIG_CARMINE_DRAM_CUSTOM is not set
++CONFIG_FB_SM501=m
++CONFIG_FB_SMSCUFX=m
++CONFIG_FB_UDL=m
++CONFIG_FB_IBM_GXT4500=y
++# CONFIG_FB_VIRTUAL is not set
++CONFIG_FB_METRONOME=m
++CONFIG_FB_MB862XX=m
++CONFIG_FB_MB862XX_PCI_GDC=y
++# CONFIG_FB_MB862XX_LIME is not set
++CONFIG_FB_MB862XX_I2C=y
++CONFIG_FB_BROADSHEET=m
++CONFIG_FB_AUO_K190X=m
++CONFIG_FB_AUO_K1900=m
++CONFIG_FB_AUO_K1901=m
++CONFIG_FB_SIMPLE=y
++CONFIG_FB_SSD1307=m
++CONFIG_FB_SM712=m
++CONFIG_BACKLIGHT_LCD_SUPPORT=y
++CONFIG_LCD_CLASS_DEVICE=m
++CONFIG_LCD_L4F00242T03=m
++CONFIG_LCD_LMS283GF05=m
++CONFIG_LCD_LTV350QV=m
++CONFIG_LCD_ILI922X=m
++CONFIG_LCD_ILI9320=m
++CONFIG_LCD_TDO24M=m
++CONFIG_LCD_VGG2432A4=m
++CONFIG_LCD_PLATFORM=m
++CONFIG_LCD_S6E63M0=m
++CONFIG_LCD_LD9040=m
++CONFIG_LCD_AMS369FG06=m
++CONFIG_LCD_LMS501KF03=m
++CONFIG_LCD_HX8357=m
++CONFIG_BACKLIGHT_CLASS_DEVICE=y
++CONFIG_BACKLIGHT_GENERIC=m
++CONFIG_BACKLIGHT_LM3533=m
++CONFIG_BACKLIGHT_PWM=m
++CONFIG_BACKLIGHT_DA903X=m
++CONFIG_BACKLIGHT_DA9052=m
++CONFIG_BACKLIGHT_MAX8925=m
++CONFIG_BACKLIGHT_PM8941_WLED=m
++CONFIG_BACKLIGHT_WM831X=m
++CONFIG_BACKLIGHT_ADP5520=m
++CONFIG_BACKLIGHT_ADP8860=m
++CONFIG_BACKLIGHT_ADP8870=m
++CONFIG_BACKLIGHT_88PM860X=m
++CONFIG_BACKLIGHT_PCF50633=m
++CONFIG_BACKLIGHT_AAT2870=m
++CONFIG_BACKLIGHT_LM3630A=m
++CONFIG_BACKLIGHT_LM3639=m
++CONFIG_BACKLIGHT_LP855X=m
++CONFIG_BACKLIGHT_LP8788=m
++CONFIG_BACKLIGHT_PANDORA=m
++CONFIG_BACKLIGHT_SKY81452=m
++CONFIG_BACKLIGHT_TPS65217=m
++CONFIG_BACKLIGHT_AS3711=m
++CONFIG_BACKLIGHT_GPIO=m
++CONFIG_BACKLIGHT_LV5207LP=m
++CONFIG_BACKLIGHT_BD6107=m
++CONFIG_VGASTATE=m
++CONFIG_VIDEOMODE_HELPERS=y
++CONFIG_HDMI=y
++
++#
++# Console display driver support
++#
++CONFIG_VGA_CONSOLE=y
++# CONFIG_VGACON_SOFT_SCROLLBACK is not set
++CONFIG_DUMMY_CONSOLE=y
++CONFIG_DUMMY_CONSOLE_COLUMNS=80
++CONFIG_DUMMY_CONSOLE_ROWS=25
++CONFIG_FRAMEBUFFER_CONSOLE=y
++CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
++CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
++# CONFIG_LOGO is not set
++CONFIG_SOUND=m
++CONFIG_SOUND_OSS_CORE=y
++# CONFIG_SOUND_OSS_CORE_PRECLAIM is not set
++CONFIG_SND=m
++CONFIG_SND_TIMER=m
++CONFIG_SND_PCM=m
++CONFIG_SND_DMAENGINE_PCM=m
++CONFIG_SND_HWDEP=m
++CONFIG_SND_RAWMIDI=m
++CONFIG_SND_JACK=y
++CONFIG_SND_SEQUENCER=m
++CONFIG_SND_SEQ_DUMMY=m
++CONFIG_SND_OSSEMUL=y
++CONFIG_SND_MIXER_OSS=m
++CONFIG_SND_PCM_OSS=m
++CONFIG_SND_PCM_OSS_PLUGINS=y
++CONFIG_SND_PCM_TIMER=y
++# CONFIG_SND_SEQUENCER_OSS is not set
++CONFIG_SND_HRTIMER=m
++CONFIG_SND_SEQ_HRTIMER_DEFAULT=y
++CONFIG_SND_DYNAMIC_MINORS=y
++CONFIG_SND_MAX_CARDS=32
++CONFIG_SND_SUPPORT_OLD_API=y
++CONFIG_SND_PROC_FS=y
++CONFIG_SND_VERBOSE_PROCFS=y
++# CONFIG_SND_VERBOSE_PRINTK is not set
++# CONFIG_SND_DEBUG is not set
++CONFIG_SND_VMASTER=y
++CONFIG_SND_RAWMIDI_SEQ=m
++CONFIG_SND_OPL3_LIB_SEQ=m
++# CONFIG_SND_OPL4_LIB_SEQ is not set
++# CONFIG_SND_SBAWE_SEQ is not set
++CONFIG_SND_EMU10K1_SEQ=m
++CONFIG_SND_MPU401_UART=m
++CONFIG_SND_OPL3_LIB=m
++CONFIG_SND_VX_LIB=m
++CONFIG_SND_AC97_CODEC=m
++CONFIG_SND_DRIVERS=y
++CONFIG_SND_DUMMY=m
++CONFIG_SND_ALOOP=m
++CONFIG_SND_VIRMIDI=m
++CONFIG_SND_MTPAV=m
++CONFIG_SND_MTS64=m
++CONFIG_SND_SERIAL_U16550=m
++CONFIG_SND_MPU401=m
++CONFIG_SND_PORTMAN2X4=m
++CONFIG_SND_AC97_POWER_SAVE=y
++CONFIG_SND_AC97_POWER_SAVE_DEFAULT=0
++CONFIG_SND_SB_COMMON=m
++CONFIG_SND_PCI=y
++CONFIG_SND_AD1889=m
++CONFIG_SND_ALS300=m
++CONFIG_SND_ALS4000=m
++CONFIG_SND_ALI5451=m
++CONFIG_SND_ATIIXP=m
++CONFIG_SND_ATIIXP_MODEM=m
++CONFIG_SND_AU8810=m
++CONFIG_SND_AU8820=m
++CONFIG_SND_AU8830=m
++CONFIG_SND_AW2=m
++CONFIG_SND_AZT3328=m
++CONFIG_SND_BT87X=m
++# CONFIG_SND_BT87X_OVERCLOCK is not set
++CONFIG_SND_CA0106=m
++CONFIG_SND_CMIPCI=m
++CONFIG_SND_OXYGEN_LIB=m
++CONFIG_SND_OXYGEN=m
++CONFIG_SND_CS4281=m
++CONFIG_SND_CS46XX=m
++CONFIG_SND_CS46XX_NEW_DSP=y
++CONFIG_SND_CTXFI=m
++CONFIG_SND_DARLA20=m
++CONFIG_SND_GINA20=m
++CONFIG_SND_LAYLA20=m
++CONFIG_SND_DARLA24=m
++CONFIG_SND_GINA24=m
++CONFIG_SND_LAYLA24=m
++CONFIG_SND_MONA=m
++CONFIG_SND_MIA=m
++CONFIG_SND_ECHO3G=m
++CONFIG_SND_INDIGO=m
++CONFIG_SND_INDIGOIO=m
++CONFIG_SND_INDIGODJ=m
++CONFIG_SND_INDIGOIOX=m
++CONFIG_SND_INDIGODJX=m
++CONFIG_SND_EMU10K1=m
++CONFIG_SND_EMU10K1X=m
++CONFIG_SND_ENS1370=m
++CONFIG_SND_ENS1371=m
++CONFIG_SND_ES1938=m
++CONFIG_SND_ES1968=m
++CONFIG_SND_ES1968_INPUT=y
++CONFIG_SND_ES1968_RADIO=y
++CONFIG_SND_FM801=m
++CONFIG_SND_FM801_TEA575X_BOOL=y
++CONFIG_SND_HDSP=m
++CONFIG_SND_HDSPM=m
++CONFIG_SND_ICE1712=m
++CONFIG_SND_ICE1724=m
++CONFIG_SND_INTEL8X0=m
++CONFIG_SND_INTEL8X0M=m
++CONFIG_SND_KORG1212=m
++CONFIG_SND_LOLA=m
++CONFIG_SND_LX6464ES=m
++CONFIG_SND_MAESTRO3=m
++CONFIG_SND_MAESTRO3_INPUT=y
++CONFIG_SND_MIXART=m
++CONFIG_SND_NM256=m
++CONFIG_SND_PCXHR=m
++CONFIG_SND_RIPTIDE=m
++CONFIG_SND_RME32=m
++CONFIG_SND_RME96=m
++CONFIG_SND_RME9652=m
++CONFIG_SND_SONICVIBES=m
++CONFIG_SND_TRIDENT=m
++CONFIG_SND_VIA82XX=m
++CONFIG_SND_VIA82XX_MODEM=m
++CONFIG_SND_VIRTUOSO=m
++CONFIG_SND_VX222=m
++CONFIG_SND_YMFPCI=m
++
++#
++# HD-Audio
++#
++CONFIG_SND_HDA=m
++CONFIG_SND_HDA_INTEL=m
++CONFIG_SND_HDA_HWDEP=y
++CONFIG_SND_HDA_RECONFIG=y
++CONFIG_SND_HDA_INPUT_BEEP=y
++CONFIG_SND_HDA_INPUT_BEEP_MODE=0
++CONFIG_SND_HDA_PATCH_LOADER=y
++CONFIG_SND_HDA_CODEC_REALTEK=m
++CONFIG_SND_HDA_CODEC_ANALOG=m
++CONFIG_SND_HDA_CODEC_SIGMATEL=m
++CONFIG_SND_HDA_CODEC_VIA=m
++CONFIG_SND_HDA_CODEC_HDMI=m
++CONFIG_SND_HDA_CODEC_CIRRUS=m
++CONFIG_SND_HDA_CODEC_CONEXANT=m
++CONFIG_SND_HDA_CODEC_CA0110=m
++CONFIG_SND_HDA_CODEC_CA0132=m
++CONFIG_SND_HDA_CODEC_CA0132_DSP=y
++CONFIG_SND_HDA_CODEC_CMEDIA=m
++CONFIG_SND_HDA_CODEC_SI3054=m
++CONFIG_SND_HDA_GENERIC=m
++CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0
++CONFIG_SND_HDA_CORE=m
++CONFIG_SND_HDA_DSP_LOADER=y
++CONFIG_SND_HDA_PREALLOC_SIZE=64
++CONFIG_SND_PPC=y
++CONFIG_SND_SPI=y
++CONFIG_SND_USB=y
++CONFIG_SND_USB_AUDIO=m
++CONFIG_SND_USB_UA101=m
++CONFIG_SND_USB_USX2Y=m
++CONFIG_SND_USB_CAIAQ=m
++CONFIG_SND_USB_CAIAQ_INPUT=y
++CONFIG_SND_USB_6FIRE=m
++CONFIG_SND_USB_HIFACE=m
++CONFIG_SND_BCD2000=m
++CONFIG_SND_USB_LINE6=m
++CONFIG_SND_USB_POD=m
++CONFIG_SND_USB_PODHD=m
++CONFIG_SND_USB_TONEPORT=m
++CONFIG_SND_USB_VARIAX=m
++CONFIG_SND_FIREWIRE=y
++CONFIG_SND_FIREWIRE_LIB=m
++CONFIG_SND_DICE=m
++CONFIG_SND_OXFW=m
++CONFIG_SND_ISIGHT=m
++CONFIG_SND_SCS1X=m
++CONFIG_SND_FIREWORKS=m
++CONFIG_SND_BEBOB=m
++CONFIG_SND_FIREWIRE_DIGI00X=m
++CONFIG_SND_FIREWIRE_TASCAM=m
++CONFIG_SND_SOC=m
++CONFIG_SND_SOC_AC97_BUS=y
++CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM=y
++# CONFIG_SND_SOC_AMD_ACP is not set
++CONFIG_SND_ATMEL_SOC=m
++
++#
++# SoC Audio for Freescale CPUs
++#
++
++#
++# Common SoC Audio options for Freescale CPUs:
++#
++CONFIG_SND_SOC_FSL_ASRC=m
++CONFIG_SND_SOC_FSL_SAI=m
++CONFIG_SND_SOC_FSL_SSI=m
++CONFIG_SND_SOC_FSL_SPDIF=m
++CONFIG_SND_SOC_FSL_ESAI=m
++CONFIG_SND_SOC_IMX_AUDMUX=m
++
++#
++# Allwinner SoC Audio support
++#
++CONFIG_SND_SUN4I_CODEC=m
++CONFIG_SND_SOC_XTFPGA_I2S=m
++CONFIG_SND_SOC_I2C_AND_SPI=m
++
++#
++# CODEC drivers
++#
++CONFIG_SND_SOC_AC97_CODEC=m
++CONFIG_SND_SOC_ADAU1701=m
++CONFIG_SND_SOC_AK4104=m
++CONFIG_SND_SOC_AK4554=m
++CONFIG_SND_SOC_AK4613=m
++CONFIG_SND_SOC_AK4642=m
++CONFIG_SND_SOC_AK5386=m
++CONFIG_SND_SOC_ALC5623=m
++CONFIG_SND_SOC_CS35L32=m
++CONFIG_SND_SOC_CS42L51=m
++CONFIG_SND_SOC_CS42L51_I2C=m
++CONFIG_SND_SOC_CS42L52=m
++CONFIG_SND_SOC_CS42L56=m
++CONFIG_SND_SOC_CS42L73=m
++CONFIG_SND_SOC_CS4265=m
++CONFIG_SND_SOC_CS4270=m
++CONFIG_SND_SOC_CS4271=m
++CONFIG_SND_SOC_CS4271_I2C=m
++CONFIG_SND_SOC_CS4271_SPI=m
++CONFIG_SND_SOC_CS42XX8=m
++CONFIG_SND_SOC_CS42XX8_I2C=m
++CONFIG_SND_SOC_CS4349=m
++CONFIG_SND_SOC_ES8328=m
++CONFIG_SND_SOC_GTM601=m
++CONFIG_SND_SOC_PCM1681=m
++CONFIG_SND_SOC_PCM1792A=m
++CONFIG_SND_SOC_PCM512x=m
++CONFIG_SND_SOC_PCM512x_I2C=m
++CONFIG_SND_SOC_PCM512x_SPI=m
++CONFIG_SND_SOC_RT5631=m
++# CONFIG_SND_SOC_RT5677_SPI is not set
++CONFIG_SND_SOC_SGTL5000=m
++CONFIG_SND_SOC_SI476X=m
++CONFIG_SND_SOC_SIGMADSP=m
++CONFIG_SND_SOC_SIGMADSP_I2C=m
++CONFIG_SND_SOC_SIRF_AUDIO_CODEC=m
++CONFIG_SND_SOC_SPDIF=m
++CONFIG_SND_SOC_SSM2602=m
++CONFIG_SND_SOC_SSM2602_SPI=m
++CONFIG_SND_SOC_SSM2602_I2C=m
++CONFIG_SND_SOC_SSM4567=m
++CONFIG_SND_SOC_STA32X=m
++CONFIG_SND_SOC_STA350=m
++CONFIG_SND_SOC_STI_SAS=m
++CONFIG_SND_SOC_TAS2552=m
++CONFIG_SND_SOC_TAS5086=m
++CONFIG_SND_SOC_TAS571X=m
++CONFIG_SND_SOC_TFA9879=m
++CONFIG_SND_SOC_TLV320AIC23=m
++CONFIG_SND_SOC_TLV320AIC23_I2C=m
++CONFIG_SND_SOC_TLV320AIC23_SPI=m
++CONFIG_SND_SOC_TLV320AIC31XX=m
++CONFIG_SND_SOC_TLV320AIC3X=m
++CONFIG_SND_SOC_TS3A227E=m
++CONFIG_SND_SOC_WM8510=m
++CONFIG_SND_SOC_WM8523=m
++CONFIG_SND_SOC_WM8580=m
++CONFIG_SND_SOC_WM8711=m
++CONFIG_SND_SOC_WM8728=m
++CONFIG_SND_SOC_WM8731=m
++CONFIG_SND_SOC_WM8737=m
++CONFIG_SND_SOC_WM8741=m
++CONFIG_SND_SOC_WM8750=m
++CONFIG_SND_SOC_WM8753=m
++CONFIG_SND_SOC_WM8770=m
++CONFIG_SND_SOC_WM8776=m
++CONFIG_SND_SOC_WM8804=m
++CONFIG_SND_SOC_WM8804_I2C=m
++CONFIG_SND_SOC_WM8804_SPI=m
++CONFIG_SND_SOC_WM8903=m
++CONFIG_SND_SOC_WM8962=m
++CONFIG_SND_SOC_WM8978=m
++CONFIG_SND_SOC_TPA6130A2=m
++CONFIG_SND_SIMPLE_CARD=m
++# CONFIG_SOUND_PRIME is not set
++CONFIG_AC97_BUS=m
++
++#
++# HID support
++#
++CONFIG_HID=m
++CONFIG_HID_BATTERY_STRENGTH=y
++CONFIG_HIDRAW=y
++CONFIG_UHID=m
++CONFIG_HID_GENERIC=m
++
++#
++# Special HID drivers
++#
++CONFIG_HID_A4TECH=m
++CONFIG_HID_ACRUX=m
++CONFIG_HID_ACRUX_FF=y
++CONFIG_HID_APPLE=m
++CONFIG_HID_APPLEIR=m
++CONFIG_HID_AUREAL=m
++CONFIG_HID_BELKIN=m
++CONFIG_HID_BETOP_FF=m
++CONFIG_HID_CHERRY=m
++CONFIG_HID_CHICONY=m
++CONFIG_HID_CORSAIR=m
++CONFIG_HID_PRODIKEYS=m
++CONFIG_HID_CP2112=m
++CONFIG_HID_CYPRESS=m
++CONFIG_HID_DRAGONRISE=m
++CONFIG_DRAGONRISE_FF=y
++CONFIG_HID_EMS_FF=m
++CONFIG_HID_ELECOM=m
++CONFIG_HID_ELO=m
++CONFIG_HID_EZKEY=m
++CONFIG_HID_GEMBIRD=m
++CONFIG_HID_GFRM=m
++CONFIG_HID_HOLTEK=m
++CONFIG_HOLTEK_FF=y
++CONFIG_HID_GT683R=m
++CONFIG_HID_KEYTOUCH=m
++CONFIG_HID_KYE=m
++CONFIG_HID_UCLOGIC=m
++CONFIG_HID_WALTOP=m
++CONFIG_HID_GYRATION=m
++CONFIG_HID_ICADE=m
++CONFIG_HID_TWINHAN=m
++CONFIG_HID_KENSINGTON=m
++CONFIG_HID_LCPOWER=m
++CONFIG_HID_LENOVO=m
++CONFIG_HID_LOGITECH=m
++CONFIG_HID_LOGITECH_DJ=m
++CONFIG_HID_LOGITECH_HIDPP=m
++CONFIG_LOGITECH_FF=y
++CONFIG_LOGIRUMBLEPAD2_FF=y
++CONFIG_LOGIG940_FF=y
++CONFIG_LOGIWHEELS_FF=y
++CONFIG_HID_MAGICMOUSE=m
++CONFIG_HID_MICROSOFT=m
++CONFIG_HID_MONTEREY=m
++CONFIG_HID_MULTITOUCH=m
++CONFIG_HID_NTRIG=m
++CONFIG_HID_ORTEK=m
++CONFIG_HID_PANTHERLORD=m
++CONFIG_PANTHERLORD_FF=y
++CONFIG_HID_PENMOUNT=m
++CONFIG_HID_PETALYNX=m
++CONFIG_HID_PICOLCD=m
++CONFIG_HID_PICOLCD_FB=y
++CONFIG_HID_PICOLCD_BACKLIGHT=y
++CONFIG_HID_PICOLCD_LCD=y
++CONFIG_HID_PICOLCD_LEDS=y
++CONFIG_HID_PICOLCD_CIR=y
++CONFIG_HID_PLANTRONICS=m
++CONFIG_HID_PRIMAX=m
++CONFIG_HID_ROCCAT=m
++CONFIG_HID_SAITEK=m
++CONFIG_HID_SAMSUNG=m
++CONFIG_HID_SONY=m
++CONFIG_SONY_FF=y
++CONFIG_HID_SPEEDLINK=m
++CONFIG_HID_STEELSERIES=m
++CONFIG_HID_SUNPLUS=m
++CONFIG_HID_RMI=m
++CONFIG_HID_GREENASIA=m
++CONFIG_GREENASIA_FF=y
++CONFIG_HID_SMARTJOYPLUS=m
++CONFIG_SMARTJOYPLUS_FF=y
++CONFIG_HID_TIVO=m
++CONFIG_HID_TOPSEED=m
++CONFIG_HID_THINGM=m
++CONFIG_HID_THRUSTMASTER=m
++CONFIG_THRUSTMASTER_FF=y
++CONFIG_HID_WACOM=m
++CONFIG_HID_WIIMOTE=m
++CONFIG_HID_XINMO=m
++CONFIG_HID_ZEROPLUS=m
++CONFIG_ZEROPLUS_FF=y
++CONFIG_HID_ZYDACRON=m
++CONFIG_HID_SENSOR_HUB=m
++CONFIG_HID_SENSOR_CUSTOM_SENSOR=m
++
++#
++# USB HID support
++#
++CONFIG_USB_HID=m
++CONFIG_HID_PID=y
++CONFIG_USB_HIDDEV=y
++
++#
++# USB HID Boot Protocol drivers
++#
++CONFIG_USB_KBD=m
++CONFIG_USB_MOUSE=m
++
++#
++# I2C HID support
++#
++CONFIG_I2C_HID=m
++CONFIG_USB_OHCI_LITTLE_ENDIAN=y
++CONFIG_USB_SUPPORT=y
++CONFIG_USB_COMMON=y
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB=y
++CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
++
++#
++# Miscellaneous USB options
++#
++CONFIG_USB_DEFAULT_PERSIST=y
++CONFIG_USB_DYNAMIC_MINORS=y
++# CONFIG_USB_OTG is not set
++# CONFIG_USB_OTG_WHITELIST is not set
++# CONFIG_USB_OTG_BLACKLIST_HUB is not set
++CONFIG_USB_ULPI_BUS=m
++CONFIG_USB_MON=m
++CONFIG_USB_WUSB=m
++CONFIG_USB_WUSB_CBAF=m
++# CONFIG_USB_WUSB_CBAF_DEBUG is not set
++
++#
++# USB Host Controller Drivers
++#
++CONFIG_USB_C67X00_HCD=m
++CONFIG_USB_XHCI_HCD=y
++CONFIG_USB_XHCI_PCI=y
++CONFIG_USB_XHCI_PLATFORM=m
++CONFIG_USB_EHCI_HCD=y
++CONFIG_USB_EHCI_ROOT_HUB_TT=y
++CONFIG_USB_EHCI_TT_NEWSCHED=y
++CONFIG_USB_EHCI_PCI=y
++CONFIG_USB_EHCI_HCD_PPC_OF=y
++CONFIG_USB_EHCI_HCD_PLATFORM=m
++CONFIG_USB_OXU210HP_HCD=m
++CONFIG_USB_ISP116X_HCD=m
++CONFIG_USB_ISP1362_HCD=m
++CONFIG_USB_FOTG210_HCD=m
++CONFIG_USB_MAX3421_HCD=m
++CONFIG_USB_OHCI_HCD=y
++# CONFIG_USB_OHCI_HCD_PPC_OF_BE is not set
++# CONFIG_USB_OHCI_HCD_PPC_OF_LE is not set
++# CONFIG_USB_OHCI_HCD_PPC_OF is not set
++CONFIG_USB_OHCI_HCD_PCI=y
++CONFIG_USB_OHCI_HCD_PLATFORM=m
++CONFIG_USB_UHCI_HCD=y
++CONFIG_USB_U132_HCD=m
++CONFIG_USB_SL811_HCD=m
++CONFIG_USB_SL811_HCD_ISO=y
++CONFIG_USB_R8A66597_HCD=m
++CONFIG_USB_WHCI_HCD=m
++CONFIG_USB_HWA_HCD=m
++CONFIG_USB_HCD_BCMA=m
++CONFIG_USB_HCD_SSB=m
++# CONFIG_USB_HCD_TEST_MODE is not set
++
++#
++# USB Device Class drivers
++#
++CONFIG_USB_ACM=m
++CONFIG_USB_PRINTER=m
++CONFIG_USB_WDM=m
++CONFIG_USB_TMC=m
++
++#
++# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
++#
++
++#
++# also be needed; see USB_STORAGE Help for more info
++#
++CONFIG_USB_STORAGE=m
++# CONFIG_USB_STORAGE_DEBUG is not set
++CONFIG_USB_STORAGE_REALTEK=m
++CONFIG_REALTEK_AUTOPM=y
++CONFIG_USB_STORAGE_DATAFAB=m
++CONFIG_USB_STORAGE_FREECOM=m
++CONFIG_USB_STORAGE_ISD200=m
++CONFIG_USB_STORAGE_USBAT=m
++CONFIG_USB_STORAGE_SDDR09=m
++CONFIG_USB_STORAGE_SDDR55=m
++CONFIG_USB_STORAGE_JUMPSHOT=m
++CONFIG_USB_STORAGE_ALAUDA=m
++CONFIG_USB_STORAGE_ONETOUCH=m
++CONFIG_USB_STORAGE_KARMA=m
++CONFIG_USB_STORAGE_CYPRESS_ATACB=m
++CONFIG_USB_STORAGE_ENE_UB6250=m
++CONFIG_USB_UAS=m
++
++#
++# USB Imaging devices
++#
++CONFIG_USB_MDC800=m
++CONFIG_USB_MICROTEK=m
++CONFIG_USBIP_CORE=m
++CONFIG_USBIP_VHCI_HCD=m
++CONFIG_USBIP_HOST=m
++# CONFIG_USBIP_DEBUG is not set
++CONFIG_USB_MUSB_HDRC=m
++# CONFIG_USB_MUSB_HOST is not set
++# CONFIG_USB_MUSB_GADGET is not set
++CONFIG_USB_MUSB_DUAL_ROLE=y
++
++#
++# Platform Glue Layer
++#
++
++#
++# MUSB DMA mode
++#
++CONFIG_MUSB_PIO_ONLY=y
++CONFIG_USB_DWC3=m
++CONFIG_USB_DWC3_ULPI=y
++# CONFIG_USB_DWC3_HOST is not set
++# CONFIG_USB_DWC3_GADGET is not set
++CONFIG_USB_DWC3_DUAL_ROLE=y
++
++#
++# Platform Glue Driver Support
++#
++CONFIG_USB_DWC3_PCI=m
++CONFIG_USB_DWC2=y
++CONFIG_USB_DWC2_HOST=y
++
++#
++# Gadget/Dual-role mode requires USB Gadget support to be enabled
++#
++CONFIG_USB_DWC2_PCI=y
++# CONFIG_USB_DWC2_DEBUG is not set
++# CONFIG_USB_DWC2_TRACK_MISSED_SOFS is not set
++CONFIG_USB_CHIPIDEA=m
++CONFIG_USB_CHIPIDEA_OF=m
++CONFIG_USB_CHIPIDEA_PCI=m
++CONFIG_USB_CHIPIDEA_UDC=y
++CONFIG_USB_CHIPIDEA_HOST=y
++# CONFIG_USB_CHIPIDEA_DEBUG is not set
++CONFIG_USB_ISP1760=m
++CONFIG_USB_ISP1760_HCD=y
++CONFIG_USB_ISP1761_UDC=y
++# CONFIG_USB_ISP1760_HOST_ROLE is not set
++# CONFIG_USB_ISP1760_GADGET_ROLE is not set
++CONFIG_USB_ISP1760_DUAL_ROLE=y
++
++#
++# USB port drivers
++#
++CONFIG_USB_USS720=m
++CONFIG_USB_SERIAL=m
++CONFIG_USB_SERIAL_GENERIC=y
++CONFIG_USB_SERIAL_SIMPLE=m
++CONFIG_USB_SERIAL_AIRCABLE=m
++CONFIG_USB_SERIAL_ARK3116=m
++CONFIG_USB_SERIAL_BELKIN=m
++CONFIG_USB_SERIAL_CH341=m
++CONFIG_USB_SERIAL_WHITEHEAT=m
++CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
++CONFIG_USB_SERIAL_CP210X=m
++CONFIG_USB_SERIAL_CYPRESS_M8=m
++CONFIG_USB_SERIAL_EMPEG=m
++CONFIG_USB_SERIAL_FTDI_SIO=m
++CONFIG_USB_SERIAL_VISOR=m
++CONFIG_USB_SERIAL_IPAQ=m
++CONFIG_USB_SERIAL_IR=m
++CONFIG_USB_SERIAL_EDGEPORT=m
++CONFIG_USB_SERIAL_EDGEPORT_TI=m
++CONFIG_USB_SERIAL_F81232=m
++CONFIG_USB_SERIAL_GARMIN=m
++CONFIG_USB_SERIAL_IPW=m
++CONFIG_USB_SERIAL_IUU=m
++CONFIG_USB_SERIAL_KEYSPAN_PDA=m
++CONFIG_USB_SERIAL_KEYSPAN=m
++CONFIG_USB_SERIAL_KEYSPAN_MPR=y
++CONFIG_USB_SERIAL_KEYSPAN_USA28=y
++CONFIG_USB_SERIAL_KEYSPAN_USA28X=y
++CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y
++CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y
++CONFIG_USB_SERIAL_KEYSPAN_USA19=y
++CONFIG_USB_SERIAL_KEYSPAN_USA18X=y
++CONFIG_USB_SERIAL_KEYSPAN_USA19W=y
++CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y
++CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y
++CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
++CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
++CONFIG_USB_SERIAL_KLSI=m
++CONFIG_USB_SERIAL_KOBIL_SCT=m
++CONFIG_USB_SERIAL_MCT_U232=m
++CONFIG_USB_SERIAL_METRO=m
++CONFIG_USB_SERIAL_MOS7720=m
++CONFIG_USB_SERIAL_MOS7715_PARPORT=y
++CONFIG_USB_SERIAL_MOS7840=m
++CONFIG_USB_SERIAL_MXUPORT=m
++CONFIG_USB_SERIAL_NAVMAN=m
++CONFIG_USB_SERIAL_PL2303=m
++CONFIG_USB_SERIAL_OTI6858=m
++CONFIG_USB_SERIAL_QCAUX=m
++CONFIG_USB_SERIAL_QUALCOMM=m
++CONFIG_USB_SERIAL_SPCP8X5=m
++CONFIG_USB_SERIAL_SAFE=m
++# CONFIG_USB_SERIAL_SAFE_PADDED is not set
++CONFIG_USB_SERIAL_SIERRAWIRELESS=m
++CONFIG_USB_SERIAL_SYMBOL=m
++CONFIG_USB_SERIAL_TI=m
++CONFIG_USB_SERIAL_CYBERJACK=m
++CONFIG_USB_SERIAL_XIRCOM=m
++CONFIG_USB_SERIAL_WWAN=m
++CONFIG_USB_SERIAL_OPTION=m
++CONFIG_USB_SERIAL_OMNINET=m
++CONFIG_USB_SERIAL_OPTICON=m
++CONFIG_USB_SERIAL_XSENS_MT=m
++CONFIG_USB_SERIAL_WISHBONE=m
++CONFIG_USB_SERIAL_SSU100=m
++CONFIG_USB_SERIAL_QT2=m
++CONFIG_USB_SERIAL_DEBUG=m
++
++#
++# USB Miscellaneous drivers
++#
++CONFIG_USB_EMI62=m
++CONFIG_USB_EMI26=m
++CONFIG_USB_ADUTUX=m
++CONFIG_USB_SEVSEG=m
++CONFIG_USB_RIO500=m
++CONFIG_USB_LEGOTOWER=m
++CONFIG_USB_LCD=m
++CONFIG_USB_LED=m
++CONFIG_USB_CYPRESS_CY7C63=m
++CONFIG_USB_CYTHERM=m
++CONFIG_USB_IDMOUSE=m
++CONFIG_USB_FTDI_ELAN=m
++CONFIG_USB_APPLEDISPLAY=m
++CONFIG_USB_SISUSBVGA=m
++# CONFIG_USB_SISUSBVGA_CON is not set
++CONFIG_USB_LD=m
++CONFIG_USB_TRANCEVIBRATOR=m
++CONFIG_USB_IOWARRIOR=m
++CONFIG_USB_TEST=m
++CONFIG_USB_EHSET_TEST_FIXTURE=m
++CONFIG_USB_ISIGHTFW=m
++CONFIG_USB_YUREX=m
++CONFIG_USB_EZUSB_FX2=m
++CONFIG_USB_HSIC_USB3503=m
++CONFIG_USB_LINK_LAYER_TEST=m
++CONFIG_USB_CHAOSKEY=m
++CONFIG_USB_ATM=m
++CONFIG_USB_SPEEDTOUCH=m
++CONFIG_USB_CXACRU=m
++CONFIG_USB_UEAGLEATM=m
++CONFIG_USB_XUSBATM=m
++
++#
++# USB Physical Layer drivers
++#
++CONFIG_USB_PHY=y
++CONFIG_NOP_USB_XCEIV=y
++CONFIG_USB_GPIO_VBUS=m
++CONFIG_TAHVO_USB=m
++CONFIG_TAHVO_USB_HOST_BY_DEFAULT=y
++CONFIG_USB_ISP1301=m
++CONFIG_USB_GADGET=m
++# CONFIG_USB_GADGET_DEBUG is not set
++# CONFIG_USB_GADGET_DEBUG_FILES is not set
++# CONFIG_USB_GADGET_DEBUG_FS is not set
++CONFIG_USB_GADGET_VBUS_DRAW=2
++CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2
++
++#
++# USB Peripheral Controller
++#
++CONFIG_USB_FOTG210_UDC=m
++CONFIG_USB_GR_UDC=m
++CONFIG_USB_R8A66597=m
++CONFIG_USB_PXA27X=m
++CONFIG_USB_MV_UDC=m
++CONFIG_USB_MV_U3D=m
++# CONFIG_USB_M66592 is not set
++CONFIG_USB_BDC_UDC=m
++
++#
++# Platform Support
++#
++CONFIG_USB_BDC_PCI=m
++CONFIG_USB_AMD5536UDC=m
++CONFIG_USB_NET2272=m
++CONFIG_USB_NET2272_DMA=y
++CONFIG_USB_NET2280=m
++CONFIG_USB_GOKU=m
++CONFIG_USB_EG20T=m
++CONFIG_USB_GADGET_XILINX=m
++# CONFIG_USB_DUMMY_HCD is not set
++CONFIG_USB_LIBCOMPOSITE=m
++CONFIG_USB_F_ACM=m
++CONFIG_USB_F_SS_LB=m
++CONFIG_USB_U_SERIAL=m
++CONFIG_USB_U_ETHER=m
++CONFIG_USB_F_SERIAL=m
++CONFIG_USB_F_OBEX=m
++CONFIG_USB_F_NCM=m
++CONFIG_USB_F_ECM=m
++CONFIG_USB_F_PHONET=m
++CONFIG_USB_F_EEM=m
++CONFIG_USB_F_SUBSET=m
++CONFIG_USB_F_RNDIS=m
++CONFIG_USB_F_MASS_STORAGE=m
++CONFIG_USB_F_FS=m
++CONFIG_USB_F_UAC1=m
++CONFIG_USB_F_UAC2=m
++CONFIG_USB_F_UVC=m
++CONFIG_USB_F_MIDI=m
++CONFIG_USB_F_HID=m
++CONFIG_USB_F_PRINTER=m
++CONFIG_USB_CONFIGFS=m
++CONFIG_USB_CONFIGFS_SERIAL=y
++CONFIG_USB_CONFIGFS_ACM=y
++CONFIG_USB_CONFIGFS_OBEX=y
++CONFIG_USB_CONFIGFS_NCM=y
++CONFIG_USB_CONFIGFS_ECM=y
++CONFIG_USB_CONFIGFS_ECM_SUBSET=y
++CONFIG_USB_CONFIGFS_RNDIS=y
++CONFIG_USB_CONFIGFS_EEM=y
++CONFIG_USB_CONFIGFS_PHONET=y
++CONFIG_USB_CONFIGFS_MASS_STORAGE=y
++CONFIG_USB_CONFIGFS_F_LB_SS=y
++CONFIG_USB_CONFIGFS_F_FS=y
++CONFIG_USB_CONFIGFS_F_UAC1=y
++CONFIG_USB_CONFIGFS_F_UAC2=y
++CONFIG_USB_CONFIGFS_F_MIDI=y
++CONFIG_USB_CONFIGFS_F_HID=y
++CONFIG_USB_CONFIGFS_F_UVC=y
++CONFIG_USB_CONFIGFS_F_PRINTER=y
++CONFIG_USB_ZERO=m
++CONFIG_USB_AUDIO=m
++CONFIG_GADGET_UAC1=y
++CONFIG_USB_ETH=m
++CONFIG_USB_ETH_RNDIS=y
++CONFIG_USB_ETH_EEM=y
++CONFIG_USB_G_NCM=m
++CONFIG_USB_GADGETFS=m
++CONFIG_USB_FUNCTIONFS=m
++CONFIG_USB_FUNCTIONFS_ETH=y
++CONFIG_USB_FUNCTIONFS_RNDIS=y
++CONFIG_USB_FUNCTIONFS_GENERIC=y
++CONFIG_USB_MASS_STORAGE=m
++CONFIG_USB_GADGET_TARGET=m
++CONFIG_USB_G_SERIAL=m
++CONFIG_USB_MIDI_GADGET=m
++CONFIG_USB_G_PRINTER=m
++CONFIG_USB_CDC_COMPOSITE=m
++CONFIG_USB_G_NOKIA=m
++CONFIG_USB_G_ACM_MS=m
++# CONFIG_USB_G_MULTI is not set
++CONFIG_USB_G_HID=m
++CONFIG_USB_G_DBGP=m
++# CONFIG_USB_G_DBGP_PRINTK is not set
++CONFIG_USB_G_DBGP_SERIAL=y
++CONFIG_USB_G_WEBCAM=m
++CONFIG_USB_LED_TRIG=y
++CONFIG_UWB=m
++CONFIG_UWB_HWA=m
++CONFIG_UWB_WHCI=m
++CONFIG_UWB_I1480U=m
++CONFIG_MMC=y
++# CONFIG_MMC_DEBUG is not set
++
++#
++# MMC/SD/SDIO Card Drivers
++#
++CONFIG_MMC_BLOCK=m
++CONFIG_MMC_BLOCK_MINORS=8
++CONFIG_MMC_BLOCK_BOUNCE=y
++CONFIG_SDIO_UART=m
++# CONFIG_MMC_TEST is not set
++
++#
++# MMC/SD/SDIO Host Controller Drivers
++#
++CONFIG_MMC_SDHCI=m
++CONFIG_MMC_SDHCI_IO_ACCESSORS=y
++CONFIG_MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER=y
++CONFIG_MMC_SDHCI_PCI=m
++CONFIG_MMC_RICOH_MMC=y
++CONFIG_MMC_SDHCI_PLTFM=m
++CONFIG_MMC_SDHCI_OF_ARASAN=m
++CONFIG_MMC_SDHCI_OF_AT91=m
++CONFIG_MMC_SDHCI_OF_ESDHC=m
++CONFIG_MMC_SDHCI_OF_HLWD=m
++CONFIG_MMC_SDHCI_F_SDH30=m
++CONFIG_MMC_WBSD=m
++CONFIG_MMC_TIFM_SD=m
++CONFIG_MMC_SPI=m
++CONFIG_MMC_CB710=m
++CONFIG_MMC_VIA_SDMMC=m
++CONFIG_MMC_VUB300=m
++CONFIG_MMC_USHC=m
++CONFIG_MMC_USDHI6ROL0=m
++CONFIG_MMC_REALTEK_PCI=m
++CONFIG_MMC_REALTEK_USB=m
++CONFIG_MMC_TOSHIBA_PCI=m
++CONFIG_MMC_MTK=m
++CONFIG_MEMSTICK=m
++# CONFIG_MEMSTICK_DEBUG is not set
++
++#
++# MemoryStick drivers
++#
++# CONFIG_MEMSTICK_UNSAFE_RESUME is not set
++CONFIG_MSPRO_BLOCK=m
++CONFIG_MS_BLOCK=m
++
++#
++# MemoryStick Host Controller Drivers
++#
++CONFIG_MEMSTICK_TIFM_MS=m
++CONFIG_MEMSTICK_JMICRON_38X=m
++CONFIG_MEMSTICK_R592=m
++CONFIG_MEMSTICK_REALTEK_PCI=m
++CONFIG_MEMSTICK_REALTEK_USB=m
++CONFIG_NEW_LEDS=y
++CONFIG_LEDS_CLASS=y
++CONFIG_LEDS_CLASS_FLASH=m
++
++#
++# LED drivers
++#
++CONFIG_LEDS_88PM860X=m
++CONFIG_LEDS_BCM6328=m
++CONFIG_LEDS_BCM6358=m
++CONFIG_LEDS_LM3530=m
++CONFIG_LEDS_LM3533=m
++CONFIG_LEDS_LM3642=m
++CONFIG_LEDS_PCA9532=m
++CONFIG_LEDS_PCA9532_GPIO=y
++CONFIG_LEDS_GPIO=m
++CONFIG_LEDS_LP3944=m
++CONFIG_LEDS_LP55XX_COMMON=m
++CONFIG_LEDS_LP5521=m
++CONFIG_LEDS_LP5523=m
++CONFIG_LEDS_LP5562=m
++CONFIG_LEDS_LP8501=m
++CONFIG_LEDS_LP8788=m
++CONFIG_LEDS_LP8860=m
++CONFIG_LEDS_PCA955X=m
++CONFIG_LEDS_PCA963X=m
++CONFIG_LEDS_WM831X_STATUS=m
++CONFIG_LEDS_WM8350=m
++CONFIG_LEDS_DA903X=m
++CONFIG_LEDS_DA9052=m
++CONFIG_LEDS_DAC124S085=m
++CONFIG_LEDS_PWM=m
++CONFIG_LEDS_REGULATOR=m
++CONFIG_LEDS_BD2802=m
++CONFIG_LEDS_LT3593=m
++CONFIG_LEDS_ADP5520=m
++CONFIG_LEDS_MC13783=m
++CONFIG_LEDS_TCA6507=m
++CONFIG_LEDS_TLC591XX=m
++CONFIG_LEDS_MAX77693=m
++CONFIG_LEDS_MAX8997=m
++CONFIG_LEDS_LM355x=m
++CONFIG_LEDS_MENF21BMC=m
++CONFIG_LEDS_KTD2692=m
++
++#
++# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM)
++#
++CONFIG_LEDS_BLINKM=m
++CONFIG_LEDS_POWERNV=m
++CONFIG_LEDS_SYSCON=y
++
++#
++# LED Triggers
++#
++CONFIG_LEDS_TRIGGERS=y
++CONFIG_LEDS_TRIGGER_TIMER=m
++CONFIG_LEDS_TRIGGER_ONESHOT=m
++CONFIG_LEDS_TRIGGER_HEARTBEAT=m
++CONFIG_LEDS_TRIGGER_BACKLIGHT=m
++CONFIG_LEDS_TRIGGER_CPU=y
++CONFIG_LEDS_TRIGGER_GPIO=m
++CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
++
++#
++# iptables trigger is under Netfilter config (LED target)
++#
++CONFIG_LEDS_TRIGGER_TRANSIENT=m
++CONFIG_LEDS_TRIGGER_CAMERA=m
++# CONFIG_ACCESSIBILITY is not set
++CONFIG_INFINIBAND=m
++CONFIG_INFINIBAND_USER_MAD=m
++CONFIG_INFINIBAND_USER_ACCESS=m
++CONFIG_INFINIBAND_USER_MEM=y
++CONFIG_INFINIBAND_ON_DEMAND_PAGING=y
++CONFIG_INFINIBAND_ADDR_TRANS=y
++CONFIG_INFINIBAND_MTHCA=m
++# CONFIG_INFINIBAND_MTHCA_DEBUG is not set
++CONFIG_INFINIBAND_QIB=m
++CONFIG_INFINIBAND_CXGB3=m
++# CONFIG_INFINIBAND_CXGB3_DEBUG is not set
++CONFIG_INFINIBAND_CXGB4=m
++CONFIG_MLX4_INFINIBAND=m
++CONFIG_MLX5_INFINIBAND=m
++CONFIG_INFINIBAND_NES=m
++# CONFIG_INFINIBAND_NES_DEBUG is not set
++CONFIG_INFINIBAND_OCRDMA=m
++CONFIG_INFINIBAND_IPOIB=m
++CONFIG_INFINIBAND_IPOIB_CM=y
++# CONFIG_INFINIBAND_IPOIB_DEBUG is not set
++CONFIG_INFINIBAND_SRP=m
++CONFIG_INFINIBAND_SRPT=m
++CONFIG_INFINIBAND_ISER=m
++CONFIG_INFINIBAND_ISERT=m
++CONFIG_EDAC_ATOMIC_SCRUB=y
++CONFIG_EDAC_SUPPORT=y
++CONFIG_EDAC=y
++# CONFIG_EDAC_LEGACY_SYSFS is not set
++# CONFIG_EDAC_DEBUG is not set
++CONFIG_EDAC_MM_EDAC=m
++CONFIG_EDAC_CPC925=m
++CONFIG_RTC_LIB=y
++CONFIG_RTC_CLASS=y
++CONFIG_RTC_HCTOSYS=y
++CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
++CONFIG_RTC_SYSTOHC=y
++CONFIG_RTC_SYSTOHC_DEVICE="rtc0"
++# CONFIG_RTC_DEBUG is not set
++
++#
++# RTC interfaces
++#
++CONFIG_RTC_INTF_SYSFS=y
++CONFIG_RTC_INTF_PROC=y
++CONFIG_RTC_INTF_DEV=y
++# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
++# CONFIG_RTC_DRV_TEST is not set
++
++#
++# I2C RTC drivers
++#
++CONFIG_RTC_DRV_88PM860X=m
++CONFIG_RTC_DRV_88PM80X=m
++CONFIG_RTC_DRV_ABB5ZES3=m
++CONFIG_RTC_DRV_ABX80X=m
++CONFIG_RTC_DRV_AS3722=m
++CONFIG_RTC_DRV_DS1307=m
++CONFIG_RTC_DRV_DS1374=m
++CONFIG_RTC_DRV_DS1374_WDT=y
++CONFIG_RTC_DRV_DS1672=m
++CONFIG_RTC_DRV_DS3232=m
++CONFIG_RTC_DRV_HYM8563=m
++CONFIG_RTC_DRV_LP8788=m
++CONFIG_RTC_DRV_MAX6900=m
++CONFIG_RTC_DRV_MAX8907=m
++CONFIG_RTC_DRV_MAX8925=m
++CONFIG_RTC_DRV_MAX8998=m
++CONFIG_RTC_DRV_MAX8997=m
++CONFIG_RTC_DRV_MAX77686=m
++CONFIG_RTC_DRV_RK808=m
++CONFIG_RTC_DRV_MAX77802=m
++CONFIG_RTC_DRV_RS5C372=m
++CONFIG_RTC_DRV_ISL1208=m
++CONFIG_RTC_DRV_ISL12022=m
++CONFIG_RTC_DRV_ISL12057=m
++CONFIG_RTC_DRV_X1205=m
++CONFIG_RTC_DRV_PALMAS=m
++CONFIG_RTC_DRV_PCF2127=m
++CONFIG_RTC_DRV_PCF8523=m
++CONFIG_RTC_DRV_PCF8563=m
++CONFIG_RTC_DRV_PCF85063=m
++CONFIG_RTC_DRV_PCF8583=m
++CONFIG_RTC_DRV_M41T80=m
++CONFIG_RTC_DRV_M41T80_WDT=y
++CONFIG_RTC_DRV_BQ32K=m
++CONFIG_RTC_DRV_TWL4030=m
++CONFIG_RTC_DRV_TPS6586X=m
++CONFIG_RTC_DRV_TPS65910=m
++CONFIG_RTC_DRV_TPS80031=m
++CONFIG_RTC_DRV_RC5T583=m
++CONFIG_RTC_DRV_S35390A=m
++CONFIG_RTC_DRV_FM3130=m
++CONFIG_RTC_DRV_RX8581=m
++CONFIG_RTC_DRV_RX8025=m
++CONFIG_RTC_DRV_EM3027=m
++CONFIG_RTC_DRV_RV3029C2=m
++CONFIG_RTC_DRV_RV8803=m
++CONFIG_RTC_DRV_S5M=m
++
++#
++# SPI RTC drivers
++#
++CONFIG_RTC_DRV_M41T93=m
++CONFIG_RTC_DRV_M41T94=m
++CONFIG_RTC_DRV_DS1305=m
++CONFIG_RTC_DRV_DS1343=m
++CONFIG_RTC_DRV_DS1347=m
++CONFIG_RTC_DRV_DS1390=m
++CONFIG_RTC_DRV_MAX6902=m
++CONFIG_RTC_DRV_R9701=m
++CONFIG_RTC_DRV_RS5C348=m
++CONFIG_RTC_DRV_DS3234=m
++CONFIG_RTC_DRV_PCF2123=m
++CONFIG_RTC_DRV_RX4581=m
++CONFIG_RTC_DRV_MCP795=m
++
++#
++# Platform RTC drivers
++#
++CONFIG_RTC_DRV_CMOS=m
++CONFIG_RTC_DRV_DS1286=m
++CONFIG_RTC_DRV_DS1511=m
++CONFIG_RTC_DRV_DS1553=m
++CONFIG_RTC_DRV_DS1685_FAMILY=m
++CONFIG_RTC_DRV_DS1685=y
++# CONFIG_RTC_DRV_DS1689 is not set
++# CONFIG_RTC_DRV_DS17285 is not set
++# CONFIG_RTC_DRV_DS17485 is not set
++# CONFIG_RTC_DRV_DS17885 is not set
++# CONFIG_RTC_DS1685_PROC_REGS is not set
++# CONFIG_RTC_DS1685_SYSFS_REGS is not set
++CONFIG_RTC_DRV_DS1742=m
++CONFIG_RTC_DRV_DS2404=m
++CONFIG_RTC_DRV_DA9052=m
++CONFIG_RTC_DRV_DA9055=m
++CONFIG_RTC_DRV_DA9063=m
++CONFIG_RTC_DRV_STK17TA8=m
++CONFIG_RTC_DRV_M48T86=m
++CONFIG_RTC_DRV_M48T35=m
++CONFIG_RTC_DRV_M48T59=m
++CONFIG_RTC_DRV_MSM6242=m
++CONFIG_RTC_DRV_BQ4802=m
++CONFIG_RTC_DRV_RP5C01=m
++CONFIG_RTC_DRV_V3020=m
++CONFIG_RTC_DRV_WM831X=m
++CONFIG_RTC_DRV_WM8350=m
++CONFIG_RTC_DRV_PCF50633=m
++CONFIG_RTC_DRV_AB3100=m
++CONFIG_RTC_DRV_OPAL=y
++CONFIG_RTC_DRV_ZYNQMP=m
++
++#
++# on-CPU RTC drivers
++#
++CONFIG_RTC_DRV_GENERIC=m
++CONFIG_RTC_DRV_PCAP=m
++CONFIG_RTC_DRV_MC13XXX=m
++CONFIG_RTC_DRV_SNVS=m
++CONFIG_RTC_DRV_MT6397=m
++
++#
++# HID Sensor RTC drivers
++#
++CONFIG_RTC_DRV_HID_SENSOR_TIME=m
++CONFIG_DMADEVICES=y
++# CONFIG_DMADEVICES_DEBUG is not set
++
++#
++# DMA Devices
++#
++CONFIG_DMA_ENGINE=y
++CONFIG_DMA_VIRTUAL_CHANNELS=m
++CONFIG_DMA_OF=y
++CONFIG_FSL_EDMA=m
++CONFIG_INTEL_IDMA64=m
++CONFIG_DW_DMAC_CORE=m
++CONFIG_DW_DMAC=m
++CONFIG_DW_DMAC_PCI=m
++CONFIG_HSU_DMA=m
++
++#
++# DMA Clients
++#
++CONFIG_ASYNC_TX_DMA=y
++# CONFIG_DMATEST is not set
++CONFIG_AUXDISPLAY=y
++CONFIG_KS0108=m
++CONFIG_KS0108_PORT=0x378
++CONFIG_KS0108_DELAY=2
++CONFIG_UIO=m
++CONFIG_UIO_CIF=m
++CONFIG_UIO_PDRV_GENIRQ=m
++CONFIG_UIO_DMEM_GENIRQ=m
++CONFIG_UIO_AEC=m
++CONFIG_UIO_SERCOS3=m
++CONFIG_UIO_PCI_GENERIC=m
++CONFIG_UIO_NETX=m
++CONFIG_UIO_FSL_ELBC_GPCM=m
++# CONFIG_UIO_FSL_ELBC_GPCM_NETX5152 is not set
++CONFIG_UIO_PRUSS=m
++CONFIG_UIO_MF624=m
++# CONFIG_VFIO_IOMMU_TYPE1 is not set
++CONFIG_VFIO_IOMMU_SPAPR_TCE=m
++CONFIG_VFIO_SPAPR_EEH=m
++CONFIG_VFIO_VIRQFD=m
++CONFIG_VFIO=m
++CONFIG_VFIO_PCI=m
++CONFIG_VFIO_PCI_MMAP=y
++CONFIG_VFIO_PCI_INTX=y
++CONFIG_IRQ_BYPASS_MANAGER=m
++CONFIG_VIRT_DRIVERS=y
++CONFIG_VIRTIO=y
++
++#
++# Virtio drivers
++#
++CONFIG_VIRTIO_PCI=y
++CONFIG_VIRTIO_PCI_LEGACY=y
++CONFIG_VIRTIO_BALLOON=y
++CONFIG_VIRTIO_INPUT=m
++CONFIG_VIRTIO_MMIO=y
++CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
++
++#
++# Microsoft Hyper-V guest support
++#
++CONFIG_STAGING=y
++CONFIG_PRISM2_USB=m
++CONFIG_COMEDI=m
++# CONFIG_COMEDI_DEBUG is not set
++CONFIG_COMEDI_DEFAULT_BUF_SIZE_KB=2048
++CONFIG_COMEDI_DEFAULT_BUF_MAXSIZE_KB=20480
++CONFIG_COMEDI_MISC_DRIVERS=y
++CONFIG_COMEDI_BOND=m
++CONFIG_COMEDI_TEST=m
++CONFIG_COMEDI_PARPORT=m
++CONFIG_COMEDI_SERIAL2002=m
++CONFIG_COMEDI_ISA_DRIVERS=y
++CONFIG_COMEDI_PCL711=m
++CONFIG_COMEDI_PCL724=m
++CONFIG_COMEDI_PCL726=m
++CONFIG_COMEDI_PCL730=m
++CONFIG_COMEDI_PCL812=m
++CONFIG_COMEDI_PCL816=m
++CONFIG_COMEDI_PCL818=m
++CONFIG_COMEDI_PCM3724=m
++CONFIG_COMEDI_AMPLC_DIO200_ISA=m
++CONFIG_COMEDI_AMPLC_PC236_ISA=m
++CONFIG_COMEDI_AMPLC_PC263_ISA=m
++CONFIG_COMEDI_RTI800=m
++CONFIG_COMEDI_RTI802=m
++CONFIG_COMEDI_DAC02=m
++CONFIG_COMEDI_DAS16M1=m
++CONFIG_COMEDI_DAS08_ISA=m
++CONFIG_COMEDI_DAS16=m
++CONFIG_COMEDI_DAS800=m
++CONFIG_COMEDI_DAS1800=m
++CONFIG_COMEDI_DAS6402=m
++CONFIG_COMEDI_DT2801=m
++CONFIG_COMEDI_DT2811=m
++CONFIG_COMEDI_DT2814=m
++CONFIG_COMEDI_DT2815=m
++CONFIG_COMEDI_DT2817=m
++CONFIG_COMEDI_DT282X=m
++CONFIG_COMEDI_DMM32AT=m
++CONFIG_COMEDI_FL512=m
++CONFIG_COMEDI_AIO_AIO12_8=m
++CONFIG_COMEDI_AIO_IIRO_16=m
++CONFIG_COMEDI_II_PCI20KC=m
++CONFIG_COMEDI_C6XDIGIO=m
++CONFIG_COMEDI_MPC624=m
++CONFIG_COMEDI_ADQ12B=m
++CONFIG_COMEDI_NI_AT_A2150=m
++CONFIG_COMEDI_NI_AT_AO=m
++CONFIG_COMEDI_NI_ATMIO=m
++CONFIG_COMEDI_NI_ATMIO16D=m
++CONFIG_COMEDI_NI_LABPC_ISA=m
++CONFIG_COMEDI_PCMAD=m
++CONFIG_COMEDI_PCMDA12=m
++CONFIG_COMEDI_PCMMIO=m
++CONFIG_COMEDI_PCMUIO=m
++CONFIG_COMEDI_MULTIQ3=m
++CONFIG_COMEDI_S526=m
++CONFIG_COMEDI_PCI_DRIVERS=m
++CONFIG_COMEDI_8255_PCI=m
++CONFIG_COMEDI_ADDI_WATCHDOG=m
++CONFIG_COMEDI_ADDI_APCI_1032=m
++CONFIG_COMEDI_ADDI_APCI_1500=m
++CONFIG_COMEDI_ADDI_APCI_1516=m
++CONFIG_COMEDI_ADDI_APCI_1564=m
++CONFIG_COMEDI_ADDI_APCI_16XX=m
++CONFIG_COMEDI_ADDI_APCI_2032=m
++CONFIG_COMEDI_ADDI_APCI_2200=m
++CONFIG_COMEDI_ADDI_APCI_3120=m
++CONFIG_COMEDI_ADDI_APCI_3501=m
++CONFIG_COMEDI_ADDI_APCI_3XXX=m
++CONFIG_COMEDI_ADL_PCI6208=m
++CONFIG_COMEDI_ADL_PCI7X3X=m
++CONFIG_COMEDI_ADL_PCI8164=m
++CONFIG_COMEDI_ADL_PCI9111=m
++CONFIG_COMEDI_ADL_PCI9118=m
++CONFIG_COMEDI_ADV_PCI1710=m
++CONFIG_COMEDI_ADV_PCI1723=m
++CONFIG_COMEDI_ADV_PCI1724=m
++CONFIG_COMEDI_ADV_PCI_DIO=m
++CONFIG_COMEDI_AMPLC_DIO200_PCI=m
++CONFIG_COMEDI_AMPLC_PC236_PCI=m
++CONFIG_COMEDI_AMPLC_PC263_PCI=m
++CONFIG_COMEDI_AMPLC_PCI224=m
++CONFIG_COMEDI_AMPLC_PCI230=m
++CONFIG_COMEDI_CONTEC_PCI_DIO=m
++CONFIG_COMEDI_DAS08_PCI=m
++CONFIG_COMEDI_DT3000=m
++CONFIG_COMEDI_DYNA_PCI10XX=m
++CONFIG_COMEDI_GSC_HPDI=m
++CONFIG_COMEDI_MF6X4=m
++CONFIG_COMEDI_ICP_MULTI=m
++CONFIG_COMEDI_DAQBOARD2000=m
++CONFIG_COMEDI_JR3_PCI=m
++CONFIG_COMEDI_KE_COUNTER=m
++CONFIG_COMEDI_CB_PCIDAS64=m
++CONFIG_COMEDI_CB_PCIDAS=m
++CONFIG_COMEDI_CB_PCIDDA=m
++CONFIG_COMEDI_CB_PCIMDAS=m
++CONFIG_COMEDI_CB_PCIMDDA=m
++CONFIG_COMEDI_ME4000=m
++CONFIG_COMEDI_ME_DAQ=m
++CONFIG_COMEDI_NI_6527=m
++CONFIG_COMEDI_NI_65XX=m
++CONFIG_COMEDI_NI_660X=m
++CONFIG_COMEDI_NI_670X=m
++CONFIG_COMEDI_NI_LABPC_PCI=m
++CONFIG_COMEDI_NI_PCIDIO=m
++CONFIG_COMEDI_NI_PCIMIO=m
++CONFIG_COMEDI_RTD520=m
++CONFIG_COMEDI_S626=m
++CONFIG_COMEDI_MITE=m
++CONFIG_COMEDI_NI_TIOCMD=m
++CONFIG_COMEDI_USB_DRIVERS=m
++CONFIG_COMEDI_DT9812=m
++CONFIG_COMEDI_NI_USB6501=m
++CONFIG_COMEDI_USBDUX=m
++CONFIG_COMEDI_USBDUXFAST=m
++CONFIG_COMEDI_USBDUXSIGMA=m
++CONFIG_COMEDI_VMK80XX=m
++CONFIG_COMEDI_8254=m
++CONFIG_COMEDI_8255=m
++CONFIG_COMEDI_8255_SA=m
++CONFIG_COMEDI_KCOMEDILIB=m
++CONFIG_COMEDI_AMPLC_DIO200=m
++CONFIG_COMEDI_AMPLC_PC236=m
++CONFIG_COMEDI_DAS08=m
++CONFIG_COMEDI_ISADMA=m
++CONFIG_COMEDI_NI_LABPC=m
++CONFIG_COMEDI_NI_LABPC_ISADMA=m
++CONFIG_COMEDI_NI_TIO=m
++CONFIG_PANEL=m
++CONFIG_PANEL_PARPORT=0
++CONFIG_PANEL_PROFILE=5
++# CONFIG_PANEL_CHANGE_MESSAGE is not set
++CONFIG_RTL8192U=m
++CONFIG_RTLLIB=m
++CONFIG_RTLLIB_CRYPTO_CCMP=m
++CONFIG_RTLLIB_CRYPTO_TKIP=m
++CONFIG_RTLLIB_CRYPTO_WEP=m
++CONFIG_RTL8192E=m
++CONFIG_R8712U=m
++CONFIG_R8188EU=m
++CONFIG_88EU_AP_MODE=y
++CONFIG_R8723AU=m
++CONFIG_8723AU_AP_MODE=y
++CONFIG_8723AU_BT_COEXIST=y
++CONFIG_RTS5208=m
++CONFIG_VT6655=m
++CONFIG_VT6656=m
++
++#
++# IIO staging drivers
++#
++
++#
++# Accelerometers
++#
++CONFIG_ADIS16201=m
++CONFIG_ADIS16203=m
++CONFIG_ADIS16204=m
++CONFIG_ADIS16209=m
++CONFIG_ADIS16220=m
++CONFIG_ADIS16240=m
++CONFIG_LIS3L02DQ=m
++CONFIG_SCA3000=m
++
++#
++# Analog to digital converters
++#
++CONFIG_AD7606=m
++CONFIG_AD7606_IFACE_PARALLEL=m
++CONFIG_AD7606_IFACE_SPI=m
++CONFIG_AD7780=m
++CONFIG_AD7816=m
++CONFIG_AD7192=m
++CONFIG_AD7280=m
++
++#
++# Analog digital bi-direction converters
++#
++CONFIG_ADT7316=m
++CONFIG_ADT7316_SPI=m
++CONFIG_ADT7316_I2C=m
++
++#
++# Capacitance to digital converters
++#
++CONFIG_AD7150=m
++CONFIG_AD7152=m
++CONFIG_AD7746=m
++
++#
++# Direct Digital Synthesis
++#
++CONFIG_AD9832=m
++CONFIG_AD9834=m
++
++#
++# Digital gyroscope sensors
++#
++CONFIG_ADIS16060=m
++
++#
++# Network Analyzer, Impedance Converters
++#
++CONFIG_AD5933=m
++
++#
++# Light sensors
++#
++CONFIG_SENSORS_ISL29018=m
++CONFIG_SENSORS_ISL29028=m
++CONFIG_TSL2583=m
++CONFIG_TSL2x7x=m
++
++#
++# Magnetometer sensors
++#
++CONFIG_SENSORS_HMC5843=m
++CONFIG_SENSORS_HMC5843_I2C=m
++CONFIG_SENSORS_HMC5843_SPI=m
++
++#
++# Active energy metering IC
++#
++CONFIG_ADE7753=m
++CONFIG_ADE7754=m
++CONFIG_ADE7758=m
++CONFIG_ADE7759=m
++CONFIG_ADE7854=m
++CONFIG_ADE7854_I2C=m
++CONFIG_ADE7854_SPI=m
++
++#
++# Resolver to digital converters
++#
++CONFIG_AD2S90=m
++CONFIG_AD2S1200=m
++CONFIG_AD2S1210=m
++
++#
++# Triggers - standalone
++#
++CONFIG_IIO_PERIODIC_RTC_TRIGGER=m
++CONFIG_IIO_SIMPLE_DUMMY=m
++# CONFIG_IIO_SIMPLE_DUMMY_EVENTS is not set
++# CONFIG_IIO_SIMPLE_DUMMY_BUFFER is not set
++CONFIG_FB_SM750=m
++CONFIG_FB_XGI=m
++
++#
++# Speakup console speech
++#
++CONFIG_SPEAKUP=m
++CONFIG_SPEAKUP_SYNTH_ACNTSA=m
++CONFIG_SPEAKUP_SYNTH_APOLLO=m
++CONFIG_SPEAKUP_SYNTH_AUDPTR=m
++CONFIG_SPEAKUP_SYNTH_BNS=m
++CONFIG_SPEAKUP_SYNTH_DECTLK=m
++CONFIG_SPEAKUP_SYNTH_DECEXT=m
++CONFIG_SPEAKUP_SYNTH_LTLK=m
++CONFIG_SPEAKUP_SYNTH_SOFT=m
++CONFIG_SPEAKUP_SYNTH_SPKOUT=m
++CONFIG_SPEAKUP_SYNTH_TXPRT=m
++CONFIG_SPEAKUP_SYNTH_DUMMY=m
++CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4=m
++CONFIG_STAGING_MEDIA=y
++CONFIG_I2C_BCM2048=m
++CONFIG_DVB_CXD2099=m
++CONFIG_DVB_MN88472=m
++CONFIG_DVB_MN88473=m
++CONFIG_LIRC_STAGING=y
++CONFIG_LIRC_BT829=m
++CONFIG_LIRC_IMON=m
++CONFIG_LIRC_PARALLEL=m
++CONFIG_LIRC_SASEM=m
++CONFIG_LIRC_SERIAL=m
++CONFIG_LIRC_SERIAL_TRANSMITTER=y
++CONFIG_LIRC_SIR=m
++CONFIG_LIRC_ZILOG=m
++CONFIG_STAGING_RDMA=m
++CONFIG_INFINIBAND_AMSO1100=m
++# CONFIG_INFINIBAND_AMSO1100_DEBUG is not set
++CONFIG_INFINIBAND_EHCA=m
++
++#
++# Android
++#
++CONFIG_WIMAX_GDM72XX=m
++CONFIG_WIMAX_GDM72XX_QOS=y
++CONFIG_WIMAX_GDM72XX_K_MODE=y
++CONFIG_WIMAX_GDM72XX_WIMAX2=y
++CONFIG_WIMAX_GDM72XX_USB=y
++# CONFIG_WIMAX_GDM72XX_SDIO is not set
++CONFIG_WIMAX_GDM72XX_USB_PM=y
++CONFIG_LTE_GDM724X=m
++CONFIG_FIREWIRE_SERIAL=m
++CONFIG_FWTTY_MAX_TOTAL_PORTS=64
++CONFIG_FWTTY_MAX_CARD_PORTS=32
++CONFIG_MTD_SPINAND_MT29F=m
++CONFIG_MTD_SPINAND_ONDIEECC=y
++# CONFIG_LUSTRE_FS is not set
++CONFIG_DGNC=m
++CONFIG_DGAP=m
++CONFIG_GS_FPGABOOT=m
++CONFIG_FB_TFT=m
++CONFIG_FB_TFT_AGM1264K_FL=m
++CONFIG_FB_TFT_BD663474=m
++CONFIG_FB_TFT_HX8340BN=m
++CONFIG_FB_TFT_HX8347D=m
++CONFIG_FB_TFT_HX8353D=m
++CONFIG_FB_TFT_HX8357D=m
++CONFIG_FB_TFT_ILI9163=m
++CONFIG_FB_TFT_ILI9320=m
++CONFIG_FB_TFT_ILI9325=m
++CONFIG_FB_TFT_ILI9340=m
++CONFIG_FB_TFT_ILI9341=m
++CONFIG_FB_TFT_ILI9481=m
++CONFIG_FB_TFT_ILI9486=m
++CONFIG_FB_TFT_PCD8544=m
++CONFIG_FB_TFT_RA8875=m
++CONFIG_FB_TFT_S6D02A1=m
++CONFIG_FB_TFT_S6D1121=m
++CONFIG_FB_TFT_SSD1289=m
++CONFIG_FB_TFT_SSD1306=m
++CONFIG_FB_TFT_SSD1331=m
++CONFIG_FB_TFT_SSD1351=m
++CONFIG_FB_TFT_ST7735R=m
++CONFIG_FB_TFT_ST7789V=m
++CONFIG_FB_TFT_TINYLCD=m
++CONFIG_FB_TFT_TLS8204=m
++CONFIG_FB_TFT_UC1611=m
++CONFIG_FB_TFT_UC1701=m
++CONFIG_FB_TFT_UPD161704=m
++CONFIG_FB_TFT_WATTEROTT=m
++CONFIG_FB_FLEX=m
++CONFIG_FB_TFT_FBTFT_DEVICE=m
++# CONFIG_WILC1000_DRIVER is not set
++CONFIG_MOST=m
++CONFIG_MOSTCORE=m
++CONFIG_AIM_CDEV=m
++CONFIG_AIM_NETWORK=m
++CONFIG_AIM_SOUND=m
++CONFIG_AIM_V4L2=m
++CONFIG_HDM_DIM2=m
++CONFIG_HDM_I2C=m
++CONFIG_HDM_USB=m
++
++#
++# Hardware Spinlock drivers
++#
++
++#
++# Clock Source drivers
++#
++CONFIG_I8253_LOCK=y
++CONFIG_CLKBLD_I8253=y
++# CONFIG_ATMEL_PIT is not set
++# CONFIG_SH_TIMER_CMT is not set
++# CONFIG_SH_TIMER_MTU2 is not set
++# CONFIG_SH_TIMER_TMU is not set
++# CONFIG_EM_TIMER_STI is not set
++CONFIG_MAILBOX=y
++CONFIG_ALTERA_MBOX=m
++CONFIG_MAILBOX_TEST=m
++CONFIG_IOMMU_API=y
++CONFIG_IOMMU_SUPPORT=y
++
++#
++# Generic IOMMU Pagetable Support
++#
++CONFIG_OF_IOMMU=y
++CONFIG_SPAPR_TCE_IOMMU=y
++
++#
++# Remoteproc drivers
++#
++CONFIG_REMOTEPROC=m
++CONFIG_STE_MODEM_RPROC=m
++
++#
++# Rpmsg drivers
++#
++
++#
++# SOC (System On Chip) specific Drivers
++#
++# CONFIG_SUNXI_SRAM is not set
++CONFIG_SOC_TI=y
++CONFIG_PM_DEVFREQ=y
++
++#
++# DEVFREQ Governors
++#
++CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
++CONFIG_DEVFREQ_GOV_PERFORMANCE=y
++CONFIG_DEVFREQ_GOV_POWERSAVE=y
++CONFIG_DEVFREQ_GOV_USERSPACE=y
++
++#
++# DEVFREQ Drivers
++#
++CONFIG_PM_DEVFREQ_EVENT=y
++CONFIG_EXTCON=y
++
++#
++# Extcon Device Drivers
++#
++CONFIG_EXTCON_ADC_JACK=m
++CONFIG_EXTCON_ARIZONA=m
++CONFIG_EXTCON_AXP288=m
++CONFIG_EXTCON_GPIO=m
++CONFIG_EXTCON_MAX14577=m
++CONFIG_EXTCON_MAX77693=m
++CONFIG_EXTCON_MAX77843=m
++CONFIG_EXTCON_MAX8997=m
++CONFIG_EXTCON_PALMAS=m
++CONFIG_EXTCON_RT8973A=m
++CONFIG_EXTCON_SM5502=m
++CONFIG_EXTCON_USB_GPIO=m
++CONFIG_MEMORY=y
++CONFIG_IIO=m
++CONFIG_IIO_BUFFER=y
++CONFIG_IIO_BUFFER_CB=m
++CONFIG_IIO_KFIFO_BUF=m
++CONFIG_IIO_TRIGGERED_BUFFER=m
++CONFIG_IIO_TRIGGER=y
++CONFIG_IIO_CONSUMERS_PER_TRIGGER=2
++CONFIG_IIO_TRIGGERED_EVENT=m
++
++#
++# Accelerometers
++#
++CONFIG_BMA180=m
++CONFIG_BMC150_ACCEL=m
++CONFIG_BMC150_ACCEL_I2C=m
++CONFIG_BMC150_ACCEL_SPI=m
++CONFIG_HID_SENSOR_ACCEL_3D=m
++CONFIG_IIO_ST_ACCEL_3AXIS=m
++CONFIG_IIO_ST_ACCEL_I2C_3AXIS=m
++CONFIG_IIO_ST_ACCEL_SPI_3AXIS=m
++CONFIG_KXSD9=m
++CONFIG_KXCJK1013=m
++CONFIG_MMA8452=m
++CONFIG_MMA9551_CORE=m
++CONFIG_MMA9551=m
++CONFIG_MMA9553=m
++CONFIG_MXC4005=m
++CONFIG_STK8312=m
++CONFIG_STK8BA50=m
++
++#
++# Analog to digital converters
++#
++CONFIG_AD_SIGMA_DELTA=m
++CONFIG_AD7266=m
++CONFIG_AD7291=m
++CONFIG_AD7298=m
++CONFIG_AD7476=m
++CONFIG_AD7791=m
++CONFIG_AD7793=m
++CONFIG_AD7887=m
++CONFIG_AD7923=m
++CONFIG_AD799X=m
++CONFIG_AXP288_ADC=m
++CONFIG_DA9150_GPADC=m
++CONFIG_HI8435=m
++CONFIG_LP8788_ADC=m
++CONFIG_MAX1027=m
++CONFIG_MAX1363=m
++CONFIG_MCP320X=m
++CONFIG_MCP3422=m
++CONFIG_MEN_Z188_ADC=m
++CONFIG_NAU7802=m
++CONFIG_QCOM_SPMI_IADC=m
++CONFIG_QCOM_SPMI_VADC=m
++CONFIG_TI_ADC081C=m
++CONFIG_TI_ADC128S052=m
++CONFIG_TI_AM335X_ADC=m
++CONFIG_TWL4030_MADC=m
++CONFIG_TWL6030_GPADC=m
++CONFIG_VF610_ADC=m
++CONFIG_VIPERBOARD_ADC=m
++
++#
++# Amplifiers
++#
++CONFIG_AD8366=m
++
++#
++# Chemical Sensors
++#
++CONFIG_VZ89X=m
++
++#
++# Hid Sensor IIO Common
++#
++CONFIG_HID_SENSOR_IIO_COMMON=m
++CONFIG_HID_SENSOR_IIO_TRIGGER=m
++CONFIG_IIO_MS_SENSORS_I2C=m
++
++#
++# SSP Sensor Common
++#
++CONFIG_IIO_SSP_SENSORS_COMMONS=m
++CONFIG_IIO_SSP_SENSORHUB=m
++CONFIG_IIO_ST_SENSORS_I2C=m
++CONFIG_IIO_ST_SENSORS_SPI=m
++CONFIG_IIO_ST_SENSORS_CORE=m
++
++#
++# Digital to analog converters
++#
++CONFIG_AD5064=m
++CONFIG_AD5360=m
++CONFIG_AD5380=m
++CONFIG_AD5421=m
++CONFIG_AD5446=m
++CONFIG_AD5449=m
++CONFIG_AD5504=m
++CONFIG_AD5624R_SPI=m
++CONFIG_AD5686=m
++CONFIG_AD5755=m
++CONFIG_AD5764=m
++CONFIG_AD5791=m
++CONFIG_AD7303=m
++CONFIG_M62332=m
++CONFIG_MAX517=m
++CONFIG_MAX5821=m
++CONFIG_MCP4725=m
++CONFIG_MCP4922=m
++
++#
++# Frequency Synthesizers DDS/PLL
++#
++
++#
++# Clock Generator/Distribution
++#
++CONFIG_AD9523=m
++
++#
++# Phase-Locked Loop (PLL) frequency synthesizers
++#
++CONFIG_ADF4350=m
++
++#
++# Digital gyroscope sensors
++#
++CONFIG_ADIS16080=m
++CONFIG_ADIS16130=m
++CONFIG_ADIS16136=m
++CONFIG_ADIS16260=m
++CONFIG_ADXRS450=m
++CONFIG_BMG160=m
++CONFIG_BMG160_I2C=m
++CONFIG_BMG160_SPI=m
++CONFIG_HID_SENSOR_GYRO_3D=m
++CONFIG_IIO_ST_GYRO_3AXIS=m
++CONFIG_IIO_ST_GYRO_I2C_3AXIS=m
++CONFIG_IIO_ST_GYRO_SPI_3AXIS=m
++CONFIG_ITG3200=m
++
++#
++# Humidity sensors
++#
++CONFIG_DHT11=m
++CONFIG_HDC100X=m
++CONFIG_HTU21=m
++CONFIG_SI7005=m
++CONFIG_SI7020=m
++
++#
++# Inertial measurement units
++#
++CONFIG_ADIS16400=m
++CONFIG_ADIS16480=m
++CONFIG_KMX61=m
++CONFIG_INV_MPU6050_IIO=m
++CONFIG_IIO_ADIS_LIB=m
++CONFIG_IIO_ADIS_LIB_BUFFER=y
++
++#
++# Light sensors
++#
++CONFIG_ADJD_S311=m
++CONFIG_AL3320A=m
++CONFIG_APDS9300=m
++CONFIG_APDS9960=m
++CONFIG_BH1750=m
++CONFIG_CM32181=m
++CONFIG_CM3232=m
++CONFIG_CM3323=m
++CONFIG_CM36651=m
++CONFIG_GP2AP020A00F=m
++CONFIG_ISL29125=m
++CONFIG_HID_SENSOR_ALS=m
++CONFIG_HID_SENSOR_PROX=m
++CONFIG_JSA1212=m
++CONFIG_RPR0521=m
++CONFIG_SENSORS_LM3533=m
++CONFIG_LTR501=m
++CONFIG_OPT3001=m
++CONFIG_PA12203001=m
++CONFIG_STK3310=m
++CONFIG_TCS3414=m
++CONFIG_TCS3472=m
++CONFIG_SENSORS_TSL2563=m
++CONFIG_TSL4531=m
++CONFIG_US5182D=m
++CONFIG_VCNL4000=m
++
++#
++# Magnetometer sensors
++#
++CONFIG_AK8975=m
++CONFIG_AK09911=m
++CONFIG_BMC150_MAGN=m
++CONFIG_MAG3110=m
++CONFIG_HID_SENSOR_MAGNETOMETER_3D=m
++CONFIG_MMC35240=m
++CONFIG_IIO_ST_MAGN_3AXIS=m
++CONFIG_IIO_ST_MAGN_I2C_3AXIS=m
++CONFIG_IIO_ST_MAGN_SPI_3AXIS=m
++
++#
++# Inclinometer sensors
++#
++CONFIG_HID_SENSOR_INCLINOMETER_3D=m
++CONFIG_HID_SENSOR_DEVICE_ROTATION=m
++
++#
++# Triggers - standalone
++#
++CONFIG_IIO_INTERRUPT_TRIGGER=m
++CONFIG_IIO_SYSFS_TRIGGER=m
++
++#
++# Digital potentiometers
++#
++CONFIG_MCP4531=m
++
++#
++# Pressure sensors
++#
++CONFIG_BMP280=m
++CONFIG_HID_SENSOR_PRESS=m
++CONFIG_MPL115=m
++CONFIG_MPL3115=m
++CONFIG_MS5611=m
++CONFIG_MS5611_I2C=m
++CONFIG_MS5611_SPI=m
++CONFIG_MS5637=m
++CONFIG_IIO_ST_PRESS=m
++CONFIG_IIO_ST_PRESS_I2C=m
++CONFIG_IIO_ST_PRESS_SPI=m
++CONFIG_T5403=m
++
++#
++# Lightning sensors
++#
++CONFIG_AS3935=m
++
++#
++# Proximity sensors
++#
++CONFIG_LIDAR_LITE_V2=m
++CONFIG_SX9500=m
++
++#
++# Temperature sensors
++#
++CONFIG_MLX90614=m
++CONFIG_TMP006=m
++CONFIG_TSYS01=m
++CONFIG_TSYS02D=m
++CONFIG_NTB=m
++CONFIG_NTB_PINGPONG=m
++CONFIG_NTB_TOOL=m
++CONFIG_NTB_TRANSPORT=m
++CONFIG_VME_BUS=y
++
++#
++# VME Bridge Drivers
++#
++CONFIG_VME_TSI148=m
++
++#
++# VME Board Drivers
++#
++CONFIG_VMIVME_7805=m
++
++#
++# VME Device Drivers
++#
++CONFIG_VME_USER=m
++CONFIG_VME_PIO2=m
++CONFIG_PWM=y
++CONFIG_PWM_SYSFS=y
++CONFIG_PWM_FSL_FTM=m
++CONFIG_PWM_LP3943=m
++CONFIG_PWM_PCA9685=m
++CONFIG_PWM_TWL=m
++CONFIG_PWM_TWL_LED=m
++CONFIG_IRQCHIP=y
++CONFIG_IPACK_BUS=m
++CONFIG_BOARD_TPCI200=m
++CONFIG_SERIAL_IPOCTAL=m
++CONFIG_RESET_CONTROLLER=y
++CONFIG_FMC=m
++CONFIG_FMC_FAKEDEV=m
++CONFIG_FMC_TRIVIAL=m
++CONFIG_FMC_WRITE_EEPROM=m
++CONFIG_FMC_CHARDEV=m
++
++#
++# PHY Subsystem
++#
++CONFIG_GENERIC_PHY=y
++CONFIG_PHY_PXA_28NM_HSIC=m
++CONFIG_PHY_PXA_28NM_USB2=m
++CONFIG_BCM_KONA_USB2_PHY=m
++CONFIG_PHY_SAMSUNG_USB2=m
++# CONFIG_PHY_EXYNOS4210_USB2 is not set
++# CONFIG_PHY_EXYNOS4X12_USB2 is not set
++# CONFIG_PHY_EXYNOS5250_USB2 is not set
++CONFIG_PHY_TUSB1210=m
++CONFIG_POWERCAP=y
++CONFIG_MCB=m
++CONFIG_MCB_PCI=m
++
++#
++# Performance monitor support
++#
++CONFIG_RAS=y
++CONFIG_THUNDERBOLT=m
++
++#
++# Android
++#
++# CONFIG_ANDROID is not set
++CONFIG_LIBNVDIMM=y
++CONFIG_BLK_DEV_PMEM=m
++CONFIG_ND_BLK=m
++CONFIG_ND_CLAIM=y
++CONFIG_ND_BTT=m
++CONFIG_BTT=y
++CONFIG_NVMEM=m
++CONFIG_STM=m
++CONFIG_STM_DUMMY=m
++CONFIG_STM_SOURCE_CONSOLE=m
++CONFIG_INTEL_TH=m
++CONFIG_INTEL_TH_PCI=m
++CONFIG_INTEL_TH_GTH=m
++CONFIG_INTEL_TH_STH=m
++CONFIG_INTEL_TH_MSU=m
++CONFIG_INTEL_TH_PTI=m
++# CONFIG_INTEL_TH_DEBUG is not set
++
++#
++# FPGA Configuration Support
++#
++CONFIG_FPGA=m
++CONFIG_FPGA_MGR_ZYNQ_FPGA=m
++
++#
++# File systems
++#
++CONFIG_DCACHE_WORD_ACCESS=y
++# CONFIG_EXT2_FS is not set
++# CONFIG_EXT3_FS is not set
++CONFIG_EXT4_FS=y
++CONFIG_EXT4_USE_FOR_EXT2=y
++CONFIG_EXT4_FS_POSIX_ACL=y
++CONFIG_EXT4_FS_SECURITY=y
++CONFIG_EXT4_ENCRYPTION=m
++CONFIG_EXT4_FS_ENCRYPTION=y
++# CONFIG_EXT4_DEBUG is not set
++CONFIG_JBD2=y
++# CONFIG_JBD2_DEBUG is not set
++CONFIG_FS_MBCACHE=y
++CONFIG_REISERFS_FS=m
++# CONFIG_REISERFS_CHECK is not set
++# CONFIG_REISERFS_PROC_INFO is not set
++CONFIG_REISERFS_FS_XATTR=y
++CONFIG_REISERFS_FS_POSIX_ACL=y
++CONFIG_REISERFS_FS_SECURITY=y
++CONFIG_JFS_FS=m
++CONFIG_JFS_POSIX_ACL=y
++CONFIG_JFS_SECURITY=y
++# CONFIG_JFS_DEBUG is not set
++CONFIG_JFS_STATISTICS=y
++CONFIG_XFS_FS=m
++CONFIG_XFS_QUOTA=y
++CONFIG_XFS_POSIX_ACL=y
++CONFIG_XFS_RT=y
++# CONFIG_XFS_WARN is not set
++# CONFIG_XFS_DEBUG is not set
++CONFIG_GFS2_FS=m
++CONFIG_GFS2_FS_LOCKING_DLM=y
++CONFIG_OCFS2_FS=m
++CONFIG_OCFS2_FS_O2CB=m
++CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m
++CONFIG_OCFS2_FS_STATS=y
++CONFIG_OCFS2_DEBUG_MASKLOG=y
++# CONFIG_OCFS2_DEBUG_FS is not set
++CONFIG_BTRFS_FS=m
++CONFIG_BTRFS_FS_POSIX_ACL=y
++# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set
++# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set
++# CONFIG_BTRFS_DEBUG is not set
++# CONFIG_BTRFS_ASSERT is not set
++CONFIG_NILFS2_FS=m
++CONFIG_F2FS_FS=m
++CONFIG_F2FS_STAT_FS=y
++CONFIG_F2FS_FS_XATTR=y
++CONFIG_F2FS_FS_POSIX_ACL=y
++CONFIG_F2FS_FS_SECURITY=y
++# CONFIG_F2FS_CHECK_FS is not set
++CONFIG_F2FS_FS_ENCRYPTION=y
++# CONFIG_F2FS_IO_TRACE is not set
++CONFIG_FS_DAX=y
++CONFIG_FS_POSIX_ACL=y
++CONFIG_EXPORTFS=y
++CONFIG_FILE_LOCKING=y
++CONFIG_FSNOTIFY=y
++CONFIG_DNOTIFY=y
++CONFIG_INOTIFY_USER=y
++CONFIG_FANOTIFY=y
++CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
++CONFIG_QUOTA=y
++CONFIG_QUOTA_NETLINK_INTERFACE=y
++# CONFIG_PRINT_QUOTA_WARNING is not set
++# CONFIG_QUOTA_DEBUG is not set
++CONFIG_QUOTA_TREE=m
++CONFIG_QFMT_V1=m
++CONFIG_QFMT_V2=m
++CONFIG_QUOTACTL=y
++CONFIG_AUTOFS4_FS=m
++CONFIG_FUSE_FS=y
++CONFIG_CUSE=m
++CONFIG_OVERLAY_FS=m
++
++#
++# Caches
++#
++CONFIG_FSCACHE=m
++CONFIG_FSCACHE_STATS=y
++# CONFIG_FSCACHE_HISTOGRAM is not set
++# CONFIG_FSCACHE_DEBUG is not set
++# CONFIG_FSCACHE_OBJECT_LIST is not set
++CONFIG_CACHEFILES=m
++# CONFIG_CACHEFILES_DEBUG is not set
++# CONFIG_CACHEFILES_HISTOGRAM is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++CONFIG_ISO9660_FS=m
++CONFIG_JOLIET=y
++CONFIG_ZISOFS=y
++CONFIG_UDF_FS=m
++CONFIG_UDF_NLS=y
++
++#
++# DOS/FAT/NT Filesystems
++#
++CONFIG_FAT_FS=y
++CONFIG_MSDOS_FS=m
++CONFIG_VFAT_FS=y
++CONFIG_FAT_DEFAULT_CODEPAGE=437
++CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
++CONFIG_NTFS_FS=m
++# CONFIG_NTFS_DEBUG is not set
++# CONFIG_NTFS_RW is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_PROC_VMCORE=y
++CONFIG_PROC_SYSCTL=y
++CONFIG_PROC_PAGE_MONITOR=y
++CONFIG_PROC_CHILDREN=y
++CONFIG_KERNFS=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++CONFIG_TMPFS_POSIX_ACL=y
++CONFIG_TMPFS_XATTR=y
++CONFIG_HUGETLBFS=y
++CONFIG_HUGETLB_PAGE=y
++CONFIG_CONFIGFS_FS=m
++CONFIG_MISC_FILESYSTEMS=y
++CONFIG_ADFS_FS=m
++# CONFIG_ADFS_FS_RW is not set
++CONFIG_AFFS_FS=m
++CONFIG_ECRYPT_FS=y
++CONFIG_ECRYPT_FS_MESSAGING=y
++CONFIG_HFS_FS=m
++CONFIG_HFSPLUS_FS=m
++CONFIG_HFSPLUS_FS_POSIX_ACL=y
++CONFIG_BEFS_FS=m
++# CONFIG_BEFS_DEBUG is not set
++CONFIG_BFS_FS=m
++CONFIG_EFS_FS=m
++CONFIG_JFFS2_FS=m
++CONFIG_JFFS2_FS_DEBUG=0
++CONFIG_JFFS2_FS_WRITEBUFFER=y
++# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
++# CONFIG_JFFS2_SUMMARY is not set
++CONFIG_JFFS2_FS_XATTR=y
++CONFIG_JFFS2_FS_POSIX_ACL=y
++CONFIG_JFFS2_FS_SECURITY=y
++CONFIG_JFFS2_COMPRESSION_OPTIONS=y
++CONFIG_JFFS2_ZLIB=y
++CONFIG_JFFS2_LZO=y
++CONFIG_JFFS2_RTIME=y
++# CONFIG_JFFS2_RUBIN is not set
++# CONFIG_JFFS2_CMODE_NONE is not set
++# CONFIG_JFFS2_CMODE_PRIORITY is not set
++# CONFIG_JFFS2_CMODE_SIZE is not set
++CONFIG_JFFS2_CMODE_FAVOURLZO=y
++CONFIG_UBIFS_FS=m
++# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set
++CONFIG_UBIFS_FS_LZO=y
++CONFIG_UBIFS_FS_ZLIB=y
++CONFIG_UBIFS_ATIME_SUPPORT=y
++# CONFIG_LOGFS is not set
++CONFIG_CRAMFS=m
++CONFIG_SQUASHFS=m
++# CONFIG_SQUASHFS_FILE_CACHE is not set
++CONFIG_SQUASHFS_FILE_DIRECT=y
++# CONFIG_SQUASHFS_DECOMP_SINGLE is not set
++# CONFIG_SQUASHFS_DECOMP_MULTI is not set
++CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y
++CONFIG_SQUASHFS_XATTR=y
++CONFIG_SQUASHFS_ZLIB=y
++CONFIG_SQUASHFS_LZ4=y
++CONFIG_SQUASHFS_LZO=y
++CONFIG_SQUASHFS_XZ=y
++# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set
++# CONFIG_SQUASHFS_EMBEDDED is not set
++CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
++CONFIG_VXFS_FS=m
++CONFIG_MINIX_FS=m
++CONFIG_OMFS_FS=m
++CONFIG_HPFS_FS=m
++CONFIG_QNX4FS_FS=m
++CONFIG_QNX6FS_FS=m
++# CONFIG_QNX6FS_DEBUG is not set
++CONFIG_ROMFS_FS=m
++CONFIG_ROMFS_BACKED_BY_BLOCK=y
++# CONFIG_ROMFS_BACKED_BY_MTD is not set
++# CONFIG_ROMFS_BACKED_BY_BOTH is not set
++CONFIG_ROMFS_ON_BLOCK=y
++CONFIG_PSTORE=y
++# CONFIG_PSTORE_CONSOLE is not set
++# CONFIG_PSTORE_PMSG is not set
++# CONFIG_PSTORE_FTRACE is not set
++CONFIG_PSTORE_RAM=m
++CONFIG_SYSV_FS=m
++CONFIG_UFS_FS=m
++# CONFIG_UFS_FS_WRITE is not set
++# CONFIG_UFS_DEBUG is not set
++CONFIG_EXOFS_FS=m
++# CONFIG_EXOFS_DEBUG is not set
++CONFIG_ORE=m
++CONFIG_NETWORK_FILESYSTEMS=y
++CONFIG_NFS_FS=m
++CONFIG_NFS_V2=m
++CONFIG_NFS_V3=m
++CONFIG_NFS_V3_ACL=y
++CONFIG_NFS_V4=m
++CONFIG_NFS_SWAP=y
++CONFIG_NFS_V4_1=y
++CONFIG_NFS_V4_2=y
++CONFIG_PNFS_FILE_LAYOUT=m
++CONFIG_PNFS_BLOCK=m
++CONFIG_PNFS_OBJLAYOUT=m
++CONFIG_PNFS_FLEXFILE_LAYOUT=m
++CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org"
++CONFIG_NFS_V4_1_MIGRATION=y
++CONFIG_NFS_V4_SECURITY_LABEL=y
++CONFIG_NFS_FSCACHE=y
++# CONFIG_NFS_USE_LEGACY_DNS is not set
++CONFIG_NFS_USE_KERNEL_DNS=y
++CONFIG_NFS_DEBUG=y
++CONFIG_NFSD=m
++CONFIG_NFSD_V2_ACL=y
++CONFIG_NFSD_V3=y
++CONFIG_NFSD_V3_ACL=y
++CONFIG_NFSD_V4=y
++CONFIG_NFSD_PNFS=y
++CONFIG_NFSD_V4_SECURITY_LABEL=y
++# CONFIG_NFSD_FAULT_INJECTION is not set
++CONFIG_GRACE_PERIOD=m
++CONFIG_LOCKD=m
++CONFIG_LOCKD_V4=y
++CONFIG_NFS_ACL_SUPPORT=m
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=m
++CONFIG_SUNRPC_GSS=m
++CONFIG_SUNRPC_BACKCHANNEL=y
++CONFIG_SUNRPC_SWAP=y
++CONFIG_RPCSEC_GSS_KRB5=m
++CONFIG_SUNRPC_DEBUG=y
++CONFIG_SUNRPC_XPRT_RDMA=m
++CONFIG_CEPH_FS=m
++CONFIG_CEPH_FSCACHE=y
++CONFIG_CEPH_FS_POSIX_ACL=y
++CONFIG_CIFS=m
++CONFIG_CIFS_STATS=y
++# CONFIG_CIFS_STATS2 is not set
++CONFIG_CIFS_WEAK_PW_HASH=y
++CONFIG_CIFS_UPCALL=y
++CONFIG_CIFS_XATTR=y
++CONFIG_CIFS_POSIX=y
++CONFIG_CIFS_ACL=y
++CONFIG_CIFS_DEBUG=y
++# CONFIG_CIFS_DEBUG2 is not set
++CONFIG_CIFS_DFS_UPCALL=y
++CONFIG_CIFS_SMB2=y
++CONFIG_CIFS_SMB311=y
++CONFIG_CIFS_FSCACHE=y
++CONFIG_NCP_FS=m
++CONFIG_NCPFS_PACKET_SIGNING=y
++CONFIG_NCPFS_IOCTL_LOCKING=y
++CONFIG_NCPFS_STRONG=y
++CONFIG_NCPFS_NFS_NS=y
++CONFIG_NCPFS_OS2_NS=y
++# CONFIG_NCPFS_SMALLDOS is not set
++CONFIG_NCPFS_NLS=y
++CONFIG_NCPFS_EXTRAS=y
++CONFIG_CODA_FS=m
++CONFIG_AFS_FS=m
++# CONFIG_AFS_DEBUG is not set
++CONFIG_AFS_FSCACHE=y
++CONFIG_9P_FS=m
++CONFIG_9P_FSCACHE=y
++CONFIG_9P_FS_POSIX_ACL=y
++CONFIG_9P_FS_SECURITY=y
++CONFIG_NLS=y
++CONFIG_NLS_DEFAULT="utf8"
++CONFIG_NLS_CODEPAGE_437=y
++CONFIG_NLS_CODEPAGE_737=m
++CONFIG_NLS_CODEPAGE_775=m
++CONFIG_NLS_CODEPAGE_850=m
++CONFIG_NLS_CODEPAGE_852=m
++CONFIG_NLS_CODEPAGE_855=m
++CONFIG_NLS_CODEPAGE_857=m
++CONFIG_NLS_CODEPAGE_860=m
++CONFIG_NLS_CODEPAGE_861=m
++CONFIG_NLS_CODEPAGE_862=m
++CONFIG_NLS_CODEPAGE_863=m
++CONFIG_NLS_CODEPAGE_864=m
++CONFIG_NLS_CODEPAGE_865=m
++CONFIG_NLS_CODEPAGE_866=m
++CONFIG_NLS_CODEPAGE_869=m
++CONFIG_NLS_CODEPAGE_936=m
++CONFIG_NLS_CODEPAGE_950=m
++CONFIG_NLS_CODEPAGE_932=m
++CONFIG_NLS_CODEPAGE_949=m
++CONFIG_NLS_CODEPAGE_874=m
++CONFIG_NLS_ISO8859_8=m
++CONFIG_NLS_CODEPAGE_1250=m
++CONFIG_NLS_CODEPAGE_1251=m
++CONFIG_NLS_ASCII=m
++CONFIG_NLS_ISO8859_1=m
++CONFIG_NLS_ISO8859_2=m
++CONFIG_NLS_ISO8859_3=m
++CONFIG_NLS_ISO8859_4=m
++CONFIG_NLS_ISO8859_5=m
++CONFIG_NLS_ISO8859_6=m
++CONFIG_NLS_ISO8859_7=m
++CONFIG_NLS_ISO8859_9=m
++CONFIG_NLS_ISO8859_13=m
++CONFIG_NLS_ISO8859_14=m
++CONFIG_NLS_ISO8859_15=m
++CONFIG_NLS_KOI8_R=m
++CONFIG_NLS_KOI8_U=m
++CONFIG_NLS_MAC_ROMAN=m
++CONFIG_NLS_MAC_CELTIC=m
++CONFIG_NLS_MAC_CENTEURO=m
++CONFIG_NLS_MAC_CROATIAN=m
++CONFIG_NLS_MAC_CYRILLIC=m
++CONFIG_NLS_MAC_GAELIC=m
++CONFIG_NLS_MAC_GREEK=m
++CONFIG_NLS_MAC_ICELAND=m
++CONFIG_NLS_MAC_INUIT=m
++CONFIG_NLS_MAC_ROMANIAN=m
++CONFIG_NLS_MAC_TURKISH=m
++CONFIG_NLS_UTF8=m
++CONFIG_DLM=m
++# CONFIG_DLM_DEBUG is not set
++CONFIG_BINARY_PRINTF=y
++
++#
++# Library routines
++#
++CONFIG_RAID6_PQ=m
++CONFIG_BITREVERSE=y
++# CONFIG_HAVE_ARCH_BITREVERSE is not set
++CONFIG_RATIONAL=y
++CONFIG_GENERIC_STRNCPY_FROM_USER=y
++CONFIG_GENERIC_STRNLEN_USER=y
++CONFIG_GENERIC_NET_UTILS=y
++CONFIG_GENERIC_PCI_IOMAP=y
++CONFIG_GENERIC_IOMAP=y
++CONFIG_GENERIC_IO=y
++CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y
++CONFIG_ARCH_HAS_FAST_MULTIPLIER=y
++CONFIG_CRC_CCITT=m
++CONFIG_CRC16=y
++CONFIG_CRC_T10DIF=y
++CONFIG_CRC_ITU_T=m
++CONFIG_CRC32=y
++# CONFIG_CRC32_SELFTEST is not set
++CONFIG_CRC32_SLICEBY8=y
++# CONFIG_CRC32_SLICEBY4 is not set
++# CONFIG_CRC32_SARWATE is not set
++# CONFIG_CRC32_BIT is not set
++CONFIG_CRC7=m
++CONFIG_LIBCRC32C=m
++CONFIG_CRC8=m
++# CONFIG_AUDIT_ARCH_COMPAT_GENERIC is not set
++# CONFIG_RANDOM32_SELFTEST is not set
++CONFIG_842_COMPRESS=m
++CONFIG_842_DECOMPRESS=m
++CONFIG_ZLIB_INFLATE=y
++CONFIG_ZLIB_DEFLATE=y
++CONFIG_LZO_COMPRESS=y
++CONFIG_LZO_DECOMPRESS=y
++CONFIG_LZ4_COMPRESS=m
++CONFIG_LZ4HC_COMPRESS=m
++CONFIG_LZ4_DECOMPRESS=y
++CONFIG_XZ_DEC=y
++CONFIG_XZ_DEC_X86=y
++CONFIG_XZ_DEC_POWERPC=y
++CONFIG_XZ_DEC_IA64=y
++CONFIG_XZ_DEC_ARM=y
++CONFIG_XZ_DEC_ARMTHUMB=y
++CONFIG_XZ_DEC_SPARC=y
++CONFIG_XZ_DEC_BCJ=y
++CONFIG_XZ_DEC_TEST=m
++CONFIG_DECOMPRESS_GZIP=y
++CONFIG_DECOMPRESS_BZIP2=y
++CONFIG_DECOMPRESS_LZMA=y
++CONFIG_DECOMPRESS_XZ=y
++CONFIG_DECOMPRESS_LZO=y
++CONFIG_DECOMPRESS_LZ4=y
++CONFIG_GENERIC_ALLOCATOR=y
++CONFIG_REED_SOLOMON=m
++CONFIG_REED_SOLOMON_ENC8=y
++CONFIG_REED_SOLOMON_DEC8=y
++CONFIG_REED_SOLOMON_DEC16=y
++CONFIG_BCH=m
++CONFIG_BCH_CONST_PARAMS=y
++CONFIG_TEXTSEARCH=y
++CONFIG_TEXTSEARCH_KMP=m
++CONFIG_TEXTSEARCH_BM=m
++CONFIG_TEXTSEARCH_FSM=m
++CONFIG_BTREE=y
++CONFIG_INTERVAL_TREE=y
++CONFIG_ASSOCIATIVE_ARRAY=y
++CONFIG_HAS_IOMEM=y
++CONFIG_HAS_IOPORT_MAP=y
++CONFIG_HAS_DMA=y
++CONFIG_CHECK_SIGNATURE=y
++CONFIG_CPU_RMAP=y
++CONFIG_DQL=y
++CONFIG_GLOB=y
++# CONFIG_GLOB_SELFTEST is not set
++CONFIG_NLATTR=y
++CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE=y
++CONFIG_LRU_CACHE=m
++CONFIG_CLZ_TAB=y
++CONFIG_CORDIC=m
++CONFIG_DDR=y
++CONFIG_MPILIB=y
++CONFIG_SIGNATURE=y
++CONFIG_LIBFDT=y
++CONFIG_OID_REGISTRY=y
++CONFIG_FONT_SUPPORT=y
++# CONFIG_FONTS is not set
++CONFIG_FONT_8x8=y
++CONFIG_FONT_8x16=y
++# CONFIG_SG_SPLIT is not set
++CONFIG_ARCH_HAS_SG_CHAIN=y
++
++#
++# Kernel hacking
++#
++
++#
++# printk and dmesg options
++#
++CONFIG_PRINTK_TIME=y
++CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4
++CONFIG_DYNAMIC_DEBUG=y
++
++#
++# Compile-time checks and compiler options
++#
++CONFIG_DEBUG_INFO=y
++# CONFIG_DEBUG_INFO_REDUCED is not set
++# CONFIG_DEBUG_INFO_SPLIT is not set
++CONFIG_DEBUG_INFO_DWARF4=y
++CONFIG_GDB_SCRIPTS=y
++# CONFIG_ENABLE_WARN_DEPRECATED is not set
++# CONFIG_ENABLE_MUST_CHECK is not set
++CONFIG_FRAME_WARN=2048
++# CONFIG_STRIP_ASM_SYMS is not set
++# CONFIG_READABLE_ASM is not set
++CONFIG_UNUSED_SYMBOLS=y
++# CONFIG_PAGE_OWNER is not set
++CONFIG_DEBUG_FS=y
++# CONFIG_HEADERS_CHECK is not set
++# CONFIG_DEBUG_SECTION_MISMATCH is not set
++CONFIG_SECTION_MISMATCH_WARN_ONLY=y
++# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
++CONFIG_MAGIC_SYSRQ=y
++CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1
++CONFIG_DEBUG_KERNEL=y
++
++#
++# Memory Debugging
++#
++# CONFIG_PAGE_EXTENSION is not set
++# CONFIG_DEBUG_PAGEALLOC is not set
++# CONFIG_DEBUG_OBJECTS is not set
++# CONFIG_SLUB_DEBUG_ON is not set
++# CONFIG_SLUB_STATS is not set
++CONFIG_HAVE_DEBUG_KMEMLEAK=y
++# CONFIG_DEBUG_KMEMLEAK is not set
++# CONFIG_DEBUG_STACK_USAGE is not set
++# CONFIG_DEBUG_VM is not set
++# CONFIG_DEBUG_MEMORY_INIT is not set
++CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
++# CONFIG_DEBUG_PER_CPU_MAPS is not set
++CONFIG_HAVE_DEBUG_STACKOVERFLOW=y
++# CONFIG_DEBUG_STACKOVERFLOW is not set
++# CONFIG_DEBUG_SHIRQ is not set
++
++#
++# Debug Lockups and Hangs
++#
++CONFIG_LOCKUP_DETECTOR=y
++CONFIG_HARDLOCKUP_DETECTOR=y
++# CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is not set
++CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=0
++# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
++CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
++CONFIG_DETECT_HUNG_TASK=y
++CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120
++# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
++CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
++# CONFIG_PANIC_ON_OOPS is not set
++CONFIG_PANIC_ON_OOPS_VALUE=0
++CONFIG_SCHED_DEBUG=y
++CONFIG_SCHED_INFO=y
++CONFIG_SCHEDSTATS=y
++CONFIG_SCHED_STACK_END_CHECK=y
++# CONFIG_DEBUG_TIMEKEEPING is not set
++CONFIG_TIMER_STATS=y
++
++#
++# Lock Debugging (spinlocks, mutexes, etc...)
++#
++# CONFIG_DEBUG_RT_MUTEXES is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++# CONFIG_DEBUG_MUTEXES is not set
++# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set
++# CONFIG_DEBUG_LOCK_ALLOC is not set
++# CONFIG_PROVE_LOCKING is not set
++# CONFIG_LOCK_STAT is not set
++# CONFIG_DEBUG_ATOMIC_SLEEP is not set
++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
++CONFIG_LOCK_TORTURE_TEST=m
++CONFIG_STACKTRACE=y
++# CONFIG_DEBUG_KOBJECT is not set
++CONFIG_DEBUG_BUGVERBOSE=y
++# CONFIG_DEBUG_LIST is not set
++# CONFIG_DEBUG_PI_LIST is not set
++# CONFIG_DEBUG_SG is not set
++# CONFIG_DEBUG_NOTIFIERS is not set
++# CONFIG_DEBUG_CREDENTIALS is not set
++
++#
++# RCU Debugging
++#
++# CONFIG_PROVE_RCU is not set
++# CONFIG_SPARSE_RCU_POINTER is not set
++CONFIG_TORTURE_TEST=m
++# CONFIG_RCU_TORTURE_TEST is not set
++CONFIG_RCU_CPU_STALL_TIMEOUT=21
++# CONFIG_RCU_TRACE is not set
++# CONFIG_RCU_EQS_DEBUG is not set
++# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
++CONFIG_NOTIFIER_ERROR_INJECTION=m
++CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
++CONFIG_PM_NOTIFIER_ERROR_INJECT=m
++# CONFIG_OF_RECONFIG_NOTIFIER_ERROR_INJECT is not set
++# CONFIG_FAULT_INJECTION is not set
++# CONFIG_LATENCYTOP is not set
++CONFIG_NOP_TRACER=y
++CONFIG_HAVE_FUNCTION_TRACER=y
++CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
++CONFIG_HAVE_DYNAMIC_FTRACE=y
++CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
++CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
++CONFIG_TRACER_MAX_TRACE=y
++CONFIG_TRACE_CLOCK=y
++CONFIG_RING_BUFFER=y
++CONFIG_EVENT_TRACING=y
++CONFIG_CONTEXT_SWITCH_TRACER=y
++CONFIG_RING_BUFFER_ALLOW_SWAP=y
++CONFIG_TRACING=y
++CONFIG_GENERIC_TRACER=y
++CONFIG_TRACING_SUPPORT=y
++CONFIG_FTRACE=y
++CONFIG_FUNCTION_TRACER=y
++CONFIG_FUNCTION_GRAPH_TRACER=y
++# CONFIG_IRQSOFF_TRACER is not set
++CONFIG_SCHED_TRACER=y
++CONFIG_FTRACE_SYSCALLS=y
++CONFIG_TRACER_SNAPSHOT=y
++# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set
++CONFIG_BRANCH_PROFILE_NONE=y
++# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
++# CONFIG_PROFILE_ALL_BRANCHES is not set
++CONFIG_STACK_TRACER=y
++CONFIG_BLK_DEV_IO_TRACE=y
++CONFIG_KPROBE_EVENT=y
++CONFIG_UPROBE_EVENT=y
++CONFIG_BPF_EVENTS=y
++CONFIG_PROBE_EVENTS=y
++CONFIG_DYNAMIC_FTRACE=y
++CONFIG_FUNCTION_PROFILER=y
++CONFIG_FTRACE_MCOUNT_RECORD=y
++# CONFIG_FTRACE_STARTUP_TEST is not set
++# CONFIG_TRACEPOINT_BENCHMARK is not set
++# CONFIG_RING_BUFFER_BENCHMARK is not set
++# CONFIG_RING_BUFFER_STARTUP_TEST is not set
++# CONFIG_TRACE_ENUM_MAP_FILE is not set
++CONFIG_TRACING_EVENTS_GPIO=y
++
++#
++# Runtime Testing
++#
++# CONFIG_LKDTM is not set
++# CONFIG_TEST_LIST_SORT is not set
++# CONFIG_KPROBES_SANITY_TEST is not set
++# CONFIG_BACKTRACE_SELF_TEST is not set
++CONFIG_RBTREE_TEST=m
++CONFIG_INTERVAL_TREE_TEST=m
++CONFIG_PERCPU_TEST=m
++# CONFIG_ATOMIC64_SELFTEST is not set
++CONFIG_ASYNC_RAID6_TEST=m
++CONFIG_TEST_HEXDUMP=m
++CONFIG_TEST_STRING_HELPERS=m
++CONFIG_TEST_KSTRTOX=m
++CONFIG_TEST_PRINTF=m
++# CONFIG_TEST_RHASHTABLE is not set
++# CONFIG_DMA_API_DEBUG is not set
++CONFIG_TEST_LKM=m
++CONFIG_TEST_USER_COPY=m
++CONFIG_TEST_BPF=m
++CONFIG_TEST_FIRMWARE=m
++CONFIG_TEST_UDELAY=m
++CONFIG_MEMTEST=y
++CONFIG_TEST_STATIC_KEYS=m
++# CONFIG_SAMPLES is not set
++CONFIG_HAVE_ARCH_KGDB=y
++CONFIG_KGDB=y
++CONFIG_KGDB_SERIAL_CONSOLE=y
++# CONFIG_KGDB_TESTS is not set
++CONFIG_KGDB_KDB=y
++CONFIG_KDB_DEFAULT_ENABLE=0x1
++CONFIG_KDB_KEYBOARD=y
++CONFIG_KDB_CONTINUE_CATASTROPHIC=0
++# CONFIG_PPC_DISABLE_WERROR is not set
++CONFIG_PPC_WERROR=y
++# CONFIG_STRICT_MM_TYPECHECKS is not set
++CONFIG_PRINT_STACK_DEPTH=64
++# CONFIG_HCALL_STATS is not set
++# CONFIG_PPC_EMULATED_STATS is not set
++# CONFIG_CODE_PATCHING_SELFTEST is not set
++# CONFIG_FTR_FIXUP_SELFTEST is not set
++# CONFIG_MSI_BITMAP_SELFTEST is not set
++CONFIG_XMON=y
++# CONFIG_XMON_DEFAULT is not set
++CONFIG_XMON_DISASSEMBLY=y
++CONFIG_DEBUGGER=y
++# CONFIG_BOOTX_TEXT is not set
++# CONFIG_PPC_EARLY_DEBUG is not set
++CONFIG_STRICT_DEVMEM=y
++
++#
++# Security options
++#
++CONFIG_KEYS=y
++CONFIG_PERSISTENT_KEYRINGS=y
++CONFIG_BIG_KEYS=y
++CONFIG_TRUSTED_KEYS=y
++CONFIG_ENCRYPTED_KEYS=y
++# CONFIG_SECURITY_DMESG_RESTRICT is not set
++CONFIG_SECURITY=y
++CONFIG_SECURITYFS=y
++CONFIG_SECURITY_NETWORK=y
++CONFIG_SECURITY_NETWORK_XFRM=y
++CONFIG_SECURITY_PATH=y
++CONFIG_LSM_MMAP_MIN_ADDR=0
++CONFIG_SECURITY_SELINUX=y
++CONFIG_SECURITY_SELINUX_BOOTPARAM=y
++CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
++CONFIG_SECURITY_SELINUX_DISABLE=y
++CONFIG_SECURITY_SELINUX_DEVELOP=y
++CONFIG_SECURITY_SELINUX_AVC_STATS=y
++CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
++# CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX is not set
++CONFIG_SECURITY_SMACK=y
++# CONFIG_SECURITY_SMACK_BRINGUP is not set
++CONFIG_SECURITY_SMACK_NETFILTER=y
++CONFIG_SECURITY_TOMOYO=y
++CONFIG_SECURITY_TOMOYO_MAX_ACCEPT_ENTRY=2048
++CONFIG_SECURITY_TOMOYO_MAX_AUDIT_LOG=1024
++# CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER is not set
++CONFIG_SECURITY_TOMOYO_POLICY_LOADER="/sbin/tomoyo-init"
++CONFIG_SECURITY_TOMOYO_ACTIVATION_TRIGGER="/sbin/init"
++CONFIG_SECURITY_APPARMOR=y
++CONFIG_SECURITY_APPARMOR_BOOTPARAM_VALUE=1
++CONFIG_SECURITY_APPARMOR_HASH=y
++CONFIG_SECURITY_YAMA=y
++CONFIG_INTEGRITY=y
++CONFIG_INTEGRITY_SIGNATURE=y
++CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
++CONFIG_INTEGRITY_AUDIT=y
++CONFIG_IMA=y
++CONFIG_IMA_MEASURE_PCR_IDX=10
++CONFIG_IMA_LSM_RULES=y
++# CONFIG_IMA_TEMPLATE is not set
++CONFIG_IMA_NG_TEMPLATE=y
++# CONFIG_IMA_SIG_TEMPLATE is not set
++CONFIG_IMA_DEFAULT_TEMPLATE="ima-ng"
++CONFIG_IMA_DEFAULT_HASH_SHA1=y
++# CONFIG_IMA_DEFAULT_HASH_SHA256 is not set
++# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set
++# CONFIG_IMA_DEFAULT_HASH_WP512 is not set
++CONFIG_IMA_DEFAULT_HASH="sha1"
++CONFIG_IMA_APPRAISE=y
++CONFIG_IMA_TRUSTED_KEYRING=y
++# CONFIG_IMA_LOAD_X509 is not set
++CONFIG_EVM=y
++CONFIG_EVM_ATTR_FSUUID=y
++CONFIG_EVM_EXTRA_SMACK_XATTRS=y
++# CONFIG_DEFAULT_SECURITY_SELINUX is not set
++# CONFIG_DEFAULT_SECURITY_SMACK is not set
++# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
++CONFIG_DEFAULT_SECURITY_APPARMOR=y
++# CONFIG_DEFAULT_SECURITY_DAC is not set
++CONFIG_DEFAULT_SECURITY="apparmor"
++CONFIG_KEYS_COMPAT=y
++CONFIG_XOR_BLOCKS=m
++CONFIG_ASYNC_CORE=m
++CONFIG_ASYNC_MEMCPY=m
++CONFIG_ASYNC_XOR=m
++CONFIG_ASYNC_PQ=m
++CONFIG_ASYNC_RAID6_RECOV=m
++CONFIG_CRYPTO=y
++
++#
++# Crypto core or helper
++#
++CONFIG_CRYPTO_ALGAPI=y
++CONFIG_CRYPTO_ALGAPI2=y
++CONFIG_CRYPTO_AEAD=m
++CONFIG_CRYPTO_AEAD2=y
++CONFIG_CRYPTO_BLKCIPHER=y
++CONFIG_CRYPTO_BLKCIPHER2=y
++CONFIG_CRYPTO_HASH=y
++CONFIG_CRYPTO_HASH2=y
++CONFIG_CRYPTO_RNG=y
++CONFIG_CRYPTO_RNG2=y
++CONFIG_CRYPTO_RNG_DEFAULT=m
++CONFIG_CRYPTO_PCOMP=m
++CONFIG_CRYPTO_PCOMP2=y
++CONFIG_CRYPTO_AKCIPHER2=y
++CONFIG_CRYPTO_AKCIPHER=y
++CONFIG_CRYPTO_RSA=y
++CONFIG_CRYPTO_MANAGER=y
++CONFIG_CRYPTO_MANAGER2=y
++CONFIG_CRYPTO_USER=m
++CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
++CONFIG_CRYPTO_GF128MUL=m
++CONFIG_CRYPTO_NULL=m
++CONFIG_CRYPTO_NULL2=y
++CONFIG_CRYPTO_PCRYPT=m
++CONFIG_CRYPTO_WORKQUEUE=y
++CONFIG_CRYPTO_CRYPTD=m
++CONFIG_CRYPTO_MCRYPTD=m
++CONFIG_CRYPTO_AUTHENC=m
++CONFIG_CRYPTO_TEST=m
++
++#
++# Authenticated Encryption with Associated Data
++#
++CONFIG_CRYPTO_CCM=m
++CONFIG_CRYPTO_GCM=m
++CONFIG_CRYPTO_CHACHA20POLY1305=m
++CONFIG_CRYPTO_SEQIV=m
++CONFIG_CRYPTO_ECHAINIV=m
++
++#
++# Block modes
++#
++CONFIG_CRYPTO_CBC=y
++CONFIG_CRYPTO_CTR=m
++CONFIG_CRYPTO_CTS=m
++CONFIG_CRYPTO_ECB=y
++CONFIG_CRYPTO_LRW=m
++CONFIG_CRYPTO_PCBC=m
++CONFIG_CRYPTO_XTS=m
++CONFIG_CRYPTO_KEYWRAP=m
++
++#
++# Hash modes
++#
++CONFIG_CRYPTO_CMAC=m
++CONFIG_CRYPTO_HMAC=y
++CONFIG_CRYPTO_XCBC=m
++CONFIG_CRYPTO_VMAC=m
++
++#
++# Digest
++#
++CONFIG_CRYPTO_CRC32C=y
++CONFIG_CRYPTO_CRC32=m
++CONFIG_CRYPTO_CRCT10DIF=y
++CONFIG_CRYPTO_GHASH=m
++CONFIG_CRYPTO_POLY1305=m
++CONFIG_CRYPTO_MD4=m
++CONFIG_CRYPTO_MD5=y
++CONFIG_CRYPTO_MD5_PPC=m
++CONFIG_CRYPTO_MICHAEL_MIC=m
++CONFIG_CRYPTO_RMD128=m
++CONFIG_CRYPTO_RMD160=m
++CONFIG_CRYPTO_RMD256=m
++CONFIG_CRYPTO_RMD320=m
++CONFIG_CRYPTO_SHA1=y
++CONFIG_CRYPTO_SHA1_PPC=m
++CONFIG_CRYPTO_SHA256=y
++CONFIG_CRYPTO_SHA512=y
++CONFIG_CRYPTO_TGR192=m
++CONFIG_CRYPTO_WP512=m
++
++#
++# Ciphers
++#
++CONFIG_CRYPTO_AES=y
++CONFIG_CRYPTO_ANUBIS=m
++CONFIG_CRYPTO_ARC4=m
++CONFIG_CRYPTO_BLOWFISH=m
++CONFIG_CRYPTO_BLOWFISH_COMMON=m
++CONFIG_CRYPTO_CAMELLIA=m
++CONFIG_CRYPTO_CAST_COMMON=m
++CONFIG_CRYPTO_CAST5=m
++CONFIG_CRYPTO_CAST6=m
++CONFIG_CRYPTO_DES=m
++CONFIG_CRYPTO_FCRYPT=m
++CONFIG_CRYPTO_KHAZAD=m
++CONFIG_CRYPTO_SALSA20=m
++CONFIG_CRYPTO_CHACHA20=m
++CONFIG_CRYPTO_SEED=m
++CONFIG_CRYPTO_SERPENT=m
++CONFIG_CRYPTO_TEA=m
++CONFIG_CRYPTO_TWOFISH=m
++CONFIG_CRYPTO_TWOFISH_COMMON=m
++
++#
++# Compression
++#
++CONFIG_CRYPTO_DEFLATE=m
++CONFIG_CRYPTO_ZLIB=m
++CONFIG_CRYPTO_LZO=y
++CONFIG_CRYPTO_842=m
++CONFIG_CRYPTO_LZ4=m
++CONFIG_CRYPTO_LZ4HC=m
++
++#
++# Random Number Generation
++#
++CONFIG_CRYPTO_ANSI_CPRNG=m
++CONFIG_CRYPTO_DRBG_MENU=m
++CONFIG_CRYPTO_DRBG_HMAC=y
++CONFIG_CRYPTO_DRBG_HASH=y
++CONFIG_CRYPTO_DRBG_CTR=y
++CONFIG_CRYPTO_DRBG=m
++CONFIG_CRYPTO_JITTERENTROPY=m
++CONFIG_CRYPTO_USER_API=m
++CONFIG_CRYPTO_USER_API_HASH=m
++CONFIG_CRYPTO_USER_API_SKCIPHER=m
++CONFIG_CRYPTO_USER_API_RNG=m
++CONFIG_CRYPTO_USER_API_AEAD=m
++CONFIG_CRYPTO_HASH_INFO=y
++CONFIG_CRYPTO_HW=y
++CONFIG_CRYPTO_DEV_NX=y
++CONFIG_CRYPTO_DEV_NX_COMPRESS=m
++CONFIG_CRYPTO_DEV_NX_COMPRESS_PSERIES=m
++CONFIG_CRYPTO_DEV_NX_COMPRESS_POWERNV=m
++CONFIG_CRYPTO_DEV_VMX=y
++CONFIG_CRYPTO_DEV_VMX_ENCRYPT=m
++CONFIG_ASYMMETRIC_KEY_TYPE=y
++CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
++CONFIG_PUBLIC_KEY_ALGO_RSA=y
++CONFIG_X509_CERTIFICATE_PARSER=y
++CONFIG_PKCS7_MESSAGE_PARSER=y
++CONFIG_PKCS7_TEST_KEY=m
++CONFIG_SIGNED_PE_FILE_VERIFICATION=y
++
++#
++# Certificates for signature checking
++#
++CONFIG_MODULE_SIG_KEY="certs/signing_key.pem"
++CONFIG_SYSTEM_TRUSTED_KEYRING=y
++CONFIG_SYSTEM_TRUSTED_KEYS=""
++CONFIG_HAVE_KVM_IRQCHIP=y
++CONFIG_HAVE_KVM_IRQFD=y
++CONFIG_HAVE_KVM_EVENTFD=y
++CONFIG_KVM_MMIO=y
++CONFIG_KVM_COMPAT=y
++CONFIG_VIRTUALIZATION=y
++CONFIG_KVM=y
++CONFIG_KVM_BOOK3S_HANDLER=y
++CONFIG_KVM_BOOK3S_64_HANDLER=y
++CONFIG_KVM_BOOK3S_PR_POSSIBLE=y
++CONFIG_KVM_BOOK3S_HV_POSSIBLE=y
++CONFIG_KVM_BOOK3S_64=m
++CONFIG_KVM_BOOK3S_64_HV=m
++CONFIG_KVM_BOOK3S_64_PR=m
++# CONFIG_KVM_BOOK3S_HV_EXIT_TIMING is not set
++CONFIG_KVM_XICS=y
+diff --git a/arch/x86/configs/rock-dbg_defconfig b/arch/x86/configs/rock-dbg_defconfig
+new file mode 100644
+index 0000000..04e0db27
+--- /dev/null
++++ b/arch/x86/configs/rock-dbg_defconfig
+@@ -0,0 +1,4293 @@
++#
++# Automatically generated file; DO NOT EDIT.
++# Linux/x86 4.16.0-rc7 Kernel Configuration
++#
++CONFIG_64BIT=y
++CONFIG_X86_64=y
++CONFIG_X86=y
++CONFIG_INSTRUCTION_DECODER=y
++CONFIG_OUTPUT_FORMAT="elf64-x86-64"
++CONFIG_ARCH_DEFCONFIG="arch/x86/configs/x86_64_defconfig"
++CONFIG_LOCKDEP_SUPPORT=y
++CONFIG_STACKTRACE_SUPPORT=y
++CONFIG_MMU=y
++CONFIG_ARCH_MMAP_RND_BITS_MIN=28
++CONFIG_ARCH_MMAP_RND_BITS_MAX=32
++CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8
++CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16
++CONFIG_NEED_DMA_MAP_STATE=y
++CONFIG_NEED_SG_DMA_LENGTH=y
++CONFIG_GENERIC_ISA_DMA=y
++CONFIG_GENERIC_BUG=y
++CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_ARCH_MAY_HAVE_PC_FDC=y
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_ARCH_HAS_CPU_RELAX=y
++CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
++CONFIG_HAVE_SETUP_PER_CPU_AREA=y
++CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y
++CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y
++CONFIG_ARCH_HIBERNATION_POSSIBLE=y
++CONFIG_ARCH_SUSPEND_POSSIBLE=y
++CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y
++CONFIG_ARCH_WANT_GENERAL_HUGETLB=y
++CONFIG_ZONE_DMA32=y
++CONFIG_AUDIT_ARCH=y
++CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
++CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
++CONFIG_X86_64_SMP=y
++CONFIG_ARCH_SUPPORTS_UPROBES=y
++CONFIG_FIX_EARLYCON_MEM=y
++CONFIG_PGTABLE_LEVELS=4
++CONFIG_IRQ_WORK=y
++CONFIG_BUILDTIME_EXTABLE_SORT=y
++CONFIG_THREAD_INFO_IN_TASK=y
++
++#
++# General setup
++#
++CONFIG_INIT_ENV_ARG_LIMIT=32
++CONFIG_CROSS_COMPILE=""
++# CONFIG_COMPILE_TEST is not set
++CONFIG_LOCALVERSION="-kfd"
++# CONFIG_LOCALVERSION_AUTO is not set
++CONFIG_HAVE_KERNEL_GZIP=y
++CONFIG_HAVE_KERNEL_BZIP2=y
++CONFIG_HAVE_KERNEL_LZMA=y
++CONFIG_HAVE_KERNEL_XZ=y
++CONFIG_HAVE_KERNEL_LZO=y
++CONFIG_HAVE_KERNEL_LZ4=y
++CONFIG_KERNEL_GZIP=y
++# CONFIG_KERNEL_BZIP2 is not set
++# CONFIG_KERNEL_LZMA is not set
++# CONFIG_KERNEL_XZ is not set
++# CONFIG_KERNEL_LZO is not set
++# CONFIG_KERNEL_LZ4 is not set
++CONFIG_DEFAULT_HOSTNAME="(none)"
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++CONFIG_SYSVIPC_SYSCTL=y
++CONFIG_POSIX_MQUEUE=y
++CONFIG_POSIX_MQUEUE_SYSCTL=y
++CONFIG_CROSS_MEMORY_ATTACH=y
++CONFIG_USELIB=y
++CONFIG_AUDIT=y
++CONFIG_HAVE_ARCH_AUDITSYSCALL=y
++CONFIG_AUDITSYSCALL=y
++CONFIG_AUDIT_WATCH=y
++CONFIG_AUDIT_TREE=y
++
++#
++# IRQ subsystem
++#
++CONFIG_GENERIC_IRQ_PROBE=y
++CONFIG_GENERIC_IRQ_SHOW=y
++CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y
++CONFIG_GENERIC_PENDING_IRQ=y
++CONFIG_GENERIC_IRQ_MIGRATION=y
++CONFIG_IRQ_DOMAIN=y
++CONFIG_IRQ_DOMAIN_HIERARCHY=y
++CONFIG_GENERIC_MSI_IRQ=y
++CONFIG_GENERIC_MSI_IRQ_DOMAIN=y
++CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR=y
++CONFIG_GENERIC_IRQ_RESERVATION_MODE=y
++CONFIG_IRQ_FORCED_THREADING=y
++CONFIG_SPARSE_IRQ=y
++# CONFIG_GENERIC_IRQ_DEBUGFS is not set
++CONFIG_CLOCKSOURCE_WATCHDOG=y
++CONFIG_ARCH_CLOCKSOURCE_DATA=y
++CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y
++CONFIG_GENERIC_TIME_VSYSCALL=y
++CONFIG_GENERIC_CLOCKEVENTS=y
++CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
++CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y
++CONFIG_GENERIC_CMOS_UPDATE=y
++
++#
++# Timers subsystem
++#
++CONFIG_TICK_ONESHOT=y
++CONFIG_NO_HZ_COMMON=y
++# CONFIG_HZ_PERIODIC is not set
++CONFIG_NO_HZ_IDLE=y
++# CONFIG_NO_HZ_FULL is not set
++CONFIG_NO_HZ=y
++CONFIG_HIGH_RES_TIMERS=y
++
++#
++# CPU/Task time and stats accounting
++#
++CONFIG_TICK_CPU_ACCOUNTING=y
++# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set
++# CONFIG_IRQ_TIME_ACCOUNTING is not set
++CONFIG_BSD_PROCESS_ACCT=y
++CONFIG_BSD_PROCESS_ACCT_V3=y
++CONFIG_TASKSTATS=y
++CONFIG_TASK_DELAY_ACCT=y
++CONFIG_TASK_XACCT=y
++CONFIG_TASK_IO_ACCOUNTING=y
++# CONFIG_CPU_ISOLATION is not set
++
++#
++# RCU Subsystem
++#
++CONFIG_TREE_RCU=y
++# CONFIG_RCU_EXPERT is not set
++CONFIG_SRCU=y
++CONFIG_TREE_SRCU=y
++# CONFIG_TASKS_RCU is not set
++CONFIG_RCU_STALL_COMMON=y
++CONFIG_RCU_NEED_SEGCBLIST=y
++CONFIG_BUILD_BIN2C=y
++# CONFIG_IKCONFIG is not set
++CONFIG_LOG_BUF_SHIFT=18
++CONFIG_LOG_CPU_MAX_BUF_SHIFT=12
++CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13
++CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
++CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y
++CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y
++CONFIG_ARCH_SUPPORTS_INT128=y
++CONFIG_NUMA_BALANCING=y
++CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y
++CONFIG_CGROUPS=y
++CONFIG_PAGE_COUNTER=y
++CONFIG_MEMCG=y
++CONFIG_MEMCG_SWAP=y
++# CONFIG_MEMCG_SWAP_ENABLED is not set
++CONFIG_BLK_CGROUP=y
++# CONFIG_DEBUG_BLK_CGROUP is not set
++CONFIG_CGROUP_WRITEBACK=y
++CONFIG_CGROUP_SCHED=y
++CONFIG_FAIR_GROUP_SCHED=y
++CONFIG_CFS_BANDWIDTH=y
++# CONFIG_RT_GROUP_SCHED is not set
++# CONFIG_CGROUP_PIDS is not set
++# CONFIG_CGROUP_RDMA is not set
++CONFIG_CGROUP_FREEZER=y
++CONFIG_CGROUP_HUGETLB=y
++CONFIG_CPUSETS=y
++CONFIG_PROC_PID_CPUSET=y
++CONFIG_CGROUP_DEVICE=y
++CONFIG_CGROUP_CPUACCT=y
++CONFIG_CGROUP_PERF=y
++# CONFIG_CGROUP_BPF is not set
++# CONFIG_CGROUP_DEBUG is not set
++# CONFIG_SOCK_CGROUP_DATA is not set
++CONFIG_NAMESPACES=y
++CONFIG_UTS_NS=y
++CONFIG_IPC_NS=y
++CONFIG_USER_NS=y
++CONFIG_PID_NS=y
++CONFIG_NET_NS=y
++CONFIG_SCHED_AUTOGROUP=y
++# CONFIG_SYSFS_DEPRECATED is not set
++CONFIG_RELAY=y
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_INITRAMFS_SOURCE=""
++CONFIG_RD_GZIP=y
++CONFIG_RD_BZIP2=y
++CONFIG_RD_LZMA=y
++CONFIG_RD_XZ=y
++CONFIG_RD_LZO=y
++CONFIG_RD_LZ4=y
++CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++CONFIG_SYSCTL=y
++CONFIG_ANON_INODES=y
++CONFIG_HAVE_UID16=y
++CONFIG_SYSCTL_EXCEPTION_TRACE=y
++CONFIG_HAVE_PCSPKR_PLATFORM=y
++CONFIG_BPF=y
++CONFIG_EXPERT=y
++CONFIG_UID16=y
++CONFIG_MULTIUSER=y
++CONFIG_SGETMASK_SYSCALL=y
++CONFIG_SYSFS_SYSCALL=y
++CONFIG_SYSCTL_SYSCALL=y
++CONFIG_FHANDLE=y
++CONFIG_POSIX_TIMERS=y
++CONFIG_PRINTK=y
++CONFIG_PRINTK_NMI=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_PCSPKR_PLATFORM=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_FUTEX_PI=y
++CONFIG_EPOLL=y
++CONFIG_SIGNALFD=y
++CONFIG_TIMERFD=y
++CONFIG_EVENTFD=y
++CONFIG_SHMEM=y
++CONFIG_AIO=y
++CONFIG_ADVISE_SYSCALLS=y
++CONFIG_MEMBARRIER=y
++CONFIG_CHECKPOINT_RESTORE=y
++CONFIG_KALLSYMS=y
++CONFIG_KALLSYMS_ALL=y
++CONFIG_KALLSYMS_ABSOLUTE_PERCPU=y
++CONFIG_KALLSYMS_BASE_RELATIVE=y
++CONFIG_BPF_SYSCALL=y
++# CONFIG_USERFAULTFD is not set
++CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y
++# CONFIG_EMBEDDED is not set
++CONFIG_HAVE_PERF_EVENTS=y
++# CONFIG_PC104 is not set
++
++#
++# Kernel Performance Events And Counters
++#
++CONFIG_PERF_EVENTS=y
++# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_SLUB_DEBUG=y
++# CONFIG_SLUB_MEMCG_SYSFS_ON is not set
++# CONFIG_COMPAT_BRK is not set
++# CONFIG_SLAB is not set
++CONFIG_SLUB=y
++# CONFIG_SLOB is not set
++CONFIG_SLAB_MERGE_DEFAULT=y
++# CONFIG_SLAB_FREELIST_RANDOM is not set
++# CONFIG_SLAB_FREELIST_HARDENED is not set
++CONFIG_SLUB_CPU_PARTIAL=y
++CONFIG_SYSTEM_DATA_VERIFICATION=y
++CONFIG_PROFILING=y
++CONFIG_TRACEPOINTS=y
++CONFIG_CRASH_CORE=y
++CONFIG_KEXEC_CORE=y
++CONFIG_OPROFILE=m
++# CONFIG_OPROFILE_EVENT_MULTIPLEX is not set
++CONFIG_HAVE_OPROFILE=y
++CONFIG_OPROFILE_NMI_TIMER=y
++CONFIG_KPROBES=y
++CONFIG_JUMP_LABEL=y
++# CONFIG_STATIC_KEYS_SELFTEST is not set
++CONFIG_OPTPROBES=y
++CONFIG_KPROBES_ON_FTRACE=y
++CONFIG_UPROBES=y
++# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set
++CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
++CONFIG_ARCH_USE_BUILTIN_BSWAP=y
++CONFIG_KRETPROBES=y
++CONFIG_HAVE_IOREMAP_PROT=y
++CONFIG_HAVE_KPROBES=y
++CONFIG_HAVE_KRETPROBES=y
++CONFIG_HAVE_OPTPROBES=y
++CONFIG_HAVE_KPROBES_ON_FTRACE=y
++CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y
++CONFIG_HAVE_NMI=y
++CONFIG_HAVE_ARCH_TRACEHOOK=y
++CONFIG_HAVE_DMA_CONTIGUOUS=y
++CONFIG_GENERIC_SMP_IDLE_THREAD=y
++CONFIG_ARCH_HAS_FORTIFY_SOURCE=y
++CONFIG_ARCH_HAS_SET_MEMORY=y
++CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y
++CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y
++CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
++CONFIG_HAVE_CLK=y
++CONFIG_HAVE_DMA_API_DEBUG=y
++CONFIG_HAVE_HW_BREAKPOINT=y
++CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y
++CONFIG_HAVE_USER_RETURN_NOTIFIER=y
++CONFIG_HAVE_PERF_EVENTS_NMI=y
++CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y
++CONFIG_HAVE_PERF_REGS=y
++CONFIG_HAVE_PERF_USER_STACK_DUMP=y
++CONFIG_HAVE_ARCH_JUMP_LABEL=y
++CONFIG_HAVE_RCU_TABLE_FREE=y
++CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y
++CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y
++CONFIG_HAVE_CMPXCHG_LOCAL=y
++CONFIG_HAVE_CMPXCHG_DOUBLE=y
++CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y
++CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y
++CONFIG_HAVE_ARCH_SECCOMP_FILTER=y
++CONFIG_SECCOMP_FILTER=y
++CONFIG_HAVE_GCC_PLUGINS=y
++# CONFIG_GCC_PLUGINS is not set
++CONFIG_HAVE_CC_STACKPROTECTOR=y
++# CONFIG_CC_STACKPROTECTOR_NONE is not set
++CONFIG_CC_STACKPROTECTOR_REGULAR=y
++# CONFIG_CC_STACKPROTECTOR_STRONG is not set
++# CONFIG_CC_STACKPROTECTOR_AUTO is not set
++CONFIG_THIN_ARCHIVES=y
++CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y
++CONFIG_HAVE_CONTEXT_TRACKING=y
++CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y
++CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y
++CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y
++CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD=y
++CONFIG_HAVE_ARCH_HUGE_VMAP=y
++CONFIG_HAVE_ARCH_SOFT_DIRTY=y
++CONFIG_HAVE_MOD_ARCH_SPECIFIC=y
++CONFIG_MODULES_USE_ELF_RELA=y
++CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y
++CONFIG_ARCH_HAS_ELF_RANDOMIZE=y
++CONFIG_HAVE_ARCH_MMAP_RND_BITS=y
++CONFIG_HAVE_EXIT_THREAD=y
++CONFIG_ARCH_MMAP_RND_BITS=28
++CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y
++CONFIG_ARCH_MMAP_RND_COMPAT_BITS=8
++CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES=y
++CONFIG_HAVE_COPY_THREAD_TLS=y
++CONFIG_HAVE_STACK_VALIDATION=y
++# CONFIG_HAVE_ARCH_HASH is not set
++# CONFIG_ISA_BUS_API is not set
++CONFIG_OLD_SIGSUSPEND3=y
++CONFIG_COMPAT_OLD_SIGACTION=y
++# CONFIG_CPU_NO_EFFICIENT_FFS is not set
++CONFIG_HAVE_ARCH_VMAP_STACK=y
++CONFIG_VMAP_STACK=y
++# CONFIG_ARCH_OPTIONAL_KERNEL_RWX is not set
++# CONFIG_ARCH_OPTIONAL_KERNEL_RWX_DEFAULT is not set
++CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y
++CONFIG_STRICT_KERNEL_RWX=y
++CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y
++CONFIG_STRICT_MODULE_RWX=y
++CONFIG_ARCH_HAS_PHYS_TO_DMA=y
++CONFIG_ARCH_HAS_REFCOUNT=y
++# CONFIG_REFCOUNT_FULL is not set
++
++#
++# GCOV-based kernel profiling
++#
++# CONFIG_GCOV_KERNEL is not set
++CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y
++# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
++CONFIG_RT_MUTEXES=y
++CONFIG_BASE_SMALL=0
++CONFIG_MODULES=y
++# CONFIG_MODULE_FORCE_LOAD is not set
++CONFIG_MODULE_UNLOAD=y
++# CONFIG_MODULE_FORCE_UNLOAD is not set
++CONFIG_MODVERSIONS=y
++CONFIG_MODULE_SRCVERSION_ALL=y
++CONFIG_MODULE_SIG=y
++# CONFIG_MODULE_SIG_FORCE is not set
++CONFIG_MODULE_SIG_ALL=y
++# CONFIG_MODULE_SIG_SHA1 is not set
++# CONFIG_MODULE_SIG_SHA224 is not set
++# CONFIG_MODULE_SIG_SHA256 is not set
++# CONFIG_MODULE_SIG_SHA384 is not set
++CONFIG_MODULE_SIG_SHA512=y
++CONFIG_MODULE_SIG_HASH="sha512"
++# CONFIG_MODULE_COMPRESS is not set
++# CONFIG_TRIM_UNUSED_KSYMS is not set
++CONFIG_MODULES_TREE_LOOKUP=y
++CONFIG_BLOCK=y
++CONFIG_BLK_SCSI_REQUEST=y
++CONFIG_BLK_DEV_BSG=y
++CONFIG_BLK_DEV_BSGLIB=y
++CONFIG_BLK_DEV_INTEGRITY=y
++# CONFIG_BLK_DEV_ZONED is not set
++CONFIG_BLK_DEV_THROTTLING=y
++# CONFIG_BLK_DEV_THROTTLING_LOW is not set
++CONFIG_BLK_CMDLINE_PARSER=y
++# CONFIG_BLK_WBT is not set
++CONFIG_BLK_DEBUG_FS=y
++# CONFIG_BLK_SED_OPAL is not set
++
++#
++# Partition Types
++#
++CONFIG_PARTITION_ADVANCED=y
++# CONFIG_ACORN_PARTITION is not set
++# CONFIG_AIX_PARTITION is not set
++# CONFIG_OSF_PARTITION is not set
++# CONFIG_AMIGA_PARTITION is not set
++# CONFIG_ATARI_PARTITION is not set
++# CONFIG_MAC_PARTITION is not set
++CONFIG_MSDOS_PARTITION=y
++# CONFIG_BSD_DISKLABEL is not set
++# CONFIG_MINIX_SUBPARTITION is not set
++# CONFIG_SOLARIS_X86_PARTITION is not set
++# CONFIG_UNIXWARE_DISKLABEL is not set
++# CONFIG_LDM_PARTITION is not set
++# CONFIG_SGI_PARTITION is not set
++# CONFIG_ULTRIX_PARTITION is not set
++# CONFIG_SUN_PARTITION is not set
++# CONFIG_KARMA_PARTITION is not set
++CONFIG_EFI_PARTITION=y
++# CONFIG_SYSV68_PARTITION is not set
++# CONFIG_CMDLINE_PARTITION is not set
++CONFIG_BLOCK_COMPAT=y
++CONFIG_BLK_MQ_PCI=y
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++CONFIG_CFQ_GROUP_IOSCHED=y
++CONFIG_DEFAULT_DEADLINE=y
++# CONFIG_DEFAULT_CFQ is not set
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="deadline"
++CONFIG_MQ_IOSCHED_DEADLINE=y
++CONFIG_MQ_IOSCHED_KYBER=y
++# CONFIG_IOSCHED_BFQ is not set
++CONFIG_ASN1=y
++CONFIG_UNINLINE_SPIN_UNLOCK=y
++CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y
++CONFIG_MUTEX_SPIN_ON_OWNER=y
++CONFIG_RWSEM_SPIN_ON_OWNER=y
++CONFIG_LOCK_SPIN_ON_OWNER=y
++CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y
++CONFIG_QUEUED_SPINLOCKS=y
++CONFIG_ARCH_USE_QUEUED_RWLOCKS=y
++CONFIG_QUEUED_RWLOCKS=y
++CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE=y
++CONFIG_FREEZER=y
++
++#
++# Processor type and features
++#
++CONFIG_ZONE_DMA=y
++CONFIG_SMP=y
++CONFIG_X86_FEATURE_NAMES=y
++CONFIG_X86_FAST_FEATURE_TESTS=y
++# CONFIG_X86_X2APIC is not set
++CONFIG_X86_MPPARSE=y
++# CONFIG_GOLDFISH is not set
++CONFIG_RETPOLINE=y
++# CONFIG_INTEL_RDT is not set
++CONFIG_X86_EXTENDED_PLATFORM=y
++# CONFIG_X86_VSMP is not set
++# CONFIG_X86_GOLDFISH is not set
++CONFIG_X86_INTEL_LPSS=y
++# CONFIG_X86_AMD_PLATFORM_DEVICE is not set
++CONFIG_IOSF_MBI=y
++CONFIG_IOSF_MBI_DEBUG=y
++CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y
++CONFIG_SCHED_OMIT_FRAME_POINTER=y
++CONFIG_HYPERVISOR_GUEST=y
++CONFIG_PARAVIRT=y
++# CONFIG_PARAVIRT_DEBUG is not set
++CONFIG_PARAVIRT_SPINLOCKS=y
++# CONFIG_QUEUED_LOCK_STAT is not set
++# CONFIG_XEN is not set
++CONFIG_KVM_GUEST=y
++CONFIG_KVM_DEBUG_FS=y
++# CONFIG_PARAVIRT_TIME_ACCOUNTING is not set
++CONFIG_PARAVIRT_CLOCK=y
++# CONFIG_JAILHOUSE_GUEST is not set
++CONFIG_NO_BOOTMEM=y
++# CONFIG_MK8 is not set
++# CONFIG_MPSC is not set
++# CONFIG_MCORE2 is not set
++# CONFIG_MATOM is not set
++CONFIG_GENERIC_CPU=y
++CONFIG_X86_INTERNODE_CACHE_SHIFT=6
++CONFIG_X86_L1_CACHE_SHIFT=6
++CONFIG_X86_TSC=y
++CONFIG_X86_CMPXCHG64=y
++CONFIG_X86_CMOV=y
++CONFIG_X86_MINIMUM_CPU_FAMILY=64
++CONFIG_X86_DEBUGCTLMSR=y
++CONFIG_PROCESSOR_SELECT=y
++CONFIG_CPU_SUP_INTEL=y
++CONFIG_CPU_SUP_AMD=y
++CONFIG_CPU_SUP_CENTAUR=y
++CONFIG_HPET_TIMER=y
++CONFIG_HPET_EMULATE_RTC=y
++CONFIG_DMI=y
++CONFIG_GART_IOMMU=y
++CONFIG_CALGARY_IOMMU=y
++CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT=y
++CONFIG_SWIOTLB=y
++CONFIG_IOMMU_HELPER=y
++# CONFIG_MAXSMP is not set
++CONFIG_NR_CPUS_RANGE_BEGIN=2
++CONFIG_NR_CPUS_RANGE_END=512
++CONFIG_NR_CPUS_DEFAULT=64
++CONFIG_NR_CPUS=256
++CONFIG_SCHED_SMT=y
++CONFIG_SCHED_MC=y
++CONFIG_SCHED_MC_PRIO=y
++# CONFIG_PREEMPT_NONE is not set
++CONFIG_PREEMPT_VOLUNTARY=y
++# CONFIG_PREEMPT is not set
++CONFIG_PREEMPT_COUNT=y
++CONFIG_X86_LOCAL_APIC=y
++CONFIG_X86_IO_APIC=y
++CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
++CONFIG_X86_MCE=y
++# CONFIG_X86_MCELOG_LEGACY is not set
++CONFIG_X86_MCE_INTEL=y
++CONFIG_X86_MCE_AMD=y
++CONFIG_X86_MCE_THRESHOLD=y
++# CONFIG_X86_MCE_INJECT is not set
++CONFIG_X86_THERMAL_VECTOR=y
++
++#
++# Performance monitoring
++#
++CONFIG_PERF_EVENTS_INTEL_UNCORE=y
++CONFIG_PERF_EVENTS_INTEL_RAPL=y
++CONFIG_PERF_EVENTS_INTEL_CSTATE=y
++# CONFIG_PERF_EVENTS_AMD_POWER is not set
++# CONFIG_VM86 is not set
++CONFIG_X86_16BIT=y
++CONFIG_X86_ESPFIX64=y
++CONFIG_X86_VSYSCALL_EMULATION=y
++CONFIG_I8K=m
++CONFIG_MICROCODE=y
++CONFIG_MICROCODE_INTEL=y
++CONFIG_MICROCODE_AMD=y
++CONFIG_MICROCODE_OLD_INTERFACE=y
++CONFIG_X86_MSR=m
++CONFIG_X86_CPUID=m
++# CONFIG_X86_5LEVEL is not set
++CONFIG_ARCH_PHYS_ADDR_T_64BIT=y
++CONFIG_ARCH_DMA_ADDR_T_64BIT=y
++CONFIG_X86_DIRECT_GBPAGES=y
++CONFIG_ARCH_HAS_MEM_ENCRYPT=y
++# CONFIG_AMD_MEM_ENCRYPT is not set
++CONFIG_NUMA=y
++CONFIG_AMD_NUMA=y
++CONFIG_X86_64_ACPI_NUMA=y
++CONFIG_NODES_SPAN_OTHER_NODES=y
++# CONFIG_NUMA_EMU is not set
++CONFIG_NODES_SHIFT=6
++CONFIG_ARCH_SPARSEMEM_ENABLE=y
++CONFIG_ARCH_SPARSEMEM_DEFAULT=y
++CONFIG_ARCH_SELECT_MEMORY_MODEL=y
++CONFIG_ARCH_MEMORY_PROBE=y
++CONFIG_ARCH_PROC_KCORE_TEXT=y
++CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000
++CONFIG_SELECT_MEMORY_MODEL=y
++CONFIG_SPARSEMEM_MANUAL=y
++CONFIG_SPARSEMEM=y
++CONFIG_NEED_MULTIPLE_NODES=y
++CONFIG_HAVE_MEMORY_PRESENT=y
++CONFIG_SPARSEMEM_EXTREME=y
++CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
++CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER=y
++CONFIG_SPARSEMEM_VMEMMAP=y
++CONFIG_HAVE_MEMBLOCK=y
++CONFIG_HAVE_MEMBLOCK_NODE_MAP=y
++CONFIG_HAVE_GENERIC_GUP=y
++CONFIG_ARCH_DISCARD_MEMBLOCK=y
++CONFIG_MEMORY_ISOLATION=y
++CONFIG_HAVE_BOOTMEM_INFO_NODE=y
++CONFIG_MEMORY_HOTPLUG=y
++CONFIG_MEMORY_HOTPLUG_SPARSE=y
++# CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE is not set
++CONFIG_MEMORY_HOTREMOVE=y
++CONFIG_SPLIT_PTLOCK_CPUS=4
++CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y
++CONFIG_COMPACTION=y
++CONFIG_MIGRATION=y
++CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y
++CONFIG_ARCH_ENABLE_THP_MIGRATION=y
++CONFIG_PHYS_ADDR_T_64BIT=y
++CONFIG_BOUNCE=y
++CONFIG_VIRT_TO_BUS=y
++CONFIG_MMU_NOTIFIER=y
++CONFIG_KSM=y
++CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
++CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y
++CONFIG_MEMORY_FAILURE=y
++CONFIG_HWPOISON_INJECT=m
++CONFIG_TRANSPARENT_HUGEPAGE=y
++CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y
++# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set
++CONFIG_ARCH_WANTS_THP_SWAP=y
++CONFIG_THP_SWAP=y
++CONFIG_TRANSPARENT_HUGE_PAGECACHE=y
++CONFIG_CLEANCACHE=y
++CONFIG_FRONTSWAP=y
++CONFIG_CMA=y
++# CONFIG_CMA_DEBUG is not set
++# CONFIG_CMA_DEBUGFS is not set
++CONFIG_CMA_AREAS=7
++CONFIG_MEM_SOFT_DIRTY=y
++CONFIG_ZSWAP=y
++CONFIG_ZPOOL=y
++CONFIG_ZBUD=y
++# CONFIG_Z3FOLD is not set
++CONFIG_ZSMALLOC=y
++CONFIG_PGTABLE_MAPPING=y
++# CONFIG_ZSMALLOC_STAT is not set
++CONFIG_GENERIC_EARLY_IOREMAP=y
++# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set
++# CONFIG_IDLE_PAGE_TRACKING is not set
++CONFIG_ARCH_HAS_ZONE_DEVICE=y
++# CONFIG_ZONE_DEVICE is not set
++CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y
++CONFIG_ARCH_HAS_PKEYS=y
++# CONFIG_PERCPU_STATS is not set
++# CONFIG_GUP_BENCHMARK is not set
++# CONFIG_X86_PMEM_LEGACY is not set
++CONFIG_X86_CHECK_BIOS_CORRUPTION=y
++CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK=y
++CONFIG_X86_RESERVE_LOW=64
++CONFIG_MTRR=y
++CONFIG_MTRR_SANITIZER=y
++CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1
++CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1
++CONFIG_X86_PAT=y
++CONFIG_ARCH_USES_PG_UNCACHED=y
++CONFIG_ARCH_RANDOM=y
++CONFIG_X86_SMAP=y
++CONFIG_X86_INTEL_UMIP=y
++# CONFIG_X86_INTEL_MPX is not set
++CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS=y
++CONFIG_EFI=y
++CONFIG_EFI_STUB=y
++CONFIG_EFI_MIXED=y
++CONFIG_SECCOMP=y
++# CONFIG_HZ_100 is not set
++CONFIG_HZ_250=y
++# CONFIG_HZ_300 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=250
++CONFIG_SCHED_HRTICK=y
++CONFIG_KEXEC=y
++CONFIG_KEXEC_FILE=y
++CONFIG_KEXEC_VERIFY_SIG=y
++CONFIG_CRASH_DUMP=y
++CONFIG_KEXEC_JUMP=y
++CONFIG_PHYSICAL_START=0x1000000
++CONFIG_RELOCATABLE=y
++CONFIG_RANDOMIZE_BASE=y
++CONFIG_X86_NEED_RELOCS=y
++CONFIG_PHYSICAL_ALIGN=0x1000000
++CONFIG_RANDOMIZE_MEMORY=y
++CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING=0xa
++CONFIG_HOTPLUG_CPU=y
++# CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set
++# CONFIG_DEBUG_HOTPLUG_CPU0 is not set
++# CONFIG_COMPAT_VDSO is not set
++CONFIG_LEGACY_VSYSCALL_EMULATE=y
++# CONFIG_LEGACY_VSYSCALL_NONE is not set
++# CONFIG_CMDLINE_BOOL is not set
++CONFIG_MODIFY_LDT_SYSCALL=y
++CONFIG_HAVE_LIVEPATCH=y
++# CONFIG_LIVEPATCH is not set
++CONFIG_ARCH_HAS_ADD_PAGES=y
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
++CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
++CONFIG_USE_PERCPU_NUMA_NODE_ID=y
++
++#
++# Power management and ACPI options
++#
++CONFIG_ARCH_HIBERNATION_HEADER=y
++CONFIG_SUSPEND=y
++CONFIG_SUSPEND_FREEZER=y
++# CONFIG_SUSPEND_SKIP_SYNC is not set
++CONFIG_HIBERNATE_CALLBACKS=y
++CONFIG_HIBERNATION=y
++CONFIG_PM_STD_PARTITION=""
++CONFIG_PM_SLEEP=y
++CONFIG_PM_SLEEP_SMP=y
++# CONFIG_PM_AUTOSLEEP is not set
++CONFIG_PM_WAKELOCKS=y
++CONFIG_PM_WAKELOCKS_LIMIT=100
++CONFIG_PM_WAKELOCKS_GC=y
++CONFIG_PM=y
++CONFIG_PM_DEBUG=y
++CONFIG_PM_ADVANCED_DEBUG=y
++# CONFIG_PM_TEST_SUSPEND is not set
++CONFIG_PM_SLEEP_DEBUG=y
++# CONFIG_DPM_WATCHDOG is not set
++CONFIG_PM_TRACE=y
++CONFIG_PM_TRACE_RTC=y
++CONFIG_PM_CLK=y
++CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y
++CONFIG_ACPI=y
++CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y
++CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y
++CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y
++# CONFIG_ACPI_DEBUGGER is not set
++CONFIG_ACPI_SPCR_TABLE=y
++CONFIG_ACPI_LPIT=y
++CONFIG_ACPI_SLEEP=y
++# CONFIG_ACPI_PROCFS_POWER is not set
++CONFIG_ACPI_REV_OVERRIDE_POSSIBLE=y
++CONFIG_ACPI_EC_DEBUGFS=m
++CONFIG_ACPI_AC=y
++CONFIG_ACPI_BATTERY=y
++CONFIG_ACPI_BUTTON=y
++CONFIG_ACPI_VIDEO=m
++CONFIG_ACPI_FAN=y
++CONFIG_ACPI_DOCK=y
++CONFIG_ACPI_CPU_FREQ_PSS=y
++CONFIG_ACPI_PROCESSOR_CSTATE=y
++CONFIG_ACPI_PROCESSOR_IDLE=y
++CONFIG_ACPI_CPPC_LIB=y
++CONFIG_ACPI_PROCESSOR=y
++CONFIG_ACPI_HOTPLUG_CPU=y
++CONFIG_ACPI_PROCESSOR_AGGREGATOR=m
++CONFIG_ACPI_THERMAL=y
++CONFIG_ACPI_NUMA=y
++# CONFIG_ACPI_CUSTOM_DSDT is not set
++CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y
++CONFIG_ACPI_TABLE_UPGRADE=y
++# CONFIG_ACPI_DEBUG is not set
++CONFIG_ACPI_PCI_SLOT=y
++CONFIG_ACPI_CONTAINER=y
++CONFIG_ACPI_HOTPLUG_MEMORY=y
++CONFIG_ACPI_HOTPLUG_IOAPIC=y
++CONFIG_ACPI_SBS=m
++CONFIG_ACPI_HED=y
++# CONFIG_ACPI_CUSTOM_METHOD is not set
++CONFIG_ACPI_BGRT=y
++# CONFIG_ACPI_REDUCED_HARDWARE_ONLY is not set
++# CONFIG_ACPI_NFIT is not set
++CONFIG_HAVE_ACPI_APEI=y
++CONFIG_HAVE_ACPI_APEI_NMI=y
++CONFIG_ACPI_APEI=y
++CONFIG_ACPI_APEI_GHES=y
++CONFIG_ACPI_APEI_PCIEAER=y
++CONFIG_ACPI_APEI_MEMORY_FAILURE=y
++CONFIG_ACPI_APEI_EINJ=m
++# CONFIG_ACPI_APEI_ERST_DEBUG is not set
++# CONFIG_DPTF_POWER is not set
++# CONFIG_PMIC_OPREGION is not set
++# CONFIG_ACPI_CONFIGFS is not set
++CONFIG_X86_PM_TIMER=y
++CONFIG_SFI=y
++
++#
++# CPU Frequency scaling
++#
++CONFIG_CPU_FREQ=y
++CONFIG_CPU_FREQ_GOV_ATTR_SET=y
++CONFIG_CPU_FREQ_GOV_COMMON=y
++CONFIG_CPU_FREQ_STAT=y
++CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
++# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
++# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
++# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
++# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
++# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set
++CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
++CONFIG_CPU_FREQ_GOV_POWERSAVE=y
++CONFIG_CPU_FREQ_GOV_USERSPACE=y
++CONFIG_CPU_FREQ_GOV_ONDEMAND=y
++CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
++# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set
++
++#
++# CPU frequency scaling drivers
++#
++CONFIG_X86_INTEL_PSTATE=y
++# CONFIG_X86_PCC_CPUFREQ is not set
++CONFIG_X86_ACPI_CPUFREQ=y
++# CONFIG_X86_ACPI_CPUFREQ_CPB is not set
++# CONFIG_X86_POWERNOW_K8 is not set
++# CONFIG_X86_AMD_FREQ_SENSITIVITY is not set
++# CONFIG_X86_SPEEDSTEP_CENTRINO is not set
++# CONFIG_X86_P4_CLOCKMOD is not set
++
++#
++# shared options
++#
++# CONFIG_X86_SPEEDSTEP_LIB is not set
++
++#
++# CPU Idle
++#
++CONFIG_CPU_IDLE=y
++CONFIG_CPU_IDLE_GOV_LADDER=y
++CONFIG_CPU_IDLE_GOV_MENU=y
++# CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is not set
++# CONFIG_INTEL_IDLE is not set
++
++#
++# Bus options (PCI etc.)
++#
++CONFIG_PCI=y
++CONFIG_PCI_DIRECT=y
++CONFIG_PCI_MMCONFIG=y
++CONFIG_PCI_DOMAINS=y
++# CONFIG_PCI_CNB20LE_QUIRK is not set
++CONFIG_PCIEPORTBUS=y
++CONFIG_HOTPLUG_PCI_PCIE=y
++CONFIG_PCIEAER=y
++# CONFIG_PCIE_ECRC is not set
++# CONFIG_PCIEAER_INJECT is not set
++CONFIG_PCIEASPM=y
++CONFIG_PCIEASPM_DEBUG=y
++CONFIG_PCIEASPM_DEFAULT=y
++# CONFIG_PCIEASPM_POWERSAVE is not set
++# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set
++# CONFIG_PCIEASPM_PERFORMANCE is not set
++CONFIG_PCIE_PME=y
++# CONFIG_PCIE_DPC is not set
++# CONFIG_PCIE_PTM is not set
++CONFIG_PCI_BUS_ADDR_T_64BIT=y
++CONFIG_PCI_MSI=y
++CONFIG_PCI_MSI_IRQ_DOMAIN=y
++CONFIG_PCI_QUIRKS=y
++# CONFIG_PCI_DEBUG is not set
++CONFIG_PCI_REALLOC_ENABLE_AUTO=y
++CONFIG_PCI_STUB=y
++CONFIG_PCI_ATS=y
++CONFIG_PCI_LOCKLESS_CONFIG=y
++CONFIG_PCI_IOV=y
++CONFIG_PCI_PRI=y
++CONFIG_PCI_PASID=y
++CONFIG_PCI_LABEL=y
++CONFIG_HOTPLUG_PCI=y
++# CONFIG_HOTPLUG_PCI_ACPI is not set
++# CONFIG_HOTPLUG_PCI_CPCI is not set
++# CONFIG_HOTPLUG_PCI_SHPC is not set
++
++#
++# Cadence PCIe controllers support
++#
++
++#
++# DesignWare PCI Core Support
++#
++# CONFIG_PCIE_DW_PLAT is not set
++
++#
++# PCI host controller drivers
++#
++# CONFIG_VMD is not set
++
++#
++# PCI Endpoint
++#
++# CONFIG_PCI_ENDPOINT is not set
++
++#
++# PCI switch controller drivers
++#
++# CONFIG_PCI_SW_SWITCHTEC is not set
++# CONFIG_ISA_BUS is not set
++CONFIG_ISA_DMA_API=y
++CONFIG_AMD_NB=y
++# CONFIG_PCCARD is not set
++CONFIG_RAPIDIO=y
++# CONFIG_RAPIDIO_TSI721 is not set
++CONFIG_RAPIDIO_DISC_TIMEOUT=30
++# CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS is not set
++CONFIG_RAPIDIO_DMA_ENGINE=y
++# CONFIG_RAPIDIO_DEBUG is not set
++# CONFIG_RAPIDIO_ENUM_BASIC is not set
++# CONFIG_RAPIDIO_CHMAN is not set
++# CONFIG_RAPIDIO_MPORT_CDEV is not set
++
++#
++# RapidIO Switch drivers
++#
++# CONFIG_RAPIDIO_TSI57X is not set
++# CONFIG_RAPIDIO_CPS_XX is not set
++# CONFIG_RAPIDIO_TSI568 is not set
++# CONFIG_RAPIDIO_CPS_GEN2 is not set
++# CONFIG_RAPIDIO_RXS_GEN3 is not set
++# CONFIG_X86_SYSFB is not set
++
++#
++# Executable file formats / Emulations
++#
++CONFIG_BINFMT_ELF=y
++CONFIG_COMPAT_BINFMT_ELF=y
++CONFIG_ELFCORE=y
++CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
++CONFIG_BINFMT_SCRIPT=y
++# CONFIG_HAVE_AOUT is not set
++CONFIG_BINFMT_MISC=y
++CONFIG_COREDUMP=y
++CONFIG_IA32_EMULATION=y
++# CONFIG_IA32_AOUT is not set
++# CONFIG_X86_X32 is not set
++CONFIG_COMPAT_32=y
++CONFIG_COMPAT=y
++CONFIG_COMPAT_FOR_U64_ALIGNMENT=y
++CONFIG_SYSVIPC_COMPAT=y
++CONFIG_X86_DEV_DMA_OPS=y
++CONFIG_NET=y
++CONFIG_NET_INGRESS=y
++
++#
++# Networking options
++#
++CONFIG_PACKET=y
++CONFIG_PACKET_DIAG=y
++CONFIG_UNIX=y
++CONFIG_UNIX_DIAG=y
++# CONFIG_TLS is not set
++CONFIG_XFRM=y
++CONFIG_XFRM_ALGO=y
++CONFIG_XFRM_USER=y
++# CONFIG_XFRM_SUB_POLICY is not set
++# CONFIG_XFRM_MIGRATE is not set
++# CONFIG_XFRM_STATISTICS is not set
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++CONFIG_IP_ADVANCED_ROUTER=y
++# CONFIG_IP_FIB_TRIE_STATS is not set
++CONFIG_IP_MULTIPLE_TABLES=y
++CONFIG_IP_ROUTE_MULTIPATH=y
++CONFIG_IP_ROUTE_VERBOSE=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++CONFIG_IP_PNP_BOOTP=y
++CONFIG_IP_PNP_RARP=y
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE_DEMUX is not set
++CONFIG_NET_IP_TUNNEL=y
++CONFIG_IP_MROUTE=y
++# CONFIG_IP_MROUTE_MULTIPLE_TABLES is not set
++CONFIG_IP_PIMSM_V1=y
++CONFIG_IP_PIMSM_V2=y
++CONFIG_SYN_COOKIES=y
++# CONFIG_NET_UDP_TUNNEL is not set
++# CONFIG_NET_FOU is not set
++# CONFIG_NET_FOU_IP_TUNNELS is not set
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++CONFIG_INET_TUNNEL=y
++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
++# CONFIG_INET_XFRM_MODE_TUNNEL is not set
++# CONFIG_INET_XFRM_MODE_BEET is not set
++CONFIG_INET_DIAG=y
++CONFIG_INET_TCP_DIAG=y
++# CONFIG_INET_UDP_DIAG is not set
++# CONFIG_INET_RAW_DIAG is not set
++# CONFIG_INET_DIAG_DESTROY is not set
++CONFIG_TCP_CONG_ADVANCED=y
++# CONFIG_TCP_CONG_BIC is not set
++CONFIG_TCP_CONG_CUBIC=y
++# CONFIG_TCP_CONG_WESTWOOD is not set
++# CONFIG_TCP_CONG_HTCP is not set
++# CONFIG_TCP_CONG_HSTCP is not set
++# CONFIG_TCP_CONG_HYBLA is not set
++# CONFIG_TCP_CONG_VEGAS is not set
++# CONFIG_TCP_CONG_NV is not set
++# CONFIG_TCP_CONG_SCALABLE is not set
++# CONFIG_TCP_CONG_LP is not set
++# CONFIG_TCP_CONG_VENO is not set
++# CONFIG_TCP_CONG_YEAH is not set
++# CONFIG_TCP_CONG_ILLINOIS is not set
++# CONFIG_TCP_CONG_DCTCP is not set
++# CONFIG_TCP_CONG_CDG is not set
++# CONFIG_TCP_CONG_BBR is not set
++CONFIG_DEFAULT_CUBIC=y
++# CONFIG_DEFAULT_RENO is not set
++CONFIG_DEFAULT_TCP_CONG="cubic"
++CONFIG_TCP_MD5SIG=y
++CONFIG_IPV6=y
++# CONFIG_IPV6_ROUTER_PREF is not set
++# CONFIG_IPV6_OPTIMISTIC_DAD is not set
++CONFIG_INET6_AH=y
++CONFIG_INET6_ESP=y
++# CONFIG_INET6_ESP_OFFLOAD is not set
++# CONFIG_INET6_IPCOMP is not set
++# CONFIG_IPV6_MIP6 is not set
++# CONFIG_IPV6_ILA is not set
++# CONFIG_INET6_XFRM_TUNNEL is not set
++# CONFIG_INET6_TUNNEL is not set
++CONFIG_INET6_XFRM_MODE_TRANSPORT=y
++CONFIG_INET6_XFRM_MODE_TUNNEL=y
++CONFIG_INET6_XFRM_MODE_BEET=y
++# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
++# CONFIG_IPV6_VTI is not set
++CONFIG_IPV6_SIT=y
++# CONFIG_IPV6_SIT_6RD is not set
++CONFIG_IPV6_NDISC_NODETYPE=y
++# CONFIG_IPV6_TUNNEL is not set
++# CONFIG_IPV6_FOU is not set
++# CONFIG_IPV6_FOU_TUNNEL is not set
++# CONFIG_IPV6_MULTIPLE_TABLES is not set
++# CONFIG_IPV6_MROUTE is not set
++# CONFIG_IPV6_SEG6_LWTUNNEL is not set
++# CONFIG_IPV6_SEG6_HMAC is not set
++CONFIG_NETLABEL=y
++CONFIG_NETWORK_SECMARK=y
++CONFIG_NET_PTP_CLASSIFY=y
++# CONFIG_NETWORK_PHY_TIMESTAMPING is not set
++CONFIG_NETFILTER=y
++CONFIG_NETFILTER_ADVANCED=y
++CONFIG_BRIDGE_NETFILTER=m
++
++#
++# Core Netfilter Configuration
++#
++CONFIG_NETFILTER_INGRESS=y
++CONFIG_NETFILTER_FAMILY_BRIDGE=y
++# CONFIG_NETFILTER_NETLINK_ACCT is not set
++# CONFIG_NETFILTER_NETLINK_QUEUE is not set
++# CONFIG_NETFILTER_NETLINK_LOG is not set
++CONFIG_NF_CONNTRACK=m
++# CONFIG_NF_LOG_NETDEV is not set
++# CONFIG_NF_CONNTRACK_MARK is not set
++# CONFIG_NF_CONNTRACK_SECMARK is not set
++# CONFIG_NF_CONNTRACK_PROCFS is not set
++# CONFIG_NF_CONNTRACK_EVENTS is not set
++# CONFIG_NF_CONNTRACK_TIMEOUT is not set
++# CONFIG_NF_CONNTRACK_TIMESTAMP is not set
++CONFIG_NF_CT_PROTO_DCCP=y
++CONFIG_NF_CT_PROTO_SCTP=y
++CONFIG_NF_CT_PROTO_UDPLITE=y
++# CONFIG_NF_CONNTRACK_AMANDA is not set
++# CONFIG_NF_CONNTRACK_FTP is not set
++# CONFIG_NF_CONNTRACK_H323 is not set
++# CONFIG_NF_CONNTRACK_IRC is not set
++# CONFIG_NF_CONNTRACK_NETBIOS_NS is not set
++# CONFIG_NF_CONNTRACK_SNMP is not set
++# CONFIG_NF_CONNTRACK_PPTP is not set
++# CONFIG_NF_CONNTRACK_SANE is not set
++# CONFIG_NF_CONNTRACK_SIP is not set
++# CONFIG_NF_CONNTRACK_TFTP is not set
++# CONFIG_NF_CT_NETLINK is not set
++# CONFIG_NF_CT_NETLINK_TIMEOUT is not set
++CONFIG_NF_NAT=m
++CONFIG_NF_NAT_NEEDED=y
++CONFIG_NF_NAT_PROTO_DCCP=y
++CONFIG_NF_NAT_PROTO_UDPLITE=y
++CONFIG_NF_NAT_PROTO_SCTP=y
++# CONFIG_NF_NAT_AMANDA is not set
++# CONFIG_NF_NAT_FTP is not set
++# CONFIG_NF_NAT_IRC is not set
++# CONFIG_NF_NAT_SIP is not set
++# CONFIG_NF_NAT_TFTP is not set
++# CONFIG_NF_NAT_REDIRECT is not set
++# CONFIG_NF_TABLES is not set
++CONFIG_NETFILTER_XTABLES=m
++
++#
++# Xtables combined modules
++#
++# CONFIG_NETFILTER_XT_MARK is not set
++# CONFIG_NETFILTER_XT_CONNMARK is not set
++
++#
++# Xtables targets
++#
++# CONFIG_NETFILTER_XT_TARGET_AUDIT is not set
++CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
++# CONFIG_NETFILTER_XT_TARGET_CLASSIFY is not set
++# CONFIG_NETFILTER_XT_TARGET_CONNMARK is not set
++# CONFIG_NETFILTER_XT_TARGET_DSCP is not set
++# CONFIG_NETFILTER_XT_TARGET_HL is not set
++# CONFIG_NETFILTER_XT_TARGET_HMARK is not set
++# CONFIG_NETFILTER_XT_TARGET_IDLETIMER is not set
++# CONFIG_NETFILTER_XT_TARGET_LED is not set
++# CONFIG_NETFILTER_XT_TARGET_LOG is not set
++# CONFIG_NETFILTER_XT_TARGET_MARK is not set
++CONFIG_NETFILTER_XT_NAT=m
++# CONFIG_NETFILTER_XT_TARGET_NETMAP is not set
++# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set
++# CONFIG_NETFILTER_XT_TARGET_NFQUEUE is not set
++# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set
++# CONFIG_NETFILTER_XT_TARGET_REDIRECT is not set
++# CONFIG_NETFILTER_XT_TARGET_TEE is not set
++# CONFIG_NETFILTER_XT_TARGET_TPROXY is not set
++# CONFIG_NETFILTER_XT_TARGET_SECMARK is not set
++# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set
++# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set
++
++#
++# Xtables matches
++#
++# CONFIG_NETFILTER_XT_MATCH_ADDRTYPE is not set
++# CONFIG_NETFILTER_XT_MATCH_BPF is not set
++# CONFIG_NETFILTER_XT_MATCH_CGROUP is not set
++# CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set
++# CONFIG_NETFILTER_XT_MATCH_COMMENT is not set
++# CONFIG_NETFILTER_XT_MATCH_CONNBYTES is not set
++# CONFIG_NETFILTER_XT_MATCH_CONNLABEL is not set
++# CONFIG_NETFILTER_XT_MATCH_CONNLIMIT is not set
++# CONFIG_NETFILTER_XT_MATCH_CONNMARK is not set
++# CONFIG_NETFILTER_XT_MATCH_CONNTRACK is not set
++# CONFIG_NETFILTER_XT_MATCH_CPU is not set
++# CONFIG_NETFILTER_XT_MATCH_DCCP is not set
++# CONFIG_NETFILTER_XT_MATCH_DEVGROUP is not set
++# CONFIG_NETFILTER_XT_MATCH_DSCP is not set
++# CONFIG_NETFILTER_XT_MATCH_ECN is not set
++# CONFIG_NETFILTER_XT_MATCH_ESP is not set
++# CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set
++# CONFIG_NETFILTER_XT_MATCH_HELPER is not set
++# CONFIG_NETFILTER_XT_MATCH_HL is not set
++# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set
++# CONFIG_NETFILTER_XT_MATCH_IPRANGE is not set
++# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
++# CONFIG_NETFILTER_XT_MATCH_LENGTH is not set
++# CONFIG_NETFILTER_XT_MATCH_LIMIT is not set
++# CONFIG_NETFILTER_XT_MATCH_MAC is not set
++# CONFIG_NETFILTER_XT_MATCH_MARK is not set
++# CONFIG_NETFILTER_XT_MATCH_MULTIPORT is not set
++# CONFIG_NETFILTER_XT_MATCH_NFACCT is not set
++# CONFIG_NETFILTER_XT_MATCH_OWNER is not set
++# CONFIG_NETFILTER_XT_MATCH_POLICY is not set
++# CONFIG_NETFILTER_XT_MATCH_PHYSDEV is not set
++# CONFIG_NETFILTER_XT_MATCH_PKTTYPE is not set
++# CONFIG_NETFILTER_XT_MATCH_QUOTA is not set
++# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set
++# CONFIG_NETFILTER_XT_MATCH_REALM is not set
++# CONFIG_NETFILTER_XT_MATCH_RECENT is not set
++# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
++# CONFIG_NETFILTER_XT_MATCH_STATE is not set
++# CONFIG_NETFILTER_XT_MATCH_STATISTIC is not set
++# CONFIG_NETFILTER_XT_MATCH_STRING is not set
++# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set
++# CONFIG_NETFILTER_XT_MATCH_TIME is not set
++# CONFIG_NETFILTER_XT_MATCH_U32 is not set
++# CONFIG_IP_SET is not set
++# CONFIG_IP_VS is not set
++
++#
++# IP: Netfilter Configuration
++#
++CONFIG_NF_DEFRAG_IPV4=m
++CONFIG_NF_CONNTRACK_IPV4=m
++# CONFIG_NF_SOCKET_IPV4 is not set
++# CONFIG_NF_DUP_IPV4 is not set
++# CONFIG_NF_LOG_ARP is not set
++# CONFIG_NF_LOG_IPV4 is not set
++# CONFIG_NF_REJECT_IPV4 is not set
++CONFIG_NF_NAT_IPV4=m
++# CONFIG_NF_NAT_PPTP is not set
++# CONFIG_NF_NAT_H323 is not set
++CONFIG_IP_NF_IPTABLES=m
++# CONFIG_IP_NF_MATCH_AH is not set
++# CONFIG_IP_NF_MATCH_ECN is not set
++# CONFIG_IP_NF_MATCH_RPFILTER is not set
++# CONFIG_IP_NF_MATCH_TTL is not set
++# CONFIG_IP_NF_FILTER is not set
++# CONFIG_IP_NF_TARGET_SYNPROXY is not set
++CONFIG_IP_NF_NAT=m
++CONFIG_IP_NF_TARGET_MASQUERADE=m
++# CONFIG_IP_NF_TARGET_NETMAP is not set
++# CONFIG_IP_NF_TARGET_REDIRECT is not set
++CONFIG_IP_NF_MANGLE=m
++# CONFIG_IP_NF_TARGET_CLUSTERIP is not set
++# CONFIG_IP_NF_TARGET_ECN is not set
++# CONFIG_IP_NF_TARGET_TTL is not set
++# CONFIG_IP_NF_RAW is not set
++# CONFIG_IP_NF_SECURITY is not set
++# CONFIG_IP_NF_ARPTABLES is not set
++
++#
++# IPv6: Netfilter Configuration
++#
++CONFIG_NF_DEFRAG_IPV6=m
++CONFIG_NF_CONNTRACK_IPV6=m
++# CONFIG_NF_SOCKET_IPV6 is not set
++# CONFIG_NF_DUP_IPV6 is not set
++# CONFIG_NF_REJECT_IPV6 is not set
++# CONFIG_NF_LOG_IPV6 is not set
++CONFIG_NF_NAT_IPV6=m
++CONFIG_IP6_NF_IPTABLES=m
++# CONFIG_IP6_NF_MATCH_AH is not set
++# CONFIG_IP6_NF_MATCH_EUI64 is not set
++# CONFIG_IP6_NF_MATCH_FRAG is not set
++# CONFIG_IP6_NF_MATCH_OPTS is not set
++# CONFIG_IP6_NF_MATCH_HL is not set
++# CONFIG_IP6_NF_MATCH_IPV6HEADER is not set
++# CONFIG_IP6_NF_MATCH_MH is not set
++# CONFIG_IP6_NF_MATCH_RPFILTER is not set
++# CONFIG_IP6_NF_MATCH_RT is not set
++# CONFIG_IP6_NF_MATCH_SRH is not set
++# CONFIG_IP6_NF_TARGET_HL is not set
++# CONFIG_IP6_NF_FILTER is not set
++# CONFIG_IP6_NF_TARGET_SYNPROXY is not set
++CONFIG_IP6_NF_MANGLE=m
++# CONFIG_IP6_NF_RAW is not set
++# CONFIG_IP6_NF_SECURITY is not set
++CONFIG_IP6_NF_NAT=m
++CONFIG_IP6_NF_TARGET_MASQUERADE=m
++# CONFIG_IP6_NF_TARGET_NPT is not set
++# CONFIG_BRIDGE_NF_EBTABLES is not set
++# CONFIG_IP_DCCP is not set
++# CONFIG_IP_SCTP is not set
++# CONFIG_RDS is not set
++# CONFIG_TIPC is not set
++# CONFIG_ATM is not set
++# CONFIG_L2TP is not set
++CONFIG_STP=y
++CONFIG_BRIDGE=y
++CONFIG_BRIDGE_IGMP_SNOOPING=y
++# CONFIG_BRIDGE_VLAN_FILTERING is not set
++CONFIG_HAVE_NET_DSA=y
++# CONFIG_NET_DSA is not set
++CONFIG_VLAN_8021Q=y
++# CONFIG_VLAN_8021Q_GVRP is not set
++# CONFIG_VLAN_8021Q_MVRP is not set
++# CONFIG_DECNET is not set
++CONFIG_LLC=y
++# CONFIG_LLC2 is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_PHONET is not set
++# CONFIG_6LOWPAN is not set
++# CONFIG_IEEE802154 is not set
++CONFIG_NET_SCHED=y
++
++#
++# Queueing/Scheduling
++#
++# CONFIG_NET_SCH_CBQ is not set
++# CONFIG_NET_SCH_HTB is not set
++# CONFIG_NET_SCH_HFSC is not set
++# CONFIG_NET_SCH_PRIO is not set
++# CONFIG_NET_SCH_MULTIQ is not set
++# CONFIG_NET_SCH_RED is not set
++# CONFIG_NET_SCH_SFB is not set
++# CONFIG_NET_SCH_SFQ is not set
++# CONFIG_NET_SCH_TEQL is not set
++# CONFIG_NET_SCH_TBF is not set
++# CONFIG_NET_SCH_CBS is not set
++# CONFIG_NET_SCH_GRED is not set
++# CONFIG_NET_SCH_DSMARK is not set
++# CONFIG_NET_SCH_NETEM is not set
++# CONFIG_NET_SCH_DRR is not set
++# CONFIG_NET_SCH_MQPRIO is not set
++# CONFIG_NET_SCH_CHOKE is not set
++# CONFIG_NET_SCH_QFQ is not set
++# CONFIG_NET_SCH_CODEL is not set
++# CONFIG_NET_SCH_FQ_CODEL is not set
++# CONFIG_NET_SCH_FQ is not set
++# CONFIG_NET_SCH_HHF is not set
++# CONFIG_NET_SCH_PIE is not set
++# CONFIG_NET_SCH_INGRESS is not set
++# CONFIG_NET_SCH_PLUG is not set
++# CONFIG_NET_SCH_DEFAULT is not set
++
++#
++# Classification
++#
++CONFIG_NET_CLS=y
++# CONFIG_NET_CLS_BASIC is not set
++# CONFIG_NET_CLS_TCINDEX is not set
++# CONFIG_NET_CLS_ROUTE4 is not set
++# CONFIG_NET_CLS_FW is not set
++# CONFIG_NET_CLS_U32 is not set
++# CONFIG_NET_CLS_RSVP is not set
++# CONFIG_NET_CLS_RSVP6 is not set
++# CONFIG_NET_CLS_FLOW is not set
++# CONFIG_NET_CLS_CGROUP is not set
++# CONFIG_NET_CLS_BPF is not set
++# CONFIG_NET_CLS_FLOWER is not set
++# CONFIG_NET_CLS_MATCHALL is not set
++CONFIG_NET_EMATCH=y
++CONFIG_NET_EMATCH_STACK=32
++# CONFIG_NET_EMATCH_CMP is not set
++# CONFIG_NET_EMATCH_NBYTE is not set
++# CONFIG_NET_EMATCH_U32 is not set
++# CONFIG_NET_EMATCH_META is not set
++# CONFIG_NET_EMATCH_TEXT is not set
++CONFIG_NET_CLS_ACT=y
++# CONFIG_NET_ACT_POLICE is not set
++# CONFIG_NET_ACT_GACT is not set
++# CONFIG_NET_ACT_MIRRED is not set
++# CONFIG_NET_ACT_SAMPLE is not set
++# CONFIG_NET_ACT_IPT is not set
++# CONFIG_NET_ACT_NAT is not set
++# CONFIG_NET_ACT_PEDIT is not set
++# CONFIG_NET_ACT_SIMP is not set
++# CONFIG_NET_ACT_SKBEDIT is not set
++# CONFIG_NET_ACT_CSUM is not set
++# CONFIG_NET_ACT_VLAN is not set
++# CONFIG_NET_ACT_BPF is not set
++# CONFIG_NET_ACT_SKBMOD is not set
++# CONFIG_NET_ACT_IFE is not set
++# CONFIG_NET_ACT_TUNNEL_KEY is not set
++CONFIG_NET_SCH_FIFO=y
++# CONFIG_DCB is not set
++CONFIG_DNS_RESOLVER=y
++# CONFIG_BATMAN_ADV is not set
++# CONFIG_OPENVSWITCH is not set
++# CONFIG_VSOCKETS is not set
++CONFIG_NETLINK_DIAG=y
++# CONFIG_MPLS is not set
++# CONFIG_NET_NSH is not set
++# CONFIG_HSR is not set
++# CONFIG_NET_SWITCHDEV is not set
++# CONFIG_NET_L3_MASTER_DEV is not set
++# CONFIG_NET_NCSI is not set
++CONFIG_RPS=y
++CONFIG_RFS_ACCEL=y
++CONFIG_XPS=y
++# CONFIG_CGROUP_NET_PRIO is not set
++# CONFIG_CGROUP_NET_CLASSID is not set
++CONFIG_NET_RX_BUSY_POLL=y
++CONFIG_BQL=y
++# CONFIG_BPF_JIT is not set
++# CONFIG_BPF_STREAM_PARSER is not set
++CONFIG_NET_FLOW_LIMIT=y
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_NET_DROP_MONITOR is not set
++CONFIG_HAMRADIO=y
++
++#
++# Packet Radio protocols
++#
++# CONFIG_AX25 is not set
++# CONFIG_CAN is not set
++# CONFIG_BT is not set
++# CONFIG_AF_RXRPC is not set
++# CONFIG_AF_KCM is not set
++# CONFIG_STREAM_PARSER is not set
++CONFIG_FIB_RULES=y
++CONFIG_WIRELESS=y
++# CONFIG_CFG80211 is not set
++# CONFIG_LIB80211 is not set
++
++#
++# CFG80211 needs to be enabled for MAC80211
++#
++CONFIG_MAC80211_STA_HASH_MAX_SIZE=0
++# CONFIG_WIMAX is not set
++CONFIG_RFKILL=y
++CONFIG_RFKILL_LEDS=y
++CONFIG_RFKILL_INPUT=y
++# CONFIG_NET_9P is not set
++# CONFIG_CAIF is not set
++# CONFIG_CEPH_LIB is not set
++# CONFIG_NFC is not set
++# CONFIG_PSAMPLE is not set
++# CONFIG_NET_IFE is not set
++# CONFIG_LWTUNNEL is not set
++CONFIG_DST_CACHE=y
++CONFIG_GRO_CELLS=y
++# CONFIG_NET_DEVLINK is not set
++CONFIG_MAY_USE_DEVLINK=y
++CONFIG_HAVE_EBPF_JIT=y
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_UEVENT_HELPER=y
++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
++CONFIG_DEVTMPFS=y
++CONFIG_DEVTMPFS_MOUNT=y
++CONFIG_STANDALONE=y
++# CONFIG_PREVENT_FIRMWARE_BUILD is not set
++CONFIG_FW_LOADER=y
++CONFIG_EXTRA_FIRMWARE=""
++# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set
++CONFIG_ALLOW_DEV_COREDUMP=y
++# CONFIG_DEBUG_DRIVER is not set
++# CONFIG_DEBUG_DEVRES is not set
++# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set
++# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set
++# CONFIG_SYS_HYPERVISOR is not set
++# CONFIG_GENERIC_CPU_DEVICES is not set
++CONFIG_GENERIC_CPU_AUTOPROBE=y
++CONFIG_GENERIC_CPU_VULNERABILITIES=y
++CONFIG_REGMAP=y
++CONFIG_REGMAP_I2C=y
++CONFIG_DMA_SHARED_BUFFER=y
++# CONFIG_DMA_FENCE_TRACE is not set
++# CONFIG_DMA_CMA is not set
++
++#
++# Bus devices
++#
++# CONFIG_CONNECTOR is not set
++# CONFIG_MTD is not set
++# CONFIG_OF is not set
++CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y
++CONFIG_PARPORT=y
++CONFIG_PARPORT_PC=y
++CONFIG_PARPORT_SERIAL=y
++# CONFIG_PARPORT_PC_FIFO is not set
++# CONFIG_PARPORT_PC_SUPERIO is not set
++# CONFIG_PARPORT_GSC is not set
++# CONFIG_PARPORT_AX88796 is not set
++# CONFIG_PARPORT_1284 is not set
++CONFIG_PNP=y
++CONFIG_PNP_DEBUG_MESSAGES=y
++
++#
++# Protocols
++#
++CONFIG_PNPACPI=y
++CONFIG_BLK_DEV=y
++# CONFIG_BLK_DEV_NULL_BLK is not set
++# CONFIG_BLK_DEV_FD is not set
++CONFIG_CDROM=y
++# CONFIG_PARIDE is not set
++# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set
++# CONFIG_ZRAM is not set
++# CONFIG_BLK_DEV_DAC960 is not set
++# CONFIG_BLK_DEV_UMEM is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=y
++CONFIG_BLK_DEV_LOOP_MIN_COUNT=8
++# CONFIG_BLK_DEV_CRYPTOLOOP is not set
++# CONFIG_BLK_DEV_DRBD is not set
++# CONFIG_BLK_DEV_NBD is not set
++# CONFIG_BLK_DEV_SKD is not set
++# CONFIG_BLK_DEV_SX8 is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=16384
++# CONFIG_CDROM_PKTCDVD is not set
++# CONFIG_ATA_OVER_ETH is not set
++# CONFIG_BLK_DEV_RBD is not set
++# CONFIG_BLK_DEV_RSXX is not set
++
++#
++# NVME Support
++#
++# CONFIG_BLK_DEV_NVME is not set
++# CONFIG_NVME_FC is not set
++# CONFIG_NVME_TARGET is not set
++
++#
++# Misc devices
++#
++# CONFIG_SENSORS_LIS3LV02D is not set
++# CONFIG_AD525X_DPOT is not set
++# CONFIG_DUMMY_IRQ is not set
++# CONFIG_IBM_ASM is not set
++# CONFIG_PHANTOM is not set
++# CONFIG_SGI_IOC4 is not set
++# CONFIG_TIFM_CORE is not set
++# CONFIG_ICS932S401 is not set
++# CONFIG_ENCLOSURE_SERVICES is not set
++# CONFIG_HP_ILO is not set
++# CONFIG_APDS9802ALS is not set
++# CONFIG_ISL29003 is not set
++# CONFIG_ISL29020 is not set
++# CONFIG_SENSORS_TSL2550 is not set
++# CONFIG_SENSORS_BH1770 is not set
++# CONFIG_SENSORS_APDS990X is not set
++# CONFIG_HMC6352 is not set
++# CONFIG_DS1682 is not set
++# CONFIG_USB_SWITCH_FSA9480 is not set
++# CONFIG_SRAM is not set
++# CONFIG_PCI_ENDPOINT_TEST is not set
++# CONFIG_MISC_RTSX is not set
++# CONFIG_C2PORT is not set
++
++#
++# EEPROM support
++#
++# CONFIG_EEPROM_AT24 is not set
++# CONFIG_EEPROM_LEGACY is not set
++# CONFIG_EEPROM_MAX6875 is not set
++# CONFIG_EEPROM_93CX6 is not set
++# CONFIG_EEPROM_IDT_89HPESX is not set
++# CONFIG_CB710_CORE is not set
++
++#
++# Texas Instruments shared transport line discipline
++#
++# CONFIG_SENSORS_LIS3_I2C is not set
++# CONFIG_ALTERA_STAPL is not set
++# CONFIG_INTEL_MEI is not set
++# CONFIG_INTEL_MEI_ME is not set
++# CONFIG_INTEL_MEI_TXE is not set
++# CONFIG_VMWARE_VMCI is not set
++
++#
++# Intel MIC & related support
++#
++
++#
++# Intel MIC Bus Driver
++#
++# CONFIG_INTEL_MIC_BUS is not set
++
++#
++# SCIF Bus Driver
++#
++# CONFIG_SCIF_BUS is not set
++
++#
++# VOP Bus Driver
++#
++# CONFIG_VOP_BUS is not set
++
++#
++# Intel MIC Host Driver
++#
++
++#
++# Intel MIC Card Driver
++#
++
++#
++# SCIF Driver
++#
++
++#
++# Intel MIC Coprocessor State Management (COSM) Drivers
++#
++
++#
++# VOP Driver
++#
++# CONFIG_GENWQE is not set
++# CONFIG_ECHO is not set
++# CONFIG_CXL_BASE is not set
++# CONFIG_CXL_AFU_DRIVER_OPS is not set
++# CONFIG_CXL_LIB is not set
++# CONFIG_OCXL_BASE is not set
++# CONFIG_MISC_RTSX_PCI is not set
++# CONFIG_MISC_RTSX_USB is not set
++CONFIG_HAVE_IDE=y
++# CONFIG_IDE is not set
++
++#
++# SCSI device support
++#
++CONFIG_SCSI_MOD=y
++# CONFIG_RAID_ATTRS is not set
++CONFIG_SCSI=y
++CONFIG_SCSI_DMA=y
++# CONFIG_SCSI_NETLINK is not set
++# CONFIG_SCSI_MQ_DEFAULT is not set
++CONFIG_SCSI_PROC_FS=y
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=y
++# CONFIG_CHR_DEV_ST is not set
++# CONFIG_CHR_DEV_OSST is not set
++CONFIG_BLK_DEV_SR=y
++# CONFIG_BLK_DEV_SR_VENDOR is not set
++CONFIG_CHR_DEV_SG=y
++# CONFIG_CHR_DEV_SCH is not set
++CONFIG_SCSI_CONSTANTS=y
++# CONFIG_SCSI_LOGGING is not set
++# CONFIG_SCSI_SCAN_ASYNC is not set
++
++#
++# SCSI Transports
++#
++CONFIG_SCSI_SPI_ATTRS=y
++# CONFIG_SCSI_FC_ATTRS is not set
++CONFIG_SCSI_ISCSI_ATTRS=y
++# CONFIG_SCSI_SAS_ATTRS is not set
++# CONFIG_SCSI_SAS_LIBSAS is not set
++# CONFIG_SCSI_SRP_ATTRS is not set
++# CONFIG_SCSI_LOWLEVEL is not set
++# CONFIG_SCSI_DH is not set
++# CONFIG_SCSI_OSD_INITIATOR is not set
++CONFIG_ATA=y
++# CONFIG_ATA_NONSTANDARD is not set
++CONFIG_ATA_VERBOSE_ERROR=y
++CONFIG_ATA_ACPI=y
++# CONFIG_SATA_ZPODD is not set
++CONFIG_SATA_PMP=y
++
++#
++# Controllers with non-SFF native interface
++#
++CONFIG_SATA_AHCI=y
++CONFIG_SATA_MOBILE_LPM_POLICY=0
++CONFIG_SATA_AHCI_PLATFORM=y
++# CONFIG_SATA_INIC162X is not set
++# CONFIG_SATA_ACARD_AHCI is not set
++CONFIG_SATA_SIL24=y
++CONFIG_ATA_SFF=y
++
++#
++# SFF controllers with custom DMA interface
++#
++# CONFIG_PDC_ADMA is not set
++# CONFIG_SATA_QSTOR is not set
++CONFIG_SATA_SX4=y
++CONFIG_ATA_BMDMA=y
++
++#
++# SATA SFF controllers with BMDMA
++#
++CONFIG_ATA_PIIX=y
++# CONFIG_SATA_DWC is not set
++# CONFIG_SATA_MV is not set
++# CONFIG_SATA_NV is not set
++CONFIG_SATA_PROMISE=y
++CONFIG_SATA_SIL=y
++# CONFIG_SATA_SIS is not set
++# CONFIG_SATA_SVW is not set
++# CONFIG_SATA_ULI is not set
++# CONFIG_SATA_VIA is not set
++# CONFIG_SATA_VITESSE is not set
++
++#
++# PATA SFF controllers with BMDMA
++#
++# CONFIG_PATA_ALI is not set
++CONFIG_PATA_AMD=y
++# CONFIG_PATA_ARTOP is not set
++CONFIG_PATA_ATIIXP=y
++# CONFIG_PATA_ATP867X is not set
++# CONFIG_PATA_CMD64X is not set
++# CONFIG_PATA_CYPRESS is not set
++# CONFIG_PATA_EFAR is not set
++# CONFIG_PATA_HPT366 is not set
++# CONFIG_PATA_HPT37X is not set
++# CONFIG_PATA_HPT3X2N is not set
++# CONFIG_PATA_HPT3X3 is not set
++# CONFIG_PATA_IT8213 is not set
++# CONFIG_PATA_IT821X is not set
++# CONFIG_PATA_JMICRON is not set
++# CONFIG_PATA_MARVELL is not set
++# CONFIG_PATA_NETCELL is not set
++# CONFIG_PATA_NINJA32 is not set
++# CONFIG_PATA_NS87415 is not set
++CONFIG_PATA_OLDPIIX=y
++# CONFIG_PATA_OPTIDMA is not set
++# CONFIG_PATA_PDC2027X is not set
++# CONFIG_PATA_PDC_OLD is not set
++# CONFIG_PATA_RADISYS is not set
++# CONFIG_PATA_RDC is not set
++CONFIG_PATA_SCH=y
++CONFIG_PATA_SERVERWORKS=y
++# CONFIG_PATA_SIL680 is not set
++# CONFIG_PATA_SIS is not set
++# CONFIG_PATA_TOSHIBA is not set
++# CONFIG_PATA_TRIFLEX is not set
++# CONFIG_PATA_VIA is not set
++# CONFIG_PATA_WINBOND is not set
++
++#
++# PIO-only SFF controllers
++#
++# CONFIG_PATA_CMD640_PCI is not set
++# CONFIG_PATA_MPIIX is not set
++# CONFIG_PATA_NS87410 is not set
++# CONFIG_PATA_OPTI is not set
++# CONFIG_PATA_PLATFORM is not set
++# CONFIG_PATA_RZ1000 is not set
++
++#
++# Generic fallback / legacy drivers
++#
++# CONFIG_PATA_ACPI is not set
++CONFIG_ATA_GENERIC=y
++# CONFIG_PATA_LEGACY is not set
++CONFIG_MD=y
++CONFIG_BLK_DEV_MD=y
++CONFIG_MD_AUTODETECT=y
++# CONFIG_MD_LINEAR is not set
++# CONFIG_MD_RAID0 is not set
++# CONFIG_MD_RAID1 is not set
++# CONFIG_MD_RAID10 is not set
++# CONFIG_MD_RAID456 is not set
++# CONFIG_MD_MULTIPATH is not set
++# CONFIG_MD_FAULTY is not set
++# CONFIG_BCACHE is not set
++CONFIG_BLK_DEV_DM_BUILTIN=y
++CONFIG_BLK_DEV_DM=y
++# CONFIG_DM_MQ_DEFAULT is not set
++# CONFIG_DM_DEBUG is not set
++# CONFIG_DM_UNSTRIPED is not set
++# CONFIG_DM_CRYPT is not set
++# CONFIG_DM_SNAPSHOT is not set
++# CONFIG_DM_THIN_PROVISIONING is not set
++# CONFIG_DM_CACHE is not set
++# CONFIG_DM_ERA is not set
++CONFIG_DM_MIRROR=y
++# CONFIG_DM_LOG_USERSPACE is not set
++# CONFIG_DM_RAID is not set
++CONFIG_DM_ZERO=y
++# CONFIG_DM_MULTIPATH is not set
++# CONFIG_DM_DELAY is not set
++# CONFIG_DM_UEVENT is not set
++# CONFIG_DM_FLAKEY is not set
++# CONFIG_DM_VERITY is not set
++# CONFIG_DM_SWITCH is not set
++# CONFIG_DM_LOG_WRITES is not set
++# CONFIG_DM_INTEGRITY is not set
++# CONFIG_TARGET_CORE is not set
++# CONFIG_FUSION is not set
++
++#
++# IEEE 1394 (FireWire) support
++#
++# CONFIG_FIREWIRE is not set
++# CONFIG_FIREWIRE_NOSY is not set
++# CONFIG_MACINTOSH_DRIVERS is not set
++CONFIG_NETDEVICES=y
++CONFIG_MII=y
++CONFIG_NET_CORE=y
++# CONFIG_BONDING is not set
++# CONFIG_DUMMY is not set
++# CONFIG_EQUALIZER is not set
++# CONFIG_NET_FC is not set
++# CONFIG_IFB is not set
++# CONFIG_NET_TEAM is not set
++CONFIG_MACVLAN=y
++# CONFIG_MACVTAP is not set
++# CONFIG_VXLAN is not set
++# CONFIG_MACSEC is not set
++CONFIG_NETCONSOLE=y
++CONFIG_NETCONSOLE_DYNAMIC=y
++CONFIG_NETPOLL=y
++CONFIG_NET_POLL_CONTROLLER=y
++# CONFIG_RIONET is not set
++# CONFIG_TUN is not set
++# CONFIG_TUN_VNET_CROSS_LE is not set
++CONFIG_VETH=y
++# CONFIG_NLMON is not set
++# CONFIG_ARCNET is not set
++
++#
++# CAIF transport drivers
++#
++
++#
++# Distributed Switch Architecture drivers
++#
++CONFIG_ETHERNET=y
++CONFIG_MDIO=y
++CONFIG_NET_VENDOR_3COM=y
++# CONFIG_VORTEX is not set
++# CONFIG_TYPHOON is not set
++CONFIG_NET_VENDOR_ADAPTEC=y
++# CONFIG_ADAPTEC_STARFIRE is not set
++CONFIG_NET_VENDOR_AGERE=y
++# CONFIG_ET131X is not set
++CONFIG_NET_VENDOR_ALACRITECH=y
++# CONFIG_SLICOSS is not set
++CONFIG_NET_VENDOR_ALTEON=y
++# CONFIG_ACENIC is not set
++# CONFIG_ALTERA_TSE is not set
++CONFIG_NET_VENDOR_AMAZON=y
++# CONFIG_ENA_ETHERNET is not set
++CONFIG_NET_VENDOR_AMD=y
++# CONFIG_AMD8111_ETH is not set
++# CONFIG_PCNET32 is not set
++# CONFIG_AMD_XGBE is not set
++# CONFIG_AMD_XGBE_HAVE_ECC is not set
++CONFIG_NET_VENDOR_AQUANTIA=y
++# CONFIG_AQTION is not set
++CONFIG_NET_VENDOR_ARC=y
++CONFIG_NET_VENDOR_ATHEROS=y
++# CONFIG_ATL2 is not set
++# CONFIG_ATL1 is not set
++# CONFIG_ATL1E is not set
++# CONFIG_ATL1C is not set
++CONFIG_ALX=y
++# CONFIG_NET_VENDOR_AURORA is not set
++CONFIG_NET_CADENCE=y
++# CONFIG_MACB is not set
++CONFIG_NET_VENDOR_BROADCOM=y
++# CONFIG_B44 is not set
++CONFIG_BNX2=y
++# CONFIG_CNIC is not set
++CONFIG_TIGON3=y
++CONFIG_TIGON3_HWMON=y
++# CONFIG_BNX2X is not set
++# CONFIG_BNXT is not set
++CONFIG_NET_VENDOR_BROCADE=y
++# CONFIG_BNA is not set
++CONFIG_NET_VENDOR_CAVIUM=y
++# CONFIG_THUNDER_NIC_PF is not set
++# CONFIG_THUNDER_NIC_VF is not set
++# CONFIG_THUNDER_NIC_BGX is not set
++# CONFIG_THUNDER_NIC_RGX is not set
++CONFIG_CAVIUM_PTP=y
++# CONFIG_LIQUIDIO is not set
++# CONFIG_LIQUIDIO_VF is not set
++CONFIG_NET_VENDOR_CHELSIO=y
++# CONFIG_CHELSIO_T1 is not set
++# CONFIG_CHELSIO_T3 is not set
++# CONFIG_CHELSIO_T4 is not set
++# CONFIG_CHELSIO_T4VF is not set
++CONFIG_NET_VENDOR_CISCO=y
++# CONFIG_ENIC is not set
++CONFIG_NET_VENDOR_CORTINA=y
++# CONFIG_CX_ECAT is not set
++# CONFIG_DNET is not set
++CONFIG_NET_VENDOR_DEC=y
++CONFIG_NET_TULIP=y
++# CONFIG_DE2104X is not set
++# CONFIG_TULIP is not set
++# CONFIG_DE4X5 is not set
++# CONFIG_WINBOND_840 is not set
++# CONFIG_DM9102 is not set
++# CONFIG_ULI526X is not set
++CONFIG_NET_VENDOR_DLINK=y
++# CONFIG_DL2K is not set
++# CONFIG_SUNDANCE is not set
++CONFIG_NET_VENDOR_EMULEX=y
++# CONFIG_BE2NET is not set
++CONFIG_NET_VENDOR_EZCHIP=y
++CONFIG_NET_VENDOR_EXAR=y
++# CONFIG_S2IO is not set
++# CONFIG_VXGE is not set
++CONFIG_NET_VENDOR_HP=y
++# CONFIG_HP100 is not set
++CONFIG_NET_VENDOR_HUAWEI=y
++# CONFIG_HINIC is not set
++CONFIG_NET_VENDOR_INTEL=y
++CONFIG_E100=y
++CONFIG_E1000=y
++CONFIG_E1000E=y
++CONFIG_E1000E_HWTS=y
++CONFIG_IGB=y
++CONFIG_IGB_HWMON=y
++CONFIG_IGBVF=y
++CONFIG_IXGB=y
++CONFIG_IXGBE=y
++CONFIG_IXGBE_HWMON=y
++# CONFIG_IXGBEVF is not set
++# CONFIG_I40E is not set
++# CONFIG_I40EVF is not set
++# CONFIG_FM10K is not set
++CONFIG_NET_VENDOR_I825XX=y
++# CONFIG_JME is not set
++CONFIG_NET_VENDOR_MARVELL=y
++# CONFIG_MVMDIO is not set
++# CONFIG_SKGE is not set
++CONFIG_SKY2=y
++# CONFIG_SKY2_DEBUG is not set
++CONFIG_NET_VENDOR_MELLANOX=y
++# CONFIG_MLX4_EN is not set
++# CONFIG_MLX4_CORE is not set
++# CONFIG_MLX5_CORE is not set
++# CONFIG_MLXSW_CORE is not set
++# CONFIG_MLXFW is not set
++CONFIG_NET_VENDOR_MICREL=y
++# CONFIG_KS8842 is not set
++# CONFIG_KS8851_MLL is not set
++# CONFIG_KSZ884X_PCI is not set
++CONFIG_NET_VENDOR_MYRI=y
++# CONFIG_MYRI10GE is not set
++# CONFIG_FEALNX is not set
++CONFIG_NET_VENDOR_NATSEMI=y
++# CONFIG_NATSEMI is not set
++# CONFIG_NS83820 is not set
++CONFIG_NET_VENDOR_NETRONOME=y
++# CONFIG_NFP is not set
++CONFIG_NET_VENDOR_8390=y
++# CONFIG_NE2K_PCI is not set
++CONFIG_NET_VENDOR_NVIDIA=y
++CONFIG_FORCEDETH=y
++CONFIG_NET_VENDOR_OKI=y
++# CONFIG_ETHOC is not set
++# CONFIG_NET_PACKET_ENGINE is not set
++CONFIG_NET_VENDOR_QLOGIC=y
++# CONFIG_QLA3XXX is not set
++# CONFIG_QLCNIC is not set
++# CONFIG_QLGE is not set
++# CONFIG_NETXEN_NIC is not set
++# CONFIG_QED is not set
++CONFIG_NET_VENDOR_QUALCOMM=y
++# CONFIG_QCOM_EMAC is not set
++# CONFIG_RMNET is not set
++CONFIG_NET_VENDOR_REALTEK=y
++# CONFIG_ATP is not set
++CONFIG_8139CP=y
++CONFIG_8139TOO=y
++CONFIG_8139TOO_PIO=y
++# CONFIG_8139TOO_TUNE_TWISTER is not set
++# CONFIG_8139TOO_8129 is not set
++# CONFIG_8139_OLD_RX_RESET is not set
++CONFIG_R8169=y
++CONFIG_NET_VENDOR_RENESAS=y
++CONFIG_NET_VENDOR_RDC=y
++# CONFIG_R6040 is not set
++CONFIG_NET_VENDOR_ROCKER=y
++CONFIG_NET_VENDOR_SAMSUNG=y
++# CONFIG_SXGBE_ETH is not set
++CONFIG_NET_VENDOR_SEEQ=y
++CONFIG_NET_VENDOR_SILAN=y
++# CONFIG_SC92031 is not set
++CONFIG_NET_VENDOR_SIS=y
++# CONFIG_SIS900 is not set
++# CONFIG_SIS190 is not set
++CONFIG_NET_VENDOR_SOLARFLARE=y
++# CONFIG_SFC is not set
++# CONFIG_SFC_FALCON is not set
++CONFIG_NET_VENDOR_SMSC=y
++# CONFIG_EPIC100 is not set
++# CONFIG_SMSC911X is not set
++# CONFIG_SMSC9420 is not set
++CONFIG_NET_VENDOR_SOCIONEXT=y
++CONFIG_NET_VENDOR_STMICRO=y
++# CONFIG_STMMAC_ETH is not set
++CONFIG_NET_VENDOR_SUN=y
++# CONFIG_HAPPYMEAL is not set
++# CONFIG_SUNGEM is not set
++# CONFIG_CASSINI is not set
++# CONFIG_NIU is not set
++CONFIG_NET_VENDOR_TEHUTI=y
++# CONFIG_TEHUTI is not set
++CONFIG_NET_VENDOR_TI=y
++# CONFIG_TI_CPSW_ALE is not set
++# CONFIG_TLAN is not set
++CONFIG_NET_VENDOR_VIA=y
++# CONFIG_VIA_RHINE is not set
++# CONFIG_VIA_VELOCITY is not set
++CONFIG_NET_VENDOR_WIZNET=y
++CONFIG_WIZNET_W5100=y
++CONFIG_WIZNET_W5300=y
++# CONFIG_WIZNET_BUS_DIRECT is not set
++# CONFIG_WIZNET_BUS_INDIRECT is not set
++CONFIG_WIZNET_BUS_ANY=y
++CONFIG_NET_VENDOR_SYNOPSYS=y
++# CONFIG_DWC_XLGMAC is not set
++CONFIG_FDDI=y
++# CONFIG_DEFXX is not set
++# CONFIG_SKFP is not set
++# CONFIG_HIPPI is not set
++# CONFIG_NET_SB1000 is not set
++CONFIG_MDIO_DEVICE=y
++CONFIG_MDIO_BUS=y
++# CONFIG_MDIO_BITBANG is not set
++# CONFIG_MDIO_THUNDER is not set
++CONFIG_PHYLIB=y
++# CONFIG_LED_TRIGGER_PHY is not set
++
++#
++# MII PHY device drivers
++#
++CONFIG_AMD_PHY=y
++# CONFIG_AQUANTIA_PHY is not set
++# CONFIG_AT803X_PHY is not set
++# CONFIG_BCM7XXX_PHY is not set
++# CONFIG_BCM87XX_PHY is not set
++# CONFIG_BROADCOM_PHY is not set
++# CONFIG_CICADA_PHY is not set
++# CONFIG_CORTINA_PHY is not set
++# CONFIG_DAVICOM_PHY is not set
++# CONFIG_DP83822_PHY is not set
++# CONFIG_DP83848_PHY is not set
++# CONFIG_DP83867_PHY is not set
++# CONFIG_FIXED_PHY is not set
++# CONFIG_ICPLUS_PHY is not set
++# CONFIG_INTEL_XWAY_PHY is not set
++# CONFIG_LSI_ET1011C_PHY is not set
++# CONFIG_LXT_PHY is not set
++# CONFIG_MARVELL_PHY is not set
++# CONFIG_MARVELL_10G_PHY is not set
++# CONFIG_MICREL_PHY is not set
++# CONFIG_MICROCHIP_PHY is not set
++# CONFIG_MICROSEMI_PHY is not set
++# CONFIG_NATIONAL_PHY is not set
++# CONFIG_QSEMI_PHY is not set
++# CONFIG_REALTEK_PHY is not set
++# CONFIG_RENESAS_PHY is not set
++# CONFIG_ROCKCHIP_PHY is not set
++# CONFIG_SMSC_PHY is not set
++# CONFIG_STE10XP is not set
++# CONFIG_TERANETICS_PHY is not set
++# CONFIG_VITESSE_PHY is not set
++# CONFIG_XILINX_GMII2RGMII is not set
++# CONFIG_PLIP is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++CONFIG_USB_NET_DRIVERS=m
++# CONFIG_USB_CATC is not set
++# CONFIG_USB_KAWETH is not set
++# CONFIG_USB_PEGASUS is not set
++# CONFIG_USB_RTL8150 is not set
++CONFIG_USB_RTL8152=m
++# CONFIG_USB_LAN78XX is not set
++CONFIG_USB_USBNET=m
++CONFIG_USB_NET_AX8817X=m
++CONFIG_USB_NET_AX88179_178A=m
++CONFIG_USB_NET_CDCETHER=m
++# CONFIG_USB_NET_CDC_EEM is not set
++CONFIG_USB_NET_CDC_NCM=m
++# CONFIG_USB_NET_HUAWEI_CDC_NCM is not set
++# CONFIG_USB_NET_CDC_MBIM is not set
++# CONFIG_USB_NET_DM9601 is not set
++# CONFIG_USB_NET_SR9700 is not set
++# CONFIG_USB_NET_SR9800 is not set
++# CONFIG_USB_NET_SMSC75XX is not set
++# CONFIG_USB_NET_SMSC95XX is not set
++# CONFIG_USB_NET_GL620A is not set
++CONFIG_USB_NET_NET1080=m
++# CONFIG_USB_NET_PLUSB is not set
++# CONFIG_USB_NET_MCS7830 is not set
++# CONFIG_USB_NET_RNDIS_HOST is not set
++CONFIG_USB_NET_CDC_SUBSET_ENABLE=m
++CONFIG_USB_NET_CDC_SUBSET=m
++# CONFIG_USB_ALI_M5632 is not set
++# CONFIG_USB_AN2720 is not set
++CONFIG_USB_BELKIN=y
++# CONFIG_USB_ARMLINUX is not set
++# CONFIG_USB_EPSON2888 is not set
++# CONFIG_USB_KC2190 is not set
++# CONFIG_USB_NET_ZAURUS is not set
++# CONFIG_USB_NET_CX82310_ETH is not set
++# CONFIG_USB_NET_KALMIA is not set
++# CONFIG_USB_NET_QMI_WWAN is not set
++# CONFIG_USB_HSO is not set
++# CONFIG_USB_NET_INT51X1 is not set
++# CONFIG_USB_IPHETH is not set
++# CONFIG_USB_SIERRA_NET is not set
++# CONFIG_USB_VL600 is not set
++# CONFIG_USB_NET_CH9200 is not set
++CONFIG_WLAN=y
++# CONFIG_WIRELESS_WDS is not set
++CONFIG_WLAN_VENDOR_ADMTEK=y
++CONFIG_WLAN_VENDOR_ATH=y
++# CONFIG_ATH_DEBUG is not set
++# CONFIG_ATH5K_PCI is not set
++CONFIG_WLAN_VENDOR_ATMEL=y
++CONFIG_WLAN_VENDOR_BROADCOM=y
++CONFIG_WLAN_VENDOR_CISCO=y
++CONFIG_WLAN_VENDOR_INTEL=y
++CONFIG_WLAN_VENDOR_INTERSIL=y
++# CONFIG_HOSTAP is not set
++# CONFIG_PRISM54 is not set
++CONFIG_WLAN_VENDOR_MARVELL=y
++CONFIG_WLAN_VENDOR_MEDIATEK=y
++CONFIG_WLAN_VENDOR_RALINK=y
++CONFIG_WLAN_VENDOR_REALTEK=y
++CONFIG_WLAN_VENDOR_RSI=y
++CONFIG_WLAN_VENDOR_ST=y
++CONFIG_WLAN_VENDOR_TI=y
++CONFIG_WLAN_VENDOR_ZYDAS=y
++CONFIG_WLAN_VENDOR_QUANTENNA=y
++
++#
++# Enable WiMAX (Networking options) to see the WiMAX drivers
++#
++# CONFIG_WAN is not set
++# CONFIG_VMXNET3 is not set
++# CONFIG_FUJITSU_ES is not set
++# CONFIG_NETDEVSIM is not set
++# CONFIG_ISDN is not set
++# CONFIG_NVM is not set
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++CONFIG_INPUT_LEDS=y
++CONFIG_INPUT_FF_MEMLESS=y
++CONFIG_INPUT_POLLDEV=y
++CONFIG_INPUT_SPARSEKMAP=y
++# CONFIG_INPUT_MATRIXKMAP is not set
++
++#
++# Userland interfaces
++#
++CONFIG_INPUT_MOUSEDEV=y
++# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
++CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
++CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
++# CONFIG_INPUT_JOYDEV is not set
++CONFIG_INPUT_EVDEV=y
++# CONFIG_INPUT_EVBUG is not set
++
++#
++# Input Device Drivers
++#
++CONFIG_INPUT_KEYBOARD=y
++# CONFIG_KEYBOARD_ADP5588 is not set
++# CONFIG_KEYBOARD_ADP5589 is not set
++CONFIG_KEYBOARD_ATKBD=y
++# CONFIG_KEYBOARD_QT1070 is not set
++# CONFIG_KEYBOARD_QT2160 is not set
++# CONFIG_KEYBOARD_DLINK_DIR685 is not set
++# CONFIG_KEYBOARD_LKKBD is not set
++# CONFIG_KEYBOARD_TCA6416 is not set
++# CONFIG_KEYBOARD_TCA8418 is not set
++# CONFIG_KEYBOARD_LM8323 is not set
++# CONFIG_KEYBOARD_LM8333 is not set
++# CONFIG_KEYBOARD_MAX7359 is not set
++# CONFIG_KEYBOARD_MCS is not set
++# CONFIG_KEYBOARD_MPR121 is not set
++# CONFIG_KEYBOARD_NEWTON is not set
++# CONFIG_KEYBOARD_OPENCORES is not set
++# CONFIG_KEYBOARD_SAMSUNG is not set
++# CONFIG_KEYBOARD_STOWAWAY is not set
++# CONFIG_KEYBOARD_SUNKBD is not set
++# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set
++# CONFIG_KEYBOARD_XTKBD is not set
++CONFIG_INPUT_MOUSE=y
++CONFIG_MOUSE_PS2=y
++CONFIG_MOUSE_PS2_ALPS=y
++CONFIG_MOUSE_PS2_BYD=y
++CONFIG_MOUSE_PS2_LOGIPS2PP=y
++CONFIG_MOUSE_PS2_SYNAPTICS=y
++CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS=y
++CONFIG_MOUSE_PS2_CYPRESS=y
++CONFIG_MOUSE_PS2_LIFEBOOK=y
++CONFIG_MOUSE_PS2_TRACKPOINT=y
++# CONFIG_MOUSE_PS2_ELANTECH is not set
++# CONFIG_MOUSE_PS2_SENTELIC is not set
++# CONFIG_MOUSE_PS2_TOUCHKIT is not set
++CONFIG_MOUSE_PS2_FOCALTECH=y
++# CONFIG_MOUSE_PS2_VMMOUSE is not set
++CONFIG_MOUSE_PS2_SMBUS=y
++# CONFIG_MOUSE_SERIAL is not set
++# CONFIG_MOUSE_APPLETOUCH is not set
++# CONFIG_MOUSE_BCM5974 is not set
++# CONFIG_MOUSE_CYAPA is not set
++# CONFIG_MOUSE_ELAN_I2C is not set
++# CONFIG_MOUSE_VSXXXAA is not set
++# CONFIG_MOUSE_SYNAPTICS_I2C is not set
++# CONFIG_MOUSE_SYNAPTICS_USB is not set
++# CONFIG_INPUT_JOYSTICK is not set
++# CONFIG_INPUT_TABLET is not set
++# CONFIG_INPUT_TOUCHSCREEN is not set
++# CONFIG_INPUT_MISC is not set
++# CONFIG_RMI4_CORE is not set
++
++#
++# Hardware I/O ports
++#
++CONFIG_SERIO=y
++CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y
++CONFIG_SERIO_I8042=y
++CONFIG_SERIO_SERPORT=y
++# CONFIG_SERIO_CT82C710 is not set
++# CONFIG_SERIO_PARKBD is not set
++# CONFIG_SERIO_PCIPS2 is not set
++CONFIG_SERIO_LIBPS2=y
++# CONFIG_SERIO_RAW is not set
++# CONFIG_SERIO_ALTERA_PS2 is not set
++# CONFIG_SERIO_PS2MULT is not set
++# CONFIG_SERIO_ARC_PS2 is not set
++# CONFIG_USERIO is not set
++# CONFIG_GAMEPORT is not set
++
++#
++# Character devices
++#
++CONFIG_TTY=y
++CONFIG_VT=y
++CONFIG_CONSOLE_TRANSLATIONS=y
++CONFIG_VT_CONSOLE=y
++CONFIG_VT_CONSOLE_SLEEP=y
++CONFIG_HW_CONSOLE=y
++CONFIG_VT_HW_CONSOLE_BINDING=y
++CONFIG_UNIX98_PTYS=y
++# CONFIG_LEGACY_PTYS is not set
++CONFIG_SERIAL_NONSTANDARD=y
++# CONFIG_ROCKETPORT is not set
++# CONFIG_CYCLADES is not set
++# CONFIG_MOXA_INTELLIO is not set
++# CONFIG_MOXA_SMARTIO is not set
++# CONFIG_SYNCLINK is not set
++# CONFIG_SYNCLINKMP is not set
++# CONFIG_SYNCLINK_GT is not set
++# CONFIG_NOZOMI is not set
++# CONFIG_ISI is not set
++# CONFIG_N_HDLC is not set
++# CONFIG_N_GSM is not set
++# CONFIG_TRACE_SINK is not set
++CONFIG_DEVMEM=y
++# CONFIG_DEVKMEM is not set
++
++#
++# Serial drivers
++#
++CONFIG_SERIAL_EARLYCON=y
++CONFIG_SERIAL_8250=y
++CONFIG_SERIAL_8250_DEPRECATED_OPTIONS=y
++CONFIG_SERIAL_8250_PNP=y
++# CONFIG_SERIAL_8250_FINTEK is not set
++CONFIG_SERIAL_8250_CONSOLE=y
++CONFIG_SERIAL_8250_DMA=y
++CONFIG_SERIAL_8250_PCI=y
++CONFIG_SERIAL_8250_EXAR=y
++CONFIG_SERIAL_8250_NR_UARTS=32
++CONFIG_SERIAL_8250_RUNTIME_UARTS=4
++CONFIG_SERIAL_8250_EXTENDED=y
++CONFIG_SERIAL_8250_MANY_PORTS=y
++CONFIG_SERIAL_8250_SHARE_IRQ=y
++CONFIG_SERIAL_8250_DETECT_IRQ=y
++CONFIG_SERIAL_8250_RSA=y
++# CONFIG_SERIAL_8250_FSL is not set
++# CONFIG_SERIAL_8250_DW is not set
++# CONFIG_SERIAL_8250_RT288X is not set
++CONFIG_SERIAL_8250_LPSS=y
++# CONFIG_SERIAL_8250_MID is not set
++# CONFIG_SERIAL_8250_MOXA is not set
++
++#
++# Non-8250 serial port support
++#
++# CONFIG_SERIAL_KGDB_NMI is not set
++# CONFIG_SERIAL_UARTLITE is not set
++CONFIG_SERIAL_CORE=y
++CONFIG_SERIAL_CORE_CONSOLE=y
++CONFIG_CONSOLE_POLL=y
++# CONFIG_SERIAL_JSM is not set
++# CONFIG_SERIAL_SCCNXP is not set
++# CONFIG_SERIAL_SC16IS7XX is not set
++# CONFIG_SERIAL_ALTERA_JTAGUART is not set
++# CONFIG_SERIAL_ALTERA_UART is not set
++# CONFIG_SERIAL_ARC is not set
++# CONFIG_SERIAL_RP2 is not set
++# CONFIG_SERIAL_FSL_LPUART is not set
++# CONFIG_SERIAL_DEV_BUS is not set
++# CONFIG_TTY_PRINTK is not set
++# CONFIG_PRINTER is not set
++# CONFIG_PPDEV is not set
++# CONFIG_IPMI_HANDLER is not set
++# CONFIG_HW_RANDOM is not set
++# CONFIG_NVRAM is not set
++# CONFIG_R3964 is not set
++# CONFIG_APPLICOM is not set
++# CONFIG_MWAVE is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_HPET is not set
++# CONFIG_HANGCHECK_TIMER is not set
++# CONFIG_TCG_TPM is not set
++# CONFIG_TELCLOCK is not set
++CONFIG_DEVPORT=y
++# CONFIG_XILLYBUS is not set
++
++#
++# I2C support
++#
++CONFIG_I2C=y
++CONFIG_ACPI_I2C_OPREGION=y
++CONFIG_I2C_BOARDINFO=y
++CONFIG_I2C_COMPAT=y
++# CONFIG_I2C_CHARDEV is not set
++# CONFIG_I2C_MUX is not set
++CONFIG_I2C_HELPER_AUTO=y
++CONFIG_I2C_SMBUS=y
++CONFIG_I2C_ALGOBIT=y
++
++#
++# I2C Hardware Bus support
++#
++
++#
++# PC SMBus host controller drivers
++#
++# CONFIG_I2C_ALI1535 is not set
++# CONFIG_I2C_ALI1563 is not set
++# CONFIG_I2C_ALI15X3 is not set
++# CONFIG_I2C_AMD756 is not set
++# CONFIG_I2C_AMD8111 is not set
++CONFIG_I2C_I801=y
++# CONFIG_I2C_ISCH is not set
++# CONFIG_I2C_ISMT is not set
++CONFIG_I2C_PIIX4=m
++# CONFIG_I2C_NFORCE2 is not set
++# CONFIG_I2C_SIS5595 is not set
++# CONFIG_I2C_SIS630 is not set
++# CONFIG_I2C_SIS96X is not set
++# CONFIG_I2C_VIA is not set
++# CONFIG_I2C_VIAPRO is not set
++
++#
++# ACPI drivers
++#
++# CONFIG_I2C_SCMI is not set
++
++#
++# I2C system bus drivers (mostly embedded / system-on-chip)
++#
++# CONFIG_I2C_DESIGNWARE_PLATFORM is not set
++# CONFIG_I2C_DESIGNWARE_PCI is not set
++# CONFIG_I2C_EMEV2 is not set
++# CONFIG_I2C_OCORES is not set
++# CONFIG_I2C_PCA_PLATFORM is not set
++# CONFIG_I2C_PXA_PCI is not set
++# CONFIG_I2C_SIMTEC is not set
++# CONFIG_I2C_XILINX is not set
++
++#
++# External I2C/SMBus adapter drivers
++#
++# CONFIG_I2C_DIOLAN_U2C is not set
++# CONFIG_I2C_PARPORT is not set
++# CONFIG_I2C_PARPORT_LIGHT is not set
++# CONFIG_I2C_ROBOTFUZZ_OSIF is not set
++# CONFIG_I2C_TAOS_EVM is not set
++# CONFIG_I2C_TINY_USB is not set
++
++#
++# Other I2C/SMBus bus drivers
++#
++# CONFIG_I2C_MLXCPLD is not set
++# CONFIG_I2C_STUB is not set
++# CONFIG_I2C_SLAVE is not set
++# CONFIG_I2C_DEBUG_CORE is not set
++# CONFIG_I2C_DEBUG_ALGO is not set
++# CONFIG_I2C_DEBUG_BUS is not set
++# CONFIG_SPI is not set
++# CONFIG_SPMI is not set
++# CONFIG_HSI is not set
++CONFIG_PPS=y
++# CONFIG_PPS_DEBUG is not set
++
++#
++# PPS clients support
++#
++# CONFIG_PPS_CLIENT_KTIMER is not set
++# CONFIG_PPS_CLIENT_LDISC is not set
++# CONFIG_PPS_CLIENT_PARPORT is not set
++# CONFIG_PPS_CLIENT_GPIO is not set
++
++#
++# PPS generators support
++#
++
++#
++# PTP clock support
++#
++CONFIG_PTP_1588_CLOCK=y
++
++#
++# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks.
++#
++CONFIG_PTP_1588_CLOCK_KVM=y
++CONFIG_PINCTRL=y
++# CONFIG_DEBUG_PINCTRL is not set
++# CONFIG_PINCTRL_AMD is not set
++# CONFIG_PINCTRL_MCP23S08 is not set
++# CONFIG_PINCTRL_SX150X is not set
++# CONFIG_PINCTRL_BAYTRAIL is not set
++# CONFIG_PINCTRL_CHERRYVIEW is not set
++# CONFIG_PINCTRL_BROXTON is not set
++# CONFIG_PINCTRL_CANNONLAKE is not set
++# CONFIG_PINCTRL_CEDARFORK is not set
++# CONFIG_PINCTRL_DENVERTON is not set
++# CONFIG_PINCTRL_GEMINILAKE is not set
++# CONFIG_PINCTRL_LEWISBURG is not set
++# CONFIG_PINCTRL_SUNRISEPOINT is not set
++# CONFIG_GPIOLIB is not set
++# CONFIG_W1 is not set
++# CONFIG_POWER_AVS is not set
++# CONFIG_POWER_RESET is not set
++CONFIG_POWER_SUPPLY=y
++# CONFIG_POWER_SUPPLY_DEBUG is not set
++# CONFIG_PDA_POWER is not set
++# CONFIG_TEST_POWER is not set
++# CONFIG_BATTERY_DS2780 is not set
++# CONFIG_BATTERY_DS2781 is not set
++# CONFIG_BATTERY_DS2782 is not set
++# CONFIG_BATTERY_SBS is not set
++# CONFIG_CHARGER_SBS is not set
++# CONFIG_BATTERY_BQ27XXX is not set
++# CONFIG_BATTERY_MAX17040 is not set
++# CONFIG_BATTERY_MAX17042 is not set
++# CONFIG_CHARGER_MAX8903 is not set
++# CONFIG_CHARGER_LP8727 is not set
++# CONFIG_CHARGER_BQ2415X is not set
++# CONFIG_CHARGER_SMB347 is not set
++# CONFIG_BATTERY_GAUGE_LTC2941 is not set
++CONFIG_HWMON=y
++# CONFIG_HWMON_DEBUG_CHIP is not set
++
++#
++# Native drivers
++#
++# CONFIG_SENSORS_ABITUGURU is not set
++# CONFIG_SENSORS_ABITUGURU3 is not set
++# CONFIG_SENSORS_AD7414 is not set
++# CONFIG_SENSORS_AD7418 is not set
++# CONFIG_SENSORS_ADM1021 is not set
++# CONFIG_SENSORS_ADM1025 is not set
++# CONFIG_SENSORS_ADM1026 is not set
++# CONFIG_SENSORS_ADM1029 is not set
++# CONFIG_SENSORS_ADM1031 is not set
++# CONFIG_SENSORS_ADM9240 is not set
++# CONFIG_SENSORS_ADT7410 is not set
++# CONFIG_SENSORS_ADT7411 is not set
++# CONFIG_SENSORS_ADT7462 is not set
++# CONFIG_SENSORS_ADT7470 is not set
++# CONFIG_SENSORS_ADT7475 is not set
++# CONFIG_SENSORS_ASC7621 is not set
++# CONFIG_SENSORS_K8TEMP is not set
++CONFIG_SENSORS_K10TEMP=m
++# CONFIG_SENSORS_FAM15H_POWER is not set
++# CONFIG_SENSORS_APPLESMC is not set
++# CONFIG_SENSORS_ASB100 is not set
++# CONFIG_SENSORS_ASPEED is not set
++# CONFIG_SENSORS_ATXP1 is not set
++# CONFIG_SENSORS_DS620 is not set
++# CONFIG_SENSORS_DS1621 is not set
++CONFIG_SENSORS_DELL_SMM=m
++# CONFIG_SENSORS_I5K_AMB is not set
++# CONFIG_SENSORS_F71805F is not set
++# CONFIG_SENSORS_F71882FG is not set
++# CONFIG_SENSORS_F75375S is not set
++# CONFIG_SENSORS_FSCHMD is not set
++# CONFIG_SENSORS_FTSTEUTATES is not set
++# CONFIG_SENSORS_GL518SM is not set
++# CONFIG_SENSORS_GL520SM is not set
++# CONFIG_SENSORS_G760A is not set
++# CONFIG_SENSORS_G762 is not set
++# CONFIG_SENSORS_HIH6130 is not set
++# CONFIG_SENSORS_I5500 is not set
++# CONFIG_SENSORS_CORETEMP is not set
++# CONFIG_SENSORS_IT87 is not set
++# CONFIG_SENSORS_JC42 is not set
++# CONFIG_SENSORS_POWR1220 is not set
++# CONFIG_SENSORS_LINEAGE is not set
++# CONFIG_SENSORS_LTC2945 is not set
++# CONFIG_SENSORS_LTC2990 is not set
++# CONFIG_SENSORS_LTC4151 is not set
++# CONFIG_SENSORS_LTC4215 is not set
++# CONFIG_SENSORS_LTC4222 is not set
++# CONFIG_SENSORS_LTC4245 is not set
++# CONFIG_SENSORS_LTC4260 is not set
++# CONFIG_SENSORS_LTC4261 is not set
++# CONFIG_SENSORS_MAX16065 is not set
++# CONFIG_SENSORS_MAX1619 is not set
++# CONFIG_SENSORS_MAX1668 is not set
++# CONFIG_SENSORS_MAX197 is not set
++# CONFIG_SENSORS_MAX6621 is not set
++# CONFIG_SENSORS_MAX6639 is not set
++# CONFIG_SENSORS_MAX6642 is not set
++# CONFIG_SENSORS_MAX6650 is not set
++# CONFIG_SENSORS_MAX6697 is not set
++# CONFIG_SENSORS_MAX31790 is not set
++# CONFIG_SENSORS_MCP3021 is not set
++# CONFIG_SENSORS_TC654 is not set
++# CONFIG_SENSORS_LM63 is not set
++# CONFIG_SENSORS_LM73 is not set
++# CONFIG_SENSORS_LM75 is not set
++# CONFIG_SENSORS_LM77 is not set
++# CONFIG_SENSORS_LM78 is not set
++# CONFIG_SENSORS_LM80 is not set
++# CONFIG_SENSORS_LM83 is not set
++# CONFIG_SENSORS_LM85 is not set
++# CONFIG_SENSORS_LM87 is not set
++# CONFIG_SENSORS_LM90 is not set
++# CONFIG_SENSORS_LM92 is not set
++# CONFIG_SENSORS_LM93 is not set
++# CONFIG_SENSORS_LM95234 is not set
++# CONFIG_SENSORS_LM95241 is not set
++# CONFIG_SENSORS_LM95245 is not set
++# CONFIG_SENSORS_PC87360 is not set
++# CONFIG_SENSORS_PC87427 is not set
++# CONFIG_SENSORS_NTC_THERMISTOR is not set
++# CONFIG_SENSORS_NCT6683 is not set
++# CONFIG_SENSORS_NCT6775 is not set
++# CONFIG_SENSORS_NCT7802 is not set
++# CONFIG_SENSORS_NCT7904 is not set
++# CONFIG_SENSORS_PCF8591 is not set
++# CONFIG_PMBUS is not set
++# CONFIG_SENSORS_SHT21 is not set
++# CONFIG_SENSORS_SHT3x is not set
++# CONFIG_SENSORS_SHTC1 is not set
++# CONFIG_SENSORS_SIS5595 is not set
++# CONFIG_SENSORS_DME1737 is not set
++# CONFIG_SENSORS_EMC1403 is not set
++# CONFIG_SENSORS_EMC2103 is not set
++# CONFIG_SENSORS_EMC6W201 is not set
++# CONFIG_SENSORS_SMSC47M1 is not set
++# CONFIG_SENSORS_SMSC47M192 is not set
++# CONFIG_SENSORS_SMSC47B397 is not set
++# CONFIG_SENSORS_SCH5627 is not set
++# CONFIG_SENSORS_SCH5636 is not set
++# CONFIG_SENSORS_STTS751 is not set
++# CONFIG_SENSORS_SMM665 is not set
++# CONFIG_SENSORS_ADC128D818 is not set
++# CONFIG_SENSORS_ADS1015 is not set
++# CONFIG_SENSORS_ADS7828 is not set
++# CONFIG_SENSORS_AMC6821 is not set
++# CONFIG_SENSORS_INA209 is not set
++# CONFIG_SENSORS_INA2XX is not set
++# CONFIG_SENSORS_INA3221 is not set
++# CONFIG_SENSORS_TC74 is not set
++# CONFIG_SENSORS_THMC50 is not set
++# CONFIG_SENSORS_TMP102 is not set
++# CONFIG_SENSORS_TMP103 is not set
++# CONFIG_SENSORS_TMP108 is not set
++# CONFIG_SENSORS_TMP401 is not set
++# CONFIG_SENSORS_TMP421 is not set
++# CONFIG_SENSORS_VIA_CPUTEMP is not set
++# CONFIG_SENSORS_VIA686A is not set
++# CONFIG_SENSORS_VT1211 is not set
++# CONFIG_SENSORS_VT8231 is not set
++# CONFIG_SENSORS_W83773G is not set
++# CONFIG_SENSORS_W83781D is not set
++# CONFIG_SENSORS_W83791D is not set
++# CONFIG_SENSORS_W83792D is not set
++# CONFIG_SENSORS_W83793 is not set
++# CONFIG_SENSORS_W83795 is not set
++# CONFIG_SENSORS_W83L785TS is not set
++# CONFIG_SENSORS_W83L786NG is not set
++# CONFIG_SENSORS_W83627HF is not set
++# CONFIG_SENSORS_W83627EHF is not set
++# CONFIG_SENSORS_XGENE is not set
++
++#
++# ACPI drivers
++#
++# CONFIG_SENSORS_ACPI_POWER is not set
++# CONFIG_SENSORS_ATK0110 is not set
++CONFIG_THERMAL=y
++CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0
++CONFIG_THERMAL_HWMON=y
++CONFIG_THERMAL_WRITABLE_TRIPS=y
++CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
++# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set
++# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set
++# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set
++# CONFIG_THERMAL_GOV_FAIR_SHARE is not set
++CONFIG_THERMAL_GOV_STEP_WISE=y
++# CONFIG_THERMAL_GOV_BANG_BANG is not set
++CONFIG_THERMAL_GOV_USER_SPACE=y
++# CONFIG_THERMAL_GOV_POWER_ALLOCATOR is not set
++# CONFIG_THERMAL_EMULATION is not set
++# CONFIG_INTEL_POWERCLAMP is not set
++CONFIG_X86_PKG_TEMP_THERMAL=m
++# CONFIG_INTEL_SOC_DTS_THERMAL is not set
++
++#
++# ACPI INT340X thermal drivers
++#
++# CONFIG_INT340X_THERMAL is not set
++# CONFIG_INTEL_PCH_THERMAL is not set
++CONFIG_WATCHDOG=y
++# CONFIG_WATCHDOG_CORE is not set
++# CONFIG_WATCHDOG_NOWAYOUT is not set
++CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y
++# CONFIG_WATCHDOG_SYSFS is not set
++
++#
++# Watchdog Device Drivers
++#
++# CONFIG_SOFT_WATCHDOG is not set
++# CONFIG_WDAT_WDT is not set
++# CONFIG_XILINX_WATCHDOG is not set
++# CONFIG_ZIIRAVE_WATCHDOG is not set
++# CONFIG_CADENCE_WATCHDOG is not set
++# CONFIG_DW_WATCHDOG is not set
++# CONFIG_MAX63XX_WATCHDOG is not set
++# CONFIG_ACQUIRE_WDT is not set
++# CONFIG_ADVANTECH_WDT is not set
++# CONFIG_ALIM1535_WDT is not set
++# CONFIG_ALIM7101_WDT is not set
++# CONFIG_F71808E_WDT is not set
++# CONFIG_SP5100_TCO is not set
++# CONFIG_SBC_FITPC2_WATCHDOG is not set
++# CONFIG_EUROTECH_WDT is not set
++# CONFIG_IB700_WDT is not set
++# CONFIG_IBMASR is not set
++# CONFIG_WAFER_WDT is not set
++# CONFIG_I6300ESB_WDT is not set
++# CONFIG_IE6XX_WDT is not set
++# CONFIG_ITCO_WDT is not set
++# CONFIG_IT8712F_WDT is not set
++# CONFIG_IT87_WDT is not set
++# CONFIG_HP_WATCHDOG is not set
++# CONFIG_SC1200_WDT is not set
++# CONFIG_PC87413_WDT is not set
++# CONFIG_NV_TCO is not set
++# CONFIG_60XX_WDT is not set
++# CONFIG_CPU5_WDT is not set
++# CONFIG_SMSC_SCH311X_WDT is not set
++# CONFIG_SMSC37B787_WDT is not set
++# CONFIG_VIA_WDT is not set
++# CONFIG_W83627HF_WDT is not set
++# CONFIG_W83877F_WDT is not set
++# CONFIG_W83977F_WDT is not set
++# CONFIG_MACHZ_WDT is not set
++# CONFIG_SBC_EPX_C3_WATCHDOG is not set
++# CONFIG_NI903X_WDT is not set
++# CONFIG_NIC7018_WDT is not set
++
++#
++# PCI-based Watchdog Cards
++#
++# CONFIG_PCIPCWATCHDOG is not set
++# CONFIG_WDTPCI is not set
++
++#
++# USB-based Watchdog Cards
++#
++# CONFIG_USBPCWATCHDOG is not set
++
++#
++# Watchdog Pretimeout Governors
++#
++# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set
++CONFIG_SSB_POSSIBLE=y
++# CONFIG_SSB is not set
++CONFIG_BCMA_POSSIBLE=y
++# CONFIG_BCMA is not set
++
++#
++# Multifunction device drivers
++#
++# CONFIG_MFD_CORE is not set
++# CONFIG_MFD_AS3711 is not set
++# CONFIG_PMIC_ADP5520 is not set
++# CONFIG_MFD_BCM590XX is not set
++# CONFIG_MFD_BD9571MWV is not set
++# CONFIG_MFD_AXP20X_I2C is not set
++# CONFIG_MFD_CROS_EC is not set
++# CONFIG_PMIC_DA903X is not set
++# CONFIG_MFD_DA9052_I2C is not set
++# CONFIG_MFD_DA9055 is not set
++# CONFIG_MFD_DA9062 is not set
++# CONFIG_MFD_DA9063 is not set
++# CONFIG_MFD_DA9150 is not set
++# CONFIG_MFD_DLN2 is not set
++# CONFIG_MFD_MC13XXX_I2C is not set
++# CONFIG_HTC_PASIC3 is not set
++# CONFIG_MFD_INTEL_QUARK_I2C_GPIO is not set
++# CONFIG_LPC_ICH is not set
++# CONFIG_LPC_SCH is not set
++# CONFIG_INTEL_SOC_PMIC_CHTWC is not set
++# CONFIG_MFD_INTEL_LPSS_ACPI is not set
++# CONFIG_MFD_INTEL_LPSS_PCI is not set
++# CONFIG_MFD_JANZ_CMODIO is not set
++# CONFIG_MFD_KEMPLD is not set
++# CONFIG_MFD_88PM800 is not set
++# CONFIG_MFD_88PM805 is not set
++# CONFIG_MFD_88PM860X is not set
++# CONFIG_MFD_MAX14577 is not set
++# CONFIG_MFD_MAX77693 is not set
++# CONFIG_MFD_MAX77843 is not set
++# CONFIG_MFD_MAX8907 is not set
++# CONFIG_MFD_MAX8925 is not set
++# CONFIG_MFD_MAX8997 is not set
++# CONFIG_MFD_MAX8998 is not set
++# CONFIG_MFD_MT6397 is not set
++# CONFIG_MFD_MENF21BMC is not set
++# CONFIG_MFD_VIPERBOARD is not set
++# CONFIG_MFD_RETU is not set
++# CONFIG_MFD_PCF50633 is not set
++# CONFIG_MFD_RDC321X is not set
++# CONFIG_MFD_RT5033 is not set
++# CONFIG_MFD_RC5T583 is not set
++# CONFIG_MFD_SEC_CORE is not set
++# CONFIG_MFD_SI476X_CORE is not set
++# CONFIG_MFD_SM501 is not set
++# CONFIG_MFD_SKY81452 is not set
++# CONFIG_MFD_SMSC is not set
++# CONFIG_ABX500_CORE is not set
++# CONFIG_MFD_SYSCON is not set
++# CONFIG_MFD_TI_AM335X_TSCADC is not set
++# CONFIG_MFD_LP3943 is not set
++# CONFIG_MFD_LP8788 is not set
++# CONFIG_MFD_TI_LMU is not set
++# CONFIG_MFD_PALMAS is not set
++# CONFIG_TPS6105X is not set
++# CONFIG_TPS6507X is not set
++# CONFIG_MFD_TPS65086 is not set
++# CONFIG_MFD_TPS65090 is not set
++# CONFIG_MFD_TPS68470 is not set
++# CONFIG_MFD_TI_LP873X is not set
++# CONFIG_MFD_TPS6586X is not set
++# CONFIG_MFD_TPS65912_I2C is not set
++# CONFIG_MFD_TPS80031 is not set
++# CONFIG_TWL4030_CORE is not set
++# CONFIG_TWL6040_CORE is not set
++# CONFIG_MFD_WL1273_CORE is not set
++# CONFIG_MFD_LM3533 is not set
++# CONFIG_MFD_TMIO is not set
++# CONFIG_MFD_VX855 is not set
++# CONFIG_MFD_ARIZONA_I2C is not set
++# CONFIG_MFD_WM8400 is not set
++# CONFIG_MFD_WM831X_I2C is not set
++# CONFIG_MFD_WM8350_I2C is not set
++# CONFIG_MFD_WM8994 is not set
++# CONFIG_REGULATOR is not set
++CONFIG_RC_CORE=y
++CONFIG_RC_MAP=y
++# CONFIG_LIRC is not set
++CONFIG_RC_DECODERS=y
++CONFIG_IR_NEC_DECODER=y
++CONFIG_IR_RC5_DECODER=y
++CONFIG_IR_RC6_DECODER=y
++CONFIG_IR_JVC_DECODER=y
++CONFIG_IR_SONY_DECODER=y
++CONFIG_IR_SANYO_DECODER=y
++CONFIG_IR_SHARP_DECODER=y
++CONFIG_IR_MCE_KBD_DECODER=y
++CONFIG_IR_XMP_DECODER=y
++# CONFIG_RC_DEVICES is not set
++# CONFIG_MEDIA_SUPPORT is not set
++
++#
++# Graphics support
++#
++CONFIG_AGP=y
++CONFIG_AGP_AMD64=y
++CONFIG_AGP_INTEL=y
++# CONFIG_AGP_SIS is not set
++# CONFIG_AGP_VIA is not set
++CONFIG_INTEL_GTT=y
++CONFIG_VGA_ARB=y
++CONFIG_VGA_ARB_MAX_GPUS=16
++# CONFIG_VGA_SWITCHEROO is not set
++CONFIG_DRM=y
++# CONFIG_DRM_DP_AUX_CHARDEV is not set
++# CONFIG_DRM_DEBUG_MM is not set
++# CONFIG_DRM_DEBUG_MM_SELFTEST is not set
++CONFIG_DRM_KMS_HELPER=y
++CONFIG_DRM_KMS_FB_HELPER=y
++CONFIG_DRM_FBDEV_EMULATION=y
++CONFIG_DRM_FBDEV_OVERALLOC=100
++# CONFIG_DRM_LOAD_EDID_FIRMWARE is not set
++CONFIG_DRM_TTM=m
++CONFIG_DRM_SCHED=m
++
++#
++# I2C encoder or helper chips
++#
++# CONFIG_DRM_I2C_CH7006 is not set
++# CONFIG_DRM_I2C_SIL164 is not set
++# CONFIG_DRM_I2C_NXP_TDA998X is not set
++# CONFIG_DRM_RADEON is not set
++CONFIG_DRM_AMDGPU=m
++# CONFIG_DRM_AMDGPU_SI is not set
++CONFIG_DRM_AMDGPU_CIK=y
++CONFIG_DRM_AMDGPU_USERPTR=y
++# CONFIG_DRM_AMDGPU_GART_DEBUGFS is not set
++
++#
++# ACP (Audio CoProcessor) Configuration
++#
++# CONFIG_DRM_AMD_ACP is not set
++
++#
++# Display Engine Configuration
++#
++CONFIG_DRM_AMD_DC=y
++# CONFIG_DRM_AMD_DC_FBC is not set
++CONFIG_DRM_AMD_DC_DCN1_0=y
++# CONFIG_DEBUG_KERNEL_DC is not set
++
++#
++# AMD Library routines
++#
++CONFIG_CHASH=m
++# CONFIG_CHASH_STATS is not set
++# CONFIG_CHASH_SELFTEST is not set
++# CONFIG_DRM_NOUVEAU is not set
++# CONFIG_DRM_I915 is not set
++# CONFIG_DRM_VGEM is not set
++# CONFIG_DRM_VMWGFX is not set
++# CONFIG_DRM_GMA500 is not set
++# CONFIG_DRM_UDL is not set
++CONFIG_DRM_AST=m
++# CONFIG_DRM_MGAG200 is not set
++# CONFIG_DRM_CIRRUS_QEMU is not set
++# CONFIG_DRM_QXL is not set
++# CONFIG_DRM_BOCHS is not set
++CONFIG_DRM_PANEL=y
++
++#
++# Display Panels
++#
++CONFIG_DRM_BRIDGE=y
++CONFIG_DRM_PANEL_BRIDGE=y
++
++#
++# Display Interface Bridges
++#
++# CONFIG_DRM_ANALOGIX_ANX78XX is not set
++CONFIG_HSA_AMD=y
++# CONFIG_DRM_HISI_HIBMC is not set
++# CONFIG_DRM_TINYDRM is not set
++# CONFIG_DRM_LEGACY is not set
++CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y
++# CONFIG_DRM_LIB_RANDOM is not set
++
++#
++# Frame buffer Devices
++#
++CONFIG_FB=y
++# CONFIG_FIRMWARE_EDID is not set
++CONFIG_FB_CMDLINE=y
++CONFIG_FB_NOTIFY=y
++# CONFIG_FB_DDC is not set
++# CONFIG_FB_BOOT_VESA_SUPPORT is not set
++CONFIG_FB_CFB_FILLRECT=y
++CONFIG_FB_CFB_COPYAREA=y
++CONFIG_FB_CFB_IMAGEBLIT=y
++# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
++CONFIG_FB_SYS_FILLRECT=y
++CONFIG_FB_SYS_COPYAREA=y
++CONFIG_FB_SYS_IMAGEBLIT=y
++# CONFIG_FB_PROVIDE_GET_FB_UNMAPPED_AREA is not set
++# CONFIG_FB_FOREIGN_ENDIAN is not set
++CONFIG_FB_SYS_FOPS=y
++CONFIG_FB_DEFERRED_IO=y
++# CONFIG_FB_SVGALIB is not set
++# CONFIG_FB_MACMODES is not set
++# CONFIG_FB_BACKLIGHT is not set
++# CONFIG_FB_MODE_HELPERS is not set
++# CONFIG_FB_TILEBLITTING is not set
++
++#
++# Frame buffer hardware drivers
++#
++# CONFIG_FB_CIRRUS is not set
++# CONFIG_FB_PM2 is not set
++# CONFIG_FB_CYBER2000 is not set
++# CONFIG_FB_ARC is not set
++# CONFIG_FB_ASILIANT is not set
++# CONFIG_FB_IMSTT is not set
++# CONFIG_FB_VGA16 is not set
++# CONFIG_FB_VESA is not set
++# CONFIG_FB_EFI is not set
++# CONFIG_FB_N411 is not set
++# CONFIG_FB_HGA is not set
++# CONFIG_FB_OPENCORES is not set
++# CONFIG_FB_S1D13XXX is not set
++# CONFIG_FB_NVIDIA is not set
++# CONFIG_FB_RIVA is not set
++# CONFIG_FB_I740 is not set
++# CONFIG_FB_LE80578 is not set
++# CONFIG_FB_INTEL is not set
++# CONFIG_FB_MATROX is not set
++# CONFIG_FB_RADEON is not set
++# CONFIG_FB_ATY128 is not set
++# CONFIG_FB_ATY is not set
++# CONFIG_FB_S3 is not set
++# CONFIG_FB_SAVAGE is not set
++# CONFIG_FB_SIS is not set
++# CONFIG_FB_NEOMAGIC is not set
++# CONFIG_FB_KYRO is not set
++# CONFIG_FB_3DFX is not set
++# CONFIG_FB_VOODOO1 is not set
++# CONFIG_FB_VT8623 is not set
++# CONFIG_FB_TRIDENT is not set
++# CONFIG_FB_ARK is not set
++# CONFIG_FB_PM3 is not set
++# CONFIG_FB_CARMINE is not set
++# CONFIG_FB_SMSCUFX is not set
++# CONFIG_FB_UDL is not set
++# CONFIG_FB_IBM_GXT4500 is not set
++# CONFIG_FB_VIRTUAL is not set
++# CONFIG_FB_METRONOME is not set
++# CONFIG_FB_MB862XX is not set
++# CONFIG_FB_BROADSHEET is not set
++# CONFIG_FB_AUO_K190X is not set
++# CONFIG_FB_SIMPLE is not set
++# CONFIG_FB_SM712 is not set
++CONFIG_BACKLIGHT_LCD_SUPPORT=y
++# CONFIG_LCD_CLASS_DEVICE is not set
++CONFIG_BACKLIGHT_CLASS_DEVICE=y
++# CONFIG_BACKLIGHT_GENERIC is not set
++# CONFIG_BACKLIGHT_APPLE is not set
++# CONFIG_BACKLIGHT_PM8941_WLED is not set
++# CONFIG_BACKLIGHT_SAHARA is not set
++# CONFIG_BACKLIGHT_ADP8860 is not set
++# CONFIG_BACKLIGHT_ADP8870 is not set
++# CONFIG_BACKLIGHT_LM3639 is not set
++# CONFIG_BACKLIGHT_LV5207LP is not set
++# CONFIG_BACKLIGHT_BD6107 is not set
++# CONFIG_BACKLIGHT_ARCXCNN is not set
++# CONFIG_VGASTATE is not set
++CONFIG_HDMI=y
++
++#
++# Console display driver support
++#
++CONFIG_VGA_CONSOLE=y
++CONFIG_VGACON_SOFT_SCROLLBACK=y
++CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=64
++# CONFIG_VGACON_SOFT_SCROLLBACK_PERSISTENT_ENABLE_BY_DEFAULT is not set
++CONFIG_DUMMY_CONSOLE=y
++CONFIG_DUMMY_CONSOLE_COLUMNS=80
++CONFIG_DUMMY_CONSOLE_ROWS=25
++CONFIG_FRAMEBUFFER_CONSOLE=y
++CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
++CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
++# CONFIG_LOGO is not set
++# CONFIG_SOUND is not set
++
++#
++# HID support
++#
++CONFIG_HID=y
++CONFIG_HID_BATTERY_STRENGTH=y
++CONFIG_HIDRAW=y
++# CONFIG_UHID is not set
++CONFIG_HID_GENERIC=y
++
++#
++# Special HID drivers
++#
++CONFIG_HID_A4TECH=y
++# CONFIG_HID_ACCUTOUCH is not set
++# CONFIG_HID_ACRUX is not set
++CONFIG_HID_APPLE=y
++# CONFIG_HID_APPLEIR is not set
++# CONFIG_HID_ASUS is not set
++# CONFIG_HID_AUREAL is not set
++CONFIG_HID_BELKIN=y
++# CONFIG_HID_BETOP_FF is not set
++CONFIG_HID_CHERRY=y
++CONFIG_HID_CHICONY=y
++# CONFIG_HID_CORSAIR is not set
++# CONFIG_HID_CMEDIA is not set
++CONFIG_HID_CYPRESS=y
++# CONFIG_HID_DRAGONRISE is not set
++# CONFIG_HID_EMS_FF is not set
++# CONFIG_HID_ELECOM is not set
++# CONFIG_HID_ELO is not set
++CONFIG_HID_EZKEY=y
++# CONFIG_HID_GEMBIRD is not set
++# CONFIG_HID_GFRM is not set
++# CONFIG_HID_HOLTEK is not set
++# CONFIG_HID_GT683R is not set
++# CONFIG_HID_KEYTOUCH is not set
++CONFIG_HID_KYE=y
++# CONFIG_HID_UCLOGIC is not set
++# CONFIG_HID_WALTOP is not set
++# CONFIG_HID_GYRATION is not set
++# CONFIG_HID_ICADE is not set
++# CONFIG_HID_ITE is not set
++# CONFIG_HID_JABRA is not set
++# CONFIG_HID_TWINHAN is not set
++CONFIG_HID_KENSINGTON=y
++# CONFIG_HID_LCPOWER is not set
++# CONFIG_HID_LED is not set
++# CONFIG_HID_LENOVO is not set
++CONFIG_HID_LOGITECH=y
++# CONFIG_HID_LOGITECH_DJ is not set
++# CONFIG_HID_LOGITECH_HIDPP is not set
++# CONFIG_LOGITECH_FF is not set
++# CONFIG_LOGIRUMBLEPAD2_FF is not set
++# CONFIG_LOGIG940_FF is not set
++# CONFIG_LOGIWHEELS_FF is not set
++# CONFIG_HID_MAGICMOUSE is not set
++# CONFIG_HID_MAYFLASH is not set
++CONFIG_HID_MICROSOFT=y
++CONFIG_HID_MONTEREY=y
++# CONFIG_HID_MULTITOUCH is not set
++# CONFIG_HID_NTI is not set
++# CONFIG_HID_NTRIG is not set
++# CONFIG_HID_ORTEK is not set
++# CONFIG_HID_PANTHERLORD is not set
++# CONFIG_HID_PENMOUNT is not set
++# CONFIG_HID_PETALYNX is not set
++# CONFIG_HID_PICOLCD is not set
++CONFIG_HID_PLANTRONICS=y
++# CONFIG_HID_PRIMAX is not set
++# CONFIG_HID_RETRODE is not set
++# CONFIG_HID_ROCCAT is not set
++# CONFIG_HID_SAITEK is not set
++# CONFIG_HID_SAMSUNG is not set
++# CONFIG_HID_SONY is not set
++# CONFIG_HID_SPEEDLINK is not set
++# CONFIG_HID_STEELSERIES is not set
++# CONFIG_HID_SUNPLUS is not set
++# CONFIG_HID_RMI is not set
++# CONFIG_HID_GREENASIA is not set
++# CONFIG_HID_SMARTJOYPLUS is not set
++# CONFIG_HID_TIVO is not set
++# CONFIG_HID_TOPSEED is not set
++# CONFIG_HID_THINGM is not set
++# CONFIG_HID_THRUSTMASTER is not set
++# CONFIG_HID_UDRAW_PS3 is not set
++# CONFIG_HID_WACOM is not set
++# CONFIG_HID_WIIMOTE is not set
++# CONFIG_HID_XINMO is not set
++# CONFIG_HID_ZEROPLUS is not set
++# CONFIG_HID_ZYDACRON is not set
++# CONFIG_HID_SENSOR_HUB is not set
++# CONFIG_HID_ALPS is not set
++
++#
++# USB HID support
++#
++CONFIG_USB_HID=y
++CONFIG_HID_PID=y
++CONFIG_USB_HIDDEV=y
++
++#
++# I2C HID support
++#
++# CONFIG_I2C_HID is not set
++
++#
++# Intel ISH HID support
++#
++# CONFIG_INTEL_ISH_HID is not set
++CONFIG_USB_OHCI_LITTLE_ENDIAN=y
++CONFIG_USB_SUPPORT=y
++CONFIG_USB_COMMON=y
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB=y
++CONFIG_USB_PCI=y
++# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set
++
++#
++# Miscellaneous USB options
++#
++CONFIG_USB_DEFAULT_PERSIST=y
++# CONFIG_USB_DYNAMIC_MINORS is not set
++# CONFIG_USB_OTG is not set
++# CONFIG_USB_OTG_WHITELIST is not set
++# CONFIG_USB_OTG_BLACKLIST_HUB is not set
++# CONFIG_USB_LEDS_TRIGGER_USBPORT is not set
++# CONFIG_USB_MON is not set
++# CONFIG_USB_WUSB_CBAF is not set
++
++#
++# USB Host Controller Drivers
++#
++# CONFIG_USB_C67X00_HCD is not set
++CONFIG_USB_XHCI_HCD=y
++# CONFIG_USB_XHCI_DBGCAP is not set
++CONFIG_USB_XHCI_PCI=y
++# CONFIG_USB_XHCI_PLATFORM is not set
++CONFIG_USB_EHCI_HCD=y
++CONFIG_USB_EHCI_ROOT_HUB_TT=y
++CONFIG_USB_EHCI_TT_NEWSCHED=y
++CONFIG_USB_EHCI_PCI=y
++CONFIG_USB_EHCI_HCD_PLATFORM=y
++# CONFIG_USB_OXU210HP_HCD is not set
++# CONFIG_USB_ISP116X_HCD is not set
++# CONFIG_USB_ISP1362_HCD is not set
++# CONFIG_USB_FOTG210_HCD is not set
++CONFIG_USB_OHCI_HCD=y
++CONFIG_USB_OHCI_HCD_PCI=y
++# CONFIG_USB_OHCI_HCD_PLATFORM is not set
++CONFIG_USB_UHCI_HCD=y
++# CONFIG_USB_SL811_HCD is not set
++# CONFIG_USB_R8A66597_HCD is not set
++# CONFIG_USB_HCD_TEST_MODE is not set
++
++#
++# USB Device Class drivers
++#
++# CONFIG_USB_ACM is not set
++# CONFIG_USB_PRINTER is not set
++# CONFIG_USB_WDM is not set
++# CONFIG_USB_TMC is not set
++
++#
++# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
++#
++
++#
++# also be needed; see USB_STORAGE Help for more info
++#
++CONFIG_USB_STORAGE=y
++# CONFIG_USB_STORAGE_DEBUG is not set
++# CONFIG_USB_STORAGE_REALTEK is not set
++# CONFIG_USB_STORAGE_DATAFAB is not set
++# CONFIG_USB_STORAGE_FREECOM is not set
++# CONFIG_USB_STORAGE_ISD200 is not set
++# CONFIG_USB_STORAGE_USBAT is not set
++# CONFIG_USB_STORAGE_SDDR09 is not set
++# CONFIG_USB_STORAGE_SDDR55 is not set
++# CONFIG_USB_STORAGE_JUMPSHOT is not set
++# CONFIG_USB_STORAGE_ALAUDA is not set
++# CONFIG_USB_STORAGE_ONETOUCH is not set
++# CONFIG_USB_STORAGE_KARMA is not set
++# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
++# CONFIG_USB_STORAGE_ENE_UB6250 is not set
++# CONFIG_USB_UAS is not set
++
++#
++# USB Imaging devices
++#
++# CONFIG_USB_MDC800 is not set
++# CONFIG_USB_MICROTEK is not set
++# CONFIG_USBIP_CORE is not set
++# CONFIG_USB_MUSB_HDRC is not set
++# CONFIG_USB_DWC3 is not set
++# CONFIG_USB_DWC2 is not set
++# CONFIG_USB_CHIPIDEA is not set
++# CONFIG_USB_ISP1760 is not set
++
++#
++# USB port drivers
++#
++# CONFIG_USB_USS720 is not set
++# CONFIG_USB_SERIAL is not set
++
++#
++# USB Miscellaneous drivers
++#
++# CONFIG_USB_EMI62 is not set
++# CONFIG_USB_EMI26 is not set
++# CONFIG_USB_ADUTUX is not set
++# CONFIG_USB_SEVSEG is not set
++# CONFIG_USB_RIO500 is not set
++# CONFIG_USB_LEGOTOWER is not set
++# CONFIG_USB_LCD is not set
++# CONFIG_USB_CYPRESS_CY7C63 is not set
++# CONFIG_USB_CYTHERM is not set
++# CONFIG_USB_IDMOUSE is not set
++# CONFIG_USB_FTDI_ELAN is not set
++# CONFIG_USB_APPLEDISPLAY is not set
++# CONFIG_USB_SISUSBVGA is not set
++# CONFIG_USB_LD is not set
++# CONFIG_USB_TRANCEVIBRATOR is not set
++# CONFIG_USB_IOWARRIOR is not set
++# CONFIG_USB_TEST is not set
++# CONFIG_USB_EHSET_TEST_FIXTURE is not set
++# CONFIG_USB_ISIGHTFW is not set
++# CONFIG_USB_YUREX is not set
++# CONFIG_USB_EZUSB_FX2 is not set
++# CONFIG_USB_HUB_USB251XB is not set
++# CONFIG_USB_HSIC_USB3503 is not set
++# CONFIG_USB_HSIC_USB4604 is not set
++# CONFIG_USB_LINK_LAYER_TEST is not set
++
++#
++# USB Physical Layer drivers
++#
++# CONFIG_USB_PHY is not set
++# CONFIG_NOP_USB_XCEIV is not set
++# CONFIG_USB_ISP1301 is not set
++# CONFIG_USB_GADGET is not set
++# CONFIG_TYPEC is not set
++# CONFIG_USB_LED_TRIG is not set
++# CONFIG_USB_ULPI_BUS is not set
++# CONFIG_UWB is not set
++# CONFIG_MMC is not set
++# CONFIG_MEMSTICK is not set
++CONFIG_NEW_LEDS=y
++CONFIG_LEDS_CLASS=y
++# CONFIG_LEDS_CLASS_FLASH is not set
++# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set
++
++#
++# LED drivers
++#
++# CONFIG_LEDS_APU is not set
++# CONFIG_LEDS_LM3530 is not set
++# CONFIG_LEDS_LM3642 is not set
++# CONFIG_LEDS_PCA9532 is not set
++# CONFIG_LEDS_LP3944 is not set
++# CONFIG_LEDS_LP5521 is not set
++# CONFIG_LEDS_LP5523 is not set
++# CONFIG_LEDS_LP5562 is not set
++# CONFIG_LEDS_LP8501 is not set
++# CONFIG_LEDS_CLEVO_MAIL is not set
++# CONFIG_LEDS_PCA955X is not set
++# CONFIG_LEDS_PCA963X is not set
++# CONFIG_LEDS_BD2802 is not set
++# CONFIG_LEDS_INTEL_SS4200 is not set
++# CONFIG_LEDS_TCA6507 is not set
++# CONFIG_LEDS_TLC591XX is not set
++# CONFIG_LEDS_LM355x is not set
++
++#
++# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM)
++#
++# CONFIG_LEDS_BLINKM is not set
++# CONFIG_LEDS_MLXCPLD is not set
++# CONFIG_LEDS_USER is not set
++# CONFIG_LEDS_NIC78BX is not set
++
++#
++# LED Triggers
++#
++CONFIG_LEDS_TRIGGERS=y
++# CONFIG_LEDS_TRIGGER_TIMER is not set
++# CONFIG_LEDS_TRIGGER_ONESHOT is not set
++# CONFIG_LEDS_TRIGGER_DISK is not set
++# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set
++# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
++# CONFIG_LEDS_TRIGGER_CPU is not set
++# CONFIG_LEDS_TRIGGER_ACTIVITY is not set
++# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
++
++#
++# iptables trigger is under Netfilter config (LED target)
++#
++# CONFIG_LEDS_TRIGGER_TRANSIENT is not set
++# CONFIG_LEDS_TRIGGER_CAMERA is not set
++# CONFIG_LEDS_TRIGGER_PANIC is not set
++# CONFIG_LEDS_TRIGGER_NETDEV is not set
++# CONFIG_ACCESSIBILITY is not set
++# CONFIG_INFINIBAND is not set
++CONFIG_EDAC_ATOMIC_SCRUB=y
++CONFIG_EDAC_SUPPORT=y
++# CONFIG_EDAC is not set
++CONFIG_RTC_LIB=y
++CONFIG_RTC_MC146818_LIB=y
++CONFIG_RTC_CLASS=y
++# CONFIG_RTC_HCTOSYS is not set
++CONFIG_RTC_SYSTOHC=y
++CONFIG_RTC_SYSTOHC_DEVICE="rtc0"
++# CONFIG_RTC_DEBUG is not set
++CONFIG_RTC_NVMEM=y
++
++#
++# RTC interfaces
++#
++CONFIG_RTC_INTF_SYSFS=y
++CONFIG_RTC_INTF_PROC=y
++CONFIG_RTC_INTF_DEV=y
++# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
++# CONFIG_RTC_DRV_TEST is not set
++
++#
++# I2C RTC drivers
++#
++# CONFIG_RTC_DRV_ABB5ZES3 is not set
++# CONFIG_RTC_DRV_ABX80X is not set
++# CONFIG_RTC_DRV_DS1307 is not set
++# CONFIG_RTC_DRV_DS1374 is not set
++# CONFIG_RTC_DRV_DS1672 is not set
++# CONFIG_RTC_DRV_MAX6900 is not set
++# CONFIG_RTC_DRV_RS5C372 is not set
++# CONFIG_RTC_DRV_ISL1208 is not set
++# CONFIG_RTC_DRV_ISL12022 is not set
++# CONFIG_RTC_DRV_X1205 is not set
++# CONFIG_RTC_DRV_PCF8523 is not set
++# CONFIG_RTC_DRV_PCF85063 is not set
++# CONFIG_RTC_DRV_PCF85363 is not set
++# CONFIG_RTC_DRV_PCF8563 is not set
++# CONFIG_RTC_DRV_PCF8583 is not set
++# CONFIG_RTC_DRV_M41T80 is not set
++# CONFIG_RTC_DRV_BQ32K is not set
++# CONFIG_RTC_DRV_S35390A is not set
++# CONFIG_RTC_DRV_FM3130 is not set
++# CONFIG_RTC_DRV_RX8010 is not set
++# CONFIG_RTC_DRV_RX8581 is not set
++# CONFIG_RTC_DRV_RX8025 is not set
++# CONFIG_RTC_DRV_EM3027 is not set
++# CONFIG_RTC_DRV_RV8803 is not set
++
++#
++# SPI RTC drivers
++#
++CONFIG_RTC_I2C_AND_SPI=y
++
++#
++# SPI and I2C RTC drivers
++#
++# CONFIG_RTC_DRV_DS3232 is not set
++# CONFIG_RTC_DRV_PCF2127 is not set
++# CONFIG_RTC_DRV_RV3029C2 is not set
++
++#
++# Platform RTC drivers
++#
++CONFIG_RTC_DRV_CMOS=y
++# CONFIG_RTC_DRV_DS1286 is not set
++# CONFIG_RTC_DRV_DS1511 is not set
++# CONFIG_RTC_DRV_DS1553 is not set
++# CONFIG_RTC_DRV_DS1685_FAMILY is not set
++# CONFIG_RTC_DRV_DS1742 is not set
++# CONFIG_RTC_DRV_DS2404 is not set
++# CONFIG_RTC_DRV_STK17TA8 is not set
++# CONFIG_RTC_DRV_M48T86 is not set
++# CONFIG_RTC_DRV_M48T35 is not set
++# CONFIG_RTC_DRV_M48T59 is not set
++# CONFIG_RTC_DRV_MSM6242 is not set
++# CONFIG_RTC_DRV_BQ4802 is not set
++# CONFIG_RTC_DRV_RP5C01 is not set
++# CONFIG_RTC_DRV_V3020 is not set
++
++#
++# on-CPU RTC drivers
++#
++# CONFIG_RTC_DRV_FTRTC010 is not set
++
++#
++# HID Sensor RTC drivers
++#
++# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set
++CONFIG_DMADEVICES=y
++# CONFIG_DMADEVICES_DEBUG is not set
++
++#
++# DMA Devices
++#
++CONFIG_DMA_ENGINE=y
++CONFIG_DMA_ACPI=y
++# CONFIG_ALTERA_MSGDMA is not set
++# CONFIG_INTEL_IDMA64 is not set
++# CONFIG_INTEL_IOATDMA is not set
++# CONFIG_QCOM_HIDMA_MGMT is not set
++# CONFIG_QCOM_HIDMA is not set
++CONFIG_DW_DMAC_CORE=y
++# CONFIG_DW_DMAC is not set
++CONFIG_DW_DMAC_PCI=y
++
++#
++# DMA Clients
++#
++# CONFIG_ASYNC_TX_DMA is not set
++# CONFIG_DMATEST is not set
++
++#
++# DMABUF options
++#
++CONFIG_SYNC_FILE=y
++# CONFIG_SW_SYNC is not set
++# CONFIG_AUXDISPLAY is not set
++# CONFIG_PANEL is not set
++# CONFIG_UIO is not set
++# CONFIG_VFIO is not set
++# CONFIG_VIRT_DRIVERS is not set
++CONFIG_VIRTIO_MENU=y
++# CONFIG_VIRTIO_PCI is not set
++# CONFIG_VIRTIO_MMIO is not set
++
++#
++# Microsoft Hyper-V guest support
++#
++# CONFIG_HYPERV is not set
++# CONFIG_HYPERV_TSCPAGE is not set
++# CONFIG_STAGING is not set
++# CONFIG_X86_PLATFORM_DEVICES is not set
++CONFIG_PMC_ATOM=y
++# CONFIG_CHROME_PLATFORMS is not set
++# CONFIG_MELLANOX_PLATFORM is not set
++CONFIG_CLKDEV_LOOKUP=y
++CONFIG_HAVE_CLK_PREPARE=y
++CONFIG_COMMON_CLK=y
++
++#
++# Common Clock Framework
++#
++# CONFIG_COMMON_CLK_SI5351 is not set
++# CONFIG_COMMON_CLK_CDCE706 is not set
++# CONFIG_COMMON_CLK_CS2000_CP is not set
++# CONFIG_COMMON_CLK_NXP is not set
++# CONFIG_COMMON_CLK_PXA is not set
++# CONFIG_COMMON_CLK_PIC32 is not set
++# CONFIG_HWSPINLOCK is not set
++
++#
++# Clock Source drivers
++#
++CONFIG_CLKEVT_I8253=y
++CONFIG_I8253_LOCK=y
++CONFIG_CLKBLD_I8253=y
++# CONFIG_ATMEL_PIT is not set
++# CONFIG_SH_TIMER_CMT is not set
++# CONFIG_SH_TIMER_MTU2 is not set
++# CONFIG_SH_TIMER_TMU is not set
++# CONFIG_EM_TIMER_STI is not set
++CONFIG_MAILBOX=y
++CONFIG_PCC=y
++# CONFIG_ALTERA_MBOX is not set
++CONFIG_IOMMU_API=y
++CONFIG_IOMMU_SUPPORT=y
++
++#
++# Generic IOMMU Pagetable Support
++#
++CONFIG_IOMMU_IOVA=y
++CONFIG_AMD_IOMMU=y
++CONFIG_AMD_IOMMU_V2=m
++# CONFIG_INTEL_IOMMU is not set
++# CONFIG_IRQ_REMAP is not set
++
++#
++# Remoteproc drivers
++#
++# CONFIG_REMOTEPROC is not set
++
++#
++# Rpmsg drivers
++#
++# CONFIG_RPMSG_QCOM_GLINK_RPM is not set
++# CONFIG_RPMSG_VIRTIO is not set
++# CONFIG_SOUNDWIRE is not set
++
++#
++# SOC (System On Chip) specific Drivers
++#
++
++#
++# Amlogic SoC drivers
++#
++
++#
++# Broadcom SoC drivers
++#
++
++#
++# i.MX SoC drivers
++#
++
++#
++# Qualcomm SoC drivers
++#
++# CONFIG_SUNXI_SRAM is not set
++# CONFIG_SOC_TI is not set
++
++#
++# Xilinx SoC drivers
++#
++# CONFIG_XILINX_VCU is not set
++# CONFIG_PM_DEVFREQ is not set
++# CONFIG_EXTCON is not set
++# CONFIG_MEMORY is not set
++# CONFIG_IIO is not set
++# CONFIG_NTB is not set
++# CONFIG_VME_BUS is not set
++# CONFIG_PWM is not set
++
++#
++# IRQ chip support
++#
++CONFIG_ARM_GIC_MAX_NR=1
++# CONFIG_ARM_GIC_V3_ITS is not set
++# CONFIG_IPACK_BUS is not set
++# CONFIG_RESET_CONTROLLER is not set
++# CONFIG_FMC is not set
++
++#
++# PHY Subsystem
++#
++# CONFIG_GENERIC_PHY is not set
++# CONFIG_BCM_KONA_USB2_PHY is not set
++# CONFIG_PHY_PXA_28NM_HSIC is not set
++# CONFIG_PHY_PXA_28NM_USB2 is not set
++# CONFIG_POWERCAP is not set
++# CONFIG_MCB is not set
++
++#
++# Performance monitor support
++#
++CONFIG_RAS=y
++# CONFIG_RAS_CEC is not set
++# CONFIG_THUNDERBOLT is not set
++
++#
++# Android
++#
++# CONFIG_ANDROID is not set
++# CONFIG_LIBNVDIMM is not set
++CONFIG_DAX=y
++# CONFIG_DEV_DAX is not set
++CONFIG_NVMEM=y
++# CONFIG_STM is not set
++# CONFIG_INTEL_TH is not set
++# CONFIG_FPGA is not set
++# CONFIG_FSI is not set
++# CONFIG_UNISYS_VISORBUS is not set
++# CONFIG_SIOX is not set
++# CONFIG_SLIMBUS is not set
++
++#
++# Firmware Drivers
++#
++# CONFIG_EDD is not set
++CONFIG_FIRMWARE_MEMMAP=y
++# CONFIG_DELL_RBU is not set
++# CONFIG_DCDBAS is not set
++CONFIG_DMIID=y
++# CONFIG_DMI_SYSFS is not set
++CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y
++# CONFIG_ISCSI_IBFT_FIND is not set
++# CONFIG_FW_CFG_SYSFS is not set
++# CONFIG_GOOGLE_FIRMWARE is not set
++
++#
++# EFI (Extensible Firmware Interface) Support
++#
++CONFIG_EFI_VARS=y
++CONFIG_EFI_ESRT=y
++CONFIG_EFI_VARS_PSTORE=y
++# CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE is not set
++CONFIG_EFI_RUNTIME_MAP=y
++# CONFIG_EFI_FAKE_MEMMAP is not set
++CONFIG_EFI_RUNTIME_WRAPPERS=y
++# CONFIG_EFI_BOOTLOADER_CONTROL is not set
++# CONFIG_EFI_CAPSULE_LOADER is not set
++# CONFIG_EFI_TEST is not set
++# CONFIG_APPLE_PROPERTIES is not set
++# CONFIG_RESET_ATTACK_MITIGATION is not set
++CONFIG_UEFI_CPER=y
++# CONFIG_EFI_DEV_PATH_PARSER is not set
++
++#
++# Tegra firmware driver
++#
++
++#
++# File systems
++#
++CONFIG_DCACHE_WORD_ACCESS=y
++CONFIG_FS_IOMAP=y
++# CONFIG_EXT2_FS is not set
++# CONFIG_EXT3_FS is not set
++CONFIG_EXT4_FS=y
++CONFIG_EXT4_USE_FOR_EXT2=y
++CONFIG_EXT4_FS_POSIX_ACL=y
++CONFIG_EXT4_FS_SECURITY=y
++# CONFIG_EXT4_ENCRYPTION is not set
++# CONFIG_EXT4_DEBUG is not set
++CONFIG_JBD2=y
++# CONFIG_JBD2_DEBUG is not set
++CONFIG_FS_MBCACHE=y
++# CONFIG_REISERFS_FS is not set
++# CONFIG_JFS_FS is not set
++CONFIG_XFS_FS=y
++CONFIG_XFS_QUOTA=y
++CONFIG_XFS_POSIX_ACL=y
++CONFIG_XFS_RT=y
++# CONFIG_XFS_ONLINE_SCRUB is not set
++CONFIG_XFS_WARN=y
++# CONFIG_XFS_DEBUG is not set
++# CONFIG_GFS2_FS is not set
++# CONFIG_OCFS2_FS is not set
++# CONFIG_BTRFS_FS is not set
++# CONFIG_NILFS2_FS is not set
++# CONFIG_F2FS_FS is not set
++# CONFIG_FS_DAX is not set
++CONFIG_FS_POSIX_ACL=y
++CONFIG_EXPORTFS=y
++# CONFIG_EXPORTFS_BLOCK_OPS is not set
++CONFIG_FILE_LOCKING=y
++CONFIG_MANDATORY_FILE_LOCKING=y
++# CONFIG_FS_ENCRYPTION is not set
++CONFIG_FSNOTIFY=y
++CONFIG_DNOTIFY=y
++CONFIG_INOTIFY_USER=y
++# CONFIG_FANOTIFY is not set
++CONFIG_QUOTA=y
++CONFIG_QUOTA_NETLINK_INTERFACE=y
++# CONFIG_PRINT_QUOTA_WARNING is not set
++# CONFIG_QUOTA_DEBUG is not set
++CONFIG_QUOTA_TREE=y
++# CONFIG_QFMT_V1 is not set
++CONFIG_QFMT_V2=y
++CONFIG_QUOTACTL=y
++CONFIG_QUOTACTL_COMPAT=y
++CONFIG_AUTOFS4_FS=y
++CONFIG_FUSE_FS=m
++CONFIG_CUSE=m
++CONFIG_OVERLAY_FS=y
++# CONFIG_OVERLAY_FS_REDIRECT_DIR is not set
++CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y
++# CONFIG_OVERLAY_FS_INDEX is not set
++
++#
++# Caches
++#
++CONFIG_FSCACHE=y
++# CONFIG_FSCACHE_STATS is not set
++# CONFIG_FSCACHE_HISTOGRAM is not set
++# CONFIG_FSCACHE_DEBUG is not set
++# CONFIG_FSCACHE_OBJECT_LIST is not set
++# CONFIG_CACHEFILES is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++CONFIG_ISO9660_FS=y
++CONFIG_JOLIET=y
++CONFIG_ZISOFS=y
++# CONFIG_UDF_FS is not set
++
++#
++# DOS/FAT/NT Filesystems
++#
++CONFIG_FAT_FS=y
++CONFIG_MSDOS_FS=y
++CONFIG_VFAT_FS=y
++CONFIG_FAT_DEFAULT_CODEPAGE=437
++CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
++# CONFIG_FAT_DEFAULT_UTF8 is not set
++CONFIG_NTFS_FS=y
++# CONFIG_NTFS_DEBUG is not set
++CONFIG_NTFS_RW=y
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_PROC_VMCORE=y
++CONFIG_PROC_SYSCTL=y
++CONFIG_PROC_PAGE_MONITOR=y
++CONFIG_PROC_CHILDREN=y
++CONFIG_KERNFS=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++CONFIG_TMPFS_POSIX_ACL=y
++CONFIG_TMPFS_XATTR=y
++CONFIG_HUGETLBFS=y
++CONFIG_HUGETLB_PAGE=y
++CONFIG_ARCH_HAS_GIGANTIC_PAGE=y
++CONFIG_CONFIGFS_FS=y
++# CONFIG_EFIVAR_FS is not set
++CONFIG_MISC_FILESYSTEMS=y
++# CONFIG_ORANGEFS_FS is not set
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_ECRYPT_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++# CONFIG_CRAMFS is not set
++# CONFIG_SQUASHFS is not set
++# CONFIG_VXFS_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_OMFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_QNX6FS_FS is not set
++# CONFIG_ROMFS_FS is not set
++CONFIG_PSTORE=y
++CONFIG_PSTORE_ZLIB_COMPRESS=y
++# CONFIG_PSTORE_LZO_COMPRESS is not set
++# CONFIG_PSTORE_LZ4_COMPRESS is not set
++# CONFIG_PSTORE_CONSOLE is not set
++# CONFIG_PSTORE_PMSG is not set
++# CONFIG_PSTORE_FTRACE is not set
++# CONFIG_PSTORE_RAM is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++CONFIG_NETWORK_FILESYSTEMS=y
++CONFIG_NFS_FS=y
++CONFIG_NFS_V2=y
++CONFIG_NFS_V3=y
++CONFIG_NFS_V3_ACL=y
++CONFIG_NFS_V4=y
++# CONFIG_NFS_SWAP is not set
++# CONFIG_NFS_V4_1 is not set
++CONFIG_ROOT_NFS=y
++# CONFIG_NFS_FSCACHE is not set
++# CONFIG_NFS_USE_LEGACY_DNS is not set
++CONFIG_NFS_USE_KERNEL_DNS=y
++# CONFIG_NFSD is not set
++CONFIG_GRACE_PERIOD=y
++CONFIG_LOCKD=y
++CONFIG_LOCKD_V4=y
++CONFIG_NFS_ACL_SUPPORT=y
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=y
++CONFIG_SUNRPC_GSS=y
++# CONFIG_SUNRPC_DEBUG is not set
++# CONFIG_CEPH_FS is not set
++CONFIG_CIFS=y
++# CONFIG_CIFS_STATS is not set
++# CONFIG_CIFS_WEAK_PW_HASH is not set
++# CONFIG_CIFS_UPCALL is not set
++# CONFIG_CIFS_XATTR is not set
++CONFIG_CIFS_DEBUG=y
++# CONFIG_CIFS_DEBUG2 is not set
++# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set
++# CONFIG_CIFS_DFS_UPCALL is not set
++# CONFIG_CIFS_SMB311 is not set
++CONFIG_CIFS_FSCACHE=y
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++CONFIG_NLS=y
++CONFIG_NLS_DEFAULT="utf8"
++CONFIG_NLS_CODEPAGE_437=y
++# CONFIG_NLS_CODEPAGE_737 is not set
++# CONFIG_NLS_CODEPAGE_775 is not set
++# CONFIG_NLS_CODEPAGE_850 is not set
++# CONFIG_NLS_CODEPAGE_852 is not set
++# CONFIG_NLS_CODEPAGE_855 is not set
++# CONFIG_NLS_CODEPAGE_857 is not set
++# CONFIG_NLS_CODEPAGE_860 is not set
++# CONFIG_NLS_CODEPAGE_861 is not set
++# CONFIG_NLS_CODEPAGE_862 is not set
++# CONFIG_NLS_CODEPAGE_863 is not set
++# CONFIG_NLS_CODEPAGE_864 is not set
++# CONFIG_NLS_CODEPAGE_865 is not set
++# CONFIG_NLS_CODEPAGE_866 is not set
++# CONFIG_NLS_CODEPAGE_869 is not set
++# CONFIG_NLS_CODEPAGE_936 is not set
++# CONFIG_NLS_CODEPAGE_950 is not set
++# CONFIG_NLS_CODEPAGE_932 is not set
++# CONFIG_NLS_CODEPAGE_949 is not set
++# CONFIG_NLS_CODEPAGE_874 is not set
++# CONFIG_NLS_ISO8859_8 is not set
++# CONFIG_NLS_CODEPAGE_1250 is not set
++# CONFIG_NLS_CODEPAGE_1251 is not set
++CONFIG_NLS_ASCII=y
++CONFIG_NLS_ISO8859_1=y
++# CONFIG_NLS_ISO8859_2 is not set
++# CONFIG_NLS_ISO8859_3 is not set
++# CONFIG_NLS_ISO8859_4 is not set
++# CONFIG_NLS_ISO8859_5 is not set
++# CONFIG_NLS_ISO8859_6 is not set
++# CONFIG_NLS_ISO8859_7 is not set
++# CONFIG_NLS_ISO8859_9 is not set
++# CONFIG_NLS_ISO8859_13 is not set
++# CONFIG_NLS_ISO8859_14 is not set
++# CONFIG_NLS_ISO8859_15 is not set
++# CONFIG_NLS_KOI8_R is not set
++# CONFIG_NLS_KOI8_U is not set
++# CONFIG_NLS_MAC_ROMAN is not set
++# CONFIG_NLS_MAC_CELTIC is not set
++# CONFIG_NLS_MAC_CENTEURO is not set
++# CONFIG_NLS_MAC_CROATIAN is not set
++# CONFIG_NLS_MAC_CYRILLIC is not set
++# CONFIG_NLS_MAC_GAELIC is not set
++# CONFIG_NLS_MAC_GREEK is not set
++# CONFIG_NLS_MAC_ICELAND is not set
++# CONFIG_NLS_MAC_INUIT is not set
++# CONFIG_NLS_MAC_ROMANIAN is not set
++# CONFIG_NLS_MAC_TURKISH is not set
++CONFIG_NLS_UTF8=y
++# CONFIG_DLM is not set
++
++#
++# Kernel hacking
++#
++CONFIG_TRACE_IRQFLAGS_SUPPORT=y
++
++#
++# printk and dmesg options
++#
++CONFIG_PRINTK_TIME=y
++CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7
++CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4
++# CONFIG_BOOT_PRINTK_DELAY is not set
++CONFIG_DYNAMIC_DEBUG=y
++
++#
++# Compile-time checks and compiler options
++#
++# CONFIG_DEBUG_INFO is not set
++# CONFIG_ENABLE_WARN_DEPRECATED is not set
++CONFIG_ENABLE_MUST_CHECK=y
++CONFIG_FRAME_WARN=2048
++# CONFIG_STRIP_ASM_SYMS is not set
++# CONFIG_READABLE_ASM is not set
++# CONFIG_UNUSED_SYMBOLS is not set
++# CONFIG_PAGE_OWNER is not set
++CONFIG_DEBUG_FS=y
++# CONFIG_HEADERS_CHECK is not set
++# CONFIG_DEBUG_SECTION_MISMATCH is not set
++CONFIG_SECTION_MISMATCH_WARN_ONLY=y
++CONFIG_STACK_VALIDATION=y
++# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
++CONFIG_MAGIC_SYSRQ=y
++CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1
++CONFIG_MAGIC_SYSRQ_SERIAL=y
++CONFIG_DEBUG_KERNEL=y
++
++#
++# Memory Debugging
++#
++# CONFIG_PAGE_EXTENSION is not set
++# CONFIG_DEBUG_PAGEALLOC is not set
++# CONFIG_PAGE_POISONING is not set
++# CONFIG_DEBUG_PAGE_REF is not set
++CONFIG_DEBUG_RODATA_TEST=y
++# CONFIG_DEBUG_OBJECTS is not set
++# CONFIG_SLUB_DEBUG_ON is not set
++# CONFIG_SLUB_STATS is not set
++CONFIG_HAVE_DEBUG_KMEMLEAK=y
++CONFIG_DEBUG_KMEMLEAK=y
++CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=400
++# CONFIG_DEBUG_KMEMLEAK_TEST is not set
++CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
++# CONFIG_DEBUG_STACK_USAGE is not set
++# CONFIG_DEBUG_VM is not set
++CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y
++# CONFIG_DEBUG_VIRTUAL is not set
++# CONFIG_DEBUG_MEMORY_INIT is not set
++# CONFIG_DEBUG_PER_CPU_MAPS is not set
++CONFIG_HAVE_DEBUG_STACKOVERFLOW=y
++# CONFIG_DEBUG_STACKOVERFLOW is not set
++CONFIG_HAVE_ARCH_KASAN=y
++# CONFIG_KASAN is not set
++CONFIG_ARCH_HAS_KCOV=y
++# CONFIG_KCOV is not set
++CONFIG_DEBUG_SHIRQ=y
++
++#
++# Debug Lockups and Hangs
++#
++CONFIG_LOCKUP_DETECTOR=y
++CONFIG_SOFTLOCKUP_DETECTOR=y
++CONFIG_HARDLOCKUP_DETECTOR_PERF=y
++CONFIG_HARDLOCKUP_CHECK_TIMESTAMP=y
++CONFIG_HARDLOCKUP_DETECTOR=y
++# CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is not set
++CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=0
++# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
++CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
++CONFIG_DETECT_HUNG_TASK=y
++CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120
++# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
++CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
++# CONFIG_WQ_WATCHDOG is not set
++# CONFIG_PANIC_ON_OOPS is not set
++CONFIG_PANIC_ON_OOPS_VALUE=0
++CONFIG_PANIC_TIMEOUT=0
++CONFIG_SCHED_DEBUG=y
++CONFIG_SCHED_INFO=y
++CONFIG_SCHEDSTATS=y
++CONFIG_SCHED_STACK_END_CHECK=y
++# CONFIG_DEBUG_TIMEKEEPING is not set
++
++#
++# Lock Debugging (spinlocks, mutexes, etc...)
++#
++CONFIG_DEBUG_RT_MUTEXES=y
++CONFIG_DEBUG_SPINLOCK=y
++CONFIG_DEBUG_MUTEXES=y
++# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set
++CONFIG_DEBUG_LOCK_ALLOC=y
++CONFIG_PROVE_LOCKING=y
++CONFIG_LOCKDEP=y
++# CONFIG_LOCK_STAT is not set
++# CONFIG_DEBUG_LOCKDEP is not set
++CONFIG_DEBUG_ATOMIC_SLEEP=y
++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
++# CONFIG_LOCK_TORTURE_TEST is not set
++# CONFIG_WW_MUTEX_SELFTEST is not set
++CONFIG_TRACE_IRQFLAGS=y
++CONFIG_STACKTRACE=y
++# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set
++# CONFIG_DEBUG_KOBJECT is not set
++CONFIG_DEBUG_BUGVERBOSE=y
++# CONFIG_DEBUG_LIST is not set
++# CONFIG_DEBUG_PI_LIST is not set
++# CONFIG_DEBUG_SG is not set
++# CONFIG_DEBUG_NOTIFIERS is not set
++# CONFIG_DEBUG_CREDENTIALS is not set
++
++#
++# RCU Debugging
++#
++CONFIG_PROVE_RCU=y
++# CONFIG_TORTURE_TEST is not set
++# CONFIG_RCU_PERF_TEST is not set
++# CONFIG_RCU_TORTURE_TEST is not set
++CONFIG_RCU_CPU_STALL_TIMEOUT=60
++# CONFIG_RCU_TRACE is not set
++# CONFIG_RCU_EQS_DEBUG is not set
++# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set
++# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
++# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set
++# CONFIG_NOTIFIER_ERROR_INJECTION is not set
++# CONFIG_FAULT_INJECTION is not set
++CONFIG_FUNCTION_ERROR_INJECTION=y
++# CONFIG_LATENCYTOP is not set
++CONFIG_USER_STACKTRACE_SUPPORT=y
++CONFIG_NOP_TRACER=y
++CONFIG_HAVE_FUNCTION_TRACER=y
++CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
++CONFIG_HAVE_DYNAMIC_FTRACE=y
++CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y
++CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
++CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
++CONFIG_HAVE_FENTRY=y
++CONFIG_HAVE_C_RECORDMCOUNT=y
++CONFIG_TRACER_MAX_TRACE=y
++CONFIG_TRACE_CLOCK=y
++CONFIG_RING_BUFFER=y
++CONFIG_EVENT_TRACING=y
++CONFIG_CONTEXT_SWITCH_TRACER=y
++CONFIG_RING_BUFFER_ALLOW_SWAP=y
++CONFIG_TRACING=y
++CONFIG_GENERIC_TRACER=y
++CONFIG_TRACING_SUPPORT=y
++CONFIG_FTRACE=y
++CONFIG_FUNCTION_TRACER=y
++CONFIG_FUNCTION_GRAPH_TRACER=y
++# CONFIG_IRQSOFF_TRACER is not set
++CONFIG_SCHED_TRACER=y
++CONFIG_HWLAT_TRACER=y
++CONFIG_FTRACE_SYSCALLS=y
++CONFIG_TRACER_SNAPSHOT=y
++# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set
++CONFIG_BRANCH_PROFILE_NONE=y
++# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
++# CONFIG_PROFILE_ALL_BRANCHES is not set
++CONFIG_STACK_TRACER=y
++CONFIG_BLK_DEV_IO_TRACE=y
++CONFIG_KPROBE_EVENTS=y
++CONFIG_UPROBE_EVENTS=y
++CONFIG_BPF_EVENTS=y
++CONFIG_PROBE_EVENTS=y
++CONFIG_DYNAMIC_FTRACE=y
++CONFIG_DYNAMIC_FTRACE_WITH_REGS=y
++CONFIG_FUNCTION_PROFILER=y
++# CONFIG_BPF_KPROBE_OVERRIDE is not set
++CONFIG_FTRACE_MCOUNT_RECORD=y
++# CONFIG_FTRACE_STARTUP_TEST is not set
++CONFIG_MMIOTRACE=y
++CONFIG_TRACING_MAP=y
++CONFIG_HIST_TRIGGERS=y
++# CONFIG_MMIOTRACE_TEST is not set
++# CONFIG_TRACEPOINT_BENCHMARK is not set
++# CONFIG_RING_BUFFER_BENCHMARK is not set
++# CONFIG_RING_BUFFER_STARTUP_TEST is not set
++# CONFIG_TRACE_EVAL_MAP_FILE is not set
++# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set
++# CONFIG_DMA_API_DEBUG is not set
++# CONFIG_RUNTIME_TESTING_MENU is not set
++CONFIG_MEMTEST=y
++# CONFIG_BUG_ON_DATA_CORRUPTION is not set
++# CONFIG_SAMPLES is not set
++CONFIG_HAVE_ARCH_KGDB=y
++CONFIG_KGDB=y
++CONFIG_KGDB_SERIAL_CONSOLE=y
++# CONFIG_KGDB_TESTS is not set
++CONFIG_KGDB_LOW_LEVEL_TRAP=y
++CONFIG_KGDB_KDB=y
++CONFIG_KDB_DEFAULT_ENABLE=0x1
++CONFIG_KDB_KEYBOARD=y
++CONFIG_KDB_CONTINUE_CATASTROPHIC=0
++CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y
++# CONFIG_ARCH_WANTS_UBSAN_NO_NULL is not set
++# CONFIG_UBSAN is not set
++CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y
++# CONFIG_STRICT_DEVMEM is not set
++# CONFIG_X86_VERBOSE_BOOTUP is not set
++CONFIG_EARLY_PRINTK=y
++# CONFIG_EARLY_PRINTK_DBGP is not set
++# CONFIG_EARLY_PRINTK_EFI is not set
++# CONFIG_EARLY_PRINTK_USB_XDBC is not set
++# CONFIG_X86_PTDUMP_CORE is not set
++# CONFIG_X86_PTDUMP is not set
++# CONFIG_EFI_PGT_DUMP is not set
++# CONFIG_DEBUG_WX is not set
++CONFIG_DOUBLEFAULT=y
++# CONFIG_DEBUG_TLBFLUSH is not set
++# CONFIG_IOMMU_DEBUG is not set
++CONFIG_HAVE_MMIOTRACE_SUPPORT=y
++# CONFIG_X86_DECODER_SELFTEST is not set
++CONFIG_IO_DELAY_TYPE_0X80=0
++CONFIG_IO_DELAY_TYPE_0XED=1
++CONFIG_IO_DELAY_TYPE_UDELAY=2
++CONFIG_IO_DELAY_TYPE_NONE=3
++# CONFIG_IO_DELAY_0X80 is not set
++CONFIG_IO_DELAY_0XED=y
++# CONFIG_IO_DELAY_UDELAY is not set
++# CONFIG_IO_DELAY_NONE is not set
++CONFIG_DEFAULT_IO_DELAY_TYPE=1
++# CONFIG_DEBUG_BOOT_PARAMS is not set
++# CONFIG_CPA_DEBUG is not set
++CONFIG_OPTIMIZE_INLINING=y
++# CONFIG_DEBUG_ENTRY is not set
++# CONFIG_DEBUG_NMI_SELFTEST is not set
++CONFIG_X86_DEBUG_FPU=y
++# CONFIG_PUNIT_ATOM_DEBUG is not set
++CONFIG_UNWINDER_ORC=y
++# CONFIG_UNWINDER_FRAME_POINTER is not set
++# CONFIG_UNWINDER_GUESS is not set
++
++#
++# Security options
++#
++CONFIG_KEYS=y
++CONFIG_KEYS_COMPAT=y
++# CONFIG_PERSISTENT_KEYRINGS is not set
++# CONFIG_BIG_KEYS is not set
++# CONFIG_ENCRYPTED_KEYS is not set
++# CONFIG_KEY_DH_OPERATIONS is not set
++# CONFIG_SECURITY_DMESG_RESTRICT is not set
++CONFIG_SECURITY=y
++CONFIG_SECURITY_WRITABLE_HOOKS=y
++# CONFIG_SECURITYFS is not set
++CONFIG_SECURITY_NETWORK=y
++CONFIG_PAGE_TABLE_ISOLATION=y
++# CONFIG_SECURITY_NETWORK_XFRM is not set
++# CONFIG_SECURITY_PATH is not set
++CONFIG_LSM_MMAP_MIN_ADDR=65536
++CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y
++# CONFIG_HARDENED_USERCOPY is not set
++# CONFIG_FORTIFY_SOURCE is not set
++# CONFIG_STATIC_USERMODEHELPER is not set
++CONFIG_SECURITY_SELINUX=y
++CONFIG_SECURITY_SELINUX_BOOTPARAM=y
++CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=1
++CONFIG_SECURITY_SELINUX_DISABLE=y
++CONFIG_SECURITY_SELINUX_DEVELOP=y
++CONFIG_SECURITY_SELINUX_AVC_STATS=y
++CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
++# CONFIG_SECURITY_SMACK is not set
++# CONFIG_SECURITY_TOMOYO is not set
++# CONFIG_SECURITY_APPARMOR is not set
++# CONFIG_SECURITY_LOADPIN is not set
++# CONFIG_SECURITY_YAMA is not set
++CONFIG_INTEGRITY=y
++# CONFIG_INTEGRITY_SIGNATURE is not set
++CONFIG_INTEGRITY_AUDIT=y
++# CONFIG_IMA is not set
++# CONFIG_EVM is not set
++CONFIG_DEFAULT_SECURITY_SELINUX=y
++# CONFIG_DEFAULT_SECURITY_DAC is not set
++CONFIG_DEFAULT_SECURITY="selinux"
++CONFIG_CRYPTO=y
++
++#
++# Crypto core or helper
++#
++CONFIG_CRYPTO_ALGAPI=y
++CONFIG_CRYPTO_ALGAPI2=y
++CONFIG_CRYPTO_AEAD=y
++CONFIG_CRYPTO_AEAD2=y
++CONFIG_CRYPTO_BLKCIPHER=y
++CONFIG_CRYPTO_BLKCIPHER2=y
++CONFIG_CRYPTO_HASH=y
++CONFIG_CRYPTO_HASH2=y
++CONFIG_CRYPTO_RNG=y
++CONFIG_CRYPTO_RNG2=y
++CONFIG_CRYPTO_RNG_DEFAULT=y
++CONFIG_CRYPTO_AKCIPHER2=y
++CONFIG_CRYPTO_AKCIPHER=y
++CONFIG_CRYPTO_KPP2=y
++CONFIG_CRYPTO_ACOMP2=y
++CONFIG_CRYPTO_RSA=y
++# CONFIG_CRYPTO_DH is not set
++# CONFIG_CRYPTO_ECDH is not set
++CONFIG_CRYPTO_MANAGER=y
++CONFIG_CRYPTO_MANAGER2=y
++# CONFIG_CRYPTO_USER is not set
++CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
++# CONFIG_CRYPTO_GF128MUL is not set
++CONFIG_CRYPTO_NULL=y
++CONFIG_CRYPTO_NULL2=y
++# CONFIG_CRYPTO_PCRYPT is not set
++CONFIG_CRYPTO_WORKQUEUE=y
++# CONFIG_CRYPTO_CRYPTD is not set
++# CONFIG_CRYPTO_MCRYPTD is not set
++CONFIG_CRYPTO_AUTHENC=y
++# CONFIG_CRYPTO_TEST is not set
++
++#
++# Authenticated Encryption with Associated Data
++#
++CONFIG_CRYPTO_CCM=y
++# CONFIG_CRYPTO_GCM is not set
++# CONFIG_CRYPTO_CHACHA20POLY1305 is not set
++CONFIG_CRYPTO_SEQIV=y
++CONFIG_CRYPTO_ECHAINIV=y
++
++#
++# Block modes
++#
++CONFIG_CRYPTO_CBC=y
++CONFIG_CRYPTO_CTR=y
++# CONFIG_CRYPTO_CTS is not set
++CONFIG_CRYPTO_ECB=y
++# CONFIG_CRYPTO_LRW is not set
++# CONFIG_CRYPTO_PCBC is not set
++# CONFIG_CRYPTO_XTS is not set
++# CONFIG_CRYPTO_KEYWRAP is not set
++
++#
++# Hash modes
++#
++CONFIG_CRYPTO_CMAC=y
++CONFIG_CRYPTO_HMAC=y
++# CONFIG_CRYPTO_XCBC is not set
++# CONFIG_CRYPTO_VMAC is not set
++
++#
++# Digest
++#
++CONFIG_CRYPTO_CRC32C=y
++# CONFIG_CRYPTO_CRC32C_INTEL is not set
++# CONFIG_CRYPTO_CRC32 is not set
++# CONFIG_CRYPTO_CRC32_PCLMUL is not set
++CONFIG_CRYPTO_CRCT10DIF=y
++# CONFIG_CRYPTO_CRCT10DIF_PCLMUL is not set
++# CONFIG_CRYPTO_GHASH is not set
++# CONFIG_CRYPTO_POLY1305 is not set
++# CONFIG_CRYPTO_POLY1305_X86_64 is not set
++CONFIG_CRYPTO_MD4=y
++CONFIG_CRYPTO_MD5=y
++# CONFIG_CRYPTO_MICHAEL_MIC is not set
++# CONFIG_CRYPTO_RMD128 is not set
++# CONFIG_CRYPTO_RMD160 is not set
++# CONFIG_CRYPTO_RMD256 is not set
++# CONFIG_CRYPTO_RMD320 is not set
++CONFIG_CRYPTO_SHA1=y
++# CONFIG_CRYPTO_SHA1_SSSE3 is not set
++# CONFIG_CRYPTO_SHA256_SSSE3 is not set
++# CONFIG_CRYPTO_SHA512_SSSE3 is not set
++# CONFIG_CRYPTO_SHA1_MB is not set
++# CONFIG_CRYPTO_SHA256_MB is not set
++# CONFIG_CRYPTO_SHA512_MB is not set
++CONFIG_CRYPTO_SHA256=y
++CONFIG_CRYPTO_SHA512=y
++# CONFIG_CRYPTO_SHA3 is not set
++# CONFIG_CRYPTO_SM3 is not set
++# CONFIG_CRYPTO_TGR192 is not set
++# CONFIG_CRYPTO_WP512 is not set
++# CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL is not set
++
++#
++# Ciphers
++#
++CONFIG_CRYPTO_AES=y
++# CONFIG_CRYPTO_AES_TI is not set
++# CONFIG_CRYPTO_AES_X86_64 is not set
++# CONFIG_CRYPTO_AES_NI_INTEL is not set
++# CONFIG_CRYPTO_ANUBIS is not set
++CONFIG_CRYPTO_ARC4=y
++# CONFIG_CRYPTO_BLOWFISH is not set
++# CONFIG_CRYPTO_BLOWFISH_X86_64 is not set
++# CONFIG_CRYPTO_CAMELLIA is not set
++# CONFIG_CRYPTO_CAMELLIA_X86_64 is not set
++# CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64 is not set
++# CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64 is not set
++# CONFIG_CRYPTO_CAST5 is not set
++# CONFIG_CRYPTO_CAST5_AVX_X86_64 is not set
++# CONFIG_CRYPTO_CAST6 is not set
++# CONFIG_CRYPTO_CAST6_AVX_X86_64 is not set
++CONFIG_CRYPTO_DES=y
++# CONFIG_CRYPTO_DES3_EDE_X86_64 is not set
++# CONFIG_CRYPTO_FCRYPT is not set
++# CONFIG_CRYPTO_KHAZAD is not set
++# CONFIG_CRYPTO_SALSA20 is not set
++# CONFIG_CRYPTO_SALSA20_X86_64 is not set
++# CONFIG_CRYPTO_CHACHA20 is not set
++# CONFIG_CRYPTO_CHACHA20_X86_64 is not set
++# CONFIG_CRYPTO_SEED is not set
++# CONFIG_CRYPTO_SERPENT is not set
++# CONFIG_CRYPTO_SERPENT_SSE2_X86_64 is not set
++# CONFIG_CRYPTO_SERPENT_AVX_X86_64 is not set
++# CONFIG_CRYPTO_SERPENT_AVX2_X86_64 is not set
++# CONFIG_CRYPTO_TEA is not set
++# CONFIG_CRYPTO_TWOFISH is not set
++# CONFIG_CRYPTO_TWOFISH_X86_64 is not set
++# CONFIG_CRYPTO_TWOFISH_X86_64_3WAY is not set
++# CONFIG_CRYPTO_TWOFISH_AVX_X86_64 is not set
++
++#
++# Compression
++#
++# CONFIG_CRYPTO_DEFLATE is not set
++CONFIG_CRYPTO_LZO=y
++# CONFIG_CRYPTO_842 is not set
++# CONFIG_CRYPTO_LZ4 is not set
++# CONFIG_CRYPTO_LZ4HC is not set
++
++#
++# Random Number Generation
++#
++# CONFIG_CRYPTO_ANSI_CPRNG is not set
++CONFIG_CRYPTO_DRBG_MENU=y
++CONFIG_CRYPTO_DRBG_HMAC=y
++# CONFIG_CRYPTO_DRBG_HASH is not set
++# CONFIG_CRYPTO_DRBG_CTR is not set
++CONFIG_CRYPTO_DRBG=y
++CONFIG_CRYPTO_JITTERENTROPY=y
++# CONFIG_CRYPTO_USER_API_HASH is not set
++# CONFIG_CRYPTO_USER_API_SKCIPHER is not set
++# CONFIG_CRYPTO_USER_API_RNG is not set
++# CONFIG_CRYPTO_USER_API_AEAD is not set
++CONFIG_CRYPTO_HASH_INFO=y
++CONFIG_CRYPTO_HW=y
++# CONFIG_CRYPTO_DEV_PADLOCK is not set
++# CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC is not set
++# CONFIG_CRYPTO_DEV_CCP is not set
++# CONFIG_CRYPTO_DEV_QAT_DH895xCC is not set
++# CONFIG_CRYPTO_DEV_QAT_C3XXX is not set
++# CONFIG_CRYPTO_DEV_QAT_C62X is not set
++# CONFIG_CRYPTO_DEV_QAT_DH895xCCVF is not set
++# CONFIG_CRYPTO_DEV_QAT_C3XXXVF is not set
++# CONFIG_CRYPTO_DEV_QAT_C62XVF is not set
++# CONFIG_CRYPTO_DEV_NITROX_CNN55XX is not set
++CONFIG_ASYMMETRIC_KEY_TYPE=y
++CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
++CONFIG_X509_CERTIFICATE_PARSER=y
++CONFIG_PKCS7_MESSAGE_PARSER=y
++# CONFIG_PKCS7_TEST_KEY is not set
++# CONFIG_SIGNED_PE_FILE_VERIFICATION is not set
++
++#
++# Certificates for signature checking
++#
++CONFIG_MODULE_SIG_KEY="certs/signing_key.pem"
++CONFIG_SYSTEM_TRUSTED_KEYRING=y
++CONFIG_SYSTEM_TRUSTED_KEYS=""
++# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set
++# CONFIG_SECONDARY_TRUSTED_KEYRING is not set
++# CONFIG_SYSTEM_BLACKLIST_KEYRING is not set
++CONFIG_HAVE_KVM=y
++CONFIG_VIRTUALIZATION=y
++# CONFIG_KVM is not set
++# CONFIG_VHOST_NET is not set
++# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set
++CONFIG_BINARY_PRINTF=y
++
++#
++# Library routines
++#
++CONFIG_BITREVERSE=y
++# CONFIG_HAVE_ARCH_BITREVERSE is not set
++CONFIG_RATIONAL=y
++CONFIG_GENERIC_STRNCPY_FROM_USER=y
++CONFIG_GENERIC_STRNLEN_USER=y
++CONFIG_GENERIC_NET_UTILS=y
++CONFIG_GENERIC_FIND_FIRST_BIT=y
++CONFIG_GENERIC_PCI_IOMAP=y
++CONFIG_GENERIC_IOMAP=y
++CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y
++CONFIG_ARCH_HAS_FAST_MULTIPLIER=y
++CONFIG_CRC_CCITT=y
++CONFIG_CRC16=y
++CONFIG_CRC_T10DIF=y
++# CONFIG_CRC_ITU_T is not set
++CONFIG_CRC32=y
++# CONFIG_CRC32_SELFTEST is not set
++CONFIG_CRC32_SLICEBY8=y
++# CONFIG_CRC32_SLICEBY4 is not set
++# CONFIG_CRC32_SARWATE is not set
++# CONFIG_CRC32_BIT is not set
++# CONFIG_CRC4 is not set
++# CONFIG_CRC7 is not set
++CONFIG_LIBCRC32C=y
++# CONFIG_CRC8 is not set
++# CONFIG_AUDIT_ARCH_COMPAT_GENERIC is not set
++# CONFIG_RANDOM32_SELFTEST is not set
++CONFIG_ZLIB_INFLATE=y
++CONFIG_ZLIB_DEFLATE=y
++CONFIG_LZO_COMPRESS=y
++CONFIG_LZO_DECOMPRESS=y
++CONFIG_LZ4_DECOMPRESS=y
++CONFIG_XZ_DEC=y
++CONFIG_XZ_DEC_X86=y
++CONFIG_XZ_DEC_POWERPC=y
++CONFIG_XZ_DEC_IA64=y
++CONFIG_XZ_DEC_ARM=y
++CONFIG_XZ_DEC_ARMTHUMB=y
++CONFIG_XZ_DEC_SPARC=y
++CONFIG_XZ_DEC_BCJ=y
++# CONFIG_XZ_DEC_TEST is not set
++CONFIG_DECOMPRESS_GZIP=y
++CONFIG_DECOMPRESS_BZIP2=y
++CONFIG_DECOMPRESS_LZMA=y
++CONFIG_DECOMPRESS_XZ=y
++CONFIG_DECOMPRESS_LZO=y
++CONFIG_DECOMPRESS_LZ4=y
++CONFIG_GENERIC_ALLOCATOR=y
++CONFIG_INTERVAL_TREE=y
++CONFIG_RADIX_TREE_MULTIORDER=y
++CONFIG_ASSOCIATIVE_ARRAY=y
++CONFIG_HAS_IOMEM=y
++CONFIG_HAS_IOPORT_MAP=y
++CONFIG_HAS_DMA=y
++CONFIG_SGL_ALLOC=y
++# CONFIG_DMA_DIRECT_OPS is not set
++# CONFIG_DMA_VIRT_OPS is not set
++CONFIG_CHECK_SIGNATURE=y
++CONFIG_CPU_RMAP=y
++CONFIG_DQL=y
++CONFIG_GLOB=y
++# CONFIG_GLOB_SELFTEST is not set
++CONFIG_NLATTR=y
++CONFIG_CLZ_TAB=y
++# CONFIG_CORDIC is not set
++# CONFIG_DDR is not set
++# CONFIG_IRQ_POLL is not set
++CONFIG_MPILIB=y
++CONFIG_OID_REGISTRY=y
++CONFIG_UCS2_STRING=y
++CONFIG_FONT_SUPPORT=y
++# CONFIG_FONTS is not set
++CONFIG_FONT_8x8=y
++CONFIG_FONT_8x16=y
++# CONFIG_SG_SPLIT is not set
++CONFIG_SG_POOL=y
++CONFIG_ARCH_HAS_SG_CHAIN=y
++CONFIG_ARCH_HAS_PMEM_API=y
++CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y
++CONFIG_SBITMAP=y
++# CONFIG_STRING_SELFTEST is not set
+diff --git a/arch/x86/configs/rock-rel_defconfig b/arch/x86/configs/rock-rel_defconfig
+new file mode 100644
+index 0000000..9f2dcd9
+--- /dev/null
++++ b/arch/x86/configs/rock-rel_defconfig
+@@ -0,0 +1,9244 @@
++#
++# Automatically generated file; DO NOT EDIT.
++# Linux/x86 4.16.0-rc1 Kernel Configuration
++#
++CONFIG_64BIT=y
++CONFIG_X86_64=y
++CONFIG_X86=y
++CONFIG_INSTRUCTION_DECODER=y
++CONFIG_OUTPUT_FORMAT="elf64-x86-64"
++CONFIG_ARCH_DEFCONFIG="arch/x86/configs/x86_64_defconfig"
++CONFIG_LOCKDEP_SUPPORT=y
++CONFIG_STACKTRACE_SUPPORT=y
++CONFIG_MMU=y
++CONFIG_ARCH_MMAP_RND_BITS_MIN=28
++CONFIG_ARCH_MMAP_RND_BITS_MAX=32
++CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8
++CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16
++CONFIG_NEED_DMA_MAP_STATE=y
++CONFIG_NEED_SG_DMA_LENGTH=y
++CONFIG_GENERIC_ISA_DMA=y
++CONFIG_GENERIC_BUG=y
++CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_ARCH_MAY_HAVE_PC_FDC=y
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_ARCH_HAS_CPU_RELAX=y
++CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
++CONFIG_HAVE_SETUP_PER_CPU_AREA=y
++CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y
++CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y
++CONFIG_ARCH_HIBERNATION_POSSIBLE=y
++CONFIG_ARCH_SUSPEND_POSSIBLE=y
++CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y
++CONFIG_ARCH_WANT_GENERAL_HUGETLB=y
++CONFIG_ZONE_DMA32=y
++CONFIG_AUDIT_ARCH=y
++CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
++CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
++CONFIG_HAVE_INTEL_TXT=y
++CONFIG_X86_64_SMP=y
++CONFIG_ARCH_SUPPORTS_UPROBES=y
++CONFIG_FIX_EARLYCON_MEM=y
++CONFIG_PGTABLE_LEVELS=4
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++CONFIG_IRQ_WORK=y
++CONFIG_BUILDTIME_EXTABLE_SORT=y
++CONFIG_THREAD_INFO_IN_TASK=y
++
++#
++# General setup
++#
++CONFIG_INIT_ENV_ARG_LIMIT=32
++CONFIG_CROSS_COMPILE=""
++# CONFIG_COMPILE_TEST is not set
++CONFIG_LOCALVERSION="-kfd"
++# CONFIG_LOCALVERSION_AUTO is not set
++CONFIG_HAVE_KERNEL_GZIP=y
++CONFIG_HAVE_KERNEL_BZIP2=y
++CONFIG_HAVE_KERNEL_LZMA=y
++CONFIG_HAVE_KERNEL_XZ=y
++CONFIG_HAVE_KERNEL_LZO=y
++CONFIG_HAVE_KERNEL_LZ4=y
++CONFIG_KERNEL_GZIP=y
++# CONFIG_KERNEL_BZIP2 is not set
++# CONFIG_KERNEL_LZMA is not set
++# CONFIG_KERNEL_XZ is not set
++# CONFIG_KERNEL_LZO is not set
++# CONFIG_KERNEL_LZ4 is not set
++CONFIG_DEFAULT_HOSTNAME="(none)"
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++CONFIG_SYSVIPC_SYSCTL=y
++CONFIG_POSIX_MQUEUE=y
++CONFIG_POSIX_MQUEUE_SYSCTL=y
++CONFIG_CROSS_MEMORY_ATTACH=y
++CONFIG_USELIB=y
++CONFIG_AUDIT=y
++CONFIG_HAVE_ARCH_AUDITSYSCALL=y
++CONFIG_AUDITSYSCALL=y
++CONFIG_AUDIT_WATCH=y
++CONFIG_AUDIT_TREE=y
++
++#
++# IRQ subsystem
++#
++CONFIG_GENERIC_IRQ_PROBE=y
++CONFIG_GENERIC_IRQ_SHOW=y
++CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y
++CONFIG_GENERIC_PENDING_IRQ=y
++CONFIG_GENERIC_IRQ_MIGRATION=y
++CONFIG_GENERIC_IRQ_CHIP=y
++CONFIG_IRQ_DOMAIN=y
++CONFIG_IRQ_DOMAIN_HIERARCHY=y
++CONFIG_GENERIC_MSI_IRQ=y
++CONFIG_GENERIC_MSI_IRQ_DOMAIN=y
++CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR=y
++CONFIG_GENERIC_IRQ_RESERVATION_MODE=y
++CONFIG_IRQ_FORCED_THREADING=y
++CONFIG_SPARSE_IRQ=y
++# CONFIG_GENERIC_IRQ_DEBUGFS is not set
++CONFIG_CLOCKSOURCE_WATCHDOG=y
++CONFIG_ARCH_CLOCKSOURCE_DATA=y
++CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y
++CONFIG_GENERIC_TIME_VSYSCALL=y
++CONFIG_GENERIC_CLOCKEVENTS=y
++CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
++CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y
++CONFIG_GENERIC_CMOS_UPDATE=y
++
++#
++# Timers subsystem
++#
++CONFIG_TICK_ONESHOT=y
++CONFIG_NO_HZ_COMMON=y
++# CONFIG_HZ_PERIODIC is not set
++CONFIG_NO_HZ_IDLE=y
++# CONFIG_NO_HZ_FULL is not set
++CONFIG_NO_HZ=y
++CONFIG_HIGH_RES_TIMERS=y
++
++#
++# CPU/Task time and stats accounting
++#
++CONFIG_TICK_CPU_ACCOUNTING=y
++# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set
++# CONFIG_IRQ_TIME_ACCOUNTING is not set
++CONFIG_BSD_PROCESS_ACCT=y
++CONFIG_BSD_PROCESS_ACCT_V3=y
++CONFIG_TASKSTATS=y
++CONFIG_TASK_DELAY_ACCT=y
++CONFIG_TASK_XACCT=y
++CONFIG_TASK_IO_ACCOUNTING=y
++# CONFIG_CPU_ISOLATION is not set
++
++#
++# RCU Subsystem
++#
++CONFIG_TREE_RCU=y
++# CONFIG_RCU_EXPERT is not set
++CONFIG_SRCU=y
++CONFIG_TREE_SRCU=y
++# CONFIG_TASKS_RCU is not set
++CONFIG_RCU_STALL_COMMON=y
++CONFIG_RCU_NEED_SEGCBLIST=y
++CONFIG_BUILD_BIN2C=y
++# CONFIG_IKCONFIG is not set
++CONFIG_LOG_BUF_SHIFT=18
++CONFIG_LOG_CPU_MAX_BUF_SHIFT=12
++CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13
++CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
++CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y
++CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y
++CONFIG_ARCH_SUPPORTS_INT128=y
++CONFIG_NUMA_BALANCING=y
++CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y
++CONFIG_CGROUPS=y
++CONFIG_PAGE_COUNTER=y
++CONFIG_MEMCG=y
++CONFIG_MEMCG_SWAP=y
++# CONFIG_MEMCG_SWAP_ENABLED is not set
++CONFIG_BLK_CGROUP=y
++# CONFIG_DEBUG_BLK_CGROUP is not set
++CONFIG_CGROUP_WRITEBACK=y
++CONFIG_CGROUP_SCHED=y
++CONFIG_FAIR_GROUP_SCHED=y
++CONFIG_CFS_BANDWIDTH=y
++# CONFIG_RT_GROUP_SCHED is not set
++CONFIG_CGROUP_PIDS=y
++# CONFIG_CGROUP_RDMA is not set
++CONFIG_CGROUP_FREEZER=y
++CONFIG_CGROUP_HUGETLB=y
++CONFIG_CPUSETS=y
++CONFIG_PROC_PID_CPUSET=y
++CONFIG_CGROUP_DEVICE=y
++CONFIG_CGROUP_CPUACCT=y
++CONFIG_CGROUP_PERF=y
++# CONFIG_CGROUP_BPF is not set
++# CONFIG_CGROUP_DEBUG is not set
++CONFIG_SOCK_CGROUP_DATA=y
++CONFIG_NAMESPACES=y
++CONFIG_UTS_NS=y
++CONFIG_IPC_NS=y
++CONFIG_USER_NS=y
++CONFIG_PID_NS=y
++CONFIG_NET_NS=y
++CONFIG_SCHED_AUTOGROUP=y
++# CONFIG_SYSFS_DEPRECATED is not set
++CONFIG_RELAY=y
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_INITRAMFS_SOURCE=""
++CONFIG_RD_GZIP=y
++CONFIG_RD_BZIP2=y
++CONFIG_RD_LZMA=y
++CONFIG_RD_XZ=y
++CONFIG_RD_LZO=y
++CONFIG_RD_LZ4=y
++CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++CONFIG_SYSCTL=y
++CONFIG_ANON_INODES=y
++CONFIG_HAVE_UID16=y
++CONFIG_SYSCTL_EXCEPTION_TRACE=y
++CONFIG_HAVE_PCSPKR_PLATFORM=y
++CONFIG_BPF=y
++CONFIG_EXPERT=y
++CONFIG_UID16=y
++CONFIG_MULTIUSER=y
++CONFIG_SGETMASK_SYSCALL=y
++CONFIG_SYSFS_SYSCALL=y
++CONFIG_SYSCTL_SYSCALL=y
++CONFIG_FHANDLE=y
++CONFIG_POSIX_TIMERS=y
++CONFIG_PRINTK=y
++CONFIG_PRINTK_NMI=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_PCSPKR_PLATFORM=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_FUTEX_PI=y
++CONFIG_EPOLL=y
++CONFIG_SIGNALFD=y
++CONFIG_TIMERFD=y
++CONFIG_EVENTFD=y
++CONFIG_SHMEM=y
++CONFIG_AIO=y
++CONFIG_ADVISE_SYSCALLS=y
++CONFIG_MEMBARRIER=y
++CONFIG_CHECKPOINT_RESTORE=y
++CONFIG_KALLSYMS=y
++CONFIG_KALLSYMS_ALL=y
++CONFIG_KALLSYMS_ABSOLUTE_PERCPU=y
++CONFIG_KALLSYMS_BASE_RELATIVE=y
++CONFIG_BPF_SYSCALL=y
++# CONFIG_BPF_JIT_ALWAYS_ON is not set
++CONFIG_USERFAULTFD=y
++CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y
++# CONFIG_EMBEDDED is not set
++CONFIG_HAVE_PERF_EVENTS=y
++# CONFIG_PC104 is not set
++
++#
++# Kernel Performance Events And Counters
++#
++CONFIG_PERF_EVENTS=y
++# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_SLUB_DEBUG=y
++# CONFIG_SLUB_MEMCG_SYSFS_ON is not set
++# CONFIG_COMPAT_BRK is not set
++# CONFIG_SLAB is not set
++CONFIG_SLUB=y
++# CONFIG_SLOB is not set
++CONFIG_SLAB_MERGE_DEFAULT=y
++# CONFIG_SLAB_FREELIST_RANDOM is not set
++# CONFIG_SLAB_FREELIST_HARDENED is not set
++CONFIG_SLUB_CPU_PARTIAL=y
++CONFIG_SYSTEM_DATA_VERIFICATION=y
++CONFIG_PROFILING=y
++CONFIG_TRACEPOINTS=y
++CONFIG_CRASH_CORE=y
++CONFIG_KEXEC_CORE=y
++CONFIG_OPROFILE=m
++# CONFIG_OPROFILE_EVENT_MULTIPLEX is not set
++CONFIG_HAVE_OPROFILE=y
++CONFIG_OPROFILE_NMI_TIMER=y
++CONFIG_KPROBES=y
++CONFIG_JUMP_LABEL=y
++# CONFIG_STATIC_KEYS_SELFTEST is not set
++CONFIG_OPTPROBES=y
++CONFIG_KPROBES_ON_FTRACE=y
++# CONFIG_UPROBES is not set
++# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set
++CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
++CONFIG_ARCH_USE_BUILTIN_BSWAP=y
++CONFIG_KRETPROBES=y
++CONFIG_USER_RETURN_NOTIFIER=y
++CONFIG_HAVE_IOREMAP_PROT=y
++CONFIG_HAVE_KPROBES=y
++CONFIG_HAVE_KRETPROBES=y
++CONFIG_HAVE_OPTPROBES=y
++CONFIG_HAVE_KPROBES_ON_FTRACE=y
++CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y
++CONFIG_HAVE_NMI=y
++CONFIG_HAVE_ARCH_TRACEHOOK=y
++CONFIG_HAVE_DMA_CONTIGUOUS=y
++CONFIG_GENERIC_SMP_IDLE_THREAD=y
++CONFIG_ARCH_HAS_FORTIFY_SOURCE=y
++CONFIG_ARCH_HAS_SET_MEMORY=y
++CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y
++CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y
++CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
++CONFIG_HAVE_CLK=y
++CONFIG_HAVE_DMA_API_DEBUG=y
++CONFIG_HAVE_HW_BREAKPOINT=y
++CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y
++CONFIG_HAVE_USER_RETURN_NOTIFIER=y
++CONFIG_HAVE_PERF_EVENTS_NMI=y
++CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y
++CONFIG_HAVE_PERF_REGS=y
++CONFIG_HAVE_PERF_USER_STACK_DUMP=y
++CONFIG_HAVE_ARCH_JUMP_LABEL=y
++CONFIG_HAVE_RCU_TABLE_FREE=y
++CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y
++CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y
++CONFIG_HAVE_CMPXCHG_LOCAL=y
++CONFIG_HAVE_CMPXCHG_DOUBLE=y
++CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y
++CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y
++CONFIG_HAVE_ARCH_SECCOMP_FILTER=y
++CONFIG_SECCOMP_FILTER=y
++CONFIG_HAVE_GCC_PLUGINS=y
++# CONFIG_GCC_PLUGINS is not set
++CONFIG_HAVE_CC_STACKPROTECTOR=y
++# CONFIG_CC_STACKPROTECTOR_NONE is not set
++CONFIG_CC_STACKPROTECTOR_REGULAR=y
++# CONFIG_CC_STACKPROTECTOR_STRONG is not set
++# CONFIG_CC_STACKPROTECTOR_AUTO is not set
++CONFIG_THIN_ARCHIVES=y
++CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y
++CONFIG_HAVE_CONTEXT_TRACKING=y
++CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y
++CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y
++CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y
++CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD=y
++CONFIG_HAVE_ARCH_HUGE_VMAP=y
++CONFIG_HAVE_ARCH_SOFT_DIRTY=y
++CONFIG_HAVE_MOD_ARCH_SPECIFIC=y
++CONFIG_MODULES_USE_ELF_RELA=y
++CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y
++CONFIG_ARCH_HAS_ELF_RANDOMIZE=y
++CONFIG_HAVE_ARCH_MMAP_RND_BITS=y
++CONFIG_HAVE_EXIT_THREAD=y
++CONFIG_ARCH_MMAP_RND_BITS=28
++CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y
++CONFIG_ARCH_MMAP_RND_COMPAT_BITS=8
++CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES=y
++CONFIG_HAVE_COPY_THREAD_TLS=y
++CONFIG_HAVE_STACK_VALIDATION=y
++# CONFIG_HAVE_ARCH_HASH is not set
++# CONFIG_ISA_BUS_API is not set
++CONFIG_OLD_SIGSUSPEND3=y
++CONFIG_COMPAT_OLD_SIGACTION=y
++# CONFIG_CPU_NO_EFFICIENT_FFS is not set
++CONFIG_HAVE_ARCH_VMAP_STACK=y
++CONFIG_VMAP_STACK=y
++# CONFIG_ARCH_OPTIONAL_KERNEL_RWX is not set
++# CONFIG_ARCH_OPTIONAL_KERNEL_RWX_DEFAULT is not set
++CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y
++CONFIG_STRICT_KERNEL_RWX=y
++CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y
++CONFIG_STRICT_MODULE_RWX=y
++CONFIG_ARCH_HAS_PHYS_TO_DMA=y
++CONFIG_ARCH_HAS_REFCOUNT=y
++# CONFIG_REFCOUNT_FULL is not set
++
++#
++# GCOV-based kernel profiling
++#
++# CONFIG_GCOV_KERNEL is not set
++CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y
++# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
++CONFIG_RT_MUTEXES=y
++CONFIG_BASE_SMALL=0
++CONFIG_MODULES=y
++# CONFIG_MODULE_FORCE_LOAD is not set
++CONFIG_MODULE_UNLOAD=y
++# CONFIG_MODULE_FORCE_UNLOAD is not set
++CONFIG_MODVERSIONS=y
++CONFIG_MODULE_SRCVERSION_ALL=y
++CONFIG_MODULE_SIG=y
++# CONFIG_MODULE_SIG_FORCE is not set
++CONFIG_MODULE_SIG_ALL=y
++# CONFIG_MODULE_SIG_SHA1 is not set
++# CONFIG_MODULE_SIG_SHA224 is not set
++# CONFIG_MODULE_SIG_SHA256 is not set
++# CONFIG_MODULE_SIG_SHA384 is not set
++CONFIG_MODULE_SIG_SHA512=y
++CONFIG_MODULE_SIG_HASH="sha512"
++# CONFIG_MODULE_COMPRESS is not set
++CONFIG_MODULES_TREE_LOOKUP=y
++CONFIG_BLOCK=y
++CONFIG_BLK_SCSI_REQUEST=y
++CONFIG_BLK_DEV_BSG=y
++CONFIG_BLK_DEV_BSGLIB=y
++CONFIG_BLK_DEV_INTEGRITY=y
++# CONFIG_BLK_DEV_ZONED is not set
++CONFIG_BLK_DEV_THROTTLING=y
++# CONFIG_BLK_DEV_THROTTLING_LOW is not set
++CONFIG_BLK_CMDLINE_PARSER=y
++# CONFIG_BLK_WBT is not set
++CONFIG_BLK_DEBUG_FS=y
++# CONFIG_BLK_SED_OPAL is not set
++
++#
++# Partition Types
++#
++CONFIG_PARTITION_ADVANCED=y
++# CONFIG_ACORN_PARTITION is not set
++CONFIG_AIX_PARTITION=y
++CONFIG_OSF_PARTITION=y
++CONFIG_AMIGA_PARTITION=y
++CONFIG_ATARI_PARTITION=y
++CONFIG_MAC_PARTITION=y
++CONFIG_MSDOS_PARTITION=y
++CONFIG_BSD_DISKLABEL=y
++CONFIG_MINIX_SUBPARTITION=y
++CONFIG_SOLARIS_X86_PARTITION=y
++CONFIG_UNIXWARE_DISKLABEL=y
++CONFIG_LDM_PARTITION=y
++# CONFIG_LDM_DEBUG is not set
++CONFIG_SGI_PARTITION=y
++CONFIG_ULTRIX_PARTITION=y
++CONFIG_SUN_PARTITION=y
++CONFIG_KARMA_PARTITION=y
++CONFIG_EFI_PARTITION=y
++CONFIG_SYSV68_PARTITION=y
++CONFIG_CMDLINE_PARTITION=y
++CONFIG_BLOCK_COMPAT=y
++CONFIG_BLK_MQ_PCI=y
++CONFIG_BLK_MQ_VIRTIO=y
++CONFIG_BLK_MQ_RDMA=y
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++CONFIG_CFQ_GROUP_IOSCHED=y
++CONFIG_DEFAULT_DEADLINE=y
++# CONFIG_DEFAULT_CFQ is not set
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="deadline"
++CONFIG_MQ_IOSCHED_DEADLINE=y
++CONFIG_MQ_IOSCHED_KYBER=y
++# CONFIG_IOSCHED_BFQ is not set
++CONFIG_PREEMPT_NOTIFIERS=y
++CONFIG_PADATA=y
++CONFIG_ASN1=y
++CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
++CONFIG_INLINE_READ_UNLOCK=y
++CONFIG_INLINE_READ_UNLOCK_IRQ=y
++CONFIG_INLINE_WRITE_UNLOCK=y
++CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
++CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y
++CONFIG_MUTEX_SPIN_ON_OWNER=y
++CONFIG_RWSEM_SPIN_ON_OWNER=y
++CONFIG_LOCK_SPIN_ON_OWNER=y
++CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y
++CONFIG_QUEUED_SPINLOCKS=y
++CONFIG_ARCH_USE_QUEUED_RWLOCKS=y
++CONFIG_QUEUED_RWLOCKS=y
++CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE=y
++CONFIG_FREEZER=y
++
++#
++# Processor type and features
++#
++# CONFIG_ZONE_DMA is not set
++CONFIG_SMP=y
++CONFIG_X86_FEATURE_NAMES=y
++CONFIG_X86_FAST_FEATURE_TESTS=y
++CONFIG_X86_X2APIC=y
++CONFIG_X86_MPPARSE=y
++# CONFIG_GOLDFISH is not set
++CONFIG_RETPOLINE=y
++# CONFIG_INTEL_RDT is not set
++CONFIG_X86_EXTENDED_PLATFORM=y
++CONFIG_X86_NUMACHIP=y
++# CONFIG_X86_VSMP is not set
++# CONFIG_X86_UV is not set
++# CONFIG_X86_GOLDFISH is not set
++# CONFIG_X86_INTEL_MID is not set
++CONFIG_X86_INTEL_LPSS=y
++CONFIG_X86_AMD_PLATFORM_DEVICE=y
++CONFIG_IOSF_MBI=y
++CONFIG_IOSF_MBI_DEBUG=y
++CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y
++CONFIG_SCHED_OMIT_FRAME_POINTER=y
++CONFIG_HYPERVISOR_GUEST=y
++CONFIG_PARAVIRT=y
++# CONFIG_PARAVIRT_DEBUG is not set
++CONFIG_PARAVIRT_SPINLOCKS=y
++# CONFIG_QUEUED_LOCK_STAT is not set
++CONFIG_XEN=y
++CONFIG_XEN_PV=y
++CONFIG_XEN_PV_SMP=y
++CONFIG_XEN_DOM0=y
++CONFIG_XEN_PVHVM=y
++CONFIG_XEN_PVHVM_SMP=y
++CONFIG_XEN_512GB=y
++CONFIG_XEN_SAVE_RESTORE=y
++# CONFIG_XEN_DEBUG_FS is not set
++CONFIG_XEN_PVH=y
++CONFIG_KVM_GUEST=y
++CONFIG_KVM_DEBUG_FS=y
++# CONFIG_PARAVIRT_TIME_ACCOUNTING is not set
++CONFIG_PARAVIRT_CLOCK=y
++# CONFIG_JAILHOUSE_GUEST is not set
++CONFIG_NO_BOOTMEM=y
++# CONFIG_MK8 is not set
++# CONFIG_MPSC is not set
++# CONFIG_MCORE2 is not set
++# CONFIG_MATOM is not set
++CONFIG_GENERIC_CPU=y
++CONFIG_X86_INTERNODE_CACHE_SHIFT=6
++CONFIG_X86_L1_CACHE_SHIFT=6
++CONFIG_X86_TSC=y
++CONFIG_X86_CMPXCHG64=y
++CONFIG_X86_CMOV=y
++CONFIG_X86_MINIMUM_CPU_FAMILY=64
++CONFIG_X86_DEBUGCTLMSR=y
++CONFIG_PROCESSOR_SELECT=y
++CONFIG_CPU_SUP_INTEL=y
++CONFIG_CPU_SUP_AMD=y
++CONFIG_CPU_SUP_CENTAUR=y
++CONFIG_HPET_TIMER=y
++CONFIG_HPET_EMULATE_RTC=y
++CONFIG_DMI=y
++CONFIG_GART_IOMMU=y
++CONFIG_CALGARY_IOMMU=y
++CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT=y
++CONFIG_SWIOTLB=y
++CONFIG_IOMMU_HELPER=y
++# CONFIG_MAXSMP is not set
++CONFIG_NR_CPUS=256
++CONFIG_SCHED_SMT=y
++CONFIG_SCHED_MC=y
++CONFIG_SCHED_MC_PRIO=y
++# CONFIG_PREEMPT_NONE is not set
++CONFIG_PREEMPT_VOLUNTARY=y
++# CONFIG_PREEMPT is not set
++CONFIG_X86_LOCAL_APIC=y
++CONFIG_X86_IO_APIC=y
++CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
++CONFIG_X86_MCE=y
++# CONFIG_X86_MCELOG_LEGACY is not set
++CONFIG_X86_MCE_INTEL=y
++CONFIG_X86_MCE_AMD=y
++CONFIG_X86_MCE_THRESHOLD=y
++CONFIG_X86_MCE_INJECT=m
++CONFIG_X86_THERMAL_VECTOR=y
++
++#
++# Performance monitoring
++#
++CONFIG_PERF_EVENTS_INTEL_UNCORE=y
++CONFIG_PERF_EVENTS_INTEL_RAPL=y
++CONFIG_PERF_EVENTS_INTEL_CSTATE=y
++# CONFIG_PERF_EVENTS_AMD_POWER is not set
++# CONFIG_VM86 is not set
++CONFIG_X86_16BIT=y
++CONFIG_X86_ESPFIX64=y
++CONFIG_X86_VSYSCALL_EMULATION=y
++CONFIG_I8K=m
++CONFIG_MICROCODE=y
++CONFIG_MICROCODE_INTEL=y
++CONFIG_MICROCODE_AMD=y
++CONFIG_MICROCODE_OLD_INTERFACE=y
++CONFIG_X86_MSR=m
++CONFIG_X86_CPUID=m
++# CONFIG_X86_5LEVEL is not set
++CONFIG_ARCH_PHYS_ADDR_T_64BIT=y
++CONFIG_ARCH_DMA_ADDR_T_64BIT=y
++CONFIG_X86_DIRECT_GBPAGES=y
++CONFIG_ARCH_HAS_MEM_ENCRYPT=y
++# CONFIG_AMD_MEM_ENCRYPT is not set
++CONFIG_NUMA=y
++CONFIG_AMD_NUMA=y
++CONFIG_X86_64_ACPI_NUMA=y
++CONFIG_NODES_SPAN_OTHER_NODES=y
++# CONFIG_NUMA_EMU is not set
++CONFIG_NODES_SHIFT=6
++CONFIG_ARCH_SPARSEMEM_ENABLE=y
++CONFIG_ARCH_SPARSEMEM_DEFAULT=y
++CONFIG_ARCH_SELECT_MEMORY_MODEL=y
++CONFIG_ARCH_MEMORY_PROBE=y
++CONFIG_ARCH_PROC_KCORE_TEXT=y
++CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000
++CONFIG_SELECT_MEMORY_MODEL=y
++CONFIG_SPARSEMEM_MANUAL=y
++CONFIG_SPARSEMEM=y
++CONFIG_NEED_MULTIPLE_NODES=y
++CONFIG_HAVE_MEMORY_PRESENT=y
++CONFIG_SPARSEMEM_EXTREME=y
++CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
++CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER=y
++CONFIG_SPARSEMEM_VMEMMAP=y
++CONFIG_HAVE_MEMBLOCK=y
++CONFIG_HAVE_MEMBLOCK_NODE_MAP=y
++CONFIG_HAVE_GENERIC_GUP=y
++CONFIG_ARCH_DISCARD_MEMBLOCK=y
++CONFIG_MEMORY_ISOLATION=y
++CONFIG_HAVE_BOOTMEM_INFO_NODE=y
++CONFIG_MEMORY_HOTPLUG=y
++CONFIG_MEMORY_HOTPLUG_SPARSE=y
++# CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE is not set
++CONFIG_MEMORY_HOTREMOVE=y
++CONFIG_SPLIT_PTLOCK_CPUS=4
++CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y
++CONFIG_MEMORY_BALLOON=y
++CONFIG_BALLOON_COMPACTION=y
++CONFIG_COMPACTION=y
++CONFIG_MIGRATION=y
++CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y
++CONFIG_ARCH_ENABLE_THP_MIGRATION=y
++CONFIG_PHYS_ADDR_T_64BIT=y
++CONFIG_VIRT_TO_BUS=y
++CONFIG_MMU_NOTIFIER=y
++CONFIG_KSM=y
++CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
++CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y
++CONFIG_MEMORY_FAILURE=y
++CONFIG_HWPOISON_INJECT=m
++CONFIG_TRANSPARENT_HUGEPAGE=y
++CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y
++# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set
++CONFIG_ARCH_WANTS_THP_SWAP=y
++CONFIG_THP_SWAP=y
++CONFIG_TRANSPARENT_HUGE_PAGECACHE=y
++CONFIG_CLEANCACHE=y
++CONFIG_FRONTSWAP=y
++CONFIG_CMA=y
++# CONFIG_CMA_DEBUG is not set
++# CONFIG_CMA_DEBUGFS is not set
++CONFIG_CMA_AREAS=7
++CONFIG_MEM_SOFT_DIRTY=y
++CONFIG_ZSWAP=y
++CONFIG_ZPOOL=y
++CONFIG_ZBUD=y
++# CONFIG_Z3FOLD is not set
++CONFIG_ZSMALLOC=y
++CONFIG_PGTABLE_MAPPING=y
++# CONFIG_ZSMALLOC_STAT is not set
++CONFIG_GENERIC_EARLY_IOREMAP=y
++# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set
++CONFIG_IDLE_PAGE_TRACKING=y
++CONFIG_ARCH_HAS_ZONE_DEVICE=y
++CONFIG_ZONE_DEVICE=y
++CONFIG_ARCH_HAS_HMM=y
++# CONFIG_HMM_MIRROR is not set
++# CONFIG_DEVICE_PRIVATE is not set
++# CONFIG_DEVICE_PUBLIC is not set
++CONFIG_FRAME_VECTOR=y
++CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y
++CONFIG_ARCH_HAS_PKEYS=y
++# CONFIG_PERCPU_STATS is not set
++# CONFIG_GUP_BENCHMARK is not set
++CONFIG_X86_PMEM_LEGACY_DEVICE=y
++CONFIG_X86_PMEM_LEGACY=y
++CONFIG_X86_CHECK_BIOS_CORRUPTION=y
++CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK=y
++CONFIG_X86_RESERVE_LOW=64
++CONFIG_MTRR=y
++CONFIG_MTRR_SANITIZER=y
++CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1
++CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1
++CONFIG_X86_PAT=y
++CONFIG_ARCH_USES_PG_UNCACHED=y
++CONFIG_ARCH_RANDOM=y
++CONFIG_X86_SMAP=y
++CONFIG_X86_INTEL_UMIP=y
++CONFIG_X86_INTEL_MPX=y
++CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS=y
++CONFIG_EFI=y
++CONFIG_EFI_STUB=y
++CONFIG_EFI_MIXED=y
++CONFIG_SECCOMP=y
++# CONFIG_HZ_100 is not set
++CONFIG_HZ_250=y
++# CONFIG_HZ_300 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=250
++CONFIG_SCHED_HRTICK=y
++CONFIG_KEXEC=y
++CONFIG_KEXEC_FILE=y
++CONFIG_KEXEC_VERIFY_SIG=y
++CONFIG_KEXEC_BZIMAGE_VERIFY_SIG=y
++CONFIG_CRASH_DUMP=y
++CONFIG_KEXEC_JUMP=y
++CONFIG_PHYSICAL_START=0x1000000
++CONFIG_RELOCATABLE=y
++CONFIG_RANDOMIZE_BASE=y
++CONFIG_X86_NEED_RELOCS=y
++CONFIG_PHYSICAL_ALIGN=0x1000000
++CONFIG_RANDOMIZE_MEMORY=y
++CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING=0xa
++CONFIG_HOTPLUG_CPU=y
++# CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set
++# CONFIG_DEBUG_HOTPLUG_CPU0 is not set
++# CONFIG_COMPAT_VDSO is not set
++# CONFIG_LEGACY_VSYSCALL_NATIVE is not set
++CONFIG_LEGACY_VSYSCALL_EMULATE=y
++# CONFIG_LEGACY_VSYSCALL_NONE is not set
++# CONFIG_CMDLINE_BOOL is not set
++CONFIG_MODIFY_LDT_SYSCALL=y
++CONFIG_HAVE_LIVEPATCH=y
++CONFIG_LIVEPATCH=y
++CONFIG_ARCH_HAS_ADD_PAGES=y
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
++CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
++CONFIG_USE_PERCPU_NUMA_NODE_ID=y
++
++#
++# Power management and ACPI options
++#
++CONFIG_ARCH_HIBERNATION_HEADER=y
++CONFIG_SUSPEND=y
++CONFIG_SUSPEND_FREEZER=y
++# CONFIG_SUSPEND_SKIP_SYNC is not set
++CONFIG_HIBERNATE_CALLBACKS=y
++CONFIG_HIBERNATION=y
++CONFIG_PM_STD_PARTITION=""
++CONFIG_PM_SLEEP=y
++CONFIG_PM_SLEEP_SMP=y
++# CONFIG_PM_AUTOSLEEP is not set
++CONFIG_PM_WAKELOCKS=y
++CONFIG_PM_WAKELOCKS_LIMIT=100
++CONFIG_PM_WAKELOCKS_GC=y
++CONFIG_PM=y
++CONFIG_PM_DEBUG=y
++CONFIG_PM_ADVANCED_DEBUG=y
++# CONFIG_PM_TEST_SUSPEND is not set
++CONFIG_PM_SLEEP_DEBUG=y
++# CONFIG_DPM_WATCHDOG is not set
++CONFIG_PM_TRACE=y
++CONFIG_PM_TRACE_RTC=y
++CONFIG_PM_CLK=y
++CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y
++CONFIG_ACPI=y
++CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y
++CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y
++CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y
++# CONFIG_ACPI_DEBUGGER is not set
++CONFIG_ACPI_SPCR_TABLE=y
++CONFIG_ACPI_LPIT=y
++CONFIG_ACPI_SLEEP=y
++# CONFIG_ACPI_PROCFS_POWER is not set
++CONFIG_ACPI_REV_OVERRIDE_POSSIBLE=y
++CONFIG_ACPI_EC_DEBUGFS=m
++CONFIG_ACPI_AC=y
++CONFIG_ACPI_BATTERY=y
++CONFIG_ACPI_BUTTON=y
++CONFIG_ACPI_VIDEO=m
++CONFIG_ACPI_FAN=y
++CONFIG_ACPI_DOCK=y
++CONFIG_ACPI_CPU_FREQ_PSS=y
++CONFIG_ACPI_PROCESSOR_CSTATE=y
++CONFIG_ACPI_PROCESSOR_IDLE=y
++CONFIG_ACPI_CPPC_LIB=y
++CONFIG_ACPI_PROCESSOR=y
++CONFIG_ACPI_IPMI=m
++CONFIG_ACPI_HOTPLUG_CPU=y
++CONFIG_ACPI_PROCESSOR_AGGREGATOR=m
++CONFIG_ACPI_THERMAL=y
++CONFIG_ACPI_NUMA=y
++CONFIG_ACPI_CUSTOM_DSDT_FILE=""
++# CONFIG_ACPI_CUSTOM_DSDT is not set
++CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y
++CONFIG_ACPI_TABLE_UPGRADE=y
++# CONFIG_ACPI_DEBUG is not set
++CONFIG_ACPI_PCI_SLOT=y
++CONFIG_ACPI_CONTAINER=y
++CONFIG_ACPI_HOTPLUG_MEMORY=y
++CONFIG_ACPI_HOTPLUG_IOAPIC=y
++CONFIG_ACPI_SBS=m
++CONFIG_ACPI_HED=y
++# CONFIG_ACPI_CUSTOM_METHOD is not set
++CONFIG_ACPI_BGRT=y
++# CONFIG_ACPI_REDUCED_HARDWARE_ONLY is not set
++CONFIG_ACPI_NFIT=m
++CONFIG_HAVE_ACPI_APEI=y
++CONFIG_HAVE_ACPI_APEI_NMI=y
++CONFIG_ACPI_APEI=y
++CONFIG_ACPI_APEI_GHES=y
++CONFIG_ACPI_APEI_PCIEAER=y
++CONFIG_ACPI_APEI_MEMORY_FAILURE=y
++CONFIG_ACPI_APEI_EINJ=m
++# CONFIG_ACPI_APEI_ERST_DEBUG is not set
++# CONFIG_DPTF_POWER is not set
++CONFIG_ACPI_EXTLOG=m
++# CONFIG_PMIC_OPREGION is not set
++# CONFIG_ACPI_CONFIGFS is not set
++CONFIG_X86_PM_TIMER=y
++CONFIG_SFI=y
++
++#
++# CPU Frequency scaling
++#
++CONFIG_CPU_FREQ=y
++CONFIG_CPU_FREQ_GOV_ATTR_SET=y
++CONFIG_CPU_FREQ_GOV_COMMON=y
++CONFIG_CPU_FREQ_STAT=y
++CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
++# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
++# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
++# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
++# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
++# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set
++CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
++CONFIG_CPU_FREQ_GOV_POWERSAVE=y
++CONFIG_CPU_FREQ_GOV_USERSPACE=y
++CONFIG_CPU_FREQ_GOV_ONDEMAND=y
++CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
++# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set
++
++#
++# CPU frequency scaling drivers
++#
++CONFIG_X86_INTEL_PSTATE=y
++CONFIG_X86_PCC_CPUFREQ=y
++CONFIG_X86_ACPI_CPUFREQ=y
++CONFIG_X86_ACPI_CPUFREQ_CPB=y
++CONFIG_X86_POWERNOW_K8=y
++CONFIG_X86_AMD_FREQ_SENSITIVITY=m
++CONFIG_X86_SPEEDSTEP_CENTRINO=y
++CONFIG_X86_P4_CLOCKMOD=m
++
++#
++# shared options
++#
++CONFIG_X86_SPEEDSTEP_LIB=m
++
++#
++# CPU Idle
++#
++CONFIG_CPU_IDLE=y
++CONFIG_CPU_IDLE_GOV_LADDER=y
++CONFIG_CPU_IDLE_GOV_MENU=y
++# CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is not set
++CONFIG_INTEL_IDLE=y
++
++#
++# Bus options (PCI etc.)
++#
++CONFIG_PCI=y
++CONFIG_PCI_DIRECT=y
++CONFIG_PCI_MMCONFIG=y
++CONFIG_PCI_XEN=y
++CONFIG_PCI_DOMAINS=y
++# CONFIG_PCI_CNB20LE_QUIRK is not set
++CONFIG_PCIEPORTBUS=y
++CONFIG_HOTPLUG_PCI_PCIE=y
++CONFIG_PCIEAER=y
++# CONFIG_PCIE_ECRC is not set
++# CONFIG_PCIEAER_INJECT is not set
++CONFIG_PCIEASPM=y
++CONFIG_PCIEASPM_DEBUG=y
++CONFIG_PCIEASPM_DEFAULT=y
++# CONFIG_PCIEASPM_POWERSAVE is not set
++# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set
++# CONFIG_PCIEASPM_PERFORMANCE is not set
++CONFIG_PCIE_PME=y
++# CONFIG_PCIE_DPC is not set
++# CONFIG_PCIE_PTM is not set
++CONFIG_PCI_BUS_ADDR_T_64BIT=y
++CONFIG_PCI_MSI=y
++CONFIG_PCI_MSI_IRQ_DOMAIN=y
++CONFIG_PCI_QUIRKS=y
++# CONFIG_PCI_DEBUG is not set
++CONFIG_PCI_REALLOC_ENABLE_AUTO=y
++CONFIG_PCI_STUB=m
++CONFIG_XEN_PCIDEV_FRONTEND=m
++CONFIG_PCI_ATS=y
++CONFIG_PCI_LOCKLESS_CONFIG=y
++CONFIG_PCI_IOV=y
++CONFIG_PCI_PRI=y
++CONFIG_PCI_PASID=y
++CONFIG_PCI_LABEL=y
++# CONFIG_PCI_HYPERV is not set
++CONFIG_HOTPLUG_PCI=y
++CONFIG_HOTPLUG_PCI_ACPI=y
++CONFIG_HOTPLUG_PCI_ACPI_IBM=m
++CONFIG_HOTPLUG_PCI_CPCI=y
++CONFIG_HOTPLUG_PCI_CPCI_ZT5550=m
++CONFIG_HOTPLUG_PCI_CPCI_GENERIC=m
++CONFIG_HOTPLUG_PCI_SHPC=m
++
++#
++# Cadence PCIe controllers support
++#
++
++#
++# DesignWare PCI Core Support
++#
++# CONFIG_PCIE_DW_PLAT is not set
++
++#
++# PCI host controller drivers
++#
++# CONFIG_VMD is not set
++
++#
++# PCI Endpoint
++#
++# CONFIG_PCI_ENDPOINT is not set
++
++#
++# PCI switch controller drivers
++#
++# CONFIG_PCI_SW_SWITCHTEC is not set
++# CONFIG_ISA_BUS is not set
++CONFIG_ISA_DMA_API=y
++CONFIG_AMD_NB=y
++CONFIG_PCCARD=m
++CONFIG_PCMCIA=m
++CONFIG_PCMCIA_LOAD_CIS=y
++CONFIG_CARDBUS=y
++
++#
++# PC-card bridges
++#
++CONFIG_YENTA=m
++CONFIG_YENTA_O2=y
++CONFIG_YENTA_RICOH=y
++CONFIG_YENTA_TI=y
++CONFIG_YENTA_ENE_TUNE=y
++CONFIG_YENTA_TOSHIBA=y
++CONFIG_PD6729=m
++CONFIG_I82092=m
++CONFIG_PCCARD_NONSTATIC=y
++CONFIG_RAPIDIO=y
++CONFIG_RAPIDIO_TSI721=m
++CONFIG_RAPIDIO_DISC_TIMEOUT=30
++# CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS is not set
++CONFIG_RAPIDIO_DMA_ENGINE=y
++# CONFIG_RAPIDIO_DEBUG is not set
++CONFIG_RAPIDIO_ENUM_BASIC=m
++# CONFIG_RAPIDIO_CHMAN is not set
++# CONFIG_RAPIDIO_MPORT_CDEV is not set
++
++#
++# RapidIO Switch drivers
++#
++CONFIG_RAPIDIO_TSI57X=m
++CONFIG_RAPIDIO_CPS_XX=m
++CONFIG_RAPIDIO_TSI568=m
++CONFIG_RAPIDIO_CPS_GEN2=m
++# CONFIG_RAPIDIO_RXS_GEN3 is not set
++# CONFIG_X86_SYSFB is not set
++
++#
++# Executable file formats / Emulations
++#
++CONFIG_BINFMT_ELF=y
++CONFIG_COMPAT_BINFMT_ELF=y
++CONFIG_ELFCORE=y
++CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
++CONFIG_BINFMT_SCRIPT=y
++# CONFIG_HAVE_AOUT is not set
++CONFIG_BINFMT_MISC=m
++CONFIG_COREDUMP=y
++CONFIG_IA32_EMULATION=y
++# CONFIG_IA32_AOUT is not set
++CONFIG_X86_X32=y
++CONFIG_COMPAT_32=y
++CONFIG_COMPAT=y
++CONFIG_COMPAT_FOR_U64_ALIGNMENT=y
++CONFIG_SYSVIPC_COMPAT=y
++CONFIG_X86_DEV_DMA_OPS=y
++CONFIG_NET=y
++CONFIG_COMPAT_NETLINK_MESSAGES=y
++CONFIG_NET_INGRESS=y
++CONFIG_NET_EGRESS=y
++
++#
++# Networking options
++#
++CONFIG_PACKET=y
++CONFIG_PACKET_DIAG=m
++CONFIG_UNIX=y
++CONFIG_UNIX_DIAG=m
++# CONFIG_TLS is not set
++CONFIG_XFRM=y
++CONFIG_XFRM_ALGO=m
++CONFIG_XFRM_USER=m
++# CONFIG_XFRM_SUB_POLICY is not set
++# CONFIG_XFRM_MIGRATE is not set
++CONFIG_XFRM_STATISTICS=y
++CONFIG_XFRM_IPCOMP=m
++CONFIG_NET_KEY=m
++# CONFIG_NET_KEY_MIGRATE is not set
++# CONFIG_SMC is not set
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++CONFIG_IP_ADVANCED_ROUTER=y
++CONFIG_IP_FIB_TRIE_STATS=y
++CONFIG_IP_MULTIPLE_TABLES=y
++CONFIG_IP_ROUTE_MULTIPATH=y
++CONFIG_IP_ROUTE_VERBOSE=y
++CONFIG_IP_ROUTE_CLASSID=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++# CONFIG_IP_PNP_BOOTP is not set
++# CONFIG_IP_PNP_RARP is not set
++CONFIG_NET_IPIP=m
++CONFIG_NET_IPGRE_DEMUX=m
++CONFIG_NET_IP_TUNNEL=m
++CONFIG_NET_IPGRE=m
++CONFIG_NET_IPGRE_BROADCAST=y
++CONFIG_IP_MROUTE=y
++# CONFIG_IP_MROUTE_MULTIPLE_TABLES is not set
++CONFIG_IP_PIMSM_V1=y
++CONFIG_IP_PIMSM_V2=y
++CONFIG_SYN_COOKIES=y
++CONFIG_NET_IPVTI=m
++CONFIG_NET_UDP_TUNNEL=m
++CONFIG_NET_FOU=m
++CONFIG_NET_FOU_IP_TUNNELS=y
++CONFIG_INET_AH=m
++CONFIG_INET_ESP=m
++# CONFIG_INET_ESP_OFFLOAD is not set
++CONFIG_INET_IPCOMP=m
++CONFIG_INET_XFRM_TUNNEL=m
++CONFIG_INET_TUNNEL=m
++CONFIG_INET_XFRM_MODE_TRANSPORT=m
++CONFIG_INET_XFRM_MODE_TUNNEL=m
++CONFIG_INET_XFRM_MODE_BEET=m
++CONFIG_INET_DIAG=m
++CONFIG_INET_TCP_DIAG=m
++CONFIG_INET_UDP_DIAG=m
++# CONFIG_INET_RAW_DIAG is not set
++# CONFIG_INET_DIAG_DESTROY is not set
++CONFIG_TCP_CONG_ADVANCED=y
++CONFIG_TCP_CONG_BIC=m
++CONFIG_TCP_CONG_CUBIC=y
++CONFIG_TCP_CONG_WESTWOOD=m
++CONFIG_TCP_CONG_HTCP=m
++CONFIG_TCP_CONG_HSTCP=m
++CONFIG_TCP_CONG_HYBLA=m
++CONFIG_TCP_CONG_VEGAS=m
++# CONFIG_TCP_CONG_NV is not set
++CONFIG_TCP_CONG_SCALABLE=m
++CONFIG_TCP_CONG_LP=m
++CONFIG_TCP_CONG_VENO=m
++CONFIG_TCP_CONG_YEAH=m
++CONFIG_TCP_CONG_ILLINOIS=m
++CONFIG_TCP_CONG_DCTCP=m
++CONFIG_TCP_CONG_CDG=m
++# CONFIG_TCP_CONG_BBR is not set
++CONFIG_DEFAULT_CUBIC=y
++# CONFIG_DEFAULT_RENO is not set
++CONFIG_DEFAULT_TCP_CONG="cubic"
++CONFIG_TCP_MD5SIG=y
++CONFIG_IPV6=y
++CONFIG_IPV6_ROUTER_PREF=y
++CONFIG_IPV6_ROUTE_INFO=y
++# CONFIG_IPV6_OPTIMISTIC_DAD is not set
++CONFIG_INET6_AH=m
++CONFIG_INET6_ESP=m
++# CONFIG_INET6_ESP_OFFLOAD is not set
++CONFIG_INET6_IPCOMP=m
++CONFIG_IPV6_MIP6=m
++CONFIG_IPV6_ILA=m
++CONFIG_INET6_XFRM_TUNNEL=m
++CONFIG_INET6_TUNNEL=m
++CONFIG_INET6_XFRM_MODE_TRANSPORT=m
++CONFIG_INET6_XFRM_MODE_TUNNEL=m
++CONFIG_INET6_XFRM_MODE_BEET=m
++CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
++CONFIG_IPV6_VTI=m
++CONFIG_IPV6_SIT=m
++CONFIG_IPV6_SIT_6RD=y
++CONFIG_IPV6_NDISC_NODETYPE=y
++CONFIG_IPV6_TUNNEL=m
++CONFIG_IPV6_GRE=m
++CONFIG_IPV6_FOU=m
++CONFIG_IPV6_FOU_TUNNEL=m
++CONFIG_IPV6_MULTIPLE_TABLES=y
++CONFIG_IPV6_SUBTREES=y
++CONFIG_IPV6_MROUTE=y
++CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
++CONFIG_IPV6_PIMSM_V2=y
++# CONFIG_IPV6_SEG6_LWTUNNEL is not set
++# CONFIG_IPV6_SEG6_HMAC is not set
++CONFIG_NETLABEL=y
++CONFIG_NETWORK_SECMARK=y
++CONFIG_NET_PTP_CLASSIFY=y
++# CONFIG_NETWORK_PHY_TIMESTAMPING is not set
++CONFIG_NETFILTER=y
++CONFIG_NETFILTER_ADVANCED=y
++CONFIG_BRIDGE_NETFILTER=m
++
++#
++# Core Netfilter Configuration
++#
++CONFIG_NETFILTER_INGRESS=y
++CONFIG_NETFILTER_NETLINK=m
++CONFIG_NETFILTER_FAMILY_BRIDGE=y
++CONFIG_NETFILTER_FAMILY_ARP=y
++CONFIG_NETFILTER_NETLINK_ACCT=m
++CONFIG_NETFILTER_NETLINK_QUEUE=m
++CONFIG_NETFILTER_NETLINK_LOG=m
++CONFIG_NF_CONNTRACK=m
++CONFIG_NF_LOG_COMMON=m
++# CONFIG_NF_LOG_NETDEV is not set
++CONFIG_NETFILTER_CONNCOUNT=m
++CONFIG_NF_CONNTRACK_MARK=y
++CONFIG_NF_CONNTRACK_SECMARK=y
++CONFIG_NF_CONNTRACK_ZONES=y
++# CONFIG_NF_CONNTRACK_PROCFS is not set
++CONFIG_NF_CONNTRACK_EVENTS=y
++CONFIG_NF_CONNTRACK_TIMEOUT=y
++CONFIG_NF_CONNTRACK_TIMESTAMP=y
++CONFIG_NF_CONNTRACK_LABELS=y
++CONFIG_NF_CT_PROTO_DCCP=y
++CONFIG_NF_CT_PROTO_GRE=m
++CONFIG_NF_CT_PROTO_SCTP=y
++CONFIG_NF_CT_PROTO_UDPLITE=y
++CONFIG_NF_CONNTRACK_AMANDA=m
++CONFIG_NF_CONNTRACK_FTP=m
++CONFIG_NF_CONNTRACK_H323=m
++CONFIG_NF_CONNTRACK_IRC=m
++CONFIG_NF_CONNTRACK_BROADCAST=m
++CONFIG_NF_CONNTRACK_NETBIOS_NS=m
++CONFIG_NF_CONNTRACK_SNMP=m
++CONFIG_NF_CONNTRACK_PPTP=m
++CONFIG_NF_CONNTRACK_SANE=m
++CONFIG_NF_CONNTRACK_SIP=m
++CONFIG_NF_CONNTRACK_TFTP=m
++CONFIG_NF_CT_NETLINK=m
++CONFIG_NF_CT_NETLINK_TIMEOUT=m
++CONFIG_NF_CT_NETLINK_HELPER=m
++CONFIG_NETFILTER_NETLINK_GLUE_CT=y
++CONFIG_NF_NAT=m
++CONFIG_NF_NAT_NEEDED=y
++CONFIG_NF_NAT_PROTO_DCCP=y
++CONFIG_NF_NAT_PROTO_UDPLITE=y
++CONFIG_NF_NAT_PROTO_SCTP=y
++CONFIG_NF_NAT_AMANDA=m
++CONFIG_NF_NAT_FTP=m
++CONFIG_NF_NAT_IRC=m
++CONFIG_NF_NAT_SIP=m
++CONFIG_NF_NAT_TFTP=m
++CONFIG_NF_NAT_REDIRECT=m
++CONFIG_NETFILTER_SYNPROXY=m
++CONFIG_NF_TABLES=m
++CONFIG_NF_TABLES_INET=m
++CONFIG_NF_TABLES_NETDEV=m
++CONFIG_NFT_EXTHDR=m
++CONFIG_NFT_META=m
++# CONFIG_NFT_RT is not set
++# CONFIG_NFT_NUMGEN is not set
++CONFIG_NFT_CT=m
++# CONFIG_NFT_SET_RBTREE is not set
++# CONFIG_NFT_SET_HASH is not set
++# CONFIG_NFT_SET_BITMAP is not set
++CONFIG_NFT_COUNTER=m
++CONFIG_NFT_LOG=m
++CONFIG_NFT_LIMIT=m
++CONFIG_NFT_MASQ=m
++CONFIG_NFT_REDIR=m
++CONFIG_NFT_NAT=m
++# CONFIG_NFT_OBJREF is not set
++CONFIG_NFT_QUEUE=m
++# CONFIG_NFT_QUOTA is not set
++CONFIG_NFT_REJECT=m
++CONFIG_NFT_REJECT_INET=m
++CONFIG_NFT_COMPAT=m
++CONFIG_NFT_HASH=m
++# CONFIG_NF_DUP_NETDEV is not set
++# CONFIG_NFT_DUP_NETDEV is not set
++# CONFIG_NFT_FWD_NETDEV is not set
++# CONFIG_NF_FLOW_TABLE is not set
++CONFIG_NETFILTER_XTABLES=m
++
++#
++# Xtables combined modules
++#
++CONFIG_NETFILTER_XT_MARK=m
++CONFIG_NETFILTER_XT_CONNMARK=m
++CONFIG_NETFILTER_XT_SET=m
++
++#
++# Xtables targets
++#
++CONFIG_NETFILTER_XT_TARGET_AUDIT=m
++CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
++CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
++CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
++CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
++CONFIG_NETFILTER_XT_TARGET_CT=m
++CONFIG_NETFILTER_XT_TARGET_DSCP=m
++CONFIG_NETFILTER_XT_TARGET_HL=m
++CONFIG_NETFILTER_XT_TARGET_HMARK=m
++CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
++CONFIG_NETFILTER_XT_TARGET_LED=m
++CONFIG_NETFILTER_XT_TARGET_LOG=m
++CONFIG_NETFILTER_XT_TARGET_MARK=m
++CONFIG_NETFILTER_XT_NAT=m
++CONFIG_NETFILTER_XT_TARGET_NETMAP=m
++CONFIG_NETFILTER_XT_TARGET_NFLOG=m
++CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
++# CONFIG_NETFILTER_XT_TARGET_NOTRACK is not set
++CONFIG_NETFILTER_XT_TARGET_RATEEST=m
++CONFIG_NETFILTER_XT_TARGET_REDIRECT=m
++CONFIG_NETFILTER_XT_TARGET_TEE=m
++CONFIG_NETFILTER_XT_TARGET_TPROXY=m
++CONFIG_NETFILTER_XT_TARGET_TRACE=m
++CONFIG_NETFILTER_XT_TARGET_SECMARK=m
++CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
++CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
++
++#
++# Xtables matches
++#
++CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
++CONFIG_NETFILTER_XT_MATCH_BPF=m
++CONFIG_NETFILTER_XT_MATCH_CGROUP=m
++CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
++CONFIG_NETFILTER_XT_MATCH_COMMENT=m
++CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
++CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
++CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
++CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
++CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
++CONFIG_NETFILTER_XT_MATCH_CPU=m
++CONFIG_NETFILTER_XT_MATCH_DCCP=m
++CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
++CONFIG_NETFILTER_XT_MATCH_DSCP=m
++CONFIG_NETFILTER_XT_MATCH_ECN=m
++CONFIG_NETFILTER_XT_MATCH_ESP=m
++CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
++CONFIG_NETFILTER_XT_MATCH_HELPER=m
++CONFIG_NETFILTER_XT_MATCH_HL=m
++CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
++CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
++CONFIG_NETFILTER_XT_MATCH_IPVS=m
++CONFIG_NETFILTER_XT_MATCH_L2TP=m
++CONFIG_NETFILTER_XT_MATCH_LENGTH=m
++CONFIG_NETFILTER_XT_MATCH_LIMIT=m
++CONFIG_NETFILTER_XT_MATCH_MAC=m
++CONFIG_NETFILTER_XT_MATCH_MARK=m
++CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
++CONFIG_NETFILTER_XT_MATCH_NFACCT=m
++CONFIG_NETFILTER_XT_MATCH_OSF=m
++CONFIG_NETFILTER_XT_MATCH_OWNER=m
++CONFIG_NETFILTER_XT_MATCH_POLICY=m
++CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
++CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
++CONFIG_NETFILTER_XT_MATCH_QUOTA=m
++CONFIG_NETFILTER_XT_MATCH_RATEEST=m
++CONFIG_NETFILTER_XT_MATCH_REALM=m
++CONFIG_NETFILTER_XT_MATCH_RECENT=m
++CONFIG_NETFILTER_XT_MATCH_SCTP=m
++CONFIG_NETFILTER_XT_MATCH_STATE=m
++CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
++CONFIG_NETFILTER_XT_MATCH_STRING=m
++CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
++CONFIG_NETFILTER_XT_MATCH_TIME=m
++CONFIG_NETFILTER_XT_MATCH_U32=m
++CONFIG_IP_SET=m
++CONFIG_IP_SET_MAX=256
++CONFIG_IP_SET_BITMAP_IP=m
++CONFIG_IP_SET_BITMAP_IPMAC=m
++CONFIG_IP_SET_BITMAP_PORT=m
++CONFIG_IP_SET_HASH_IP=m
++CONFIG_IP_SET_HASH_IPMARK=m
++CONFIG_IP_SET_HASH_IPPORT=m
++CONFIG_IP_SET_HASH_IPPORTIP=m
++CONFIG_IP_SET_HASH_IPPORTNET=m
++# CONFIG_IP_SET_HASH_IPMAC is not set
++CONFIG_IP_SET_HASH_MAC=m
++CONFIG_IP_SET_HASH_NETPORTNET=m
++CONFIG_IP_SET_HASH_NET=m
++CONFIG_IP_SET_HASH_NETNET=m
++CONFIG_IP_SET_HASH_NETPORT=m
++CONFIG_IP_SET_HASH_NETIFACE=m
++CONFIG_IP_SET_LIST_SET=m
++CONFIG_IP_VS=m
++CONFIG_IP_VS_IPV6=y
++# CONFIG_IP_VS_DEBUG is not set
++CONFIG_IP_VS_TAB_BITS=12
++
++#
++# IPVS transport protocol load balancing support
++#
++CONFIG_IP_VS_PROTO_TCP=y
++CONFIG_IP_VS_PROTO_UDP=y
++CONFIG_IP_VS_PROTO_AH_ESP=y
++CONFIG_IP_VS_PROTO_ESP=y
++CONFIG_IP_VS_PROTO_AH=y
++CONFIG_IP_VS_PROTO_SCTP=y
++
++#
++# IPVS scheduler
++#
++CONFIG_IP_VS_RR=m
++CONFIG_IP_VS_WRR=m
++CONFIG_IP_VS_LC=m
++CONFIG_IP_VS_WLC=m
++CONFIG_IP_VS_FO=m
++CONFIG_IP_VS_OVF=m
++CONFIG_IP_VS_LBLC=m
++CONFIG_IP_VS_LBLCR=m
++CONFIG_IP_VS_DH=m
++CONFIG_IP_VS_SH=m
++CONFIG_IP_VS_SED=m
++CONFIG_IP_VS_NQ=m
++
++#
++# IPVS SH scheduler
++#
++CONFIG_IP_VS_SH_TAB_BITS=8
++
++#
++# IPVS application helper
++#
++CONFIG_IP_VS_FTP=m
++CONFIG_IP_VS_NFCT=y
++CONFIG_IP_VS_PE_SIP=m
++
++#
++# IP: Netfilter Configuration
++#
++CONFIG_NF_DEFRAG_IPV4=m
++CONFIG_NF_CONNTRACK_IPV4=m
++# CONFIG_NF_SOCKET_IPV4 is not set
++CONFIG_NF_TABLES_IPV4=m
++CONFIG_NFT_CHAIN_ROUTE_IPV4=m
++CONFIG_NFT_REJECT_IPV4=m
++CONFIG_NFT_DUP_IPV4=m
++# CONFIG_NFT_FIB_IPV4 is not set
++CONFIG_NF_TABLES_ARP=m
++CONFIG_NF_DUP_IPV4=m
++CONFIG_NF_LOG_ARP=m
++CONFIG_NF_LOG_IPV4=m
++CONFIG_NF_REJECT_IPV4=m
++CONFIG_NF_NAT_IPV4=m
++CONFIG_NFT_CHAIN_NAT_IPV4=m
++CONFIG_NFT_MASQ_IPV4=m
++CONFIG_NFT_REDIR_IPV4=m
++CONFIG_NF_NAT_SNMP_BASIC=m
++CONFIG_NF_NAT_PROTO_GRE=m
++CONFIG_NF_NAT_PPTP=m
++CONFIG_NF_NAT_H323=m
++CONFIG_IP_NF_IPTABLES=m
++CONFIG_IP_NF_MATCH_AH=m
++CONFIG_IP_NF_MATCH_ECN=m
++CONFIG_IP_NF_MATCH_RPFILTER=m
++CONFIG_IP_NF_MATCH_TTL=m
++CONFIG_IP_NF_FILTER=m
++CONFIG_IP_NF_TARGET_REJECT=m
++CONFIG_IP_NF_TARGET_SYNPROXY=m
++CONFIG_IP_NF_NAT=m
++CONFIG_IP_NF_TARGET_MASQUERADE=m
++CONFIG_IP_NF_TARGET_NETMAP=m
++CONFIG_IP_NF_TARGET_REDIRECT=m
++CONFIG_IP_NF_MANGLE=m
++CONFIG_IP_NF_TARGET_CLUSTERIP=m
++CONFIG_IP_NF_TARGET_ECN=m
++CONFIG_IP_NF_TARGET_TTL=m
++CONFIG_IP_NF_RAW=m
++CONFIG_IP_NF_SECURITY=m
++CONFIG_IP_NF_ARPTABLES=m
++CONFIG_IP_NF_ARPFILTER=m
++CONFIG_IP_NF_ARP_MANGLE=m
++
++#
++# IPv6: Netfilter Configuration
++#
++CONFIG_NF_DEFRAG_IPV6=m
++CONFIG_NF_CONNTRACK_IPV6=m
++# CONFIG_NF_SOCKET_IPV6 is not set
++CONFIG_NF_TABLES_IPV6=m
++CONFIG_NFT_CHAIN_ROUTE_IPV6=m
++CONFIG_NFT_REJECT_IPV6=m
++CONFIG_NFT_DUP_IPV6=m
++# CONFIG_NFT_FIB_IPV6 is not set
++CONFIG_NF_DUP_IPV6=m
++CONFIG_NF_REJECT_IPV6=m
++CONFIG_NF_LOG_IPV6=m
++CONFIG_NF_NAT_IPV6=m
++CONFIG_NFT_CHAIN_NAT_IPV6=m
++CONFIG_NFT_MASQ_IPV6=m
++CONFIG_NFT_REDIR_IPV6=m
++CONFIG_IP6_NF_IPTABLES=m
++CONFIG_IP6_NF_MATCH_AH=m
++CONFIG_IP6_NF_MATCH_EUI64=m
++CONFIG_IP6_NF_MATCH_FRAG=m
++CONFIG_IP6_NF_MATCH_OPTS=m
++CONFIG_IP6_NF_MATCH_HL=m
++CONFIG_IP6_NF_MATCH_IPV6HEADER=m
++CONFIG_IP6_NF_MATCH_MH=m
++CONFIG_IP6_NF_MATCH_RPFILTER=m
++CONFIG_IP6_NF_MATCH_RT=m
++# CONFIG_IP6_NF_MATCH_SRH is not set
++CONFIG_IP6_NF_TARGET_HL=m
++CONFIG_IP6_NF_FILTER=m
++CONFIG_IP6_NF_TARGET_REJECT=m
++CONFIG_IP6_NF_TARGET_SYNPROXY=m
++CONFIG_IP6_NF_MANGLE=m
++CONFIG_IP6_NF_RAW=m
++CONFIG_IP6_NF_SECURITY=m
++CONFIG_IP6_NF_NAT=m
++CONFIG_IP6_NF_TARGET_MASQUERADE=m
++CONFIG_IP6_NF_TARGET_NPT=m
++
++#
++# DECnet: Netfilter Configuration
++#
++CONFIG_DECNET_NF_GRABULATOR=m
++CONFIG_NF_TABLES_BRIDGE=m
++CONFIG_NFT_BRIDGE_META=m
++CONFIG_NFT_BRIDGE_REJECT=m
++CONFIG_NF_LOG_BRIDGE=m
++CONFIG_BRIDGE_NF_EBTABLES=m
++CONFIG_BRIDGE_EBT_BROUTE=m
++CONFIG_BRIDGE_EBT_T_FILTER=m
++CONFIG_BRIDGE_EBT_T_NAT=m
++CONFIG_BRIDGE_EBT_802_3=m
++CONFIG_BRIDGE_EBT_AMONG=m
++CONFIG_BRIDGE_EBT_ARP=m
++CONFIG_BRIDGE_EBT_IP=m
++CONFIG_BRIDGE_EBT_IP6=m
++CONFIG_BRIDGE_EBT_LIMIT=m
++CONFIG_BRIDGE_EBT_MARK=m
++CONFIG_BRIDGE_EBT_PKTTYPE=m
++CONFIG_BRIDGE_EBT_STP=m
++CONFIG_BRIDGE_EBT_VLAN=m
++CONFIG_BRIDGE_EBT_ARPREPLY=m
++CONFIG_BRIDGE_EBT_DNAT=m
++CONFIG_BRIDGE_EBT_MARK_T=m
++CONFIG_BRIDGE_EBT_REDIRECT=m
++CONFIG_BRIDGE_EBT_SNAT=m
++CONFIG_BRIDGE_EBT_LOG=m
++CONFIG_BRIDGE_EBT_NFLOG=m
++CONFIG_IP_DCCP=m
++CONFIG_INET_DCCP_DIAG=m
++
++#
++# DCCP CCIDs Configuration
++#
++# CONFIG_IP_DCCP_CCID2_DEBUG is not set
++# CONFIG_IP_DCCP_CCID3 is not set
++
++#
++# DCCP Kernel Hacking
++#
++# CONFIG_IP_DCCP_DEBUG is not set
++CONFIG_IP_SCTP=m
++# CONFIG_SCTP_DBG_OBJCNT is not set
++# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set
++CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y
++# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set
++CONFIG_SCTP_COOKIE_HMAC_MD5=y
++CONFIG_SCTP_COOKIE_HMAC_SHA1=y
++CONFIG_INET_SCTP_DIAG=m
++CONFIG_RDS=m
++CONFIG_RDS_RDMA=m
++CONFIG_RDS_TCP=m
++# CONFIG_RDS_DEBUG is not set
++CONFIG_TIPC=m
++CONFIG_TIPC_MEDIA_IB=y
++CONFIG_TIPC_MEDIA_UDP=y
++CONFIG_ATM=m
++CONFIG_ATM_CLIP=m
++# CONFIG_ATM_CLIP_NO_ICMP is not set
++CONFIG_ATM_LANE=m
++CONFIG_ATM_MPOA=m
++CONFIG_ATM_BR2684=m
++# CONFIG_ATM_BR2684_IPFILTER is not set
++CONFIG_L2TP=m
++CONFIG_L2TP_DEBUGFS=m
++CONFIG_L2TP_V3=y
++CONFIG_L2TP_IP=m
++CONFIG_L2TP_ETH=m
++CONFIG_STP=m
++CONFIG_GARP=m
++CONFIG_MRP=m
++CONFIG_BRIDGE=m
++CONFIG_BRIDGE_IGMP_SNOOPING=y
++CONFIG_BRIDGE_VLAN_FILTERING=y
++CONFIG_HAVE_NET_DSA=y
++# CONFIG_NET_DSA is not set
++CONFIG_VLAN_8021Q=m
++CONFIG_VLAN_8021Q_GVRP=y
++CONFIG_VLAN_8021Q_MVRP=y
++CONFIG_DECNET=m
++# CONFIG_DECNET_ROUTER is not set
++CONFIG_LLC=m
++CONFIG_LLC2=m
++CONFIG_ATALK=m
++CONFIG_DEV_APPLETALK=m
++CONFIG_IPDDP=m
++CONFIG_IPDDP_ENCAP=y
++CONFIG_X25=m
++CONFIG_LAPB=m
++CONFIG_PHONET=m
++CONFIG_6LOWPAN=m
++# CONFIG_6LOWPAN_DEBUGFS is not set
++CONFIG_6LOWPAN_NHC=m
++CONFIG_6LOWPAN_NHC_DEST=m
++CONFIG_6LOWPAN_NHC_FRAGMENT=m
++CONFIG_6LOWPAN_NHC_HOP=m
++CONFIG_6LOWPAN_NHC_IPV6=m
++CONFIG_6LOWPAN_NHC_MOBILITY=m
++CONFIG_6LOWPAN_NHC_ROUTING=m
++CONFIG_6LOWPAN_NHC_UDP=m
++# CONFIG_6LOWPAN_GHC_EXT_HDR_HOP is not set
++# CONFIG_6LOWPAN_GHC_UDP is not set
++# CONFIG_6LOWPAN_GHC_ICMPV6 is not set
++# CONFIG_6LOWPAN_GHC_EXT_HDR_DEST is not set
++# CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG is not set
++# CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE is not set
++CONFIG_IEEE802154=m
++CONFIG_IEEE802154_NL802154_EXPERIMENTAL=y
++CONFIG_IEEE802154_SOCKET=m
++CONFIG_IEEE802154_6LOWPAN=m
++CONFIG_MAC802154=m
++CONFIG_NET_SCHED=y
++
++#
++# Queueing/Scheduling
++#
++CONFIG_NET_SCH_CBQ=m
++CONFIG_NET_SCH_HTB=m
++CONFIG_NET_SCH_HFSC=m
++CONFIG_NET_SCH_ATM=m
++CONFIG_NET_SCH_PRIO=m
++CONFIG_NET_SCH_MULTIQ=m
++CONFIG_NET_SCH_RED=m
++CONFIG_NET_SCH_SFB=m
++CONFIG_NET_SCH_SFQ=m
++CONFIG_NET_SCH_TEQL=m
++CONFIG_NET_SCH_TBF=m
++# CONFIG_NET_SCH_CBS is not set
++CONFIG_NET_SCH_GRED=m
++CONFIG_NET_SCH_DSMARK=m
++CONFIG_NET_SCH_NETEM=m
++CONFIG_NET_SCH_DRR=m
++CONFIG_NET_SCH_MQPRIO=m
++CONFIG_NET_SCH_CHOKE=m
++CONFIG_NET_SCH_QFQ=m
++CONFIG_NET_SCH_CODEL=m
++CONFIG_NET_SCH_FQ_CODEL=m
++CONFIG_NET_SCH_FQ=m
++CONFIG_NET_SCH_HHF=m
++CONFIG_NET_SCH_PIE=m
++CONFIG_NET_SCH_INGRESS=m
++CONFIG_NET_SCH_PLUG=m
++# CONFIG_NET_SCH_DEFAULT is not set
++
++#
++# Classification
++#
++CONFIG_NET_CLS=y
++CONFIG_NET_CLS_BASIC=m
++CONFIG_NET_CLS_TCINDEX=m
++CONFIG_NET_CLS_ROUTE4=m
++CONFIG_NET_CLS_FW=m
++CONFIG_NET_CLS_U32=m
++# CONFIG_CLS_U32_PERF is not set
++CONFIG_CLS_U32_MARK=y
++CONFIG_NET_CLS_RSVP=m
++CONFIG_NET_CLS_RSVP6=m
++CONFIG_NET_CLS_FLOW=m
++CONFIG_NET_CLS_CGROUP=m
++CONFIG_NET_CLS_BPF=m
++CONFIG_NET_CLS_FLOWER=m
++# CONFIG_NET_CLS_MATCHALL is not set
++CONFIG_NET_EMATCH=y
++CONFIG_NET_EMATCH_STACK=32
++CONFIG_NET_EMATCH_CMP=m
++CONFIG_NET_EMATCH_NBYTE=m
++CONFIG_NET_EMATCH_U32=m
++CONFIG_NET_EMATCH_META=m
++CONFIG_NET_EMATCH_TEXT=m
++CONFIG_NET_EMATCH_CANID=m
++CONFIG_NET_EMATCH_IPSET=m
++CONFIG_NET_CLS_ACT=y
++CONFIG_NET_ACT_POLICE=m
++CONFIG_NET_ACT_GACT=m
++CONFIG_GACT_PROB=y
++CONFIG_NET_ACT_MIRRED=m
++# CONFIG_NET_ACT_SAMPLE is not set
++CONFIG_NET_ACT_IPT=m
++CONFIG_NET_ACT_NAT=m
++CONFIG_NET_ACT_PEDIT=m
++CONFIG_NET_ACT_SIMP=m
++CONFIG_NET_ACT_SKBEDIT=m
++CONFIG_NET_ACT_CSUM=m
++CONFIG_NET_ACT_VLAN=m
++CONFIG_NET_ACT_BPF=m
++CONFIG_NET_ACT_CONNMARK=m
++# CONFIG_NET_ACT_SKBMOD is not set
++# CONFIG_NET_ACT_IFE is not set
++# CONFIG_NET_ACT_TUNNEL_KEY is not set
++# CONFIG_NET_CLS_IND is not set
++CONFIG_NET_SCH_FIFO=y
++CONFIG_DCB=y
++CONFIG_DNS_RESOLVER=y
++CONFIG_BATMAN_ADV=m
++# CONFIG_BATMAN_ADV_BATMAN_V is not set
++CONFIG_BATMAN_ADV_BLA=y
++CONFIG_BATMAN_ADV_DAT=y
++CONFIG_BATMAN_ADV_NC=y
++CONFIG_BATMAN_ADV_MCAST=y
++CONFIG_BATMAN_ADV_DEBUGFS=y
++# CONFIG_BATMAN_ADV_DEBUG is not set
++CONFIG_OPENVSWITCH=m
++CONFIG_OPENVSWITCH_GRE=m
++CONFIG_OPENVSWITCH_VXLAN=m
++CONFIG_OPENVSWITCH_GENEVE=m
++CONFIG_VSOCKETS=m
++CONFIG_VSOCKETS_DIAG=m
++CONFIG_VMWARE_VMCI_VSOCKETS=m
++# CONFIG_VIRTIO_VSOCKETS is not set
++# CONFIG_HYPERV_VSOCKETS is not set
++CONFIG_NETLINK_DIAG=m
++CONFIG_MPLS=y
++CONFIG_NET_MPLS_GSO=m
++CONFIG_MPLS_ROUTING=m
++CONFIG_MPLS_IPTUNNEL=m
++CONFIG_NET_NSH=m
++CONFIG_HSR=m
++# CONFIG_NET_SWITCHDEV is not set
++CONFIG_NET_L3_MASTER_DEV=y
++# CONFIG_NET_NCSI is not set
++CONFIG_RPS=y
++CONFIG_RFS_ACCEL=y
++CONFIG_XPS=y
++CONFIG_CGROUP_NET_PRIO=y
++CONFIG_CGROUP_NET_CLASSID=y
++CONFIG_NET_RX_BUSY_POLL=y
++CONFIG_BQL=y
++CONFIG_BPF_JIT=y
++# CONFIG_BPF_STREAM_PARSER is not set
++CONFIG_NET_FLOW_LIMIT=y
++
++#
++# Network testing
++#
++CONFIG_NET_PKTGEN=m
++# CONFIG_NET_DROP_MONITOR is not set
++CONFIG_HAMRADIO=y
++
++#
++# Packet Radio protocols
++#
++CONFIG_AX25=m
++CONFIG_AX25_DAMA_SLAVE=y
++CONFIG_NETROM=m
++CONFIG_ROSE=m
++
++#
++# AX.25 network device drivers
++#
++CONFIG_MKISS=m
++CONFIG_6PACK=m
++CONFIG_BPQETHER=m
++CONFIG_BAYCOM_SER_FDX=m
++CONFIG_BAYCOM_SER_HDX=m
++CONFIG_BAYCOM_PAR=m
++CONFIG_YAM=m
++CONFIG_CAN=m
++CONFIG_CAN_RAW=m
++CONFIG_CAN_BCM=m
++CONFIG_CAN_GW=m
++
++#
++# CAN Device Drivers
++#
++CONFIG_CAN_VCAN=m
++# CONFIG_CAN_VXCAN is not set
++CONFIG_CAN_SLCAN=m
++CONFIG_CAN_DEV=m
++CONFIG_CAN_CALC_BITTIMING=y
++CONFIG_CAN_LEDS=y
++CONFIG_CAN_JANZ_ICAN3=m
++CONFIG_CAN_C_CAN=m
++CONFIG_CAN_C_CAN_PLATFORM=m
++CONFIG_CAN_C_CAN_PCI=m
++CONFIG_CAN_CC770=m
++CONFIG_CAN_CC770_ISA=m
++CONFIG_CAN_CC770_PLATFORM=m
++# CONFIG_CAN_IFI_CANFD is not set
++CONFIG_CAN_M_CAN=m
++# CONFIG_CAN_PEAK_PCIEFD is not set
++CONFIG_CAN_SJA1000=m
++CONFIG_CAN_SJA1000_ISA=m
++CONFIG_CAN_SJA1000_PLATFORM=m
++CONFIG_CAN_EMS_PCMCIA=m
++CONFIG_CAN_EMS_PCI=m
++CONFIG_CAN_PEAK_PCMCIA=m
++CONFIG_CAN_PEAK_PCI=m
++CONFIG_CAN_PEAK_PCIEC=y
++CONFIG_CAN_KVASER_PCI=m
++CONFIG_CAN_PLX_PCI=m
++CONFIG_CAN_SOFTING=m
++CONFIG_CAN_SOFTING_CS=m
++
++#
++# CAN SPI interfaces
++#
++# CONFIG_CAN_HI311X is not set
++CONFIG_CAN_MCP251X=m
++
++#
++# CAN USB interfaces
++#
++CONFIG_CAN_EMS_USB=m
++CONFIG_CAN_ESD_USB2=m
++CONFIG_CAN_GS_USB=m
++CONFIG_CAN_KVASER_USB=m
++CONFIG_CAN_PEAK_USB=m
++CONFIG_CAN_8DEV_USB=m
++# CONFIG_CAN_MCBA_USB is not set
++# CONFIG_CAN_DEBUG_DEVICES is not set
++CONFIG_BT=m
++CONFIG_BT_BREDR=y
++CONFIG_BT_RFCOMM=m
++CONFIG_BT_RFCOMM_TTY=y
++CONFIG_BT_BNEP=m
++CONFIG_BT_BNEP_MC_FILTER=y
++CONFIG_BT_BNEP_PROTO_FILTER=y
++CONFIG_BT_CMTP=m
++CONFIG_BT_HIDP=m
++CONFIG_BT_HS=y
++CONFIG_BT_LE=y
++CONFIG_BT_6LOWPAN=m
++# CONFIG_BT_LEDS is not set
++# CONFIG_BT_SELFTEST is not set
++CONFIG_BT_DEBUGFS=y
++
++#
++# Bluetooth device drivers
++#
++CONFIG_BT_INTEL=m
++CONFIG_BT_BCM=m
++CONFIG_BT_RTL=m
++CONFIG_BT_QCA=m
++CONFIG_BT_HCIBTUSB=m
++# CONFIG_BT_HCIBTUSB_AUTOSUSPEND is not set
++CONFIG_BT_HCIBTUSB_BCM=y
++CONFIG_BT_HCIBTUSB_RTL=y
++CONFIG_BT_HCIBTSDIO=m
++CONFIG_BT_HCIUART=m
++CONFIG_BT_HCIUART_H4=y
++CONFIG_BT_HCIUART_BCSP=y
++CONFIG_BT_HCIUART_ATH3K=y
++CONFIG_BT_HCIUART_3WIRE=y
++CONFIG_BT_HCIUART_INTEL=y
++CONFIG_BT_HCIUART_QCA=y
++# CONFIG_BT_HCIUART_AG6XX is not set
++# CONFIG_BT_HCIUART_MRVL is not set
++CONFIG_BT_HCIBCM203X=m
++CONFIG_BT_HCIBPA10X=m
++CONFIG_BT_HCIBFUSB=m
++CONFIG_BT_HCIDTL1=m
++CONFIG_BT_HCIBT3C=m
++CONFIG_BT_HCIBLUECARD=m
++CONFIG_BT_HCIBTUART=m
++CONFIG_BT_HCIVHCI=m
++CONFIG_BT_MRVL=m
++CONFIG_BT_MRVL_SDIO=m
++CONFIG_BT_ATH3K=m
++CONFIG_BT_WILINK=m
++CONFIG_AF_RXRPC=m
++# CONFIG_AF_RXRPC_IPV6 is not set
++# CONFIG_AF_RXRPC_INJECT_LOSS is not set
++# CONFIG_AF_RXRPC_DEBUG is not set
++# CONFIG_RXKAD is not set
++# CONFIG_AF_KCM is not set
++# CONFIG_STREAM_PARSER is not set
++CONFIG_FIB_RULES=y
++CONFIG_WIRELESS=y
++CONFIG_WIRELESS_EXT=y
++CONFIG_WEXT_CORE=y
++CONFIG_WEXT_PROC=y
++CONFIG_WEXT_SPY=y
++CONFIG_WEXT_PRIV=y
++CONFIG_CFG80211=m
++# CONFIG_NL80211_TESTMODE is not set
++# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
++# CONFIG_CFG80211_CERTIFICATION_ONUS is not set
++CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y
++CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y
++CONFIG_CFG80211_DEFAULT_PS=y
++CONFIG_CFG80211_DEBUGFS=y
++CONFIG_CFG80211_CRDA_SUPPORT=y
++CONFIG_CFG80211_WEXT=y
++CONFIG_CFG80211_WEXT_EXPORT=y
++CONFIG_LIB80211=m
++CONFIG_LIB80211_CRYPT_WEP=m
++CONFIG_LIB80211_CRYPT_CCMP=m
++CONFIG_LIB80211_CRYPT_TKIP=m
++# CONFIG_LIB80211_DEBUG is not set
++CONFIG_MAC80211=m
++CONFIG_MAC80211_HAS_RC=y
++CONFIG_MAC80211_RC_MINSTREL=y
++CONFIG_MAC80211_RC_MINSTREL_HT=y
++CONFIG_MAC80211_RC_MINSTREL_VHT=y
++CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
++CONFIG_MAC80211_RC_DEFAULT="minstrel_ht"
++CONFIG_MAC80211_MESH=y
++CONFIG_MAC80211_LEDS=y
++CONFIG_MAC80211_DEBUGFS=y
++CONFIG_MAC80211_MESSAGE_TRACING=y
++# CONFIG_MAC80211_DEBUG_MENU is not set
++CONFIG_MAC80211_STA_HASH_MAX_SIZE=0
++CONFIG_WIMAX=m
++CONFIG_WIMAX_DEBUG_LEVEL=8
++CONFIG_RFKILL=y
++CONFIG_RFKILL_LEDS=y
++CONFIG_RFKILL_INPUT=y
++CONFIG_RFKILL_GPIO=m
++CONFIG_NET_9P=m
++CONFIG_NET_9P_VIRTIO=m
++# CONFIG_NET_9P_XEN is not set
++CONFIG_NET_9P_RDMA=m
++# CONFIG_NET_9P_DEBUG is not set
++CONFIG_CAIF=m
++# CONFIG_CAIF_DEBUG is not set
++CONFIG_CAIF_NETDEV=m
++CONFIG_CAIF_USB=m
++CONFIG_CEPH_LIB=m
++# CONFIG_CEPH_LIB_PRETTYDEBUG is not set
++CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y
++CONFIG_NFC=m
++CONFIG_NFC_DIGITAL=m
++CONFIG_NFC_NCI=m
++CONFIG_NFC_NCI_SPI=m
++CONFIG_NFC_NCI_UART=m
++CONFIG_NFC_HCI=m
++CONFIG_NFC_SHDLC=y
++
++#
++# Near Field Communication (NFC) devices
++#
++CONFIG_NFC_TRF7970A=m
++CONFIG_NFC_MEI_PHY=m
++CONFIG_NFC_SIM=m
++CONFIG_NFC_PORT100=m
++CONFIG_NFC_FDP=m
++CONFIG_NFC_FDP_I2C=m
++CONFIG_NFC_PN544=m
++CONFIG_NFC_PN544_I2C=m
++CONFIG_NFC_PN544_MEI=m
++# CONFIG_NFC_PN533_USB is not set
++# CONFIG_NFC_PN533_I2C is not set
++CONFIG_NFC_MICROREAD=m
++CONFIG_NFC_MICROREAD_I2C=m
++CONFIG_NFC_MICROREAD_MEI=m
++CONFIG_NFC_MRVL=m
++CONFIG_NFC_MRVL_USB=m
++CONFIG_NFC_MRVL_UART=m
++CONFIG_NFC_MRVL_I2C=m
++CONFIG_NFC_MRVL_SPI=m
++CONFIG_NFC_ST21NFCA=m
++CONFIG_NFC_ST21NFCA_I2C=m
++CONFIG_NFC_ST_NCI=m
++CONFIG_NFC_ST_NCI_I2C=m
++CONFIG_NFC_ST_NCI_SPI=m
++CONFIG_NFC_NXP_NCI=m
++CONFIG_NFC_NXP_NCI_I2C=m
++CONFIG_NFC_S3FWRN5=m
++CONFIG_NFC_S3FWRN5_I2C=m
++# CONFIG_NFC_ST95HF is not set
++# CONFIG_PSAMPLE is not set
++# CONFIG_NET_IFE is not set
++CONFIG_LWTUNNEL=y
++CONFIG_LWTUNNEL_BPF=y
++CONFIG_DST_CACHE=y
++CONFIG_GRO_CELLS=y
++# CONFIG_NET_DEVLINK is not set
++CONFIG_MAY_USE_DEVLINK=y
++CONFIG_HAVE_EBPF_JIT=y
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_UEVENT_HELPER=y
++CONFIG_UEVENT_HELPER_PATH=""
++CONFIG_DEVTMPFS=y
++CONFIG_DEVTMPFS_MOUNT=y
++# CONFIG_STANDALONE is not set
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++CONFIG_FW_LOADER=y
++CONFIG_EXTRA_FIRMWARE=""
++CONFIG_FW_LOADER_USER_HELPER=y
++# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set
++CONFIG_WANT_DEV_COREDUMP=y
++CONFIG_ALLOW_DEV_COREDUMP=y
++CONFIG_DEV_COREDUMP=y
++# CONFIG_DEBUG_DRIVER is not set
++# CONFIG_DEBUG_DEVRES is not set
++# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set
++# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set
++CONFIG_SYS_HYPERVISOR=y
++# CONFIG_GENERIC_CPU_DEVICES is not set
++CONFIG_GENERIC_CPU_AUTOPROBE=y
++CONFIG_GENERIC_CPU_VULNERABILITIES=y
++CONFIG_REGMAP=y
++CONFIG_REGMAP_I2C=y
++CONFIG_REGMAP_SPI=y
++CONFIG_REGMAP_SPMI=m
++CONFIG_REGMAP_MMIO=y
++CONFIG_REGMAP_IRQ=y
++CONFIG_DMA_SHARED_BUFFER=y
++# CONFIG_DMA_FENCE_TRACE is not set
++# CONFIG_DMA_CMA is not set
++
++#
++# Bus devices
++#
++CONFIG_CONNECTOR=y
++CONFIG_PROC_EVENTS=y
++CONFIG_MTD=m
++# CONFIG_MTD_TESTS is not set
++CONFIG_MTD_REDBOOT_PARTS=m
++CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
++# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
++# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
++CONFIG_MTD_CMDLINE_PARTS=m
++CONFIG_MTD_AR7_PARTS=m
++
++#
++# Partition parsers
++#
++
++#
++# User Modules And Translation Layers
++#
++CONFIG_MTD_BLKDEVS=m
++CONFIG_MTD_BLOCK=m
++CONFIG_MTD_BLOCK_RO=m
++CONFIG_FTL=m
++CONFIG_NFTL=m
++CONFIG_NFTL_RW=y
++CONFIG_INFTL=m
++CONFIG_RFD_FTL=m
++CONFIG_SSFDC=m
++CONFIG_SM_FTL=m
++CONFIG_MTD_OOPS=m
++CONFIG_MTD_SWAP=m
++# CONFIG_MTD_PARTITIONED_MASTER is not set
++
++#
++# RAM/ROM/Flash chip drivers
++#
++CONFIG_MTD_CFI=m
++CONFIG_MTD_JEDECPROBE=m
++CONFIG_MTD_GEN_PROBE=m
++# CONFIG_MTD_CFI_ADV_OPTIONS is not set
++CONFIG_MTD_MAP_BANK_WIDTH_1=y
++CONFIG_MTD_MAP_BANK_WIDTH_2=y
++CONFIG_MTD_MAP_BANK_WIDTH_4=y
++# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
++CONFIG_MTD_CFI_I1=y
++CONFIG_MTD_CFI_I2=y
++# CONFIG_MTD_CFI_I4 is not set
++# CONFIG_MTD_CFI_I8 is not set
++CONFIG_MTD_CFI_INTELEXT=m
++CONFIG_MTD_CFI_AMDSTD=m
++CONFIG_MTD_CFI_STAA=m
++CONFIG_MTD_CFI_UTIL=m
++CONFIG_MTD_RAM=m
++CONFIG_MTD_ROM=m
++CONFIG_MTD_ABSENT=m
++
++#
++# Mapping drivers for chip access
++#
++CONFIG_MTD_COMPLEX_MAPPINGS=y
++CONFIG_MTD_PHYSMAP=m
++# CONFIG_MTD_PHYSMAP_COMPAT is not set
++CONFIG_MTD_SBC_GXX=m
++CONFIG_MTD_AMD76XROM=m
++CONFIG_MTD_ICHXROM=m
++CONFIG_MTD_ESB2ROM=m
++CONFIG_MTD_CK804XROM=m
++CONFIG_MTD_SCB2_FLASH=m
++CONFIG_MTD_NETtel=m
++CONFIG_MTD_L440GX=m
++CONFIG_MTD_PCI=m
++CONFIG_MTD_PCMCIA=m
++# CONFIG_MTD_PCMCIA_ANONYMOUS is not set
++CONFIG_MTD_GPIO_ADDR=m
++CONFIG_MTD_INTEL_VR_NOR=m
++CONFIG_MTD_PLATRAM=m
++CONFIG_MTD_LATCH_ADDR=m
++
++#
++# Self-contained MTD device drivers
++#
++CONFIG_MTD_PMC551=m
++# CONFIG_MTD_PMC551_BUGFIX is not set
++# CONFIG_MTD_PMC551_DEBUG is not set
++CONFIG_MTD_DATAFLASH=m
++# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set
++CONFIG_MTD_DATAFLASH_OTP=y
++CONFIG_MTD_M25P80=m
++# CONFIG_MTD_MCHP23K256 is not set
++CONFIG_MTD_SST25L=m
++CONFIG_MTD_SLRAM=m
++CONFIG_MTD_PHRAM=m
++CONFIG_MTD_MTDRAM=m
++CONFIG_MTDRAM_TOTAL_SIZE=4096
++CONFIG_MTDRAM_ERASE_SIZE=128
++CONFIG_MTD_BLOCK2MTD=m
++
++#
++# Disk-On-Chip Device Drivers
++#
++CONFIG_MTD_DOCG3=m
++CONFIG_BCH_CONST_M=14
++CONFIG_BCH_CONST_T=4
++CONFIG_MTD_NAND_ECC=m
++# CONFIG_MTD_NAND_ECC_SMC is not set
++CONFIG_MTD_NAND=m
++CONFIG_MTD_NAND_BCH=m
++CONFIG_MTD_NAND_ECC_BCH=y
++CONFIG_MTD_SM_COMMON=m
++CONFIG_MTD_NAND_DENALI=m
++CONFIG_MTD_NAND_DENALI_PCI=m
++CONFIG_MTD_NAND_GPIO=m
++# CONFIG_MTD_NAND_OMAP_BCH_BUILD is not set
++CONFIG_MTD_NAND_RICOH=m
++CONFIG_MTD_NAND_DISKONCHIP=m
++# CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADVANCED is not set
++CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS=0
++# CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE is not set
++CONFIG_MTD_NAND_DOCG4=m
++CONFIG_MTD_NAND_CAFE=m
++CONFIG_MTD_NAND_NANDSIM=m
++CONFIG_MTD_NAND_PLATFORM=m
++CONFIG_MTD_ONENAND=m
++CONFIG_MTD_ONENAND_VERIFY_WRITE=y
++CONFIG_MTD_ONENAND_GENERIC=m
++# CONFIG_MTD_ONENAND_OTP is not set
++CONFIG_MTD_ONENAND_2X_PROGRAM=y
++
++#
++# LPDDR & LPDDR2 PCM memory drivers
++#
++CONFIG_MTD_LPDDR=m
++CONFIG_MTD_QINFO_PROBE=m
++CONFIG_MTD_SPI_NOR=m
++# CONFIG_MTD_MT81xx_NOR is not set
++CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y
++# CONFIG_SPI_INTEL_SPI_PCI is not set
++# CONFIG_SPI_INTEL_SPI_PLATFORM is not set
++CONFIG_MTD_UBI=m
++CONFIG_MTD_UBI_WL_THRESHOLD=4096
++CONFIG_MTD_UBI_BEB_LIMIT=20
++CONFIG_MTD_UBI_FASTMAP=y
++CONFIG_MTD_UBI_GLUEBI=m
++CONFIG_MTD_UBI_BLOCK=y
++# CONFIG_OF is not set
++CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y
++CONFIG_PARPORT=m
++CONFIG_PARPORT_PC=m
++CONFIG_PARPORT_SERIAL=m
++CONFIG_PARPORT_PC_FIFO=y
++# CONFIG_PARPORT_PC_SUPERIO is not set
++CONFIG_PARPORT_PC_PCMCIA=m
++# CONFIG_PARPORT_GSC is not set
++CONFIG_PARPORT_AX88796=m
++CONFIG_PARPORT_1284=y
++CONFIG_PARPORT_NOT_PC=y
++CONFIG_PNP=y
++# CONFIG_PNP_DEBUG_MESSAGES is not set
++
++#
++# Protocols
++#
++CONFIG_PNPACPI=y
++CONFIG_BLK_DEV=y
++CONFIG_BLK_DEV_NULL_BLK=m
++CONFIG_BLK_DEV_FD=m
++CONFIG_CDROM=y
++CONFIG_PARIDE=m
++
++#
++# Parallel IDE high-level drivers
++#
++CONFIG_PARIDE_PD=m
++CONFIG_PARIDE_PCD=m
++CONFIG_PARIDE_PF=m
++CONFIG_PARIDE_PT=m
++CONFIG_PARIDE_PG=m
++
++#
++# Parallel IDE protocol modules
++#
++CONFIG_PARIDE_ATEN=m
++CONFIG_PARIDE_BPCK=m
++CONFIG_PARIDE_COMM=m
++CONFIG_PARIDE_DSTR=m
++CONFIG_PARIDE_FIT2=m
++CONFIG_PARIDE_FIT3=m
++CONFIG_PARIDE_EPAT=m
++CONFIG_PARIDE_EPATC8=y
++CONFIG_PARIDE_EPIA=m
++CONFIG_PARIDE_FRIQ=m
++CONFIG_PARIDE_FRPW=m
++CONFIG_PARIDE_KBIC=m
++CONFIG_PARIDE_KTTI=m
++CONFIG_PARIDE_ON20=m
++CONFIG_PARIDE_ON26=m
++CONFIG_BLK_DEV_PCIESSD_MTIP32XX=m
++CONFIG_ZRAM=m
++# CONFIG_ZRAM_WRITEBACK is not set
++CONFIG_BLK_DEV_DAC960=m
++CONFIG_BLK_DEV_UMEM=m
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=y
++CONFIG_BLK_DEV_LOOP_MIN_COUNT=8
++CONFIG_BLK_DEV_CRYPTOLOOP=m
++CONFIG_BLK_DEV_DRBD=m
++# CONFIG_DRBD_FAULT_INJECTION is not set
++CONFIG_BLK_DEV_NBD=m
++CONFIG_BLK_DEV_SKD=m
++CONFIG_BLK_DEV_SX8=m
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=65536
++CONFIG_CDROM_PKTCDVD=m
++CONFIG_CDROM_PKTCDVD_BUFFERS=8
++# CONFIG_CDROM_PKTCDVD_WCACHE is not set
++CONFIG_ATA_OVER_ETH=m
++CONFIG_XEN_BLKDEV_FRONTEND=y
++CONFIG_XEN_BLKDEV_BACKEND=m
++CONFIG_VIRTIO_BLK=y
++# CONFIG_VIRTIO_BLK_SCSI is not set
++CONFIG_BLK_DEV_RBD=m
++CONFIG_BLK_DEV_RSXX=m
++
++#
++# NVME Support
++#
++CONFIG_NVME_CORE=y
++CONFIG_BLK_DEV_NVME=y
++# CONFIG_NVME_MULTIPATH is not set
++# CONFIG_NVME_RDMA is not set
++# CONFIG_NVME_FC is not set
++# CONFIG_NVME_TARGET is not set
++
++#
++# Misc devices
++#
++CONFIG_SENSORS_LIS3LV02D=m
++CONFIG_AD525X_DPOT=m
++CONFIG_AD525X_DPOT_I2C=m
++CONFIG_AD525X_DPOT_SPI=m
++CONFIG_DUMMY_IRQ=m
++CONFIG_IBM_ASM=m
++CONFIG_PHANTOM=m
++CONFIG_SGI_IOC4=m
++CONFIG_TIFM_CORE=m
++CONFIG_TIFM_7XX1=m
++CONFIG_ICS932S401=m
++CONFIG_ENCLOSURE_SERVICES=m
++CONFIG_HP_ILO=m
++CONFIG_APDS9802ALS=m
++CONFIG_ISL29003=m
++CONFIG_ISL29020=m
++CONFIG_SENSORS_TSL2550=m
++CONFIG_SENSORS_BH1770=m
++CONFIG_SENSORS_APDS990X=m
++CONFIG_HMC6352=m
++CONFIG_DS1682=m
++CONFIG_VMWARE_BALLOON=m
++CONFIG_USB_SWITCH_FSA9480=m
++CONFIG_LATTICE_ECP3_CONFIG=m
++CONFIG_SRAM=y
++# CONFIG_PCI_ENDPOINT_TEST is not set
++# CONFIG_MISC_RTSX is not set
++CONFIG_C2PORT=m
++CONFIG_C2PORT_DURAMAR_2150=m
++
++#
++# EEPROM support
++#
++CONFIG_EEPROM_AT24=m
++CONFIG_EEPROM_AT25=m
++CONFIG_EEPROM_LEGACY=m
++CONFIG_EEPROM_MAX6875=m
++CONFIG_EEPROM_93CX6=m
++CONFIG_EEPROM_93XX46=m
++# CONFIG_EEPROM_IDT_89HPESX is not set
++CONFIG_CB710_CORE=m
++# CONFIG_CB710_DEBUG is not set
++CONFIG_CB710_DEBUG_ASSUMPTIONS=y
++
++#
++# Texas Instruments shared transport line discipline
++#
++CONFIG_TI_ST=m
++CONFIG_SENSORS_LIS3_I2C=m
++CONFIG_ALTERA_STAPL=m
++CONFIG_INTEL_MEI=m
++CONFIG_INTEL_MEI_ME=m
++CONFIG_INTEL_MEI_TXE=m
++CONFIG_VMWARE_VMCI=m
++
++#
++# Intel MIC & related support
++#
++
++#
++# Intel MIC Bus Driver
++#
++CONFIG_INTEL_MIC_BUS=m
++
++#
++# SCIF Bus Driver
++#
++CONFIG_SCIF_BUS=m
++
++#
++# VOP Bus Driver
++#
++# CONFIG_VOP_BUS is not set
++
++#
++# Intel MIC Host Driver
++#
++
++#
++# Intel MIC Card Driver
++#
++
++#
++# SCIF Driver
++#
++CONFIG_SCIF=m
++
++#
++# Intel MIC Coprocessor State Management (COSM) Drivers
++#
++CONFIG_MIC_COSM=m
++
++#
++# VOP Driver
++#
++CONFIG_VHOST_RING=m
++CONFIG_GENWQE=m
++CONFIG_GENWQE_PLATFORM_ERROR_RECOVERY=0
++CONFIG_ECHO=m
++# CONFIG_CXL_BASE is not set
++# CONFIG_CXL_AFU_DRIVER_OPS is not set
++# CONFIG_CXL_LIB is not set
++# CONFIG_OCXL_BASE is not set
++# CONFIG_MISC_RTSX_PCI is not set
++# CONFIG_MISC_RTSX_USB is not set
++CONFIG_HAVE_IDE=y
++# CONFIG_IDE is not set
++
++#
++# SCSI device support
++#
++CONFIG_SCSI_MOD=y
++CONFIG_RAID_ATTRS=m
++CONFIG_SCSI=y
++CONFIG_SCSI_DMA=y
++CONFIG_SCSI_NETLINK=y
++# CONFIG_SCSI_MQ_DEFAULT is not set
++CONFIG_SCSI_PROC_FS=y
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=y
++CONFIG_CHR_DEV_ST=m
++CONFIG_CHR_DEV_OSST=m
++CONFIG_BLK_DEV_SR=y
++# CONFIG_BLK_DEV_SR_VENDOR is not set
++CONFIG_CHR_DEV_SG=y
++CONFIG_CHR_DEV_SCH=m
++CONFIG_SCSI_ENCLOSURE=m
++CONFIG_SCSI_CONSTANTS=y
++CONFIG_SCSI_LOGGING=y
++CONFIG_SCSI_SCAN_ASYNC=y
++
++#
++# SCSI Transports
++#
++CONFIG_SCSI_SPI_ATTRS=m
++CONFIG_SCSI_FC_ATTRS=m
++CONFIG_SCSI_ISCSI_ATTRS=m
++CONFIG_SCSI_SAS_ATTRS=m
++CONFIG_SCSI_SAS_LIBSAS=m
++CONFIG_SCSI_SAS_ATA=y
++CONFIG_SCSI_SAS_HOST_SMP=y
++CONFIG_SCSI_SRP_ATTRS=m
++CONFIG_SCSI_LOWLEVEL=y
++CONFIG_ISCSI_TCP=m
++CONFIG_ISCSI_BOOT_SYSFS=m
++CONFIG_SCSI_CXGB3_ISCSI=m
++CONFIG_SCSI_CXGB4_ISCSI=m
++CONFIG_SCSI_BNX2_ISCSI=m
++CONFIG_SCSI_BNX2X_FCOE=m
++CONFIG_BE2ISCSI=m
++CONFIG_BLK_DEV_3W_XXXX_RAID=m
++CONFIG_SCSI_HPSA=m
++CONFIG_SCSI_3W_9XXX=m
++CONFIG_SCSI_3W_SAS=m
++CONFIG_SCSI_ACARD=m
++CONFIG_SCSI_AACRAID=m
++CONFIG_SCSI_AIC7XXX=m
++CONFIG_AIC7XXX_CMDS_PER_DEVICE=8
++CONFIG_AIC7XXX_RESET_DELAY_MS=5000
++# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
++CONFIG_AIC7XXX_DEBUG_MASK=0
++CONFIG_AIC7XXX_REG_PRETTY_PRINT=y
++CONFIG_SCSI_AIC79XX=m
++CONFIG_AIC79XX_CMDS_PER_DEVICE=32
++CONFIG_AIC79XX_RESET_DELAY_MS=5000
++# CONFIG_AIC79XX_DEBUG_ENABLE is not set
++CONFIG_AIC79XX_DEBUG_MASK=0
++CONFIG_AIC79XX_REG_PRETTY_PRINT=y
++CONFIG_SCSI_AIC94XX=m
++# CONFIG_AIC94XX_DEBUG is not set
++CONFIG_SCSI_MVSAS=m
++# CONFIG_SCSI_MVSAS_DEBUG is not set
++# CONFIG_SCSI_MVSAS_TASKLET is not set
++CONFIG_SCSI_MVUMI=m
++CONFIG_SCSI_DPT_I2O=m
++CONFIG_SCSI_ADVANSYS=m
++CONFIG_SCSI_ARCMSR=m
++CONFIG_SCSI_ESAS2R=m
++CONFIG_MEGARAID_NEWGEN=y
++CONFIG_MEGARAID_MM=m
++CONFIG_MEGARAID_MAILBOX=m
++CONFIG_MEGARAID_LEGACY=m
++CONFIG_MEGARAID_SAS=m
++CONFIG_SCSI_MPT3SAS=m
++CONFIG_SCSI_MPT2SAS_MAX_SGE=128
++CONFIG_SCSI_MPT3SAS_MAX_SGE=128
++CONFIG_SCSI_MPT2SAS=m
++# CONFIG_SCSI_SMARTPQI is not set
++CONFIG_SCSI_UFSHCD=m
++CONFIG_SCSI_UFSHCD_PCI=m
++# CONFIG_SCSI_UFS_DWC_TC_PCI is not set
++CONFIG_SCSI_UFSHCD_PLATFORM=m
++# CONFIG_SCSI_UFS_DWC_TC_PLATFORM is not set
++CONFIG_SCSI_HPTIOP=m
++CONFIG_SCSI_BUSLOGIC=m
++CONFIG_SCSI_FLASHPOINT=y
++CONFIG_VMWARE_PVSCSI=m
++CONFIG_XEN_SCSI_FRONTEND=m
++CONFIG_HYPERV_STORAGE=m
++CONFIG_LIBFC=m
++CONFIG_LIBFCOE=m
++CONFIG_FCOE=m
++CONFIG_FCOE_FNIC=m
++CONFIG_SCSI_SNIC=m
++# CONFIG_SCSI_SNIC_DEBUG_FS is not set
++CONFIG_SCSI_DMX3191D=m
++CONFIG_SCSI_EATA=m
++CONFIG_SCSI_EATA_TAGGED_QUEUE=y
++CONFIG_SCSI_EATA_LINKED_COMMANDS=y
++CONFIG_SCSI_EATA_MAX_TAGS=16
++CONFIG_SCSI_FUTURE_DOMAIN=m
++CONFIG_SCSI_GDTH=m
++CONFIG_SCSI_ISCI=m
++CONFIG_SCSI_IPS=m
++CONFIG_SCSI_INITIO=m
++CONFIG_SCSI_INIA100=m
++CONFIG_SCSI_PPA=m
++CONFIG_SCSI_IMM=m
++# CONFIG_SCSI_IZIP_EPP16 is not set
++# CONFIG_SCSI_IZIP_SLOW_CTR is not set
++CONFIG_SCSI_STEX=m
++CONFIG_SCSI_SYM53C8XX_2=m
++CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
++CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
++CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
++CONFIG_SCSI_SYM53C8XX_MMIO=y
++CONFIG_SCSI_IPR=m
++CONFIG_SCSI_IPR_TRACE=y
++CONFIG_SCSI_IPR_DUMP=y
++CONFIG_SCSI_QLOGIC_1280=m
++CONFIG_SCSI_QLA_FC=m
++CONFIG_TCM_QLA2XXX=m
++# CONFIG_TCM_QLA2XXX_DEBUG is not set
++CONFIG_SCSI_QLA_ISCSI=m
++# CONFIG_QEDI is not set
++# CONFIG_QEDF is not set
++CONFIG_SCSI_LPFC=m
++# CONFIG_SCSI_LPFC_DEBUG_FS is not set
++CONFIG_SCSI_DC395x=m
++CONFIG_SCSI_AM53C974=m
++CONFIG_SCSI_WD719X=m
++CONFIG_SCSI_DEBUG=m
++CONFIG_SCSI_PMCRAID=m
++CONFIG_SCSI_PM8001=m
++CONFIG_SCSI_BFA_FC=m
++CONFIG_SCSI_VIRTIO=m
++CONFIG_SCSI_CHELSIO_FCOE=m
++CONFIG_SCSI_LOWLEVEL_PCMCIA=y
++CONFIG_PCMCIA_AHA152X=m
++CONFIG_PCMCIA_FDOMAIN=m
++CONFIG_PCMCIA_QLOGIC=m
++CONFIG_PCMCIA_SYM53C500=m
++CONFIG_SCSI_DH=y
++CONFIG_SCSI_DH_RDAC=m
++CONFIG_SCSI_DH_HP_SW=m
++CONFIG_SCSI_DH_EMC=m
++CONFIG_SCSI_DH_ALUA=m
++CONFIG_SCSI_OSD_INITIATOR=m
++CONFIG_SCSI_OSD_ULD=m
++CONFIG_SCSI_OSD_DPRINT_SENSE=1
++# CONFIG_SCSI_OSD_DEBUG is not set
++CONFIG_ATA=y
++# CONFIG_ATA_NONSTANDARD is not set
++CONFIG_ATA_VERBOSE_ERROR=y
++CONFIG_ATA_ACPI=y
++CONFIG_SATA_ZPODD=y
++CONFIG_SATA_PMP=y
++
++#
++# Controllers with non-SFF native interface
++#
++CONFIG_SATA_AHCI=m
++CONFIG_SATA_MOBILE_LPM_POLICY=0
++CONFIG_SATA_AHCI_PLATFORM=m
++CONFIG_SATA_INIC162X=m
++CONFIG_SATA_ACARD_AHCI=m
++CONFIG_SATA_SIL24=m
++CONFIG_ATA_SFF=y
++
++#
++# SFF controllers with custom DMA interface
++#
++CONFIG_PDC_ADMA=m
++CONFIG_SATA_QSTOR=m
++CONFIG_SATA_SX4=m
++CONFIG_ATA_BMDMA=y
++
++#
++# SATA SFF controllers with BMDMA
++#
++CONFIG_ATA_PIIX=y
++# CONFIG_SATA_DWC is not set
++CONFIG_SATA_MV=m
++CONFIG_SATA_NV=m
++CONFIG_SATA_PROMISE=m
++CONFIG_SATA_SIL=m
++CONFIG_SATA_SIS=m
++CONFIG_SATA_SVW=m
++CONFIG_SATA_ULI=m
++CONFIG_SATA_VIA=m
++CONFIG_SATA_VITESSE=m
++
++#
++# PATA SFF controllers with BMDMA
++#
++CONFIG_PATA_ALI=m
++CONFIG_PATA_AMD=m
++CONFIG_PATA_ARTOP=m
++CONFIG_PATA_ATIIXP=m
++CONFIG_PATA_ATP867X=m
++CONFIG_PATA_CMD64X=m
++CONFIG_PATA_CYPRESS=m
++CONFIG_PATA_EFAR=m
++CONFIG_PATA_HPT366=m
++CONFIG_PATA_HPT37X=m
++CONFIG_PATA_HPT3X2N=m
++CONFIG_PATA_HPT3X3=m
++# CONFIG_PATA_HPT3X3_DMA is not set
++CONFIG_PATA_IT8213=m
++CONFIG_PATA_IT821X=m
++CONFIG_PATA_JMICRON=m
++CONFIG_PATA_MARVELL=m
++CONFIG_PATA_NETCELL=m
++CONFIG_PATA_NINJA32=m
++CONFIG_PATA_NS87415=m
++CONFIG_PATA_OLDPIIX=m
++CONFIG_PATA_OPTIDMA=m
++CONFIG_PATA_PDC2027X=m
++CONFIG_PATA_PDC_OLD=m
++CONFIG_PATA_RADISYS=m
++CONFIG_PATA_RDC=m
++CONFIG_PATA_SCH=m
++CONFIG_PATA_SERVERWORKS=m
++CONFIG_PATA_SIL680=m
++CONFIG_PATA_SIS=y
++CONFIG_PATA_TOSHIBA=m
++CONFIG_PATA_TRIFLEX=m
++CONFIG_PATA_VIA=m
++CONFIG_PATA_WINBOND=m
++
++#
++# PIO-only SFF controllers
++#
++CONFIG_PATA_CMD640_PCI=m
++CONFIG_PATA_MPIIX=m
++CONFIG_PATA_NS87410=m
++CONFIG_PATA_OPTI=m
++CONFIG_PATA_PCMCIA=m
++CONFIG_PATA_PLATFORM=m
++CONFIG_PATA_RZ1000=m
++
++#
++# Generic fallback / legacy drivers
++#
++CONFIG_PATA_ACPI=m
++CONFIG_ATA_GENERIC=y
++CONFIG_PATA_LEGACY=m
++CONFIG_MD=y
++CONFIG_BLK_DEV_MD=y
++CONFIG_MD_AUTODETECT=y
++CONFIG_MD_LINEAR=m
++CONFIG_MD_RAID0=m
++CONFIG_MD_RAID1=m
++CONFIG_MD_RAID10=m
++CONFIG_MD_RAID456=m
++CONFIG_MD_MULTIPATH=m
++CONFIG_MD_FAULTY=m
++CONFIG_MD_CLUSTER=m
++CONFIG_BCACHE=m
++# CONFIG_BCACHE_DEBUG is not set
++# CONFIG_BCACHE_CLOSURES_DEBUG is not set
++CONFIG_BLK_DEV_DM_BUILTIN=y
++CONFIG_BLK_DEV_DM=y
++# CONFIG_DM_MQ_DEFAULT is not set
++# CONFIG_DM_DEBUG is not set
++CONFIG_DM_BUFIO=m
++# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set
++CONFIG_DM_BIO_PRISON=m
++CONFIG_DM_PERSISTENT_DATA=m
++# CONFIG_DM_UNSTRIPED is not set
++CONFIG_DM_CRYPT=m
++CONFIG_DM_SNAPSHOT=m
++CONFIG_DM_THIN_PROVISIONING=m
++CONFIG_DM_CACHE=m
++CONFIG_DM_CACHE_SMQ=m
++CONFIG_DM_ERA=m
++CONFIG_DM_MIRROR=m
++CONFIG_DM_LOG_USERSPACE=m
++CONFIG_DM_RAID=m
++CONFIG_DM_ZERO=m
++CONFIG_DM_MULTIPATH=m
++CONFIG_DM_MULTIPATH_QL=m
++CONFIG_DM_MULTIPATH_ST=m
++CONFIG_DM_DELAY=m
++CONFIG_DM_UEVENT=y
++CONFIG_DM_FLAKEY=m
++CONFIG_DM_VERITY=m
++# CONFIG_DM_VERITY_FEC is not set
++CONFIG_DM_SWITCH=m
++CONFIG_DM_LOG_WRITES=m
++# CONFIG_DM_INTEGRITY is not set
++CONFIG_TARGET_CORE=m
++CONFIG_TCM_IBLOCK=m
++CONFIG_TCM_FILEIO=m
++CONFIG_TCM_PSCSI=m
++CONFIG_TCM_USER2=m
++CONFIG_LOOPBACK_TARGET=m
++CONFIG_TCM_FC=m
++CONFIG_ISCSI_TARGET=m
++# CONFIG_ISCSI_TARGET_CXGB4 is not set
++CONFIG_SBP_TARGET=m
++CONFIG_FUSION=y
++CONFIG_FUSION_SPI=m
++CONFIG_FUSION_FC=m
++CONFIG_FUSION_SAS=m
++CONFIG_FUSION_MAX_SGE=128
++CONFIG_FUSION_CTL=m
++CONFIG_FUSION_LAN=m
++CONFIG_FUSION_LOGGING=y
++
++#
++# IEEE 1394 (FireWire) support
++#
++CONFIG_FIREWIRE=m
++CONFIG_FIREWIRE_OHCI=m
++CONFIG_FIREWIRE_SBP2=m
++CONFIG_FIREWIRE_NET=m
++CONFIG_FIREWIRE_NOSY=m
++CONFIG_MACINTOSH_DRIVERS=y
++CONFIG_MAC_EMUMOUSEBTN=m
++CONFIG_NETDEVICES=y
++CONFIG_MII=m
++CONFIG_NET_CORE=y
++CONFIG_BONDING=m
++CONFIG_DUMMY=m
++CONFIG_EQUALIZER=m
++CONFIG_NET_FC=y
++CONFIG_IFB=m
++CONFIG_NET_TEAM=m
++CONFIG_NET_TEAM_MODE_BROADCAST=m
++CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
++CONFIG_NET_TEAM_MODE_RANDOM=m
++CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
++CONFIG_NET_TEAM_MODE_LOADBALANCE=m
++CONFIG_MACVLAN=m
++CONFIG_MACVTAP=m
++CONFIG_IPVLAN=m
++# CONFIG_IPVTAP is not set
++CONFIG_VXLAN=m
++CONFIG_GENEVE=m
++# CONFIG_GTP is not set
++# CONFIG_MACSEC is not set
++CONFIG_NETCONSOLE=y
++CONFIG_NETPOLL=y
++CONFIG_NET_POLL_CONTROLLER=y
++CONFIG_NTB_NETDEV=m
++CONFIG_RIONET=m
++CONFIG_RIONET_TX_SIZE=128
++CONFIG_RIONET_RX_SIZE=128
++CONFIG_TUN=y
++CONFIG_TAP=m
++# CONFIG_TUN_VNET_CROSS_LE is not set
++CONFIG_VETH=m
++CONFIG_VIRTIO_NET=y
++CONFIG_NLMON=m
++CONFIG_NET_VRF=m
++CONFIG_SUNGEM_PHY=m
++CONFIG_ARCNET=m
++CONFIG_ARCNET_1201=m
++CONFIG_ARCNET_1051=m
++CONFIG_ARCNET_RAW=m
++CONFIG_ARCNET_CAP=m
++CONFIG_ARCNET_COM90xx=m
++CONFIG_ARCNET_COM90xxIO=m
++CONFIG_ARCNET_RIM_I=m
++CONFIG_ARCNET_COM20020=m
++CONFIG_ARCNET_COM20020_PCI=m
++CONFIG_ARCNET_COM20020_CS=m
++CONFIG_ATM_DRIVERS=y
++CONFIG_ATM_DUMMY=m
++CONFIG_ATM_TCP=m
++CONFIG_ATM_LANAI=m
++CONFIG_ATM_ENI=m
++# CONFIG_ATM_ENI_DEBUG is not set
++# CONFIG_ATM_ENI_TUNE_BURST is not set
++CONFIG_ATM_FIRESTREAM=m
++CONFIG_ATM_ZATM=m
++# CONFIG_ATM_ZATM_DEBUG is not set
++CONFIG_ATM_NICSTAR=m
++# CONFIG_ATM_NICSTAR_USE_SUNI is not set
++# CONFIG_ATM_NICSTAR_USE_IDT77105 is not set
++CONFIG_ATM_IDT77252=m
++# CONFIG_ATM_IDT77252_DEBUG is not set
++# CONFIG_ATM_IDT77252_RCV_ALL is not set
++CONFIG_ATM_IDT77252_USE_SUNI=y
++CONFIG_ATM_AMBASSADOR=m
++# CONFIG_ATM_AMBASSADOR_DEBUG is not set
++CONFIG_ATM_HORIZON=m
++# CONFIG_ATM_HORIZON_DEBUG is not set
++CONFIG_ATM_IA=m
++# CONFIG_ATM_IA_DEBUG is not set
++CONFIG_ATM_FORE200E=m
++# CONFIG_ATM_FORE200E_USE_TASKLET is not set
++CONFIG_ATM_FORE200E_TX_RETRY=16
++CONFIG_ATM_FORE200E_DEBUG=0
++CONFIG_ATM_HE=m
++CONFIG_ATM_HE_USE_SUNI=y
++CONFIG_ATM_SOLOS=m
++
++#
++# CAIF transport drivers
++#
++CONFIG_CAIF_TTY=m
++CONFIG_CAIF_SPI_SLAVE=m
++# CONFIG_CAIF_SPI_SYNC is not set
++CONFIG_CAIF_HSI=m
++CONFIG_CAIF_VIRTIO=m
++
++#
++# Distributed Switch Architecture drivers
++#
++CONFIG_ETHERNET=y
++CONFIG_MDIO=m
++CONFIG_NET_VENDOR_3COM=y
++CONFIG_PCMCIA_3C574=m
++CONFIG_PCMCIA_3C589=m
++CONFIG_VORTEX=m
++CONFIG_TYPHOON=m
++CONFIG_NET_VENDOR_ADAPTEC=y
++CONFIG_ADAPTEC_STARFIRE=m
++CONFIG_NET_VENDOR_AGERE=y
++CONFIG_ET131X=m
++CONFIG_NET_VENDOR_ALACRITECH=y
++CONFIG_SLICOSS=m
++CONFIG_NET_VENDOR_ALTEON=y
++CONFIG_ACENIC=m
++# CONFIG_ACENIC_OMIT_TIGON_I is not set
++CONFIG_ALTERA_TSE=m
++CONFIG_NET_VENDOR_AMAZON=y
++# CONFIG_ENA_ETHERNET is not set
++CONFIG_NET_VENDOR_AMD=y
++CONFIG_AMD8111_ETH=m
++CONFIG_PCNET32=m
++CONFIG_PCMCIA_NMCLAN=m
++# CONFIG_AMD_XGBE is not set
++# CONFIG_AMD_XGBE_HAVE_ECC is not set
++CONFIG_NET_VENDOR_AQUANTIA=y
++# CONFIG_AQTION is not set
++CONFIG_NET_VENDOR_ARC=y
++CONFIG_NET_VENDOR_ATHEROS=y
++CONFIG_ATL2=m
++CONFIG_ATL1=m
++CONFIG_ATL1E=m
++CONFIG_ATL1C=m
++CONFIG_ALX=m
++CONFIG_NET_VENDOR_AURORA=y
++CONFIG_AURORA_NB8800=m
++CONFIG_NET_CADENCE=y
++CONFIG_MACB=m
++CONFIG_MACB_USE_HWSTAMP=y
++# CONFIG_MACB_PCI is not set
++CONFIG_NET_VENDOR_BROADCOM=y
++CONFIG_B44=m
++CONFIG_B44_PCI_AUTOSELECT=y
++CONFIG_B44_PCICORE_AUTOSELECT=y
++CONFIG_B44_PCI=y
++CONFIG_BNX2=m
++CONFIG_CNIC=m
++CONFIG_TIGON3=m
++CONFIG_TIGON3_HWMON=y
++CONFIG_BNX2X=m
++CONFIG_BNX2X_SRIOV=y
++CONFIG_BNXT=m
++CONFIG_BNXT_SRIOV=y
++CONFIG_BNXT_FLOWER_OFFLOAD=y
++# CONFIG_BNXT_DCB is not set
++CONFIG_NET_VENDOR_BROCADE=y
++CONFIG_BNA=m
++CONFIG_NET_VENDOR_CAVIUM=y
++CONFIG_THUNDER_NIC_PF=m
++CONFIG_THUNDER_NIC_VF=m
++CONFIG_THUNDER_NIC_BGX=m
++CONFIG_THUNDER_NIC_RGX=m
++CONFIG_CAVIUM_PTP=y
++CONFIG_LIQUIDIO=m
++# CONFIG_LIQUIDIO_VF is not set
++CONFIG_NET_VENDOR_CHELSIO=y
++CONFIG_CHELSIO_T1=m
++CONFIG_CHELSIO_T1_1G=y
++CONFIG_CHELSIO_T3=m
++CONFIG_CHELSIO_T4=m
++CONFIG_CHELSIO_T4_DCB=y
++CONFIG_CHELSIO_T4_FCOE=y
++CONFIG_CHELSIO_T4VF=m
++CONFIG_CHELSIO_LIB=m
++CONFIG_NET_VENDOR_CISCO=y
++CONFIG_ENIC=m
++CONFIG_NET_VENDOR_CORTINA=y
++CONFIG_CX_ECAT=m
++CONFIG_DNET=m
++CONFIG_NET_VENDOR_DEC=y
++CONFIG_NET_TULIP=y
++CONFIG_DE2104X=m
++CONFIG_DE2104X_DSL=0
++CONFIG_TULIP=m
++# CONFIG_TULIP_MWI is not set
++# CONFIG_TULIP_MMIO is not set
++# CONFIG_TULIP_NAPI is not set
++CONFIG_DE4X5=m
++CONFIG_WINBOND_840=m
++CONFIG_DM9102=m
++CONFIG_ULI526X=m
++CONFIG_PCMCIA_XIRCOM=m
++CONFIG_NET_VENDOR_DLINK=y
++CONFIG_DL2K=m
++CONFIG_SUNDANCE=m
++# CONFIG_SUNDANCE_MMIO is not set
++CONFIG_NET_VENDOR_EMULEX=y
++CONFIG_BE2NET=m
++CONFIG_BE2NET_HWMON=y
++CONFIG_NET_VENDOR_EZCHIP=y
++CONFIG_NET_VENDOR_EXAR=y
++CONFIG_S2IO=m
++CONFIG_VXGE=m
++# CONFIG_VXGE_DEBUG_TRACE_ALL is not set
++CONFIG_NET_VENDOR_FUJITSU=y
++CONFIG_PCMCIA_FMVJ18X=m
++CONFIG_NET_VENDOR_HP=y
++CONFIG_HP100=m
++CONFIG_NET_VENDOR_HUAWEI=y
++# CONFIG_HINIC is not set
++CONFIG_NET_VENDOR_INTEL=y
++CONFIG_E100=m
++CONFIG_E1000=m
++CONFIG_E1000E=m
++CONFIG_E1000E_HWTS=y
++CONFIG_IGB=m
++CONFIG_IGB_HWMON=y
++CONFIG_IGB_DCA=y
++CONFIG_IGBVF=m
++CONFIG_IXGB=m
++CONFIG_IXGBE=m
++CONFIG_IXGBE_HWMON=y
++CONFIG_IXGBE_DCA=y
++CONFIG_IXGBE_DCB=y
++CONFIG_IXGBEVF=m
++CONFIG_I40E=m
++CONFIG_I40E_DCB=y
++CONFIG_I40EVF=m
++CONFIG_FM10K=m
++CONFIG_NET_VENDOR_I825XX=y
++CONFIG_JME=m
++CONFIG_NET_VENDOR_MARVELL=y
++CONFIG_MVMDIO=m
++CONFIG_SKGE=m
++# CONFIG_SKGE_DEBUG is not set
++CONFIG_SKGE_GENESIS=y
++CONFIG_SKY2=m
++# CONFIG_SKY2_DEBUG is not set
++CONFIG_NET_VENDOR_MELLANOX=y
++CONFIG_MLX4_EN=m
++CONFIG_MLX4_EN_DCB=y
++CONFIG_MLX4_CORE=m
++CONFIG_MLX4_DEBUG=y
++CONFIG_MLX4_CORE_GEN2=y
++CONFIG_MLX5_CORE=m
++# CONFIG_MLX5_FPGA is not set
++CONFIG_MLX5_CORE_EN=y
++CONFIG_MLX5_MPFS=y
++CONFIG_MLX5_ESWITCH=y
++CONFIG_MLX5_CORE_EN_DCB=y
++# CONFIG_MLX5_CORE_IPOIB is not set
++CONFIG_MLXSW_CORE=m
++CONFIG_MLXSW_CORE_HWMON=y
++CONFIG_MLXSW_CORE_THERMAL=y
++CONFIG_MLXSW_PCI=m
++CONFIG_MLXSW_I2C=m
++CONFIG_MLXSW_MINIMAL=m
++# CONFIG_MLXFW is not set
++CONFIG_NET_VENDOR_MICREL=y
++CONFIG_KS8842=m
++CONFIG_KS8851=m
++CONFIG_KS8851_MLL=m
++CONFIG_KSZ884X_PCI=m
++CONFIG_NET_VENDOR_MICROCHIP=y
++CONFIG_ENC28J60=m
++# CONFIG_ENC28J60_WRITEVERIFY is not set
++CONFIG_ENCX24J600=m
++CONFIG_NET_VENDOR_MYRI=y
++CONFIG_MYRI10GE=m
++CONFIG_MYRI10GE_DCA=y
++CONFIG_FEALNX=m
++CONFIG_NET_VENDOR_NATSEMI=y
++CONFIG_NATSEMI=m
++CONFIG_NS83820=m
++CONFIG_NET_VENDOR_NETRONOME=y
++# CONFIG_NFP is not set
++CONFIG_NET_VENDOR_8390=y
++CONFIG_PCMCIA_AXNET=m
++CONFIG_NE2K_PCI=m
++CONFIG_PCMCIA_PCNET=m
++CONFIG_NET_VENDOR_NVIDIA=y
++CONFIG_FORCEDETH=m
++CONFIG_NET_VENDOR_OKI=y
++CONFIG_ETHOC=m
++CONFIG_NET_PACKET_ENGINE=y
++CONFIG_HAMACHI=m
++CONFIG_YELLOWFIN=m
++CONFIG_NET_VENDOR_QLOGIC=y
++CONFIG_QLA3XXX=m
++CONFIG_QLCNIC=m
++CONFIG_QLCNIC_SRIOV=y
++CONFIG_QLCNIC_DCB=y
++CONFIG_QLCNIC_HWMON=y
++CONFIG_QLGE=m
++CONFIG_NETXEN_NIC=m
++CONFIG_QED=m
++CONFIG_QED_SRIOV=y
++CONFIG_QEDE=m
++CONFIG_NET_VENDOR_QUALCOMM=y
++# CONFIG_QCOM_EMAC is not set
++# CONFIG_RMNET is not set
++CONFIG_NET_VENDOR_REALTEK=y
++CONFIG_ATP=m
++CONFIG_8139CP=m
++CONFIG_8139TOO=m
++CONFIG_8139TOO_PIO=y
++# CONFIG_8139TOO_TUNE_TWISTER is not set
++CONFIG_8139TOO_8129=y
++# CONFIG_8139_OLD_RX_RESET is not set
++CONFIG_R8169=m
++CONFIG_NET_VENDOR_RENESAS=y
++CONFIG_NET_VENDOR_RDC=y
++CONFIG_R6040=m
++CONFIG_NET_VENDOR_ROCKER=y
++CONFIG_NET_VENDOR_SAMSUNG=y
++CONFIG_SXGBE_ETH=m
++CONFIG_NET_VENDOR_SEEQ=y
++CONFIG_NET_VENDOR_SILAN=y
++CONFIG_SC92031=m
++CONFIG_NET_VENDOR_SIS=y
++CONFIG_SIS900=m
++CONFIG_SIS190=m
++CONFIG_NET_VENDOR_SOLARFLARE=y
++CONFIG_SFC=m
++CONFIG_SFC_MTD=y
++CONFIG_SFC_MCDI_MON=y
++CONFIG_SFC_SRIOV=y
++CONFIG_SFC_MCDI_LOGGING=y
++# CONFIG_SFC_FALCON is not set
++CONFIG_NET_VENDOR_SMSC=y
++CONFIG_PCMCIA_SMC91C92=m
++CONFIG_EPIC100=m
++CONFIG_SMSC911X=m
++# CONFIG_SMSC911X_ARCH_HOOKS is not set
++CONFIG_SMSC9420=m
++CONFIG_NET_VENDOR_SOCIONEXT=y
++CONFIG_NET_VENDOR_STMICRO=y
++CONFIG_STMMAC_ETH=m
++CONFIG_STMMAC_PLATFORM=m
++CONFIG_DWMAC_GENERIC=m
++# CONFIG_STMMAC_PCI is not set
++CONFIG_NET_VENDOR_SUN=y
++CONFIG_HAPPYMEAL=m
++CONFIG_SUNGEM=m
++CONFIG_CASSINI=m
++CONFIG_NIU=m
++CONFIG_NET_VENDOR_TEHUTI=y
++CONFIG_TEHUTI=m
++CONFIG_NET_VENDOR_TI=y
++CONFIG_TI_CPSW_ALE=m
++CONFIG_TLAN=m
++CONFIG_NET_VENDOR_VIA=y
++CONFIG_VIA_RHINE=m
++CONFIG_VIA_RHINE_MMIO=y
++CONFIG_VIA_VELOCITY=m
++CONFIG_NET_VENDOR_WIZNET=y
++CONFIG_WIZNET_W5100=m
++CONFIG_WIZNET_W5300=m
++# CONFIG_WIZNET_BUS_DIRECT is not set
++# CONFIG_WIZNET_BUS_INDIRECT is not set
++CONFIG_WIZNET_BUS_ANY=y
++# CONFIG_WIZNET_W5100_SPI is not set
++CONFIG_NET_VENDOR_XIRCOM=y
++CONFIG_PCMCIA_XIRC2PS=m
++CONFIG_NET_VENDOR_SYNOPSYS=y
++# CONFIG_DWC_XLGMAC is not set
++CONFIG_FDDI=y
++CONFIG_DEFXX=m
++# CONFIG_DEFXX_MMIO is not set
++CONFIG_SKFP=m
++# CONFIG_HIPPI is not set
++CONFIG_NET_SB1000=m
++CONFIG_MDIO_DEVICE=y
++CONFIG_MDIO_BUS=y
++CONFIG_MDIO_BITBANG=m
++CONFIG_MDIO_CAVIUM=m
++CONFIG_MDIO_GPIO=m
++CONFIG_MDIO_THUNDER=m
++CONFIG_PHYLIB=y
++CONFIG_SWPHY=y
++# CONFIG_LED_TRIGGER_PHY is not set
++
++#
++# MII PHY device drivers
++#
++CONFIG_AMD_PHY=m
++CONFIG_AQUANTIA_PHY=m
++CONFIG_AT803X_PHY=m
++CONFIG_BCM7XXX_PHY=m
++CONFIG_BCM87XX_PHY=m
++CONFIG_BCM_NET_PHYLIB=m
++CONFIG_BROADCOM_PHY=m
++CONFIG_CICADA_PHY=m
++# CONFIG_CORTINA_PHY is not set
++CONFIG_DAVICOM_PHY=m
++# CONFIG_DP83822_PHY is not set
++CONFIG_DP83848_PHY=m
++CONFIG_DP83867_PHY=m
++CONFIG_FIXED_PHY=y
++CONFIG_ICPLUS_PHY=m
++# CONFIG_INTEL_XWAY_PHY is not set
++CONFIG_LSI_ET1011C_PHY=m
++CONFIG_LXT_PHY=m
++CONFIG_MARVELL_PHY=m
++# CONFIG_MARVELL_10G_PHY is not set
++CONFIG_MICREL_PHY=m
++CONFIG_MICROCHIP_PHY=m
++# CONFIG_MICROSEMI_PHY is not set
++CONFIG_NATIONAL_PHY=m
++CONFIG_QSEMI_PHY=m
++CONFIG_REALTEK_PHY=m
++# CONFIG_RENESAS_PHY is not set
++# CONFIG_ROCKCHIP_PHY is not set
++CONFIG_SMSC_PHY=m
++CONFIG_STE10XP=m
++CONFIG_TERANETICS_PHY=m
++CONFIG_VITESSE_PHY=m
++# CONFIG_XILINX_GMII2RGMII is not set
++CONFIG_MICREL_KS8995MA=m
++CONFIG_PLIP=m
++CONFIG_PPP=y
++CONFIG_PPP_BSDCOMP=m
++CONFIG_PPP_DEFLATE=m
++CONFIG_PPP_FILTER=y
++CONFIG_PPP_MPPE=m
++CONFIG_PPP_MULTILINK=y
++CONFIG_PPPOATM=m
++CONFIG_PPPOE=m
++CONFIG_PPTP=m
++CONFIG_PPPOL2TP=m
++CONFIG_PPP_ASYNC=m
++CONFIG_PPP_SYNC_TTY=m
++CONFIG_SLIP=m
++CONFIG_SLHC=y
++CONFIG_SLIP_COMPRESSED=y
++CONFIG_SLIP_SMART=y
++CONFIG_SLIP_MODE_SLIP6=y
++CONFIG_USB_NET_DRIVERS=m
++CONFIG_USB_CATC=m
++CONFIG_USB_KAWETH=m
++CONFIG_USB_PEGASUS=m
++CONFIG_USB_RTL8150=m
++CONFIG_USB_RTL8152=m
++CONFIG_USB_LAN78XX=m
++CONFIG_USB_USBNET=m
++CONFIG_USB_NET_AX8817X=m
++CONFIG_USB_NET_AX88179_178A=m
++CONFIG_USB_NET_CDCETHER=m
++CONFIG_USB_NET_CDC_EEM=m
++CONFIG_USB_NET_CDC_NCM=m
++CONFIG_USB_NET_HUAWEI_CDC_NCM=m
++CONFIG_USB_NET_CDC_MBIM=m
++CONFIG_USB_NET_DM9601=m
++CONFIG_USB_NET_SR9700=m
++CONFIG_USB_NET_SR9800=m
++CONFIG_USB_NET_SMSC75XX=m
++CONFIG_USB_NET_SMSC95XX=m
++CONFIG_USB_NET_GL620A=m
++CONFIG_USB_NET_NET1080=m
++CONFIG_USB_NET_PLUSB=m
++CONFIG_USB_NET_MCS7830=m
++CONFIG_USB_NET_RNDIS_HOST=m
++CONFIG_USB_NET_CDC_SUBSET_ENABLE=m
++CONFIG_USB_NET_CDC_SUBSET=m
++CONFIG_USB_ALI_M5632=y
++CONFIG_USB_AN2720=y
++CONFIG_USB_BELKIN=y
++CONFIG_USB_ARMLINUX=y
++CONFIG_USB_EPSON2888=y
++CONFIG_USB_KC2190=y
++CONFIG_USB_NET_ZAURUS=m
++CONFIG_USB_NET_CX82310_ETH=m
++CONFIG_USB_NET_KALMIA=m
++CONFIG_USB_NET_QMI_WWAN=m
++CONFIG_USB_HSO=m
++CONFIG_USB_NET_INT51X1=m
++CONFIG_USB_CDC_PHONET=m
++CONFIG_USB_IPHETH=m
++CONFIG_USB_SIERRA_NET=m
++CONFIG_USB_VL600=m
++CONFIG_USB_NET_CH9200=m
++CONFIG_WLAN=y
++# CONFIG_WIRELESS_WDS is not set
++CONFIG_WLAN_VENDOR_ADMTEK=y
++CONFIG_ADM8211=m
++CONFIG_ATH_COMMON=m
++CONFIG_WLAN_VENDOR_ATH=y
++# CONFIG_ATH_DEBUG is not set
++CONFIG_ATH5K=m
++# CONFIG_ATH5K_DEBUG is not set
++# CONFIG_ATH5K_TRACER is not set
++CONFIG_ATH5K_PCI=y
++CONFIG_ATH9K_HW=m
++CONFIG_ATH9K_COMMON=m
++CONFIG_ATH9K_COMMON_DEBUG=y
++CONFIG_ATH9K_BTCOEX_SUPPORT=y
++CONFIG_ATH9K=m
++CONFIG_ATH9K_PCI=y
++CONFIG_ATH9K_AHB=y
++CONFIG_ATH9K_DEBUGFS=y
++CONFIG_ATH9K_STATION_STATISTICS=y
++# CONFIG_ATH9K_DYNACK is not set
++CONFIG_ATH9K_WOW=y
++CONFIG_ATH9K_RFKILL=y
++CONFIG_ATH9K_CHANNEL_CONTEXT=y
++CONFIG_ATH9K_PCOEM=y
++CONFIG_ATH9K_HTC=m
++CONFIG_ATH9K_HTC_DEBUGFS=y
++CONFIG_ATH9K_HWRNG=y
++# CONFIG_ATH9K_COMMON_SPECTRAL is not set
++CONFIG_CARL9170=m
++CONFIG_CARL9170_LEDS=y
++# CONFIG_CARL9170_DEBUGFS is not set
++CONFIG_CARL9170_WPC=y
++CONFIG_CARL9170_HWRNG=y
++CONFIG_ATH6KL=m
++CONFIG_ATH6KL_SDIO=m
++CONFIG_ATH6KL_USB=m
++# CONFIG_ATH6KL_DEBUG is not set
++# CONFIG_ATH6KL_TRACING is not set
++CONFIG_AR5523=m
++CONFIG_WIL6210=m
++CONFIG_WIL6210_ISR_COR=y
++CONFIG_WIL6210_TRACING=y
++CONFIG_WIL6210_DEBUGFS=y
++CONFIG_ATH10K=m
++CONFIG_ATH10K_PCI=m
++# CONFIG_ATH10K_SDIO is not set
++# CONFIG_ATH10K_USB is not set
++# CONFIG_ATH10K_DEBUG is not set
++CONFIG_ATH10K_DEBUGFS=y
++# CONFIG_ATH10K_SPECTRAL is not set
++CONFIG_ATH10K_TRACING=y
++CONFIG_WCN36XX=m
++# CONFIG_WCN36XX_DEBUGFS is not set
++CONFIG_WLAN_VENDOR_ATMEL=y
++CONFIG_ATMEL=m
++CONFIG_PCI_ATMEL=m
++CONFIG_PCMCIA_ATMEL=m
++CONFIG_AT76C50X_USB=m
++CONFIG_WLAN_VENDOR_BROADCOM=y
++CONFIG_B43=m
++CONFIG_B43_BCMA=y
++CONFIG_B43_SSB=y
++CONFIG_B43_BUSES_BCMA_AND_SSB=y
++# CONFIG_B43_BUSES_BCMA is not set
++# CONFIG_B43_BUSES_SSB is not set
++CONFIG_B43_PCI_AUTOSELECT=y
++CONFIG_B43_PCICORE_AUTOSELECT=y
++# CONFIG_B43_SDIO is not set
++CONFIG_B43_BCMA_PIO=y
++CONFIG_B43_PIO=y
++CONFIG_B43_PHY_G=y
++CONFIG_B43_PHY_N=y
++CONFIG_B43_PHY_LP=y
++CONFIG_B43_PHY_HT=y
++CONFIG_B43_LEDS=y
++CONFIG_B43_HWRNG=y
++# CONFIG_B43_DEBUG is not set
++CONFIG_B43LEGACY=m
++CONFIG_B43LEGACY_PCI_AUTOSELECT=y
++CONFIG_B43LEGACY_PCICORE_AUTOSELECT=y
++CONFIG_B43LEGACY_LEDS=y
++CONFIG_B43LEGACY_HWRNG=y
++# CONFIG_B43LEGACY_DEBUG is not set
++CONFIG_B43LEGACY_DMA=y
++CONFIG_B43LEGACY_PIO=y
++CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y
++# CONFIG_B43LEGACY_DMA_MODE is not set
++# CONFIG_B43LEGACY_PIO_MODE is not set
++CONFIG_BRCMUTIL=m
++CONFIG_BRCMSMAC=m
++CONFIG_BRCMFMAC=m
++CONFIG_BRCMFMAC_PROTO_BCDC=y
++CONFIG_BRCMFMAC_PROTO_MSGBUF=y
++CONFIG_BRCMFMAC_SDIO=y
++CONFIG_BRCMFMAC_USB=y
++CONFIG_BRCMFMAC_PCIE=y
++CONFIG_BRCM_TRACING=y
++# CONFIG_BRCMDBG is not set
++CONFIG_WLAN_VENDOR_CISCO=y
++CONFIG_AIRO=m
++CONFIG_AIRO_CS=m
++CONFIG_WLAN_VENDOR_INTEL=y
++CONFIG_IPW2100=m
++CONFIG_IPW2100_MONITOR=y
++# CONFIG_IPW2100_DEBUG is not set
++CONFIG_IPW2200=m
++CONFIG_IPW2200_MONITOR=y
++CONFIG_IPW2200_RADIOTAP=y
++CONFIG_IPW2200_PROMISCUOUS=y
++CONFIG_IPW2200_QOS=y
++# CONFIG_IPW2200_DEBUG is not set
++CONFIG_LIBIPW=m
++# CONFIG_LIBIPW_DEBUG is not set
++CONFIG_IWLEGACY=m
++CONFIG_IWL4965=m
++CONFIG_IWL3945=m
++
++#
++# iwl3945 / iwl4965 Debugging Options
++#
++# CONFIG_IWLEGACY_DEBUG is not set
++CONFIG_IWLEGACY_DEBUGFS=y
++CONFIG_IWLWIFI=m
++CONFIG_IWLWIFI_LEDS=y
++CONFIG_IWLDVM=m
++CONFIG_IWLMVM=m
++CONFIG_IWLWIFI_OPMODE_MODULAR=y
++# CONFIG_IWLWIFI_BCAST_FILTERING is not set
++# CONFIG_IWLWIFI_PCIE_RTPM is not set
++
++#
++# Debugging Options
++#
++# CONFIG_IWLWIFI_DEBUG is not set
++CONFIG_IWLWIFI_DEBUGFS=y
++CONFIG_IWLWIFI_DEVICE_TRACING=y
++CONFIG_WLAN_VENDOR_INTERSIL=y
++CONFIG_HOSTAP=m
++CONFIG_HOSTAP_FIRMWARE=y
++CONFIG_HOSTAP_FIRMWARE_NVRAM=y
++CONFIG_HOSTAP_PLX=m
++CONFIG_HOSTAP_PCI=m
++CONFIG_HOSTAP_CS=m
++CONFIG_HERMES=m
++# CONFIG_HERMES_PRISM is not set
++CONFIG_HERMES_CACHE_FW_ON_INIT=y
++CONFIG_PLX_HERMES=m
++CONFIG_TMD_HERMES=m
++CONFIG_NORTEL_HERMES=m
++CONFIG_PCMCIA_HERMES=m
++CONFIG_PCMCIA_SPECTRUM=m
++CONFIG_ORINOCO_USB=m
++CONFIG_P54_COMMON=m
++CONFIG_P54_USB=m
++CONFIG_P54_PCI=m
++CONFIG_P54_SPI=m
++# CONFIG_P54_SPI_DEFAULT_EEPROM is not set
++CONFIG_P54_LEDS=y
++# CONFIG_PRISM54 is not set
++CONFIG_WLAN_VENDOR_MARVELL=y
++CONFIG_LIBERTAS=m
++CONFIG_LIBERTAS_USB=m
++CONFIG_LIBERTAS_CS=m
++CONFIG_LIBERTAS_SDIO=m
++CONFIG_LIBERTAS_SPI=m
++# CONFIG_LIBERTAS_DEBUG is not set
++CONFIG_LIBERTAS_MESH=y
++CONFIG_LIBERTAS_THINFIRM=m
++# CONFIG_LIBERTAS_THINFIRM_DEBUG is not set
++CONFIG_LIBERTAS_THINFIRM_USB=m
++CONFIG_MWIFIEX=m
++CONFIG_MWIFIEX_SDIO=m
++CONFIG_MWIFIEX_PCIE=m
++CONFIG_MWIFIEX_USB=m
++CONFIG_MWL8K=m
++CONFIG_WLAN_VENDOR_MEDIATEK=y
++CONFIG_MT7601U=m
++# CONFIG_MT76x2E is not set
++CONFIG_WLAN_VENDOR_RALINK=y
++CONFIG_RT2X00=m
++CONFIG_RT2400PCI=m
++CONFIG_RT2500PCI=m
++CONFIG_RT61PCI=m
++CONFIG_RT2800PCI=m
++CONFIG_RT2800PCI_RT33XX=y
++CONFIG_RT2800PCI_RT35XX=y
++CONFIG_RT2800PCI_RT53XX=y
++CONFIG_RT2800PCI_RT3290=y
++CONFIG_RT2500USB=m
++CONFIG_RT73USB=m
++CONFIG_RT2800USB=m
++CONFIG_RT2800USB_RT33XX=y
++CONFIG_RT2800USB_RT35XX=y
++CONFIG_RT2800USB_RT3573=y
++CONFIG_RT2800USB_RT53XX=y
++CONFIG_RT2800USB_RT55XX=y
++CONFIG_RT2800USB_UNKNOWN=y
++CONFIG_RT2800_LIB=m
++CONFIG_RT2800_LIB_MMIO=m
++CONFIG_RT2X00_LIB_MMIO=m
++CONFIG_RT2X00_LIB_PCI=m
++CONFIG_RT2X00_LIB_USB=m
++CONFIG_RT2X00_LIB=m
++CONFIG_RT2X00_LIB_FIRMWARE=y
++CONFIG_RT2X00_LIB_CRYPTO=y
++CONFIG_RT2X00_LIB_LEDS=y
++# CONFIG_RT2X00_LIB_DEBUGFS is not set
++# CONFIG_RT2X00_DEBUG is not set
++CONFIG_WLAN_VENDOR_REALTEK=y
++CONFIG_RTL8180=m
++CONFIG_RTL8187=m
++CONFIG_RTL8187_LEDS=y
++CONFIG_RTL_CARDS=m
++CONFIG_RTL8192CE=m
++CONFIG_RTL8192SE=m
++CONFIG_RTL8192DE=m
++CONFIG_RTL8723AE=m
++CONFIG_RTL8723BE=m
++CONFIG_RTL8188EE=m
++CONFIG_RTL8192EE=m
++CONFIG_RTL8821AE=m
++CONFIG_RTL8192CU=m
++CONFIG_RTLWIFI=m
++CONFIG_RTLWIFI_PCI=m
++CONFIG_RTLWIFI_USB=m
++# CONFIG_RTLWIFI_DEBUG is not set
++CONFIG_RTL8192C_COMMON=m
++CONFIG_RTL8723_COMMON=m
++CONFIG_RTLBTCOEXIST=m
++CONFIG_RTL8XXXU=m
++CONFIG_RTL8XXXU_UNTESTED=y
++CONFIG_WLAN_VENDOR_RSI=y
++CONFIG_RSI_91X=m
++# CONFIG_RSI_DEBUGFS is not set
++CONFIG_RSI_SDIO=m
++CONFIG_RSI_USB=m
++CONFIG_WLAN_VENDOR_ST=y
++CONFIG_CW1200=m
++CONFIG_CW1200_WLAN_SDIO=m
++CONFIG_CW1200_WLAN_SPI=m
++CONFIG_WLAN_VENDOR_TI=y
++CONFIG_WL1251=m
++CONFIG_WL1251_SPI=m
++CONFIG_WL1251_SDIO=m
++CONFIG_WL12XX=m
++CONFIG_WL18XX=m
++CONFIG_WLCORE=m
++CONFIG_WLCORE_SDIO=m
++CONFIG_WILINK_PLATFORM_DATA=y
++CONFIG_WLAN_VENDOR_ZYDAS=y
++CONFIG_USB_ZD1201=m
++CONFIG_ZD1211RW=m
++# CONFIG_ZD1211RW_DEBUG is not set
++CONFIG_WLAN_VENDOR_QUANTENNA=y
++# CONFIG_QTNFMAC_PEARL_PCIE is not set
++CONFIG_PCMCIA_RAYCS=m
++CONFIG_PCMCIA_WL3501=m
++CONFIG_MAC80211_HWSIM=m
++CONFIG_USB_NET_RNDIS_WLAN=m
++
++#
++# WiMAX Wireless Broadband devices
++#
++CONFIG_WIMAX_I2400M=m
++CONFIG_WIMAX_I2400M_USB=m
++CONFIG_WIMAX_I2400M_DEBUG_LEVEL=8
++CONFIG_WAN=y
++CONFIG_LANMEDIA=m
++CONFIG_HDLC=m
++CONFIG_HDLC_RAW=m
++CONFIG_HDLC_RAW_ETH=m
++CONFIG_HDLC_CISCO=m
++CONFIG_HDLC_FR=m
++CONFIG_HDLC_PPP=m
++CONFIG_HDLC_X25=m
++CONFIG_PCI200SYN=m
++CONFIG_WANXL=m
++CONFIG_PC300TOO=m
++CONFIG_FARSYNC=m
++CONFIG_DSCC4=m
++CONFIG_DSCC4_PCISYNC=y
++CONFIG_DSCC4_PCI_RST=y
++CONFIG_DLCI=m
++CONFIG_DLCI_MAX=8
++CONFIG_LAPBETHER=m
++CONFIG_X25_ASY=m
++CONFIG_SBNI=m
++# CONFIG_SBNI_MULTILINE is not set
++CONFIG_IEEE802154_DRIVERS=m
++CONFIG_IEEE802154_FAKELB=m
++CONFIG_IEEE802154_AT86RF230=m
++CONFIG_IEEE802154_AT86RF230_DEBUGFS=y
++CONFIG_IEEE802154_MRF24J40=m
++CONFIG_IEEE802154_CC2520=m
++CONFIG_IEEE802154_ATUSB=m
++# CONFIG_IEEE802154_ADF7242 is not set
++# CONFIG_IEEE802154_CA8210 is not set
++CONFIG_XEN_NETDEV_FRONTEND=y
++CONFIG_XEN_NETDEV_BACKEND=m
++CONFIG_VMXNET3=m
++CONFIG_FUJITSU_ES=m
++# CONFIG_THUNDERBOLT_NET is not set
++CONFIG_HYPERV_NET=m
++# CONFIG_NETDEVSIM is not set
++CONFIG_ISDN=y
++CONFIG_ISDN_I4L=m
++CONFIG_ISDN_PPP=y
++CONFIG_ISDN_PPP_VJ=y
++CONFIG_ISDN_MPP=y
++CONFIG_IPPP_FILTER=y
++CONFIG_ISDN_PPP_BSDCOMP=m
++CONFIG_ISDN_AUDIO=y
++CONFIG_ISDN_TTY_FAX=y
++CONFIG_ISDN_X25=y
++
++#
++# ISDN feature submodules
++#
++CONFIG_ISDN_DIVERSION=m
++
++#
++# ISDN4Linux hardware drivers
++#
++
++#
++# Passive cards
++#
++CONFIG_ISDN_DRV_HISAX=m
++
++#
++# D-channel protocol features
++#
++CONFIG_HISAX_EURO=y
++CONFIG_DE_AOC=y
++# CONFIG_HISAX_NO_SENDCOMPLETE is not set
++# CONFIG_HISAX_NO_LLC is not set
++# CONFIG_HISAX_NO_KEYPAD is not set
++CONFIG_HISAX_1TR6=y
++CONFIG_HISAX_NI1=y
++CONFIG_HISAX_MAX_CARDS=8
++
++#
++# HiSax supported cards
++#
++CONFIG_HISAX_16_3=y
++CONFIG_HISAX_TELESPCI=y
++CONFIG_HISAX_S0BOX=y
++CONFIG_HISAX_FRITZPCI=y
++CONFIG_HISAX_AVM_A1_PCMCIA=y
++CONFIG_HISAX_ELSA=y
++CONFIG_HISAX_DIEHLDIVA=y
++CONFIG_HISAX_SEDLBAUER=y
++CONFIG_HISAX_NETJET=y
++CONFIG_HISAX_NETJET_U=y
++CONFIG_HISAX_NICCY=y
++CONFIG_HISAX_BKM_A4T=y
++CONFIG_HISAX_SCT_QUADRO=y
++CONFIG_HISAX_GAZEL=y
++CONFIG_HISAX_HFC_PCI=y
++CONFIG_HISAX_W6692=y
++CONFIG_HISAX_HFC_SX=y
++CONFIG_HISAX_ENTERNOW_PCI=y
++# CONFIG_HISAX_DEBUG is not set
++
++#
++# HiSax PCMCIA card service modules
++#
++CONFIG_HISAX_SEDLBAUER_CS=m
++CONFIG_HISAX_ELSA_CS=m
++CONFIG_HISAX_AVM_A1_CS=m
++CONFIG_HISAX_TELES_CS=m
++
++#
++# HiSax sub driver modules
++#
++CONFIG_HISAX_ST5481=m
++CONFIG_HISAX_HFCUSB=m
++CONFIG_HISAX_HFC4S8S=m
++CONFIG_HISAX_FRITZ_PCIPNP=m
++CONFIG_ISDN_CAPI=m
++CONFIG_CAPI_TRACE=y
++CONFIG_ISDN_CAPI_CAPI20=m
++CONFIG_ISDN_CAPI_MIDDLEWARE=y
++CONFIG_ISDN_CAPI_CAPIDRV=m
++# CONFIG_ISDN_CAPI_CAPIDRV_VERBOSE is not set
++
++#
++# CAPI hardware drivers
++#
++CONFIG_CAPI_AVM=y
++CONFIG_ISDN_DRV_AVMB1_B1PCI=m
++CONFIG_ISDN_DRV_AVMB1_B1PCIV4=y
++CONFIG_ISDN_DRV_AVMB1_B1PCMCIA=m
++CONFIG_ISDN_DRV_AVMB1_AVM_CS=m
++CONFIG_ISDN_DRV_AVMB1_T1PCI=m
++CONFIG_ISDN_DRV_AVMB1_C4=m
++CONFIG_CAPI_EICON=y
++CONFIG_ISDN_DIVAS=m
++CONFIG_ISDN_DIVAS_BRIPCI=y
++CONFIG_ISDN_DIVAS_PRIPCI=y
++CONFIG_ISDN_DIVAS_DIVACAPI=m
++CONFIG_ISDN_DIVAS_USERIDI=m
++CONFIG_ISDN_DIVAS_MAINT=m
++CONFIG_ISDN_DRV_GIGASET=m
++# CONFIG_GIGASET_CAPI is not set
++CONFIG_GIGASET_I4L=y
++# CONFIG_GIGASET_DUMMYLL is not set
++CONFIG_GIGASET_BASE=m
++CONFIG_GIGASET_M105=m
++CONFIG_GIGASET_M101=m
++# CONFIG_GIGASET_DEBUG is not set
++CONFIG_HYSDN=m
++CONFIG_HYSDN_CAPI=y
++CONFIG_MISDN=m
++CONFIG_MISDN_DSP=m
++CONFIG_MISDN_L1OIP=m
++
++#
++# mISDN hardware drivers
++#
++CONFIG_MISDN_HFCPCI=m
++CONFIG_MISDN_HFCMULTI=m
++CONFIG_MISDN_HFCUSB=m
++CONFIG_MISDN_AVMFRITZ=m
++CONFIG_MISDN_SPEEDFAX=m
++CONFIG_MISDN_INFINEON=m
++CONFIG_MISDN_W6692=m
++CONFIG_MISDN_NETJET=m
++CONFIG_MISDN_IPAC=m
++CONFIG_MISDN_ISAR=m
++CONFIG_ISDN_HDLC=m
++CONFIG_NVM=y
++# CONFIG_NVM_DEBUG is not set
++# CONFIG_NVM_PBLK is not set
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++CONFIG_INPUT_LEDS=m
++CONFIG_INPUT_FF_MEMLESS=m
++CONFIG_INPUT_POLLDEV=m
++CONFIG_INPUT_SPARSEKMAP=m
++CONFIG_INPUT_MATRIXKMAP=m
++
++#
++# Userland interfaces
++#
++CONFIG_INPUT_MOUSEDEV=y
++CONFIG_INPUT_MOUSEDEV_PSAUX=y
++CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
++CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
++CONFIG_INPUT_JOYDEV=m
++CONFIG_INPUT_EVDEV=y
++CONFIG_INPUT_EVBUG=m
++
++#
++# Input Device Drivers
++#
++CONFIG_INPUT_KEYBOARD=y
++# CONFIG_KEYBOARD_ADC is not set
++CONFIG_KEYBOARD_ADP5520=m
++CONFIG_KEYBOARD_ADP5588=m
++CONFIG_KEYBOARD_ADP5589=m
++CONFIG_KEYBOARD_ATKBD=y
++CONFIG_KEYBOARD_QT1070=m
++CONFIG_KEYBOARD_QT2160=m
++# CONFIG_KEYBOARD_DLINK_DIR685 is not set
++CONFIG_KEYBOARD_LKKBD=m
++CONFIG_KEYBOARD_GPIO=m
++CONFIG_KEYBOARD_GPIO_POLLED=m
++CONFIG_KEYBOARD_TCA6416=m
++CONFIG_KEYBOARD_TCA8418=m
++CONFIG_KEYBOARD_MATRIX=m
++CONFIG_KEYBOARD_LM8323=m
++CONFIG_KEYBOARD_LM8333=m
++CONFIG_KEYBOARD_MAX7359=m
++CONFIG_KEYBOARD_MCS=m
++CONFIG_KEYBOARD_MPR121=m
++CONFIG_KEYBOARD_NEWTON=m
++CONFIG_KEYBOARD_OPENCORES=m
++CONFIG_KEYBOARD_SAMSUNG=m
++CONFIG_KEYBOARD_STOWAWAY=m
++CONFIG_KEYBOARD_SUNKBD=m
++# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set
++CONFIG_KEYBOARD_TWL4030=m
++CONFIG_KEYBOARD_XTKBD=m
++CONFIG_KEYBOARD_CROS_EC=m
++CONFIG_INPUT_MOUSE=y
++CONFIG_MOUSE_PS2=m
++CONFIG_MOUSE_PS2_ALPS=y
++CONFIG_MOUSE_PS2_BYD=y
++CONFIG_MOUSE_PS2_LOGIPS2PP=y
++CONFIG_MOUSE_PS2_SYNAPTICS=y
++CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS=y
++CONFIG_MOUSE_PS2_CYPRESS=y
++CONFIG_MOUSE_PS2_LIFEBOOK=y
++CONFIG_MOUSE_PS2_TRACKPOINT=y
++CONFIG_MOUSE_PS2_ELANTECH=y
++CONFIG_MOUSE_PS2_SENTELIC=y
++CONFIG_MOUSE_PS2_TOUCHKIT=y
++CONFIG_MOUSE_PS2_FOCALTECH=y
++CONFIG_MOUSE_PS2_VMMOUSE=y
++CONFIG_MOUSE_PS2_SMBUS=y
++CONFIG_MOUSE_SERIAL=m
++CONFIG_MOUSE_APPLETOUCH=m
++CONFIG_MOUSE_BCM5974=m
++CONFIG_MOUSE_CYAPA=m
++CONFIG_MOUSE_ELAN_I2C=m
++CONFIG_MOUSE_ELAN_I2C_I2C=y
++CONFIG_MOUSE_ELAN_I2C_SMBUS=y
++CONFIG_MOUSE_VSXXXAA=m
++CONFIG_MOUSE_GPIO=m
++CONFIG_MOUSE_SYNAPTICS_I2C=m
++CONFIG_MOUSE_SYNAPTICS_USB=m
++CONFIG_INPUT_JOYSTICK=y
++CONFIG_JOYSTICK_ANALOG=m
++CONFIG_JOYSTICK_A3D=m
++CONFIG_JOYSTICK_ADI=m
++CONFIG_JOYSTICK_COBRA=m
++CONFIG_JOYSTICK_GF2K=m
++CONFIG_JOYSTICK_GRIP=m
++CONFIG_JOYSTICK_GRIP_MP=m
++CONFIG_JOYSTICK_GUILLEMOT=m
++CONFIG_JOYSTICK_INTERACT=m
++CONFIG_JOYSTICK_SIDEWINDER=m
++CONFIG_JOYSTICK_TMDC=m
++CONFIG_JOYSTICK_IFORCE=m
++CONFIG_JOYSTICK_IFORCE_USB=y
++CONFIG_JOYSTICK_IFORCE_232=y
++CONFIG_JOYSTICK_WARRIOR=m
++CONFIG_JOYSTICK_MAGELLAN=m
++CONFIG_JOYSTICK_SPACEORB=m
++CONFIG_JOYSTICK_SPACEBALL=m
++CONFIG_JOYSTICK_STINGER=m
++CONFIG_JOYSTICK_TWIDJOY=m
++CONFIG_JOYSTICK_ZHENHUA=m
++CONFIG_JOYSTICK_DB9=m
++CONFIG_JOYSTICK_GAMECON=m
++CONFIG_JOYSTICK_TURBOGRAFX=m
++CONFIG_JOYSTICK_AS5011=m
++CONFIG_JOYSTICK_JOYDUMP=m
++CONFIG_JOYSTICK_XPAD=m
++CONFIG_JOYSTICK_XPAD_FF=y
++CONFIG_JOYSTICK_XPAD_LEDS=y
++CONFIG_JOYSTICK_WALKERA0701=m
++# CONFIG_JOYSTICK_PSXPAD_SPI is not set
++CONFIG_INPUT_TABLET=y
++CONFIG_TABLET_USB_ACECAD=m
++CONFIG_TABLET_USB_AIPTEK=m
++CONFIG_TABLET_USB_GTCO=m
++CONFIG_TABLET_USB_HANWANG=m
++CONFIG_TABLET_USB_KBTAB=m
++# CONFIG_TABLET_USB_PEGASUS is not set
++CONFIG_TABLET_SERIAL_WACOM4=m
++CONFIG_INPUT_TOUCHSCREEN=y
++CONFIG_TOUCHSCREEN_PROPERTIES=y
++CONFIG_TOUCHSCREEN_88PM860X=m
++CONFIG_TOUCHSCREEN_ADS7846=m
++CONFIG_TOUCHSCREEN_AD7877=m
++CONFIG_TOUCHSCREEN_AD7879=m
++CONFIG_TOUCHSCREEN_AD7879_I2C=m
++CONFIG_TOUCHSCREEN_AD7879_SPI=m
++CONFIG_TOUCHSCREEN_ATMEL_MXT=m
++# CONFIG_TOUCHSCREEN_ATMEL_MXT_T37 is not set
++CONFIG_TOUCHSCREEN_AUO_PIXCIR=m
++CONFIG_TOUCHSCREEN_BU21013=m
++CONFIG_TOUCHSCREEN_CY8CTMG110=m
++CONFIG_TOUCHSCREEN_CYTTSP_CORE=m
++CONFIG_TOUCHSCREEN_CYTTSP_I2C=m
++CONFIG_TOUCHSCREEN_CYTTSP_SPI=m
++CONFIG_TOUCHSCREEN_CYTTSP4_CORE=m
++CONFIG_TOUCHSCREEN_CYTTSP4_I2C=m
++CONFIG_TOUCHSCREEN_CYTTSP4_SPI=m
++CONFIG_TOUCHSCREEN_DA9034=m
++CONFIG_TOUCHSCREEN_DA9052=m
++CONFIG_TOUCHSCREEN_DYNAPRO=m
++CONFIG_TOUCHSCREEN_HAMPSHIRE=m
++CONFIG_TOUCHSCREEN_EETI=m
++# CONFIG_TOUCHSCREEN_EGALAX_SERIAL is not set
++# CONFIG_TOUCHSCREEN_EXC3000 is not set
++CONFIG_TOUCHSCREEN_FUJITSU=m
++CONFIG_TOUCHSCREEN_GOODIX=m
++# CONFIG_TOUCHSCREEN_HIDEEP is not set
++CONFIG_TOUCHSCREEN_ILI210X=m
++# CONFIG_TOUCHSCREEN_S6SY761 is not set
++CONFIG_TOUCHSCREEN_GUNZE=m
++# CONFIG_TOUCHSCREEN_EKTF2127 is not set
++CONFIG_TOUCHSCREEN_ELAN=m
++CONFIG_TOUCHSCREEN_ELO=m
++CONFIG_TOUCHSCREEN_WACOM_W8001=m
++CONFIG_TOUCHSCREEN_WACOM_I2C=m
++CONFIG_TOUCHSCREEN_MAX11801=m
++CONFIG_TOUCHSCREEN_MCS5000=m
++CONFIG_TOUCHSCREEN_MMS114=m
++# CONFIG_TOUCHSCREEN_MELFAS_MIP4 is not set
++CONFIG_TOUCHSCREEN_MTOUCH=m
++CONFIG_TOUCHSCREEN_INEXIO=m
++CONFIG_TOUCHSCREEN_MK712=m
++CONFIG_TOUCHSCREEN_PENMOUNT=m
++CONFIG_TOUCHSCREEN_EDT_FT5X06=m
++CONFIG_TOUCHSCREEN_TOUCHRIGHT=m
++CONFIG_TOUCHSCREEN_TOUCHWIN=m
++CONFIG_TOUCHSCREEN_TI_AM335X_TSC=m
++CONFIG_TOUCHSCREEN_UCB1400=m
++CONFIG_TOUCHSCREEN_PIXCIR=m
++CONFIG_TOUCHSCREEN_WDT87XX_I2C=m
++CONFIG_TOUCHSCREEN_WM831X=m
++CONFIG_TOUCHSCREEN_WM97XX=m
++CONFIG_TOUCHSCREEN_WM9705=y
++CONFIG_TOUCHSCREEN_WM9712=y
++CONFIG_TOUCHSCREEN_WM9713=y
++CONFIG_TOUCHSCREEN_USB_COMPOSITE=m
++CONFIG_TOUCHSCREEN_MC13783=m
++CONFIG_TOUCHSCREEN_USB_EGALAX=y
++CONFIG_TOUCHSCREEN_USB_PANJIT=y
++CONFIG_TOUCHSCREEN_USB_3M=y
++CONFIG_TOUCHSCREEN_USB_ITM=y
++CONFIG_TOUCHSCREEN_USB_ETURBO=y
++CONFIG_TOUCHSCREEN_USB_GUNZE=y
++CONFIG_TOUCHSCREEN_USB_DMC_TSC10=y
++CONFIG_TOUCHSCREEN_USB_IRTOUCH=y
++CONFIG_TOUCHSCREEN_USB_IDEALTEK=y
++CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH=y
++CONFIG_TOUCHSCREEN_USB_GOTOP=y
++CONFIG_TOUCHSCREEN_USB_JASTEC=y
++CONFIG_TOUCHSCREEN_USB_ELO=y
++CONFIG_TOUCHSCREEN_USB_E2I=y
++CONFIG_TOUCHSCREEN_USB_ZYTRONIC=y
++CONFIG_TOUCHSCREEN_USB_ETT_TC45USB=y
++CONFIG_TOUCHSCREEN_USB_NEXIO=y
++CONFIG_TOUCHSCREEN_USB_EASYTOUCH=y
++CONFIG_TOUCHSCREEN_TOUCHIT213=m
++CONFIG_TOUCHSCREEN_TSC_SERIO=m
++CONFIG_TOUCHSCREEN_TSC200X_CORE=m
++CONFIG_TOUCHSCREEN_TSC2004=m
++CONFIG_TOUCHSCREEN_TSC2005=m
++CONFIG_TOUCHSCREEN_TSC2007=m
++# CONFIG_TOUCHSCREEN_TSC2007_IIO is not set
++CONFIG_TOUCHSCREEN_PCAP=m
++# CONFIG_TOUCHSCREEN_RM_TS is not set
++# CONFIG_TOUCHSCREEN_SILEAD is not set
++# CONFIG_TOUCHSCREEN_SIS_I2C is not set
++CONFIG_TOUCHSCREEN_ST1232=m
++# CONFIG_TOUCHSCREEN_STMFTS is not set
++CONFIG_TOUCHSCREEN_SUR40=m
++# CONFIG_TOUCHSCREEN_SURFACE3_SPI is not set
++CONFIG_TOUCHSCREEN_SX8654=m
++CONFIG_TOUCHSCREEN_TPS6507X=m
++# CONFIG_TOUCHSCREEN_ZET6223 is not set
++CONFIG_TOUCHSCREEN_ZFORCE=m
++CONFIG_TOUCHSCREEN_ROHM_BU21023=m
++CONFIG_INPUT_MISC=y
++CONFIG_INPUT_88PM860X_ONKEY=m
++CONFIG_INPUT_88PM80X_ONKEY=m
++CONFIG_INPUT_AD714X=m
++CONFIG_INPUT_AD714X_I2C=m
++CONFIG_INPUT_AD714X_SPI=m
++CONFIG_INPUT_ARIZONA_HAPTICS=m
++CONFIG_INPUT_BMA150=m
++CONFIG_INPUT_E3X0_BUTTON=m
++CONFIG_INPUT_PCSPKR=m
++CONFIG_INPUT_MAX77693_HAPTIC=m
++CONFIG_INPUT_MAX8925_ONKEY=m
++CONFIG_INPUT_MAX8997_HAPTIC=m
++CONFIG_INPUT_MC13783_PWRBUTTON=m
++CONFIG_INPUT_MMA8450=m
++CONFIG_INPUT_APANEL=m
++CONFIG_INPUT_GP2A=m
++CONFIG_INPUT_GPIO_BEEPER=m
++# CONFIG_INPUT_GPIO_DECODER is not set
++CONFIG_INPUT_ATLAS_BTNS=m
++CONFIG_INPUT_ATI_REMOTE2=m
++CONFIG_INPUT_KEYSPAN_REMOTE=m
++CONFIG_INPUT_KXTJ9=m
++# CONFIG_INPUT_KXTJ9_POLLED_MODE is not set
++CONFIG_INPUT_POWERMATE=m
++CONFIG_INPUT_YEALINK=m
++CONFIG_INPUT_CM109=m
++CONFIG_INPUT_REGULATOR_HAPTIC=m
++CONFIG_INPUT_RETU_PWRBUTTON=m
++CONFIG_INPUT_TWL4030_PWRBUTTON=m
++CONFIG_INPUT_TWL4030_VIBRA=m
++CONFIG_INPUT_TWL6040_VIBRA=m
++CONFIG_INPUT_UINPUT=y
++CONFIG_INPUT_PALMAS_PWRBUTTON=m
++CONFIG_INPUT_PCF50633_PMU=m
++CONFIG_INPUT_PCF8574=m
++CONFIG_INPUT_PWM_BEEPER=m
++# CONFIG_INPUT_PWM_VIBRA is not set
++CONFIG_INPUT_GPIO_ROTARY_ENCODER=m
++CONFIG_INPUT_DA9052_ONKEY=m
++CONFIG_INPUT_DA9055_ONKEY=m
++CONFIG_INPUT_DA9063_ONKEY=m
++CONFIG_INPUT_WM831X_ON=m
++CONFIG_INPUT_PCAP=m
++CONFIG_INPUT_ADXL34X=m
++CONFIG_INPUT_ADXL34X_I2C=m
++CONFIG_INPUT_ADXL34X_SPI=m
++CONFIG_INPUT_IMS_PCU=m
++CONFIG_INPUT_CMA3000=m
++CONFIG_INPUT_CMA3000_I2C=m
++CONFIG_INPUT_XEN_KBDDEV_FRONTEND=m
++CONFIG_INPUT_IDEAPAD_SLIDEBAR=m
++CONFIG_INPUT_SOC_BUTTON_ARRAY=m
++CONFIG_INPUT_DRV260X_HAPTICS=m
++CONFIG_INPUT_DRV2665_HAPTICS=m
++CONFIG_INPUT_DRV2667_HAPTICS=m
++CONFIG_RMI4_CORE=m
++# CONFIG_RMI4_I2C is not set
++# CONFIG_RMI4_SPI is not set
++# CONFIG_RMI4_SMB is not set
++CONFIG_RMI4_F03=y
++CONFIG_RMI4_F03_SERIO=m
++CONFIG_RMI4_2D_SENSOR=y
++CONFIG_RMI4_F11=y
++CONFIG_RMI4_F12=y
++CONFIG_RMI4_F30=y
++# CONFIG_RMI4_F34 is not set
++# CONFIG_RMI4_F54 is not set
++# CONFIG_RMI4_F55 is not set
++
++#
++# Hardware I/O ports
++#
++CONFIG_SERIO=y
++CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y
++CONFIG_SERIO_I8042=y
++CONFIG_SERIO_SERPORT=m
++CONFIG_SERIO_CT82C710=m
++CONFIG_SERIO_PARKBD=m
++CONFIG_SERIO_PCIPS2=m
++CONFIG_SERIO_LIBPS2=y
++CONFIG_SERIO_RAW=m
++CONFIG_SERIO_ALTERA_PS2=m
++CONFIG_SERIO_PS2MULT=m
++CONFIG_SERIO_ARC_PS2=m
++CONFIG_HYPERV_KEYBOARD=m
++# CONFIG_SERIO_GPIO_PS2 is not set
++CONFIG_USERIO=m
++CONFIG_GAMEPORT=m
++CONFIG_GAMEPORT_NS558=m
++CONFIG_GAMEPORT_L4=m
++CONFIG_GAMEPORT_EMU10K1=m
++CONFIG_GAMEPORT_FM801=m
++
++#
++# Character devices
++#
++CONFIG_TTY=y
++CONFIG_VT=y
++CONFIG_CONSOLE_TRANSLATIONS=y
++CONFIG_VT_CONSOLE=y
++CONFIG_VT_CONSOLE_SLEEP=y
++CONFIG_HW_CONSOLE=y
++CONFIG_VT_HW_CONSOLE_BINDING=y
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=0
++CONFIG_SERIAL_NONSTANDARD=y
++CONFIG_ROCKETPORT=m
++CONFIG_CYCLADES=m
++# CONFIG_CYZ_INTR is not set
++CONFIG_MOXA_INTELLIO=m
++CONFIG_MOXA_SMARTIO=m
++CONFIG_SYNCLINK=m
++CONFIG_SYNCLINKMP=m
++CONFIG_SYNCLINK_GT=m
++CONFIG_NOZOMI=m
++CONFIG_ISI=m
++CONFIG_N_HDLC=m
++CONFIG_N_GSM=m
++CONFIG_TRACE_ROUTER=m
++CONFIG_TRACE_SINK=m
++CONFIG_DEVMEM=y
++# CONFIG_DEVKMEM is not set
++
++#
++# Serial drivers
++#
++CONFIG_SERIAL_EARLYCON=y
++CONFIG_SERIAL_8250=y
++# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
++CONFIG_SERIAL_8250_PNP=y
++# CONFIG_SERIAL_8250_FINTEK is not set
++CONFIG_SERIAL_8250_CONSOLE=y
++CONFIG_SERIAL_8250_DMA=y
++CONFIG_SERIAL_8250_PCI=y
++CONFIG_SERIAL_8250_EXAR=y
++CONFIG_SERIAL_8250_CS=m
++# CONFIG_SERIAL_8250_MEN_MCB is not set
++CONFIG_SERIAL_8250_NR_UARTS=48
++CONFIG_SERIAL_8250_RUNTIME_UARTS=32
++CONFIG_SERIAL_8250_EXTENDED=y
++CONFIG_SERIAL_8250_MANY_PORTS=y
++CONFIG_SERIAL_8250_SHARE_IRQ=y
++# CONFIG_SERIAL_8250_DETECT_IRQ is not set
++CONFIG_SERIAL_8250_RSA=y
++# CONFIG_SERIAL_8250_FSL is not set
++CONFIG_SERIAL_8250_DW=m
++CONFIG_SERIAL_8250_RT288X=y
++CONFIG_SERIAL_8250_LPSS=y
++CONFIG_SERIAL_8250_MID=m
++# CONFIG_SERIAL_8250_MOXA is not set
++
++#
++# Non-8250 serial port support
++#
++CONFIG_SERIAL_KGDB_NMI=y
++CONFIG_SERIAL_MAX3100=m
++CONFIG_SERIAL_MAX310X=y
++CONFIG_SERIAL_UARTLITE=m
++CONFIG_SERIAL_UARTLITE_NR_UARTS=1
++CONFIG_SERIAL_CORE=y
++CONFIG_SERIAL_CORE_CONSOLE=y
++CONFIG_CONSOLE_POLL=y
++CONFIG_SERIAL_JSM=m
++CONFIG_SERIAL_SCCNXP=y
++CONFIG_SERIAL_SCCNXP_CONSOLE=y
++CONFIG_SERIAL_SC16IS7XX_CORE=m
++CONFIG_SERIAL_SC16IS7XX=m
++CONFIG_SERIAL_SC16IS7XX_I2C=y
++CONFIG_SERIAL_SC16IS7XX_SPI=y
++CONFIG_SERIAL_ALTERA_JTAGUART=m
++CONFIG_SERIAL_ALTERA_UART=m
++CONFIG_SERIAL_ALTERA_UART_MAXPORTS=4
++CONFIG_SERIAL_ALTERA_UART_BAUDRATE=115200
++# CONFIG_SERIAL_IFX6X60 is not set
++CONFIG_SERIAL_ARC=m
++CONFIG_SERIAL_ARC_NR_PORTS=1
++CONFIG_SERIAL_RP2=m
++CONFIG_SERIAL_RP2_NR_UARTS=32
++CONFIG_SERIAL_FSL_LPUART=m
++CONFIG_SERIAL_MEN_Z135=m
++# CONFIG_SERIAL_DEV_BUS is not set
++CONFIG_TTY_PRINTK=y
++CONFIG_PRINTER=m
++# CONFIG_LP_CONSOLE is not set
++CONFIG_PPDEV=m
++CONFIG_HVC_DRIVER=y
++CONFIG_HVC_IRQ=y
++CONFIG_HVC_XEN=y
++CONFIG_HVC_XEN_FRONTEND=y
++CONFIG_VIRTIO_CONSOLE=y
++CONFIG_IPMI_HANDLER=m
++CONFIG_IPMI_DMI_DECODE=y
++CONFIG_IPMI_PROC_INTERFACE=y
++# CONFIG_IPMI_PANIC_EVENT is not set
++CONFIG_IPMI_DEVICE_INTERFACE=m
++CONFIG_IPMI_SI=m
++CONFIG_IPMI_SSIF=m
++CONFIG_IPMI_WATCHDOG=m
++CONFIG_IPMI_POWEROFF=m
++CONFIG_HW_RANDOM=y
++CONFIG_HW_RANDOM_TIMERIOMEM=m
++CONFIG_HW_RANDOM_INTEL=m
++CONFIG_HW_RANDOM_AMD=m
++CONFIG_HW_RANDOM_VIA=m
++CONFIG_HW_RANDOM_VIRTIO=m
++CONFIG_NVRAM=m
++CONFIG_R3964=m
++CONFIG_APPLICOM=m
++
++#
++# PCMCIA character devices
++#
++CONFIG_SYNCLINK_CS=m
++CONFIG_CARDMAN_4000=m
++CONFIG_CARDMAN_4040=m
++# CONFIG_SCR24X is not set
++CONFIG_IPWIRELESS=m
++CONFIG_MWAVE=m
++CONFIG_RAW_DRIVER=m
++CONFIG_MAX_RAW_DEVS=256
++CONFIG_HPET=y
++CONFIG_HPET_MMAP=y
++CONFIG_HPET_MMAP_DEFAULT=y
++CONFIG_HANGCHECK_TIMER=m
++CONFIG_TCG_TPM=y
++CONFIG_HW_RANDOM_TPM=y
++CONFIG_TCG_TIS_CORE=y
++CONFIG_TCG_TIS=y
++# CONFIG_TCG_TIS_SPI is not set
++CONFIG_TCG_TIS_I2C_ATMEL=m
++CONFIG_TCG_TIS_I2C_INFINEON=m
++CONFIG_TCG_TIS_I2C_NUVOTON=m
++CONFIG_TCG_NSC=m
++CONFIG_TCG_ATMEL=m
++CONFIG_TCG_INFINEON=m
++CONFIG_TCG_XEN=m
++CONFIG_TCG_CRB=m
++# CONFIG_TCG_VTPM_PROXY is not set
++CONFIG_TCG_TIS_ST33ZP24=m
++CONFIG_TCG_TIS_ST33ZP24_I2C=m
++CONFIG_TCG_TIS_ST33ZP24_SPI=m
++CONFIG_TELCLOCK=m
++CONFIG_DEVPORT=y
++CONFIG_XILLYBUS=m
++CONFIG_XILLYBUS_PCIE=m
++
++#
++# I2C support
++#
++CONFIG_I2C=y
++CONFIG_ACPI_I2C_OPREGION=y
++CONFIG_I2C_BOARDINFO=y
++CONFIG_I2C_COMPAT=y
++CONFIG_I2C_CHARDEV=y
++CONFIG_I2C_MUX=m
++
++#
++# Multiplexer I2C Chip support
++#
++CONFIG_I2C_MUX_GPIO=m
++# CONFIG_I2C_MUX_LTC4306 is not set
++CONFIG_I2C_MUX_PCA9541=m
++CONFIG_I2C_MUX_PCA954x=m
++CONFIG_I2C_MUX_REG=m
++# CONFIG_I2C_MUX_MLXCPLD is not set
++CONFIG_I2C_HELPER_AUTO=y
++CONFIG_I2C_SMBUS=m
++CONFIG_I2C_ALGOBIT=m
++CONFIG_I2C_ALGOPCA=m
++
++#
++# I2C Hardware Bus support
++#
++
++#
++# PC SMBus host controller drivers
++#
++CONFIG_I2C_ALI1535=m
++CONFIG_I2C_ALI1563=m
++CONFIG_I2C_ALI15X3=m
++CONFIG_I2C_AMD756=m
++CONFIG_I2C_AMD756_S4882=m
++CONFIG_I2C_AMD8111=m
++CONFIG_I2C_I801=m
++CONFIG_I2C_ISCH=m
++CONFIG_I2C_ISMT=m
++CONFIG_I2C_PIIX4=m
++CONFIG_I2C_NFORCE2=m
++CONFIG_I2C_NFORCE2_S4985=m
++CONFIG_I2C_SIS5595=m
++CONFIG_I2C_SIS630=m
++CONFIG_I2C_SIS96X=m
++CONFIG_I2C_VIA=m
++CONFIG_I2C_VIAPRO=m
++
++#
++# ACPI drivers
++#
++CONFIG_I2C_SCMI=m
++
++#
++# I2C system bus drivers (mostly embedded / system-on-chip)
++#
++CONFIG_I2C_CBUS_GPIO=m
++CONFIG_I2C_DESIGNWARE_CORE=y
++CONFIG_I2C_DESIGNWARE_PLATFORM=y
++# CONFIG_I2C_DESIGNWARE_SLAVE is not set
++CONFIG_I2C_DESIGNWARE_PCI=m
++CONFIG_I2C_DESIGNWARE_BAYTRAIL=y
++CONFIG_I2C_EMEV2=m
++CONFIG_I2C_GPIO=m
++# CONFIG_I2C_GPIO_FAULT_INJECTOR is not set
++CONFIG_I2C_KEMPLD=m
++CONFIG_I2C_OCORES=m
++CONFIG_I2C_PCA_PLATFORM=m
++# CONFIG_I2C_PXA_PCI is not set
++CONFIG_I2C_SIMTEC=m
++CONFIG_I2C_XILINX=m
++
++#
++# External I2C/SMBus adapter drivers
++#
++CONFIG_I2C_DIOLAN_U2C=m
++CONFIG_I2C_DLN2=m
++CONFIG_I2C_PARPORT=m
++CONFIG_I2C_PARPORT_LIGHT=m
++CONFIG_I2C_ROBOTFUZZ_OSIF=m
++CONFIG_I2C_TAOS_EVM=m
++CONFIG_I2C_TINY_USB=m
++CONFIG_I2C_VIPERBOARD=m
++
++#
++# Other I2C/SMBus bus drivers
++#
++# CONFIG_I2C_MLXCPLD is not set
++CONFIG_I2C_CROS_EC_TUNNEL=m
++CONFIG_I2C_STUB=m
++CONFIG_I2C_SLAVE=y
++# CONFIG_I2C_SLAVE_EEPROM is not set
++# CONFIG_I2C_DEBUG_CORE is not set
++# CONFIG_I2C_DEBUG_ALGO is not set
++# CONFIG_I2C_DEBUG_BUS is not set
++CONFIG_SPI=y
++# CONFIG_SPI_DEBUG is not set
++CONFIG_SPI_MASTER=y
++
++#
++# SPI Master Controller Drivers
++#
++CONFIG_SPI_ALTERA=m
++# CONFIG_SPI_AXI_SPI_ENGINE is not set
++CONFIG_SPI_BITBANG=m
++CONFIG_SPI_BUTTERFLY=m
++CONFIG_SPI_CADENCE=m
++CONFIG_SPI_DESIGNWARE=m
++CONFIG_SPI_DW_PCI=m
++CONFIG_SPI_DW_MID_DMA=y
++CONFIG_SPI_DW_MMIO=m
++CONFIG_SPI_DLN2=m
++CONFIG_SPI_GPIO=m
++CONFIG_SPI_LM70_LLP=m
++CONFIG_SPI_OC_TINY=m
++CONFIG_SPI_PXA2XX=m
++CONFIG_SPI_PXA2XX_PCI=m
++# CONFIG_SPI_ROCKCHIP is not set
++CONFIG_SPI_SC18IS602=m
++CONFIG_SPI_XCOMM=m
++# CONFIG_SPI_XILINX is not set
++CONFIG_SPI_ZYNQMP_GQSPI=m
++
++#
++# SPI Protocol Masters
++#
++CONFIG_SPI_SPIDEV=m
++# CONFIG_SPI_LOOPBACK_TEST is not set
++CONFIG_SPI_TLE62X0=m
++# CONFIG_SPI_SLAVE is not set
++CONFIG_SPMI=m
++CONFIG_HSI=m
++CONFIG_HSI_BOARDINFO=y
++
++#
++# HSI controllers
++#
++
++#
++# HSI clients
++#
++CONFIG_HSI_CHAR=m
++CONFIG_PPS=y
++# CONFIG_PPS_DEBUG is not set
++
++#
++# PPS clients support
++#
++# CONFIG_PPS_CLIENT_KTIMER is not set
++CONFIG_PPS_CLIENT_LDISC=m
++CONFIG_PPS_CLIENT_PARPORT=m
++CONFIG_PPS_CLIENT_GPIO=m
++
++#
++# PPS generators support
++#
++
++#
++# PTP clock support
++#
++CONFIG_PTP_1588_CLOCK=y
++
++#
++# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks.
++#
++CONFIG_PTP_1588_CLOCK_KVM=m
++CONFIG_PINCTRL=y
++CONFIG_PINMUX=y
++CONFIG_PINCONF=y
++CONFIG_GENERIC_PINCONF=y
++# CONFIG_DEBUG_PINCTRL is not set
++CONFIG_PINCTRL_AMD=y
++# CONFIG_PINCTRL_MCP23S08 is not set
++CONFIG_PINCTRL_SX150X=y
++CONFIG_PINCTRL_BAYTRAIL=y
++CONFIG_PINCTRL_CHERRYVIEW=m
++CONFIG_PINCTRL_INTEL=m
++CONFIG_PINCTRL_BROXTON=m
++# CONFIG_PINCTRL_CANNONLAKE is not set
++# CONFIG_PINCTRL_CEDARFORK is not set
++# CONFIG_PINCTRL_DENVERTON is not set
++# CONFIG_PINCTRL_GEMINILAKE is not set
++# CONFIG_PINCTRL_LEWISBURG is not set
++CONFIG_PINCTRL_SUNRISEPOINT=m
++CONFIG_GPIOLIB=y
++CONFIG_GPIO_ACPI=y
++CONFIG_GPIOLIB_IRQCHIP=y
++# CONFIG_DEBUG_GPIO is not set
++CONFIG_GPIO_SYSFS=y
++CONFIG_GPIO_GENERIC=m
++CONFIG_GPIO_MAX730X=m
++
++#
++# Memory mapped GPIO drivers
++#
++CONFIG_GPIO_AMDPT=m
++CONFIG_GPIO_DWAPB=m
++# CONFIG_GPIO_EXAR is not set
++CONFIG_GPIO_GENERIC_PLATFORM=m
++CONFIG_GPIO_ICH=m
++CONFIG_GPIO_LYNXPOINT=y
++# CONFIG_GPIO_MB86S7X is not set
++# CONFIG_GPIO_MENZ127 is not set
++# CONFIG_GPIO_MOCKUP is not set
++CONFIG_GPIO_VX855=m
++
++#
++# Port-mapped I/O GPIO drivers
++#
++CONFIG_GPIO_F7188X=m
++CONFIG_GPIO_IT87=m
++CONFIG_GPIO_SCH=m
++CONFIG_GPIO_SCH311X=m
++
++#
++# I2C GPIO expanders
++#
++CONFIG_GPIO_ADP5588=m
++CONFIG_GPIO_MAX7300=m
++CONFIG_GPIO_MAX732X=m
++CONFIG_GPIO_PCA953X=m
++CONFIG_GPIO_PCF857X=m
++# CONFIG_GPIO_TPIC2810 is not set
++
++#
++# MFD GPIO expanders
++#
++CONFIG_GPIO_ADP5520=m
++CONFIG_GPIO_ARIZONA=m
++CONFIG_GPIO_CRYSTAL_COVE=m
++CONFIG_GPIO_DA9052=m
++CONFIG_GPIO_DA9055=m
++CONFIG_GPIO_DLN2=m
++CONFIG_GPIO_JANZ_TTL=m
++CONFIG_GPIO_KEMPLD=m
++CONFIG_GPIO_LP3943=m
++CONFIG_GPIO_PALMAS=y
++CONFIG_GPIO_RC5T583=y
++CONFIG_GPIO_TPS6586X=y
++CONFIG_GPIO_TPS65910=y
++CONFIG_GPIO_TPS65912=m
++CONFIG_GPIO_TWL4030=m
++CONFIG_GPIO_TWL6040=m
++CONFIG_GPIO_UCB1400=m
++CONFIG_GPIO_WM831X=m
++CONFIG_GPIO_WM8350=m
++CONFIG_GPIO_WM8994=m
++
++#
++# PCI GPIO expanders
++#
++CONFIG_GPIO_AMD8111=m
++CONFIG_GPIO_ML_IOH=m
++# CONFIG_GPIO_PCI_IDIO_16 is not set
++# CONFIG_GPIO_PCIE_IDIO_24 is not set
++CONFIG_GPIO_RDC321X=m
++
++#
++# SPI GPIO expanders
++#
++# CONFIG_GPIO_MAX3191X is not set
++CONFIG_GPIO_MAX7301=m
++CONFIG_GPIO_MC33880=m
++# CONFIG_GPIO_PISOSR is not set
++# CONFIG_GPIO_XRA1403 is not set
++
++#
++# USB GPIO expanders
++#
++CONFIG_GPIO_VIPERBOARD=m
++CONFIG_W1=m
++CONFIG_W1_CON=y
++
++#
++# 1-wire Bus Masters
++#
++CONFIG_W1_MASTER_MATROX=m
++CONFIG_W1_MASTER_DS2490=m
++CONFIG_W1_MASTER_DS2482=m
++CONFIG_W1_MASTER_DS1WM=m
++CONFIG_W1_MASTER_GPIO=m
++
++#
++# 1-wire Slaves
++#
++CONFIG_W1_SLAVE_THERM=m
++CONFIG_W1_SLAVE_SMEM=m
++# CONFIG_W1_SLAVE_DS2405 is not set
++CONFIG_W1_SLAVE_DS2408=m
++CONFIG_W1_SLAVE_DS2408_READBACK=y
++CONFIG_W1_SLAVE_DS2413=m
++CONFIG_W1_SLAVE_DS2406=m
++CONFIG_W1_SLAVE_DS2423=m
++# CONFIG_W1_SLAVE_DS2805 is not set
++CONFIG_W1_SLAVE_DS2431=m
++CONFIG_W1_SLAVE_DS2433=m
++# CONFIG_W1_SLAVE_DS2433_CRC is not set
++# CONFIG_W1_SLAVE_DS2438 is not set
++CONFIG_W1_SLAVE_DS2760=m
++CONFIG_W1_SLAVE_DS2780=m
++CONFIG_W1_SLAVE_DS2781=m
++CONFIG_W1_SLAVE_DS28E04=m
++# CONFIG_W1_SLAVE_DS28E17 is not set
++CONFIG_POWER_AVS=y
++CONFIG_POWER_RESET=y
++CONFIG_POWER_RESET_RESTART=y
++CONFIG_POWER_SUPPLY=y
++# CONFIG_POWER_SUPPLY_DEBUG is not set
++CONFIG_PDA_POWER=m
++CONFIG_GENERIC_ADC_BATTERY=m
++CONFIG_MAX8925_POWER=m
++CONFIG_WM831X_BACKUP=m
++CONFIG_WM831X_POWER=m
++CONFIG_WM8350_POWER=m
++CONFIG_TEST_POWER=m
++CONFIG_BATTERY_88PM860X=m
++CONFIG_BATTERY_DS2760=m
++CONFIG_BATTERY_DS2780=m
++CONFIG_BATTERY_DS2781=m
++CONFIG_BATTERY_DS2782=m
++CONFIG_BATTERY_SBS=m
++# CONFIG_CHARGER_SBS is not set
++# CONFIG_MANAGER_SBS is not set
++CONFIG_BATTERY_BQ27XXX=m
++CONFIG_BATTERY_BQ27XXX_I2C=m
++CONFIG_BATTERY_BQ27XXX_HDQ=m
++# CONFIG_BATTERY_BQ27XXX_DT_UPDATES_NVM is not set
++CONFIG_BATTERY_DA9030=m
++CONFIG_BATTERY_DA9052=m
++CONFIG_CHARGER_DA9150=m
++CONFIG_BATTERY_DA9150=m
++CONFIG_BATTERY_MAX17040=m
++CONFIG_BATTERY_MAX17042=m
++# CONFIG_BATTERY_MAX1721X is not set
++CONFIG_BATTERY_TWL4030_MADC=m
++CONFIG_CHARGER_88PM860X=m
++CONFIG_CHARGER_PCF50633=m
++CONFIG_BATTERY_RX51=m
++CONFIG_CHARGER_ISP1704=m
++CONFIG_CHARGER_MAX8903=m
++CONFIG_CHARGER_TWL4030=m
++CONFIG_CHARGER_LP8727=m
++CONFIG_CHARGER_LP8788=m
++CONFIG_CHARGER_GPIO=m
++CONFIG_CHARGER_MANAGER=y
++# CONFIG_CHARGER_LTC3651 is not set
++CONFIG_CHARGER_MAX14577=m
++CONFIG_CHARGER_MAX77693=m
++CONFIG_CHARGER_MAX8997=m
++CONFIG_CHARGER_MAX8998=m
++CONFIG_CHARGER_BQ2415X=m
++CONFIG_CHARGER_BQ24190=m
++CONFIG_CHARGER_BQ24257=m
++CONFIG_CHARGER_BQ24735=m
++CONFIG_CHARGER_BQ25890=m
++CONFIG_CHARGER_SMB347=m
++CONFIG_CHARGER_TPS65090=m
++CONFIG_BATTERY_GAUGE_LTC2941=m
++CONFIG_BATTERY_RT5033=m
++CONFIG_CHARGER_RT9455=m
++CONFIG_HWMON=y
++CONFIG_HWMON_VID=m
++# CONFIG_HWMON_DEBUG_CHIP is not set
++
++#
++# Native drivers
++#
++CONFIG_SENSORS_ABITUGURU=m
++CONFIG_SENSORS_ABITUGURU3=m
++CONFIG_SENSORS_AD7314=m
++CONFIG_SENSORS_AD7414=m
++CONFIG_SENSORS_AD7418=m
++CONFIG_SENSORS_ADM1021=m
++CONFIG_SENSORS_ADM1025=m
++CONFIG_SENSORS_ADM1026=m
++CONFIG_SENSORS_ADM1029=m
++CONFIG_SENSORS_ADM1031=m
++CONFIG_SENSORS_ADM9240=m
++CONFIG_SENSORS_ADT7X10=m
++CONFIG_SENSORS_ADT7310=m
++CONFIG_SENSORS_ADT7410=m
++CONFIG_SENSORS_ADT7411=m
++CONFIG_SENSORS_ADT7462=m
++CONFIG_SENSORS_ADT7470=m
++CONFIG_SENSORS_ADT7475=m
++CONFIG_SENSORS_ASC7621=m
++CONFIG_SENSORS_K8TEMP=m
++CONFIG_SENSORS_K10TEMP=m
++CONFIG_SENSORS_FAM15H_POWER=m
++CONFIG_SENSORS_APPLESMC=m
++CONFIG_SENSORS_ASB100=m
++# CONFIG_SENSORS_ASPEED is not set
++CONFIG_SENSORS_ATXP1=m
++CONFIG_SENSORS_DS620=m
++CONFIG_SENSORS_DS1621=m
++CONFIG_SENSORS_DELL_SMM=m
++CONFIG_SENSORS_DA9052_ADC=m
++CONFIG_SENSORS_DA9055=m
++CONFIG_SENSORS_I5K_AMB=m
++CONFIG_SENSORS_F71805F=m
++CONFIG_SENSORS_F71882FG=m
++CONFIG_SENSORS_F75375S=m
++CONFIG_SENSORS_MC13783_ADC=m
++CONFIG_SENSORS_FSCHMD=m
++# CONFIG_SENSORS_FTSTEUTATES is not set
++CONFIG_SENSORS_GL518SM=m
++CONFIG_SENSORS_GL520SM=m
++CONFIG_SENSORS_G760A=m
++CONFIG_SENSORS_G762=m
++CONFIG_SENSORS_HIH6130=m
++CONFIG_SENSORS_IBMAEM=m
++CONFIG_SENSORS_IBMPEX=m
++CONFIG_SENSORS_IIO_HWMON=m
++CONFIG_SENSORS_I5500=m
++CONFIG_SENSORS_CORETEMP=m
++CONFIG_SENSORS_IT87=m
++CONFIG_SENSORS_JC42=m
++CONFIG_SENSORS_POWR1220=m
++CONFIG_SENSORS_LINEAGE=m
++CONFIG_SENSORS_LTC2945=m
++# CONFIG_SENSORS_LTC2990 is not set
++CONFIG_SENSORS_LTC4151=m
++CONFIG_SENSORS_LTC4215=m
++CONFIG_SENSORS_LTC4222=m
++CONFIG_SENSORS_LTC4245=m
++CONFIG_SENSORS_LTC4260=m
++CONFIG_SENSORS_LTC4261=m
++CONFIG_SENSORS_MAX1111=m
++CONFIG_SENSORS_MAX16065=m
++CONFIG_SENSORS_MAX1619=m
++CONFIG_SENSORS_MAX1668=m
++CONFIG_SENSORS_MAX197=m
++# CONFIG_SENSORS_MAX31722 is not set
++# CONFIG_SENSORS_MAX6621 is not set
++CONFIG_SENSORS_MAX6639=m
++CONFIG_SENSORS_MAX6642=m
++CONFIG_SENSORS_MAX6650=m
++CONFIG_SENSORS_MAX6697=m
++CONFIG_SENSORS_MAX31790=m
++CONFIG_SENSORS_MCP3021=m
++# CONFIG_SENSORS_TC654 is not set
++CONFIG_SENSORS_MENF21BMC_HWMON=m
++CONFIG_SENSORS_ADCXX=m
++CONFIG_SENSORS_LM63=m
++CONFIG_SENSORS_LM70=m
++CONFIG_SENSORS_LM73=m
++CONFIG_SENSORS_LM75=m
++CONFIG_SENSORS_LM77=m
++CONFIG_SENSORS_LM78=m
++CONFIG_SENSORS_LM80=m
++CONFIG_SENSORS_LM83=m
++CONFIG_SENSORS_LM85=m
++CONFIG_SENSORS_LM87=m
++CONFIG_SENSORS_LM90=m
++CONFIG_SENSORS_LM92=m
++CONFIG_SENSORS_LM93=m
++CONFIG_SENSORS_LM95234=m
++CONFIG_SENSORS_LM95241=m
++CONFIG_SENSORS_LM95245=m
++CONFIG_SENSORS_PC87360=m
++CONFIG_SENSORS_PC87427=m
++CONFIG_SENSORS_NTC_THERMISTOR=m
++CONFIG_SENSORS_NCT6683=m
++CONFIG_SENSORS_NCT6775=m
++CONFIG_SENSORS_NCT7802=m
++CONFIG_SENSORS_NCT7904=m
++CONFIG_SENSORS_PCF8591=m
++CONFIG_PMBUS=m
++CONFIG_SENSORS_PMBUS=m
++CONFIG_SENSORS_ADM1275=m
++# CONFIG_SENSORS_IBM_CFFPS is not set
++# CONFIG_SENSORS_IR35221 is not set
++CONFIG_SENSORS_LM25066=m
++CONFIG_SENSORS_LTC2978=m
++CONFIG_SENSORS_LTC2978_REGULATOR=y
++# CONFIG_SENSORS_LTC3815 is not set
++CONFIG_SENSORS_MAX16064=m
++CONFIG_SENSORS_MAX20751=m
++# CONFIG_SENSORS_MAX31785 is not set
++CONFIG_SENSORS_MAX34440=m
++CONFIG_SENSORS_MAX8688=m
++CONFIG_SENSORS_TPS40422=m
++# CONFIG_SENSORS_TPS53679 is not set
++CONFIG_SENSORS_UCD9000=m
++CONFIG_SENSORS_UCD9200=m
++CONFIG_SENSORS_ZL6100=m
++CONFIG_SENSORS_SHT15=m
++CONFIG_SENSORS_SHT21=m
++# CONFIG_SENSORS_SHT3x is not set
++CONFIG_SENSORS_SHTC1=m
++CONFIG_SENSORS_SIS5595=m
++CONFIG_SENSORS_DME1737=m
++CONFIG_SENSORS_EMC1403=m
++CONFIG_SENSORS_EMC2103=m
++CONFIG_SENSORS_EMC6W201=m
++CONFIG_SENSORS_SMSC47M1=m
++CONFIG_SENSORS_SMSC47M192=m
++CONFIG_SENSORS_SMSC47B397=m
++CONFIG_SENSORS_SCH56XX_COMMON=m
++CONFIG_SENSORS_SCH5627=m
++CONFIG_SENSORS_SCH5636=m
++# CONFIG_SENSORS_STTS751 is not set
++CONFIG_SENSORS_SMM665=m
++CONFIG_SENSORS_ADC128D818=m
++CONFIG_SENSORS_ADS1015=m
++CONFIG_SENSORS_ADS7828=m
++CONFIG_SENSORS_ADS7871=m
++CONFIG_SENSORS_AMC6821=m
++CONFIG_SENSORS_INA209=m
++CONFIG_SENSORS_INA2XX=m
++# CONFIG_SENSORS_INA3221 is not set
++CONFIG_SENSORS_TC74=m
++CONFIG_SENSORS_THMC50=m
++CONFIG_SENSORS_TMP102=m
++CONFIG_SENSORS_TMP103=m
++# CONFIG_SENSORS_TMP108 is not set
++CONFIG_SENSORS_TMP401=m
++CONFIG_SENSORS_TMP421=m
++CONFIG_SENSORS_VIA_CPUTEMP=m
++CONFIG_SENSORS_VIA686A=m
++CONFIG_SENSORS_VT1211=m
++CONFIG_SENSORS_VT8231=m
++# CONFIG_SENSORS_W83773G is not set
++CONFIG_SENSORS_W83781D=m
++CONFIG_SENSORS_W83791D=m
++CONFIG_SENSORS_W83792D=m
++CONFIG_SENSORS_W83793=m
++CONFIG_SENSORS_W83795=m
++# CONFIG_SENSORS_W83795_FANCTRL is not set
++CONFIG_SENSORS_W83L785TS=m
++CONFIG_SENSORS_W83L786NG=m
++CONFIG_SENSORS_W83627HF=m
++CONFIG_SENSORS_W83627EHF=m
++CONFIG_SENSORS_WM831X=m
++CONFIG_SENSORS_WM8350=m
++# CONFIG_SENSORS_XGENE is not set
++
++#
++# ACPI drivers
++#
++CONFIG_SENSORS_ACPI_POWER=m
++CONFIG_SENSORS_ATK0110=m
++CONFIG_THERMAL=y
++CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0
++CONFIG_THERMAL_HWMON=y
++CONFIG_THERMAL_WRITABLE_TRIPS=y
++CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
++# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set
++# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set
++# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set
++CONFIG_THERMAL_GOV_FAIR_SHARE=y
++CONFIG_THERMAL_GOV_STEP_WISE=y
++CONFIG_THERMAL_GOV_BANG_BANG=y
++CONFIG_THERMAL_GOV_USER_SPACE=y
++CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
++# CONFIG_CLOCK_THERMAL is not set
++# CONFIG_DEVFREQ_THERMAL is not set
++CONFIG_THERMAL_EMULATION=y
++CONFIG_INTEL_POWERCLAMP=m
++CONFIG_X86_PKG_TEMP_THERMAL=m
++CONFIG_INTEL_SOC_DTS_IOSF_CORE=m
++CONFIG_INTEL_SOC_DTS_THERMAL=m
++
++#
++# ACPI INT340X thermal drivers
++#
++CONFIG_INT340X_THERMAL=m
++CONFIG_ACPI_THERMAL_REL=m
++# CONFIG_INT3406_THERMAL is not set
++CONFIG_INTEL_PCH_THERMAL=m
++# CONFIG_GENERIC_ADC_THERMAL is not set
++CONFIG_WATCHDOG=y
++CONFIG_WATCHDOG_CORE=y
++# CONFIG_WATCHDOG_NOWAYOUT is not set
++CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y
++# CONFIG_WATCHDOG_SYSFS is not set
++
++#
++# Watchdog Device Drivers
++#
++CONFIG_SOFT_WATCHDOG=m
++CONFIG_DA9052_WATCHDOG=m
++CONFIG_DA9055_WATCHDOG=m
++CONFIG_DA9063_WATCHDOG=m
++CONFIG_DA9062_WATCHDOG=m
++CONFIG_MENF21BMC_WATCHDOG=m
++# CONFIG_WDAT_WDT is not set
++CONFIG_WM831X_WATCHDOG=m
++CONFIG_WM8350_WATCHDOG=m
++CONFIG_XILINX_WATCHDOG=m
++# CONFIG_ZIIRAVE_WATCHDOG is not set
++CONFIG_CADENCE_WATCHDOG=m
++CONFIG_DW_WATCHDOG=m
++CONFIG_TWL4030_WATCHDOG=m
++CONFIG_MAX63XX_WATCHDOG=m
++CONFIG_RETU_WATCHDOG=m
++CONFIG_ACQUIRE_WDT=m
++CONFIG_ADVANTECH_WDT=m
++CONFIG_ALIM1535_WDT=m
++CONFIG_ALIM7101_WDT=m
++CONFIG_F71808E_WDT=m
++CONFIG_SP5100_TCO=m
++CONFIG_SBC_FITPC2_WATCHDOG=m
++CONFIG_EUROTECH_WDT=m
++CONFIG_IB700_WDT=m
++CONFIG_IBMASR=m
++CONFIG_WAFER_WDT=m
++CONFIG_I6300ESB_WDT=m
++CONFIG_IE6XX_WDT=m
++CONFIG_ITCO_WDT=m
++CONFIG_ITCO_VENDOR_SUPPORT=y
++CONFIG_IT8712F_WDT=m
++CONFIG_IT87_WDT=m
++CONFIG_HP_WATCHDOG=m
++CONFIG_KEMPLD_WDT=m
++CONFIG_HPWDT_NMI_DECODING=y
++CONFIG_SC1200_WDT=m
++CONFIG_PC87413_WDT=m
++CONFIG_NV_TCO=m
++CONFIG_60XX_WDT=m
++CONFIG_CPU5_WDT=m
++CONFIG_SMSC_SCH311X_WDT=m
++CONFIG_SMSC37B787_WDT=m
++CONFIG_VIA_WDT=m
++CONFIG_W83627HF_WDT=m
++CONFIG_W83877F_WDT=m
++CONFIG_W83977F_WDT=m
++CONFIG_MACHZ_WDT=m
++CONFIG_SBC_EPX_C3_WATCHDOG=m
++# CONFIG_INTEL_MEI_WDT is not set
++# CONFIG_NI903X_WDT is not set
++# CONFIG_NIC7018_WDT is not set
++CONFIG_MEN_A21_WDT=m
++CONFIG_XEN_WDT=m
++
++#
++# PCI-based Watchdog Cards
++#
++CONFIG_PCIPCWATCHDOG=m
++CONFIG_WDTPCI=m
++
++#
++# USB-based Watchdog Cards
++#
++CONFIG_USBPCWATCHDOG=m
++
++#
++# Watchdog Pretimeout Governors
++#
++# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set
++CONFIG_SSB_POSSIBLE=y
++CONFIG_SSB=m
++CONFIG_SSB_SPROM=y
++CONFIG_SSB_BLOCKIO=y
++CONFIG_SSB_PCIHOST_POSSIBLE=y
++CONFIG_SSB_PCIHOST=y
++CONFIG_SSB_B43_PCI_BRIDGE=y
++CONFIG_SSB_PCMCIAHOST_POSSIBLE=y
++# CONFIG_SSB_PCMCIAHOST is not set
++CONFIG_SSB_SDIOHOST_POSSIBLE=y
++CONFIG_SSB_SDIOHOST=y
++# CONFIG_SSB_SILENT is not set
++# CONFIG_SSB_DEBUG is not set
++CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y
++CONFIG_SSB_DRIVER_PCICORE=y
++CONFIG_SSB_DRIVER_GPIO=y
++CONFIG_BCMA_POSSIBLE=y
++CONFIG_BCMA=m
++CONFIG_BCMA_BLOCKIO=y
++CONFIG_BCMA_HOST_PCI_POSSIBLE=y
++CONFIG_BCMA_HOST_PCI=y
++CONFIG_BCMA_HOST_SOC=y
++CONFIG_BCMA_DRIVER_PCI=y
++CONFIG_BCMA_SFLASH=y
++CONFIG_BCMA_DRIVER_GMAC_CMN=y
++CONFIG_BCMA_DRIVER_GPIO=y
++# CONFIG_BCMA_DEBUG is not set
++
++#
++# Multifunction device drivers
++#
++CONFIG_MFD_CORE=y
++CONFIG_MFD_AS3711=y
++CONFIG_PMIC_ADP5520=y
++CONFIG_MFD_AAT2870_CORE=y
++CONFIG_MFD_BCM590XX=m
++# CONFIG_MFD_BD9571MWV is not set
++# CONFIG_MFD_AXP20X_I2C is not set
++CONFIG_MFD_CROS_EC=m
++CONFIG_MFD_CROS_EC_I2C=m
++CONFIG_MFD_CROS_EC_SPI=m
++# CONFIG_MFD_CROS_EC_CHARDEV is not set
++CONFIG_PMIC_DA903X=y
++CONFIG_PMIC_DA9052=y
++CONFIG_MFD_DA9052_SPI=y
++CONFIG_MFD_DA9052_I2C=y
++CONFIG_MFD_DA9055=y
++CONFIG_MFD_DA9062=m
++CONFIG_MFD_DA9063=y
++CONFIG_MFD_DA9150=m
++CONFIG_MFD_DLN2=m
++CONFIG_MFD_MC13XXX=m
++CONFIG_MFD_MC13XXX_SPI=m
++CONFIG_MFD_MC13XXX_I2C=m
++CONFIG_HTC_PASIC3=m
++CONFIG_HTC_I2CPLD=y
++CONFIG_MFD_INTEL_QUARK_I2C_GPIO=m
++CONFIG_LPC_ICH=m
++CONFIG_LPC_SCH=m
++CONFIG_INTEL_SOC_PMIC=y
++# CONFIG_INTEL_SOC_PMIC_BXTWC is not set
++# CONFIG_INTEL_SOC_PMIC_CHTWC is not set
++# CONFIG_INTEL_SOC_PMIC_CHTDC_TI is not set
++CONFIG_MFD_INTEL_LPSS=m
++CONFIG_MFD_INTEL_LPSS_ACPI=m
++CONFIG_MFD_INTEL_LPSS_PCI=m
++CONFIG_MFD_JANZ_CMODIO=m
++CONFIG_MFD_KEMPLD=m
++CONFIG_MFD_88PM800=m
++CONFIG_MFD_88PM805=m
++CONFIG_MFD_88PM860X=y
++CONFIG_MFD_MAX14577=y
++CONFIG_MFD_MAX77693=y
++CONFIG_MFD_MAX77843=y
++CONFIG_MFD_MAX8907=m
++CONFIG_MFD_MAX8925=y
++CONFIG_MFD_MAX8997=y
++CONFIG_MFD_MAX8998=y
++CONFIG_MFD_MT6397=m
++CONFIG_MFD_MENF21BMC=m
++CONFIG_EZX_PCAP=y
++CONFIG_MFD_VIPERBOARD=m
++CONFIG_MFD_RETU=m
++CONFIG_MFD_PCF50633=m
++CONFIG_PCF50633_ADC=m
++CONFIG_PCF50633_GPIO=m
++CONFIG_UCB1400_CORE=m
++CONFIG_MFD_RDC321X=m
++CONFIG_MFD_RT5033=m
++CONFIG_MFD_RC5T583=y
++CONFIG_MFD_SEC_CORE=y
++CONFIG_MFD_SI476X_CORE=m
++CONFIG_MFD_SM501=m
++CONFIG_MFD_SM501_GPIO=y
++CONFIG_MFD_SKY81452=m
++CONFIG_MFD_SMSC=y
++CONFIG_ABX500_CORE=y
++CONFIG_AB3100_CORE=y
++CONFIG_AB3100_OTP=m
++CONFIG_MFD_SYSCON=y
++CONFIG_MFD_TI_AM335X_TSCADC=m
++CONFIG_MFD_LP3943=m
++CONFIG_MFD_LP8788=y
++# CONFIG_MFD_TI_LMU is not set
++CONFIG_MFD_PALMAS=y
++CONFIG_TPS6105X=m
++CONFIG_TPS65010=m
++CONFIG_TPS6507X=m
++# CONFIG_MFD_TPS65086 is not set
++CONFIG_MFD_TPS65090=y
++# CONFIG_MFD_TPS68470 is not set
++# CONFIG_MFD_TI_LP873X is not set
++CONFIG_MFD_TPS6586X=y
++CONFIG_MFD_TPS65910=y
++CONFIG_MFD_TPS65912=y
++CONFIG_MFD_TPS65912_I2C=y
++CONFIG_MFD_TPS65912_SPI=y
++CONFIG_MFD_TPS80031=y
++CONFIG_TWL4030_CORE=y
++CONFIG_MFD_TWL4030_AUDIO=y
++CONFIG_TWL6040_CORE=y
++CONFIG_MFD_WL1273_CORE=m
++CONFIG_MFD_LM3533=m
++# CONFIG_MFD_TMIO is not set
++CONFIG_MFD_VX855=m
++CONFIG_MFD_ARIZONA=y
++CONFIG_MFD_ARIZONA_I2C=m
++CONFIG_MFD_ARIZONA_SPI=m
++# CONFIG_MFD_CS47L24 is not set
++CONFIG_MFD_WM5102=y
++CONFIG_MFD_WM5110=y
++CONFIG_MFD_WM8997=y
++CONFIG_MFD_WM8998=y
++CONFIG_MFD_WM8400=y
++CONFIG_MFD_WM831X=y
++CONFIG_MFD_WM831X_I2C=y
++CONFIG_MFD_WM831X_SPI=y
++CONFIG_MFD_WM8350=y
++CONFIG_MFD_WM8350_I2C=y
++CONFIG_MFD_WM8994=m
++CONFIG_REGULATOR=y
++# CONFIG_REGULATOR_DEBUG is not set
++CONFIG_REGULATOR_FIXED_VOLTAGE=m
++CONFIG_REGULATOR_VIRTUAL_CONSUMER=m
++CONFIG_REGULATOR_USERSPACE_CONSUMER=m
++CONFIG_REGULATOR_88PM800=m
++CONFIG_REGULATOR_88PM8607=m
++CONFIG_REGULATOR_ACT8865=m
++CONFIG_REGULATOR_AD5398=m
++CONFIG_REGULATOR_ANATOP=m
++CONFIG_REGULATOR_AAT2870=m
++CONFIG_REGULATOR_AB3100=m
++# CONFIG_REGULATOR_ARIZONA_LDO1 is not set
++# CONFIG_REGULATOR_ARIZONA_MICSUPP is not set
++CONFIG_REGULATOR_AS3711=m
++CONFIG_REGULATOR_BCM590XX=m
++CONFIG_REGULATOR_DA903X=m
++CONFIG_REGULATOR_DA9052=m
++CONFIG_REGULATOR_DA9055=m
++CONFIG_REGULATOR_DA9062=m
++CONFIG_REGULATOR_DA9063=m
++CONFIG_REGULATOR_DA9210=m
++CONFIG_REGULATOR_DA9211=m
++CONFIG_REGULATOR_FAN53555=m
++CONFIG_REGULATOR_GPIO=m
++CONFIG_REGULATOR_ISL9305=m
++CONFIG_REGULATOR_ISL6271A=m
++CONFIG_REGULATOR_LP3971=m
++CONFIG_REGULATOR_LP3972=m
++CONFIG_REGULATOR_LP872X=m
++CONFIG_REGULATOR_LP8755=m
++CONFIG_REGULATOR_LP8788=m
++CONFIG_REGULATOR_LTC3589=m
++# CONFIG_REGULATOR_LTC3676 is not set
++CONFIG_REGULATOR_MAX14577=m
++CONFIG_REGULATOR_MAX1586=m
++CONFIG_REGULATOR_MAX8649=m
++CONFIG_REGULATOR_MAX8660=m
++CONFIG_REGULATOR_MAX8907=m
++CONFIG_REGULATOR_MAX8925=m
++CONFIG_REGULATOR_MAX8952=m
++CONFIG_REGULATOR_MAX8997=m
++CONFIG_REGULATOR_MAX8998=m
++CONFIG_REGULATOR_MAX77693=m
++CONFIG_REGULATOR_MC13XXX_CORE=m
++CONFIG_REGULATOR_MC13783=m
++CONFIG_REGULATOR_MC13892=m
++CONFIG_REGULATOR_MT6311=m
++# CONFIG_REGULATOR_MT6323 is not set
++CONFIG_REGULATOR_MT6397=m
++CONFIG_REGULATOR_PALMAS=m
++CONFIG_REGULATOR_PCAP=m
++CONFIG_REGULATOR_PCF50633=m
++CONFIG_REGULATOR_PFUZE100=m
++# CONFIG_REGULATOR_PV88060 is not set
++# CONFIG_REGULATOR_PV88080 is not set
++# CONFIG_REGULATOR_PV88090 is not set
++CONFIG_REGULATOR_PWM=m
++CONFIG_REGULATOR_QCOM_SPMI=m
++CONFIG_REGULATOR_RC5T583=m
++CONFIG_REGULATOR_RT5033=m
++CONFIG_REGULATOR_S2MPA01=m
++CONFIG_REGULATOR_S2MPS11=m
++CONFIG_REGULATOR_S5M8767=m
++CONFIG_REGULATOR_SKY81452=m
++CONFIG_REGULATOR_TPS51632=m
++CONFIG_REGULATOR_TPS6105X=m
++CONFIG_REGULATOR_TPS62360=m
++CONFIG_REGULATOR_TPS65023=m
++CONFIG_REGULATOR_TPS6507X=m
++CONFIG_REGULATOR_TPS65090=m
++# CONFIG_REGULATOR_TPS65132 is not set
++CONFIG_REGULATOR_TPS6524X=m
++CONFIG_REGULATOR_TPS6586X=m
++CONFIG_REGULATOR_TPS65910=m
++CONFIG_REGULATOR_TPS65912=m
++CONFIG_REGULATOR_TPS80031=m
++CONFIG_REGULATOR_TWL4030=m
++CONFIG_REGULATOR_WM831X=m
++CONFIG_REGULATOR_WM8350=m
++CONFIG_REGULATOR_WM8400=m
++CONFIG_REGULATOR_WM8994=m
++CONFIG_RC_CORE=m
++CONFIG_RC_MAP=m
++# CONFIG_LIRC is not set
++CONFIG_RC_DECODERS=y
++CONFIG_IR_NEC_DECODER=m
++CONFIG_IR_RC5_DECODER=m
++CONFIG_IR_RC6_DECODER=m
++CONFIG_IR_JVC_DECODER=m
++CONFIG_IR_SONY_DECODER=m
++CONFIG_IR_SANYO_DECODER=m
++CONFIG_IR_SHARP_DECODER=m
++CONFIG_IR_MCE_KBD_DECODER=m
++CONFIG_IR_XMP_DECODER=m
++CONFIG_RC_DEVICES=y
++CONFIG_RC_ATI_REMOTE=m
++CONFIG_IR_ENE=m
++CONFIG_IR_IMON=m
++CONFIG_IR_MCEUSB=m
++CONFIG_IR_ITE_CIR=m
++CONFIG_IR_FINTEK=m
++CONFIG_IR_NUVOTON=m
++CONFIG_IR_REDRAT3=m
++CONFIG_IR_STREAMZAP=m
++CONFIG_IR_WINBOND_CIR=m
++CONFIG_IR_IGORPLUGUSB=m
++CONFIG_IR_IGUANA=m
++CONFIG_IR_TTUSBIR=m
++CONFIG_RC_LOOPBACK=m
++# CONFIG_IR_SERIAL is not set
++# CONFIG_IR_SIR is not set
++CONFIG_MEDIA_SUPPORT=m
++
++#
++# Multimedia core support
++#
++CONFIG_MEDIA_CAMERA_SUPPORT=y
++CONFIG_MEDIA_ANALOG_TV_SUPPORT=y
++CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
++CONFIG_MEDIA_RADIO_SUPPORT=y
++CONFIG_MEDIA_SDR_SUPPORT=y
++# CONFIG_MEDIA_CEC_SUPPORT is not set
++CONFIG_MEDIA_CONTROLLER=y
++# CONFIG_MEDIA_CONTROLLER_DVB is not set
++CONFIG_VIDEO_DEV=m
++CONFIG_VIDEO_V4L2_SUBDEV_API=y
++CONFIG_VIDEO_V4L2=m
++# CONFIG_VIDEO_ADV_DEBUG is not set
++# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
++# CONFIG_VIDEO_PCI_SKELETON is not set
++CONFIG_VIDEO_TUNER=m
++CONFIG_V4L2_MEM2MEM_DEV=m
++CONFIG_V4L2_FLASH_LED_CLASS=m
++CONFIG_V4L2_FWNODE=m
++CONFIG_VIDEOBUF_GEN=m
++CONFIG_VIDEOBUF_DMA_SG=m
++CONFIG_VIDEOBUF_VMALLOC=m
++CONFIG_VIDEOBUF_DVB=m
++CONFIG_DVB_CORE=m
++# CONFIG_DVB_MMAP is not set
++CONFIG_DVB_NET=y
++CONFIG_TTPCI_EEPROM=m
++CONFIG_DVB_MAX_ADAPTERS=8
++CONFIG_DVB_DYNAMIC_MINORS=y
++# CONFIG_DVB_DEMUX_SECTION_LOSS_LOG is not set
++# CONFIG_DVB_ULE_DEBUG is not set
++
++#
++# Media drivers
++#
++CONFIG_MEDIA_USB_SUPPORT=y
++
++#
++# Webcam devices
++#
++CONFIG_USB_VIDEO_CLASS=m
++CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
++CONFIG_USB_GSPCA=m
++CONFIG_USB_M5602=m
++CONFIG_USB_STV06XX=m
++CONFIG_USB_GL860=m
++CONFIG_USB_GSPCA_BENQ=m
++CONFIG_USB_GSPCA_CONEX=m
++CONFIG_USB_GSPCA_CPIA1=m
++CONFIG_USB_GSPCA_DTCS033=m
++CONFIG_USB_GSPCA_ETOMS=m
++CONFIG_USB_GSPCA_FINEPIX=m
++CONFIG_USB_GSPCA_JEILINJ=m
++CONFIG_USB_GSPCA_JL2005BCD=m
++CONFIG_USB_GSPCA_KINECT=m
++CONFIG_USB_GSPCA_KONICA=m
++CONFIG_USB_GSPCA_MARS=m
++CONFIG_USB_GSPCA_MR97310A=m
++CONFIG_USB_GSPCA_NW80X=m
++CONFIG_USB_GSPCA_OV519=m
++CONFIG_USB_GSPCA_OV534=m
++CONFIG_USB_GSPCA_OV534_9=m
++CONFIG_USB_GSPCA_PAC207=m
++CONFIG_USB_GSPCA_PAC7302=m
++CONFIG_USB_GSPCA_PAC7311=m
++CONFIG_USB_GSPCA_SE401=m
++CONFIG_USB_GSPCA_SN9C2028=m
++CONFIG_USB_GSPCA_SN9C20X=m
++CONFIG_USB_GSPCA_SONIXB=m
++CONFIG_USB_GSPCA_SONIXJ=m
++CONFIG_USB_GSPCA_SPCA500=m
++CONFIG_USB_GSPCA_SPCA501=m
++CONFIG_USB_GSPCA_SPCA505=m
++CONFIG_USB_GSPCA_SPCA506=m
++CONFIG_USB_GSPCA_SPCA508=m
++CONFIG_USB_GSPCA_SPCA561=m
++CONFIG_USB_GSPCA_SPCA1528=m
++CONFIG_USB_GSPCA_SQ905=m
++CONFIG_USB_GSPCA_SQ905C=m
++CONFIG_USB_GSPCA_SQ930X=m
++CONFIG_USB_GSPCA_STK014=m
++CONFIG_USB_GSPCA_STK1135=m
++CONFIG_USB_GSPCA_STV0680=m
++CONFIG_USB_GSPCA_SUNPLUS=m
++CONFIG_USB_GSPCA_T613=m
++CONFIG_USB_GSPCA_TOPRO=m
++CONFIG_USB_GSPCA_TOUPTEK=m
++CONFIG_USB_GSPCA_TV8532=m
++CONFIG_USB_GSPCA_VC032X=m
++CONFIG_USB_GSPCA_VICAM=m
++CONFIG_USB_GSPCA_XIRLINK_CIT=m
++CONFIG_USB_GSPCA_ZC3XX=m
++CONFIG_USB_PWC=m
++# CONFIG_USB_PWC_DEBUG is not set
++CONFIG_USB_PWC_INPUT_EVDEV=y
++CONFIG_VIDEO_CPIA2=m
++CONFIG_USB_ZR364XX=m
++CONFIG_USB_STKWEBCAM=m
++CONFIG_USB_S2255=m
++CONFIG_VIDEO_USBTV=m
++
++#
++# Analog TV USB devices
++#
++CONFIG_VIDEO_PVRUSB2=m
++CONFIG_VIDEO_PVRUSB2_SYSFS=y
++CONFIG_VIDEO_PVRUSB2_DVB=y
++# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set
++CONFIG_VIDEO_HDPVR=m
++CONFIG_VIDEO_USBVISION=m
++CONFIG_VIDEO_STK1160_COMMON=m
++CONFIG_VIDEO_STK1160=m
++CONFIG_VIDEO_GO7007=m
++CONFIG_VIDEO_GO7007_USB=m
++CONFIG_VIDEO_GO7007_LOADER=m
++CONFIG_VIDEO_GO7007_USB_S2250_BOARD=m
++
++#
++# Analog/digital TV USB devices
++#
++CONFIG_VIDEO_AU0828=m
++CONFIG_VIDEO_AU0828_V4L2=y
++CONFIG_VIDEO_AU0828_RC=y
++CONFIG_VIDEO_CX231XX=m
++CONFIG_VIDEO_CX231XX_RC=y
++CONFIG_VIDEO_CX231XX_ALSA=m
++CONFIG_VIDEO_CX231XX_DVB=m
++CONFIG_VIDEO_TM6000=m
++CONFIG_VIDEO_TM6000_ALSA=m
++CONFIG_VIDEO_TM6000_DVB=m
++
++#
++# Digital TV USB devices
++#
++CONFIG_DVB_USB=m
++# CONFIG_DVB_USB_DEBUG is not set
++CONFIG_DVB_USB_DIB3000MC=m
++CONFIG_DVB_USB_A800=m
++CONFIG_DVB_USB_DIBUSB_MB=m
++# CONFIG_DVB_USB_DIBUSB_MB_FAULTY is not set
++CONFIG_DVB_USB_DIBUSB_MC=m
++CONFIG_DVB_USB_DIB0700=m
++CONFIG_DVB_USB_UMT_010=m
++CONFIG_DVB_USB_CXUSB=m
++CONFIG_DVB_USB_M920X=m
++CONFIG_DVB_USB_DIGITV=m
++CONFIG_DVB_USB_VP7045=m
++CONFIG_DVB_USB_VP702X=m
++CONFIG_DVB_USB_GP8PSK=m
++CONFIG_DVB_USB_NOVA_T_USB2=m
++CONFIG_DVB_USB_TTUSB2=m
++CONFIG_DVB_USB_DTT200U=m
++CONFIG_DVB_USB_OPERA1=m
++CONFIG_DVB_USB_AF9005=m
++CONFIG_DVB_USB_AF9005_REMOTE=m
++CONFIG_DVB_USB_PCTV452E=m
++CONFIG_DVB_USB_DW2102=m
++CONFIG_DVB_USB_CINERGY_T2=m
++CONFIG_DVB_USB_DTV5100=m
++CONFIG_DVB_USB_FRIIO=m
++CONFIG_DVB_USB_AZ6027=m
++CONFIG_DVB_USB_TECHNISAT_USB2=m
++CONFIG_DVB_USB_V2=m
++CONFIG_DVB_USB_AF9015=m
++CONFIG_DVB_USB_AF9035=m
++CONFIG_DVB_USB_ANYSEE=m
++CONFIG_DVB_USB_AU6610=m
++CONFIG_DVB_USB_AZ6007=m
++CONFIG_DVB_USB_CE6230=m
++CONFIG_DVB_USB_EC168=m
++CONFIG_DVB_USB_GL861=m
++CONFIG_DVB_USB_LME2510=m
++CONFIG_DVB_USB_MXL111SF=m
++CONFIG_DVB_USB_RTL28XXU=m
++CONFIG_DVB_USB_DVBSKY=m
++# CONFIG_DVB_USB_ZD1301 is not set
++CONFIG_DVB_TTUSB_BUDGET=m
++CONFIG_DVB_TTUSB_DEC=m
++CONFIG_SMS_USB_DRV=m
++CONFIG_DVB_B2C2_FLEXCOP_USB=m
++# CONFIG_DVB_B2C2_FLEXCOP_USB_DEBUG is not set
++CONFIG_DVB_AS102=m
++
++#
++# Webcam, TV (analog/digital) USB devices
++#
++CONFIG_VIDEO_EM28XX=m
++CONFIG_VIDEO_EM28XX_V4L2=m
++CONFIG_VIDEO_EM28XX_ALSA=m
++CONFIG_VIDEO_EM28XX_DVB=m
++CONFIG_VIDEO_EM28XX_RC=m
++
++#
++# Software defined radio USB devices
++#
++CONFIG_USB_AIRSPY=m
++CONFIG_USB_HACKRF=m
++CONFIG_USB_MSI2500=m
++CONFIG_MEDIA_PCI_SUPPORT=y
++
++#
++# Media capture support
++#
++CONFIG_VIDEO_MEYE=m
++CONFIG_VIDEO_SOLO6X10=m
++# CONFIG_VIDEO_TW5864 is not set
++CONFIG_VIDEO_TW68=m
++# CONFIG_VIDEO_TW686X is not set
++CONFIG_VIDEO_ZORAN=m
++CONFIG_VIDEO_ZORAN_DC30=m
++CONFIG_VIDEO_ZORAN_ZR36060=m
++CONFIG_VIDEO_ZORAN_BUZ=m
++CONFIG_VIDEO_ZORAN_DC10=m
++CONFIG_VIDEO_ZORAN_LML33=m
++CONFIG_VIDEO_ZORAN_LML33R10=m
++CONFIG_VIDEO_ZORAN_AVS6EYES=m
++
++#
++# Media capture/analog TV support
++#
++CONFIG_VIDEO_IVTV=m
++# CONFIG_VIDEO_IVTV_DEPRECATED_IOCTLS is not set
++CONFIG_VIDEO_IVTV_ALSA=m
++CONFIG_VIDEO_FB_IVTV=m
++CONFIG_VIDEO_HEXIUM_GEMINI=m
++CONFIG_VIDEO_HEXIUM_ORION=m
++CONFIG_VIDEO_MXB=m
++CONFIG_VIDEO_DT3155=m
++
++#
++# Media capture/analog/hybrid TV support
++#
++CONFIG_VIDEO_CX18=m
++CONFIG_VIDEO_CX18_ALSA=m
++CONFIG_VIDEO_CX23885=m
++CONFIG_MEDIA_ALTERA_CI=m
++CONFIG_VIDEO_CX25821=m
++CONFIG_VIDEO_CX25821_ALSA=m
++CONFIG_VIDEO_CX88=m
++CONFIG_VIDEO_CX88_ALSA=m
++CONFIG_VIDEO_CX88_BLACKBIRD=m
++CONFIG_VIDEO_CX88_DVB=m
++CONFIG_VIDEO_CX88_ENABLE_VP3054=y
++CONFIG_VIDEO_CX88_VP3054=m
++CONFIG_VIDEO_CX88_MPEG=m
++CONFIG_VIDEO_BT848=m
++CONFIG_DVB_BT8XX=m
++CONFIG_VIDEO_SAA7134=m
++CONFIG_VIDEO_SAA7134_ALSA=m
++CONFIG_VIDEO_SAA7134_RC=y
++CONFIG_VIDEO_SAA7134_DVB=m
++CONFIG_VIDEO_SAA7134_GO7007=m
++CONFIG_VIDEO_SAA7164=m
++CONFIG_VIDEO_COBALT=m
++
++#
++# Media digital TV PCI Adapters
++#
++CONFIG_DVB_AV7110_IR=y
++CONFIG_DVB_AV7110=m
++CONFIG_DVB_AV7110_OSD=y
++CONFIG_DVB_BUDGET_CORE=m
++CONFIG_DVB_BUDGET=m
++CONFIG_DVB_BUDGET_CI=m
++CONFIG_DVB_BUDGET_AV=m
++CONFIG_DVB_BUDGET_PATCH=m
++CONFIG_DVB_B2C2_FLEXCOP_PCI=m
++# CONFIG_DVB_B2C2_FLEXCOP_PCI_DEBUG is not set
++CONFIG_DVB_PLUTO2=m
++CONFIG_DVB_DM1105=m
++CONFIG_DVB_PT1=m
++CONFIG_DVB_PT3=m
++CONFIG_MANTIS_CORE=m
++CONFIG_DVB_MANTIS=m
++CONFIG_DVB_HOPPER=m
++CONFIG_DVB_NGENE=m
++CONFIG_DVB_DDBRIDGE=m
++# CONFIG_DVB_DDBRIDGE_MSIENABLE is not set
++CONFIG_DVB_SMIPCIE=m
++CONFIG_DVB_NETUP_UNIDVB=m
++# CONFIG_VIDEO_IPU3_CIO2 is not set
++CONFIG_V4L_PLATFORM_DRIVERS=y
++CONFIG_VIDEO_CAFE_CCIC=m
++CONFIG_VIDEO_VIA_CAMERA=m
++CONFIG_SOC_CAMERA=m
++CONFIG_SOC_CAMERA_PLATFORM=m
++CONFIG_V4L_MEM2MEM_DRIVERS=y
++CONFIG_VIDEO_MEM2MEM_DEINTERLACE=m
++CONFIG_VIDEO_SH_VEU=m
++CONFIG_V4L_TEST_DRIVERS=y
++# CONFIG_VIDEO_VIMC is not set
++CONFIG_VIDEO_VIVID=m
++# CONFIG_VIDEO_VIVID_CEC is not set
++CONFIG_VIDEO_VIVID_MAX_DEVS=64
++CONFIG_VIDEO_VIM2M=m
++CONFIG_DVB_PLATFORM_DRIVERS=y
++# CONFIG_SDR_PLATFORM_DRIVERS is not set
++
++#
++# Supported MMC/SDIO adapters
++#
++CONFIG_SMS_SDIO_DRV=m
++CONFIG_RADIO_ADAPTERS=y
++CONFIG_RADIO_TEA575X=m
++CONFIG_RADIO_SI470X=y
++CONFIG_USB_SI470X=m
++CONFIG_I2C_SI470X=m
++CONFIG_RADIO_SI4713=m
++CONFIG_USB_SI4713=m
++CONFIG_PLATFORM_SI4713=m
++CONFIG_I2C_SI4713=m
++CONFIG_RADIO_SI476X=m
++CONFIG_USB_MR800=m
++CONFIG_USB_DSBR=m
++CONFIG_RADIO_MAXIRADIO=m
++CONFIG_RADIO_SHARK=m
++CONFIG_RADIO_SHARK2=m
++CONFIG_USB_KEENE=m
++CONFIG_USB_RAREMONO=m
++CONFIG_USB_MA901=m
++CONFIG_RADIO_TEA5764=m
++CONFIG_RADIO_SAA7706H=m
++CONFIG_RADIO_TEF6862=m
++CONFIG_RADIO_WL1273=m
++
++#
++# Texas Instruments WL128x FM driver (ST based)
++#
++CONFIG_RADIO_WL128X=m
++
++#
++# Supported FireWire (IEEE 1394) Adapters
++#
++CONFIG_DVB_FIREDTV=m
++CONFIG_DVB_FIREDTV_INPUT=y
++CONFIG_MEDIA_COMMON_OPTIONS=y
++
++#
++# common driver options
++#
++CONFIG_VIDEO_CX2341X=m
++CONFIG_VIDEO_TVEEPROM=m
++CONFIG_CYPRESS_FIRMWARE=m
++CONFIG_VIDEOBUF2_CORE=m
++CONFIG_VIDEOBUF2_MEMOPS=m
++CONFIG_VIDEOBUF2_DMA_CONTIG=m
++CONFIG_VIDEOBUF2_VMALLOC=m
++CONFIG_VIDEOBUF2_DMA_SG=m
++CONFIG_VIDEOBUF2_DVB=m
++CONFIG_DVB_B2C2_FLEXCOP=m
++CONFIG_VIDEO_SAA7146=m
++CONFIG_VIDEO_SAA7146_VV=m
++CONFIG_SMS_SIANO_MDTV=m
++CONFIG_SMS_SIANO_RC=y
++CONFIG_SMS_SIANO_DEBUGFS=y
++CONFIG_VIDEO_V4L2_TPG=m
++
++#
++# Media ancillary drivers (tuners, sensors, i2c, spi, frontends)
++#
++CONFIG_MEDIA_SUBDRV_AUTOSELECT=y
++CONFIG_MEDIA_ATTACH=y
++CONFIG_VIDEO_IR_I2C=m
++
++#
++# Audio decoders, processors and mixers
++#
++CONFIG_VIDEO_TVAUDIO=m
++CONFIG_VIDEO_TDA7432=m
++CONFIG_VIDEO_TDA9840=m
++CONFIG_VIDEO_TEA6415C=m
++CONFIG_VIDEO_TEA6420=m
++CONFIG_VIDEO_MSP3400=m
++CONFIG_VIDEO_CS3308=m
++CONFIG_VIDEO_CS5345=m
++CONFIG_VIDEO_CS53L32A=m
++CONFIG_VIDEO_UDA1342=m
++CONFIG_VIDEO_WM8775=m
++CONFIG_VIDEO_WM8739=m
++CONFIG_VIDEO_VP27SMPX=m
++CONFIG_VIDEO_SONY_BTF_MPX=m
++
++#
++# RDS decoders
++#
++CONFIG_VIDEO_SAA6588=m
++
++#
++# Video decoders
++#
++CONFIG_VIDEO_ADV7604=m
++CONFIG_VIDEO_ADV7842=m
++CONFIG_VIDEO_BT819=m
++CONFIG_VIDEO_BT856=m
++CONFIG_VIDEO_BT866=m
++CONFIG_VIDEO_KS0127=m
++CONFIG_VIDEO_SAA7110=m
++CONFIG_VIDEO_SAA711X=m
++CONFIG_VIDEO_TVP5150=m
++CONFIG_VIDEO_TW2804=m
++CONFIG_VIDEO_TW9903=m
++CONFIG_VIDEO_TW9906=m
++CONFIG_VIDEO_VPX3220=m
++
++#
++# Video and audio decoders
++#
++CONFIG_VIDEO_SAA717X=m
++CONFIG_VIDEO_CX25840=m
++
++#
++# Video encoders
++#
++CONFIG_VIDEO_SAA7127=m
++CONFIG_VIDEO_SAA7185=m
++CONFIG_VIDEO_ADV7170=m
++CONFIG_VIDEO_ADV7175=m
++CONFIG_VIDEO_ADV7511=m
++
++#
++# Camera sensor devices
++#
++CONFIG_VIDEO_OV2640=m
++CONFIG_VIDEO_OV7640=m
++CONFIG_VIDEO_OV7670=m
++CONFIG_VIDEO_MT9M111=m
++CONFIG_VIDEO_MT9V011=m
++
++#
++# Flash devices
++#
++
++#
++# Video improvement chips
++#
++CONFIG_VIDEO_UPD64031A=m
++CONFIG_VIDEO_UPD64083=m
++
++#
++# Audio/Video compression chips
++#
++CONFIG_VIDEO_SAA6752HS=m
++
++#
++# SDR tuner chips
++#
++
++#
++# Miscellaneous helper chips
++#
++CONFIG_VIDEO_M52790=m
++
++#
++# Sensors used on soc_camera driver
++#
++
++#
++# soc_camera sensor drivers
++#
++CONFIG_SOC_CAMERA_IMX074=m
++CONFIG_SOC_CAMERA_MT9M001=m
++CONFIG_SOC_CAMERA_MT9M111=m
++CONFIG_SOC_CAMERA_MT9T031=m
++CONFIG_SOC_CAMERA_MT9T112=m
++CONFIG_SOC_CAMERA_MT9V022=m
++CONFIG_SOC_CAMERA_OV5642=m
++CONFIG_SOC_CAMERA_OV772X=m
++CONFIG_SOC_CAMERA_OV9640=m
++CONFIG_SOC_CAMERA_OV9740=m
++CONFIG_SOC_CAMERA_RJ54N1=m
++CONFIG_SOC_CAMERA_TW9910=m
++CONFIG_MEDIA_TUNER=m
++CONFIG_MEDIA_TUNER_SIMPLE=m
++CONFIG_MEDIA_TUNER_TDA18250=m
++CONFIG_MEDIA_TUNER_TDA8290=m
++CONFIG_MEDIA_TUNER_TDA827X=m
++CONFIG_MEDIA_TUNER_TDA18271=m
++CONFIG_MEDIA_TUNER_TDA9887=m
++CONFIG_MEDIA_TUNER_TEA5761=m
++CONFIG_MEDIA_TUNER_TEA5767=m
++CONFIG_MEDIA_TUNER_MSI001=m
++CONFIG_MEDIA_TUNER_MT20XX=m
++CONFIG_MEDIA_TUNER_MT2060=m
++CONFIG_MEDIA_TUNER_MT2063=m
++CONFIG_MEDIA_TUNER_MT2266=m
++CONFIG_MEDIA_TUNER_MT2131=m
++CONFIG_MEDIA_TUNER_QT1010=m
++CONFIG_MEDIA_TUNER_XC2028=m
++CONFIG_MEDIA_TUNER_XC5000=m
++CONFIG_MEDIA_TUNER_XC4000=m
++CONFIG_MEDIA_TUNER_MXL5005S=m
++CONFIG_MEDIA_TUNER_MXL5007T=m
++CONFIG_MEDIA_TUNER_MC44S803=m
++CONFIG_MEDIA_TUNER_MAX2165=m
++CONFIG_MEDIA_TUNER_TDA18218=m
++CONFIG_MEDIA_TUNER_FC0011=m
++CONFIG_MEDIA_TUNER_FC0012=m
++CONFIG_MEDIA_TUNER_FC0013=m
++CONFIG_MEDIA_TUNER_TDA18212=m
++CONFIG_MEDIA_TUNER_E4000=m
++CONFIG_MEDIA_TUNER_FC2580=m
++CONFIG_MEDIA_TUNER_M88RS6000T=m
++CONFIG_MEDIA_TUNER_TUA9001=m
++CONFIG_MEDIA_TUNER_SI2157=m
++CONFIG_MEDIA_TUNER_IT913X=m
++CONFIG_MEDIA_TUNER_R820T=m
++CONFIG_MEDIA_TUNER_MXL301RF=m
++CONFIG_MEDIA_TUNER_QM1D1C0042=m
++
++#
++# Multistandard (satellite) frontends
++#
++CONFIG_DVB_STB0899=m
++CONFIG_DVB_STB6100=m
++CONFIG_DVB_STV090x=m
++CONFIG_DVB_STV0910=m
++CONFIG_DVB_STV6110x=m
++CONFIG_DVB_STV6111=m
++CONFIG_DVB_MXL5XX=m
++CONFIG_DVB_M88DS3103=m
++
++#
++# Multistandard (cable + terrestrial) frontends
++#
++CONFIG_DVB_DRXK=m
++CONFIG_DVB_TDA18271C2DD=m
++CONFIG_DVB_SI2165=m
++CONFIG_DVB_MN88472=m
++CONFIG_DVB_MN88473=m
++
++#
++# DVB-S (satellite) frontends
++#
++CONFIG_DVB_CX24110=m
++CONFIG_DVB_CX24123=m
++CONFIG_DVB_MT312=m
++CONFIG_DVB_ZL10036=m
++CONFIG_DVB_ZL10039=m
++CONFIG_DVB_S5H1420=m
++CONFIG_DVB_STV0288=m
++CONFIG_DVB_STB6000=m
++CONFIG_DVB_STV0299=m
++CONFIG_DVB_STV6110=m
++CONFIG_DVB_STV0900=m
++CONFIG_DVB_TDA8083=m
++CONFIG_DVB_TDA10086=m
++CONFIG_DVB_TDA8261=m
++CONFIG_DVB_VES1X93=m
++CONFIG_DVB_TUNER_ITD1000=m
++CONFIG_DVB_TUNER_CX24113=m
++CONFIG_DVB_TDA826X=m
++CONFIG_DVB_TUA6100=m
++CONFIG_DVB_CX24116=m
++CONFIG_DVB_CX24117=m
++CONFIG_DVB_CX24120=m
++CONFIG_DVB_SI21XX=m
++CONFIG_DVB_TS2020=m
++CONFIG_DVB_DS3000=m
++CONFIG_DVB_MB86A16=m
++CONFIG_DVB_TDA10071=m
++
++#
++# DVB-T (terrestrial) frontends
++#
++CONFIG_DVB_SP8870=m
++CONFIG_DVB_SP887X=m
++CONFIG_DVB_CX22700=m
++CONFIG_DVB_CX22702=m
++CONFIG_DVB_DRXD=m
++CONFIG_DVB_L64781=m
++CONFIG_DVB_TDA1004X=m
++CONFIG_DVB_NXT6000=m
++CONFIG_DVB_MT352=m
++CONFIG_DVB_ZL10353=m
++CONFIG_DVB_DIB3000MB=m
++CONFIG_DVB_DIB3000MC=m
++CONFIG_DVB_DIB7000M=m
++CONFIG_DVB_DIB7000P=m
++CONFIG_DVB_TDA10048=m
++CONFIG_DVB_AF9013=m
++CONFIG_DVB_EC100=m
++CONFIG_DVB_STV0367=m
++CONFIG_DVB_CXD2820R=m
++CONFIG_DVB_CXD2841ER=m
++CONFIG_DVB_RTL2830=m
++CONFIG_DVB_RTL2832=m
++CONFIG_DVB_RTL2832_SDR=m
++CONFIG_DVB_SI2168=m
++CONFIG_DVB_AS102_FE=m
++CONFIG_DVB_GP8PSK_FE=m
++
++#
++# DVB-C (cable) frontends
++#
++CONFIG_DVB_VES1820=m
++CONFIG_DVB_TDA10021=m
++CONFIG_DVB_TDA10023=m
++CONFIG_DVB_STV0297=m
++
++#
++# ATSC (North American/Korean Terrestrial/Cable DTV) frontends
++#
++CONFIG_DVB_NXT200X=m
++CONFIG_DVB_OR51211=m
++CONFIG_DVB_OR51132=m
++CONFIG_DVB_BCM3510=m
++CONFIG_DVB_LGDT330X=m
++CONFIG_DVB_LGDT3305=m
++CONFIG_DVB_LGDT3306A=m
++CONFIG_DVB_LG2160=m
++CONFIG_DVB_S5H1409=m
++CONFIG_DVB_AU8522=m
++CONFIG_DVB_AU8522_DTV=m
++CONFIG_DVB_AU8522_V4L=m
++CONFIG_DVB_S5H1411=m
++
++#
++# ISDB-T (terrestrial) frontends
++#
++CONFIG_DVB_S921=m
++CONFIG_DVB_DIB8000=m
++CONFIG_DVB_MB86A20S=m
++
++#
++# ISDB-S (satellite) & ISDB-T (terrestrial) frontends
++#
++CONFIG_DVB_TC90522=m
++
++#
++# Digital terrestrial only tuners/PLL
++#
++CONFIG_DVB_PLL=m
++CONFIG_DVB_TUNER_DIB0070=m
++CONFIG_DVB_TUNER_DIB0090=m
++
++#
++# SEC control devices for DVB-S
++#
++CONFIG_DVB_DRX39XYJ=m
++CONFIG_DVB_LNBH25=m
++CONFIG_DVB_LNBP21=m
++CONFIG_DVB_LNBP22=m
++CONFIG_DVB_ISL6405=m
++CONFIG_DVB_ISL6421=m
++CONFIG_DVB_ISL6423=m
++CONFIG_DVB_A8293=m
++CONFIG_DVB_SP2=m
++CONFIG_DVB_LGS8GXX=m
++CONFIG_DVB_ATBM8830=m
++CONFIG_DVB_TDA665x=m
++CONFIG_DVB_IX2505V=m
++CONFIG_DVB_M88RS2000=m
++CONFIG_DVB_AF9033=m
++CONFIG_DVB_HORUS3A=m
++CONFIG_DVB_ASCOT2E=m
++CONFIG_DVB_HELENE=m
++
++#
++# Tools to develop new frontends
++#
++# CONFIG_DVB_DUMMY_FE is not set
++
++#
++# Graphics support
++#
++CONFIG_AGP=y
++CONFIG_AGP_AMD64=y
++CONFIG_AGP_INTEL=y
++CONFIG_AGP_SIS=m
++CONFIG_AGP_VIA=y
++CONFIG_INTEL_GTT=y
++CONFIG_VGA_ARB=y
++CONFIG_VGA_ARB_MAX_GPUS=16
++CONFIG_VGA_SWITCHEROO=y
++CONFIG_DRM=m
++CONFIG_DRM_MIPI_DSI=y
++# CONFIG_DRM_DP_AUX_CHARDEV is not set
++# CONFIG_DRM_DEBUG_MM_SELFTEST is not set
++CONFIG_DRM_KMS_HELPER=m
++CONFIG_DRM_KMS_FB_HELPER=y
++CONFIG_DRM_FBDEV_EMULATION=y
++CONFIG_DRM_FBDEV_OVERALLOC=100
++CONFIG_DRM_LOAD_EDID_FIRMWARE=y
++CONFIG_DRM_TTM=m
++CONFIG_DRM_VM=y
++CONFIG_DRM_SCHED=m
++
++#
++# I2C encoder or helper chips
++#
++CONFIG_DRM_I2C_CH7006=m
++CONFIG_DRM_I2C_SIL164=m
++CONFIG_DRM_I2C_NXP_TDA998X=m
++CONFIG_DRM_RADEON=m
++# CONFIG_DRM_RADEON_USERPTR is not set
++CONFIG_DRM_AMDGPU=m
++# CONFIG_DRM_AMDGPU_SI is not set
++CONFIG_DRM_AMDGPU_CIK=y
++CONFIG_DRM_AMDGPU_USERPTR=y
++# CONFIG_DRM_AMDGPU_GART_DEBUGFS is not set
++
++#
++# ACP (Audio CoProcessor) Configuration
++#
++# CONFIG_DRM_AMD_ACP is not set
++
++#
++# Display Engine Configuration
++#
++CONFIG_DRM_AMD_DC=y
++CONFIG_DRM_AMD_DC_PRE_VEGA=y
++# CONFIG_DRM_AMD_DC_FBC is not set
++CONFIG_DRM_AMD_DC_DCN1_0=y
++# CONFIG_DEBUG_KERNEL_DC is not set
++
++#
++# AMD Library routines
++#
++CONFIG_CHASH=m
++# CONFIG_CHASH_STATS is not set
++# CONFIG_CHASH_SELFTEST is not set
++CONFIG_DRM_NOUVEAU=m
++CONFIG_NOUVEAU_DEBUG=5
++CONFIG_NOUVEAU_DEBUG_DEFAULT=3
++# CONFIG_NOUVEAU_DEBUG_MMU is not set
++CONFIG_DRM_NOUVEAU_BACKLIGHT=y
++CONFIG_DRM_I915=m
++# CONFIG_DRM_I915_ALPHA_SUPPORT is not set
++CONFIG_DRM_I915_CAPTURE_ERROR=y
++CONFIG_DRM_I915_COMPRESS_ERROR=y
++CONFIG_DRM_I915_USERPTR=y
++# CONFIG_DRM_I915_GVT is not set
++
++#
++# drm/i915 Debugging
++#
++# CONFIG_DRM_I915_WERROR is not set
++# CONFIG_DRM_I915_DEBUG is not set
++# CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS is not set
++# CONFIG_DRM_I915_SW_FENCE_CHECK_DAG is not set
++# CONFIG_DRM_I915_SELFTEST is not set
++# CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS is not set
++# CONFIG_DRM_I915_DEBUG_VBLANK_EVADE is not set
++CONFIG_DRM_VGEM=m
++CONFIG_DRM_VMWGFX=m
++CONFIG_DRM_VMWGFX_FBCON=y
++CONFIG_DRM_GMA500=m
++CONFIG_DRM_GMA600=y
++CONFIG_DRM_GMA3600=y
++CONFIG_DRM_UDL=m
++CONFIG_DRM_AST=m
++# CONFIG_DRM_MGAG200 is not set
++CONFIG_DRM_CIRRUS_QEMU=m
++CONFIG_DRM_QXL=m
++# CONFIG_DRM_BOCHS is not set
++CONFIG_DRM_VIRTIO_GPU=m
++CONFIG_DRM_PANEL=y
++
++#
++# Display Panels
++#
++# CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN is not set
++CONFIG_DRM_BRIDGE=y
++CONFIG_DRM_PANEL_BRIDGE=y
++
++#
++# Display Interface Bridges
++#
++# CONFIG_DRM_ANALOGIX_ANX78XX is not set
++CONFIG_HSA_AMD=y
++# CONFIG_DRM_HISI_HIBMC is not set
++# CONFIG_DRM_TINYDRM is not set
++# CONFIG_DRM_LEGACY is not set
++CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y
++# CONFIG_DRM_LIB_RANDOM is not set
++
++#
++# Frame buffer Devices
++#
++CONFIG_FB=y
++CONFIG_FIRMWARE_EDID=y
++CONFIG_FB_CMDLINE=y
++CONFIG_FB_NOTIFY=y
++CONFIG_FB_DDC=m
++CONFIG_FB_BOOT_VESA_SUPPORT=y
++CONFIG_FB_CFB_FILLRECT=y
++CONFIG_FB_CFB_COPYAREA=y
++CONFIG_FB_CFB_IMAGEBLIT=y
++# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
++CONFIG_FB_SYS_FILLRECT=m
++CONFIG_FB_SYS_COPYAREA=m
++CONFIG_FB_SYS_IMAGEBLIT=m
++# CONFIG_FB_PROVIDE_GET_FB_UNMAPPED_AREA is not set
++# CONFIG_FB_FOREIGN_ENDIAN is not set
++CONFIG_FB_SYS_FOPS=m
++CONFIG_FB_DEFERRED_IO=y
++CONFIG_FB_HECUBA=m
++CONFIG_FB_SVGALIB=m
++# CONFIG_FB_MACMODES is not set
++CONFIG_FB_BACKLIGHT=y
++CONFIG_FB_MODE_HELPERS=y
++CONFIG_FB_TILEBLITTING=y
++
++#
++# Frame buffer hardware drivers
++#
++CONFIG_FB_CIRRUS=m
++CONFIG_FB_PM2=m
++CONFIG_FB_PM2_FIFO_DISCONNECT=y
++CONFIG_FB_CYBER2000=m
++CONFIG_FB_CYBER2000_DDC=y
++CONFIG_FB_ARC=m
++CONFIG_FB_ASILIANT=y
++CONFIG_FB_IMSTT=y
++CONFIG_FB_VGA16=m
++CONFIG_FB_UVESA=m
++CONFIG_FB_VESA=y
++CONFIG_FB_EFI=y
++CONFIG_FB_N411=m
++CONFIG_FB_HGA=m
++CONFIG_FB_OPENCORES=m
++CONFIG_FB_S1D13XXX=m
++CONFIG_FB_NVIDIA=m
++CONFIG_FB_NVIDIA_I2C=y
++# CONFIG_FB_NVIDIA_DEBUG is not set
++CONFIG_FB_NVIDIA_BACKLIGHT=y
++CONFIG_FB_RIVA=m
++CONFIG_FB_RIVA_I2C=y
++# CONFIG_FB_RIVA_DEBUG is not set
++CONFIG_FB_RIVA_BACKLIGHT=y
++CONFIG_FB_I740=m
++CONFIG_FB_LE80578=m
++CONFIG_FB_CARILLO_RANCH=m
++CONFIG_FB_INTEL=m
++# CONFIG_FB_INTEL_DEBUG is not set
++CONFIG_FB_INTEL_I2C=y
++CONFIG_FB_MATROX=m
++CONFIG_FB_MATROX_MILLENIUM=y
++CONFIG_FB_MATROX_MYSTIQUE=y
++CONFIG_FB_MATROX_G=y
++CONFIG_FB_MATROX_I2C=m
++CONFIG_FB_MATROX_MAVEN=m
++CONFIG_FB_RADEON=m
++CONFIG_FB_RADEON_I2C=y
++CONFIG_FB_RADEON_BACKLIGHT=y
++# CONFIG_FB_RADEON_DEBUG is not set
++CONFIG_FB_ATY128=m
++CONFIG_FB_ATY128_BACKLIGHT=y
++CONFIG_FB_ATY=m
++CONFIG_FB_ATY_CT=y
++# CONFIG_FB_ATY_GENERIC_LCD is not set
++CONFIG_FB_ATY_GX=y
++CONFIG_FB_ATY_BACKLIGHT=y
++CONFIG_FB_S3=m
++CONFIG_FB_S3_DDC=y
++CONFIG_FB_SAVAGE=m
++CONFIG_FB_SAVAGE_I2C=y
++# CONFIG_FB_SAVAGE_ACCEL is not set
++CONFIG_FB_SIS=m
++CONFIG_FB_SIS_300=y
++CONFIG_FB_SIS_315=y
++CONFIG_FB_VIA=m
++# CONFIG_FB_VIA_DIRECT_PROCFS is not set
++CONFIG_FB_VIA_X_COMPATIBILITY=y
++CONFIG_FB_NEOMAGIC=m
++CONFIG_FB_KYRO=m
++CONFIG_FB_3DFX=m
++# CONFIG_FB_3DFX_ACCEL is not set
++# CONFIG_FB_3DFX_I2C is not set
++CONFIG_FB_VOODOO1=m
++CONFIG_FB_VT8623=m
++CONFIG_FB_TRIDENT=m
++CONFIG_FB_ARK=m
++CONFIG_FB_PM3=m
++CONFIG_FB_CARMINE=m
++CONFIG_FB_CARMINE_DRAM_EVAL=y
++# CONFIG_CARMINE_DRAM_CUSTOM is not set
++CONFIG_FB_SM501=m
++CONFIG_FB_SMSCUFX=m
++CONFIG_FB_UDL=m
++CONFIG_FB_IBM_GXT4500=m
++# CONFIG_FB_VIRTUAL is not set
++CONFIG_XEN_FBDEV_FRONTEND=m
++CONFIG_FB_METRONOME=m
++CONFIG_FB_MB862XX=m
++CONFIG_FB_MB862XX_PCI_GDC=y
++CONFIG_FB_MB862XX_I2C=y
++CONFIG_FB_BROADSHEET=m
++CONFIG_FB_AUO_K190X=m
++CONFIG_FB_AUO_K1900=m
++CONFIG_FB_AUO_K1901=m
++CONFIG_FB_HYPERV=m
++CONFIG_FB_SIMPLE=y
++CONFIG_FB_SM712=m
++CONFIG_BACKLIGHT_LCD_SUPPORT=y
++CONFIG_LCD_CLASS_DEVICE=m
++CONFIG_LCD_L4F00242T03=m
++CONFIG_LCD_LMS283GF05=m
++CONFIG_LCD_LTV350QV=m
++CONFIG_LCD_ILI922X=m
++CONFIG_LCD_ILI9320=m
++CONFIG_LCD_TDO24M=m
++CONFIG_LCD_VGG2432A4=m
++CONFIG_LCD_PLATFORM=m
++CONFIG_LCD_S6E63M0=m
++CONFIG_LCD_LD9040=m
++CONFIG_LCD_AMS369FG06=m
++CONFIG_LCD_LMS501KF03=m
++CONFIG_LCD_HX8357=m
++CONFIG_BACKLIGHT_CLASS_DEVICE=y
++CONFIG_BACKLIGHT_GENERIC=m
++CONFIG_BACKLIGHT_LM3533=m
++CONFIG_BACKLIGHT_CARILLO_RANCH=m
++CONFIG_BACKLIGHT_PWM=m
++CONFIG_BACKLIGHT_DA903X=m
++CONFIG_BACKLIGHT_DA9052=m
++CONFIG_BACKLIGHT_MAX8925=m
++CONFIG_BACKLIGHT_APPLE=m
++CONFIG_BACKLIGHT_PM8941_WLED=m
++CONFIG_BACKLIGHT_SAHARA=m
++CONFIG_BACKLIGHT_WM831X=m
++CONFIG_BACKLIGHT_ADP5520=m
++CONFIG_BACKLIGHT_ADP8860=m
++CONFIG_BACKLIGHT_ADP8870=m
++CONFIG_BACKLIGHT_88PM860X=m
++CONFIG_BACKLIGHT_PCF50633=m
++CONFIG_BACKLIGHT_AAT2870=m
++CONFIG_BACKLIGHT_LM3630A=m
++CONFIG_BACKLIGHT_LM3639=m
++CONFIG_BACKLIGHT_LP855X=m
++CONFIG_BACKLIGHT_LP8788=m
++CONFIG_BACKLIGHT_PANDORA=m
++CONFIG_BACKLIGHT_SKY81452=m
++CONFIG_BACKLIGHT_AS3711=m
++CONFIG_BACKLIGHT_GPIO=m
++CONFIG_BACKLIGHT_LV5207LP=m
++CONFIG_BACKLIGHT_BD6107=m
++# CONFIG_BACKLIGHT_ARCXCNN is not set
++CONFIG_VGASTATE=m
++CONFIG_HDMI=y
++
++#
++# Console display driver support
++#
++CONFIG_VGA_CONSOLE=y
++# CONFIG_VGACON_SOFT_SCROLLBACK is not set
++CONFIG_DUMMY_CONSOLE=y
++CONFIG_DUMMY_CONSOLE_COLUMNS=80
++CONFIG_DUMMY_CONSOLE_ROWS=25
++CONFIG_FRAMEBUFFER_CONSOLE=y
++CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
++CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
++# CONFIG_LOGO is not set
++CONFIG_SOUND=m
++CONFIG_SOUND_OSS_CORE=y
++# CONFIG_SOUND_OSS_CORE_PRECLAIM is not set
++CONFIG_SND=m
++CONFIG_SND_TIMER=m
++CONFIG_SND_PCM=m
++CONFIG_SND_PCM_ELD=y
++CONFIG_SND_PCM_IEC958=y
++CONFIG_SND_DMAENGINE_PCM=m
++CONFIG_SND_HWDEP=m
++CONFIG_SND_SEQ_DEVICE=m
++CONFIG_SND_RAWMIDI=m
++CONFIG_SND_JACK=y
++CONFIG_SND_JACK_INPUT_DEV=y
++CONFIG_SND_OSSEMUL=y
++CONFIG_SND_MIXER_OSS=m
++CONFIG_SND_PCM_OSS=m
++CONFIG_SND_PCM_OSS_PLUGINS=y
++CONFIG_SND_PCM_TIMER=y
++CONFIG_SND_HRTIMER=m
++CONFIG_SND_DYNAMIC_MINORS=y
++CONFIG_SND_MAX_CARDS=32
++CONFIG_SND_SUPPORT_OLD_API=y
++CONFIG_SND_PROC_FS=y
++CONFIG_SND_VERBOSE_PROCFS=y
++# CONFIG_SND_VERBOSE_PRINTK is not set
++# CONFIG_SND_DEBUG is not set
++CONFIG_SND_VMASTER=y
++CONFIG_SND_DMA_SGBUF=y
++CONFIG_SND_SEQUENCER=m
++CONFIG_SND_SEQ_DUMMY=m
++# CONFIG_SND_SEQUENCER_OSS is not set
++CONFIG_SND_SEQ_HRTIMER_DEFAULT=y
++CONFIG_SND_SEQ_MIDI_EVENT=m
++CONFIG_SND_SEQ_MIDI=m
++CONFIG_SND_SEQ_MIDI_EMUL=m
++CONFIG_SND_SEQ_VIRMIDI=m
++CONFIG_SND_MPU401_UART=m
++CONFIG_SND_OPL3_LIB=m
++CONFIG_SND_OPL3_LIB_SEQ=m
++# CONFIG_SND_OPL4_LIB_SEQ is not set
++CONFIG_SND_VX_LIB=m
++CONFIG_SND_AC97_CODEC=m
++CONFIG_SND_DRIVERS=y
++CONFIG_SND_PCSP=m
++CONFIG_SND_DUMMY=m
++CONFIG_SND_ALOOP=m
++CONFIG_SND_VIRMIDI=m
++CONFIG_SND_MTPAV=m
++CONFIG_SND_MTS64=m
++CONFIG_SND_SERIAL_U16550=m
++CONFIG_SND_MPU401=m
++CONFIG_SND_PORTMAN2X4=m
++CONFIG_SND_AC97_POWER_SAVE=y
++CONFIG_SND_AC97_POWER_SAVE_DEFAULT=0
++CONFIG_SND_SB_COMMON=m
++CONFIG_SND_PCI=y
++CONFIG_SND_AD1889=m
++CONFIG_SND_ALS4000=m
++CONFIG_SND_ASIHPI=m
++CONFIG_SND_ATIIXP=m
++CONFIG_SND_ATIIXP_MODEM=m
++CONFIG_SND_AU8810=m
++CONFIG_SND_AU8820=m
++CONFIG_SND_AU8830=m
++CONFIG_SND_AW2=m
++CONFIG_SND_BT87X=m
++# CONFIG_SND_BT87X_OVERCLOCK is not set
++CONFIG_SND_CA0106=m
++CONFIG_SND_CMIPCI=m
++CONFIG_SND_OXYGEN_LIB=m
++CONFIG_SND_OXYGEN=m
++CONFIG_SND_CS4281=m
++CONFIG_SND_CS46XX=m
++CONFIG_SND_CS46XX_NEW_DSP=y
++CONFIG_SND_CTXFI=m
++CONFIG_SND_DARLA20=m
++CONFIG_SND_GINA20=m
++CONFIG_SND_LAYLA20=m
++CONFIG_SND_DARLA24=m
++CONFIG_SND_GINA24=m
++CONFIG_SND_LAYLA24=m
++CONFIG_SND_MONA=m
++CONFIG_SND_MIA=m
++CONFIG_SND_ECHO3G=m
++CONFIG_SND_INDIGO=m
++CONFIG_SND_INDIGOIO=m
++CONFIG_SND_INDIGODJ=m
++CONFIG_SND_INDIGOIOX=m
++CONFIG_SND_INDIGODJX=m
++# CONFIG_SND_EMU10K1_SEQ is not set
++CONFIG_SND_ENS1370=m
++CONFIG_SND_ENS1371=m
++CONFIG_SND_FM801=m
++CONFIG_SND_FM801_TEA575X_BOOL=y
++CONFIG_SND_HDSP=m
++CONFIG_SND_HDSPM=m
++CONFIG_SND_ICE1724=m
++CONFIG_SND_INTEL8X0=m
++CONFIG_SND_INTEL8X0M=m
++CONFIG_SND_KORG1212=m
++CONFIG_SND_LOLA=m
++CONFIG_SND_LX6464ES=m
++CONFIG_SND_MIXART=m
++CONFIG_SND_NM256=m
++CONFIG_SND_PCXHR=m
++CONFIG_SND_RIPTIDE=m
++CONFIG_SND_RME32=m
++CONFIG_SND_RME96=m
++CONFIG_SND_RME9652=m
++CONFIG_SND_VIA82XX=m
++CONFIG_SND_VIA82XX_MODEM=m
++CONFIG_SND_VIRTUOSO=m
++CONFIG_SND_VX222=m
++CONFIG_SND_YMFPCI=m
++
++#
++# HD-Audio
++#
++CONFIG_SND_HDA=m
++CONFIG_SND_HDA_INTEL=m
++CONFIG_SND_HDA_HWDEP=y
++CONFIG_SND_HDA_RECONFIG=y
++CONFIG_SND_HDA_INPUT_BEEP=y
++CONFIG_SND_HDA_INPUT_BEEP_MODE=0
++CONFIG_SND_HDA_PATCH_LOADER=y
++CONFIG_SND_HDA_CODEC_REALTEK=m
++CONFIG_SND_HDA_CODEC_ANALOG=m
++CONFIG_SND_HDA_CODEC_SIGMATEL=m
++CONFIG_SND_HDA_CODEC_VIA=m
++CONFIG_SND_HDA_CODEC_HDMI=m
++CONFIG_SND_HDA_CODEC_CIRRUS=m
++CONFIG_SND_HDA_CODEC_CONEXANT=m
++CONFIG_SND_HDA_CODEC_CA0110=m
++CONFIG_SND_HDA_CODEC_CA0132=m
++CONFIG_SND_HDA_CODEC_CA0132_DSP=y
++CONFIG_SND_HDA_CODEC_CMEDIA=m
++CONFIG_SND_HDA_CODEC_SI3054=m
++CONFIG_SND_HDA_GENERIC=m
++CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0
++CONFIG_SND_HDA_CORE=m
++CONFIG_SND_HDA_DSP_LOADER=y
++CONFIG_SND_HDA_I915=y
++CONFIG_SND_HDA_PREALLOC_SIZE=64
++CONFIG_SND_SPI=y
++CONFIG_SND_USB=y
++CONFIG_SND_USB_AUDIO=m
++CONFIG_SND_USB_UA101=m
++CONFIG_SND_USB_USX2Y=m
++CONFIG_SND_USB_CAIAQ=m
++CONFIG_SND_USB_CAIAQ_INPUT=y
++CONFIG_SND_USB_US122L=m
++CONFIG_SND_USB_6FIRE=m
++CONFIG_SND_USB_HIFACE=m
++CONFIG_SND_BCD2000=m
++CONFIG_SND_USB_LINE6=m
++CONFIG_SND_USB_POD=m
++CONFIG_SND_USB_PODHD=m
++CONFIG_SND_USB_TONEPORT=m
++CONFIG_SND_USB_VARIAX=m
++CONFIG_SND_FIREWIRE=y
++CONFIG_SND_FIREWIRE_LIB=m
++CONFIG_SND_DICE=m
++CONFIG_SND_OXFW=m
++CONFIG_SND_ISIGHT=m
++CONFIG_SND_FIREWORKS=m
++CONFIG_SND_BEBOB=m
++CONFIG_SND_FIREWIRE_DIGI00X=m
++CONFIG_SND_FIREWIRE_TASCAM=m
++# CONFIG_SND_FIREWIRE_MOTU is not set
++# CONFIG_SND_FIREFACE is not set
++CONFIG_SND_PCMCIA=y
++CONFIG_SND_VXPOCKET=m
++CONFIG_SND_PDAUDIOCF=m
++CONFIG_SND_SOC=m
++CONFIG_SND_SOC_AC97_BUS=y
++CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM=y
++# CONFIG_SND_SOC_AMD_ACP is not set
++# CONFIG_SND_SOC_AMD_ACP3x is not set
++CONFIG_SND_ATMEL_SOC=m
++CONFIG_SND_DESIGNWARE_I2S=m
++# CONFIG_SND_DESIGNWARE_PCM is not set
++
++#
++# SoC Audio for Freescale CPUs
++#
++
++#
++# Common SoC Audio options for Freescale CPUs:
++#
++CONFIG_SND_SOC_FSL_ASRC=m
++CONFIG_SND_SOC_FSL_SAI=m
++CONFIG_SND_SOC_FSL_SSI=m
++CONFIG_SND_SOC_FSL_SPDIF=m
++CONFIG_SND_SOC_FSL_ESAI=m
++CONFIG_SND_SOC_IMX_AUDMUX=m
++# CONFIG_SND_I2S_HI6210_I2S is not set
++# CONFIG_SND_SOC_IMG is not set
++# CONFIG_SND_SOC_INTEL_SST_TOPLEVEL is not set
++
++#
++# STMicroelectronics STM32 SOC audio support
++#
++CONFIG_SND_SOC_XTFPGA_I2S=m
++# CONFIG_ZX_TDM is not set
++CONFIG_SND_SOC_I2C_AND_SPI=m
++
++#
++# CODEC drivers
++#
++CONFIG_SND_SOC_AC97_CODEC=m
++CONFIG_SND_SOC_ADAU1701=m
++# CONFIG_SND_SOC_ADAU1761_I2C is not set
++# CONFIG_SND_SOC_ADAU1761_SPI is not set
++# CONFIG_SND_SOC_ADAU7002 is not set
++CONFIG_SND_SOC_AK4104=m
++CONFIG_SND_SOC_AK4554=m
++CONFIG_SND_SOC_AK4613=m
++CONFIG_SND_SOC_AK4642=m
++CONFIG_SND_SOC_AK5386=m
++CONFIG_SND_SOC_ALC5623=m
++# CONFIG_SND_SOC_BT_SCO is not set
++CONFIG_SND_SOC_CS35L32=m
++# CONFIG_SND_SOC_CS35L33 is not set
++# CONFIG_SND_SOC_CS35L34 is not set
++# CONFIG_SND_SOC_CS35L35 is not set
++# CONFIG_SND_SOC_CS42L42 is not set
++CONFIG_SND_SOC_CS42L51=m
++CONFIG_SND_SOC_CS42L51_I2C=m
++CONFIG_SND_SOC_CS42L52=m
++CONFIG_SND_SOC_CS42L56=m
++CONFIG_SND_SOC_CS42L73=m
++CONFIG_SND_SOC_CS4265=m
++CONFIG_SND_SOC_CS4270=m
++CONFIG_SND_SOC_CS4271=m
++CONFIG_SND_SOC_CS4271_I2C=m
++CONFIG_SND_SOC_CS4271_SPI=m
++CONFIG_SND_SOC_CS42XX8=m
++CONFIG_SND_SOC_CS42XX8_I2C=m
++# CONFIG_SND_SOC_CS43130 is not set
++CONFIG_SND_SOC_CS4349=m
++# CONFIG_SND_SOC_CS53L30 is not set
++# CONFIG_SND_SOC_DIO2125 is not set
++CONFIG_SND_SOC_HDMI_CODEC=m
++# CONFIG_SND_SOC_ES7134 is not set
++# CONFIG_SND_SOC_ES8316 is not set
++# CONFIG_SND_SOC_ES8328_I2C is not set
++# CONFIG_SND_SOC_ES8328_SPI is not set
++CONFIG_SND_SOC_GTM601=m
++# CONFIG_SND_SOC_INNO_RK3036 is not set
++# CONFIG_SND_SOC_MAX98504 is not set
++# CONFIG_SND_SOC_MAX98927 is not set
++# CONFIG_SND_SOC_MAX98373 is not set
++# CONFIG_SND_SOC_MAX9860 is not set
++# CONFIG_SND_SOC_MSM8916_WCD_ANALOG is not set
++# CONFIG_SND_SOC_MSM8916_WCD_DIGITAL is not set
++CONFIG_SND_SOC_PCM1681=m
++# CONFIG_SND_SOC_PCM179X_I2C is not set
++# CONFIG_SND_SOC_PCM179X_SPI is not set
++# CONFIG_SND_SOC_PCM186X_I2C is not set
++# CONFIG_SND_SOC_PCM186X_SPI is not set
++# CONFIG_SND_SOC_PCM3168A_I2C is not set
++# CONFIG_SND_SOC_PCM3168A_SPI is not set
++CONFIG_SND_SOC_PCM512x=m
++CONFIG_SND_SOC_PCM512x_I2C=m
++CONFIG_SND_SOC_PCM512x_SPI=m
++# CONFIG_SND_SOC_RT5514_SPI_BUILTIN is not set
++# CONFIG_SND_SOC_RT5616 is not set
++CONFIG_SND_SOC_RT5631=m
++# CONFIG_SND_SOC_RT5677_SPI is not set
++CONFIG_SND_SOC_SGTL5000=m
++CONFIG_SND_SOC_SI476X=m
++CONFIG_SND_SOC_SIGMADSP=m
++CONFIG_SND_SOC_SIGMADSP_I2C=m
++CONFIG_SND_SOC_SIRF_AUDIO_CODEC=m
++CONFIG_SND_SOC_SPDIF=m
++CONFIG_SND_SOC_SSM2602=m
++CONFIG_SND_SOC_SSM2602_SPI=m
++CONFIG_SND_SOC_SSM2602_I2C=m
++CONFIG_SND_SOC_SSM4567=m
++CONFIG_SND_SOC_STA32X=m
++CONFIG_SND_SOC_STA350=m
++CONFIG_SND_SOC_STI_SAS=m
++CONFIG_SND_SOC_TAS2552=m
++CONFIG_SND_SOC_TAS5086=m
++CONFIG_SND_SOC_TAS571X=m
++# CONFIG_SND_SOC_TAS5720 is not set
++# CONFIG_SND_SOC_TAS6424 is not set
++CONFIG_SND_SOC_TFA9879=m
++CONFIG_SND_SOC_TLV320AIC23=m
++CONFIG_SND_SOC_TLV320AIC23_I2C=m
++CONFIG_SND_SOC_TLV320AIC23_SPI=m
++CONFIG_SND_SOC_TLV320AIC31XX=m
++# CONFIG_SND_SOC_TLV320AIC32X4_I2C is not set
++# CONFIG_SND_SOC_TLV320AIC32X4_SPI is not set
++CONFIG_SND_SOC_TLV320AIC3X=m
++CONFIG_SND_SOC_TS3A227E=m
++# CONFIG_SND_SOC_TSCS42XX is not set
++CONFIG_SND_SOC_WM8510=m
++CONFIG_SND_SOC_WM8523=m
++# CONFIG_SND_SOC_WM8524 is not set
++CONFIG_SND_SOC_WM8580=m
++CONFIG_SND_SOC_WM8711=m
++CONFIG_SND_SOC_WM8728=m
++CONFIG_SND_SOC_WM8731=m
++CONFIG_SND_SOC_WM8737=m
++CONFIG_SND_SOC_WM8741=m
++CONFIG_SND_SOC_WM8750=m
++CONFIG_SND_SOC_WM8753=m
++CONFIG_SND_SOC_WM8770=m
++CONFIG_SND_SOC_WM8776=m
++CONFIG_SND_SOC_WM8804=m
++CONFIG_SND_SOC_WM8804_I2C=m
++CONFIG_SND_SOC_WM8804_SPI=m
++CONFIG_SND_SOC_WM8903=m
++# CONFIG_SND_SOC_WM8960 is not set
++CONFIG_SND_SOC_WM8962=m
++# CONFIG_SND_SOC_WM8974 is not set
++CONFIG_SND_SOC_WM8978=m
++# CONFIG_SND_SOC_WM8985 is not set
++# CONFIG_SND_SOC_ZX_AUD96P22 is not set
++# CONFIG_SND_SOC_NAU8540 is not set
++# CONFIG_SND_SOC_NAU8810 is not set
++# CONFIG_SND_SOC_NAU8824 is not set
++CONFIG_SND_SOC_TPA6130A2=m
++CONFIG_SND_SIMPLE_CARD_UTILS=m
++CONFIG_SND_SIMPLE_CARD=m
++CONFIG_SND_X86=y
++# CONFIG_HDMI_LPE_AUDIO is not set
++CONFIG_AC97_BUS=m
++
++#
++# HID support
++#
++CONFIG_HID=m
++CONFIG_HID_BATTERY_STRENGTH=y
++CONFIG_HIDRAW=y
++CONFIG_UHID=m
++CONFIG_HID_GENERIC=m
++
++#
++# Special HID drivers
++#
++CONFIG_HID_A4TECH=m
++# CONFIG_HID_ACCUTOUCH is not set
++CONFIG_HID_ACRUX=m
++CONFIG_HID_ACRUX_FF=y
++CONFIG_HID_APPLE=m
++CONFIG_HID_APPLEIR=m
++# CONFIG_HID_ASUS is not set
++CONFIG_HID_AUREAL=m
++CONFIG_HID_BELKIN=m
++CONFIG_HID_BETOP_FF=m
++CONFIG_HID_CHERRY=m
++CONFIG_HID_CHICONY=m
++CONFIG_HID_CORSAIR=m
++CONFIG_HID_PRODIKEYS=m
++# CONFIG_HID_CMEDIA is not set
++CONFIG_HID_CP2112=m
++CONFIG_HID_CYPRESS=m
++CONFIG_HID_DRAGONRISE=m
++CONFIG_DRAGONRISE_FF=y
++CONFIG_HID_EMS_FF=m
++CONFIG_HID_ELECOM=m
++CONFIG_HID_ELO=m
++CONFIG_HID_EZKEY=m
++CONFIG_HID_GEMBIRD=m
++CONFIG_HID_GFRM=m
++CONFIG_HID_HOLTEK=m
++CONFIG_HOLTEK_FF=y
++CONFIG_HID_GT683R=m
++CONFIG_HID_KEYTOUCH=m
++CONFIG_HID_KYE=m
++CONFIG_HID_UCLOGIC=m
++CONFIG_HID_WALTOP=m
++CONFIG_HID_GYRATION=m
++CONFIG_HID_ICADE=m
++# CONFIG_HID_ITE is not set
++# CONFIG_HID_JABRA is not set
++CONFIG_HID_TWINHAN=m
++CONFIG_HID_KENSINGTON=m
++CONFIG_HID_LCPOWER=m
++CONFIG_HID_LED=m
++CONFIG_HID_LENOVO=m
++CONFIG_HID_LOGITECH=m
++CONFIG_HID_LOGITECH_DJ=m
++CONFIG_HID_LOGITECH_HIDPP=m
++CONFIG_LOGITECH_FF=y
++CONFIG_LOGIRUMBLEPAD2_FF=y
++CONFIG_LOGIG940_FF=y
++CONFIG_LOGIWHEELS_FF=y
++CONFIG_HID_MAGICMOUSE=m
++# CONFIG_HID_MAYFLASH is not set
++CONFIG_HID_MICROSOFT=m
++CONFIG_HID_MONTEREY=m
++CONFIG_HID_MULTITOUCH=m
++# CONFIG_HID_NTI is not set
++CONFIG_HID_NTRIG=m
++CONFIG_HID_ORTEK=m
++CONFIG_HID_PANTHERLORD=m
++CONFIG_PANTHERLORD_FF=y
++CONFIG_HID_PENMOUNT=m
++CONFIG_HID_PETALYNX=m
++CONFIG_HID_PICOLCD=m
++CONFIG_HID_PICOLCD_FB=y
++CONFIG_HID_PICOLCD_BACKLIGHT=y
++CONFIG_HID_PICOLCD_LCD=y
++CONFIG_HID_PICOLCD_LEDS=y
++CONFIG_HID_PICOLCD_CIR=y
++CONFIG_HID_PLANTRONICS=m
++CONFIG_HID_PRIMAX=m
++# CONFIG_HID_RETRODE is not set
++CONFIG_HID_ROCCAT=m
++CONFIG_HID_SAITEK=m
++CONFIG_HID_SAMSUNG=m
++CONFIG_HID_SONY=m
++CONFIG_SONY_FF=y
++CONFIG_HID_SPEEDLINK=m
++CONFIG_HID_STEELSERIES=m
++CONFIG_HID_SUNPLUS=m
++CONFIG_HID_RMI=m
++CONFIG_HID_GREENASIA=m
++CONFIG_GREENASIA_FF=y
++CONFIG_HID_HYPERV_MOUSE=m
++CONFIG_HID_SMARTJOYPLUS=m
++CONFIG_SMARTJOYPLUS_FF=y
++CONFIG_HID_TIVO=m
++CONFIG_HID_TOPSEED=m
++CONFIG_HID_THINGM=m
++CONFIG_HID_THRUSTMASTER=m
++CONFIG_THRUSTMASTER_FF=y
++# CONFIG_HID_UDRAW_PS3 is not set
++CONFIG_HID_WACOM=m
++CONFIG_HID_WIIMOTE=m
++CONFIG_HID_XINMO=m
++CONFIG_HID_ZEROPLUS=m
++CONFIG_ZEROPLUS_FF=y
++CONFIG_HID_ZYDACRON=m
++CONFIG_HID_SENSOR_HUB=m
++CONFIG_HID_SENSOR_CUSTOM_SENSOR=m
++# CONFIG_HID_ALPS is not set
++
++#
++# USB HID support
++#
++CONFIG_USB_HID=m
++CONFIG_HID_PID=y
++CONFIG_USB_HIDDEV=y
++
++#
++# USB HID Boot Protocol drivers
++#
++CONFIG_USB_KBD=m
++CONFIG_USB_MOUSE=m
++
++#
++# I2C HID support
++#
++CONFIG_I2C_HID=m
++
++#
++# Intel ISH HID support
++#
++# CONFIG_INTEL_ISH_HID is not set
++CONFIG_USB_OHCI_LITTLE_ENDIAN=y
++CONFIG_USB_SUPPORT=y
++CONFIG_USB_COMMON=y
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB=y
++CONFIG_USB_PCI=y
++CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
++
++#
++# Miscellaneous USB options
++#
++CONFIG_USB_DEFAULT_PERSIST=y
++CONFIG_USB_DYNAMIC_MINORS=y
++# CONFIG_USB_OTG is not set
++# CONFIG_USB_OTG_WHITELIST is not set
++# CONFIG_USB_OTG_BLACKLIST_HUB is not set
++# CONFIG_USB_LEDS_TRIGGER_USBPORT is not set
++CONFIG_USB_MON=m
++CONFIG_USB_WUSB=m
++CONFIG_USB_WUSB_CBAF=m
++# CONFIG_USB_WUSB_CBAF_DEBUG is not set
++
++#
++# USB Host Controller Drivers
++#
++CONFIG_USB_C67X00_HCD=m
++CONFIG_USB_XHCI_HCD=y
++# CONFIG_USB_XHCI_DBGCAP is not set
++CONFIG_USB_XHCI_PCI=y
++CONFIG_USB_XHCI_PLATFORM=m
++CONFIG_USB_EHCI_HCD=y
++CONFIG_USB_EHCI_ROOT_HUB_TT=y
++CONFIG_USB_EHCI_TT_NEWSCHED=y
++CONFIG_USB_EHCI_PCI=y
++CONFIG_USB_EHCI_HCD_PLATFORM=y
++CONFIG_USB_OXU210HP_HCD=m
++CONFIG_USB_ISP116X_HCD=m
++CONFIG_USB_ISP1362_HCD=m
++CONFIG_USB_FOTG210_HCD=m
++CONFIG_USB_MAX3421_HCD=m
++CONFIG_USB_OHCI_HCD=y
++CONFIG_USB_OHCI_HCD_PCI=y
++CONFIG_USB_OHCI_HCD_PLATFORM=y
++CONFIG_USB_UHCI_HCD=y
++CONFIG_USB_U132_HCD=m
++CONFIG_USB_SL811_HCD=m
++CONFIG_USB_SL811_HCD_ISO=y
++CONFIG_USB_SL811_CS=m
++CONFIG_USB_R8A66597_HCD=m
++CONFIG_USB_WHCI_HCD=m
++CONFIG_USB_HWA_HCD=m
++CONFIG_USB_HCD_BCMA=m
++CONFIG_USB_HCD_SSB=m
++# CONFIG_USB_HCD_TEST_MODE is not set
++
++#
++# USB Device Class drivers
++#
++CONFIG_USB_ACM=m
++CONFIG_USB_PRINTER=m
++CONFIG_USB_WDM=m
++CONFIG_USB_TMC=m
++
++#
++# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
++#
++
++#
++# also be needed; see USB_STORAGE Help for more info
++#
++CONFIG_USB_STORAGE=m
++# CONFIG_USB_STORAGE_DEBUG is not set
++CONFIG_USB_STORAGE_REALTEK=m
++CONFIG_REALTEK_AUTOPM=y
++CONFIG_USB_STORAGE_DATAFAB=m
++CONFIG_USB_STORAGE_FREECOM=m
++CONFIG_USB_STORAGE_ISD200=m
++CONFIG_USB_STORAGE_USBAT=m
++CONFIG_USB_STORAGE_SDDR09=m
++CONFIG_USB_STORAGE_SDDR55=m
++CONFIG_USB_STORAGE_JUMPSHOT=m
++CONFIG_USB_STORAGE_ALAUDA=m
++CONFIG_USB_STORAGE_ONETOUCH=m
++CONFIG_USB_STORAGE_KARMA=m
++CONFIG_USB_STORAGE_CYPRESS_ATACB=m
++CONFIG_USB_STORAGE_ENE_UB6250=m
++CONFIG_USB_UAS=m
++
++#
++# USB Imaging devices
++#
++CONFIG_USB_MDC800=m
++CONFIG_USB_MICROTEK=m
++CONFIG_USBIP_CORE=m
++CONFIG_USBIP_VHCI_HCD=m
++CONFIG_USBIP_VHCI_HC_PORTS=8
++CONFIG_USBIP_VHCI_NR_HCS=1
++CONFIG_USBIP_HOST=m
++# CONFIG_USBIP_VUDC is not set
++# CONFIG_USBIP_DEBUG is not set
++CONFIG_USB_MUSB_HDRC=m
++# CONFIG_USB_MUSB_HOST is not set
++# CONFIG_USB_MUSB_GADGET is not set
++CONFIG_USB_MUSB_DUAL_ROLE=y
++
++#
++# Platform Glue Layer
++#
++
++#
++# MUSB DMA mode
++#
++CONFIG_MUSB_PIO_ONLY=y
++CONFIG_USB_DWC3=m
++CONFIG_USB_DWC3_ULPI=y
++# CONFIG_USB_DWC3_HOST is not set
++# CONFIG_USB_DWC3_GADGET is not set
++CONFIG_USB_DWC3_DUAL_ROLE=y
++
++#
++# Platform Glue Driver Support
++#
++CONFIG_USB_DWC3_PCI=m
++CONFIG_USB_DWC2=y
++CONFIG_USB_DWC2_HOST=y
++
++#
++# Gadget/Dual-role mode requires USB Gadget support to be enabled
++#
++CONFIG_USB_DWC2_PCI=m
++# CONFIG_USB_DWC2_DEBUG is not set
++# CONFIG_USB_DWC2_TRACK_MISSED_SOFS is not set
++CONFIG_USB_CHIPIDEA=m
++CONFIG_USB_CHIPIDEA_PCI=m
++CONFIG_USB_CHIPIDEA_UDC=y
++CONFIG_USB_CHIPIDEA_HOST=y
++# CONFIG_USB_CHIPIDEA_ULPI is not set
++CONFIG_USB_ISP1760=m
++CONFIG_USB_ISP1760_HCD=y
++CONFIG_USB_ISP1761_UDC=y
++# CONFIG_USB_ISP1760_HOST_ROLE is not set
++# CONFIG_USB_ISP1760_GADGET_ROLE is not set
++CONFIG_USB_ISP1760_DUAL_ROLE=y
++
++#
++# USB port drivers
++#
++CONFIG_USB_USS720=m
++CONFIG_USB_SERIAL=m
++CONFIG_USB_SERIAL_GENERIC=y
++CONFIG_USB_SERIAL_SIMPLE=m
++CONFIG_USB_SERIAL_AIRCABLE=m
++CONFIG_USB_SERIAL_ARK3116=m
++CONFIG_USB_SERIAL_BELKIN=m
++CONFIG_USB_SERIAL_CH341=m
++CONFIG_USB_SERIAL_WHITEHEAT=m
++CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
++CONFIG_USB_SERIAL_CP210X=m
++CONFIG_USB_SERIAL_CYPRESS_M8=m
++CONFIG_USB_SERIAL_EMPEG=m
++CONFIG_USB_SERIAL_FTDI_SIO=m
++CONFIG_USB_SERIAL_VISOR=m
++CONFIG_USB_SERIAL_IPAQ=m
++CONFIG_USB_SERIAL_IR=m
++CONFIG_USB_SERIAL_EDGEPORT=m
++CONFIG_USB_SERIAL_EDGEPORT_TI=m
++CONFIG_USB_SERIAL_F81232=m
++# CONFIG_USB_SERIAL_F8153X is not set
++CONFIG_USB_SERIAL_GARMIN=m
++CONFIG_USB_SERIAL_IPW=m
++CONFIG_USB_SERIAL_IUU=m
++CONFIG_USB_SERIAL_KEYSPAN_PDA=m
++CONFIG_USB_SERIAL_KEYSPAN=m
++CONFIG_USB_SERIAL_KLSI=m
++CONFIG_USB_SERIAL_KOBIL_SCT=m
++CONFIG_USB_SERIAL_MCT_U232=m
++CONFIG_USB_SERIAL_METRO=m
++CONFIG_USB_SERIAL_MOS7720=m
++CONFIG_USB_SERIAL_MOS7715_PARPORT=y
++CONFIG_USB_SERIAL_MOS7840=m
++CONFIG_USB_SERIAL_MXUPORT=m
++CONFIG_USB_SERIAL_NAVMAN=m
++CONFIG_USB_SERIAL_PL2303=m
++CONFIG_USB_SERIAL_OTI6858=m
++CONFIG_USB_SERIAL_QCAUX=m
++CONFIG_USB_SERIAL_QUALCOMM=m
++CONFIG_USB_SERIAL_SPCP8X5=m
++CONFIG_USB_SERIAL_SAFE=m
++# CONFIG_USB_SERIAL_SAFE_PADDED is not set
++CONFIG_USB_SERIAL_SIERRAWIRELESS=m
++CONFIG_USB_SERIAL_SYMBOL=m
++CONFIG_USB_SERIAL_TI=m
++CONFIG_USB_SERIAL_CYBERJACK=m
++CONFIG_USB_SERIAL_XIRCOM=m
++CONFIG_USB_SERIAL_WWAN=m
++CONFIG_USB_SERIAL_OPTION=m
++CONFIG_USB_SERIAL_OMNINET=m
++CONFIG_USB_SERIAL_OPTICON=m
++CONFIG_USB_SERIAL_XSENS_MT=m
++CONFIG_USB_SERIAL_WISHBONE=m
++CONFIG_USB_SERIAL_SSU100=m
++CONFIG_USB_SERIAL_QT2=m
++# CONFIG_USB_SERIAL_UPD78F0730 is not set
++CONFIG_USB_SERIAL_DEBUG=m
++
++#
++# USB Miscellaneous drivers
++#
++CONFIG_USB_EMI62=m
++CONFIG_USB_EMI26=m
++CONFIG_USB_ADUTUX=m
++CONFIG_USB_SEVSEG=m
++CONFIG_USB_RIO500=m
++CONFIG_USB_LEGOTOWER=m
++CONFIG_USB_LCD=m
++CONFIG_USB_CYPRESS_CY7C63=m
++CONFIG_USB_CYTHERM=m
++CONFIG_USB_IDMOUSE=m
++CONFIG_USB_FTDI_ELAN=m
++CONFIG_USB_APPLEDISPLAY=m
++CONFIG_USB_SISUSBVGA=m
++# CONFIG_USB_SISUSBVGA_CON is not set
++CONFIG_USB_LD=m
++CONFIG_USB_TRANCEVIBRATOR=m
++CONFIG_USB_IOWARRIOR=m
++CONFIG_USB_TEST=m
++CONFIG_USB_EHSET_TEST_FIXTURE=m
++CONFIG_USB_ISIGHTFW=m
++CONFIG_USB_YUREX=m
++CONFIG_USB_EZUSB_FX2=m
++# CONFIG_USB_HUB_USB251XB is not set
++CONFIG_USB_HSIC_USB3503=m
++# CONFIG_USB_HSIC_USB4604 is not set
++CONFIG_USB_LINK_LAYER_TEST=m
++CONFIG_USB_CHAOSKEY=m
++CONFIG_USB_ATM=m
++CONFIG_USB_SPEEDTOUCH=m
++CONFIG_USB_CXACRU=m
++CONFIG_USB_UEAGLEATM=m
++CONFIG_USB_XUSBATM=m
++
++#
++# USB Physical Layer drivers
++#
++CONFIG_USB_PHY=y
++CONFIG_NOP_USB_XCEIV=m
++CONFIG_USB_GPIO_VBUS=m
++CONFIG_TAHVO_USB=m
++CONFIG_TAHVO_USB_HOST_BY_DEFAULT=y
++CONFIG_USB_ISP1301=m
++CONFIG_USB_GADGET=m
++# CONFIG_USB_GADGET_DEBUG is not set
++# CONFIG_USB_GADGET_DEBUG_FILES is not set
++# CONFIG_USB_GADGET_DEBUG_FS is not set
++CONFIG_USB_GADGET_VBUS_DRAW=2
++CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2
++# CONFIG_U_SERIAL_CONSOLE is not set
++
++#
++# USB Peripheral Controller
++#
++CONFIG_USB_FOTG210_UDC=m
++CONFIG_USB_GR_UDC=m
++CONFIG_USB_R8A66597=m
++CONFIG_USB_PXA27X=m
++CONFIG_USB_MV_UDC=m
++CONFIG_USB_MV_U3D=m
++CONFIG_USB_SNP_CORE=m
++# CONFIG_USB_M66592 is not set
++CONFIG_USB_BDC_UDC=m
++
++#
++# Platform Support
++#
++CONFIG_USB_BDC_PCI=m
++CONFIG_USB_AMD5536UDC=m
++CONFIG_USB_NET2272=m
++CONFIG_USB_NET2272_DMA=y
++CONFIG_USB_NET2280=m
++CONFIG_USB_GOKU=m
++CONFIG_USB_EG20T=m
++# CONFIG_USB_DUMMY_HCD is not set
++CONFIG_USB_LIBCOMPOSITE=m
++CONFIG_USB_F_ACM=m
++CONFIG_USB_F_SS_LB=m
++CONFIG_USB_U_SERIAL=m
++CONFIG_USB_U_ETHER=m
++CONFIG_USB_U_AUDIO=m
++CONFIG_USB_F_SERIAL=m
++CONFIG_USB_F_OBEX=m
++CONFIG_USB_F_NCM=m
++CONFIG_USB_F_ECM=m
++CONFIG_USB_F_PHONET=m
++CONFIG_USB_F_EEM=m
++CONFIG_USB_F_SUBSET=m
++CONFIG_USB_F_RNDIS=m
++CONFIG_USB_F_MASS_STORAGE=m
++CONFIG_USB_F_FS=m
++CONFIG_USB_F_UAC1=m
++CONFIG_USB_F_UAC2=m
++CONFIG_USB_F_UVC=m
++CONFIG_USB_F_MIDI=m
++CONFIG_USB_F_HID=m
++CONFIG_USB_F_PRINTER=m
++CONFIG_USB_F_TCM=m
++CONFIG_USB_CONFIGFS=m
++CONFIG_USB_CONFIGFS_SERIAL=y
++CONFIG_USB_CONFIGFS_ACM=y
++CONFIG_USB_CONFIGFS_OBEX=y
++CONFIG_USB_CONFIGFS_NCM=y
++CONFIG_USB_CONFIGFS_ECM=y
++CONFIG_USB_CONFIGFS_ECM_SUBSET=y
++CONFIG_USB_CONFIGFS_RNDIS=y
++CONFIG_USB_CONFIGFS_EEM=y
++CONFIG_USB_CONFIGFS_PHONET=y
++CONFIG_USB_CONFIGFS_MASS_STORAGE=y
++CONFIG_USB_CONFIGFS_F_LB_SS=y
++CONFIG_USB_CONFIGFS_F_FS=y
++CONFIG_USB_CONFIGFS_F_UAC1=y
++# CONFIG_USB_CONFIGFS_F_UAC1_LEGACY is not set
++CONFIG_USB_CONFIGFS_F_UAC2=y
++CONFIG_USB_CONFIGFS_F_MIDI=y
++CONFIG_USB_CONFIGFS_F_HID=y
++CONFIG_USB_CONFIGFS_F_UVC=y
++CONFIG_USB_CONFIGFS_F_PRINTER=y
++# CONFIG_USB_CONFIGFS_F_TCM is not set
++CONFIG_USB_ZERO=m
++CONFIG_USB_AUDIO=m
++CONFIG_GADGET_UAC1=y
++# CONFIG_GADGET_UAC1_LEGACY is not set
++CONFIG_USB_ETH=m
++CONFIG_USB_ETH_RNDIS=y
++CONFIG_USB_ETH_EEM=y
++CONFIG_USB_G_NCM=m
++CONFIG_USB_GADGETFS=m
++CONFIG_USB_FUNCTIONFS=m
++CONFIG_USB_FUNCTIONFS_ETH=y
++CONFIG_USB_FUNCTIONFS_RNDIS=y
++CONFIG_USB_FUNCTIONFS_GENERIC=y
++CONFIG_USB_MASS_STORAGE=m
++CONFIG_USB_GADGET_TARGET=m
++CONFIG_USB_G_SERIAL=m
++CONFIG_USB_MIDI_GADGET=m
++CONFIG_USB_G_PRINTER=m
++CONFIG_USB_CDC_COMPOSITE=m
++CONFIG_USB_G_NOKIA=m
++CONFIG_USB_G_ACM_MS=m
++# CONFIG_USB_G_MULTI is not set
++CONFIG_USB_G_HID=m
++CONFIG_USB_G_DBGP=m
++# CONFIG_USB_G_DBGP_PRINTK is not set
++CONFIG_USB_G_DBGP_SERIAL=y
++CONFIG_USB_G_WEBCAM=m
++# CONFIG_TYPEC is not set
++CONFIG_USB_LED_TRIG=y
++CONFIG_USB_ULPI_BUS=m
++CONFIG_UWB=m
++CONFIG_UWB_HWA=m
++CONFIG_UWB_WHCI=m
++CONFIG_UWB_I1480U=m
++CONFIG_MMC=y
++CONFIG_MMC_BLOCK=m
++CONFIG_MMC_BLOCK_MINORS=8
++CONFIG_SDIO_UART=m
++# CONFIG_MMC_TEST is not set
++
++#
++# MMC/SD/SDIO Host Controller Drivers
++#
++# CONFIG_MMC_DEBUG is not set
++CONFIG_MMC_SDHCI=m
++CONFIG_MMC_SDHCI_PCI=m
++CONFIG_MMC_RICOH_MMC=y
++CONFIG_MMC_SDHCI_ACPI=m
++CONFIG_MMC_SDHCI_PLTFM=m
++# CONFIG_MMC_SDHCI_F_SDH30 is not set
++CONFIG_MMC_WBSD=m
++CONFIG_MMC_TIFM_SD=m
++CONFIG_MMC_SPI=m
++CONFIG_MMC_SDRICOH_CS=m
++CONFIG_MMC_CB710=m
++CONFIG_MMC_VIA_SDMMC=m
++CONFIG_MMC_VUB300=m
++CONFIG_MMC_USHC=m
++CONFIG_MMC_USDHI6ROL0=m
++CONFIG_MMC_CQHCI=m
++CONFIG_MMC_TOSHIBA_PCI=m
++CONFIG_MMC_MTK=m
++# CONFIG_MMC_SDHCI_XENON is not set
++CONFIG_MEMSTICK=m
++# CONFIG_MEMSTICK_DEBUG is not set
++
++#
++# MemoryStick drivers
++#
++# CONFIG_MEMSTICK_UNSAFE_RESUME is not set
++CONFIG_MSPRO_BLOCK=m
++CONFIG_MS_BLOCK=m
++
++#
++# MemoryStick Host Controller Drivers
++#
++CONFIG_MEMSTICK_TIFM_MS=m
++CONFIG_MEMSTICK_JMICRON_38X=m
++CONFIG_MEMSTICK_R592=m
++CONFIG_NEW_LEDS=y
++CONFIG_LEDS_CLASS=y
++CONFIG_LEDS_CLASS_FLASH=m
++# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set
++
++#
++# LED drivers
++#
++CONFIG_LEDS_88PM860X=m
++# CONFIG_LEDS_APU is not set
++# CONFIG_LEDS_AS3645A is not set
++CONFIG_LEDS_LM3530=m
++CONFIG_LEDS_LM3533=m
++CONFIG_LEDS_LM3642=m
++# CONFIG_LEDS_MT6323 is not set
++CONFIG_LEDS_PCA9532=m
++CONFIG_LEDS_PCA9532_GPIO=y
++CONFIG_LEDS_GPIO=m
++CONFIG_LEDS_LP3944=m
++# CONFIG_LEDS_LP3952 is not set
++CONFIG_LEDS_LP55XX_COMMON=m
++CONFIG_LEDS_LP5521=m
++CONFIG_LEDS_LP5523=m
++CONFIG_LEDS_LP5562=m
++CONFIG_LEDS_LP8501=m
++CONFIG_LEDS_LP8788=m
++CONFIG_LEDS_CLEVO_MAIL=m
++CONFIG_LEDS_PCA955X=m
++# CONFIG_LEDS_PCA955X_GPIO is not set
++CONFIG_LEDS_PCA963X=m
++CONFIG_LEDS_WM831X_STATUS=m
++CONFIG_LEDS_WM8350=m
++CONFIG_LEDS_DA903X=m
++CONFIG_LEDS_DA9052=m
++CONFIG_LEDS_DAC124S085=m
++CONFIG_LEDS_PWM=m
++CONFIG_LEDS_REGULATOR=m
++CONFIG_LEDS_BD2802=m
++CONFIG_LEDS_INTEL_SS4200=m
++CONFIG_LEDS_LT3593=m
++CONFIG_LEDS_ADP5520=m
++CONFIG_LEDS_MC13783=m
++CONFIG_LEDS_TCA6507=m
++CONFIG_LEDS_TLC591XX=m
++CONFIG_LEDS_MAX8997=m
++CONFIG_LEDS_LM355x=m
++CONFIG_LEDS_MENF21BMC=m
++
++#
++# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM)
++#
++CONFIG_LEDS_BLINKM=m
++# CONFIG_LEDS_MLXCPLD is not set
++# CONFIG_LEDS_USER is not set
++# CONFIG_LEDS_NIC78BX is not set
++
++#
++# LED Triggers
++#
++CONFIG_LEDS_TRIGGERS=y
++CONFIG_LEDS_TRIGGER_TIMER=m
++CONFIG_LEDS_TRIGGER_ONESHOT=m
++# CONFIG_LEDS_TRIGGER_DISK is not set
++# CONFIG_LEDS_TRIGGER_MTD is not set
++CONFIG_LEDS_TRIGGER_HEARTBEAT=m
++CONFIG_LEDS_TRIGGER_BACKLIGHT=m
++CONFIG_LEDS_TRIGGER_CPU=y
++# CONFIG_LEDS_TRIGGER_ACTIVITY is not set
++CONFIG_LEDS_TRIGGER_GPIO=m
++CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
++
++#
++# iptables trigger is under Netfilter config (LED target)
++#
++CONFIG_LEDS_TRIGGER_TRANSIENT=m
++CONFIG_LEDS_TRIGGER_CAMERA=m
++# CONFIG_LEDS_TRIGGER_PANIC is not set
++# CONFIG_LEDS_TRIGGER_NETDEV is not set
++# CONFIG_ACCESSIBILITY is not set
++CONFIG_INFINIBAND=m
++CONFIG_INFINIBAND_USER_MAD=m
++CONFIG_INFINIBAND_USER_ACCESS=m
++# CONFIG_INFINIBAND_EXP_USER_ACCESS is not set
++CONFIG_INFINIBAND_USER_MEM=y
++CONFIG_INFINIBAND_ON_DEMAND_PAGING=y
++CONFIG_INFINIBAND_ADDR_TRANS=y
++CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y
++CONFIG_INFINIBAND_MTHCA=m
++# CONFIG_INFINIBAND_MTHCA_DEBUG is not set
++CONFIG_INFINIBAND_QIB=m
++CONFIG_INFINIBAND_QIB_DCA=y
++CONFIG_INFINIBAND_CXGB3=m
++# CONFIG_INFINIBAND_CXGB3_DEBUG is not set
++CONFIG_INFINIBAND_CXGB4=m
++# CONFIG_INFINIBAND_I40IW is not set
++CONFIG_MLX4_INFINIBAND=m
++CONFIG_MLX5_INFINIBAND=m
++CONFIG_INFINIBAND_NES=m
++# CONFIG_INFINIBAND_NES_DEBUG is not set
++CONFIG_INFINIBAND_OCRDMA=m
++# CONFIG_INFINIBAND_VMWARE_PVRDMA is not set
++CONFIG_INFINIBAND_USNIC=m
++CONFIG_INFINIBAND_IPOIB=m
++CONFIG_INFINIBAND_IPOIB_CM=y
++# CONFIG_INFINIBAND_IPOIB_DEBUG is not set
++CONFIG_INFINIBAND_SRP=m
++CONFIG_INFINIBAND_SRPT=m
++CONFIG_INFINIBAND_ISER=m
++CONFIG_INFINIBAND_ISERT=m
++# CONFIG_INFINIBAND_OPA_VNIC is not set
++CONFIG_INFINIBAND_RDMAVT=m
++# CONFIG_RDMA_RXE is not set
++CONFIG_INFINIBAND_HFI1=m
++# CONFIG_HFI1_DEBUG_SDMA_ORDER is not set
++# CONFIG_SDMA_VERBOSITY is not set
++# CONFIG_INFINIBAND_QEDR is not set
++# CONFIG_INFINIBAND_BNXT_RE is not set
++CONFIG_EDAC_ATOMIC_SCRUB=y
++CONFIG_EDAC_SUPPORT=y
++CONFIG_EDAC=y
++# CONFIG_EDAC_LEGACY_SYSFS is not set
++# CONFIG_EDAC_DEBUG is not set
++CONFIG_EDAC_DECODE_MCE=m
++# CONFIG_EDAC_GHES is not set
++CONFIG_EDAC_AMD64=m
++# CONFIG_EDAC_AMD64_ERROR_INJECTION is not set
++CONFIG_EDAC_E752X=m
++CONFIG_EDAC_I82975X=m
++CONFIG_EDAC_I3000=m
++CONFIG_EDAC_I3200=m
++CONFIG_EDAC_IE31200=m
++CONFIG_EDAC_X38=m
++CONFIG_EDAC_I5400=m
++CONFIG_EDAC_I7CORE=m
++CONFIG_EDAC_I5000=m
++CONFIG_EDAC_I5100=m
++CONFIG_EDAC_I7300=m
++CONFIG_EDAC_SBRIDGE=m
++# CONFIG_EDAC_SKX is not set
++# CONFIG_EDAC_PND2 is not set
++CONFIG_RTC_LIB=y
++CONFIG_RTC_MC146818_LIB=y
++CONFIG_RTC_CLASS=y
++CONFIG_RTC_HCTOSYS=y
++CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
++CONFIG_RTC_SYSTOHC=y
++CONFIG_RTC_SYSTOHC_DEVICE="rtc0"
++# CONFIG_RTC_DEBUG is not set
++CONFIG_RTC_NVMEM=y
++
++#
++# RTC interfaces
++#
++CONFIG_RTC_INTF_SYSFS=y
++CONFIG_RTC_INTF_PROC=y
++CONFIG_RTC_INTF_DEV=y
++# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
++# CONFIG_RTC_DRV_TEST is not set
++
++#
++# I2C RTC drivers
++#
++CONFIG_RTC_DRV_88PM860X=m
++CONFIG_RTC_DRV_88PM80X=m
++CONFIG_RTC_DRV_ABB5ZES3=m
++CONFIG_RTC_DRV_ABX80X=m
++CONFIG_RTC_DRV_DS1307=m
++CONFIG_RTC_DRV_DS1307_HWMON=y
++# CONFIG_RTC_DRV_DS1307_CENTURY is not set
++CONFIG_RTC_DRV_DS1374=m
++CONFIG_RTC_DRV_DS1374_WDT=y
++CONFIG_RTC_DRV_DS1672=m
++CONFIG_RTC_DRV_LP8788=m
++CONFIG_RTC_DRV_MAX6900=m
++CONFIG_RTC_DRV_MAX8907=m
++CONFIG_RTC_DRV_MAX8925=m
++CONFIG_RTC_DRV_MAX8998=m
++CONFIG_RTC_DRV_MAX8997=m
++CONFIG_RTC_DRV_RS5C372=m
++CONFIG_RTC_DRV_ISL1208=m
++CONFIG_RTC_DRV_ISL12022=m
++CONFIG_RTC_DRV_X1205=m
++CONFIG_RTC_DRV_PCF8523=m
++CONFIG_RTC_DRV_PCF85063=m
++# CONFIG_RTC_DRV_PCF85363 is not set
++CONFIG_RTC_DRV_PCF8563=m
++CONFIG_RTC_DRV_PCF8583=m
++CONFIG_RTC_DRV_M41T80=m
++CONFIG_RTC_DRV_M41T80_WDT=y
++CONFIG_RTC_DRV_BQ32K=m
++CONFIG_RTC_DRV_PALMAS=m
++CONFIG_RTC_DRV_TPS6586X=m
++CONFIG_RTC_DRV_TPS65910=m
++CONFIG_RTC_DRV_TPS80031=m
++CONFIG_RTC_DRV_RC5T583=m
++CONFIG_RTC_DRV_S35390A=m
++CONFIG_RTC_DRV_FM3130=m
++# CONFIG_RTC_DRV_RX8010 is not set
++CONFIG_RTC_DRV_RX8581=m
++CONFIG_RTC_DRV_RX8025=m
++CONFIG_RTC_DRV_EM3027=m
++CONFIG_RTC_DRV_RV8803=m
++CONFIG_RTC_DRV_S5M=m
++
++#
++# SPI RTC drivers
++#
++CONFIG_RTC_DRV_M41T93=m
++CONFIG_RTC_DRV_M41T94=m
++# CONFIG_RTC_DRV_DS1302 is not set
++CONFIG_RTC_DRV_DS1305=m
++CONFIG_RTC_DRV_DS1343=m
++CONFIG_RTC_DRV_DS1347=m
++CONFIG_RTC_DRV_DS1390=m
++# CONFIG_RTC_DRV_MAX6916 is not set
++CONFIG_RTC_DRV_R9701=m
++CONFIG_RTC_DRV_RX4581=m
++# CONFIG_RTC_DRV_RX6110 is not set
++CONFIG_RTC_DRV_RS5C348=m
++CONFIG_RTC_DRV_MAX6902=m
++CONFIG_RTC_DRV_PCF2123=m
++CONFIG_RTC_DRV_MCP795=m
++CONFIG_RTC_I2C_AND_SPI=y
++
++#
++# SPI and I2C RTC drivers
++#
++CONFIG_RTC_DRV_DS3232=m
++CONFIG_RTC_DRV_DS3232_HWMON=y
++CONFIG_RTC_DRV_PCF2127=m
++CONFIG_RTC_DRV_RV3029C2=m
++CONFIG_RTC_DRV_RV3029_HWMON=y
++
++#
++# Platform RTC drivers
++#
++CONFIG_RTC_DRV_CMOS=y
++CONFIG_RTC_DRV_DS1286=m
++CONFIG_RTC_DRV_DS1511=m
++CONFIG_RTC_DRV_DS1553=m
++CONFIG_RTC_DRV_DS1685_FAMILY=m
++CONFIG_RTC_DRV_DS1685=y
++# CONFIG_RTC_DRV_DS1689 is not set
++# CONFIG_RTC_DRV_DS17285 is not set
++# CONFIG_RTC_DRV_DS17485 is not set
++# CONFIG_RTC_DRV_DS17885 is not set
++# CONFIG_RTC_DS1685_PROC_REGS is not set
++# CONFIG_RTC_DS1685_SYSFS_REGS is not set
++CONFIG_RTC_DRV_DS1742=m
++CONFIG_RTC_DRV_DS2404=m
++CONFIG_RTC_DRV_DA9052=m
++CONFIG_RTC_DRV_DA9055=m
++CONFIG_RTC_DRV_DA9063=m
++CONFIG_RTC_DRV_STK17TA8=m
++CONFIG_RTC_DRV_M48T86=m
++CONFIG_RTC_DRV_M48T35=m
++CONFIG_RTC_DRV_M48T59=m
++CONFIG_RTC_DRV_MSM6242=m
++CONFIG_RTC_DRV_BQ4802=m
++CONFIG_RTC_DRV_RP5C01=m
++CONFIG_RTC_DRV_V3020=m
++CONFIG_RTC_DRV_WM831X=m
++CONFIG_RTC_DRV_WM8350=m
++CONFIG_RTC_DRV_PCF50633=m
++CONFIG_RTC_DRV_AB3100=m
++# CONFIG_RTC_DRV_CROS_EC is not set
++
++#
++# on-CPU RTC drivers
++#
++# CONFIG_RTC_DRV_FTRTC010 is not set
++CONFIG_RTC_DRV_PCAP=m
++CONFIG_RTC_DRV_MC13XXX=m
++CONFIG_RTC_DRV_MT6397=m
++
++#
++# HID Sensor RTC drivers
++#
++CONFIG_RTC_DRV_HID_SENSOR_TIME=m
++CONFIG_DMADEVICES=y
++# CONFIG_DMADEVICES_DEBUG is not set
++
++#
++# DMA Devices
++#
++CONFIG_DMA_ENGINE=y
++CONFIG_DMA_VIRTUAL_CHANNELS=m
++CONFIG_DMA_ACPI=y
++# CONFIG_ALTERA_MSGDMA is not set
++CONFIG_INTEL_IDMA64=m
++CONFIG_INTEL_IOATDMA=m
++CONFIG_INTEL_MIC_X100_DMA=m
++# CONFIG_QCOM_HIDMA_MGMT is not set
++# CONFIG_QCOM_HIDMA is not set
++CONFIG_DW_DMAC_CORE=y
++CONFIG_DW_DMAC=m
++CONFIG_DW_DMAC_PCI=y
++CONFIG_HSU_DMA=m
++
++#
++# DMA Clients
++#
++CONFIG_ASYNC_TX_DMA=y
++# CONFIG_DMATEST is not set
++CONFIG_DMA_ENGINE_RAID=y
++
++#
++# DMABUF options
++#
++CONFIG_SYNC_FILE=y
++# CONFIG_SW_SYNC is not set
++CONFIG_DCA=m
++CONFIG_AUXDISPLAY=y
++CONFIG_CHARLCD=m
++# CONFIG_HD44780 is not set
++CONFIG_KS0108=m
++CONFIG_KS0108_PORT=0x378
++CONFIG_KS0108_DELAY=2
++CONFIG_CFAG12864B=m
++CONFIG_CFAG12864B_RATE=20
++# CONFIG_IMG_ASCII_LCD is not set
++CONFIG_PANEL=m
++CONFIG_PANEL_PARPORT=0
++CONFIG_PANEL_PROFILE=5
++# CONFIG_PANEL_CHANGE_MESSAGE is not set
++CONFIG_UIO=m
++CONFIG_UIO_CIF=m
++CONFIG_UIO_PDRV_GENIRQ=m
++CONFIG_UIO_DMEM_GENIRQ=m
++CONFIG_UIO_AEC=m
++CONFIG_UIO_SERCOS3=m
++CONFIG_UIO_PCI_GENERIC=m
++CONFIG_UIO_NETX=m
++CONFIG_UIO_PRUSS=m
++CONFIG_UIO_MF624=m
++# CONFIG_UIO_HV_GENERIC is not set
++CONFIG_VFIO_IOMMU_TYPE1=m
++CONFIG_VFIO_VIRQFD=m
++CONFIG_VFIO=m
++# CONFIG_VFIO_NOIOMMU is not set
++CONFIG_VFIO_PCI=m
++CONFIG_VFIO_PCI_VGA=y
++CONFIG_VFIO_PCI_MMAP=y
++CONFIG_VFIO_PCI_INTX=y
++CONFIG_VFIO_PCI_IGD=y
++# CONFIG_VFIO_MDEV is not set
++CONFIG_IRQ_BYPASS_MANAGER=m
++CONFIG_VIRT_DRIVERS=y
++# CONFIG_VBOXGUEST is not set
++CONFIG_VIRTIO=y
++CONFIG_VIRTIO_MENU=y
++CONFIG_VIRTIO_PCI=y
++CONFIG_VIRTIO_PCI_LEGACY=y
++CONFIG_VIRTIO_BALLOON=y
++CONFIG_VIRTIO_INPUT=m
++CONFIG_VIRTIO_MMIO=y
++CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
++
++#
++# Microsoft Hyper-V guest support
++#
++CONFIG_HYPERV=m
++CONFIG_HYPERV_TSCPAGE=y
++CONFIG_HYPERV_UTILS=m
++CONFIG_HYPERV_BALLOON=m
++
++#
++# Xen driver support
++#
++CONFIG_XEN_BALLOON=y
++CONFIG_XEN_SELFBALLOONING=y
++CONFIG_XEN_BALLOON_MEMORY_HOTPLUG=y
++CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT=512
++CONFIG_XEN_SCRUB_PAGES=y
++CONFIG_XEN_DEV_EVTCHN=m
++CONFIG_XEN_BACKEND=y
++CONFIG_XENFS=m
++CONFIG_XEN_COMPAT_XENFS=y
++CONFIG_XEN_SYS_HYPERVISOR=y
++CONFIG_XEN_XENBUS_FRONTEND=y
++CONFIG_XEN_GNTDEV=m
++CONFIG_XEN_GRANT_DEV_ALLOC=m
++CONFIG_SWIOTLB_XEN=y
++CONFIG_XEN_TMEM=m
++CONFIG_XEN_PCIDEV_BACKEND=m
++# CONFIG_XEN_PVCALLS_FRONTEND is not set
++# CONFIG_XEN_PVCALLS_BACKEND is not set
++CONFIG_XEN_SCSI_BACKEND=m
++CONFIG_XEN_PRIVCMD=m
++CONFIG_XEN_ACPI_PROCESSOR=y
++CONFIG_XEN_MCE_LOG=y
++CONFIG_XEN_HAVE_PVMMU=y
++CONFIG_XEN_EFI=y
++CONFIG_XEN_AUTO_XLATE=y
++CONFIG_XEN_ACPI=y
++CONFIG_XEN_SYMS=y
++CONFIG_XEN_HAVE_VPMU=y
++CONFIG_STAGING=y
++CONFIG_IRDA=m
++
++#
++# IrDA protocols
++#
++CONFIG_IRLAN=m
++CONFIG_IRNET=m
++CONFIG_IRCOMM=m
++CONFIG_IRDA_ULTRA=y
++
++#
++# IrDA options
++#
++CONFIG_IRDA_CACHE_LAST_LSAP=y
++CONFIG_IRDA_FAST_RR=y
++# CONFIG_IRDA_DEBUG is not set
++
++#
++# Infrared-port device drivers
++#
++
++#
++# SIR device drivers
++#
++CONFIG_IRTTY_SIR=m
++
++#
++# Dongle support
++#
++CONFIG_DONGLE=y
++CONFIG_ESI_DONGLE=m
++CONFIG_ACTISYS_DONGLE=m
++CONFIG_TEKRAM_DONGLE=m
++CONFIG_TOIM3232_DONGLE=m
++CONFIG_LITELINK_DONGLE=m
++CONFIG_MA600_DONGLE=m
++CONFIG_GIRBIL_DONGLE=m
++CONFIG_MCP2120_DONGLE=m
++CONFIG_OLD_BELKIN_DONGLE=m
++CONFIG_ACT200L_DONGLE=m
++CONFIG_KINGSUN_DONGLE=m
++CONFIG_KSDAZZLE_DONGLE=m
++CONFIG_KS959_DONGLE=m
++
++#
++# FIR device drivers
++#
++CONFIG_USB_IRDA=m
++CONFIG_SIGMATEL_FIR=m
++CONFIG_NSC_FIR=m
++CONFIG_WINBOND_FIR=m
++CONFIG_SMC_IRCC_FIR=m
++CONFIG_ALI_FIR=m
++CONFIG_VLSI_FIR=m
++CONFIG_VIA_FIR=m
++CONFIG_MCS_FIR=m
++CONFIG_IPX=m
++# CONFIG_IPX_INTERN is not set
++CONFIG_NCP_FS=m
++CONFIG_NCPFS_PACKET_SIGNING=y
++CONFIG_NCPFS_IOCTL_LOCKING=y
++CONFIG_NCPFS_STRONG=y
++CONFIG_NCPFS_NFS_NS=y
++CONFIG_NCPFS_OS2_NS=y
++# CONFIG_NCPFS_SMALLDOS is not set
++CONFIG_NCPFS_NLS=y
++CONFIG_NCPFS_EXTRAS=y
++CONFIG_PRISM2_USB=m
++CONFIG_COMEDI=m
++# CONFIG_COMEDI_DEBUG is not set
++CONFIG_COMEDI_DEFAULT_BUF_SIZE_KB=2048
++CONFIG_COMEDI_DEFAULT_BUF_MAXSIZE_KB=20480
++CONFIG_COMEDI_MISC_DRIVERS=y
++CONFIG_COMEDI_BOND=m
++CONFIG_COMEDI_TEST=m
++CONFIG_COMEDI_PARPORT=m
++CONFIG_COMEDI_SERIAL2002=m
++CONFIG_COMEDI_ISA_DRIVERS=y
++CONFIG_COMEDI_PCL711=m
++CONFIG_COMEDI_PCL724=m
++CONFIG_COMEDI_PCL726=m
++CONFIG_COMEDI_PCL730=m
++CONFIG_COMEDI_PCL812=m
++CONFIG_COMEDI_PCL816=m
++CONFIG_COMEDI_PCL818=m
++CONFIG_COMEDI_PCM3724=m
++CONFIG_COMEDI_AMPLC_DIO200_ISA=m
++CONFIG_COMEDI_AMPLC_PC236_ISA=m
++CONFIG_COMEDI_AMPLC_PC263_ISA=m
++CONFIG_COMEDI_RTI800=m
++CONFIG_COMEDI_RTI802=m
++CONFIG_COMEDI_DAC02=m
++CONFIG_COMEDI_DAS16M1=m
++CONFIG_COMEDI_DAS08_ISA=m
++CONFIG_COMEDI_DAS16=m
++CONFIG_COMEDI_DAS800=m
++CONFIG_COMEDI_DAS1800=m
++CONFIG_COMEDI_DAS6402=m
++CONFIG_COMEDI_DT2801=m
++CONFIG_COMEDI_DT2811=m
++CONFIG_COMEDI_DT2814=m
++CONFIG_COMEDI_DT2815=m
++CONFIG_COMEDI_DT2817=m
++CONFIG_COMEDI_DT282X=m
++CONFIG_COMEDI_DMM32AT=m
++CONFIG_COMEDI_FL512=m
++CONFIG_COMEDI_AIO_AIO12_8=m
++CONFIG_COMEDI_AIO_IIRO_16=m
++CONFIG_COMEDI_II_PCI20KC=m
++CONFIG_COMEDI_C6XDIGIO=m
++CONFIG_COMEDI_MPC624=m
++CONFIG_COMEDI_ADQ12B=m
++CONFIG_COMEDI_NI_AT_A2150=m
++CONFIG_COMEDI_NI_AT_AO=m
++CONFIG_COMEDI_NI_ATMIO=m
++CONFIG_COMEDI_NI_ATMIO16D=m
++CONFIG_COMEDI_NI_LABPC_ISA=m
++CONFIG_COMEDI_PCMAD=m
++CONFIG_COMEDI_PCMDA12=m
++CONFIG_COMEDI_PCMMIO=m
++CONFIG_COMEDI_PCMUIO=m
++CONFIG_COMEDI_MULTIQ3=m
++CONFIG_COMEDI_S526=m
++CONFIG_COMEDI_PCI_DRIVERS=m
++CONFIG_COMEDI_8255_PCI=m
++CONFIG_COMEDI_ADDI_WATCHDOG=m
++CONFIG_COMEDI_ADDI_APCI_1032=m
++CONFIG_COMEDI_ADDI_APCI_1500=m
++CONFIG_COMEDI_ADDI_APCI_1516=m
++CONFIG_COMEDI_ADDI_APCI_1564=m
++CONFIG_COMEDI_ADDI_APCI_16XX=m
++CONFIG_COMEDI_ADDI_APCI_2032=m
++CONFIG_COMEDI_ADDI_APCI_2200=m
++CONFIG_COMEDI_ADDI_APCI_3120=m
++CONFIG_COMEDI_ADDI_APCI_3501=m
++CONFIG_COMEDI_ADDI_APCI_3XXX=m
++CONFIG_COMEDI_ADL_PCI6208=m
++CONFIG_COMEDI_ADL_PCI7X3X=m
++CONFIG_COMEDI_ADL_PCI8164=m
++CONFIG_COMEDI_ADL_PCI9111=m
++CONFIG_COMEDI_ADL_PCI9118=m
++CONFIG_COMEDI_ADV_PCI1710=m
++# CONFIG_COMEDI_ADV_PCI1720 is not set
++CONFIG_COMEDI_ADV_PCI1723=m
++CONFIG_COMEDI_ADV_PCI1724=m
++# CONFIG_COMEDI_ADV_PCI1760 is not set
++CONFIG_COMEDI_ADV_PCI_DIO=m
++CONFIG_COMEDI_AMPLC_DIO200_PCI=m
++CONFIG_COMEDI_AMPLC_PC236_PCI=m
++CONFIG_COMEDI_AMPLC_PC263_PCI=m
++CONFIG_COMEDI_AMPLC_PCI224=m
++CONFIG_COMEDI_AMPLC_PCI230=m
++CONFIG_COMEDI_CONTEC_PCI_DIO=m
++CONFIG_COMEDI_DAS08_PCI=m
++CONFIG_COMEDI_DT3000=m
++CONFIG_COMEDI_DYNA_PCI10XX=m
++CONFIG_COMEDI_GSC_HPDI=m
++CONFIG_COMEDI_MF6X4=m
++CONFIG_COMEDI_ICP_MULTI=m
++CONFIG_COMEDI_DAQBOARD2000=m
++CONFIG_COMEDI_JR3_PCI=m
++CONFIG_COMEDI_KE_COUNTER=m
++CONFIG_COMEDI_CB_PCIDAS64=m
++CONFIG_COMEDI_CB_PCIDAS=m
++CONFIG_COMEDI_CB_PCIDDA=m
++CONFIG_COMEDI_CB_PCIMDAS=m
++CONFIG_COMEDI_CB_PCIMDDA=m
++CONFIG_COMEDI_ME4000=m
++CONFIG_COMEDI_ME_DAQ=m
++CONFIG_COMEDI_NI_6527=m
++CONFIG_COMEDI_NI_65XX=m
++CONFIG_COMEDI_NI_660X=m
++CONFIG_COMEDI_NI_670X=m
++CONFIG_COMEDI_NI_LABPC_PCI=m
++CONFIG_COMEDI_NI_PCIDIO=m
++CONFIG_COMEDI_NI_PCIMIO=m
++CONFIG_COMEDI_RTD520=m
++CONFIG_COMEDI_S626=m
++CONFIG_COMEDI_MITE=m
++CONFIG_COMEDI_NI_TIOCMD=m
++CONFIG_COMEDI_PCMCIA_DRIVERS=m
++CONFIG_COMEDI_CB_DAS16_CS=m
++CONFIG_COMEDI_DAS08_CS=m
++CONFIG_COMEDI_NI_DAQ_700_CS=m
++CONFIG_COMEDI_NI_DAQ_DIO24_CS=m
++CONFIG_COMEDI_NI_LABPC_CS=m
++CONFIG_COMEDI_NI_MIO_CS=m
++CONFIG_COMEDI_QUATECH_DAQP_CS=m
++CONFIG_COMEDI_USB_DRIVERS=m
++CONFIG_COMEDI_DT9812=m
++CONFIG_COMEDI_NI_USB6501=m
++CONFIG_COMEDI_USBDUX=m
++CONFIG_COMEDI_USBDUXFAST=m
++CONFIG_COMEDI_USBDUXSIGMA=m
++CONFIG_COMEDI_VMK80XX=m
++CONFIG_COMEDI_8254=m
++CONFIG_COMEDI_8255=m
++CONFIG_COMEDI_8255_SA=m
++CONFIG_COMEDI_KCOMEDILIB=m
++CONFIG_COMEDI_AMPLC_DIO200=m
++CONFIG_COMEDI_AMPLC_PC236=m
++CONFIG_COMEDI_DAS08=m
++CONFIG_COMEDI_ISADMA=m
++CONFIG_COMEDI_NI_LABPC=m
++CONFIG_COMEDI_NI_LABPC_ISADMA=m
++CONFIG_COMEDI_NI_TIO=m
++CONFIG_RTL8192U=m
++CONFIG_RTLLIB=m
++CONFIG_RTLLIB_CRYPTO_CCMP=m
++CONFIG_RTLLIB_CRYPTO_TKIP=m
++CONFIG_RTLLIB_CRYPTO_WEP=m
++CONFIG_RTL8192E=m
++# CONFIG_RTL8723BS is not set
++CONFIG_R8712U=m
++CONFIG_R8188EU=m
++CONFIG_88EU_AP_MODE=y
++# CONFIG_R8822BE is not set
++CONFIG_RTS5208=m
++CONFIG_VT6655=m
++CONFIG_VT6656=m
++
++#
++# IIO staging drivers
++#
++
++#
++# Accelerometers
++#
++CONFIG_ADIS16201=m
++CONFIG_ADIS16203=m
++CONFIG_ADIS16209=m
++CONFIG_ADIS16240=m
++
++#
++# Analog to digital converters
++#
++CONFIG_AD7606=m
++CONFIG_AD7606_IFACE_PARALLEL=m
++CONFIG_AD7606_IFACE_SPI=m
++CONFIG_AD7780=m
++CONFIG_AD7816=m
++CONFIG_AD7192=m
++CONFIG_AD7280=m
++
++#
++# Analog digital bi-direction converters
++#
++CONFIG_ADT7316=m
++CONFIG_ADT7316_SPI=m
++CONFIG_ADT7316_I2C=m
++
++#
++# Capacitance to digital converters
++#
++CONFIG_AD7150=m
++CONFIG_AD7152=m
++CONFIG_AD7746=m
++
++#
++# Direct Digital Synthesis
++#
++CONFIG_AD9832=m
++CONFIG_AD9834=m
++
++#
++# Digital gyroscope sensors
++#
++CONFIG_ADIS16060=m
++
++#
++# Network Analyzer, Impedance Converters
++#
++CONFIG_AD5933=m
++
++#
++# Light sensors
++#
++CONFIG_TSL2x7x=m
++
++#
++# Active energy metering IC
++#
++CONFIG_ADE7753=m
++CONFIG_ADE7754=m
++CONFIG_ADE7758=m
++CONFIG_ADE7759=m
++CONFIG_ADE7854=m
++CONFIG_ADE7854_I2C=m
++CONFIG_ADE7854_SPI=m
++
++#
++# Resolver to digital converters
++#
++CONFIG_AD2S90=m
++CONFIG_AD2S1200=m
++CONFIG_AD2S1210=m
++
++#
++# Triggers - standalone
++#
++CONFIG_FB_SM750=m
++CONFIG_FB_XGI=m
++
++#
++# Speakup console speech
++#
++CONFIG_SPEAKUP=m
++CONFIG_SPEAKUP_SYNTH_ACNTSA=m
++CONFIG_SPEAKUP_SYNTH_APOLLO=m
++CONFIG_SPEAKUP_SYNTH_AUDPTR=m
++CONFIG_SPEAKUP_SYNTH_BNS=m
++CONFIG_SPEAKUP_SYNTH_DECTLK=m
++CONFIG_SPEAKUP_SYNTH_DECEXT=m
++CONFIG_SPEAKUP_SYNTH_LTLK=m
++CONFIG_SPEAKUP_SYNTH_SOFT=m
++CONFIG_SPEAKUP_SYNTH_SPKOUT=m
++CONFIG_SPEAKUP_SYNTH_TXPRT=m
++CONFIG_SPEAKUP_SYNTH_DUMMY=m
++CONFIG_STAGING_MEDIA=y
++# CONFIG_INTEL_ATOMISP is not set
++CONFIG_I2C_BCM2048=m
++CONFIG_DVB_CXD2099=m
++
++#
++# Android
++#
++CONFIG_LTE_GDM724X=m
++CONFIG_FIREWIRE_SERIAL=m
++CONFIG_FWTTY_MAX_TOTAL_PORTS=64
++CONFIG_FWTTY_MAX_CARD_PORTS=32
++CONFIG_MTD_SPINAND_MT29F=m
++CONFIG_MTD_SPINAND_ONDIEECC=y
++# CONFIG_LNET is not set
++CONFIG_DGNC=m
++CONFIG_GS_FPGABOOT=m
++CONFIG_CRYPTO_SKEIN=y
++CONFIG_UNISYSSPAR=y
++CONFIG_UNISYS_VISORNIC=m
++CONFIG_UNISYS_VISORINPUT=m
++CONFIG_UNISYS_VISORHBA=m
++CONFIG_FB_TFT=m
++CONFIG_FB_TFT_AGM1264K_FL=m
++CONFIG_FB_TFT_BD663474=m
++CONFIG_FB_TFT_HX8340BN=m
++CONFIG_FB_TFT_HX8347D=m
++CONFIG_FB_TFT_HX8353D=m
++CONFIG_FB_TFT_HX8357D=m
++CONFIG_FB_TFT_ILI9163=m
++CONFIG_FB_TFT_ILI9320=m
++CONFIG_FB_TFT_ILI9325=m
++CONFIG_FB_TFT_ILI9340=m
++CONFIG_FB_TFT_ILI9341=m
++CONFIG_FB_TFT_ILI9481=m
++CONFIG_FB_TFT_ILI9486=m
++CONFIG_FB_TFT_PCD8544=m
++CONFIG_FB_TFT_RA8875=m
++CONFIG_FB_TFT_S6D02A1=m
++CONFIG_FB_TFT_S6D1121=m
++# CONFIG_FB_TFT_SH1106 is not set
++CONFIG_FB_TFT_SSD1289=m
++# CONFIG_FB_TFT_SSD1305 is not set
++CONFIG_FB_TFT_SSD1306=m
++CONFIG_FB_TFT_SSD1331=m
++CONFIG_FB_TFT_SSD1351=m
++CONFIG_FB_TFT_ST7735R=m
++CONFIG_FB_TFT_ST7789V=m
++CONFIG_FB_TFT_TINYLCD=m
++CONFIG_FB_TFT_TLS8204=m
++CONFIG_FB_TFT_UC1611=m
++CONFIG_FB_TFT_UC1701=m
++CONFIG_FB_TFT_UPD161704=m
++CONFIG_FB_TFT_WATTEROTT=m
++CONFIG_FB_FLEX=m
++CONFIG_FB_TFT_FBTFT_DEVICE=m
++# CONFIG_WILC1000_SDIO is not set
++# CONFIG_WILC1000_SPI is not set
++CONFIG_MOST=m
++# CONFIG_MOST_CDEV is not set
++# CONFIG_MOST_NET is not set
++# CONFIG_MOST_SOUND is not set
++# CONFIG_MOST_VIDEO is not set
++# CONFIG_MOST_DIM2 is not set
++# CONFIG_MOST_I2C is not set
++# CONFIG_MOST_USB is not set
++# CONFIG_KS7010 is not set
++# CONFIG_GREYBUS is not set
++
++#
++# USB Power Delivery and Type-C drivers
++#
++# CONFIG_DRM_VBOXVIDEO is not set
++# CONFIG_PI433 is not set
++CONFIG_X86_PLATFORM_DEVICES=y
++CONFIG_ACER_WMI=m
++# CONFIG_ACER_WIRELESS is not set
++CONFIG_ACERHDF=m
++CONFIG_ALIENWARE_WMI=m
++CONFIG_ASUS_LAPTOP=m
++# CONFIG_DELL_SMBIOS_WMI is not set
++# CONFIG_DELL_SMBIOS_SMM is not set
++# CONFIG_DELL_LAPTOP is not set
++# CONFIG_DELL_WMI is not set
++CONFIG_DELL_WMI_AIO=m
++# CONFIG_DELL_WMI_LED is not set
++CONFIG_DELL_SMO8800=m
++CONFIG_DELL_RBTN=m
++CONFIG_FUJITSU_LAPTOP=m
++CONFIG_FUJITSU_TABLET=m
++CONFIG_AMILO_RFKILL=m
++# CONFIG_GPD_POCKET_FAN is not set
++CONFIG_HP_ACCEL=m
++CONFIG_HP_WIRELESS=m
++CONFIG_HP_WMI=m
++CONFIG_MSI_LAPTOP=m
++CONFIG_PANASONIC_LAPTOP=m
++CONFIG_COMPAL_LAPTOP=m
++CONFIG_SONY_LAPTOP=m
++CONFIG_SONYPI_COMPAT=y
++CONFIG_IDEAPAD_LAPTOP=m
++# CONFIG_SURFACE3_WMI is not set
++CONFIG_THINKPAD_ACPI=m
++CONFIG_THINKPAD_ACPI_ALSA_SUPPORT=y
++CONFIG_THINKPAD_ACPI_DEBUGFACILITIES=y
++# CONFIG_THINKPAD_ACPI_DEBUG is not set
++# CONFIG_THINKPAD_ACPI_UNSAFE_LEDS is not set
++CONFIG_THINKPAD_ACPI_VIDEO=y
++CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y
++CONFIG_SENSORS_HDAPS=m
++CONFIG_INTEL_MENLOW=m
++CONFIG_EEEPC_LAPTOP=m
++CONFIG_ASUS_WMI=m
++CONFIG_ASUS_NB_WMI=m
++CONFIG_EEEPC_WMI=m
++# CONFIG_ASUS_WIRELESS is not set
++CONFIG_ACPI_WMI=m
++CONFIG_WMI_BMOF=m
++# CONFIG_INTEL_WMI_THUNDERBOLT is not set
++CONFIG_MSI_WMI=m
++# CONFIG_PEAQ_WMI is not set
++CONFIG_TOPSTAR_LAPTOP=m
++CONFIG_ACPI_TOSHIBA=m
++CONFIG_TOSHIBA_BT_RFKILL=m
++CONFIG_TOSHIBA_HAPS=m
++CONFIG_TOSHIBA_WMI=m
++CONFIG_ACPI_CMPC=m
++# CONFIG_INTEL_CHT_INT33FE is not set
++# CONFIG_INTEL_INT0002_VGPIO is not set
++# CONFIG_INTEL_HID_EVENT is not set
++# CONFIG_INTEL_VBTN is not set
++CONFIG_INTEL_IPS=m
++# CONFIG_INTEL_PMC_CORE is not set
++CONFIG_IBM_RTL=m
++CONFIG_SAMSUNG_LAPTOP=m
++CONFIG_MXM_WMI=m
++CONFIG_INTEL_OAKTRAIL=m
++CONFIG_SAMSUNG_Q10=m
++CONFIG_APPLE_GMUX=m
++CONFIG_INTEL_RST=m
++CONFIG_INTEL_SMARTCONNECT=m
++CONFIG_PVPANIC=m
++CONFIG_INTEL_PMC_IPC=m
++CONFIG_SURFACE_PRO3_BUTTON=m
++# CONFIG_SURFACE_3_BUTTON is not set
++# CONFIG_INTEL_PUNIT_IPC is not set
++# CONFIG_MLX_PLATFORM is not set
++# CONFIG_INTEL_TURBO_MAX_3 is not set
++CONFIG_PMC_ATOM=y
++CONFIG_CHROME_PLATFORMS=y
++CONFIG_CHROMEOS_LAPTOP=m
++CONFIG_CHROMEOS_PSTORE=m
++CONFIG_CROS_EC_LPC=m
++# CONFIG_CROS_EC_LPC_MEC is not set
++CONFIG_CROS_EC_PROTO=y
++# CONFIG_CROS_KBD_LED_BACKLIGHT is not set
++# CONFIG_MELLANOX_PLATFORM is not set
++CONFIG_CLKDEV_LOOKUP=y
++CONFIG_HAVE_CLK_PREPARE=y
++CONFIG_COMMON_CLK=y
++
++#
++# Common Clock Framework
++#
++CONFIG_COMMON_CLK_WM831X=m
++CONFIG_COMMON_CLK_SI5351=m
++CONFIG_COMMON_CLK_CDCE706=m
++# CONFIG_COMMON_CLK_CS2000_CP is not set
++CONFIG_COMMON_CLK_S2MPS11=m
++CONFIG_CLK_TWL6040=m
++# CONFIG_COMMON_CLK_NXP is not set
++CONFIG_COMMON_CLK_PALMAS=m
++CONFIG_COMMON_CLK_PWM=m
++# CONFIG_COMMON_CLK_PXA is not set
++# CONFIG_COMMON_CLK_PIC32 is not set
++# CONFIG_HWSPINLOCK is not set
++
++#
++# Clock Source drivers
++#
++CONFIG_CLKEVT_I8253=y
++CONFIG_I8253_LOCK=y
++CONFIG_CLKBLD_I8253=y
++# CONFIG_ATMEL_PIT is not set
++# CONFIG_SH_TIMER_CMT is not set
++# CONFIG_SH_TIMER_MTU2 is not set
++# CONFIG_SH_TIMER_TMU is not set
++# CONFIG_EM_TIMER_STI is not set
++CONFIG_MAILBOX=y
++CONFIG_PCC=y
++CONFIG_ALTERA_MBOX=m
++CONFIG_IOMMU_API=y
++CONFIG_IOMMU_SUPPORT=y
++
++#
++# Generic IOMMU Pagetable Support
++#
++CONFIG_IOMMU_IOVA=y
++CONFIG_AMD_IOMMU=y
++CONFIG_AMD_IOMMU_V2=m
++CONFIG_DMAR_TABLE=y
++CONFIG_INTEL_IOMMU=y
++CONFIG_INTEL_IOMMU_SVM=y
++# CONFIG_INTEL_IOMMU_DEFAULT_ON is not set
++CONFIG_INTEL_IOMMU_FLOPPY_WA=y
++CONFIG_IRQ_REMAP=y
++
++#
++# Remoteproc drivers
++#
++CONFIG_REMOTEPROC=m
++
++#
++# Rpmsg drivers
++#
++# CONFIG_RPMSG_QCOM_GLINK_RPM is not set
++# CONFIG_RPMSG_VIRTIO is not set
++# CONFIG_SOUNDWIRE is not set
++
++#
++# SOC (System On Chip) specific Drivers
++#
++
++#
++# Amlogic SoC drivers
++#
++
++#
++# Broadcom SoC drivers
++#
++
++#
++# i.MX SoC drivers
++#
++
++#
++# Qualcomm SoC drivers
++#
++# CONFIG_SUNXI_SRAM is not set
++CONFIG_SOC_TI=y
++
++#
++# Xilinx SoC drivers
++#
++# CONFIG_XILINX_VCU is not set
++CONFIG_PM_DEVFREQ=y
++
++#
++# DEVFREQ Governors
++#
++CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
++CONFIG_DEVFREQ_GOV_PERFORMANCE=y
++CONFIG_DEVFREQ_GOV_POWERSAVE=y
++CONFIG_DEVFREQ_GOV_USERSPACE=y
++# CONFIG_DEVFREQ_GOV_PASSIVE is not set
++
++#
++# DEVFREQ Drivers
++#
++CONFIG_PM_DEVFREQ_EVENT=y
++CONFIG_EXTCON=y
++
++#
++# Extcon Device Drivers
++#
++CONFIG_EXTCON_ADC_JACK=m
++CONFIG_EXTCON_ARIZONA=m
++CONFIG_EXTCON_GPIO=m
++# CONFIG_EXTCON_INTEL_INT3496 is not set
++CONFIG_EXTCON_MAX14577=m
++# CONFIG_EXTCON_MAX3355 is not set
++CONFIG_EXTCON_MAX77693=m
++CONFIG_EXTCON_MAX77843=m
++CONFIG_EXTCON_MAX8997=m
++CONFIG_EXTCON_PALMAS=m
++CONFIG_EXTCON_RT8973A=m
++CONFIG_EXTCON_SM5502=m
++CONFIG_EXTCON_USB_GPIO=m
++# CONFIG_EXTCON_USBC_CROS_EC is not set
++CONFIG_MEMORY=y
++CONFIG_IIO=m
++CONFIG_IIO_BUFFER=y
++CONFIG_IIO_BUFFER_CB=m
++# CONFIG_IIO_BUFFER_HW_CONSUMER is not set
++CONFIG_IIO_KFIFO_BUF=m
++CONFIG_IIO_TRIGGERED_BUFFER=m
++# CONFIG_IIO_CONFIGFS is not set
++CONFIG_IIO_TRIGGER=y
++CONFIG_IIO_CONSUMERS_PER_TRIGGER=2
++# CONFIG_IIO_SW_DEVICE is not set
++# CONFIG_IIO_SW_TRIGGER is not set
++CONFIG_IIO_TRIGGERED_EVENT=m
++
++#
++# Accelerometers
++#
++CONFIG_BMA180=m
++# CONFIG_BMA220 is not set
++CONFIG_BMC150_ACCEL=m
++CONFIG_BMC150_ACCEL_I2C=m
++CONFIG_BMC150_ACCEL_SPI=m
++# CONFIG_DA280 is not set
++# CONFIG_DA311 is not set
++# CONFIG_DMARD09 is not set
++# CONFIG_DMARD10 is not set
++CONFIG_HID_SENSOR_ACCEL_3D=m
++# CONFIG_IIO_CROS_EC_ACCEL_LEGACY is not set
++CONFIG_IIO_ST_ACCEL_3AXIS=m
++CONFIG_IIO_ST_ACCEL_I2C_3AXIS=m
++CONFIG_IIO_ST_ACCEL_SPI_3AXIS=m
++CONFIG_KXSD9=m
++CONFIG_KXSD9_SPI=m
++CONFIG_KXSD9_I2C=m
++CONFIG_KXCJK1013=m
++# CONFIG_MC3230 is not set
++# CONFIG_MMA7455_I2C is not set
++# CONFIG_MMA7455_SPI is not set
++# CONFIG_MMA7660 is not set
++CONFIG_MMA8452=m
++CONFIG_MMA9551_CORE=m
++CONFIG_MMA9551=m
++CONFIG_MMA9553=m
++CONFIG_MXC4005=m
++# CONFIG_MXC6255 is not set
++CONFIG_SCA3000=m
++CONFIG_STK8312=m
++CONFIG_STK8BA50=m
++
++#
++# Analog to digital converters
++#
++CONFIG_AD_SIGMA_DELTA=m
++CONFIG_AD7266=m
++CONFIG_AD7291=m
++CONFIG_AD7298=m
++CONFIG_AD7476=m
++# CONFIG_AD7766 is not set
++CONFIG_AD7791=m
++CONFIG_AD7793=m
++CONFIG_AD7887=m
++CONFIG_AD7923=m
++CONFIG_AD799X=m
++CONFIG_CC10001_ADC=m
++CONFIG_DA9150_GPADC=m
++# CONFIG_DLN2_ADC is not set
++CONFIG_HI8435=m
++# CONFIG_HX711 is not set
++# CONFIG_INA2XX_ADC is not set
++CONFIG_LP8788_ADC=m
++# CONFIG_LTC2471 is not set
++# CONFIG_LTC2485 is not set
++# CONFIG_LTC2497 is not set
++CONFIG_MAX1027=m
++# CONFIG_MAX11100 is not set
++# CONFIG_MAX1118 is not set
++CONFIG_MAX1363=m
++# CONFIG_MAX9611 is not set
++CONFIG_MCP320X=m
++CONFIG_MCP3422=m
++CONFIG_MEN_Z188_ADC=m
++CONFIG_NAU7802=m
++# CONFIG_PALMAS_GPADC is not set
++CONFIG_QCOM_VADC_COMMON=m
++CONFIG_QCOM_SPMI_IADC=m
++CONFIG_QCOM_SPMI_VADC=m
++CONFIG_TI_ADC081C=m
++# CONFIG_TI_ADC0832 is not set
++# CONFIG_TI_ADC084S021 is not set
++# CONFIG_TI_ADC12138 is not set
++# CONFIG_TI_ADC108S102 is not set
++CONFIG_TI_ADC128S052=m
++# CONFIG_TI_ADC161S626 is not set
++# CONFIG_TI_ADS1015 is not set
++# CONFIG_TI_ADS7950 is not set
++CONFIG_TI_AM335X_ADC=m
++# CONFIG_TI_TLC4541 is not set
++CONFIG_TWL4030_MADC=m
++CONFIG_TWL6030_GPADC=m
++CONFIG_VIPERBOARD_ADC=m
++
++#
++# Amplifiers
++#
++CONFIG_AD8366=m
++
++#
++# Chemical Sensors
++#
++# CONFIG_ATLAS_PH_SENSOR is not set
++# CONFIG_CCS811 is not set
++# CONFIG_IAQCORE is not set
++CONFIG_VZ89X=m
++# CONFIG_IIO_CROS_EC_SENSORS_CORE is not set
++
++#
++# Hid Sensor IIO Common
++#
++CONFIG_HID_SENSOR_IIO_COMMON=m
++CONFIG_HID_SENSOR_IIO_TRIGGER=m
++CONFIG_IIO_MS_SENSORS_I2C=m
++
++#
++# SSP Sensor Common
++#
++CONFIG_IIO_SSP_SENSORS_COMMONS=m
++CONFIG_IIO_SSP_SENSORHUB=m
++CONFIG_IIO_ST_SENSORS_I2C=m
++CONFIG_IIO_ST_SENSORS_SPI=m
++CONFIG_IIO_ST_SENSORS_CORE=m
++
++#
++# Counters
++#
++
++#
++# Digital to analog converters
++#
++CONFIG_AD5064=m
++CONFIG_AD5360=m
++CONFIG_AD5380=m
++CONFIG_AD5421=m
++CONFIG_AD5446=m
++CONFIG_AD5449=m
++# CONFIG_AD5592R is not set
++# CONFIG_AD5593R is not set
++CONFIG_AD5504=m
++CONFIG_AD5624R_SPI=m
++# CONFIG_LTC2632 is not set
++CONFIG_AD5686=m
++CONFIG_AD5755=m
++# CONFIG_AD5761 is not set
++CONFIG_AD5764=m
++CONFIG_AD5791=m
++CONFIG_AD7303=m
++# CONFIG_AD8801 is not set
++# CONFIG_DS4424 is not set
++CONFIG_M62332=m
++CONFIG_MAX517=m
++CONFIG_MCP4725=m
++CONFIG_MCP4922=m
++# CONFIG_TI_DAC082S085 is not set
++
++#
++# IIO dummy driver
++#
++
++#
++# Frequency Synthesizers DDS/PLL
++#
++
++#
++# Clock Generator/Distribution
++#
++CONFIG_AD9523=m
++
++#
++# Phase-Locked Loop (PLL) frequency synthesizers
++#
++CONFIG_ADF4350=m
++
++#
++# Digital gyroscope sensors
++#
++CONFIG_ADIS16080=m
++CONFIG_ADIS16130=m
++CONFIG_ADIS16136=m
++CONFIG_ADIS16260=m
++CONFIG_ADXRS450=m
++CONFIG_BMG160=m
++CONFIG_BMG160_I2C=m
++CONFIG_BMG160_SPI=m
++CONFIG_HID_SENSOR_GYRO_3D=m
++# CONFIG_MPU3050_I2C is not set
++CONFIG_IIO_ST_GYRO_3AXIS=m
++CONFIG_IIO_ST_GYRO_I2C_3AXIS=m
++CONFIG_IIO_ST_GYRO_SPI_3AXIS=m
++CONFIG_ITG3200=m
++
++#
++# Health Sensors
++#
++
++#
++# Heart Rate Monitors
++#
++# CONFIG_AFE4403 is not set
++# CONFIG_AFE4404 is not set
++# CONFIG_MAX30100 is not set
++# CONFIG_MAX30102 is not set
++
++#
++# Humidity sensors
++#
++# CONFIG_AM2315 is not set
++CONFIG_DHT11=m
++CONFIG_HDC100X=m
++# CONFIG_HID_SENSOR_HUMIDITY is not set
++# CONFIG_HTS221 is not set
++CONFIG_HTU21=m
++CONFIG_SI7005=m
++CONFIG_SI7020=m
++
++#
++# Inertial measurement units
++#
++CONFIG_ADIS16400=m
++CONFIG_ADIS16480=m
++# CONFIG_BMI160_I2C is not set
++# CONFIG_BMI160_SPI is not set
++CONFIG_KMX61=m
++# CONFIG_INV_MPU6050_I2C is not set
++# CONFIG_INV_MPU6050_SPI is not set
++# CONFIG_IIO_ST_LSM6DSX is not set
++CONFIG_IIO_ADIS_LIB=m
++CONFIG_IIO_ADIS_LIB_BUFFER=y
++
++#
++# Light sensors
++#
++CONFIG_ACPI_ALS=m
++CONFIG_ADJD_S311=m
++CONFIG_AL3320A=m
++CONFIG_APDS9300=m
++CONFIG_APDS9960=m
++CONFIG_BH1750=m
++# CONFIG_BH1780 is not set
++CONFIG_CM32181=m
++CONFIG_CM3232=m
++CONFIG_CM3323=m
++CONFIG_CM36651=m
++CONFIG_GP2AP020A00F=m
++CONFIG_SENSORS_ISL29018=m
++CONFIG_SENSORS_ISL29028=m
++CONFIG_ISL29125=m
++CONFIG_HID_SENSOR_ALS=m
++CONFIG_HID_SENSOR_PROX=m
++CONFIG_JSA1212=m
++CONFIG_RPR0521=m
++CONFIG_SENSORS_LM3533=m
++CONFIG_LTR501=m
++# CONFIG_MAX44000 is not set
++CONFIG_OPT3001=m
++CONFIG_PA12203001=m
++# CONFIG_SI1145 is not set
++CONFIG_STK3310=m
++# CONFIG_ST_UVIS25 is not set
++CONFIG_TCS3414=m
++CONFIG_TCS3472=m
++CONFIG_SENSORS_TSL2563=m
++CONFIG_TSL2583=m
++CONFIG_TSL4531=m
++CONFIG_US5182D=m
++CONFIG_VCNL4000=m
++# CONFIG_VEML6070 is not set
++# CONFIG_VL6180 is not set
++# CONFIG_ZOPT2201 is not set
++
++#
++# Magnetometer sensors
++#
++CONFIG_AK8975=m
++CONFIG_AK09911=m
++# CONFIG_BMC150_MAGN_I2C is not set
++# CONFIG_BMC150_MAGN_SPI is not set
++CONFIG_MAG3110=m
++CONFIG_HID_SENSOR_MAGNETOMETER_3D=m
++CONFIG_MMC35240=m
++CONFIG_IIO_ST_MAGN_3AXIS=m
++CONFIG_IIO_ST_MAGN_I2C_3AXIS=m
++CONFIG_IIO_ST_MAGN_SPI_3AXIS=m
++CONFIG_SENSORS_HMC5843=m
++CONFIG_SENSORS_HMC5843_I2C=m
++CONFIG_SENSORS_HMC5843_SPI=m
++
++#
++# Multiplexers
++#
++
++#
++# Inclinometer sensors
++#
++CONFIG_HID_SENSOR_INCLINOMETER_3D=m
++CONFIG_HID_SENSOR_DEVICE_ROTATION=m
++
++#
++# Triggers - standalone
++#
++CONFIG_IIO_INTERRUPT_TRIGGER=m
++CONFIG_IIO_SYSFS_TRIGGER=m
++
++#
++# Digital potentiometers
++#
++# CONFIG_DS1803 is not set
++# CONFIG_MAX5481 is not set
++# CONFIG_MAX5487 is not set
++# CONFIG_MCP4131 is not set
++CONFIG_MCP4531=m
++# CONFIG_TPL0102 is not set
++
++#
++# Digital potentiostats
++#
++# CONFIG_LMP91000 is not set
++
++#
++# Pressure sensors
++#
++# CONFIG_ABP060MG is not set
++CONFIG_BMP280=m
++CONFIG_BMP280_I2C=m
++CONFIG_BMP280_SPI=m
++CONFIG_HID_SENSOR_PRESS=m
++# CONFIG_HP03 is not set
++# CONFIG_MPL115_I2C is not set
++# CONFIG_MPL115_SPI is not set
++CONFIG_MPL3115=m
++CONFIG_MS5611=m
++CONFIG_MS5611_I2C=m
++CONFIG_MS5611_SPI=m
++CONFIG_MS5637=m
++CONFIG_IIO_ST_PRESS=m
++CONFIG_IIO_ST_PRESS_I2C=m
++CONFIG_IIO_ST_PRESS_SPI=m
++CONFIG_T5403=m
++# CONFIG_HP206C is not set
++# CONFIG_ZPA2326 is not set
++
++#
++# Lightning sensors
++#
++CONFIG_AS3935=m
++
++#
++# Proximity and distance sensors
++#
++CONFIG_LIDAR_LITE_V2=m
++# CONFIG_RFD77402 is not set
++# CONFIG_SRF04 is not set
++CONFIG_SX9500=m
++# CONFIG_SRF08 is not set
++
++#
++# Temperature sensors
++#
++# CONFIG_MAXIM_THERMOCOUPLE is not set
++# CONFIG_HID_SENSOR_TEMP is not set
++CONFIG_MLX90614=m
++CONFIG_TMP006=m
++# CONFIG_TMP007 is not set
++CONFIG_TSYS01=m
++CONFIG_TSYS02D=m
++CONFIG_NTB=m
++# CONFIG_NTB_AMD is not set
++# CONFIG_NTB_IDT is not set
++CONFIG_NTB_INTEL=m
++# CONFIG_NTB_SWITCHTEC is not set
++CONFIG_NTB_PINGPONG=m
++CONFIG_NTB_TOOL=m
++# CONFIG_NTB_PERF is not set
++CONFIG_NTB_TRANSPORT=m
++CONFIG_VME_BUS=y
++
++#
++# VME Bridge Drivers
++#
++CONFIG_VME_CA91CX42=m
++CONFIG_VME_TSI148=m
++# CONFIG_VME_FAKE is not set
++
++#
++# VME Board Drivers
++#
++CONFIG_VMIVME_7805=m
++
++#
++# VME Device Drivers
++#
++CONFIG_VME_USER=m
++CONFIG_PWM=y
++CONFIG_PWM_SYSFS=y
++CONFIG_PWM_CRC=y
++# CONFIG_PWM_CROS_EC is not set
++CONFIG_PWM_LP3943=m
++CONFIG_PWM_LPSS=m
++CONFIG_PWM_LPSS_PCI=m
++CONFIG_PWM_LPSS_PLATFORM=m
++CONFIG_PWM_PCA9685=m
++CONFIG_PWM_TWL=m
++CONFIG_PWM_TWL_LED=m
++
++#
++# IRQ chip support
++#
++CONFIG_ARM_GIC_MAX_NR=1
++# CONFIG_ARM_GIC_V3_ITS is not set
++CONFIG_IPACK_BUS=m
++CONFIG_BOARD_TPCI200=m
++CONFIG_SERIAL_IPOCTAL=m
++CONFIG_RESET_CONTROLLER=y
++# CONFIG_RESET_ATH79 is not set
++# CONFIG_RESET_AXS10X is not set
++# CONFIG_RESET_BERLIN is not set
++# CONFIG_RESET_IMX7 is not set
++# CONFIG_RESET_LANTIQ is not set
++# CONFIG_RESET_LPC18XX is not set
++# CONFIG_RESET_MESON is not set
++# CONFIG_RESET_PISTACHIO is not set
++# CONFIG_RESET_SIMPLE is not set
++# CONFIG_RESET_SUNXI is not set
++# CONFIG_RESET_TI_SYSCON is not set
++# CONFIG_RESET_ZYNQ is not set
++# CONFIG_RESET_TEGRA_BPMP is not set
++CONFIG_FMC=m
++CONFIG_FMC_FAKEDEV=m
++CONFIG_FMC_TRIVIAL=m
++CONFIG_FMC_WRITE_EEPROM=m
++CONFIG_FMC_CHARDEV=m
++
++#
++# PHY Subsystem
++#
++CONFIG_GENERIC_PHY=y
++CONFIG_BCM_KONA_USB2_PHY=m
++CONFIG_PHY_PXA_28NM_HSIC=m
++CONFIG_PHY_PXA_28NM_USB2=m
++# CONFIG_PHY_CPCAP_USB is not set
++# CONFIG_PHY_QCOM_USB_HS is not set
++# CONFIG_PHY_QCOM_USB_HSIC is not set
++CONFIG_PHY_SAMSUNG_USB2=m
++# CONFIG_PHY_EXYNOS4210_USB2 is not set
++# CONFIG_PHY_EXYNOS4X12_USB2 is not set
++# CONFIG_PHY_EXYNOS5250_USB2 is not set
++CONFIG_PHY_TUSB1210=m
++CONFIG_POWERCAP=y
++CONFIG_INTEL_RAPL=m
++CONFIG_MCB=m
++CONFIG_MCB_PCI=m
++# CONFIG_MCB_LPC is not set
++
++#
++# Performance monitor support
++#
++CONFIG_RAS=y
++# CONFIG_RAS_CEC is not set
++CONFIG_THUNDERBOLT=m
++
++#
++# Android
++#
++# CONFIG_ANDROID is not set
++CONFIG_LIBNVDIMM=y
++CONFIG_BLK_DEV_PMEM=m
++CONFIG_ND_BLK=m
++CONFIG_ND_CLAIM=y
++CONFIG_ND_BTT=m
++CONFIG_BTT=y
++CONFIG_ND_PFN=m
++CONFIG_NVDIMM_PFN=y
++CONFIG_NVDIMM_DAX=y
++CONFIG_DAX=y
++CONFIG_DEV_DAX=m
++CONFIG_DEV_DAX_PMEM=m
++CONFIG_NVMEM=y
++CONFIG_STM=m
++CONFIG_STM_DUMMY=m
++CONFIG_STM_SOURCE_CONSOLE=m
++# CONFIG_STM_SOURCE_HEARTBEAT is not set
++# CONFIG_STM_SOURCE_FTRACE is not set
++CONFIG_INTEL_TH=m
++CONFIG_INTEL_TH_PCI=m
++CONFIG_INTEL_TH_GTH=m
++CONFIG_INTEL_TH_STH=m
++CONFIG_INTEL_TH_MSU=m
++CONFIG_INTEL_TH_PTI=m
++# CONFIG_INTEL_TH_DEBUG is not set
++CONFIG_FPGA=m
++# CONFIG_ALTERA_PR_IP_CORE is not set
++# CONFIG_FPGA_MGR_ALTERA_PS_SPI is not set
++# CONFIG_FPGA_MGR_ALTERA_CVP is not set
++# CONFIG_FPGA_MGR_XILINX_SPI is not set
++# CONFIG_FPGA_BRIDGE is not set
++# CONFIG_FSI is not set
++CONFIG_PM_OPP=y
++CONFIG_UNISYS_VISORBUS=m
++# CONFIG_SIOX is not set
++# CONFIG_SLIMBUS is not set
++
++#
++# Firmware Drivers
++#
++CONFIG_EDD=y
++CONFIG_EDD_OFF=y
++CONFIG_FIRMWARE_MEMMAP=y
++CONFIG_DELL_RBU=m
++CONFIG_DCDBAS=m
++CONFIG_DMIID=y
++CONFIG_DMI_SYSFS=m
++CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y
++CONFIG_ISCSI_IBFT_FIND=y
++CONFIG_ISCSI_IBFT=m
++# CONFIG_FW_CFG_SYSFS is not set
++# CONFIG_GOOGLE_FIRMWARE is not set
++
++#
++# EFI (Extensible Firmware Interface) Support
++#
++CONFIG_EFI_VARS=y
++CONFIG_EFI_ESRT=y
++CONFIG_EFI_VARS_PSTORE=m
++# CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE is not set
++CONFIG_EFI_RUNTIME_MAP=y
++# CONFIG_EFI_FAKE_MEMMAP is not set
++CONFIG_EFI_RUNTIME_WRAPPERS=y
++# CONFIG_EFI_BOOTLOADER_CONTROL is not set
++# CONFIG_EFI_CAPSULE_LOADER is not set
++# CONFIG_EFI_TEST is not set
++CONFIG_APPLE_PROPERTIES=y
++# CONFIG_RESET_ATTACK_MITIGATION is not set
++CONFIG_UEFI_CPER=y
++CONFIG_EFI_DEV_PATH_PARSER=y
++
++#
++# Tegra firmware driver
++#
++
++#
++# File systems
++#
++CONFIG_DCACHE_WORD_ACCESS=y
++CONFIG_FS_IOMAP=y
++# CONFIG_EXT2_FS is not set
++# CONFIG_EXT3_FS is not set
++CONFIG_EXT4_FS=y
++CONFIG_EXT4_USE_FOR_EXT2=y
++CONFIG_EXT4_FS_POSIX_ACL=y
++CONFIG_EXT4_FS_SECURITY=y
++# CONFIG_EXT4_ENCRYPTION is not set
++# CONFIG_EXT4_DEBUG is not set
++CONFIG_JBD2=y
++# CONFIG_JBD2_DEBUG is not set
++CONFIG_FS_MBCACHE=y
++CONFIG_REISERFS_FS=m
++# CONFIG_REISERFS_CHECK is not set
++# CONFIG_REISERFS_PROC_INFO is not set
++CONFIG_REISERFS_FS_XATTR=y
++CONFIG_REISERFS_FS_POSIX_ACL=y
++CONFIG_REISERFS_FS_SECURITY=y
++CONFIG_JFS_FS=m
++CONFIG_JFS_POSIX_ACL=y
++CONFIG_JFS_SECURITY=y
++# CONFIG_JFS_DEBUG is not set
++CONFIG_JFS_STATISTICS=y
++CONFIG_XFS_FS=m
++CONFIG_XFS_QUOTA=y
++CONFIG_XFS_POSIX_ACL=y
++CONFIG_XFS_RT=y
++# CONFIG_XFS_ONLINE_SCRUB is not set
++# CONFIG_XFS_WARN is not set
++# CONFIG_XFS_DEBUG is not set
++CONFIG_GFS2_FS=m
++CONFIG_GFS2_FS_LOCKING_DLM=y
++CONFIG_OCFS2_FS=m
++CONFIG_OCFS2_FS_O2CB=m
++CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m
++CONFIG_OCFS2_FS_STATS=y
++CONFIG_OCFS2_DEBUG_MASKLOG=y
++# CONFIG_OCFS2_DEBUG_FS is not set
++CONFIG_BTRFS_FS=m
++CONFIG_BTRFS_FS_POSIX_ACL=y
++# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set
++# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set
++# CONFIG_BTRFS_DEBUG is not set
++# CONFIG_BTRFS_ASSERT is not set
++# CONFIG_BTRFS_FS_REF_VERIFY is not set
++CONFIG_NILFS2_FS=m
++CONFIG_F2FS_FS=m
++CONFIG_F2FS_STAT_FS=y
++CONFIG_F2FS_FS_XATTR=y
++CONFIG_F2FS_FS_POSIX_ACL=y
++CONFIG_F2FS_FS_SECURITY=y
++# CONFIG_F2FS_CHECK_FS is not set
++CONFIG_F2FS_FS_ENCRYPTION=y
++# CONFIG_F2FS_IO_TRACE is not set
++# CONFIG_F2FS_FAULT_INJECTION is not set
++CONFIG_FS_DAX=y
++CONFIG_FS_DAX_PMD=y
++CONFIG_FS_POSIX_ACL=y
++CONFIG_EXPORTFS=y
++# CONFIG_EXPORTFS_BLOCK_OPS is not set
++CONFIG_FILE_LOCKING=y
++CONFIG_MANDATORY_FILE_LOCKING=y
++CONFIG_FS_ENCRYPTION=m
++CONFIG_FSNOTIFY=y
++CONFIG_DNOTIFY=y
++CONFIG_INOTIFY_USER=y
++CONFIG_FANOTIFY=y
++CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
++CONFIG_QUOTA=y
++CONFIG_QUOTA_NETLINK_INTERFACE=y
++# CONFIG_PRINT_QUOTA_WARNING is not set
++# CONFIG_QUOTA_DEBUG is not set
++CONFIG_QUOTA_TREE=m
++CONFIG_QFMT_V1=m
++CONFIG_QFMT_V2=m
++CONFIG_QUOTACTL=y
++CONFIG_QUOTACTL_COMPAT=y
++CONFIG_AUTOFS4_FS=m
++CONFIG_FUSE_FS=y
++CONFIG_CUSE=m
++CONFIG_OVERLAY_FS=y
++# CONFIG_OVERLAY_FS_REDIRECT_DIR is not set
++CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y
++# CONFIG_OVERLAY_FS_INDEX is not set
++
++#
++# Caches
++#
++CONFIG_FSCACHE=m
++CONFIG_FSCACHE_STATS=y
++# CONFIG_FSCACHE_HISTOGRAM is not set
++# CONFIG_FSCACHE_DEBUG is not set
++# CONFIG_FSCACHE_OBJECT_LIST is not set
++CONFIG_CACHEFILES=m
++# CONFIG_CACHEFILES_DEBUG is not set
++# CONFIG_CACHEFILES_HISTOGRAM is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++CONFIG_ISO9660_FS=m
++CONFIG_JOLIET=y
++CONFIG_ZISOFS=y
++CONFIG_UDF_FS=m
++CONFIG_UDF_NLS=y
++
++#
++# DOS/FAT/NT Filesystems
++#
++CONFIG_FAT_FS=y
++CONFIG_MSDOS_FS=m
++CONFIG_VFAT_FS=y
++CONFIG_FAT_DEFAULT_CODEPAGE=437
++CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
++# CONFIG_FAT_DEFAULT_UTF8 is not set
++CONFIG_NTFS_FS=m
++# CONFIG_NTFS_DEBUG is not set
++# CONFIG_NTFS_RW is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_PROC_VMCORE=y
++CONFIG_PROC_SYSCTL=y
++CONFIG_PROC_PAGE_MONITOR=y
++CONFIG_PROC_CHILDREN=y
++CONFIG_KERNFS=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++CONFIG_TMPFS_POSIX_ACL=y
++CONFIG_TMPFS_XATTR=y
++CONFIG_HUGETLBFS=y
++CONFIG_HUGETLB_PAGE=y
++CONFIG_ARCH_HAS_GIGANTIC_PAGE=y
++CONFIG_CONFIGFS_FS=m
++CONFIG_EFIVAR_FS=y
++CONFIG_MISC_FILESYSTEMS=y
++# CONFIG_ORANGEFS_FS is not set
++CONFIG_ADFS_FS=m
++# CONFIG_ADFS_FS_RW is not set
++CONFIG_AFFS_FS=m
++CONFIG_ECRYPT_FS=y
++CONFIG_ECRYPT_FS_MESSAGING=y
++CONFIG_HFS_FS=m
++CONFIG_HFSPLUS_FS=m
++CONFIG_HFSPLUS_FS_POSIX_ACL=y
++CONFIG_BEFS_FS=m
++# CONFIG_BEFS_DEBUG is not set
++CONFIG_BFS_FS=m
++CONFIG_EFS_FS=m
++CONFIG_JFFS2_FS=m
++CONFIG_JFFS2_FS_DEBUG=0
++CONFIG_JFFS2_FS_WRITEBUFFER=y
++# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
++# CONFIG_JFFS2_SUMMARY is not set
++CONFIG_JFFS2_FS_XATTR=y
++CONFIG_JFFS2_FS_POSIX_ACL=y
++CONFIG_JFFS2_FS_SECURITY=y
++CONFIG_JFFS2_COMPRESSION_OPTIONS=y
++CONFIG_JFFS2_ZLIB=y
++CONFIG_JFFS2_LZO=y
++CONFIG_JFFS2_RTIME=y
++# CONFIG_JFFS2_RUBIN is not set
++# CONFIG_JFFS2_CMODE_NONE is not set
++# CONFIG_JFFS2_CMODE_PRIORITY is not set
++# CONFIG_JFFS2_CMODE_SIZE is not set
++CONFIG_JFFS2_CMODE_FAVOURLZO=y
++CONFIG_UBIFS_FS=m
++# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set
++CONFIG_UBIFS_FS_LZO=y
++CONFIG_UBIFS_FS_ZLIB=y
++CONFIG_UBIFS_ATIME_SUPPORT=y
++# CONFIG_UBIFS_FS_ENCRYPTION is not set
++CONFIG_UBIFS_FS_SECURITY=y
++CONFIG_CRAMFS=m
++CONFIG_CRAMFS_BLOCKDEV=y
++# CONFIG_CRAMFS_MTD is not set
++CONFIG_SQUASHFS=m
++# CONFIG_SQUASHFS_FILE_CACHE is not set
++CONFIG_SQUASHFS_FILE_DIRECT=y
++# CONFIG_SQUASHFS_DECOMP_SINGLE is not set
++# CONFIG_SQUASHFS_DECOMP_MULTI is not set
++CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y
++CONFIG_SQUASHFS_XATTR=y
++CONFIG_SQUASHFS_ZLIB=y
++CONFIG_SQUASHFS_LZ4=y
++CONFIG_SQUASHFS_LZO=y
++CONFIG_SQUASHFS_XZ=y
++# CONFIG_SQUASHFS_ZSTD is not set
++# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set
++# CONFIG_SQUASHFS_EMBEDDED is not set
++CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
++CONFIG_VXFS_FS=m
++CONFIG_MINIX_FS=m
++CONFIG_OMFS_FS=m
++CONFIG_HPFS_FS=m
++CONFIG_QNX4FS_FS=m
++CONFIG_QNX6FS_FS=m
++# CONFIG_QNX6FS_DEBUG is not set
++CONFIG_ROMFS_FS=m
++CONFIG_ROMFS_BACKED_BY_BLOCK=y
++# CONFIG_ROMFS_BACKED_BY_MTD is not set
++# CONFIG_ROMFS_BACKED_BY_BOTH is not set
++CONFIG_ROMFS_ON_BLOCK=y
++CONFIG_PSTORE=y
++CONFIG_PSTORE_ZLIB_COMPRESS=y
++# CONFIG_PSTORE_LZO_COMPRESS is not set
++# CONFIG_PSTORE_LZ4_COMPRESS is not set
++# CONFIG_PSTORE_CONSOLE is not set
++# CONFIG_PSTORE_PMSG is not set
++# CONFIG_PSTORE_FTRACE is not set
++CONFIG_PSTORE_RAM=m
++CONFIG_SYSV_FS=m
++CONFIG_UFS_FS=m
++# CONFIG_UFS_FS_WRITE is not set
++# CONFIG_UFS_DEBUG is not set
++CONFIG_EXOFS_FS=m
++# CONFIG_EXOFS_DEBUG is not set
++CONFIG_ORE=m
++CONFIG_NETWORK_FILESYSTEMS=y
++CONFIG_NFS_FS=m
++CONFIG_NFS_V2=m
++CONFIG_NFS_V3=m
++CONFIG_NFS_V3_ACL=y
++CONFIG_NFS_V4=m
++CONFIG_NFS_SWAP=y
++CONFIG_NFS_V4_1=y
++CONFIG_NFS_V4_2=y
++CONFIG_PNFS_FILE_LAYOUT=m
++CONFIG_PNFS_BLOCK=m
++CONFIG_PNFS_FLEXFILE_LAYOUT=m
++CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org"
++CONFIG_NFS_V4_1_MIGRATION=y
++CONFIG_NFS_V4_SECURITY_LABEL=y
++CONFIG_NFS_FSCACHE=y
++# CONFIG_NFS_USE_LEGACY_DNS is not set
++CONFIG_NFS_USE_KERNEL_DNS=y
++CONFIG_NFS_DEBUG=y
++CONFIG_NFSD=m
++CONFIG_NFSD_V2_ACL=y
++CONFIG_NFSD_V3=y
++CONFIG_NFSD_V3_ACL=y
++CONFIG_NFSD_V4=y
++# CONFIG_NFSD_BLOCKLAYOUT is not set
++# CONFIG_NFSD_SCSILAYOUT is not set
++# CONFIG_NFSD_FLEXFILELAYOUT is not set
++CONFIG_NFSD_V4_SECURITY_LABEL=y
++# CONFIG_NFSD_FAULT_INJECTION is not set
++CONFIG_GRACE_PERIOD=m
++CONFIG_LOCKD=m
++CONFIG_LOCKD_V4=y
++CONFIG_NFS_ACL_SUPPORT=m
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=m
++CONFIG_SUNRPC_GSS=m
++CONFIG_SUNRPC_BACKCHANNEL=y
++CONFIG_SUNRPC_SWAP=y
++CONFIG_RPCSEC_GSS_KRB5=m
++CONFIG_SUNRPC_DEBUG=y
++CONFIG_SUNRPC_XPRT_RDMA=m
++CONFIG_CEPH_FS=m
++CONFIG_CEPH_FSCACHE=y
++CONFIG_CEPH_FS_POSIX_ACL=y
++CONFIG_CIFS=m
++CONFIG_CIFS_STATS=y
++# CONFIG_CIFS_STATS2 is not set
++CONFIG_CIFS_WEAK_PW_HASH=y
++CONFIG_CIFS_UPCALL=y
++CONFIG_CIFS_XATTR=y
++CONFIG_CIFS_POSIX=y
++CONFIG_CIFS_ACL=y
++CONFIG_CIFS_DEBUG=y
++# CONFIG_CIFS_DEBUG2 is not set
++# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set
++CONFIG_CIFS_DFS_UPCALL=y
++CONFIG_CIFS_SMB311=y
++# CONFIG_CIFS_SMB_DIRECT is not set
++CONFIG_CIFS_FSCACHE=y
++CONFIG_CODA_FS=m
++CONFIG_AFS_FS=m
++# CONFIG_AFS_DEBUG is not set
++CONFIG_AFS_FSCACHE=y
++CONFIG_9P_FS=m
++CONFIG_9P_FSCACHE=y
++CONFIG_9P_FS_POSIX_ACL=y
++CONFIG_9P_FS_SECURITY=y
++CONFIG_NLS=y
++CONFIG_NLS_DEFAULT="utf8"
++CONFIG_NLS_CODEPAGE_437=y
++CONFIG_NLS_CODEPAGE_737=m
++CONFIG_NLS_CODEPAGE_775=m
++CONFIG_NLS_CODEPAGE_850=m
++CONFIG_NLS_CODEPAGE_852=m
++CONFIG_NLS_CODEPAGE_855=m
++CONFIG_NLS_CODEPAGE_857=m
++CONFIG_NLS_CODEPAGE_860=m
++CONFIG_NLS_CODEPAGE_861=m
++CONFIG_NLS_CODEPAGE_862=m
++CONFIG_NLS_CODEPAGE_863=m
++CONFIG_NLS_CODEPAGE_864=m
++CONFIG_NLS_CODEPAGE_865=m
++CONFIG_NLS_CODEPAGE_866=m
++CONFIG_NLS_CODEPAGE_869=m
++CONFIG_NLS_CODEPAGE_936=m
++CONFIG_NLS_CODEPAGE_950=m
++CONFIG_NLS_CODEPAGE_932=m
++CONFIG_NLS_CODEPAGE_949=m
++CONFIG_NLS_CODEPAGE_874=m
++CONFIG_NLS_ISO8859_8=m
++CONFIG_NLS_CODEPAGE_1250=m
++CONFIG_NLS_CODEPAGE_1251=m
++CONFIG_NLS_ASCII=m
++CONFIG_NLS_ISO8859_1=m
++CONFIG_NLS_ISO8859_2=m
++CONFIG_NLS_ISO8859_3=m
++CONFIG_NLS_ISO8859_4=m
++CONFIG_NLS_ISO8859_5=m
++CONFIG_NLS_ISO8859_6=m
++CONFIG_NLS_ISO8859_7=m
++CONFIG_NLS_ISO8859_9=m
++CONFIG_NLS_ISO8859_13=m
++CONFIG_NLS_ISO8859_14=m
++CONFIG_NLS_ISO8859_15=m
++CONFIG_NLS_KOI8_R=m
++CONFIG_NLS_KOI8_U=m
++CONFIG_NLS_MAC_ROMAN=m
++CONFIG_NLS_MAC_CELTIC=m
++CONFIG_NLS_MAC_CENTEURO=m
++CONFIG_NLS_MAC_CROATIAN=m
++CONFIG_NLS_MAC_CYRILLIC=m
++CONFIG_NLS_MAC_GAELIC=m
++CONFIG_NLS_MAC_GREEK=m
++CONFIG_NLS_MAC_ICELAND=m
++CONFIG_NLS_MAC_INUIT=m
++CONFIG_NLS_MAC_ROMANIAN=m
++CONFIG_NLS_MAC_TURKISH=m
++CONFIG_NLS_UTF8=m
++CONFIG_DLM=m
++# CONFIG_DLM_DEBUG is not set
++
++#
++# Kernel hacking
++#
++CONFIG_TRACE_IRQFLAGS_SUPPORT=y
++
++#
++# printk and dmesg options
++#
++CONFIG_PRINTK_TIME=y
++CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7
++CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4
++CONFIG_BOOT_PRINTK_DELAY=y
++CONFIG_DYNAMIC_DEBUG=y
++
++#
++# Compile-time checks and compiler options
++#
++CONFIG_DEBUG_INFO=y
++# CONFIG_DEBUG_INFO_REDUCED is not set
++# CONFIG_DEBUG_INFO_SPLIT is not set
++CONFIG_DEBUG_INFO_DWARF4=y
++CONFIG_GDB_SCRIPTS=y
++# CONFIG_ENABLE_WARN_DEPRECATED is not set
++# CONFIG_ENABLE_MUST_CHECK is not set
++CONFIG_FRAME_WARN=1024
++# CONFIG_STRIP_ASM_SYMS is not set
++# CONFIG_READABLE_ASM is not set
++CONFIG_UNUSED_SYMBOLS=y
++# CONFIG_PAGE_OWNER is not set
++CONFIG_DEBUG_FS=y
++# CONFIG_HEADERS_CHECK is not set
++# CONFIG_DEBUG_SECTION_MISMATCH is not set
++CONFIG_SECTION_MISMATCH_WARN_ONLY=y
++CONFIG_STACK_VALIDATION=y
++# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
++CONFIG_MAGIC_SYSRQ=y
++CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1
++CONFIG_MAGIC_SYSRQ_SERIAL=y
++CONFIG_DEBUG_KERNEL=y
++
++#
++# Memory Debugging
++#
++# CONFIG_PAGE_EXTENSION is not set
++# CONFIG_DEBUG_PAGEALLOC is not set
++# CONFIG_PAGE_POISONING is not set
++# CONFIG_DEBUG_PAGE_REF is not set
++# CONFIG_DEBUG_RODATA_TEST is not set
++# CONFIG_DEBUG_OBJECTS is not set
++# CONFIG_SLUB_DEBUG_ON is not set
++# CONFIG_SLUB_STATS is not set
++CONFIG_HAVE_DEBUG_KMEMLEAK=y
++# CONFIG_DEBUG_KMEMLEAK is not set
++# CONFIG_DEBUG_STACK_USAGE is not set
++# CONFIG_DEBUG_VM is not set
++CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y
++# CONFIG_DEBUG_VIRTUAL is not set
++# CONFIG_DEBUG_MEMORY_INIT is not set
++CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
++# CONFIG_DEBUG_PER_CPU_MAPS is not set
++CONFIG_HAVE_DEBUG_STACKOVERFLOW=y
++# CONFIG_DEBUG_STACKOVERFLOW is not set
++CONFIG_HAVE_ARCH_KASAN=y
++# CONFIG_KASAN is not set
++CONFIG_ARCH_HAS_KCOV=y
++# CONFIG_KCOV is not set
++# CONFIG_DEBUG_SHIRQ is not set
++
++#
++# Debug Lockups and Hangs
++#
++CONFIG_LOCKUP_DETECTOR=y
++CONFIG_SOFTLOCKUP_DETECTOR=y
++CONFIG_HARDLOCKUP_DETECTOR_PERF=y
++CONFIG_HARDLOCKUP_CHECK_TIMESTAMP=y
++CONFIG_HARDLOCKUP_DETECTOR=y
++# CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is not set
++CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=0
++# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
++CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
++CONFIG_DETECT_HUNG_TASK=y
++CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120
++# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
++CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
++# CONFIG_WQ_WATCHDOG is not set
++# CONFIG_PANIC_ON_OOPS is not set
++CONFIG_PANIC_ON_OOPS_VALUE=0
++CONFIG_PANIC_TIMEOUT=0
++CONFIG_SCHED_DEBUG=y
++CONFIG_SCHED_INFO=y
++CONFIG_SCHEDSTATS=y
++CONFIG_SCHED_STACK_END_CHECK=y
++# CONFIG_DEBUG_TIMEKEEPING is not set
++
++#
++# Lock Debugging (spinlocks, mutexes, etc...)
++#
++# CONFIG_DEBUG_RT_MUTEXES is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++# CONFIG_DEBUG_MUTEXES is not set
++# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set
++# CONFIG_DEBUG_LOCK_ALLOC is not set
++# CONFIG_PROVE_LOCKING is not set
++# CONFIG_LOCK_STAT is not set
++# CONFIG_DEBUG_ATOMIC_SLEEP is not set
++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
++CONFIG_LOCK_TORTURE_TEST=m
++# CONFIG_WW_MUTEX_SELFTEST is not set
++CONFIG_STACKTRACE=y
++# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set
++# CONFIG_DEBUG_KOBJECT is not set
++CONFIG_DEBUG_BUGVERBOSE=y
++# CONFIG_DEBUG_LIST is not set
++# CONFIG_DEBUG_PI_LIST is not set
++# CONFIG_DEBUG_SG is not set
++# CONFIG_DEBUG_NOTIFIERS is not set
++# CONFIG_DEBUG_CREDENTIALS is not set
++
++#
++# RCU Debugging
++#
++# CONFIG_PROVE_RCU is not set
++CONFIG_TORTURE_TEST=m
++# CONFIG_RCU_PERF_TEST is not set
++# CONFIG_RCU_TORTURE_TEST is not set
++CONFIG_RCU_CPU_STALL_TIMEOUT=60
++# CONFIG_RCU_TRACE is not set
++# CONFIG_RCU_EQS_DEBUG is not set
++# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set
++# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
++# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set
++CONFIG_NOTIFIER_ERROR_INJECTION=m
++CONFIG_PM_NOTIFIER_ERROR_INJECT=m
++# CONFIG_NETDEV_NOTIFIER_ERROR_INJECT is not set
++# CONFIG_FAULT_INJECTION is not set
++CONFIG_FUNCTION_ERROR_INJECTION=y
++# CONFIG_LATENCYTOP is not set
++CONFIG_USER_STACKTRACE_SUPPORT=y
++CONFIG_NOP_TRACER=y
++CONFIG_HAVE_FUNCTION_TRACER=y
++CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
++CONFIG_HAVE_DYNAMIC_FTRACE=y
++CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y
++CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
++CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
++CONFIG_HAVE_FENTRY=y
++CONFIG_HAVE_C_RECORDMCOUNT=y
++CONFIG_TRACER_MAX_TRACE=y
++CONFIG_TRACE_CLOCK=y
++CONFIG_RING_BUFFER=y
++CONFIG_EVENT_TRACING=y
++CONFIG_CONTEXT_SWITCH_TRACER=y
++CONFIG_RING_BUFFER_ALLOW_SWAP=y
++CONFIG_TRACING=y
++CONFIG_GENERIC_TRACER=y
++CONFIG_TRACING_SUPPORT=y
++CONFIG_FTRACE=y
++CONFIG_FUNCTION_TRACER=y
++CONFIG_FUNCTION_GRAPH_TRACER=y
++# CONFIG_PREEMPTIRQ_EVENTS is not set
++# CONFIG_IRQSOFF_TRACER is not set
++CONFIG_SCHED_TRACER=y
++# CONFIG_HWLAT_TRACER is not set
++CONFIG_FTRACE_SYSCALLS=y
++CONFIG_TRACER_SNAPSHOT=y
++# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set
++CONFIG_BRANCH_PROFILE_NONE=y
++# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
++# CONFIG_PROFILE_ALL_BRANCHES is not set
++CONFIG_STACK_TRACER=y
++CONFIG_BLK_DEV_IO_TRACE=y
++CONFIG_KPROBE_EVENTS=y
++# CONFIG_UPROBE_EVENTS is not set
++CONFIG_BPF_EVENTS=y
++CONFIG_PROBE_EVENTS=y
++CONFIG_DYNAMIC_FTRACE=y
++CONFIG_DYNAMIC_FTRACE_WITH_REGS=y
++CONFIG_FUNCTION_PROFILER=y
++# CONFIG_BPF_KPROBE_OVERRIDE is not set
++CONFIG_FTRACE_MCOUNT_RECORD=y
++# CONFIG_FTRACE_STARTUP_TEST is not set
++CONFIG_MMIOTRACE=y
++# CONFIG_HIST_TRIGGERS is not set
++# CONFIG_MMIOTRACE_TEST is not set
++# CONFIG_TRACEPOINT_BENCHMARK is not set
++# CONFIG_RING_BUFFER_BENCHMARK is not set
++# CONFIG_RING_BUFFER_STARTUP_TEST is not set
++# CONFIG_TRACE_EVAL_MAP_FILE is not set
++CONFIG_TRACING_EVENTS_GPIO=y
++# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set
++# CONFIG_DMA_API_DEBUG is not set
++# CONFIG_RUNTIME_TESTING_MENU is not set
++CONFIG_MEMTEST=y
++# CONFIG_BUG_ON_DATA_CORRUPTION is not set
++# CONFIG_SAMPLES is not set
++CONFIG_HAVE_ARCH_KGDB=y
++CONFIG_KGDB=y
++CONFIG_KGDB_SERIAL_CONSOLE=y
++# CONFIG_KGDB_TESTS is not set
++CONFIG_KGDB_LOW_LEVEL_TRAP=y
++CONFIG_KGDB_KDB=y
++CONFIG_KDB_DEFAULT_ENABLE=0x1
++CONFIG_KDB_KEYBOARD=y
++CONFIG_KDB_CONTINUE_CATASTROPHIC=0
++CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y
++# CONFIG_ARCH_WANTS_UBSAN_NO_NULL is not set
++# CONFIG_UBSAN is not set
++CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y
++CONFIG_STRICT_DEVMEM=y
++# CONFIG_IO_STRICT_DEVMEM is not set
++CONFIG_EARLY_PRINTK_USB=y
++# CONFIG_X86_VERBOSE_BOOTUP is not set
++CONFIG_EARLY_PRINTK=y
++CONFIG_EARLY_PRINTK_DBGP=y
++CONFIG_EARLY_PRINTK_EFI=y
++# CONFIG_EARLY_PRINTK_USB_XDBC is not set
++# CONFIG_X86_PTDUMP_CORE is not set
++# CONFIG_X86_PTDUMP is not set
++# CONFIG_EFI_PGT_DUMP is not set
++# CONFIG_DEBUG_WX is not set
++CONFIG_DOUBLEFAULT=y
++# CONFIG_DEBUG_TLBFLUSH is not set
++# CONFIG_IOMMU_DEBUG is not set
++CONFIG_HAVE_MMIOTRACE_SUPPORT=y
++# CONFIG_X86_DECODER_SELFTEST is not set
++CONFIG_IO_DELAY_TYPE_0X80=0
++CONFIG_IO_DELAY_TYPE_0XED=1
++CONFIG_IO_DELAY_TYPE_UDELAY=2
++CONFIG_IO_DELAY_TYPE_NONE=3
++# CONFIG_IO_DELAY_0X80 is not set
++CONFIG_IO_DELAY_0XED=y
++# CONFIG_IO_DELAY_UDELAY is not set
++# CONFIG_IO_DELAY_NONE is not set
++CONFIG_DEFAULT_IO_DELAY_TYPE=1
++# CONFIG_DEBUG_BOOT_PARAMS is not set
++# CONFIG_CPA_DEBUG is not set
++CONFIG_OPTIMIZE_INLINING=y
++# CONFIG_DEBUG_ENTRY is not set
++# CONFIG_DEBUG_NMI_SELFTEST is not set
++CONFIG_X86_DEBUG_FPU=y
++CONFIG_PUNIT_ATOM_DEBUG=m
++CONFIG_UNWINDER_ORC=y
++# CONFIG_UNWINDER_FRAME_POINTER is not set
++# CONFIG_UNWINDER_GUESS is not set
++
++#
++# Security options
++#
++CONFIG_KEYS=y
++CONFIG_KEYS_COMPAT=y
++CONFIG_PERSISTENT_KEYRINGS=y
++# CONFIG_BIG_KEYS is not set
++CONFIG_TRUSTED_KEYS=y
++CONFIG_ENCRYPTED_KEYS=y
++# CONFIG_KEY_DH_OPERATIONS is not set
++# CONFIG_SECURITY_DMESG_RESTRICT is not set
++CONFIG_SECURITY=y
++CONFIG_SECURITY_WRITABLE_HOOKS=y
++CONFIG_SECURITYFS=y
++CONFIG_SECURITY_NETWORK=y
++CONFIG_PAGE_TABLE_ISOLATION=y
++# CONFIG_SECURITY_INFINIBAND is not set
++CONFIG_SECURITY_NETWORK_XFRM=y
++CONFIG_SECURITY_PATH=y
++CONFIG_INTEL_TXT=y
++CONFIG_LSM_MMAP_MIN_ADDR=0
++CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y
++# CONFIG_HARDENED_USERCOPY is not set
++# CONFIG_FORTIFY_SOURCE is not set
++# CONFIG_STATIC_USERMODEHELPER is not set
++CONFIG_SECURITY_SELINUX=y
++CONFIG_SECURITY_SELINUX_BOOTPARAM=y
++CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
++CONFIG_SECURITY_SELINUX_DISABLE=y
++CONFIG_SECURITY_SELINUX_DEVELOP=y
++CONFIG_SECURITY_SELINUX_AVC_STATS=y
++CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
++CONFIG_SECURITY_SMACK=y
++# CONFIG_SECURITY_SMACK_BRINGUP is not set
++CONFIG_SECURITY_SMACK_NETFILTER=y
++# CONFIG_SECURITY_SMACK_APPEND_SIGNALS is not set
++CONFIG_SECURITY_TOMOYO=y
++CONFIG_SECURITY_TOMOYO_MAX_ACCEPT_ENTRY=2048
++CONFIG_SECURITY_TOMOYO_MAX_AUDIT_LOG=1024
++# CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER is not set
++CONFIG_SECURITY_TOMOYO_POLICY_LOADER="/sbin/tomoyo-init"
++CONFIG_SECURITY_TOMOYO_ACTIVATION_TRIGGER="/sbin/init"
++CONFIG_SECURITY_APPARMOR=y
++CONFIG_SECURITY_APPARMOR_BOOTPARAM_VALUE=1
++CONFIG_SECURITY_APPARMOR_HASH=y
++CONFIG_SECURITY_APPARMOR_HASH_DEFAULT=y
++# CONFIG_SECURITY_APPARMOR_DEBUG is not set
++# CONFIG_SECURITY_LOADPIN is not set
++CONFIG_SECURITY_YAMA=y
++CONFIG_INTEGRITY=y
++CONFIG_INTEGRITY_SIGNATURE=y
++CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
++CONFIG_INTEGRITY_TRUSTED_KEYRING=y
++CONFIG_INTEGRITY_AUDIT=y
++CONFIG_IMA=y
++CONFIG_IMA_MEASURE_PCR_IDX=10
++CONFIG_IMA_LSM_RULES=y
++# CONFIG_IMA_TEMPLATE is not set
++CONFIG_IMA_NG_TEMPLATE=y
++# CONFIG_IMA_SIG_TEMPLATE is not set
++CONFIG_IMA_DEFAULT_TEMPLATE="ima-ng"
++CONFIG_IMA_DEFAULT_HASH_SHA1=y
++# CONFIG_IMA_DEFAULT_HASH_SHA256 is not set
++# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set
++CONFIG_IMA_DEFAULT_HASH="sha1"
++# CONFIG_IMA_WRITE_POLICY is not set
++# CONFIG_IMA_READ_POLICY is not set
++CONFIG_IMA_APPRAISE=y
++CONFIG_IMA_APPRAISE_BOOTPARAM=y
++CONFIG_IMA_TRUSTED_KEYRING=y
++# CONFIG_IMA_BLACKLIST_KEYRING is not set
++# CONFIG_IMA_LOAD_X509 is not set
++CONFIG_EVM=y
++CONFIG_EVM_ATTR_FSUUID=y
++CONFIG_EVM_EXTRA_SMACK_XATTRS=y
++# CONFIG_EVM_LOAD_X509 is not set
++# CONFIG_DEFAULT_SECURITY_SELINUX is not set
++# CONFIG_DEFAULT_SECURITY_SMACK is not set
++# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
++CONFIG_DEFAULT_SECURITY_APPARMOR=y
++# CONFIG_DEFAULT_SECURITY_DAC is not set
++CONFIG_DEFAULT_SECURITY="apparmor"
++CONFIG_XOR_BLOCKS=m
++CONFIG_ASYNC_CORE=m
++CONFIG_ASYNC_MEMCPY=m
++CONFIG_ASYNC_XOR=m
++CONFIG_ASYNC_PQ=m
++CONFIG_ASYNC_RAID6_RECOV=m
++CONFIG_CRYPTO=y
++
++#
++# Crypto core or helper
++#
++CONFIG_CRYPTO_ALGAPI=y
++CONFIG_CRYPTO_ALGAPI2=y
++CONFIG_CRYPTO_AEAD=m
++CONFIG_CRYPTO_AEAD2=y
++CONFIG_CRYPTO_BLKCIPHER=y
++CONFIG_CRYPTO_BLKCIPHER2=y
++CONFIG_CRYPTO_HASH=y
++CONFIG_CRYPTO_HASH2=y
++CONFIG_CRYPTO_RNG=y
++CONFIG_CRYPTO_RNG2=y
++CONFIG_CRYPTO_RNG_DEFAULT=m
++CONFIG_CRYPTO_AKCIPHER2=y
++CONFIG_CRYPTO_AKCIPHER=y
++CONFIG_CRYPTO_KPP2=y
++CONFIG_CRYPTO_KPP=m
++CONFIG_CRYPTO_ACOMP2=y
++CONFIG_CRYPTO_RSA=y
++CONFIG_CRYPTO_DH=m
++CONFIG_CRYPTO_ECDH=m
++CONFIG_CRYPTO_MANAGER=y
++CONFIG_CRYPTO_MANAGER2=y
++CONFIG_CRYPTO_USER=m
++CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
++CONFIG_CRYPTO_GF128MUL=m
++CONFIG_CRYPTO_NULL=m
++CONFIG_CRYPTO_NULL2=y
++CONFIG_CRYPTO_PCRYPT=m
++CONFIG_CRYPTO_WORKQUEUE=y
++CONFIG_CRYPTO_CRYPTD=m
++CONFIG_CRYPTO_MCRYPTD=m
++CONFIG_CRYPTO_AUTHENC=m
++CONFIG_CRYPTO_TEST=m
++CONFIG_CRYPTO_ABLK_HELPER=m
++CONFIG_CRYPTO_SIMD=m
++CONFIG_CRYPTO_GLUE_HELPER_X86=m
++CONFIG_CRYPTO_ENGINE=m
++
++#
++# Authenticated Encryption with Associated Data
++#
++CONFIG_CRYPTO_CCM=m
++CONFIG_CRYPTO_GCM=m
++CONFIG_CRYPTO_CHACHA20POLY1305=m
++CONFIG_CRYPTO_SEQIV=m
++CONFIG_CRYPTO_ECHAINIV=m
++
++#
++# Block modes
++#
++CONFIG_CRYPTO_CBC=y
++CONFIG_CRYPTO_CTR=m
++CONFIG_CRYPTO_CTS=m
++CONFIG_CRYPTO_ECB=y
++CONFIG_CRYPTO_LRW=m
++CONFIG_CRYPTO_PCBC=m
++CONFIG_CRYPTO_XTS=m
++CONFIG_CRYPTO_KEYWRAP=m
++
++#
++# Hash modes
++#
++CONFIG_CRYPTO_CMAC=m
++CONFIG_CRYPTO_HMAC=y
++CONFIG_CRYPTO_XCBC=m
++CONFIG_CRYPTO_VMAC=m
++
++#
++# Digest
++#
++CONFIG_CRYPTO_CRC32C=y
++CONFIG_CRYPTO_CRC32C_INTEL=y
++CONFIG_CRYPTO_CRC32=m
++CONFIG_CRYPTO_CRC32_PCLMUL=m
++CONFIG_CRYPTO_CRCT10DIF=y
++CONFIG_CRYPTO_CRCT10DIF_PCLMUL=m
++CONFIG_CRYPTO_GHASH=m
++CONFIG_CRYPTO_POLY1305=m
++CONFIG_CRYPTO_POLY1305_X86_64=m
++CONFIG_CRYPTO_MD4=m
++CONFIG_CRYPTO_MD5=y
++CONFIG_CRYPTO_MICHAEL_MIC=m
++CONFIG_CRYPTO_RMD128=m
++CONFIG_CRYPTO_RMD160=m
++CONFIG_CRYPTO_RMD256=m
++CONFIG_CRYPTO_RMD320=m
++CONFIG_CRYPTO_SHA1=y
++CONFIG_CRYPTO_SHA1_SSSE3=m
++CONFIG_CRYPTO_SHA256_SSSE3=m
++CONFIG_CRYPTO_SHA512_SSSE3=m
++CONFIG_CRYPTO_SHA1_MB=m
++# CONFIG_CRYPTO_SHA256_MB is not set
++# CONFIG_CRYPTO_SHA512_MB is not set
++CONFIG_CRYPTO_SHA256=y
++CONFIG_CRYPTO_SHA512=y
++# CONFIG_CRYPTO_SHA3 is not set
++# CONFIG_CRYPTO_SM3 is not set
++CONFIG_CRYPTO_TGR192=m
++CONFIG_CRYPTO_WP512=m
++CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m
++
++#
++# Ciphers
++#
++CONFIG_CRYPTO_AES=y
++# CONFIG_CRYPTO_AES_TI is not set
++CONFIG_CRYPTO_AES_X86_64=m
++CONFIG_CRYPTO_AES_NI_INTEL=m
++CONFIG_CRYPTO_ANUBIS=m
++CONFIG_CRYPTO_ARC4=m
++CONFIG_CRYPTO_BLOWFISH=m
++CONFIG_CRYPTO_BLOWFISH_COMMON=m
++CONFIG_CRYPTO_BLOWFISH_X86_64=m
++CONFIG_CRYPTO_CAMELLIA=m
++CONFIG_CRYPTO_CAMELLIA_X86_64=m
++CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64=m
++CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64=m
++CONFIG_CRYPTO_CAST_COMMON=m
++CONFIG_CRYPTO_CAST5=m
++CONFIG_CRYPTO_CAST5_AVX_X86_64=m
++CONFIG_CRYPTO_CAST6=m
++CONFIG_CRYPTO_CAST6_AVX_X86_64=m
++CONFIG_CRYPTO_DES=m
++CONFIG_CRYPTO_DES3_EDE_X86_64=m
++CONFIG_CRYPTO_FCRYPT=m
++CONFIG_CRYPTO_KHAZAD=m
++CONFIG_CRYPTO_SALSA20=m
++CONFIG_CRYPTO_SALSA20_X86_64=m
++CONFIG_CRYPTO_CHACHA20=m
++CONFIG_CRYPTO_CHACHA20_X86_64=m
++CONFIG_CRYPTO_SEED=m
++CONFIG_CRYPTO_SERPENT=m
++CONFIG_CRYPTO_SERPENT_SSE2_X86_64=m
++CONFIG_CRYPTO_SERPENT_AVX_X86_64=m
++CONFIG_CRYPTO_SERPENT_AVX2_X86_64=m
++CONFIG_CRYPTO_TEA=m
++CONFIG_CRYPTO_TWOFISH=m
++CONFIG_CRYPTO_TWOFISH_COMMON=m
++CONFIG_CRYPTO_TWOFISH_X86_64=m
++CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=m
++CONFIG_CRYPTO_TWOFISH_AVX_X86_64=m
++
++#
++# Compression
++#
++CONFIG_CRYPTO_DEFLATE=m
++CONFIG_CRYPTO_LZO=y
++CONFIG_CRYPTO_842=m
++CONFIG_CRYPTO_LZ4=m
++CONFIG_CRYPTO_LZ4HC=m
++
++#
++# Random Number Generation
++#
++CONFIG_CRYPTO_ANSI_CPRNG=m
++CONFIG_CRYPTO_DRBG_MENU=m
++CONFIG_CRYPTO_DRBG_HMAC=y
++CONFIG_CRYPTO_DRBG_HASH=y
++CONFIG_CRYPTO_DRBG_CTR=y
++CONFIG_CRYPTO_DRBG=m
++CONFIG_CRYPTO_JITTERENTROPY=m
++CONFIG_CRYPTO_USER_API=m
++CONFIG_CRYPTO_USER_API_HASH=m
++CONFIG_CRYPTO_USER_API_SKCIPHER=m
++CONFIG_CRYPTO_USER_API_RNG=m
++CONFIG_CRYPTO_USER_API_AEAD=m
++CONFIG_CRYPTO_HASH_INFO=y
++CONFIG_CRYPTO_HW=y
++CONFIG_CRYPTO_DEV_PADLOCK=y
++CONFIG_CRYPTO_DEV_PADLOCK_AES=m
++CONFIG_CRYPTO_DEV_PADLOCK_SHA=m
++# CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC is not set
++CONFIG_CRYPTO_DEV_CCP=y
++CONFIG_CRYPTO_DEV_CCP_DD=m
++CONFIG_CRYPTO_DEV_SP_CCP=y
++CONFIG_CRYPTO_DEV_CCP_CRYPTO=m
++CONFIG_CRYPTO_DEV_SP_PSP=y
++CONFIG_CRYPTO_DEV_QAT=m
++CONFIG_CRYPTO_DEV_QAT_DH895xCC=m
++# CONFIG_CRYPTO_DEV_QAT_C3XXX is not set
++# CONFIG_CRYPTO_DEV_QAT_C62X is not set
++CONFIG_CRYPTO_DEV_QAT_DH895xCCVF=m
++# CONFIG_CRYPTO_DEV_QAT_C3XXXVF is not set
++# CONFIG_CRYPTO_DEV_QAT_C62XVF is not set
++# CONFIG_CRYPTO_DEV_NITROX_CNN55XX is not set
++# CONFIG_CRYPTO_DEV_CHELSIO is not set
++CONFIG_CRYPTO_DEV_VIRTIO=m
++CONFIG_ASYMMETRIC_KEY_TYPE=y
++CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
++CONFIG_X509_CERTIFICATE_PARSER=y
++CONFIG_PKCS7_MESSAGE_PARSER=y
++CONFIG_PKCS7_TEST_KEY=m
++CONFIG_SIGNED_PE_FILE_VERIFICATION=y
++
++#
++# Certificates for signature checking
++#
++CONFIG_MODULE_SIG_KEY="certs/signing_key.pem"
++CONFIG_SYSTEM_TRUSTED_KEYRING=y
++CONFIG_SYSTEM_TRUSTED_KEYS=""
++# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set
++# CONFIG_SECONDARY_TRUSTED_KEYRING is not set
++# CONFIG_SYSTEM_BLACKLIST_KEYRING is not set
++CONFIG_HAVE_KVM=y
++CONFIG_HAVE_KVM_IRQCHIP=y
++CONFIG_HAVE_KVM_IRQFD=y
++CONFIG_HAVE_KVM_IRQ_ROUTING=y
++CONFIG_HAVE_KVM_EVENTFD=y
++CONFIG_KVM_MMIO=y
++CONFIG_KVM_ASYNC_PF=y
++CONFIG_HAVE_KVM_MSI=y
++CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y
++CONFIG_KVM_VFIO=y
++CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y
++CONFIG_KVM_COMPAT=y
++CONFIG_HAVE_KVM_IRQ_BYPASS=y
++CONFIG_VIRTUALIZATION=y
++CONFIG_KVM=m
++CONFIG_KVM_INTEL=m
++CONFIG_KVM_AMD=m
++CONFIG_KVM_AMD_SEV=y
++# CONFIG_KVM_MMU_AUDIT is not set
++CONFIG_VHOST_NET=m
++CONFIG_VHOST_SCSI=m
++# CONFIG_VHOST_VSOCK is not set
++CONFIG_VHOST=m
++# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set
++CONFIG_BINARY_PRINTF=y
++
++#
++# Library routines
++#
++CONFIG_RAID6_PQ=m
++CONFIG_BITREVERSE=y
++# CONFIG_HAVE_ARCH_BITREVERSE is not set
++CONFIG_RATIONAL=y
++CONFIG_GENERIC_STRNCPY_FROM_USER=y
++CONFIG_GENERIC_STRNLEN_USER=y
++CONFIG_GENERIC_NET_UTILS=y
++CONFIG_GENERIC_FIND_FIRST_BIT=y
++CONFIG_GENERIC_PCI_IOMAP=y
++CONFIG_GENERIC_IOMAP=y
++CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y
++CONFIG_ARCH_HAS_FAST_MULTIPLIER=y
++CONFIG_CRC_CCITT=y
++CONFIG_CRC16=y
++CONFIG_CRC_T10DIF=y
++CONFIG_CRC_ITU_T=m
++CONFIG_CRC32=y
++# CONFIG_CRC32_SELFTEST is not set
++CONFIG_CRC32_SLICEBY8=y
++# CONFIG_CRC32_SLICEBY4 is not set
++# CONFIG_CRC32_SARWATE is not set
++# CONFIG_CRC32_BIT is not set
++# CONFIG_CRC4 is not set
++CONFIG_CRC7=m
++CONFIG_LIBCRC32C=m
++CONFIG_CRC8=m
++CONFIG_XXHASH=m
++# CONFIG_AUDIT_ARCH_COMPAT_GENERIC is not set
++# CONFIG_RANDOM32_SELFTEST is not set
++CONFIG_842_COMPRESS=m
++CONFIG_842_DECOMPRESS=m
++CONFIG_ZLIB_INFLATE=y
++CONFIG_ZLIB_DEFLATE=y
++CONFIG_LZO_COMPRESS=y
++CONFIG_LZO_DECOMPRESS=y
++CONFIG_LZ4_COMPRESS=m
++CONFIG_LZ4HC_COMPRESS=m
++CONFIG_LZ4_DECOMPRESS=y
++CONFIG_ZSTD_COMPRESS=m
++CONFIG_ZSTD_DECOMPRESS=m
++CONFIG_XZ_DEC=y
++CONFIG_XZ_DEC_X86=y
++CONFIG_XZ_DEC_POWERPC=y
++CONFIG_XZ_DEC_IA64=y
++CONFIG_XZ_DEC_ARM=y
++CONFIG_XZ_DEC_ARMTHUMB=y
++CONFIG_XZ_DEC_SPARC=y
++CONFIG_XZ_DEC_BCJ=y
++CONFIG_XZ_DEC_TEST=m
++CONFIG_DECOMPRESS_GZIP=y
++CONFIG_DECOMPRESS_BZIP2=y
++CONFIG_DECOMPRESS_LZMA=y
++CONFIG_DECOMPRESS_XZ=y
++CONFIG_DECOMPRESS_LZO=y
++CONFIG_DECOMPRESS_LZ4=y
++CONFIG_GENERIC_ALLOCATOR=y
++CONFIG_REED_SOLOMON=m
++CONFIG_REED_SOLOMON_ENC8=y
++CONFIG_REED_SOLOMON_DEC8=y
++CONFIG_REED_SOLOMON_DEC16=y
++CONFIG_BCH=m
++CONFIG_BCH_CONST_PARAMS=y
++CONFIG_TEXTSEARCH=y
++CONFIG_TEXTSEARCH_KMP=m
++CONFIG_TEXTSEARCH_BM=m
++CONFIG_TEXTSEARCH_FSM=m
++CONFIG_BTREE=y
++CONFIG_INTERVAL_TREE=y
++CONFIG_RADIX_TREE_MULTIORDER=y
++CONFIG_ASSOCIATIVE_ARRAY=y
++CONFIG_HAS_IOMEM=y
++CONFIG_HAS_IOPORT_MAP=y
++CONFIG_HAS_DMA=y
++CONFIG_SGL_ALLOC=y
++# CONFIG_DMA_DIRECT_OPS is not set
++CONFIG_DMA_VIRT_OPS=y
++CONFIG_CHECK_SIGNATURE=y
++CONFIG_CPU_RMAP=y
++CONFIG_DQL=y
++CONFIG_GLOB=y
++# CONFIG_GLOB_SELFTEST is not set
++CONFIG_NLATTR=y
++CONFIG_LRU_CACHE=m
++CONFIG_CLZ_TAB=y
++CONFIG_CORDIC=m
++CONFIG_DDR=y
++CONFIG_IRQ_POLL=y
++CONFIG_MPILIB=y
++CONFIG_SIGNATURE=y
++CONFIG_OID_REGISTRY=y
++CONFIG_UCS2_STRING=y
++CONFIG_FONT_SUPPORT=y
++# CONFIG_FONTS is not set
++CONFIG_FONT_8x8=y
++CONFIG_FONT_8x16=y
++# CONFIG_SG_SPLIT is not set
++CONFIG_SG_POOL=y
++CONFIG_ARCH_HAS_SG_CHAIN=y
++CONFIG_ARCH_HAS_PMEM_API=y
++CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y
++CONFIG_SBITMAP=y
++# CONFIG_STRING_SELFTEST is not set
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+index 68f3891..a311a9f 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+@@ -20,6 +20,7 @@
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
++#undef pr_fmt
+ #define pr_fmt(fmt) "kfd2kgd: " fmt
+
+ #include "amdgpu_amdkfd.h"
+@@ -566,9 +567,11 @@ int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
+ void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+-
+- amdgpu_dpm_switch_power_profile(adev,
+- PP_SMC_POWER_PROFILE_COMPUTE, !idle);
++ if (adev->powerplay.pp_funcs &&
++ adev->powerplay.pp_funcs->switch_power_profile)
++ amdgpu_dpm_switch_power_profile(adev,
++ PP_SMC_POWER_PROFILE_COMPUTE,
++ !idle);
+ }
+
+ bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+index 29de7a7..0e32dd5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+@@ -22,6 +22,9 @@
+
+ #define pr_fmt(fmt) "kfd2kgd: " fmt
+
++#undef pr_fmt
++#define pr_fmt(fmt) "kfd2kgd: " fmt
++
+ #include <linux/fdtable.h>
+ #include <linux/uaccess.h>
+ #include <linux/firmware.h>
+@@ -231,7 +234,12 @@ static const struct kfd2kgd_calls kfd2kgd = {
+ .copy_mem_to_mem = amdgpu_amdkfd_copy_mem_to_mem,
+ .get_vram_usage = amdgpu_amdkfd_get_vram_usage,
+ .gpu_recover = amdgpu_amdkfd_gpu_reset,
+- .set_compute_idle = amdgpu_amdkfd_set_compute_idle
++ .set_compute_idle = amdgpu_amdkfd_set_compute_idle,
++ .get_dmabuf_info = amdgpu_amdkfd_get_dmabuf_info,
++ .import_dmabuf = amdgpu_amdkfd_gpuvm_import_dmabuf,
++ .export_dmabuf = amdgpu_amdkfd_gpuvm_export_dmabuf,
++ .pin_get_sg_table_bo = amdgpu_amdkfd_gpuvm_pin_get_sg_table,
++ .unpin_put_sg_table_bo = amdgpu_amdkfd_gpuvm_unpin_put_sg_table,
+ };
+
+ struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions()
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+index 9388f6a..72b139f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+@@ -22,6 +22,9 @@
+
+ #define pr_fmt(fmt) "kfd2kgd: " fmt
+
++#undef pr_fmt
++#define pr_fmt(fmt) "kfd2kgd: " fmt
++
+ #include <linux/module.h>
+ #include <linux/fdtable.h>
+ #include <linux/uaccess.h>
+@@ -204,7 +207,12 @@ static const struct kfd2kgd_calls kfd2kgd = {
+ .copy_mem_to_mem = amdgpu_amdkfd_copy_mem_to_mem,
+ .get_vram_usage = amdgpu_amdkfd_get_vram_usage,
+ .gpu_recover = amdgpu_amdkfd_gpu_reset,
+- .set_compute_idle = amdgpu_amdkfd_set_compute_idle
++ .set_compute_idle = amdgpu_amdkfd_set_compute_idle,
++ .get_dmabuf_info = amdgpu_amdkfd_get_dmabuf_info,
++ .import_dmabuf = amdgpu_amdkfd_gpuvm_import_dmabuf,
++ .export_dmabuf = amdgpu_amdkfd_gpuvm_export_dmabuf,
++ .pin_get_sg_table_bo = amdgpu_amdkfd_gpuvm_pin_get_sg_table,
++ .unpin_put_sg_table_bo = amdgpu_amdkfd_gpuvm_unpin_put_sg_table,
+ };
+
+ struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions()
+@@ -212,9 +221,30 @@ struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions()
+ return (struct kfd2kgd_calls *)&kfd2kgd;
+ }
+
++static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
++{
++ return (struct amdgpu_device *)kgd;
++}
++
+ static int create_process_gpumem(struct kgd_dev *kgd, uint64_t va, size_t size,
+ void *vm, struct kgd_mem **mem)
+ {
++ struct amdgpu_device *adev = get_amdgpu_device(kgd);
++ union TCP_WATCH_CNTL_BITS cntl;
++ unsigned int i;
++
++ cntl.u32All = 0;
++
++ cntl.bitfields.valid = 0;
++ cntl.bitfields.mask = ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK;
++ cntl.bitfields.atc = 1;
++
++ /* Turning off this address until we set all the registers */
++ for (i = 0; i < MAX_WATCH_ADDRESSES; i++)
++ WREG32(watchRegs[i * ADDRESS_WATCH_REG_MAX
++ + ADDRESS_WATCH_REG_CNTL],
++ cntl.u32All);
++
+ return 0;
+ }
+
+@@ -224,11 +254,6 @@ static void destroy_process_gpumem(struct kgd_dev *kgd, struct kgd_mem *mem)
+
+ }
+
+-static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
+-{
+- return (struct amdgpu_device *)kgd;
+-}
+-
+ static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
+ uint32_t queue, uint32_t vmid)
+ {
+@@ -806,6 +831,13 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
+ return invalidate_tlbs_with_kiq(adev, pasid);
+ #endif
+
++#ifdef V8_SUPPORT_IT_OFFICIAL
++ struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
++
++ if (ring->ready)
++ return invalidate_tlbs_with_kiq(adev, pasid);
++#endif
++
+ for (vmid = 0; vmid < 16; vmid++) {
+ if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
+ continue;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+index 0409867..727d26d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+@@ -20,6 +20,7 @@
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
++#undef pr_fmt
+ #define pr_fmt(fmt) "kfd2kgd: " fmt
+
+ #include <linux/module.h>
+@@ -150,6 +151,20 @@ static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
+ unsigned int watch_point_id,
+ unsigned int reg_offset);
+
++static uint32_t kgd_enable_debug_trap(struct kgd_dev *kgd,
++ uint32_t trap_debug_wave_launch_mode,
++ uint32_t vmid);
++static uint32_t kgd_disable_debug_trap(struct kgd_dev *kgd);
++static uint32_t kgd_set_debug_trap_data(struct kgd_dev *kgd,
++ int trap_data0,
++ int trap_data1);
++static uint32_t kgd_set_wave_launch_trap_override(struct kgd_dev *kgd,
++ uint32_t trap_override,
++ uint32_t trap_mask);
++static uint32_t kgd_set_wave_launch_mode(struct kgd_dev *kgd,
++ uint8_t wave_launch_mode,
++ uint32_t vmid);
++
+ static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
+ uint8_t vmid);
+ static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
+@@ -254,7 +269,17 @@ static const struct kfd2kgd_calls kfd2kgd = {
+ .copy_mem_to_mem = amdgpu_amdkfd_copy_mem_to_mem,
+ .get_vram_usage = amdgpu_amdkfd_get_vram_usage,
+ .gpu_recover = amdgpu_amdkfd_gpu_reset,
+- .set_compute_idle = amdgpu_amdkfd_set_compute_idle
++ .set_compute_idle = amdgpu_amdkfd_set_compute_idle,
++ .get_dmabuf_info = amdgpu_amdkfd_get_dmabuf_info,
++ .import_dmabuf = amdgpu_amdkfd_gpuvm_import_dmabuf,
++ .export_dmabuf = amdgpu_amdkfd_gpuvm_export_dmabuf,
++ .enable_debug_trap = kgd_enable_debug_trap,
++ .disable_debug_trap = kgd_disable_debug_trap,
++ .set_debug_trap_data = kgd_set_debug_trap_data,
++ .set_wave_launch_trap_override = kgd_set_wave_launch_trap_override,
++ .set_wave_launch_mode = kgd_set_wave_launch_mode,
++ .pin_get_sg_table_bo = amdgpu_amdkfd_gpuvm_pin_get_sg_table,
++ .unpin_put_sg_table_bo = amdgpu_amdkfd_gpuvm_unpin_put_sg_table,
+ };
+
+ struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions()
+@@ -1119,6 +1145,141 @@ static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
+ watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset];
+ }
+
++static uint32_t kgd_enable_debug_trap(struct kgd_dev *kgd,
++ uint32_t trap_debug_wave_launch_mode,
++ uint32_t vmid)
++{
++ struct amdgpu_device *adev = get_amdgpu_device(kgd);
++ uint32_t data = 0;
++ uint32_t orig_wave_cntl_value;
++ uint32_t orig_stall_vmid;
++
++ mutex_lock(&adev->grbm_idx_mutex);
++
++ orig_wave_cntl_value = RREG32(SOC15_REG_OFFSET(GC,
++ 0,
++ mmSPI_GDBG_WAVE_CNTL));
++ orig_stall_vmid = REG_GET_FIELD(orig_wave_cntl_value,
++ SPI_GDBG_WAVE_CNTL,
++ STALL_VMID);
++
++ data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, STALL_RA, 1);
++ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), data);
++
++ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_DATA0), 0);
++ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_DATA1), 0);
++
++ data = 0;
++ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), data);
++
++ data = 0;
++ data = REG_SET_FIELD(data, SPI_GDBG_TRAP_CONFIG,
++ VMID_SEL, 1<<vmid);
++ data = REG_SET_FIELD(data, SPI_GDBG_TRAP_CONFIG,
++ TRAP_EN, 1);
++ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_CONFIG), data);
++
++ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), orig_stall_vmid);
++
++ mutex_unlock(&adev->grbm_idx_mutex);
++
++ return 0;
++}
++
++static uint32_t kgd_disable_debug_trap(struct kgd_dev *kgd)
++{
++ struct amdgpu_device *adev = get_amdgpu_device(kgd);
++
++ mutex_lock(&adev->grbm_idx_mutex);
++
++ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_CONFIG), 0);
++
++ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_DATA0), 0);
++ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_DATA1), 0);
++
++ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0);
++
++ mutex_unlock(&adev->grbm_idx_mutex);
++
++ return 0;
++}
++
++static uint32_t kgd_set_debug_trap_data(struct kgd_dev *kgd,
++ int trap_data0,
++ int trap_data1)
++{
++ struct amdgpu_device *adev = get_amdgpu_device(kgd);
++
++ mutex_lock(&adev->grbm_idx_mutex);
++
++ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_DATA0), trap_data0);
++ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_DATA1), trap_data1);
++
++ mutex_unlock(&adev->grbm_idx_mutex);
++ return 0;
++}
++
++static uint32_t kgd_set_wave_launch_trap_override(struct kgd_dev *kgd,
++ uint32_t trap_override,
++ uint32_t trap_mask)
++{
++ struct amdgpu_device *adev = get_amdgpu_device(kgd);
++ uint32_t data = 0;
++
++ mutex_lock(&adev->grbm_idx_mutex);
++
++ data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL));
++ data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, STALL_RA, 1);
++ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), data);
++
++ data = 0;
++ data = REG_SET_FIELD(data, SPI_GDBG_TRAP_MASK,
++ EXCP_EN, trap_mask);
++ data = REG_SET_FIELD(data, SPI_GDBG_TRAP_MASK,
++ REPLACE, trap_override);
++ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), data);
++
++ data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL));
++ data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, STALL_RA, 0);
++ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), data);
++
++ mutex_unlock(&adev->grbm_idx_mutex);
++
++ return 0;
++}
++
++static uint32_t kgd_set_wave_launch_mode(struct kgd_dev *kgd,
++ uint8_t wave_launch_mode,
++ uint32_t vmid)
++{
++ struct amdgpu_device *adev = get_amdgpu_device(kgd);
++ uint32_t data = 0;
++ bool is_stall_mode;
++ bool is_mode_set;
++
++
++ is_stall_mode = (wave_launch_mode == 4);
++ is_mode_set = (wave_launch_mode != 0 && wave_launch_mode != 4);
++
++ mutex_lock(&adev->grbm_idx_mutex);
++
++ data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL2,
++ VMID_MASK, is_mode_set ? 1 << vmid : 0);
++ data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL2,
++ MODE, is_mode_set ? wave_launch_mode : 0);
++ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL2), data);
++
++ data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL));
++ data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL,
++ STALL_VMID, is_stall_mode ? 1 << vmid : 0);
++ data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL,
++ STALL_RA, is_stall_mode ? 1 : 0);
++ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), data);
++
++ mutex_unlock(&adev->grbm_idx_mutex);
++
++ return 0;
++ }
+ static int write_config_static_mem(struct kgd_dev *kgd, bool swizzle_enable,
+ uint8_t element_size, uint8_t index_stride, uint8_t mtype)
+ {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index c8c00b4..8cd9f0d 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -923,53 +923,53 @@ static bool check_sg_size(struct sg_table *sgt, uint64_t size)
+
+ static int process_validate_vms(struct amdkfd_process_info *process_info)
+ {
+- struct amdgpu_vm *peer_vm;
+- int ret;
++ struct amdgpu_vm *peer_vm;
++ int ret;
+
+- list_for_each_entry(peer_vm, &process_info->vm_list_head,
+- vm_list_node) {
+- ret = vm_validate_pt_pd_bos(peer_vm);
+- if (ret)
+- return ret;
+- }
++ list_for_each_entry(peer_vm, &process_info->vm_list_head,
++ vm_list_node) {
++ ret = vm_validate_pt_pd_bos(peer_vm);
++ if (ret)
++ return ret;
++ }
+
+- return 0;
++ return 0;
+ }
+
+ static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
+- struct amdgpu_sync *sync)
++ struct amdgpu_sync *sync)
+ {
+- struct amdgpu_vm *peer_vm;
+- int ret;
++ struct amdgpu_vm *peer_vm;
++ int ret;
+
+- list_for_each_entry(peer_vm, &process_info->vm_list_head,
+- vm_list_node) {
+- struct amdgpu_bo *pd = peer_vm->root.base.bo;
++ list_for_each_entry(peer_vm, &process_info->vm_list_head,
++ vm_list_node) {
++ struct amdgpu_bo *pd = peer_vm->root.base.bo;
+
+- ret = amdgpu_sync_resv(NULL,
+- sync, pd->tbo.resv,
+- AMDGPU_FENCE_OWNER_UNDEFINED, false);
+- if (ret)
+- return ret;
+- }
++ ret = amdgpu_sync_resv(NULL,
++ sync, pd->tbo.resv,
++ AMDGPU_FENCE_OWNER_UNDEFINED, false);
++ if (ret)
++ return ret;
++ }
+
+- return 0;
++ return 0;
+ }
+
+ static int process_update_pds(struct amdkfd_process_info *process_info,
+- struct amdgpu_sync *sync)
++ struct amdgpu_sync *sync)
+ {
+- struct amdgpu_vm *peer_vm;
+- int ret;
++ struct amdgpu_vm *peer_vm;
++ int ret;
+
+- list_for_each_entry(peer_vm, &process_info->vm_list_head,
+- vm_list_node) {
+- ret = vm_update_pds(peer_vm, sync);
+- if (ret)
+- return ret;
+- }
++ list_for_each_entry(peer_vm, &process_info->vm_list_head,
++ vm_list_node) {
++ ret = vm_update_pds(peer_vm, sync);
++ if (ret)
++ return ret;
++ }
+
+- return 0;
++ return 0;
+ }
+
+ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
+@@ -1097,6 +1097,10 @@ int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
+ if (avm->process_info)
+ return -EINVAL;
+
++ /* Already a compute VM? */
++ if (avm->process_info)
++ return -EINVAL;
++
+ /* Convert VM into a compute VM */
+ ret = amdgpu_vm_make_compute(adev, avm, pasid);
+ if (ret)
+@@ -1163,19 +1167,19 @@ void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
+ void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
+ {
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+- struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
++ struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
+
+ if (WARN_ON(!kgd || !vm))
+- return;
++ return;
+
+- pr_debug("Releasing process vm %p\n", vm);
++ pr_debug("Releasing process vm %p\n", vm);
+
+- /* The original pasid of amdgpu vm has already been
+- * released during making a amdgpu vm to a compute vm
+- * The current pasid is managed by kfd and will be
+- * released on kfd process destroy. Set amdgpu pasid
+- * to 0 to avoid duplicate release.
+- */
++ /* The original pasid of amdgpu vm has already been
++ * released during making a amdgpu vm to a compute vm
++ * The current pasid is managed by kfd and will be
++ * released on kfd process destroy. Set amdgpu pasid
++ * to 0 to avoid duplicate release.
++ */
+ amdgpu_vm_release_compute(adev, avm);
+ }
+
+@@ -1191,49 +1195,49 @@ uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
+ }
+
+ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
+- struct kgd_dev *kgd, uint64_t va, uint64_t size,
+- void *vm, struct sg_table *sg, struct kgd_mem **mem,
+- uint64_t *offset, uint32_t flags)
++ struct kgd_dev *kgd, uint64_t va, uint64_t size,
++ void *vm, struct sg_table *sg, struct kgd_mem **mem,
++ uint64_t *offset, uint32_t flags)
+ {
+- struct amdgpu_device *adev = get_amdgpu_device(kgd);
+- struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
+- uint64_t user_addr = 0;
+- enum ttm_bo_type bo_type = ttm_bo_type_device;
+- struct amdgpu_bo *bo;
+- struct amdgpu_bo_param bp;
+- int byte_align;
+- u32 domain, alloc_domain;
+- u64 alloc_flags;
+- uint32_t mapping_flags;
+- int ret;
+-
+- /*
+- * Check on which domain to allocate BO
+- */
+- if (flags & ALLOC_MEM_FLAGS_VRAM) {
+- domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
+- alloc_flags = AMDGPU_GEM_CREATE_VRAM_CLEARED;
+- alloc_flags |= (flags & ALLOC_MEM_FLAGS_PUBLIC) ?
+- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
+- AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
+- } else if (flags & ALLOC_MEM_FLAGS_GTT) {
+- domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
+- alloc_flags = 0;
+- if (sg && !check_sg_size(sg, size))
+- return -EINVAL;
+- } else if (flags & ALLOC_MEM_FLAGS_USERPTR) {
+- domain = AMDGPU_GEM_DOMAIN_GTT;
+- alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
+- alloc_flags = 0;
+- if (!offset || !*offset)
+- return -EINVAL;
+- user_addr = *offset;
+- } else if (flags & ALLOC_MEM_FLAGS_DOORBELL) {
+- domain = AMDGPU_GEM_DOMAIN_GTT;
+- alloc_flags = 0;
+- if (size > UINT_MAX)
+- return -EINVAL;
+- WARN_ON(sg);
++ struct amdgpu_device *adev = get_amdgpu_device(kgd);
++ struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
++ uint64_t user_addr = 0;
++ enum ttm_bo_type bo_type = ttm_bo_type_device;
++ struct amdgpu_bo *bo;
++ struct amdgpu_bo_param bp;
++ int byte_align;
++ u32 domain, alloc_domain;
++ u64 alloc_flags;
++ uint32_t mapping_flags;
++ int ret;
++
++ /*
++ * Check on which domain to allocate BO
++ */
++ if (flags & ALLOC_MEM_FLAGS_VRAM) {
++ domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
++ alloc_flags = AMDGPU_GEM_CREATE_VRAM_CLEARED;
++ alloc_flags |= (flags & ALLOC_MEM_FLAGS_PUBLIC) ?
++ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
++ AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
++ } else if (flags & ALLOC_MEM_FLAGS_GTT) {
++ domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
++ alloc_flags = 0;
++ if (sg && !check_sg_size(sg, size))
++ return -EINVAL;
++ } else if (flags & ALLOC_MEM_FLAGS_USERPTR) {
++ domain = AMDGPU_GEM_DOMAIN_GTT;
++ alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
++ alloc_flags = 0;
++ if (!offset || !*offset)
++ return -EINVAL;
++ user_addr = *offset;
++ } else if (flags & ALLOC_MEM_FLAGS_DOORBELL) {
++ domain = AMDGPU_GEM_DOMAIN_GTT;
++ alloc_flags = 0;
++ if (size > UINT_MAX)
++ return -EINVAL;
++ WARN_ON(sg);
+ sg = create_doorbell_sg(*offset, size);
+ if (!sg)
+ return -ENOMEM;
+@@ -1638,6 +1642,7 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
+
+ unreserve_out:
+ unreserve_bo_and_vms(&ctx, false, false);
++
+ out:
+ mutex_unlock(&mem->lock);
+ return ret;
+diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile
+index 9ef632a..df92ebb 100644
+--- a/drivers/gpu/drm/amd/amdkfd/Makefile
++++ b/drivers/gpu/drm/amd/amdkfd/Makefile
+@@ -55,7 +55,9 @@ AMDKFD_FILES := $(AMDKFD_PATH)/kfd_module.o \
+ $(AMDKFD_PATH)/kfd_flat_memory.o \
+ $(AMDKFD_PATH)/kfd_rdma.o \
+ $(AMDKFD_PATH)/kfd_crat.o \
+- $(AMDKFD_PATH)/kfd_ipc.o
++ $(AMDKFD_PATH)/kfd_ipc.o \
++ $(AMDKFD_PATH)/kfd_trace.o \
++ $(AMDKFD_PATH)/kfd_peerdirect.o
+
+ ifneq ($(CONFIG_AMD_IOMMU_V2),)
+ AMDKFD_FILES += $(AMDKFD_PATH)/kfd_iommu.o
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index 8d56004..9d92522 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -42,6 +42,7 @@
+ #include "kfd_device_queue_manager.h"
+ #include "kfd_dbgmgr.h"
+ #include "kfd_ipc.h"
++#include "kfd_trace.h"
+
+ static long kfd_ioctl(struct file *, unsigned int, unsigned long);
+ static int kfd_open(struct inode *, struct file *);
+@@ -85,6 +86,8 @@ int kfd_chardev_init(void)
+
+ kfd_class->devnode = kfd_devnode;
+
++ kfd_class->devnode = kfd_devnode;
++
+ kfd_device = device_create(kfd_class, NULL,
+ MKDEV(kfd_char_dev_major, 0),
+ NULL, kfd_dev_name);
+@@ -1031,6 +1034,7 @@ static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
+ }
+
+
++
+ err = kfd_event_create(filp, p, args->event_type,
+ args->auto_reset != 0, args->node_id,
+ &args->event_id, &args->event_trigger_data,
+@@ -1207,7 +1211,7 @@ static int kfd_ioctl_acquire_vm(struct file *filep, struct kfd_process *p,
+ return ret;
+ }
+
+-static bool kfd_dev_is_large_bar(struct kfd_dev *dev)
++bool kfd_dev_is_large_bar(struct kfd_dev *dev)
+ {
+ struct kfd_local_mem_info mem_info;
+
+@@ -1226,6 +1230,40 @@ static bool kfd_dev_is_large_bar(struct kfd_dev *dev)
+ return false;
+ }
+
++static int kfd_reserve_vram_limit(struct kfd_dev *dev, uint64_t size)
++{
++ int ret = 0;
++ uint64_t limit;
++
++ if (dev->vram_limit.max_vram_limit <=
++ kfd_total_mem_size / 512)
++ return -ENOMEM;
++
++ /* Subtract potential page tables size */
++ limit = dev->vram_limit.max_vram_limit -
++ kfd_total_mem_size / 512;
++
++ spin_lock(&dev->vram_limit.vram_limit_lock);
++
++ if (limit > dev->vram_limit.vram_used + size)
++ dev->vram_limit.vram_used += size;
++ else
++ ret = -ENOMEM;
++
++ spin_unlock(&dev->vram_limit.vram_limit_lock);
++
++ return ret;
++}
++
++void kfd_unreserve_vram_limit(struct kfd_dev *dev, uint64_t size)
++{
++ spin_lock(&dev->vram_limit.vram_limit_lock);
++ dev->vram_limit.vram_used -= size;
++ spin_unlock(&dev->vram_limit.vram_limit_lock);
++
++ WARN_ON_ONCE(dev->vram_limit.vram_used < 0);
++}
++
+ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
+ struct kfd_process *p, void *data)
+ {
+@@ -1283,6 +1321,34 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
+ offset = kfd_get_process_doorbells(dev, p);
+ }
+
++ if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
++ /* Check if the userptr corresponds to another (or third-party)
++ * device local memory. If so, treat it as a doorbell. User
++ * space will be oblivious of this and will use this doorbell
++ * BO as a regular userptr BO
++ */
++ vma = find_vma(current->mm, args->mmap_offset);
++ if (vma && (vma->vm_flags & VM_IO)) {
++ unsigned long pfn;
++
++ follow_pfn(vma, args->mmap_offset, &pfn);
++ flags |= KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL;
++ flags &= ~KFD_IOC_ALLOC_MEM_FLAGS_USERPTR;
++ offset = (pfn << PAGE_SHIFT);
++ } else {
++ if (offset & (PAGE_SIZE - 1)) {
++ pr_debug("Unaligned userptr address:%llx\n",
++ offset);
++ return -EINVAL;
++ }
++ cpuva = offset;
++ }
++ } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
++ if (args->size != kfd_doorbell_process_slice(dev))
++ return -EINVAL;
++ offset = kfd_get_process_doorbells(dev, p);
++ }
++
+ mutex_lock(&p->mutex);
+
+ pdd = kfd_bind_process_to_device(dev, p);
+@@ -1291,13 +1357,19 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
+ goto err_unlock;
+ }
+
++ if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
++ err = kfd_reserve_vram_limit(dev, args->size);
++ if (err)
++ goto err_unlock;
++ }
++
+ err = dev->kfd2kgd->alloc_memory_of_gpu(
+ dev->kgd, args->va_addr, args->size,
+ pdd->vm, NULL, (struct kgd_mem **) &mem, &offset,
+ flags);
+
+ if (err)
+- goto err_unlock;
++ goto err_vram_limit;
+
+ mem_type = flags & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM |
+ KFD_IOC_ALLOC_MEM_FLAGS_GTT |
+@@ -1319,6 +1391,9 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
+
+ err_free:
+ dev->kfd2kgd->free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
++err_vram_limit:
++ if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)
++ kfd_unreserve_vram_limit(dev, args->size);
+ err_unlock:
+ mutex_unlock(&p->mutex);
+ return err;
+@@ -1359,10 +1434,17 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
+ /* If freeing the buffer failed, leave the handle in place for
+ * clean-up during process tear-down.
+ */
+- if (!ret)
+- kfd_process_device_remove_obj_handle(
+- pdd, GET_IDR_HANDLE(args->handle));
++ if (!ret) {
++ /* kfd_process_device_remove_obj_handle will free buf_obj.
++ * So do that last.
++ */
++ if (buf_obj->mem_type & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)
++ kfd_unreserve_vram_limit(dev,
++ buf_obj->it.last - buf_obj->it.start + 1);
+
++ kfd_process_device_remove_obj_handle(
++ pdd, GET_IDR_HANDLE(args->handle));
++ }
+ err_unlock:
+ mutex_unlock(&p->mutex);
+ return ret;
+@@ -1379,6 +1461,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
+ int i;
+ uint32_t *devices_arr = NULL;
+
++ trace_kfd_map_memory_to_gpu_start(p);
+ dev = kfd_device_by_id(GET_GPU_ID(args->handle));
+ if (!dev)
+ return -EINVAL;
+@@ -1465,6 +1548,8 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
+
+ kfree(devices_arr);
+
++ trace_kfd_map_memory_to_gpu_end(p,
++ args->n_devices * sizeof(*devices_arr), "Success");
+ return err;
+
+ bind_process_to_device_failed:
+@@ -1474,6 +1559,8 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
+ copy_from_user_failed:
+ sync_memory_failed:
+ kfree(devices_arr);
++ trace_kfd_map_memory_to_gpu_end(p,
++ args->n_devices * sizeof(*devices_arr), "Failed");
+
+ return err;
+ }
+@@ -2468,6 +2555,136 @@ static int kfd_ioctl_get_queue_wave_state(struct file *filep,
+ return r;
+ }
+
++static int kfd_ioctl_dbg_set_debug_trap(struct file *filep,
++ struct kfd_process *p, void *data)
++{
++ struct kfd_ioctl_dbg_trap_args *args = data;
++ struct kfd_process_device *pdd;
++ int r = 0;
++ struct kfd_dev *dev;
++ uint32_t gpu_id;
++ uint32_t debug_trap_action;
++ uint32_t data1;
++ uint32_t data2;
++
++ debug_trap_action = args->op;
++ gpu_id = args->gpu_id;
++ data1 = args->data1;
++ data2 = args->data2;
++
++ dev = kfd_device_by_id(args->gpu_id);
++ if (!dev)
++ return -EINVAL;
++
++ if (dev->device_info->asic_family < CHIP_VEGA10)
++ return -EINVAL;
++
++ if (dev->mec_fw_version < 406) {
++ pr_err("Unsupported firmware version [%i]\n",
++ dev->mec_fw_version);
++ return -EINVAL;
++ }
++
++ if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
++ pr_err("Unsupported sched_policy: %i", dev->dqm->sched_policy);
++ return -EINVAL;
++ }
++
++ mutex_lock(&p->mutex);
++ pdd = kfd_get_process_device_data(dev, p);
++ if (!pdd) {
++ r = -EINVAL;
++ goto unlock_out;
++ }
++
++ if ((pdd->is_debugging_enabled == false) &&
++ ((debug_trap_action == KFD_IOC_DBG_TRAP_ENABLE &&
++ data1 == 1) ||
++ (debug_trap_action ==
++ KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE &&
++ data1 != 0))) {
++
++ /* We need to reserve the debug trap vmid if we haven't yet, and
++ * are enabling trap debugging, or we are setting the wave
++ * launch mode to something other than normal==0.
++ */
++ r = reserve_debug_trap_vmid(dev->dqm);
++ if (r)
++ goto unlock_out;
++
++ pdd->is_debugging_enabled = true;
++ }
++
++ if (!pdd->is_debugging_enabled) {
++ pr_err("Debugging is not enabled for this device\n");
++ r = -EINVAL;
++ goto unlock_out;
++ }
++
++ switch (debug_trap_action) {
++ case KFD_IOC_DBG_TRAP_ENABLE:
++ switch (data1) {
++ case 0:
++ pdd->debug_trap_enabled = false;
++ r = dev->kfd2kgd->disable_debug_trap(dev->kgd);
++ break;
++ case 1:
++ pdd->debug_trap_enabled = true;
++ r = dev->kfd2kgd->enable_debug_trap(dev->kgd,
++ pdd->trap_debug_wave_launch_mode,
++ dev->vm_info.last_vmid_kfd);
++ break;
++ default:
++ pr_err("Invalid trap enable option: %i\n",
++ data1);
++ r = -EINVAL;
++ }
++ break;
++
++ case KFD_IOC_DBG_TRAP_SET_TRAP_DATA:
++ r = dev->kfd2kgd->set_debug_trap_data(dev->kgd,
++ data1,
++ data2);
++ break;
++
++ case KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE:
++ r = dev->kfd2kgd->set_wave_launch_trap_override(
++ dev->kgd,
++ data1,
++ data2);
++ break;
++
++ case KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE:
++ pdd->trap_debug_wave_launch_mode = data1;
++ r = dev->kfd2kgd->set_wave_launch_mode(
++ dev->kgd,
++ data1,
++ dev->vm_info.last_vmid_kfd);
++ break;
++ default:
++ pr_err("Invalid option: %i\n", debug_trap_action);
++ r = -EINVAL;
++ }
++
++ if (pdd->trap_debug_wave_launch_mode == 0 &&
++ !pdd->debug_trap_enabled) {
++ int result;
++
++ result = release_debug_trap_vmid(dev->dqm);
++ if (result) {
++ pr_err("Failed to release debug VMID\n");
++ r = result;
++ goto unlock_out;
++ }
++
++ pdd->is_debugging_enabled = false;
++ }
++
++unlock_out:
++ mutex_unlock(&p->mutex);
++ return r;
++}
++
+ #define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
+ [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, \
+ .cmd_drv = 0, .name = #ioctl}
+@@ -2569,6 +2786,10 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_QUEUE_WAVE_STATE,
+ kfd_ioctl_get_queue_wave_state, 0),
++
++ AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_TRAP,
++ kfd_ioctl_dbg_set_debug_trap, 0),
++
+ };
+
+ #define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls)
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.h b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.h
+index dde7bfb..fe5d7c2 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.h
+@@ -119,6 +119,20 @@ union SQ_IND_CMD_BITS {
+ struct {
+ uint32_t data:32;
+ } bitfields, bits;
++ struct {
++ uint32_t cmd:3;
++ uint32_t:1;
++ uint32_t mode:3;
++ uint32_t check_vmid:1;
++ uint32_t data:3;
++ uint32_t:5;
++ uint32_t wave_id:4;
++ uint32_t simd_id:2;
++ uint32_t:2;
++ uint32_t queue_id:3;
++ uint32_t:1;
++ uint32_t vm_id:4;
++ } bitfields_sethalt, bits_sethalt;
+ uint32_t u32All;
+ signed int i32All;
+ float f32All;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+index daa35c5..dd85838 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -30,6 +30,8 @@
+ #include "cwsr_trap_handler.h"
+ #include "kfd_iommu.h"
+
++uint64_t kfd_total_mem_size;
++
+ #define MQD_SIZE_ALIGNED 768
+
+ /*
+@@ -322,7 +324,6 @@ static const struct kfd_deviceid supported_devices[] = {
+ { 0x6928, &tonga_device_info }, /* Tonga */
+ { 0x6929, &tonga_device_info }, /* Tonga */
+ { 0x692B, &tonga_device_info }, /* Tonga */
+- { 0x692F, &tonga_vf_device_info }, /* Tonga vf */
+ { 0x6938, &tonga_device_info }, /* Tonga */
+ { 0x6939, &tonga_device_info }, /* Tonga */
+ { 0x7300, &fiji_device_info }, /* Fiji */
+@@ -401,6 +402,10 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
+ return NULL;
+ }
+
++ kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
++ if (!kfd)
++ return NULL;
++
+ /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
+ * 32 and 64-bit requests are possible and must be
+ * supported.
+@@ -465,6 +470,37 @@ static void kfd_cwsr_init(struct kfd_dev *kfd)
+ }
+ }
+
++static void kfd_vram_limit_init(struct kfd_dev *kfd)
++{
++ struct kfd_local_mem_info local_mem_info;
++ uint64_t vram_used;
++
++ spin_lock_init(&kfd->vram_limit.vram_limit_lock);
++
++ if (kfd_total_mem_size == 0) {
++ uint64_t mem;
++ struct sysinfo si;
++
++ si_meminfo(&si);
++ mem = si.totalram - si.totalhigh;
++ mem *= si.mem_unit;
++
++ kfd_total_mem_size += mem;
++ }
++
++ kfd->kfd2kgd->get_local_mem_info(kfd->kgd,
++ &local_mem_info);
++
++ vram_used = kfd->kfd2kgd->get_vram_usage(kfd->kgd);
++
++ kfd->vram_limit.max_vram_limit =
++ local_mem_info.local_mem_size_private +
++ local_mem_info.local_mem_size_public -
++ vram_used;
++
++ kfd_total_mem_size += kfd->vram_limit.max_vram_limit;
++}
++
+ bool kgd2kfd_device_init(struct kfd_dev *kfd,
+ const struct kgd2kfd_shared_resources *gpu_resources)
+ {
+@@ -555,6 +591,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
+
+ kfd_cwsr_init(kfd);
+
++ kfd_vram_limit_init(kfd);
++
+ if (kfd_resume(kfd))
+ goto kfd_resume_error;
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index 147dc01..0ed722c 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -28,6 +28,7 @@
+ #include <linux/types.h>
+ #include <linux/bitops.h>
+ #include <linux/sched.h>
++#include <linux/sched/mm.h>
+ #include "kfd_priv.h"
+ #include "kfd_device_queue_manager.h"
+ #include "kfd_mqd_manager.h"
+@@ -123,6 +124,31 @@ void program_sh_mem_settings(struct device_queue_manager *dqm,
+ qpd->sh_mem_bases);
+ }
+
++bool check_if_queues_active(struct device_queue_manager *dqm,
++ struct qcm_process_device *qpd)
++{
++ bool busy = false;
++ struct queue *q;
++
++ mutex_lock(&dqm->lock);
++ list_for_each_entry(q, &qpd->queues_list, list) {
++ struct mqd_manager *mqd_mgr;
++ enum KFD_MQD_TYPE type;
++
++ type = get_mqd_type_from_queue_type(q->properties.type);
++ mqd_mgr = dqm->ops.get_mqd_manager(dqm, type);
++ if (!mqd_mgr || !mqd_mgr->check_queue_active)
++ continue;
++
++ busy = mqd_mgr->check_queue_active(q);
++ if (busy)
++ break;
++ }
++ mutex_unlock(&dqm->lock);
++
++ return busy;
++}
++
+ static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
+ {
+ struct kfd_dev *dev = qpd->dqm->dev;
+@@ -387,8 +413,12 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
+ if (!q->properties.is_active)
+ return 0;
+
+- retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
+- &q->properties, q->process->mm);
++ if (WARN(q->process->mm != current->mm,
++ "should only run in user thread"))
++ retval = -EFAULT;
++ else
++ retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
++ &q->properties, current->mm);
+ if (retval)
+ goto out_uninit_mqd;
+
+@@ -545,9 +575,15 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
+ retval = map_queues_cpsch(dqm);
+ else if (q->properties.is_active &&
+ (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
+- q->properties.type == KFD_QUEUE_TYPE_SDMA))
+- retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
+- &q->properties, q->process->mm);
++ q->properties.type == KFD_QUEUE_TYPE_SDMA)) {
++ if (WARN(q->process->mm != current->mm,
++ "should only run in user thread"))
++ retval = -EFAULT;
++ else
++ retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd,
++ q->pipe, q->queue,
++ &q->properties, current->mm);
++ }
+
+ out_unlock:
+ mutex_unlock(&dqm->lock);
+@@ -653,6 +689,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
+ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+ {
++ struct mm_struct *mm = NULL;
+ struct queue *q;
+ struct mqd_manager *mqd_mgr;
+ struct kfd_process_device *pdd;
+@@ -686,6 +723,15 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
+ kfd_flush_tlb(pdd);
+ }
+
++ /* Take a safe reference to the mm_struct, which may otherwise
++ * disappear even while the kfd_process is still referenced.
++ */
++ mm = get_task_mm(pdd->process->lead_thread);
++ if (!mm) {
++ retval = -EFAULT;
++ goto out;
++ }
++
+ /* activate all active queues on the qpd */
+ list_for_each_entry(q, &qpd->queues_list, list) {
+ if (!q->properties.is_evicted)
+@@ -700,14 +746,15 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
+ q->properties.is_evicted = false;
+ q->properties.is_active = true;
+ retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
+- q->queue, &q->properties,
+- q->process->mm);
++ q->queue, &q->properties, mm);
+ if (retval)
+ goto out;
+ dqm->queue_count++;
+ }
+ qpd->evicted = 0;
+ out:
++ if (mm)
++ mmput(mm);
+ mutex_unlock(&dqm->lock);
+ return retval;
+ }
+@@ -858,6 +905,7 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
+ INIT_LIST_HEAD(&dqm->queues);
+ dqm->queue_count = dqm->next_pipe_to_allocate = 0;
+ dqm->sdma_queue_count = 0;
++ dqm->trap_debug_vmid = 0;
+
+ for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
+ int pipe_offset = pipe * get_queues_per_pipe(dqm);
+@@ -1026,7 +1074,7 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
+ dqm->sdma_queue_count = 0;
+ dqm->active_runlist = false;
+ dqm->sdma_bitmap = (1 << get_num_sdma_queues(dqm)) - 1;
+-
++ dqm->trap_debug_vmid = 0;
+ INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
+
+ return 0;
+@@ -1779,6 +1827,97 @@ static void kfd_process_hw_exception(struct work_struct *work)
+ dqm->dev->kfd2kgd->gpu_recover(dqm->dev->kgd);
+ }
+
++/*
++ * Reserves a vmid for the trap debugger
++ */
++int reserve_debug_trap_vmid(struct device_queue_manager *dqm)
++{
++ int r;
++ int updated_vmid_mask;
++
++ if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
++ pr_err("Unsupported on sched_policy: %i\n", dqm->sched_policy);
++ return -EINVAL;
++ }
++
++ mutex_lock(&dqm->lock);
++
++ if (dqm->trap_debug_vmid != 0) {
++ pr_err("Trap debug id already reserved\n");
++ r = -EINVAL;
++ goto out_unlock;
++ }
++
++ r = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
++ if (r)
++ goto out_unlock;
++
++ updated_vmid_mask = dqm->dev->shared_resources.compute_vmid_bitmap;
++ updated_vmid_mask &= ~(1 << dqm->dev->vm_info.last_vmid_kfd);
++
++ dqm->dev->shared_resources.compute_vmid_bitmap = updated_vmid_mask;
++ dqm->trap_debug_vmid = dqm->dev->vm_info.last_vmid_kfd;
++ r = set_sched_resources(dqm);
++ if (r)
++ goto out_unlock;
++
++ r = map_queues_cpsch(dqm);
++ if (r)
++ goto out_unlock;
++
++ pr_debug("Reserved VMID for trap debug: %i\n", dqm->trap_debug_vmid);
++out_unlock:
++ mutex_unlock(&dqm->lock);
++ return r;
++}
++
++/*
++ * Releases vmid for the trap debugger
++ */
++int release_debug_trap_vmid(struct device_queue_manager *dqm)
++{
++ int r;
++ int updated_vmid_mask;
++ uint32_t trap_debug_vmid;
++
++ if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
++ pr_err("Unsupported on sched_policy: %i\n", dqm->sched_policy);
++ return -EINVAL;
++ }
++
++ mutex_lock(&dqm->lock);
++ trap_debug_vmid = dqm->trap_debug_vmid;
++ if (dqm->trap_debug_vmid == 0) {
++ pr_err("Trap debug id is not reserved\n");
++ r = -EINVAL;
++ goto out_unlock;
++ }
++
++ r = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
++ if (r)
++ goto out_unlock;
++
++ updated_vmid_mask = dqm->dev->shared_resources.compute_vmid_bitmap;
++ updated_vmid_mask |= (1 << dqm->dev->vm_info.last_vmid_kfd);
++
++ dqm->dev->shared_resources.compute_vmid_bitmap = updated_vmid_mask;
++ dqm->trap_debug_vmid = 0;
++ r = set_sched_resources(dqm);
++ if (r)
++ goto out_unlock;
++
++ r = map_queues_cpsch(dqm);
++ if (r)
++ goto out_unlock;
++
++ pr_debug("Released VMID for trap debug: %i\n", trap_debug_vmid);
++
++out_unlock:
++ mutex_unlock(&dqm->lock);
++ return r;
++}
++
++
+ #if defined(CONFIG_DEBUG_FS)
+
+ static void seq_reg_dump(struct seq_file *m,
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+index 1c4ef00..4c22738 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+@@ -194,6 +194,7 @@ struct device_queue_manager {
+ struct kfd_mem_obj *fence_mem;
+ bool active_runlist;
+ int sched_policy;
++ uint32_t trap_debug_vmid;
+
+ /* hw exception */
+ bool is_hws_hang;
+@@ -216,6 +217,10 @@ unsigned int get_queues_num(struct device_queue_manager *dqm);
+ unsigned int get_queues_per_pipe(struct device_queue_manager *dqm);
+ unsigned int get_pipes_per_mec(struct device_queue_manager *dqm);
+ unsigned int get_num_sdma_queues(struct device_queue_manager *dqm);
++bool check_if_queues_active(struct device_queue_manager *dqm,
++ struct qcm_process_device *qpd);
++int reserve_debug_trap_vmid(struct device_queue_manager *dqm);
++int release_debug_trap_vmid(struct device_queue_manager *dqm);
+
+ static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
+ {
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+index ebe79bf..de53666 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+@@ -126,10 +126,91 @@ void kfd_doorbell_fini(struct kfd_dev *kfd)
+ iounmap(kfd->doorbell_kernel_ptr);
+ }
+
++static int kfd_doorbell_vm_fault(struct vm_fault *vmf)
++{
++ struct kfd_process *process = vmf->vma->vm_private_data;
++
++ pr_debug("Process %d doorbell vm page fault\n", process->pasid);
++
++ kfd_process_remap_doorbells_locked(process);
++
++ kfd_process_schedule_restore(process);
++
++ return VM_FAULT_NOPAGE;
++}
++
++static const struct vm_operations_struct kfd_doorbell_vm_ops = {
++ .fault = kfd_doorbell_vm_fault,
++};
++
++void kfd_doorbell_unmap_locked(struct kfd_process_device *pdd)
++{
++ struct kfd_process *process = pdd->process;
++ struct vm_area_struct *vma;
++ size_t size;
++
++ vma = pdd->qpd.doorbell_vma;
++ /* If the process is evicted before a queue is created,
++ * the doorbell is not yet mapped to user space
++ */
++ if (!vma || !pdd->qpd.queue_count) {
++ pdd->qpd.doorbell_mapped = -1;
++ return;
++ }
++
++ pr_debug("Process %d unmapping doorbell 0x%lx\n",
++ process->pasid, vma->vm_start);
++
++ size = kfd_doorbell_process_slice(pdd->dev);
++ zap_vma_ptes(vma, vma->vm_start, size);
++ pdd->qpd.doorbell_mapped = 0;
++}
++
++void kfd_doorbell_unmap(struct kfd_process_device *pdd)
++{
++ mutex_lock(&pdd->qpd.doorbell_lock);
++ kfd_doorbell_unmap_locked(pdd);
++ mutex_unlock(&pdd->qpd.doorbell_lock);
++}
++
++int kfd_doorbell_remap(struct kfd_process_device *pdd)
++{
++ struct kfd_process *process = pdd->process;
++ phys_addr_t address;
++ struct vm_area_struct *vma;
++ size_t size;
++ int ret = 0;
++
++ mutex_lock(&pdd->qpd.doorbell_lock);
++ if (pdd->qpd.doorbell_mapped != 0)
++ goto out_unlock;
++
++ /* Calculate physical address of doorbell */
++ address = kfd_get_process_doorbells(pdd->dev, process);
++ vma = pdd->qpd.doorbell_vma;
++ size = kfd_doorbell_process_slice(pdd->dev);
++
++ pr_debug("Process %d remap doorbell 0x%lx\n", process->pasid,
++ vma->vm_start);
++
++ ret = vm_iomap_memory(vma, address, size);
++ if (ret)
++ pr_err("Process %d failed to remap doorbell 0x%lx\n",
++ process->pasid, vma->vm_start);
++
++out_unlock:
++ pdd->qpd.doorbell_mapped = 1;
++ mutex_unlock(&pdd->qpd.doorbell_lock);
++
++ return ret;
++}
++
+ int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process,
+ struct vm_area_struct *vma)
+ {
+ phys_addr_t address;
++ struct kfd_process_device *pdd;
++ int ret;
+
+ /*
+ * For simplicitly we only allow mapping of the entire doorbell
+@@ -146,20 +227,47 @@ int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process,
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+- pr_debug("Mapping doorbell page\n"
++ pr_debug("Process %d mapping doorbell page\n"
+ " target user address == 0x%08llX\n"
+ " physical address == 0x%08llX\n"
+ " vm_flags == 0x%04lX\n"
+ " size == 0x%04lX\n",
+- (unsigned long long) vma->vm_start, address, vma->vm_flags,
+- kfd_doorbell_process_slice(dev));
++ process->pasid, (unsigned long long) vma->vm_start,
++ address, vma->vm_flags, kfd_doorbell_process_slice(dev));
++
++ pdd = kfd_get_process_device_data(dev, process);
++ if (WARN_ON_ONCE(!pdd))
++ return 0;
+
++ mutex_lock(&pdd->qpd.doorbell_lock);
+
+- return io_remap_pfn_range(vma,
++ ret = io_remap_pfn_range(vma,
+ vma->vm_start,
+ address >> PAGE_SHIFT,
+ kfd_doorbell_process_slice(dev),
+ vma->vm_page_prot);
++
++ if (!ret && keep_idle_process_evicted) {
++ vma->vm_ops = &kfd_doorbell_vm_ops;
++ vma->vm_private_data = process;
++ pdd->qpd.doorbell_vma = vma;
++
++ /* If process is evicted before the first queue is created,
++ * process will be restored by the page fault when the
++ * doorbell is accessed the first time
++ */
++ if (pdd->qpd.doorbell_mapped == -1) {
++ pr_debug("Process %d evicted, unmapping doorbell\n",
++ process->pasid);
++ kfd_doorbell_unmap_locked(pdd);
++ } else {
++ pdd->qpd.doorbell_mapped = 1;
++ }
++ }
++
++ mutex_unlock(&pdd->qpd.doorbell_lock);
++
++ return ret;
+ }
+
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
+index 7a61f38..0149475 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
+@@ -62,9 +62,20 @@ int kfd_iommu_device_init(struct kfd_dev *kfd)
+ struct amd_iommu_device_info iommu_info;
+ unsigned int pasid_limit;
+ int err;
++ struct kfd_topology_device *top_dev;
+
+- if (!kfd->device_info->needs_iommu_device)
++ top_dev = kfd_topology_device_by_id(kfd->id);
++
++ /*
++ * Overwrite ATS capability according to needs_iommu_device to fix
++ * a potentially missing corresponding bit in the CRAT provided by the BIOS.
++ */
++ if (!kfd->device_info->needs_iommu_device) {
++ top_dev->node_props.capability &= ~HSA_CAP_ATS_PRESENT;
+ return 0;
++ }
++
++ top_dev->node_props.capability |= HSA_CAP_ATS_PRESENT;
+
+ iommu_info.flags = 0;
+ err = amd_iommu_device_info(kfd->pdev, &iommu_info);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
+index 384d7a3..02366f9 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
+@@ -48,6 +48,12 @@
+ * pending write pointer to that location so subsequent calls to
+ * acquire_packet_buffer will get a correct write pointer
+ *
++ * @acquire_inline_ib: Returns a pointer to the location in the kernel
++ * queue ring buffer where the calling function can write an inline IB. It is
++ * Guaranteed that there is enough space for that IB. It also updates the
++ * pending write pointer to that location so subsequent calls to
++ * acquire_packet_buffer will get a correct write pointer
++ *
+ * @submit_packet: Update the write pointer and doorbell of a kernel queue.
+ *
+ * @sync_with_hw: Wait until the write pointer and the read pointer of a kernel
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
+index 684a3bf..0357062 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
+@@ -73,6 +73,7 @@ static int pm_map_process_v9(struct packet_manager *pm,
+ struct pm4_mes_map_process *packet;
+ uint64_t vm_page_table_base_addr =
+ (uint64_t)(qpd->page_table_base) << 12;
++ struct kfd_dev *kfd = pm->dqm->dev;
+
+ packet = (struct pm4_mes_map_process *)buffer;
+ memset(buffer, 0, sizeof(struct pm4_mes_map_process));
+@@ -88,6 +89,11 @@ static int pm_map_process_v9(struct packet_manager *pm,
+ packet->bitfields14.sdma_enable = 1;
+ packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;
+
++ if (kfd->dqm->trap_debug_vmid) {
++ packet->bitfields2.debug_vmid = kfd->dqm->trap_debug_vmid;
++ packet->bitfields2.new_debug = 1;
++ }
++
+ packet->sh_mem_config = qpd->sh_mem_config;
+ packet->sh_mem_bases = qpd->sh_mem_bases;
+ packet->sq_shader_tba_lo = lower_32_bits(qpd->tba_addr >> 8);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+index 43cecf2..f6d9b8c 100755
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+@@ -67,15 +67,22 @@ static int kfd_init(void)
+ if (err < 0)
+ goto err_topology;
+
++ err = kfd_ipc_init();
++ if (err < 0)
++ goto err_ipc;
++
+ err = kfd_process_create_wq();
+ if (err < 0)
+ goto err_create_wq;
+
++ kfd_init_peer_direct();
++
+ kfd_debugfs_init();
+
+ return 0;
+
+ err_create_wq:
++err_ipc:
+ kfd_topology_shutdown();
+ err_topology:
+ kfd_chardev_exit();
+@@ -86,6 +93,7 @@ static int kfd_init(void)
+ static void kfd_exit(void)
+ {
+ kfd_debugfs_fini();
++ kfd_close_peer_direct();
+ kfd_process_destroy_wq();
+ kfd_topology_shutdown();
+ kfd_chardev_exit();
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+index 1fcf0c4..9657489 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+@@ -136,6 +136,16 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
+ m->compute_static_thread_mgmt_se3);
+ }
+
++static void set_priority(struct v9_mqd *m, struct queue_properties *q)
++{
++ m->cp_hqd_pipe_priority = pipe_priority_map[q->priority];
++ m->cp_hqd_queue_priority = q->priority;
++ m->compute_pgm_rsrc1 = (m->compute_pgm_rsrc1 &
++ (~COMPUTE_PGM_RSRC1__PRIORITY_MASK)) |
++ (spi_priority_map[q->priority] <<
++ COMPUTE_PGM_RSRC1__PRIORITY__SHIFT);
++}
++
+ static int init_mqd(struct mqd_manager *mm, void **mqd,
+ struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
+ struct queue_properties *q)
+@@ -189,9 +199,6 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
+ 1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT |
+ 10 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT;
+
+- m->cp_hqd_pipe_priority = 1;
+- m->cp_hqd_queue_priority = 15;
+-
+ if (q->format == KFD_QUEUE_FORMAT_AQL) {
+ m->cp_hqd_aql_control =
+ 1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT;
+@@ -297,8 +304,7 @@ static int update_mqd(struct mqd_manager *mm, void *mqd,
+ m->cp_hqd_ctx_save_control = 0;
+
+ update_cu_mask(mm, mqd, q);
+-
+- update_cu_mask(mm, mqd, q);
++ set_priority(m, q);
+
+ q->is_active = (q->queue_size > 0 &&
+ q->queue_address != 0 &&
+@@ -318,8 +324,7 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd,
+ (mm->dev->kgd, mqd, type, timeout,
+ pipe_id, queue_id);
+ }
+-
+-static void uninit_mqd(struct mqd_manager *mm, void *mqd,
++static void uninit_mqd(struct mqd_manager *mm, void *mqd,
+ struct kfd_mem_obj *mqd_mem_obj)
+ {
+ struct kfd_dev *kfd = mm->dev;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h
+index ddad9be..29c2476 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h
+@@ -144,10 +144,13 @@ struct pm4_mes_map_process {
+
+ union {
+ struct {
+- uint32_t pasid:16;
+- uint32_t reserved1:8;
+- uint32_t diq_enable:1;
+- uint32_t process_quantum:7;
++ uint32_t pasid:16; /* 0 - 15 */
++ uint32_t reserved1:2; /* 16 - 17 */
++ uint32_t debug_vmid:4; /* 18 - 21 */
++ uint32_t new_debug:1; /* 22 */
++ uint32_t tmz:1; /* 23 */
++ uint32_t diq_enable:1; /* 24 */
++ uint32_t process_quantum:7; /* 25 - 31 */
+ } bitfields2;
+ uint32_t ordinal2;
+ };
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index b3f3e86..427ceae 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -151,10 +151,25 @@ extern int noretry;
+ extern int priv_cp_queues;
+
+ /*
++ * Enable privileged mode for all CP queues including user queues
++ */
++extern int priv_cp_queues;
++
++/*
+ * Halt if HWS hang is detected
+ */
+ extern int halt_if_hws_hang;
+
++/*
++ * Restore evicted process only if queues are active
++ */
++extern bool keep_idle_process_evicted;
++
++/* Accumulated size of available system RAM and
++ * VRAM across all GPUs
++ */
++extern uint64_t kfd_total_mem_size;
++
+ enum cache_policy {
+ cache_policy_coherent,
+ cache_policy_noncoherent
+@@ -201,6 +216,12 @@ struct kfd_vmid_info {
+ uint32_t vmid_num_kfd;
+ };
+
++struct kfd_vram_limit {
++ uint64_t max_vram_limit;
++ int64_t vram_used;
++ spinlock_t vram_limit_lock;
++};
++
+ struct kfd_dev {
+ struct kgd_dev *kgd;
+
+@@ -270,6 +291,9 @@ struct kfd_dev {
+ unsigned int cwsr_isa_size;
+
+ bool pci_atomic_requested;
++
++ /* VRAM limit */
++ struct kfd_vram_limit vram_limit;
+ };
+
+ struct kfd_ipc_obj;
+@@ -335,6 +359,7 @@ enum kfd_mempool {
+ int kfd_chardev_init(void);
+ void kfd_chardev_exit(void);
+ struct device *kfd_chardev(void);
++void kfd_unreserve_vram_limit(struct kfd_dev *dev, uint64_t size);
+
+ /**
+ * enum kfd_unmap_queues_filter
+@@ -598,6 +623,17 @@ struct qcm_process_device {
+
+ /*doorbell resources per process per device*/
+ unsigned long *doorbell_bitmap;
++ /* doorbell user mmap vma */
++ struct vm_area_struct *doorbell_vma;
++ /* lock to serialize doorbell unmap and remap */
++ struct mutex doorbell_lock;
++
++ /* Indicate if doorbell is mapped or unmapped
++ * -1 means doorbells need to be unmapped because queue is evicted
++ * 0 means doorbells are unmapped
++ * 1 means doorbells are mapped
++ */
++ int doorbell_mapped;
+ };
+
+ /* KFD Memory Eviction */
+@@ -615,6 +651,9 @@ int kfd_process_evict_queues(struct kfd_process *p);
+ int kfd_process_restore_queues(struct kfd_process *p);
+
+
++void kfd_process_schedule_restore(struct kfd_process *p);
++int kfd_process_remap_doorbells_locked(struct kfd_process *p);
++
+ /* 8 byte handle containing GPU ID in the most significant 4 bytes and
+ * idr_handle in the least significant 4 bytes
+ */
+@@ -668,6 +707,22 @@ struct kfd_process_device {
+ */
+ bool already_dequeued;
+
++ /* Flag to indicate if debugging is active on this device for this
++ * process. This is for the new GFX9+ debugging, and indicates that
++ * any of the debug features are enabled, ie: wave launch mode,
++ * address watch, or trap debug. It also indicates that a debug
++ * VMID has been allocated.
++ */
++ bool is_debugging_enabled;
++
++ /* Flag to indicate if trap debugging is active on this device for
++ * this process. This is for the GFX9+ debugging features.
++ */
++ bool debug_trap_enabled;
++
++ /* Value of the wave launch mode if debugging is enabled */
++ uint32_t trap_debug_wave_launch_mode;
++
+ /* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
+ enum kfd_pdd_bound bound;
+ };
+@@ -752,6 +807,7 @@ struct kfd_process {
+ * restored after an eviction
+ */
+ unsigned long last_restore_timestamp;
++ unsigned long last_evict_timestamp;
+ };
+
+ #define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
+@@ -839,6 +895,8 @@ int kfd_doorbell_init(struct kfd_dev *kfd);
+ void kfd_doorbell_fini(struct kfd_dev *kfd);
+ int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process,
+ struct vm_area_struct *vma);
++void kfd_doorbell_unmap(struct kfd_process_device *pdd);
++int kfd_doorbell_remap(struct kfd_process_device *pdd);
+ void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
+ unsigned int *doorbell_off);
+ void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr);
+@@ -869,6 +927,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu);
+ int kfd_topology_remove_device(struct kfd_dev *gpu);
+ struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
+ uint32_t proximity_domain);
++struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id);
+ struct kfd_dev *kfd_device_by_id(uint32_t gpu_id);
+ struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
+ struct kfd_dev *kfd_device_by_kgd(const struct kgd_dev *kgd);
+@@ -1072,6 +1131,13 @@ void kfd_close_peer_direct(void);
+ /* IPC Support */
+ int kfd_ipc_init(void);
+
++/* PeerDirect support */
++void kfd_init_peer_direct(void);
++void kfd_close_peer_direct(void);
++
++/* IPC Support */
++int kfd_ipc_init(void);
++
+ /* Debugfs */
+ #if defined(CONFIG_DEBUG_FS)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index 304c116..7b9e587 100755
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -34,8 +34,6 @@
+ #include <asm/page.h>
+ #include "kfd_ipc.h"
+
+-struct mm_struct;
+-
+ #include "kfd_priv.h"
+ #include "kfd_device_queue_manager.h"
+ #include "kfd_dbgmgr.h"
+@@ -332,6 +330,10 @@ static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
+ run_rdma_free_callback(buf_obj);
+ pdd->dev->kfd2kgd->free_memory_of_gpu(pdd->dev->kgd,
+ buf_obj->mem);
++ if (buf_obj->mem_type & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)
++ kfd_unreserve_vram_limit(pdd->dev,
++ buf_obj->it.last - buf_obj->it.start + 1);
++
+ kfd_process_device_remove_obj_handle(pdd, id);
+ }
+ }
+@@ -369,7 +371,7 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
+
+ kfree(pdd->qpd.doorbell_bitmap);
+ idr_destroy(&pdd->alloc_idr);
+-
++ mutex_destroy(&pdd->qpd.doorbell_lock);
+ kfree(pdd);
+ }
+ }
+@@ -602,6 +604,11 @@ static struct kfd_process *create_process(const struct task_struct *thread,
+ if (err)
+ goto err_init_cwsr;
+
++ /* If PeerDirect interface was not detected try to detect it again
++ * in case if network driver was loaded later.
++ */
++ kfd_init_peer_direct();
++
+ return process;
+
+ err_init_cwsr:
+@@ -682,9 +689,13 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
+ pdd->qpd.dqm = dev->dqm;
+ pdd->qpd.pqm = &p->pqm;
+ pdd->qpd.evicted = 0;
++ mutex_init(&pdd->qpd.doorbell_lock);
+ pdd->process = p;
+ pdd->bound = PDD_UNBOUND;
+ pdd->already_dequeued = false;
++ pdd->is_debugging_enabled = false;
++ pdd->debug_trap_enabled = false;
++ pdd->trap_debug_wave_launch_mode = 0;
+ list_add(&pdd->per_device_list, &p->per_device_data);
+
+ /* Init idr used for memory handle translation */
+@@ -1015,6 +1026,95 @@ int kfd_process_restore_queues(struct kfd_process *p)
+ return ret;
+ }
+
++void kfd_process_schedule_restore(struct kfd_process *p)
++{
++ int ret;
++ unsigned long evicted_jiffies;
++ unsigned long delay_jiffies = msecs_to_jiffies(PROCESS_RESTORE_TIME_MS);
++
++ /* wait at least PROCESS_RESTORE_TIME_MS before attempting to restore
++ */
++ evicted_jiffies = get_jiffies_64() - p->last_evict_timestamp;
++ if (delay_jiffies > evicted_jiffies)
++ delay_jiffies -= evicted_jiffies;
++ else
++ delay_jiffies = 0;
++
++ pr_debug("Process %d schedule restore work\n", p->pasid);
++ ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
++ delay_jiffies);
++ WARN(!ret, "Schedule restore work failed\n");
++}
++
++static void kfd_process_unmap_doorbells(struct kfd_process *p)
++{
++ struct kfd_process_device *pdd;
++ struct mm_struct *mm = p->mm;
++
++ down_write(&mm->mmap_sem);
++
++ list_for_each_entry(pdd, &p->per_device_data, per_device_list)
++ kfd_doorbell_unmap(pdd);
++
++ up_write(&mm->mmap_sem);
++}
++
++int kfd_process_remap_doorbells_locked(struct kfd_process *p)
++{
++ struct kfd_process_device *pdd;
++ int ret = 0;
++
++ list_for_each_entry(pdd, &p->per_device_data, per_device_list)
++ ret = kfd_doorbell_remap(pdd);
++
++ return ret;
++}
++
++static int kfd_process_remap_doorbells(struct kfd_process *p)
++{
++ struct mm_struct *mm = p->mm;
++ int ret = 0;
++
++ down_write(&mm->mmap_sem);
++ ret = kfd_process_remap_doorbells_locked(p);
++ up_write(&mm->mmap_sem);
++
++ return ret;
++}
++
++/**
++ * kfd_process_unmap_doorbells_if_idle - Check if queues are active
++ *
++ * Returns true if the queues are idle and the doorbells were unmapped.
++ * Returns false if the queues are active.
++ */
++static bool kfd_process_unmap_doorbells_if_idle(struct kfd_process *p)
++{
++ struct kfd_process_device *pdd;
++ bool busy = false;
++
++ if (!keep_idle_process_evicted)
++ return false;
++
++ /* Unmap doorbell first to avoid race conditions. Otherwise while the
++ * second queue is checked, the first queue may get more work, but we
++ * won't detect that since it has been checked
++ */
++ kfd_process_unmap_doorbells(p);
++
++ list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
++ busy = check_if_queues_active(pdd->qpd.dqm, &pdd->qpd);
++ if (busy)
++ break;
++ }
++
++ /* Remap doorbell if process queue is not idle */
++ if (busy)
++ kfd_process_remap_doorbells(p);
++
++ return !busy;
++}
++
+ static void evict_process_worker(struct work_struct *work)
+ {
+ int ret;
+@@ -1027,6 +1127,7 @@ static void evict_process_worker(struct work_struct *work)
+ * lifetime of this thread, kfd_process p will be valid
+ */
+ p = container_of(dwork, struct kfd_process, eviction_work);
++ //trace_kfd_evict_process_worker_start(p);
+ WARN_ONCE(p->last_eviction_seqno != p->ef->seqno,
+ "Eviction fence mismatch\n");
+
+@@ -1037,6 +1138,7 @@ static void evict_process_worker(struct work_struct *work)
+ * previous restore work to complete
+ */
+ flush_delayed_work(&p->restore_work);
++ p->last_evict_timestamp = get_jiffies_64();
+
+ pr_info("Started evicting pasid %d\n", p->pasid);
+ ret = kfd_process_evict_queues(p);
+@@ -1046,10 +1148,15 @@ static void evict_process_worker(struct work_struct *work)
+ p->ef = NULL;
+ queue_delayed_work(kfd_restore_wq, &p->restore_work,
+ msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));
+-
++ if (!kfd_process_unmap_doorbells_if_idle(p))
++ kfd_process_schedule_restore(p);
++ else
++ pr_debug("Process %d queues idle, doorbell unmapped\n",
++ p->pasid);
+ pr_info("Finished evicting pasid %d\n", p->pasid);
+ } else
+ pr_err("Failed to evict queues of pasid %d\n", p->pasid);
++ //trace_kfd_evict_process_worker_end(p, ret ? "Failed" : "Success");
+ }
+
+ static void restore_process_worker(struct work_struct *work)
+@@ -1065,6 +1172,7 @@ static void restore_process_worker(struct work_struct *work)
+ * lifetime of this thread, kfd_process p will be valid
+ */
+ p = container_of(dwork, struct kfd_process, restore_work);
++ //trace_kfd_restore_process_worker_start(p);
+
+ /* Call restore_process_bos on the first KGD device. This function
+ * takes care of restoring the whole process including other devices.
+@@ -1096,10 +1204,14 @@ static void restore_process_worker(struct work_struct *work)
+ ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
+ msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
+ WARN(!ret, "reschedule restore work failed\n");
++ /*kfd_restore_process_worker_end(p, ret ?
++ "Rescheduled restore" :
++ "Failed to reschedule restore");*/
+ return;
+ }
+
+ ret = kfd_process_restore_queues(p);
++ //kfd_restore_process_worker_end(p, ret ? "Failed" : "Success");
+ if (!ret)
+ pr_info("Finished restoring pasid %d\n", p->pasid);
+ else
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+index 7702156..3be2346 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+@@ -63,22 +63,33 @@ struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
+ return device;
+ }
+
+-struct kfd_dev *kfd_device_by_id(uint32_t gpu_id)
++struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id)
+ {
+- struct kfd_topology_device *top_dev;
+- struct kfd_dev *device = NULL;
++ struct kfd_topology_device *top_dev = NULL;
++ struct kfd_topology_device *ret = NULL;
+
+ down_read(&topology_lock);
+
+ list_for_each_entry(top_dev, &topology_device_list, list)
+ if (top_dev->gpu_id == gpu_id) {
+- device = top_dev->gpu;
++ ret = top_dev;
+ break;
+ }
+
+ up_read(&topology_lock);
+
+- return device;
++ return ret;
++}
++
++struct kfd_dev *kfd_device_by_id(uint32_t gpu_id)
++{
++ struct kfd_topology_device *top_dev;
++
++ top_dev = kfd_topology_device_by_id(gpu_id);
++ if (!top_dev)
++ return NULL;
++
++ return top_dev->gpu;
+ }
+
+ struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev)
+@@ -890,7 +901,7 @@ static void kfd_debug_print_topology(void)
+ }
+ up_read(&topology_lock);
+ }
+-
++#ifdef CONFIG_ACPI
+ /* Helper function for intializing platform_xx members of
+ * kfd_system_properties. Uses OEM info from the last CPU/APU node.
+ */
+@@ -909,6 +920,7 @@ static void kfd_update_system_properties(void)
+ }
+ up_read(&topology_lock);
+ }
++#endif
+
+ static void find_system_memory(const struct dmi_header *dm,
+ void *private)
+@@ -1333,9 +1345,10 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
+ case CHIP_VEGA10:
+ case CHIP_VEGA20:
+ case CHIP_RAVEN:
+- dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 <<
+- HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
+- HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
++ dev->node_props.capability |= HSA_CAP_TRAP_DEBUG_SUPPORT |
++ HSA_CAP_TRAP_DEBUG_TRAP_DATA_COUNT |
++ HSA_CAP_TRAP_DEBUG_WAVE_LAUNCH_TRAP_OVERRIDE_SUPPORTED |
++ HSA_CAP_TRAP_DEBUG_WAVE_LAUNCH_MODE_SUPPORTED;
+ break;
+ default:
+ WARN(1, "Unexpected ASIC family %u",
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+index 2b36baf..c90c287 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+@@ -47,6 +47,10 @@
+ #define HSA_CAP_DOORBELL_TYPE_1_0 0x1
+ #define HSA_CAP_DOORBELL_TYPE_2_0 0x2
+ #define HSA_CAP_AQL_QUEUE_DOUBLE_MAP 0x00004000
++#define HSA_CAP_TRAP_DEBUG_SUPPORT 0x02000000
++#define HSA_CAP_TRAP_DEBUG_TRAP_DATA_COUNT 0x08000000
++#define HSA_CAP_TRAP_DEBUG_WAVE_LAUNCH_TRAP_OVERRIDE_SUPPORTED 0x10000000
++#define HSA_CAP_TRAP_DEBUG_WAVE_LAUNCH_MODE_SUPPORTED 0x20000000
+
+ struct kfd_node_properties {
+ uint32_t cpu_cores_count;
+@@ -118,7 +122,10 @@ struct kfd_cache_properties {
+ uint32_t cache_type;
+ uint8_t sibling_map[CRAT_SIBLINGMAP_SIZE];
+ struct kobject *kobj;
+- struct attribute attr;
++ struct kfd_dev *gpu;
++ struct attribute attr_props;
++ struct attribute attr_used;
++ struct attribute attr;
+ };
+
+ struct kfd_iolink_properties {
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_trace.c b/drivers/gpu/drm/amd/amdkfd/kfd_trace.c
+new file mode 100644
+index 0000000..805a1da
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_trace.c
+@@ -0,0 +1,26 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++
++#define CREATE_TRACE_POINTS
++#include "kfd_trace.h"
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_trace.h b/drivers/gpu/drm/amd/amdkfd/kfd_trace.h
+new file mode 100644
+index 0000000..345cded
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_trace.h
+@@ -0,0 +1,151 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#if !defined(_AMDKFD_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _AMDKFD_TRACE_H
++
++
++#include <linux/stringify.h>
++#include <linux/types.h>
++#include <linux/tracepoint.h>
++
++#include "kfd_priv.h"
++#include <linux/kfd_ioctl.h>
++
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM amdkfd
++#define TRACE_INCLUDE_FILE kfd_trace
++
++
++TRACE_EVENT(kfd_map_memory_to_gpu_start,
++ TP_PROTO(struct kfd_process *p),
++ TP_ARGS(p),
++ TP_STRUCT__entry(
++ __field(unsigned int, pasid)
++ ),
++ TP_fast_assign(
++ __entry->pasid = p->pasid;
++ ),
++ TP_printk("pasid =%u", __entry->pasid)
++);
++
++
++TRACE_EVENT(kfd_map_memory_to_gpu_end,
++ TP_PROTO(struct kfd_process *p, u32 array_size, char *pStatusMsg),
++ TP_ARGS(p, array_size, pStatusMsg),
++ TP_STRUCT__entry(
++ __field(unsigned int, pasid)
++ __field(unsigned int, array_size)
++ __string(pStatusMsg, pStatusMsg)
++ ),
++ TP_fast_assign(
++ __entry->pasid = p->pasid;
++ __entry->array_size = array_size;
++ __assign_str(pStatusMsg, pStatusMsg);
++ ),
++ TP_printk("pasid = %u, array_size = %u, StatusMsg=%s",
++ __entry->pasid,
++ __entry->array_size,
++ __get_str(pStatusMsg))
++);
++
++
++TRACE_EVENT(kfd_kgd2kfd_schedule_evict_and_restore_process,
++ TP_PROTO(struct kfd_process *p, u32 delay_jiffies),
++ TP_ARGS(p, delay_jiffies),
++ TP_STRUCT__entry(
++ __field(unsigned int, pasid)
++ __field(unsigned int, delay_jiffies)
++ ),
++ TP_fast_assign(
++ __entry->pasid = p->pasid;
++ __entry->delay_jiffies = delay_jiffies;
++ ),
++ TP_printk("pasid = %u, delay_jiffies = %u",
++ __entry->pasid,
++ __entry->delay_jiffies)
++);
++
++
++TRACE_EVENT(kfd_evict_process_worker_start,
++ TP_PROTO(struct kfd_process *p),
++ TP_ARGS(p),
++ TP_STRUCT__entry(
++ __field(unsigned int, pasid)
++ ),
++ TP_fast_assign(
++ __entry->pasid = p->pasid;
++ ),
++ TP_printk("pasid=%u", __entry->pasid)
++);
++
++
++TRACE_EVENT(kfd_evict_process_worker_end,
++ TP_PROTO(struct kfd_process *p, char *pStatusMsg),
++ TP_ARGS(p, pStatusMsg),
++ TP_STRUCT__entry(
++ __field(unsigned int, pasid)
++ __string(pStatusMsg, pStatusMsg)
++ ),
++ TP_fast_assign(
++ __entry->pasid = p->pasid;
++ __assign_str(pStatusMsg, pStatusMsg);
++ ),
++ TP_printk("pasid=%u, StatusMsg=%s",
++ __entry->pasid, __get_str(pStatusMsg))
++);
++
++
++TRACE_EVENT(kfd_restore_process_worker_start,
++ TP_PROTO(struct kfd_process *p),
++ TP_ARGS(p),
++ TP_STRUCT__entry(
++ __field(unsigned int, pasid)
++ ),
++ TP_fast_assign(
++ __entry->pasid = p->pasid;
++ ),
++ TP_printk("pasid=%u", __entry->pasid)
++);
++
++TRACE_EVENT(kfd_restore_process_worker_end,
++ TP_PROTO(struct kfd_process *p, char *pStatusMsg),
++ TP_ARGS(p, pStatusMsg),
++ TP_STRUCT__entry(
++ __field(unsigned int, pasid)
++ __string(pStatusMsg, pStatusMsg)
++ ),
++ TP_fast_assign(
++ __entry->pasid = p->pasid;
++ __assign_str(pStatusMsg, pStatusMsg);
++ ),
++ TP_printk("pasid=%u, StatusMsg=%s",
++ __entry->pasid, __get_str(pStatusMsg))
++);
++
++#endif
++
++/* This part must be outside protection */
++#undef TRACE_INCLUDE_PATH
++#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/amd/amdkfd
++#include <trace/define_trace.h>
+diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+index 9595d40..865fcfd 100755
+--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
++++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+@@ -34,6 +34,8 @@
+ #include <linux/dma-fence.h>
+ #include <linux/dma-buf.h>
+ #include <linux/bitmap.h>
++#include <linux/mm_types.h>
++#include <linux/scatterlist.h>
+
+ struct pci_dev;
+
+@@ -194,6 +196,7 @@ struct tile_config {
+ #define ALLOC_MEM_FLAGS_USERPTR (1 << 2)
+ #define ALLOC_MEM_FLAGS_DOORBELL (1 << 3)
+
++
+ /*
+ * Allocation flags attributes/access options.
+ * NOTE: This must match the corresponding definitions in kfd_ioctl.h.
+@@ -281,6 +284,16 @@ struct tile_config {
+ * @set_compute_idle: Indicates that compute is idle on a device. This
+ * can be used to change power profiles depending on compute activity.
+ *
++ * @get_dmabuf_info: Returns information about a dmabuf if it was
++ * created by the GPU driver
++ *
++ * @import_dmabuf: Imports a DMA buffer, creating a new kgd_mem object
++ * Supports only DMA buffers created by GPU driver on the same GPU
++ *
++ * @export_dmabuf: Exports a KFD BO for sharing with another process
++ *
++ * @copy_mem_to_mem: Copies size bytes from source BO to destination BO
++ *
+ * This structure contains function pointers to services that the kgd driver
+ * provides to amdkfd driver.
+ *
+@@ -436,6 +449,20 @@ struct kfd2kgd_calls {
+ void (*gpu_recover)(struct kgd_dev *kgd);
+
+ void (*set_compute_idle)(struct kgd_dev *kgd, bool idle);
++
++ uint32_t (*enable_debug_trap)(struct kgd_dev *kgd,
++ uint32_t trap_debug_wave_launch_mode,
++ uint32_t vmid);
++ uint32_t (*disable_debug_trap)(struct kgd_dev *kgd);
++ uint32_t (*set_debug_trap_data)(struct kgd_dev *kgd,
++ int trap_data0,
++ int trap_data1);
++ uint32_t (*set_wave_launch_trap_override)(struct kgd_dev *kgd,
++ uint32_t trap_override,
++ uint32_t trap_mask);
++ uint32_t (*set_wave_launch_mode)(struct kgd_dev *kgd,
++ uint8_t wave_launch_mode,
++ uint32_t vmid);
+ };
+
+ /**
+diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
+index 1ec5289..2c1e8676 100644
+--- a/include/uapi/linux/kfd_ioctl.h
++++ b/include/uapi/linux/kfd_ioctl.h
+@@ -193,6 +193,37 @@ struct kfd_ioctl_dbg_wave_control_args {
+ uint32_t buf_size_in_bytes; /*including gpu_id and buf_size */
+ };
+
++/* KFD_IOC_DBG_TRAP_ENABLE:
++ * data1: 0=disable, 1=enable
++ * data2: queue ID (for future use)
++ */
++#define KFD_IOC_DBG_TRAP_ENABLE 0
++
++/* KFD_IOC_DBG_TRAP_SET_TRAP_DATA:
++ * data1: SPI_GDBG_TRAP_DATA0
++ * data2: SPI_GDBG_TRAP_DATA1
++ */
++#define KFD_IOC_DBG_TRAP_SET_TRAP_DATA 1
++
++/* KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE:
++ * data1: override mode: 0=OR, 1=REPLACE
++ * data2: mask
++ */
++#define KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE 2
++
++/* KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE:
++ * data1: 0=normal, 1=halt, 2=kill, 3=singlestep, 4=disable
++ * data2: unused
++ */
++#define KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE 3
++
++struct kfd_ioctl_dbg_trap_args {
++ __u32 gpu_id; /* to KFD */
++ __u32 op; /* to KFD */
++ __u32 data1; /* to KFD */
++ __u32 data2; /* to KFD */
++};
++
+ /* Matching HSA_EVENTTYPE */
+ #define KFD_IOC_EVENT_SIGNAL 0
+ #define KFD_IOC_EVENT_NODECHANGE 1
+@@ -262,10 +293,10 @@ struct kfd_hsa_memory_exception_data {
+
+ /* hw exception data */
+ struct kfd_hsa_hw_exception_data {
+- uint32_t reset_type;
+- uint32_t reset_cause;
+- uint32_t memory_lost;
+- uint32_t gpu_id;
++ __u32 reset_type;
++ __u32 reset_cause;
++ __u32 memory_lost;
++ __u32 gpu_id;
+ };
+
+ /* Event data */
+@@ -388,6 +419,36 @@ struct kfd_ioctl_ipc_import_handle_args {
+ uint32_t pad;
+ };
+
++struct kfd_memory_range {
++ __u64 va_addr;
++ __u64 size;
++};
++
++/* flags definitions
++ * BIT0: 0: read operation, 1: write operation.
++ * This also identifies if the src or dst array belongs to remote process
++ */
++#define KFD_CROSS_MEMORY_RW_BIT (1 << 0)
++#define KFD_SET_CROSS_MEMORY_READ(flags) (flags &= ~KFD_CROSS_MEMORY_RW_BIT)
++#define KFD_SET_CROSS_MEMORY_WRITE(flags) (flags |= KFD_CROSS_MEMORY_RW_BIT)
++#define KFD_IS_CROSS_MEMORY_WRITE(flags) (flags & KFD_CROSS_MEMORY_RW_BIT)
++struct kfd_ioctl_cross_memory_copy_args {
++ /* to KFD: Process ID of the remote process */
++ __u32 pid;
++ /* to KFD: See above definition */
++ __u32 flags;
++ /* to KFD: Source GPU VM range */
++ __u64 src_mem_range_array;
++ /* to KFD: Size of above array */
++ __u64 src_mem_array_size;
++ /* to KFD: Destination GPU VM range */
++ __u64 dst_mem_range_array;
++ /* to KFD: Size of above array */
++ __u64 dst_mem_array_size;
++ /* from KFD: Total amount of bytes copied */
++ __u64 bytes_copied;
++};
++
+ struct kfd_ioctl_get_tile_config_args {
+ /* to KFD: pointer to tile array */
+ __u64 tile_config_ptr;
+@@ -411,11 +472,6 @@ struct kfd_ioctl_get_tile_config_args {
+ */
+ };
+
+-struct kfd_memory_range {
+- uint64_t va_addr;
+- uint64_t size;
+-};
+-
+ /* flags definitions
+ * BIT0: 0: read operation, 1: write operation.
+ * This also identifies if the src or dst array belongs to remote process
+@@ -425,24 +481,6 @@ struct kfd_memory_range {
+ #define KFD_SET_CROSS_MEMORY_WRITE(flags) (flags |= KFD_CROSS_MEMORY_RW_BIT)
+ #define KFD_IS_CROSS_MEMORY_WRITE(flags) (flags & KFD_CROSS_MEMORY_RW_BIT)
+
+-struct kfd_ioctl_cross_memory_copy_args {
+- /* to KFD: Process ID of the remote process */
+- uint32_t pid;
+- /* to KFD: See above definition */
+- uint32_t flags;
+- /* to KFD: Source GPU VM range */
+- uint64_t src_mem_range_array;
+- /* to KFD: Size of above array */
+- uint64_t src_mem_array_size;
+- /* to KFD: Destination GPU VM range */
+- uint64_t dst_mem_range_array;
+- /* to KFD: Size of above array */
+- uint64_t dst_mem_array_size;
+- /* from KFD: Total amount of bytes copied */
+- uint64_t bytes_copied;
+-};
+-
+-
+ #define AMDKFD_IOCTL_BASE 'K'
+ #define AMDKFD_IO(nr) _IO(AMDKFD_IOCTL_BASE, nr)
+ #define AMDKFD_IOR(nr, type) _IOR(AMDKFD_IOCTL_BASE, nr, type)
+@@ -546,6 +584,9 @@ struct kfd_ioctl_cross_memory_copy_args {
+ #define AMDKFD_IOC_CROSS_MEMORY_COPY \
+ AMDKFD_IOWR(0x20, struct kfd_ioctl_cross_memory_copy_args)
+
++#define AMDKFD_IOC_DBG_TRAP \
++ AMDKFD_IOW(0x21, struct kfd_ioctl_dbg_trap_args)
++
+ #define AMDKFD_COMMAND_START 0x01
+ #define AMDKFD_COMMAND_END 0x21
+
+--
+2.7.4
+
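For orientation, the debug-trap interface introduced above is driven from user space through /dev/kfd. A minimal, hypothetical sketch of such a caller (not part of the patch; the gpu_id value 0x1234 is a placeholder that a real tool would read from the KFD topology, and it assumes the patched kfd_ioctl.h header is installed):

/* Hypothetical user-space caller of the AMDKFD_IOC_DBG_TRAP ioctl
 * introduced above. Illustrative only: gpu_id is a placeholder.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/kfd_ioctl.h>

int main(void)
{
	struct kfd_ioctl_dbg_trap_args args = {0};
	int fd = open("/dev/kfd", O_RDWR);

	if (fd < 0) {
		perror("open /dev/kfd");
		return 1;
	}

	args.gpu_id = 0x1234;               /* placeholder GPU id */
	args.op = KFD_IOC_DBG_TRAP_ENABLE;  /* op 0: enable/disable the trap */
	args.data1 = 1;                     /* 1 = enable */
	args.data2 = 0;                     /* queue ID, reserved for future use */

	if (ioctl(fd, AMDKFD_IOC_DBG_TRAP, &args) < 0)
		perror("AMDKFD_IOC_DBG_TRAP");

	close(fd);
	return 0;
}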
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5711-drm-amdkfd-kfd-expose-the-hive_id-of-the-device-thro.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5711-drm-amdkfd-kfd-expose-the-hive_id-of-the-device-thro.patch
new file mode 100644
index 00000000..fb805e8b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5711-drm-amdkfd-kfd-expose-the-hive_id-of-the-device-thro.patch
@@ -0,0 +1,102 @@
+From 1a90a0a6690ca68f0f0f879e5316d7b7719a1e74 Mon Sep 17 00:00:00 2001
+From: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Date: Fri, 6 Jul 2018 11:32:42 -0400
+Subject: [PATCH 5711/5725] drm/amdkfd: kfd expose the hive_id of the device
+ through its node properties
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Thunk will generate the XGMI topology information when necessary with the hive_id
+for each specified device
+
+Change-Id: I3bbc37bd2af4295e24357ce82f2c760162aff9ca
+Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_device.c | 3 +++
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 3 +++
+ drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 4 ++++
+ drivers/gpu/drm/amd/amdkfd/kfd_topology.h | 1 +
+ drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 2 ++
+ 5 files changed, 13 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+index dd85838..04a8b5b 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -568,6 +568,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
+ goto kfd_doorbell_error;
+ }
+
++ if (kfd->kfd2kgd->get_hive_id)
++ kfd->hive_id = kfd->kfd2kgd->get_hive_id(kfd->kgd);
++
+ if (kfd_topology_add_device(kfd)) {
+ dev_err(kfd_device, "Error adding device to topology\n");
+ goto kfd_topology_add_device_error;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index 427ceae..ca42b58 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -290,6 +290,9 @@ struct kfd_dev {
+ const void *cwsr_isa;
+ unsigned int cwsr_isa_size;
+
++ /* xGMI */
++ uint64_t hive_id;
++
+ bool pci_atomic_requested;
+
+ /* VRAM limit */
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+index 3be2346..eb859b9 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+@@ -487,6 +487,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
+ dev->node_props.location_id);
+ sysfs_show_32bit_prop(buffer, "drm_render_minor",
+ dev->node_props.drm_render_minor);
++ sysfs_show_64bit_prop(buffer, "hive_id",
++ dev->node_props.hive_id);
+
+ if (dev->gpu) {
+ log_max_watch_addr =
+@@ -1322,6 +1324,8 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
+ dev->node_props.drm_render_minor =
+ gpu->shared_resources.drm_render_minor;
+
++ dev->node_props.hive_id = gpu->hive_id;
++
+ kfd_fill_mem_clk_max_info(dev);
+ kfd_fill_iolink_non_crat_info(dev);
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+index c90c287..fe7b1b6 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+@@ -53,6 +53,7 @@
+ #define HSA_CAP_TRAP_DEBUG_WAVE_LAUNCH_MODE_SUPPORTED 0x20000000
+
+ struct kfd_node_properties {
++ uint64_t hive_id;
+ uint32_t cpu_cores_count;
+ uint32_t simd_count;
+ uint32_t mem_banks_count;
+diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+index 865fcfd..e7ba62c 100755
+--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
++++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+@@ -450,6 +450,8 @@ struct kfd2kgd_calls {
+
+ void (*set_compute_idle)(struct kgd_dev *kgd, bool idle);
+
++ uint64_t (*get_hive_id)(struct kgd_dev *kgd);
++
+ uint32_t (*enable_debug_trap)(struct kgd_dev *kgd,
+ uint32_t trap_debug_wave_launch_mode,
+ uint32_t vmid);
+--
+2.7.4
+
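Once this patch is applied, the hive_id appears as a "hive_id <value>" line in each topology node's sysfs properties file. A rough user-space sketch of reading it (not from the patch; the sysfs path is the usual KFD topology location and node 0 is only an example):

/* Sketch: scan a KFD topology node's properties file for hive_id.
 * Path and node number are assumptions for illustration.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char name[64];
	unsigned long long val;
	FILE *f = fopen("/sys/class/kfd/kfd/topology/nodes/0/properties", "r");

	if (!f) {
		perror("open properties");
		return 1;
	}

	/* each line is "<name> <value>" */
	while (fscanf(f, "%63s %llu", name, &val) == 2) {
		if (!strcmp(name, "hive_id")) {
			printf("hive_id %llu\n", val);
			break;
		}
	}

	fclose(f);
	return 0;
}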
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5712-drm-amdkfd-Add-new-iolink-type-defines.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5712-drm-amdkfd-Add-new-iolink-type-defines.patch
new file mode 100644
index 00000000..94414bf5
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5712-drm-amdkfd-Add-new-iolink-type-defines.patch
@@ -0,0 +1,39 @@
+From 49c90679dba9eef4b64ab3e801d6c57df9d13401 Mon Sep 17 00:00:00 2001
+From: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Date: Mon, 13 Aug 2018 14:02:17 -0400
+Subject: [PATCH 5712/5725] drm/amdkfd: Add new iolink type defines
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Update the iolink type defines according to the new thunk spec
+
+Change-Id: Ie155641b6bfbe005ae0e12c5c31c68157247ea26
+Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_crat.h | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
+index cd7ee6d..1ef8823 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
+@@ -248,7 +248,12 @@ struct crat_subtype_ccompute {
+ #define CRAT_IOLINK_TYPE_RAPID_IO 8
+ #define CRAT_IOLINK_TYPE_INFINIBAND 9
+ #define CRAT_IOLINK_TYPE_RESERVED3 10
+-#define CRAT_IOLINK_TYPE_OTHER 11
++#define CRAT_IOLINK_TYPE_XGMI 11
++#define CRAT_IOLINK_TYPE_XGOP 12
++#define CRAT_IOLINK_TYPE_GZ 13
++#define CRAT_IOLINK_TYPE_ETHERNET_RDMA 14
++#define CRAT_IOLINK_TYPE_RDMA_OTHER 15
++#define CRAT_IOLINK_TYPE_OTHER 16
+ #define CRAT_IOLINK_TYPE_MAX 255
+
+ #define CRAT_IOLINK_RESERVED_LENGTH 24
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5713-drm-amdkfd-Generate-xGMI-direct-iolink.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5713-drm-amdkfd-Generate-xGMI-direct-iolink.patch
new file mode 100644
index 00000000..3ea456e9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5713-drm-amdkfd-Generate-xGMI-direct-iolink.patch
@@ -0,0 +1,173 @@
+From 7a3e542dd28b87db46b2812cd3d9ee6fbee04077 Mon Sep 17 00:00:00 2001
+From: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Date: Mon, 13 Aug 2018 14:04:11 -0400
+Subject: [PATCH 5713/5725] drm/amdkfd: Generate xGMI direct iolink
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Generate xGMI iolink for upper level usage
+
+Change-Id: I37bc29fee45cb10d1da849956055c59d823f6f5d
+Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 78 ++++++++++++++++++++++++++++++-----
+ 1 file changed, 68 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+index c540b65..1655e8b 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+@@ -346,7 +346,7 @@ static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink,
+ struct list_head *device_list)
+ {
+ struct kfd_iolink_properties *props = NULL, *props2;
+- struct kfd_topology_device *dev, *cpu_dev;
++ struct kfd_topology_device *dev, *to_dev;
+ uint32_t id_from;
+ uint32_t id_to;
+
+@@ -369,6 +369,8 @@ static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink,
+
+ if (props->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS)
+ props->weight = 20;
++ else if (props->iolink_type == CRAT_IOLINK_TYPE_XGMI)
++ props->weight = 15;
+ else
+ props->weight = node_distance(id_from, id_to);
+
+@@ -390,19 +392,22 @@ static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink,
+ * links are not built at that time. If a PCIe type is discovered, it
+ * means a GPU is detected and we are adding GPU->CPU to the topology.
+ * At this time, also add the corresponded CPU->GPU link.
++ * For xGMI, we only added the link with one direction in the crat
++ * table, add corresponded reversed direction link now.
+ */
+- if (props && props->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS) {
+- cpu_dev = kfd_topology_device_by_proximity_domain(id_to);
+- if (!cpu_dev)
++ if (props && (props->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS ||
++ props->iolink_type == CRAT_IOLINK_TYPE_XGMI)) {
++ to_dev = kfd_topology_device_by_proximity_domain(id_to);
++ if (!to_dev)
+ return -ENODEV;
+ /* same everything but the other direction */
+ props2 = kmemdup(props, sizeof(*props2), GFP_KERNEL);
+ props2->node_from = id_to;
+ props2->node_to = id_from;
+ props2->kobj = NULL;
+- cpu_dev->io_link_count++;
+- cpu_dev->node_props.io_links_count++;
+- list_add_tail(&props2->list, &cpu_dev->io_link_props);
++ to_dev->io_link_count++;
++ to_dev->node_props.io_links_count++;
++ list_add_tail(&props2->list, &to_dev->io_link_props);
+ }
+
+ return 0;
+@@ -1056,7 +1061,7 @@ static int kfd_fill_gpu_memory_affinity(int *avail_size,
+ *
+ * Return 0 if successful else return -ve value
+ */
+-static int kfd_fill_gpu_direct_io_link(int *avail_size,
++static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size,
+ struct kfd_dev *kdev,
+ struct crat_subtype_iolink *sub_type_hdr,
+ uint32_t proximity_domain)
+@@ -1088,6 +1093,28 @@ static int kfd_fill_gpu_direct_io_link(int *avail_size,
+ return 0;
+ }
+
++static int kfd_fill_gpu_xgmi_link_to_gpu(int *avail_size,
++ struct kfd_dev *kdev,
++ struct crat_subtype_iolink *sub_type_hdr,
++ uint32_t proximity_domain_from,
++ uint32_t proximity_domain_to)
++{
++ *avail_size -= sizeof(struct crat_subtype_iolink);
++ if (*avail_size < 0)
++ return -ENOMEM;
++
++ memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));
++
++ sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
++ sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
++ sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED;
++
++ sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_XGMI;
++ sub_type_hdr->proximity_domain_from = proximity_domain_from;
++ sub_type_hdr->proximity_domain_to = proximity_domain_to;
++ return 0;
++}
++
+ /* kfd_create_vcrat_image_gpu - Create Virtual CRAT for CPU
+ *
+ * @pcrat_image: Fill in VCRAT for GPU
+@@ -1100,14 +1127,16 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
+ {
+ struct crat_header *crat_table = (struct crat_header *)pcrat_image;
+ struct crat_subtype_generic *sub_type_hdr;
++ struct kfd_local_mem_info local_mem_info;
++ struct kfd_topology_device *peer_dev;
+ struct crat_subtype_computeunit *cu;
+ struct kfd_cu_info cu_info;
+ int avail_size = *size;
+ uint32_t total_num_of_cu;
+ int num_of_cache_entries = 0;
+ int cache_mem_filled = 0;
++ uint32_t nid = 0;
+ int ret = 0;
+- struct kfd_local_mem_info local_mem_info;
+
+ if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_GPU)
+ return -EINVAL;
+@@ -1231,7 +1260,7 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
+ */
+ sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
+ cache_mem_filled);
+- ret = kfd_fill_gpu_direct_io_link(&avail_size, kdev,
++ ret = kfd_fill_gpu_direct_io_link_to_cpu(&avail_size, kdev,
+ (struct crat_subtype_iolink *)sub_type_hdr, proximity_domain);
+
+ if (ret < 0)
+@@ -1240,6 +1269,35 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
+ crat_table->length += sub_type_hdr->length;
+ crat_table->total_entries++;
+
++
++ /* Fill in Subtype: IO_LINKS
++ * Direct links from GPU to other GPUs through xGMI.
++ * We will loop GPUs that already be processed (with lower value
++ * of proximity_domain), add the link for the GPUs with same
++ * hive id (from this GPU to other GPU) . The reversed iolink
++ * (from other GPU to this GPU) will be added
++ * in kfd_parse_subtype_iolink.
++ */
++ if (kdev->hive_id) {
++ for (nid = 0; nid < proximity_domain; ++nid) {
++ peer_dev = kfd_topology_device_by_proximity_domain(nid);
++ if (!peer_dev->gpu)
++ continue;
++ if (peer_dev->gpu->hive_id != kdev->hive_id)
++ continue;
++ sub_type_hdr = (typeof(sub_type_hdr))(
++ (char *)sub_type_hdr +
++ sizeof(struct crat_subtype_iolink));
++ ret = kfd_fill_gpu_xgmi_link_to_gpu(
++ &avail_size, kdev,
++ (struct crat_subtype_iolink *)sub_type_hdr,
++ proximity_domain, nid);
++ if (ret < 0)
++ return ret;
++ crat_table->length += sub_type_hdr->length;
++ crat_table->total_entries++;
++ }
++ }
+ *size = crat_table->length;
+ pr_info("Virtual CRAT table created for GPU\n");
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5714-drm-amdkfd-Only-add-bi-directional-iolink-on-GPU-wit.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5714-drm-amdkfd-Only-add-bi-directional-iolink-on-GPU-wit.patch
new file mode 100644
index 00000000..994f9ebe
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5714-drm-amdkfd-Only-add-bi-directional-iolink-on-GPU-wit.patch
@@ -0,0 +1,94 @@
+From 20620531eab1dd6d8a0f7f6314745121f42e1b5c Mon Sep 17 00:00:00 2001
+From: shaoyunl <Shaoyun.Liu@amd.com>
+Date: Fri, 7 Sep 2018 12:00:07 -0400
+Subject: [PATCH 5714/5725] drm/amdkfd: Only add bi-directional iolink on GPU
+ with XGMI or largebar
+
+Change-Id: Ibb6a89ed878fffccb9a8bb4032b07a10ee298a99
+Signed-off-by: shaoyunl <Shaoyun.Liu@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 15 +++++++++------
+ drivers/gpu/drm/amd/amdkfd/kfd_crat.h | 3 ++-
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 1 +
+ 3 files changed, 12 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+index 1655e8b..ee753cb 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+@@ -353,8 +353,8 @@ static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink,
+ id_from = iolink->proximity_domain_from;
+ id_to = iolink->proximity_domain_to;
+
+- pr_debug("Found IO link entry in CRAT table with id_from=%d\n",
+- id_from);
++ pr_debug("Found IO link entry in CRAT table with id_from=%d, id_to %d\n",
++ id_from, id_to);
+ list_for_each_entry(dev, device_list, list) {
+ if (id_from == dev->proximity_domain) {
+ props = kfd_alloc_struct(props);
+@@ -391,12 +391,12 @@ static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink,
+ /* CPU topology is created before GPUs are detected, so CPU->GPU
+ * links are not built at that time. If a PCIe type is discovered, it
+ * means a GPU is detected and we are adding GPU->CPU to the topology.
+- * At this time, also add the corresponded CPU->GPU link.
++ * At this time, also add the corresponded CPU->GPU link if GPU
++ * is large bar.
+ * For xGMI, we only added the link with one direction in the crat
+ * table, add corresponded reversed direction link now.
+ */
+- if (props && (props->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS ||
+- props->iolink_type == CRAT_IOLINK_TYPE_XGMI)) {
++ if (props && (iolink->flags & CRAT_IOLINK_FLAGS_BI_DIRECTIONAL)) {
+ to_dev = kfd_topology_device_by_proximity_domain(id_to);
+ if (!to_dev)
+ return -ENODEV;
+@@ -1076,6 +1076,8 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size,
+ sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
+ sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
+ sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED;
++ if (kfd_dev_is_large_bar(kdev))
++ sub_type_hdr->flags |= CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;
+
+ /* Fill in IOLINK subtype.
+ * TODO: Fill-in other fields of iolink subtype
+@@ -1107,7 +1109,8 @@ static int kfd_fill_gpu_xgmi_link_to_gpu(int *avail_size,
+
+ sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
+ sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
+- sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED;
++ sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED |
++ CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;
+
+ sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_XGMI;
+ sub_type_hdr->proximity_domain_from = proximity_domain_from;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
+index 1ef8823..cfdd02f 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
+@@ -232,7 +232,8 @@ struct crat_subtype_ccompute {
+ #define CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT (1 << 2)
+ #define CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT (1 << 3)
+ #define CRAT_IOLINK_FLAGS_NO_PEER_TO_PEER_DMA (1 << 4)
+-#define CRAT_IOLINK_FLAGS_RESERVED_MASK 0xffffffe0
++#define CRAT_IOLINK_FLAGS_BI_DIRECTIONAL (1 << 31)
++#define CRAT_IOLINK_FLAGS_RESERVED_MASK 0x7fffffe0
+
+ /*
+ * IO interface types
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index ca42b58..1f0d558 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -834,6 +834,7 @@ struct amdkfd_ioctl_desc {
+ unsigned int cmd_drv;
+ const char *name;
+ };
++bool kfd_dev_is_large_bar(struct kfd_dev *dev);
+
+ int kfd_process_create_wq(void);
+ void kfd_process_destroy_wq(void);
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5715-drm-amdkfd-change-system-memory-overcommit-limit.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5715-drm-amdkfd-change-system-memory-overcommit-limit.patch
new file mode 100644
index 00000000..30a33a7e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5715-drm-amdkfd-change-system-memory-overcommit-limit.patch
@@ -0,0 +1,208 @@
+From 60d88de85ee6c3e91253719333071f4c15d864a1 Mon Sep 17 00:00:00 2001
+From: Eric Huang <JinhuiEric.Huang@amd.com>
+Date: Wed, 5 Sep 2018 11:46:14 -0400
+Subject: [PATCH 5715/5725] drm/amdkfd: change system memory overcommit limit
+
+This improves the system memory limit by:
+1. replacing the userptr limit with a total memory limit that
+counts TTM memory usage and userptr usage.
+2. counting the acc size for all BOs.
+
+KFD-387
+
+Change-Id: I30a356c5cb7517a7e7ad5425de4e79cfc3109594
+Signed-off-by: Eric Huang <JinHuiEric.Huang@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 106 +++++++++++++----------
+ 1 file changed, 59 insertions(+), 47 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index 8cd9f0d..15a64e8 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -41,9 +41,9 @@
+ /* Impose limit on how much memory KFD can use */
+ static struct {
+ uint64_t max_system_mem_limit;
+- uint64_t max_userptr_mem_limit;
++ uint64_t max_ttm_mem_limit;
+ int64_t system_mem_used;
+- int64_t userptr_mem_used;
++ int64_t ttm_mem_used;
+ spinlock_t mem_limit_lock;
+ } kfd_mem_limit;
+
+@@ -85,8 +85,8 @@ static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
+ }
+
+ /* Set memory usage limits. Current, limits are
+- * System (kernel) memory - 3/8th System RAM
+- * Userptr memory - 3/4th System RAM
++ * System (TTM + userptr) memory - 3/4th System RAM
++ * TTM memory - 3/8th System RAM
+ */
+ void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
+ {
+@@ -98,48 +98,54 @@ void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
+ mem *= si.mem_unit;
+
+ spin_lock_init(&kfd_mem_limit.mem_limit_lock);
+- kfd_mem_limit.max_system_mem_limit = (mem >> 1) - (mem >> 3);
+- kfd_mem_limit.max_userptr_mem_limit = mem - (mem >> 2);
+- pr_debug("Kernel memory limit %lluM, userptr limit %lluM\n",
++ kfd_mem_limit.max_system_mem_limit = (mem >> 1) + (mem >> 2);
++ kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
++ pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
+ (kfd_mem_limit.max_system_mem_limit >> 20),
+- (kfd_mem_limit.max_userptr_mem_limit >> 20));
++ (kfd_mem_limit.max_ttm_mem_limit >> 20));
+ }
+
+ static int amdgpu_amdkfd_reserve_system_mem_limit(struct amdgpu_device *adev,
+- uint64_t size, u32 domain)
++ uint64_t size, u32 domain, bool sg)
+ {
+- size_t acc_size;
++ size_t acc_size, system_mem_needed, ttm_mem_needed;
+ int ret = 0;
+
+ acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
+ sizeof(struct amdgpu_bo));
+
+ spin_lock(&kfd_mem_limit.mem_limit_lock);
++
+ if (domain == AMDGPU_GEM_DOMAIN_GTT) {
+- if (kfd_mem_limit.system_mem_used + (acc_size + size) >
+- kfd_mem_limit.max_system_mem_limit) {
+- ret = -ENOMEM;
+- goto err_no_mem;
+- }
+- kfd_mem_limit.system_mem_used += (acc_size + size);
+- } else if (domain == AMDGPU_GEM_DOMAIN_CPU) {
+- if ((kfd_mem_limit.system_mem_used + acc_size >
+- kfd_mem_limit.max_system_mem_limit) ||
+- (kfd_mem_limit.userptr_mem_used + (size + acc_size) >
+- kfd_mem_limit.max_userptr_mem_limit)) {
+- ret = -ENOMEM;
+- goto err_no_mem;
+- }
+- kfd_mem_limit.system_mem_used += acc_size;
+- kfd_mem_limit.userptr_mem_used += size;
++ /* TTM GTT memory */
++ system_mem_needed = acc_size + size;
++ ttm_mem_needed = acc_size + size;
++ } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
++ /* Userptr */
++ system_mem_needed = acc_size + size;
++ ttm_mem_needed = acc_size;
++ } else {
++ /* VRAM and SG */
++ system_mem_needed = acc_size;
++ ttm_mem_needed = acc_size;
+ }
+-err_no_mem:
++
++ if ((kfd_mem_limit.system_mem_used + system_mem_needed >
++ kfd_mem_limit.max_system_mem_limit) ||
++ (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
++ kfd_mem_limit.max_ttm_mem_limit))
++ ret = -ENOMEM;
++ else {
++ kfd_mem_limit.system_mem_used += system_mem_needed;
++ kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
++ }
++
+ spin_unlock(&kfd_mem_limit.mem_limit_lock);
+ return ret;
+ }
+
+ static void unreserve_system_mem_limit(struct amdgpu_device *adev,
+- uint64_t size, u32 domain)
++ uint64_t size, u32 domain, bool sg)
+ {
+ size_t acc_size;
+
+@@ -149,14 +155,18 @@ static void unreserve_system_mem_limit(struct amdgpu_device *adev,
+ spin_lock(&kfd_mem_limit.mem_limit_lock);
+ if (domain == AMDGPU_GEM_DOMAIN_GTT) {
+ kfd_mem_limit.system_mem_used -= (acc_size + size);
+- } else if (domain == AMDGPU_GEM_DOMAIN_CPU) {
++ kfd_mem_limit.ttm_mem_used -= (acc_size + size);
++ } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
++ kfd_mem_limit.system_mem_used -= (acc_size + size);
++ kfd_mem_limit.ttm_mem_used -= acc_size;
++ } else {
+ kfd_mem_limit.system_mem_used -= acc_size;
+- kfd_mem_limit.userptr_mem_used -= size;
++ kfd_mem_limit.ttm_mem_used -= acc_size;
+ }
+ WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
+ "kfd system memory accounting unbalanced");
+- WARN_ONCE(kfd_mem_limit.userptr_mem_used < 0,
+- "kfd userptr memory accounting unbalanced");
++ WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
++ "kfd TTM memory accounting unbalanced");
+
+ spin_unlock(&kfd_mem_limit.mem_limit_lock);
+ }
+@@ -166,17 +176,22 @@ void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo)
+ spin_lock(&kfd_mem_limit.mem_limit_lock);
+
+ if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
+- kfd_mem_limit.system_mem_used -= bo->tbo.acc_size;
+- kfd_mem_limit.userptr_mem_used -= amdgpu_bo_size(bo);
+- } else if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT &&
+- !bo->tbo.sg) {
+ kfd_mem_limit.system_mem_used -=
+ (bo->tbo.acc_size + amdgpu_bo_size(bo));
++ kfd_mem_limit.ttm_mem_used -= bo->tbo.acc_size;
++ } else if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT) {
++ kfd_mem_limit.system_mem_used -=
++ (bo->tbo.acc_size + amdgpu_bo_size(bo));
++ kfd_mem_limit.ttm_mem_used -=
++ (bo->tbo.acc_size + amdgpu_bo_size(bo));
++ } else {
++ kfd_mem_limit.system_mem_used -= bo->tbo.acc_size;
++ kfd_mem_limit.ttm_mem_used -= bo->tbo.acc_size;
+ }
+ WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
+ "kfd system memory accounting unbalanced");
+- WARN_ONCE(kfd_mem_limit.userptr_mem_used < 0,
+- "kfd userptr memory accounting unbalanced");
++ WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
++ "kfd TTM memory accounting unbalanced");
+
+ spin_unlock(&kfd_mem_limit.mem_limit_lock);
+ }
+@@ -1285,13 +1300,11 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
+
+ amdgpu_sync_create(&(*mem)->sync);
+
+- if (!sg) {
+- ret = amdgpu_amdkfd_reserve_system_mem_limit(adev, size,
+- alloc_domain);
+- if (ret) {
+- pr_debug("Insufficient system memory\n");
+- goto err_reserve_limit;
+- }
++ ret = amdgpu_amdkfd_reserve_system_mem_limit(adev, size,
++ alloc_domain, !!sg);
++ if (ret) {
++ pr_debug("Insufficient system memory\n");
++ goto err_reserve_limit;
+ }
+
+ pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
+@@ -1346,8 +1359,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
+ /* Don't unreserve system mem limit twice */
+ goto err_reserve_limit;
+ err_bo_create:
+- if (!sg)
+- unreserve_system_mem_limit(adev, size, alloc_domain);
++ unreserve_system_mem_limit(adev, size, alloc_domain, !!sg);
+ err_reserve_limit:
+ kfree(*mem);
+ err:
+--
+2.7.4
+
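The shift expressions above amount to 3/4 of system RAM for the combined system limit and 3/8 for the TTM limit. A small stand-alone check of that arithmetic (the 64 GiB machine size is only an example, not taken from the patch):

/* Sanity check of the limits computed in
 * amdgpu_amdkfd_gpuvm_init_mem_limits(): (mem>>1)+(mem>>2) == 3/4 mem,
 * (mem>>1)-(mem>>3) == 3/8 mem. 64 GiB is an arbitrary example.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long mem = 64ULL << 30;                       /* 64 GiB */
	unsigned long long system_limit = (mem >> 1) + (mem >> 2);  /* 3/4 of RAM */
	unsigned long long ttm_limit    = (mem >> 1) - (mem >> 3);  /* 3/8 of RAM */

	printf("system limit %lluM, TTM limit %lluM\n",
	       system_limit >> 20, ttm_limit >> 20);  /* prints 49152M, 24576M */
	return 0;
}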
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5716-drm-amdkfd-Reliably-prevent-reclaim-FS-while-holding.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5716-drm-amdkfd-Reliably-prevent-reclaim-FS-while-holding.patch
new file mode 100644
index 00000000..7c2dc7f8
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5716-drm-amdkfd-Reliably-prevent-reclaim-FS-while-holding.patch
@@ -0,0 +1,613 @@
+From d857e475e467902a11a5234a96121519be5972cd Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Wed, 11 Jul 2018 22:32:44 -0400
+Subject: [PATCH 5716/5725] drm/amdkfd: Reliably prevent reclaim-FS while
+ holding DQM lock
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This is needed to prevent deadlocks when MMU notifiers run in
+reclaim-FS context and take the DQM lock for userptr evictions.
+Previously this was done by making all memory allocations under
+DQM locks GFP_NOIO. This is error prone. Using
+memalloc_nofs_save/restore will reliably affect all memory
+allocations anywhere in the kernel while the DQM lock is held.
+
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 24 -----
+ drivers/gpu/drm/amd/amdkfd/kfd_device.c | 4 +-
+ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 106 ++++++++++-----------
+ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 20 +++-
+ include/uapi/linux/kfd_ioctl.h | 4 +-
+ 5 files changed, 75 insertions(+), 83 deletions(-)
+ mode change 100644 => 100755 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+ mode change 100644 => 100755 include/uapi/linux/kfd_ioctl.h
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+old mode 100644
+new mode 100755
+index bf7fa00..a0590d8
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -920,9 +920,6 @@ static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
+ */
+ static void amdgpu_device_check_arguments(struct amdgpu_device *adev)
+ {
+- struct sysinfo si;
+- int phys_ram_gb, amdgpu_vm_size_aligned;
+-
+ if (amdgpu_sched_jobs < 4) {
+ dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
+ amdgpu_sched_jobs);
+@@ -947,27 +944,6 @@ static void amdgpu_device_check_arguments(struct amdgpu_device *adev)
+ amdgpu_gtt_size = -1;
+ }
+
+- /* Compute the GPU VM space only if the user
+- * hasn't changed it from the default.
+- */
+- if (amdgpu_vm_size == -1) {
+- /* Computation depends on the amount of physical RAM available.
+- * Cannot exceed 1TB.
+- */
+- si_meminfo(&si);
+- phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit) >> 30;
+- amdgpu_vm_size = min(phys_ram_gb * 3 + 16, 1024);
+-
+- /* GPUVM sizes are almost never perfect powers of two.
+- * Round up to nearest power of two starting from
+- * the minimum allowed but aligned size of 32GB */
+- amdgpu_vm_size_aligned = 32;
+- while (amdgpu_vm_size > amdgpu_vm_size_aligned)
+- amdgpu_vm_size_aligned *= 2;
+-
+- amdgpu_vm_size = amdgpu_vm_size_aligned;
+- }
+-
+ /* valid range is between 4 and 9 inclusive */
+ if (amdgpu_vm_fragment_size != -1 &&
+ (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+index 04a8b5b..f78269d 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -652,7 +652,7 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd)
+ kgd2kfd_suspend(kfd);
+
+ /* hold dqm->lock to prevent further execution*/
+- mutex_lock(&kfd->dqm->lock);
++ dqm_lock(kfd->dqm);
+
+ kfd_signal_reset_event(kfd);
+ return 0;
+@@ -671,7 +671,7 @@ int kgd2kfd_post_reset(struct kfd_dev *kfd)
+ if (!kfd->init_complete)
+ return 0;
+
+- mutex_unlock(&kfd->dqm->lock);
++ dqm_unlock(kfd->dqm);
+
+ ret = kfd_resume(kfd);
+ if (ret)
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index 0ed722c..974d58c 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -130,7 +130,7 @@ bool check_if_queues_active(struct device_queue_manager *dqm,
+ bool busy = false;
+ struct queue *q;
+
+- mutex_lock(&dqm->lock);
++ dqm_lock(dqm);
+ list_for_each_entry(q, &qpd->queues_list, list) {
+ struct mqd_manager *mqd_mgr;
+ enum KFD_MQD_TYPE type;
+@@ -144,7 +144,7 @@ bool check_if_queues_active(struct device_queue_manager *dqm,
+ if (busy)
+ break;
+ }
+- mutex_unlock(&dqm->lock);
++ dqm_unlock(dqm);
+
+ return busy;
+ }
+@@ -279,7 +279,7 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
+
+ print_queue(q);
+
+- mutex_lock(&dqm->lock);
++ dqm_lock(dqm);
+
+ if (dqm->total_queue_count >= max_num_of_queues_per_device) {
+ pr_warn("Can't create new usermode queue because %d queues were already created\n",
+@@ -336,7 +336,7 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
+ dqm->total_queue_count);
+
+ out_unlock:
+- mutex_unlock(&dqm->lock);
++ dqm_unlock(dqm);
+ return retval;
+ }
+
+@@ -500,9 +500,9 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
+ {
+ int retval;
+
+- mutex_lock(&dqm->lock);
++ dqm_lock(dqm);
+ retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
+- mutex_unlock(&dqm->lock);
++ dqm_unlock(dqm);
+
+ return retval;
+ }
+@@ -514,7 +514,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
+ struct kfd_process_device *pdd;
+ bool prev_active = false;
+
+- mutex_lock(&dqm->lock);
++ dqm_lock(dqm);
+ pdd = kfd_get_process_device_data(q->device, q->process);
+ if (!pdd) {
+ retval = -ENODEV;
+@@ -586,7 +586,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
+ }
+
+ out_unlock:
+- mutex_unlock(&dqm->lock);
++ dqm_unlock(dqm);
+ return retval;
+ }
+
+@@ -619,7 +619,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
+ struct kfd_process_device *pdd;
+ int retval = 0;
+
+- mutex_lock(&dqm->lock);
++ dqm_lock(dqm);
+ if (qpd->evicted++ > 0) /* already evicted, do nothing */
+ goto out;
+
+@@ -649,7 +649,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
+ }
+
+ out:
+- mutex_unlock(&dqm->lock);
++ dqm_unlock(dqm);
+ return retval;
+ }
+
+@@ -660,7 +660,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
+ struct kfd_process_device *pdd;
+ int retval = 0;
+
+- mutex_lock(&dqm->lock);
++ dqm_lock(dqm);
+ if (qpd->evicted++ > 0) /* already evicted, do nothing */
+ goto out;
+
+@@ -682,7 +682,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
+
+ out:
+- mutex_unlock(&dqm->lock);
++ dqm_unlock(dqm);
+ return retval;
+ }
+
+@@ -700,7 +700,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
+ /* Retrieve PD base */
+ pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);
+
+- mutex_lock(&dqm->lock);
++ dqm_lock(dqm);
+ if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
+ goto out;
+ if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
+@@ -755,7 +755,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
+ out:
+ if (mm)
+ mmput(mm);
+- mutex_unlock(&dqm->lock);
++ dqm_unlock(dqm);
+ return retval;
+ }
+
+@@ -771,7 +771,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
+ /* Retrieve PD base */
+ pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);
+
+- mutex_lock(&dqm->lock);
++ dqm_lock(dqm);
+ if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
+ goto out;
+ if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
+@@ -799,7 +799,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
+ if (!retval)
+ qpd->evicted = 0;
+ out:
+- mutex_unlock(&dqm->lock);
++ dqm_unlock(dqm);
+ return retval;
+ }
+
+@@ -821,7 +821,7 @@ static int register_process(struct device_queue_manager *dqm,
+ /* Retrieve PD base */
+ pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);
+
+- mutex_lock(&dqm->lock);
++ dqm_lock(dqm);
+ list_add(&n->list, &dqm->queues);
+
+ /* Update PD Base in QPD */
+@@ -832,7 +832,7 @@ static int register_process(struct device_queue_manager *dqm,
+ if (dqm->processes_count++ == 0)
+ dqm->dev->kfd2kgd->set_compute_idle(dqm->dev->kgd, false);
+
+- mutex_unlock(&dqm->lock);
++ dqm_unlock(dqm);
+
+ return retval;
+ }
+@@ -847,8 +847,7 @@ static int unregister_process(struct device_queue_manager *dqm,
+ list_empty(&qpd->queues_list) ? "empty" : "not empty");
+
+ retval = 0;
+- mutex_lock(&dqm->lock);
+-
++ dqm_lock(dqm);
+ list_for_each_entry_safe(cur, next, &dqm->queues, list) {
+ if (qpd == cur->qpd) {
+ list_del(&cur->list);
+@@ -862,7 +861,7 @@ static int unregister_process(struct device_queue_manager *dqm,
+ /* qpd not found in dqm list */
+ retval = 1;
+ out:
+- mutex_unlock(&dqm->lock);
++ dqm_unlock(dqm);
+ return retval;
+ }
+
+@@ -901,7 +900,7 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
+ if (!dqm->allocated_queues)
+ return -ENOMEM;
+
+- mutex_init(&dqm->lock);
++ mutex_init(&dqm->lock_hidden);
+ INIT_LIST_HEAD(&dqm->queues);
+ dqm->queue_count = dqm->next_pipe_to_allocate = 0;
+ dqm->sdma_queue_count = 0;
+@@ -931,7 +930,7 @@ static void uninitialize(struct device_queue_manager *dqm)
+ kfree(dqm->allocated_queues);
+ for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
+ kfree(dqm->mqd_mgrs[i]);
+- mutex_destroy(&dqm->lock);
++ mutex_destroy(&dqm->lock_hidden);
+ kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
+ }
+
+@@ -1068,7 +1067,7 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
+ {
+ pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
+
+- mutex_init(&dqm->lock);
++ mutex_init(&dqm->lock_hidden);
+ INIT_LIST_HEAD(&dqm->queues);
+ dqm->queue_count = dqm->processes_count = 0;
+ dqm->sdma_queue_count = 0;
+@@ -1108,11 +1107,11 @@ static int start_cpsch(struct device_queue_manager *dqm)
+
+ init_interrupts(dqm);
+
+- mutex_lock(&dqm->lock);
++ dqm_lock(dqm);
+ /* clear hang status when driver try to start the hw scheduler */
+ dqm->is_hws_hang = false;
+ execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
+- mutex_unlock(&dqm->lock);
++ dqm_unlock(dqm);
+
+ return 0;
+ fail_allocate_vidmem:
+@@ -1124,9 +1123,9 @@ static int start_cpsch(struct device_queue_manager *dqm)
+
+ static int stop_cpsch(struct device_queue_manager *dqm)
+ {
+- mutex_lock(&dqm->lock);
++ dqm_lock(dqm);
+ unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
+- mutex_unlock(&dqm->lock);
++ dqm_unlock(dqm);
+
+ kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
+ pm_uninit(&dqm->packets);
+@@ -1138,11 +1137,11 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
+ struct kernel_queue *kq,
+ struct qcm_process_device *qpd)
+ {
+- mutex_lock(&dqm->lock);
++ dqm_lock(dqm);
+ if (dqm->total_queue_count >= max_num_of_queues_per_device) {
+ pr_warn("Can't create new kernel queue because %d queues were already created\n",
+ dqm->total_queue_count);
+- mutex_unlock(&dqm->lock);
++ dqm_unlock(dqm);
+ return -EPERM;
+ }
+
+@@ -1158,7 +1157,7 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
+ dqm->queue_count++;
+ qpd->is_debug = true;
+ execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
+- mutex_unlock(&dqm->lock);
++ dqm_unlock(dqm);
+
+ return 0;
+ }
+@@ -1167,7 +1166,7 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
+ struct kernel_queue *kq,
+ struct qcm_process_device *qpd)
+ {
+- mutex_lock(&dqm->lock);
++ dqm_lock(dqm);
+ list_del(&kq->list);
+ dqm->queue_count--;
+ qpd->is_debug = false;
+@@ -1179,7 +1178,7 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
+ dqm->total_queue_count--;
+ pr_debug("Total of %d queues are accountable so far\n",
+ dqm->total_queue_count);
+- mutex_unlock(&dqm->lock);
++ dqm_unlock(dqm);
+ }
+
+ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
+@@ -1190,7 +1189,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
+
+ retval = 0;
+
+- mutex_lock(&dqm->lock);
++ dqm_lock(dqm);
+
+ if (dqm->total_queue_count >= max_num_of_queues_per_device) {
+ pr_warn("Can't create new usermode queue because %d queues were already created\n",
+@@ -1257,7 +1256,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
+ pr_debug("Total of %d queues are accountable so far\n",
+ dqm->total_queue_count);
+
+- mutex_unlock(&dqm->lock);
++ dqm_unlock(dqm);
+ return retval;
+
+ out_deallocate_doorbell:
+@@ -1266,7 +1265,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
+ if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
+ deallocate_sdma_queue(dqm, q->sdma_id);
+ out_unlock:
+- mutex_unlock(&dqm->lock);
++ dqm_unlock(dqm);
+
+ return retval;
+ }
+@@ -1397,7 +1396,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
+ retval = 0;
+
+ /* remove queue from list to prevent rescheduling after preemption */
+- mutex_lock(&dqm->lock);
++ dqm_lock(dqm);
+
+ if (qpd->is_debug) {
+ /*
+@@ -1442,14 +1441,14 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
+ pr_debug("Total of %d queues are accountable so far\n",
+ dqm->total_queue_count);
+
+- mutex_unlock(&dqm->lock);
++ dqm_unlock(dqm);
+
+ return retval;
+
+ failed:
+ failed_try_destroy_debugged_queue:
+
+- mutex_unlock(&dqm->lock);
++ dqm_unlock(dqm);
+ return retval;
+ }
+
+@@ -1473,7 +1472,7 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
+ if (!dqm->asic_ops.set_cache_memory_policy)
+ return retval;
+
+- mutex_lock(&dqm->lock);
++ dqm_lock(dqm);
+
+ if (alternate_aperture_size == 0) {
+ /* base > limit disables APE1 */
+@@ -1519,7 +1518,7 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
+ qpd->sh_mem_ape1_limit);
+
+ out:
+- mutex_unlock(&dqm->lock);
++ dqm_unlock(dqm);
+ return retval;
+ }
+
+@@ -1550,7 +1549,7 @@ static int process_termination_nocpsch(struct device_queue_manager *dqm,
+ struct device_process_node *cur, *next_dpn;
+ int retval = 0;
+
+- mutex_lock(&dqm->lock);
++ dqm_lock(dqm);
+
+ /* Clear all user mode queues */
+ list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
+@@ -1571,7 +1570,7 @@ static int process_termination_nocpsch(struct device_queue_manager *dqm,
+ }
+ }
+
+- mutex_unlock(&dqm->lock);
++ dqm_unlock(dqm);
+ return retval;
+ }
+
+@@ -1584,7 +1583,7 @@ static int get_wave_state(struct device_queue_manager *dqm,
+ struct mqd_manager *mqd_mgr;
+ int r;
+
+- mutex_lock(&dqm->lock);
++ dqm_lock(dqm);
+
+ if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE ||
+ q->properties.is_active || !q->device->cwsr_enabled) {
+@@ -1607,7 +1606,7 @@ static int get_wave_state(struct device_queue_manager *dqm,
+ ctl_stack_used_size, save_area_used_size);
+
+ dqm_unlock:
+- mutex_unlock(&dqm->lock);
++ dqm_unlock(dqm);
+ return r;
+ }
+
+@@ -1624,7 +1623,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
+
+ retval = 0;
+
+- mutex_lock(&dqm->lock);
++ dqm_lock(dqm);
+
+ /* Clean all kernel queues */
+ list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
+@@ -1679,7 +1678,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
+ }
+
+ out:
+- mutex_unlock(&dqm->lock);
++ dqm_unlock(dqm);
+ return retval;
+ }
+
+@@ -1840,7 +1839,7 @@ int reserve_debug_trap_vmid(struct device_queue_manager *dqm)
+ return -EINVAL;
+ }
+
+- mutex_lock(&dqm->lock);
++ dqm_lock(dqm);
+
+ if (dqm->trap_debug_vmid != 0) {
+ pr_err("Trap debug id already reserved\n");
+@@ -1867,7 +1866,7 @@ int reserve_debug_trap_vmid(struct device_queue_manager *dqm)
+
+ pr_debug("Reserved VMID for trap debug: %i\n", dqm->trap_debug_vmid);
+ out_unlock:
+- mutex_unlock(&dqm->lock);
++ dqm_unlock(dqm);
+ return r;
+ }
+
+@@ -1885,7 +1884,7 @@ int release_debug_trap_vmid(struct device_queue_manager *dqm)
+ return -EINVAL;
+ }
+
+- mutex_lock(&dqm->lock);
++ dqm_lock(dqm);
+ trap_debug_vmid = dqm->trap_debug_vmid;
+ if (dqm->trap_debug_vmid == 0) {
+ pr_err("Trap debug id is not reserved\n");
+@@ -1913,7 +1912,7 @@ int release_debug_trap_vmid(struct device_queue_manager *dqm)
+ pr_debug("Released VMID for trap debug: %i\n", trap_debug_vmid);
+
+ out_unlock:
+- mutex_unlock(&dqm->lock);
++ dqm_unlock(dqm);
+ return r;
+ }
+
+@@ -2005,11 +2004,10 @@ int dqm_debugfs_execute_queues(struct device_queue_manager *dqm)
+ {
+ int r = 0;
+
+- mutex_lock(&dqm->lock);
++ dqm_lock(dqm);
+ dqm->active_runlist = true;
+ r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
+- mutex_unlock(&dqm->lock);
+-
++ dqm_unlock(dqm);
+ return r;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+index 4c22738..cc152e7a 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+@@ -26,6 +26,8 @@
+
+ #include <linux/rwsem.h>
+ #include <linux/list.h>
++#include <linux/mutex.h>
++#include <linux/sched/mm.h>
+ #include "kfd_priv.h"
+ #include "kfd_mqd_manager.h"
+
+@@ -177,8 +179,9 @@ struct device_queue_manager {
+ struct mqd_manager *mqd_mgrs[KFD_MQD_TYPE_MAX];
+ struct packet_manager packets;
+ struct kfd_dev *dev;
+- struct mutex lock;
++ struct mutex lock_hidden; /* use dqm_lock/unlock(dqm) */
+ struct list_head queues;
++ unsigned int saved_flags;
+ unsigned int processes_count;
+ unsigned int queue_count;
+ unsigned int sdma_queue_count;
+@@ -233,4 +236,19 @@ get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
+ return (pdd->lds_base >> 60) & 0x0E;
+ }
+
++/* The DQM lock can be taken in MMU notifiers. Make sure no reclaim-FS
++ * happens while holding this lock anywhere to prevent deadlocks when
++ * an MMU notifier runs in reclaim-FS context.
++ */
++static inline void dqm_lock(struct device_queue_manager *dqm)
++{
++ mutex_lock(&dqm->lock_hidden);
++ dqm->saved_flags = memalloc_nofs_save();
++}
++static inline void dqm_unlock(struct device_queue_manager *dqm)
++{
++ memalloc_nofs_restore(dqm->saved_flags);
++ mutex_unlock(&dqm->lock_hidden);
++}
++
+ #endif /* KFD_DEVICE_QUEUE_MANAGER_H_ */
+diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
+old mode 100644
+new mode 100755
+index 2c1e8676..7bef0e4
+--- a/include/uapi/linux/kfd_ioctl.h
++++ b/include/uapi/linux/kfd_ioctl.h
+@@ -239,7 +239,7 @@ struct kfd_ioctl_dbg_trap_args {
+ #define KFD_IOC_WAIT_RESULT_TIMEOUT 1
+ #define KFD_IOC_WAIT_RESULT_FAIL 2
+
+-#define KFD_SIGNAL_EVENT_LIMIT 4096
++#define KFD_SIGNAL_EVENT_LIMIT 4096
+
+ /* For kfd_event_data.hw_exception_data.reset_type. */
+ #define KFD_HW_EXCEPTION_WHOLE_GPU_RESET 0
+@@ -588,6 +588,6 @@ struct kfd_ioctl_get_tile_config_args {
+ AMDKFD_IOW(0x21, struct kfd_ioctl_dbg_trap_args)
+
+ #define AMDKFD_COMMAND_START 0x01
+-#define AMDKFD_COMMAND_END 0x21
++#define AMDKFD_COMMAND_END 0x22
+
+ #endif
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5717-Reverted-Update-KFD-Thunk-ioctl-ABI-to-match-upstrea.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5717-Reverted-Update-KFD-Thunk-ioctl-ABI-to-match-upstrea.patch
new file mode 100644
index 00000000..ed19f9f5
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5717-Reverted-Update-KFD-Thunk-ioctl-ABI-to-match-upstrea.patch
@@ -0,0 +1,579 @@
+From 97b4e77e5035a3f452d1598a2f55ef206ce3ace2 Mon Sep 17 00:00:00 2001
+From: Ravi Kumar <ravi1.kumar@amd.com>
+Date: Mon, 5 Nov 2018 19:44:33 +0530
+Subject: [PATCH 5717/5725] Reverted: Update KFD-Thunk ioctl ABI to match
+ upstream patch
+
+Signed-off-by: Ravi Kumar <ravi1.kumar@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 4 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 141 ++++++++++++++---------
+ drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c | 10 ++
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 2 +
+ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 11 +-
+ drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 20 ++--
+ include/uapi/linux/kfd_ioctl.h | 82 ++++++-------
+ 7 files changed, 156 insertions(+), 114 deletions(-)
+ mode change 100644 => 100755 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index 15a64e8..3b305b3 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -1288,9 +1288,9 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
+ VI_BO_SIZE_ALIGN : 1;
+
+ mapping_flags = AMDGPU_VM_PAGE_READABLE;
+- if (flags & ALLOC_MEM_FLAGS_WRITABLE)
++ if (!(flags & ALLOC_MEM_FLAGS_READONLY))
+ mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
+- if (flags & ALLOC_MEM_FLAGS_EXECUTABLE)
++ if (flags & ALLOC_MEM_FLAGS_EXECUTE_ACCESS)
+ mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
+ if (flags & ALLOC_MEM_FLAGS_COHERENT)
+ mapping_flags |= AMDGPU_VM_MTYPE_UC;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+old mode 100644
+new mode 100755
+index 9d92522..d94727a
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -1085,14 +1085,17 @@ static int kfd_ioctl_wait_events(struct file *filp, struct kfd_process *p,
+
+ return err;
+ }
+-static int kfd_ioctl_set_scratch_backing_va(struct file *filep,
++static int kfd_ioctl_alloc_scratch_memory(struct file *filep,
+ struct kfd_process *p, void *data)
+ {
+- struct kfd_ioctl_set_scratch_backing_va_args *args = data;
++ struct kfd_ioctl_alloc_memory_of_scratch_args *args = data;
+ struct kfd_process_device *pdd;
+ struct kfd_dev *dev;
+ long err;
+
++ if (args->size == 0)
++ return -EINVAL;
++
+ dev = kfd_device_by_id(args->gpu_id);
+ if (!dev)
+ return -EINVAL;
+@@ -1458,7 +1461,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
+ void *mem;
+ struct kfd_dev *dev, *peer;
+ long err = 0;
+- int i;
++ int i, num_dev = 0;
+ uint32_t *devices_arr = NULL;
+
+ trace_kfd_map_memory_to_gpu_start(p);
+@@ -1466,23 +1469,24 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
+ if (!dev)
+ return -EINVAL;
+
+- if (!args->n_devices) {
+- pr_debug("Device IDs array empty\n");
++ if (args->device_ids_array_size == 0) {
++ pr_debug("Device ID array size is 0\n");
+ return -EINVAL;
+ }
+- if (args->n_success > args->n_devices) {
+- pr_debug("n_success exceeds n_devices\n");
++
++ if (args->device_ids_array_size % sizeof(uint32_t)) {
++ pr_debug("Node IDs array size %u\n",
++ args->device_ids_array_size);
+ return -EINVAL;
+ }
+
+- devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr),
+- GFP_KERNEL);
++ devices_arr = kmalloc(args->device_ids_array_size, GFP_KERNEL);
+ if (!devices_arr)
+ return -ENOMEM;
+
+ err = copy_from_user(devices_arr,
+- (void __user *)args->device_ids_array_ptr,
+- args->n_devices * sizeof(*devices_arr));
++ (void __user *)args->device_ids_array_ptr,
++ args->device_ids_array_size);
+ if (err != 0) {
+ err = -EFAULT;
+ goto copy_from_user_failed;
+@@ -1503,11 +1507,12 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
+ goto get_mem_obj_from_handle_failed;
+ }
+
+- for (i = args->n_success; i < args->n_devices; i++) {
++ num_dev = args->device_ids_array_size / sizeof(uint32_t);
++ for (i = 0 ; i < num_dev; i++) {
+ peer = kfd_device_by_id(devices_arr[i]);
+ if (!peer) {
+ pr_debug("Getting device by id failed for 0x%x\n",
+- devices_arr[i]);
++ devices_arr[i]);
+ err = -EINVAL;
+ goto get_mem_obj_from_handle_failed;
+ }
+@@ -1518,13 +1523,12 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
+ goto get_mem_obj_from_handle_failed;
+ }
+ err = peer->kfd2kgd->map_memory_to_gpu(
+- peer->kgd, (struct kgd_mem *)mem, peer_pdd->vm);
+- if (err) {
+- pr_err("Failed to map to gpu %d/%d\n",
+- i, args->n_devices);
++ peer->kgd, (struct kgd_mem *)mem, peer_pdd->vm);
++ if (err != 0) {
++ pr_err("Failed to map to gpu %d, num_dev=%d\n",
++ i, num_dev);
+ goto map_memory_to_gpu_failed;
+ }
+- args->n_success = i+1;
+ }
+
+ mutex_unlock(&p->mutex);
+@@ -1536,7 +1540,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
+ }
+
+ /* Flush TLBs after waiting for the page table updates to complete */
+- for (i = 0; i < args->n_devices; i++) {
++ for (i = 0; i < num_dev; i++) {
+ peer = kfd_device_by_id(devices_arr[i]);
+ if (WARN_ON_ONCE(!peer))
+ continue;
+@@ -1549,7 +1553,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
+ kfree(devices_arr);
+
+ trace_kfd_map_memory_to_gpu_end(p,
+- args->n_devices * sizeof(*devices_arr), "Success");
++ num_dev * sizeof(*devices_arr), "Success");
+ return err;
+
+ bind_process_to_device_failed:
+@@ -1560,7 +1564,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
+ sync_memory_failed:
+ kfree(devices_arr);
+ trace_kfd_map_memory_to_gpu_end(p,
+- args->n_devices * sizeof(*devices_arr), "Failed");
++ num_dev * sizeof(*devices_arr), "Failed");
+
+ return err;
+ }
+@@ -1573,29 +1577,30 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
+ void *mem;
+ struct kfd_dev *dev, *peer;
+ long err = 0;
+- uint32_t *devices_arr = NULL, i;
++ uint32_t *devices_arr = NULL, num_dev, i;
+
+ dev = kfd_device_by_id(GET_GPU_ID(args->handle));
+ if (!dev)
+ return -EINVAL;
+
+- if (!args->n_devices) {
+- pr_debug("Device IDs array empty\n");
++ if (args->device_ids_array_size == 0) {
++ pr_debug("Device ID array size is 0\n");
+ return -EINVAL;
+ }
+- if (args->n_success > args->n_devices) {
+- pr_debug("n_success exceeds n_devices\n");
++
++ if (args->device_ids_array_size % sizeof(uint32_t)) {
++ pr_debug("Node IDs array size %u\n",
++ args->device_ids_array_size);
+ return -EINVAL;
+ }
+
+- devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr),
+- GFP_KERNEL);
++ devices_arr = kmalloc(args->device_ids_array_size, GFP_KERNEL);
+ if (!devices_arr)
+ return -ENOMEM;
+
+ err = copy_from_user(devices_arr,
+- (void __user *)args->device_ids_array_ptr,
+- args->n_devices * sizeof(*devices_arr));
++ (void __user *)args->device_ids_array_ptr,
++ args->device_ids_array_size);
+ if (err != 0) {
+ err = -EFAULT;
+ goto copy_from_user_failed;
+@@ -1605,7 +1610,8 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
+
+ pdd = kfd_get_process_device_data(dev, p);
+ if (!pdd) {
+- err = -EINVAL;
++ pr_debug("Process device data doesn't exist\n");
++ err = -ENODEV;
+ goto bind_process_to_device_failed;
+ }
+
+@@ -1616,7 +1622,8 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
+ goto get_mem_obj_from_handle_failed;
+ }
+
+- for (i = args->n_success; i < args->n_devices; i++) {
++ num_dev = args->device_ids_array_size / sizeof(uint32_t);
++ for (i = 0 ; i < num_dev; i++) {
+ peer = kfd_device_by_id(devices_arr[i]);
+ if (!peer) {
+ err = -EINVAL;
+@@ -1632,10 +1639,9 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
+ peer->kgd, (struct kgd_mem *)mem, peer_pdd->vm);
+ if (err) {
+ pr_err("Failed to unmap from gpu %d/%d\n",
+- i, args->n_devices);
++ i, num_dev);
+ goto unmap_memory_from_gpu_failed;
+ }
+- args->n_success = i+1;
+ }
+ kfree(devices_arr);
+
+@@ -1652,6 +1658,34 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
+ return err;
+ }
+
++static int kfd_ioctl_set_process_dgpu_aperture(struct file *filep,
++ struct kfd_process *p, void *data)
++{
++ struct kfd_ioctl_set_process_dgpu_aperture_args *args = data;
++ struct kfd_dev *dev;
++ struct kfd_process_device *pdd;
++ long err;
++
++ dev = kfd_device_by_id(args->gpu_id);
++ if (!dev)
++ return -EINVAL;
++
++ mutex_lock(&p->mutex);
++
++ pdd = kfd_bind_process_to_device(dev, p);
++ if (IS_ERR(pdd)) {
++ err = PTR_ERR(pdd);
++ goto exit;
++ }
++
++ err = kfd_set_process_dgpu_aperture(pdd, args->dgpu_base,
++ args->dgpu_limit);
++
++exit:
++ mutex_unlock(&p->mutex);
++ return err;
++}
++
+ static int kfd_ioctl_get_dmabuf_info(struct file *filep,
+ struct kfd_process *p, void *data)
+ {
+@@ -1944,7 +1978,7 @@ static int kfd_create_cma_system_bo(struct kfd_dev *kdev, struct kfd_bo *bo,
+ uint64_t bo_size = 0;
+ struct dma_fence *f;
+
+- uint32_t flags = ALLOC_MEM_FLAGS_GTT | ALLOC_MEM_FLAGS_WRITABLE |
++ uint32_t flags = ALLOC_MEM_FLAGS_GTT | ALLOC_MEM_FLAGS_NONPAGED |
+ ALLOC_MEM_FLAGS_NO_SUBSTITUTE;
+
+ *cma_bo = NULL;
+@@ -2739,21 +2773,6 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_WAVE_CONTROL,
+ kfd_ioctl_dbg_wave_control, 0),
+
+- AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_SCRATCH_BACKING_VA,
+- kfd_ioctl_set_scratch_backing_va, 0),
+-
+- AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_TILE_CONFIG,
+- kfd_ioctl_get_tile_config, 0),
+-
+- AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_TRAP_HANDLER,
+- kfd_ioctl_set_trap_handler, 0),
+-
+- AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES_NEW,
+- kfd_ioctl_get_process_apertures_new, 0),
+-
+- AMDKFD_IOCTL_DEF(AMDKFD_IOC_ACQUIRE_VM,
+- kfd_ioctl_acquire_vm, 0),
+-
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_MEMORY_OF_GPU,
+ kfd_ioctl_alloc_memory_of_gpu, 0),
+
+@@ -2766,15 +2785,30 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU,
+ kfd_ioctl_unmap_memory_from_gpu, 0),
+
++ AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_MEMORY_OF_SCRATCH,
++ kfd_ioctl_alloc_scratch_memory, 0),
++
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_CU_MASK,
+ kfd_ioctl_set_cu_mask, 0),
+
++ AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_PROCESS_DGPU_APERTURE,
++ kfd_ioctl_set_process_dgpu_aperture, 0),
++
++ AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_TRAP_HANDLER,
++ kfd_ioctl_set_trap_handler, 0),
++
++ AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES_NEW,
++ kfd_ioctl_get_process_apertures_new, 0),
++
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_DMABUF_INFO,
+ kfd_ioctl_get_dmabuf_info, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_IMPORT_DMABUF,
+ kfd_ioctl_import_dmabuf, 0),
+
++ AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_TILE_CONFIG,
++ kfd_ioctl_get_tile_config, 0),
++
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_IPC_IMPORT_HANDLE,
+ kfd_ioctl_ipc_import_handle, 0),
+
+@@ -2787,8 +2821,11 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_QUEUE_WAVE_STATE,
+ kfd_ioctl_get_queue_wave_state, 0),
+
+- AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_TRAP,
+- kfd_ioctl_dbg_set_debug_trap, 0),
++// AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_TRAP,
++// kfd_ioctl_dbg_set_debug_trap, 0),
++
++ AMDKFD_IOCTL_DEF(AMDKFD_IOC_ACQUIRE_VM,
++ kfd_ioctl_acquire_vm, 0)
+
+ };
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+index 8f123a2..ebe721b 100755
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+@@ -312,6 +312,16 @@
+ #define SVM_CWSR_BASE (SVM_USER_BASE - KFD_CWSR_TBA_TMA_SIZE)
+ #define SVM_IB_BASE (SVM_CWSR_BASE - PAGE_SIZE)
+
++int kfd_set_process_dgpu_aperture(struct kfd_process_device *pdd,
++ uint64_t base, uint64_t limit)
++{
++ if (base < SVM_USER_BASE) {
++ pr_err("Set dgpu vm base 0x%llx failed.\n", base);
++ return -EINVAL;
++ }
++ return 0;
++}
++
+ static void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id)
+ {
+ /*
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index 1f0d558..34bef7e 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -957,6 +957,8 @@ int kgd2kfd_post_reset(struct kfd_dev *kfd);
+
+ /* amdkfd Apertures */
+ int kfd_init_apertures(struct kfd_process *process);
++int kfd_set_process_dgpu_aperture(struct kfd_process_device *pdd,
++ uint64_t base, uint64_t limit);
+
+ /* Queue Context Management */
+ int init_queue(struct queue **q, const struct queue_properties *properties);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index 7b9e587..69815c3 100755
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -189,10 +189,8 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
+ static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
+ {
+ struct qcm_process_device *qpd = &pdd->qpd;
+- uint32_t flags = ALLOC_MEM_FLAGS_GTT |
+- ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
+- ALLOC_MEM_FLAGS_WRITABLE |
+- ALLOC_MEM_FLAGS_EXECUTABLE;
++ uint32_t flags = ALLOC_MEM_FLAGS_GTT | ALLOC_MEM_FLAGS_NONPAGED |
++ ALLOC_MEM_FLAGS_NO_SUBSTITUTE | ALLOC_MEM_FLAGS_EXECUTE_ACCESS;
+ void *kaddr;
+ int ret;
+
+@@ -518,8 +516,9 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
+ {
+ struct kfd_dev *dev = pdd->dev;
+ struct qcm_process_device *qpd = &pdd->qpd;
+- uint32_t flags = ALLOC_MEM_FLAGS_GTT |
+- ALLOC_MEM_FLAGS_NO_SUBSTITUTE | ALLOC_MEM_FLAGS_EXECUTABLE;
++ uint32_t flags = ALLOC_MEM_FLAGS_GTT | ALLOC_MEM_FLAGS_NONPAGED |
++ ALLOC_MEM_FLAGS_NO_SUBSTITUTE | ALLOC_MEM_FLAGS_READONLY |
++ ALLOC_MEM_FLAGS_EXECUTE_ACCESS;
+ void *kaddr;
+ int ret;
+
+diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+index e7ba62c..49a02e9 100755
+--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
++++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+@@ -191,22 +191,22 @@ struct tile_config {
+ * Allocation flag domains
+ * NOTE: This must match the corresponding definitions in kfd_ioctl.h.
+ */
+-#define ALLOC_MEM_FLAGS_VRAM (1 << 0)
+-#define ALLOC_MEM_FLAGS_GTT (1 << 1)
+-#define ALLOC_MEM_FLAGS_USERPTR (1 << 2)
+-#define ALLOC_MEM_FLAGS_DOORBELL (1 << 3)
+-
++#define ALLOC_MEM_FLAGS_VRAM (1 << 0)
++#define ALLOC_MEM_FLAGS_GTT (1 << 1)
++#define ALLOC_MEM_FLAGS_USERPTR (1 << 2)
++#define ALLOC_MEM_FLAGS_DOORBELL (1 << 3)
+
+ /*
+ * Allocation flags attributes/access options.
+ * NOTE: This must match the corresponding definitions in kfd_ioctl.h.
+ */
+-#define ALLOC_MEM_FLAGS_WRITABLE (1 << 31)
+-#define ALLOC_MEM_FLAGS_EXECUTABLE (1 << 30)
+-#define ALLOC_MEM_FLAGS_PUBLIC (1 << 29)
+-#define ALLOC_MEM_FLAGS_NO_SUBSTITUTE (1 << 28) /* TODO */
++#define ALLOC_MEM_FLAGS_NONPAGED (1 << 31)
++#define ALLOC_MEM_FLAGS_READONLY (1 << 30)
++#define ALLOC_MEM_FLAGS_PUBLIC (1 << 29)
++#define ALLOC_MEM_FLAGS_NO_SUBSTITUTE (1 << 28)
+ #define ALLOC_MEM_FLAGS_AQL_QUEUE_MEM (1 << 27)
+-#define ALLOC_MEM_FLAGS_COHERENT (1 << 26) /* For GFXv9 or later */
++#define ALLOC_MEM_FLAGS_EXECUTE_ACCESS (1 << 26)
++#define ALLOC_MEM_FLAGS_COHERENT (1 << 25)
+
+ /**
+ * struct kfd2kgd_calls
+diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
+index 7bef0e4..80640e69 100755
+--- a/include/uapi/linux/kfd_ioctl.h
++++ b/include/uapi/linux/kfd_ioctl.h
+@@ -320,12 +320,6 @@ struct kfd_ioctl_wait_events_args {
+ uint32_t wait_result; /* from KFD */
+ };
+
+-struct kfd_ioctl_set_scratch_backing_va_args {
+- __u64 va_addr; /* to KFD */
+- __u32 gpu_id; /* to KFD */
+- __u32 pad;
+-};
+-
+ struct kfd_ioctl_alloc_memory_of_scratch_args {
+ uint64_t va_addr; /* to KFD */
+ uint64_t size; /* to KFD */
+@@ -366,17 +360,17 @@ struct kfd_ioctl_free_memory_of_gpu_args {
+ };
+
+ struct kfd_ioctl_map_memory_to_gpu_args {
+- uint64_t handle; /* to KFD */
+- uint64_t device_ids_array_ptr; /* to KFD */
+- uint32_t n_devices; /* to KFD */
+- uint32_t n_success; /* to/from KFD */
++ uint64_t handle; /* to KFD */
++ uint64_t device_ids_array_ptr; /* to KFD */
++ uint32_t device_ids_array_size; /* to KFD */
++ uint32_t pad;
+ };
+
+ struct kfd_ioctl_unmap_memory_from_gpu_args {
+- uint64_t handle; /* to KFD */
+- uint64_t device_ids_array_ptr; /* to KFD */
+- uint32_t n_devices; /* to KFD */
+- uint32_t n_success; /* to/from KFD */
++ uint64_t handle; /* to KFD */
++ uint64_t device_ids_array_ptr; /* to KFD */
++ uint32_t device_ids_array_size; /* to KFD */
++ uint32_t pad;
+ };
+
+ struct kfd_ioctl_set_process_dgpu_aperture_args {
+@@ -535,57 +529,57 @@ struct kfd_ioctl_get_tile_config_args {
+ #define AMDKFD_IOC_DBG_WAVE_CONTROL \
+ AMDKFD_IOW(0x10, struct kfd_ioctl_dbg_wave_control_args)
+
+-#define AMDKFD_IOC_SET_SCRATCH_BACKING_VA \
+- AMDKFD_IOWR(0x11, struct kfd_ioctl_set_scratch_backing_va_args)
+-
+-#define AMDKFD_IOC_GET_TILE_CONFIG \
+- AMDKFD_IOWR(0x12, struct kfd_ioctl_get_tile_config_args)
+-
+-#define AMDKFD_IOC_SET_TRAP_HANDLER \
+- AMDKFD_IOW(0x13, struct kfd_ioctl_set_trap_handler_args)
+-
+-#define AMDKFD_IOC_GET_PROCESS_APERTURES_NEW \
+- AMDKFD_IOWR(0x14, \
+- struct kfd_ioctl_get_process_apertures_new_args)
+-
+-#define AMDKFD_IOC_ACQUIRE_VM \
+- AMDKFD_IOW(0x15, struct kfd_ioctl_acquire_vm_args)
+-
+ #define AMDKFD_IOC_ALLOC_MEMORY_OF_GPU \
+- AMDKFD_IOWR(0x16, struct kfd_ioctl_alloc_memory_of_gpu_args)
++ AMDKFD_IOWR(0x11, struct kfd_ioctl_alloc_memory_of_gpu_args)
+
+ #define AMDKFD_IOC_FREE_MEMORY_OF_GPU \
+- AMDKFD_IOW(0x17, struct kfd_ioctl_free_memory_of_gpu_args)
++ AMDKFD_IOW(0x12, struct kfd_ioctl_free_memory_of_gpu_args)
+
+ #define AMDKFD_IOC_MAP_MEMORY_TO_GPU \
+- AMDKFD_IOWR(0x18, struct kfd_ioctl_map_memory_to_gpu_args)
++ AMDKFD_IOWR(0x13, struct kfd_ioctl_map_memory_to_gpu_args)
+
+ #define AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU \
+- AMDKFD_IOWR(0x19, struct kfd_ioctl_unmap_memory_from_gpu_args)
++ AMDKFD_IOWR(0x14, struct kfd_ioctl_unmap_memory_from_gpu_args)
++
++#define AMDKFD_IOC_ALLOC_MEMORY_OF_SCRATCH \
++ AMDKFD_IOWR(0x15, struct kfd_ioctl_alloc_memory_of_scratch_args)
+
+ #define AMDKFD_IOC_SET_CU_MASK \
+- AMDKFD_IOW(0x1A, struct kfd_ioctl_set_cu_mask_args)
++ AMDKFD_IOW(0x16, struct kfd_ioctl_set_cu_mask_args)
++
++#define AMDKFD_IOC_SET_PROCESS_DGPU_APERTURE \
++ AMDKFD_IOW(0x17, \
++ struct kfd_ioctl_set_process_dgpu_aperture_args)
+
+-#define AMDKFD_IOC_GET_QUEUE_WAVE_STATE \
+- AMDKFD_IOWR(0x1B, struct kfd_ioctl_get_queue_wave_state_args)
++#define AMDKFD_IOC_SET_TRAP_HANDLER \
++ AMDKFD_IOW(0x18, struct kfd_ioctl_set_trap_handler_args)
++
++#define AMDKFD_IOC_GET_PROCESS_APERTURES_NEW \
++ AMDKFD_IOWR(0x19, struct kfd_ioctl_get_process_apertures_new_args)
+
+ #define AMDKFD_IOC_GET_DMABUF_INFO \
+- AMDKFD_IOWR(0x1C, struct kfd_ioctl_get_dmabuf_info_args)
++ AMDKFD_IOWR(0x1A, struct kfd_ioctl_get_dmabuf_info_args)
+
+ #define AMDKFD_IOC_IMPORT_DMABUF \
+- AMDKFD_IOWR(0x1D, struct kfd_ioctl_import_dmabuf_args)
++ AMDKFD_IOWR(0x1B, struct kfd_ioctl_import_dmabuf_args)
++
++#define AMDKFD_IOC_GET_TILE_CONFIG \
++ AMDKFD_IOWR(0x1C, struct kfd_ioctl_get_tile_config_args)
+
+ #define AMDKFD_IOC_IPC_IMPORT_HANDLE \
+- AMDKFD_IOWR(0x1E, struct kfd_ioctl_ipc_import_handle_args)
++ AMDKFD_IOWR(0x1D, struct kfd_ioctl_ipc_import_handle_args)
+
+ #define AMDKFD_IOC_IPC_EXPORT_HANDLE \
+- AMDKFD_IOWR(0x1F, struct kfd_ioctl_ipc_export_handle_args)
++ AMDKFD_IOWR(0x1E, struct kfd_ioctl_ipc_export_handle_args)
+
+ #define AMDKFD_IOC_CROSS_MEMORY_COPY \
+- AMDKFD_IOWR(0x20, struct kfd_ioctl_cross_memory_copy_args)
++ AMDKFD_IOWR(0x1F, struct kfd_ioctl_cross_memory_copy_args)
++
++#define AMDKFD_IOC_GET_QUEUE_WAVE_STATE \
++ AMDKFD_IOWR(0x20, struct kfd_ioctl_get_queue_wave_state_args)
+
+-#define AMDKFD_IOC_DBG_TRAP \
+- AMDKFD_IOW(0x21, struct kfd_ioctl_dbg_trap_args)
++#define AMDKFD_IOC_ACQUIRE_VM \
++ AMDKFD_IOW(0x21, struct kfd_ioctl_acquire_vm_args)
+
+ #define AMDKFD_COMMAND_START 0x01
+ #define AMDKFD_COMMAND_END 0x22
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5718-SWDEV-168581-dc-fix-sporadic-multiple-aux-transactio.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5718-SWDEV-168581-dc-fix-sporadic-multiple-aux-transactio.patch
new file mode 100644
index 00000000..3ac61cf6
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5718-SWDEV-168581-dc-fix-sporadic-multiple-aux-transactio.patch
@@ -0,0 +1,183 @@
+From 3e75670f11974edd5bab83b6978f98e1235f8f67 Mon Sep 17 00:00:00 2001
+From: Yogesh Mohan Marimuthu <yogesh.mohanmarimuthu@amd.com>
+Date: Thu, 1 Nov 2018 23:58:30 +0530
+Subject: [PATCH 5718/5725] SWDEV-168581 - dc: fix sporadic multiple aux
+ transaction failure
+
+[why]
+When there are multiple aux transactions in parallel, the aux
+transactions sometimes sporadically start to fail continuously. The
+aux transaction was failing because the busy bit for the given gpio
+pin was always set. The busy bit was always set because the
+read-modify-write sequence that programs the busy bit was not
+atomic. As a result, when multiple threads modify the busy bits for
+their gpio pins in the same integer variable, the variable is
+sometimes written back with stale data, causing the failure.
+
+[how]
+Instead of using individual bits to track gpio pins and grouping
+them into integers, allocate one byte for each gpio pin. Whenever a
+gpio pin needs to be marked as in use, writing a value of one to its
+byte is sufficient and other bytes are not affected. Unlike with
+bits, no atomicity is needed when updating whole bytes.
+
+Change-Id: I0dfe561dcb1349e0f98ba1afc899f99120bbd5b7
+Signed-off-by: Yogesh Mohan Marimuthu <yogesh.mohanmarimuthu@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c | 61 ++++++----------------
+ drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h | 7 ++-
+ 2 files changed, 19 insertions(+), 49 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
+index f06d05a..c9fcebc 100644
+--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
++++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
+@@ -56,7 +56,6 @@ struct gpio_service *dal_gpio_service_create(
+ struct dc_context *ctx)
+ {
+ struct gpio_service *service;
+-
+ uint32_t index_of_id;
+
+ service = kzalloc(sizeof(struct gpio_service), GFP_KERNEL);
+@@ -78,43 +77,32 @@ struct gpio_service *dal_gpio_service_create(
+ goto failure_1;
+ }
+
+- /* allocate and initialize business storage */
++ /* allocate and initialize busyness storage */
+ {
+- const uint32_t bits_per_uint = sizeof(uint32_t) << 3;
+-
+ index_of_id = 0;
+ service->ctx = ctx;
+
+ do {
+ uint32_t number_of_bits =
+ service->factory.number_of_pins[index_of_id];
++ uint32_t i = 0;
+
+- uint32_t number_of_uints =
+- (number_of_bits + bits_per_uint - 1) /
+- bits_per_uint;
+-
+- uint32_t *slot;
+-
+- if (number_of_bits) {
+- uint32_t index_of_uint = 0;
++ if (number_of_bits) {
++ service->busyness[index_of_id] =
++ kzalloc(number_of_bits * sizeof(char),
++ GFP_KERNEL);
+
+- slot = kzalloc(number_of_uints * sizeof(uint32_t),
+- GFP_KERNEL);
+-
+- if (!slot) {
++ if (!service->busyness[index_of_id]) {
+ BREAK_TO_DEBUGGER();
+ goto failure_2;
+ }
+
+ do {
+- slot[index_of_uint] = 0;
+-
+- ++index_of_uint;
+- } while (index_of_uint < number_of_uints);
++ service->busyness[index_of_id][i] = 0;
++ ++i;
++ } while (i < number_of_bits);
+ } else
+- slot = NULL;
+-
+- service->busyness[index_of_id] = slot;
++ service->busyness[index_of_id] = NULL;
+
+ ++index_of_id;
+ } while (index_of_id < GPIO_ID_COUNT);
+@@ -124,13 +112,8 @@ struct gpio_service *dal_gpio_service_create(
+
+ failure_2:
+ while (index_of_id) {
+- uint32_t *slot;
+-
+ --index_of_id;
+-
+- slot = service->busyness[index_of_id];
+-
+- kfree(slot);
++ kfree(service->busyness[index_of_id]);
+ }
+
+ failure_1:
+@@ -168,9 +151,7 @@ void dal_gpio_service_destroy(
+ uint32_t index_of_id = 0;
+
+ do {
+- uint32_t *slot = (*ptr)->busyness[index_of_id];
+-
+- kfree(slot);
++ kfree((*ptr)->busyness[index_of_id]);
+
+ ++index_of_id;
+ } while (index_of_id < GPIO_ID_COUNT);
+@@ -191,11 +172,7 @@ static bool is_pin_busy(
+ enum gpio_id id,
+ uint32_t en)
+ {
+- const uint32_t bits_per_uint = sizeof(uint32_t) << 3;
+-
+- const uint32_t *slot = service->busyness[id] + (en / bits_per_uint);
+-
+- return 0 != (*slot & (1 << (en % bits_per_uint)));
++ return service->busyness[id][en];
+ }
+
+ static void set_pin_busy(
+@@ -203,10 +180,7 @@ static void set_pin_busy(
+ enum gpio_id id,
+ uint32_t en)
+ {
+- const uint32_t bits_per_uint = sizeof(uint32_t) << 3;
+-
+- service->busyness[id][en / bits_per_uint] |=
+- (1 << (en % bits_per_uint));
++ service->busyness[id][en] = true;
+ }
+
+ static void set_pin_free(
+@@ -214,10 +188,7 @@ static void set_pin_free(
+ enum gpio_id id,
+ uint32_t en)
+ {
+- const uint32_t bits_per_uint = sizeof(uint32_t) << 3;
+-
+- service->busyness[id][en / bits_per_uint] &=
+- ~(1 << (en % bits_per_uint));
++ service->busyness[id][en] = false;
+ }
+
+ enum gpio_result dal_gpio_service_open(
+diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h
+index c7f3081..1d501a4 100644
+--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h
++++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h
+@@ -36,10 +36,9 @@ struct gpio_service {
+ /*
+ * @brief
+ * Business storage.
+- * For each member of 'enum gpio_id',
+- * store array of bits (packed into uint32_t slots),
+- * index individual bit by 'en' value */
+- uint32_t *busyness[GPIO_ID_COUNT];
++ * one byte For each member of 'enum gpio_id'
++ */
++ char *busyness[GPIO_ID_COUNT];
+ };
+
+ enum gpio_result dal_gpio_service_open(
+--
+2.7.4
+
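The race fixed by the patch above is a classic lost update: marking a pin busy was a read-modify-write of a shared 32-bit word, so two threads updating different pins could clobber each other's bit. A minimal standalone C sketch (illustrative only, not driver code) contrasting the old packed-bit bookkeeping with the per-pin byte array the patch switches to:

/* Old scheme: one bit per pin, packed into a shared word. Setting a
 * pin's bit is load + OR + store; without a lock, a concurrent update
 * to another pin in the same word can be overwritten with stale data.
 */
static unsigned int busy_bits;

static void set_pin_busy_bit(unsigned int pin)
{
	busy_bits |= 1u << pin;		/* non-atomic read-modify-write */
}

/* New scheme: one byte per pin. Each pin owns its own storage, so
 * marking it busy is a single store that cannot disturb other pins.
 */
static char busy_bytes[32];

static void set_pin_busy_byte(unsigned int pin)
{
	busy_bytes[pin] = 1;		/* plain store, no shared RMW */
}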
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5719-drm-amdkfd-Rebsed-some-changes-in-kfd.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5719-drm-amdkfd-Rebsed-some-changes-in-kfd.patch
new file mode 100644
index 00000000..c50ef702
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5719-drm-amdkfd-Rebsed-some-changes-in-kfd.patch
@@ -0,0 +1,333 @@
+From b565f8afba65401dfbef063eedebf8805fddc583 Mon Sep 17 00:00:00 2001
+From: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+Date: Mon, 7 Jan 2019 17:38:51 +0530
+Subject: [PATCH 5719/5725] drm/amdkfd: Rebsed some changes in kfd
+
+Signed-off-by: Ravi Kumar <ravi1.kumar@amd.com>
+Signed-off-by: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 56 +++++++---------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 4 --
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 29 +++++++----
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 26 ++++++++--
+ drivers/gpu/drm/amd/amdkfd/kfd_device.c | 22 +--------
+ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 1 +
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c | 0
+ 7 files changed, 64 insertions(+), 74 deletions(-)
+ mode change 100644 => 100755 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+ mode change 100644 => 100755 drivers/gpu/drm/amd/amdkfd/kfd_device.c
+ mode change 100644 => 100755 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+ mode change 100644 => 100755 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+index a311a9f..446b013 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+@@ -172,14 +172,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
+ &gpu_resources.doorbell_aperture_size,
+ &gpu_resources.doorbell_start_offset);
+
+- if (adev->asic_type < CHIP_VEGA10) {
+- kgd2kfd->device_init(adev->kfd, &gpu_resources);
+- return;
+- }
+-
+- n = (adev->asic_type < CHIP_VEGA20) ? 2 : 8;
+-
+- for (i = 0; i < n; i += 2) {
++ if (adev->asic_type >= CHIP_VEGA10) {
+ /* On SOC15 the BIF is involved in routing
+ * doorbells using the low 12 bits of the
+ * address. Communicate the assignments to
+@@ -187,31 +180,20 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
+ * process in case of 64-bit doorbells so we
+ * can use each doorbell assignment twice.
+ */
+- if (adev->asic_type == CHIP_VEGA10) {
+- gpu_resources.sdma_doorbell[0][i] =
+- AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 + (i >> 1);
+- gpu_resources.sdma_doorbell[0][i+1] =
+- AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 + 0x200 + (i >> 1);
+- gpu_resources.sdma_doorbell[1][i] =
+- AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 + (i >> 1);
+- gpu_resources.sdma_doorbell[1][i+1] =
+- AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 + 0x200 + (i >> 1);
+- } else {
+- gpu_resources.sdma_doorbell[0][i] =
+- AMDGPU_DOORBELL64_sDMA_ENGINE0 + (i >> 1);
+- gpu_resources.sdma_doorbell[0][i+1] =
+- AMDGPU_DOORBELL64_sDMA_ENGINE0 + 0x200 + (i >> 1);
+- gpu_resources.sdma_doorbell[1][i] =
+- AMDGPU_DOORBELL64_sDMA_ENGINE1 + (i >> 1);
+- gpu_resources.sdma_doorbell[1][i+1] =
+- AMDGPU_DOORBELL64_sDMA_ENGINE1 + 0x200 + (i >> 1);
+- }
+- }
+- /* Doorbells 0x0e0-0ff and 0x2e0-2ff are reserved for
+- * SDMA, IH and VCN. So don't use them for the CP.
+- */
+- gpu_resources.reserved_doorbell_mask = 0x1e0;
+- gpu_resources.reserved_doorbell_val = 0x0e0;
++ gpu_resources.sdma_doorbell[0][0] =
++ AMDGPU_DOORBELL64_sDMA_ENGINE0;
++ gpu_resources.sdma_doorbell[0][1] =
++ AMDGPU_DOORBELL64_sDMA_ENGINE0 + 0x200;
++ gpu_resources.sdma_doorbell[1][0] =
++ AMDGPU_DOORBELL64_sDMA_ENGINE1;
++ gpu_resources.sdma_doorbell[1][1] =
++ AMDGPU_DOORBELL64_sDMA_ENGINE1 + 0x200;
++ /* Doorbells 0x0f0-0ff and 0x2f0-2ff are reserved for
++ * SDMA, IH and VCN. So don't use them for the CP.
++ */
++ gpu_resources.reserved_doorbell_mask = 0x1f0;
++ gpu_resources.reserved_doorbell_val = 0x0f0;
++ }
+
+ kgd2kfd->device_init(adev->kfd, &gpu_resources);
+ }
+@@ -567,11 +549,9 @@ int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
+ void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+- if (adev->powerplay.pp_funcs &&
+- adev->powerplay.pp_funcs->switch_power_profile)
+- amdgpu_dpm_switch_power_profile(adev,
+- PP_SMC_POWER_PROFILE_COMPUTE,
+- !idle);
++ amdgpu_dpm_switch_power_profile(adev,
++ PP_SMC_POWER_PROFILE_COMPUTE,
++ !idle);
+ }
+
+ bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+old mode 100644
+new mode 100755
+index 727d26d..c46d499
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+@@ -769,10 +769,6 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
+ uint32_t temp;
+ struct v9_mqd *m = get_mqd(mqd);
+
+-#if 0
+- unsigned long flags;
+- int retry;
+-#endif
+ if (adev->in_gpu_reset)
+ return -EIO;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index 3b305b3..b9ee87a 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -433,6 +433,23 @@ static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
+ return 0;
+ }
+
++static int sync_vm_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
++ struct dma_fence *f)
++{
++ int ret = amdgpu_sync_fence(adev, sync, f, false);
++
++ /* Sync objects can't handle multiple GPUs (contexts) updating
++ * sync->last_vm_update. Fortunately we don't need it for
++ * KFD's purposes, so we can just drop that fence.
++ */
++ if (sync->last_vm_update) {
++ dma_fence_put(sync->last_vm_update);
++ sync->last_vm_update = NULL;
++ }
++
++ return ret;
++}
++
+ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
+ {
+ struct amdgpu_bo *pd = vm->root.base.bo;
+@@ -443,7 +460,7 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
+ if (ret)
+ return ret;
+
+- return amdgpu_sync_fence(NULL, sync, vm->last_update, false);
++ return sync_vm_fence(adev, sync, vm->last_update);
+ }
+
+ /* add_bo_to_vm - Add a BO to a VM
+@@ -847,7 +864,7 @@ static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
+ /* Add the eviction fence back */
+ amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);
+
+- amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
++ sync_vm_fence(adev, sync, bo_va->last_pt_update);
+
+ return 0;
+ }
+@@ -872,7 +889,7 @@ static int update_gpuvm_pte(struct amdgpu_device *adev,
+ return ret;
+ }
+
+- return amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
++ return sync_vm_fence(adev, sync, bo_va->last_pt_update);
+ }
+
+ static int map_bo_to_gpuvm(struct amdgpu_device *adev,
+@@ -1201,12 +1218,8 @@ void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
+ uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
+ {
+ struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
+- struct amdgpu_bo *pd = avm->root.base.bo;
+- struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
+
+- if (adev->asic_type < CHIP_VEGA10)
+- return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
+- return avm->pd_phys_addr;
++ return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
+ }
+
+ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index d94727a..de5f930 100755
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -86,7 +86,6 @@ int kfd_chardev_init(void)
+
+ kfd_class->devnode = kfd_devnode;
+
+- kfd_class->devnode = kfd_devnode;
+
+ kfd_device = device_create(kfd_class, NULL,
+ MKDEV(kfd_char_dev_major, 0),
+@@ -459,6 +458,9 @@ static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
+
+ mutex_unlock(&p->mutex);
+
++ if (retval)
++ kfree(properties.cu_mask);
++
+ return retval;
+ }
+
+@@ -560,6 +562,11 @@ static int kfd_ioctl_dbg_register(struct file *filep,
+ if (!dev)
+ return -EINVAL;
+
++ if (dev->device_info->asic_family == CHIP_CARRIZO) {
++ pr_debug("kfd_ioctl_dbg_register not supported on CZ\n");
++ return -EINVAL;
++ }
++
+ mutex_lock(&p->mutex);
+ mutex_lock(kfd_get_dbgmgr_mutex());
+
+@@ -606,6 +613,11 @@ static int kfd_ioctl_dbg_unregister(struct file *filep,
+ if (!dev || !dev->dbgmgr)
+ return -EINVAL;
+
++ if (dev->device_info->asic_family == CHIP_CARRIZO) {
++ pr_debug("kfd_ioctl_dbg_unregister not supported on CZ\n");
++ return -EINVAL;
++ }
++
+ mutex_lock(kfd_get_dbgmgr_mutex());
+
+ status = kfd_dbgmgr_unregister(dev->dbgmgr, p);
+@@ -646,6 +658,11 @@ static int kfd_ioctl_dbg_address_watch(struct file *filep,
+ if (!dev)
+ return -EINVAL;
+
++ if (dev->device_info->asic_family == CHIP_CARRIZO) {
++ pr_debug("kfd_ioctl_dbg_wave_control not supported on CZ\n");
++ return -EINVAL;
++ }
++
+ cmd_from_user = (void __user *) args->content_ptr;
+
+ /* Validate arguments */
+@@ -749,6 +766,11 @@ static int kfd_ioctl_dbg_wave_control(struct file *filep,
+ if (!dev)
+ return -EINVAL;
+
++ if (dev->device_info->asic_family == CHIP_CARRIZO) {
++ pr_debug("kfd_ioctl_dbg_wave_control not supported on CZ\n");
++ return -EINVAL;
++ }
++
+ /* input size must match the computed "compact" size */
+ if (args->buf_size_in_bytes != computed_buff_size) {
+ pr_debug("size mismatch, computed : actual %u : %u\n",
+@@ -1033,8 +1055,6 @@ static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
+ }
+ }
+
+-
+-
+ err = kfd_event_create(filp, p, args->event_type,
+ args->auto_reset != 0, args->node_id,
+ &args->event_id, &args->event_trigger_data,
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+old mode 100644
+new mode 100755
+index f78269d..1fb5c65
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -402,10 +402,6 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
+ return NULL;
+ }
+
+- kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
+- if (!kfd)
+- return NULL;
+-
+ /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
+ * 32 and 64-bit requests are possible and must be
+ * supported.
+@@ -419,27 +415,11 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
+ pdev->vendor, pdev->device);
+ return NULL;
+ }
+-
++
+ kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
+ if (!kfd)
+ return NULL;
+
+- /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
+- * 32 and 64-bit requests are possible and must be
+- * supported.
+- */
+- ret = pci_enable_atomic_ops_to_root(pdev,
+- PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
+- PCI_EXP_DEVCAP2_ATOMIC_COMP64);
+- if (device_info->needs_pci_atomics && ret < 0) {
+- dev_info(kfd_device,
+- "skipped device %x:%x, PCI rejects atomics",
+- pdev->vendor, pdev->device);
+- kfree(kfd);
+- return NULL;
+- } else if (!ret)
+- kfd->pci_atomic_requested = true;
+-
+ kfd->kgd = kgd;
+ kfd->device_info = device_info;
+ kfd->pdev = pdev;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+old mode 100644
+new mode 100755
+index cc152e7a..0765048
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+@@ -33,6 +33,7 @@
+
+ #define KFD_UNMAP_LATENCY_MS (4000)
+ #define QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS (2 * KFD_UNMAP_LATENCY_MS + 1000)
++#define KFD_SDMA_QUEUES_PER_ENGINE (2)
+
+ struct device_process_node {
+ struct qcm_process_device *qpd;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
+old mode 100644
+new mode 100755
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5720-drm-amdgpu-Clean-up-KFD-init-and-fini.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5720-drm-amdgpu-Clean-up-KFD-init-and-fini.patch
new file mode 100644
index 00000000..5cc86b48
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5720-drm-amdgpu-Clean-up-KFD-init-and-fini.patch
@@ -0,0 +1,76 @@
+From fd5cb8c9b8c72a3fd6eb462bb173d4da80c0c8b8 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Fri, 31 Aug 2018 16:54:12 -0400
+Subject: [PATCH 5720/5725] drm/amdgpu: Clean up KFD init and fini
+
+Only initialize KFD once by moving amdgpu_amdkfd_init from
+amdgpu_pci_probe to amdgpu_init. This fixes kernel oopses and hangs
+when booting multi-GPU systems.
+
+Also removed some vestiges of KFD being its own module.
+
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 5 +----
+ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 10 ++--------
+ 2 files changed, 3 insertions(+), 12 deletions(-)
+ mode change 100644 => 100755 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+index 446b013..09070d0 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+@@ -31,7 +31,6 @@
+ #include <linux/module.h>
+
+ const struct kgd2kfd_calls *kgd2kfd;
+-bool (*kgd2kfd_init_p)(unsigned int, const struct kgd2kfd_calls**);
+
+ static unsigned int compute_vmid_bitmap = 0xFF00;
+
+@@ -53,10 +52,8 @@ int amdgpu_amdkfd_init(void)
+
+ void amdgpu_amdkfd_fini(void)
+ {
+- if (kgd2kfd) {
++ if (kgd2kfd)
+ kgd2kfd->exit();
+- symbol_put(kgd2kfd_init);
+- }
+ }
+
+ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+old mode 100644
+new mode 100755
+index bc8f35a..da922c6
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -959,14 +959,6 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
+ return -ENODEV;
+ }
+
+- /*
+- * Initialize amdkfd before starting radeon. If it was not loaded yet,
+- * defer radeon probing
+- */
+- ret = amdgpu_amdkfd_init();
+- if (ret == -EPROBE_DEFER)
+- return ret;
+-
+ /* Get rid of things like offb */
+ ret = amdgpu_kick_out_firmware_fb(pdev);
+ if (ret)
+@@ -1321,6 +1313,8 @@ static int __init amdgpu_init(void)
+ msleep(2000);
+ amdgpu_register_atpx_handler();
+ msleep(100);
++ /* Ignore KFD init failures. Normal when CONFIG_HSA_AMD is not set. */
++ amdgpu_amdkfd_init();
+ /* let modprobe override vga console setting */
+ return pci_register_driver(pdriver);
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5721-net-ethernet-xgbe-expand-PHY_GBIT_FEAUTRES.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5721-net-ethernet-xgbe-expand-PHY_GBIT_FEAUTRES.patch
new file mode 100644
index 00000000..ee1e177b
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5721-net-ethernet-xgbe-expand-PHY_GBIT_FEAUTRES.patch
@@ -0,0 +1,104 @@
+From b63082b3f8728f862094a2e1146e2ee4679823ed Mon Sep 17 00:00:00 2001
+From: Sudheesh Mavila <sudheesh.mavila@amd.com>
+Date: Sun, 11 Nov 2018 23:32:49 +0530
+Subject: [PATCH 5721/5725] net: ethernet: xgbe: expand PHY_GBIT_FEAUTRES
+
+From d0939c26c53a2b2cecfbe6953858a58abb0158c7
+The macro PHY_GBIT_FEATURES needs to change into a bitmap in order to
+support link_modes. Remove its use from xgbe by replacing it with its
+definition.
+
+The current behavior is probably wrong; it should probably be
+ANDing, not assigning.
+
+Signed-off-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 14 ++++++++------
+ drivers/net/phy/phy_device.c | 14 ++++++++++++++
+ include/linux/phy.h | 1 +
+ 3 files changed, 23 insertions(+), 6 deletions(-)
+ mode change 100644 => 100755 drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+ mode change 100644 => 100755 drivers/net/phy/phy_device.c
+ mode change 100644 => 100755 include/linux/phy.h
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+old mode 100644
+new mode 100755
+index 3ceb4f9..194ec27
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+@@ -878,9 +878,10 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
+ phy_write(phy_data->phydev, 0x04, 0x0d01);
+ phy_write(phy_data->phydev, 0x00, 0x9140);
+
+- phy_data->phydev->supported = PHY_GBIT_FEATURES;
+- phy_data->phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+- phy_data->phydev->advertising = phy_data->phydev->supported;
++ phy_data->phydev->supported = PHY_10BT_FEATURES |
++ PHY_100BT_FEATURES |
++ PHY_1000BT_FEATURES;
++ phy_support_asym_pause(phy_data->phydev);
+
+ netif_dbg(pdata, drv, pdata->netdev,
+ "Finisar PHY quirk in place\n");
+@@ -950,9 +951,10 @@ static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
+ reg = phy_read(phy_data->phydev, 0x00);
+ phy_write(phy_data->phydev, 0x00, reg & ~0x00800);
+
+- phy_data->phydev->supported = PHY_GBIT_FEATURES;
+- phy_data->phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+- phy_data->phydev->advertising = phy_data->phydev->supported;
++ phy_data->phydev->supported = (PHY_10BT_FEATURES |
++ PHY_100BT_FEATURES |
++ PHY_1000BT_FEATURES);
++ phy_support_asym_pause(phy_data->phydev);
+
+ netif_dbg(pdata, drv, pdata->netdev,
+ "BelFuse PHY quirk in place\n");
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+old mode 100644
+new mode 100755
+index fe76e2c..f16af99
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -1736,6 +1736,20 @@ int phy_set_max_speed(struct phy_device *phydev, u32 max_speed)
+ }
+ EXPORT_SYMBOL(phy_set_max_speed);
+
++/**
++ * phy_support_asym_pause - Enable support of asym pause
++ * @phydev: target phy_device struct
++ *
++ * Description: Called by the MAC to indicate it supports Asym Pause.
++ */
++void phy_support_asym_pause(struct phy_device *phydev)
++{
++ phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
++ phydev->advertising = phydev->supported;
++}
++EXPORT_SYMBOL(phy_support_asym_pause);
++
++
+ static void of_set_phy_supported(struct phy_device *phydev)
+ {
+ struct device_node *node = phydev->mdio.dev.of_node;
+diff --git a/include/linux/phy.h b/include/linux/phy.h
+old mode 100644
+new mode 100755
+index efc04c2..38d36a6
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -913,6 +913,7 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd);
+ int phy_start_interrupts(struct phy_device *phydev);
+ void phy_print_status(struct phy_device *phydev);
+ int phy_set_max_speed(struct phy_device *phydev, u32 max_speed);
++void phy_support_asym_pause(struct phy_device *phydev);
+
+ int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
+ int (*run)(struct phy_device *));
+--
+2.7.4
+
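For context, a hedged sketch of how a MAC driver might use the phy_support_asym_pause() helper added above. The driver and fixup function names are hypothetical; only the helper and the PHY_*_FEATURES masks come from the patch itself:

#include <linux/phy.h>

/* Hypothetical PHY fixup in a MAC driver: restrict the PHY to
 * 10/100/1000BASE-T modes, then let the new helper add
 * Pause | Asym_Pause and copy supported into advertising,
 * mirroring the xgbe quirk functions above.
 */
static void example_mac_phy_fixup(struct phy_device *phydev)
{
	phydev->supported = PHY_10BT_FEATURES |
			    PHY_100BT_FEATURES |
			    PHY_1000BT_FEATURES;

	phy_support_asym_pause(phydev);
}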
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5722-Code-cleanup.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5722-Code-cleanup.patch
new file mode 100644
index 00000000..08c66b6e
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5722-Code-cleanup.patch
@@ -0,0 +1,28 @@
+From 36d62982266efb5a7168bbd12ca45c2f6a383bea Mon Sep 17 00:00:00 2001
+From: Ravi Kumar <ravi1.kumar@amd.com>
+Date: Mon, 3 Dec 2018 12:09:46 +0530
+Subject: [PATCH 5722/5725] Code cleanup.
+
+Cleaned up some unwanted debug prints.
+
+Signed-off-by: Ravi Kumar <ravi1.kumar@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index a0590d8..ef0fc42 100755
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1371,8 +1371,6 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
+
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
+
+- printk(">>Ravi: fw_name: %s adev->dev: %d adev->firmware.gpuinfo_fw: %d\n", fw_name, adev->dev, amdgpu_device_parse_gpu_info_fw);
+-
+ err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
+ if (err) {
+ dev_err(adev->dev,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5723-drm-amdgpu-vcn-Fixed-S3-hung-issue.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5723-drm-amdgpu-vcn-Fixed-S3-hung-issue.patch
new file mode 100644
index 00000000..4aa7efc9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5723-drm-amdgpu-vcn-Fixed-S3-hung-issue.patch
@@ -0,0 +1,35 @@
+From b1c81e03a897481d30aa7e100555523e42a38e28 Mon Sep 17 00:00:00 2001
+From: Indrajit Das <indrajit-kumar.das@amd.com>
+Date: Tue, 4 Dec 2018 13:42:13 +0530
+Subject: [PATCH 5723/5725] drm/amdgpu/vcn:Fixed S3 hung issue
+
+Provided by James Zhu
+---
+ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index 0d80b44..3f17116 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -48,6 +48,8 @@ static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
+ static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev);
+ static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
+ static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr);
++static int vcn_v1_0_set_powergating_state(void *handle,
++ enum amd_powergating_state state);
+
+ /**
+ * vcn_v1_0_early_init - set function pointers
+@@ -222,7 +224,7 @@ static int vcn_v1_0_hw_fini(void *handle)
+ struct amdgpu_ring *ring = &adev->vcn.ring_dec;
+
+ if (RREG32_SOC15(VCN, 0, mmUVD_STATUS))
+- vcn_v1_0_stop(adev);
++ vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+
+ ring->ready = false;
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5724-drm-amdgpu-change-VEGA-booting-with-firmware-loaded-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5724-drm-amdgpu-change-VEGA-booting-with-firmware-loaded-.patch
new file mode 100644
index 00000000..a328151d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5724-drm-amdgpu-change-VEGA-booting-with-firmware-loaded-.patch
@@ -0,0 +1,54 @@
+From 503118b5a7aa634b92da29551641a422771aa8a9 Mon Sep 17 00:00:00 2001
+From: Feifei Xu <Feifei.Xu@amd.com>
+Date: Tue, 14 Aug 2018 14:53:53 -0400
+Subject: [PATCH 5724/5725] drm/amdgpu:change VEGA booting with firmware loaded
+ by PSP
+
+With PSP firmware loading, TMR mc address is supposed to be used.
+
+Signed-off-by: James Zhu <James.Zhu@amd.com>
+Acked-by: Huang Rui <ray.huang@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 13 +++++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+index 07cb92e..f7fa129 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+@@ -662,9 +662,14 @@ static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
+ continue;
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+- lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
++ i == 0 ?
++ adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo:
++ adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_lo);
+ WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+- upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
++ i == 0 ?
++ adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi:
++ adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_hi);
++ WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
+ offset = 0;
+ } else {
+ WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+@@ -672,10 +677,10 @@ static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
+ WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+ upper_32_bits(adev->uvd.inst[i].gpu_addr));
+ offset = size;
++ WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
++ AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+ }
+
+- WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
+- AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+ WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size);
+
+ WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5725-Fix-compilation-error-for-kfd.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5725-Fix-compilation-error-for-kfd.patch
new file mode 100644
index 00000000..c6025c0d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5725-Fix-compilation-error-for-kfd.patch
@@ -0,0 +1,45 @@
+From b0cf79f58e8bc41d475f23edca5f9c70ccb13c59 Mon Sep 17 00:00:00 2001
+From: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+Date: Mon, 7 Jan 2019 15:29:04 +0530
+Subject: [PATCH] Fix compilation error for kfd
+
+Signed-off-by: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/Makefile | 2 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 4 ++--
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile
+index df92ebb..1828087 100644
+--- a/drivers/gpu/drm/amd/amdkfd/Makefile
++++ b/drivers/gpu/drm/amd/amdkfd/Makefile
+@@ -57,7 +57,7 @@ AMDKFD_FILES := $(AMDKFD_PATH)/kfd_module.o \
+ $(AMDKFD_PATH)/kfd_crat.o \
+ $(AMDKFD_PATH)/kfd_ipc.o \
+ $(AMDKFD_PATH)/kfd_trace.o \
+- $(AMDKFD_PATH)//kfd_peerdirect.o
++ $(AMDKFD_PATH)/kfd_peerdirect.o
+
+ ifneq ($(CONFIG_AMD_IOMMU_V2),)
+ AMDKFD_FILES += $(AMDKFD_PATH)/kfd_iommu.o
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index 69815c3..4ea0fbd 100755
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -732,11 +732,11 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
+
+ if (drm_file)
+ ret = dev->kfd2kgd->acquire_process_vm(
+- dev->kgd, drm_file,
++ dev->kgd, drm_file,p->pasid,
+ &pdd->vm, &p->kgd_process_info, &p->ef);
+ else
+ ret = dev->kfd2kgd->create_process_vm(
+- dev->kgd, &pdd->vm, &p->kgd_process_info, &p->ef);
++ dev->kgd, p->pasid, &pdd->vm, &p->kgd_process_info, &p->ef);
+ if (ret) {
+ pr_err("Failed to create process VM object\n");
+ return ret;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5726-amd-i2s-fix-to-the-fage-fault-when-iommu-is-enabled.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5726-amd-i2s-fix-to-the-fage-fault-when-iommu-is-enabled.patch
new file mode 100644
index 00000000..28dd5c17
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5726-amd-i2s-fix-to-the-fage-fault-when-iommu-is-enabled.patch
@@ -0,0 +1,156 @@
+From 4f19dca18de5c5d3dc374c946f5d42c3e8f3d396 Mon Sep 17 00:00:00 2001
+From: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+Date: Tue, 12 Feb 2019 19:00:21 +0530
+Subject: [PATCH 5726/5758] amd-i2s fix to the fage fault when iommu is enabled
+
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+Signed-off-by: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+---
+ sound/soc/amd/raven/acp3x-pcm-dma.c | 27 +++++++++++++++------------
+ sound/soc/soc-core.c | 13 +++++++++++--
+ 2 files changed, 26 insertions(+), 14 deletions(-)
+ mode change 100644 => 100755 sound/soc/amd/raven/acp3x-pcm-dma.c
+ mode change 100644 => 100755 sound/soc/soc-core.c
+
+diff --git a/sound/soc/amd/raven/acp3x-pcm-dma.c b/sound/soc/amd/raven/acp3x-pcm-dma.c
+old mode 100644
+new mode 100755
+index 7ed9d0e..3abdf1f
+--- a/sound/soc/amd/raven/acp3x-pcm-dma.c
++++ b/sound/soc/amd/raven/acp3x-pcm-dma.c
+@@ -22,9 +22,12 @@
+ #include <sound/pcm_params.h>
+ #include <sound/soc.h>
+ #include <sound/soc-dai.h>
++#include <linux/pci.h>
+ #include <linux/io.h>
+ #include "acp3x.h"
+
++#define DRV_NAME "acp3x-i2s-audio"
++
+ struct i2s_dev_data {
+ bool tdm_mode;
+ unsigned int i2s_irq;
+@@ -39,7 +42,7 @@ struct i2s_stream_instance {
+ u16 channels;
+ u32 xfer_resolution;
+ u32 val;
+- struct page *pg;
++ dma_addr_t dma_addr;
+ void __iomem *acp3x_base;
+ };
+
+@@ -223,10 +226,10 @@ static irqreturn_t i2s_irq_handler(int irq, void *dev_id)
+ static void config_acp3x_dma(struct i2s_stream_instance *rtd, int direction)
+ {
+ u16 page_idx;
+- u64 addr;
+ u32 low, high, val, acp_fifo_addr;
+- struct page *pg = rtd->pg;
++ dma_addr_t addr;
+
++ addr = rtd->dma_addr;
+ /* 8 scratch registers used to map one 64 bit address.
+ * For 2 pages (4096 * 2 bytes), it will be 16 registers.
+ */
+@@ -243,7 +246,6 @@ static void config_acp3x_dma(struct i2s_stream_instance *rtd, int direction)
+
+ for (page_idx = 0; page_idx < rtd->num_pages; page_idx++) {
+ /* Load the low address of page int ACP SRAM through SRBM */
+- addr = page_to_phys(pg);
+ low = lower_32_bits(addr);
+ high = upper_32_bits(addr);
+
+@@ -253,7 +255,7 @@ static void config_acp3x_dma(struct i2s_stream_instance *rtd, int direction)
+ + 4);
+ /* Move to next physically contiguos page */
+ val += 8;
+- pg++;
++ addr += PAGE_SIZE;
+ }
+
+ if (direction == SNDRV_PCM_STREAM_PLAYBACK) {
+@@ -339,7 +341,6 @@ static int acp3x_dma_hw_params(struct snd_pcm_substream *substream,
+ int status;
+ uint64_t size;
+ struct snd_dma_buffer *dma_buffer;
+- struct page *pg;
+ struct i2s_stream_instance *rtd = substream->runtime->private_data;
+
+ if (rtd == NULL)
+@@ -352,9 +353,8 @@ static int acp3x_dma_hw_params(struct snd_pcm_substream *substream,
+ return status;
+
+ memset(substream->runtime->dma_area, 0, params_buffer_bytes(params));
+- pg = virt_to_page(substream->dma_buffer.area);
+- if (pg != NULL) {
+- rtd->pg = pg;
++ if (substream->dma_buffer.area) {
++ rtd->dma_addr = substream->dma_buffer.addr;
+ rtd->num_pages = (PAGE_ALIGN(size) >> PAGE_SHIFT);
+ config_acp3x_dma(rtd, substream->stream);
+ status = 0;
+@@ -384,9 +384,12 @@ static snd_pcm_uframes_t acp3x_dma_pointer(struct snd_pcm_substream *substream)
+
+ static int acp3x_dma_new(struct snd_soc_pcm_runtime *rtd)
+ {
++ struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd,
++ DRV_NAME);
++ struct device *parent = component->dev->parent;
+ return snd_pcm_lib_preallocate_pages_for_all(rtd->pcm,
+ SNDRV_DMA_TYPE_DEV,
+- NULL, MIN_BUFFER,
++ parent, MIN_BUFFER,
+ MAX_BUFFER);
+ }
+
+@@ -611,7 +614,7 @@ static struct snd_soc_dai_driver acp3x_i2s_dai_driver = {
+ };
+
+ static const struct snd_soc_component_driver acp3x_i2s_component = {
+- .name = "acp3x_i2s",
++ .name = DRV_NAME,
+ };
+
+ static int acp3x_audio_probe(struct platform_device *pdev)
+@@ -802,4 +805,4 @@ module_platform_driver(acp3x_dma_driver);
+ MODULE_AUTHOR("Maruthi.Bayyavarapu@amd.com");
+ MODULE_DESCRIPTION("AMD ACP 3.x PCM Driver");
+ MODULE_LICENSE("GPL v2");
+-MODULE_ALIAS("platform:acp3x-i2s-audio");
++MODULE_ALIAS("platform:"DRV_NAME);
+diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
+old mode 100644
+new mode 100755
+index fee4b0e..719416c
+--- a/sound/soc/soc-core.c
++++ b/sound/soc/soc-core.c
+@@ -590,14 +590,23 @@ struct snd_soc_component *snd_soc_rtdcom_lookup(struct snd_soc_pcm_runtime *rtd,
+ {
+ struct snd_soc_rtdcom_list *rtdcom;
+
++ if (!driver_name)
++ return NULL;
++
+ for_each_rtdcom(rtd, rtdcom) {
+- if ((rtdcom->component->driver->name == driver_name) ||
+- strcmp(rtdcom->component->driver->name, driver_name) == 0)
++ const char *component_name = rtdcom->component->driver->name;
++
++ if (!component_name)
++ continue;
++
++ if ((component_name == driver_name) ||
++ strcmp(component_name, driver_name) == 0)
+ return rtdcom->component;
+ }
+
+ return NULL;
+ }
++EXPORT_SYMBOL_GPL(snd_soc_rtdcom_lookup);
+
+ struct snd_pcm_substream *snd_soc_get_dai_substream(struct snd_soc_card *card,
+ const char *dai_link, int stream)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5727-amd-i2s-dma-pointer-uses-Link-position-counter.This-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5727-amd-i2s-dma-pointer-uses-Link-position-counter.This-.patch
new file mode 100644
index 00000000..3ce63541
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5727-amd-i2s-dma-pointer-uses-Link-position-counter.This-.patch
@@ -0,0 +1,117 @@
+From e0c036d4aad50e29f3a2217324e4faf729f59839 Mon Sep 17 00:00:00 2001
+From: Sudheesh Mavila <sudheesh.mavila@amd.com>
+Date: Tue, 8 Jan 2019 12:05:52 +0530
+Subject: [PATCH 5727/5758] amd-i2s dma pointer uses Link position counter.This
+ has been changed to Linear position counter and this rectifies underruns.
+
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ sound/soc/amd/raven/acp3x-pcm-dma.c | 64 ++++++++++++++++++++++++++++++-------
+ 1 file changed, 53 insertions(+), 11 deletions(-)
+
+diff --git a/sound/soc/amd/raven/acp3x-pcm-dma.c b/sound/soc/amd/raven/acp3x-pcm-dma.c
+index 3abdf1f..3bb4ad3 100755
+--- a/sound/soc/amd/raven/acp3x-pcm-dma.c
++++ b/sound/soc/amd/raven/acp3x-pcm-dma.c
+@@ -37,12 +37,24 @@ struct i2s_dev_data {
+ struct snd_pcm_substream *capture_stream;
+ };
+
++union acp3x_dma_count {
++ struct {
++ u32 low;
++ u32 high;
++ } bcount;
++ u64 bytescount;
++};
++
++
+ struct i2s_stream_instance {
+ u16 num_pages;
+ u16 channels;
+ u32 xfer_resolution;
+ u32 val;
+ dma_addr_t dma_addr;
++ u32 byte_cnt_high_reg_offset;
++ u32 byte_cnt_low_reg_offset;
++ u64 bytescount;
+ void __iomem *acp3x_base;
+ };
+
+@@ -335,6 +347,25 @@ static int acp3x_dma_open(struct snd_pcm_substream *substream)
+ return 0;
+ }
+
++static u64 acp_get_byte_count(struct i2s_stream_instance *rtd, int direction)
++{
++ union acp3x_dma_count byte_count;
++
++ if (direction == SNDRV_PCM_STREAM_PLAYBACK) {
++ byte_count.bcount.high = rv_readl(rtd->acp3x_base +
++ mmACP_BT_TX_LINEARPOSITIONCNTR_HIGH);
++ byte_count.bcount.low = rv_readl(rtd->acp3x_base +
++ mmACP_BT_TX_LINEARPOSITIONCNTR_LOW);
++ } else {
++ byte_count.bcount.high = rv_readl(rtd->acp3x_base +
++ mmACP_BT_RX_LINEARPOSITIONCNTR_HIGH);
++ byte_count.bcount.low = rv_readl(rtd->acp3x_base +
++ mmACP_BT_RX_LINEARPOSITIONCNTR_LOW);
++ }
++ return byte_count.bytescount;
++}
++
++
+ static int acp3x_dma_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+ {
+@@ -366,18 +397,28 @@ static int acp3x_dma_hw_params(struct snd_pcm_substream *substream,
+
+ static snd_pcm_uframes_t acp3x_dma_pointer(struct snd_pcm_substream *substream)
+ {
++ u64 bytescount = 0;
+ u32 pos = 0;
+- struct i2s_stream_instance *rtd = substream->runtime->private_data;
+-
+- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+- pos = rv_readl(rtd->acp3x_base +
+- mmACP_BT_TX_LINKPOSITIONCNTR);
+- else
+- pos = rv_readl(rtd->acp3x_base +
+- mmACP_BT_RX_LINKPOSITIONCNTR);
+-
+- if (pos >= MAX_BUFFER)
+- pos = 0;
++ u32 buffersize = 0;
++ struct i2s_stream_instance *rtd =
++ substream->runtime->private_data;
++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
++ buffersize = frames_to_bytes(substream->runtime,
++ substream->runtime->buffer_size);
++ bytescount = acp_get_byte_count(rtd, substream->stream);
++ if (bytescount > rtd->bytescount)
++ bytescount -= rtd->bytescount;
++ pos = do_div(bytescount, buffersize);
++ } else {
++ buffersize = frames_to_bytes(substream->runtime,
++ substream->runtime->buffer_size);
++ bytescount = acp_get_byte_count(rtd, substream->stream);
++ if (bytescount > rtd->bytescount)
++ bytescount -= rtd->bytescount;
++ pos = do_div(bytescount, buffersize);
++ }
++ if (pos >= MAX_BUFFER)
++ pos %= buffersize;
+
+ return bytes_to_frames(substream->runtime, pos);
+ }
+@@ -545,6 +586,7 @@ static int acp3x_dai_i2s_trigger(struct snd_pcm_substream *substream,
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
++ rtd->bytescount = acp_get_byte_count(rtd, substream->stream);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ val = rv_readl(rtd->acp3x_base + mmACP_BTTDM_ITER);
+ val = val | BIT(0);
+--
+2.7.4
+
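The pointer logic introduced above reduces to: read the monotonically increasing 64-bit linear byte counter, subtract the value latched when the stream was triggered, and take the remainder modulo the ring-buffer size (the driver uses do_div() for the 64-bit division, the usual kernel idiom). A minimal standalone C sketch of that arithmetic, with illustrative names:

#include <stdint.h>

/* Current offset inside the ring buffer, given the hardware's linear
 * byte counter and the counter value captured at trigger time.
 */
static uint32_t ring_position(uint64_t hw_bytes, uint64_t start_bytes,
			      uint32_t buffer_bytes)
{
	uint64_t transferred = hw_bytes - start_bytes;	/* counter only grows */

	return (uint32_t)(transferred % buffer_bytes);	/* wrap to ring size */
}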
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5728-mmc-core-Move-calls-to-prepare_hs400_tuning-closer-t.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5728-mmc-core-Move-calls-to-prepare_hs400_tuning-closer-t.patch
new file mode 100644
index 00000000..e4c306b0
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5728-mmc-core-Move-calls-to-prepare_hs400_tuning-closer-t.patch
@@ -0,0 +1,50 @@
+From 5a52cf8e775283cd8bb279baa6039e7c168fa1da Mon Sep 17 00:00:00 2001
+From: Ulf Hansson <ulf.hansson@linaro.org>
+Date: Tue, 22 May 2018 16:26:26 +0200
+Subject: [PATCH 5728/5758] mmc: core: Move calls to ->prepare_hs400_tuning()
+ closer to mmc code
+
+Move the calls to ->prepare_hs400_tuning() from mmc_retune() into
+mmc_hs400_to_hs200(), as they better belong there, rather than being generic
+to all types of cards.
+
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Reviewed-by: Simon Horman <horms+renesas@verge.net.au>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/mmc/core/host.c | 3 ---
+ drivers/mmc/core/mmc.c | 4 ++++
+ 2 files changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
+index ad88deb..4651e9b 100644
+--- a/drivers/mmc/core/host.c
++++ b/drivers/mmc/core/host.c
+@@ -148,9 +148,6 @@ int mmc_retune(struct mmc_host *host)
+ goto out;
+
+ return_to_hs400 = true;
+-
+- if (host->ops->prepare_hs400_tuning)
+- host->ops->prepare_hs400_tuning(host, &host->ios);
+ }
+
+ err = mmc_execute_tuning(host->card);
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+index 29bba1e..c768d08 100755
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -1283,6 +1283,10 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
+
+ mmc_set_bus_speed(card);
+
++ /* Prepare tuning for HS400 mode. */
++ if (host->ops->prepare_hs400_tuning)
++ host->ops->prepare_hs400_tuning(host, &host->ios);
++
+ return 0;
+
+ out_err:
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5729-mmc-core-more-fine-grained-hooks-for-HS400-tuning.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5729-mmc-core-more-fine-grained-hooks-for-HS400-tuning.patch
new file mode 100644
index 00000000..4bf16a6d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5729-mmc-core-more-fine-grained-hooks-for-HS400-tuning.patch
@@ -0,0 +1,89 @@
+From d6e5823c602272567f80cffdcafbf715192d43fc Mon Sep 17 00:00:00 2001
+From: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+Date: Tue, 12 Feb 2019 19:07:08 +0530
+Subject: [PATCH 5729/5758] mmc: core: more fine-grained hooks for HS400 tuning
+
+This adds two new HS400 tuning operations:
+* hs400_downgrade
+* hs400_complete
+
+These supplement the existing HS400 operation:
+* prepare_hs400_tuning
+
+This is motivated by a requirement of Renesas SDHI for the following:
+1. Disabling SCC before switching back to HS if selection of HS400 has occurred.
+ This can be done in an implementation of hs400_downgrade
+2. Updating registers after switching to HS400
+ This can be done in an implementation of hs400_complete
+
+If hs400_downgrade or hs400_complete are not implemented, they are not
+called. This means there should be no effect on existing drivers, as none
+implement these ops.
+
+Signed-off-by: Simon Horman <horms+renesas@verge.net.au>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+---
+ drivers/mmc/core/mmc.c | 11 +++++++++++
+ include/linux/mmc/host.h | 7 +++++++
+ 2 files changed, 18 insertions(+)
+
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+index c768d08..cd3604d 100755
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -1165,6 +1165,11 @@ static int mmc_select_hs400(struct mmc_card *card)
+ if (!host->ops->set_hs400_dll) {
+ /* Set host controller to HS timing */
+ mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
++
++ /* Prepare host to downgrade to HS timing */
++ if (host->ops->hs400_downgrade)
++ host->ops->hs400_downgrade(host);
++
+ /* Reduce frequency to HS frequency */
+ max_dtr = card->ext_csd.hs_max_dtr;
+ mmc_set_clock(host, max_dtr);
+@@ -1257,6 +1262,9 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
+
+ mmc_set_timing(host, MMC_TIMING_MMC_HS);
+
++ if (host->ops->hs400_downgrade)
++ host->ops->hs400_downgrade(host);
++
+ err = mmc_switch_status(card);
+ if (err)
+ goto out_err;
+@@ -1394,6 +1402,9 @@ static int mmc_select_hs400es(struct mmc_card *card)
+ if (err)
+ goto out_err;
+
++ if (host->ops->hs400_complete)
++ host->ops->hs400_complete(host);
++
+ return 0;
+
+ out_err:
+diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
+index b7d5611..ba4af38 100755
+--- a/include/linux/mmc/host.h
++++ b/include/linux/mmc/host.h
+@@ -145,6 +145,13 @@ struct mmc_host_ops {
+
+ /* Prepare HS400 target operating frequency depending host driver */
+ int (*prepare_hs400_tuning)(struct mmc_host *host, struct mmc_ios *ios);
++
++ /* Prepare for switching from HS400 to HS200 */
++ void (*hs400_downgrade)(struct mmc_host *host);
++
++ /* Complete selection of HS400 */
++ void (*hs400_complete)(struct mmc_host *host);
++
+ /* Prepare enhanced strobe depending host driver */
+ void (*hs400_enhanced_strobe)(struct mmc_host *host,
+ struct mmc_ios *ios);
+--
+2.7.4
+
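To show where the two new callbacks plug in, here is a hedged sketch of a hypothetical host driver's mmc_host_ops (not part of this series; mandatory ops such as .request and .set_ios are omitted). The core only invokes the hooks when they are non-NULL:

#include <linux/mmc/host.h>

/* Undo controller-side HS400 state before the core falls back to
 * HS/HS200 timing, e.g. disable a sampling clock controller (SCC).
 */
static void example_hs400_downgrade(struct mmc_host *host)
{
	/* vendor-specific register programming would go here */
}

/* Finish controller setup once the card is running in HS400 mode,
 * e.g. update DLL/tap values for the final timing.
 */
static void example_hs400_complete(struct mmc_host *host)
{
	/* vendor-specific register programming would go here */
}

static const struct mmc_host_ops example_ops = {
	.hs400_downgrade = example_hs400_downgrade,
	.hs400_complete  = example_hs400_complete,
};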
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5730-mmc-sdhci-Export-sdhci-tuning-function-symbol.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5730-mmc-sdhci-Export-sdhci-tuning-function-symbol.patch
new file mode 100644
index 00000000..9de66ac8
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5730-mmc-sdhci-Export-sdhci-tuning-function-symbol.patch
@@ -0,0 +1,91 @@
+From 7b0182dd9999843427374c2a0ae7d009786d16d6 Mon Sep 17 00:00:00 2001
+From: "ernest.zhang" <ernest.zhang@bayhubtech.com>
+Date: Mon, 16 Jul 2018 14:26:53 +0800
+Subject: [PATCH 5730/5758] mmc: sdhci: Export sdhci tuning function symbol
+
+Export sdhci tuning function symbols which are used by other SD Host
+controller driver modules.
+
+Signed-off-by: ernest.zhang <ernest.zhang@bayhubtech.com>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/mmc/host/sdhci.c | 12 ++++++++----
+ drivers/mmc/host/sdhci.h | 5 +++++
+ 2 files changed, 13 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 8837d45..53f6b2a 100755
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -2041,7 +2041,7 @@ static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
+ return 0;
+ }
+
+-static void sdhci_start_tuning(struct sdhci_host *host)
++void sdhci_start_tuning(struct sdhci_host *host)
+ {
+ u16 ctrl;
+
+@@ -2064,14 +2064,16 @@ static void sdhci_start_tuning(struct sdhci_host *host)
+ sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
+ sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
+ }
++EXPORT_SYMBOL_GPL(sdhci_start_tuning);
+
+-static void sdhci_end_tuning(struct sdhci_host *host)
++void sdhci_end_tuning(struct sdhci_host *host)
+ {
+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+ }
++EXPORT_SYMBOL_GPL(sdhci_end_tuning);
+
+-static void sdhci_reset_tuning(struct sdhci_host *host)
++void sdhci_reset_tuning(struct sdhci_host *host)
+ {
+ u16 ctrl;
+
+@@ -2080,6 +2082,7 @@ static void sdhci_reset_tuning(struct sdhci_host *host)
+ ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
+ sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+ }
++EXPORT_SYMBOL_GPL(sdhci_reset_tuning);
+
+ static void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
+ {
+@@ -2100,7 +2103,7 @@ static void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
+ * interrupt setup is different to other commands and there is no timeout
+ * interrupt so special handling is needed.
+ */
+-static void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
++void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
+ {
+ struct mmc_host *mmc = host->mmc;
+ struct mmc_command cmd = {};
+@@ -2150,6 +2153,7 @@ static void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
+ msecs_to_jiffies(50));
+
+ }
++EXPORT_SYMBOL_GPL(sdhci_send_tuning);
+
+ static void __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
+ {
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+index b5fd294..983b7df 100755
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -735,4 +735,9 @@ bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
+
+ void sdhci_dumpregs(struct sdhci_host *host);
+
++void sdhci_start_tuning(struct sdhci_host *host);
++void sdhci_end_tuning(struct sdhci_host *host);
++void sdhci_reset_tuning(struct sdhci_host *host);
++void sdhci_send_tuning(struct sdhci_host *host, u32 opcode);
++
+ #endif /* __SDHCI_HW_H */
+--
+2.7.4
+
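For illustration only, a sketch of how a derived SDHCI driver could compose
its own tuning loop from the helpers exported here; my_execute_tuning and the
loop bound are hypothetical and error handling is simplified:

#include <linux/errno.h>
#include "sdhci.h"

static int my_execute_tuning(struct sdhci_host *host, u32 opcode)
{
	u16 ctrl = 0;
	int i;

	sdhci_start_tuning(host);

	for (i = 0; i < 128; i++) {
		/* a vendor driver would adjust its delay/phase here */
		sdhci_send_tuning(host, opcode);

		/* controller clears EXEC_TUNING when the sequence finishes */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (!(ctrl & SDHCI_CTRL_EXEC_TUNING))
			break;
	}

	sdhci_end_tuning(host);

	return (ctrl & SDHCI_CTRL_TUNED_CLK) ? 0 : -EIO;
}
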
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5731-mmc-sdhci-Export-sdhci_request.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5731-mmc-sdhci-Export-sdhci_request.patch
new file mode 100644
index 00000000..a41cb1a2
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5731-mmc-sdhci-Export-sdhci_request.patch
@@ -0,0 +1,53 @@
+From 344895043e849dddc9ab272172c7b867227c32ba Mon Sep 17 00:00:00 2001
+From: Aapo Vienamo <avienamo@nvidia.com>
+Date: Mon, 20 Aug 2018 12:23:32 +0300
+Subject: [PATCH 5731/5758] mmc: sdhci: Export sdhci_request()
+
+Allow SDHCI drivers to hook code before and after sdhci_request() by
+making it externally visible.
+
+Signed-off-by: Aapo Vienamo <avienamo@nvidia.com>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/mmc/host/sdhci.c | 3 ++-
+ drivers/mmc/host/sdhci.h | 1 +
+ 2 files changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 53f6b2a..677815e 100755
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -1558,7 +1558,7 @@ EXPORT_SYMBOL_GPL(sdhci_set_power);
+ * *
+ \*****************************************************************************/
+
+-static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
++void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
+ {
+ struct sdhci_host *host;
+ int present;
+@@ -1597,6 +1597,7 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
+ mmiowb();
+ spin_unlock_irqrestore(&host->lock, flags);
+ }
++EXPORT_SYMBOL_GPL(sdhci_request);
+
+ void sdhci_set_bus_width(struct sdhci_host *host, int width)
+ {
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+index 983b7df..ba5227c 100755
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -711,6 +711,7 @@ void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd);
+ void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd);
++void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq);
+ void sdhci_set_bus_width(struct sdhci_host *host, int width);
+ void sdhci_reset(struct sdhci_host *host, u8 mask);
+ void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing);
+--
+2.7.4
+
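For illustration only, the kind of wrapper this export enables; my_request and
my_pre_request are hypothetical, and how the wrapper ends up installed as the
host's .request op depends on the rest of the vendor driver:

#include "sdhci.h"

static void my_pre_request(struct sdhci_host *host)
{
	/* e.g. ungate a vendor clock or refresh a PHY setting */
}

static void my_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host = mmc_priv(mmc);

	my_pre_request(host);
	sdhci_request(mmc, mrq);	/* hand off to the standard path */
}
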
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5732-mmc-sdhci-add-adma_table_cnt-member-to-struct-sdhci_.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5732-mmc-sdhci-add-adma_table_cnt-member-to-struct-sdhci_.patch
new file mode 100644
index 00000000..f2c83301
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5732-mmc-sdhci-add-adma_table_cnt-member-to-struct-sdhci_.patch
@@ -0,0 +1,77 @@
+From 3eeaf018781d1c0203d25400b12041c3238768d3 Mon Sep 17 00:00:00 2001
+From: Sudheesh Mavila <sudheesh.mavila@amd.com>
+Date: Wed, 16 Jan 2019 11:23:47 +0530
+Subject: [PATCH 5732/5758] mmc: sdhci: add adma_table_cnt member to struct
+ sdhci_host
+
+This patch adds an adma_table_cnt member to struct sdhci_host to give
+drivers more flexibility to control the ADMA table count.
+
+The default value of adma_table_cnt is (SDHCI_MAX_SEGS * 2 + 1).
+
+Signed-off-by: Jisheng Zhang <Jisheng.Zhang@synaptics.com>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/mmc/host/sdhci.c | 17 +++++++++--------
+ drivers/mmc/host/sdhci.h | 3 +++
+ 2 files changed, 12 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 677815e..f4b7c6e 100755
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -3237,6 +3237,13 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev,
+
+ host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG;
+
++ /*
++ * The DMA table descriptor count is calculated as the maximum
++ * number of segments times 2, to allow for an alignment
++ * descriptor for each segment, plus 1 for a nop end descriptor.
++ */
++ host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1;
++
+ return host;
+ }
+
+@@ -3488,18 +3495,12 @@ int sdhci_setup_host(struct sdhci_host *host)
+ dma_addr_t dma;
+ void *buf;
+
+- /*
+- * The DMA descriptor table size is calculated as the maximum
+- * number of segments times 2, to allow for an alignment
+- * descriptor for each segment, plus 1 for a nop end descriptor,
+- * all multipled by the descriptor size.
+- */
+ if (host->flags & SDHCI_USE_64_BIT_DMA) {
+- host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
++ host->adma_table_sz = host->adma_table_cnt *
+ SDHCI_ADMA2_64_DESC_SZ;
+ host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
+ } else {
+- host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
++ host->adma_table_sz = host->adma_table_cnt *
+ SDHCI_ADMA2_32_DESC_SZ;
+ host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
+ }
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+index ba5227c..f72697d 100755
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -550,6 +550,9 @@ struct sdhci_host {
+ /* Host SDMA buffer boundary. */
+ u32 sdma_boundary;
+
++ /* Host ADMA table count */
++ u32 adma_table_cnt;
++
+ unsigned long private[0] ____cacheline_aligned;
+ };
+
+--
+2.7.4
+
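For illustration only, a sketch of a driver tuning the new field between
sdhci_alloc_host()/sdhci_pltfm_init() and sdhci_add_host(); MY_MAX_SEGS and
my_add_host are hypothetical:

#include "sdhci.h"

#define MY_MAX_SEGS	64

static int my_add_host(struct sdhci_host *host)
{
	/* 2 descriptors per segment (data + alignment) plus a nop terminator */
	host->adma_table_cnt = MY_MAX_SEGS * 2 + 1;

	return sdhci_add_host(host);
}
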
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5733-mmc-sdhci-introduce-adma_write_desc-hook-to-struct-s.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5733-mmc-sdhci-introduce-adma_write_desc-hook-to-struct-s.patch
new file mode 100644
index 00000000..06720733
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5733-mmc-sdhci-introduce-adma_write_desc-hook-to-struct-s.patch
@@ -0,0 +1,128 @@
+From 8411fdf67af6dc346f3b47008b48e978a4832213 Mon Sep 17 00:00:00 2001
+From: Jisheng Zhang <Jisheng.Zhang@synaptics.com>
+Date: Tue, 28 Aug 2018 17:47:23 +0800
+Subject: [PATCH 5733/5758] mmc: sdhci: introduce adma_write_desc() hook to
+ struct sdhci_ops
+
+Add this hook so that it can be overridden with driver-specific
+implementations. We also let the original sdhci_adma_write_desc()
+accept &desc so that the function can advance it to the next
+descriptor. Then export the function so that it can be reused by
+driver-specific implementations.
+
+Signed-off-by: Jisheng Zhang <Jisheng.Zhang@synaptics.com>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/mmc/host/sdhci.c | 37 +++++++++++++++++++++++--------------
+ drivers/mmc/host/sdhci.h | 4 ++++
+ 2 files changed, 27 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index f4b7c6e..05cd6c0 100755
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -554,10 +554,10 @@ static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
+ local_irq_restore(*flags);
+ }
+
+-static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
+- dma_addr_t addr, int len, unsigned cmd)
++void sdhci_adma_write_desc(struct sdhci_host *host, void **desc,
++ dma_addr_t addr, int len, unsigned int cmd)
+ {
+- struct sdhci_adma2_64_desc *dma_desc = desc;
++ struct sdhci_adma2_64_desc *dma_desc = *desc;
+
+ /* 32-bit and 64-bit descriptors have these members in same position */
+ dma_desc->cmd = cpu_to_le16(cmd);
+@@ -566,6 +566,19 @@ static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
+
+ if (host->flags & SDHCI_USE_64_BIT_DMA)
+ dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
++
++ *desc += host->desc_sz;
++}
++EXPORT_SYMBOL_GPL(sdhci_adma_write_desc);
++
++static inline void __sdhci_adma_write_desc(struct sdhci_host *host,
++ void **desc, dma_addr_t addr,
++ int len, unsigned int cmd)
++{
++	if (host->ops->adma_write_desc)
++		host->ops->adma_write_desc(host, desc, addr, len, cmd);
++	else
++		sdhci_adma_write_desc(host, desc, addr, len, cmd);
+ }
+
+ static void sdhci_adma_mark_end(void *desc)
+@@ -618,28 +631,24 @@ static void sdhci_adma_table_pre(struct sdhci_host *host,
+ }
+
+ /* tran, valid */
+- sdhci_adma_write_desc(host, desc, align_addr, offset,
+- ADMA2_TRAN_VALID);
++ __sdhci_adma_write_desc(host, &desc, align_addr,
++ offset, ADMA2_TRAN_VALID);
+
+ BUG_ON(offset > 65536);
+
+ align += SDHCI_ADMA2_ALIGN;
+ align_addr += SDHCI_ADMA2_ALIGN;
+
+- desc += host->desc_sz;
+-
+ addr += offset;
+ len -= offset;
+ }
+
+ BUG_ON(len > 65536);
+
+- if (len) {
+- /* tran, valid */
+- sdhci_adma_write_desc(host, desc, addr, len,
+- ADMA2_TRAN_VALID);
+- desc += host->desc_sz;
+- }
++ /* tran, valid */
++ if (len)
++ __sdhci_adma_write_desc(host, &desc, addr, len,
++ ADMA2_TRAN_VALID);
+
+ /*
+ * If this triggers then we have a calculation bug
+@@ -656,7 +665,7 @@ static void sdhci_adma_table_pre(struct sdhci_host *host,
+ }
+ } else {
+ /* Add a terminating entry - nop, end, valid */
+- sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
++ __sdhci_adma_write_desc(host, &desc, 0, 0, ADMA2_NOP_END_VALID);
+ }
+ }
+
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+index f72697d..bb57fa0 100755
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -592,6 +592,8 @@ struct sdhci_ops {
+ void (*adma_workaround)(struct sdhci_host *host, u32 intmask);
+ void (*card_event)(struct sdhci_host *host);
+ void (*voltage_switch)(struct sdhci_host *host);
++ void (*adma_write_desc)(struct sdhci_host *host, void **desc,
++ dma_addr_t addr, int len, unsigned int cmd);
+ };
+
+ #ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
+@@ -723,6 +725,8 @@ void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);
+ int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
+ struct mmc_ios *ios);
+ void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable);
++void sdhci_adma_write_desc(struct sdhci_host *host, void **desc,
++ dma_addr_t addr, int len, unsigned int cmd);
+
+ #ifdef CONFIG_PM
+ int sdhci_suspend_host(struct sdhci_host *host);
+--
+2.7.4
+
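For illustration only, a sketch of a driver-specific adma_write_desc() built
on the exported helper; the 128 MiB boundary split is a made-up constraint,
not something required by this series:

#include <linux/kernel.h>
#include <linux/sizes.h>
#include "sdhci.h"

#define MY_DMA_BOUNDARY	SZ_128M

static void my_adma_write_desc(struct sdhci_host *host, void **desc,
			       dma_addr_t addr, int len, unsigned int cmd)
{
	int first = min_t(int, len,
			  MY_DMA_BOUNDARY - (addr & (MY_DMA_BOUNDARY - 1)));

	if (first >= len) {
		/* no boundary crossing, emit a single descriptor */
		sdhci_adma_write_desc(host, desc, addr, len, cmd);
		return;
	}

	/* split a transfer that would cross the controller's DMA boundary */
	sdhci_adma_write_desc(host, desc, addr, first, cmd);
	sdhci_adma_write_desc(host, desc, addr + first, len - first, cmd);
}

static const struct sdhci_ops my_sdhci_ops = {
	/* ...other ops elided... */
	.adma_write_desc = my_adma_write_desc,
};

A driver doing this kind of splitting would also want to raise adma_table_cnt
(from the previous patch) so the extra descriptors fit in the table.
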
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5734-mmc-sdhci-Add-version-V4-definition.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5734-mmc-sdhci-Add-version-V4-definition.patch
new file mode 100644
index 00000000..4a400142
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5734-mmc-sdhci-Add-version-V4-definition.patch
@@ -0,0 +1,46 @@
+From 6850c5d0e288e198475c2ad18d393065a49bbd29 Mon Sep 17 00:00:00 2001
+From: Chunyan Zhang <zhang.chunyan@linaro.org>
+Date: Thu, 30 Aug 2018 16:21:37 +0800
+Subject: [PATCH 5734/5758] mmc: sdhci: Add version V4 definition
+
+Add definitions for spec versions 4.00, 4.10 and 4.20.
+
+Signed-off-by: Chunyan Zhang <zhang.chunyan@linaro.org>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/mmc/host/sdhci.c | 2 +-
+ drivers/mmc/host/sdhci.h | 3 +++
+ 2 files changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 05cd6c0..e698218 100755
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -3443,7 +3443,7 @@ int sdhci_setup_host(struct sdhci_host *host)
+
+ override_timeout_clk = host->timeout_clk;
+
+- if (host->version > SDHCI_SPEC_300) {
++ if (host->version > SDHCI_SPEC_420) {
+ pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
+ mmc_hostname(mmc), host->version);
+ }
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+index bb57fa0..6bafe26 100755
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -270,6 +270,9 @@
+ #define SDHCI_SPEC_100 0
+ #define SDHCI_SPEC_200 1
+ #define SDHCI_SPEC_300 2
++#define SDHCI_SPEC_400 3
++#define SDHCI_SPEC_410 4
++#define SDHCI_SPEC_420 5
+
+ /*
+ * End of controller registers.
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5735-mmc-sdhci-Add-sd-host-v4-mode.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5735-mmc-sdhci-Add-sd-host-v4-mode.patch
new file mode 100644
index 00000000..bcf7fb09
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5735-mmc-sdhci-Add-sd-host-v4-mode.patch
@@ -0,0 +1,105 @@
+From 8ba06e5eab7b0da6a1f5083d80cbcbde066556ff Mon Sep 17 00:00:00 2001
+From: Sudheesh Mavila <sudheesh.mavila@amd.com>
+Date: Wed, 16 Jan 2019 11:47:52 +0530
+Subject: [PATCH 5735/5758] mmc: sdhci: Add sd host v4 mode
+
+For SD host controllers of version 4.00 or later, there are two
+modes of implementation - Version 3.00 compatible mode or
+Version 4 mode. This patch introduces an interface to enable
+v4 mode.
+
+Signed-off-by: Chunyan Zhang <zhang.chunyan@linaro.org>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/mmc/host/sdhci.c | 29 +++++++++++++++++++++++++++++
+ drivers/mmc/host/sdhci.h | 3 +++
+ 2 files changed, 32 insertions(+)
+
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index e698218..1feecbe 100755
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -123,6 +123,29 @@ EXPORT_SYMBOL_GPL(sdhci_dumpregs);
+ * *
+ \*****************************************************************************/
+
++static void sdhci_do_enable_v4_mode(struct sdhci_host *host)
++{
++ u16 ctrl2;
++
++	ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
++ if (ctrl2 & SDHCI_CTRL_V4_MODE)
++ return;
++
++ ctrl2 |= SDHCI_CTRL_V4_MODE;
++	sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
++}
++
++/*
++ * This can be called before sdhci_add_host() by Vendor's host controller
++ * driver to enable v4 mode if supported.
++ */
++void sdhci_enable_v4_mode(struct sdhci_host *host)
++{
++ host->v4_mode = true;
++ sdhci_do_enable_v4_mode(host);
++}
++EXPORT_SYMBOL_GPL(sdhci_enable_v4_mode);
++
+ static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
+ {
+ return cmd->data || cmd->flags & MMC_RSP_BUSY;
+@@ -252,6 +275,9 @@ static void sdhci_init(struct sdhci_host *host, int soft)
+ else
+ sdhci_do_reset(host, SDHCI_RESET_ALL);
+
++ if (host->v4_mode)
++ sdhci_do_enable_v4_mode(host);
++
+ sdhci_set_default_irqs(host);
+
+ host->cqe_on = false;
+@@ -3307,6 +3333,9 @@ void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1)
+
+ sdhci_do_reset(host, SDHCI_RESET_ALL);
+
++ if (host->v4_mode)
++ sdhci_do_enable_v4_mode(host);
++
+ of_property_read_u64(mmc_dev(host->mmc)->of_node,
+ "sdhci-caps-mask", &dt_caps_mask);
+ of_property_read_u64(mmc_dev(host->mmc)->of_node,
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+index 6bafe26..63c7d61 100755
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -184,6 +184,7 @@
+ #define SDHCI_CTRL_DRV_TYPE_D 0x0030
+ #define SDHCI_CTRL_EXEC_TUNING 0x0040
+ #define SDHCI_CTRL_TUNED_CLK 0x0080
++#define SDHCI_CTRL_V4_MODE 0x1000
+ #define SDHCI_CTRL_PRESET_VAL_ENABLE 0x8000
+
+ #define SDHCI_CAPABILITIES 0x40
+@@ -491,6 +492,7 @@ struct sdhci_host {
+ bool bus_on; /* Bus power prevents runtime suspend */
+ bool preset_enabled; /* Preset is enabled */
+ bool pending_reset; /* Cmd/data reset is pending */
++ bool v4_mode; /* Host Version 4 Enable */
+
+ struct mmc_request *mrqs_done[SDHCI_MAX_MRQS]; /* Requests done */
+ struct mmc_command *cmd; /* Current command */
+@@ -745,6 +747,7 @@ bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
+ int *data_error);
+
+ void sdhci_dumpregs(struct sdhci_host *host);
++void sdhci_enable_v4_mode(struct sdhci_host *host);
+
+ void sdhci_start_tuning(struct sdhci_host *host);
+ void sdhci_end_tuning(struct sdhci_host *host);
+--
+2.7.4
+
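For illustration only, a sketch of a platform glue driver opting in to V4 mode
before registering the host; my_probe, my_pdata and the chosen ops are
hypothetical and error handling is elided:

#include <linux/err.h>
#include <linux/platform_device.h>
#include "sdhci-pltfm.h"

static const struct sdhci_ops my_sdhci_ops = {
	.set_clock		= sdhci_set_clock,
	.set_bus_width		= sdhci_set_bus_width,
	.reset			= sdhci_reset,
	.set_uhs_signaling	= sdhci_set_uhs_signaling,
};

static const struct sdhci_pltfm_data my_pdata = {
	.ops = &my_sdhci_ops,
};

static int my_probe(struct platform_device *pdev)
{
	struct sdhci_host *host;

	host = sdhci_pltfm_init(pdev, &my_pdata, 0);
	if (IS_ERR(host))
		return PTR_ERR(host);

	/* must happen before sdhci_setup_host()/sdhci_add_host() */
	sdhci_enable_v4_mode(host);

	return sdhci_add_host(host);
}
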
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5736-mmc-sdhci-Add-ADMA2-64-bit-addressing-support-for-V4.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5736-mmc-sdhci-Add-ADMA2-64-bit-addressing-support-for-V4.patch
new file mode 100644
index 00000000..c872c0b3
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5736-mmc-sdhci-Add-ADMA2-64-bit-addressing-support-for-V4.patch
@@ -0,0 +1,211 @@
+From 227ef7c1031e63d5ebdc50df75f2e3eb01a837ea Mon Sep 17 00:00:00 2001
+From: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+Date: Tue, 12 Feb 2019 19:12:31 +0530
+Subject: [PATCH 5736/5758] mmc: sdhci: Add ADMA2 64-bit addressing support
+ for V4 mode
+
+ADMA2 64-bit addressing support is divided into V3 mode and V4 mode.
+So there are two kinds of descriptors for ADMA2 64-bit addressing,
+i.e. a 96-bit descriptor for V3 mode and a 128-bit descriptor for V4
+mode. The 128-bit descriptor is aligned to 8 bytes.
+
+For V4 mode, ADMA2 64-bit addressing is enabled via Host Control 2
+register.
+
+Signed-off-by: Chunyan Zhang <zhang.chunyan@linaro.org>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+[Ulf: Fixed conflict while applying]
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+Signed-off-by: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+---
+ drivers/mmc/host/sdhci.c | 92 +++++++++++++++++++++++++++++++++++-------------
+ drivers/mmc/host/sdhci.h | 12 +++++--
+ 2 files changed, 78 insertions(+), 26 deletions(-)
+
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 1feecbe..0cd4b1e 100755
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -266,6 +266,52 @@ static void sdhci_set_default_irqs(struct sdhci_host *host)
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+ }
+
++static void sdhci_config_dma(struct sdhci_host *host)
++{
++ u8 ctrl;
++ u16 ctrl2;
++
++ if (host->version < SDHCI_SPEC_200)
++ return;
++
++ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
++
++ /*
++ * Always adjust the DMA selection as some controllers
++ * (e.g. JMicron) can't do PIO properly when the selection
++ * is ADMA.
++ */
++ ctrl &= ~SDHCI_CTRL_DMA_MASK;
++ if (!(host->flags & SDHCI_REQ_USE_DMA))
++ goto out;
++
++ /* Note if DMA Select is zero then SDMA is selected */
++ if (host->flags & SDHCI_USE_ADMA)
++ ctrl |= SDHCI_CTRL_ADMA32;
++
++ if (host->flags & SDHCI_USE_64_BIT_DMA) {
++ /*
++ * If v4 mode, all supported DMA can be 64-bit addressing if
++ * controller supports 64-bit system address, otherwise only
++ * ADMA can support 64-bit addressing.
++ */
++ if (host->v4_mode) {
++ ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
++ ctrl2 |= SDHCI_CTRL_64BIT_ADDR;
++ sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
++ } else if (host->flags & SDHCI_USE_ADMA) {
++ /*
++ * Don't need to undo SDHCI_CTRL_ADMA32 in order to
++ * set SDHCI_CTRL_ADMA64.
++ */
++ ctrl |= SDHCI_CTRL_ADMA64;
++ }
++ }
++
++out:
++ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
++}
++
+ static void sdhci_init(struct sdhci_host *host, int soft)
+ {
+ struct mmc_host *mmc = host->mmc;
+@@ -839,7 +885,6 @@ static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
+
+ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
+ {
+- u8 ctrl;
+ struct mmc_data *data = cmd->data;
+
+ if (sdhci_data_line_cmd(cmd))
+@@ -934,25 +979,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
+ }
+ }
+
+- /*
+- * Always adjust the DMA selection as some controllers
+- * (e.g. JMicron) can't do PIO properly when the selection
+- * is ADMA.
+- */
+- if (host->version >= SDHCI_SPEC_200) {
+- ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+- ctrl &= ~SDHCI_CTRL_DMA_MASK;
+- if ((host->flags & SDHCI_REQ_USE_DMA) &&
+- (host->flags & SDHCI_USE_ADMA)) {
+- if (host->flags & SDHCI_USE_64_BIT_DMA)
+- ctrl |= SDHCI_CTRL_ADMA64;
+- else
+- ctrl |= SDHCI_CTRL_ADMA32;
+- } else {
+- ctrl |= SDHCI_CTRL_SDMA;
+- }
+- sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+- }
++ sdhci_config_dma(host);
+
+ if (!(host->flags & SDHCI_REQ_USE_DMA)) {
+ int flags;
+@@ -3436,6 +3463,19 @@ static int sdhci_allocate_bounce_buffer(struct sdhci_host *host)
+ return 0;
+ }
+
++static inline bool sdhci_can_64bit_dma(struct sdhci_host *host)
++{
++ /*
++ * According to SD Host Controller spec v4.10, bit[27] added from
++ * version 4.10 in Capabilities Register is used as 64-bit System
++ * Address support for V4 mode.
++ */
++ if (host->version >= SDHCI_SPEC_410 && host->v4_mode)
++ return host->caps & SDHCI_CAN_64BIT_V4;
++
++ return host->caps & SDHCI_CAN_64BIT;
++}
++
+ int sdhci_setup_host(struct sdhci_host *host)
+ {
+ struct mmc_host *mmc;
+@@ -3507,7 +3547,7 @@ int sdhci_setup_host(struct sdhci_host *host)
+ * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
+ * implement.
+ */
+- if (host->caps & SDHCI_CAN_64BIT)
++ if (sdhci_can_64bit_dma(host))
+ host->flags |= SDHCI_USE_64_BIT_DMA;
+
+ if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
+@@ -3535,8 +3575,8 @@ int sdhci_setup_host(struct sdhci_host *host)
+
+ if (host->flags & SDHCI_USE_64_BIT_DMA) {
+ host->adma_table_sz = host->adma_table_cnt *
+- SDHCI_ADMA2_64_DESC_SZ;
+- host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
++ SDHCI_ADMA2_64_DESC_SZ(host);
++ host->desc_sz = SDHCI_ADMA2_64_DESC_SZ(host);
+ } else {
+ host->adma_table_sz = host->adma_table_cnt *
+ SDHCI_ADMA2_32_DESC_SZ;
+@@ -3544,7 +3584,11 @@ int sdhci_setup_host(struct sdhci_host *host)
+ }
+
+ host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
+- buf = dma_alloc_coherent(mmc_dev(mmc), host->align_buffer_sz +
++ /*
++ * Use zalloc to zero the reserved high 32-bits of 128-bit
++ * descriptors so that they never need to be written.
++ */
++ buf = dma_zalloc_coherent(mmc_dev(mmc), host->align_buffer_sz +
+ host->adma_table_sz, &dma, GFP_KERNEL);
+ if (!buf) {
+ pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+index 63c7d61..2763970 100755
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -185,6 +185,7 @@
+ #define SDHCI_CTRL_EXEC_TUNING 0x0040
+ #define SDHCI_CTRL_TUNED_CLK 0x0080
+ #define SDHCI_CTRL_V4_MODE 0x1000
++#define SDHCI_CTRL_64BIT_ADDR 0x2000
+ #define SDHCI_CTRL_PRESET_VAL_ENABLE 0x8000
+
+ #define SDHCI_CAPABILITIES 0x40
+@@ -205,6 +206,7 @@
+ #define SDHCI_CAN_VDD_330 0x01000000
+ #define SDHCI_CAN_VDD_300 0x02000000
+ #define SDHCI_CAN_VDD_180 0x04000000
++#define SDHCI_CAN_64BIT_V4 0x08000000
+ #define SDHCI_CAN_64BIT 0x10000000
+
+ #define SDHCI_SUPPORT_SDR50 0x00000001
+@@ -309,8 +311,14 @@ struct sdhci_adma2_32_desc {
+ */
+ #define SDHCI_ADMA2_DESC_ALIGN 8
+
+-/* ADMA2 64-bit DMA descriptor size */
+-#define SDHCI_ADMA2_64_DESC_SZ 12
++/*
++ * ADMA2 64-bit DMA descriptor size
++ * According to SD Host Controller spec v4.10, there are two kinds of
++ * descriptors for 64-bit addressing mode: 96-bit Descriptor and 128-bit
++ * Descriptor, if Host Version 4 Enable is set in the Host Control 2
++ * register, 128-bit Descriptor will be selected.
++ */
++#define SDHCI_ADMA2_64_DESC_SZ(host) ((host)->v4_mode ? 16 : 12)
+
+ /*
+ * ADMA2 64-bit descriptor. Note 12-byte descriptor can't always be 8-byte
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5737-mmc-sdhci-Add-32-bit-block-count-support-for-v4-mode.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5737-mmc-sdhci-Add-32-bit-block-count-support-for-v4-mode.patch
new file mode 100644
index 00000000..66f28f72
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5737-mmc-sdhci-Add-32-bit-block-count-support-for-v4-mode.patch
@@ -0,0 +1,80 @@
+From 7d08c14012602dc43c5b92b4fe0848801d6d54c1 Mon Sep 17 00:00:00 2001
+From: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+Date: Tue, 12 Feb 2019 19:14:24 +0530
+Subject: [PATCH 5737/5758] mmc: sdhci: Add 32-bit block count support for v4
+ mode
+
+Host Controller Version 4.10 redefines the SDMA System Address register
+as a 32-bit Block Count for v4 mode, and SDMA uses the ADMA System
+Address register (05Fh-058h) instead if v4 mode is enabled. Also,
+when using the 32-bit block count, the 16-bit block count register needs
+to be set to zero.
+
+Since using the 32-bit Block Count would cause problems for auto-cmd23,
+it can be chosen via host->quirks2.
+
+Signed-off-by: Chunyan Zhang <zhang.chunyan@linaro.org>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+Signed-off-by: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+---
+ drivers/mmc/host/sdhci.c | 14 +++++++++++++-
+ drivers/mmc/host/sdhci.h | 8 ++++++++
+ 2 files changed, 21 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 0cd4b1e..a082614 100755
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -998,7 +998,19 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
+ /* Set the DMA boundary value and block size */
+ sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
+ SDHCI_BLOCK_SIZE);
+- sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
++
++ /*
++ * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count
++ * can be supported, in that case 16-bit block count register must be 0.
++ */
++ if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
++ (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) {
++ if (sdhci_readw(host, SDHCI_BLOCK_COUNT))
++ sdhci_writew(host, 0, SDHCI_BLOCK_COUNT);
++ sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT);
++ } else {
++ sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
++ }
+ }
+
+ static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+index 2763970..73ae5f8 100755
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -28,6 +28,7 @@
+
+ #define SDHCI_DMA_ADDRESS 0x00
+ #define SDHCI_ARGUMENT2 SDHCI_DMA_ADDRESS
++#define SDHCI_32BIT_BLK_CNT SDHCI_DMA_ADDRESS
+
+ #define SDHCI_BLOCK_SIZE 0x04
+ #define SDHCI_MAKE_BLKSZ(dma, blksz) (((dma & 0x7) << 12) | (blksz & 0xFFF))
+@@ -449,6 +450,13 @@ struct sdhci_host {
+ #define SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN (1<<15)
+ /* Controller has CRC in 136 bit Command Response */
+ #define SDHCI_QUIRK2_RSP_136_HAS_CRC (1<<16)
++/*
++ * 32-bit block count may not support eMMC where upper bits of CMD23 are used
++ * for other purposes. Consequently we support 16-bit block count by default.
++ * Otherwise, SDHCI_QUIRK2_USE_32BIT_BLK_CNT can be selected to use 32-bit
++ * block count.
++ */
++#define SDHCI_QUIRK2_USE_32BIT_BLK_CNT (1<<18)
+
+ #define SDHCI_QUIRK2_BROKEN_TUNING_WA (1<<17)
+ int irq; /* Device IRQ */
+--
+2.7.4
+
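For illustration only, a sketch of how a driver that trusts its controller's
CMD23 handling could opt in to the 32-bit block count; my_init_quirks is
hypothetical:

#include "sdhci.h"

static void my_init_quirks(struct sdhci_host *host)
{
	/* only meaningful on v4-mode hosts with spec version >= 4.10 */
	sdhci_enable_v4_mode(host);
	host->quirks2 |= SDHCI_QUIRK2_USE_32BIT_BLK_CNT;
}
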
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5738-mmc-sdhci-Add-Auto-CMD-Auto-Select-support.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5738-mmc-sdhci-Add-Auto-CMD-Auto-Select-support.patch
new file mode 100644
index 00000000..c24ff3b9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5738-mmc-sdhci-Add-Auto-CMD-Auto-Select-support.patch
@@ -0,0 +1,117 @@
+From 2ccce2251f92f8978382dd8c75cc6ccae1b6d97f Mon Sep 17 00:00:00 2001
+From: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+Date: Tue, 12 Feb 2019 19:15:20 +0530
+Subject: [PATCH 5738/5758] mmc: sdhci: Add Auto CMD Auto Select support
+
+As SD Host Controller Specification v4.10 documents:
+Host Controller Version 4.10 defines this "Auto CMD Auto Select" mode.
+Selection of Auto CMD depends on setting of CMD23 Enable in the Host
+Control 2 register which indicates whether card supports CMD23. If CMD23
+Enable =1, Auto CMD23 is used and if CMD23 Enable =0, Auto CMD12 is
+used. In case of Version 4.10 or later, use of Auto CMD Auto Select is
+recommended rather than use of Auto CMD12 Enable or Auto CMD23
+Enable.
+
+This patch adds support for this new mode.
+
+Signed-off-by: Chunyan Zhang <zhang.chunyan@linaro.org>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+Signed-off-by: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+---
+ drivers/mmc/host/sdhci.c | 49 ++++++++++++++++++++++++++++++++++++++----------
+ drivers/mmc/host/sdhci.h | 2 ++
+ 2 files changed, 41 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index a082614..d7c9274 100755
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -1020,6 +1020,43 @@ static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
+ !mrq->cap_cmd_during_tfr;
+ }
+
++static inline void sdhci_auto_cmd_select(struct sdhci_host *host,
++ struct mmc_command *cmd,
++ u16 *mode)
++{
++ bool use_cmd12 = sdhci_auto_cmd12(host, cmd->mrq) &&
++ (cmd->opcode != SD_IO_RW_EXTENDED);
++ bool use_cmd23 = cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23);
++ u16 ctrl2;
++
++ /*
++ * In case of Version 4.10 or later, use of 'Auto CMD Auto
++ * Select' is recommended rather than use of 'Auto CMD12
++ * Enable' or 'Auto CMD23 Enable'.
++ */
++ if (host->version >= SDHCI_SPEC_410 && (use_cmd12 || use_cmd23)) {
++ *mode |= SDHCI_TRNS_AUTO_SEL;
++
++ ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
++ if (use_cmd23)
++ ctrl2 |= SDHCI_CMD23_ENABLE;
++ else
++ ctrl2 &= ~SDHCI_CMD23_ENABLE;
++ sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
++
++ return;
++ }
++
++ /*
++ * If we are sending CMD23, CMD12 never gets sent
++ * on successful completion (so no Auto-CMD12).
++ */
++ if (use_cmd12)
++ *mode |= SDHCI_TRNS_AUTO_CMD12;
++ else if (use_cmd23)
++ *mode |= SDHCI_TRNS_AUTO_CMD23;
++}
++
+ static void sdhci_set_transfer_mode(struct sdhci_host *host,
+ struct mmc_command *cmd)
+ {
+@@ -1046,17 +1083,9 @@ static void sdhci_set_transfer_mode(struct sdhci_host *host,
+
+ if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
+ mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
+- /*
+- * If we are sending CMD23, CMD12 never gets sent
+- * on successful completion (so no Auto-CMD12).
+- */
+- if (sdhci_auto_cmd12(host, cmd->mrq) &&
+- (cmd->opcode != SD_IO_RW_EXTENDED))
+- mode |= SDHCI_TRNS_AUTO_CMD12;
+- else if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
+- mode |= SDHCI_TRNS_AUTO_CMD23;
++ sdhci_auto_cmd_select(host, cmd, &mode);
++ if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23))
+ sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
+- }
+ }
+
+ if (data->flags & MMC_DATA_READ)
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+index 73ae5f8..dd3219e 100755
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -42,6 +42,7 @@
+ #define SDHCI_TRNS_BLK_CNT_EN 0x02
+ #define SDHCI_TRNS_AUTO_CMD12 0x04
+ #define SDHCI_TRNS_AUTO_CMD23 0x08
++#define SDHCI_TRNS_AUTO_SEL 0x0C
+ #define SDHCI_TRNS_READ 0x10
+ #define SDHCI_TRNS_MULTI 0x20
+
+@@ -185,6 +186,7 @@
+ #define SDHCI_CTRL_DRV_TYPE_D 0x0030
+ #define SDHCI_CTRL_EXEC_TUNING 0x0040
+ #define SDHCI_CTRL_TUNED_CLK 0x0080
++#define SDHCI_CMD23_ENABLE 0x0800
+ #define SDHCI_CTRL_V4_MODE 0x1000
+ #define SDHCI_CTRL_64BIT_ADDR 0x2000
+ #define SDHCI_CTRL_PRESET_VAL_ENABLE 0x8000
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5739-amd-xgbe-use-dma_mapping_error-to-check-map-errors.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5739-amd-xgbe-use-dma_mapping_error-to-check-map-errors.patch
new file mode 100644
index 00000000..6a2cf4f5
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5739-amd-xgbe-use-dma_mapping_error-to-check-map-errors.patch
@@ -0,0 +1,45 @@
+From 814a991d5e22411b1ca2bf47c7c25aa07042de38 Mon Sep 17 00:00:00 2001
+From: YueHaibing <yuehaibing@huawei.com>
+Date: Thu, 26 Jul 2018 09:51:27 +0800
+Subject: [PATCH 5739/5758] amd-xgbe: use dma_mapping_error to check map errors
+
+dma_mapping_error() only reports whether the mapping failed, but we want
+to return -ENOMEM if there was an error.
+
+Fixes: 174fd2597b0b ("amd-xgbe: Implement split header receive support")
+Signed-off-by: YueHaibing <yuehaibing@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-desc.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+index cc1e4f8..5330942 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+@@ -289,7 +289,7 @@ static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
+ struct page *pages = NULL;
+ dma_addr_t pages_dma;
+ gfp_t gfp;
+- int order, ret;
++ int order;
+
+ again:
+ order = alloc_order;
+@@ -316,10 +316,9 @@ static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
+ /* Map the pages */
+ pages_dma = dma_map_page(pdata->dev, pages, 0,
+ PAGE_SIZE << order, DMA_FROM_DEVICE);
+- ret = dma_mapping_error(pdata->dev, pages_dma);
+- if (ret) {
++ if (dma_mapping_error(pdata->dev, pages_dma)) {
+ put_page(pages);
+- return ret;
++ return -ENOMEM;
+ }
+
+ pa->pages = pages;
+--
+2.7.4
+
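For illustration only, the general shape of the pattern this fix enforces,
with a hypothetical my_map_rx_page() helper: check the mapping with
dma_mapping_error() and pick the errno yourself instead of returning its
result:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/mm.h>

static int my_map_rx_page(struct device *dev, struct page *page,
			  dma_addr_t *dma)
{
	*dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma)) {
		put_page(page);
		return -ENOMEM;	/* an explicit errno, not the helper's value */
	}

	return 0;
}
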
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5740-lib-crc-Move-polynomial-definition-to-separate-heade.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5740-lib-crc-Move-polynomial-definition-to-separate-heade.patch
new file mode 100644
index 00000000..0b3384e8
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5740-lib-crc-Move-polynomial-definition-to-separate-heade.patch
@@ -0,0 +1,96 @@
+From f52e39a9b26ab160a1195daaaa195cc018793588 Mon Sep 17 00:00:00 2001
+From: Krzysztof Kozlowski <krzk@kernel.org>
+Date: Tue, 17 Jul 2018 18:05:36 +0200
+Subject: [PATCH 5740/5758] lib/crc: Move polynomial definition to separate
+ header
+
+Allow other drivers and parts of the kernel to use the same define for
+the CRC32 polynomial, instead of duplicating it in many places. This
+does not bring any functional changes; it only moves existing code.
+
+Signed-off-by: Krzysztof Kozlowski <krzk@kernel.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ include/linux/crc32poly.h | 20 ++++++++++++++++++++
+ lib/crc32.c | 1 +
+ lib/crc32defs.h | 14 --------------
+ lib/gen_crc32table.c | 1 +
+ 4 files changed, 22 insertions(+), 14 deletions(-)
+ create mode 100644 include/linux/crc32poly.h
+
+diff --git a/include/linux/crc32poly.h b/include/linux/crc32poly.h
+new file mode 100644
+index 0000000..7ad5aa9
+--- /dev/null
++++ b/include/linux/crc32poly.h
+@@ -0,0 +1,20 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _LINUX_CRC32_POLY_H
++#define _LINUX_CRC32_POLY_H
++
++/*
++ * There are multiple 16-bit CRC polynomials in common use, but this is
++ * *the* standard CRC-32 polynomial, first popularized by Ethernet.
++ * x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x^1+x^0
++ */
++#define CRCPOLY_LE 0xedb88320
++#define CRCPOLY_BE 0x04c11db7
++
++/*
++ * This is the CRC32c polynomial, as outlined by Castagnoli.
++ * x^32+x^28+x^27+x^26+x^25+x^23+x^22+x^20+x^19+x^18+x^14+x^13+x^11+x^10+x^9+
++ * x^8+x^6+x^0
++ */
++#define CRC32C_POLY_LE 0x82F63B78
++
++#endif /* _LINUX_CRC32_POLY_H */
+diff --git a/lib/crc32.c b/lib/crc32.c
+index 6ddc92b..82bfc053 100644
+--- a/lib/crc32.c
++++ b/lib/crc32.c
+@@ -27,6 +27,7 @@
+ /* see: Documentation/crc32.txt for a description of algorithms */
+
+ #include <linux/crc32.h>
++#include <linux/crc32poly.h>
+ #include <linux/module.h>
+ #include <linux/types.h>
+ #include <linux/sched.h>
+diff --git a/lib/crc32defs.h b/lib/crc32defs.h
+index cb275a2..0c8fb59 100644
+--- a/lib/crc32defs.h
++++ b/lib/crc32defs.h
+@@ -1,18 +1,4 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+-/*
+- * There are multiple 16-bit CRC polynomials in common use, but this is
+- * *the* standard CRC-32 polynomial, first popularized by Ethernet.
+- * x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x^1+x^0
+- */
+-#define CRCPOLY_LE 0xedb88320
+-#define CRCPOLY_BE 0x04c11db7
+-
+-/*
+- * This is the CRC32c polynomial, as outlined by Castagnoli.
+- * x^32+x^28+x^27+x^26+x^25+x^23+x^22+x^20+x^19+x^18+x^14+x^13+x^11+x^10+x^9+
+- * x^8+x^6+x^0
+- */
+-#define CRC32C_POLY_LE 0x82F63B78
+
+ /* Try to choose an implementation variant via Kconfig */
+ #ifdef CONFIG_CRC32_SLICEBY8
+diff --git a/lib/gen_crc32table.c b/lib/gen_crc32table.c
+index 8f26660..34c3bc8 100644
+--- a/lib/gen_crc32table.c
++++ b/lib/gen_crc32table.c
+@@ -1,5 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include <stdio.h>
++#include "../include/linux/crc32poly.h"
+ #include "../include/generated/autoconf.h"
+ #include "crc32defs.h"
+ #include <inttypes.h>
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5741-lib-crc-Use-consistent-naming-for-CRC-32-polynomials.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5741-lib-crc-Use-consistent-naming-for-CRC-32-polynomials.patch
new file mode 100644
index 00000000..8739c59c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5741-lib-crc-Use-consistent-naming-for-CRC-32-polynomials.patch
@@ -0,0 +1,105 @@
+From 73207c21e8dd4efdc4c7914f73590ab7b5211e5e Mon Sep 17 00:00:00 2001
+From: Krzysztof Kozlowski <krzk@kernel.org>
+Date: Tue, 17 Jul 2018 18:05:37 +0200
+Subject: [PATCH 5741/5758] lib/crc: Use consistent naming for CRC-32
+ polynomials
+
+The header was defining CRCPOLY_LE/BE and CRC32C_POLY_LE, but in fact all of
+them are CRC-32 polynomials, so use consistent naming.
+
+Signed-off-by: Krzysztof Kozlowski <krzk@kernel.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ include/linux/crc32poly.h | 4 ++--
+ lib/crc32.c | 10 +++++-----
+ lib/gen_crc32table.c | 4 ++--
+ 3 files changed, 9 insertions(+), 9 deletions(-)
+
+diff --git a/include/linux/crc32poly.h b/include/linux/crc32poly.h
+index 7ad5aa9..62c4b77 100644
+--- a/include/linux/crc32poly.h
++++ b/include/linux/crc32poly.h
+@@ -7,8 +7,8 @@
+ * *the* standard CRC-32 polynomial, first popularized by Ethernet.
+ * x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x^1+x^0
+ */
+-#define CRCPOLY_LE 0xedb88320
+-#define CRCPOLY_BE 0x04c11db7
++#define CRC32_POLY_LE 0xedb88320
++#define CRC32_POLY_BE 0x04c11db7
+
+ /*
+ * This is the CRC32c polynomial, as outlined by Castagnoli.
+diff --git a/lib/crc32.c b/lib/crc32.c
+index 82bfc053..7111c44 100644
+--- a/lib/crc32.c
++++ b/lib/crc32.c
+@@ -185,7 +185,7 @@ static inline u32 __pure crc32_le_generic(u32 crc, unsigned char const *p,
+ #if CRC_LE_BITS == 1
+ u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
+ {
+- return crc32_le_generic(crc, p, len, NULL, CRCPOLY_LE);
++ return crc32_le_generic(crc, p, len, NULL, CRC32_POLY_LE);
+ }
+ u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
+ {
+@@ -195,7 +195,7 @@ u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
+ u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
+ {
+ return crc32_le_generic(crc, p, len,
+- (const u32 (*)[256])crc32table_le, CRCPOLY_LE);
++ (const u32 (*)[256])crc32table_le, CRC32_POLY_LE);
+ }
+ u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
+ {
+@@ -269,7 +269,7 @@ static u32 __attribute_const__ crc32_generic_shift(u32 crc, size_t len,
+
+ u32 __attribute_const__ crc32_le_shift(u32 crc, size_t len)
+ {
+- return crc32_generic_shift(crc, len, CRCPOLY_LE);
++ return crc32_generic_shift(crc, len, CRC32_POLY_LE);
+ }
+
+ u32 __attribute_const__ __crc32c_le_shift(u32 crc, size_t len)
+@@ -331,13 +331,13 @@ static inline u32 __pure crc32_be_generic(u32 crc, unsigned char const *p,
+ #if CRC_LE_BITS == 1
+ u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
+ {
+- return crc32_be_generic(crc, p, len, NULL, CRCPOLY_BE);
++ return crc32_be_generic(crc, p, len, NULL, CRC32_POLY_BE);
+ }
+ #else
+ u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
+ {
+ return crc32_be_generic(crc, p, len,
+- (const u32 (*)[256])crc32table_be, CRCPOLY_BE);
++ (const u32 (*)[256])crc32table_be, CRC32_POLY_BE);
+ }
+ #endif
+ EXPORT_SYMBOL(crc32_be);
+diff --git a/lib/gen_crc32table.c b/lib/gen_crc32table.c
+index 34c3bc8..f755b99 100644
+--- a/lib/gen_crc32table.c
++++ b/lib/gen_crc32table.c
+@@ -58,7 +58,7 @@ static void crc32init_le_generic(const uint32_t polynomial,
+
+ static void crc32init_le(void)
+ {
+- crc32init_le_generic(CRCPOLY_LE, crc32table_le);
++ crc32init_le_generic(CRC32_POLY_LE, crc32table_le);
+ }
+
+ static void crc32cinit_le(void)
+@@ -77,7 +77,7 @@ static void crc32init_be(void)
+ crc32table_be[0][0] = 0;
+
+ for (i = 1; i < BE_TABLE_SIZE; i <<= 1) {
+- crc = (crc << 1) ^ ((crc & 0x80000000) ? CRCPOLY_BE : 0);
++ crc = (crc << 1) ^ ((crc & 0x80000000) ? CRC32_POLY_BE : 0);
+ for (j = 0; j < i; j++)
+ crc32table_be[0][i + j] = crc ^ crc32table_be[0][j];
+ }
+--
+2.7.4
+
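For illustration only, the bit-at-a-time form of the little-endian CRC-32 that
the shared CRC32_POLY_LE define describes; the in-kernel crc32_le() uses
lookup tables, so this sketch only shows what the polynomial constant means:

#include <linux/crc32poly.h>
#include <linux/types.h>

static u32 my_crc32_le_bitwise(u32 crc, const u8 *p, size_t len)
{
	int i;

	while (len--) {
		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? CRC32_POLY_LE : 0);
	}

	return crc;
}
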
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5742-net-ethernet-Use-existing-define-with-polynomial.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5742-net-ethernet-Use-existing-define-with-polynomial.patch
new file mode 100644
index 00000000..fdc6f000
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5742-net-ethernet-Use-existing-define-with-polynomial.patch
@@ -0,0 +1,46 @@
+From 43467fc009a72c1efba2f0d6806584b805685fc0 Mon Sep 17 00:00:00 2001
+From: Krzysztof Kozlowski <krzk@kernel.org>
+Date: Tue, 17 Jul 2018 18:05:39 +0200
+Subject: [PATCH 5742/5758] net: ethernet: Use existing define with polynomial
+
+Do not define the polynomial again; use the header with the existing define.
+
+Signed-off-by: Krzysztof Kozlowski <krzk@kernel.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+index e107e18..1e929a1 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+@@ -119,6 +119,7 @@
+ #include <linux/clk.h>
+ #include <linux/bitrev.h>
+ #include <linux/crc32.h>
++#include <linux/crc32poly.h>
+
+ #include "xgbe.h"
+ #include "xgbe-common.h"
+@@ -887,7 +888,6 @@ static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
+
+ static u32 xgbe_vid_crc32_le(__le16 vid_le)
+ {
+- u32 poly = 0xedb88320; /* CRCPOLY_LE */
+ u32 crc = ~0;
+ u32 temp = 0;
+ unsigned char *data = (unsigned char *)&vid_le;
+@@ -904,7 +904,7 @@ static u32 xgbe_vid_crc32_le(__le16 vid_le)
+ data_byte >>= 1;
+
+ if (temp)
+- crc ^= poly;
++ crc ^= CRC32_POLY_LE;
+ }
+
+ return crc;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5743-net-amd-fix-return-type-of-ndo_start_xmit-function.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5743-net-amd-fix-return-type-of-ndo_start_xmit-function.patch
new file mode 100644
index 00000000..77dba6a6
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5743-net-amd-fix-return-type-of-ndo_start_xmit-function.patch
@@ -0,0 +1,45 @@
+From 56abff6d96f7b1f48e2f01b062d14613846275b7 Mon Sep 17 00:00:00 2001
+From: YueHaibing <yuehaibing@huawei.com>
+Date: Wed, 19 Sep 2018 18:50:17 +0800
+Subject: [PATCH 5743/5758] net: amd: fix return type of ndo_start_xmit
+ function
+
+The method ndo_start_xmit() is defined as returning a 'netdev_tx_t',
+which is a typedef for an enum type, so make sure the implementation in
+this driver returns a 'netdev_tx_t' value, and change the function
+return type to netdev_tx_t.
+
+Found by coccinelle.
+
+Signed-off-by: YueHaibing <yuehaibing@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+index 093e2fd..93b4048 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -2009,7 +2009,7 @@ static int xgbe_close(struct net_device *netdev)
+ return 0;
+ }
+
+-static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
++static netdev_tx_t xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
+ {
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+@@ -2018,7 +2018,7 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
+ struct xgbe_ring *ring;
+ struct xgbe_packet_data *packet;
+ struct netdev_queue *txq;
+- int ret;
++ netdev_tx_t ret;
+
+ DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);
+
+--
+2.7.4
+
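For illustration only, the shape of an ndo_start_xmit implementation with the
corrected return type; my_xmit is hypothetical and the actual hardware
queueing is elided:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	if (!netif_carrier_ok(netdev)) {
		dev_kfree_skb_any(skb);
		netdev->stats.tx_dropped++;
		return NETDEV_TX_OK;	/* consumed, even though dropped */
	}

	/*
	 * A real driver maps and queues the skb to hardware here, returning
	 * NETDEV_TX_BUSY only when the packet was not consumed at all.
	 */
	return NETDEV_TX_OK;
}
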
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5744-net-phy-Add-helper-for-advertise-to-lcl-value.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5744-net-phy-Add-helper-for-advertise-to-lcl-value.patch
new file mode 100644
index 00000000..bb0f4363
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5744-net-phy-Add-helper-for-advertise-to-lcl-value.patch
@@ -0,0 +1,71 @@
+From 8bdf040cce4c23fb86265d962915c8cee6abe5a7 Mon Sep 17 00:00:00 2001
+From: Andrew Lunn <andrew@lunn.ch>
+Date: Sat, 29 Sep 2018 23:04:13 +0200
+Subject: [PATCH 5744/5758] net: phy: Add helper for advertise to lcl value
+
+Add a helper to convert the local advertising to local (lcl) pause
+capabilities, which are then used to resolve pause flow control settings.
+
+Signed-off-by: Andrew Lunn <andrew@lunn.ch>
+Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
+Reviewed-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 5 +----
+ include/linux/mii.h | 20 ++++++++++++++++++++
+ 2 files changed, 21 insertions(+), 4 deletions(-)
+ mode change 100644 => 100755 include/linux/mii.h
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+index 194ec27..151bdb6 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+@@ -1497,10 +1497,7 @@ static void xgbe_phy_phydev_flowctrl(struct xgbe_prv_data *pdata)
+ if (!phy_data->phydev)
+ return;
+
+- if (phy_data->phydev->advertising & ADVERTISED_Pause)
+- lcl_adv |= ADVERTISE_PAUSE_CAP;
+- if (phy_data->phydev->advertising & ADVERTISED_Asym_Pause)
+- lcl_adv |= ADVERTISE_PAUSE_ASYM;
++ lcl_adv = ethtool_adv_to_lcl_adv_t(phy_data->phydev->advertising);
+
+ if (phy_data->phydev->pause) {
+ XGBE_SET_LP_ADV(lks, Pause);
+diff --git a/include/linux/mii.h b/include/linux/mii.h
+old mode 100644
+new mode 100755
+index 55000ee..63cd587
+--- a/include/linux/mii.h
++++ b/include/linux/mii.h
+@@ -302,6 +302,26 @@ static inline u32 mii_lpa_to_ethtool_lpa_x(u32 lpa)
+ return result | mii_adv_to_ethtool_adv_x(lpa);
+ }
+
++
++/**
++ * ethtool_adv_to_lcl_adv_t
++ * @advertising: the ethtool advertisement settings
++ *
++ * A small helper function that translates ethtool advertising to local
++ * (lcl) pause capabilities.
++ */
++static inline u32 ethtool_adv_to_lcl_adv_t(u32 advertising)
++{
++ u32 lcl_adv = 0;
++
++ if (advertising & ADVERTISED_Pause)
++ lcl_adv |= ADVERTISE_PAUSE_CAP;
++ if (advertising & ADVERTISED_Asym_Pause)
++ lcl_adv |= ADVERTISE_PAUSE_ASYM;
++
++ return lcl_adv;
++}
++
+ /**
+ * mii_advertise_flowctrl - get flow control advertisement flags
+ * @cap: Flow control capabilities (FLOW_CTRL_RX, FLOW_CTRL_TX or both)
+--
+2.7.4
+
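For illustration only, a sketch of how the new helper combines with
mii_resolve_flowctrl_fdx() to settle RX/TX pause from the local and
link-partner advertisements; my_resolve_pause is hypothetical:

#include <linux/mii.h>
#include <linux/phy.h>

static void my_resolve_pause(struct phy_device *phydev,
			     bool *rx_pause, bool *tx_pause)
{
	u16 lcl_adv = ethtool_adv_to_lcl_adv_t(phydev->advertising);
	u16 rmt_adv = 0;
	u8 cap;

	/* build the link-partner side from the resolved PHY pause bits */
	if (phydev->pause)
		rmt_adv |= LPA_PAUSE_CAP;
	if (phydev->asym_pause)
		rmt_adv |= LPA_PAUSE_ASYM;

	cap = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
	*rx_pause = !!(cap & FLOW_CTRL_RX);
	*tx_pause = !!(cap & FLOW_CTRL_TX);
}
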
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5745-drivers-net-remove-net-busy_poll.h-inclusion-when-no.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5745-drivers-net-remove-net-busy_poll.h-inclusion-when-no.patch
new file mode 100644
index 00000000..7a1e240d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5745-drivers-net-remove-net-busy_poll.h-inclusion-when-no.patch
@@ -0,0 +1,35 @@
+From 5c6a31fed9eb8e880f669369e269a4c347cf1355 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 25 Oct 2018 06:42:12 -0700
+Subject: [PATCH 5745/5758] drivers: net: remove <net/busy_poll.h> inclusion
+ when not needed
+
+Drivers using generic NAPI interface no longer need to include
+<net/busy_poll.h>, since busy polling was moved to core networking
+stack long ago.
+
+See commit 79e7fff47b7b ("net: remove support for per driver
+ndo_busy_poll()") for reference.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+index 93b4048..e1c0fea 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -119,7 +119,6 @@
+ #include <linux/tcp.h>
+ #include <linux/if_vlan.h>
+ #include <linux/interrupt.h>
+-#include <net/busy_poll.h>
+ #include <linux/clk.h>
+ #include <linux/if_ether.h>
+ #include <linux/net_tstamp.h>
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5746-amd-eMMC-sdhci-HS400-workaround-for-ZP.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5746-amd-eMMC-sdhci-HS400-workaround-for-ZP.patch
new file mode 100644
index 00000000..e66372cb
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5746-amd-eMMC-sdhci-HS400-workaround-for-ZP.patch
@@ -0,0 +1,103 @@
+From 527e0c11bf326ce3e14cc85a79f0cf95cbcff7af Mon Sep 17 00:00:00 2001
+From: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+Date: Tue, 12 Feb 2019 20:08:46 +0530
+Subject: [PATCH 5746/5758] amd-eMMC sdhci HS400 workaround for ZP
+
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+Signed-off-by: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+---
+ drivers/mmc/core/mmc.c | 2 ++
+ drivers/mmc/host/sdhci-acpi.c | 1 -
+ drivers/mmc/host/sdhci.c | 9 ---------
+ drivers/mmc/host/sdhci.h | 1 -
+ include/linux/mmc/host.h | 1 -
+ 5 files changed, 2 insertions(+), 12 deletions(-)
+ mode change 100755 => 100644 drivers/mmc/host/sdhci-acpi.c
+ mode change 100755 => 100644 drivers/mmc/host/sdhci.c
+ mode change 100755 => 100644 include/linux/mmc/host.h
+
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+index cd3604d..20be475 100755
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -1405,6 +1405,8 @@ static int mmc_select_hs400es(struct mmc_card *card)
+ if (host->ops->hs400_complete)
+ host->ops->hs400_complete(host);
+
++ if (host->ops->set_hs400_dll)
++ host->ops->set_hs400_dll(host);
+ return 0;
+
+ out_err:
+diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
+old mode 100755
+new mode 100644
+index 33592a6..b01e906
+--- a/drivers/mmc/host/sdhci-acpi.c
++++ b/drivers/mmc/host/sdhci-acpi.c
+@@ -411,7 +411,6 @@ static const struct sdhci_ops sdhci_acpi_ops_amd = {
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+- .set_hs400_dll = sdhci_acpi_amd_hs400_dll,
+ };
+
+ static const struct sdhci_acpi_chip sdhci_acpi_chip_amd = {
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+old mode 100755
+new mode 100644
+index d7c9274..46346ec
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -1983,14 +1983,6 @@ static void sdhci_hw_reset(struct mmc_host *mmc)
+ host->ops->hw_reset(host);
+ }
+
+-static void sdhci_set_hs400_dll(struct mmc_host *mmc)
+-{
+- struct sdhci_host *host = mmc_priv(mmc);
+-
+- if (host->ops && host->ops->set_hs400_dll)
+- host->ops->set_hs400_dll(host);
+-}
+-
+ static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
+ {
+ if (!(host->flags & SDHCI_DEVICE_DEAD)) {
+@@ -2478,7 +2470,6 @@ static const struct mmc_host_ops sdhci_ops = {
+ .get_cd = sdhci_get_cd,
+ .get_ro = sdhci_get_ro,
+ .hw_reset = sdhci_hw_reset,
+- .set_hs400_dll = sdhci_set_hs400_dll,
+ .enable_sdio_irq = sdhci_enable_sdio_irq,
+ .start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
+ .prepare_hs400_tuning = sdhci_prepare_hs400_tuning,
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+index dd3219e..027d85a 100755
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -611,7 +611,6 @@ struct sdhci_ops {
+ int (*platform_execute_tuning)(struct sdhci_host *host, u32 opcode);
+ void (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs);
+ void (*hw_reset)(struct sdhci_host *host);
+- void (*set_hs400_dll)(struct sdhci_host *host);
+ void (*adma_workaround)(struct sdhci_host *host, u32 intmask);
+ void (*card_event)(struct sdhci_host *host);
+ void (*voltage_switch)(struct sdhci_host *host);
+diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
+old mode 100755
+new mode 100644
+index ba4af38..843c38f
+--- a/include/linux/mmc/host.h
++++ b/include/linux/mmc/host.h
+@@ -159,7 +159,6 @@ struct mmc_host_ops {
+ unsigned int max_dtr, int host_drv,
+ int card_drv, int *drv_type);
+ void (*hw_reset)(struct mmc_host *host);
+- void (*set_hs400_dll)(struct mmc_host *host);
+ void (*card_event)(struct mmc_host *host);
+
+ /*
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5747-drm-amd-display-Raise-dispclk-value-for-CZ.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5747-drm-amd-display-Raise-dispclk-value-for-CZ.patch
new file mode 100644
index 00000000..8a75719f
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5747-drm-amd-display-Raise-dispclk-value-for-CZ.patch
@@ -0,0 +1,48 @@
+From 0a5c7924cab22adc4f0e062850072402c53e8c27 Mon Sep 17 00:00:00 2001
+From: Kalyan Alle <kalyan.alle@amd.com>
+Date: Tue, 20 Nov 2018 16:50:29 -0500
+Subject: [PATCH 5747/5758] drm/amd/display: Raise dispclk value for CZ.
+
+[Why]
+Visual corruption is observed at 4k@60Hz due to a low
+display clock value.
+
+[How]
+There was an earlier patch for dispclk:
+'drm/amd/display: Raise dispclk value for dce_update_clocks'
+Add the same +15% workaround to dce11_update_clocks.
+
+Signed-off-by: Roman Li <Roman.Li@amd.com>
+Reviewed-by: Nicholas Kazlauskas <Nicholas.Kazlauskas@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
+index 493e2f4..1dfe93e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
+@@ -648,6 +648,10 @@ static void dce11_update_clocks(struct clk_mgr *clk_mgr,
+ {
+ struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+ struct dm_pp_power_level_change_request level_change_req;
++ int unpatched_disp_clk = context->bw.dce.dispclk_khz;
++
++ if (!clk_mgr_dce->dfs_bypass_active)
++ context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
+
+ level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
+ /* get max clock state from PPLIB */
+@@ -662,6 +666,8 @@ static void dce11_update_clocks(struct clk_mgr *clk_mgr,
+ clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz;
+ }
+ dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
++
++ context->bw.dce.dispclk_khz = unpatched_disp_clk;
+ }
+
+ static void dce112_update_clocks(struct clk_mgr *clk_mgr,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5748-drm-amdgpu-gfx8-disable-EDC.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5748-drm-amdgpu-gfx8-disable-EDC.patch
new file mode 100644
index 00000000..0de9769a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5748-drm-amdgpu-gfx8-disable-EDC.patch
@@ -0,0 +1,38 @@
+From 241cbaf30db2728ec005607a659358174cb3a486 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Tue, 23 Aug 2016 17:37:36 -0400
+Subject: [PATCH 5748/5758] drm/amdgpu/gfx8: disable EDC
+
+This fixes the Unigine Heaven application soft hang
+while running in extreme preset mode.
+
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index 90cbf66..43272fb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -1665,7 +1665,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
+ DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
+ goto fail;
+ }
+-
++#if 0
+ tmp = REG_SET_FIELD(tmp, GB_EDC_MODE, DED_MODE, 2);
+ tmp = REG_SET_FIELD(tmp, GB_EDC_MODE, PROP_FED, 1);
+ WREG32(mmGB_EDC_MODE, tmp);
+@@ -1673,6 +1673,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
+ tmp = RREG32(mmCC_GC_EDC_CONFIG);
+ tmp = REG_SET_FIELD(tmp, CC_GC_EDC_CONFIG, DIS_EDC, 0) | 1;
+ WREG32(mmCC_GC_EDC_CONFIG, tmp);
++#endif
+
+ /* read back registers to clear the counters */
+ for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++)
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5749-net-phy-Also-request-modules-for-C45-IDs.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5749-net-phy-Also-request-modules-for-C45-IDs.patch
new file mode 100644
index 00000000..d9ae9e3c
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5749-net-phy-Also-request-modules-for-C45-IDs.patch
@@ -0,0 +1,56 @@
+From 061b7fcf0d45da2dc816a58f0c01c35c59113871 Mon Sep 17 00:00:00 2001
+From: Jose Abreu <jose.abreu@synopsys.com>
+Date: Sun, 2 Dec 2018 16:33:14 +0100
+Subject: [PATCH 5749/5758] net: phy: Also request modules for C45 IDs
+
+The logic of phy_device_create() requests PHY modules according to PHY ID,
+but for C45 PHYs we use a different field for the IDs.
+
+Let's also request the modules for these IDs.
+
+Changes from v1:
+- Only request C22 modules if C45 are not present (Andrew)
+
+Signed-off-by: Jose Abreu <joabreu@synopsys.com>
+Cc: Andrew Lunn <andrew@lunn.ch>
+Cc: Florian Fainelli <f.fainelli@gmail.com>
+Cc: "David S. Miller" <davem@davemloft.net>
+Cc: Joao Pinto <joao.pinto@synopsys.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/net/phy/phy_device.c | 16 +++++++++++++++-
+ 1 file changed, 15 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index f16af99..a711f62 100755
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -402,7 +402,21 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
+ * driver will get bored and give up as soon as it finds that
+ * there's no driver _already_ loaded.
+ */
+- request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT, MDIO_ID_ARGS(phy_id));
++ if (is_c45 && c45_ids) {
++ const int num_ids = ARRAY_SIZE(c45_ids->device_ids);
++ int i;
++
++ for (i = 1; i < num_ids; i++) {
++ if (!(c45_ids->devices_in_package & (1 << i)))
++ continue;
++
++ request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT,
++ MDIO_ID_ARGS(c45_ids->device_ids[i]));
++ }
++ } else {
++ request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT,
++ MDIO_ID_ARGS(phy_id));
++ }
+
+ device_initialize(&mdiodev->dev);
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5750-amd-xgbe-Fix-mdio-access-for-non-zero-ports-and-clau.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5750-amd-xgbe-Fix-mdio-access-for-non-zero-ports-and-clau.patch
new file mode 100644
index 00000000..8097ea4d
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5750-amd-xgbe-Fix-mdio-access-for-non-zero-ports-and-clau.patch
@@ -0,0 +1,94 @@
+From 92270fde5c8ff319d5839315d934f73717bfb358 Mon Sep 17 00:00:00 2001
+From: "Lendacky, Thomas" <Thomas.Lendacky@amd.com>
+Date: Thu, 17 Jan 2019 14:20:14 +0000
+Subject: [PATCH 5750/5758] amd-xgbe: Fix mdio access for non-zero ports and
+ clause 45 PHYs
+
+The XGBE hardware has support for performing MDIO operations using an
+MDIO command request. The driver mistakenly uses the mdio port address
+as the MDIO command request device address instead of the MDIO command
+request port address. Additionally, the driver does not properly check
+for and create a clause 45 MDIO command.
+
+Check the supplied MDIO register to determine if the request is a clause
+45 operation (MII_ADDR_C45). For a clause 45 operation, extract the device
+address and register number from the supplied MDIO register and use them
+to set the MDIO command request device address and register number fields.
+For a clause 22 operation, the MDIO request device address is set to zero
+and the MDIO command request register number is set to the supplied MDIO
+register. In either case, the supplied MDIO port address is used as the
+MDIO command request port address.
+
+Fixes: 732f2ab7afb9 ("amd-xgbe: Add support for MDIO attached PHYs")
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Tested-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-common.h | 2 --
+ drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 22 ++++++++++++++++------
+ 2 files changed, 16 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+index d272dc6..b40d437 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+@@ -431,8 +431,6 @@
+ #define MAC_MDIOSCAR_PA_WIDTH 5
+ #define MAC_MDIOSCAR_RA_INDEX 0
+ #define MAC_MDIOSCAR_RA_WIDTH 16
+-#define MAC_MDIOSCAR_REG_INDEX 0
+-#define MAC_MDIOSCAR_REG_WIDTH 21
+ #define MAC_MDIOSCCDR_BUSY_INDEX 22
+ #define MAC_MDIOSCCDR_BUSY_WIDTH 1
+ #define MAC_MDIOSCCDR_CMD_INDEX 16
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+index 1e929a1..4666084 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+@@ -1284,6 +1284,20 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
+ }
+ }
+
++static unsigned int xgbe_create_mdio_sca(int port, int reg)
++{
++ unsigned int mdio_sca, da;
++
++ da = (reg & MII_ADDR_C45) ? reg >> 16 : 0;
++
++ mdio_sca = 0;
++ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg);
++ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port);
++ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da);
++
++ return mdio_sca;
++}
++
+ static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
+ int reg, u16 val)
+ {
+@@ -1291,9 +1305,7 @@ static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
+
+ reinit_completion(&pdata->mdio_complete);
+
+- mdio_sca = 0;
+- XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
+- XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
++ mdio_sca = xgbe_create_mdio_sca(addr, reg);
+ XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
+
+ mdio_sccd = 0;
+@@ -1317,9 +1329,7 @@ static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
+
+ reinit_completion(&pdata->mdio_complete);
+
+- mdio_sca = 0;
+- XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
+- XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
++ mdio_sca = xgbe_create_mdio_sca(addr, reg);
+ XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
+
+ mdio_sccd = 0;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5751-Revert-drm-amdgpu-make-gfx9-enter-into-rlc-safe-mode.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5751-Revert-drm-amdgpu-make-gfx9-enter-into-rlc-safe-mode.patch
new file mode 100644
index 00000000..7f4bd6c4
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5751-Revert-drm-amdgpu-make-gfx9-enter-into-rlc-safe-mode.patch
@@ -0,0 +1,36 @@
+From 824b82e5e9352acec85917a8e400e472f55a07d9 Mon Sep 17 00:00:00 2001
+From: Raveendra Talabattula <raveendra.talabattula@amd.com>
+Date: Thu, 10 Jan 2019 16:22:35 +0530
+Subject: [PATCH 5751/5758] Revert drm/amdgpu: make gfx9 enter into rlc safe
+ mode when set
+
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 4 ----
+ 1 file changed, 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index ce4bb14..ac2a843 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -3605,8 +3605,6 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
+ {
+ uint32_t data, def;
+
+- amdgpu_gfx_rlc_enter_safe_mode(adev);
+-
+ /* It is disabled by HW by default */
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
+ /* 1 - RLC_CGTT_MGCG_OVERRIDE */
+@@ -3671,8 +3669,6 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
+ WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
+ }
+ }
+-
+- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ }
+
+ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5752-Revert-drm-amdgpu-abstract-the-function-of-enter-exi.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5752-Revert-drm-amdgpu-abstract-the-function-of-enter-exi.patch
new file mode 100644
index 00000000..a710b553
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5752-Revert-drm-amdgpu-abstract-the-function-of-enter-exi.patch
@@ -0,0 +1,1446 @@
+From f3f4335f739ced7a8c0545b5455893a8a85387f4 Mon Sep 17 00:00:00 2001
+From: Raveendra Talabattula <raveendra.talabattula@amd.com>
+Date: Thu, 10 Jan 2019 16:23:30 +0530
+Subject: [PATCH 5752/5758] Revert drm/amdgpu: abstract the function of
+ enter/exit safe mode
+
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c | 229 +--------------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h | 33 ++-
+ drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 6 +-
+ drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 24 ++-
+ drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 148 ++++++++++---
+ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 201 ++++++++++++------
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 183 ++++++++++++----
+ drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 6 +-
+ .../gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c | 12 +-
+ .../gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c | 36 ++--
+ 10 files changed, 470 insertions(+), 408 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
+index c8793e6..c5459ab 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
+@@ -1,3 +1,4 @@
++
+ /*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+@@ -22,238 +23,12 @@
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+-#include <linux/firmware.h>
++
+ #include "amdgpu.h"
+ #include "amdgpu_gfx.h"
+ #include "amdgpu_rlc.h"
+
+ /**
+- * amdgpu_gfx_rlc_enter_safe_mode - Set RLC into safe mode
+- *
+- * @adev: amdgpu_device pointer
+- *
+- * Set RLC enter into safe mode if RLC is enabled and haven't in safe mode.
+- */
+-void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev)
+-{
+- if (adev->gfx.rlc.in_safe_mode)
+- return;
+-
+- /* if RLC is not enabled, do nothing */
+- if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
+- return;
+-
+- if (adev->cg_flags &
+- (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
+- AMD_CG_SUPPORT_GFX_3D_CGCG)) {
+- adev->gfx.rlc.funcs->set_safe_mode(adev);
+- adev->gfx.rlc.in_safe_mode = true;
+- }
+-}
+-
+-/**
+- * amdgpu_gfx_rlc_exit_safe_mode - Set RLC out of safe mode
+- *
+- * @adev: amdgpu_device pointer
+- *
+- * Set RLC exit safe mode if RLC is enabled and have entered into safe mode.
+- */
+-void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev)
+-{
+- if (!(adev->gfx.rlc.in_safe_mode))
+- return;
+-
+- /* if RLC is not enabled, do nothing */
+- if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
+- return;
+-
+- if (adev->cg_flags &
+- (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
+- AMD_CG_SUPPORT_GFX_3D_CGCG)) {
+- adev->gfx.rlc.funcs->unset_safe_mode(adev);
+- adev->gfx.rlc.in_safe_mode = false;
+- }
+-}
+-
+-/**
+- * amdgpu_gfx_rlc_init_sr - Init save restore block
+- *
+- * @adev: amdgpu_device pointer
+- * @dws: the size of save restore block
+- *
+- * Allocate and setup value to save restore block of rlc.
+- * Returns 0 on succeess or negative error code if allocate failed.
+- */
+-int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws)
+-{
+- const u32 *src_ptr;
+- volatile u32 *dst_ptr;
+- u32 i;
+- int r;
+-
+- /* allocate save restore block */
+- r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+- AMDGPU_GEM_DOMAIN_VRAM,
+- &adev->gfx.rlc.save_restore_obj,
+- &adev->gfx.rlc.save_restore_gpu_addr,
+- (void **)&adev->gfx.rlc.sr_ptr);
+- if (r) {
+- dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
+- amdgpu_gfx_rlc_fini(adev);
+- return r;
+- }
+-
+- /* write the sr buffer */
+- src_ptr = adev->gfx.rlc.reg_list;
+- dst_ptr = adev->gfx.rlc.sr_ptr;
+- for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
+- dst_ptr[i] = cpu_to_le32(src_ptr[i]);
+- amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
+- amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
+-
+- return 0;
+-}
+-
+-/**
+- * amdgpu_gfx_rlc_init_csb - Init clear state block
+- *
+- * @adev: amdgpu_device pointer
+- *
+- * Allocate and setup value to clear state block of rlc.
+- * Returns 0 on succeess or negative error code if allocate failed.
+- */
+-int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev)
+-{
+- volatile u32 *dst_ptr;
+- u32 dws;
+- int r;
+-
+- /* allocate clear state block */
+- adev->gfx.rlc.clear_state_size = dws = adev->gfx.rlc.funcs->get_csb_size(adev);
+- r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+- AMDGPU_GEM_DOMAIN_VRAM,
+- &adev->gfx.rlc.clear_state_obj,
+- &adev->gfx.rlc.clear_state_gpu_addr,
+- (void **)&adev->gfx.rlc.cs_ptr);
+- if (r) {
+- dev_err(adev->dev, "(%d) failed to create rlc csb bo\n", r);
+- amdgpu_gfx_rlc_fini(adev);
+- return r;
+- }
+-
+- /* set up the cs buffer */
+- dst_ptr = adev->gfx.rlc.cs_ptr;
+- adev->gfx.rlc.funcs->get_csb_buffer(adev, dst_ptr);
+- amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
+- amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
+- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+-
+- return 0;
+-}
+-
+-/**
+- * amdgpu_gfx_rlc_init_cpt - Init cp table
+- *
+- * @adev: amdgpu_device pointer
+- *
+- * Allocate and setup value to cp table of rlc.
+- * Returns 0 on succeess or negative error code if allocate failed.
+- */
+-int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev)
+-{
+- int r;
+-
+- r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
+- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+- &adev->gfx.rlc.cp_table_obj,
+- &adev->gfx.rlc.cp_table_gpu_addr,
+- (void **)&adev->gfx.rlc.cp_table_ptr);
+- if (r) {
+- dev_err(adev->dev, "(%d) failed to create cp table bo\n", r);
+- amdgpu_gfx_rlc_fini(adev);
+- return r;
+- }
+-
+- /* set up the cp table */
+- amdgpu_gfx_rlc_setup_cp_table(adev);
+- amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
+- amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
+-
+- return 0;
+-}
+-
+-/**
+- * amdgpu_gfx_rlc_setup_cp_table - setup cp the buffer of cp table
+- *
+- * @adev: amdgpu_device pointer
+- *
+- * Write cp firmware data into cp table.
+- */
+-void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev)
+-{
+- const __le32 *fw_data;
+- volatile u32 *dst_ptr;
+- int me, i, max_me;
+- u32 bo_offset = 0;
+- u32 table_offset, table_size;
+-
+- max_me = adev->gfx.rlc.funcs->get_cp_table_num(adev);
+-
+- /* write the cp table buffer */
+- dst_ptr = adev->gfx.rlc.cp_table_ptr;
+- for (me = 0; me < max_me; me++) {
+- if (me == 0) {
+- const struct gfx_firmware_header_v1_0 *hdr =
+- (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
+- fw_data = (const __le32 *)
+- (adev->gfx.ce_fw->data +
+- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+- table_offset = le32_to_cpu(hdr->jt_offset);
+- table_size = le32_to_cpu(hdr->jt_size);
+- } else if (me == 1) {
+- const struct gfx_firmware_header_v1_0 *hdr =
+- (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
+- fw_data = (const __le32 *)
+- (adev->gfx.pfp_fw->data +
+- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+- table_offset = le32_to_cpu(hdr->jt_offset);
+- table_size = le32_to_cpu(hdr->jt_size);
+- } else if (me == 2) {
+- const struct gfx_firmware_header_v1_0 *hdr =
+- (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
+- fw_data = (const __le32 *)
+- (adev->gfx.me_fw->data +
+- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+- table_offset = le32_to_cpu(hdr->jt_offset);
+- table_size = le32_to_cpu(hdr->jt_size);
+- } else if (me == 3) {
+- const struct gfx_firmware_header_v1_0 *hdr =
+- (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+- fw_data = (const __le32 *)
+- (adev->gfx.mec_fw->data +
+- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+- table_offset = le32_to_cpu(hdr->jt_offset);
+- table_size = le32_to_cpu(hdr->jt_size);
+- } else if (me == 4) {
+- const struct gfx_firmware_header_v1_0 *hdr =
+- (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
+- fw_data = (const __le32 *)
+- (adev->gfx.mec2_fw->data +
+- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+- table_offset = le32_to_cpu(hdr->jt_offset);
+- table_size = le32_to_cpu(hdr->jt_size);
+- }
+-
+- for (i = 0; i < table_size; i ++) {
+- dst_ptr[bo_offset + i] =
+- cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
+- }
+-
+- bo_offset += table_size;
+- }
+-}
+-
+-/**
+ * amdgpu_gfx_rlc_fini - Free BO which used for RLC
+ *
+ * @adev: amdgpu_device pointer
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
+index 49a8ab5..b3b0920 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
+@@ -1,3 +1,4 @@
++
+ /*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+@@ -27,13 +28,9 @@
+ #include "clearstate_defs.h"
+
+ struct amdgpu_rlc_funcs {
+- bool (*is_rlc_enabled)(struct amdgpu_device *adev);
+- void (*set_safe_mode)(struct amdgpu_device *adev);
+- void (*unset_safe_mode)(struct amdgpu_device *adev);
++ void (*enter_safe_mode)(struct amdgpu_device *adev);
++ void (*exit_safe_mode)(struct amdgpu_device *adev);
+ int (*init)(struct amdgpu_device *adev);
+- u32 (*get_csb_size)(struct amdgpu_device *adev);
+- void (*get_csb_buffer)(struct amdgpu_device *adev, volatile u32 *buffer);
+- int (*get_cp_table_num)(struct amdgpu_device *adev);
+ int (*resume)(struct amdgpu_device *adev);
+ void (*stop)(struct amdgpu_device *adev);
+ void (*reset)(struct amdgpu_device *adev);
+@@ -42,21 +39,21 @@ struct amdgpu_rlc_funcs {
+
+ struct amdgpu_rlc {
+ /* for power gating */
+- struct amdgpu_bo *save_restore_obj;
+- uint64_t save_restore_gpu_addr;
+- volatile uint32_t *sr_ptr;
++ struct amdgpu_bo *save_restore_obj;
++ uint64_t save_restore_gpu_addr;
++ volatile uint32_t *sr_ptr;
+ const u32 *reg_list;
+ u32 reg_list_size;
+ /* for clear state */
+- struct amdgpu_bo *clear_state_obj;
+- uint64_t clear_state_gpu_addr;
+- volatile uint32_t *cs_ptr;
++ struct amdgpu_bo *clear_state_obj;
++ uint64_t clear_state_gpu_addr;
++ volatile uint32_t *cs_ptr;
+ const struct cs_section_def *cs_data;
+ u32 clear_state_size;
+ /* for cp tables */
+- struct amdgpu_bo *cp_table_obj;
+- uint64_t cp_table_gpu_addr;
+- volatile uint32_t *cp_table_ptr;
++ struct amdgpu_bo *cp_table_obj;
++ uint64_t cp_table_gpu_addr;
++ volatile uint32_t *cp_table_ptr;
+ u32 cp_table_size;
+
+ /* safe mode for updating CG/PG state */
+@@ -87,12 +84,6 @@ struct amdgpu_rlc {
+ bool is_rlc_v2_1;
+ };
+
+-void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev);
+-void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev);
+-int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws);
+-int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev);
+-int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev);
+-void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev);
+ void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev);
+
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+index e02631d..ecd69ab 100644
+--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
++++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+@@ -743,19 +743,19 @@ static int ci_enable_didt(struct amdgpu_device *adev, bool enable)
+
+ if (pi->caps_sq_ramping || pi->caps_db_ramping ||
+ pi->caps_td_ramping || pi->caps_tcp_ramping) {
+- amdgpu_gfx_rlc_enter_safe_mode(adev);
++ adev->gfx.rlc.funcs->enter_safe_mode(adev);
+
+ if (enable) {
+ ret = ci_program_pt_config_registers(adev, didt_config_ci);
+ if (ret) {
+- amdgpu_gfx_rlc_exit_safe_mode(adev);
++ adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ return ret;
+ }
+ }
+
+ ci_do_enable_didt(adev, enable);
+
+- amdgpu_gfx_rlc_exit_safe_mode(adev);
++ adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ }
+
+ return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+index 075407e..abc8ec6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+@@ -2401,7 +2401,7 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
+ {
+ const u32 *src_ptr;
+ volatile u32 *dst_ptr;
+- u32 dws;
++ u32 dws, i;
+ u64 reg_list_mc_addr;
+ const struct cs_section_def *cs_data;
+ int r;
+@@ -2416,10 +2416,26 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
+ cs_data = adev->gfx.rlc.cs_data;
+
+ if (src_ptr) {
+- /* init save restore block */
+- r = amdgpu_gfx_rlc_init_sr(adev, dws);
+- if (r)
++ /* save restore block */
++ r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
++ AMDGPU_GEM_DOMAIN_VRAM,
++ &adev->gfx.rlc.save_restore_obj,
++ &adev->gfx.rlc.save_restore_gpu_addr,
++ (void **)&adev->gfx.rlc.sr_ptr);
++ if (r) {
++ dev_warn(adev->dev, "(%d) create RLC sr bo failed\n",
++ r);
++ amdgpu_gfx_rlc_fini(adev);
+ return r;
++ }
++
++ /* write the sr buffer */
++ dst_ptr = adev->gfx.rlc.sr_ptr;
++ for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
++ dst_ptr[i] = cpu_to_le32(src_ptr[i]);
++
++ amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
++ amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
+ }
+
+ if (cs_data) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+index 6815153..19a0e4f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+@@ -882,6 +882,7 @@ static const u32 kalindi_rlc_save_restore_register_list[] =
+
+ static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev);
+ static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
++static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev);
+ static void gfx_v7_0_init_pg(struct amdgpu_device *adev);
+ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev);
+
+@@ -3290,7 +3291,8 @@ static void gfx_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
+ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
+ {
+ const u32 *src_ptr;
+- u32 dws;
++ volatile u32 *dst_ptr;
++ u32 dws, i;
+ const struct cs_section_def *cs_data;
+ int r;
+
+@@ -3317,23 +3319,66 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
+ cs_data = adev->gfx.rlc.cs_data;
+
+ if (src_ptr) {
+- /* init save restore block */
+- r = amdgpu_gfx_rlc_init_sr(adev, dws);
+- if (r)
++ /* save restore block */
++ r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
++ AMDGPU_GEM_DOMAIN_VRAM,
++ &adev->gfx.rlc.save_restore_obj,
++ &adev->gfx.rlc.save_restore_gpu_addr,
++ (void **)&adev->gfx.rlc.sr_ptr);
++ if (r) {
++ dev_warn(adev->dev, "(%d) create, pin or map of RLC sr bo failed\n", r);
++ amdgpu_gfx_rlc_fini(adev);
+ return r;
++ }
++
++ /* write the sr buffer */
++ dst_ptr = adev->gfx.rlc.sr_ptr;
++ for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
++ dst_ptr[i] = cpu_to_le32(src_ptr[i]);
++ amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
++ amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
+ }
+
+ if (cs_data) {
+- /* init clear state block */
+- r = amdgpu_gfx_rlc_init_csb(adev);
+- if (r)
++ /* clear state block */
++ adev->gfx.rlc.clear_state_size = dws = gfx_v7_0_get_csb_size(adev);
++
++ r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
++ AMDGPU_GEM_DOMAIN_VRAM,
++ &adev->gfx.rlc.clear_state_obj,
++ &adev->gfx.rlc.clear_state_gpu_addr,
++ (void **)&adev->gfx.rlc.cs_ptr);
++ if (r) {
++ dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
++ amdgpu_gfx_rlc_fini(adev);
+ return r;
++ }
++
++ /* set up the cs buffer */
++ dst_ptr = adev->gfx.rlc.cs_ptr;
++ gfx_v7_0_get_csb_buffer(adev, dst_ptr);
++ amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
++ amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+ }
+
+ if (adev->gfx.rlc.cp_table_size) {
+- r = amdgpu_gfx_rlc_init_cpt(adev);
+- if (r)
++
++ r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
++ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
++ &adev->gfx.rlc.cp_table_obj,
++ &adev->gfx.rlc.cp_table_gpu_addr,
++ (void **)&adev->gfx.rlc.cp_table_ptr);
++ if (r) {
++ dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
++ amdgpu_gfx_rlc_fini(adev);
+ return r;
++ }
++
++ gfx_v7_0_init_cp_pg_table(adev);
++
++ amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
++ amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
++
+ }
+
+ return 0;
+@@ -3414,12 +3459,7 @@ static u32 gfx_v7_0_halt_rlc(struct amdgpu_device *adev)
+ return orig;
+ }
+
+-static bool gfx_v7_0_is_rlc_enabled(struct amdgpu_device *adev)
+-{
+- return true;
+-}
+-
+-static void gfx_v7_0_set_safe_mode(struct amdgpu_device *adev)
++static void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
+ {
+ u32 tmp, i, mask;
+
+@@ -3441,7 +3481,7 @@ static void gfx_v7_0_set_safe_mode(struct amdgpu_device *adev)
+ }
+ }
+
+-static void gfx_v7_0_unset_safe_mode(struct amdgpu_device *adev)
++static void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
+ {
+ u32 tmp;
+
+@@ -3757,12 +3797,72 @@ static void gfx_v7_0_enable_gds_pg(struct amdgpu_device *adev, bool enable)
+ WREG32(mmRLC_PG_CNTL, data);
+ }
+
+-static int gfx_v7_0_cp_pg_table_num(struct amdgpu_device *adev)
++static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev)
+ {
++ const __le32 *fw_data;
++ volatile u32 *dst_ptr;
++ int me, i, max_me = 4;
++ u32 bo_offset = 0;
++ u32 table_offset, table_size;
++
+ if (adev->asic_type == CHIP_KAVERI)
+- return 5;
+- else
+- return 4;
++ max_me = 5;
++
++ if (adev->gfx.rlc.cp_table_ptr == NULL)
++ return;
++
++ /* write the cp table buffer */
++ dst_ptr = adev->gfx.rlc.cp_table_ptr;
++ for (me = 0; me < max_me; me++) {
++ if (me == 0) {
++ const struct gfx_firmware_header_v1_0 *hdr =
++ (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
++ fw_data = (const __le32 *)
++ (adev->gfx.ce_fw->data +
++ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
++ table_offset = le32_to_cpu(hdr->jt_offset);
++ table_size = le32_to_cpu(hdr->jt_size);
++ } else if (me == 1) {
++ const struct gfx_firmware_header_v1_0 *hdr =
++ (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
++ fw_data = (const __le32 *)
++ (adev->gfx.pfp_fw->data +
++ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
++ table_offset = le32_to_cpu(hdr->jt_offset);
++ table_size = le32_to_cpu(hdr->jt_size);
++ } else if (me == 2) {
++ const struct gfx_firmware_header_v1_0 *hdr =
++ (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
++ fw_data = (const __le32 *)
++ (adev->gfx.me_fw->data +
++ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
++ table_offset = le32_to_cpu(hdr->jt_offset);
++ table_size = le32_to_cpu(hdr->jt_size);
++ } else if (me == 3) {
++ const struct gfx_firmware_header_v1_0 *hdr =
++ (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
++ fw_data = (const __le32 *)
++ (adev->gfx.mec_fw->data +
++ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
++ table_offset = le32_to_cpu(hdr->jt_offset);
++ table_size = le32_to_cpu(hdr->jt_size);
++ } else {
++ const struct gfx_firmware_header_v1_0 *hdr =
++ (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
++ fw_data = (const __le32 *)
++ (adev->gfx.mec2_fw->data +
++ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
++ table_offset = le32_to_cpu(hdr->jt_offset);
++ table_size = le32_to_cpu(hdr->jt_size);
++ }
++
++ for (i = 0; i < table_size; i ++) {
++ dst_ptr[bo_offset + i] =
++ cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
++ }
++
++ bo_offset += table_size;
++ }
+ }
+
+ static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev,
+@@ -4201,12 +4301,8 @@ static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
+ };
+
+ static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
+- .is_rlc_enabled = gfx_v7_0_is_rlc_enabled,
+- .set_safe_mode = gfx_v7_0_set_safe_mode,
+- .unset_safe_mode = gfx_v7_0_unset_safe_mode,
+- .get_csb_size = gfx_v7_0_get_csb_size,
+- .get_csb_buffer = gfx_v7_0_get_csb_buffer,
+- .get_cp_table_num = gfx_v7_0_cp_pg_table_num,
++ .enter_safe_mode = gfx_v7_0_enter_rlc_safe_mode,
++ .exit_safe_mode = gfx_v7_0_exit_rlc_safe_mode
+ };
+
+ static int gfx_v7_0_early_init(void *handle)
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index 43272fb..3ec5832 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -1298,16 +1298,75 @@ static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
+ buffer[count++] = cpu_to_le32(0);
+ }
+
+-static int gfx_v8_0_cp_jump_table_num(struct amdgpu_device *adev)
++static void cz_init_cp_jump_table(struct amdgpu_device *adev)
+ {
++ const __le32 *fw_data;
++ volatile u32 *dst_ptr;
++ int me, i, max_me = 4;
++ u32 bo_offset = 0;
++ u32 table_offset, table_size;
++
+ if (adev->asic_type == CHIP_CARRIZO)
+- return 5;
+- else
+- return 4;
++ max_me = 5;
++
++ /* write the cp table buffer */
++ dst_ptr = adev->gfx.rlc.cp_table_ptr;
++ for (me = 0; me < max_me; me++) {
++ if (me == 0) {
++ const struct gfx_firmware_header_v1_0 *hdr =
++ (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
++ fw_data = (const __le32 *)
++ (adev->gfx.ce_fw->data +
++ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
++ table_offset = le32_to_cpu(hdr->jt_offset);
++ table_size = le32_to_cpu(hdr->jt_size);
++ } else if (me == 1) {
++ const struct gfx_firmware_header_v1_0 *hdr =
++ (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
++ fw_data = (const __le32 *)
++ (adev->gfx.pfp_fw->data +
++ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
++ table_offset = le32_to_cpu(hdr->jt_offset);
++ table_size = le32_to_cpu(hdr->jt_size);
++ } else if (me == 2) {
++ const struct gfx_firmware_header_v1_0 *hdr =
++ (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
++ fw_data = (const __le32 *)
++ (adev->gfx.me_fw->data +
++ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
++ table_offset = le32_to_cpu(hdr->jt_offset);
++ table_size = le32_to_cpu(hdr->jt_size);
++ } else if (me == 3) {
++ const struct gfx_firmware_header_v1_0 *hdr =
++ (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
++ fw_data = (const __le32 *)
++ (adev->gfx.mec_fw->data +
++ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
++ table_offset = le32_to_cpu(hdr->jt_offset);
++ table_size = le32_to_cpu(hdr->jt_size);
++ } else if (me == 4) {
++ const struct gfx_firmware_header_v1_0 *hdr =
++ (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
++ fw_data = (const __le32 *)
++ (adev->gfx.mec2_fw->data +
++ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
++ table_offset = le32_to_cpu(hdr->jt_offset);
++ table_size = le32_to_cpu(hdr->jt_size);
++ }
++
++ for (i = 0; i < table_size; i ++) {
++ dst_ptr[bo_offset + i] =
++ cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
++ }
++
++ bo_offset += table_size;
++ }
+ }
+
+ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
+ {
++ volatile u32 *dst_ptr;
++ u32 dws;
+ const struct cs_section_def *cs_data;
+ int r;
+
+@@ -1316,18 +1375,44 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
+ cs_data = adev->gfx.rlc.cs_data;
+
+ if (cs_data) {
+- /* init clear state block */
+- r = amdgpu_gfx_rlc_init_csb(adev);
+- if (r)
++ /* clear state block */
++ adev->gfx.rlc.clear_state_size = dws = gfx_v8_0_get_csb_size(adev);
++
++ r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
++ AMDGPU_GEM_DOMAIN_VRAM,
++ &adev->gfx.rlc.clear_state_obj,
++ &adev->gfx.rlc.clear_state_gpu_addr,
++ (void **)&adev->gfx.rlc.cs_ptr);
++ if (r) {
++ dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
++ amdgpu_gfx_rlc_fini(adev);
+ return r;
++ }
++
++ /* set up the cs buffer */
++ dst_ptr = adev->gfx.rlc.cs_ptr;
++ gfx_v8_0_get_csb_buffer(adev, dst_ptr);
++ amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
++ amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+ }
+
+ if ((adev->asic_type == CHIP_CARRIZO) ||
+ (adev->asic_type == CHIP_STONEY)) {
+ adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
+- r = amdgpu_gfx_rlc_init_cpt(adev);
+- if (r)
++ r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
++ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
++ &adev->gfx.rlc.cp_table_obj,
++ &adev->gfx.rlc.cp_table_gpu_addr,
++ (void **)&adev->gfx.rlc.cp_table_ptr);
++ if (r) {
++ dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
+ return r;
++ }
++
++ cz_init_cp_jump_table(adev);
++
++ amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
++ amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
+ }
+
+ return 0;
+@@ -4880,7 +4965,7 @@ static int gfx_v8_0_hw_fini(void *handle)
+ pr_debug("For SRIOV client, shouldn't do anything.\n");
+ return 0;
+ }
+- amdgpu_gfx_rlc_enter_safe_mode(adev);
++ adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ if (!gfx_v8_0_wait_for_idle(adev))
+ gfx_v8_0_cp_enable(adev, false);
+ else
+@@ -4889,7 +4974,7 @@ static int gfx_v8_0_hw_fini(void *handle)
+ gfx_v8_0_rlc_stop(adev);
+ else
+ pr_err("rlc is busy, skip halt rlc\n");
+- amdgpu_gfx_rlc_exit_safe_mode(adev);
++ adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ return 0;
+ }
+
+@@ -5352,7 +5437,7 @@ static int gfx_v8_0_set_powergating_state(void *handle,
+ AMD_PG_SUPPORT_RLC_SMU_HS |
+ AMD_PG_SUPPORT_CP |
+ AMD_PG_SUPPORT_GFX_DMG))
+- amdgpu_gfx_rlc_enter_safe_mode(adev);
++ adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ switch (adev->asic_type) {
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+@@ -5406,7 +5491,7 @@ static int gfx_v8_0_set_powergating_state(void *handle,
+ AMD_PG_SUPPORT_RLC_SMU_HS |
+ AMD_PG_SUPPORT_CP |
+ AMD_PG_SUPPORT_GFX_DMG))
+- amdgpu_gfx_rlc_exit_safe_mode(adev);
++ adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ return 0;
+ }
+
+@@ -5500,53 +5585,57 @@ static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev,
+ #define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001
+ #define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e
+
+-static bool gfx_v8_0_is_rlc_enabled(struct amdgpu_device *adev)
++static void iceland_enter_rlc_safe_mode(struct amdgpu_device *adev)
+ {
+- uint32_t rlc_setting;
++ u32 data;
++ unsigned i;
+
+- rlc_setting = RREG32(mmRLC_CNTL);
+- if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
+- return false;
++ data = RREG32(mmRLC_CNTL);
++ if (!(data & RLC_CNTL__RLC_ENABLE_F32_MASK))
++ return;
+
+- return true;
+-}
++ if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
++ data |= RLC_SAFE_MODE__CMD_MASK;
++ data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
++ data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
++ WREG32(mmRLC_SAFE_MODE, data);
+
+-static void gfx_v8_0_set_safe_mode(struct amdgpu_device *adev)
+-{
+- uint32_t data;
+- unsigned i;
+- data = RREG32(mmRLC_CNTL);
+- data |= RLC_SAFE_MODE__CMD_MASK;
+- data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
+- data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
+- WREG32(mmRLC_SAFE_MODE, data);
++ for (i = 0; i < adev->usec_timeout; i++) {
++ if ((RREG32(mmRLC_GPM_STAT) &
++ (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
++ RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) ==
++ (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
++ RLC_GPM_STAT__GFX_POWER_STATUS_MASK))
++ break;
++ udelay(1);
++ }
+
+- /* wait for RLC_SAFE_MODE */
+- for (i = 0; i < adev->usec_timeout; i++) {
+- if ((RREG32(mmRLC_GPM_STAT) &
+- (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
+- RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) ==
+- (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
+- RLC_GPM_STAT__GFX_POWER_STATUS_MASK))
+- break;
+- udelay(1);
+- }
+- for (i = 0; i < adev->usec_timeout; i++) {
+- if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
+- break;
+- udelay(1);
++ for (i = 0; i < adev->usec_timeout; i++) {
++ if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
++ break;
++ udelay(1);
++ }
++ adev->gfx.rlc.in_safe_mode = true;
+ }
+ }
+
+-static void gfx_v8_0_unset_safe_mode(struct amdgpu_device *adev)
++static void iceland_exit_rlc_safe_mode(struct amdgpu_device *adev)
+ {
+- uint32_t data;
++ u32 data = 0;
+ unsigned i;
+
+ data = RREG32(mmRLC_CNTL);
+- data |= RLC_SAFE_MODE__CMD_MASK;
+- data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
+- WREG32(mmRLC_SAFE_MODE, data);
++ if (!(data & RLC_CNTL__RLC_ENABLE_F32_MASK))
++ return;
++
++ if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
++ if (adev->gfx.rlc.in_safe_mode) {
++ data |= RLC_SAFE_MODE__CMD_MASK;
++ data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
++ WREG32(mmRLC_SAFE_MODE, data);
++ adev->gfx.rlc.in_safe_mode = false;
++ }
++ }
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
+@@ -5556,12 +5645,8 @@ static void gfx_v8_0_unset_safe_mode(struct amdgpu_device *adev)
+ }
+
+ static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {
+- .is_rlc_enabled = gfx_v8_0_is_rlc_enabled,
+- .set_safe_mode = gfx_v8_0_set_safe_mode,
+- .unset_safe_mode = gfx_v8_0_unset_safe_mode,
+- .get_csb_size = gfx_v8_0_get_csb_size,
+- .get_csb_buffer = gfx_v8_0_get_csb_buffer,
+- .get_cp_table_num = gfx_v8_0_cp_jump_table_num,
++ .enter_safe_mode = iceland_enter_rlc_safe_mode,
++ .exit_safe_mode = iceland_exit_rlc_safe_mode
+ };
+
+ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+@@ -5569,7 +5654,7 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
+ {
+ uint32_t temp, data;
+
+- amdgpu_gfx_rlc_enter_safe_mode(adev);
++ adev->gfx.rlc.funcs->enter_safe_mode(adev);
+
+ /* It is disabled by HW by default */
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
+@@ -5665,7 +5750,7 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
+ gfx_v8_0_wait_for_rlc_serdes(adev);
+ }
+
+- amdgpu_gfx_rlc_exit_safe_mode(adev);
++ adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ }
+
+ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
+@@ -5675,7 +5760,7 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
+
+ temp = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
+
+- amdgpu_gfx_rlc_enter_safe_mode(adev);
++ adev->gfx.rlc.funcs->enter_safe_mode(adev);
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
+ temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
+@@ -5758,7 +5843,7 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
+
+ gfx_v8_0_wait_for_rlc_serdes(adev);
+
+- amdgpu_gfx_rlc_exit_safe_mode(adev);
++ adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ }
+ static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
+ bool enable)
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index ac2a843..69fcc77 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -1079,13 +1079,72 @@ static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
+ WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
+ }
+
+-static int gfx_v9_0_cp_jump_table_num(struct amdgpu_device *adev)
++static void rv_init_cp_jump_table(struct amdgpu_device *adev)
+ {
+- return 5;
++ const __le32 *fw_data;
++ volatile u32 *dst_ptr;
++ int me, i, max_me = 5;
++ u32 bo_offset = 0;
++ u32 table_offset, table_size;
++
++ /* write the cp table buffer */
++ dst_ptr = adev->gfx.rlc.cp_table_ptr;
++ for (me = 0; me < max_me; me++) {
++ if (me == 0) {
++ const struct gfx_firmware_header_v1_0 *hdr =
++ (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
++ fw_data = (const __le32 *)
++ (adev->gfx.ce_fw->data +
++ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
++ table_offset = le32_to_cpu(hdr->jt_offset);
++ table_size = le32_to_cpu(hdr->jt_size);
++ } else if (me == 1) {
++ const struct gfx_firmware_header_v1_0 *hdr =
++ (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
++ fw_data = (const __le32 *)
++ (adev->gfx.pfp_fw->data +
++ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
++ table_offset = le32_to_cpu(hdr->jt_offset);
++ table_size = le32_to_cpu(hdr->jt_size);
++ } else if (me == 2) {
++ const struct gfx_firmware_header_v1_0 *hdr =
++ (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
++ fw_data = (const __le32 *)
++ (adev->gfx.me_fw->data +
++ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
++ table_offset = le32_to_cpu(hdr->jt_offset);
++ table_size = le32_to_cpu(hdr->jt_size);
++ } else if (me == 3) {
++ const struct gfx_firmware_header_v1_0 *hdr =
++ (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
++ fw_data = (const __le32 *)
++ (adev->gfx.mec_fw->data +
++ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
++ table_offset = le32_to_cpu(hdr->jt_offset);
++ table_size = le32_to_cpu(hdr->jt_size);
++ } else if (me == 4) {
++ const struct gfx_firmware_header_v1_0 *hdr =
++ (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
++ fw_data = (const __le32 *)
++ (adev->gfx.mec2_fw->data +
++ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
++ table_offset = le32_to_cpu(hdr->jt_offset);
++ table_size = le32_to_cpu(hdr->jt_size);
++ }
++
++ for (i = 0; i < table_size; i ++) {
++ dst_ptr[bo_offset + i] =
++ cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
++ }
++
++ bo_offset += table_size;
++ }
+ }
+
+ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
+ {
++ volatile u32 *dst_ptr;
++ u32 dws;
+ const struct cs_section_def *cs_data;
+ int r;
+
+@@ -1094,18 +1153,45 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
+ cs_data = adev->gfx.rlc.cs_data;
+
+ if (cs_data) {
+- /* init clear state block */
+- r = amdgpu_gfx_rlc_init_csb(adev);
+- if (r)
++ /* clear state block */
++ adev->gfx.rlc.clear_state_size = dws = gfx_v9_0_get_csb_size(adev);
++ r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
++ AMDGPU_GEM_DOMAIN_VRAM,
++ &adev->gfx.rlc.clear_state_obj,
++ &adev->gfx.rlc.clear_state_gpu_addr,
++ (void **)&adev->gfx.rlc.cs_ptr);
++ if (r) {
++ dev_err(adev->dev, "(%d) failed to create rlc csb bo\n",
++ r);
++ amdgpu_gfx_rlc_fini(adev);
+ return r;
++ }
++ /* set up the cs buffer */
++ dst_ptr = adev->gfx.rlc.cs_ptr;
++ gfx_v9_0_get_csb_buffer(adev, dst_ptr);
++ amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
++ amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
++ amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+ }
+
+ if (adev->asic_type == CHIP_RAVEN) {
+ /* TODO: double check the cp_table_size for RV */
+ adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
+- r = amdgpu_gfx_rlc_init_cpt(adev);
+- if (r)
++ r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
++ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
++ &adev->gfx.rlc.cp_table_obj,
++ &adev->gfx.rlc.cp_table_gpu_addr,
++ (void **)&adev->gfx.rlc.cp_table_ptr);
++ if (r) {
++ dev_err(adev->dev,
++ "(%d) failed to create cp table bo\n", r);
++ amdgpu_gfx_rlc_fini(adev);
+ return r;
++ }
++
++ rv_init_cp_jump_table(adev);
++ amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
++ amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
+ }
+
+ switch (adev->asic_type) {
+@@ -3527,47 +3613,64 @@ static int gfx_v9_0_late_init(void *handle)
+ return 0;
+ }
+
+-static bool gfx_v9_0_is_rlc_enabled(struct amdgpu_device *adev)
++static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
+ {
+- uint32_t rlc_setting;
++ uint32_t rlc_setting, data;
++ unsigned i;
++
++ if (adev->gfx.rlc.in_safe_mode)
++ return;
+
+ /* if RLC is not enabled, do nothing */
+ rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
+ if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
+- return false;
+-
+- return true;
+-}
+-
+-static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev)
+-{
+- uint32_t data;
+- unsigned i;
++ return;
+
+- data = RLC_SAFE_MODE__CMD_MASK;
+- data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
+- WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
++ if (adev->cg_flags &
++ (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
++ AMD_CG_SUPPORT_GFX_3D_CGCG)) {
++ data = RLC_SAFE_MODE__CMD_MASK;
++ data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
++ WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
+
+- /* wait for RLC_SAFE_MODE */
+- for (i = 0; i < adev->usec_timeout; i++) {
+- if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
+- break;
+- udelay(1);
++ /* wait for RLC_SAFE_MODE */
++ for (i = 0; i < adev->usec_timeout; i++) {
++ if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
++ break;
++ udelay(1);
++ }
++ adev->gfx.rlc.in_safe_mode = true;
+ }
+ }
+
+-static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev)
++static void gfx_v9_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
+ {
+- uint32_t data;
++ uint32_t rlc_setting, data;
++
++ if (!adev->gfx.rlc.in_safe_mode)
++ return;
++
++ /* if RLC is not enabled, do nothing */
++ rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
++ if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
++ return;
+
+- data = RLC_SAFE_MODE__CMD_MASK;
+- WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
++ if (adev->cg_flags &
++ (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
++ /*
++ * Try to exit safe mode only if it is already in safe
++ * mode.
++ */
++ data = RLC_SAFE_MODE__CMD_MASK;
++ WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
++ adev->gfx.rlc.in_safe_mode = false;
++ }
+ }
+
+ static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
+ bool enable)
+ {
+- amdgpu_gfx_rlc_enter_safe_mode(adev);
++ gfx_v9_0_enter_rlc_safe_mode(adev);
+
+ if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
+ gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
+@@ -3578,7 +3681,7 @@ static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
+ gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
+ }
+
+- amdgpu_gfx_rlc_exit_safe_mode(adev);
++ gfx_v9_0_exit_rlc_safe_mode(adev);
+ }
+
+ static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
+@@ -3676,7 +3779,7 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
+ {
+ uint32_t data, def;
+
+- amdgpu_gfx_rlc_enter_safe_mode(adev);
++ adev->gfx.rlc.funcs->enter_safe_mode(adev);
+
+ /* Enable 3D CGCG/CGLS */
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
+@@ -3716,7 +3819,7 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
+ WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
+ }
+
+- amdgpu_gfx_rlc_exit_safe_mode(adev);
++ adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ }
+
+ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
+@@ -3724,7 +3827,7 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
+ {
+ uint32_t def, data;
+
+- amdgpu_gfx_rlc_enter_safe_mode(adev);
++ adev->gfx.rlc.funcs->enter_safe_mode(adev);
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
+ def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
+@@ -3764,7 +3867,7 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
+ WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
+ }
+
+- amdgpu_gfx_rlc_exit_safe_mode(adev);
++ adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ }
+
+ static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
+@@ -3793,12 +3896,8 @@ static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
+ }
+
+ static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
+- .is_rlc_enabled = gfx_v9_0_is_rlc_enabled,
+- .set_safe_mode = gfx_v9_0_set_safe_mode,
+- .unset_safe_mode = gfx_v9_0_unset_safe_mode,
+- .get_csb_size = gfx_v9_0_get_csb_size,
+- .get_csb_buffer = gfx_v9_0_get_csb_buffer,
+- .get_cp_table_num = gfx_v9_0_cp_jump_table_num,
++ .enter_safe_mode = gfx_v9_0_enter_rlc_safe_mode,
++ .exit_safe_mode = gfx_v9_0_exit_rlc_safe_mode
+ };
+
+ static int gfx_v9_0_set_powergating_state(void *handle,
+diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+index 36bcba96..faf06fd 100644
+--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
++++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+@@ -508,19 +508,19 @@ static int kv_enable_didt(struct amdgpu_device *adev, bool enable)
+ pi->caps_db_ramping ||
+ pi->caps_td_ramping ||
+ pi->caps_tcp_ramping) {
+- amdgpu_gfx_rlc_enter_safe_mode(adev);
++ adev->gfx.rlc.funcs->enter_safe_mode(adev);
+
+ if (enable) {
+ ret = kv_program_pt_config_registers(adev, didt_config_kv);
+ if (ret) {
+- amdgpu_gfx_rlc_exit_safe_mode(adev);
++ adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ return ret;
+ }
+ }
+
+ kv_do_enable_didt(adev, enable);
+
+- amdgpu_gfx_rlc_exit_safe_mode(adev);
++ adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ }
+
+ return 0;
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+index d138ddae..5e19f59 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+@@ -967,7 +967,7 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
+ PP_CAP(PHM_PlatformCaps_TDRamping) ||
+ PP_CAP(PHM_PlatformCaps_TCPRamping)) {
+
+- amdgpu_gfx_rlc_enter_safe_mode(adev);
++ adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ mutex_lock(&adev->grbm_idx_mutex);
+ value = 0;
+ value2 = cgs_read_register(hwmgr->device, mmGRBM_GFX_INDEX);
+@@ -1014,13 +1014,13 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
+ "Failed to enable DPM DIDT.", goto error);
+ }
+ mutex_unlock(&adev->grbm_idx_mutex);
+- amdgpu_gfx_rlc_exit_safe_mode(adev);
++ adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ }
+
+ return 0;
+ error:
+ mutex_unlock(&adev->grbm_idx_mutex);
+- amdgpu_gfx_rlc_exit_safe_mode(adev);
++ adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ return result;
+ }
+
+@@ -1034,7 +1034,7 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
+ PP_CAP(PHM_PlatformCaps_TDRamping) ||
+ PP_CAP(PHM_PlatformCaps_TCPRamping)) {
+
+- amdgpu_gfx_rlc_enter_safe_mode(adev);
++ adev->gfx.rlc.funcs->enter_safe_mode(adev);
+
+ result = smu7_enable_didt(hwmgr, false);
+ PP_ASSERT_WITH_CODE((result == 0),
+@@ -1046,12 +1046,12 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Failed to disable DPM DIDT.", goto error);
+ }
+- amdgpu_gfx_rlc_exit_safe_mode(adev);
++ adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ }
+
+ return 0;
+ error:
+- amdgpu_gfx_rlc_exit_safe_mode(adev);
++ adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ return result;
+ }
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
+index 6f26cb2..2d88abf 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
+@@ -937,7 +937,7 @@ static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
+
+ num_se = adev->gfx.config.max_shader_engines;
+
+- amdgpu_gfx_rlc_enter_safe_mode(adev);
++ adev->gfx.rlc.funcs->enter_safe_mode(adev);
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (count = 0; count < num_se; count++) {
+@@ -962,7 +962,7 @@ static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
+
+ vega10_didt_set_mask(hwmgr, true);
+
+- amdgpu_gfx_rlc_exit_safe_mode(adev);
++ adev->gfx.rlc.funcs->exit_safe_mode(adev);
+
+ return 0;
+ }
+@@ -971,11 +971,11 @@ static int vega10_disable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
+ {
+ struct amdgpu_device *adev = hwmgr->adev;
+
+- amdgpu_gfx_rlc_enter_safe_mode(adev);
++ adev->gfx.rlc.funcs->enter_safe_mode(adev);
+
+ vega10_didt_set_mask(hwmgr, false);
+
+- amdgpu_gfx_rlc_exit_safe_mode(adev);
++ adev->gfx.rlc.funcs->exit_safe_mode(adev);
+
+ return 0;
+ }
+@@ -988,7 +988,7 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
+
+ num_se = adev->gfx.config.max_shader_engines;
+
+- amdgpu_gfx_rlc_enter_safe_mode(adev);
++ adev->gfx.rlc.funcs->enter_safe_mode(adev);
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (count = 0; count < num_se; count++) {
+@@ -1007,7 +1007,7 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
+
+ vega10_didt_set_mask(hwmgr, true);
+
+- amdgpu_gfx_rlc_exit_safe_mode(adev);
++ adev->gfx.rlc.funcs->exit_safe_mode(adev);
+
+ vega10_program_gc_didt_config_registers(hwmgr, GCDiDtDroopCtrlConfig_vega10);
+ if (PP_CAP(PHM_PlatformCaps_GCEDC))
+@@ -1024,11 +1024,11 @@ static int vega10_disable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
+ struct amdgpu_device *adev = hwmgr->adev;
+ uint32_t data;
+
+- amdgpu_gfx_rlc_enter_safe_mode(adev);
++ adev->gfx.rlc.funcs->enter_safe_mode(adev);
+
+ vega10_didt_set_mask(hwmgr, false);
+
+- amdgpu_gfx_rlc_exit_safe_mode(adev);
++ adev->gfx.rlc.funcs->exit_safe_mode(adev);
+
+ if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
+ data = 0x00000000;
+@@ -1049,7 +1049,7 @@ static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr)
+
+ num_se = adev->gfx.config.max_shader_engines;
+
+- amdgpu_gfx_rlc_enter_safe_mode(adev);
++ adev->gfx.rlc.funcs->enter_safe_mode(adev);
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (count = 0; count < num_se; count++) {
+@@ -1070,7 +1070,7 @@ static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr)
+
+ vega10_didt_set_mask(hwmgr, true);
+
+- amdgpu_gfx_rlc_exit_safe_mode(adev);
++ adev->gfx.rlc.funcs->exit_safe_mode(adev);
+
+ return 0;
+ }
+@@ -1079,11 +1079,11 @@ static int vega10_disable_se_edc_config(struct pp_hwmgr *hwmgr)
+ {
+ struct amdgpu_device *adev = hwmgr->adev;
+
+- amdgpu_gfx_rlc_enter_safe_mode(adev);
++ adev->gfx.rlc.funcs->enter_safe_mode(adev);
+
+ vega10_didt_set_mask(hwmgr, false);
+
+- amdgpu_gfx_rlc_exit_safe_mode(adev);
++ adev->gfx.rlc.funcs->exit_safe_mode(adev);
+
+ return 0;
+ }
+@@ -1097,7 +1097,7 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
+
+ num_se = adev->gfx.config.max_shader_engines;
+
+- amdgpu_gfx_rlc_enter_safe_mode(adev);
++ adev->gfx.rlc.funcs->enter_safe_mode(adev);
+
+ vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10);
+
+@@ -1118,7 +1118,7 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
+
+ vega10_didt_set_mask(hwmgr, true);
+
+- amdgpu_gfx_rlc_exit_safe_mode(adev);
++ adev->gfx.rlc.funcs->exit_safe_mode(adev);
+
+ vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCDroopCtrlConfig_vega10);
+
+@@ -1138,11 +1138,11 @@ static int vega10_disable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
+ struct amdgpu_device *adev = hwmgr->adev;
+ uint32_t data;
+
+- amdgpu_gfx_rlc_enter_safe_mode(adev);
++ adev->gfx.rlc.funcs->enter_safe_mode(adev);
+
+ vega10_didt_set_mask(hwmgr, false);
+
+- amdgpu_gfx_rlc_exit_safe_mode(adev);
++ adev->gfx.rlc.funcs->exit_safe_mode(adev);
+
+ if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
+ data = 0x00000000;
+@@ -1160,7 +1160,7 @@ static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
+ struct amdgpu_device *adev = hwmgr->adev;
+ int result;
+
+- amdgpu_gfx_rlc_enter_safe_mode(adev);
++ adev->gfx.rlc.funcs->enter_safe_mode(adev);
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000);
+@@ -1173,7 +1173,7 @@ static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
+
+ vega10_didt_set_mask(hwmgr, false);
+
+- amdgpu_gfx_rlc_exit_safe_mode(adev);
++ adev->gfx.rlc.funcs->exit_safe_mode(adev);
+
+ return 0;
+ }
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5753-Revert-drm-amdgpu-separate-amdgpu_rlc-into-a-single-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5753-Revert-drm-amdgpu-separate-amdgpu_rlc-into-a-single-.patch
new file mode 100644
index 00000000..efa60695
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5753-Revert-drm-amdgpu-separate-amdgpu_rlc-into-a-single-.patch
@@ -0,0 +1,454 @@
+From bfaff79812373e4bb7f52dc09bb21a2c5a6e7d2f Mon Sep 17 00:00:00 2001
+From: Raveendra Talabattula <raveendra.talabattula@amd.com>
+Date: Thu, 10 Jan 2019 16:24:27 +0530
+Subject: [PATCH 5753/5758] Revert drm/amdgpu: separate amdgpu_rlc into a
+ single file
+
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/Makefile | 1 -
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 1 -
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 54 +++++++++++++++++++-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c | 57 ---------------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h | 89 ---------------------------------
+ drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 13 +++--
+ drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 15 ++++--
+ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 10 +++-
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 17 ++++++-
+ 9 files changed, 97 insertions(+), 160 deletions(-)
+ delete mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
+ delete mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
+index 467125d..0b967d9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/Makefile
++++ b/drivers/gpu/drm/amd/amdgpu/Makefile
+@@ -105,7 +105,6 @@ amdgpu-y += \
+ # add GFX block
+ amdgpu-y += \
+ amdgpu_gfx.o \
+- amdgpu_rlc.o \
+ gfx_v8_0.o \
+ gfx_v9_0.o
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+index 54ee584..a750242 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+@@ -25,7 +25,6 @@
+ #include <drm/drmP.h>
+ #include "amdgpu.h"
+ #include "amdgpu_gfx.h"
+-#include "amdgpu_rlc.h"
+
+ /* delay 0.1 second to enable gfx off feature */
+ #define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+index f790e15..b61b5c1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+@@ -29,7 +29,6 @@
+ */
+ #include "clearstate_defs.h"
+ #include "amdgpu_ring.h"
+-#include "amdgpu_rlc.h"
+
+ /* GFX current status */
+ #define AMDGPU_GFX_NORMAL_MODE 0x00000000L
+@@ -38,6 +37,59 @@
+ #define AMDGPU_GFX_CG_DISABLED_MODE 0x00000004L
+ #define AMDGPU_GFX_LBPW_DISABLED_MODE 0x00000008L
+
++
++struct amdgpu_rlc_funcs {
++ void (*enter_safe_mode)(struct amdgpu_device *adev);
++ void (*exit_safe_mode)(struct amdgpu_device *adev);
++};
++
++struct amdgpu_rlc {
++ /* for power gating */
++ struct amdgpu_bo *save_restore_obj;
++ uint64_t save_restore_gpu_addr;
++ volatile uint32_t *sr_ptr;
++ const u32 *reg_list;
++ u32 reg_list_size;
++ /* for clear state */
++ struct amdgpu_bo *clear_state_obj;
++ uint64_t clear_state_gpu_addr;
++ volatile uint32_t *cs_ptr;
++ const struct cs_section_def *cs_data;
++ u32 clear_state_size;
++ /* for cp tables */
++ struct amdgpu_bo *cp_table_obj;
++ uint64_t cp_table_gpu_addr;
++ volatile uint32_t *cp_table_ptr;
++ u32 cp_table_size;
++
++ /* safe mode for updating CG/PG state */
++ bool in_safe_mode;
++ const struct amdgpu_rlc_funcs *funcs;
++
++ /* for firmware data */
++ u32 save_and_restore_offset;
++ u32 clear_state_descriptor_offset;
++ u32 avail_scratch_ram_locations;
++ u32 reg_restore_list_size;
++ u32 reg_list_format_start;
++ u32 reg_list_format_separate_start;
++ u32 starting_offsets_start;
++ u32 reg_list_format_size_bytes;
++ u32 reg_list_size_bytes;
++ u32 reg_list_format_direct_reg_list_length;
++ u32 save_restore_list_cntl_size_bytes;
++ u32 save_restore_list_gpm_size_bytes;
++ u32 save_restore_list_srm_size_bytes;
++
++ u32 *register_list_format;
++ u32 *register_restore;
++ u8 *save_restore_list_cntl;
++ u8 *save_restore_list_gpm;
++ u8 *save_restore_list_srm;
++
++ bool is_rlc_v2_1;
++};
++
+ #define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES
+
+ struct amdgpu_mec {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
+deleted file mode 100644
+index c5459ab..0000000
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
++++ /dev/null
+@@ -1,57 +0,0 @@
+-
+-/*
+- * Copyright 2014 Advanced Micro Devices, Inc.
+- * Copyright 2008 Red Hat Inc.
+- * Copyright 2009 Jerome Glisse.
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included in
+- * all copies or substantial portions of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+- * OTHER DEALINGS IN THE SOFTWARE.
+- *
+- */
+-
+-#include "amdgpu.h"
+-#include "amdgpu_gfx.h"
+-#include "amdgpu_rlc.h"
+-
+-/**
+- * amdgpu_gfx_rlc_fini - Free BO which used for RLC
+- *
+- * @adev: amdgpu_device pointer
+- *
+- * Free three BO which is used for rlc_save_restore_block, rlc_clear_state_block
+- * and rlc_jump_table_block.
+- */
+-void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev)
+-{
+- /* save restore block */
+- if (adev->gfx.rlc.save_restore_obj) {
+- amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj,
+- &adev->gfx.rlc.save_restore_gpu_addr,
+- (void **)&adev->gfx.rlc.sr_ptr);
+- }
+-
+- /* clear state block */
+- amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
+- &adev->gfx.rlc.clear_state_gpu_addr,
+- (void **)&adev->gfx.rlc.cs_ptr);
+-
+- /* jump table block */
+- amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
+- &adev->gfx.rlc.cp_table_gpu_addr,
+- (void **)&adev->gfx.rlc.cp_table_ptr);
+-}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
+deleted file mode 100644
+index b3b0920..0000000
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
++++ /dev/null
+@@ -1,89 +0,0 @@
+-
+-/*
+- * Copyright 2014 Advanced Micro Devices, Inc.
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included in
+- * all copies or substantial portions of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+- * OTHER DEALINGS IN THE SOFTWARE.
+- *
+- */
+-
+-#ifndef __AMDGPU_RLC_H__
+-#define __AMDGPU_RLC_H__
+-
+-#include "clearstate_defs.h"
+-
+-struct amdgpu_rlc_funcs {
+- void (*enter_safe_mode)(struct amdgpu_device *adev);
+- void (*exit_safe_mode)(struct amdgpu_device *adev);
+- int (*init)(struct amdgpu_device *adev);
+- int (*resume)(struct amdgpu_device *adev);
+- void (*stop)(struct amdgpu_device *adev);
+- void (*reset)(struct amdgpu_device *adev);
+- void (*start)(struct amdgpu_device *adev);
+-};
+-
+-struct amdgpu_rlc {
+- /* for power gating */
+- struct amdgpu_bo *save_restore_obj;
+- uint64_t save_restore_gpu_addr;
+- volatile uint32_t *sr_ptr;
+- const u32 *reg_list;
+- u32 reg_list_size;
+- /* for clear state */
+- struct amdgpu_bo *clear_state_obj;
+- uint64_t clear_state_gpu_addr;
+- volatile uint32_t *cs_ptr;
+- const struct cs_section_def *cs_data;
+- u32 clear_state_size;
+- /* for cp tables */
+- struct amdgpu_bo *cp_table_obj;
+- uint64_t cp_table_gpu_addr;
+- volatile uint32_t *cp_table_ptr;
+- u32 cp_table_size;
+-
+- /* safe mode for updating CG/PG state */
+- bool in_safe_mode;
+- const struct amdgpu_rlc_funcs *funcs;
+-
+- /* for firmware data */
+- u32 save_and_restore_offset;
+- u32 clear_state_descriptor_offset;
+- u32 avail_scratch_ram_locations;
+- u32 reg_restore_list_size;
+- u32 reg_list_format_start;
+- u32 reg_list_format_separate_start;
+- u32 starting_offsets_start;
+- u32 reg_list_format_size_bytes;
+- u32 reg_list_size_bytes;
+- u32 reg_list_format_direct_reg_list_length;
+- u32 save_restore_list_cntl_size_bytes;
+- u32 save_restore_list_gpm_size_bytes;
+- u32 save_restore_list_srm_size_bytes;
+-
+- u32 *register_list_format;
+- u32 *register_restore;
+- u8 *save_restore_list_cntl;
+- u8 *save_restore_list_gpm;
+- u8 *save_restore_list_srm;
+-
+- bool is_rlc_v2_1;
+-};
+-
+-void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev);
+-
+-#endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+index abc8ec6..4f8d6a2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+@@ -2397,6 +2397,13 @@ static void gfx_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
+ amdgpu_ring_write(ring, val);
+ }
+
++static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev)
++{
++ amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj, NULL, NULL);
++ amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL);
++ amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL);
++}
++
+ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
+ {
+ const u32 *src_ptr;
+@@ -2425,7 +2432,7 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
+ if (r) {
+ dev_warn(adev->dev, "(%d) create RLC sr bo failed\n",
+ r);
+- amdgpu_gfx_rlc_fini(adev);
++ gfx_v6_0_rlc_fini(adev);
+ return r;
+ }
+
+@@ -2450,7 +2457,7 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
+ (void **)&adev->gfx.rlc.cs_ptr);
+ if (r) {
+ dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
+- amdgpu_gfx_rlc_fini(adev);
++ gfx_v6_0_rlc_fini(adev);
+ return r;
+ }
+
+@@ -3187,7 +3194,7 @@ static int gfx_v6_0_sw_fini(void *handle)
+ for (i = 0; i < adev->gfx.num_compute_rings; i++)
+ amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
+
+- amdgpu_gfx_rlc_fini(adev);
++ gfx_v6_0_rlc_fini(adev);
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+index 19a0e4f..05f7a29 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+@@ -3288,6 +3288,13 @@ static void gfx_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
+ * The RLC is a multi-purpose microengine that handles a
+ * variety of functions.
+ */
++static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev)
++{
++ amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj, NULL, NULL);
++ amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL);
++ amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL);
++}
++
+ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
+ {
+ const u32 *src_ptr;
+@@ -3327,7 +3334,7 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
+ (void **)&adev->gfx.rlc.sr_ptr);
+ if (r) {
+ dev_warn(adev->dev, "(%d) create, pin or map of RLC sr bo failed\n", r);
+- amdgpu_gfx_rlc_fini(adev);
++ gfx_v7_0_rlc_fini(adev);
+ return r;
+ }
+
+@@ -3350,7 +3357,7 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
+ (void **)&adev->gfx.rlc.cs_ptr);
+ if (r) {
+ dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
+- amdgpu_gfx_rlc_fini(adev);
++ gfx_v7_0_rlc_fini(adev);
+ return r;
+ }
+
+@@ -3370,7 +3377,7 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
+ (void **)&adev->gfx.rlc.cp_table_ptr);
+ if (r) {
+ dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
+- amdgpu_gfx_rlc_fini(adev);
++ gfx_v7_0_rlc_fini(adev);
+ return r;
+ }
+
+@@ -4617,7 +4624,7 @@ static int gfx_v7_0_sw_fini(void *handle)
+ amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
+
+ gfx_v7_0_cp_compute_fini(adev);
+- amdgpu_gfx_rlc_fini(adev);
++ gfx_v7_0_rlc_fini(adev);
+ gfx_v7_0_mec_fini(adev);
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
+ &adev->gfx.rlc.clear_state_gpu_addr,
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index 3ec5832..9619369 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -1363,6 +1363,12 @@ static void cz_init_cp_jump_table(struct amdgpu_device *adev)
+ }
+ }
+
++static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
++{
++ amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL);
++ amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL);
++}
++
+ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
+ {
+ volatile u32 *dst_ptr;
+@@ -1385,7 +1391,7 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
+ (void **)&adev->gfx.rlc.cs_ptr);
+ if (r) {
+ dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
+- amdgpu_gfx_rlc_fini(adev);
++ gfx_v8_0_rlc_fini(adev);
+ return r;
+ }
+
+@@ -2175,7 +2181,7 @@ static int gfx_v8_0_sw_fini(void *handle)
+ amdgpu_gfx_kiq_fini(adev);
+
+ gfx_v8_0_mec_fini(adev);
+- amdgpu_gfx_rlc_fini(adev);
++ gfx_v8_0_rlc_fini(adev);
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
+ &adev->gfx.rlc.clear_state_gpu_addr,
+ (void **)&adev->gfx.rlc.cs_ptr);
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 69fcc77..0481e21 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -1141,6 +1141,19 @@ static void rv_init_cp_jump_table(struct amdgpu_device *adev)
+ }
+ }
+
++static void gfx_v9_0_rlc_fini(struct amdgpu_device *adev)
++{
++ /* clear state block */
++ amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
++ &adev->gfx.rlc.clear_state_gpu_addr,
++ (void **)&adev->gfx.rlc.cs_ptr);
++
++ /* jump table block */
++ amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
++ &adev->gfx.rlc.cp_table_gpu_addr,
++ (void **)&adev->gfx.rlc.cp_table_ptr);
++}
++
+ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
+ {
+ volatile u32 *dst_ptr;
+@@ -1163,7 +1176,7 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to create rlc csb bo\n",
+ r);
+- amdgpu_gfx_rlc_fini(adev);
++ gfx_v9_0_rlc_fini(adev);
+ return r;
+ }
+ /* set up the cs buffer */
+@@ -1185,7 +1198,7 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
+ if (r) {
+ dev_err(adev->dev,
+ "(%d) failed to create cp table bo\n", r);
+- amdgpu_gfx_rlc_fini(adev);
++ gfx_v9_0_rlc_fini(adev);
+ return r;
+ }
+
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5754-drm-amdgpu-make-gfx9-enter-into-rlc-safe-mode-when-s.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5754-drm-amdgpu-make-gfx9-enter-into-rlc-safe-mode-when-s.patch
new file mode 100644
index 00000000..b5d33fb0
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5754-drm-amdgpu-make-gfx9-enter-into-rlc-safe-mode-when-s.patch
@@ -0,0 +1,39 @@
+From 0c68f4fffe18d5894cb4ac2111001999f747c96e Mon Sep 17 00:00:00 2001
+From: Likun Gao <Likun.Gao@amd.com>
+Date: Wed, 9 Jan 2019 10:46:48 +0800
+Subject: [PATCH 5754/5758] drm/amdgpu: make gfx9 enter into rlc safe mode when
+ set MGCG
+
+Setting MGCG requires the RLC to enter safe mode first.
+
+Signed-off-by: Likun Gao <Likun.Gao@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 0481e21..c7db271 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -3721,6 +3721,8 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
+ {
+ uint32_t data, def;
+
++ adev->gfx.rlc.funcs->enter_safe_mode(adev);
++
+ /* It is disabled by HW by default */
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
+ /* 1 - RLC_CGTT_MGCG_OVERRIDE */
+@@ -3785,6 +3787,8 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
+ WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
+ }
+ }
++
++ adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ }
+
+ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5755-Revert-drm-amdgpu-revert-the-commit-interim-disable-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5755-Revert-drm-amdgpu-revert-the-commit-interim-disable-.patch
new file mode 100644
index 00000000..8b6186a9
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5755-Revert-drm-amdgpu-revert-the-commit-interim-disable-.patch
@@ -0,0 +1,32 @@
+From bed5590326e22b00ac99f5836c4c2f5c117fbfac Mon Sep 17 00:00:00 2001
+From: Raveendra Talabattula <raveendra.talabattula@amd.com>
+Date: Thu, 10 Jan 2019 16:25:14 +0530
+Subject: [PATCH 5755/5758] Revert drm/amdgpu: revert the commit interim
+ disable RV2 GFX CG
+
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/soc15.c | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index a741913..5614c2b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -705,12 +705,9 @@ static int soc15_common_early_init(void *handle)
+ adev->external_rev_id = 0x1;
+
+ if (adev->rev_id >= 0x8) {
+- adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+- AMD_CG_SUPPORT_GFX_MGLS |
++ adev->cg_flags = AMD_CG_SUPPORT_GFX_MGLS |
+ AMD_CG_SUPPORT_GFX_CP_LS |
+- AMD_CG_SUPPORT_GFX_3D_CGCG |
+ AMD_CG_SUPPORT_GFX_3D_CGLS |
+- AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_BIF_LS |
+ AMD_CG_SUPPORT_HDP_LS |
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5756-Revert-drm-amdgpu-revert-psp-firmware-load-status-ch.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5756-Revert-drm-amdgpu-revert-psp-firmware-load-status-ch.patch
new file mode 100644
index 00000000..73e5485a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5756-Revert-drm-amdgpu-revert-psp-firmware-load-status-ch.patch
@@ -0,0 +1,32 @@
+From 0989b02cc7c434d41af054096dcf43c6b1eea297 Mon Sep 17 00:00:00 2001
+From: Raveendra Talabattula <raveendra.talabattula@amd.com>
+Date: Thu, 7 Feb 2019 11:45:55 +0530
+Subject: [PATCH 5756/5758] Revert drm/amdgpu: revert psp firmware load status
+ check
+
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+index a70657d..a176706 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+@@ -134,6 +134,13 @@ psp_cmd_submit_buf(struct psp_context *psp,
+ msleep(1);
+ }
+
++ /* the status field must be 0 after FW is loaded */
++ if (ucode && psp->cmd_buf_mem->resp.status) {
++ DRM_ERROR("failed loading with status (%d) and ucode id (%d)\n",
++ psp->cmd_buf_mem->resp.status, ucode->ucode_id);
++ return -EINVAL;
++ }
++
+ if (ucode) {
+ ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
+ ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5757-drm-amdgpu-psp-ignore-psp-response-status.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5757-drm-amdgpu-psp-ignore-psp-response-status.patch
new file mode 100644
index 00000000..cb4024bb
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5757-drm-amdgpu-psp-ignore-psp-response-status.patch
@@ -0,0 +1,54 @@
+From b54e241076bd705560bd6f262846a40a0c3dbeed Mon Sep 17 00:00:00 2001
+From: Aaron Liu <aaron.liu@amd.com>
+Date: Mon, 14 Jan 2019 16:08:32 +0800
+Subject: [PATCH 5757/5758] drm/amdgpu/psp: ignore psp response status
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+In some cases, the PSP response status is not 0 even when there is
+no problem while the command is submitted; some versions of the PSP
+firmware do not write 0 to that field.
+So here we only print a warning instead of an error during PSP
+initialization, to avoid breaking hw_init, and do not return -EINVAL
+in that case.
+
+Change-Id: I680679983f972b6969f4949f1faafaf17fe996a6
+Signed-off-by: Aaron Liu <aaron.liu@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Reviewed-by: Xiangliang Yu <Xiangliang.Yu@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-by: Paul Menzel <pmenzel+amd-gfx@molgen.mpg.de>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+index a176706..78e7469 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+@@ -134,11 +134,16 @@ psp_cmd_submit_buf(struct psp_context *psp,
+ msleep(1);
+ }
+
+- /* the status field must be 0 after FW is loaded */
++ /* In some cases, psp response status is not 0 even there is no
++ * problem while the command is submitted. Some version of PSP FW
++ * doesn't write 0 to that field.
++ * So here we would like to only print a warning instead of an error
++ * during psp initialization to avoid breaking hw_init and it doesn't
++ * return -EINVAL.
++ */
+ if (ucode && psp->cmd_buf_mem->resp.status) {
+- DRM_ERROR("failed loading with status (%d) and ucode id (%d)\n",
++ DRM_WARN("failed loading with status (%d) and ucode id (%d)\n",
+ psp->cmd_buf_mem->resp.status, ucode->ucode_id);
+- return -EINVAL;
+ }
+
+ if (ucode) {
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/5758-RTQA4-Fix-build-error-for-hs400-and-hs200.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/5758-RTQA4-Fix-build-error-for-hs400-and-hs200.patch
new file mode 100644
index 00000000..286d4a24
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/5758-RTQA4-Fix-build-error-for-hs400-and-hs200.patch
@@ -0,0 +1,81 @@
+From 80bfa93f2f180dd45284f0d4fc75f0b934fcc329 Mon Sep 17 00:00:00 2001
+From: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+Date: Thu, 14 Feb 2019 13:02:47 +0530
+Subject: [PATCH 5758/5758] RTQA4: Fix build error for hs400 and hs200
+
+Signed-off-by: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
+---
+ drivers/mmc/host/sdhci-acpi.c | 1 +
+ drivers/mmc/host/sdhci.c | 11 +++++++++++
+ drivers/mmc/host/sdhci.h | 1 +
+ include/linux/mmc/host.h | 1 +
+ 4 files changed, 14 insertions(+)
+
+diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
+index b01e906..d13a177 100644
+--- a/drivers/mmc/host/sdhci-acpi.c
++++ b/drivers/mmc/host/sdhci-acpi.c
+@@ -411,6 +411,7 @@ static const struct sdhci_ops sdhci_acpi_ops_amd = {
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
++ .set_hs400_dll = sdhci_acpi_amd_hs400_dll,
+ };
+
+ static const struct sdhci_acpi_chip sdhci_acpi_chip_amd = {
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 46346ec..7e29a39 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -1983,6 +1983,16 @@ static void sdhci_hw_reset(struct mmc_host *mmc)
+ host->ops->hw_reset(host);
+ }
+
++static void sdhci_set_hs400_dll(struct mmc_host *mmc)
++{
++ struct sdhci_host *host = mmc_priv(mmc);
++
++ if (host->ops && host->ops->set_hs400_dll)
++ host->ops->set_hs400_dll(host);
++}
++
++
++
+ static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
+ {
+ if (!(host->flags & SDHCI_DEVICE_DEAD)) {
+@@ -2470,6 +2480,7 @@ static const struct mmc_host_ops sdhci_ops = {
+ .get_cd = sdhci_get_cd,
+ .get_ro = sdhci_get_ro,
+ .hw_reset = sdhci_hw_reset,
++ .set_hs400_dll = sdhci_set_hs400_dll,
+ .enable_sdio_irq = sdhci_enable_sdio_irq,
+ .start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
+ .prepare_hs400_tuning = sdhci_prepare_hs400_tuning,
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+index 027d85a..dd3219e 100755
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -611,6 +611,7 @@ struct sdhci_ops {
+ int (*platform_execute_tuning)(struct sdhci_host *host, u32 opcode);
+ void (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs);
+ void (*hw_reset)(struct sdhci_host *host);
++ void (*set_hs400_dll)(struct sdhci_host *host);
+ void (*adma_workaround)(struct sdhci_host *host, u32 intmask);
+ void (*card_event)(struct sdhci_host *host);
+ void (*voltage_switch)(struct sdhci_host *host);
+diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
+index 843c38f..95a1452 100644
+--- a/include/linux/mmc/host.h
++++ b/include/linux/mmc/host.h
+@@ -159,6 +159,7 @@ struct mmc_host_ops {
+ unsigned int max_dtr, int host_drv,
+ int card_drv, int *drv_type);
+ void (*hw_reset)(struct mmc_host *host);
++ void (*set_hs400_dll)(struct mmc_host *host);
+ void (*card_event)(struct mmc_host *host);
+
+ /*
+--
+2.7.4
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/amd-emmc-patches.scc b/common/recipes-kernel/linux/linux-yocto-4.14.71/amd-emmc-patches.scc
index 4ade5dc9..b7c8d685 100644
--- a/common/recipes-kernel/linux/linux-yocto-4.14.71/amd-emmc-patches.scc
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/amd-emmc-patches.scc
@@ -2,3 +2,16 @@ patch 0093-check-pci-dev-before-getting-pci-alias.patch
patch 0094-mmc-sdhci-acpi-Add-support-for-ACPI-HID-of-AMD-Contr.patch
patch 0095-eMMC-patch-4.14.48.patch
patch 1116-pinctrl-eMMC-and-PinCtrl-is-sharing-the-interrupt-no.patch
+patch 5728-mmc-core-Move-calls-to-prepare_hs400_tuning-closer-t.patch
+patch 5729-mmc-core-more-fine-grained-hooks-for-HS400-tuning.patch
+patch 5730-mmc-sdhci-Export-sdhci-tuning-function-symbol.patch
+patch 5731-mmc-sdhci-Export-sdhci_request.patch
+patch 5732-mmc-sdhci-add-adma_table_cnt-member-to-struct-sdhci_.patch
+patch 5733-mmc-sdhci-introduce-adma_write_desc-hook-to-struct-s.patch
+patch 5734-mmc-sdhci-Add-version-V4-definition.patch
+patch 5735-mmc-sdhci-Add-sd-host-v4-mode.patch
+patch 5736-mmc-sdhci-Add-ADMA2-64-bit-addressing-support-for-V4.patch
+patch 5737-mmc-sdhci-Add-32-bit-block-count-support-for-v4-mode.patch
+patch 5738-mmc-sdhci-Add-Auto-CMD-Auto-Select-support.patch
+patch 5746-amd-eMMC-sdhci-HS400-workaround-for-ZP.patch
+patch 5758-RTQA4-Fix-build-error-for-hs400-and-hs200.patch
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/amd-xgbe-patches.scc b/common/recipes-kernel/linux/linux-yocto-4.14.71/amd-xgbe-patches.scc
index e8e13981..32d73c30 100644
--- a/common/recipes-kernel/linux/linux-yocto-4.14.71/amd-xgbe-patches.scc
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/amd-xgbe-patches.scc
@@ -13,3 +13,10 @@ patch 4108-amd-xgbe-Always-attempt-link-training-in-KR-mode.patch
patch 4109-amd-xgbe-Advertise-FEC-support-with-the-KR-re-driver.patch
patch 4110-amd-xgbe-Update-the-BelFuse-quirk-to-support-SGMII.patch
patch 4111-amd-xgbe-Improve-SFP-100Mbps-auto-negotiation.patch
+patch 5739-amd-xgbe-use-dma_mapping_error-to-check-map-errors.patch
+patch 5742-net-ethernet-Use-existing-define-with-polynomial.patch
+patch 5743-net-amd-fix-return-type-of-ndo_start_xmit-function.patch
+patch 5744-net-phy-Add-helper-for-advertise-to-lcl-value.patch
+patch 5745-drivers-net-remove-net-busy_poll.h-inclusion-when-no.patch
+patch 5749-net-phy-Also-request-modules-for-C45-IDs.patch
+patch 5750-amd-xgbe-Fix-mdio-access-for-non-zero-ports-and-clau.patch
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/amdgpu-patches.scc b/common/recipes-kernel/linux/linux-yocto-4.14.71/amdgpu-patches.scc
index c1482623..0618a367 100755
--- a/common/recipes-kernel/linux/linux-yocto-4.14.71/amdgpu-patches.scc
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/amdgpu-patches.scc
@@ -4112,3 +4112,1606 @@ patch 4129-drm-amd-display-fix-multisync-for-any-connection-ena.patch
patch 4130-RTQA3-compilation-fix-after-back-porting.patch
patch 4131-RTQA3-Backporting-comilation-fix-for-drm-change.patch
patch 4132-RQTAQ3-compilaton-fix-added-io.h.patch
+patch 4133-drm-amdgpu-powerplay-Added-missing-endian-fixes-for-.patch
+patch 4134-drm-amd-powerplay-implement-smu7_smumgr-for-asics-wi.patch
+patch 4135-drm-amd-powerplay-fix-bug-get-wrong-evv-voltage-of-P.patch
+patch 4136-drm-amdgpu-Use-the-drm_driver.dumb_destroy-default.patch
+patch 4137-drm-amd-dc-Add-dc-display-driver-v3.patch
+patch 4138-drm-amd-display-No-need-to-keep-track-of-unreffed-cl.patch
+patch 4139-dma-buf-keep-only-not-signaled-fence-in-reservation_.patch
+patch 4140-drm-amdgpu-Restore-scalable-VM-size-calculation.patch
+patch 4141-drm-amdgpu-fix-and-cleanup-UVD-IB-generation-v2.patch
+patch 4142-drm-amdgpu-cleanup-VCN-IB-generation-v2.patch
+patch 4143-drm-amdkfd-Disable-the-perf-counters-for-old-kernels.patch
+patch 4144-drm-amd-display-fix-Polaris-12-bw-bounding-box-v2.patch
+patch 4145-drm-amdkfd-Fix-and-simplify-sync-object-handling-for.patch
+patch 4146-drm-amdkfd-use-px-to-print-user-space-address-instea.patch
+patch 4147-drm-amdgpu-Fix-unbalanced-memory-accounting-in-error.patch
+patch 4148-drm-amdkfd-Take-reference-to-gtt-usertask.patch
+patch 4149-drm-amdgpu-Avoid-GFP_NOIO.patch
+patch 4150-drm-amdgpu-Fix-acquiring-VM-on-large-BAR-systems.patch
+patch 4151-drm-amdkfd-Simplify-dGPU-event-page-allocation.patch
+patch 4152-drm-amdkfd-Backwards-compatibility-with-old-Thunk.patch
+patch 4153-drm-amdkfd-Remove-pm_map_process_scratch_cik.patch
+patch 4154-drm-amdgpu-Remove-pm_map_process_cik.patch
+patch 4155-drm-amdkfd-Put-packet-sizes-directly-into-packet_man.patch
+patch 4156-drm-amdkfd-GPU-recovery-support-from-KFD-step-1.patch
+patch 4157-drm-amdkfd-signal-hw_exception-event-on-GPU-reset.patch
+patch 4158-drm-amdkfd-remove-check-for-PCIe-upstream-bridge.patch
+patch 4159-drm-amdgpu-Enable-the-gpu-reset-from-amdkfd.patch
+patch 4160-drm-amdkfd-CMA-Refactor-CMA-code.patch
+patch 4161-drm-amdkfd-CMA-Store-cpuva-in-KFD-BO.patch
+patch 4162-drm-amdkfd-CMA-Handle-userptr-to-userptr-BO-copy.patch
+patch 4163-drm-amdgpu-kfd2kgd-Support-BO-create-from-sg.patch
+patch 4164-drm-amdgpu-CMA-Validate-BOs-before-use.patch
+patch 4165-drm-amdkfd-CMA-Use-shadow-system-BO-for-userptr.patch
+patch 4166-Fix-SVM-missing-on-Raven.patch
+patch 4167-drm-amdkfd-Implement-SPI-debug-and-exception-support.patch
+patch 4168-drm-amd-powerplay-initialzie-the-dpm-intial-enabled-.patch
+patch 4169-drm-amd-powerplay-Get-more-than-8-level-gfxclk-state.patch
+patch 4170-amd-powerplay-implement-the-vega12_force_clock_level.patch
+patch 4171-drm-amd-display-Update-MST-edid-property-every-time.patch
+patch 4172-drm-amd-display-Check-dc_sink-every-time-in-MST-hotp.patch
+patch 4173-drm-amd-powerplay-header-file-interface-to-SMU-updat.patch
+patch 4174-drm-amd-powerplay-add-registry-key-to-disable-ACG.patch
+patch 4175-drm-amdgpu-fix-null-pointer-panic-with-direct-fw-loa.patch
+patch 4176-drm-amdgpu-use-ctx-bytes_moved.patch
+patch 4177-drm-amdgpu-fix-and-cleanup-cpu-visible-VRAM-handling.patch
+patch 4178-drm-amd-display-Fix-64-bit-division-in-hwss_edp_powe.patch
+patch 4179-drm-amd-display-Remove-PRE_VEGA-flag.patch
+patch 4180-drm-amd-display-remove-dummy-is_blanked-to-optimise-.patch
+patch 4181-drm-ttm-keep-a-reference-to-transfer-pipelined-BOs.patch
+patch 4182-drm-amdgpu-gfx9-cache-DB_DEBUG2-and-make-it-availabl.patch
+patch 4183-Revert-drm-amd-display-fix-dereferencing-possible-ER.patch
+patch 4184-Revert-drm-amd-display-disable-CRTCs-with-NULL-FB-on.patch
+patch 4185-drm-amdgpu-add-emit_reg_write_reg_wait-ring-callback.patch
+patch 4186-drm-amdgpu-gfx9-add-emit_reg_write_reg_wait-ring-cal.patch
+patch 4187-drm-amdgpu-sdma4-add-emit_reg_write_reg_wait-ring-ca.patch
+patch 4188-drm-amdgpu-uvd7-add-emit_reg_write_reg_wait-ring-cal.patch
+patch 4189-drm-amdgpu-vce4-add-emit_reg_write_reg_wait-ring-cal.patch
+patch 4190-drm-amdgpu-vcn1-add-emit_reg_write_reg_wait-ring-cal.patch
+patch 4191-drm-amdgpu-gmc9-use-amdgpu_ring_emit_reg_write_reg_w.patch
+patch 4192-drm-amdgpu-gmc-steal-the-appropriate-amount-of-vram-.patch
+patch 4193-drm-amdgpu-always-allocate-a-PASIDs-for-each-VM-v2.patch
+patch 4194-drm-amdgpu-Free-VGA-stolen-memory-as-soon-as-possibl.patch
+patch 4195-drm-gpu-sched-fix-force-APP-kill-hang-v4.patch
+patch 4196-drm-amdgpu-revert-add-new-bo-flag-that-indicates-BOs.patch
+patch 4197-drm-amdgpu-revert-Don-t-change-preferred-domian-when.patch
+patch 4198-drm-amdgpu-re-validate-per-VM-BOs-if-required-v2.patch
+patch 4199-drm-amdgpu-Code-Indentation-change-in-the-function.patch
+patch 4200-drm-amd-display-dal-3.1.42.patch
+patch 4201-drm-amd-display-fix-brightness-level-after-resume-fr.patch
+patch 4202-drm-amd-display-Move-dp_pixel_encoding_type-to-strea.patch
+patch 4203-drm-amd-display-Fix-regamma-not-affecting-full-inten.patch
+patch 4204-drm-amd-display-add-method-to-check-for-supported-ra.patch
+patch 4205-drm-amd-display-Fix-bug-where-refresh-rate-becomes-f.patch
+patch 4206-drm-amd-display-fix-segfault-on-insufficient-TG-duri.patch
+patch 4207-drm-amd-display-Fix-bug-that-causes-black-screen.patch
+patch 4208-drm-amd-display-change-dml-init-to-use-default-struc.patch
+patch 4209-drm-amd-display-Add-back-code-to-allow-for-rounding-.patch
+patch 4210-drm-amd-display-Check-lid-state-to-determine-fast-bo.patch
+patch 4211-drm-amd-display-Do-not-create-memory-allocation-if-s.patch
+patch 4212-drm-amd-display-Move-DCC-support-functions-into-dchu.patch
+patch 4213-drm-amd-display-fix-LFC-tearing-at-top-of-screen.patch
+patch 4214-drm-amd-display-HDMI-has-no-sound-after-Panel-power-.patch
+patch 4215-drm-amd-display-refactor-vupdate-interrupt-registrat.patch
+patch 4216-drm-amd-display-Check-SCRATCH-reg-to-determine-S3-re.patch
+patch 4217-drm-amd-display-add-rq-dlg-ttu-to-dtn-log.patch
+patch 4218-drm-amd-display-add-calculated-clock-logging-to-DTN.patch
+patch 4219-drm-amd-display-add-missing-colorspace-for-set-black.patch
+patch 4220-drm-amd-display-Use-dig-enable-to-determine-fast-boo.patch
+patch 4221-drm-amdgpu-ifdef-unused-var.patch
+patch 4222-drm-amdgpu-add-amdgpu_bo_param.patch
+patch 4223-drm-amdgpu-use-amdgpu_bo_param-for-amdgpu_bo_create-.patch
+patch 4224-drm-amdgpu-fix-amdgpu_bo_create-param-changed-for-tt.patch
+patch 4225-drm-amdkcl-fix-amdgpu_bo_param-changed-compile-error.patch
+patch 4226-drm-amdgpu-print-the-vbios-version-in-the-debugfs-fi.patch
+patch 4227-drm-scheduler-always-put-last_sched-fence-in-entity_.patch
+patch 4228-drm-scheduler-move-last_sched-fence-updating-prior-t.patch
+patch 4229-drm-amdgpu-limit-reg_write_reg_wait-workaround-to-SR.patch
+patch 4230-drm-amdgpu-set-preferred_domain-independent-of-fallb.patch
+patch 4231-drm-amdgpu-handle-domain-mask-checking-v2.patch
+patch 4232-drm-scheduler-fix-build-broken-by-move-last_sched-fe.patch
+patch 4233-drm-amdgpu-optionally-do-a-writeback-but-don-t-inval.patch
+patch 4234-drm-amdgpu-fix-list-not-initialized.patch
+patch 4235-drm-amdgpu-init-gfx9-aperture-settings.patch
+patch 4236-drm-amdgpu-simplify-bo_va-list-when-vm-bo-update-v2.patch
+patch 4237-drm-amdgpu-bo-could-be-null-when-access-in-vm-bo-upd.patch
+patch 4238-drm-amdgpu-print-DMA-buf-status-in-debugfs.patch
+patch 4239-drm-amdgpu-Rename-amdgpu_display_framebuffer_domains.patch
+patch 4240-drm-amdgpu-Remove-VRAM-from-shared-bo-domains.patch
+patch 4241-drm-amdgpu-pm-document-power_dpm_force_performance_l.patch
+patch 4242-drm-amdgpu-pm-document-power_dpm_state.patch
+patch 4243-drm-amdgpu-pm-document-pp_table.patch
+patch 4244-drm-amdgpu-pm-document-pp_dpm_sclk-pp_dpm_mclk-pp_dp.patch
+patch 4245-drm-amdgpu-pm-document-pp_power_profile_mode.patch
+patch 4246-drm-amdgpu-pm-document-pp_od_clk_voltage.patch
+patch 4247-drm-amd-pp-Change-voltage-clk-range-for-OD-feature-o.patch
+patch 4248-drm-amdgpu-Enable-scatter-gather-display-support.patch
+patch 4249-Revert-drm-amdgpu-defer-test-IBs-on-the-rings-at-boo.patch
+patch 4250-drm-amdkfd-Use-shared-IH-client-ID.patch
+patch 4251-drm-amdkfd-Implement-hw_exception-work-thread-to-han.patch
+patch 4252-drm-amdkfd-Remove-queue-node-when-destroy-queue-fail.patch
+patch 4253-drm-amdgpu-Always-call-kfd-post-reset-after-reset.patch
+patch 4254-drm-amdkfd-CMA-Remove-diff.-device-restriction.patch
+patch 4255-drm-amdkfd-CMA-Store-mem_type-in-KFD-BO.patch
+patch 4256-drm-amdkfd-CMA-Support-for-diff.-devices.patch
+patch 4257-drm-amdkfd-Remove-unused-variable.patch
+patch 4258-drm-amdgpu-uvd7-add-emit_reg_write_reg_wait-ring-cal.patch
+patch 4259-Hybrid-Version-18.30.0.15.patch
+patch 4260-Hybrid-Version-18.30.1.15.patch
+patch 4261-Revert-drm-amdgpu-set-COMPUTE_PGM_RSRC1-for-SGPR-VGP.patch
+patch 4262-drm-amdgpu-change-pp_dpm-clk-mclk-pcie-input-format.patch
+patch 4263-drm-amdgpu-fix-amdgpu_atpx_get_client_id-s-return-ty.patch
+patch 4264-drm-amdgpu-Set-graphics-noretry-to-1.patch
+patch 4265-drm-amdfd-Don-t-hard-code-wait-time.patch
+patch 4266-drm-amdkfd-CMA-Add-intermediate-wait-if-mGPU.patch
+patch 4267-drm-amdkfd-CMA-Support-multi-device-VRAM-copy.patch
+patch 4268-drm-amdkfd-Reduce-priority-of-context-saving-waves-b.patch
+patch 4269-drm-amdkfd-Introduce-kfd-kernel-module-parameter-hal.patch
+patch 4270-drm-amdkfd-Use-module-parameters-noretry-as-the-inte.patch
+patch 4271-drm-amdkfd-Separate-trap-handler-assembly-code-and-i.patch
+patch 4272-drm-amdkfd-Mellanox-Support-PeerSync-interface.patch
+patch 4273-drm-amdkfd-Fix-CP-soft-hang-on-APUs.patch
+patch 4274-drm-amdgpu-Don-t-use-kiq-to-send-invalid_tlbs-packag.patch
+patch 4275-drm-amdgpu-Don-t-use-shadow-BO-for-compute-context.patch
+patch 4276-drm-amdkfd-Fix-typos-in-trap-handler-comments.patch
+patch 4277-drm-amdkfd-Align-Makefile-with-upstream.patch
+patch 4278-drm-amdkfd-Align-CIK-interrupt-processing-with-upstr.patch
+patch 4279-drm-amdkfd-Remove-IH-patching-workaround-for-Vega10.patch
+patch 4280-drm-amdkfd-Clean-up-mmap-handling.patch
+patch 4281-drm-amdkfd-fix-uninitialized-variable-use.patch
+patch 4282-drm-amdkfd-remove-unused-parameter-from-quiesce_mm-r.patch
+patch 4283-drm-amdkfd-Fix-kernel-queue-rollback-for-64-bit-wptr.patch
+patch 4284-drm-amdkfd-Match-release_mem-interface-with-other-PM.patch
+patch 4285-drm-amdkfd-Simplify-packet-manager-initialization.patch
+patch 4286-drm-amdkfd-Fix-error-handling-in-pm_init.patch
+patch 4287-drm-amdkfd-Fix-pm_debugfs_runlist.patch
+patch 4288-drm-amdkfd-Check-ctx_save_restore_area_address.patch
+patch 4289-drm-amdkfd-Fix-error-handling-around-kfd_process_cre.patch
+patch 4290-drm-amdkfd-Fix-error-handling-in-APU-CWSR-mapping.patch
+patch 4291-drm-amdkfd-Simplify-error-handling-in-kfd_create_pro.patch
+patch 4292-drm-amdkfd-Simplify-obj-handle-allocation.patch
+patch 4293-drm-amdkfd-Error-if-trying-to-acquire-VM-for-a-PDD-t.patch
+patch 4294-drm-amdkfd-Cosmetic-changes-to-match-upstream.patch
+patch 4295-drm-amdkfd-Add-sanity-checks-in-IRQ-handlers.patch
+patch 4296-drm-amdgpu-Check-NULL-pointer-for-job-before-reset-j.patch
+patch 4297-drm-amd-amdgpu-vcn10-Add-callback-for-emit_reg_write.patch
+patch 4298-drm-amd-amdgpu-Add-some-documentation-to-the-debugfs.patch
+patch 4299-drm-amdgpu-abstract-bo_base-init-function.patch
+patch 4300-drm-amdgpu-Fix-KFD-doorbell-SG-BO-mapping.patch
+patch 4301-drm-amdkfd-Don-t-use-kmap_atomic.patch
+patch 4302-drm-amdkcl-fixed-can-t-find-kgd_kfd_interface.h-head.patch
+patch 4303-drm-amdgpu-set-COMPUTE_PGM_RSRC1-for-SGPR-VGPR-clear.patch
+patch 4304-drm-admgpu-fix-mode_valid-s-return-type.patch
+patch 4305-drm-amdgpu-add-VEGAM-ASIC-type.patch
+patch 4306-drm-amdgpu-bypass-GPU-info-firmware-load-for-VEGAM.patch
+patch 4307-drm-amdgpu-set-VEGAM-to-ASIC-family-and-ip-blocks.patch
+patch 4308-drm-amdgpu-specify-VEGAM-ucode-SMU-load-method.patch
+patch 4309-drm-amdgpu-add-VEGAM-SMU-firmware-support.patch
+patch 4310-drm-amdgpu-virtual_dce-add-VEGAM-support.patch
+patch 4311-drm-amdgpu-add-VEGAM-dc-support-check.patch
+patch 4312-drm-amdgpu-skip-VEGAM-MC-firmware-load.patch
+patch 4313-drm-amdgpu-add-VEGAM-GMC-golden-settings.patch
+patch 4314-drm-amdgpu-initialize-VEGAM-GMC-v2.patch
+patch 4315-drm-amdgpu-add-VEGAM-SDMA-firmware-support.patch
+patch 4316-drm-amdgpu-add-VEGAM-SDMA-golden-settings.patch
+patch 4317-drm-amdgpu-add-VEGAM-GFX-firmware-support.patch
+patch 4318-drm-amdgpu-add-VEGAM-GFX-golden-settings.patch
+patch 4319-drm-amdgpu-initialize-VEGAM-GFX.patch
+patch 4320-drm-amdgpu-add-VEGAM-UVD-firmware-support.patch
+patch 4321-drm-amdgpu-add-VEGAM-UVD-encode-support.patch
+patch 4322-drm-amdgpu-add-VEGAM-VCE-firmware-support.patch
+patch 4323-drm-amdgpu-add-VEGAM-to-VCE-harvest-config.patch
+patch 4324-drm-amdgpu-add-VEGAM-support-to-vi.patch
+patch 4325-drm-amdgpu-add-VEGAM-pci-ids.patch
+patch 4326-drm-amd-display-Implement-VEGAM-device-IDs-in-DC.patch
+patch 4327-drm-amd-display-Implement-VEGAM-device-IDs-in-DM.patch
+patch 4328-drm-amdgpu-Add-VEGAM-support-to-the-legacy-DCE-11-mo.patch
+patch 4329-drm-amd-display-Use-HBR2-if-eDP-monitor-it-doesn-t-a.patch
+patch 4330-drm-amd-powerplay-add-smu75-header-files.patch
+patch 4331-drm-amd-add-a-new-struct-in-atombios.h.patch
+patch 4332-drm-amd-powerplay-update-ppatomctrl.c-v2.patch
+patch 4333-drm-amd-powerplay-update-process-pptables.patch
+patch 4334-drm-amd-powerplay-add-smumgr-support-for-VEGAM-v2.patch
+patch 4335-drm-amd-powerplay-add-specific-changes-for-VEGAM-in-.patch
+patch 4336-drm-powerplay-Add-powertune-table-for-VEGAM.patch
+patch 4337-drm-scheduler-don-t-update-last-scheduled-fence-in-T.patch
+patch 4338-drm-amdgpu-For-sriov-reset-move-IB-test-into-exclusi.patch
+patch 4339-drm-amdgpu-sriov-Need-to-set-in_gpu_reset-flag-to-ba.patch
+patch 4340-drm-amd-display-Fix-deadlock-when-flushing-irq.patch
+patch 4341-drm-amd-display-Unify-dm-resume-sequence-into-a-sing.patch
+patch 4342-drm-amd-display-Disallow-enabling-CRTC-without-prima.patch
+patch 4343-drm-amd-display-fix-issue-related-to-infopacket-was-.patch
+patch 4344-drm-amd-display-Make-program_output_csc-HWSS-interfa.patch
+patch 4345-drm-amd-display-Refactor-otg_blank-sequence.patch
+patch 4346-drm-amd-display-DP-link-validation-bug-for-YUV422.patch
+patch 4347-drm-amd-display-dal-3.1.43.patch
+patch 4348-drm-amd-display-Add-user_regamma-to-color-module.patch
+patch 4349-drm-amd-display-add-cursor-TTU-CRQ-related.patch
+patch 4350-drm-amd-display-add-some-DTN-logs-for-input-and-outp.patch
+patch 4351-drm-amd-display-update-dtn-logging-and-goldens.patch
+patch 4352-drm-amd-display-Correct-rounding-calcs-in-mod_freesy.patch
+patch 4353-drm-amd-display-compact-the-rq-dlg-ttu-log.patch
+patch 4354-drm-amd-display-Add-assert-that-chroma-pitch-is-non-.patch
+patch 4355-drm-amd-display-Update-MST-edid-property-every-time.patch
+patch 4356-drm-amd-display-reprogram-infoframe-during-apply_ctx.patch
+patch 4357-drm-amd-display-Check-dc_sink-every-time-in-MST-hotp.patch
+patch 4358-drm-amd-display-to-synchronize-the-hubp-and-dpp-prog.patch
+patch 4359-drm-amd-display-dal-3.1.44.patch
+patch 4360-drm-amd-display-Use-int-for-calculating-vline-start.patch
+patch 4361-drm-amd-display-Couple-formatting-fixes.patch
+patch 4362-drm-amd-display-Add-VG12-ASIC-IDs.patch
+patch 4363-drm-amd-Add-BIOS-smu_info-v3_3-required-struct-def.patch
+patch 4364-drm-amd-display-Add-get_firmware_info_v3_2-for-VG12.patch
+patch 4365-drm-amd-display-Don-t-return-ddc-result-and-read_byt.patch
+patch 4366-drm-amd-display-Use-kvzalloc-for-potentially-large-a.patch
+patch 4367-drm-amd-display-disable-FBC-on-underlay-pipe.patch
+patch 4368-drm-amdgpu-Switch-to-interruptable-wait-to-recover-f.patch
+patch 4369-drm-amd-amdgpu-Add-some-documentation-to-the-debugfs.patch
+patch 4370-drm-amdgpu-invalidate-parent-bo-when-shadow-bo-was-i.patch
+patch 4371-drm-amd-powerplay-fix-spelling-mistake-contruct-cons.patch
+patch 4372-drm-amd-display-clean-up-assignment-of-amdgpu_crtc.patch
+patch 4373-drm-ttm-remove-priority-hard-code-when-initializing-.patch
+patch 4374-drm-amdgpu-set-ttm-bo-priority-before-initialization.patch
+patch 4375-drm-amdgpu-gmc9-remove-unused-register-defs.patch
+patch 4376-drm-amdgpu-fix-null-pointer-for-bo-unmap-trace-funct.patch
+patch 4377-drm-amd-display-remove-need-of-modeset-flag-for-over.patch
+patch 4378-drm-amdgpu-Add-support-to-change-mtype-for-2nd-part-.patch
+patch 4379-drm-amdgpu-drop-printing-the-BO-offset-in-the-gem-de.patch
+patch 4380-drm-amdgpu-print-the-BO-flags-in-the-gem-debugfs-ent.patch
+patch 4381-drm-amdgpu-gfx9-Update-golden-setting-for-gfx9_0.patch
+patch 4382-drm-amd-powerplay-new-framework-to-honour-DAL-clock-.patch
+patch 4383-drm-amd-powerplay-add-a-framework-for-perfroming-pre.patch
+patch 4384-drm-amdgpu-Drop-the-unused-header-files-in-soc15.c.patch
+patch 4385-drm-amdgpu-Fix-hardcoded-base-offset-of-vram-pages.patch
+patch 4386-drm-amd-Add-vega20_ip_offset.h-headerfile-for-vega20.patch
+patch 4387-drm-amdgpu-Add-vega20-to-asic_type-enum.patch
+patch 4388-drm-amdgpu-Add-gpu_info-firmware-for-vega20.patch
+patch 4389-drm-amdgpu-set-asic-family-for-vega20.patch
+patch 4390-drm-amdgpu-Add-smu-firmware-support-for-vega20.patch
+patch 4391-drm-amdgpu-powerplay-Add-initial-vega20-support-v2.patch
+patch 4392-drm-amdgpu-psp-Add-initial-psp-support-for-vega20.patch
+patch 4393-drm-amdgpu-Add-vega20-ucode-loading-method.patch
+patch 4394-drm-amdgpu-Specify-vega20-uvd-firmware.patch
+patch 4395-drm-amdgpu-Specify-vega20-vce-firmware.patch
+patch 4396-drm-amdgpu-virtual_dce-Add-vega20-support.patch
+patch 4397-drm-amdgpu-gmc9-Add-vega20-support.patch
+patch 4398-drm-amdgpu-mmhub-Add-clockgating-support-for-vega20.patch
+patch 4399-drm-amdgpu-sdma4-Specify-vega20-firmware.patch
+patch 4400-drm-amdgpu-sdma4-Add-vega20-golden-settings-v3.patch
+patch 4401-drm-amdgpu-sdma4-Add-clockgating-support-for-vega20.patch
+patch 4402-drm-amdgpu-gfx9-Add-support-for-vega20-firmware.patch
+patch 4403-drm-amdgpu-gfx9-Add-vega20-golden-settings-v3.patch
+patch 4404-drm-amdgpu-gfx9-Add-gfx-config-for-vega20.-v3.patch
+patch 4405-drm-amdgpu-gfx9-Add-support-for-vega20.patch
+patch 4406-drm-amdgpu-gfx9-Add-clockgatting-support-for-vega20.patch
+patch 4407-drm-amdgpu-soc15-Add-vega20-soc15_common_early_init-.patch
+patch 4408-drm-amdgpu-soc15-Set-common-clockgating-for-vega20.patch
+patch 4409-drm-amdgpu-soc15-dynamic-initialize-ip-offset-for-ve.patch
+patch 4410-drm-amdgpu-soc15-Add-ip-blocks-for-vega20-v2.patch
+patch 4411-drm-amdgpu-Add-nbio-support-for-vega20-v2.patch
+patch 4412-drm-amdgpu-Add-vega20-soc-init-sequence-on-emulator-.patch
+patch 4413-drm-amd-display-dm-Add-vega20-support.patch
+patch 4414-drm-amdgpu-Add-vega20-to-dc-support-check.patch
+patch 4415-drm-amd-Add-dce-12.1-gpio-aux-registers.patch
+patch 4416-drm-amd-display-Add-Vega20-config.-support.patch
+patch 4417-drm-amd-display-Remove-COMBO_DISPLAY_PLL0-from-Vega2.patch
+patch 4418-drm-amd-display-Add-BIOS-smu_info-v3_3-support-for-V.patch
+patch 4419-drm-amd-display-Add-harvest-IP-support-for-Vega20.patch
+patch 4420-drm-amdgpu-atomfirmware-add-new-gfx_info-data-table-.patch
+patch 4421-drm-amdgpu-atomfirmware-add-parser-for-gfx_info-tabl.patch
+patch 4422-drm-amdgpu-vg20-fallback-to-vbios-table-if-gpu-info-.patch
+patch 4423-drm-amdgpu-drop-gpu_info-firmware-for-vega20.patch
+patch 4424-drm-amdgpu-Set-vega20-load_type-to-AMDGPU_FW_LOAD_DI.patch
+patch 4425-drm-amd-powerplay-update-vega20-cg-flags.patch
+patch 4426-drm-include-Fix-MP1_BASE-address-for-vega20.patch
+patch 4427-drm-amd-include-vg20-adjust-VCE_BASE-to-reuse-vce-4..patch
+patch 4428-drm-amdgpu-Disable-ip-modules-that-are-not-ready-yet.patch
+patch 4429-drm-amdgpu-vg20-Restruct-uvd-to-support-multiple-uvd.patch
+patch 4430-drm-amdgpu-vg20-Restruct-uvd.inst-to-support-multipl.patch
+patch 4431-drm-amdgpu-vg20-Restruct-uvd.idle_work-to-support-mu.patch
+patch 4432-drm-amdgpu-vg20-increase-3-rings-for-AMDGPU_MAX_RING.patch
+patch 4433-drm-amdgpu-vg20-Enable-the-2nd-instance-for-uvd.patch
+patch 4434-drm-amdgpu-vg20-Add-IH-client-ID-for-the-2nd-UVD.patch
+patch 4435-drm-amdgpu-vg20-Enable-the-2nd-instance-IRQ-for-uvd-.patch
+patch 4436-drm-amdgpu-vg20-Enable-2nd-instance-queue-maping-for.patch
+patch 4437-drm-amdgpu-vg20-Enable-UVD-VCE-for-Vega20.patch
+patch 4438-drm-amdgpu-add-df-3.6-headers.patch
+patch 4439-drm-amdgpu-df-implement-df-v3_6-callback-functions-v.patch
+patch 4440-drm-amdgpu-Switch-to-use-df_v3_6_funcs-for-vega20-v2.patch
+patch 4441-drm-amdgpu-Add-vega20-pci-ids.patch
+patch 4442-drm-amdgpu-flag-Vega20-as-experimental.patch
+patch 4443-drm-amdgpu-gem-remove-unused-variable.patch
+patch 4444-drm-amdgpu-Skip-drm_sched_entity-related-ops-for-KIQ.patch
+patch 4445-drm-scheduler-remove-unused-parameter.patch
+patch 4446-drm-amdgpu-remove-unused-member.patch
+patch 4447-drm-scheduler-Remove-obsolete-spinlock.patch
+patch 4448-drm-amd-amdgpu-Code-comments-for-the-amdgpu_ttm.c-dr.patch
+patch 4449-drm-amdgpu-display-remove-VEGAM-config-option.patch
+patch 4450-drm-amdgpu-display-remove-VEGA20-config-option.patch
+patch 4451-drm-amdgpu-display-fix-vega12-20-handling-in-dal_asi.patch
+patch 4452-drm-amd-pp-missing-curly-braces-in-smu7_enable_sclk_.patch
+patch 4453-drm-scheduler-fix-function-name-prefix-in-comments.patch
+patch 4454-drm-amd-display-Cleanup-unused-SetPlaneConfig.patch
+patch 4455-drm-amd-display-get-rid-of-32.32-unsigned-fixed-poin.patch
+patch 4456-drm-amd-display-inline-more-of-fixed-point-code.patch
+patch 4457-drm-amd-display-Make-DisplayStats-work-with-just-DC-.patch
+patch 4458-drm-amd-display-add-fixed-point-fractional-bit-trunc.patch
+patch 4459-drm-amd-display-truncate-scaling-ratios-and-inits-to.patch
+patch 4460-drm-amd-display-underflow-blankscreen-recovery.patch
+patch 4461-drm-amd-display-Update-HW-sequencer-initialization.patch
+patch 4462-drm-amd-display-fix-31_32_fixpt-shift-functions.patch
+patch 4463-drm-amd-display-fix-a-32-bit-shift-meant-to-be-64-wa.patch
+patch 4464-drm-amd-display-Add-dc-cap-to-restrict-VSR-downscali.patch
+patch 4465-drm-amd-display-disable-mpo-if-brightness-adjusted.patch
+patch 4466-drm-amd-display-Log-DTN-only-after-the-atomic-commit.patch
+patch 4467-drm-amd-display-update-dml-to-allow-sync-with-DV.patch
+patch 4468-drm-amd-display-Fix-up-dm-logging-functionality.patch
+patch 4469-drm-amd-display-use-macro-for-logs.patch
+patch 4470-drm-amd-display-don-t-create-new-dc_sink-if-nothing-.patch
+patch 4471-drm-amd-display-Only-limit-VSR-downscaling-when-actu.patch
+patch 4472-drm-amd-display-constify-a-few-dc_surface_update-fie.patch
+patch 4473-drm-amd-display-Add-fullscreen-transitions-to-log.patch
+patch 4474-drm-amd-display-fix-bug-with-index-check.patch
+patch 4475-drm-amd-display-Clear-underflow-status-for-debug-pur.patch
+patch 4476-drm-amd-display-DCN1-link-encoder.patch
+patch 4477-drm-amd-display-fix-memory-leaks.patch
+patch 4478-drm-amd-display-Clear-connector-s-edid-pointer.patch
+patch 4479-drm-amd-pp-Fix-build-warning-in-vegam.patch
+patch 4480-drm-amdgpu-fix-insert-nop-for-VCN-decode-ring.patch
+patch 4481-drm-amdgpu-fix-insert-nop-for-UVD7-ring.patch
+patch 4482-drm-amdgpu-fix-insert-nop-for-UVD6-ring.patch
+patch 4483-drm-amdgpu-fix-insert-nop-for-UVD5-ring.patch
+patch 4484-drm-amdgpu-fix-insert-nop-for-UVD4.2-ring.patch
+patch 4485-Remove-calls-to-suspend-resume-atomic-helpers-from-a.patch
+patch 4486-Revert-drm-amdgpu-vg20-Restruct-uvd.idle_work-to-sup.patch
+patch 4487-drm-amdgpu-count-fences-from-all-uvd-instances-in-id.patch
+patch 4488-drm-amdgpu-Take-uvd-encode-rings-into-account-in-idl.patch
+patch 4489-drm-amdgpu-Take-vcn-encode-rings-into-account-in-idl.patch
+patch 4490-drm-amdkfd-Fix-kernel-queue-64-bit-doorbell-offset-c.patch
+patch 4491-drm-amdgpu-Avoid-invalidate-tlbs-when-gpu-is-on-rese.patch
+patch 4492-drm-amdkfd-Fix-race-between-scheduler-and-context-re.patch
+patch 4493-drm-amdkfd-Change-the-control-stack-mtype-from-UC-to.patch
+patch 4494-drm-amdgpu-Avoid-destroy-hqd-when-GPU-is-on-reset.patch
+patch 4495-drm-amd-pp-fix-a-couple-locking-issues.patch
+patch 4496-drm-amdgpu-skip-CG-for-VCN-when-late_init-fini.patch
+patch 4497-drm-amd-pp-Add-smu-support-for-VCN-powergating-on-RV.patch
+patch 4498-drm-amdgpu-Add-CG-PG-flags-for-VCN.patch
+patch 4499-drm-amdgpu-Add-SOC15_WAIT_ON_RREG-macro-define.patch
+patch 4500-drm-amdgpu-Add-static-CG-control-for-VCN-on-RV.patch
+patch 4501-drm-amdgpu-Enable-VCN-CG-by-default-on-RV.patch
+patch 4502-drm-amdgpu-Add-VCN-static-PG-support-on-RV.patch
+patch 4503-drm-amdgpu-Enable-VCN-static-PG-by-default-on-RV.patch
+patch 4504-drm-amdgpu-Add-runtime-VCN-PG-support.patch
+patch 4505-drm-amdgpu-rework-VM-state-machine-lock-handling-v2.patch
+patch 4506-drm-amdgpu-cleanup-amdgpu_vm_validate_pt_bos-v2.patch
+patch 4507-drm-amdgpu-further-optimize-amdgpu_vm_handle_moved.patch
+patch 4508-drm-amdgpu-kmap-PDs-PTs-in-amdgpu_vm_update_director.patch
+patch 4509-drm-amdgpu-consistenly-use-VM-moved-flag.patch
+patch 4510-drm-amdgpu-move-VM-BOs-on-LRU-again.patch
+patch 4511-drm-amdgpu-add-rcu_barrier-after-entity-fini.patch
+patch 4512-drm-amdgpu-Remove-unused-variable-in-amdgpu_device_g.patch
+patch 4513-drm-amdkfd-sriov-Put-the-pre-and-post-reset-in-exclu.patch
+patch 4514-drm-amdgpu-pp-remove-duplicate-assignment.patch
+patch 4515-drm-amdgpu-Update-GFX-info-structure-to-match-what-v.patch
+patch 4516-drm-amd-display-Remove-use-of-division-operator-for-.patch
+patch 4517-drm-amd-display-Implement-dm_pp_get_clock_levels_by_.patch
+patch 4518-drm-amdgpu-vcn_v1_0_is_idle-can-be-static.patch
+patch 4519-drm-amdkfd-Fix-a-copy-error-when-exit-compute-profil.patch
+patch 4520-drm-amdkfd-Add-debugfs-interface-to-trigger-HWS-hang.patch
+patch 4521-drm-amdkcl-4.17-fix-prime-bo-for-raven-A-A-issue.patch
+patch 4522-drm-amdgpu-defer-test-IBs-on-the-rings-at-boot-V3.patch
+patch 4523-drm-amd-display-Release-fake-sink.patch
+patch 4524-drm-amd-display-pass-pipe_ctx-straight-to-blank_pixe.patch
+patch 4525-drm-amd-display-add-register-offset-0-check.patch
+patch 4526-drm-amd-display-Do-not-program-interrupt-status-on-d.patch
+patch 4527-drm-amd-display-Clean-up-submit_channel_request.patch
+patch 4528-drm-amd-display-upgrade-scaler-math.patch
+patch 4529-drm-amd-display-dal-3.1.45.patch
+patch 4530-drm-amd-display-Prefix-event-prints-with-Event.patch
+patch 4531-drm-amd-display-Read-DPCD-link-caps-up-to-and-includ.patch
+patch 4532-drm-amd-display-AUX-will-exit-when-HPD-LOW-detected.patch
+patch 4533-drm-amd-display-Add-function-to-get-optc-active-size.patch
+patch 4534-drm-amd-display-replace-msleep-with-udelay-in-fbc-pa.patch
+patch 4535-drm-amd-display-add-DPCD-read-for-Sink-ieee-OUI.patch
+patch 4536-drm-amd-display-add-config-for-sending-VSIF.patch
+patch 4537-drm-amd-display-Fix-indentation-in-dcn10-resource-co.patch
+patch 4538-drm-amd-display-Read-DP_SINK_COUNT_ESI-range-on-HPD-.patch
+patch 4539-drm-amd-display-Default-log-masks-should-include-all.patch
+patch 4540-drm-amd-display-Optimize-DP_SINK_STATUS_ESI-range-re.patch
+patch 4541-drm-amd-display-Dynamic-HDR-metadata-mem-buffer.patch
+patch 4542-drm-amd-display-Refactor-audio-programming.patch
+patch 4543-drm-amd-display-HLG-support.patch
+patch 4544-drm-amd-display-DP-component-depth-16-bpc.patch
+patch 4545-drm-amd-display-Added-documentation-for-some-DC-inte.patch
+patch 4546-drm-amd-display-dal-3.1.46.patch
+patch 4547-drm-amd-display-Set-TMZ-and-DCC-for-secondary-surfac.patch
+patch 4548-drm-amd-display-Destroy-connector-state-on-reset.patch
+patch 4549-drm-amd-display-Prefix-TIMING_STANDARD-entries-with-.patch
+patch 4550-drm-amd-display-DP-YCbCr-4-2-0-support.patch
+patch 4551-drm-amd-display-decouple-front-and-backend-pgm-using.patch
+patch 4552-drm-amd-display-add-dentist-frequency-to-resource-po.patch
+patch 4553-drm-amd-display-fix-dscl_manual_ratio_init.patch
+patch 4554-drm-amd-display-check-if-audio-clk-enable-is-applica.patch
+patch 4555-drm-amd-display-Do-not-limit-color-depth-to-8bpc.patch
+patch 4556-drm-amd-display-dal-3.1.47.patch
+patch 4557-drm-amd-display-Fix-wrong-latency-assignment-for-VEG.patch
+patch 4558-drm-amdgpu-display-check-if-ppfuncs-exists-before-us.patch
+patch 4559-drm-amdgpu-display-drop-DRM_AMD_DC_FBC-kconfig-optio.patch
+patch 4560-drm-amdgpu-display-enable-CONFIG_DRM_AMD_DC_DCN1_0-b.patch
+patch 4561-drm-amd-display-avoid-sleeping-in-atomic-context-whi.patch
+patch 4562-drm-amdkcl-4.7-fix-__drm_atomic_helper_connector_des.patch
+patch 4563-drm-scheduler-fix-a-corner-case-in-dependency-optimi.patch
+patch 4564-drm-amdgpu-remove-unnecessary-scheduler-entity-for-V.patch
+patch 4565-drm-amd-pp-Add-cases-for-getting-phys-and-disp-clks-.patch
+patch 4566-drm-amdgpu-Use-GTT-for-dumb-buffer-if-sg-display-ena.patch
+patch 4567-drm-amdgpu-Add-helper-function-to-get-buffer-domain.patch
+patch 4568-drm-amdgpu-To-get-gds-gws-and-oa-from-adev-gds.patch
+patch 4569-drm-amdgpu-correct-SMU11-SYSPLL0-clock-id-values.patch
+patch 4570-drm-amd-powerplay-bug-fixs-for-getsmuclockinfo.patch
+patch 4571-drm-amdgpu-typo-fix-for-vega20-cg-flags.patch
+patch 4572-drm-amdgpu-fix-ISO-C90-forbids-mixed-declarations.patch
+patch 4573-drm-amdgpu-gds-bo-must-not-be-per-vm-bo.patch
+patch 4574-drm-amd-pp-Connect-display_clock_voltage_request-to-.patch
+patch 4575-drm-amd-pp-Allow-underclocking-when-od-table-is-empt.patch
+patch 4576-drm-gfx9-Update-gc-goldensetting-for-vega20.patch
+patch 4577-drm-amdgpu-Fix-NULL-pointer-when-load-kfd-driver-wit.patch
+patch 4578-drm-amdgpu-add-kernel-doc-for-amdgpu_object.c.patch
+patch 4579-drm-amdgpu-add-checking-for-sos-version.patch
+patch 4580-drm-amdgpu-fix-the-missed-vcn-fw-version-report.patch
+patch 4581-drm-amdgpu-df-fix-potential-array-out-of-bounds-read.patch
+patch 4582-Revert-drm-amdgpu-Add-an-ATPX-quirk-for-hybrid-lapto.patch
+patch 4583-Revert-drm-amdgpu-add-new-device-to-use-atpx-quirk.patch
+patch 4584-Partially-revert-drm-amdgpu-add-atpx-quirk-handling-.patch
+patch 4585-drm-amdgpu-pp-switch-the-default-dpm-implementation-.patch
+patch 4586-drm-amdgpu-Add-documentation-for-PRIME-related-code.patch
+patch 4587-drm-amdgpu-replace-mutex-with-spin_lock-V2.patch
+patch 4588-drm-amdgpu-pp-replace-mutex-with-spin_lock-V2.patch
+patch 4589-drm-amdgpu-avoid-sleep-while-executing-atombios-tabl.patch
+patch 4590-drm-amdgpu-pp-Revert-replace-mutex-with-spin_lock-V2.patch
+patch 4591-drm-amdgpu-Fix-ups-for-amdgpu_object.c-documentation.patch
+patch 4592-drm-scheduler-Avoid-using-wait_event_killable-for-dy.patch
+patch 4593-drm-amdgpu-move-amdgpu_ctx_mgr_entity_fini-to-f_ops-.patch
+patch 4594-drm-amdgpu-fix-clear_all-and-replace-handling-in-the.patch
+patch 4595-Revert-drm-amdgpu-fix-clear_all-and-replace-handling.patch
+patch 4596-drm-amdgpu-fix-clear_all-and-replace-handling-in-the.patch
+patch 4597-drm-amd-powerplay-fix-missed-hwmgr-check-warning-bef.patch
+patch 4598-drm-amdgpu-define-vcn-jpeg-ring.patch
+patch 4599-drm-amdgpu-add-vcn-jpeg-ring.patch
+patch 4600-drm-amdgpu-add-jpeg-packet-defines-to-soc15d.h.patch
+patch 4601-drm-amdgpu-add-more-jpeg-register-offset-headers.patch
+patch 4602-drm-amdgpu-implement-jpeg-ring-functions.patch
+patch 4603-drm-amdgpu-set-jpeg-ring-functions.patch
+patch 4604-drm-amdgpu-add-vcn-jpeg-irq-support.patch
+patch 4605-drm-amdgpu-initialize-vcn-jpeg-ring.patch
+patch 4606-drm-amdgpu-implement-patch-for-fixing-a-known-bug.patch
+patch 4607-drm-amdgpu-define-and-add-extra-dword-for-jpeg-ring.patch
+patch 4608-drm-amdgpu-add-patch-to-jpeg-ring.patch
+patch 4609-drm-amdgpu-add-vcn-jpeg-sw-finish.patch
+patch 4610-drm-amdgpu-add-vcn-jpeg-ring-test.patch
+patch 4611-drm-amdgpu-add-vcn-jpeg-ib-test.patch
+patch 4612-drm-amdgpu-enable-vcn-jpeg-ib-test.patch
+patch 4613-drm-amdgpu-add-AMDGPU_HW_IP_VCN_JPEG-to-info-query.patch
+patch 4614-drm-amdgpu-add-AMDGPU_HW_IP_VCN_JPEG-to-queue-mgr.patch
+patch 4615-drm-amdgpu-Grab-put-runtime-PM-references-in-atomic_.patch
+patch 4616-drm-amd-powerplay-fix-wrong-clock-adjust-sequence.patch
+patch 4617-drm-amdgpu-rename-rmn-to-amn-in-the-MMU-notifier-cod.patch
+patch 4618-drm-amdgpu-fix-documentation-of-amdgpu_mn.c-v2.patch
+patch 4619-drm-amdgpu-Correct-the-ndw-of-bo-update-mapping.patch
+patch 4620-drm-amdgpu-change-gfx8-ib-test-to-use-WB.patch
+patch 4621-drm-amd-Update-KFD-Thunk-ioctl-ABI-to-match-upstream.patch
+patch 4622-drm-amdgpu-Doorbell-assignment-for-8-sdma-user-queue.patch
+patch 4623-drm-amdkfd-Make-the-number-of-SDMA-queues-variable.patch
+patch 4624-drm-amdgpu-Fix-NULL-pointer-when-PP-block-is-disable.patch
+patch 4625-drm-amd-Interface-change-to-support-64-bit-page_tabl.patch
+patch 4626-drm-amdgpu-Add-vega20-support-on-kfd-probe.patch
+patch 4627-drm-amdkfd-Vega20-bring-up-on-amdkfd-side.patch
+patch 4628-drm-amdkfd-reflect-atomic-support-in-IO-link-propert.patch
+patch 4629-drm-amdgpu-Changed-CU-reservation-golden-settings.patch
+patch 4630-drm-amdkfd-Add-check-user-queue-busy-interface.patch
+patch 4631-drm-amdkfd-Replace-mqd-with-mqd_mgr-as-the-variable-.patch
+patch 4632-Hybrid-Version-18.30.2.15.patch
+patch 4633-Revert-drm-amdgpu-replace-mutex-with-spin_lock-V2.patch
+patch 4634-Revert-drm-amd-display-avoid-sleeping-in-atomic-cont.patch
+patch 4635-drm-amdgpu-Added-ISR-for-CP-ECC-EDC-interrupt-v2.patch
+patch 4636-drm-amdgpu-Add-interrupt-SQ-source-struct-to-amdgpu_.patch
+patch 4637-drm-amdgpu-Add-plumbing-for-handling-SQ-EDC-ECC-inte.patch
+patch 4638-drm-amdgpu-remove-unused-parameter-for-va-update.patch
+patch 4639-drm-amd-pp-initialize-result-to-before-or-ing-in-dat.patch
+patch 4640-drm-amd-display-Fix-stale-buffer-object-bo-use.patch
+patch 4641-drm-amd-pp-Fix-OD-feature-enable-failed-on-Vega10-wo.patch
+patch 4642-drm-amdgpu-Update-function-level-documentation-for-G.patch
+patch 4643-drm-amd-include-Update-df-3.6-mask-and-shift-definit.patch
+patch 4644-drm-amdgpu-fix-parsing-indirect-register-list-v2.patch
+patch 4645-drm-amd-powerplay-remove-uncessary-extra-gfxoff-cont.patch
+patch 4646-drm-amd-powerplay-Set-higher-SCLK-MCLK-frequency-tha.patch
+patch 4647-drm-amdgpu-Add-BRACKET_LAYOUT_ENUMs-to-ObjectID.h.patch
+patch 4648-drm-amdgpu-update-documentation-for-amdgpu_irq.c-v3.patch
+patch 4649-drm-amdgpu-fix-typo-in-amdgpu_mn.c-comments.patch
+patch 4650-drm-amdgpu-Consolidate-visible-vs.-real-vram-check-v.patch
+patch 4651-drm-doc-Add-amdgpu-hwmon-power-documentation-v2.patch
+patch 4652-drm-amdgpu-vg20-support-new-UVD-FW-version-naming-co.patch
+patch 4653-drm-amd-pp-Add-S3-support-for-OD-feature.patch
+patch 4654-drm-amdkfd-Fix-the-case-when-a-process-is-NULL.patch
+patch 4655-drm-amdgpu-band-aid-validating-VM-PTs.patch
+patch 4656-drm-amd-pp-Fix-wrong-clock-unit-exported-to-Display.patch
+patch 4657-drm-amd-display-use-the-get_crtc-instead-of-get-exis.patch
+patch 4658-drm-amdgpu-add-new-DF-1.7-register-defs.patch
+patch 4659-drm-amdgpu-add-new-DF-callback-for-ECC-setup.patch
+patch 4660-drm-amdgpu-add-a-df-1.7-implementation-of-enable_ecc.patch
+patch 4661-drm-amdgpu-gmc9-disable-partial-wr-rmw-if-ECC-is-not.patch
+patch 4662-Revert-drm-amd-display-Implement-dm_pp_get_clock_lev.patch
+patch 4663-Revert-drm-amdgpu-band-aid-validating-VM-PTs.patch
+patch 4664-Revert-drm-amdgpu-move-VM-BOs-on-LRU-again.patch
+patch 4665-drm-amdgpu-Make-sure-IB-tests-flushed-after-IP-resum.patch
+patch 4666-drm-amdgpu-gfx9-Update-golden-settings-for-vg10.patch
+patch 4667-drm-amd-display-Fix-Vega10-black-screen-after-mode-c.patch
+patch 4668-drm-amd-pp-Read-vbios-vddc-limit-before-use-them.patch
+patch 4669-drm-amd-pp-Update-clk-with-od-setting-when-set-power.patch
+patch 4670-drm-amdgpu-Make-struct-amdgpu_atif-private-to-amdgpu.patch
+patch 4671-drm-amdgpu-s-disp_detetion_ports-disp_detection_port.patch
+patch 4672-drm-amdgpu-Add-amdgpu_atpx_get_dhandle.patch
+patch 4673-drm-amdgpu-Dynamically-probe-for-ATIF-handle-v2.patch
+patch 4674-drm-amdgpu-Grab-put-runtime-PM-references-in-atomic_.patch
+patch 4675-drm-amdgpu-Count-disabled-CRTCs-in-commit-tail-earli.patch
+patch 4676-drm-amdgpu-delete-duplicated-code-about-runtime-PM-r.patch
+patch 4677-drm-amd-display-Fix-warning-observed-in-mode-change-.patch
+patch 4678-drm-amd-display-Fix-Edid-emulation-for-linux.patch
+patch 4679-drm-amd-display-fix-invalid-function-table-override.patch
+patch 4680-drm-amd-display-make-function-tables-const.patch
+patch 4681-drm-amd-amdgpu-Removing-unwanted-code-from-the-below.patch
+patch 4682-drm-amdgpu-Fix-vce-work-queue-was-not-cancelled-when.patch
+patch 4683-drm-amd-display-skip-multisync-for-slave-displays-ha.patch
+patch 4684-drm-amd-display-multisync-should-be-enabled-only-for.patch
+patch 4685-drm-amd-display-skip-multisync-redo-for-already-enab.patch
+patch 4686-drm-amd-display-initialize-new_stream-status.primary.patch
+patch 4687-x86-MCE-AMD-mce-code-changes-to-fix-the-crash.patch
+patch 4688-drm-amdgpu-No-action-when-VCN-PG-state-is-unchanged.patch
+patch 4689-tpm-tpm_crb-Use-start-method-value-from-ACPI-table-d.patch
+patch 4690-drm-amd-display-Fix-BUG_ON-during-CRTC-atomic-check-.patch
+patch 4691-drm-amd-display-Make-atomic-check-validate-underscan.patch
+patch 4692-drm-amd-display-Update-color-props-when-modeset-is-r.patch
+patch 4693-drm-amd-powerplay-add-control-gfxoff-enabling-in-lat.patch
+patch 4694-drm-amd-powerplay-fix-missed-hwmgr-check-warning-bef.patch
+patch 4695-drm-amd-powerplay-Set-higher-SCLK-MCLK-frequency-tha.patch
+patch 4696-drm-amd-pp-Fix-uninitialized-variable.patch
+patch 4697-drm-amdgpu-Use-kvmalloc_array-for-allocating-VRAM-ma.patch
+patch 4698-drm-amdgpu-Don-t-default-to-DC-support-for-Kaveri-an.patch
+patch 4699-drm-amdgpu-All-UVD-instances-share-one-idle_work-han.patch
+patch 4700-drm-amdgpu-Update-pin_size-values-before-unpinning-B.patch
+patch 4701-drm-amdgpu-Refactor-amdgpu_vram_mgr_bo_invisible_siz.patch
+patch 4702-drm-amdgpu-Make-amdgpu_vram_mgr_bo_invisible_size-al.patch
+patch 4703-drm-amdgpu-GPU-vs-CPU-page-size-fixes-in-amdgpu_vm_b.patch
+patch 4704-drm-amdgpu-fix-UBSAN-Undefined-behaviour-for-amdgpu_.patch
+patch 4705-drm-amdgpu-Support-new-VCN-FW-version-naming-convent.patch
+patch 4706-drm-amd-display-release-spinlock-before-committing-u.patch
+patch 4707-drm-amd-powerplay-correct-vega12-thermal-support-as-.patch
+patch 4708-drm-amd-powerplay-correct-vega12-bootup-values-setti.patch
+patch 4709-drm-amd-powerplay-smc_dpm_info-structure-change.patch
+patch 4710-drm-amdgpu-fix-swapped-emit_ib_size-in-vce3.patch
+patch 4711-drm-amdgpu-pm-fix-display-count-in-non-DC-path.patch
+patch 4712-drm-amdgpu-fix-user-fence-write-race-condition.patch
+patch 4713-drm-amd-display-adding-ycbcr420-pixel-encoding-for-h.patch
+patch 4714-drm-amd-display-add-a-check-for-display-depth-validi.patch
+patch 4715-Revert-drm-amd-display-Don-t-return-ddc-result-and-r.patch
+patch 4716-drm-amdgpu-Reserve-VM-root-shared-fence-slot-for-com.patch
+patch 4717-drm-amdgpu-Verify-root-PD-is-mapped-into-kernel-addr.patch
+patch 4718-amd-dc-dce100-On-dce100-set-clocks-to-0-on-suspend.patch
+patch 4719-drm-amdgpu-pp-smu7-use-a-local-variable-for-toc-inde.patch
+patch 4720-drm-amd-display-Fix-DP-HBR2-Eye-Diagram-Pattern-on-C.patch
+patch 4721-drm-amdgpu-allocate-shared-fence-slot-in-VA-IOCTL.patch
+patch 4722-drm-amd-pp-Make-sure-clock_voltage_limit_table-on-dc.patch
+patch 4723-drm-amdgpu-Fix-uvd-firmware-version-information-for-.patch
+patch 4724-drm-amd-display-fix-type-of-variable.patch
+patch 4725-drm-amdgpu-Fix-ups-for-amdgpu_object.c-documentation.patch
+patch 4726-drm-amd-pp-Remove-SAMU-support-in-powerplay.patch
+patch 4727-drm-amdgpu-Use-real-power-source-in-powerplay-instan.patch
+patch 4728-drm-amd-pp-Implement-update_smc_table-for-CI.patch
+patch 4729-drm-amdgpu-Get-real-power-source-to-initizlize-ac_po.patch
+patch 4730-drm-amdgpu-Update-function-level-documentation-for-G.patch
+patch 4731-drm-amd-display-Drop-to-fail-safe-mode-if-edid-is-ba.patch
+patch 4732-drm-amd-display-Write-TEST_EDID_CHECKSUM_WRITE-for-E.patch
+patch 4733-drm-amd-display-Stream-encoder-update.patch
+patch 4734-drm-amd-display-Move-i2c-and-aux-structs-into-dc_ddc.patch
+patch 4735-drm-amd-display-Add-use_dynamic_meta-flag-to-stream_.patch
+patch 4736-drm-amd-display-Drop-duplicate-dc_stream_set_static_.patch
+patch 4737-drm-amd-display-Make-it-more-clear-when-info-frames-.patch
+patch 4738-drm-amd-display-Convert-quotes-to-Ascii-quotes.patch
+patch 4739-drm-amd-display-Disable-stats-by-default.patch
+patch 4740-drm-amd-display-Add-new-transfer-type-HWPWL.patch
+patch 4741-drm-amd-display-create-sink_id-in-dc_sink-structure-.patch
+patch 4742-drm-amd-display-Allow-DP-register-double-buffer.patch
+patch 4743-drm-amd-display-Add-num_opp-to-resource_caps.patch
+patch 4744-drm-amd-display-Do-not-skip-FBC-init-in-failsafe-mod.patch
+patch 4745-amdgpu-display-use-modern-ktime-accessors.patch
+patch 4746-drm-amdgpu-update-ib_start-size_alignment-same-as-wi.patch
+patch 4747-drm-amdgpu-correct-GART-location-info.patch
+patch 4748-drm-amdgpu-Use-correct-enum-to-set-powergating-state.patch
+patch 4749-drm-amd-amdgpu-Add-a-GPU_LOAD-entry-to-sysfs-v3.patch
+patch 4750-drm-amdgpu-Polish-SQ-IH.patch
+patch 4751-drm-amdgpu-Add-parsing-SQ_EDC_INFO-to-SQ-IH-v3.patch
+patch 4752-drm-amd-display-replace-clocks_value-struct-with-dc_.patch
+patch 4753-drm-amd-display-redesign-dce-dcn-clock-voltage-updat.patch
+patch 4754-drm-amd-display-rename-display-clock-block-to-dccg.patch
+patch 4755-drm-amd-display-move-clock-programming-from-set_band.patch
+patch 4756-drm-amd-display-Adding-dm-pp-clocks-getting-by-volta.patch
+patch 4757-drm-amd-display-Apply-clock-for-voltage-request.patch
+patch 4758-drm-amd-display-Adding-Get-static-clocks-for-dm_pp-i.patch
+patch 4759-drm-amd-display-dal-3.1.48.patch
+patch 4760-drm-amd-display-Introduce-pp-smu-raven-functions.patch
+patch 4761-drm-amd-display-remove-invalid-assert-when-no-max_pi.patch
+patch 4762-drm-amd-display-Use-tg-count-for-opp-init.patch
+patch 4763-drm-amd-display-Use-local-structs-instead-of-struct-.patch
+patch 4764-drm-amd-display-Add-clock-types-to-applying-clk-for-.patch
+patch 4765-drm-amd-display-get-rid-of-cur_clks-from-dcn_bw_outp.patch
+patch 4766-drm-amd-display-move-dcn1-dispclk-programming-to-dcc.patch
+patch 4767-drm-amd-display-clean-up-dccg-divider-calc-and-dcn-c.patch
+patch 4768-drm-amd-display-rename-dce_disp_clk-to-dccg.patch
+patch 4769-drm-amd-display-clean-up-set_bandwidth-usage.patch
+patch 4770-drm-amd-display-remove-unnecessary-pplib-volage-requ.patch
+patch 4771-drm-amd-display-Temporarily-remove-Chroma-logs.patch
+patch 4772-drm-amd-display-Define-dp_alt_mode.patch
+patch 4773-drm-amd-display-fix-dccg-dcn1-ifdef.patch
+patch 4774-drm-amd-display-fix-pplib-voltage-request.patch
+patch 4775-drm-amd-display-add-CHG_DONE-mash-sh-defines-for-den.patch
+patch 4776-drm-amd-display-change-dentist-DID-enum-values-to-up.patch
+patch 4777-drm-amd-display-add-safe_to_lower-support-to-dcn-wm-.patch
+patch 4778-drm-amd-display-support-ACrYCb2101010.patch
+patch 4779-drm-amd-display-fix-use-of-uninitialized-memory.patch
+patch 4780-drm-amd-display-dal-3.1.49.patch
+patch 4781-drm-amd-display-Add-front-end-for-dp-debugfs-files.patch
+patch 4782-drm-amd-display-dal-3.1.50.patch
+patch 4783-drm-amd-display-clean-rq-dlg-ttu-reg-structs-before-.patch
+patch 4784-drm-amd-display-dal-3.1.51.patch
+patch 4785-drm-amd-display-fix-potential-infinite-loop-in-fbc-p.patch
+patch 4786-drm-amd-display-Enable-PPLib-calls-from-DC-on-linux.patch
+patch 4787-drm-amd-display-Add-dmpp-clks-types-for-conversion.patch
+patch 4788-drm-amd-display-Convert-10kHz-clks-from-PPLib-into-k.patch
+patch 4789-drm-amd-display-move-dml-defaults-to-respective-dcn-.patch
+patch 4790-drm-amd-display-Moving-powerplay-functions-to-a-sepa.patch
+patch 4791-drm-amd-display-fix-dcn1-watermark-range-reporting.patch
+patch 4792-drm-amd-display-remove-dcn1-watermark-sets-b-c-and-d.patch
+patch 4793-drm-amd-display-separate-out-wm-change-request-dcn-w.patch
+patch 4794-drm-amd-display-move-dcn-watermark-programming-to-se.patch
+patch 4795-drm-amd-display-remove-soc_bounding_box.c.patch
+patch 4796-drm-amd-display-Check-scaling-ration-not-viewports-p.patch
+patch 4797-drm-amd-display-dal-3.1.52.patch
+patch 4798-drm-amd-display-add-valid-regoffset-and-NULL-pointer.patch
+patch 4799-drm-amd-display-get-board-layout-for-edid-emulation.patch
+patch 4800-drm-amd-display-Allow-option-to-use-worst-case-water.patch
+patch 4801-drm-amdgpu-Rename-entity-cleanup-finctions.patch
+patch 4802-drm-amd-display-don-t-initialize-result.patch
+patch 4803-drm-amdgpu-remove-duplicated-codes.patch
+patch 4804-drm-amd-display-Drop-unnecessary-header-file.patch
+patch 4805-drm-amd-display-Fix-dm-pp-clks-type-convert-error.patch
+patch 4806-drm-amdgpu-Rename-set_mmhub_powergating_by_smu-to-po.patch
+patch 4807-drm-amd-pp-Rename-enable_per_cu_power_gating-to-powe.patch
+patch 4808-drm-amd-pp-Unify-powergate_uvd-vce-mmhub-to-set_powe.patch
+patch 4809-drm-amd-pp-Add-gfx-pg-support-in-smu-through-set_pow.patch
+patch 4810-drm-amd-pp-Add-powergate_gfx-backend-function-on-Rav.patch
+patch 4811-drm-amdgpu-Add-gfx_off-support-in-smu-through-pp_set.patch
+patch 4812-drm-amdgpu-Split-set_pg_state-into-separate-function.patch
+patch 4813-drm-amdgpu-Move-CG-PG-setting-out-of-delay-worker-th.patch
+patch 4814-drm-amdgpu-Add-stutter-mode-ctrl-in-module-parameter.patch
+patch 4815-drm-amd-display-Ctrl-stutter-mode-through-module-par.patch
+patch 4816-drm-amd-display-Fix-a-typo-in-wm_min_memg_clk_in_khz.patch
+patch 4817-drm-amd-powerplay-drop-the-acg-fix.patch
+patch 4818-drm-amd-powerplay-revise-default-dpm-tables-setup.patch
+patch 4819-drm-amd-powerplay-retrieve-all-clock-ranges-on-start.patch
+patch 4820-drm-amd-powerplay-revise-clock-level-setup.patch
+patch 4821-drm-amd-powerplay-initialize-uvd-vce-powergate-statu.patch
+patch 4822-drm-amd-powerplay-correct-smc-display-config-for-mul.patch
+patch 4823-drm-amd-powerplay-drop-unnecessary-uclk-hard-min-set.patch
+patch 4824-drm-amd-powerplay-correct-vega12-max-num-of-dpm-leve.patch
+patch 4825-drm-amd-powerplay-apply-clocks-adjust-rules-on-power.patch
+patch 4826-drm-amd-powerplay-set-vega12-pre-display-configurati.patch
+patch 4827-drm-amd-powerplay-cosmetic-fix.patch
+patch 4828-drm-amdgpu-Use-gmc_vram_full_visible-in-vram_mgr_bo_.patch
+patch 4829-drm-amdgpu-Remove-amdgpu_gem_map_attach-target_dev-d.patch
+patch 4830-drm-amdgpu-pp-add-missing-byte-swapping-in-process_p.patch
+patch 4831-drm-amdgpu-pp-fix-endian-swapping-in-atomctrl_get_vo.patch
+patch 4832-drm-amdgpu-pp-fix-copy-paste-typo-in-smu7_init_dpm_d.patch
+patch 4833-drm-amdgpu-pp-fix-copy-paste-typo-in-smu7_get_pp_tab.patch
+patch 4834-drm-amdgpu-sdma-simplify-sdma-instance-setup.patch
+patch 4835-drm-amdgpu-vce-simplify-vce-instance-setup.patch
+patch 4836-drm-amd-Replace-drm_dev_unref-with-drm_dev_put.patch
+patch 4837-drm-amd-add-SPDX-identifier-and-clarify-license.patch
+patch 4838-drm-amdgpu-fix-the-wrong-type-of-gem-object-creation.patch
+patch 4839-drm-amdgpu-update-uvd_v6_0_ring_vm_funcs-to-use-new-.patch
+patch 4840-drm-amd-pp-Convert-clock-unit-to-KHz-as-defined.patch
+patch 4841-drm-amd-pp-Memory-Latency-is-always-25us-on-Vega10.patch
+patch 4842-drm-amd-pp-Switch-the-tolerable-latency-for-display.patch
+patch 4843-drm-amd-display-Notify-powerplay-the-min_dcef-clock.patch
+patch 4844-drm-amd-display-Notify-powerplay-the-display-control.patch
+patch 4845-drm-amd-pp-Refine-the-interface-exported-to-display.patch
+patch 4846-drm-amd-pp-Remove-duplicate-code-in-vega12_hwmgr.c.patch
+patch 4847-drm-amdgpu-switch-firmware-path-for-CIK-parts-v2.patch
+patch 4848-drm-amdgpu-switch-firmware-path-for-SI-parts.patch
+patch 4849-drm-amdgpu-update-amd_pcie.h-to-include-gen4-speeds.patch
+patch 4850-drm-amdgpu-use-pcie-functions-for-link-width-and-spe.patch
+patch 4851-drm-amd-pp-Export-notify_smu_enable_pwe-to-display.patch
+patch 4852-drm-amd-display-Refine-the-implementation-of-dm_pp_g.patch
+patch 4853-drm-amd-display-Fix-copy-error-when-set-memory-clock.patch
+patch 4854-drm-amd-pp-Remove-the-same-struct-define-in-powerpla.patch
+patch 4855-drm-amd-display-off-by-one-in-find_irq_source_info.patch
+patch 4856-Revert-drm-amd-display-Fix-indentation-in-dcn10-reso.patch
+patch 4857-drm-amd-display-dc-dce-Fix-multiple-potential-intege.patch
+patch 4858-drm-amd-Remove-errors-from-sphinx-documentation.patch
+patch 4859-drm-amdgpu-update-documentation-for-amdgpu_drv.c.patch
+patch 4860-drm-amd-Add-sphinx-documentation-for-amd_ip_funcs.patch
+patch 4861-drm-amdgpu-separate-gpu-address-from-bo-pin.patch
+patch 4862-drm-amdgpu-allocate-gart-memory-when-it-s-required-v.patch
+patch 4863-drm-amdgpu-fix-kmap-error-handling-for-bo-creations.patch
+patch 4864-drm-amdgpu-Add-CLK-IP-base-offset.patch
+patch 4865-drm-amd-pp-Convert-10KHz-to-KHz-as-variable-name.patch
+patch 4866-drm-amd-display-Make-function-pointer-structs-const.patch
+patch 4867-drm-amdgpu-Add-support-for-logging-process-info-in-a.patch
+patch 4868-drm-amdgpu-Present-amdgpu_task_info-in-VM_FAULTS.patch
+patch 4869-drm-amd-pp-Send-khz-clock-values-to-DC-for-smu7-8.patch
+patch 4870-drm-amdgpu-Take-VCN-jpeg-ring-into-account-in-idle-w.patch
+patch 4871-drm-amdgpu-move-cache-window-setup-after-power-and-c.patch
+patch 4872-drm-amdgpu-get-VCN-start-to-process-in-the-dpm-disab.patch
+patch 4873-drm-amd-pp-fix-semicolon.cocci-warnings.patch
+patch 4874-drm-amdgpu-pin-the-csb-buffer-on-hw-init-v2.patch
+patch 4875-drm-amdgpu-init-CSIB-regardless-of-rlc-version-and-p.patch
+patch 4876-drm-amdgpu-correct-rlc-save-restore-list-initializat.patch
+patch 4877-drm-amdgpu-drop-mmRLC_PG_CNTL-clear-v2.patch
+patch 4878-drm-amdgpu-no-touch-for-the-reserved-bit-of-RLC_CGTT.patch
+patch 4879-drm-amdgpu-reduce-the-idle-period-that-RLC-has-to-wa.patch
+patch 4880-drm-amd-powerplay-add-vega12-SMU-gfxoff-support-v3.patch
+patch 4881-drm-amd-powerplay-no-need-to-mask-workable-gfxoff-fe.patch
+patch 4882-drm-amd-powerplay-convert-the-sclk-mclk-into-Mhz-for.patch
+patch 4883-drm-amd-Add-interrupt-source-definitions-for-VI-v3.patch
+patch 4884-drm-amd-Use-newly-added-interrupt-source-defs-for-VI.patch
+patch 4885-drm-amd-Add-interrupt-source-definitions-for-SOC15-v.patch
+patch 4886-drm-amd-Use-newly-added-interrupt-source-defs-for-SO.patch
+patch 4887-drm-amdgpu-fix-TTM-move-entity-init-order.patch
+patch 4888-drm-amdgpu-Keep-track-of-amount-of-pinned-CPU-visibl.patch
+patch 4889-drm-amdgpu-Make-pin_size-values-atomic.patch
+patch 4890-drm-amdgpu-Warn-and-update-pin_size-values-when-dest.patch
+patch 4891-Revert-drm-amd-display-make-dm_dp_aux_transfer-retur.patch
+patch 4892-drm-amd-display-Separate-HUBP-surface-size-and-rotat.patch
+patch 4893-drm-amd-display-Add-avoid_vbios_exec_table-debug-bit.patch
+patch 4894-drm-amd-display-support-access-ddc-for-mst-branch.patch
+patch 4895-drm-amd-display-Implement-cursor-multiplier.patch
+patch 4896-drm-amd-display-Linux-Set-Read-link-rate-and-lane-co.patch
+patch 4897-drm-amd-display-Move-common-GPIO-registers-into-a-co.patch
+patch 4898-drm-amd-display-fix-bug-where-we-are-creating-bogus-.patch
+patch 4899-drm-amd-display-generic-indirect-register-access.patch
+patch 4900-drm-amd-display-fix-incorrect-check-for-atom-table-s.patch
+patch 4901-drm-amd-display-set-read-link-rate-and-lane-count-th.patch
+patch 4902-drm-amd-display-dal-3.1.53.patch
+patch 4903-drm-amd-display-Correct-calculation-of-duration-time.patch
+patch 4904-drm-amd-display-Add-Azalia-registers-to-HW-sequencer.patch
+patch 4905-drm-amd-display-Define-couple-extra-DCN-registers.patch
+patch 4906-drm-amd-display-Expose-configure_encoder-for-link_en.patch
+patch 4907-drm-amd-display-Serialize-is_dp_sink_present.patch
+patch 4908-drm-amd-display-Break-out-function-to-simply-read-au.patch
+patch 4909-drm-amd-display-Return-aux-replies-directly-to-DRM.patch
+patch 4910-drm-amd-display-Convert-remaining-loggers-off-dc_log.patch
+patch 4911-drm-amd-display-read-DP-sink-and-DP-branch-hardware-.patch
+patch 4912-drm-amd-display-dcc-always-on-for-bw-calculations-on.patch
+patch 4913-drm-amd-display-hook-dp-test-pattern-through-debugfs.patch
+patch 4914-drm-amd-display-remove-dentist_vco_freq-from-resourc.patch
+patch 4915-drm-amd-display-drop-unused-register-defines.patch
+patch 4916-drm-amd-display-add-additional-info-for-cursor-posit.patch
+patch 4917-drm-amd-display-Patch-for-extend-time-to-panel-power.patch
+patch 4918-drm-amd-display-Linux-set-read-lane-settings-through.patch
+patch 4919-drm-amd-display-Fix-compile-error-on-older-GCC-versi.patch
+patch 4920-drm-amd-display-add-missing-mask-for-dcn.patch
+patch 4921-drm-amd-display-set-default-GPIO_ID_HPD.patch
+patch 4922-drm-amd-display-add-dcn-cursor-hotsport-rotation-and.patch
+patch 4923-drm-amd-display-expose-dcn10_aux_initialize-in-heade.patch
+patch 4924-drm-amd-display-Linux-hook-test-pattern-through-debu.patch
+patch 4925-drm-amd-display-dal-3.1.54.patch
+patch 4926-drm-amd-display-Add-YCbCr420-only-support-for-HDMI-4.patch
+patch 4927-drm-amd-display-Expose-bunch-of-functions-from-dcn10.patch
+patch 4928-drm-amd-display-Right-shift-AUX-reply-value-sooner-t.patch
+patch 4929-drm-amd-display-Read-AUX-channel-even-if-only-status.patch
+patch 4930-drm-amd-display-introduce-concept-of-send_reset_leng.patch
+patch 4931-drm-amd-display-add-DalEnableHDMI20-key-support.patch
+patch 4932-drm-amd-display-add-pp-to-dc-powerlevel-enum-transla.patch
+patch 4933-drm-amd-display-Add-NULL-check-for-local-sink-in-edp.patch
+patch 4934-drm-amd-display-Return-out_link_loss-from-interrupt-.patch
+patch 4935-drm-amd-display-Add-CRC-support-for-DCN.patch
+patch 4936-drm-amd-display-Expose-couple-OPTC-functions-through.patch
+patch 4937-drm-amd-display-dp-debugfs-allow-link-rate-lane-coun.patch
+patch 4938-drm-amd-display-Fix-new-stream-count-check-in-dc_add.patch
+patch 4939-drm-amd-display-add-max-scl-ratio-to-soc-bounding-bo.patch
+patch 4940-drm-amd-display-update-dml-to-match-DV-dml.patch
+patch 4941-drm-amd-display-dal-3.1.55.patch
+patch 4942-drm-amd-display-Initialize-data-structure-for-DalMpV.patch
+patch 4943-drm-amd-display-properly-turn-autocal-off.patch
+patch 4944-drm-amdgpu-vi-fix-mixed-up-state-in-smu-clockgating-.patch
+patch 4945-drm-amdgpu-pp-smu7-drop-unused-values-in-smu-data-st.patch
+patch 4946-drm-amdgpu-pp-smu7-remove-local-mc_addr-variable.patch
+patch 4947-drm-amdgpu-pp-smu7-cache-smu-firmware-toc.patch
+patch 4948-drm-amdgpu-pp-remove-dead-vega12-code.patch
+patch 4949-drm-amdgpu-pp-split-out-common-smumgr-smu9-code.patch
+patch 4950-drm-amdgpu-pp-switch-smu-callback-type-for-get_argum.patch
+patch 4951-Revert-drm-amd-powerplay-fix-performance-drop-on-Veg.patch
+patch 4952-drm-amdgpu-Allow-to-create-BO-lists-in-CS-ioctl-v3.patch
+patch 4953-drm-amd-display-Add-headers-for-hardcoded-1d-luts.patch
+patch 4954-drm-amd-display-Refactor-SDR-cursor-boosting-in-HDR-.patch
+patch 4955-drm-amd-display-add-HDR-visual-confirm.patch
+patch 4956-drm-amd-display-Add-hook-for-MST-root-branch-info.patch
+patch 4957-drm-amd-display-Move-address-tracking-out-of-HUBP.patch
+patch 4958-drm-amd-display-add-new-dc-debug-structure-to-track-.patch
+patch 4959-drm-amd-display-dal-3.1.56.patch
+patch 4960-drm-amd-display-Null-ptr-check-for-set_sdr_white_lev.patch
+patch 4961-drm-amd-display-Fix-some-checkpatch.pl-errors-and-wa.patch
+patch 4962-drm-amdgpu-cleanup-job-header.patch
+patch 4963-drm-amdgpu-remove-fence-context-from-the-job.patch
+patch 4964-drm-amdgpu-remove-ring-parameter-from-amdgpu_job_sub.patch
+patch 4965-drm-amdgpu-remove-job-ring.patch
+patch 4966-drm-amdgpu-add-amdgpu_job_submit_direct-helper.patch
+patch 4967-drm-amdgpu-remove-job-adev-v2.patch
+patch 4968-drm-amdgpu-minor-cleanup-in-amdgpu_job.c.patch
+patch 4969-drm-amdgpu-allow-for-more-flexible-priority-handling.patch
+patch 4970-drm-amdgpu-change-ring-priority-after-pushing-the-jo.patch
+patch 4971-drm-amdgpu-simplify-the-bo-reference-on-amdgpu_bo_up.patch
+patch 4972-drm-amdgpu-pm-Remove-VLA-usage.patch
+patch 4973-drm-amdgpu-powerplay-use-irq-source-defines-for-smu7.patch
+patch 4974-drm-amd-powerplay-fixed-uninitialized-value.patch
+patch 4975-drm-amdgpu-display-Replace-CONFIG_DRM_AMD_DC_DCN1_0-.patch
+patch 4976-drm-amdgpu-remove-superflous-UVD-encode-entity.patch
+patch 4977-drm-amdgpu-clean-up-UVD-instance-handling-v2.patch
+patch 4978-drm-amdgpu-fix-spelling-mistake-successed-succeeded.patch
+patch 4979-drm-amd-display-Drop-unused-backlight-functions-in-D.patch
+patch 4980-drm-amd-display-Honor-pplib-stutter-mask-for-all-ASI.patch
+patch 4981-drm-amdgpu-lock-and-unlock-console-only-for-amdgpu_f.patch
+patch 4982-drm-amd-pp-Set-Max-clock-level-to-display-by-default.patch
+patch 4983-drm-amd-display-Convert-10kHz-clks-from-PPLib-into-k.patch
+patch 4984-300-compilaiton.patch
+patch 4985-patch-correction-amdgpu-clean-up-UVD-instance-handli.patch
+patch 4986-drm-amdgpu-use-drm_fb-helper-for-console_-un-lock.patch
+patch 4987-drm-amdgpu-Fix-warning-in-dma_fence_is_later-on-resu.patch
+patch 4988-drm-amdgpu-apci-don-t-call-sbios-request-function-if.patch
+patch 4989-drm-amdgpu-acpi-skip-backlight-events-for-DC.patch
+patch 4990-drm-amdgpu-split-ip-suspend-into-2-phases.patch
+patch 4991-drm-amdgpu-rework-suspend-and-resume-to-deal-with-at.patch
+patch 4992-drm-amdgpu-Fix-RLC-safe-mode-test-in-gfx_v9_0_enter_.patch
+patch 4993-drm-amd-powerplay-slow-UCLK-switch-when-multiple-dis.patch
+patch 4994-drm-amd-powerplay-correct-the-argument-for-PPSMC_MSG.patch
+patch 4995-drm-amd-powerplay-allow-slow-switch-only-if-NBPState.patch
+patch 4996-drm-amdgpu-Don-t-warn-on-destroying-a-pinned-BO.patch
+patch 4997-drm-amdgpu-move-the-amdgpu_fbdev_set_suspend-further.patch
+patch 4998-drm-amd-display-Remove-unnecessary-warning.patch
+patch 4999-drm-amd-display-allow-diags-to-skip-initial-link-tra.patch
+patch 5000-drm-amd-display-DPP-CM-ICSC-AYCRCB8888-format-suppor.patch
+patch 5001-drm-amd-display-Decouple-aux-from-i2c.patch
+patch 5002-drm-amd-display-separate-dc_debug-into-dc_debug_opti.patch
+patch 5003-drm-amd-display-DC-3.1.58.patch
+patch 5004-drm-amdgpu-clean-up-coding-style-a-bit.patch
+patch 5005-drm-amdgpu-expose-only-the-first-UVD-instance-for-no.patch
+patch 5006-drm-amdgpu-consistenly-name-amdgpu_bo_-functions.patch
+patch 5007-drm-amdgpu-reduce-the-number-of-placements-for-a-BO.patch
+patch 5008-drm-amdgpu-gmc9-clarify-GPUVM-fault-error-message.patch
+patch 5009-gpu-drm-amdgpu-Replace-mdelay-with-msleep-in-cik_pci.patch
+patch 5010-drm-amdgpu-add-support-for-inplace-IB-patching-for-M.patch
+patch 5011-drm-amdgpu-patch-the-IBs-for-the-second-UVD-instance.patch
+patch 5012-drm-amd-display-Retry-link-training-again.patch
+patch 5013-drm-amd-display-flatten-aux_engine-and-engine.patch
+patch 5014-drm-amd-display-Prevent-PSR-from-being-enabled-if-in.patch
+patch 5015-drm-amd-display-DC-3.1.59.patch
+patch 5016-drm-amd-Add-missing-fields-in-atom_integrated_system.patch
+patch 5017-drm-amdgpu-implement-harvesting-support-for-UVD-7.2-.patch
+patch 5018-drm-amdgpu-correct-evict-flag-for-bo-move.patch
+patch 5019-drm-amdgpu-clean-up-the-superfluous-space-and-align-.patch
+patch 5020-drm-amd-pp-Polaris12-Fix-a-chunk-of-registers-missed.patch
+patch 5021-drm-amd-pp-Delete-unused-temp-variables.patch
+patch 5022-drm-amd-pp-Convert-voltage-unit-in-mV-4-to-mV-on-CZ-.patch
+patch 5023-drm-amdgpu-fix-a-reversed-condition.patch
+patch 5024-drm-amdgpu-add-proper-error-handling-to-amdgpu_bo_li.patch
+patch 5025-drm-amdgpu-fix-total-size-calculation.patch
+patch 5026-drm-amdgpu-return-error-if-both-BOs-and-bo_list-hand.patch
+patch 5027-drm-amdgpu-add-new-amdgpu_vm_bo_trace_cs-function-v2.patch
+patch 5028-drm-amdgpu-move-bo_list-defines-to-amdgpu_bo_list.h.patch
+patch 5029-drm-amdgpu-always-recreate-bo_list.patch
+patch 5030-drm-amdgpu-nuke-amdgpu_bo_list_free.patch
+patch 5031-drm-amdgpu-add-bo_list-iterators.patch
+patch 5032-drm-amdgpu-allocate-the-bo_list-array-after-the-list.patch
+patch 5033-drm-amdgpu-create-an-empty-bo_list-if-no-handle-is-p.patch
+patch 5034-drm-amdgpu-Replace-ttm_bo_reference-with-ttm_bo_get.patch
+patch 5035-drm-amdgpu-Replace-ttm_bo_unref-with-ttm_bo_put.patch
+patch 5036-drm-amd-display-add-missing-void-parameter-to-dc_cre.patch
+patch 5037-drm-amdgpu-pm-Fix-potential-Spectre-v1.patch
+patch 5038-drm-amd-display-Report-non-DP-display-as-disconnecte.patch
+patch 5039-drm-amd-display-Only-require-EDID-read-for-HDMI-and-.patch
+patch 5040-drm-amd-display-Use-requested-HDMI-aspect-ratio.patch
+patch 5041-drm-amd-display-DP-Compliance-400.1.1-failure.patch
+patch 5042-drm-amd-display-Implement-backlight_ops.get_brightne.patch
+patch 5043-drm-amd-display-Read-back-max-backlight-value-at-boo.patch
+patch 5044-drm-amd-display-Destroy-aux_engines-only-once.patch
+patch 5045-drm-amd-display-Implement-custom-degamma-lut-on-dcn.patch
+patch 5046-drm-amd-display-Use-calculated-disp_clk_khz-value-fo.patch
+patch 5047-drm-amd-display-Don-t-share-clk-source-between-DP-an.patch
+patch 5048-drm-amd-display-add-vbios-table-check-for-enabling-d.patch
+patch 5049-drm-amd-display-Add-NULL-check-for-enabling-dp-ss.patch
+patch 5050-drm-amd-display-program-display-clock-on-cache-match.patch
+patch 5051-drm-amd-display-update-clk-for-various-HDMI-color-de.patch
+patch 5052-drm-amd-display-display-connected-to-dp-1-does-not-l.patch
+patch 5053-drm-amdgpu-sriov-give-8s-for-recover-vram-under-RUNT.patch
+patch 5054-drm-amd-display-fix-single-link-DVI-has-no-display.patch
+patch 5055-drm-amd-display-Allow-clock-sharing-b-w-HDMI-and-DVI.patch
+patch 5056-drm-amd-display-Pass-connector-id-when-executing-VBI.patch
+patch 5057-drm-amd-display-Guard-against-null-crtc-in-CRC-IRQ.patch
+patch 5058-drm-amd-pp-Add-ACP-PG-support-in-SMU.patch
+patch 5059-drm-amdgpu-Power-down-acp-if-board-uses-AZ-v2.patch
+patch 5060-drm-amd-amdgpu-Enabling-Power-Gating-for-Stoney-plat.patch
+patch 5061-drm-amdgpu-acp-Powrgate-acp-via-smu.patch
+patch 5062-drm-amgpu-acp-Implement-set_powergating_state-for-ac.patch
+patch 5063-drm-amdgpu-Add-job-pipe-sync-dependecy-trace.patch
+patch 5064-drm-amd-pp-Implement-get_performance_level-for-legac.patch
+patch 5065-drm-amd-display-pass-compat_level-to-hubp.patch
+patch 5066-drm-amd-display-Move-PME-to-function-pointer-call-se.patch
+patch 5067-drm-amd-display-dal-3.1.60.patch
+patch 5068-drm-amd-display-Set-DFS-bypass-flags-for-dce110.patch
+patch 5069-drm-amd-display-Enable-DFS-bypass-support-in-DC-conf.patch
+patch 5070-drm-amd-display-Add-support-for-toggling-DFS-bypass.patch
+patch 5071-drm-amdgpu-Add-amdgpu_gfx_off_ctrl-function.patch
+patch 5072-drm-amdgpu-Put-enable-gfx-off-feature-to-a-delay-thr.patch
+patch 5073-drm-amdgpu-Ctrl-gfx-off-via-amdgpu_gfx_off_ctrl.patch
+patch 5074-drm-amdgpu-Disable-gfx-off-if-VCN-is-busy.patch
+patch 5075-drm-amdgpu-move-gfx-definitions-into-amdgpu_gfx-head.patch
+patch 5076-drm-amdgpu-move-ih-definitions-into-amdgpu_ih-header.patch
+patch 5077-drm-amdgpu-move-sdma-definitions-into-amdgpu_sdma-he.patch
+patch 5078-drm-amdgpu-move-firmware-definitions-into-amdgpu_uco.patch
+patch 5079-drm-amdgpu-move-psp-macro-into-amdgpu_psp-header.patch
+patch 5080-drm-amdgpu-move-gem-definitions-into-amdgpu_gem-head.patch
+patch 5081-drm-amd-display-pass-the-right-num-of-modes-added.patch
+patch 5082-drm-amd-display-correct-image-viewport-calculation.patch
+patch 5083-drm-amd-display-Print-DPP-DTN-log-info-only-for-enab.patch
+patch 5084-drm-amd-display-Use-DGAM-ROM-or-RAM.patch
+patch 5085-drm-amd-display-Add-check-for-num-of-entries-in-gamm.patch
+patch 5086-drm-amdgpu-Delay-100ms-to-enable-gfx-off-feature.patch
+patch 5087-drm-amdgpu-move-ring-macros-into-amdgpu_ring-header.patch
+patch 5088-drm-amdgpu-remove-useless-gds-switch-macro.patch
+patch 5089-drm-amdgpu-move-display-definitions-into-amdgpu_disp.patch
+patch 5090-drm-amdgpu-move-gmc-macros-into-amdgpu_gmc-header.patch
+patch 5091-drm-amdgpu-move-vm-definitions-into-amdgpu_vm-header.patch
+patch 5092-drm-amdgpu-move-missed-gfxoff-entry-into-amdgpu_gfx-.patch
+patch 5093-drm-amdgpu-pp-endian-fixes-for-process_pptables_v1_0.patch
+patch 5094-drm-amdgpu-pp-endian-fixes-for-processpptables.c.patch
+patch 5095-drm-amdgpu-add-emit-reg-write-reg-wait-for-vcn-jpeg.patch
+patch 5096-drm-amdgpu-add-system-interrupt-register-offset-head.patch
+patch 5097-drm-amdgpu-add-system-interrupt-mask-for-jrbc.patch
+patch 5098-drm-amdgpu-enable-system-interrupt-for-jrbc.patch
+patch 5099-drm-amdgpu-add-emit-trap-for-vcn-jpeg.patch
+patch 5100-drm-amdgpu-fix-emit-frame-size-and-comments-for-jpeg.patch
+patch 5101-drm-amdgpu-powerplay-check-vrefresh-when-when-changi.patch
+patch 5102-drm-amdgpu-Cancel-gfx-off-delay-work-when-driver-fin.patch
+patch 5103-drm-amd-display-dc-3.1.61.patch
+patch 5104-drm-amd-display-fix-PIP-bugs-on-Dal3.patch
+patch 5105-drm-amd-display-Add-dprefclk-value-to-dce_dccg.patch
+patch 5106-drm-amd-display-fix-dml-handling-of-mono8-16-pixel-f.patch
+patch 5107-drm-amd-display-add-retimer-log-for-HWQ-tuning-use.patch
+patch 5108-drm-amd-display-Remove-redundant-non-zero-and-overfl.patch
+patch 5109-drm-amd-display-dc-3.1.62.patch
+patch 5110-drm-amdgpu-add-AVFS-control-to-PP_FEATURE_MASK.patch
+patch 5111-drm-amdgpu-powerplay-smu7-enable-AVFS-control-via-pp.patch
+patch 5112-drm-amdgpu-powerplay-vega10-enable-AVFS-control-via-.patch
+patch 5113-drm-amd-display-enable-ABGR-and-XBGR-formats-v4.patch
+patch 5114-drm-amdgpu-enable-ABGR-and-XBGR-formats-v2.patch
+patch 5115-drm-amdgpu-include-Add-nbio-7.4-header-files-v4.patch
+patch 5116-drm-amdgpu-include-Add-sdma0-1-4.2-register-headerfi.patch
+patch 5117-drm-amdgpu-include-add-thm-11.0.2-headers.patch
+patch 5118-drm-amdgpu-include-Add-mp-11.0-header-files.-v2.patch
+patch 5119-Revert-drm-amdgpu-Add-nbio-support-for-vega20-v2.patch
+patch 5120-drm-amdgpu-Add-nbio-7.4-support-for-vega20-v3.patch
+patch 5121-drm-amdgpu-update-atomfirmware.h.patch
+patch 5122-drm-amd-powerplay-add-vega20_inc.h-v2.patch
+patch 5123-drm-amd-powerplay-add-smu11_driver_if.h-v4.patch
+patch 5124-drm-amd-powerplay-add-vega20_ppsmc.h-v2.patch
+patch 5125-drm-amd-powerplay-add-vega20_pptable.h-v2.patch
+patch 5126-drm-amd-powerplay-add-the-smu-manager-for-vega20-v2.patch
+patch 5127-drm-amd-powerplay-new-interfaces-for-ActivityMonitor.patch
+patch 5128-drm-amd-powerplay-add-the-hw-manager-for-vega20-v3.patch
+patch 5129-drm-amd-powerplay-support-workload-profile-query-and.patch
+patch 5130-drm-amd-powerplay-init-vega20-uvd-vce-powergate-stat.patch
+patch 5131-drm-amd-powerplay-correct-force-clock-level-related-.patch
+patch 5132-drm-amd-powerplay-export-vega20-stable-pstate-clocks.patch
+patch 5133-drm-amd-powerplay-add-vega20-pre_display_config_chan.patch
+patch 5134-drm-amd-powerplay-conv-the-vega20-pstate-sclk-mclk-i.patch
+patch 5135-drm-amd-powerplay-initialize-vega20-overdrive-settin.patch
+patch 5136-drm-amd-powerplay-new-interfaces-for-overdrive-vega2.patch
+patch 5137-drm-amd-powerplay-revise-vega20-PPSMC_MSG_SetSoftMin.patch
+patch 5138-drm-amd-powerplay-update-vega20-clocks-threshold-set.patch
+patch 5139-drm-amdgpu-enable-vega20-powerplay-support.patch
+patch 5140-drm-amdgpu-Add-psp-11.0-support-for-vega20.-v2.patch
+patch 5141-drm-amdgpu-vg20-Change-the-load-type-of-vega20-to-ps.patch
+patch 5142-drm-amd-powerplay-enable-fclk-ss-by-default.patch
+patch 5143-drm-amd-powerplay-remove-setting-soc-floor-voltage-b.patch
+patch 5144-drm-amd-powerplay-avoid-enabling-disabling-uvd-vce-d.patch
+patch 5145-drm-amd-powerplay-correct-the-argument-for-PPSMC_MSG.patch
+patch 5146-drm-amd-powerplay-allow-slow-switch-only-if-NBPState.patch
+patch 5147-drm-amd-powerplay-remove-max-DCEFCLK-limitation.patch
+patch 5148-drm-amd-powerplay-added-voltage-boot-time-calibratio.patch
+patch 5149-drm-amdgpu-gfx9-Update-gfx9-golden-settings.patch
+patch 5150-drm-amdgpu-update-vega20-sdma-golden-settings.patch
+patch 5151-drm-amdgpu-psp-Enlarge-PSP-TMR-SIZE-from-3M-to-4M.patch
+patch 5152-drm-amdgpu-remove-experimental-flag-for-vega20.patch
+patch 5153-drm-amdgpu-Cancel-the-delay-work-when-suspend.patch
+patch 5154-drm-amd-pp-OverDrive-gfx-domain-voltage-on-Tonga.patch
+patch 5155-drm-amdgpu-fix-integer-overflow-test-in-amdgpu_bo_li.patch
+patch 5156-drm-amdgpu-Change-VCE-booting-with-firmware-loaded-b.patch
+patch 5157-drm-amdgpu-Use-kvmalloc-for-allocating-UVD-VCE-VCN-B.patch
+patch 5158-drm-amdgpu-added-support-2nd-UVD-instance.patch
+patch 5159-drm-amd-display-Program-vline-interrupt-on-FAST-upda.patch
+patch 5160-drm-amd-display-Enable-Stereo-in-Dal3.patch
+patch 5161-drm-amd-display-Program-vsc_infopacket-in-commit_pla.patch
+patch 5162-drm-amd-display-Handle-HDR-meta-update-as-fast-updat.patch
+patch 5163-drm-amd-display-HDR-dynamic-meta-should-be-treated-a.patch
+patch 5164-drm-amd-display-Program-gamut-remap-as-part-of-strea.patch
+patch 5165-drm-amdgpu-Improve-a-error-message-and-fix-a-typo.patch
+patch 5166-drm-amdgpu-Remove-VM-based-compute-profile-switching.patch
+patch 5167-drm-amdgpu-hybrid-add-AMDGPU-VERSION.patch
+patch 5168-drm-amdgpu-cleanup-HW_IP-query.patch
+patch 5169-Revert-drm-amdgpu-switch-firmware-path-for-SI-parts.patch
+patch 5170-Revert-drm-amdgpu-switch-firmware-path-for-CIK-parts.patch
+patch 5171-drm-amdgpu-Refine-function-name-and-function-args.patch
+patch 5172-drm-amdgpu-Set-power-ungate-state-when-suspend-fini.patch
+patch 5173-drm-amdgpu-Set-clock-ungate-state-when-suspend-fini.patch
+patch 5174-drm-amdgpu-fix-VM-size-reporting-on-Raven.patch
+patch 5175-drm-amdgpu-Do-not-evict-VRAM-on-APUs-with-disabled-H.patch
+patch 5176-drm-amd-display-Do-not-retain-link-settings.patch
+patch 5177-drm-amd-display-Create-new-i2c-resource.patch
+patch 5178-drm-amd-display-Program-csc-matrix-as-part-of-stream.patch
+patch 5179-drm-amdgpu-display-disable-eDP-fast-boot-optimizatio.patch
+patch 5180-drm-amd-display-Define-registers-for-dcn10.patch
+patch 5181-drm-amd-display-Combine-dce80-and-dce100-i2c-hw-func.patch
+patch 5182-drm-amd-display-move-edp-fast-boot-optimization-flag.patch
+patch 5183-drm-amd-display-implement-DPMS-DTN-test-v2.patch
+patch 5184-drm-amdgpu-Remove-the-sriov-checking-and-add-firmwar.patch
+patch 5185-drm-amdgpu-use-kiq-to-do-invalidate-tlb.patch
+patch 5186-drm-amdgpu-remove-fulll-access-for-suspend-phase1.patch
+patch 5187-drm-amdgpu-Fix-compile-warning.patch
+patch 5188-drm-amdgpu-fix-sdma-doorbell-range-setting.patch
+patch 5189-drm-amdgpu-sriov-Only-sriov-runtime-support-use-kiq.patch
+patch 5190-drm-amd-display-fix-a-compile-warning.patch
+patch 5191-drm-amd-display-indent-an-if-statement.patch
+patch 5192-drm-amdgpu-Don-t-use-kiq-in-gpu-reset.patch
+patch 5193-drm-amdgpu-display-add-support-for-LVDS-v5.patch
+patch 5194-drm-amdgpu-amdgpu_kiq_reg_write_reg_wait-can-be-stat.patch
+patch 5195-drm-amdgpu-cleanup-GPU-recovery-check-a-bit-v2.patch
+patch 5196-drm-amdgpu-validate-the-VM-root-PD-from-the-VM-code.patch
+patch 5197-drm-amdgpu-move-setting-the-GART-addr-into-TTM.patch
+patch 5198-drm-amdgpu-rename-gart.robj-into-gart.bo.patch
+patch 5199-drm-amdgpu-remove-gart.table_addr.patch
+patch 5200-drm-amdgpu-set-correct-base-for-THM-NBIF-MP1-IP.patch
+patch 5201-drm-amdgpu-Only-retrieve-GPU-address-of-GART-table-a.patch
+patch 5202-drm-amdgpu-switch-firmware-path-for-SI-parts.patch
+patch 5203-drm-amdgpu-switch-firmware-path-for-CIK-parts-v2.patch
+patch 5204-Hybrid-Version-18.45.0.418.patch
+patch 5205-drm-amdgpu-add-amdgpu_gmc_pd_addr-helper.patch
+patch 5206-drm-amdgpu-add-ring-soft-recovery-v4.patch
+patch 5207-drm-amdgpu-implement-soft_recovery-for-GFX7.patch
+patch 5208-drm-amdgpu-implement-soft_recovery-for-GFX8-v2.patch
+patch 5209-drm-amdgpu-implement-soft_recovery-for-GFX9.patch
+patch 5210-drm-amdgpu-Adjust-the-VM-size-based-on-system-memory.patch
+patch 5211-drm-amdgpu-Enable-disable-gfx-PG-feature-in-rlc-safe.patch
+patch 5212-drm-amdgpu-Remove-duplicated-power-source-update.patch
+patch 5213-drm-amdgpu-Fix-vce-initialize-failed-on-Kaveri-Mulli.patch
+patch 5214-drm-amdgpu-Update-power-state-at-the-end-of-smu-hw_i.patch
+patch 5215-drm-amdgpu-Power-on-uvd-block-when-hw_fini.patch
+patch 5216-drm-amdgpu-Remove-dead-code-in-amdgpu_pm.c.patch
+patch 5217-drm-amdgpu-Remove-duplicate-code-in-gfx_v8_0.c.patch
+patch 5218-drm-amdgpu-Refine-gfx_v8_0_kcq_disable-function.patch
+patch 5219-drm-amdgpu-Remove-duplicate-code-in-gfx_v9_0.c.patch
+patch 5220-drm-amdgpu-Refine-gfx_v9_0_kcq_disable-function.patch
+patch 5221-drm-amdgpu-Change-kiq-initialize-reset-sequence-on-g.patch
+patch 5222-drm-amdgpu-Change-kiq-ring-initialize-sequence-on-gf.patch
+patch 5223-drm-amdgpu-amdgpu_ctx_add_fence-can-t-fail.patch
+patch 5224-drm-amdgpu-fix-holding-mn_lock-while-allocating-memo.patch
+patch 5225-drm-amdgpu-add-amdgpu_gmc_get_pde_for_bo-helper-v2.patch
+patch 5226-drm-amdgpu-enable-GTT-PD-PT-for-raven-v3.patch
+patch 5227-drm-amdgpu-Refine-gmc9-VM-fault-print.patch
+patch 5228-drm-amdgpu-remove-extra-newline-when-printing-VM-fau.patch
+patch 5229-drm-amdgpu-move-full-access-into-amdgpu_device_ip_su.patch
+patch 5230-drm-amdgpu-Need-to-set-moved-to-true-when-evict-bo.patch
+patch 5231-drm-amdgpu-remove-amdgpu_bo_gpu_accessible.patch
+patch 5232-drm-amdgpu-move-amdgpu_device_-vram-gtt-_location.patch
+patch 5233-drm-amdgpu-fix-amdgpu_gmc_gart_location-a-little-bit.patch
+patch 5234-drm-amdgpu-stop-using-gart_start-as-offset-for-the-G.patch
+patch 5235-drm-amdgpu-distinct-between-allocated-GART-space-and.patch
+patch 5236-drm-amdgpu-use-the-smaller-hole-for-GART.patch
+patch 5237-drm-amdgpu-remove-redundant-memset.patch
+patch 5238-drm-amdgpu-add-missing-CHIP_HAINAN-in-amdgpu_ucode_g.patch
+patch 5239-drm-amdgpu-put-GART-away-from-VRAM-v2.patch
+patch 5240-drm-amdgpu-Revert-kmap-PDs-PTs-in-amdgpu_vm_update_d.patch
+patch 5241-drm-amdgpu-gmc9-rework-stolen-vga-memory-handling.patch
+patch 5242-drm-amdgpu-gmc9-don-t-keep-stolen-memory-on-Raven.patch
+patch 5243-drm-amdgpu-gmc9-don-t-keep-stolen-memory-on-vega12.patch
+patch 5244-drm-amdgpu-gmc9-don-t-keep-stolen-memory-on-vega20.patch
+patch 5245-drm-amd-powerplay-added-vega20-overdrive-support-V3.patch
+patch 5246-drm-amd-powerplay-correct-data-type-to-support-under.patch
+patch 5247-drm-amdgpu-Set-pasid-for-compute-vm-v2.patch
+patch 5248-drm-amd-display-Eliminate-i2c-hw-function-pointers.patch
+patch 5249-drm-amd-display-dc-3.1.63.patch
+patch 5250-drm-amd-display-Use-non-deprecated-vblank-handler.patch
+patch 5251-drm-amd-display-Add-support-for-hw_state-logging-via.patch
+patch 5252-drm-amd-display-eliminate-long-wait-between-register.patch
+patch 5253-drm-amd-display-Fix-memory-leak-caused-by-missed-dc_.patch
+patch 5254-drm-amd-display-Remove-redundant-i2c-structs.patch
+patch 5255-drm-amd-display-support-48-MHZ-refclk-off.patch
+patch 5256-drm-amd-display-Flatten-unnecessary-i2c-functions.patch
+patch 5257-drm-amdgpu-fix-mask-in-GART-location-calculation.patch
+patch 5258-drm-amdgpu-revert-stop-using-gart_start-as-offset-fo.patch
+patch 5259-drm-amdgpu-Fix-SDMA-hang-in-prt-mode-v2.patch
+patch 5260-drm-amdgpu-add-new-polaris-pci-id.patch
+patch 5261-drm-amdgpu-sriov-Correct-the-setting-about-sdma-door.patch
+patch 5262-drm-amdgpu-add-picasso-to-asic_type-enum.patch
+patch 5263-drm-amdgpu-add-soc15-support-for-picasso.patch
+patch 5264-drm-amdgpu-add-picasso-ucode-loading-method.patch
+patch 5265-drm-amdgpu-add-picasso-support-for-vcn.patch
+patch 5266-drm-amdgpu-add-clockgating-support-for-picasso.patch
+patch 5267-drm-amdgpu-add-picasso-support-for-gmc.patch
+patch 5268-drm-amdgpu-add-picasso-support-for-gfx_v9_0.patch
+patch 5269-drm-amdgpu-add-picasso-support-for-sdma_v4.patch
+patch 5270-drm-amdgpu-add-picasso-for-amdgpu-kms.patch
+patch 5271-drm-amdgpu-Add-pg-support-for-gfxoff-for-PCO.patch
+patch 5272-drm-amdgpu-Enable-SDMA-power-gating-for-PCO.patch
+patch 5273-drm-amdgpu-enable-mmhub-power-gating.patch
+patch 5274-drm-amdgpu-enable-vcn-powergating-for-PCO.patch
+patch 5275-drm-amdgpu-add-ip-blocks-for-picasso-v2.patch
+patch 5276-drm-amdgpu-add-new-raven-series-device.patch
+patch 5277-drm-amdgpu-enable-gfxoff-in-non-sriov-and-stutter-mo.patch
+patch 5278-drm-amdgpu-use-IP-presence-to-free-uvd-and-vce-handl.patch
+patch 5279-drm-amdgpu-move-get_rev_id-at-first-before-load-gpu_.patch
+patch 5280-drm-amdgpu-set-external-rev-id-for-raven2.patch
+patch 5281-drm-amdgpu-add-raven2-to-gpu_info-firmware.patch
+patch 5282-drm-amdgpu-add-raven2-vcn-firmware-support.patch
+patch 5283-drm-amdgpu-add-psp-support-for-raven2.patch
+patch 5284-drm-amdgpu-sdma4-specify-raven2-firmware.patch
+patch 5285-drm-amdgpu-sdma4-Add-raven2-golden-setting.patch
+patch 5286-drm-amdgpu-gfx9-add-support-for-raven2-gfx-firmware.patch
+patch 5287-drm-amdgpu-gfx9-add-raven2-golden-setting.patch
+patch 5288-drm-amd-display-Add-Raven2-definitions-in-dc.patch
+patch 5289-drm-amd-display-Add-DC-config-flag-for-Raven2-v2.patch
+patch 5290-drm-amd-powerplay-update-smu10_verify_smc-to-raven2-.patch
+patch 5291-drm-amd-powerplay-round-up-the-Mhz-convertion-v2.patch
+patch 5292-drm-amd-powerplay-disable-raven2-force-dpm-level-sup.patch
+patch 5293-drm-amdgpu-set-CG-flags-for-raven2-v2.patch
+patch 5294-drm-amdgpu-Initialize-fences-array-entries-in-amdgpu.patch
+patch 5295-drm-amdgpu-soc15-clean-up-picasso-support.patch
+patch 5296-drm-amdgpu-simplify-Raven-Raven2-and-Picasso-handlin.patch
+patch 5297-drm-amd-display-Fix-3D-stereo-issues.patch
+patch 5298-drm-amd-display-stop-using-switch-for-different-CS-r.patch
+patch 5299-drm-amd-display-dc-3.1.66.patch
+patch 5300-drm-amd-display-add-query-HPD-interface.patch
+patch 5301-drm-amd-display-Drop-amdgpu_display_manager.dal-memb.patch
+patch 5302-drm-amd-display-Drop-amdgpu_dm_prev_state-struct.patch
+patch 5303-drm-amdgpu-add-GDS-GWS-and-OA-debugfs-files.patch
+patch 5304-drm-amdgpu-stop-crashing-on-GDS-GWS-OA-eviction.patch
+patch 5305-drm-amdgpu-don-t-allocate-zero-sized-kernel-BOs.patch
+patch 5306-drm-amdgpu-drop-size-check.patch
+patch 5307-drm-amd-amdgpu-Avoid-fault-when-allocating-an-empty-.patch
+patch 5308-drm-amdgpu-use-processed-values-for-counting.patch
+patch 5309-drm-amdgpu-update-vram_info-structure-in-atomfirmwar.patch
+patch 5310-drm-amdgpu-fix-unknown-vram-mem-type-for-vega20.patch
+patch 5311-drm-amd-powerplay-update-OD-feature-judgement.patch
+patch 5312-drm-amd-powerplay-update-OD-to-take-voltage-value-in.patch
+patch 5313-drm-amd-powerplay-retrieve-the-updated-clock-table-a.patch
+patch 5314-drm-amdgpu-stop-pipelining-VM-PDs-PTs-moves.patch
+patch 5315-drm-amdgpu-always-enable-shadow-BOs-v2.patch
+patch 5316-drm-amdgpu-shadow-BOs-don-t-need-any-alignment.patch
+patch 5317-drm-amdgpu-always-recover-VRAM-during-GPU-recovery.patch
+patch 5318-drm-amdgpu-fix-shadow-BO-restoring.patch
+patch 5319-drm-amdgpu-fix-up-GDS-GWS-OA-shifting.patch
+patch 5320-drm-amdgpu-initialize-GDS-GWS-OA-domains-even-when-t.patch
+patch 5321-drm-amdgpu-move-reserving-GDS-GWS-OA-into-common-cod.patch
+patch 5322-drm-amd-Add-ucode-DMCU-support.patch
+patch 5323-drm-amd-Add-PSP-DMCU-support.patch
+patch 5324-drm-amd-Add-DM-DMCU-support.patch
+patch 5325-drm-amdgpu-Add-DMCU-to-firmware-query-interface.patch
+patch 5326-drm-amd-display-Add-DMCU-firmware-version.patch
+patch 5327-drm-amdgpu-display-return-proper-error-codes-in-dm.patch
+patch 5328-drm-amdgpu-try-allocating-VRAM-as-power-of-two.patch
+patch 5329-drm-amdgpu-enable-AGP-aperture-for-GMC9-v2.patch
+patch 5330-drm-amdgpu-fix-the-page-fault-of-raven2.patch
+patch 5331-drm-amdgpu-add-amdgpu_gmc_agp_location-v3.patch
+patch 5332-drm-amdgpu-Temporary-fix-amdgpu_vm_release_compute-b.patch
+patch 5333-drm-amdgpu-fix-VM-clearing-for-the-root-PD.patch
+patch 5334-drm-amdgpu-fix-preamble-handling.patch
+patch 5335-amdgpu-fix-multi-process-hang-issue.patch
+patch 5336-drm-amdgpu-Fix-page-fault-and-kasan-warning-on-pci-d.patch
+patch 5337-drm-amd-display-Fix-bug-use-wrong-pp-interface.patch
+patch 5338-drm-amdgpu-remove-extra-root-PD-alignment.patch
+patch 5339-drm-amdgpu-add-helper-for-VM-PD-PT-allocation-parame.patch
+patch 5340-drm-amdgpu-add-GMC9-support-for-PDs-PTs-in-system-me.patch
+patch 5341-drm-amdgpu-add-amdgpu_gmc_get_pde_for_bo-helper-v2.patch
+patch 5342-drm-amd-display-Improve-spelling-grammar-and-formatt.patch
+patch 5343-drm-amd-display-Support-reading-hw-state-from-debugf.patch
+patch 5344-Revert-drm-amdgpu-Temporary-fix-amdgpu_vm_release_co.patch
+patch 5345-drm-amdgpu-Use-drm_dev_unplug-in-PCI-.remove.patch
+patch 5346-drm-amdgpu-move-size-calculations-to-the-front-of-th.patch
+patch 5347-drm-amdgpu-fix-amdgpu_mn_unlock-in-the-CS-error-path.patch
+patch 5348-drm-amdgpu-correctly-sign-extend-48bit-addresses-v3.patch
+patch 5349-drm-amdgpu-use-the-AGP-aperture-for-system-memory-ac.patch
+patch 5350-drm-amd-display-Build-stream-update-and-plane-update.patch
+patch 5351-drm-amd-display-Add-DP-YCbCr-4-2-0-support.patch
+patch 5352-drm-amd-display-Fix-DAL217-tests-modify-DTN-logs-for.patch
+patch 5353-drm-amd-display-Add-driver-side-parsing-for-CM.patch
+patch 5354-drm-amd-display-remove-dead-dc-vbios-code.patch
+patch 5355-drm-amd-display-remove-unused-clk_src-code.patch
+patch 5356-drm-amd-display-add-disconnect_delay-to-dc_panel_pat.patch
+patch 5357-drm-amd-display-add-aux-transition-event-log.patch
+patch 5358-drm-amd-display-num-of-sw-i2c-aux-engines-less-than-.patch
+patch 5359-drm-amd-display-Use-DRM-helper-for-best_encoder.patch
+patch 5360-drm-amd-display-Reorder-resource_pool-to-put-i2c-wit.patch
+patch 5361-drm-amd-display-use-link-type-to-decide-stream-enc-a.patch
+patch 5362-drm-amd-display-Remove-call-to-amdgpu_pm_compute_clo.patch
+patch 5363-drm-amd-display-clean-code-for-transition-event-log.patch
+patch 5364-drm-amd-display-Add-invariant-support-instrumentatio.patch
+patch 5365-drm-amd-display-Fix-warning-storm-on-Raven2.patch
+patch 5366-drm-amd-display-RV2-DP-MST-2nd-display-within-daisy-.patch
+patch 5367-drm-amdgpu-interim-disable-RV2-GFX-CG-flag-for-urgen.patch
+patch 5368-drm-drivers-drop-redundant-drm_edid_to_eld-calls.patch
+patch 5369-drm-amdgpu-add-license-to-Makefiles.patch
+patch 5370-drm-amdgpu-Fix-header-file-dependencies.patch
+patch 5371-drm-amdgpu-re-enable-CGCG-on-CZ-and-disable-on-ST.patch
+patch 5372-drm-amdgpu-Handle-64-bit-return-from-drm_crtc_vblank.patch
+patch 5373-drm-amdgpu-fix-module-parameter-descriptions.patch
+patch 5374-drm-amd-amdgpu-re-add-missing-GC-9.1-and-SDMA0-4.1-s.patch
+patch 5375-vga_switcheroo-Use-device-link-for-HDA-controller.patch
+patch 5376-drm-amd-pp-fix-missing-CONFIG_ACPI.patch
+patch 5377-drm-amdgpu-sdma4-use-a-helper-for-SDMA_OP_POLL_REGME.patch
+patch 5378-drm-amdgpu-include-pagemap.h-for-release_pages.patch
+patch 5379-drm-amdgpu-fix-32-bit-build-warning.patch
+patch 5380-drm-amdgpu-Add-AMDGPU_GPU_PAGES_IN_CPU_PAGE-define.patch
+patch 5381-drm-amd-display-Use-2-factor-allocator-calls.patch
+patch 5382-drm-amdgpu-move-context-related-stuff-to-amdgpu_ctx..patch
+patch 5383-drm-amdgpu-add-status-checking-after-fw-is-loaded.patch
+patch 5384-drm-amdgpu-revert-psp-firmware-load-status-check.patch
+patch 5385-Hybrid-Version-18.50.0.418.patch
+patch 5386-drm-amdgpu-improve-VM-state-machine-documentation-v2.patch
+patch 5387-drm-amdgpu-Fix-compute-VM-BO-params-after-rebase-v2.patch
+patch 5388-drm-amdgpu-Fix-warnings-while-make-xmldocs.patch
+patch 5389-drm-amd-powerplay-fix-compile-warning-for-wrong-data.patch
+patch 5390-drm-amdgpu-move-PSP-init-prior-to-IH-in-gpu-reset.patch
+patch 5391-drm-amd-include-update-the-bitfield-define-for-PF_MA.patch
+patch 5392-drm-amdgpu-gmc-add-initial-xgmi-structure-to-amdgpu_.patch
+patch 5393-drm-amdgpu-gmc9-add-a-new-gfxhub-1.1-helper-for-xgmi.patch
+patch 5394-drm-amdgpu-gmc9-Adjust-GART-and-AGP-location-with-xg.patch
+patch 5395-drm-amdgpu-Add-psp-function-interfaces-for-XGMI-supp.patch
+patch 5396-drm-amdgpu-Add-place-holder-functions-for-xgmi-topol.patch
+patch 5397-drm-amdgpu-Generate-XGMI-topology-info-from-driver-l.patch
+patch 5398-drm-amdgpu-Init-correct-fb-region-for-none-XGMI-conf.patch
+patch 5399-drm-amdgpu-fix-error-handling-in-amdgpu_cs_user_fenc.patch
+patch 5400-drm-amdgpu-add-amdgpu_vm_pt_parent-helper.patch
+patch 5401-drm-amdgpu-add-amdgpu_vm_update_func.patch
+patch 5402-drm-amdgpu-Fix-SDMA-TO-after-GPU-reset-v3.patch
+patch 5403-drm-amdgpu-move-cs-dependencies-front-a-bit.patch
+patch 5404-drm-amdgpu-Move-fault-hash-table-to-amdgpu-vm.patch
+patch 5405-drm-amd-display-fix-ptr_ret.cocci-warnings.patch
+patch 5406-drm-amdgpu-Add-error-message-when-register-failed-to.patch
+patch 5407-drm-amdgpu-add-some-VM-PD-PT-iterators-v2.patch
+patch 5408-drm-amdgpu-use-leaf-iterator-for-allocating-PD-PT.patch
+patch 5409-drm-amdgpu-use-dfs-iterator-to-free-PDs-PTs.patch
+patch 5410-drm-amdgpu-use-the-DFS-iterator-in-amdgpu_vm_invalid.patch
+patch 5411-drm-amdgpu-use-leaf-iterator-for-filling-PTs.patch
+patch 5412-drm-amd-display-Fix-pflip-IRQ-status-after-gpu-reset.patch
+patch 5413-drm-amdgpu-remove-amdgpu_bo_list_entry.robj.patch
+patch 5414-drm-amdgpu-remove-amdgpu_bo_list_entry.robj-for-rele.patch
+patch 5415-drm-amdgpu-fix-compilation-of-amdgpu_amdkfd_gpuvm.c.patch
+patch 5416-drm-amdgpu-use-a-single-linked-list-for-amdgpu_vm_bo.patch
+patch 5417-drm-amdgpu-Style-fixes-to-PRIME-code-documentation.patch
+patch 5418-drm-amd-display-add-aux-i2c-event-log.patch
+patch 5419-drm-amdgpu-fix-parameter-documentation-for-amdgpu_vm.patch
+patch 5420-drm-amdgpu-add-vega20-sriov-capability-detection.patch
+patch 5421-drm-amdgpu-Exclude-MM-engines-for-vega20-virtual-dev.patch
+patch 5422-drm-amd-dc-Trigger-set-power-state-task-when-display.patch
+patch 5423-drm-amd-pp-Honour-DC-s-clock-limits-on-Rv.patch
+patch 5424-drm-amd-pp-Return-error-immediately-if-load-firmware.patch
+patch 5425-drm-amd-display-Refactor-FPGA-specific-link-setup.patch
+patch 5426-drm-amd-display-use-proper-pipe_ctx-index.patch
+patch 5427-drm-amd-display-add-pp_smu-NULL-pointer-check.patch
+patch 5428-drm-amd-display-Add-color-bit-info-to-freesync-infof.patch
+patch 5429-drm-amd-display-program-v_update-and-v_ready-with-pr.patch
+patch 5430-drm-amd-display-dc-3.1.67.patch
+patch 5431-drm-amd-display-Stereo-3D-support-in-VSC.patch
+patch 5432-drm-amd-display-Guard-against-null-stream-dereferenc.patch
+patch 5433-drm-amd-display-Remove-mst_hotplug_work.patch
+patch 5434-drm-amd-display-fix-gamma-not-being-applied.patch
+patch 5435-drm-amd-display-Raise-dispclk-value-for-dce120-by-15.patch
+patch 5436-drm-amdgpu-powerplay-add-get_argument-callback-for-v.patch
+patch 5437-drm-amdgpu-powerplay-Move-vega10_enable_smc_features.patch
+patch 5438-drm-amdgpu-powerplay-add-smu-smc_table_manager-callb.patch
+patch 5439-drm-amdgpu-powerplay-add-smu-smc_table_manager-callb.patch
+patch 5440-drm-amdgpu-add-new-AMDGPU_PP_SENSOR_ENABLED_SMC_FEAT.patch
+patch 5441-drm-amdgpu-implement-ENABLED_SMC_FEATURES_MASK-senso.patch
+patch 5442-drm-amdgpu-implement-ENABLED_SMC_FEATURES_MASK-senso.patch
+patch 5443-drm-amdgpu-implement-ENABLED_SMC_FEATURES_MASK-senso.patch
+patch 5444-drm-amdgpu-print-smc-feature-mask-in-debugfs-amdgpu_.patch
+patch 5445-drm-amd-display-remove-redundant-null-pointer-check-.patch
+patch 5446-drm-amdgpu-Add-warning-message-for-INT-SW-fallback.patch
+patch 5447-drm-amdgpu-sriov-Correct-the-setting-about-sdma-door.patch
+patch 5448-drm-amdgpu-Deactivate-SW-interrupt-fallback-in-amdgp.patch
+patch 5449-drm-amdgpu-Refine-function-name.patch
+patch 5450-drm-amdgpu-Halt-rlc-cp-in-rlc_safe_mode.patch
+patch 5451-drm-amdgpu-Remove-redundant-code-in-gfx_v8_0.c.patch
+patch 5452-drm-amd-pp-Disable-dpm-features-on-smu7-8-when-suspe.patch
+patch 5453-drm-amdgpu-drop-extra-newline-in-amdgpu_iv-trace.patch
+patch 5454-drm-amdgpu-make-function-pointers-mandatory.patch
+patch 5455-drm-amdgpu-cleanup-amdgpu_ih.c.patch
+patch 5456-drm-amdgpu-Move-fence-SW-fallback-warning-v3.patch
+patch 5457-drm-amdgpu-move-more-interrupt-processing-into-amdgp.patch
+patch 5458-drm-amdgpu-move-more-defines-into-amdgpu_irq.h.patch
+patch 5459-drm-amdgpu-Use-register-UVD_SCRATCH9-for-VCN-ring-ib.patch
+patch 5460-drm-amdgpu-Add-new-register-offset-mask-to-support-V.patch
+patch 5461-drm-amdgpu-Add-DPG-support-flag.patch
+patch 5462-drm-amdgpu-Add-DPG-mode-read-write-macro.patch
+patch 5463-drm-amdgpu-Add-DPG-mode-support-for-vcn-1.0.patch
+patch 5464-drm-amdgpu-Add-DPG-pause-state.patch
+patch 5465-drm-amdgpu-Add-DPG-pause-mode-support.patch
+patch 5466-drm-amdgpu-soc15-fix-warnings-in-register-macro.patch
+patch 5467-drm-amdgpu-vcn-whitespace-cleanup.patch
+patch 5468-drm-amd-powerplay-correct-the-hwmon-interface-ppt-li.patch
+patch 5469-drm-amd-powerplay-tell-the-correct-gfx-voltage-V2.patch
+patch 5470-drm-amd-powerplay-enable-fan-RPM-and-pwm-settings-V2.patch
+patch 5471-drm-amdgpu-added-vega20-LBPW-support.patch
+patch 5472-drm-amdgpu-change-Raven-always-on-CUs-to-4.patch
+patch 5473-drm-amdgpu-vega20-make-power-profile-output-more-con.patch
+patch 5474-drm-amdgpu-add-default-case-to-switch-statement.patch
+patch 5475-drm-amdgpu-added-AMD-GPU-instance-counting-V2.patch
+patch 5476-drm-amd-powerplay-helper-interfaces-for-MGPU-fan-boo.patch
+patch 5477-drm-amd-powerplay-enable-MGPU-fan-boost-feature-on-V.patch
+patch 5478-drm-amdgpu-Fix-comments-error-in-sdma_v4_1_update_po.patch
+patch 5479-drm-amd-pp-Fix-fan-s-RPM-setting-not-work-on-VI-Vega.patch
+patch 5480-drm-amd-pp-Avoid-divide-by-zero-in-fan_ctrl_set_fan_.patch
+patch 5481-drm-amd-pp-Expose-the-smu-support-for-SDMA-PG-cntl.patch
+patch 5482-drm-amdgpu-Move-out-power-up-down-sdma-out-of-smu.patch
+patch 5483-drm-amd-pp-Remove-uncessary-extra-vcn-pg-cntl-in-smu.patch
+patch 5484-drm-amd-pp-Remove-wrong-code-in-fiji_start_smu.patch
+patch 5485-drm-amd-powerplay-Enable-Disable-NBPSTATE-on-On-OFF-.patch
+patch 5486-drm-amd-display-Add-DC-build_id-to-determine-build-t.patch
+patch 5487-drm-amd-display-fix-4K-stereo-screen-flash-issue.patch
+patch 5488-drm-amd-display-Add-a-check-function-for-virtual-sig.patch
+patch 5489-drm-amd-display-Calculate-swizzle-mode-using-bpp-dur.patch
+patch 5490-drm-amd-display-Add-function-to-fetch-clock-requirem.patch
+patch 5491-drm-amd-display-block-DP-YCbCr420-modes.patch
+patch 5492-drm-amd-display-clean-up-encoding-checks.patch
+patch 5493-drm-amd-display-WA-for-DF-keeps-awake-after-S0i3.patch
+patch 5494-drm-amd-display-dc-3.1.68.patch
+patch 5495-drm-amd-display-fix-memory-leak-in-resource-pools.patch
+patch 5496-drm-amd-display-Flatten-irq-handler-data-struct.patch
+patch 5497-drm-amd-display-fix-Interlace-video-timing.patch
+patch 5498-drm-amd-display-HLK-Periodic-Frame-Notification-test.patch
+patch 5499-drm-amd-display-Fix-Vega10-lightup-on-S3-resume.patch
+patch 5500-drm-amd-display-Raise-dispclk-value-for-dce_update_c.patch
+patch 5501-drm-amd-display-Signal-hw_done-after-waiting-for-fli.patch
+patch 5502-drm-amdgpu-Refine-uvd_v6-7_0_enc_get_destroy_msg.patch
+patch 5503-drm-amdgpu-Add-new-AMDGPU_PP_SENSOR_MIN-MAX_FAN_RPM-.patch
+patch 5504-drm-amd-pp-Implement-AMDGPU_PP_SENSOR_MIN-MAX_FAN_RP.patch
+patch 5505-drm-amdgpu-Add-fan-RPM-setting-via-sysfs.patch
+patch 5506-drm-amdgpu-Disable-sysfs-pwm1-if-not-in-manual-fan-c.patch
+patch 5507-drm-amdgpu-Drop-dead-define-in-amdgpu.h.patch
+patch 5508-drm-amd-pp-Fix-memory-leak-on-CI-AI.patch
+patch 5509-drm-amdgpu-Move-gfx-flag-in_suspend-to-adev.patch
+patch 5510-drm-amd-pp-Refine-function-iceland_start_smu.patch
+patch 5511-drm-amd-pp-Setup-SoftRegsStart-before-request-smu-lo.patch
+patch 5512-drm-amd-pp-Refine-smu7-8-request_smu_load_fw-callbac.patch
+patch 5513-drm-amdgpu-Remove-FW_LOAD_DIRECT-type-support-on-VI.patch
+patch 5514-drm-amdgpu-Don-t-reallocate-ucode-bo-when-suspend.patch
+patch 5515-drm-amd-pp-Allocate-ucode-bo-in-request_smu_load_fw.patch
+patch 5516-drm-amd-pp-Implement-load_firmware-interface.patch
+patch 5517-drm-amdgpu-Add-fw-load-in-gfx_v8-and-sdma_v3.patch
+patch 5518-drm-amdgpu-Change-VI-gfx-sdma-smu-init-sequence.patch
+patch 5519-drm-amdgpu-skip-IB-tests-for-KIQ-in-general.patch
+patch 5520-drm-amdgpu-Always-enable-fan-sensors-for-read.patch
+patch 5521-drm-amdgpu-remove-the-intterupt-handling-for-the-KIQ.patch
+patch 5522-drm-amdgpu-fix-AGP-location-with-VRAM-at-0x0.patch
+patch 5523-drm-amdgpu-fix-incorrect-use-of-amdgpu_irq_add_id-in.patch
+patch 5524-drm-amdgpu-vcn-Remove-unused-code.patch
+patch 5525-drm-amdgpu-vcn-fix-dpg-pause-mode-hang-issue.patch
+patch 5526-drm-amdgpu-vcn-Replace-value-with-defined-macro.patch
+patch 5527-drm-amdgpu-vcn-Correct-VCN-cache-window-definition.patch
+patch 5528-drm-amdgpu-add-CP_DEBUG-register-definition-for-GC9..patch
+patch 5529-drm-amdgpu-fix-CPDMA-hang-in-PRT-mode.patch
+patch 5530-drm-amdgpu-Limit-the-max-mc-address-to-hole-start.patch
+patch 5531-drm-amdgpu-Change-SI-CI-gfx-sdma-smu-init-sequence.patch
+patch 5532-drm-amdgpu-Change-AI-gfx-sdma-smu-init-sequence.patch
+patch 5533-drm-amdgpu-Refine-function-amdgpu_device_ip_late_ini.patch
+patch 5534-drm-amdgpu-Check-late_init-status-before-set-cg-pg-s.patch
+patch 5535-drm-amdgpu-Split-amdgpu_ucode_init-fini_bo-into-two-.patch
+patch 5536-drm-amdgpu-Remove-amdgpu_ucode_fini_bo.patch
+patch 5537-drm-amdgpu-split-ip-hw_init-into-2-phases.patch
+patch 5538-drm-amdgpu-Load-fw-between-hw_init-resume_phase1-and.patch
+patch 5539-drm-amdgpu-Remove-wrong-fw-loading-type-warning.patch
+patch 5540-drm-amdgpu-Remove-the-direct-fw-loading-support-for-.patch
+patch 5541-drm-amdgpu-powerplay-endian-fixes-for-vega10_process.patch
+patch 5542-drm-amdgpu-powerplay-endian-fixes-for-vega12_process.patch
+patch 5543-drm-amdgpu-powerplay-endian-fixes-for-vega20_process.patch
+patch 5544-drm-amdgpu-powerplay-factor-out-some-pptable-helpers.patch
+patch 5545-drm-amdgpu-Suppress-keypresses-from-ACPI_VIDEO-event.patch
+patch 5546-drm-amdgpu-powerplay-fix-missing-break-in-switch-sta.patch
+patch 5547-drm-amdgpu-remove-set-but-not-used-variable-ring-in-.patch
+patch 5548-drm-amdgpu-remove-set-but-not-used-variable-header.patch
+patch 5549-drm-amd-powerplay-translate-power_profile-mode-to-pp.patch
+patch 5550-drm-amd-powerplay-hint-when-power-profile-setting-is.patch
+patch 5551-drm-amdgpu-Set-the-default-value-about-gds-vmid0-siz.patch
+patch 5552-drm-amdgpu-vcn-Add-new-register-offset-mask-for-VCN.patch
+patch 5553-drm-amdgpu-vcn-Update-latest-UVD_MPC-register-for-VC.patch
+patch 5554-drm-amdgpu-vcn-Update-latest-spg-mode-stop-for-VCN.patch
+patch 5555-drm-amdgpu-vcn-Add-ring-W-R-PTR-check-for-VCN-DPG-mo.patch
+patch 5556-drm-amdgpu-vcn-Reduce-unnecessary-local-variable.patch
+patch 5557-drm-amdgpu-vcn-Update-DPG-mode-VCN-memory-control.patch
+patch 5558-drm-amdgpu-vcn-Update-DPG-mode-VCN-global-tiling-reg.patch
+patch 5559-drm-amdgpu-vcn-Add-DPG-mode-Register-XX-check.patch
+patch 5560-drm-amdgpu-vcn-Remove-DPG-mode-unused-steps-during-v.patch
+patch 5561-drm-amdgpu-vcn-Apply-new-UMC-enable-for-VNC-DPG-mode.patch
+patch 5562-drm-amdgpu-vcn-Update-SPG-mode-VCN-memory-control.patch
+patch 5563-drm-amdgpu-vcn-Update-SPG-mode-VCN-global-tiling.patch
+patch 5564-drm-amdgpu-vcn-Move-SPG-mode-mc-resume-after-MPC-con.patch
+patch 5565-drm-amdgpu-vcn-Add-SPG-mode-Register-XX-check.patch
+patch 5566-drm-amdgpu-vcn-Remove-SPG-mode-unused-steps-during-v.patch
+patch 5567-drm-amdgpu-vcn-Apply-new-UMC-enable-for-VNC-DPG-mode.patch
+patch 5568-drm-amdgpu-vcn-Set-VCPU-busy-after-gate-power-during.patch
+patch 5569-drm-amdgpu-vcn-Update-SPG-mode-UVD-status-clear.patch
+patch 5570-drm-amdgpu-display-dm-amdgpu-make-dp-phy-debugfs-for.patch
+patch 5571-drm-amdgpu-update-Vega20-SDMA-golden-setting.patch
+patch 5572-drm-amd-powerplay-added-I2C-controller-configuration.patch
+patch 5573-drm-amd-powerplay-update-PPtable-with-DC-BTC-and-Tvr.patch
+patch 5574-drm-amdgpu-Update-gc_9_0-golden-settings.patch
+patch 5575-drm-amdgpu-fix-sdma-doorbell-comments-typo.patch
+patch 5576-drm-amd-display-fix-bug-of-accessing-invalid-memory.patch
+patch 5577-drm-amd-display-dc-3.2.01.patch
+patch 5578-drm-amd-display-handle-max_vstartup-larger-than-vbla.patch
+patch 5579-drm-amd-display-move-pplib-smu-notification-to-dccg-.patch
+patch 5580-drm-amd-display-remove-safe_to_lower-flag-from-dc-us.patch
+patch 5581-drm-amd-display-Freesync-does-not-engage-on-some-dis.patch
+patch 5582-drm-amd-display-clean-up-base-dccg-struct.patch
+patch 5583-drm-amd-display-split-dccg-clock-manager-into-asic-f.patch
+patch 5584-drm-amd-display-Add-support-for-Freesync-2-HDR-and-C.patch
+patch 5585-drm-amd-display-initialize-dc_transfer_func-ctx.patch
+patch 5586-drm-amd-display-expose-hwseq-functions-and-add-regis.patch
+patch 5587-drm-amd-display-fix-report-display-count-logic.patch
+patch 5588-drm-amd-display-Add-link-encoder-dp_ycbcr420_support.patch
+patch 5589-drm-amd-display-Retiring-set_display_requirements-in.patch
+patch 5590-drm-amd-display-Retiring-set_display_requirements-in.patch
+patch 5591-drm-amd-display-rename-dccg-to-clk_mgr.patch
+patch 5592-drm-amd-display-add-dccg-block.patch
+patch 5593-drm-amd-display-dc-3.2.02.patch
+patch 5594-drm-amd-display-explicit-uint64_t-casting.patch
+patch 5595-drm-amd-display-rename-cstate_pstate_watermarks_st1.patch
+patch 5596-drm-amd-display-Fix-incorrect-end-slope-of-EETF.patch
+patch 5597-drm-amdgpu-correct-SPDX-identifier-in-amdgpu_trace_p.patch
+patch 5598-drm-amd-powerplay-bump-the-PPtable-version-supported.patch
+patch 5599-drm-amd-powerplay-correct-the-clocks-for-DAL-to-be-K.patch
+patch 5600-drm-amd-powerplay-revise-Vega20-pptable-version-chec.patch
+patch 5601-drm-amdgpu-support-Vega20-A1-ASICs.patch
+patch 5602-Revert-drm-amdgpu-add-amdgpu_gmc_get_pde_for_bo-help.patch
+patch 5603-drm-amdgpu-update-smu-firmware-images-for-VI-variant.patch
+patch 5604-drm-amd-display-Raise-dispclk-value-for-Polaris.patch
+patch 5605-drm-amdgpu-update-mc-firmware-image-for-polaris12-va.patch
+patch 5606-drm-amd-display-Fix-6x4K-displays-light-up-on-Vega20.patch
+patch 5607-drm-amdgpu-gmc8-update-MC-firmware-for-polaris.patch
+patch 5608-drm-amdgpu-gmc8-always-load-MC-firmware-in-the-drive.patch
+patch 5609-drm-amdgpu-both-support-PCO-FP5-AM4-rlc-fw.patch
+patch 5610-drm-amdgpu-update-SMC-firmware-image-for-polaris10-v.patch
+patch 5611-drm-amdgpu-powerplay-fix-mclk-switch-limit-on-polari.patch
+patch 5612-drm-amdgpu-powerplay-fix-clock-stretcher-limits-on-p.patch
+patch 5613-drm-amdgpu-powerplay-Apply-avfs-cks-off-voltages-on-.patch
+patch 5614-drm-amdgpu-revert-the-commit-interim-disable-RV2-GFX.patch
+patch 5615-drm-amdgpu-separate-amdgpu_rlc-into-a-single-file.patch
+patch 5616-drm-amdgpu-abstract-the-function-of-enter-exit-safe-.patch
+patch 5617-drm-amdgpu-make-gfx9-enter-into-rlc-safe-mode-when-s.patch
+patch 5618-drm-amdkfd-Roll-back-all-q4-amdkfd-patches-added-by-.patch
+patch 5619-drm-amdkfd-Change-the-control-stack-mtype-from-UC-to.patch
+patch 5620-drm-amdkfd-remove-check-for-PCIe-upstream-bridge.patch
+patch 5621-drm-amdgpu-kfd2kgd-Support-BO-create-from-sg.patch
+patch 5622-drm-amd-Update-KFD-Thunk-ioctl-ABI-to-match-upstream.patch
+patch 5623-drm-amdkfd-Fixing-compilation-issues.patch
+patch 5624-drm-amdkfd-Disable-the-perf-counters-for-old-kernels.patch
+patch 5625-drm-amdkfd-use-px-to-print-user-space-address-instea.patch
+patch 5626-drm-amdkfd-Simplify-dGPU-event-page-allocation.patch
+patch 5627-drm-amdkfd-Backwards-compatibility-with-old-Thunk.patch
+patch 5628-drm-amdkfd-Remove-pm_map_process_scratch_cik.patch
+patch 5629-drm-amdgpu-Remove-pm_map_process_cik.patch
+patch 5630-drm-amdkfd-Put-packet-sizes-directly-into-packet_man.patch
+patch 5631-drm-amdkfd-GPU-recovery-support-from-KFD-step-1.patch
+patch 5632-drm-amd-Add-kfd-ioctl-defines-for-hw_exception-event.patch
+patch 5633-drm-amdkfd-signal-hw_exception-event-on-GPU-reset.patch
+patch 5634-drm-amdkfd-remove-check-for-PCIe-upstream-bridge.patch
+patch 5635-drm-amdkfd-CMA-Refactor-CMA-code.patch
+patch 5636-drm-amdkfd-CMA-Store-cpuva-in-KFD-BO.patch
+patch 5637-drm-amdkfd-CMA-Handle-userptr-to-userptr-BO-copy.patch
+patch 5638-drm-amdkfd-CMA-Use-shadow-system-BO-for-userptr.patch
+patch 5639-Fix-SVM-missing-on-Raven.patch
+patch 5640-drm-amdkfd-Implement-SPI-debug-and-exception-support.patch
+patch 5641-drm-amdkfd-Implement-hw_exception-work-thread-to-han.patch
+patch 5642-drm-amdkfd-CMA-Remove-diff.-device-restriction.patch
+patch 5643-drm-amdkfd-CMA-Store-mem_type-in-KFD-BO.patch
+patch 5644-drm-amdkfd-CMA-Support-for-diff.-devices.patch
+patch 5645-drm-amdkfd-Remove-unused-variable.patch
+patch 5646-drm-amdfd-Don-t-hard-code-wait-time.patch
+patch 5647-drm-amdkfd-CMA-Add-intermediate-wait-if-mGPU.patch
+patch 5648-drm-amdkfd-CMA-Support-multi-device-VRAM-copy.patch
+patch 5649-drm-amdkfd-Reduce-priority-of-context-saving-waves-b.patch
+patch 5650-drm-amdkfd-Introduce-kfd-kernel-module-parameter-hal.patch
+patch 5651-drm-amdkfd-Use-module-parameters-noretry-as-the-inte.patch
+patch 5652-drm-amdkfd-Separate-trap-handler-assembly-code-and-i.patch
+patch 5653-drm-amdkfd-Mellanox-Support-PeerSync-interface.patch
+patch 5654-drm-amdkfd-Fix-CP-soft-hang-on-APUs.patch
+patch 5655-drm-amdkfd-Fix-typos-in-trap-handler-comments.patch
+patch 5656-drm-amdkfd-Align-Makefile-with-upstream.patch
+patch 5657-drm-amdkfd-Align-CIK-interrupt-processing-with-upstr.patch
+patch 5658-drm-amdkfd-Remove-IH-patching-workaround-for-Vega10.patch
+patch 5659-drm-amdkfd-Clean-up-mmap-handling.patch
+patch 5660-drm-amdkfd-fix-uninitialized-variable-use.patch
+patch 5661-drm-amdkfd-Fix-kernel-queue-rollback-for-64-bit-wptr.patch
+patch 5662-drm-amdkfd-Match-release_mem-interface-with-other-PM.patch
+patch 5663-drm-amdkfd-Simplify-packet-manager-initialization.patch
+patch 5664-drm-amdkfd-Fix-error-handling-in-pm_init.patch
+patch 5665-drm-amdkfd-Fix-pm_debugfs_runlist.patch
+patch 5666-drm-amdkfd-Check-ctx_save_restore_area_address.patch
+patch 5667-drm-amdkfd-Fix-error-handling-around-kfd_process_cre.patch
+patch 5668-drm-amdkfd-Fix-error-handling-in-APU-CWSR-mapping.patch
+patch 5669-drm-amdkfd-Simplify-error-handling-in-kfd_create_pro.patch
+patch 5670-drm-amdkfd-Simplify-obj-handle-allocation.patch
+patch 5671-drm-amdkfd-Error-if-trying-to-acquire-VM-for-a-PDD-t.patch
+patch 5672-drm-amdkfd-Cosmetic-changes-to-match-upstream.patch
+patch 5673-drm-amdkfd-Add-sanity-checks-in-IRQ-handlers.patch
+patch 5674-drm-amdkfd-Don-t-use-kmap_atomic.patch
+patch 5675-drm-amdkcl-fixed-can-t-find-kgd_kfd_interface.h-head.patch
+patch 5676-drm-amdkfd-Fix-kernel-queue-64-bit-doorbell-offset-c.patch
+patch 5677-drm-amdkfd-Fix-race-between-scheduler-and-context-re.patch
+patch 5678-drm-amdkfd-Add-debugfs-interface-to-trigger-HWS-hang.patch
+patch 5679-drm-amdkfd-Make-the-number-of-SDMA-queues-variable.patch
+patch 5680-drm-amdkfd-Vega20-bring-up-on-amdkfd-side.patch
+patch 5681-drm-amdkfd-reflect-atomic-support-in-IO-link-propert.patch
+patch 5682-drm-amdkfd-Add-check-user-queue-busy-interface.patch
+patch 5683-drm-amdkfd-Replace-mqd-with-mqd_mgr-as-the-variable-.patch
+patch 5684-drm-amd-amdgpu-Removing-unwanted-code-from-the-below.patch
+patch 5685-drm-amdkfd-Conditionally-enable-PCIe-atomics.patch
+patch 5686-drm-amdkfd-Fix-return-value-0-when-execute_queues_cp.patch
+patch 5687-drm-amdkfd-don-t-always-call-execute_queues_cpsch.patch
+patch 5688-drm-amdkfd-kfd_dev_is_large_bar-can-be-static.patch
+patch 5689-drm-amdkfd-fix-build-select-MMU_NOTIFIER.patch
+patch 5690-drm-amdkfd-Try-to-enable-atomics-for-all-GPUs.patch
+patch 5691-drm-amdkfd-Remove-queue-node-when-destroy-queue-fail.patch
+patch 5692-drm-amdkfd-Remove-vla.patch
+patch 5693-drm-admkfd-use-modern-ktime-accessors.patch
+patch 5694-drm-amdkfd-Stop-using-GFP_NOIO-explicitly.patch
+patch 5695-drm-amdkfd-fix-zero-reading-of-VMID-and-PASID-for-Ha.patch
+patch 5696-drm-amdkfd-Clean-up-reference-of-radeon.patch
+patch 5697-drm-amdkfd-Optimize-out-some-duplicated-code-in-kfd_.patch
+patch 5698-drm-amdkfd-Add-CU-masking-ioctl-to-KFD.patch
+patch 5699-drm-amdkfd-Call-kfd2kgd.set_compute_idle.patch
+patch 5700-Removed-DKMS-installed-KFD-check-for-kernel-version.patch
+patch 5701-drm-amdgpu-Merge-amdkfd-into-amdgpu.patch
+patch 5702-drm-amdgpu-Move-KFD-parameters-to-amdgpu-v3.patch
+patch 5703-kbuild-create-built-in.o-automatically-if-parent-dir.patch
+patch 5704-kbuild-remove-incremental-linking-option.patch
+patch 5705-kbuild-rename-built-in.o-to-built-in.a.patch
+patch 5706-drm-amdgpu-Need-to-set-moved-to-true-when-evict-bo.patch
+patch 5707-Fix-compilation-error.patch
+patch 5708-drm-amdkfd-Release-an-acquired-process-vm.patch
+patch 5709-drm-amdgpu-Relocate-some-definitions-v2.patch
+patch 5710-drm-amdkfd-Copy-in-KFD-related-files.patch
+patch 5711-drm-amdkfd-kfd-expose-the-hive_id-of-the-device-thro.patch
+patch 5712-drm-amdkfd-Add-new-iolink-type-defines.patch
+patch 5713-drm-amdkfd-Generate-xGMI-direct-iolink.patch
+patch 5714-drm-amdkfd-Only-add-bi-directional-iolink-on-GPU-wit.patch
+patch 5715-drm-amdkfd-change-system-memory-overcommit-limit.patch
+patch 5716-drm-amdkfd-Reliably-prevent-reclaim-FS-while-holding.patch
+patch 5717-Reverted-Update-KFD-Thunk-ioctl-ABI-to-match-upstrea.patch
+patch 5718-SWDEV-168581-dc-fix-sporadic-multiple-aux-transactio.patch
+patch 5719-drm-amdkfd-Rebsed-some-changes-in-kfd.patch
+patch 5720-drm-amdgpu-Clean-up-KFD-init-and-fini.patch
+patch 5721-net-ethernet-xgbe-expand-PHY_GBIT_FEAUTRES.patch
+patch 5722-Code-cleanup.patch
+patch 5723-drm-amdgpu-vcn-Fixed-S3-hung-issue.patch
+patch 5724-drm-amdgpu-change-VEGA-booting-with-firmware-loaded-.patch
+patch 5725-Fix-compilation-error-for-kfd.patch
+patch 5747-drm-amd-display-Raise-dispclk-value-for-CZ.patch
+patch 5748-drm-amdgpu-gfx8-disable-EDC.patch
+patch 5751-Revert-drm-amdgpu-make-gfx9-enter-into-rlc-safe-mode.patch
+patch 5752-Revert-drm-amdgpu-abstract-the-function-of-enter-exi.patch
+patch 5753-Revert-drm-amdgpu-separate-amdgpu_rlc-into-a-single-.patch
+patch 5754-drm-amdgpu-make-gfx9-enter-into-rlc-safe-mode-when-s.patch
+patch 5755-Revert-drm-amdgpu-revert-the-commit-interim-disable-.patch
+patch 5756-Revert-drm-amdgpu-revert-psp-firmware-load-status-ch.patch
+patch 5757-drm-amdgpu-psp-ignore-psp-response-status.patch
+
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/misc-patches.scc b/common/recipes-kernel/linux/linux-yocto-4.14.71/misc-patches.scc
new file mode 100644
index 00000000..fbe74ca5
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/misc-patches.scc
@@ -0,0 +1,5 @@
+patch 5726-amd-i2s-fix-to-the-fage-fault-when-iommu-is-enabled.patch
+patch 5727-amd-i2s-dma-pointer-uses-Link-position-counter.This-.patch
+patch 5740-lib-crc-Move-polynomial-definition-to-separate-heade.patch
+patch 5741-lib-crc-Use-consistent-naming-for-CRC-32-polynomials.patch
+
diff --git a/common/recipes-kernel/linux/linux-yocto_4.14.bbappend b/common/recipes-kernel/linux/linux-yocto_4.14.bbappend
index 4d6efd6d..563ccdc0 100644
--- a/common/recipes-kernel/linux/linux-yocto_4.14.bbappend
+++ b/common/recipes-kernel/linux/linux-yocto_4.14.bbappend
@@ -2,4 +2,6 @@ require linux-yocto-common_4.14.inc
KBRANCH_amdx86 ?= "v4.14/standard/base"
SRCREV_machine_amdx86 ?= "78a16a4d8cfd58f91be412797aac248e811d083b"
-SRC_URI_append_amdgpu = " file://amdgpu-patches.scc"
+SRC_URI_append_amdgpu += "file://amdgpu-patches.scc \
+ file://misc-patches.scc"
+